From: Linus Torvalds
Date: Wed, 13 Mar 2019 17:06:28 +0000 (-0700)
Subject: Merge tag 'kconfig-v5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy...
X-Git-Tag: 5.1-rc-smb3~32
X-Git-Url: http://git.samba.org/samba.git/?p=sfrench%2Fcifs-2.6.git;a=commitdiff_plain;h=5453a3df2a5eb49bc24615d4cf0d66b2aae05e5f;hp=8741908b3e29d35a33eeab6de60175958db8e54b

Merge tag 'kconfig-v5.1' of git://git./linux/kernel/git/masahiroy/linux-kbuild

Pull Kconfig updates from Masahiro Yamada:

 - rename lexer and parse files
 - fix 'Save as' menu of xconfig

* tag 'kconfig-v5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild:
  kconfig: fix 'Save As' menu of xconfig
  kconfig: rename zconf.y to parser.y
  kconfig: rename zconf.l to lexer.l
---

diff --git a/.clang-format b/.clang-format
index bc2ffb2a0b53..f49620f506f1 100644
--- a/.clang-format
+++ b/.clang-format
@@ -240,6 +240,7 @@ ForEachMacros:
   - 'for_each_set_bit'
   - 'for_each_set_bit_from'
   - 'for_each_sg'
+  - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
   - 'for_each_sibling_event'
   - '__for_each_thread'
@@ -289,7 +290,6 @@ ForEachMacros:
   - 'idr_for_each_entry_ul'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
-  - 'iov_for_each'
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
@@ -360,6 +360,7 @@ ForEachMacros:
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
   - 'rbtree_postorder_for_each_entry_safe'
+  - 'rdma_for_each_port'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
diff --git a/.mailmap b/.mailmap
index ea98fcc197e4..37e1847c7988 100644
--- a/.mailmap
+++ b/.mailmap
@@ -123,6 +123,7 @@ Mark Brown
 Mark Yao
 Martin Kepplinger
 Martin Kepplinger
+Mathieu Othacehe
 Matthew Wilcox
 Matthew Wilcox
 Matthew Wilcox
diff --git a/CREDITS b/CREDITS
index e818eb6a3e71..8e0342620a06 100644
--- a/CREDITS
+++ b/CREDITS
@@ -842,10 +842,9 @@ D: ax25-utils maintainer.
N: Helge Deller E: deller@gmx.de -E: hdeller@redhat.de -D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver -S: Schimmelsrain 1 -S: D-69231 Rauenberg +W: http://www.parisc-linux.org/ +D: PA-RISC Linux architecture maintainer +D: LASI-, ASP-, WAX-, LCD/LED-driver S: Germany N: Jean Delvare @@ -1222,7 +1221,7 @@ S: Brazil N: Oded Gabbay E: oded.gabbay@gmail.com -D: AMD KFD maintainer +D: HabanaLabs and AMD KFD maintainer S: 12 Shraga Raphaeli S: Petah-Tikva, 4906418 S: Israel @@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape S: South Africa N: Grant Grundler -E: grundler@parisc-linux.org +E: grantgrundler@gmail.com W: http://obmouse.sourceforge.net/ W: http://www.parisc-linux.org/ D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver @@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206 S: USA N: Kyle McMartin -E: kyle@parisc-linux.org +E: kyle@mcmartin.ca D: Linux/PARISC hacker D: AD1889 sound driver S: Ottawa, Canada @@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct S: Cupertino, CA 95014 S: USA -N: Thibaut Varene -E: T-Bone@parisc-linux.org -W: http://www.parisc-linux.org/~varenet/ -P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063 +N: Thibaut Varène +E: hacks+kernel@slashdirt.org +W: http://hacks.slashdirt.org/ D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there D: AD1889 sound driver -S: Paris, France +S: France N: Heikki Vatiainen E: hessu@cs.tut.fi diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus index 3fed8fdb873d..826689dcc2e6 100644 --- a/Documentation/ABI/stable/sysfs-bus-vmbus +++ b/Documentation/ABI/stable/sysfs-bus-vmbus @@ -146,3 +146,36 @@ KernelVersion: 4.16 Contact: Stephen Hemminger Description: Binary file created by uio_hv_generic for ring buffer Users: Userspace drivers + +What: /sys/bus/vmbus/devices//channels//intr_in_full +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Number of guest to host interrupts caused by the inbound ring + buffer transitioning from full to not full while a packet is + waiting for buffer space to become available +Users: Debugging tools + +What: /sys/bus/vmbus/devices//channels//intr_out_empty +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Number of guest to host interrupts caused by the outbound ring + buffer transitioning from empty to not empty +Users: Debugging tools + +What: /sys/bus/vmbus/devices//channels//out_full_first +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Number of write operations that were the first to encounter an + outbound ring buffer full condition +Users: Debugging tools + +What: /sys/bus/vmbus/devices//channels//out_full_total +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Total number of write operations that encountered an outbound + ring buffer full condition +Users: Debugging tools diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io index 9b642669cb16..156319fc5b80 100644 --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io @@ -21,10 +21,22 @@ Description: These files show with which CPLD versions have been burned The files are read only. 
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ - cpld3_version + fan_dir + +Date: December 2018 +KernelVersion: 5.0 +Contact: Vadim Pasternak +Description: This file shows the system fans direction: + forward direction - relevant bit is set 0; + reversed direction - relevant bit is set 1. + + The files are read only. + +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ + jtag_enable Date: November 2018 -KernelVersion: 4.21 +KernelVersion: 5.0 Contact: Vadim Pasternak Description: These files show with which CPLD versions have been burned on LED board. @@ -35,7 +47,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ jtag_enable Date: November 2018 -KernelVersion: 4.21 +KernelVersion: 5.0 Contact: Vadim Pasternak Description: These files enable and disable the access to the JTAG domain. By default access to the JTAG domain is disabled. @@ -105,7 +117,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ reset_voltmon_upgrade_fail Date: November 2018 -KernelVersion: 4.21 +KernelVersion: 5.0 Contact: Vadim Pasternak Description: These files show the system reset cause, as following: ComEx power fail, reset from ComEx, system platform reset, reset diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs new file mode 100644 index 000000000000..2f5b80be07a3 --- /dev/null +++ b/Documentation/ABI/testing/debugfs-driver-habanalabs @@ -0,0 +1,126 @@ +What: /sys/kernel/debug/habanalabs/hl/addr +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets the device address to be used for read or write through + PCI bar. The acceptable value is a string that starts with "0x" + +What: /sys/kernel/debug/habanalabs/hl/command_buffers +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays a list with information about the currently allocated + command buffers + +What: /sys/kernel/debug/habanalabs/hl/command_submission +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays a list with information about the currently active + command submissions + +What: /sys/kernel/debug/habanalabs/hl/command_submission_jobs +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays a list with detailed information about each JOB (CB) of + each active command submission + +What: /sys/kernel/debug/habanalabs/hl/data32 +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Allows the root user to read or write directly through the + device's PCI bar. Writing to this file generates a write + transaction while reading from the file generates a read + transcation. This custom interface is needed (instead of using + the generic Linux user-space PCI mapping) because the DDR bar + is very small compared to the DDR memory and only the driver can + move the bar before and after the transaction + +What: /sys/kernel/debug/habanalabs/hl/device +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Enables the root user to set the device to specific state. + Valid values are "disable", "enable", "suspend", "resume". 
+ User can read this property to see the valid values + +What: /sys/kernel/debug/habanalabs/hl/i2c_addr +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets I2C device address for I2C transaction that is generated + by the device's CPU + +What: /sys/kernel/debug/habanalabs/hl/i2c_bus +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets I2C bus address for I2C transaction that is generated by + the device's CPU + +What: /sys/kernel/debug/habanalabs/hl/i2c_data +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Triggers an I2C transaction that is generated by the device's + CPU. Writing to this file generates a write transaction while + reading from the file generates a read transcation + +What: /sys/kernel/debug/habanalabs/hl/i2c_reg +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets I2C register id for I2C transaction that is generated by + the device's CPU + +What: /sys/kernel/debug/habanalabs/hl/led0 +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets the state of the first S/W led on the device + +What: /sys/kernel/debug/habanalabs/hl/led1 +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets the state of the second S/W led on the device + +What: /sys/kernel/debug/habanalabs/hl/led2 +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets the state of the third S/W led on the device + +What: /sys/kernel/debug/habanalabs/hl/mmu +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the hop values and physical address for a given ASID + and virtual address. The user should write the ASID and VA into + the file and then read the file to get the result. + e.g. to display info about VA 0x1000 for ASID 1 you need to do: + echo "1 0x1000" > /sys/kernel/debug/habanalabs/hl0/mmu + +What: /sys/kernel/debug/habanalabs/hl/set_power_state +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Sets the PCI power state. Valid values are "1" for D0 and "2" + for D3Hot + +What: /sys/kernel/debug/habanalabs/hl/userptr +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays a list with information about the currently user + pointers (user virtual addresses) that are pinned and mapped + to DMA addresses + +What: /sys/kernel/debug/habanalabs/hl/vm +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays a list with information about all the active virtual + address mappings per ASID diff --git a/Documentation/ABI/testing/debugfs-wilco-ec b/Documentation/ABI/testing/debugfs-wilco-ec new file mode 100644 index 000000000000..f814f112e213 --- /dev/null +++ b/Documentation/ABI/testing/debugfs-wilco-ec @@ -0,0 +1,23 @@ +What: /sys/kernel/debug/wilco_ec/raw +Date: January 2019 +KernelVersion: 5.1 +Description: + Write and read raw mailbox commands to the EC. + + For writing: + Bytes 0-1 indicate the message type: + 00 F0 = Execute Legacy Command + 00 F2 = Read/Write NVRAM Property + Byte 2 provides the command code + Bytes 3+ consist of the data passed in the request + + At least three bytes are required, for the msg type and command, + with additional bytes optional for additional data. + + Example: + // Request EC info type 3 (EC firmware build date) + $ echo 00 f0 38 00 03 00 > raw + // View the result. 
The decoded ASCII result "12/21/18" is + // included after the raw hex. + $ cat raw + 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 .12/21/18.8... diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 8127a08e366d..864f8efd12e5 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -1554,6 +1554,10 @@ What: /sys/bus/iio/devices/iio:deviceX/in_concentration_raw What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_raw What: /sys/bus/iio/devices/iio:deviceX/in_concentration_co2_raw What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_co2_raw +What: /sys/bus/iio/devices/iio:deviceX/in_concentration_ethanol_raw +What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_ethanol_raw +What: /sys/bus/iio/devices/iio:deviceX/in_concentration_h2_raw +What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_h2_raw What: /sys/bus/iio/devices/iio:deviceX/in_concentration_voc_raw What: /sys/bus/iio/devices/iio:deviceX/in_concentrationX_voc_raw KernelVersion: 4.3 @@ -1684,4 +1688,19 @@ KernelVersion: 4.18 Contact: linux-iio@vger.kernel.org Description: Raw (unscaled) phase difference reading from channel Y - that can be processed to radians. \ No newline at end of file + that can be processed to radians. + +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm1_input +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm1_input +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm2p5_input +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm2p5_input +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm4_input +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm4_input +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentration_pm10_input +What: /sys/bus/iio/devices/iio:deviceX/in_massconcentrationY_pm10_input +KernelVersion: 4.22 +Contact: linux-iio@vger.kernel.org +Description: + Mass concentration reading of particulate matter in ug / m3. + pmX consists of particles with aerodynamic diameter less or + equal to X micrometers. diff --git a/Documentation/ABI/testing/sysfs-bus-iio-sps30 b/Documentation/ABI/testing/sysfs-bus-iio-sps30 new file mode 100644 index 000000000000..143df8e89d08 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-iio-sps30 @@ -0,0 +1,28 @@ +What: /sys/bus/iio/devices/iio:deviceX/start_cleaning +Date: December 2018 +KernelVersion: 4.22 +Contact: linux-iio@vger.kernel.org +Description: + Writing 1 starts sensor self cleaning. Internal fan accelerates + to its maximum speed and keeps spinning for about 10 seconds in + order to blow out accumulated dust. + +What: /sys/bus/iio/devices/iio:deviceX/cleaning_period +Date: January 2019 +KernelVersion: 5.1 +Contact: linux-iio@vger.kernel.org +Description: + Sensor is capable of triggering self cleaning periodically. + Period can be changed by writing a new value here. Upon reading + the current one is returned. Units are seconds. + + Writing 0 disables periodical self cleaning entirely. + +What: /sys/bus/iio/devices/iio:deviceX/cleaning_period_available +Date: January 2019 +KernelVersion: 5.1 +Contact: linux-iio@vger.kernel.org +Description: + The range of available values in seconds represented as the + minimum value, the step and the maximum value, all enclosed in + square brackets. 
diff --git a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices index 4d48a9451866..d1f667104944 100644 --- a/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices +++ b/Documentation/ABI/testing/sysfs-bus-intel_th-output-devices @@ -3,11 +3,13 @@ Date: June 2015 KernelVersion: 4.3 Contact: Alexander Shishkin Description: (RW) Writes of 1 or 0 enable or disable trace output to this - output device. Reads return current status. + output device. Reads return current status. Requires that the + correstponding output port driver be loaded. What: /sys/bus/intel_th/devices/-msc/port Date: June 2015 KernelVersion: 4.3 Contact: Alexander Shishkin Description: (RO) Port number, corresponding to this output device on the - switch (GTH). + switch (GTH) or "unassigned" if the corresponding output + port driver is not loaded. diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb index 559baa5c418c..614d216dff1d 100644 --- a/Documentation/ABI/testing/sysfs-bus-usb +++ b/Documentation/ABI/testing/sysfs-bus-usb @@ -186,7 +186,7 @@ Contact: Lan Tianyu Description: Some platforms provide usb port connect types through ACPI. This attribute is to expose these information to user space. - The file will read "hotplug", "wired" and "not used" if the + The file will read "hotplug", "hardwired" and "not used" if the information is available, and "unknown" otherwise. What: /sys/bus/usb/devices/.../(hub interface)/portX/location diff --git a/Documentation/ABI/testing/sysfs-class-chromeos b/Documentation/ABI/testing/sysfs-class-chromeos new file mode 100644 index 000000000000..5819699d66ec --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-chromeos @@ -0,0 +1,32 @@ +What: /sys/class/chromeos//flashinfo +Date: August 2015 +KernelVersion: 4.2 +Description: + Show the EC flash information. + +What: /sys/class/chromeos//kb_wake_angle +Date: March 2018 +KernelVersion: 4.17 +Description: + Control the keyboard wake lid angle. Values are between + 0 and 360. This file will also show the keyboard wake lid + angle by querying the hardware. + +What: /sys/class/chromeos//reboot +Date: August 2015 +KernelVersion: 4.2 +Description: + Tell the EC to reboot in various ways. Options are: + "cancel": Cancel a pending reboot. + "ro": Jump to RO without rebooting. + "rw": Jump to RW without rebooting. + "cold": Cold reboot. + "disable-jump": Disable jump until next reboot. + "hibernate": Hibernate the EC. + "at-shutdown": Reboot after an AP shutdown. + +What: /sys/class/chromeos//version +Date: August 2015 +KernelVersion: 4.2 +Description: + Show the information about the EC software and hardware. diff --git a/Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-lightbar b/Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-lightbar new file mode 100644 index 000000000000..57a037791403 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-lightbar @@ -0,0 +1,74 @@ +What: /sys/class/chromeos//lightbar/brightness +Date: August 2015 +KernelVersion: 4.2 +Description: + Writing to this file adjusts the overall brightness of + the lightbar, separate from any color intensity. The + valid range is 0 (off) to 255 (maximum brightness). 
+ +What: /sys/class/chromeos//lightbar/interval_msec +Date: August 2015 +KernelVersion: 4.2 +Description: + The lightbar is controlled by an embedded controller (EC), + which also manages the keyboard, battery charging, fans, + and other system hardware. To prevent unprivileged users + from interfering with the other EC functions, the rate at + which the lightbar control files can be read or written is + limited. + + Reading this file will return the number of milliseconds + that must elapse between accessing any of the lightbar + functions through this interface. Going faster will simply + block until the necessary interval has lapsed. The interval + applies uniformly to all accesses of any kind by any user. + +What: /sys/class/chromeos//lightbar/led_rgb +Date: August 2015 +KernelVersion: 4.2 +Description: + This allows you to control each LED segment. If the + lightbar is already running one of the automatic + sequences, you probably won’t see anything change because + your color setting will be almost immediately replaced. + To get useful results, you should stop the lightbar + sequence first. + + The values written to this file are sets of four integers, + indicating LED, RED, GREEN, BLUE. The LED number is 0 to 3 + to select a single segment, or 4 to set all four segments + to the same value at once. The RED, GREEN, and BLUE + numbers should be in the range 0 (off) to 255 (maximum). + You can update more than one segment at a time by writing + more than one set of four integers. + +What: /sys/class/chromeos//lightbar/program +Date: August 2015 +KernelVersion: 4.2 +Description: + This allows you to upload and run custom lightbar sequences. + +What: /sys/class/chromeos//lightbar/sequence +Date: August 2015 +KernelVersion: 4.2 +Description: + The Pixel lightbar has a number of built-in sequences + that it displays under various conditions, such as at + power on, shut down, or while running. Reading from this + file displays the current sequence that the lightbar is + displaying. Writing to this file allows you to change the + sequence. + +What: /sys/class/chromeos//lightbar/userspace_control +Date: August 2015 +KernelVersion: 4.2 +Description: + This allows you to take the control of the lightbar. This + prevents the kernel from going through its normal + sequences. + +What: /sys/class/chromeos//lightbar/version +Date: August 2015 +KernelVersion: 4.2 +Description: + Show the information about the lightbar version. diff --git a/Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-vbc b/Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-vbc new file mode 100644 index 000000000000..38c5aaaaa89a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-chromeos-driver-cros-ec-vbc @@ -0,0 +1,6 @@ +What: /sys/class/chromeos//vbc/vboot_context +Date: October 2015 +KernelVersion: 4.4 +Description: + Read/write the verified boot context data included on a + small nvram space on some EC implementations. diff --git a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern index 1e5d172e0646..bd92ef9d6faa 100644 --- a/Documentation/ABI/testing/sysfs-class-led-trigger-pattern +++ b/Documentation/ABI/testing/sysfs-class-led-trigger-pattern @@ -7,55 +7,10 @@ Description: timer. It can do gradual dimming and step change of brightness. The pattern is given by a series of tuples, of brightness and - duration (ms). The LED is expected to traverse the series and - each brightness value for the specified duration. 
Duration of - 0 means brightness should immediately change to new value, and - writing malformed pattern deactivates any active one. + duration (ms). - 1. For gradual dimming, the dimming interval now is set as 50 - milliseconds. So the tuple with duration less than dimming - interval (50ms) is treated as a step change of brightness, - i.e. the subsequent brightness will be applied without adding - intervening dimming intervals. - - The gradual dimming format of the software pattern values should be: - "brightness_1 duration_1 brightness_2 duration_2 brightness_3 - duration_3 ...". For example: - - echo 0 1000 255 2000 > pattern - - It will make the LED go gradually from zero-intensity to max (255) - intensity in 1000 milliseconds, then back to zero intensity in 2000 - milliseconds: - - LED brightness - ^ - 255-| / \ / \ / - | / \ / \ / - | / \ / \ / - | / \ / \ / - 0-| / \/ \/ - +---0----1----2----3----4----5----6------------> time (s) - - 2. To make the LED go instantly from one brightness value to another, - we should use zero-time lengths (the brightness must be same as - the previous tuple's). So the format should be: - "brightness_1 duration_1 brightness_1 0 brightness_2 duration_2 - brightness_2 0 ...". For example: - - echo 0 1000 0 0 255 2000 255 0 > pattern - - It will make the LED stay off for one second, then stay at max brightness - for two seconds: - - LED brightness - ^ - 255-| +---------+ +---------+ - | | | | | - | | | | | - | | | | | - 0-| -----+ +----+ +---- - +---0----1----2----3----4----5----6------------> time (s) + The exact format is described in: + Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt What: /sys/class/leds//hw_pattern Date: September 2018 diff --git a/Documentation/ABI/testing/sysfs-class-watchdog b/Documentation/ABI/testing/sysfs-class-watchdog index 736046b33040..6317ade5ad19 100644 --- a/Documentation/ABI/testing/sysfs-class-watchdog +++ b/Documentation/ABI/testing/sysfs-class-watchdog @@ -49,3 +49,26 @@ Contact: Wim Van Sebroeck Description: It is a read only file. It is read to know about current value of timeout programmed. + +What: /sys/class/watchdog/watchdogn/pretimeout +Date: December 2016 +Contact: Wim Van Sebroeck +Description: + It is a read only file. It specifies the time in seconds before + timeout when the pretimeout interrupt is delivered. Pretimeout + is an optional feature. + +What: /sys/class/watchdog/watchdogn/pretimeout_avaialable_governors +Date: February 2017 +Contact: Wim Van Sebroeck +Description: + It is a read only file. It shows the pretimeout governors + available for this watchdog. + +What: /sys/class/watchdog/watchdogn/pretimeout_governor +Date: February 2017 +Contact: Wim Van Sebroeck +Description: + It is a read/write file. When read, the currently assigned + pretimeout governor is returned. When written, it sets + the pretimeout governor. 
diff --git a/Documentation/ABI/testing/sysfs-driver-habanalabs b/Documentation/ABI/testing/sysfs-driver-habanalabs new file mode 100644 index 000000000000..78b2bcf316a3 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-habanalabs @@ -0,0 +1,190 @@ +What: /sys/class/habanalabs/hl/armcp_kernel_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Version of the Linux kernel running on the device's CPU + +What: /sys/class/habanalabs/hl/armcp_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Version of the application running on the device's CPU + +What: /sys/class/habanalabs/hl/cpld_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Version of the Device's CPLD F/W + +What: /sys/class/habanalabs/hl/device_type +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the code name of the device according to its type. + The supported values are: "GOYA" + +What: /sys/class/habanalabs/hl/eeprom +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: A binary file attribute that contains the contents of the + on-board EEPROM + +What: /sys/class/habanalabs/hl/fuse_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the device's version from the eFuse + +What: /sys/class/habanalabs/hl/hard_reset +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Interface to trigger a hard-reset operation for the device. + Hard-reset will reset ALL internal components of the device + except for the PCI interface and the internal PLLs + +What: /sys/class/habanalabs/hl/hard_reset_cnt +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays how many times the device have undergone a hard-reset + operation since the driver was loaded + +What: /sys/class/habanalabs/hl/high_pll +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Allows the user to set the maximum clock frequency for MME, TPC + and IC when the power management profile is set to "automatic". + +What: /sys/class/habanalabs/hl/ic_clk +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Allows the user to set the maximum clock frequency of the + Interconnect fabric. Writes to this parameter affect the device + only when the power management profile is set to "manual" mode. + The device IC clock might be set to lower value then the + maximum. The user should read the ic_clk_curr to see the actual + frequency value of the IC + +What: /sys/class/habanalabs/hl/ic_clk_curr +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the current clock frequency of the Interconnect fabric + +What: /sys/class/habanalabs/hl/infineon_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Version of the Device's power supply F/W code + +What: /sys/class/habanalabs/hl/max_power +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Allows the user to set the maximum power consumption of the + device in milliwatts. + +What: /sys/class/habanalabs/hl/mme_clk +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Allows the user to set the maximum clock frequency of the + MME compute engine. Writes to this parameter affect the device + only when the power management profile is set to "manual" mode. 
+ The device MME clock might be set to lower value then the + maximum. The user should read the mme_clk_curr to see the actual + frequency value of the MME + +What: /sys/class/habanalabs/hl/mme_clk_curr +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the current clock frequency of the MME compute engine + +What: /sys/class/habanalabs/hl/pci_addr +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the PCI address of the device. This is needed so the + user would be able to open a device based on its PCI address + +What: /sys/class/habanalabs/hl/pm_mng_profile +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Power management profile. Values are "auto", "manual". In "auto" + mode, the driver will set the maximum clock frequency to a high + value when a user-space process opens the device's file (unless + it was already opened by another process). The driver will set + the max clock frequency to a low value when there are no user + processes that are opened on the device's file. In "manual" + mode, the user sets the maximum clock frequency by writing to + ic_clk, mme_clk and tpc_clk + + +What: /sys/class/habanalabs/hl/preboot_btl_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Version of the device's preboot F/W code + +What: /sys/class/habanalabs/hl/soft_reset +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Interface to trigger a soft-reset operation for the device. + Soft-reset will reset only the compute and DMA engines of the + device + +What: /sys/class/habanalabs/hl/soft_reset_cnt +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays how many times the device have undergone a soft-reset + operation since the driver was loaded + +What: /sys/class/habanalabs/hl/status +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Status of the card: "Operational", "Malfunction", "In reset". + +What: /sys/class/habanalabs/hl/thermal_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Version of the Device's thermal daemon + +What: /sys/class/habanalabs/hl/tpc_clk +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Allows the user to set the maximum clock frequency of the + TPC compute engines. Writes to this parameter affect the device + only when the power management profile is set to "manual" mode. + The device TPC clock might be set to lower value then the + maximum. 
The user should read the tpc_clk_curr to see the actual + frequency value of the TPC + +What: /sys/class/habanalabs/hl/tpc_clk_curr +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the current clock frequency of the TPC compute engines + +What: /sys/class/habanalabs/hl/uboot_ver +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Version of the u-boot running on the device's CPU + +What: /sys/class/habanalabs/hl/write_open_cnt +Date: Jan 2019 +KernelVersion: 5.1 +Contact: oded.gabbay@gmail.com +Description: Displays the total number of user processes that are currently + opened on the device's file diff --git a/Documentation/ABI/testing/sysfs-fs-ext4 b/Documentation/ABI/testing/sysfs-fs-ext4 index c631253cf85c..78604db56279 100644 --- a/Documentation/ABI/testing/sysfs-fs-ext4 +++ b/Documentation/ABI/testing/sysfs-fs-ext4 @@ -109,3 +109,10 @@ Description: write operation (since a 4k random write might turn into a much larger write due to the zeroout operation). + +What: /sys/fs/ext4//journal_task +Date: February 2019 +Contact: "Theodore Ts'o" +Description: + This file is read-only and shows the pid of journal thread in + current pid-namespace or 0 if task is unreachable. diff --git a/Documentation/ABI/testing/sysfs-kernel-livepatch b/Documentation/ABI/testing/sysfs-kernel-livepatch index dac7e1e62a8b..85db352f68f9 100644 --- a/Documentation/ABI/testing/sysfs-kernel-livepatch +++ b/Documentation/ABI/testing/sysfs-kernel-livepatch @@ -33,18 +33,6 @@ Description: An attribute which indicates whether the patch is currently in transition. -What: /sys/kernel/livepatch//signal -Date: Nov 2017 -KernelVersion: 4.15.0 -Contact: live-patching@vger.kernel.org -Description: - A writable attribute that allows administrator to affect the - course of an existing transition. Writing 1 sends a fake - signal to all remaining blocking tasks. The fake signal - means that no proper signal is delivered (there is no data in - signal pending structures). Tasks are interrupted or woken up, - and forced to change their patched state. - What: /sys/kernel/livepatch//force Date: Nov 2017 KernelVersion: 4.15.0 diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt index f0cc3f772265..1a721d0f35c8 100644 --- a/Documentation/DMA-API-HOWTO.txt +++ b/Documentation/DMA-API-HOWTO.txt @@ -146,114 +146,75 @@ What about block I/O and networking buffers? The block I/O and networking subsystems make sure that the buffers they use are valid for you to DMA from/to. -DMA addressing limitations +DMA addressing capabilities ========================== -Does your device have any DMA addressing limitations? For example, is -your device only capable of driving the low order 24-bits of address? -If so, you need to inform the kernel of this fact. +By default, the kernel assumes that your device can address 32-bits of DMA +addressing. For a 64-bit capable device, this needs to be increased, and for +a device with limitations, it needs to be decreased. -By default, the kernel assumes that your device can address the full -32-bits. For a 64-bit capable device, this needs to be increased. -And for a device with limitations, as discussed in the previous -paragraph, it needs to be decreased. +Special note about PCI: PCI-X specification requires PCI-X devices to support +64-bit addressing (DAC) for all transactions. And at least one platform (SGI +SN2) requires 64-bit consistent allocations to operate correctly when the IO +bus is in PCI-X mode. 
-Special note about PCI: PCI-X specification requires PCI-X devices to -support 64-bit addressing (DAC) for all transactions. And at least -one platform (SGI SN2) requires 64-bit consistent allocations to -operate correctly when the IO bus is in PCI-X mode. +For correct operation, you must set the DMA mask to inform the kernel about +your devices DMA addressing capabilities. -For correct operation, you must interrogate the kernel in your device -probe routine to see if the DMA controller on the machine can properly -support the DMA addressing limitation your device has. It is good -style to do this even if your device holds the default setting, -because this shows that you did think about these issues wrt. your -device. - -The query is performed via a call to dma_set_mask_and_coherent():: +This is performed via a call to dma_set_mask_and_coherent():: int dma_set_mask_and_coherent(struct device *dev, u64 mask); -which will query the mask for both streaming and coherent APIs together. -If you have some special requirements, then the following two separate -queries can be used instead: +which will set the mask for both streaming and coherent APIs together. If you +have some special requirements, then the following two separate calls can be +used instead: - The query for streaming mappings is performed via a call to + The setup for streaming mappings is performed via a call to dma_set_mask():: int dma_set_mask(struct device *dev, u64 mask); - The query for consistent allocations is performed via a call + The setup for consistent allocations is performed via a call to dma_set_coherent_mask():: int dma_set_coherent_mask(struct device *dev, u64 mask); -Here, dev is a pointer to the device struct of your device, and mask -is a bit mask describing which bits of an address your device -supports. It returns zero if your card can perform DMA properly on -the machine given the address mask you provided. In general, the -device struct of your device is embedded in the bus-specific device -struct of your device. For example, &pdev->dev is a pointer to the -device struct of a PCI device (pdev is a pointer to the PCI device -struct of your device). +Here, dev is a pointer to the device struct of your device, and mask is a bit +mask describing which bits of an address your device supports. Often the +device struct of your device is embedded in the bus-specific device struct of +your device. For example, &pdev->dev is a pointer to the device struct of a +PCI device (pdev is a pointer to the PCI device struct of your device). -If it returns non-zero, your device cannot perform DMA properly on -this platform, and attempting to do so will result in undefined -behavior. You must either use a different mask, or not use DMA. +These calls usually return zero to indicated your device can perform DMA +properly on the machine given the address mask you provided, but they might +return an error if the mask is too small to be supportable on the given +system. If it returns non-zero, your device cannot perform DMA properly on +this platform, and attempting to do so will result in undefined behavior. +You must not use DMA on this device unless the dma_set_mask family of +functions has returned success. -This means that in the failure case, you have three options: +This means that in the failure case, you have two options: -1) Use another DMA mask, if possible (see below). -2) Use some non-DMA mode for data transfer, if possible. -3) Ignore this device and do not initialize it. 
+1) Use some non-DMA mode for data transfer, if possible. +2) Ignore this device and do not initialize it. -It is recommended that your driver print a kernel KERN_WARNING message -when you end up performing either #2 or #3. In this manner, if a user -of your driver reports that performance is bad or that the device is not -even detected, you can ask them for the kernel messages to find out -exactly why. +It is recommended that your driver print a kernel KERN_WARNING message when +setting the DMA mask fails. In this manner, if a user of your driver reports +that performance is bad or that the device is not even detected, you can ask +them for the kernel messages to find out exactly why. -The standard 32-bit addressing device would do something like this:: +The standard 64-bit addressing device would do something like this:: - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } -Another common scenario is a 64-bit capable device. The approach here -is to try for 64-bit addressing, but back down to a 32-bit mask that -should not fail. The kernel may fail the 64-bit mask not because the -platform is not capable of 64-bit addressing. Rather, it may fail in -this case simply because 32-bit addressing is done more efficiently -than 64-bit addressing. For example, Sparc64 PCI SAC addressing is -more efficient than DAC addressing. - -Here is how you would handle a 64-bit capable device which can drive -all 64-bits when accessing streaming DMA:: - - int using_dac; +If the device only supports 32-bit addressing for descriptors in the +coherent allocations, but supports full 64-bits for streaming mappings +it would look like this: - if (!dma_set_mask(dev, DMA_BIT_MASK(64))) { - using_dac = 1; - } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) { - using_dac = 0; - } else { - dev_warn(dev, "mydev: No suitable DMA available\n"); - goto ignore_this_device; - } - -If a card is capable of using 64-bit consistent allocations as well, -the case would look like this:: - - int using_dac, consistent_using_dac; - - if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { - using_dac = 1; - consistent_using_dac = 1; - } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { - using_dac = 0; - consistent_using_dac = 0; - } else { + if (dma_set_mask(dev, DMA_BIT_MASK(64))) { dev_warn(dev, "mydev: No suitable DMA available\n"); goto ignore_this_device; } diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index e133ccd60228..0076150fdccb 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt @@ -195,6 +195,14 @@ Requesting the required mask does not alter the current mask. If you wish to take advantage of it, you should issue a dma_set_mask() call to set the mask to the value returned. +:: + + size_t + dma_direct_max_mapping_size(struct device *dev); + +Returns the maximum size of a mapping for the device. The size parameter +of the mapping functions like dma_map_single(), dma_map_page() and +others should not be larger than the returned value. Part Id - Streaming DMA mappings -------------------------------- @@ -530,8 +538,8 @@ that simply cannot make consistent memory. dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) -Free memory allocated by the dma_alloc_attrs(). 
All parameters common -parameters must identical to those otherwise passed to dma_fre_coherent, +Free memory allocated by the dma_alloc_attrs(). All common +parameters must be identical to those otherwise passed to dma_free_coherent, and the attrs argument must be identical to the attrs passed to dma_alloc_attrs(). @@ -566,8 +574,7 @@ boundaries when doing this. int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, - dma_addr_t device_addr, size_t size, int - flags) + dma_addr_t device_addr, size_t size); Declare region of memory to be handed out by dma_alloc_coherent() when it's asked for coherent memory for this device. @@ -581,12 +588,6 @@ dma_addr_t in dma_alloc_coherent()). size is the size of the area (must be multiples of PAGE_SIZE). -flags can be ORed together and are: - -- DMA_MEMORY_EXCLUSIVE - only allocate memory from the declared regions. - Do not allow dma_alloc_coherent() to fall back to system memory when - it's out of memory in the declared region. - As a simplification for the platforms, only *one* such region of memory may be declared per device. @@ -605,23 +606,6 @@ unconditionally having removed all the required structures. It is the driver's job to ensure that no parts of this memory region are currently in use. -:: - - void * - dma_mark_declared_memory_occupied(struct device *dev, - dma_addr_t device_addr, size_t size) - -This is used to occupy specific regions of the declared space -(dma_alloc_coherent() will hand out the first free region it finds). - -device_addr is the *device* address of the region requested. - -size is the size (and should be a page-sized multiple). - -The return value will be either a pointer to the processor virtual -address of the memory, or an error (via PTR_ERR()) if any part of the -region is occupied. - Part III - Debug drivers use of the DMA-API ------------------------------------------- @@ -696,6 +680,9 @@ dma-api/disabled This read-only file contains the character 'Y' happen when it runs out of memory or if it was disabled at boot time +dma-api/dump This read-only file contains current DMA + mappings. + dma-api/error_count This file is read-only and shows the total numbers of errors found. @@ -717,7 +704,7 @@ dma-api/num_free_entries The current number of free dma_debug_entries dma-api/nr_total_entries The total number of dma_debug_entries in the allocator, both free and used. -dma-api/driver-filter You can write a name of a driver into this file +dma-api/driver_filter You can write a name of a driver into this file to limit the debug output to requests from that particular driver. Write an empty string to that file to disable the filter and see diff --git a/Documentation/DMA-ISA-LPC.txt b/Documentation/DMA-ISA-LPC.txt index 8c2b8be6e45b..b1ec7b16c21f 100644 --- a/Documentation/DMA-ISA-LPC.txt +++ b/Documentation/DMA-ISA-LPC.txt @@ -52,8 +52,8 @@ Address translation ------------------- To translate the virtual address to a bus address, use the normal DMA -API. Do _not_ use isa_virt_to_phys() even though it does the same -thing. The reason for this is that the function isa_virt_to_phys() +API. Do _not_ use isa_virt_to_bus() even though it does the same +thing. The reason for this is that the function isa_virt_to_bus() will require a Kconfig dependency to ISA, not just ISA_DMA_API which is really all you need. Remember that even though the DMA controller has its origins in ISA it is used elsewhere. 
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg index e4233ac93c2b..6189ffcc6aff 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/ExpSchedFlow.svg @@ -328,13 +328,13 @@ inkscape:window-height="1148" id="namedview90" showgrid="true" - inkscape:zoom="0.80021373" - inkscape:cx="462.49289" - inkscape:cy="473.6718" + inkscape:zoom="0.69092787" + inkscape:cx="476.34085" + inkscape:cy="712.80957" inkscape:window-x="770" inkscape:window-y="24" inkscape:window-maximized="0" - inkscape:current-layer="g4114-9-3-9" + inkscape:current-layer="g4" inkscape:snap-grids="false" fit-margin-top="5" fit-margin-right="5" @@ -813,14 +813,18 @@ Requestreched_cpu() + id="tspan3100">context switch diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html index 8e4f873b979f..19e7a5fb6b73 100644 --- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html +++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html @@ -72,10 +72,10 @@ will ignore it because idle and offline CPUs are already residing in quiescent states. Otherwise, the expedited grace period will use smp_call_function_single() to send the CPU an IPI, which -is handled by sync_rcu_exp_handler(). +is handled by rcu_exp_handler().

-However, because this is preemptible RCU, sync_rcu_exp_handler() +However, because this is preemptible RCU, rcu_exp_handler() can check to see if the CPU is currently running in an RCU read-side critical section. If not, the handler can immediately report a quiescent state. @@ -145,19 +145,18 @@ expedited grace period is shown in the following diagram:

ExpSchedFlow.svg

-As with RCU-preempt's synchronize_rcu_expedited(), +As with RCU-preempt, RCU-sched's synchronize_sched_expedited() ignores offline and idle CPUs, again because they are in remotely detectable quiescent states. -However, the synchronize_rcu_expedited() handler -is sync_sched_exp_handler(), and because the +However, because the rcu_read_lock_sched() and rcu_read_unlock_sched() leave no trace of their invocation, in general it is not possible to tell whether or not the current CPU is in an RCU read-side critical section. -The best that sync_sched_exp_handler() can do is to check +The best that RCU-sched's rcu_exp_handler() can do is to check for idle, on the off-chance that the CPU went idle while the IPI was in flight. -If the CPU is idle, then sync_sched_exp_handler() reports +If the CPU is idle, then rcu_exp_handler() reports the quiescent state.

Otherwise, the handler forces a future context switch by setting the @@ -298,19 +297,18 @@ Instead, the task pushing the grace period forward will include the idle CPUs in the mask passed to rcu_report_exp_cpu_mult().

-For RCU-sched, there is an additional check for idle in the IPI -handler, sync_sched_exp_handler(). +For RCU-sched, there is an additional check: If the IPI has interrupted the idle loop, then -sync_sched_exp_handler() invokes rcu_report_exp_rdp() +rcu_exp_handler() invokes rcu_report_exp_rdp() to report the corresponding quiescent state.

For RCU-preempt, there is no specific check for idle in the -IPI handler (sync_rcu_exp_handler()), but because +IPI handler (rcu_exp_handler()), but because RCU read-side critical sections are not permitted within the -idle loop, if sync_rcu_exp_handler() sees that the CPU is within +idle loop, if rcu_exp_handler() sees that the CPU is within RCU read-side critical section, the CPU cannot possibly be idle. -Otherwise, sync_rcu_exp_handler() invokes +Otherwise, rcu_exp_handler() invokes rcu_report_exp_rdp() to report the corresponding quiescent state, regardless of whether or not that quiescent state was due to the CPU being idle. @@ -625,6 +623,8 @@ checks, but only during the mid-boot dead zone.

With this refinement, synchronous grace periods can now be used from task context pretty much any time during the life of the kernel. +That is, aside from some points in the suspend, hibernate, or shutdown +code path.

Summary

diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html index e4d94fba6c89..8d21af02b1f0 100644 --- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html +++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html @@ -485,13 +485,13 @@ section that the grace period must wait on. noted by rcu_node_context_switch() on the left. On the other hand, if the CPU takes a scheduler-clock interrupt while executing in usermode, a quiescent state will be noted by -rcu_check_callbacks() on the right. +rcu_sched_clock_irq() on the right. Either way, the passage through a quiescent state will be noted in a per-CPU variable.

The next time an RCU_SOFTIRQ handler executes on this CPU (for example, after the next scheduler-clock -interrupt), __rcu_process_callbacks() will invoke +interrupt), rcu_core() will invoke rcu_check_quiescent_state(), which will notice the recorded quiescent state, and invoke rcu_report_qs_rdp(). @@ -651,7 +651,7 @@ to end. These callbacks are identified by rcu_advance_cbs(), which is usually invoked by __note_gp_changes(). As shown in the diagram below, this invocation can be triggered by -the scheduling-clock interrupt (rcu_check_callbacks() on +the scheduling-clock interrupt (rcu_sched_clock_irq() on the left) or by idle entry (rcu_cleanup_after_idle() on the right, but only for kernels build with CONFIG_RCU_FAST_NO_HZ=y). diff --git a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg index 832408313d93..3fcf0c17cef2 100644 --- a/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg +++ b/Documentation/RCU/Design/Memory-Ordering/TreeRCU-callback-invocation.svg @@ -349,7 +349,7 @@ font-weight="bold" font-size="192" id="text202-7-5" - style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_check_callbacks() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq() rcu_check_callbacks() + xml:space="preserve">rcu_sched_clock_irq() rcu_process_callbacks() + xml:space="preserve">rcu_core() rcu_check_quiescent_state()) + xml:space="preserve">rcu_check_quiescent_state() rcu_check_callbacks() + style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_sched_clock_irq() rcu_check_callbacks() + xml:space="preserve">rcu_sched_clock_irq() rcu_process_callbacks() + xml:space="preserve">rcu_core() rcu_check_quiescent_state()) + xml:space="preserve">rcu_check_quiescent_state() srcu_read_unlock(), guarantees a full memory barrier.

-Also unlike other RCU flavors, SRCU's callbacks-wait function -srcu_barrier() may be invoked from CPU-hotplug notifiers, -though this is not necessarily a good idea. -The reason that this is possible is that SRCU is insensitive -to whether or not a CPU is online, which means that srcu_barrier() -need not exclude CPU-hotplug operations. +Also unlike other RCU flavors, synchronize_srcu() may not +be invoked from CPU-hotplug notifiers, due to the fact that SRCU grace +periods make use of timers and the possibility of timers being temporarily +“stranded” on the outgoing CPU. +This stranding of timers means that timers posted to the outgoing CPU +will not fire until late in the CPU-hotplug process. +The problem is that if a notifier is waiting on an SRCU grace period, +that grace period is waiting on a timer, and that timer is stranded on the +outgoing CPU, then the notifier will never be awakened, in other words, +deadlock has occurred. +This same situation of course also prohibits srcu_barrier() +from being invoked from CPU-hotplug notifiers.

SRCU also differs from other RCU flavors in that SRCU's expedited and diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt index 238e9f61352f..9c015976b174 100644 --- a/Documentation/RCU/lockdep-splat.txt +++ b/Documentation/RCU/lockdep-splat.txt @@ -14,9 +14,9 @@ being the real world and all that. So let's look at an example RCU lockdep splat from 3.0-rc5, one that has long since been fixed: -=============================== -[ INFO: suspicious RCU usage. ] -------------------------------- +============================= +WARNING: suspicious RCU usage +----------------------------- block/cfq-iosched.c:2776 suspicious rcu_dereference_protected() usage! other info that might help us debug this: @@ -24,11 +24,11 @@ other info that might help us debug this: rcu_scheduler_active = 1, debug_locks = 0 3 locks held by scsi_scan_6/1552: - #0: (&shost->scan_mutex){+.+.+.}, at: [] + #0: (&shost->scan_mutex){+.+.}, at: [] scsi_scan_host_selected+0x5a/0x150 - #1: (&eq->sysfs_lock){+.+...}, at: [] + #1: (&eq->sysfs_lock){+.+.}, at: [] elevator_exit+0x22/0x60 - #2: (&(&q->__queue_lock)->rlock){-.-...}, at: [] + #2: (&(&q->__queue_lock)->rlock){-.-.}, at: [] cfq_exit_queue+0x43/0x190 stack backtrace: diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt index 073dbc12d1ea..1ab70c37921f 100644 --- a/Documentation/RCU/stallwarn.txt +++ b/Documentation/RCU/stallwarn.txt @@ -219,17 +219,18 @@ an estimate of the total number of RCU callbacks queued across all CPUs In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed for each CPU: - 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D + 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 Nonlazy posted: ..D The "last_accelerate:" prints the low-order 16 bits (in hex) of the jiffies counter when this CPU last invoked rcu_try_advance_all_cbs() from rcu_needs_cpu() or last invoked rcu_accelerate_cbs() from -rcu_prepare_for_idle(). The "nonlazy_posted:" prints the number -of non-lazy callbacks posted since the last call to rcu_needs_cpu(). -Finally, an "L" indicates that there are currently no non-lazy callbacks -("." is printed otherwise, as shown above) and "D" indicates that -dyntick-idle processing is enabled ("." is printed otherwise, for example, -if disabled via the "nohz=" kernel boot parameter). +rcu_prepare_for_idle(). The "Nonlazy posted:" indicates lazy-callback +status, so that an "l" indicates that all callbacks were lazy at the start +of the last idle period and an "L" indicates that there are currently +no non-lazy callbacks (in both cases, "." is printed otherwise, as +shown above) and "D" indicates that dyntick-idle processing is enabled +("." is printed otherwise, for example, if disabled via the "nohz=" +kernel boot parameter). If the grace period ends just as the stall warning starts printing, there will be a spurious stall-warning message, which will include diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt index 55918b54808b..a41a0384d20c 100644 --- a/Documentation/RCU/torture.txt +++ b/Documentation/RCU/torture.txt @@ -10,173 +10,8 @@ status messages via printk(), which can be examined via the dmesg command (perhaps grepping for "torture"). The test is started when the module is loaded, and stops when the module is unloaded. 
- -MODULE PARAMETERS - -This module has the following parameters: - -fqs_duration Duration (in microseconds) of artificially induced bursts - of force_quiescent_state() invocations. In RCU - implementations having force_quiescent_state(), these - bursts help force races between forcing a given grace - period and that grace period ending on its own. - -fqs_holdoff Holdoff time (in microseconds) between consecutive calls - to force_quiescent_state() within a burst. - -fqs_stutter Wait time (in seconds) between consecutive bursts - of calls to force_quiescent_state(). - -gp_normal Make the fake writers use normal synchronous grace-period - primitives. - -gp_exp Make the fake writers use expedited synchronous grace-period - primitives. If both gp_normal and gp_exp are set, or - if neither gp_normal nor gp_exp are set, then randomly - choose the primitive so that about 50% are normal and - 50% expedited. By default, neither are set, which - gives best overall test coverage. - -irqreader Says to invoke RCU readers from irq level. This is currently - done via timers. Defaults to "1" for variants of RCU that - permit this. (Or, more accurately, variants of RCU that do - -not- permit this know to ignore this variable.) - -n_barrier_cbs If this is nonzero, RCU barrier testing will be conducted, - in which case n_barrier_cbs specifies the number of - RCU callbacks (and corresponding kthreads) to use for - this testing. The value cannot be negative. If you - specify this to be non-zero when torture_type indicates a - synchronous RCU implementation (one for which a member of - the synchronize_rcu() rather than the call_rcu() family is - used -- see the documentation for torture_type below), an - error will be reported and no testing will be carried out. - -nfakewriters This is the number of RCU fake writer threads to run. Fake - writer threads repeatedly use the synchronous "wait for - current readers" function of the interface selected by - torture_type, with a delay between calls to allow for various - different numbers of writers running in parallel. - nfakewriters defaults to 4, which provides enough parallelism - to trigger special cases caused by multiple writers, such as - the synchronize_srcu() early return optimization. - -nreaders This is the number of RCU reading threads supported. - The default is twice the number of CPUs. Why twice? - To properly exercise RCU implementations with preemptible - read-side critical sections. - -onoff_interval - The number of seconds between each attempt to execute a - randomly selected CPU-hotplug operation. Defaults to - zero, which disables CPU hotplugging. In HOTPLUG_CPU=n - kernels, rcutorture will silently refuse to do any - CPU-hotplug operations regardless of what value is - specified for onoff_interval. - -onoff_holdoff The number of seconds to wait until starting CPU-hotplug - operations. This would normally only be used when - rcutorture was built into the kernel and started - automatically at boot time, in which case it is useful - in order to avoid confusing boot-time code with CPUs - coming and going. - -shuffle_interval - The number of seconds to keep the test threads affinitied - to a particular subset of the CPUs, defaults to 3 seconds. - Used in conjunction with test_no_idle_hz. - -shutdown_secs The number of seconds to run the test before terminating - the test and powering off the system. The default is - zero, which disables test termination and system shutdown. - This capability is useful for automated testing. 
- -stall_cpu The number of seconds that a CPU should be stalled while - within both an rcu_read_lock() and a preempt_disable(). - This stall happens only once per rcutorture run. - If you need multiple stalls, use modprobe and rmmod to - repeatedly run rcutorture. The default for stall_cpu - is zero, which prevents rcutorture from stalling a CPU. - - Note that attempts to rmmod rcutorture while the stall - is ongoing will hang, so be careful what value you - choose for this module parameter! In addition, too-large - values for stall_cpu might well induce failures and - warnings in other parts of the kernel. You have been - warned! - -stall_cpu_holdoff - The number of seconds to wait after rcutorture starts - before stalling a CPU. Defaults to 10 seconds. - -stat_interval The number of seconds between output of torture - statistics (via printk()). Regardless of the interval, - statistics are printed when the module is unloaded. - Setting the interval to zero causes the statistics to - be printed -only- when the module is unloaded, and this - is the default. - -stutter The length of time to run the test before pausing for this - same period of time. Defaults to "stutter=5", so as - to run and pause for (roughly) five-second intervals. - Specifying "stutter=0" causes the test to run continuously - without pausing, which is the old default behavior. - -test_boost Whether or not to test the ability of RCU to do priority - boosting. Defaults to "test_boost=1", which performs - RCU priority-inversion testing only if the selected - RCU implementation supports priority boosting. Specifying - "test_boost=0" never performs RCU priority-inversion - testing. Specifying "test_boost=2" performs RCU - priority-inversion testing even if the selected RCU - implementation does not support RCU priority boosting, - which can be used to test rcutorture's ability to - carry out RCU priority-inversion testing. - -test_boost_interval - The number of seconds in an RCU priority-inversion test - cycle. Defaults to "test_boost_interval=7". It is - usually wise for this value to be relatively prime to - the value selected for "stutter". - -test_boost_duration - The number of seconds to do RCU priority-inversion testing - within any given "test_boost_interval". Defaults to - "test_boost_duration=4". - -test_no_idle_hz Whether or not to test the ability of RCU to operate in - a kernel that disables the scheduling-clock interrupt to - idle CPUs. Boolean parameter, "1" to test, "0" otherwise. - Defaults to omitting this test. - -torture_type The type of RCU to test, with string values as follows: - - "rcu": rcu_read_lock(), rcu_read_unlock() and call_rcu(), - along with expedited, synchronous, and polling - variants. - - "rcu_bh": rcu_read_lock_bh(), rcu_read_unlock_bh(), and - call_rcu_bh(), along with expedited and synchronous - variants. - - "rcu_busted": This tests an intentionally incorrect version - of RCU in order to help test rcutorture itself. - - "srcu": srcu_read_lock(), srcu_read_unlock() and - call_srcu(), along with expedited and - synchronous variants. - - "sched": preempt_disable(), preempt_enable(), and - call_rcu_sched(), along with expedited, - synchronous, and polling variants. - - "tasks": voluntary context switch and call_rcu_tasks(), - along with expedited and synchronous variants. - - Defaults to "rcu". - -verbose Enable debug printk()s. Default is disabled. - +Module parameters are prefixed by "rcutorture." in +Documentation/admin-guide/kernel-parameters.txt. 
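As an illustrative sketch (the parameter values below are made-up examples),
the same parameters can be supplied on the kernel boot line using the
"rcutorture." prefix, or passed to modprobe without the prefix:

	rcutorture.torture_type=srcu rcutorture.stall_cpu=30     (boot command line)

	# modprobe rcutorture torture_type=srcu stall_cpu=30     (module load)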
OUTPUT diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt index 4a6854318b17..1ace20815bb1 100644 --- a/Documentation/RCU/whatisRCU.txt +++ b/Documentation/RCU/whatisRCU.txt @@ -302,7 +302,7 @@ rcu_dereference() must prohibit. The rcu_dereference_protected() variant takes a lockdep expression to indicate which locks must be acquired by the caller. If the indicated protection is not provided, - a lockdep splat is emitted. See RCU/Design/Requirements.html + a lockdep splat is emitted. See RCU/Design/Requirements/Requirements.html and the API's code comments for more details and example usage. The following diagram shows how each API communicates among the @@ -560,7 +560,7 @@ presents two such "toy" implementations of RCU, one that is implemented in terms of familiar locking primitives, and another that more closely resembles "classic" RCU. Both are way too simple for real-world use, lacking both functionality and performance. However, they are useful -in getting a feel for how RCU works. See kernel/rcupdate.c for a +in getting a feel for how RCU works. See kernel/rcu/update.c for a production-quality implementation, and see: http://www.rdrop.com/users/paulmck/RCU diff --git a/Documentation/acpi/initrd_table_override.txt b/Documentation/acpi/initrd_table_override.txt index eb651a6aa285..30437a6db373 100644 --- a/Documentation/acpi/initrd_table_override.txt +++ b/Documentation/acpi/initrd_table_override.txt @@ -14,6 +14,10 @@ upgrade the ACPI execution environment that is defined by the ACPI tables via upgrading the ACPI tables provided by the BIOS with an instrumented, modified, more recent version one, or installing brand new ACPI tables. +When building initrd with kernel in a single image, option +ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD should also be true for this +feature to work. + For a full list of ACPI tables that can be upgraded/installed, take a look at the char *table_sigs[MAX_ACPI_SIGNATURE]; definition in drivers/acpi/tables.c. diff --git a/Documentation/admin-guide/LSM/SafeSetID.rst b/Documentation/admin-guide/LSM/SafeSetID.rst new file mode 100644 index 000000000000..212434ef65ad --- /dev/null +++ b/Documentation/admin-guide/LSM/SafeSetID.rst @@ -0,0 +1,107 @@ +========= +SafeSetID +========= +SafeSetID is an LSM module that gates the setid family of syscalls to restrict +UID/GID transitions from a given UID/GID to only those approved by a +system-wide whitelist. These restrictions also prohibit the given UIDs/GIDs +from obtaining auxiliary privileges associated with CAP_SET{U/G}ID, such as +allowing a user to set up user namespace UID mappings. + + +Background +========== +In absence of file capabilities, processes spawned on a Linux system that need +to switch to a different user must be spawned with CAP_SETUID privileges. +CAP_SETUID is granted to programs running as root or those running as a non-root +user that have been explicitly given the CAP_SETUID runtime capability. It is +often preferable to use Linux runtime capabilities rather than file +capabilities, since using file capabilities to run a program with elevated +privileges opens up possible security holes since any user with access to the +file can exec() that program to gain the elevated privileges. + +While it is possible to implement a tree of processes by giving full +CAP_SET{U/G}ID capabilities, this is often at odds with the goals of running a +tree of processes under non-root user(s) in the first place. 
Specifically,
+since CAP_SETUID allows changing to any user on the system, including the root
+user, it is an overpowered capability for what is needed in this scenario,
+especially since programs often only call setuid() to drop privileges to a
+lesser-privileged user -- not elevate privileges. Unfortunately, there is no
+generally feasible way in Linux to restrict the potential UIDs that a user can
+switch to through setuid() beyond allowing a switch to any user on the system.
+This SafeSetID LSM seeks to provide a solution for restricting setid
+capabilities in such a way.
+
+The main use case for this LSM is to allow a non-root program to transition to
+other untrusted uids without full blown CAP_SETUID capabilities. The non-root
+program would still need CAP_SETUID to do any kind of transition, but the
+additional restrictions imposed by this LSM would mean it is a "safer" version
+of CAP_SETUID since the non-root program cannot take advantage of CAP_SETUID to
+do any unapproved actions (e.g. setuid to uid 0 or create/enter new user
+namespace). The higher level goal is to allow for uid-based sandboxing of system
+services without having to give out CAP_SETUID all over the place just so that
+non-root programs can drop to even-lesser-privileged uids. This is especially
+relevant when one non-root daemon on the system should be allowed to spawn other
+processes as different uids, but it's undesirable to give the daemon a
+basically-root-equivalent CAP_SETUID.
+
+
+Other Approaches Considered
+===========================
+
+Solve this problem in userspace
+-------------------------------
+For candidate applications that would like to have restricted setid capabilities
+as implemented in this LSM, an alternative option would be to simply take away
+setid capabilities from the application completely and refactor the process
+spawning semantics in the application (e.g. by using a privileged helper program
+to do process spawning and UID/GID transitions). Unfortunately, there are a
+number of semantics around process spawning that would be affected by this, such
+as fork() calls where the program doesn't immediately call exec() after the
+fork(), parent processes specifying custom environment variables or command line
+args for spawned child processes, or inheritance of file handles across a
+fork()/exec(). Because of this, a solution that uses a privileged helper in
+userspace would likely be less appealing to incorporate into existing projects
+that rely on certain process-spawning semantics in Linux.
+
+Use user namespaces
+-------------------
+Another possible approach would be to run a given process tree in its own user
+namespace and give programs in the tree setid capabilities. In this way,
+programs in the tree could change to any desired UID/GID in the context of their
+own user namespace, and only approved UIDs/GIDs could be mapped back to the
+initial system user namespace, effectively preventing privilege escalation.
+Unfortunately, it is not generally feasible to use user namespaces in isolation,
+without pairing them with other namespace types, which is not always an option.
+Linux checks for capabilities based off of the user namespace that "owns" some
+entity. For example, Linux has the notion that network namespaces are owned by
+the user namespace in which they were created.
A consequence of this is that +capability checks for access to a given network namespace are done by checking +whether a task has the given capability in the context of the user namespace +that owns the network namespace -- not necessarily the user namespace under +which the given task runs. Therefore spawning a process in a new user namespace +effectively prevents it from accessing the network namespace owned by the +initial namespace. This is a deal-breaker for any application that expects to +retain the CAP_NET_ADMIN capability for the purpose of adjusting network +configurations. Using user namespaces in isolation causes problems regarding +other system interactions, including use of pid namespaces and device creation. + +Use an existing LSM +------------------- +None of the other in-tree LSMs have the capability to gate setid transitions, or +even employ the security_task_fix_setuid hook at all. SELinux says of that hook: +"Since setuid only affects the current process, and since the SELinux controls +are not based on the Linux identity attributes, SELinux does not need to control +this operation." + + +Directions for use +================== +This LSM hooks the setid syscalls to make sure transitions are allowed if an +applicable restriction policy is in place. Policies are configured through +securityfs by writing to the safesetid/add_whitelist_policy and +safesetid/flush_whitelist_policies files at the location where securityfs is +mounted. The format for adding a policy is ':', using literal +numbers, such as '123:456'. To flush the policies, any write to the file is +sufficient. Again, configuring a policy for a UID will prevent that UID from +obtaining auxiliary setid privileges, such as allowing a user to set up user +namespace UID mappings. diff --git a/Documentation/admin-guide/LSM/index.rst b/Documentation/admin-guide/LSM/index.rst index c980dfe9abf1..a6ba95fbaa9f 100644 --- a/Documentation/admin-guide/LSM/index.rst +++ b/Documentation/admin-guide/LSM/index.rst @@ -17,9 +17,8 @@ MAC extensions, other extensions can be built using the LSM to provide specific changes to system operation when these tweaks are not available in the core functionality of Linux itself. -Without a specific LSM built into the kernel, the default LSM will be the -Linux capabilities system. Most LSMs choose to extend the capabilities -system, building their checks on top of the defined capability hooks. +The Linux capabilities modules will always be included. This may be +followed by any number of "minor" modules and at most one "major" module. For more details on capabilities, see ``capabilities(7)`` in the Linux man-pages project. @@ -30,6 +29,14 @@ order in which checks are made. The capability module will always be first, followed by any "minor" modules (e.g. Yama) and then the one "major" module (e.g. SELinux) if there is one configured. +Process attributes associated with "major" security modules should +be accessed and maintained using the special files in ``/proc/.../attr``. +A security module may maintain a module specific subdirectory there, +named after the module. ``/proc/.../attr/smack`` is provided by the Smack +security module and contains all its special files. The files directly +in ``/proc/.../attr`` remain as legacy interfaces for modules that provide +subdirectories. + .. toctree:: :maxdepth: 1 @@ -39,3 +46,4 @@ the one "major" module (e.g. SELinux) if there is one configured. 
Smack tomoyo Yama + SafeSetID diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst index 0797eec76be1..a582c780c3bd 100644 --- a/Documentation/admin-guide/README.rst +++ b/Documentation/admin-guide/README.rst @@ -1,9 +1,9 @@ .. _readme: -Linux kernel release 4.x +Linux kernel release 5.x ============================================= -These are the release notes for Linux version 4. Read them carefully, +These are the release notes for Linux version 5. Read them carefully, as they tell you what this is all about, explain how to install the kernel, and what to do if something goes wrong. @@ -63,7 +63,7 @@ Installing the kernel source directory where you have permissions (e.g. your home directory) and unpack it:: - xz -cd linux-4.X.tar.xz | tar xvf - + xz -cd linux-5.x.tar.xz | tar xvf - Replace "X" with the version number of the latest kernel. @@ -72,26 +72,26 @@ Installing the kernel source files. They should match the library, and not get messed up by whatever the kernel-du-jour happens to be. - - You can also upgrade between 4.x releases by patching. Patches are + - You can also upgrade between 5.x releases by patching. Patches are distributed in the xz format. To install by patching, get all the newer patch files, enter the top level directory of the kernel source - (linux-4.X) and execute:: + (linux-5.x) and execute:: - xz -cd ../patch-4.x.xz | patch -p1 + xz -cd ../patch-5.x.xz | patch -p1 - Replace "x" for all versions bigger than the version "X" of your current + Replace "x" for all versions bigger than the version "x" of your current source tree, **in_order**, and you should be ok. You may want to remove the backup files (some-file-name~ or some-file-name.orig), and make sure that there are no failed patches (some-file-name# or some-file-name.rej). If there are, either you or I have made a mistake. - Unlike patches for the 4.x kernels, patches for the 4.x.y kernels + Unlike patches for the 5.x kernels, patches for the 5.x.y kernels (also known as the -stable kernels) are not incremental but instead apply - directly to the base 4.x kernel. For example, if your base kernel is 4.0 - and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1 - and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and - want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is, - patch -R) **before** applying the 4.0.3 patch. You can read more on this in + directly to the base 5.x kernel. For example, if your base kernel is 5.0 + and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1 + and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and + want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is, + patch -R) **before** applying the 5.0.3 patch. You can read more on this in :ref:`Documentation/process/applying-patches.rst `. Alternatively, the script patch-kernel can be used to automate this @@ -114,7 +114,7 @@ Installing the kernel source Software requirements --------------------- - Compiling and running the 4.x kernels requires up-to-date + Compiling and running the 5.x kernels requires up-to-date versions of various software packages. Consult :ref:`Documentation/process/changes.rst ` for the minimum version numbers required and how to get updates for these packages. Beware that using @@ -132,12 +132,12 @@ Build directory for the kernel place for the output files (including .config). 
Example:: - kernel source code: /usr/src/linux-4.X + kernel source code: /usr/src/linux-5.x build directory: /home/name/build/kernel To configure and build the kernel, use:: - cd /usr/src/linux-4.X + cd /usr/src/linux-5.x make O=/home/name/build/kernel menuconfig make O=/home/name/build/kernel sudo make O=/home/name/build/kernel modules_install install @@ -251,7 +251,7 @@ Configuring the kernel Compiling the kernel -------------------- - - Make sure you have at least gcc 3.2 available. + - Make sure you have at least gcc 4.6 available. For more information, refer to :ref:`Documentation/process/changes.rst `. Please note that you can still run a.out user programs with this kernel. diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 7bf3f129c68b..20f92c16ffbf 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1189,6 +1189,10 @@ PAGE_SIZE multiple when read back. Amount of cached filesystem data that was modified and is currently being written back to disk + anon_thp + Amount of memory used in anonymous mappings backed by + transparent hugepages + inactive_anon, active_anon, inactive_file, active_file, unevictable Amount of memory, swap-backed and filesystem-backed, on the internal memory management lists used by the @@ -1248,6 +1252,18 @@ PAGE_SIZE multiple when read back. Amount of reclaimed lazyfree pages + thp_fault_alloc + + Number of transparent hugepages which were allocated to satisfy + a page fault, including COW faults. This counter is not present + when CONFIG_TRANSPARENT_HUGEPAGE is not set. + + thp_collapse_alloc + + Number of transparent hugepages which were allocated to allow + collapsing an existing range of pages. This counter is not + present when CONFIG_TRANSPARENT_HUGEPAGE is not set. + memory.swap.current A read-only single value file which exists on non-root cgroups. @@ -1503,7 +1519,7 @@ protected workload. The limits are only applied at the peer level in the hierarchy. This means that in the diagram below, only groups A, B, and C will influence each other, and -groups D and F will influence each other. Group G will influence nobody. +groups D and F will influence each other. Group G will influence nobody:: [root] / | \ diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b799bcf67d7b..2b8ee90bb644 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -461,6 +461,11 @@ possible to determine what the correct size should be. This option provides an override for these situations. + carrier_timeout= + [NET] Specifies amount of time (in seconds) that + the kernel should wait for a network carrier. By default + it waits 120 seconds. + ca_keys= [KEYS] This parameter identifies a specific key(s) on the system trusted keyring to be used for certificate trust validation. @@ -910,6 +915,10 @@ The filter can be disabled or changed to another driver later using sysfs. + driver_async_probe= [KNL] + List of driver names to be probed asynchronously. + Format: ,... + drm.edid_firmware=[:][,[:]] Broken monitors, graphic adapters, KVMs and EDIDless panels may send no or incorrect EDID data sets. @@ -1073,9 +1082,15 @@ specified address. The serial port must already be setup and configured. Options are not yet supported. + efifb,[options] + Start an early, unaccelerated console on the EFI + memory mapped framebuffer (if available). 
On cache + coherent non-x86 systems that use system memory for + the framebuffer, pass the 'ram' option so that it is + mapped with the correct attributes. + earlyprintk= [X86,SH,ARM,M68k,S390] earlyprintk=vga - earlyprintk=efi earlyprintk=sclp earlyprintk=xen earlyprintk=serial[,ttySn[,baudrate]] @@ -1182,9 +1197,10 @@ arch/x86/kernel/cpu/cpufreq/elanfreq.c. elevator= [IOSCHED] - Format: {"cfq" | "deadline" | "noop"} - See Documentation/block/cfq-iosched.txt and - Documentation/block/deadline-iosched.txt for details. + Format: { "mq-deadline" | "kyber" | "bfq" } + See Documentation/block/deadline-iosched.txt, + Documentation/block/kyber-iosched.txt and + Documentation/block/bfq-iosched.txt for details. elfcorehdr=[size[KMG]@]offset[KMG] [IA64,PPC,SH,X86,S390] Specifies physical address of start of kernel core @@ -1696,12 +1712,11 @@ By default, super page will be supported if Intel IOMMU has the capability. With this option, super page will not be supported. - sm_off [Default Off] - By default, scalable mode will be supported if the + sm_on [Default Off] + By default, scalable mode will be disabled even if the hardware advertises that it has support for the scalable mode translation. With this option set, scalable mode - will not be used even on hardware which claims to support - it. + will be used on hardware which claims to support it. tboot_noforce [Default Off] Do not force the Intel IOMMU enabled under tboot. By default, tboot will force Intel IOMMU on, which @@ -1831,6 +1846,11 @@ to let secondary kernels in charge of setting up LPIs. + irqchip.gicv3_pseudo_nmi= [ARM64] + Enables support for pseudo-NMIs in the kernel. This + requires the kernel to be built with + CONFIG_ARM64_PSEUDO_NMI. + irqfixup [HW] When an interrupt is not handled search all handlers for it. Intended to get systems with badly broken @@ -1982,6 +2002,12 @@ Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, the default is off. + kpti= [ARM64] Control page table isolation of user + and kernel address spaces. + Default: enabled on cores which need mitigation. + 0: force disabled + 1: force enabled + kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs. Default is 0 (don't ignore, but inject #GP) @@ -2319,6 +2345,10 @@ lsm.debug [SECURITY] Enable LSM initialization debugging output. + lsm=lsm1,...,lsmN + [SECURITY] Choose order of LSM initialization. This + overrides CONFIG_LSM, and the "security=" parameter. + machvec= [IA-64] Force the use of a particular machine-vector (machvec) in a generic kernel. Example: machvec=hpzx1_swiotlb @@ -3654,19 +3684,6 @@ latencies, which will choose a value aligned with the appropriate hardware boundaries. - rcutree.jiffies_till_sched_qs= [KNL] - Set required age in jiffies for a - given grace period before RCU starts - soliciting quiescent-state help from - rcu_note_context_switch(). If not specified, the - kernel will calculate a value based on the most - recent settings of rcutree.jiffies_till_first_fqs - and rcutree.jiffies_till_next_fqs. - This calculated value may be viewed in - rcutree.jiffies_to_sched_qs. Any attempt to - set rcutree.jiffies_to_sched_qs will be - cheerfully overwritten. - rcutree.jiffies_till_first_fqs= [KNL] Set delay from grace-period initialization to first attempt to force quiescent states. @@ -3678,6 +3695,20 @@ quiescent states. Units are jiffies, minimum value is one, and maximum value is HZ. 
+ rcutree.jiffies_till_sched_qs= [KNL] + Set required age in jiffies for a + given grace period before RCU starts + soliciting quiescent-state help from + rcu_note_context_switch() and cond_resched(). + If not specified, the kernel will calculate + a value based on the most recent settings + of rcutree.jiffies_till_first_fqs + and rcutree.jiffies_till_next_fqs. + This calculated value may be viewed in + rcutree.jiffies_to_sched_qs. Any attempt to set + rcutree.jiffies_to_sched_qs will be cheerfully + overwritten. + rcutree.kthread_prio= [KNL,BOOT] Set the SCHED_FIFO priority of the RCU per-CPU kthreads (rcuc/N). This value is also used for @@ -3721,6 +3752,11 @@ This wake_up() will be accompanied by a WARN_ONCE() splat and an ftrace_dump(). + rcutree.sysrq_rcu= [KNL] + Commandeer a sysrq key to dump out Tree RCU's + rcu_node tree with an eye towards determining + why a new grace period has not yet started. + rcuperf.gp_async= [KNL] Measure performance of asynchronous grace-period primitives such as call_rcu(). @@ -4090,11 +4126,9 @@ Note: increases power consumption, thus should only be enabled if running jitter sensitive (HPC/RT) workloads. - security= [SECURITY] Choose a security module to enable at boot. - If this boot parameter is not specified, only the first - security module asking for security registration will be - loaded. An invalid security module name will be treated - as if no module has been chosen. + security= [SECURITY] Choose a legacy "major" security module to + enable at boot. This has been deprecated by the + "lsm=" parameter. selinux= [SELINUX] Disable or enable SELinux at boot time. Format: { "0" | "1" } @@ -4698,7 +4732,8 @@ usbcore.authorized_default= [USB] Default USB device authorization: (default -1 = authorized except for wireless USB, - 0 = not authorized, 1 = authorized) + 0 = not authorized, 1 = authorized, 2 = authorized + if device connected to internal port) usbcore.autosuspend= [USB] The autosuspend time delay (in seconds) used @@ -5043,6 +5078,14 @@ or other driver-specific files in the Documentation/watchdog/ directory. + watchdog_thresh= + [KNL] + Set the hard lockup detector stall duration + threshold in seconds. The soft lockup detector + threshold is set to twice the value. A value of 0 + disables both lockup detectors. Default is 10 + seconds. + workqueue.watchdog_thresh= If CONFIG_WQ_WATCHDOG is configured, workqueue can warn stall conditions and dump internal state to diff --git a/Documentation/admin-guide/mm/pagemap.rst b/Documentation/admin-guide/mm/pagemap.rst index 3f7bade2c231..340a5aee9b80 100644 --- a/Documentation/admin-guide/mm/pagemap.rst +++ b/Documentation/admin-guide/mm/pagemap.rst @@ -75,9 +75,10 @@ number of times a page is mapped. 20. NOPAGE 21. KSM 22. THP - 23. BALLOON + 23. OFFLINE 24. ZERO_PAGE 25. IDLE + 26. PGTABLE * ``/proc/kpagecgroup``. This file contains a 64-bit inode number of the memory cgroup each page is charged to, indexed by PFN. Only available when @@ -118,8 +119,8 @@ Short descriptions to the page flags identical memory pages dynamically shared between one or more processes 22 - THP contiguous pages which construct transparent hugepages -23 - BALLOON - balloon compaction page +23 - OFFLINE + page is logically offline 24 - ZERO_PAGE zero page for pfn_zero or huge_zero page 25 - IDLE @@ -128,6 +129,8 @@ Short descriptions to the page flags Note that this flag may be stale in case the page was accessed via a PTE. To make sure the flag is up-to-date one has to read ``/sys/kernel/mm/page_idle/bitmap`` first. 
+26 - PGTABLE + page is in use as a page table IO related page flags --------------------- diff --git a/Documentation/admin-guide/perf-security.rst b/Documentation/admin-guide/perf-security.rst index f73ebfe9bfe2..72effa7c23b9 100644 --- a/Documentation/admin-guide/perf-security.rst +++ b/Documentation/admin-guide/perf-security.rst @@ -6,83 +6,211 @@ Perf Events and tool security Overview -------- -Usage of Performance Counters for Linux (perf_events) [1]_ , [2]_ , [3]_ can -impose a considerable risk of leaking sensitive data accessed by monitored -processes. The data leakage is possible both in scenarios of direct usage of -perf_events system call API [2]_ and over data files generated by Perf tool user -mode utility (Perf) [3]_ , [4]_ . The risk depends on the nature of data that -perf_events performance monitoring units (PMU) [2]_ collect and expose for -performance analysis. Having that said perf_events/Perf performance monitoring -is the subject for security access control management [5]_ . +Usage of Performance Counters for Linux (perf_events) [1]_ , [2]_ , [3]_ +can impose a considerable risk of leaking sensitive data accessed by +monitored processes. The data leakage is possible both in scenarios of +direct usage of perf_events system call API [2]_ and over data files +generated by Perf tool user mode utility (Perf) [3]_ , [4]_ . The risk +depends on the nature of data that perf_events performance monitoring +units (PMU) [2]_ and Perf collect and expose for performance analysis. +Collected system and performance data may be split into several +categories: + +1. System hardware and software configuration data, for example: a CPU + model and its cache configuration, an amount of available memory and + its topology, used kernel and Perf versions, performance monitoring + setup including experiment time, events configuration, Perf command + line parameters, etc. + +2. User and kernel module paths and their load addresses with sizes, + process and thread names with their PIDs and TIDs, timestamps for + captured hardware and software events. + +3. Content of kernel software counters (e.g., for context switches, page + faults, CPU migrations), architectural hardware performance counters + (PMC) [8]_ and machine specific registers (MSR) [9]_ that provide + execution metrics for various monitored parts of the system (e.g., + memory controller (IMC), interconnect (QPI/UPI) or peripheral (PCIe) + uncore counters) without direct attribution to any execution context + state. + +4. Content of architectural execution context registers (e.g., RIP, RSP, + RBP on x86_64), process user and kernel space memory addresses and + data, content of various architectural MSRs that capture data from + this category. + +Data that belong to the fourth category can potentially contain +sensitive process data. If PMUs in some monitoring modes capture values +of execution context registers or data from process memory then access +to such monitoring capabilities requires to be ordered and secured +properly. So, perf_events/Perf performance monitoring is the subject for +security access control management [5]_ . perf_events/Perf access control ------------------------------- -To perform security checks, the Linux implementation splits processes into two -categories [6]_ : a) privileged processes (whose effective user ID is 0, referred -to as superuser or root), and b) unprivileged processes (whose effective UID is -nonzero). 
Privileged processes bypass all kernel security permission checks so -perf_events performance monitoring is fully available to privileged processes -without access, scope and resource restrictions. - -Unprivileged processes are subject to a full security permission check based on -the process's credentials [5]_ (usually: effective UID, effective GID, and -supplementary group list). - -Linux divides the privileges traditionally associated with superuser into -distinct units, known as capabilities [6]_ , which can be independently enabled -and disabled on per-thread basis for processes and files of unprivileged users. - -Unprivileged processes with enabled CAP_SYS_ADMIN capability are treated as -privileged processes with respect to perf_events performance monitoring and -bypass *scope* permissions checks in the kernel. - -Unprivileged processes using perf_events system call API is also subject for -PTRACE_MODE_READ_REALCREDS ptrace access mode check [7]_ , whose outcome -determines whether monitoring is permitted. So unprivileged processes provided -with CAP_SYS_PTRACE capability are effectively permitted to pass the check. - -Other capabilities being granted to unprivileged processes can effectively -enable capturing of additional data required for later performance analysis of -monitored processes or a system. For example, CAP_SYSLOG capability permits -reading kernel space memory addresses from /proc/kallsyms file. +To perform security checks, the Linux implementation splits processes +into two categories [6]_ : a) privileged processes (whose effective user +ID is 0, referred to as superuser or root), and b) unprivileged +processes (whose effective UID is nonzero). Privileged processes bypass +all kernel security permission checks so perf_events performance +monitoring is fully available to privileged processes without access, +scope and resource restrictions. + +Unprivileged processes are subject to a full security permission check +based on the process's credentials [5]_ (usually: effective UID, +effective GID, and supplementary group list). + +Linux divides the privileges traditionally associated with superuser +into distinct units, known as capabilities [6]_ , which can be +independently enabled and disabled on per-thread basis for processes and +files of unprivileged users. + +Unprivileged processes with enabled CAP_SYS_ADMIN capability are treated +as privileged processes with respect to perf_events performance +monitoring and bypass *scope* permissions checks in the kernel. + +Unprivileged processes using perf_events system call API is also subject +for PTRACE_MODE_READ_REALCREDS ptrace access mode check [7]_ , whose +outcome determines whether monitoring is permitted. So unprivileged +processes provided with CAP_SYS_PTRACE capability are effectively +permitted to pass the check. + +Other capabilities being granted to unprivileged processes can +effectively enable capturing of additional data required for later +performance analysis of monitored processes or a system. For example, +CAP_SYSLOG capability permits reading kernel space memory addresses from +/proc/kallsyms file. + +perf_events/Perf privileged users +--------------------------------- + +Mechanisms of capabilities, privileged capability-dumb files [6]_ and +file system ACLs [10]_ can be used to create a dedicated group of +perf_events/Perf privileged users who are permitted to execute +performance monitoring without scope limits. The following steps can be +taken to create such a group of privileged Perf users. + +1. 
Create perf_users group of privileged Perf users, assign perf_users + group to Perf tool executable and limit access to the executable for + other users in the system who are not in the perf_users group: + +:: + + # groupadd perf_users + # ls -alhF + -rwxr-xr-x 2 root root 11M Oct 19 15:12 perf + # chgrp perf_users perf + # ls -alhF + -rwxr-xr-x 2 root perf_users 11M Oct 19 15:12 perf + # chmod o-rwx perf + # ls -alhF + -rwxr-x--- 2 root perf_users 11M Oct 19 15:12 perf + +2. Assign the required capabilities to the Perf tool executable file and + enable members of perf_users group with performance monitoring + privileges [6]_ : + +:: + + # setcap "cap_sys_admin,cap_sys_ptrace,cap_syslog=ep" perf + # setcap -v "cap_sys_admin,cap_sys_ptrace,cap_syslog=ep" perf + perf: OK + # getcap perf + perf = cap_sys_ptrace,cap_sys_admin,cap_syslog+ep + +As a result, members of perf_users group are capable of conducting +performance monitoring by using functionality of the configured Perf +tool executable that, when executes, passes perf_events subsystem scope +checks. + +This specific access control management is only available to superuser +or root running processes with CAP_SETPCAP, CAP_SETFCAP [6]_ +capabilities. perf_events/Perf unprivileged users ----------------------------------- -perf_events/Perf *scope* and *access* control for unprivileged processes is -governed by perf_event_paranoid [2]_ setting: +perf_events/Perf *scope* and *access* control for unprivileged processes +is governed by perf_event_paranoid [2]_ setting: -1: - Impose no *scope* and *access* restrictions on using perf_events performance - monitoring. Per-user per-cpu perf_event_mlock_kb [2]_ locking limit is - ignored when allocating memory buffers for storing performance data. - This is the least secure mode since allowed monitored *scope* is - maximized and no perf_events specific limits are imposed on *resources* - allocated for performance monitoring. + Impose no *scope* and *access* restrictions on using perf_events + performance monitoring. Per-user per-cpu perf_event_mlock_kb [2]_ + locking limit is ignored when allocating memory buffers for storing + performance data. This is the least secure mode since allowed + monitored *scope* is maximized and no perf_events specific limits + are imposed on *resources* allocated for performance monitoring. >=0: *scope* includes per-process and system wide performance monitoring - but excludes raw tracepoints and ftrace function tracepoints monitoring. - CPU and system events happened when executing either in user or - in kernel space can be monitored and captured for later analysis. - Per-user per-cpu perf_event_mlock_kb locking limit is imposed but - ignored for unprivileged processes with CAP_IPC_LOCK [6]_ capability. + but excludes raw tracepoints and ftrace function tracepoints + monitoring. CPU and system events happened when executing either in + user or in kernel space can be monitored and captured for later + analysis. Per-user per-cpu perf_event_mlock_kb locking limit is + imposed but ignored for unprivileged processes with CAP_IPC_LOCK + [6]_ capability. >=1: - *scope* includes per-process performance monitoring only and excludes - system wide performance monitoring. CPU and system events happened when - executing either in user or in kernel space can be monitored and - captured for later analysis. Per-user per-cpu perf_event_mlock_kb - locking limit is imposed but ignored for unprivileged processes with - CAP_IPC_LOCK capability. 
+ *scope* includes per-process performance monitoring only and + excludes system wide performance monitoring. CPU and system events + happened when executing either in user or in kernel space can be + monitored and captured for later analysis. Per-user per-cpu + perf_event_mlock_kb locking limit is imposed but ignored for + unprivileged processes with CAP_IPC_LOCK capability. >=2: - *scope* includes per-process performance monitoring only. CPU and system - events happened when executing in user space only can be monitored and - captured for later analysis. Per-user per-cpu perf_event_mlock_kb - locking limit is imposed but ignored for unprivileged processes with - CAP_IPC_LOCK capability. + *scope* includes per-process performance monitoring only. CPU and + system events happened when executing in user space only can be + monitored and captured for later analysis. Per-user per-cpu + perf_event_mlock_kb locking limit is imposed but ignored for + unprivileged processes with CAP_IPC_LOCK capability. + +perf_events/Perf resource control +--------------------------------- + +Open file descriptors ++++++++++++++++++++++ + +The perf_events system call API [2]_ allocates file descriptors for +every configured PMU event. Open file descriptors are a per-process +accountable resource governed by the RLIMIT_NOFILE [11]_ limit +(ulimit -n), which is usually derived from the login shell process. When +configuring Perf collection for a long list of events on a large server +system, this limit can be easily hit preventing required monitoring +configuration. RLIMIT_NOFILE limit can be increased on per-user basis +modifying content of the limits.conf file [12]_ . Ordinarily, a Perf +sampling session (perf record) requires an amount of open perf_event +file descriptors that is not less than the number of monitored events +multiplied by the number of monitored CPUs. + +Memory allocation ++++++++++++++++++ + +The amount of memory available to user processes for capturing +performance monitoring data is governed by the perf_event_mlock_kb [2]_ +setting. This perf_event specific resource setting defines overall +per-cpu limits of memory allowed for mapping by the user processes to +execute performance monitoring. The setting essentially extends the +RLIMIT_MEMLOCK [11]_ limit, but only for memory regions mapped +specifically for capturing monitored performance events and related data. + +For example, if a machine has eight cores and perf_event_mlock_kb limit +is set to 516 KiB, then a user process is provided with 516 KiB * 8 = +4128 KiB of memory above the RLIMIT_MEMLOCK limit (ulimit -l) for +perf_event mmap buffers. In particular, this means that, if the user +wants to start two or more performance monitoring processes, the user is +required to manually distribute the available 4128 KiB between the +monitoring processes, for example, using the --mmap-pages Perf record +mode option. Otherwise, the first started performance monitoring process +allocates all available 4128 KiB and the other processes will fail to +proceed due to the lack of memory. + +RLIMIT_MEMLOCK and perf_event_mlock_kb resource constraints are ignored +for processes with the CAP_IPC_LOCK capability. Thus, perf_events/Perf +privileged users can be provided with memory above the constraints for +perf_events/Perf performance monitoring purpose by providing the Perf +executable with CAP_IPC_LOCK capability. Bibliography ------------ @@ -94,4 +222,9 @@ Bibliography .. [5] ``_ .. [6] ``_ .. [7] ``_ +.. [8] ``_ +.. [9] ``_ +.. [10] ``_ +.. 
[11] ``_ +.. [12] ``_ diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst index 106379e2619f..9c58b35a81cb 100644 --- a/Documentation/admin-guide/pm/cpuidle.rst +++ b/Documentation/admin-guide/pm/cpuidle.rst @@ -155,14 +155,14 @@ governor uses that information depends on what algorithm is implemented by it and that is the primary reason for having more than one governor in the ``CPUIdle`` subsystem. -There are two ``CPUIdle`` governors available, ``menu`` and ``ladder``. Which -of them is used depends on the configuration of the kernel and in particular on -whether or not the scheduler tick can be `stopped by the idle -loop `_. It is possible to change the governor at run time -if the ``cpuidle_sysfs_switch`` command line parameter has been passed to the -kernel, but that is not safe in general, so it should not be done on production -systems (that may change in the future, though). The name of the ``CPUIdle`` -governor currently used by the kernel can be read from the +There are three ``CPUIdle`` governors available, ``menu``, `TEO `_ +and ``ladder``. Which of them is used by default depends on the configuration +of the kernel and in particular on whether or not the scheduler tick can be +`stopped by the idle loop `_. It is possible to change the +governor at run time if the ``cpuidle_sysfs_switch`` command line parameter has +been passed to the kernel, but that is not safe in general, so it should not be +done on production systems (that may change in the future, though). The name of +the ``CPUIdle`` governor currently used by the kernel can be read from the :file:`current_governor_ro` (or :file:`current_governor` if ``cpuidle_sysfs_switch`` is present in the kernel command line) file under :file:`/sys/devices/system/cpu/cpuidle/` in ``sysfs``. @@ -256,6 +256,8 @@ the ``menu`` governor by default and if it is not tickless, the default ``CPUIdle`` governor on it will be ``ladder``. +.. _menu-gov: + The ``menu`` Governor ===================== @@ -333,6 +335,92 @@ that time, the governor may need to select a shallower state with a suitable target residency. +.. _teo-gov: + +The Timer Events Oriented (TEO) Governor +======================================== + +The timer events oriented (TEO) governor is an alternative ``CPUIdle`` governor +for tickless systems. It follows the same basic strategy as the ``menu`` `one +`_: it always tries to find the deepest idle state suitable for the +given conditions. However, it applies a different approach to that problem. + +First, it does not use sleep length correction factors, but instead it attempts +to correlate the observed idle duration values with the available idle states +and use that information to pick up the idle state that is most likely to +"match" the upcoming CPU idle interval. Second, it does not take the tasks +that were running on the given CPU in the past and are waiting on some I/O +operations to complete now at all (there is no guarantee that they will run on +the same CPU when they become runnable again) and the pattern detection code in +it avoids taking timer wakeups into account. It also only uses idle duration +values less than the current time till the closest timer (with the scheduler +tick excluded) for that purpose. 
+ +Like in the ``menu`` governor `case `_, the first step is to obtain +the *sleep length*, which is the time until the closest timer event with the +assumption that the scheduler tick will be stopped (that also is the upper bound +on the time until the next CPU wakeup). That value is then used to preselect an +idle state on the basis of three metrics maintained for each idle state provided +by the ``CPUIdle`` driver: ``hits``, ``misses`` and ``early_hits``. + +The ``hits`` and ``misses`` metrics measure the likelihood that a given idle +state will "match" the observed (post-wakeup) idle duration if it "matches" the +sleep length. They both are subject to decay (after a CPU wakeup) every time +the target residency of the idle state corresponding to them is less than or +equal to the sleep length and the target residency of the next idle state is +greater than the sleep length (that is, when the idle state corresponding to +them "matches" the sleep length). The ``hits`` metric is increased if the +former condition is satisfied and the target residency of the given idle state +is less than or equal to the observed idle duration and the target residency of +the next idle state is greater than the observed idle duration at the same time +(that is, it is increased when the given idle state "matches" both the sleep +length and the observed idle duration). In turn, the ``misses`` metric is +increased when the given idle state "matches" the sleep length only and the +observed idle duration is too short for its target residency. + +The ``early_hits`` metric measures the likelihood that a given idle state will +"match" the observed (post-wakeup) idle duration if it does not "match" the +sleep length. It is subject to decay on every CPU wakeup and it is increased +when the idle state corresponding to it "matches" the observed (post-wakeup) +idle duration and the target residency of the next idle state is less than or +equal to the sleep length (i.e. the idle state "matching" the sleep length is +deeper than the given one). + +The governor walks the list of idle states provided by the ``CPUIdle`` driver +and finds the last (deepest) one with the target residency less than or equal +to the sleep length. Then, the ``hits`` and ``misses`` metrics of that idle +state are compared with each other and it is preselected if the ``hits`` one is +greater (which means that that idle state is likely to "match" the observed idle +duration after CPU wakeup). If the ``misses`` one is greater, the governor +preselects the shallower idle state with the maximum ``early_hits`` metric +(or if there are multiple shallower idle states with equal ``early_hits`` +metric which also is the maximum, the shallowest of them will be preselected). +[If there is a wakeup latency constraint coming from the `PM QoS framework +`_ which is hit before reaching the deepest idle state with the +target residency within the sleep length, the deepest idle state with the exit +latency within the constraint is preselected without consulting the ``hits``, +``misses`` and ``early_hits`` metrics.] + +Next, the governor takes several idle duration values observed most recently +into consideration and if at least a half of them are greater than or equal to +the target residency of the preselected idle state, that idle state becomes the +final candidate to ask for. 
Otherwise, the average of the most recent idle +duration values below the target residency of the preselected idle state is +computed and the governor walks the idle states shallower than the preselected +one and finds the deepest of them with the target residency within that average. +That idle state is then taken as the final candidate to ask for. + +Still, at this point the governor may need to refine the idle state selection if +it has not decided to `stop the scheduler tick `_. That +generally happens if the target residency of the idle state selected so far is +less than the tick period and the tick has not been stopped already (in a +previous iteration of the idle loop). Then, like in the ``menu`` governor +`case `_, the sleep length used in the previous computations may not +reflect the real time until the closest timer event and if it really is greater +than that time, a shallower state with a suitable target residency may need to +be selected. + + .. _idle-states-representation: Representation of Idle States diff --git a/Documentation/admin-guide/tainted-kernels.rst b/Documentation/admin-guide/tainted-kernels.rst index 28a869c509a0..71e9184a9079 100644 --- a/Documentation/admin-guide/tainted-kernels.rst +++ b/Documentation/admin-guide/tainted-kernels.rst @@ -1,59 +1,164 @@ Tainted kernels --------------- -Some oops reports contain the string **'Tainted: '** after the program -counter. This indicates that the kernel has been tainted by some -mechanism. The string is followed by a series of position-sensitive -characters, each representing a particular tainted value. - - 1) ``G`` if all modules loaded have a GPL or compatible license, ``P`` if +The kernel will mark itself as 'tainted' when something occurs that might be +relevant later when investigating problems. Don't worry too much about this, +most of the time it's not a problem to run a tainted kernel; the information is +mainly of interest once someone wants to investigate some problem, as its real +cause might be the event that got the kernel tainted. That's why bug reports +from tainted kernels will often be ignored by developers, hence try to reproduce +problems with an untainted kernel. + +Note the kernel will remain tainted even after you undo what caused the taint +(i.e. unload a proprietary kernel module), to indicate the kernel remains not +trustworthy. That's also why the kernel will print the tainted state when it +notices an internal problem (a 'kernel bug'), a recoverable error +('kernel oops') or a non-recoverable error ('kernel panic') and writes debug +information about this to the logs ``dmesg`` outputs. It's also possible to +check the tainted state at runtime through a file in ``/proc/``. + + +Tainted flag in bugs, oops or panics messages +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You find the tainted state near the top in a line starting with 'CPU:'; if or +why the kernel was tainted is shown after the Process ID ('PID:') and a shortened +name of the command ('Comm:') that triggered the event:: + + BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 + Oops: 0002 [#1] SMP PTI + CPU: 0 PID: 4424 Comm: insmod Tainted: P W O 4.20.0-0.rc6.fc30 #1 + Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011 + RIP: 0010:my_oops_init+0x13/0x1000 [kpanic] + [...] + +You'll find a 'Not tainted: ' there if the kernel was not tainted at the +time of the event; if it was, then it will print 'Tainted: ' and characters +either letters or blanks. 
In the above example it looks like this::
+
+ Tainted: P W O
+
+The meaning of those characters is explained in the table below. In this case
+the kernel got tainted earlier because a proprietary module (``P``) was loaded,
+a warning occurred (``W``), and an externally-built module was loaded (``O``).
+To decode other letters use the table below.
+
+
+Decoding tainted state at runtime
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At runtime, you can query the tainted state by reading
+``cat /proc/sys/kernel/tainted``. If that returns ``0``, the kernel is not
+tainted; any other number indicates the reasons why it is. The easiest way to
+decode that number is the script ``tools/debugging/kernel-chktaint``, which your
+distribution might ship as part of a package called ``linux-tools`` or
+``kernel-tools``; if it doesn't you can download the script from
+`git.kernel.org `_
+and execute it with ``sh kernel-chktaint``, which would print something like
+this on the machine that had the statements in the logs that were quoted earlier::
+
+ Kernel is Tainted for following reasons:
+  * Proprietary module was loaded (#0)
+  * Kernel issued warning (#9)
+  * Externally-built ('out-of-tree') module was loaded (#12)
+ See Documentation/admin-guide/tainted-kernels.rst in the the Linux kernel or
+ https://www.kernel.org/doc/html/latest/admin-guide/tainted-kernels.html for
+ a more details explanation of the various taint flags.
+ Raw taint value as int/string: 4609/'P W O '
+
+You can try to decode the number yourself. That's easy if there was only one
+reason that got your kernel tainted, as in this case you can find the number
+with the table below. If there were multiple reasons you need to decode the
+number, as it is a bitfield, where each bit indicates the absence or presence of
+a particular type of taint. It's best to leave that to the aforementioned
+script, but if you need something quick you can use this shell command to check
+which bits are set::
+
+ $ for i in $(seq 18); do echo $(($i-1)) $(($(cat /proc/sys/kernel/tainted)>>($i-1)&1));done
+
+Table for decoding tainted state
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+=== === ====== ========================================================
+Bit Log Number Reason that got the kernel tainted
+=== === ====== ========================================================
+  0 G/P      1 proprietary module was loaded
+  1 _/F      2 module was force loaded
+  2 _/S      4 SMP kernel oops on an officially SMP incapable processor
+  3 _/R      8 module was force unloaded
+  4 _/M     16 processor reported a Machine Check Exception (MCE)
+  5 _/B     32 bad page referenced or some unexpected page flags
+  6 _/U     64 taint requested by userspace application
+  7 _/D    128 kernel died recently, i.e. there was an OOPS or BUG
+  8 _/A    256 ACPI table overridden by user
+  9 _/W    512 kernel issued warning
+ 10 _/C   1024 staging driver was loaded
+ 11 _/I   2048 workaround for bug in platform firmware applied
+ 12 _/O   4096 externally-built ("out-of-tree") module was loaded
+ 13 _/E   8192 unsigned module was loaded
+ 14 _/L  16384 soft lockup occurred
+ 15 _/K  32768 kernel has been live patched
+ 16 _/X  65536 auxiliary taint, defined for and used by distros
+ 17 _/T 131072 kernel was built with the struct randomization plugin
+=== === ====== ========================================================
+
+Note: The character ``_`` represents a blank in this table to make reading
+easier.
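As a quick cross-check of the example above, the raw value 4609 decomposes as
4096 + 512 + 1, i.e. bits 12 ('O'), 9 ('W') and 0 ('P') in the table, matching
the 'P W O' string. The same check can be done in a shell::

 $ echo $((4609 & (1<<0))) $((4609 & (1<<9))) $((4609 & (1<<12)))
 1 512 4096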
+
+More detailed explanation for tainting
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 0) ``G`` if all modules loaded have a GPL or compatible license, ``P`` if any proprietary module has been loaded. Modules without a MODULE_LICENSE or with a MODULE_LICENSE that is not recognised by insmod as GPL compatible are assumed to be proprietary.
- 2) ``F`` if any module was force loaded by ``insmod -f``, ``' '`` if all
+ 1) ``F`` if any module was force loaded by ``insmod -f``, ``' '`` if all modules were loaded normally.
- 3) ``S`` if the oops occurred on an SMP kernel running on hardware that
+ 2) ``S`` if the oops occurred on an SMP kernel running on hardware that hasn't been certified as safe to run multiprocessor. Currently this occurs only on various Athlons that are not SMP capable.
- 4) ``R`` if a module was force unloaded by ``rmmod -f``, ``' '`` if all
+ 3) ``R`` if a module was force unloaded by ``rmmod -f``, ``' '`` if all modules were unloaded normally.
- 5) ``M`` if any processor has reported a Machine Check Exception,
+ 4) ``M`` if any processor has reported a Machine Check Exception, ``' '`` if no Machine Check Exceptions have occurred.
- 6) ``B`` if a page-release function has found a bad page reference or
- some unexpected page flags.
+ 5) ``B`` If a page-release function has found a bad page reference or some
+ unexpected page flags. This indicates a hardware problem or a kernel bug;
+ there should be other information in the log indicating why this tainting
+ occurred.
- 7) ``U`` if a user or user application specifically requested that the
+ 6) ``U`` if a user or user application specifically requested that the Tainted flag be set, ``' '`` otherwise.
- 8) ``D`` if the kernel has died recently, i.e. there was an OOPS or BUG.
+ 7) ``D`` if the kernel has died recently, i.e. there was an OOPS or BUG.
- 9) ``A`` if the ACPI table has been overridden.
+ 8) ``A`` if an ACPI table has been overridden.
- 10) ``W`` if a warning has previously been issued by the kernel.
+ 9) ``W`` if a warning has previously been issued by the kernel. (Though some warnings may set more specific taint flags.)
- 11) ``C`` if a staging driver has been loaded.
+ 10) ``C`` if a staging driver has been loaded.
- 12) ``I`` if the kernel is working around a severe bug in the platform
+ 11) ``I`` if the kernel is working around a severe bug in the platform firmware (BIOS or similar).
- 13) ``O`` if an externally-built ("out-of-tree") module has been loaded.
+ 12) ``O`` if an externally-built ("out-of-tree") module has been loaded.
- 14) ``E`` if an unsigned module has been loaded in a kernel supporting
+ 13) ``E`` if an unsigned module has been loaded in a kernel supporting module signature.
- 15) ``L`` if a soft lockup has previously occurred on the system.
+ 14) ``L`` if a soft lockup has previously occurred on the system.
+
+ 15) ``K`` if the kernel has been live patched.
- 16) ``K`` if the kernel has been live patched.
+ 16) ``X`` Auxiliary taint, defined for and used by Linux distributors.
-The primary reason for the **'Tainted: '** string is to tell kernel
-debuggers if this is a clean kernel or if anything unusual has
-occurred. Tainting is permanent: even if an offending module is
-unloaded, the tainted value remains to indicate that the kernel is not
-trustworthy.
+ 17) ``T`` Kernel was built with the randstruct plugin, which can intentionally
+ produce extremely unusual kernel structure layouts (even performance
+ pathological ones), which is important to know when debugging. Set at
+ build time.
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt index 8df9f4658d6f..fbab7e21d116 100644 --- a/Documentation/arm64/booting.txt +++ b/Documentation/arm64/booting.txt @@ -188,6 +188,11 @@ Before jumping into the kernel, the following conditions must be met: the kernel image will be entered must be initialised by software at a higher exception level to prevent execution in an UNKNOWN state. + - SCR_EL3.FIQ must have the same value across all CPUs the kernel is + executing on. + - The value of SCR_EL3.FIQ must be the same as the one present at boot + time whenever the kernel is executing. + For systems with a GICv3 interrupt controller to be used in v3 mode: - If EL3 is present: ICC_SRE_EL3.Enable (bit 3) must be initialiased to 0b1. diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt index a25cd21290e9..5baca42ba146 100644 --- a/Documentation/arm64/pointer-authentication.txt +++ b/Documentation/arm64/pointer-authentication.txt @@ -78,6 +78,11 @@ bits can vary between the two. Note that the masks apply to TTBR0 addresses, and are not valid to apply to TTBR1 addresses (e.g. kernel pointers). +Additionally, when CONFIG_CHECKPOINT_RESTORE is also set, the kernel +will expose the NT_ARM_PACA_KEYS and NT_ARM_PACG_KEYS regsets (struct +user_pac_address_keys and struct user_pac_generic_keys). These can be +used to get and set the keys for a thread. + Virtualization -------------- diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 1f09d043d086..d1e2bb801e1b 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt @@ -44,6 +44,8 @@ stable kernels. | Implementor | Component | Erratum ID | Kconfig | +----------------+-----------------+-----------------+-----------------------------+ +| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 | +| | | | | | ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 | | ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 | | ARM | Cortex-A53 | #824069 | ARM64_ERRATUM_824069 | @@ -80,3 +82,4 @@ stable kernels. | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | | Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | +| Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 | diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt index 25689584e6e0..ce6eccaf5df7 100644 --- a/Documentation/block/biovecs.txt +++ b/Documentation/block/biovecs.txt @@ -117,3 +117,28 @@ Other implications: size limitations and the limitations of the underlying devices. Thus there's no need to define ->merge_bvec_fn() callbacks for individual block drivers. + +Usage of helpers: +================= + +* The following helpers whose names have the suffix of "_all" can only be used +on non-BIO_CLONED bio. They are usually used by filesystem code. Drivers +shouldn't use them because the bio may have been split before it reached the +driver. + + bio_for_each_segment_all() + bio_first_bvec_all() + bio_first_page_all() + bio_last_bvec_all() + +* The following helpers iterate over single-page segment. The passed 'struct +bio_vec' will contain a single-page IO vector during the iteration + + bio_for_each_segment() + bio_for_each_segment_all() + +* The following helpers iterate over multi-page bvec. 
The passed 'struct +bio_vec' will contain a multi-page IO vector during the iteration + + bio_for_each_bvec() + rq_for_each_bvec() diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst index 7cc9e368c1e9..10453c627135 100644 --- a/Documentation/bpf/bpf_design_QA.rst +++ b/Documentation/bpf/bpf_design_QA.rst @@ -36,27 +36,27 @@ consideration important quirks of other architectures) and defines calling convention that is compatible with C calling convention of the linux kernel on those architectures. -Q: can multiple return values be supported in the future? +Q: Can multiple return values be supported in the future? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A: NO. BPF allows only register R0 to be used as return value. -Q: can more than 5 function arguments be supported in the future? +Q: Can more than 5 function arguments be supported in the future? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A: NO. BPF calling convention only allows registers R1-R5 to be used as arguments. BPF is not a standalone instruction set. (unlike x64 ISA that allows msft, cdecl and other conventions) -Q: can BPF programs access instruction pointer or return address? +Q: Can BPF programs access instruction pointer or return address? ----------------------------------------------------------------- A: NO. -Q: can BPF programs access stack pointer ? +Q: Can BPF programs access stack pointer ? ------------------------------------------ A: NO. Only frame pointer (register R10) is accessible. From compiler point of view it's necessary to have stack pointer. -For example LLVM defines register R11 as stack pointer in its +For example, LLVM defines register R11 as stack pointer in its BPF backend, but it makes sure that generated code never uses it. Q: Does C-calling convention diminishes possible use cases? @@ -66,8 +66,8 @@ A: YES. BPF design forces addition of major functionality in the form of kernel helper functions and kernel objects like BPF maps with seamless interoperability between them. It lets kernel call into -BPF programs and programs call kernel helpers with zero overhead. -As all of them were native C code. That is particularly the case +BPF programs and programs call kernel helpers with zero overhead, +as all of them were native C code. That is particularly the case for JITed BPF programs that are indistinguishable from native kernel C code. @@ -75,9 +75,9 @@ Q: Does it mean that 'innovative' extensions to BPF code are disallowed? ------------------------------------------------------------------------ A: Soft yes. -At least for now until BPF core has support for +At least for now, until BPF core has support for bpf-to-bpf calls, indirect calls, loops, global variables, -jump tables, read only sections and all other normal constructs +jump tables, read-only sections, and all other normal constructs that C code can produce. Q: Can loops be supported in a safe way? @@ -109,16 +109,16 @@ For example why BPF_JNE and other compare and jumps are not cpu-like? A: This was necessary to avoid introducing flags into ISA which are impossible to make generic and efficient across CPU architectures. -Q: why BPF_DIV instruction doesn't map to x64 div? +Q: Why BPF_DIV instruction doesn't map to x64 div? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A: Because if we picked one-to-one relationship to x64 it would have made it more complicated to support on arm64 and other archs. Also it needs div-by-zero runtime check. 
-Q: why there is no BPF_SDIV for signed divide operation? +Q: Why there is no BPF_SDIV for signed divide operation? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A: Because it would be rarely used. llvm errors in such case and -prints a suggestion to use unsigned divide instead +prints a suggestion to use unsigned divide instead. Q: Why BPF has implicit prologue and epilogue? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst new file mode 100644 index 000000000000..9a60a5d60e38 --- /dev/null +++ b/Documentation/bpf/btf.rst @@ -0,0 +1,848 @@ +===================== +BPF Type Format (BTF) +===================== + +1. Introduction +*************** + +BTF (BPF Type Format) is the metadata format which encodes the debug info +related to BPF program/map. The name BTF was used initially to describe data +types. The BTF was later extended to include function info for defined +subroutines, and line info for source/line information. + +The debug info is used for map pretty print, function signature, etc. The +function signature enables better bpf program/function kernel symbol. The line +info helps generate source annotated translated byte code, jited code and +verifier log. + +The BTF specification contains two parts, + * BTF kernel API + * BTF ELF file format + +The kernel API is the contract between user space and kernel. The kernel +verifies the BTF info before using it. The ELF file format is a user space +contract between ELF file and libbpf loader. + +The type and string sections are part of the BTF kernel API, describing the +debug info (mostly types related) referenced by the bpf program. These two +sections are discussed in details in :ref:`BTF_Type_String`. + +.. _BTF_Type_String: + +2. BTF Type and String Encoding +******************************* + +The file ``include/uapi/linux/btf.h`` provides high-level definition of how +types/strings are encoded. + +The beginning of data blob must be:: + + struct btf_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + + /* All offsets are in bytes relative to the end of this header */ + __u32 type_off; /* offset of type section */ + __u32 type_len; /* length of type section */ + __u32 str_off; /* offset of string section */ + __u32 str_len; /* length of string section */ + }; + +The magic is ``0xeB9F``, which has different encoding for big and little +endian systems, and can be used to test whether BTF is generated for big- or +little-endian target. The ``btf_header`` is designed to be extensible with +``hdr_len`` equal to ``sizeof(struct btf_header)`` when a data blob is +generated. + +2.1 String Encoding +=================== + +The first string in the string section must be a null string. The rest of +string table is a concatenation of other null-terminated strings. + +2.2 Type Encoding +================= + +The type id ``0`` is reserved for ``void`` type. The type section is parsed +sequentially and type id is assigned to each recognized type starting from id +``1``. 
Currently, the following types are supported:: + + #define BTF_KIND_INT 1 /* Integer */ + #define BTF_KIND_PTR 2 /* Pointer */ + #define BTF_KIND_ARRAY 3 /* Array */ + #define BTF_KIND_STRUCT 4 /* Struct */ + #define BTF_KIND_UNION 5 /* Union */ + #define BTF_KIND_ENUM 6 /* Enumeration */ + #define BTF_KIND_FWD 7 /* Forward */ + #define BTF_KIND_TYPEDEF 8 /* Typedef */ + #define BTF_KIND_VOLATILE 9 /* Volatile */ + #define BTF_KIND_CONST 10 /* Const */ + #define BTF_KIND_RESTRICT 11 /* Restrict */ + #define BTF_KIND_FUNC 12 /* Function */ + #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ + +Note that the type section encodes debug info, not just pure types. +``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram. + +Each type contains the following common data:: + + struct btf_type { + __u32 name_off; + /* "info" bits arrangement + * bits 0-15: vlen (e.g. # of struct's members) + * bits 16-23: unused + * bits 24-27: kind (e.g. int, ptr, array...etc) + * bits 28-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd + */ + __u32 info; + /* "size" is used by INT, ENUM, STRUCT and UNION. + * "size" tells the size of the type it is describing. + * + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. + * "type" is a type_id referring to another type. + */ + union { + __u32 size; + __u32 type; + }; + }; + +For certain kinds, the common data are followed by kind-specific data. The +``name_off`` in ``struct btf_type`` specifies the offset in the string table. +The following sections detail encoding of each kind. + +2.2.1 BTF_KIND_INT +~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: any valid offset + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_INT + * ``info.vlen``: 0 + * ``size``: the size of the int type in bytes. + +``btf_type`` is followed by a ``u32`` with the following bits arrangement:: + + #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) + #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) + #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) + +The ``BTF_INT_ENCODING`` has the following attributes:: + + #define BTF_INT_SIGNED (1 << 0) + #define BTF_INT_CHAR (1 << 1) + #define BTF_INT_BOOL (1 << 2) + +The ``BTF_INT_ENCODING()`` provides extra information: signedness, char, or +bool, for the int type. The char and bool encoding are mostly useful for +pretty print. At most one encoding can be specified for the int type. + +The ``BTF_INT_BITS()`` specifies the number of actual bits held by this int +type. For example, a 4-bit bitfield encodes ``BTF_INT_BITS()`` equals to 4. +The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()`` +for the type. The maximum value of ``BTF_INT_BITS()`` is 128. + +The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values +for this int. For example, a bitfield struct member has: * btf member bit +offset 100 from the start of the structure, * btf member pointing to an int +type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4`` + +Then in the struct memory layout, this member will occupy ``4`` bits starting +from bits ``100 + 2 = 102``. 
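To make the bit layout concrete, the following user-space sketch (assuming only the UAPI header ``linux/btf.h`` is available; the sample value reproduces the 4-bit bitfield from the example above) unpacks the ``u32`` that follows a ``BTF_KIND_INT`` entry::

  #include <stdio.h>
  #include <linux/btf.h>  /* BTF_INT_ENCODING(), BTF_INT_OFFSET(), BTF_INT_BITS() */

  int main(void)
  {
          /* Matches the example: BTF_INT_OFFSET() = 2 and BTF_INT_BITS() = 4. */
          unsigned int int_data = (2 << 16) | 4;

          printf("encoding=%#x offset=%u bits=%u\n",
                 BTF_INT_ENCODING(int_data),
                 BTF_INT_OFFSET(int_data),
                 BTF_INT_BITS(int_data));
          return 0;
  }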
+ +Alternatively, the bitfield struct member can be the following to access the +same bits as the above: + + * btf member bit offset 102, + * btf member pointing to an int type, + * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4`` + +The original intention of ``BTF_INT_OFFSET()`` is to provide flexibility of +bitfield encoding. Currently, both llvm and pahole generate +``BTF_INT_OFFSET() = 0`` for all int types. + +2.2.2 BTF_KIND_PTR +~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_PTR + * ``info.vlen``: 0 + * ``type``: the pointee type of the pointer + +No additional type data follow ``btf_type``. + +2.2.3 BTF_KIND_ARRAY +~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_ARRAY + * ``info.vlen``: 0 + * ``size/type``: 0, not used + +``btf_type`` is followed by one ``struct btf_array``:: + + struct btf_array { + __u32 type; + __u32 index_type; + __u32 nelems; + }; + +The ``struct btf_array`` encoding: + * ``type``: the element type + * ``index_type``: the index type + * ``nelems``: the number of elements for this array (``0`` is also allowed). + +The ``index_type`` can be any regular int type (``u8``, ``u16``, ``u32``, +``u64``, ``unsigned __int128``). The original design of including +``index_type`` follows DWARF, which has an ``index_type`` for its array type. +Currently in BTF, beyond type verification, the ``index_type`` is not used. + +The ``struct btf_array`` allows chaining through element type to represent +multidimensional arrays. For example, for ``int a[5][6]``, the following type +information illustrates the chaining: + + * [1]: int + * [2]: array, ``btf_array.type = [1]``, ``btf_array.nelems = 6`` + * [3]: array, ``btf_array.type = [2]``, ``btf_array.nelems = 5`` + +Currently, both pahole and llvm collapse multidimensional array into +one-dimensional array, e.g., for ``a[5][6]``, the ``btf_array.nelems`` is +equal to ``30``. This is because the original use case is map pretty print +where the whole array is dumped out so one-dimensional array is enough. As +more BTF usage is explored, pahole and llvm can be changed to generate proper +chained representation for multidimensional arrays. + +2.2.4 BTF_KIND_STRUCT +~~~~~~~~~~~~~~~~~~~~~ +2.2.5 BTF_KIND_UNION +~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 or offset to a valid C identifier + * ``info.kind_flag``: 0 or 1 + * ``info.kind``: BTF_KIND_STRUCT or BTF_KIND_UNION + * ``info.vlen``: the number of struct/union members + * ``info.size``: the size of the struct/union in bytes + +``btf_type`` is followed by ``info.vlen`` number of ``struct btf_member``.:: + + struct btf_member { + __u32 name_off; + __u32 type; + __u32 offset; + }; + +``struct btf_member`` encoding: + * ``name_off``: offset to a valid C identifier + * ``type``: the member type + * ``offset``: + +If the type info ``kind_flag`` is not set, the offset contains only bit offset +of the member. Note that the base type of the bitfield can only be int or enum +type. If the bitfield size is 32, the base type can be either int or enum +type. If the bitfield size is not 32, the base type must be int, and int type +``BTF_INT_BITS()`` encodes the bitfield size. + +If the ``kind_flag`` is set, the ``btf_member.offset`` contains both member +bitfield size and bit offset. 
The bitfield size and bit offset are calculated +as below.:: + + #define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24) + #define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff) + +In this case, if the base type is an int type, it must be a regular int type: + + * ``BTF_INT_OFFSET()`` must be 0. + * ``BTF_INT_BITS()`` must be equal to ``{1,2,4,8,16} * 8``. + +The following kernel patch introduced ``kind_flag`` and explained why both +modes exist: + + https://github.com/torvalds/linux/commit/9d5f9f701b1891466fb3dbb1806ad97716f95cc3#diff-fa650a64fdd3968396883d2fe8215ff3 + +2.2.6 BTF_KIND_ENUM +~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 or offset to a valid C identifier + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_ENUM + * ``info.vlen``: number of enum values + * ``size``: 4 + +``btf_type`` is followed by ``info.vlen`` number of ``struct btf_enum``.:: + + struct btf_enum { + __u32 name_off; + __s32 val; + }; + +The ``btf_enum`` encoding: + * ``name_off``: offset to a valid C identifier + * ``val``: any value + +2.2.7 BTF_KIND_FWD +~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: offset to a valid C identifier + * ``info.kind_flag``: 0 for struct, 1 for union + * ``info.kind``: BTF_KIND_FWD + * ``info.vlen``: 0 + * ``type``: 0 + +No additional type data follow ``btf_type``. + +2.2.8 BTF_KIND_TYPEDEF +~~~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: offset to a valid C identifier + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_TYPEDEF + * ``info.vlen``: 0 + * ``type``: the type which can be referred by name at ``name_off`` + +No additional type data follow ``btf_type``. + +2.2.9 BTF_KIND_VOLATILE +~~~~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_VOLATILE + * ``info.vlen``: 0 + * ``type``: the type with ``volatile`` qualifier + +No additional type data follow ``btf_type``. + +2.2.10 BTF_KIND_CONST +~~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_CONST + * ``info.vlen``: 0 + * ``type``: the type with ``const`` qualifier + +No additional type data follow ``btf_type``. + +2.2.11 BTF_KIND_RESTRICT +~~~~~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_RESTRICT + * ``info.vlen``: 0 + * ``type``: the type with ``restrict`` qualifier + +No additional type data follow ``btf_type``. + +2.2.12 BTF_KIND_FUNC +~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: offset to a valid C identifier + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_FUNC + * ``info.vlen``: 0 + * ``type``: a BTF_KIND_FUNC_PROTO type + +No additional type data follow ``btf_type``. + +A BTF_KIND_FUNC defines not a type, but a subprogram (function) whose +signature is defined by ``type``. The subprogram is thus an instance of that +type. The BTF_KIND_FUNC may in turn be referenced by a func_info in the +:ref:`BTF_Ext_Section` (ELF) or in the arguments to :ref:`BPF_Prog_Load` +(ABI). 
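Pulling together the two member-offset encodings from the struct/union section above, a decoder boils down to the following user-space sketch (assuming the UAPI ``linux/btf.h`` macros; ``kind_flag`` is assumed to have been extracted from the parent type's ``info`` field, and ``show_member_offset`` is a name made up for this example)::

  #include <stdio.h>
  #include <linux/btf.h>  /* struct btf_member, BTF_MEMBER_* macros */

  /* Print a member's bit offset and, when kind_flag is set, its bitfield size. */
  static void show_member_offset(const struct btf_member *m, unsigned int kind_flag)
  {
          if (kind_flag)
                  printf("bit offset %u, bitfield size %u\n",
                         BTF_MEMBER_BIT_OFFSET(m->offset),
                         BTF_MEMBER_BITFIELD_SIZE(m->offset));
          else
                  printf("bit offset %u\n", m->offset);
  }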
+ +2.2.13 BTF_KIND_FUNC_PROTO +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``struct btf_type`` encoding requirement: + * ``name_off``: 0 + * ``info.kind_flag``: 0 + * ``info.kind``: BTF_KIND_FUNC_PROTO + * ``info.vlen``: # of parameters + * ``type``: the return type + +``btf_type`` is followed by ``info.vlen`` number of ``struct btf_param``.:: + + struct btf_param { + __u32 name_off; + __u32 type; + }; + +If a BTF_KIND_FUNC_PROTO type is referred by a BTF_KIND_FUNC type, then +``btf_param.name_off`` must point to a valid C identifier except for the +possible last argument representing the variable argument. The btf_param.type +refers to parameter type. + +If the function has variable arguments, the last parameter is encoded with +``name_off = 0`` and ``type = 0``. + +3. BTF Kernel API +***************** + +The following bpf syscall command involves BTF: + * BPF_BTF_LOAD: load a blob of BTF data into kernel + * BPF_MAP_CREATE: map creation with btf key and value type info. + * BPF_PROG_LOAD: prog load with btf function and line info. + * BPF_BTF_GET_FD_BY_ID: get a btf fd + * BPF_OBJ_GET_INFO_BY_FD: btf, func_info, line_info + and other btf related info are returned. + +The workflow typically looks like: +:: + + Application: + BPF_BTF_LOAD + | + v + BPF_MAP_CREATE and BPF_PROG_LOAD + | + V + ...... + + Introspection tool: + ...... + BPF_{PROG,MAP}_GET_NEXT_ID (get prog/map id's) + | + V + BPF_{PROG,MAP}_GET_FD_BY_ID (get a prog/map fd) + | + V + BPF_OBJ_GET_INFO_BY_FD (get bpf_prog_info/bpf_map_info with btf_id) + | | + V | + BPF_BTF_GET_FD_BY_ID (get btf_fd) | + | | + V | + BPF_OBJ_GET_INFO_BY_FD (get btf) | + | | + V V + pretty print types, dump func signatures and line info, etc. + + +3.1 BPF_BTF_LOAD +================ + +Load a blob of BTF data into kernel. A blob of data, described in +:ref:`BTF_Type_String`, can be directly loaded into the kernel. A ``btf_fd`` +is returned to a userspace. + +3.2 BPF_MAP_CREATE +================== + +A map can be created with ``btf_fd`` and specified key/value type id.:: + + __u32 btf_fd; /* fd pointing to a BTF type data */ + __u32 btf_key_type_id; /* BTF type_id of the key */ + __u32 btf_value_type_id; /* BTF type_id of the value */ + +In libbpf, the map can be defined with extra annotation like below: +:: + + struct bpf_map_def SEC("maps") btf_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(struct ipv_counts), + .max_entries = 4, + }; + BPF_ANNOTATE_KV_PAIR(btf_map, int, struct ipv_counts); + +Here, the parameters for macro BPF_ANNOTATE_KV_PAIR are map name, key and +value types for the map. During ELF parsing, libbpf is able to extract +key/value type_id's and assign them to BPF_MAP_CREATE attributes +automatically. + +.. _BPF_Prog_Load: + +3.3 BPF_PROG_LOAD +================= + +During prog_load, func_info and line_info can be passed to kernel with proper +values for the following attributes: +:: + + __u32 insn_cnt; + __aligned_u64 insns; + ...... 
+ __u32 prog_btf_fd; /* fd pointing to BTF type data */ + __u32 func_info_rec_size; /* userspace bpf_func_info size */ + __aligned_u64 func_info; /* func info */ + __u32 func_info_cnt; /* number of bpf_func_info records */ + __u32 line_info_rec_size; /* userspace bpf_line_info size */ + __aligned_u64 line_info; /* line info */ + __u32 line_info_cnt; /* number of bpf_line_info records */ + +The func_info and line_info are an array of below, respectively.:: + + struct bpf_func_info { + __u32 insn_off; /* [0, insn_cnt - 1] */ + __u32 type_id; /* pointing to a BTF_KIND_FUNC type */ + }; + struct bpf_line_info { + __u32 insn_off; /* [0, insn_cnt - 1] */ + __u32 file_name_off; /* offset to string table for the filename */ + __u32 line_off; /* offset to string table for the source line */ + __u32 line_col; /* line number and column number */ + }; + +func_info_rec_size is the size of each func_info record, and +line_info_rec_size is the size of each line_info record. Passing the record +size to kernel make it possible to extend the record itself in the future. + +Below are requirements for func_info: + * func_info[0].insn_off must be 0. + * the func_info insn_off is in strictly increasing order and matches + bpf func boundaries. + +Below are requirements for line_info: + * the first insn in each func must have a line_info record pointing to it. + * the line_info insn_off is in strictly increasing order. + +For line_info, the line number and column number are defined as below: +:: + + #define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) + #define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) + +3.4 BPF_{PROG,MAP}_GET_NEXT_ID + +In kernel, every loaded program, map or btf has a unique id. The id won't +change during the lifetime of a program, map, or btf. + +The bpf syscall command BPF_{PROG,MAP}_GET_NEXT_ID returns all id's, one for +each command, to user space, for bpf program or maps, respectively, so an +inspection tool can inspect all programs and maps. + +3.5 BPF_{PROG,MAP}_GET_FD_BY_ID + +An introspection tool cannot use id to get details about program or maps. +A file descriptor needs to be obtained first for reference-counting purpose. + +3.6 BPF_OBJ_GET_INFO_BY_FD +========================== + +Once a program/map fd is acquired, an introspection tool can get the detailed +information from kernel about this fd, some of which are BTF-related. For +example, ``bpf_map_info`` returns ``btf_id`` and key/value type ids. +``bpf_prog_info`` returns ``btf_id``, func_info, and line info for translated +bpf byte codes, and jited_line_info. + +3.7 BPF_BTF_GET_FD_BY_ID +======================== + +With ``btf_id`` obtained in ``bpf_map_info`` and ``bpf_prog_info``, bpf +syscall command BPF_BTF_GET_FD_BY_ID can retrieve a btf fd. Then, with +command BPF_OBJ_GET_INFO_BY_FD, the btf blob, originally loaded into the +kernel with BPF_BTF_LOAD, can be retrieved. + +With the btf blob, ``bpf_map_info``, and ``bpf_prog_info``, an introspection +tool has full btf knowledge and is able to pretty print map key/values, dump +func signatures and line info, along with byte/jit codes. + +4. ELF File Format Interface +**************************** + +4.1 .BTF section +================ + +The .BTF section contains type and string data. The format of this section is +same as the one describe in :ref:`BTF_Type_String`. + +.. _BTF_Ext_Section: + +4.2 .BTF.ext section +==================== + +The .BTF.ext section encodes func_info and line_info which needs loader +manipulation before loading into the kernel. 
+ +The specification for .BTF.ext section is defined at ``tools/lib/bpf/btf.h`` +and ``tools/lib/bpf/btf.c``. + +The current header of .BTF.ext section:: + + struct btf_ext_header { + __u16 magic; + __u8 version; + __u8 flags; + __u32 hdr_len; + + /* All offsets are in bytes relative to the end of this header */ + __u32 func_info_off; + __u32 func_info_len; + __u32 line_info_off; + __u32 line_info_len; + }; + +It is very similar to .BTF section. Instead of type/string section, it +contains func_info and line_info section. See :ref:`BPF_Prog_Load` for details +about func_info and line_info record format. + +The func_info is organized as below.:: + + func_info_rec_size + btf_ext_info_sec for section #1 /* func_info for section #1 */ + btf_ext_info_sec for section #2 /* func_info for section #2 */ + ... + +``func_info_rec_size`` specifies the size of ``bpf_func_info`` structure when +.BTF.ext is generated. ``btf_ext_info_sec``, defined below, is a collection of +func_info for each specific ELF section.:: + + struct btf_ext_info_sec { + __u32 sec_name_off; /* offset to section name */ + __u32 num_info; + /* Followed by num_info * record_size number of bytes */ + __u8 data[0]; + }; + +Here, num_info must be greater than 0. + +The line_info is organized as below.:: + + line_info_rec_size + btf_ext_info_sec for section #1 /* line_info for section #1 */ + btf_ext_info_sec for section #2 /* line_info for section #2 */ + ... + +``line_info_rec_size`` specifies the size of ``bpf_line_info`` structure when +.BTF.ext is generated. + +The interpretation of ``bpf_func_info->insn_off`` and +``bpf_line_info->insn_off`` is different between kernel API and ELF API. For +kernel API, the ``insn_off`` is the instruction offset in the unit of ``struct +bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the +beginning of section (``btf_ext_info_sec->sec_name_off``). + +5. Using BTF +************ + +5.1 bpftool map pretty print +============================ + +With BTF, the map key/value can be printed based on fields rather than simply +raw bytes. This is especially valuable for large structure or if your data +structure has bitfields. For example, for the following map,:: + + enum A { A1, A2, A3, A4, A5 }; + typedef enum A ___A; + struct tmp_t { + char a1:4; + int a2:4; + int :4; + __u32 a3:4; + int b; + ___A b1:4; + enum A b2:4; + }; + struct bpf_map_def SEC("maps") tmpmap = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct tmp_t), + .max_entries = 1, + }; + BPF_ANNOTATE_KV_PAIR(tmpmap, int, struct tmp_t); + +bpftool is able to pretty print like below: +:: + + [{ + "key": 0, + "value": { + "a1": 0x2, + "a2": 0x4, + "a3": 0x6, + "b": 7, + "b1": 0x8, + "b2": 0xa + } + } + ] + +5.2 bpftool prog dump +===================== + +The following is an example showing how func_info and line_info can help prog +dump with better kernel symbol names, function prototypes and line +information.:: + + $ bpftool prog dump jited pinned /sys/fs/bpf/test_btf_haskv + [...] 
+ int test_long_fname_2(struct dummy_tracepoint_args * arg): + bpf_prog_44a040bf25481309_test_long_fname_2: + ; static int test_long_fname_2(struct dummy_tracepoint_args *arg) + 0: push %rbp + 1: mov %rsp,%rbp + 4: sub $0x30,%rsp + b: sub $0x28,%rbp + f: mov %rbx,0x0(%rbp) + 13: mov %r13,0x8(%rbp) + 17: mov %r14,0x10(%rbp) + 1b: mov %r15,0x18(%rbp) + 1f: xor %eax,%eax + 21: mov %rax,0x20(%rbp) + 25: xor %esi,%esi + ; int key = 0; + 27: mov %esi,-0x4(%rbp) + ; if (!arg->sock) + 2a: mov 0x8(%rdi),%rdi + ; if (!arg->sock) + 2e: cmp $0x0,%rdi + 32: je 0x0000000000000070 + 34: mov %rbp,%rsi + ; counts = bpf_map_lookup_elem(&btf_map, &key); + [...] + +5.3 Verifier Log +================ + +The following is an example of how line_info can help debugging verification +failure.:: + + /* The code at tools/testing/selftests/bpf/test_xdp_noinline.c + * is modified as below. + */ + data = (void *)(long)xdp->data; + data_end = (void *)(long)xdp->data_end; + /* + if (data + 4 > data_end) + return XDP_DROP; + */ + *(u32 *)data = dst->dst; + + $ bpftool prog load ./test_xdp_noinline.o /sys/fs/bpf/test_xdp_noinline type xdp + ; data = (void *)(long)xdp->data; + 224: (79) r2 = *(u64 *)(r10 -112) + 225: (61) r2 = *(u32 *)(r2 +0) + ; *(u32 *)data = dst->dst; + 226: (63) *(u32 *)(r2 +0) = r1 + invalid access to packet, off=0 size=4, R2(id=0,off=0,r=0) + R2 offset is outside of the packet + +6. BTF Generation +***************** + +You need latest pahole + + https://git.kernel.org/pub/scm/devel/pahole/pahole.git/ + +or llvm (8.0 or later). The pahole acts as a dwarf2btf converter. It doesn't +support .BTF.ext and btf BTF_KIND_FUNC type yet. For example,:: + + -bash-4.4$ cat t.c + struct t { + int a:2; + int b:3; + int c:2; + } g; + -bash-4.4$ gcc -c -O2 -g t.c + -bash-4.4$ pahole -JV t.o + File t.o: + [1] STRUCT t kind_flag=1 size=4 vlen=3 + a type_id=2 bitfield_size=2 bits_offset=0 + b type_id=2 bitfield_size=3 bits_offset=2 + c type_id=2 bitfield_size=2 bits_offset=5 + [2] INT int size=4 bit_offset=0 nr_bits=32 encoding=SIGNED + +The llvm is able to generate .BTF and .BTF.ext directly with -g for bpf target +only. The assembly code (-S) is able to show the BTF encoding in assembly +format.:: + + -bash-4.4$ cat t2.c + typedef int __int32; + struct t2 { + int a2; + int (*f2)(char q1, __int32 q2, ...); + int (*f3)(); + } g2; + int main() { return 0; } + int test() { return 0; } + -bash-4.4$ clang -c -g -O2 -target bpf t2.c + -bash-4.4$ readelf -S t2.o + ...... + [ 8] .BTF PROGBITS 0000000000000000 00000247 + 000000000000016e 0000000000000000 0 0 1 + [ 9] .BTF.ext PROGBITS 0000000000000000 000003b5 + 0000000000000060 0000000000000000 0 0 1 + [10] .rel.BTF.ext REL 0000000000000000 000007e0 + 0000000000000040 0000000000000010 16 9 8 + ...... + -bash-4.4$ clang -S -g -O2 -target bpf t2.c + -bash-4.4$ cat t2.s + ...... + .section .BTF,"",@progbits + .short 60319 # 0xeb9f + .byte 1 + .byte 0 + .long 24 + .long 0 + .long 220 + .long 220 + .long 122 + .long 0 # BTF_KIND_FUNC_PROTO(id = 1) + .long 218103808 # 0xd000000 + .long 2 + .long 83 # BTF_KIND_INT(id = 2) + .long 16777216 # 0x1000000 + .long 4 + .long 16777248 # 0x1000020 + ...... + .byte 0 # string offset=0 + .ascii ".text" # string offset=1 + .byte 0 + .ascii "/home/yhs/tmp-pahole/t2.c" # string offset=7 + .byte 0 + .ascii "int main() { return 0; }" # string offset=33 + .byte 0 + .ascii "int test() { return 0; }" # string offset=58 + .byte 0 + .ascii "int" # string offset=83 + ...... 
+ .section .BTF.ext,"",@progbits + .short 60319 # 0xeb9f + .byte 1 + .byte 0 + .long 24 + .long 0 + .long 28 + .long 28 + .long 44 + .long 8 # FuncInfo + .long 1 # FuncInfo section string offset=1 + .long 2 + .long .Lfunc_begin0 + .long 3 + .long .Lfunc_begin1 + .long 5 + .long 16 # LineInfo + .long 1 # LineInfo section string offset=1 + .long 2 + .long .Ltmp0 + .long 7 + .long 33 + .long 7182 # Line 7 Col 14 + .long .Ltmp3 + .long 7 + .long 58 + .long 8206 # Line 8 Col 14 + +7. Testing +********** + +Kernel bpf selftest `test_btf.c` provides extensive set of BTF-related tests. diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index 00a8450a602f..4e77932959cc 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -15,6 +15,13 @@ that goes into great technical depth about the BPF Architecture. The primary info for the bpf syscall is available in the `man-pages`_ for `bpf(2)`_. +BPF Type Format (BTF) +===================== + +.. toctree:: + :maxdepth: 1 + + btf Frequently asked questions (FAQ) diff --git a/Documentation/cgroup-v1/memcg_test.txt b/Documentation/cgroup-v1/memcg_test.txt index 5c7f310f32bb..621e29ffb358 100644 --- a/Documentation/cgroup-v1/memcg_test.txt +++ b/Documentation/cgroup-v1/memcg_test.txt @@ -107,9 +107,9 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y. 8. LRU Each memcg has its own private LRU. Now, its handling is under global - VM's control (means that it's handled under global zone_lru_lock). + VM's control (means that it's handled under global pgdat->lru_lock). Almost all routines around memcg's LRU is called by global LRU's - list management functions under zone_lru_lock(). + list management functions under pgdat->lru_lock. A special function is mem_cgroup_isolate_pages(). This scans memcg's private LRU and call __isolate_lru_page() to extract a page diff --git a/Documentation/cgroup-v1/memory.txt b/Documentation/cgroup-v1/memory.txt index 3682e99234c2..a33cedf85427 100644 --- a/Documentation/cgroup-v1/memory.txt +++ b/Documentation/cgroup-v1/memory.txt @@ -70,7 +70,7 @@ Brief summary of control files. memory.soft_limit_in_bytes # set/show soft limit of memory usage memory.stat # show various statistics memory.use_hierarchy # set/show hierarchical account enabled - memory.force_empty # trigger forced move charge to parent + memory.force_empty # trigger forced page reclaim memory.pressure_level # set memory pressure notifications memory.swappiness # set/show swappiness parameter of vmscan (See sysctl's vm.swappiness) @@ -267,11 +267,11 @@ When oom event notifier is registered, event will be delivered. Other lock order is following: PG_locked. mm->page_table_lock - zone_lru_lock + pgdat->lru_lock lock_page_cgroup. In many cases, just lock_page_cgroup() is called. per-zone-per-cgroup LRU (cgroup's private LRU) is just guarded by - zone_lru_lock, it has no lock of its own. + pgdat->lru_lock, it has no lock of its own. 2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM) @@ -459,8 +459,9 @@ About use_hierarchy, see Section 6. the cgroup will be reclaimed and as many pages reclaimed as possible. The typical use case for this interface is before calling rmdir(). - Because rmdir() moves all pages to parent, some out-of-use page caches can be - moved to the parent. If you want to avoid that, force_empty will be useful. + Though rmdir() offlines memcg, but the memcg may still stay there due to + charged file caches. Some out-of-use page caches may keep charged until + memory pressure happens. 
If you want to avoid that, force_empty will be useful. Also, note that when memory.kmem.limit_in_bytes is set the charges due to kernel pages will still be seen. This is not considered a failure and the diff --git a/Documentation/cgroup-v1/pids.txt b/Documentation/cgroup-v1/pids.txt index 1a078b5d281a..e105d708ccde 100644 --- a/Documentation/cgroup-v1/pids.txt +++ b/Documentation/cgroup-v1/pids.txt @@ -33,6 +33,9 @@ limit in the hierarchy is followed). pids.current tracks all child cgroup hierarchies, so parent/pids.current is a superset of parent/child/pids.current. +The pids.events file contains event counters: + - max: Number of times fork failed because limit was hit. + Example ------- diff --git a/Documentation/core-api/flexible-arrays.rst b/Documentation/core-api/flexible-arrays.rst deleted file mode 100644 index b6b85a1b518e..000000000000 --- a/Documentation/core-api/flexible-arrays.rst +++ /dev/null @@ -1,130 +0,0 @@ - -=================================== -Using flexible arrays in the kernel -=================================== - -Large contiguous memory allocations can be unreliable in the Linux kernel. -Kernel programmers will sometimes respond to this problem by allocating -pages with :c:func:`vmalloc()`. This solution not ideal, though. On 32-bit -systems, memory from vmalloc() must be mapped into a relatively small address -space; it's easy to run out. On SMP systems, the page table changes required -by vmalloc() allocations can require expensive cross-processor interrupts on -all CPUs. And, on all systems, use of space in the vmalloc() range increases -pressure on the translation lookaside buffer (TLB), reducing the performance -of the system. - -In many cases, the need for memory from vmalloc() can be eliminated by piecing -together an array from smaller parts; the flexible array library exists to make -this task easier. - -A flexible array holds an arbitrary (within limits) number of fixed-sized -objects, accessed via an integer index. Sparse arrays are handled -reasonably well. Only single-page allocations are made, so memory -allocation failures should be relatively rare. The down sides are that the -arrays cannot be indexed directly, individual object size cannot exceed the -system page size, and putting data into a flexible array requires a copy -operation. It's also worth noting that flexible arrays do no internal -locking at all; if concurrent access to an array is possible, then the -caller must arrange for appropriate mutual exclusion. - -The creation of a flexible array is done with :c:func:`flex_array_alloc()`:: - - #include - - struct flex_array *flex_array_alloc(int element_size, - unsigned int total, - gfp_t flags); - -The individual object size is provided by ``element_size``, while total is the -maximum number of objects which can be stored in the array. The flags -argument is passed directly to the internal memory allocation calls. With -the current code, using flags to ask for high memory is likely to lead to -notably unpleasant side effects. - -It is also possible to define flexible arrays at compile time with:: - - DEFINE_FLEX_ARRAY(name, element_size, total); - -This macro will result in a definition of an array with the given name; the -element size and total will be checked for validity at compile time. 
- -Storing data into a flexible array is accomplished with a call to -:c:func:`flex_array_put()`:: - - int flex_array_put(struct flex_array *array, unsigned int element_nr, - void *src, gfp_t flags); - -This call will copy the data from src into the array, in the position -indicated by ``element_nr`` (which must be less than the maximum specified when -the array was created). If any memory allocations must be performed, flags -will be used. The return value is zero on success, a negative error code -otherwise. - -There might possibly be a need to store data into a flexible array while -running in some sort of atomic context; in this situation, sleeping in the -memory allocator would be a bad thing. That can be avoided by using -``GFP_ATOMIC`` for the flags value, but, often, there is a better way. The -trick is to ensure that any needed memory allocations are done before -entering atomic context, using :c:func:`flex_array_prealloc()`:: - - int flex_array_prealloc(struct flex_array *array, unsigned int start, - unsigned int nr_elements, gfp_t flags); - -This function will ensure that memory for the elements indexed in the range -defined by ``start`` and ``nr_elements`` has been allocated. Thereafter, a -``flex_array_put()`` call on an element in that range is guaranteed not to -block. - -Getting data back out of the array is done with :c:func:`flex_array_get()`:: - - void *flex_array_get(struct flex_array *fa, unsigned int element_nr); - -The return value is a pointer to the data element, or NULL if that -particular element has never been allocated. - -Note that it is possible to get back a valid pointer for an element which -has never been stored in the array. Memory for array elements is allocated -one page at a time; a single allocation could provide memory for several -adjacent elements. Flexible array elements are normally initialized to the -value ``FLEX_ARRAY_FREE`` (defined as 0x6c in ), so errors -involving that number probably result from use of unstored array entries. -Note that, if array elements are allocated with ``__GFP_ZERO``, they will be -initialized to zero and this poisoning will not happen. - -Individual elements in the array can be cleared with -:c:func:`flex_array_clear()`:: - - int flex_array_clear(struct flex_array *array, unsigned int element_nr); - -This function will set the given element to ``FLEX_ARRAY_FREE`` and return -zero. If storage for the indicated element is not allocated for the array, -``flex_array_clear()`` will return ``-EINVAL`` instead. Note that clearing an -element does not release the storage associated with it; to reduce the -allocated size of an array, call :c:func:`flex_array_shrink()`:: - - int flex_array_shrink(struct flex_array *array); - -The return value will be the number of pages of memory actually freed. -This function works by scanning the array for pages containing nothing but -``FLEX_ARRAY_FREE`` bytes, so (1) it can be expensive, and (2) it will not work -if the array's pages are allocated with ``__GFP_ZERO``. - -It is possible to remove all elements of an array with a call to -:c:func:`flex_array_free_parts()`:: - - void flex_array_free_parts(struct flex_array *array); - -This call frees all elements, but leaves the array itself in place. -Freeing the entire array is done with :c:func:`flex_array_free()`:: - - void flex_array_free(struct flex_array *array); - -As of this writing, there are no users of flexible arrays in the mainline -kernel. 
The functions described here are also not exported to modules; -that will probably be fixed when somebody comes up with a need for it. - - -Flexible array functions ------------------------- - -.. kernel-doc:: include/linux/flex_array.h diff --git a/Documentation/core-api/generic-radix-tree.rst b/Documentation/core-api/generic-radix-tree.rst new file mode 100644 index 000000000000..ed42839ae42f --- /dev/null +++ b/Documentation/core-api/generic-radix-tree.rst @@ -0,0 +1,12 @@ +================================= +Generic radix trees/sparse arrays +================================= + +.. kernel-doc:: include/linux/generic-radix-tree.h + :doc: Generic radix trees/sparse arrays + +generic radix tree functions +---------------------------- + +.. kernel-doc:: include/linux/generic-radix-tree.h + :functions: diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst index 3adee82be311..6870baffef82 100644 --- a/Documentation/core-api/index.rst +++ b/Documentation/core-api/index.rst @@ -28,6 +28,7 @@ Core utilities errseq printk-formats circular-buffers + generic-radix-tree memory-allocation mm-api gfp_mask-from-fs-io diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index cdd24943fbcc..71f5d2fe39b7 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst @@ -356,10 +356,6 @@ Read-Copy Update (RCU) .. kernel-doc:: include/linux/rcupdate.h -.. kernel-doc:: include/linux/rcupdate_wait.h - -.. kernel-doc:: include/linux/rcutree.h - .. kernel-doc:: kernel/rcu/tree.c .. kernel-doc:: kernel/rcu/tree_plugin.h diff --git a/Documentation/core-api/memory-allocation.rst b/Documentation/core-api/memory-allocation.rst index 8954a88ff5b7..7744aa3bf2e0 100644 --- a/Documentation/core-api/memory-allocation.rst +++ b/Documentation/core-api/memory-allocation.rst @@ -1,4 +1,4 @@ -.. _memory-allocation: +.. _memory_allocation: ======================= Memory Allocation Guide @@ -113,9 +113,11 @@ see :c:func:`kvmalloc_node` reference documentation. Note that If you need to allocate many identical objects you can use the slab cache allocator. The cache should be set up with -:c:func:`kmem_cache_create` before it can be used. Afterwards -:c:func:`kmem_cache_alloc` and its convenience wrappers can allocate -memory from that cache. +:c:func:`kmem_cache_create` or :c:func:`kmem_cache_create_usercopy` +before it can be used. The second function should be used if a part of +the cache might be copied to the userspace. After the cache is +created :c:func:`kmem_cache_alloc` and its convenience wrappers can +allocate memory from that cache. When the allocated memory is no longer needed it must be freed. You can use :c:func:`kvfree` for the memory allocated with `kmalloc`, diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst index aa8e54b85221..128e8a721c1e 100644 --- a/Documentation/core-api/mm-api.rst +++ b/Documentation/core-api/mm-api.rst @@ -35,7 +35,7 @@ users will want to use a plain ``GFP_KERNEL``. :doc: Reclaim modifiers .. 
kernel-doc:: include/linux/gfp.h - :doc: Common combinations + :doc: Useful GFP flag combinations The Slab Cache ============== diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst index a7fae4538946..c37ec7cd9c06 100644 --- a/Documentation/core-api/printk-formats.rst +++ b/Documentation/core-api/printk-formats.rst @@ -13,6 +13,10 @@ Integer types If variable is of Type, use printk format specifier: ------------------------------------------------------------ + char %hhd or %hhx + unsigned char %hhu or %hhx + short int %hd or %hx + unsigned short int %hu or %hx int %d or %x unsigned int %u or %x long %ld or %lx @@ -21,6 +25,10 @@ Integer types unsigned long long %llu or %llx size_t %zu or %zx ssize_t %zd or %zx + s8 %hhd or %hhx + u8 %hhu or %hhx + s16 %hd or %hx + u16 %hu or %hx s32 %d or %x u32 %u or %x s64 %lld or %llx diff --git a/Documentation/core-api/refcount-vs-atomic.rst b/Documentation/core-api/refcount-vs-atomic.rst index 322851bada16..976e85adffe8 100644 --- a/Documentation/core-api/refcount-vs-atomic.rst +++ b/Documentation/core-api/refcount-vs-atomic.rst @@ -54,6 +54,13 @@ must propagate to all other CPUs before the release operation (A-cumulative property). This is implemented using :c:func:`smp_store_release`. +An ACQUIRE memory ordering guarantees that all post loads and +stores (all po-later instructions) on the same CPU are +completed after the acquire operation. It also guarantees that all +po-later stores on the same CPU must propagate to all other CPUs +after the acquire operation executes. This is implemented using +:c:func:`smp_acquire__after_ctrl_dep`. + A control dependency (on success) for refcounters guarantees that if a reference for an object was successfully obtained (reference counter increment or addition happened, function returned true), @@ -119,13 +126,24 @@ Memory ordering guarantees changes: result of obtaining pointer to the object! -case 5) - decrement-based RMW ops that return a value ------------------------------------------------------ +case 5) - generic dec/sub decrement-based RMW ops that return a value +--------------------------------------------------------------------- Function changes: * :c:func:`atomic_dec_and_test` --> :c:func:`refcount_dec_and_test` * :c:func:`atomic_sub_and_test` --> :c:func:`refcount_sub_and_test` + +Memory ordering guarantees changes: + + * fully ordered --> RELEASE ordering + ACQUIRE ordering on success + + +case 6) other decrement-based RMW ops that return a value +--------------------------------------------------------- + +Function changes: + * no atomic counterpart --> :c:func:`refcount_dec_if_one` * ``atomic_add_unless(&var, -1, 1)`` --> ``refcount_dec_not_one(&var)`` @@ -136,7 +154,7 @@ Memory ordering guarantees changes: .. note:: :c:func:`atomic_add_unless` only provides full order on success. -case 6) - lock-based RMW +case 7) - lock-based RMW ------------------------ Function changes: diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 5d54b27c6eba..ef6f9f98f595 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -85,7 +85,7 @@ which was at that index; if it returns the same entry which was passed as If you want to only store a new entry to an index if the current entry at that index is ``NULL``, you can use :c:func:`xa_insert` which -returns ``-EEXIST`` if the entry is not empty. +returns ``-EBUSY`` if the entry is not empty. 
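As a sketch of how that return value is typically consumed (the ``stash_once`` helper and the ``my_objects`` array are names made up for this example, not kernel API), a caller might do::

  #include <linux/gfp.h>
  #include <linux/printk.h>
  #include <linux/xarray.h>

  static DEFINE_XARRAY(my_objects);

  /* Store p at index only if nothing is stored there yet. */
  static int stash_once(unsigned long index, void *p)
  {
          int err = xa_insert(&my_objects, index, p, GFP_KERNEL);

          if (err == -EBUSY)
                  pr_debug("index %lu is already in use\n", index);
          return err;
  }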
You can enquire whether a mark is set on an entry by using :c:func:`xa_get_mark`. If the entry is not ``NULL``, you can set a mark @@ -131,17 +131,23 @@ If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`, the XArray changes to track whether entries are in use or not. -You can call :c:func:`xa_alloc` to store the entry at any unused index +You can call :c:func:`xa_alloc` to store the entry at an unused index in the XArray. If you need to modify the array from interrupt context, you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable interrupts while allocating the ID. -Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` -will mark the entry as being allocated. Unlike a normal XArray, storing +Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert` will +also mark the entry as being allocated. Unlike a normal XArray, storing ``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`. To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if you only want to free the entry if it's ``NULL``). +By default, the lowest free entry is allocated starting from 0. If you +want to allocate entries starting at 1, it is more efficient to use +:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. If you want to +allocate IDs up to a maximum, then wrap back around to the lowest free +ID, you can use :c:func:`xa_alloc_cyclic`. + You cannot use ``XA_MARK_0`` with an allocating XArray as this mark is used to track whether an entry is free or not. The other marks are available for your use. @@ -209,7 +215,6 @@ Assumes xa_lock held on entry: * :c:func:`__xa_erase` * :c:func:`__xa_cmpxchg` * :c:func:`__xa_alloc` - * :c:func:`__xa_reserve` * :c:func:`__xa_set_mark` * :c:func:`__xa_clear_mark` diff --git a/Documentation/cpuidle/driver.txt b/Documentation/cpuidle/driver.txt deleted file mode 100644 index 1b0d81d92583..000000000000 --- a/Documentation/cpuidle/driver.txt +++ /dev/null @@ -1,37 +0,0 @@ - - - Supporting multiple CPU idle levels in kernel - - cpuidle drivers - - - - -cpuidle driver hooks into the cpuidle infrastructure and handles the -architecture/platform dependent part of CPU idle states. Driver -provides the platform idle state detection capability and also -has mechanisms in place to support actual entry-exit into CPU idle states. - -cpuidle driver initializes the cpuidle_device structure for each CPU device -and registers with cpuidle using cpuidle_register_device. - -If all the idle states are the same, the wrapper function cpuidle_register -could be used instead. - -It can also support the dynamic changes (like battery <-> AC), by using -cpuidle_pause_and_lock, cpuidle_disable_device and cpuidle_enable_device, -cpuidle_resume_and_unlock. 
- -Interfaces: -extern int cpuidle_register(struct cpuidle_driver *drv, - const struct cpumask *const coupled_cpus); -extern int cpuidle_unregister(struct cpuidle_driver *drv); -extern int cpuidle_register_driver(struct cpuidle_driver *drv); -extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); -extern int cpuidle_register_device(struct cpuidle_device *dev); -extern void cpuidle_unregister_device(struct cpuidle_device *dev); - -extern void cpuidle_pause_and_lock(void); -extern void cpuidle_resume_and_unlock(void); -extern int cpuidle_enable_device(struct cpuidle_device *dev); -extern void cpuidle_disable_device(struct cpuidle_device *dev); diff --git a/Documentation/cpuidle/governor.txt b/Documentation/cpuidle/governor.txt deleted file mode 100644 index d9020f5e847b..000000000000 --- a/Documentation/cpuidle/governor.txt +++ /dev/null @@ -1,28 +0,0 @@ - - - - Supporting multiple CPU idle levels in kernel - - cpuidle governors - - - - -cpuidle governor is policy routine that decides what idle state to enter at -any given time. cpuidle core uses different callbacks to the governor. - -* enable() to enable governor for a particular device -* disable() to disable governor for a particular device -* select() to select an idle state to enter -* reflect() called after returning from the idle state, which can be used - by the governor for some record keeping. - -More than one governor can be registered at the same time and -users can switch between drivers using /sysfs interface (when enabled). -More than one governor part is supported for developers to easily experiment -with different governors. By default, most optimal governor based on your -kernel configuration and platform will be selected by cpuidle. - -Interfaces: -extern int cpuidle_register_governor(struct cpuidle_governor *gov); -struct cpuidle_governor diff --git a/Documentation/dev-tools/kcov.rst b/Documentation/dev-tools/kcov.rst index c2f6452e38ed..42b612677799 100644 --- a/Documentation/dev-tools/kcov.rst +++ b/Documentation/dev-tools/kcov.rst @@ -22,7 +22,7 @@ Configure the kernel with:: CONFIG_KCOV=y -CONFIG_KCOV requires gcc built on revision 231296 or later. +CONFIG_KCOV requires gcc 6.1.0 or later. If the comparison operands need to be collected, set:: diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt index ff0841711fd5..8ae1cf8e94da 100644 --- a/Documentation/device-mapper/cache.txt +++ b/Documentation/device-mapper/cache.txt @@ -206,6 +206,9 @@ Optional feature arguments are: in a separate btree, which improves speed of shutting down the cache. + no_discard_passdown : disable passing down discards from the cache + to the origin's data device. + A policy called 'default' is always registered. This is an alias for the policy we currently think is giving best all round performance. diff --git a/Documentation/device-mapper/dm-init.txt b/Documentation/device-mapper/dm-init.txt new file mode 100644 index 000000000000..8464ee7c01b8 --- /dev/null +++ b/Documentation/device-mapper/dm-init.txt @@ -0,0 +1,114 @@ +Early creation of mapped devices +==================================== + +It is possible to configure a device-mapper device to act as the root device for +your system in two ways. + +The first is to build an initial ramdisk which boots to a minimal userspace +which configures the device, then pivot_root(8) in to it. + +The second is to create one or more device-mappers using the module parameter +"dm-mod.create=" through the kernel boot command line argument. 
+
+The format is specified as a string of data separated by commas and optionally
+semi-colons, where:
+ - a comma is used to separate fields like name, uuid, flags and table
+   (specifies one device)
+ - a semi-colon is used to separate devices.
+
+So the format will look like this:
+
+ dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>+]+]
+
+Where,
+	<name>		::= The device name.
+	<uuid>		::= xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx | ""
+	<minor>		::= The device minor number | ""
+	<flags>		::= "ro" | "rw"
+	<table>		::= <start_sector> <num_sectors> <target_type> <target_args>
+	<target_type>	::= "verity" | "linear" | ... (see list below)
+
+The dm line should be equivalent to the one used by the dmsetup tool with the
+--concise argument.
+
+Target types
+============
+
+Not all target types are available as there are serious risks in allowing
+activation of certain DM targets without first using userspace tools to check
+the validity of associated metadata.
+
+	"cache": constrained, userspace should verify cache device
+	"crypt": allowed
+	"delay": allowed
+	"era": constrained, userspace should verify metadata device
+	"flakey": constrained, meant for test
+	"linear": allowed
+	"log-writes": constrained, userspace should verify metadata device
+	"mirror": constrained, userspace should verify main/mirror device
+	"raid": constrained, userspace should verify metadata device
+	"snapshot": constrained, userspace should verify src/dst device
+	"snapshot-origin": allowed
+	"snapshot-merge": constrained, userspace should verify src/dst device
+	"striped": allowed
+	"switch": constrained, userspace should verify dev path
+	"thin": constrained, requires dm target message from userspace
+	"thin-pool": constrained, requires dm target message from userspace
+	"verity": allowed
+	"writecache": constrained, userspace should verify cache device
+	"zero": constrained, not meant for rootfs
+
+If the target is not listed above, it is constrained by default (not tested).
+
+Examples
+========
+An example of booting to a linear array made up of user-mode linux block
+devices:
+
+ dm-mod.create="lroot,,,rw, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" root=/dev/dm-0
+
+This will boot to a rw dm-linear target of 8192 sectors split across two block
+devices identified by their major:minor numbers. After boot, udev will rename
+this target to /dev/mapper/lroot (depending on the rules). No uuid was assigned.
+
+An example of multiple device-mappers, with the dm-mod.create="..." 
contents is shown here +split on multiple lines for readability: + + vroot,,,ro, + 0 1740800 verity 254:0 254:0 1740800 sha1 + 76e9be054b15884a9fa85973e9cb274c93afadb6 + 5b3549d54d6c7a3837b9b81ed72e49463a64c03680c47835bef94d768e5646fe; + vram,,,rw, + 0 32768 linear 1:0 0, + 32768 32768 linear 1:1 0 + +Other examples (per target): + +"crypt": + dm-crypt,,8,ro, + 0 1048576 crypt aes-xts-plain64 + babebabebabebabebabebabebabebabebabebabebabebabebabebabebabebabe 0 + /dev/sda 0 1 allow_discards + +"delay": + dm-delay,,4,ro,0 409600 delay /dev/sda1 0 500 + +"linear": + dm-linear,,,rw, + 0 32768 linear /dev/sda1 0, + 32768 1024000 linear /dev/sda2 0, + 1056768 204800 linear /dev/sda3 0, + 1261568 512000 linear /dev/sda4 0 + +"snapshot-origin": + dm-snap-orig,,4,ro,0 409600 snapshot-origin 8:2 + +"striped": + dm-striped,,4,ro,0 1638400 striped 4 4096 + /dev/sda1 0 /dev/sda2 0 /dev/sda3 0 /dev/sda4 0 + +"verity": + dm-verity,,4,ro, + 0 1638400 verity 1 8:1 8:2 4096 4096 204800 1 sha256 + fb1a5a0f00deb908d8b53cb270858975e76cf64105d412ce764225d53b8f3cfd + 51934789604d1b92399c52e7cb149d1b3a1b74bbbcb103b2a0aaacbed5c08584 diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile index 6e5cef0ed6fb..63b139f9ae28 100644 --- a/Documentation/devicetree/bindings/Makefile +++ b/Documentation/devicetree/bindings/Makefile @@ -15,9 +15,13 @@ DT_TMP_SCHEMA := processed-schema.yaml extra-y += $(DT_TMP_SCHEMA) quiet_cmd_mk_schema = SCHEMA $@ - cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^) + cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(real-prereqs) + +DT_DOCS = $(shell \ + cd $(srctree)/$(src) && \ + find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \ + ) -DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml') DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) diff --git a/Documentation/devicetree/bindings/arm/amlogic.txt b/Documentation/devicetree/bindings/arm/amlogic.txt index 8dbc259081e4..7f40cb5f490b 100644 --- a/Documentation/devicetree/bindings/arm/amlogic.txt +++ b/Documentation/devicetree/bindings/arm/amlogic.txt @@ -109,6 +109,7 @@ Board compatible values (alphabetically, grouped by SoC): - "amlogic,s400" (Meson axg a113d) - "amlogic,u200" (Meson g12a s905d2) + - "amediatech,x96-max" (Meson g12a s905x2) Amlogic Meson Firmware registers Interface ------------------------------------------ diff --git a/Documentation/devicetree/bindings/arm/armadeus.txt b/Documentation/devicetree/bindings/arm/armadeus.txt deleted file mode 100644 index 9821283ff516..000000000000 --- a/Documentation/devicetree/bindings/arm/armadeus.txt +++ /dev/null @@ -1,6 +0,0 @@ -Armadeus i.MX Platforms Device Tree Bindings ------------------------------------------------ - -APF51: i.MX51 based module. -Required root node properties: - - compatible = "armadeus,imx51-apf51", "fsl,imx51"; diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt index 14f319f694b7..e61d00e25b95 100644 --- a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt +++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt @@ -21,7 +21,8 @@ Its subnodes can be: RSTC Reset Controller required properties: - compatible: Should be "atmel,-rstc". 
- can be "at91sam9260" or "at91sam9g45" or "sama5d3" + can be "at91sam9260", "at91sam9g45", "sama5d3" or "samx7" + it also can be "microchip,sam9x60-rstc" - reg: Should contain registers location and length - clocks: phandle to input clock. @@ -147,6 +148,7 @@ required properties: - compatible: Should be "atmel,-sfr", "syscon" or "atmel,-sfrbu", "syscon" can be "sama5d3", "sama5d4" or "sama5d2". + It also can be "microchip,sam9x60-sfr", "syscon". - reg: Should contain registers location and length sfr@f0038000 { diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt index 0dcc3ea5adff..245328f36580 100644 --- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt +++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm2835.txt @@ -30,6 +30,10 @@ Raspberry Pi 2 Model B Required root node properties: compatible = "raspberrypi,2-model-b", "brcm,bcm2836"; +Raspberry Pi 3 Model A+ +Required root node properties: +compatible = "raspberrypi,3-model-a-plus", "brcm,bcm2837"; + Raspberry Pi 3 Model B Required root node properties: compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; diff --git a/Documentation/devicetree/bindings/arm/bhf.txt b/Documentation/devicetree/bindings/arm/bhf.txt deleted file mode 100644 index 886b503caf9c..000000000000 --- a/Documentation/devicetree/bindings/arm/bhf.txt +++ /dev/null @@ -1,6 +0,0 @@ -Beckhoff Automation Platforms Device Tree Bindings --------------------------------------------------- - -CX9020 Embedded PC -Required root node properties: - - compatible = "bhf,cx9020", "fsl,imx53"; diff --git a/Documentation/devicetree/bindings/arm/bitmain.yaml b/Documentation/devicetree/bindings/arm/bitmain.yaml new file mode 100644 index 000000000000..0efdb4ac028e --- /dev/null +++ b/Documentation/devicetree/bindings/arm/bitmain.yaml @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/bitmain.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Bitmain platform device tree bindings + +maintainers: + - Manivannan Sadhasivam + +properties: + compatible: + items: + - enum: + - bitmain,sophon-edge + - const: bitmain,bm1880 +... diff --git a/Documentation/devicetree/bindings/arm/compulab-boards.txt b/Documentation/devicetree/bindings/arm/compulab-boards.txt deleted file mode 100644 index 42a10285af9c..000000000000 --- a/Documentation/devicetree/bindings/arm/compulab-boards.txt +++ /dev/null @@ -1,25 +0,0 @@ -CompuLab SB-SOM is a multi-module baseboard capable of carrying: - - CM-T43 - - CM-T54 - - CM-QS600 - - CL-SOM-AM57x - - CL-SOM-iMX7 -modules with minor modifications to the SB-SOM assembly. - -Required root node properties: - - compatible = should be "compulab,sb-som" - -Compulab CL-SOM-iMX7 is a miniature System-on-Module (SoM) based on -Freescale i.MX7 ARM Cortex-A7 System-on-Chip. - -Required root node properties: - - compatible = "compulab,cl-som-imx7", "fsl,imx7d"; - -Compulab SBC-iMX7 is a single board computer based on the -Freescale i.MX7 system-on-chip. SBC-iMX7 is implemented with -the CL-SOM-iMX7 System-on-Module providing most of the functions, -and SB-SOM-iMX7 carrier board providing additional peripheral -functions and connectors. 
- -Required root node properties: - - compatible = "compulab,sbc-imx7", "compulab,cl-som-imx7", "fsl,imx7d"; diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml index 298c17b327c6..365dcf384d73 100644 --- a/Documentation/devicetree/bindings/arm/cpus.yaml +++ b/Documentation/devicetree/bindings/arm/cpus.yaml @@ -228,6 +228,7 @@ patternProperties: - renesas,r9a06g032-smp - rockchip,rk3036-smp - rockchip,rk3066-smp + - socionext,milbeaut-m10v-smp - ste,dbx500-smp cpu-release-addr: diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,imx7ulp-sim.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,imx7ulp-sim.txt new file mode 100644 index 000000000000..7d0c7f002401 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/freescale/fsl,imx7ulp-sim.txt @@ -0,0 +1,16 @@ +Freescale i.MX7ULP System Integration Module +---------------------------------------------- +The system integration module (SIM) provides system control and chip configuration +registers. In this module, chip revision information is located in JTAG ID register, +and a set of registers have been made available in DGO domain for SW use, with the +objective to maintain its value between system resets. + +Required properties: +- compatible: Should be "fsl,imx7ulp-sim". +- reg: Specifies base physical address and size of the register sets. + +Example: +sim: sim@410a3000 { + compatible = "fsl,imx7ulp-sim", "syscon"; + reg = <0x410a3000 0x1000>; +}; diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt index 27784b6edfed..72d481c8dd48 100644 --- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt +++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt @@ -58,7 +58,11 @@ This binding for the SCU power domain providers uses the generic power domain binding[2]. Required properties: -- compatible: Should be "fsl,imx8qxp-scu-pd". +- compatible: Should be one of: + "fsl,imx8qm-scu-pd", + "fsl,imx8qxp-scu-pd" + followed by "fsl,scu-pd" + - #power-domain-cells: Must be 1. Contains the Resource ID used by SCU commands. See detailed Resource ID list from: @@ -70,7 +74,10 @@ Clock bindings based on SCU Message Protocol This binding uses the common clock binding[1]. Required properties: -- compatible: Should be "fsl,imx8qxp-clock". +- compatible: Should be one of: + "fsl,imx8qm-clock" + "fsl,imx8qxp-clock" + followed by "fsl,scu-clk" - #clock-cells: Should be 1. Contains the Clock ID value. 
- clocks: List of clock specifiers, must contain an entry for each required entry in clock-names @@ -137,7 +144,7 @@ firmware { &lsio_mu1 1 3>; clk: clk { - compatible = "fsl,imx8qxp-clk"; + compatible = "fsl,imx8qxp-clk", "fsl,scu-clk"; #clock-cells = <1>; }; @@ -154,7 +161,7 @@ firmware { }; pd: imx8qx-pd { - compatible = "fsl,imx8qxp-scu-pd"; + compatible = "fsl,imx8qxp-scu-pd", "fsl,scu-pd"; #power-domain-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt deleted file mode 100644 index 7fbc42484001..000000000000 --- a/Documentation/devicetree/bindings/arm/fsl.txt +++ /dev/null @@ -1,237 +0,0 @@ -Freescale i.MX Platforms Device Tree Bindings ------------------------------------------------ - -i.MX23 Evaluation Kit -Required root node properties: - - compatible = "fsl,imx23-evk", "fsl,imx23"; - -i.MX25 Product Development Kit -Required root node properties: - - compatible = "fsl,imx25-pdk", "fsl,imx25"; - -i.MX27 Product Development Kit -Required root node properties: - - compatible = "fsl,imx27-pdk", "fsl,imx27"; - -i.MX28 Evaluation Kit -Required root node properties: - - compatible = "fsl,imx28-evk", "fsl,imx28"; - -i.MX51 Babbage Board -Required root node properties: - - compatible = "fsl,imx51-babbage", "fsl,imx51"; - -i.MX53 Automotive Reference Design Board -Required root node properties: - - compatible = "fsl,imx53-ard", "fsl,imx53"; - -i.MX53 Evaluation Kit -Required root node properties: - - compatible = "fsl,imx53-evk", "fsl,imx53"; - -i.MX53 Quick Start Board -Required root node properties: - - compatible = "fsl,imx53-qsb", "fsl,imx53"; - -i.MX53 Smart Mobile Reference Design Board -Required root node properties: - - compatible = "fsl,imx53-smd", "fsl,imx53"; - -i.MX6 Quad Armadillo2 Board -Required root node properties: - - compatible = "fsl,imx6q-arm2", "fsl,imx6q"; - -i.MX6 Quad SABRE Lite Board -Required root node properties: - - compatible = "fsl,imx6q-sabrelite", "fsl,imx6q"; - -i.MX6 Quad SABRE Smart Device Board -Required root node properties: - - compatible = "fsl,imx6q-sabresd", "fsl,imx6q"; - -i.MX6 Quad SABRE Automotive Board -Required root node properties: - - compatible = "fsl,imx6q-sabreauto", "fsl,imx6q"; - -i.MX6SLL EVK board -Required root node properties: - - compatible = "fsl,imx6sll-evk", "fsl,imx6sll"; - -i.MX6 Quad Plus SABRE Smart Device Board -Required root node properties: - - compatible = "fsl,imx6qp-sabresd", "fsl,imx6qp"; - -i.MX6 Quad Plus SABRE Automotive Board -Required root node properties: - - compatible = "fsl,imx6qp-sabreauto", "fsl,imx6qp"; - -i.MX6 DualLite SABRE Smart Device Board -Required root node properties: - - compatible = "fsl,imx6dl-sabresd", "fsl,imx6dl"; - -i.MX6 DualLite/Solo SABRE Automotive Board -Required root node properties: - - compatible = "fsl,imx6dl-sabreauto", "fsl,imx6dl"; - -i.MX6 SoloLite EVK Board -Required root node properties: - - compatible = "fsl,imx6sl-evk", "fsl,imx6sl"; - -i.MX6 UltraLite 14x14 EVK Board -Required root node properties: - - compatible = "fsl,imx6ul-14x14-evk", "fsl,imx6ul"; - -i.MX6 UltraLiteLite 14x14 EVK Board -Required root node properties: - - compatible = "fsl,imx6ull-14x14-evk", "fsl,imx6ull"; - -i.MX6 ULZ 14x14 EVK Board -Required root node properties: - - compatible = "fsl,imx6ulz-14x14-evk", "fsl,imx6ull", "fsl,imx6ulz"; - -i.MX6 SoloX SDB Board -Required root node properties: - - compatible = "fsl,imx6sx-sdb", "fsl,imx6sx"; - -i.MX6 SoloX Sabre Auto Board -Required root node properties: - - compatible = 
"fsl,imx6sx-sabreauto", "fsl,imx6sx"; - -i.MX7 SabreSD Board -Required root node properties: - - compatible = "fsl,imx7d-sdb", "fsl,imx7d"; - -i.MX7ULP Evaluation Kit -Required root node properties: - - compatible = "fsl,imx7ulp-evk", "fsl,imx7ulp"; - -Generic i.MX boards -------------------- - -No iomux setup is done for these boards, so this must have been configured -by the bootloader for boards to work with the generic bindings. - -i.MX27 generic board -Required root node properties: - - compatible = "fsl,imx27"; - -i.MX51 generic board -Required root node properties: - - compatible = "fsl,imx51"; - -i.MX53 generic board -Required root node properties: - - compatible = "fsl,imx53"; - -i.MX6q generic board -Required root node properties: - - compatible = "fsl,imx6q"; - -i.MX7ULP generic board -Required root node properties: - - compatible = "fsl,imx7ulp"; - -Freescale Vybrid Platform Device Tree Bindings ----------------------------------------------- - -For the Vybrid SoC familiy all variants with DDR controller are supported, -which is the VF5xx and VF6xx series. Out of historical reasons, in most -places the kernel uses vf610 to refer to the whole familiy. -The compatible string "fsl,vf610m4" is used for the secondary Cortex-M4 -core support. - -Required root node compatible property (one of them): - - compatible = "fsl,vf500"; - - compatible = "fsl,vf510"; - - compatible = "fsl,vf600"; - - compatible = "fsl,vf610"; - - compatible = "fsl,vf610m4"; - -Freescale LS1021A Platform Device Tree Bindings ------------------------------------------------- - -Required root node compatible properties: - - compatible = "fsl,ls1021a"; - -Freescale ARMv8 based Layerscape SoC family Device Tree Bindings ----------------------------------------------------------------- - -LS1012A SoC -Required root node properties: - - compatible = "fsl,ls1012a"; - -LS1012A ARMv8 based RDB Board -Required root node properties: - - compatible = "fsl,ls1012a-rdb", "fsl,ls1012a"; - -LS1012A ARMv8 based FRDM Board -Required root node properties: - - compatible = "fsl,ls1012a-frdm", "fsl,ls1012a"; - -LS1012A ARMv8 based QDS Board -Required root node properties: - - compatible = "fsl,ls1012a-qds", "fsl,ls1012a"; - -LS1043A SoC -Required root node properties: - - compatible = "fsl,ls1043a"; - -LS1043A ARMv8 based RDB Board -Required root node properties: - - compatible = "fsl,ls1043a-rdb", "fsl,ls1043a"; - -LS1043A ARMv8 based QDS Board -Required root node properties: - - compatible = "fsl,ls1043a-qds", "fsl,ls1043a"; - -LS1046A SoC -Required root node properties: - - compatible = "fsl,ls1046a"; - -LS1046A ARMv8 based QDS Board -Required root node properties: - - compatible = "fsl,ls1046a-qds", "fsl,ls1046a"; - -LS1046A ARMv8 based RDB Board -Required root node properties: - - compatible = "fsl,ls1046a-rdb", "fsl,ls1046a"; - -LS1088A SoC -Required root node properties: - - compatible = "fsl,ls1088a"; - -LS1088A ARMv8 based QDS Board -Required root node properties: - - compatible = "fsl,ls1088a-qds", "fsl,ls1088a"; - -LS1088A ARMv8 based RDB Board -Required root node properties: - - compatible = "fsl,ls1088a-rdb", "fsl,ls1088a"; - -LS2080A SoC -Required root node properties: - - compatible = "fsl,ls2080a"; - -LS2080A ARMv8 based Simulator model -Required root node properties: - - compatible = "fsl,ls2080a-simu", "fsl,ls2080a"; - -LS2080A ARMv8 based QDS Board -Required root node properties: - - compatible = "fsl,ls2080a-qds", "fsl,ls2080a"; - -LS2080A ARMv8 based RDB Board -Required root node properties: - - compatible = 
"fsl,ls2080a-rdb", "fsl,ls2080a"; - -LS2088A SoC -Required root node properties: - - compatible = "fsl,ls2088a"; - -LS2088A ARMv8 based QDS Board -Required root node properties: - - compatible = "fsl,ls2088a-qds", "fsl,ls2088a"; - -LS2088A ARMv8 based RDB Board -Required root node properties: - - compatible = "fsl,ls2088a-rdb", "fsl,ls2088a"; diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml new file mode 100644 index 000000000000..7e2cd6ad26bd --- /dev/null +++ b/Documentation/devicetree/bindings/arm/fsl.yaml @@ -0,0 +1,232 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/bindings/arm/fsl.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale i.MX Platforms Device Tree Bindings + +maintainers: + - Shawn Guo + - Li Yang + +properties: + $nodename: + const: '/' + compatible: + oneOf: + - description: i.MX23 based Boards + items: + - enum: + - fsl,imx23-evk + - olimex,imx23-olinuxino + - const: fsl,imx23 + + - description: i.MX25 Product Development Kit + items: + - enum: + - fsl,imx25-pdk + - const: fsl,imx25 + + - description: i.MX27 Product Development Kit + items: + - enum: + - fsl,imx27-pdk + - const: fsl,imx27 + + - description: i.MX28 based Boards + items: + - enum: + - fsl,imx28-evk + - i2se,duckbill + - i2se,duckbill-2 + - technologic,imx28-ts4600 + - const: fsl,imx28 + - description: i.MX28 Duckbill 2 based Boards + items: + - enum: + - i2se,duckbill-2-485 + - i2se,duckbill-2-enocean + - i2se,duckbill-2-spi + - const: i2se,duckbill-2 + - const: fsl,imx28 + + - description: i.MX51 Babbage Board + items: + - enum: + - armadeus,imx51-apf51 + - fsl,imx51-babbage + - technologic,imx51-ts4800 + - const: fsl,imx51 + + - description: i.MX53 based Boards + items: + - enum: + - bhf,cx9020 + - fsl,imx53-ard + - fsl,imx53-evk + - fsl,imx53-qsb + - fsl,imx53-smd + - const: fsl,imx53 + + - description: i.MX6Q based Boards + items: + - enum: + - fsl,imx6q-arm2 + - fsl,imx6q-sabreauto + - fsl,imx6q-sabrelite + - fsl,imx6q-sabresd + - technologic,imx6q-ts4900 + - technologic,imx6q-ts7970 + - const: fsl,imx6q + + - description: i.MX6QP based Boards + items: + - enum: + - fsl,imx6qp-sabreauto # i.MX6 Quad Plus SABRE Automotive Board + - fsl,imx6qp-sabresd # i.MX6 Quad Plus SABRE Smart Device Board + - const: fsl,imx6qp + + - description: i.MX6DL based Boards + items: + - enum: + - fsl,imx6dl-sabreauto # i.MX6 DualLite/Solo SABRE Automotive Board + - fsl,imx6dl-sabresd # i.MX6 DualLite SABRE Smart Device Board + - technologic,imx6dl-ts4900 + - technologic,imx6dl-ts7970 + - ysoft,imx6dl-yapp4-draco # i.MX6 DualLite Y Soft IOTA Draco board + - ysoft,imx6dl-yapp4-hydra # i.MX6 DualLite Y Soft IOTA Hydra board + - ysoft,imx6dl-yapp4-ursa # i.MX6 Solo Y Soft IOTA Ursa board + - const: fsl,imx6dl + + - description: i.MX6SL based Boards + items: + - enum: + - fsl,imx6sl-evk # i.MX6 SoloLite EVK Board + - const: fsl,imx6sl + + - description: i.MX6SLL based Boards + items: + - enum: + - fsl,imx6sll-evk + - const: fsl,imx6sll + + - description: i.MX6SX based Boards + items: + - enum: + - fsl,imx6sx-sabreauto # i.MX6 SoloX Sabre Auto Board + - fsl,imx6sx-sdb # i.MX6 SoloX SDB Board + - const: fsl,imx6sx + + - description: i.MX6UL based Boards + items: + - enum: + - fsl,imx6ul-14x14-evk # i.MX6 UltraLite 14x14 EVK Board + - const: fsl,imx6ul + + - description: i.MX6ULL based Boards + items: + - enum: + - fsl,imx6ull-14x14-evk # i.MX6 UltraLiteLite 14x14 EVK Board + - const: fsl,imx6ull + 
+ - description: i.MX6ULZ based Boards + items: + - enum: + - fsl,imx6ulz-14x14-evk # i.MX6 ULZ 14x14 EVK Board + - const: fsl,imx6ull # This seems odd. Should be last? + - const: fsl,imx6ulz + + - description: i.MX7D based Boards + items: + - enum: + - fsl,imx7d-sdb # i.MX7 SabreSD Board + - const: fsl,imx7d + + - description: + Compulab SBC-iMX7 is a single board computer based on the + Freescale i.MX7 system-on-chip. SBC-iMX7 is implemented with + the CL-SOM-iMX7 System-on-Module providing most of the functions, + and SB-SOM-iMX7 carrier board providing additional peripheral + functions and connectors. + items: + - const: compulab,sbc-imx7 + - const: compulab,cl-som-imx7 + - const: fsl,imx7d + + - description: i.MX8QXP based Boards + items: + - enum: + - fsl,imx8qxp-mek # i.MX8QXP MEK Board + - const: fsl,imx8qxp + + - description: + Freescale Vybrid Platform Device Tree Bindings + + For the Vybrid SoC familiy all variants with DDR controller are supported, + which is the VF5xx and VF6xx series. Out of historical reasons, in most + places the kernel uses vf610 to refer to the whole familiy. + The compatible string "fsl,vf610m4" is used for the secondary Cortex-M4 + core support. + items: + - enum: + - fsl,vf500 + - fsl,vf510 + - fsl,vf600 + - fsl,vf610 + - fsl,vf610m4 + + - description: LS1012A based Boards + items: + - enum: + - ebs-systart,oxalis + - fsl,ls1012a-rdb + - fsl,ls1012a-frdm + - fsl,ls1012a-qds + - const: fsl,ls1012a + + - description: LS1021A based Boards + items: + - enum: + - fsl,ls1021a-moxa-uc-8410a + - fsl,ls1021a-qds + - fsl,ls1021a-twr + - const: fsl,ls1021a + + - description: LS1043A based Boards + items: + - enum: + - fsl,ls1043a-rdb + - fsl,ls1043a-qds + - const: fsl,ls1043a + + - description: LS1046A based Boards + items: + - enum: + - fsl,ls1046a-qds + - fsl,ls1046a-rdb + - const: fsl,ls1046a + + - description: LS1088A based Boards + items: + - enum: + - fsl,ls1088a-qds + - fsl,ls1088a-rdb + - const: fsl,ls1088a + + - description: LS2080A based Boards + items: + - enum: + - fsl,ls2080a-simu + - fsl,ls2080a-qds + - fsl,ls2080a-rdb + - const: fsl,ls2080a + + - description: LS2088A based Boards + items: + - enum: + - fsl,ls2088a-qds + - fsl,ls2088a-rdb + - const: fsl,ls2088a + +... 
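[Editor's note: for readers converting board files to the new fsl.yaml schema, a root node that the i.MX6Q entry above would accept looks like the sketch below. Only the compatible list is taken from the schema; the model string is an illustrative assumption, not part of the binding.

	/dts-v1/;
	/ {
		/* model string is illustrative only */
		model = "Freescale i.MX6 Quad SABRE Smart Device Board";
		/* one board compatible from the enum, followed by the fixed SoC string */
		compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
	};

The schema's items/enum/const structure enforces exactly this ordering: a board entry from the enum first, then the SoC compatible as the final const.]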
diff --git a/Documentation/devicetree/bindings/arm/i2se.txt b/Documentation/devicetree/bindings/arm/i2se.txt deleted file mode 100644 index dbd54a3aa07d..000000000000 --- a/Documentation/devicetree/bindings/arm/i2se.txt +++ /dev/null @@ -1,22 +0,0 @@ -I2SE Device Tree Bindings -------------------------- - -Duckbill Board -Required root node properties: - - compatible = "i2se,duckbill", "fsl,imx28"; - -Duckbill 2 Board -Required root node properties: - - compatible = "i2se,duckbill-2", "fsl,imx28"; - -Duckbill 2 485 Board -Required root node properties: - - compatible = "i2se,duckbill-2-485", "i2se,duckbill-2", "fsl,imx28"; - -Duckbill 2 EnOcean Board -Required root node properties: - - compatible = "i2se,duckbill-2-enocean", "i2se,duckbill-2", "fsl,imx28"; - -Duckbill 2 SPI Board -Required root node properties: - - compatible = "i2se,duckbill-2-spi", "i2se,duckbill-2", "fsl,imx28"; diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.txt b/Documentation/devicetree/bindings/arm/l2c2x0.txt deleted file mode 100644 index fbe6cb21f4cf..000000000000 --- a/Documentation/devicetree/bindings/arm/l2c2x0.txt +++ /dev/null @@ -1,114 +0,0 @@ -* ARM L2 Cache Controller - -ARM cores often have a separate L2C210/L2C220/L2C310 (also known as PL210/PL220/ -PL310 and variants) based level 2 cache controller. All these various implementations -of the L2 cache controller have compatible programming models (Note 1). -Some of the properties that are just prefixed "cache-*" are taken from section -3.7.3 of the Devicetree Specification which can be found at: -https://www.devicetree.org/specifications/ - -The ARM L2 cache representation in the device tree should be done as follows: - -Required properties: - -- compatible : should be one of: - "arm,pl310-cache" - "arm,l220-cache" - "arm,l210-cache" - "bcm,bcm11351-a2-pl310-cache": DEPRECATED by "brcm,bcm11351-a2-pl310-cache" - "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an - offset needs to be added to the address before passing down to the L2 - cache controller - "marvell,aurora-system-cache": Marvell Controller designed to be - compatible with the ARM one, with system cache mode (meaning - maintenance operations on L1 are broadcasted to the L2 and L2 - performs the same operation). - "marvell,aurora-outer-cache": Marvell Controller designed to be - compatible with the ARM one with outer cache mode. - "marvell,tauros3-cache": Marvell Tauros3 cache controller, compatible - with arm,pl310-cache controller. -- cache-unified : Specifies the cache is a unified cache. -- cache-level : Should be set to 2 for a level 2 cache. -- reg : Physical base address and size of cache controller's memory mapped - registers. - -Optional properties: - -- arm,data-latency : Cycles of latency for Data RAM accesses. Specifies 3 cells of - read, write and setup latencies. Minimum valid values are 1. Controllers - without setup latency control should use a value of 0. -- arm,tag-latency : Cycles of latency for Tag RAM accesses. Specifies 3 cells of - read, write and setup latencies. Controllers without setup latency control - should use 0. Controllers without separate read and write Tag RAM latency - values should only use the first cell. -- arm,dirty-latency : Cycles of latency for Dirty RAMs. This is a single cell. -- arm,filter-ranges : Starting address and length of window to - filter. Addresses in the filter window are directed to the M1 port. Other - addresses will go to the M0 port. 
-- arm,io-coherent : indicates that the system is operating in an hardware - I/O coherent mode. Valid only when the arm,pl310-cache compatible - string is used. -- interrupts : 1 combined interrupt. -- cache-size : specifies the size in bytes of the cache -- cache-sets : specifies the number of associativity sets of the cache -- cache-block-size : specifies the size in bytes of a cache block -- cache-line-size : specifies the size in bytes of a line in the cache, - if this is not specified, the line size is assumed to be equal to the - cache block size -- cache-id-part: cache id part number to be used if it is not present - on hardware -- wt-override: If present then L2 is forced to Write through mode -- arm,double-linefill : Override double linefill enable setting. Enable if - non-zero, disable if zero. -- arm,double-linefill-incr : Override double linefill on INCR read. Enable - if non-zero, disable if zero. -- arm,double-linefill-wrap : Override double linefill on WRAP read. Enable - if non-zero, disable if zero. -- arm,prefetch-drop : Override prefetch drop enable setting. Enable if non-zero, - disable if zero. -- arm,prefetch-offset : Override prefetch offset value. Valid values are - 0-7, 15, 23, and 31. -- arm,shared-override : The default behavior of the L220 or PL310 cache - controllers with respect to the shareable attribute is to transform "normal - memory non-cacheable transactions" into "cacheable no allocate" (for reads) - or "write through no write allocate" (for writes). - On systems where this may cause DMA buffer corruption, this property must be - specified to indicate that such transforms are precluded. -- arm,parity-enable : enable parity checking on the L2 cache (L220 or PL310). -- arm,parity-disable : disable parity checking on the L2 cache (L220 or PL310). -- arm,outer-sync-disable : disable the outer sync operation on the L2 cache. - Some core tiles, especially ARM PB11MPCore have a faulty L220 cache that - will randomly hang unless outer sync operations are disabled. -- prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1> - (forcibly enable), property absent (retain settings set by firmware) -- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable), - <1> (forcibly enable), property absent (retain settings set by - firmware) -- arm,dynamic-clock-gating : L2 dynamic clock gating. Value: <0> (forcibly - disable), <1> (forcibly enable), property absent (OS specific behavior, - preferably retain firmware settings) -- arm,standby-mode: L2 standby mode enable. Value <0> (forcibly disable), - <1> (forcibly enable), property absent (OS specific behavior, - preferably retain firmware settings) -- arm,early-bresp-disable : Disable the CA9 optimization Early BRESP (PL310) -- arm,full-line-zero-disable : Disable the CA9 optimization Full line of zero - write (PL310) - -Example: - -L2: cache-controller { - compatible = "arm,pl310-cache"; - reg = <0xfff12000 0x1000>; - arm,data-latency = <1 1 1>; - arm,tag-latency = <2 2 2>; - arm,filter-ranges = <0x80000000 0x8000000>; - cache-unified; - cache-level = <2>; - interrupts = <45>; -}; - -Note 1: The description in this document doesn't apply to integrated L2 - cache controllers as found in e.g. Cortex-A15/A7/A57/A53. These - integrated L2 controllers are assumed to be all preconfigured by - early secure boot code. Thus no need to deal with their configuration - in the kernel at all. 
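[Editor's note: as a quick illustration of how the optional tuning properties described above combine in practice, a PL310 node might be written as follows. This is a sketch only: the register address is borrowed from the example in this binding, and the property values are placeholders rather than recommendations for any particular SoC.

	L2: cache-controller@fff12000 {
		compatible = "arm,pl310-cache";
		reg = <0xfff12000 0x1000>;
		cache-unified;
		cache-level = <2>;
		interrupts = <45>;
		/* force-enable data prefetch; instruction prefetch omitted, so
		   firmware settings are retained */
		prefetch-data = <1>;
		arm,prefetch-offset = <7>;
		/* DMA-sensitive system: suppress the shareable-attribute transform */
		arm,shared-override;
	};

Boolean properties such as arm,shared-override are expressed by presence alone, while the tristate prefetch-* properties take <0>/<1> or are omitted to keep whatever the firmware configured.]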
diff --git a/Documentation/devicetree/bindings/arm/l2c2x0.yaml b/Documentation/devicetree/bindings/arm/l2c2x0.yaml new file mode 100644 index 000000000000..bfc5c185561c --- /dev/null +++ b/Documentation/devicetree/bindings/arm/l2c2x0.yaml @@ -0,0 +1,248 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/l2c2x0.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ARM L2 Cache Controller + +maintainers: + - Rob Herring + +description: |+ + ARM cores often have a separate L2C210/L2C220/L2C310 (also known as PL210/ + PL220/PL310 and variants) based level 2 cache controller. All these various + implementations of the L2 cache controller have compatible programming + models (Note 1). Some of the properties that are just prefixed "cache-*" are + taken from section 3.7.3 of the Devicetree Specification which can be found + at: + https://www.devicetree.org/specifications/ + + Note 1: The description in this document doesn't apply to integrated L2 + cache controllers as found in e.g. Cortex-A15/A7/A57/A53. These + integrated L2 controllers are assumed to be all preconfigured by + early secure boot code. Thus no need to deal with their configuration + in the kernel at all. + +allOf: + - $ref: /schemas/cache-controller.yaml# + +properties: + compatible: + enum: + - arm,pl310-cache + - arm,l220-cache + - arm,l210-cache + # DEPRECATED by "brcm,bcm11351-a2-pl310-cache" + - bcm,bcm11351-a2-pl310-cache + # For Broadcom bcm11351 chipset where an + # offset needs to be added to the address before passing down to the L2 + # cache controller + - brcm,bcm11351-a2-pl310-cache + # Marvell Controller designed to be + # compatible with the ARM one, with system cache mode (meaning + # maintenance operations on L1 are broadcasted to the L2 and L2 + # performs the same operation). + - marvell,aurora-system-cache + # Marvell Controller designed to be + # compatible with the ARM one with outer cache mode. + - marvell,aurora-outer-cache + # Marvell Tauros3 cache controller, compatible + # with arm,pl310-cache controller. + - marvell,tauros3-cache + + cache-level: + const: 2 + + cache-unified: true + cache-size: true + cache-sets: true + cache-block-size: true + cache-line-size: true + + reg: + maxItems: 1 + + arm,data-latency: + description: Cycles of latency for Data RAM accesses. Specifies 3 cells of + read, write and setup latencies. Minimum valid values are 1. Controllers + without setup latency control should use a value of 0. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - minItems: 2 + maxItems: 3 + items: + minimum: 0 + maximum: 8 + + arm,tag-latency: + description: Cycles of latency for Tag RAM accesses. Specifies 3 cells of + read, write and setup latencies. Controllers without setup latency control + should use 0. Controllers without separate read and write Tag RAM latency + values should only use the first cell. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - minItems: 1 + maxItems: 3 + items: + minimum: 0 + maximum: 8 + + arm,dirty-latency: + description: Cycles of latency for Dirty RAMs. This is a single cell. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - minimum: 1 + maximum: 8 + + arm,filter-ranges: + description: Starting address and length of window to + filter. Addresses in the filter window are directed to the M1 port. Other + addresses will go to the M0 port. 
+ allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - items: + minItems: 2 + maxItems: 2 + + arm,io-coherent: + description: indicates that the system is operating in an hardware + I/O coherent mode. Valid only when the arm,pl310-cache compatible + string is used. + type: boolean + + interrupts: + # Either a single combined interrupt or up to 9 individual interrupts + minItems: 1 + maxItems: 9 + + cache-id-part: + description: cache id part number to be used if it is not present + on hardware + $ref: /schemas/types.yaml#/definitions/uint32 + + wt-override: + description: If present then L2 is forced to Write through mode + type: boolean + + arm,double-linefill: + description: Override double linefill enable setting. Enable if + non-zero, disable if zero. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + arm,double-linefill-incr: + description: Override double linefill on INCR read. Enable + if non-zero, disable if zero. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + arm,double-linefill-wrap: + description: Override double linefill on WRAP read. Enable + if non-zero, disable if zero. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + arm,prefetch-drop: + description: Override prefetch drop enable setting. Enable if non-zero, + disable if zero. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + arm,prefetch-offset: + description: Override prefetch offset value. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1, 2, 3, 4, 5, 6, 7, 15, 23, 31 ] + + arm,shared-override: + description: The default behavior of the L220 or PL310 cache + controllers with respect to the shareable attribute is to transform "normal + memory non-cacheable transactions" into "cacheable no allocate" (for reads) + or "write through no write allocate" (for writes). + On systems where this may cause DMA buffer corruption, this property must + be specified to indicate that such transforms are precluded. + type: boolean + + arm,parity-enable: + description: enable parity checking on the L2 cache (L220 or PL310). + type: boolean + + arm,parity-disable: + description: disable parity checking on the L2 cache (L220 or PL310). + type: boolean + + arm,outer-sync-disable: + description: disable the outer sync operation on the L2 cache. + Some core tiles, especially ARM PB11MPCore have a faulty L220 cache that + will randomly hang unless outer sync operations are disabled. + type: boolean + + prefetch-data: + description: | + Data prefetch. Value: <0> (forcibly disable), <1> + (forcibly enable), property absent (retain settings set by firmware) + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + prefetch-instr: + description: | + Instruction prefetch. Value: <0> (forcibly disable), + <1> (forcibly enable), property absent (retain settings set by + firmware) + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + arm,dynamic-clock-gating: + description: | + L2 dynamic clock gating. Value: <0> (forcibly + disable), <1> (forcibly enable), property absent (OS specific behavior, + preferably retain firmware settings) + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + arm,standby-mode: + description: L2 standby mode enable. 
Value <0> (forcibly disable), + <1> (forcibly enable), property absent (OS specific behavior, + preferably retain firmware settings) + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - enum: [ 0, 1 ] + + arm,early-bresp-disable: + description: Disable the CA9 optimization Early BRESP (PL310) + type: boolean + + arm,full-line-zero-disable: + description: Disable the CA9 optimization Full line of zero + write (PL310) + type: boolean + +required: + - compatible + - cache-unified + - reg + +additionalProperties: false + +examples: + - | + cache-controller@fff12000 { + compatible = "arm,pl310-cache"; + reg = <0xfff12000 0x1000>; + arm,data-latency = <1 1 1>; + arm,tag-latency = <2 2 2>; + arm,filter-ranges = <0x80000000 0x8000000>; + cache-unified; + cache-level = <2>; + interrupts = <45>; + }; + +... diff --git a/Documentation/devicetree/bindings/arm/mediatek.txt b/Documentation/devicetree/bindings/arm/mediatek.txt index 8f260e5cfd16..56ac7896d6d8 100644 --- a/Documentation/devicetree/bindings/arm/mediatek.txt +++ b/Documentation/devicetree/bindings/arm/mediatek.txt @@ -15,11 +15,12 @@ compatible: Must contain one of "mediatek,mt6795" "mediatek,mt6797" "mediatek,mt7622" - "mediatek,mt7623" which is referred to MT7623N SoC - "mediatek,mt7623a" + "mediatek,mt7623" + "mediatek,mt7629" "mediatek,mt8127" "mediatek,mt8135" "mediatek,mt8173" + "mediatek,mt8183" Supported boards: @@ -57,6 +58,9 @@ Supported boards: - Reference board variant 1 for MT7622: Required root node properties: - compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622"; +- Bananapi BPI-R64 for MT7622: + Required root node properties: + - compatible = "bananapi,bpi-r64", "mediatek,mt7622"; - Reference board for MT7623a with eMMC: Required root node properties: - compatible = "mediatek,mt7623a-rfb-emmc", "mediatek,mt7623"; @@ -68,6 +72,9 @@ Supported boards: - compatible = "mediatek,mt7623n-rfb-emmc", "mediatek,mt7623"; - Bananapi BPI-R2 board: - compatible = "bananapi,bpi-r2", "mediatek,mt7623"; +- Reference board for MT7629: + Required root node properties: + - compatible = "mediatek,mt7629-rfb", "mediatek,mt7629"; - MTK mt8127 tablet moose EVB: Required root node properties: - compatible = "mediatek,mt8127-moose", "mediatek,mt8127"; @@ -77,3 +84,6 @@ Supported boards: - MTK mt8173 tablet EVB: Required root node properties: - compatible = "mediatek,mt8173-evb", "mediatek,mt8173"; +- Evaluation board for MT8183: + Required root node properties: + - compatible = "mediatek,mt8183-evb", "mediatek,mt8183"; diff --git a/Documentation/devicetree/bindings/arm/olimex.txt b/Documentation/devicetree/bindings/arm/olimex.txt deleted file mode 100644 index d726aeca56be..000000000000 --- a/Documentation/devicetree/bindings/arm/olimex.txt +++ /dev/null @@ -1,10 +0,0 @@ -Olimex Device Tree Bindings ---------------------------- - -SAM9-L9260 Board -Required root node properties: - - compatible = "olimex,sam9-l9260", "atmel,at91sam9260"; - -i.MX23 Olinuxino Low Cost Board -Required root node properties: - - compatible = "olimex,imx23-olinuxino", "fsl,imx23"; diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt deleted file mode 100644 index 13611a8199bb..000000000000 --- a/Documentation/devicetree/bindings/arm/pmu.txt +++ /dev/null @@ -1,70 +0,0 @@ -* ARM Performance Monitor Units - -ARM cores often have a PMU for counting cpu and cache events like cache misses -and hits. The interface to the PMU is part of the ARM ARM. 
The ARM PMU -representation in the device tree should be done as under:- - -Required properties: - -- compatible : should be one of - "apm,potenza-pmu" - "arm,armv8-pmuv3" - "arm,cortex-a73-pmu" - "arm,cortex-a72-pmu" - "arm,cortex-a57-pmu" - "arm,cortex-a53-pmu" - "arm,cortex-a35-pmu" - "arm,cortex-a17-pmu" - "arm,cortex-a15-pmu" - "arm,cortex-a12-pmu" - "arm,cortex-a9-pmu" - "arm,cortex-a8-pmu" - "arm,cortex-a7-pmu" - "arm,cortex-a5-pmu" - "arm,arm11mpcore-pmu" - "arm,arm1176-pmu" - "arm,arm1136-pmu" - "brcm,vulcan-pmu" - "cavium,thunder-pmu" - "qcom,scorpion-pmu" - "qcom,scorpion-mp-pmu" - "qcom,krait-pmu" -- interrupts : 1 combined interrupt or 1 per core. If the interrupt is a per-cpu - interrupt (PPI) then 1 interrupt should be specified. - -Optional properties: - -- interrupt-affinity : When using SPIs, specifies a list of phandles to CPU - nodes corresponding directly to the affinity of - the SPIs listed in the interrupts property. - - When using a PPI, specifies a list of phandles to CPU - nodes corresponding to the set of CPUs which have - a PMU of this type signalling the PPI listed in the - interrupts property, unless this is already specified - by the PPI interrupt specifier itself (in which case - the interrupt-affinity property shouldn't be present). - - This property should be present when there is more than - a single SPI. - - -- qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd - events. - -- secure-reg-access : Indicates that the ARMv7 Secure Debug Enable Register - (SDER) is accessible. This will cause the driver to do - any setup required that is only possible in ARMv7 secure - state. If not present the ARMv7 SDER will not be touched, - which means the PMU may fail to operate unless external - code (bootloader or security monitor) has performed the - appropriate initialisation. Note that this property is - not valid for non-ARMv7 CPUs or ARMv7 CPUs booting Linux - in Non-secure state. - -Example: - -pmu { - compatible = "arm,cortex-a9-pmu"; - interrupts = <100 101>; -}; diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml new file mode 100644 index 000000000000..52ae094ce330 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/pmu.yaml @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/pmu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ARM Performance Monitor Units + +maintainers: + - Mark Rutland + - Will Deacon + +description: |+ + ARM cores often have a PMU for counting cpu and cache events like cache misses + and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU + representation in the device tree should be done as under:- + +properties: + compatible: + items: + - enum: + - apm,potenza-pmu + - arm,armv8-pmuv3 + - arm,cortex-a73-pmu + - arm,cortex-a72-pmu + - arm,cortex-a57-pmu + - arm,cortex-a53-pmu + - arm,cortex-a35-pmu + - arm,cortex-a17-pmu + - arm,cortex-a15-pmu + - arm,cortex-a12-pmu + - arm,cortex-a9-pmu + - arm,cortex-a8-pmu + - arm,cortex-a7-pmu + - arm,cortex-a5-pmu + - arm,arm11mpcore-pmu + - arm,arm1176-pmu + - arm,arm1136-pmu + - brcm,vulcan-pmu + - cavium,thunder-pmu + - qcom,scorpion-pmu + - qcom,scorpion-mp-pmu + - qcom,krait-pmu + + interrupts: + # Don't know how many CPUs, so no constraints to specify + description: 1 per-cpu interrupt (PPI) or 1 interrupt per core. 
+ + interrupt-affinity: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: + When using SPIs, specifies a list of phandles to CPU + nodes corresponding directly to the affinity of + the SPIs listed in the interrupts property. + + When using a PPI, specifies a list of phandles to CPU + nodes corresponding to the set of CPUs which have + a PMU of this type signalling the PPI listed in the + interrupts property, unless this is already specified + by the PPI interrupt specifier itself (in which case + the interrupt-affinity property shouldn't be present). + + This property should be present when there is more than + a single SPI. + + qcom,no-pc-write: + type: boolean + description: + Indicates that this PMU doesn't support the 0xc and 0xd events. + + secure-reg-access: + type: boolean + description: + Indicates that the ARMv7 Secure Debug Enable Register + (SDER) is accessible. This will cause the driver to do + any setup required that is only possible in ARMv7 secure + state. If not present the ARMv7 SDER will not be touched, + which means the PMU may fail to operate unless external + code (bootloader or security monitor) has performed the + appropriate initialisation. Note that this property is + not valid for non-ARMv7 CPUs or ARMv7 CPUs booting Linux + in Non-secure state. + +required: + - compatible + +... diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml new file mode 100644 index 000000000000..19f379863d50 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/renesas.yaml @@ -0,0 +1,238 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/shmobile.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings + +maintainers: + - Geert Uytterhoeven + +properties: + $nodename: + const: '/' + compatible: + oneOf: + - description: Emma Mobile EV2 + items: + - enum: + - renesas,kzm9d # Kyoto Microcomputer Co. KZM-A9-Dual + - const: renesas,emev2 + + - description: RZ/A1H (R7S72100) + items: + - enum: + - renesas,genmai # Genmai (RTK772100BC00000BR) + - renesas,gr-peach # GR-Peach (X28A-M01-E/F) + - renesas,rskrza1 # RSKRZA1 (YR0K77210C000BE) + - const: renesas,r7s72100 + + - description: RZ/A2 (R7S9210) + items: + - enum: + - renesas,rza2mevb # RZ/A2M Eval Board (RTK7921053S00000BE) + - const: renesas,r7s9210 + + - description: SH-Mobile AG5 (R8A73A00/SH73A0) + items: + - enum: + - renesas,kzm9g # Kyoto Microcomputer Co. 
KZM-A9-GT + - const: renesas,sh73a0 + + - description: R-Mobile APE6 (R8A73A40) + items: + - enum: + - renesas,ape6evm + - const: renesas,r8a73a4 + + - description: R-Mobile A1 (R8A77400) + items: + - enum: + - renesas,armadillo800eva # Atmark Techno Armadillo-800 EVA + - const: renesas,r8a7740 + + - description: RZ/G1H (R8A77420) + items: + - const: renesas,r8a7742 + + - description: RZ/G1M (R8A77430) + items: + - enum: + # iWave Systems RZ/G1M Qseven Development Platform (iW-RainboW-G20D-Qseven) + - iwave,g20d + - const: iwave,g20m + - const: renesas,r8a7743 + + - items: + - enum: + # iWave Systems RZ/G1M Qseven System On Module (iW-RainboW-G20M-Qseven) + - iwave,g20m + - renesas,sk-rzg1m # SK-RZG1M (YR8A77430S000BE) + - const: renesas,r8a7743 + + - description: RZ/G1N (R8A77440) + items: + - enum: + # iWave Systems RZ/G1N Qseven Development Platform (iW-RainboW-G20D-Qseven) + - iwave,g20d + - const: iwave,g20m + - const: renesas,r8a7744 + + - items: + - enum: + # iWave Systems RZ/G1N Qseven System On Module (iW-RainboW-G20M-Qseven) + - iwave,g20m + - const: renesas,r8a7744 + + - description: RZ/G1E (R8A77450) + items: + - enum: + - iwave,g22m # iWave Systems RZ/G1E SODIMM System On Module (iW-RainboW-G22M-SM) + - renesas,sk-rzg1e # SK-RZG1E (YR8A77450S000BE) + - const: renesas,r8a7745 + + - description: iWave Systems RZ/G1E SODIMM SOM Development Platform (iW-RainboW-G22D) + items: + - const: iwave,g22d + - const: iwave,g22m + - const: renesas,r8a7745 + + - description: RZ/G1C (R8A77470) + items: + - enum: + - iwave,g23s #iWave Systems RZ/G1C Single Board Computer (iW-RainboW-G23S) + - const: renesas,r8a77470 + + - description: RZ/G2M (R8A774A1) + items: + - const: renesas,r8a774a1 + + - description: RZ/G2E (R8A774C0) + items: + - enum: + - si-linux,cat874 # Silicon Linux RZ/G2E 96board platform (CAT874) + - const: renesas,r8a774c0 + + - items: + - enum: + - si-linux,cat875 # Silicon Linux sub board for CAT874 (CAT875) + - const: si-linux,cat874 + - const: renesas,r8a774c0 + + - description: R-Car M1A (R8A77781) + items: + - enum: + - renesas,bockw + - const: renesas,r8a7778 + + - description: R-Car H1 (R8A77790) + items: + - enum: + - renesas,marzen # Marzen (R0P7779A00010S) + - const: renesas,r8a7779 + + - description: R-Car H2 (R8A77900) + items: + - enum: + - renesas,lager # Lager (RTP0RC7790SEB00010S) + - renesas,stout # Stout (ADAS Starterkit, Y-R-CAR-ADAS-SKH2-BOARD) + - const: renesas,r8a7790 + + - description: R-Car M2-W (R8A77910) + items: + - enum: + - renesas,henninger + - renesas,koelsch # Koelsch (RTP0RC7791SEB00010S) + - renesas,porter # Porter (M2-LCDP) + - const: renesas,r8a7791 + + - description: R-Car V2H (R8A77920) + items: + - enum: + - renesas,blanche # Blanche (RTP0RC7792SEB00010S) + - renesas,wheat # Wheat (RTP0RC7792ASKB0000JE) + - const: renesas,r8a7792 + + - description: R-Car M2-N (R8A77930) + items: + - enum: + - renesas,gose # Gose (RTP0RC7793SEB00010S) + - const: renesas,r8a7793 + + - description: R-Car E2 (R8A77940) + items: + - enum: + - renesas,alt # Alt (RTP0RC7794SEB00010S) + - renesas,silk # SILK (RTP0RC7794LCB00011S) + - const: renesas,r8a7794 + + - description: R-Car H3 (R8A77950) + items: + - enum: + # H3ULCB (R-Car Starter Kit Premier, RTP0RC7795SKBX0010SA00 (H3 ES1.1)) + # H3ULCB (R-Car Starter Kit Premier, RTP0RC77951SKBX010SA00 (H3 ES2.0)) + - renesas,h3ulcb + - renesas,salvator-x # Salvator-X (RTP0RC7795SIPB0010S) + - renesas,salvator-xs # Salvator-XS (Salvator-X 2nd version, RTP0RC7795SIPB0012S) + - const: renesas,r8a7795 + + - description: 
R-Car M3-W (R8A77960) + items: + - enum: + - renesas,m3ulcb # M3ULCB (R-Car Starter Kit Pro, RTP0RC7796SKBX0010SA09 (M3 ES1.0)) + - renesas,salvator-x # Salvator-X (RTP0RC7796SIPB0011S) + - renesas,salvator-xs # Salvator-XS (Salvator-X 2nd version, RTP0RC7796SIPB0012S) + - const: renesas,r8a7796 + + - description: Kingfisher (SBEV-RCAR-KF-M03) + items: + - const: shimafuji,kingfisher + - enum: + - renesas,h3ulcb + - renesas,m3ulcb + - enum: + - renesas,r8a7795 + - renesas,r8a7796 + + - description: R-Car M3-N (R8A77965) + items: + - enum: + - renesas,m3nulcb # M3NULCB (R-Car Starter Kit Pro, RTP0RC77965SKBX010SA00 (M3-N ES1.1)) + - renesas,salvator-x # Salvator-X (RTP0RC7796SIPB0011S (M3-N)) + - renesas,salvator-xs # Salvator-XS (Salvator-X 2nd version, RTP0RC77965SIPB012S) + - const: renesas,r8a77965 + + - description: R-Car V3M (R8A77970) + items: + - enum: + - renesas,eagle # Eagle (RTP0RC77970SEB0010S) + - renesas,v3msk # V3MSK (Y-ASK-RCAR-V3M-WS10) + - const: renesas,r8a77970 + + - description: R-Car V3H (R8A77980) + items: + - enum: + - renesas,condor # Condor (RTP0RC77980SEB0010SS/RTP0RC77980SEB0010SA01) + - renesas,v3hsk # V3HSK (Y-ASK-RCAR-V3H-WS10) + - const: renesas,r8a77980 + + - description: R-Car E3 (R8A77990) + items: + - enum: + - renesas,ebisu # Ebisu (RTP0RC77990SEB0010S) + - const: renesas,r8a77990 + + - description: R-Car D3 (R8A77995) + items: + - enum: + - renesas,draak # Draak (RTP0RC77995SEB0010S) + - const: renesas,r8a77995 + + - description: RZ/N1D (R9A06G032) + items: + - enum: + - renesas,rzn1d400-db # RZN1D-DB (RZ/N1D Demo Board for the RZ/N1D 400 pins package) + - const: renesas,r9a06g032 + +... diff --git a/Documentation/devicetree/bindings/arm/rockchip.yaml b/Documentation/devicetree/bindings/arm/rockchip.yaml index b12958bda09c..061a03edf9c8 100644 --- a/Documentation/devicetree/bindings/arm/rockchip.yaml +++ b/Documentation/devicetree/bindings/arm/rockchip.yaml @@ -60,6 +60,11 @@ properties: - const: chipspark,rayeager-px2 - const: rockchip,rk3066a + - description: Elgin RV1108 R1 + items: + - const: elgin,rv1108-r1 + - const: rockchip,rv1108 + - description: Firefly Firefly-RK3288 items: - enum: @@ -87,6 +92,13 @@ properties: - const: firefly,roc-rk3399-pc - const: rockchip,rk3399 + - description: FriendlyElec NanoPi4 series boards + items: + - enum: + - friendlyarm,nanopc-t4 + - friendlyarm,nanopi-m4 + - const: rockchip,rk3399 + - description: GeekBuying GeekBox items: - const: geekbuying,geekbox @@ -317,6 +329,11 @@ properties: - const: radxa,rock - const: rockchip,rk3188 + - description: Radxa ROCK Pi 4 + items: + - const: radxa,rockpi4 + - const: rockchip,rk3399 + - description: Radxa Rock2 Square items: - const: radxa,rock2-square diff --git a/Documentation/devicetree/bindings/arm/shmobile.txt b/Documentation/devicetree/bindings/arm/shmobile.txt deleted file mode 100644 index 7f91c2a8b54e..000000000000 --- a/Documentation/devicetree/bindings/arm/shmobile.txt +++ /dev/null @@ -1,155 +0,0 @@ -Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings --------------------------------------------------------------------- - -SoCs: - - - Emma Mobile EV2 - compatible = "renesas,emev2" - - RZ/A1H (R7S72100) - compatible = "renesas,r7s72100" - - RZ/A2 (R7S9210) - compatible = "renesas,r7s9210" - - SH-Mobile AG5 (R8A73A00/SH73A0) - compatible = "renesas,sh73a0" - - R-Mobile APE6 (R8A73A40) - compatible = "renesas,r8a73a4" - - R-Mobile A1 (R8A77400) - compatible = "renesas,r8a7740" - - RZ/G1H (R8A77420) - compatible = "renesas,r8a7742" - - RZ/G1M 
(R8A77430) - compatible = "renesas,r8a7743" - - RZ/G1N (R8A77440) - compatible = "renesas,r8a7744" - - RZ/G1E (R8A77450) - compatible = "renesas,r8a7745" - - RZ/G1C (R8A77470) - compatible = "renesas,r8a77470" - - RZ/G2M (R8A774A1) - compatible = "renesas,r8a774a1" - - RZ/G2E (R8A774C0) - compatible = "renesas,r8a774c0" - - R-Car M1A (R8A77781) - compatible = "renesas,r8a7778" - - R-Car H1 (R8A77790) - compatible = "renesas,r8a7779" - - R-Car H2 (R8A77900) - compatible = "renesas,r8a7790" - - R-Car M2-W (R8A77910) - compatible = "renesas,r8a7791" - - R-Car V2H (R8A77920) - compatible = "renesas,r8a7792" - - R-Car M2-N (R8A77930) - compatible = "renesas,r8a7793" - - R-Car E2 (R8A77940) - compatible = "renesas,r8a7794" - - R-Car H3 (R8A77950) - compatible = "renesas,r8a7795" - - R-Car M3-W (R8A77960) - compatible = "renesas,r8a7796" - - R-Car M3-N (R8A77965) - compatible = "renesas,r8a77965" - - R-Car V3M (R8A77970) - compatible = "renesas,r8a77970" - - R-Car V3H (R8A77980) - compatible = "renesas,r8a77980" - - R-Car E3 (R8A77990) - compatible = "renesas,r8a77990" - - R-Car D3 (R8A77995) - compatible = "renesas,r8a77995" - - RZ/N1D (R9A06G032) - compatible = "renesas,r9a06g032" - -Boards: - - - Alt (RTP0RC7794SEB00010S) - compatible = "renesas,alt", "renesas,r8a7794" - - APE6-EVM - compatible = "renesas,ape6evm", "renesas,r8a73a4" - - Atmark Techno Armadillo-800 EVA - compatible = "renesas,armadillo800eva", "renesas,r8a7740" - - Blanche (RTP0RC7792SEB00010S) - compatible = "renesas,blanche", "renesas,r8a7792" - - BOCK-W - compatible = "renesas,bockw", "renesas,r8a7778" - - Condor (RTP0RC77980SEB0010SS/RTP0RC77980SEB0010SA01) - compatible = "renesas,condor", "renesas,r8a77980" - - Draak (RTP0RC77995SEB0010S) - compatible = "renesas,draak", "renesas,r8a77995" - - Eagle (RTP0RC77970SEB0010S) - compatible = "renesas,eagle", "renesas,r8a77970" - - Ebisu (RTP0RC77990SEB0010S) - compatible = "renesas,ebisu", "renesas,r8a77990" - - Genmai (RTK772100BC00000BR) - compatible = "renesas,genmai", "renesas,r7s72100" - - GR-Peach (X28A-M01-E/F) - compatible = "renesas,gr-peach", "renesas,r7s72100" - - Gose (RTP0RC7793SEB00010S) - compatible = "renesas,gose", "renesas,r8a7793" - - H3ULCB (R-Car Starter Kit Premier, RTP0RC7795SKBX0010SA00 (H3 ES1.1)) - H3ULCB (R-Car Starter Kit Premier, RTP0RC77951SKBX010SA00 (H3 ES2.0)) - compatible = "renesas,h3ulcb", "renesas,r8a7795" - - Henninger - compatible = "renesas,henninger", "renesas,r8a7791" - - iWave Systems RZ/G1C Single Board Computer (iW-RainboW-G23S) - compatible = "iwave,g23s", "renesas,r8a77470" - - iWave Systems RZ/G1E SODIMM SOM Development Platform (iW-RainboW-G22D) - compatible = "iwave,g22d", "iwave,g22m", "renesas,r8a7745" - - iWave Systems RZ/G1E SODIMM System On Module (iW-RainboW-G22M-SM) - compatible = "iwave,g22m", "renesas,r8a7745" - - iWave Systems RZ/G1M Qseven Development Platform (iW-RainboW-G20D-Qseven) - compatible = "iwave,g20d", "iwave,g20m", "renesas,r8a7743" - - iWave Systems RZ/G1M Qseven System On Module (iW-RainboW-G20M-Qseven) - compatible = "iwave,g20m", "renesas,r8a7743" - - iWave Systems RZ/G1N Qseven Development Platform (iW-RainboW-G20D-Qseven) - compatible = "iwave,g20d", "iwave,g20m", "renesas,r8a7744" - - iWave Systems RZ/G1N Qseven System On Module (iW-RainboW-G20M-Qseven) - compatible = "iwave,g20m", "renesas,r8a7744" - - Kingfisher (SBEV-RCAR-KF-M03) - compatible = "shimafuji,kingfisher" - - Koelsch (RTP0RC7791SEB00010S) - compatible = "renesas,koelsch", "renesas,r8a7791" - - Kyoto Microcomputer Co. 
KZM-A9-Dual - compatible = "renesas,kzm9d", "renesas,emev2" - - Kyoto Microcomputer Co. KZM-A9-GT - compatible = "renesas,kzm9g", "renesas,sh73a0" - - Lager (RTP0RC7790SEB00010S) - compatible = "renesas,lager", "renesas,r8a7790" - - M3ULCB (R-Car Starter Kit Pro, RTP0RC7796SKBX0010SA09 (M3 ES1.0)) - compatible = "renesas,m3ulcb", "renesas,r8a7796" - - M3NULCB (R-Car Starter Kit Pro, RTP0RC77965SKBX010SA00 (M3-N ES1.1)) - compatible = "renesas,m3nulcb", "renesas,r8a77965" - - Marzen (R0P7779A00010S) - compatible = "renesas,marzen", "renesas,r8a7779" - - Porter (M2-LCDP) - compatible = "renesas,porter", "renesas,r8a7791" - - RSKRZA1 (YR0K77210C000BE) - compatible = "renesas,rskrza1", "renesas,r7s72100" - - RZN1D-DB (RZ/N1D Demo Board for the RZ/N1D 400 pins package) - compatible = "renesas,rzn1d400-db", "renesas,r9a06g032" - - Salvator-X (RTP0RC7795SIPB0010S) - compatible = "renesas,salvator-x", "renesas,r8a7795" - - Salvator-X (RTP0RC7796SIPB0011S) - compatible = "renesas,salvator-x", "renesas,r8a7796" - - Salvator-X (RTP0RC7796SIPB0011S (M3-N)) - compatible = "renesas,salvator-x", "renesas,r8a77965" - - Salvator-XS (Salvator-X 2nd version, RTP0RC7795SIPB0012S) - compatible = "renesas,salvator-xs", "renesas,r8a7795" - - Salvator-XS (Salvator-X 2nd version, RTP0RC7796SIPB0012S) - compatible = "renesas,salvator-xs", "renesas,r8a7796" - - Salvator-XS (Salvator-X 2nd version, RTP0RC77965SIPB012S) - compatible = "renesas,salvator-xs", "renesas,r8a77965" - - SILK (RTP0RC7794LCB00011S) - compatible = "renesas,silk", "renesas,r8a7794" - - SK-RZG1E (YR8A77450S000BE) - compatible = "renesas,sk-rzg1e", "renesas,r8a7745" - - SK-RZG1M (YR8A77430S000BE) - compatible = "renesas,sk-rzg1m", "renesas,r8a7743" - - Stout (ADAS Starterkit, Y-R-CAR-ADAS-SKH2-BOARD) - compatible = "renesas,stout", "renesas,r8a7790" - - V3HSK (Y-ASK-RCAR-V3H-WS10) - compatible = "renesas,v3hsk", "renesas,r8a77980" - - V3MSK (Y-ASK-RCAR-V3M-WS10) - compatible = "renesas,v3msk", "renesas,r8a77970" - - Wheat (RTP0RC7792ASKB0000JE) - compatible = "renesas,wheat", "renesas,r8a7792" diff --git a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml new file mode 100644 index 000000000000..aae53fc3cb1e --- /dev/null +++ b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/milbeaut.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Milbeaut platforms device tree bindings + +maintainers: + - Taichi Sugaya + - Takao Orito + +properties: + $nodename: + const: '/' + compatible: + oneOf: + - items: + - enum: + - socionext,milbeaut-m10v-evb + - const: socionext,sc2000a +... diff --git a/Documentation/devicetree/bindings/arm/technologic.txt b/Documentation/devicetree/bindings/arm/technologic.txt deleted file mode 100644 index f1cedc00dcab..000000000000 --- a/Documentation/devicetree/bindings/arm/technologic.txt +++ /dev/null @@ -1,23 +0,0 @@ -Technologic Systems Platforms Device Tree Bindings --------------------------------------------------- - -TS-4600 is a System-on-Module based on the Freescale i.MX28 System-on-Chip. -It can be mounted on a carrier board providing additional peripheral connectors. 
-Required root node properties: - - compatible = "technologic,imx28-ts4600", "fsl,imx28" - -TS-4800 board -Required root node properties: - - compatible = "technologic,imx51-ts4800", "fsl,imx51"; - -TS-4900 is a System-on-Module based on the Freescale i.MX6 System-on-Chip. -It can be mounted on a carrier board providing additional peripheral connectors. -Required root node properties: - - compatible = "technologic,imx6dl-ts4900", "fsl,imx6dl" - - compatible = "technologic,imx6q-ts4900", "fsl,imx6q" - -TS-7970 is a System-on-Module based on the Freescale i.MX6 System-on-Chip. -It can be mounted on a carrier board providing additional peripheral connectors. -Required root node properties: - - compatible = "technologic,imx6dl-ts7970", "fsl,imx6dl" - - compatible = "technologic,imx6q-ts7970", "fsl,imx6q" diff --git a/Documentation/devicetree/bindings/arm/tegra.yaml b/Documentation/devicetree/bindings/arm/tegra.yaml index fbcde8a7e067..60b38eb5c61a 100644 --- a/Documentation/devicetree/bindings/arm/tegra.yaml +++ b/Documentation/devicetree/bindings/arm/tegra.yaml @@ -87,9 +87,11 @@ properties: - const: nvidia,tegra124 - items: - enum: + - nvidia,darcy - nvidia,p2371-0000 - nvidia,p2371-2180 - nvidia,p2571 + - nvidia,p2894-0050-a08 - const: nvidia,tegra210 - items: - enum: diff --git a/Documentation/devicetree/bindings/bus/imx-weim.txt b/Documentation/devicetree/bindings/bus/imx-weim.txt index 683eaf3aed79..dda7d6d66479 100644 --- a/Documentation/devicetree/bindings/bus/imx-weim.txt +++ b/Documentation/devicetree/bindings/bus/imx-weim.txt @@ -47,9 +47,9 @@ Optional properties: Timing property for child nodes. It is mandatory, not optional. - fsl,weim-cs-timing: The timing array, contains timing values for the - child node. We can get the CS index from the child - node's "reg" property. The number of registers depends - on the selected chip. + child node. We get the CS indexes from the address + ranges in the child node's "reg" property. + The number of registers depends on the selected chip: For i.MX1, i.MX21 ("fsl,imx1-weim") there are two registers: CSxU, CSxL. For i.MX25, i.MX27, i.MX31 and i.MX35 ("fsl,imx27-weim") @@ -80,3 +80,29 @@ Example for an imx6q-sabreauto board, the NOR flash connected to the WEIM: 0x0000c000 0x1404a38e 0x00000000>; }; }; + +Example for an imx6q-based board, a multi-chipselect device connected to WEIM: + +In this case, both chip select 0 and 1 will be configured with the same timing +array values. + + weim: weim@21b8000 { + compatible = "fsl,imx6q-weim"; + reg = <0x021b8000 0x4000>; + clocks = <&clks 196>; + #address-cells = <2>; + #size-cells = <1>; + ranges = <0 0 0x08000000 0x02000000 + 1 0 0x0a000000 0x02000000 + 2 0 0x0c000000 0x02000000 + 3 0 0x0e000000 0x02000000>; + fsl,weim-cs-gpr = <&gpr>; + + acme@0 { + compatible = "acme,whatever"; + reg = <0 0 0x100>, <0 0x400000 0x800>, + <1 0x400000 0x800>; + fsl,weim-cs-timing = <0x024400b1 0x00001010 0x20081100 + 0x00000000 0xa0000240 0x00000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt index dff236f524a7..958e0ad78c52 100644 --- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt +++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt @@ -8,10 +8,11 @@ the fast CPU cluster. 
It consists of a free-running voltage controlled oscillator connected to the CPU voltage rail (VDD_CPU), and a closed loop control module that will automatically adjust the VDD_CPU voltage by communicating with an off-chip PMIC either via an I2C bus or via PWM signals. -Currently only the I2C mode is supported by these bindings. Required properties: -- compatible : should be "nvidia,tegra124-dfll" +- compatible : should be one of: + - "nvidia,tegra124-dfll": for Tegra124 + - "nvidia,tegra210-dfll": for Tegra210 - reg : Defines the following set of registers, in the order listed: - registers for the DFLL control logic. - registers for the I2C output logic. @@ -45,10 +46,31 @@ Required properties for the control loop parameters: Optional properties for the control loop parameters: - nvidia,cg-scale: Boolean value, see the field DFLL_PARAMS_CG_SCALE in the TRM. +Optional properties for mode selection: +- nvidia,pwm-to-pmic: Use PWM to control regulator rather then I2C. + Required properties for I2C mode: - nvidia,i2c-fs-rate: I2C transfer rate, if using full speed mode. -Example: +Required properties for PWM mode: +- nvidia,pwm-period-nanoseconds: period of PWM square wave in nanoseconds. +- nvidia,pwm-tristate-microvolts: Regulator voltage in micro volts when PWM + control is disabled and the PWM output is tristated. Note that this voltage is + configured in hardware, typically via a resistor divider. +- nvidia,pwm-min-microvolts: Regulator voltage in micro volts when PWM control + is enabled and PWM output is low. Hence, this is the minimum output voltage + that the regulator supports when PWM control is enabled. +- nvidia,pwm-voltage-step-microvolts: Voltage increase in micro volts + corresponding to a 1/33th increase in duty cycle. Eg the voltage for 2/33th + duty cycle would be: nvidia,pwm-min-microvolts + + nvidia,pwm-voltage-step-microvolts * 2. +- pinctrl-0: I/O pad configuration when PWM control is enabled. +- pinctrl-1: I/O pad configuration when PWM control is disabled. +- pinctrl-names: must include the following entries: + - dvfs_pwm_enable: I/O pad configuration when PWM control is enabled. + - dvfs_pwm_disable: I/O pad configuration when PWM control is disabled. + +Example for I2C: clock@70110000 { compatible = "nvidia,tegra124-dfll"; @@ -76,3 +98,58 @@ clock@70110000 { nvidia,i2c-fs-rate = <400000>; }; + +Example for PWM: + +clock@70110000 { + compatible = "nvidia,tegra124-dfll"; + reg = <0 0x70110000 0 0x100>, /* DFLL control */ + <0 0x70110000 0 0x100>, /* I2C output control */ + <0 0x70110100 0 0x100>, /* Integrated I2C controller */ + <0 0x70110200 0 0x100>; /* Look-up table RAM */ + interrupts = ; + clocks = <&tegra_car TEGRA210_CLK_DFLL_SOC>, + <&tegra_car TEGRA210_CLK_DFLL_REF>, + <&tegra_car TEGRA124_CLK_I2C5>;; + clock-names = "soc", "ref", "i2c"; + resets = <&tegra_car TEGRA124_RST_DFLL_DVCO>; + reset-names = "dvco"; + #clock-cells = <0>; + clock-output-names = "dfllCPU_out"; + + nvidia,sample-rate = <25000>; + nvidia,droop-ctrl = <0x00000f00>; + nvidia,force-mode = <1>; + nvidia,cf = <6>; + nvidia,ci = <0>; + nvidia,cg = <2>; + + nvidia,pwm-min-microvolts = <708000>; /* 708mV */ + nvidia,pwm-period-nanoseconds = <2500>; /* 2.5us */ + nvidia,pwm-to-pmic; + nvidia,pwm-tristate-microvolts = <1000000>; + nvidia,pwm-voltage-step-microvolts = <19200>; /* 19.2mV */ + + pinctrl-names = "dvfs_pwm_enable", "dvfs_pwm_disable"; + pinctrl-0 = <&dvfs_pwm_active_state>; + pinctrl-1 = <&dvfs_pwm_inactive_state>; +}; + +/* pinmux nodes added for completeness. 
Binding doc can be found in: + * Documentation/devicetree/bindings/pinctrl/nvidia,tegra210-pinmux.txt + */ + +pinmux: pinmux@700008d4 { + dvfs_pwm_active_state: dvfs_pwm_active { + dvfs_pwm_pbb1 { + nvidia,pins = "dvfs_pwm_pbb1"; + nvidia,tristate = ; + }; + }; + dvfs_pwm_inactive_state: dvfs_pwm_inactive { + dvfs_pwm_pbb1 { + nvidia,pins = "dvfs_pwm_pbb1"; + nvidia,tristate = ; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt index b1669fbfb740..03196d5ea515 100644 --- a/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt +++ b/Documentation/devicetree/bindings/cpufreq/nvidia,tegra124-cpufreq.txt @@ -9,11 +9,9 @@ Required properties: See ../clocks/clock-bindings.txt for details. - clock-names: Must include the following entries: - cpu_g: Clock mux for the fast CPU cluster. - - cpu_lp: Clock mux for the low-power CPU cluster. - pll_x: Fast PLL clocksource. - pll_p: Auxiliary PLL used during fast PLL rate changes. - dfll: Fast DFLL clocksource that also automatically scales CPU voltage. -- vdd-cpu-supply: Regulator for CPU voltage Optional properties: - clock-latency: Specify the possible maximum transition latency for clock, @@ -31,13 +29,11 @@ cpus { reg = <0>; clocks = <&tegra_car TEGRA124_CLK_CCLK_G>, - <&tegra_car TEGRA124_CLK_CCLK_LP>, <&tegra_car TEGRA124_CLK_PLL_X>, <&tegra_car TEGRA124_CLK_PLL_P>, <&dfll>; - clock-names = "cpu_g", "cpu_lp", "pll_x", "pll_p", "dfll"; + clock-names = "cpu_g", "pll_x", "pll_p", "dfll"; clock-latency = <300000>; - vdd-cpu-supply: <&vdd_cpu>; }; <...> diff --git a/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt b/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt new file mode 100644 index 000000000000..7ec9a5a7727a --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/samsung-slimsss.txt @@ -0,0 +1,19 @@ +Samsung SoC SlimSSS (Slim Security SubSystem) module + +The SlimSSS module in Exynos5433 SoC supports the following: +-- Feeder (FeedCtrl) +-- Advanced Encryption Standard (AES) with ECB,CBC,CTR,XTS and (CBC/XTS)/CTS +-- SHA-1/SHA-256 and (SHA-1/SHA-256)/HMAC + +Required properties: + +- compatible : Should contain entry for slimSSS version: + - "samsung,exynos5433-slim-sss" for Exynos5433 SoC. +- reg : Offset and length of the register set for the module +- interrupts : interrupt specifiers of SlimSSS module interrupts (one feed + control interrupt). + +- clocks : list of clock phandle and specifier pairs for all clocks listed in + clock-names property. +- clock-names : list of device clock input names; should contain "pclk" and + "aclk" for slim-sss in Exynos5433. diff --git a/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt b/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt new file mode 100644 index 000000000000..aaa6c24c8e70 --- /dev/null +++ b/Documentation/devicetree/bindings/display/amlogic,simple-framebuffer.txt @@ -0,0 +1,33 @@ +Meson specific Simple Framebuffer bindings + +This binding documents meson specific extensions to the simple-framebuffer +bindings. The meson simplefb u-boot code relies on the devicetree containing +pre-populated simplefb nodes. + +These extensions are intended so that u-boot can select the right node based +on which pipeline is being used. As such they are solely intended for +firmware / bootloader use, and the OS should ignore them. 
+ +Required properties: +- compatible: "amlogic,simple-framebuffer", "simple-framebuffer" +- amlogic,pipeline, one of: + "vpu-cvbs" + "vpu-hdmi" + +Example: + +chosen { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + simplefb_hdmi: framebuffer-hdmi { + compatible = "amlogic,simple-framebuffer", + "simple-framebuffer"; + amlogic,pipeline = "vpu-hdmi"; + clocks = <&clkc CLKID_HDMI_PCLK>, + <&clkc CLKID_CLK81>, + <&clkc CLKID_GCLK_VENCI_INT0>; + power-domains = <&pwrc_vpu>; + }; +}; diff --git a/Documentation/devicetree/bindings/display/arm,komeda.txt b/Documentation/devicetree/bindings/display/arm,komeda.txt new file mode 100644 index 000000000000..02b226532ebd --- /dev/null +++ b/Documentation/devicetree/bindings/display/arm,komeda.txt @@ -0,0 +1,73 @@ +Device Tree bindings for Arm Komeda display driver + +Required properties: +- compatible: Should be "arm,mali-d71" +- reg: Physical base address and length of the registers in the system +- interrupts: the interrupt line number of the device in the system +- clocks: A list of phandle + clock-specifier pairs, one for each entry + in 'clock-names' +- clock-names: A list of clock names. It should contain: + - "mclk": for the main processor clock + - "pclk": for the APB interface clock +- #address-cells: Must be 1 +- #size-cells: Must be 0 + +Required properties for sub-node: pipeline@nq +Each device contains one or two pipeline sub-nodes (at least one), each +pipeline node should provide properties: +- reg: Zero-indexed identifier for the pipeline +- clocks: A list of phandle + clock-specifier pairs, one for each entry + in 'clock-names' +- clock-names: should contain: + - "pxclk": pixel clock + - "aclk": AXI interface clock + +- port: each pipeline connect to an encoder input port. The connection is + modeled using the OF graph bindings specified in + Documentation/devicetree/bindings/graph.txt + +Optional properties: + - memory-region: phandle to a node describing memory (see + Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt) + to be used for the framebuffer; if not present, the framebuffer may + be located anywhere in memory. + +Example: +/ { + ... + + dp0: display@c00000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "arm,mali-d71"; + reg = <0xc00000 0x20000>; + interrupts = <0 168 4>; + clocks = <&dpu_mclk>, <&dpu_aclk>; + clock-names = "mclk", "pclk"; + + dp0_pipe0: pipeline@0 { + clocks = <&fpgaosc2>, <&dpu_aclk>; + clock-names = "pxclk", "aclk"; + reg = <0>; + + port { + dp0_pipe0_out: endpoint { + remote-endpoint = <&db_dvi0_in>; + }; + }; + }; + + dp0_pipe1: pipeline@1 { + clocks = <&fpgaosc2>, <&dpu_aclk>; + clock-names = "pxclk", "aclk"; + reg = <1>; + + port { + dp0_pipe1_out: endpoint { + remote-endpoint = <&db_dvi1_in>; + }; + }; + }; + }; + ... +}; diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt index f5725bb6c61c..525a4bfd8634 100644 --- a/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt +++ b/Documentation/devicetree/bindings/display/bridge/cdns,dsi.txt @@ -31,28 +31,7 @@ Required subnodes: - one subnode per DSI device connected on the DSI bus. Each DSI device should contain a reg property encoding its virtual channel. -Cadence DPHY -============ - -Cadence DPHY block. - -Required properties: -- compatible: should be set to "cdns,dphy". -- reg: physical base address and length of the DPHY registers. -- clocks: DPHY reference clocks. 
-- clock-names: must contain "psm" and "pll_ref". -- #phy-cells: must be set to 0. - - Example: - dphy0: dphy@fd0e0000{ - compatible = "cdns,dphy"; - reg = <0x0 0xfd0e0000 0x0 0x1000>; - clocks = <&psm_clk>, <&pll_ref_clk>; - clock-names = "psm", "pll_ref"; - #phy-cells = <0>; - }; - dsi0: dsi@fd0c0000 { compatible = "cdns,dsi"; reg = <0x0 0xfd0c0000 0x0 0x1000>; diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt b/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt index 50220190c203..60091db5dfa5 100644 --- a/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt +++ b/Documentation/devicetree/bindings/display/bridge/lvds-transmitter.txt @@ -22,13 +22,11 @@ among others. Required properties: -- compatible: Must be one or more of the following - - "ti,ds90c185" for the TI DS90C185 FPD-Link Serializer - - "lvds-encoder" for a generic LVDS encoder device +- compatible: Must be "lvds-encoder" - When compatible with the generic version, nodes must list the - device-specific version corresponding to the device first - followed by the generic version. + Any encoder compatible with this generic binding, but with additional + properties not listed here, must list a device specific compatible first + followed by this generic compatible. Required nodes: @@ -44,8 +42,6 @@ Example lvds-encoder { compatible = "lvds-encoder"; - #address-cells = <1>; - #size-cells = <0>; ports { #address-cells = <1>; diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt index ba5469dd09f3..900a884ad9f5 100644 --- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt +++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt @@ -8,6 +8,8 @@ Required properties: - compatible : Shall contain one of - "renesas,r8a7743-lvds" for R8A7743 (RZ/G1M) compatible LVDS encoders + - "renesas,r8a7744-lvds" for R8A7744 (RZ/G1N) compatible LVDS encoders + - "renesas,r8a774c0-lvds" for R8A774C0 (RZ/G2E) compatible LVDS encoders - "renesas,r8a7790-lvds" for R8A7790 (R-Car H2) compatible LVDS encoders - "renesas,r8a7791-lvds" for R8A7791 (R-Car M2-W) compatible LVDS encoders - "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders @@ -25,7 +27,7 @@ Required properties: - clock-names: Name of the clocks. This property is model-dependent. - The functional clock, which mandatory for all models, shall be listed first, and shall be named "fck". - - On R8A77990 and R8A77995, the LVDS encoder can use the EXTAL or + - On R8A77990, R8A77995 and R8A774C0, the LVDS encoder can use the EXTAL or DU_DOTCLKINx clocks. Those clocks are optional. When supplied they must be named "extal" and "dclkin.x" respectively, with "x" being the DU_DOTCLKIN numerical index. diff --git a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt index 527e236e9a2a..fee3c88e1a17 100644 --- a/Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt +++ b/Documentation/devicetree/bindings/display/bridge/thine,thc63lvdm83d.txt @@ -10,7 +10,7 @@ Required properties: Optional properties: -- pwdn-gpios: Power down control GPIO +- powerdown-gpios: Power down control GPIO (the /PWDN pin, active low). 
Required nodes: diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt b/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt new file mode 100644 index 000000000000..e575f996959a --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/ti,ds90c185.txt @@ -0,0 +1,55 @@ +Texas Instruments FPD-Link (LVDS) Serializer +-------------------------------------------- + +The DS90C185 and DS90C187 are low-power serializers for portable +battery-powered applications that reduces the size of the RGB +interface between the host GPU and the display. + +Required properties: + +- compatible: Should be + "ti,ds90c185", "lvds-encoder" for the TI DS90C185 FPD-Link Serializer + "ti,ds90c187", "lvds-encoder" for the TI DS90C187 FPD-Link Serializer + +Optional properties: + +- powerdown-gpios: Power down control GPIO (the PDB pin, active-low) + +Required nodes: + +The devices have two video ports. Their connections are modeled using the OF +graph bindings specified in Documentation/devicetree/bindings/graph.txt. + +- Video port 0 for parallel input +- Video port 1 for LVDS output + + +Example +------- + +lvds-encoder { + compatible = "ti,ds90c185", "lvds-encoder"; + + powerdown-gpios = <&gpio 17 GPIO_ACTIVE_LOW>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + lvds_enc_in: endpoint { + remote-endpoint = <&lcdc_out_rgb>; + }; + }; + + port@1 { + reg = <1>; + + lvds_enc_out: endpoint { + remote-endpoint = <&lvds_panel_in>; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/display/msm/gmu.txt b/Documentation/devicetree/bindings/display/msm/gmu.txt new file mode 100644 index 000000000000..3439b38e60f2 --- /dev/null +++ b/Documentation/devicetree/bindings/display/msm/gmu.txt @@ -0,0 +1,59 @@ +Qualcomm adreno/snapdragon GMU (Graphics management unit) + +The GMU is a programmable power controller for the GPU. the CPU controls the +GMU which in turn handles power controls for the GPU. + +Required properties: +- compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu" + for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu" + Note that you need to list the less specific "qcom,adreno-gmu" + for generic matches and the more specific identifier to identify + the specific device. +- reg: Physical base address and length of the GMU registers. +- reg-names: Matching names for the register regions + * "gmu" + * "gmu_pdc" + * "gmu_pdc_seg" +- interrupts: The interrupt signals from the GMU. +- interrupt-names: Matching names for the interrupts + * "hfi" + * "gmu" +- clocks: phandles to the device clocks +- clock-names: Matching names for the clocks + * "gmu" + * "cxo" + * "axi" + * "mnoc" +- power-domains: should be <&clock_gpucc GPU_CX_GDSC> +- iommus: phandle to the adreno iommu +- operating-points-v2: phandle to the OPP operating points + +Example: + +/ { + ... 
+ + gmu: gmu@506a000 { + compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; + + reg = <0x506a000 0x30000>, + <0xb280000 0x10000>, + <0xb480000 0x10000>; + reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; + + interrupts = , + ; + interrupt-names = "hfi", "gmu"; + + clocks = <&gpucc GPU_CC_CX_GMU_CLK>, + <&gpucc GPU_CC_CXO_CLK>, + <&gcc GCC_DDRSS_GPU_AXI_CLK>, + <&gcc GCC_GPU_MEMNOC_GFX_CLK>; + clock-names = "gmu", "cxo", "axi", "memnoc"; + + power-domains = <&gpucc GPU_CX_GDSC>; + iommus = <&adreno_smmu 5>; + + operating-points-v2 = <&gmu_opp_table>; + }; +}; diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt index f8759145ce1a..aad1aef682f7 100644 --- a/Documentation/devicetree/bindings/display/msm/gpu.txt +++ b/Documentation/devicetree/bindings/display/msm/gpu.txt @@ -10,14 +10,23 @@ Required properties: If "amd,imageon" is used, there should be no top level msm device. - reg: Physical base address and length of the controller's registers. - interrupts: The interrupt signal from the gpu. -- clocks: device clocks +- clocks: device clocks (if applicable) See ../clocks/clock-bindings.txt for details. -- clock-names: the following clocks are required: +- clock-names: the following clocks are required by a3xx, a4xx and a5xx + cores: * "core" * "iface" * "mem_iface" + For GMU attached devices the GPU clocks are not used and are not required. The + following devices should not list clocks: + - qcom,adreno-630.2 +- iommus: optional phandle to an adreno iommu instance +- operating-points-v2: optional phandle to the OPP operating points +- qcom,gmu: For GMU attached devices a phandle to the GMU device that will + control the power for the GPU. Applicable targets: + - qcom,adreno-630.2 -Example: +Example 3xx/4xx/a5xx: / { ... @@ -37,3 +46,30 @@ Example: <&mmcc MMSS_IMEM_AHB_CLK>; }; }; + +Example a6xx (with GMU): + +/ { + ... + + gpu@5000000 { + compatible = "qcom,adreno-630.2", "qcom,adreno"; + #stream-id-cells = <16>; + + reg = <0x5000000 0x40000>, <0x509e000 0x10>; + reg-names = "kgsl_3d0_reg_memory", "cx_mem"; + + /* + * Look ma, no clocks! The GPU clocks and power are + * controlled entirely by the GMU + */ + + interrupts = ; + + iommus = <&adreno_smmu 0>; + + operating-points-v2 = <&gpu_opp_table>; + + qcom,gmu = <&gmu>; + }; +}; diff --git a/Documentation/devicetree/bindings/display/panel/auo,g101evn010 b/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt similarity index 100% rename from Documentation/devicetree/bindings/display/panel/auo,g101evn010 rename to Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt diff --git a/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.txt b/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.txt new file mode 100644 index 000000000000..e5ca4ccd55ed --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.txt @@ -0,0 +1,7 @@ +Innolux Corporation 10.1" EE101IA-01D WXGA (1280x800) LVDS panel + +Required properties: +- compatible: should be "innolux,ee101ia-01d" + +This binding is compatible with the lvds-panel binding, which is specified +in panel-lvds.txt in this directory. 
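The innolux,ee101ia-01d binding above does not include an example node. A minimal sketch following the panel-lvds conventions it references might look as follows; the data-mapping, dimensions and timing values are illustrative assumptions, not datasheet figures:

	panel {
		compatible = "innolux,ee101ia-01d", "panel-lvds";

		/* Placeholder physical size and data mapping */
		width-mm = <217>;
		height-mm = <136>;
		data-mapping = "vesa-24";

		panel-timing {
			/* 1280x800, placeholder timings */
			clock-frequency = <71000000>;
			hactive = <1280>;
			vactive = <800>;
			hsync-len = <32>;
			hfront-porch = <48>;
			hback-porch = <80>;
			vsync-len = <6>;
			vfront-porch = <3>;
			vback-porch = <14>;
		};

		port {
			panel_in: endpoint {
				remote-endpoint = <&lvds_encoder_out>;
			};
		};
	};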
diff --git a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt b/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt new file mode 100644 index 000000000000..74ee7ea6b493 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt @@ -0,0 +1,12 @@ +LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel + +Required properties: +- compatible: should be "lemaker,bl035-rgb-002" +- power-supply: as specified in the base binding + +Optional properties: +- backlight: as specified in the base binding +- enable-gpios: as specified in the base binding + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.txt b/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.txt new file mode 100644 index 000000000000..1639fb17a9f0 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.txt @@ -0,0 +1,14 @@ +PDA 91-00156-A0 5.0" WVGA TFT LCD panel + +Required properties: +- compatible: should be "pda,91-00156-a0" +- power-supply: this panel requires a single power supply. A phandle to a +regulator needs to be specified here. Compatible with panel-common binding which +is specified in the panel-common.txt in this directory. +- backlight: this panel's backlight is controlled by an external backlight +controller. A phandle to this controller needs to be specified here. +Compatible with panel-common binding which is specified in the panel-common.txt +in this directory. + +This binding is compatible with the simple-panel binding, which is specified +in simple-panel.txt in this directory. diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt new file mode 100644 index 000000000000..ccd17597f1f6 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt @@ -0,0 +1,30 @@ +Sitronix ST7701 based LCD panels + +ST7701 designed for small and medium sizes of TFT LCD display, is +capable of supporting up to 480RGBX864 in resolution. It provides +several system interfaces like MIPI/RGB/SPI. + +Techstar TS8550B is 480x854, 2-lane MIPI DSI LCD panel which has +inbuilt ST7701 chip. + +Required properties: +- compatible: must be "sitronix,st7701" and one of + * "techstar,ts8550b" +- reset-gpios: a GPIO phandle for the reset pin + +Required properties for techstar,ts8550b: +- reg: DSI virtual channel used by that screen +- VCC-supply: analog regulator for MIPI circuit +- IOVCC-supply: I/O system regulator + +Optional properties: +- backlight: phandle for the backlight control. 
+ +panel@0 { + compatible = "techstar,ts8550b", "sitronix,st7701"; + reg = <0>; + VCC-supply = <®_dldo2>; + IOVCC-supply = <®_dldo2>; + reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* LCD-RST: PD24 */ + backlight = <&backlight>; +}; diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt index 3c855d9f2719..aedb22b4d161 100644 --- a/Documentation/devicetree/bindings/display/renesas,du.txt +++ b/Documentation/devicetree/bindings/display/renesas,du.txt @@ -7,6 +7,7 @@ Required Properties: - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU - "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU + - "renesas,du-r8a774c0" for R8A774C0 (RZ/G2E) compatible DU - "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU - "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU - "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU @@ -57,6 +58,7 @@ corresponding to each DU output. R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - - R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - - R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 - + R8A774C0 (RZ/G2E) DPAD 0 LVDS 0 LVDS 1 - R8A7779 (R-Car H1) DPAD 0 DPAD 1 - - R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 - R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - - diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt index b79e5769f0ae..4f58c5a2d195 100644 --- a/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt +++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt @@ -10,6 +10,7 @@ Required properties: "rockchip,rk3126-vop"; "rockchip,px30-vop-lit"; "rockchip,px30-vop-big"; + "rockchip,rk3066-vop"; "rockchip,rk3188-vop"; "rockchip,rk3288-vop"; "rockchip,rk3368-vop"; diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt index f0a5090a3326..cd5c7186890a 100644 --- a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt +++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt @@ -20,7 +20,7 @@ Example: backlight: backlight { compatible = "gpio-backlight"; gpios = <&gpio 44 GPIO_ACTIVE_HIGH>; - } + }; ... diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt index f426bdb42f18..31ab72cba3d4 100644 --- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt +++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt @@ -156,6 +156,7 @@ Required properties: * allwinner,sun6i-a31-tcon * allwinner,sun6i-a31s-tcon * allwinner,sun7i-a20-tcon + * allwinner,sun8i-a23-tcon * allwinner,sun8i-a33-tcon * allwinner,sun8i-a83t-tcon-lcd * allwinner,sun8i-a83t-tcon-tv @@ -276,6 +277,7 @@ Required properties: - compatible: value must be one of: * allwinner,sun6i-a31-drc * allwinner,sun6i-a31s-drc + * allwinner,sun8i-a23-drc * allwinner,sun8i-a33-drc * allwinner,sun9i-a80-drc - reg: base address and size of the memory-mapped region. @@ -303,6 +305,7 @@ Required properties: * allwinner,sun5i-a13-display-backend * allwinner,sun6i-a31-display-backend * allwinner,sun7i-a20-display-backend + * allwinner,sun8i-a23-display-backend * allwinner,sun8i-a33-display-backend * allwinner,sun9i-a80-display-backend - reg: base address and size of the memory-mapped region. 
@@ -360,6 +363,7 @@ Required properties: * allwinner,sun5i-a13-display-frontend * allwinner,sun6i-a31-display-frontend * allwinner,sun7i-a20-display-frontend + * allwinner,sun8i-a23-display-frontend * allwinner,sun8i-a33-display-frontend * allwinner,sun9i-a80-display-frontend - reg: base address and size of the memory-mapped region. @@ -419,6 +423,7 @@ Required properties: * allwinner,sun6i-a31-display-engine * allwinner,sun6i-a31s-display-engine * allwinner,sun7i-a20-display-engine + * allwinner,sun8i-a23-display-engine * allwinner,sun8i-a33-display-engine * allwinner,sun8i-a83t-display-engine * allwinner,sun8i-h3-display-engine diff --git a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt index 593be44a53c9..9999255ac5b6 100644 --- a/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt +++ b/Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt @@ -238,6 +238,9 @@ of the following host1x client modules: - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection - nvidia,edid: supplies a binary EDID blob - nvidia,panel: phandle of a display panel + - nvidia,xbar-cfg: 5 cells containing the crossbar configuration. Each lane + of the SOR, identified by the cell's index, is mapped via the crossbar to + the pad specified by the cell's value. Optional properties when driving an eDP output: - nvidia,dpaux: phandle to a DispayPort AUX interface diff --git a/Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt b/Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt new file mode 100644 index 000000000000..6a0f3d90d682 --- /dev/null +++ b/Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt @@ -0,0 +1,25 @@ +Aspeed AST2500 SoC EDAC node + +The Aspeed AST2500 SoC supports DDR3 and DDR4 memory with and without ECC (error +correction check). + +The memory controller supports SECDED (single bit error correction, double bit +error detection) and single bit error auto scrubbing by reserving 8 bits for +every 64 bit word (effectively reducing available memory to 8/9). + +Note, the bootloader must configure ECC mode in the memory controller. + + +Required properties: +- compatible: should be "aspeed,ast2500-sdram-edac" +- reg: sdram controller register set should be <0x1e6e0000 0x174> +- interrupts: should be AVIC interrupt #0 + + +Example: + + edac: sdram@1e6e0000 { + compatible = "aspeed,ast2500-sdram-edac"; + reg = <0x1e6e0000 0x174>; + interrupts = <0>; + }; diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt index f9a7c984274c..0e456bbc1213 100644 --- a/Documentation/devicetree/bindings/eeprom/at24.txt +++ b/Documentation/devicetree/bindings/eeprom/at24.txt @@ -75,6 +75,8 @@ Optional properties: - address-width: number of address bits (one of 8, 16). 
+ - num-addresses: total number of i2c slave addresses this device takes + +Example: eeprom@52 { @@ -82,4 +84,5 @@ eeprom@52 { reg = <0x52>; pagesize = <32>; wp-gpios = <&gpio1 3 0>; + num-addresses = <8>; }; diff --git a/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt new file mode 100644 index 000000000000..936fbdf12815 --- /dev/null +++ b/Documentation/devicetree/bindings/extcon/extcon-ptn5150.txt @@ -0,0 +1,27 @@ +* PTN5150 CC (Configuration Channel) Logic device + +PTN5150 is a small, thin, low-power CC logic chip supporting the USB Type-C +connector application with CC control logic detection and indication functions. +It is interfaced to the host controller using an I2C interface. + +Required properties: +- compatible: should be "nxp,ptn5150" +- reg: specifies the I2C slave address of the device +- int-gpio: should contain a phandle and GPIO specifier for the GPIO pin + connected to the PTN5150's INTB pin. +- vbus-gpio: should contain a phandle and GPIO specifier for the GPIO pin which + is used to control VBUS. +- pinctrl-names : a pinctrl state named "default" must be defined. +- pinctrl-0 : phandle referencing pin configuration of interrupt and vbus + control. + +Example: + ptn5150@1d { + compatible = "nxp,ptn5150"; + reg = <0x1d>; + int-gpio = <&msmgpio 78 GPIO_ACTIVE_HIGH>; + vbus-gpio = <&msmgpio 148 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&ptn5150_default>; + status = "okay"; + }; diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt index 0c10802c8327..ff380dadb5f9 100644 --- a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt +++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt @@ -8,7 +8,6 @@ which can create the interprocessor communication (IPC) between the CPU and BPMP. Required properties: -- name : Should be bpmp - compatible Array of strings One of: diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra210-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra210-bpmp.txt new file mode 100644 index 000000000000..68d814e8c09d --- /dev/null +++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra210-bpmp.txt @@ -0,0 +1,35 @@ +NVIDIA Tegra210 Boot and Power Management Processor (BPMP) + +The Boot and Power Management Processor (BPMP) is a co-processor found +in the Tegra210 SoC. It is designed to handle the early stages of the boot +process as well as assisting in entering the deep low-power state +(suspend to RAM) and offloading DRAM memory clock scaling on +some platforms. The binding document defines the resources that would +be used by the BPMP T210 firmware driver, which can create the +interprocessor communication (IPC) between the CPU and BPMP. + +Required properties: +- compatible + Array of strings + One of: + - "nvidia,tegra210-bpmp" +- reg: physical base address and length for HW synchronization primitives + 1) base address and length of the Tegra 'atomics' hardware + 2) base address and length of the Tegra 'semaphore' hardware +- interrupts: specifies the interrupt number for receiving messages ("rx") + and for triggering messages ("tx") + +Optional properties: +- #clock-cells : Should be 1 for platforms where DRAM clock control is + offloaded to bpmp.
+ +Example: + +bpmp@70016000 { + compatible = "nvidia,tegra210-bpmp"; + reg = <0x0 0x70016000 0x0 0x2000 + 0x0 0x60001000 0x0 0x1000>; + interrupts = , + ; + interrupt-names = "tx", "rx"; +}; diff --git a/Documentation/devicetree/bindings/gnss/gnss.txt b/Documentation/devicetree/bindings/gnss/gnss.txt index f1e4a2ff47c5..f547bd4549fe 100644 --- a/Documentation/devicetree/bindings/gnss/gnss.txt +++ b/Documentation/devicetree/bindings/gnss/gnss.txt @@ -17,6 +17,7 @@ Required properties: represents Optional properties: +- lna-supply : Separate supply for an LNA - enable-gpios : GPIO used to enable the device - timepulse-gpios : Time pulse GPIO diff --git a/Documentation/devicetree/bindings/gnss/mediatek.txt b/Documentation/devicetree/bindings/gnss/mediatek.txt new file mode 100644 index 000000000000..80cb802813c5 --- /dev/null +++ b/Documentation/devicetree/bindings/gnss/mediatek.txt @@ -0,0 +1,35 @@ +Mediatek-based GNSS Receiver DT binding + +Mediatek chipsets are used in GNSS-receiver modules produced by several +vendors and can use a UART interface. + +Please see Documentation/devicetree/bindings/gnss/gnss.txt for generic +properties. + +Required properties: + +- compatible : Must be + + "globaltop,pa6h" + +- vcc-supply : Main voltage regulator (pin name: VCC) + +Optional properties: + +- current-speed : Default UART baud rate +- gnss-fix-gpios : GPIO used to determine device position fix state + (pin name: FIX, 3D_FIX) +- reset-gpios : GPIO used to reset the device (pin name: RESET, NRESET) +- timepulse-gpios : Time pulse GPIO (pin name: PPS1, 1PPS) +- vbackup-supply : Backup voltage regulator (pin name: VBAT, VBACKUP) + +Example: + +serial@1234 { + compatible = "ns16550a"; + + gnss { + compatible = "globaltop,pa6h"; + vcc-supply = <&vcc_3v3>; + }; +}; diff --git a/Documentation/devicetree/bindings/gnss/sirfstar.txt b/Documentation/devicetree/bindings/gnss/sirfstar.txt index 648d183cdb77..f4252b6b660b 100644 --- a/Documentation/devicetree/bindings/gnss/sirfstar.txt +++ b/Documentation/devicetree/bindings/gnss/sirfstar.txt @@ -12,6 +12,7 @@ Required properties: "fastrax,uc430" "linx,r4" + "wi2wi,w2sg0004" "wi2wi,w2sg0008i" "wi2wi,w2sg0084i" diff --git a/Documentation/devicetree/bindings/gpio/gateworks,pld-gpio.txt b/Documentation/devicetree/bindings/gpio/gateworks,pld-gpio.txt new file mode 100644 index 000000000000..6e81f8b755c5 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/gateworks,pld-gpio.txt @@ -0,0 +1,20 @@ +Gateworks PLD GPIO controller bindings + +The GPIO controller should be a child node on an I2C bus, +see: i2c/i2c.txt for details. + +Required properties: +- compatible: Should be "gateworks,pld-gpio" +- reg: I2C slave address +- gpio-controller: Marks the device node as a GPIO controller. +- #gpio-cells: Should be <2>. The first cell is the gpio number and + the second cell is used to specify optional parameters. + +Example: + +pld@56 { + compatible = "gateworks,pld-gpio"; + reg = <0x56>; + gpio-controller; + #gpio-cells = <2>; +}; diff --git a/Documentation/devicetree/bindings/gpio/gpio-eic-sprd.txt b/Documentation/devicetree/bindings/gpio/gpio-eic-sprd.txt index 93d98d09d92b..54040a2bfe3a 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-eic-sprd.txt +++ b/Documentation/devicetree/bindings/gpio/gpio-eic-sprd.txt @@ -33,7 +33,7 @@ Required properties: "sprd,sc9860-eic-latch", "sprd,sc9860-eic-async", "sprd,sc9860-eic-sync", - "sprd,sc27xx-eic". + "sprd,sc2731-eic". 
- reg: Define the base and range of the I/O address space containing the GPIO controller registers. - gpio-controller: Marks the device node as a GPIO controller. @@ -86,7 +86,7 @@ Example: }; pmic_eic: gpio@300 { - compatible = "sprd,sc27xx-eic"; + compatible = "sprd,sc2731-eic"; reg = <0x300>; interrupt-parent = <&sc2731_pmic>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; diff --git a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt index 4e3c550e319a..fb144e2b6522 100644 --- a/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt +++ b/Documentation/devicetree/bindings/gpio/gpio-pca953x.txt @@ -16,6 +16,7 @@ Required properties: nxp,pca9574 nxp,pca9575 nxp,pca9698 + nxp,pcal6416 nxp,pcal6524 nxp,pcal9555a maxim,max7310 diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt index f0ba154b5723..a8895d339bfe 100644 --- a/Documentation/devicetree/bindings/gpio/gpio.txt +++ b/Documentation/devicetree/bindings/gpio/gpio.txt @@ -67,6 +67,18 @@ Optional standard bitfield specifiers for the last cell: https://en.wikipedia.org/wiki/Open_collector - Bit 3: 0 means the output should be maintained during sleep/low-power mode 1 means the output state can be lost during sleep/low-power mode +- Bit 4: 0 means no pull-up resistor should be enabled + 1 means a pull-up resistor should be enabled + This setting only applies to hardware with a simple on/off + control for pull-up configuration. If the hardware has more + elaborate pull-up configuration, it should be represented + using a pin control binding. +- Bit 5: 0 means no pull-down resistor should be enabled + 1 means a pull-down resistor should be enabled + This setting only applies to hardware with a simple on/off + control for pull-down configuration. If the hardware has more + elaborate pull-down configuration, it should be represented + using a pin control binding. 1.1) GPIO specifier best practices ---------------------------------- diff --git a/Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt b/Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt new file mode 100644 index 000000000000..8dc41ed99685 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt @@ -0,0 +1,38 @@ +Intel IXP4xx XScale Networking Processors GPIO + +This GPIO controller is found in the Intel IXP4xx processors. +It supports 16 GPIO lines. + +The interrupt portions of the GPIO controller is hierarchical: +the synchronous edge detector is part of the GPIO block, but the +actual enabling/disabling of the interrupt line is done in the +main IXP4xx interrupt controller which has a 1:1 mapping for +the first 12 GPIO lines to 12 system interrupts. + +The remaining 4 GPIO lines can not be used for receiving +interrupts. + +The interrupt parent of this GPIO controller must be the +IXP4xx interrupt controller. 
+ +Required properties: + +- compatible : Should be + "intel,ixp4xx-gpio" +- reg : Should contain registers location and length +- gpio-controller : marks this as a GPIO controller +- #gpio-cells : Should be 2, see gpio/gpio.txt +- interrupt-controller : marks this as an interrupt controller +- #interrupt-cells : a standard two-cell interrupt, see + interrupt-controller/interrupts.txt + +Example: + +gpio0: gpio@c8004000 { + compatible = "intel,ixp4xx-gpio"; + reg = <0xc8004000 0x1000>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; +}; diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt index 3f128e4f95c6..ae63f09fda7d 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt +++ b/Documentation/devicetree/bindings/gpu/arm,mali-utgard.txt @@ -13,6 +13,8 @@ Required properties: + allwinner,sun8i-h3-mali + allwinner,sun50i-a64-mali + allwinner,sun50i-h5-mali + + amlogic,meson8-mali + + amlogic,meson8b-mali + amlogic,meson-gxbb-mali + amlogic,meson-gxl-mali + rockchip,rk3036-mali @@ -82,6 +84,10 @@ to specify one more vendor-specific compatible, among: Required properties: * resets: phandle to the reset line for the GPU + - amlogic,meson8-mali and amlogic,meson8b-mali + Required properties: + * resets: phandle to the reset line for the GPU + - Rockchip variants: Required properties: * resets: phandle to the reset line for the GPU diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt index 82cd1ed0be93..3aca2578da0b 100644 --- a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt +++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt @@ -2,9 +2,10 @@ Required properties: - compatible : value should be one of the following: - (a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210 - (b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412 - (c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250 + * "samsung,s5pv210-rotator" for Rotator IP in S5PV210 + * "samsung,exynos4210-rotator" for Rotator IP in Exynos4210 + * "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412 + * "samsung,exynos5250-rotator" for Rotator IP in Exynos5250 - reg : Physical base address of the IP registers and length of memory mapped region. diff --git a/Documentation/devicetree/bindings/hwmon/ad741x.txt b/Documentation/devicetree/bindings/hwmon/ad741x.txt new file mode 100644 index 000000000000..9102152c8410 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/ad741x.txt @@ -0,0 +1,15 @@ +* AD7416/AD7417/AD7418 Temperature Sensor Device Tree Bindings + +Required properties: +- compatible: one of + "adi,ad7416" + "adi,ad7417" + "adi,ad7418" +- reg: I2C address + +Example: + +hwmon@28 { + compatible = "adi,ad7418"; + reg = <0x28>; +}; diff --git a/Documentation/devicetree/bindings/hwmon/dps650ab.txt b/Documentation/devicetree/bindings/hwmon/dps650ab.txt new file mode 100644 index 000000000000..76780e795899 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/dps650ab.txt @@ -0,0 +1,11 @@ +Bindings for Delta Electronics DPS-650-AB power supply + +Required properties: +- compatible : "delta,dps650ab" +- reg : I2C address, one of 0x58, 0x59. 
+ +Example: + dps650ab@58 { + compatible = "delta,dps650ab"; + reg = <0x58>; + }; diff --git a/Documentation/devicetree/bindings/hwmon/hih6130.txt b/Documentation/devicetree/bindings/hwmon/hih6130.txt new file mode 100644 index 000000000000..2c43837af4c2 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/hih6130.txt @@ -0,0 +1,12 @@ +Honeywell Humidicon HIH-6130 humidity/temperature sensor +-------------------------------------------------------- + +Required node properties: +- compatible : "honeywell,hih6130" +- reg : the I2C address of the device. This is 0x27. + +Example: + hih6130@27 { + compatible = "honeywell,hih6130"; + reg = <0x27>; + }; diff --git a/Documentation/devicetree/bindings/hwmon/ina3221.txt b/Documentation/devicetree/bindings/hwmon/ina3221.txt index a7b25caa2b8e..fa63b6171407 100644 --- a/Documentation/devicetree/bindings/hwmon/ina3221.txt +++ b/Documentation/devicetree/bindings/hwmon/ina3221.txt @@ -6,6 +6,16 @@ Texas Instruments INA3221 Device Tree Bindings - reg: I2C address Optional properties: + - ti,single-shot: This chip has two power modes: single-shot (the chip takes one + measurement and then shuts itself down) and continuous (the + chip takes measurements continuously). The continuous mode is + more reliable and suited to hardware-monitoring use, + but the single-shot mode is more power-friendly and useful + for battery-powered devices that care about power consumption + while still needing occasional measurements. + If this property is present, the single-shot mode will be + used instead of the default continuous one for monitoring. + = The node contains optional child nodes for three channels = = Each child node describes the information of input source = diff --git a/Documentation/devicetree/bindings/hwmon/lm75.txt b/Documentation/devicetree/bindings/hwmon/lm75.txt new file mode 100644 index 000000000000..12d8cf7cf592 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/lm75.txt @@ -0,0 +1,37 @@ +*LM75 hwmon sensor.
+ +Required properties: +- compatible: manufacturer and chip name, one of + "adi,adt75", + "dallas,ds1775", + "dallas,ds75", + "dallas,ds7505", + "gmt,g751", + "national,lm75", + "national,lm75a", + "national,lm75b", + "maxim,max6625", + "maxim,max6626", + "maxim,max31725", + "maxim,max31726", + "maxim,mcp980x", + "st,stds75", + "st,stlm75", + "microchip,tcn75", + "ti,tmp100", + "ti,tmp101", + "ti,tmp105", + "ti,tmp112", + "ti,tmp175", + "ti,tmp275", + "ti,tmp75", + "ti,tmp75c", + +- reg: I2C bus address of the device + +Example: + +sensor@48 { + compatible = "st,stlm75"; + reg = <0x48>; +}; diff --git a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt index c6d533202d3e..49ca5d83ed13 100644 --- a/Documentation/devicetree/bindings/hwmon/pwm-fan.txt +++ b/Documentation/devicetree/bindings/hwmon/pwm-fan.txt @@ -6,6 +6,9 @@ Required properties: - cooling-levels : PWM duty cycle values in a range from 0 to 255 which correspond to thermal cooling states +Optional properties: +- fan-supply : phandle to the regulator that provides power to the fan + Example: fan0: pwm-fan { compatible = "pwm-fan"; diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt index e199695b1c96..ee4c32454198 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt @@ -10,6 +10,7 @@ Required properties: "mediatek,mt6589-i2c": for MediaTek MT6589 "mediatek,mt7622-i2c": for MediaTek MT7622 "mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for MediaTek MT7623 + "mediatek,mt7629-i2c", "mediatek,mt2712-i2c": for MediaTek MT7629 "mediatek,mt8173-i2c": for MediaTek MT8173 - reg: physical base address of the controller and dma base, length of memory mapped region. diff --git a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt b/Documentation/devicetree/bindings/i2c/i2c-xscale.txt new file mode 100644 index 000000000000..dcc8390e0d24 --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-xscale.txt @@ -0,0 +1,20 @@ +i2c Controller on XScale platforms such as IOP3xx and IXP4xx + +Required properties: +- compatible : Must be one of + "intel,iop3xx-i2c" + "intel,ixp4xx-i2c"; +- reg +- #address-cells = <1>; +- #size-cells = <0>; + +Optional properties: +- Child nodes conforming to i2c bus binding + +Example: + +i2c@c8011000 { + compatible = "intel,ixp4xx-i2c"; + reg = <0xc8011000 0x18>; + interrupts = <33 IRQ_TYPE_LEVEL_LOW>; +}; diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt index 2100e9af379c..e132394375a1 100644 --- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt +++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt @@ -20,6 +20,10 @@ Optional properties: - interrupt-names: should contain "INT1" and/or "INT2", the accelerometer's interrupt line in use. + - vdd-supply: phandle to the regulator that provides vdd power to the accelerometer. + + - vddio-supply: phandle to the regulator that provides vddio power to the accelerometer. 
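A node making use of the newly documented supply properties might look like the sketch below; the regulator phandles are assumptions for illustration only:

	mma8453fc@1d {
		compatible = "fsl,mma8453";
		reg = <0x1d>;
		vdd-supply = <&reg_3v3>;	/* assumed supply */
		vddio-supply = <&reg_1v8>;	/* assumed supply */
	};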
+ Example: mma8453fc@1d { diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt new file mode 100644 index 000000000000..d7b6241ca881 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt @@ -0,0 +1,65 @@ +Analog Devices AD7606 Simultaneous Sampling ADC + +Required properties for the AD7606: + +- compatible: Must be one of + * "adi,ad7605-4" + * "adi,ad7606-8" + * "adi,ad7606-6" + * "adi,ad7606-4" +- reg: SPI chip select number for the device +- spi-max-frequency: Max SPI frequency to use + see: Documentation/devicetree/bindings/spi/spi-bus.txt +- spi-cpha: See Documentation/devicetree/bindings/spi/spi-bus.txt +- avcc-supply: phandle to the Avcc power supply +- interrupts: IRQ line for the ADC + see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt +- adi,conversion-start-gpios: must be the device tree identifier of the CONVST pin. + This logic input is used to initiate conversions on the analog + input channels. As the line is active high, it should be marked + GPIO_ACTIVE_HIGH. + +Optional properties: + +- reset-gpios: must be the device tree identifier of the RESET pin. If specified, + it will be asserted during driver probe. As the line is active high, + it should be marked GPIO_ACTIVE_HIGH. +- standby-gpios: must be the device tree identifier of the STBY pin. This pin is used + to place the AD7606 into one of two power-down modes, Standby mode or + Shutdown mode. As the line is active low, it should be marked + GPIO_ACTIVE_LOW. +- adi,first-data-gpios: must be the device tree identifier of the FRSTDATA pin. + The FRSTDATA output indicates when the first channel, V1, is + being read back on either the parallel, byte or serial interface. + As the line is active high, it should be marked GPIO_ACTIVE_HIGH. +- adi,range-gpios: must be the device tree identifier of the RANGE pin. The polarity on + this pin determines the input range of the analog input channels. If + this pin is tied to a logic high, the analog input range is ±10V for + all channels. If this pin is tied to a logic low, the analog input range + is ±5V for all channels. As the line is active high, it should be marked + GPIO_ACTIVE_HIGH. +- adi,oversampling-ratio-gpios: must be the device tree identifier of the over-sampling + mode pins. As the line is active high, it should be marked + GPIO_ACTIVE_HIGH. 
+ +Example: + + adc@0 { + compatible = "adi,ad7606-8"; + reg = <0>; + spi-max-frequency = <1000000>; + spi-cpol; + + avcc-supply = <&adc_vref>; + + interrupts = <25 IRQ_TYPE_EDGE_FALLING>; + interrupt-parent = <&gpio>; + + adi,conversion-start-gpios = <&gpio 17 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 27 GPIO_ACTIVE_HIGH>; + adi,first-data-gpios = <&gpio 22 GPIO_ACTIVE_HIGH>; + adi,oversampling-ratio-gpios = <&gpio 18 GPIO_ACTIVE_HIGH + &gpio 23 GPIO_ACTIVE_HIGH + &gpio 26 GPIO_ACTIVE_HIGH>; + standby-gpios = <&gpio 24 GPIO_ACTIVE_LOW>; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt b/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt new file mode 100644 index 000000000000..9f5b88cf680d --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt @@ -0,0 +1,41 @@ +Analog Devices AD7768-1 ADC device driver + +Required properties for the AD7768-1: + +- compatible: Must be "adi,ad7768-1" +- reg: SPI chip select number for the device +- spi-max-frequency: Max SPI frequency to use + see: Documentation/devicetree/bindings/spi/spi-bus.txt +- clocks: phandle to the master clock (mclk) + see: Documentation/devicetree/bindings/clock/clock-bindings.txt +- clock-names: Must be "mclk". +- interrupts: IRQ line for the ADC + see: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt +- vref-supply: vref supply can be used as reference for conversion +- adi,sync-in-gpios: must be the device tree identifier of the SYNC-IN pin. Enables + synchronization of multiple devices that require simultaneous sampling. + A pulse is always required if the configuration is changed in any way, for example + if the filter decimation rate changes. As the line is active low, it should + be marked GPIO_ACTIVE_LOW. + +Optional properties: + + - reset-gpios : GPIO spec for the RESET pin. If specified, it will be asserted during + driver probe. As the line is active low, it should be marked GPIO_ACTIVE_LOW. 
+ +Example: + + adc@0 { + compatible = "adi,ad7768-1"; + reg = <0>; + spi-max-frequency = <2000000>; + spi-cpol; + spi-cpha; + vref-supply = <&adc_vref>; + interrupts = <25 IRQ_TYPE_EDGE_RISING>; + interrupt-parent = <&gpio>; + adi,sync-in-gpios = <&gpio 22 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio 27 GPIO_ACTIVE_LOW>; + clocks = <&ad7768_mclk>; + clock-names = "mclk"; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt index 325090e43ce6..75c775954102 100644 --- a/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt +++ b/Documentation/devicetree/bindings/iio/adc/amlogic,meson-saradc.txt @@ -23,6 +23,10 @@ Required properties: - #io-channel-cells: must be 1, see ../iio-bindings.txt Optional properties: +- amlogic,hhi-sysctrl: phandle to the syscon which contains the 5th bit + of the TSC (temperature sensor coefficient) on + Meson8b and Meson8m2 (which used to calibrate the + temperature sensor) - nvmem-cells: phandle to the temperature_calib eFuse cells - nvmem-cell-names: if present (to enable the temperature sensor calibration) this must contain "temperature_calib" diff --git a/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt new file mode 100644 index 000000000000..f01159f20d87 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/ingenic,adc.txt @@ -0,0 +1,48 @@ +* Ingenic JZ47xx ADC controller IIO bindings + +Required properties: + +- compatible: Should be one of: + * ingenic,jz4725b-adc + * ingenic,jz4740-adc +- reg: ADC controller registers location and length. +- clocks: phandle to the SoC's ADC clock. +- clock-names: Must be set to "adc". +- #io-channel-cells: Must be set to <1> to indicate channels are selected + by index. + +ADC clients must use the format described in iio-bindings.txt, giving +a phandle and IIO specifier pair ("io-channels") to the ADC controller. + +Example: + +#include + +adc: adc@10070000 { + compatible = "ingenic,jz4740-adc"; + #io-channel-cells = <1>; + + reg = <0x10070000 0x30>; + + clocks = <&cgu JZ4740_CLK_ADC>; + clock-names = "adc"; + + interrupt-parent = <&intc>; + interrupts = <18>; +}; + +adc-keys { + ... + compatible = "adc-keys"; + io-channels = <&adc INGENIC_ADC_AUX>; + io-channel-names = "buttons"; + ... +}; + +battery { + ... + compatible = "ingenic,jz4740-battery"; + io-channels = <&adc INGENIC_ADC_BATTERY>; + io-channel-names = "battery"; + ... +}; diff --git a/Documentation/devicetree/bindings/staging/iio/adc/lpc32xx-adc.txt b/Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt similarity index 100% rename from Documentation/devicetree/bindings/staging/iio/adc/lpc32xx-adc.txt rename to Documentation/devicetree/bindings/iio/adc/lpc32xx-adc.txt diff --git a/Documentation/devicetree/bindings/iio/adc/nuvoton,npcm-adc.txt b/Documentation/devicetree/bindings/iio/adc/nuvoton,npcm-adc.txt new file mode 100644 index 000000000000..eb939fe77836 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/nuvoton,npcm-adc.txt @@ -0,0 +1,24 @@ +Nuvoton NPCM Analog to Digital Converter (ADC) + +The NPCM ADC is a 10-bit converter for eight channel inputs. + +Required properties: +- compatible: "nuvoton,npcm750-adc" for the NPCM7XX BMC. +- reg: specifies physical base address and size of the registers. +- interrupts: Contain the ADC interrupt with flags for falling edge. 
+ +Optional properties: +- clocks: phandle of ADC reference clock, in case the clock is not + added the ADC will use the default ADC sample rate. +- vref-supply: The regulator supply ADC reference voltage, in case the + vref-supply is not added the ADC will use internal voltage + reference. + +Example: + +adc: adc@f000c000 { + compatible = "nuvoton,npcm750-adc"; + reg = <0xf000c000 0x8>; + interrupts = ; + clocks = <&clk NPCM7XX_CLK_ADC>; +}; diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt index a10c1f89037d..e1fe02f3e3e9 100644 --- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt +++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt @@ -11,11 +11,13 @@ New driver handles the following Required properties: - compatible: Must be "samsung,exynos-adc-v1" - for exynos4412/5250 controllers. + for Exynos5250 controllers. Must be "samsung,exynos-adc-v2" for future controllers. Must be "samsung,exynos3250-adc" for controllers compatible with ADC of Exynos3250. + Must be "samsung,exynos4212-adc" for + controllers compatible with ADC of Exynos4212 and Exynos4412. Must be "samsung,exynos7-adc" for the ADC in Exynos7 and compatibles Must be "samsung,s3c2410-adc" for diff --git a/Documentation/devicetree/bindings/iio/adc/stmpe-adc.txt b/Documentation/devicetree/bindings/iio/adc/stmpe-adc.txt new file mode 100644 index 000000000000..480e66422625 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/stmpe-adc.txt @@ -0,0 +1,21 @@ +STMPE ADC driver +---------------- + +Required properties: + - compatible: "st,stmpe-adc" + +Optional properties: +Note that the ADC is shared with the STMPE touchscreen. ADC related settings +have to be done in the mfd. +- st,norequest-mask: bitmask specifying which ADC channels should _not_ be + requestable due to different usage (e.g. touch) + +Node name must be stmpe_adc and should be child node of stmpe node to +which it belongs. + +Example: + + stmpe_adc { + compatible = "st,stmpe-adc"; + st,norequest-mask = <0x0F>; /* dont use ADC CH3-0 */ + }; diff --git a/Documentation/devicetree/bindings/iio/adc/ti-ads124s08.txt b/Documentation/devicetree/bindings/iio/adc/ti-ads124s08.txt new file mode 100644 index 000000000000..ecf807bb32f7 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/ti-ads124s08.txt @@ -0,0 +1,25 @@ +* Texas Instruments' ads124s08 and ads124s06 ADC chip + +Required properties: + - compatible : + "ti,ads124s08" + "ti,ads124s06" + - reg : spi chip select number for the device + +Recommended properties: + - spi-max-frequency : Definition as per + Documentation/devicetree/bindings/spi/spi-bus.txt + - spi-cpha : Definition as per + Documentation/devicetree/bindings/spi/spi-bus.txt + +Optional properties: + - reset-gpios : GPIO pin used to reset the device. 
+ +Example: +adc@0 { + compatible = "ti,ads124s08"; + reg = <0>; + spi-max-frequency = <1000000>; + spi-cpha; + reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; +}; diff --git a/Documentation/devicetree/bindings/iio/chemical/bme680.txt b/Documentation/devicetree/bindings/iio/chemical/bme680.txt new file mode 100644 index 000000000000..7f3827cfb2ff --- /dev/null +++ b/Documentation/devicetree/bindings/iio/chemical/bme680.txt @@ -0,0 +1,11 @@ +Bosch Sensortec BME680 pressure/temperature/humidity/voc sensors + +Required properties: +- compatible: must be "bosch,bme680" + +Example: + +bme680@76 { + compatible = "bosch,bme680"; + reg = <0x76>; +}; diff --git a/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt new file mode 100644 index 000000000000..7b5f06f324c8 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.txt @@ -0,0 +1,20 @@ +* Plantower PMS7003 particulate matter sensor + +Required properties: +- compatible: must be "plantower,pms7003" +- vcc-supply: phandle to the regulator that provides power to the sensor + +Optional properties: +- plantower,set-gpios: phandle to the GPIO connected to the SET line +- reset-gpios: phandle to the GPIO connected to the RESET line + +Refer to serial/slave-device.txt for generic serial attached device bindings. + +Example: + +&uart0 { + air-pollution-sensor { + compatible = "plantower,pms7003"; + vcc-supply = <®_vcc5v0>; + }; +}; diff --git a/Documentation/devicetree/bindings/iio/chemical/sensirion,sgp30.txt b/Documentation/devicetree/bindings/iio/chemical/sensirion,sgp30.txt new file mode 100644 index 000000000000..5844ed58173c --- /dev/null +++ b/Documentation/devicetree/bindings/iio/chemical/sensirion,sgp30.txt @@ -0,0 +1,15 @@ +* Sensirion SGP30/SGPC3 multi-pixel Gas Sensor + +Required properties: + + - compatible: must be one of + "sensirion,sgp30" + "sensirion,sgpc3" + - reg: the I2C address of the sensor + +Example: + +gas@58 { + compatible = "sensirion,sgp30"; + reg = <0x58>; +}; diff --git a/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.txt b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.txt new file mode 100644 index 000000000000..6eee2709b5b6 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/chemical/sensirion,sps30.txt @@ -0,0 +1,12 @@ +* Sensirion SPS30 particulate matter sensor + +Required properties: +- compatible: must be "sensirion,sps30" +- reg: the I2C address of the sensor + +Example: + +sps30@69 { + compatible = "sensirion,sps30"; + reg = <0x69>; +}; diff --git a/Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt b/Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt new file mode 100644 index 000000000000..639c94ed83e9 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt @@ -0,0 +1,28 @@ +* Texas Instruments Dual, 12-Bit Serial Input Digital-to-Analog Converter + +The DAC7612 is a dual, 12-bit digital-to-analog converter (DAC) with guaranteed +12-bit monotonicity performance over the industrial temperature range. +Is is programmable through an SPI interface. + +The internal DACs are loaded when the LOADDACS pin is pulled down. + +http://www.ti.com/lit/ds/sbas106/sbas106.pdf + +Required Properties: +- compatible: Should be one of: + "ti,dac7612" + "ti,dac7612u" + "ti,dac7612ub" +- reg: Definition as per Documentation/devicetree/bindings/spi/spi-bus.txt + +Optional Properties: +- ti,loaddacs-gpios: GPIO descriptor for the LOADDACS pin. 
+- spi-*: Definition as per Documentation/devicetree/bindings/spi/spi-bus.txt + +Example: + + dac@1 { + compatible = "ti,dac7612"; + reg = <0x1>; + ti,loaddacs-gpios = <&msmgpio 25 GPIO_ACTIVE_LOW>; + }; diff --git a/Documentation/devicetree/bindings/iio/impedance-analyzer/ad5933.txt b/Documentation/devicetree/bindings/iio/impedance-analyzer/ad5933.txt new file mode 100644 index 000000000000..5ff38728ff91 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/impedance-analyzer/ad5933.txt @@ -0,0 +1,26 @@ +Analog Devices AD5933/AD5934 Impedance Converter, Network Analyzer + +https://www.analog.com/media/en/technical-documentation/data-sheets/AD5933.pdf +https://www.analog.com/media/en/technical-documentation/data-sheets/AD5934.pdf + +Required properties: + - compatible : should be one of + "adi,ad5933" + "adi,ad5934" + - reg : the I2C address. + - vdd-supply : The regulator supply for DVDD, AVDD1 and AVDD2 when they + are connected together. + +Optional properties: +- clocks : external clock reference. +- clock-names : must be "mclk" if clocks is set. + +Example for a I2C device node: + + impedance-analyzer@0d { + compatible = "adi,adxl345"; + reg = <0x0d>; + vdd-supply = <&vdd_supply>; + clocks = <&ref_clk>; + clock-names = "mclk"; + }; diff --git a/Documentation/devicetree/bindings/iio/imu/bmi160.txt b/Documentation/devicetree/bindings/iio/imu/bmi160.txt index 0c1c105fb503..900c169de00f 100644 --- a/Documentation/devicetree/bindings/iio/imu/bmi160.txt +++ b/Documentation/devicetree/bindings/iio/imu/bmi160.txt @@ -9,9 +9,11 @@ Required properties: - spi-max-frequency : set maximum clock frequency (only for SPI) Optional properties: - - interrupts : interrupt mapping for IRQ, must be IRQ_TYPE_LEVEL_LOW + - interrupts : interrupt mapping for IRQ - interrupt-names : set to "INT1" if INT1 pin should be used as interrupt input, set to "INT2" if INT2 pin should be used instead + - drive-open-drain : set if the specified interrupt pin should be configured as + open drain. If not set, defaults to push-pull. Examples: @@ -20,7 +22,7 @@ bmi160@68 { reg = <0x68>; interrupt-parent = <&gpio4>; - interrupts = <12 IRQ_TYPE_LEVEL_LOW>; + interrupts = <12 IRQ_TYPE_EDGE_RISING>; interrupt-names = "INT1"; }; diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt index 6ab9a9d196b0..268bf7568e19 100644 --- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt +++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt @@ -11,6 +11,7 @@ Required properties: "invensense,mpu9250" "invensense,mpu9255" "invensense,icm20608" + "invensense,icm20602" - reg : the I2C address of the sensor - interrupts: interrupt mapping for IRQ. It should be configured with flags IRQ_TYPE_LEVEL_HIGH, IRQ_TYPE_EDGE_RISING, IRQ_TYPE_LEVEL_LOW or diff --git a/Documentation/devicetree/bindings/iio/light/max44009.txt b/Documentation/devicetree/bindings/iio/light/max44009.txt new file mode 100644 index 000000000000..4a98848e35c0 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/light/max44009.txt @@ -0,0 +1,24 @@ +* MAX44009 Ambient Light Sensor + +Required properties: + +- compatible: should be "maxim,max44009" +- reg: the I2C address of the device (default is <0x4a>) + +Optional properties: + +- interrupts: interrupt mapping for GPIO IRQ. Should be configured with + IRQ_TYPE_EDGE_FALLING. + +Refer to interrupt-controller/interrupts.txt for generic interrupt client +node bindings. 
+ +Example: + +light-sensor@4a { + compatible = "maxim,max44009"; + reg = <0x4a>; + + interrupt-parent = <&gpio1>; + interrupts = <17 IRQ_TYPE_EDGE_FALLING>; +}; diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt index ddcb95509599..52ee4baec6f0 100644 --- a/Documentation/devicetree/bindings/iio/st-sensors.txt +++ b/Documentation/devicetree/bindings/iio/st-sensors.txt @@ -77,3 +77,4 @@ Pressure sensors: - st,lps22hb-press - st,lps33hw - st,lps35hw +- st,lps22hh diff --git a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt index 0c252d9306da..ef2ae729718f 100644 --- a/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt +++ b/Documentation/devicetree/bindings/input/cypress,tm2-touchkey.txt @@ -1,13 +1,19 @@ Samsung tm2-touchkey Required properties: -- compatible: must be "cypress,tm2-touchkey" +- compatible: + * "cypress,tm2-touchkey" - for the touchkey found on the tm2 board + * "cypress,midas-touchkey" - for the touchkey found on midas boards + * "cypress,aries-touchkey" - for the touchkey found on aries boards - reg: I2C address of the chip. - interrupts: interrupt to which the chip is connected (see interrupt binding[0]). - vcc-supply : internal regulator output. 1.8V - vdd-supply : power supply for IC 3.3V +Optional properties: +- linux,keycodes: array of keycodes (max 4), default KEY_PHONE and KEY_BACK + [0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt Example: @@ -21,5 +27,6 @@ Example: interrupts = <2 IRQ_TYPE_EDGE_FALLING>; vcc-supply=<&ldo32_reg>; vdd-supply=<&ldo33_reg>; + linux,keycodes = ; }; }; diff --git a/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt new file mode 100644 index 000000000000..b2a76301e632 --- /dev/null +++ b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt @@ -0,0 +1,25 @@ +Ilitek ILI210x/ILI251x touchscreen controller + +Required properties: +- compatible: + ilitek,ili210x for ILI210x + ilitek,ili251x for ILI251x + +- reg: The I2C address of the device + +- interrupts: The sink for the touchscreen's IRQ output + See ../interrupt-controller/interrupts.txt + +Optional properties for main touchpad device: + +- reset-gpios: GPIO specifier for the touchscreen's reset pin (active low) + +Example: + + touchscreen@41 { + compatible = "ilitek,ili251x"; + reg = <0x41>; + interrupt-parent = <&gpio4>; + interrupts = <7 IRQ_TYPE_EDGE_FALLING>; + reset-gpios = <&gpio5 21 GPIO_ACTIVE_LOW>; + }; diff --git a/Documentation/devicetree/bindings/input/msm-vibrator.txt b/Documentation/devicetree/bindings/input/msm-vibrator.txt new file mode 100644 index 000000000000..8dcf014ef2e5 --- /dev/null +++ b/Documentation/devicetree/bindings/input/msm-vibrator.txt @@ -0,0 +1,36 @@ +* Device tree bindings for the Qualcomm MSM vibrator + +Required properties: + + - compatible: Should be one of + "qcom,msm8226-vibrator" + "qcom,msm8974-vibrator" + - reg: the base address and length of the IO memory for the registers. + - pinctrl-names: set to default. + - pinctrl-0: phandles pointing to pin configuration nodes. See + Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt + - clock-names: set to pwm + - clocks: phandle of the clock. See + Documentation/devicetree/bindings/clock/clock-bindings.txt + - enable-gpios: GPIO that enables the vibrator. 
+ +Optional properties: + + - vcc-supply: phandle to the regulator that provides power to the sensor. + +Example from a LG Nexus 5 (hammerhead) phone: + +vibrator@fd8c3450 { + reg = <0xfd8c3450 0x400>; + compatible = "qcom,msm8974-vibrator"; + + vcc-supply = <&pm8941_l19>; + + clocks = <&mmcc CAMSS_GP1_CLK>; + clock-names = "pwm"; + + enable-gpios = <&msmgpio 60 GPIO_ACTIVE_HIGH>; + + pinctrl-names = "default"; + pinctrl-0 = <&vibrator_pin>; +}; diff --git a/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt b/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt new file mode 100644 index 000000000000..4494613ae7ad --- /dev/null +++ b/Documentation/devicetree/bindings/input/st,stpmic1-onkey.txt @@ -0,0 +1,28 @@ +STMicroelectronics STPMIC1 Onkey + +Required properties: + +- compatible = "st,stpmic1-onkey"; +- interrupts: interrupt line to use +- interrupt-names = "onkey-falling", "onkey-rising" + onkey-falling: happens when onkey is pressed; IT_PONKEY_F of pmic + onkey-rising: happens when onkey is released; IT_PONKEY_R of pmic + +Optional properties: + +- st,onkey-clear-cc-flag: onkey is able power on after an + over-current shutdown event. +- st,onkey-pu-inactive: onkey pull up is not active +- power-off-time-sec: Duration in seconds which the key should be kept + pressed for device to power off automatically (from 1 to 16 seconds). + see See Documentation/devicetree/bindings/input/keys.txt + +Example: + +onkey { + compatible = "st,stpmic1-onkey"; + interrupt-parent = <&pmic>; + interrupts = ,; + interrupt-names = "onkey-falling", "onkey-rising"; + power-off-time-sec = <10>; +}; diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt index da2dc5d6c98b..870b8c5cce9b 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt @@ -1,11 +1,12 @@ FocalTech EDT-FT5x06 Polytouch driver ===================================== -There are 3 variants of the chip for various touch panel sizes +There are 5 variants of the chip for various touch panel sizes FT5206GE1 2.8" .. 3.8" FT5306DE4 4.3" .. 7" FT5406EE8 7" .. 8.9" FT5506EEG 7" .. 8.9" +FT5726NEI 5.7” .. 11.6" The software interface is identical for all those chips, so that currently there is no need for the driver to distinguish between the @@ -19,6 +20,7 @@ Required properties: or: "edt,edt-ft5306" or: "edt,edt-ft5406" or: "edt,edt-ft5506" + or: "evervision,ev-ft5726" or: "focaltech,ft6236" - reg: I2C slave address of the chip (0x38) @@ -42,6 +44,15 @@ Optional properties: - offset: allows setting the edge compensation in the range from 0 to 31. + + - offset-x: Same as offset, but applies only to the horizontal position. + Range from 0 to 80, only supported by evervision,ev-ft5726 + devices. + + - offset-y: Same as offset, but applies only to the vertical position. + Range from 0 to 80, only supported by evervision,ev-ft5726 + devices. 
+ - touchscreen-size-x : See touchscreen.txt - touchscreen-size-y : See touchscreen.txt - touchscreen-fuzz-x : See touchscreen.txt diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt index f7e95c52f3c7..8cf0b4d38a7e 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/goodix.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.txt @@ -3,6 +3,7 @@ Device tree bindings for Goodix GT9xx series touchscreen controller Required properties: - compatible : Should be "goodix,gt1151" + or "goodix,gt5688" or "goodix,gt911" or "goodix,gt9110" or "goodix,gt912" @@ -18,11 +19,14 @@ Optional properties: - irq-gpios : GPIO pin used for IRQ. The driver uses the interrupt gpio pin as output to reset the device. - reset-gpios : GPIO pin used for reset - - - touchscreen-inverted-x : X axis is inverted (boolean) - - touchscreen-inverted-y : Y axis is inverted (boolean) - - touchscreen-swapped-x-y : X and Y axis are swapped (boolean) - (swapping is done after inverting the axis) + - touchscreen-inverted-x + - touchscreen-inverted-y + - touchscreen-size-x + - touchscreen-size-y + - touchscreen-swapped-x-y + +The touchscreen-* properties are documented in touchscreen.txt in this +directory. Example: diff --git a/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt b/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt index 64ad48b824a2..019373253b28 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/sitronix-st1232.txt @@ -1,13 +1,17 @@ -* Sitronix st1232 touchscreen controller +* Sitronix st1232 or st1633 touchscreen controller Required properties: -- compatible: must be "sitronix,st1232" +- compatible: must contain one of + * "sitronix,st1232" + * "sitronix,st1633" - reg: I2C address of the chip - interrupts: interrupt to which the chip is connected Optional properties: - gpios: a phandle to the reset GPIO +For additional optional properties see: touchscreen.txt + Example: i2c@00000000 { diff --git a/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt b/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt index 127baa31a77a..c549924603d2 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt @@ -5,39 +5,105 @@ Required properties: - compatible: "st,stmpe-ts" Optional properties: -- st,sample-time: ADC converstion time in number of clock. (0 -> 36 clocks, 1 -> - 44 clocks, 2 -> 56 clocks, 3 -> 64 clocks, 4 -> 80 clocks, 5 -> 96 clocks, 6 - -> 144 clocks), recommended is 4. 
-- st,mod-12b: ADC Bit mode (0 -> 10bit ADC, 1 -> 12bit ADC) -- st,ref-sel: ADC reference source (0 -> internal reference, 1 -> external - reference) -- st,adc-freq: ADC Clock speed (0 -> 1.625 MHz, 1 -> 3.25 MHz, 2 || 3 -> 6.5 MHz) -- st,ave-ctrl: Sample average control (0 -> 1 sample, 1 -> 2 samples, 2 -> 4 - samples, 3 -> 8 samples) -- st,touch-det-delay: Touch detect interrupt delay (0 -> 10 us, 1 -> 50 us, 2 -> - 100 us, 3 -> 500 us, 4-> 1 ms, 5 -> 5 ms, 6 -> 10 ms, 7 -> 50 ms) recommended - is 3 -- st,settling: Panel driver settling time (0 -> 10 us, 1 -> 100 us, 2 -> 500 us, 3 - -> 1 ms, 4 -> 5 ms, 5 -> 10 ms, 6 for 50 ms, 7 -> 100 ms) recommended is 2 -- st,fraction-z: Length of the fractional part in z (fraction-z ([0..7]) = Count of - the fractional part) recommended is 7 -- st,i-drive: current limit value of the touchscreen drivers (0 -> 20 mA typical 35 - mA max, 1 -> 50 mA typical 80 mA max) +- st,ave-ctrl : Sample average control + 0 -> 1 sample + 1 -> 2 samples + 2 -> 4 samples + 3 -> 8 samples +- st,touch-det-delay : Touch detect interrupt delay (recommended is 3) + 0 -> 10 us + 1 -> 50 us + 2 -> 100 us + 3 -> 500 us + 4 -> 1 ms + 5 -> 5 ms + 6 -> 10 ms + 7 -> 50 ms +- st,settling : Panel driver settling time (recommended is 2) + 0 -> 10 us + 1 -> 100 us + 2 -> 500 us + 3 -> 1 ms + 4 -> 5 ms + 5 -> 10 ms + 6 -> 50 ms + 7 -> 100 ms +- st,fraction-z : Length of the fractional part in z (recommended is 7) + (fraction-z ([0..7]) = Count of the fractional part) +- st,i-drive : current limit value of the touchscreen drivers + 0 -> 20 mA (typical 35mA max) + 1 -> 50 mA (typical 80 mA max) + +Optional properties common with MFD (deprecated): + - st,sample-time : ADC conversion time in number of clock. + 0 -> 36 clocks + 1 -> 44 clocks + 2 -> 56 clocks + 3 -> 64 clocks + 4 -> 80 clocks (recommended) + 5 -> 96 clocks + 6 -> 124 clocks + - st,mod-12b : ADC Bit mode + 0 -> 10bit ADC + 1 -> 12bit ADC + - st,ref-sel : ADC reference source + 0 -> internal + 1 -> external + - st,adc-freq : ADC Clock speed + 0 -> 1.625 MHz + 1 -> 3.25 MHz + 2 || 3 -> 6.5 MHz Node name must be stmpe_touchscreen and should be child node of stmpe node to which it belongs. +Note that common ADC settings of stmpe_touchscreen (child) will take precedence +over the settings done in MFD. 
+ Example: +stmpe811@41 { + compatible = "st,stmpe811"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_touch_int>; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x41>; + interrupts = <10 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&gpio4>; + interrupt-controller; + id = <0>; + blocks = <0x5>; + irq-trigger = <0x1>; + /* Common ADC settings */ + /* 3.25 MHz ADC clock speed */ + st,adc-freq = <1>; + /* 12-bit ADC */ + st,mod-12b = <1>; + /* internal ADC reference */ + st,ref-sel = <0>; + /* ADC converstion time: 80 clocks */ + st,sample-time = <4>; + stmpe_touchscreen { compatible = "st,stmpe-ts"; - st,sample-time = <4>; - st,mod-12b = <1>; - st,ref-sel = <0>; - st,adc-freq = <1>; - st,ave-ctrl = <1>; - st,touch-det-delay = <2>; - st,settling = <2>; + reg = <0>; + /* 8 sample average control */ + st,ave-ctrl = <3>; + /* 5 ms touch detect interrupt delay */ + st,touch-det-delay = <5>; + /* 1 ms panel driver settling time */ + st,settling = <3>; + /* 7 length fractional part in z */ st,fraction-z = <7>; + /* + * 50 mA typical 80 mA max touchscreen drivers + * current limit value + */ st,i-drive = <1>; }; + stmpe_adc { + compatible = "st,stmpe-adc"; + st,norequest-mask = <0x0F>; + }; +}; diff --git a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt index 4886c4aa2906..0ebe6dd043c7 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/sx8654.txt @@ -1,10 +1,17 @@ * Semtech SX8654 I2C Touchscreen Controller Required properties: -- compatible: must be "semtech,sx8654" +- compatible: must be one of the following, depending on the model: + "semtech,sx8650" + "semtech,sx8654" + "semtech,sx8655" + "semtech,sx8656" - reg: i2c slave address - interrupts: touch controller interrupt +Optional properties: + - reset-gpios: GPIO specification for the NRST input + Example: sx8654@48 { @@ -12,4 +19,5 @@ Example: reg = <0x48>; interrupt-parent = <&gpio6>; interrupts = <3 IRQ_TYPE_EDGE_FALLING>; + reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>; }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt index b1163bf97146..aad5e34965eb 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt +++ b/Documentation/devicetree/bindings/input/touchscreen/ti-tsc-adc.txt @@ -2,7 +2,12 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Required properties: +- mfd + compatible: Should be + "ti,am3359-tscadc" for AM335x/AM437x SoCs + "ti,am654-tscadc", "ti,am3359-tscadc" for AM654 SoCs - child "tsc" + compatible: Should be "ti,am3359-tsc". ti,wires: Wires refer to application modes i.e. 4/5/8 wire touchscreen support on the platform. ti,x-plate-resistance: X plate resistance @@ -25,6 +30,9 @@ Required properties: AIN0 = 0, AIN1 = 1 and so on till AIN7 = 7. XP = 0, XN = 1, YP = 2, YN = 3. - child "adc" + compatible: Should be + "ti,am3359-adc" for AM335x/AM437x SoCs + "ti,am654-adc", "ti,am3359-adc" for AM654 SoCs ti,adc-channels: List of analog inputs available for ADC. AIN0 = 0, AIN1 = 1 and so on till AIN7 = 7. 
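
For orientation, a minimal sketch of the parent/child node layout that the mfd, "tsc" and "adc" compatibles above describe; the unit address, register range and channel list here are illustrative placeholders, not values taken from this patch:

    tscadc@44e0d000 {
        compatible = "ti,am3359-tscadc";
        reg = <0x44e0d000 0x1000>;

        tsc {
            compatible = "ti,am3359-tsc";
            ti,wires = <4>;
            ti,x-plate-resistance = <200>;
        };

        adc {
            compatible = "ti,am3359-adc";
            ti,adc-channels = <4 5 6 7>;
        };
    };
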
diff --git a/Documentation/devicetree/bindings/interconnect/interconnect.txt b/Documentation/devicetree/bindings/interconnect/interconnect.txt new file mode 100644 index 000000000000..5a3c575b387a --- /dev/null +++ b/Documentation/devicetree/bindings/interconnect/interconnect.txt @@ -0,0 +1,60 @@ +Interconnect Provider Device Tree Bindings +========================================= + +The purpose of this document is to define a common set of generic interconnect +providers/consumers properties. + + += interconnect providers = + +The interconnect provider binding is intended to represent the interconnect +controllers in the system. Each provider registers a set of interconnect +nodes, which expose the interconnect related capabilities of the interconnect +to consumer drivers. These capabilities can be throughput, latency, priority +etc. The consumer drivers set constraints on interconnect path (or endpoints) +depending on the use case. Interconnect providers can also be interconnect +consumers, such as in the case where two network-on-chip fabrics interface +directly. + +Required properties: +- compatible : contains the interconnect provider compatible string +- #interconnect-cells : number of cells in a interconnect specifier needed to + encode the interconnect node id + +Example: + + snoc: interconnect@580000 { + compatible = "qcom,msm8916-snoc"; + #interconnect-cells = <1>; + reg = <0x580000 0x14000>; + clock-names = "bus_clk", "bus_a_clk"; + clocks = <&rpmcc RPM_SMD_SNOC_CLK>, + <&rpmcc RPM_SMD_SNOC_A_CLK>; + }; + + += interconnect consumers = + +The interconnect consumers are device nodes which dynamically express their +bandwidth requirements along interconnect paths they are connected to. There +can be multiple interconnect providers on a SoC and the consumer may consume +multiple paths from different providers depending on use case and the +components it has to interact with. + +Required properties: +interconnects : Pairs of phandles and interconnect provider specifier to denote + the edge source and destination ports of the interconnect path. + +Optional properties: +interconnect-names : List of interconnect path name strings sorted in the same + order as the interconnects property. Consumers drivers will use + interconnect-names to match interconnect paths with interconnect + specifier pairs. + +Example: + + sdhci@7864000 { + ... + interconnects = <&pnoc MASTER_SDCC_1 &bimc SLAVE_EBI_CH0>; + interconnect-names = "sdhc-mem"; + }; diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt new file mode 100644 index 000000000000..5c4f1d911630 --- /dev/null +++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt @@ -0,0 +1,24 @@ +Qualcomm SDM845 Network-On-Chip interconnect driver binding +----------------------------------------------------------- + +SDM845 interconnect providers support system bandwidth requirements through +RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is +able to communicate with the BCM through the Resource State Coordinator (RSC) +associated with each execution environment. Provider nodes must reside within +an RPMh device node pertaining to their RSC and each provider maps to a single +RPMh resource. 
+ +Required properties : +- compatible : shall contain only one of the following: + "qcom,sdm845-rsc-hlos" +- #interconnect-cells : should contain 1 + +Examples: + +apps_rsc: rsc { + rsc_hlos: interconnect { + compatible = "qcom,sdm845-rsc-hlos"; + #interconnect-cells = <1>; + }; +}; + diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt deleted file mode 100644 index a3be5298a5eb..000000000000 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt +++ /dev/null @@ -1,175 +0,0 @@ -* ARM Generic Interrupt Controller, version 3 - -AArch64 SMP cores are often associated with a GICv3, providing Private -Peripheral Interrupts (PPI), Shared Peripheral Interrupts (SPI), -Software Generated Interrupts (SGI), and Locality-specific Peripheral -Interrupts (LPI). - -Main node required properties: - -- compatible : should at least contain "arm,gic-v3" or either - "qcom,msm8996-gic-v3", "arm,gic-v3" for msm8996 SoCs - to address SoC specific bugs/quirks -- interrupt-controller : Identifies the node as an interrupt controller -- #interrupt-cells : Specifies the number of cells needed to encode an - interrupt source. Must be a single cell with a value of at least 3. - If the system requires describing PPI affinity, then the value must - be at least 4. - - The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI - interrupts. Other values are reserved for future use. - - The 2nd cell contains the interrupt number for the interrupt type. - SPI interrupts are in the range [0-987]. PPI interrupts are in the - range [0-15]. - - The 3rd cell is the flags, encoded as follows: - bits[3:0] trigger type and level flags. - 1 = edge triggered - 4 = level triggered - - The 4th cell is a phandle to a node describing a set of CPUs this - interrupt is affine to. The interrupt must be a PPI, and the node - pointed must be a subnode of the "ppi-partitions" subnode. For - interrupt types other than PPI or PPIs that are not partitionned, - this cell must be zero. See the "ppi-partitions" node description - below. - - Cells 5 and beyond are reserved for future use and must have a value - of 0 if present. - -- reg : Specifies base physical address(s) and size of the GIC - registers, in the following order: - - GIC Distributor interface (GICD) - - GIC Redistributors (GICR), one range per redistributor region - - GIC CPU interface (GICC) - - GIC Hypervisor interface (GICH) - - GIC Virtual CPU interface (GICV) - - GICC, GICH and GICV are optional. - -- interrupts : Interrupt source of the VGIC maintenance interrupt. - -Optional - -- redistributor-stride : If using padding pages, specifies the stride - of consecutive redistributors. Must be a multiple of 64kB. - -- #redistributor-regions: The number of independent contiguous regions - occupied by the redistributors. Required if more than one such - region is present. - -- msi-controller: Boolean property. Identifies the node as an MSI - controller. Only present if the Message Based Interrupt - functionnality is being exposed by the HW, and the mbi-ranges - property present. - -- mbi-ranges: A list of pairs , where "intid" is the first - SPI of a range that can be used an MBI, and "span" the size of that - range. Multiple ranges can be provided. Requires "msi-controller" to - be set. - -- mbi-alias: Address property. 
Base address of an alias of the GICD - region containing only the {SET,CLR}SPI registers to be used if - isolation is required, and if supported by the HW. - -Sub-nodes: - -PPI affinity can be expressed as a single "ppi-partitions" node, -containing a set of sub-nodes, each with the following property: -- affinity: Should be a list of phandles to CPU nodes (as described in - Documentation/devicetree/bindings/arm/cpus.yaml). - -GICv3 has one or more Interrupt Translation Services (ITS) that are -used to route Message Signalled Interrupts (MSI) to the CPUs. - -These nodes must have the following properties: -- compatible : Should at least contain "arm,gic-v3-its". -- msi-controller : Boolean property. Identifies the node as an MSI controller -- #msi-cells: Must be <1>. The single msi-cell is the DeviceID of the device - which will generate the MSI. -- reg: Specifies the base physical address and size of the ITS - registers. - -Optional: -- socionext,synquacer-pre-its: (u32, u32) tuple describing the untranslated - address and size of the pre-ITS window. - -The main GIC node must contain the appropriate #address-cells, -#size-cells and ranges properties for the reg property of all ITS -nodes. - -Examples: - - gic: interrupt-controller@2cf00000 { - compatible = "arm,gic-v3"; - #interrupt-cells = <3>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - interrupt-controller; - reg = <0x0 0x2f000000 0 0x10000>, // GICD - <0x0 0x2f100000 0 0x200000>, // GICR - <0x0 0x2c000000 0 0x2000>, // GICC - <0x0 0x2c010000 0 0x2000>, // GICH - <0x0 0x2c020000 0 0x2000>; // GICV - interrupts = <1 9 4>; - - msi-controller; - mbi-ranges = <256 128>; - - gic-its@2c200000 { - compatible = "arm,gic-v3-its"; - msi-controller; - #msi-cells = <1>; - reg = <0x0 0x2c200000 0 0x20000>; - }; - }; - - gic: interrupt-controller@2c010000 { - compatible = "arm,gic-v3"; - #interrupt-cells = <4>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - interrupt-controller; - redistributor-stride = <0x0 0x40000>; // 256kB stride - #redistributor-regions = <2>; - reg = <0x0 0x2c010000 0 0x10000>, // GICD - <0x0 0x2d000000 0 0x800000>, // GICR 1: CPUs 0-31 - <0x0 0x2e000000 0 0x800000>; // GICR 2: CPUs 32-63 - <0x0 0x2c040000 0 0x2000>, // GICC - <0x0 0x2c060000 0 0x2000>, // GICH - <0x0 0x2c080000 0 0x2000>; // GICV - interrupts = <1 9 4>; - - gic-its@2c200000 { - compatible = "arm,gic-v3-its"; - msi-controller; - #msi-cells = <1>; - reg = <0x0 0x2c200000 0 0x20000>; - }; - - gic-its@2c400000 { - compatible = "arm,gic-v3-its"; - msi-controller; - #msi-cells = <1>; - reg = <0x0 0x2c400000 0 0x20000>; - }; - - ppi-partitions { - part0: interrupt-partition-0 { - affinity = <&cpu0 &cpu2>; - }; - - part1: interrupt-partition-1 { - affinity = <&cpu1 &cpu3>; - }; - }; - }; - - - device@0 { - reg = <0 0 0 4>; - interrupts = <1 1 4 &part0>; - }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml new file mode 100644 index 000000000000..c34df35a25fc --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml @@ -0,0 +1,279 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/arm,gic-v3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ARM Generic Interrupt Controller, version 3 + +maintainers: + - Marc Zyngier + +description: | + AArch64 SMP cores are often associated with a GICv3, providing Private + Peripheral 
Interrupts (PPI), Shared Peripheral Interrupts (SPI), + Software Generated Interrupts (SGI), and Locality-specific Peripheral + Interrupts (LPI). + +allOf: + - $ref: /schemas/interrupt-controller.yaml# + +properties: + compatible: + oneOf: + - items: + - enum: + - qcom,msm8996-gic-v3 + - const: arm,gic-v3 + - const: arm,gic-v3 + + interrupt-controller: true + + "#address-cells": + enum: [ 0, 1, 2 ] + "#size-cells": + enum: [ 1, 2 ] + + ranges: true + + "#interrupt-cells": + description: | + Specifies the number of cells needed to encode an interrupt source. + Must be a single cell with a value of at least 3. + If the system requires describing PPI affinity, then the value must + be at least 4. + + The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI + interrupts. Other values are reserved for future use. + + The 2nd cell contains the interrupt number for the interrupt type. + SPI interrupts are in the range [0-987]. PPI interrupts are in the + range [0-15]. + + The 3rd cell is the flags, encoded as follows: + bits[3:0] trigger type and level flags. + 1 = edge triggered + 4 = level triggered + + The 4th cell is a phandle to a node describing a set of CPUs this + interrupt is affine to. The interrupt must be a PPI, and the node + pointed must be a subnode of the "ppi-partitions" subnode. For + interrupt types other than PPI or PPIs that are not partitionned, + this cell must be zero. See the "ppi-partitions" node description + below. + + Cells 5 and beyond are reserved for future use and must have a value + of 0 if present. + enum: [ 3, 4 ] + + reg: + description: | + Specifies base physical address(s) and size of the GIC + registers, in the following order: + - GIC Distributor interface (GICD) + - GIC Redistributors (GICR), one range per redistributor region + - GIC CPU interface (GICC) + - GIC Hypervisor interface (GICH) + - GIC Virtual CPU interface (GICV) + + GICC, GICH and GICV are optional. + minItems: 2 + maxItems: 4096 # Should be enough? + + interrupts: + description: + Interrupt source of the VGIC maintenance interrupt. + maxItems: 1 + + redistributor-stride: + description: + If using padding pages, specifies the stride of consecutive + redistributors. Must be a multiple of 64kB. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint64 + - multipleOf: 0x10000 + exclusiveMinimum: 0 + + "#redistributor-regions": + description: + The number of independent contiguous regions occupied by the + redistributors. Required if more than one such region is present. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - maximum: 4096 # Should be enough? + + msi-controller: + description: + Only present if the Message Based Interrupt functionnality is + being exposed by the HW, and the mbi-ranges property present. + + mbi-ranges: + description: + A list of pairs , where "intid" is the first SPI of a range + that can be used an MBI, and "span" the size of that range. Multiple + ranges can be provided. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-matrix + - items: + minItems: 2 + maxItems: 2 + + mbi-alias: + description: + Address property. Base address of an alias of the GICD region containing + only the {SET,CLR}SPI registers to be used if isolation is required, + and if supported by the HW. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - items: + minItems: 1 + maxItems: 2 + + ppi-partitions: + type: object + description: + PPI affinity can be expressed as a single "ppi-partitions" node, + containing a set of sub-nodes. 
+ patternProperties: + "^interrupt-partition-[0-9]+$": + properties: + affinity: + $ref: /schemas/types.yaml#/definitions/phandle-array + description: + Should be a list of phandles to CPU nodes (as described in + Documentation/devicetree/bindings/arm/cpus.yaml). + + required: + - affinity + +dependencies: + mbi-ranges: [ msi-controller ] + msi-controller: [ mbi-ranges ] + +required: + - compatible + - interrupts + - reg + +patternProperties: + "^gic-its@": false + "^interrupt-controller@[0-9a-f]+$": false + # msi-controller is preferred, but allow other names + "^(msi-controller|gic-its|interrupt-controller)@[0-9a-f]+$": + type: object + description: + GICv3 has one or more Interrupt Translation Services (ITS) that are + used to route Message Signalled Interrupts (MSI) to the CPUs. + properties: + compatible: + const: arm,gic-v3-its + + msi-controller: true + + "#msi-cells": + description: + The single msi-cell is the DeviceID of the device which will generate + the MSI. + const: 1 + + reg: + description: + Specifies the base physical address and size of the ITS registers. + maxItems: 1 + + socionext,synquacer-pre-its: + description: + (u32, u32) tuple describing the untranslated + address and size of the pre-ITS window. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32-array + - items: + minItems: 2 + maxItems: 2 + + required: + - compatible + - msi-controller + - "#msi-cells" + - reg + + additionalProperties: false + +additionalProperties: false + +examples: + - | + gic: interrupt-controller@2cf00000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + interrupt-controller; + reg = <0x2f000000 0x10000>, // GICD + <0x2f100000 0x200000>, // GICR + <0x2c000000 0x2000>, // GICC + <0x2c010000 0x2000>, // GICH + <0x2c020000 0x2000>; // GICV + interrupts = <1 9 4>; + + msi-controller; + mbi-ranges = <256 128>; + + msi-controller@2c200000 { + compatible = "arm,gic-v3-its"; + msi-controller; + #msi-cells = <1>; + reg = <0x2c200000 0x20000>; + }; + }; + + interrupt-controller@2c010000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <4>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + interrupt-controller; + redistributor-stride = <0x0 0x40000>; // 256kB stride + #redistributor-regions = <2>; + reg = <0x2c010000 0x10000>, // GICD + <0x2d000000 0x800000>, // GICR 1: CPUs 0-31 + <0x2e000000 0x800000>, // GICR 2: CPUs 32-63 + <0x2c040000 0x2000>, // GICC + <0x2c060000 0x2000>, // GICH + <0x2c080000 0x2000>; // GICV + interrupts = <1 9 4>; + + msi-controller@2c200000 { + compatible = "arm,gic-v3-its"; + msi-controller; + #msi-cells = <1>; + reg = <0x2c200000 0x20000>; + }; + + msi-controller@2c400000 { + compatible = "arm,gic-v3-its"; + msi-controller; + #msi-cells = <1>; + reg = <0x2c400000 0x20000>; + }; + + ppi-partitions { + part0: interrupt-partition-0 { + affinity = <&cpu0 &cpu2>; + }; + + part1: interrupt-partition-1 { + affinity = <&cpu1 &cpu3>; + }; + }; + }; + + + device@0 { + reg = <0 4>; + interrupts = <1 1 4 &part0>; + }; + +... 
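
As a quick illustration of the three-cell interrupt specifier documented above, a hypothetical consumer node requesting a level-triggered SPI might look as follows; the node name and interrupt number are placeholders, not part of this binding:

    serial@f8015000 {
        interrupt-parent = <&gic>;
        interrupts = <0 42 4>;  /* SPI, interrupt 42, level triggered */
    };
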
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt deleted file mode 100644 index 2f3244648646..000000000000 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt +++ /dev/null @@ -1,171 +0,0 @@ -* ARM Generic Interrupt Controller - -ARM SMP cores are often associated with a GIC, providing per processor -interrupts (PPI), shared processor interrupts (SPI) and software -generated interrupts (SGI). - -Primary GIC is attached directly to the CPU and typically has PPIs and SGIs. -Secondary GICs are cascaded into the upward interrupt controller and do not -have PPIs or SGIs. - -Main node required properties: - -- compatible : should be one of: - "arm,arm1176jzf-devchip-gic" - "arm,arm11mp-gic" - "arm,cortex-a15-gic" - "arm,cortex-a7-gic" - "arm,cortex-a9-gic" - "arm,eb11mp-gic" - "arm,gic-400" - "arm,pl390" - "arm,tc11mp-gic" - "brcm,brahma-b15-gic" - "nvidia,tegra210-agic" - "qcom,msm-8660-qgic" - "qcom,msm-qgic2" -- interrupt-controller : Identifies the node as an interrupt controller -- #interrupt-cells : Specifies the number of cells needed to encode an - interrupt source. The type shall be a and the value shall be 3. - - The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI - interrupts. - - The 2nd cell contains the interrupt number for the interrupt type. - SPI interrupts are in the range [0-987]. PPI interrupts are in the - range [0-15]. - - The 3rd cell is the flags, encoded as follows: - bits[3:0] trigger type and level flags. - 1 = low-to-high edge triggered - 2 = high-to-low edge triggered (invalid for SPIs) - 4 = active high level-sensitive - 8 = active low level-sensitive (invalid for SPIs). - bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of - the 8 possible cpus attached to the GIC. A bit set to '1' indicated - the interrupt is wired to that CPU. Only valid for PPI interrupts. - Also note that the configurability of PPI interrupts is IMPLEMENTATION - DEFINED and as such not guaranteed to be present (most SoC available - in 2014 seem to ignore the setting of this flag and use the hardware - default value). - -- reg : Specifies base physical address(s) and size of the GIC registers. The - first region is the GIC distributor register base and size. The 2nd region is - the GIC cpu interface register base and size. - -Optional -- interrupts : Interrupt source of the parent interrupt controller on - secondary GICs, or VGIC maintenance interrupt on primary GIC (see - below). - -- cpu-offset : per-cpu offset within the distributor and cpu interface - regions, used when the GIC doesn't have banked registers. The offset is - cpu-offset * cpu-nr. - -- clocks : List of phandle and clock-specific pairs, one for each entry - in clock-names. -- clock-names : List of names for the GIC clock input(s). Valid clock names - depend on the GIC variant: - "ic_clk" (for "arm,arm11mp-gic") - "PERIPHCLKEN" (for "arm,cortex-a15-gic") - "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic") - "clk" (for "arm,gic-400" and "nvidia,tegra210") - "gclk" (for "arm,pl390") - -- power-domains : A phandle and PM domain specifier as defined by bindings of - the power controller specified by phandle, used when the GIC - is part of a Power or Clock Domain. 
- - -Example: - - intc: interrupt-controller@fff11000 { - compatible = "arm,cortex-a9-gic"; - #interrupt-cells = <3>; - #address-cells = <1>; - interrupt-controller; - reg = <0xfff11000 0x1000>, - <0xfff10100 0x100>; - }; - - -* GIC virtualization extensions (VGIC) - -For ARM cores that support the virtualization extensions, additional -properties must be described (they only exist if the GIC is the -primary interrupt controller). - -Required properties: - -- reg : Additional regions specifying the base physical address and - size of the VGIC registers. The first additional region is the GIC - virtual interface control register base and size. The 2nd additional - region is the GIC virtual cpu interface register base and size. - -- interrupts : VGIC maintenance interrupt. - -Example: - - interrupt-controller@2c001000 { - compatible = "arm,cortex-a15-gic"; - #interrupt-cells = <3>; - interrupt-controller; - reg = <0x2c001000 0x1000>, - <0x2c002000 0x2000>, - <0x2c004000 0x2000>, - <0x2c006000 0x2000>; - interrupts = <1 9 0xf04>; - }; - - -* GICv2m extension for MSI/MSI-x support (Optional) - -Certain revisions of GIC-400 supports MSI/MSI-x via V2M register frame(s). -This is enabled by specifying v2m sub-node(s). - -Required properties: - -- compatible : The value here should contain "arm,gic-v2m-frame". - -- msi-controller : Identifies the node as an MSI controller. - -- reg : GICv2m MSI interface register base and size - -Optional properties: - -- arm,msi-base-spi : When the MSI_TYPER register contains an incorrect - value, this property should contain the SPI base of - the MSI frame, overriding the HW value. - -- arm,msi-num-spis : When the MSI_TYPER register contains an incorrect - value, this property should contain the number of - SPIs assigned to the frame, overriding the HW value. - -Example: - - interrupt-controller@e1101000 { - compatible = "arm,gic-400"; - #interrupt-cells = <3>; - #address-cells = <2>; - #size-cells = <2>; - interrupt-controller; - interrupts = <1 8 0xf04>; - ranges = <0 0 0 0xe1100000 0 0x100000>; - reg = <0x0 0xe1110000 0 0x01000>, - <0x0 0xe112f000 0 0x02000>, - <0x0 0xe1140000 0 0x10000>, - <0x0 0xe1160000 0 0x10000>; - v2m0: v2m@8000 { - compatible = "arm,gic-v2m-frame"; - msi-controller; - reg = <0x0 0x80000 0 0x1000>; - }; - - .... - - v2mN: v2m@9000 { - compatible = "arm,gic-v2m-frame"; - msi-controller; - reg = <0x0 0x90000 0 0x1000>; - }; - }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml new file mode 100644 index 000000000000..758fbd7128e7 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic.yaml @@ -0,0 +1,223 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/interrupt-controller/arm,gic.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ARM Generic Interrupt Controller v1 and v2 + +maintainers: + - Marc Zyngier + +description: |+ + ARM SMP cores are often associated with a GIC, providing per processor + interrupts (PPI), shared processor interrupts (SPI) and software + generated interrupts (SGI). + + Primary GIC is attached directly to the CPU and typically has PPIs and SGIs. + Secondary GICs are cascaded into the upward interrupt controller and do not + have PPIs or SGIs. 
+ +allOf: + - $ref: /schemas/interrupt-controller.yaml# + +properties: + compatible: + oneOf: + - items: + - enum: + - arm,arm11mp-gic + - arm,cortex-a15-gic + - arm,cortex-a7-gic + - arm,cortex-a5-gic + - arm,cortex-a9-gic + - arm,eb11mp-gic + - arm,gic-400 + - arm,pl390 + - arm,tc11mp-gic + - nvidia,tegra210-agic + - qcom,msm-8660-qgic + - qcom,msm-qgic2 + + - items: + - const: arm,arm1176jzf-devchip-gic + - const: arm,arm11mp-gic + + - items: + - const: brcm,brahma-b15-gic + - const: arm,cortex-a15-gic + + interrupt-controller: true + + "#address-cells": + enum: [ 0, 1 ] + "#size-cells": + const: 1 + + "#interrupt-cells": + const: 3 + description: | + The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI + interrupts. + + The 2nd cell contains the interrupt number for the interrupt type. + SPI interrupts are in the range [0-987]. PPI interrupts are in the + range [0-15]. + + The 3rd cell is the flags, encoded as follows: + bits[3:0] trigger type and level flags. + 1 = low-to-high edge triggered + 2 = high-to-low edge triggered (invalid for SPIs) + 4 = active high level-sensitive + 8 = active low level-sensitive (invalid for SPIs). + bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of + the 8 possible cpus attached to the GIC. A bit set to '1' indicated + the interrupt is wired to that CPU. Only valid for PPI interrupts. + Also note that the configurability of PPI interrupts is IMPLEMENTATION + DEFINED and as such not guaranteed to be present (most SoC available + in 2014 seem to ignore the setting of this flag and use the hardware + default value). + + reg: + description: | + Specifies base physical address(s) and size of the GIC registers. The + first region is the GIC distributor register base and size. The 2nd region + is the GIC cpu interface register base and size. + + For GICv2 with virtualization extensions, additional regions are + required for specifying the base physical address and size of the VGIC + registers. The first additional region is the GIC virtual interface + control register base and size. The 2nd additional region is the GIC + virtual cpu interface register base and size. + minItems: 2 + maxItems: 4 + + interrupts: + description: Interrupt source of the parent interrupt controller on + secondary GICs, or VGIC maintenance interrupt on primary GIC (see + below). + maxItems: 1 + + cpu-offset: + description: per-cpu offset within the distributor and cpu interface + regions, used when the GIC doesn't have banked registers. The offset + is cpu-offset * cpu-nr. + $ref: /schemas/types.yaml#/definitions/uint32 + + clocks: + minItems: 1 + maxItems: 2 + + clock-names: + description: List of names for the GIC clock input(s). Valid clock names + depend on the GIC variant. + oneOf: + - const: ic_clk # for "arm,arm11mp-gic" + - const: PERIPHCLKEN # for "arm,cortex-a15-gic" + - items: # for "arm,cortex-a9-gic" + - const: PERIPHCLK + - const: PERIPHCLKEN + - const: clk # for "arm,gic-400" and "nvidia,tegra210" + - const: gclk #for "arm,pl390" + + power-domains: + maxItems: 1 + +required: + - compatible + - reg + +patternProperties: + "^v2m@[0-9a-f]+$": + description: | + * GICv2m extension for MSI/MSI-x support (Optional) + + Certain revisions of GIC-400 supports MSI/MSI-x via V2M register frame(s). + This is enabled by specifying v2m sub-node(s). 
+ + properties: + compatible: + const: arm,gic-v2m-frame + + msi-controller: true + + reg: + maxItems: 1 + description: GICv2m MSI interface register base and size + + arm,msi-base-spi: + description: When the MSI_TYPER register contains an incorrect value, + this property should contain the SPI base of the MSI frame, overriding + the HW value. + $ref: /schemas/types.yaml#/definitions/uint32 + + arm,msi-num-spis: + description: When the MSI_TYPER register contains an incorrect value, + this property should contain the number of SPIs assigned to the + frame, overriding the HW value. + $ref: /schemas/types.yaml#/definitions/uint32 + + required: + - compatible + - msi-controller + - reg + + additionalProperties: false + +additionalProperties: false + +examples: + - | + // GICv1 + intc: interrupt-controller@fff11000 { + compatible = "arm,cortex-a9-gic"; + #interrupt-cells = <3>; + #address-cells = <1>; + interrupt-controller; + reg = <0xfff11000 0x1000>, + <0xfff10100 0x100>; + }; + + - | + // GICv2 + interrupt-controller@2c001000 { + compatible = "arm,cortex-a15-gic"; + #interrupt-cells = <3>; + interrupt-controller; + reg = <0x2c001000 0x1000>, + <0x2c002000 0x2000>, + <0x2c004000 0x2000>, + <0x2c006000 0x2000>; + interrupts = <1 9 0xf04>; + }; + + - | + // GICv2m extension for MSI/MSI-x support + interrupt-controller@e1101000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + interrupt-controller; + interrupts = <1 8 0xf04>; + ranges = <0 0 0 0xe1100000 0 0x100000>; + reg = <0x0 0xe1110000 0 0x01000>, + <0x0 0xe112f000 0 0x02000>, + <0x0 0xe1140000 0 0x10000>, + <0x0 0xe1160000 0 0x10000>; + + v2m0: v2m@8000 { + compatible = "arm,gic-v2m-frame"; + msi-controller; + reg = <0x0 0x80000 0 0x1000>; + }; + + //... + + v2mN: v2m@9000 { + compatible = "arm,gic-v2m-frame"; + msi-controller; + reg = <0x0 0x90000 0 0x1000>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt index 45790ce6f5b9..582991c426ee 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt @@ -6,8 +6,9 @@ Required properties: - "fsl,imx8m-irqsteer" - "fsl,imx-irqsteer" - reg: Physical base address and size of registers. -- interrupts: Should contain the parent interrupt line used to multiplex the - input interrupts. +- interrupts: Should contain the up to 8 parent interrupt lines used to + multiplex the input interrupts. They should be specified sequentially + from output 0 to 7. - clocks: Should contain one clock for entry in clock-names see Documentation/devicetree/bindings/clock/clock-bindings.txt - clock-names: @@ -16,8 +17,8 @@ Required properties: - #interrupt-cells: Specifies the number of cells needed to encode an interrupt source. The value must be 1. - fsl,channel: The output channel that all input IRQs should be steered into. -- fsl,irq-groups: Number of IRQ groups managed by this controller instance. - Each group manages 64 input interrupts. +- fsl,num-irqs: Number of input interrupts of this channel. + Should be multiple of 32 input interrupts and up to 512 interrupts. 
Example: @@ -28,7 +29,7 @@ Example: clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>; clock-names = "ipg"; fsl,channel = <0>; - fsl,irq-groups = <1>; + fsl,num-irqs = <64>; interrupt-controller; #interrupt-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt new file mode 100644 index 000000000000..a63ed9fcb535 --- /dev/null +++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt @@ -0,0 +1,24 @@ +Loongson ls1x Interrupt Controller + +Required properties: + +- compatible : should be "loongson,ls1x-intc". Valid strings are: + +- reg : Specifies base physical address and size of the registers. +- interrupt-controller : Identifies the node as an interrupt controller +- #interrupt-cells : Specifies the number of cells needed to encode an + interrupt source. The value shall be 2. +- interrupts : Specifies the CPU interrupt the controller is connected to. + +Example: + +intc: interrupt-controller@1fd01040 { + compatible = "loongson,ls1x-intc"; + reg = <0x1fd01040 0x18>; + + interrupt-controller; + #interrupt-cells = <2>; + + interrupt-parent = <&cpu_intc>; + interrupts = <2>; +}; diff --git a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt index 33a98eb44949..c5d589108a94 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/mediatek,sysirq.txt @@ -9,6 +9,7 @@ Required properties: "mediatek,mt8135-sysirq", "mediatek,mt6577-sysirq": for MT8135 "mediatek,mt8127-sysirq", "mediatek,mt6577-sysirq": for MT8127 "mediatek,mt7622-sysirq", "mediatek,mt6577-sysirq": for MT7622 + "mediatek,mt7623-sysirq", "mediatek,mt6577-sysirq": for MT7623 "mediatek,mt6795-sysirq", "mediatek,mt6577-sysirq": for MT6795 "mediatek,mt6797-sysirq", "mediatek,mt6577-sysirq": for MT6797 "mediatek,mt6765-sysirq", "mediatek,mt6577-sysirq": for MT6765 diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt deleted file mode 100644 index 099d9362ebc1..000000000000 --- a/Documentation/devicetree/bindings/iommu/nvidia,tegra20-gart.txt +++ /dev/null @@ -1,14 +0,0 @@ -NVIDIA Tegra 20 GART - -Required properties: -- compatible: "nvidia,tegra20-gart" -- reg: Two pairs of cells specifying the physical address and size of - the memory controller registers and the GART aperture respectively. - -Example: - - gart { - compatible = "nvidia,tegra20-gart"; - reg = <0x7000f024 0x00000018 /* controller registers */ - 0x58000000 0x02000000>; /* GART aperture */ - }; diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt index aa1399814a2a..70876ac11367 100644 --- a/Documentation/devicetree/bindings/leds/common.txt +++ b/Documentation/devicetree/bindings/leds/common.txt @@ -37,6 +37,18 @@ Optional properties for child nodes: "ide-disk" - LED indicates IDE disk activity (deprecated), in new implementations use "disk-activity" "timer" - LED flashes at a fixed, configurable rate + "pattern" - LED alters the brightness for the specified duration with one + software timer (requires "led-pattern" property) + +- led-pattern : Array of integers with default pattern for certain triggers. 
+ Each trigger may parse this property differently: + - one-shot : two numbers specifying delay on and delay off (in ms), + - timer : two numbers specifying delay on and delay off (in ms), + - pattern : the pattern is given by a series of tuples, of + brightness and duration (in ms). The exact format is + described in: + Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt + - led-max-microamp : Maximum LED supply current in microamperes. This property can be made mandatory for the board configurations diff --git a/Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt b/Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt new file mode 100644 index 000000000000..d3696680bfc8 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-trigger-pattern.txt @@ -0,0 +1,49 @@ +* Pattern format for LED pattern trigger + +The pattern is given by a series of tuples, of brightness and duration (ms). +The LED is expected to traverse the series and each brightness value for the +specified duration. Duration of 0 means brightness should immediately change to +new value, and writing malformed pattern deactivates any active one. + +1. For gradual dimming, the dimming interval now is set as 50 milliseconds. So +the tuple with duration less than dimming interval (50ms) is treated as a step +change of brightness, i.e. the subsequent brightness will be applied without +adding intervening dimming intervals. + +The gradual dimming format of the software pattern values should be: +"brightness_1 duration_1 brightness_2 duration_2 brightness_3 duration_3 ...". +For example (using sysfs interface): + +echo 0 1000 255 2000 > pattern + +It will make the LED go gradually from zero-intensity to max (255) intensity in +1000 milliseconds, then back to zero intensity in 2000 milliseconds: + +LED brightness + ^ +255-| / \ / \ / + | / \ / \ / + | / \ / \ / + | / \ / \ / + 0-| / \/ \/ + +---0----1----2----3----4----5----6------------> time (s) + +2. To make the LED go instantly from one brightness value to another, we should +use zero-time lengths (the brightness must be same as the previous tuple's). So +the format should be: "brightness_1 duration_1 brightness_1 0 brightness_2 +duration_2 brightness_2 0 ...". +For example (using sysfs interface): + +echo 0 1000 0 0 255 2000 255 0 > pattern + +It will make the LED stay off for one second, then stay at max brightness for +two seconds: + +LED brightness + ^ +255-| +---------+ +---------+ + | | | | | + | | | | | + | | | | | + 0-| -----+ +----+ +---- + +---0----1----2----3----4----5----6------------> time (s) diff --git a/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt new file mode 100644 index 000000000000..4438432bfe9b --- /dev/null +++ b/Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt @@ -0,0 +1,127 @@ +Xilinx IPI Mailbox Controller +======================================== + +The Xilinx IPI(Inter Processor Interrupt) mailbox controller is to manage +messaging between two Xilinx Zynq UltraScale+ MPSoC IPI agents. Each IPI +agent owns registers used for notification and buffers for message. 
+ + +-------------------------------------+ + | Xilinx ZynqMP IPI Controller | + +-------------------------------------+ + +--------------------------------------------------+ +ATF | | + | | + | | + +--------------------------+ | + | | + | | + +--------------------------------------------------+ + +------------------------------------------+ + | +----------------+ +----------------+ | +Hardware | | IPI Agent | | IPI Buffers | | + | | Registers | | | | + | | | | | | + | +----------------+ +----------------+ | + | | + | Xilinx IPI Agent Block | + +------------------------------------------+ + + +Controller Device Node: +=========================== +Required properties: +-------------------- +IPI agent node: +- compatible: Shall be: "xlnx,zynqmp-ipi-mailbox" +- interrupt-parent: Phandle for the interrupt controller +- interrupts: Interrupt information corresponding to the + interrupt-names property. +- xlnx,ipi-id: local Xilinx IPI agent ID +- #address-cells: number of address cells of internal IPI mailbox nodes +- #size-cells: number of size cells of internal IPI mailbox nodes + +Internal IPI mailbox node: +- reg: IPI buffers address ranges +- reg-names: Names of the reg resources. It should have: + * local_request_region + - IPI request msg buffer written by local and read + by remote + * local_response_region + - IPI response msg buffer written by local and read + by remote + * remote_request_region + - IPI request msg buffer written by remote and read + by local + * remote_response_region + - IPI response msg buffer written by remote and read + by local +- #mbox-cells: Shall be 1. It contains: + * tx(0) or rx(1) channel +- xlnx,ipi-id: remote Xilinx IPI agent ID of which the mailbox is + connected to. + +Optional properties: +-------------------- +- method: The method of accessing the IPI agent registers. + Permitted values are: "smc" and "hvc". Default is + "smc". + +Client Device Node: +=========================== +Required properties: +-------------------- +- mboxes: Standard property to specify a mailbox + (See ./mailbox.txt) +- mbox-names: List of identifier strings for each mailbox + channel. + +Example: +=========================== + zynqmp_ipi { + compatible = "xlnx,zynqmp-ipi-mailbox"; + interrupt-parent = <&gic>; + interrupts = <0 29 4>; + xlnx,ipi-id = <0>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + /* APU<->RPU0 IPI mailbox controller */ + ipi_mailbox_rpu0: mailbox@ff90400 { + reg = <0xff990400 0x20>, + <0xff990420 0x20>, + <0xff990080 0x20>, + <0xff9900a0 0x20>; + reg-names = "local_request_region", + "local_response_region", + "remote_request_region", + "remote_response_region"; + #mbox-cells = <1>; + xlnx,ipi-id = <1>; + }; + /* APU<->RPU1 IPI mailbox controller */ + ipi_mailbox_rpu1: mailbox@ff990440 { + reg = <0xff990440 0x20>, + <0xff990460 0x20>, + <0xff990280 0x20>, + <0xff9902a0 0x20>; + reg-names = "local_request_region", + "local_response_region", + "remote_request_region", + "remote_response_region"; + #mbox-cells = <1>; + xlnx,ipi-id = <2>; + }; + }; + rpu0 { + ... + mboxes = <&ipi_mailbox_rpu0 0>, + <&ipi_mailbox_rpu0 1>; + mbox-names = "tx", "rx"; + }; + rpu1 { + ... 
+ mboxes = <&ipi_mailbox_rpu1 0>, + <&ipi_mailbox_rpu1 1>; + mbox-names = "tx", "rx"; + }; diff --git a/Documentation/devicetree/bindings/media/i2c/adv748x.txt b/Documentation/devicetree/bindings/media/i2c/adv748x.txt index 5dddc95f9cc4..4f91686e54a6 100644 --- a/Documentation/devicetree/bindings/media/i2c/adv748x.txt +++ b/Documentation/devicetree/bindings/media/i2c/adv748x.txt @@ -48,7 +48,16 @@ are numbered as follows. TXA source 10 TXB source 11 -The digital output port nodes must contain at least one endpoint. +The digital output port nodes, when present, shall contain at least one +endpoint. Each of those endpoints shall contain the data-lanes property as +described in video-interfaces.txt. + +Required source endpoint properties: + - data-lanes: an array of physical data lane indexes + The accepted value(s) for this property depends on which of the two + sources are described. For TXA 1, 2 or 4 data lanes can be described + while for TXB only 1 data lane is valid. See video-interfaces.txt + for detailed description. Ports are optional if they are not connected to anything at the hardware level. diff --git a/Documentation/devicetree/bindings/media/i2c/melexis,mlx90640.txt b/Documentation/devicetree/bindings/media/i2c/melexis,mlx90640.txt new file mode 100644 index 000000000000..060d2b7a5893 --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/melexis,mlx90640.txt @@ -0,0 +1,20 @@ +* Melexis MLX90640 FIR Sensor + +Melexis MLX90640 FIR sensor support which allows recording of thermal data +with 32x24 resolution excluding 2 lines of coefficient data that is used by +userspace to render processed frames. + +Required Properties: + - compatible : Must be "melexis,mlx90640" + - reg : i2c address of the device + +Example: + + i2c0@1c22000 { + ... + mlx90640@33 { + compatible = "melexis,mlx90640"; + reg = <0x33>; + }; + ... + }; diff --git a/Documentation/devicetree/bindings/media/i2c/mt9m001.txt b/Documentation/devicetree/bindings/media/i2c/mt9m001.txt new file mode 100644 index 000000000000..c920552b03ef --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/mt9m001.txt @@ -0,0 +1,38 @@ +MT9M001: 1/2-Inch Megapixel Digital Image Sensor + +The MT9M001 is an SXGA-format with a 1/2-inch CMOS active-pixel digital +image sensor. It is programmable through I2C interface. + +Required Properties: + +- compatible: shall be "onnn,mt9m001". +- clocks: reference to the master clock into sensor + +Optional Properties: + +- reset-gpios: GPIO handle which is connected to the reset pin of the chip. + Active low. +- standby-gpios: GPIO handle which is connected to the standby pin of the chip. + Active high. + +The device node must contain one 'port' child node with one 'endpoint' child +sub-node for its digital output video port, in accordance with the video +interface bindings defined in: +Documentation/devicetree/bindings/media/video-interfaces.txt + +Example: + + &i2c1 { + camera-sensor@5d { + compatible = "onnn,mt9m001"; + reg = <0x5d>; + reset-gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + standby-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>; + clocks = <&camera_clk>; + port { + mt9m001_out: endpoint { + remote-endpoint = <&vcap_in>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/media/i2c/ov5645.txt b/Documentation/devicetree/bindings/media/i2c/ov5645.txt index fd7aec9f8e24..72ad992f77be 100644 --- a/Documentation/devicetree/bindings/media/i2c/ov5645.txt +++ b/Documentation/devicetree/bindings/media/i2c/ov5645.txt @@ -26,9 +26,9 @@ Example: &i2c1 { ... 
- ov5645: ov5645@78 { + ov5645: ov5645@3c { compatible = "ovti,ov5645"; - reg = <0x78>; + reg = <0x3c>; enable-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; reset-gpios = <&gpio5 20 GPIO_ACTIVE_LOW>; @@ -37,7 +37,7 @@ Example: clocks = <&clks 200>; clock-names = "xclk"; - clock-frequency = <23880000>; + clock-frequency = <24000000>; vdddo-supply = <&camera_dovdd_1v8>; vdda-supply = <&camera_avdd_2v8>; diff --git a/Documentation/devicetree/bindings/media/imx7-csi.txt b/Documentation/devicetree/bindings/media/imx7-csi.txt new file mode 100644 index 000000000000..3c07bc676bc3 --- /dev/null +++ b/Documentation/devicetree/bindings/media/imx7-csi.txt @@ -0,0 +1,45 @@ +Freescale i.MX7 CMOS Sensor Interface +===================================== + +csi node +-------- + +This is device node for the CMOS Sensor Interface (CSI) which enables the chip +to connect directly to external CMOS image sensors. + +Required properties: + +- compatible : "fsl,imx7-csi"; +- reg : base address and length of the register set for the device; +- interrupts : should contain CSI interrupt; +- clocks : list of clock specifiers, see + Documentation/devicetree/bindings/clock/clock-bindings.txt for details; +- clock-names : must contain "axi", "mclk" and "dcic" entries, matching + entries in the clock property; + +The device node shall contain one 'port' child node with one child 'endpoint' +node, according to the bindings defined in: +Documentation/devicetree/bindings/media/video-interfaces.txt. + +In the following example a remote endpoint is a video multiplexer. + +example: + + csi: csi@30710000 { + #address-cells = <1>; + #size-cells = <0>; + + compatible = "fsl,imx7-csi"; + reg = <0x30710000 0x10000>; + interrupts = ; + clocks = <&clks IMX7D_CLK_DUMMY>, + <&clks IMX7D_CSI_MCLK_ROOT_CLK>, + <&clks IMX7D_CLK_DUMMY>; + clock-names = "axi", "mclk", "dcic"; + + port { + csi_from_csi_mux: endpoint { + remote-endpoint = <&csi_mux_to_csi>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt b/Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt new file mode 100644 index 000000000000..71fd74ed3ec8 --- /dev/null +++ b/Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt @@ -0,0 +1,90 @@ +Freescale i.MX7 Mipi CSI2 +========================= + +mipi_csi2 node +-------------- + +This is the device node for the MIPI CSI-2 receiver core in i.MX7 SoC. It is +compatible with previous version of Samsung D-phy. + +Required properties: + +- compatible : "fsl,imx7-mipi-csi2"; +- reg : base address and length of the register set for the device; +- interrupts : should contain MIPI CSIS interrupt; +- clocks : list of clock specifiers, see + Documentation/devicetree/bindings/clock/clock-bindings.txt for details; +- clock-names : must contain "pclk", "wrap" and "phy" entries, matching + entries in the clock property; +- power-domains : a phandle to the power domain, see + Documentation/devicetree/bindings/power/power_domain.txt for details. 
+- reset-names : should include following entry "mrst"; +- resets : a list of phandle, should contain reset entry of + reset-names; +- phy-supply : from the generic phy bindings, a phandle to a regulator that + provides power to MIPI CSIS core; + +Optional properties: + +- clock-frequency : The IP's main (system bus) clock frequency in Hz, default + value when this property is not specified is 166 MHz; +- fsl,csis-hs-settle : differential receiver (HS-RX) settle time; + +The device node should contain two 'port' child nodes with one child 'endpoint' +node, according to the bindings defined in: + Documentation/devicetree/bindings/ media/video-interfaces.txt. + The following are properties specific to those nodes. + +port node +--------- + +- reg : (required) can take the values 0 or 1, where 0 shall be + related to the sink port and port 1 shall be the source + one; + +endpoint node +------------- + +- data-lanes : (required) an array specifying active physical MIPI-CSI2 + data input lanes and their mapping to logical lanes; this + shall only be applied to port 0 (sink port), the array's + content is unused only its length is meaningful, + in this case the maximum length supported is 2; + +example: + + mipi_csi: mipi-csi@30750000 { + #address-cells = <1>; + #size-cells = <0>; + + compatible = "fsl,imx7-mipi-csi2"; + reg = <0x30750000 0x10000>; + interrupts = ; + clocks = <&clks IMX7D_IPG_ROOT_CLK>, + <&clks IMX7D_MIPI_CSI_ROOT_CLK>, + <&clks IMX7D_MIPI_DPHY_ROOT_CLK>; + clock-names = "pclk", "wrap", "phy"; + clock-frequency = <166000000>; + power-domains = <&pgc_mipi_phy>; + phy-supply = <®_1p0d>; + resets = <&src IMX7_RESET_MIPI_PHY_MRST>; + reset-names = "mrst"; + fsl,csis-hs-settle = <3>; + + port@0 { + reg = <0>; + + mipi_from_sensor: endpoint { + remote-endpoint = <&ov2680_to_mipi>; + data-lanes = <1>; + }; + }; + + port@1 { + reg = <1>; + + mipi_vc0_to_csi_mux: endpoint { + remote-endpoint = <&csi_mux_from_mipi_vc0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt index 2a615d84a682..b6b5dde6abd8 100644 --- a/Documentation/devicetree/bindings/media/mediatek-vcodec.txt +++ b/Documentation/devicetree/bindings/media/mediatek-vcodec.txt @@ -66,6 +66,15 @@ vcodec_dec: vcodec@16000000 { "vencpll", "venc_lt_sel", "vdec_bus_clk_src"; + assigned-clocks = <&topckgen CLK_TOP_VENC_LT_SEL>, + <&topckgen CLK_TOP_CCI400_SEL>, + <&topckgen CLK_TOP_VDEC_SEL>, + <&apmixedsys CLK_APMIXED_VCODECPLL>, + <&apmixedsys CLK_APMIXED_VENCPLL>; + assigned-clock-parents = <&topckgen CLK_TOP_VCODECPLL_370P5>, + <&topckgen CLK_TOP_UNIVPLL_D2>, + <&topckgen CLK_TOP_VCODECPLL>; + assigned-clock-rates = <0>, <0>, <0>, <1482000000>, <800000000>; }; vcodec_enc: vcodec@18002000 { @@ -105,4 +114,8 @@ vcodec_dec: vcodec@16000000 { "venc_sel", "venc_lt_sel_src", "venc_lt_sel"; + assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>, + <&topckgen CLK_TOP_VENC_LT_SEL>; + assigned-clock-parents = <&topckgen CLK_TOP_VENCPLL_D2>, + <&topckgen CLK_TOP_UNIVPLL1_D2>; }; diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt index 0dd84a183ca7..224a4615b418 100644 --- a/Documentation/devicetree/bindings/media/rcar_vin.txt +++ b/Documentation/devicetree/bindings/media/rcar_vin.txt @@ -7,12 +7,13 @@ family of devices. Each VIN instance has a single parallel input that supports RGB and YUV video, with both external synchronization and BT.656 synchronization for the latter. 
Depending on the instance the VIN input is connected to external SoC pins, or -on Gen3 platforms to a CSI-2 receiver. +on Gen3 and RZ/G2 platforms to a CSI-2 receiver. - compatible: Must be one or more of the following - "renesas,vin-r8a7743" for the R8A7743 device - "renesas,vin-r8a7744" for the R8A7744 device - "renesas,vin-r8a7745" for the R8A7745 device + - "renesas,vin-r8a774c0" for the R8A774C0 device - "renesas,vin-r8a7778" for the R8A7778 device - "renesas,vin-r8a7779" for the R8A7779 device - "renesas,vin-r8a7790" for the R8A7790 device @@ -61,10 +62,10 @@ The per-board settings Gen2 platforms: - data-enable-active: polarity of CLKENB signal, see [1] for description. Default is active high. -The per-board settings Gen3 platforms: +The per-board settings Gen3 and RZ/G2 platforms: -Gen3 platforms can support both a single connected parallel input source -from external SoC pins (port@0) and/or multiple parallel input sources +Gen3 and RZ/G2 platforms can support both a single connected parallel input +source from external SoC pins (port@0) and/or multiple parallel input sources from local SoC CSI-2 receivers (port@1) depending on SoC. - renesas,id - ID number of the VIN, VINx in the documentation. diff --git a/Documentation/devicetree/bindings/media/renesas,fcp.txt b/Documentation/devicetree/bindings/media/renesas,fcp.txt index 3ec91803ba58..79c37395b396 100644 --- a/Documentation/devicetree/bindings/media/renesas,fcp.txt +++ b/Documentation/devicetree/bindings/media/renesas,fcp.txt @@ -2,8 +2,9 @@ Renesas R-Car Frame Compression Processor (FCP) ----------------------------------------------- The FCP is a companion module of video processing modules in the Renesas R-Car -Gen3 SoCs. It provides data compression and decompression, data caching, and -conversion of AXI transactions in order to reduce the memory bandwidth. +Gen3 and RZ/G2 SoCs. It provides data compression and decompression, data +caching, and conversion of AXI transactions in order to reduce the memory +bandwidth. There are three types of FCP: FCP for Codec (FCPC), FCP for VSP (FCPV) and FCP for FDP (FCPF). Their configuration and behaviour depend on the module they diff --git a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt index 541d936b62e8..d63275e17afd 100644 --- a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt +++ b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt @@ -2,12 +2,13 @@ Renesas R-Car MIPI CSI-2 ------------------------ The R-Car CSI-2 receiver device provides MIPI CSI-2 capabilities for the -Renesas R-Car family of devices. It is used in conjunction with the +Renesas R-Car and RZ/G2 family of devices. It is used in conjunction with the R-Car VIN module, which provides the video capture capabilities. Mandatory properties -------------------- - compatible: Must be one or more of the following + - "renesas,r8a774c0-csi2" for the R8A774C0 device. - "renesas,r8a7795-csi2" for the R8A7795 device. - "renesas,r8a7796-csi2" for the R8A7796 device. - "renesas,r8a77965-csi2" for the R8A77965 device. 
diff --git a/Documentation/devicetree/bindings/media/renesas,vsp1.txt b/Documentation/devicetree/bindings/media/renesas,vsp1.txt index 16427017cb45..cd5a955b2ea0 100644 --- a/Documentation/devicetree/bindings/media/renesas,vsp1.txt +++ b/Documentation/devicetree/bindings/media/renesas,vsp1.txt @@ -2,13 +2,13 @@ The VSP is a video processing engine that supports up-/down-scaling, alpha blending, color space conversion and various other image processing features. -It can be found in the Renesas R-Car second generation SoCs. +It can be found in the Renesas R-Car Gen2, R-Car Gen3, RZ/G1, and RZ/G2 SoCs. Required properties: - compatible: Must contain one of the following values - - "renesas,vsp1" for the R-Car Gen2 VSP1 - - "renesas,vsp2" for the R-Car Gen3 VSP2 + - "renesas,vsp1" for the R-Car Gen2 and RZ/G1 VSP1 + - "renesas,vsp2" for the R-Car Gen3 and RZ/G2 VSP2 - reg: Base address and length of the registers block for the VSP. - interrupts: VSP interrupt specifier. diff --git a/Documentation/devicetree/bindings/media/si470x.txt b/Documentation/devicetree/bindings/media/si470x.txt new file mode 100644 index 000000000000..a9403558362e --- /dev/null +++ b/Documentation/devicetree/bindings/media/si470x.txt @@ -0,0 +1,26 @@ +* Silicon Labs FM Radio receiver + +The Silicon Labs Si470x is family of FM radio receivers with receive power scan +supporting 76-108 MHz, programmable through an I2C interface. +Some of them includes an RDS encoder. + +Required Properties: +- compatible: Should contain "silabs,si470x" +- reg: the I2C address of the device + +Optional Properties: +- interrupts : The interrupt number +- reset-gpios: GPIO specifier for the chips reset line + +Example: + +&i2c2 { + si470x@63 { + compatible = "silabs,si470x"; + reg = <0x63>; + + interrupt-parent = <&gpj2>; + interrupts = <4 IRQ_TYPE_EDGE_FALLING>; + reset-gpios = <&gpj2 5 GPIO_ACTIVE_HIGH>; + }; +}; diff --git a/Documentation/devicetree/bindings/media/sun6i-csi.txt b/Documentation/devicetree/bindings/media/sun6i-csi.txt index d4ab34f2240c..0dd540bb03db 100644 --- a/Documentation/devicetree/bindings/media/sun6i-csi.txt +++ b/Documentation/devicetree/bindings/media/sun6i-csi.txt @@ -6,8 +6,9 @@ Allwinner V3s SoC features a CSI module(CSI1) with parallel interface. Required properties: - compatible: value must be one of: * "allwinner,sun6i-a31-csi" - * "allwinner,sun8i-h3-csi", "allwinner,sun6i-a31-csi" + * "allwinner,sun8i-h3-csi" * "allwinner,sun8i-v3s-csi" + * "allwinner,sun50i-a64-csi" - reg: base address and size of the memory-mapped region. - interrupts: interrupt associated to this IP - clocks: phandles to the clocks feeding the CSI diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt index 7d60a50a4fa1..e55328237df4 100644 --- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt +++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra20-mc.txt @@ -1,26 +1,37 @@ NVIDIA Tegra20 MC(Memory Controller) Required properties: -- compatible : "nvidia,tegra20-mc" -- reg : Should contain 2 register ranges(address and length); see the - example below. Note that the MC registers are interleaved with the - GART registers, and hence must be represented as multiple ranges. +- compatible : "nvidia,tegra20-mc-gart" +- reg : Should contain 2 register ranges: physical base address and length of + the controller's registers and the GART aperture respectively. 
+- clocks: Must contain an entry for each entry in clock-names. + See ../clocks/clock-bindings.txt for details. +- clock-names: Must include the following entries: + - mc: the module's clock input - interrupts : Should contain MC General interrupt. - #reset-cells : Should be 1. This cell represents memory client module ID. The assignments may be found in header file or in the TRM documentation. +- #iommu-cells: Should be 0. This cell represents the number of cells in an + IOMMU specifier needed to encode an address. GART supports only a single + address space that is shared by all devices, therefore no additional + information needed for the address encoding. Example: mc: memory-controller@7000f000 { - compatible = "nvidia,tegra20-mc"; - reg = <0x7000f000 0x024 - 0x7000f03c 0x3c4>; - interrupts = <0 77 0x04>; + compatible = "nvidia,tegra20-mc-gart"; + reg = <0x7000f000 0x400 /* controller registers */ + 0x58000000 0x02000000>; /* GART aperture */ + clocks = <&tegra_car TEGRA20_CLK_MC>; + clock-names = "mc"; + interrupts = ; #reset-cells = <1>; + #iommu-cells = <0>; }; video-codec@6001a000 { compatible = "nvidia,tegra20-vde"; ... resets = <&mc TEGRA20_MC_RESET_VDE>; + iommus = <&mc>; }; diff --git a/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt b/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt index 34dd89087cff..86446074e206 100644 --- a/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt +++ b/Documentation/devicetree/bindings/mfd/aspeed-lpc.txt @@ -135,6 +135,8 @@ Required properties: - clocks: contains a phandle to the syscon node describing the clocks. There should then be one cell representing the clock to use +Optional properties: + - memory-region: A phandle to a reserved_memory region to be used for the LPC to AHB mapping diff --git a/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt new file mode 100644 index 000000000000..004b0158cf4d --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt @@ -0,0 +1,68 @@ +Cirrus Logic Lochnagar Audio Development Board + +Lochnagar is an evaluation and development board for Cirrus Logic +Smart CODEC and Amp devices. It allows the connection of most Cirrus +Logic devices on mini-cards, as well as allowing connection of +various application processor systems to provide a full evaluation +platform. Audio system topology, clocking and power can all be +controlled through the Lochnagar, allowing the device under test +to be used in a variety of possible use cases. + +Also see these documents for generic binding information: + [1] GPIO : ../gpio/gpio.txt + +And these for relevant defines: + [2] include/dt-bindings/pinctrl/lochnagar.h + [3] include/dt-bindings/clock/lochnagar.h + +And these documents for the required sub-node binding details: + [4] Clock: ../clock/cirrus,lochnagar.txt + [5] Pinctrl: ../pinctrl/cirrus,lochnagar.txt + [6] Regulator: ../regulator/cirrus,lochnagar.txt + +Required properties: + + - compatible : One of the following strings: + "cirrus,lochnagar1" + "cirrus,lochnagar2" + + - reg : I2C slave address + + - reset-gpios : Reset line to the Lochnagar, see [1]. + +Required sub-nodes: + + - lochnagar-clk : Binding for the clocking components, see [4]. + + - lochnagar-pinctrl : Binding for the pin control components, see [5]. + +Optional sub-nodes: + + - Bindings for the regulator components, see [6]. Only available on + Lochnagar 2. 
+ +Optional properties: + + - present-gpios : Host present line, indicating the presence of a + host system, see [1]. This can be omitted if the present line is + tied in hardware. + +Example: + +lochnagar: lochnagar@22 { + compatible = "cirrus,lochnagar2"; + reg = <0x22>; + + reset-gpios = <&gpio0 55 0>; + present-gpios = <&gpio0 60 0>; + + lochnagar-clk { + compatible = "cirrus,lochnagar2-clk"; + ... + }; + + lochnagar-pinctrl { + compatible = "cirrus,lochnagar-pinctrl"; + ... + }; +}; diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt index a4b056761eaa..d5f68ac78d15 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt +++ b/Documentation/devicetree/bindings/mfd/rohm,bd71837-pmic.txt @@ -23,6 +23,20 @@ Required properties: Optional properties: - clock-output-names : Should contain name for output clock. +- rohm,reset-snvs-powered : Transfer BD718x7 to SNVS state at reset. + +The BD718x7 supports two different HW states as reset target states. These states +are called SNVS and READY. In the READY state all the PMIC power outputs go +down and the OTP is reloaded. In the SNVS state all other logic and external +devices apart from the SNVS power domain are shut off. Please refer to the NXP +i.MX8 documentation for further information regarding the SNVS state. When a +reset is done via the SNVS state the PMIC OTP data is not reloaded. This causes +power outputs that have been under SW control to stay down when the reset has +switched the power state to SNVS. If the reset is done via the READY state the power +outputs will be returned to HW control by OTP loading. Thus the reset +target state is set to READY by default. If the SNVS state is used, the boot-critical +regulators must have the regulator-always-on and regulator-boot-on +properties set in their regulator nodes. Example: @@ -43,6 +57,7 @@ Example: #clock-cells = <0>; clocks = <&osc 0>; clock-output-names = "bd71837-32k-out"; + rohm,reset-snvs-powered; regulators { buck1: BUCK1 { @@ -50,8 +65,10 @@ Example: regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + regulator-always-on; regulator-ramp-delay = <1250>; }; + // [...] }; }; diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.txt b/Documentation/devicetree/bindings/mfd/st,stpmic1.txt new file mode 100644 index 000000000000..afd45c089585 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/st,stpmic1.txt @@ -0,0 +1,61 @@ +* STMicroelectronics STPMIC1 Power Management IC + +Required properties: +- compatible: "st,stpmic1" +- reg: The I2C slave address of the STPMIC1 chip. +- interrupts: The interrupt line the device is connected to. +- #interrupt-cells: Should be 2. +- interrupt-controller: Marks the device node as an interrupt controller. + Interrupt numbers are defined at + dt-bindings/mfd/st,stpmic1.h. + +The STPMIC1 consists of a varied group of sub-devices. +Each sub-device binding is described in its own documentation file.
+ +Device Description +------ ------------ +st,stpmic1-onkey : Power on key, see ../input/st,stpmic1-onkey.txt +st,stpmic1-regulators : Regulators, see ../regulator/st,stpmic1-regulator.txt +st,stpmic1-wdt : Watchdog, see ../watchdog/st,stpmic1-wdt.txt + +Example: + +#include + +pmic: pmic@33 { + compatible = "st,stpmic1"; + reg = <0x33>; + interrupt-parent = <&gpioa>; + interrupts = <0 2>; + + interrupt-controller; + #interrupt-cells = <2>; + + onkey { + compatible = "st,stpmic1-onkey"; + interrupts = ,; + interrupt-names = "onkey-falling", "onkey-rising"; + power-off-time-sec = <10>; + }; + + watchdog { + compatible = "st,stpmic1-wdt"; + }; + + regulators { + compatible = "st,stpmic1-regulators"; + + vdd_core: buck1 { + regulator-name = "vdd_core"; + regulator-boot-on; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1200000>; + }; + vdd: buck3 { + regulator-name = "vdd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-pull-down; + }; + }; diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt index c797c05cd3c2..d4408a417193 100644 --- a/Documentation/devicetree/bindings/mfd/stmpe.txt +++ b/Documentation/devicetree/bindings/mfd/stmpe.txt @@ -4,15 +4,29 @@ STMPE is an MFD device which may expose the following inbuilt devices: gpio, keypad, touchscreen, adc, pwm, rotator. Required properties: - - compatible : "st,stmpe[610|801|811|1600|1601|2401|2403]" - - reg : I2C/SPI address of the device + - compatible : "st,stmpe[610|801|811|1600|1601|2401|2403]" + - reg : I2C/SPI address of the device Optional properties: - - interrupts : The interrupt outputs from the controller - - interrupt-controller : Marks the device node as an interrupt controller - - wakeup-source : Marks the input device as wakable - - st,autosleep-timeout : Valid entries (ms); 4, 16, 32, 64, 128, 256, 512 and 1024 - - irq-gpio : If present, which GPIO to use for event IRQ + - interrupts : The interrupt outputs from the controller + - interrupt-controller : Marks the device node as an interrupt controller + - wakeup-source : Marks the input device as wakable + - st,autosleep-timeout : Valid entries (ms); 4, 16, 32, 64, 128, 256, 512 and 1024 + - irq-gpio : If present, which GPIO to use for event IRQ + +Optional properties for devices with touch and ADC (STMPE811|STMPE610): + - st,sample-time : ADC conversion time in number of clock. + 0 -> 36 clocks 4 -> 80 clocks (recommended) + 1 -> 44 clocks 5 -> 96 clocks + 2 -> 56 clocks 6 -> 124 clocks + 3 -> 64 clocks + - st,mod-12b : ADC Bit mode + 0 -> 10bit ADC 1 -> 12bit ADC + - st,ref-sel : ADC reference source + 0 -> internal 1 -> external + - st,adc-freq : ADC Clock speed + 0 -> 1.625 MHz 2 || 3 -> 6.5 MHz + 1 -> 3.25 MHz Example: diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt deleted file mode 100644 index a0c19bd1ce66..000000000000 --- a/Documentation/devicetree/bindings/mips/lantiq/rcu-gphy.txt +++ /dev/null @@ -1,36 +0,0 @@ -Lantiq XWAY SoC GPHY binding -============================ - -This binding describes a software-defined ethernet PHY, provided by the RCU -module on newer Lantiq XWAY SoCs (xRX200 and newer). 
- -------------------------------------------------------------------------------- -Required properties: -- compatible : Should be one of - "lantiq,xrx200a1x-gphy" - "lantiq,xrx200a2x-gphy" - "lantiq,xrx300-gphy" - "lantiq,xrx330-gphy" -- reg : Addrress of the GPHY FW load address register -- resets : Must reference the RCU GPHY reset bit -- reset-names : One entry, value must be "gphy" or optional "gphy2" -- clocks : A reference to the (PMU) GPHY clock gate - -Optional properties: -- lantiq,gphy-mode : GPHY_MODE_GE (default) or GPHY_MODE_FE as defined in - - - -------------------------------------------------------------------------------- -Example for the GPHys on the xRX200 SoCs: - -#include - gphy0: gphy@20 { - compatible = "lantiq,xrx200a2x-gphy"; - reg = <0x20 0x4>; - - resets = <&reset0 31 30>, <&reset1 7 7>; - reset-names = "gphy", "gphy2"; - clocks = <&pmu0 XRX200_PMU_GATE_GPHY>; - lantiq,gphy-mode = ; - }; diff --git a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt index 7f0822b4beae..58d51f480c9e 100644 --- a/Documentation/devicetree/bindings/mips/lantiq/rcu.txt +++ b/Documentation/devicetree/bindings/mips/lantiq/rcu.txt @@ -26,24 +26,6 @@ Example of the RCU bindings on a xRX200 SoC: ranges = <0x0 0x203000 0x100>; big-endian; - gphy0: gphy@20 { - compatible = "lantiq,xrx200a2x-gphy"; - reg = <0x20 0x4>; - - resets = <&reset0 31 30>, <&reset1 7 7>; - reset-names = "gphy", "gphy2"; - lantiq,gphy-mode = ; - }; - - gphy1: gphy@68 { - compatible = "lantiq,xrx200a2x-gphy"; - reg = <0x68 0x4>; - - resets = <&reset0 29 28>, <&reset1 6 6>; - reset-names = "gphy", "gphy2"; - lantiq,gphy-mode = ; - }; - reset0: reset-controller@10 { compatible = "lantiq,xrx200-reset"; reg = <0x10 4>, <0x14 4>; diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt new file mode 100644 index 000000000000..2a1827ab50d2 --- /dev/null +++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.txt @@ -0,0 +1,78 @@ +Qualcomm Technologies, Inc. FastRPC Driver + +The FastRPC implements an IPC (Inter-Processor Communication) +mechanism that allows for clients to transparently make remote method +invocations across DSP and APPS boundaries. This enables developers +to offload tasks to the DSP and free up the application processor for +other tasks. + +- compatible: + Usage: required + Value type: + Definition: must be "qcom,fastrpc" + +- label + Usage: required + Value type: + Definition: should specify the dsp domain name this fastrpc + corresponds to. must be one of this: "adsp", "mdsp", "sdsp", "cdsp" + +- #address-cells + Usage: required + Value type: + Definition: Must be 1 + +- #size-cells + Usage: required + Value type: + Definition: Must be 0 + += COMPUTE BANKS +Each subnode of the Fastrpc represents compute context banks available +on the dsp. +- All Compute context banks MUST contain the following properties: + +- compatible: + Usage: required + Value type: + Definition: must be "qcom,fastrpc-compute-cb" + +- reg + Usage: required + Value type: + Definition: Context Bank ID. + +- qcom,nsessions: + Usage: Optional + Value type: + Defination: A value indicating how many sessions can share this + context bank. Defaults to 1 when this property + is not specified. + +Example: + +adsp-pil { + compatible = "qcom,msm8996-adsp-pil"; + ... 
+ smd-edge { + label = "lpass"; + fastrpc { + compatible = "qcom,fastrpc"; + qcom,smd-channels = "fastrpcsmd-apps-dsp"; + label = "adsp"; + #address-cells = <1>; + #size-cells = <0>; + + cb@1 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <1>; + }; + + cb@2 { + compatible = "qcom,fastrpc-compute-cb"; + reg = <2>; + }; + ... + }; + }; +}; diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 9201a7d8d7b0..540c65ed9cba 100644 --- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -15,6 +15,7 @@ Required properties: "fsl,imx6q-usdhc" "fsl,imx6sl-usdhc" "fsl,imx6sx-usdhc" + "fsl,imx6ull-usdhc" "fsl,imx7d-usdhc" "fsl,imx8qxp-usdhc" diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt index f5a0923b34ca..cdbcfd3a4ff2 100644 --- a/Documentation/devicetree/bindings/mmc/mmc.txt +++ b/Documentation/devicetree/bindings/mmc/mmc.txt @@ -62,6 +62,8 @@ Optional properties: be referred to mmc-pwrseq-simple.txt. But now it's reused as a tunable delay waiting for I/O signalling and card power supply to be stable, regardless of whether pwrseq-simple is used. Default to 10ms if no available. +- supports-cqe : The presence of this property indicates that the corresponding + MMC host controller supports HW command queue feature. *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line polarity properties, we have to fix the meaning of the "normal" and "inverted" diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt index 32b4b4e41923..2cecdc71d94c 100644 --- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt +++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt @@ -39,12 +39,16 @@ sdhci@c8000200 { bus-width = <8>; }; -Optional properties for Tegra210 and Tegra186: +Optional properties for Tegra210, Tegra186 and Tegra194: - pinctrl-names, pinctrl-0, pinctrl-1 : Specify pad voltage configurations. Valid pinctrl-names are "sdmmc-3v3" and "sdmmc-1v8" for controllers supporting multiple voltage levels. The order of names should correspond to the pin configuration states in pinctrl-0 and pinctrl-1. +- pinctrl-names : "sdmmc-3v3-drv" and "sdmmc-1v8-drv" are applicable for + Tegra210 where pad config registers are in the pinmux register domain + for pull-up-strength and pull-down-strength values configuration when + using pads at 3V3 and 1V8 levels. - nvidia,only-1-8-v : The presence of this property indicates that the controller operates at a 1.8 V fixed I/O voltage. - nvidia,pad-autocal-pull-up-offset-3v3, diff --git a/Documentation/devicetree/bindings/mmc/ti-omap.txt b/Documentation/devicetree/bindings/mmc/ti-omap.txt index 8de579969763..02fd31cf361d 100644 --- a/Documentation/devicetree/bindings/mmc/ti-omap.txt +++ b/Documentation/devicetree/bindings/mmc/ti-omap.txt @@ -24,31 +24,3 @@ Examples: dmas = <&sdma 61 &sdma 62>; dma-names = "tx", "rx"; }; - -* TI MMC host controller for OMAP1 and 2420 - -The MMC Host Controller on TI OMAP1 and 2420 family provides -an interface for MMC, SD, and SDIO types of memory cards. - -This file documents differences between the core properties described -by mmc.txt and the properties used by the omap mmc driver. - -Note that this driver will not work with omap2430 or later omaps, -please see the omap hsmmc driver for the current omaps. 
- -Required properties: -- compatible: Must be "ti,omap2420-mmc", for OMAP2420 controllers -- ti,hwmods: For 2420, must be "msdi", where n is controller - instance starting 1 - -Examples: - - msdi1: mmc@4809c000 { - compatible = "ti,omap2420-mmc"; - ti,hwmods = "msdi1"; - reg = <0x4809c000 0x80>; - interrupts = <83>; - dmas = <&sdma 61 &sdma 62>; - dma-names = "tx", "rx"; - }; - diff --git a/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt new file mode 100644 index 000000000000..3983c11e062c --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt @@ -0,0 +1,60 @@ +Amlogic NAND Flash Controller (NFC) for GXBB/GXL/AXG family SoCs + +This file documents the properties in addition to those available in +the MTD NAND bindings. + +Required properties: +- compatible : contains one of: + - "amlogic,meson-gxl-nfc" + - "amlogic,meson-axg-nfc" +- clocks : + A list of phandle + clock-specifier pairs for the clocks listed + in clock-names. + +- clock-names: Should contain the following: + "core" - NFC module gate clock + "device" - device clock from eMMC sub clock controller + "rx" - rx clock phase + "tx" - tx clock phase + +- amlogic,mmc-syscon : Required for NAND clocks, it's shared with SD/eMMC + controller port C + +Optional children nodes: +Children nodes represent the available nand chips. + +Other properties: +see Documentation/devicetree/bindings/mtd/nand.txt for generic bindings. + +Example demonstrate on AXG SoC: + + sd_emmc_c_clkc: mmc@7000 { + compatible = "amlogic,meson-axg-mmc-clkc", "syscon"; + reg = <0x0 0x7000 0x0 0x800>; + }; + + nand-controller@7800 { + compatible = "amlogic,meson-axg-nfc"; + reg = <0x0 0x7800 0x0 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + + clocks = <&clkc CLKID_SD_EMMC_C>, + <&sd_emmc_c_clkc CLKID_MMC_DIV>, + <&sd_emmc_c_clkc CLKID_MMC_PHASE_RX>, + <&sd_emmc_c_clkc CLKID_MMC_PHASE_TX>; + clock-names = "core", "device", "rx", "tx"; + amlogic,mmc-syscon = <&sd_emmc_c_clkc>; + + pinctrl-names = "default"; + pinctrl-0 = <&nand_pins>; + + nand@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + + nand-on-flash-bbt; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt index bb2075df9b38..4345c3a6f530 100644 --- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt @@ -4,6 +4,7 @@ Required properties: - compatible : should be one of the following: Generic default - "cdns,qspi-nor". For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor". + For TI AM654 SoC - "ti,am654-ospi", "cdns,qspi-nor". - reg : Contains two entries, each of which is a tuple consisting of a physical address and length. The first entry is the address and length of the controller register set. 
The second entry is the diff --git a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt index 56d3668e2c50..a12e3b5c495d 100644 --- a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt @@ -1,4 +1,4 @@ -* Serial NOR flash controller for MTK MT81xx (and similar) +* Serial NOR flash controller for MediaTek SoCs Required properties: - compatible: For mt8173, compatible should be "mediatek,mt8173-nor", @@ -10,6 +10,7 @@ Required properties: "mediatek,mt2712-nor", "mediatek,mt8173-nor" "mediatek,mt7622-nor", "mediatek,mt8173-nor" "mediatek,mt7623-nor", "mediatek,mt8173-nor" + "mediatek,mt7629-nor", "mediatek,mt8173-nor" "mediatek,mt8173-nor" - reg: physical base address and length of the controller's register - clocks: the phandle of the clocks needed by the nor controller diff --git a/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt new file mode 100644 index 000000000000..ad2bef826582 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/stm32-fmc2-nand.txt @@ -0,0 +1,61 @@ +STMicroelectronics Flexible Memory Controller 2 (FMC2) +NAND Interface + +Required properties: +- compatible: Should be one of: + * st,stm32mp15-fmc2 +- reg: NAND flash controller memory areas. + First region contains the register location. + Regions 2 to 4 respectively contain the data, command, + and address space for CS0. + Regions 5 to 7 contain the same areas for CS1. +- interrupts: The interrupt number +- pinctrl-0: Standard Pinctrl phandle (see: pinctrl/pinctrl-bindings.txt) +- clocks: The clock needed by the NAND flash controller + +Optional properties: +- resets: Reference to a reset controller asserting the FMC controller +- dmas: DMA specifiers (see: dma/stm32-mdma.txt) +- dma-names: Must be "tx", "rx" and "ecc" + +* NAND device bindings: + +Required properties: +- reg: describes the CS lines assigned to the NAND device. 
+ +Optional properties: +- nand-on-flash-bbt: see nand.txt +- nand-ecc-strength: see nand.txt +- nand-ecc-step-size: see nand.txt + +The following ECC strength and step size are currently supported: + - nand-ecc-strength = <1>, nand-ecc-step-size = <512> (Hamming) + - nand-ecc-strength = <4>, nand-ecc-step-size = <512> (BCH4) + - nand-ecc-strength = <8>, nand-ecc-step-size = <512> (BCH8) (default) + +Example: + + fmc: nand-controller@58002000 { + compatible = "st,stm32mp15-fmc2"; + reg = <0x58002000 0x1000>, + <0x80000000 0x1000>, + <0x88010000 0x1000>, + <0x88020000 0x1000>, + <0x81000000 0x1000>, + <0x89010000 0x1000>, + <0x89020000 0x1000>; + interrupts = ; + clocks = <&rcc FMC_K>; + resets = <&rcc FMC_R>; + pinctrl-names = "default"; + pinctrl-0 = <&fmc_pins_a>; + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + reg = <0>; + nand-on-flash-bbt; + #address-cells = <1>; + #size-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt index 37d67926dd6d..b1ad6ee68e90 100644 --- a/Documentation/devicetree/bindings/net/btusb.txt +++ b/Documentation/devicetree/bindings/net/btusb.txt @@ -9,6 +9,9 @@ Required properties: (more may be added later) are: "usb1286,204e" (Marvell 8997) + "usbcf3,e300" (Qualcomm QCA6174A) + "usb4ca,301a" (Qualcomm QCA6174A (Lite-On)) + Also, vendors that use btusb may have device additional properties, e.g: Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt diff --git a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt index 764c0c79b43d..5d76f991c027 100644 --- a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt +++ b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt @@ -1,4 +1,4 @@ -TI CPSW Phy mode Selection Device Tree Bindings +TI CPSW Phy mode Selection Device Tree Bindings (DEPRECATED) ----------------------------------------------- Required properties: diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt index 0f407fb371ce..e7db7268fd0f 100644 --- a/Documentation/devicetree/bindings/net/dsa/ksz.txt +++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt @@ -7,6 +7,11 @@ Required properties: of the following: - "microchip,ksz9477" - "microchip,ksz9897" + - "microchip,ksz9896" + - "microchip,ksz9567" + - "microchip,ksz8565" + - "microchip,ksz9893" + - "microchip,ksz9563" Optional properties: @@ -19,58 +24,96 @@ Examples: Ethernet switch connected via SPI to the host, CPU port wired to eth0: - eth0: ethernet@10001000 { - fixed-link { - speed = <1000>; - full-duplex; - }; - }; + eth0: ethernet@10001000 { + fixed-link { + speed = <1000>; + full-duplex; + }; + }; - spi1: spi@f8008000 { - pinctrl-0 = <&pinctrl_spi_ksz>; - cs-gpios = <&pioC 25 0>; - id = <1>; + spi1: spi@f8008000 { + pinctrl-0 = <&pinctrl_spi_ksz>; + cs-gpios = <&pioC 25 0>; + id = <1>; - ksz9477: ksz9477@0 { - compatible = "microchip,ksz9477"; - reg = <0>; + ksz9477: ksz9477@0 { + compatible = "microchip,ksz9477"; + reg = <0>; - spi-max-frequency = <44000000>; - spi-cpha; - spi-cpol; + spi-max-frequency = <44000000>; + spi-cpha; + spi-cpol; - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - label = "lan1"; - }; - port@1 { - reg = <1>; - label = "lan2"; - }; - port@2 { - reg = <2>; - label = "lan3"; - }; - port@3 { - reg = <3>; - label = "lan4"; - }; - port@4 { - reg = <4>; - label = "lan5"; - }; - port@5 { - reg = <5>; - label = "cpu"; - ethernet 
= <ð0>; - fixed-link { - speed = <1000>; - full-duplex; - }; - }; - }; - }; - }; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + label = "lan1"; + }; + port@1 { + reg = <1>; + label = "lan2"; + }; + port@2 { + reg = <2>; + label = "lan3"; + }; + port@3 { + reg = <3>; + label = "lan4"; + }; + port@4 { + reg = <4>; + label = "lan5"; + }; + port@5 { + reg = <5>; + label = "cpu"; + ethernet = <ð0>; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + }; + ksz8565: ksz8565@0 { + compatible = "microchip,ksz8565"; + reg = <0>; + + spi-max-frequency = <44000000>; + spi-cpha; + spi-cpol; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + label = "lan1"; + }; + port@1 { + reg = <1>; + label = "lan2"; + }; + port@2 { + reg = <2>; + label = "lan3"; + }; + port@3 { + reg = <3>; + label = "lan4"; + }; + port@6 { + reg = <6>; + label = "cpu"; + ethernet = <ð0>; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt index aa3527f71fdc..47aa205ee0bd 100644 --- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt +++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt @@ -3,12 +3,16 @@ Mediatek MT7530 Ethernet switch Required properties: -- compatible: Must be compatible = "mediatek,mt7530"; +- compatible: may be compatible = "mediatek,mt7530" + or compatible = "mediatek,mt7621" - #address-cells: Must be 1. - #size-cells: Must be 0. - mediatek,mcm: Boolean; if defined, indicates that either MT7530 is the part on multi-chip module belong to MT7623A has or the remotely standalone chip as the function MT7623N reference board provided for. + +If compatible mediatek,mt7530 is set then the following properties are required + - core-supply: Phandle to the regulator node necessary for the core power. - io-supply: Phandle to the regulator node necessary for the I/O power. See Documentation/devicetree/bindings/regulator/mt6323-regulator.txt diff --git a/Documentation/devicetree/bindings/net/fsl-enetc.txt b/Documentation/devicetree/bindings/net/fsl-enetc.txt new file mode 100644 index 000000000000..c812e25ae90f --- /dev/null +++ b/Documentation/devicetree/bindings/net/fsl-enetc.txt @@ -0,0 +1,69 @@ +* ENETC ethernet device tree bindings + +Depending on board design and ENETC port type (internal or +external) there are two supported link modes specified by +below device tree bindings. + +Required properties: + +- reg : Specifies PCIe Device Number and Function + Number of the ENETC endpoint device, according + to parent node bindings. +- compatible : Should be "fsl,enetc". + +1) The ENETC external port is connected to a MDIO configurable phy: + +In this case, the ENETC node should include a "mdio" sub-node +that in turn should contain the "ethernet-phy" node describing the +external phy. Below properties are required, their bindings +already defined in ethernet.txt or phy.txt, under +Documentation/devicetree/bindings/net/*. + +Required: + +- phy-handle : Phandle to a PHY on the MDIO bus. + Defined in ethernet.txt. + +- phy-connection-type : Defined in ethernet.txt. + +- mdio : "mdio" node, defined in mdio.txt. + +- ethernet-phy : "ethernet-phy" node, defined in phy.txt. 
+ +Example: + + ethernet@0,0 { + compatible = "fsl,enetc"; + reg = <0x000000 0 0 0 0>; + phy-handle = <&sgmii_phy0>; + phy-connection-type = "sgmii"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + sgmii_phy0: ethernet-phy@2 { + reg = <0x2>; + }; + }; + }; + +2) The ENETC port is an internal port or has a fixed-link external +connection: + +In this case, the ENETC port node defines a fixed link connection, +as specified by "fixed-link.txt", under +Documentation/devicetree/bindings/net/*. + +Required: + +- fixed-link : "fixed-link" node, defined in "fixed-link.txt". + +Example: + ethernet@0,2 { + compatible = "fsl,enetc"; + reg = <0x000200 0 0 0 0>; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index 3e17ac1d5d58..174f292d8a3e 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -3,8 +3,8 @@ Required properties: - compatible: Should be "cdns,[-]{macb|gem}" Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC. - Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs or the 10/100Mbit IP - available on sama5d3 SoCs. + Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs. + Use "cdns,sam9x60-macb" for Microchip sam9x60 SoC. Use "cdns,np4-macb" for NP4 SoC devices. Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb". Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt index bedcfd5a52cd..691f886cfc4a 100644 --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt @@ -19,7 +19,7 @@ Optional properties: "marvell,armada-370-neta" and 9800B for others. - clock-names: List of names corresponding to clocks property; shall be "core" for core clock and "bus" for the optional bus clock. - +- phys: comphy for the ethernet port, see ../phy/phy-bindings.txt Optional properties (valid only for Armada XP/38x): diff --git a/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt b/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt new file mode 100644 index 000000000000..534e38058fe0 --- /dev/null +++ b/Documentation/devicetree/bindings/net/mdio-mux-multiplexer.txt @@ -0,0 +1,82 @@ +Properties for an MDIO bus multiplexer consumer device + +This is a special case of MDIO mux when MDIO mux is defined as a consumer +of a mux producer device. The mux producer can be of any type like mmio mux +producer, gpio mux producer or generic register based mux producer. + +Required properties in addition to the MDIO Bus multiplexer properties: + +- compatible : should be "mmio-mux-multiplexer" +- mux-controls : mux controller node to use for operating the mux +- mdio-parent-bus : phandle to the parent MDIO bus. + +each child node of mdio bus multiplexer consumer device represent a mdio +bus. + +for more information please refer +Documentation/devicetree/bindings/mux/mux-controller.txt +and Documentation/devicetree/bindings/net/mdio-mux.txt + +Example: +In below example the Mux producer and consumer are separate nodes. 
+ +&i2c0 { + fpga@66 { // fpga connected to i2c + compatible = "fsl,lx2160aqds-fpga", "fsl,fpga-qixis-i2c", + "simple-mfd"; + reg = <0x66>; + + mux: mux-controller { // Mux Producer + compatible = "reg-mux"; + #mux-control-cells = <1>; + mux-reg-masks = <0x54 0xf8>, /* 0: reg 0x54, bits 7:3 */ + <0x54 0x07>; /* 1: reg 0x54, bits 2:0 */ + }; + }; +}; + +mdio-mux-1 { // Mux consumer + compatible = "mdio-mux-multiplexer"; + mux-controls = <&mux 0>; + mdio-parent-bus = <&emdio1>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + }; + + mdio@8 { + reg = <0x8>; + #address-cells = <1>; + #size-cells = <0>; + }; + + .. + .. +}; + +mdio-mux-2 { // Mux consumer + compatible = "mdio-mux-multiplexer"; + mux-controls = <&mux 1>; + mdio-parent-bus = <&emdio2>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + }; + + mdio@1 { + reg = <0x1>; + #address-cells = <1>; + #size-cells = <0>; + }; + + .. + .. +}; diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt index 14ceb2a5b4e8..41a7dcc80f5b 100644 --- a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt +++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt @@ -33,3 +33,67 @@ Example: clock-names = "ref"; }; }; + +MediaTek UART based Bluetooth Devices +================================== + +This device is a serial attached device to UART device and thus it must be a +child node of the serial node with UART. + +Please refer to the following documents for generic properties: + + Documentation/devicetree/bindings/serial/slave-device.txt + +Required properties: + +- compatible: Must be + "mediatek,mt7663u-bluetooth": for MT7663U device + "mediatek,mt7668u-bluetooth": for MT7668U device +- vcc-supply: Main voltage regulator +- pinctrl-names: Should be "default", "runtime" +- pinctrl-0: Should contain UART RXD low when the device is powered up to + enter proper bootstrap mode. +- pinctrl-1: Should contain UART mode pin ctrl + +Optional properties: + +- reset-gpios: GPIO used to reset the device whose initial state keeps low, + if the GPIO is missing, then board-level design should be + guaranteed. 
+- current-speed: Current baud rate of the device whose defaults to 921600 + +Example: + + uart1_pins_boot: uart1-default { + pins-dat { + pinmux = ; + output-low; + }; + }; + + uart1_pins_runtime: uart1-runtime { + pins-dat { + pinmux = , + ; + }; + }; + + uart1: serial@11003000 { + compatible = "mediatek,mt7623-uart", + "mediatek,mt6577-uart"; + reg = <0 0x11003000 0 0x400>; + interrupts = ; + clocks = <&pericfg CLK_PERI_UART1_SEL>, + <&pericfg CLK_PERI_UART1>; + clock-names = "baud", "bus"; + + bluetooth { + compatible = "mediatek,mt7663u-bluetooth"; + vcc-supply = <®_5v>; + reset-gpios = <&pio 24 GPIO_ACTIVE_LOW>; + pinctrl-names = "default", "runtime"; + pinctrl-0 = <&uart1_pins_boot>; + pinctrl-1 = <&uart1_pins_runtime>; + current-speed = <921600>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/nixge.txt b/Documentation/devicetree/bindings/net/nixge.txt index e55af7f0881a..85d7240a9b20 100644 --- a/Documentation/devicetree/bindings/net/nixge.txt +++ b/Documentation/devicetree/bindings/net/nixge.txt @@ -1,16 +1,52 @@ * NI XGE Ethernet controller Required properties: -- compatible: Should be "ni,xge-enet-2.00" -- reg: Address and length of the register set for the device +- compatible: Should be "ni,xge-enet-3.00", but can be "ni,xge-enet-2.00" for + older device trees with DMA engines co-located in the address map, + with the one reg entry to describe the whole device. +- reg: Address and length of the register set for the device. It contains the + information of registers in the same order as described by reg-names. +- reg-names: Should contain the reg names + "dma": DMA engine control and status region + "ctrl": MDIO and PHY control and status region - interrupts: Should contain tx and rx interrupt - interrupt-names: Should be "rx" and "tx" - phy-mode: See ethernet.txt file in the same directory. -- phy-handle: See ethernet.txt file in the same directory. - nvmem-cells: Phandle of nvmem cell containing the MAC address - nvmem-cell-names: Should be "address" +Optional properties: +- mdio subnode to indicate presence of MDIO controller +- fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. + Use instead of phy-handle. +- phy-handle: See ethernet.txt file in the same directory. 
+ Examples (10G generic PHY): + nixge0: ethernet@40000000 { + compatible = "ni,xge-enet-3.00"; + reg = <0x40000000 0x4000 + 0x41002000 0x2000>; + reg-names = "dma", "ctrl"; + + nvmem-cells = <ð1_addr>; + nvmem-cell-names = "address"; + + interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>, <0 30 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "rx", "tx"; + interrupt-parent = <&intc>; + + phy-mode = "xgmii"; + phy-handle = <ðernet_phy1>; + + mdio { + ethernet_phy1: ethernet-phy@4 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <4>; + }; + }; + }; + +Examples (10G generic PHY, no MDIO): nixge0: ethernet@40000000 { compatible = "ni,xge-enet-2.00"; reg = <0x40000000 0x6000>; @@ -24,9 +60,33 @@ Examples (10G generic PHY): phy-mode = "xgmii"; phy-handle = <ðernet_phy1>; + }; + +Examples (1G generic fixed-link + MDIO): + nixge0: ethernet@40000000 { + compatible = "ni,xge-enet-2.00"; + reg = <0x40000000 0x6000>; - ethernet_phy1: ethernet-phy@4 { - compatible = "ethernet-phy-ieee802.3-c45"; - reg = <4>; + nvmem-cells = <ð1_addr>; + nvmem-cell-names = "address"; + + interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>, <0 30 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "rx", "tx"; + interrupt-parent = <&intc>; + + phy-mode = "xgmii"; + + fixed-link { + speed = <1000>; + pause; + link-gpios = <&gpio0 63 GPIO_ACTIVE_HIGH>; + }; + + mdio { + ethernet_phy1: ethernet-phy@4 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <4>; + }; }; + }; diff --git a/Documentation/devicetree/bindings/net/qcom,ethqos.txt b/Documentation/devicetree/bindings/net/qcom,ethqos.txt new file mode 100644 index 000000000000..fcf5035810b5 --- /dev/null +++ b/Documentation/devicetree/bindings/net/qcom,ethqos.txt @@ -0,0 +1,64 @@ +Qualcomm Ethernet ETHQOS device + +This documents dwmmac based ethernet device which supports Gigabit +ethernet for version v2.3.0 onwards. 
+ +This device has following properties: + +Required properties: + +- compatible: Should be qcom,qcs404-ethqos" + +- reg: Address and length of the register set for the device + +- reg-names: Should contain register names "stmmaceth", "rgmii" + +- clocks: Should contain phandle to clocks + +- clock-names: Should contain clock names "stmmaceth", "pclk", + "ptp_ref", "rgmii" + +- interrupts: Should contain phandle to interrupts + +- interrupt-names: Should contain interrupt names "macirq", "eth_lpi" + +Rest of the properties are defined in stmmac.txt file in same directory + + +Example: + +ethernet: ethernet@7a80000 { + compatible = "qcom,qcs404-ethqos"; + reg = <0x07a80000 0x10000>, + <0x07a96000 0x100>; + reg-names = "stmmaceth", "rgmii"; + clock-names = "stmmaceth", "pclk", "ptp_ref", "rgmii"; + clocks = <&gcc GCC_ETH_AXI_CLK>, + <&gcc GCC_ETH_SLAVE_AHB_CLK>, + <&gcc GCC_ETH_PTP_CLK>, + <&gcc GCC_ETH_RGMII_CLK>; + interrupts = , + ; + interrupt-names = "macirq", "eth_lpi"; + snps,reset-gpio = <&tlmm 60 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + + snps,txpbl = <8>; + snps,rxpbl = <2>; + snps,aal; + snps,tso; + + phy-handle = <&phy1>; + phy-mode = "rgmii"; + + mdio { + #address-cells = <0x1>; + #size-cells = <0x0>; + compatible = "snps,dwmac-mdio"; + phy1: phy@4 { + device_type = "ethernet-phy"; + reg = <0x4>; + }; + }; + +}; diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.txt b/Documentation/devicetree/bindings/net/stm32-dwmac.txt index 1341012722aa..a90eef11dc46 100644 --- a/Documentation/devicetree/bindings/net/stm32-dwmac.txt +++ b/Documentation/devicetree/bindings/net/stm32-dwmac.txt @@ -14,8 +14,7 @@ Required properties: - clock-names: Should be "stmmaceth" for the host clock. Should be "mac-clk-tx" for the MAC TX clock. Should be "mac-clk-rx" for the MAC RX clock. - For MPU family need to add also "ethstp" for power mode clock and, - "syscfg-clk" for SYSCFG clock. + For MPU family need to add also "ethstp" for power mode clock - interrupt-names: Should contain a list of interrupt names corresponding to the interrupts in the interrupts property, if available. Should be "macirq" for the main MAC IRQ @@ -24,9 +23,9 @@ Required properties: encompases the glue register, and the offset of the control register. Optional properties: -- clock-names: For MPU family "mac-clk-ck" for PHY without quartz -- st,int-phyclk (boolean) : valid only where PHY do not have quartz and need to be clock - by RCC +- clock-names: For MPU family "eth-ck" for PHY without quartz +- st,eth-clk-sel (boolean) : set this property in RGMII PHY when you want to select RCC clock instead of ETH_CLK125. +- st,eth-ref-clk-sel (boolean) : set this property in RMII mode when you have PHY without crystal 50MHz and want to select RCC clock instead of ETH_REF_CLK. Example: diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt index 0c17a0ec9b7b..7b9a776230c0 100644 --- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt +++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt @@ -4,6 +4,13 @@ This node provides properties for configuring the MediaTek mt76xx wireless device. The node is expected to be specified as a child node of the PCI controller to which the wireless chip is connected. +Alternatively, it can specify the wireless part of the MT7628/MT7688 SoC. 
+For SoC, use the compatible string "mediatek,mt7628-wmac" and the following +properties: + +- reg: Address and length of the register set for the device. +- interrupts: Main device interrupt + Optional properties: - mac-address: See ethernet.txt in the parent directory @@ -30,3 +37,15 @@ Optional nodes: }; }; }; + +MT7628 example: + +wmac: wmac@10300000 { + compatible = "mediatek,mt7628-wmac"; + reg = <0x10300000 0x100000>; + + interrupt-parent = <&cpuintc>; + interrupts = <6>; + + mediatek,mtd-eeprom = <&factory 0x0000>; +}; diff --git a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt index 792bc5fafeb9..7a999a135e56 100644 --- a/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt +++ b/Documentation/devicetree/bindings/nvmem/imx-ocotp.txt @@ -1,7 +1,7 @@ Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings This binding represents the on-chip eFuse OTP controller found on -i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL and i.MX6SLL SoCs. +i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX, i.MX6UL, i.MX6ULL/ULZ and i.MX6SLL SoCs. Required properties: - compatible: should be one of @@ -9,8 +9,10 @@ Required properties: "fsl,imx6sl-ocotp" (i.MX6SL), or "fsl,imx6sx-ocotp" (i.MX6SX), "fsl,imx6ul-ocotp" (i.MX6UL), + "fsl,imx6ull-ocotp" (i.MX6ULL/ULZ), "fsl,imx7d-ocotp" (i.MX7D/S), "fsl,imx6sll-ocotp" (i.MX6SLL), + "fsl,imx7ulp-ocotp" (i.MX7ULP), followed by "syscon". - #address-cells : Should be 1 - #size-cells : Should be 1 diff --git a/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt new file mode 100644 index 000000000000..4881561b3a02 --- /dev/null +++ b/Documentation/devicetree/bindings/nvmem/xlnx,zynqmp-nvmem.txt @@ -0,0 +1,46 @@ +-------------------------------------------------------------------------- += Zynq UltraScale+ MPSoC nvmem firmware driver binding = +-------------------------------------------------------------------------- +The nvmem_firmware node provides access to the hardware related data +like soc revision, IDCODE... etc, By using the firmware interface. + +Required properties: +- compatible: should be "xlnx,zynqmp-nvmem-fw" + += Data cells = +Are child nodes of silicon id, bindings of which as described in +bindings/nvmem/nvmem.txt + +------- + Example +------- +firmware { + zynqmp_firmware: zynqmp-firmware { + compatible = "xlnx,zynqmp-firmware"; + method = "smc"; + + nvmem_firmware { + compatible = "xlnx,zynqmp-nvmem-fw"; + #address-cells = <1>; + #size-cells = <1>; + + /* Data cells */ + soc_revision: soc_revision { + reg = <0x0 0x4>; + }; + }; + }; +}; + += Data consumers = +Are device nodes which consume nvmem data cells. + +For example: + pcap { + ... + + nvmem-cells = <&soc_revision>; + nvmem-cell-names = "soc_revision"; + + ... + }; diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt index c396c4c0af92..76b6c79604a5 100644 --- a/Documentation/devicetree/bindings/opp/opp.txt +++ b/Documentation/devicetree/bindings/opp/opp.txt @@ -129,6 +129,9 @@ Optional properties: - opp-microamp-: Named opp-microamp property. Similar to opp-microvolt- property, but for microamp instead. +- opp-level: A value representing the performance level of the device, + expressed as a 32-bit integer. + - clock-latency-ns: Specifies the maximum possible transition latency (in nanoseconds) for switching to this OPP from any other OPP. 
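The opp-level property added to opp.txt above expresses an abstract performance level rather than a clock rate. The fragment below is a minimal sketch of such a table; the node names and level values are illustrative only (the qcom,rpmpd.txt binding further down in this series shows in-tree users of the property):

	example_pd_opp_table: opp-table {
		compatible = "operating-points-v2";

		/* Each entry carries a 32-bit performance level instead of opp-hz. */
		example_opp_low: opp1 {
			opp-level = <1>;
		};

		example_opp_high: opp2 {
			opp-level = <2>;
		};
	};
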
diff --git a/Documentation/devicetree/bindings/pci/altera-pcie.txt b/Documentation/devicetree/bindings/pci/altera-pcie.txt index 6c396f17c91a..816b244a221e 100644 --- a/Documentation/devicetree/bindings/pci/altera-pcie.txt +++ b/Documentation/devicetree/bindings/pci/altera-pcie.txt @@ -1,11 +1,13 @@ * Altera PCIe controller Required properties: -- compatible : should contain "altr,pcie-root-port-1.0" +- compatible : should contain "altr,pcie-root-port-1.0" or "altr,pcie-root-port-2.0" - reg: a list of physical base address and length for TXS and CRA. + For "altr,pcie-root-port-2.0", additional HIP base address and length. - reg-names: must include the following entries: "Txs": TX slave port region "Cra": Control register access region + "Hip": Hard IP region (if "altr,pcie-root-port-2.0") - interrupts: specifies the interrupt source of the parent interrupt controller. The format of the interrupt specifier depends on the parent interrupt controller. diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt index d514c1f2365f..a7f5f5afa0e6 100644 --- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt +++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt @@ -9,6 +9,7 @@ Required properties: - "fsl,imx6sx-pcie", - "fsl,imx6qp-pcie" - "fsl,imx7d-pcie" + - "fsl,imx8mq-pcie" - reg: base address and length of the PCIe controller - interrupts: A list of interrupt outputs of the controller. Must contain an entry for each entry in the interrupt-names property. @@ -45,7 +46,7 @@ Additional required properties for imx6sx-pcie: PCIE_PHY power domains - power-domain-names: Must be "pcie", "pcie_phy" -Additional required properties for imx7d-pcie: +Additional required properties for imx7d-pcie and imx8mq-pcie: - power-domains: Must be set to a phandle pointing to PCIE_PHY power domain - resets: Must contain phandles to PCIe-related reset lines exposed by SRC IP block @@ -53,6 +54,11 @@ Additional required properties for imx7d-pcie: - "pciephy" - "apps" - "turnoff" +- fsl,imx7d-pcie-phy: A phandle to an fsl,imx7d-pcie-phy node. + +Additional required properties for imx8mq-pcie: +- clock-names: Must include the following additional entries: + - "pcie_aux" Example: @@ -79,3 +85,13 @@ Example: clocks = <&clks 144>, <&clks 206>, <&clks 189>; clock-names = "pcie", "pcie_bus", "pcie_phy"; }; + +* Freescale i.MX7d PCIe PHY + +This is the PHY associated with the IMX7d PCIe controller. It's used by the +PCI-e controller via the fsl,imx7d-pcie-phy phandle. + +Required properties: +- compatible: + - "fsl,imx7d-pcie-phy" +- reg: base address and length of the PCIe PHY controller diff --git a/Documentation/devicetree/bindings/pci/layerscape-pci.txt b/Documentation/devicetree/bindings/pci/layerscape-pci.txt index 9b2b8d66d1f4..e20ceaab9b38 100644 --- a/Documentation/devicetree/bindings/pci/layerscape-pci.txt +++ b/Documentation/devicetree/bindings/pci/layerscape-pci.txt @@ -13,6 +13,7 @@ information. Required properties: - compatible: should contain the platform identifier such as: + RC mode: "fsl,ls1021a-pcie" "fsl,ls2080a-pcie", "fsl,ls2085a-pcie" "fsl,ls2088a-pcie" @@ -20,6 +21,8 @@ Required properties: "fsl,ls1046a-pcie" "fsl,ls1043a-pcie" "fsl,ls1012a-pcie" + EP mode: + "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep" - reg: base addresses and lengths of the PCIe controller register blocks. - interrupts: A list of interrupt outputs of the controller. Must contain an entry for each entry in the interrupt-names property. 
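The fsl,imx6q-pcie.txt hunk above describes a separate i.MX7d PCIe PHY node but does not show one. A minimal sketch is given below; the unit address is a placeholder rather than a value copied from imx7d.dtsi, and the controller reference uses the fsl,imx7d-pcie-phy phandle that the binding requires for imx7d-pcie:

	pcie_phy: pcie-phy@306d0000 {
		/* base address and size are illustrative only */
		compatible = "fsl,imx7d-pcie-phy";
		reg = <0x306d0000 0x10000>;
	};

	&pcie {
		fsl,imx7d-pcie-phy = <&pcie_phy>;
	};
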
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt index 976ef7bfff93..6904882a0e94 100644 --- a/Documentation/devicetree/bindings/pci/rcar-pci.txt +++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt @@ -3,6 +3,7 @@ Required properties: compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC; "renesas,pcie-r8a7744" for the R8A7744 SoC; + "renesas,pcie-r8a774c0" for the R8A774C0 SoC; "renesas,pcie-r8a7779" for the R8A7779 SoC; "renesas,pcie-r8a7790" for the R8A7790 SoC; "renesas,pcie-r8a7791" for the R8A7791 SoC; @@ -13,7 +14,8 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC; "renesas,pcie-r8a77990" for the R8A77990 SoC; "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or RZ/G1 compatible device. - "renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device. + "renesas,pcie-rcar-gen3" for a generic R-Car Gen3 or + RZ/G2 compatible device. When compatible with the generic version, nodes must list the SoC-specific version corresponding to the platform first diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt index 452fe48c4fdd..d5cbfe6b0d89 100644 --- a/Documentation/devicetree/bindings/pci/ti-pci.txt +++ b/Documentation/devicetree/bindings/pci/ti-pci.txt @@ -1,14 +1,21 @@ TI PCI Controllers PCIe DesignWare Controller - - compatible: Should be "ti,dra7-pcie" for RC - Should be "ti,dra7-pcie-ep" for EP + - compatible: Should be "ti,dra7-pcie" for RC (deprecated) + Should be "ti,dra7-pcie-ep" for EP (deprecated) + Should be "ti,dra746-pcie-rc" for dra74x/dra76 in RC mode + Should be "ti,dra746-pcie-ep" for dra74x/dra76 in EP mode + Should be "ti,dra726-pcie-rc" for dra72x in RC mode + Should be "ti,dra726-pcie-ep" for dra72x in EP mode - phys : list of PHY specifiers (used by generic PHY framework) - phy-names : must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the number of PHYs as specified in *phys* property. - ti,hwmods : Name of the hwmod associated to the pcie, "pcie", where is the instance number of the pcie from the HW spec. - num-lanes as specified in ../designware-pcie.txt + - ti,syscon-lane-sel : phandle/offset pair. Phandle to the system control + module and the register offset to specify lane + selection. HOST MODE ========= diff --git a/Documentation/devicetree/bindings/phy/cdns,dphy.txt b/Documentation/devicetree/bindings/phy/cdns,dphy.txt new file mode 100644 index 000000000000..1095bc4e72d9 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/cdns,dphy.txt @@ -0,0 +1,20 @@ +Cadence DPHY +============ + +Cadence DPHY block. + +Required properties: +- compatible: should be set to "cdns,dphy". +- reg: physical base address and length of the DPHY registers. +- clocks: DPHY reference clocks. +- clock-names: must contain "psm" and "pll_ref". +- #phy-cells: must be set to 0. + +Example: + dphy0: dphy@fd0e0000{ + compatible = "cdns,dphy"; + reg = <0x0 0xfd0e0000 0x0 0x1000>; + clocks = <&psm_clk>, <&pll_ref_clk>; + clock-names = "psm", "pll_ref"; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt b/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt new file mode 100644 index 000000000000..ad49e5c01334 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-armada38x-comphy.txt @@ -0,0 +1,40 @@ +mvebu armada 38x comphy driver +------------------------------ + +This comphy controller can be found on Marvell Armada 38x. 
It provides a +number of shared PHYs used by various interfaces (network, sata, usb, +PCIe...). + +Required properties: + +- compatible: should be "marvell,armada-380-comphy" +- reg: should contain the comphy register location and length. +- #address-cells: should be 1. +- #size-cells: should be 0. + +A sub-node is required for each comphy lane provided by the comphy. + +Required properties (child nodes): + +- reg: comphy lane number. +- #phy-cells : from the generic phy bindings, must be 1. Defines the + input port to use for a given comphy lane. + +Example: + + comphy: phy@18300 { + compatible = "marvell,armada-380-comphy"; + reg = <0x18300 0x100>; + #address-cells = <1>; + #size-cells = <0>; + + cpm_comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + cpm_comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt index bfcf80341657..cf2cd86db267 100644 --- a/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt +++ b/Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt @@ -1,16 +1,27 @@ -mvebu comphy driver -------------------- +MVEBU comphy drivers +-------------------- -A comphy controller can be found on Marvell Armada 7k/8k on the CP110. It -provides a number of shared PHYs used by various interfaces (network, sata, -usb, PCIe...). +COMPHY controllers can be found on the following Marvell MVEBU SoCs: +* Armada 7k/8k (on the CP110) +* Armada 3700 +It provides a number of shared PHYs used by various interfaces (network, SATA, +USB, PCIe...). Required properties: -- compatible: should be "marvell,comphy-cp110" -- reg: should contain the comphy register location and length. -- marvell,system-controller: should contain a phandle to the - system controller node. +- compatible: should be one of: + * "marvell,comphy-cp110" for Armada 7k/8k + * "marvell,comphy-a3700" for Armada 3700 +- reg: should contain the COMPHY register(s) location(s) and length(s). + * 1 entry for Armada 7k/8k + * 4 entries for Armada 3700 along with the corresponding reg-names + properties, memory areas are: + * Generic COMPHY registers + * Lane 1 (PCIe/GbE) + * Lane 0 (USB3/GbE) + * Lane 2 (SATA/USB3) +- marvell,system-controller: should contain a phandle to the system + controller node (only for Armada 7k/8k) - #address-cells: should be 1. - #size-cells: should be 0. @@ -18,11 +29,11 @@ A sub-node is required for each comphy lane provided by the comphy. Required properties (child nodes): -- reg: comphy lane number. -- #phy-cells : from the generic phy bindings, must be 1. Defines the +- reg: COMPHY lane number. +- #phy-cells : from the generic PHY bindings, must be 1. Defines the input port to use for a given comphy lane. 
-Example: +Examples: cpm_comphy: phy@120000 { compatible = "marvell,comphy-cp110"; @@ -41,3 +52,33 @@ Example: #phy-cells = <1>; }; }; + + comphy: phy@18300 { + compatible = "marvell,comphy-a3700"; + reg = <0x18300 0x300>, + <0x1F000 0x400>, + <0x5C000 0x400>, + <0xe0178 0x8>; + reg-names = "comphy", + "lane1_pcie_gbe", + "lane0_usb3_gbe", + "lane2_sata_usb3"; + #address-cells = <1>; + #size-cells = <0>; + + + comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt b/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt new file mode 100644 index 000000000000..aa99ceec73b0 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt @@ -0,0 +1,38 @@ +MVEBU A3700 UTMI PHY +-------------------- + +USB2 UTMI+ PHY controllers can be found on the following Marvell MVEBU SoCs: +* Armada 3700 + +On Armada 3700, there are two USB controllers, one is compatible with the USB2 +and USB3 specifications and supports OTG. The other one is USB2 compliant and +only supports host mode. Both of these controllers come with a slightly +different UTMI PHY. + +Required Properties: + +- compatible: Should be one of: + * "marvell,a3700-utmi-host-phy" for the PHY connected to + the USB2 host-only controller. + * "marvell,a3700-utmi-otg-phy" for the PHY connected to + the USB3 and USB2 OTG capable controller. +- reg: PHY IP register range. +- marvell,usb-misc-reg: handle on the "USB miscellaneous registers" shared + region covering registers related to both the host + controller and the PHY. +- #phy-cells: Standard property (Documentation: phy-bindings.txt) Should be 0. + + +Example: + + usb2_utmi_host_phy: phy@5f000 { + compatible = "marvell,armada-3700-utmi-host-phy"; + reg = <0x5f000 0x800>; + marvell,usb-misc-reg = <&usb2_syscon>; + #phy-cells = <0>; + }; + + usb2_syscon: system-controller@5f800 { + compatible = "marvell,armada-3700-usb2-host-misc", "syscon"; + reg = <0x5f800 0x800>; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt index 074a7b3b0425..00639baae74a 100644 --- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt +++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt @@ -23,6 +23,8 @@ Optional properties: register files". When set driver will request its phandle as one companion-grf for some special SoCs (e.g RV1108). + - extcon : phandle to the extcon device providing the cable state for + the otg phy. Required nodes : a sub-node is required for each port the phy provides. 
The sub-node name is used to identify host or otg port, diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt index 41a1074228ba..5d181fc3cc18 100644 --- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt +++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt @@ -9,6 +9,8 @@ Required properties: "qcom,ipq8074-qmp-pcie-phy" for PCIe phy on IPQ8074 "qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996, "qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996, + "qcom,msm8998-qmp-usb3-phy" for USB3 QMP V3 phy on msm8998, + "qcom,msm8998-qmp-ufs-phy" for UFS QMP phy on msm8998, "qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845, "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845, "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845. @@ -42,6 +44,10 @@ Required properties: "aux", "cfg_ahb", "ref". For "qcom,msm8996-qmp-usb3-phy" must contain: "aux", "cfg_ahb", "ref". + For "qcom,msm8998-qmp-usb3-phy" must contain: + "aux", "cfg_ahb", "ref". + For "qcom,msm8998-qmp-ufs-phy" must contain: + "ref", "ref_aux". For "qcom,sdm845-qmp-usb3-phy" must contain: "aux", "cfg_ahb", "ref", "com_aux". For "qcom,sdm845-qmp-usb3-uni-phy" must contain: @@ -61,6 +67,9 @@ Required properties: "phy", "common", "cfg". For "qcom,msm8996-qmp-usb3-phy" must contain "phy", "common". + For "qcom,msm8998-qmp-usb3-phy" must contain + "phy", "common". + For "qcom,msm8998-qmp-ufs-phy": no resets are listed. For "qcom,sdm845-qmp-usb3-phy" must contain: "phy", "common". For "qcom,sdm845-qmp-usb3-uni-phy" must contain: diff --git a/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt index 03025d97998b..fe29f9e0af6d 100644 --- a/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt +++ b/Documentation/devicetree/bindings/phy/qcom-qusb2-phy.txt @@ -6,6 +6,7 @@ QUSB2 controller supports LS/FS/HS usb connectivity on Qualcomm chipsets. Required properties: - compatible: compatible list, contains "qcom,msm8996-qusb2-phy" for 14nm PHY on msm8996, + "qcom,msm8998-qusb2-phy" for 10nm PHY on msm8998, "qcom,sdm845-qusb2-phy" for 10nm PHY on sdm845. - reg: offset and length of the PHY register set. diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt index de7b5393c163..ad9c290d8f15 100644 --- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt +++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt @@ -5,6 +5,8 @@ This file provides information on what the device node for the R-Car generation Required properties: - compatible: "renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1 + SoC. + "renesas,usb2-phy-r8a774c0" if the device is a part of an R8A774C0 SoC. "renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795 SoC. diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt index 57dfda8a7a1d..8f93c3b694a7 100644 --- a/Documentation/devicetree/bindings/phy/ti-phy.txt +++ b/Documentation/devicetree/bindings/phy/ti-phy.txt @@ -35,6 +35,7 @@ Required properties: DRA7x Should be "ti,dra7x-usb2-phy2" for the 2nd instance of USB2 PHY in DRA7x + Should be "ti,am654-usb2" for the USB2 PHYs on AM654. - reg : Address and length of the register set for the device. - #phy-cells: determine the number of cells that should be given in the phandle while referencing this phy. 
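For the new "ti,am654-usb2" compatible added to ti-phy.txt above, a USB2 PHY node might look like the sketch below. The unit address, clock phandles and the "ti,omap-usb2" fallback are assumptions made for illustration, not taken from an in-tree AM654 device tree:

	usb2_phy0: phy@4100000 {
		/* reg and clock providers below are placeholders */
		compatible = "ti,am654-usb2", "ti,omap-usb2";
		reg = <0x4100000 0x54>;
		clocks = <&usb2_refclk>, <&usb2_wkupclk>;
		clock-names = "refclk", "wkupclk";
		#phy-cells = <0>;
	};
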
diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt index 3e23fece99da..eb39f5051159 100644 --- a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt @@ -19,7 +19,7 @@ such as pull-up, multi drive, etc. Required properties for iomux controller: - compatible: "atmel,at91rm9200-pinctrl" or "atmel,at91sam9x5-pinctrl" - or "atmel,sama5d3-pinctrl" + or "atmel,sama5d3-pinctrl" or "microchip,sam9x60-pinctrl" - atmel,mux-mask: array of mask (periph per bank) to describe if a pin can be configured in this periph mode. All the periph and bank need to be describe. @@ -100,6 +100,7 @@ DRIVE_STRENGTH (3 << 5): indicate the drive strength of the pin using the 11 - High OUTPUT (1 << 7): indicate this pin need to be configured as an output. OUTPUT_VAL (1 << 8): output val (1 = high, 0 = low) +SLEWRATE (1 << 9): slew rate of the pin: 0 = disable, 1 = enable DEBOUNCE (1 << 16): indicate this pin needs debounce. DEBOUNCE_VAL (0x3fff << 17): debounce value. @@ -116,6 +117,19 @@ Some requirements for using atmel,at91rm9200-pinctrl binding: configurations by referring to the phandle of that pin configuration node. 4. The gpio controller must be describe in the pinctrl simple-bus. +For each bank the required properties are: +- compatible: "atmel,at91sam9x5-gpio" or "atmel,at91rm9200-gpio" or + "microchip,sam9x60-gpio" +- reg: physical base address and length of the controller's registers +- interrupts: interrupt outputs from the controller +- interrupt-controller: marks the device node as an interrupt controller +- #interrupt-cells: should be 2; refer to ../interrupt-controller/interrupts.txt + for more details. +- gpio-controller +- #gpio-cells: should be 2; the first cell is the GPIO number and the second + cell specifies GPIO flags as defined in . +- clocks: bank clock + Examples: pinctrl@fffff400 { @@ -125,6 +139,17 @@ pinctrl@fffff400 { compatible = "atmel,at91rm9200-pinctrl", "simple-bus"; reg = <0xfffff400 0x600>; + pioA: gpio@fffff400 { + compatible = "atmel,at91sam9x5-gpio"; + reg = <0xfffff400 0x200>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH 1>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + #interrupt-cells = <2>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 2>; + }; + atmel,mux-mask = < /* A B */ 0xffffffff 0xffc00c3b /* pioA */ diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx50-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx50-pinctrl.txt new file mode 100644 index 000000000000..6da01d619d33 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx50-pinctrl.txt @@ -0,0 +1,32 @@ +* Freescale IMX50 IOMUX Controller + +Please refer to fsl,imx-pinctrl.txt in this directory for common binding part +and usage. + +Required properties: +- compatible: "fsl,imx50-iomuxc" +- fsl,pins: two integers array, represents a group of pins mux and config + setting. The format is fsl,pins = , PIN_FUNC_ID is a + pin working on a specific function, CONFIG is the pad setting value like + pull-up for this pin. Please refer to imx50 datasheet for the valid pad + config settings. 
+ +CONFIG bits definition: +PAD_CTL_HVE (1 << 13) +PAD_CTL_HYS (1 << 8) +PAD_CTL_PKE (1 << 7) +PAD_CTL_PUE (1 << 6) +PAD_CTL_PUS_100K_DOWN (0 << 4) +PAD_CTL_PUS_47K_UP (1 << 4) +PAD_CTL_PUS_100K_UP (2 << 4) +PAD_CTL_PUS_22K_UP (3 << 4) +PAD_CTL_ODE (1 << 3) +PAD_CTL_DSE_LOW (0 << 1) +PAD_CTL_DSE_MED (1 << 1) +PAD_CTL_DSE_HIGH (2 << 1) +PAD_CTL_DSE_MAX (3 << 1) +PAD_CTL_SRE_FAST (1 << 0) +PAD_CTL_SRE_SLOW (0 << 0) + +Refer to imx50-pinfunc.h in device tree source folder for all available +imx50 PIN_FUNC_ID. diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt new file mode 100644 index 000000000000..524a16fca666 --- /dev/null +++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.txt @@ -0,0 +1,36 @@ +* Freescale IMX8MM IOMUX Controller + +Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory +for common binding part and usage. + +Required properties: +- compatible: "fsl,imx8mm-iomuxc" +- reg: should contain the base physical address and size of the iomuxc + registers. + +Required properties in sub-nodes: +- fsl,pins: each entry consists of 6 integers and represents the mux and config + setting for one pin. The first 5 integers are specified using a PIN_FUNC_ID macro, which can be found in + . The last integer CONFIG is + the pad setting value like pull-up on this pin. Please refer to i.MX8M Mini + Reference Manual for detailed CONFIG settings. + +Examples: + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; +}; + +iomuxc: pinctrl@30330000 { + compatible = "fsl,imx8mm-iomuxc"; + reg = <0x0 0x30330000 0x0 0x10000>; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140 + MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140 + >; + }; +}; diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt index c7c088d2dd50..38dc56a57760 100644 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt @@ -58,11 +58,11 @@ group pwm3 - functions pwm, gpio group pmic1 - - pin 17 + - pin 7 - functions pmic, gpio group pmic0 - - pin 16 + - pin 6 - functions pmic, gpio group i2c2 @@ -112,19 +112,31 @@ group usb2_drvvbus1 - functions drvbus, gpio group sdio_sb - - pins 60-64 + - pins 60-65 - functions sdio, gpio group rgmii - - pins 42-55 + - pins 42-53 - functions mii, gpio group pcie1 - - pins 39-40 + - pins 39 + - functions pcie, gpio + +group pcie1_clkreq + - pins 40 - functions pcie, gpio +group pcie1_wakeup + - pins 41 + - functions pcie, gpio + +group smi + - pins 54-55 + - functions smi, gpio + group ptp - - pins 56-58 + - pins 56 - functions ptp, gpio group ptp_clk diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt index 82ead40311f6..a47dd990a8d3 100644 --- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt +++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt @@ -23,11 +23,11 @@ The GPIO bank for the controller is represented as a sub-node and it acts as a GPIO controller. Required properties for sub-nodes are: - - reg: should contain address and size for mux, pull-enable, pull and - gpio register sets - - reg-names: an array of strings describing the "reg" entries. Must - contain "mux", "pull" and "gpio". 
"pull-enable" is optional and - when it is missing the "pull" registers are used instead + - reg: should contain a list of address and size, one tuple for each entry + in reg-names. + - reg-names: an array of strings describing the "reg" entries. + Must contain "mux" and "gpio". + May contain "pull", "pull-enable" and "ds" when appropriate. - gpio-controller: identifies the node as a gpio controller - #gpio-cells: must be 2 diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt index 759aa1732e48..7f64a7e92c28 100644 --- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt +++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt @@ -19,6 +19,7 @@ PMIC's from Qualcomm. "qcom,pm8998-gpio" "qcom,pma8084-gpio" "qcom,pmi8994-gpio" + "qcom,pmi8998-gpio" "qcom,pms405-gpio" And must contain either "qcom,spmi-gpio" or "qcom,ssbi-gpio" diff --git a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt index 7c947a996df1..7c7e972aaa42 100644 --- a/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt +++ b/Documentation/devicetree/bindings/power/fsl,imx-gpcv2.txt @@ -32,6 +32,9 @@ Required properties: Optional properties: - power-supply: Power supply used to power the domain +- clocks: a number of phandles to clocks that need to be enabled during + domain power-up sequencing to ensure reset propagation into devices + located inside this power domain Example: diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.txt b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt new file mode 100644 index 000000000000..980e5413d18f --- /dev/null +++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt @@ -0,0 +1,145 @@ +Qualcomm RPM/RPMh Power domains + +For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh +which then translates it into a corresponding voltage on a rail + +Required Properties: + - compatible: Should be one of the following + * qcom,msm8996-rpmpd: RPM Power domain for the msm8996 family of SoC + * qcom,sdm845-rpmhpd: RPMh Power domain for the sdm845 family of SoC + - #power-domain-cells: number of cells in Power domain specifier + must be 1. + - operating-points-v2: Phandle to the OPP table for the Power domain. 
+ Refer to Documentation/devicetree/bindings/power/power_domain.txt + and Documentation/devicetree/bindings/opp/opp.txt for more details + +Refer to for the level values for +various OPPs for different platforms as well as Power domain indexes + +Example: rpmh power domain controller and OPP table + +#include + +opp-level values specified in the OPP tables for RPMh power domains +should use the RPMH_REGULATOR_LEVEL_* constants from + + + rpmhpd: power-controller { + compatible = "qcom,sdm845-rpmhpd"; + #power-domain-cells = <1>; + operating-points-v2 = <&rpmhpd_opp_table>; + + rpmhpd_opp_table: opp-table { + compatible = "operating-points-v2"; + + rpmhpd_opp_ret: opp1 { + opp-level = ; + }; + + rpmhpd_opp_min_svs: opp2 { + opp-level = ; + }; + + rpmhpd_opp_low_svs: opp3 { + opp-level = ; + }; + + rpmhpd_opp_svs: opp4 { + opp-level = ; + }; + + rpmhpd_opp_svs_l1: opp5 { + opp-level = ; + }; + + rpmhpd_opp_nom: opp6 { + opp-level = ; + }; + + rpmhpd_opp_nom_l1: opp7 { + opp-level = ; + }; + + rpmhpd_opp_nom_l2: opp8 { + opp-level = ; + }; + + rpmhpd_opp_turbo: opp9 { + opp-level = ; + }; + + rpmhpd_opp_turbo_l1: opp10 { + opp-level = ; + }; + }; + }; + +Example: rpm power domain controller and OPP table + + rpmpd: power-controller { + compatible = "qcom,msm8996-rpmpd"; + #power-domain-cells = <1>; + operating-points-v2 = <&rpmpd_opp_table>; + + rpmpd_opp_table: opp-table { + compatible = "operating-points-v2"; + + rpmpd_opp_low: opp1 { + opp-level = <1>; + }; + + rpmpd_opp_ret: opp2 { + opp-level = <2>; + }; + + rpmpd_opp_svs: opp3 { + opp-level = <3>; + }; + + rpmpd_opp_normal: opp4 { + opp-level = <4>; + }; + + rpmpd_opp_high: opp5 { + opp-level = <5>; + }; + + rpmpd_opp_turbo: opp6 { + opp-level = <6>; + }; + }; + }; + +Example: Client/Consumer device using OPP table + + leaky-device0@12350000 { + compatible = "foo,i-leak-current"; + reg = <0x12350000 0x1000>; + power-domains = <&rpmhpd SDM845_MX>; + operating-points-v2 = <&leaky_opp_table>; + }; + + + leaky_opp_table: opp-table { + compatible = "operating-points-v2"; + + opp1 { + opp-hz = /bits/ 64 <144000>; + required-opps = <&rpmhpd_opp_low>; + }; + + opp2 { + opp-hz = /bits/ 64 <400000>; + required-opps = <&rpmhpd_opp_ret>; + }; + + opp3 { + opp-hz = /bits/ 64 <20000000>; + required-opps = <&rpmpd_opp_svs>; + }; + + opp4 { + opp-hz = /bits/ 64 <25000000>; + required-opps = <&rpmpd_opp_normal>; + }; + }; diff --git a/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt new file mode 100644 index 000000000000..d366f1eb623a --- /dev/null +++ b/Documentation/devicetree/bindings/power/reset/xlnx,zynqmp-power.txt @@ -0,0 +1,25 @@ +-------------------------------------------------------------------- +Device Tree Bindings for the Xilinx Zynq MPSoC Power Management +-------------------------------------------------------------------- +The zynqmp-power node describes the power management configurations. +It will control remote suspend/shutdown interfaces. 
+ +Required properties: + - compatible: Must contain: "xlnx,zynqmp-power" + - interrupts: Interrupt specifier + +------- +Example +------- + +firmware { + zynqmp_firmware: zynqmp-firmware { + compatible = "xlnx,zynqmp-firmware"; + method = "smc"; + + zynqmp_power: zynqmp-power { + compatible = "xlnx,zynqmp-power"; + interrupts = <0 35 4>; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/power/supply/battery.txt b/Documentation/devicetree/bindings/power/supply/battery.txt index 89871ab8c704..5c913d4cf36c 100644 --- a/Documentation/devicetree/bindings/power/supply/battery.txt +++ b/Documentation/devicetree/bindings/power/supply/battery.txt @@ -16,6 +16,7 @@ Required Properties: Optional Properties: - voltage-min-design-microvolt: drained battery voltage + - voltage-max-design-microvolt: fully charged battery voltage - energy-full-design-microwatt-hours: battery design energy - charge-full-design-microamp-hours: battery design capacity - precharge-current-microamp: current for pre-charge phase @@ -48,6 +49,7 @@ Example: bat: battery { compatible = "simple-battery"; voltage-min-design-microvolt = <3200000>; + voltage-max-design-microvolt = <4200000>; energy-full-design-microwatt-hours = <5290000>; charge-full-design-microamp-hours = <1430000>; precharge-current-microamp = <256000>; diff --git a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt index fc35ac577401..0a5705b8b592 100644 --- a/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt +++ b/Documentation/devicetree/bindings/power/supply/sc27xx-fg.txt @@ -9,8 +9,8 @@ Required properties: "sprd,sc2731-fgu". - reg: The address offset of fuel gauge unit. - battery-detect-gpios: GPIO for battery detection. -- io-channels: Specify the IIO ADC channel to get temperature. -- io-channel-names: Should be "bat-temp". +- io-channels: Specify the IIO ADC channels to get temperature and charge voltage. +- io-channel-names: Should be "bat-temp" or "charge-vol". - nvmem-cells: A phandle to the calibration cells provided by eFuse device. - nvmem-cell-names: Should be "fgu_calib". - monitored-battery: Phandle of battery characteristics devicetree node. @@ -47,8 +47,8 @@ Example: compatible = "sprd,sc2731-fgu"; reg = <0xa00>; battery-detect-gpios = <&pmic_eic 9 GPIO_ACTIVE_HIGH>; - io-channels = <&pmic_adc 5>; - io-channel-names = "bat-temp"; + io-channels = <&pmic_adc 5>, <&pmic_adc 14>; + io-channel-names = "bat-temp", "charge-vol"; nvmem-cells = <&fgu_calib>; nvmem-cell-names = "fgu_calib"; monitored-battery = <&bat>; diff --git a/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt b/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt new file mode 100644 index 000000000000..8d1b8200ebd0 --- /dev/null +++ b/Documentation/devicetree/bindings/power/xlnx,zynqmp-genpd.txt @@ -0,0 +1,34 @@ +----------------------------------------------------------- +Device Tree Bindings for the Xilinx Zynq MPSoC PM domains +----------------------------------------------------------- +The binding for zynqmp-power-controller follow the common +generic PM domain binding[1]. + +[1] Documentation/devicetree/bindings/power/power_domain.txt + +== Zynq MPSoC Generic PM Domain Node == + +Required property: + - Below property should be in zynqmp-firmware node. + - #power-domain-cells: Number of cells in a PM domain specifier. Must be 1. + +Power domain ID indexes are mentioned in +include/dt-bindings/power/xlnx-zynqmp-power.h. 
+ +------- +Example +------- + +firmware { + zynqmp_firmware: zynqmp-firmware { + ... + #power-domain-cells = <1>; + ... + }; +}; + +sata { + ... + power-domains = <&zynqmp_firmware 28>; + ... +}; diff --git a/Documentation/devicetree/bindings/property-units.txt b/Documentation/devicetree/bindings/property-units.txt index 45ce054d844d..bfd33734faca 100644 --- a/Documentation/devicetree/bindings/property-units.txt +++ b/Documentation/devicetree/bindings/property-units.txt @@ -31,6 +31,7 @@ Electricity -microwatt-hours: micro Watt-hours -microvolt : micro volts -picofarads : picofarads +-femtofarads : femtofarads Temperature ---------------------------------------- diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt index c5d0e7998e2b..454c937076a2 100644 --- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt +++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt @@ -17,6 +17,11 @@ Clock Properties: - fsl,tmr-fiper1 Fixed interval period pulse generator. - fsl,tmr-fiper2 Fixed interval period pulse generator. - fsl,max-adj Maximum frequency adjustment in parts per billion. + - fsl,extts-fifo The presence of this property indicates hardware + support for the external trigger stamp FIFO. + - little-endian The presence of this property indicates the 1588 timer + IP block is little-endian mode. The default endian mode + is big-endian. These properties set the operational parameters for the PTP clock. You must choose these carefully for the clock to work right. diff --git a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt index c8c831d7b0d1..591ecdd39c7b 100644 --- a/Documentation/devicetree/bindings/pwm/atmel-pwm.txt +++ b/Documentation/devicetree/bindings/pwm/atmel-pwm.txt @@ -5,6 +5,7 @@ Required properties: - "atmel,at91sam9rl-pwm" - "atmel,sama5d3-pwm" - "atmel,sama5d2-pwm" + - "microchip,sam9x60-pwm" - reg: physical base address and length of the controller's registers - #pwm-cells: Should be 3. See pwm.txt in this directory for a description of the cells format. diff --git a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt index fa7849d67836..daedfef09bb6 100644 --- a/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt +++ b/Documentation/devicetree/bindings/pwm/pwm-hibvt.txt @@ -5,6 +5,8 @@ Required properties: The SoC specific strings supported including: "hisilicon,hi3516cv300-pwm" "hisilicon,hi3519v100-pwm" + "hisilicon,hi3559v100-shub-pwm" + "hisilicon,hi3559v100-pwm - reg: physical base address and length of the controller's registers. - clocks: phandle and clock specifier of the PWM reference clock. - resets: phandle and reset specifier for the PWM controller reset. 
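The "microchip,sam9x60-pwm" compatible added to atmel-pwm.txt above is used like the existing Atmel entries; a minimal controller node is sketched below with a placeholder base address, following the required properties listed in that hunk (compatible, reg, #pwm-cells):

	pwm0: pwm@f8034000 {
		/* base address is illustrative only */
		compatible = "microchip,sam9x60-pwm";
		reg = <0xf8034000 0x400>;
		#pwm-cells = <3>;
	};
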
diff --git a/Documentation/devicetree/bindings/regulator/fan53555.txt b/Documentation/devicetree/bindings/regulator/fan53555.txt index 54a3f2c80e3a..e7fc045281d1 100644 --- a/Documentation/devicetree/bindings/regulator/fan53555.txt +++ b/Documentation/devicetree/bindings/regulator/fan53555.txt @@ -1,7 +1,8 @@ Binding for Fairchild FAN53555 regulators Required properties: - - compatible: one of "fcs,fan53555", "silergy,syr827", "silergy,syr828" + - compatible: one of "fcs,fan53555", "fcs,fan53526", "silergy,syr827" or + "silergy,syr828" - reg: I2C address Optional properties: diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt deleted file mode 100644 index 0c2a6c8a1536..000000000000 --- a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt +++ /dev/null @@ -1,35 +0,0 @@ -Fixed Voltage regulators - -Required properties: -- compatible: Must be "regulator-fixed"; -- regulator-name: Defined in regulator.txt as optional, but required here. - -Optional properties: -- gpio: gpio to use for enable control -- startup-delay-us: startup time in microseconds -- enable-active-high: Polarity of GPIO is Active high -If this property is missing, the default assumed is Active low. -- gpio-open-drain: GPIO is open drain type. - If this property is missing then default assumption is false. --vin-supply: Input supply name. - -Any property defined as part of the core regulator -binding, defined in regulator.txt, can also be used. -However a fixed voltage regulator is expected to have the -regulator-min-microvolt and regulator-max-microvolt -to be the same. - -Example: - - abc: fixedregulator@0 { - compatible = "regulator-fixed"; - regulator-name = "fixed-supply"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - gpio = <&gpio1 16 0>; - startup-delay-us = <70000>; - enable-active-high; - regulator-boot-on; - gpio-open-drain; - vin-supply = <&parent_reg>; - }; diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml new file mode 100644 index 000000000000..d289c2f7455a --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.yaml @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/regulator/fixed-regulator.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Fixed Voltage regulators + +maintainers: + - Liam Girdwood + - Mark Brown + +description: + Any property defined as part of the core regulator binding, defined in + regulator.txt, can also be used. However a fixed voltage regulator is + expected to have the regulator-min-microvolt and regulator-max-microvolt + to be the same. + +properties: + compatible: + const: regulator-fixed + + regulator-name: true + + gpio: + description: gpio to use for enable control + maxItems: 1 + + startup-delay-us: + description: startup time in microseconds + $ref: /schemas/types.yaml#/definitions/uint32 + + enable-active-high: + description: + Polarity of GPIO is Active high. If this property is missing, + the default assumed is Active low. + type: boolean + + gpio-open-drain: + description: + GPIO is open drain type. If this property is missing then default + assumption is false. + type: boolean + + vin-supply: + description: Input supply phandle. 
+ $ref: /schemas/types.yaml#/definitions/phandle + +required: + - compatible + - regulator-name + +examples: + - | + reg_1v8: regulator-1v8 { + compatible = "regulator-fixed"; + regulator-name = "1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + gpio = <&gpio1 16 0>; + startup-delay-us = <70000>; + enable-active-high; + regulator-boot-on; + gpio-open-drain; + vin-supply = <&parent_reg>; + }; +... diff --git a/Documentation/devicetree/bindings/regulator/max77650-regulator.txt b/Documentation/devicetree/bindings/regulator/max77650-regulator.txt new file mode 100644 index 000000000000..f1cbe813c30f --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/max77650-regulator.txt @@ -0,0 +1,41 @@ +Regulator driver for MAX77650 PMIC from Maxim Integrated. + +This module is part of the MAX77650 MFD device. For more details +see Documentation/devicetree/bindings/mfd/max77650.txt. + +The regulator controller is represented as a sub-node of the PMIC node +on the device tree. + +The device has a single LDO regulator and a SIMO buck-boost regulator with +three independent power rails. + +Required properties: +-------------------- +- compatible: Must be "maxim,max77650-regulator" + +Each rail must be instantiated under the regulators subnode of the top PMIC +node. Up to four regulators can be defined. For standard regulator properties +refer to Documentation/devicetree/bindings/regulator/regulator.txt. + +Available regulator compatible strings are: "ldo", "sbb0", "sbb1", "sbb2". + +Example: +-------- + + regulators { + compatible = "maxim,max77650-regulator"; + + max77650_ldo: regulator@0 { + regulator-compatible = "ldo"; + regulator-name = "max77650-ldo"; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <2937500>; + }; + + max77650_sbb0: regulator@1 { + regulator-compatible = "sbb0"; + regulator-name = "max77650-sbb0"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1587500>; + }; + }; diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt index f9be1acf891c..4d3b12b92cb3 100644 --- a/Documentation/devicetree/bindings/regulator/pfuze100.txt +++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt @@ -8,7 +8,7 @@ Optional properties: - fsl,pfuze-support-disable-sw: Boolean, if present disable all unused switch regulators to save power consumption. Attention, ensure that all important regulators (e.g. DDR ref, DDR supply) has set the "regulator-always-on" - property. If not present, the switched regualtors are always on and can't be + property. If not present, the switched regulators are always on and can't be disabled. This binding is a workaround to keep backward compatibility with old dtb's which rely on the fact that the switched regulators are always on and don't mark them explicit as "regulator-always-on". diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt new file mode 100644 index 000000000000..698cfc3bc3dd --- /dev/null +++ b/Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt @@ -0,0 +1,68 @@ +ROHM BD70528 Power Management Integrated Circuit regulator bindings + +Required properties: + - regulator-name: should be "buck1", "buck2", "buck3", "ldo1", "ldo2", "ldo3", + "led_ldo1", "led_ldo2" + +List of regulators provided by this controller. BD70528 regulators node +should be sub node of the BD70528 MFD node. 
See BD70528 MFD bindings at +Documentation/devicetree/bindings/mfd/rohm,bd70528-pmic.txt + +The valid names for BD70528 regulator nodes are: +BUCK1, BUCK2, BUCK3, LDO1, LDO2, LDO3, LED_LDO1, LED_LDO2 + +Optional properties: +- Any optional property defined in bindings/regulator/regulator.txt + +Example: +regulators { + buck1: BUCK1 { + regulator-name = "buck1"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3400000>; + regulator-boot-on; + regulator-ramp-delay = <125>; + }; + buck2: BUCK2 { + regulator-name = "buck2"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-ramp-delay = <125>; + }; + buck3: BUCK3 { + regulator-name = "buck3"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + regulator-ramp-delay = <250>; + }; + ldo1: LDO1 { + regulator-name = "ldo1"; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + }; + ldo2: LDO2 { + regulator-name = "ldo2"; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + }; + + ldo3: LDO3 { + regulator-name = "ldo3"; + regulator-min-microvolt = <1650000>; + regulator-max-microvolt = <3300000>; + }; + led_ldo1: LED_LDO1 { + regulator-name = "led_ldo1"; + regulator-min-microvolt = <200000>; + regulator-max-microvolt = <300000>; + }; + led_ldo2: LED_LDO2 { + regulator-name = "led_ldo2"; + regulator-min-microvolt = <200000>; + regulator-max-microvolt = <300000>; + }; +}; diff --git a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt index 4b98ca26e61a..cbce62c22b60 100644 --- a/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/rohm,bd71837-regulator.txt @@ -27,8 +27,38 @@ BUCK1, BUCK2, BUCK3, BUCK4, BUCK5, BUCK6 LDO1, LDO2, LDO3, LDO4, LDO5, LDO6 Optional properties: +- rohm,dvs-run-voltage : PMIC default "RUN" state voltage in uV. + See below table for bucks which support this. +- rohm,dvs-idle-voltage : PMIC default "IDLE" state voltage in uV. + See below table for bucks which support this. +- rohm,dvs-suspend-voltage : PMIC default "SUSPEND" state voltage in uV. + See below table for bucks which support this. 
- Any optional property defined in bindings/regulator/regulator.txt +Supported default DVS states: + +BD71837: +buck | dvs-run-voltage | dvs-idle-voltage | dvs-suspend-voltage +----------------------------------------------------------------------------- +1 | supported | supported | supported +---------------------------------------------------------------------------- +2 | supported | supported | not supported +---------------------------------------------------------------------------- +3 | supported | not supported | not supported +---------------------------------------------------------------------------- +4 | supported | not supported | not supported +---------------------------------------------------------------------------- +rest | not supported | not supported | not supported + +BD71847: +buck | dvs-run-voltage | dvs-idle-voltage | dvs-suspend-voltage +----------------------------------------------------------------------------- +1 | supported | supported | supported +---------------------------------------------------------------------------- +2 | supported | supported | not supported +---------------------------------------------------------------------------- +rest | not supported | not supported | not supported + Example: regulators { buck1: BUCK1 { @@ -36,7 +66,11 @@ regulators { regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + regulator-always-on; regulator-ramp-delay = <1250>; + rohm,dvs-run-voltage = <900000>; + rohm,dvs-idle-voltage = <850000>; + rohm,dvs-suspend-voltage = <800000>; }; buck2: BUCK2 { regulator-name = "buck2"; @@ -45,18 +79,22 @@ regulators { regulator-boot-on; regulator-always-on; regulator-ramp-delay = <1250>; + rohm,dvs-run-voltage = <1000000>; + rohm,dvs-idle-voltage = <900000>; }; buck3: BUCK3 { regulator-name = "buck3"; regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + rohm,dvs-run-voltage = <1000000>; }; buck4: BUCK4 { regulator-name = "buck4"; regulator-min-microvolt = <700000>; regulator-max-microvolt = <1300000>; regulator-boot-on; + rohm,dvs-run-voltage = <1000000>; }; buck5: BUCK5 { regulator-name = "buck5"; diff --git a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt index a3f476240565..6189df71ea98 100644 --- a/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt +++ b/Documentation/devicetree/bindings/regulator/st,stpmic1-regulator.txt @@ -23,16 +23,14 @@ Switches are fixed voltage regulators with only enable/disable capability. Optional properties: - st,mask-reset: mask reset for this regulator: the regulator configuration is maintained during pmic reset. -- regulator-pull-down: enable high pull down - if not specified light pull down is used - regulator-over-current-protection: if set, all regulators are switched off in case of over-current detection on this regulator, if not set, the driver only sends an over-current event. -- interrupt-parent: phandle to the parent interrupt controller - interrupts: index of current limit detection interrupt - -supply: phandle to the parent supply/regulator node each regulator supply can be described except vref_ddr. +- regulator-active-discharge: can be used on pwr_sw1 and pwr_sw2. 
Example: regulators { @@ -43,7 +41,6 @@ regulators { vdd_core: buck1 { regulator-name = "vdd_core"; interrupts = ; - interrupt-parent = <&pmic>; st,mask-reset; regulator-pull-down; regulator-min-microvolt = <700000>; @@ -53,7 +50,6 @@ regulators { v3v3: buck4 { regulator-name = "v3v3"; interrupts = ; - interrupt-parent = <&mypmic>; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; diff --git a/Documentation/devicetree/bindings/regulator/tps65218.txt b/Documentation/devicetree/bindings/regulator/tps65218.txt index 02f0e9bbfbf8..54aded3b78e2 100644 --- a/Documentation/devicetree/bindings/regulator/tps65218.txt +++ b/Documentation/devicetree/bindings/regulator/tps65218.txt @@ -71,8 +71,13 @@ tps65218: tps65218@24 { regulator-always-on; }; + ls2: regulator-ls2 { + regulator-min-microamp = <100000>; + regulator-max-microamp = <1000000>; + }; + ls3: regulator-ls3 { - regulator-min-microvolt = <100000>; - regulator-max-microvolt = <1000000>; + regulator-min-microamp = <100000>; + regulator-max-microamp = <1000000>; }; }; diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt new file mode 100644 index 000000000000..6e5341b4f891 --- /dev/null +++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt @@ -0,0 +1,27 @@ +Broadcom STB SW_INIT-style reset controller +=========================================== + +Broadcom STB SoCs have a SW_INIT-style reset controller with separate +SET/CLEAR/STATUS registers and possibly multiple banks, each of 32 bit +reset lines. + +Please also refer to reset.txt in this directory for common reset +controller binding usage. + +Required properties: +- compatible: should be brcm,brcmstb-reset +- reg: register base and length +- #reset-cells: must be set to 1 + +Example: + + reset: reset-controller@8404318 { + compatible = "brcm,brcmstb-reset"; + reg = <0x8404318 0x30>; + #reset-cells = <1>; + }; + + ðernet_switch { + resets = <&reset>; + reset-names = "switch"; + }; diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt index 1ab1d109318e..2ecf33815d18 100644 --- a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt +++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt @@ -5,7 +5,9 @@ Please also refer to reset.txt in this directory for common reset controller binding usage. Required properties: -- compatible: Should be "fsl,imx7d-src", "syscon" +- compatible: + - For i.MX7 SoCs should be "fsl,imx7d-src", "syscon" + - For i.MX8MQ SoCs should be "fsl,imx8mq-src", "syscon" - reg: should be register base and length as documented in the datasheet - interrupts: Should contain SRC interrupt @@ -44,4 +46,5 @@ Example: For list of all valid reset indicies see - + for i.MX7 and + for i.MX8MQ diff --git a/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt new file mode 100644 index 000000000000..27a45fe5ecf1 --- /dev/null +++ b/Documentation/devicetree/bindings/reset/xlnx,zynqmp-reset.txt @@ -0,0 +1,52 @@ +-------------------------------------------------------------------------- + = Zynq UltraScale+ MPSoC reset driver binding = +-------------------------------------------------------------------------- +The Zynq UltraScale+ MPSoC has several different resets. + +See Chapter 36 of the Zynq UltraScale+ MPSoC TRM (UG) for more information +about zynqmp resets. 
+ +Please also refer to reset.txt in this directory for common reset +controller binding usage. + +Required Properties: +- compatible: "xlnx,zynqmp-reset" +- #reset-cells: Specifies the number of cells needed to encode reset + line, should be 1 + +------- +Example +------- + +firmware { + zynqmp_firmware: zynqmp-firmware { + compatible = "xlnx,zynqmp-firmware"; + method = "smc"; + + zynqmp_reset: reset-controller { + compatible = "xlnx,zynqmp-reset"; + #reset-cells = <1>; + }; + }; +}; + +Specifying reset lines connected to IP modules +============================================== + +Device nodes that need access to reset lines should +specify them as a reset phandle in their corresponding node as +specified in reset.txt. + +For a list of all valid reset indices see + + +Example: + +serdes: zynqmp_phy@fd400000 { + ... + + resets = <&zynqmp_reset ZYNQMP_RESET_SATA>; + reset-names = "sata_rst"; + + ... +}; diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt index 18b892d010d8..2405e35a1bc0 100644 --- a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt +++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt @@ -16,6 +16,7 @@ Required properties: "abracon,ab1803" "abracon,ab1804" "abracon,ab1805" + "microcrystal,rv1805" Using "abracon,abx80x" will enable chip autodetection. - "reg": I2C bus address of the device diff --git a/Documentation/devicetree/bindings/rtc/cdns,rtc.txt b/Documentation/devicetree/bindings/rtc/cdns,rtc.txt new file mode 100644 index 000000000000..14a04487b432 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/cdns,rtc.txt @@ -0,0 +1,25 @@ +Cadence Real Time Clock + +The Cadence RTC controller provides date, time and alarm capabilities. +The alarm may wake the system from low-power state. + +Required properties: +- compatible: Should be "cdns,rtc-r109v3" +- reg: Specifies base physical address and size of the register area. +- interrupts: A single interrupt specifier. +- clocks: Must contain two entries: + - pclk: APB registers clock + - ref_clk: reference 1Hz or 100Hz clock, depending on IP configuration + See ../clocks/clock-bindings.txt for details. + +Example: + rtc0: rtc@fd080000 { + compatible = "cdns,rtc-r109v3"; + reg = <0xfd080000 0x1000>; + + clock-names = "pclk", "ref_clk"; + clocks = <&sysclock>, <&refclock>; + + interrupt-parent = <&gic>; + interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/rtc/isil,isl1208.txt b/Documentation/devicetree/bindings/rtc/isil,isl1208.txt new file mode 100644 index 000000000000..51f003006f04 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/isil,isl1208.txt @@ -0,0 +1,38 @@ +Intersil ISL1209/19 I2C RTC/Alarm chip with event in + +ISL12X9 have additional pins EVIN and #EVDET for tamper detection, while the +ISL1208 and ISL1218 do not. They all use the same driver with the bindings +described here, with chip-specific properties as noted.
+ +Required properties supported by the device: + - "compatible": Should be one of the following: + - "isil,isl1208" + - "isil,isl1209" + - "isil,isl1218" + - "isil,isl1219" + - "reg": I2C bus address of the device + +Optional properties: + - "interrupt-names": list which may contains "irq" and "evdet" + evdet applies to isl1209 and isl1219 only + - "interrupts": list of interrupts for "irq" and "evdet" + evdet applies to isl1209 and isl1219 only + - "isil,ev-evienb": Enable or disable internal pull on EVIN pin + Applies to isl1209 and isl1219 only + Possible values are 0 and 1 + Value 0 enables internal pull-up on evin pin, 1 disables it. + Default will leave the non-volatile configuration of the pullup + as is. + +Example isl1219 node with #IRQ pin connected to SoC gpio1 pin12 and #EVDET pin +connected to SoC gpio2 pin 24 and internal pull-up enabled in EVIN pin. + + isl1219: rtc@68 { + compatible = "isil,isl1219"; + reg = <0x68>; + interrupt-names = "irq", "evdet"; + interrupts-extended = <&gpio1 12 IRQ_TYPE_EDGE_FALLING>, + <&gpio2 24 IRQ_TYPE_EDGE_FALLING>; + isil,ev-evienb = <1>; + }; + diff --git a/Documentation/devicetree/bindings/rtc/isil,isl1219.txt b/Documentation/devicetree/bindings/rtc/isil,isl1219.txt deleted file mode 100644 index c3efd48e91c2..000000000000 --- a/Documentation/devicetree/bindings/rtc/isil,isl1219.txt +++ /dev/null @@ -1,29 +0,0 @@ -Intersil ISL1219 I2C RTC/Alarm chip with event in - -ISL1219 has additional pins EVIN and #EVDET for tamper detection. - -Required properties supported by the device: - - - "compatible": must be "isil,isl1219" - - "reg": I2C bus address of the device - -Optional properties: - - - "interrupt-names": list which may contains "irq" and "evdet" - - "interrupts": list of interrupts for "irq" and "evdet" - - "isil,ev-evienb": if present EV.EVIENB bit is set to the specified - value for proper operation. - - -Example isl1219 node with #IRQ pin connected to SoC gpio1 pin12 - and #EVDET pin connected to SoC gpio2 pin 24: - - isl1219: rtc@68 { - compatible = "isil,isl1219"; - reg = <0x68>; - interrupt-names = "irq", "evdet"; - interrupts-extended = <&gpio1 12 IRQ_TYPE_EDGE_FALLING>, - <&gpio2 24 IRQ_TYPE_EDGE_FALLING>; - isil,ev-evienb = <1>; - }; - diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt new file mode 100644 index 000000000000..d3e380ad712d --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/nxp,pcf85063.txt @@ -0,0 +1,18 @@ +* NXP PCF85063 Real Time Clock + +Required properties: +- compatible: Should contain "nxp,pcf85063". +- reg: I2C address for chip. + +Optional property: +- quartz-load-femtofarads: The capacitive load of the quartz(x-tal), + expressed in femto Farad (fF). Valid values are 7000 and 12500. + Default value (if no value is specified) is 7000fF. + +Example: + +pcf85063: rtc@51 { + compatible = "nxp,pcf85063"; + reg = <0x51>; + quartz-load-femtofarads = <12500>; +}; diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt new file mode 100644 index 000000000000..0b1080c60f63 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt @@ -0,0 +1,18 @@ +* NXP PCF8523 Real Time Clock + +Required properties: +- compatible: Should contain "nxp,pcf8523". +- reg: I2C address for chip. + +Optional property: +- quartz-load-femtofarads: The capacitive load of the quartz(x-tal), + expressed in femto Farad (fF). Valid values are 7000 and 12500. 
+ Default value (if no value is specified) is 12500fF. + +Example: + +pcf8523: rtc@68 { + compatible = "nxp,pcf8523"; + reg = <0x68>; + quartz-load-femtofarads = <7000>; +}; diff --git a/Documentation/devicetree/bindings/rtc/rtc-meson.txt b/Documentation/devicetree/bindings/rtc/rtc-meson.txt new file mode 100644 index 000000000000..e921fe66a362 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/rtc-meson.txt @@ -0,0 +1,35 @@ +* Amlogic Meson6, Meson8, Meson8b and Meson8m2 RTC + +Required properties: +- compatible: should be one of the following describing the hardware: + * "amlogic,meson6-rtc" + * "amlogic,meson8-rtc" + * "amlogic,meson8b-rtc" + * "amlogic,meson8m2-rtc" + +- reg: physical register space for the controller's memory mapped registers. +- interrupts: the interrupt line of the RTC block. +- clocks: reference to the external 32.768kHz crystal oscillator. +- vdd-supply: reference to the power supply of the RTC block. +- resets: reset controller reference to allow reset of the controller + +Optional properties for the battery-backed non-volatile memory: +- #address-cells: should be 1 to address the battery-backed non-volatile memory +- #size-cells: should be 1 to reference the battery-backed non-volatile memory + +Optional child nodes: +- see ../nvmem/nvmem.txt + +Example: + + rtc: rtc@740 { + compatible = "amlogic,meson6-rtc"; + reg = <0x740 0x14>; + interrupts = ; + clocks = <&rtc32k_xtal>; + vdd-supply = <&rtc_vdd>; + resets = <&reset RESET_RTC>; + + #address-cells = <1>; + #size-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/rtc/rtc.txt b/Documentation/devicetree/bindings/rtc/rtc.txt index 7c8da6926095..f4687c68c08c 100644 --- a/Documentation/devicetree/bindings/rtc/rtc.txt +++ b/Documentation/devicetree/bindings/rtc/rtc.txt @@ -21,12 +21,16 @@ Optional properties The following properties may not be supported by all drivers. However, if a driver wants to support one of the below features, it should adapt the bindings below. -- trickle-resistor-ohms : Selected resistor for trickle charger. Should be given - if trickle charger should be enabled -- trickle-diode-disable : Do not use internal trickle charger diode Should be - given if internal trickle charger diode should be - disabled -- wakeup-source : Enables wake up of host system on alarm +- trickle-resistor-ohms : Selected resistor for trickle charger. Should be given + if trickle charger should be enabled +- trickle-diode-disable : Do not use internal trickle charger diode Should be + given if internal trickle charger diode should be + disabled +- wakeup-source : Enables wake up of host system on alarm +- quartz-load-femtofarads : The capacitive load of the quartz(x-tal), + expressed in femto Farad (fF). + The default value shall be listed (if optional), + and likewise all valid values. Trivial RTCs ------------ @@ -39,21 +43,23 @@ possibly an interrupt line. 
Compatible Vendor / Chip ========== ============= abracon,abb5zes3 AB-RTCMC-32.768kHz-B5ZE-S3: Real Time Clock/Calendar Module with I2C Interface +abracon,abeoz9 AB-RTCMC-32.768kHz-EOZ9: Real Time Clock/Calendar Module with I2C Interface dallas,ds1374 I2C, 32-Bit Binary Counter Watchdog RTC with Trickle Charger and Reset Input/Output dallas,ds1672 Dallas DS1672 Real-time Clock dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM epson,rx8010 I2C-BUS INTERFACE REAL TIME CLOCK MODULE +epson,rx8571 I2C-BUS INTERFACE REAL TIME CLOCK MODULE with Battery Backed RAM epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE emmicro,em3027 EM Microelectronic EM3027 Real-time Clock isil,isl1208 Intersil ISL1208 Low Power RTC with Battery Backed SRAM isil,isl1218 Intersil ISL1218 Low Power RTC with Battery Backed SRAM isil,isl12022 Intersil ISL12022 Real-time Clock +microcrystal,rv3028 Real Time Clock Module with I2C-Bus microcrystal,rv3029 Real Time Clock Module with I2C-Bus +microcrystal,rv8523 Real Time Clock nxp,pcf2127 Real-time clock nxp,pcf2129 Real-time clock -nxp,pcf8523 Real-time Clock nxp,pcf8563 Real-time clock/calendar -nxp,pcf85063 Tiny Real-Time Clock pericom,pt7c4338 Real-time Clock Module ricoh,r2025sd I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC ricoh,r2221tl I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC @@ -62,3 +68,4 @@ ricoh,rs5c372b I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC ricoh,rv5c386 I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC ricoh,rv5c387a I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC sii,s35390a 2-wire CMOS real-time clock +whwave,sd3078 I2C bus SERIAL INTERFACE REAL-TIME CLOCK IC diff --git a/Documentation/devicetree/bindings/serial/8250.txt b/Documentation/devicetree/bindings/serial/8250.txt index da50321da34d..3cba12f855b7 100644 --- a/Documentation/devicetree/bindings/serial/8250.txt +++ b/Documentation/devicetree/bindings/serial/8250.txt @@ -21,6 +21,7 @@ Required properties: - "altr,16550-FIFO128" - "fsl,16550-FIFO64" - "fsl,ns16550" + - "intel,xscale-uart" - "ti,da830-uart" - "aspeed,ast2400-vuart" - "aspeed,ast2500-vuart" diff --git a/Documentation/devicetree/bindings/serial/ingenic,uart.txt b/Documentation/devicetree/bindings/serial/ingenic,uart.txt index c3c6406d5cfe..24ed8769f4af 100644 --- a/Documentation/devicetree/bindings/serial/ingenic,uart.txt +++ b/Documentation/devicetree/bindings/serial/ingenic,uart.txt @@ -6,7 +6,8 @@ Required properties: - "ingenic,jz4760-uart", - "ingenic,jz4770-uart", - "ingenic,jz4775-uart", - - "ingenic,jz4780-uart". + - "ingenic,jz4780-uart", + - "ingenic,x1000-uart". - reg : offset and length of the register set for the device. - interrupts : should contain uart interrupt. - clocks : phandles to the module & baud clocks. diff --git a/Documentation/devicetree/bindings/serial/milbeaut-uart.txt b/Documentation/devicetree/bindings/serial/milbeaut-uart.txt new file mode 100644 index 000000000000..3d2fb1a7ba94 --- /dev/null +++ b/Documentation/devicetree/bindings/serial/milbeaut-uart.txt @@ -0,0 +1,21 @@ +Socionext Milbeaut UART controller + +Required properties: +- compatible: should be "socionext,milbeaut-usio-uart". +- reg: offset and length of the register set for the device. +- interrupts: two interrupts specifier. +- interrupt-names: should be "rx", "tx". +- clocks: phandle to the input clock. + +Optional properties: +- auto-flow-control: flow control enable. 
+ +Example: + usio1: usio_uart@1e700010 { + compatible = "socionext,milbeaut-usio-uart"; + reg = <0x1e700010 0x10>; + interrupts = <0 141 0x4>, <0 149 0x4>; + interrupt-names = "rx", "tx"; + clocks = <&clk 2>; + auto-flow-control; + }; diff --git a/Documentation/devicetree/bindings/serial/nvidia,tegra194-tcu.txt b/Documentation/devicetree/bindings/serial/nvidia,tegra194-tcu.txt new file mode 100644 index 000000000000..085a8591accd --- /dev/null +++ b/Documentation/devicetree/bindings/serial/nvidia,tegra194-tcu.txt @@ -0,0 +1,35 @@ +NVIDIA Tegra Combined UART (TCU) + +The TCU is a system for sharing a hardware UART instance among multiple +systems within the Tegra SoC. It is implemented through a mailbox- +based protocol where each "virtual UART" has a pair of mailboxes, one +for transmitting and one for receiving, that is used to communicate +with the hardware implementing the TCU. + +Required properties: +- name : Should be tcu +- compatible + Array of strings + One of: + - "nvidia,tegra194-tcu" +- mbox-names: + "rx" - Mailbox for receiving data from hardware UART + "tx" - Mailbox for transmitting data to hardware UART +- mboxes: Mailboxes corresponding to the mbox-names. + +This node is a mailbox consumer. See the following files for details of +the mailbox subsystem, and the specifiers implemented by the relevant +provider(s): + +- .../mailbox/mailbox.txt +- .../mailbox/nvidia,tegra186-hsp.txt + +Example bindings: +----------------- + +tcu: tcu { + compatible = "nvidia,tegra194-tcu"; + mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_SM 0>, + <&hsp_aon TEGRA_HSP_MBOX_TYPE_SM 1>; + mbox-names = "rx", "tx"; +}; diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt index c35d5ece1156..0a9b5444f4e6 100644 --- a/Documentation/devicetree/bindings/serial/omap_serial.txt +++ b/Documentation/devicetree/bindings/serial/omap_serial.txt @@ -22,6 +22,8 @@ Optional properties: - dma-names : "rx" for receive channel, "tx" for transmit channel. - rs485-rts-delay, rs485-rx-during-tx, linux,rs485-enabled-at-boot-time: see rs485.txt - rs485-rts-active-high: drive RTS high when sending (default is low). +- clocks: phandle to the functional clock as per + Documentation/devicetree/bindings/clock/clock-bindings.txt Example: diff --git a/Documentation/devicetree/bindings/serial/pl011.txt b/Documentation/devicetree/bindings/serial/pl011.txt deleted file mode 100644 index 77863aefe9ef..000000000000 --- a/Documentation/devicetree/bindings/serial/pl011.txt +++ /dev/null @@ -1,51 +0,0 @@ -* ARM AMBA Primecell PL011 serial UART - -Required properties: -- compatible: must be "arm,primecell", "arm,pl011", "zte,zx296702-uart" -- reg: exactly one register range with length 0x1000 -- interrupts: exactly one interrupt specifier - -Optional properties: -- pinctrl: - When present, must have one state named "default", - and may contain a second name named "sleep". The former - state sets up pins for ordinary operation whereas - the latter state will put the associated pins to sleep - when the UART is unused -- clocks: - When present, the first clock listed must correspond to - the clock named UARTCLK on the IP block, i.e. the clock - to the external serial line, whereas the second clock - must correspond to the PCLK clocking the internal logic - of the block. Just listing one clock (the first one) is - deprecated. 
-- clock-names: - When present, the first clock listed must be named - "uartclk" and the second clock listed must be named - "apb_pclk" -- dmas: - When present, may have one or two dma channels. - The first one must be named "rx", the second one - must be named "tx". -- auto-poll: - Enables polling when using RX DMA. -- poll-rate-ms: - Rate at which poll occurs when auto-poll is set, - default 100ms. -- poll-timeout-ms: - Poll timeout when auto-poll is set, default - 3000ms. - -See also bindings/arm/primecell.txt - -Example: - -uart@80120000 { - compatible = "arm,pl011", "arm,primecell"; - reg = <0x80120000 0x1000>; - interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>; - dmas = <&dma 13 0 0x2>, <&dma 13 0 0x0>; - dma-names = "rx", "tx"; - clocks = <&foo_clk>, <&bar_clk>; - clock-names = "uartclk", "apb_pclk"; -}; diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml new file mode 100644 index 000000000000..1a64d59152aa --- /dev/null +++ b/Documentation/devicetree/bindings/serial/pl011.yaml @@ -0,0 +1,126 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/serial/pl011.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ARM AMBA Primecell PL011 serial UART + +maintainers: + - Rob Herring + +allOf: + - $ref: /schemas/serial.yaml# + +# Need a custom select here or 'arm,primecell' will match on lots of nodes +select: + properties: + compatible: + contains: + enum: + - arm,pl011 + - zte,zx296702-uart + required: + - compatible + +properties: + compatible: + oneOf: + - items: + - const: arm,pl011 + - const: arm,primecell + - items: + - const: zte,zx296702-uart + - const: arm,primecell + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + pinctrl-0: true + pinctrl-1: true + + pinctrl-names: + description: + When present, must have one state named "default", + and may contain a second name named "sleep". The former + state sets up pins for ordinary operation whereas + the latter state will put the associated pins to sleep + when the UART is unused + minItems: 1 + items: + - const: default + - const: sleep + + clocks: + description: + When present, the first clock listed must correspond to + the clock named UARTCLK on the IP block, i.e. the clock + to the external serial line, whereas the second clock + must correspond to the PCLK clocking the internal logic + of the block. Just listing one clock (the first one) is + deprecated. + maxItems: 2 + + clock-names: + items: + - const: uartclk + - const: apb_pclk + + dmas: + minItems: 1 + maxItems: 2 + + dma-names: + minItems: 1 + items: + - const: rx + - const: tx + + auto-poll: + description: + Enables polling when using RX DMA. + type: boolean + + poll-rate-ms: + description: + Rate at which poll occurs when auto-poll is set. + default 100ms. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - default: 100 + + poll-timeout-ms: + description: + Poll timeout when auto-poll is set, default + 3000ms. + allOf: + - $ref: /schemas/types.yaml#/definitions/uint32 + - default: 3000 + +required: + - compatible + - reg + - interrupts + +dependencies: + poll-rate-ms: [ auto-poll ] + poll-timeout-ms: [ auto-poll ] + +additionalProperties: false + +examples: + - | + serial@80120000 { + compatible = "arm,pl011", "arm,primecell"; + reg = <0x80120000 0x1000>; + interrupts = <0 11 4>; + dmas = <&dma 13 0 0x2>, <&dma 13 0 0x0>; + dma-names = "rx", "tx"; + clocks = <&foo_clk>, <&bar_clk>; + clock-names = "uartclk", "apb_pclk"; + }; + +... 
diff --git a/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt b/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt deleted file mode 100644 index 8b9e0d4dc2e4..000000000000 --- a/Documentation/devicetree/bindings/serial/renesas,rzn1-uart.txt +++ /dev/null @@ -1,10 +0,0 @@ -Renesas RZ/N1 UART - -This controller is based on the Synopsys DesignWare ABP UART and inherits all -properties defined in snps-dw-apb-uart.txt except for the compatible property. - -Required properties: -- compatible : The device specific string followed by the generic RZ/N1 string. - Therefore it must be one of: - "renesas,r9a06g032-uart", "renesas,rzn1-uart" - "renesas,r9a06g033-uart", "renesas,rzn1-uart" diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt index 20232ad05d89..dd63151dc8b6 100644 --- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt +++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt @@ -29,7 +29,9 @@ Required properties: - "renesas,scif-r8a774c0" for R8A774C0 (RZ/G2E) SCIF compatible UART. - "renesas,hscif-r8a774c0" for R8A774C0 (RZ/G2E) HSCIF compatible UART. - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART. + - "renesas,hscif-r8a7778" for R8A7778 (R-Car M1) HSCIF compatible UART. - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. + - "renesas,hscif-r8a7779" for R8A7779 (R-Car H1) HSCIF compatible UART. - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART. - "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART. - "renesas,scifb-r8a7790" for R8A7790 (R-Car H2) SCIFB compatible UART. diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt deleted file mode 100644 index 12bbe9f22560..000000000000 --- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt +++ /dev/null @@ -1,76 +0,0 @@ -* Synopsys DesignWare ABP UART - -Required properties: -- compatible : "snps,dw-apb-uart" -- reg : offset and length of the register set for the device. -- interrupts : should contain uart interrupt. - -Clock handling: -The clock rate of the input clock needs to be supplied by one of -- clock-frequency : the input clock frequency for the UART. -- clocks : phandle to the input clock - -The supplying peripheral clock can also be handled, needing a second property -- clock-names: tuple listing input clock names. - Required elements: "baudclk", "apb_pclk" - -Optional properties: -- snps,uart-16550-compatible : reflects the value of UART_16550_COMPATIBLE - configuration parameter. Define this if your UART does not implement the busy - functionality. -- resets : phandle to the parent reset controller. -- reg-shift : quantity to shift the register offsets by. If this property is - not present then the register offsets are not shifted. -- reg-io-width : the size (in bytes) of the IO accesses that should be - performed on the device. If this property is not present then single byte - accesses are used. -- dcd-override : Override the DCD modem status signal. This signal will always - be reported as active instead of being obtained from the modem status - register. Define this if your serial port does not use this pin. -- dsr-override : Override the DTS modem status signal. This signal will always - be reported as active instead of being obtained from the modem status - register. 
Define this if your serial port does not use this pin. -- cts-override : Override the CTS modem status signal. This signal will always - be reported as active instead of being obtained from the modem status - register. Define this if your serial port does not use this pin. -- ri-override : Override the RI modem status signal. This signal will always be - reported as inactive instead of being obtained from the modem status register. - Define this if your serial port does not use this pin. - -Example: - - uart@80230000 { - compatible = "snps,dw-apb-uart"; - reg = <0x80230000 0x100>; - clock-frequency = <3686400>; - interrupts = <10>; - reg-shift = <2>; - reg-io-width = <4>; - dcd-override; - dsr-override; - cts-override; - ri-override; - }; - -Example with one clock: - - uart@80230000 { - compatible = "snps,dw-apb-uart"; - reg = <0x80230000 0x100>; - clocks = <&baudclk>; - interrupts = <10>; - reg-shift = <2>; - reg-io-width = <4>; - }; - -Example with two clocks: - - uart@80230000 { - compatible = "snps,dw-apb-uart"; - reg = <0x80230000 0x100>; - clocks = <&baudclk>, <&apb_pclk>; - clock-names = "baudclk", "apb_pclk"; - interrupts = <10>; - reg-shift = <2>; - reg-io-width = <4>; - }; diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml new file mode 100644 index 000000000000..b42002542690 --- /dev/null +++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/serial/snps-dw-apb-uart.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synopsys DesignWare ABP UART + +maintainers: + - Rob Herring + +allOf: + - $ref: /schemas/serial.yaml# + +properties: + compatible: + oneOf: + - items: + - enum: + - renesas,r9a06g032-uart + - renesas,r9a06g033-uart + - const: renesas,rzn1-uart + - items: + - enum: + - rockchip,px30-uart + - rockchip,rk3036-uart + - rockchip,rk3066-uart + - rockchip,rk3188-uart + - rockchip,rk3288-uart + - rockchip,rk3328-uart + - rockchip,rk3368-uart + - rockchip,rk3399-uart + - rockchip,rv1108-uart + - const: snps,dw-apb-uart + - items: + - enum: + - brcm,bcm11351-dw-apb-uart + - brcm,bcm21664-dw-apb-uart + - const: snps,dw-apb-uart + - const: snps,dw-apb-uart + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clock-frequency: true + + clocks: + minItems: 1 + maxItems: 2 + + clock-names: + items: + - const: baudclk + - const: apb_pclk + + snps,uart-16550-compatible: + description: reflects the value of UART_16550_COMPATIBLE configuration + parameter. Define this if your UART does not implement the busy functionality. + type: boolean + + resets: + maxItems: 1 + + reg-shift: true + + reg-io-width: true + + dcd-override: + description: Override the DCD modem status signal. This signal will + always be reported as active instead of being obtained from the modem + status register. Define this if your serial port does not use this + pin. + type: boolean + + dsr-override: + description: Override the DTS modem status signal. This signal will + always be reported as active instead of being obtained from the modem + status register. Define this if your serial port does not use this + pin. + type: boolean + + cts-override: + description: Override the CTS modem status signal. This signal will + always be reported as active instead of being obtained from the modem + status register. Define this if your serial port does not use this + pin. 
+ type: boolean + + ri-override: + description: Override the RI modem status signal. This signal will always + be reported as inactive instead of being obtained from the modem status + register. Define this if your serial port does not use this pin. + type: boolean + +required: + - compatible + - reg + - interrupts + +examples: + - | + serial@80230000 { + compatible = "snps,dw-apb-uart"; + reg = <0x80230000 0x100>; + clock-frequency = <3686400>; + interrupts = <10>; + reg-shift = <2>; + reg-io-width = <4>; + dcd-override; + dsr-override; + cts-override; + ri-override; + }; + + - | + // Example with one clock: + serial@80230000 { + compatible = "snps,dw-apb-uart"; + reg = <0x80230000 0x100>; + clocks = <&baudclk>; + interrupts = <10>; + reg-shift = <2>; + reg-io-width = <4>; + }; + + - | + // Example with two clocks: + serial@80230000 { + compatible = "snps,dw-apb-uart"; + reg = <0x80230000 0x100>; + clocks = <&baudclk>, <&apb_pclk>; + clock-names = "baudclk", "apb_pclk"; + interrupts = <10>; + reg-shift = <2>; + reg-io-width = <4>; + }; +... diff --git a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt index 36603419d6f8..0e72183f52bc 100644 --- a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt +++ b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt @@ -4,14 +4,10 @@ Required properties: - compatible : "olpc,ap-sp" - reg : base address and length of SoC's WTM registers - interrupts : SP-AP interrupt -- clocks : phandle + clock-specifier for the clock that drives the WTM -- clock-names: should be "sp" Example: ap-sp@d4290000 { compatible = "olpc,ap-sp"; reg = <0xd4290000 0x1000>; interrupts = <40>; - clocks = <&soc_clocks MMP2_CLK_SP>; - clock-names = "sp"; } diff --git a/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt b/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt index 205a54bcd7c7..6bf6b43f8dd8 100644 --- a/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt +++ b/Documentation/devicetree/bindings/soc/amlogic/clk-measure.txt @@ -9,6 +9,8 @@ Required properties: "amlogic,meson-gx-clk-measure" for GX SoCs "amlogic,meson8-clk-measure" for Meson8 SoCs "amlogic,meson8b-clk-measure" for Meson8b SoCs + "amlogic,meson-axg-clk-measure" for AXG SoCs + "amlogic,meson-g12a-clk-measure" for G12a SoCs - reg: base address and size of the Clock Measurer register space. Example: diff --git a/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt new file mode 100644 index 000000000000..3b7d32956391 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/bcm/brcm,bcm2835-pm.txt @@ -0,0 +1,46 @@ +BCM2835 PM (Power domains, watchdog) + +The PM block controls power domains and some reset lines, and includes +a watchdog timer. This binding supersedes the brcm,bcm2835-pm-wdt +binding which covered some of PM's register range and functionality. + +Required properties: + +- compatible: Should be "brcm,bcm2835-pm" +- reg: Specifies base physical address and size of the two + register ranges ("PM" and "ASYNC_BRIDGE" in that + order) +- clocks: a) v3d: The V3D clock from CPRMAN + b) peri_image: The PERI_IMAGE clock from CPRMAN + c) h264: The H264 clock from CPRMAN + d) isp: The ISP clock from CPRMAN +- #reset-cells: Should be 1. This property follows the reset controller + bindings[1]. +- #power-domain-cells: Should be 1. This property follows the power domain + bindings[2]. 
+ +Optional properties: + +- timeout-sec: Contains the watchdog timeout in seconds +- system-power-controller: Whether the watchdog is controlling the + system power. This node follows the power controller bindings[3]. + +[1] Documentation/devicetree/bindings/reset/reset.txt +[2] Documentation/devicetree/bindings/power/power_domain.txt +[3] Documentation/devicetree/bindings/power/power-controller.txt + +Example: + +pm { + compatible = "brcm,bcm2835-pm", "brcm,bcm2835-pm-wdt"; + #power-domain-cells = <1>; + #reset-cells = <1>; + reg = <0x7e100000 0x114>, + <0x7e00a000 0x24>; + clocks = <&clocks BCM2835_CLOCK_V3D>, + <&clocks BCM2835_CLOCK_PERI_IMAGE>, + <&clocks BCM2835_CLOCK_H264>, + <&clocks BCM2835_CLOCK_ISP>; + clock-names = "v3d", "peri_image", "h264", "isp"; + system-power-controller; +}; diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt index ec95705ba692..f3fa313963d5 100644 --- a/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt +++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd-rpm.txt @@ -23,6 +23,7 @@ resources. "qcom,rpm-msm8916" "qcom,rpm-msm8974" "qcom,rpm-msm8998" + "qcom,rpm-sdm660" "qcom,rpm-qcs404" - qcom,smd-channels: diff --git a/Documentation/devicetree/bindings/sound/adi,adau1977.txt b/Documentation/devicetree/bindings/sound/adi,adau1977.txt index e79aeef73f28..9225472c80b4 100644 --- a/Documentation/devicetree/bindings/sound/adi,adau1977.txt +++ b/Documentation/devicetree/bindings/sound/adi,adau1977.txt @@ -17,12 +17,18 @@ Required properties: Documentation/devicetree/bindings/regulator/regulator.txt Optional properties: - - reset-gpio: the reset pin for the chip, for more details consult + - reset-gpios: the reset pin for the chip, for more details consult Documentation/devicetree/bindings/gpio/gpio.txt - DVDD-supply: supply voltage for the digital core, please consult Documentation/devicetree/bindings/regulator/regulator.txt +- adi,micbias: configures the voltage setting for the MICBIAS pin. + Select 0/1/2/3/4/5/6/7/8 to specify MICBIAS voltage + 5V/5.5V/6V/6.5V/7V/7.5V/8V/8.5V/9V + If not specified the default value will be "7" meaning 8.5 Volts. + This property is only valid for the ADAU1977 + For required properties on SPI, please consult Documentation/devicetree/bindings/spi/spi-bus.txt @@ -40,7 +46,8 @@ Examples: AVDD-supply = <®ulator>; DVDD-supply = <®ulator_digital>; - reset_gpio = <&gpio 10 GPIO_ACTIVE_LOW>; + adi,micbias = <3>; + reset-gpios = <&gpio 10 GPIO_ACTIVE_LOW>; }; adau1977_i2c: adau1977@11 { @@ -50,5 +57,5 @@ Examples: AVDD-supply = <®ulator>; DVDD-supply = <®ulator_digital>; - reset_gpio = <&gpio 10 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio 10 GPIO_ACTIVE_LOW>; }; diff --git a/Documentation/devicetree/bindings/sound/ak4458.txt b/Documentation/devicetree/bindings/sound/ak4458.txt index 7839be78448d..e5820235e0d5 100644 --- a/Documentation/devicetree/bindings/sound/ak4458.txt +++ b/Documentation/devicetree/bindings/sound/ak4458.txt @@ -4,7 +4,7 @@ This device supports I2C mode. 
Required properties: -- compatible : "asahi-kasei,ak4458" +- compatible : "asahi-kasei,ak4458" or "asahi-kasei,ak4497" - reg : The I2C address of the device for I2C Optional properties: diff --git a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt deleted file mode 100644 index 62d42768a00b..000000000000 --- a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt +++ /dev/null @@ -1,123 +0,0 @@ -Audio-Graph-SCU-Card: - -Audio-Graph-SCU-Card is "Audio-Graph-Card" + "ALSA DPCM". - -It is based on common bindings for device graphs. -see ${LINUX}/Documentation/devicetree/bindings/graph.txt - -Basically, Audio-Graph-SCU-Card property is same as -Simple-Card / Simple-SCU-Card / Audio-Graph-Card. -see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt - ${LINUX}/Documentation/devicetree/bindings/sound/simple-scu-card.txt - ${LINUX}/Documentation/devicetree/bindings/sound/audio-graph-card.txt - -Below are same as Simple-Card / Audio-Graph-Card. - -- label -- dai-format -- frame-master -- bitclock-master -- bitclock-inversion -- frame-inversion -- dai-tdm-slot-num -- dai-tdm-slot-width -- clocks / system-clock-frequency - -Below are same as Simple-SCU-Card. - -- convert-rate -- convert-channels -- prefix -- routing - -Required properties: - -- compatible : "audio-graph-scu-card"; -- dais : list of CPU DAI port{s} - -Example 1. Sampling Rate Conversion - - sound_card { - compatible = "audio-graph-scu-card"; - - label = "sound-card"; - prefix = "codec"; - routing = "codec Playback", "DAI0 Playback", - "DAI0 Capture", "codec Capture"; - convert-rate = <48000>; - - dais = <&cpu_port>; - }; - - audio-codec { - ... - - port { - codec_endpoint: endpoint { - remote-endpoint = <&cpu_endpoint>; - }; - }; - }; - - dai-controller { - ... - cpu_port: port { - cpu_endpoint: endpoint { - remote-endpoint = <&codec_endpoint>; - - dai-format = "left_j"; - ... - }; - }; - }; - -Example 2. 2 CPU 1 Codec (Mixing) - - sound_card { - compatible = "audio-graph-scu-card"; - - label = "sound-card"; - routing = "codec Playback", "DAI0 Playback", - "codec Playback", "DAI1 Playback", - "DAI0 Capture", "codec Capture"; - - dais = <&cpu_port0 - &cpu_port1>; - }; - - audio-codec { - ... - - audio-graph-card,prefix = "codec"; - audio-graph-card,convert-rate = <48000>; - port { - codec_endpoint0: endpoint { - remote-endpoint = <&cpu_endpoint0>; - }; - codec_endpoint1: endpoint { - remote-endpoint = <&cpu_endpoint1>; - }; - }; - }; - - dai-controller { - ... - ports { - cpu_port0: port { - cpu_endpoint0: endpoint { - remote-endpoint = <&codec_endpoint0>; - - dai-format = "left_j"; - ... - }; - }; - cpu_port1: port { - cpu_endpoint1: endpoint { - remote-endpoint = <&codec_endpoint1>; - - dai-format = "left_j"; - ... - }; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/sound/cs35l36.txt b/Documentation/devicetree/bindings/sound/cs35l36.txt new file mode 100644 index 000000000000..912bd162b477 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/cs35l36.txt @@ -0,0 +1,168 @@ +CS35L36 Speaker Amplifier + +Required properties: + + - compatible : "cirrus,cs35l36" + + - reg : the I2C address of the device for I2C + + - VA-supply, VP-supply : power supplies for the device, + as covered in + Documentation/devicetree/bindings/regulator/regulator.txt. + + - cirrus,boost-ctl-millivolt : Boost Voltage Value. Configures the boost + converter's output voltage in mV. 
The range is from 2550mV to 12000mV with + increments of 50mV. + (Default) VP + + - cirrus,boost-peak-milliamp : Boost-converter peak current limit in mA. + Configures the peak current by monitoring the current through the boost FET. + Range starts at 1600mA and goes to a maximum of 4500mA with increments of + 50mA. + (Default) 4.50 Amps + + - cirrus,boost-ind-nanohenry : Inductor estimation LBST reference value. + Seeds the digital boost converter's inductor estimation block with the initial + inductance value to reference. + + 1000 = 1uH (Default) + 1200 = 1.2uH + +Optional properties: + - cirrus,multi-amp-mode : Boolean to determine if there is more than + one amplifier in the system. If more than one, it is best to Hi-Z the ASP + port to prevent bus contention on the output signal + + - cirrus,boost-ctl-select : Boost converter control source selection. + Selects the source of the BST_CTL target VBST voltage for the boost + converter to generate. + 0x00 - Control Port Value + 0x01 - Class H Tracking (Default) + 0x10 - MultiDevice Sync Value + + - cirrus,amp-pcm-inv : Boolean to determine whether the Amplifier will invert incoming + PCM data + + - cirrus,imon-pol-inv : Boolean to determine whether the Amplifier will invert the + polarity of outbound IMON feedback data + + - cirrus,vmon-pol-inv : Boolean to determine whether the Amplifier will invert the + polarity of outbound VMON feedback data + + - cirrus,dcm-mode-enable : Boost converter automatic DCM Mode enable. + This enables the digital boost converter to operate in a low power + (Discontinuous Conduction) mode during low loading conditions. + + - cirrus,weak-fet-disable : Boolean : The strength of the output drivers is + reduced when operating in a Weak-FET Drive Mode and must not be used to drive + a large load. + + - cirrus,classh-wk-fet-delay : Weak-FET entry delay. Controls the delay + (in ms) before the Class H algorithm switches to the weak-FET voltage + (after the audio falls and remains below the value specified in WKFET_AMP_THLD). + + 0 = 0ms + 1 = 5ms + 2 = 10ms + 3 = 50ms + 4 = 100ms (Default) + 5 = 200ms + 6 = 500ms + 7 = 1000ms + + - cirrus,classh-weak-fet-thld-millivolt : Weak-FET amplifier drive threshold. + Configures the signal threshold at which the PWM output stage enters + weak-FET operation. The range is 50mV to 700mV in 50mV increments. + + - cirrus,temp-warn-threshold : Amplifier overtemperature warning threshold. + Configures the threshold at which the overtemperature warning condition occurs. + When the threshold is met, the overtemperature warning attenuation is applied + and the TEMP_WARN_EINT interrupt status bit is set. + If TEMP_WARN_MASK = 0, INTb is asserted. + + 0 = 105C + 1 = 115C + 2 = 125C (Default) + 3 = 135C + + - cirrus,irq-drive-select : Selects the driver type of the selected interrupt + output. + + 0 = Open-drain + 1 = Push-pull (Default) + + - cirrus,irq-gpio-select : Selects the pin to serve as the programmable + interrupt output. + + 0 = PDM_DATA / SWIRE_SD / INT (Default) + 1 = GPIO + +Optional properties for the "cirrus,vpbr-config" Sub-node + + - cirrus,vpbr-en : VBST brownout prevention enable. Configures whether the + VBST brownout prevention algorithm is enabled or disabled. + + 0 = VBST brownout prevention disabled (default) + 1 = VBST brownout prevention enabled + + See Section 7.31.1 VPBR Config for configuration options & further details + + - cirrus,vpbr-thld : Initial VPBR threshold. Configures the VP brownout + threshold voltage + + - cirrus,vpbr-atk-rate : Attenuation attack step rate.
Configures the + amount delay between consecutive volume attenuation steps when a brownout + condition is present and the VP brownout condition is in an attacking state. + + - cirrus,vpbr-atk-vol : VP brownout prevention step size. Configures the VP + brownout prevention attacking attenuation step size when operating in either + digital volume or analog gain modes. + + - cirrus,vpbr-max-attn : Maximum attenuation that the VP brownout prevention + can apply to the audio signal. + + - cirrus,vpbr-wait : Configures the delay time between a brownout condition + no longer being present and the VP brownout prevention entering an attenuation + release state. + + - cirrus,vpbr-rel-rate : Attenuation release step rate. Configures the delay + between consecutive volume attenuation release steps when a brownout condition + is not longer present and the VP brownout is in an attenuation release state. + + - cirrus,vpbr-mute-en : During the attack state, if the vpbr-max-attn value + is reached, the error condition still remains, and this bit is set, the audio + is muted. + +Example: + +cs35l36: cs35l36@40 { + compatible = "cirrus,cs35l36"; + reg = <0x40>; + VA-supply = <&dummy_vreg>; + VP-supply = <&dummy_vreg>; + reset-gpios = <&gpio0 54 0>; + interrupt-parent = <&gpio8>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + + cirrus,boost-ind-nanohenry = <1000>; + cirrus,boost-ctl-millivolt = <10000>; + cirrus,boost-peak-milliamp = <4500>; + cirrus,boost-ctl-select = <0x00>; + cirrus,weak-fet-delay = <0x04>; + cirrus,weak-fet-thld = <0x01>; + cirrus,temp-warn-threshold = <0x01>; + cirrus,multi-amp-mode; + cirrus,irq-drive-select = <0x01>; + cirrus,irq-gpio-select = <0x01>; + + cirrus,vpbr-config { + cirrus,vpbr-en = <0x00>; + cirrus,vpbr-thld = <0x05>; + cirrus,vpbr-atk-rate = <0x02>; + cirrus,vpbr-atk-vol = <0x01>; + cirrus,vpbr-max-attn = <0x09>; + cirrus,vpbr-wait = <0x01>; + cirrus,vpbr-rel-rate = <0x05>; + cirrus,vpbr-mute-en = <0x00>; + }; +}; diff --git a/Documentation/devicetree/bindings/sound/cs4341.txt b/Documentation/devicetree/bindings/sound/cs4341.txt new file mode 100644 index 000000000000..12b4aa8ef0db --- /dev/null +++ b/Documentation/devicetree/bindings/sound/cs4341.txt @@ -0,0 +1,22 @@ +Cirrus Logic CS4341 audio DAC + +This device supports both I2C and SPI (configured with pin strapping +on the board). + +Required properties: + - compatible: "cirrus,cs4341a" + - reg : the I2C address of the device for I2C, the chip select + number for SPI. + +For required properties on I2C-bus, please consult +Documentation/devicetree/bindings/i2c/i2c.txt +For required properties on SPI-bus, please consult +Documentation/devicetree/bindings/spi/spi-bus.txt + +Example: + codec: cs4341@0 { + #sound-dai-cells = <0>; + compatible = "cirrus,cs4341a"; + reg = <0>; + spi-max-frequency = <6000000>; + }; diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt index b279b6072bd5..a58f79f5345c 100644 --- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt +++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt @@ -45,6 +45,23 @@ Optional properties: - fck_parent : Should contain a valid clock name which will be used as parent for the McASP fck +Optional GPIO support: +If any McASP pin need to be used as GPIO then the McASP node must have: +... + gpio-controller + #gpio-cells = <2>; +... + +When requesting a GPIO, the first parameter is the PIN index in McASP_P* +registers. 
+For example to request the AXR2 pin of mcasp8: +function-gpios = <&mcasp8 2 0>; + +Or to request the ACLKR pin of mcasp8: +function-gpios = <&mcasp8 29 0>; + +For generic gpio information, please refer to bindings/gpio/gpio.txt + Example: mcasp0: mcasp0@1d00000 { diff --git a/Documentation/devicetree/bindings/sound/fsl,micfil.txt b/Documentation/devicetree/bindings/sound/fsl,micfil.txt new file mode 100644 index 000000000000..53e227b15277 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/fsl,micfil.txt @@ -0,0 +1,32 @@ +NXP MICFIL Digital Audio Interface (MICFIL). + +The MICFIL digital interface provides a 16-bit audio signal from a PDM +microphone bitstream in a configurable output sampling rate. + +Required properties: + + - compatible : Compatible list, contains "fsl,imx8mm-micfil" + + - reg : Offset and length of the register set for the device. + + - interrupts : Contains the micfil interrupts. + + - clocks : Must contain an entry for each entry in clock-names. + + - clock-names : Must include the "ipg_clk" for register access and + "ipg_clk_app" for internal micfil clock. + + - dmas : Generic dma devicetree binding as described in + Documentation/devicetree/bindings/dma/dma.txt. + +Example: +micfil: micfil@30080000 { + compatible = "fsl,imx8mm-micfil"; + reg = <0x0 0x30080000 0x0 0x10000>; + interrupts = , + ; + clocks = <&clk IMX8MM_CLK_PDM_IPG>, + <&clk IMX8MM_CLK_PDM_ROOT>; + clock-names = "ipg_clk", "ipg_clk_app"; + dmas = <&sdma2 24 26 0x80000000>; +}; diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt new file mode 100644 index 000000000000..1084f7f22eea --- /dev/null +++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt @@ -0,0 +1,26 @@ +* Audio codec controlled by ChromeOS EC + +Google's ChromeOS EC codec is a digital mic codec provided by the +Embedded Controller (EC) and is controlled via a host-command interface. + +An EC codec node should only be found as a sub-node of the EC node (see +Documentation/devicetree/bindings/mfd/cros-ec.txt). + +Required properties: +- compatible: Must contain "google,cros-ec-codec" +- #sound-dai-cells: Should be 1. The cell specifies number of DAIs. +- max-dmic-gain: A number for maximum gain in dB on digital microphone. + +Example: + +cros-ec@0 { + compatible = "google,cros-ec-spi"; + + ... + + cros_ec_codec: ec-codec { + compatible = "google,cros-ec-codec"; + #sound-dai-cells = <1>; + max-dmic-gain = <43>; + }; +}; diff --git a/Documentation/devicetree/bindings/sound/ingenic,jz4725b-codec.txt b/Documentation/devicetree/bindings/sound/ingenic,jz4725b-codec.txt new file mode 100644 index 000000000000..05adc0d47b13 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/ingenic,jz4725b-codec.txt @@ -0,0 +1,20 @@ +Ingenic JZ4725B codec controller + +Required properties: +- compatible : "ingenic,jz4725b-codec" +- reg : codec registers location and length +- clocks : phandle to the AIC clock. +- clock-names: must be set to "aic". +- #sound-dai-cells: Must be set to 0. 
+ +Example: + +codec: audio-codec@100200a4 { + compatible = "ingenic,jz4725b-codec"; + reg = <0x100200a4 0x8>; + + #sound-dai-cells = <0>; + + clocks = <&cgu JZ4725B_CLK_AIC>; + clock-names = "aic"; +}; diff --git a/Documentation/devicetree/bindings/sound/ingenic,jz4740-codec.txt b/Documentation/devicetree/bindings/sound/ingenic,jz4740-codec.txt new file mode 100644 index 000000000000..1ffcade87e7b --- /dev/null +++ b/Documentation/devicetree/bindings/sound/ingenic,jz4740-codec.txt @@ -0,0 +1,20 @@ +Ingenic JZ4740 codec controller + +Required properties: +- compatible : "ingenic,jz4740-codec" +- reg : codec registers location and length +- clocks : phandle to the AIC clock. +- clock-names: must be set to "aic". +- #sound-dai-cells: Must be set to 0. + +Example: + +codec: audio-codec@10020080 { + compatible = "ingenic,jz4740-codec"; + reg = <0x10020080 0x8>; + + #sound-dai-cells = <0>; + + clocks = <&cgu JZ4740_CLK_AIC>; + clock-names = "aic"; +}; diff --git a/Documentation/devicetree/bindings/sound/mt6358.txt b/Documentation/devicetree/bindings/sound/mt6358.txt new file mode 100644 index 000000000000..5465730013a1 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mt6358.txt @@ -0,0 +1,18 @@ +Mediatek MT6358 Audio Codec + +The communication between MT6358 and SoC is through Mediatek PMIC wrapper. +For more detail, please refer to the Mediatek PMIC wrapper documentation. + +Must be a child node of PMIC wrapper. + +Required properties: + +- compatible : "mediatek,mt6358-sound". +- Avdd-supply : power source of AVDD + +Example: + +mt6358_snd { + compatible = "mediatek,mt6358-sound"; + Avdd-supply = <&mt6358_vaud28_reg>; +}; diff --git a/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt new file mode 100644 index 000000000000..396ba38619f6 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mt8183-afe-pcm.txt @@ -0,0 +1,36 @@ +Mediatek AFE PCM controller for mt8183 + +Required properties: +- compatible = "mediatek,mt8183-audio"; +- reg: register location and size +- interrupts: should contain AFE interrupt +- power-domains: should define the power domain +- clocks: Must contain an entry for each entry in clock-names +- clock-names: should have these clock names: + "infra_sys_audio_clk", + "mtkaif_26m_clk", + "top_mux_audio", + "top_mux_aud_intbus", + "top_sys_pll3_d4", + "top_clk26m_clk"; + +Example: + + afe: mt8183-afe-pcm@11220000 { + compatible = "mediatek,mt8183-audio"; + reg = <0 0x11220000 0 0x1000>; + interrupts = ; + power-domains = <&scpsys MT8183_POWER_DOMAIN_AUDIO>; + clocks = <&infrasys CLK_INFRA_AUDIO>, + <&infrasys CLK_INFRA_AUDIO_26M_BCLK>, + <&topckgen CLK_TOP_MUX_AUDIO>, + <&topckgen CLK_TOP_MUX_AUD_INTBUS>, + <&topckgen CLK_TOP_SYSPLL_D2_D4>, + <&clk26m>; + clock-names = "infra_sys_audio_clk", + "mtkaif_26m_clk", + "top_mux_audio", + "top_mux_aud_intbus", + "top_sys_pll_d2_d4", + "top_clk26m_clk"; + }; diff --git a/Documentation/devicetree/bindings/sound/mtk-btcvsd-snd.txt b/Documentation/devicetree/bindings/sound/mtk-btcvsd-snd.txt new file mode 100644 index 000000000000..679e44839b48 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/mtk-btcvsd-snd.txt @@ -0,0 +1,24 @@ +Mediatek ALSA BT SCO CVSD/MSBC Driver + +Required properties: +- compatible = "mediatek,mtk-btcvsd-snd"; +- reg: register location and size of PKV and SRAM_BANK2 +- interrupts: should contain BTSCO interrupt +- mediatek,infracfg: the phandle of INFRASYS +- mediatek,offset: Array containing the register offsets and mask + 
infra_misc_offset, + infra_conn_bt_cvsd_mask, + cvsd_mcu_read_offset, + cvsd_mcu_write_offset, + cvsd_packet_indicator_offset + +Example: + + mtk-btcvsd-snd@18000000 { + compatible = "mediatek,mtk-btcvsd-snd"; + reg=<0 0x18000000 0 0x1000>, + <0 0x18080000 0 0x8000>; + interrupts = ; + mediatek,infracfg = <&infrasys>; + mediatek,offset = <0xf00 0x800 0xfd0 0xfd4 0xfd8>; + }; diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt index 44d27456e8a4..21cd310963b1 100644 --- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt +++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt @@ -13,6 +13,10 @@ Required properties: See ../reset/reset.txt for details. - reset-names : Must include the following entries: hda, hda2hdmi, hda2codec_2x +Optional properties: +- nvidia,model : The user-visible name of this sound complex. Since the property + is optional, legacy boards can use default name provided in hda driver. + Example: hda@70030000 { @@ -27,4 +31,5 @@ hda@70030000 { <&tegra_car 128>, /* hda2hdmi */ <&tegra_car 111>; /* hda2codec_2x */ reset-names = "hda", "hda2hdmi", "hda2codec_2x"; + nvidia,model = "jetson-tk1-hda"; }; diff --git a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt index fdcea3d12ee5..e7d17dda55db 100644 --- a/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt +++ b/Documentation/devicetree/bindings/sound/qcom,msm8916-wcd-analog.txt @@ -30,6 +30,7 @@ Required properties - vdd-cdc-io-supply: phandle to VDD_CDC_IO regulator DT node. - vdd-cdc-tx-rx-cx-supply: phandle to VDD_CDC_TX/RX/CX regulator DT node. - vdd-micbias-supply: phandle of VDD_MICBIAS supply's regulator DT node. 
+ Optional Properties: - qcom,mbhc-vthreshold-low: Array of 5 threshold voltages in mV for 5 buttons detection on headset when the mbhc is powered up @@ -92,9 +93,9 @@ spmi_bus { "cdc_ear_cnp_int", "cdc_hphr_cnp_int", "cdc_hphl_cnp_int"; - VDD-CDC-IO-supply = <&pm8916_l5>; - VDD-CDC-TX-RX-CX-supply = <&pm8916_l5>; - VDD-MICBIAS-supply = <&pm8916_l13>; + vdd-cdc-io-supply = <&pm8916_l5>; + vdd-cdc-tx-rx-cx-supply = <&pm8916_l5>; + vdd-micbias-supply = <&pm8916_l13>; #sound-dai-cells = <1>; }; }; diff --git a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt index 1d8d49e30af7..5d6ea66a863f 100644 --- a/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt +++ b/Documentation/devicetree/bindings/sound/qcom,wcd9335.txt @@ -34,12 +34,12 @@ Required properties with SLIMbus Interface: Definition: Interrupt names of WCD INTR1 and INTR2 Should be: "intr1", "intr2" -- reset-gpio: +- reset-gpios: Usage: required Value type: Definition: Reset gpio line -- qcom,ifd: +- slim-ifc-dev: Usage: required Value type: Definition: SLIM interface device @@ -104,13 +104,13 @@ Required properties with SLIMbus Interface: Value type: Definition: Must be 1 -codec@1{ +audio-codec@1{ compatible = "slim217,1a0"; reg = <1 0>; interrupts = <&msmgpio 54 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "intr2" - reset-gpio = <&msmgpio 64 0>; - qcom,ifd = <&wc9335_ifd>; + reset-gpios = <&msmgpio 64 0>; + slim-ifc-dev = <&wc9335_ifd>; clock-names = "mclk", "native"; clocks = <&rpmcc RPM_SMD_DIV_CLK1>, <&rpmcc RPM_SMD_BB_CLK1>; diff --git a/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt b/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt new file mode 100644 index 000000000000..2469588c7ccb --- /dev/null +++ b/Documentation/devicetree/bindings/sound/rockchip,rk3328-codec.txt @@ -0,0 +1,23 @@ +* Rockchip RK3328 internal codec + +Required properties: + +- compatible: "rockchip,rk3328-codec" +- reg: physical base address of the controller and length of memory mapped + region. +- rockchip,grf: the phandle of the syscon node for GRF register. +- clocks: a list of phandle + clock-specifier pairs, one for each entry in clock-names. +- clock-names: should be "pclk". +- spk-depop-time-ms: speaker de-pop time in msec. + +Example for rk3328 internal codec: + +codec: codec@ff410000 { + compatible = "rockchip,rk3328-codec"; + reg = <0x0 0xff410000 0x0 0x1000>; + rockchip,grf = <&grf>; + clocks = <&cru PCLK_ACODEC>; + clock-names = "pclk"; + spk-depop-time-ms = <100>; + status = "disabled"; +}; diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt index 9c58f724396a..9d9ff5184939 100644 --- a/Documentation/devicetree/bindings/sound/sgtl5000.txt +++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt @@ -37,6 +37,15 @@ VDDIO 1.8V 2.5V 3.3V 2 = 3.33 mA 5.74 mA 8.03 mA 3 = 4.99 mA 8.61 mA 12.05 mA +- sclk-strength: the SCLK pad strength.
Possible values are: +0, 1, 2 and 3 as per the table below: + +VDDIO 1.8V 2.5V 3.3V +0 = Disable +1 = 1.66 mA 2.87 mA 4.02 mA +2 = 3.33 mA 5.74 mA 8.03 mA +3 = 4.99 mA 8.61 mA 12.05 mA + Example: sgtl5000: codec@a { diff --git a/Documentation/devicetree/bindings/sound/simple-scu-card.txt b/Documentation/devicetree/bindings/sound/simple-scu-card.txt deleted file mode 100644 index 3a2f71616cda..000000000000 --- a/Documentation/devicetree/bindings/sound/simple-scu-card.txt +++ /dev/null @@ -1,94 +0,0 @@ -ASoC Simple SCU Sound Card - -Simple SCU Sound Card is "Simple Sound Card" + "ALSA DPCM". -For example, you can use this driver if you want to exchange sampling rate convert, -Mixing, etc... - -Required properties: - -- compatible : "simple-scu-audio-card" - "renesas,rsrc-card" -Optional properties: - -- simple-audio-card,name : see simple-audio-card.txt -- simple-audio-card,cpu : see simple-audio-card.txt -- simple-audio-card,codec : see simple-audio-card.txt - -Optional subnode properties: - -- simple-audio-card,format : see simple-audio-card.txt -- simple-audio-card,frame-master : see simple-audio-card.txt -- simple-audio-card,bitclock-master : see simple-audio-card.txt -- simple-audio-card,bitclock-inversion : see simple-audio-card.txt -- simple-audio-card,frame-inversion : see simple-audio-card.txt -- simple-audio-card,convert-rate : platform specified sampling rate convert -- simple-audio-card,convert-channels : platform specified converted channel size (2 - 8 ch) -- simple-audio-card,prefix : see routing -- simple-audio-card,widgets : Please refer to widgets.txt. -- simple-audio-card,routing : A list of the connections between audio components. - Each entry is a pair of strings, the first being the connection's sink, - the second being the connection's source. Valid names for sources. - use audio-prefix if some components is using same sink/sources naming. - it can be used if compatible was "renesas,rsrc-card"; - -Required CPU/CODEC subnodes properties: - -- sound-dai : see simple-audio-card.txt - -Optional CPU/CODEC subnodes properties: - -- clocks / system-clock-frequency : see simple-audio-card.txt - -Example 1. Sampling Rate Conversion - -sound { - compatible = "simple-scu-audio-card"; - - simple-audio-card,name = "rsnd-ak4643"; - simple-audio-card,format = "left_j"; - simple-audio-card,bitclock-master = <&sndcodec>; - simple-audio-card,frame-master = <&sndcodec>; - - simple-audio-card,convert-rate = <48000>; - - simple-audio-card,prefix = "ak4642"; - simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback", - "DAI0 Capture", "ak4642 Capture"; - - sndcpu: simple-audio-card,cpu { - sound-dai = <&rcar_sound>; - }; - - sndcodec: simple-audio-card,codec { - sound-dai = <&ak4643>; - system-clock-frequency = <11289600>; - }; -}; - -Example 2. 
2 CPU 1 Codec (Mixing) - -sound { - compatible = "simple-scu-audio-card"; - - simple-audio-card,name = "rsnd-ak4643"; - simple-audio-card,format = "left_j"; - simple-audio-card,bitclock-master = <&dpcmcpu>; - simple-audio-card,frame-master = <&dpcmcpu>; - - simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback", - "ak4642 Playback", "DAI1 Playback"; - - dpcmcpu: cpu@0 { - sound-dai = <&rcar_sound 0>; - }; - - cpu@1 { - sound-dai = <&rcar_sound 1>; - }; - - codec { - prefix = "ak4642"; - sound-dai = <&ak4643>; - clocks = <&audio_clock>; - }; -}; diff --git a/Documentation/devicetree/bindings/sound/sprd-pcm.txt b/Documentation/devicetree/bindings/sound/sprd-pcm.txt new file mode 100644 index 000000000000..4b23e84b2e57 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/sprd-pcm.txt @@ -0,0 +1,23 @@ +* Spreadtrum DMA platfrom bindings + +Required properties: +- compatible: Should be "sprd,pcm-platform". +- dmas: Specify the list of DMA controller phandle and DMA request line ordered pairs. +- dma-names: Identifier string for each DMA request line in the dmas property. + These strings correspond 1:1 with the ordered pairs in dmas. + +Example: + + audio_platform:platform@0 { + compatible = "sprd,pcm-platform"; + dmas = <&agcp_dma 1 1>, <&agcp_dma 2 2>, + <&agcp_dma 3 3>, <&agcp_dma 4 4>, + <&agcp_dma 5 5>, <&agcp_dma 6 6>, + <&agcp_dma 7 7>, <&agcp_dma 8 8>, + <&agcp_dma 9 9>, <&agcp_dma 10 10>; + dma-names = "normal_p_l", "normal_p_r", + "normal_c_l", "normal_c_r", + "voice_c", "fast_p", + "loop_c", "loop_p", + "voip_c", "voip_p"; + }; diff --git a/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt new file mode 100644 index 000000000000..cbc93c8f4963 --- /dev/null +++ b/Documentation/devicetree/bindings/sound/xlnx,audio-formatter.txt @@ -0,0 +1,29 @@ +Device-Tree bindings for Xilinx PL audio formatter + +The IP core supports DMA, data formatting(AES<->PCM conversion) +of audio samples. + +Required properties: + - compatible: "xlnx,audio-formatter-1.0" + - interrupt-names: Names specified to list of interrupts in same + order mentioned under "interrupts". + List of supported interrupt names are: + "irq_mm2s" : interrupt from MM2S block + "irq_s2mm" : interrupt from S2MM block + - interrupts-parent: Phandle for interrupt controller. + - interrupts: List of Interrupt numbers. + - reg: Base address and size of the IP core instance. + - clock-names: List of input clocks. + Required elements: "s_axi_lite_aclk", "aud_mclk" + - clocks: Input clock specifier. Refer to common clock bindings. + +Example: + audio_ss_0_audio_formatter_0: audio_formatter@80010000 { + compatible = "xlnx,audio-formatter-1.0"; + interrupt-names = "irq_mm2s", "irq_s2mm"; + interrupt-parent = <&gic>; + interrupts = <0 104 4>, <0 105 4>; + reg = <0x0 0x80010000 0x0 0x1000>; + clock-names = "s_axi_lite_aclk", "aud_mclk"; + clocks = <&clk 71>, <&clk_wiz_1 0>; + }; diff --git a/Documentation/devicetree/bindings/sound/xlnx,spdif.txt b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt new file mode 100644 index 000000000000..15c2d64d247c --- /dev/null +++ b/Documentation/devicetree/bindings/sound/xlnx,spdif.txt @@ -0,0 +1,28 @@ +Device-Tree bindings for Xilinx SPDIF IP + +The IP supports playback and capture of SPDIF audio + +Required properties: + - compatible: "xlnx,spdif-2.0" + - clock-names: List of input clocks. + Required elements: "s_axi_aclk", "aud_clk_i" + - clocks: Input clock specifier. Refer to common clock bindings. 
+ - reg: Base address and address length of the IP core instance. + - interrupts-parent: Phandle for interrupt controller. + - interrupts: List of Interrupt numbers. + - xlnx,spdif-mode: 0 :- receiver mode + 1 :- transmitter mode + - xlnx,aud_clk_i: input audio clock value. + +Example: + spdif_0: spdif@80010000 { + clock-names = "aud_clk_i", "s_axi_aclk"; + clocks = <&misc_clk_0>, <&clk 71>; + compatible = "xlnx,spdif-2.0"; + interrupt-names = "spdif_interrupt"; + interrupt-parent = <&gic>; + interrupts = <0 91 4>; + reg = <0x0 0x80010000 0x0 0x10000>; + xlnx,spdif-mode = <1>; + xlnx,aud_clk_i = <49152913>; + }; diff --git a/Documentation/devicetree/bindings/spi/atmel-quadspi.txt b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt index b93c1e2f25dd..7c40ea694352 100644 --- a/Documentation/devicetree/bindings/spi/atmel-quadspi.txt +++ b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt @@ -1,14 +1,19 @@ * Atmel Quad Serial Peripheral Interface (QSPI) Required properties: -- compatible: Should be "atmel,sama5d2-qspi". +- compatible: Should be one of the following: + - "atmel,sama5d2-qspi" + - "microchip,sam9x60-qspi" - reg: Should contain the locations and lengths of the base registers and the mapped memory. - reg-names: Should contain the resource reg names: - qspi_base: configuration register address space - qspi_mmap: memory mapped address space - interrupts: Should contain the interrupt for the device. -- clocks: The phandle of the clock needed by the QSPI controller. +- clocks: Should reference the peripheral clock and the QSPI system + clock if available. +- clock-names: Should contain "pclk" for the peripheral clock and "qspick" + for the system clock when available. - #address-cells: Should be <1>. - #size-cells: Should be <0>. @@ -19,7 +24,8 @@ spi@f0020000 { reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>; reg-names = "qspi_base", "qspi_mmap"; interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>; - clocks = <&spi0_clk>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 52>; + clock-names = "pclk"; #address-cells = <1>; #size-cells = <0>; pinctrl-names = "default"; diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt index e3c48b20b1a6..2d3264140cc5 100644 --- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt +++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt @@ -10,6 +10,7 @@ Required properties: - "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35 - "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51 - "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc + - "fsl,imx8mq-ecspi" for SPI compatible with the one integrated on i.MX8M - reg : Offset and length of the register set for the device - interrupts : Should contain CSPI/eCSPI interrupt - clocks : Clock specifiers for both ipg and per clocks. diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt similarity index 73% rename from Documentation/devicetree/bindings/mtd/fsl-quadspi.txt rename to Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt index 483e9cfac1b1..e8f1d627d288 100644 --- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt +++ b/Documentation/devicetree/bindings/spi/spi-fsl-qspi.txt @@ -14,15 +14,13 @@ Required properties: - clocks : The clocks needed by the QuadSPI controller - clock-names : Should contain the name of the clocks: "qspi_en" and "qspi". 
-Optional properties: - - fsl,qspi-has-second-chip: The controller has two buses, bus A and bus B. - Each bus can be connected with two NOR flashes. - Most of the time, each bus only has one NOR flash - connected, this is the default case. - But if there are two NOR flashes connected to the - bus, you should enable this property. - (Please check the board's schematic.) - - big-endian : That means the IP register is big endian +Required SPI slave node properties: + - reg: There are two buses (A and B) with two chip selects each. + This encodes to which bus and CS the flash is connected: + <0>: Bus A, CS 0 + <1>: Bus A, CS 1 + <2>: Bus B, CS 0 + <3>: Bus B, CS 1 Example: @@ -40,7 +38,7 @@ qspi0: quadspi@40044000 { }; }; -Example showing the usage of two SPI NOR devices: +Example showing the usage of two SPI NOR devices on bus A: &qspi2 { pinctrl-names = "default"; diff --git a/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt new file mode 100644 index 000000000000..2cd67eb727d4 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt @@ -0,0 +1,39 @@ +* NXP Flex Serial Peripheral Interface (FSPI) + +Required properties: + - compatible : Should be "nxp,lx2160a-fspi" + - reg : First contains the register location and length, + Second contains the memory mapping address and length + - reg-names : Should contain the resource reg names: + - fspi_base: configuration register address space + - fspi_mmap: memory mapped address space + - interrupts : Should contain the interrupt for the device + +Required SPI slave node properties: + - reg : There are two buses (A and B) with two chip selects each. + This encodes to which bus and CS the flash is connected: + - <0>: Bus A, CS 0 + - <1>: Bus A, CS 1 + - <2>: Bus B, CS 0 + - <3>: Bus B, CS 1 + +Example showing the usage of two SPI NOR slave devices on bus A: + +fspi0: spi@20c0000 { + compatible = "nxp,lx2160a-fspi"; + reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>; + reg-names = "fspi_base", "fspi_mmap"; + interrupts = <0 25 0x4>; /* Level high type */ + clocks = <&clockgen 4 3>, <&clockgen 4 3>; + clock-names = "fspi_en", "fspi"; + + mt35xu512aba0: flash@0 { + reg = <0>; + .... + }; + + mt35xu512aba1: flash@1 { + reg = <1>; + .... + }; +}; diff --git a/Documentation/devicetree/bindings/spi/spi-sifive.txt b/Documentation/devicetree/bindings/spi/spi-sifive.txt new file mode 100644 index 000000000000..3f5c6e438972 --- /dev/null +++ b/Documentation/devicetree/bindings/spi/spi-sifive.txt @@ -0,0 +1,37 @@ +SiFive SPI controller Device Tree Bindings +------------------------------------------ + +Required properties: +- compatible : Should be "sifive,-spi" and "sifive,spi". + Supported compatible strings are: + "sifive,fu540-c000-spi" for the SiFive SPI v0 as integrated + onto the SiFive FU540 chip, and "sifive,spi0" for the SiFive + SPI v0 IP block with no chip integration tweaks. 
+ Please refer to sifive-blocks-ip-versioning.txt for details +- reg : Physical base address and size of SPI registers map + A second (optional) range can indicate memory mapped flash +- interrupts : Must contain one entry +- interrupt-parent : Must be core interrupt controller +- clocks : Must reference the frequency given to the controller +- #address-cells : Must be '1', indicating which CS to use +- #size-cells : Must be '0' + +Optional properties: +- sifive,fifo-depth : Depth of hardware queues; defaults to 8 +- sifive,max-bits-per-word : Maximum bits per word; defaults to 8 + +SPI RTL that corresponds to the IP block version numbers can be found here: +https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/spi + +Example: + spi: spi@10040000 { + compatible = "sifive,fu540-c000-spi", "sifive,spi0"; + reg = <0x0 0x10040000 0x0 0x1000 0x0 0x20000000 0x0 0x10000000>; + interrupt-parent = <&plic>; + interrupts = <51>; + clocks = <&tlclk>; + #address-cells = <1>; + #size-cells = <0>; + sifive,fifo-depth = <8>; + sifive,max-bits-per-word = <8>; + }; diff --git a/Documentation/devicetree/bindings/spi/spi-sprd.txt b/Documentation/devicetree/bindings/spi/spi-sprd.txt index bad211a19da4..3c7eacce0ee3 100644 --- a/Documentation/devicetree/bindings/spi/spi-sprd.txt +++ b/Documentation/devicetree/bindings/spi/spi-sprd.txt @@ -14,6 +14,11 @@ Required properties: address on the SPI bus. Should be set to 1. - #size-cells: Should be set to 0. +Optional properties: +dma-names: Should contain names of the SPI used DMA channel. +dmas: Should contain DMA channels and DMA slave ids which the SPI used + sorted in the same order as the dma-names property. + Example: spi0: spi@70a00000{ compatible = "sprd,sc9860-spi"; @@ -21,6 +26,8 @@ spi0: spi@70a00000{ interrupts = ; clock-names = "spi", "source","enable"; clocks = <&clk_spi0>, <&ext_26m>, <&clk_ap_apb_gates 5>; + dma-names = "rx_chn", "tx_chn"; + dmas = <&apdma 11 11>, <&apdma 12 12>; #address-cells = <1>; #size-cells = <0>; }; diff --git a/Documentation/devicetree/bindings/spi/spi-stm32.txt b/Documentation/devicetree/bindings/spi/spi-stm32.txt index 1b3fa2c119d5..d82755c63eaf 100644 --- a/Documentation/devicetree/bindings/spi/spi-stm32.txt +++ b/Documentation/devicetree/bindings/spi/spi-stm32.txt @@ -7,7 +7,9 @@ from 4 to 32-bit data size. Although it can be configured as master or slave, only master is supported by the driver. Required properties: -- compatible: Must be "st,stm32h7-spi". +- compatible: Should be one of: + "st,stm32h7-spi" + "st,stm32f4-spi" - reg: Offset and length of the device's register set. - interrupts: Must contain the interrupt id. - clocks: Must contain an entry for spiclk (which feeds the internal clock @@ -30,8 +32,9 @@ Child nodes represent devices on the SPI bus See ../spi/spi-bus.txt Optional properties: -- st,spi-midi-ns: (Master Inter-Data Idleness) minimum time delay in - nanoseconds inserted between two consecutive data frames. +- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time + delay in nanoseconds inserted between two consecutive data + frames. Example: diff --git a/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt b/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt new file mode 100644 index 000000000000..194f6a3c1c1e --- /dev/null +++ b/Documentation/devicetree/bindings/sram/milbeaut-smp-sram.txt @@ -0,0 +1,24 @@ +Milbeaut SRAM for smp bringup + +Milbeaut SoCs use a part of the sram for the bringup of the secondary cores. 
+Once they get powered up in the bootloader, they stay in a specific part +of the sram. +Therefore that part needs to be added as a sub-node of mmio-sram. + +Required sub-node properties: +- compatible : should be "socionext,milbeaut-smp-sram" + +Example: + + sram: sram@0 { + compatible = "mmio-sram"; + reg = <0x0 0x10000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0x10000>; + + smp-sram@f100 { + compatible = "socionext,milbeaut-smp-sram"; + reg = <0xf100 0x20>; + }; + }; diff --git a/Documentation/devicetree/bindings/sram/sunxi-sram.txt b/Documentation/devicetree/bindings/sram/sunxi-sram.txt index ab5a70bb9a64..380246a805f2 100644 --- a/Documentation/devicetree/bindings/sram/sunxi-sram.txt +++ b/Documentation/devicetree/bindings/sram/sunxi-sram.txt @@ -63,6 +63,7 @@ The valid sections compatible for H5 are: The valid sections compatible for H6 are: - allwinner,sun50i-h6-sram-c, allwinner,sun50i-a64-sram-c + - allwinner,sun50i-h6-sram-c1, allwinner,sun4i-a10-sram-c1 The valid sections compatible for F1C100s are: - allwinner,suniv-f1c100s-sram-d, allwinner,sun4i-a10-sram-d diff --git a/Documentation/devicetree/bindings/thermal/brcm,sr-thermal.txt b/Documentation/devicetree/bindings/thermal/brcm,sr-thermal.txt new file mode 100644 index 000000000000..3ab330219d45 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/brcm,sr-thermal.txt @@ -0,0 +1,105 @@ +* Broadcom Stingray Thermal + +This binding describes thermal sensors that are part of Stingray SoCs. + +Required properties: +- compatible : Must be "brcm,sr-thermal" +- reg : Memory where tmon data will be available. +- brcm,tmon-mask: A one cell bit mask of valid TMON sources. + Each bit represents a single TMON source. +- #thermal-sensor-cells : Number of cells in a thermal sensor specifier; + the single cell is the tmon ID. +- polling-delay: Max number of milliseconds to wait between polls. +- thermal-sensors: A list of thermal sensor phandles and specifiers. + The specifier value is the tmon ID and must + correspond to a bit set in brcm,tmon-mask. +- temperature: trip temperature threshold in millicelsius.
+ +Example: + tmons { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x8f100000 0x100>; + + tmon: tmon@0 { + compatible = "brcm,sr-thermal"; + reg = <0x0 0x40>; + brcm,tmon-mask = <0x3f>; + #thermal-sensor-cells = <1>; + }; + }; + + thermal-zones { + ihost0_thermal: ihost0-thermal { + polling-delay-passive = <0>; + polling-delay = <1000>; + thermal-sensors = <&tmon 0>; + trips { + cpu-crit { + temperature = <105000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + ihost1_thermal: ihost1-thermal { + polling-delay-passive = <0>; + polling-delay = <1000>; + thermal-sensors = <&tmon 1>; + trips { + cpu-crit { + temperature = <105000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + ihost2_thermal: ihost2-thermal { + polling-delay-passive = <0>; + polling-delay = <1000>; + thermal-sensors = <&tmon 2>; + trips { + cpu-crit { + temperature = <105000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + ihost3_thermal: ihost3-thermal { + polling-delay-passive = <0>; + polling-delay = <1000>; + thermal-sensors = <&tmon 3>; + trips { + cpu-crit { + temperature = <105000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + crmu_thermal: crmu-thermal { + polling-delay-passive = <0>; + polling-delay = <1000>; + thermal-sensors = <&tmon 4>; + trips { + cpu-crit { + temperature = <105000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + nitro_thermal: nitro-thermal { + polling-delay-passive = <0>; + polling-delay = <1000>; + thermal-sensors = <&tmon 5>; + trips { + cpu-crit { + temperature = <105000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt index 41d6a443ad66..f8d7831f3974 100644 --- a/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/mediatek-thermal.txt @@ -13,6 +13,7 @@ Required properties: - "mediatek,mt2701-thermal" : For MT2701 family of SoCs - "mediatek,mt2712-thermal" : For MT2712 family of SoCs - "mediatek,mt7622-thermal" : For MT7622 SoC + - "mediatek,mt8183-thermal" : For MT8183 family of SoCs - reg: Address range of the thermal controller - interrupts: IRQ for the thermal controller - clocks, clock-names: Clocks needed for the thermal controller. required diff --git a/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt b/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt index 9809b11f7180..5d8fd5b52598 100644 --- a/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt +++ b/Documentation/devicetree/bindings/timer/fsl,imxgpt.txt @@ -2,17 +2,44 @@ Freescale i.MX General Purpose Timer (GPT) Required properties: -- compatible : should be "fsl,-gpt" -- reg : Specifies base physical address and size of the registers. -- interrupts : A list of 4 interrupts; one per timer channel. -- clocks : The clocks provided by the SoC to drive the timer. 
+- compatible : should be one of following: + for i.MX1: + - "fsl,imx1-gpt"; + for i.MX21: + - "fsl,imx21-gpt"; + for i.MX27: + - "fsl,imx27-gpt", "fsl,imx21-gpt"; + for i.MX31: + - "fsl,imx31-gpt"; + for i.MX25: + - "fsl,imx25-gpt", "fsl,imx31-gpt"; + for i.MX50: + - "fsl,imx50-gpt", "fsl,imx31-gpt"; + for i.MX51: + - "fsl,imx51-gpt", "fsl,imx31-gpt"; + for i.MX53: + - "fsl,imx53-gpt", "fsl,imx31-gpt"; + for i.MX6Q: + - "fsl,imx6q-gpt", "fsl,imx31-gpt"; + for i.MX6DL: + - "fsl,imx6dl-gpt"; + for i.MX6SL: + - "fsl,imx6sl-gpt", "fsl,imx6dl-gpt"; + for i.MX6SX: + - "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; +- reg : specifies base physical address and size of the registers. +- interrupts : should be the gpt interrupt. +- clocks : the clocks provided by the SoC to drive the timer, must contain + an entry for each entry in clock-names. +- clock-names : must include "ipg" entry first, then "per" entry. Example: gpt1: timer@10003000 { - compatible = "fsl,imx27-gpt", "fsl,imx1-gpt"; + compatible = "fsl,imx27-gpt", "fsl,imx21-gpt"; reg = <0x10003000 0x1000>; interrupts = <26>; - clocks = <&clks 46>, <&clks 61>; + clocks = <&clks IMX27_CLK_GPT1_IPG_GATE>, + <&clks IMX27_CLK_PER1_GATE>; clock-names = "ipg", "per"; }; diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt index 18d4d0166c76..ff7c567a7972 100644 --- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt +++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt @@ -1,7 +1,7 @@ -Mediatek Timers +MediaTek Timers --------------- -Mediatek SoCs have two different timers on different platforms, +MediaTek SoCs have two different timers on different platforms, - GPT (General Purpose Timer) - SYST (System Timer) @@ -9,6 +9,7 @@ The proper timer will be selected automatically by driver. Required properties: - compatible should contain: + For those SoCs that use GPT * "mediatek,mt2701-timer" for MT2701 compatible timers (GPT) * "mediatek,mt6580-timer" for MT6580 compatible timers (GPT) * "mediatek,mt6589-timer" for MT6589 compatible timers (GPT) @@ -17,7 +18,11 @@ Required properties: * "mediatek,mt8135-timer" for MT8135 compatible timers (GPT) * "mediatek,mt8173-timer" for MT8173 compatible timers (GPT) * "mediatek,mt6577-timer" for MT6577 and all above compatible timers (GPT) - * "mediatek,mt6765-timer" for MT6765 compatible timers (SYST) + + For those SoCs that use SYST + * "mediatek,mt7629-timer" for MT7629 compatible timers (SYST) + * "mediatek,mt6765-timer" for MT6765 and all above compatible timers (SYST) + - reg: Should contain location and length for timer register. - clocks: Should contain system clock. diff --git a/Documentation/devicetree/bindings/timer/nvidia,tegra210-timer.txt b/Documentation/devicetree/bindings/timer/nvidia,tegra210-timer.txt new file mode 100644 index 000000000000..032cda96fe0d --- /dev/null +++ b/Documentation/devicetree/bindings/timer/nvidia,tegra210-timer.txt @@ -0,0 +1,36 @@ +NVIDIA Tegra210 timer + +The Tegra210 timer provides fourteen 29-bit timer counters and one 32-bit +timestamp counter. The TMRs run at either a fixed 1 MHz clock rate derived +from the oscillator clock (TMR0-TMR9) or directly at the oscillator clock +(TMR10-TMR13). Each TMR can be programmed to generate one-shot, periodic, +or watchdog interrupts. + +Required properties: +- compatible : "nvidia,tegra210-timer". +- reg : Specifies base physical address and size of the registers. 
+- interrupts : A list of 14 interrupts; one per each timer channels 0 through + 13. +- clocks : Must contain one entry, for the module clock. + See ../clocks/clock-bindings.txt for details. + +timer@60005000 { + compatible = "nvidia,tegra210-timer"; + reg = <0x0 0x60005000 0x0 0x400>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + ; + clocks = <&tegra_car TEGRA210_CLK_TIMER>; + clock-names = "timer"; +}; diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt index 862a80f0380a..c0594450e9ef 100644 --- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt +++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt @@ -32,6 +32,8 @@ Required Properties: - "renesas,r8a77470-cmt1" for the 48-bit CMT1 device included in r8a77470. - "renesas,r8a774a1-cmt0" for the 32-bit CMT0 device included in r8a774a1. - "renesas,r8a774a1-cmt1" for the 48-bit CMT1 device included in r8a774a1. + - "renesas,r8a774c0-cmt0" for the 32-bit CMT0 device included in r8a774c0. + - "renesas,r8a774c0-cmt1" for the 48-bit CMT1 device included in r8a774c0. - "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790. - "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790. - "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791. diff --git a/Documentation/devicetree/bindings/timer/renesas,tmu.txt b/Documentation/devicetree/bindings/timer/renesas,tmu.txt index 4ddff85837da..13ad07416bdd 100644 --- a/Documentation/devicetree/bindings/timer/renesas,tmu.txt +++ b/Documentation/devicetree/bindings/timer/renesas,tmu.txt @@ -10,6 +10,7 @@ Required Properties: - compatible: must contain one or more of the following: - "renesas,tmu-r8a7740" for the r8a7740 TMU + - "renesas,tmu-r8a774c0" for the r8a774C0 TMU - "renesas,tmu-r8a7778" for the r8a7778 TMU - "renesas,tmu-r8a7779" for the r8a7779 TMU - "renesas,tmu-r8a77970" for the r8a77970 TMU diff --git a/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt b/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt new file mode 100644 index 000000000000..ac44c4b67530 --- /dev/null +++ b/Documentation/devicetree/bindings/timer/socionext,milbeaut-timer.txt @@ -0,0 +1,17 @@ +Milbeaut SoCs Timer Controller + +Required properties: + +- compatible : should be "socionext,milbeaut-timer". +- reg : Specifies base physical address and size of the registers. +- interrupts : The interrupt of the first timer. +- clocks: phandle to the input clk. 
+ +Example: + +timer { + compatible = "socionext,milbeaut-timer"; + reg = <0x1e000050 0x20> + interrupts = <0 91 4>; + clocks = <&clk 4>; +}; diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml index cc64ec63a6ad..d79fb22bde39 100644 --- a/Documentation/devicetree/bindings/trivial-devices.yaml +++ b/Documentation/devicetree/bindings/trivial-devices.yaml @@ -322,6 +322,8 @@ properties: - ti,ads7830 # Temperature Monitoring and Fan Control - ti,amc6821 + # Temperature sensor with integrated fan control + - ti,lm96000 # I2C Touch-Screen Controller - ti,tsc2003 # Low Power Digital Temperature Sensor with SMBUS/Two Wire Serial Interface diff --git a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt index a48c44817367..0b83df1a5418 100644 --- a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt +++ b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt @@ -6,9 +6,10 @@ Each UFS Host Controller should have its own node. Required properties: - compatible : compatible list, contains one of the following - "hisilicon,hi3660-ufs", "jedec,ufs-1.1" for hisi ufs - host controller present on Hi36xx chipset. + host controller present on Hi3660 chipset. + "hisilicon,hi3670-ufs", "jedec,ufs-2.1" for hisi ufs + host controller present on Hi3670 chipset. - reg : should contain UFS register address space & UFS SYS CTRL register address, -- interrupt-parent : interrupt device - interrupts : interrupt number - clocks : List of phandle and clock specifier pairs - clock-names : List of clock input name strings sorted in the same diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt index 8cf59452c675..5111e9130bc3 100644 --- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt +++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt @@ -4,11 +4,14 @@ UFSHC nodes are defined to describe on-chip UFS host controllers. Each UFS controller instance should have its own node. Required properties: -- compatible : must contain "jedec,ufs-1.1" or "jedec,ufs-2.0", may - also list one or more of the following: - "qcom,msm8994-ufshc" - "qcom,msm8996-ufshc" - "qcom,ufshc" +- compatible : must contain "jedec,ufs-1.1" or "jedec,ufs-2.0" + + For Qualcomm SoCs must contain, as below, an + SoC-specific compatible along with "qcom,ufshc" and + the appropriate jedec string: + "qcom,msm8994-ufshc", "qcom,ufshc", "jedec,ufs-2.0" + "qcom,msm8996-ufshc", "qcom,ufshc", "jedec,ufs-2.0" + "qcom,sdm845-ufshc", "qcom,ufshc", "jedec,ufs-2.0" - interrupts : - reg : diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt index adae82385dd6..a254386a91ad 100644 --- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt +++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt @@ -93,6 +93,7 @@ i.mx specific properties - over-current-active-low: over current signal polarity is active low. - over-current-active-high: over current signal polarity is active high. It's recommended to specify the over current polarity. 
+- power-active-high: power signal polarity is active high - external-vbus-divider: enables off-chip resistor divider for Vbus Example: diff --git a/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt b/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt new file mode 100644 index 000000000000..620355cee63f --- /dev/null +++ b/Documentation/devicetree/bindings/usb/ingenic,jz4740-musb.txt @@ -0,0 +1,24 @@ +Ingenic JZ4740 MUSB driver + +Required properties: + +- compatible: Must be "ingenic,jz4740-musb" +- reg: Address range of the UDC register set +- interrupts: IRQ number related to the UDC hardware +- interrupt-names: must be "mc" +- clocks: phandle to the "udc" clock +- clock-names: must be "udc" + +Example: + +udc: usb@13040000 { + compatible = "ingenic,jz4740-musb"; + reg = <0x13040000 0x10000>; + + interrupt-parent = <&intc>; + interrupts = <24>; + interrupt-names = "mc"; + + clocks = <&cgu JZ4740_CLK_UDC>; + clock-names = "udc"; +}; diff --git a/Documentation/devicetree/bindings/usb/keystone-usb.txt b/Documentation/devicetree/bindings/usb/keystone-usb.txt index f96e09f784cc..77df82e36138 100644 --- a/Documentation/devicetree/bindings/usb/keystone-usb.txt +++ b/Documentation/devicetree/bindings/usb/keystone-usb.txt @@ -3,7 +3,9 @@ TI Keystone Soc USB Controller DWC3 GLUE Required properties: - - compatible: should be "ti,keystone-dwc3". + - compatible: should be + "ti,keystone-dwc3" for Keystone 2 SoCs + "ti,am654-dwc3" for AM654 SoC - #address-cells, #size-cells : should be '1' if the device has sub-nodes with 'reg' property. - reg : Address and length of the register set for the USB subsystem on @@ -21,7 +23,7 @@ SoCs only: - clock-names: Must be "usb". -The following are mandatory properties for Keystone 2 66AK2G SoCs only: +The following are mandatory properties for 66AK2G and AM654: - power-domains: Should contain a phandle to a PM domain provider node and an args specifier containing the USB device id diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt b/Documentation/devicetree/bindings/usb/qcom,dwc3.txt index 95afdcf3c337..cb695aa3fba4 100644 --- a/Documentation/devicetree/bindings/usb/qcom,dwc3.txt +++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.txt @@ -4,6 +4,7 @@ Required properties: - compatible: Compatible list, contains "qcom,dwc3" "qcom,msm8996-dwc3" for msm8996 SOC. + "qcom,msm8998-dwc3" for msm8998 SOC. "qcom,sdm845-dwc3" for sdm845 SOC. 
- reg: Offset and length of register set for QSCRATCH wrapper - power-domains: specifies a phandle to PM domain provider node diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt index d366555166d0..35039e720515 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt @@ -3,6 +3,7 @@ Renesas Electronics USB3.0 Peripheral driver Required properties: - compatible: Must contain one of the following: - "renesas,r8a774a1-usb3-peri" + - "renesas,r8a774c0-usb3-peri" - "renesas,r8a7795-usb3-peri" - "renesas,r8a7796-usb3-peri" - "renesas,r8a77965-usb3-peri" diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index 90719f501852..d93b6a1504f2 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt @@ -7,6 +7,7 @@ Required properties: - "renesas,usbhs-r8a7744" for r8a7744 (RZ/G1N) compatible device - "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device - "renesas,usbhs-r8a774a1" for r8a774a1 (RZ/G2M) compatible device + - "renesas,usbhs-r8a774c0" for r8a774c0 (RZ/G2E) compatible device - "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device - "renesas,usbhs-r8a7791" for r8a7791 (R-Car M2-W) compatible device - "renesas,usbhs-r8a7792" for r8a7792 (R-Car V2H) compatible device diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt index 168ff819e827..17915f64b8ee 100644 --- a/Documentation/devicetree/bindings/usb/usb251xb.txt +++ b/Documentation/devicetree/bindings/usb/usb251xb.txt @@ -64,6 +64,8 @@ Optional properties : - power-on-time-ms : Specifies the time it takes from the time the host initiates the power-on sequence to a port until the port has adequate power. The value is given in ms in a 0 - 510 range (default is 100ms). + - swap-dx-lanes : Specifies the ports which will swap the differential-pair + (D+/D-), default is not-swapped. Examples: usb2512b@2c { @@ -81,4 +83,6 @@ Examples: manufacturer = "Foo"; product = "Foo-Bar"; serial = "1234567890A"; + /* correct misplaced usb connectors on port 1,2 */ + swap-dx-lanes = <1 2>; }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 389508584f48..8162b0eb4b50 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -24,6 +24,7 @@ amarula Amarula Solutions amazon Amazon.com, Inc. amcc Applied Micro Circuits Corporation (APM, formally AMCC) amd Advanced Micro Devices (AMD), Inc. +amediatech Shenzhen Amediatech Technology Co., Ltd amlogic Amlogic, Inc. ampire Ampire Co., Ltd. ams AMS AG @@ -65,6 +66,7 @@ bticino Bticino International calxeda Calxeda capella Capella Microsystems, Inc cascoda Cascoda, Ltd. +catalyst Catalyst Semiconductor, Inc. cavium Cavium, Inc. cdns Cadence Design Systems Inc. cdtech CDTech(H.K.) Electronics Limited @@ -108,11 +110,13 @@ dongwoon Dongwoon Anatech dptechnics DPTechnics dragino Dragino Technology Co., Limited ea Embedded Artists AB +ebs-systart EBS-SYSTART GmbH ebv EBV Elektronik eckelmann Eckelmann AG edt Emerging Display Technologies eeti eGalax_eMPIA Technology Inc elan Elan Microelectronic Corp. +elgin Elgin S/A. embest Shenzhen Embest Technology Co., Ltd. emlid Emlid, Ltd. 
emmicro EM Microelectronic @@ -136,11 +140,13 @@ fairphone Fairphone B.V. faraday Faraday Technology Corporation fastrax Fastrax Oy fcs Fairchild Semiconductor +feiyang Shenzhen Fly Young Technology Co.,LTD. firefly Firefly focaltech FocalTech Systems Co.,Ltd friendlyarm Guangzhou FriendlyARM Computer Tech Co., Ltd fsl Freescale Semiconductor fujitsu Fujitsu Ltd. +gateworks Gateworks Corporation gcw Game Consoles Worldwide ge General Electric Company geekbuying GeekBuying @@ -150,6 +156,7 @@ geniatech Geniatech, Inc. giantec Giantec Semiconductor, Inc. giantplus Giantplus Technology Co., Ltd. globalscale Globalscale Technologies, Inc. +globaltop GlobalTop Technology, Inc. gmt Global Mixed-mode Technology, Inc. goodix Shenzhen Huiding Technology Co., Ltd. google Google, Inc. @@ -211,6 +218,7 @@ laird Laird PLC lantiq Lantiq Semiconductor lattice Lattice Semiconductor lego LEGO Systems A/S +lemaker Shenzhen LeMaker Technology Co., Ltd. lenovo Lenovo Group Ltd. lg LG Corporation libretech Shenzhen Libre Technology Co., Ltd @@ -273,6 +281,7 @@ nintendo Nintendo nlt NLT Technologies, Ltd. nokia Nokia nordic Nordic Semiconductor +novtech NovTech, Inc. nutsboard NutsBoard nuvoton Nuvoton Technology Corporation nvd New Vision Display @@ -297,6 +306,7 @@ ovti OmniVision Technologies oxsemi Oxford Semiconductor, Ltd. panasonic Panasonic Corporation parade Parade Technologies Inc. +pda Precision Design Associates, Inc. pericom Pericom Technology Inc. pervasive Pervasive Displays, Inc. phicomm PHICOMM Co., Ltd. @@ -304,6 +314,7 @@ phytec PHYTEC Messtechnik GmbH picochip Picochip Ltd pine64 Pine64 pixcir PIXCIR MICROELECTRONICS Co., Ltd +plantower Plantower Co., Ltd plathome Plat'Home Co., Ltd. plda PLDA plx Broadcom Corporation (formerly PLX Technology) @@ -390,6 +401,7 @@ tcl Toby Churchill Ltd. technexion TechNexion technologic Technologic Systems tempo Tempo Semiconductor +techstar Shenzhen Techstar Electronics Co., Ltd. terasic Terasic Inc. thine THine Electronics, Inc. ti Texas Instruments @@ -430,6 +442,7 @@ vot Vision Optical Technology Co., Ltd. wd Western Digital Corp. wetek WeTek Electronics, limited. wexler Wexler +whwave Shenzhen whwave Electronics, Inc. wi2wi Wi2Wi, Inc. winbond Winbond Electronics corp. winstar Winstar Display Corp. 
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt index ef2b97b72e08..9f365c1a3399 100644 --- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt @@ -8,6 +8,7 @@ Required properties: - "renesas,r8a7743-wdt" (RZ/G1M) - "renesas,r8a7744-wdt" (RZ/G1N) - "renesas,r8a7745-wdt" (RZ/G1E) + - "renesas,r8a77470-wdt" (RZ/G1C) - "renesas,r8a774a1-wdt" (RZ/G2M) - "renesas,r8a774c0-wdt" (RZ/G2E) - "renesas,r8a7790-wdt" (R-Car H2) diff --git a/Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt b/Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt new file mode 100644 index 000000000000..7cc1407f15cb --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/st,stpmic1-wdt.txt @@ -0,0 +1,11 @@ +STMicroelectronics STPMIC1 Watchdog + +Required properties: + +- compatible : should be "st,stpmic1-wdt" + +Example: + +watchdog { + compatible = "st,stpmic1-wdt"; +}; diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst index 51be62aa4385..f96059767c8c 100644 --- a/Documentation/doc-guide/kernel-doc.rst +++ b/Documentation/doc-guide/kernel-doc.rst @@ -490,7 +490,7 @@ doc: *title* functions: *[ function ...]* Include documentation for each *function* in *source*. - If no *function* if specified, the documentaion for all functions + If no *function* is specified, the documentation for all functions and types in the *source* will be included. Examples:: @@ -517,4 +517,17 @@ How to use kernel-doc to generate man pages If you just want to use kernel-doc to generate man pages you can do this from the kernel git tree:: - $ scripts/kernel-doc -man $(git grep -l '/\*\*' -- :^Documentation :^tools) | scripts/split-man.pl /tmp/man + $ scripts/kernel-doc -man \ + $(git grep -l '/\*\*' -- :^Documentation :^tools) \ + | scripts/split-man.pl /tmp/man + +Some older versions of git do not support some of the variants of syntax for +path exclusion. One of the following commands may work for those versions:: + + $ scripts/kernel-doc -man \ + $(git grep -l '/\*\*' -- . ':!Documentation' ':!tools') \ + | scripts/split-man.pl /tmp/man + + $ scripts/kernel-doc -man \ + $(git grep -l '/\*\*' -- . ":(exclude)Documentation" ":(exclude)tools") \ + | scripts/split-man.pl /tmp/man diff --git a/Documentation/doc-guide/sphinx.rst b/Documentation/doc-guide/sphinx.rst index 02605ee1d876..c039224b404e 100644 --- a/Documentation/doc-guide/sphinx.rst +++ b/Documentation/doc-guide/sphinx.rst @@ -27,8 +27,8 @@ Sphinx Install ============== The ReST markups currently used by the Documentation/ files are meant to be -built with ``Sphinx`` version 1.3 or upper. If you're desiring to build -PDF outputs, it is recommended to use version 1.4.6 or upper. +built with ``Sphinx`` version 1.3 or higher. If you desire to build +PDF output, it is recommended to use version 1.4.6 or higher. There's a script that checks for the Sphinx requirements. Please see :ref:`sphinx-pre-install` for further details. @@ -37,15 +37,15 @@ Most distributions are shipped with Sphinx, but its toolchain is fragile, and it is not uncommon that upgrading it or some other Python packages on your machine would cause the documentation build to break. -A way to get rid of that is to use a different version than the one shipped -on your distributions. 
In order to do that, it is recommended to install +A way to avoid that is to use a different version than the one shipped +with your distributions. In order to do so, it is recommended to install Sphinx inside a virtual environment, using ``virtualenv-3`` or ``virtualenv``, depending on how your distribution packaged Python 3. .. note:: #) Sphinx versions below 1.5 don't work properly with Python's - docutils version 0.13.1 or upper. So, if you're willing to use + docutils version 0.13.1 or higher. So, if you're willing to use those versions, you should run ``pip install 'docutils==0.12'``. #) It is recommended to use the RTD theme for html output. Depending @@ -82,7 +82,7 @@ output. PDF and LaTeX builds -------------------- -Such builds are currently supported only with Sphinx versions 1.4 and upper. +Such builds are currently supported only with Sphinx versions 1.4 and higher. For PDF and LaTeX output, you'll also need ``XeLaTeX`` version 3.14159265. diff --git a/Documentation/dontdiff b/Documentation/dontdiff index 2228fcc8e29f..ef25a066d952 100644 --- a/Documentation/dontdiff +++ b/Documentation/dontdiff @@ -106,7 +106,6 @@ compile.h* conf config config-* -config_data.h* config.mak config.mak.autogen conmakehash diff --git a/Documentation/driver-api/80211/mac80211.rst b/Documentation/driver-api/80211/mac80211.rst index 85a8335e80b6..eab40bcf3987 100644 --- a/Documentation/driver-api/80211/mac80211.rst +++ b/Documentation/driver-api/80211/mac80211.rst @@ -125,6 +125,9 @@ functions/definitions .. kernel-doc:: include/net/mac80211.h :functions: ieee80211_rx_status +.. kernel-doc:: include/net/mac80211.h + :functions: mac80211_rx_encoding_flags + .. kernel-doc:: include/net/mac80211.h :functions: mac80211_rx_flags diff --git a/Documentation/driver-api/component.rst b/Documentation/driver-api/component.rst new file mode 100644 index 000000000000..2da4a8f20607 --- /dev/null +++ b/Documentation/driver-api/component.rst @@ -0,0 +1,17 @@ +====================================== +Component Helper for Aggregate Drivers +====================================== + +.. kernel-doc:: drivers/base/component.c + :doc: overview + + +API +=== + +.. kernel-doc:: include/linux/component.h + :internal: + +.. kernel-doc:: drivers/base/component.c + :export: + diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst index d6763272e747..ae1e3d0394b0 100644 --- a/Documentation/driver-api/device_link.rst +++ b/Documentation/driver-api/device_link.rst @@ -1,6 +1,9 @@ .. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain ` .. |struct generic_pm_domain| replace:: :c:type:`struct generic_pm_domain ` + +.. _device_link: + ============ Device links ============ @@ -25,8 +28,8 @@ suspend/resume and shutdown ordering. Device links allow representation of such dependencies in the driver core. -In its standard form, a device link combines *both* dependency types: -It guarantees correct suspend/resume and shutdown ordering between a +In its standard or *managed* form, a device link combines *both* dependency +types: It guarantees correct suspend/resume and shutdown ordering between a "supplier" device and its "consumer" devices, and it guarantees driver presence on the supplier. The consumer devices are not probed before the supplier is bound to a driver, and they're unbound before the supplier @@ -59,18 +62,24 @@ device ``->probe`` callback or a boot-time PCI quirk. 
Another example for an inconsistent state would be a device link that represents a driver presence dependency, yet is added from the consumer's -``->probe`` callback while the supplier hasn't probed yet: Had the driver -core known about the device link earlier, it wouldn't have probed the +``->probe`` callback while the supplier hasn't started to probe yet: Had the +driver core known about the device link earlier, it wouldn't have probed the consumer in the first place. The onus is thus on the consumer to check presence of the supplier after adding the link, and defer probing on -non-presence. - -If a device link is added in the ``->probe`` callback of the supplier or -consumer driver, it is typically deleted in its ``->remove`` callback for -symmetry. That way, if the driver is compiled as a module, the device -link is added on module load and orderly deleted on unload. The same -restrictions that apply to device link addition (e.g. exclusion of a -parallel suspend/resume transition) apply equally to deletion. +non-presence. [Note that it is valid to create a link from the consumer's +``->probe`` callback while the supplier is still probing, but the consumer must +know that the supplier is functional already at the link creation time (that is +the case, for instance, if the consumer has just acquired some resources that +would not have been available had the supplier not been functional then).] + +If a device link with ``DL_FLAG_STATELESS`` set (i.e. a stateless device link) +is added in the ``->probe`` callback of the supplier or consumer driver, it is +typically deleted in its ``->remove`` callback for symmetry. That way, if the +driver is compiled as a module, the device link is added on module load and +orderly deleted on unload. The same restrictions that apply to device link +addition (e.g. exclusion of a parallel suspend/resume transition) apply equally +to deletion. Device links with ``DL_FLAG_STATELESS`` unset (i.e. managed +device links) are deleted automatically by the driver core. Several flags may be specified on device link addition, two of which have already been mentioned above: ``DL_FLAG_STATELESS`` to express that no @@ -80,25 +89,55 @@ integration is desired. Two other flags are specifically targeted at use cases where the device link is added from the consumer's ``->probe`` callback: ``DL_FLAG_RPM_ACTIVE`` -can be specified to runtime resume the supplier upon addition of the -device link. ``DL_FLAG_AUTOREMOVE_CONSUMER`` causes the device link to be -automatically purged when the consumer fails to probe or later unbinds. -This obviates the need to explicitly delete the link in the ``->remove`` -callback or in the error path of the ``->probe`` callback. +can be specified to runtime resume the supplier and prevent it from suspending +before the consumer is runtime suspended. ``DL_FLAG_AUTOREMOVE_CONSUMER`` +causes the device link to be automatically purged when the consumer fails to +probe or later unbinds. Similarly, when the device link is added from supplier's ``->probe`` callback, ``DL_FLAG_AUTOREMOVE_SUPPLIER`` causes the device link to be automatically purged when the supplier fails to probe or later unbinds. +If neither ``DL_FLAG_AUTOREMOVE_CONSUMER`` nor ``DL_FLAG_AUTOREMOVE_SUPPLIER`` +is set, ``DL_FLAG_AUTOPROBE_CONSUMER`` can be used to request the driver core +to probe for a driver for the consumer driver on the link automatically after +a driver has been bound to the supplier device. 
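A minimal sketch of the managed-link pattern discussed above, for a hypothetical consumer driver that has already located its supplier device by bus- or firmware-specific means (foo_link_to_supplier() and its caller are assumptions for illustration, not an interface defined here):

  #include <linux/device.h>

  /*
   * Hypothetical helper called from a consumer driver's ->probe() path.
   * The supplier pointer is assumed to have been looked up earlier.
   */
  static int foo_link_to_supplier(struct device *consumer,
                                  struct device *supplier)
  {
          struct device_link *link;

          /*
           * Managed link: runtime-resume the supplier and keep it active
           * until the consumer runtime-suspends; the driver core purges
           * the link if the consumer fails to probe or later unbinds.
           */
          link = device_link_add(consumer, supplier,
                                 DL_FLAG_RPM_ACTIVE |
                                 DL_FLAG_AUTOREMOVE_CONSUMER);
          if (!link)
                  return -EINVAL;

          /* No device_link_del() here: the driver core manages this link. */
          return 0;
  }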
+ +Note, however, that any combinations of ``DL_FLAG_AUTOREMOVE_CONSUMER``, +``DL_FLAG_AUTOREMOVE_SUPPLIER`` or ``DL_FLAG_AUTOPROBE_CONSUMER`` with +``DL_FLAG_STATELESS`` are invalid and cannot be used. + Limitations =========== -Driver authors should be aware that a driver presence dependency (i.e. when -``DL_FLAG_STATELESS`` is not specified on link addition) may cause probing of -the consumer to be deferred indefinitely. This can become a problem if the -consumer is required to probe before a certain initcall level is reached. -Worse, if the supplier driver is blacklisted or missing, the consumer will -never be probed. +Driver authors should be aware that a driver presence dependency for managed +device links (i.e. when ``DL_FLAG_STATELESS`` is not specified on link addition) +may cause probing of the consumer to be deferred indefinitely. This can become +a problem if the consumer is required to probe before a certain initcall level +is reached. Worse, if the supplier driver is blacklisted or missing, the +consumer will never be probed. + +Moreover, managed device links cannot be deleted directly. They are deleted +by the driver core when they are not necessary any more in accordance with the +``DL_FLAG_AUTOREMOVE_CONSUMER`` and ``DL_FLAG_AUTOREMOVE_SUPPLIER`` flags. +However, stateless device links (i.e. device links with ``DL_FLAG_STATELESS`` +set) are expected to be removed by whoever called :c:func:`device_link_add()` +to add them with the help of either :c:func:`device_link_del()` or +:c:func:`device_link_remove()`. + +Passing ``DL_FLAG_RPM_ACTIVE`` along with ``DL_FLAG_STATELESS`` to +:c:func:`device_link_add()` may cause the PM-runtime usage counter of the +supplier device to remain nonzero after a subsequent invocation of either +:c:func:`device_link_del()` or :c:func:`device_link_remove()` to remove the +device link returned by it. This happens if :c:func:`device_link_add()` is +called twice in a row for the same consumer-supplier pair without removing the +link between these calls, in which case allowing the PM-runtime usage counter +of the supplier to drop on an attempt to remove the link may cause it to be +suspended while the consumer is still PM-runtime-active and that has to be +avoided. [To work around this limitation it is sufficient to let the consumer +runtime suspend at least once, or call :c:func:`pm_runtime_set_suspended()` for +it with PM-runtime disabled, between the :c:func:`device_link_add()` and +:c:func:`device_link_del()` or :c:func:`device_link_remove()` calls.] Sometimes drivers depend on optional resources. They are able to operate in a degraded mode (reduced feature set or performance) when those resources @@ -282,4 +321,4 @@ API === .. kernel-doc:: drivers/base/core.c - :functions: device_link_add device_link_del + :functions: device_link_add device_link_del device_link_remove diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst index fbbb2831f29f..d728e50105eb 100644 --- a/Documentation/driver-api/dmaengine/client.rst +++ b/Documentation/driver-api/dmaengine/client.rst @@ -168,6 +168,13 @@ The details of these operations are: dmaengine_submit() will not start the DMA operation, it merely adds it to the pending queue. For this, see step 5, dma_async_issue_pending. + .. note:: + + After calling ``dmaengine_submit()`` the submitted transfer descriptor + (``struct dma_async_tx_descriptor``) belongs to the DMA engine. + Consequentially, the client must consider invalid the pointer to that + descriptor. 
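A minimal sketch of this ownership rule, assuming a slave channel, a DMA-mapped buffer and a length obtained in the earlier steps (foo_start_tx() is a made-up helper); the descriptor pointer is not touched after dmaengine_submit(), only the returned cookie is, and the final call corresponds to step 5 below:

  #include <linux/dmaengine.h>

  /* chan, dma_buf and len are assumed to come from the preceding steps. */
  static int foo_start_tx(struct dma_chan *chan, dma_addr_t dma_buf, size_t len)
  {
          struct dma_async_tx_descriptor *desc;
          dma_cookie_t cookie;

          desc = dmaengine_prep_slave_single(chan, dma_buf, len,
                                             DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT);
          if (!desc)
                  return -ENOMEM;

          cookie = dmaengine_submit(desc);        /* step 4 */
          desc = NULL;    /* the descriptor now belongs to the DMA engine */
          if (dma_submit_error(cookie))
                  return -EIO;

          dma_async_issue_pending(chan);          /* step 5 */
          return 0;
  }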
+ 5. Issue pending DMA requests and wait for callback notification The transactions in the pending queue can be activated by calling the diff --git a/Documentation/driver-api/gpio/board.rst b/Documentation/driver-api/gpio/board.rst index a0f294e2e250..b37f3f7b8926 100644 --- a/Documentation/driver-api/gpio/board.rst +++ b/Documentation/driver-api/gpio/board.rst @@ -204,6 +204,7 @@ between a caller and a respective .get/set_multiple() callback of a GPIO chip. In order to qualify for fast bitmap processing, the array must meet the following requirements: + - pin hardware number of array member 0 must also be 0, - pin hardware numbers of consecutive array members which belong to the same chip as member 0 does must also match their array indexes. diff --git a/Documentation/driver-api/gpio/driver.rst b/Documentation/driver-api/gpio/driver.rst index a92d8837b62b..3043167fc557 100644 --- a/Documentation/driver-api/gpio/driver.rst +++ b/Documentation/driver-api/gpio/driver.rst @@ -135,7 +135,7 @@ This configuration is normally used as a way to achieve one of two things: - inverse wire-OR on an I/O line, for example a GPIO line, making it possible for any driving stage on the line to drive it low even if any other output to the same line is simultaneously driving it high. A special case of this - is driving the SCL and SCA lines of an I2C bus, which is by definition a + is driving the SCL and SDA lines of an I2C bus, which is by definition a wire-OR bus. Both usecases require that the line be equipped with a pull-up resistor. This diff --git a/Documentation/driver-api/gpio/legacy.rst b/Documentation/driver-api/gpio/legacy.rst index 5e9421e05f1d..9bc34ba697d9 100644 --- a/Documentation/driver-api/gpio/legacy.rst +++ b/Documentation/driver-api/gpio/legacy.rst @@ -690,11 +690,10 @@ and have the following read/write attributes: and if it has been configured to generate interrupts (see the description of "edge"), you can poll(2) on that file and poll(2) will return whenever the interrupt was triggered. If - you use poll(2), set the events POLLPRI and POLLERR. If you - use select(2), set the file descriptor in exceptfds. After - poll(2) returns, either lseek(2) to the beginning of the sysfs - file and read the new value or close the file and re-open it - to read the value. + you use poll(2), set the events POLLPRI. If you use select(2), + set the file descriptor in exceptfds. After poll(2) returns, + either lseek(2) to the beginning of the sysfs file and read the + new value or close the file and re-open it to read the value. "edge" ... reads as either "none", "rising", "falling", or "both". Write these strings to select the signal edge(s) diff --git a/Documentation/driver-api/iio/buffers.rst b/Documentation/driver-api/iio/buffers.rst index 02c99a6bee18..e9036ef9f8f4 100644 --- a/Documentation/driver-api/iio/buffers.rst +++ b/Documentation/driver-api/iio/buffers.rst @@ -26,7 +26,7 @@ IIO buffer setup ================ The meta information associated with a channel reading placed in a buffer is -called a scan element . The important bits configuring scan elements are +called a scan element. The important bits configuring scan elements are exposed to userspace applications via the :file:`/sys/bus/iio/iio:device{X}/scan_elements/*` directory. 
This file contains attributes of the following form: diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst index 9a34ae03b679..b0bc0c028cc5 100644 --- a/Documentation/driver-api/iio/core.rst +++ b/Documentation/driver-api/iio/core.rst @@ -2,8 +2,8 @@ Core elements ============= -The Industrial I/O core offers a unified framework for writing drivers for -many different types of embedded sensors. a standard interface to user space +The Industrial I/O core offers both a unified framework for writing drivers for +many different types of embedded sensors and a standard interface to user space applications manipulating sensors. The implementation can be found under :file:`drivers/iio/industrialio-*` @@ -11,7 +11,7 @@ Industrial I/O Devices ---------------------- * struct :c:type:`iio_dev` - industrial I/O device -* :c:func:`iio_device_alloc()` - alocate an :c:type:`iio_dev` from a driver +* :c:func:`iio_device_alloc()` - allocate an :c:type:`iio_dev` from a driver * :c:func:`iio_device_free()` - free an :c:type:`iio_dev` from a driver * :c:func:`iio_device_register()` - register a device with the IIO subsystem * :c:func:`iio_device_unregister()` - unregister a device from the IIO diff --git a/Documentation/driver-api/iio/hw-consumer.rst b/Documentation/driver-api/iio/hw-consumer.rst index 8facce6a6733..e0fe0b98230e 100644 --- a/Documentation/driver-api/iio/hw-consumer.rst +++ b/Documentation/driver-api/iio/hw-consumer.rst @@ -1,7 +1,7 @@ =========== HW consumer =========== -An IIO device can be directly connected to another device in hardware. in this +An IIO device can be directly connected to another device in hardware. In this case the buffers between IIO provider and IIO consumer are handled by hardware. The Industrial I/O HW consumer offers a way to bond these IIO devices without software buffer for data. The implementation can be found under diff --git a/Documentation/driver-api/iio/triggers.rst b/Documentation/driver-api/iio/triggers.rst index f89d37e7dd82..5c2156de6284 100644 --- a/Documentation/driver-api/iio/triggers.rst +++ b/Documentation/driver-api/iio/triggers.rst @@ -38,7 +38,7 @@ There are two locations in sysfs related to triggers: * :file:`/sys/bus/iio/devices/iio:device{X}/trigger/*`, this directory is created once the device supports a triggered buffer. We can associate a - trigger with our device by writing the trigger's name in the + trigger with our device by writing the trigger's name in the :file:`current_trigger` file. IIO trigger setup diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index ab38ced66a44..c0b600ed9961 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -22,6 +22,7 @@ available subsections can be seen below. device_connection dma-buf device_link + component message-based sound frame-buffer diff --git a/Documentation/driver-api/pinctl.rst b/Documentation/driver-api/pinctl.rst index 6cb68d67fa75..2bb1bc484278 100644 --- a/Documentation/driver-api/pinctl.rst +++ b/Documentation/driver-api/pinctl.rst @@ -274,15 +274,6 @@ configuration in the pin controller ops like this:: .confops = &foo_pconf_ops, }; -Since some controllers have special logic for handling entire groups of pins -they can exploit the special whole-group pin control function. 
The -pin_config_group_set() callback is allowed to return the error code -EAGAIN, -for groups it does not want to handle, or if it just wants to do some -group-level handling and then fall through to iterate over all pins, in which -case each individual pin will be treated by separate pin_config_set() calls as -well. - - Interaction with the GPIO subsystem =================================== diff --git a/Documentation/driver-api/pm/cpuidle.rst b/Documentation/driver-api/pm/cpuidle.rst new file mode 100644 index 000000000000..5842ab621a58 --- /dev/null +++ b/Documentation/driver-api/pm/cpuidle.rst @@ -0,0 +1,282 @@ +.. |struct cpuidle_governor| replace:: :c:type:`struct cpuidle_governor ` +.. |struct cpuidle_device| replace:: :c:type:`struct cpuidle_device ` +.. |struct cpuidle_driver| replace:: :c:type:`struct cpuidle_driver ` +.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state ` + +======================== +CPU Idle Time Management +======================== + +:: + + Copyright (c) 2019 Intel Corp., Rafael J. Wysocki + + +CPU Idle Time Management Subsystem +================================== + +Every time one of the logical CPUs in the system (the entities that appear to +fetch and execute instructions: hardware threads, if present, or processor +cores) is idle after an interrupt or equivalent wakeup event, which means that +there are no tasks to run on it except for the special "idle" task associated +with it, there is an opportunity to save energy for the processor that it +belongs to. That can be done by making the idle logical CPU stop fetching +instructions from memory and putting some of the processor's functional units +depended on by it into an idle state in which they will draw less power. + +However, there may be multiple different idle states that can be used in such a +situation in principle, so it may be necessary to find the most suitable one +(from the kernel perspective) and ask the processor to use (or "enter") that +particular idle state. That is the role of the CPU idle time management +subsystem in the kernel, called ``CPUIdle``. + +The design of ``CPUIdle`` is modular and based on the code duplication avoidance +principle, so the generic code that in principle need not depend on the hardware +or platform design details in it is separate from the code that interacts with +the hardware. It generally is divided into three categories of functional +units: *governors* responsible for selecting idle states to ask the processor +to enter, *drivers* that pass the governors' decisions on to the hardware and +the *core* providing a common framework for them. + + +CPU Idle Time Governors +======================= + +A CPU idle time (``CPUIdle``) governor is a bundle of policy code invoked when +one of the logical CPUs in the system turns out to be idle. Its role is to +select an idle state to ask the processor to enter in order to save some energy. + +``CPUIdle`` governors are generic and each of them can be used on any hardware +platform that the Linux kernel can run on. For this reason, data structures +operated on by them cannot depend on any hardware architecture or platform +design details as well. + +The governor itself is represented by a |struct cpuidle_governor| object +containing four callback pointers, :c:member:`enable`, :c:member:`disable`, +:c:member:`select`, :c:member:`reflect`, a :c:member:`rating` field described +below, and a name (string) used for identifying it. 
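For orientation, here is a minimal sketch of what such a governor object and its registration might look like. It is only an illustration: the name ``demo``, the low ``rating`` and the trivial "always pick state 0" policy are invented here, and the callback prototypes and registration call simply follow the ones described in the remainder of this section::

  /* Illustrative governor sketch, not part of the kernel. */
  #include <linux/cpuidle.h>
  #include <linux/init.h>

  /* Always pick the shallowest state and leave the scheduler tick running. */
  static int demo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         bool *stop_tick)
  {
          *stop_tick = false;
          return 0;
  }

  static void demo_reflect(struct cpuidle_device *dev, int index)
  {
          /* Nothing to learn from with such a trivial ->select(). */
  }

  static struct cpuidle_governor demo_governor = {
          .name    = "demo",
          .rating  = 10,          /* low rating, so real governors win */
          .select  = demo_select, /* mandatory, as noted below */
          .reflect = demo_reflect,
          /* ->enable()/->disable() left NULL: no per-CPU state to set up */
  };

  static int __init demo_governor_init(void)
  {
          return cpuidle_register_governor(&demo_governor);
  }
  postcore_initcall(demo_governor_init);

Real governors such as ``menu`` and ``ladder`` of course base ``->select()`` on the PM QoS latency limit and on idle-duration predictions rather than always returning 0.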
+ +For the governor to be available at all, that object needs to be registered +with the ``CPUIdle`` core by calling :c:func:`cpuidle_register_governor()` with +a pointer to it passed as the argument. If successful, that causes the core to +add the governor to the global list of available governors and, if it is the +only one in the list (that is, the list was empty before) or the value of its +:c:member:`rating` field is greater than the value of that field for the +governor currently in use, or the name of the new governor was passed to the +kernel as the value of the ``cpuidle.governor=`` command line parameter, the new +governor will be used from that point on (there can be only one ``CPUIdle`` +governor in use at a time). Also, if ``cpuidle_sysfs_switch`` is passed to the +kernel in the command line, user space can choose the ``CPUIdle`` governor to +use at run time via ``sysfs``. + +Once registered, ``CPUIdle`` governors cannot be unregistered, so it is not +practical to put them into loadable kernel modules. + +The interface between ``CPUIdle`` governors and the core consists of four +callbacks: + +:c:member:`enable` + :: + + int (*enable) (struct cpuidle_driver *drv, struct cpuidle_device *dev); + + The role of this callback is to prepare the governor for handling the + (logical) CPU represented by the |struct cpuidle_device| object pointed + to by the ``dev`` argument. The |struct cpuidle_driver| object pointed + to by the ``drv`` argument represents the ``CPUIdle`` driver to be used + with that CPU (among other things, it should contain the list of + |struct cpuidle_state| objects representing idle states that the + processor holding the given CPU can be asked to enter). + + It may fail, in which case it is expected to return a negative error + code, and that causes the kernel to run the architecture-specific + default code for idle CPUs on the CPU in question instead of ``CPUIdle`` + until the ``->enable()`` governor callback is invoked for that CPU + again. + +:c:member:`disable` + :: + + void (*disable) (struct cpuidle_driver *drv, struct cpuidle_device *dev); + + Called to make the governor stop handling the (logical) CPU represented + by the |struct cpuidle_device| object pointed to by the ``dev`` + argument. + + It is expected to reverse any changes made by the ``->enable()`` + callback when it was last invoked for the target CPU, free all memory + allocated by that callback and so on. + +:c:member:`select` + :: + + int (*select) (struct cpuidle_driver *drv, struct cpuidle_device *dev, + bool *stop_tick); + + Called to select an idle state for the processor holding the (logical) + CPU represented by the |struct cpuidle_device| object pointed to by the + ``dev`` argument. + + The list of idle states to take into consideration is represented by the + :c:member:`states` array of |struct cpuidle_state| objects held by the + |struct cpuidle_driver| object pointed to by the ``drv`` argument (which + represents the ``CPUIdle`` driver to be used with the CPU at hand). The + value returned by this callback is interpreted as an index into that + array (unless it is a negative error code). + + The ``stop_tick`` argument is used to indicate whether or not to stop + the scheduler tick before asking the processor to enter the selected + idle state. 
When the ``bool`` variable pointed to by it (which is set + to ``true`` before invoking this callback) is cleared to ``false``, the + processor will be asked to enter the selected idle state without + stopping the scheduler tick on the given CPU (if the tick has been + stopped on that CPU already, however, it will not be restarted before + asking the processor to enter the idle state). + + This callback is mandatory (i.e. the :c:member:`select` callback pointer + in |struct cpuidle_governor| must not be ``NULL`` for the registration + of the governor to succeed). + +:c:member:`reflect` + :: + + void (*reflect) (struct cpuidle_device *dev, int index); + + Called to allow the governor to evaluate the accuracy of the idle state + selection made by the ``->select()`` callback (when it was invoked last + time) and possibly use the result of that to improve the accuracy of + idle state selections in the future. + +In addition, ``CPUIdle`` governors are required to take power management +quality of service (PM QoS) constraints on the processor wakeup latency into +account when selecting idle states. In order to obtain the current effective +PM QoS wakeup latency constraint for a given CPU, a ``CPUIdle`` governor is +expected to pass the number of the CPU to +:c:func:`cpuidle_governor_latency_req()`. Then, the governor's ``->select()`` +callback must not return the index of an idle state whose +:c:member:`exit_latency` value is greater than the number returned by that +function. + + +CPU Idle Time Management Drivers +================================ + +CPU idle time management (``CPUIdle``) drivers provide an interface between the +other parts of ``CPUIdle`` and the hardware. + +First of all, a ``CPUIdle`` driver has to populate the :c:member:`states` array +of |struct cpuidle_state| objects included in the |struct cpuidle_driver| object +representing it. Going forward, this array will represent the list of available +idle states that the processor hardware can be asked to enter, shared by all of +the logical CPUs handled by the given driver. + +The entries in the :c:member:`states` array are expected to be sorted by the +value of the :c:member:`target_residency` field in |struct cpuidle_state| in +the ascending order (that is, index 0 should correspond to the idle state with +the minimum value of :c:member:`target_residency`). [Since the +:c:member:`target_residency` value is expected to reflect the "depth" of the +idle state represented by the |struct cpuidle_state| object holding it, this +sorting order should be the same as the ascending sorting order by the idle +state "depth".] + +Three fields in |struct cpuidle_state| are used by the existing ``CPUIdle`` +governors for computations related to idle state selection: + +:c:member:`target_residency` + Minimum time to spend in this idle state including the time needed to + enter it (which may be substantial) to save more energy than could + be saved by staying in a shallower idle state for the same amount of + time, in microseconds. + +:c:member:`exit_latency` + Maximum time it will take a CPU asking the processor to enter this idle + state to start executing the first instruction after a wakeup from it, + in microseconds. + +:c:member:`flags` + Flags representing idle state properties. 
Currently, governors only use + the ``CPUIDLE_FLAG_POLLING`` flag, which is set if the given object + does not represent a real idle state, but an interface to a software + "loop" that can be used in order to avoid asking the processor to enter + any idle state at all. [There are other flags used by the ``CPUIdle`` + core in special situations.] + +The :c:member:`enter` callback pointer in |struct cpuidle_state|, which must not +be ``NULL``, points to the routine to execute in order to ask the processor to +enter this particular idle state: + +:: + + void (*enter) (struct cpuidle_device *dev, struct cpuidle_driver *drv, + int index); + +The first two arguments of it point to the |struct cpuidle_device| object +representing the logical CPU running this callback and the +|struct cpuidle_driver| object representing the driver itself, respectively, +and the last one is an index of the |struct cpuidle_state| entry in the driver's +:c:member:`states` array representing the idle state to ask the processor to +enter. + +The analogous ``->enter_s2idle()`` callback in |struct cpuidle_state| is used +only for implementing the suspend-to-idle system-wide power management feature. +The difference between it and ``->enter()`` is that it must not re-enable +interrupts at any point (even temporarily) or attempt to change the states of +clock event devices, which the ``->enter()`` callback may do sometimes. + +Once the :c:member:`states` array has been populated, the number of valid +entries in it has to be stored in the :c:member:`state_count` field of the +|struct cpuidle_driver| object representing the driver. Moreover, if any +entries in the :c:member:`states` array represent "coupled" idle states (that +is, idle states that can only be asked for if multiple related logical CPUs are +idle), the :c:member:`safe_state_index` field in |struct cpuidle_driver| needs +to be the index of an idle state that is not "coupled" (that is, one that can be +asked for if only one logical CPU is idle). + +In addition to that, if the given ``CPUIdle`` driver is only going to handle a +subset of logical CPUs in the system, the :c:member:`cpumask` field in its +|struct cpuidle_driver| object must point to the set (mask) of CPUs that will be +handled by it. + +A ``CPUIdle`` driver can only be used after it has been registered. If there +are no "coupled" idle state entries in the driver's :c:member:`states` array, +that can be accomplished by passing the driver's |struct cpuidle_driver| object +to :c:func:`cpuidle_register_driver()`. Otherwise, :c:func:`cpuidle_register()` +should be used for this purpose. + +However, it also is necessary to register |struct cpuidle_device| objects for +all of the logical CPUs to be handled by the given ``CPUIdle`` driver with the +help of :c:func:`cpuidle_register_device()` after the driver has been registered, +and :c:func:`cpuidle_register_driver()`, unlike :c:func:`cpuidle_register()`, +does not do that automatically. For this reason, the drivers that use +:c:func:`cpuidle_register_driver()` to register themselves must also take care +of registering the |struct cpuidle_device| objects as needed, so it is generally +recommended to use :c:func:`cpuidle_register()` for ``CPUIdle`` driver +registration in all cases.
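To make the registration path above concrete, here is a minimal, purely illustrative driver sketch. The names (``demo_idle``, ``demo_enter_c1``) and the latency/residency numbers are invented, the state-entry routine follows the ``->enter()`` prototype quoted earlier in this section (check ``include/linux/cpuidle.h`` for the exact signature in your tree), and a real driver would fill in hardware-specific details::

  #include <linux/cpuidle.h>
  #include <linux/init.h>
  #include <linux/module.h>

  /* Hardware-specific state entry; prototype as quoted earlier in this section. */
  static void demo_enter_c1(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
  {
          /* e.g. execute the platform's "wait for interrupt" instruction */
  }

  static struct cpuidle_driver demo_idle_driver = {
          .name  = "demo_idle",
          .owner = THIS_MODULE,
          .states = {
                  {       /* index 0: the shallowest state (smallest target_residency) */
                          .name             = "C1",
                          .desc             = "demo clock-gated state",
                          .exit_latency     = 2,   /* microseconds */
                          .target_residency = 10,  /* microseconds */
                          .enter            = demo_enter_c1,
                  },
                  /* deeper states would follow, sorted by target_residency */
          },
          .state_count = 1,
  };

  static int __init demo_idle_init(void)
  {
          /*
           * No "coupled" states here; cpuidle_register() also registers the
           * per-CPU struct cpuidle_device objects, as recommended above.
           */
          return cpuidle_register(&demo_idle_driver, NULL);
  }
  device_initcall(demo_idle_init);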
+ +The registration of a |struct cpuidle_device| object causes the ``CPUIdle`` +``sysfs`` interface to be created and the governor's ``->enable()`` callback to +be invoked for the logical CPU represented by it, so it must take place after +registering the driver that will handle the CPU in question. + +``CPUIdle`` drivers and |struct cpuidle_device| objects can be unregistered +when they are not necessary any more which allows some resources associated with +them to be released. Due to dependencies between them, all of the +|struct cpuidle_device| objects representing CPUs handled by the given +``CPUIdle`` driver must be unregistered, with the help of +:c:func:`cpuidle_unregister_device()`, before calling +:c:func:`cpuidle_unregister_driver()` to unregister the driver. Alternatively, +:c:func:`cpuidle_unregister()` can be called to unregister a ``CPUIdle`` driver +along with all of the |struct cpuidle_device| objects representing CPUs handled +by it. + +``CPUIdle`` drivers can respond to runtime system configuration changes that +lead to modifications of the list of available processor idle states (which can +happen, for example, when the system's power source is switched from AC to +battery or the other way around). Upon a notification of such a change, +a ``CPUIdle`` driver is expected to call :c:func:`cpuidle_pause_and_lock()` to +turn ``CPUIdle`` off temporarily and then :c:func:`cpuidle_disable_device()` for +all of the |struct cpuidle_device| objects representing CPUs affected by that +change. Next, it can update its :c:member:`states` array in accordance with +the new configuration of the system, call :c:func:`cpuidle_enable_device()` for +all of the relevant |struct cpuidle_device| objects and invoke +:c:func:`cpuidle_resume_and_unlock()` to allow ``CPUIdle`` to be used again. diff --git a/Documentation/driver-api/pm/index.rst b/Documentation/driver-api/pm/index.rst index 2f6d0e9cf6b7..56975c6bc789 100644 --- a/Documentation/driver-api/pm/index.rst +++ b/Documentation/driver-api/pm/index.rst @@ -1,9 +1,10 @@ -======================= -Device Power Management -======================= +=============================== +CPU and Device Power Management +=============================== .. toctree:: + cpuidle devices notifiers types diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt index 4d1b7b4ccfaf..a17517a083c3 100644 --- a/Documentation/fault-injection/fault-injection.txt +++ b/Documentation/fault-injection/fault-injection.txt @@ -195,7 +195,7 @@ o #include o define the fault attributes - DECLARE_FAULT_INJECTION(name); + DECLARE_FAULT_ATTR(name); Please see the definition of struct fault_attr in fault-inject.h for details. diff --git a/Documentation/filesystems/api-summary.rst b/Documentation/filesystems/api-summary.rst new file mode 100644 index 000000000000..aa51ffcfa029 --- /dev/null +++ b/Documentation/filesystems/api-summary.rst @@ -0,0 +1,150 @@ +============================= +Linux Filesystems API summary +============================= + +This section contains API-level documentation, mostly taken from the source +code itself. + +The Linux VFS +============= + +The Filesystem types +-------------------- + +.. kernel-doc:: include/linux/fs.h + :internal: + +The Directory Cache +------------------- + +.. kernel-doc:: fs/dcache.c + :export: + +.. kernel-doc:: include/linux/dcache.h + :internal: + +Inode Handling +-------------- + +.. kernel-doc:: fs/inode.c + :export: + +.. 
kernel-doc:: fs/bad_inode.c + :export: + +Registration and Superblocks +---------------------------- + +.. kernel-doc:: fs/super.c + :export: + +File Locks +---------- + +.. kernel-doc:: fs/locks.c + :export: + +.. kernel-doc:: fs/locks.c + :internal: + +Other Functions +--------------- + +.. kernel-doc:: fs/mpage.c + :export: + +.. kernel-doc:: fs/namei.c + :export: + +.. kernel-doc:: fs/buffer.c + :export: + +.. kernel-doc:: block/bio.c + :export: + +.. kernel-doc:: fs/seq_file.c + :export: + +.. kernel-doc:: fs/filesystems.c + :export: + +.. kernel-doc:: fs/fs-writeback.c + :export: + +.. kernel-doc:: fs/block_dev.c + :export: + +.. kernel-doc:: fs/anon_inodes.c + :export: + +.. kernel-doc:: fs/attr.c + :export: + +.. kernel-doc:: fs/d_path.c + :export: + +.. kernel-doc:: fs/dax.c + :export: + +.. kernel-doc:: fs/direct-io.c + :export: + +.. kernel-doc:: fs/file_table.c + :export: + +.. kernel-doc:: fs/libfs.c + :export: + +.. kernel-doc:: fs/posix_acl.c + :export: + +.. kernel-doc:: fs/stat.c + :export: + +.. kernel-doc:: fs/sync.c + :export: + +.. kernel-doc:: fs/xattr.c + :export: + +The proc filesystem +=================== + +sysctl interface +---------------- + +.. kernel-doc:: kernel/sysctl.c + :export: + +proc filesystem interface +------------------------- + +.. kernel-doc:: fs/proc/base.c + :internal: + +Events based on file descriptors +================================ + +.. kernel-doc:: fs/eventfd.c + :export: + +The Filesystem for Exporting Kernel Objects +=========================================== + +.. kernel-doc:: fs/sysfs/file.c + :export: + +.. kernel-doc:: fs/sysfs/symlink.c + :export: + +The debugfs filesystem +====================== + +debugfs interface +----------------- + +.. kernel-doc:: fs/debugfs/inode.c + :export: + +.. kernel-doc:: fs/debugfs/file.c + :export: diff --git a/Documentation/filesystems/binderfs.rst b/Documentation/filesystems/binderfs.rst new file mode 100644 index 000000000000..c009671f8434 --- /dev/null +++ b/Documentation/filesystems/binderfs.rst @@ -0,0 +1,68 @@ +.. SPDX-License-Identifier: GPL-2.0 + +The Android binderfs Filesystem +=============================== + +Android binderfs is a filesystem for the Android binder IPC mechanism. It +allows to dynamically add and remove binder devices at runtime. Binder devices +located in a new binderfs instance are independent of binder devices located in +other binderfs instances. Mounting a new binderfs instance makes it possible +to get a set of private binder devices. + +Mounting binderfs +----------------- + +Android binderfs can be mounted with:: + + mkdir /dev/binderfs + mount -t binder binder /dev/binderfs + +at which point a new instance of binderfs will show up at ``/dev/binderfs``. +In a fresh instance of binderfs no binder devices will be present. There will +only be a ``binder-control`` device which serves as the request handler for +binderfs. Mounting another binderfs instance at a different location will +create a new and separate instance from all other binderfs mounts. This is +identical to the behavior of e.g. ``devpts`` and ``tmpfs``. The Android +binderfs filesystem can be mounted in user namespaces. + +Options +------- +max + binderfs instances can be mounted with a limit on the number of binder + devices that can be allocated. The ``max=`` mount option serves as + a per-instance limit. If ``max=`` is set then only ```` number + of binder devices can be allocated in this binderfs instance. + +Allocating binder Devices +------------------------- + +.. 
_ioctl: http://man7.org/linux/man-pages/man2/ioctl.2.html + +To allocate a new binder device in a binderfs instance a request needs to be +sent through the ``binder-control`` device node. A request is sent in the form +of an `ioctl() `_. + +What a program needs to do is to open the ``binder-control`` device node and +send a ``BINDER_CTL_ADD`` request to the kernel. Users of binderfs need to +tell the kernel which name the new binder device should get. By default a name +can only contain up to ``BINDERFS_MAX_NAME`` chars including the terminating +zero byte. + +Once the request is made via an `ioctl() `_ passing a ``struct +binder_device`` with the name to the kernel it will allocate a new binder +device and return the major and minor number of the new device in the struct +(This is necessary because binderfs allocates a major device number +dynamically.). After the `ioctl() `_ returns there will be a new +binder device located under /dev/binderfs with the chosen name. + +Deleting binder Devices +----------------------- + +.. _unlink: http://man7.org/linux/man-pages/man2/unlink.2.html +.. _rm: http://man7.org/linux/man-pages/man1/rm.1.html + +Binderfs binder devices can be deleted via `unlink() `_. This means +that the `rm() `_ tool can be used to delete them. Note that the +``binder-control`` device cannot be deleted since this would make the binderfs +instance unuseable. The ``binder-control`` device will be deleted when the +binderfs instance is unmounted and all references to it have been dropped. diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt index 1177052701e1..d2c6a5ccf0f5 100644 --- a/Documentation/filesystems/ceph.txt +++ b/Documentation/filesystems/ceph.txt @@ -22,9 +22,7 @@ In contrast to cluster filesystems like GFS, OCFS2, and GPFS that rely on symmetric access by all clients to shared block devices, Ceph separates data and metadata management into independent server clusters, similar to Lustre. Unlike Lustre, however, metadata and -storage nodes run entirely as user space daemons. Storage nodes -utilize btrfs to store data objects, leveraging its advanced features -(checksumming, metadata replication, etc.). File data is striped +storage nodes run entirely as user space daemons. File data is striped across storage nodes in large chunks to distribute workload and facilitate high throughputs. When storage nodes fail, data is re-replicated in a distributed fashion by the storage nodes themselves @@ -118,6 +116,10 @@ Mount Options of a non-responsive Ceph file system. The default is 30 seconds. + caps_max=X + Specify the maximum number of caps to hold. Unused caps are released + when number of caps exceeds the limit. 
The default is 0 (no limit) + rbytes When stat() is called on a directory, set st_size to 'rbytes', the summation of file sizes over all files nested beneath that @@ -160,11 +162,11 @@ More Information ================ For more information on Ceph, see the home page at - http://ceph.newdream.net/ + https://ceph.com/ The Linux kernel client source tree is available at - git://ceph.newdream.net/git/ceph-client.git + https://github.com/ceph/ceph-client.git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git and the source for the full system is at - git://ceph.newdream.net/git/ceph.git + https://github.com/ceph/ceph.git diff --git a/Documentation/filesystems/exofs.txt b/Documentation/filesystems/exofs.txt deleted file mode 100644 index 23583a136975..000000000000 --- a/Documentation/filesystems/exofs.txt +++ /dev/null @@ -1,185 +0,0 @@ -=============================================================================== -WHAT IS EXOFS? -=============================================================================== - -exofs is a file system that uses an OSD and exports the API of a normal Linux -file system. Users access exofs like any other local file system, and exofs -will in turn issue commands to the local OSD initiator. - -OSD is a new T10 command set that views storage devices not as a large/flat -array of sectors but as a container of objects, each having a length, quota, -time attributes and more. Each object is addressed by a 64bit ID, and is -contained in a 64bit ID partition. Each object has associated attributes -attached to it, which are integral part of the object and provide metadata about -the object. The standard defines some common obligatory attributes, but user -attributes can be added as needed. - -=============================================================================== -ENVIRONMENT -=============================================================================== - -To use this file system, you need to have an object store to run it on. You -may download a target from: -http://open-osd.org - -See Documentation/scsi/osd.txt for how to setup a working osd environment. - -=============================================================================== -USAGE -=============================================================================== - -1. Download and compile exofs and open-osd initiator: - You need an external Kernel source tree or kernel headers from your - distribution. (anything based on 2.6.26 or later). - - a. download open-osd including exofs source using: - [parent-directory]$ git clone git://git.open-osd.org/open-osd.git - - b. Build the library module like this: - [parent-directory]$ make -C KSRC=$(KER_DIR) open-osd - - This will build both the open-osd initiator as well as the exofs kernel - module. Use whatever parameters you compiled your Kernel with and - $(KER_DIR) above pointing to the Kernel you compile against. See the file - open-osd/top-level-Makefile for an example. - -2. Get the OSD initiator and target set up properly, and login to the target. - See Documentation/scsi/osd.txt for farther instructions. Also see ./do-osd - for example script that does all these steps. - -3. Insmod the exofs.ko module: - [exofs]$ insmod exofs.ko - -4. Make sure the directory where you want to mount exists. If not, create it. - (For example, mkdir /mnt/exofs) - -5. 
At first run you will need to invoke the mkfs.exofs application - - As an example, this will create the file system on: - /dev/osd0 partition ID 65536 - - mkfs.exofs --pid=65536 --format /dev/osd0 - - The --format is optional. If not specified, no OSD_FORMAT will be - performed and a clean file system will be created in the specified pid, - in the available space of the target. (Use --format=size_in_meg to limit - the total LUN space available) - - If pid already exists, it will be deleted and a new one will be created in - its place. Be careful. - - An exofs lives inside a single OSD partition. You can create multiple exofs - filesystems on the same device using multiple pids. - - (run mkfs.exofs without any parameters for usage help message) - -6. Mount the file system. - - For example, to mount /dev/osd0, partition ID 0x10000 on /mnt/exofs: - - mount -t exofs -o pid=65536 /dev/osd0 /mnt/exofs/ - -7. For reference (See do-exofs example script): - do-exofs start - an example of how to perform the above steps. - do-exofs stop - an example of how to unmount the file system. - do-exofs format - an example of how to format and mkfs a new exofs. - -8. Extra compilation flags (uncomment in fs/exofs/Kbuild): - CONFIG_EXOFS_DEBUG - for debug messages and extra checks. - -=============================================================================== -exofs mount options -=============================================================================== -Similar to any mount command: - mount -t exofs -o exofs_options /dev/osdX mount_exofs_directory - -Where: - -t exofs: specifies the exofs file system - - /dev/osdX: X is a decimal number. /dev/osdX was created after a successful - login into an OSD target. - - mount_exofs_directory: The directory to mount the file system on - - exofs specific options: Options are separated by commas (,) - pid= - The partition number to mount/create as - container of the filesystem. - This option is mandatory. integer can be - Hex by pre-pending an 0x to the number. - osdname= - Mount by a device's osdname. - osdname is usually a 36 character uuid of the - form "d2683732-c906-4ee1-9dbd-c10c27bb40df". - It is one of the device's uuid specified in the - mkfs.exofs format command. - If this option is specified then the /dev/osdX - above can be empty and is ignored. - to= - Timeout in ticks for a single command. - default is (60 * HZ) [for debugging only] - -=============================================================================== -DESIGN -=============================================================================== - -* The file system control block (AKA on-disk superblock) resides in an object - with a special ID (defined in common.h). - Information included in the file system control block is used to fill the - in-memory superblock structure at mount time. This object is created before - the file system is used by mkexofs.c. It contains information such as: - - The file system's magic number - - The next inode number to be allocated - -* Each file resides in its own object and contains the data (and it will be - possible to extend the file over multiple objects, though this has not been - implemented yet). - -* A directory is treated as a file, and essentially contains a list of pairs for files that are found in that directory. The object - IDs correspond to the files' inode numbers and will be allocated according to - a bitmap (stored in a separate object). Now they are allocated using a - counter. 
- -* Each file's control block (AKA on-disk inode) is stored in its object's - attributes. This applies to both regular files and other types (directories, - device files, symlinks, etc.). - -* Credentials are generated per object (inode and superblock) when they are - created in memory (read from disk or created). The credential works for all - operations and is used as long as the object remains in memory. - -* Async OSD operations are used whenever possible, but the target may execute - them out of order. The operations that concern us are create, delete, - readpage, writepage, update_inode, and truncate. The following pairs of - operations should execute in the order written, and we need to prevent them - from executing in reverse order: - - The following are handled with the OBJ_CREATED and OBJ_2BCREATED - flags. OBJ_CREATED is set when we know the object exists on the OSD - - in create's callback function, and when we successfully do a - read_inode. - OBJ_2BCREATED is set in the beginning of the create function, so we - know that we should wait. - - create/delete: delete should wait until the object is created - on the OSD. - - create/readpage: readpage should be able to return a page - full of zeroes in this case. If there was a write already - en-route (i.e. create, writepage, readpage) then the page - would be locked, and so it would really be the same as - create/writepage. - - create/writepage: if writepage is called for a sync write, it - should wait until the object is created on the OSD. - Otherwise, it should just return. - - create/truncate: truncate should wait until the object is - created on the OSD. - - create/update_inode: update_inode should wait until the - object is created on the OSD. - - Handled by VFS locks: - - readpage/delete: shouldn't happen because of page lock. - - writepage/delete: shouldn't happen because of page lock. - - readpage/writepage: shouldn't happen because of page lock. - -=============================================================================== -LICENSE/COPYRIGHT -=============================================================================== -The exofs file system is based on ext2 v0.5b (distributed with the Linux kernel -version 2.6.10). All files include the original copyrights, and the license -is GPL version 2 (only version 2, as is true for the Linux kernel). The -Linux kernel can be downloaded from www.kernel.org. diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst index 3a7b60521b94..08c23b60e016 100644 --- a/Documentation/filesystems/fscrypt.rst +++ b/Documentation/filesystems/fscrypt.rst @@ -343,9 +343,9 @@ FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors: - ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory - ``ENOTTY``: this type of filesystem does not implement encryption - ``EOPNOTSUPP``: the kernel was not configured with encryption - support for this filesystem, or the filesystem superblock has not + support for filesystems, or the filesystem superblock has not had encryption enabled on it. (For example, to use encryption on an - ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the + ext4 filesystem, CONFIG_FS_ENCRYPTION must be enabled in the kernel config, and the superblock must have had the "encrypt" feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O encrypt``.) @@ -451,10 +451,18 @@ astute users may notice some differences in behavior: - Unencrypted files, or files encrypted with a different encryption policy (i.e. 
different key, modes, or flags), cannot be renamed or linked into an encrypted directory; see `Encryption policy - enforcement`_. Attempts to do so will fail with EPERM. However, + enforcement`_. Attempts to do so will fail with EXDEV. However, encrypted files can be renamed within an encrypted directory, or into an unencrypted directory. + Note: "moving" an unencrypted file into an encrypted directory, e.g. + with the `mv` program, is implemented in userspace by a copy + followed by a delete. Be aware that the original unencrypted data + may remain recoverable from free space on the disk; prefer to keep + all files encrypted from the very beginning. The `shred` program + may be used to overwrite the source files but isn't guaranteed to be + effective on all filesystems and storage devices. + - Direct I/O is not supported on encrypted files. Attempts to use direct I/O on such files will fall back to buffered I/O. @@ -541,7 +549,7 @@ not be encrypted. Except for those special files, it is forbidden to have unencrypted files, or files encrypted with a different encryption policy, in an encrypted directory tree. Attempts to link or rename such a file into -an encrypted directory will fail with EPERM. This is also enforced +an encrypted directory will fail with EXDEV. This is also enforced during ->lookup() to provide limited protection against offline attacks that try to disable or downgrade encryption in known locations where applications may later write sensitive data. It is recommended diff --git a/Documentation/filesystems/index.rst b/Documentation/filesystems/index.rst index 605befab300b..1131c34d77f6 100644 --- a/Documentation/filesystems/index.rst +++ b/Documentation/filesystems/index.rst @@ -1,382 +1,43 @@ -===================== -Linux Filesystems API -===================== +=============================== +Filesystems in the Linux kernel +=============================== -The Linux VFS -============= +This under-development manual will, some glorious day, provide +comprehensive information on how the Linux virtual filesystem (VFS) layer +works, along with the filesystems that sit below it. For now, what we have +can be found below. -The Filesystem types --------------------- - -.. kernel-doc:: include/linux/fs.h - :internal: - -The Directory Cache -------------------- - -.. kernel-doc:: fs/dcache.c - :export: - -.. kernel-doc:: include/linux/dcache.h - :internal: - -Inode Handling --------------- - -.. kernel-doc:: fs/inode.c - :export: - -.. kernel-doc:: fs/bad_inode.c - :export: - -Registration and Superblocks ----------------------------- - -.. kernel-doc:: fs/super.c - :export: - -File Locks ----------- - -.. kernel-doc:: fs/locks.c - :export: - -.. kernel-doc:: fs/locks.c - :internal: - -Other Functions ---------------- - -.. kernel-doc:: fs/mpage.c - :export: - -.. kernel-doc:: fs/namei.c - :export: - -.. kernel-doc:: fs/buffer.c - :export: - -.. kernel-doc:: block/bio.c - :export: - -.. kernel-doc:: fs/seq_file.c - :export: - -.. kernel-doc:: fs/filesystems.c - :export: - -.. kernel-doc:: fs/fs-writeback.c - :export: - -.. kernel-doc:: fs/block_dev.c - :export: - -.. kernel-doc:: fs/anon_inodes.c - :export: - -.. kernel-doc:: fs/attr.c - :export: - -.. kernel-doc:: fs/d_path.c - :export: - -.. kernel-doc:: fs/dax.c - :export: - -.. kernel-doc:: fs/direct-io.c - :export: - -.. kernel-doc:: fs/file_table.c - :export: - -.. kernel-doc:: fs/libfs.c - :export: - -.. kernel-doc:: fs/posix_acl.c - :export: - -.. kernel-doc:: fs/stat.c - :export: - -.. 
kernel-doc:: fs/sync.c - :export: - -.. kernel-doc:: fs/xattr.c - :export: - -The proc filesystem -=================== - -sysctl interface ----------------- - -.. kernel-doc:: kernel/sysctl.c - :export: - -proc filesystem interface -------------------------- - -.. kernel-doc:: fs/proc/base.c - :internal: - -Events based on file descriptors -================================ - -.. kernel-doc:: fs/eventfd.c - :export: - -The Filesystem for Exporting Kernel Objects -=========================================== - -.. kernel-doc:: fs/sysfs/file.c - :export: - -.. kernel-doc:: fs/sysfs/symlink.c - :export: - -The debugfs filesystem +Core VFS documentation ====================== -debugfs interface ------------------ +See these manuals for documentation about the VFS layer itself and how its +algorithms work. -.. kernel-doc:: fs/debugfs/inode.c - :export: +.. toctree:: + :maxdepth: 2 -.. kernel-doc:: fs/debugfs/file.c - :export: + path-lookup.rst + api-summary + splice -The Linux Journalling API +Filesystem support layers ========================= -Overview --------- - -Details -~~~~~~~ - -The journalling layer is easy to use. You need to first of all create a -journal_t data structure. There are two calls to do this dependent on -how you decide to allocate the physical media on which the journal -resides. The :c:func:`jbd2_journal_init_inode` call is for journals stored in -filesystem inodes, or the :c:func:`jbd2_journal_init_dev` call can be used -for journal stored on a raw device (in a continuous range of blocks). A -journal_t is a typedef for a struct pointer, so when you are finally -finished make sure you call :c:func:`jbd2_journal_destroy` on it to free up -any used kernel memory. - -Once you have got your journal_t object you need to 'mount' or load the -journal file. The journalling layer expects the space for the journal -was already allocated and initialized properly by the userspace tools. -When loading the journal you must call :c:func:`jbd2_journal_load` to process -journal contents. If the client file system detects the journal contents -does not need to be processed (or even need not have valid contents), it -may call :c:func:`jbd2_journal_wipe` to clear the journal contents before -calling :c:func:`jbd2_journal_load`. - -Note that jbd2_journal_wipe(..,0) calls -:c:func:`jbd2_journal_skip_recovery` for you if it detects any outstanding -transactions in the journal and similarly :c:func:`jbd2_journal_load` will -call :c:func:`jbd2_journal_recover` if necessary. I would advise reading -:c:func:`ext4_load_journal` in fs/ext4/super.c for examples on this stage. - -Now you can go ahead and start modifying the underlying filesystem. -Almost. - -You still need to actually journal your filesystem changes, this is done -by wrapping them into transactions. Additionally you also need to wrap -the modification of each of the buffers with calls to the journal layer, -so it knows what the modifications you are actually making are. To do -this use :c:func:`jbd2_journal_start` which returns a transaction handle. - -:c:func:`jbd2_journal_start` and its counterpart :c:func:`jbd2_journal_stop`, -which indicates the end of a transaction are nestable calls, so you can -reenter a transaction if necessary, but remember you must call -:c:func:`jbd2_journal_stop` the same number of times as -:c:func:`jbd2_journal_start` before the transaction is completed (or more -accurately leaves the update phase). Ext4/VFS makes use of this feature to -simplify handling of inode dirtying, quota support, etc. 
- -Inside each transaction you need to wrap the modifications to the -individual buffers (blocks). Before you start to modify a buffer you -need to call :c:func:`jbd2_journal_get_create_access()` / -:c:func:`jbd2_journal_get_write_access()` / -:c:func:`jbd2_journal_get_undo_access()` as appropriate, this allows the -journalling layer to copy the unmodified -data if it needs to. After all the buffer may be part of a previously -uncommitted transaction. At this point you are at last ready to modify a -buffer, and once you are have done so you need to call -:c:func:`jbd2_journal_dirty_metadata`. Or if you've asked for access to a -buffer you now know is now longer required to be pushed back on the -device you can call :c:func:`jbd2_journal_forget` in much the same way as you -might have used :c:func:`bforget` in the past. - -A :c:func:`jbd2_journal_flush` may be called at any time to commit and -checkpoint all your transactions. - -Then at umount time , in your :c:func:`put_super` you can then call -:c:func:`jbd2_journal_destroy` to clean up your in-core journal object. - -Unfortunately there a couple of ways the journal layer can cause a -deadlock. The first thing to note is that each task can only have a -single outstanding transaction at any one time, remember nothing commits -until the outermost :c:func:`jbd2_journal_stop`. This means you must complete -the transaction at the end of each file/inode/address etc. operation you -perform, so that the journalling system isn't re-entered on another -journal. Since transactions can't be nested/batched across differing -journals, and another filesystem other than yours (say ext4) may be -modified in a later syscall. - -The second case to bear in mind is that :c:func:`jbd2_journal_start` can block -if there isn't enough space in the journal for your transaction (based -on the passed nblocks param) - when it blocks it merely(!) needs to wait -for transactions to complete and be committed from other tasks, so -essentially we are waiting for :c:func:`jbd2_journal_stop`. So to avoid -deadlocks you must treat :c:func:`jbd2_journal_start` / -:c:func:`jbd2_journal_stop` as if they were semaphores and include them in -your semaphore ordering rules to prevent -deadlocks. Note that :c:func:`jbd2_journal_extend` has similar blocking -behaviour to :c:func:`jbd2_journal_start` so you can deadlock here just as -easily as on :c:func:`jbd2_journal_start`. - -Try to reserve the right number of blocks the first time. ;-). This will -be the maximum number of blocks you are going to touch in this -transaction. I advise having a look at at least ext4_jbd.h to see the -basis on which ext4 uses to make these decisions. - -Another wriggle to watch out for is your on-disk block allocation -strategy. Why? Because, if you do a delete, you need to ensure you -haven't reused any of the freed blocks until the transaction freeing -these blocks commits. If you reused these blocks and crash happens, -there is no way to restore the contents of the reallocated blocks at the -end of the last fully committed transaction. One simple way of doing -this is to mark blocks as free in internal in-memory block allocation -structures only after the transaction freeing them commits. Ext4 uses -journal commit callback for this purpose. - -With journal commit callbacks you can ask the journalling layer to call -a callback function when the transaction is finally committed to disk, -so that you can do some of your own management. 
You ask the journalling -layer for calling the callback by simply setting -``journal->j_commit_callback`` function pointer and that function is -called after each transaction commit. You can also use -``transaction->t_private_list`` for attaching entries to a transaction -that need processing when the transaction commits. - -JBD2 also provides a way to block all transaction updates via -:c:func:`jbd2_journal_lock_updates()` / -:c:func:`jbd2_journal_unlock_updates()`. Ext4 uses this when it wants a -window with a clean and stable fs for a moment. E.g. - -:: - - - jbd2_journal_lock_updates() //stop new stuff happening.. - jbd2_journal_flush() // checkpoint everything. - ..do stuff on stable fs - jbd2_journal_unlock_updates() // carry on with filesystem use. - -The opportunities for abuse and DOS attacks with this should be obvious, -if you allow unprivileged userspace to trigger codepaths containing -these calls. - -Summary -~~~~~~~ - -Using the journal is a matter of wrapping the different context changes, -being each mount, each modification (transaction) and each changed -buffer to tell the journalling layer about them. - -Data Types ----------- - -The journalling layer uses typedefs to 'hide' the concrete definitions -of the structures used. As a client of the JBD2 layer you can just rely -on the using the pointer as a magic cookie of some sort. Obviously the -hiding is not enforced as this is 'C'. - -Structures -~~~~~~~~~~ - -.. kernel-doc:: include/linux/jbd2.h - :internal: - -Functions ---------- - -The functions here are split into two groups those that affect a journal -as a whole, and those which are used to manage transactions - -Journal Level -~~~~~~~~~~~~~ - -.. kernel-doc:: fs/jbd2/journal.c - :export: - -.. kernel-doc:: fs/jbd2/recovery.c - :internal: - -Transasction Level -~~~~~~~~~~~~~~~~~~ - -.. kernel-doc:: fs/jbd2/transaction.c - -See also --------- - -`Journaling the Linux ext2fs Filesystem, LinuxExpo 98, Stephen -Tweedie `__ - -`Ext3 Journalling FileSystem, OLS 2000, Dr. Stephen -Tweedie `__ - -splice API -========== - -splice is a method for moving blocks of data around inside the kernel, -without continually transferring them between the kernel and user space. - -.. kernel-doc:: fs/splice.c - -pipes API -========= - -Pipe interfaces are all for in-kernel (builtin image) use. They are not -exported for use by modules. - -.. kernel-doc:: include/linux/pipe_fs_i.h - :internal: - -.. kernel-doc:: fs/pipe.c - -Encryption API -============== - -A library which filesystems can hook into to support transparent -encryption of files and directories. +Documentation for the support code within the filesystem layer for use in +filesystem implementations. .. toctree:: - :maxdepth: 2 - - fscrypt - -Pathname lookup -=============== - - -This write-up is based on three articles published at lwn.net: + :maxdepth: 2 -- Pathname lookup in Linux -- RCU-walk: faster pathname lookup in Linux -- A walk among the symlinks + journalling + fscrypt -Written by Neil Brown with help from Al Viro and Jon Corbet. -It has subsequently been updated to reflect changes in the kernel -including: +Filesystem-specific documentation +================================= -- per-directory parallel name lookup. +Documentation for individual filesystem types can be found here. .. 
toctree:: :maxdepth: 2 - path-lookup.rst + binderfs.rst diff --git a/Documentation/filesystems/journalling.rst b/Documentation/filesystems/journalling.rst new file mode 100644 index 000000000000..58ce6b395206 --- /dev/null +++ b/Documentation/filesystems/journalling.rst @@ -0,0 +1,184 @@ +The Linux Journalling API +========================= + +Overview +-------- + +Details +~~~~~~~ + +The journalling layer is easy to use. You need to first of all create a +journal_t data structure. There are two calls to do this dependent on +how you decide to allocate the physical media on which the journal +resides. The :c:func:`jbd2_journal_init_inode` call is for journals stored in +filesystem inodes, or the :c:func:`jbd2_journal_init_dev` call can be used +for journal stored on a raw device (in a continuous range of blocks). A +journal_t is a typedef for a struct pointer, so when you are finally +finished make sure you call :c:func:`jbd2_journal_destroy` on it to free up +any used kernel memory. + +Once you have got your journal_t object you need to 'mount' or load the +journal file. The journalling layer expects the space for the journal +was already allocated and initialized properly by the userspace tools. +When loading the journal you must call :c:func:`jbd2_journal_load` to process +journal contents. If the client file system detects the journal contents +does not need to be processed (or even need not have valid contents), it +may call :c:func:`jbd2_journal_wipe` to clear the journal contents before +calling :c:func:`jbd2_journal_load`. + +Note that jbd2_journal_wipe(..,0) calls +:c:func:`jbd2_journal_skip_recovery` for you if it detects any outstanding +transactions in the journal and similarly :c:func:`jbd2_journal_load` will +call :c:func:`jbd2_journal_recover` if necessary. I would advise reading +:c:func:`ext4_load_journal` in fs/ext4/super.c for examples on this stage. + +Now you can go ahead and start modifying the underlying filesystem. +Almost. + +You still need to actually journal your filesystem changes, this is done +by wrapping them into transactions. Additionally you also need to wrap +the modification of each of the buffers with calls to the journal layer, +so it knows what the modifications you are actually making are. To do +this use :c:func:`jbd2_journal_start` which returns a transaction handle. + +:c:func:`jbd2_journal_start` and its counterpart :c:func:`jbd2_journal_stop`, +which indicates the end of a transaction are nestable calls, so you can +reenter a transaction if necessary, but remember you must call +:c:func:`jbd2_journal_stop` the same number of times as +:c:func:`jbd2_journal_start` before the transaction is completed (or more +accurately leaves the update phase). Ext4/VFS makes use of this feature to +simplify handling of inode dirtying, quota support, etc. + +Inside each transaction you need to wrap the modifications to the +individual buffers (blocks). Before you start to modify a buffer you +need to call :c:func:`jbd2_journal_get_create_access()` / +:c:func:`jbd2_journal_get_write_access()` / +:c:func:`jbd2_journal_get_undo_access()` as appropriate, this allows the +journalling layer to copy the unmodified +data if it needs to. After all the buffer may be part of a previously +uncommitted transaction. At this point you are at last ready to modify a +buffer, and once you are have done so you need to call +:c:func:`jbd2_journal_dirty_metadata`. 
Or if you've asked for access to a +buffer you now know is now longer required to be pushed back on the +device you can call :c:func:`jbd2_journal_forget` in much the same way as you +might have used :c:func:`bforget` in the past. + +A :c:func:`jbd2_journal_flush` may be called at any time to commit and +checkpoint all your transactions. + +Then at umount time , in your :c:func:`put_super` you can then call +:c:func:`jbd2_journal_destroy` to clean up your in-core journal object. + +Unfortunately there a couple of ways the journal layer can cause a +deadlock. The first thing to note is that each task can only have a +single outstanding transaction at any one time, remember nothing commits +until the outermost :c:func:`jbd2_journal_stop`. This means you must complete +the transaction at the end of each file/inode/address etc. operation you +perform, so that the journalling system isn't re-entered on another +journal. Since transactions can't be nested/batched across differing +journals, and another filesystem other than yours (say ext4) may be +modified in a later syscall. + +The second case to bear in mind is that :c:func:`jbd2_journal_start` can block +if there isn't enough space in the journal for your transaction (based +on the passed nblocks param) - when it blocks it merely(!) needs to wait +for transactions to complete and be committed from other tasks, so +essentially we are waiting for :c:func:`jbd2_journal_stop`. So to avoid +deadlocks you must treat :c:func:`jbd2_journal_start` / +:c:func:`jbd2_journal_stop` as if they were semaphores and include them in +your semaphore ordering rules to prevent +deadlocks. Note that :c:func:`jbd2_journal_extend` has similar blocking +behaviour to :c:func:`jbd2_journal_start` so you can deadlock here just as +easily as on :c:func:`jbd2_journal_start`. + +Try to reserve the right number of blocks the first time. ;-). This will +be the maximum number of blocks you are going to touch in this +transaction. I advise having a look at at least ext4_jbd.h to see the +basis on which ext4 uses to make these decisions. + +Another wriggle to watch out for is your on-disk block allocation +strategy. Why? Because, if you do a delete, you need to ensure you +haven't reused any of the freed blocks until the transaction freeing +these blocks commits. If you reused these blocks and crash happens, +there is no way to restore the contents of the reallocated blocks at the +end of the last fully committed transaction. One simple way of doing +this is to mark blocks as free in internal in-memory block allocation +structures only after the transaction freeing them commits. Ext4 uses +journal commit callback for this purpose. + +With journal commit callbacks you can ask the journalling layer to call +a callback function when the transaction is finally committed to disk, +so that you can do some of your own management. You ask the journalling +layer for calling the callback by simply setting +``journal->j_commit_callback`` function pointer and that function is +called after each transaction commit. You can also use +``transaction->t_private_list`` for attaching entries to a transaction +that need processing when the transaction commits. + +JBD2 also provides a way to block all transaction updates via +:c:func:`jbd2_journal_lock_updates()` / +:c:func:`jbd2_journal_unlock_updates()`. Ext4 uses this when it wants a +window with a clean and stable fs for a moment. E.g. + +:: + + + jbd2_journal_lock_updates() //stop new stuff happening.. 
+ jbd2_journal_flush() // checkpoint everything. + ..do stuff on stable fs + jbd2_journal_unlock_updates() // carry on with filesystem use. + +The opportunities for abuse and DOS attacks with this should be obvious, +if you allow unprivileged userspace to trigger codepaths containing +these calls. + +Summary +~~~~~~~ + +Using the journal is a matter of wrapping the different context changes, +being each mount, each modification (transaction) and each changed +buffer to tell the journalling layer about them. + +Data Types +---------- + +The journalling layer uses typedefs to 'hide' the concrete definitions +of the structures used. As a client of the JBD2 layer you can just rely +on the using the pointer as a magic cookie of some sort. Obviously the +hiding is not enforced as this is 'C'. + +Structures +~~~~~~~~~~ + +.. kernel-doc:: include/linux/jbd2.h + :internal: + +Functions +--------- + +The functions here are split into two groups those that affect a journal +as a whole, and those which are used to manage transactions + +Journal Level +~~~~~~~~~~~~~ + +.. kernel-doc:: fs/jbd2/journal.c + :export: + +.. kernel-doc:: fs/jbd2/recovery.c + :internal: + +Transasction Level +~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: fs/jbd2/transaction.c + +See also +-------- + +`Journaling the Linux ext2fs Filesystem, LinuxExpo 98, Stephen +Tweedie `__ + +`Ext3 Journalling FileSystem, OLS 2000, Dr. Stephen +Tweedie `__ + diff --git a/Documentation/filesystems/mount_api.txt b/Documentation/filesystems/mount_api.txt new file mode 100644 index 000000000000..944d1965e917 --- /dev/null +++ b/Documentation/filesystems/mount_api.txt @@ -0,0 +1,709 @@ + ==================== + FILESYSTEM MOUNT API + ==================== + +CONTENTS + + (1) Overview. + + (2) The filesystem context. + + (3) The filesystem context operations. + + (4) Filesystem context security. + + (5) VFS filesystem context operations. + + (6) Parameter description. + + (7) Parameter helper functions. + + +======== +OVERVIEW +======== + +The creation of new mounts is now to be done in a multistep process: + + (1) Create a filesystem context. + + (2) Parse the parameters and attach them to the context. Parameters are + expected to be passed individually from userspace, though legacy binary + parameters can also be handled. + + (3) Validate and pre-process the context. + + (4) Get or create a superblock and mountable root. + + (5) Perform the mount. + + (6) Return an error message attached to the context. + + (7) Destroy the context. + +To support this, the file_system_type struct gains a new field: + + int (*init_fs_context)(struct fs_context *fc); + +which is invoked to set up the filesystem-specific parts of a filesystem +context, including the additional space. + +Note that security initialisation is done *after* the filesystem is called so +that the namespaces may be adjusted first. + + +====================== +THE FILESYSTEM CONTEXT +====================== + +The creation and reconfiguration of a superblock is governed by a filesystem +context. This is represented by the fs_context structure: + + struct fs_context { + const struct fs_context_operations *ops; + struct file_system_type *fs_type; + void *fs_private; + struct dentry *root; + struct user_namespace *user_ns; + struct net *net_ns; + const struct cred *cred; + char *source; + char *subtype; + void *security; + void *s_fs_info; + unsigned int sb_flags; + unsigned int sb_flags_mask; + enum fs_context_purpose purpose:8; + bool sloppy:1; + bool silent:1; + ... 
+ }; + +The fs_context fields are as follows: + + (*) const struct fs_context_operations *ops + + These are operations that can be done on a filesystem context (see + below). This must be set by the ->init_fs_context() file_system_type + operation. + + (*) struct file_system_type *fs_type + + A pointer to the file_system_type of the filesystem that is being + constructed or reconfigured. This retains a reference on the type owner. + + (*) void *fs_private + + A pointer to the file system's private data. This is where the filesystem + will need to store any options it parses. + + (*) struct dentry *root + + A pointer to the root of the mountable tree (and indirectly, the + superblock thereof). This is filled in by the ->get_tree() op. If this + is set, an active reference on root->d_sb must also be held. + + (*) struct user_namespace *user_ns + (*) struct net *net_ns + + These are a subset of the namespaces in use by the invoking process. They + retain references on each namespace. The subscribed namespaces may be + replaced by the filesystem to reflect other sources, such as the parent + mount superblock on an automount. + + (*) const struct cred *cred + + The mounter's credentials. This retains a reference on the credentials. + + (*) char *source + + This specifies the source. It may be a block device (e.g. /dev/sda1) or + something more exotic, such as the "host:/path" that NFS desires. + + (*) char *subtype + + This is a string to be added to the type displayed in /proc/mounts to + qualify it (used by FUSE). This is available for the filesystem to set if + desired. + + (*) void *security + + A place for the LSMs to hang their security data for the superblock. The + relevant security operations are described below. + + (*) void *s_fs_info + + The proposed s_fs_info for a new superblock, set in the superblock by + sget_fc(). This can be used to distinguish superblocks. + + (*) unsigned int sb_flags + (*) unsigned int sb_flags_mask + + Which SB_* flags are to be set/cleared in super_block::s_flags. + + (*) enum fs_context_purpose + + This indicates the purpose for which the context is intended. The + available values are: + + FS_CONTEXT_FOR_MOUNT, -- New superblock for explicit mount + FS_CONTEXT_FOR_SUBMOUNT -- New automatic submount of extant mount + FS_CONTEXT_FOR_RECONFIGURE -- Change an existing mount + + (*) bool sloppy + (*) bool silent + + These are set if the sloppy or silent mount options are given. + + [NOTE] sloppy is probably unnecessary when userspace passes over one + option at a time since the error can just be ignored if userspace deems it + to be unimportant. + + [NOTE] silent is probably redundant with sb_flags & SB_SILENT. + +The mount context is created by calling vfs_new_fs_context() or +vfs_dup_fs_context() and is destroyed with put_fs_context(). Note that the +structure is not refcounted. + +VFS, security and filesystem mount options are set individually with +vfs_parse_mount_option(). Options provided by the old mount(2) system call as +a page of data can be parsed with generic_parse_monolithic(). + +When mounting, the filesystem is allowed to take data from any of the pointers +and attach it to the superblock (or whatever), provided it clears the pointer +in the mount context. + +The filesystem is also allowed to allocate resources and pin them with the +mount context. For instance, NFS might pin the appropriate protocol version +module.
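As a purely illustrative sketch (the "foofs" names and the option fields below
are invented for the example; only the fs_context fields and the role of
->init_fs_context() come from the description above), a filesystem's
initialisation op might allocate its private options structure and install its
operations table roughly like this:

	#include <linux/fs_context.h>
	#include <linux/slab.h>

	struct foofs_fs_context {
		unsigned int	granularity;	/* hypothetical mount option */
		bool		fast_mode;	/* hypothetical mount option */
	};

	static const struct fs_context_operations foofs_context_ops;

	static int foofs_init_fs_context(struct fs_context *fc)
	{
		struct foofs_fs_context *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		/* Parsed options are stashed here until ->get_tree() runs. */
		fc->fs_private = ctx;

		/* The operations table (see the sketch in the next section). */
		fc->ops = &foofs_context_ops;
		return 0;
	}

The corresponding ->free() operation would then be responsible for releasing
whatever is left in fc->fs_private if the context is destroyed before the data
has been transferred to a superblock.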
+ + +================================= +THE FILESYSTEM CONTEXT OPERATIONS +================================= + +The filesystem context points to a table of operations: + + struct fs_context_operations { + void (*free)(struct fs_context *fc); + int (*dup)(struct fs_context *fc, struct fs_context *src_fc); + int (*parse_param)(struct fs_context *fc, + struct fs_parameter *param); + int (*parse_monolithic)(struct fs_context *fc, void *data); + int (*get_tree)(struct fs_context *fc); + int (*reconfigure)(struct fs_context *fc); + }; + +These operations are invoked by the various stages of the mount procedure to +manage the filesystem context. They are as follows: + + (*) void (*free)(struct fs_context *fc); + + Called to clean up the filesystem-specific part of the filesystem context + when the context is destroyed. It should be aware that parts of the + context may have been removed and NULL'd out by ->get_tree(). + + (*) int (*dup)(struct fs_context *fc, struct fs_context *src_fc); + + Called when a filesystem context has been duplicated to duplicate the + filesystem-private data. An error may be returned to indicate failure to + do this. + + [!] Note that even if this fails, put_fs_context() will be called + immediately thereafter, so ->dup() *must* make the + filesystem-private data safe for ->free(). + + (*) int (*parse_param)(struct fs_context *fc, + struct fs_parameter *param); + + Called when a parameter is being added to the filesystem context. param + points to the key name and maybe a value object. VFS-specific options + will have been weeded out and fc->sb_flags updated in the context. + Security options will also have been weeded out and fc->security updated. + + The parameter can be parsed with fs_parse() and fs_lookup_param(). Note + that the source(s) are presented as parameters named "source". + + If successful, 0 should be returned or a negative error code otherwise. + + (*) int (*parse_monolithic)(struct fs_context *fc, void *data); + + Called when the mount(2) system call is invoked to pass the entire data + page in one go. If this is expected to be just a list of "key[=val]" + items separated by commas, then this may be set to NULL. + + The return value is as for ->parse_param(). + + If the filesystem (e.g. NFS) needs to examine the data first and then + finds it's the standard key-val list, then it may pass it off to + generic_parse_monolithic(). + + (*) int (*get_tree)(struct fs_context *fc); + + Called to get or create the mountable root and superblock, using the + information stored in the filesystem context (reconfiguration goes via a + different vector). It may detach any resources it desires from the + filesystem context and transfer them to the superblock it creates. + + On success it should set fc->root to the mountable root and return 0. In + the case of an error, it should return a negative error code. + + The phase on a userspace-driven context will be set to only allow this to + be called once on any particular context. + + (*) int (*reconfigure)(struct fs_context *fc); + + Called to effect reconfiguration of a superblock using information stored + in the filesystem context. It may detach any resources it desires from + the filesystem context and transfer them to the superblock. The + superblock can be found from fc->root->d_sb. + + On success it should return 0. In the case of an error, it should return + a negative error code. + + [NOTE] reconfigure is intended as a replacement for remount_fs.
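Continuing the hypothetical foofs sketch from the previous section (every
foofs_* name is invented; only the operation names, signatures and return
conventions are taken from the table above), a minimal operations table might
look like:

	static void foofs_free_fc(struct fs_context *fc)
	{
		/* Release whatever ->parse_param() stashed in fs_private. */
		kfree(fc->fs_private);
	}

	static int foofs_parse_param(struct fs_context *fc,
				     struct fs_parameter *param)
	{
		/*
		 * Look the key up, typically with fs_parse() against the
		 * filesystem's parameter description (see below), update the
		 * options stored in fc->fs_private accordingly and return 0,
		 * or return a negative error code.
		 */
		return 0;
	}

	static int foofs_get_tree(struct fs_context *fc)
	{
		/*
		 * Get or create the superblock from the options collected in
		 * fc->fs_private, set fc->root to the mountable root and
		 * return 0, or return a negative error code.
		 */
		return -EOPNOTSUPP;	/* placeholder in this sketch */
	}

	static const struct fs_context_operations foofs_context_ops = {
		.free		= foofs_free_fc,
		.parse_param	= foofs_parse_param,
		.get_tree	= foofs_get_tree,
	};

Operations a filesystem does not need (for example ->parse_monolithic() when
the data page is just a comma-separated key[=val] list) may be left NULL, as
described above.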
+ + +=========================== +FILESYSTEM CONTEXT SECURITY +=========================== + +The filesystem context contains a security pointer that the LSMs can use for +building up a security context for the superblock to be mounted. There are a +number of operations used by the new mount code for this purpose: + + (*) int security_fs_context_alloc(struct fs_context *fc, + struct dentry *reference); + + Called to initialise fc->security (which is preset to NULL) and allocate + any resources needed. It should return 0 on success or a negative error + code on failure. + + reference will be non-NULL if the context is being created for superblock + reconfiguration (FS_CONTEXT_FOR_RECONFIGURE) in which case it indicates + the root dentry of the superblock to be reconfigured. It will also be + non-NULL in the case of a submount (FS_CONTEXT_FOR_SUBMOUNT) in which case + it indicates the automount point. + + (*) int security_fs_context_dup(struct fs_context *fc, + struct fs_context *src_fc); + + Called to initialise fc->security (which is preset to NULL) and allocate + any resources needed. The original filesystem context is pointed to by + src_fc and may be used for reference. It should return 0 on success or a + negative error code on failure. + + (*) void security_fs_context_free(struct fs_context *fc); + + Called to clean up anything attached to fc->security. Note that the + contents may have been transferred to a superblock and the pointer cleared + during get_tree. + + (*) int security_fs_context_parse_param(struct fs_context *fc, + struct fs_parameter *param); + + Called for each mount parameter, including the source. The arguments are + as for the ->parse_param() method. It should return 0 to indicate that + the parameter should be passed on to the filesystem, 1 to indicate that + the parameter should be discarded or an error to indicate that the + parameter should be rejected. + + The value pointed to by param may be modified (if a string) or stolen + (provided the value pointer is NULL'd out). If it is stolen, 1 must be + returned to prevent it being passed to the filesystem. + + (*) int security_fs_context_validate(struct fs_context *fc); + + Called after all the options have been parsed to validate the collection + as a whole and to do any necessary allocation so that + security_sb_get_tree() and security_sb_reconfigure() are less likely to + fail. It should return 0 or a negative error code. + + In the case of reconfiguration, the target superblock will be accessible + via fc->root. + + (*) int security_sb_get_tree(struct fs_context *fc); + + Called during the mount procedure to verify that the specified superblock + is allowed to be mounted and to transfer the security data there. It + should return 0 or a negative error code. + + (*) void security_sb_reconfigure(struct fs_context *fc); + + Called to apply any reconfiguration to an LSM's context. It must not + fail. Error checking and resource allocation must be done in advance by + the parameter parsing and validation hooks. + + (*) int security_sb_mountpoint(struct fs_context *fc, struct path *mountpoint, + unsigned int mnt_flags); + + Called during the mount procedure to verify that the root dentry attached + to the context is permitted to be attached to the specified mountpoint. + It should return 0 on success or a negative error code on failure. 
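To make the return convention of security_fs_context_parse_param() concrete,
an illustrative sketch of what an LSM hook might do is shown below. The
"examplelsm" names, the context structure and the param->key member name are
assumptions for the example; the 0/1/negative return values and the rule about
stealing a string value are as described above.

	static int examplelsm_fs_context_parse_param(struct fs_context *fc,
						     struct fs_parameter *param)
	{
		struct examplelsm_fs_ctx *sec = fc->security;

		/* Not one of ours: pass it on to the filesystem. */
		if (strcmp(param->key, "examplelsm_context") != 0)
			return 0;

		/*
		 * For this sketch, assume the value was supplied as a string
		 * and that a real hook would also reject the option being
		 * given twice.  Steal the string by taking the pointer and
		 * clearing it in the parameter...
		 */
		sec->context_str = param->string;
		param->string = NULL;

		/* ...and return 1 so it is not also passed to the filesystem. */
		return 1;
	}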
+ + +================================= +VFS FILESYSTEM CONTEXT OPERATIONS +================================= + +There are two operations for creating a filesystem context and +one for destroying a context: + + (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type, + struct dentry *reference, + unsigned int sb_flags, + unsigned int sb_flags_mask, + enum fs_context_purpose purpose); + + Create a filesystem context for a given filesystem type and purpose. This + allocates the filesystem context, sets the superblock flags, initialises + the security and calls fs_type->init_fs_context() to initialise the + filesystem private data. + + reference can be NULL or it may indicate the root dentry of a superblock + that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or + the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT). + This is provided as a source of namespace information. + + (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc); + + Duplicate a filesystem context, copying any options noted and duplicating + or additionally referencing any resources held therein. This is available + for use where a filesystem has to get a mount within a mount, such as NFS4 + does by internally mounting the root of the target server and then doing a + private pathwalk to the target directory. + + The purpose in the new context is inherited from the old one. + + (*) void put_fs_context(struct fs_context *fc); + + Destroy a filesystem context, releasing any resources it holds. This + calls the ->free() operation. This is intended to be called by anyone who + created a filesystem context. + + [!] filesystem contexts are not refcounted, so this causes unconditional + destruction. + +In all the above operations, apart from the put op, the return is a mount +context pointer or a negative error code. + +For the remaining operations, if an error occurs, a negative error code will be +returned. + + (*) int vfs_get_tree(struct fs_context *fc); + + Get or create the mountable root and superblock, using the parameters in + the filesystem context to select/configure the superblock. This invokes + the ->validate() op and then the ->get_tree() op. + + [NOTE] ->validate() could perhaps be rolled into ->get_tree() and + ->reconfigure(). + + (*) struct vfsmount *vfs_create_mount(struct fs_context *fc); + + Create a mount given the parameters in the specified filesystem context. + Note that this does not attach the mount to anything. + + (*) int vfs_parse_fs_param(struct fs_context *fc, + struct fs_parameter *param); + + Supply a single mount parameter to the filesystem context. This includes + the specification of the source/device which is specified as the "source" + parameter (which may be specified multiple times if the filesystem + supports that). + + param specifies the parameter key name and the value. The parameter is + first checked to see if it corresponds to a standard mount flag (in which + case it is used to set an SB_xxx flag and consumed) or a security option + (in which case the LSM consumes it) before it is passed on to the + filesystem. + + The parameter value is typed and can be one of: + + fs_value_is_flag, Parameter not given a value.
+ fs_value_is_string, Value is a string + fs_value_is_blob, Value is a binary blob + fs_value_is_filename, Value is a filename* + dirfd + fs_value_is_filename_empty, Value is a filename* + dirfd + AT_EMPTY_PATH + fs_value_is_file, Value is an open file (file*) + + If there is a value, that value is stored in a union in the struct in one + of param->{string,blob,name,file}. Note that the function may steal and + clear the pointer, but then becomes responsible for disposing of the + object. + + (*) int vfs_parse_fs_string(struct fs_context *fc, char *key, + const char *value, size_t v_size); + + A wrapper around vfs_parse_fs_param() that just passes a constant string. + + (*) int generic_parse_monolithic(struct fs_context *fc, void *data); + + Parse a sys_mount() data page, assuming the form to be a text list + consisting of key[=val] options separated by commas. Each item in the + list is passed to vfs_mount_option(). This is the default when the + ->parse_monolithic() operation is NULL. + + +===================== +PARAMETER DESCRIPTION +===================== + +Parameters are described using structures defined in linux/fs_parser.h. +There's a core description struct that links everything together: + + struct fs_parameter_description { + const char name[16]; + u8 nr_params; + u8 nr_alt_keys; + u8 nr_enums; + bool ignore_unknown; + bool no_source; + const char *const *keys; + const struct constant_table *alt_keys; + const struct fs_parameter_spec *specs; + const struct fs_parameter_enum *enums; + }; + +For example: + + enum afs_param { + Opt_autocell, + Opt_bar, + Opt_dyn, + Opt_foo, + Opt_source, + nr__afs_params + }; + + static const struct fs_parameter_description afs_fs_parameters = { + .name = "kAFS", + .nr_params = nr__afs_params, + .nr_alt_keys = ARRAY_SIZE(afs_param_alt_keys), + .nr_enums = ARRAY_SIZE(afs_param_enums), + .keys = afs_param_keys, + .alt_keys = afs_param_alt_keys, + .specs = afs_param_specs, + .enums = afs_param_enums, + }; + +The members are as follows: + + (1) const char name[16]; + + The name to be used in error messages generated by the parse helper + functions. + + (2) u8 nr_params; + + The number of discrete parameter identifiers. This indicates the number + of elements in the ->specs[] array and also limits the parameter IDs that + the entries in the ->keys[] array may map to. + + It is expected that, for example, two parameters that are related, say + "acl" and "noacl" will have the same ID, but will be flagged to indicate + that one is the inverse of the other. The value can then be picked out + from the parse result. + + (3) const struct fs_parameter_spec *specs; + + Table of parameter specifications, where the entries are of type: + + struct fs_parameter_type { + enum fs_parameter_spec type:8; + u8 flags; + }; + + and the parameter identifier is the index to the array.
'type' indicates + the desired value type and must be one of: + + TYPE NAME EXPECTED VALUE RESULT IN + ======================= ======================= ===================== + fs_param_is_flag No value n/a + fs_param_is_bool Boolean value result->boolean + fs_param_is_u32 32-bit unsigned int result->uint_32 + fs_param_is_u32_octal 32-bit octal int result->uint_32 + fs_param_is_u32_hex 32-bit hex int result->uint_32 + fs_param_is_s32 32-bit signed int result->int_32 + fs_param_is_enum Enum value name result->uint_32 + fs_param_is_string Arbitrary string param->string + fs_param_is_blob Binary blob param->blob + fs_param_is_blockdev Blockdev path * Needs lookup + fs_param_is_path Path * Needs lookup + fs_param_is_fd File descriptor param->file + + And each parameter can be qualified with 'flags': + + fs_param_v_optional The value is optional + fs_param_neg_with_no If key name is prefixed with "no", it is false + fs_param_neg_with_empty If value is "", it is false + fs_param_deprecated The parameter is deprecated. + + For example: + + static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = { + [Opt_autocell] = { fs_param_is_flag }, + [Opt_bar] = { fs_param_is_enum }, + [Opt_dyn] = { fs_param_is_flag }, + [Opt_foo] = { fs_param_is_bool, fs_param_neg_with_no }, + [Opt_source] = { fs_param_is_string }, + }; + + Note that if the value is of fs_param_is_bool type, fs_parse() will try + to match any string value against "0", "1", "no", "yes", "false", "true". + + [!] NOTE that the table must be sorted according to primary key name so + that ->keys[] is also sorted. + + (4) const char *const *keys; + + Table of primary key names for the parameters. There must be one entry + per defined parameter. The table is optional if ->nr_params is 0. The + table is just an array of names, e.g.: + + static const char *const afs_param_keys[nr__afs_params] = { + [Opt_autocell] = "autocell", + [Opt_bar] = "bar", + [Opt_dyn] = "dyn", + [Opt_foo] = "foo", + [Opt_source] = "source", + }; + + [!] NOTE that the table must be sorted such that the table can be searched + with bsearch() using strcmp(). This means that the Opt_* values must + correspond to the entries in this table. + + (5) const struct constant_table *alt_keys; + u8 nr_alt_keys; + + Table of additional key names and their mappings to parameter ID plus the + number of elements in the table. This is optional. The table is just an + array of { name, integer } pairs, e.g.: + + static const struct constant_table afs_param_alt_keys[] = { + { "baz", Opt_bar }, + { "dynamic", Opt_dyn }, + }; + + [!] NOTE that the table must be sorted such that strcmp() can be used with + bsearch() to search the entries. + + The parameter ID can also be fs_param_key_removed to indicate that a + deprecated parameter has been removed and that an error will be given. + This differs from fs_param_deprecated where the parameter may still have + an effect. + + Further, the behaviour of the parameter may differ when an alternate name + is used (for instance with NFS, "v3", "v4.2", etc. are alternate names). + + (6) const struct fs_parameter_enum *enums; + u8 nr_enums; + + Table of enum value names to integer mappings and the number of elements + stored therein.
This is of type: + + struct fs_parameter_enum { + u8 param_id; + char name[14]; + u8 value; + }; + + Where the array is an unsorted list of { parameter ID, name }-keyed + elements that indicate the value to map to, e.g.: + + static const struct fs_parameter_enum afs_param_enums[] = { + { Opt_bar, "x", 1}, + { Opt_bar, "y", 23}, + { Opt_bar, "z", 42}, + }; + + If a parameter of type fs_param_is_enum is encountered, fs_parse() will + try to look the value up in the enum table and the result will be stored + in the parse result. + + (7) bool no_source; + + If this is set, fs_parse() will ignore any "source" parameter and not + pass it to the filesystem. + +The parser should be pointed to by the parser pointer in the file_system_type +struct as this will provide validation on registration (if +CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from +userspace using the fsinfo() syscall. + + +========================== +PARAMETER HELPER FUNCTIONS +========================== + +A number of helper functions are provided to help a filesystem or an LSM +process the parameters it is given. + + (*) int lookup_constant(const struct constant_table tbl[], + const char *name, int not_found); + + Look up a constant by name in a table of name -> integer mappings. The + table is an array of elements of the following type: + + struct constant_table { + const char *name; + int value; + }; + + and it must be sorted such that it can be searched using bsearch() with + strcmp(). If a match is found, the corresponding value is returned. If a + match isn't found, the not_found value is returned instead. + + (*) bool validate_constant_table(const struct constant_table *tbl, + size_t tbl_size, + int low, int high, int special); + + Validate a constant table. Checks that all the elements are appropriately + ordered, that there are no duplicates and that the values are between low + and high inclusive, though provision is made for one allowable special + value outside of that range. If no special value is required, special + should just be set to lie inside the low-to-high range. + + If all is good, true is returned. If the table is invalid, errors are + logged to dmesg, the stack is dumped and false is returned. + + (*) int fs_parse(struct fs_context *fc, + const struct fs_param_parser *parser, + struct fs_parameter *param, + struct fs_param_parse_result *result); + + This is the main interpreter of parameters. It uses the parameter + description (parser) to look up the name of the parameter to use and to + convert that to a parameter ID (stored in result->key). + + If successful, and if the parameter type indicates the result is a + boolean, integer or enum type, the value is converted by this function and + the result stored in result->{boolean,int_32,uint_32}. + + If a match isn't initially made, and the key is prefixed with "no" and no + value is present, then an attempt will be made to look up the key with the + prefix removed. If this matches a parameter for which the type has flag + fs_param_neg_with_no set, then a match will be made and the value will be + set to false/0/NULL. + + If the parameter is successfully matched and, optionally, parsed + correctly, 1 is returned. If the parameter isn't matched and + parser->ignore_unknown is set, then 0 is returned. Otherwise -EINVAL is + returned. + + (*) bool fs_validate_description(const struct fs_parameter_description *desc); + + This validates the parameter description. It returns true if the + description is good and false if it is not.
+ + (*) int fs_lookup_param(struct fs_context *fc, + struct fs_parameter *value, + bool want_bdev, + struct path *_path); + + This takes a parameter that carries a string or filename type and attempts + to do a path lookup on it. If the parameter expects a blockdev, a check + is made that the inode actually represents one. + + Returns 0 if successful and *_path will be set; returns a negative error + code if not. diff --git a/Documentation/filesystems/path-lookup.rst b/Documentation/filesystems/path-lookup.rst index 9d6b68853f5b..434a07b0002b 100644 --- a/Documentation/filesystems/path-lookup.rst +++ b/Documentation/filesystems/path-lookup.rst @@ -1,3 +1,18 @@ +=============== +Pathname lookup +=============== + +This write-up is based on three articles published at lwn.net: + +- Pathname lookup in Linux +- RCU-walk: faster pathname lookup in Linux +- A walk among the symlinks + +Written by Neil Brown with help from Al Viro and Jon Corbet. +It has subsequently been updated to reflect changes in the kernel +including: + +- per-directory parallel name lookup. Introduction to pathname lookup =============================== @@ -344,7 +359,7 @@ In particular it is held while scanning chains in the dcache hash table, and the mount point hash table. Bringing it together with ``struct nameidata`` --------------------------------------------- +---------------------------------------------- .. _First edition Unix: http://minnie.tuhs.org/cgi-bin/utree.pl?file=V1/u2.s @@ -355,7 +370,7 @@ converts a "name" to an "inode". ``struct nameidata`` contains (among other fields): ``struct path path`` -~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~ A ``path`` contains a ``struct vfsmount`` (which is embedded in a ``struct mount``) and a ``struct dentry``. Together these @@ -366,13 +381,13 @@ step. A reference through ``d_lockref`` and ``mnt_count`` is always held. ``struct qstr last`` -~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~ This is a string together with a length (i.e. _not_ ``nul`` terminated) that is the "next" component in the pathname. ``int last_type`` -~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~ This is one of ``LAST_NORM``, ``LAST_ROOT``, ``LAST_DOT``, ``LAST_DOTDOT``, or ``LAST_BIND``. The ``last`` field is only valid if the type is @@ -381,7 +396,7 @@ components of the symlink have been processed yet. Others should be fairly self-explanatory. ``struct path root`` -~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~ This is used to hold a reference to the effective root of the filesystem. Often that reference won't be needed, so this field is @@ -510,7 +525,7 @@ potentially interesting things about these dentries corresponding to three different flags that might be set in ``dentry->d_flags``: ``DCACHE_MANAGE_TRANSIT`` -~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~ If this flag has been set, then the filesystem has requested that the ``d_manage()`` dentry operation be called before handling any possible @@ -529,7 +544,7 @@ filesystem, which will then give it a special pass through ``d_manage()`` by returning ``-EISDIR``. ``DCACHE_MOUNTED`` -~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~ This flag is set on every dentry that is mounted on. As Linux supports multiple filesystem namespaces, it is possible that the @@ -542,7 +557,7 @@ If this flag is set, and ``d_manage()`` didn't return ``-EISDIR``, and a new ``dentry`` (both with counted references). 
``DCACHE_NEED_AUTOMOUNT`` -~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~ If ``d_manage()`` allowed us to get this far, and ``lookup_mnt()`` didn't find a mount point, then this flag causes the ``d_automount()`` dentry @@ -698,7 +713,7 @@ With that little refresher on seqlocks out of the way we can look at the bigger picture of how RCU-walk uses seqlocks. ``mount_lock`` and ``nd->m_seq`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We already met the ``mount_lock`` seqlock when REF-walk used it to ensure that crossing a mount point is performed safely. RCU-walk uses @@ -727,7 +742,7 @@ results would have been the same. This ensures the invariant holds, at least for vfsmount structures. ``dentry->d_seq`` and ``nd->seq`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In place of taking a count or lock on ``d_reflock``, RCU-walk samples the per-dentry ``d_seq`` seqlock, and stores the sequence number in the @@ -774,7 +789,7 @@ getting a counted reference to the new dentry before dropping that for the old dentry which we saw in REF-walk. No ``inode->i_rwsem`` or even ``rename_lock`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A semaphore is a fairly heavyweight lock that can only be taken when it is permissible to sleep. As ``rcu_read_lock()`` forbids sleeping, @@ -796,7 +811,7 @@ locking. This neatly handles all cases, so adding extra checks on rename_lock would bring no significant value. ``unlazy walk()`` and ``complete_walk()`` -------------------------------------- +----------------------------------------- That "dropping down to REF-walk" typically involves a call to ``unlazy_walk()``, so named because "RCU-walk" is also sometimes diff --git a/Documentation/filesystems/splice.rst b/Documentation/filesystems/splice.rst new file mode 100644 index 000000000000..edd874808472 --- /dev/null +++ b/Documentation/filesystems/splice.rst @@ -0,0 +1,22 @@ +================ +splice and pipes +================ + +splice API +========== + +splice is a method for moving blocks of data around inside the kernel, +without continually transferring them between the kernel and user space. + +.. kernel-doc:: fs/splice.c + +pipes API +========= + +Pipe interfaces are all for in-kernel (builtin image) use. They are not +exported for use by modules. + +.. kernel-doc:: include/linux/pipe_fs_i.h + :internal: + +.. kernel-doc:: fs/pipe.c diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt index 41411b0c60a3..5b5311f9358d 100644 --- a/Documentation/filesystems/sysfs.txt +++ b/Documentation/filesystems/sysfs.txt @@ -116,6 +116,27 @@ static struct device_attribute dev_attr_foo = { .store = store_foo, }; +Note as stated in include/linux/kernel.h "OTHER_WRITABLE? Generally +considered a bad idea." so trying to set a sysfs file writable for +everyone will fail reverting to RO mode for "Others". + +For the common cases sysfs.h provides convenience macros to make +defining attributes easier as well as making code more concise and +readable. The above case could be shortened to: + +static struct device_attribute dev_attr_foo = __ATTR_RW(foo); + +the list of helpers available to define your wrapper function is: +__ATTR_RO(name): assumes default name_show and mode 0444 +__ATTR_WO(name): assumes a name_store only and is restricted to mode + 0200 that is root write access only. 
+__ATTR_RO_MODE(name, mode): fore more restrictive RO access currently + only use case is the EFI System Resource Table + (see drivers/firmware/efi/esrt.c) +__ATTR_RW(name): assumes default name_show, name_store and setting + mode to 0644. +__ATTR_NULL: which sets the name to NULL and is used as end of list + indicator (see: kernel/workqueue.c) Subsystem-Specific Callbacks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 8dc8e9c2913f..761c6fd24a53 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -857,6 +857,7 @@ struct file_operations { ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); + int (*iopoll)(struct kiocb *kiocb, bool spin); int (*iterate) (struct file *, struct dir_context *); int (*iterate_shared) (struct file *, struct dir_context *); __poll_t (*poll) (struct file *, struct poll_table_struct *); @@ -902,6 +903,8 @@ otherwise noted. write_iter: possibly asynchronous write with iov_iter as source + iopoll: called when aio wants to poll for completions on HIPRI iocbs + iterate: called when the VFS needs to read the directory contents iterate_shared: called when the VFS needs to read the directory contents diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt index 9ccfd1bc6201..a5cbb5e0e3db 100644 --- a/Documentation/filesystems/xfs.txt +++ b/Documentation/filesystems/xfs.txt @@ -272,7 +272,7 @@ The following sysctls are available for the XFS filesystem: XFS_ERRLEVEL_LOW: 1 XFS_ERRLEVEL_HIGH: 5 - fs.xfs.panic_mask (Min: 0 Default: 0 Max: 255) + fs.xfs.panic_mask (Min: 0 Default: 0 Max: 256) Causes certain error conditions to call BUG(). Value is a bitmask; OR together the tags which represent errors which should cause panics: @@ -285,6 +285,7 @@ The following sysctls are available for the XFS filesystem: XFS_PTAG_SHUTDOWN_IOERROR 0x00000020 XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040 XFS_PTAG_FSBLOCK_ZERO 0x00000080 + XFS_PTAG_VERIFIER_ERROR 0x00000100 This option is intended for debugging only. diff --git a/Documentation/flexible-arrays.txt b/Documentation/flexible-arrays.txt deleted file mode 100644 index a0f2989dd804..000000000000 --- a/Documentation/flexible-arrays.txt +++ /dev/null @@ -1,123 +0,0 @@ -=================================== -Using flexible arrays in the kernel -=================================== - -:Updated: Last updated for 2.6.32 -:Author: Jonathan Corbet - -Large contiguous memory allocations can be unreliable in the Linux kernel. -Kernel programmers will sometimes respond to this problem by allocating -pages with vmalloc(). This solution not ideal, though. On 32-bit systems, -memory from vmalloc() must be mapped into a relatively small address space; -it's easy to run out. On SMP systems, the page table changes required by -vmalloc() allocations can require expensive cross-processor interrupts on -all CPUs. And, on all systems, use of space in the vmalloc() range -increases pressure on the translation lookaside buffer (TLB), reducing the -performance of the system. - -In many cases, the need for memory from vmalloc() can be eliminated by -piecing together an array from smaller parts; the flexible array library -exists to make this task easier. - -A flexible array holds an arbitrary (within limits) number of fixed-sized -objects, accessed via an integer index. 
Sparse arrays are handled -reasonably well. Only single-page allocations are made, so memory -allocation failures should be relatively rare. The down sides are that the -arrays cannot be indexed directly, individual object size cannot exceed the -system page size, and putting data into a flexible array requires a copy -operation. It's also worth noting that flexible arrays do no internal -locking at all; if concurrent access to an array is possible, then the -caller must arrange for appropriate mutual exclusion. - -The creation of a flexible array is done with:: - - #include - - struct flex_array *flex_array_alloc(int element_size, - unsigned int total, - gfp_t flags); - -The individual object size is provided by element_size, while total is the -maximum number of objects which can be stored in the array. The flags -argument is passed directly to the internal memory allocation calls. With -the current code, using flags to ask for high memory is likely to lead to -notably unpleasant side effects. - -It is also possible to define flexible arrays at compile time with:: - - DEFINE_FLEX_ARRAY(name, element_size, total); - -This macro will result in a definition of an array with the given name; the -element size and total will be checked for validity at compile time. - -Storing data into a flexible array is accomplished with a call to:: - - int flex_array_put(struct flex_array *array, unsigned int element_nr, - void *src, gfp_t flags); - -This call will copy the data from src into the array, in the position -indicated by element_nr (which must be less than the maximum specified when -the array was created). If any memory allocations must be performed, flags -will be used. The return value is zero on success, a negative error code -otherwise. - -There might possibly be a need to store data into a flexible array while -running in some sort of atomic context; in this situation, sleeping in the -memory allocator would be a bad thing. That can be avoided by using -GFP_ATOMIC for the flags value, but, often, there is a better way. The -trick is to ensure that any needed memory allocations are done before -entering atomic context, using:: - - int flex_array_prealloc(struct flex_array *array, unsigned int start, - unsigned int nr_elements, gfp_t flags); - -This function will ensure that memory for the elements indexed in the range -defined by start and nr_elements has been allocated. Thereafter, a -flex_array_put() call on an element in that range is guaranteed not to -block. - -Getting data back out of the array is done with:: - - void *flex_array_get(struct flex_array *fa, unsigned int element_nr); - -The return value is a pointer to the data element, or NULL if that -particular element has never been allocated. - -Note that it is possible to get back a valid pointer for an element which -has never been stored in the array. Memory for array elements is allocated -one page at a time; a single allocation could provide memory for several -adjacent elements. Flexible array elements are normally initialized to the -value FLEX_ARRAY_FREE (defined as 0x6c in ), so errors -involving that number probably result from use of unstored array entries. -Note that, if array elements are allocated with __GFP_ZERO, they will be -initialized to zero and this poisoning will not happen. - -Individual elements in the array can be cleared with:: - - int flex_array_clear(struct flex_array *array, unsigned int element_nr); - -This function will set the given element to FLEX_ARRAY_FREE and return -zero. 
If storage for the indicated element is not allocated for the array, -flex_array_clear() will return -EINVAL instead. Note that clearing an -element does not release the storage associated with it; to reduce the -allocated size of an array, call:: - - int flex_array_shrink(struct flex_array *array); - -The return value will be the number of pages of memory actually freed. -This function works by scanning the array for pages containing nothing but -FLEX_ARRAY_FREE bytes, so (1) it can be expensive, and (2) it will not work -if the array's pages are allocated with __GFP_ZERO. - -It is possible to remove all elements of an array with a call to:: - - void flex_array_free_parts(struct flex_array *array); - -This call frees all elements, but leaves the array itself in place. -Freeing the entire array is done with:: - - void flex_array_free(struct flex_array *array); - -As of this writing, there are no users of flexible arrays in the mainline -kernel. The functions described here are also not exported to modules; -that will probably be fixed when somebody comes up with a need for it. diff --git a/Documentation/gpu/afbc.rst b/Documentation/gpu/afbc.rst new file mode 100644 index 000000000000..4d38dc49d105 --- /dev/null +++ b/Documentation/gpu/afbc.rst @@ -0,0 +1,235 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +=================================== + Arm Framebuffer Compression (AFBC) +=================================== + +AFBC is a proprietary lossless image compression protocol and format. +It provides fine-grained random access and minimizes the amount of +data transferred between IP blocks. + +AFBC can be enabled on drivers which support it via use of the AFBC +format modifiers defined in drm_fourcc.h. See DRM_FORMAT_MOD_ARM_AFBC(*). + +All users of the AFBC modifiers must follow the usage guidelines laid +out in this document, to ensure compatibility across different AFBC +producers and consumers. + +Components and Ordering +======================= + +AFBC streams can contain several components - where a component +corresponds to a color channel (i.e. R, G, B, X, A, Y, Cb, Cr). +The assignment of input/output color channels must be consistent +between the encoder and the decoder for correct operation, otherwise +the consumer will interpret the decoded data incorrectly. + +Furthermore, when the lossless colorspace transform is used +(AFBC_FORMAT_MOD_YTR, which should be enabled for RGB buffers for +maximum compression efficiency), the component order must be: + + * Component 0: R + * Component 1: G + * Component 2: B + +The component ordering is communicated via the fourcc code in the +fourcc:modifier pair. In general, component '0' is considered to +reside in the least-significant bits of the corresponding linear +format. For example, COMP(bits): + + * DRM_FORMAT_ABGR8888 + + * Component 0: R(8) + * Component 1: G(8) + * Component 2: B(8) + * Component 3: A(8) + + * DRM_FORMAT_BGR888 + + * Component 0: R(8) + * Component 1: G(8) + * Component 2: B(8) + + * DRM_FORMAT_YUYV + + * Component 0: Y(8) + * Component 1: Cb(8, 2x1 subsampled) + * Component 2: Cr(8, 2x1 subsampled) + +In AFBC, 'X' components are not treated any differently from any other +component. 
Therefore, an AFBC buffer with fourcc DRM_FORMAT_XBGR8888 +encodes with 4 components, like so: + + * DRM_FORMAT_XBGR8888 + + * Component 0: R(8) + * Component 1: G(8) + * Component 2: B(8) + * Component 3: X(8) + +Please note, however, that the inclusion of a "wasted" 'X' channel is +bad for compression efficiency, and so it's recommended to avoid +formats containing 'X' bits. If a fourth component is +required/expected by the encoder/decoder, then it is recommended to +instead use an equivalent format with alpha, setting all alpha bits to +'1'. If there is no requirement for a fourth component, then a format +which doesn't include alpha can be used, e.g. DRM_FORMAT_BGR888. + +Number of Planes +================ + +Formats which are typically multi-planar in linear layouts (e.g. YUV +420), can be encoded into one, or multiple, AFBC planes. As with +component order, the encoder and decoder must agree about the number +of planes in order to correctly decode the buffer. The fourcc code is +used to determine the number of encoded planes in an AFBC buffer, +matching the number of planes for the linear (unmodified) format. +Within each plane, the component ordering also follows the fourcc +code: + +For example: + + * DRM_FORMAT_YUYV: nplanes = 1 + + * Plane 0: + + * Component 0: Y(8) + * Component 1: Cb(8, 2x1 subsampled) + * Component 2: Cr(8, 2x1 subsampled) + + * DRM_FORMAT_NV12: nplanes = 2 + + * Plane 0: + + * Component 0: Y(8) + + * Plane 1: + + * Component 0: Cb(8, 2x1 subsampled) + * Component 1: Cr(8, 2x1 subsampled) + +Cross-device interoperability +============================= + +For maximum compatibility across devices, the table below defines +canonical formats for use between AFBC-enabled devices. Formats which +are listed here must be used exactly as specified when using the AFBC +modifiers. Formats which are not listed should be avoided. + +.. 
flat-table:: AFBC formats + + * - Fourcc code + - Description + - Planes/Components + + * - DRM_FORMAT_ABGR2101010 + - 10-bit per component RGB, with 2-bit alpha + - Plane 0: 4 components + * Component 0: R(10) + * Component 1: G(10) + * Component 2: B(10) + * Component 3: A(2) + + * - DRM_FORMAT_ABGR8888 + - 8-bit per component RGB, with 8-bit alpha + - Plane 0: 4 components + * Component 0: R(8) + * Component 1: G(8) + * Component 2: B(8) + * Component 3: A(8) + + * - DRM_FORMAT_BGR888 + - 8-bit per component RGB + - Plane 0: 3 components + * Component 0: R(8) + * Component 1: G(8) + * Component 2: B(8) + + * - DRM_FORMAT_BGR565 + - 5/6-bit per component RGB + - Plane 0: 3 components + * Component 0: R(5) + * Component 1: G(6) + * Component 2: B(5) + + * - DRM_FORMAT_ABGR1555 + - 5-bit per component RGB, with 1-bit alpha + - Plane 0: 4 components + * Component 0: R(5) + * Component 1: G(5) + * Component 2: B(5) + * Component 3: A(1) + + * - DRM_FORMAT_VUY888 + - 8-bit per component YCbCr 444, single plane + - Plane 0: 3 components + * Component 0: Y(8) + * Component 1: Cb(8) + * Component 2: Cr(8) + + * - DRM_FORMAT_VUY101010 + - 10-bit per component YCbCr 444, single plane + - Plane 0: 3 components + * Component 0: Y(10) + * Component 1: Cb(10) + * Component 2: Cr(10) + + * - DRM_FORMAT_YUYV + - 8-bit per component YCbCr 422, single plane + - Plane 0: 3 components + * Component 0: Y(8) + * Component 1: Cb(8, 2x1 subsampled) + * Component 2: Cr(8, 2x1 subsampled) + + * - DRM_FORMAT_NV16 + - 8-bit per component YCbCr 422, two plane + - Plane 0: 1 component + * Component 0: Y(8) + Plane 1: 2 components + * Component 0: Cb(8, 2x1 subsampled) + * Component 1: Cr(8, 2x1 subsampled) + + * - DRM_FORMAT_Y210 + - 10-bit per component YCbCr 422, single plane + - Plane 0: 3 components + * Component 0: Y(10) + * Component 1: Cb(10, 2x1 subsampled) + * Component 2: Cr(10, 2x1 subsampled) + + * - DRM_FORMAT_P210 + - 10-bit per component YCbCr 422, two plane + - Plane 0: 1 component + * Component 0: Y(10) + Plane 1: 2 components + * Component 0: Cb(10, 2x1 subsampled) + * Component 1: Cr(10, 2x1 subsampled) + + * - DRM_FORMAT_YUV420_8BIT + - 8-bit per component YCbCr 420, single plane + - Plane 0: 3 components + * Component 0: Y(8) + * Component 1: Cb(8, 2x2 subsampled) + * Component 2: Cr(8, 2x2 subsampled) + + * - DRM_FORMAT_YUV420_10BIT + - 10-bit per component YCbCr 420, single plane + - Plane 0: 3 components + * Component 0: Y(10) + * Component 1: Cb(10, 2x2 subsampled) + * Component 2: Cr(10, 2x2 subsampled) + + * - DRM_FORMAT_NV12 + - 8-bit per component YCbCr 420, two plane + - Plane 0: 1 component + * Component 0: Y(8) + Plane 1: 2 components + * Component 0: Cb(8, 2x2 subsampled) + * Component 1: Cr(8, 2x2 subsampled) + + * - DRM_FORMAT_P010 + - 10-bit per component YCbCr 420, two plane + - Plane 0: 1 component + * Component 0: Y(10) + Plane 1: 2 components + * Component 0: Cb(10, 2x2 subsampled) + * Component 1: Cr(10, 2x2 subsampled) diff --git a/Documentation/gpu/dp-mst/topology-figure-1.dot b/Documentation/gpu/dp-mst/topology-figure-1.dot new file mode 100644 index 000000000000..157e17c7e0b0 --- /dev/null +++ b/Documentation/gpu/dp-mst/topology-figure-1.dot @@ -0,0 +1,52 @@ +digraph T { + /* Make sure our payloads are always drawn below the driver node */ + subgraph cluster_driver { + fillcolor = grey; + style = filled; + driver -> {payload1, payload2} [dir=none]; + } + + /* Driver malloc references */ + edge [style=dashed]; + driver -> port1; + driver -> port2; + driver -> port3:e; + 
driver -> port4; + + payload1:s -> port1:e; + payload2:s -> port3:e; + edge [style=""]; + + subgraph cluster_topology { + label="Topology Manager"; + labelloc=bottom; + + /* Topology references */ + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + + /* Malloc references */ + edge [style=dashed;dir=back]; + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + } + + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; + + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue]; + + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen;shape=oval]; + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen;shape=oval]; + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;shape=oval]; + mstb4 [label="MSTB #4";style=filled;fillcolor=palegreen;shape=oval]; + + port1 [label="Port #1";shape=oval]; + port2 [label="Port #2";shape=oval]; + port3 [label="Port #3";shape=oval]; + port4 [label="Port #4";shape=oval]; +} diff --git a/Documentation/gpu/dp-mst/topology-figure-2.dot b/Documentation/gpu/dp-mst/topology-figure-2.dot new file mode 100644 index 000000000000..4243dd1737cb --- /dev/null +++ b/Documentation/gpu/dp-mst/topology-figure-2.dot @@ -0,0 +1,56 @@ +digraph T { + /* Make sure our payloads are always drawn below the driver node */ + subgraph cluster_driver { + fillcolor = grey; + style = filled; + driver -> {payload1, payload2} [dir=none]; + } + + /* Driver malloc references */ + edge [style=dashed]; + driver -> port1; + driver -> port2; + driver -> port3:e; + driver -> port4 [color=red]; + + payload1:s -> port1:e; + payload2:s -> port3:e; + edge [style=""]; + + subgraph cluster_topology { + label="Topology Manager"; + labelloc=bottom; + + /* Topology references */ + mstb1 -> {port1, port2}; + port1 -> mstb2; + edge [color=red]; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + edge [color=""]; + + /* Malloc references */ + edge [style=dashed;dir=back]; + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 -> port3; + edge [color=red]; + mstb3 -> port4; + port3 -> mstb4; + } + + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen]; + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen]; + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen]; + mstb4 [label="MSTB #4";style=filled;fillcolor=grey]; + + port1 [label="Port #1"]; + port2 [label="Port #2"]; + port3 [label="Port #3"]; + port4 [label="Port #4";style=filled;fillcolor=grey]; + + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; + + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue]; +} diff --git a/Documentation/gpu/dp-mst/topology-figure-3.dot b/Documentation/gpu/dp-mst/topology-figure-3.dot new file mode 100644 index 000000000000..6cd78d06778b --- /dev/null +++ b/Documentation/gpu/dp-mst/topology-figure-3.dot @@ -0,0 +1,59 @@ +digraph T { + /* Make sure our payloads are always drawn below the driver node */ + subgraph cluster_driver { + fillcolor = grey; + style = filled; + edge [dir=none]; + driver -> payload1; + driver -> payload2 [penwidth=3]; + edge [dir=""]; + } + + /* Driver malloc references */ + edge [style=dashed]; + driver -> port1; + driver -> port2; + driver -> port3:e; + driver -> port4 [color=grey]; + payload1:s -> port1:e; + payload2:s -> port3:e [penwidth=3]; + edge 
[style=""]; + + subgraph cluster_topology { + label="Topology Manager"; + labelloc=bottom; + + /* Topology references */ + mstb1 -> {port1, port2}; + port1 -> mstb2; + edge [color=grey]; + port2 -> mstb3 -> {port3, port4}; + port3 -> mstb4; + edge [color=""]; + + /* Malloc references */ + edge [style=dashed;dir=back]; + mstb1 -> {port1, port2}; + port1 -> mstb2; + port2 -> mstb3 [penwidth=3]; + mstb3 -> port3 [penwidth=3]; + edge [color=grey]; + mstb3 -> port4; + port3 -> mstb4; + } + + mstb1 [label="MSTB #1";style=filled;fillcolor=palegreen]; + mstb2 [label="MSTB #2";style=filled;fillcolor=palegreen]; + mstb3 [label="MSTB #3";style=filled;fillcolor=palegreen;penwidth=3]; + mstb4 [label="MSTB #4";style=filled;fillcolor=grey]; + + port1 [label="Port #1"]; + port2 [label="Port #2";penwidth=5]; + port3 [label="Port #3";penwidth=3]; + port4 [label="Port #4";style=filled;fillcolor=grey]; + + driver [label="DRM driver";style=filled;shape=box;fillcolor=lightblue]; + + payload1 [label="Payload #1";style=filled;shape=box;fillcolor=lightblue]; + payload2 [label="Payload #2";style=filled;shape=box;fillcolor=lightblue;penwidth=3]; +} diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst index 7c1672118a73..044a7025477c 100644 --- a/Documentation/gpu/drivers.rst +++ b/Documentation/gpu/drivers.rst @@ -17,6 +17,8 @@ GPU Driver Documentation vkms bridge/dw-hdmi xen-front + afbc + komeda-kms .. only:: subproject and html diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst index 5ee9674fb9e9..3ae23a5454ac 100644 --- a/Documentation/gpu/drm-internals.rst +++ b/Documentation/gpu/drm-internals.rst @@ -39,68 +39,6 @@ sections. Driver Information ------------------ -Driver Features -~~~~~~~~~~~~~~~ - -Drivers inform the DRM core about their requirements and supported -features by setting appropriate flags in the driver_features field. -Since those flags influence the DRM core behaviour since registration -time, most of them must be set to registering the :c:type:`struct -drm_driver ` instance. - -u32 driver_features; - -DRIVER_USE_AGP - Driver uses AGP interface, the DRM core will manage AGP resources. - -DRIVER_LEGACY - Denote a legacy driver using shadow attach. Don't use. - -DRIVER_KMS_LEGACY_CONTEXT - Used only by nouveau for backwards compatibility with existing userspace. - Don't use. - -DRIVER_PCI_DMA - Driver is capable of PCI DMA, mapping of PCI DMA buffers to - userspace will be enabled. Deprecated. - -DRIVER_SG - Driver can perform scatter/gather DMA, allocation and mapping of - scatter/gather buffers will be enabled. Deprecated. - -DRIVER_HAVE_DMA - Driver supports DMA, the userspace DMA API will be supported. - Deprecated. - -DRIVER_HAVE_IRQ; DRIVER_IRQ_SHARED - DRIVER_HAVE_IRQ indicates whether the driver has an IRQ handler - managed by the DRM Core. The core will support simple IRQ handler - installation when the flag is set. The installation process is - described in ?. - - DRIVER_IRQ_SHARED indicates whether the device & handler support - shared IRQs (note that this is required of PCI drivers). - -DRIVER_GEM - Driver use the GEM memory manager. - -DRIVER_MODESET - Driver supports mode setting interfaces (KMS). - -DRIVER_PRIME - Driver implements DRM PRIME buffer sharing. - -DRIVER_RENDER - Driver supports dedicated render nodes. - -DRIVER_ATOMIC - Driver supports atomic properties. In this case the driver must - implement appropriate obj->atomic_get_property() vfuncs for any - modeset objects with driver specific properties. 
- -DRIVER_SYNCOBJ - Driver support drm sync objects. - Major, Minor and Patchlevel ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -143,6 +81,9 @@ Device Instance and Driver Handling .. kernel-doc:: drivers/gpu/drm/drm_drv.c :doc: driver instance overview +.. kernel-doc:: include/drm/drm_device.h + :internal: + .. kernel-doc:: include/drm/drm_drv.h :internal: @@ -230,6 +171,15 @@ Printer .. kernel-doc:: drivers/gpu/drm/drm_print.c :export: +Utilities +--------- + +.. kernel-doc:: include/drm/drm_util.h + :doc: drm utils + +.. kernel-doc:: include/drm/drm_util.h + :internal: + Legacy Support Code =================== diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index b422eb8edf16..17ca7f8bf3d3 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -116,8 +116,6 @@ Framebuffer CMA Helper Functions Reference .. kernel-doc:: drivers/gpu/drm/drm_fb_cma_helper.c :export: -.. _drm_bridges: - Framebuffer GEM Helper Reference ================================ @@ -127,6 +125,8 @@ Framebuffer GEM Helper Reference .. kernel-doc:: drivers/gpu/drm/drm_gem_framebuffer_helper.c :export: +.. _drm_bridges: + Bridges ======= @@ -208,18 +208,40 @@ Display Port Dual Mode Adaptor Helper Functions Reference .. kernel-doc:: drivers/gpu/drm/drm_dp_dual_mode_helper.c :export: -Display Port MST Helper Functions Reference -=========================================== +Display Port MST Helpers +======================== + +Overview +-------- .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c :doc: dp mst helper +.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c + :doc: Branch device and port refcounting + +Functions Reference +------------------- + .. kernel-doc:: include/drm/drm_dp_mst_helper.h :internal: .. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c :export: +Topology Lifetime Internals +--------------------------- + +These functions aren't exported to drivers, but are documented here to help make +the MST topology helpers easier to understand + +.. kernel-doc:: drivers/gpu/drm/drm_dp_mst_topology.c + :functions: drm_dp_mst_topology_try_get_mstb drm_dp_mst_topology_get_mstb + drm_dp_mst_topology_put_mstb + drm_dp_mst_topology_try_get_port drm_dp_mst_topology_get_port + drm_dp_mst_topology_put_port + drm_dp_mst_get_mstb_malloc drm_dp_mst_put_mstb_malloc + MIPI DSI Helper Functions Reference =================================== @@ -274,18 +296,6 @@ SCDC Helper Functions Reference .. kernel-doc:: drivers/gpu/drm/drm_scdc_helper.c :export: -Rectangle Utilities Reference -============================= - -.. kernel-doc:: include/drm/drm_rect.h - :doc: rect utils - -.. kernel-doc:: include/drm/drm_rect.h - :internal: - -.. kernel-doc:: drivers/gpu/drm/drm_rect.c - :export: - HDMI Infoframes Helper Reference ================================ @@ -300,6 +310,18 @@ libraries and hence is also included here. .. kernel-doc:: drivers/video/hdmi.c :export: +Rectangle Utilities Reference +============================= + +.. kernel-doc:: include/drm/drm_rect.h + :doc: rect utils + +.. kernel-doc:: include/drm/drm_rect.h + :internal: + +.. kernel-doc:: drivers/gpu/drm/drm_rect.c + :export: + Flip-work Helper Reference ========================== diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst index 75c882e09fee..23a3c986ef6d 100644 --- a/Documentation/gpu/drm-kms.rst +++ b/Documentation/gpu/drm-kms.rst @@ -410,102 +410,6 @@ Encoder Functions Reference .. 
kernel-doc:: drivers/gpu/drm/drm_encoder.c :export: -KMS Initialization and Cleanup -============================== - -A KMS device is abstracted and exposed as a set of planes, CRTCs, -encoders and connectors. KMS drivers must thus create and initialize all -those objects at load time after initializing mode setting. - -CRTCs (:c:type:`struct drm_crtc `) --------------------------------------------- - -A CRTC is an abstraction representing a part of the chip that contains a -pointer to a scanout buffer. Therefore, the number of CRTCs available -determines how many independent scanout buffers can be active at any -given time. The CRTC structure contains several fields to support this: -a pointer to some video memory (abstracted as a frame buffer object), a -display mode, and an (x, y) offset into the video memory to support -panning or configurations where one piece of video memory spans multiple -CRTCs. - -CRTC Initialization -~~~~~~~~~~~~~~~~~~~ - -A KMS device must create and register at least one struct -:c:type:`struct drm_crtc ` instance. The instance is -allocated and zeroed by the driver, possibly as part of a larger -structure, and registered with a call to :c:func:`drm_crtc_init()` -with a pointer to CRTC functions. - - -Cleanup -------- - -The DRM core manages its objects' lifetime. When an object is not needed -anymore the core calls its destroy function, which must clean up and -free every resource allocated for the object. Every -:c:func:`drm_\*_init()` call must be matched with a corresponding -:c:func:`drm_\*_cleanup()` call to cleanup CRTCs -(:c:func:`drm_crtc_cleanup()`), planes -(:c:func:`drm_plane_cleanup()`), encoders -(:c:func:`drm_encoder_cleanup()`) and connectors -(:c:func:`drm_connector_cleanup()`). Furthermore, connectors that -have been added to sysfs must be removed by a call to -:c:func:`drm_connector_unregister()` before calling -:c:func:`drm_connector_cleanup()`. - -Connectors state change detection must be cleanup up with a call to -:c:func:`drm_kms_helper_poll_fini()`. - -Output discovery and initialization example -------------------------------------------- - -.. code-block:: c - - void intel_crt_init(struct drm_device *dev) - { - struct drm_connector *connector; - struct intel_output *intel_output; - - intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); - if (!intel_output) - return; - - connector = &intel_output->base; - drm_connector_init(dev, &intel_output->base, - &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - - drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, - DRM_MODE_ENCODER_DAC); - - drm_connector_attach_encoder(&intel_output->base, - &intel_output->enc); - - /* Set up the DDC bus. */ - intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); - if (!intel_output->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " - "failed.\n"); - return; - } - - intel_output->type = INTEL_OUTPUT_ANALOG; - connector->interlace_allowed = 0; - connector->doublescan_allowed = 0; - - drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); - drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); - - drm_connector_register(connector); - } - -In the example above (taken from the i915 driver), a CRTC, connector and -encoder combination is created. A device-specific i2c bus is also -created for fetching EDID data and performing monitor detection. Once -the process is complete, the new connector is registered with sysfs to -make its properties available to applications. 
- KMS Locking =========== diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst index a752aa561ea4..c9fd23efd957 100644 --- a/Documentation/gpu/drm-uapi.rst +++ b/Documentation/gpu/drm-uapi.rst @@ -238,6 +238,14 @@ DRM specific patterns. Note that ENOTTY has the slightly unintuitive meaning of Testing and validation ====================== +Testing Requirements for userspace API +-------------------------------------- + +New cross-driver userspace interface extensions, like new IOCTLs, new KMS +properties, new files in sysfs or anything else that constitutes an API change, +should have driver-agnostic testcases in IGT for that feature, if such a test +can reasonably be made using IGT for the target hardware. + Validating changes with IGT --------------------------- diff --git a/Documentation/gpu/komeda-kms.rst b/Documentation/gpu/komeda-kms.rst new file mode 100644 index 000000000000..b08da1cffecc --- /dev/null +++ b/Documentation/gpu/komeda-kms.rst @@ -0,0 +1,488 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================== + drm/komeda Arm display driver +============================== + +The drm/komeda driver supports the Arm display processor D71 and later products. +This document gives a brief overview of the driver design: how it works and why +it is designed this way. + +Overview of D71-like display IPs +================================ + +From D71, Arm display IP begins to adopt a flexible and modularized +architecture. A display pipeline is made up of multiple individual and +functional pipeline stages called components, and every component has some +specific capabilities and applies a particular processing step to the pixel +data flowing through the pipeline. + +Typical D71 components: + +Layer +----- +Layer is the first pipeline stage, which prepares the pixel data for the next +stage. It fetches pixels from memory, decodes them if they are AFBC, rotates the +source image, unpacks or converts YUV pixels to the device internal RGB pixels, +then adjusts the color_space of pixels if needed. + +Scaler +------ +As its name suggests, the scaler takes responsibility for scaling, and D71 also +supports image enhancements by the scaler. +The usage of the scaler is very flexible: it can be connected to a layer output +for layer scaling, or connected to the compositor to scale the whole display +frame and then feed the output data into wb_layer, which will then write it +into memory. + +Compositor (compiz) +------------------- +Compositor blends multiple layers or pixel data flows into one single display +frame. Its output frame can be fed into the post image processor for showing it on +the monitor, or fed into wb_layer and written to memory at the same time. +The user can also insert a scaler between compositor and wb_layer to downscale +the display frame first and then write it to memory. + +Writeback Layer (wb_layer) +-------------------------- +Writeback layer does the opposite of Layer: it connects to compiz +and writes the composition result to memory. + +Post image processor (improc) +----------------------------- +Post image processor adjusts frame data like gamma and color space to fit the +requirements of the monitor. + +Timing controller (timing_ctrlr) +-------------------------------- +The final stage of the display pipeline. The timing controller does not handle +pixels, but only controls the display timing.
+ +Merger +------ +The D71 scaler has only about half the horizontal input/output capability +of a Layer; for example, if a Layer supports a 4K input size, the scaler can only +support 2K input/output at the same time. To achieve full frame scaling, D71 +introduces Layer Split, which splits the whole image into two halves and feeds +them to two Layers, A and B, which do the scaling independently. After scaling, +the results need to be fed to the merger, which merges the two partial images +together and outputs the merged result to compiz. + +Splitter +-------- +Similar to Layer Split, but Splitter is used for writeback: it splits the +compiz result into two parts and then feeds them to two scalers. + +Possible D71 Pipeline usage +=========================== + +Benefiting from the modularized architecture, D71 pipelines can be easily +adjusted to fit different usages. D71 has two pipelines, which support two +working modes: + +- Dual display mode + Two pipelines work independently and separately to drive two display outputs. + +- Single display mode + Two pipelines work together to drive only one display output. + + In this mode, pipeline_B doesn't work independently, but outputs its + composition result into pipeline_A, and its pixel timing is also derived from + pipeline_A.timing_ctrlr. Pipeline_B works just like a "slave" of + pipeline_A (the master). + +Single pipeline data flow +------------------------- + +.. kernel-render:: DOT + :alt: Single pipeline digraph + :caption: Single pipeline data flow + + digraph single_ppl { + rankdir=LR; + + subgraph { + "Memory"; + "Monitor"; + } + + subgraph cluster_pipeline { + style=dashed + node [shape=box] + { + node [bgcolor=grey style=dashed] + "Scaler-0"; + "Scaler-1"; + "Scaler-0/1" + } + + node [bgcolor=grey style=filled] + "Layer-0" -> "Scaler-0" + "Layer-1" -> "Scaler-0" + "Layer-2" -> "Scaler-1" + "Layer-3" -> "Scaler-1" + + "Layer-0" -> "Compiz" + "Layer-1" -> "Compiz" + "Layer-2" -> "Compiz" + "Layer-3" -> "Compiz" + "Scaler-0" -> "Compiz" + "Scaler-1" -> "Compiz" + + "Compiz" -> "Scaler-0/1" -> "Wb_layer" + "Compiz" -> "Improc" -> "Timing Controller" + } + + "Wb_layer" -> "Memory" + "Timing Controller" -> "Monitor" + } + +Dual pipeline with Slave enabled +-------------------------------- + ..
kernel-render:: DOT + :alt: Slave pipeline digraph + :caption: Slave pipeline enabled data flow + + digraph slave_ppl { + rankdir=LR; + + subgraph { + "Memory"; + "Monitor"; + } + node [shape=box] + subgraph cluster_pipeline_slave { + style=dashed + label="Slave Pipeline_B" + node [shape=box] + { + node [bgcolor=grey style=dashed] + "Slave.Scaler-0"; + "Slave.Scaler-1"; + } + + node [bgcolor=grey style=filled] + "Slave.Layer-0" -> "Slave.Scaler-0" + "Slave.Layer-1" -> "Slave.Scaler-0" + "Slave.Layer-2" -> "Slave.Scaler-1" + "Slave.Layer-3" -> "Slave.Scaler-1" + + "Slave.Layer-0" -> "Slave.Compiz" + "Slave.Layer-1" -> "Slave.Compiz" + "Slave.Layer-2" -> "Slave.Compiz" + "Slave.Layer-3" -> "Slave.Compiz" + "Slave.Scaler-0" -> "Slave.Compiz" + "Slave.Scaler-1" -> "Slave.Compiz" + } + + subgraph cluster_pipeline_master { + style=dashed + label="Master Pipeline_A" + node [shape=box] + { + node [bgcolor=grey style=dashed] + "Scaler-0"; + "Scaler-1"; + "Scaler-0/1" + } + + node [bgcolor=grey style=filled] + "Layer-0" -> "Scaler-0" + "Layer-1" -> "Scaler-0" + "Layer-2" -> "Scaler-1" + "Layer-3" -> "Scaler-1" + + "Slave.Compiz" -> "Compiz" + "Layer-0" -> "Compiz" + "Layer-1" -> "Compiz" + "Layer-2" -> "Compiz" + "Layer-3" -> "Compiz" + "Scaler-0" -> "Compiz" + "Scaler-1" -> "Compiz" + + "Compiz" -> "Scaler-0/1" -> "Wb_layer" + "Compiz" -> "Improc" -> "Timing Controller" + } + + "Wb_layer" -> "Memory" + "Timing Controller" -> "Monitor" + } + +Sub-pipelines for input and output +---------------------------------- + +A complete display pipeline can be easily divided into three sub-pipelines +according to the in/out usage. + +Layer(input) pipeline +~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-render:: DOT + :alt: Layer data digraph + :caption: Layer (input) data flow + + digraph layer_data_flow { + rankdir=LR; + node [shape=box] + + { + node [bgcolor=grey style=dashed] + "Scaler-n"; + } + + "Layer-n" -> "Scaler-n" -> "Compiz" + } + +.. kernel-render:: DOT + :alt: Layer Split digraph + :caption: Layer Split pipeline + + digraph layer_data_flow { + rankdir=LR; + node [shape=box] + + "Layer-0/1" -> "Scaler-0" -> "Merger" + "Layer-2/3" -> "Scaler-1" -> "Merger" + "Merger" -> "Compiz" + } + +Writeback(output) pipeline +~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. kernel-render:: DOT + :alt: writeback digraph + :caption: Writeback(output) data flow + + digraph writeback_data_flow { + rankdir=LR; + node [shape=box] + + { + node [bgcolor=grey style=dashed] + "Scaler-n"; + } + + "Compiz" -> "Scaler-n" -> "Wb_layer" + } + +.. kernel-render:: DOT + :alt: split writeback digraph + :caption: Writeback(output) Split data flow + + digraph writeback_data_flow { + rankdir=LR; + node [shape=box] + + "Compiz" -> "Splitter" + "Splitter" -> "Scaler-0" -> "Merger" + "Splitter" -> "Scaler-1" -> "Merger" + "Merger" -> "Wb_layer" + } + +Display output pipeline +~~~~~~~~~~~~~~~~~~~~~~~ +.. kernel-render:: DOT + :alt: display digraph + :caption: display output data flow + + digraph single_ppl { + rankdir=LR; + node [shape=box] + + "Compiz" -> "Improc" -> "Timing Controller" + } + +In the following section we'll see these three sub-pipelines will be handled +by KMS-plane/wb_conn/crtc respectively. 
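+
+As a purely illustrative sketch of that mapping (the structures below are
+simplified placeholders made up for this overview, not the driver's real
+definitions), each KMS object can be thought of as owning the sub-pipeline
+that implements it:
+
+.. code-block:: c
+
+    #include <drm/drm_crtc.h>
+    #include <drm/drm_plane.h>
+    #include <drm/drm_writeback.h>
+
+    /* Illustrative sketch only: each KMS object drives one sub-pipeline. */
+    struct komeda_plane_sketch {
+            struct drm_plane base;                /* KMS plane */
+            /* Layer (input) sub-pipeline: Layer -> (Scaler) -> Compiz */
+    };
+
+    struct komeda_wb_conn_sketch {
+            struct drm_writeback_connector base;  /* KMS writeback connector */
+            /* Writeback (output) sub-pipeline: Compiz -> (Scaler) -> Wb_layer */
+    };
+
+    struct komeda_crtc_sketch {
+            struct drm_crtc base;                 /* KMS CRTC */
+            /* Display output sub-pipeline: Compiz -> Improc -> Timing ctrlr */
+    };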
+ +Komeda Resource abstraction +=========================== + +struct komeda_pipeline/component +-------------------------------- + +To fully utilize and easily access/configure the HW, the driver side also uses +a similar architecture: Pipeline/Component to describe the HW features and +capabilities, and a specific component includes two parts: + +- Data flow controlling. +- Specific component capabilities and features. + +So the driver defines a common header, struct komeda_component, to describe the +data flow control, and all specific components are subclasses of this base +structure. + +.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h + :internal: + +Resource discovery and initialization +===================================== + +Pipeline and component are used to describe how to handle the pixel data. We +still need a @struct komeda_dev to describe the whole view of the device and +its control abilities. + +We have &komeda_dev, &komeda_pipeline, &komeda_component. Now we fill the device with +pipelines. Since komeda is not only for D71 but also intended for later products, +we'd better share as much as possible between different products. To +achieve this, the komeda device is split into two layers: CORE and CHIP. + +- CORE: for common features and capabilities handling. +- CHIP: for register programming and HW specific feature (limitation) handling. + +CORE can access CHIP by three chip function structures: + +- struct komeda_dev_funcs +- struct komeda_pipeline_funcs +- struct komeda_component_funcs + +.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_dev.h + :internal: + +Format handling +=============== + +.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h + :internal: +.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h + :internal: + +Attach komeda_dev to DRM-KMS +============================ + +Komeda abstracts resources by pipeline/component, but DRM-KMS uses +crtc/plane/connector. One KMS object does not simply represent one single component, +since the requirements of a single KMS object usually cannot be met by a +single component; multiple components are needed to fit them. +For example, setting the mode, gamma and CTM in KMS all target the CRTC object, but komeda needs +compiz, improc and timing_ctrlr to work together to meet these requirements. +And a KMS-Plane may require multiple komeda resources: layer/scaler/compiz. + +So, one KMS-Obj represents a sub-pipeline of komeda resources. + +- Plane: `Layer(input) pipeline`_ +- Wb_connector: `Writeback(output) pipeline`_ +- Crtc: `Display output pipeline`_ + +So, for komeda, we treat KMS crtc/plane/connector as users of pipeline and +component, and at any one time a pipeline/component can only be used by one +user. Pipelines/components will be treated as private objects of DRM-KMS; their +state will be managed by drm_atomic_state as well. + +How to map plane to Layer(input) pipeline +----------------------------------------- + +Komeda has multiple Layer input pipelines, see: +- `Single pipeline data flow`_ +- `Dual pipeline with Slave enabled`_ + +The easiest way is binding a plane to a fixed Layer pipeline, but consider the +komeda capabilities: + +- Layer Split, See `Layer(input) pipeline`_ + + Layer_Split is quite a complicated feature: it splits a big image into two + parts and handles them with two layers and two scalers individually. But it + introduces an edge problem or artifact in the middle of the image after the split.
+ To avoid such a problem, it needs a complicated Split calculation and some + special configuration of the layer and scaler. We'd better hide such HW-related + complexity from user space. + +- Slave pipeline, See `Dual pipeline with Slave enabled`_ + + Since the compiz component doesn't output an alpha value, the slave pipeline + can only be used for composing the bottom layers. The komeda driver wants to + hide this limitation from the user. The way to do this is to pick a suitable + Layer according to plane_state->zpos. + +So for komeda, the KMS-plane doesn't represent a fixed komeda layer pipeline, +but multiple Layers with the same capabilities. Komeda will select one or more +Layers to fit the requirement of one KMS-plane. + +Make component/pipeline a drm_private_obj +--------------------------------------------- + +Add a :c:type:`drm_private_obj` to :c:type:`komeda_component` and :c:type:`komeda_pipeline`: + +.. code-block:: c + + struct komeda_component { + struct drm_private_obj obj; + ... + } + + struct komeda_pipeline { + struct drm_private_obj obj; + ... + } + +Tracking component_state/pipeline_state by drm_atomic_state +----------------------------------------------------------- + +Add a :c:type:`drm_private_state` and a user to :c:type:`komeda_component_state` and +:c:type:`komeda_pipeline_state`: + +.. code-block:: c + + struct komeda_component_state { + struct drm_private_state obj; + void *binding_user; + ... + } + + struct komeda_pipeline_state { + struct drm_private_state obj; + struct drm_crtc *crtc; + ... + } + +komeda component validation +--------------------------- + +Komeda has multiple types of components, but the process of validation is +similar, usually including the following steps: + +.. code-block:: c + + int komeda_xxxx_validate(struct komeda_component_xxx xxx_comp, + struct komeda_component_output *input_dflow, + struct drm_plane/crtc/connector *user, + struct drm_plane/crtc/connector_state *user_state) + { + Step 1: check if the component is needed; for example, the scaler is optional + depending on the user_state. If unneeded, just return, and the caller + will put the data flow into the next stage. + Step 2: check the user_state against the component features and capabilities + to see if the requirements can be met; if not, return failure. + Step 3: get the component_state from drm_atomic_state, and try to set the + user to the component; fail if the component has already been assigned + to another user. + Step 4: configure the component_state, e.g. set its input component, and + convert the user_state to a component specific state. + Step 5: adjust the input_dflow and prepare it for the next stage. + } + +komeda_kms Abstraction +---------------------- + +.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_kms.h + :internal: + +komeda_kms Functions +------------------- +.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_crtc.c + :internal: +.. kernel-doc:: drivers/gpu/drm/arm/display/komeda/komeda_plane.c + :internal: + +Build komeda to be a Linux module driver +======================================== + +Now we have two levels of devices: + +- komeda_dev: describes the real display hardware. +- komeda_kms_dev: attaches or connects komeda_dev to DRM-KMS. + +All komeda operations are supplied or operated by komeda_dev or komeda_kms_dev; +the module driver is only a simple wrapper that passes the Linux driver model +calls (probe/remove/pm) on to komeda_dev or komeda_kms_dev.
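+
+As a rough sketch of that thin-wrapper idea (the komeda_dev_create(),
+komeda_kms_attach(), komeda_kms_detach() and komeda_dev_destroy() names below
+are illustrative placeholders for this overview, not necessarily the driver's
+real entry points), the module glue could look roughly like this:
+
+.. code-block:: c
+
+    #include <linux/module.h>
+    #include <linux/platform_device.h>
+
+    static int komeda_platform_probe(struct platform_device *pdev)
+    {
+            struct komeda_dev *mdev;
+
+            /* CORE + CHIP discovery and initialization */
+            mdev = komeda_dev_create(&pdev->dev);
+            if (IS_ERR(mdev))
+                    return PTR_ERR(mdev);
+
+            platform_set_drvdata(pdev, mdev);
+
+            /* hand the device over to DRM-KMS (komeda_kms_dev) */
+            return komeda_kms_attach(mdev);
+    }
+
+    static int komeda_platform_remove(struct platform_device *pdev)
+    {
+            struct komeda_dev *mdev = platform_get_drvdata(pdev);
+
+            komeda_kms_detach(mdev);
+            komeda_dev_destroy(mdev);
+            return 0;
+    }
+
+    static struct platform_driver komeda_platform_driver = {
+            .probe  = komeda_platform_probe,
+            .remove = komeda_platform_remove,
+            .driver = { .name = "komeda" },
+    };
+    module_platform_driver(komeda_platform_driver);
+
+    MODULE_LICENSE("GPL v2");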
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 14191b64446d..159a4aba49e6 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -82,30 +82,6 @@ events for atomic commits correctly. But fixing these bugs is good anyway. Contact: Daniel Vetter, respective driver maintainers -Better manual-upload support for atomic ---------------------------------------- - -This would be especially useful for tinydrm: - -- Add a struct drm_rect dirty_clip to drm_crtc_state. When duplicating the - crtc state, clear that to the max values, x/y = 0 and w/h = MAX_INT, in - __drm_atomic_helper_crtc_duplicate_state(). - -- Move tinydrm_merge_clips into drm_framebuffer.c, dropping the tinydrm\_ - prefix ofc and using drm_fb\_. drm_framebuffer.c makes sense since this - is a function useful to implement the fb->dirty function. - -- Create a new drm_fb_dirty function which does essentially what e.g. - mipi_dbi_fb_dirty does. You can use e.g. drm_atomic_helper_update_plane as the - template. But instead of doing a simple full-screen plane update, this new - helper also sets crtc_state->dirty_clip to the right coordinates. And of - course it needs to check whether the fb is actually active (and maybe where), - so there's some book-keeping involved. There's also some good fun involved in - scaling things appropriately. For that case we might simply give up and - declare the entire area covered by the plane as dirty. - -Contact: Noralf Trønnes, Daniel Vetter - Fallout from atomic KMS ----------------------- @@ -209,6 +185,36 @@ Would be great to refactor this all into a set of small common helpers. Contact: Daniel Vetter +Generic fbdev defio support +--------------------------- + +The defio support code in the fbdev core has some very specific requirements, +which means drivers need to have a special framebuffer for fbdev. This prevents +us from using the generic fbdev emulation code everywhere. The main issue is +that it uses some fields in struct page itself, which breaks shmem gem objects +(and other things). + +A possible solution would be to write our own defio mmap code in the drm fbdev +emulation. It would need to fully wrap the existing mmap ops, forwarding +everything after it has done the write-protect/mkwrite trickery: + +- In the drm_fbdev_fb_mmap helper, if we need defio, change the + default page prots to write-protected with something like this:: + + vma->vm_page_prot = pgprot_wrprotect(vma->vm_page_prot); + +- Set the mkwrite and fsync callbacks with similar implementations to the core + fbdev defio stuff. These should all work on plain ptes, they don't actually + require a struct page. + +- Track the dirty pages in a separate structure (bitfield with one bit per page + should work) to avoid clobbering struct page. + +Might be good to also have some igt testcases for this. + +Contact: Daniel Vetter, Noralf Tronnes + Put a reservation_object into drm_gem_object -------------------------------------------- @@ -256,6 +262,44 @@ As a reference, take a look at the conversions already completed in drm core. Contact: Sean Paul, respective driver maintainers +Rename CMA helpers to DMA helpers +--------------------------------- + +CMA (standing for contiguous memory allocator) is really a bit of an accident of +what these were first used for; a much better name would be DMA helpers.
In the +text these should even be called coherent DMA memory helpers (so maybe CDM, but +no one knows what that means) since underneath they just use dma_alloc_coherent. + +Contact: Laurent Pinchart, Daniel Vetter + +Convert direct mode.vrefresh accesses to use drm_mode_vrefresh() +---------------------------------------------------------------- + +drm_display_mode.vrefresh isn't guaranteed to be populated. As such, using it +is risky and has been known to cause div-by-zero bugs. Fortunately, drm core +has helper which will use mode.vrefresh if it's !0 and will calculate it from +the timings when it's 0. + +Use simple search/replace, or (more fun) cocci to replace instances of direct +vrefresh access with a call to the helper. Check out +https://lists.freedesktop.org/archives/dri-devel/2019-January/205186.html for +inspiration. + +Once all instances of vrefresh have been converted, remove vrefresh from +drm_display_mode to avoid future use. + +Contact: Sean Paul + +Remove drm_display_mode.hsync +----------------------------- + +We have drm_mode_hsync() to calculate this from hsync_start/end, since drivers +shouldn't/don't use this, remove this member to avoid any temptations to use it +in the future. If there is any debug code using drm_display_mode.hsync, convert +it to use drm_mode_hsync() instead. + +Contact: Sean Paul + Core refactorings ================= @@ -354,13 +398,6 @@ KMS cleanups Some of these date from the very introduction of KMS in 2008 ... -- drm_mode_config.crtc_idr is misnamed, since it contains all KMS object. Should - be renamed to drm_mode_config.object_idr. - -- drm_display_mode doesn't need to be derived from drm_mode_object. That's - leftovers from older (never merged into upstream) KMS designs where modes - where set using their ID, including support to add/remove modes. - - Make ->funcs and ->helper_private vtables optional. There's a bunch of empty function tables in drivers, but before we can remove them we need to make sure that all the users in helpers and drivers do correctly check for a NULL @@ -432,21 +469,10 @@ those drivers as simple as possible, so lots of room for refactoring: one of the ideas for having a shared dsi/dbi helper, abstracting away the transport details more. -- tinydrm_gem_cma_prime_import_sg_table should probably go into the cma - helpers, as a _vmapped variant (since not every driver needs the vmap). - And tinydrm_gem_cma_free_object could the be merged into - drm_gem_cma_free_object(). - -- tinydrm_fb_create we could move into drm_simple_pipe, only need to add - the fb_create hook to drm_simple_pipe_funcs, which would again simplify a - bunch of things (since it gives you a one-stop vfunc for simple drivers). - - Quick aside: The unregister devm stuff is kinda getting the lifetimes of a drm_device wrong. Doesn't matter, since everyone else gets it wrong too :-) -- also rework the drm_framebuffer_funcs->dirty hook wire-up, see above. - Contact: Noralf Trønnes, Daniel Vetter AMD DC Display Driver diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst index 7dfc349a4508..61586fc861bb 100644 --- a/Documentation/gpu/vkms.rst +++ b/Documentation/gpu/vkms.rst @@ -23,17 +23,6 @@ CRC API Improvements - Add igt test to check extreme alpha values i.e. fully opaque and fully transparent (intermediate values are affected by hw-specific rounding modes). -Vblank issues -------------- - -Some IGT test cases are failing. 
Need to analyze why and fix the issues: - -- plain-flip-fb-recreate -- plain-flip-ts-check -- flip-vs-blocking-wf-vblank -- plain-flip-fb-recreate-interruptible -- flip-vs-wf_vblank-interruptible - Runtime Configuration --------------------- diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg index de91c0db5846..4c3cb8377d74 100644 --- a/Documentation/hwmon/f71882fg +++ b/Documentation/hwmon/f71882fg @@ -94,7 +94,7 @@ Note that the lowest numbered temperature zone trip point corresponds to to the border between the highest and one but highest temperature zones, and vica versa. So the temperature zone trip points 1-4 (or 1-2) go from high temp to low temp! This is how things are implemented in the IC, and the driver -mimicks this. +mimics this. There are 2 modes to specify the speed of the fan, PWM duty cycle (or DC voltage) mode, where 0-100% duty cycle (0-100% of 12V) is specified. And RPM diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85 index 7c49feaa79d2..2329c383efe4 100644 --- a/Documentation/hwmon/lm85 +++ b/Documentation/hwmon/lm85 @@ -3,9 +3,13 @@ Kernel driver lm85 Supported chips: * National Semiconductor LM85 (B and C versions) - Prefix: 'lm85' + Prefix: 'lm85b' or 'lm85c' Addresses scanned: I2C 0x2c, 0x2d, 0x2e Datasheet: http://www.national.com/pf/LM/LM85.html + * Texas Instruments LM96000 + Prefix: 'lm9600' + Addresses scanned: I2C 0x2c, 0x2d, 0x2e + Datasheet: http://www.ti.com/lit/ds/symlink/lm96000.pdf * Analog Devices ADM1027 Prefix: 'adm1027' Addresses scanned: I2C 0x2c, 0x2d, 0x2e @@ -136,6 +140,9 @@ of voltage and temperature channels. SMSC EMC6D103S is similar to EMC6D103, but does not support pwm#_auto_pwm_minctl and temp#_auto_temp_off. +The LM96000 supports additional high frequency PWM modes (22.5 kHz, 24 kHz, +25.7 kHz, 27.7 kHz and 30 kHz), which can be configured on a per-PWM basis. + Hardware Configurations ----------------------- diff --git a/Documentation/i2c/fault-codes b/Documentation/i2c/fault-codes index 47c25abb7d52..0cee0fc545b4 100644 --- a/Documentation/i2c/fault-codes +++ b/Documentation/i2c/fault-codes @@ -112,6 +112,10 @@ EPROTO case is when the length of an SMBus block data response (from the SMBus slave) is outside the range 1-32 bytes. +ESHUTDOWN + Returned when a transfer was requested using an adapter + which is already suspended. + ETIMEDOUT This is returned by drivers when an operation took too much time, and was aborted before it completed. diff --git a/Documentation/i2c/gpio-fault-injection b/Documentation/i2c/gpio-fault-injection index a4ce62090fd5..c87f416d53dd 100644 --- a/Documentation/i2c/gpio-fault-injection +++ b/Documentation/i2c/gpio-fault-injection @@ -1,3 +1,4 @@ +========================= Linux I2C fault injection ========================= @@ -13,6 +14,9 @@ mounted at /sys/kernel/debug. There will be a separate subdirectory per GPIO driven I2C bus. Each subdirectory will contain files to trigger the fault injection. They will be described now along with their intended use-cases. +Wire states +=========== + "scl" ----- @@ -34,10 +38,10 @@ I2C specification version 4, section 3.1.16) using the helpers of the Linux I2C core (see 'struct bus_recovery_info'). However, the bus recovery will not succeed because SDA is still pinned low until you manually release it again with "echo 1 > sda". A test with an automatic release can be done with the -following class of fault injectors. +"incomplete transfers" class of fault injectors. 
-Introduction to incomplete transfers ------------------------------------- +Incomplete transfers +==================== The following fault injectors create situations where SDA will be held low by a device. Bus recovery should be able to fix these situations. But please note: @@ -79,3 +83,54 @@ This is why bus recovery (up to 9 clock pulses) must either check SDA or send additional STOP conditions to ensure the bus has been released. Otherwise random data will be written to a device! +Lost arbitration +================ + +Here, we want to simulate the condition where the master under test loses the +bus arbitration against another master in a multi-master setup. + +"lose_arbitration" +------------------ + +This file is write only and you need to write the duration of the arbitration +intereference (in µs, maximum is 100ms). The calling process will then sleep +and wait for the next bus clock. The process is interruptible, though. + +Arbitration lost is achieved by waiting for SCL going down by the master under +test and then pulling SDA low for some time. So, the I2C address sent out +should be corrupted and that should be detected properly. That means that the +address sent out should have a lot of '1' bits to be able to detect corruption. +There doesn't need to be a device at this address because arbitration lost +should be detected beforehand. Also note, that SCL going down is monitored +using interrupts, so the interrupt latency might cause the first bits to be not +corrupted. A good starting point for using this fault injector on an otherwise +idle bus is: + +# echo 200 > lose_arbitration & +# i2cget -y 0x3f + +Panic during transfer +===================== + +This fault injector will create a Kernel panic once the master under test +started a transfer. This usually means that the state machine of the bus master +driver will be ungracefully interrupted and the bus may end up in an unusual +state. Use this to check if your shutdown/reboot/boot code can handle this +scenario. + +"inject_panic" +-------------- + +This file is write only and you need to write the delay between the detected +start of a transmission and the induced Kernel panic (in µs, maximum is 100ms). +The calling process will then sleep and wait for the next bus clock. The +process is interruptible, though. + +Start of a transfer is detected by waiting for SCL going down by the master +under test. A good starting point for using this fault injector is: + +# echo 0 > inject_panic & +# i2cget -y + +Note that there doesn't need to be a device listening to the address you are +using. Results may vary depending on that, though. diff --git a/Documentation/index.rst b/Documentation/index.rst index c858c2e66e36..80a421cb935e 100644 --- a/Documentation/index.rst +++ b/Documentation/index.rst @@ -90,6 +90,7 @@ needed). filesystems/index vm/index bpf/index + misc-devices/index Architecture-specific documentation ----------------------------------- diff --git a/Documentation/infiniband/user_verbs.txt b/Documentation/infiniband/user_verbs.txt index df049b9f5b6e..47ebf2f80b2b 100644 --- a/Documentation/infiniband/user_verbs.txt +++ b/Documentation/infiniband/user_verbs.txt @@ -46,11 +46,11 @@ Memory pinning I/O targets be kept resident at the same physical address. The ib_uverbs module manages pinning and unpinning memory regions via get_user_pages() and put_page() calls. 
It also accounts for the - amount of memory pinned in the process's locked_vm, and checks that + amount of memory pinned in the process's pinned_vm, and checks that unprivileged processes do not exceed their RLIMIT_MEMLOCK limit. Pages that are pinned multiple times are counted each time they are - pinned, so the value of locked_vm may be an overestimate of the + pinned, so the value of pinned_vm may be an overestimate of the number of pages pinned by a process. /dev files diff --git a/Documentation/input/devices/xpad.rst b/Documentation/input/devices/xpad.rst index b8bd65962dd8..173c2acda9fd 100644 --- a/Documentation/input/devices/xpad.rst +++ b/Documentation/input/devices/xpad.rst @@ -218,7 +218,7 @@ References .. [1] http://euc.jp/periphs/xbox-controller.ja.html (ITO Takayuki) .. [2] http://xpad.xbox-scene.com/ .. [3] http://www.markosweb.com/www/xboxhackz.com/ -.. [4] http://lxr.free-electrons.com/ident?i=xpad_device +.. [4] https://elixir.bootlin.com/linux/latest/ident/xpad_device Historic Edits diff --git a/Documentation/interconnect/interconnect.rst b/Documentation/interconnect/interconnect.rst new file mode 100644 index 000000000000..b8107dcc4cd3 --- /dev/null +++ b/Documentation/interconnect/interconnect.rst @@ -0,0 +1,94 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===================================== +GENERIC SYSTEM INTERCONNECT SUBSYSTEM +===================================== + +Introduction +------------ + +This framework is designed to provide a standard kernel interface to control +the settings of the interconnects on an SoC. These settings can be throughput, +latency and priority between multiple interconnected devices or functional +blocks. This can be controlled dynamically in order to save power or provide +maximum performance. + +The interconnect bus is hardware with configurable parameters, which can be +set on a data path according to the requests received from various drivers. +An example of interconnect buses are the interconnects between various +components or functional blocks in chipsets. There can be multiple interconnects +on an SoC that can be multi-tiered. + +Below is a simplified diagram of a real-world SoC interconnect bus topology. + +:: + + +----------------+ +----------------+ + | HW Accelerator |--->| M NoC |<---------------+ + +----------------+ +----------------+ | + | | +------------+ + +-----+ +-------------+ V +------+ | | + | DDR | | +--------+ | PCIe | | | + +-----+ | | Slaves | +------+ | | + ^ ^ | +--------+ | | C NoC | + | | V V | | + +------------------+ +------------------------+ | | +-----+ + | |-->| |-->| |-->| CPU | + | |-->| |<--| | +-----+ + | Mem NoC | | S NoC | +------------+ + | |<--| |---------+ | + | |<--| |<------+ | | +--------+ + +------------------+ +------------------------+ | | +-->| Slaves | + ^ ^ ^ ^ ^ | | +--------+ + | | | | | | V + +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+ + | CPUs | | | GPU | | DSP | | Masters |-->| P NoC |-->| Slaves | + +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+ + | + +-------+ + | Modem | + +-------+ + +Terminology +----------- + +Interconnect provider is the software definition of the interconnect hardware. +The interconnect providers on the above diagram are M NoC, S NoC, C NoC, P NoC +and Mem NoC. + +Interconnect node is the software definition of the interconnect hardware +port. Each interconnect provider consists of multiple interconnect nodes, +which are connected to other SoC components including other interconnect +providers. 
The point on the diagram where the CPUs connect to the memory is +called an interconnect node, which belongs to the Mem NoC interconnect provider. + +Interconnect endpoints are the first or the last element of the path. Every +endpoint is a node, but not every node is an endpoint. + +Interconnect path is everything between two endpoints including all the nodes +that have to be traversed to reach from a source to destination node. It may +include multiple master-slave pairs across several interconnect providers. + +Interconnect consumers are the entities which make use of the data paths exposed +by the providers. The consumers send requests to providers requesting various +throughput, latency and priority. Usually the consumers are device drivers, that +send request based on their needs. An example for a consumer is a video decoder +that supports various formats and image sizes. + +Interconnect providers +---------------------- + +Interconnect provider is an entity that implements methods to initialize and +configure interconnect bus hardware. The interconnect provider drivers should +be registered with the interconnect provider core. + +.. kernel-doc:: include/linux/interconnect-provider.h + +Interconnect consumers +---------------------- + +Interconnect consumers are the clients which use the interconnect APIs to +get paths between endpoints and set their bandwidth/latency/QoS requirements +for these interconnect paths. + +.. kernel-doc:: include/linux/interconnect.h diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt index c9e3d93e7a89..8a3830b39c7d 100644 --- a/Documentation/kbuild/kbuild.txt +++ b/Documentation/kbuild/kbuild.txt @@ -232,17 +232,12 @@ KBUILD_LDS -------------------------------------------------- The linker script with full path. Assigned by the top-level Makefile. -KBUILD_VMLINUX_INIT +KBUILD_VMLINUX_OBJS -------------------------------------------------- -All object files for the init (first) part of vmlinux. -Files specified with KBUILD_VMLINUX_INIT are linked first. - -KBUILD_VMLINUX_MAIN --------------------------------------------------- -All object files for the main part of vmlinux. +All object files for vmlinux. They are linked to vmlinux in the same +order as listed in KBUILD_VMLINUX_OBJS. KBUILD_VMLINUX_LIBS -------------------------------------------------- -All .a "lib" files for vmlinux. -KBUILD_VMLINUX_INIT, KBUILD_VMLINUX_MAIN, and KBUILD_VMLINUX_LIBS together -specify all the object files used to link vmlinux. +All .a "lib" files for vmlinux. KBUILD_VMLINUX_OBJS and KBUILD_VMLINUX_LIBS +together specify all the object files used to link vmlinux. diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index bf28c47bfd72..f124be6e4c3a 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -154,13 +154,8 @@ more details, with real examples. Kbuild compiles all the $(obj-y) files. It then calls "$(AR) rcSTP" to merge these files into one built-in.a file. - This is a thin archive without a symbol table, which makes it - unsuitable as a linker input. - - The scripts/link-vmlinux.sh script later makes an aggregate - built-in.a with "${AR} rcsTP", which creates the thin archive - with a symbol table and an index, making it a valid input for - the final vmlinux link passes. + This is a thin archive without a symbol table. It will be later + linked into vmlinux by scripts/link-vmlinux.sh The order of files in $(obj-y) is significant. 
Duplicates in the lists are allowed: the first instance will be linked into @@ -504,23 +499,6 @@ more details, with real examples. In the above example, -Wno-unused-but-set-variable will be added to KBUILD_CFLAGS only if gcc really accepts it. - cc-version - cc-version returns a numerical version of the $(CC) compiler version. - The format is where both are two digits. So for example - gcc 3.41 would return 0341. - cc-version is useful when a specific $(CC) version is faulty in one - area, for example -mregparm=3 was broken in some gcc versions - even though the option was accepted by gcc. - - Example: - #arch/x86/Makefile - cflags-y += $(shell \ - if [ $(cc-version) -ge 0300 ] ; then \ - echo "-mregparm=3"; fi ;) - - In the above example, -mregparm=3 is only used for gcc version greater - than or equal to gcc 3.0. - cc-ifversion cc-ifversion tests the version of $(CC) and equals the fourth parameter if version expression is true, or the fifth (if given) if the version diff --git a/Documentation/kbuild/modules.txt b/Documentation/kbuild/modules.txt index 3fb39e0116b4..80295c613e37 100644 --- a/Documentation/kbuild/modules.txt +++ b/Documentation/kbuild/modules.txt @@ -140,7 +140,7 @@ executed to make module versioning work. make -C $KDIR M=$PWD bar.lst make -C $KDIR M=$PWD baz.o make -C $KDIR M=$PWD foo.ko - make -C $KDIR M=$PWD / + make -C $KDIR M=$PWD ./ === 3. Creating a Kbuild File for an External Module diff --git a/Documentation/kdump/vmcoreinfo.txt b/Documentation/kdump/vmcoreinfo.txt new file mode 100644 index 000000000000..bb94a4bd597a --- /dev/null +++ b/Documentation/kdump/vmcoreinfo.txt @@ -0,0 +1,495 @@ +================================================================ + VMCOREINFO +================================================================ + +=========== +What is it? +=========== + +VMCOREINFO is a special ELF note section. It contains various +information from the kernel like structure size, page size, symbol +values, field offsets, etc. These data are packed into an ELF note +section and used by user-space tools like crash and makedumpfile to +analyze a kernel's memory layout. + +================ +Common variables +================ + +init_uts_ns.name.release +------------------------ + +The version of the Linux kernel. Used to find the corresponding source +code from which the kernel has been built. For example, crash uses it to +find the corresponding vmlinux in order to process vmcore. + +PAGE_SIZE +--------- + +The size of a page. It is the smallest unit of data used by the memory +management facilities. It is usually 4096 bytes of size and a page is +aligned on 4096 bytes. Used for computing page addresses. + +init_uts_ns +----------- + +The UTS namespace which is used to isolate two specific elements of the +system that relate to the uname(2) system call. It is named after the +data structure used to store information returned by the uname(2) system +call. + +User-space tools can get the kernel name, host name, kernel release +number, kernel version, architecture name and OS type from it. + +node_online_map +--------------- + +An array node_states[N_ONLINE] which represents the set of online nodes +in a system, one bit position per node number. Used to keep track of +which nodes are in the system and online. + +swapper_pg_dir +------------- + +The global page directory pointer of the kernel. Used to translate +virtual to physical addresses. + +_stext +------ + +Defines the beginning of the text section. In general, _stext indicates +the kernel start address. 
Used to convert a virtual address from the +direct kernel map to a physical address. + +vmap_area_list +-------------- + +Stores the virtual area list. makedumpfile gets the vmalloc start value +from this variable and its value is necessary for vmalloc translation. + +mem_map +------- + +Physical addresses are translated to struct pages by treating them as +an index into the mem_map array. Right-shifting a physical address +PAGE_SHIFT bits converts it into a page frame number which is an index +into that mem_map array. + +Used to map an address to the corresponding struct page. + +contig_page_data +---------------- + +Makedumpfile gets the pglist_data structure from this symbol, which is +used to describe the memory layout. + +User-space tools use this to exclude free pages when dumping memory. + +mem_section|(mem_section, NR_SECTION_ROOTS)|(mem_section, section_mem_map) +-------------------------------------------------------------------------- + +The address of the mem_section array, its length, structure size, and +the section_mem_map offset. + +It exists in the sparse memory mapping model, and it is also somewhat +similar to the mem_map variable, both of them are used to translate an +address. + +page +---- + +The size of a page structure. struct page is an important data structure +and it is widely used to compute contiguous memory. + +pglist_data +----------- + +The size of a pglist_data structure. This value is used to check if the +pglist_data structure is valid. It is also used for checking the memory +type. + +zone +---- + +The size of a zone structure. This value is used to check if the zone +structure has been found. It is also used for excluding free pages. + +free_area +--------- + +The size of a free_area structure. It indicates whether the free_area +structure is valid or not. Useful when excluding free pages. + +list_head +--------- + +The size of a list_head structure. Used when iterating lists in a +post-mortem analysis session. + +nodemask_t +---------- + +The size of a nodemask_t type. Used to compute the number of online +nodes. + +(page, flags|_refcount|mapping|lru|_mapcount|private|compound_dtor| + compound_order|compound_head) +------------------------------------------------------------------- + +User-space tools compute their values based on the offset of these +variables. The variables are used when excluding unnecessary pages. + +(pglist_data, node_zones|nr_zones|node_mem_map|node_start_pfn|node_ + spanned_pages|node_id) +------------------------------------------------------------------- + +On NUMA machines, each NUMA node has a pg_data_t to describe its memory +layout. On UMA machines there is a single pglist_data which describes the +whole memory. + +These values are used to check the memory type and to compute the +virtual address for memory map. + +(zone, free_area|vm_stat|spanned_pages) +--------------------------------------- + +Each node is divided into a number of blocks called zones which +represent ranges within memory. A zone is described by a structure zone. + +User-space tools compute required values based on the offset of these +variables. + +(free_area, free_list) +---------------------- + +Offset of the free_list's member. This value is used to compute the number +of free pages. + +Each zone has a free_area structure array called free_area[MAX_ORDER]. +The free_list represents a linked list of free page blocks. + +(list_head, next|prev) +---------------------- + +Offsets of the list_head's members. 
list_head is used to define a +circular linked list. User-space tools need these in order to traverse +lists. + +(vmap_area, va_start|list) +-------------------------- + +Offsets of the vmap_area's members. They carry vmalloc-specific +information. Makedumpfile gets the start address of the vmalloc region +from this. + +(zone.free_area, MAX_ORDER) +--------------------------- + +Free areas descriptor. User-space tools use this value to iterate the +free_area ranges. MAX_ORDER is used by the zone buddy allocator. + +log_first_idx +------------- + +Index of the first record stored in the buffer log_buf. Used by +user-space tools to read the strings in the log_buf. + +log_buf +------- + +Console output is written to the ring buffer log_buf at index +log_first_idx. Used to get the kernel log. + +log_buf_len +----------- + +log_buf's length. + +clear_idx +--------- + +The index that the next printk() record to read after the last clear +command. It indicates the first record after the last SYSLOG_ACTION +_CLEAR, like issued by 'dmesg -c'. Used by user-space tools to dump +the dmesg log. + +log_next_idx +------------ + +The index of the next record to store in the buffer log_buf. Used to +compute the index of the current buffer position. + +printk_log +---------- + +The size of a structure printk_log. Used to compute the size of +messages, and extract dmesg log. It encapsulates header information for +log_buf, such as timestamp, syslog level, etc. + +(printk_log, ts_nsec|len|text_len|dict_len) +------------------------------------------- + +It represents field offsets in struct printk_log. User space tools +parse it and check whether the values of printk_log's members have been +changed. + +(free_area.free_list, MIGRATE_TYPES) +------------------------------------ + +The number of migrate types for pages. The free_list is described by the +array. Used by tools to compute the number of free pages. + +NR_FREE_PAGES +------------- + +On linux-2.6.21 or later, the number of free pages is in +vm_stat[NR_FREE_PAGES]. Used to get the number of free pages. + +PG_lru|PG_private|PG_swapcache|PG_swapbacked|PG_slab|PG_hwpoision +|PG_head_mask|PAGE_BUDDY_MAPCOUNT_VALUE(~PG_buddy) +|PAGE_OFFLINE_MAPCOUNT_VALUE(~PG_offline) +----------------------------------------------------------------- + +Page attributes. These flags are used to filter various unnecessary for +dumping pages. + +HUGETLB_PAGE_DTOR +----------------- + +The HUGETLB_PAGE_DTOR flag denotes hugetlbfs pages. Makedumpfile +excludes these pages. + +====== +x86_64 +====== + +phys_base +--------- + +Used to convert the virtual address of an exported kernel symbol to its +corresponding physical address. + +init_top_pgt +------------ + +Used to walk through the whole page table and convert virtual addresses +to physical addresses. The init_top_pgt is somewhat similar to +swapper_pg_dir, but it is only used in x86_64. + +pgtable_l5_enabled +------------------ + +User-space tools need to know whether the crash kernel was in 5-level +paging mode. + +node_data +--------- + +This is a struct pglist_data array and stores all NUMA nodes +information. Makedumpfile gets the pglist_data structure from it. + +(node_data, MAX_NUMNODES) +------------------------- + +The maximum number of nodes in system. + +KERNELOFFSET +------------ + +The kernel randomization offset. Used to compute the page offset. If +KASLR is disabled, this value is zero. + +KERNEL_IMAGE_SIZE +----------------- + +Currently unused by Makedumpfile. 
Used to compute the module virtual +address by Crash. + +sme_mask +-------- + +AMD-specific with SME support: it indicates the secure memory encryption +mask. Makedumpfile tools need to know whether the crash kernel was +encrypted. If SME is enabled in the first kernel, the crash kernel's +page table entries (pgd/pud/pmd/pte) contain the memory encryption +mask. This is used to remove the SME mask and obtain the true physical +address. + +Currently, sme_mask stores the value of the C-bit position. If needed, +additional SME-relevant info can be placed in that variable. + +For example: +[ misc ][ enc bit ][ other misc SME info ] +0000_0000_0000_0000_1000_0000_0000_0000_0000_0000_..._0000 +63 59 55 51 47 43 39 35 31 27 ... 3 + +====== +x86_32 +====== + +X86_PAE +------- + +Denotes whether physical address extensions are enabled. It has the cost +of a higher page table lookup overhead, and also consumes more page +table space per process. Used to check whether PAE was enabled in the +crash kernel when converting virtual addresses to physical addresses. + +==== +ia64 +==== + +pgdat_list|(pgdat_list, MAX_NUMNODES) +------------------------------------- + +pg_data_t array storing all NUMA nodes information. MAX_NUMNODES +indicates the number of the nodes. + +node_memblk|(node_memblk, NR_NODE_MEMBLKS) +------------------------------------------ + +List of node memory chunks. Filled when parsing the SRAT table to obtain +information about memory nodes. NR_NODE_MEMBLKS indicates the number of +node memory chunks. + +These values are used to compute the number of nodes the crashed kernel used. + +node_memblk_s|(node_memblk_s, start_paddr)|(node_memblk_s, size) +---------------------------------------------------------------- + +The size of a struct node_memblk_s and the offsets of the +node_memblk_s's members. Used to compute the number of nodes. + +PGTABLE_3|PGTABLE_4 +------------------- + +User-space tools need to know whether the crash kernel was in 3-level or +4-level paging mode. Used to distinguish the page table. + +===== +ARM64 +===== + +VA_BITS +------- + +The maximum number of bits for virtual addresses. Used to compute the +virtual memory ranges. + +kimage_voffset +-------------- + +The offset between the kernel virtual and physical mappings. Used to +translate virtual to physical addresses. + +PHYS_OFFSET +----------- + +Indicates the physical address of the start of memory. Similar to +kimage_voffset, which is used to translate virtual to physical +addresses. + +KERNELOFFSET +------------ + +The kernel randomization offset. Used to compute the page offset. If +KASLR is disabled, this value is zero. + +==== +arm +==== + +ARM_LPAE +-------- + +It indicates whether the crash kernel supports large physical address +extensions. Used to translate virtual to physical addresses. + +==== +s390 +==== + +lowcore_ptr +---------- + +An array with a pointer to the lowcore of every CPU. Used to print the +psw and all registers information. + +high_memory +----------- + +Used to get the vmalloc_start address from the high_memory symbol. + +(lowcore_ptr, NR_CPUS) +---------------------- + +The maximum number of CPUs. + +======= +powerpc +======= + + +node_data|(node_data, MAX_NUMNODES) +----------------------------------- + +See above. + +contig_page_data +---------------- + +See above. + +vmemmap_list +------------ + +The vmemmap_list maintains the entire vmemmap physical mapping. Used +to get vmemmap list count and populated vmemmap regions info. 
If the +vmemmap address translation information is stored in the crash kernel, +it is used to translate vmemmap kernel virtual addresses. + +mmu_vmemmap_psize +----------------- + +The size of a page. Used to translate virtual to physical addresses. + +mmu_psize_defs +-------------- + +Page size definitions, i.e. 4k, 64k, or 16M. + +Used to make vtop translations. + +vmemmap_backing|(vmemmap_backing, list)|(vmemmap_backing, phys)| +(vmemmap_backing, virt_addr) +---------------------------------------------------------------- + +The vmemmap virtual address space management does not have a traditional +page table to track which virtual struct pages are backed by a physical +mapping. The virtual to physical mappings are tracked in a simple linked +list format. + +User-space tools need to know the offset of list, phys and virt_addr +when computing the count of vmemmap regions. + +mmu_psize_def|(mmu_psize_def, shift) +------------------------------------ + +The size of a struct mmu_psize_def and the offset of mmu_psize_def's +member. + +Used in vtop translations. + +== +sh +== + +node_data|(node_data, MAX_NUMNODES) +----------------------------------- + +See above. + +X2TLB +----- + +Indicates whether the crashed kernel enabled SH extended mode. diff --git a/Documentation/laptops/lg-laptop.rst b/Documentation/laptops/lg-laptop.rst index e486fe7ddc35..aa503ee9b3bc 100644 --- a/Documentation/laptops/lg-laptop.rst +++ b/Documentation/laptops/lg-laptop.rst @@ -1,4 +1,5 @@ .. SPDX-License-Identifier: GPL-2.0+ + LG Gram laptop extra features ============================= @@ -9,6 +10,7 @@ Hotkeys ------- The following FN keys are ignored by the kernel without this driver: + - FN-F1 (LG control panel) - Generates F15 - FN-F5 (Touchpad toggle) - Generates F13 - FN-F6 (Airplane mode) - Generates RFKILL @@ -16,7 +18,7 @@ The following FN keys are ignored by the kernel without this driver: This key also changes keyboard backlight mode. - FN-F9 (Reader mode) - Generates F14 -The rest of the FN key work without a need for a special driver. +The rest of the FN keys work without a need for a special driver. Reader mode diff --git a/Documentation/livepatch/callbacks.txt b/Documentation/livepatch/callbacks.txt index c9776f48e458..182e31d4abce 100644 --- a/Documentation/livepatch/callbacks.txt +++ b/Documentation/livepatch/callbacks.txt @@ -118,488 +118,9 @@ similar change to their hw_features value. (Client functions of the value may need to be updated accordingly.) -Test cases -========== - -What follows is not an exhaustive test suite of every possible livepatch -pre/post-(un)patch combination, but a selection that demonstrates a few -important concepts. Each test case uses the kernel modules located in -the samples/livepatch/ and assumes that no livepatches are loaded at the -beginning of the test. - - -Test 1 ------- - -Test a combination of loading a kernel module and a livepatch that -patches a function in the first module. (Un)load the target module -before the livepatch module: - -- load target module -- load livepatch -- disable livepatch -- unload target module -- unload livepatch - -First load a target module: - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 34.475708] livepatch_callbacks_mod: livepatch_callbacks_mod_init - -On livepatch enable, before the livepatch transition starts, pre-patch -callbacks are executed for vmlinux and livepatch_callbacks_mod (those -klp_objects currently loaded). 
After klp_objects are patched according -to the klp_patch, their post-patch callbacks run and the transition -completes: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 36.503719] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 36.504213] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 36.504238] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 36.504721] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 36.505849] livepatch: 'livepatch_callbacks_demo': starting patching transition - [ 37.727133] livepatch: 'livepatch_callbacks_demo': completing patching transition - [ 37.727232] livepatch_callbacks_demo: post_patch_callback: vmlinux - [ 37.727860] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 37.728792] livepatch: 'livepatch_callbacks_demo': patching complete - -Similarly, on livepatch disable, pre-patch callbacks run before the -unpatching transition starts. klp_objects are reverted, post-patch -callbacks execute and the transition completes: - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 38.510209] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition - [ 38.510234] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux - [ 38.510982] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 38.512209] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 39.711132] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 39.711210] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 39.711779] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 39.712735] livepatch: 'livepatch_callbacks_demo': unpatching complete - - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - % rmmod samples/livepatch/livepatch-callbacks-mod.ko - [ 42.534183] livepatch_callbacks_mod: livepatch_callbacks_mod_exit - - -Test 2 ------- - -This test is similar to the previous test, but (un)load the livepatch -module before the target kernel module. 
This tests the livepatch core's -module_coming handler: - -- load livepatch -- load target module -- disable livepatch -- unload livepatch -- unload target module - - -On livepatch enable, only pre/post-patch callbacks are executed for -currently loaded klp_objects, in this case, vmlinux: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 44.553328] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 44.553997] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 44.554049] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 44.554845] livepatch: 'livepatch_callbacks_demo': starting patching transition - [ 45.727128] livepatch: 'livepatch_callbacks_demo': completing patching transition - [ 45.727212] livepatch_callbacks_demo: post_patch_callback: vmlinux - [ 45.727961] livepatch: 'livepatch_callbacks_demo': patching complete - -When a targeted module is subsequently loaded, only its pre/post-patch -callbacks are executed: - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 46.560845] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod' - [ 46.561988] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 46.563452] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 46.565495] livepatch_callbacks_mod: livepatch_callbacks_mod_init - -On livepatch disable, all currently loaded klp_objects' (vmlinux and -livepatch_callbacks_mod) pre/post-unpatch callbacks are executed: - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 48.568885] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition - [ 48.568910] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux - [ 48.569441] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 48.570502] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 49.759091] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 49.759171] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 49.759742] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 49.760690] livepatch: 'livepatch_callbacks_demo': unpatching complete - - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - % rmmod samples/livepatch/livepatch-callbacks-mod.ko - [ 52.592283] livepatch_callbacks_mod: livepatch_callbacks_mod_exit - - -Test 3 ------- - -Test loading the livepatch after a targeted kernel module, then unload -the kernel module before disabling the livepatch. 
This tests the -livepatch core's module_going handler: - -- load target module -- load livepatch -- unload target module -- disable livepatch -- unload livepatch - -First load a target module, then the livepatch: - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 54.607948] livepatch_callbacks_mod: livepatch_callbacks_mod_init - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 56.613919] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 56.614411] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 56.614436] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 56.614818] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 56.615656] livepatch: 'livepatch_callbacks_demo': starting patching transition - [ 57.759070] livepatch: 'livepatch_callbacks_demo': completing patching transition - [ 57.759147] livepatch_callbacks_demo: post_patch_callback: vmlinux - [ 57.759621] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_LIVE] Normal state - [ 57.760307] livepatch: 'livepatch_callbacks_demo': patching complete - -When a target module is unloaded, the livepatch is only reverted from -that klp_object (livepatch_callbacks_mod). As such, only its pre and -post-unpatch callbacks are executed when this occurs: - - % rmmod samples/livepatch/livepatch-callbacks-mod.ko - [ 58.623409] livepatch_callbacks_mod: livepatch_callbacks_mod_exit - [ 58.623903] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away - [ 58.624658] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod' - [ 58.625305] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away - -When the livepatch is disabled, pre and post-unpatch callbacks are run -for the remaining klp_object, vmlinux: - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 60.638420] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition - [ 60.638444] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux - [ 60.638996] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 61.727088] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 61.727165] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 61.727985] livepatch: 'livepatch_callbacks_demo': unpatching complete - - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - - -Test 4 ------- - -This test is similar to the previous test, however the livepatch is -loaded first. 
This tests the livepatch core's module_coming and -module_going handlers: - -- load livepatch -- load target module -- unload target module -- disable livepatch -- unload livepatch - -First load the livepatch: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 64.661552] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 64.662147] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 64.662175] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 64.662850] livepatch: 'livepatch_callbacks_demo': starting patching transition - [ 65.695056] livepatch: 'livepatch_callbacks_demo': completing patching transition - [ 65.695147] livepatch_callbacks_demo: post_patch_callback: vmlinux - [ 65.695561] livepatch: 'livepatch_callbacks_demo': patching complete - -When a targeted kernel module is subsequently loaded, only its -pre/post-patch callbacks are executed: - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 66.669196] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod' - [ 66.669882] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 66.670744] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 66.672873] livepatch_callbacks_mod: livepatch_callbacks_mod_init - -When the target module is unloaded, the livepatch is only reverted from -the livepatch_callbacks_mod klp_object. As such, only pre and -post-unpatch callbacks are executed when this occurs: - - % rmmod samples/livepatch/livepatch-callbacks-mod.ko - [ 68.680065] livepatch_callbacks_mod: livepatch_callbacks_mod_exit - [ 68.680688] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away - [ 68.681452] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod' - [ 68.682094] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 70.689225] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition - [ 70.689256] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux - [ 70.689882] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 71.711080] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 71.711481] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 71.711988] livepatch: 'livepatch_callbacks_demo': unpatching complete - - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - - -Test 5 ------- - -A simple test of loading a livepatch without one of its patch target -klp_objects ever loaded (livepatch_callbacks_mod): - -- load livepatch -- disable livepatch -- unload livepatch - -Load the livepatch: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 74.711081] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 74.711595] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 74.711639] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 74.712272] livepatch: 'livepatch_callbacks_demo': starting patching transition - [ 75.743137] livepatch: 'livepatch_callbacks_demo': completing patching transition - [ 75.743219] livepatch_callbacks_demo: post_patch_callback: vmlinux - [ 75.743867] livepatch: 'livepatch_callbacks_demo': patching complete - -As 
expected, only pre/post-(un)patch handlers are executed for vmlinux: - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 76.716254] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition - [ 76.716278] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux - [ 76.716666] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 77.727089] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 77.727194] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 77.727907] livepatch: 'livepatch_callbacks_demo': unpatching complete +Other Examples +============== - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - - -Test 6 ------- - -Test a scenario where a vmlinux pre-patch callback returns a non-zero -status (ie, failure): - -- load target module -- load livepatch -ENODEV -- unload target module - -First load a target module: - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 80.740520] livepatch_callbacks_mod: livepatch_callbacks_mod_init - -Load the livepatch module, setting its 'pre_patch_ret' value to -19 -(-ENODEV). When its vmlinux pre-patch callback executed, this status -code will propagate back to the module-loading subsystem. The result is -that the insmod command refuses to load the livepatch module: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko pre_patch_ret=-19 - [ 82.747326] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 82.747743] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 82.747767] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 82.748237] livepatch: pre-patch callback failed for object 'vmlinux' - [ 82.748637] livepatch: failed to enable patch 'livepatch_callbacks_demo' - [ 82.749059] livepatch: 'livepatch_callbacks_demo': canceling transition, going to unpatch - [ 82.749060] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 82.749868] livepatch: 'livepatch_callbacks_demo': unpatching complete - [ 82.765809] insmod: ERROR: could not insert module samples/livepatch/livepatch-callbacks-demo.ko: No such device - - % rmmod samples/livepatch/livepatch-callbacks-mod.ko - [ 84.774238] livepatch_callbacks_mod: livepatch_callbacks_mod_exit - - -Test 7 ------- - -Similar to the previous test, setup a livepatch such that its vmlinux -pre-patch callback returns success. 
However, when a targeted kernel -module is later loaded, have the livepatch return a failing status code: - -- load livepatch -- setup -ENODEV -- load target module -- disable livepatch -- unload livepatch - -Load the livepatch, notice vmlinux pre-patch callback succeeds: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 86.787845] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 86.788325] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 86.788427] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 86.788821] livepatch: 'livepatch_callbacks_demo': starting patching transition - [ 87.711069] livepatch: 'livepatch_callbacks_demo': completing patching transition - [ 87.711143] livepatch_callbacks_demo: post_patch_callback: vmlinux - [ 87.711886] livepatch: 'livepatch_callbacks_demo': patching complete - -Set a trap so subsequent pre-patch callbacks to this livepatch will -return -ENODEV: - - % echo -19 > /sys/module/livepatch_callbacks_demo/parameters/pre_patch_ret - -The livepatch pre-patch callback for subsequently loaded target modules -will return failure, so the module loader refuses to load the kernel -module. Notice that no post-patch or pre/post-unpatch callbacks are -executed for this klp_object: - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 90.796976] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod' - [ 90.797834] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 90.798900] livepatch: pre-patch callback failed for object 'livepatch_callbacks_mod' - [ 90.799652] livepatch: patch 'livepatch_callbacks_demo' failed for module 'livepatch_callbacks_mod', refusing to load module 'livepatch_callbacks_mod' - [ 90.819737] insmod: ERROR: could not insert module samples/livepatch/livepatch-callbacks-mod.ko: No such device - -However, pre/post-unpatch callbacks run for the vmlinux klp_object: - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 92.823547] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition - [ 92.823573] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux - [ 92.824331] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 93.727128] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 93.727327] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 93.727861] livepatch: 'livepatch_callbacks_demo': unpatching complete - - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - - -Test 8 ------- - -Test loading multiple targeted kernel modules. This test-case is -mainly for comparing with the next test-case. - -- load busy target module (0s sleep), -- load livepatch -- load target module -- unload target module -- disable livepatch -- unload livepatch -- unload busy target module - - -Load a target "busy" kernel module which kicks off a worker function -that immediately exits: - - % insmod samples/livepatch/livepatch-callbacks-busymod.ko sleep_secs=0 - [ 96.910107] livepatch_callbacks_busymod: livepatch_callbacks_mod_init - [ 96.910600] livepatch_callbacks_busymod: busymod_work_func, sleeping 0 seconds ... 
- [ 96.913024] livepatch_callbacks_busymod: busymod_work_func exit - -Proceed with loading the livepatch and another ordinary target module, -notice that the post-patch callbacks are executed and the transition -completes quickly: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 98.917892] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 98.918426] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 98.918453] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 98.918955] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state - [ 98.923835] livepatch: 'livepatch_callbacks_demo': starting patching transition - [ 99.743104] livepatch: 'livepatch_callbacks_demo': completing patching transition - [ 99.743156] livepatch_callbacks_demo: post_patch_callback: vmlinux - [ 99.743679] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state - [ 99.744616] livepatch: 'livepatch_callbacks_demo': patching complete - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 100.930955] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod' - [ 100.931668] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 100.932645] livepatch_callbacks_demo: post_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 100.934125] livepatch_callbacks_mod: livepatch_callbacks_mod_init - - % rmmod samples/livepatch/livepatch-callbacks-mod.ko - [ 102.942805] livepatch_callbacks_mod: livepatch_callbacks_mod_exit - [ 102.943640] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away - [ 102.944585] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod' - [ 102.945455] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 104.953815] livepatch: 'livepatch_callbacks_demo': initializing unpatching transition - [ 104.953838] livepatch_callbacks_demo: pre_unpatch_callback: vmlinux - [ 104.954431] livepatch_callbacks_demo: pre_unpatch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state - [ 104.955426] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 106.719073] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 106.722633] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 106.723282] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state - [ 106.724279] livepatch: 'livepatch_callbacks_demo': unpatching complete - - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - % rmmod samples/livepatch/livepatch-callbacks-busymod.ko - [ 108.975660] livepatch_callbacks_busymod: livepatch_callbacks_mod_exit - - -Test 9 ------- - -A similar test as the previous one, but force the "busy" kernel module -to do longer work. - -The livepatching core will refuse to patch a task that is currently -executing a to-be-patched function -- the consistency model stalls the -current patch transition until this safety-check is met. Test a -scenario where one of a livepatch's target klp_objects sits on such a -function for a long time. 
Meanwhile, load and unload other target -kernel modules while the livepatch transition is in progress. - -- load busy target module (30s sleep) -- load livepatch -- load target module -- unload target module -- disable livepatch -- unload livepatch -- unload busy target module - - -Load the "busy" kernel module, this time make it do 30 seconds worth of -work: - - % insmod samples/livepatch/livepatch-callbacks-busymod.ko sleep_secs=30 - [ 110.993362] livepatch_callbacks_busymod: livepatch_callbacks_mod_init - [ 110.994059] livepatch_callbacks_busymod: busymod_work_func, sleeping 30 seconds ... - -Meanwhile, the livepatch is loaded. Notice that the patch transition -does not complete as the targeted "busy" module is sitting on a -to-be-patched function: - - % insmod samples/livepatch/livepatch-callbacks-demo.ko - [ 113.000309] livepatch: enabling patch 'livepatch_callbacks_demo' - [ 113.000764] livepatch: 'livepatch_callbacks_demo': initializing patching transition - [ 113.000791] livepatch_callbacks_demo: pre_patch_callback: vmlinux - [ 113.001289] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state - [ 113.005208] livepatch: 'livepatch_callbacks_demo': starting patching transition - -Load a second target module (this one is an ordinary idle kernel -module). Note that *no* post-patch callbacks will be executed while the -livepatch is still in transition: - - % insmod samples/livepatch/livepatch-callbacks-mod.ko - [ 115.012740] livepatch: applying patch 'livepatch_callbacks_demo' to loading module 'livepatch_callbacks_mod' - [ 115.013406] livepatch_callbacks_demo: pre_patch_callback: livepatch_callbacks_mod -> [MODULE_STATE_COMING] Full formed, running module_init - [ 115.015315] livepatch_callbacks_mod: livepatch_callbacks_mod_init - -Request an unload of the simple kernel module. The patch is still -transitioning, so its pre-unpatch callbacks are skipped: - - % rmmod samples/livepatch/livepatch-callbacks-mod.ko - [ 117.022626] livepatch_callbacks_mod: livepatch_callbacks_mod_exit - [ 117.023376] livepatch: reverting patch 'livepatch_callbacks_demo' on unloading module 'livepatch_callbacks_mod' - [ 117.024533] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_mod -> [MODULE_STATE_GOING] Going away - -Finally the livepatch is disabled. Since none of the patch's -klp_object's post-patch callbacks executed, the remaining klp_object's -pre-unpatch callbacks are skipped: - - % echo 0 > /sys/kernel/livepatch/livepatch_callbacks_demo/enabled - [ 119.035408] livepatch: 'livepatch_callbacks_demo': reversing transition from patching to unpatching - [ 119.035485] livepatch: 'livepatch_callbacks_demo': starting unpatching transition - [ 119.711166] livepatch: 'livepatch_callbacks_demo': completing unpatching transition - [ 119.714179] livepatch_callbacks_demo: post_unpatch_callback: vmlinux - [ 119.714653] livepatch_callbacks_demo: post_unpatch_callback: livepatch_callbacks_busymod -> [MODULE_STATE_LIVE] Normal state - [ 119.715437] livepatch: 'livepatch_callbacks_demo': unpatching complete - - % rmmod samples/livepatch/livepatch-callbacks-demo.ko - % rmmod samples/livepatch/livepatch-callbacks-busymod.ko - [ 141.279111] livepatch_callbacks_busymod: busymod_work_func exit - [ 141.279760] livepatch_callbacks_busymod: livepatch_callbacks_mod_exit +Sample livepatch modules demonstrating the callback API can be found in +samples/livepatch/ directory. 
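In rough outline, such a module wires its callbacks up like the trimmed sketch below (identifiers such as demo_pre_patch are illustrative, the function redirections are omitted, and the samples above remain the authoritative code):

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/livepatch.h>

    /* Runs before an object is patched; returning non-zero aborts the
     * transition (this is what the pre_patch_ret=-19 tests exercise). */
    static int demo_pre_patch(struct klp_object *obj)
    {
            pr_info("pre_patch: %s\n",
                    klp_is_module(obj) ? obj->name : "vmlinux");
            return 0;
    }

    /* Runs after an object has been unpatched. */
    static void demo_post_unpatch(struct klp_object *obj)
    {
            pr_info("post_unpatch: %s\n",
                    klp_is_module(obj) ? obj->name : "vmlinux");
    }

    static struct klp_func funcs[] = {
            { /* function redirections omitted from this sketch */ }
    };

    static struct klp_object objs[] = {
            {
                    .name = NULL,           /* NULL selects vmlinux */
                    .funcs = funcs,
                    .callbacks = {
                            .pre_patch = demo_pre_patch,
                            .post_unpatch = demo_post_unpatch,
                    },
            },
            {
                    .name = "livepatch_callbacks_mod",
                    .funcs = funcs,
                    .callbacks = {
                            .pre_patch = demo_pre_patch,
                            .post_unpatch = demo_post_unpatch,
                    },
            },
            { }
    };

    static struct klp_patch patch = {
            .mod = THIS_MODULE,
            .objs = objs,
    };

    static int demo_init(void)
    {
            return klp_enable_patch(&patch);
    }

    module_init(demo_init);
    MODULE_LICENSE("GPL");
    MODULE_INFO(livepatch, "Y");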
These samples were modified for use in +kselftests and can be found in the lib/livepatch directory. diff --git a/Documentation/livepatch/cumulative-patches.txt b/Documentation/livepatch/cumulative-patches.txt new file mode 100644 index 000000000000..0012808e8d44 --- /dev/null +++ b/Documentation/livepatch/cumulative-patches.txt @@ -0,0 +1,102 @@ +=================================== +Atomic Replace & Cumulative Patches +=================================== + +There might be dependencies between livepatches. If multiple patches need +to do different changes to the same function(s) then we need to define +an order in which the patches will be installed. And function implementations +from any newer livepatch must be done on top of the older ones. + +This might become a maintenance nightmare. Especially when more patches +modified the same function in different ways. + +An elegant solution comes with the feature called "Atomic Replace". It allows +creation of so called "Cumulative Patches". They include all wanted changes +from all older livepatches and completely replace them in one transition. + +Usage +----- + +The atomic replace can be enabled by setting "replace" flag in struct klp_patch, +for example: + + static struct klp_patch patch = { + .mod = THIS_MODULE, + .objs = objs, + .replace = true, + }; + +All processes are then migrated to use the code only from the new patch. +Once the transition is finished, all older patches are automatically +disabled. + +Ftrace handlers are transparently removed from functions that are no +longer modified by the new cumulative patch. + +As a result, the livepatch authors might maintain sources only for one +cumulative patch. It helps to keep the patch consistent while adding or +removing various fixes or features. + +Users could keep only the last patch installed on the system after +the transition to has finished. It helps to clearly see what code is +actually in use. Also the livepatch might then be seen as a "normal" +module that modifies the kernel behavior. The only difference is that +it can be updated at runtime without breaking its functionality. + + +Features +-------- + +The atomic replace allows: + + + Atomically revert some functions in a previous patch while + upgrading other functions. + + + Remove eventual performance impact caused by core redirection + for functions that are no longer patched. + + + Decrease user confusion about dependencies between livepatches. + + +Limitations: +------------ + + + Once the operation finishes, there is no straightforward way + to reverse it and restore the replaced patches atomically. + + A good practice is to set .replace flag in any released livepatch. + Then re-adding an older livepatch is equivalent to downgrading + to that patch. This is safe as long as the livepatches do _not_ do + extra modifications in (un)patching callbacks or in the module_init() + or module_exit() functions, see below. + + Also note that the replaced patch can be removed and loaded again + only when the transition was not forced. + + + + Only the (un)patching callbacks from the _new_ cumulative livepatch are + executed. Any callbacks from the replaced patches are ignored. + + In other words, the cumulative patch is responsible for doing any actions + that are necessary to properly replace any older patch. + + As a result, it might be dangerous to replace newer cumulative patches by + older ones. The old livepatches might not provide the necessary callbacks. + + This might be seen as a limitation in some scenarios. 
But it makes life + easier in many others. Only the new cumulative livepatch knows what + fixes/features are added/removed and what special actions are necessary + for a smooth transition. + + In any case, it would be a nightmare to think about the order of + the various callbacks and their interactions if the callbacks from all + enabled patches were called. + + + + There is no special handling of shadow variables. Livepatch authors + must create their own rules how to pass them from one cumulative + patch to the other. Especially that they should not blindly remove + them in module_exit() functions. + + A good practice might be to remove shadow variables in the post-unpatch + callback. It is called only when the livepatch is properly disabled. diff --git a/Documentation/livepatch/livepatch.txt b/Documentation/livepatch/livepatch.txt index 2d7ed09dbd59..4627b41ff02e 100644 --- a/Documentation/livepatch/livepatch.txt +++ b/Documentation/livepatch/livepatch.txt @@ -12,12 +12,12 @@ Table of Contents: 4. Livepatch module 4.1. New functions 4.2. Metadata - 4.3. Livepatch module handling 5. Livepatch life-cycle - 5.1. Registration + 5.1. Loading 5.2. Enabling - 5.3. Disabling - 5.4. Unregistration + 5.3. Replacing + 5.4. Disabling + 5.5. Removing 6. Sysfs 7. Limitations @@ -143,9 +143,9 @@ without HAVE_RELIABLE_STACKTRACE are not considered fully supported by the kernel livepatching. The /sys/kernel/livepatch//transition file shows whether a patch -is in transition. Only a single patch (the topmost patch on the stack) -can be in transition at a given time. A patch can remain in transition -indefinitely, if any of the tasks are stuck in the initial patch state. +is in transition. Only a single patch can be in transition at a given +time. A patch can remain in transition indefinitely, if any of the tasks +are stuck in the initial patch state. A transition can be reversed and effectively canceled by writing the opposite value to the /sys/kernel/livepatch//enabled file while @@ -158,12 +158,11 @@ If a patch is in transition, this file shows 0 to indicate the task is unpatched and 1 to indicate it's patched. Otherwise, if no patch is in transition, it shows -1. Any tasks which are blocking the transition can be signaled with SIGSTOP and SIGCONT to force them to change their -patched state. This may be harmful to the system though. -/sys/kernel/livepatch//signal attribute provides a better alternative. -Writing 1 to the attribute sends a fake signal to all remaining blocking -tasks. No proper signal is actually delivered (there is no data in signal -pending structures). Tasks are interrupted or woken up, and forced to change -their patched state. +patched state. This may be harmful to the system though. Sending a fake signal +to all remaining blocking tasks is a better alternative. No proper signal is +actually delivered (there is no data in signal pending structures). Tasks are +interrupted or woken up, and forced to change their patched state. The fake +signal is automatically sent every 15 seconds. Administrator can also affect a transition through /sys/kernel/livepatch//force attribute. Writing 1 there clears @@ -298,117 +297,110 @@ into three levels: see the "Consistency model" section. -4.3. Livepatch module handling ------------------------------- - -The usual behavior is that the new functions will get used when -the livepatch module is loaded. For this, the module init() function -has to register the patch (struct klp_patch) and enable it. 
See the -section "Livepatch life-cycle" below for more details about these -two operations. - -Module removal is only safe when there are no users of the underlying -functions. This is the reason why the force feature permanently disables -the removal. The forced tasks entered the functions but we cannot say -that they returned back. Therefore it cannot be decided when the -livepatch module can be safely removed. When the system is successfully -transitioned to a new patch state (patched/unpatched) without being -forced it is guaranteed that no task sleeps or runs in the old code. - - 5. Livepatch life-cycle ======================= -Livepatching defines four basic operations that define the life cycle of each -live patch: registration, enabling, disabling and unregistration. There are -several reasons why it is done this way. +Livepatching can be described by five basic operations: +loading, enabling, replacing, disabling, removing. -First, the patch is applied only when all patched symbols for already -loaded objects are found. The error handling is much easier if this -check is done before particular functions get redirected. +Where the replacing and the disabling operations are mutually +exclusive. They have the same result for the given patch but +not for the system. -Second, it might take some time until the entire system is migrated with -the hybrid consistency model being used. The patch revert might block -the livepatch module removal for too long. Therefore it is useful to -revert the patch using a separate operation that might be called -explicitly. But it does not make sense to remove all information until -the livepatch module is really removed. +5.1. Loading +------------ -5.1. Registration ------------------ +The only reasonable way is to enable the patch when the livepatch kernel +module is being loaded. For this, klp_enable_patch() has to be called +in the module_init() callback. There are two main reasons: -Each patch first has to be registered using klp_register_patch(). This makes -the patch known to the livepatch framework. Also it does some preliminary -computing and checks. +First, only the module has an easy access to the related struct klp_patch. -In particular, the patch is added into the list of known patches. The -addresses of the patched functions are found according to their names. -The special relocations, mentioned in the section "New functions", are -applied. The relevant entries are created under -/sys/kernel/livepatch/. The patch is rejected when any operation -fails. +Second, the error code might be used to refuse loading the module when +the patch cannot get enabled. 5.2. Enabling ------------- -Registered patches might be enabled either by calling klp_enable_patch() or -by writing '1' to /sys/kernel/livepatch//enabled. The system will -start using the new implementation of the patched functions at this stage. +The livepatch gets enabled by calling klp_enable_patch() from +the module_init() callback. The system will start using the new +implementation of the patched functions at this stage. -When a patch is enabled, livepatch enters into a transition state where -tasks are converging to the patched state. This is indicated by a value -of '1' in /sys/kernel/livepatch//transition. Once all tasks have -been patched, the 'transition' value changes to '0'. For more -information about this process, see the "Consistency model" section. +First, the addresses of the patched functions are found according to their +names. 
The special relocations, mentioned in the section "New functions", +are applied. The relevant entries are created under +/sys/kernel/livepatch/. The patch is rejected when any above +operation fails. -If an original function is patched for the first time, a function -specific struct klp_ops is created and an universal ftrace handler is -registered. +Second, livepatch enters into a transition state where tasks are converging +to the patched state. If an original function is patched for the first +time, a function specific struct klp_ops is created and an universal +ftrace handler is registered[*]. This stage is indicated by a value of '1' +in /sys/kernel/livepatch//transition. For more information about +this process, see the "Consistency model" section. -Functions might be patched multiple times. The ftrace handler is registered -only once for the given function. Further patches just add an entry to the -list (see field `func_stack`) of the struct klp_ops. The last added -entry is chosen by the ftrace handler and becomes the active function -replacement. +Finally, once all tasks have been patched, the 'transition' value changes +to '0'. -Note that the patches might be enabled in a different order than they were -registered. +[*] Note that functions might be patched multiple times. The ftrace handler + is registered only once for a given function. Further patches just add + an entry to the list (see field `func_stack`) of the struct klp_ops. + The right implementation is selected by the ftrace handler, see + the "Consistency model" section. + That said, it is highly recommended to use cumulative livepatches + because they help keeping the consistency of all changes. In this case, + functions might be patched two times only during the transition period. -5.3. Disabling + +5.3. Replacing -------------- -Enabled patches might get disabled either by calling klp_disable_patch() or -by writing '0' to /sys/kernel/livepatch//enabled. At this stage -either the code from the previously enabled patch or even the original -code gets used. +All enabled patches might get replaced by a cumulative patch that +has the .replace flag set. + +Once the new patch is enabled and the 'transition' finishes then +all the functions (struct klp_func) associated with the replaced +patches are removed from the corresponding struct klp_ops. Also +the ftrace handler is unregistered and the struct klp_ops is +freed when the related function is not modified by the new patch +and func_stack list becomes empty. + +See Documentation/livepatch/cumulative-patches.txt for more details. -When a patch is disabled, livepatch enters into a transition state where -tasks are converging to the unpatched state. This is indicated by a -value of '1' in /sys/kernel/livepatch//transition. Once all tasks -have been unpatched, the 'transition' value changes to '0'. For more -information about this process, see the "Consistency model" section. -Here all the functions (struct klp_func) associated with the to-be-disabled +5.4. Disabling +-------------- + +Enabled patches might get disabled by writing '0' to +/sys/kernel/livepatch//enabled. + +First, livepatch enters into a transition state where tasks are converging +to the unpatched state. The system starts using either the code from +the previously enabled patch or even the original one. This stage is +indicated by a value of '1' in /sys/kernel/livepatch//transition. +For more information about this process, see the "Consistency model" +section. 
+ +Second, once all tasks have been unpatched, the 'transition' value changes +to '0'. All the functions (struct klp_func) associated with the to-be-disabled patch are removed from the corresponding struct klp_ops. The ftrace handler is unregistered and the struct klp_ops is freed when the func_stack list becomes empty. -Patches must be disabled in exactly the reverse order in which they were -enabled. It makes the problem and the implementation much easier. - +Third, the sysfs interface is destroyed. -5.4. Unregistration -------------------- -Disabled patches might be unregistered by calling klp_unregister_patch(). -This can be done only when the patch is disabled and the code is no longer -used. It must be called before the livepatch module gets unloaded. +5.5. Removing +------------- -At this stage, all the relevant sys-fs entries are removed and the patch -is removed from the list of known patches. +Module removal is only safe when there are no users of functions provided +by the module. This is the reason why the force feature permanently +disables the removal. Only when the system is successfully transitioned +to a new patch state (patched/unpatched) without being forced it is +guaranteed that no task sleeps or runs in the old code. 6. Sysfs @@ -418,8 +410,8 @@ Information about the registered patches can be found under /sys/kernel/livepatch. The patches could be enabled and disabled by writing there. -/sys/kernel/livepatch//signal and /sys/kernel/livepatch//force -attributes allow administrator to affect a patching operation. +/sys/kernel/livepatch//force attributes allow administrator to affect a +patching operation. See Documentation/ABI/testing/sysfs-kernel-livepatch for more details. diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt index 49f58a07ee7b..39fae143c9cb 100644 --- a/Documentation/locking/lockdep-design.txt +++ b/Documentation/locking/lockdep-design.txt @@ -45,10 +45,10 @@ When locking rules are violated, these state bits are presented in the locking error messages, inside curlies. A contrived example: modprobe/2287 is trying to acquire lock: - (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 + (&sio_locks[i].lock){-.-.}, at: [] mutex_lock+0x21/0x24 but task is already holding lock: - (&sio_locks[i].lock){-.-...}, at: [] mutex_lock+0x21/0x24 + (&sio_locks[i].lock){-.-.}, at: [] mutex_lock+0x21/0x24 The bit position indicates STATE, STATE-read, for each of the states listed diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt index 6fa6a93d0949..f79934225d8d 100644 --- a/Documentation/lzo.txt +++ b/Documentation/lzo.txt @@ -78,16 +78,34 @@ Description is an implementation design choice independent on the algorithm or encoding. +Versions + +0: Original version +1: LZO-RLE + +Version 1 of LZO implements an extension to encode runs of zeros using run +length encoding. This improves speed for data with many zeros, which is a +common case for zram. This modifies the bitstream in a backwards compatible way +(v1 can correctly decompress v0 compressed data, but v0 cannot read v1 data). + +For maximum compatibility, both versions are available under different names +(lzo and lzo-rle). Differences in the encoding are noted in this document with +e.g.: version 1 only. + Byte sequences ============== First byte encoding:: - 0..17 : follow regular instruction encoding, see below. 
It is worth - noting that codes 16 and 17 will represent a block copy from - the dictionary which is empty, and that they will always be + 0..16 : follow regular instruction encoding, see below. It is worth + noting that code 16 will represent a block copy from the + dictionary which is empty, and that it will always be invalid at this place. + 17 : bitstream version. If the first byte is 17, the next byte + gives the bitstream version (version 1 only). If the first byte + is not 17, the bitstream version is 0. + 18..21 : copy 0..3 literals state = (byte - 17) = 0..3 [ copy literals ] skip byte @@ -140,6 +158,11 @@ Byte sequences state = S (copy S literals after this block) End of stream is reached if distance == 16384 + In version 1 only, this instruction is also used to encode a run of + zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. + In this case, it is followed by a fourth byte, X. + run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4. + 0 0 1 L L L L L (32..63) Copy of small block within 16kB distance (preferably less than 34B) length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte) @@ -165,7 +188,9 @@ Authors ======= This document was written by Willy Tarreau on 2014/07/19 during an - analysis of the decompression code available in Linux 3.16-rc5. The code is - tricky, it is possible that this document contains mistakes or that a few - corner cases were overlooked. In any case, please report any doubt, fix, or - proposed updates to the author(s) so that the document can be updated. + analysis of the decompression code available in Linux 3.16-rc5, and updated + by Dave Rodgman on 2018/10/30 to introduce run-length + encoding. The code is tricky, it is possible that this document contains + mistakes or that a few corner cases were overlooked. In any case, please + report any doubt, fix, or proposed updates to the author(s) so that the + document can be updated. diff --git a/Documentation/media/dvb-drivers/dvb-usb.rst b/Documentation/media/dvb-drivers/dvb-usb.rst index 6679191819aa..b2d5d9e62b30 100644 --- a/Documentation/media/dvb-drivers/dvb-usb.rst +++ b/Documentation/media/dvb-drivers/dvb-usb.rst @@ -125,7 +125,7 @@ https://linuxtv.org/wiki/index.php/DVB_USB 2004-12-26 - - refactored the dibusb-driver, splitted into separate files + - refactored the dibusb-driver, split into separate files - i2c-probing enabled 2004-12-06 diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst index 17454a2cf6b0..ac005b46f23e 100644 --- a/Documentation/media/kapi/dtv-core.rst +++ b/Documentation/media/kapi/dtv-core.rst @@ -12,7 +12,7 @@ Digital TV devices are implemented by several different drivers: - Frontend drivers that are usually implemented as two separate drivers: - A tuner driver that implements the logic with commands the part of the - hardware with is reponsible to tune into a digital TV transponder or + hardware with is responsible to tune into a digital TV transponder or physical channel. 
The output of a tuner is usually a baseband or Intermediate Frequency (IF) signal; diff --git a/Documentation/media/kapi/dtv-frontend.rst b/Documentation/media/kapi/dtv-frontend.rst index 8ea64742c7ba..fbc5517c8d5a 100644 --- a/Documentation/media/kapi/dtv-frontend.rst +++ b/Documentation/media/kapi/dtv-frontend.rst @@ -328,7 +328,7 @@ Statistics collect On almost all frontend hardware, the bit and byte counts are stored by the hardware after a certain amount of time or after the total bit/block -counter reaches a certain value (usually programable), for example, on +counter reaches a certain value (usually programmable), for example, on every 1000 ms or after receiving 1,000,000 bits. So, if you read the registers too soon, you'll end by reading the same diff --git a/Documentation/media/kapi/mc-core.rst b/Documentation/media/kapi/mc-core.rst index 0bcfeadbc52d..f930725e0d6b 100644 --- a/Documentation/media/kapi/mc-core.rst +++ b/Documentation/media/kapi/mc-core.rst @@ -60,7 +60,7 @@ Drivers initialize entity pads by calling Drivers register entities with a media device by calling :c:func:`media_device_register_entity()` -and unregistred by calling +and unregistered by calling :c:func:`media_device_unregister_entity()`. Interfaces diff --git a/Documentation/media/kapi/v4l2-device.rst b/Documentation/media/kapi/v4l2-device.rst index c4311f0421be..5e25bf182c18 100644 --- a/Documentation/media/kapi/v4l2-device.rst +++ b/Documentation/media/kapi/v4l2-device.rst @@ -93,7 +93,7 @@ You can iterate over all registered devices as follows: int err; /* Find driver 'ivtv' on the PCI bus. - pci_bus_type is a global. For USB busses use usb_bus_type. */ + pci_bus_type is a global. For USB buses use usb_bus_type. */ drv = driver_find("ivtv", &pci_bus_type); /* iterate over all ivtv device instances */ err = driver_for_each_device(drv, NULL, p, callback); diff --git a/Documentation/media/kapi/v4l2-intro.rst b/Documentation/media/kapi/v4l2-intro.rst index cea3e263e48b..4d54fa9d7a12 100644 --- a/Documentation/media/kapi/v4l2-intro.rst +++ b/Documentation/media/kapi/v4l2-intro.rst @@ -11,7 +11,7 @@ hardware: most devices have multiple ICs, export multiple device nodes in Especially the fact that V4L2 drivers have to setup supporting ICs to do audio/video muxing/encoding/decoding makes it more complex than most. Usually these ICs are connected to the main bridge driver through one or -more I2C busses, but other busses can also be used. Such devices are +more I2C buses, but other buses can also be used. Such devices are called 'sub-devices'. For a long time the framework was limited to the video_device struct for diff --git a/Documentation/media/kapi/v4l2-subdev.rst b/Documentation/media/kapi/v4l2-subdev.rst index be4970909f40..29e07e23f888 100644 --- a/Documentation/media/kapi/v4l2-subdev.rst +++ b/Documentation/media/kapi/v4l2-subdev.rst @@ -23,7 +23,7 @@ device data. You also need a way to go from the low-level struct to :c:type:`v4l2_subdev`. For the common i2c_client struct the i2c_set_clientdata() call is used to store -a :c:type:`v4l2_subdev` pointer, for other busses you may have to use other +a :c:type:`v4l2_subdev` pointer, for other buses you may have to use other methods. Bridges might also need to store per-subdev private data, such as a pointer to @@ -33,7 +33,7 @@ provides host private data for that purpose that can be accessed with From the bridge driver perspective, you load the sub-device module and somehow obtain the :c:type:`v4l2_subdev` pointer. 
For i2c devices this is easy: you call -``i2c_get_clientdata()``. For other busses something similar needs to be done. +``i2c_get_clientdata()``. For other buses something similar needs to be done. Helper functions exists for sub-devices on an I2C bus that do most of this tricky work for you. diff --git a/Documentation/media/lirc.h.rst.exceptions b/Documentation/media/lirc.h.rst.exceptions index 379b9e7df5d0..7a8b8ff4f076 100644 --- a/Documentation/media/lirc.h.rst.exceptions +++ b/Documentation/media/lirc.h.rst.exceptions @@ -60,6 +60,9 @@ ignore symbol RC_PROTO_SHARP ignore symbol RC_PROTO_XMP ignore symbol RC_PROTO_CEC ignore symbol RC_PROTO_IMON +ignore symbol RC_PROTO_RCMM12 +ignore symbol RC_PROTO_RCMM24 +ignore symbol RC_PROTO_RCMM32 # Undocumented macros diff --git a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst index d537da90acf5..d68f05d48d12 100644 --- a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst +++ b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst @@ -57,7 +57,7 @@ Description This ioctl call asks the Audio Device to bypass the Audio decoder and forward the stream without decoding. This mode shall be used if streams -that can’t be handled by the Digial TV system shall be decoded. Dolby +that can’t be handled by the Digital TV system shall be decoded. Dolby DigitalTM streams are automatically forwarded by the Digital TV subsystem if the hardware can handle it. diff --git a/Documentation/media/uapi/dvb/ca-set-descr.rst b/Documentation/media/uapi/dvb/ca-set-descr.rst index 22c8b8f94c7e..d36464ba2317 100644 --- a/Documentation/media/uapi/dvb/ca-set-descr.rst +++ b/Documentation/media/uapi/dvb/ca-set-descr.rst @@ -39,7 +39,7 @@ Description ----------- CA_SET_DESCR is used for feeding descrambler CA slots with descrambling -keys (refered as control words). +keys (referred as control words). Return Value ------------ diff --git a/Documentation/media/uapi/dvb/dmx-qbuf.rst b/Documentation/media/uapi/dvb/dmx-qbuf.rst index 9a1d85147c25..9dc845daa59d 100644 --- a/Documentation/media/uapi/dvb/dmx-qbuf.rst +++ b/Documentation/media/uapi/dvb/dmx-qbuf.rst @@ -61,7 +61,7 @@ the device is closed. Applications call the ``DMX_DQBUF`` ioctl to dequeue a filled (capturing) buffer from the driver's outgoing queue. -They just set the ``index`` field withe the buffer ID to be queued. +They just set the ``index`` field with the buffer ID to be queued. When ``DMX_DQBUF`` is called with a pointer to struct :c:type:`dmx_buffer`, the driver fills the remaining fields or returns an error code. diff --git a/Documentation/media/uapi/dvb/dvbproperty.rst b/Documentation/media/uapi/dvb/dvbproperty.rst index 371c72bb9419..0c4f5598f2be 100644 --- a/Documentation/media/uapi/dvb/dvbproperty.rst +++ b/Documentation/media/uapi/dvb/dvbproperty.rst @@ -44,7 +44,7 @@ with supports all digital TV delivery systems. struct :c:type:`dvb_frontend_parameters`. 2. Don't use DVB API version 3 calls on hardware with supports - newer standards. Such API provides no suport or a very limited + newer standards. Such API provides no support or a very limited support to new standards and/or new hardware. 3. Nowadays, most frontends support multiple delivery systems. 
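As a loose illustration of the DVBv5 property interface recommended above, a tune request can be submitted as a single property list; the frequency, bandwidth and delivery system in this sketch are placeholders:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/dvb/frontend.h>

    /* Submit one DVBv5 property list: select DVB-T, set frequency and
     * bandwidth, then ask the frontend to tune. */
    int tune_dvbt(const char *frontend_path)
    {
            struct dtv_property props[] = {
                    { .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBT },
                    { .cmd = DTV_FREQUENCY,       .u.data = 474000000 },
                    { .cmd = DTV_BANDWIDTH_HZ,    .u.data = 8000000 },
                    { .cmd = DTV_TUNE },
            };
            struct dtv_properties request = {
                    .num = 4,
                    .props = props,
            };
            int fd = open(frontend_path, O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, FE_SET_PROPERTY, &request) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;      /* caller keeps the frontend open while tuned */
    }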
diff --git a/Documentation/media/uapi/dvb/video_types.rst b/Documentation/media/uapi/dvb/video_types.rst index 2ed8aad84003..479942ce6fb8 100644 --- a/Documentation/media/uapi/dvb/video_types.rst +++ b/Documentation/media/uapi/dvb/video_types.rst @@ -202,7 +202,7 @@ If video_blank is set video will be blanked out if the channel is changed or if playback is stopped. Otherwise, the last picture will be displayed. play_state indicates if the video is currently frozen, stopped, or being played back. The stream_source corresponds to the -seleted source for the video stream. It can come either from the +selected source for the video stream. It can come either from the demultiplexer or from memory. The video_format indicates the aspect ratio (one of 4:3 or 16:9) of the currently played video stream. Finally, display_format corresponds to the selected cropping mode in diff --git a/Documentation/media/uapi/fdl-appendix.rst b/Documentation/media/uapi/fdl-appendix.rst index f8dc85d3939c..9316b8617502 100644 --- a/Documentation/media/uapi/fdl-appendix.rst +++ b/Documentation/media/uapi/fdl-appendix.rst @@ -363,7 +363,7 @@ various documents with a single copy that is included in the collection, provided that you follow the rules of this License for verbatim copying of each of the documents in all other respects. -You may extract a single document from such a collection, and dispbibute +You may extract a single document from such a collection, and distribute it individually under this License, provided you insert a copy of this License into the extracted document, and follow this License in all other respects regarding verbatim copying of that document. diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst index 8627587b7075..3af6a414b501 100644 --- a/Documentation/media/uapi/mediactl/media-types.rst +++ b/Documentation/media/uapi/mediactl/media-types.rst @@ -164,7 +164,7 @@ Types and flags used to represent the media graph elements * - ``MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV`` - Video pixel encoding converter. An entity capable of pixel - enconding conversion must have at least one sink pad and one + encoding conversion must have at least one sink pad and one source pad, and convert the encoding of pixels received on its sink pad(s) to a different encoding output on its source pad(s). Pixel encoding conversion includes but isn't limited diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst index 4b25ad03f45a..1ad631e549fe 100644 --- a/Documentation/media/uapi/mediactl/request-api.rst +++ b/Documentation/media/uapi/mediactl/request-api.rst @@ -91,7 +91,7 @@ A request must contain at least one buffer, otherwise ``ENOENT`` is returned. A queued request cannot be modified anymore. .. caution:: - For :ref:`memory-to-memory devices ` you can use requests only for + For :ref:`memory-to-memory devices ` you can use requests only for output buffers, not for capture buffers. Attempting to add a capture buffer to a request will result in an ``EACCES`` error. @@ -152,7 +152,7 @@ if it had just been allocated. Example for a Codec Device -------------------------- -For use-cases such as :ref:`codecs `, the request API can be used +For use-cases such as :ref:`codecs `, the request API can be used to associate specific controls to be applied by the driver for the OUTPUT buffer, allowing user-space to queue many such buffers in advance. 
It can also take advantage of requests' diff --git a/Documentation/media/uapi/rc/rc-tables.rst b/Documentation/media/uapi/rc/rc-tables.rst index cb670d10998b..f460031d8531 100644 --- a/Documentation/media/uapi/rc/rc-tables.rst +++ b/Documentation/media/uapi/rc/rc-tables.rst @@ -385,7 +385,7 @@ the remote via /dev/input/event devices. - ``KEY_CHANNELDOWN`` - - Decrease channel sequencially + - Decrease channel sequentially - CHANNEL - / CHANNEL DOWN / DOWN @@ -393,7 +393,7 @@ the remote via /dev/input/event devices. - ``KEY_CHANNELUP`` - - Increase channel sequencially + - Increase channel sequentially - CHANNEL + / CHANNEL UP / UP diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst index 86878bb0087f..81ffdcb89057 100644 --- a/Documentation/media/uapi/v4l/buffer.rst +++ b/Documentation/media/uapi/v4l/buffer.rst @@ -230,8 +230,7 @@ struct v4l2_buffer * - struct :c:type:`v4l2_timecode` - ``timecode`` - - - When ``type`` is ``V4L2_BUF_TYPE_VIDEO_CAPTURE`` and the - ``V4L2_BUF_FLAG_TIMECODE`` flag is set in ``flags``, this + - When the ``V4L2_BUF_FLAG_TIMECODE`` flag is set in ``flags``, this structure contains a frame timecode. In :c:type:`V4L2_FIELD_ALTERNATE ` mode the top and bottom field contain the same timecode. Timecodes are intended to @@ -714,10 +713,10 @@ enum v4l2_memory Timecodes ========= -The struct :c:type:`v4l2_timecode` structure is designed to hold a -:ref:`smpte12m` or similar timecode. (struct -struct :c:type:`timeval` timestamps are stored in struct -:c:type:`v4l2_buffer` field ``timestamp``.) +The :c:type:`v4l2_buffer_timecode` structure is designed to hold a +:ref:`smpte12m` or similar timecode. +(struct :c:type:`timeval` timestamps are stored in the struct +:c:type:`v4l2_buffer` ``timestamp`` field.) .. c:type:: v4l2_timecode diff --git a/Documentation/media/uapi/v4l/common.rst b/Documentation/media/uapi/v4l/common.rst index 889f2f2632a1..5e87ae24e4b4 100644 --- a/Documentation/media/uapi/v4l/common.rst +++ b/Documentation/media/uapi/v4l/common.rst @@ -46,6 +46,17 @@ applicable to all devices. dv-timings control extended-controls + ext-ctrls-camera + ext-ctrls-flash + ext-ctrls-image-source + ext-ctrls-image-process + ext-ctrls-codec + ext-ctrls-jpeg + ext-ctrls-dv + ext-ctrls-rf-tuner + ext-ctrls-fm-tx + ext-ctrls-fm-rx + ext-ctrls-detect format planar-apis selection-api diff --git a/Documentation/media/uapi/v4l/control.rst b/Documentation/media/uapi/v4l/control.rst index 0d46526b5935..71417bba028c 100644 --- a/Documentation/media/uapi/v4l/control.rst +++ b/Documentation/media/uapi/v4l/control.rst @@ -499,7 +499,7 @@ Example: Changing controls .. [#f1] The use of ``V4L2_CID_PRIVATE_BASE`` is problematic because different drivers may use the same ``V4L2_CID_PRIVATE_BASE`` ID for different - controls. This makes it hard to programatically set such controls + controls. This makes it hard to programmatically set such controls since the meaning of the control with that ID is driver dependent. In order to resolve this drivers use unique IDs and the ``V4L2_CID_PRIVATE_BASE`` IDs are mapped to those unique IDs by the diff --git a/Documentation/media/uapi/v4l/dev-effect.rst b/Documentation/media/uapi/v4l/dev-effect.rst deleted file mode 100644 index b165e2c20910..000000000000 --- a/Documentation/media/uapi/v4l/dev-effect.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. Permission is granted to copy, distribute and/or modify this -.. document under the terms of the GNU Free Documentation License, -.. 
Version 1.1 or any later version published by the Free Software -.. Foundation, with no Invariant Sections, no Front-Cover Texts -.. and no Back-Cover Texts. A copy of the license is included at -.. Documentation/media/uapi/fdl-appendix.rst. -.. -.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections - -.. _effect: - -************************ -Effect Devices Interface -************************ - -.. note:: - This interface has been be suspended from the V4L2 API. - The implementation for such effects should be done - via mem2mem devices. - -A V4L2 video effect device can do image effects, filtering, or combine -two or more images or image streams. For example video transitions or -wipes. Applications send data to be processed and receive the result -data either with :ref:`read() ` and -:ref:`write() ` functions, or through the streaming I/O -mechanism. - -[to do] diff --git a/Documentation/media/uapi/v4l/dev-codec.rst b/Documentation/media/uapi/v4l/dev-mem2mem.rst similarity index 50% rename from Documentation/media/uapi/v4l/dev-codec.rst rename to Documentation/media/uapi/v4l/dev-mem2mem.rst index b5e017c17834..67a980818dc8 100644 --- a/Documentation/media/uapi/v4l/dev-codec.rst +++ b/Documentation/media/uapi/v4l/dev-mem2mem.rst @@ -7,37 +7,36 @@ .. .. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections -.. _codec: +.. _mem2mem: -*************** -Codec Interface -*************** +******************************** +Video Memory-To-Memory Interface +******************************** -A V4L2 codec can compress, decompress, transform, or otherwise convert -video data from one format into another format, in memory. Typically -such devices are memory-to-memory devices (i.e. devices with the -``V4L2_CAP_VIDEO_M2M`` or ``V4L2_CAP_VIDEO_M2M_MPLANE`` capability set). +A V4L2 memory-to-memory device can compress, decompress, transform, or +otherwise convert video data from one format into another format, in memory. +Such memory-to-memory devices set the ``V4L2_CAP_VIDEO_M2M`` or +``V4L2_CAP_VIDEO_M2M_MPLANE`` capability. Examples of memory-to-memory +devices are codecs, scalers, deinterlacers or format converters (i.e. +converting from YUV to RGB). A memory-to-memory video node acts just like a normal video node, but it -supports both output (sending frames from memory to the codec hardware) -and capture (receiving the processed frames from the codec hardware into +supports both output (sending frames from memory to the hardware) +and capture (receiving the processed frames from the hardware into memory) stream I/O. An application will have to setup the stream I/O for both sides and finally call :ref:`VIDIOC_STREAMON ` -for both capture and output to start the codec. - -Video compression codecs use the MPEG controls to setup their codec -parameters - -.. note:: - - The MPEG controls actually support many more codecs than - just MPEG. See :ref:`mpeg-controls`. +for both capture and output to start the hardware. Memory-to-memory devices function as a shared resource: you can open the video node multiple times, each application setting up their -own codec properties that are local to the file handle, and each can use +own properties that are local to the file handle, and each can use it independently from the others. The driver will arbitrate access to -the codec and reprogram it whenever another file handler gets access. +the hardware and reprogram it whenever another file handler gets access. 
This is different from the usual video node behavior where the video properties are global to the device (i.e. changing something through one file handle is visible through another file handle). + +One of the most common memory-to-memory device is the codec. Codecs +are more complicated than most and require additional setup for +their codec parameters. This is done through codec controls. +See :ref:`mpeg-controls`. diff --git a/Documentation/media/uapi/v4l/dev-teletext.rst b/Documentation/media/uapi/v4l/dev-teletext.rst deleted file mode 100644 index 35e8c4b35458..000000000000 --- a/Documentation/media/uapi/v4l/dev-teletext.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. Permission is granted to copy, distribute and/or modify this -.. document under the terms of the GNU Free Documentation License, -.. Version 1.1 or any later version published by the Free Software -.. Foundation, with no Invariant Sections, no Front-Cover Texts -.. and no Back-Cover Texts. A copy of the license is included at -.. Documentation/media/uapi/fdl-appendix.rst. -.. -.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections - -.. _ttx: - -****************** -Teletext Interface -****************** - -This interface was aimed at devices receiving and demodulating Teletext -data [:ref:`ets300706`, :ref:`itu653`], evaluating the Teletext -packages and storing formatted pages in cache memory. Such devices are -usually implemented as microcontrollers with serial interface -(I\ :sup:`2`\ C) and could be found on old TV cards, dedicated Teletext -decoding cards and home-brew devices connected to the PC parallel port. - -The Teletext API was designed by Martin Buck. It was defined in the -kernel header file ``linux/videotext.h``, the specification is available -from -`ftp://ftp.gwdg.de/pub/linux/misc/videotext/ `__. -(Videotext is the name of the German public television Teletext -service.) - -Eventually the Teletext API was integrated into the V4L API with -character device file names ``/dev/vtx0`` to ``/dev/vtx31``, device -major number 81, minor numbers 192 to 223. - -However, teletext decoders were quickly replaced by more generic VBI -demodulators and those dedicated teletext decoders no longer exist. For -many years the vtx devices were still around, even though nobody used -them. So the decision was made to finally remove support for the -Teletext API in kernel 2.6.37. - -Modern devices all use the :ref:`raw ` or -:ref:`sliced` VBI API. diff --git a/Documentation/media/uapi/v4l/devices.rst b/Documentation/media/uapi/v4l/devices.rst index 5dbe9d13b6e6..07f8d047662b 100644 --- a/Documentation/media/uapi/v4l/devices.rst +++ b/Documentation/media/uapi/v4l/devices.rst @@ -21,11 +21,9 @@ Interfaces dev-overlay dev-output dev-osd - dev-codec - dev-effect + dev-mem2mem dev-raw-vbi dev-sliced-vbi - dev-teletext dev-radio dev-rds dev-sdr diff --git a/Documentation/media/uapi/v4l/ext-ctrls-camera.rst b/Documentation/media/uapi/v4l/ext-ctrls-camera.rst new file mode 100644 index 000000000000..d3a553cd86c9 --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-camera.rst @@ -0,0 +1,508 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. 
TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections + +.. _camera-controls: + +************************ +Camera Control Reference +************************ + +The Camera class includes controls for mechanical (or equivalent +digital) features of a device such as controllable lenses or sensors. + + +.. _camera-control-id: + +Camera Control IDs +================== + +``V4L2_CID_CAMERA_CLASS (class)`` + The Camera class descriptor. Calling + :ref:`VIDIOC_QUERYCTRL` for this control will + return a description of this control class. + +.. _v4l2-exposure-auto-type: + +``V4L2_CID_EXPOSURE_AUTO`` + (enum) + +enum v4l2_exposure_auto_type - + Enables automatic adjustments of the exposure time and/or iris + aperture. The effect of manual changes of the exposure time or iris + aperture while these features are enabled is undefined, drivers + should ignore such requests. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_EXPOSURE_AUTO`` + - Automatic exposure time, automatic iris aperture. + * - ``V4L2_EXPOSURE_MANUAL`` + - Manual exposure time, manual iris. + * - ``V4L2_EXPOSURE_SHUTTER_PRIORITY`` + - Manual exposure time, auto iris. + * - ``V4L2_EXPOSURE_APERTURE_PRIORITY`` + - Auto exposure time, manual iris. + + + +``V4L2_CID_EXPOSURE_ABSOLUTE (integer)`` + Determines the exposure time of the camera sensor. The exposure time + is limited by the frame interval. Drivers should interpret the + values as 100 µs units, where the value 1 stands for 1/10000th of a + second, 10000 for 1 second and 100000 for 10 seconds. + +``V4L2_CID_EXPOSURE_AUTO_PRIORITY (boolean)`` + When ``V4L2_CID_EXPOSURE_AUTO`` is set to ``AUTO`` or + ``APERTURE_PRIORITY``, this control determines if the device may + dynamically vary the frame rate. By default this feature is disabled + (0) and the frame rate must remain constant. + +``V4L2_CID_AUTO_EXPOSURE_BIAS (integer menu)`` + Determines the automatic exposure compensation, it is effective only + when ``V4L2_CID_EXPOSURE_AUTO`` control is set to ``AUTO``, + ``SHUTTER_PRIORITY`` or ``APERTURE_PRIORITY``. It is expressed in + terms of EV, drivers should interpret the values as 0.001 EV units, + where the value 1000 stands for +1 EV. + + Increasing the exposure compensation value is equivalent to + decreasing the exposure value (EV) and will increase the amount of + light at the image sensor. The camera performs the exposure + compensation by adjusting absolute exposure time and/or aperture. + +.. _v4l2-exposure-metering: + +``V4L2_CID_EXPOSURE_METERING`` + (enum) + +enum v4l2_exposure_metering - + Determines how the camera measures the amount of light available for + the frame exposure. Possible values are: + +.. tabularcolumns:: |p{8.5cm}|p{9.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_EXPOSURE_METERING_AVERAGE`` + - Use the light information coming from the entire frame and average + giving no weighting to any particular portion of the metered area. + * - ``V4L2_EXPOSURE_METERING_CENTER_WEIGHTED`` + - Average the light information coming from the entire frame giving + priority to the center of the metered area. + * - ``V4L2_EXPOSURE_METERING_SPOT`` + - Measure only very small area at the center of the frame. + * - ``V4L2_EXPOSURE_METERING_MATRIX`` + - A multi-zone metering. The light intensity is measured in several + points of the frame and the results are combined. The algorithm of + the zones selection and their significance in calculating the + final value is device dependent. 
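For drivers that implement the exposure controls above, switching to manual mode and programming a fixed exposure time is done through the extended control interface. The snippet below is only a sketch: the device path, the 20 ms value (200 in the 100 µs units described above) and the availability of both controls on the device are assumptions of the example.

.. code-block:: c

    /* Sketch: put the sensor in fully manual exposure and request roughly
     * 20 ms of exposure time (200 * 100 us). Device path and control
     * availability are assumptions of this example. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    static int set_camera_ctrl(int fd, unsigned int id, int value)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));
        ctrl.id = id;
        ctrl.value = value;
        ctrls.ctrl_class = V4L2_CTRL_CLASS_CAMERA;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }

    int main(void)
    {
        int fd = open("/dev/video0", O_RDWR);   /* assumed camera node */

        if (fd < 0)
            return 1;
        if (set_camera_ctrl(fd, V4L2_CID_EXPOSURE_AUTO, V4L2_EXPOSURE_MANUAL) ||
            set_camera_ctrl(fd, V4L2_CID_EXPOSURE_ABSOLUTE, 200))
            perror("VIDIOC_S_EXT_CTRLS");
        close(fd);
        return 0;
    }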
+ + + +``V4L2_CID_PAN_RELATIVE (integer)`` + This control turns the camera horizontally by the specified amount. + The unit is undefined. A positive value moves the camera to the + right (clockwise when viewed from above), a negative value to the + left. A value of zero does not cause motion. This is a write-only + control. + +``V4L2_CID_TILT_RELATIVE (integer)`` + This control turns the camera vertically by the specified amount. + The unit is undefined. A positive value moves the camera up, a + negative value down. A value of zero does not cause motion. This is + a write-only control. + +``V4L2_CID_PAN_RESET (button)`` + When this control is set, the camera moves horizontally to the + default position. + +``V4L2_CID_TILT_RESET (button)`` + When this control is set, the camera moves vertically to the default + position. + +``V4L2_CID_PAN_ABSOLUTE (integer)`` + This control turns the camera horizontally to the specified + position. Positive values move the camera to the right (clockwise + when viewed from above), negative values to the left. Drivers should + interpret the values as arc seconds, with valid values between -180 + * 3600 and +180 * 3600 inclusive. + +``V4L2_CID_TILT_ABSOLUTE (integer)`` + This control turns the camera vertically to the specified position. + Positive values move the camera up, negative values down. Drivers + should interpret the values as arc seconds, with valid values + between -180 * 3600 and +180 * 3600 inclusive. + +``V4L2_CID_FOCUS_ABSOLUTE (integer)`` + This control sets the focal point of the camera to the specified + position. The unit is undefined. Positive values set the focus + closer to the camera, negative values towards infinity. + +``V4L2_CID_FOCUS_RELATIVE (integer)`` + This control moves the focal point of the camera by the specified + amount. The unit is undefined. Positive values move the focus closer + to the camera, negative values towards infinity. This is a + write-only control. + +``V4L2_CID_FOCUS_AUTO (boolean)`` + Enables continuous automatic focus adjustments. The effect of manual + focus adjustments while this feature is enabled is undefined, + drivers should ignore such requests. + +``V4L2_CID_AUTO_FOCUS_START (button)`` + Starts single auto focus process. The effect of setting this control + when ``V4L2_CID_FOCUS_AUTO`` is set to ``TRUE`` (1) is undefined, + drivers should ignore such requests. + +``V4L2_CID_AUTO_FOCUS_STOP (button)`` + Aborts automatic focusing started with ``V4L2_CID_AUTO_FOCUS_START`` + control. It is effective only when the continuous autofocus is + disabled, that is when ``V4L2_CID_FOCUS_AUTO`` control is set to + ``FALSE`` (0). + +.. _v4l2-auto-focus-status: + +``V4L2_CID_AUTO_FOCUS_STATUS (bitmask)`` + The automatic focus status. This is a read-only control. + + Setting ``V4L2_LOCK_FOCUS`` lock bit of the ``V4L2_CID_3A_LOCK`` + control may stop updates of the ``V4L2_CID_AUTO_FOCUS_STATUS`` + control value. + +.. tabularcolumns:: |p{6.5cm}|p{11.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_AUTO_FOCUS_STATUS_IDLE`` + - Automatic focus is not active. + * - ``V4L2_AUTO_FOCUS_STATUS_BUSY`` + - Automatic focusing is in progress. + * - ``V4L2_AUTO_FOCUS_STATUS_REACHED`` + - Focus has been reached. + * - ``V4L2_AUTO_FOCUS_STATUS_FAILED`` + - Automatic focus has failed, the driver will not transition from + this state until another action is performed by an application. + + + +.. 
_v4l2-auto-focus-range: + +``V4L2_CID_AUTO_FOCUS_RANGE`` + (enum) + +enum v4l2_auto_focus_range - + Determines auto focus distance range for which lens may be adjusted. + +.. tabularcolumns:: |p{6.5cm}|p{11.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_AUTO_FOCUS_RANGE_AUTO`` + - The camera automatically selects the focus range. + * - ``V4L2_AUTO_FOCUS_RANGE_NORMAL`` + - Normal distance range, limited for best automatic focus + performance. + * - ``V4L2_AUTO_FOCUS_RANGE_MACRO`` + - Macro (close-up) auto focus. The camera will use its minimum + possible distance for auto focus. + * - ``V4L2_AUTO_FOCUS_RANGE_INFINITY`` + - The lens is set to focus on an object at infinite distance. + + + +``V4L2_CID_ZOOM_ABSOLUTE (integer)`` + Specify the objective lens focal length as an absolute value. The + zoom unit is driver-specific and its value should be a positive + integer. + +``V4L2_CID_ZOOM_RELATIVE (integer)`` + Specify the objective lens focal length relatively to the current + value. Positive values move the zoom lens group towards the + telephoto direction, negative values towards the wide-angle + direction. The zoom unit is driver-specific. This is a write-only + control. + +``V4L2_CID_ZOOM_CONTINUOUS (integer)`` + Move the objective lens group at the specified speed until it + reaches physical device limits or until an explicit request to stop + the movement. A positive value moves the zoom lens group towards the + telephoto direction. A value of zero stops the zoom lens group + movement. A negative value moves the zoom lens group towards the + wide-angle direction. The zoom speed unit is driver-specific. + +``V4L2_CID_IRIS_ABSOLUTE (integer)`` + This control sets the camera's aperture to the specified value. The + unit is undefined. Larger values open the iris wider, smaller values + close it. + +``V4L2_CID_IRIS_RELATIVE (integer)`` + This control modifies the camera's aperture by the specified amount. + The unit is undefined. Positive values open the iris one step + further, negative values close it one step further. This is a + write-only control. + +``V4L2_CID_PRIVACY (boolean)`` + Prevent video from being acquired by the camera. When this control + is set to ``TRUE`` (1), no image can be captured by the camera. + Common means to enforce privacy are mechanical obturation of the + sensor and firmware image processing, but the device is not + restricted to these methods. Devices that implement the privacy + control must support read access and may support write access. + +``V4L2_CID_BAND_STOP_FILTER (integer)`` + Switch the band-stop filter of a camera sensor on or off, or specify + its strength. Such band-stop filters can be used, for example, to + filter out the fluorescent light component. + +.. _v4l2-auto-n-preset-white-balance: + +``V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE`` + (enum) + +enum v4l2_auto_n_preset_white_balance - + Sets white balance to automatic, manual or a preset. The presets + determine color temperature of the light as a hint to the camera for + white balance adjustments resulting in most accurate color + representation. The following white balance presets are listed in + order of increasing color temperature. + +.. tabularcolumns:: |p{7.0 cm}|p{10.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_WHITE_BALANCE_MANUAL`` + - Manual white balance. + * - ``V4L2_WHITE_BALANCE_AUTO`` + - Automatic white balance adjustments. 
+ * - ``V4L2_WHITE_BALANCE_INCANDESCENT`` + - White balance setting for incandescent (tungsten) lighting. It + generally cools down the colors and corresponds approximately to + 2500...3500 K color temperature range. + * - ``V4L2_WHITE_BALANCE_FLUORESCENT`` + - White balance preset for fluorescent lighting. It corresponds + approximately to 4000...5000 K color temperature. + * - ``V4L2_WHITE_BALANCE_FLUORESCENT_H`` + - With this setting the camera will compensate for fluorescent H + lighting. + * - ``V4L2_WHITE_BALANCE_HORIZON`` + - White balance setting for horizon daylight. It corresponds + approximately to 5000 K color temperature. + * - ``V4L2_WHITE_BALANCE_DAYLIGHT`` + - White balance preset for daylight (with clear sky). It corresponds + approximately to 5000...6500 K color temperature. + * - ``V4L2_WHITE_BALANCE_FLASH`` + - With this setting the camera will compensate for the flash light. + It slightly warms up the colors and corresponds roughly to + 5000...5500 K color temperature. + * - ``V4L2_WHITE_BALANCE_CLOUDY`` + - White balance preset for moderately overcast sky. This option + corresponds approximately to 6500...8000 K color temperature + range. + * - ``V4L2_WHITE_BALANCE_SHADE`` + - White balance preset for shade or heavily overcast sky. It + corresponds approximately to 9000...10000 K color temperature. + + + +.. _v4l2-wide-dynamic-range: + +``V4L2_CID_WIDE_DYNAMIC_RANGE (boolean)`` + Enables or disables the camera's wide dynamic range feature. This + feature allows to obtain clear images in situations where intensity + of the illumination varies significantly throughout the scene, i.e. + there are simultaneously very dark and very bright areas. It is most + commonly realized in cameras by combining two subsequent frames with + different exposure times. [#f1]_ + +.. _v4l2-image-stabilization: + +``V4L2_CID_IMAGE_STABILIZATION (boolean)`` + Enables or disables image stabilization. + +``V4L2_CID_ISO_SENSITIVITY (integer menu)`` + Determines ISO equivalent of an image sensor indicating the sensor's + sensitivity to light. The numbers are expressed in arithmetic scale, + as per :ref:`iso12232` standard, where doubling the sensor + sensitivity is represented by doubling the numerical ISO value. + Applications should interpret the values as standard ISO values + multiplied by 1000, e.g. control value 800 stands for ISO 0.8. + Drivers will usually support only a subset of standard ISO values. + The effect of setting this control while the + ``V4L2_CID_ISO_SENSITIVITY_AUTO`` control is set to a value other + than ``V4L2_CID_ISO_SENSITIVITY_MANUAL`` is undefined, drivers + should ignore such requests. + +.. _v4l2-iso-sensitivity-auto-type: + +``V4L2_CID_ISO_SENSITIVITY_AUTO`` + (enum) + +enum v4l2_iso_sensitivity_type - + Enables or disables automatic ISO sensitivity adjustments. + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_CID_ISO_SENSITIVITY_MANUAL`` + - Manual ISO sensitivity. + * - ``V4L2_CID_ISO_SENSITIVITY_AUTO`` + - Automatic ISO sensitivity adjustments. + + + +.. _v4l2-scene-mode: + +``V4L2_CID_SCENE_MODE`` + (enum) + +enum v4l2_scene_mode - + This control allows to select scene programs as the camera automatic + modes optimized for common shooting scenes. Within these modes the + camera determines best exposure, aperture, focusing, light metering, + white balance and equivalent sensitivity. The controls of those + parameters are influenced by the scene mode control. An exact + behavior in each mode is subject to the camera specification. 
+ + When the scene mode feature is not used, this control should be set + to ``V4L2_SCENE_MODE_NONE`` to make sure the other possibly related + controls are accessible. The following scene programs are defined: + +.. tabularcolumns:: |p{6.0cm}|p{11.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_SCENE_MODE_NONE`` + - The scene mode feature is disabled. + * - ``V4L2_SCENE_MODE_BACKLIGHT`` + - Backlight. Compensates for dark shadows when light is coming from + behind a subject, also by automatically turning on the flash. + * - ``V4L2_SCENE_MODE_BEACH_SNOW`` + - Beach and snow. This mode compensates for all-white or bright + scenes, which tend to look gray and low contrast, when camera's + automatic exposure is based on an average scene brightness. To + compensate, this mode automatically slightly overexposes the + frames. The white balance may also be adjusted to compensate for + the fact that reflected snow looks bluish rather than white. + * - ``V4L2_SCENE_MODE_CANDLELIGHT`` + - Candle light. The camera generally raises the ISO sensitivity and + lowers the shutter speed. This mode compensates for relatively + close subject in the scene. The flash is disabled in order to + preserve the ambiance of the light. + * - ``V4L2_SCENE_MODE_DAWN_DUSK`` + - Dawn and dusk. Preserves the colors seen in low natural light + before dusk and after down. The camera may turn off the flash, and + automatically focus at infinity. It will usually boost saturation + and lower the shutter speed. + * - ``V4L2_SCENE_MODE_FALL_COLORS`` + - Fall colors. Increases saturation and adjusts white balance for + color enhancement. Pictures of autumn leaves get saturated reds + and yellows. + * - ``V4L2_SCENE_MODE_FIREWORKS`` + - Fireworks. Long exposure times are used to capture the expanding + burst of light from a firework. The camera may invoke image + stabilization. + * - ``V4L2_SCENE_MODE_LANDSCAPE`` + - Landscape. The camera may choose a small aperture to provide deep + depth of field and long exposure duration to help capture detail + in dim light conditions. The focus is fixed at infinity. Suitable + for distant and wide scenery. + * - ``V4L2_SCENE_MODE_NIGHT`` + - Night, also known as Night Landscape. Designed for low light + conditions, it preserves detail in the dark areas without blowing + out bright objects. The camera generally sets itself to a + medium-to-high ISO sensitivity, with a relatively long exposure + time, and turns flash off. As such, there will be increased image + noise and the possibility of blurred image. + * - ``V4L2_SCENE_MODE_PARTY_INDOOR`` + - Party and indoor. Designed to capture indoor scenes that are lit + by indoor background lighting as well as the flash. The camera + usually increases ISO sensitivity, and adjusts exposure for the + low light conditions. + * - ``V4L2_SCENE_MODE_PORTRAIT`` + - Portrait. The camera adjusts the aperture so that the depth of + field is reduced, which helps to isolate the subject against a + smooth background. Most cameras recognize the presence of faces in + the scene and focus on them. The color hue is adjusted to enhance + skin tones. The intensity of the flash is often reduced. + * - ``V4L2_SCENE_MODE_SPORTS`` + - Sports. Significantly increases ISO and uses a fast shutter speed + to freeze motion of rapidly-moving subjects. Increased image noise + may be seen in this mode. + * - ``V4L2_SCENE_MODE_SUNSET`` + - Sunset. Preserves deep hues seen in sunsets and sunrises. It bumps + up the saturation. 
+ * - ``V4L2_SCENE_MODE_TEXT`` + - Text. It applies extra contrast and sharpness, it is typically a + black-and-white mode optimized for readability. Automatic focus + may be switched to close-up mode and this setting may also involve + some lens-distortion correction. + + + +``V4L2_CID_3A_LOCK (bitmask)`` + This control locks or unlocks the automatic focus, exposure and + white balance. The automatic adjustments can be paused independently + by setting the corresponding lock bit to 1. The camera then retains + the settings until the lock bit is cleared. The following lock bits + are defined: + + When a given algorithm is not enabled, drivers should ignore + requests to lock it and should return no error. An example might be + an application setting bit ``V4L2_LOCK_WHITE_BALANCE`` when the + ``V4L2_CID_AUTO_WHITE_BALANCE`` control is set to ``FALSE``. The + value of this control may be changed by exposure, white balance or + focus controls. + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_LOCK_EXPOSURE`` + - Automatic exposure adjustments lock. + * - ``V4L2_LOCK_WHITE_BALANCE`` + - Automatic white balance adjustments lock. + * - ``V4L2_LOCK_FOCUS`` + - Automatic focus lock. + + + +``V4L2_CID_PAN_SPEED (integer)`` + This control turns the camera horizontally at the specific speed. + The unit is undefined. A positive value moves the camera to the + right (clockwise when viewed from above), a negative value to the + left. A value of zero stops the motion if one is in progress and has + no effect otherwise. + +``V4L2_CID_TILT_SPEED (integer)`` + This control turns the camera vertically at the specified speed. The + unit is undefined. A positive value moves the camera up, a negative + value down. A value of zero stops the motion if one is in progress + and has no effect otherwise. + +.. [#f1] + This control may be changed to a menu control in the future, if more + options are required. diff --git a/Documentation/media/uapi/v4l/ext-ctrls-codec.rst b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst new file mode 100644 index 000000000000..c97fb7923be5 --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-codec.rst @@ -0,0 +1,2451 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections + +.. _mpeg-controls: + +*********************** +Codec Control Reference +*********************** + +Below all controls within the Codec control class are described. First +the generic controls, then controls specific for certain hardware. + +.. note:: + + These controls are applicable to all codecs and not just MPEG. The + defines are prefixed with V4L2_CID_MPEG/V4L2_MPEG as the controls + were originally made for MPEG codecs and later extended to cover all + encoding formats. + + +Generic Codec Controls +====================== + + +.. _mpeg-control-id: + +Codec Control IDs +----------------- + +``V4L2_CID_MPEG_CLASS (class)`` + The Codec class descriptor. Calling + :ref:`VIDIOC_QUERYCTRL` for this control will + return a description of this control class. This description can be + used as the caption of a Tab page in a GUI, for example. + +.. 
_v4l2-mpeg-stream-type: + +``V4L2_CID_MPEG_STREAM_TYPE`` + (enum) + +enum v4l2_mpeg_stream_type - + The MPEG-1, -2 or -4 output stream type. One cannot assume anything + here. Each hardware MPEG encoder tends to support different subsets + of the available MPEG stream types. This control is specific to + multiplexed MPEG streams. The currently defined stream types are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_PS`` + - MPEG-2 program stream + * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_TS`` + - MPEG-2 transport stream + * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_SS`` + - MPEG-1 system stream + * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_DVD`` + - MPEG-2 DVD-compatible stream + * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_VCD`` + - MPEG-1 VCD-compatible stream + * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD`` + - MPEG-2 SVCD-compatible stream + + + +``V4L2_CID_MPEG_STREAM_PID_PMT (integer)`` + Program Map Table Packet ID for the MPEG transport stream (default + 16) + +``V4L2_CID_MPEG_STREAM_PID_AUDIO (integer)`` + Audio Packet ID for the MPEG transport stream (default 256) + +``V4L2_CID_MPEG_STREAM_PID_VIDEO (integer)`` + Video Packet ID for the MPEG transport stream (default 260) + +``V4L2_CID_MPEG_STREAM_PID_PCR (integer)`` + Packet ID for the MPEG transport stream carrying PCR fields (default + 259) + +``V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (integer)`` + Audio ID for MPEG PES + +``V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (integer)`` + Video ID for MPEG PES + +.. _v4l2-mpeg-stream-vbi-fmt: + +``V4L2_CID_MPEG_STREAM_VBI_FMT`` + (enum) + +enum v4l2_mpeg_stream_vbi_fmt - + Some cards can embed VBI data (e. g. Closed Caption, Teletext) into + the MPEG stream. This control selects whether VBI data should be + embedded, and if so, what embedding method should be used. The list + of possible VBI formats depends on the driver. The currently defined + VBI format types are: + + + +.. tabularcolumns:: |p{6 cm}|p{11.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_STREAM_VBI_FMT_NONE`` + - No VBI in the MPEG stream + * - ``V4L2_MPEG_STREAM_VBI_FMT_IVTV`` + - VBI in private packets, IVTV format (documented in the kernel + sources in the file + ``Documentation/media/v4l-drivers/cx2341x.rst``) + + + +.. _v4l2-mpeg-audio-sampling-freq: + +``V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ`` + (enum) + +enum v4l2_mpeg_audio_sampling_freq - + MPEG Audio sampling frequency. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100`` + - 44.1 kHz + * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000`` + - 48 kHz + * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000`` + - 32 kHz + + + +.. _v4l2-mpeg-audio-encoding: + +``V4L2_CID_MPEG_AUDIO_ENCODING`` + (enum) + +enum v4l2_mpeg_audio_encoding - + MPEG Audio encoding. This control is specific to multiplexed MPEG + streams. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_1`` + - MPEG-1/2 Layer I encoding + * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_2`` + - MPEG-1/2 Layer II encoding + * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_3`` + - MPEG-1/2 Layer III encoding + * - ``V4L2_MPEG_AUDIO_ENCODING_AAC`` + - MPEG-2/4 AAC (Advanced Audio Coding) + * - ``V4L2_MPEG_AUDIO_ENCODING_AC3`` + - AC-3 aka ATSC A/52 encoding + + + +.. _v4l2-mpeg-audio-l1-bitrate: + +``V4L2_CID_MPEG_AUDIO_L1_BITRATE`` + (enum) + +enum v4l2_mpeg_audio_l1_bitrate - + MPEG-1/2 Layer I bitrate. Possible values are: + + + +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_32K`` + - 32 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_64K`` + - 64 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_96K`` + - 96 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_128K`` + - 128 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_160K`` + - 160 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_192K`` + - 192 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_224K`` + - 224 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_256K`` + - 256 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_288K`` + - 288 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_320K`` + - 320 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_352K`` + - 352 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_384K`` + - 384 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_416K`` + - 416 kbit/s + * - ``V4L2_MPEG_AUDIO_L1_BITRATE_448K`` + - 448 kbit/s + + + +.. _v4l2-mpeg-audio-l2-bitrate: + +``V4L2_CID_MPEG_AUDIO_L2_BITRATE`` + (enum) + +enum v4l2_mpeg_audio_l2_bitrate - + MPEG-1/2 Layer II bitrate. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_32K`` + - 32 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_48K`` + - 48 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_56K`` + - 56 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_64K`` + - 64 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_80K`` + - 80 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_96K`` + - 96 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_112K`` + - 112 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_128K`` + - 128 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_160K`` + - 160 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_192K`` + - 192 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_224K`` + - 224 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_256K`` + - 256 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_320K`` + - 320 kbit/s + * - ``V4L2_MPEG_AUDIO_L2_BITRATE_384K`` + - 384 kbit/s + + + +.. _v4l2-mpeg-audio-l3-bitrate: + +``V4L2_CID_MPEG_AUDIO_L3_BITRATE`` + (enum) + +enum v4l2_mpeg_audio_l3_bitrate - + MPEG-1/2 Layer III bitrate. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_32K`` + - 32 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_40K`` + - 40 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_48K`` + - 48 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_56K`` + - 56 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_64K`` + - 64 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_80K`` + - 80 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_96K`` + - 96 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_112K`` + - 112 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_128K`` + - 128 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_160K`` + - 160 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_192K`` + - 192 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_224K`` + - 224 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_256K`` + - 256 kbit/s + * - ``V4L2_MPEG_AUDIO_L3_BITRATE_320K`` + - 320 kbit/s + + + +``V4L2_CID_MPEG_AUDIO_AAC_BITRATE (integer)`` + AAC bitrate in bits per second. + +.. _v4l2-mpeg-audio-ac3-bitrate: + +``V4L2_CID_MPEG_AUDIO_AC3_BITRATE`` + (enum) + +enum v4l2_mpeg_audio_ac3_bitrate - + AC-3 bitrate. Possible values are: + + + +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_32K`` + - 32 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_40K`` + - 40 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_48K`` + - 48 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_56K`` + - 56 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_64K`` + - 64 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_80K`` + - 80 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_96K`` + - 96 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_112K`` + - 112 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_128K`` + - 128 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_160K`` + - 160 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_192K`` + - 192 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_224K`` + - 224 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_256K`` + - 256 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_320K`` + - 320 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_384K`` + - 384 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_448K`` + - 448 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_512K`` + - 512 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_576K`` + - 576 kbit/s + * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_640K`` + - 640 kbit/s + + + +.. _v4l2-mpeg-audio-mode: + +``V4L2_CID_MPEG_AUDIO_MODE`` + (enum) + +enum v4l2_mpeg_audio_mode - + MPEG Audio mode. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_MODE_STEREO`` + - Stereo + * - ``V4L2_MPEG_AUDIO_MODE_JOINT_STEREO`` + - Joint Stereo + * - ``V4L2_MPEG_AUDIO_MODE_DUAL`` + - Bilingual + * - ``V4L2_MPEG_AUDIO_MODE_MONO`` + - Mono + + + +.. _v4l2-mpeg-audio-mode-extension: + +``V4L2_CID_MPEG_AUDIO_MODE_EXTENSION`` + (enum) + +enum v4l2_mpeg_audio_mode_extension - + Joint Stereo audio mode extension. In Layer I and II they indicate + which subbands are in intensity stereo. All other subbands are coded + in stereo. Layer III is not (yet) supported. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4`` + - Subbands 4-31 in intensity stereo + * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8`` + - Subbands 8-31 in intensity stereo + * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12`` + - Subbands 12-31 in intensity stereo + * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16`` + - Subbands 16-31 in intensity stereo + + + +.. _v4l2-mpeg-audio-emphasis: + +``V4L2_CID_MPEG_AUDIO_EMPHASIS`` + (enum) + +enum v4l2_mpeg_audio_emphasis - + Audio Emphasis. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_EMPHASIS_NONE`` + - None + * - ``V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS`` + - 50/15 microsecond emphasis + * - ``V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17`` + - CCITT J.17 + + + +.. _v4l2-mpeg-audio-crc: + +``V4L2_CID_MPEG_AUDIO_CRC`` + (enum) + +enum v4l2_mpeg_audio_crc - + CRC method. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_CRC_NONE`` + - None + * - ``V4L2_MPEG_AUDIO_CRC_CRC16`` + - 16 bit parity check + + + +``V4L2_CID_MPEG_AUDIO_MUTE (boolean)`` + Mutes the audio when capturing. This is not done by muting audio + hardware, which can still produce a slight hiss, but in the encoder + itself, guaranteeing a fixed and reproducible audio bitstream. 0 = + unmuted, 1 = muted. + +.. _v4l2-mpeg-audio-dec-playback: + +``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK`` + (enum) + +enum v4l2_mpeg_audio_dec_playback - + Determines how monolingual audio should be played back. Possible + values are: + + + +.. tabularcolumns:: |p{9.0cm}|p{8.5cm}| + +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO`` + - Automatically determines the best playback mode. + * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO`` + - Stereo playback. + * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT`` + - Left channel playback. + * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT`` + - Right channel playback. + * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO`` + - Mono playback. + * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO`` + - Stereo playback with swapped left and right channels. + + + +.. _v4l2-mpeg-audio-dec-multilingual-playback: + +``V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK`` + (enum) + +enum v4l2_mpeg_audio_dec_playback - + Determines how multilingual audio should be played back. + +.. _v4l2-mpeg-video-encoding: + +``V4L2_CID_MPEG_VIDEO_ENCODING`` + (enum) + +enum v4l2_mpeg_video_encoding - + MPEG Video encoding method. This control is specific to multiplexed + MPEG streams. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_1`` + - MPEG-1 Video encoding + * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_2`` + - MPEG-2 Video encoding + * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC`` + - MPEG-4 AVC (H.264) Video encoding + + + +.. _v4l2-mpeg-video-aspect: + +``V4L2_CID_MPEG_VIDEO_ASPECT`` + (enum) + +enum v4l2_mpeg_video_aspect - + Video aspect. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_ASPECT_1x1`` + * - ``V4L2_MPEG_VIDEO_ASPECT_4x3`` + * - ``V4L2_MPEG_VIDEO_ASPECT_16x9`` + * - ``V4L2_MPEG_VIDEO_ASPECT_221x100`` + + + +``V4L2_CID_MPEG_VIDEO_B_FRAMES (integer)`` + Number of B-Frames (default 2) + +``V4L2_CID_MPEG_VIDEO_GOP_SIZE (integer)`` + GOP size (default 12) + +``V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (boolean)`` + GOP closure (default 1) + +``V4L2_CID_MPEG_VIDEO_PULLDOWN (boolean)`` + Enable 3:2 pulldown (default 0) + +.. _v4l2-mpeg-video-bitrate-mode: + +``V4L2_CID_MPEG_VIDEO_BITRATE_MODE`` + (enum) + +enum v4l2_mpeg_video_bitrate_mode - + Video bitrate mode. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_VBR`` + - Variable bitrate + * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_CBR`` + - Constant bitrate + + + +``V4L2_CID_MPEG_VIDEO_BITRATE (integer)`` + Video bitrate in bits per second. + +``V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (integer)`` + Peak video bitrate in bits per second. Must be larger or equal to + the average video bitrate. It is ignored if the video bitrate mode + is set to constant bitrate. + +``V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (integer)`` + For every captured frame, skip this many subsequent frames (default + 0). + +``V4L2_CID_MPEG_VIDEO_MUTE (boolean)`` + "Mutes" the video to a fixed color when capturing. This is useful + for testing, to produce a fixed video bitstream. 0 = unmuted, 1 = + muted. + +``V4L2_CID_MPEG_VIDEO_MUTE_YUV (integer)`` + Sets the "mute" color of the video. The supplied 32-bit integer is + interpreted as follows (bit 0 = least significant bit): + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - Bit 0:7 + - V chrominance information + * - Bit 8:15 + - U chrominance information + * - Bit 16:23 + - Y luminance information + * - Bit 24:31 + - Must be zero. + + + +.. _v4l2-mpeg-video-dec-pts: + +``V4L2_CID_MPEG_VIDEO_DEC_PTS (integer64)`` + This read-only control returns the 33-bit video Presentation Time + Stamp as defined in ITU T-REC-H.222.0 and ISO/IEC 13818-1 of the + currently displayed frame. 
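Because this control is 64 bits wide, it has to be read through ``VIDIOC_G_EXT_CTRLS`` and the ``value64`` union member rather than the old single-control ioctls. A minimal sketch follows; it assumes ``fd`` is an open decoder node and that the driver implements the control.

.. code-block:: c

    /* Sketch: read the 33-bit PTS of the currently displayed frame from a
     * decoder through the 64-bit extended control. Assumes 'fd' is an open
     * decoder video node; support for the control is driver dependent. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int get_dec_pts(int fd, __u64 *pts)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));
        ctrl.id = V4L2_CID_MPEG_VIDEO_DEC_PTS;
        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0)
            return -1;

        *pts = ctrl.value64;
        return 0;
    }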
This is the same PTS as is used in + :ref:`VIDIOC_DECODER_CMD`. + +.. _v4l2-mpeg-video-dec-frame: + +``V4L2_CID_MPEG_VIDEO_DEC_FRAME (integer64)`` + This read-only control returns the frame counter of the frame that + is currently displayed (decoded). This value is reset to 0 whenever + the decoder is started. + +``V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (boolean)`` + If enabled the decoder expects to receive a single slice per buffer, + otherwise the decoder expects a single frame in per buffer. + Applicable to the decoder, all codecs. + +``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (boolean)`` + Enable writing sample aspect ratio in the Video Usability + Information. Applicable to the H264 encoder. + +.. _v4l2-mpeg-video-h264-vui-sar-idc: + +``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC`` + (enum) + +enum v4l2_mpeg_video_h264_vui_sar_idc - + VUI sample aspect ratio indicator for H.264 encoding. The value is + defined in the table E-1 in the standard. Applicable to the H264 + encoder. + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED`` + - Unspecified + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1`` + - 1x1 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11`` + - 12x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11`` + - 10x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11`` + - 16x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33`` + - 40x33 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11`` + - 24x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11`` + - 20x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11`` + - 32x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33`` + - 80x33 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11`` + - 18x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11`` + - 15x11 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33`` + - 64x33 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99`` + - 160x99 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3`` + - 4x3 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2`` + - 3x2 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1`` + - 2x1 + * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED`` + - Extended SAR + + + +``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (integer)`` + Extended sample aspect ratio width for H.264 VUI encoding. + Applicable to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (integer)`` + Extended sample aspect ratio height for H.264 VUI encoding. + Applicable to the H264 encoder. + +.. _v4l2-mpeg-video-h264-level: + +``V4L2_CID_MPEG_VIDEO_H264_LEVEL`` + (enum) + +enum v4l2_mpeg_video_h264_level - + The level information for the H264 video elementary stream. + Applicable to the H264 encoder. Possible values are: + + + +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_0`` + - Level 1.0 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1B`` + - Level 1B + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_1`` + - Level 1.1 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_2`` + - Level 1.2 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_3`` + - Level 1.3 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_0`` + - Level 2.0 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_1`` + - Level 2.1 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_2`` + - Level 2.2 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_0`` + - Level 3.0 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_1`` + - Level 3.1 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_2`` + - Level 3.2 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_0`` + - Level 4.0 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_1`` + - Level 4.1 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_2`` + - Level 4.2 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_0`` + - Level 5.0 + * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_1`` + - Level 5.1 + + + +.. _v4l2-mpeg-video-mpeg4-level: + +``V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL`` + (enum) + +enum v4l2_mpeg_video_mpeg4_level - + The level information for the MPEG4 elementary stream. Applicable to + the MPEG4 encoder. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0`` + - Level 0 + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B`` + - Level 0b + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_1`` + - Level 1 + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_2`` + - Level 2 + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3`` + - Level 3 + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B`` + - Level 3b + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_4`` + - Level 4 + * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_5`` + - Level 5 + + + +.. _v4l2-mpeg-video-h264-profile: + +``V4L2_CID_MPEG_VIDEO_H264_PROFILE`` + (enum) + +enum v4l2_mpeg_video_h264_profile - + The profile information for H264. Applicable to the H264 encoder. + Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE`` + - Baseline profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE`` + - Constrained Baseline profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MAIN`` + - Main profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED`` + - Extended profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH`` + - High profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10`` + - High 10 profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422`` + - High 422 profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE`` + - High 444 Predictive profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA`` + - High 10 Intra profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA`` + - High 422 Intra profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA`` + - High 444 Intra profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA`` + - CAVLC 444 Intra profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE`` + - Scalable Baseline profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH`` + - Scalable High profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA`` + - Scalable High Intra profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH`` + - Stereo High profile + * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH`` + - Multiview High profile + + + +.. _v4l2-mpeg-video-mpeg4-profile: + +``V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE`` + (enum) + +enum v4l2_mpeg_video_mpeg4_profile - + The profile information for MPEG4. Applicable to the MPEG4 encoder. + Possible values are: + + + +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE`` + - Simple profile + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE`` + - Advanced Simple profile + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE`` + - Core profile + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE`` + - Simple Scalable profile + * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY`` + - + + + +``V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (integer)`` + The maximum number of reference pictures used for encoding. + Applicable to the encoder. + +.. _v4l2-mpeg-video-multi-slice-mode: + +``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` + (enum) + +enum v4l2_mpeg_video_multi_slice_mode - + Determines how the encoder should handle division of frame into + slices. Applicable to the encoder. Possible values are: + + + +.. tabularcolumns:: |p{8.7cm}|p{8.8cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE`` + - Single slice per frame. + * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB`` + - Multiple slices with set maximum number of macroblocks per slice. + * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES`` + - Multiple slice with set maximum size in bytes per slice. + + + +``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (integer)`` + The maximum number of macroblocks in a slice. Used when + ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to + ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``. Applicable to the + encoder. + +``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (integer)`` + The maximum size of a slice in bytes. Used when + ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to + ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``. Applicable to the + encoder. + +.. _v4l2-mpeg-video-h264-loop-filter-mode: + +``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE`` + (enum) + +enum v4l2_mpeg_video_h264_loop_filter_mode - + Loop filter mode for H264 encoder. Possible values are: + + + +.. tabularcolumns:: |p{14.0cm}|p{3.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED`` + - Loop filter is enabled. + * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED`` + - Loop filter is disabled. + * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY`` + - Loop filter is disabled at the slice boundary. + + + +``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (integer)`` + Loop filter alpha coefficient, defined in the H264 standard. + This value corresponds to the slice_alpha_c0_offset_div2 slice header + field, and should be in the range of -6 to +6, inclusive. The actual alpha + offset FilterOffsetA is twice this value. + Applicable to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (integer)`` + Loop filter beta coefficient, defined in the H264 standard. + This corresponds to the slice_beta_offset_div2 slice header field, and + should be in the range of -6 to +6, inclusive. The actual beta offset + FilterOffsetB is twice this value. + Applicable to the H264 encoder. + +.. _v4l2-mpeg-video-h264-entropy-mode: + +``V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE`` + (enum) + +enum v4l2_mpeg_video_h264_entropy_mode - + Entropy coding mode for H264 - CABAC/CAVALC. Applicable to the H264 + encoder. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC`` + - Use CAVLC entropy coding. + * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC`` + - Use CABAC entropy coding. 
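The slice and entropy controls above are typically programmed together before streaming starts. The sketch below asks a hypothetical H.264 encoder for CABAC entropy coding and slices of at most 120 macroblocks, setting all three controls in one atomic ``VIDIOC_S_EXT_CTRLS`` call; ``fd`` and the 120-macroblock figure are assumptions of the example.

.. code-block:: c

    /* Sketch: request CABAC entropy coding and slices of at most 120
     * macroblocks each. Assumes 'fd' is an open encoder node that
     * implements these controls. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int setup_h264_slicing(int fd)
    {
        struct v4l2_ext_control ctrl[3];
        struct v4l2_ext_controls ctrls;

        memset(ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));

        ctrl[0].id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE;
        ctrl[0].value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC;
        ctrl[1].id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE;
        ctrl[1].value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB;
        ctrl[2].id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB;
        ctrl[2].value = 120;

        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
        ctrls.count = 3;
        ctrls.controls = ctrl;

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }

Setting the controls in a single extended-controls call lets the driver validate the combination as a whole and report the offending control through ``error_idx`` if one of them is rejected.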
+ + + +``V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (boolean)`` + Enable 8X8 transform for H264. Applicable to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (boolean)`` + Enable constrained intra prediction for H264. Applicable to the H264 + encoder. + +``V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (integer)`` + Specify the offset that should be added to the luma quantization + parameter to determine the chroma quantization parameter. Applicable + to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (integer)`` + Cyclic intra macroblock refresh. This is the number of continuous + macroblocks refreshed every frame. Each frame a successive set of + macroblocks is refreshed until the cycle completes and starts from + the top of the frame. Applicable to H264, H263 and MPEG4 encoder. + +``V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (boolean)`` + Frame level rate control enable. If this control is disabled then + the quantization parameter for each frame type is constant and set + with appropriate controls (e.g. + ``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP``). If frame rate control is + enabled then quantization parameter is adjusted to meet the chosen + bitrate. Minimum and maximum value for the quantization parameter + can be set with appropriate controls (e.g. + ``V4L2_CID_MPEG_VIDEO_H263_MIN_QP``). Applicable to encoders. + +``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (boolean)`` + Macroblock level rate control enable. Applicable to the MPEG4 and + H264 encoders. + +``V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (boolean)`` + Quarter pixel motion estimation for MPEG4. Applicable to the MPEG4 + encoder. + +``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (integer)`` + Quantization parameter for an I frame for H263. Valid range: from 1 + to 31. + +``V4L2_CID_MPEG_VIDEO_H263_MIN_QP (integer)`` + Minimum quantization parameter for H263. Valid range: from 1 to 31. + +``V4L2_CID_MPEG_VIDEO_H263_MAX_QP (integer)`` + Maximum quantization parameter for H263. Valid range: from 1 to 31. + +``V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (integer)`` + Quantization parameter for an P frame for H263. Valid range: from 1 + to 31. + +``V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (integer)`` + Quantization parameter for an B frame for H263. Valid range: from 1 + to 31. + +``V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (integer)`` + Quantization parameter for an I frame for H264. Valid range: from 0 + to 51. + +``V4L2_CID_MPEG_VIDEO_H264_MIN_QP (integer)`` + Minimum quantization parameter for H264. Valid range: from 0 to 51. + +``V4L2_CID_MPEG_VIDEO_H264_MAX_QP (integer)`` + Maximum quantization parameter for H264. Valid range: from 0 to 51. + +``V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (integer)`` + Quantization parameter for an P frame for H264. Valid range: from 0 + to 51. + +``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (integer)`` + Quantization parameter for an B frame for H264. Valid range: from 0 + to 51. + +``V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (integer)`` + Quantization parameter for an I frame for MPEG4. Valid range: from 1 + to 31. + +``V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (integer)`` + Minimum quantization parameter for MPEG4. Valid range: from 1 to 31. + +``V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (integer)`` + Maximum quantization parameter for MPEG4. Valid range: from 1 to 31. + +``V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (integer)`` + Quantization parameter for an P frame for MPEG4. Valid range: from 1 + to 31. + +``V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (integer)`` + Quantization parameter for an B frame for MPEG4. Valid range: from 1 + to 31. 
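A common way to combine the rate-control and quantization controls described above is to enable frame-level rate control, give the hardware a target bitrate, and clamp the H.264 QP range. The sketch below does exactly that; the 4 Mbit/s target and the 10..42 QP range are illustrative values, and ``fd`` is assumed to be an open encoder node whose driver implements all four controls.

.. code-block:: c

    /* Sketch: frame-level rate control at 4 Mbit/s with the H.264 QP
     * clamped to the 10..42 range. All values are illustrative. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int setup_rate_control(int fd)
    {
        struct v4l2_ext_control ctrl[4];
        struct v4l2_ext_controls ctrls;

        memset(ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));

        ctrl[0].id = V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE;
        ctrl[0].value = 1;
        ctrl[1].id = V4L2_CID_MPEG_VIDEO_BITRATE;
        ctrl[1].value = 4000000;        /* bits per second */
        ctrl[2].id = V4L2_CID_MPEG_VIDEO_H264_MIN_QP;
        ctrl[2].value = 10;
        ctrl[3].id = V4L2_CID_MPEG_VIDEO_H264_MAX_QP;
        ctrl[3].value = 42;

        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
        ctrls.count = 4;
        ctrls.controls = ctrl;

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }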
+ +``V4L2_CID_MPEG_VIDEO_VBV_SIZE (integer)`` + The Video Buffer Verifier size in kilobytes, it is used as a + limitation of frame skip. The VBV is defined in the standard as a + mean to verify that the produced stream will be successfully + decoded. The standard describes it as "Part of a hypothetical + decoder that is conceptually connected to the output of the encoder. + Its purpose is to provide a constraint on the variability of the + data rate that an encoder or editing process may produce.". + Applicable to the MPEG1, MPEG2, MPEG4 encoders. + +.. _v4l2-mpeg-video-vbv-delay: + +``V4L2_CID_MPEG_VIDEO_VBV_DELAY (integer)`` + Sets the initial delay in milliseconds for VBV buffer control. + +.. _v4l2-mpeg-video-hor-search-range: + +``V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (integer)`` + Horizontal search range defines maximum horizontal search area in + pixels to search and match for the present Macroblock (MB) in the + reference picture. This V4L2 control macro is used to set horizontal + search range for motion estimation module in video encoder. + +.. _v4l2-mpeg-video-vert-search-range: + +``V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (integer)`` + Vertical search range defines maximum vertical search area in pixels + to search and match for the present Macroblock (MB) in the reference + picture. This V4L2 control macro is used to set vertical search + range for motion estimation module in video encoder. + +.. _v4l2-mpeg-video-force-key-frame: + +``V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (button)`` + Force a key frame for the next queued buffer. Applicable to + encoders. This is a general, codec-agnostic keyframe control. + +``V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (integer)`` + The Coded Picture Buffer size in kilobytes, it is used as a + limitation of frame skip. The CPB is defined in the H264 standard as + a mean to verify that the produced stream will be successfully + decoded. Applicable to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (integer)`` + Period between I-frames in the open GOP for H264. In case of an open + GOP this is the period between two I-frames. The period between IDR + (Instantaneous Decoding Refresh) frames is taken from the GOP_SIZE + control. An IDR frame, which stands for Instantaneous Decoding + Refresh is an I-frame after which no prior frames are referenced. + This means that a stream can be restarted from an IDR frame without + the need to store or decode any previous frames. Applicable to the + H264 encoder. + +.. _v4l2-mpeg-video-header-mode: + +``V4L2_CID_MPEG_VIDEO_HEADER_MODE`` + (enum) + +enum v4l2_mpeg_video_header_mode - + Determines whether the header is returned as the first buffer or is + it returned together with the first frame. Applicable to encoders. + Possible values are: + + + +.. tabularcolumns:: |p{10.3cm}|p{7.2cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE`` + - The stream header is returned separately in the first buffer. + * - ``V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME`` + - The stream header is returned together with the first encoded + frame. + + + +``V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (boolean)`` + Repeat the video sequence headers. Repeating these headers makes + random access to the video stream easier. Applicable to the MPEG1, 2 + and 4 encoder. + +``V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (boolean)`` + Enabled the deblocking post processing filter for MPEG4 decoder. + Applicable to the MPEG4 decoder. 
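``V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME`` is a button control, so the value written with it is ignored; what matters is when it is set relative to queueing output buffers. The following sketch, with ``fd`` assumed to be an open encoder node, shows the usual pattern of poking the button right before the buffer that should start a new GOP.

.. code-block:: c

    /* Sketch: request that the next queued frame be encoded as a key
     * frame, using the codec-agnostic button control described above.
     * Button controls ignore the value written. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int force_key_frame(int fd)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));
        ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;
        ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        /* Call this right before queueing the OUTPUT buffer that should
         * start a new GOP. */
        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }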
+ +``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_RES (integer)`` + vop_time_increment_resolution value for MPEG4. Applicable to the + MPEG4 encoder. + +``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_INC (integer)`` + vop_time_increment value for MPEG4. Applicable to the MPEG4 + encoder. + +``V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (boolean)`` + Enable generation of frame packing supplemental enhancement + information in the encoded bitstream. The frame packing SEI message + contains the arrangement of L and R planes for 3D viewing. + Applicable to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (boolean)`` + Sets current frame as frame0 in frame packing SEI. Applicable to the + H264 encoder. + +.. _v4l2-mpeg-video-h264-sei-fp-arrangement-type: + +``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE`` + (enum) + +enum v4l2_mpeg_video_h264_sei_fp_arrangement_type - + Frame packing arrangement type for H264 SEI. Applicable to the H264 + encoder. Possible values are: + +.. tabularcolumns:: |p{12cm}|p{5.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHEKERBOARD`` + - Pixels are alternatively from L and R. + * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN`` + - L and R are interlaced by column. + * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW`` + - L and R are interlaced by row. + * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE`` + - L is on the left, R on the right. + * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM`` + - L is on top, R on bottom. + * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL`` + - One view per frame. + + + +``V4L2_CID_MPEG_VIDEO_H264_FMO (boolean)`` + Enables flexible macroblock ordering in the encoded bitstream. It is + a technique used for restructuring the ordering of macroblocks in + pictures. Applicable to the H264 encoder. + +.. _v4l2-mpeg-video-h264-fmo-map-type: + +``V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE`` + (enum) + +enum v4l2_mpeg_video_h264_fmo_map_type - + When using FMO, the map type divides the image in different scan + patterns of macroblocks. Applicable to the H264 encoder. Possible + values are: + +.. tabularcolumns:: |p{12.5cm}|p{5.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES`` + - Slices are interleaved one after other with macroblocks in run + length order. + * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES`` + - Scatters the macroblocks based on a mathematical function known to + both encoder and decoder. + * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER`` + - Macroblocks arranged in rectangular areas or regions of interest. + * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT`` + - Slice groups grow in a cyclic way from centre to outwards. + * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN`` + - Slice groups grow in raster scan pattern from left to right. + * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN`` + - Slice groups grow in wipe scan pattern from top to bottom. + * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT`` + - User defined map type. + + + +``V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (integer)`` + Number of slice groups in FMO. Applicable to the H264 encoder. + +.. _v4l2-mpeg-video-h264-fmo-change-direction: + +``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION`` + (enum) + +enum v4l2_mpeg_video_h264_fmo_change_dir - + Specifies a direction of the slice group change for raster and wipe + maps. 
Applicable to the H264 encoder. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT`` + - Raster scan or wipe right. + * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT`` + - Reverse raster scan or wipe left. + + + +``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (integer)`` + Specifies the size of the first slice group for raster and wipe map. + Applicable to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (integer)`` + Specifies the number of consecutive macroblocks for the interleaved + map. Applicable to the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_ASO (boolean)`` + Enables arbitrary slice ordering in encoded bitstream. Applicable to + the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (integer)`` + Specifies the slice order in ASO. Applicable to the H264 encoder. + The supplied 32-bit integer is interpreted as follows (bit 0 = least + significant bit): + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - Bit 0:15 + - Slice ID + * - Bit 16:32 + - Slice position or order + + + +``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (boolean)`` + Enables H264 hierarchical coding. Applicable to the H264 encoder. + +.. _v4l2-mpeg-video-h264-hierarchical-coding-type: + +``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE`` + (enum) + +enum v4l2_mpeg_video_h264_hierarchical_coding_type - + Specifies the hierarchical coding type. Applicable to the H264 + encoder. Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B`` + - Hierarchical B coding. + * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P`` + - Hierarchical P coding. + + + +``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (integer)`` + Specifies the number of hierarchical coding layers. Applicable to + the H264 encoder. + +``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (integer)`` + Specifies a user defined QP for each layer. Applicable to the H264 + encoder. The supplied 32-bit integer is interpreted as follows (bit + 0 = least significant bit): + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - Bit 0:15 + - QP value + * - Bit 16:32 + - Layer number + + + +.. _v4l2-mpeg-mpeg2: + +``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (struct)`` + Specifies the slice parameters (as extracted from the bitstream) for the + associated MPEG-2 slice data. This includes the necessary parameters for + configuring a stateless hardware decoding pipeline for MPEG-2. + The bitstream parameters are defined according to :ref:`mpeg2part2`. + + .. note:: + + This compound control is not yet part of the public kernel API and + it is expected to change. + +.. c:type:: v4l2_ctrl_mpeg2_slice_params + +.. cssclass:: longtable + +.. flat-table:: struct v4l2_ctrl_mpeg2_slice_params + :header-rows: 0 + :stub-columns: 0 + :widths: 1 1 2 + + * - __u32 + - ``bit_size`` + - Size (in bits) of the current slice data. + * - __u32 + - ``data_bit_offset`` + - Offset (in bits) to the video data in the current slice data. + * - struct :c:type:`v4l2_mpeg2_sequence` + - ``sequence`` + - Structure with MPEG-2 sequence metadata, merging relevant fields from + the sequence header and sequence extension parts of the bitstream. + * - struct :c:type:`v4l2_mpeg2_picture` + - ``picture`` + - Structure with MPEG-2 picture metadata, merging relevant fields from + the picture header and picture coding extension parts of the bitstream. 
+ * - __u64 + - ``backward_ref_ts`` + - Timestamp of the V4L2 capture buffer to use as backward reference, used + with B-coded and P-coded frames. The timestamp refers to the + ``timestamp`` field in struct :c:type:`v4l2_buffer`. Use the + :c:func:`v4l2_timeval_to_ns()` function to convert the struct + :c:type:`timeval` in struct :c:type:`v4l2_buffer` to a __u64. + * - __u64 + - ``forward_ref_ts`` + - Timestamp for the V4L2 capture buffer to use as forward reference, used + with B-coded frames. The timestamp refers to the ``timestamp`` field in + struct :c:type:`v4l2_buffer`. Use the :c:func:`v4l2_timeval_to_ns()` + function to convert the struct :c:type:`timeval` in struct + :c:type:`v4l2_buffer` to a __u64. + * - __u32 + - ``quantiser_scale_code`` + - Code used to determine the quantization scale to use for the IDCT. + +.. c:type:: v4l2_mpeg2_sequence + +.. cssclass:: longtable + +.. flat-table:: struct v4l2_mpeg2_sequence + :header-rows: 0 + :stub-columns: 0 + :widths: 1 1 2 + + * - __u16 + - ``horizontal_size`` + - The width of the displayable part of the frame's luminance component. + * - __u16 + - ``vertical_size`` + - The height of the displayable part of the frame's luminance component. + * - __u32 + - ``vbv_buffer_size`` + - Used to calculate the required size of the video buffering verifier, + defined (in bits) as: 16 * 1024 * vbv_buffer_size. + * - __u16 + - ``profile_and_level_indication`` + - The current profile and level indication as extracted from the + bitstream. + * - __u8 + - ``progressive_sequence`` + - Indication that all the frames for the sequence are progressive instead + of interlaced. + * - __u8 + - ``chroma_format`` + - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4). + +.. c:type:: v4l2_mpeg2_picture + +.. cssclass:: longtable + +.. flat-table:: struct v4l2_mpeg2_picture + :header-rows: 0 + :stub-columns: 0 + :widths: 1 1 2 + + * - __u8 + - ``picture_coding_type`` + - Picture coding type for the frame covered by the current slice + (V4L2_MPEG2_PICTURE_CODING_TYPE_I, V4L2_MPEG2_PICTURE_CODING_TYPE_P or + V4L2_MPEG2_PICTURE_CODING_TYPE_B). + * - __u8 + - ``f_code[2][2]`` + - Motion vector codes. + * - __u8 + - ``intra_dc_precision`` + - Precision of Discrete Cosine transform (0: 8 bits precision, + 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision). + * - __u8 + - ``picture_structure`` + - Picture structure (1: interlaced top field, 2: interlaced bottom field, + 3: progressive frame). + * - __u8 + - ``top_field_first`` + - If set to 1 and interlaced stream, top field is output first. + * - __u8 + - ``frame_pred_frame_dct`` + - If set to 1, only frame-DCT and frame prediction are used. + * - __u8 + - ``concealment_motion_vectors`` + - If set to 1, motion vectors are coded for intra macroblocks. + * - __u8 + - ``q_scale_type`` + - This flag affects the inverse quantization process. + * - __u8 + - ``intra_vlc_format`` + - This flag affects the decoding of transform coefficient data. + * - __u8 + - ``alternate_scan`` + - This flag affects the decoding of transform coefficient data. + * - __u8 + - ``repeat_first_field`` + - This flag affects the decoding process of progressive frames. + * - __u16 + - ``progressive_frame`` + - Indicates whether the current frame is progressive. + +``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (struct)`` + Specifies quantization matrices (as extracted from the bitstream) for the + associated MPEG-2 slice data. + + .. 
note:: + + This compound control is not yet part of the public kernel API and + it is expected to change. + +.. c:type:: v4l2_ctrl_mpeg2_quantization + +.. cssclass:: longtable + +.. flat-table:: struct v4l2_ctrl_mpeg2_quantization + :header-rows: 0 + :stub-columns: 0 + :widths: 1 1 2 + + * - __u8 + - ``load_intra_quantiser_matrix`` + - One bit to indicate whether to load the ``intra_quantiser_matrix`` data. + * - __u8 + - ``load_non_intra_quantiser_matrix`` + - One bit to indicate whether to load the ``non_intra_quantiser_matrix`` + data. + * - __u8 + - ``load_chroma_intra_quantiser_matrix`` + - One bit to indicate whether to load the + ``chroma_intra_quantiser_matrix`` data, only relevant for non-4:2:0 YUV + formats. + * - __u8 + - ``load_chroma_non_intra_quantiser_matrix`` + - One bit to indicate whether to load the + ``chroma_non_intra_quantiser_matrix`` data, only relevant for non-4:2:0 + YUV formats. + * - __u8 + - ``intra_quantiser_matrix[64]`` + - The quantization matrix coefficients for intra-coded frames, in zigzag + scanning order. It is relevant for both luma and chroma components, + although it can be superseded by the chroma-specific matrix for + non-4:2:0 YUV formats. + * - __u8 + - ``non_intra_quantiser_matrix[64]`` + - The quantization matrix coefficients for non-intra-coded frames, in + zigzag scanning order. It is relevant for both luma and chroma + components, although it can be superseded by the chroma-specific matrix + for non-4:2:0 YUV formats. + * - __u8 + - ``chroma_intra_quantiser_matrix[64]`` + - The quantization matrix coefficients for the chominance component of + intra-coded frames, in zigzag scanning order. Only relevant for + non-4:2:0 YUV formats. + * - __u8 + - ``chroma_non_intra_quantiser_matrix[64]`` + - The quantization matrix coefficients for the chrominance component of + non-intra-coded frames, in zigzag scanning order. Only relevant for + non-4:2:0 YUV formats. + +MFC 5.1 MPEG Controls +===================== + +The following MPEG class controls deal with MPEG decoding and encoding +settings that are specific to the Multi Format Codec 5.1 device present +in the S5P family of SoCs by Samsung. + + +.. _mfc51-control-id: + +MFC 5.1 Control IDs +------------------- + +``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (boolean)`` + If the display delay is enabled then the decoder is forced to return + a CAPTURE buffer (decoded frame) after processing a certain number + of OUTPUT buffers. The delay can be set through + ``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY``. This + feature can be used for example for generating thumbnails of videos. + Applicable to the H264 decoder. + +``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (integer)`` + Display delay value for H264 decoder. The decoder is forced to + return a decoded frame after the set 'display delay' number of + frames. If this number is low it may result in frames returned out + of display order, in addition the hardware may still be using the + returned buffer as a reference picture for subsequent frames. + +``V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (integer)`` + The number of reference pictures used for encoding a P picture. + Applicable to the H264 encoder. + +``V4L2_CID_MPEG_MFC51_VIDEO_PADDING (boolean)`` + Padding enable in the encoder - use a color instead of repeating + border pixels. Applicable to encoders. + +``V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (integer)`` + Padding color in the encoder. Applicable to encoders. 
The supplied + 32-bit integer is interpreted as follows (bit 0 = least significant + bit): + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - Bit 0:7 + - V chrominance information + * - Bit 8:15 + - U chrominance information + * - Bit 16:23 + - Y luminance information + * - Bit 24:31 + - Must be zero. + + + +``V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (integer)`` + Reaction coefficient for MFC rate control. Applicable to encoders. + + .. note:: + + #. Valid only when the frame level RC is enabled. + + #. For tight CBR, this field must be small (ex. 2 ~ 10). For + VBR, this field must be large (ex. 100 ~ 1000). + + #. It is not recommended to use the greater number than + FRAME_RATE * (10^9 / BIT_RATE). + +``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (boolean)`` + Adaptive rate control for dark region. Valid only when H.264 and + macroblock level RC is enabled + (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 + encoder. + +``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (boolean)`` + Adaptive rate control for smooth region. Valid only when H.264 and + macroblock level RC is enabled + (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 + encoder. + +``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (boolean)`` + Adaptive rate control for static region. Valid only when H.264 and + macroblock level RC is enabled + (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 + encoder. + +``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (boolean)`` + Adaptive rate control for activity region. Valid only when H.264 and + macroblock level RC is enabled + (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 + encoder. + +.. _v4l2-mpeg-mfc51-video-frame-skip-mode: + +``V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE`` + (enum) + +enum v4l2_mpeg_mfc51_video_frame_skip_mode - + Indicates in what conditions the encoder should skip frames. If + encoding a frame would cause the encoded stream to be larger then a + chosen data limit then the frame will be skipped. Possible values + are: + + +.. tabularcolumns:: |p{9.0cm}|p{8.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED`` + - Frame skip mode is disabled. + * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT`` + - Frame skip mode enabled and buffer limit is set by the chosen + level and is defined by the standard. + * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT`` + - Frame skip mode enabled and buffer limit is set by the VBV + (MPEG1/2/4) or CPB (H264) buffer size control. + + + +``V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (integer)`` + Enable rate-control with fixed target bit. If this setting is + enabled, then the rate control logic of the encoder will calculate + the average bitrate for a GOP and keep it below or equal the set + bitrate target. Otherwise the rate control logic calculates the + overall average bitrate for the stream and keeps it below or equal + to the set bitrate. In the first case the average bitrate for the + whole stream will be smaller then the set bitrate. This is caused + because the average is calculated for smaller number of frames, on + the other hand enabling this setting will ensure that the stream + will meet tight bandwidth constraints. Applicable to encoders. + +.. _v4l2-mpeg-mfc51-video-force-frame-type: + +``V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE`` + (enum) + +enum v4l2_mpeg_mfc51_video_force_frame_type - + Force a frame type for the next queued buffer. Applicable to + encoders. 
Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED`` + - Forcing a specific frame type disabled. + * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME`` + - Force an I-frame. + * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED`` + - Force a non-coded frame. + + + + +CX2341x MPEG Controls +===================== + +The following MPEG class controls deal with MPEG encoding settings that +are specific to the Conexant CX23415 and CX23416 MPEG encoding chips. + + +.. _cx2341x-control-id: + +CX2341x Control IDs +------------------- + +.. _v4l2-mpeg-cx2341x-video-spatial-filter-mode: + +``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE`` + (enum) + +enum v4l2_mpeg_cx2341x_video_spatial_filter_mode - + Sets the Spatial Filter mode (default ``MANUAL``). Possible values + are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL`` + - Choose the filter manually + * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO`` + - Choose the filter automatically + + + +``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (integer (0-15))`` + The setting for the Spatial Filter. 0 = off, 15 = maximum. (Default + is 0.) + +.. _luma-spatial-filter-type: + +``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE`` + (enum) + +enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type - + Select the algorithm to use for the Luma Spatial Filter (default + ``1D_HOR``). Possible values: + + + +.. tabularcolumns:: |p{14.5cm}|p{3.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF`` + - No filter + * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR`` + - One-dimensional horizontal + * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT`` + - One-dimensional vertical + * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE`` + - Two-dimensional separable + * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE`` + - Two-dimensional symmetrical non-separable + + + +.. _chroma-spatial-filter-type: + +``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE`` + (enum) + +enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type - + Select the algorithm for the Chroma Spatial Filter (default + ``1D_HOR``). Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF`` + - No filter + * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR`` + - One-dimensional horizontal + + + +.. _v4l2-mpeg-cx2341x-video-temporal-filter-mode: + +``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE`` + (enum) + +enum v4l2_mpeg_cx2341x_video_temporal_filter_mode - + Sets the Temporal Filter mode (default ``MANUAL``). Possible values + are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL`` + - Choose the filter manually + * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO`` + - Choose the filter automatically + + + +``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (integer (0-31))`` + The setting for the Temporal Filter. 0 = off, 31 = maximum. (Default + is 8 for full-scale capturing and 0 for scaled capturing.) + +.. _v4l2-mpeg-cx2341x-video-median-filter-type: + +``V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE`` + (enum) + +enum v4l2_mpeg_cx2341x_video_median_filter_type - + Median Filter Type (default ``OFF``). 
Possible values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF`` + - No filter + * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR`` + - Horizontal filter + * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT`` + - Vertical filter + * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT`` + - Horizontal and vertical filter + * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG`` + - Diagonal filter + + + +``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (integer (0-255))`` + Threshold above which the luminance median filter is enabled + (default 0) + +``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (integer (0-255))`` + Threshold below which the luminance median filter is enabled + (default 255) + +``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (integer (0-255))`` + Threshold above which the chroma median filter is enabled (default + 0) + +``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (integer (0-255))`` + Threshold below which the chroma median filter is enabled (default + 255) + +``V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (boolean)`` + The CX2341X MPEG encoder can insert one empty MPEG-2 PES packet into + the stream between every four video frames. The packet size is 2048 + bytes, including the packet_start_code_prefix and stream_id + fields. The stream_id is 0xBF (private stream 2). The payload + consists of 0x00 bytes, to be filled in by the application. 0 = do + not insert, 1 = insert packets. + + +VPX Control Reference +===================== + +The VPX controls include controls for encoding parameters of VPx video +codec. + + +.. _vpx-control-id: + +VPX Control IDs +--------------- + +.. _v4l2-vpx-num-partitions: + +``V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS`` + (enum) + +enum v4l2_vp8_num_partitions - + The number of token partitions to use in VP8 encoder. Possible + values are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION`` + - 1 coefficient partition + * - ``V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS`` + - 2 coefficient partitions + * - ``V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS`` + - 4 coefficient partitions + * - ``V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS`` + - 8 coefficient partitions + + + +``V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (boolean)`` + Setting this prevents intra 4x4 mode in the intra mode decision. + +.. _v4l2-vpx-num-ref-frames: + +``V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES`` + (enum) + +enum v4l2_vp8_num_ref_frames - + The number of reference pictures for encoding P frames. Possible + values are: + +.. tabularcolumns:: |p{7.9cm}|p{9.6cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME`` + - Last encoded frame will be searched + * - ``V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME`` + - Two frames will be searched among the last encoded frame, the + golden frame and the alternate reference (altref) frame. The + encoder implementation will decide which two are chosen. + * - ``V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME`` + - The last encoded frame, the golden frame and the altref frame will + be searched. + + + +``V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (integer)`` + Indicates the loop filter level. The adjustment of the loop filter + level is done via a delta value against a baseline loop filter + value. + +``V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (integer)`` + This parameter affects the loop filter. 
Anything above zero weakens + the deblocking effect on the loop filter. + +``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (integer)`` + Sets the refresh period for the golden frame. The period is defined + in number of frames. For a value of 'n', every nth frame starting + from the first key frame will be taken as a golden frame. For eg. + for encoding sequence of 0, 1, 2, 3, 4, 5, 6, 7 where the golden + frame refresh period is set as 4, the frames 0, 4, 8 etc will be + taken as the golden frames as frame 0 is always a key frame. + +.. _v4l2-vpx-golden-frame-sel: + +``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL`` + (enum) + +enum v4l2_vp8_golden_frame_sel - + Selects the golden frame for encoding. Possible values are: + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV`` + - Use the (n-2)th frame as a golden frame, current frame index being + 'n'. + * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD`` + - Use the previous specific frame indicated by + ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD`` as a + golden frame. + +.. raw:: latex + + \normalsize + + +``V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (integer)`` + Minimum quantization parameter for VP8. + +``V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (integer)`` + Maximum quantization parameter for VP8. + +``V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (integer)`` + Quantization parameter for an I frame for VP8. + +``V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (integer)`` + Quantization parameter for a P frame for VP8. + +.. _v4l2-mpeg-video-vp8-profile: + +``V4L2_CID_MPEG_VIDEO_VP8_PROFILE`` + (enum) + +enum v4l2_mpeg_video_vp8_profile - + This control allows selecting the profile for VP8 encoder. + This is also used to enumerate supported profiles by VP8 encoder or decoder. + Possible values are: + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_0`` + - Profile 0 + * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_1`` + - Profile 1 + * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_2`` + - Profile 2 + * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_3`` + - Profile 3 + +.. _v4l2-mpeg-video-vp9-profile: + +``V4L2_CID_MPEG_VIDEO_VP9_PROFILE`` + (enum) + +enum v4l2_mpeg_video_vp9_profile - + This control allows selecting the profile for VP9 encoder. + This is also used to enumerate supported profiles by VP9 encoder or decoder. + Possible values are: + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_0`` + - Profile 0 + * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_1`` + - Profile 1 + * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_2`` + - Profile 2 + * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_3`` + - Profile 3 + + +High Efficiency Video Coding (HEVC/H.265) Control Reference +=========================================================== + +The HEVC/H.265 controls include controls for encoding parameters of HEVC/H.265 +video codec. + + +.. _hevc-control-id: + +HEVC/H.265 Control IDs +---------------------- + +``V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (integer)`` + Minimum quantization parameter for HEVC. + Valid range: from 0 to 51. + +``V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (integer)`` + Maximum quantization parameter for HEVC. + Valid range: from 0 to 51. + +``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (integer)`` + Quantization parameter for an I frame for HEVC. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (integer)`` + Quantization parameter for a P frame for HEVC. 
+ Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (integer)`` + Quantization parameter for a B frame for HEVC. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (boolean)`` + HIERARCHICAL_QP allows the host to specify the quantization parameter + values for each temporal layer through HIERARCHICAL_QP_LAYER. This is + valid only if HIERARCHICAL_CODING_LAYER is greater than 1. Setting the + control value to 1 enables setting of the QP values for the layers. + +.. _v4l2-hevc-hier-coding-type: + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE`` + (enum) + +enum v4l2_mpeg_video_hevc_hier_coding_type - + Selects the hierarchical coding type for encoding. Possible values are: + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B`` + - Use the B frame for hierarchical coding. + * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P`` + - Use the P frame for hierarchical coding. + +.. raw:: latex + + \normalsize + + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (integer)`` + Selects the hierarchical coding layer. In normal encoding + (non-hierarchial coding), it should be zero. Possible values are [0, 6]. + 0 indicates HIERARCHICAL CODING LAYER 0, 1 indicates HIERARCHICAL CODING + LAYER 1 and so on. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (integer)`` + Indicates quantization parameter for hierarchical coding layer 0. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (integer)`` + Indicates quantization parameter for hierarchical coding layer 1. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (integer)`` + Indicates quantization parameter for hierarchical coding layer 2. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (integer)`` + Indicates quantization parameter for hierarchical coding layer 3. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (integer)`` + Indicates quantization parameter for hierarchical coding layer 4. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (integer)`` + Indicates quantization parameter for hierarchical coding layer 5. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (integer)`` + Indicates quantization parameter for hierarchical coding layer 6. + Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, + V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. + +.. _v4l2-hevc-profile: + +``V4L2_CID_MPEG_VIDEO_HEVC_PROFILE`` + (enum) + +enum v4l2_mpeg_video_hevc_profile - + Select the desired profile for HEVC encoder. + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN`` + - Main profile. + * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE`` + - Main still picture profile. + * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10`` + - Main 10 profile. + +.. raw:: latex + + \normalsize + + +.. 
_v4l2-hevc-level: + +``V4L2_CID_MPEG_VIDEO_HEVC_LEVEL`` + (enum) + +enum v4l2_mpeg_video_hevc_level - + Selects the desired level for HEVC encoder. + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_1`` + - Level 1.0 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2`` + - Level 2.0 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1`` + - Level 2.1 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3`` + - Level 3.0 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1`` + - Level 3.1 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4`` + - Level 4.0 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1`` + - Level 4.1 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5`` + - Level 5.0 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1`` + - Level 5.1 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2`` + - Level 5.2 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6`` + - Level 6.0 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1`` + - Level 6.1 + * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2`` + - Level 6.2 + +.. raw:: latex + + \normalsize + + +``V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (integer)`` + Indicates the number of evenly spaced subintervals, called ticks, within + one second. This is a 16 bit unsigned integer and has a maximum value up to + 0xffff and a minimum value of 1. + +.. _v4l2-hevc-tier: + +``V4L2_CID_MPEG_VIDEO_HEVC_TIER`` + (enum) + +enum v4l2_mpeg_video_hevc_tier - + TIER_FLAG specifies tiers information of the HEVC encoded picture. Tier + were made to deal with applications that differ in terms of maximum bit + rate. Setting the flag to 0 selects HEVC tier as Main tier and setting + this flag to 1 indicates High tier. High tier is for applications requiring + high bit rates. + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEVC_TIER_MAIN`` + - Main tier. + * - ``V4L2_MPEG_VIDEO_HEVC_TIER_HIGH`` + - High tier. + +.. raw:: latex + + \normalsize + + +``V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (integer)`` + Selects HEVC maximum coding unit depth. + +.. _v4l2-hevc-loop-filter-mode: + +``V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE`` + (enum) + +enum v4l2_mpeg_video_hevc_loop_filter_mode - + Loop filter mode for HEVC encoder. Possible values are: + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{10.7cm}|p{6.3cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED`` + - Loop filter is disabled. + * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED`` + - Loop filter is enabled. + * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY`` + - Loop filter is disabled at the slice boundary. + +.. raw:: latex + + \normalsize + + +``V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (integer)`` + Selects HEVC loop filter beta offset. The valid range is [-6, +6]. + +``V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (integer)`` + Selects HEVC loop filter tc offset. The valid range is [-6, +6]. + +.. _v4l2-hevc-refresh-type: + +``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE`` + (enum) + +enum v4l2_mpeg_video_hevc_hier_refresh_type - + Selects refresh type for HEVC encoder. + Host has to specify the period into + V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD. + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{8.0cm}|p{9.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE`` + - Use the B frame for hierarchical coding. 
+ * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA`` + - Use CRA (Clean Random Access Unit) picture encoding. + * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR`` + - Use IDR (Instantaneous Decoding Refresh) picture encoding. + +.. raw:: latex + + \normalsize + + +``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (integer)`` + Selects the refresh period for HEVC encoder. + This specifies the number of I pictures between two CRA/IDR pictures. + This is valid only if REFRESH_TYPE is not 0. + +``V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (boolean)`` + Indicates HEVC lossless encoding. Setting it to 0 disables lossless + encoding. Setting it to 1 enables lossless encoding. + +``V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (boolean)`` + Indicates constant intra prediction for HEVC encoder. Specifies the + constrained intra prediction in which intra largest coding unit (LCU) + prediction is performed by using residual data and decoded samples of + neighboring intra LCU only. Setting the value to 1 enables constant intra + prediction and setting the value to 0 disables constant intra prediction. + +``V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (boolean)`` + Indicates wavefront parallel processing for HEVC encoder. Setting it to 0 + disables the feature and setting it to 1 enables the wavefront parallel + processing. + +``V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (boolean)`` + Setting the value to 1 enables combination of P and B frame for HEVC + encoder. + +``V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (boolean)`` + Indicates temporal identifier for HEVC encoder which is enabled by + setting the value to 1. + +``V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (boolean)`` + Indicates bi-linear interpolation is conditionally used in the intra + prediction filtering process in the CVS when set to 1. Indicates bi-linear + interpolation is not used in the CVS when set to 0. + +``V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (integer)`` + Indicates maximum number of merge candidate motion vectors. + Values are from 0 to 4. + +``V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (boolean)`` + Indicates temporal motion vector prediction for HEVC encoder. Setting it to + 1 enables the prediction. Setting it to 0 disables the prediction. + +``V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (boolean)`` + Specifies if HEVC generates a stream with a size of the length field + instead of start code pattern. The size of the length field is configurable + through the V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD control. Setting + the value to 0 disables encoding without startcode pattern. Setting the + value to 1 will enables encoding without startcode pattern. + +.. _v4l2-hevc-size-of-length-field: + +``V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD`` +(enum) + +enum v4l2_mpeg_video_hevc_size_of_length_field - + Indicates the size of length field. + This is valid when encoding WITHOUT_STARTCODE_ENABLE is enabled. + +.. raw:: latex + + \footnotesize + +.. tabularcolumns:: |p{6.0cm}|p{11.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_0`` + - Generate start code pattern (Normal). + * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_1`` + - Generate size of length field instead of start code pattern and length is 1. + * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_2`` + - Generate size of length field instead of start code pattern and length is 2. + * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_4`` + - Generate size of length field instead of start code pattern and length is 4. + +.. 
raw:: latex + + \normalsize + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (integer)`` + Indicates bit rate for hierarchical coding layer 0 for HEVC encoder. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (integer)`` + Indicates bit rate for hierarchical coding layer 1 for HEVC encoder. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (integer)`` + Indicates bit rate for hierarchical coding layer 2 for HEVC encoder. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (integer)`` + Indicates bit rate for hierarchical coding layer 3 for HEVC encoder. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (integer)`` + Indicates bit rate for hierarchical coding layer 4 for HEVC encoder. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (integer)`` + Indicates bit rate for hierarchical coding layer 5 for HEVC encoder. + +``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (integer)`` + Indicates bit rate for hierarchical coding layer 6 for HEVC encoder. + +``V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (integer)`` + Selects number of P reference pictures required for HEVC encoder. + P-Frame can use 1 or 2 frames for reference. + +``V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (integer)`` + Indicates whether to generate SPS and PPS at every IDR. Setting it to 0 + disables generating SPS and PPS at every IDR. Setting it to one enables + generating SPS and PPS at every IDR. diff --git a/Documentation/media/uapi/v4l/ext-ctrls-detect.rst b/Documentation/media/uapi/v4l/ext-ctrls-detect.rst new file mode 100644 index 000000000000..8a45ce642829 --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-detect.rst @@ -0,0 +1,71 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections + +.. _detect-controls: + +************************ +Detect Control Reference +************************ + +The Detect class includes controls for common features of various motion +or object detection capable devices. + + +.. _detect-control-id: + +Detect Control IDs +================== + +``V4L2_CID_DETECT_CLASS (class)`` + The Detect class descriptor. Calling + :ref:`VIDIOC_QUERYCTRL` for this control will + return a description of this control class. + +``V4L2_CID_DETECT_MD_MODE (menu)`` + Sets the motion detection mode. + +.. tabularcolumns:: |p{7.5cm}|p{10.0cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_DETECT_MD_MODE_DISABLED`` + - Disable motion detection. + * - ``V4L2_DETECT_MD_MODE_GLOBAL`` + - Use a single motion detection threshold. + * - ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID`` + - The image is divided into a grid, each cell with its own motion + detection threshold. These thresholds are set through the + ``V4L2_CID_DETECT_MD_THRESHOLD_GRID`` matrix control. + * - ``V4L2_DETECT_MD_MODE_REGION_GRID`` + - The image is divided into a grid, each cell with its own region + value that specifies which per-region motion detection thresholds + should be used. Each region has its own thresholds. How these + per-region thresholds are set up is driver-specific. The region + values for the grid are set through the + ``V4L2_CID_DETECT_MD_REGION_GRID`` matrix control. 
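+
+A minimal userspace sketch of selecting a motion detection mode with
+``VIDIOC_S_EXT_CTRLS`` follows. It is illustrative only and not a normative
+part of this control description: the device node path, the choice of the
+``GLOBAL`` mode and the threshold value are assumptions, and drivers may
+support only a subset of the modes listed above.
+
+.. code-block:: c
+
+    #include <fcntl.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/videodev2.h>
+
+    /* Enable single-threshold motion detection. The node path and the
+     * example threshold value (100) are illustrative assumptions. */
+    static int enable_global_md(const char *devnode)
+    {
+        struct v4l2_ext_control ctrl[2];
+        struct v4l2_ext_controls ctrls;
+        int fd, ret;
+
+        fd = open(devnode, O_RDWR);
+        if (fd < 0)
+            return -1;
+
+        memset(ctrl, 0, sizeof(ctrl));
+        memset(&ctrls, 0, sizeof(ctrls));
+
+        ctrl[0].id = V4L2_CID_DETECT_MD_MODE;
+        ctrl[0].value = V4L2_DETECT_MD_MODE_GLOBAL;
+        ctrl[1].id = V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD;
+        ctrl[1].value = 100;
+
+        ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
+        ctrls.count = 2;
+        ctrls.controls = ctrl;
+
+        ret = ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
+        close(fd);
+        return ret;
+    }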
+ + + +``V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (integer)`` + Sets the global motion detection threshold to be used with the + ``V4L2_DETECT_MD_MODE_GLOBAL`` motion detection mode. + +``V4L2_CID_DETECT_MD_THRESHOLD_GRID (__u16 matrix)`` + Sets the motion detection thresholds for each cell in the grid. To + be used with the ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID`` motion + detection mode. Matrix element (0, 0) represents the cell at the + top-left of the grid. + +``V4L2_CID_DETECT_MD_REGION_GRID (__u8 matrix)`` + Sets the motion detection region value for each cell in the grid. To + be used with the ``V4L2_DETECT_MD_MODE_REGION_GRID`` motion + detection mode. Matrix element (0, 0) represents the cell at the + top-left of the grid. diff --git a/Documentation/media/uapi/v4l/ext-ctrls-dv.rst b/Documentation/media/uapi/v4l/ext-ctrls-dv.rst new file mode 100644 index 000000000000..57edf211875c --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-dv.rst @@ -0,0 +1,166 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections + +.. _dv-controls: + +******************************* +Digital Video Control Reference +******************************* + +The Digital Video control class is intended to control receivers and +transmitters for `VGA `__, +`DVI `__ +(Digital Visual Interface), HDMI (:ref:`hdmi`) and DisplayPort +(:ref:`dp`). These controls are generally expected to be private to +the receiver or transmitter subdevice that implements them, so they are +only exposed on the ``/dev/v4l-subdev*`` device node. + +.. note:: + + Note that these devices can have multiple input or output pads which are + hooked up to e.g. HDMI connectors. Even though the subdevice will + receive or transmit video from/to only one of those pads, the other pads + can still be active when it comes to EDID (Extended Display + Identification Data, :ref:`vesaedid`) and HDCP (High-bandwidth Digital + Content Protection System, :ref:`hdcp`) processing, allowing the + device to do the fairly slow EDID/HDCP handling in advance. This allows + for quick switching between connectors. + +These pads appear in several of the controls in this section as +bitmasks, one bit for each pad. Bit 0 corresponds to pad 0, bit 1 to pad +1, etc. The maximum value of the control is the set of valid pads. + + +.. _dv-control-id: + +Digital Video Control IDs +========================= + +``V4L2_CID_DV_CLASS (class)`` + The Digital Video class descriptor. + +``V4L2_CID_DV_TX_HOTPLUG (bitmask)`` + Many connectors have a hotplug pin which is high if EDID information + is available from the source. This control shows the state of the + hotplug pin as seen by the transmitter. Each bit corresponds to an + output pad on the transmitter. If an output pad does not have an + associated hotplug pin, then the bit for that pad will be 0. This + read-only control is applicable to DVI-D, HDMI and DisplayPort + connectors. + +``V4L2_CID_DV_TX_RXSENSE (bitmask)`` + Rx Sense is the detection of pull-ups on the TMDS clock lines. This + normally means that the sink has left/entered standby (i.e. the + transmitter can sense that the receiver is ready to receive video). 
+ Each bit corresponds to an output pad on the transmitter. If an + output pad does not have an associated Rx Sense, then the bit for + that pad will be 0. This read-only control is applicable to DVI-D + and HDMI devices. + +``V4L2_CID_DV_TX_EDID_PRESENT (bitmask)`` + When the transmitter sees the hotplug signal from the receiver it + will attempt to read the EDID. If set, then the transmitter has read + at least the first block (= 128 bytes). Each bit corresponds to an + output pad on the transmitter. If an output pad does not support + EDIDs, then the bit for that pad will be 0. This read-only control + is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors. + +``V4L2_CID_DV_TX_MODE`` + (enum) + +enum v4l2_dv_tx_mode - + HDMI transmitters can transmit in DVI-D mode (just video) or in HDMI + mode (video + audio + auxiliary data). This control selects which + mode to use: V4L2_DV_TX_MODE_DVI_D or V4L2_DV_TX_MODE_HDMI. + This control is applicable to HDMI connectors. + +``V4L2_CID_DV_TX_RGB_RANGE`` + (enum) + +enum v4l2_dv_rgb_range - + Select the quantization range for RGB output. V4L2_DV_RANGE_AUTO + follows the RGB quantization range specified in the standard for the + video interface (ie. :ref:`cea861` for HDMI). + V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the + standard to be compatible with sinks that have not implemented the + standard correctly (unfortunately quite common for HDMI and DVI-D). + Full range allows all possible values to be used whereas limited + range sets the range to (16 << (N-8)) - (235 << (N-8)) where N is + the number of bits per component. This control is applicable to VGA, + DVI-A/D, HDMI and DisplayPort connectors. + +``V4L2_CID_DV_TX_IT_CONTENT_TYPE`` + (enum) + +enum v4l2_dv_it_content_type - + Configures the IT Content Type of the transmitted video. This + information is sent over HDMI and DisplayPort connectors as part of + the AVI InfoFrame. The term 'IT Content' is used for content that + originates from a computer as opposed to content from a TV broadcast + or an analog source. The enum v4l2_dv_it_content_type defines + the possible content types: + +.. tabularcolumns:: |p{7.0cm}|p{10.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_DV_IT_CONTENT_TYPE_GRAPHICS`` + - Graphics content. Pixel data should be passed unfiltered and + without analog reconstruction. + * - ``V4L2_DV_IT_CONTENT_TYPE_PHOTO`` + - Photo content. The content is derived from digital still pictures. + The content should be passed through with minimal scaling and + picture enhancements. + * - ``V4L2_DV_IT_CONTENT_TYPE_CINEMA`` + - Cinema content. + * - ``V4L2_DV_IT_CONTENT_TYPE_GAME`` + - Game content. Audio and video latency should be minimized. + * - ``V4L2_DV_IT_CONTENT_TYPE_NO_ITC`` + - No IT Content information is available and the ITC bit in the AVI + InfoFrame is set to 0. + + + +``V4L2_CID_DV_RX_POWER_PRESENT (bitmask)`` + Detects whether the receiver receives power from the source (e.g. + HDMI carries 5V on one of the pins). This is often used to power an + eeprom which contains EDID information, such that the source can + read the EDID even if the sink is in standby/power off. Each bit + corresponds to an input pad on the receiver. If an input pad + cannot detect whether power is present, then the bit for that pad + will be 0. This read-only control is applicable to DVI-D, HDMI and + DisplayPort connectors. + +``V4L2_CID_DV_RX_RGB_RANGE`` + (enum) + +enum v4l2_dv_rgb_range - + Select the quantization range for RGB input. 
V4L2_DV_RANGE_AUTO + follows the RGB quantization range specified in the standard for the + video interface (ie. :ref:`cea861` for HDMI). + V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the + standard to be compatible with sources that have not implemented the + standard correctly (unfortunately quite common for HDMI and DVI-D). + Full range allows all possible values to be used whereas limited + range sets the range to (16 << (N-8)) - (235 << (N-8)) where N is + the number of bits per component. This control is applicable to VGA, + DVI-A/D, HDMI and DisplayPort connectors. + +``V4L2_CID_DV_RX_IT_CONTENT_TYPE`` + (enum) + +enum v4l2_dv_it_content_type - + Reads the IT Content Type of the received video. This information is + sent over HDMI and DisplayPort connectors as part of the AVI + InfoFrame. The term 'IT Content' is used for content that originates + from a computer as opposed to content from a TV broadcast or an + analog source. See ``V4L2_CID_DV_TX_IT_CONTENT_TYPE`` for the + available content types. diff --git a/Documentation/media/uapi/v4l/ext-ctrls-flash.rst b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst new file mode 100644 index 000000000000..5f30791c35b5 --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-flash.rst @@ -0,0 +1,192 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections + +.. _flash-controls: + +*********************** +Flash Control Reference +*********************** + +The V4L2 flash controls are intended to provide generic access to flash +controller devices. Flash controller devices are typically used in +digital cameras. + +The interface can support both LED and xenon flash devices. As of +writing this, there is no xenon flash driver using this interface. + + +.. _flash-controls-use-cases: + +Supported use cases +=================== + + +Unsynchronised LED flash (software strobe) +------------------------------------------ + +Unsynchronised LED flash is controlled directly by the host as the +sensor. The flash must be enabled by the host before the exposure of the +image starts and disabled once it ends. The host is fully responsible +for the timing of the flash. + +Example of such device: Nokia N900. + + +Synchronised LED flash (hardware strobe) +---------------------------------------- + +The synchronised LED flash is pre-programmed by the host (power and +timeout) but controlled by the sensor through a strobe signal from the +sensor to the flash. + +The sensor controls the flash duration and timing. This information +typically must be made available to the sensor. + + +LED flash as torch +------------------ + +LED flash may be used as torch in conjunction with another use case +involving camera or individually. + + +.. _flash-control-id: + +Flash Control IDs +----------------- + +``V4L2_CID_FLASH_CLASS (class)`` + The FLASH class descriptor. + +``V4L2_CID_FLASH_LED_MODE (menu)`` + Defines the mode of the flash LED, the high-power white LED attached + to the flash controller. Setting this control may not be possible in + presence of some faults. See V4L2_CID_FLASH_FAULT. + + + +.. 
flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_FLASH_LED_MODE_NONE`` + - Off. + * - ``V4L2_FLASH_LED_MODE_FLASH`` + - Flash mode. + * - ``V4L2_FLASH_LED_MODE_TORCH`` + - Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY. + + + +``V4L2_CID_FLASH_STROBE_SOURCE (menu)`` + Defines the source of the flash LED strobe. + +.. tabularcolumns:: |p{7.0cm}|p{10.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_FLASH_STROBE_SOURCE_SOFTWARE`` + - The flash strobe is triggered by using the + V4L2_CID_FLASH_STROBE control. + * - ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL`` + - The flash strobe is triggered by an external source. Typically + this is a sensor, which makes it possible to synchronises the + flash strobe start to exposure start. + + + +``V4L2_CID_FLASH_STROBE (button)`` + Strobe flash. Valid when V4L2_CID_FLASH_LED_MODE is set to + V4L2_FLASH_LED_MODE_FLASH and V4L2_CID_FLASH_STROBE_SOURCE + is set to V4L2_FLASH_STROBE_SOURCE_SOFTWARE. Setting this + control may not be possible in presence of some faults. See + V4L2_CID_FLASH_FAULT. + +``V4L2_CID_FLASH_STROBE_STOP (button)`` + Stop flash strobe immediately. + +``V4L2_CID_FLASH_STROBE_STATUS (boolean)`` + Strobe status: whether the flash is strobing at the moment or not. + This is a read-only control. + +``V4L2_CID_FLASH_TIMEOUT (integer)`` + Hardware timeout for flash. The flash strobe is stopped after this + period of time has passed from the start of the strobe. + +``V4L2_CID_FLASH_INTENSITY (integer)`` + Intensity of the flash strobe when the flash LED is in flash mode + (V4L2_FLASH_LED_MODE_FLASH). The unit should be milliamps (mA) + if possible. + +``V4L2_CID_FLASH_TORCH_INTENSITY (integer)`` + Intensity of the flash LED in torch mode + (V4L2_FLASH_LED_MODE_TORCH). The unit should be milliamps (mA) + if possible. Setting this control may not be possible in presence of + some faults. See V4L2_CID_FLASH_FAULT. + +``V4L2_CID_FLASH_INDICATOR_INTENSITY (integer)`` + Intensity of the indicator LED. The indicator LED may be fully + independent of the flash LED. The unit should be microamps (uA) if + possible. + +``V4L2_CID_FLASH_FAULT (bitmask)`` + Faults related to the flash. The faults tell about specific problems + in the flash chip itself or the LEDs attached to it. Faults may + prevent further use of some of the flash controls. In particular, + V4L2_CID_FLASH_LED_MODE is set to V4L2_FLASH_LED_MODE_NONE + if the fault affects the flash LED. Exactly which faults have such + an effect is chip dependent. Reading the faults resets the control + and returns the chip to a usable state if possible. + +.. tabularcolumns:: |p{8.0cm}|p{9.5cm}| + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_FLASH_FAULT_OVER_VOLTAGE`` + - Flash controller voltage to the flash LED has exceeded the limit + specific to the flash controller. + * - ``V4L2_FLASH_FAULT_TIMEOUT`` + - The flash strobe was still on when the timeout set by the user --- + V4L2_CID_FLASH_TIMEOUT control --- has expired. Not all flash + controllers may set this in all such conditions. + * - ``V4L2_FLASH_FAULT_OVER_TEMPERATURE`` + - The flash controller has overheated. + * - ``V4L2_FLASH_FAULT_SHORT_CIRCUIT`` + - The short circuit protection of the flash controller has been + triggered. + * - ``V4L2_FLASH_FAULT_OVER_CURRENT`` + - Current in the LED power supply has exceeded the limit specific to + the flash controller. 
+ * - ``V4L2_FLASH_FAULT_INDICATOR`` + - The flash controller has detected a short or open circuit + condition on the indicator LED. + * - ``V4L2_FLASH_FAULT_UNDER_VOLTAGE`` + - Flash controller voltage to the flash LED has been below the + minimum limit specific to the flash controller. + * - ``V4L2_FLASH_FAULT_INPUT_VOLTAGE`` + - The input voltage of the flash controller is below the limit under + which strobing the flash at full current will not be possible.The + condition persists until this flag is no longer set. + * - ``V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE`` + - The temperature of the LED has exceeded its allowed upper limit. + + + +``V4L2_CID_FLASH_CHARGE (boolean)`` + Enable or disable charging of the xenon flash capacitor. + +``V4L2_CID_FLASH_READY (boolean)`` + Is the flash ready to strobe? Xenon flashes require their capacitors + charged before strobing. LED flashes often require a cooldown period + after strobe during which another strobe will not be possible. This + is a read-only control. diff --git a/Documentation/media/uapi/v4l/ext-ctrls-fm-rx.rst b/Documentation/media/uapi/v4l/ext-ctrls-fm-rx.rst new file mode 100644 index 000000000000..3ed6dd7f586d --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-fm-rx.rst @@ -0,0 +1,95 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections + +.. _fm-rx-controls: + +***************************** +FM Receiver Control Reference +***************************** + +The FM Receiver (FM_RX) class includes controls for common features of +FM Reception capable devices. + + +.. _fm-rx-control-id: + +FM_RX Control IDs +================= + +``V4L2_CID_FM_RX_CLASS (class)`` + The FM_RX class descriptor. Calling + :ref:`VIDIOC_QUERYCTRL` for this control will + return a description of this control class. + +``V4L2_CID_RDS_RECEPTION (boolean)`` + Enables/disables RDS reception by the radio tuner + +``V4L2_CID_RDS_RX_PTY (integer)`` + Gets RDS Programme Type field. This encodes up to 31 pre-defined + programme types. + +``V4L2_CID_RDS_RX_PS_NAME (string)`` + Gets the Programme Service name (PS_NAME). It is intended for + static display on a receiver. It is the primary aid to listeners in + programme service identification and selection. In Annex E of + :ref:`iec62106`, the RDS specification, there is a full + description of the correct character encoding for Programme Service + name strings. Also from RDS specification, PS is usually a single + eight character text. However, it is also possible to find receivers + which can scroll strings sized as 8 x N characters. So, this control + must be configured with steps of 8 characters. The result is it must + always contain a string with size multiple of 8. + +``V4L2_CID_RDS_RX_RADIO_TEXT (string)`` + Gets the Radio Text info. It is a textual description of what is + being broadcasted. RDS Radio Text can be applied when broadcaster + wishes to transmit longer PS names, programme-related information or + any other text. In these cases, RadioText can be used in addition to + ``V4L2_CID_RDS_RX_PS_NAME``. The encoding for Radio Text strings is + also fully described in Annex E of :ref:`iec62106`. 
The length of + Radio Text strings depends on which RDS Block is being used to + transmit it, either 32 (2A block) or 64 (2B block). However, it is + also possible to find receivers which can scroll strings sized as 32 + x N or 64 x N characters. So, this control must be configured with + steps of 32 or 64 characters. The result is it must always contain a + string with size multiple of 32 or 64. + +``V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT (boolean)`` + If set, then a traffic announcement is in progress. + +``V4L2_CID_RDS_RX_TRAFFIC_PROGRAM (boolean)`` + If set, then the tuned programme carries traffic announcements. + +``V4L2_CID_RDS_RX_MUSIC_SPEECH (boolean)`` + If set, then this channel broadcasts music. If cleared, then it + broadcasts speech. If the transmitter doesn't make this distinction, + then it will be set. + +``V4L2_CID_TUNE_DEEMPHASIS`` + (enum) + +enum v4l2_deemphasis - + Configures the de-emphasis value for reception. A de-emphasis filter + is applied to the broadcast to accentuate the high audio + frequencies. Depending on the region, a time constant of either 50 + or 75 useconds is used. The enum v4l2_deemphasis defines possible + values for de-emphasis. Here they are: + + + +.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_DEEMPHASIS_DISABLED`` + - No de-emphasis is applied. + * - ``V4L2_DEEMPHASIS_50_uS`` + - A de-emphasis of 50 uS is used. + * - ``V4L2_DEEMPHASIS_75_uS`` + - A de-emphasis of 75 uS is used. diff --git a/Documentation/media/uapi/v4l/ext-ctrls-fm-tx.rst b/Documentation/media/uapi/v4l/ext-ctrls-fm-tx.rst new file mode 100644 index 000000000000..db88346d99fd --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-fm-tx.rst @@ -0,0 +1,188 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections + +.. _fm-tx-controls: + +******************************** +FM Transmitter Control Reference +******************************** + +The FM Transmitter (FM_TX) class includes controls for common features +of FM transmissions capable devices. Currently this class includes +parameters for audio compression, pilot tone generation, audio deviation +limiter, RDS transmission and tuning power features. + + +.. _fm-tx-control-id: + +FM_TX Control IDs +================= + +``V4L2_CID_FM_TX_CLASS (class)`` + The FM_TX class descriptor. Calling + :ref:`VIDIOC_QUERYCTRL` for this control will + return a description of this control class. + +``V4L2_CID_RDS_TX_DEVIATION (integer)`` + Configures RDS signal frequency deviation level in Hz. The range and + step are driver-specific. + +``V4L2_CID_RDS_TX_PI (integer)`` + Sets the RDS Programme Identification field for transmission. + +``V4L2_CID_RDS_TX_PTY (integer)`` + Sets the RDS Programme Type field for transmission. This encodes up + to 31 pre-defined programme types. + +``V4L2_CID_RDS_TX_PS_NAME (string)`` + Sets the Programme Service name (PS_NAME) for transmission. It is + intended for static display on a receiver. It is the primary aid to + listeners in programme service identification and selection. 
In + Annex E of :ref:`iec62106`, the RDS specification, there is a full + description of the correct character encoding for Programme Service + name strings. Also from RDS specification, PS is usually a single + eight character text. However, it is also possible to find receivers + which can scroll strings sized as 8 x N characters. So, this control + must be configured with steps of 8 characters. The result is it must + always contain a string with size multiple of 8. + +``V4L2_CID_RDS_TX_RADIO_TEXT (string)`` + Sets the Radio Text info for transmission. It is a textual + description of what is being broadcasted. RDS Radio Text can be + applied when broadcaster wishes to transmit longer PS names, + programme-related information or any other text. In these cases, + RadioText should be used in addition to ``V4L2_CID_RDS_TX_PS_NAME``. + The encoding for Radio Text strings is also fully described in Annex + E of :ref:`iec62106`. The length of Radio Text strings depends on + which RDS Block is being used to transmit it, either 32 (2A block) + or 64 (2B block). However, it is also possible to find receivers + which can scroll strings sized as 32 x N or 64 x N characters. So, + this control must be configured with steps of 32 or 64 characters. + The result is it must always contain a string with size multiple of + 32 or 64. + +``V4L2_CID_RDS_TX_MONO_STEREO (boolean)`` + Sets the Mono/Stereo bit of the Decoder Identification code. If set, + then the audio was recorded as stereo. + +``V4L2_CID_RDS_TX_ARTIFICIAL_HEAD (boolean)`` + Sets the + `Artificial Head `__ + bit of the Decoder Identification code. If set, then the audio was + recorded using an artificial head. + +``V4L2_CID_RDS_TX_COMPRESSED (boolean)`` + Sets the Compressed bit of the Decoder Identification code. If set, + then the audio is compressed. + +``V4L2_CID_RDS_TX_DYNAMIC_PTY (boolean)`` + Sets the Dynamic PTY bit of the Decoder Identification code. If set, + then the PTY code is dynamically switched. + +``V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT (boolean)`` + If set, then a traffic announcement is in progress. + +``V4L2_CID_RDS_TX_TRAFFIC_PROGRAM (boolean)`` + If set, then the tuned programme carries traffic announcements. + +``V4L2_CID_RDS_TX_MUSIC_SPEECH (boolean)`` + If set, then this channel broadcasts music. If cleared, then it + broadcasts speech. If the transmitter doesn't make this distinction, + then it should be set. + +``V4L2_CID_RDS_TX_ALT_FREQS_ENABLE (boolean)`` + If set, then transmit alternate frequencies. + +``V4L2_CID_RDS_TX_ALT_FREQS (__u32 array)`` + The alternate frequencies in kHz units. The RDS standard allows for + up to 25 frequencies to be defined. Drivers may support fewer + frequencies so check the array size. + +``V4L2_CID_AUDIO_LIMITER_ENABLED (boolean)`` + Enables or disables the audio deviation limiter feature. The limiter + is useful when trying to maximize the audio volume, minimize + receiver-generated distortion and prevent overmodulation. + +``V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (integer)`` + Sets the audio deviation limiter feature release time. Unit is in + useconds. Step and range are driver-specific. + +``V4L2_CID_AUDIO_LIMITER_DEVIATION (integer)`` + Configures audio frequency deviation level in Hz. The range and step + are driver-specific. + +``V4L2_CID_AUDIO_COMPRESSION_ENABLED (boolean)`` + Enables or disables the audio compression feature. 
+``V4L2_CID_AUDIO_LIMITER_ENABLED (boolean)`` + Enables or disables the audio deviation limiter feature. The limiter + is useful when trying to maximize the audio volume, minimize + receiver-generated distortion and prevent overmodulation. +
+``V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (integer)`` + Sets the audio deviation limiter feature release time. Unit is in + useconds. Step and range are driver-specific. +
+``V4L2_CID_AUDIO_LIMITER_DEVIATION (integer)`` + Configures the audio frequency deviation level in Hz. The range and step + are driver-specific. +
+``V4L2_CID_AUDIO_COMPRESSION_ENABLED (boolean)`` + Enables or disables the audio compression feature. This feature + amplifies signals below the threshold by a fixed gain and compresses + audio signals above the threshold by the ratio of Threshold/(Gain + + Threshold). +
+``V4L2_CID_AUDIO_COMPRESSION_GAIN (integer)`` + Sets the gain for the audio compression feature. It is a dB value. The + range and step are driver-specific. +
+``V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (integer)`` + Sets the threshold level for the audio compression feature. It is a dB + value. The range and step are driver-specific. +
+``V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (integer)`` + Sets the attack time for the audio compression feature. It is a useconds + value. The range and step are driver-specific. +
+``V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (integer)`` + Sets the release time for the audio compression feature. It is a + useconds value. The range and step are driver-specific. +
+``V4L2_CID_PILOT_TONE_ENABLED (boolean)`` + Enables or disables the pilot tone generation feature. +
+``V4L2_CID_PILOT_TONE_DEVIATION (integer)`` + Configures the pilot tone frequency deviation level. Unit is in Hz. The + range and step are driver-specific. +
+``V4L2_CID_PILOT_TONE_FREQUENCY (integer)`` + Configures the pilot tone frequency value. Unit is in Hz. The range and + step are driver-specific. +
+``V4L2_CID_TUNE_PREEMPHASIS`` + (enum) +
+enum v4l2_preemphasis - + Configures the pre-emphasis value for broadcasting. A pre-emphasis + filter is applied to the broadcast to accentuate the high audio + frequencies. Depending on the region, a time constant of either 50 + or 75 useconds is used. The enum v4l2_preemphasis defines possible + values for pre-emphasis. Here they are: + + +
+.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_PREEMPHASIS_DISABLED`` + - No pre-emphasis is applied. + * - ``V4L2_PREEMPHASIS_50_uS`` + - A pre-emphasis of 50 uS is used. + * - ``V4L2_PREEMPHASIS_75_uS`` + - A pre-emphasis of 75 uS is used. + + +
+``V4L2_CID_TUNE_POWER_LEVEL (integer)`` + Sets the output power level for signal transmission. Unit is in + dBuV. Range and step are driver-specific. +
+``V4L2_CID_TUNE_ANTENNA_CAPACITOR (integer)`` + This selects the value of the antenna tuning capacitor manually, or + automatically if set to zero. Unit, range and step are + driver-specific. +
+For more details about the RDS specification, refer to the :ref:`iec62106` +document from CENELEC.
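The following is a minimal usage sketch for the FM_TX controls documented above. It groups a few of them into a single ``VIDIOC_S_EXT_CTRLS`` request; the device node ``/dev/radio0``, the programme identification value and the pre-emphasis choice are placeholders picked for the example, not requirements of the API.

.. code-block:: c

    /* Sketch: program basic RDS data and FM TX tuning controls.
     * Device node and values are illustrative assumptions. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        char ps_name[9] = "RADIO  1";           /* 8 characters: a multiple of 8 */
        struct v4l2_ext_control ctrl[4];
        struct v4l2_ext_controls ctrls;
        int fd = open("/dev/radio0", O_RDWR);   /* hypothetical FM transmitter node */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(ctrl, 0, sizeof(ctrl));
        ctrl[0].id = V4L2_CID_RDS_TX_PI;
        ctrl[0].value = 0x1234;                 /* example programme identification */
        ctrl[1].id = V4L2_CID_RDS_TX_PS_NAME;
        ctrl[1].string = ps_name;
        ctrl[1].size = sizeof(ps_name);
        ctrl[2].id = V4L2_CID_TUNE_PREEMPHASIS;
        ctrl[2].value = V4L2_PREEMPHASIS_50_uS; /* region-dependent choice */
        ctrl[3].id = V4L2_CID_PILOT_TONE_ENABLED;
        ctrl[3].value = 1;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.ctrl_class = V4L2_CTRL_CLASS_FM_TX;
        ctrls.count = 4;
        ctrls.controls = ctrl;

        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
            perror("VIDIOC_S_EXT_CTRLS");

        close(fd);
        return 0;
    }

All four controls belong to the FM_TX class, so they can be passed in one request; a driver may still reject values outside the ranges it advertises through ``VIDIOC_QUERYCTRL``.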
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-image-process.rst b/Documentation/media/uapi/v4l/ext-ctrls-image-process.rst new file mode 100644 index 000000000000..22fc2d3e433d --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-image-process.rst @@ -0,0 +1,63 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections +
+.. _image-process-controls: +
+******************************* +Image Process Control Reference +******************************* +
+The Image Process control class is intended for low-level control of +image processing functions. Unlike ``V4L2_CID_IMAGE_SOURCE_CLASS``, the +controls in this class affect the processing of the image, and do not +control the capturing of it. + +
+.. _image-process-control-id: +
+Image Process Control IDs +========================= +
+``V4L2_CID_IMAGE_PROC_CLASS (class)`` + The IMAGE_PROC class descriptor. +
+``V4L2_CID_LINK_FREQ (integer menu)`` + Data bus frequency. Together with the media bus pixel code and bus + type (clock cycles per sample), the data bus frequency defines the pixel + rate (``V4L2_CID_PIXEL_RATE``) in the pixel array (or possibly + elsewhere, if the device is not an image sensor). The frame rate can + be calculated from the pixel clock, image width and height and + horizontal and vertical blanking. While the pixel rate control may + be defined elsewhere than in the subdev containing the pixel array, + the frame rate cannot be obtained from that information. This is + because only on the pixel array can it be assumed that the vertical + and horizontal blanking information is exact: no other blanking is + allowed in the pixel array. The selection of frame rate is performed + by selecting the desired horizontal and vertical blanking. The unit + of this control is Hz. +
+``V4L2_CID_PIXEL_RATE (64-bit integer)`` + Pixel rate in the source pads of the subdev. This control is + read-only and its unit is pixels / second. +
+``V4L2_CID_TEST_PATTERN (menu)`` + Some capture/display/sensor devices have the capability to generate + test pattern images. These hardware-specific test patterns can be + used to test if a device is working properly. +
+``V4L2_CID_DEINTERLACING_MODE (menu)`` + The video deinterlacing mode (such as Bob, Weave, ...). The menu items + are driver-specific and are documented in :ref:`v4l-drivers`. +
+``V4L2_CID_DIGITAL_GAIN (integer)`` + Digital gain is the value by which all colour components + are multiplied. Typically the digital gain applied is the + control value divided by e.g. 0x100, meaning that to get no + digital gain the control value needs to be 0x100. The no-gain + configuration is also typically the default.
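As an illustration of how these controls are typically read, the sketch below queries ``V4L2_CID_PIXEL_RATE`` (a 64-bit control, so it must be read through ``VIDIOC_G_EXT_CTRLS``) and resolves the current ``V4L2_CID_LINK_FREQ`` menu index into a frequency with ``VIDIOC_QUERYMENU``. The sub-device node name is a placeholder; real pipelines may expose these controls on a different entity.

.. code-block:: c

    /* Sketch: read the pixel rate and the active link frequency.
     * The sub-device node is an assumption made for the example. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;
        struct v4l2_querymenu qmenu;
        int fd = open("/dev/v4l-subdev0", O_RDWR);  /* hypothetical sensor subdev */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&ctrl, 0, sizeof(ctrl));
        memset(&ctrls, 0, sizeof(ctrls));
        ctrl.id = V4L2_CID_PIXEL_RATE;
        ctrls.ctrl_class = V4L2_CTRL_CLASS_IMAGE_PROC;
        ctrls.count = 1;
        ctrls.controls = &ctrl;
        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0)
            printf("pixel rate: %lld pixels/s\n", (long long)ctrl.value64);

        /* LINK_FREQ is an integer menu: the control value is an index and
         * VIDIOC_QUERYMENU returns the frequency behind that index. */
        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_LINK_FREQ;
        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) == 0) {
            memset(&qmenu, 0, sizeof(qmenu));
            qmenu.id = V4L2_CID_LINK_FREQ;
            qmenu.index = ctrl.value;
            if (ioctl(fd, VIDIOC_QUERYMENU, &qmenu) == 0)
                printf("link frequency: %lld Hz\n", (long long)qmenu.value);
        }

        close(fd);
        return 0;
    }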
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst new file mode 100644 index 000000000000..2c3ab5796d76 --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-image-source.rst @@ -0,0 +1,57 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections +
+.. _image-source-controls: +
+****************************** +Image Source Control Reference +****************************** +
+The Image Source control class is intended for low-level control of +image source devices such as image sensors. The devices feature an +analogue to digital converter and a bus transmitter to transmit the +image data out of the device. + +
+.. _image-source-control-id: +
+Image Source Control IDs +======================== +
+``V4L2_CID_IMAGE_SOURCE_CLASS (class)`` + The IMAGE_SOURCE class descriptor. +
+``V4L2_CID_VBLANK (integer)`` + Vertical blanking. The idle period after every frame during which no + image data is produced. The unit of vertical blanking is a line. + Every line has the length of the image width plus horizontal blanking, + at the pixel rate defined by the ``V4L2_CID_PIXEL_RATE`` control in the + same sub-device. +
+``V4L2_CID_HBLANK (integer)`` + Horizontal blanking. The idle period after every line of image data + during which no image data is produced. The unit of horizontal + blanking is pixels. +
+``V4L2_CID_ANALOGUE_GAIN (integer)`` + Analogue gain is gain affecting all colour components in the pixel + matrix. The gain operation is performed in the analogue domain + before A/D conversion. +
+``V4L2_CID_TEST_PATTERN_RED (integer)`` + Test pattern red colour component. +
+``V4L2_CID_TEST_PATTERN_GREENR (integer)`` + Test pattern green (next to red) colour component. +
+``V4L2_CID_TEST_PATTERN_BLUE (integer)`` + Test pattern blue colour component. +
+``V4L2_CID_TEST_PATTERN_GREENB (integer)`` + Test pattern green (next to blue) colour component.
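A worked illustration of the relationship described under ``V4L2_CID_VBLANK``: the frame rate follows from the pixel rate and the blanking values, since every line is ``width + hblank`` pixels long and every frame is ``height + vblank`` lines high. The sketch below reads the three controls in a single request; the sub-device node and the active width/height are assumptions made for the example (a real application would query the active sub-device format instead of hard-coding it).

.. code-block:: c

    /* Sketch: estimate the frame rate from pixel rate and blanking.
     * Node name and the active frame size are illustrative assumptions. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        const long long width = 1920, height = 1080;    /* assumed active format */
        struct v4l2_ext_control ctrl[3];
        struct v4l2_ext_controls ctrls;
        int fd = open("/dev/v4l-subdev0", O_RDWR);      /* hypothetical sensor subdev */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(ctrl, 0, sizeof(ctrl));
        ctrl[0].id = V4L2_CID_HBLANK;
        ctrl[1].id = V4L2_CID_VBLANK;
        ctrl[2].id = V4L2_CID_PIXEL_RATE;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;  /* allows mixing control classes */
        ctrls.count = 3;
        ctrls.controls = ctrl;

        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0) {
            perror("VIDIOC_G_EXT_CTRLS");
            close(fd);
            return 1;
        }

        long long pixels_per_line  = width + ctrl[0].value;
        long long pixels_per_frame = (height + ctrl[1].value) * pixels_per_line;
        double fps = (double)ctrl[2].value64 / (double)pixels_per_frame;
        printf("approximate frame rate: %.3f fps\n", fps);

        close(fd);
        return 0;
    }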
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst b/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst new file mode 100644 index 000000000000..cf9cd8a9f9b4 --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-jpeg.rst @@ -0,0 +1,113 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections +
+.. _jpeg-controls: +
+********************** +JPEG Control Reference +********************** +
+The JPEG class includes controls for common features of JPEG encoders +and decoders. Currently it includes features for codecs implementing +progressive baseline DCT compression process with Huffman entropy +coding. + +
+.. _jpeg-control-id: +
+JPEG Control IDs +================ +
+``V4L2_CID_JPEG_CLASS (class)`` + The JPEG class descriptor. Calling + :ref:`VIDIOC_QUERYCTRL` for this control will + return a description of this control class. +
+``V4L2_CID_JPEG_CHROMA_SUBSAMPLING (menu)`` + The chroma subsampling factors describe how each component of an + input image is sampled, with respect to the maximum sample rate in each + spatial dimension. See :ref:`itu-t81`, clause A.1.1. for more + details. The ``V4L2_CID_JPEG_CHROMA_SUBSAMPLING`` control determines + how Cb and Cr components are downsampled after converting an input + image from RGB to Y'CbCr color space. +
+.. tabularcolumns:: |p{7.0cm}|p{10.5cm}| +
+.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_444`` + - No chroma subsampling, each pixel has Y, Cr and Cb values. + * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_422`` + - Horizontally subsample Cr, Cb components by a factor of 2. + * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_420`` + - Subsample Cr, Cb components horizontally and vertically by 2. + * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_411`` + - Horizontally subsample Cr, Cb components by a factor of 4. + * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_410`` + - Subsample Cr, Cb components horizontally by 4 and vertically by 2. + * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY`` + - Use only luminance component. + + +
+``V4L2_CID_JPEG_RESTART_INTERVAL (integer)`` + The restart interval determines an interval of inserting RSTm + markers (m = 0..7). The purpose of these markers is to additionally + reinitialize the encoder process, in order to process blocks of an + image independently. For the lossy compression processes the restart + interval unit is the MCU (Minimum Coded Unit) and its value is contained + in the DRI (Define Restart Interval) marker. If the + ``V4L2_CID_JPEG_RESTART_INTERVAL`` control is set to 0, DRI and RSTm + markers will not be inserted. +
+.. _jpeg-quality-control: +
+``V4L2_CID_JPEG_COMPRESSION_QUALITY (integer)`` + The ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control determines the trade-off + between image quality and size. It provides a simpler method for + applications to control image quality, without a need for direct + reconfiguration of luminance and chrominance quantization tables. In + cases where a driver uses quantization tables configured directly by + an application, using interfaces defined elsewhere, the + ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control should be set by the + driver to 0. + + The value range of this control is driver-specific. Only positive, + non-zero values are meaningful. The recommended range is 1 - 100, + where larger values correspond to better image quality. +
+.. _jpeg-active-marker-control: +
+``V4L2_CID_JPEG_ACTIVE_MARKER (bitmask)`` + Specifies which JPEG markers are included in the compressed stream. This + control is valid only for encoders. + + +
+.. flat-table:: + :header-rows: 0 + :stub-columns: 0 + + * - ``V4L2_JPEG_ACTIVE_MARKER_APP0`` + - Application data segment APP\ :sub:`0`. + * - ``V4L2_JPEG_ACTIVE_MARKER_APP1`` + - Application data segment APP\ :sub:`1`. + * - ``V4L2_JPEG_ACTIVE_MARKER_COM`` + - Comment segment. + * - ``V4L2_JPEG_ACTIVE_MARKER_DQT`` + - Quantization tables segment. + * - ``V4L2_JPEG_ACTIVE_MARKER_DHT`` + - Huffman tables segment. + + +
+For more details about the JPEG specification, refer to :ref:`itu-t81`, +:ref:`jfif`, :ref:`w3c-jpeg-jfif`.
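For encoders, the two controls above are typically configured together before streaming starts. The sketch below requests a quality of 85 and asks the encoder to emit only the quantization and Huffman table segments; the device node and the chosen values are assumptions made for the example, and the accepted quality range is driver-specific as noted above.

.. code-block:: c

    /* Sketch: configure a JPEG encoder's quality and active markers.
     * Device node and chosen values are illustrative assumptions. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_ext_control ctrl[2];
        struct v4l2_ext_controls ctrls;
        int fd = open("/dev/video0", O_RDWR);   /* hypothetical JPEG encoder node */

        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(ctrl, 0, sizeof(ctrl));
        ctrl[0].id = V4L2_CID_JPEG_COMPRESSION_QUALITY;
        ctrl[0].value = 85;                     /* assumes the recommended 1-100 range */
        ctrl[1].id = V4L2_CID_JPEG_ACTIVE_MARKER;
        ctrl[1].value = V4L2_JPEG_ACTIVE_MARKER_DQT |
                        V4L2_JPEG_ACTIVE_MARKER_DHT;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.ctrl_class = V4L2_CTRL_CLASS_JPEG;
        ctrls.count = 2;
        ctrls.controls = ctrl;

        if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
            perror("VIDIOC_S_EXT_CTRLS");

        close(fd);
        return 0;
    }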
diff --git a/Documentation/media/uapi/v4l/ext-ctrls-rf-tuner.rst b/Documentation/media/uapi/v4l/ext-ctrls-rf-tuner.rst new file mode 100644 index 000000000000..0fb85ba878dd --- /dev/null +++ b/Documentation/media/uapi/v4l/ext-ctrls-rf-tuner.rst @@ -0,0 +1,96 @@ +.. Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections +
+.. _rf-tuner-controls: +
+************************** +RF Tuner Control Reference +************************** +
+The RF Tuner (RF_TUNER) class includes controls for common features of +devices having an RF tuner. +
+In this context, an RF tuner is the radio receiver circuit between the +antenna and the demodulator. It receives radio frequency (RF) from the +antenna and converts the received signal to a lower intermediate frequency +(IF) or baseband frequency (BB). Tuners that can produce a baseband output +are often called zero-IF tuners. Older tuners were typically simple PLL +tuners inside a metal box, while newer ones are highly integrated chips +without a metal box, so-called "silicon tuners". These controls are mostly +applicable to newer, feature-rich silicon tuners, simply because older +tuners do not have many adjustable features. +
+For more information about RF tuners see +`Tuner (radio) `__ +and `RF front end `__ +from Wikipedia. + +
+.. _rf-tuner-control-id: +
+RF_TUNER Control IDs +==================== +
+``V4L2_CID_RF_TUNER_CLASS (class)`` + The RF_TUNER class descriptor. Calling + :ref:`VIDIOC_QUERYCTRL` for this control will + return a description of this control class. +
+``V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (boolean)`` + Enables/disables automatic tuner radio channel bandwidth configuration. In + automatic mode the bandwidth configuration is performed by the driver. +
+``V4L2_CID_RF_TUNER_BANDWIDTH (integer)`` + Filters on the tuner signal path are used to filter the signal according + to the needs of the receiving party. The driver configures the filters to + fulfill the desired bandwidth requirement. Used when + V4L2_CID_RF_TUNER_BANDWIDTH_AUTO is not set. Unit is in Hz. The + range and step are driver-specific. +
+``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (boolean)`` + Enables/disables LNA automatic gain control (AGC). +
+``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (boolean)`` + Enables/disables mixer automatic gain control (AGC). +
+``V4L2_CID_RF_TUNER_IF_GAIN_AUTO (boolean)`` + Enables/disables IF automatic gain control (AGC). +
+``V4L2_CID_RF_TUNER_RF_GAIN (integer)`` + The RF amplifier is the very first amplifier on the receiver signal + path, right after the antenna input. The difference between the + LNA gain and the RF gain in this document is that the LNA gain is + integrated in the tuner chip while the RF gain is provided by a separate + chip. There may be both RF and LNA gain controls in the same device. The + range and step are driver-specific. +
+``V4L2_CID_RF_TUNER_LNA_GAIN (integer)`` + LNA (low noise amplifier) gain is the first gain stage on the RF tuner + signal path. It is located very close to the tuner antenna input. Used + when ``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO`` is not set. See + ``V4L2_CID_RF_TUNER_RF_GAIN`` to understand how the RF gain and the LNA + gain differ from each other. The range and step are + driver-specific. +
+``V4L2_CID_RF_TUNER_MIXER_GAIN (integer)`` + Mixer gain is the second gain stage on the RF tuner signal path. It is + located inside the mixer block, where the RF signal is down-converted by + the mixer. Used when ``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO`` is not set. + The range and step are driver-specific. +
+``V4L2_CID_RF_TUNER_IF_GAIN (integer)`` + IF gain is the last gain stage on the RF tuner signal path. It is + located at the output of the RF tuner and controls the signal level of + the intermediate frequency or baseband output. Used when + ``V4L2_CID_RF_TUNER_IF_GAIN_AUTO`` is not set. The range and step + are driver-specific. +
+``V4L2_CID_RF_TUNER_PLL_LOCK (boolean)`` + Indicates whether the synthesizer PLL is locked; the RF tuner is + receiving the given frequency when this control is set. This is a + read-only control. diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst index 286a2dd7ec36..24274b398e63 100644 --- a/Documentation/media/uapi/v4l/extended-controls.rst +++ b/Documentation/media/uapi/v4l/extended-controls.rst @@ -9,9 +9,9 @@ .. _extended-controls: -***************** -Extended Controls -***************** +********************* +Extended Controls API +********************* Introduction @@ -181,3902 +181,3 @@ The flags field of struct :ref:`v4l2_queryctrl ` also contains hints on the behavior of the control. See the :ref:`VIDIOC_QUERYCTRL` documentation for more details. - - -.. _mpeg-controls: - -Codec Control Reference -======================= - -Below all controls within the Codec control class are described. First -the generic controls, then controls specific for certain hardware. - -.. note:: - - These controls are applicable to all codecs and not just MPEG.
The - defines are prefixed with V4L2_CID_MPEG/V4L2_MPEG as the controls - were originally made for MPEG codecs and later extended to cover all - encoding formats. - - -Generic Codec Controls ----------------------- - - -.. _mpeg-control-id: - -Codec Control IDs -^^^^^^^^^^^^^^^^^ - -``V4L2_CID_MPEG_CLASS (class)`` - The Codec class descriptor. Calling - :ref:`VIDIOC_QUERYCTRL` for this control will - return a description of this control class. This description can be - used as the caption of a Tab page in a GUI, for example. - -.. _v4l2-mpeg-stream-type: - -``V4L2_CID_MPEG_STREAM_TYPE`` - (enum) - -enum v4l2_mpeg_stream_type - - The MPEG-1, -2 or -4 output stream type. One cannot assume anything - here. Each hardware MPEG encoder tends to support different subsets - of the available MPEG stream types. This control is specific to - multiplexed MPEG streams. The currently defined stream types are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_PS`` - - MPEG-2 program stream - * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_TS`` - - MPEG-2 transport stream - * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_SS`` - - MPEG-1 system stream - * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_DVD`` - - MPEG-2 DVD-compatible stream - * - ``V4L2_MPEG_STREAM_TYPE_MPEG1_VCD`` - - MPEG-1 VCD-compatible stream - * - ``V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD`` - - MPEG-2 SVCD-compatible stream - - - -``V4L2_CID_MPEG_STREAM_PID_PMT (integer)`` - Program Map Table Packet ID for the MPEG transport stream (default - 16) - -``V4L2_CID_MPEG_STREAM_PID_AUDIO (integer)`` - Audio Packet ID for the MPEG transport stream (default 256) - -``V4L2_CID_MPEG_STREAM_PID_VIDEO (integer)`` - Video Packet ID for the MPEG transport stream (default 260) - -``V4L2_CID_MPEG_STREAM_PID_PCR (integer)`` - Packet ID for the MPEG transport stream carrying PCR fields (default - 259) - -``V4L2_CID_MPEG_STREAM_PES_ID_AUDIO (integer)`` - Audio ID for MPEG PES - -``V4L2_CID_MPEG_STREAM_PES_ID_VIDEO (integer)`` - Video ID for MPEG PES - -.. _v4l2-mpeg-stream-vbi-fmt: - -``V4L2_CID_MPEG_STREAM_VBI_FMT`` - (enum) - -enum v4l2_mpeg_stream_vbi_fmt - - Some cards can embed VBI data (e. g. Closed Caption, Teletext) into - the MPEG stream. This control selects whether VBI data should be - embedded, and if so, what embedding method should be used. The list - of possible VBI formats depends on the driver. The currently defined - VBI format types are: - - - -.. tabularcolumns:: |p{6 cm}|p{11.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_STREAM_VBI_FMT_NONE`` - - No VBI in the MPEG stream - * - ``V4L2_MPEG_STREAM_VBI_FMT_IVTV`` - - VBI in private packets, IVTV format (documented in the kernel - sources in the file - ``Documentation/media/v4l-drivers/cx2341x.rst``) - - - -.. _v4l2-mpeg-audio-sampling-freq: - -``V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ`` - (enum) - -enum v4l2_mpeg_audio_sampling_freq - - MPEG Audio sampling frequency. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100`` - - 44.1 kHz - * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000`` - - 48 kHz - * - ``V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000`` - - 32 kHz - - - -.. _v4l2-mpeg-audio-encoding: - -``V4L2_CID_MPEG_AUDIO_ENCODING`` - (enum) - -enum v4l2_mpeg_audio_encoding - - MPEG Audio encoding. This control is specific to multiplexed MPEG - streams. Possible values are: - - - -.. 
flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_1`` - - MPEG-1/2 Layer I encoding - * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_2`` - - MPEG-1/2 Layer II encoding - * - ``V4L2_MPEG_AUDIO_ENCODING_LAYER_3`` - - MPEG-1/2 Layer III encoding - * - ``V4L2_MPEG_AUDIO_ENCODING_AAC`` - - MPEG-2/4 AAC (Advanced Audio Coding) - * - ``V4L2_MPEG_AUDIO_ENCODING_AC3`` - - AC-3 aka ATSC A/52 encoding - - - -.. _v4l2-mpeg-audio-l1-bitrate: - -``V4L2_CID_MPEG_AUDIO_L1_BITRATE`` - (enum) - -enum v4l2_mpeg_audio_l1_bitrate - - MPEG-1/2 Layer I bitrate. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_32K`` - - 32 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_64K`` - - 64 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_96K`` - - 96 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_128K`` - - 128 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_160K`` - - 160 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_192K`` - - 192 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_224K`` - - 224 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_256K`` - - 256 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_288K`` - - 288 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_320K`` - - 320 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_352K`` - - 352 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_384K`` - - 384 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_416K`` - - 416 kbit/s - * - ``V4L2_MPEG_AUDIO_L1_BITRATE_448K`` - - 448 kbit/s - - - -.. _v4l2-mpeg-audio-l2-bitrate: - -``V4L2_CID_MPEG_AUDIO_L2_BITRATE`` - (enum) - -enum v4l2_mpeg_audio_l2_bitrate - - MPEG-1/2 Layer II bitrate. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_32K`` - - 32 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_48K`` - - 48 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_56K`` - - 56 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_64K`` - - 64 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_80K`` - - 80 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_96K`` - - 96 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_112K`` - - 112 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_128K`` - - 128 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_160K`` - - 160 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_192K`` - - 192 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_224K`` - - 224 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_256K`` - - 256 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_320K`` - - 320 kbit/s - * - ``V4L2_MPEG_AUDIO_L2_BITRATE_384K`` - - 384 kbit/s - - - -.. _v4l2-mpeg-audio-l3-bitrate: - -``V4L2_CID_MPEG_AUDIO_L3_BITRATE`` - (enum) - -enum v4l2_mpeg_audio_l3_bitrate - - MPEG-1/2 Layer III bitrate. Possible values are: - - - -.. 
flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_32K`` - - 32 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_40K`` - - 40 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_48K`` - - 48 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_56K`` - - 56 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_64K`` - - 64 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_80K`` - - 80 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_96K`` - - 96 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_112K`` - - 112 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_128K`` - - 128 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_160K`` - - 160 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_192K`` - - 192 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_224K`` - - 224 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_256K`` - - 256 kbit/s - * - ``V4L2_MPEG_AUDIO_L3_BITRATE_320K`` - - 320 kbit/s - - - -``V4L2_CID_MPEG_AUDIO_AAC_BITRATE (integer)`` - AAC bitrate in bits per second. - -.. _v4l2-mpeg-audio-ac3-bitrate: - -``V4L2_CID_MPEG_AUDIO_AC3_BITRATE`` - (enum) - -enum v4l2_mpeg_audio_ac3_bitrate - - AC-3 bitrate. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_32K`` - - 32 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_40K`` - - 40 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_48K`` - - 48 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_56K`` - - 56 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_64K`` - - 64 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_80K`` - - 80 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_96K`` - - 96 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_112K`` - - 112 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_128K`` - - 128 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_160K`` - - 160 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_192K`` - - 192 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_224K`` - - 224 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_256K`` - - 256 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_320K`` - - 320 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_384K`` - - 384 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_448K`` - - 448 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_512K`` - - 512 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_576K`` - - 576 kbit/s - * - ``V4L2_MPEG_AUDIO_AC3_BITRATE_640K`` - - 640 kbit/s - - - -.. _v4l2-mpeg-audio-mode: - -``V4L2_CID_MPEG_AUDIO_MODE`` - (enum) - -enum v4l2_mpeg_audio_mode - - MPEG Audio mode. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_MODE_STEREO`` - - Stereo - * - ``V4L2_MPEG_AUDIO_MODE_JOINT_STEREO`` - - Joint Stereo - * - ``V4L2_MPEG_AUDIO_MODE_DUAL`` - - Bilingual - * - ``V4L2_MPEG_AUDIO_MODE_MONO`` - - Mono - - - -.. _v4l2-mpeg-audio-mode-extension: - -``V4L2_CID_MPEG_AUDIO_MODE_EXTENSION`` - (enum) - -enum v4l2_mpeg_audio_mode_extension - - Joint Stereo audio mode extension. In Layer I and II they indicate - which subbands are in intensity stereo. All other subbands are coded - in stereo. Layer III is not (yet) supported. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4`` - - Subbands 4-31 in intensity stereo - * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_8`` - - Subbands 8-31 in intensity stereo - * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_12`` - - Subbands 12-31 in intensity stereo - * - ``V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16`` - - Subbands 16-31 in intensity stereo - - - -.. _v4l2-mpeg-audio-emphasis: - -``V4L2_CID_MPEG_AUDIO_EMPHASIS`` - (enum) - -enum v4l2_mpeg_audio_emphasis - - Audio Emphasis. 
Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_EMPHASIS_NONE`` - - None - * - ``V4L2_MPEG_AUDIO_EMPHASIS_50_DIV_15_uS`` - - 50/15 microsecond emphasis - * - ``V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17`` - - CCITT J.17 - - - -.. _v4l2-mpeg-audio-crc: - -``V4L2_CID_MPEG_AUDIO_CRC`` - (enum) - -enum v4l2_mpeg_audio_crc - - CRC method. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_CRC_NONE`` - - None - * - ``V4L2_MPEG_AUDIO_CRC_CRC16`` - - 16 bit parity check - - - -``V4L2_CID_MPEG_AUDIO_MUTE (boolean)`` - Mutes the audio when capturing. This is not done by muting audio - hardware, which can still produce a slight hiss, but in the encoder - itself, guaranteeing a fixed and reproducible audio bitstream. 0 = - unmuted, 1 = muted. - -.. _v4l2-mpeg-audio-dec-playback: - -``V4L2_CID_MPEG_AUDIO_DEC_PLAYBACK`` - (enum) - -enum v4l2_mpeg_audio_dec_playback - - Determines how monolingual audio should be played back. Possible - values are: - - - -.. tabularcolumns:: |p{9.0cm}|p{8.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_AUTO`` - - Automatically determines the best playback mode. - * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_STEREO`` - - Stereo playback. - * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_LEFT`` - - Left channel playback. - * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_RIGHT`` - - Right channel playback. - * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_MONO`` - - Mono playback. - * - ``V4L2_MPEG_AUDIO_DEC_PLAYBACK_SWAPPED_STEREO`` - - Stereo playback with swapped left and right channels. - - - -.. _v4l2-mpeg-audio-dec-multilingual-playback: - -``V4L2_CID_MPEG_AUDIO_DEC_MULTILINGUAL_PLAYBACK`` - (enum) - -enum v4l2_mpeg_audio_dec_playback - - Determines how multilingual audio should be played back. - -.. _v4l2-mpeg-video-encoding: - -``V4L2_CID_MPEG_VIDEO_ENCODING`` - (enum) - -enum v4l2_mpeg_video_encoding - - MPEG Video encoding method. This control is specific to multiplexed - MPEG streams. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_1`` - - MPEG-1 Video encoding - * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_2`` - - MPEG-2 Video encoding - * - ``V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC`` - - MPEG-4 AVC (H.264) Video encoding - - - -.. _v4l2-mpeg-video-aspect: - -``V4L2_CID_MPEG_VIDEO_ASPECT`` - (enum) - -enum v4l2_mpeg_video_aspect - - Video aspect. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_ASPECT_1x1`` - * - ``V4L2_MPEG_VIDEO_ASPECT_4x3`` - * - ``V4L2_MPEG_VIDEO_ASPECT_16x9`` - * - ``V4L2_MPEG_VIDEO_ASPECT_221x100`` - - - -``V4L2_CID_MPEG_VIDEO_B_FRAMES (integer)`` - Number of B-Frames (default 2) - -``V4L2_CID_MPEG_VIDEO_GOP_SIZE (integer)`` - GOP size (default 12) - -``V4L2_CID_MPEG_VIDEO_GOP_CLOSURE (boolean)`` - GOP closure (default 1) - -``V4L2_CID_MPEG_VIDEO_PULLDOWN (boolean)`` - Enable 3:2 pulldown (default 0) - -.. _v4l2-mpeg-video-bitrate-mode: - -``V4L2_CID_MPEG_VIDEO_BITRATE_MODE`` - (enum) - -enum v4l2_mpeg_video_bitrate_mode - - Video bitrate mode. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_VBR`` - - Variable bitrate - * - ``V4L2_MPEG_VIDEO_BITRATE_MODE_CBR`` - - Constant bitrate - - - -``V4L2_CID_MPEG_VIDEO_BITRATE (integer)`` - Video bitrate in bits per second. 
- -``V4L2_CID_MPEG_VIDEO_BITRATE_PEAK (integer)`` - Peak video bitrate in bits per second. Must be larger or equal to - the average video bitrate. It is ignored if the video bitrate mode - is set to constant bitrate. - -``V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION (integer)`` - For every captured frame, skip this many subsequent frames (default - 0). - -``V4L2_CID_MPEG_VIDEO_MUTE (boolean)`` - "Mutes" the video to a fixed color when capturing. This is useful - for testing, to produce a fixed video bitstream. 0 = unmuted, 1 = - muted. - -``V4L2_CID_MPEG_VIDEO_MUTE_YUV (integer)`` - Sets the "mute" color of the video. The supplied 32-bit integer is - interpreted as follows (bit 0 = least significant bit): - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - Bit 0:7 - - V chrominance information - * - Bit 8:15 - - U chrominance information - * - Bit 16:23 - - Y luminance information - * - Bit 24:31 - - Must be zero. - - - -.. _v4l2-mpeg-video-dec-pts: - -``V4L2_CID_MPEG_VIDEO_DEC_PTS (integer64)`` - This read-only control returns the 33-bit video Presentation Time - Stamp as defined in ITU T-REC-H.222.0 and ISO/IEC 13818-1 of the - currently displayed frame. This is the same PTS as is used in - :ref:`VIDIOC_DECODER_CMD`. - -.. _v4l2-mpeg-video-dec-frame: - -``V4L2_CID_MPEG_VIDEO_DEC_FRAME (integer64)`` - This read-only control returns the frame counter of the frame that - is currently displayed (decoded). This value is reset to 0 whenever - the decoder is started. - -``V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE (boolean)`` - If enabled the decoder expects to receive a single slice per buffer, - otherwise the decoder expects a single frame in per buffer. - Applicable to the decoder, all codecs. - -``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE (boolean)`` - Enable writing sample aspect ratio in the Video Usability - Information. Applicable to the H264 encoder. - -.. _v4l2-mpeg-video-h264-vui-sar-idc: - -``V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC`` - (enum) - -enum v4l2_mpeg_video_h264_vui_sar_idc - - VUI sample aspect ratio indicator for H.264 encoding. The value is - defined in the table E-1 in the standard. Applicable to the H264 - encoder. - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED`` - - Unspecified - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_1x1`` - - 1x1 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_12x11`` - - 12x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_10x11`` - - 10x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_16x11`` - - 16x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_40x33`` - - 40x33 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_24x11`` - - 24x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_20x11`` - - 20x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_32x11`` - - 32x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_80x33`` - - 80x33 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_18x11`` - - 18x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_15x11`` - - 15x11 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_64x33`` - - 64x33 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_160x99`` - - 160x99 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_4x3`` - - 4x3 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_3x2`` - - 3x2 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_2x1`` - - 2x1 - * - ``V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED`` - - Extended SAR - - - -``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (integer)`` - Extended sample aspect ratio width for H.264 VUI encoding. - Applicable to the H264 encoder. 
- -``V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (integer)`` - Extended sample aspect ratio height for H.264 VUI encoding. - Applicable to the H264 encoder. - -.. _v4l2-mpeg-video-h264-level: - -``V4L2_CID_MPEG_VIDEO_H264_LEVEL`` - (enum) - -enum v4l2_mpeg_video_h264_level - - The level information for the H264 video elementary stream. - Applicable to the H264 encoder. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_0`` - - Level 1.0 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1B`` - - Level 1B - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_1`` - - Level 1.1 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_2`` - - Level 1.2 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_1_3`` - - Level 1.3 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_0`` - - Level 2.0 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_1`` - - Level 2.1 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_2_2`` - - Level 2.2 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_0`` - - Level 3.0 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_1`` - - Level 3.1 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_3_2`` - - Level 3.2 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_0`` - - Level 4.0 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_1`` - - Level 4.1 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_4_2`` - - Level 4.2 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_0`` - - Level 5.0 - * - ``V4L2_MPEG_VIDEO_H264_LEVEL_5_1`` - - Level 5.1 - - - -.. _v4l2-mpeg-video-mpeg4-level: - -``V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL`` - (enum) - -enum v4l2_mpeg_video_mpeg4_level - - The level information for the MPEG4 elementary stream. Applicable to - the MPEG4 encoder. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0`` - - Level 0 - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B`` - - Level 0b - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_1`` - - Level 1 - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_2`` - - Level 2 - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3`` - - Level 3 - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_3B`` - - Level 3b - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_4`` - - Level 4 - * - ``V4L2_MPEG_VIDEO_MPEG4_LEVEL_5`` - - Level 5 - - - -.. _v4l2-mpeg-video-h264-profile: - -``V4L2_CID_MPEG_VIDEO_H264_PROFILE`` - (enum) - -enum v4l2_mpeg_video_h264_profile - - The profile information for H264. Applicable to the H264 encoder. - Possible values are: - - - -.. 
flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE`` - - Baseline profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE`` - - Constrained Baseline profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MAIN`` - - Main profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED`` - - Extended profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH`` - - High profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10`` - - High 10 profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422`` - - High 422 profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE`` - - High 444 Predictive profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA`` - - High 10 Intra profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA`` - - High 422 Intra profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA`` - - High 444 Intra profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA`` - - CAVLC 444 Intra profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE`` - - Scalable Baseline profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH`` - - Scalable High profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA`` - - Scalable High Intra profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH`` - - Stereo High profile - * - ``V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH`` - - Multiview High profile - - - -.. _v4l2-mpeg-video-mpeg4-profile: - -``V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE`` - (enum) - -enum v4l2_mpeg_video_mpeg4_profile - - The profile information for MPEG4. Applicable to the MPEG4 encoder. - Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE`` - - Simple profile - * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE`` - - Advanced Simple profile - * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE`` - - Core profile - * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE`` - - Simple Scalable profile - * - ``V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY`` - - - - - -``V4L2_CID_MPEG_VIDEO_MAX_REF_PIC (integer)`` - The maximum number of reference pictures used for encoding. - Applicable to the encoder. - -.. _v4l2-mpeg-video-multi-slice-mode: - -``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` - (enum) - -enum v4l2_mpeg_video_multi_slice_mode - - Determines how the encoder should handle division of frame into - slices. Applicable to the encoder. Possible values are: - - - -.. tabularcolumns:: |p{8.7cm}|p{8.8cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE`` - - Single slice per frame. - * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB`` - - Multiple slices with set maximum number of macroblocks per slice. - * - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES`` - - Multiple slice with set maximum size in bytes per slice. - - - -``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB (integer)`` - The maximum number of macroblocks in a slice. Used when - ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB``. Applicable to the - encoder. - -``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES (integer)`` - The maximum size of a slice in bytes. Used when - ``V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE`` is set to - ``V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES``. Applicable to the - encoder. - -.. _v4l2-mpeg-video-h264-loop-filter-mode: - -``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE`` - (enum) - -enum v4l2_mpeg_video_h264_loop_filter_mode - - Loop filter mode for H264 encoder. Possible values are: - - - -.. 
tabularcolumns:: |p{14.0cm}|p{3.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED`` - - Loop filter is enabled. - * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED`` - - Loop filter is disabled. - * - ``V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY`` - - Loop filter is disabled at the slice boundary. - - - -``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (integer)`` - Loop filter alpha coefficient, defined in the H264 standard. - This value corresponds to the slice_alpha_c0_offset_div2 slice header - field, and should be in the range of -6 to +6, inclusive. The actual alpha - offset FilterOffsetA is twice this value. - Applicable to the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (integer)`` - Loop filter beta coefficient, defined in the H264 standard. - This corresponds to the slice_beta_offset_div2 slice header field, and - should be in the range of -6 to +6, inclusive. The actual beta offset - FilterOffsetB is twice this value. - Applicable to the H264 encoder. - -.. _v4l2-mpeg-video-h264-entropy-mode: - -``V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE`` - (enum) - -enum v4l2_mpeg_video_h264_entropy_mode - - Entropy coding mode for H264 - CABAC/CAVALC. Applicable to the H264 - encoder. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC`` - - Use CAVLC entropy coding. - * - ``V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC`` - - Use CABAC entropy coding. - - - -``V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM (boolean)`` - Enable 8X8 transform for H264. Applicable to the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB (integer)`` - Cyclic intra macroblock refresh. This is the number of continuous - macroblocks refreshed every frame. Each frame a successive set of - macroblocks is refreshed until the cycle completes and starts from - the top of the frame. Applicable to H264, H263 and MPEG4 encoder. - -``V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE (boolean)`` - Frame level rate control enable. If this control is disabled then - the quantization parameter for each frame type is constant and set - with appropriate controls (e.g. - ``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP``). If frame rate control is - enabled then quantization parameter is adjusted to meet the chosen - bitrate. Minimum and maximum value for the quantization parameter - can be set with appropriate controls (e.g. - ``V4L2_CID_MPEG_VIDEO_H263_MIN_QP``). Applicable to encoders. - -``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE (boolean)`` - Macroblock level rate control enable. Applicable to the MPEG4 and - H264 encoders. - -``V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (boolean)`` - Quarter pixel motion estimation for MPEG4. Applicable to the MPEG4 - encoder. - -``V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (integer)`` - Quantization parameter for an I frame for H263. Valid range: from 1 - to 31. - -``V4L2_CID_MPEG_VIDEO_H263_MIN_QP (integer)`` - Minimum quantization parameter for H263. Valid range: from 1 to 31. - -``V4L2_CID_MPEG_VIDEO_H263_MAX_QP (integer)`` - Maximum quantization parameter for H263. Valid range: from 1 to 31. - -``V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (integer)`` - Quantization parameter for an P frame for H263. Valid range: from 1 - to 31. - -``V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (integer)`` - Quantization parameter for an B frame for H263. Valid range: from 1 - to 31. - -``V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP (integer)`` - Quantization parameter for an I frame for H264. 
Valid range: from 0 - to 51. - -``V4L2_CID_MPEG_VIDEO_H264_MIN_QP (integer)`` - Minimum quantization parameter for H264. Valid range: from 0 to 51. - -``V4L2_CID_MPEG_VIDEO_H264_MAX_QP (integer)`` - Maximum quantization parameter for H264. Valid range: from 0 to 51. - -``V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP (integer)`` - Quantization parameter for an P frame for H264. Valid range: from 0 - to 51. - -``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP (integer)`` - Quantization parameter for an B frame for H264. Valid range: from 0 - to 51. - -``V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (integer)`` - Quantization parameter for an I frame for MPEG4. Valid range: from 1 - to 31. - -``V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP (integer)`` - Minimum quantization parameter for MPEG4. Valid range: from 1 to 31. - -``V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP (integer)`` - Maximum quantization parameter for MPEG4. Valid range: from 1 to 31. - -``V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (integer)`` - Quantization parameter for an P frame for MPEG4. Valid range: from 1 - to 31. - -``V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (integer)`` - Quantization parameter for an B frame for MPEG4. Valid range: from 1 - to 31. - -``V4L2_CID_MPEG_VIDEO_VBV_SIZE (integer)`` - The Video Buffer Verifier size in kilobytes, it is used as a - limitation of frame skip. The VBV is defined in the standard as a - mean to verify that the produced stream will be successfully - decoded. The standard describes it as "Part of a hypothetical - decoder that is conceptually connected to the output of the encoder. - Its purpose is to provide a constraint on the variability of the - data rate that an encoder or editing process may produce.". - Applicable to the MPEG1, MPEG2, MPEG4 encoders. - -.. _v4l2-mpeg-video-vbv-delay: - -``V4L2_CID_MPEG_VIDEO_VBV_DELAY (integer)`` - Sets the initial delay in milliseconds for VBV buffer control. - -.. _v4l2-mpeg-video-hor-search-range: - -``V4L2_CID_MPEG_VIDEO_MV_H_SEARCH_RANGE (integer)`` - Horizontal search range defines maximum horizontal search area in - pixels to search and match for the present Macroblock (MB) in the - reference picture. This V4L2 control macro is used to set horizontal - search range for motion estimation module in video encoder. - -.. _v4l2-mpeg-video-vert-search-range: - -``V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (integer)`` - Vertical search range defines maximum vertical search area in pixels - to search and match for the present Macroblock (MB) in the reference - picture. This V4L2 control macro is used to set vertical search - range for motion estimation module in video encoder. - -.. _v4l2-mpeg-video-force-key-frame: - -``V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (button)`` - Force a key frame for the next queued buffer. Applicable to - encoders. This is a general, codec-agnostic keyframe control. - -``V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE (integer)`` - The Coded Picture Buffer size in kilobytes, it is used as a - limitation of frame skip. The CPB is defined in the H264 standard as - a mean to verify that the produced stream will be successfully - decoded. Applicable to the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_H264_I_PERIOD (integer)`` - Period between I-frames in the open GOP for H264. In case of an open - GOP this is the period between two I-frames. The period between IDR - (Instantaneous Decoding Refresh) frames is taken from the GOP_SIZE - control. An IDR frame, which stands for Instantaneous Decoding - Refresh is an I-frame after which no prior frames are referenced. 
- This means that a stream can be restarted from an IDR frame without - the need to store or decode any previous frames. Applicable to the - H264 encoder. - -.. _v4l2-mpeg-video-header-mode: - -``V4L2_CID_MPEG_VIDEO_HEADER_MODE`` - (enum) - -enum v4l2_mpeg_video_header_mode - - Determines whether the header is returned as the first buffer or is - it returned together with the first frame. Applicable to encoders. - Possible values are: - - - -.. tabularcolumns:: |p{10.3cm}|p{7.2cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE`` - - The stream header is returned separately in the first buffer. - * - ``V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME`` - - The stream header is returned together with the first encoded - frame. - - - -``V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER (boolean)`` - Repeat the video sequence headers. Repeating these headers makes - random access to the video stream easier. Applicable to the MPEG1, 2 - and 4 encoder. - -``V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER (boolean)`` - Enabled the deblocking post processing filter for MPEG4 decoder. - Applicable to the MPEG4 decoder. - -``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_RES (integer)`` - vop_time_increment_resolution value for MPEG4. Applicable to the - MPEG4 encoder. - -``V4L2_CID_MPEG_VIDEO_MPEG4_VOP_TIME_INC (integer)`` - vop_time_increment value for MPEG4. Applicable to the MPEG4 - encoder. - -``V4L2_CID_MPEG_VIDEO_H264_SEI_FRAME_PACKING (boolean)`` - Enable generation of frame packing supplemental enhancement - information in the encoded bitstream. The frame packing SEI message - contains the arrangement of L and R planes for 3D viewing. - Applicable to the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_CURRENT_FRAME_0 (boolean)`` - Sets current frame as frame0 in frame packing SEI. Applicable to the - H264 encoder. - -.. _v4l2-mpeg-video-h264-sei-fp-arrangement-type: - -``V4L2_CID_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE`` - (enum) - -enum v4l2_mpeg_video_h264_sei_fp_arrangement_type - - Frame packing arrangement type for H264 SEI. Applicable to the H264 - encoder. Possible values are: - -.. tabularcolumns:: |p{12cm}|p{5.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_CHEKERBOARD`` - - Pixels are alternatively from L and R. - * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_COLUMN`` - - L and R are interlaced by column. - * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_ROW`` - - L and R are interlaced by row. - * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_SIDE_BY_SIDE`` - - L is on the left, R on the right. - * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TOP_BOTTOM`` - - L is on top, R on bottom. - * - ``V4L2_MPEG_VIDEO_H264_SEI_FP_ARRANGEMENT_TYPE_TEMPORAL`` - - One view per frame. - - - -``V4L2_CID_MPEG_VIDEO_H264_FMO (boolean)`` - Enables flexible macroblock ordering in the encoded bitstream. It is - a technique used for restructuring the ordering of macroblocks in - pictures. Applicable to the H264 encoder. - -.. _v4l2-mpeg-video-h264-fmo-map-type: - -``V4L2_CID_MPEG_VIDEO_H264_FMO_MAP_TYPE`` - (enum) - -enum v4l2_mpeg_video_h264_fmo_map_type - - When using FMO, the map type divides the image in different scan - patterns of macroblocks. Applicable to the H264 encoder. Possible - values are: - -.. tabularcolumns:: |p{12.5cm}|p{5.0cm}| - -.. 
flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_INTERLEAVED_SLICES`` - - Slices are interleaved one after other with macroblocks in run - length order. - * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_SCATTERED_SLICES`` - - Scatters the macroblocks based on a mathematical function known to - both encoder and decoder. - * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_FOREGROUND_WITH_LEFT_OVER`` - - Macroblocks arranged in rectangular areas or regions of interest. - * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_BOX_OUT`` - - Slice groups grow in a cyclic way from centre to outwards. - * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_RASTER_SCAN`` - - Slice groups grow in raster scan pattern from left to right. - * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_WIPE_SCAN`` - - Slice groups grow in wipe scan pattern from top to bottom. - * - ``V4L2_MPEG_VIDEO_H264_FMO_MAP_TYPE_EXPLICIT`` - - User defined map type. - - - -``V4L2_CID_MPEG_VIDEO_H264_FMO_SLICE_GROUP (integer)`` - Number of slice groups in FMO. Applicable to the H264 encoder. - -.. _v4l2-mpeg-video-h264-fmo-change-direction: - -``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_DIRECTION`` - (enum) - -enum v4l2_mpeg_video_h264_fmo_change_dir - - Specifies a direction of the slice group change for raster and wipe - maps. Applicable to the H264 encoder. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_RIGHT`` - - Raster scan or wipe right. - * - ``V4L2_MPEG_VIDEO_H264_FMO_CHANGE_DIR_LEFT`` - - Reverse raster scan or wipe left. - - - -``V4L2_CID_MPEG_VIDEO_H264_FMO_CHANGE_RATE (integer)`` - Specifies the size of the first slice group for raster and wipe map. - Applicable to the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_H264_FMO_RUN_LENGTH (integer)`` - Specifies the number of consecutive macroblocks for the interleaved - map. Applicable to the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_H264_ASO (boolean)`` - Enables arbitrary slice ordering in encoded bitstream. Applicable to - the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_H264_ASO_SLICE_ORDER (integer)`` - Specifies the slice order in ASO. Applicable to the H264 encoder. - The supplied 32-bit integer is interpreted as follows (bit 0 = least - significant bit): - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - Bit 0:15 - - Slice ID - * - Bit 16:32 - - Slice position or order - - - -``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING (boolean)`` - Enables H264 hierarchical coding. Applicable to the H264 encoder. - -.. _v4l2-mpeg-video-h264-hierarchical-coding-type: - -``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE`` - (enum) - -enum v4l2_mpeg_video_h264_hierarchical_coding_type - - Specifies the hierarchical coding type. Applicable to the H264 - encoder. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B`` - - Hierarchical B coding. - * - ``V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P`` - - Hierarchical P coding. - - - -``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER (integer)`` - Specifies the number of hierarchical coding layers. Applicable to - the H264 encoder. - -``V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (integer)`` - Specifies a user defined QP for each layer. Applicable to the H264 - encoder. The supplied 32-bit integer is interpreted as follows (bit - 0 = least significant bit): - - - -.. 
flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - Bit 0:15 - - QP value - * - Bit 16:32 - - Layer number - - - -.. _v4l2-mpeg-mpeg2: - -``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (struct)`` - Specifies the slice parameters (as extracted from the bitstream) for the - associated MPEG-2 slice data. This includes the necessary parameters for - configuring a stateless hardware decoding pipeline for MPEG-2. - The bitstream parameters are defined according to :ref:`mpeg2part2`. - - .. note:: - - This compound control is not yet part of the public kernel API and - it is expected to change. - -.. c:type:: v4l2_ctrl_mpeg2_slice_params - -.. cssclass:: longtable - -.. flat-table:: struct v4l2_ctrl_mpeg2_slice_params - :header-rows: 0 - :stub-columns: 0 - :widths: 1 1 2 - - * - __u32 - - ``bit_size`` - - Size (in bits) of the current slice data. - * - __u32 - - ``data_bit_offset`` - - Offset (in bits) to the video data in the current slice data. - * - struct :c:type:`v4l2_mpeg2_sequence` - - ``sequence`` - - Structure with MPEG-2 sequence metadata, merging relevant fields from - the sequence header and sequence extension parts of the bitstream. - * - struct :c:type:`v4l2_mpeg2_picture` - - ``picture`` - - Structure with MPEG-2 picture metadata, merging relevant fields from - the picture header and picture coding extension parts of the bitstream. - * - __u8 - - ``quantiser_scale_code`` - - Code used to determine the quantization scale to use for the IDCT. - * - __u8 - - ``backward_ref_index`` - - Index for the V4L2 buffer to use as backward reference, used with - B-coded and P-coded frames. - * - __u8 - - ``forward_ref_index`` - - Index for the V4L2 buffer to use as forward reference, used with - B-coded frames. - -.. c:type:: v4l2_mpeg2_sequence - -.. cssclass:: longtable - -.. flat-table:: struct v4l2_mpeg2_sequence - :header-rows: 0 - :stub-columns: 0 - :widths: 1 1 2 - - * - __u16 - - ``horizontal_size`` - - The width of the displayable part of the frame's luminance component. - * - __u16 - - ``vertical_size`` - - The height of the displayable part of the frame's luminance component. - * - __u32 - - ``vbv_buffer_size`` - - Used to calculate the required size of the video buffering verifier, - defined (in bits) as: 16 * 1024 * vbv_buffer_size. - * - __u8 - - ``profile_and_level_indication`` - - The current profile and level indication as extracted from the - bitstream. - * - __u8 - - ``progressive_sequence`` - - Indication that all the frames for the sequence are progressive instead - of interlaced. - * - __u8 - - ``chroma_format`` - - The chrominance sub-sampling format (1: 4:2:0, 2: 4:2:2, 3: 4:4:4). - -.. c:type:: v4l2_mpeg2_picture - -.. cssclass:: longtable - -.. flat-table:: struct v4l2_mpeg2_picture - :header-rows: 0 - :stub-columns: 0 - :widths: 1 1 2 - - * - __u8 - - ``picture_coding_type`` - - Picture coding type for the frame covered by the current slice - (V4L2_MPEG2_PICTURE_CODING_TYPE_I, V4L2_MPEG2_PICTURE_CODING_TYPE_P or - V4L2_MPEG2_PICTURE_CODING_TYPE_B). - * - __u8 - - ``f_code[2][2]`` - - Motion vector codes. - * - __u8 - - ``intra_dc_precision`` - - Precision of Discrete Cosine transform (0: 8 bits precision, - 1: 9 bits precision, 2: 10 bits precision, 3: 11 bits precision). - * - __u8 - - ``picture_structure`` - - Picture structure (1: interlaced top field, 2: interlaced bottom field, - 3: progressive frame). - * - __u8 - - ``top_field_first`` - - If set to 1 and interlaced stream, top field is output first. 
- * - __u8 - - ``frame_pred_frame_dct`` - - If set to 1, only frame-DCT and frame prediction are used. - * - __u8 - - ``concealment_motion_vectors`` - - If set to 1, motion vectors are coded for intra macroblocks. - * - __u8 - - ``q_scale_type`` - - This flag affects the inverse quantization process. - * - __u8 - - ``intra_vlc_format`` - - This flag affects the decoding of transform coefficient data. - * - __u8 - - ``alternate_scan`` - - This flag affects the decoding of transform coefficient data. - * - __u8 - - ``repeat_first_field`` - - This flag affects the decoding process of progressive frames. - * - __u8 - - ``progressive_frame`` - - Indicates whether the current frame is progressive. - -``V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (struct)`` - Specifies quantization matrices (as extracted from the bitstream) for the - associated MPEG-2 slice data. - - .. note:: - - This compound control is not yet part of the public kernel API and - it is expected to change. - -.. c:type:: v4l2_ctrl_mpeg2_quantization - -.. cssclass:: longtable - -.. flat-table:: struct v4l2_ctrl_mpeg2_quantization - :header-rows: 0 - :stub-columns: 0 - :widths: 1 1 2 - - * - __u8 - - ``load_intra_quantiser_matrix`` - - One bit to indicate whether to load the ``intra_quantiser_matrix`` data. - * - __u8 - - ``load_non_intra_quantiser_matrix`` - - One bit to indicate whether to load the ``non_intra_quantiser_matrix`` - data. - * - __u8 - - ``load_chroma_intra_quantiser_matrix`` - - One bit to indicate whether to load the - ``chroma_intra_quantiser_matrix`` data, only relevant for non-4:2:0 YUV - formats. - * - __u8 - - ``load_chroma_non_intra_quantiser_matrix`` - - One bit to indicate whether to load the - ``chroma_non_intra_quantiser_matrix`` data, only relevant for non-4:2:0 - YUV formats. - * - __u8 - - ``intra_quantiser_matrix[64]`` - - The quantization matrix coefficients for intra-coded frames, in zigzag - scanning order. It is relevant for both luma and chroma components, - although it can be superseded by the chroma-specific matrix for - non-4:2:0 YUV formats. - * - __u8 - - ``non_intra_quantiser_matrix[64]`` - - The quantization matrix coefficients for non-intra-coded frames, in - zigzag scanning order. It is relevant for both luma and chroma - components, although it can be superseded by the chroma-specific matrix - for non-4:2:0 YUV formats. - * - __u8 - - ``chroma_intra_quantiser_matrix[64]`` - - The quantization matrix coefficients for the chominance component of - intra-coded frames, in zigzag scanning order. Only relevant for - non-4:2:0 YUV formats. - * - __u8 - - ``chroma_non_intra_quantiser_matrix[64]`` - - The quantization matrix coefficients for the chrominance component of - non-intra-coded frames, in zigzag scanning order. Only relevant for - non-4:2:0 YUV formats. - -MFC 5.1 MPEG Controls ---------------------- - -The following MPEG class controls deal with MPEG decoding and encoding -settings that are specific to the Multi Format Codec 5.1 device present -in the S5P family of SoCs by Samsung. - - -.. _mfc51-control-id: - -MFC 5.1 Control IDs -^^^^^^^^^^^^^^^^^^^ - -``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE (boolean)`` - If the display delay is enabled then the decoder is forced to return - a CAPTURE buffer (decoded frame) after processing a certain number - of OUTPUT buffers. The delay can be set through - ``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY``. This - feature can be used for example for generating thumbnails of videos. - Applicable to the H264 decoder. 
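Both display delay controls mentioned above are ordinary extended controls in the MPEG control
class, so an application can apply them together with a single ``VIDIOC_S_EXT_CTRLS`` call. The
fragment below is a minimal illustrative sketch, not part of the original documentation: the open
file descriptor ``fd`` and the delay of two frames are assumed example values, not requirements of
the API.

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Sketch: enable the H264 display delay and set it to 2 frames. */
    static int set_display_delay(int fd)
    {
            struct v4l2_ext_control ctrl[2];
            struct v4l2_ext_controls ctrls;

            memset(ctrl, 0, sizeof(ctrl));
            memset(&ctrls, 0, sizeof(ctrls));

            ctrl[0].id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE;
            ctrl[0].value = 1;      /* force early return of CAPTURE buffers */
            ctrl[1].id = V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY;
            ctrl[1].value = 2;      /* assumed example delay, in frames */

            ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
            ctrls.count = 2;
            ctrls.controls = ctrl;

            return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }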
- -``V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY (integer)`` - Display delay value for H264 decoder. The decoder is forced to - return a decoded frame after the set 'display delay' number of - frames. If this number is low it may result in frames returned out - of display order; in addition, the hardware may still be using the - returned buffer as a reference picture for subsequent frames. - -``V4L2_CID_MPEG_MFC51_VIDEO_H264_NUM_REF_PIC_FOR_P (integer)`` - The number of reference pictures used for encoding a P picture. - Applicable to the H264 encoder. - -``V4L2_CID_MPEG_MFC51_VIDEO_PADDING (boolean)`` - Padding enable in the encoder - use a color instead of repeating - border pixels. Applicable to encoders. - -``V4L2_CID_MPEG_MFC51_VIDEO_PADDING_YUV (integer)`` - Padding color in the encoder. Applicable to encoders. The supplied - 32-bit integer is interpreted as follows (bit 0 = least significant - bit): - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - Bit 0:7 - - V chrominance information - * - Bit 8:15 - - U chrominance information - * - Bit 16:23 - - Y luminance information - * - Bit 24:31 - - Must be zero. - - - -``V4L2_CID_MPEG_MFC51_VIDEO_RC_REACTION_COEFF (integer)`` - Reaction coefficient for MFC rate control. Applicable to encoders. - - .. note:: - - #. Valid only when the frame level RC is enabled. - - #. For tight CBR, this field must be small (ex. 2 ~ 10). For - VBR, this field must be large (ex. 100 ~ 1000). - - #. It is not recommended to use a number greater than - FRAME_RATE * (10^9 / BIT_RATE). - -``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_DARK (boolean)`` - Adaptive rate control for dark region. Valid only when H.264 and - macroblock level RC is enabled - (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 - encoder. - -``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_SMOOTH (boolean)`` - Adaptive rate control for smooth region. Valid only when H.264 and - macroblock level RC is enabled - (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 - encoder. - -``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_STATIC (boolean)`` - Adaptive rate control for static region. Valid only when H.264 and - macroblock level RC is enabled - (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 - encoder. - -``V4L2_CID_MPEG_MFC51_VIDEO_H264_ADAPTIVE_RC_ACTIVITY (boolean)`` - Adaptive rate control for activity region. Valid only when H.264 and - macroblock level RC is enabled - (``V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE``). Applicable to the H264 - encoder. - -.. _v4l2-mpeg-mfc51-video-frame-skip-mode: - -``V4L2_CID_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE`` - (enum) - -enum v4l2_mpeg_mfc51_video_frame_skip_mode - - Indicates under what conditions the encoder should skip frames. If - encoding a frame would cause the encoded stream to be larger than a - chosen data limit, then the frame will be skipped. Possible values - are: - - -.. tabularcolumns:: |p{9.0cm}|p{8.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_DISABLED`` - - Frame skip mode is disabled. - * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_LEVEL_LIMIT`` - - Frame skip mode enabled and buffer limit is set by the chosen - level and is defined by the standard. - * - ``V4L2_MPEG_MFC51_FRAME_SKIP_MODE_BUF_LIMIT`` - - Frame skip mode enabled and buffer limit is set by the VBV - (MPEG1/2/4) or CPB (H264) buffer size control. - - - -``V4L2_CID_MPEG_MFC51_VIDEO_RC_FIXED_TARGET_BIT (integer)`` - Enable rate-control with fixed target bit.
If this setting is - enabled, then the rate control logic of the encoder will calculate - the average bitrate for a GOP and keep it below or equal to the set - bitrate target. Otherwise the rate control logic calculates the - overall average bitrate for the stream and keeps it below or equal - to the set bitrate. In the first case the average bitrate for the - whole stream will be smaller than the set bitrate. This is because - the average is calculated over a smaller number of frames; on - the other hand, enabling this setting ensures that the stream - will meet tight bandwidth constraints. Applicable to encoders. - -.. _v4l2-mpeg-mfc51-video-force-frame-type: - -``V4L2_CID_MPEG_MFC51_VIDEO_FORCE_FRAME_TYPE`` - (enum) - -enum v4l2_mpeg_mfc51_video_force_frame_type - - Force a frame type for the next queued buffer. Applicable to - encoders. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_DISABLED`` - - Forcing a specific frame type disabled. - * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_I_FRAME`` - - Force an I-frame. - * - ``V4L2_MPEG_MFC51_FORCE_FRAME_TYPE_NOT_CODED`` - - Force a non-coded frame. - - - - -CX2341x MPEG Controls ---------------------- - -The following MPEG class controls deal with MPEG encoding settings that -are specific to the Conexant CX23415 and CX23416 MPEG encoding chips. - - -.. _cx2341x-control-id: - -CX2341x Control IDs -^^^^^^^^^^^^^^^^^^^ - -.. _v4l2-mpeg-cx2341x-video-spatial-filter-mode: - -``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE`` - (enum) - -enum v4l2_mpeg_cx2341x_video_spatial_filter_mode - - Sets the Spatial Filter mode (default ``MANUAL``). Possible values - are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL`` - - Choose the filter manually - * - ``V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO`` - - Choose the filter automatically - - - -``V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER (integer (0-15))`` - The setting for the Spatial Filter. 0 = off, 15 = maximum. (Default - is 0.) - -.. _luma-spatial-filter-type: - -``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE`` - (enum) - -enum v4l2_mpeg_cx2341x_video_luma_spatial_filter_type - - Select the algorithm to use for the Luma Spatial Filter (default - ``1D_HOR``). Possible values: - - - -.. tabularcolumns:: |p{14.5cm}|p{3.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF`` - - No filter - * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR`` - - One-dimensional horizontal - * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_VERT`` - - One-dimensional vertical - * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_HV_SEPARABLE`` - - Two-dimensional separable - * - ``V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE`` - - Two-dimensional symmetrical non-separable - - - -.. _chroma-spatial-filter-type: - -``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE`` - (enum) - -enum v4l2_mpeg_cx2341x_video_chroma_spatial_filter_type - - Select the algorithm for the Chroma Spatial Filter (default - ``1D_HOR``). Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF`` - - No filter - * - ``V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR`` - - One-dimensional horizontal - - - -..
_v4l2-mpeg-cx2341x-video-temporal-filter-mode: - -``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE`` - (enum) - -enum v4l2_mpeg_cx2341x_video_temporal_filter_mode - - Sets the Temporal Filter mode (default ``MANUAL``). Possible values - are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL`` - - Choose the filter manually - * - ``V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO`` - - Choose the filter automatically - - - -``V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER (integer (0-31))`` - The setting for the Temporal Filter. 0 = off, 31 = maximum. (Default - is 8 for full-scale capturing and 0 for scaled capturing.) - -.. _v4l2-mpeg-cx2341x-video-median-filter-type: - -``V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE`` - (enum) - -enum v4l2_mpeg_cx2341x_video_median_filter_type - - Median Filter Type (default ``OFF``). Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF`` - - No filter - * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR`` - - Horizontal filter - * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_VERT`` - - Vertical filter - * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_HOR_VERT`` - - Horizontal and vertical filter - * - ``V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG`` - - Diagonal filter - - - -``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM (integer (0-255))`` - Threshold above which the luminance median filter is enabled - (default 0) - -``V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP (integer (0-255))`` - Threshold below which the luminance median filter is enabled - (default 255) - -``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM (integer (0-255))`` - Threshold above which the chroma median filter is enabled (default - 0) - -``V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP (integer (0-255))`` - Threshold below which the chroma median filter is enabled (default - 255) - -``V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS (boolean)`` - The CX2341X MPEG encoder can insert one empty MPEG-2 PES packet into - the stream between every four video frames. The packet size is 2048 - bytes, including the packet_start_code_prefix and stream_id - fields. The stream_id is 0xBF (private stream 2). The payload - consists of 0x00 bytes, to be filled in by the application. 0 = do - not insert, 1 = insert packets. - - -VPX Control Reference ---------------------- - -The VPX controls include controls for encoding parameters of VPx video -codec. - - -.. _vpx-control-id: - -VPX Control IDs -^^^^^^^^^^^^^^^ - -.. _v4l2-vpx-num-partitions: - -``V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS`` - (enum) - -enum v4l2_vp8_num_partitions - - The number of token partitions to use in VP8 encoder. Possible - values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION`` - - 1 coefficient partition - * - ``V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS`` - - 2 coefficient partitions - * - ``V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS`` - - 4 coefficient partitions - * - ``V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS`` - - 8 coefficient partitions - - - -``V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (boolean)`` - Setting this prevents intra 4x4 mode in the intra mode decision. - -.. _v4l2-vpx-num-ref-frames: - -``V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES`` - (enum) - -enum v4l2_vp8_num_ref_frames - - The number of reference pictures for encoding P frames. Possible - values are: - -.. 
tabularcolumns:: |p{7.9cm}|p{9.6cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME`` - - Last encoded frame will be searched - * - ``V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME`` - - Two frames will be searched among the last encoded frame, the - golden frame and the alternate reference (altref) frame. The - encoder implementation will decide which two are chosen. - * - ``V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME`` - - The last encoded frame, the golden frame and the altref frame will - be searched. - - - -``V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (integer)`` - Indicates the loop filter level. The adjustment of the loop filter - level is done via a delta value against a baseline loop filter - value. - -``V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (integer)`` - This parameter affects the loop filter. Anything above zero weakens - the deblocking effect on the loop filter. - -``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (integer)`` - Sets the refresh period for the golden frame. The period is defined - in number of frames. For a value of 'n', every nth frame starting - from the first key frame will be taken as a golden frame. For eg. - for encoding sequence of 0, 1, 2, 3, 4, 5, 6, 7 where the golden - frame refresh period is set as 4, the frames 0, 4, 8 etc will be - taken as the golden frames as frame 0 is always a key frame. - -.. _v4l2-vpx-golden-frame-sel: - -``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL`` - (enum) - -enum v4l2_vp8_golden_frame_sel - - Selects the golden frame for encoding. Possible values are: - -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV`` - - Use the (n-2)th frame as a golden frame, current frame index being - 'n'. - * - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD`` - - Use the previous specific frame indicated by - ``V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD`` as a - golden frame. - -.. raw:: latex - - \normalsize - - -``V4L2_CID_MPEG_VIDEO_VPX_MIN_QP (integer)`` - Minimum quantization parameter for VP8. - -``V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (integer)`` - Maximum quantization parameter for VP8. - -``V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (integer)`` - Quantization parameter for an I frame for VP8. - -``V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (integer)`` - Quantization parameter for a P frame for VP8. - -.. _v4l2-mpeg-video-vp8-profile: - -``V4L2_CID_MPEG_VIDEO_VP8_PROFILE`` - (enum) - -enum v4l2_mpeg_video_vp8_profile - - This control allows selecting the profile for VP8 encoder. - This is also used to enumerate supported profiles by VP8 encoder or decoder. - Possible values are: - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_0`` - - Profile 0 - * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_1`` - - Profile 1 - * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_2`` - - Profile 2 - * - ``V4L2_MPEG_VIDEO_VP8_PROFILE_3`` - - Profile 3 - -.. _v4l2-mpeg-video-vp9-profile: - -``V4L2_CID_MPEG_VIDEO_VP9_PROFILE`` - (enum) - -enum v4l2_mpeg_video_vp9_profile - - This control allows selecting the profile for VP9 encoder. - This is also used to enumerate supported profiles by VP9 encoder or decoder. - Possible values are: - -.. 
flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_0`` - - Profile 0 - * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_1`` - - Profile 1 - * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_2`` - - Profile 2 - * - ``V4L2_MPEG_VIDEO_VP9_PROFILE_3`` - - Profile 3 - - -High Efficiency Video Coding (HEVC/H.265) Control Reference ------------------------------------------------------------ - -The HEVC/H.265 controls include controls for encoding parameters of HEVC/H.265 -video codec. - - -.. _hevc-control-id: - -HEVC/H.265 Control IDs -^^^^^^^^^^^^^^^^^^^^^^ - -``V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (integer)`` - Minimum quantization parameter for HEVC. - Valid range: from 0 to 51. - -``V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (integer)`` - Maximum quantization parameter for HEVC. - Valid range: from 0 to 51. - -``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (integer)`` - Quantization parameter for an I frame for HEVC. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP (integer)`` - Quantization parameter for a P frame for HEVC. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP (integer)`` - Quantization parameter for a B frame for HEVC. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (boolean)`` - HIERARCHICAL_QP allows the host to specify the quantization parameter - values for each temporal layer through HIERARCHICAL_QP_LAYER. This is - valid only if HIERARCHICAL_CODING_LAYER is greater than 1. Setting the - control value to 1 enables setting of the QP values for the layers. - -.. _v4l2-hevc-hier-coding-type: - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE`` - (enum) - -enum v4l2_mpeg_video_hevc_hier_coding_type - - Selects the hierarchical coding type for encoding. Possible values are: - -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B`` - - Use the B frame for hierarchical coding. - * - ``V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P`` - - Use the P frame for hierarchical coding. - -.. raw:: latex - - \normalsize - - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER (integer)`` - Selects the hierarchical coding layer. In normal encoding - (non-hierarchial coding), it should be zero. Possible values are [0, 6]. - 0 indicates HIERARCHICAL CODING LAYER 0, 1 indicates HIERARCHICAL CODING - LAYER 1 and so on. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_QP (integer)`` - Indicates quantization parameter for hierarchical coding layer 0. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_QP (integer)`` - Indicates quantization parameter for hierarchical coding layer 1. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_QP (integer)`` - Indicates quantization parameter for hierarchical coding layer 2. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_QP (integer)`` - Indicates quantization parameter for hierarchical coding layer 3. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. 
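The HEVC QP controls in this section, including the per-layer controls that continue below for
layers 4 to 6, all use the same extended control interface. The following is a minimal sketch,
not part of the original documentation; the encoder file descriptor ``fd`` and the QP values are
assumed examples only.

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Sketch: clamp the HEVC QP range and pick an I-frame QP inside it. */
    static int set_hevc_qp(int fd)
    {
            struct v4l2_ext_control ctrl[3];
            struct v4l2_ext_controls ctrls;

            memset(ctrl, 0, sizeof(ctrl));
            memset(&ctrls, 0, sizeof(ctrls));

            ctrl[0].id = V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP;
            ctrl[0].value = 10;     /* assumed example, valid range is 0..51 */
            ctrl[1].id = V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP;
            ctrl[1].value = 42;
            ctrl[2].id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP;
            ctrl[2].value = 30;     /* must lie within [MIN_QP, MAX_QP] */

            ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
            ctrls.count = 3;
            ctrls.controls = ctrl;

            return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }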
- -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_QP (integer)`` - Indicates quantization parameter for hierarchical coding layer 4. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_QP (integer)`` - Indicates quantization parameter for hierarchical coding layer 5. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_QP (integer)`` - Indicates quantization parameter for hierarchical coding layer 6. - Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, - V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP]. - -.. _v4l2-hevc-profile: - -``V4L2_CID_MPEG_VIDEO_HEVC_PROFILE`` - (enum) - -enum v4l2_mpeg_video_hevc_profile - - Select the desired profile for HEVC encoder. - -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN`` - - Main profile. - * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE`` - - Main still picture profile. - * - ``V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10`` - - Main 10 profile. - -.. raw:: latex - - \normalsize - - -.. _v4l2-hevc-level: - -``V4L2_CID_MPEG_VIDEO_HEVC_LEVEL`` - (enum) - -enum v4l2_mpeg_video_hevc_level - - Selects the desired level for HEVC encoder. - -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_1`` - - Level 1.0 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2`` - - Level 2.0 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1`` - - Level 2.1 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3`` - - Level 3.0 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1`` - - Level 3.1 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4`` - - Level 4.0 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1`` - - Level 4.1 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5`` - - Level 5.0 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1`` - - Level 5.1 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2`` - - Level 5.2 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6`` - - Level 6.0 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1`` - - Level 6.1 - * - ``V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2`` - - Level 6.2 - -.. raw:: latex - - \normalsize - - -``V4L2_CID_MPEG_VIDEO_HEVC_FRAME_RATE_RESOLUTION (integer)`` - Indicates the number of evenly spaced subintervals, called ticks, within - one second. This is a 16 bit unsigned integer and has a maximum value up to - 0xffff and a minimum value of 1. - -.. _v4l2-hevc-tier: - -``V4L2_CID_MPEG_VIDEO_HEVC_TIER`` - (enum) - -enum v4l2_mpeg_video_hevc_tier - - TIER_FLAG specifies tiers information of the HEVC encoded picture. Tier - were made to deal with applications that differ in terms of maximum bit - rate. Setting the flag to 0 selects HEVC tier as Main tier and setting - this flag to 1 indicates High tier. High tier is for applications requiring - high bit rates. - -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{9.0cm}|p{8.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEVC_TIER_MAIN`` - - Main tier. - * - ``V4L2_MPEG_VIDEO_HEVC_TIER_HIGH`` - - High tier. - -.. raw:: latex - - \normalsize - - -``V4L2_CID_MPEG_VIDEO_HEVC_MAX_PARTITION_DEPTH (integer)`` - Selects HEVC maximum coding unit depth. - -.. _v4l2-hevc-loop-filter-mode: - -``V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE`` - (enum) - -enum v4l2_mpeg_video_hevc_loop_filter_mode - - Loop filter mode for HEVC encoder. Possible values are: - -.. raw:: latex - - \footnotesize - -.. 
tabularcolumns:: |p{10.7cm}|p{6.3cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED`` - - Loop filter is disabled. - * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED`` - - Loop filter is enabled. - * - ``V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY`` - - Loop filter is disabled at the slice boundary. - -.. raw:: latex - - \normalsize - - -``V4L2_CID_MPEG_VIDEO_HEVC_LF_BETA_OFFSET_DIV2 (integer)`` - Selects HEVC loop filter beta offset. The valid range is [-6, +6]. - -``V4L2_CID_MPEG_VIDEO_HEVC_LF_TC_OFFSET_DIV2 (integer)`` - Selects HEVC loop filter tc offset. The valid range is [-6, +6]. - -.. _v4l2-hevc-refresh-type: - -``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_TYPE`` - (enum) - -enum v4l2_mpeg_video_hevc_hier_refresh_type - - Selects refresh type for HEVC encoder. - Host has to specify the period into - V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD. - -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{8.0cm}|p{9.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_NONE`` - - Use the B frame for hierarchical coding. - * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_CRA`` - - Use CRA (Clean Random Access Unit) picture encoding. - * - ``V4L2_MPEG_VIDEO_HEVC_REFRESH_IDR`` - - Use IDR (Instantaneous Decoding Refresh) picture encoding. - -.. raw:: latex - - \normalsize - - -``V4L2_CID_MPEG_VIDEO_HEVC_REFRESH_PERIOD (integer)`` - Selects the refresh period for HEVC encoder. - This specifies the number of I pictures between two CRA/IDR pictures. - This is valid only if REFRESH_TYPE is not 0. - -``V4L2_CID_MPEG_VIDEO_HEVC_LOSSLESS_CU (boolean)`` - Indicates HEVC lossless encoding. Setting it to 0 disables lossless - encoding. Setting it to 1 enables lossless encoding. - -``V4L2_CID_MPEG_VIDEO_HEVC_CONST_INTRA_PRED (boolean)`` - Indicates constant intra prediction for HEVC encoder. Specifies the - constrained intra prediction in which intra largest coding unit (LCU) - prediction is performed by using residual data and decoded samples of - neighboring intra LCU only. Setting the value to 1 enables constant intra - prediction and setting the value to 0 disables constant intra prediction. - -``V4L2_CID_MPEG_VIDEO_HEVC_WAVEFRONT (boolean)`` - Indicates wavefront parallel processing for HEVC encoder. Setting it to 0 - disables the feature and setting it to 1 enables the wavefront parallel - processing. - -``V4L2_CID_MPEG_VIDEO_HEVC_GENERAL_PB (boolean)`` - Setting the value to 1 enables combination of P and B frame for HEVC - encoder. - -``V4L2_CID_MPEG_VIDEO_HEVC_TEMPORAL_ID (boolean)`` - Indicates temporal identifier for HEVC encoder which is enabled by - setting the value to 1. - -``V4L2_CID_MPEG_VIDEO_HEVC_STRONG_SMOOTHING (boolean)`` - Indicates bi-linear interpolation is conditionally used in the intra - prediction filtering process in the CVS when set to 1. Indicates bi-linear - interpolation is not used in the CVS when set to 0. - -``V4L2_CID_MPEG_VIDEO_HEVC_MAX_NUM_MERGE_MV_MINUS1 (integer)`` - Indicates maximum number of merge candidate motion vectors. - Values are from 0 to 4. - -``V4L2_CID_MPEG_VIDEO_HEVC_TMV_PREDICTION (boolean)`` - Indicates temporal motion vector prediction for HEVC encoder. Setting it to - 1 enables the prediction. Setting it to 0 disables the prediction. - -``V4L2_CID_MPEG_VIDEO_HEVC_WITHOUT_STARTCODE (boolean)`` - Specifies if HEVC generates a stream with a size of the length field - instead of start code pattern. 
The size of the length field is configurable - through the V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD control. Setting - the value to 0 disables encoding without startcode pattern. Setting the - value to 1 will enables encoding without startcode pattern. - -.. _v4l2-hevc-size-of-length-field: - -``V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD`` -(enum) - -enum v4l2_mpeg_video_hevc_size_of_length_field - - Indicates the size of length field. - This is valid when encoding WITHOUT_STARTCODE_ENABLE is enabled. - -.. raw:: latex - - \footnotesize - -.. tabularcolumns:: |p{6.0cm}|p{11.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_0`` - - Generate start code pattern (Normal). - * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_1`` - - Generate size of length field instead of start code pattern and length is 1. - * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_2`` - - Generate size of length field instead of start code pattern and length is 2. - * - ``V4L2_MPEG_VIDEO_HEVC_SIZE_4`` - - Generate size of length field instead of start code pattern and length is 4. - -.. raw:: latex - - \normalsize - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR (integer)`` - Indicates bit rate for hierarchical coding layer 0 for HEVC encoder. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR (integer)`` - Indicates bit rate for hierarchical coding layer 1 for HEVC encoder. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR (integer)`` - Indicates bit rate for hierarchical coding layer 2 for HEVC encoder. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR (integer)`` - Indicates bit rate for hierarchical coding layer 3 for HEVC encoder. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR (integer)`` - Indicates bit rate for hierarchical coding layer 4 for HEVC encoder. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR (integer)`` - Indicates bit rate for hierarchical coding layer 5 for HEVC encoder. - -``V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR (integer)`` - Indicates bit rate for hierarchical coding layer 6 for HEVC encoder. - -``V4L2_CID_MPEG_VIDEO_REF_NUMBER_FOR_PFRAMES (integer)`` - Selects number of P reference pictures required for HEVC encoder. - P-Frame can use 1 or 2 frames for reference. - -``V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (integer)`` - Indicates whether to generate SPS and PPS at every IDR. Setting it to 0 - disables generating SPS and PPS at every IDR. Setting it to one enables - generating SPS and PPS at every IDR. - - -.. _camera-controls: - -Camera Control Reference -======================== - -The Camera class includes controls for mechanical (or equivalent -digital) features of a device such as controllable lenses or sensors. - - -.. _camera-control-id: - -Camera Control IDs ------------------- - -``V4L2_CID_CAMERA_CLASS (class)`` - The Camera class descriptor. Calling - :ref:`VIDIOC_QUERYCTRL` for this control will - return a description of this control class. - -.. _v4l2-exposure-auto-type: - -``V4L2_CID_EXPOSURE_AUTO`` - (enum) - -enum v4l2_exposure_auto_type - - Enables automatic adjustments of the exposure time and/or iris - aperture. The effect of manual changes of the exposure time or iris - aperture while these features are enabled is undefined, drivers - should ignore such requests. Possible values are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_EXPOSURE_AUTO`` - - Automatic exposure time, automatic iris aperture. - * - ``V4L2_EXPOSURE_MANUAL`` - - Manual exposure time, manual iris. 
- * - ``V4L2_EXPOSURE_SHUTTER_PRIORITY`` - - Manual exposure time, auto iris. - * - ``V4L2_EXPOSURE_APERTURE_PRIORITY`` - - Auto exposure time, manual iris. - - - -``V4L2_CID_EXPOSURE_ABSOLUTE (integer)`` - Determines the exposure time of the camera sensor. The exposure time - is limited by the frame interval. Drivers should interpret the - values as 100 µs units, where the value 1 stands for 1/10000th of a - second, 10000 for 1 second and 100000 for 10 seconds. - -``V4L2_CID_EXPOSURE_AUTO_PRIORITY (boolean)`` - When ``V4L2_CID_EXPOSURE_AUTO`` is set to ``AUTO`` or - ``APERTURE_PRIORITY``, this control determines if the device may - dynamically vary the frame rate. By default this feature is disabled - (0) and the frame rate must remain constant. - -``V4L2_CID_AUTO_EXPOSURE_BIAS (integer menu)`` - Determines the automatic exposure compensation, it is effective only - when ``V4L2_CID_EXPOSURE_AUTO`` control is set to ``AUTO``, - ``SHUTTER_PRIORITY`` or ``APERTURE_PRIORITY``. It is expressed in - terms of EV, drivers should interpret the values as 0.001 EV units, - where the value 1000 stands for +1 EV. - - Increasing the exposure compensation value is equivalent to - decreasing the exposure value (EV) and will increase the amount of - light at the image sensor. The camera performs the exposure - compensation by adjusting absolute exposure time and/or aperture. - -.. _v4l2-exposure-metering: - -``V4L2_CID_EXPOSURE_METERING`` - (enum) - -enum v4l2_exposure_metering - - Determines how the camera measures the amount of light available for - the frame exposure. Possible values are: - -.. tabularcolumns:: |p{8.5cm}|p{9.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_EXPOSURE_METERING_AVERAGE`` - - Use the light information coming from the entire frame and average - giving no weighting to any particular portion of the metered area. - * - ``V4L2_EXPOSURE_METERING_CENTER_WEIGHTED`` - - Average the light information coming from the entire frame giving - priority to the center of the metered area. - * - ``V4L2_EXPOSURE_METERING_SPOT`` - - Measure only very small area at the center of the frame. - * - ``V4L2_EXPOSURE_METERING_MATRIX`` - - A multi-zone metering. The light intensity is measured in several - points of the frame and the results are combined. The algorithm of - the zones selection and their significance in calculating the - final value is device dependent. - - - -``V4L2_CID_PAN_RELATIVE (integer)`` - This control turns the camera horizontally by the specified amount. - The unit is undefined. A positive value moves the camera to the - right (clockwise when viewed from above), a negative value to the - left. A value of zero does not cause motion. This is a write-only - control. - -``V4L2_CID_TILT_RELATIVE (integer)`` - This control turns the camera vertically by the specified amount. - The unit is undefined. A positive value moves the camera up, a - negative value down. A value of zero does not cause motion. This is - a write-only control. - -``V4L2_CID_PAN_RESET (button)`` - When this control is set, the camera moves horizontally to the - default position. - -``V4L2_CID_TILT_RESET (button)`` - When this control is set, the camera moves vertically to the default - position. - -``V4L2_CID_PAN_ABSOLUTE (integer)`` - This control turns the camera horizontally to the specified - position. Positive values move the camera to the right (clockwise - when viewed from above), negative values to the left. 
Drivers should - interpret the values as arc seconds, with valid values between -180 - * 3600 and +180 * 3600 inclusive. - -``V4L2_CID_TILT_ABSOLUTE (integer)`` - This control turns the camera vertically to the specified position. - Positive values move the camera up, negative values down. Drivers - should interpret the values as arc seconds, with valid values - between -180 * 3600 and +180 * 3600 inclusive. - -``V4L2_CID_FOCUS_ABSOLUTE (integer)`` - This control sets the focal point of the camera to the specified - position. The unit is undefined. Positive values set the focus - closer to the camera, negative values towards infinity. - -``V4L2_CID_FOCUS_RELATIVE (integer)`` - This control moves the focal point of the camera by the specified - amount. The unit is undefined. Positive values move the focus closer - to the camera, negative values towards infinity. This is a - write-only control. - -``V4L2_CID_FOCUS_AUTO (boolean)`` - Enables continuous automatic focus adjustments. The effect of manual - focus adjustments while this feature is enabled is undefined, - drivers should ignore such requests. - -``V4L2_CID_AUTO_FOCUS_START (button)`` - Starts single auto focus process. The effect of setting this control - when ``V4L2_CID_FOCUS_AUTO`` is set to ``TRUE`` (1) is undefined, - drivers should ignore such requests. - -``V4L2_CID_AUTO_FOCUS_STOP (button)`` - Aborts automatic focusing started with ``V4L2_CID_AUTO_FOCUS_START`` - control. It is effective only when the continuous autofocus is - disabled, that is when ``V4L2_CID_FOCUS_AUTO`` control is set to - ``FALSE`` (0). - -.. _v4l2-auto-focus-status: - -``V4L2_CID_AUTO_FOCUS_STATUS (bitmask)`` - The automatic focus status. This is a read-only control. - - Setting ``V4L2_LOCK_FOCUS`` lock bit of the ``V4L2_CID_3A_LOCK`` - control may stop updates of the ``V4L2_CID_AUTO_FOCUS_STATUS`` - control value. - -.. tabularcolumns:: |p{6.5cm}|p{11.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_AUTO_FOCUS_STATUS_IDLE`` - - Automatic focus is not active. - * - ``V4L2_AUTO_FOCUS_STATUS_BUSY`` - - Automatic focusing is in progress. - * - ``V4L2_AUTO_FOCUS_STATUS_REACHED`` - - Focus has been reached. - * - ``V4L2_AUTO_FOCUS_STATUS_FAILED`` - - Automatic focus has failed, the driver will not transition from - this state until another action is performed by an application. - - - -.. _v4l2-auto-focus-range: - -``V4L2_CID_AUTO_FOCUS_RANGE`` - (enum) - -enum v4l2_auto_focus_range - - Determines auto focus distance range for which lens may be adjusted. - -.. tabularcolumns:: |p{6.5cm}|p{11.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_AUTO_FOCUS_RANGE_AUTO`` - - The camera automatically selects the focus range. - * - ``V4L2_AUTO_FOCUS_RANGE_NORMAL`` - - Normal distance range, limited for best automatic focus - performance. - * - ``V4L2_AUTO_FOCUS_RANGE_MACRO`` - - Macro (close-up) auto focus. The camera will use its minimum - possible distance for auto focus. - * - ``V4L2_AUTO_FOCUS_RANGE_INFINITY`` - - The lens is set to focus on an object at infinite distance. - - - -``V4L2_CID_ZOOM_ABSOLUTE (integer)`` - Specify the objective lens focal length as an absolute value. The - zoom unit is driver-specific and its value should be a positive - integer. - -``V4L2_CID_ZOOM_RELATIVE (integer)`` - Specify the objective lens focal length relatively to the current - value. Positive values move the zoom lens group towards the - telephoto direction, negative values towards the wide-angle - direction. 
The zoom unit is driver-specific. This is a write-only - control. - -``V4L2_CID_ZOOM_CONTINUOUS (integer)`` - Move the objective lens group at the specified speed until it - reaches physical device limits or until an explicit request to stop - the movement. A positive value moves the zoom lens group towards the - telephoto direction. A value of zero stops the zoom lens group - movement. A negative value moves the zoom lens group towards the - wide-angle direction. The zoom speed unit is driver-specific. - -``V4L2_CID_IRIS_ABSOLUTE (integer)`` - This control sets the camera's aperture to the specified value. The - unit is undefined. Larger values open the iris wider, smaller values - close it. - -``V4L2_CID_IRIS_RELATIVE (integer)`` - This control modifies the camera's aperture by the specified amount. - The unit is undefined. Positive values open the iris one step - further, negative values close it one step further. This is a - write-only control. - -``V4L2_CID_PRIVACY (boolean)`` - Prevent video from being acquired by the camera. When this control - is set to ``TRUE`` (1), no image can be captured by the camera. - Common means to enforce privacy are mechanical obturation of the - sensor and firmware image processing, but the device is not - restricted to these methods. Devices that implement the privacy - control must support read access and may support write access. - -``V4L2_CID_BAND_STOP_FILTER (integer)`` - Switch the band-stop filter of a camera sensor on or off, or specify - its strength. Such band-stop filters can be used, for example, to - filter out the fluorescent light component. - -.. _v4l2-auto-n-preset-white-balance: - -``V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE`` - (enum) - -enum v4l2_auto_n_preset_white_balance - - Sets white balance to automatic, manual or a preset. The presets - determine color temperature of the light as a hint to the camera for - white balance adjustments resulting in most accurate color - representation. The following white balance presets are listed in - order of increasing color temperature. - -.. tabularcolumns:: |p{7.0 cm}|p{10.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_WHITE_BALANCE_MANUAL`` - - Manual white balance. - * - ``V4L2_WHITE_BALANCE_AUTO`` - - Automatic white balance adjustments. - * - ``V4L2_WHITE_BALANCE_INCANDESCENT`` - - White balance setting for incandescent (tungsten) lighting. It - generally cools down the colors and corresponds approximately to - 2500...3500 K color temperature range. - * - ``V4L2_WHITE_BALANCE_FLUORESCENT`` - - White balance preset for fluorescent lighting. It corresponds - approximately to 4000...5000 K color temperature. - * - ``V4L2_WHITE_BALANCE_FLUORESCENT_H`` - - With this setting the camera will compensate for fluorescent H - lighting. - * - ``V4L2_WHITE_BALANCE_HORIZON`` - - White balance setting for horizon daylight. It corresponds - approximately to 5000 K color temperature. - * - ``V4L2_WHITE_BALANCE_DAYLIGHT`` - - White balance preset for daylight (with clear sky). It corresponds - approximately to 5000...6500 K color temperature. - * - ``V4L2_WHITE_BALANCE_FLASH`` - - With this setting the camera will compensate for the flash light. - It slightly warms up the colors and corresponds roughly to - 5000...5500 K color temperature. - * - ``V4L2_WHITE_BALANCE_CLOUDY`` - - White balance preset for moderately overcast sky. This option - corresponds approximately to 6500...8000 K color temperature - range. 
- * - ``V4L2_WHITE_BALANCE_SHADE`` - - White balance preset for shade or heavily overcast sky. It - corresponds approximately to 9000...10000 K color temperature. - - - -.. _v4l2-wide-dynamic-range: - -``V4L2_CID_WIDE_DYNAMIC_RANGE (boolean)`` - Enables or disables the camera's wide dynamic range feature. This - feature allows to obtain clear images in situations where intensity - of the illumination varies significantly throughout the scene, i.e. - there are simultaneously very dark and very bright areas. It is most - commonly realized in cameras by combining two subsequent frames with - different exposure times. [#f1]_ - -.. _v4l2-image-stabilization: - -``V4L2_CID_IMAGE_STABILIZATION (boolean)`` - Enables or disables image stabilization. - -``V4L2_CID_ISO_SENSITIVITY (integer menu)`` - Determines ISO equivalent of an image sensor indicating the sensor's - sensitivity to light. The numbers are expressed in arithmetic scale, - as per :ref:`iso12232` standard, where doubling the sensor - sensitivity is represented by doubling the numerical ISO value. - Applications should interpret the values as standard ISO values - multiplied by 1000, e.g. control value 800 stands for ISO 0.8. - Drivers will usually support only a subset of standard ISO values. - The effect of setting this control while the - ``V4L2_CID_ISO_SENSITIVITY_AUTO`` control is set to a value other - than ``V4L2_CID_ISO_SENSITIVITY_MANUAL`` is undefined, drivers - should ignore such requests. - -.. _v4l2-iso-sensitivity-auto-type: - -``V4L2_CID_ISO_SENSITIVITY_AUTO`` - (enum) - -enum v4l2_iso_sensitivity_type - - Enables or disables automatic ISO sensitivity adjustments. - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_CID_ISO_SENSITIVITY_MANUAL`` - - Manual ISO sensitivity. - * - ``V4L2_CID_ISO_SENSITIVITY_AUTO`` - - Automatic ISO sensitivity adjustments. - - - -.. _v4l2-scene-mode: - -``V4L2_CID_SCENE_MODE`` - (enum) - -enum v4l2_scene_mode - - This control allows to select scene programs as the camera automatic - modes optimized for common shooting scenes. Within these modes the - camera determines best exposure, aperture, focusing, light metering, - white balance and equivalent sensitivity. The controls of those - parameters are influenced by the scene mode control. An exact - behavior in each mode is subject to the camera specification. - - When the scene mode feature is not used, this control should be set - to ``V4L2_SCENE_MODE_NONE`` to make sure the other possibly related - controls are accessible. The following scene programs are defined: - -.. tabularcolumns:: |p{6.0cm}|p{11.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_SCENE_MODE_NONE`` - - The scene mode feature is disabled. - * - ``V4L2_SCENE_MODE_BACKLIGHT`` - - Backlight. Compensates for dark shadows when light is coming from - behind a subject, also by automatically turning on the flash. - * - ``V4L2_SCENE_MODE_BEACH_SNOW`` - - Beach and snow. This mode compensates for all-white or bright - scenes, which tend to look gray and low contrast, when camera's - automatic exposure is based on an average scene brightness. To - compensate, this mode automatically slightly overexposes the - frames. The white balance may also be adjusted to compensate for - the fact that reflected snow looks bluish rather than white. - * - ``V4L2_SCENE_MODE_CANDLELIGHT`` - - Candle light. The camera generally raises the ISO sensitivity and - lowers the shutter speed. This mode compensates for relatively - close subject in the scene. 
The flash is disabled in order to - preserve the ambiance of the light. - * - ``V4L2_SCENE_MODE_DAWN_DUSK`` - - Dawn and dusk. Preserves the colors seen in low natural light - before dusk and after dawn. The camera may turn off the flash, and - automatically focus at infinity. It will usually boost saturation - and lower the shutter speed. - * - ``V4L2_SCENE_MODE_FALL_COLORS`` - - Fall colors. Increases saturation and adjusts white balance for - color enhancement. Pictures of autumn leaves get saturated reds - and yellows. - * - ``V4L2_SCENE_MODE_FIREWORKS`` - - Fireworks. Long exposure times are used to capture the expanding - burst of light from a firework. The camera may invoke image - stabilization. - * - ``V4L2_SCENE_MODE_LANDSCAPE`` - - Landscape. The camera may choose a small aperture to provide deep - depth of field and long exposure duration to help capture detail - in dim light conditions. The focus is fixed at infinity. Suitable - for distant and wide scenery. - * - ``V4L2_SCENE_MODE_NIGHT`` - - Night, also known as Night Landscape. Designed for low light - conditions, it preserves detail in the dark areas without blowing - out bright objects. The camera generally sets itself to a - medium-to-high ISO sensitivity, with a relatively long exposure - time, and turns flash off. As such, there will be increased image - noise and the possibility of a blurred image. - * - ``V4L2_SCENE_MODE_PARTY_INDOOR`` - - Party and indoor. Designed to capture indoor scenes that are lit - by indoor background lighting as well as the flash. The camera - usually increases ISO sensitivity, and adjusts exposure for the - low light conditions. - * - ``V4L2_SCENE_MODE_PORTRAIT`` - - Portrait. The camera adjusts the aperture so that the depth of - field is reduced, which helps to isolate the subject against a - smooth background. Most cameras recognize the presence of faces in - the scene and focus on them. The color hue is adjusted to enhance - skin tones. The intensity of the flash is often reduced. - * - ``V4L2_SCENE_MODE_SPORTS`` - - Sports. Significantly increases ISO and uses a fast shutter speed - to freeze motion of rapidly-moving subjects. Increased image noise - may be seen in this mode. - * - ``V4L2_SCENE_MODE_SUNSET`` - - Sunset. Preserves deep hues seen in sunsets and sunrises. It bumps - up the saturation. - * - ``V4L2_SCENE_MODE_TEXT`` - - Text. It applies extra contrast and sharpness; it is typically a - black-and-white mode optimized for readability. Automatic focus - may be switched to close-up mode and this setting may also involve - some lens-distortion correction. - - - -``V4L2_CID_3A_LOCK (bitmask)`` - This control locks or unlocks the automatic focus, exposure and - white balance. The automatic adjustments can be paused independently - by setting the corresponding lock bit to 1. The camera then retains - the settings until the lock bit is cleared. The following lock bits - are defined: - - When a given algorithm is not enabled, drivers should ignore - requests to lock it and should return no error. An example might be - an application setting bit ``V4L2_LOCK_WHITE_BALANCE`` when the - ``V4L2_CID_AUTO_WHITE_BALANCE`` control is set to ``FALSE``. The - value of this control may be changed by exposure, white balance or - focus controls. - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_LOCK_EXPOSURE`` - - Automatic exposure adjustments lock. - * - ``V4L2_LOCK_WHITE_BALANCE`` - - Automatic white balance adjustments lock.
- * - ``V4L2_LOCK_FOCUS`` - - Automatic focus lock. - - - -``V4L2_CID_PAN_SPEED (integer)`` - This control turns the camera horizontally at the specific speed. - The unit is undefined. A positive value moves the camera to the - right (clockwise when viewed from above), a negative value to the - left. A value of zero stops the motion if one is in progress and has - no effect otherwise. - -``V4L2_CID_TILT_SPEED (integer)`` - This control turns the camera vertically at the specified speed. The - unit is undefined. A positive value moves the camera up, a negative - value down. A value of zero stops the motion if one is in progress - and has no effect otherwise. - - -.. _fm-tx-controls: - -FM Transmitter Control Reference -================================ - -The FM Transmitter (FM_TX) class includes controls for common features -of FM transmissions capable devices. Currently this class includes -parameters for audio compression, pilot tone generation, audio deviation -limiter, RDS transmission and tuning power features. - - -.. _fm-tx-control-id: - -FM_TX Control IDs ------------------ - -``V4L2_CID_FM_TX_CLASS (class)`` - The FM_TX class descriptor. Calling - :ref:`VIDIOC_QUERYCTRL` for this control will - return a description of this control class. - -``V4L2_CID_RDS_TX_DEVIATION (integer)`` - Configures RDS signal frequency deviation level in Hz. The range and - step are driver-specific. - -``V4L2_CID_RDS_TX_PI (integer)`` - Sets the RDS Programme Identification field for transmission. - -``V4L2_CID_RDS_TX_PTY (integer)`` - Sets the RDS Programme Type field for transmission. This encodes up - to 31 pre-defined programme types. - -``V4L2_CID_RDS_TX_PS_NAME (string)`` - Sets the Programme Service name (PS_NAME) for transmission. It is - intended for static display on a receiver. It is the primary aid to - listeners in programme service identification and selection. In - Annex E of :ref:`iec62106`, the RDS specification, there is a full - description of the correct character encoding for Programme Service - name strings. Also from RDS specification, PS is usually a single - eight character text. However, it is also possible to find receivers - which can scroll strings sized as 8 x N characters. So, this control - must be configured with steps of 8 characters. The result is it must - always contain a string with size multiple of 8. - -``V4L2_CID_RDS_TX_RADIO_TEXT (string)`` - Sets the Radio Text info for transmission. It is a textual - description of what is being broadcasted. RDS Radio Text can be - applied when broadcaster wishes to transmit longer PS names, - programme-related information or any other text. In these cases, - RadioText should be used in addition to ``V4L2_CID_RDS_TX_PS_NAME``. - The encoding for Radio Text strings is also fully described in Annex - E of :ref:`iec62106`. The length of Radio Text strings depends on - which RDS Block is being used to transmit it, either 32 (2A block) - or 64 (2B block). However, it is also possible to find receivers - which can scroll strings sized as 32 x N or 64 x N characters. So, - this control must be configured with steps of 32 or 64 characters. - The result is it must always contain a string with size multiple of - 32 or 64. - -``V4L2_CID_RDS_TX_MONO_STEREO (boolean)`` - Sets the Mono/Stereo bit of the Decoder Identification code. If set, - then the audio was recorded as stereo. - -``V4L2_CID_RDS_TX_ARTIFICIAL_HEAD (boolean)`` - Sets the - `Artificial Head `__ - bit of the Decoder Identification code. 
If set, then the audio was - recorded using an artificial head. - -``V4L2_CID_RDS_TX_COMPRESSED (boolean)`` - Sets the Compressed bit of the Decoder Identification code. If set, - then the audio is compressed. - -``V4L2_CID_RDS_TX_DYNAMIC_PTY (boolean)`` - Sets the Dynamic PTY bit of the Decoder Identification code. If set, - then the PTY code is dynamically switched. - -``V4L2_CID_RDS_TX_TRAFFIC_ANNOUNCEMENT (boolean)`` - If set, then a traffic announcement is in progress. - -``V4L2_CID_RDS_TX_TRAFFIC_PROGRAM (boolean)`` - If set, then the tuned programme carries traffic announcements. - -``V4L2_CID_RDS_TX_MUSIC_SPEECH (boolean)`` - If set, then this channel broadcasts music. If cleared, then it - broadcasts speech. If the transmitter doesn't make this distinction, - then it should be set. - -``V4L2_CID_RDS_TX_ALT_FREQS_ENABLE (boolean)`` - If set, then transmit alternate frequencies. - -``V4L2_CID_RDS_TX_ALT_FREQS (__u32 array)`` - The alternate frequencies in kHz units. The RDS standard allows for - up to 25 frequencies to be defined. Drivers may support fewer - frequencies so check the array size. - -``V4L2_CID_AUDIO_LIMITER_ENABLED (boolean)`` - Enables or disables the audio deviation limiter feature. The limiter - is useful when trying to maximize the audio volume, minimize - receiver-generated distortion and prevent overmodulation. - -``V4L2_CID_AUDIO_LIMITER_RELEASE_TIME (integer)`` - Sets the audio deviation limiter feature release time. Unit is in - useconds. Step and range are driver-specific. - -``V4L2_CID_AUDIO_LIMITER_DEVIATION (integer)`` - Configures audio frequency deviation level in Hz. The range and step - are driver-specific. - -``V4L2_CID_AUDIO_COMPRESSION_ENABLED (boolean)`` - Enables or disables the audio compression feature. This feature - amplifies signals below the threshold by a fixed gain and compresses - audio signals above the threshold by the ratio of Threshold/(Gain + - Threshold). - -``V4L2_CID_AUDIO_COMPRESSION_GAIN (integer)`` - Sets the gain for audio compression feature. It is a dB value. The - range and step are driver-specific. - -``V4L2_CID_AUDIO_COMPRESSION_THRESHOLD (integer)`` - Sets the threshold level for audio compression freature. It is a dB - value. The range and step are driver-specific. - -``V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME (integer)`` - Sets the attack time for audio compression feature. It is a useconds - value. The range and step are driver-specific. - -``V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME (integer)`` - Sets the release time for audio compression feature. It is a - useconds value. The range and step are driver-specific. - -``V4L2_CID_PILOT_TONE_ENABLED (boolean)`` - Enables or disables the pilot tone generation feature. - -``V4L2_CID_PILOT_TONE_DEVIATION (integer)`` - Configures pilot tone frequency deviation level. Unit is in Hz. The - range and step are driver-specific. - -``V4L2_CID_PILOT_TONE_FREQUENCY (integer)`` - Configures pilot tone frequency value. Unit is in Hz. The range and - step are driver-specific. - -``V4L2_CID_TUNE_PREEMPHASIS`` - (enum) - -enum v4l2_preemphasis - - Configures the pre-emphasis value for broadcasting. A pre-emphasis - filter is applied to the broadcast to accentuate the high audio - frequencies. Depending on the region, a time constant of either 50 - or 75 useconds is used. The enum v4l2_preemphasis defines possible - values for pre-emphasis. Here they are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_PREEMPHASIS_DISABLED`` - - No pre-emphasis is applied. 
- * - ``V4L2_PREEMPHASIS_50_uS`` - - A pre-emphasis of 50 uS is used. - * - ``V4L2_PREEMPHASIS_75_uS`` - - A pre-emphasis of 75 uS is used. - - - -``V4L2_CID_TUNE_POWER_LEVEL (integer)`` - Sets the output power level for signal transmission. Unit is in - dBuV. Range and step are driver-specific. - -``V4L2_CID_TUNE_ANTENNA_CAPACITOR (integer)`` - This selects the value of antenna tuning capacitor manually or - automatically if set to zero. Unit, range and step are - driver-specific. - -For more details about the RDS specification, refer to :ref:`iec62106` -document, from CENELEC. - - -.. _flash-controls: - -Flash Control Reference -======================= - -The V4L2 flash controls are intended to provide generic access to flash -controller devices. Flash controller devices are typically used in -digital cameras. - -The interface can support both LED and xenon flash devices. At the time of -writing, there is no xenon flash driver using this interface. - - -.. _flash-controls-use-cases: - -Supported use cases -------------------- - - -Unsynchronised LED flash (software strobe) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Unsynchronised LED flash is controlled directly by the host, as is the -sensor. The flash must be enabled by the host before the exposure of the -image starts and disabled once it ends. The host is fully responsible -for the timing of the flash. - -Example of such device: Nokia N900. - - -Synchronised LED flash (hardware strobe) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The synchronised LED flash is pre-programmed by the host (power and -timeout) but controlled by the sensor through a strobe signal from the -sensor to the flash. - -The sensor controls the flash duration and timing. This information -typically must be made available to the sensor. - - -LED flash as torch -^^^^^^^^^^^^^^^^^^ - -LED flash may be used as a torch in conjunction with another use case -involving the camera, or on its own. - - -.. _flash-control-id: - -Flash Control IDs -""""""""""""""""" - -``V4L2_CID_FLASH_CLASS (class)`` - The FLASH class descriptor. - -``V4L2_CID_FLASH_LED_MODE (menu)`` - Defines the mode of the flash LED, the high-power white LED attached - to the flash controller. Setting this control may not be possible in - the presence of some faults. See V4L2_CID_FLASH_FAULT. - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_FLASH_LED_MODE_NONE`` - - Off. - * - ``V4L2_FLASH_LED_MODE_FLASH`` - - Flash mode. - * - ``V4L2_FLASH_LED_MODE_TORCH`` - - Torch mode. See V4L2_CID_FLASH_TORCH_INTENSITY. - - - -``V4L2_CID_FLASH_STROBE_SOURCE (menu)`` - Defines the source of the flash LED strobe. - -.. tabularcolumns:: |p{7.0cm}|p{10.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_FLASH_STROBE_SOURCE_SOFTWARE`` - - The flash strobe is triggered by using the - V4L2_CID_FLASH_STROBE control. - * - ``V4L2_FLASH_STROBE_SOURCE_EXTERNAL`` - - The flash strobe is triggered by an external source. Typically - this is a sensor, which makes it possible to synchronise the - flash strobe start to the exposure start. - - - -``V4L2_CID_FLASH_STROBE (button)`` - Strobe flash. Valid when V4L2_CID_FLASH_LED_MODE is set to - V4L2_FLASH_LED_MODE_FLASH and V4L2_CID_FLASH_STROBE_SOURCE - is set to V4L2_FLASH_STROBE_SOURCE_SOFTWARE. Setting this - control may not be possible in the presence of some faults. See - V4L2_CID_FLASH_FAULT. - -``V4L2_CID_FLASH_STROBE_STOP (button)`` - Stop flash strobe immediately.
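Taken together, a software strobe amounts to selecting the flash LED mode, selecting the software
strobe source and then writing the strobe button control. The following is a minimal illustrative
sketch, not part of the original documentation; the file descriptor ``fd`` is assumed to refer to
an already-open flash device node, and error handling is reduced to a bare return.

.. code-block:: c

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Helper: set one flash-class control through the extended control API. */
    static int set_flash_ctrl(int fd, unsigned int id, int value)
    {
            struct v4l2_ext_control ctrl;
            struct v4l2_ext_controls ctrls;

            memset(&ctrl, 0, sizeof(ctrl));
            memset(&ctrls, 0, sizeof(ctrls));
            ctrl.id = id;
            ctrl.value = value;
            ctrls.ctrl_class = V4L2_CTRL_CLASS_FLASH;
            ctrls.count = 1;
            ctrls.controls = &ctrl;
            return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }

    /* Sketch: select flash mode with a software strobe source, then strobe. */
    static int sw_strobe(int fd)
    {
            if (set_flash_ctrl(fd, V4L2_CID_FLASH_LED_MODE,
                               V4L2_FLASH_LED_MODE_FLASH))
                    return -1;
            if (set_flash_ctrl(fd, V4L2_CID_FLASH_STROBE_SOURCE,
                               V4L2_FLASH_STROBE_SOURCE_SOFTWARE))
                    return -1;
            /* Button control: the value written is ignored by the driver. */
            return set_flash_ctrl(fd, V4L2_CID_FLASH_STROBE, 1);
    }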
- -``V4L2_CID_FLASH_STROBE_STATUS (boolean)`` - Strobe status: whether the flash is strobing at the moment or not. - This is a read-only control. - -``V4L2_CID_FLASH_TIMEOUT (integer)`` - Hardware timeout for flash. The flash strobe is stopped after this - period of time has passed from the start of the strobe. - -``V4L2_CID_FLASH_INTENSITY (integer)`` - Intensity of the flash strobe when the flash LED is in flash mode - (V4L2_FLASH_LED_MODE_FLASH). The unit should be milliamps (mA) - if possible. - -``V4L2_CID_FLASH_TORCH_INTENSITY (integer)`` - Intensity of the flash LED in torch mode - (V4L2_FLASH_LED_MODE_TORCH). The unit should be milliamps (mA) - if possible. Setting this control may not be possible in presence of - some faults. See V4L2_CID_FLASH_FAULT. - -``V4L2_CID_FLASH_INDICATOR_INTENSITY (integer)`` - Intensity of the indicator LED. The indicator LED may be fully - independent of the flash LED. The unit should be microamps (uA) if - possible. - -``V4L2_CID_FLASH_FAULT (bitmask)`` - Faults related to the flash. The faults tell about specific problems - in the flash chip itself or the LEDs attached to it. Faults may - prevent further use of some of the flash controls. In particular, - V4L2_CID_FLASH_LED_MODE is set to V4L2_FLASH_LED_MODE_NONE - if the fault affects the flash LED. Exactly which faults have such - an effect is chip dependent. Reading the faults resets the control - and returns the chip to a usable state if possible. - -.. tabularcolumns:: |p{8.0cm}|p{9.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_FLASH_FAULT_OVER_VOLTAGE`` - - Flash controller voltage to the flash LED has exceeded the limit - specific to the flash controller. - * - ``V4L2_FLASH_FAULT_TIMEOUT`` - - The flash strobe was still on when the timeout set by the user --- - V4L2_CID_FLASH_TIMEOUT control --- has expired. Not all flash - controllers may set this in all such conditions. - * - ``V4L2_FLASH_FAULT_OVER_TEMPERATURE`` - - The flash controller has overheated. - * - ``V4L2_FLASH_FAULT_SHORT_CIRCUIT`` - - The short circuit protection of the flash controller has been - triggered. - * - ``V4L2_FLASH_FAULT_OVER_CURRENT`` - - Current in the LED power supply has exceeded the limit specific to - the flash controller. - * - ``V4L2_FLASH_FAULT_INDICATOR`` - - The flash controller has detected a short or open circuit - condition on the indicator LED. - * - ``V4L2_FLASH_FAULT_UNDER_VOLTAGE`` - - Flash controller voltage to the flash LED has been below the - minimum limit specific to the flash controller. - * - ``V4L2_FLASH_FAULT_INPUT_VOLTAGE`` - - The input voltage of the flash controller is below the limit under - which strobing the flash at full current will not be possible.The - condition persists until this flag is no longer set. - * - ``V4L2_FLASH_FAULT_LED_OVER_TEMPERATURE`` - - The temperature of the LED has exceeded its allowed upper limit. - - - -``V4L2_CID_FLASH_CHARGE (boolean)`` - Enable or disable charging of the xenon flash capacitor. - -``V4L2_CID_FLASH_READY (boolean)`` - Is the flash ready to strobe? Xenon flashes require their capacitors - charged before strobing. LED flashes often require a cooldown period - after strobe during which another strobe will not be possible. This - is a read-only control. - - -.. _jpeg-controls: - -JPEG Control Reference -====================== - -The JPEG class includes controls for common features of JPEG encoders -and decoders. 
Currently it includes features for codecs implementing -progressive baseline DCT compression process with Huffman entropy -coding. - - -.. _jpeg-control-id: - -JPEG Control IDs ----------------- - -``V4L2_CID_JPEG_CLASS (class)`` - The JPEG class descriptor. Calling - :ref:`VIDIOC_QUERYCTRL` for this control will - return a description of this control class. - -``V4L2_CID_JPEG_CHROMA_SUBSAMPLING (menu)`` - The chroma subsampling factors describe how each component of an - input image is sampled, with respect to the maximum sample rate in each - spatial dimension. See :ref:`itu-t81`, clause A.1.1. for more - details. The ``V4L2_CID_JPEG_CHROMA_SUBSAMPLING`` control determines - how Cb and Cr components are downsampled after converting an input - image from RGB to Y'CbCr color space. - -.. tabularcolumns:: |p{7.0cm}|p{10.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_444`` - - No chroma subsampling, each pixel has Y, Cr and Cb values. - * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_422`` - - Horizontally subsample Cr, Cb components by a factor of 2. - * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_420`` - - Subsample Cr, Cb components horizontally and vertically by 2. - * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_411`` - - Horizontally subsample Cr, Cb components by a factor of 4. - * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_410`` - - Subsample Cr, Cb components horizontally by 4 and vertically by 2. - * - ``V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY`` - - Use only luminance component. - - - -``V4L2_CID_JPEG_RESTART_INTERVAL (integer)`` - The restart interval determines an interval of inserting RSTm - markers (m = 0..7). The purpose of these markers is to additionally - reinitialize the encoder process, in order to process blocks of an - image independently. For the lossy compression processes the restart - interval unit is the MCU (Minimum Coded Unit) and its value is contained - in the DRI (Define Restart Interval) marker. If the - ``V4L2_CID_JPEG_RESTART_INTERVAL`` control is set to 0, DRI and RSTm - markers will not be inserted. - -.. _jpeg-quality-control: - -``V4L2_CID_JPEG_COMPRESSION_QUALITY (integer)`` - The ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control determines the trade-off - between image quality and size. It provides a simpler method for - applications to control image quality, without a need for direct - reconfiguration of luminance and chrominance quantization tables. In - cases where a driver uses quantization tables configured directly by - an application, using interfaces defined elsewhere, the - ``V4L2_CID_JPEG_COMPRESSION_QUALITY`` control should be set by the - driver to 0. - - The value range of this control is driver-specific. Only positive, - non-zero values are meaningful. The recommended range is 1 - 100, - where larger values correspond to better image quality. - -.. _jpeg-active-marker-control: - -``V4L2_CID_JPEG_ACTIVE_MARKER (bitmask)`` - Specify which JPEG markers are included in the compressed stream. This - control is valid only for encoders. - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_JPEG_ACTIVE_MARKER_APP0`` - - Application data segment APP\ :sub:`0`. - * - ``V4L2_JPEG_ACTIVE_MARKER_APP1`` - - Application data segment APP\ :sub:`1`. - * - ``V4L2_JPEG_ACTIVE_MARKER_COM`` - - Comment segment. - * - ``V4L2_JPEG_ACTIVE_MARKER_DQT`` - - Quantization tables segment. - * - ``V4L2_JPEG_ACTIVE_MARKER_DHT`` - - Huffman tables segment. - - - -For more details about the JPEG specification, refer to :ref:`itu-t81`, -:ref:`jfif`, :ref:`w3c-jpeg-jfif`. - - -..
_image-source-controls: - -Image Source Control Reference -============================== - -The Image Source control class is intended for low-level control of -image source devices such as image sensors. The devices feature an -analogue to digital converter and a bus transmitter to transmit the -image data out of the device. - - -.. _image-source-control-id: - -Image Source Control IDs ------------------------- - -``V4L2_CID_IMAGE_SOURCE_CLASS (class)`` - The IMAGE_SOURCE class descriptor. - -``V4L2_CID_VBLANK (integer)`` - Vertical blanking. The idle period after every frame during which no - image data is produced. The unit of vertical blanking is a line. - Every line has length of the image width plus horizontal blanking at - the pixel rate defined by ``V4L2_CID_PIXEL_RATE`` control in the - same sub-device. - -``V4L2_CID_HBLANK (integer)`` - Horizontal blanking. The idle period after every line of image data - during which no image data is produced. The unit of horizontal - blanking is pixels. - -``V4L2_CID_ANALOGUE_GAIN (integer)`` - Analogue gain is gain affecting all colour components in the pixel - matrix. The gain operation is performed in the analogue domain - before A/D conversion. - -``V4L2_CID_TEST_PATTERN_RED (integer)`` - Test pattern red colour component. - -``V4L2_CID_TEST_PATTERN_GREENR (integer)`` - Test pattern green (next to red) colour component. - -``V4L2_CID_TEST_PATTERN_BLUE (integer)`` - Test pattern blue colour component. - -``V4L2_CID_TEST_PATTERN_GREENB (integer)`` - Test pattern green (next to blue) colour component. - - -.. _image-process-controls: - -Image Process Control Reference -=============================== - -The Image Process control class is intended for low-level control of -image processing functions. Unlike ``V4L2_CID_IMAGE_SOURCE_CLASS``, the -controls in this class affect processing the image, and do not control -capturing of it. - - -.. _image-process-control-id: - -Image Process Control IDs -------------------------- - -``V4L2_CID_IMAGE_PROC_CLASS (class)`` - The IMAGE_PROC class descriptor. - -``V4L2_CID_LINK_FREQ (integer menu)`` - Data bus frequency. Together with the media bus pixel code, bus type - (clock cycles per sample), the data bus frequency defines the pixel - rate (``V4L2_CID_PIXEL_RATE``) in the pixel array (or possibly - elsewhere, if the device is not an image sensor). The frame rate can - be calculated from the pixel clock, image width and height and - horizontal and vertical blanking. While the pixel rate control may - be defined elsewhere than in the subdev containing the pixel array, - the frame rate cannot be obtained from that information. This is - because only on the pixel array it can be assumed that the vertical - and horizontal blanking information is exact: no other blanking is - allowed in the pixel array. The selection of frame rate is performed - by selecting the desired horizontal and vertical blanking. The unit - of this control is Hz. - -``V4L2_CID_PIXEL_RATE (64-bit integer)`` - Pixel rate in the source pads of the subdev. This control is - read-only and its unit is pixels / second. - -``V4L2_CID_TEST_PATTERN (menu)`` - Some capture/display/sensor devices have the capability to generate - test pattern images. These hardware specific test patterns can be - used to test if a device is working properly. - -``V4L2_CID_DEINTERLACING_MODE (menu)`` - The video deinterlacing mode (such as Bob, Weave, ...). The menu items are - driver specific and are documented in :ref:`v4l-drivers`. 
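Because ``V4L2_CID_PIXEL_RATE`` is a read-only 64-bit control, it has to be read through the extended controls API (``VIDIOC_G_EXT_CTRLS``) rather than ``VIDIOC_G_CTRL``. A minimal sketch, assuming the sensor subdev is exposed at the hypothetical node /dev/v4l-subdev0:

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_ext_control ctrl = { .id = V4L2_CID_PIXEL_RATE };
        struct v4l2_ext_controls ctrls = {
            .which = V4L2_CTRL_WHICH_CUR_VAL,
            .count = 1,
            .controls = &ctrl,
        };
        /* Assumed sensor subdev node; find the real one with media-ctl -p. */
        int fd = open("/dev/v4l-subdev0", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls) < 0) {
            perror("VIDIOC_G_EXT_CTRLS");
            return 1;
        }
        /* value64 holds the pixel rate in pixels per second. */
        printf("pixel rate: %lld pixels/s\n", ctrl.value64);
        return 0;
    }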
- -``V4L2_CID_DIGITAL_GAIN (integer)`` - Digital gain is the value by which all colour components - are multiplied by. Typically the digital gain applied is the - control value divided by e.g. 0x100, meaning that to get no - digital gain the control value needs to be 0x100. The no-gain - configuration is also typically the default. - - -.. _dv-controls: - -Digital Video Control Reference -=============================== - -The Digital Video control class is intended to control receivers and -transmitters for `VGA `__, -`DVI `__ -(Digital Visual Interface), HDMI (:ref:`hdmi`) and DisplayPort -(:ref:`dp`). These controls are generally expected to be private to -the receiver or transmitter subdevice that implements them, so they are -only exposed on the ``/dev/v4l-subdev*`` device node. - -.. note:: - - Note that these devices can have multiple input or output pads which are - hooked up to e.g. HDMI connectors. Even though the subdevice will - receive or transmit video from/to only one of those pads, the other pads - can still be active when it comes to EDID (Extended Display - Identification Data, :ref:`vesaedid`) and HDCP (High-bandwidth Digital - Content Protection System, :ref:`hdcp`) processing, allowing the - device to do the fairly slow EDID/HDCP handling in advance. This allows - for quick switching between connectors. - -These pads appear in several of the controls in this section as -bitmasks, one bit for each pad. Bit 0 corresponds to pad 0, bit 1 to pad -1, etc. The maximum value of the control is the set of valid pads. - - -.. _dv-control-id: - -Digital Video Control IDs -------------------------- - -``V4L2_CID_DV_CLASS (class)`` - The Digital Video class descriptor. - -``V4L2_CID_DV_TX_HOTPLUG (bitmask)`` - Many connectors have a hotplug pin which is high if EDID information - is available from the source. This control shows the state of the - hotplug pin as seen by the transmitter. Each bit corresponds to an - output pad on the transmitter. If an output pad does not have an - associated hotplug pin, then the bit for that pad will be 0. This - read-only control is applicable to DVI-D, HDMI and DisplayPort - connectors. - -``V4L2_CID_DV_TX_RXSENSE (bitmask)`` - Rx Sense is the detection of pull-ups on the TMDS clock lines. This - normally means that the sink has left/entered standby (i.e. the - transmitter can sense that the receiver is ready to receive video). - Each bit corresponds to an output pad on the transmitter. If an - output pad does not have an associated Rx Sense, then the bit for - that pad will be 0. This read-only control is applicable to DVI-D - and HDMI devices. - -``V4L2_CID_DV_TX_EDID_PRESENT (bitmask)`` - When the transmitter sees the hotplug signal from the receiver it - will attempt to read the EDID. If set, then the transmitter has read - at least the first block (= 128 bytes). Each bit corresponds to an - output pad on the transmitter. If an output pad does not support - EDIDs, then the bit for that pad will be 0. This read-only control - is applicable to VGA, DVI-A/D, HDMI and DisplayPort connectors. - -``V4L2_CID_DV_TX_MODE`` - (enum) - -enum v4l2_dv_tx_mode - - HDMI transmitters can transmit in DVI-D mode (just video) or in HDMI - mode (video + audio + auxiliary data). This control selects which - mode to use: V4L2_DV_TX_MODE_DVI_D or V4L2_DV_TX_MODE_HDMI. - This control is applicable to HDMI connectors. - -``V4L2_CID_DV_TX_RGB_RANGE`` - (enum) - -enum v4l2_dv_rgb_range - - Select the quantization range for RGB output. 
V4L2_DV_RANGE_AUTO - follows the RGB quantization range specified in the standard for the - video interface (ie. :ref:`cea861` for HDMI). - V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the - standard to be compatible with sinks that have not implemented the - standard correctly (unfortunately quite common for HDMI and DVI-D). - Full range allows all possible values to be used whereas limited - range sets the range to (16 << (N-8)) - (235 << (N-8)) where N is - the number of bits per component. This control is applicable to VGA, - DVI-A/D, HDMI and DisplayPort connectors. - -``V4L2_CID_DV_TX_IT_CONTENT_TYPE`` - (enum) - -enum v4l2_dv_it_content_type - - Configures the IT Content Type of the transmitted video. This - information is sent over HDMI and DisplayPort connectors as part of - the AVI InfoFrame. The term 'IT Content' is used for content that - originates from a computer as opposed to content from a TV broadcast - or an analog source. The enum v4l2_dv_it_content_type defines - the possible content types: - -.. tabularcolumns:: |p{7.0cm}|p{10.5cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_DV_IT_CONTENT_TYPE_GRAPHICS`` - - Graphics content. Pixel data should be passed unfiltered and - without analog reconstruction. - * - ``V4L2_DV_IT_CONTENT_TYPE_PHOTO`` - - Photo content. The content is derived from digital still pictures. - The content should be passed through with minimal scaling and - picture enhancements. - * - ``V4L2_DV_IT_CONTENT_TYPE_CINEMA`` - - Cinema content. - * - ``V4L2_DV_IT_CONTENT_TYPE_GAME`` - - Game content. Audio and video latency should be minimized. - * - ``V4L2_DV_IT_CONTENT_TYPE_NO_ITC`` - - No IT Content information is available and the ITC bit in the AVI - InfoFrame is set to 0. - - - -``V4L2_CID_DV_RX_POWER_PRESENT (bitmask)`` - Detects whether the receiver receives power from the source (e.g. - HDMI carries 5V on one of the pins). This is often used to power an - eeprom which contains EDID information, such that the source can - read the EDID even if the sink is in standby/power off. Each bit - corresponds to an input pad on the receiver. If an input pad - cannot detect whether power is present, then the bit for that pad - will be 0. This read-only control is applicable to DVI-D, HDMI and - DisplayPort connectors. - -``V4L2_CID_DV_RX_RGB_RANGE`` - (enum) - -enum v4l2_dv_rgb_range - - Select the quantization range for RGB input. V4L2_DV_RANGE_AUTO - follows the RGB quantization range specified in the standard for the - video interface (ie. :ref:`cea861` for HDMI). - V4L2_DV_RANGE_LIMITED and V4L2_DV_RANGE_FULL override the - standard to be compatible with sources that have not implemented the - standard correctly (unfortunately quite common for HDMI and DVI-D). - Full range allows all possible values to be used whereas limited - range sets the range to (16 << (N-8)) - (235 << (N-8)) where N is - the number of bits per component. This control is applicable to VGA, - DVI-A/D, HDMI and DisplayPort connectors. - -``V4L2_CID_DV_RX_IT_CONTENT_TYPE`` - (enum) - -enum v4l2_dv_it_content_type - - Reads the IT Content Type of the received video. This information is - sent over HDMI and DisplayPort connectors as part of the AVI - InfoFrame. The term 'IT Content' is used for content that originates - from a computer as opposed to content from a TV broadcast or an - analog source. See ``V4L2_CID_DV_TX_IT_CONTENT_TYPE`` for the - available content types. - - -.. 
_fm-rx-controls: - -FM Receiver Control Reference -============================= - -The FM Receiver (FM_RX) class includes controls for common features of -FM Reception capable devices. - - -.. _fm-rx-control-id: - -FM_RX Control IDs ------------------ - -``V4L2_CID_FM_RX_CLASS (class)`` - The FM_RX class descriptor. Calling - :ref:`VIDIOC_QUERYCTRL` for this control will - return a description of this control class. - -``V4L2_CID_RDS_RECEPTION (boolean)`` - Enables/disables RDS reception by the radio tuner - -``V4L2_CID_RDS_RX_PTY (integer)`` - Gets RDS Programme Type field. This encodes up to 31 pre-defined - programme types. - -``V4L2_CID_RDS_RX_PS_NAME (string)`` - Gets the Programme Service name (PS_NAME). It is intended for - static display on a receiver. It is the primary aid to listeners in - programme service identification and selection. In Annex E of - :ref:`iec62106`, the RDS specification, there is a full - description of the correct character encoding for Programme Service - name strings. Also from RDS specification, PS is usually a single - eight character text. However, it is also possible to find receivers - which can scroll strings sized as 8 x N characters. So, this control - must be configured with steps of 8 characters. The result is it must - always contain a string with size multiple of 8. - -``V4L2_CID_RDS_RX_RADIO_TEXT (string)`` - Gets the Radio Text info. It is a textual description of what is - being broadcasted. RDS Radio Text can be applied when broadcaster - wishes to transmit longer PS names, programme-related information or - any other text. In these cases, RadioText can be used in addition to - ``V4L2_CID_RDS_RX_PS_NAME``. The encoding for Radio Text strings is - also fully described in Annex E of :ref:`iec62106`. The length of - Radio Text strings depends on which RDS Block is being used to - transmit it, either 32 (2A block) or 64 (2B block). However, it is - also possible to find receivers which can scroll strings sized as 32 - x N or 64 x N characters. So, this control must be configured with - steps of 32 or 64 characters. The result is it must always contain a - string with size multiple of 32 or 64. - -``V4L2_CID_RDS_RX_TRAFFIC_ANNOUNCEMENT (boolean)`` - If set, then a traffic announcement is in progress. - -``V4L2_CID_RDS_RX_TRAFFIC_PROGRAM (boolean)`` - If set, then the tuned programme carries traffic announcements. - -``V4L2_CID_RDS_RX_MUSIC_SPEECH (boolean)`` - If set, then this channel broadcasts music. If cleared, then it - broadcasts speech. If the transmitter doesn't make this distinction, - then it will be set. - -``V4L2_CID_TUNE_DEEMPHASIS`` - (enum) - -enum v4l2_deemphasis - - Configures the de-emphasis value for reception. A de-emphasis filter - is applied to the broadcast to accentuate the high audio - frequencies. Depending on the region, a time constant of either 50 - or 75 useconds is used. The enum v4l2_deemphasis defines possible - values for de-emphasis. Here they are: - - - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_DEEMPHASIS_DISABLED`` - - No de-emphasis is applied. - * - ``V4L2_DEEMPHASIS_50_uS`` - - A de-emphasis of 50 uS is used. - * - ``V4L2_DEEMPHASIS_75_uS`` - - A de-emphasis of 75 uS is used. - - - - -.. _detect-controls: - -Detect Control Reference -======================== - -The Detect class includes controls for common features of various motion -or object detection capable devices. - - -.. 
_detect-control-id: - -Detect Control IDs ------------------- - -``V4L2_CID_DETECT_CLASS (class)`` - The Detect class descriptor. Calling - :ref:`VIDIOC_QUERYCTRL` for this control will - return a description of this control class. - -``V4L2_CID_DETECT_MD_MODE (menu)`` - Sets the motion detection mode. - -.. tabularcolumns:: |p{7.5cm}|p{10.0cm}| - -.. flat-table:: - :header-rows: 0 - :stub-columns: 0 - - * - ``V4L2_DETECT_MD_MODE_DISABLED`` - - Disable motion detection. - * - ``V4L2_DETECT_MD_MODE_GLOBAL`` - - Use a single motion detection threshold. - * - ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID`` - - The image is divided into a grid, each cell with its own motion - detection threshold. These thresholds are set through the - ``V4L2_CID_DETECT_MD_THRESHOLD_GRID`` matrix control. - * - ``V4L2_DETECT_MD_MODE_REGION_GRID`` - - The image is divided into a grid, each cell with its own region - value that specifies which per-region motion detection thresholds - should be used. Each region has its own thresholds. How these - per-region thresholds are set up is driver-specific. The region - values for the grid are set through the - ``V4L2_CID_DETECT_MD_REGION_GRID`` matrix control. - - - -``V4L2_CID_DETECT_MD_GLOBAL_THRESHOLD (integer)`` - Sets the global motion detection threshold to be used with the - ``V4L2_DETECT_MD_MODE_GLOBAL`` motion detection mode. - -``V4L2_CID_DETECT_MD_THRESHOLD_GRID (__u16 matrix)`` - Sets the motion detection thresholds for each cell in the grid. To - be used with the ``V4L2_DETECT_MD_MODE_THRESHOLD_GRID`` motion - detection mode. Matrix element (0, 0) represents the cell at the - top-left of the grid. - -``V4L2_CID_DETECT_MD_REGION_GRID (__u8 matrix)`` - Sets the motion detection region value for each cell in the grid. To - be used with the ``V4L2_DETECT_MD_MODE_REGION_GRID`` motion - detection mode. Matrix element (0, 0) represents the cell at the - top-left of the grid. - - -.. _rf-tuner-controls: - -RF Tuner Control Reference -========================== - -The RF Tuner (RF_TUNER) class includes controls for common features of -devices having an RF tuner. - -In this context, an RF tuner is the radio receiver circuit between the antenna -and the demodulator. It receives radio frequency (RF) from the antenna and -converts the received signal to a lower intermediate frequency (IF) or to -baseband frequency (BB). Tuners that can output baseband directly are often -called Zero-IF tuners. Older tuners were typically simple PLL tuners -inside a metal box, while newer ones are highly integrated chips -without a metal box, often called "silicon tuners". These controls are mostly -applicable to new, feature-rich silicon tuners, simply because older -tuners do not have many adjustable features. - -For more information about RF tuners see -`Tuner (radio) `__ -and `RF front end `__ -from Wikipedia. - - -.. _rf-tuner-control-id: - -RF_TUNER Control IDs --------------------- - -``V4L2_CID_RF_TUNER_CLASS (class)`` - The RF_TUNER class descriptor. Calling - :ref:`VIDIOC_QUERYCTRL` for this control will - return a description of this control class. - -``V4L2_CID_RF_TUNER_BANDWIDTH_AUTO (boolean)`` - Enables/disables tuner radio channel bandwidth configuration. In - automatic mode, bandwidth configuration is performed by the driver. - -``V4L2_CID_RF_TUNER_BANDWIDTH (integer)`` - Filter(s) on the tuner signal path are used to filter the signal according - to the needs of the receiving party. The driver configures the filters to - fulfill the desired bandwidth requirement. Used when - V4L2_CID_RF_TUNER_BANDWIDTH_AUTO is not set. Unit is in Hz.
The - range and step are driver-specific. - -``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO (boolean)`` - Enables/disables LNA automatic gain control (AGC). - -``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO (boolean)`` - Enables/disables mixer automatic gain control (AGC). - -``V4L2_CID_RF_TUNER_IF_GAIN_AUTO (boolean)`` - Enables/disables IF automatic gain control (AGC). - -``V4L2_CID_RF_TUNER_RF_GAIN (integer)`` - The RF amplifier is the very first amplifier on the receiver signal - path, just right after the antenna input. The difference between the - LNA gain and the RF gain in this document is that the LNA gain is - integrated in the tuner chip while the RF gain is a separate chip. - There may be both RF and LNA gain controls in the same device. The - range and step are driver-specific. - -``V4L2_CID_RF_TUNER_LNA_GAIN (integer)`` - LNA (low noise amplifier) gain is the first gain stage on the RF tuner - signal path. It is located very close to the tuner antenna input. Used - when ``V4L2_CID_RF_TUNER_LNA_GAIN_AUTO`` is not set. See - ``V4L2_CID_RF_TUNER_RF_GAIN`` to understand how RF gain and LNA gain - differ from each other. The range and step are - driver-specific. - -``V4L2_CID_RF_TUNER_MIXER_GAIN (integer)`` - Mixer gain is the second gain stage on the RF tuner signal path. It is - located inside the mixer block, where the RF signal is down-converted by the - mixer. Used when ``V4L2_CID_RF_TUNER_MIXER_GAIN_AUTO`` is not set. - The range and step are driver-specific. - -``V4L2_CID_RF_TUNER_IF_GAIN (integer)`` - IF gain is the last gain stage on the RF tuner signal path. It is - located at the output of the RF tuner. It controls the signal level of the - intermediate frequency output or baseband output. Used when - ``V4L2_CID_RF_TUNER_IF_GAIN_AUTO`` is not set. The range and step - are driver-specific. - -``V4L2_CID_RF_TUNER_PLL_LOCK (boolean)`` - Is the synthesizer PLL locked? The RF tuner is receiving the given frequency - when this control is set. This is a read-only control. - -.. [#f1] - This control may be changed to a menu control in the future, if more - options are required. diff --git a/Documentation/media/uapi/v4l/meta-formats.rst b/Documentation/media/uapi/v4l/meta-formats.rst index 5f956fa784b7..b10ca9ee3968 100644 --- a/Documentation/media/uapi/v4l/meta-formats.rst +++ b/Documentation/media/uapi/v4l/meta-formats.rst @@ -19,8 +19,8 @@ These formats are used for the :ref:`metadata` interface only. .. toctree:: :maxdepth: 1 - pixfmt-meta-intel-ipu3 pixfmt-meta-d4xx + pixfmt-meta-intel-ipu3 pixfmt-meta-uvc pixfmt-meta-vsp1-hgo pixfmt-meta-vsp1-hgt diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst index e4c5e456df59..2675bef3eefe 100644 --- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst +++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst @@ -73,7 +73,7 @@ Compressed Formats - 'MG2S' - MPEG-2 parsed slice data, as extracted from the MPEG-2 bitstream. This format is adapted for stateless video decoders that implement a - MPEG-2 pipeline (using the :ref:`codec` and :ref:`media-request-api`). + MPEG-2 pipeline (using the :ref:`mem2mem` and :ref:`media-request-api`).
Metadata associated with the frame to decode is required to be passed through the ``V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS`` control and quantization matrices can optionally be specified through the diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst b/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst index dc871006b41a..7fb54339f4a7 100644 --- a/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst +++ b/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst @@ -1,4 +1,27 @@ -.. -*- coding: utf-8; mode: rst -*- +.. This file is dual-licensed: you can use it either under the terms +.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this +.. dual licensing only applies to this file, and not this project as a +.. whole. +.. +.. a) This file is free software; you can redistribute it and/or +.. modify it under the terms of the GNU General Public License version +.. 2.0 as published by the Free Software Foundation. +.. +.. This file is distributed in the hope that it will be useful, +.. but WITHOUT ANY WARRANTY; without even the implied warranty of +.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +.. GNU General Public License version 2.0 for more details. +.. +.. Or, alternatively, +.. +.. b) Permission is granted to copy, distribute and/or modify this +.. document under the terms of the GNU Free Documentation License, +.. Version 1.1 or any later version published by the Free Software +.. Foundation, with no Invariant Sections, no Front-Cover Texts +.. and no Back-Cover Texts. A copy of the license is included at +.. Documentation/media/uapi/fdl-appendix.rst. +.. +.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections .. _v4l2-meta-fmt-params: .. _v4l2-meta-fmt-stat-3a: @@ -7,21 +30,22 @@ V4L2_META_FMT_IPU3_PARAMS ('ip3p'), V4L2_META_FMT_IPU3_3A ('ip3s') ****************************************************************** -.. c:type:: ipu3_uapi_stats_3a +.. ipu3_uapi_stats_3a 3A statistics ============= -For IPU3 ImgU, the 3A statistics accelerators collect different statistics over -an input bayer frame. Those statistics, defined in data struct :c:type:`ipu3_uapi_stats_3a`, -are obtained from "ipu3-imgu 3a stat" metadata capture video node, which are then -passed to user space for statistics analysis using :c:type:`v4l2_meta_format` interface. +The IPU3 ImgU 3A statistics accelerators collect different statistics over +an input Bayer frame. Those statistics are obtained from the "ipu3-imgu [01] 3a +stat" metadata capture video nodes, using the :c:type:`v4l2_meta_format` +interface. They are formatted as described by the :c:type:`ipu3_uapi_stats_3a` +structure. The statistics collected are AWB (Auto-white balance) RGBS (Red, Green, Blue and Saturation measure) cells, AWB filter response, AF (Auto-focus) filter response, and AE (Auto-exposure) histogram. -struct :c:type:`ipu3_uapi_4a_config` saves configurable parameters for all above. +The struct :c:type:`ipu3_uapi_4a_config` saves all configurable parameters. .. code-block:: c @@ -37,105 +61,14 @@ struct :c:type:`ipu3_uapi_4a_config` saves configurable parameters for all above struct ipu3_uapi_ff_status stats_3a_status; }; -.. c:type:: ipu3_uapi_params +.. ipu3_uapi_params Pipeline parameters =================== -IPU3 pipeline has a number of image processing stages, each of which takes a -set of parameters as input. 
The major stages of pipelines are shown here: - -Raw pixels -> Bayer Downscaling -> Optical Black Correction -> - -Linearization -> Lens Shading Correction -> White Balance / Exposure / - -Focus Apply -> Bayer Noise Reduction -> ANR -> Demosaicing -> Color - -Correction Matrix -> Gamma correction -> Color Space Conversion -> - -Chroma Down Scaling -> Chromatic Noise Reduction -> Total Color - -Correction -> XNR3 -> TNR -> DDR - -The table below presents a description of the above algorithms. - -======================== ======================================================= -Name Description -======================== ======================================================= -Optical Black Correction Optical Black Correction block subtracts a pre-defined - value from the respective pixel values to obtain better - image quality. - Defined in :c:type:`ipu3_uapi_obgrid_param`. -Linearization This algo block uses linearization parameters to - address non-linearity sensor effects. The Lookup table - table is defined in - :c:type:`ipu3_uapi_isp_lin_vmem_params`. -SHD Lens shading correction is used to correct spatial - non-uniformity of the pixel response due to optical - lens shading. This is done by applying a different gain - for each pixel. The gain, black level etc are - configured in :c:type:`ipu3_uapi_shd_config_static`. -BNR Bayer noise reduction block removes image noise by - applying a bilateral filter. - See :c:type:`ipu3_uapi_bnr_static_config` for details. -ANR Advanced Noise Reduction is a block based algorithm - that performs noise reduction in the Bayer domain. The - convolution matrix etc can be found in - :c:type:`ipu3_uapi_anr_config`. -Demosaicing Demosaicing converts raw sensor data in Bayer format - into RGB (Red, Green, Blue) presentation. Then add - outputs of estimation of Y channel for following stream - processing by Firmware. The struct is defined as - :c:type:`ipu3_uapi_dm_config`. (TODO) -Color Correction Color Correction algo transforms sensor specific color - space to the standard "sRGB" color space. This is done - by applying 3x3 matrix defined in - :c:type:`ipu3_uapi_ccm_mat_config`. -Gamma correction Gamma correction :c:type:`ipu3_uapi_gamma_config` is a - basic non-linear tone mapping correction that is - applied per pixel for each pixel component. -CSC Color space conversion transforms each pixel from the - RGB primary presentation to YUV (Y: brightness, - UV: Luminance) presentation. This is done by applying - a 3x3 matrix defined in - :c:type:`ipu3_uapi_csc_mat_config` -CDS Chroma down sampling - After the CSC is performed, the Chroma Down Sampling - is applied for a UV plane down sampling by a factor - of 2 in each direction for YUV 4:2:0 using a 4x2 - configurable filter :c:type:`ipu3_uapi_cds_params`. -CHNR Chroma noise reduction - This block processes only the chrominance pixels and - performs noise reduction by cleaning the high - frequency noise. - See struct :c:type:`ipu3_uapi_yuvp1_chnr_config`. -TCC Total color correction as defined in struct - :c:type:`ipu3_uapi_yuvp2_tcc_static_config`. -XNR3 eXtreme Noise Reduction V3 is the third revision of - noise reduction algorithm used to improve image - quality. This removes the low frequency noise in the - captured image. Two related structs are being defined, - :c:type:`ipu3_uapi_isp_xnr3_params` for ISP data memory - and :c:type:`ipu3_uapi_isp_xnr3_vmem_params` for vector - memory. -TNR Temporal Noise Reduction block compares successive - frames in time to remove anomalies / noise in pixel - values. 
:c:type:`ipu3_uapi_isp_tnr3_vmem_params` and - :c:type:`ipu3_uapi_isp_tnr3_params` are defined for ISP - vector and data memory respectively. -======================== ======================================================= - -A few stages of the pipeline will be executed by firmware running on the ISP -processor, while many others will use a set of fixed hardware blocks also -called accelerator cluster (ACC) to crunch pixel data and produce statistics. - -ACC parameters of individual algorithms, as defined by -:c:type:`ipu3_uapi_acc_param`, can be chosen to be applied by the user -space through struct :c:type:`ipu3_uapi_flags` embedded in -:c:type:`ipu3_uapi_params` structure. For parameters that are configured as -not enabled by the user space, the corresponding structs are ignored by the -driver, in which case the existing configuration of the algorithm will be -preserved. +The pipeline parameters are passed to the "ipu3-imgu [01] parameters" metadata +output video nodes, using the :c:type:`v4l2_meta_format` interface. They are +formatted as described by the :c:type:`ipu3_uapi_params` structure. Both 3A statistics and pipeline parameters described here are closely tied to the underlying camera sub-system (CSS) APIs. They are usually consumed and @@ -143,13 +76,6 @@ produced by dedicated user space libraries that comprise the important tuning tools, thus freeing the developers from being bothered with the low level hardware and algorithm details. -It should be noted that IPU3 DMA operations require the addresses of all data -structures (that includes both input and output) to be aligned on 32 byte -boundaries. - -The meta data :c:type:`ipu3_uapi_params` will be sent to "ipu3-imgu parameters" -video node in ``V4L2_BUF_TYPE_META_CAPTURE`` format. - .. code-block:: c struct ipu3_uapi_params { diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst index f53e8f57a003..7fcee1c11ac4 100644 --- a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst +++ b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst @@ -190,6 +190,170 @@ component of each pixel in one 16 or 32 bit word. - Cr\ :sub:`2` - Cr\ :sub:`1` - Cr\ :sub:`0` + - + * .. _V4L2-PIX-FMT-AYUV32: + + - ``V4L2_PIX_FMT_AYUV32`` + - 'AYUV' + + - a\ :sub:`7` + - a\ :sub:`6` + - a\ :sub:`5` + - a\ :sub:`4` + - a\ :sub:`3` + - a\ :sub:`2` + - a\ :sub:`1` + - a\ :sub:`0` + + - Y'\ :sub:`7` + - Y'\ :sub:`6` + - Y'\ :sub:`5` + - Y'\ :sub:`4` + - Y'\ :sub:`3` + - Y'\ :sub:`2` + - Y'\ :sub:`1` + - Y'\ :sub:`0` + + - Cb\ :sub:`7` + - Cb\ :sub:`6` + - Cb\ :sub:`5` + - Cb\ :sub:`4` + - Cb\ :sub:`3` + - Cb\ :sub:`2` + - Cb\ :sub:`1` + - Cb\ :sub:`0` + + - Cr\ :sub:`7` + - Cr\ :sub:`6` + - Cr\ :sub:`5` + - Cr\ :sub:`4` + - Cr\ :sub:`3` + - Cr\ :sub:`2` + - Cr\ :sub:`1` + - Cr\ :sub:`0` + - + * .. _V4L2-PIX-FMT-XYUV32: + + - ``V4L2_PIX_FMT_XYUV32`` + - 'XYUV' + + - + - + - + - + - + - + - + - + + - Y'\ :sub:`7` + - Y'\ :sub:`6` + - Y'\ :sub:`5` + - Y'\ :sub:`4` + - Y'\ :sub:`3` + - Y'\ :sub:`2` + - Y'\ :sub:`1` + - Y'\ :sub:`0` + + - Cb\ :sub:`7` + - Cb\ :sub:`6` + - Cb\ :sub:`5` + - Cb\ :sub:`4` + - Cb\ :sub:`3` + - Cb\ :sub:`2` + - Cb\ :sub:`1` + - Cb\ :sub:`0` + + - Cr\ :sub:`7` + - Cr\ :sub:`6` + - Cr\ :sub:`5` + - Cr\ :sub:`4` + - Cr\ :sub:`3` + - Cr\ :sub:`2` + - Cr\ :sub:`1` + - Cr\ :sub:`0` + - + * .. 
_V4L2-PIX-FMT-VUYA32: + + - ``V4L2_PIX_FMT_VUYA32`` + - 'VUYA' + + - Cr\ :sub:`7` + - Cr\ :sub:`6` + - Cr\ :sub:`5` + - Cr\ :sub:`4` + - Cr\ :sub:`3` + - Cr\ :sub:`2` + - Cr\ :sub:`1` + - Cr\ :sub:`0` + + - Cb\ :sub:`7` + - Cb\ :sub:`6` + - Cb\ :sub:`5` + - Cb\ :sub:`4` + - Cb\ :sub:`3` + - Cb\ :sub:`2` + - Cb\ :sub:`1` + - Cb\ :sub:`0` + + - Y'\ :sub:`7` + - Y'\ :sub:`6` + - Y'\ :sub:`5` + - Y'\ :sub:`4` + - Y'\ :sub:`3` + - Y'\ :sub:`2` + - Y'\ :sub:`1` + - Y'\ :sub:`0` + + - a\ :sub:`7` + - a\ :sub:`6` + - a\ :sub:`5` + - a\ :sub:`4` + - a\ :sub:`3` + - a\ :sub:`2` + - a\ :sub:`1` + - a\ :sub:`0` + - + * .. _V4L2-PIX-FMT-VUYX32: + + - ``V4L2_PIX_FMT_VUYX32`` + - 'VUYX' + + - Cr\ :sub:`7` + - Cr\ :sub:`6` + - Cr\ :sub:`5` + - Cr\ :sub:`4` + - Cr\ :sub:`3` + - Cr\ :sub:`2` + - Cr\ :sub:`1` + - Cr\ :sub:`0` + + - Cb\ :sub:`7` + - Cb\ :sub:`6` + - Cb\ :sub:`5` + - Cb\ :sub:`4` + - Cb\ :sub:`3` + - Cb\ :sub:`2` + - Cb\ :sub:`1` + - Cb\ :sub:`0` + + - Y'\ :sub:`7` + - Y'\ :sub:`6` + - Y'\ :sub:`5` + - Y'\ :sub:`4` + - Y'\ :sub:`3` + - Y'\ :sub:`2` + - Y'\ :sub:`1` + - Y'\ :sub:`0` + + - + - + - + - + - + - + - + - .. raw:: latex @@ -202,4 +366,8 @@ component of each pixel in one 16 or 32 bit word. #) The value of a = alpha bits is undefined when reading from the driver, ignored when writing to the driver, except when alpha blending has been negotiated for a :ref:`Video Overlay ` or - :ref:`Video Output Overlay `. + :ref:`Video Output Overlay ` for the formats Y444, YUV555 and + YUV4. However, for formats AYUV32 and VUYA32, the alpha component is + expected to contain a meaningful value that can be used by drivers + and applications. And, the formats XYUV32 and VUYX32 contain undefined + alpha values that must be ignored by all applications and drivers. diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst index ff4b2a972fd2..f5440d55d510 100644 --- a/Documentation/media/uapi/v4l/subdev-formats.rst +++ b/Documentation/media/uapi/v4l/subdev-formats.rst @@ -75,15 +75,15 @@ Media Bus Pixel Codes --------------------- The media bus pixel codes describe image formats as flowing over -physical busses (both between separate physical components and inside +physical buses (both between separate physical components and inside SoC devices). This should not be confused with the V4L2 pixel formats that describe, using four character codes, image formats as stored in memory. -While there is a relationship between image formats on busses and image +While there is a relationship between image formats on buses and image formats in memory (a raw Bayer image won't be magically converted to JPEG just by storing it to memory), there is no one-to-one -correspondance between them. +correspondence between them. Packed RGB Formats diff --git a/Documentation/media/uapi/v4l/vidioc-g-parm.rst b/Documentation/media/uapi/v4l/vidioc-g-parm.rst index 0d2593176c90..d9d5d97848d3 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-parm.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-parm.rst @@ -213,7 +213,7 @@ union holding separate parameters for input and output devices. .. _parm-caps: -.. flat-table:: Streaming Parameters Capabilites +.. 
flat-table:: Streaming Parameters Capabilities :header-rows: 0 :stub-columns: 0 :widths: 3 1 4 diff --git a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst index 60986710967b..7c6b5f4e1011 100644 --- a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst +++ b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst @@ -43,10 +43,7 @@ Applications can optionally call the :ref:`VIDIOC_PREPARE_BUF` ioctl to pass ownership of the buffer to the driver before actually enqueuing it, using the :ref:`VIDIOC_QBUF ` ioctl, and to prepare it for future I/O. Such preparations may include cache invalidation or cleaning. Performing them -in advance saves time during the actual I/O. In case such cache -operations are not required, the application can use one of -``V4L2_BUF_FLAG_NO_CACHE_INVALIDATE`` and -``V4L2_BUF_FLAG_NO_CACHE_CLEAN`` flags to skip the respective step. +in advance saves time during the actual I/O. The struct :c:type:`v4l2_buffer` structure is specified in :ref:`buffer`. diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst index 3259168a7358..c138d149faea 100644 --- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst +++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst @@ -123,7 +123,7 @@ then ``EINVAL`` will be returned. :ref:`VIDIOC_STREAMOFF ` or calling :ref:`VIDIOC_REQBUFS` the check for this will be reset. - For :ref:`memory-to-memory devices ` you can specify the + For :ref:`memory-to-memory devices ` you can specify the ``request_fd`` only for output buffers, not for capture buffers. Attempting to specify this for a capture buffer will result in an ``EACCES`` error. diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst index d72a0f8fd267..f956ee264099 100644 --- a/Documentation/media/v4l-drivers/bttv.rst +++ b/Documentation/media/v4l-drivers/bttv.rst @@ -85,7 +85,7 @@ same card listens there is much higher... For problems with sound: There are a lot of different systems used for TV sound all over the world. And there are also different chips which decode the audio signal. Reports about sound problems ("stereo -does'nt work") are pretty useless unless you include some details +doesn't work") are pretty useless unless you include some details about your hardware and the TV sound scheme used in your country (or at least the country you are living in). @@ -771,7 +771,7 @@ Identifying: - Lifeview.com.tw states (Feb. 2002): "The FlyVideo2000 and FlyVideo2000s product name have renamed to FlyVideo98." Their Bt8x8 cards are listed as discontinued. - - Flyvideo 2000S was probably sold as Flyvideo 3000 in some contries(Europe?). + - Flyvideo 2000S was probably sold as Flyvideo 3000 in some countries(Europe?). The new Flyvideo 2000/3000 are SAA7130/SAA7134 based. "Flyvideo II" had been the name for the 848 cards, nowadays (in Germany) diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst index 6922dde4a82b..1d7eb8c7bd5c 100644 --- a/Documentation/media/v4l-drivers/imx.rst +++ b/Documentation/media/v4l-drivers/imx.rst @@ -24,12 +24,12 @@ memory. Various dedicated DMA channels exist for both video capture and display paths. During transfer, the IDMAC is also capable of vertical image flip, 8x8 block transfer (see IRT description), pixel component re-ordering (for example UYVY to YUYV) within the same colorspace, and -even packed <--> planar conversion. 
It can also perform a simple -de-interlacing by interleaving even and odd lines during transfer +packed <--> planar conversion. The IDMAC can also perform a simple +de-interlacing by interweaving even and odd lines during transfer (without motion compensation which requires the VDIC). The CSI is the backend capture unit that interfaces directly with -camera sensors over Parallel, BT.656/1120, and MIPI CSI-2 busses. +camera sensors over Parallel, BT.656/1120, and MIPI CSI-2 buses. The IC handles color-space conversion, resizing (downscaling and upscaling), horizontal flip, and 90/270 degree rotation operations. @@ -175,15 +175,21 @@ via the SMFC and an IDMAC channel, bypassing IC pre-processing. This source pad is routed to a capture device node, with a node name of the format "ipuX_csiY capture". -Note that since the IDMAC source pad makes use of an IDMAC channel, it -can do pixel reordering within the same colorspace. For example, the -sink pad can take UYVY2X8, but the IDMAC source pad can output YUYV2X8. -If the sink pad is receiving YUV, the output at the capture device can -also be converted to a planar YUV format such as YUV420. - -It will also perform simple de-interlace without motion compensation, -which is activated if the sink pad's field type is an interlaced type, -and the IDMAC source pad field type is set to none. +Note that since the IDMAC source pad makes use of an IDMAC channel, +pixel reordering within the same colorspace can be carried out by the +IDMAC channel. For example, if the CSI sink pad is receiving in UYVY +order, the capture device linked to the IDMAC source pad can capture +in YUYV order. Also, if the CSI sink pad is receiving a packed YUV +format, the capture device can capture a planar YUV format such as +YUV420. + +The IDMAC channel at the IDMAC source pad also supports simple +interweave without motion compensation, which is activated if the source +pad's field type is sequential top-bottom or bottom-top, and the +requested capture interface field type is set to interlaced (t-b, b-t, +or unqualified interlaced). The capture interface will enforce the same +field order as the source pad field order (interlaced-bt if source pad +is seq-bt, interlaced-tb if source pad is seq-tb). This subdev can generate the following event when enabling the second IDMAC source pad: @@ -201,7 +207,7 @@ The CSI supports cropping the incoming raw sensor frames. This is implemented in the ipuX_csiY entities at the sink pad, using the crop selection subdev API. -The CSI also supports fixed divide-by-two downscaling indepently in +The CSI also supports fixed divide-by-two downscaling independently in width and height. This is implemented in the ipuX_csiY entities at the sink pad, using the compose selection subdev API. @@ -325,14 +331,14 @@ ipuX_vdic The VDIC carries out motion compensated de-interlacing, with three motion compensation modes: low, medium, and high motion. The mode is -specified with the menu control V4L2_CID_DEINTERLACING_MODE. It has -two sink pads and a single source pad. +specified with the menu control V4L2_CID_DEINTERLACING_MODE. The VDIC +has two sink pads and a single source pad. The direct sink pad receives from an ipuX_csiY direct pad. With this link the VDIC can only operate in high motion mode. When the IDMAC sink pad is activated, it receives from an output -or mem2mem device node. With this pipeline, it can also operate +or mem2mem device node. 
With this pipeline, the VDIC can also operate in low and medium modes, because these modes require receiving frames from memory buffers. Note that an output or mem2mem device is not implemented yet, so this sink pad currently has no links. @@ -345,8 +351,8 @@ ipuX_ic_prp This is the IC pre-processing entity. It acts as a router, routing data from its sink pad to one or both of its source pads. -It has a single sink pad. The sink pad can receive from the ipuX_csiY -direct pad, or from ipuX_vdic. +This entity has a single sink pad. The sink pad can receive from the +ipuX_csiY direct pad, or from ipuX_vdic. This entity has two source pads. One source pad routes to the pre-process encode task entity (ipuX_ic_prpenc), the other to the @@ -369,8 +375,8 @@ color-space conversion, resizing (downscaling and upscaling), horizontal and vertical flip, and 90/270 degree rotation. Flip and rotation are provided via standard V4L2 controls. -Like the ipuX_csiY IDMAC source, it can also perform simple de-interlace -without motion compensation, and pixel reordering. +Like the ipuX_csiY IDMAC source, this entity also supports simple +de-interlace without motion compensation, and pixel reordering. ipuX_ic_prpvf ------------- @@ -380,18 +386,18 @@ pad from ipuX_ic_prp, and a single source pad. The source pad is routed to a capture device node, with a node name of the format "ipuX_ic_prpvf capture". -It is identical in operation to ipuX_ic_prpenc, with the same resizing -and CSC operations and flip/rotation controls. It will receive and -process de-interlaced frames from the ipuX_vdic if ipuX_ic_prp is +This entity is identical in operation to ipuX_ic_prpenc, with the same +resizing and CSC operations and flip/rotation controls. It will receive +and process de-interlaced frames from the ipuX_vdic if ipuX_ic_prp is receiving from ipuX_vdic. -Like the ipuX_csiY IDMAC source, it can perform simple de-interlace -without motion compensation. However, note that if the ipuX_vdic is -included in the pipeline (ipuX_ic_prp is receiving from ipuX_vdic), -it's not possible to use simple de-interlace in ipuX_ic_prpvf, since -the ipuX_vdic has already carried out de-interlacing (with motion -compensation) and therefore the field type output from ipuX_ic_prp can -only be none. +Like the ipuX_csiY IDMAC source, this entity supports simple +interweaving without motion compensation. However, note that if the +ipuX_vdic is included in the pipeline (ipuX_ic_prp is receiving from +ipuX_vdic), it's not possible to use interweave in ipuX_ic_prpvf, +since the ipuX_vdic has already carried out de-interlacing (with +motion compensation) and therefore the field type output from +ipuX_vdic can only be none (progressive). Capture Pipelines ----------------- @@ -516,10 +522,33 @@ On the SabreAuto, an on-board ADV7180 SD decoder is connected to the parallel bus input on the internal video mux to IPU1 CSI0. The following example configures a pipeline to capture from the ADV7180 -video decoder, assuming NTSC 720x480 input signals, with Motion -Compensated de-interlacing. Pad field types assume the adv7180 outputs -"interlaced". $outputfmt can be any format supported by the ipu1_ic_prpvf -entity at its output pad: +video decoder, assuming NTSC 720x480 input signals, using simple +interweave (unconverted and without motion compensation). The adv7180 +must output sequential or alternating fields (field type 'seq-bt' for +NTSC, or 'alternate'): + +.. 
code-block:: none + + # Setup links + media-ctl -l "'adv7180 3-0021':0 -> 'ipu1_csi0_mux':1[1]" + media-ctl -l "'ipu1_csi0_mux':2 -> 'ipu1_csi0':0[1]" + media-ctl -l "'ipu1_csi0':2 -> 'ipu1_csi0 capture':0[1]" + # Configure pads + media-ctl -V "'adv7180 3-0021':0 [fmt:UYVY2X8/720x480 field:seq-bt]" + media-ctl -V "'ipu1_csi0_mux':2 [fmt:UYVY2X8/720x480]" + media-ctl -V "'ipu1_csi0':2 [fmt:AYUV32/720x480]" + # Configure "ipu1_csi0 capture" interface (assumed at /dev/video4) + v4l2-ctl -d4 --set-fmt-video=field=interlaced_bt + +Streaming can then begin on /dev/video4. The v4l2-ctl tool can also be +used to select any supported YUV pixelformat on /dev/video4. + +This example configures a pipeline to capture from the ADV7180 +video decoder, assuming PAL 720x576 input signals, with Motion +Compensated de-interlacing. The adv7180 must output sequential or +alternating fields (field type 'seq-tb' for PAL, or 'alternate'). +$outputfmt can be any format supported by the ipu1_ic_prpvf entity +at its output pad: .. code-block:: none @@ -531,11 +560,11 @@ entity at its output pad: media-ctl -l "'ipu1_ic_prp':2 -> 'ipu1_ic_prpvf':0[1]" media-ctl -l "'ipu1_ic_prpvf':1 -> 'ipu1_ic_prpvf capture':0[1]" # Configure pads - media-ctl -V "'adv7180 3-0021':0 [fmt:UYVY2X8/720x480]" - media-ctl -V "'ipu1_csi0_mux':2 [fmt:UYVY2X8/720x480 field:interlaced]" - media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x480 field:interlaced]" - media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x480 field:none]" - media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x480 field:none]" + media-ctl -V "'adv7180 3-0021':0 [fmt:UYVY2X8/720x576 field:seq-tb]" + media-ctl -V "'ipu1_csi0_mux':2 [fmt:UYVY2X8/720x576]" + media-ctl -V "'ipu1_csi0':1 [fmt:AYUV32/720x576]" + media-ctl -V "'ipu1_vdic':2 [fmt:AYUV32/720x576 field:none]" + media-ctl -V "'ipu1_ic_prp':2 [fmt:AYUV32/720x576 field:none]" media-ctl -V "'ipu1_ic_prpvf':1 [fmt:$outputfmt field:none]" Streaming can then begin on the capture device node at diff --git a/Documentation/media/v4l-drivers/imx7.rst b/Documentation/media/v4l-drivers/imx7.rst new file mode 100644 index 000000000000..fe411f65c01c --- /dev/null +++ b/Documentation/media/v4l-drivers/imx7.rst @@ -0,0 +1,162 @@ +.. SPDX-License-Identifier: GPL-2.0 + +i.MX7 Video Capture Driver +========================== + +Introduction +------------ + +The i.MX7 contrary to the i.MX5/6 family does not contain an Image Processing +Unit (IPU); because of that the capabilities to perform operations or +manipulation of the capture frames are less feature rich. + +For image capture the i.MX7 has three units: +- CMOS Sensor Interface (CSI) +- Video Multiplexer +- MIPI CSI-2 Receiver + +.. code-block:: none + + MIPI Camera Input ---> MIPI CSI-2 --- > |\ + | \ + | \ + | M | + | U | ------> CSI ---> Capture + | X | + | / + Parallel Camera Input ----------------> | / + |/ + +For additional information, please refer to the latest versions of the i.MX7 +reference manual [#f1]_. + +Entities +-------- + +imx7-mipi-csi2 +-------------- + +This is the MIPI CSI-2 receiver entity. It has one sink pad to receive the pixel +data from MIPI CSI-2 camera sensor. It has one source pad, corresponding to the +virtual channel 0. This module is compliant to previous version of Samsung +D-phy, and supports two D-PHY Rx Data lanes. + +csi_mux +------- + +This is the video multiplexer. It has two sink pads to select from either camera +sensor with a parallel interface or from MIPI CSI-2 virtual channel 0. It has +a single source pad that routes to the CSI. 
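The pad formats used throughout this pipeline are configured and read back with the V4L2 subdev pad API; the media-ctl -V invocations shown below are thin wrappers around ``VIDIOC_SUBDEV_S_FMT``. As a rough sketch, reading the active format back from the csi_mux source pad (pad 2) could look as follows, assuming the csi_mux subdev is exposed at the hypothetical node /dev/v4l-subdev1 (check media-ctl -p for the actual topology):

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/v4l2-subdev.h>

    int main(void)
    {
        struct v4l2_subdev_format fmt = {
            .which = V4L2_SUBDEV_FORMAT_ACTIVE,
            .pad = 2,  /* csi_mux source pad */
        };
        /* Assumed csi_mux subdev node. */
        int fd = open("/dev/v4l-subdev1", O_RDWR);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt) < 0) {
            perror("VIDIOC_SUBDEV_G_FMT");
            return 1;
        }
        printf("csi_mux:2 %ux%u, media bus code 0x%04x\n",
               fmt.format.width, fmt.format.height, fmt.format.code);
        return 0;
    }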
+ +csi +--- + +The CSI enables the chip to connect directly to external CMOS image sensor. CSI +can interface directly with Parallel and MIPI CSI-2 buses. It has 256 x 64 FIFO +to store received image pixel data and embedded DMA controllers to transfer data +from the FIFO through AHB bus. + +This entity has one sink pad that receives from the csi_mux entity and a single +source pad that routes video frames directly to memory buffers. This pad is +routed to a capture device node. + +Usage Notes +----------- + +To aid in configuration and for backward compatibility with V4L2 applications +that access controls only from video device nodes, the capture device interfaces +inherit controls from the active entities in the current pipeline, so controls +can be accessed either directly from the subdev or from the active capture +device interface. For example, the sensor controls are available either from the +sensor subdevs or from the active capture device. + +Warp7 with OV2680 +----------------- + +On this platform an OV2680 MIPI CSI-2 module is connected to the internal MIPI +CSI-2 receiver. The following example configures a video capture pipeline with +an output of 800x600, and BGGR 10 bit bayer format: + +.. code-block:: none + + # Setup links + media-ctl -l "'ov2680 1-0036':0 -> 'imx7-mipi-csis.0':0[1]" + media-ctl -l "'imx7-mipi-csis.0':1 -> 'csi_mux':1[1]" + media-ctl -l "'csi_mux':2 -> 'csi':0[1]" + media-ctl -l "'csi':1 -> 'csi capture':0[1]" + + # Configure pads for pipeline + media-ctl -V "'ov2680 1-0036':0 [fmt:SBGGR10_1X10/800x600 field:none]" + media-ctl -V "'csi_mux':1 [fmt:SBGGR10_1X10/800x600 field:none]" + media-ctl -V "'csi_mux':2 [fmt:SBGGR10_1X10/800x600 field:none]" + media-ctl -V "'imx7-mipi-csis.0':0 [fmt:SBGGR10_1X10/800x600 field:none]" + media-ctl -V "'csi':0 [fmt:SBGGR10_1X10/800x600 field:none]" + +After this streaming can start. The v4l2-ctl tool can be used to select any of +the resolutions supported by the sensor. + +.. code-block:: none + + root@imx7s-warp:~# media-ctl -p + Media controller API version 4.17.0 + + Media device information + ------------------------ + driver imx-media + model imx-media + serial + bus info + hw revision 0x0 + driver version 4.17.0 + + Device topology + - entity 1: csi (2 pads, 2 links) + type V4L2 subdev subtype Unknown flags 0 + device node name /dev/v4l-subdev0 + pad0: Sink + [fmt:SBGGR10_1X10/800x600 field:none] + <- "csi_mux":2 [ENABLED] + pad1: Source + [fmt:SBGGR10_1X10/800x600 field:none] + -> "csi capture":0 [ENABLED] + + - entity 4: csi capture (1 pad, 1 link) + type Node subtype V4L flags 0 + device node name /dev/video0 + pad0: Sink + <- "csi":1 [ENABLED] + + - entity 10: csi_mux (3 pads, 2 links) + type V4L2 subdev subtype Unknown flags 0 + device node name /dev/v4l-subdev1 + pad0: Sink + [fmt:unknown/0x0] + pad1: Sink + [fmt:unknown/800x600 field:none] + <- "imx7-mipi-csis.0":1 [ENABLED] + pad2: Source + [fmt:unknown/800x600 field:none] + -> "csi":0 [ENABLED] + + - entity 14: imx7-mipi-csis.0 (2 pads, 2 links) + type V4L2 subdev subtype Unknown flags 0 + device node name /dev/v4l-subdev2 + pad0: Sink + [fmt:SBGGR10_1X10/800x600 field:none] + <- "ov2680 1-0036":0 [ENABLED] + pad1: Source + [fmt:SBGGR10_1X10/800x600 field:none] + -> "csi_mux":1 [ENABLED] + + - entity 17: ov2680 1-0036 (1 pad, 1 link) + type V4L2 subdev subtype Sensor flags 0 + device node name /dev/v4l-subdev3 + pad0: Source + [fmt:SBGGR10_1X10/800x600 field:none] + -> "imx7-mipi-csis.0":0 [ENABLED] + + +References +---------- + +.. 
[#f1] https://www.nxp.com/docs/en/reference-manual/IMX7SRM.pdf diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst index f28570ec9e42..dfd4b205937c 100644 --- a/Documentation/media/v4l-drivers/index.rst +++ b/Documentation/media/v4l-drivers/index.rst @@ -44,6 +44,7 @@ For more details see the file COPYING in the source distribution of Linux. davinci-vpbe fimc imx + imx7 ipu3 ivtv max2175 diff --git a/Documentation/media/v4l-drivers/ipu3.rst b/Documentation/media/v4l-drivers/ipu3.rst index f89b51dafadd..c9f780404eee 100644 --- a/Documentation/media/v4l-drivers/ipu3.rst +++ b/Documentation/media/v4l-drivers/ipu3.rst @@ -1,3 +1,5 @@ +.. SPDX-License-Identifier: GPL-2.0 + .. include:: =============================================================== @@ -355,10 +357,157 @@ https://chromium.googlesource.com/chromiumos/platform/arc-camera/+/master/ The source can be located under hal/intel directory. +Overview of IPU3 pipeline +========================= + +IPU3 pipeline has a number of image processing stages, each of which takes a +set of parameters as input. The major stages of pipelines are shown here: + +.. kernel-render:: DOT + :alt: IPU3 ImgU Pipeline + :caption: IPU3 ImgU Pipeline Diagram + + digraph "IPU3 ImgU" { + node [shape=box] + splines="ortho" + rankdir="LR" + + a [label="Raw pixels"] + b [label="Bayer Downscaling"] + c [label="Optical Black Correction"] + d [label="Linearization"] + e [label="Lens Shading Correction"] + f [label="White Balance / Exposure / Focus Apply"] + g [label="Bayer Noise Reduction"] + h [label="ANR"] + i [label="Demosaicing"] + j [label="Color Correction Matrix"] + k [label="Gamma correction"] + l [label="Color Space Conversion"] + m [label="Chroma Down Scaling"] + n [label="Chromatic Noise Reduction"] + o [label="Total Color Correction"] + p [label="XNR3"] + q [label="TNR"] + r [label="DDR"] + + { rank=same; a -> b -> c -> d -> e -> f } + { rank=same; g -> h -> i -> j -> k -> l } + { rank=same; m -> n -> o -> p -> q -> r } + + a -> g -> m [style=invis, weight=10] + + f -> g + l -> m + } + +The table below presents a description of the above algorithms. + +======================== ======================================================= +Name Description +======================== ======================================================= +Optical Black Correction Optical Black Correction block subtracts a pre-defined + value from the respective pixel values to obtain better + image quality. + Defined in :c:type:`ipu3_uapi_obgrid_param`. +Linearization This algo block uses linearization parameters to + address non-linearity sensor effects. The Lookup table + table is defined in + :c:type:`ipu3_uapi_isp_lin_vmem_params`. +SHD Lens shading correction is used to correct spatial + non-uniformity of the pixel response due to optical + lens shading. This is done by applying a different gain + for each pixel. The gain, black level etc are + configured in :c:type:`ipu3_uapi_shd_config_static`. +BNR Bayer noise reduction block removes image noise by + applying a bilateral filter. + See :c:type:`ipu3_uapi_bnr_static_config` for details. +ANR Advanced Noise Reduction is a block based algorithm + that performs noise reduction in the Bayer domain. The + convolution matrix etc can be found in + :c:type:`ipu3_uapi_anr_config`. +DM Demosaicing converts raw sensor data in Bayer format + into RGB (Red, Green, Blue) presentation. Then add + outputs of estimation of Y channel for following stream + processing by Firmware. 
The struct is defined as + :c:type:`ipu3_uapi_dm_config`. +Color Correction Color Correction algo transforms sensor specific color + space to the standard "sRGB" color space. This is done + by applying 3x3 matrix defined in + :c:type:`ipu3_uapi_ccm_mat_config`. +Gamma correction Gamma correction :c:type:`ipu3_uapi_gamma_config` is a + basic non-linear tone mapping correction that is + applied per pixel for each pixel component. +CSC Color space conversion transforms each pixel from the + RGB primary presentation to YUV (Y: brightness, + UV: Luminance) presentation. This is done by applying + a 3x3 matrix defined in + :c:type:`ipu3_uapi_csc_mat_config` +CDS Chroma down sampling + After the CSC is performed, the Chroma Down Sampling + is applied for a UV plane down sampling by a factor + of 2 in each direction for YUV 4:2:0 using a 4x2 + configurable filter :c:type:`ipu3_uapi_cds_params`. +CHNR Chroma noise reduction + This block processes only the chrominance pixels and + performs noise reduction by cleaning the high + frequency noise. + See struct :c:type:`ipu3_uapi_yuvp1_chnr_config`. +TCC Total color correction as defined in struct + :c:type:`ipu3_uapi_yuvp2_tcc_static_config`. +XNR3 eXtreme Noise Reduction V3 is the third revision of + noise reduction algorithm used to improve image + quality. This removes the low frequency noise in the + captured image. Two related structs are being defined, + :c:type:`ipu3_uapi_isp_xnr3_params` for ISP data memory + and :c:type:`ipu3_uapi_isp_xnr3_vmem_params` for vector + memory. +TNR Temporal Noise Reduction block compares successive + frames in time to remove anomalies / noise in pixel + values. :c:type:`ipu3_uapi_isp_tnr3_vmem_params` and + :c:type:`ipu3_uapi_isp_tnr3_params` are defined for ISP + vector and data memory respectively. +======================== ======================================================= + +Other often encountered acronyms not listed in above table: + + ACC + Accelerator cluster + AWB_FR + Auto white balance filter response statistics + BDS + Bayer downscaler parameters + CCM + Color correction matrix coefficients + IEFd + Image enhancement filter directed + Obgrid + Optical black level compensation + OSYS + Output system configuration + ROI + Region of interest + YDS + Y down sampling + YTM + Y-tone mapping + +A few stages of the pipeline will be executed by firmware running on the ISP +processor, while many others will use a set of fixed hardware blocks also +called accelerator cluster (ACC) to crunch pixel data and produce statistics. + +ACC parameters of individual algorithms, as defined by +:c:type:`ipu3_uapi_acc_param`, can be chosen to be applied by the user +space through struct :c:type:`ipu3_uapi_flags` embedded in +:c:type:`ipu3_uapi_params` structure. For parameters that are configured as +not enabled by the user space, the corresponding structs are ignored by the +driver, in which case the existing configuration of the algorithm will be +preserved. + References ========== -.. [#f5] include/uapi/linux/intel-ipu3.h +.. [#f5] drivers/staging/media/ipu3/include/intel-ipu3.h .. [#f1] https://github.com/intel/nvt diff --git a/Documentation/media/v4l-drivers/pxa_camera.rst b/Documentation/media/v4l-drivers/pxa_camera.rst index e4fbca755e1a..ee1bd96b66dd 100644 --- a/Documentation/media/v4l-drivers/pxa_camera.rst +++ b/Documentation/media/v4l-drivers/pxa_camera.rst @@ -18,7 +18,7 @@ Global video workflow --------------------- a) QCI stopped - Initialy, the QCI interface is stopped. 
+ Initially, the QCI interface is stopped. When a buffer is queued (pxa_videobuf_ops->buf_queue), the QCI starts. b) QCI started diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst index 6b15385b12b3..a72e17d09cb7 100644 --- a/Documentation/media/v4l-drivers/qcom_camss.rst +++ b/Documentation/media/v4l-drivers/qcom_camss.rst @@ -123,7 +123,7 @@ The considerations to split the driver in this particular way are as follows: - representing CSIPHY and CSID modules by a separate sub-device for each module allows to model the hardware links between these modules; - representing VFE by a separate sub-devices for each input interface allows - to use the input interfaces concurently and independently as this is + to use the input interfaces concurrently and independently as this is supported by the hardware; - representing ISPIF by a number of sub-devices equal to the number of CSID sub-devices allows to create linear media controller pipelines when using two diff --git a/Documentation/misc-devices/ibmvmc.rst b/Documentation/misc-devices/ibmvmc.rst index 46ded79554d4..b46df4ea2b81 100644 --- a/Documentation/misc-devices/ibmvmc.rst +++ b/Documentation/misc-devices/ibmvmc.rst @@ -1,4 +1,5 @@ .. SPDX-License-Identifier: GPL-2.0+ + ====================================================== IBM Virtual Management Channel Kernel Driver (IBMVMC) ====================================================== diff --git a/Documentation/misc-devices/index.rst b/Documentation/misc-devices/index.rst new file mode 100644 index 000000000000..dfd1f45a3127 --- /dev/null +++ b/Documentation/misc-devices/index.rst @@ -0,0 +1,17 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================================ +Assorted Miscellaneous Devices Documentation +============================================ + +This documentation contains information for assorted devices that do not +fit into other categories. + +.. class:: toc-title + + Table of contents + +.. toctree:: + :maxdepth: 2 + + ibmvmc diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst index 4ae4f9d8f8fe..e14d7d40fc75 100644 --- a/Documentation/networking/af_xdp.rst +++ b/Documentation/networking/af_xdp.rst @@ -295,6 +295,41 @@ using:: For XDP_SKB mode, use the switch "-S" instead of "-N" and all options can be displayed with "-h", as usual. +FAQ +======= + +Q: I am not seeing any traffic on the socket. What am I doing wrong? + +A: When a netdev of a physical NIC is initialized, Linux usually + allocates one Rx and Tx queue pair per core. So on a 8 core system, + queue ids 0 to 7 will be allocated, one per core. In the AF_XDP + bind call or the xsk_socket__create libbpf function call, you + specify a specific queue id to bind to and it is only the traffic + towards that queue you are going to get on you socket. So in the + example above, if you bind to queue 0, you are NOT going to get any + traffic that is distributed to queues 1 through 7. If you are + lucky, you will see the traffic, but usually it will end up on one + of the queues you have not bound to. + + There are a number of ways to solve the problem of getting the + traffic you want to the queue id you bound to. If you want to see + all the traffic, you can force the netdev to only have 1 queue, queue + id 0, and then bind to queue 0. 
You can use ethtool to do this:: + + sudo ethtool -L combined 1 + + If you want to only see part of the traffic, you can program the + NIC through ethtool to filter out your traffic to a single queue id + that you can bind your XDP socket to. Here is one example in which + UDP traffic to and from port 4242 are sent to queue 2:: + + sudo ethtool -N rx-flow-hash udp4 fn + sudo ethtool -N flow-type udp4 src-port 4242 dst-port \ + 4242 action 2 + + A number of other ways are possible all up to the capabilitites of + the NIC you have. + Credits ======= @@ -309,4 +344,3 @@ Credits - Michael S. Tsirkin - Qi Z Zhang - Willem de Bruijn - diff --git a/Documentation/networking/checksum-offloads.rst b/Documentation/networking/checksum-offloads.rst new file mode 100644 index 000000000000..905c8a84b103 --- /dev/null +++ b/Documentation/networking/checksum-offloads.rst @@ -0,0 +1,143 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================= +Checksum Offloads +================= + + +Introduction +============ + +This document describes a set of techniques in the Linux networking stack to +take advantage of checksum offload capabilities of various NICs. + +The following technologies are described: + +* TX Checksum Offload +* LCO: Local Checksum Offload +* RCO: Remote Checksum Offload + +Things that should be documented here but aren't yet: + +* RX Checksum Offload +* CHECKSUM_UNNECESSARY conversion + + +TX Checksum Offload +=================== + +The interface for offloading a transmit checksum to a device is explained in +detail in comments near the top of include/linux/skbuff.h. + +In brief, it allows to request the device fill in a single ones-complement +checksum defined by the sk_buff fields skb->csum_start and skb->csum_offset. +The device should compute the 16-bit ones-complement checksum (i.e. the +'IP-style' checksum) from csum_start to the end of the packet, and fill in the +result at (csum_start + csum_offset). + +Because csum_offset cannot be negative, this ensures that the previous value of +the checksum field is included in the checksum computation, thus it can be used +to supply any needed corrections to the checksum (such as the sum of the +pseudo-header for UDP or TCP). + +This interface only allows a single checksum to be offloaded. Where +encapsulation is used, the packet may have multiple checksum fields in +different header layers, and the rest will have to be handled by another +mechanism such as LCO or RCO. + +CRC32c can also be offloaded using this interface, by means of filling +skb->csum_start and skb->csum_offset as described above, and setting +skb->csum_not_inet: see skbuff.h comment (section 'D') for more details. + +No offloading of the IP header checksum is performed; it is always done in +software. This is OK because when we build the IP header, we obviously have it +in cache, so summing it isn't expensive. It's also rather short. + +The requirements for GSO are more complicated, because when segmenting an +encapsulated packet both the inner and outer checksums may need to be edited or +recomputed for each resulting segment. See the skbuff.h comment (section 'E') +for more details. + +A driver declares its offload capabilities in netdev->hw_features; see +Documentation/networking/netdev-features.txt for more. 
Note that a device +which only advertises NETIF_F_IP[V6]_CSUM must still obey the csum_start and +csum_offset given in the SKB; if it tries to deduce these itself in hardware +(as some NICs do) the driver should check that the values in the SKB match +those which the hardware will deduce, and if not, fall back to checksumming in +software instead (with skb_csum_hwoffload_help() or one of the +skb_checksum_help() / skb_crc32c_csum_help functions, as mentioned in +include/linux/skbuff.h). + +The stack should, for the most part, assume that checksum offload is supported +by the underlying device. The only place that should check is +validate_xmit_skb(), and the functions it calls directly or indirectly. That +function compares the offload features requested by the SKB (which may include +other offloads besides TX Checksum Offload) and, if they are not supported or +enabled on the device (determined by netdev->features), performs the +corresponding offload in software. In the case of TX Checksum Offload, that +means calling skb_csum_hwoffload_help(skb, features). + + +LCO: Local Checksum Offload +=========================== + +LCO is a technique for efficiently computing the outer checksum of an +encapsulated datagram when the inner checksum is due to be offloaded. + +The ones-complement sum of a correctly checksummed TCP or UDP packet is equal +to the complement of the sum of the pseudo header, because everything else gets +'cancelled out' by the checksum field. This is because the sum was +complemented before being written to the checksum field. + +More generally, this holds in any case where the 'IP-style' ones complement +checksum is used, and thus any checksum that TX Checksum Offload supports. + +That is, if we have set up TX Checksum Offload with a start/offset pair, we +know that after the device has filled in that checksum, the ones complement sum +from csum_start to the end of the packet will be equal to the complement of +whatever value we put in the checksum field beforehand. This allows us to +compute the outer checksum without looking at the payload: we simply stop +summing when we get to csum_start, then add the complement of the 16-bit word +at (csum_start + csum_offset). + +Then, when the true inner checksum is filled in (either by hardware or by +skb_checksum_help()), the outer checksum will become correct by virtue of the +arithmetic. + +LCO is performed by the stack when constructing an outer UDP header for an +encapsulation such as VXLAN or GENEVE, in udp_set_csum(). Similarly for the +IPv6 equivalents, in udp6_set_csum(). + +It is also performed when constructing an IPv4 GRE header, in +net/ipv4/ip_gre.c:build_header(). It is *not* currently performed when +constructing an IPv6 GRE header; the GRE checksum is computed over the whole +packet in net/ipv6/ip6_gre.c:ip6gre_xmit2(), but it should be possible to use +LCO here as IPv6 GRE still uses an IP-style checksum. + +All of the LCO implementations use a helper function lco_csum(), in +include/linux/skbuff.h. + +LCO can safely be used for nested encapsulations; in this case, the outer +encapsulation layer will sum over both its own header and the 'middle' header. +This does mean that the 'middle' header will get summed multiple times, but +there doesn't seem to be a way to avoid that without incurring bigger costs +(e.g. in SKB bloat). 
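The arithmetic can be illustrated with a small, self-contained sketch. This is not the kernel's lco_csum(), which operates on sk_buffs; the buffer layout and names here are illustrative only:

.. code-block:: c

    #include <stddef.h>
    #include <stdint.h>

    /* 16-bit ones'-complement sum of a byte range, folded to 16 bits. */
    static uint16_t ocsum(const uint8_t *p, size_t len, uint32_t sum)
    {
            while (len > 1) {
                    sum += ((uint32_t)p[0] << 8) | p[1];
                    p += 2;
                    len -= 2;
            }
            if (len)                        /* trailing odd byte */
                    sum += (uint32_t)p[0] << 8;
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    /*
     * LCO partial sum: sum the outer headers from the start of the outer
     * L4 header up to csum_start, then add the complement of the 16-bit
     * value already stored at csum_start + csum_offset (the complemented
     * inner pseudo-header sum).  The caller still adds the outer
     * pseudo-header and complements the result, exactly as for an
     * ordinary UDP checksum.
     */
    static uint16_t lco_partial(const uint8_t *outer_l4,
                                const uint8_t *csum_start, size_t csum_offset)
    {
            uint16_t inner = ((uint16_t)csum_start[csum_offset] << 8) |
                             csum_start[csum_offset + 1];

            return ocsum(outer_l4, (size_t)(csum_start - outer_l4),
                         (uint16_t)~inner);
    }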
+ + +RCO: Remote Checksum Offload +============================ + +RCO is a technique for eliding the inner checksum of an encapsulated datagram, +allowing the outer checksum to be offloaded. It does, however, involve a +change to the encapsulation protocols, which the receiver must also support. +For this reason, it is disabled by default. + +RCO is detailed in the following Internet-Drafts: + +* https://tools.ietf.org/html/draft-herbert-remotecsumoffload-00 +* https://tools.ietf.org/html/draft-herbert-vxlan-rco-00 + +In Linux, RCO is implemented individually in each encapsulation protocol, and +most tunnel types have flags controlling its use. For instance, VXLAN has the +flag VXLAN_F_REMCSUM_TX (per struct vxlan_rdst) to indicate that RCO should be +used when transmitting to a given remote destination. diff --git a/Documentation/networking/checksum-offloads.txt b/Documentation/networking/checksum-offloads.txt deleted file mode 100644 index 27bc09cfcf6d..000000000000 --- a/Documentation/networking/checksum-offloads.txt +++ /dev/null @@ -1,122 +0,0 @@ -Checksum Offloads in the Linux Networking Stack - - -Introduction -============ - -This document describes a set of techniques in the Linux networking stack - to take advantage of checksum offload capabilities of various NICs. - -The following technologies are described: - * TX Checksum Offload - * LCO: Local Checksum Offload - * RCO: Remote Checksum Offload - -Things that should be documented here but aren't yet: - * RX Checksum Offload - * CHECKSUM_UNNECESSARY conversion - - -TX Checksum Offload -=================== - -The interface for offloading a transmit checksum to a device is explained - in detail in comments near the top of include/linux/skbuff.h. -In brief, it allows to request the device fill in a single ones-complement - checksum defined by the sk_buff fields skb->csum_start and - skb->csum_offset. The device should compute the 16-bit ones-complement - checksum (i.e. the 'IP-style' checksum) from csum_start to the end of the - packet, and fill in the result at (csum_start + csum_offset). -Because csum_offset cannot be negative, this ensures that the previous - value of the checksum field is included in the checksum computation, thus - it can be used to supply any needed corrections to the checksum (such as - the sum of the pseudo-header for UDP or TCP). -This interface only allows a single checksum to be offloaded. Where - encapsulation is used, the packet may have multiple checksum fields in - different header layers, and the rest will have to be handled by another - mechanism such as LCO or RCO. -CRC32c can also be offloaded using this interface, by means of filling - skb->csum_start and skb->csum_offset as described above, and setting - skb->csum_not_inet: see skbuff.h comment (section 'D') for more details. -No offloading of the IP header checksum is performed; it is always done in - software. This is OK because when we build the IP header, we obviously - have it in cache, so summing it isn't expensive. It's also rather short. -The requirements for GSO are more complicated, because when segmenting an - encapsulated packet both the inner and outer checksums may need to be - edited or recomputed for each resulting segment. See the skbuff.h comment - (section 'E') for more details. - -A driver declares its offload capabilities in netdev->hw_features; see - Documentation/networking/netdev-features.txt for more. 
Note that a device - which only advertises NETIF_F_IP[V6]_CSUM must still obey the csum_start - and csum_offset given in the SKB; if it tries to deduce these itself in - hardware (as some NICs do) the driver should check that the values in the - SKB match those which the hardware will deduce, and if not, fall back to - checksumming in software instead (with skb_csum_hwoffload_help() or one of - the skb_checksum_help() / skb_crc32c_csum_help functions, as mentioned in - include/linux/skbuff.h). - -The stack should, for the most part, assume that checksum offload is - supported by the underlying device. The only place that should check is - validate_xmit_skb(), and the functions it calls directly or indirectly. - That function compares the offload features requested by the SKB (which - may include other offloads besides TX Checksum Offload) and, if they are - not supported or enabled on the device (determined by netdev->features), - performs the corresponding offload in software. In the case of TX - Checksum Offload, that means calling skb_csum_hwoffload_help(skb, features). - - -LCO: Local Checksum Offload -=========================== - -LCO is a technique for efficiently computing the outer checksum of an - encapsulated datagram when the inner checksum is due to be offloaded. -The ones-complement sum of a correctly checksummed TCP or UDP packet is - equal to the complement of the sum of the pseudo header, because everything - else gets 'cancelled out' by the checksum field. This is because the sum was - complemented before being written to the checksum field. -More generally, this holds in any case where the 'IP-style' ones complement - checksum is used, and thus any checksum that TX Checksum Offload supports. -That is, if we have set up TX Checksum Offload with a start/offset pair, we - know that after the device has filled in that checksum, the ones - complement sum from csum_start to the end of the packet will be equal to - the complement of whatever value we put in the checksum field beforehand. - This allows us to compute the outer checksum without looking at the payload: - we simply stop summing when we get to csum_start, then add the complement of - the 16-bit word at (csum_start + csum_offset). -Then, when the true inner checksum is filled in (either by hardware or by - skb_checksum_help()), the outer checksum will become correct by virtue of - the arithmetic. - -LCO is performed by the stack when constructing an outer UDP header for an - encapsulation such as VXLAN or GENEVE, in udp_set_csum(). Similarly for - the IPv6 equivalents, in udp6_set_csum(). -It is also performed when constructing an IPv4 GRE header, in - net/ipv4/ip_gre.c:build_header(). It is *not* currently performed when - constructing an IPv6 GRE header; the GRE checksum is computed over the - whole packet in net/ipv6/ip6_gre.c:ip6gre_xmit2(), but it should be - possible to use LCO here as IPv6 GRE still uses an IP-style checksum. -All of the LCO implementations use a helper function lco_csum(), in - include/linux/skbuff.h. - -LCO can safely be used for nested encapsulations; in this case, the outer - encapsulation layer will sum over both its own header and the 'middle' - header. This does mean that the 'middle' header will get summed multiple - times, but there doesn't seem to be a way to avoid that without incurring - bigger costs (e.g. in SKB bloat). 
- - -RCO: Remote Checksum Offload -============================ - -RCO is a technique for eliding the inner checksum of an encapsulated - datagram, allowing the outer checksum to be offloaded. It does, however, - involve a change to the encapsulation protocols, which the receiver must - also support. For this reason, it is disabled by default. -RCO is detailed in the following Internet-Drafts: -https://tools.ietf.org/html/draft-herbert-remotecsumoffload-00 -https://tools.ietf.org/html/draft-herbert-vxlan-rco-00 -In Linux, RCO is implemented individually in each encapsulation protocol, - and most tunnel types have flags controlling its use. For instance, VXLAN - has the flag VXLAN_F_REMCSUM_TX (per struct vxlan_rdst) to indicate that - RCO should be used when transmitting to a given remote destination. diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst b/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst index a188466b6698..5045df990a4c 100644 --- a/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst +++ b/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst @@ -27,11 +27,12 @@ Driver Overview The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and provides services that: - A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue + + A. allow other drivers, such as the Ethernet driver, to enqueue and dequeue frames for their respective objects - B) allow drivers to register callbacks for data availability notifications + B. allow drivers to register callbacks for data availability notifications when data becomes available on a queue or channel - C) allow drivers to manage hardware buffer pools + C. allow drivers to manage hardware buffer pools The Linux DPIO driver consists of 3 primary components-- DPIO object driver-- fsl-mc driver that manages the DPIO object @@ -140,11 +141,10 @@ QBman portal interface (qbman-portal.c) The qbman-portal component provides APIs to do the low level hardware bit twiddling for operations such as: - -initializing Qman software portals - - -building and sending portal commands - -portal interrupt configuration and processing + - initializing Qman software portals + - building and sending portal commands + - portal interrupt configuration and processing The qbman-portal APIs are not public to other drivers, and are only used by dpio-service. diff --git a/Documentation/networking/device_drivers/intel/e100.rst b/Documentation/networking/device_drivers/intel/e100.rst index 5e2839b4ec92..2b9f4887beda 100644 --- a/Documentation/networking/device_drivers/intel/e100.rst +++ b/Documentation/networking/device_drivers/intel/e100.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +============================================================== Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters ============================================================== diff --git a/Documentation/networking/device_drivers/intel/e1000.rst b/Documentation/networking/device_drivers/intel/e1000.rst index 6379d4d20771..956560b6e745 100644 --- a/Documentation/networking/device_drivers/intel/e1000.rst +++ b/Documentation/networking/device_drivers/intel/e1000.rst @@ -1,5 +1,6 @@ .. 
SPDX-License-Identifier: GPL-2.0+ +=========================================================== Linux* Base Driver for Intel(R) Ethernet Network Connection =========================================================== diff --git a/Documentation/networking/device_drivers/intel/e1000e.rst b/Documentation/networking/device_drivers/intel/e1000e.rst index 33554e5416c5..01999f05509c 100644 --- a/Documentation/networking/device_drivers/intel/e1000e.rst +++ b/Documentation/networking/device_drivers/intel/e1000e.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +====================================================== Linux* Driver for Intel(R) Ethernet Network Connection ====================================================== diff --git a/Documentation/networking/device_drivers/intel/fm10k.rst b/Documentation/networking/device_drivers/intel/fm10k.rst index bf5e5942f28d..ac3269e34f55 100644 --- a/Documentation/networking/device_drivers/intel/fm10k.rst +++ b/Documentation/networking/device_drivers/intel/fm10k.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +============================================================== Linux* Base Driver for Intel(R) Ethernet Multi-host Controller ============================================================== diff --git a/Documentation/networking/device_drivers/intel/i40e.rst b/Documentation/networking/device_drivers/intel/i40e.rst index 0cc16c525d10..848fd388fa6e 100644 --- a/Documentation/networking/device_drivers/intel/i40e.rst +++ b/Documentation/networking/device_drivers/intel/i40e.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +================================================================== Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series ================================================================== diff --git a/Documentation/networking/device_drivers/intel/iavf.rst b/Documentation/networking/device_drivers/intel/iavf.rst index f8b42b64eb28..2d0c3baa1752 100644 --- a/Documentation/networking/device_drivers/intel/iavf.rst +++ b/Documentation/networking/device_drivers/intel/iavf.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +================================================================== Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function ================================================================== diff --git a/Documentation/networking/device_drivers/intel/ice.rst b/Documentation/networking/device_drivers/intel/ice.rst index 4d118b827bbb..c220aa2711c6 100644 --- a/Documentation/networking/device_drivers/intel/ice.rst +++ b/Documentation/networking/device_drivers/intel/ice.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +=================================================================== Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series =================================================================== diff --git a/Documentation/networking/device_drivers/intel/igb.rst b/Documentation/networking/device_drivers/intel/igb.rst index e87a4a72ea2d..fc8cfaa5dcfa 100644 --- a/Documentation/networking/device_drivers/intel/igb.rst +++ b/Documentation/networking/device_drivers/intel/igb.rst @@ -1,5 +1,6 @@ .. 
SPDX-License-Identifier: GPL-2.0+ +=========================================================== Linux* Base Driver for Intel(R) Ethernet Network Connection =========================================================== diff --git a/Documentation/networking/device_drivers/intel/igbvf.rst b/Documentation/networking/device_drivers/intel/igbvf.rst index a8a9ffa4f8d3..9cddabe8108e 100644 --- a/Documentation/networking/device_drivers/intel/igbvf.rst +++ b/Documentation/networking/device_drivers/intel/igbvf.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +============================================================ Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet ============================================================ diff --git a/Documentation/networking/device_drivers/intel/ixgb.rst b/Documentation/networking/device_drivers/intel/ixgb.rst index 8bd80e27843d..945018207a92 100644 --- a/Documentation/networking/device_drivers/intel/ixgb.rst +++ b/Documentation/networking/device_drivers/intel/ixgb.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +===================================================================== Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection ===================================================================== diff --git a/Documentation/networking/device_drivers/intel/ixgbe.rst b/Documentation/networking/device_drivers/intel/ixgbe.rst index 86d887a63606..c7d25483fedb 100644 --- a/Documentation/networking/device_drivers/intel/ixgbe.rst +++ b/Documentation/networking/device_drivers/intel/ixgbe.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +============================================================================= Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters ============================================================================= diff --git a/Documentation/networking/device_drivers/intel/ixgbevf.rst b/Documentation/networking/device_drivers/intel/ixgbevf.rst index 56cde6366c2f..5d4977360157 100644 --- a/Documentation/networking/device_drivers/intel/ixgbevf.rst +++ b/Documentation/networking/device_drivers/intel/ixgbevf.rst @@ -1,5 +1,6 @@ .. SPDX-License-Identifier: GPL-2.0+ +============================================================= Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet ============================================================= diff --git a/Documentation/networking/device_drivers/stmicro/stmmac.txt b/Documentation/networking/device_drivers/stmicro/stmmac.txt index 2bb07078f535..1ae979fd90d2 100644 --- a/Documentation/networking/device_drivers/stmicro/stmmac.txt +++ b/Documentation/networking/device_drivers/stmicro/stmmac.txt @@ -267,7 +267,7 @@ static struct fixed_phy_status stmmac0_fixed_phy_status = { During the board's device_init we can configure the first MAC for fixed_link by calling: - fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status, -1); + fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status); and the second one, with a real PHY device attached to the bus, by using the stmmac_mdio_bus_data structure (to provide the id, the reset procedure etc). 
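A slightly fuller board-file sketch of the fixed-link case, using the new three-argument fixed_phy_add() call; the link parameters (100 Mbit/s, full duplex, always up) and the init-function name are only an example:

.. code-block:: c

    #include <linux/init.h>
    #include <linux/phy.h>
    #include <linux/phy_fixed.h>

    /* Emulated link status for the first MAC; values are illustrative. */
    static struct fixed_phy_status stmmac0_fixed_phy_status = {
            .link   = 1,
            .speed  = 100,
            .duplex = 1,
    };

    static int __init board_stmmac0_fixed_link_init(void)
    {
            /* Register the emulated PHY at MDIO address 1, polled (no IRQ). */
            return fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status);
    }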
diff --git a/Documentation/networking/devlink-health.txt b/Documentation/networking/devlink-health.txt new file mode 100644 index 000000000000..1db3fbea0831 --- /dev/null +++ b/Documentation/networking/devlink-health.txt @@ -0,0 +1,86 @@ +The health mechanism is targeted for Real Time Alerting, in order to know when +something bad had happened to a PCI device +- Provide alert debug information +- Self healing +- If problem needs vendor support, provide a way to gather all needed debugging + information. + +The main idea is to unify and centralize driver health reports in the +generic devlink instance and allow the user to set different +attributes of the health reporting and recovery procedures. + +The devlink health reporter: +Device driver creates a "health reporter" per each error/health type. +Error/Health type can be a known/generic (eg pci error, fw error, rx/tx error) +or unknown (driver specific). +For each registered health reporter a driver can issue error/health reports +asynchronously. All health reports handling is done by devlink. +Device driver can provide specific callbacks for each "health reporter", e.g. + - Recovery procedures + - Diagnostics and object dump procedures + - OOB initial parameters +Different parts of the driver can register different types of health reporters +with different handlers. + +Once an error is reported, devlink health will do the following actions: + * A log is being send to the kernel trace events buffer + * Health status and statistics are being updated for the reporter instance + * Object dump is being taken and saved at the reporter instance (as long as + there is no other dump which is already stored) + * Auto recovery attempt is being done. Depends on: + - Auto-recovery configuration + - Grace period vs. time passed since last recover + +The user interface: +User can access/change each reporter's parameters and driver specific callbacks +via devlink, e.g per error type (per health reporter) + - Configure reporter's generic parameters (like: disable/enable auto recovery) + - Invoke recovery procedure + - Run diagnostics + - Object dump + +The devlink health interface (via netlink): +DEVLINK_CMD_HEALTH_REPORTER_GET + Retrieves status and configuration info per DEV and reporter. +DEVLINK_CMD_HEALTH_REPORTER_SET + Allows reporter-related configuration setting. +DEVLINK_CMD_HEALTH_REPORTER_RECOVER + Triggers a reporter's recovery procedure. +DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE + Retrieves diagnostics data from a reporter on a device. +DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET + Retrieves the last stored dump. Devlink health + saves a single dump. If an dump is not already stored by the devlink + for this reporter, devlink generates a new dump. + dump output is defined by the reporter. +DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR + Clears the last saved dump file for the specified reporter. 
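On the driver side, registering a health reporter roughly follows the pattern sketched below. This is illustrative only: the reporter name, callback body and grace period are made up, and the exact devlink prototypes should be checked against the headers of the kernel version in use:

.. code-block:: c

    #include <net/devlink.h>

    static int example_fw_recover(struct devlink_health_reporter *reporter,
                                  void *priv_ctx)
    {
            /* Reset the firmware / queue; return 0 once the device is healthy. */
            return 0;
    }

    static const struct devlink_health_reporter_ops example_fw_reporter_ops = {
            .name    = "fw",
            .recover = example_fw_recover,
    };

    /* Called from the driver's devlink setup path; the grace period is in
     * milliseconds and auto-recovery is enabled by default here. */
    static struct devlink_health_reporter *
    example_fw_reporter_create(struct devlink *devlink)
    {
            return devlink_health_reporter_create(devlink,
                                                  &example_fw_reporter_ops,
                                                  60000, true, NULL);
    }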
+ + + netlink + +--------------------------+ + | | + | + | + | | | + +--------------------------+ + |request for ops + |(diagnose, + mlx5_core devlink |recover, + |dump) ++--------+ +--------------------------+ +| | | reporter| | +| | | +---------v----------+ | +| | ops execution | | | | +| <----------------------------------+ | | +| | | | | | +| | | + ^------------------+ | +| | | | request for ops | +| | | | (recover, dump) | +| | | | | +| | | +-+------------------+ | +| | health report | | health handler | | +| +-------------------------------> | | +| | | +--------------------+ | +| | health reporter create | | +| +----------------------------> | ++--------+ +--------------------------+ diff --git a/Documentation/networking/devlink-info-versions.rst b/Documentation/networking/devlink-info-versions.rst new file mode 100644 index 000000000000..c79ad8593383 --- /dev/null +++ b/Documentation/networking/devlink-info-versions.rst @@ -0,0 +1,43 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +===================== +Devlink info versions +===================== + +board.id +======== + +Unique identifier of the board design. + +board.rev +========= + +Board design revision. + +board.manufacture +================= + +An identifier of the company or the facility which produced the part. + +fw.mgmt +======= + +Control unit firmware version. This firmware is responsible for house +keeping tasks, PHY control etc. but not the packet-by-packet data path +operation. + +fw.app +====== + +Data path microcode controlling high-speed packet processing. + +fw.undi +======= + +UNDI software, may include the UEFI driver, firmware or both. + +fw.ncsi +======= + +Version of the software responsible for supporting/handling the +Network Controller Sideband Interface. diff --git a/Documentation/networking/devlink-params-mlxsw.txt b/Documentation/networking/devlink-params-mlxsw.txt new file mode 100644 index 000000000000..c63ea9fc7009 --- /dev/null +++ b/Documentation/networking/devlink-params-mlxsw.txt @@ -0,0 +1,10 @@ +fw_load_policy [DEVICE, GENERIC] + Configuration mode: driverinit + +acl_region_rehash_interval [DEVICE, DRIVER-SPECIFIC] + Sets an interval for periodic ACL region rehashes. + The value is in milliseconds, minimal value is "3000". + Value "0" disables the periodic work. + The first rehash will be run right after value is set. + Type: u32 + Configuration mode: runtime diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 25170ad7d25b..43ef767bc440 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -236,19 +236,6 @@ description. Design limitations ================== -DSA is a platform device driver -------------------------------- - -DSA is implemented as a DSA platform device driver which is convenient because -it will register the entire DSA switch tree attached to a master network device -in one-shot, facilitating the device creation and simplifying the device driver -model a bit, this comes however with a number of limitations: - -- building DSA and its switch drivers as modules is currently not working -- the device driver parenting does not necessarily reflect the original - bus/device the switch can be created from -- supporting non-MDIO and non-MMIO (platform) switches is not possible - Limits on the number of devices and ports ----------------------------------------- @@ -533,16 +520,12 @@ Bridge VLAN filtering function that the driver has to call for each VLAN the given port is a member of. 
A switchdev object is used to carry the VID and bridge flags. -- port_fdb_prepare: bridge layer function invoked when the bridge prepares the - installation of a Forwarding Database entry. If the operation is not - supported, this function should return -EOPNOTSUPP to inform the bridge code - to fallback to a software implementation. No hardware setup must be done in - this function. See port_fdb_add for this and details. - - port_fdb_add: bridge layer function invoked when the bridge wants to install a Forwarding Database entry, the switch hardware should be programmed with the specified address in the specified VLAN Id in the forwarding database - associated with this VLAN ID + associated with this VLAN ID. If the operation is not supported, this + function should return -EOPNOTSUPP to inform the bridge code to fallback to + a software implementation. Note: VLAN ID 0 corresponds to the port private database, which, in the context of DSA, would be the its port-based VLAN, used by the associated bridge device. diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 2196b824e96c..319e5e041f38 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -464,10 +464,11 @@ breakpoints: 0 1 JIT compiler ------------ -The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC, -ARM, ARM64, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT -compiler is transparently invoked for each attached filter from user space -or for internal kernel users if it has been previously enabled by root: +The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, +PowerPC, ARM, ARM64, MIPS, RISC-V and s390 and can be enabled through +CONFIG_BPF_JIT. The JIT compiler is transparently invoked for each +attached filter from user space or for internal kernel users if it has +been previously enabled by root: echo 1 > /proc/sys/net/core/bpf_jit_enable @@ -603,9 +604,10 @@ got from bpf_prog_create(), and 'ctx' the given context (e.g. skb pointer). All constraints and restrictions from bpf_check_classic() apply before a conversion to the new layout is being done behind the scenes! -Currently, the classic BPF format is being used for JITing on most 32-bit -architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64, arm32 perform -JIT compilation from eBPF instruction set. +Currently, the classic BPF format is being used for JITing on most +32-bit architectures, whereas x86-64, aarch64, s390x, powerpc64, +sparc64, arm32, riscv (RV64G) perform JIT compilation from eBPF +instruction set. Some core changes of the new internal format: @@ -827,7 +829,7 @@ tracing filters may do to maintain counters of events, for example. Register R9 is not used by socket filters either, but more complex filters may be running out of registers and would have to resort to spill/fill to stack. -Internal BPF can used as generic assembler for last step performance +Internal BPF can be used as a generic assembler for last step performance optimizations, socket filters and seccomp are using it as assembler. Tracing filters may use it as assembler to generate code from kernel. 
In kernel usage may not be bounded by security considerations, since generated internal BPF code @@ -865,7 +867,7 @@ Three LSB bits store instruction class which is one of: BPF_STX 0x03 BPF_STX 0x03 BPF_ALU 0x04 BPF_ALU 0x04 BPF_JMP 0x05 BPF_JMP 0x05 - BPF_RET 0x06 [ class 6 unused, for future if needed ] + BPF_RET 0x06 BPF_JMP32 0x06 BPF_MISC 0x07 BPF_ALU64 0x07 When BPF_CLASS(code) == BPF_ALU or BPF_JMP, 4th bit encodes source operand ... @@ -902,9 +904,9 @@ If BPF_CLASS(code) == BPF_ALU or BPF_ALU64 [ in eBPF ], BPF_OP(code) is one of: BPF_ARSH 0xc0 /* eBPF only: sign extending shift right */ BPF_END 0xd0 /* eBPF only: endianness conversion */ -If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of: +If BPF_CLASS(code) == BPF_JMP or BPF_JMP32 [ in eBPF ], BPF_OP(code) is one of: - BPF_JA 0x00 + BPF_JA 0x00 /* BPF_JMP only */ BPF_JEQ 0x10 BPF_JGT 0x20 BPF_JGE 0x30 @@ -912,8 +914,8 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of: BPF_JNE 0x50 /* eBPF only: jump != */ BPF_JSGT 0x60 /* eBPF only: signed '>' */ BPF_JSGE 0x70 /* eBPF only: signed '>=' */ - BPF_CALL 0x80 /* eBPF only: function call */ - BPF_EXIT 0x90 /* eBPF only: function return */ + BPF_CALL 0x80 /* eBPF BPF_JMP only: function call */ + BPF_EXIT 0x90 /* eBPF BPF_JMP only: function return */ BPF_JLT 0xa0 /* eBPF only: unsigned '<' */ BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */ BPF_JSLT 0xc0 /* eBPF only: signed '<' */ @@ -936,8 +938,9 @@ Classic BPF wastes the whole BPF_RET class to represent a single 'ret' operation. Classic BPF_RET | BPF_K means copy imm32 into return register and perform function exit. eBPF is modeled to match CPU, so BPF_JMP | BPF_EXIT in eBPF means function exit only. The eBPF program needs to store return -value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is currently -unused and reserved for future use. +value into register R0 before doing a BPF_EXIT. Class 6 in eBPF is used as +BPF_JMP32 to mean exactly the same operations as BPF_JMP, but with 32-bit wide +operands for the comparisons instead. For load and store instructions the 8-bit 'code' field is divided as: diff --git a/Documentation/networking/ieee802154.txt b/Documentation/networking/ieee802154.rst similarity index 58% rename from Documentation/networking/ieee802154.txt rename to Documentation/networking/ieee802154.rst index e74d8e1da0e2..36ca823a1122 100644 --- a/Documentation/networking/ieee802154.txt +++ b/Documentation/networking/ieee802154.rst @@ -1,54 +1,79 @@ - - Linux IEEE 802.15.4 implementation - +=============================== +IEEE 802.15.4 Developer's Guide +=============================== Introduction ============ The IEEE 802.15.4 working group focuses on standardization of the bottom two layers: Medium Access Control (MAC) and Physical access (PHY). And there are mainly two options available for upper layers: - - ZigBee - proprietary protocol from the ZigBee Alliance - - 6LoWPAN - IPv6 networking over low rate personal area networks + +- ZigBee - proprietary protocol from the ZigBee Alliance +- 6LoWPAN - IPv6 networking over low rate personal area networks The goal of the Linux-wpan is to provide a complete implementation of the IEEE 802.15.4 and 6LoWPAN protocols. IEEE 802.15.4 is a stack of protocols for organizing Low-Rate Wireless Personal Area Networks. 
The stack is composed of three main parts: - - IEEE 802.15.4 layer; We have chosen to use plain Berkeley socket API, - the generic Linux networking stack to transfer IEEE 802.15.4 data - messages and a special protocol over netlink for configuration/management - - MAC - provides access to shared channel and reliable data delivery - - PHY - represents device drivers +- IEEE 802.15.4 layer; We have chosen to use plain Berkeley socket API, + the generic Linux networking stack to transfer IEEE 802.15.4 data + messages and a special protocol over netlink for configuration/management +- MAC - provides access to shared channel and reliable data delivery +- PHY - represents device drivers Socket API ========== -int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0); -..... +.. c:function:: int sd = socket(PF_IEEE802154, SOCK_DGRAM, 0); The address family, socket addresses etc. are defined in the include/net/af_ieee802154.h header or in the special header in the userspace package (see either http://wpan.cakelab.org/ or the git tree at https://github.com/linux-wpan/wpan-tools). +6LoWPAN Linux implementation +============================ + +The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80 +octets of actual MAC payload once security is turned on, on a wireless link +with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format +[RFC4944] was specified to carry IPv6 datagrams over such constrained links, +taking into account limited bandwidth, memory, or energy resources that are +expected in applications such as wireless Sensor Networks. [RFC4944] defines +a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header +to support the IPv6 minimum MTU requirement [RFC2460], and stateless header +compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the +relatively large IPv6 and UDP headers down to (in the best case) several bytes. + +In September 2011 the standard update was published - [RFC6282]. +It deprecates HC1 and HC2 compression and defines IPHC encoding format which is +used in this Linux implementation. + +All the code related to 6lowpan you may find in files: net/6lowpan/* +and net/ieee802154/6lowpan/* + +To setup a 6LoWPAN interface you need: +1. Add IEEE802.15.4 interface and set channel and PAN ID; +2. Add 6lowpan interface by command like: +# ip link add link wpan0 name lowpan0 type lowpan +3. Bring up 'lowpan0' interface -Kernel side -============= +Drivers +======= Like with WiFi, there are several types of devices implementing IEEE 802.15.4. 1) 'HardMAC'. The MAC layer is implemented in the device itself, the device - exports a management (e.g. MLME) and data API. +exports a management (e.g. MLME) and data API. 2) 'SoftMAC' or just radio. These types of devices are just radio transceivers - possibly with some kinds of acceleration like automatic CRC computation and - comparation, automagic ACK handling, address matching, etc. +possibly with some kinds of acceleration like automatic CRC computation and +comparation, automagic ACK handling, address matching, etc. Those types of devices require different approach to be hooked into Linux kernel. - HardMAC -======= +------- See the header include/net/ieee802154_netdev.h. You have to implement Linux net_device, with .type = ARPHRD_IEEE802154. Data is exchanged with socket family @@ -64,9 +89,8 @@ net_device with a pointer to struct ieee802154_mlme_ops instance. The fields assoc_req, assoc_resp, disassoc_req, start_req, and scan_req are optional. All other fields are required. 
- SoftMAC -======= +------- The MAC is the middle layer in the IEEE 802.15.4 Linux stack. This moment it provides interface for drivers registration and management of slave interfaces. @@ -79,99 +103,78 @@ This layer is going to be extended soon. See header include/net/mac802154.h and several drivers in drivers/net/ieee802154/. +Fake drivers +------------ + +In addition there is a driver available which simulates a real device with +SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option +provides a possibility to test and debug the stack without usage of real hardware. Device drivers API ================== The include/net/mac802154.h defines following functions: - - struct ieee802154_hw * - ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops): - allocation of IEEE 802.15.4 compatible hardware device - - void ieee802154_free_hw(struct ieee802154_hw *hw): - freeing allocated hardware device +.. c:function:: struct ieee802154_dev *ieee802154_alloc_device (size_t priv_size, struct ieee802154_ops *ops) - - int ieee802154_register_hw(struct ieee802154_hw *hw): - register PHY which is the allocated hardware device, in the system +Allocation of IEEE 802.15.4 compatible device. - - void ieee802154_unregister_hw(struct ieee802154_hw *hw): - freeing registered PHY +.. c:function:: void ieee802154_free_device(struct ieee802154_dev *dev) - - void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, - u8 lqi): - telling 802.15.4 module there is a new received frame in the skb with - the RF Link Quality Indicator (LQI) from the hardware device +Freeing allocated device. - - void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, - bool ifs_handling): - telling 802.15.4 module the frame in the skb is or going to be - transmitted through the hardware device +.. c:function:: int ieee802154_register_device(struct ieee802154_dev *dev) + +Register PHY in the system. + +.. c:function:: void ieee802154_unregister_device(struct ieee802154_dev *dev) + +Freeing registered PHY. + +.. c:function:: void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi): + +Telling 802.15.4 module there is a new received frame in the skb with +the RF Link Quality Indicator (LQI) from the hardware device. + +.. c:function:: void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, bool ifs_handling): + +Telling 802.15.4 module the frame in the skb is or going to be +transmitted through the hardware device The device driver must implement the following callbacks in the IEEE 802.15.4 -operations structure at least: -struct ieee802154_ops { - ... - int (*start)(struct ieee802154_hw *hw); - void (*stop)(struct ieee802154_hw *hw); - ... - int (*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb); - int (*ed)(struct ieee802154_hw *hw, u8 *level); - int (*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel); - ... -}; - - - int start(struct ieee802154_hw *hw): - handler that 802.15.4 module calls for the hardware device initialization. - - - void stop(struct ieee802154_hw *hw): - handler that 802.15.4 module calls for the hardware device cleanup. - - - int xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb): - handler that 802.15.4 module calls for each frame in the skb going to be - transmitted through the hardware device. - - - int ed(struct ieee802154_hw *hw, u8 *level): - handler that 802.15.4 module calls for Energy Detection from the hardware - device. 
- - - int set_channel(struct ieee802154_hw *hw, u8 page, u8 channel): - set radio for listening on specific channel of the hardware device. +operations structure at least:: -Moreover IEEE 802.15.4 device operations structure should be filled. + struct ieee802154_ops { + ... + int (*start)(struct ieee802154_hw *hw); + void (*stop)(struct ieee802154_hw *hw); + ... + int (*xmit_async)(struct ieee802154_hw *hw, struct sk_buff *skb); + int (*ed)(struct ieee802154_hw *hw, u8 *level); + int (*set_channel)(struct ieee802154_hw *hw, u8 page, u8 channel); + ... + }; -Fake drivers -============ +.. c:function:: int start(struct ieee802154_hw *hw): -In addition there is a driver available which simulates a real device with -SoftMAC (fakelb - IEEE 802.15.4 loopback driver) interface. This option -provides a possibility to test and debug the stack without usage of real hardware. +Handler that 802.15.4 module calls for the hardware device initialization. -See sources in drivers/net/ieee802154 folder for more details. +.. c:function:: void stop(struct ieee802154_hw *hw): +Handler that 802.15.4 module calls for the hardware device cleanup. -6LoWPAN Linux implementation -============================ +.. c:function:: int xmit_async(struct ieee802154_hw *hw, struct sk_buff *skb): -The IEEE 802.15.4 standard specifies an MTU of 127 bytes, yielding about 80 -octets of actual MAC payload once security is turned on, on a wireless link -with a link throughput of 250 kbps or less. The 6LoWPAN adaptation format -[RFC4944] was specified to carry IPv6 datagrams over such constrained links, -taking into account limited bandwidth, memory, or energy resources that are -expected in applications such as wireless Sensor Networks. [RFC4944] defines -a Mesh Addressing header to support sub-IP forwarding, a Fragmentation header -to support the IPv6 minimum MTU requirement [RFC2460], and stateless header -compression for IPv6 datagrams (LOWPAN_HC1 and LOWPAN_HC2) to reduce the -relatively large IPv6 and UDP headers down to (in the best case) several bytes. +Handler that 802.15.4 module calls for each frame in the skb going to be +transmitted through the hardware device. -In September 2011 the standard update was published - [RFC6282]. -It deprecates HC1 and HC2 compression and defines IPHC encoding format which is -used in this Linux implementation. +.. c:function:: int ed(struct ieee802154_hw *hw, u8 *level): -All the code related to 6lowpan you may find in files: net/6lowpan/* -and net/ieee802154/6lowpan/* +Handler that 802.15.4 module calls for Energy Detection from the hardware +device. -To setup a 6LoWPAN interface you need: -1. Add IEEE802.15.4 interface and set channel and PAN ID; -2. Add 6lowpan interface by command like: - # ip link add link wpan0 name lowpan0 type lowpan -3. Bring up 'lowpan0' interface +.. c:function:: int set_channel(struct ieee802154_hw *hw, u8 page, u8 channel): + +Set radio for listening on specific channel of the hardware device. + +Moreover IEEE 802.15.4 device operations structure should be filled. diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 59e86de662cd..5449149be496 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -24,14 +24,21 @@ Contents: device_drivers/intel/i40e device_drivers/intel/iavf device_drivers/intel/ice + devlink-info-versions + ieee802154 kapi z8530book msg_zerocopy failover net_failover + phy + sfp-phylink alias bridge snmp_counter + checksum-offloads + segmentation-offloads + scaling .. 
only:: subproject diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst index fe46d4867e2d..18c1415e7bfa 100644 --- a/Documentation/networking/msg_zerocopy.rst +++ b/Documentation/networking/msg_zerocopy.rst @@ -7,7 +7,7 @@ Intro ===== The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. -The feature is currently implemented for TCP sockets. +The feature is currently implemented for TCP and UDP sockets. Opportunity and Caveats diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt index 355c6d8ef8ad..b203d1334822 100644 --- a/Documentation/networking/operstates.txt +++ b/Documentation/networking/operstates.txt @@ -22,8 +22,9 @@ and changeable from userspace under certain rules. 2. Querying from userspace Both admin and operational state can be queried via the netlink -operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK -to be notified of updates. This is important for setting from userspace. +operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK +to be notified of updates while the interface is admin up. This is +important for setting from userspace. These values contain interface state: @@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to complete. Corresponding functions are netif_dormant_on() to set the flag, netif_dormant_off() to clear it and netif_dormant() to query. -On device allocation, networking core sets the flags equivalent to -netif_carrier_ok() and !netif_dormant(). +On device allocation, both flags __LINK_STATE_NOCARRIER and +__LINK_STATE_DORMANT are cleared, so the effective state is equivalent +to netif_carrier_ok() and !netif_dormant(). Whenever the driver CHANGES one of these flags, a workqueue event is @@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the driver. Afterwards, the userspace application can set IFLA_OPERSTATE to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set netif_carrier_off() or netif_dormant_on(). Changes made by userspace -are multicasted on the netlink group RTMGRP_LINK. +are multicasted on the netlink group RTNLGRP_LINK. So basically a 802.1X supplicant interacts with the kernel like this: --subscribe to RTMGRP_LINK +-subscribe to RTNLGRP_LINK -set IFLA_LINKMODE to 1 via RTM_SETLINK -query RTM_GETLINK once to get initial state -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst new file mode 100644 index 000000000000..0dd90d7df5ec --- /dev/null +++ b/Documentation/networking/phy.rst @@ -0,0 +1,447 @@ +===================== +PHY Abstraction Layer +===================== + +Purpose +======= + +Most network devices consist of set of registers which provide an interface +to a MAC layer, which communicates with the physical connection through a +PHY. The PHY concerns itself with negotiating link parameters with the link +partner on the other side of the network connection (typically, an ethernet +cable), and provides a register interface to allow drivers to determine what +settings were chosen, and to configure what settings are allowed. + +While these devices are distinct from the network devices, and conform to a +standard layout for the registers, it has been common practice to integrate +the PHY management code with the network driver. This has resulted in large +amounts of redundant code. 
Also, on embedded systems with multiple (and +sometimes quite different) ethernet controllers connected to the same +management bus, it is difficult to ensure safe use of the bus. + +Since the PHYs are devices, and the management busses through which they are +accessed are, in fact, busses, the PHY Abstraction Layer treats them as such. +In doing so, it has these goals: + +#. Increase code-reuse +#. Increase overall code-maintainability +#. Speed development time for new network drivers, and for new systems + +Basically, this layer is meant to provide an interface to PHY devices which +allows network driver writers to write as little code as possible, while +still providing a full feature set. + +The MDIO bus +============ + +Most network devices are connected to a PHY by means of a management bus. +Different devices use different busses (though some share common interfaces). +In order to take advantage of the PAL, each bus interface needs to be +registered as a distinct device. + +#. read and write functions must be implemented. Their prototypes are:: + + int write(struct mii_bus *bus, int mii_id, int regnum, u16 value); + int read(struct mii_bus *bus, int mii_id, int regnum); + + mii_id is the address on the bus for the PHY, and regnum is the register + number. These functions are guaranteed not to be called from interrupt + time, so it is safe for them to block, waiting for an interrupt to signal + the operation is complete + +#. A reset function is optional. This is used to return the bus to an + initialized state. + +#. A probe function is needed. This function should set up anything the bus + driver needs, setup the mii_bus structure, and register with the PAL using + mdiobus_register. Similarly, there's a remove function to undo all of + that (use mdiobus_unregister). + +#. Like any driver, the device_driver structure must be configured, and init + exit functions are used to register the driver. + +#. The bus must also be declared somewhere as a device, and registered. + +As an example for how one driver implemented an mdio bus driver, see +drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file +for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/") + +(RG)MII/electrical interface considerations +=========================================== + +The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin +electrical signal interface using a synchronous 125Mhz clock signal and several +data lines. Due to this design decision, a 1.5ns to 2ns delay must be added +between the clock line (RXC or TXC) and the data lines to let the PHY (clock +sink) have enough setup and hold times to sample the data lines correctly. The +PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let +the PHY driver and optionally the MAC driver, implement the required delay. 
The +values of phy_interface_t must be understood from the perspective of the PHY +device itself, leading to the following: + +* PHY_INTERFACE_MODE_RGMII: the PHY is not responsible for inserting any + internal delay by itself, it assumes that either the Ethernet MAC (if capable + or the PCB traces) insert the correct 1.5-2ns delay + +* PHY_INTERFACE_MODE_RGMII_TXID: the PHY should insert an internal delay + for the transmit data lines (TXD[3:0]) processed by the PHY device + +* PHY_INTERFACE_MODE_RGMII_RXID: the PHY should insert an internal delay + for the receive data lines (RXD[3:0]) processed by the PHY device + +* PHY_INTERFACE_MODE_RGMII_ID: the PHY should insert internal delays for + both transmit AND receive data lines from/to the PHY device + +Whenever possible, use the PHY side RGMII delay for these reasons: + +* PHY devices may offer sub-nanosecond granularity in how they allow a + receiver/transmitter side delay (e.g: 0.5, 1.0, 1.5ns) to be specified. Such + precision may be required to account for differences in PCB trace lengths + +* PHY devices are typically qualified for a large range of applications + (industrial, medical, automotive...), and they provide a constant and + reliable delay across temperature/pressure/voltage ranges + +* PHY device drivers in PHYLIB being reusable by nature, being able to + configure correctly a specified delay enables more designs with similar delay + requirements to be operate correctly + +For cases where the PHY is not capable of providing this delay, but the +Ethernet MAC driver is capable of doing so, the correct phy_interface_t value +should be PHY_INTERFACE_MODE_RGMII, and the Ethernet MAC driver should be +configured correctly in order to provide the required transmit and/or receive +side delay from the perspective of the PHY device. Conversely, if the Ethernet +MAC driver looks at the phy_interface_t value, for any other mode but +PHY_INTERFACE_MODE_RGMII, it should make sure that the MAC-level delays are +disabled. + +In case neither the Ethernet MAC, nor the PHY are capable of providing the +required delays, as defined per the RGMII standard, several options may be +available: + +* Some SoCs may offer a pin pad/mux/controller capable of configuring a given + set of pins'strength, delays, and voltage; and it may be a suitable + option to insert the expected 2ns RGMII delay. + +* Modifying the PCB design to include a fixed delay (e.g: using a specifically + designed serpentine), which may not require software configuration at all. + +Common problems with RGMII delay mismatch +----------------------------------------- + +When there is a RGMII delay mismatch between the Ethernet MAC and the PHY, this +will most likely result in the clock and data line signals to be unstable when +the PHY or MAC take a snapshot of these signals to translate them into logical +1 or 0 states and reconstruct the data being transmitted/received. Typical +symptoms include: + +* Transmission/reception partially works, and there is frequent or occasional + packet loss observed + +* Ethernet MAC may report some or all packets ingressing with a FCS/CRC error, + or just discard them all + +* Switching to lower speeds such as 10/100Mbits/sec makes the problem go away + (since there is enough setup/hold time in that case) + +Connecting to a PHY +=================== + +Sometime during startup, the network driver needs to establish a connection +between the PHY device, and the network device. 
At this time, the PHY's bus +and drivers need to all have been loaded, so it is ready for the connection. +At this point, there are several ways to connect to the PHY: + +#. The PAL handles everything, and only calls the network driver when + the link state changes, so it can react. + +#. The PAL handles everything except interrupts (usually because the + controller has the interrupt registers). + +#. The PAL handles everything, but checks in with the driver every second, + allowing the network driver to react first to any changes before the PAL + does. + +#. The PAL serves only as a library of functions, with the network device + manually calling functions to update status, and configure the PHY + + +Letting the PHY Abstraction Layer do Everything +=============================================== + +If you choose option 1 (The hope is that every driver can, but to still be +useful to drivers that can't), connecting to the PHY is simple: + +First, you need a function to react to changes in the link state. This +function follows this protocol:: + + static void adjust_link(struct net_device *dev); + +Next, you need to know the device name of the PHY connected to this device. +The name will look something like, "0:00", where the first number is the +bus id, and the second is the PHY's address on that bus. Typically, +the bus is responsible for making its ID unique. + +Now, to connect, just call this function:: + + phydev = phy_connect(dev, phy_name, &adjust_link, interface); + +*phydev* is a pointer to the phy_device structure which represents the PHY. +If phy_connect is successful, it will return the pointer. dev, here, is the +pointer to your net_device. Once done, this function will have started the +PHY's software state machine, and registered for the PHY's interrupt, if it +has one. The phydev structure will be populated with information about the +current state, though the PHY will not yet be truly operational at this +point. + +PHY-specific flags should be set in phydev->dev_flags prior to the call +to phy_connect() such that the underlying PHY driver can check for flags +and perform specific operations based on them. +This is useful if the system has put hardware restrictions on +the PHY/controller, of which the PHY needs to be aware. + +*interface* is a u32 which specifies the connection type used +between the controller and the PHY. Examples are GMII, MII, +RGMII, and SGMII. For a full list, see include/linux/phy.h + +Now just make sure that phydev->supported and phydev->advertising have any +values pruned from them which don't make sense for your controller (a 10/100 +controller may be connected to a gigabit capable PHY, so you would need to +mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions +for these bitfields. Note that you should not SET any bits, except the +SUPPORTED_Pause and SUPPORTED_AsymPause bits (see below), or the PHY may get +put into an unsupported state. + +Lastly, once the controller is ready to handle network traffic, you call +phy_start(phydev). This tells the PAL that you are ready, and configures the +PHY to connect to the network. If the MAC interrupt of your network driver +also handles PHY status changes, just set phydev->irq to PHY_IGNORE_INTERRUPT +before you call phy_start and use phy_mac_interrupt() from the network +driver. If you don't want to use interrupts, set phydev->irq to PHY_POLL. +phy_start() enables the PHY interrupts (if applicable) and starts the +phylib state machine. 
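+
+As a rough sketch of this "option 1" flow (the foo_* names are hypothetical,
+error handling is trimmed, and the "0:00" PHY name and MII interface mode are
+placeholders for whatever your platform actually uses), the connect/start
+sequence might look like::
+
+    #include <linux/netdevice.h>
+    #include <linux/phy.h>
+
+    static void foo_adjust_link(struct net_device *dev)
+    {
+            /* Called by the PAL whenever the link state changes; a real
+             * driver would reprogram its MAC here for the negotiated
+             * speed/duplex before (or instead of) printing the status.
+             */
+            phy_print_status(dev->phydev);
+    }
+
+    static int foo_open(struct net_device *dev)
+    {
+            struct phy_device *phydev;
+
+            phydev = phy_connect(dev, "0:00", &foo_adjust_link,
+                                 PHY_INTERFACE_MODE_MII);
+            if (IS_ERR(phydev))
+                    return PTR_ERR(phydev);
+
+            /* No interrupt line wired up in this sketch, so poll. */
+            phydev->irq = PHY_POLL;
+
+            /* Controller is ready for traffic: start the phylib state
+             * machine.
+             */
+            phy_start(phydev);
+
+            return 0;
+    }
+
+This mirrors the steps above: connect, prune phydev->supported and
+phydev->advertising and set phydev->irq as needed, then call phy_start()
+once the controller can handle traffic.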
+ +When you want to disconnect from the network (even if just briefly), you call +phy_stop(phydev). This function also stops the phylib state machine and +disables PHY interrupts. + +Pause frames / flow control +=========================== + +The PHY does not participate directly in flow control/pause frames except by +making sure that the SUPPORTED_Pause and SUPPORTED_AsymPause bits are set in +MII_ADVERTISE to indicate towards the link partner that the Ethernet MAC +controller supports such a thing. Since flow control/pause frames generation +involves the Ethernet MAC driver, it is recommended that this driver takes care +of properly indicating advertisement and support for such features by setting +the SUPPORTED_Pause and SUPPORTED_AsymPause bits accordingly. This can be done +either before or after phy_connect() and/or as a result of implementing the +ethtool::set_pauseparam feature. + + +Keeping Close Tabs on the PAL +============================= + +It is possible that the PAL's built-in state machine needs a little help to +keep your network device and the PHY properly in sync. If so, you can +register a helper function when connecting to the PHY, which will be called +every second before the state machine reacts to any changes. To do this, you +need to manually call phy_attach() and phy_prepare_link(), and then call +phy_start_machine() with the second argument set to point to your special +handler. + +Currently there are no examples of how to use this functionality, and testing +on it has been limited because the author does not have any drivers which use +it (they all use option 1). So Caveat Emptor. + +Doing it all yourself +===================== + +There's a remote chance that the PAL's built-in state machine cannot track +the complex interactions between the PHY and your network device. If this is +so, you can simply call phy_attach(), and not call phy_start_machine or +phy_prepare_link(). This will mean that phydev->state is entirely yours to +handle (phy_start and phy_stop toggle between some of the states, so you +might need to avoid them). + +An effort has been made to make sure that useful functionality can be +accessed without the state-machine running, and most of these functions are +descended from functions which did not interact with a complex state-machine. +However, again, no effort has been made so far to test running without the +state machine, so tryer beware. + +Here is a brief rundown of the functions:: + + int phy_read(struct phy_device *phydev, u16 regnum); + int phy_write(struct phy_device *phydev, u16 regnum, u16 val); + +Simple read/write primitives. They invoke the bus's read/write function +pointers. +:: + + void phy_print_status(struct phy_device *phydev); + +A convenience function to print out the PHY status neatly. +:: + + void phy_request_interrupt(struct phy_device *phydev); + +Requests the IRQ for the PHY interrupts. +:: + + struct phy_device * phy_attach(struct net_device *dev, const char *phy_id, + phy_interface_t interface); + +Attaches a network device to a particular PHY, binding the PHY to a generic +driver if none was found during bus initialization. +:: + + int phy_start_aneg(struct phy_device *phydev); + +Using variables inside the phydev structure, either configures advertising +and resets autonegotiation, or disables autonegotiation, and configures +forced settings. +:: + + static inline int phy_read_status(struct phy_device *phydev); + +Fills the phydev structure with up-to-date information about the current +settings in the PHY. 
+:: + + int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); + +Ethtool convenience functions. +:: + + int phy_mii_ioctl(struct phy_device *phydev, + struct mii_ioctl_data *mii_data, int cmd); + +The MII ioctl. Note that this function will completely screw up the state +machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to +use this only to write registers which are not standard, and don't set off +a renegotiation. + +PHY Device Drivers +================== + +With the PHY Abstraction Layer, adding support for new PHYs is +quite easy. In some cases, no work is required at all! However, +many PHYs require a little hand-holding to get up-and-running. + +Generic PHY driver +------------------ + +If the desired PHY doesn't have any errata, quirks, or special +features you want to support, then it may be best to not add +support, and let the PHY Abstraction Layer's Generic PHY Driver +do all of the work. + +Writing a PHY driver +-------------------- + +If you do need to write a PHY driver, the first thing to do is +make sure it can be matched with an appropriate PHY device. +This is done during bus initialization by reading the device's +UID (stored in registers 2 and 3), then comparing it to each +driver's phy_id field by ANDing it with each driver's +phy_id_mask field. Also, it needs a name. Here's an example:: + + static struct phy_driver dm9161_driver = { + .phy_id = 0x0181b880, + .name = "Davicom DM9161E", + .phy_id_mask = 0x0ffffff0, + ... + } + +Next, you need to specify what features (speed, duplex, autoneg, +etc) your PHY device and driver support. Most PHYs support +PHY_BASIC_FEATURES, but you can look in include/mii.h for other +features. + +Each driver consists of a number of function pointers, documented +in include/linux/phy.h under the phy_driver structure. + +Of these, only config_aneg and read_status are required to be +assigned by the driver code. The rest are optional. Also, it is +preferred to use the generic phy driver's versions of these two +functions if at all possible: genphy_read_status and +genphy_config_aneg. If this is not possible, it is likely that +you only need to perform some actions before and after invoking +these functions, and so your functions will wrap the generic +ones. + +Feel free to look at the Marvell, Cicada, and Davicom drivers in +drivers/net/phy/ for examples (the lxt and qsemi drivers have +not been tested as of this writing). + +The PHY's MMD register accesses are handled by the PAL framework +by default, but can be overridden by a specific PHY driver if +required. This could be the case if a PHY was released for +manufacturing before the MMD PHY register definitions were +standardized by the IEEE. Most modern PHYs will be able to use +the generic PAL framework for accessing the PHY's MMD registers. +An example of such usage is for Energy Efficient Ethernet support, +implemented in the PAL. This support uses the PAL to access MMD +registers for EEE query and configuration if the PHY supports +the IEEE standard access mechanisms, or can use the PHY's specific +access interfaces if overridden by the specific PHY driver. See +the Micrel driver in drivers/net/phy/ for an example of how this +can be implemented. + +Board Fixups +============ + +Sometimes the specific interaction between the platform and the PHY requires +special handling. For instance, to change where the PHY's clock input is, +or to add a delay to account for latency issues in the data path. 
In order +to support such contingencies, the PHY Layer allows platform code to register +fixups to be run when the PHY is brought up (or subsequently reset). + +When the PHY Layer brings up a PHY it checks to see if there are any fixups +registered for it, matching based on UID (contained in the PHY device's phy_id +field) and the bus identifier (contained in phydev->dev.bus_id). Both must +match, however two constants, PHY_ANY_ID and PHY_ANY_UID, are provided as +wildcards for the bus ID and UID, respectively. + +When a match is found, the PHY layer will invoke the run function associated +with the fixup. This function is passed a pointer to the phy_device of +interest. It should therefore only operate on that PHY. + +The platform code can either register the fixup using phy_register_fixup():: + + int phy_register_fixup(const char *phy_id, + u32 phy_uid, u32 phy_uid_mask, + int (*run)(struct phy_device *)); + +Or using one of the two stubs, phy_register_fixup_for_uid() and +phy_register_fixup_for_id():: + + int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, + int (*run)(struct phy_device *)); + int phy_register_fixup_for_id(const char *phy_id, + int (*run)(struct phy_device *)); + +The stubs set one of the two matching criteria, and set the other one to +match anything. + +When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module, +unregister fixup and free allocate memory are required. + +Call one of following function before unloading module:: + + int phy_unregister_fixup(const char *phy_id, u32 phy_uid, u32 phy_uid_mask); + int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); + int phy_register_fixup_for_id(const char *phy_id); + +Standards +========= + +IEEE Standard 802.3: CSMA/CD Access Method and Physical Layer Specifications, Section Two: +http://standards.ieee.org/getieee802/download/802.3-2008_section2.pdf + +RGMII v1.3: +http://web.archive.org/web/20160303212629/http://www.hp.com/rnd/pdfs/RGMIIv1_3.pdf + +RGMII v2.0: +http://web.archive.org/web/20160303171328/http://www.hp.com/rnd/pdfs/RGMIIv2_0_final_hp.pdf diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt deleted file mode 100644 index bdec0f700bc1..000000000000 --- a/Documentation/networking/phy.txt +++ /dev/null @@ -1,427 +0,0 @@ - -------- -PHY Abstraction Layer -(Updated 2008-04-08) - -Purpose - - Most network devices consist of set of registers which provide an interface - to a MAC layer, which communicates with the physical connection through a - PHY. The PHY concerns itself with negotiating link parameters with the link - partner on the other side of the network connection (typically, an ethernet - cable), and provides a register interface to allow drivers to determine what - settings were chosen, and to configure what settings are allowed. - - While these devices are distinct from the network devices, and conform to a - standard layout for the registers, it has been common practice to integrate - the PHY management code with the network driver. This has resulted in large - amounts of redundant code. Also, on embedded systems with multiple (and - sometimes quite different) ethernet controllers connected to the same - management bus, it is difficult to ensure safe use of the bus. - - Since the PHYs are devices, and the management busses through which they are - accessed are, in fact, busses, the PHY Abstraction Layer treats them as such. 
- In doing so, it has these goals: - - 1) Increase code-reuse - 2) Increase overall code-maintainability - 3) Speed development time for new network drivers, and for new systems - - Basically, this layer is meant to provide an interface to PHY devices which - allows network driver writers to write as little code as possible, while - still providing a full feature set. - -The MDIO bus - - Most network devices are connected to a PHY by means of a management bus. - Different devices use different busses (though some share common interfaces). - In order to take advantage of the PAL, each bus interface needs to be - registered as a distinct device. - - 1) read and write functions must be implemented. Their prototypes are: - - int write(struct mii_bus *bus, int mii_id, int regnum, u16 value); - int read(struct mii_bus *bus, int mii_id, int regnum); - - mii_id is the address on the bus for the PHY, and regnum is the register - number. These functions are guaranteed not to be called from interrupt - time, so it is safe for them to block, waiting for an interrupt to signal - the operation is complete - - 2) A reset function is optional. This is used to return the bus to an - initialized state. - - 3) A probe function is needed. This function should set up anything the bus - driver needs, setup the mii_bus structure, and register with the PAL using - mdiobus_register. Similarly, there's a remove function to undo all of - that (use mdiobus_unregister). - - 4) Like any driver, the device_driver structure must be configured, and init - exit functions are used to register the driver. - - 5) The bus must also be declared somewhere as a device, and registered. - - As an example for how one driver implemented an mdio bus driver, see - drivers/net/ethernet/freescale/fsl_pq_mdio.c and an associated DTS file - for one of the users. (e.g. "git grep fsl,.*-mdio arch/powerpc/boot/dts/") - -(RG)MII/electrical interface considerations - - The Reduced Gigabit Medium Independent Interface (RGMII) is a 12-pin - electrical signal interface using a synchronous 125Mhz clock signal and several - data lines. Due to this design decision, a 1.5ns to 2ns delay must be added - between the clock line (RXC or TXC) and the data lines to let the PHY (clock - sink) have enough setup and hold times to sample the data lines correctly. The - PHY library offers different types of PHY_INTERFACE_MODE_RGMII* values to let - the PHY driver and optionally the MAC driver, implement the required delay. The - values of phy_interface_t must be understood from the perspective of the PHY - device itself, leading to the following: - - * PHY_INTERFACE_MODE_RGMII: the PHY is not responsible for inserting any - internal delay by itself, it assumes that either the Ethernet MAC (if capable - or the PCB traces) insert the correct 1.5-2ns delay - - * PHY_INTERFACE_MODE_RGMII_TXID: the PHY should insert an internal delay - for the transmit data lines (TXD[3:0]) processed by the PHY device - - * PHY_INTERFACE_MODE_RGMII_RXID: the PHY should insert an internal delay - for the receive data lines (RXD[3:0]) processed by the PHY device - - * PHY_INTERFACE_MODE_RGMII_ID: the PHY should insert internal delays for - both transmit AND receive data lines from/to the PHY device - - Whenever possible, use the PHY side RGMII delay for these reasons: - - * PHY devices may offer sub-nanosecond granularity in how they allow a - receiver/transmitter side delay (e.g: 0.5, 1.0, 1.5ns) to be specified. 
Such - precision may be required to account for differences in PCB trace lengths - - * PHY devices are typically qualified for a large range of applications - (industrial, medical, automotive...), and they provide a constant and - reliable delay across temperature/pressure/voltage ranges - - * PHY device drivers in PHYLIB being reusable by nature, being able to - configure correctly a specified delay enables more designs with similar delay - requirements to be operate correctly - - For cases where the PHY is not capable of providing this delay, but the - Ethernet MAC driver is capable of doing so, the correct phy_interface_t value - should be PHY_INTERFACE_MODE_RGMII, and the Ethernet MAC driver should be - configured correctly in order to provide the required transmit and/or receive - side delay from the perspective of the PHY device. Conversely, if the Ethernet - MAC driver looks at the phy_interface_t value, for any other mode but - PHY_INTERFACE_MODE_RGMII, it should make sure that the MAC-level delays are - disabled. - - In case neither the Ethernet MAC, nor the PHY are capable of providing the - required delays, as defined per the RGMII standard, several options may be - available: - - * Some SoCs may offer a pin pad/mux/controller capable of configuring a given - set of pins'strength, delays, and voltage; and it may be a suitable - option to insert the expected 2ns RGMII delay. - - * Modifying the PCB design to include a fixed delay (e.g: using a specifically - designed serpentine), which may not require software configuration at all. - -Common problems with RGMII delay mismatch - - When there is a RGMII delay mismatch between the Ethernet MAC and the PHY, this - will most likely result in the clock and data line signals to be unstable when - the PHY or MAC take a snapshot of these signals to translate them into logical - 1 or 0 states and reconstruct the data being transmitted/received. Typical - symptoms include: - - * Transmission/reception partially works, and there is frequent or occasional - packet loss observed - - * Ethernet MAC may report some or all packets ingressing with a FCS/CRC error, - or just discard them all - - * Switching to lower speeds such as 10/100Mbits/sec makes the problem go away - (since there is enough setup/hold time in that case) - - -Connecting to a PHY - - Sometime during startup, the network driver needs to establish a connection - between the PHY device, and the network device. At this time, the PHY's bus - and drivers need to all have been loaded, so it is ready for the connection. - At this point, there are several ways to connect to the PHY: - - 1) The PAL handles everything, and only calls the network driver when - the link state changes, so it can react. - - 2) The PAL handles everything except interrupts (usually because the - controller has the interrupt registers). - - 3) The PAL handles everything, but checks in with the driver every second, - allowing the network driver to react first to any changes before the PAL - does. - - 4) The PAL serves only as a library of functions, with the network device - manually calling functions to update status, and configure the PHY - - -Letting the PHY Abstraction Layer do Everything - - If you choose option 1 (The hope is that every driver can, but to still be - useful to drivers that can't), connecting to the PHY is simple: - - First, you need a function to react to changes in the link state. 
This - function follows this protocol: - - static void adjust_link(struct net_device *dev); - - Next, you need to know the device name of the PHY connected to this device. - The name will look something like, "0:00", where the first number is the - bus id, and the second is the PHY's address on that bus. Typically, - the bus is responsible for making its ID unique. - - Now, to connect, just call this function: - - phydev = phy_connect(dev, phy_name, &adjust_link, interface); - - phydev is a pointer to the phy_device structure which represents the PHY. If - phy_connect is successful, it will return the pointer. dev, here, is the - pointer to your net_device. Once done, this function will have started the - PHY's software state machine, and registered for the PHY's interrupt, if it - has one. The phydev structure will be populated with information about the - current state, though the PHY will not yet be truly operational at this - point. - - PHY-specific flags should be set in phydev->dev_flags prior to the call - to phy_connect() such that the underlying PHY driver can check for flags - and perform specific operations based on them. - This is useful if the system has put hardware restrictions on - the PHY/controller, of which the PHY needs to be aware. - - interface is a u32 which specifies the connection type used - between the controller and the PHY. Examples are GMII, MII, - RGMII, and SGMII. For a full list, see include/linux/phy.h - - Now just make sure that phydev->supported and phydev->advertising have any - values pruned from them which don't make sense for your controller (a 10/100 - controller may be connected to a gigabit capable PHY, so you would need to - mask off SUPPORTED_1000baseT*). See include/linux/ethtool.h for definitions - for these bitfields. Note that you should not SET any bits, except the - SUPPORTED_Pause and SUPPORTED_AsymPause bits (see below), or the PHY may get - put into an unsupported state. - - Lastly, once the controller is ready to handle network traffic, you call - phy_start(phydev). This tells the PAL that you are ready, and configures the - PHY to connect to the network. If you want to handle your own interrupts, - just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start. - Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL. - - When you want to disconnect from the network (even if just briefly), you call - phy_stop(phydev). - -Pause frames / flow control - - The PHY does not participate directly in flow control/pause frames except by - making sure that the SUPPORTED_Pause and SUPPORTED_AsymPause bits are set in - MII_ADVERTISE to indicate towards the link partner that the Ethernet MAC - controller supports such a thing. Since flow control/pause frames generation - involves the Ethernet MAC driver, it is recommended that this driver takes care - of properly indicating advertisement and support for such features by setting - the SUPPORTED_Pause and SUPPORTED_AsymPause bits accordingly. This can be done - either before or after phy_connect() and/or as a result of implementing the - ethtool::set_pauseparam feature. - - -Keeping Close Tabs on the PAL - - It is possible that the PAL's built-in state machine needs a little help to - keep your network device and the PHY properly in sync. If so, you can - register a helper function when connecting to the PHY, which will be called - every second before the state machine reacts to any changes. 
To do this, you - need to manually call phy_attach() and phy_prepare_link(), and then call - phy_start_machine() with the second argument set to point to your special - handler. - - Currently there are no examples of how to use this functionality, and testing - on it has been limited because the author does not have any drivers which use - it (they all use option 1). So Caveat Emptor. - -Doing it all yourself - - There's a remote chance that the PAL's built-in state machine cannot track - the complex interactions between the PHY and your network device. If this is - so, you can simply call phy_attach(), and not call phy_start_machine or - phy_prepare_link(). This will mean that phydev->state is entirely yours to - handle (phy_start and phy_stop toggle between some of the states, so you - might need to avoid them). - - An effort has been made to make sure that useful functionality can be - accessed without the state-machine running, and most of these functions are - descended from functions which did not interact with a complex state-machine. - However, again, no effort has been made so far to test running without the - state machine, so tryer beware. - - Here is a brief rundown of the functions: - - int phy_read(struct phy_device *phydev, u16 regnum); - int phy_write(struct phy_device *phydev, u16 regnum, u16 val); - - Simple read/write primitives. They invoke the bus's read/write function - pointers. - - void phy_print_status(struct phy_device *phydev); - - A convenience function to print out the PHY status neatly. - - int phy_start_interrupts(struct phy_device *phydev); - int phy_stop_interrupts(struct phy_device *phydev); - - Requests the IRQ for the PHY interrupts, then enables them for - start, or disables then frees them for stop. - - struct phy_device * phy_attach(struct net_device *dev, const char *phy_id, - phy_interface_t interface); - - Attaches a network device to a particular PHY, binding the PHY to a generic - driver if none was found during bus initialization. - - int phy_start_aneg(struct phy_device *phydev); - - Using variables inside the phydev structure, either configures advertising - and resets autonegotiation, or disables autonegotiation, and configures - forced settings. - - static inline int phy_read_status(struct phy_device *phydev); - - Fills the phydev structure with up-to-date information about the current - settings in the PHY. - - int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); - - Ethtool convenience functions. - - int phy_mii_ioctl(struct phy_device *phydev, - struct mii_ioctl_data *mii_data, int cmd); - - The MII ioctl. Note that this function will completely screw up the state - machine if you write registers like BMCR, BMSR, ADVERTISE, etc. Best to - use this only to write registers which are not standard, and don't set off - a renegotiation. - - -PHY Device Drivers - - With the PHY Abstraction Layer, adding support for new PHYs is - quite easy. In some cases, no work is required at all! However, - many PHYs require a little hand-holding to get up-and-running. - -Generic PHY driver - - If the desired PHY doesn't have any errata, quirks, or special - features you want to support, then it may be best to not add - support, and let the PHY Abstraction Layer's Generic PHY Driver - do all of the work. - -Writing a PHY driver - - If you do need to write a PHY driver, the first thing to do is - make sure it can be matched with an appropriate PHY device. 
- This is done during bus initialization by reading the device's - UID (stored in registers 2 and 3), then comparing it to each - driver's phy_id field by ANDing it with each driver's - phy_id_mask field. Also, it needs a name. Here's an example: - - static struct phy_driver dm9161_driver = { - .phy_id = 0x0181b880, - .name = "Davicom DM9161E", - .phy_id_mask = 0x0ffffff0, - ... - } - - Next, you need to specify what features (speed, duplex, autoneg, - etc) your PHY device and driver support. Most PHYs support - PHY_BASIC_FEATURES, but you can look in include/mii.h for other - features. - - Each driver consists of a number of function pointers, documented - in include/linux/phy.h under the phy_driver structure. - - Of these, only config_aneg and read_status are required to be - assigned by the driver code. The rest are optional. Also, it is - preferred to use the generic phy driver's versions of these two - functions if at all possible: genphy_read_status and - genphy_config_aneg. If this is not possible, it is likely that - you only need to perform some actions before and after invoking - these functions, and so your functions will wrap the generic - ones. - - Feel free to look at the Marvell, Cicada, and Davicom drivers in - drivers/net/phy/ for examples (the lxt and qsemi drivers have - not been tested as of this writing). - - The PHY's MMD register accesses are handled by the PAL framework - by default, but can be overridden by a specific PHY driver if - required. This could be the case if a PHY was released for - manufacturing before the MMD PHY register definitions were - standardized by the IEEE. Most modern PHYs will be able to use - the generic PAL framework for accessing the PHY's MMD registers. - An example of such usage is for Energy Efficient Ethernet support, - implemented in the PAL. This support uses the PAL to access MMD - registers for EEE query and configuration if the PHY supports - the IEEE standard access mechanisms, or can use the PHY's specific - access interfaces if overridden by the specific PHY driver. See - the Micrel driver in drivers/net/phy/ for an example of how this - can be implemented. - -Board Fixups - - Sometimes the specific interaction between the platform and the PHY requires - special handling. For instance, to change where the PHY's clock input is, - or to add a delay to account for latency issues in the data path. In order - to support such contingencies, the PHY Layer allows platform code to register - fixups to be run when the PHY is brought up (or subsequently reset). - - When the PHY Layer brings up a PHY it checks to see if there are any fixups - registered for it, matching based on UID (contained in the PHY device's phy_id - field) and the bus identifier (contained in phydev->dev.bus_id). Both must - match, however two constants, PHY_ANY_ID and PHY_ANY_UID, are provided as - wildcards for the bus ID and UID, respectively. - - When a match is found, the PHY layer will invoke the run function associated - with the fixup. This function is passed a pointer to the phy_device of - interest. It should therefore only operate on that PHY. 
- - The platform code can either register the fixup using phy_register_fixup(): - - int phy_register_fixup(const char *phy_id, - u32 phy_uid, u32 phy_uid_mask, - int (*run)(struct phy_device *)); - - Or using one of the two stubs, phy_register_fixup_for_uid() and - phy_register_fixup_for_id(): - - int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, - int (*run)(struct phy_device *)); - int phy_register_fixup_for_id(const char *phy_id, - int (*run)(struct phy_device *)); - - The stubs set one of the two matching criteria, and set the other one to - match anything. - - When phy_register_fixup() or *_for_uid()/*_for_id() is called at module, - unregister fixup and free allocate memory are required. - - Call one of following function before unloading module. - - int phy_unregister_fixup(const char *phy_id, u32 phy_uid, u32 phy_uid_mask); - int phy_unregister_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask); - int phy_register_fixup_for_id(const char *phy_id); - -Standards - - IEEE Standard 802.3: CSMA/CD Access Method and Physical Layer Specifications, Section Two: - http://standards.ieee.org/getieee802/download/802.3-2008_section2.pdf - - RGMII v1.3: - http://web.archive.org/web/20160303212629/http://www.hp.com/rnd/pdfs/RGMIIv1_3.pdf - - RGMII v2.0: - http://web.archive.org/web/20160303171328/http://www.hp.com/rnd/pdfs/RGMIIv2_0_final_hp.pdf diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.rst similarity index 92% rename from Documentation/networking/scaling.txt rename to Documentation/networking/scaling.rst index b7056a8a0540..f78d7bf27ff5 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.rst @@ -1,4 +1,8 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===================================== Scaling in the Linux Networking Stack +===================================== Introduction @@ -10,11 +14,11 @@ multi-processor systems. The following technologies are described: - RSS: Receive Side Scaling - RPS: Receive Packet Steering - RFS: Receive Flow Steering - Accelerated Receive Flow Steering - XPS: Transmit Packet Steering +- RSS: Receive Side Scaling +- RPS: Receive Packet Steering +- RFS: Receive Flow Steering +- Accelerated Receive Flow Steering +- XPS: Transmit Packet Steering RSS: Receive Side Scaling @@ -45,7 +49,9 @@ programmable filters. For example, webserver bound TCP port 80 packets can be directed to their own receive queue. Such “n-tuple” filters can be configured from ethtool (--config-ntuple). -==== RSS Configuration + +RSS Configuration +----------------- The driver for a multi-queue capable NIC typically provides a kernel module parameter for specifying the number of hardware queues to @@ -63,7 +69,9 @@ commands (--show-rxfh-indir and --set-rxfh-indir). Modifying the indirection table could be done to give different queues different relative weights. -== RSS IRQ Configuration + +RSS IRQ Configuration +~~~~~~~~~~~~~~~~~~~~~ Each receive queue has a separate IRQ associated with it. The NIC triggers this to notify a CPU when new packets arrive on the given queue. The @@ -77,7 +85,9 @@ affinity of each interrupt see Documentation/IRQ-affinity.txt. Some systems will be running irqbalance, a daemon that dynamically optimizes IRQ assignments and as a result may override any manual settings. -== Suggested Configuration + +Suggested Configuration +~~~~~~~~~~~~~~~~~~~~~~~ RSS should be enabled when latency is a concern or whenever receive interrupt processing forms a bottleneck. 
Spreading load between CPUs @@ -105,10 +115,12 @@ Whereas RSS selects the queue and hence CPU that will run the hardware interrupt handler, RPS selects the CPU to perform protocol processing above the interrupt handler. This is accomplished by placing the packet on the desired CPU’s backlog queue and waking up the CPU for processing. -RPS has some advantages over RSS: 1) it can be used with any NIC, -2) software filters can easily be added to hash over new protocols, +RPS has some advantages over RSS: + +1) it can be used with any NIC +2) software filters can easily be added to hash over new protocols 3) it does not increase hardware device interrupt rate (although it does -introduce inter-processor interrupts (IPIs)). + introduce inter-processor interrupts (IPIs)) RPS is called during bottom half of the receive interrupt handler, when a driver sends a packet up the network stack with netif_rx() or @@ -135,21 +147,25 @@ packets have been queued to their backlog queue. The IPI wakes backlog processing on the remote CPU, and any queued packets are then processed up the networking stack. -==== RPS Configuration + +RPS Configuration +----------------- RPS requires a kernel compiled with the CONFIG_RPS kconfig symbol (on by default for SMP). Even when compiled in, RPS remains disabled until explicitly configured. The list of CPUs to which RPS may forward traffic -can be configured for each receive queue using a sysfs file entry: +can be configured for each receive queue using a sysfs file entry:: - /sys/class/net//queues/rx-/rps_cpus + /sys/class/net//queues/rx-/rps_cpus This file implements a bitmap of CPUs. RPS is disabled when it is zero (the default), in which case packets are processed on the interrupting CPU. Documentation/IRQ-affinity.txt explains how CPUs are assigned to the bitmap. -== Suggested Configuration + +Suggested Configuration +~~~~~~~~~~~~~~~~~~~~~~~ For a single queue device, a typical RPS configuration would be to set the rps_cpus to the CPUs in the same memory domain of the interrupting @@ -163,7 +179,9 @@ and unnecessary. If there are fewer hardware queues than CPUs, then RPS might be beneficial if the rps_cpus for each queue are the ones that share the same memory domain as the interrupting CPU for that queue. -==== RPS Flow Limit + +RPS Flow Limit +-------------- RPS scales kernel receive processing across CPUs without introducing reordering. The trade-off to sending all packets from the same flow @@ -187,29 +205,33 @@ No packets are dropped when the input packet queue length is below the threshold, so flow limit does not sever connections outright: even large flows maintain connectivity. -== Interface + +Interface +~~~~~~~~~ Flow limit is compiled in by default (CONFIG_NET_FLOW_LIMIT), but not turned on. It is implemented for each CPU independently (to avoid lock and cache contention) and toggled per CPU by setting the relevant bit in sysctl net.core.flow_limit_cpu_bitmap. It exposes the same CPU -bitmap interface as rps_cpus (see above) when called from procfs: +bitmap interface as rps_cpus (see above) when called from procfs:: - /proc/sys/net/core/flow_limit_cpu_bitmap + /proc/sys/net/core/flow_limit_cpu_bitmap Per-flow rate is calculated by hashing each packet into a hashtable bucket and incrementing a per-bucket counter. The hash function is the same that selects a CPU in RPS, but as the number of buckets can be much larger than the number of CPUs, flow limit has finer-grained identification of large flows and fewer false positives. 
The default -table has 4096 buckets. This value can be modified through sysctl +table has 4096 buckets. This value can be modified through sysctl:: - net.core.flow_limit_table_len + net.core.flow_limit_table_len The value is only consulted when a new table is allocated. Modifying it does not update active tables. -== Suggested Configuration + +Suggested Configuration +~~~~~~~~~~~~~~~~~~~~~~~ Flow limit is useful on systems with many concurrent connections, where a single connection taking up 50% of a CPU indicates a problem. @@ -280,10 +302,10 @@ table), the packet is enqueued onto that CPU’s backlog. If they differ, the current CPU is updated to match the desired CPU if one of the following is true: -- The current CPU's queue head counter >= the recorded tail counter - value in rps_dev_flow[i] -- The current CPU is unset (>= nr_cpu_ids) -- The current CPU is offline + - The current CPU's queue head counter >= the recorded tail counter + value in rps_dev_flow[i] + - The current CPU is unset (>= nr_cpu_ids) + - The current CPU is offline After this check, the packet is sent to the (possibly updated) current CPU. These rules aim to ensure that a flow only moves to a new CPU when @@ -291,19 +313,23 @@ there are no packets outstanding on the old CPU, as the outstanding packets could arrive later than those about to be processed on the new CPU. -==== RFS Configuration + +RFS Configuration +----------------- RFS is only available if the kconfig symbol CONFIG_RPS is enabled (on by default for SMP). The functionality remains disabled until explicitly -configured. The number of entries in the global flow table is set through: +configured. The number of entries in the global flow table is set through:: + + /proc/sys/net/core/rps_sock_flow_entries - /proc/sys/net/core/rps_sock_flow_entries +The number of entries in the per-queue flow table are set through:: -The number of entries in the per-queue flow table are set through: + /sys/class/net//queues/rx-/rps_flow_cnt - /sys/class/net//queues/rx-/rps_flow_cnt -== Suggested Configuration +Suggested Configuration +~~~~~~~~~~~~~~~~~~~~~~~ Both of these need to be set before RFS is enabled for a receive queue. Values for both are rounded up to the nearest power of two. The @@ -347,7 +373,9 @@ functions in the cpu_rmap (“CPU affinity reverse map”) kernel library to populate the map. For each CPU, the corresponding queue in the map is set to be one whose processing CPU is closest in cache locality. -==== Accelerated RFS Configuration + +Accelerated RFS Configuration +----------------------------- Accelerated RFS is only available if the kernel is compiled with CONFIG_RFS_ACCEL and support is provided by the NIC device and driver. @@ -356,11 +384,14 @@ of CPU to queues is automatically deduced from the IRQ affinities configured for each receive queue by the driver, so no additional configuration should be necessary. -== Suggested Configuration + +Suggested Configuration +~~~~~~~~~~~~~~~~~~~~~~~ This technique should be enabled whenever one wants to use RFS and the NIC supports hardware acceleration. + XPS: Transmit Packet Steering ============================= @@ -430,20 +461,25 @@ transport layer is responsible for setting ooo_okay appropriately. TCP, for instance, sets the flag when all data for a connection has been acknowledged. -==== XPS Configuration +XPS Configuration +----------------- XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by default for SMP). The functionality remains disabled until explicitly configured. 
To enable XPS, the bitmap of CPUs/receive-queues that may use a transmit queue is configured using the sysfs file entry: -For selection based on CPUs map: -/sys/class/net//queues/tx-/xps_cpus +For selection based on CPUs map:: + + /sys/class/net//queues/tx-/xps_cpus + +For selection based on receive-queues map:: + + /sys/class/net//queues/tx-/xps_rxqs -For selection based on receive-queues map: -/sys/class/net//queues/tx-/xps_rxqs -== Suggested Configuration +Suggested Configuration +~~~~~~~~~~~~~~~~~~~~~~~ For a network device with a single transmission queue, XPS configuration has no effect, since there is no choice in this case. In a multi-queue @@ -460,16 +496,18 @@ explicitly configured mapping receive-queue(s) to transmit queue(s). If the user configuration for receive-queue map does not apply, then the transmit queue is selected based on the CPUs map. -Per TX Queue rate limitation: -============================= + +Per TX Queue rate limitation +============================ These are rate-limitation mechanisms implemented by HW, where currently -a max-rate attribute is supported, by setting a Mbps value to +a max-rate attribute is supported, by setting a Mbps value to:: -/sys/class/net//queues/tx-/tx_maxrate + /sys/class/net//queues/tx-/tx_maxrate A value of zero means disabled, and this is the default. + Further Information =================== RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into @@ -480,5 +518,6 @@ Accelerated RFS was introduced in 2.6.35. Original patches were submitted by Ben Hutchings (bwh@kernel.org) Authors: -Tom Herbert (therbert@google.com) -Willem de Bruijn (willemb@google.com) + +- Tom Herbert (therbert@google.com) +- Willem de Bruijn (willemb@google.com) diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.rst similarity index 88% rename from Documentation/networking/segmentation-offloads.txt rename to Documentation/networking/segmentation-offloads.rst index aca542ec125c..89d1ee933e9f 100644 --- a/Documentation/networking/segmentation-offloads.txt +++ b/Documentation/networking/segmentation-offloads.rst @@ -1,4 +1,9 @@ -Segmentation Offloads in the Linux Networking Stack +.. SPDX-License-Identifier: GPL-2.0 + +===================== +Segmentation Offloads +===================== + Introduction ============ @@ -15,6 +20,7 @@ The following technologies are described: * Partial Generic Segmentation Offload - GSO_PARTIAL * SCTP accelleration with GSO - GSO_BY_FRAGS + TCP Segmentation Offload ======================== @@ -42,6 +48,7 @@ NETIF_F_TSO_MANGLEID set then the IP ID can be ignored when performing TSO and we will either increment the IP ID for all frames, or leave it at a static value based on driver preference. + UDP Fragmentation Offload ========================= @@ -54,6 +61,7 @@ UFO is deprecated: modern kernels will no longer generate UFO skbs, but can still receive them from tuntap and similar devices. Offload of UDP-based tunnel protocols is still supported. + IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads ======================================================== @@ -71,17 +79,19 @@ refer to the tunnel headers as the outer headers, while the encapsulated data is normally referred to as the inner headers. 
Below is the list of calls to access the given headers: -IPIP/SIT Tunnel: - Outer Inner -MAC skb_mac_header -Network skb_network_header skb_inner_network_header -Transport skb_transport_header +IPIP/SIT Tunnel:: + + Outer Inner + MAC skb_mac_header + Network skb_network_header skb_inner_network_header + Transport skb_transport_header -UDP/GRE Tunnel: - Outer Inner -MAC skb_mac_header skb_inner_mac_header -Network skb_network_header skb_inner_network_header -Transport skb_transport_header skb_inner_transport_header +UDP/GRE Tunnel:: + + Outer Inner + MAC skb_mac_header skb_inner_mac_header + Network skb_network_header skb_inner_network_header + Transport skb_transport_header skb_inner_transport_header In addition to the above tunnel types there are also SKB_GSO_GRE_CSUM and SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the @@ -93,6 +103,7 @@ header has requested a remote checksum offload. In this case the inner headers will be left with a partial checksum and only the outer header checksum will be computed. + Generic Segmentation Offload ============================ @@ -106,6 +117,7 @@ Before enabling any hardware segmentation offload a corresponding software offload is required in GSO. Otherwise it becomes possible for a frame to be re-routed between devices and end up being unable to be transmitted. + Generic Receive Offload ======================= @@ -117,6 +129,7 @@ this is IPv4 ID in the case that the DF bit is set for a given IP header. If the value of the IPv4 ID is not sequentially incrementing it will be altered so that it is when a frame assembled via GRO is segmented via GSO. + Partial Generic Segmentation Offload ==================================== @@ -134,6 +147,7 @@ is the outer IPv4 ID field. It is up to the device drivers to guarantee that the IPv4 ID field is incremented in the case that a given header does not have the DF bit set. + SCTP accelleration with GSO =========================== @@ -157,14 +171,14 @@ appropriately. There are some helpers to make this easier: - - skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if - an skb is an SCTP GSO skb. +- skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if + an skb is an SCTP GSO skb. - - For size checks, the skb_gso_validate_*_len family of helpers correctly - considers GSO_BY_FRAGS. +- For size checks, the skb_gso_validate_*_len family of helpers correctly + considers GSO_BY_FRAGS. - - For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size - will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs. +- For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size + will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs. This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE. diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst new file mode 100644 index 000000000000..5bd26cb07244 --- /dev/null +++ b/Documentation/networking/sfp-phylink.rst @@ -0,0 +1,268 @@ +.. SPDX-License-Identifier: GPL-2.0 + +======= +phylink +======= + +Overview +======== + +phylink is a mechanism to support hot-pluggable networking modules +without needing to re-initialise the adapter on hot-plug events. + +phylink supports conventional phylib-based setups, fixed link setups +and SFP (Small Formfactor Pluggable) modules at present. 
+ +Modes of operation +================== + +phylink has several modes of operation, which depend on the firmware +settings. + +1. PHY mode + + In PHY mode, we use phylib to read the current link settings from + the PHY, and pass them to the MAC driver. We expect the MAC driver + to configure exactly the modes that are specified without any + negotiation being enabled on the link. + +2. Fixed mode + + Fixed mode is the same as PHY mode as far as the MAC driver is + concerned. + +3. In-band mode + + In-band mode is used with 802.3z, SGMII and similar interface modes, + and we are expecting to use and honor the in-band negotiation or + control word sent across the serdes channel. + +By example, what this means is that: + +.. code-block:: none + + ð { + phy = <&phy>; + phy-mode = "sgmii"; + }; + +does not use in-band SGMII signalling. The PHY is expected to follow +exactly the settings given to it in its :c:func:`mac_config` function. +The link should be forced up or down appropriately in the +:c:func:`mac_link_up` and :c:func:`mac_link_down` functions. + +.. code-block:: none + + ð { + managed = "in-band-status"; + phy = <&phy>; + phy-mode = "sgmii"; + }; + +uses in-band mode, where results from the PHY's negotiation are passed +to the MAC through the SGMII control word, and the MAC is expected to +acknowledge the control word. The :c:func:`mac_link_up` and +:c:func:`mac_link_down` functions must not force the MAC side link +up and down. + +Rough guide to converting a network driver to sfp/phylink +========================================================= + +This guide briefly describes how to convert a network driver from +phylib to the sfp/phylink support. Please send patches to improve +this documentation. + +1. Optionally split the network driver's phylib update function into + three parts dealing with link-down, link-up and reconfiguring the + MAC settings. This can be done as a separate preparation commit. + + An example of this preparation can be found in git commit fc548b991fb0. + +2. Replace:: + + select FIXED_PHY + select PHYLIB + + with:: + + select PHYLINK + + in the driver's Kconfig stanza. + +3. Add:: + + #include + + to the driver's list of header files. + +4. Add:: + + struct phylink *phylink; + + to the driver's private data structure. We shall refer to the + driver's private data pointer as ``priv`` below, and the driver's + private data structure as ``struct foo_priv``. + +5. Replace the following functions: + + .. flat-table:: + :header-rows: 1 + :widths: 1 1 + :stub-columns: 0 + + * - Original function + - Replacement function + * - phy_start(phydev) + - phylink_start(priv->phylink) + * - phy_stop(phydev) + - phylink_stop(priv->phylink) + * - phy_mii_ioctl(phydev, ifr, cmd) + - phylink_mii_ioctl(priv->phylink, ifr, cmd) + * - phy_ethtool_get_wol(phydev, wol) + - phylink_ethtool_get_wol(priv->phylink, wol) + * - phy_ethtool_set_wol(phydev, wol) + - phylink_ethtool_set_wol(priv->phylink, wol) + * - phy_disconnect(phydev) + - phylink_disconnect_phy(priv->phylink) + + Please note that some of these functions must be called under the + rtnl lock, and will warn if not. This will normally be the case, + except if these are called from the driver suspend/resume paths. + +6. Add/replace ksettings get/set methods with: + + .. 
code-block:: c + + static int foo_ethtool_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) + { + struct foo_priv *priv = netdev_priv(dev); + + return phylink_ethtool_ksettings_set(priv->phylink, cmd); + } + + static int foo_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) + { + struct foo_priv *priv = netdev_priv(dev); + + return phylink_ethtool_ksettings_get(priv->phylink, cmd); + } + +7. Replace the call to: + + phy_dev = of_phy_connect(dev, node, link_func, flags, phy_interface); + + and associated code with a call to: + + err = phylink_of_phy_connect(priv->phylink, node, flags); + + For the most part, ``flags`` can be zero; these flags are passed to + the of_phy_attach() inside this function call if a PHY is specified + in the DT node ``node``. + + ``node`` should be the DT node which contains the network phy property, + fixed link properties, and will also contain the sfp property. + + The setup of fixed links should also be removed; these are handled + internally by phylink. + + of_phy_connect() was also passed a function pointer for link updates. + This function is replaced by a different form of MAC updates + described below in (8). + + Manipulation of the PHY's supported/advertised happens within phylink + based on the validate callback, see below in (8). + + Note that the driver no longer needs to store the ``phy_interface``, + and also note that ``phy_interface`` becomes a dynamic property, + just like the speed, duplex etc. settings. + + Finally, note that the MAC driver has no direct access to the PHY + anymore; that is because in the phylink model, the PHY can be + dynamic. + +8. Add a :c:type:`struct phylink_mac_ops ` instance to + the driver, which is a table of function pointers, and implement + these functions. The old link update function for + :c:func:`of_phy_connect` becomes three methods: :c:func:`mac_link_up`, + :c:func:`mac_link_down`, and :c:func:`mac_config`. If step 1 was + performed, then the functionality will have been split there. + + It is important that if in-band negotiation is used, + :c:func:`mac_link_up` and :c:func:`mac_link_down` do not prevent the + in-band negotiation from completing, since these functions are called + when the in-band link state changes - otherwise the link will never + come up. + + The :c:func:`validate` method should mask the supplied supported mask, + and ``state->advertising`` with the supported ethtool link modes. + These are the new ethtool link modes, so bitmask operations must be + used. For an example, see drivers/net/ethernet/marvell/mvneta.c. + + The :c:func:`mac_link_state` method is used to read the link state + from the MAC, and report back the settings that the MAC is currently + using. This is particularly important for in-band negotiation + methods such as 1000base-X and SGMII. + + The :c:func:`mac_config` method is used to update the MAC with the + requested state, and must avoid unnecessarily taking the link down + when making changes to the MAC configuration. This means the + function should modify the state and only take the link down when + absolutely necessary to change the MAC configuration. An example + of how to do this can be found in :c:func:`mvneta_mac_config` in + drivers/net/ethernet/marvell/mvneta.c. + + For further information on these methods, please see the inline + documentation in :c:type:`struct phylink_mac_ops `. + +9. Remove calls to of_parse_phandle() for the PHY, + of_phy_register_fixed_link() for fixed links etc. 
from the probe + function, and replace with: + + .. code-block:: c + + struct phylink *phylink; + + phylink = phylink_create(dev, node, phy_mode, &phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + fail probe; + } + + priv->phylink = phylink; + + and arrange to destroy the phylink in the probe failure path as + appropriate and the removal path too by calling: + + .. code-block:: c + + phylink_destroy(priv->phylink); + +10. Arrange for MAC link state interrupts to be forwarded into + phylink, via: + + .. code-block:: c + + phylink_mac_change(priv->phylink, link_is_up); + + where ``link_is_up`` is true if the link is currently up or false + otherwise. + +11. Verify that the driver does not call:: + + netif_carrier_on() + netif_carrier_off() + + as these will interfere with phylink's tracking of the link state, + and cause phylink to omit calls via the :c:func:`mac_link_up` and + :c:func:`mac_link_down` methods. + +Network drivers should call phylink_stop() and phylink_start() via their +suspend/resume paths, which ensures that the appropriate +:c:type:`struct phylink_mac_ops ` methods are called +as necessary. + +For information describing the SFP cage in DT, please see the binding +documentation in the kernel source tree +``Documentation/devicetree/bindings/net/sff,sfp.txt`` diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst index fe8f741193be..52b026be028f 100644 --- a/Documentation/networking/snmp_counter.rst +++ b/Documentation/networking/snmp_counter.rst @@ -1,16 +1,17 @@ -=========== +============ SNMP counter -=========== +============ This document explains the meaning of SNMP counters. General IPv4 counters -==================== +===================== All layer 4 packets and ICMP packets will change these counters, but these counters won't be changed by layer 2 packets (such as STP) or ARP packets. * IpInReceives + Defined in `RFC1213 ipInReceives`_ .. _RFC1213 ipInReceives: https://tools.ietf.org/html/rfc1213#page-26 @@ -23,6 +24,7 @@ and so on). It indicates the number of aggregated segments after GRO/LRO. * IpInDelivers + Defined in `RFC1213 ipInDelivers`_ .. _RFC1213 ipInDelivers: https://tools.ietf.org/html/rfc1213#page-28 @@ -33,6 +35,7 @@ supported protocols will be delivered, if someone listens on the raw socket, all valid IP packets will be delivered. * IpOutRequests + Defined in `RFC1213 ipOutRequests`_ .. _RFC1213 ipOutRequests: https://tools.ietf.org/html/rfc1213#page-28 @@ -42,6 +45,7 @@ multicast packets, and would always be updated together with IpExtOutOctets. * IpExtInOctets and IpExtOutOctets + They are Linux kernel extensions, no RFC definitions. Please note, RFC1213 indeed defines ifInOctets and ifOutOctets, but they are different things. The ifInOctets and ifOutOctets include the MAC @@ -49,6 +53,7 @@ layer header size but IpExtInOctets and IpExtOutOctets don't, they only include the IP layer header and the IP layer data. * IpExtInNoECTPkts, IpExtInECT1Pkts, IpExtInECT0Pkts, IpExtInCEPkts + They indicate the number of four kinds of ECN IP packets, please refer `Explicit Congestion Notification`_ for more details. @@ -60,6 +65,7 @@ for the same packet, you might find that IpInReceives count 1, but IpExtInNoECTPkts counts 2 or more. * IpInHdrErrors + Defined in `RFC1213 ipInHdrErrors`_. It indicates the packet is dropped due to the IP header error. It might happen in both IP input and IP forward paths. @@ -67,6 +73,7 @@ and IP forward paths. .. 
_RFC1213 ipInHdrErrors: https://tools.ietf.org/html/rfc1213#page-27 * IpInAddrErrors + Defined in `RFC1213 ipInAddrErrors`_. It will be increased in two scenarios: (1) The IP address is invalid. (2) The destination IP address is not a local address and IP forwarding is not enabled @@ -74,6 +81,7 @@ address is not a local address and IP forwarding is not enabled .. _RFC1213 ipInAddrErrors: https://tools.ietf.org/html/rfc1213#page-27 * IpExtInNoRoutes + This counter means the packet is dropped when the IP stack receives a packet and can't find a route for it from the route table. It might happen when IP forwarding is enabled and the destination IP address is @@ -81,6 +89,7 @@ not a local address and there is no route for the destination IP address. * IpInUnknownProtos + Defined in `RFC1213 ipInUnknownProtos`_. It will be increased if the layer 4 protocol is unsupported by kernel. If an application is using raw socket, kernel will always deliver the packet to the raw socket @@ -89,10 +98,12 @@ and this counter won't be increased. .. _RFC1213 ipInUnknownProtos: https://tools.ietf.org/html/rfc1213#page-27 * IpExtInTruncatedPkts + For IPv4 packet, it means the actual data size is smaller than the "Total Length" field in the IPv4 header. * IpInDiscards + Defined in `RFC1213 ipInDiscards`_. It indicates the packet is dropped in the IP receiving path and due to kernel internal reasons (e.g. no enough memory). @@ -100,20 +111,23 @@ enough memory). .. _RFC1213 ipInDiscards: https://tools.ietf.org/html/rfc1213#page-28 * IpOutDiscards + Defined in `RFC1213 ipOutDiscards`_. It indicates the packet is dropped in the IP sending path and due to kernel internal reasons. .. _RFC1213 ipOutDiscards: https://tools.ietf.org/html/rfc1213#page-28 * IpOutNoRoutes + Defined in `RFC1213 ipOutNoRoutes`_. It indicates the packet is dropped in the IP sending path and no route is found for it. .. _RFC1213 ipOutNoRoutes: https://tools.ietf.org/html/rfc1213#page-29 ICMP counters -============ +============= * IcmpInMsgs and IcmpOutMsgs + Defined by `RFC1213 icmpInMsgs`_ and `RFC1213 icmpOutMsgs`_ .. _RFC1213 icmpInMsgs: https://tools.ietf.org/html/rfc1213#page-41 @@ -126,6 +140,7 @@ IcmpOutMsgs would still be updated if the IP header is constructed by a userspace program. * ICMP named types + | These counters include most of common ICMP types, they are: | IcmpInDestUnreachs: `RFC1213 icmpInDestUnreachs`_ | IcmpInTimeExcds: `RFC1213 icmpInTimeExcds`_ @@ -180,6 +195,7 @@ straightforward. The 'In' counter means kernel receives such a packet and the 'Out' counter means kernel sends such a packet. * ICMP numeric types + They are IcmpMsgInType[N] and IcmpMsgOutType[N], the [N] indicates the ICMP type number. These counters track all kinds of ICMP packets. The ICMP type number definition could be found in the `ICMP parameters`_ @@ -192,12 +208,14 @@ IcmpMsgOutType8 would increase 1. And if kernel gets an ICMP Echo Reply packet, IcmpMsgInType0 would increase 1. * IcmpInCsumErrors + This counter indicates the checksum of the ICMP packet is wrong. Kernel verifies the checksum after updating the IcmpInMsgs and before updating IcmpMsgInType[N]. If a packet has bad checksum, the IcmpInMsgs would be updated but none of IcmpMsgInType[N] would be updated. * IcmpInErrors and IcmpOutErrors + Defined by `RFC1213 icmpInErrors`_ and `RFC1213 icmpOutErrors`_ .. _RFC1213 icmpInErrors: https://tools.ietf.org/html/rfc1213#page-41 @@ -209,7 +227,7 @@ and the sending packet path use IcmpOutErrors. 
When IcmpInCsumErrors is increased, IcmpInErrors would always be increased too. relationship of the ICMP counters -------------------------------- +--------------------------------- The sum of IcmpMsgOutType[N] is always equal to IcmpOutMsgs, as they are updated at the same time. The sum of IcmpMsgInType[N] plus IcmpInErrors should be equal or larger than IcmpInMsgs. When kernel @@ -229,8 +247,9 @@ IcmpInMsgs should be less than the sum of IcmpMsgOutType[N] plus IcmpInErrors. General TCP counters -================== +==================== * TcpInSegs + Defined in `RFC1213 tcpInSegs`_ .. _RFC1213 tcpInSegs: https://tools.ietf.org/html/rfc1213#page-48 @@ -247,6 +266,7 @@ isn't aware of GRO. So if two packets are merged by GRO, the TcpInSegs counter would only increase 1. * TcpOutSegs + Defined in `RFC1213 tcpOutSegs`_ .. _RFC1213 tcpOutSegs: https://tools.ietf.org/html/rfc1213#page-48 @@ -258,6 +278,7 @@ GSO, so if a packet would be split to 2 by GSO, TcpOutSegs will increase 2. * TcpActiveOpens + Defined in `RFC1213 tcpActiveOpens`_ .. _RFC1213 tcpActiveOpens: https://tools.ietf.org/html/rfc1213#page-47 @@ -267,6 +288,7 @@ state. Every time TcpActiveOpens increases 1, TcpOutSegs should always increase 1. * TcpPassiveOpens + Defined in `RFC1213 tcpPassiveOpens`_ .. _RFC1213 tcpPassiveOpens: https://tools.ietf.org/html/rfc1213#page-47 @@ -275,6 +297,7 @@ It means the TCP layer receives a SYN, replies a SYN+ACK, come into the SYN-RCVD state. * TcpExtTCPRcvCoalesce + When packets are received by the TCP layer and are not be read by the application, the TCP layer will try to merge them. This counter indicate how many packets are merged in such situation. If GRO is @@ -282,12 +305,14 @@ enabled, lots of packets would be merged by GRO, these packets wouldn't be counted to TcpExtTCPRcvCoalesce. * TcpExtTCPAutoCorking + When sending packets, the TCP layer will try to merge small packets to a bigger one. This counter increase 1 for every packet merged in such situation. Please refer to the LWN article for more details: https://lwn.net/Articles/576263/ * TcpExtTCPOrigDataSent + This counter is explained by `kernel commit f19c29e3e391`_, I pasted the explaination below:: @@ -297,6 +322,7 @@ explaination below:: more useful to track the TCP retransmission rate. * TCPSynRetrans + This counter is explained by `kernel commit f19c29e3e391`_, I pasted the explaination below:: @@ -304,6 +330,7 @@ explaination below:: retransmissions into SYN, fast-retransmits, timeout retransmits, etc. * TCPFastOpenActiveFail + This counter is explained by `kernel commit f19c29e3e391`_, I pasted the explaination below:: @@ -313,6 +340,7 @@ explaination below:: .. _kernel commit f19c29e3e391: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f19c29e3e391a66a273e9afebaf01917245148cd * TcpExtListenOverflows and TcpExtListenDrops + When kernel receives a SYN from a client, and if the TCP accept queue is full, kernel will drop the SYN and add 1 to TcpExtListenOverflows. At the same time kernel will also add 1 to TcpExtListenDrops. When a @@ -336,17 +364,22 @@ time client replies ACK, this socket will get another chance to move to the accept queue. +TCP Fast Open +============= * TcpEstabResets + Defined in `RFC1213 tcpEstabResets`_. .. _RFC1213 tcpEstabResets: https://tools.ietf.org/html/rfc1213#page-48 * TcpAttemptFails + Defined in `RFC1213 tcpAttemptFails`_. .. _RFC1213 tcpAttemptFails: https://tools.ietf.org/html/rfc1213#page-48 * TcpOutRsts + Defined in `RFC1213 tcpOutRsts`_. 
The RFC says this counter indicates the 'segments sent containing the RST flag', but in linux kernel, this couner indicates the segments kerenl tried to send. The sending @@ -354,6 +387,30 @@ process might be failed due to some errors (e.g. memory alloc failed). .. _RFC1213 tcpOutRsts: https://tools.ietf.org/html/rfc1213#page-52 +* TcpExtTCPSpuriousRtxHostQueues + +When the TCP stack wants to retransmit a packet, and finds that packet +is not lost in the network, but the packet is not sent yet, the TCP +stack would give up the retransmission and update this counter. It +might happen if a packet stays too long time in a qdisc or driver +queue. + +* TcpEstabResets + +The socket receives a RST packet in Establish or CloseWait state. + +* TcpExtTCPKeepAlive + +This counter indicates many keepalive packets were sent. The keepalive +won't be enabled by default. A userspace program could enable it by +setting the SO_KEEPALIVE socket option. + +* TcpExtTCPSpuriousRTOs + +The spurious retransmission timeout detected by the `F-RTO`_ +algorithm. + +.. _F-RTO: https://tools.ietf.org/html/rfc5682 TCP Fast Path ============ @@ -389,20 +446,23 @@ will disable the fast path at first, and try to enable it after kernel receives packets. * TcpExtTCPPureAcks and TcpExtTCPHPAcks + If a packet set ACK flag and has no data, it is a pure ACK packet, if kernel handles it in the fast path, TcpExtTCPHPAcks will increase 1, if kernel handles it in the slow path, TcpExtTCPPureAcks will increase 1. * TcpExtTCPHPHits + If a TCP packet has data (which means it is not a pure ACK packet), and this packet is handled in the fast path, TcpExtTCPHPHits will increase 1. TCP abort -======== +========= * TcpExtTCPAbortOnData + It means TCP layer has data in flight, but need to close the connection. So TCP layer sends a RST to the other side, indicate the connection is not closed very graceful. An easy way to increase this @@ -421,11 +481,13 @@ when the application closes a connection, kernel will send a RST immediately and increase the TcpExtTCPAbortOnData counter. * TcpExtTCPAbortOnClose + This counter means the application has unread data in the TCP layer when the application wants to close the TCP connection. In such a situation, kernel will send a RST to the other side of the TCP connection. * TcpExtTCPAbortOnMemory + When an application closes a TCP connection, kernel still need to track the connection, let it complete the TCP disconnect process. E.g. an app calls the close method of a socket, kernel sends fin to the other @@ -447,10 +509,12 @@ the tcp_mem. Please refer the tcp_mem section in the `TCP man page`_: * TcpExtTCPAbortOnTimeout + This counter will increase when any of the TCP timers expire. In such situation, kernel won't send RST, just give up the connection. * TcpExtTCPAbortOnLinger + When a TCP connection comes into FIN_WAIT_2 state, instead of waiting for the fin packet from the other side, kernel could send a RST and delete the socket immediately. This is not the default behavior of @@ -458,6 +522,7 @@ Linux kernel TCP stack. By configuring the TCP_LINGER2 socket option, you could let kernel follow this behavior. * TcpExtTCPAbortFailed + The kernel TCP layer will send RST if the `RFC2525 2.17 section`_ is satisfied. If an internal error occurs during this process, TcpExtTCPAbortFailed will be increased. @@ -465,7 +530,7 @@ TcpExtTCPAbortFailed will be increased. .. 
_RFC2525 2.17 section: https://tools.ietf.org/html/rfc2525#page-50 TCP Hybrid Slow Start -==================== +===================== The Hybrid Slow Start algorithm is an enhancement of the traditional TCP congestion window Slow Start algorithm. It uses two pieces of information to detect whether the max bandwidth of the TCP path is @@ -481,23 +546,27 @@ relate with the Hybrid Slow Start algorithm. .. _Hybrid Slow Start paper: https://pdfs.semanticscholar.org/25e9/ef3f03315782c7f1cbcd31b587857adae7d1.pdf * TcpExtTCPHystartTrainDetect + How many times the ACK train length threshold is detected * TcpExtTCPHystartTrainCwnd + The sum of CWND detected by ACK train length. Dividing this value by TcpExtTCPHystartTrainDetect is the average CWND which detected by the ACK train length. * TcpExtTCPHystartDelayDetect + How many times the packet delay threshold is detected. * TcpExtTCPHystartDelayCwnd + The sum of CWND detected by packet delay. Dividing this value by TcpExtTCPHystartDelayDetect is the average CWND which detected by the packet delay. TCP retransmission and congestion control -====================================== +========================================= The TCP protocol has two retransmission mechanisms: SACK and fast recovery. They are exclusive with each other. When SACK is enabled, the kernel TCP stack would use SACK, or kernel would use fast @@ -516,12 +585,14 @@ https://pdfs.semanticscholar.org/0e9c/968d09ab2e53e24c4dca5b2d67c7f7140f8e.pdf .. _RFC6582: https://tools.ietf.org/html/rfc6582 * TcpExtTCPRenoRecovery and TcpExtTCPSackRecovery + When the congestion control comes into Recovery state, if sack is used, TcpExtTCPSackRecovery increases 1, if sack is not used, TcpExtTCPRenoRecovery increases 1. These two counters mean the TCP stack begins to retransmit the lost packets. * TcpExtTCPSACKReneging + A packet was acknowledged by SACK, but the receiver has dropped this packet, so the sender needs to retransmit this packet. In this situation, the sender adds 1 to TcpExtTCPSACKReneging. A receiver @@ -532,6 +603,7 @@ the RTO expires for this packet, then the sender assumes this packet has been dropped by the receiver. * TcpExtTCPRenoReorder + The reorder packet is detected by fast recovery. It would only be used if SACK is disabled. The fast recovery algorithm detects recorder by the duplicate ACK number. E.g., if retransmission is triggered, and @@ -542,6 +614,7 @@ order packet. Thus the sender would find more ACks than its expectation, and the sender knows out of order occurs. * TcpExtTCPTSReorder + The reorder packet is detected when a hole is filled. E.g., assume the sender sends packet 1,2,3,4,5, and the receiving order is 1,2,4,5,3. When the sender receives the ACK of packet 3 (which will @@ -551,6 +624,7 @@ fill the hole), two conditions will let TcpExtTCPTSReorder increase than the retransmission timestamp. * TcpExtTCPSACKReorder + The reorder packet detected by SACK. The SACK has two methods to detect reorder: (1) DSACK is received by the sender. It means the sender sends the same packet more than one times. And the only reason @@ -562,6 +636,29 @@ packet yet, the sender would know packet 4 is out of order. The TCP stack of kernel will increase TcpExtTCPSACKReorder for both of the above scenarios. +* TcpExtTCPSlowStartRetrans + +The TCP stack wants to retransmit a packet and the congestion control +state is 'Loss'. + +* TcpExtTCPFastRetrans + +The TCP stack wants to retransmit a packet and the congestion control +state is not 'Loss'. 
+ +* TcpExtTCPLostRetransmit + +A SACK points out that a retransmission packet is lost again. + +* TcpExtTCPRetransFail + +The TCP stack tries to deliver a retransmission packet to lower layers +but the lower layers return an error. + +* TcpExtTCPSynRetrans + +The TCP stack retransmits a SYN packet. + DSACK ===== The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report @@ -574,10 +671,12 @@ sender side. .. _RFC2883 : https://tools.ietf.org/html/rfc2883 * TcpExtTCPDSACKOldSent + The TCP stack receives a duplicate packet which has been acked, so it sends a DSACK to the sender. * TcpExtTCPDSACKOfoSent + The TCP stack receives an out of order duplicate packet, so it sends a DSACK to the sender. @@ -586,6 +685,7 @@ The TCP stack receives a DSACK, which indicates an acknowledged duplicate packet is received. * TcpExtTCPDSACKOfoRecv + The TCP stack receives a DSACK, which indicate an out of order duplicate packet is received. @@ -640,23 +740,26 @@ A skb should be shifted or merged, but the TCP stack doesn't do it for some reasons. TCP out of order -=============== +================ * TcpExtTCPOFOQueue + The TCP layer receives an out of order packet and has enough memory to queue it. * TcpExtTCPOFODrop + The TCP layer receives an out of order packet but doesn't have enough memory, so drops it. Such packets won't be counted into TcpExtTCPOFOQueue. * TcpExtTCPOFOMerge + The received out of order packet has an overlay with the previous packet. the overlay part will be dropped. All of TcpExtTCPOFOMerge packets will also be counted into TcpExtTCPOFOQueue. TCP PAWS -======= +======== PAWS (Protection Against Wrapped Sequence numbers) is an algorithm which is used to drop old packets. It depends on the TCP timestamps. For detail information, please refer the `timestamp wiki`_ @@ -666,13 +769,15 @@ and the `RFC of PAWS`_. .. _timestamp wiki: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_timestamps * TcpExtPAWSActive + Packets are dropped by PAWS in Syn-Sent status. * TcpExtPAWSEstab + Packets are dropped by PAWS in any status other than Syn-Sent. TCP ACK skip -=========== +============ In some scenarios, kernel would avoid sending duplicate ACKs too frequently. Please find more details in the tcp_invalid_ratelimit section of the `sysctl document`_. When kernel decides to skip an ACK @@ -684,6 +789,7 @@ it has no data. .. _sysctl document: https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt * TcpExtTCPACKSkippedSynRecv + The ACK is skipped in Syn-Recv status. The Syn-Recv status means the TCP stack receives a SYN and replies SYN+ACK. Now the TCP stack is waiting for an ACK. Generally, the TCP stack doesn't need to send ACK @@ -697,6 +803,7 @@ increase TcpExtTCPACKSkippedSynRecv. * TcpExtTCPACKSkippedPAWS + The ACK is skipped due to PAWS (Protect Against Wrapped Sequence numbers) check fails. If the PAWS check fails in Syn-Recv, Fin-Wait-2 or Time-Wait statuses, the skipped ACK would be counted to @@ -705,18 +812,22 @@ TcpExtTCPACKSkippedTimeWait. In all other statuses, the skipped ACK would be counted to TcpExtTCPACKSkippedPAWS. * TcpExtTCPACKSkippedSeq + The sequence number is out of window and the timestamp passes the PAWS check and the TCP status is not Syn-Recv, Fin-Wait-2, and Time-Wait. * TcpExtTCPACKSkippedFinWait2 + The ACK is skipped in Fin-Wait-2 status, the reason would be either PAWS check fails or the received sequence number is out of window. 
* TcpExtTCPACKSkippedTimeWait + Tha ACK is skipped in Time-Wait status, the reason would be either PAWS check failed or the received sequence number is out of window. * TcpExtTCPACKSkippedChallenge + The ACK is skipped if the ACK is a challenge ACK. The RFC 5961 defines 3 kind of challenge ACK, please refer `RFC 5961 section 3.2`_, `RFC 5961 section 4.2`_ and `RFC 5961 section 5.2`_. Besides these @@ -729,8 +840,9 @@ unacknowledged number (more strict than `RFC 5961 section 5.2`_). .. _RFC 5961 section 5.2: https://tools.ietf.org/html/rfc5961#page-11 TCP receive window -================= +================== * TcpExtTCPWantZeroWindowAdv + Depending on current memory usage, the TCP stack tries to set receive window to zero. But the receive window might still be a no-zero value. For example, if the previous window size is 10, and the TCP @@ -738,14 +850,16 @@ stack receives 3 bytes, the current window size would be 7 even if the window size calculated by the memory usage is zero. * TcpExtTCPToZeroWindowAdv + The TCP receive window is set to zero from a no-zero value. * TcpExtTCPFromZeroWindowAdv + The TCP receive window is set to no-zero value from zero. Delayed ACK -========== +=========== The TCP Delayed ACK is a technique which is used for reducing the packet count in the network. For more details, please refer the `Delayed ACK wiki`_ @@ -753,10 +867,12 @@ packet count in the network. For more details, please refer the .. _Delayed ACK wiki: https://en.wikipedia.org/wiki/TCP_delayed_acknowledgment * TcpExtDelayedACKs + A delayed ACK timer expires. The TCP stack will send a pure ACK packet and exit the delayed ACK mode. * TcpExtDelayedACKLocked + A delayed ACK timer expires, but the TCP stack can't send an ACK immediately due to the socket is locked by a userspace program. The TCP stack will send a pure ACK later (after the userspace program @@ -765,29 +881,152 @@ TCP stack will also update TcpExtDelayedACKs and exit the delayed ACK mode. * TcpExtDelayedACKLost + It will be updated when the TCP stack receives a packet which has been ACKed. A Delayed ACK loss might cause this issue, but it would also be triggered by other reasons, such as a packet is duplicated in the network. Tail Loss Probe (TLP) -=================== +===================== TLP is an algorithm which is used to detect TCP packet loss. For more details, please refer the `TLP paper`_. .. _TLP paper: https://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01 * TcpExtTCPLossProbes + A TLP probe packet is sent. * TcpExtTCPLossProbeRecovery + A packet loss is detected and recovered by TLP. +TCP Fast Open +============= +TCP Fast Open is a technology which allows data transfer before the +3-way handshake complete. Please refer the `TCP Fast Open wiki`_ for a +general description. + +.. _TCP Fast Open wiki: https://en.wikipedia.org/wiki/TCP_Fast_Open + +* TcpExtTCPFastOpenActive + +When the TCP stack receives an ACK packet in the SYN-SENT status, and +the ACK packet acknowledges the data in the SYN packet, the TCP stack +understand the TFO cookie is accepted by the other side, then it +updates this counter. + +* TcpExtTCPFastOpenActiveFail + +This counter indicates that the TCP stack initiated a TCP Fast Open, +but it failed. This counter would be updated in three scenarios: (1) +the other side doesn't acknowledge the data in the SYN packet. (2) The +SYN packet which has the TFO cookie is timeout at least once. 
(3) +after the 3-way handshake, the retransmission timeout happens +net.ipv4.tcp_retries1 times, because some middle-boxes may black-hole +fast open after the handshake. + +* TcpExtTCPFastOpenPassive + +This counter indicates how many times the TCP stack accepts the fast +open request. + +* TcpExtTCPFastOpenPassiveFail + +This counter indicates how many times the TCP stack rejects the fast +open request. It is caused by either the TFO cookie is invalid or the +TCP stack finds an error during the socket creating process. + +* TcpExtTCPFastOpenListenOverflow + +When the pending fast open request number is larger than +fastopenq->max_qlen, the TCP stack will reject the fast open request +and update this counter. When this counter is updated, the TCP stack +won't update TcpExtTCPFastOpenPassive or +TcpExtTCPFastOpenPassiveFail. The fastopenq->max_qlen is set by the +TCP_FASTOPEN socket operation and it could not be larger than +net.core.somaxconn. For example: + +setsockopt(sfd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)); + +* TcpExtTCPFastOpenCookieReqd + +This counter indicates how many times a client wants to request a TFO +cookie. + +SYN cookies +=========== +SYN cookies are used to mitigate SYN flood, for details, please refer +the `SYN cookies wiki`_. + +.. _SYN cookies wiki: https://en.wikipedia.org/wiki/SYN_cookies + +* TcpExtSyncookiesSent + +It indicates how many SYN cookies are sent. + +* TcpExtSyncookiesRecv + +How many reply packets of the SYN cookies the TCP stack receives. + +* TcpExtSyncookiesFailed + +The MSS decoded from the SYN cookie is invalid. When this counter is +updated, the received packet won't be treated as a SYN cookie and the +TcpExtSyncookiesRecv counter wont be updated. + +Challenge ACK +============= +For details of challenge ACK, please refer the explaination of +TcpExtTCPACKSkippedChallenge. + +* TcpExtTCPChallengeACK + +The number of challenge acks sent. + +* TcpExtTCPSYNChallenge + +The number of challenge acks sent in response to SYN packets. After +updates this counter, the TCP stack might send a challenge ACK and +update the TcpExtTCPChallengeACK counter, or it might also skip to +send the challenge and update the TcpExtTCPACKSkippedChallenge. + +prune +===== +When a socket is under memory pressure, the TCP stack will try to +reclaim memory from the receiving queue and out of order queue. One of +the reclaiming method is 'collapse', which means allocate a big sbk, +copy the contiguous skbs to the single big skb, and free these +contiguous skbs. + +* TcpExtPruneCalled + +The TCP stack tries to reclaim memory for a socket. After updates this +counter, the TCP stack will try to collapse the out of order queue and +the receiving queue. If the memory is still not enough, the TCP stack +will try to discard packets from the out of order queue (and update the +TcpExtOfoPruned counter) + +* TcpExtOfoPruned + +The TCP stack tries to discard packet on the out of order queue. + +* TcpExtRcvPruned + +After 'collapse' and discard packets from the out of order queue, if +the actually used memory is still larger than the max allowed memory, +this counter will be updated. It means the 'prune' fails. + +* TcpExtTCPRcvCollapsed + +This counter indicates how many skbs are freed during 'collapse'. 
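Several of the TCP counters described above are driven directly by socket options set from userspace rather than by stack-internal events: TcpExtTCPKeepAlive by SO_KEEPALIVE, and the TcpExtTCPFastOpen* family by TCP_FASTOPEN. The following is a minimal, illustrative userspace sketch (not taken from the kernel sources; the port number 9000 is an arbitrary choice matching the nc examples in the next section) of a listener enabling both options::

  /*
   * Illustrative only: enable SO_KEEPALIVE and TCP_FASTOPEN on a
   * listening TCP socket. Error handling is reduced to perror() for
   * brevity.
   */
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <netinet/in.h>
  #include <netinet/tcp.h>
  #include <sys/socket.h>

  #ifndef SOL_TCP
  #define SOL_TCP IPPROTO_TCP     /* same value (6) as used in this document */
  #endif

  int main(void)
  {
          int sfd = socket(AF_INET, SOCK_STREAM, 0);
          int on = 1;
          int qlen = 5;           /* pending TFO requests, capped by somaxconn */
          struct sockaddr_in addr;

          if (sfd < 0) {
                  perror("socket");
                  return 1;
          }

          /* Keepalive probes sent by the stack feed TcpExtTCPKeepAlive. */
          if (setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
                  perror("SO_KEEPALIVE");

          /* Accepted fast open requests feed TcpExtTCPFastOpenPassive etc. */
          if (setsockopt(sfd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
                  perror("TCP_FASTOPEN");

          memset(&addr, 0, sizeof(addr));
          addr.sin_family = AF_INET;
          addr.sin_addr.s_addr = htonl(INADDR_ANY);
          addr.sin_port = htons(9000);

          if (bind(sfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
              listen(sfd, 16) < 0) {
                  perror("bind/listen");
                  close(sfd);
                  return 1;
          }

          /* ... accept() connections here ... */
          close(sfd);
          return 0;
  }

Watching the nstat output while clients connect to such a listener is one way to see the counters above change.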
+ examples -======= +======== ping test --------- +--------- Run the ping command against the public dns server 8.8.8.8:: nstatuser@nstat-a:~$ ping 8.8.8.8 -c 1 @@ -831,7 +1070,7 @@ and its corresponding Echo Reply packet are constructed by: So the IpExtInOctets and IpExtOutOctets are 20+16+48=84. tcp 3-way handshake ------------------- +------------------- On server side, we run:: nstatuser@nstat-b:~$ nc -lknv 0.0.0.0 9000 @@ -873,7 +1112,7 @@ ACK, so client sent 2 packets, received 1 packet, TcpInSegs increased 1, TcpOutSegs increased 2. TCP normal traffic ------------------ +------------------ Run nc on server:: nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000 @@ -996,7 +1235,7 @@ and the packet received from client qualified for fast path, so it was counted into 'TcpExtTCPHPHits'. TcpExtTCPAbortOnClose --------------------- +--------------------- On the server side, we run below python script:: import socket @@ -1030,7 +1269,7 @@ If we run tcpdump on the server side, we could find the server sent a RST after we type Ctrl-C. TcpExtTCPAbortOnMemory and TcpExtTCPAbortOnTimeout ------------------------------------------------ +--------------------------------------------------- Below is an example which let the orphan socket count be higher than net.ipv4.tcp_max_orphans. Change tcp_max_orphans to a smaller value on client:: @@ -1152,7 +1391,7 @@ FIN_WAIT_1 state finally. So we wait for a few minutes, we could find TcpExtTCPAbortOnTimeout 10 0.0 TcpExtTCPAbortOnLinger ---------------------- +---------------------- The server side code:: nstatuser@nstat-b:~$ cat server_linger.py @@ -1197,7 +1436,7 @@ After run client_linger.py, check the output of nstat:: TcpExtTCPAbortOnLinger 1 0.0 TcpExtTCPRcvCoalesce -------------------- +-------------------- On the server, we run a program which listen on TCP port 9000, but doesn't read any data:: @@ -1257,7 +1496,7 @@ the receiving queue. So the TCP layer merged the two packets, and we could find the TcpExtTCPRcvCoalesce increased 1. TcpExtListenOverflows and TcpExtListenDrops ----------------------------------------- +------------------------------------------- On server, run the nc command, listen on port 9000:: nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000 @@ -1305,7 +1544,7 @@ TcpExtListenOverflows and TcpExtListenDrops would be larger, because the SYN of the 4th nc was dropped, the client was retrying. IpInAddrErrors, IpExtInNoRoutes and IpOutNoRoutes ----------------------------------------------- +------------------------------------------------- server A IP address: 192.168.122.250 server B IP address: 192.168.122.251 Prepare on server A, add a route to server B:: @@ -1400,7 +1639,7 @@ a route for the 8.8.8.8 IP address, so server B increased IpOutNoRoutes. TcpExtTCPACKSkippedSynRecv ------------------------- +-------------------------- In this test, we send 3 same SYN packets from client to server. The first SYN will let server create a socket, set it to Syn-Recv status, and reply a SYN/ACK. The second SYN will let server reply the SYN/ACK @@ -1448,7 +1687,7 @@ Check snmp cunter on nstat-b:: As we expected, TcpExtTCPACKSkippedSynRecv is 1. TcpExtTCPACKSkippedPAWS ----------------------- +----------------------- To trigger PAWS, we could send an old SYN. On nstat-b, let nc listen on port 9000:: @@ -1485,7 +1724,7 @@ failed, the nstat-b replied an ACK for the first SYN, skipped the ACK for the second SYN, and updated TcpExtTCPACKSkippedPAWS. 
TcpExtTCPACKSkippedSeq --------------------- +---------------------- To trigger TcpExtTCPACKSkippedSeq, we send packets which have valid timestamp (to pass PAWS check) but the sequence number is out of window. The linux TCP stack would avoid to skip if the packet has diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 82236a17b5e6..86174ce8cd13 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt @@ -92,11 +92,11 @@ device. Switch ID ^^^^^^^^^ -The switchdev driver must implement the switchdev op switchdev_port_attr_get -for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same -physical ID for each port of a switch. The ID must be unique between switches -on the same system. The ID does not need to be unique between switches on -different systems. +The switchdev driver must implement the net_device operation +ndo_get_port_parent_id for each port netdev, returning the same physical ID for +each port of a switch. The ID must be unique between switches on the same +system. The ID does not need to be unique between switches on different +systems. The switch ID is used to locate ports on a switch and to know if aggregated ports belong to the same switch. @@ -196,7 +196,7 @@ The switch device will learn/forget source MAC address/VLAN on ingress packets and notify the switch driver of the mac/vlan/port tuples. The switch driver, in turn, will notify the bridge driver using the switchdev notifier call: - err = call_switchdev_notifiers(val, dev, info); + err = call_switchdev_notifiers(val, dev, info, extack); Where val is SWITCHDEV_FDB_ADD when learning and SWITCHDEV_FDB_DEL when forgetting, and info points to a struct switchdev_notifier_fdb_info. On @@ -232,10 +232,8 @@ Learning_sync attribute enables syncing of the learned/forgotten FDB entry to the bridge's FDB. It's possible, but not optimal, to enable learning on the device port and on the bridge port, and disable learning_sync. -To support learning and learning_sync port attributes, the driver implements -switchdev op switchdev_port_attr_get/set for -SWITCHDEV_ATTR_PORT_ID_BRIDGE_FLAGS. The driver should initialize the attributes -to the hardware defaults. +To support learning, the driver implements switchdev op +switchdev_port_attr_set for SWITCHDEV_ATTR_PORT_ID_{PRE}_BRIDGE_FLAGS. FDB Ageing ^^^^^^^^^^ @@ -373,22 +371,3 @@ The driver can monitor for updates to arp_tbl using the netevent notifier NETEVENT_NEIGH_UPDATE. The device can be programmed with resolved nexthops for the routes as arp_tbl updates. The driver implements ndo_neigh_destroy to know when arp_tbl neighbor entries are purged from the port. - -Transaction item queue -^^^^^^^^^^^^^^^^^^^^^^ - -For switchdev ops attr_set and obj_add, there is a 2 phase transaction model -used. First phase is to "prepare" anything needed, including various checks, -memory allocation, etc. The goal is to handle the stuff that is not unlikely -to fail here. The second phase is to "commit" the actual changes. - -Switchdev provides an infrastructure for sharing items (for example memory -allocations) between the two phases. - -The object created by a driver in "prepare" phase and it is queued up by: -switchdev_trans_item_enqueue() -During the "commit" phase, the driver gets the object by: -switchdev_trans_item_dequeue() - -If a transaction is aborted during "prepare" phase, switchdev code will handle -cleanup of the queued-up objects. 
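As a concrete illustration of the ndo_get_port_parent_id requirement described in the switchdev hunk above, here is a minimal, hypothetical driver sketch. Only ndo_get_port_parent_id and struct netdev_phys_item_id are the kernel interfaces named by the document; the "foo" switch and port structures are invented for this example:

  #include <linux/netdevice.h>
  #include <linux/etherdevice.h>
  #include <linux/string.h>

  struct foo_switch {
          u8 hw_id[ETH_ALEN];             /* one ID per physical switch */
  };

  struct foo_port {
          struct foo_switch *sw;          /* back-pointer set at probe time */
  };

  static int foo_get_port_parent_id(struct net_device *dev,
                                    struct netdev_phys_item_id *ppid)
  {
          struct foo_port *port = netdev_priv(dev);

          /* Every port netdev of the same switch must report the same ID. */
          ppid->id_len = sizeof(port->sw->hw_id);
          memcpy(ppid->id, port->sw->hw_id, ppid->id_len);

          return 0;
  }

  static const struct net_device_ops foo_netdev_ops = {
          .ndo_get_port_parent_id = foo_get_port_parent_id,
          /* ... remaining operations ... */
  };

Userspace can then group ports by switch via the phys_switch_id attribute exposed for each port netdev.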
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index 9d1432e0aaa8..bbdaf8990031 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -6,11 +6,21 @@ The interfaces for receiving network packages timestamps are: * SO_TIMESTAMP Generates a timestamp for each incoming packet in (not necessarily monotonic) system time. Reports the timestamp via recvmsg() in a - control message as struct timeval (usec resolution). + control message in usec resolution. + SO_TIMESTAMP is defined as SO_TIMESTAMP_NEW or SO_TIMESTAMP_OLD + based on the architecture type and time_t representation of libc. + Control message format is in struct __kernel_old_timeval for + SO_TIMESTAMP_OLD and in struct __kernel_sock_timeval for + SO_TIMESTAMP_NEW options respectively. * SO_TIMESTAMPNS Same timestamping mechanism as SO_TIMESTAMP, but reports the - timestamp as struct timespec (nsec resolution). + timestamp as struct timespec in nsec resolution. + SO_TIMESTAMPNS is defined as SO_TIMESTAMPNS_NEW or SO_TIMESTAMPNS_OLD + based on the architecture type and time_t representation of libc. + Control message format is in struct timespec for SO_TIMESTAMPNS_OLD + and in struct __kernel_timespec for SO_TIMESTAMPNS_NEW options + respectively. * IP_MULTICAST_LOOP + SO_TIMESTAMP[NS] Only for multicast:approximate transmit timestamp obtained by @@ -22,7 +32,7 @@ The interfaces for receiving network packages timestamps are: timestamps for stream sockets. -1.1 SO_TIMESTAMP: +1.1 SO_TIMESTAMP (also SO_TIMESTAMP_OLD and SO_TIMESTAMP_NEW): This socket option enables timestamping of datagrams on the reception path. Because the destination socket, if any, is not known early in @@ -31,15 +41,25 @@ same is true for all early receive timestamp options. For interface details, see `man 7 socket`. +Always use SO_TIMESTAMP_NEW timestamp to always get timestamp in +struct __kernel_sock_timeval format. -1.2 SO_TIMESTAMPNS: +SO_TIMESTAMP_OLD returns incorrect timestamps after the year 2038 +on 32 bit machines. + +1.2 SO_TIMESTAMPNS (also SO_TIMESTAMPNS_OLD and SO_TIMESTAMPNS_NEW): This option is identical to SO_TIMESTAMP except for the returned data type. Its struct timespec allows for higher resolution (ns) timestamps than the timeval of SO_TIMESTAMP (ms). +Always use SO_TIMESTAMPNS_NEW timestamp to always get timestamp in +struct __kernel_timespec format. + +SO_TIMESTAMPNS_OLD returns incorrect timestamps after the year 2038 +on 32 bit machines. -1.3 SO_TIMESTAMPING: +1.3 SO_TIMESTAMPING (also SO_TIMESTAMPING_OLD and SO_TIMESTAMPING_NEW): Supports multiple types of timestamp requests. As a result, this socket option takes a bitmap of flags, not a boolean. In @@ -323,10 +343,23 @@ SO_TIMESTAMP and SO_TIMESTAMPNS records can be retrieved. These timestamps are returned in a control message with cmsg_level SOL_SOCKET, cmsg_type SCM_TIMESTAMPING, and payload of type +For SO_TIMESTAMPING_OLD: + struct scm_timestamping { struct timespec ts[3]; }; +For SO_TIMESTAMPING_NEW: + +struct scm_timestamping64 { + struct __kernel_timespec ts[3]; + +Always use SO_TIMESTAMPING_NEW timestamp to always get timestamp in +struct scm_timestamping64 format. + +SO_TIMESTAMPING_OLD returns incorrect timestamps after the year 2038 +on 32 bit machines. + The structure can return up to three timestamps. This is a legacy feature. At least one field is non-zero at any time. Most timestamps are passed in ts[0]. Hardware timestamps are passed in ts[2]. 
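To illustrate the reception timestamp interface discussed above, here is a minimal userspace sketch (not part of the kernel sources) that enables SO_TIMESTAMPNS on a UDP socket and reads back the SCM_TIMESTAMPNS control message; the fallback #define, the fixed buffer sizes and the missing bind()/receive loop are simplifications assumed for brevity::

  #include <stdio.h>
  #include <string.h>
  #include <time.h>
  #include <sys/socket.h>
  #include <sys/uio.h>
  #include <netinet/in.h>

  #ifndef SCM_TIMESTAMPNS
  #define SCM_TIMESTAMPNS SO_TIMESTAMPNS  /* same value in the kernel headers */
  #endif

  static void print_rx_timestamp(int fd)
  {
          char data[1500];
          char control[512];
          struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
          struct msghdr msg = {
                  .msg_iov = &iov,
                  .msg_iovlen = 1,
                  .msg_control = control,
                  .msg_controllen = sizeof(control),
          };
          struct cmsghdr *cmsg;

          if (recvmsg(fd, &msg, 0) < 0) {
                  perror("recvmsg");
                  return;
          }

          for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                  if (cmsg->cmsg_level == SOL_SOCKET &&
                      cmsg->cmsg_type == SCM_TIMESTAMPNS) {
                          struct timespec ts;

                          /* nsec-resolution reception time of this datagram */
                          memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
                          printf("rx at %lld.%09ld\n",
                                 (long long)ts.tv_sec, ts.tv_nsec);
                  }
          }
  }

  int main(void)
  {
          int fd = socket(AF_INET, SOCK_DGRAM, 0);
          int on = 1;

          if (fd < 0) {
                  perror("socket");
                  return 1;
          }

          /* Ask the kernel to attach a timestamp to each incoming datagram. */
          if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &on, sizeof(on)) < 0)
                  perror("SO_TIMESTAMPNS");

          /* ... bind() to a local port, then: */
          print_rx_timestamp(fd);
          return 0;
  }

On systems where the year-2038-safe options are available, SO_TIMESTAMPNS_NEW with struct __kernel_timespec can be requested instead, as described above.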
diff --git a/Documentation/power/energy-model.txt b/Documentation/power/energy-model.txt new file mode 100644 index 000000000000..a2b0ae4c76bd --- /dev/null +++ b/Documentation/power/energy-model.txt @@ -0,0 +1,144 @@ + ==================== + Energy Model of CPUs + ==================== + +1. Overview +----------- + +The Energy Model (EM) framework serves as an interface between drivers knowing +the power consumed by CPUs at various performance levels, and the kernel +subsystems willing to use that information to make energy-aware decisions. + +The source of the information about the power consumed by CPUs can vary greatly +from one platform to another. These power costs can be estimated using +devicetree data in some cases. In others, the firmware will know better. +Alternatively, userspace might be best positioned. And so on. In order to avoid +each and every client subsystem to re-implement support for each and every +possible source of information on its own, the EM framework intervenes as an +abstraction layer which standardizes the format of power cost tables in the +kernel, hence enabling to avoid redundant work. + +The figure below depicts an example of drivers (Arm-specific here, but the +approach is applicable to any architecture) providing power costs to the EM +framework, and interested clients reading the data from it. + + +---------------+ +-----------------+ +---------------+ + | Thermal (IPA) | | Scheduler (EAS) | | Other | + +---------------+ +-----------------+ +---------------+ + | | em_pd_energy() | + | | em_cpu_get() | + +---------+ | +---------+ + | | | + v v v + +---------------------+ + | Energy Model | + | Framework | + +---------------------+ + ^ ^ ^ + | | | em_register_perf_domain() + +----------+ | +---------+ + | | | + +---------------+ +---------------+ +--------------+ + | cpufreq-dt | | arm_scmi | | Other | + +---------------+ +---------------+ +--------------+ + ^ ^ ^ + | | | + +--------------+ +---------------+ +--------------+ + | Device Tree | | Firmware | | ? | + +--------------+ +---------------+ +--------------+ + +The EM framework manages power cost tables per 'performance domain' in the +system. A performance domain is a group of CPUs whose performance is scaled +together. Performance domains generally have a 1-to-1 mapping with CPUFreq +policies. All CPUs in a performance domain are required to have the same +micro-architecture. CPUs in different performance domains can have different +micro-architectures. + + +2. Core APIs +------------ + + 2.1 Config options + +CONFIG_ENERGY_MODEL must be enabled to use the EM framework. + + + 2.2 Registration of performance domains + +Drivers are expected to register performance domains into the EM framework by +calling the following API: + + int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, + struct em_data_callback *cb); + +Drivers must specify the CPUs of the performance domains using the cpumask +argument, and provide a callback function returning tuples +for each capacity state. The callback function provided by the driver is free +to fetch data from any relevant location (DT, firmware, ...), and by any mean +deemed necessary. See Section 3. for an example of driver implementing this +callback, and kernel/power/energy_model.c for further documentation on this +API. + + + 2.3 Accessing performance domains + +Subsystems interested in the energy model of a CPU can retrieve it using the +em_cpu_get() API. 
The energy model tables are allocated once upon creation of +the performance domains, and kept in memory untouched. + +The energy consumed by a performance domain can be estimated using the +em_pd_energy() API. The estimation is performed assuming that the schedutil +CPUfreq governor is in use. + +More details about the above APIs can be found in include/linux/energy_model.h. + + +3. Example driver +----------------- + +This section provides a simple example of a CPUFreq driver registering a +performance domain in the Energy Model framework using the (fake) 'foo' +protocol. The driver implements an est_power() function to be provided to the +EM framework. + + -> drivers/cpufreq/foo_cpufreq.c + +01 static int est_power(unsigned long *mW, unsigned long *KHz, int cpu) +02 { +03 long freq, power; +04 +05 /* Use the 'foo' protocol to ceil the frequency */ +06 freq = foo_get_freq_ceil(cpu, *KHz); +07 if (freq < 0); +08 return freq; +09 +10 /* Estimate the power cost for the CPU at the relevant freq. */ +11 power = foo_estimate_power(cpu, freq); +12 if (power < 0); +13 return power; +14 +15 /* Return the values to the EM framework */ +16 *mW = power; +17 *KHz = freq; +18 +19 return 0; +20 } +21 +22 static int foo_cpufreq_init(struct cpufreq_policy *policy) +23 { +24 struct em_data_callback em_cb = EM_DATA_CB(est_power); +25 int nr_opp, ret; +26 +27 /* Do the actual CPUFreq init work ... */ +28 ret = do_foo_cpufreq_init(policy); +29 if (ret) +30 return ret; +31 +32 /* Find the number of OPPs for this policy */ +33 nr_opp = foo_get_nr_opp(policy); +34 +35 /* And register the new performance domain */ +36 em_register_perf_domain(policy->cpus, nr_opp, &em_cb); +37 +38 return 0; +39 } diff --git a/Documentation/process/4.Coding.rst b/Documentation/process/4.Coding.rst index cfe264889447..4b7a5ab3cec1 100644 --- a/Documentation/process/4.Coding.rst +++ b/Documentation/process/4.Coding.rst @@ -249,7 +249,7 @@ features; most of these are found in the "kernel hacking" submenu. Several of these options should be turned on for any kernel used for development or testing purposes. In particular, you should turn on: - - ENABLE_WARN_DEPRECATED, ENABLE_MUST_CHECK, and FRAME_WARN to get an + - ENABLE_MUST_CHECK and FRAME_WARN to get an extra set of warnings for problems like the use of deprecated interfaces or ignoring an important return value from a function. The output generated by these warnings can be verbose, but one need not worry about diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst index dc2ddc345044..fbb9297e6360 100644 --- a/Documentation/process/applying-patches.rst +++ b/Documentation/process/applying-patches.rst @@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to generate a patch representing the differences between two patches and then apply the result. -This will let you move from something like 4.7.2 to 4.7.3 in a single +This will let you move from something like 5.7.2 to 5.7.3 in a single step. The -z flag to interdiff will even let you feed it patches in gzip or bzip2 compressed form directly without the use of zcat or bzcat or manual decompression. 
-Here's how you'd go from 4.7.2 to 4.7.3 in a single step:: +Here's how you'd go from 5.7.2 to 5.7.3 in a single step:: - interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1 + interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1 Although interdiff may save you a step or two you are generally advised to do the additional steps since interdiff can get things wrong in some cases. @@ -245,62 +245,67 @@ The patches are available at http://kernel.org/ Most recent patches are linked from the front page, but they also have specific homes. -The 4.x.y (-stable) and 4.x patches live at +The 5.x.y (-stable) and 5.x patches live at - https://www.kernel.org/pub/linux/kernel/v4.x/ + https://www.kernel.org/pub/linux/kernel/v5.x/ -The -rc patches live at +The -rc patches are not stored on the webserver but are generated on +demand from git tags such as - https://www.kernel.org/pub/linux/kernel/v4.x/testing/ + https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0 +The stable -rc patches live at -The 4.x kernels + https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/ + + +The 5.x kernels =============== These are the base stable releases released by Linus. The highest numbered release is the most recent. If regressions or other serious flaws are found, then a -stable fix patch -will be released (see below) on top of this base. Once a new 4.x base +will be released (see below) on top of this base. Once a new 5.x base kernel is released, a patch is made available that is a delta between the -previous 4.x kernel and the new one. +previous 5.x kernel and the new one. -To apply a patch moving from 4.6 to 4.7, you'd do the following (note -that such patches do **NOT** apply on top of 4.x.y kernels but on top of the -base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to -first revert the 4.x.y patch). +To apply a patch moving from 5.6 to 5.7, you'd do the following (note +that such patches do **NOT** apply on top of 5.x.y kernels but on top of the +base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to +first revert the 5.x.y patch). Here are some examples:: - # moving from 4.6 to 4.7 + # moving from 5.6 to 5.7 - $ cd ~/linux-4.6 # change to kernel source dir - $ patch -p1 < ../patch-4.7 # apply the 4.7 patch + $ cd ~/linux-5.6 # change to kernel source dir + $ patch -p1 < ../patch-5.7 # apply the 5.7 patch $ cd .. - $ mv linux-4.6 linux-4.7 # rename source dir + $ mv linux-5.6 linux-5.7 # rename source dir - # moving from 4.6.1 to 4.7 + # moving from 5.6.1 to 5.7 - $ cd ~/linux-4.6.1 # change to kernel source dir - $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch - # source dir is now 4.6 - $ patch -p1 < ../patch-4.7 # apply new 4.7 patch + $ cd ~/linux-5.6.1 # change to kernel source dir + $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch + # source dir is now 5.6 + $ patch -p1 < ../patch-5.7 # apply new 5.7 patch $ cd .. - $ mv linux-4.6.1 linux-4.7 # rename source dir + $ mv linux-5.6.1 linux-5.7 # rename source dir -The 4.x.y kernels +The 5.x.y kernels ================= Kernels with 3-digit versions are -stable kernels. They contain small(ish) critical fixes for security problems or significant regressions discovered -in a given 4.x kernel. +in a given 5.x kernel. This is the recommended branch for users who want the most recent stable kernel and are not interested in helping test development/experimental versions. 
-If no 4.x.y kernel is available, then the highest numbered 4.x kernel is +If no 5.x.y kernel is available, then the highest numbered 5.x kernel is the current stable kernel. .. note:: @@ -308,23 +313,23 @@ the current stable kernel. The -stable team usually do make incremental patches available as well as patches against the latest mainline release, but I only cover the non-incremental ones below. The incremental ones can be found at - https://www.kernel.org/pub/linux/kernel/v4.x/incr/ + https://www.kernel.org/pub/linux/kernel/v5.x/incr/ -These patches are not incremental, meaning that for example the 4.7.3 -patch does not apply on top of the 4.7.2 kernel source, but rather on top -of the base 4.7 kernel source. +These patches are not incremental, meaning that for example the 5.7.3 +patch does not apply on top of the 5.7.2 kernel source, but rather on top +of the base 5.7 kernel source. -So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel -source you have to first back out the 4.7.2 patch (so you are left with a -base 4.7 kernel source) and then apply the new 4.7.3 patch. +So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel +source you have to first back out the 5.7.2 patch (so you are left with a +base 5.7 kernel source) and then apply the new 5.7.3 patch. Here's a small example:: - $ cd ~/linux-4.7.2 # change to the kernel source dir - $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch - $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch + $ cd ~/linux-5.7.2 # change to the kernel source dir + $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch + $ patch -p1 < ../patch-5.7.3 # apply the new 5.7.3 patch $ cd .. - $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir + $ mv linux-5.7.2 linux-5.7.3 # rename the kernel source dir The -rc kernels =============== @@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing development kernels but do not want to run some of the really experimental stuff (such people should see the sections about -next and -mm kernels below). -The -rc patches are not incremental, they apply to a base 4.x kernel, just -like the 4.x.y patches described above. The kernel version before the -rcN +The -rc patches are not incremental, they apply to a base 5.x kernel, just +like the 5.x.y patches described above. The kernel version before the -rcN suffix denotes the version of the kernel that this -rc kernel will eventually turn into. -So, 4.8-rc5 means that this is the fifth release candidate for the 4.8 -kernel and the patch should be applied on top of the 4.7 kernel source. +So, 5.8-rc5 means that this is the fifth release candidate for the 5.8 +kernel and the patch should be applied on top of the 5.7 kernel source. Here are 3 examples of how to apply these patches:: - # first an example of moving from 4.7 to 4.8-rc3 + # first an example of moving from 5.7 to 5.8-rc3 - $ cd ~/linux-4.7 # change to the 4.7 source dir - $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch + $ cd ~/linux-5.7 # change to the 5.7 source dir + $ patch -p1 < ../patch-5.8-rc3 # apply the 5.8-rc3 patch $ cd .. 
- $ mv linux-4.7 linux-4.8-rc3 # rename the source dir + $ mv linux-5.7 linux-5.8-rc3 # rename the source dir - # now let's move from 4.8-rc3 to 4.8-rc5 + # now let's move from 5.8-rc3 to 5.8-rc5 - $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir - $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch - $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch + $ cd ~/linux-5.8-rc3 # change to the 5.8-rc3 dir + $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch + $ patch -p1 < ../patch-5.8-rc5 # apply the new 5.8-rc5 patch $ cd .. - $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir + $ mv linux-5.8-rc3 linux-5.8-rc5 # rename the source dir - # finally let's try and move from 4.7.3 to 4.8-rc5 + # finally let's try and move from 5.7.3 to 5.8-rc5 - $ cd ~/linux-4.7.3 # change to the kernel source dir - $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch - $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch + $ cd ~/linux-5.7.3 # change to the kernel source dir + $ patch -p1 -R < ../patch-5.7.3 # revert the 5.7.3 patch + $ patch -p1 < ../patch-5.8-rc5 # apply new 5.8-rc5 patch $ cd .. - $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir + $ mv linux-5.7.3 linux-5.8-rc5 # rename the kernel source dir The -mm patches and the linux-next tree diff --git a/Documentation/process/coding-style.rst b/Documentation/process/coding-style.rst index b78dd680c038..8ea913e99fa1 100644 --- a/Documentation/process/coding-style.rst +++ b/Documentation/process/coding-style.rst @@ -443,7 +443,7 @@ In function prototypes, include parameter names with their data types. Although this is not required by the C language, it is preferred in Linux because it is a simple way to add valuable information for the reader. -Do not use the `extern' keyword with function prototypes as this makes +Do not use the ``extern`` keyword with function prototypes as this makes lines longer and isn't strictly necessary. @@ -595,26 +595,43 @@ values. To do the latter, you can stick the following in your .emacs file: (* (max steps 1) c-basic-offset))) - (add-hook 'c-mode-common-hook - (lambda () - ;; Add kernel style - (c-add-style - "linux-tabs-only" - '("linux" (c-offsets-alist - (arglist-cont-nonempty - c-lineup-gcc-asm-reg - c-lineup-arglist-tabs-only)))))) - - (add-hook 'c-mode-hook - (lambda () - (let ((filename (buffer-file-name))) - ;; Enable kernel mode for the appropriate files - (when (and filename - (string-match (expand-file-name "~/src/linux-trees") - filename)) - (setq indent-tabs-mode t) - (setq show-trailing-whitespace t) - (c-set-style "linux-tabs-only"))))) + (dir-locals-set-class-variables + 'linux-kernel + '((c-mode . ( + (c-basic-offset . 8) + (c-label-minimum-indentation . 0) + (c-offsets-alist . ( + (arglist-close . c-lineup-arglist-tabs-only) + (arglist-cont-nonempty . + (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only)) + (arglist-intro . +) + (brace-list-intro . +) + (c . c-lineup-C-comments) + (case-label . 0) + (comment-intro . c-lineup-comment) + (cpp-define-intro . +) + (cpp-macro . -1000) + (cpp-macro-cont . +) + (defun-block-intro . +) + (else-clause . 0) + (func-decl-cont . +) + (inclass . +) + (inher-cont . c-lineup-multi-inher) + (knr-argdecl-intro . 0) + (label . -1000) + (statement . 0) + (statement-block-intro . +) + (statement-case-intro . +) + (statement-cont . +) + (substatement . +) + )) + (indent-tabs-mode . t) + (show-trailing-whitespace . 
t) + )))) + + (dir-locals-set-directory-class + (expand-file-name "~/src/linux-trees") + 'linux-kernel) This will make emacs go better with the kernel coding style for C files below ``~/src/linux-trees``. @@ -921,7 +938,37 @@ result. Typical examples would be functions that return pointers; they use NULL or the ERR_PTR mechanism to report failure. -17) Don't re-invent the kernel macros +17) Using bool +-------------- + +The Linux kernel bool type is an alias for the C99 _Bool type. bool values can +only evaluate to 0 or 1, and implicit or explicit conversion to bool +automatically converts the value to true or false. When using bool types the +!! construction is not needed, which eliminates a class of bugs. + +When working with bool values the true and false definitions should be used +instead of 1 and 0. + +bool function return types and stack variables are always fine to use whenever +appropriate. Use of bool is encouraged to improve readability and is often a +better option than 'int' for storing boolean values. + +Do not use bool if cache line layout or size of the value matters, as its size +and alignment varies based on the compiled architecture. Structures that are +optimized for alignment and size should not use bool. + +If a structure has many true/false values, consider consolidating them into a +bitfield with 1 bit members, or using an appropriate fixed width type, such as +u8. + +Similarly for function arguments, many true/false values can be consolidated +into a single bitwise 'flags' argument and 'flags' can often be a more +readable alternative if the call-sites have naked true/false constants. + +Otherwise limited use of bool in structures and arguments can improve +readability. + +18) Don't re-invent the kernel macros ------------------------------------- The header file include/linux/kernel.h contains a number of macros that @@ -944,7 +991,7 @@ need them. Feel free to peruse that header file to see what else is already defined that you shouldn't reproduce in your code. -18) Editor modelines and other cruft +19) Editor modelines and other cruft ------------------------------------ Some editors can interpret configuration information embedded in source files, @@ -978,7 +1025,7 @@ own custom mode, or may have some other magic method for making indentation work correctly. -19) Inline assembly +20) Inline assembly ------------------- In architecture-specific code, you may need to use inline assembly to interface @@ -1010,7 +1057,7 @@ the next instruction in the assembly output: : /* outputs */ : /* inputs */ : /* clobbers */); -20) Conditional Compilation +21) Conditional Compilation --------------------------- Wherever possible, don't use preprocessor conditionals (#if, #ifdef) in .c diff --git a/Documentation/process/howto.rst b/Documentation/process/howto.rst index 58b2f46c4f98..ad2b6c852b95 100644 --- a/Documentation/process/howto.rst +++ b/Documentation/process/howto.rst @@ -225,7 +225,7 @@ Cross-Reference project, which is able to present source code in a self-referential, indexed webpage format. An excellent up-to-date repository of the kernel code may be found at: - http://lxr.free-electrons.com/ + https://elixir.bootlin.com/ The development process @@ -235,23 +235,21 @@ Linux kernel development process currently consists of a few different main kernel "branches" and lots of different subsystem-specific kernel branches. 
These different branches are: - - main 4.x kernel tree - - 4.x.y -stable kernel tree - - 4.x -git kernel patches - - subsystem specific kernel trees and patches - - the 4.x -next kernel tree for integration tests + - Linus's mainline tree + - Various stable trees with multiple major numbers + - Subsystem-specific trees + - linux-next integration testing tree -4.x kernel tree -~~~~~~~~~~~~~~~ +Mainline tree +~~~~~~~~~~~~~ -4.x kernels are maintained by Linus Torvalds, and can be found on -https://kernel.org in the pub/linux/kernel/v4.x/ directory. Its development -process is as follows: +Mainline tree are maintained by Linus Torvalds, and can be found at +https://kernel.org or in the repo. Its development process is as follows: - As soon as a new kernel is released a two weeks window is open, during this period of time maintainers can submit big diffs to Linus, usually the patches that have already been included in the - -next kernel for a few weeks. The preferred way to submit big changes + linux-next for a few weeks. The preferred way to submit big changes is using git (the kernel's source management tool, more information can be found at https://git-scm.com/) but plain patches are also just fine. @@ -278,21 +276,19 @@ mailing list about kernel releases: released according to perceived bug status, not according to a preconceived timeline."* -4.x.y -stable kernel tree -~~~~~~~~~~~~~~~~~~~~~~~~~ +Various stable trees with multiple major numbers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Kernels with 3-part versions are -stable kernels. They contain relatively small and critical fixes for security problems or significant -regressions discovered in a given 4.x kernel. +regressions discovered in a given major mainline release, with the first +2-part of version number are the same correspondingly. This is the recommended branch for users who want the most recent stable kernel and are not interested in helping test development/experimental versions. -If no 4.x.y kernel is available, then the highest numbered 4.x -kernel is the current stable kernel. - -4.x.y are maintained by the "stable" team , and +Stable trees are maintained by the "stable" team , and are released as needs dictate. The normal release period is approximately two weeks, but it can be longer if there are no pressing problems. A security-related problem, instead, can cause a release to happen almost @@ -302,17 +298,8 @@ The file :ref:`Documentation/process/stable-kernel-rules.rst `_. + +_`MODULE_LICENSE` +----------------- + + Loadable kernel modules also require a MODULE_LICENSE() tag. This tag is + neither a replacement for proper source code license information + (SPDX-License-Identifier) nor in any way relevant for expressing or + determining the exact license under which the source code of the module + is provided. + + The sole purpose of this tag is to provide sufficient information + whether the module is free software or proprietary for the kernel + module loader and for user space tools. + + The valid license strings for MODULE_LICENSE() are: + + ============================= ============================================= + "GPL" Module is licensed under GPL version 2. This + does not express any distinction between + GPL-2.0-only or GPL-2.0-or-later. The exact + license information can only be determined + via the license information in the + corresponding source files. + + "GPL v2" Same as "GPL". It exists for historic + reasons. 
+ + "GPL and additional rights" Historical variant of expressing that the + module source is dual licensed under a + GPL v2 variant and MIT license. Please do + not use in new code. + + "Dual MIT/GPL" The correct way of expressing that the + module is dual licensed under a GPL v2 + variant or MIT license choice. + + "Dual BSD/GPL" The module is dual licensed under a GPL v2 + variant or BSD license choice. The exact + variant of the BSD license can only be + determined via the license information + in the corresponding source files. + + "Dual MPL/GPL" The module is dual licensed under a GPL v2 + variant or Mozilla Public License (MPL) + choice. The exact variant of the MPL + license can only be determined via the + license information in the corresponding + source files. + + "Proprietary" The module is under a proprietary license. + This string is solely for proprietary third + party modules and cannot be used for modules + which have their source code in the kernel + tree. Modules tagged that way are tainting + the kernel with the 'P' flag when loaded and + the kernel module loader refuses to link such + modules against symbols which are exported + with EXPORT_SYMBOL_GPL(). + ============================= ============================================= + + + diff --git a/Documentation/process/stable-api-nonsense.rst b/Documentation/process/stable-api-nonsense.rst index 24f5aeecee91..a9625ab1fdc2 100644 --- a/Documentation/process/stable-api-nonsense.rst +++ b/Documentation/process/stable-api-nonsense.rst @@ -169,14 +169,13 @@ driver for every different kernel version for every distribution is a nightmare, and trying to keep up with an ever changing kernel interface is also a rough job. -Simple, get your kernel driver into the main kernel tree (remember we -are talking about GPL released drivers here, if your code doesn't fall -under this category, good luck, you are on your own here, you leech -.) If your -driver is in the tree, and a kernel interface changes, it will be fixed -up by the person who did the kernel change in the first place. This -ensures that your driver is always buildable, and works over time, with -very little effort on your part. +Simple, get your kernel driver into the main kernel tree (remember we are +talking about drivers released under a GPL-compatible license here, if your +code doesn't fall under this category, good luck, you are on your own here, +you leech). If your driver is in the tree, and a kernel interface changes, +it will be fixed up by the person who did the kernel change in the first +place. This ensures that your driver is always buildable, and works over +time, with very little effort on your part. The very good side effects of having your driver in the main kernel tree are: diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst index 0de6f6145cc6..06f743b612c4 100644 --- a/Documentation/process/stable-kernel-rules.rst +++ b/Documentation/process/stable-kernel-rules.rst @@ -38,6 +38,9 @@ Procedure for submitting patches to the -stable tree - If the patch covers files in net/ or drivers/net please follow netdev stable submission guidelines as described in :ref:`Documentation/networking/netdev-FAQ.rst ` + after first checking the stable networking queue at + https://patchwork.ozlabs.org/bundle/davem/stable/?series=&submitter=&state=*&q=&archive= + to ensure the requested patch is not already queued up. 
- Security patches should not be handled (solely) by the -stable review process but should follow the procedures in :ref:`Documentation/admin-guide/security-bugs.rst `. @@ -98,9 +101,9 @@ text, like this: commit upstream. -Additionally, some patches submitted via Option 1 may have additional patch -prerequisites which can be cherry-picked. This can be specified in the following -format in the sign-off area: +Additionally, some patches submitted via :ref:`option_1` may have additional +patch prerequisites which can be cherry-picked. This can be specified in the +following format in the sign-off area: .. code-block:: none diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index 30dc00a364e8..be7d1829c3af 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -182,9 +182,11 @@ change five years from now. If your patch fixes a bug in a specific commit, e.g. you found an issue using ``git bisect``, please use the 'Fixes:' tag with the first 12 characters of -the SHA-1 ID, and the one line summary. For example:: +the SHA-1 ID, and the one line summary. Do not split the tag across multiple +lines, tags are exempt from the "wrap at 75 columns" rule in order to simplify +parsing scripts. For example:: - Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()") + Fixes: 54a4f0239f2e ("KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed") The following ``git config`` settings can be used to add a pretty format for outputting the above style in the ``git log`` or ``git show`` commands:: diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt new file mode 100644 index 000000000000..197d81f4b836 --- /dev/null +++ b/Documentation/scheduler/sched-energy.txt @@ -0,0 +1,425 @@ + ======================= + Energy Aware Scheduling + ======================= + +1. Introduction +--------------- + +Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict +the impact of its decisions on the energy consumed by CPUs. EAS relies on an +Energy Model (EM) of the CPUs to select an energy efficient CPU for each task, +with a minimal impact on throughput. This document aims at providing an +introduction on how EAS works, what are the main design decisions behind it, and +details what is needed to get it to run. + +Before going any further, please note that at the time of writing: + + /!\ EAS does not support platforms with symmetric CPU topologies /!\ + +EAS operates only on heterogeneous CPU topologies (such as Arm big.LITTLE) +because this is where the potential for saving energy through scheduling is +the highest. + +The actual EM used by EAS is _not_ maintained by the scheduler, but by a +dedicated framework. For details about this framework and what it provides, +please refer to its documentation (see Documentation/power/energy-model.txt). + + +2. Background and Terminology +----------------------------- + +To make it clear from the start: + - energy = [joule] (resource like a battery on powered devices) + - power = energy/time = [joule/second] = [watt] + +The goal of EAS is to minimize energy, while still getting the job done. That +is, we want to maximize: + + performance [inst/s] + -------------------- + power [W] + +which is equivalent to minimizing: + + energy [J] + ----------- + instruction + +while still getting 'good' performance. 
It is essentially an alternative +optimization objective to the current performance-only objective for the +scheduler. This alternative considers two objectives: energy-efficiency and +performance. + +The idea behind introducing an EM is to allow the scheduler to evaluate the +implications of its decisions rather than blindly applying energy-saving +techniques that may have positive effects only on some platforms. At the same +time, the EM must be as simple as possible to minimize the scheduler latency +impact. + +In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time +for the scheduler to decide where a task should run (during wake-up), the EM +is used to break the tie between several good CPU candidates and pick the one +that is predicted to yield the best energy consumption without harming the +system's throughput. The predictions made by EAS rely on specific elements of +knowledge about the platform's topology, which include the 'capacity' of CPUs, +and their respective energy costs. + + +3. Topology information +----------------------- + +EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to +differentiate CPUs with different computing throughput. The 'capacity' of a CPU +represents the amount of work it can absorb when running at its highest +frequency compared to the most capable CPU of the system. Capacity values are +normalized in a 1024 range, and are comparable with the utilization signals of +tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks +to capacity and utilization values, EAS is able to estimate how big/busy a +task/CPU is, and to take this into consideration when evaluating performance vs +energy trade-offs. The capacity of CPUs is provided via arch-specific code +through the arch_scale_cpu_capacity() callback. + +The rest of platform knowledge used by EAS is directly read from the Energy +Model (EM) framework. The EM of a platform is composed of a power cost table +per 'performance domain' in the system (see Documentation/power/energy-model.txt +for futher details about performance domains). + +The scheduler manages references to the EM objects in the topology code when the +scheduling domains are built, or re-built. For each root domain (rd), the +scheduler maintains a singly linked list of all performance domains intersecting +the current rd->span. Each node in the list contains a pointer to a struct +em_perf_domain as provided by the EM framework. + +The lists are attached to the root domains in order to cope with exclusive +cpuset configurations. Since the boundaries of exclusive cpusets do not +necessarily match those of performance domains, the lists of different root +domains can contain duplicate elements. + +Example 1. + Let us consider a platform with 12 CPUs, split in 3 performance domains + (pd0, pd4 and pd8), organized as follows: + + CPUs: 0 1 2 3 4 5 6 7 8 9 10 11 + PDs: |--pd0--|--pd4--|---pd8---| + RDs: |----rd1----|-----rd2-----| + + Now, consider that userspace decided to split the system with two + exclusive cpusets, hence creating two independent root domains, each + containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the + above figure. Since pd4 intersects with both rd1 and rd2, it will be + present in the linked list '->pd' attached to each of them: + * rd1->pd: pd0 -> pd4 + * rd2->pd: pd4 -> pd8 + + Please note that the scheduler will create two duplicate list nodes for + pd4 (one for each list). 
However, both just hold a pointer to the same + shared data structure of the EM framework. + +Since the access to these lists can happen concurrently with hotplug and other +things, they are protected by RCU, like the rest of topology structures +manipulated by the scheduler. + +EAS also maintains a static key (sched_energy_present) which is enabled when at +least one root domain meets all conditions for EAS to start. Those conditions +are summarized in Section 6. + + +4. Energy-Aware task placement +------------------------------ + +EAS overrides the CFS task wake-up balancing code. It uses the EM of the +platform and the PELT signals to choose an energy-efficient target CPU during +wake-up balance. When EAS is enabled, select_task_rq_fair() calls +find_energy_efficient_cpu() to do the placement decision. This function looks +for the CPU with the highest spare capacity (CPU capacity - CPU utilization) in +each performance domain since it is the one which will allow us to keep the +frequency the lowest. Then, the function checks if placing the task there could +save energy compared to leaving it on prev_cpu, i.e. the CPU where the task ran +in its previous activation. + +find_energy_efficient_cpu() uses compute_energy() to estimate what will be the +energy consumed by the system if the waking task was migrated. compute_energy() +looks at the current utilization landscape of the CPUs and adjusts it to +'simulate' the task migration. The EM framework provides the em_pd_energy() API +which computes the expected energy consumption of each performance domain for +the given utilization landscape. + +An example of energy-optimized task placement decision is detailed below. + +Example 2. + Let us consider a (fake) platform with 2 independent performance domains + composed of two CPUs each. CPU0 and CPU1 are little CPUs; CPU2 and CPU3 + are big. + + The scheduler must decide where to place a task P whose util_avg = 200 + and prev_cpu = 0. + + The current utilization landscape of the CPUs is depicted on the graph + below. CPUs 0-3 have a util_avg of 400, 100, 600 and 500 respectively + Each performance domain has three Operating Performance Points (OPPs). + The CPU capacity and power cost associated with each OPP is listed in + the Energy Model table. The util_avg of P is shown on the figures + below as 'PP'. + + CPU util. + 1024 - - - - - - - Energy Model + +-----------+-------------+ + | Little | Big | + 768 ============= +-----+-----+------+------+ + | Cap | Pwr | Cap | Pwr | + +-----+-----+------+------+ + 512 =========== - ##- - - - - | 170 | 50 | 512 | 400 | + ## ## | 341 | 150 | 768 | 800 | + 341 -PP - - - - ## ## | 512 | 300 | 1024 | 1700 | + PP ## ## +-----+-----+------+------+ + 170 -## - - - - ## ## + ## ## ## ## + ------------ ------------- + CPU0 CPU1 CPU2 CPU3 + + Current OPP: ===== Other OPP: - - - util_avg (100 each): ## + + + find_energy_efficient_cpu() will first look for the CPUs with the + maximum spare capacity in the two performance domains. In this example, + CPU1 and CPU3. Then it will estimate the energy of the system if P was + placed on either of them, and check if that would save some energy + compared to leaving P on CPU0. EAS assumes that OPPs follow utilization + (which is coherent with the behaviour of the schedutil CPUFreq + governor, see Section 6. for more details on this topic). + + Case 1. 
P is migrated to CPU1 + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + 1024 - - - - - - - + + Energy calculation: + 768 ============= * CPU0: 200 / 341 * 150 = 88 + * CPU1: 300 / 341 * 150 = 131 + * CPU2: 600 / 768 * 800 = 625 + 512 - - - - - - - ##- - - - - * CPU3: 500 / 768 * 800 = 520 + ## ## => total_energy = 1364 + 341 =========== ## ## + PP ## ## + 170 -## - - PP- ## ## + ## ## ## ## + ------------ ------------- + CPU0 CPU1 CPU2 CPU3 + + + Case 2. P is migrated to CPU3 + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + 1024 - - - - - - - + + Energy calculation: + 768 ============= * CPU0: 200 / 341 * 150 = 88 + * CPU1: 100 / 341 * 150 = 43 + PP * CPU2: 600 / 768 * 800 = 625 + 512 - - - - - - - ##- - -PP - * CPU3: 700 / 768 * 800 = 729 + ## ## => total_energy = 1485 + 341 =========== ## ## + ## ## + 170 -## - - - - ## ## + ## ## ## ## + ------------ ------------- + CPU0 CPU1 CPU2 CPU3 + + + Case 3. P stays on prev_cpu / CPU 0 + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + 1024 - - - - - - - + + Energy calculation: + 768 ============= * CPU0: 400 / 512 * 300 = 234 + * CPU1: 100 / 512 * 300 = 58 + * CPU2: 600 / 768 * 800 = 625 + 512 =========== - ##- - - - - * CPU3: 500 / 768 * 800 = 520 + ## ## => total_energy = 1437 + 341 -PP - - - - ## ## + PP ## ## + 170 -## - - - - ## ## + ## ## ## ## + ------------ ------------- + CPU0 CPU1 CPU2 CPU3 + + + From these calculations, the Case 1 has the lowest total energy. So CPU 1 + is be the best candidate from an energy-efficiency standpoint. + +Big CPUs are generally more power hungry than the little ones and are thus used +mainly when a task doesn't fit the littles. However, little CPUs aren't always +necessarily more energy-efficient than big CPUs. For some systems, the high OPPs +of the little CPUs can be less energy-efficient than the lowest OPPs of the +bigs, for example. So, if the little CPUs happen to have enough utilization at +a specific point in time, a small task waking up at that moment could be better +of executing on the big side in order to save energy, even though it would fit +on the little side. + +And even in the case where all OPPs of the big CPUs are less energy-efficient +than those of the little, using the big CPUs for a small task might still, under +specific conditions, save energy. Indeed, placing a task on a little CPU can +result in raising the OPP of the entire performance domain, and that will +increase the cost of the tasks already running there. If the waking task is +placed on a big CPU, its own execution cost might be higher than if it was +running on a little, but it won't impact the other tasks of the little CPUs +which will keep running at a lower OPP. So, when considering the total energy +consumed by CPUs, the extra cost of running that one task on a big core can be +smaller than the cost of raising the OPP on the little CPUs for all the other +tasks. + +The examples above would be nearly impossible to get right in a generic way, and +for all platforms, without knowing the cost of running at different OPPs on all +CPUs of the system. Thanks to its EM-based design, EAS should cope with them +correctly without too many troubles. However, in order to ensure a minimal +impact on throughput for high-utilization scenarios, EAS also implements another +mechanism called 'over-utilization'. + + +5. Over-utilization +------------------- + +From a general standpoint, the use-cases where EAS can help the most are those +involving a light/medium CPU utilization. 
Whenever long CPU-bound tasks are +being run, they will require all of the available CPU capacity, and there isn't +much that can be done by the scheduler to save energy without severly harming +throughput. In order to avoid hurting performance with EAS, CPUs are flagged as +'over-utilized' as soon as they are used at more than 80% of their compute +capacity. As long as no CPUs are over-utilized in a root domain, load balancing +is disabled and EAS overridess the wake-up balancing code. EAS is likely to load +the most energy efficient CPUs of the system more than the others if that can be +done without harming throughput. So, the load-balancer is disabled to prevent +it from breaking the energy-efficient task placement found by EAS. It is safe to +do so when the system isn't overutilized since being below the 80% tipping point +implies that: + + a. there is some idle time on all CPUs, so the utilization signals used by + EAS are likely to accurately represent the 'size' of the various tasks + in the system; + b. all tasks should already be provided with enough CPU capacity, + regardless of their nice values; + c. since there is spare capacity all tasks must be blocking/sleeping + regularly and balancing at wake-up is sufficient. + +As soon as one CPU goes above the 80% tipping point, at least one of the three +assumptions above becomes incorrect. In this scenario, the 'overutilized' flag +is raised for the entire root domain, EAS is disabled, and the load-balancer is +re-enabled. By doing so, the scheduler falls back onto load-based algorithms for +wake-up and load balance under CPU-bound conditions. This provides a better +respect of the nice values of tasks. + +Since the notion of overutilization largely relies on detecting whether or not +there is some idle time in the system, the CPU capacity 'stolen' by higher +(than CFS) scheduling classes (as well as IRQ) must be taken into account. As +such, the detection of overutilization accounts for the capacity used not only +by CFS tasks, but also by the other scheduling classes and IRQ. + + +6. Dependencies and requirements for EAS +---------------------------------------- + +Energy Aware Scheduling depends on the CPUs of the system having specific +hardware properties and on other features of the kernel being enabled. This +section lists these dependencies and provides hints as to how they can be met. + + + 6.1 - Asymmetric CPU topology + +As mentioned in the introduction, EAS is only supported on platforms with +asymmetric CPU topologies for now. This requirement is checked at run-time by +looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling +domains are built. + +The flag is set/cleared automatically by the scheduler topology code whenever +there are CPUs with different capacities in a root domain. The capacities of +CPUs are provided by arch-specific code through the arch_scale_cpu_capacity() +callback. As an example, arm and arm64 share an implementation of this callback +which uses a combination of CPUFreq data and device-tree bindings to compute the +capacity of CPUs (see drivers/base/arch_topology.c for more details). + +So, in order to use EAS on your platform your architecture must implement the +arch_scale_cpu_capacity() callback, and some of the CPUs must have a lower +capacity than others. + +Please note that EAS is not fundamentally incompatible with SMP, but no +significant savings on SMP platforms have been observed yet. This restriction +could be amended in the future if proven otherwise. 
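As a purely illustrative sketch (this is not the drivers/base/arch_topology.c implementation, and the helper name is made up), the normalization to the 1024 capacity range described in Section 3 amounts to scaling each CPU's raw performance score against the most capable CPU in the system::

    #define SCHED_CAPACITY_SCALE 1024   /* capacity of the most capable CPU */

    /*
     * Hypothetical helper: 'raw' could be derived from device-tree bindings
     * and CPUFreq data, in the spirit of what arm/arm64 do.
     */
    static void normalize_cpu_capacity(const unsigned long *raw,
                                       unsigned long *capacity, int nr_cpus)
    {
            unsigned long max_raw = 0;
            int cpu;

            for (cpu = 0; cpu < nr_cpus; cpu++)
                    if (raw[cpu] > max_raw)
                            max_raw = raw[cpu];
            if (!max_raw)
                    return;

            for (cpu = 0; cpu < nr_cpus; cpu++)
                    capacity[cpu] = raw[cpu] * SCHED_CAPACITY_SCALE / max_raw;
    }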
+ + + 6.2 - Energy Model presence + +EAS uses the EM of a platform to estimate the impact of scheduling decisions on +energy. So, your platform must provide power cost tables to the EM framework in +order to make EAS start. To do so, please refer to documentation of the +independent EM framework in Documentation/power/energy-model.txt. + +Please also note that the scheduling domains need to be re-built after the +EM has been registered in order to start EAS. + + + 6.3 - Energy Model complexity + +The task wake-up path is very latency-sensitive. When the EM of a platform is +too complex (too many CPUs, too many performance domains, too many performance +states, ...), the cost of using it in the wake-up path can become prohibitive. +The energy-aware wake-up algorithm has a complexity of: + + C = Nd * (Nc + Ns) + +with: Nd the number of performance domains; Nc the number of CPUs; and Ns the +total number of OPPs (ex: for two perf. domains with 4 OPPs each, Ns = 8). + +A complexity check is performed at the root domain level, when scheduling +domains are built. EAS will not start on a root domain if its C happens to be +higher than the completely arbitrary EM_MAX_COMPLEXITY threshold (2048 at the +time of writing). + +If you really want to use EAS but the complexity of your platform's Energy +Model is too high to be used with a single root domain, you're left with only +two possible options: + + 1. split your system into separate, smaller, root domains using exclusive + cpusets and enable EAS locally on each of them. This option has the + benefit to work out of the box but the drawback of preventing load + balance between root domains, which can result in an unbalanced system + overall; + 2. submit patches to reduce the complexity of the EAS wake-up algorithm, + hence enabling it to cope with larger EMs in reasonable time. + + + 6.4 - Schedutil governor + +EAS tries to predict at which OPP will the CPUs be running in the close future +in order to estimate their energy consumption. To do so, it is assumed that OPPs +of CPUs follow their utilization. + +Although it is very difficult to provide hard guarantees regarding the accuracy +of this assumption in practice (because the hardware might not do what it is +told to do, for example), schedutil as opposed to other CPUFreq governors at +least _requests_ frequencies calculated using the utilization signals. +Consequently, the only sane governor to use together with EAS is schedutil, +because it is the only one providing some degree of consistency between +frequency requests and energy predictions. + +Using EAS with any other governor than schedutil is not supported. + + + 6.5 Scale-invariant utilization signals + +In order to make accurate prediction across CPUs and for all performance +states, EAS needs frequency-invariant and CPU-invariant PELT signals. These can +be obtained using the architecture-defined arch_scale{cpu,freq}_capacity() +callbacks. + +Using EAS on a platform that doesn't implement these two callbacks is not +supported. + + + 6.6 Multithreading (SMT) + +EAS in its current form is SMT unaware and is not able to leverage +multithreaded hardware to save energy. EAS considers threads as independent +CPUs, which can actually be counter-productive for both performance and energy. + +EAS on SMT is not supported. 
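Tying this back to the complexity check of Section 6.3, a minimal sketch of the arithmetic (the helper is made up; only the formula and the 2048 threshold come from the text above)::

    #define EM_MAX_COMPLEXITY 2048      /* arbitrary threshold quoted in 6.3 */

    /*
     * C = Nd * (Nc + Ns). For instance, 3 performance domains, 12 CPUs and
     * 3 OPPs per domain (Ns = 9) give C = 3 * (12 + 9) = 63, far below the
     * threshold, so EAS would be allowed to start on that root domain.
     */
    static int eas_complexity_ok(int nd, int nc, int ns)
    {
            return nd * (nc + ns) <= EM_MAX_COMPLEXITY;
    }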
diff --git a/Documentation/scsi/osd.txt b/Documentation/scsi/osd.txt deleted file mode 100644 index 5a9879bad073..000000000000 --- a/Documentation/scsi/osd.txt +++ /dev/null @@ -1,197 +0,0 @@ -The OSD Standard -================ -OSD (Object-Based Storage Device) is a T10 SCSI command set that is designed -to provide efficient operation of input/output logical units that manage the -allocation, placement, and accessing of variable-size data-storage containers, -called objects. Objects are intended to contain operating system and application -constructs. Each object has associated attributes attached to it, which are -integral part of the object and provide metadata about the object. The standard -defines some common obligatory attributes, but user attributes can be added as -needed. - -See: http://www.t10.org/ftp/t10/drafts/osd2/ for the latest draft for OSD 2 -or search the web for "OSD SCSI" - -OSD in the Linux Kernel -======================= -osd-initiator: - The main component of OSD in Kernel is the osd-initiator library. Its main -user is intended to be the pNFS-over-objects layout driver, which uses objects -as its back-end data storage. Other clients are the other osd parts listed below. - -osd-uld: - This is a SCSI ULD that registers for OSD type devices and provides a testing -platform, both for the in-kernel initiator as well as connected targets. It -currently has no useful user-mode API, though it could have if need be. - -exofs: - Is an OSD based Linux file system. It uses the osd-initiator and osd-uld, -to export a usable file system for users. -See Documentation/filesystems/exofs.txt for more details - -osd target: - There are no current plans for an OSD target implementation in kernel. For all -needs, a user-mode target that is based on the scsi tgt target framework is -available from Ohio Supercomputer Center (OSC) at: -http://www.open-osd.org/bin/view/Main/OscOsdProject -There are several other target implementations. See http://open-osd.org for more -links. - -Files and Folders -================= -This is the complete list of files included in this work: -include/scsi/ - osd_initiator.h Main API for the initiator library - osd_types.h Common OSD types - osd_sec.h Security Manager API - osd_protocol.h Wire definitions of the OSD standard protocol - osd_attributes.h Wire definitions of OSD attributes - -drivers/scsi/osd/ - osd_initiator.c OSD-Initiator library implementation - osd_uld.c The OSD scsi ULD - osd_ktest.{h,c} In-kernel test suite (called by osd_uld) - osd_debug.h Some printk macros - Makefile For both in-tree and out-of-tree compilation - Kconfig Enables inclusion of the different pieces - osd_test.c User-mode application to call the kernel tests - -The OSD-Initiator Library -========================= -osd_initiator is a low level implementation of an osd initiator encoder. -But even though, it should be intuitive and easy to use. Perhaps over time an -higher lever will form that automates some of the more common recipes. - -init/fini: -- osd_dev_init() associates a scsi_device with an osd_dev structure - and initializes some global pools. This should be done once per scsi_device - (OSD LUN). The osd_dev structure is needed for calling osd_start_request(). - -- osd_dev_fini() cleans up before a osd_dev/scsi_device destruction. - -OSD commands encoding, execution, and decoding of results: - -struct osd_request's is used to iteratively encode an OSD command and carry -its state throughout execution. Each request goes through these stages: - -a. 
osd_start_request() allocates the request. - -b. Any of the osd_req_* methods is used to encode a request of the specified - type. - -c. osd_req_add_{get,set}_attr_* may be called to add get/set attributes to the - CDB. "List" or "Page" mode can be used exclusively. The attribute-list API - can be called multiple times on the same request. However, only one - attribute-page can be read, as mandated by the OSD standard. - -d. osd_finalize_request() computes offsets into the data-in and data-out buffers - and signs the request using the provided capability key and integrity- - check parameters. - -e. osd_execute_request() may be called to execute the request via the block - layer and wait for its completion. The request can be executed - asynchronously by calling the block layer API directly. - -f. After execution, osd_req_decode_sense() can be called to decode the request's - sense information. - -g. osd_req_decode_get_attr() may be called to retrieve osd_add_get_attr_list() - values. - -h. osd_end_request() must be called to deallocate the request and any resource - associated with it. Note that osd_end_request cleans up the request at any - stage and it must always be called after a successful osd_start_request(). - -osd_request's structure: - -The OSD standard defines a complex structure of IO segments pointed to by -members in the CDB. Up to 3 segments can be deployed in the IN-Buffer and up to -4 in the OUT-Buffer. The ASCII illustration below depicts a secure-read with -associated get+set of attributes-lists. Other combinations very on the same -basic theme. From no-segments-used up to all-segments-used. - -|________OSD-CDB__________| -| | -|read_len (offset=0) -|---------\ -| | | -|get_attrs_list_length | | -|get_attrs_list_offset -|----\ | -| | | | -|retrieved_attrs_alloc_len| | | -|retrieved_attrs_offset -|----|----|-\ -| | | | | -|set_attrs_list_length | | | | -|set_attrs_list_offset -|-\ | | | -| | | | | | -|in_data_integ_offset -|-|--|----|-|-\ -|out_data_integ_offset -|-|--|--\ | | | -\_________________________/ | | | | | | - | | | | | | -|_______OUT-BUFFER________| | | | | | | -| Set attr list | - -More up-to-date information can be found on: -http://open-osd.org - -Boaz Harrosh - -References -========== -Weber, R., "SCSI Object-Based Storage Device Commands", -T10/1355-D ANSI/INCITS 400-2004, -http://www.t10.org/ftp/t10/drafts/osd/osd-r10.pdf - -Weber, R., "SCSI Object-Based Storage Device Commands -2 (OSD-2)" -T10/1729-D, Working Draft, rev. 3 -http://www.t10.org/ftp/t10/drafts/osd2/osd2r03.pdf diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt index 520b5b033256..1769f71c4c20 100644 --- a/Documentation/scsi/ufs.txt +++ b/Documentation/scsi/ufs.txt @@ -147,6 +147,17 @@ send SG_IO with the applicable sg_io_v4: io_hdr_v4.max_response_len = reply_len; io_hdr_v4.request_len = request_len; io_hdr_v4.request = (__u64)request_upiu; + if (dir == SG_DXFER_TO_DEV) { + io_hdr_v4.dout_xfer_len = (uint32_t)byte_cnt; + io_hdr_v4.dout_xferp = (uintptr_t)(__u64)buff; + } else { + io_hdr_v4.din_xfer_len = (uint32_t)byte_cnt; + io_hdr_v4.din_xferp = (uintptr_t)(__u64)buff; + } + +If you wish to read or write a descriptor, use the appropriate xferp of +sg_io_v4. 
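A hedged user-space sketch of actually submitting the filled-in sg_io_v4 is shown below; the bsg device path is an assumption and will differ from system to system::

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <scsi/sg.h>        /* SG_IO */
    #include <linux/bsg.h>      /* struct sg_io_v4 */

    /* 'io_hdr_v4' is assumed to be prepared as in the example above. */
    static int submit_upiu(struct sg_io_v4 *io_hdr_v4)
    {
            int fd = open("/dev/bsg/ufs-bsg", O_RDWR);  /* hypothetical node */
            int ret;

            if (fd < 0)
                    return -1;
            ret = ioctl(fd, SG_IO, io_hdr_v4);          /* waits for completion */
            close(fd);
            return ret;
    }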
+ UFS Specifications can be found at, UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf diff --git a/Documentation/security/LSM.rst b/Documentation/security/LSM.rst index 8b9ee597e9d0..31d92bc5fdd2 100644 --- a/Documentation/security/LSM.rst +++ b/Documentation/security/LSM.rst @@ -11,4 +11,7 @@ that end users and distros can make a more informed decision about which LSMs suit their requirements. For extensive documentation on the available LSM hook interfaces, please -see ``include/linux/lsm_hooks.h``. +see ``include/linux/lsm_hooks.h`` and associated structures: + +.. kernel-doc:: include/linux/lsm_hooks.h + :internal: diff --git a/Documentation/security/LSM-sctp.rst b/Documentation/security/SCTP.rst similarity index 52% rename from Documentation/security/LSM-sctp.rst rename to Documentation/security/SCTP.rst index 6e5a3925a860..d903eb97fcf3 100644 --- a/Documentation/security/LSM-sctp.rst +++ b/Documentation/security/SCTP.rst @@ -1,6 +1,15 @@ +.. SPDX-License-Identifier: GPL-2.0 + +==== +SCTP +==== + SCTP LSM Support ================ +Security Hooks +-------------- + For security module support, three SCTP specific hooks have been implemented:: security_sctp_assoc_request() @@ -12,11 +21,11 @@ Also the following security hook has been utilised:: security_inet_conn_established() The usage of these hooks are described below with the SELinux implementation -described in ``Documentation/security/SELinux-sctp.rst`` +described in the `SCTP SELinux Support`_ chapter. security_sctp_assoc_request() ------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the security module. Returns 0 on success, error on failure. :: @@ -26,7 +35,7 @@ security module. Returns 0 on success, error on failure. security_sctp_bind_connect() ------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Passes one or more ipv4/ipv6 addresses to the security module for validation based on the ``@optname`` that will result in either a bind or connect service as shown in the permission check tables below. @@ -102,7 +111,7 @@ ASCONF chunk when the corresponding ``@optname``'s are present:: security_sctp_sk_clone() -------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~ Called whenever a new socket is created by **accept**\(2) (i.e. a TCP style socket) or when a socket is 'peeled off' e.g userspace calls **sctp_peeloff**\(3). @@ -114,7 +123,7 @@ calls **sctp_peeloff**\(3). security_inet_conn_established() ---------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Called when a COOKIE ACK is received:: @sk - pointer to sock structure. @@ -122,7 +131,8 @@ Called when a COOKIE ACK is received:: Security Hooks used for Association Establishment -================================================= +------------------------------------------------- + The following diagram shows the use of ``security_sctp_bind_connect()``, ``security_sctp_assoc_request()``, ``security_inet_conn_established()`` when establishing an association. @@ -173,3 +183,161 @@ establishing an association. 
------------------------------------------------------------------ +SCTP SELinux Support +==================== + +Security Hooks +-------------- + +The `SCTP LSM Support`_ chapter above describes the following SCTP security +hooks with the SELinux specifics expanded below:: + + security_sctp_assoc_request() + security_sctp_bind_connect() + security_sctp_sk_clone() + security_inet_conn_established() + + +security_sctp_assoc_request() +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the +security module. Returns 0 on success, error on failure. +:: + + @ep - pointer to sctp endpoint structure. + @skb - pointer to skbuff of association packet. + +The security module performs the following operations: + IF this is the first association on ``@ep->base.sk``, then set the peer + sid to that in ``@skb``. This will ensure there is only one peer sid + assigned to ``@ep->base.sk`` that may support multiple associations. + + ELSE validate the ``@ep->base.sk peer_sid`` against the ``@skb peer sid`` + to determine whether the association should be allowed or denied. + + Set the sctp ``@ep sid`` to socket's sid (from ``ep->base.sk``) with + MLS portion taken from ``@skb peer sid``. This will be used by SCTP + TCP style sockets and peeled off connections as they cause a new socket + to be generated. + + If IP security options are configured (CIPSO/CALIPSO), then the ip + options are set on the socket. + + +security_sctp_bind_connect() +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Checks permissions required for ipv4/ipv6 addresses based on the ``@optname`` +as follows:: + + ------------------------------------------------------------------ + | BIND Permission Checks | + | @optname | @address contains | + |----------------------------|-----------------------------------| + | SCTP_SOCKOPT_BINDX_ADD | One or more ipv4 / ipv6 addresses | + | SCTP_PRIMARY_ADDR | Single ipv4 or ipv6 address | + | SCTP_SET_PEER_PRIMARY_ADDR | Single ipv4 or ipv6 address | + ------------------------------------------------------------------ + + ------------------------------------------------------------------ + | CONNECT Permission Checks | + | @optname | @address contains | + |----------------------------|-----------------------------------| + | SCTP_SOCKOPT_CONNECTX | One or more ipv4 / ipv6 addresses | + | SCTP_PARAM_ADD_IP | One or more ipv4 / ipv6 addresses | + | SCTP_SENDMSG_CONNECT | Single ipv4 or ipv6 address | + | SCTP_PARAM_SET_PRIMARY | Single ipv4 or ipv6 address | + ------------------------------------------------------------------ + + +`SCTP LSM Support`_ gives a summary of the ``@optname`` +entries and also describes ASCONF chunk processing when Dynamic Address +Reconfiguration is enabled. + + +security_sctp_sk_clone() +~~~~~~~~~~~~~~~~~~~~~~~~ +Called whenever a new socket is created by **accept**\(2) (i.e. a TCP style +socket) or when a socket is 'peeled off' e.g userspace calls +**sctp_peeloff**\(3). ``security_sctp_sk_clone()`` will set the new +sockets sid and peer sid to that contained in the ``@ep sid`` and +``@ep peer sid`` respectively. +:: + + @ep - pointer to current sctp endpoint structure. + @sk - pointer to current sock structure. + @sk - pointer to new sock structure. + + +security_inet_conn_established() +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Called when a COOKIE ACK is received where it sets the connection's peer sid +to that in ``@skb``:: + + @sk - pointer to sock structure. + @skb - pointer to skbuff of the COOKIE ACK packet. 
+ + +Policy Statements +----------------- +The following class and permissions to support SCTP are available within the +kernel:: + + class sctp_socket inherits socket { node_bind } + +whenever the following policy capability is enabled:: + + policycap extended_socket_class; + +SELinux SCTP support adds the ``name_connect`` permission for connecting +to a specific port type and the ``association`` permission that is explained +in the section below. + +If userspace tools have been updated, SCTP will support the ``portcon`` +statement as shown in the following example:: + + portcon sctp 1024-1036 system_u:object_r:sctp_ports_t:s0 + + +SCTP Peer Labeling +------------------ +An SCTP socket will only have one peer label assigned to it. This will be +assigned during the establishment of the first association. Any further +associations on this socket will have their packet peer label compared to +the sockets peer label, and only if they are different will the +``association`` permission be validated. This is validated by checking the +socket peer sid against the received packets peer sid to determine whether +the association should be allowed or denied. + +NOTES: + 1) If peer labeling is not enabled, then the peer context will always be + ``SECINITSID_UNLABELED`` (``unlabeled_t`` in Reference Policy). + + 2) As SCTP can support more than one transport address per endpoint + (multi-homing) on a single socket, it is possible to configure policy + and NetLabel to provide different peer labels for each of these. As the + socket peer label is determined by the first associations transport + address, it is recommended that all peer labels are consistent. + + 3) **getpeercon**\(3) may be used by userspace to retrieve the sockets peer + context. + + 4) While not SCTP specific, be aware when using NetLabel that if a label + is assigned to a specific interface, and that interface 'goes down', + then the NetLabel service will remove the entry. Therefore ensure that + the network startup scripts call **netlabelctl**\(8) to set the required + label (see **netlabel-config**\(8) helper script for details). + + 5) The NetLabel SCTP peer labeling rules apply as discussed in the following + set of posts tagged "netlabel" at: http://www.paul-moore.com/blog/t. + + 6) CIPSO is only supported for IPv4 addressing: ``socket(AF_INET, ...)`` + CALIPSO is only supported for IPv6 addressing: ``socket(AF_INET6, ...)`` + + Note the following when testing CIPSO/CALIPSO: + a) CIPSO will send an ICMP packet if an SCTP packet cannot be + delivered because of an invalid label. + b) CALIPSO does not send an ICMP packet, just silently discards it. + + 7) IPSEC is not supported as RFC 3554 - sctp/ipsec support has not been + implemented in userspace (**racoon**\(8) or **ipsec_pluto**\(8)), + although the kernel supports SCTP/IPSEC. 
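Since note 3 above points at **getpeercon**\(3), here is a minimal user-space sketch of reading the peer label off an established SCTP socket (error handling trimmed, socket setup assumed to have happened elsewhere, link with -lselinux)::

    #include <stdio.h>
    #include <selinux/selinux.h>

    static void print_peer_label(int fd)
    {
            char *context = NULL;

            if (getpeercon(fd, &context) == 0) {
                    printf("peer context: %s\n", context);
                    freecon(context);
            }
    }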
diff --git a/Documentation/security/SELinux-sctp.rst b/Documentation/security/SELinux-sctp.rst deleted file mode 100644 index a332cb1c5334..000000000000 --- a/Documentation/security/SELinux-sctp.rst +++ /dev/null @@ -1,158 +0,0 @@ -SCTP SELinux Support -===================== - -Security Hooks -=============== - -``Documentation/security/LSM-sctp.rst`` describes the following SCTP security -hooks with the SELinux specifics expanded below:: - - security_sctp_assoc_request() - security_sctp_bind_connect() - security_sctp_sk_clone() - security_inet_conn_established() - - -security_sctp_assoc_request() ------------------------------ -Passes the ``@ep`` and ``@chunk->skb`` of the association INIT packet to the -security module. Returns 0 on success, error on failure. -:: - - @ep - pointer to sctp endpoint structure. - @skb - pointer to skbuff of association packet. - -The security module performs the following operations: - IF this is the first association on ``@ep->base.sk``, then set the peer - sid to that in ``@skb``. This will ensure there is only one peer sid - assigned to ``@ep->base.sk`` that may support multiple associations. - - ELSE validate the ``@ep->base.sk peer_sid`` against the ``@skb peer sid`` - to determine whether the association should be allowed or denied. - - Set the sctp ``@ep sid`` to socket's sid (from ``ep->base.sk``) with - MLS portion taken from ``@skb peer sid``. This will be used by SCTP - TCP style sockets and peeled off connections as they cause a new socket - to be generated. - - If IP security options are configured (CIPSO/CALIPSO), then the ip - options are set on the socket. - - -security_sctp_bind_connect() ------------------------------ -Checks permissions required for ipv4/ipv6 addresses based on the ``@optname`` -as follows:: - - ------------------------------------------------------------------ - | BIND Permission Checks | - | @optname | @address contains | - |----------------------------|-----------------------------------| - | SCTP_SOCKOPT_BINDX_ADD | One or more ipv4 / ipv6 addresses | - | SCTP_PRIMARY_ADDR | Single ipv4 or ipv6 address | - | SCTP_SET_PEER_PRIMARY_ADDR | Single ipv4 or ipv6 address | - ------------------------------------------------------------------ - - ------------------------------------------------------------------ - | CONNECT Permission Checks | - | @optname | @address contains | - |----------------------------|-----------------------------------| - | SCTP_SOCKOPT_CONNECTX | One or more ipv4 / ipv6 addresses | - | SCTP_PARAM_ADD_IP | One or more ipv4 / ipv6 addresses | - | SCTP_SENDMSG_CONNECT | Single ipv4 or ipv6 address | - | SCTP_PARAM_SET_PRIMARY | Single ipv4 or ipv6 address | - ------------------------------------------------------------------ - - -``Documentation/security/LSM-sctp.rst`` gives a summary of the ``@optname`` -entries and also describes ASCONF chunk processing when Dynamic Address -Reconfiguration is enabled. - - -security_sctp_sk_clone() -------------------------- -Called whenever a new socket is created by **accept**\(2) (i.e. a TCP style -socket) or when a socket is 'peeled off' e.g userspace calls -**sctp_peeloff**\(3). ``security_sctp_sk_clone()`` will set the new -sockets sid and peer sid to that contained in the ``@ep sid`` and -``@ep peer sid`` respectively. -:: - - @ep - pointer to current sctp endpoint structure. - @sk - pointer to current sock structure. - @sk - pointer to new sock structure. 
- - -security_inet_conn_established() ---------------------------------- -Called when a COOKIE ACK is received where it sets the connection's peer sid -to that in ``@skb``:: - - @sk - pointer to sock structure. - @skb - pointer to skbuff of the COOKIE ACK packet. - - -Policy Statements -================== -The following class and permissions to support SCTP are available within the -kernel:: - - class sctp_socket inherits socket { node_bind } - -whenever the following policy capability is enabled:: - - policycap extended_socket_class; - -SELinux SCTP support adds the ``name_connect`` permission for connecting -to a specific port type and the ``association`` permission that is explained -in the section below. - -If userspace tools have been updated, SCTP will support the ``portcon`` -statement as shown in the following example:: - - portcon sctp 1024-1036 system_u:object_r:sctp_ports_t:s0 - - -SCTP Peer Labeling -=================== -An SCTP socket will only have one peer label assigned to it. This will be -assigned during the establishment of the first association. Any further -associations on this socket will have their packet peer label compared to -the sockets peer label, and only if they are different will the -``association`` permission be validated. This is validated by checking the -socket peer sid against the received packets peer sid to determine whether -the association should be allowed or denied. - -NOTES: - 1) If peer labeling is not enabled, then the peer context will always be - ``SECINITSID_UNLABELED`` (``unlabeled_t`` in Reference Policy). - - 2) As SCTP can support more than one transport address per endpoint - (multi-homing) on a single socket, it is possible to configure policy - and NetLabel to provide different peer labels for each of these. As the - socket peer label is determined by the first associations transport - address, it is recommended that all peer labels are consistent. - - 3) **getpeercon**\(3) may be used by userspace to retrieve the sockets peer - context. - - 4) While not SCTP specific, be aware when using NetLabel that if a label - is assigned to a specific interface, and that interface 'goes down', - then the NetLabel service will remove the entry. Therefore ensure that - the network startup scripts call **netlabelctl**\(8) to set the required - label (see **netlabel-config**\(8) helper script for details). - - 5) The NetLabel SCTP peer labeling rules apply as discussed in the following - set of posts tagged "netlabel" at: http://www.paul-moore.com/blog/t. - - 6) CIPSO is only supported for IPv4 addressing: ``socket(AF_INET, ...)`` - CALIPSO is only supported for IPv6 addressing: ``socket(AF_INET6, ...)`` - - Note the following when testing CIPSO/CALIPSO: - a) CIPSO will send an ICMP packet if an SCTP packet cannot be - delivered because of an invalid label. - b) CALIPSO does not send an ICMP packet, just silently discards it. - - 7) IPSEC is not supported as RFC 3554 - sctp/ipsec support has not been - implemented in userspace (**racoon**\(8) or **ipsec_pluto**\(8)), - although the kernel supports SCTP/IPSEC. 
diff --git a/Documentation/security/index.rst b/Documentation/security/index.rst index 85492bfca530..aad6d92ffe31 100644 --- a/Documentation/security/index.rst +++ b/Documentation/security/index.rst @@ -9,7 +9,6 @@ Security Documentation IMA-templates keys/index LSM - LSM-sctp - SELinux-sctp + SCTP self-protection tpm/index diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst index 368a07a165f5..7d7c191102a7 100644 --- a/Documentation/sound/hd-audio/models.rst +++ b/Documentation/sound/hd-audio/models.rst @@ -254,10 +254,12 @@ alc274-dell-aio ALC274 fixups on Dell AIO machines alc255-dummy-lineout Dell Precision 3930 fixups -alc255-dell-headset"}, +alc255-dell-headset Dell Precision 3630 fixups alc295-hp-x360 HP Spectre X360 fixups +alc-sense-combo + Headset button support for Chrome platform ALC66x/67x/892 ============== diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst index b37234afdfa1..6b154dbb02cc 100644 --- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst +++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst @@ -3520,14 +3520,14 @@ allocator will try to get an area as large as possible within the given size. The second argument (type) and the third argument (device pointer) are -dependent on the bus. In the case of the ISA bus, pass -:c:func:`snd_dma_isa_data()` as the third argument with +dependent on the bus. For normal devices, pass the device pointer +(typically identical as ``card->dev``) to the third argument with ``SNDRV_DMA_TYPE_DEV`` type. For the continuous buffer unrelated to the bus can be pre-allocated with ``SNDRV_DMA_TYPE_CONTINUOUS`` type and the ``snd_dma_continuous_data(GFP_KERNEL)`` device pointer, where -``GFP_KERNEL`` is the kernel allocation flag to use. For the PCI -scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with -``snd_dma_pci_data(pci)`` (see the `Non-Contiguous Buffers`_ +``GFP_KERNEL`` is the kernel allocation flag to use. For the +scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with the device +pointer (see the `Non-Contiguous Buffers`_ section). Once the buffer is pre-allocated, you can use the allocator in the @@ -3924,15 +3924,12 @@ The scheme of the real suspend job is as follows. 2. Call :c:func:`snd_power_change_state()` with ``SNDRV_CTL_POWER_D3hot`` to change the power status. -3. Call :c:func:`snd_pcm_suspend_all()` to suspend the running - PCM streams. - -4. If AC97 codecs are used, call :c:func:`snd_ac97_suspend()` for +3. If AC97 codecs are used, call :c:func:`snd_ac97_suspend()` for each codec. -5. Save the register values if necessary. +4. Save the register values if necessary. -6. Stop the hardware if necessary. +5. Stop the hardware if necessary. A typical code would be like: @@ -3946,12 +3943,10 @@ A typical code would be like: /* (2) */ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); /* (3) */ - snd_pcm_suspend_all(chip->pcm); - /* (4) */ snd_ac97_suspend(chip->ac97); - /* (5) */ + /* (4) */ snd_mychip_save_registers(chip); - /* (6) */ + /* (5) */ snd_mychip_stop_hardware(chip); return 0; } @@ -3994,13 +3989,9 @@ A typical code would be like: return 0; } -As shown in the above, it's better to save registers after suspending -the PCM operations via :c:func:`snd_pcm_suspend_all()` or -:c:func:`snd_pcm_suspend()`. It means that the PCM streams are -already stopped when the register snapshot is taken. 
But, remember that -you don't have to restart the PCM stream in the resume callback. It'll -be restarted via trigger call with ``SNDRV_PCM_TRIGGER_RESUME`` when -necessary. +Note that, at the time this callback gets called, the PCM stream has +been already suspended via its own PM ops calling +:c:func:`snd_pcm_suspend_all()` internally. OK, we have all callbacks now. Let's set them up. In the initialization of the card, make sure that you can get the chip data from the card diff --git a/Documentation/sound/soc/dpcm.rst b/Documentation/sound/soc/dpcm.rst index f6845b2278ea..77f67ded53de 100644 --- a/Documentation/sound/soc/dpcm.rst +++ b/Documentation/sound/soc/dpcm.rst @@ -13,7 +13,7 @@ drivers that expose several ALSA PCMs and can route to multiple DAIs. The DPCM runtime routing is determined by the ALSA mixer settings in the same way as the analog signal is routed in an ASoC codec driver. DPCM uses a DAPM graph representing the DSP internal audio paths and uses the mixer settings to -determine the patch used by each ALSA PCM. +determine the path used by each ALSA PCM. DPCM re-uses all the existing component codec, platform and DAI drivers without any modifications. @@ -101,7 +101,7 @@ The audio driver processes this as follows :- 4. Machine driver or audio HAL enables the speaker path. -5. DPCM runs the PCM ops for startup(), hw_params(), prepapre() and +5. DPCM runs the PCM ops for startup(), hw_params(), prepare() and trigger(start) for DAI1 Speakers since the path is enabled. In this example, the machine driver or userspace audio HAL can alter the routing @@ -221,7 +221,7 @@ like a BT phone call :- This allows the host CPU to sleep while the DSP, MODEM DAI and the BT DAI are still in operation. -A BE DAI link can also set the codec to a dummy device if the code is a device +A BE DAI link can also set the codec to a dummy device if the codec is a device that is managed externally. Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the @@ -249,7 +249,7 @@ configuration. struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); - /* The DSP will covert the FE rate to 48k, stereo */ + /* The DSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; @@ -386,5 +386,3 @@ This means creating a new FE that is connected with a virtual path to both DAI links. The DAI links will be started when the FE PCM is started and stopped when the FE PCM is stopped. Note that the FE PCM cannot read or write data in this configuration. - - diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx index 13a0b7fb192f..551325b66b23 100644 --- a/Documentation/spi/pxa2xx +++ b/Documentation/spi/pxa2xx @@ -21,15 +21,15 @@ Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a "platform device". The master configuration is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h: -struct pxa2xx_spi_master { +struct pxa2xx_spi_controller { u16 num_chipselect; u8 enable_dma; }; -The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of +The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of slave device (chips) attached to this SPI master. -The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should +The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should be used. This caused the driver to acquire two DMA channels: rx_channel and tx_channel. 
The rx_channel has a higher DMA service priority the tx_channel. See the "PXA2xx Developer Manual" section "DMA Controller". @@ -51,7 +51,7 @@ static struct resource pxa_spi_nssp_resources[] = { }, }; -static struct pxa2xx_spi_master pxa_nssp_master_info = { +static struct pxa2xx_spi_controller pxa_nssp_master_info = { .num_chipselect = 1, /* Matches the number of chips attached to NSSP */ .enable_dma = 1, /* Enables NSSP DMA */ }; @@ -206,7 +206,7 @@ DMA and PIO I/O Support ----------------------- The pxa2xx_spi driver supports both DMA and interrupt driven PIO message transfers. The driver defaults to PIO mode and DMA transfers must be enabled -by setting the "enable_dma" flag in the "pxa2xx_spi_master" structure. The DMA +by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure. The DMA mode supports both coherent and stream based DMA mappings. The following logic is used to determine the type of I/O to be used on diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt index d68135560895..9803e14639bf 100644 --- a/Documentation/static-keys.txt +++ b/Documentation/static-keys.txt @@ -159,7 +159,7 @@ particularly the CPU hotplug lock (in order to avoid races against CPUs being brought in the kernel while the kernel is getting patched). Calling the static key API from within a hotplug notifier is thus a sure deadlock recipe. In order to still allow use of the -functionnality, the following functions are provided: +functionality, the following functions are provided: static_key_enable_cpuslocked() static_key_disable_cpuslocked() diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 819caf8ca05f..ebc679bcb2dc 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt @@ -56,26 +56,34 @@ of any kernel data structures. dentry-state: -From linux/fs/dentry.c: +From linux/include/linux/dcache.h: -------------------------------------------------------------- -struct { +struct dentry_stat_t dentry_stat { int nr_dentry; int nr_unused; int age_limit; /* age in seconds */ int want_pages; /* pages requested by system */ - int dummy[2]; -} dentry_stat = {0, 0, 45, 0,}; --------------------------------------------------------------- - -Dentries are dynamically allocated and deallocated, and -nr_dentry seems to be 0 all the time. Hence it's safe to -assume that only nr_unused, age_limit and want_pages are -used. Nr_unused seems to be exactly what its name says. + int nr_negative; /* # of unused negative dentries */ + int dummy; /* Reserved for future use */ +}; +-------------------------------------------------------------- + +Dentries are dynamically allocated and deallocated. + +nr_dentry shows the total number of dentries allocated (active ++ unused). nr_unused shows the number of dentries that are not +actively used, but are saved in the LRU list for future reuse. + Age_limit is the age in seconds after which dcache entries can be reclaimed when memory is short and want_pages is nonzero when shrink_dcache_pages() has been called and the dcache isn't pruned yet. +nr_negative shows the number of unused dentries that are also +negative dentries which do not map to any files. Instead, +they help speeding up rejection of non-existing files provided +by the users. 
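
For reference, the dentry-state counters described above can be read directly from /proc/sys/fs/dentry-state; a small userspace sketch follows (field order as in the dcache.h structure shown above, error handling kept minimal)::

    /* Read and label the dentry-state counters documented above. */
    #include <stdio.h>

    int main(void)
    {
            long nr_dentry, nr_unused, age_limit, want_pages, nr_negative, dummy;
            FILE *f = fopen("/proc/sys/fs/dentry-state", "r");

            if (!f || fscanf(f, "%ld %ld %ld %ld %ld %ld",
                             &nr_dentry, &nr_unused, &age_limit,
                             &want_pages, &nr_negative, &dummy) != 6) {
                    perror("dentry-state");
                    return 1;
            }
            fclose(f);
            printf("allocated=%ld unused=%ld negative=%ld\n",
                   nr_dentry, nr_unused, nr_negative);
            return 0;
    }
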
+ ============================================================== dquot-max & dquot-nr: diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index c0527d8a468a..aa058aa7bf28 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -79,6 +79,7 @@ show up in /proc/sys/kernel: - reboot-cmd [ SPARC only ] - rtsig-max - rtsig-nr +- sched_energy_aware - seccomp/ ==> Documentation/userspace-api/seccomp_filter.rst - sem - sem_next_id [ sysv ipc ] @@ -94,7 +95,7 @@ show up in /proc/sys/kernel: - stop-a [ SPARC only ] - sysrq ==> Documentation/admin-guide/sysrq.rst - sysctl_writes_strict -- tainted +- tainted ==> Documentation/admin-guide/tainted-kernels.rst - threads-max - unknown_nmi_panic - watchdog @@ -890,6 +891,17 @@ rtsig-nr shows the number of RT signals currently queued. ============================================================== +sched_energy_aware: + +Enables/disables Energy Aware Scheduling (EAS). EAS starts +automatically on platforms where it can run (that is, +platforms with asymmetric CPU topologies and having an Energy +Model available). If your platform happens to meet the +requirements for EAS but you do not want to use it, change +this value to 0. + +============================================================== + sched_schedstats: Enables/disables scheduler statistics. Enabling this feature @@ -1019,39 +1031,33 @@ compilation sees a 1% slowdown, other systems and workloads may vary. 1: kernel stack erasing is enabled (default), it is performed before returning to the userspace at the end of syscalls. - ============================================================== -tainted: +tainted Non-zero if the kernel has been tainted. Numeric values, which can be ORed together. The letters are seen in "Tainted" line of Oops reports. - 1 (P): A module with a non-GPL license has been loaded, this - includes modules with no license. - Set by modutils >= 2.4.9 and module-init-tools. - 2 (F): A module was force loaded by insmod -f. - Set by modutils >= 2.4.9 and module-init-tools. - 4 (S): Unsafe SMP processors: SMP with CPUs not designed for SMP. - 8 (R): A module was forcibly unloaded from the system by rmmod -f. - 16 (M): A hardware machine check error occurred on the system. - 32 (B): A bad page was discovered on the system. - 64 (U): The user has asked that the system be marked "tainted". This - could be because they are running software that directly modifies - the hardware, or for other reasons. - 128 (D): The system has died. - 256 (A): The ACPI DSDT has been overridden with one supplied by the user - instead of using the one provided by the hardware. - 512 (W): A kernel warning has occurred. - 1024 (C): A module from drivers/staging was loaded. - 2048 (I): The system is working around a severe firmware bug. - 4096 (O): An out-of-tree module has been loaded. - 8192 (E): An unsigned module has been loaded in a kernel supporting module - signature. - 16384 (L): A soft lockup has previously occurred on the system. - 32768 (K): The kernel has been live patched. - 65536 (X): Auxiliary taint, defined and used by for distros. -131072 (T): The kernel was built with the struct randomization plugin. 
+ 1 (P): proprietary module was loaded + 2 (F): module was force loaded + 4 (S): SMP kernel oops on an officially SMP incapable processor + 8 (R): module was force unloaded + 16 (M): processor reported a Machine Check Exception (MCE) + 32 (B): bad page referenced or some unexpected page flags + 64 (U): taint requested by userspace application + 128 (D): kernel died recently, i.e. there was an OOPS or BUG + 256 (A): an ACPI table was overridden by user + 512 (W): kernel issued warning + 1024 (C): staging driver was loaded + 2048 (I): workaround for bug in platform firmware applied + 4096 (O): externally-built ("out-of-tree") module was loaded + 8192 (E): unsigned module was loaded + 16384 (L): soft lockup occurred + 32768 (K): kernel has been live patched + 65536 (X): Auxiliary taint, defined and used by for distros +131072 (T): The kernel was built with the struct randomization plugin + +See Documentation/admin-guide/tainted-kernels.rst for more information. ============================================================== diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 2793d4eac55f..2ae91d3873bb 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -52,6 +52,7 @@ two flavors of JITs, the newer eBPF JIT currently supported on: - sparc64 - mips64 - s390x + - riscv And the older cBPF JIT supported on the following archs: - mips @@ -291,6 +292,20 @@ user space is responsible for creating them if needed. Default : 0 (for compatibility reasons) +devconf_inherit_init_net +---------------------------- + +Controls if a new network namespace should inherit all current +settings under /proc/sys/net/{ipv4,ipv6}/conf/{all,default}/. By +default, we keep the current behavior: for IPv4 we inherit all current +settings from init_net and for IPv6 we reset all settings to default. + +If set to 1, both IPv4 and IPv6 settings are forced to inherit from +current ones in init_net. If set to 2, both IPv4 and IPv6 settings are +forced to reset to their default values. + +Default : 0 (for compatibility reasons) + 2. /proc/sys/net/unix - Parameters for Unix domain sockets ------------------------------------------------------- diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 187ce4f599a2..6af24cdb25cc 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -237,7 +237,7 @@ used: cat (1234): drop_caches: 3 These are informational only. They do not mean that anything is wrong -with your system. To disable them, echo 4 (bit 3) into drop_caches. +with your system. To disable them, echo 4 (bit 2) into drop_caches. 
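
The taint letters listed above can be recovered from the numeric value in /proc/sys/kernel/tainted with a trivial decoder; a sketch follows (bit order matches the table above)::

    /* Decode /proc/sys/kernel/tainted into the letters listed above. */
    #include <stdio.h>

    int main(void)
    {
            static const char letters[] = "PFSRMBUDAWCIOELKXT";
            unsigned long tainted = 0;
            FILE *f = fopen("/proc/sys/kernel/tainted", "r");

            if (!f || fscanf(f, "%lu", &tainted) != 1) {
                    perror("tainted");
                    return 1;
            }
            fclose(f);

            printf("tainted=%lu flags=", tainted);
            for (int bit = 0; letters[bit]; bit++)
                    if (tainted & (1UL << bit))
                            putchar(letters[bit]);
            putchar('\n');
            return 0;
    }
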
============================================================== diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py index 94bf6944bb1e..95d6e31f1e3a 100755 --- a/Documentation/target/tcm_mod_builder.py +++ b/Documentation/target/tcm_mod_builder.py @@ -297,7 +297,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" buf += " .sess_get_initiator_sid = NULL,\n" buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" - buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n" buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" @@ -479,13 +478,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += "}\n\n" bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n" - if re.search('write_pending_status\)\(', fo): - buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n" - buf += "{\n" - buf += " return 0;\n" - buf += "}\n\n" - bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n" - if re.search('set_default_node_attributes\)\(', fo): buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n" buf += "{\n" diff --git a/Documentation/timers/highres.txt b/Documentation/timers/highres.txt index 9d88f67781c2..8f9741592123 100644 --- a/Documentation/timers/highres.txt +++ b/Documentation/timers/highres.txt @@ -231,7 +231,7 @@ in the idle period to make sure that jiffies are up to date and the interrupt handler has not to deal with an eventually stale jiffy value. The dynamic tick feature provides statistical values which are exported to -userspace via /proc/stats and can be made available for enhanced power +userspace via /proc/stat and can be made available for enhanced power management control. The implementation leaves room for further development like full tickless diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst index 0131df7f5968..7c5e6d6ab5d1 100644 --- a/Documentation/trace/ftrace.rst +++ b/Documentation/trace/ftrace.rst @@ -233,6 +233,12 @@ of ftrace. Here is a list of some of the key files: This interface also allows for commands to be used. See the "Filter commands" section for more details. + As a speed up, since processing strings can't be quite expensive + and requires a check of all functions registered to tracing, instead + an index can be written into this file. A number (starting with "1") + written will instead select the same corresponding at the line position + of the "available_filter_functions" file. + set_ftrace_notrace: This has an effect opposite to that of @@ -1396,6 +1402,57 @@ enabling function tracing, we incur an added overhead. This overhead may extend the latency times. But nevertheless, this trace has provided some very helpful debugging information. 
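
The index form of set_ftrace_filter described above is convenient when a tool wants to enable many functions without passing their names; a userspace sketch that writes line positions instead of strings is shown below (the usual /sys/kernel/debug/tracing mount point is assumed)::

    /* Select functions by their line position in available_filter_functions. */
    #include <stdio.h>

    int main(void)
    {
            const char *path = "/sys/kernel/debug/tracing/set_ftrace_filter";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            /* Indices start at 1; this selects entries 1 and 50 of
             * available_filter_functions. */
            fprintf(f, "1 50\n");
            fclose(f);
            return 0;
    }
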
+If we prefer function graph output instead of function, we can set +display-graph option:: + with echo 1 > options/display-graph + + # tracer: irqsoff + # + # irqsoff latency trace v1.1.5 on 4.20.0-rc6+ + # -------------------------------------------------------------------- + # latency: 3751 us, #274/274, CPU#0 | (M:desktop VP:0, KP:0, SP:0 HP:0 #P:4) + # ----------------- + # | task: bash-1507 (uid:0 nice:0 policy:0 rt_prio:0) + # ----------------- + # => started at: free_debug_processing + # => ended at: return_to_handler + # + # + # _-----=> irqs-off + # / _----=> need-resched + # | / _---=> hardirq/softirq + # || / _--=> preempt-depth + # ||| / + # REL TIME CPU TASK/PID |||| DURATION FUNCTION CALLS + # | | | | |||| | | | | | | + 0 us | 0) bash-1507 | d... | 0.000 us | _raw_spin_lock_irqsave(); + 0 us | 0) bash-1507 | d..1 | 0.378 us | do_raw_spin_trylock(); + 1 us | 0) bash-1507 | d..2 | | set_track() { + 2 us | 0) bash-1507 | d..2 | | save_stack_trace() { + 2 us | 0) bash-1507 | d..2 | | __save_stack_trace() { + 3 us | 0) bash-1507 | d..2 | | __unwind_start() { + 3 us | 0) bash-1507 | d..2 | | get_stack_info() { + 3 us | 0) bash-1507 | d..2 | 0.351 us | in_task_stack(); + 4 us | 0) bash-1507 | d..2 | 1.107 us | } + [...] + 3750 us | 0) bash-1507 | d..1 | 0.516 us | do_raw_spin_unlock(); + 3750 us | 0) bash-1507 | d..1 | 0.000 us | _raw_spin_unlock_irqrestore(); + 3764 us | 0) bash-1507 | d..1 | 0.000 us | tracer_hardirqs_on(); + bash-1507 0d..1 3792us : + => free_debug_processing + => __slab_free + => kmem_cache_free + => vm_area_free + => remove_vma + => exit_mmap + => mmput + => flush_old_exec + => load_elf_binary + => search_binary_handler + => __do_execve_file.isra.32 + => __x64_sys_execve + => do_syscall_64 + => entry_SYSCALL_64_after_hwframe preemptoff ---------- @@ -2784,6 +2841,38 @@ Produces:: We can see that there's no more lock or preempt tracing. +Selecting function filters via index +------------------------------------ + +Because processing of strings is expensive (the address of the function +needs to be looked up before comparing to the string being passed in), +an index can be used as well to enable functions. This is useful in the +case of setting thousands of specific functions at a time. By passing +in a list of numbers, no string processing will occur. Instead, the function +at the specific location in the internal array (which corresponds to the +functions in the "available_filter_functions" file), is selected. + +:: + + # echo 1 > set_ftrace_filter + +Will select the first function listed in "available_filter_functions" + +:: + + # head -1 available_filter_functions + trace_initcall_finish_cb + + # cat set_ftrace_filter + trace_initcall_finish_cb + + # head -50 available_filter_functions | tail -1 + x86_pmu_commit_txn + + # echo 1 50 > set_ftrace_filter + # cat set_ftrace_filter + trace_initcall_finish_cb + x86_pmu_commit_txn Dynamic ftrace with the function graph tracer --------------------------------------------- diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst index 7dda76503127..0ea59d45aef1 100644 --- a/Documentation/trace/histogram.rst +++ b/Documentation/trace/histogram.rst @@ -25,7 +25,7 @@ Documentation written by Tom Zanussi hist:keys=[:values=] [:sort=][:size=#entries][:pause][:continue] - [:clear][:name=histname1] [if ] + [:clear][:name=histname1][:.] [if ] When a matching event is hit, an entry is added to a hash table using the key(s) and value(s) named. 
Keys and values correspond to @@ -1831,41 +1831,87 @@ and looks and behaves just like any other event:: Like any other event, once a histogram is enabled for the event, the output can be displayed by reading the event's 'hist' file. -2.2.3 Hist trigger 'actions' ----------------------------- +2.2.3 Hist trigger 'handlers' and 'actions' +------------------------------------------- -A hist trigger 'action' is a function that's executed whenever a -histogram entry is added or updated. +A hist trigger 'action' is a function that's executed (in most cases +conditionally) whenever a histogram entry is added or updated. -The default 'action' if no special function is explicitly specified is -as it always has been, to simply update the set of values associated -with an entry. Some applications, however, may want to perform -additional actions at that point, such as generate another event, or -compare and save a maximum. +When a histogram entry is added or updated, a hist trigger 'handler' +is what decides whether the corresponding action is actually invoked +or not. -The following additional actions are available. To specify an action -for a given event, simply specify the action between colons in the -hist trigger specification. +Hist trigger handlers and actions are paired together in the general +form: - - onmatch(matching.event).(param list) + . - The 'onmatch(matching.event).(params)' hist - trigger action is invoked whenever an event matches and the - histogram entry would be added or updated. It causes the named - synthetic event to be generated with the values given in the +To specify a handler.action pair for a given event, simply specify +that handler.action pair between colons in the hist trigger +specification. + +In theory, any handler can be combined with any action, but in +practice, not every handler.action combination is currently supported; +if a given handler.action combination isn't supported, the hist +trigger will fail with -EINVAL; + +The default 'handler.action' if none is explicity specified is as it +always has been, to simply update the set of values associated with an +entry. Some applications, however, may want to perform additional +actions at that point, such as generate another event, or compare and +save a maximum. + +The supported handlers and actions are listed below, and each is +described in more detail in the following paragraphs, in the context +of descriptions of some common and useful handler.action combinations. + +The available handlers are: + + - onmatch(matching.event) - invoke action on any addition or update + - onmax(var) - invoke action if var exceeds current max + - onchange(var) - invoke action if var changes + +The available actions are: + + - trace(,param list) - generate synthetic event + - save(field,...) - save current event fields + - snapshot() - snapshot the trace buffer + +The following commonly-used handler.action pairs are available: + + - onmatch(matching.event).trace(,param list) + + The 'onmatch(matching.event).trace(,param + list)' hist trigger action is invoked whenever an event matches + and the histogram entry would be added or updated. It causes the + named synthetic event to be generated with the values given in the 'param list'. The result is the generation of a synthetic event that consists of the values contained in those variables at the - time the invoking event was hit. 
- - The 'param list' consists of one or more parameters which may be - either variables or fields defined on either the 'matching.event' - or the target event. The variables or fields specified in the - param list may be either fully-qualified or unqualified. If a - variable is specified as unqualified, it must be unique between - the two events. A field name used as a param can be unqualified - if it refers to the target event, but must be fully qualified if - it refers to the matching event. A fully-qualified name is of the - form 'system.event_name.$var_name' or 'system.event_name.field'. + time the invoking event was hit. For example, if the synthetic + event name is 'wakeup_latency', a wakeup_latency event is + generated using onmatch(event).trace(wakeup_latency,arg1,arg2). + + There is also an equivalent alternative form available for + generating synthetic events. In this form, the synthetic event + name is used as if it were a function name. For example, using + the 'wakeup_latency' synthetic event name again, the + wakeup_latency event would be generated by invoking it as if it + were a function call, with the event field values passed in as + arguments: onmatch(event).wakeup_latency(arg1,arg2). The syntax + for this form is: + + onmatch(matching.event).(param list) + + In either case, the 'param list' consists of one or more + parameters which may be either variables or fields defined on + either the 'matching.event' or the target event. The variables or + fields specified in the param list may be either fully-qualified + or unqualified. If a variable is specified as unqualified, it + must be unique between the two events. A field name used as a + param can be unqualified if it refers to the target event, but + must be fully qualified if it refers to the matching event. A + fully-qualified name is of the form 'system.event_name.$var_name' + or 'system.event_name.field'. The 'matching.event' specification is simply the fully qualified event name of the event that matches the target event for the @@ -1896,6 +1942,12 @@ hist trigger specification. wakeup_new_test($testpid) if comm=="cyclictest"' >> \ /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger + Or, equivalently, using the 'trace' keyword syntax: + + # echo 'hist:keys=$testpid:testpid=pid:onmatch(sched.sched_wakeup_new).\ + trace(wakeup_new_test,$testpid) if comm=="cyclictest"' >> \ + /sys/kernel/debug/tracing/events/sched/sched_wakeup_new/trigger + Creating and displaying a histogram based on those events is now just a matter of using the fields and new synthetic event in the tracing/events/synthetic directory, as usual:: @@ -2000,6 +2052,212 @@ hist trigger specification. Entries: 2 Dropped: 0 + - onmax(var).snapshot() + + The 'onmax(var).snapshot()' hist trigger action is invoked + whenever the value of 'var' associated with a histogram entry + exceeds the current maximum contained in that variable. + + The end result is that a global snapshot of the trace buffer will + be saved in the tracing/snapshot file if 'var' exceeds the current + maximum for any hist trigger entry. + + Note that in this case the maximum is a global maximum for the + current trace instance, which is the maximum across all buckets of + the histogram. The key of the specific trace event that caused + the global maximum and the global maximum itself are displayed, + along with a message stating that a snapshot has been taken and + where to find it. 
The user can use the key information displayed + to locate the corresponding bucket in the histogram for even more + detail. + + As an example the below defines a couple of hist triggers, one for + sched_waking and another for sched_switch, keyed on pid. Whenever + a sched_waking event occurs, the timestamp is saved in the entry + corresponding to the current pid, and when the scheduler switches + back to that pid, the timestamp difference is calculated. If the + resulting latency, stored in wakeup_lat, exceeds the current + maximum latency, a snapshot is taken. As part of the setup, all + the scheduler events are also enabled, which are the events that + will show up in the snapshot when it is taken at some point: + + # echo 1 > /sys/kernel/debug/tracing/events/sched/enable + + # echo 'hist:keys=pid:ts0=common_timestamp.usecs \ + if comm=="cyclictest"' >> \ + /sys/kernel/debug/tracing/events/sched/sched_waking/trigger + + # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0: \ + onmax($wakeup_lat).save(next_prio,next_comm,prev_pid,prev_prio, \ + prev_comm):onmax($wakeup_lat).snapshot() \ + if next_comm=="cyclictest"' >> \ + /sys/kernel/debug/tracing/events/sched/sched_switch/trigger + + When the histogram is displayed, for each bucket the max value + and the saved values corresponding to the max are displayed + following the rest of the fields. + + If a snaphot was taken, there is also a message indicating that, + along with the value and event that triggered the global maximum: + + # cat /sys/kernel/debug/tracing/events/sched/sched_switch/hist + { next_pid: 2101 } hitcount: 200 + max: 52 next_prio: 120 next_comm: cyclictest \ + prev_pid: 0 prev_prio: 120 prev_comm: swapper/6 + + { next_pid: 2103 } hitcount: 1326 + max: 572 next_prio: 19 next_comm: cyclictest \ + prev_pid: 0 prev_prio: 120 prev_comm: swapper/1 + + { next_pid: 2102 } hitcount: 1982 \ + max: 74 next_prio: 19 next_comm: cyclictest \ + prev_pid: 0 prev_prio: 120 prev_comm: swapper/5 + + Snapshot taken (see tracing/snapshot). Details: + triggering value { onmax($wakeup_lat) }: 572 \ + triggered by event with key: { next_pid: 2103 } + + Totals: + Hits: 3508 + Entries: 3 + Dropped: 0 + + In the above case, the event that triggered the global maximum has + the key with next_pid == 2103. If you look at the bucket that has + 2103 as the key, you'll find the additional values save()'d along + with the local maximum for that bucket, which should be the same + as the global maximum (since that was the same value that + triggered the global snapshot). 
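
The two echo commands above can equally be issued from a monitoring tool; the sketch below installs the same sched_waking/sched_switch triggers programmatically. The trigger strings are those shown above, except that the save() action is omitted for brevity, and the usual debugfs mount point is assumed::

    /* Install a simplified variant of the wakeup-latency hist triggers. */
    #include <stdio.h>

    static int append_line(const char *path, const char *line)
    {
            FILE *f = fopen(path, "a");

            if (!f) {
                    perror(path);
                    return -1;
            }
            fprintf(f, "%s\n", line);
            fclose(f);
            return 0;
    }

    int main(void)
    {
            const char *ev = "/sys/kernel/debug/tracing/events/sched";
            char path[256];

            snprintf(path, sizeof(path), "%s/sched_waking/trigger", ev);
            append_line(path, "hist:keys=pid:ts0=common_timestamp.usecs"
                              " if comm==\"cyclictest\"");

            snprintf(path, sizeof(path), "%s/sched_switch/trigger", ev);
            append_line(path, "hist:keys=next_pid:"
                              "wakeup_lat=common_timestamp.usecs-$ts0:"
                              "onmax($wakeup_lat).snapshot()"
                              " if next_comm==\"cyclictest\"");
            return 0;
    }
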
+ + And finally, looking at the snapshot data should show at or near + the end the event that triggered the snapshot (in this case you + can verify the timestamps between the sched_waking and + sched_switch events, which should match the time displayed in the + global maximum): + + # cat /sys/kernel/debug/tracing/snapshot + + <...>-2103 [005] d..3 309.873125: sched_switch: prev_comm=cyclictest prev_pid=2103 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120 + -0 [005] d.h3 309.873611: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005 + -0 [005] dNh4 309.873613: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005 + -0 [005] d..3 309.873616: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19 + <...>-2102 [005] d..3 309.873625: sched_switch: prev_comm=cyclictest prev_pid=2102 prev_prio=19 prev_state=D ==> next_comm=swapper/5 next_pid=0 next_prio=120 + -0 [005] d.h3 309.874624: sched_waking: comm=cyclictest pid=2102 prio=19 target_cpu=005 + -0 [005] dNh4 309.874626: sched_wakeup: comm=cyclictest pid=2102 prio=19 target_cpu=005 + -0 [005] dNh3 309.874628: sched_waking: comm=cyclictest pid=2103 prio=19 target_cpu=005 + -0 [005] dNh4 309.874630: sched_wakeup: comm=cyclictest pid=2103 prio=19 target_cpu=005 + -0 [005] d..3 309.874633: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2102 next_prio=19 + -0 [004] d.h3 309.874757: sched_waking: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004 + -0 [004] dNh4 309.874762: sched_wakeup: comm=gnome-terminal- pid=1699 prio=120 target_cpu=004 + -0 [004] d..3 309.874766: sched_switch: prev_comm=swapper/4 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=gnome-terminal- next_pid=1699 next_prio=120 + gnome-terminal--1699 [004] d.h2 309.874941: sched_stat_runtime: comm=gnome-terminal- pid=1699 runtime=180706 [ns] vruntime=1126870572 [ns] + -0 [003] d.s4 309.874956: sched_waking: comm=rcu_sched pid=9 prio=120 target_cpu=007 + -0 [003] d.s5 309.874960: sched_wake_idle_without_ipi: cpu=7 + -0 [003] d.s5 309.874961: sched_wakeup: comm=rcu_sched pid=9 prio=120 target_cpu=007 + -0 [007] d..3 309.874963: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=rcu_sched next_pid=9 next_prio=120 + rcu_sched-9 [007] d..3 309.874973: sched_stat_runtime: comm=rcu_sched pid=9 runtime=13646 [ns] vruntime=22531430286 [ns] + rcu_sched-9 [007] d..3 309.874978: sched_switch: prev_comm=rcu_sched prev_pid=9 prev_prio=120 prev_state=R+ ==> next_comm=swapper/7 next_pid=0 next_prio=120 + <...>-2102 [005] d..4 309.874994: sched_migrate_task: comm=cyclictest pid=2103 prio=19 orig_cpu=5 dest_cpu=1 + <...>-2102 [005] d..4 309.875185: sched_wake_idle_without_ipi: cpu=1 + -0 [001] d..3 309.875200: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=S ==> next_comm=cyclictest next_pid=2103 next_prio=19 + + - onchange(var).save(field,.. .) + + The 'onchange(var).save(field,...)' hist trigger action is invoked + whenever the value of 'var' associated with a histogram entry + changes. + + The end result is that the trace event fields specified as the + onchange.save() params will be saved if 'var' changes for that + hist trigger entry. This allows context from the event that + changed the value to be saved for later reference. When the + histogram is displayed, additional fields displaying the saved + values will be printed. 
+ + - onchange(var).snapshot() + + The 'onchange(var).snapshot()' hist trigger action is invoked + whenever the value of 'var' associated with a histogram entry + changes. + + The end result is that a global snapshot of the trace buffer will + be saved in the tracing/snapshot file if 'var' changes for any + hist trigger entry. + + Note that in this case the changed value is a global variable + associated withe current trace instance. The key of the specific + trace event that caused the value to change and the global value + itself are displayed, along with a message stating that a snapshot + has been taken and where to find it. The user can use the key + information displayed to locate the corresponding bucket in the + histogram for even more detail. + + As an example the below defines a hist trigger on the tcp_probe + event, keyed on dport. Whenever a tcp_probe event occurs, the + cwnd field is checked against the current value stored in the + $cwnd variable. If the value has changed, a snapshot is taken. + As part of the setup, all the scheduler and tcp events are also + enabled, which are the events that will show up in the snapshot + when it is taken at some point: + + # echo 1 > /sys/kernel/debug/tracing/events/sched/enable + # echo 1 > /sys/kernel/debug/tracing/events/tcp/enable + + # echo 'hist:keys=dport:cwnd=snd_cwnd: \ + onchange($cwnd).save(snd_wnd,srtt,rcv_wnd): \ + onchange($cwnd).snapshot()' >> \ + /sys/kernel/debug/tracing/events/tcp/tcp_probe/trigger + + When the histogram is displayed, for each bucket the tracked value + and the saved values corresponding to that value are displayed + following the rest of the fields. + + If a snaphot was taken, there is also a message indicating that, + along with the value and event that triggered the snapshot: + + # cat /sys/kernel/debug/tracing/events/tcp/tcp_probe/hist + { dport: 1521 } hitcount: 8 + changed: 10 snd_wnd: 35456 srtt: 154262 rcv_wnd: 42112 + + { dport: 80 } hitcount: 23 + changed: 10 snd_wnd: 28960 srtt: 19604 rcv_wnd: 29312 + + { dport: 9001 } hitcount: 172 + changed: 10 snd_wnd: 48384 srtt: 260444 rcv_wnd: 55168 + + { dport: 443 } hitcount: 211 + changed: 10 snd_wnd: 26960 srtt: 17379 rcv_wnd: 28800 + + Snapshot taken (see tracing/snapshot). Details: + triggering value { onchange($cwnd) }: 10 + triggered by event with key: { dport: 80 } + + Totals: + Hits: 414 + Entries: 4 + Dropped: 0 + + In the above case, the event that triggered the snapshot has the + key with dport == 80. If you look at the bucket that has 80 as + the key, you'll find the additional values save()'d along with the + changed value for that bucket, which should be the same as the + global changed value (since that was the same value that triggered + the global snapshot). 
+ + And finally, looking at the snapshot data should show at or near + the end the event that triggered the snapshot: + + # cat /sys/kernel/debug/tracing/snapshot + + gnome-shell-1261 [006] dN.3 49.823113: sched_stat_runtime: comm=gnome-shell pid=1261 runtime=49347 [ns] vruntime=1835730389 [ns] + kworker/u16:4-773 [003] d..3 49.823114: sched_switch: prev_comm=kworker/u16:4 prev_pid=773 prev_prio=120 prev_state=R+ ==> next_comm=kworker/3:2 next_pid=135 next_prio=120 + gnome-shell-1261 [006] d..3 49.823114: sched_switch: prev_comm=gnome-shell prev_pid=1261 prev_prio=120 prev_state=R+ ==> next_comm=kworker/6:2 next_pid=387 next_prio=120 + kworker/3:2-135 [003] d..3 49.823118: sched_stat_runtime: comm=kworker/3:2 pid=135 runtime=5339 [ns] vruntime=17815800388 [ns] + kworker/6:2-387 [006] d..3 49.823120: sched_stat_runtime: comm=kworker/6:2 pid=387 runtime=9594 [ns] vruntime=14589605367 [ns] + kworker/6:2-387 [006] d..3 49.823122: sched_switch: prev_comm=kworker/6:2 prev_pid=387 prev_prio=120 prev_state=R+ ==> next_comm=gnome-shell next_pid=1261 next_prio=120 + kworker/3:2-135 [003] d..3 49.823123: sched_switch: prev_comm=kworker/3:2 prev_pid=135 prev_prio=120 prev_state=T ==> next_comm=swapper/3 next_pid=0 next_prio=120 + -0 [004] ..s7 49.823798: tcp_probe: src=10.0.0.10:54326 dest=23.215.104.193:80 mark=0x0 length=32 snd_nxt=0xe3ae2ff5 snd_una=0xe3ae2ecd snd_cwnd=10 ssthresh=2147483647 snd_wnd=28960 srtt=19604 rcv_wnd=29312 + 3. User space creating a trigger -------------------------------- diff --git a/Documentation/trace/uprobetracer.rst b/Documentation/trace/uprobetracer.rst index 4c3bfde2ba47..4346e23e3ae7 100644 --- a/Documentation/trace/uprobetracer.rst +++ b/Documentation/trace/uprobetracer.rst @@ -73,10 +73,9 @@ For $comm, the default type is "string"; any other type is invalid. Event Profiling --------------- -You can check the total number of probe hits and probe miss-hits via -/sys/kernel/debug/tracing/uprobe_profile. -The first column is event name, the second is the number of probe hits, -the third is the number of probe miss-hits. +You can check the total number of probe hits per event via +/sys/kernel/debug/tracing/uprobe_profile. The first column is the filename, +the second is the event name, the third is the number of probe hits. Usage examples -------------- diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst index 80f5ffc94a9e..b37166817842 100644 --- a/Documentation/translations/it_IT/admin-guide/README.rst +++ b/Documentation/translations/it_IT/admin-guide/README.rst @@ -4,7 +4,7 @@ .. _it_readme: -Rilascio del kernel Linux 4.x +Rilascio del kernel Linux 5.x =================================================== .. warning:: diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst index 474b7e127893..793b5cc33403 100644 --- a/Documentation/translations/it_IT/doc-guide/sphinx.rst +++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst @@ -3,6 +3,8 @@ .. note:: Per leggere la documentazione originale in inglese: :ref:`Documentation/doc-guide/index.rst ` +.. 
_it_sphinxdoc: + Introduzione ============ diff --git a/Documentation/translations/it_IT/process/4.Coding.rst b/Documentation/translations/it_IT/process/4.Coding.rst index c61059015e52..c05b89e616dd 100644 --- a/Documentation/translations/it_IT/process/4.Coding.rst +++ b/Documentation/translations/it_IT/process/4.Coding.rst @@ -264,7 +264,7 @@ La maggior parte di queste opzioni possono essere attivate per qualsiasi kernel utilizzato per lo sviluppo o a scopo di test. In particolare dovreste attivare: - - ENABLE_WARN_DEPRECATED, ENABLE_MUST_CHECK, e FRAME_WARN per ottenere degli + - ENABLE_MUST_CHECK e FRAME_WARN per ottenere degli avvertimenti dedicati a problemi come l'uso di interfacce deprecate o l'ignorare un importante valore di ritorno di una funzione. Il risultato generato da questi avvertimenti può risultare verboso, ma non bisogna diff --git a/Documentation/translations/it_IT/process/applying-patches.rst b/Documentation/translations/it_IT/process/applying-patches.rst index f5e9c7d0b16d..1d30e5cd2a3d 100644 --- a/Documentation/translations/it_IT/process/applying-patches.rst +++ b/Documentation/translations/it_IT/process/applying-patches.rst @@ -1,13 +1,15 @@ .. include:: ../disclaimer-ita.rst :Original: :ref:`Documentation/process/applying-patches.rst ` - +:Translator: Federico Vaga .. _it_applying_patches: -Applicare modifiche al kernel Linux -=================================== +Applicare patch al kernel Linux ++++++++++++++++++++++++++++++++ -.. warning:: +.. note:: - TODO ancora da tradurre + Questo documento è obsoleto. Nella maggior parte dei casi, piuttosto + che usare ``patch`` manualmente, vorrete usare Git. Per questo motivo + il documento non verrà tradotto. diff --git a/Documentation/translations/it_IT/process/changes.rst b/Documentation/translations/it_IT/process/changes.rst index 956cf95a1214..d0874327f301 100644 --- a/Documentation/translations/it_IT/process/changes.rst +++ b/Documentation/translations/it_IT/process/changes.rst @@ -1,12 +1,495 @@ .. include:: ../disclaimer-ita.rst :Original: :ref:`Documentation/process/changes.rst ` +:Translator: Federico Vaga .. _it_changes: Requisiti minimi per compilare il kernel ++++++++++++++++++++++++++++++++++++++++ -.. warning:: +Introduzione +============ - TODO ancora da tradurre +Questo documento fornisce una lista dei software necessari per eseguire i +kernel 4.x. + +Questo documento è basato sul file "Changes" del kernel 2.0.x e quindi le +persone che lo scrissero meritano credito (Jared Mauch, Axel Boldt, +Alessandro Sigala, e tanti altri nella rete). + +Requisiti minimi correnti +************************* + +Prima di pensare d'avere trovato un baco, aggiornate i seguenti programmi +**almeno** alla versione indicata! Se non siete certi della versione che state +usando, il comando indicato dovrebbe dirvelo. + +Questa lista presume che abbiate già un kernel Linux funzionante. In aggiunta, +non tutti gli strumenti sono necessari ovunque; ovviamente, se non avete un +modem ISDN, per esempio, probabilmente non dovreste preoccuparvi di +isdn4k-utils. 
+ +====================== ================= ======================================== + Programma Versione minima Comando per verificare la versione +====================== ================= ======================================== +GNU C 4.6 gcc --version +GNU make 3.81 make --version +binutils 2.20 ld -v +flex 2.5.35 flex --version +bison 2.0 bison --version +util-linux 2.10o fdformat --version +kmod 13 depmod -V +e2fsprogs 1.41.4 e2fsck -V +jfsutils 1.1.3 fsck.jfs -V +reiserfsprogs 3.6.3 reiserfsck -V +xfsprogs 2.6.0 xfs_db -V +squashfs-tools 4.0 mksquashfs -version +btrfs-progs 0.18 btrfsck +pcmciautils 004 pccardctl -V +quota-tools 3.09 quota -V +PPP 2.4.0 pppd --version +isdn4k-utils 3.1pre1 isdnctrl 2>&1|grep version +nfs-utils 1.0.5 showmount --version +procps 3.2.0 ps --version +oprofile 0.9 oprofiled --version +udev 081 udevd --version +grub 0.93 grub --version || grub-install --version +mcelog 0.6 mcelog --version +iptables 1.4.2 iptables -V +openssl & libcrypto 1.0.0 openssl version +bc 1.06.95 bc --version +Sphinx\ [#f1]_ 1.3 sphinx-build --version +====================== ================= ======================================== + +.. [#f1] Sphinx è necessario solo per produrre la documentazione del Kernel + +Compilazione del kernel +*********************** + +GCC +--- + +La versione necessaria di gcc potrebbe variare a seconda del tipo di CPU nel +vostro calcolatore. + +Make +---- + +Per compilare il kernel vi servirà GNU make 3.81 o successivo. + +Binutils +-------- + +Il sistema di compilazione, dalla versione 4.13, per la produzione dei passi +intermedi, si è convertito all'uso di *thin archive* (`ar T`) piuttosto che +all'uso del *linking* incrementale (`ld -r`). Questo richiede binutils 2.20 o +successivo. + +pkg-config +---------- + +Il sistema di compilazione, dalla versione 4.18, richiede pkg-config per +verificare l'esistenza degli strumenti kconfig e per determinare le +impostazioni da usare in 'make {g,x}config'. Precedentemente pkg-config +veniva usato ma non verificato o documentato. + +Flex +---- + +Dalla versione 4.16, il sistema di compilazione, durante l'esecuzione, genera +un analizzatore lessicale. Questo richiede flex 2.5.35 o successivo. + +Bison +----- + +Dalla versione 4.16, il sistema di compilazione, durante l'esecuzione, genera +un parsificatore. Questo richiede bison 2.0 o successivo. + +Perl +---- + +Per compilare il kernel vi servirà perl 5 e i seguenti moduli ``Getopt::Long``, +``Getopt::Std``, ``File::Basename``, e ``File::Find``. + +BC +-- + +Vi servirà bc per compilare i kernel dal 3.10 in poi. + +OpenSSL +------- + +Il programma OpenSSL e la libreria crypto vengono usati per la firma dei moduli +e la gestione dei certificati; sono usati per la creazione della chiave e +la generazione della firma. + +Se la firma dei moduli è abilitata, allora vi servirà openssl per compilare il +kernel 3.7 e successivi. Vi serviranno anche i pacchetti di sviluppo di +openssl per compilare il kernel 4.3 o successivi. + + +Strumenti di sistema +******************** + +Modifiche architetturali +------------------------ + +DevFS è stato reso obsoleto da udev +(http://www.kernel.org/pub/linux/utils/kernel/hotplug/) + +Il supporto per UID a 32-bit è ora disponibile. Divertitevi! + +La documentazione delle funzioni in Linux è una fase di transizione +verso una documentazione integrata nei sorgenti stessi usando dei commenti +formattati in modo speciale e posizionati vicino alle funzioni che descrivono. 
+Al fine di arricchire la documentazione, questi commenti possono essere +combinati con i file ReST presenti in Documentation/; questi potranno +poi essere convertiti in formato PostScript, HTML, LaTex, ePUB o PDF. +Per convertire i documenti da ReST al formato che volete, avete bisogno di +Sphinx. + +Util-linux +---------- + +Le versioni più recenti di util-linux: forniscono il supporto a ``fdisk`` per +dischi di grandi dimensioni; supportano le nuove opzioni di mount; riconoscono +più tipi di partizioni; hanno un fdformat che funziona con i kernel 2.4; +e altre chicche. Probabilmente vorrete aggiornarlo. + +Ksymoops +-------- + +Se l'impensabile succede e il kernel va in oops, potrebbe servirvi lo strumento +ksymoops per decodificarlo, ma nella maggior parte dei casi non vi servirà. +Generalmente è preferibile compilare il kernel con l'opzione ``CONFIG_KALLSYMS`` +cosicché venga prodotto un output più leggibile che può essere usato così com'è +(produce anche un output migliore di ksymoops). Se per qualche motivo il +vostro kernel non è stato compilato con ``CONFIG_KALLSYMS`` e non avete modo di +ricompilarlo e riprodurre l'oops con quell'opzione abilitata, allora potete +usare ksymoops per decodificare l'oops. + +Mkinitrd +-------- + +I cambiamenti della struttura in ``/lib/modules`` necessita l'aggiornamento di +mkinitrd. + +E2fsprogs +--------- + +L'ultima versione di ``e2fsprogs`` corregge diversi bachi in fsck e debugfs. +Ovviamente, aggiornarlo è una buona idea. + +JFSutils +-------- + +Il pacchetto ``jfsutils`` contiene programmi per il file-system JFS. +Sono disponibili i seguenti strumenti: + +- ``fsck.jfs`` - avvia la ripetizione del log delle transizioni, e verifica e + ripara una partizione formattata secondo JFS + +- ``mkfs.jfs`` - crea una partizione formattata secondo JFS + +- sono disponibili altri strumenti per il file-system. + +Reiserfsprogs +------------- + +Il pacchetto reiserfsprogs dovrebbe essere usato con reiserfs-3.6.x (Linux +kernel 2.4.x). Questo è un pacchetto combinato che contiene versioni +funzionanti di ``mkreiserfs``, ``resize_reiserfs``, ``debugreiserfs`` e +``reiserfsck``. Questi programmi funzionano sulle piattaforme i386 e alpha. + +Xfsprogs +-------- + +L'ultima versione di ``xfsprogs`` contiene, fra i tanti, i programmi +``mkfs.xfs``, ``xfs_db`` e ``xfs_repair`` per il file-system XFS. +Dipendono dell'architettura e qualsiasi versione dalla 2.0.0 in poi +dovrebbe funzionare correttamente con la versione corrente del codice +XFS nel kernel (sono raccomandate le versioni 2.6.0 o successive per via +di importanti miglioramenti). + +PCMCIAutils +----------- + +PCMCIAutils sostituisce ``pcmica-cs``. Serve ad impostare correttamente i +connettori PCMCIA all'avvio del sistema e a caricare i moduli necessari per +i dispositivi a 16-bit se il kernel è stato modularizzato e il sottosistema +hotplug è in uso. + +Quota-tools +----------- + +Il supporto per uid e gid a 32 bit richiedono l'uso della versione 2 del +formato quota. La versione 3.07 e successive di quota-tools supportano +questo formato. Usate la versione raccomandata nella lista qui sopra o una +successiva. + +Micro codice per Intel IA32 +--------------------------- + +Per poter aggiornare il micro codice per Intel IA32, è stato aggiunto un +apposito driver; il driver è accessibile come un normale dispositivo a +caratteri (misc). 
Se non state usando udev probabilmente sarà necessario +eseguire i seguenti comandi come root prima di poterlo aggiornare:: + + mkdir /dev/cpu + mknod /dev/cpu/microcode c 10 184 + chmod 0644 /dev/cpu/microcode + +Probabilmente, vorrete anche il programma microcode_ctl da usare con questo +dispositivo. + +udev +---- + +``udev`` è un programma in spazio utente il cui scopo è quello di popolare +dinamicamente la cartella ``/dev`` coi dispositivi effettivamente presenti. +``udev`` sostituisce le funzionalità base di devfs, consentendo comunque +nomi persistenti per i dispositivi. + +FUSE +---- + +Serve libfuse 2.4.0 o successiva. Il requisito minimo assoluto è 2.3.0 ma +le opzioni di mount ``direct_io`` e ``kernel_cache`` non funzioneranno. + + +Rete +**** + +Cambiamenti generali +-------------------- + +Se per quanto riguarda la configurazione di rete avete esigenze di un certo +livello dovreste prendere in considerazione l'uso degli strumenti in ip-route2. + +Filtro dei pacchetti / NAT +-------------------------- + +Il codice per filtraggio dei pacchetti e il NAT fanno uso degli stessi +strumenti come nelle versioni del kernel antecedenti la 2.4.x (iptables). +Include ancora moduli di compatibilità per 2.2.x ipchains e 2.0.x ipdwadm. + +PPP +--- + +Il driver per PPP è stato ristrutturato per supportare collegamenti multipli e +per funzionare su diversi livelli. Se usate PPP, aggiornate pppd almeno alla +versione 2.4.0. + +Se non usate udev, dovete avere un file /dev/ppp che può essere creato da root +col seguente comando:: + + mknod /dev/ppp c 108 0 + +Isdn4k-utils +------------ + +Per via della modifica del campo per il numero di telefono, il pacchetto +isdn4k-utils dev'essere ricompilato o (preferibilmente) aggiornato. + +NFS-utils +--------- + +Nei kernel più antichi (2.4 e precedenti), il server NFS doveva essere +informato sui clienti ai quali si voleva fornire accesso via NFS. Questa +informazione veniva passata al kernel quando un cliente montava un file-system +mediante ``mountd``, oppure usando ``exportfs`` all'avvio del sistema. +exportfs prende le informazioni circa i clienti attivi da ``/var/lib/nfs/rmtab``. + +Questo approccio è piuttosto delicato perché dipende dalla correttezza di +rmtab, che non è facile da garantire, in particolare quando si cerca di +implementare un *failover*. Anche quando il sistema funziona bene, ``rmtab`` +ha il problema di accumulare vecchie voci inutilizzate. + +Sui kernel più recenti il kernel ha la possibilità di informare mountd quando +arriva una richiesta da una macchina sconosciuta, e mountd può dare al kernel +le informazioni corrette per l'esportazione. Questo rimuove la dipendenza con +``rmtab`` e significa che il kernel deve essere al corrente solo dei clienti +attivi. + +Per attivare questa funzionalità, dovete eseguire il seguente comando prima di +usare exportfs o mountd:: + + mount -t nfsd nfsd /proc/fs/nfsd + +Dove possibile, raccomandiamo di proteggere tutti i servizi NFS dall'accesso +via internet mediante un firewall. + +mcelog +------ + +Quando ``CONFIG_x86_MCE`` è attivo, il programma mcelog processa e registra +gli eventi *machine check*. Gli eventi *machine check* sono errori riportati +dalla CPU. Incoraggiamo l'analisi di questi errori. 
+ + +Documentazione del kernel +************************* + +Sphinx +------ + +Per i dettaglio sui requisiti di Sphinx, fate riferimento a :ref:`it_sphinx_install` +in :ref:`Documentation/translations/it_IT/doc-guide/sphinx.rst ` + +Ottenere software aggiornato +============================ + +Compilazione del kernel +*********************** + +gcc +--- + +- + +Make +---- + +- + +Binutils +-------- + +- + +Flex +---- + +- + +Bison +----- + +- + +OpenSSL +------- + +- + +Strumenti di sistema +******************** + +Util-linux +---------- + +- + +Kmod +---- + +- +- + +Ksymoops +-------- + +- + +Mkinitrd +-------- + +- + +E2fsprogs +--------- + +- + +JFSutils +-------- + +- + +Reiserfsprogs +------------- + +- + +Xfsprogs +-------- + +- + +Pcmciautils +----------- + +- + +Quota-tools +----------- + +- + + +Microcodice Intel P6 +-------------------- + +- + +udev +---- + +- + +FUSE +---- + +- + +mcelog +------ + +- + +Rete +**** + +PPP +--- + +- + +Isdn4k-utils +------------ + +- + +NFS-utils +--------- + +- + +Iptables +-------- + +- + +Ip-route2 +--------- + +- + +OProfile +-------- + +- + +NFS-Utils +--------- + +- + +Documentazione del kernel +************************* + +Sphinx +------ + +- diff --git a/Documentation/translations/it_IT/process/coding-style.rst b/Documentation/translations/it_IT/process/coding-style.rst index b707bdbe178c..2fd0e7f79d55 100644 --- a/Documentation/translations/it_IT/process/coding-style.rst +++ b/Documentation/translations/it_IT/process/coding-style.rst @@ -449,6 +449,9 @@ Nonostante questo non sia richiesto dal linguaggio C, in Linux viene preferito perché è un modo semplice per aggiungere informazioni importanti per il lettore. +Non usate la parola chiave ``extern`` coi prototipi di funzione perché +rende le righe più lunghe e non è strettamente necessario. + 7) Centralizzare il ritorno delle funzioni ------------------------------------------ @@ -600,26 +603,43 @@ segue nel vostro file .emacs: (* (max steps 1) c-basic-offset))) - (add-hook 'c-mode-common-hook - (lambda () - ;; Add kernel style - (c-add-style - "linux-tabs-only" - '("linux" (c-offsets-alist - (arglist-cont-nonempty - c-lineup-gcc-asm-reg - c-lineup-arglist-tabs-only)))))) - - (add-hook 'c-mode-hook - (lambda () - (let ((filename (buffer-file-name))) - ;; Enable kernel mode for the appropriate files - (when (and filename - (string-match (expand-file-name "~/src/linux-trees") - filename)) - (setq indent-tabs-mode t) - (setq show-trailing-whitespace t) - (c-set-style "linux-tabs-only"))))) + (dir-locals-set-class-variables + 'linux-kernel + '((c-mode . ( + (c-basic-offset . 8) + (c-label-minimum-indentation . 0) + (c-offsets-alist . ( + (arglist-close . c-lineup-arglist-tabs-only) + (arglist-cont-nonempty . + (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only)) + (arglist-intro . +) + (brace-list-intro . +) + (c . c-lineup-C-comments) + (case-label . 0) + (comment-intro . c-lineup-comment) + (cpp-define-intro . +) + (cpp-macro . -1000) + (cpp-macro-cont . +) + (defun-block-intro . +) + (else-clause . 0) + (func-decl-cont . +) + (inclass . +) + (inher-cont . c-lineup-multi-inher) + (knr-argdecl-intro . 0) + (label . -1000) + (statement . 0) + (statement-block-intro . +) + (statement-case-intro . +) + (statement-cont . +) + (substatement . +) + )) + (indent-tabs-mode . t) + (show-trailing-whitespace . 
t) + )))) + + (dir-locals-set-directory-class + (expand-file-name "~/src/linux-trees") + 'linux-kernel) Questo farà funzionare meglio emacs con lo stile del kernel per i file che si trovano nella cartella ``~/src/linux-trees``. @@ -929,7 +949,40 @@ qualche valore fuori dai limiti. Un tipico esempio è quello delle funzioni che ritornano un puntatore; queste utilizzano NULL o ERR_PTR come meccanismo di notifica degli errori. -17) Non reinventate le macro del kernel +17) L'uso di bool +----------------- + +Nel kernel Linux il tipo bool deriva dal tipo _Bool dello standard C99. +Un valore bool può assumere solo i valori 0 o 1, e implicitamente o +esplicitamente la conversione a bool converte i valori in vero (*true*) o +falso (*false*). Quando si usa un tipo bool il costrutto !! non sarà più +necessario, e questo va ad eliminare una certa serie di bachi. + +Quando si usano i valori booleani, dovreste utilizzare le definizioni di true +e false al posto dei valori 1 e 0. + +Per il valore di ritorno delle funzioni e per le variabili sullo stack, l'uso +del tipo bool è sempre appropriato. L'uso di bool viene incoraggiato per +migliorare la leggibilità e spesso è molto meglio di 'int' nella gestione di +valori booleani. + +Non usate bool se per voi sono importanti l'ordine delle righe di cache o +la loro dimensione; la dimensione e l'allineamento cambia a seconda +dell'architettura per la quale è stato compilato. Le strutture che sono state +ottimizzate per l'allineamento o la dimensione non dovrebbero usare bool. + +Se una struttura ha molti valori true/false, considerate l'idea di raggrupparli +in un intero usando campi da 1 bit, oppure usate un tipo dalla larghezza fissa, +come u8. + +Come per gli argomenti delle funzioni, molti valori true/false possono essere +raggruppati in un singolo argomento a bit denominato 'flags'; spesso 'flags' è +un'alternativa molto più leggibile se si hanno valori costanti per true/false. + +Detto ciò, un uso parsimonioso di bool nelle strutture dati e negli argomenti +può migliorare la leggibilità. + +18) Non reinventate le macro del kernel --------------------------------------- Il file di intestazione include/linux/kernel.h contiene un certo numero @@ -953,7 +1006,7 @@ rigido sui tipi. Sentitevi liberi di leggere attentamente questo file d'intestazione per scoprire cos'altro è stato definito che non dovreste reinventare nel vostro codice. -18) Linee di configurazione degli editor e altre schifezze +19) Linee di configurazione degli editor e altre schifezze ----------------------------------------------------------- Alcuni editor possono interpretare dei parametri di configurazione integrati @@ -987,8 +1040,8 @@ d'indentazione e di modalità d'uso. Le persone potrebbero aver configurato una modalità su misura, oppure potrebbero avere qualche altra magia per far funzionare bene l'indentazione. 
-19) Inline assembly ---------------------- +20) Inline assembly +------------------- Nel codice specifico per un'architettura, potreste aver bisogno di codice *inline assembly* per interfacciarvi col processore o con una funzionalità @@ -1020,7 +1073,7 @@ al fine di allineare correttamente l'assembler che verrà generato: "more_magic %reg2, %reg3" : /* outputs */ : /* inputs */ : /* clobbers */); -20) Compilazione sotto condizione +21) Compilazione sotto condizione --------------------------------- Ovunque sia possibile, non usate le direttive condizionali del preprocessore diff --git a/Documentation/translations/it_IT/process/howto.rst b/Documentation/translations/it_IT/process/howto.rst index 909e6a55bc43..9903ac7c566b 100644 --- a/Documentation/translations/it_IT/process/howto.rst +++ b/Documentation/translations/it_IT/process/howto.rst @@ -234,7 +234,7 @@ il progetto Linux Cross-Reference, che è in grado di presentare codice sorgente in un formato autoreferenziale ed indicizzato. Un eccellente ed aggiornata fonte di consultazione del codice del kernel la potete trovare qui: - http://lxr.free-electrons.com/ + https://elixir.bootlin.com/ Il processo di sviluppo @@ -244,7 +244,6 @@ e di molti altri rami per specifici sottosistemi. Questi rami sono: - I sorgenti kernel 4.x - I sorgenti stabili del kernel 4.x.y -stable - - Le modifiche in 4.x -git - Sorgenti dei sottosistemi del kernel e le loro modifiche - Il kernel 4.x -next per test d'integrazione @@ -313,16 +312,6 @@ Il file Documentation/process/stable-kernel-rules.rst (nei sorgenti) documenta quali tipologie di modifiche sono accettate per i sorgenti -stable, e come avviene il processo di rilascio. -Le modifiche in 4.x -git -~~~~~~~~~~~~~~~~~~~~~~~~ - -Queste sono istantanee quotidiane del kernel di Linus e sono gestite in -una repositorio git (da qui il nome). Queste modifiche sono solitamente -rilasciate giornalmente e rappresentano l'attuale stato dei sorgenti di -Linus. Queste sono da considerarsi più sperimentali di un -rc in quanto -generate automaticamente senza nemmeno aver dato una rapida occhiata -per verificarne lo stato. - Sorgenti dei sottosistemi del kernel e le loro patch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/translations/it_IT/process/stable-api-nonsense.rst b/Documentation/translations/it_IT/process/stable-api-nonsense.rst index d4fa4abf8dd3..cdfc509b8566 100644 --- a/Documentation/translations/it_IT/process/stable-api-nonsense.rst +++ b/Documentation/translations/it_IT/process/stable-api-nonsense.rst @@ -1,13 +1,209 @@ .. include:: ../disclaimer-ita.rst :Original: :ref:`Documentation/process/stable-api-nonsense.rst ` - +:Translator: Federico Vaga .. _it_stable_api_nonsense: L'interfaccia dei driver per il kernel Linux ============================================ -.. warning:: +(tutte le risposte alle vostre domande e altro) + +Greg Kroah-Hartman + +Questo è stato scritto per cercare di spiegare perché Linux **non ha +un'interfaccia binaria, e non ha nemmeno un'interfaccia stabile**. + +.. note:: + + Questo articolo parla di interfacce **interne al kernel**, non delle + interfacce verso lo spazio utente. + + L'interfaccia del kernel verso lo spazio utente è quella usata dai + programmi, ovvero le chiamate di sistema. Queste interfacce sono **molto** + stabili nel tempo e non verranno modificate. Ho vecchi programmi che sono + stati compilati su un kernel 0.9 (circa) e tuttora funzionano sulle versioni + 2.6 del kernel. 
Queste interfacce sono quelle che gli utenti e i + programmatori possono considerare stabili. + +Riepilogo generale +------------------ + +Pensate di volere un'interfaccia del kernel stabile, ma in realtà non la +volete, e nemmeno sapete di non volerla. Quello che volete è un driver +stabile che funzioni, e questo può essere ottenuto solo se il driver si trova +nei sorgenti del kernel. Ci sono altri vantaggi nell'avere il proprio driver +nei sorgenti del kernel, ognuno dei quali hanno reso Linux un sistema operativo +robusto, stabile e maturo; questi sono anche i motivi per cui avete scelto +Linux. + +Introduzione +------------ + +Solo le persone un po' strambe vorrebbero scrivere driver per il kernel con +la costante preoccupazione per i cambiamenti alle interfacce interne. Per il +resto del mondo, queste interfacce sono invisibili o non di particolare +interesse. + +Innanzitutto, non tratterò **alcun** problema legale riguardante codice +chiuso, nascosto, avvolto, blocchi binari, o qualsia altra cosa che descrive +driver che non hanno i propri sorgenti rilasciati con licenza GPL. Per favore +fate riferimento ad un avvocato per qualsiasi questione legale, io sono un +programmatore e perciò qui vi parlerò soltanto delle questioni tecniche (non +per essere superficiali sui problemi legali, sono veri e dovete esserne a +conoscenza in ogni circostanza). + +Dunque, ci sono due tematiche principali: interfacce binarie del kernel e +interfacce stabili nei sorgenti. Ognuna dipende dall'altra, ma discuteremo +prima delle cose binarie per toglierle di mezzo. + +Interfaccia binaria del kernel +------------------------------ + +Supponiamo d'avere un'interfaccia stabile nei sorgenti del kernel, di +conseguenza un'interfaccia binaria dovrebbe essere anche'essa stabile, giusto? +Sbagliato. Prendete in considerazione i seguenti fatti che riguardano il +kernel Linux: + + - A seconda della versione del compilatore C che state utilizzando, diverse + strutture dati del kernel avranno un allineamento diverso, e possibilmente + un modo diverso di includere le funzioni (renderle inline oppure no). + L'organizzazione delle singole funzioni non è poi così importante, ma la + spaziatura (*padding*) nelle strutture dati, invece, lo è. + + - In base alle opzioni che sono state selezionate per generare il kernel, + un certo numero di cose potrebbero succedere: + + - strutture dati differenti potrebbero contenere campi differenti + - alcune funzioni potrebbero non essere implementate (per esempio, + alcuni *lock* spariscono se compilati su sistemi mono-processore) + - la memoria interna del kernel può essere allineata in differenti modi + a seconda delle opzioni di compilazione. + + - Linux funziona su una vasta gamma di architetture di processore. Non esiste + alcuna possibilità che il binario di un driver per un'architettura funzioni + correttamente su un'altra. + +Alcuni di questi problemi possono essere risolti compilando il proprio modulo +con la stessa identica configurazione del kernel, ed usando la stessa versione +del compilatore usato per compilare il kernel. Questo è sufficiente se volete +fornire un modulo per uno specifico rilascio su una specifica distribuzione +Linux. 
Ma moltiplicate questa singola compilazione per il numero di +distribuzioni Linux e il numero dei rilasci supportati da quest'ultime e vi +troverete rapidamente in un incubo fatto di configurazioni e piattaforme +hardware (differenti processori con differenti opzioni); dunque, anche per il +singolo rilascio di un modulo, dovreste creare differenti versioni dello +stesso. + +Fidatevi, se tenterete questa via, col tempo, diventerete pazzi; l'ho imparato +a mie spese molto tempo fa... + + +Interfaccia stabile nei sorgenti del kernel +------------------------------------------- + +Se parlate con le persone che cercano di mantenere aggiornato un driver per +Linux ma che non si trova nei sorgenti, allora per queste persone l'argomento +sarà "ostico". + +Lo sviluppo del kernel Linux è continuo e viaggia ad un ritmo sostenuto, e non +rallenta mai. Perciò, gli sviluppatori del kernel trovano bachi nelle +interfacce attuali, o trovano modi migliori per fare le cose. Se le trovano, +allora le correggeranno per migliorarle. In questo frangente, i nomi delle +funzioni potrebbero cambiare, le strutture dati potrebbero diventare più grandi +o più piccole, e gli argomenti delle funzioni potrebbero essere ripensati. +Se questo dovesse succedere, nello stesso momento, tutte le istanze dove questa +interfaccia viene utilizzata verranno corrette, garantendo che tutto continui +a funzionare senza problemi. + +Portiamo ad esempio l'interfaccia interna per il sottosistema USB che ha subito +tre ristrutturazioni nel corso della sua vita. Queste ristrutturazioni furono +fatte per risolvere diversi problemi: + + - È stato fatto un cambiamento da un flusso di dati sincrono ad uno + asincrono. Questo ha ridotto la complessità di molti driver e ha + aumentato la capacità di trasmissione di tutti i driver fino a raggiungere + quasi la velocità massima possibile. + - È stato fatto un cambiamento nell'allocazione dei pacchetti da parte del + sottosistema USB per conto dei driver, cosicché ora i driver devono fornire + più informazioni al sottosistema USB al fine di correggere un certo numero + di stalli. + +Questo è completamente l'opposto di quello che succede in alcuni sistemi +operativi proprietari che hanno dovuto mantenere, nel tempo, il supporto alle +vecchie interfacce USB. I nuovi sviluppatori potrebbero usare accidentalmente +le vecchie interfacce e sviluppare codice nel modo sbagliato, portando, di +conseguenza, all'instabilità del sistema. + +In entrambe gli scenari, gli sviluppatori hanno ritenuto che queste importanti +modifiche erano necessarie, e quindi le hanno fatte con qualche sofferenza. +Se Linux avesse assicurato di mantenere stabile l'interfaccia interna, si +sarebbe dovuto procedere alla creazione di una nuova, e quelle vecchie, e +mal funzionanti, avrebbero dovuto ricevere manutenzione, creando lavoro +aggiuntivo per gli sviluppatori del sottosistema USB. Dato che gli +sviluppatori devono dedicare il proprio tempo a questo genere di lavoro, +chiedergli di dedicarne dell'altro, senza benefici, magari gratuitamente, non +è contemplabile. + +Le problematiche relative alla sicurezza sono molto importanti per Linux. +Quando viene trovato un problema di sicurezza viene corretto in breve tempo. +A volte, per prevenire il problema di sicurezza, si sono dovute cambiare +delle interfacce interne al kernel. 
Quando è successo, allo stesso tempo, +tutti i driver che usavano quelle interfacce sono stati aggiornati, garantendo +la correzione definitiva del problema senza doversi preoccupare di rivederlo +per sbaglio in futuro. Se non si fossero cambiate le interfacce interne, +sarebbe stato impossibile correggere il problema e garantire che non si sarebbe +più ripetuto. + +Nel tempo le interfacce del kernel subiscono qualche ripulita. Se nessuno +sta più usando un'interfaccia, allora questa verrà rimossa. Questo permette +al kernel di rimanere il più piccolo possibile, e garantisce che tutte le +potenziali interfacce sono state verificate nel limite del possibile (le +interfacce inutilizzate sono impossibili da verificare). + + +Cosa fare +--------- + +Dunque, se avete un driver per il kernel Linux che non si trova nei sorgenti +principali del kernel, come sviluppatori, cosa dovreste fare? Rilasciare un +file binario del driver per ogni versione del kernel e per ogni distribuzione, +è un incubo; inoltre, tenere il passo con tutti i cambiamenti del kernel è un +brutto lavoro. + +Semplicemente, fate sì che il vostro driver per il kernel venga incluso nei +sorgenti principali (ricordatevi, stiamo parlando di driver rilasciati secondo +una licenza compatibile con la GPL; se il vostro codice non ricade in questa +categoria: buona fortuna, arrangiatevi, siete delle sanguisughe) + +Se il vostro driver è nei sorgenti del kernel e un'interfaccia cambia, il +driver verrà corretto immediatamente dalla persona che l'ha modificata. Questo +garantisce che sia sempre possibile compilare il driver, che funzioni, e tutto +con un minimo sforzo da parte vostra. + +Avere il proprio driver nei sorgenti principali del kernel ha i seguenti +vantaggi: + + - La qualità del driver aumenterà e i costi di manutenzione (per lo + sviluppatore originale) diminuiranno. + - Altri sviluppatori aggiungeranno nuove funzionalità al vostro driver. + - Altri persone troveranno e correggeranno bachi nel vostro driver. + - Altri persone troveranno degli aggiustamenti da fare al vostro driver. + - Altri persone aggiorneranno il driver quando è richiesto da un cambiamento + di un'interfaccia. + - Il driver sarà automaticamente reso disponibile in tutte le distribuzioni + Linux senza dover chiedere a nessuna di queste di aggiungerlo. + +Dato che Linux supporta più dispositivi di qualsiasi altro sistema operativo, +e che girano su molti più tipi di processori di qualsiasi altro sistema +operativo; ciò dimostra che questo modello di sviluppo qualcosa di giusto, +dopo tutto, lo fa :) + + + +------ - TODO ancora da tradurre +Dei ringraziamenti vanno a Randy Dunlap, Andrew Morton, David Brownell, +Hanna Linder, Robert Love, e Nishanth Aravamudan per la loro revisione +e per i loro commenti sulle prime bozze di questo articolo. diff --git a/Documentation/translations/it_IT/process/submit-checklist.rst b/Documentation/translations/it_IT/process/submit-checklist.rst index b6b4dd94a660..70e65a7b3620 100644 --- a/Documentation/translations/it_IT/process/submit-checklist.rst +++ b/Documentation/translations/it_IT/process/submit-checklist.rst @@ -1,12 +1,131 @@ .. include:: ../disclaimer-ita.rst :Original: :ref:`Documentation/process/submit-checklist.rst ` +:Translator: Federico Vaga .. 
_it_submitchecklist: -Lista delle cose da fare per inviare una modifica al kernel Linux -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Lista delle verifiche da fare prima di inviare una patch per il kernel Linux +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. warning:: +Qui troverete una lista di cose che uno sviluppatore dovrebbe fare per +vedere le proprie patch accettate più rapidamente. - TODO ancora da tradurre +Tutti questi punti integrano la documentazione fornita riguardo alla +sottomissione delle patch, in particolare +:ref:`Documentation/translations/it_IT/process/submitting-patches.rst `. + +1) Se state usando delle funzionalità del kernel allora includete (#include) + i file che le dichiarano/definiscono. Non dipendente dal fatto che un file + d'intestazione include anche quelli usati da voi. + +2) Compilazione pulita: + + a) con le opzioni ``CONFIG`` negli stati ``=y``, ``=m`` e ``=n``. Nessun + avviso/errore di ``gcc`` e nessun avviso/errore dal linker. + + b) con ``allnoconfig``, ``allmodconfig`` + + c) quando si usa ``O=builddir`` + +3) Compilare per diverse architetture di processore usando strumenti per + la cross-compilazione o altri. + +4) Una buona architettura per la verifica della cross-compilazione è la ppc64 + perché tende ad usare ``unsigned long`` per le quantità a 64-bit. + +5) Controllate lo stile del codice della vostra patch secondo le direttive + scritte in :ref:`Documentation/translations/it_IT/process/coding-style.rst `. + Prima dell'invio della patch, usate il verificatore di stile + (``script/checkpatch.pl``) per scovare le violazioni più semplici. + Dovreste essere in grado di giustificare tutte le violazioni rimanenti nella + vostra patch. + +6) Le opzioni ``CONFIG``, nuove o modificate, non scombussolano il menu + di configurazione e sono preimpostate come disabilitate a meno che non + soddisfino i criteri descritti in ``Documentation/kbuild/kconfig-language.txt`` + alla punto "Voci di menu: valori predefiniti". + +7) Tutte le nuove opzioni ``Kconfig`` hanno un messaggio di aiuto. + +8) La patch è stata accuratamente revisionata rispetto alle più importanti + configurazioni ``Kconfig``. Questo è molto difficile da fare + correttamente - un buono lavoro di testa sarà utile. + +9) Verificare con sparse. + +10) Usare ``make checkstack`` e ``make namespacecheck`` e correggere tutti i + problemi rilevati. + + .. note:: + + ``checkstack`` non evidenzia esplicitamente i problemi, ma una funzione + che usa più di 512 byte sullo stack è una buona candidata per una + correzione. + +11) Includete commenti :ref:`kernel-doc ` per documentare API + globali del kernel. Usate ``make htmldocs`` o ``make pdfdocs`` per + verificare i commenti :ref:`kernel-doc ` ed eventualmente + correggerli. + +12) La patch è stata verificata con le seguenti opzioni abilitate + contemporaneamente: ``CONFIG_PREEMPT``, ``CONFIG_DEBUG_PREEMPT``, + ``CONFIG_DEBUG_SLAB``, ``CONFIG_DEBUG_PAGEALLOC``, ``CONFIG_DEBUG_MUTEXES``, + ``CONFIG_DEBUG_SPINLOCK``, ``CONFIG_DEBUG_ATOMIC_SLEEP``, + ``CONFIG_PROVE_RCU`` e ``CONFIG_DEBUG_OBJECTS_RCU_HEAD``. + +13) La patch è stata compilata e verificata in esecuzione con, e senza, + le opzioni ``CONFIG_SMP`` e ``CONFIG_PREEMPT``. + +14) Se la patch ha effetti sull'IO dei dischi, eccetera: allora dev'essere + verificata con, e senza, l'opzione ``CONFIG_LBDAF``. + +15) Tutti i percorsi del codice sono stati verificati con tutte le funzionalità + di lockdep abilitate. 
+ +16) Tutti i nuovi elementi in ``/proc`` sono documentati in ``Documentation/``. + +17) Tutti i nuovi parametri d'avvio del kernel sono documentati in + ``Documentation/admin-guide/kernel-parameters.rst``. + +18) Tutti i nuovi parametri dei moduli sono documentati con ``MODULE_PARM_DESC()``. + +19) Tutte le nuove interfacce verso lo spazio utente sono documentate in + ``Documentation/ABI/``. Leggete ``Documentation/ABI/README`` per maggiori + informazioni. Le patch che modificano le interfacce utente dovrebbero + essere inviate in copia anche a linux-api@vger.kernel.org. + +20) Verifica che il kernel passi con successo ``make headers_check`` + +21) La patch è stata verificata con l'iniezione di fallimenti in slab e + nell'allocazione di pagine. Vedere ``Documentation/fault-injection/``. + + Se il nuovo codice è corposo, potrebbe essere opportuno aggiungere + l'iniezione di fallimenti specifici per il sottosistema. + +22) Il nuovo codice è stato compilato con ``gcc -W`` (usate + ``make EXTRA_CFLAGS=-W``). Questo genererà molti avvisi, ma è ottimo + per scovare bachi come "warning: comparison between signed and unsigned". + +23) La patch è stata verificata dopo essere stata inclusa nella serie di patch + -mm; questo al fine di assicurarsi che continui a funzionare assieme a + tutte le altre patch in coda e i vari cambiamenti nei sottosistemi VM, VFS + e altri. + +24) Tutte le barriere di sincronizzazione {per esempio, ``barrier()``, + ``rmb()``, ``wmb()``} devono essere accompagnate da un commento nei + sorgenti che ne spieghi la logica: cosa fanno e perché. + +25) Se la patch aggiunge nuove chiamate ioctl, allora aggiornate + ``Documentation/ioctl/ioctl-number.txt``. + +26) Se il codice che avete modificato dipende o usa una qualsiasi interfaccia o + funzionalità del kernel che è associata a uno dei seguenti simboli + ``Kconfig``, allora verificate che il kernel compili con diverse + configurazioni dove i simboli sono disabilitati e/o ``=m`` (se c'è la + possibilità) [non tutti contemporaneamente, solo diverse combinazioni + casuali]: + + ``CONFIG_SMP``, ``CONFIG_SYSFS``, ``CONFIG_PROC_FS``, ``CONFIG_INPUT``, + ``CONFIG_PCI``, ``CONFIG_BLOCK``, ``CONFIG_PM``, ``CONFIG_MAGIC_SYSRQ``, + ``CONFIG_NET``, ``CONFIG_INET=n`` (ma l'ultimo con ``CONFIG_NET=y``). diff --git a/Documentation/translations/it_IT/process/submitting-drivers.rst b/Documentation/translations/it_IT/process/submitting-drivers.rst index 16df950ef808..dadd77e47613 100644 --- a/Documentation/translations/it_IT/process/submitting-drivers.rst +++ b/Documentation/translations/it_IT/process/submitting-drivers.rst @@ -1,12 +1,16 @@ .. include:: ../disclaimer-ita.rst :Original: :ref:`Documentation/process/submitting-drivers.rst ` +:Translator: Federico Vaga .. _it_submittingdrivers: Sottomettere driver per il kernel Linux ======================================= -.. warning:: +.. note:: - TODO ancora da tradurre + Questo documento è vecchio e negli ultimi anni non è stato più aggiornato; + dovrebbe essere aggiornato, o forse meglio, rimosso. La maggior parte di + quello che viene detto qui può essere trovato anche negli altri documenti + dedicati allo sviluppo. Per questo motivo il documento non verrà tradotto. 
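A complemento della lista di verifiche qui sopra, questo è uno schema puramente indicativo (e non esaustivo) di un giro minimo di prove prima dell'invio; il prefisso del cross-compilatore è ipotetico e va adattato al proprio ambiente::

    # compilazione pulita con una configurazione ampia e cartella di build separata (punto 2)
    make O=build-test allmodconfig
    make O=build-test -j$(nproc)

    # verifica con sparse dei file che vengono (ri)compilati (punto 9)
    make O=build-test C=1

    # controllo dell'uso dello stack (punto 10)
    make O=build-test checkstack

    # prova di cross-compilazione verso ppc64 (punti 3 e 4)
    make ARCH=powerpc CROSS_COMPILE=powerpc64-linux-gnu- O=build-ppc allmodconfig
    make ARCH=powerpc CROSS_COMPILE=powerpc64-linux-gnu- O=build-ppc -j$(nproc)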
diff --git a/Documentation/translations/it_IT/process/submitting-patches.rst b/Documentation/translations/it_IT/process/submitting-patches.rst index d633775ed556..2ab9c1401aa1 100644 --- a/Documentation/translations/it_IT/process/submitting-patches.rst +++ b/Documentation/translations/it_IT/process/submitting-patches.rst @@ -1,13 +1,867 @@ .. include:: ../disclaimer-ita.rst :Original: :ref:`Documentation/process/submitting-patches.rst ` - +:Translator: Federico Vaga .. _it_submittingpatches: -Sottomettere modifiche: la guida essenziale per vedere il vostro codice nel kernel -================================================================================== +Inviare patch: la guida essenziale per vedere il vostro codice nel kernel +========================================================================= + +Una persona o un'azienda che volesse inviare una patch al kernel potrebbe +sentirsi scoraggiata dal processo di sottomissione, specialmente quando manca +una certa familiarità col "sistema". Questo testo è una raccolta di +suggerimenti che aumenteranno significativamente le probabilità di vedere le +vostre patch accettate. + +Questo documento contiene un vasto numero di suggerimenti concisi. Per +maggiori dettagli su come funziona il processo di sviluppo del kernel leggete +:ref:`Documentation/translations/it_IT/process `. +Leggete anche :ref:`Documentation/translations/it_IT/process/submit-checklist.rst ` +per una lista di punti da verificare prima di inviare del codice. Se state +inviando un driver, allora leggete anche :ref:`Documentation/translations/it_IT/process/submitting-drivers.rst `; +per delle patch relative alle associazioni per Device Tree leggete +Documentation/devicetree/bindings/submitting-patches.txt. + +Molti di questi passi descrivono il comportamento di base del sistema di +controllo di versione ``git``; se utilizzate ``git`` per preparare le vostre +patch molto del lavoro più ripetitivo lo troverete già fatto per voi, tuttavia +dovete preparare e documentare un certo numero di patch. Generalmente, l'uso +di ``git`` renderà la vostra vita di sviluppatore del kernel più facile. + +0) Ottenere i sorgenti attuali +------------------------------ + +Se non avete un repositorio coi sorgenti del kernel più recenti, allora usate +``git`` per ottenerli. Vorrete iniziare col repositorio principale che può +essere recuperato col comando:: + + git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git + +Notate, comunque, che potreste non voler sviluppare direttamente coi sorgenti +principali del kernel. La maggior parte dei manutentori hanno i propri +sorgenti e desiderano che le patch siano preparate basandosi su di essi. +Guardate l'elemento **T:** per un determinato sottosistema nel file MAINTANERS +che troverete nei sorgenti, o semplicemente chiedete al manutentore nel caso +in cui i sorgenti da usare non siano elencati il quel file. + +Esiste ancora la possibilità di scaricare un rilascio del kernel come archivio +tar (come descritto in una delle prossime sezioni), ma questa è la via più +complicata per sviluppare per il kernel. + +1) ``diff -up`` +--------------- + +Se dovete produrre le vostre patch a mano, usate ``diff -up`` o ``diff -uprN`` +per crearle. Git produce di base le patch in questo formato; se state +usando ``git``, potete saltare interamente questa sezione. + +Tutte le modifiche al kernel Linux avvengono mediate patch, come descritte +in :manpage:`diff(1)`. 
Quando create la vostra patch, assicuratevi di +crearla nel formato "unified diff", come l'argomento ``-u`` di +:manpage:`diff(1)`. +Inoltre, per favore usate l'argomento ``-p`` per mostrare la funzione C +alla quale si riferiscono le diverse modifiche - questo rende il risultato +di ``diff`` molto più facile da leggere. Le patch dovrebbero essere basate +sulla radice dei sorgenti del kernel, e non sulle sue sottocartelle. + +Per creare una patch per un singolo file, spesso è sufficiente fare:: + + SRCTREE= linux + MYFILE= drivers/net/mydriver.c + + cd $SRCTREE + cp $MYFILE $MYFILE.orig + vi $MYFILE # make your change + cd .. + diff -up $SRCTREE/$MYFILE{.orig,} > /tmp/patch + +Per creare una patch per molteplici file, dovreste spacchettare i sorgenti +"vergini", o comunque non modificati, e fare un ``diff`` coi vostri. +Per esempio:: + + MYSRC= /devel/linux + + tar xvfz linux-3.19.tar.gz + mv linux-3.19 linux-3.19-vanilla + diff -uprN -X linux-3.19-vanilla/Documentation/dontdiff \ + linux-3.19-vanilla $MYSRC > /tmp/patch + +``dontdiff`` è una lista di file che sono generati durante il processo di +compilazione del kernel; questi dovrebbero essere ignorati in qualsiasi +patch generata con :manpage:`diff(1)`. + +Assicuratevi che la vostra patch non includa file che non ne fanno veramente +parte. Al fine di verificarne la correttezza, assicuratevi anche di +revisionare la vostra patch -dopo- averla generata con :manpage:`diff(1)`. + +Se le vostre modifiche producono molte differenze, allora dovrete dividerle +in patch indipendenti che modificano le cose in passi logici; leggete +:ref:`split_changes`. Questo faciliterà la revisione da parte degli altri +sviluppatori, il che è molto importante se volete che la patch venga accettata. + +Se state utilizzando ``git``, ``git rebase -i`` può aiutarvi nel procedimento. +Se non usate ``git``, un'alternativa popolare è ``quilt`` +. + +.. _it_describe_changes: + +2) Descrivete le vostre modifiche +--------------------------------- + +Descrivete il vostro problema. Esiste sempre un problema che vi ha spinto +a fare il vostro lavoro, che sia la correzione di un baco da una riga o una +nuova funzionalità da 5000 righe di codice. Convincete i revisori che vale +la pena risolvere il vostro problema e che ha senso continuare a leggere oltre +il primo paragrafo. + +Descrivete ciò che sarà visibile agli utenti. Chiari incidenti nel sistema +e blocchi sono abbastanza convincenti, ma non tutti i bachi sono così evidenti. +Anche se il problema è stato scoperto durante la revisione del codice, +descrivete l'impatto che questo avrà sugli utenti. Tenete presente che +la maggior parte delle installazioni Linux usa un kernel che arriva dai +sorgenti stabili o dai sorgenti di una distribuzione particolare che prende +singolarmente le patch dai sorgenti principali; quindi, includete tutte +le informazioni che possono essere utili a capire le vostre modifiche: +le circostanze che causano il problema, estratti da dmesg, descrizioni di +un incidente di sistema, regressioni nelle prestazioni, picchi di latenza, +blocchi, eccetera. + +Quantificate le ottimizzazioni e i compromessi. Se affermate di aver +migliorato le prestazioni, il consumo di memoria, l'impatto sullo stack, +o la dimensione del file binario, includete dei numeri a supporto della +vostra dichiarazione. Ma ricordatevi di descrivere anche eventuali costi +che non sono ovvi.
Solitamente le ottimizzazioni non sono gratuite, ma sono +un compromesso fra l'uso di CPU, la memoria e la leggibilità; o, quando si +parla di ipotesi euristiche, fra differenti carichi. Descrivete i lati +negativi che vi aspettate dall'ottimizzazione cosicché i revisori possano +valutare i costi e i benefici. + +Una volta che il problema è chiaro, descrivete come lo risolvete andando +nel dettaglio tecnico. È molto importante che descriviate la modifica +in un inglese semplice cosicché i revisori possano verificare che il codice si +comporti come descritto. + +I manutentori vi saranno grati se scrivete la descrizione della patch in un +formato che sia compatibile con il gestore dei sorgenti usato dal kernel, +``git``, come un "commit log". Leggete :ref:`it_explicit_in_reply_to`. + +Risolvete solo un problema per patch. Se la vostra descrizione inizia ad +essere lunga, potrebbe essere un segno che la vostra patch necessita d'essere +divisa. Leggete :ref:`split_changes`. + +Quando inviate o rinviate una patch o una serie, includete la descrizione +completa delle modifiche e la loro giustificazione. Non limitatevi a dire che +questa è la versione N della patch (o serie). Non aspettatevi che i +manutentori di un sottosistema vadano a cercare le versioni precedenti per +cercare la descrizione da aggiungere. In pratica, la patch (o serie) e la sua +descrizione devono essere un'unica cosa. Questo aiuta i manutentori e i +revisori. Probabilmente, alcuni revisori non hanno nemmeno ricevuto o visto +le versioni precedenti della patch. + +Descrivete le vostro modifiche usando l'imperativo, per esempio "make xyzzy +do frotz" piuttosto che "[This patch] makes xyzzy do frotz" or "[I] changed +xyzzy to do frotz", come se steste dando ordini al codice di cambiare il suo +comportamento. + +Se la patch corregge un baco conosciuto, fare riferimento a quel baco inserendo +il suo numero o il suo URL. Se la patch è la conseguenza di una discussione +su una lista di discussione, allora fornite l'URL all'archivio di quella +discussione; usate i collegamenti a https://lkml.kernel.org/ con il +``Message-Id``, in questo modo vi assicurerete che il collegamento non diventi +invalido nel tempo. + +Tuttavia, cercate di rendere la vostra spiegazione comprensibile anche senza +far riferimento a fonti esterne. In aggiunta ai collegamenti a bachi e liste +di discussione, riassumente i punti più importanti della discussione che hanno +portato alla creazione della patch. + +Se volete far riferimento a uno specifico commit, non usate solo +l'identificativo SHA-1. Per cortesia, aggiungete anche la breve riga +riassuntiva del commit per rendere la chiaro ai revisori l'oggetto. +Per esempio:: + + Commit e21d2170f36602ae2708 ("video: remove unnecessary + platform_set_drvdata()") removed the unnecessary + platform_set_drvdata(), but left the variable "dev" unused, + delete it. + +Dovreste anche assicurarvi di usare almeno i primi 12 caratteri +dell'identificativo SHA-1. Il repositorio del kernel ha *molti* oggetti e +questo rende possibile la collisione fra due identificativi con pochi +caratteri. Tenete ben presente che anche se oggi non ci sono collisioni con il +vostro identificativo a 6 caratteri, potrebbero essercene fra 5 anni da oggi. + +Se la vostra patch corregge un baco in un commit specifico, per esempio avete +trovato un problema usando ``git bisect``, per favore usate l'etichetta +'Fixes:' indicando i primi 12 caratteri dell'identificativo SHA-1 seguiti +dalla riga riassuntiva. 
Per esempio:: + + Fixes: e21d2170f366 ("video: remove unnecessary platform_set_drvdata()") + +La seguente configurazione di ``git config`` può essere usata per formattare +i risultati dei comandi ``git log`` o ``git show`` come nell'esempio +precedente:: + + [core] + abbrev = 12 + [pretty] + fixes = Fixes: %h (\"%s\") + +.. _it_split_changes: + +3) Separate le vostre modifiche +------------------------------- + +Separate ogni **cambiamento logico** in patch distinte. + +Per esempio, se i vostri cambiamenti per un singolo driver includono +sia delle correzioni di bachi che miglioramenti alle prestazioni, +allora separateli in due o più patch. Se i vostri cambiamenti includono +un aggiornamento dell'API e un nuovo driver che lo sfrutta, allora separateli +in due patch. + +D'altro canto, se fate una singola modifica su più file, raggruppate tutte +queste modifiche in una singola patch. Dunque, un singolo cambiamento logico +è contenuto in una sola patch. + +Il punto da ricordare è che ogni modifica dovrebbe fare delle modifiche +che siano facilmente comprensibili e che possano essere verificate dai revisori. +Ogni patch dovrebbe essere giustificabile di per sé. + +Se al fine di ottenere un cambiamento completo una patch dipende da un'altra, +va bene. Semplicemente scrivete una nota nella descrizione della patch per +farlo presente: **"this patch depends on patch X"**. + +Quando dividete i vostri cambiamenti in una serie di patch, prestate +particolare attenzione alla verifica di ogni patch della serie; per ognuna +il kernel deve compilare ed essere eseguito correttamente. Gli sviluppatori +che usano ``git bisect`` per scovare i problemi potrebbero finire nel mezzo +della vostra serie in un punto qualsiasi; non vi saranno grati se nel mezzo +avete introdotto dei bachi. + +Se non potete condensare la vostra serie di patch in una più piccola, allora +pubblicatene una quindicina alla volta e aspettate che vengano revisionate +ed integrate. + + +4) Verificate lo stile delle vostre modifiche +--------------------------------------------- + +Controllate che la vostra patch non violi lo stile del codice, maggiori +dettagli sono disponibili in :ref:`Documentation/translations/it_IT/process/coding-style.rst `. +Non farlo porta semplicemente a una perdita di tempo da parte dei revisori e +voi vedrete la vostra patch rifiutata, probabilmente senza nemmeno essere stata +letta. + +Un'eccezione importante si ha quando del codice viene spostato da un file +ad un altro -- in questo caso non dovreste modificare il codice spostato +per nessun motivo, almeno non nella patch che lo sposta. Questo separa +chiaramente l'azione di spostare il codice e il vostro cambiamento. +Questo aiuta enormemente la revisione delle vere differenze e permette agli +strumenti di tenere meglio la traccia della storia del codice. + +Prima di inviare una patch, verificatene lo stile usando l'apposito +verificatore (scripts/checkpatch.pl). Da notare, comunque, che il verificator +di stile dovrebbe essere visto come una guida, non come un sostituto al +giudizio umano. Se il vostro codice è migliore nonostante una violazione +dello stile, probabilmente è meglio lasciarlo com'è. + +Il verificatore ha tre diversi livelli di severità: + - ERROR: le cose sono molto probabilmente sbagliate + - WARNING: le cose necessitano d'essere revisionate con attenzione + - CHECK: le cose necessitano di un pensierino + +Dovreste essere in grado di giustificare tutte le eventuali violazioni rimaste +nella vostra patch. 
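A titolo puramente illustrativo (il nome della patch e del file sorgente sono ipotetici), la verifica di stile e la generazione della riga "Fixes:" con l'alias ``pretty`` definito qui sopra potrebbero assomigliare a::

    # verifica di stile su una patch già generata
    ./scripts/checkpatch.pl --strict 0001-esempio.patch

    # verifica di stile direttamente su un file sorgente
    ./scripts/checkpatch.pl -f drivers/net/mydriver.c

    # produce una riga "Fixes: ..." nel formato mostrato sopra
    git log -1 --pretty=fixes e21d2170f366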
+ + +5) Selezionate i destinatari della vostra patch +----------------------------------------------- + +Dovreste sempre inviare una copia della patch ai manutentori dei sottosistemi +interessati dalle modifiche; date un'occhiata al file MAINTAINERS e alla storia +delle revisioni per scoprire chi si occupa del codice. Lo script +scripts/get_maintainer.pl può esservi d'aiuto. Se non riuscite a trovare un +manutentore per il sottosistema su cui state lavorando, allora Andrew Morton +(akpm@linux-foundation.org) sarà la vostra ultima possibilità. + +Normalmente, dovreste anche scegliere una lista di discussione a cui inviare +la vostra serie di patch. La lista di discussione linux-kernel@vger.kernel.org +è proprio l'ultima spiaggia: il volume di email su questa lista fa sì che +diversi sviluppatori non la seguano. Guardate nel file MAINTAINERS per trovare +la lista di discussione dedicata ad un sottosistema; probabilmente lì la vostra +patch riceverà molta più attenzione. Tuttavia, per favore, non spammate le +liste di discussione che non sono interessate al vostro lavoro. + +Molte delle liste di discussione relative al kernel vengono ospitate su +vger.kernel.org; potete trovare un loro elenco alla pagina +http://vger.kernel.org/vger-lists.html. Tuttavia, ci sono altre liste di +discussione ospitate altrove. + +Non inviate più di 15 patch alla volta sulle liste di discussione vger!!! + +L'ultimo giudizio sull'integrazione delle modifiche accettate spetta a +Linus Torvalds. Il suo indirizzo e-mail è . +Riceve moltissime e-mail, e, a questo punto, solo poche patch passano +direttamente attraverso il suo giudizio; quindi, dovreste fare del vostro +meglio per -evitare di- inviargli e-mail. + +Se avete una patch che corregge un baco di sicurezza che potrebbe essere +sfruttato, inviatela a security@kernel.org. Per bachi importanti, un breve +embargo potrebbe essere preso in considerazione per dare il tempo alle +distribuzioni di prendere la patch e renderla disponibile ai loro utenti; +in questo caso, ovviamente, la patch non dovrebbe essere inviata su alcuna +lista di discussione pubblica. + +Le patch che correggono bachi importanti in un kernel già rilasciato dovrebbero +essere inviate ai manutentori dei kernel stabili aggiungendo la seguente riga:: + + Cc: stable@vger.kernel.org + +nella vostra patch, nell'area dedicata alle firme (notate: NON come destinatario +delle e-mail). In aggiunta a questo file, dovreste leggere anche +:ref:`Documentation/translations/it_IT/process/stable-kernel-rules.rst ` + +Tuttavia, notate che alcuni manutentori di sottosistema preferiscono avere +l'ultima parola su quali patch dovrebbero essere aggiunte ai kernel stabili. +I manutentori del sottosistema di rete, in particolare, non vorrebbero vedere +i singoli sviluppatori aggiungere alle loro patch delle righe come quella +sopracitata. + +Se le modifiche hanno effetti sull'interfaccia con lo spazio utente, per favore +inviate una patch per le pagine man ai manutentori di suddette pagine (elencati +nel file MAINTAINERS), o almeno una notifica circa la vostra modifica, +cosicché l'informazione possa trovare la sua strada nel manuale. Le modifiche +all'API dello spazio utente dovrebbero essere inviate in copia anche a +linux-api@vger.kernel.org. + +Per le piccole patch potreste aggiungere in CC l'indirizzo +*Trivial Patch Monkey trivial@kernel.org* che ha lo scopo di raccogliere +le patch "banali". Date uno sguardo al file MAINTAINERS per vedere chi +è l'attuale amministratore.
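Per individuare in pratica i manutentori e le liste di discussione a cui inviare una serie, un punto di partenza puramente indicativo (nomi di file e patch sono ipotetici) è lo script citato qui sopra::

    # destinatari suggeriti a partire dai file toccati
    ./scripts/get_maintainer.pl -f drivers/net/mydriver.c

    # oppure direttamente a partire dalla patch generata
    ./scripts/get_maintainer.pl 0001-esempio.patch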
+ +Le patch banali devono rientrare in una delle seguenti categorie: + +- errori grammaticali nella documentazione +- errori grammaticali negli errori che potrebbero rompere :manpage:`grep(1)` +- correzione di avvisi di compilazione (riempirsi di avvisi inutili è negativo) +- correzione di errori di compilazione (solo se correggono qualcosa sul serio) +- rimozione di funzioni/macro deprecate +- sostituzione di codice non potabile con uno portabile (anche in codice + specifico per un'architettura, dato che le persone copiano, fintanto che + la modifica sia banale) +- qualsiasi modifica dell'autore/manutentore di un file (in pratica + "patch monkey" in modalità ritrasmissione) + + +6) Niente: MIME, links, compressione, allegati. Solo puro testo +---------------------------------------------------------------- + +Linus e gli altri sviluppatori del kernel devono poter commentare +le modifiche che sottomettete. Per uno sviluppatore è importante +essere in grado di "citare" le vostre modifiche, usando normali +programmi di posta elettronica, cosicché sia possibile commentare +una porzione specifica del vostro codice. + +Per questa ragione tutte le patch devono essere inviate via e-mail +come testo. .. warning:: - TODO ancora da tradurre + Se decidete di copiare ed incollare la patch nel corpo dell'e-mail, state + attenti che il vostro programma non corrompa il contenuto con andate + a capo automatiche. + +La patch non deve essere un allegato MIME, compresso o meno. Molti +dei più popolari programmi di posta elettronica non trasmettono un allegato +MIME come puro testo, e questo rende impossibile commentare il vostro codice. +Inoltre, un allegato MIME rende l'attività di Linus più laboriosa, diminuendo +così la possibilità che il vostro allegato-MIME venga accettato. + +Eccezione: se il vostro servizio di posta storpia le patch, allora qualcuno +potrebbe chiedervi di rinviarle come allegato MIME. + +Leggete :ref:`Documentation/translations/it_IT/process/email-clients.rst ` +per dei suggerimenti sulla configurazione del programmi di posta elettronica +per l'invio di patch intatte. + +7) Dimensione delle e-mail +-------------------------- + +Le grosse modifiche non sono adatte ad una lista di discussione, e nemmeno +per alcuni manutentori. Se la vostra patch, non compressa, eccede i 300 kB +di spazio, allora caricatela in una spazio accessibile su internet fornendo +l'URL (collegamento) ad essa. Ma notate che se la vostra patch eccede i 300 kB +è quasi certo che necessiti comunque di essere spezzettata. + +8) Rispondere ai commenti di revisione +-------------------------------------- + +Quasi certamente i revisori vi invieranno dei commenti su come migliorare +la vostra patch. Dovete rispondere a questi commenti; ignorare i revisori +è un ottimo modo per essere ignorati. Riscontri o domande che non conducono +ad una modifica del codice quasi certamente dovrebbero portare ad un commento +nel changelog cosicché il prossimo revisore potrà meglio comprendere cosa stia +accadendo. + +Assicuratevi di dire ai revisori quali cambiamenti state facendo e di +ringraziarli per il loro tempo. Revisionare codice è un lavoro faticoso e che +richiede molto tempo, e a volte i revisori diventano burberi. Tuttavia, anche +in questo caso, rispondete con educazione e concentratevi sul problema che +hanno evidenziato. + +9) Non scoraggiatevi - o impazientitevi +--------------------------------------- + +Dopo che avete inviato le vostre modifiche, siate pazienti e aspettate. 
+I revisori sono persone occupate e potrebbero non ricevere la vostra patch +immediatamente. + +Un tempo, le patch erano solite scomparire nel vuoto senza alcun commento, +ma ora il processo di sviluppo funziona meglio. Dovreste ricevere commenti +in una settimana o poco più; se questo non dovesse accadere, assicuratevi di +aver inviato le patch correttamente. Aspettate almeno una settimana prima di +rinviare le modifiche o sollecitare i revisori - probabilmente anche di più +durante la finestra d'integrazione. + +10) Aggiungete PATCH nell'oggetto +--------------------------------- + +Dato l'alto volume di e-mail per Linus, e la lista linux-kernel, è prassi +prefiggere il vostro oggetto con [PATCH]. Questo permette a Linus e agli +altri sviluppatori del kernel di distinguere facilmente le patch dalle altre +discussioni. + + +11) Firmate il vostro lavoro - Il certificato d'origine dello sviluppatore +-------------------------------------------------------------------------- + +Per migliorare la tracciabilità su "chi ha fatto cosa", specialmente per +quelle patch che per raggiungere lo stadio finale passano attraverso +diversi livelli di manutentori, abbiamo introdotto la procedura di "firma" +delle patch che vengono inviate per e-mail. + +La firma è una semplice riga alla fine della descrizione della patch che +certifica che l'avete scritta voi o che avete il diritto di pubblicarla +come patch open-source. Le regole sono abbastanza semplici: se potete +certificare quanto segue: + +Il certificato d'origine dello sviluppatore 1.1 +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Contribuendo a questo progetto, io certifico che: + + (a) Il contributo è stato creato interamente, o in parte, da me e che + ho il diritto di inviarlo in accordo con la licenza open-source + indicata nel file; oppure + + (b) Il contributo è basato su un lavoro precedente che, nei limiti + della mia conoscenza, è coperto da un'appropriata licenza + open-source che mi da il diritto di modificarlo e inviarlo, + le cui modifiche sono interamente o in parte mie, in accordo con + la licenza open-source (a meno che non abbia il permesso di usare + un'altra licenza) indicata nel file; oppure + + (c) Il contributo mi è stato fornito direttamente da qualcuno che + ha certificato (a), (b) o (c) e non l'ho modificata. + + (d) Capisco e concordo col fatto che questo progetto e i suoi + contributi sono pubblici e che un registro dei contributi (incluse + tutte le informazioni personali che invio con essi, inclusa la mia + firma) verrà mantenuto indefinitamente e che possa essere + ridistribuito in accordo con questo progetto o le licenze + open-source coinvolte. + +poi dovete solo aggiungere una riga che dice:: + + Signed-off-by: Random J Developer + +usando il vostro vero nome (spiacenti, non si accettano pseudonimi o +contributi anonimi). + +Alcune persone aggiungono delle etichette alla fine. Per ora queste verranno +ignorate, ma potete farlo per meglio identificare procedure aziendali interne o +per aggiungere dettagli circa la firma. + +Se siete un manutentore di un sottosistema o di un ramo, qualche volta dovrete +modificare leggermente le patch che avete ricevuto al fine di poterle +integrare; questo perché il codice non è esattamente lo stesso nei vostri +sorgenti e in quelli dei vostri contributori. Se rispettate rigidamente la +regola (c), dovreste chiedere al mittente di rifare la patch, ma questo è +controproducente e una totale perdita di tempo ed energia. 
La regola (b) +vi permette di correggere il codice, ma poi diventa davvero maleducato cambiare +la patch di qualcuno e addossargli la responsabilità per i vostri bachi. +Per risolvere questo problema dovreste aggiungere una riga, fra l'ultimo +Signed-off-by e il vostro, che spiega la vostra modifica. Nonostante non ci +sia nulla di obbligatorio, un modo efficace è quello di indicare il vostro +nome o indirizzo email fra parentesi quadre, seguito da una breve descrizione; +questo renderà abbastanza visibile chi è responsabile per le modifiche +dell'ultimo minuto. Per esempio:: + + Signed-off-by: Random J Developer + [lucky@maintainer.example.org: struct foo moved from foo.c to foo.h] + Signed-off-by: Lucky K Maintainer + +Questa pratica è particolarmente utile se siete i manutentori di un ramo +stabile ma al contempo volete dare credito agli autori, tracciare e integrare +le modifiche, e proteggere i mittenti dalle lamentele. Notate che in nessuna +circostanza è permessa la modifica dell'identità dell'autore (l'intestazione +From), dato che è quella che appare nei changelog. + +Un appunto speciale per chi porta il codice su vecchie versioni. Sembra che +sia comune l'utile pratica di inserire un'indicazione circa l'origine della +patch all'inizio del messaggio di commit (appena dopo la riga dell'oggetto) +al fine di migliorare la tracciabilità. Per esempio, questo è quello che si +vede nel rilascio stabile 3.x-stable:: + + Date: Tue Oct 7 07:26:38 2014 -0400 + + libata: Un-break ATA blacklist + + commit 1c40279960bcd7d52dbdf1d466b20d24b99176c8 upstream. + +E qui quello che potrebbe vedersi su un kernel più vecchio dove la patch è +stata applicata:: + + Date: Tue May 13 22:12:27 2008 +0200 + + wireless, airo: waitbusy() won't delay + + [backport of 2.6 commit b7acbdfbd1f277c1eb23f344f899cfa4cd0bf36a] + +Qualunque sia il formato, questa informazione fornisce un importante aiuto +alle persone che vogliono seguire i vostri sorgenti, e quelle che cercano +dei bachi. + + +12) Quando utilizzare Acked-by:, Cc:, e Co-developed-by: +-------------------------------------------------------- + +L'etichetta Signed-off-by: indica che il firmatario è stato coinvolto nello +sviluppo della patch, o che era nel suo percorso di consegna. + +Se una persona non è direttamente coinvolta con la preparazione o gestione +della patch ma desidera firmare e mettere agli atti la loro approvazione, +allora queste persone possono chiedere di aggiungere al changelog della patch +una riga Acked-by:. + +Acked-by: viene spesso utilizzato dai manutentori del sottosistema in oggetto +quando quello stesso manutentore non ha contribuito né trasmesso la patch. + +Acked-by: non è formale come Signed-off-by:. Questo indica che la persona ha +revisionato la patch e l'ha trovata accettabile. Per cui, a volte, chi +integra le patch convertirà un "sì, mi sembra che vada bene" in un Acked-by: +(ma tenete presente che solitamente è meglio chiedere esplicitamente). + +Acked-by: non indica l'accettazione di un'intera patch. Per esempio, quando +una patch ha effetti su diversi sottosistemi e ha un Acked-by: da un +manutentore di uno di questi, significa che il manutentore accetta quella +parte di codice relativa al sottosistema che mantiene. Qui dovremmo essere +giudiziosi. Quando si hanno dei dubbi si dovrebbe far riferimento alla +discussione originale negli archivi della lista di discussione. + +Se una persona ha avuto l'opportunità di commentare la patch, ma non lo ha +fatto, potete aggiungere l'etichetta ``Cc:`` alla patch. 
Questa è l'unica +etichetta che può essere aggiunta senza che la persona in questione faccia +alcunché - ma dovrebbe indicare che la persona ha ricevuto una copia della +patch. Questa etichetta documenta che terzi potenzialmente interessati sono +stati inclusi nella discussione. + +L'etichetta Co-developed-by: indica che la patch è stata scritta dall'autore in +collaborazione con un altro sviluppatore. Qualche volta questo è utile quando +più persone lavorano sulla stessa patch. Notate che questa persona deve avere +nella patch anche una riga Signed-off-by:. + + +13) Utilizzare Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: e Fixes: +----------------------------------------------------------------------------- + +L'etichetta Reported-by dà credito alle persone che trovano e riportano i bachi +e si spera che questo possa ispirarli ad aiutarci nuovamente in futuro. +Rammentate che se il baco è stato riportato in privato, dovrete chiedere il +permesso prima di poter utilizzare l'etichetta Reported-by. + +L'etichetta Tested-by: indica che la patch è stata verificata con successo +(su un qualche sistema) dalla persona citata. Questa etichetta informa i +manutentori che qualche verifica è stata fatta, fornisce un mezzo per trovare +persone che possano verificare il codice in futuro, e garantisce che queste +stesse persone ricevano credito per il loro lavoro. + +Reviewed-by:, invece, indica che la patch è stata revisionata ed è stata +considerata accettabile in accordo con la dichiarazione dei revisori: + +Dichiarazione di supervisione dei revisori +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Offrendo la mia etichetta Reviewed-by, dichiaro quanto segue: + + (a) Ho effettuato una revisione tecnica di questa patch per valutarne + l'adeguatezza ai fini dell'inclusione nel ramo principale del + kernel. + + (b) Tutti i problemi e le domande riguardanti la patch sono stati + comunicati al mittente. Sono soddisfatto dalle risposte + del mittente. + + (c) Nonostante ci potrebbero essere cose migliorabili in questa + sottomissione, credo che sia, in questo momento, (1) una modifica + di interesse per il kernel, e (2) libera da problemi che + potrebbero metterne in discussione l'integrazione. + + (d) Nonostante abbia revisionato la patch e creda che vada bene, + non garantisco (se non specificato altrimenti) che questa + otterrà quello che promette o funzionerà correttamente in tutte + le possibili situazioni. + +L'etichetta Reviewed-by è la dichiarazione di un parere sulla bontà di +una modifica che si ritiene appropriata e senza alcun problema tecnico +importante. Qualsiasi revisore interessato (che abbia svolto il lavoro di +revisione) può offrire il proprio Reviewed-by per la patch. Questa etichetta +serve a dare credito ai revisori e a informare i manutentori sul livello di +revisione che è stato fatto sulla patch. L'etichetta Reviewed-by, quando fornita +da revisori conosciuti per la loro conoscenza sulla materia in oggetto e per la +loro serietà nella revisione, accrescerà le probabilità che la vostra patch +venga integrata nel kernel. + +L'etichetta Suggested-by: indica che l'idea della patch è stata suggerita +dalla persona nominata e le dà credito. Tenete a mente che questa etichetta +non dovrebbe essere aggiunta senza un permesso esplicito, specialmente se +l'idea non è stata pubblicata in un forum pubblico. Detto ciò, dando credito +a chi ci fornisce delle idee, si spera di poterli ispirare ad aiutarci +nuovamente in futuro. + +L'etichetta Fixes: indica che la patch corregge un problema in un commit +precedente.
Serve a chiarire l'origine di un baco, il che aiuta la revisione +del baco stesso. Questa etichetta è di aiuto anche per i manutentori dei +kernel stabili al fine di capire quale kernel deve ricevere la correzione. +Questo è il modo suggerito per indicare che un baco è stato corretto nella +patch. Per maggiori dettagli leggete :ref:`it_describe_changes` + + +14) Il formato canonico delle patch +----------------------------------- + +Questa sezione descrive il formato che dovrebbe essere usato per le patch. +Notate che se state usando un repositorio ``git`` per salvare le vostre patch +potere usare il comando ``git format-patch`` per ottenere patch nel formato +appropriato. Lo strumento non crea il testo necessario, per cui, leggete +le seguenti istruzioni. + +L'oggetto di una patch canonica è la riga:: + + Subject: [PATCH 001/123] subsystem: summary phrase + +Il corpo di una patch canonica contiene i seguenti elementi: + + - Una riga ``from`` che specifica l'autore della patch, seguita + da una riga vuota (necessaria soltanto se la persona che invia la + patch non ne è l'autore). + + - Il corpo della spiegazione, con linee non più lunghe di 75 caratteri, + che verrà copiato permanentemente nel changelog per descrivere la patch. + + - Una riga vuota + + - Le righe ``Signed-off-by:``, descritte in precedenza, che finiranno + anch'esse nel changelog. + + - Una linea di demarcazione contenente semplicemente ``---``. + + - Qualsiasi altro commento che non deve finire nel changelog. + + - Le effettive modifiche al codice (il prodotto di ``diff``). + +Il formato usato per l'oggetto permette ai programmi di posta di usarlo +per ordinare le patch alfabeticamente - tutti i programmi di posta hanno +questa funzionalità - dato che al numero sequenziale si antepongono degli zeri; +in questo modo l'ordine numerico ed alfabetico coincidono. + +Il ``subsystem`` nell'oggetto dell'email dovrebbe identificare l'area +o il sottosistema modificato dalla patch. + +La ``summary phrase`` nell'oggetto dell'email dovrebbe descrivere brevemente +il contenuto della patch. La ``summary phrase`` non dovrebbe essere un nome +di file. Non utilizzate la stessa ``summary phrase`` per tutte le patch in +una serie (dove una ``serie di patch`` è una sequenza ordinata di diverse +patch correlate). + +Ricordatevi che la ``summary phrase`` della vostra email diventerà un +identificatore globale ed unico per quella patch. Si propaga fino al +changelog ``git``. La ``summary phrase`` potrà essere usata in futuro +dagli sviluppatori per riferirsi a quella patch. Le persone vorranno +cercare la ``summary phrase`` su internet per leggere le discussioni che la +riguardano. Potrebbe anche essere l'unica cosa che le persone vedranno +quando, in due o tre mesi, riguarderanno centinaia di patch usando strumenti +come ``gitk`` o ``git log --oneline``. + +Per queste ragioni, dovrebbe essere lunga fra i 70 e i 75 caratteri, e deve +descrivere sia cosa viene modificato, sia il perché sia necessario. Essere +brevi e descrittivi è una bella sfida, ma questo è quello che fa un riassunto +ben scritto. + +La ``summary phrase`` può avere un'etichetta (*tag*) di prefisso racchiusa fra +le parentesi quadre "Subject: [PATCH ...] ". +Le etichette non verranno considerate come parte della frase riassuntiva, ma +indicano come la patch dovrebbe essere trattata. 
Fra le etichette più comuni +ci sono quelle di versione che vengono usate quando una patch è stata inviata +più volte (per esempio, "v1, v2, v3"); oppure "RFC" per indicare che si +attendono dei commenti (*Request For Comments*). Se ci sono quattro patch +nella serie, queste dovrebbero essere enumerate così: 1/4, 2/4, 3/4, 4/4. +Questo assicura che gli sviluppatori capiscano l'ordine in cui le patch +dovrebbero essere applicate e permette loro di tenere traccia di quelle che +hanno già revisionato o applicato. + +Un paio di esempi di oggetti:: + + Subject: [PATCH 2/5] ext2: improve scalability of bitmap searching + Subject: [PATCH v2 01/27] x86: fix eflags tracking + +La riga ``from`` dev'essere la prima nel corpo del messaggio ed è nel +formato: + + From: Original Author + +La riga ``from`` indica chi verrà accreditato nel changelog permanente come +l'autore della patch. Se la riga ``from`` è mancante, allora per determinare +l'autore da inserire nel changelog verrà usata la riga ``From`` +nell'intestazione dell'email. + +Il corpo della spiegazione verrà incluso nel changelog permanente, per cui +deve aver senso per un lettore esperto che ha dimenticato i dettagli della +discussione che hanno portato alla patch. L'inclusione di informazioni +sui problemi oggetto della patch (messaggi del kernel, messaggi di oops, +eccetera) è particolarmente utile per le persone che potrebbero cercare fra +i messaggi di log per la patch che li tratta. Se la patch corregge un errore +di compilazione, non sarà necessario includere proprio _tutto_ quello che +è uscito dal compilatore; aggiungete solo quello che è necessario per far sì +che la vostra patch venga trovata. Come nella ``summary phrase``, è importante +essere sia brevi che descrittivi. + +La linea di demarcazione ``---`` serve essenzialmente a segnare dove finisce +il messaggio di changelog. + +Aggiungere il ``diffstat`` dopo ``---`` è un buon uso di questo spazio, per +mostrare i file che sono cambiati, e il numero di righe aggiunte e rimosse +per ciascun file. Un ``diffstat`` è particolarmente utile per le patch grandi. +Altri commenti che sono importanti solo per i manutentori, quindi inadatti al changelog +permanente, dovrebbero essere messi qui. Un buon esempio per questo tipo +di commenti potrebbe essere quello di descrivere le differenze fra le versioni +della patch. + +Se includete un ``diffstat`` dopo ``---``, usate le opzioni ``-p 1 -w70`` +cosicché i nomi dei file elencati non occupino troppo spazio (facilmente +rientreranno negli 80 caratteri, magari con qualche indentazione). +(``git`` genera di base dei diffstat adatti). + +Maggiori dettagli sul formato delle patch nei riferimenti qui di seguito. + +.. _it_explicit_in_reply_to: + +15) Usare esplicitamente In-Reply-To nell'intestazione +------------------------------------------------------ + +Aggiungere manualmente In-Reply-To: nell'intestazione dell'e-mail +potrebbe essere d'aiuto per associare una patch ad una discussione +precedente, per esempio per collegare la correzione di un baco con l'e-mail +che lo riportava. Tuttavia, per serie di patch multiple è generalmente +sconsigliato l'uso di In-Reply-To: per collegare precedenti versioni. +In questo modo versioni multiple di una patch non diventeranno un'ingestibile +giungla di riferimenti all'interno dei programmi di posta. Se un collegamento +è utile, potete usare https://lkml.kernel.org/ per ottenere i collegamenti +ad una versione precedente di una serie di patch (per esempio, potete usarlo +per l'email introduttiva alla serie).
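
A puro titolo illustrativo (il Message-Id, i destinatari e il nome del file
qui sotto sono ipotetici), un collegamento di questo tipo si può impostare
direttamente al momento dell'invio::

    git send-email --in-reply-to="<id-del-messaggio-del-report@example.com>" \
        --to=linux-kernel@vger.kernel.org 0001-correzione-esempio.patch

Lo stesso effetto si ottiene generando la patch con l'opzione
``--in-reply-to`` di ``git format-patch``.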
+ +16) Inviare richieste ``git pull`` +---------------------------------- + +Se avete una serie di patch, potrebbe essere più conveniente per un manutentore +tirarle dentro al repositorio del sottosistema attraverso l'operazione +``git pull``. Comunque, tenete presente che prendere patch da uno sviluppatore +in questo modo richiede un livello di fiducia più alto rispetto a prenderle da +una lista di discussione. Di conseguenza, molti manutentori sono riluttanti +ad accettare richieste di *pull*, specialmente dagli sviluppatori nuovi e +quindi sconosciuti. Se siete in dubbio, potete fare una richiesta di *pull* +come messaggio introduttivo ad una normale pubblicazione di patch, così +il manutentore avrà la possibilità di scegliere come integrarle. + +Una richiesta di *pull* dovrebbe avere nell'oggetto [GIT] o [PULL]. +La richiesta stessa dovrebbe includere il nome del repositorio e quello del +ramo su una singola riga; dovrebbe essere più o meno così:: + + Please pull from + + git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus + + to get these changes: + +Una richiesta di *pull* dovrebbe includere anche un messaggio generico +che dica cos'è incluso, una lista delle patch usando ``git shortlog``, e una +panoramica sugli effetti della serie di patch con ``diffstat``. Il modo più +semplice per ottenere tutte queste informazioni è, ovviamente, quello di +lasciar fare tutto a ``git`` con il comando ``git request-pull``. + +Alcuni manutentori (incluso Linus) vogliono vedere le richieste di *pull* +da commit firmati con GPG; questo fornisce una maggiore garanzia sul fatto +che siate stati proprio voi a fare la richiesta. In assenza di tale etichetta +firmata Linus, in particolare, non prenderà alcuna patch da siti pubblici come +GitHub. + +Il primo passo verso la creazione di questa etichetta firmata è quello di +creare una chiave GNUPG ed averla fatta firmare da uno o più sviluppatori +principali del kernel. Questo potrebbe essere difficile per i nuovi +sviluppatori, ma non ci sono altre vie. Andare alle conferenze potrebbe +essere un buon modo per trovare sviluppatori che possano firmare la vostra +chiave. + +Una volta che avete preparato la vostra serie di patch in ``git``, e volete che +qualcuno le prenda, create una etichetta firmata col comando ``git tag -s``. +Questo creerà una nuova etichetta che identifica l'ultimo commit della serie +contenente una firma creata con la vostra chiave privata. Avrete anche +l'opportunità di aggiungere un messaggio di changelog all'etichetta; questo è +il posto ideale per descrivere gli effetti della richiesta di *pull*. + +Se i sorgenti da cui il manutentore prenderà le patch non sono gli stessi del +repositorio su cui state lavorando, allora non dimenticatevi di caricare +l'etichetta firmata anche sui sorgenti pubblici. + +Quando generate una richiesta di *pull*, usate l'etichetta firmata come +obiettivo. Un comando come il seguente farà il suo dovere:: + + git request-pull master git://my.public.tree/linux.git my-signed-tag + + +Riferimenti +----------- + +Andrew Morton, "La patch perfetta" (tpp). + + +Jeff Garzik, "Formato per la sottomissione di patch per il kernel Linux" + + +Greg Kroah-Hartman, "Come scocciare un manutentore di un sottosistema" + + + + + + + + + + + + +No!!!! Basta gigantesche bombe patch alle persone sulla lista linux-kernel@vger.kernel.org! 
+ + +Kernel Documentation/translations/it_IT/process/coding-style.rst: + :ref:`Documentation/translations/it_IT/process/coding-style.rst ` + +E-mail di Linus Torvalds sul formato canonico di una patch: + + +Andi Kleen, "Su come sottomettere patch del kernel" + Alcune strategie su come sottomettere modifiche toste o controverse. + + http://halobates.de/on-submitting-patches.pdf diff --git a/Documentation/translations/ja_JP/howto.rst b/Documentation/translations/ja_JP/howto.rst index f3116381c26b..2621b770a745 100644 --- a/Documentation/translations/ja_JP/howto.rst +++ b/Documentation/translations/ja_JP/howto.rst @@ -245,7 +245,7 @@ Linux カーネルソースツリーの中に含まれる、きれいにし、 できます。この最新の素晴しいカーネルコードのリポジトリは以下で見つかり ます - - http://lxr.free-electrons.com/ + https://elixir.bootlin.com/ 開発プロセス ------------ @@ -256,7 +256,6 @@ Linux カーネルの開発プロセスは現在幾つかの異なるメイン - メインの 4.x カーネルツリー - 4.x.y -stable カーネルツリー - - 4.x -git カーネルパッチ - サブシステム毎のカーネルツリーとパッチ - 統合テストのための 4.x -next カーネルツリー @@ -319,15 +318,6 @@ Documentation/process/stable-kernel-rules.rst ファイルにはどのような 類の変更が -stable ツリーに受け入れ可能か、またリリースプロセスがどう 動くかが記述されています。 -4.x -git パッチ -~~~~~~~~~~~~~~~ - -git リポジトリで管理されているLinus のカーネルツリーの毎日のスナップ -ショットがあります。(だから -git という名前がついています)。これらのパッ -チはおおむね毎日リリースされており、Linus のツリーの現状を表します。こ -れは -rc カーネルと比べて、パッチが大丈夫かどうかも確認しないで自動的 -に生成されるので、より実験的です。 - サブシステム毎のカーネルツリーとパッチ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/translations/ko_KR/howto.rst b/Documentation/translations/ko_KR/howto.rst index a8197e072599..bcd63731b80a 100644 --- a/Documentation/translations/ko_KR/howto.rst +++ b/Documentation/translations/ko_KR/howto.rst @@ -77,10 +77,12 @@ Documentation/process/howto.rst 리눅스 커널 소스 코드는 GPL로 배포(release)되었다. 소스트리의 메인 디렉토리에 있는 라이센스에 관하여 상세하게 쓰여 있는 COPYING이라는 -파일을 봐라. 여러분이 라이센스에 관한 더 깊은 문제를 가지고 있다면 -리눅스 커널 메일링 리스트에 묻지말고 변호사와 연락하라. 메일링 -리스트들에 있는 사람들은 변호사가 아니기 때문에 법적 문제에 관하여 -그들의 말에 의지해서는 안된다. +파일을 봐라. 리눅스 커널 라이센싱 규칙과 소스 코드 안의 `SPDX +`_ 식별자 사용법은 +:ref:`Documentation/process/license-rules.rst ` 에 설명되어 +있다. 여러분이 라이센스에 관한 더 깊은 문제를 가지고 있다면 리눅스 커널 메일링 +리스트에 묻지말고 변호사와 연락하라. 메일링 리스트들에 있는 사람들은 변호사가 +아니기 때문에 법적 문제에 관하여 그들의 말에 의지해서는 안된다. GPL에 관한 잦은 질문들과 답변들은 다음을 참조하라. @@ -99,7 +101,7 @@ mtk.manpages@gmail.com의 메인테이너에게 보낼 것을 권장한다. 다음은 커널 소스 트리에 있는 읽어야 할 파일들의 리스트이다. - README + :ref:`Documentation/admin-guide/README.rst ` 이 파일은 리눅스 커널에 관하여 간단한 ë°°ê²½ 설명과 커널을 설정하고 빌드하기 위해 필요한 것을 설명한다. 커널에 입문하는 사람들은 여기서 시작해야 한다. @@ -220,13 +222,6 @@ ReST 마크업을 사용하는 문서들은 Documentation/output 에 생성된 가지고 있지 않다면 다음에 무엇을 해야할지에 관한 방향을 배울 수 있을 것이다. -여러분들이 이미 커널 트리에 반영하길 원하는 코드 묶음을 가지고 있지만 -올바른 포맷으로 포장하는데 도움이 필요하다면 그러한 문제를 돕기 위해 -만들어진 kernel-mentors 프로젝트가 있다. 그곳은 메일링 리스트이며 -다음에서 참조할 수 있다. - - https://selenic.com/mailman/listinfo/kernel-mentors - 리눅스 커널 코드에 실제 변경을 하기 전에 반드시 ê·¸ 코드가 어떻게 동작하는지 이해하고 있어야 한다. 코드를 분석하기 위하여 특정한 툴의 도움을 빌려서라도 코드를 직접 읽는 것보다 좋은 것은 없다(대부분의 @@ -235,7 +230,7 @@ ReST 마크업을 사용하는 문서들은 Documentation/output 에 생성된 소스코드를 인덱스된 웹 페이지들의 형태로 보여준다. 최신의 멋진 커널 코드 저장소는 다음을 통하여 참조할 수 있다. - http://lxr.free-electrons.com/ + https://elixir.bootlin.com/ 개발 프로세스 @@ -247,7 +242,6 @@ ReST 마크업을 사용하는 문서들은 Documentation/output 에 생성된 - main 4.x 커널 트리 - 4.x.y - 안정된 커널 트리 - - 4.x -git 커널 패치들 - 서브시스템을 위한 커널 트리들과 패치들 - 4.x - 통합 테스트를 위한 next 커널 트리 @@ -303,17 +297,9 @@ Andrew Morton의 글이 있다. 4.x.y는 "stable" 팀에 의해 관리되며 거의 매번 격주로 배포된다. -커널 트리 문서들 내에 Documentation/process/stable-kernel-rules.rst 파일은 어떤 -종류의 변경들이 -stable 트리로 들어왔는지와 배포 프로세스가 어떻게 -진행되는지를 설명한다. - -4.x -git 패치들 -~~~~~~~~~~~~~~~ - -git 저장소(그러므로 -git이라는 이름이 붙음)에는 날마다 관리되는 Linus의 -커널 트리의 snapshot 들이 있다. 
이 패치들은 일반적으로 날마다 배포되며 -Linus의 트리의 현재 상태를 나타낸다. 이 패치들은 정상적인지 조금도 -살펴보지 않고 자동적으로 생성된 것이므로 -rc 커널들 보다도 더 실험적이다. +커널 트리 문서들 내의 :ref:`Documentation/process/stable-kernel-rules.rst ` +파일은 어떤 종류의 변경들이 -stable 트리로 들어왔는지와 +배포 프로세스가 어떻게 진행되는지를 설명한다. 서브시스템 커널 트리들과 패치들 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -360,9 +346,10 @@ https://bugzilla.kernel.org 는 리눅스 커널 개발자들이 커널의 버 https://bugzilla.kernel.org/page.cgi?id=faq.html -메인 커널 소스 디렉토리에 있는 admin-guide/reporting-bugs.rst 파일은 커널 버그라고 생각되는 -것을 보고하는 방법에 관한 좋은 템플릿이며 문제를 추적하기 위해서 커널 -개발자들이 필요로 하는 정보가 무엇들인지를 상세히 설명하고 있다. +메인 커널 소스 디렉토리에 있는 :ref:`admin-guide/reporting-bugs.rst ` +파일은 커널 버그라고 생각되는 것을 보고하는 방법에 관한 좋은 템플릿이며 문제를 +추적하기 위해서 커널 개발자들이 필요로 하는 정보가 무엇들인지를 상세히 설명하고 +있다. 버그 리포트들의 관리 @@ -377,15 +364,7 @@ https://bugzilla.kernel.org 는 리눅스 커널 개발자들이 커널의 버 다른 사람들의 버그들을 수정하기 위하여 시간을 낭비하지 않기 때문이다. 이미 보고된 버그 리포트들을 가지고 작업하기 위해서 https://bugzilla.kernel.org -를 참조하라. 여러분이 앞으로 생겨날 버그 리포트들의 조언자가 되길 원한다면 -bugme-new 메일링 리스트나(새로운 버그 리포트들만이 이곳에서 메일로 전해진다) -bugme-janitor 메일링 리스트(bugzilla에 모든 변화들이 여기서 메일로 전해진다) -에 등록하면 된다. - - https://lists.linux-foundation.org/mailman/listinfo/bugme-new - - https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors - +를 참조하라. 메일링 리스트들 @@ -430,7 +409,8 @@ bugme-janitor 메일링 리스트(bugzilla에 모든 변화들이 여기서 메 "John 커널해커는 작성했다...."를 유지하며 여러분들의 의견을 ê·¸ 메일의 윗부분에 작성하지 말고 각 인용한 단락들 사이에 넣어라. -여러분들이 패치들을 메일에 넣는다면 그것들은 Documentation/process/submitting-patches.rst에 +여러분들이 패치들을 메일에 넣는다면 그것들은 +:ref:`Documentation/process/submitting-patches.rst ` 에 나와있는데로 명백히(plain) 읽을 수 있는 텍스트여야 한다. 커널 개발자들은 첨부파일이나 압축된 패치들을 원하지 않는다. 그들은 여러분들의 패치의 각 라인 단위로 코멘트를 하길 원하며 압축하거나 첨부하지 않고 보내는 것이 diff --git a/Documentation/translations/zh_CN/HOWTO b/Documentation/translations/zh_CN/HOWTO index 5f6d09edc9ac..7a00a8a4eb15 100644 --- a/Documentation/translations/zh_CN/HOWTO +++ b/Documentation/translations/zh_CN/HOWTO @@ -192,7 +192,6 @@ Linux内核代码中包含有大量的文档。这些文档对于学习如何与 些分支包括: - 2.6.x主内核源码树 - 2.6.x.y -stable内核源码树 - - 2.6.x -git内核补丁集 - 2.6.x -mm内核补丁集 - 子系统相关的内核源码树和补丁集 @@ -240,14 +239,6 @@ kernel.org网站的pub/linux/kernel/v2.6/目录下找到它。它的开发遵循 版内核接受的修改类型以及发布的流程。 -2.6.x -git补丁集 ----------------- -Linus的内核源码树的每日快照,这个源码树是由git工具管理的(由此得名)。这 -些补丁通常每天更新以反映Linus的源码树的最新状态。它们比-rc版本的内核源码 -树更具试验性质,因为这个补丁集是全自动生成的,没有任何人来确认其是否真正 -健全。 - - 2.6.x -mm补丁集 --------------- 这是由Andrew Morton维护的试验性内核补丁集。Andrew将所有子系统的内核源码 diff --git a/Documentation/translations/zh_CN/coding-style.rst b/Documentation/translations/zh_CN/coding-style.rst index 1466aa64b8b4..3cb09803e084 100644 --- a/Documentation/translations/zh_CN/coding-style.rst +++ b/Documentation/translations/zh_CN/coding-style.rst @@ -535,26 +535,43 @@ Documentation/doc-guide/ 和 scripts/kernel-doc 以获得详细信息。 (* (max steps 1) c-basic-offset))) - (add-hook 'c-mode-common-hook - (lambda () - ;; Add kernel style - (c-add-style - "linux-tabs-only" - '("linux" (c-offsets-alist - (arglist-cont-nonempty - c-lineup-gcc-asm-reg - c-lineup-arglist-tabs-only)))))) - - (add-hook 'c-mode-hook - (lambda () - (let ((filename (buffer-file-name))) - ;; Enable kernel mode for the appropriate files - (when (and filename - (string-match (expand-file-name "~/src/linux-trees") - filename)) - (setq indent-tabs-mode t) - (setq show-trailing-whitespace t) - (c-set-style "linux-tabs-only"))))) + (dir-locals-set-class-variables + 'linux-kernel + '((c-mode . ( + (c-basic-offset . 8) + (c-label-minimum-indentation . 0) + (c-offsets-alist . ( + (arglist-close . c-lineup-arglist-tabs-only) + (arglist-cont-nonempty . + (c-lineup-gcc-asm-reg c-lineup-arglist-tabs-only)) + (arglist-intro . 
+) + (brace-list-intro . +) + (c . c-lineup-C-comments) + (case-label . 0) + (comment-intro . c-lineup-comment) + (cpp-define-intro . +) + (cpp-macro . -1000) + (cpp-macro-cont . +) + (defun-block-intro . +) + (else-clause . 0) + (func-decl-cont . +) + (inclass . +) + (inher-cont . c-lineup-multi-inher) + (knr-argdecl-intro . 0) + (label . -1000) + (statement . 0) + (statement-block-intro . +) + (statement-case-intro . +) + (statement-cont . +) + (substatement . +) + )) + (indent-tabs-mode . t) + (show-trailing-whitespace . t) + )))) + + (dir-locals-set-directory-class + (expand-file-name "~/src/linux-trees") + 'linux-kernel) 这会让 emacs 在 ``~/src/linux-trees`` 下的 C 源文件获得更好的内核代码风格。 diff --git a/Documentation/usb/authorization.txt b/Documentation/usb/authorization.txt index f901ec77439c..9dd1dc7b1009 100644 --- a/Documentation/usb/authorization.txt +++ b/Documentation/usb/authorization.txt @@ -34,7 +34,9 @@ $ echo 1 > /sys/bus/usb/devices/usbX/authorized_default By default, Wired USB devices are authorized by default to connect. Wireless USB hosts deauthorize by default all new connected devices (this is so because we need to do an authentication phase -before authorizing). +before authorizing). Writing "2" to the authorized_default attribute +causes kernel to only authorize by default devices connected to internal +USB ports. Example system lockdown (lame) diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst index c4dbe6f7cdae..1129c7550a48 100644 --- a/Documentation/userspace-api/spec_ctrl.rst +++ b/Documentation/userspace-api/spec_ctrl.rst @@ -28,18 +28,20 @@ PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature which is selected with arg2 of prctl(2). The return value uses bits 0-3 with the following meaning: -==== ===================== =================================================== -Bit Define Description -==== ===================== =================================================== -0 PR_SPEC_PRCTL Mitigation can be controlled per task by - PR_SET_SPECULATION_CTRL. -1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is - disabled. -2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is - enabled. -3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A - subsequent prctl(..., PR_SPEC_ENABLE) will fail. -==== ===================== =================================================== +==== ====================== ================================================== +Bit Define Description +==== ====================== ================================================== +0 PR_SPEC_PRCTL Mitigation can be controlled per task by + PR_SET_SPECULATION_CTRL. +1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is + disabled. +2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is + enabled. +3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A + subsequent prctl(..., PR_SPEC_ENABLE) will fail. +4 PR_SPEC_DISABLE_NOEXEC Same as PR_SPEC_DISABLE, but the state will be + cleared on :manpage:`execve(2)`. +==== ====================== ================================================== If all bits are 0 the CPU is not affected by the speculation misfeature. 
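
As a purely illustrative sketch (the PR_SPEC_* constants are assumed to be
provided by the uapi headers; on older toolchains they may need to be copied
from include/uapi/linux/prctl.h), querying the Speculative Store Bypass state
described above could look like this::

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>

    int main(void)
    {
            /* Ask the kernel for this task's Speculative Store Bypass state. */
            int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

            if (state < 0) {
                    perror("PR_GET_SPECULATION_CTRL");
                    return 1;
            }
            if (state == 0) {
                    puts("CPU not affected by the speculation misfeature");
                    return 0;
            }
            printf("per-task control via prctl(): %s\n",
                   (state & PR_SPEC_PRCTL) ? "available" : "not available");
            printf("mitigation is currently %s\n",
                   (state & PR_SPEC_DISABLE) ? "enabled (speculation disabled)"
                                             : "disabled (speculation enabled)");
            return 0;
    }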
@@ -92,6 +94,7 @@ Speculation misfeature controls * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0); * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0); * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0); + * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE_NOEXEC, 0, 0); - PR_SPEC_INDIR_BRANCH: Indirect Branch Speculation in User Processes (Mitigate Spectre V2 style attacks against user processes) diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virtual/kvm/s390-diag.txt index 48c4921794ed..7c52e5f8b210 100644 --- a/Documentation/virtual/kvm/s390-diag.txt +++ b/Documentation/virtual/kvm/s390-diag.txt @@ -68,7 +68,8 @@ Subcode 3 - virtio-ccw notification identifier, it is ignored. After completion of the DIAGNOSE call, general register 2 may contain - a 64bit identifier (in the kvm_io_bus cookie case). + a 64bit identifier (in the kvm_io_bus cookie case), or a negative + error value, if an internal error occurred. See also the virtio standard for a discussion of this hypercall. diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst index 2b3ab3a1ccf3..b58cc3bfe777 100644 --- a/Documentation/vm/index.rst +++ b/Documentation/vm/index.rst @@ -4,7 +4,7 @@ Linux Memory Management Documentation This is a collection of documents about the Linux memory management (mm) subsystem. If you are looking for advice on simply allocating memory, -see the :ref:`memory-allocation`. +see the :ref:`memory_allocation`. User guides for MM features =========================== diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst index 195928808bac..933ada4368ff 100644 --- a/Documentation/vm/slub.rst +++ b/Documentation/vm/slub.rst @@ -66,7 +66,7 @@ Trying to find an issue in the dentry cache? Try:: to only enable debugging on the dentry cache. You may use an asterisk at the end of the slab name, in order to cover all slabs with the same prefix. For example, here's how you can poison the dentry cache as well as all kmalloc -slabs: +slabs:: slub_debug=P,kmalloc-*,dentry @@ -141,7 +141,7 @@ can be influenced by kernel parameters: (list_lock) where contention may occur. ``slub_min_order`` - specifies a minim order of slabs. A similar effect like + specifies a minimum order of slabs. A similar effect like ``slub_min_objects``. ``slub_max_order`` diff --git a/Documentation/watchdog/mlx-wdt.txt b/Documentation/watchdog/mlx-wdt.txt new file mode 100644 index 000000000000..66eeb78505c3 --- /dev/null +++ b/Documentation/watchdog/mlx-wdt.txt @@ -0,0 +1,52 @@ + Mellanox watchdog drivers + for x86 based system switches + +This driver provides watchdog functionality for various Mellanox +Ethernet and Infiniband switch systems. + +Mellanox watchdog device is implemented in a programmable logic device. + +There are 2 types of HW watchdog implementations. + +Type 1: +Actual HW timeout can be defined as a power of 2 msec. +e.g. timeout 20 sec will be rounded up to 32768 msec. +The maximum timeout period is 32 sec (32768 msec.), +Get time-left isn't supported + +Type 2: +Actual HW timeout is defined in sec. and it's the same as +a user-defined timeout. +Maximum timeout is 255 sec. +Get time-left is supported. + +Type 1 HW watchdog implementation exist in old systems and +all new systems have type 2 HW watchdog. +Two types of HW implementation have also different register map. + +Mellanox system can have 2 watchdogs: main and auxiliary. 
+Main and auxiliary watchdog devices can be enabled together +on the same system. +There are several actions that can be defined in the watchdog: +system reset, start fans at full speed and increase a register counter. +The last 2 actions are performed without a system reset. +Actions without reset are provided for the auxiliary watchdog device, +which is optional. +The watchdog can be started during a probe; in this case it will be +pinged by the watchdog core before the watchdog device is opened by a +user space application. +The watchdog can be initialised in nowayout mode, i.e. once started +it can't be stopped. + +This mlx-wdt driver supports both HW watchdog implementations. + +The watchdog driver is probed from the common mlx_platform driver. +The mlx_platform driver provides the Mellanox watchdog device with an appropriate set of +registers, an identity name (mlx-wdt-main or mlx-wdt-aux), +the initial timeout, the action performed on expiration, and configuration flags: +nowayout and start_at_boot, plus the HW watchdog +version - type1 or type2. +The driver checks during initialization whether the previous system reset +was done by the watchdog. If so, it reports this event. + +Access to HW registers is performed through a generic regmap interface. diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt index e8e8d14d3c4e..c1f95b59e14d 100644 --- a/Documentation/x86/resctrl_ui.txt +++ b/Documentation/x86/resctrl_ui.txt @@ -9,7 +9,7 @@ Fenghua Yu Tony Luck Vikas Shivappa -This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo +This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo flag bits: RDT (Resource Director Technology) Allocation - "rdt_a" CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" diff --git a/Documentation/xtensa/booting.txt b/Documentation/xtensa/booting.txt new file mode 100644 index 000000000000..402b33a2619f --- /dev/null +++ b/Documentation/xtensa/booting.txt @@ -0,0 +1,19 @@ +Passing boot parameters to the kernel. + +Boot parameters are represented as a TLV list in memory. Please see +arch/xtensa/include/asm/bootparam.h for the definition of the bp_tag structure and +tag value constants. The first entry in the list must have type BP_TAG_FIRST, the last +entry must have type BP_TAG_LAST. The address of the first list entry is +passed to the kernel in the register a2. The address type depends on MMU type: +- For configurations without MMU, with region protection or with MPU the + address must be the physical address. +- For configurations with region translation MMU or with MMUv3 and CONFIG_MMU=n + the address must be a valid address in the current mapping. The kernel will + not change the mapping on its own. +- For configurations with MMUv2 the address must be a virtual address in the + default virtual mapping (0xd0000000..0xffffffff). +- For configurations with MMUv3 and CONFIG_MMU=y the address may be either a + virtual or physical address. In either case it must be within the default + virtual mapping. It is considered physical if it is within the range of + physical addresses covered by the default KSEG mapping (XCHAL_KSEG_PADDR.. + XCHAL_KSEG_PADDR + XCHAL_KSEG_SIZE), otherwise it is considered virtual.
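
As a purely illustrative sketch, one way such a TLV list could be walked is
shown below. The structure layout and tag values are assumptions mirroring
arch/xtensa/include/asm/bootparam.h from memory; that header remains the
authoritative reference:

    /* Assumed layout -- the authoritative definition lives in bootparam.h. */
    struct bp_tag {
            unsigned short id;      /* BP_TAG_* value */
            unsigned short size;    /* payload size in bytes, header excluded */
            /* payload data follows the header */
    };

    #define BP_TAG_FIRST 0x7B0B     /* assumed value, verify against bootparam.h */
    #define BP_TAG_LAST  0x7E0B     /* assumed value, verify against bootparam.h */

    /* Advance to the next entry; any alignment padding required by
     * bootparam.h would have to be added here. */
    static const struct bp_tag *bp_tag_next(const struct bp_tag *t)
    {
            return (const struct bp_tag *)((const char *)(t + 1) + t->size);
    }

    /* Walk the list whose first entry was passed to the kernel in register a2. */
    static void bp_tag_walk(const struct bp_tag *t,
                            void (*handle)(unsigned short id, const void *data,
                                           unsigned short size))
    {
            if (t->id != BP_TAG_FIRST)
                    return;                 /* not a boot parameter list */
            for (; t->id != BP_TAG_LAST; t = bp_tag_next(t))
                    handle(t->id, t + 1, t->size);
    }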
diff --git a/Kbuild b/Kbuild index 65db5bef2e36..8637fd14135f 100644 --- a/Kbuild +++ b/Kbuild @@ -1,15 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # # Kbuild for top-level directory of the kernel -# This file takes care of the following: -# 1) Generate bounds.h -# 2) Generate timeconst.h -# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h) -# 4) Check for missing system calls -# 5) Generate constants.py (may need bounds.h) ##### -# 1) Generate bounds.h +# Generate bounds.h bounds-file := include/generated/bounds.h @@ -20,7 +14,7 @@ $(bounds-file): kernel/bounds.s FORCE $(call filechk,offsets,__LINUX_BOUNDS_H__) ##### -# 2) Generate timeconst.h +# Generate timeconst.h timeconst-file := include/generated/timeconst.h @@ -32,8 +26,7 @@ $(timeconst-file): kernel/time/timeconst.bc FORCE $(call filechk,gentimeconst) ##### -# 3) Generate asm-offsets.h -# +# Generate asm-offsets.h offsets-file := include/generated/asm-offsets.h @@ -46,8 +39,7 @@ $(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE $(call filechk,offsets,__ASM_OFFSETS_H__) ##### -# 4) Check for missing system calls -# +# Check for missing system calls always += missing-syscalls targets += missing-syscalls @@ -59,13 +51,16 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE $(call cmd,syscalls) ##### -# 5) Generate constants for Python GDB integration -# +# Check atomic headers are up-to-date + +always += old-atomics +targets += old-atomics -extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py +quiet_cmd_atomics = CALL $< + cmd_atomics = $(CONFIG_SHELL) $< -build_constants_py: $(timeconst-file) $(bounds-file) - @$(MAKE) $(build)=scripts/gdb/linux $@ +old-atomics: scripts/atomic/check-atomics.sh FORCE + $(call cmd,atomics) # Keep these three files during make clean no-clean-files := $(bounds-file) $(offsets-file) $(timeconst-file) diff --git a/LICENSES/exceptions/GCC-exception-2.0 b/LICENSES/exceptions/GCC-exception-2.0 new file mode 100644 index 000000000000..422914a88c36 --- /dev/null +++ b/LICENSES/exceptions/GCC-exception-2.0 @@ -0,0 +1,18 @@ +SPDX-Exception-Identifier: GCC-exception-2.0 +SPDX-URL: https://spdx.org/licenses/GCC-exception-2.0.html +SPDX-Licenses: GPL-2.0, GPL-2.0+, GPL-2.0-only, GPL-2.0-or-later +Usage-Guide: + This exception is used together with one of the above SPDX-Licenses to + allow linking the compiled version of code to non GPL compliant code. + To use this exception add it with the keyword WITH to one of the + identifiers in the SPDX-Licenses tag: + SPDX-License-Identifier: WITH GCC-exception-2.0 +License-Text: + +In addition to the permissions in the GNU Library General Public License, +the Free Software Foundation gives you unlimited permission to link the +compiled version of this file into combinations with other programs, and to +distribute those programs without any restriction coming from the use of +this file. (The General Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into another program.) diff --git a/MAINTAINERS b/MAINTAINERS index 9f64f8d3740e..f835946d78ce 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -331,6 +331,7 @@ ACPI APEI M: "Rafael J. 
Wysocki" M: Len Brown L: linux-acpi@vger.kernel.org +R: James Morse R: Tony Luck R: Borislav Petkov F: drivers/acpi/apei/ @@ -365,6 +366,7 @@ M: Lorenzo Pieralisi M: Hanjun Guo M: Sudeep Holla L: linux-acpi@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/acpi/arm64 @@ -409,8 +411,7 @@ F: drivers/platform/x86/wmi.c F: include/uapi/linux/wmi.h AD1889 ALSA SOUND DRIVER -M: Thibaut Varene -W: http://wiki.parisc-linux.org/AD1889 +W: https://parisc.wiki.kernel.org/index.php/AD1889 L: linux-parisc@vger.kernel.org S: Maintained F: sound/pci/ad1889.* @@ -766,6 +767,13 @@ S: Supported F: Documentation/hwmon/fam15h_power F: drivers/hwmon/fam15h_power.c +AMD FCH GPIO DRIVER +M: Enrico Weigelt, metux IT consult +L: linux-gpio@vger.kernel.org +S: Maintained +F: drivers/gpio/gpio-amd-fch.c +F: include/linux/platform_data/gpio/gpio-amd-fch.h + AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER L: linux-geode@lists.infradead.org (moderated for non-subscribers) S: Orphan @@ -854,6 +862,22 @@ S: Supported F: drivers/iio/adc/ad7124.c F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt +ANALOG DEVICES INC AD7606 DRIVER +M: Stefan Popa +L: linux-iio@vger.kernel.org +W: http://ez.analog.com/community/linux-device-drivers +S: Supported +F: drivers/iio/adc/ad7606.c +F: Documentation/devicetree/bindings/iio/adc/ad7606.txt + +ANALOG DEVICES INC AD7768-1 DRIVER +M: Stefan Popa +L: linux-iio@vger.kernel.org +W: http://ez.analog.com/community/linux-device-drivers +S: Supported +F: drivers/iio/adc/ad7768-1.c +F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt + ANALOG DEVICES INC AD9389B DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org @@ -1035,28 +1059,30 @@ L: netdev@vger.kernel.org S: Odd fixes F: drivers/net/appletalk/ F: net/appletalk/ +F: include/linux/atalk.h +F: include/uapi/linux/atalk.h APPLIED MICRO (APM) X-GENE DEVICE TREE SUPPORT -M: Duc Dang +M: Khuong Dinh S: Supported F: arch/arm64/boot/dts/apm/ APPLIED MICRO (APM) X-GENE SOC EDAC -M: Loc Ho +M: Khuong Dinh S: Supported F: drivers/edac/xgene_edac.c F: Documentation/devicetree/bindings/edac/apm-xgene-edac.txt APPLIED MICRO (APM) X-GENE SOC ETHERNET (V2) DRIVER -M: Iyappan Subramanian -M: Keyur Chudgar +M: Iyappan Subramanian +M: Keyur Chudgar S: Supported F: drivers/net/ethernet/apm/xgene-v2/ APPLIED MICRO (APM) X-GENE SOC ETHERNET DRIVER -M: Iyappan Subramanian -M: Keyur Chudgar -M: Quan Nguyen +M: Iyappan Subramanian +M: Keyur Chudgar +M: Quan Nguyen S: Supported F: drivers/net/ethernet/apm/xgene/ F: drivers/net/phy/mdio-xgene.c @@ -1064,7 +1090,7 @@ F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt APPLIED MICRO (APM) X-GENE SOC PMU -M: Tai Nguyen +M: Khuong Dinh S: Supported F: drivers/perf/xgene_pmu.c F: Documentation/perf/xgene-pmu.txt @@ -1133,13 +1159,26 @@ S: Supported F: drivers/gpu/drm/arm/hdlcd_* F: Documentation/devicetree/bindings/display/arm,hdlcd.txt +ARM KOMEDA DRM-KMS DRIVER +M: James (Qian) Wang +M: Liviu Dudau +L: Mali DP Maintainers +S: Supported +T: git git://linux-arm.org/linux-ld.git for-upstream/mali-dp +F: drivers/gpu/drm/arm/display/include/ +F: drivers/gpu/drm/arm/display/komeda/ +F: Documentation/devicetree/bindings/display/arm/arm,komeda.txt +F: Documentation/gpu/komeda-kms.rst + ARM MALI-DP DRM DRIVER M: Liviu Dudau M: Brian Starkey -M: Mali DP Maintainers +L: Mali DP Maintainers S: Supported +T: git git://linux-arm.org/linux-ld.git for-upstream/mali-dp F: 
drivers/gpu/drm/arm/ F: Documentation/devicetree/bindings/display/arm,malidp.txt +F: Documentation/gpu/afbc.rst ARM MFM AND FLOPPY DRIVERS M: Ian Molton @@ -1159,7 +1198,7 @@ F: arch/arm*/include/asm/hw_breakpoint.h F: arch/arm*/include/asm/perf_event.h F: drivers/perf/* F: include/linux/perf/arm_pmu.h -F: Documentation/devicetree/bindings/arm/pmu.txt +F: Documentation/devicetree/bindings/arm/pmu.yaml F: Documentation/devicetree/bindings/perf/ ARM PORT @@ -1372,6 +1411,13 @@ F: arch/arm/mach-aspeed/ F: arch/arm/boot/dts/aspeed-* N: aspeed +ARM/BITMAIN ARCHITECTURE +M: Manivannan Sadhasivam +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm64/boot/dts/bitmain/ +F: Documentation/devicetree/bindings/arm/bitmain.yaml + ARM/CALXEDA HIGHBANK ARCHITECTURE M: Rob Herring L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -1531,21 +1577,14 @@ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE M: Shawn Guo M: Sascha Hauer R: Pengutronix Kernel Team -R: Fabio Estevam +R: Fabio Estevam R: NXP Linux Team L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git -F: arch/arm/mach-imx/ -F: arch/arm/mach-mxs/ -F: arch/arm/boot/dts/imx* -F: arch/arm/configs/imx*_defconfig -F: arch/arm64/boot/dts/freescale/imx* -F: drivers/clk/imx/ -F: drivers/firmware/imx/ -F: drivers/soc/imx/ -F: include/linux/firmware/imx/ -F: include/soc/imx/ +N: imx +N: mxs +X: drivers/media/i2c/ ARM/FREESCALE VYBRID ARM ARCHITECTURE M: Shawn Guo @@ -1737,6 +1776,7 @@ F: arch/arm/configs/mvebu_*_defconfig F: arch/arm/mach-mvebu/ F: arch/arm64/boot/dts/marvell/armada* F: drivers/cpufreq/armada-37xx-cpufreq.c +F: drivers/cpufreq/armada-8k-cpufreq.c F: drivers/cpufreq/mvebu-cpufreq.c F: drivers/irqchip/irq-armada-370-xp.c F: drivers/irqchip/irq-mvebu-* @@ -1882,10 +1922,11 @@ F: drivers/usb/host/ehci-w90x900.c F: drivers/video/fbdev/nuc900fb.c ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT -M: Nelson Castillo L: openmoko-kernel@lists.openmoko.org (subscribers-only) W: http://wiki.openmoko.org/wiki/Neo_FreeRunner -S: Supported +S: Orphan +F: arch/arm/mach-s3c24xx/mach-gta02.c +F: arch/arm/mach-s3c24xx/gta02.h ARM/Orion SoC/Technologic Systems TS-78xx platform support M: Alexander Clouter @@ -1948,19 +1989,37 @@ M: David Brown L: linux-arm-msm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/soc/qcom/ +F: Documentation/devicetree/bindings/*/qcom* F: arch/arm/boot/dts/qcom-*.dts F: arch/arm/boot/dts/qcom-*.dtsi F: arch/arm/mach-qcom/ -F: arch/arm64/boot/dts/qcom/* +F: arch/arm64/boot/dts/qcom/ +F: drivers/*/qcom/ +F: drivers/*/qcom* +F: drivers/*/*/qcom/ +F: drivers/*/*/qcom* +F: drivers/*/pm8???-* +F: drivers/bluetooth/btqcomsmd.c +F: drivers/clocksource/timer-qcom.c +F: drivers/extcon/extcon-qcom* +F: drivers/iommu/msm* F: drivers/i2c/busses/i2c-qup.c -F: drivers/clk/qcom/ -F: drivers/dma/qcom/ -F: drivers/soc/qcom/ +F: drivers/i2c/busses/i2c-qcom-geni.c +F: drivers/mfd/ssbi.c +F: drivers/mmc/host/mmci_qcom* +F: drivers/mmc/host/sdhci_msm.c +F: drivers/pci/controller/dwc/pcie-qcom.c +F: drivers/phy/qualcomm/ +F: drivers/power/*/msm* +F: drivers/reset/reset-qcom-* +F: drivers/scsi/ufs/ufs-qcom.* F: drivers/spi/spi-qup.c +F: drivers/spi/spi-geni-qcom.c +F: drivers/spi/spi-qcom-qspi.c F: drivers/tty/serial/msm_serial.c -F: drivers/*/pm8???-* -F: drivers/mfd/ssbi.c -F: drivers/firmware/qcom_scm* +F: drivers/usb/dwc3/dwc3-qcom.c +F: include/dt-bindings/*/qcom* 
+F: include/linux/*/qcom* T: git git://git.kernel.org/pub/scm/linux/kernel/git/agross/linux.git ARM/RADISYS ENP2611 MACHINE SUPPORT @@ -1997,7 +2056,7 @@ Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next S: Supported F: arch/arm64/boot/dts/renesas/ -F: Documentation/devicetree/bindings/arm/shmobile.txt +F: Documentation/devicetree/bindings/arm/renesas.yaml F: drivers/soc/renesas/ F: include/linux/soc/renesas/ @@ -2084,8 +2143,9 @@ F: drivers/media/platform/s5p-cec/ F: Documentation/devicetree/bindings/media/s5p-cec.txt ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT -M: Andrzej Pietrasiewicz +M: Andrzej Pietrasiewicz M: Jacek Anaszewski +M: Sylwester Nawrocki L: linux-arm-kernel@lists.infradead.org L: linux-media@vger.kernel.org S: Maintained @@ -2109,6 +2169,8 @@ Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next S: Supported F: arch/arm/boot/dts/emev2* +F: arch/arm/boot/dts/gr-peach* +F: arch/arm/boot/dts/iwg20d-q7* F: arch/arm/boot/dts/r7s* F: arch/arm/boot/dts/r8a* F: arch/arm/boot/dts/r9a* @@ -2116,7 +2178,7 @@ F: arch/arm/boot/dts/sh* F: arch/arm/configs/shmobile_defconfig F: arch/arm/include/debug/renesas-scif.S F: arch/arm/mach-shmobile/ -F: Documentation/devicetree/bindings/arm/shmobile.txt +F: Documentation/devicetree/bindings/arm/renesas.yaml F: drivers/soc/renesas/ F: include/linux/soc/renesas/ @@ -2503,7 +2565,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git S: Maintained F: Documentation/devicetree/bindings/eeprom/at24.txt F: drivers/misc/eeprom/at24.c -F: include/linux/platform_data/at24.h ATA OVER ETHERNET (AOE) DRIVER M: "Ed L. Cashin" @@ -2609,6 +2670,7 @@ L: linux-kernel@vger.kernel.org S: Maintained F: arch/*/include/asm/atomic*.h F: include/*/atomic*.h +F: scripts/atomic/ ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER M: Bradley Grove @@ -2848,8 +2910,11 @@ F: include/uapi/linux/if_bonding.h BPF (Safe dynamic programs and tools) M: Alexei Starovoitov M: Daniel Borkmann +R: Martin KaFai Lau +R: Song Liu +R: Yonghong Song L: netdev@vger.kernel.org -L: linux-kernel@vger.kernel.org +L: bpf@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 @@ -2873,10 +2938,13 @@ F: samples/bpf/ F: tools/bpf/ F: tools/lib/bpf/ F: tools/testing/selftests/bpf/ +K: bpf +N: bpf BPF JIT for ARM M: Shubham Bansal L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/arm/net/ @@ -2885,18 +2953,21 @@ M: Daniel Borkmann M: Alexei Starovoitov M: Zi Shen Lim L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: arch/arm64/net/ BPF JIT for MIPS (32-BIT AND 64-BIT) M: Paul Burton L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/mips/net/ BPF JIT for NFP NICs M: Jakub Kicinski L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: drivers/net/ethernet/netronome/nfp/bpf/ @@ -2904,13 +2975,21 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT) M: Naveen N. 
Rao M: Sandipan Das L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/powerpc/net/ +BPF JIT for RISC-V (RV64G) +M: Björn Töpel +L: netdev@vger.kernel.org +S: Maintained +F: arch/riscv/net/ + BPF JIT for S390 M: Martin Schwidefsky M: Heiko Carstens L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/s390/net/ X: arch/s390/net/pnet.c @@ -2918,12 +2997,14 @@ X: arch/s390/net/pnet.c BPF JIT for SPARC (32-BIT AND 64-BIT) M: David S. Miller L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/sparc/net/ BPF JIT for X86 32-BIT M: Wang YanQing L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: arch/x86/net/bpf_jit_comp32.c @@ -2931,6 +3012,7 @@ BPF JIT for X86 64-BIT M: Alexei Starovoitov M: Daniel Borkmann L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Supported F: arch/x86/net/ X: arch/x86/net/bpf_jit_comp32.c @@ -3385,9 +3467,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic* F: drivers/media/platform/marvell-ccic/ CAIF NETWORK LAYER -M: Dmitry Tarnyagin L: netdev@vger.kernel.org -S: Supported +S: Orphan F: Documentation/networking/caif/ F: drivers/net/caif/ F: include/uapi/linux/caif/ @@ -3511,7 +3592,6 @@ F: include/linux/spi/cc2520.h F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER -M: Yael Chemla M: Gilad Ben-Yossef L: linux-crypto@vger.kernel.org S: Supported @@ -3675,7 +3755,7 @@ CHROME HARDWARE PLATFORM SUPPORT M: Benson Leung M: Enric Balletbo i Serra S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/bleung/chrome-platform.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/chrome-platform/linux.git F: drivers/platform/chrome/ CHROMEOS EC SUBDRIVERS @@ -3687,6 +3767,14 @@ N: cros_ec N: cros-ec F: drivers/power/supply/cros_usbpd-charger.c +CHROMEOS EC CODEC DRIVER +M: Cheng-Yi Chiang +S: Maintained +R: Enric Balletbo i Serra +R: Guenter Roeck +F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt +F: sound/soc/codecs/cros_ec_codec.* + CIRRUS LOGIC AUDIO CODEC DRIVERS M: Brian Austin M: Paul Handrigan @@ -3700,6 +3788,23 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/cirrus/ep93xx_eth.c +CIRRUS LOGIC LOCHNAGAR DRIVER +M: Charles Keepax +M: Richard Fitzgerald +L: patches@opensource.cirrus.com +S: Supported +F: drivers/clk/clk-lochnagar.c +F: drivers/mfd/lochnagar-i2c.c +F: drivers/pinctrl/cirrus/pinctrl-lochnagar.c +F: drivers/regulator/lochnagar-regulator.c +F: include/dt-bindings/clk/lochnagar.h +F: include/dt-bindings/pinctrl/lochnagar.h +F: include/linux/mfd/lochnagar* +F: Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt +F: Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt +F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt +F: Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt + CISCO FCOE HBA DRIVER M: Satish Kharat M: Sesidhar Baddela @@ -3906,9 +4011,10 @@ M: Johannes Weiner L: cgroups@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git S: Maintained -F: Documentation/cgroup* +F: Documentation/admin-guide/cgroup-v2.rst +F: Documentation/cgroup-v1/ F: include/linux/cgroup* -F: kernel/cgroup* +F: kernel/cgroup/ CONTROL GROUP - CPUSET M: Li Zefan @@ -3956,7 +4062,7 @@ M: Viresh Kumar L: linux-pm@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git -T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates) +T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git (For ARM Updates) B: https://bugzilla.kernel.org F: Documentation/admin-guide/pm/cpufreq.rst F: Documentation/admin-guide/pm/intel_pstate.rst @@ -4016,6 +4122,7 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git B: https://bugzilla.kernel.org F: Documentation/admin-guide/pm/cpuidle.rst +F: Documentation/driver-api/pm/cpuidle.rst F: drivers/cpuidle/* F: include/linux/cpuidle.h @@ -4123,7 +4230,7 @@ S: Maintained F: drivers/media/dvb-frontends/cxd2820r* CXGB3 ETHERNET DRIVER (CXGB3) -M: Arjun Vynipadath +M: Vishal Kulkarni L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported @@ -4152,7 +4259,7 @@ S: Supported F: drivers/crypto/chelsio CXGB4 ETHERNET DRIVER (CXGB4) -M: Arjun Vynipadath +M: Vishal Kulkarni L: netdev@vger.kernel.org W: http://www.chelsio.com S: Supported @@ -4536,10 +4643,11 @@ S: Maintained F: drivers/i2c/busses/i2c-diolan-u2c.c FILESYSTEM DIRECT ACCESS (DAX) -M: Matthew Wilcox -M: Ross Zwisler -M: Jan Kara +M: Dan Williams +R: Matthew Wilcox +R: Jan Kara L: linux-fsdevel@vger.kernel.org +L: linux-nvdimm@lists.01.org S: Supported F: fs/dax.c F: include/linux/dax.h @@ -4547,9 +4655,9 @@ F: include/trace/events/fs_dax.h DEVICE DIRECT ACCESS (DAX) M: Dan Williams -M: Dave Jiang -M: Ross Zwisler M: Vishal Verma +M: Keith Busch +M: Dave Jiang L: linux-nvdimm@lists.01.org S: Supported F: drivers/dax/ @@ -4833,10 +4941,11 @@ F: Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt DRM DRIVER FOR MSM ADRENO GPU M: Rob Clark +M: Sean Paul L: linux-arm-msm@vger.kernel.org L: dri-devel@lists.freedesktop.org L: freedreno@lists.freedesktop.org -T: git git://people.freedesktop.org/~robclark/linux +T: git https://gitlab.freedesktop.org/drm/msm.git S: Maintained F: drivers/gpu/drm/msm/ F: include/uapi/drm/msm_drm.h @@ -4876,6 +4985,7 @@ DRM DRIVER FOR QXL VIRTUAL GPU M: Dave Airlie M: Gerd Hoffmann L: virtualization@lists.linux-foundation.org +L: spice-devel@lists.freedesktop.org T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/qxl/ @@ -4896,6 +5006,12 @@ S: Orphan / Obsolete F: drivers/gpu/drm/sis/ F: include/uapi/drm/sis_drm.h +DRM DRIVER FOR SITRONIX ST7701 PANELS +M: Jagan Teki +S: Maintained +F: drivers/gpu/drm/panel/panel-sitronix-st7701.c +F: Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt + DRM DRIVER FOR SITRONIX ST7586 PANELS M: David Lechner S: Maintained @@ -4912,6 +5028,13 @@ DRM DRIVER FOR TDFX VIDEO CARDS S: Orphan / Obsolete F: drivers/gpu/drm/tdfx/ +DRM DRIVER FOR TPO TPG110 PANELS +M: Linus Walleij +T: git git://anongit.freedesktop.org/drm/drm-misc +S: Maintained +F: drivers/gpu/drm/panel/panel-tpo-tpg110.c +F: Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt + DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS M: Dave Airlie R: Sean Paul @@ -4920,6 +5043,16 @@ S: Odd Fixes F: drivers/gpu/drm/udl/ T: git git://anongit.freedesktop.org/drm/drm-misc +DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS) +M: Rodrigo Siqueira +R: Haneen Mohammed +R: Daniel Vetter +T: git git://anongit.freedesktop.org/drm/drm-misc +S: Maintained +L: dri-devel@lists.freedesktop.org +F: drivers/gpu/drm/vkms/ +F: Documentation/gpu/vkms.rst + DRM DRIVER FOR VMWARE VIRTUAL GPU M: "VMware Graphics" M: Thomas Hellstrom @@ -4989,7 +5122,6 @@ F: Documentation/devicetree/bindings/display/atmel/ T: git git://anongit.freedesktop.org/drm/drm-misc DRM DRIVERS FOR BRIDGE CHIPS -M: Archit Taneja M: Andrzej Hajda R: Laurent Pinchart S: 
Maintained @@ -5181,7 +5313,7 @@ DRM DRIVERS FOR XEN M: Oleksandr Andrushchenko T: git git://anongit.freedesktop.org/drm/drm-misc L: dri-devel@lists.freedesktop.org -L: xen-devel@lists.xen.org +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: drivers/gpu/drm/xen/ F: Documentation/gpu/xen-front.rst @@ -5399,6 +5531,12 @@ L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/amd64_edac* +EDAC-AST2500 +M: Stefan Schaeckeler +S: Supported +F: drivers/edac/aspeed_edac.c +F: Documentation/devicetree/bindings/edac/aspeed-sdram-edac.txt + EDAC-CALXEDA M: Robert Richter L: linux-edac@vger.kernel.org @@ -5423,6 +5561,7 @@ F: drivers/edac/thunderx_edac* EDAC-CORE M: Borislav Petkov M: Mauro Carvalho Chehab +R: James Morse L: linux-edac@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next @@ -5855,7 +5994,7 @@ S: Maintained F: drivers/media/tuners/fc2580* FCOE SUBSYSTEM (libfc, libfcoe, fcoe) -M: Johannes Thumshirn +M: Hannes Reinecke L: linux-scsi@vger.kernel.org W: www.Open-FCoE.org S: Supported @@ -5882,6 +6021,7 @@ L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/* F: include/linux/fs.h +F: include/linux/fs_types.h F: include/uapi/linux/fs.h FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER @@ -6024,6 +6164,12 @@ L: linuxppc-dev@lists.ozlabs.org S: Maintained F: drivers/dma/fsldma.* +FREESCALE ENETC ETHERNET DRIVERS +M: Claudiu Manoil +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/freescale/enetc/ + FREESCALE eTSEC ETHERNET DRIVER (GIANFAR) M: Claudiu Manoil L: netdev@vger.kernel.org @@ -6087,15 +6233,17 @@ FREESCALE QORIQ PTP CLOCK DRIVER M: Yangbo Lu L: netdev@vger.kernel.org S: Maintained +F: drivers/net/ethernet/freescale/enetc/enetc_ptp.c F: drivers/ptp/ptp_qoriq.c +F: drivers/ptp/ptp_qoriq_debugfs.c F: include/linux/fsl/ptp_qoriq.h F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt FREESCALE QUAD SPI DRIVER M: Han Xu -L: linux-mtd@lists.infradead.org +L: linux-spi@vger.kernel.org S: Maintained -F: drivers/mtd/spi-nor/fsl-quadspi.c +F: drivers/spi/spi-fsl-qspi.c FREESCALE QUICC ENGINE LIBRARY M: Qiang Zhao @@ -6146,7 +6294,7 @@ FREESCALE SOC SOUND DRIVERS M: Timur Tabi M: Nicolin Chen M: Xiubo Li -R: Fabio Estevam +R: Fabio Estevam L: alsa-devel@alsa-project.org (moderated for non-subscribers) L: linuxppc-dev@lists.ozlabs.org S: Maintained @@ -6194,9 +6342,10 @@ F: include/linux/fscache*.h FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT M: Theodore Y. 
Ts'o M: Jaegeuk Kim +M: Eric Biggers L: linux-fscrypt@vger.kernel.org Q: https://patchwork.kernel.org/project/linux-fscrypt/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/fscrypt.git +T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git S: Supported F: fs/crypto/ F: include/linux/fscrypt*.h @@ -6642,6 +6791,15 @@ F: drivers/clocksource/h8300_*.c F: drivers/clk/h8300/ F: drivers/irqchip/irq-renesas-h8*.c +HABANALABS PCI DRIVER +M: Oded Gabbay +T: git https://github.com/HabanaAI/linux.git +S: Supported +F: drivers/misc/habanalabs/ +F: include/uapi/misc/habanalabs.h +F: Documentation/ABI/testing/sysfs-driver-habanalabs +F: Documentation/ABI/testing/debugfs-driver-habanalabs + HACKRF MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org @@ -6999,7 +7157,7 @@ M: Haiyang Zhang M: Stephen Hemminger M: Sasha Levin T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git -L: devel@linuxdriverproject.org +L: linux-hyperv@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/microsoft/netvsc.txt F: arch/x86/include/asm/mshyperv.h @@ -7015,6 +7173,7 @@ F: drivers/net/hyperv/ F: drivers/scsi/storvsc_drv.c F: drivers/uio/uio_hv_generic.c F: drivers/video/fbdev/hyperv_fb.c +F: drivers/iommu/hyperv_iommu.c F: net/vmw_vsock/hyperv_transport.c F: include/linux/hyperv.h F: include/uapi/linux/hyperv.h @@ -7164,6 +7323,7 @@ F: drivers/i2c/i2c-stub.c I3C SUBSYSTEM M: Boris Brezillon L: linux-i3c@lists.infradead.org +C: irc://chat.freenode.net/linux-i3c T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git S: Maintained F: Documentation/ABI/testing/sysfs-bus-i3c @@ -7698,7 +7858,6 @@ M: Yong Zhi M: Sakari Ailus M: Bingbu Cao R: Tian Shu Qiu -R: Jian Xu Zheng L: linux-media@vger.kernel.org S: Maintained F: drivers/media/pci/intel/ipu3/ @@ -7883,6 +8042,16 @@ L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-intel-mid.c +INTERCONNECT API +M: Georgi Djakov +S: Maintained +F: Documentation/interconnect/ +F: Documentation/devicetree/bindings/interconnect/ +F: drivers/interconnect/ +F: include/dt-bindings/interconnect/ +F: include/linux/interconnect-provider.h +F: include/linux/interconnect.h + INVENSENSE MPU-3050 GYROSCOPE DRIVER M: Linus Walleij L: linux-iio@vger.kernel.org @@ -8401,7 +8570,7 @@ F: security/keys/encrypted-keys/ KEYS-TRUSTED M: James Bottomley M: Jarkko Sakkinen -M: Mimi Zohar +M: Mimi Zohar L: linux-integrity@vger.kernel.org L: keyrings@vger.kernel.org S: Supported @@ -8482,6 +8651,7 @@ L7 BPF FRAMEWORK M: John Fastabend M: Daniel Borkmann L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: include/linux/skmsg.h F: net/core/skmsg.c @@ -8643,7 +8813,6 @@ S: Maintained F: tools/lib/lockdep/ LIBNVDIMM BLK: MMIO-APERTURE DRIVER -M: Ross Zwisler M: Dan Williams M: Vishal Verma M: Dave Jiang @@ -8656,7 +8825,6 @@ F: drivers/nvdimm/region_devs.c LIBNVDIMM BTT: BLOCK TRANSLATION TABLE M: Vishal Verma M: Dan Williams -M: Ross Zwisler M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ @@ -8664,7 +8832,6 @@ S: Supported F: drivers/nvdimm/btt* LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER -M: Ross Zwisler M: Dan Williams M: Vishal Verma M: Dave Jiang @@ -8683,9 +8850,10 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM M: Dan Williams -M: Ross Zwisler M: Vishal Verma M: Dave Jiang +M: Keith Busch +M: Ira Weiny L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ T: 
git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git @@ -8833,10 +9001,10 @@ F: drivers/platform/x86/hp_accel.c LIVE PATCHING M: Josh Poimboeuf -M: Jessica Yu M: Jiri Kosina M: Miroslav Benes -R: Petr Mladek +M: Petr Mladek +R: Joe Lawrence S: Maintained F: kernel/livepatch/ F: include/linux/livepatch.h @@ -8845,8 +9013,9 @@ F: arch/x86/kernel/livepatch.c F: Documentation/livepatch/ F: Documentation/ABI/testing/sysfs-kernel-livepatch F: samples/livepatch/ +F: tools/testing/selftests/livepatch/ L: live-patching@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching.git LLC (802.2) L: netdev@vger.kernel.org @@ -9084,6 +9253,14 @@ F: drivers/gpu/drm/armada/ F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/ +MARVELL ARMADA 3700 PHY DRIVERS +M: Miquel Raynal +S: Maintained +F: drivers/phy/marvell/phy-mvebu-a3700-comphy.c +F: drivers/phy/marvell/phy-mvebu-a3700-utmi.c +F: Documentation/devicetree/bindings/phy/phy-mvebu-comphy.txt +F: Documentation/devicetree/bindings/phy/phy-mvebu-utmi.txt + MARVELL CRYPTO DRIVER M: Boris Brezillon M: Arnaud Ebalard @@ -9352,6 +9529,17 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/platform/imx-pxp.[ch] +MEDIA DRIVERS FOR FREESCALE IMX7 +M: Rui Miguel Silva +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: Documentation/devicetree/bindings/media/imx7-csi.txt +F: Documentation/devicetree/bindings/media/imx7-mipi-csi2.txt +F: Documentation/media/v4l-drivers/imx7.rst +F: drivers/staging/media/imx/imx7-media-csi.c +F: drivers/staging/media/imx/imx7-mipi-csis.c + MEDIA DRIVERS FOR HELENE M: Abylay Ospan L: linux-media@vger.kernel.org @@ -9713,6 +9901,7 @@ M: Vadim Pasternak L: platform-driver-x86@vger.kernel.org S: Supported F: drivers/platform/mellanox/ +F: include/linux/platform_data/mlxreg.h MELLANOX MLX4 core VPI driver M: Tariq Toukan @@ -9787,6 +9976,14 @@ F: kernel/sched/membarrier.c F: include/uapi/linux/membarrier.h F: arch/powerpc/include/asm/membarrier.h +MEMBLOCK +M: Mike Rapoport +L: linux-mm@kvack.org +S: Maintained +F: include/linux/memblock.h +F: mm/memblock.c +F: Documentation/core-api/boot-time-mm.rst + MEMORY MANAGEMENT L: linux-mm@kvack.org W: http://www.linux-mm.org @@ -9853,6 +10050,18 @@ F: drivers/media/platform/meson/ao-cec.c F: Documentation/devicetree/bindings/media/meson-ao-cec.txt T: git git://linuxtv.org/media_tree.git +MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS +M: Liang Yang +L: linux-mtd@lists.infradead.org +S: Maintained +F: drivers/mtd/nand/raw/meson_* +F: Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt + +METHODE UDPU SUPPORT +M: Vladimir Vid +S: Maintained +F: arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts + MICROBLAZE ARCHITECTURE M: Michal Simek W: http://www.monstr.eu/fdt/ @@ -10577,6 +10786,7 @@ F: Documentation/devicetree/bindings/net/dsa/ F: net/dsa/ F: include/net/dsa.h F: include/linux/dsa/ +F: include/linux/platform_data/dsa.h F: drivers/net/dsa/ NETWORKING [GENERAL] @@ -10792,6 +11002,12 @@ F: drivers/power/supply/bq27xxx_battery_i2c.c F: drivers/power/supply/isp1704_charger.c F: drivers/power/supply/rx51_battery.c +NOLIBC HEADER FILE +M: Willy Tarreau +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git +F: tools/include/nolibc/ + NTB AMD DRIVER M: Shyam Sundar S K L: linux-ntb@googlegroups.com @@ -10893,7 +11109,7 @@ F: 
include/linux/nvmem-consumer.h F: include/linux/nvmem-provider.h NXP SGTL5000 DRIVER -M: Fabio Estevam +M: Fabio Estevam L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/sound/sgtl5000.txt @@ -10931,6 +11147,14 @@ F: lib/objagg.c F: lib/test_objagg.c F: include/linux/objagg.h +NXP FSPI DRIVER +R: Yogesh Gaur +M: Ashish Kumar +L: linux-spi@vger.kernel.org +S: Maintained +F: drivers/spi/spi-nxp-fspi.c +F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt + OBJTOOL M: Josh Poimboeuf M: Peter Zijlstra @@ -11232,6 +11456,19 @@ S: Maintained F: drivers/media/i2c/ov7740.c F: Documentation/devicetree/bindings/media/i2c/ov7740.txt +OMNIVISION OV9640 SENSOR DRIVER +M: Petr Cvek +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/ov9640.* + +OMNIVISION OV8856 SENSOR DRIVER +M: Ben Kao +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/i2c/ov8856.c + OMNIVISION OV9650 SENSOR DRIVER M: Sakari Ailus R: Akinobu Mita @@ -11264,6 +11501,11 @@ M: Jens Wiklander S: Maintained F: drivers/tee/optee/ +OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER +M: Sumit Garg +S: Maintained +F: drivers/char/hw_random/optee-rng.c + OPA-VNIC DRIVER M: Dennis Dalessandro M: Niranjana Vishwanathapura @@ -11307,10 +11549,12 @@ F: include/dt-bindings/ OPENCORES I2C BUS DRIVER M: Peter Korsgaard +M: Andrew Lunn L: linux-i2c@vger.kernel.org S: Maintained F: Documentation/i2c/busses/i2c-ocores F: drivers/i2c/busses/i2c-ocores.c +F: include/linux/platform_data/i2c-ocores.h OPENRISC ARCHITECTURE M: Jonas Bonn @@ -11389,13 +11633,6 @@ W: http://www.nongnu.org/orinoco/ S: Orphan F: drivers/net/wireless/intersil/orinoco/ -OSD LIBRARY and FILESYSTEM -M: Boaz Harrosh -S: Maintained -F: drivers/scsi/osd/ -F: include/scsi/osd_* -F: fs/exofs/ - OV2659 OMNIVISION SENSOR DRIVER M: "Lad, Prabhakar" L: linux-media@vger.kernel.org @@ -11481,7 +11718,7 @@ F: Documentation/blockdev/paride.txt F: drivers/block/paride/ PARISC ARCHITECTURE -M: "James E.J. Bottomley" +M: "James E.J. 
Bottomley" M: Helge Deller L: linux-parisc@vger.kernel.org W: http://www.parisc-linux.org/ @@ -11508,6 +11745,11 @@ F: lib/parman.c F: lib/test_parman.c F: include/linux/parman.h +PC ENGINES APU BOARD DRIVER +M: Enrico Weigelt, metux IT consult +S: Maintained +F: drivers/platform/x86/pcengines-apuv2.c + PC87360 HARDWARE MONITORING DRIVER M: Jim Cromie L: linux-hwmon@vger.kernel.org @@ -11561,7 +11803,7 @@ F: Documentation/devicetree/bindings/pci/altera-pcie.txt F: drivers/pci/controller/pcie-altera.c PCI DRIVER FOR APPLIEDMICRO XGENE -M: Tanmay Inamdar +M: Toan Le L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained @@ -11585,7 +11827,7 @@ F: Documentation/devicetree/bindings/pci/pci-armada8k.txt F: drivers/pci/controller/dwc/pcie-armada8k.c PCI DRIVER FOR CADENCE PCIE IP -M: Alan Douglas +M: Tom Joseph L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/cdns,*.txt @@ -11739,7 +11981,7 @@ F: Documentation/devicetree/bindings/pci/altera-pcie-msi.txt F: drivers/pci/controller/pcie-altera-msi.c PCI MSI DRIVER FOR APPLIEDMICRO XGENE -M: Duc Dang +M: Toan Le L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained @@ -12228,14 +12470,6 @@ S: Maintained F: drivers/net/ppp/pptp.c W: http://sourceforge.net/projects/accel-pptp -PREEMPTIBLE KERNEL -M: Robert Love -L: kpreempt-tech@lists.sourceforge.net -W: https://www.kernel.org/pub/linux/kernel/people/rml/preempt-kernel -S: Supported -F: Documentation/preempt-locking.txt -F: include/linux/preempt.h - PRINTK M: Petr Mladek M: Sergey Senozhatsky @@ -12372,6 +12606,7 @@ L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Odd Fixes F: drivers/media/usb/pwc/* +F: include/trace/events/pwc.h PWM FAN DRIVER M: Kamil Debski @@ -12597,11 +12832,11 @@ F: Documentation/media/v4l-drivers/qcom_camss.rst F: drivers/media/platform/qcom/camss/ QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096 -M: Ilia Lin -L: linux-pm@vger.kernel.org -S: Maintained -F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt -F: drivers/cpufreq/qcom-cpufreq-kryo.c +M: Ilia Lin +L: linux-pm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt +F: drivers/cpufreq/qcom-cpufreq-kryo.c QUALCOMM EMAC GIGABIT ETHERNET DRIVER M: Timur Tabi @@ -12609,6 +12844,14 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/qualcomm/emac/ +QUALCOMM ETHQOS ETHERNET DRIVER +M: Vinod Koul +M: Niklas Cassel +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +F: Documentation/devicetree/bindings/net/qcom,dwmac.txt + QUALCOMM GENERIC INTERFACE I2C DRIVER M: Alok Chauhan M: Karthikeyan Ramasubramanian @@ -12768,6 +13011,16 @@ M: Alexandre Bounine S: Maintained F: drivers/rapidio/ +RAS INFRASTRUCTURE +M: Tony Luck +M: Borislav Petkov +L: linux-edac@vger.kernel.org +S: Maintained +F: drivers/ras/ +F: include/linux/ras.h +F: include/ras/ras_event.h +F: Documentation/admin-guide/ras.rst + RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER L: linux-wireless@vger.kernel.org S: Orphan @@ -12868,6 +13121,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt F: drivers/net/dsa/realtek-smi* F: drivers/net/dsa/rtl83* +REDPINE WIRELESS DRIVER +M: Amitkumar Karwar +M: Siva Rebbagondla +L: linux-wireless@vger.kernel.org +S: Maintained +F: drivers/net/wireless/rsi/ + REGISTER MAP ABSTRACTION M: Mark Brown L: linux-kernel@vger.kernel.org @@ -12958,6 +13218,7 @@ F: drivers/reset/ F: Documentation/devicetree/bindings/reset/ 
F: include/dt-bindings/reset/ F: include/linux/reset.h +F: include/linux/reset/ F: include/linux/reset-controller.h RESTARTABLE SEQUENCES SUPPORT @@ -13458,6 +13719,7 @@ F: kernel/sched/ F: include/linux/sched.h F: include/uapi/linux/sched.h F: include/linux/wait.h +F: include/linux/preempt.h SCR24X CHIP CARD INTERFACE DRIVER M: Lubomir Rintel @@ -13501,6 +13763,7 @@ M: "James E.J. Bottomley" T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git M: "Martin K. Petersen" T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git +Q: https://patchwork.kernel.org/project/linux-scsi/list/ L: linux-scsi@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/scsi/ @@ -13515,6 +13778,18 @@ F: Documentation/scsi/st.txt F: drivers/scsi/st.* F: drivers/scsi/st_*.h +SCSI TARGET SUBSYSTEM +M: "Martin K. Petersen" +L: linux-scsi@vger.kernel.org +L: target-devel@vger.kernel.org +W: http://www.linux-iscsi.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git +Q: https://patchwork.kernel.org/project/target-devel/list/ +S: Supported +F: drivers/target/ +F: include/target/ +F: Documentation/target/ + SCTP PROTOCOL M: Vlad Yasevich M: Neil Horman @@ -13586,11 +13861,18 @@ F: drivers/mmc/host/sdhci-brcmstb* SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) DRIVER M: Adrian Hunter L: linux-mmc@vger.kernel.org -T: git git://git.infradead.org/users/ahunter/linux-sdhci.git S: Maintained F: drivers/mmc/host/sdhci* F: include/linux/mmc/sdhci* +EMMC CMDQ HOST CONTROLLER INTERFACE (CQHCI) DRIVER +M: Adrian Hunter +M: Ritesh Harjani +M: Asutosh Das +L: linux-mmc@vger.kernel.org +S: Maintained +F: drivers/mmc/host/cqhci* + SYNOPSYS SDHCI COMPLIANT DWC MSHC DRIVER M: Prabu Thangamuthu M: Manjunath M B @@ -13696,6 +13978,15 @@ L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/sfc/ +SFF/SFP/SFP+ MODULE SUPPORT +M: Russell King +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/phy/phylink.c +F: drivers/net/phy/sfp* +F: include/linux/phylink.h +F: include/linux/sfp.h + SGI GRU DRIVER M: Dimitri Sivanich S: Maintained @@ -13717,6 +14008,7 @@ F: drivers/misc/sgi-xp/ SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS M: Ursula Braun +M: Karsten Graul L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@ -14308,6 +14600,7 @@ F: arch/arm/mach-spear/ SPI NOR SUBSYSTEM M: Marek Vasut +M: Tudor Ambarus L: linux-mtd@lists.infradead.org W: http://www.linux-mtd.infradead.org/ Q: http://patchwork.ozlabs.org/project/linux-mtd/list/ @@ -14472,11 +14765,6 @@ L: linux-wireless@vger.kernel.org S: Supported F: drivers/staging/wilc1000/ -STAGING - XGI Z7,Z9,Z11 PCI DISPLAY DRIVER -M: Arnaud Patard -S: Odd Fixes -F: drivers/staging/xgifb/ - STAGING SUBSYSTEM M: Greg Kroah-Hartman T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git @@ -14681,7 +14969,7 @@ S: Maintained F: drivers/tty/serial/8250/8250_dw.c SYNOPSYS DESIGNWARE APB GPIO DRIVER -M: Hoan Tran +M: Hoan Tran L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-dwapb.c @@ -14773,18 +15061,6 @@ F: Documentation/filesystems/sysv-fs.txt F: fs/sysv/ F: include/linux/sysv_fs.h -TARGET SUBSYSTEM -M: "Nicholas A. 
Bellinger" -L: linux-scsi@vger.kernel.org -L: target-devel@vger.kernel.org -W: http://www.linux-iscsi.org -W: http://groups.google.com/group/linux-iscsi-target-dev -T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master -S: Supported -F: drivers/target/ -F: include/target/ -F: Documentation/target/ - TASKSTATS STATISTICS INTERFACE M: Balbir Singh S: Maintained @@ -15071,6 +15347,13 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers) S: Maintained F: sound/soc/ti/ +Texas Instruments' DAC7612 DAC Driver +M: Ricardo Ribalda +L: linux-iio@vger.kernel.org +S: Supported +F: drivers/iio/dac/ti-dac7612.c +F: Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt + THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org @@ -15382,12 +15665,11 @@ F: mm/shmem.c TOMOYO SECURITY MODULE M: Kentaro Takeda M: Tetsuo Handa -L: tomoyo-dev-en@lists.sourceforge.jp (subscribers-only, for developers in English) -L: tomoyo-users-en@lists.sourceforge.jp (subscribers-only, for users in English) -L: tomoyo-dev@lists.sourceforge.jp (subscribers-only, for developers in Japanese) -L: tomoyo-users@lists.sourceforge.jp (subscribers-only, for users in Japanese) -W: http://tomoyo.sourceforge.jp/ -T: quilt http://svn.sourceforge.jp/svnroot/tomoyo/trunk/2.5.x/tomoyo-lsm/patches/ +L: tomoyo-dev-en@lists.osdn.me (subscribers-only, for developers in English) +L: tomoyo-users-en@lists.osdn.me (subscribers-only, for users in English) +L: tomoyo-dev@lists.osdn.me (subscribers-only, for developers in Japanese) +L: tomoyo-users@lists.osdn.me (subscribers-only, for users in Japanese) +W: https://tomoyo.osdn.jp/ S: Maintained F: security/tomoyo/ @@ -15676,14 +15958,16 @@ F: drivers/visorbus/ F: drivers/staging/unisys/ UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER -M: Vinayak Holikatti +R: Alim Akhtar +R: Avri Altman +R: Pedro Sousa L: linux-scsi@vger.kernel.org S: Supported F: Documentation/scsi/ufs.txt F: drivers/scsi/ufs/ UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS -M: Joao Pinto +M: Pedro Sousa L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/ufs/*dwc* @@ -16468,6 +16752,12 @@ L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-wcove.c +WHWAVE RTC DRIVER +M: Dianlong Li +L: linux-rtc@vger.kernel.org +S: Maintained +F: drivers/rtc/rtc-sd3078.c + WIIMOTE HID DRIVER M: David Herrmann L: linux-input@vger.kernel.org @@ -16499,6 +16789,11 @@ M: David Härdeman S: Maintained F: drivers/media/rc/winbond-cir.c +RCMM REMOTE CONTROLS DECODER +M: Patrick Lerda +S: Maintained +F: drivers/media/rc/ir-rcmm-decoder.c + WINSYSTEMS EBC-C384 WATCHDOG DRIVER M: William Breathitt Gray L: linux-watchdog@vger.kernel.org @@ -16641,6 +16936,15 @@ S: Maintained F: drivers/platform/x86/ F: drivers/platform/olpc/ +X86 PLATFORM DRIVERS - ARCH +R: Darren Hart +R: Andy Shevchenko +L: platform-driver-x86@vger.kernel.org +L: x86@kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core +S: Maintained +F: arch/x86/platform + X86 VDSO M: Andy Lutomirski L: linux-kernel@vger.kernel.org @@ -16673,10 +16977,30 @@ T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/tuners/tuner-xc2028.* +XDP (eXpress Data Path) +M: Alexei Starovoitov +M: Daniel Borkmann +M: David S. 
Miller +M: Jakub Kicinski +M: Jesper Dangaard Brouer +M: John Fastabend +L: netdev@vger.kernel.org +L: xdp-newbies@vger.kernel.org +L: bpf@vger.kernel.org +S: Supported +F: net/core/xdp.c +F: include/net/xdp.h +F: kernel/bpf/devmap.c +F: kernel/bpf/cpumap.c +F: include/trace/events/xdp.h +K: xdp +N: xdp + XDP SOCKETS (AF_XDP) M: Björn Töpel M: Magnus Karlsson L: netdev@vger.kernel.org +L: bpf@vger.kernel.org S: Maintained F: kernel/bpf/xskmap.c F: net/xdp/ diff --git a/Makefile b/Makefile index 141653226f3c..9ef547fc7ffe 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = NAME = Shy Crocodile # *DOCUMENTATION* @@ -15,19 +15,6 @@ NAME = Shy Crocodile PHONY := _all _all: -# Do not use make's built-in rules and variables -# (this increases performance and avoids hard-to-debug behaviour) -MAKEFLAGS += -rR - -# Avoid funny character set dependencies -unexport LC_ALL -LC_COLLATE=C -LC_NUMERIC=C -export LC_COLLATE LC_NUMERIC - -# Avoid interference with shell env settings -unexport GREP_OPTIONS - # We are using a recursive build, so we need to do a little thinking # to get the ordering right. # @@ -44,6 +31,25 @@ unexport GREP_OPTIONS # descending is started. They are now explicitly listed as the # prepare rule. +ifneq ($(sub-make-done),1) + +# Do not use make's built-in rules and variables +# (this increases performance and avoids hard-to-debug behaviour) +MAKEFLAGS += -rR + +# 'MAKEFLAGS += -rR' does not become immediately effective for old +# GNU Make versions. Cancel implicit rules for this Makefile. +$(lastword $(MAKEFILE_LIST)): ; + +# Avoid funny character set dependencies +unexport LC_ALL +LC_COLLATE=C +LC_NUMERIC=C +export LC_COLLATE LC_NUMERIC + +# Avoid interference with shell env settings +unexport GREP_OPTIONS + # Beautify output # --------------------------------------------------------------------------- # @@ -90,7 +96,6 @@ endif ifneq ($(findstring s,$(filter-out --%,$(MAKEFLAGS))),) quiet=silent_ - tools_silent=s endif export quiet Q KBUILD_VERBOSE @@ -112,7 +117,6 @@ export quiet Q KBUILD_VERBOSE # KBUILD_SRC is not intended to be used by the regular user (for now), # it is set on invocation of make with KBUILD_OUTPUT or O= specified. -ifeq ($(KBUILD_SRC),) # OK, Make called in directory where kernel src resides # Do we want to locate output files in a separate directory? @@ -120,9 +124,6 @@ ifeq ("$(origin O)", "command line") KBUILD_OUTPUT := $(O) endif -# Cancel implicit rules on top Makefile -$(CURDIR)/Makefile Makefile: ; - ifneq ($(words $(subst :, ,$(CURDIR))), 1) $(error main directory cannot contain spaces nor colons) endif @@ -142,6 +143,13 @@ $(if $(KBUILD_OUTPUT),, \ # 'sub-make' below. MAKEFLAGS += --include-dir=$(CURDIR) +else + +# Do not print "Entering directory ..." at all for in-tree build. 
+MAKEFLAGS += --no-print-directory + +endif # ifneq ($(KBUILD_OUTPUT),) + PHONY += $(MAKECMDGOALS) sub-make $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make @@ -149,16 +157,12 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make # Invoke a second make in the output directory, passing relevant variables sub-make: - $(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \ + $(Q)$(MAKE) sub-make-done=1 \ + $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \ -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) -# Leave processing to above invocation of make -skip-makefile := 1 -endif # ifneq ($(KBUILD_OUTPUT),) -endif # ifeq ($(KBUILD_SRC),) - +else # sub-make-done # We process the rest of the Makefile if this is the final invocation of make -ifeq ($(skip-makefile),) # Do not print "Entering directory ...", # but we want to display it when entering to the output directory @@ -215,7 +219,7 @@ objtree := . src := $(srctree) obj := $(objtree) -VPATH := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD)) +VPATH := $(srctree) export srctree objtree VPATH @@ -300,8 +304,6 @@ __build_one_by_one: else -# We need some generic definitions (do not try to remake the file). -scripts/Kbuild.include: ; include scripts/Kbuild.include # Read KERNELRELEASE from include/config/kernel.release (if it exists) @@ -390,7 +392,6 @@ OBJDUMP = $(CROSS_COMPILE)objdump LEX = flex YACC = bison AWK = awk -GENKSYMS = scripts/genksyms/genksyms INSTALLKERNEL := installkernel DEPMOD = /sbin/depmod PERL = perl @@ -429,7 +430,7 @@ LINUXINCLUDE := \ KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \ - -Werror-implicit-function-declaration -Werror=implicit-int \ + -Werror=implicit-function-declaration -Werror=implicit-int \ -Wno-format-security \ -std=gnu89 KBUILD_CPPFLAGS := -D__KERNEL__ @@ -443,7 +444,7 @@ GCC_PLUGINS_CFLAGS := export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS -export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE +export MAKE LEX YACC AWK INSTALLKERNEL PERL PYTHON PYTHON2 PYTHON3 UTS_MACHINE export HOSTCXX KBUILD_HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS @@ -476,23 +477,23 @@ scripts_basic: $(Q)$(MAKE) $(build)=scripts/basic $(Q)rm -f .tmp_quiet_recordmcount -# To avoid any implicit rule to kick in, define an empty command. -scripts/basic/%: scripts_basic ; - PHONY += outputmakefile # outputmakefile generates a Makefile in the output directory, if using a # separate output directory. This allows convenient use of make in the # output directory. 
+# At the same time when output Makefile generated, generate .gitignore to +# ignore whole output directory outputmakefile: ifneq ($(KBUILD_SRC),) $(Q)ln -fsn $(srctree) source $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree) + $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore endif ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) ifneq ($(CROSS_COMPILE),) CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) -GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD))) +GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit)) CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR) GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..) endif @@ -583,7 +584,7 @@ export KBUILD_MODULES KBUILD_BUILTIN ifeq ($(KBUILD_EXTMOD),) # Objects we will link into vmlinux / subdirs we need to visit init-y := init/ -drivers-y := drivers/ sound/ firmware/ +drivers-y := drivers/ sound/ net-y := net/ libs-y := lib/ core-y := usr/ @@ -624,13 +625,22 @@ ifeq ($(may-sync-config),1) # because some architectures define CROSS_COMPILE there. -include include/config/auto.conf.cmd -# To avoid any implicit rule to kick in, define an empty command -$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ; +$(KCONFIG_CONFIG): + @echo >&2 '***' + @echo >&2 '*** Configuration file "$@" not found!' + @echo >&2 '***' + @echo >&2 '*** Please run some configurator (e.g. "make oldconfig" or' + @echo >&2 '*** "make menuconfig" or "make xconfig").' + @echo >&2 '***' + @/bin/false # The actual configuration files used during the build are stored in # include/generated/ and include/config/. Update them if .config is newer than # include/config/auto.conf (which mirrors .config). -include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd +# +# This exploits the 'multi-target pattern rule' trick. +# The syncconfig should be executed only once to make all the targets. 
+%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG) $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig else # External modules and some install targets need include/generated/autoconf.h @@ -658,17 +668,13 @@ KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) -KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,) -else -ifdef CONFIG_PROFILE_ALL_BRANCHES -KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,) else KBUILD_CFLAGS += -O2 endif -endif -KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ - $(call cc-disable-warning,maybe-uninitialized,)) +ifdef CONFIG_CC_DISABLE_WARN_MAYBE_UNINITIALIZED +KBUILD_CFLAGS += -Wno-maybe-uninitialized +endif # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) @@ -729,25 +735,28 @@ KBUILD_CFLAGS += -fomit-frame-pointer endif endif -KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments) +DEBUG_CFLAGS := $(call cc-option, -fno-var-tracking-assignments) ifdef CONFIG_DEBUG_INFO ifdef CONFIG_DEBUG_INFO_SPLIT -KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g) +DEBUG_CFLAGS += -gsplit-dwarf else -KBUILD_CFLAGS += -g +DEBUG_CFLAGS += -g endif KBUILD_AFLAGS += -Wa,-gdwarf-2 endif ifdef CONFIG_DEBUG_INFO_DWARF4 -KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,) +DEBUG_CFLAGS += -gdwarf-4 endif ifdef CONFIG_DEBUG_INFO_REDUCED -KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \ +DEBUG_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \ $(call cc-option,-fno-var-tracking) endif +KBUILD_CFLAGS += $(DEBUG_CFLAGS) +export DEBUG_CFLAGS + ifdef CONFIG_FUNCTION_TRACER ifdef CONFIG_FTRACE_MCOUNT_RECORD # gcc 5 supports generating the mcount tables directly @@ -922,19 +931,6 @@ ifdef CONFIG_MODULE_COMPRESS endif # CONFIG_MODULE_COMPRESS export mod_compress_cmd -# Select initial ramdisk compression format, default is gzip(1). -# This shall be used by the dracut(8) tool while creating an initramfs image. -# -INITRD_COMPRESS-y := gzip -INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2 -INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma -INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz -INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo -INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 -# do not export INITRD_COMPRESS, since we didn't actually -# choose a sane default compression above. 
-# export INITRD_COMPRESS := $(INITRD_COMPRESS-y) - ifdef CONFIG_MODULE_SIG_ALL $(eval $(call config_filename,MODULE_SIG_KEY)) @@ -976,15 +972,15 @@ libs-y2 := $(patsubst %/, %/built-in.a, $(filter-out %.a, $(libs-y))) virt-y := $(patsubst %/, %/built-in.a, $(virt-y)) # Externally visible symbols (used by link-vmlinux.sh) -export KBUILD_VMLINUX_INIT := $(head-y) $(init-y) -export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-y) +export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \ + $(drivers-y) $(net-y) $(virt-y) export KBUILD_VMLINUX_LIBS := $(libs-y1) export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds export LDFLAGS_vmlinux # used by scripts/package/Makefile export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools) -vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS) +vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS) # Recurse until adjust_autoksyms.sh is satisfied PHONY += autoksyms_recursive @@ -1015,9 +1011,6 @@ cmd_link-vmlinux = \ $(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true) vmlinux: scripts/link-vmlinux.sh autoksyms_recursive $(vmlinux-deps) FORCE -ifdef CONFIG_GDB_SCRIPTS - $(Q)ln -fsn $(abspath $(srctree)/scripts/gdb/vmlinux-gdb.py) -endif +$(call if_changed,link-vmlinux) targets := vmlinux @@ -1062,7 +1055,7 @@ scripts: scripts_basic scripts_dtc # archprepare is used in arch Makefiles and when processed asm symlink, # version.h and scripts_basic is processed / created. -PHONY += prepare archprepare prepare1 prepare2 prepare3 +PHONY += prepare archprepare prepare1 prepare3 # prepare3 is used to check if we are building in a separate output directory, # and if so do: @@ -1077,12 +1070,8 @@ ifneq ($(KBUILD_SRC),) fi; endif -# prepare2 creates a makefile if using a separate output directory. -# From this point forward, .config has been reprocessed, so any rules -# that need to depend on updated CONFIG_* values can be checked here. 
-prepare2: prepare3 outputmakefile asm-generic - -prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h +prepare1: prepare3 outputmakefile asm-generic $(version_h) $(autoksyms_h) \ + include/generated/utsrelease.h $(cmd_crmodverdir) archprepare: archheaders archscripts prepare1 scripts @@ -1517,6 +1506,18 @@ PHONY += $(DOC_TARGETS) $(DOC_TARGETS): scripts_basic FORCE $(Q)$(MAKE) $(build)=Documentation $@ +# Misc +# --------------------------------------------------------------------------- + +PHONY += scripts_gdb +scripts_gdb: prepare + $(Q)$(MAKE) $(build)=scripts/gdb + $(Q)ln -fsn $(abspath $(srctree)/scripts/gdb/vmlinux-gdb.py) + +ifdef CONFIG_GDB_SCRIPTS +all: scripts_gdb +endif + else # KBUILD_EXTMOD ### @@ -1668,6 +1669,11 @@ image_name: @echo $(KBUILD_IMAGE) # Clear a bunch of variables before executing the submake + +ifeq ($(quiet),silent_) +tools_silent=s +endif + tools/: FORCE $(Q)mkdir -p $(objtree)/tools $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(src)/tools/ @@ -1686,45 +1692,32 @@ tools/%: FORCE # target-dir => where to store outputfile # build-dir => directory in kernel source tree to use -ifeq ($(KBUILD_EXTMOD),) - build-dir = $(patsubst %/,%,$(dir $@)) - target-dir = $(dir $@) -else - zap-slash=$(filter-out .,$(patsubst %/,%,$(dir $@))) - build-dir = $(KBUILD_EXTMOD)$(if $(zap-slash),/$(zap-slash)) - target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) -endif - -%.s: %.c prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -%.i: %.c prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -%.o: %.c prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -%.lst: %.c prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -%.s: %.S prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -%.o: %.S prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -%.symtypes: %.c prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -%.ll: %.c prepare FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) +build-target = $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD)/)$@ +build-dir = $(patsubst %/,%,$(dir $(build-target))) + +%.i: prepare FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +%.ll: prepare FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +%.lst: prepare FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +%.o: prepare FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +%.s: prepare FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +%.symtypes: prepare FORCE + $(Q)$(MAKE) $(build)=$(build-dir) $(build-target) +%.ko: %.o + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost # Modules -/: prepare FORCE - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) +PHONY += / +/: ./ + # Make sure the latest headers are built for Documentation Documentation/ samples/: headers_install %/: prepare FORCE - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) -%.ko: prepare FORCE - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) $(@:.ko=.o) - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost + $(Q)$(MAKE) KBUILD_MODULES=1 $(build)=$(build-dir) # FIXME Should go into a make.lib or something # =========================================================================== @@ -1748,13 +1741,11 @@ cmd_crmodverdir = $(Q)mkdir -p 
$(MODVERDIR) \ # read saved command lines for existing targets existing-targets := $(wildcard $(sort $(targets))) -cmd_files := $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) -$(cmd_files): ; # Do not try to update included dependency files --include $(cmd_files) +-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) endif # ifeq ($(config-targets),1) endif # ifeq ($(mixed-targets),1) -endif # skip-makefile +endif # sub-make-done PHONY += FORCE FORCE: diff --git a/arch/Kconfig b/arch/Kconfig index 4cfb6de48f79..33687dddd86a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -276,6 +276,16 @@ config ARCH_THREAD_STACK_ALLOCATOR config ARCH_WANTS_DYNAMIC_TASK_STRUCT bool +config ARCH_32BIT_OFF_T + bool + depends on !64BIT + help + All new 32-bit architectures should have 64-bit off_t type on + userspace side which corresponds to the loff_t kernel type. This + is the requirement for modern ABIs. Some existing architectures + still support 32-bit off_t. This option is enabled for all such + architectures explicitly. + config HAVE_REGS_AND_STACK_ACCESS_API bool help @@ -701,6 +711,9 @@ config HAVE_ARCH_HASH file which provides platform-specific implementations of some functions in or fs/namei.c. +config HAVE_ARCH_NVRAM_OPS + bool + config ISA_BUS_API def_bool ISA @@ -759,7 +772,7 @@ config 64BIT_TIME handling. config COMPAT_32BIT_TIME - def_bool (!64BIT && 64BIT_TIME) || COMPAT + def_bool !64BIT || COMPAT help This enables 32 bit time_t support in addition to 64 bit time_t support. This is relevant on all 32-bit architectures, and 64-bit architectures @@ -885,6 +898,9 @@ config HAVE_ARCH_PREL32_RELOCATIONS architectures, and don't require runtime relocation on relocatable kernels. +config ARCH_USE_MEMREMAP_PROT + bool + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/alpha/include/asm/a.out-core.h b/arch/alpha/include/asm/a.out-core.h deleted file mode 100644 index 1610d078b064..000000000000 --- a/arch/alpha/include/asm/a.out-core.h +++ /dev/null @@ -1,81 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include -#include - -/* - * Fill in the user structure for an ECOFF core dump. - */ -static inline void aout_dump_thread(struct pt_regs *pt, struct user *dump) -{ - /* switch stack follows right below pt_regs: */ - struct switch_stack * sw = ((struct switch_stack *) pt) - 1; - - dump->magic = CMAGIC; - dump->start_code = current->mm->start_code; - dump->start_data = current->mm->start_data; - dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); - dump->u_tsize = ((current->mm->end_code - dump->start_code) - >> PAGE_SHIFT); - dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) - >> PAGE_SHIFT); - dump->u_ssize = (current->mm->start_stack - dump->start_stack - + PAGE_SIZE-1) >> PAGE_SHIFT; - - /* - * We store the registers in an order/format that is - * compatible with DEC Unix/OSF/1 as this makes life easier - * for gdb. 
- */ - dump->regs[EF_V0] = pt->r0; - dump->regs[EF_T0] = pt->r1; - dump->regs[EF_T1] = pt->r2; - dump->regs[EF_T2] = pt->r3; - dump->regs[EF_T3] = pt->r4; - dump->regs[EF_T4] = pt->r5; - dump->regs[EF_T5] = pt->r6; - dump->regs[EF_T6] = pt->r7; - dump->regs[EF_T7] = pt->r8; - dump->regs[EF_S0] = sw->r9; - dump->regs[EF_S1] = sw->r10; - dump->regs[EF_S2] = sw->r11; - dump->regs[EF_S3] = sw->r12; - dump->regs[EF_S4] = sw->r13; - dump->regs[EF_S5] = sw->r14; - dump->regs[EF_S6] = sw->r15; - dump->regs[EF_A3] = pt->r19; - dump->regs[EF_A4] = pt->r20; - dump->regs[EF_A5] = pt->r21; - dump->regs[EF_T8] = pt->r22; - dump->regs[EF_T9] = pt->r23; - dump->regs[EF_T10] = pt->r24; - dump->regs[EF_T11] = pt->r25; - dump->regs[EF_RA] = pt->r26; - dump->regs[EF_T12] = pt->r27; - dump->regs[EF_AT] = pt->r28; - dump->regs[EF_SP] = rdusp(); - dump->regs[EF_PS] = pt->ps; - dump->regs[EF_PC] = pt->pc; - dump->regs[EF_GP] = pt->gp; - dump->regs[EF_A0] = pt->r16; - dump->regs[EF_A1] = pt->r17; - dump->regs[EF_A2] = pt->r18; - memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h index 4d17cacd1462..432402c8e47f 100644 --- a/arch/alpha/include/asm/irq.h +++ b/arch/alpha/include/asm/irq.h @@ -56,15 +56,15 @@ #elif defined(CONFIG_ALPHA_DP264) || \ defined(CONFIG_ALPHA_LYNX) || \ - defined(CONFIG_ALPHA_SHARK) || \ - defined(CONFIG_ALPHA_EIGER) + defined(CONFIG_ALPHA_SHARK) # define NR_IRQS 64 #elif defined(CONFIG_ALPHA_TITAN) #define NR_IRQS 80 #elif defined(CONFIG_ALPHA_RAWHIDE) || \ - defined(CONFIG_ALPHA_TAKARA) + defined(CONFIG_ALPHA_TAKARA) || \ + defined(CONFIG_ALPHA_EIGER) # define NR_IRQS 128 #elif defined(CONFIG_ALPHA_WILDFIRE) diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h index e6e13a85796a..5a77a40567fa 100644 --- a/arch/alpha/include/asm/topology.h +++ b/arch/alpha/include/asm/topology.h @@ -4,6 +4,7 @@ #include #include +#include #include #ifdef CONFIG_NUMA @@ -29,7 +30,7 @@ static const struct cpumask *cpumask_of_node(int node) { int cpu; - if (node == -1) + if (node == NUMA_NO_NODE) return cpu_all_mask; cpumask_clear(&node_to_cpumask_map[node]); diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index cf4ac791a592..1fe2b56cb861 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -18,7 +18,6 @@ #define USER_DS ((mm_segment_t) { -0x40000000000UL }) #define get_fs() (current_thread_info()->addr_limit) -#define get_ds() (KERNEL_DS) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #define segment_eq(a, b) ((a).seg == (b).seg) diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index 21b706a5b772..986f5da9b7d8 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -19,25 +19,4 @@ #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_CLONE -/* - * Ignore legacy syscalls that we don't use. - */ -#define __IGNORE_alarm -#define __IGNORE_creat -#define __IGNORE_getegid -#define __IGNORE_geteuid -#define __IGNORE_getgid -#define __IGNORE_getpid -#define __IGNORE_getppid -#define __IGNORE_getuid -#define __IGNORE_pause -#define __IGNORE_time -#define __IGNORE_utime -#define __IGNORE_umount2 - -/* Alpha doesn't have protection keys. 
*/ -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free - #endif /* _ALPHA_UNISTD_H */ diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h index f9d4e6b6d4bd..ac23379b7a87 100644 --- a/arch/alpha/include/uapi/asm/mman.h +++ b/arch/alpha/include/uapi/asm/mman.h @@ -10,9 +10,7 @@ #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ +/* 0x01 - 0x03 are defined in linux/mman.h */ #define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */ #define MAP_FIXED 0x100 /* Interpret addr exactly */ #define MAP_ANONYMOUS 0x10 /* don't use a file */ diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 065fb372e355..0d0fddb7e738 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -3,6 +3,7 @@ #define _UAPI_ASM_SOCKET_H #include +#include /* For setsockopt(2) */ /* @@ -30,8 +31,8 @@ #define SO_RCVBUFFORCE 0x100b #define SO_RCVLOWAT 0x1010 #define SO_SNDLOWAT 0x1011 -#define SO_RCVTIMEO 0x1012 -#define SO_SNDTIMEO 0x1013 +#define SO_RCVTIMEO_OLD 0x1012 +#define SO_SNDTIMEO_OLD 0x1013 #define SO_ACCEPTCONN 0x1014 #define SO_PROTOCOL 0x1028 #define SO_DOMAIN 0x1029 @@ -51,13 +52,9 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 30 #define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 19 @@ -66,9 +63,6 @@ #define SO_MARK 36 -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 40 #define SO_WIFI_STATUS 41 @@ -115,4 +109,41 @@ #define SO_TXTIME 61 #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? 
SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h index 9ba724f116f1..71fd5db06866 100644 --- a/arch/alpha/include/uapi/asm/unistd.h +++ b/arch/alpha/include/uapi/asm/unistd.h @@ -2,6 +2,16 @@ #ifndef _UAPI_ALPHA_UNISTD_H #define _UAPI_ALPHA_UNISTD_H +/* These are traditionally the names linux-alpha uses for + * the two otherwise generic system calls */ +#define __NR_umount __NR_umount2 +#define __NR_osf_shmat __NR_shmat + +/* These return an extra value but can be used as aliases */ +#define __NR_getpid __NR_getxpid +#define __NR_getuid __NR_getxuid +#define __NR_getgid __NR_getxgid + #include #endif /* _UAPI_ALPHA_UNISTD_H */ diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c index 867e8730b0c5..f489170201c3 100644 --- a/arch/alpha/kernel/core_cia.c +++ b/arch/alpha/kernel/core_cia.c @@ -331,7 +331,10 @@ cia_prepare_tbia_workaround(int window) long i; /* Use minimal 1K map. */ - ppte = memblock_alloc_from(CIA_BROKEN_TBIA_SIZE, 32768, 0); + ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768); + if (!ppte) + panic("%s: Failed to allocate %u bytes align=0x%x\n", + __func__, CIA_BROKEN_TBIA_SIZE, 32768); pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1; for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i) diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index c1d0c18c71ca..1db9d0eb2922 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -83,6 +83,9 @@ mk_resource_name(int pe, int port, char *str) sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES); + if (!name) + panic("%s: Failed to allocate %zu bytes\n", __func__, + strlen(tmp) + 1); strcpy(name, tmp); return name; @@ -118,6 +121,9 @@ alloc_io7(unsigned int pe) } io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES); + if (!io7) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*io7)); io7->pe = pe; raw_spin_lock_init(&io7->irq_lock); diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 792586038808..bf497b8b0ec6 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1253,7 +1253,7 @@ struct timex32 { SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) { - struct timex txc; + struct __kernel_timex txc; int ret; /* copy relevant bits of struct timex. 
*/ @@ -1270,7 +1270,8 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) || (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) - offsetof(struct timex32, tick))) || - (put_tv_to_tv32(&txc_p->time, &txc.time))) + (put_user(txc.time.tv_sec, &txc_p->time.tv_sec)) || + (put_user(txc.time.tv_usec, &txc_p->time.tv_usec))) return -EFAULT; return ret; diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index 091cff3c68fd..ae82061edae9 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -34,6 +34,9 @@ alloc_pci_controller(void) struct pci_controller *hose; hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + if (!hose) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*hose)); *hose_tail = hose; hose_tail = &hose->next; @@ -44,7 +47,13 @@ alloc_pci_controller(void) struct resource * __init alloc_resource(void) { - return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); + void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); + + if (!ptr) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct resource)); + + return ptr; } SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, @@ -54,7 +63,7 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, bus, /* from hose or from bus.devfn */ if (which & IOBASE_FROM_HOSE) { - for (hose = hose_head; hose; hose = hose->next) + for (hose = hose_head; hose; hose = hose->next) if (hose->index == bus) break; if (!hose) diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index 97098127df83..64fbfb0763b2 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -393,6 +393,9 @@ alloc_pci_controller(void) struct pci_controller *hose; hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + if (!hose) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*hose)); *hose_tail = hose; hose_tail = &hose->next; @@ -403,7 +406,13 @@ alloc_pci_controller(void) struct resource * __init alloc_resource(void) { - return memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); + void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); + + if (!ptr) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct resource)); + + return ptr; } diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index aa0f50d0f823..3034d6d936d2 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -80,6 +80,9 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, " falling back to system-wide allocation\n", __func__, nid); arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); + if (!arena) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*arena)); } arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid); @@ -87,13 +90,22 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, printk("%s: couldn't allocate arena ptes from node %d\n" " falling back to system-wide allocation\n", __func__, nid); - arena->ptes = memblock_alloc_from(mem_size, align, 0); + arena->ptes = memblock_alloc(mem_size, align); + if (!arena->ptes) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, mem_size, align); } #else /* CONFIG_DISCONTIGMEM */ arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); - arena->ptes = memblock_alloc_from(mem_size, align, 0); + if (!arena) + panic("%s: Failed to allocate %zu bytes\n", 
__func__, + sizeof(*arena)); + arena->ptes = memblock_alloc(mem_size, align); + if (!arena->ptes) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, mem_size, align); #endif /* CONFIG_DISCONTIGMEM */ diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c index 5613aa378a83..4341ccf5c0c4 100644 --- a/arch/alpha/kernel/perf_event.c +++ b/arch/alpha/kernel/perf_event.c @@ -630,12 +630,6 @@ static int __hw_perf_event_init(struct perf_event *event) return ev; } - /* The EV67 does not support mode exclusion */ - if (attr->exclude_kernel || attr->exclude_user - || attr->exclude_hv || attr->exclude_idle) { - return -EPERM; - } - /* * We place the event type in event_base here and leave calculation * of the codes to programme the PMU for alpha_pmu_enable() because @@ -771,6 +765,7 @@ static struct pmu pmu = { .start = alpha_pmu_start, .stop = alpha_pmu_stop, .read = alpha_pmu_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index 4b5b1b244f86..5d4c76a77a9f 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -293,7 +293,7 @@ move_initrd(unsigned long mem_limit) unsigned long size; size = initrd_end - initrd_start; - start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0); + start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE); if (!start || __pa(start) + size > mem_limit) { initrd_start = initrd_end = 0; return NULL; diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl index 7b56a53be5e3..63ed39cbd3bd 100644 --- a/arch/alpha/kernel/syscalls/syscall.tbl +++ b/arch/alpha/kernel/syscalls/syscall.tbl @@ -29,7 +29,7 @@ 19 common lseek sys_lseek 20 common getxpid sys_getxpid 21 common osf_mount sys_osf_mount -22 common umount sys_umount +22 common umount2 sys_umount 23 common setuid sys_setuid 24 common getxuid sys_getxuid 25 common exec_with_loader sys_ni_syscall @@ -174,17 +174,17 @@ 187 common osf_alt_sigpending sys_ni_syscall 188 common osf_alt_setsid sys_ni_syscall 199 common osf_swapon sys_swapon -200 common msgctl sys_msgctl +200 common msgctl sys_old_msgctl 201 common msgget sys_msgget 202 common msgrcv sys_msgrcv 203 common msgsnd sys_msgsnd -204 common semctl sys_semctl +204 common semctl sys_old_semctl 205 common semget sys_semget 206 common semop sys_semop 207 common osf_utsname sys_osf_utsname 208 common lchown sys_lchown -209 common osf_shmat sys_shmat -210 common shmctl sys_shmctl +209 common shmat sys_shmat +210 common shmctl sys_old_shmctl 211 common shmdt sys_shmdt 212 common shmget sys_shmget 213 common osf_mvalid sys_ni_syscall @@ -451,3 +451,15 @@ 520 common preadv2 sys_preadv2 521 common pwritev2 sys_pwritev2 522 common statx sys_statx +523 common io_pgetevents sys_io_pgetevents +524 common pkey_mprotect sys_pkey_mprotect +525 common pkey_alloc sys_pkey_alloc +526 common pkey_free sys_pkey_free +527 common rseq sys_rseq +528 common statfs64 sys_statfs64 +529 common fstatfs64 sys_fstatfs64 +530 common getegid sys_getegid +531 common geteuid sys_geteuid +532 common getppid sys_getppid +# all other architectures have common numbers for new syscall, alpha +# is the exception. diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index d73dc473fbb9..188fc9256baf 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm) /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ (((unsigned long *)regs)[(r) <= 8 ? 
(r) : (r) <= 15 ? (r)-16 : \ - (r) <= 18 ? (r)+8 : (r)-10]) + (r) <= 18 ? (r)+10 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 376366a7db81..df55672c59e6 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -11,9 +11,11 @@ config ARC select ARC_TIMERS select ARCH_HAS_DMA_COHERENT_TO_PFN select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC + select ARCH_32BIT_OFF_T select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS select COMMON_CLK @@ -30,7 +32,6 @@ config ARC select HAVE_ARCH_TRACEHOOK select HAVE_DEBUG_STACKOVERFLOW select HAVE_FUTEX_CMPXCHG if FUTEX - select HAVE_GENERIC_DMA_COHERENT select HAVE_IOREMAP_PROT select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA @@ -44,7 +45,6 @@ config ARC select MODULES_USE_ELF_RELA select OF select OF_EARLY_FLATTREE - select OF_RESERVED_MEM select PCI_SYSCALL if PCI select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING @@ -191,7 +191,6 @@ config NR_CPUS config ARC_SMP_HALT_ON_RESET bool "Enable Halt-on-reset boot mode" - default y if ARC_UBOOT_SUPPORT help In SMP configuration cores can be configured as Halt-on-reset or they could all start at same time. For Halt-on-reset, non @@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS (also referred to as r58:r59). These can also be used by gcc as GPR so kernel needs to save/restore per process +config ARC_IRQ_NO_AUTOSAVE + bool "Disable hardware autosave regfile on interrupts" + default n + help + On HS cores, taken interrupt auto saves the regfile on stack. + This is programmable and can be optionally disabled in which case + software INTERRUPT_PROLOGUE/EPILGUE do the needed work + endif # ISA_ARCV2 endmenu # "ARC CPU Configuration" @@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA endif -config ARC_UBOOT_SUPPORT - bool "Support uboot arg Handling" - help - ARC Linux by default checks for uboot provided args as pointers to - external cmdline or DTB. This however breaks in absence of uboot, - when booting from Metaware debugger directly, as the registers are - not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus - registers look like uboot args to kernel which then chokes. - So only enable the uboot arg checking/processing if users are sure - of uboot being in play. 
- config ARC_BUILTIN_DTB_NAME string "Built in DTB" help diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig index 020d4493edfd..e31a8ebc3ecc 100644 --- a/arch/arc/configs/axs101_defconfig +++ b/arch/arc/configs/axs101_defconfig @@ -99,7 +99,6 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig index 666314fffc60..e0e8567f0d75 100644 --- a/arch/arc/configs/axs103_defconfig +++ b/arch/arc/configs/axs103_defconfig @@ -97,7 +97,6 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig index 429832b8560b..fcbc952bc75b 100644 --- a/arch/arc/configs/axs103_smp_defconfig +++ b/arch/arc/configs/axs103_smp_defconfig @@ -100,7 +100,6 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig index 240dd2cd5148..f56cc2070c11 100644 --- a/arch/arc/configs/haps_hs_defconfig +++ b/arch/arc/configs/haps_hs_defconfig @@ -75,7 +75,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig index 14ae7e5acc7c..b6f2482c7e74 100644 --- a/arch/arc/configs/haps_hs_smp_defconfig +++ b/arch/arc/configs/haps_hs_smp_defconfig @@ -78,7 +78,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 87b23b7fb781..6fd3d29546af 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -71,7 +71,6 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 6e84060e7c90..f0a077c00efa 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig @@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5 # CONFIG_ARC_HAS_LLSC is not set CONFIG_ARC_KVADDR_SIZE=402 CONFIG_ARC_EMUL_UNALIGNED=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_PREEMPT=y CONFIG_NET=y CONFIG_UNIX=y @@ -77,7 +76,6 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_MEMORY_INIT=y diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig index 219c2a65294b..318e4cd29629 100644 --- 
a/arch/arc/configs/nsim_700_defconfig +++ b/arch/arc/configs/nsim_700_defconfig @@ -56,6 +56,5 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig index 739b90e5e893..c15807b0e0c1 100644 --- a/arch/arc/configs/nsim_hs_defconfig +++ b/arch/arc/configs/nsim_hs_defconfig @@ -56,6 +56,5 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_DEBUG_PREEMPT is not set diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig index b5895bdf3a93..65e983fd942b 100644 --- a/arch/arc/configs/nsim_hs_smp_defconfig +++ b/arch/arc/configs/nsim_hs_smp_defconfig @@ -55,5 +55,4 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig index 35dfc6491a09..08c5b99ac341 100644 --- a/arch/arc/configs/nsimosci_defconfig +++ b/arch/arc/configs/nsimosci_defconfig @@ -68,5 +68,4 @@ CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig index 1638e5bc9672..5b5e26d67955 100644 --- a/arch/arc/configs/nsimosci_hs_defconfig +++ b/arch/arc/configs/nsimosci_hs_defconfig @@ -66,5 +66,4 @@ CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig index 11cfbdb0f441..26af9b2f7fcb 100644 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig @@ -77,6 +77,5 @@ CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FTRACE=y diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig index e71ade3cf9c8..5b5119d2b5d5 100644 --- a/arch/arc/configs/tb10x_defconfig +++ b/arch/arc/configs/tb10x_defconfig @@ -92,7 +92,6 @@ CONFIG_CONFIGFS_FS=y # CONFIG_MISC_FILESYSTEMS is not set # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y CONFIG_HEADERS_CHECK=y diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 1e59a2e9c602..0c3b21416819 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig @@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y CONFIG_ARC_PLAT_AXS10X=y CONFIG_AXS103=y CONFIG_ISA_ARCV2=y -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" CONFIG_PREEMPT=y CONFIG_NET=y @@ -88,7 +87,6 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_SHIRQ=y diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig 
b/arch/arc/configs/vdk_hs38_smp_defconfig index b5c3f6c54b03..f9ad9d3ee702 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -15,8 +15,6 @@ CONFIG_AXS103=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y # CONFIG_ARC_TIMERS_64BIT is not set -# CONFIG_ARC_SMP_HALT_ON_RESET is not set -CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" CONFIG_PREEMPT=y CONFIG_NET=y @@ -93,7 +91,6 @@ CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_SHIRQ=y diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index caa270261521..b41f8881ecc8 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -3,6 +3,7 @@ generic-y += bugs.h generic-y += compat.h generic-y += device.h generic-y += div64.h +generic-y += dma-mapping.h generic-y += emergency-restart.h generic-y += extable.h generic-y += ftrace.h diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index f1b86cef0905..a27eafdc8260 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -151,6 +151,14 @@ struct bcr_isa_arcv2 { #endif }; +struct bcr_uarch_build_arcv2 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:8, prod:8, maj:8, min:8; +#else + unsigned int min:8, maj:8, prod:8, pad:8; +#endif +}; + struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index f393b663413e..2ad77fb43639 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h @@ -52,6 +52,17 @@ #define cache_line_size() SMP_CACHE_BYTES #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES +/* + * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses + * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit + * alignment for any atomic64_t embedded in buffer. + * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed + * value of 4 (and not 8) in ARC ABI. + */ +#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC) +#define ARCH_SLAB_MINALIGN 8 +#endif + extern void arc_cache_init(void); extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); extern void read_decode_cache_bcr(void); diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h deleted file mode 100644 index c946c0a83e76..000000000000 --- a/arch/arc/include/asm/dma-mapping.h +++ /dev/null @@ -1,13 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// (C) 2018 Synopsys, Inc. 
(www.synopsys.com) - -#ifndef ASM_ARC_DMA_MAPPING_H -#define ASM_ARC_DMA_MAPPING_H - -#include - -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent); -#define arch_setup_dma_ops arch_setup_dma_ops - -#endif diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 309f4e6721b3..225e7df2d8ed 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -17,6 +17,33 @@ ; ; Now manually save: r12, sp, fp, gp, r25 +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + st.as r9, [sp, -10] ; save r9 in it's final stack slot + sub sp, sp, 12 ; skip JLI, LDI, EI + + PUSH lp_count + PUSHAX lp_start + PUSHAX lp_end + PUSH blink + + PUSH r11 + PUSH r10 + + sub sp, sp, 4 ; skip r9 + + PUSH r8 + PUSH r7 + PUSH r6 + PUSH r5 + PUSH r4 + PUSH r3 + PUSH r2 + PUSH r1 + PUSH r0 +.endif +#endif + #ifdef CONFIG_ARC_HAS_ACCL_REGS PUSH r59 PUSH r58 @@ -86,6 +113,33 @@ POP r59 #endif +#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE +.ifnc \called_from, exception + POP r0 + POP r1 + POP r2 + POP r3 + POP r4 + POP r5 + POP r6 + POP r7 + POP r8 + POP r9 + POP r10 + POP r11 + + POP blink + POPAX lp_end + POPAX lp_start + + POP r9 + mov lp_count, r9 + + add sp, sp, 12 ; skip JLI, LDI, EI + ld.as r9, [sp, -10] ; reload r9 which got clobbered +.endif +#endif + .endm /*------------------------------------------------------------------------*/ diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index c9173c02081c..eabc3efa6c6d 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) */ "=&r" (tmp), "+r" (to), "+r" (from) : - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return n; } @@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) " .previous \n" : "+r"(d_char), "+r"(res) : "i"(0) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } @@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) : "g"(-EFAULT), "r"(count) - : "lp_count", "lp_start", "lp_end", "memory"); + : "lp_count", "memory"); return res; } diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h index 3b3543fd151c..5eafa1115162 100644 --- a/arch/arc/include/uapi/asm/unistd.h +++ b/arch/arc/include/uapi/asm/unistd.h @@ -18,10 +18,12 @@ #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_TIME32_SYSCALLS #define sys_mmap2 sys_mmap_pgoff diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cc558a25b8fa..562089d62d9d 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -209,7 +209,9 @@ restore_regs: ;####### Return from Intr ####### debug_marker_l1: - bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot + btst r0, STATUS_DE_BIT ; Z flag set if bit clear + bnz 
.Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set .Lisr_ret_fast_path: ; Handle special case #1: (Entry via Exception, Return via IRQ) diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 8b90d25a15cc..30e090625916 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -17,6 +17,7 @@ #include #include #include +#include .macro CPU_EARLY_SETUP @@ -47,6 +48,15 @@ sr r5, [ARC_REG_DC_CTRL] 1: + +#ifdef CONFIG_ISA_ARCV2 + ; Unaligned access is disabled at reset, so re-enable early as + ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access + ; by default + lr r5, [status32] + bset r5, r5, STATUS_AD_BIT + kflag r5 +#endif .endm .section .init.text, "ax",@progbits @@ -90,15 +100,13 @@ ENTRY(stext) st.ab 0, [r5, 4] 1: -#ifdef CONFIG_ARC_UBOOT_SUPPORT ; Uboot - kernel ABI ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 - ; r1 = magic number (board identity, unused as of now + ; r1 = magic number (always zero as of now) ; r2 = pointer to uboot provided cmdline or external DTB in mem - ; These are handled later in setup_arch() + ; These are handled later in handle_uboot_args() st r0, [@uboot_tag] st r2, [@uboot_arg] -#endif ; setup "current" tsk and optionally cache it in dedicated r25 mov r9, @init_task diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 067ea362fb3e..cf18b3e5a934 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -49,11 +49,13 @@ void arc_init_IRQ(void) *(unsigned int *)&ictrl = 0; +#ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ ictrl.save_blink = 1; ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ +#endif WRITE_AUX(AUX_IRQ_CTRL, ictrl); diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index feb90093e6b1..7b2340996cf8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.ret_stk = 4 << bpu.rse; if (cpu->core.family >= 0x54) { - unsigned int exec_ctrl; - READ_BCR(AUX_EXEC_CTRL, exec_ctrl); - cpu->extn.dual_enb = !(exec_ctrl & 1); + struct bcr_uarch_build_arcv2 uarch; - /* dual issue always present for this core */ - cpu->extn.dual = 1; + /* + * The first 0x54 core (uarch maj:min 0:1 or 0:2) was + * dual issue only (HS4x). But next uarch rev (1:0) + * allows it to be configured for single issue (HS3x) + * Ensure we fiddle with dual issue only on HS4x + */ + READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch); + + if (uarch.prod == 4) { + unsigned int exec_ctrl; + + /* dual issue hardware always present */ + cpu->extn.dual = 1; + + READ_BCR(AUX_EXEC_CTRL, exec_ctrl); + + /* dual issue hardware enabled ?
*/ + cpu->extn.dual_enb = !(exec_ctrl & 1); + + } } } READ_BCR(ARC_REG_AP_BCR, ap); if (ap.ver) { cpu->extn.ap_num = 2 << ap.num; - cpu->extn.ap_full = !!ap.min; + cpu->extn.ap_full = !ap.min; } READ_BCR(ARC_REG_SMART_BCR, bcr); @@ -462,43 +478,78 @@ void setup_processor(void) arc_chk_core_config(); } -static inline int is_kernel(unsigned long addr) +static inline bool uboot_arg_invalid(unsigned long addr) { - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) - return 1; - return 0; + /* + * Check that it is an untranslated address (although MMU is not enabled + * yet, it being a high address ensures this is not by fluke) + */ + if (addr < PAGE_OFFSET) + return true; + + /* Check that address doesn't clobber resident kernel image */ + return addr >= (unsigned long)_stext && addr <= (unsigned long)_end; } -void __init setup_arch(char **cmdline_p) +#define IGNORE_ARGS "Ignore U-boot args: " + +/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */ +#define UBOOT_TAG_NONE 0 +#define UBOOT_TAG_CMDLINE 1 +#define UBOOT_TAG_DTB 2 + +void __init handle_uboot_args(void) { -#ifdef CONFIG_ARC_UBOOT_SUPPORT - /* make sure that uboot passed pointer to cmdline/dtb is valid */ - if (uboot_tag && is_kernel((unsigned long)uboot_arg)) - panic("Invalid uboot arg\n"); - - /* See if u-boot passed an external Device Tree blob */ - machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ - if (!machine_desc) -#endif - { - /* No, so try the embedded one */ + bool use_embedded_dtb = true; + bool append_cmdline = false; + + /* check that we know this tag */ + if (uboot_tag != UBOOT_TAG_NONE && + uboot_tag != UBOOT_TAG_CMDLINE && + uboot_tag != UBOOT_TAG_DTB) { + pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag); + goto ignore_uboot_args; + } + + if (uboot_tag != UBOOT_TAG_NONE && + uboot_arg_invalid((unsigned long)uboot_arg)) { + pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); + goto ignore_uboot_args; + } + + /* see if U-boot passed an external Device Tree blob */ + if (uboot_tag == UBOOT_TAG_DTB) { + machine_desc = setup_machine_fdt((void *)uboot_arg); + + /* external Device Tree blob is invalid - use embedded one */ + use_embedded_dtb = !machine_desc; + } + + if (uboot_tag == UBOOT_TAG_CMDLINE) + append_cmdline = true; + +ignore_uboot_args: + + if (use_embedded_dtb) { machine_desc = setup_machine_fdt(__dtb_start); if (!machine_desc) panic("Embedded DT invalid\n"); + } - /* - * If we are here, it is established that @uboot_arg didn't - * point to DT blob. Instead if u-boot says it is cmdline, - * append to embedded DT cmdline. - * setup_machine_fdt() would have populated @boot_command_line - */ - if (uboot_tag == 1) { - /* Ensure a whitespace between the 2 cmdlines */ - strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); - strlcat(boot_command_line, uboot_arg, - COMMAND_LINE_SIZE); - } + /* + * NOTE: @boot_command_line is populated by setup_machine_fdt() so this + * append processing can only happen after.
+ */ + if (append_cmdline) { + /* Ensure a whitespace between the 2 cmdlines */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE); } +} + +void __init setup_arch(char **cmdline_p) +{ + handle_uboot_args(); /* Save unparsed command line copy for /proc/cmdline */ *cmdline_p = boot_command_line; diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index d34f69eb1a95..271e9fafa479 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c @@ -181,8 +181,7 @@ static void init_unwind_hdr(struct unwind_table *table, */ static void *__init unw_hdr_alloc_early(unsigned long sz) { - return memblock_alloc_from_nopanic(sz, sizeof(unsigned int), - MAX_DMA_ADDRESS); + return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS); } static void *unw_hdr_alloc(unsigned long sz) diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index d61044dd8b58..ea14b0bf3116 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S @@ -25,15 +25,11 @@ #endif #ifdef CONFIG_ARC_HAS_LL64 -# define PREFETCH_READ(RX) prefetch [RX, 56] -# define PREFETCH_WRITE(RX) prefetchw [RX, 64] # define LOADX(DST,RX) ldd.ab DST, [RX, 8] # define STOREX(SRC,RX) std.ab SRC, [RX, 8] # define ZOLSHFT 5 # define ZOLAND 0x1F #else -# define PREFETCH_READ(RX) prefetch [RX, 28] -# define PREFETCH_WRITE(RX) prefetchw [RX, 32] # define LOADX(DST,RX) ld.ab DST, [RX, 4] # define STOREX(SRC,RX) st.ab SRC, [RX, 4] # define ZOLSHFT 4 @@ -41,8 +37,6 @@ #endif ENTRY_CFI(memcpy) - prefetch [r1] ; Prefetch the read location - prefetchw [r0] ; Prefetch the write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -72,8 +66,6 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy32_64bytes ;; LOOP START LOADX (r6, r1) - PREFETCH_READ (r1) - PREFETCH_WRITE (r3) LOADX (r8, r1) LOADX (r10, r1) LOADX (r4, r1) @@ -117,9 +109,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_1 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 24) or r7, r7, r5 @@ -162,9 +152,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_2 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 16) or r7, r7, r5 @@ -204,9 +192,7 @@ ENTRY_CFI(memcpy) lpnz @.Lcopy8bytes_3 ;; LOOP START ld.ab r6, [r1, 4] - prefetch [r1, 28] ;Prefetch the next read location ld.ab r8, [r1,4] - prefetchw [r3, 32] ;Prefetch the next write location SHIFT_1 (r7, r6, 8) or r7, r7, r5 diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c index 48e700151810..11f57e2ced8a 100644 --- a/arch/arc/mm/highmem.c +++ b/arch/arc/mm/highmem.c @@ -124,6 +124,10 @@ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr) pmd_k = pmd_offset(pud_k, kvaddr); pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pte_k) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + pmd_populate_kernel(&init_mm, pmd_k, pte_k); return pte_k; } diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index f25c085b9874..23e00216e5a5 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig @@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK bool "ARC HS Development Kit SOC" depends on ISA_ARCV2 select ARC_HAS_ACCL_REGS + select ARC_IRQ_NO_AUTOSAVE select CLK_HSDK select RESET_HSDK select HAVE_PCI diff --git a/arch/arm/Kconfig 
b/arch/arm/Kconfig index 664e918e2624..5085a1eab9fc 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2,6 +2,7 @@ config ARM bool default y + select ARCH_32BIT_OFF_T select ARCH_CLOCKSOURCE_DATA select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC select ARCH_HAS_DEBUG_VIRTUAL if MMU @@ -12,9 +13,11 @@ config ARM select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PTE_SPECIAL if ARM_LPAE select ARCH_HAS_PHYS_TO_DMA + select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL select ARCH_HAS_STRICT_MODULE_RWX if MMU + select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAS_GCOV_PROFILE_ALL @@ -30,6 +33,7 @@ config ARM select CLONE_BACKWARDS select CPU_PM if SUSPEND || CPU_IDLE select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS + select DMA_DECLARE_COHERENT select DMA_REMAP if MMU select EDAC_SUPPORT select EDAC_ATOMIC_SCRUB @@ -72,7 +76,6 @@ config ARM select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL select HAVE_FUNCTION_TRACER if !XIP_KERNEL select HAVE_GCC_PLUGINS - select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_IRQ_TIME_ACCOUNTING @@ -101,7 +104,6 @@ config ARM select MODULES_USE_ELF_REL select NEED_DMA_MAP_STATE select OF_EARLY_FLATTREE if OF - select OF_RESERVED_MEM if OF select OLD_SIGACTION select OLD_SIGSUSPEND3 select PCI_SYSCALL if PCI @@ -589,11 +591,13 @@ config ARCH_DAVINCI select GENERIC_ALLOCATOR select GENERIC_CLOCKEVENTS select GENERIC_IRQ_CHIP + select GENERIC_IRQ_MULTI_HANDLER select GPIOLIB select HAVE_IDE select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF select RESET_CONTROLLER + select SPARSE_IRQ select USE_OF select ZONE_DMA help @@ -750,6 +754,8 @@ source "arch/arm/mach-mediatek/Kconfig" source "arch/arm/mach-meson/Kconfig" +source "arch/arm/mach-milbeaut/Kconfig" + source "arch/arm/mach-mmp/Kconfig" source "arch/arm/mach-moxart/Kconfig" @@ -1400,6 +1406,7 @@ config NR_CPUS config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP + select GENERIC_IRQ_MIGRATION help Say Y here to experiment with turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 9db3c584b2cb..00000e91ad65 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -190,6 +190,7 @@ machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0 machine-$(CONFIG_ARCH_MVEBU) += mvebu machine-$(CONFIG_ARCH_MXC) += imx machine-$(CONFIG_ARCH_MEDIATEK) += mediatek +machine-$(CONFIG_ARCH_MILBEAUT) += milbeaut machine-$(CONFIG_ARCH_MXS) += mxs machine-$(CONFIG_ARCH_NETX) += netx machine-$(CONFIG_ARCH_NOMADIK) += nomadik diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index bd40148a15b2..f4f5aeaf3298 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -79,6 +79,7 @@ dtb-$(CONFIG_ARCH_BCM2835) += \ bcm2835-rpi-a-plus.dtb \ bcm2835-rpi-cm1-io1.dtb \ bcm2836-rpi-2-b.dtb \ + bcm2837-rpi-3-a-plus.dtb \ bcm2837-rpi-3-b.dtb \ bcm2837-rpi-3-b-plus.dtb \ bcm2837-rpi-cm3-io3.dtb \ @@ -115,6 +116,7 @@ dtb-$(CONFIG_ARCH_BCM_5301X) += \ bcm47094-luxul-xwr-3100.dtb \ bcm47094-luxul-xwr-3150-v1.dtb \ bcm47094-netgear-r8500.dtb \ + bcm47094-phicomm-k3.dtb \ bcm94708.dtb \ bcm94709.dtb \ bcm953012er.dtb \ @@ -313,7 +315,8 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \ dtb-$(CONFIG_ARCH_LPC18XX) += \ lpc4337-ciaa.dtb \ lpc4350-hitex-eval.dtb \ - lpc4357-ea4357-devkit.dtb + lpc4357-ea4357-devkit.dtb \ + lpc4357-myd-lpc4357.dtb dtb-$(CONFIG_ARCH_LPC32XX) += \ lpc3250-ea3250.dtb \ lpc3250-phy3250.dtb @@ -445,6 +448,9 @@ dtb-$(CONFIG_SOC_IMX6Q) += \ imx6dl-wandboard.dtb \ imx6dl-wandboard-revb1.dtb \ imx6dl-wandboard-revd1.dtb \ + imx6dl-yapp4-draco.dtb \ + imx6dl-yapp4-hydra.dtb \ + imx6dl-yapp4-ursa.dtb \ imx6q-apalis-eval.dtb \ imx6q-apalis-ixora.dtb \ imx6q-apalis-ixora-v1.1.dtb \ @@ -561,6 +567,7 @@ dtb-$(CONFIG_SOC_IMX6UL) += \ imx6ul-opos6uldev.dtb \ imx6ul-pico-hobbit.dtb \ imx6ul-pico-pi.dtb \ + imx6ul-phytec-phyboard-segin-full.dtb \ imx6ul-tx6ul-0010.dtb \ imx6ul-tx6ul-0011.dtb \ imx6ul-tx6ul-mainboard.dtb \ @@ -599,6 +606,7 @@ dtb-$(CONFIG_SOC_VF610) += \ vf610-zii-dev-rev-b.dtb \ vf610-zii-dev-rev-c.dtb \ vf610-zii-scu4-aib.dtb \ + vf610-zii-ssmb-dtu.dtb \ vf610-zii-ssmb-spu3.dtb dtb-$(CONFIG_ARCH_MXS) += \ imx23-evk.dtb \ @@ -700,6 +708,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \ omap3-thunder.dtb \ omap3-zoom3.dtb dtb-$(CONFIG_SOC_TI81XX) += \ + am3874-iceboard.dtb \ dm8148-evm.dtb \ dm8148-t410.dtb \ dm8168-evm.dtb \ @@ -719,6 +728,7 @@ dtb-$(CONFIG_SOC_AM33XX) += \ am335x-cm-t335.dtb \ am335x-evm.dtb \ am335x-evmsk.dtb \ + am335x-guardian.dtb \ am335x-icev2.dtb \ am335x-lxm.dtb \ am335x-moxa-uc-2101.dtb \ @@ -843,6 +853,7 @@ dtb-$(CONFIG_ARCH_RENESAS) += \ r7s72100-genmai.dtb \ r7s72100-gr-peach.dtb \ r7s72100-rskrza1.dtb \ + r7s9210-rza2mevb.dtb \ r8a73a4-ape6evm.dtb \ r8a7740-armadillo800eva.dtb \ r8a7743-iwg20d-q7.dtb \ @@ -868,6 +879,7 @@ dtb-$(CONFIG_ARCH_RENESAS) += \ r9a06g032-rzn1d400-db.dtb \ sh73a0-kzm9g.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += \ + rv1108-elgin-r1.dtb \ rv1108-evb.dtb \ rk3036-evb.dtb \ rk3036-kylin.dtb \ @@ -919,6 +931,7 @@ dtb-$(CONFIG_ARCH_SOCFPGA) += \ socfpga_arria10_socdk_nand.dtb \ socfpga_arria10_socdk_qspi.dtb \ socfpga_arria10_socdk_sdmmc.dtb \ + socfpga_cyclone5_chameleon96.dtb \ socfpga_cyclone5_mcvevk.dtb \ socfpga_cyclone5_socdk.dtb \ socfpga_cyclone5_de0_nano_soc.dtb \ @@ -1233,6 +1246,7 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += \ mt7623n-bananapi-bpi-r2.dtb \ mt8127-moose.dtb \ mt8135-evbp1.dtb +dtb-$(CONFIG_ARCH_MILBEAUT) += milbeaut-m10v-evb.dtb dtb-$(CONFIG_ARCH_ZX) += zx296702-ad1.dtb dtb-$(CONFIG_ARCH_ASPEED) += \ aspeed-ast2500-evb.dtb \ diff --git 
a/arch/arm/boot/dts/alphascale-asm9260.dtsi b/arch/arm/boot/dts/alphascale-asm9260.dtsi index 907fc7bfc418..2ce6038536fd 100644 --- a/arch/arm/boot/dts/alphascale-asm9260.dtsi +++ b/arch/arm/boot/dts/alphascale-asm9260.dtsi @@ -4,10 +4,11 @@ * Licensed under the X11 license or the GPL v2 (or later) */ -#include "skeleton.dtsi" #include / { + #address-cells = <1>; + #size-cells = <1>; interrupt-parent = <&icoll>; memory { diff --git a/arch/arm/boot/dts/alpine.dtsi b/arch/arm/boot/dts/alpine.dtsi index 731df7a8c4e6..d3036ea823d1 100644 --- a/arch/arm/boot/dts/alpine.dtsi +++ b/arch/arm/boot/dts/alpine.dtsi @@ -25,12 +25,18 @@ */ #include -#include "skeleton64.dtsi" / { + #address-cells = <2>; + #size-cells = <2>; /* SOC compatibility */ compatible = "al,alpine"; + memory { + device_type = "memory"; + reg = <0 0 0 0>; + }; + /* CPU Configuration */ cpus { #address-cells = <1>; diff --git a/arch/arm/boot/dts/am335x-baltos-ir2110.dts b/arch/arm/boot/dts/am335x-baltos-ir2110.dts index 75de1e723303..50dcf1290ac6 100644 --- a/arch/arm/boot/dts/am335x-baltos-ir2110.dts +++ b/arch/arm/boot/dts/am335x-baltos-ir2110.dts @@ -72,7 +72,3 @@ dual_emac_res_vlan = <2>; phy-handle = <&phy1>; }; - -&phy_sel { - rmii-clock-ext = <1>; -}; diff --git a/arch/arm/boot/dts/am335x-baltos-ir3220.dts b/arch/arm/boot/dts/am335x-baltos-ir3220.dts index 1b215c425c57..f3f1abd26470 100644 --- a/arch/arm/boot/dts/am335x-baltos-ir3220.dts +++ b/arch/arm/boot/dts/am335x-baltos-ir3220.dts @@ -114,7 +114,3 @@ dual_emac_res_vlan = <2>; phy-handle = <&phy1>; }; - -&phy_sel { - rmii-clock-ext = <1>; -}; diff --git a/arch/arm/boot/dts/am335x-baltos-ir5221.dts b/arch/arm/boot/dts/am335x-baltos-ir5221.dts index 832ead864dc5..42f473f0ed77 100644 --- a/arch/arm/boot/dts/am335x-baltos-ir5221.dts +++ b/arch/arm/boot/dts/am335x-baltos-ir5221.dts @@ -133,10 +133,6 @@ phy-handle = <&phy1>; }; -&phy_sel { - rmii-clock-ext = <1>; -}; - &dcan1 { pinctrl-names = "default"; pinctrl-0 = <&dcan1_pins>; diff --git a/arch/arm/boot/dts/am335x-chiliboard.dts b/arch/arm/boot/dts/am335x-chiliboard.dts index 9c2a947aacf5..bffa5dce54ec 100644 --- a/arch/arm/boot/dts/am335x-chiliboard.dts +++ b/arch/arm/boot/dts/am335x-chiliboard.dts @@ -14,6 +14,10 @@ compatible = "grinn,am335x-chiliboard", "grinn,am335x-chilisom", "ti,am33xx"; + chosen { + stdout-path = &uart0; + }; + leds { compatible = "gpio-leds"; pinctrl-names = "default"; @@ -151,10 +155,6 @@ phy-mode = "rmii"; }; -&phy_sel { - rmii-clock-ext; -}; - /* USB */ &usb { status = "okay"; diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index b67f5fee1469..dce5be5df97b 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts @@ -729,7 +729,7 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; }; &tscadc { diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 172c0224e7f6..b128998097ce 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts @@ -651,13 +651,13 @@ &cpsw_emac0 { phy-handle = <ðphy0>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <1>; }; &cpsw_emac1 { phy-handle = <ðphy1>; - phy-mode = "rgmii-txid"; + phy-mode = "rgmii-id"; dual_emac_res_vlan = <2>; }; diff --git a/arch/arm/boot/dts/am335x-guardian.dts b/arch/arm/boot/dts/am335x-guardian.dts new file mode 100644 index 000000000000..c9611ea4b884 --- /dev/null +++ b/arch/arm/boot/dts/am335x-guardian.dts @@ -0,0 +1,511 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* + * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2018 Robert Bosch Power Tools GmbH + */ +/dts-v1/; + +#include "am33xx.dtsi" +#include +#include + +/ { + model = "Bosch AM335x Guardian"; + compatible = "bosch,am335x-guardian", "ti,am33xx"; + + chosen { + stdout-path = &uart0; + tick-timer = &timer2; + }; + + cpus { + cpu@0 { + cpu0-supply = <&dcdc2_reg>; + }; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x10000000>; /* 256 MB */ + }; + + gpio_keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&gpio_keys_pins>; + + button21 { + label = "guardian-power-button"; + linux,code = ; + gpios = <&gpio2 21 0>; + wakeup-source; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&leds_pins>; + + led1 { + label = "green:heartbeat"; + gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + default-state = "off"; + }; + + led2 { + label = "green:mmc0"; + gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "mmc0"; + default-state = "off"; + }; + }; + + panel { + compatible = "ti,tilcdc,panel"; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&lcd_pins_default &lcd_disen_pins>; + pinctrl-1 = <&lcd_pins_sleep>; + + display-timings { + 320x240 { + hactive = <320>; + vactive = <240>; + hback-porch = <68>; + hfront-porch = <20>; + hsync-len = <1>; + vback-porch = <18>; + vfront-porch = <4>; + vsync-len = <1>; + clock-frequency = <9000000>; + hsync-active = <0>; + vsync-active = <0>; + }; + }; + panel-info { + ac-bias = <255>; + ac-bias-intrpt = <0>; + dma-burst-sz = <16>; + bpp = <24>; + bus-width = <16>; + fdd = <0x80>; + sync-edge = <0>; + sync-ctrl = <1>; + raster-order = <0>; + fifo-th = <0>; + }; + + }; + + pwm7: dmtimer-pwm { + compatible = "ti,omap-dmtimer-pwm"; + ti,timers = <&timer7>; + pinctrl-names = "default"; + pinctrl-0 = <&dmtimer7_pins>; + }; + + vmmcsd_fixed: regulator-3v3 { + compatible = "regulator-fixed"; + regulator-name = "vmmcsd_fixed"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; +}; + +&cppi41dma { + status = "okay"; +}; + +&elm { + status = "okay"; +}; + +&gpmc { + pinctrl-names = "default"; + pinctrl-0 = <&nandflash_pins>; + ranges = <0 0 0x08000000 0x1000000>; /* CS0: 16MB for NAND */ + status = "okay"; + + nand@0,0 { + compatible = "ti,omap2-nand"; + reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ + interrupt-parent = <&gpmc>; + interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ + <1 IRQ_TYPE_NONE>; /* termcount */ + rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ + ti,nand-ecc-opt = "bch16"; + ti,elm-id = <&elm>; + nand-bus-width = <8>; + gpmc,device-width = <1>; + gpmc,sync-clk-ps = <0>; + gpmc,cs-on-ns = <0>; + gpmc,cs-rd-off-ns = <44>; + gpmc,cs-wr-off-ns = <44>; + gpmc,adv-on-ns = <6>; + gpmc,adv-rd-off-ns = <34>; + gpmc,adv-wr-off-ns = <44>; + gpmc,we-on-ns = <0>; + gpmc,we-off-ns = <40>; + gpmc,oe-on-ns = <0>; + gpmc,oe-off-ns = <54>; + gpmc,access-ns = <64>; + gpmc,rd-cycle-ns = <82>; + gpmc,wr-cycle-ns = <82>; + gpmc,bus-turnaround-ns = <0>; + gpmc,cycle2cycle-delay-ns = <0>; + gpmc,clk-activation-ns = <0>; + gpmc,wr-access-ns = <40>; + gpmc,wr-data-mux-bus-ns = <0>; + + /* + * MTD partition table + * + * All SPL-* partitions are sized to minimal length which can + * be independently programmable. For NAND flash this is equal + * to size of erase-block. 
+ */ + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "SPL"; + reg = <0x0 0x40000>; + }; + + partition@1 { + label = "SPL.backup1"; + reg = <0x40000 0x40000>; + }; + + partition@2 { + label = "SPL.backup2"; + reg = <0x80000 0x40000>; + }; + + partition@3 { + label = "SPL.backup3"; + reg = <0xc0000 0x40000>; + }; + + partition@4 { + label = "u-boot"; + reg = <0x100000 0x100000>; + }; + + partition@5 { + label = "u-boot.backup1"; + reg = <0x200000 0x100000>; + }; + + partition@6 { + label = "u-boot-env"; + reg = <0x300000 0x40000>; + }; + + partition@7 { + label = "u-boot-env.backup1"; + reg = <0x340000 0x40000>; + }; + + partition@8 { + label = "UBI"; + reg = <0x380000 0x1fc80000>; + }; + }; +}; + +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; + clock-frequency = <400000>; + status = "okay"; + + tps: tps@24 { + reg = <0x24>; + }; +}; + +&lcdc { + blue-and-red-wiring = "crossed"; + status = "okay"; +}; + +&mmc1 { + bus-width = <0x4>; + pinctrl-names = "default"; + pinctrl-0 = <&mmc1_pins>; + cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; + vmmc-supply = <&vmmcsd_fixed>; + status = "okay"; +}; + +&rtc { + clocks = <&clk_32768_ck>, <&clk_24mhz_clkctrl AM3_CLK_24MHZ_CLKDIV32K_CLKCTRL 0>; + clock-names = "ext-clk", "int-clk"; + system-power-controller; +}; + +&spi0 { + ti,pindir-d0-out-d1-in; + pinctrl-names = "default"; + pinctrl-0 = <&spi0_pins>; + status = "okay"; +}; + +#include "tps65217.dtsi" + +&tps { + ti,pmic-shutdown-controller; + interrupt-parent = <&intc>; + interrupts = <7>; /* NMI */ + + backlight { + isel = <1>; /* 1 - ISET1, 2 ISET2 */ + fdim = <100>; /* TPS65217_BL_FDIM_100HZ */ + default-brightness = <100>; + }; + + regulators { + dcdc1_reg: regulator@0 { + regulator-name = "vdds_dpr"; + regulator-always-on; + }; + + dcdc2_reg: regulator@1 { + regulator-name = "vdd_mpu"; + regulator-min-microvolt = <925000>; + regulator-max-microvolt = <1351500>; + regulator-boot-on; + regulator-always-on; + }; + + dcdc3_reg: regulator@2 { + regulator-name = "vdd_core"; + regulator-min-microvolt = <925000>; + regulator-max-microvolt = <1150000>; + regulator-boot-on; + regulator-always-on; + }; + + ldo1_reg: regulator@3 { + regulator-name = "vio,vrtc,vdds"; + regulator-always-on; + }; + + ldo2_reg: regulator@4 { + regulator-name = "vdd_3v3aux"; + regulator-always-on; + }; + + ldo3_reg: regulator@5 { + regulator-name = "vdd_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + ldo4_reg: regulator@6 { + regulator-name = "vdd_3v3a"; + regulator-always-on; + }; + }; +}; + +&tscadc { + status = "okay"; + + adc { + ti,adc-channels = <0 1 2 3 4 5 6>; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins>; + status = "okay"; +}; + +&usb { + status = "okay"; +}; + +&usb_ctrl_mod { + status = "okay"; +}; + +&usb0 { + dr_mode = "peripheral"; + status = "okay"; +}; + +&usb0_phy { + status = "okay"; +}; + +&usb1 { + dr_mode = "host"; + status = "okay"; +}; + +&usb1_phy { + status = "okay"; +}; + +&am33xx_pinmux { + pinctrl-names = "default"; + pinctrl-0 = <&clkout2_pin &gpio_pins>; + + clkout2_pin: pinmux_clkout2_pin { + pinctrl-single,pins = < + AM33XX_IOPAD(0x9b4, PIN_OUTPUT_PULLDOWN | MUX_MODE3) + >; + }; + + dmtimer7_pins: pinmux_dmtimer7_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x968, PIN_OUTPUT | MUX_MODE5) + >; + }; + + gpio_keys_pins: pinmux_gpio_keys_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x940, PIN_INPUT | MUX_MODE7) + >; + }; + + gpio_pins: pinmux_gpio_pins { + 
pinctrl-single,pins = < + AM33XX_IOPAD(0x928, PIN_OUTPUT | MUX_MODE7) + AM33XX_IOPAD(0x990, PIN_OUTPUT | MUX_MODE7) + >; + }; + + i2c0_pins: pinmux_i2c0_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x988, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x98c, PIN_INPUT_PULLUP | MUX_MODE0) + >; + }; + + lcd_disen_pins: pinmux_lcd_disen_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x9a4, PIN_OUTPUT_PULLUP | SLEWCTRL_SLOW | MUX_MODE7) + >; + }; + + lcd_pins_default: pinmux_lcd_pins_default { + pinctrl-single,pins = < + AM33XX_IOPAD(0x820, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x824, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x828, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x82c, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x830, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x834, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x838, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x83c, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE1) + AM33XX_IOPAD(0x8a0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8a4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8a8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8ac, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8b0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8b4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8b8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8bc, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8c0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8c4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8c8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8cc, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8d0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8d4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8d8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8dc, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8e0, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8e4, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8e8, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + AM33XX_IOPAD(0x8ec, PIN_OUTPUT | SLEWCTRL_SLOW | MUX_MODE0) + >; + }; + + lcd_pins_sleep: pinmux_lcd_pins_sleep { + pinctrl-single,pins = < + AM33XX_IOPAD(0x8a0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8a4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8a8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8ac, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8b0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8b4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8b8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8bc, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8c0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8c4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8c8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8cc, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8d0, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8d4, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8d8, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8dc, PULL_DISABLE | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8e0, PIN_INPUT_PULLDOWN | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8e4, PIN_INPUT_PULLDOWN | SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8e8, PIN_INPUT_PULLDOWN | 
SLEWCTRL_SLOW | MUX_MODE7) + AM33XX_IOPAD(0x8ec, PIN_INPUT_PULLDOWN | SLEWCTRL_SLOW | MUX_MODE7) + >; + }; + + leds_pins: pinmux_leds_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x868, PIN_OUTPUT | MUX_MODE7) + AM33XX_IOPAD(0x86c, PIN_OUTPUT | MUX_MODE7) + >; + }; + + mmc1_pins: pinmux_mmc1_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x8f4, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x8f8, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) + >; + }; + + spi0_pins: pinmux_spi0_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x950, PIN_OUTPUT_PULLDOWN | MUX_MODE0) + AM33XX_IOPAD(0x954, PIN_OUTPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x95c, PIN_OUTPUT_PULLUP | MUX_MODE0) + >; + }; + + uart0_pins: pinmux_uart0_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x970, PIN_INPUT_PULLUP | MUX_MODE0) + AM33XX_IOPAD(0x974, PIN_OUTPUT_PULLDOWN | MUX_MODE0) + >; + }; + + nandflash_pins: pinmux_nandflash_pins { + pinctrl-single,pins = < + AM33XX_IOPAD(0x800, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x804, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x808, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x80c, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x810, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x814, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x818, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x81c, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x870, PIN_INPUT | MUX_MODE0) + AM33XX_IOPAD(0x874, PIN_OUTPUT | MUX_MODE0) + AM33XX_IOPAD(0x87c, PIN_OUTPUT | MUX_MODE0) + AM33XX_IOPAD(0x890, PIN_OUTPUT | MUX_MODE0) + AM33XX_IOPAD(0x894, PIN_OUTPUT | MUX_MODE0) + AM33XX_IOPAD(0x898, PIN_OUTPUT | MUX_MODE0) + AM33XX_IOPAD(0x89c, PIN_OUTPUT | MUX_MODE0) + >; + }; +}; diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts index f2005ecca74f..9ac775c71072 100644 --- a/arch/arm/boot/dts/am335x-icev2.dts +++ b/arch/arm/boot/dts/am335x-icev2.dts @@ -484,10 +484,6 @@ dual_emac; }; -&phy_sel { - rmii-clock-ext; -}; - &davinci_mdio { pinctrl-names = "default", "sleep"; pinctrl-0 = <&davinci_mdio_default>; diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi index 55b4c94cfafb..cbd22f25de95 100644 --- a/arch/arm/boot/dts/am335x-igep0033.dtsi +++ b/arch/arm/boot/dts/am335x-igep0033.dtsi @@ -123,10 +123,6 @@ phy-mode = "rmii"; }; -&phy_sel { - rmii-clock-ext; -}; - &elm { status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-lxm.dts b/arch/arm/boot/dts/am335x-lxm.dts index 481edcfaf121..d0e8e720a4d6 100644 --- a/arch/arm/boot/dts/am335x-lxm.dts +++ b/arch/arm/boot/dts/am335x-lxm.dts @@ -328,10 +328,6 @@ dual_emac_res_vlan = <3>; }; -&phy_sel { - rmii-clock-ext; -}; - &mac { pinctrl-names = "default", "sleep"; pinctrl-0 = <&cpsw_default>; diff --git a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi index 14f781953475..cb5913a69837 100644 --- a/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi +++ b/arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi @@ -159,11 +159,6 @@ status = "okay"; }; -&phy_sel { - reg= <0x44e10650 0xf5>; - rmii-clock-ext; -}; - &sham { status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts index 5a58efc0c874..e562ce40f290 100644 --- 
a/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts +++ b/arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts @@ -446,11 +446,6 @@ dual_emac_res_vlan = <2>; }; -&phy_sel { - reg= <0x44e10650 0xf5>; - rmii-clock-ext; -}; - &sham { status = "okay"; }; diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi index 428a25e952b0..015adb626b03 100644 --- a/arch/arm/boot/dts/am335x-phycore-som.dtsi +++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi @@ -100,10 +100,6 @@ status = "okay"; }; -&phy_sel { - rmii-clock-ext; -}; - /* I2C Busses */ &am33xx_pinmux { i2c0_pins: pinmux_i2c0 { diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts index d0fd68873689..bfbe27a80006 100644 --- a/arch/arm/boot/dts/am335x-shc.dts +++ b/arch/arm/boot/dts/am335x-shc.dts @@ -1,11 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * support for the bosch am335x based shc c3 board * * Copyright, C) 2015 Heiko Schocher * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ /dts-v1/; @@ -215,7 +213,7 @@ pinctrl-names = "default"; pinctrl-0 = <&mmc1_pins>; bus-width = <0x4>; - cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; cd-inverted; max-frequency = <26000000>; vmmc-supply = <&vmmcsd_fixed>; diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 7b818d9d2eab..f459ec316a22 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -279,17 +279,9 @@ #pinctrl-cells = <1>; ranges = <0 0 0x2000>; - phy_sel: cpsw-phy-sel@650 { - compatible = "ti,am3352-cpsw-phy-sel"; - reg= <0x650 0x4>; - reg-names = "gmii-sel"; - }; - am33xx_pinmux: pinmux@800 { compatible = "pinctrl-single"; reg = <0x800 0x238>; - #address-cells = <1>; - #size-cells = <0>; #pinctrl-cells = <1>; pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0x7f>; @@ -302,6 +294,12 @@ #size-cells = <1>; ranges = <0 0 0x800>; + phy_gmii_sel: phy-gmii-sel { + compatible = "ti,am3352-phy-gmii-sel"; + reg = <0x650 0x4>; + #phy-cells = <2>; + }; + scm_clocks: clocks { #address-cells = <1>; #size-cells = <0>; @@ -717,7 +715,6 @@ interrupts = <40 41 42 43>; ranges = <0 0 0x8000>; syscon = <&scm_conf>; - cpsw-phy-sel = <&phy_sel>; status = "disabled"; davinci_mdio: mdio@1000 { @@ -733,11 +730,13 @@ cpsw_emac0: slave@200 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 1 1>; }; cpsw_emac1: slave@300 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 2 1>; }; }; }; diff --git a/arch/arm/boot/dts/am3874-iceboard.dts b/arch/arm/boot/dts/am3874-iceboard.dts new file mode 100644 index 000000000000..883fb85135d4 --- /dev/null +++ b/arch/arm/boot/dts/am3874-iceboard.dts @@ -0,0 +1,496 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device tree for Winterland IceBoard + * + * http://mcgillcosmology.com + * http://threespeedlogic.com + * + * This is an ARM + FPGA instrumentation board used at telescopes in + * Antarctica (the South Pole Telescope), Chile (POLARBEAR), and at the DRAO + * observatory in British Columbia (CHIME). + * + * Copyright (c) 2019 Three-Speed Logic, Inc. 
+ */ + +/dts-v1/; + +#include "dm814x.dtsi" +#include + +/ { + model = "Winterland IceBoard"; + compatible = "ti,dm8148", "ti,dm814"; + + chosen { + stdout-path = "serial1:115200n8"; + bootargs = "earlycon"; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x40000000>; /* 1 GB */ + }; + + vmmcsd_fixed: fixedregulator0 { + compatible = "regulator-fixed"; + regulator-name = "vmmcsd_fixed"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; +}; + +/* The MAC provides internal delay for the transmit path ONLY, which is enabled + * provided no -id/-txid/-rxid suffix is provided to "phy-mode". + * + * The receive path is delayed at the PHY. The recommended register settings + * are 0xf0 for the control bits, and 0x7777 for the data bits. However, the + * conversion code in the kernel lies: the PHY's registers are 120 ps per tap, + * and the kernel assumes 200 ps per tap. So we have fudged the numbers here to + * obtain the correct register settings. + */ +&mac { dual_emac = <1>; }; +&cpsw_emac0 { + phy-handle = <ðphy0>; + phy-mode = "rgmii"; + dual_emac_res_vlan = <1>; +}; +&cpsw_emac1 { + phy-handle = <ðphy1>; + phy-mode = "rgmii"; + dual_emac_res_vlan = <2>; +}; + +&davinci_mdio { + ethphy0: ethernet-phy@0 { + reg = <0x2>; + + rxc-skew-ps = <3000>; + rxdv-skew-ps = <0>; + + rxd3-skew-ps = <0>; + rxd2-skew-ps = <0>; + rxd1-skew-ps = <0>; + rxd0-skew-ps = <0>; + + phy-reset-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; + }; + + ethphy1: ethernet-phy@1 { + reg = <0x1>; + + rxc-skew-ps = <3000>; + rxdv-skew-ps = <0>; + + rxd3-skew-ps = <0>; + rxd2-skew-ps = <0>; + rxd1-skew-ps = <0>; + rxd0-skew-ps = <0>; + + phy-reset-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; + }; +}; + +&mmc1 { status = "disabled"; }; +&mmc2 { + pinctrl-names = "default"; + pinctrl-0 = <&mmc2_pins>; + vmmc-supply = <&vmmcsd_fixed>; + bus-width = <4>; +}; +&mmc3 { status = "disabled"; }; + +&i2c1 { + /* Most I2C activity happens through this port, with the sole exception + * of the backplane. Since there are multiply assigned addresses, the + * "i2c-mux-idle-disconnect" is important. 
+ */ + + pca9548@70 { + compatible = "nxp,pca9548"; + reg = <0x70>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@0 { + /* FMC A */ + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + i2c-mux-idle-disconnect; + }; + + i2c@1 { + /* FMC B */ + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + i2c-mux-idle-disconnect; + }; + + i2c@2 { + /* QSFP A */ + #address-cells = <1>; + #size-cells = <0>; + reg = <2>; + i2c-mux-idle-disconnect; + }; + + i2c@3 { + /* QSFP B */ + #address-cells = <1>; + #size-cells = <0>; + reg = <3>; + i2c-mux-idle-disconnect; + }; + + i2c@4 { + /* SFP */ + #address-cells = <1>; + #size-cells = <0>; + reg = <4>; + i2c-mux-idle-disconnect; + }; + + i2c@5 { + #address-cells = <1>; + #size-cells = <0>; + reg = <5>; + i2c-mux-idle-disconnect; + + ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <5000>; }; + ina230@41 { compatible = "ti,ina230"; reg = <0x41>; shunt-resistor = <5000>; }; + ina230@42 { compatible = "ti,ina230"; reg = <0x42>; shunt-resistor = <5000>; }; + + ina230@44 { compatible = "ti,ina230"; reg = <0x44>; shunt-resistor = <5000>; }; + ina230@45 { compatible = "ti,ina230"; reg = <0x45>; shunt-resistor = <5000>; }; + ina230@46 { compatible = "ti,ina230"; reg = <0x46>; shunt-resistor = <5000>; }; + + ina230@47 { compatible = "ti,ina230"; reg = <0x47>; shunt-resistor = <5500>; }; + ina230@48 { compatible = "ti,ina230"; reg = <0x48>; shunt-resistor = <2360>; }; + ina230@49 { compatible = "ti,ina230"; reg = <0x49>; shunt-resistor = <2360>; }; + ina230@43 { compatible = "ti,ina230"; reg = <0x43>; shunt-resistor = <2360>; }; + ina230@4b { compatible = "ti,ina230"; reg = <0x4b>; shunt-resistor = <5500>; }; + ina230@4c { compatible = "ti,ina230"; reg = <0x4c>; shunt-resistor = <2360>; }; + ina230@4d { compatible = "ti,ina230"; reg = <0x4d>; shunt-resistor = <770>; }; + ina230@4e { compatible = "ti,ina230"; reg = <0x4e>; shunt-resistor = <770>; }; + ina230@4f { compatible = "ti,ina230"; reg = <0x4f>; shunt-resistor = <770>; }; + }; + + i2c@6 { + /* Backplane */ + #address-cells = <1>; + #size-cells = <0>; + reg = <6>; + i2c-mux-idle-disconnect; + }; + + i2c@7 { + #address-cells = <1>; + #size-cells = <0>; + reg = <7>; + i2c-mux-idle-disconnect; + + u41: pca9575@20 { + compatible = "nxp,pca9575"; + reg = <0x20>; + gpio-controller; + #gpio-cells = <2>; + + gpio-line-names = + "FMCA_EN_12V0", "FMCA_EN_3V3", "FMCA_EN_VADJ", "FMCA_PG_M2C", + "FMCA_PG_C2M", "FMCA_PRSNT_M2C_L", "FMCA_CLK_DIR", "SFP_LOS", + "FMCB_EN_12V0", "FMCB_EN_3V3", "FMCB_EN_VADJ", "FMCB_PG_M2C", + "FMCB_PG_C2M", "FMCB_PRSNT_M2C_L", "FMCB_CLK_DIR", "SFP_ModPrsL"; + reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>; + }; + + u42: pca9575@21 { + compatible = "nxp,pca9575"; + reg = <0x21>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = + "QSFPA_ModPrsL", "QSFPA_IntL", "QSFPA_ResetL", "QSFPA_ModSelL", + "QSFPA_LPMode", "QSFPB_ModPrsL", "QSFPB_IntL", "QSFPB_ResetL", + "SFP_TxFault", "SFP_TxDisable", "SFP_RS0", "SFP_RS1", + "QSFPB_ModSelL", "QSFPB_LPMode", "SEL_SFP", "ARM_MR"; + reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>; + }; + + u48: pca9575@22 { + compatible = "nxp,pca9575"; + reg=<0x22>; + gpio-controller; + #gpio-cells = <2>; + + sw-gpios = <&u48 0 0>, <&u48 1 0>, <&u48 2 0>, <&u48 3 0>, + <&u48 4 0>, <&u48 5 0>, <&u48 6 0>, <&u48 7 0>; + led-gpios = <&u48 7 0>, <&u48 6 0>, <&u48 5 0>, <&u48 4 0>, + <&u48 3 0>, <&u48 2 0>, <&u48 1 0>, <&u48 0 0>; + + gpio-line-names = + "GP_SW1", "GP_SW2", "GP_SW3", "GP_SW4", + "GP_SW5", "GP_SW6", "GP_SW7", "GP_SW8", + 
"GP_LED8", "GP_LED7", "GP_LED6", "GP_LED5", + "GP_LED4", "GP_LED3", "GP_LED2", "GP_LED1"; + reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>; + }; + + u59: pca9575@23 { + compatible = "nxp,pca9575"; + reg=<0x23>; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = + "GP_LED9", "GP_LED10", "GP_LED11", "GP_LED12", + "GTX1V8PowerFault", "PHYAPowerFault", "PHYBPowerFault", "ArmPowerFault", + "BP_SLOW_GPIO0", "BP_SLOW_GPIO1", "BP_SLOW_GPIO2", "BP_SLOW_GPIO3", + "BP_SLOW_GPIO4", "BP_SLOW_GPIO5", "__unused_u59_p16", "__unused_u59_p17"; + reset_gpios = <&gpio2 11 GPIO_ACTIVE_LOW>; + }; + + tmp100@48 { compatible = "ti,tmp100"; reg = <0x48>; }; + tmp100@4a { compatible = "ti,tmp100"; reg = <0x4a>; }; + tmp100@4b { compatible = "ti,tmp100"; reg = <0x4b>; }; + tmp100@4c { compatible = "ti,tmp100"; reg = <0x4c>; }; + + /* EEPROM bank and serial number are treated as separate devices */ + at24c01@57 { compatible = "atmel,24c01"; reg = <0x57>; }; + at24cs01@5f { compatible = "atmel,24cs01"; reg = <0x5f>; }; + }; + }; +}; + +&i2c2 { + pca9548@71 { + compatible = "nxp,pca9548"; + reg = <0x71>; + #address-cells = <1>; + #size-cells = <0>; + + i2c@6 { + /* Backplane */ + #address-cells = <1>; + #size-cells = <0>; + reg = <6>; + multi-master; + + /* All backplanes should have this -- it's how we know they're there. */ + at24c08@54 { compatible="atmel,24c08"; reg=<0x54>; }; + at24cs08@5c { compatible="atmel,24cs08"; reg=<0x5c>; }; + + /* 16 slot backplane */ + tmp421@4d { compatible="ti,tmp421"; reg=<0x4d>; }; + tmp421@4e { compatible="ti,tmp421"; reg=<0x4e>; }; + ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <2360>; }; + amc6821@18 { compatible = "ti,amc6821"; reg = <0x18>; }; + + /* Single slot backplane */ + }; + }; +}; + +&pincntl { + mmc2_pins: pinmux_mmc2_pins { + pinctrl-single,pins = < + DM814X_IOPAD(0x0800, PIN_INPUT | 0x1) /* SD1_CLK */ + DM814X_IOPAD(0x0804, PIN_INPUT_PULLUP | 0x1) /* SD1_CMD */ + DM814X_IOPAD(0x0808, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[0] */ + DM814X_IOPAD(0x080c, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[1] */ + DM814X_IOPAD(0x0810, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[2] */ + DM814X_IOPAD(0x0814, PIN_INPUT_PULLUP | 0x1) /* SD1_DAT[3] */ + DM814X_IOPAD(0x0924, PIN_INPUT_PULLUP | 0x40) /* SD1_POW */ + DM814X_IOPAD(0x0928, PIN_INPUT | 0x40) /* SD1_SDWP */ + DM814X_IOPAD(0x093C, PIN_INPUT | 0x2) /* SD1_SDCD */ + >; + }; + + usb0_pins: pinmux_usb0_pins { + pinctrl-single,pins = < + DM814X_IOPAD(0x0c34, PIN_OUTPUT | 0x1) /* USB0_DRVVBUS */ + >; + }; + + usb1_pins: pinmux_usb1_pins { + pinctrl-single,pins = < + DM814X_IOPAD(0x0834, PIN_OUTPUT | 0x80) /* USB1_DRVVBUS */ + >; + }; + + gpio1_pins: pinmux_gpio1_pins { + pinctrl-single,pins = < + DM814X_IOPAD(0x081c, PIN_OUTPUT | 0x80) /* PROGRAM_B */ + DM814X_IOPAD(0x0820, PIN_INPUT | 0x80) /* INIT_B */ + DM814X_IOPAD(0x0824, PIN_INPUT | 0x80) /* DONE */ + + DM814X_IOPAD(0x0838, PIN_INPUT_PULLUP | 0x80) /* FMCA_TMS */ + DM814X_IOPAD(0x083c, PIN_INPUT_PULLUP | 0x80) /* FMCA_TCK */ + DM814X_IOPAD(0x0898, PIN_INPUT_PULLUP | 0x80) /* FMCA_TDO */ + DM814X_IOPAD(0x089c, PIN_INPUT_PULLUP | 0x80) /* FMCA_TDI */ + DM814X_IOPAD(0x08ac, PIN_INPUT_PULLUP | 0x80) /* FMCA_TRST */ + + DM814X_IOPAD(0x08b0, PIN_INPUT_PULLUP | 0x80) /* FMCB_TMS */ + DM814X_IOPAD(0x0a88, PIN_INPUT_PULLUP | 0x80) /* FMCB_TCK */ + DM814X_IOPAD(0x0a8c, PIN_INPUT_PULLUP | 0x80) /* FMCB_TDO */ + DM814X_IOPAD(0x08bc, PIN_INPUT_PULLUP | 0x80) /* FMCB_TDI */ + DM814X_IOPAD(0x0a94, PIN_INPUT_PULLUP | 0x80) /* FMCB_TRST */ + + DM814X_IOPAD(0x08d4, PIN_INPUT_PULLUP | 
0x80) /* FPGA_TMS */ + DM814X_IOPAD(0x0aa8, PIN_INPUT_PULLUP | 0x80) /* FPGA_TCK */ + DM814X_IOPAD(0x0adc, PIN_INPUT_PULLUP | 0x80) /* FPGA_TDO */ + DM814X_IOPAD(0x0ab0, PIN_INPUT_PULLUP | 0x80) /* FPGA_TDI */ + >; + }; + + gpio2_pins: pinmux_gpio2_pins { + pinctrl-single,pins = < + DM814X_IOPAD(0x090c, PIN_INPUT_PULLUP | 0x80) /* PHY A IRQ */ + DM814X_IOPAD(0x0910, PIN_INPUT_PULLUP | 0x80) /* PHY A RESET */ + DM814X_IOPAD(0x08f4, PIN_INPUT_PULLUP | 0x80) /* PHY B IRQ */ + DM814X_IOPAD(0x08f8, PIN_INPUT_PULLUP | 0x80) /* PHY B RESET */ + + //DM814X_IOPAD(0x0a14, PIN_INPUT_PULLUP | 0x80) /* ARM IRQ */ + //DM814X_IOPAD(0x0900, PIN_INPUT | 0x80) /* GPIO IRQ */ + DM814X_IOPAD(0x0a2c, PIN_INPUT_PULLUP | 0x80) /* GPIO RESET */ + >; + }; + + gpio4_pins: pinmux_gpio4_pins { + pinctrl-single,pins = < + /* The PLL doesn't react well to the SPI controller reset, so + * we force the CS lines to pull up as GPIOs until we're ready. + * See https://e2e.ti.com/support/processors/f/791/t/276011?Linux-support-for-AM3874-DM8148-in-Arago-linux-omap3 + */ + DM814X_IOPAD(0x0b3c, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO0 */ + DM814X_IOPAD(0x0b40, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO1 */ + DM814X_IOPAD(0x0b44, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO2 */ + DM814X_IOPAD(0x0b48, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO3 */ + DM814X_IOPAD(0x0b4c, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO4 */ + DM814X_IOPAD(0x0b50, PIN_INPUT_PULLUP | 0x80) /* BP_ARM_GPIO5 */ + >; + }; + + spi2_pins: pinmux_spi2_pins { + pinctrl-single,pins = < + DM814X_IOPAD(0x0950, PIN_INPUT_PULLUP | 0x80) /* PLL SPI CS1 as GPIO */ + DM814X_IOPAD(0x0818, PIN_INPUT_PULLUP | 0x80) /* PLL SPI CS2 as GPIO */ + >; + }; + + spi4_pins: pinmux_spi4_pins { + pinctrl-single,pins = < + DM814X_IOPAD(0x0a7c, 0x20) + DM814X_IOPAD(0x0b74, 0x20) + DM814X_IOPAD(0x0b78, PIN_OUTPUT | 0x20) + DM814X_IOPAD(0x0b7c, PIN_OUTPUT_PULLDOWN | 0x20) + DM814X_IOPAD(0x0b80, PIN_INPUT | 0x20) + >; + }; +}; + +&gpio1 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio1_pins>; + gpio-line-names = + "", "PROGRAM_B", "INIT_B", "DONE", /* 0-3 */ + "", "", "", "", /* 4-7 */ + "FMCA_TMS", "FMCA_TCK", "FMCA_TDO", "FMCA_TDI", /* 8-11 */ + "", "", "", "FMCA_TRST", /* 12-15 */ + "FMCB_TMS", "FMCB_TCK", "FMCB_TDO", "FMCB_TDI", /* 16-19 */ + "FMCB_TRST", "", "", "", /* 20-23 */ + "FPGA_TMS", "FPGA_TCK", "FPGA_TDO", "FPGA_TDI", /* 24-27 */ + "", "", "", ""; /* 28-31 */ +}; + +&gpio2 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio2_pins>; + gpio-line-names = + "PHYA_IRQ_N", "PHYA_RESET_N", "", "", /* 0-3 */ + "", "", "", "PHYB_IRQ_N", /* 4-7 */ + "PHYB_RESET_N", "ARM_IRQ", "GPIO_IRQ", ""; /* 8-11 */ +}; + +&gpio3 { + pinctrl-names = "default"; + /*pinctrl-0 = <&gpio3_pins>;*/ + gpio-line-names = + "", "", "ARMClkSel0", "", /* 0-3 */ + "EnFPGARef", "", "", "ARMClkSel1"; /* 4-7 */ +}; + +&gpio4 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio4_pins>; + gpio-line-names = + "BP_ARM_GPIO0", "BP_ARM_GPIO1", "BP_ARM_GPIO2", "BP_ARM_GPIO3", + "BP_ARM_GPIO4", "BP_ARM_GPIO5"; +}; + +&usb0 { + pinctrl-names = "default"; + pinctrl-0 = <&usb0_pins>; + dr_mode = "host"; +}; + +&usb1 { + pinctrl-names = "default"; + pinctrl-0 = <&usb1_pins>; + dr_mode = "host"; +}; + +&mcspi1 { + s25fl256@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <40000000>; + + fsbl@0 { + /* 256 kB */ + label = "U-Boot-min"; + reg = <0 0x40000>; + }; + ssbl@1 { + /* 512 kB */ + label = "U-Boot"; + reg = <0x40000 0x80000>; + }; + bootenv@2 { + /* 256 kB */ + label = 
"U-Boot Env"; + reg = <0xc0000 0x40000>; + }; + kernel@3 { + /* 4 MB */ + label = "Kernel"; + reg = <0x100000 0x400000>; + }; + ipmi@4 { + label = "IPMI FRU"; + reg = <0x500000 0x40000>; + }; + fs@5 { + label = "File System"; + reg = <0x540000 0x1ac0000>; + }; + }; +}; + +&mcspi3 { + /* DMA event numbers stolen from MCASP */ + dmas = <&edma_xbar 8 0 16 &edma_xbar 9 0 17 + &edma_xbar 10 0 18 &edma_xbar 11 0 19>; + dma-names = "tx0", "rx0", "tx1", "rx1"; +}; + +&mcspi4 { + pinctrl-names = "default"; + pinctrl-0 = <&spi4_pins>; + + /* DMA event numbers stolen from MCASP, MCBSP */ + dmas = <&edma_xbar 12 0 20 &edma_xbar 13 0 21>; + dma-names = "tx0", "rx0"; +}; diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index f4a20cade808..4c6ee37ea573 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -71,7 +71,7 @@ pinctrl-0 = <&matrix_keypad_default>; pinctrl-1 = <&matrix_keypad_sleep>; - linux,wakeup; + wakeup-source; row-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH /* Bank0, pin3 */ &gpio4 3 GPIO_ACTIVE_HIGH /* Bank4, pin3 */ diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi index ca0896f80248..85c6f4ff1824 100644 --- a/arch/arm/boot/dts/am437x-l4.dtsi +++ b/arch/arm/boot/dts/am437x-l4.dtsi @@ -280,12 +280,6 @@ #size-cells = <1>; ranges = <0 0 0x4000>; - phy_sel: cpsw-phy-sel@650 { - compatible = "ti,am43xx-cpsw-phy-sel"; - reg= <0x650 0x4>; - reg-names = "gmii-sel"; - }; - am43xx_pinmux: pinmux@800 { compatible = "ti,am437-padconf", "pinctrl-single"; @@ -300,11 +294,17 @@ }; scm_conf: scm_conf@0 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x0 0x800>; #address-cells = <1>; #size-cells = <1>; + phy_gmii_sel: phy-gmii-sel { + compatible = "ti,am43xx-phy-gmii-sel"; + reg = <0x650 0x4>; + #phy-cells = <2>; + }; + scm_clocks: clocks { #address-cells = <1>; #size-cells = <0>; @@ -555,7 +555,6 @@ cpts_clock_shift = <29>; ranges = <0 0 0x8000>; syscon = <&scm_conf>; - cpsw-phy-sel = <&phy_sel>; davinci_mdio: mdio@1000 { compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio"; @@ -572,11 +571,13 @@ cpsw_emac0: slave@200 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 1 0>; }; cpsw_emac1: slave@300 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 2 0>; }; }; }; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 4ea753b3ee43..9dfd80e3b76e 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -584,10 +584,7 @@ &cpsw_emac0 { phy-handle = <ðphy0>; phy-mode = "rmii"; -}; - -&phy_sel { - rmii-clock-ext; + phys = <&phy_gmii_sel 1 1>; }; &i2c0 { diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi index 0e4c7c4c8c09..610506723ea5 100644 --- a/arch/arm/boot/dts/arm-realview-eb.dtsi +++ b/arch/arm/boot/dts/arm-realview-eb.dtsi @@ -22,9 +22,10 @@ #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; compatible = "arm,realview-eb"; chosen { }; @@ -38,6 +39,7 @@ }; memory { + device_type = "memory"; /* 128 MiB memory @ 0x0 */ reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts index 83e0fbc4a1a1..cbbb8878daa3 100644 --- a/arch/arm/boot/dts/arm-realview-pb1176.dts +++ b/arch/arm/boot/dts/arm-realview-pb1176.dts @@ -23,9 +23,10 @@ /dts-v1/; #include #include -#include 
"skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; model = "ARM RealView PB1176"; compatible = "arm,realview-pb1176"; @@ -40,6 +41,7 @@ }; memory { + device_type = "memory"; /* 128 MiB memory @ 0x0 */ reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts index 2f6aa24a0b67..2015619ca22c 100644 --- a/arch/arm/boot/dts/arm-realview-pb11mp.dts +++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts @@ -23,9 +23,10 @@ /dts-v1/; #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; model = "ARM RealView PB11MPcore"; compatible = "arm,realview-pb11mp"; @@ -39,6 +40,7 @@ }; memory { + device_type = "memory"; /* * The PB11MPCore has 512 MiB memory @ 0x70000000 * and the first 256 are also remapped @ 0x00000000 diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi index 916a97734f84..a81e9c282432 100644 --- a/arch/arm/boot/dts/arm-realview-pbx.dtsi +++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi @@ -22,9 +22,10 @@ #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; compatible = "arm,realview-pbx"; chosen { }; @@ -39,6 +40,7 @@ }; memory { + device_type = "memory"; /* 128 MiB memory @ 0x0 */ reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts index 2bfb3108b5b2..c910d157a686 100644 --- a/arch/arm/boot/dts/armada-370-rd.dts +++ b/arch/arm/boot/dts/armada-370-rd.dts @@ -114,48 +114,6 @@ }; }; }; - - dsa { - status = "disabled"; - - compatible = "marvell,dsa"; - #address-cells = <2>; - #size-cells = <0>; - - dsa,ethernet = <ð1>; - dsa,mii-bus = <&mdio>; - - switch@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0x10 0>; /* MDIO address 16, switch 0 in tree */ - - port@0 { - reg = <0>; - label = "lan0"; - }; - - port@1 { - reg = <1>; - label = "lan1"; - }; - - port@2 { - reg = <2>; - label = "lan2"; - }; - - port@3 { - reg = <3>; - label = "lan3"; - }; - - port@5 { - reg = <5>; - label = "cpu"; - }; - }; - }; }; &pciec { diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts index 89a354b43978..20f8d4667753 100644 --- a/arch/arm/boot/dts/armada-388-clearfog.dts +++ b/arch/arm/boot/dts/armada-388-clearfog.dts @@ -30,64 +30,6 @@ }; }; - dsa@0 { - status = "disabled"; - - compatible = "marvell,dsa"; - dsa,ethernet = <ð1>; - dsa,mii-bus = <&mdio>; - pinctrl-0 = <&clearfog_dsa0_clk_pins &clearfog_dsa0_pins>; - pinctrl-names = "default"; - #address-cells = <2>; - #size-cells = <0>; - - switch@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <4 0>; - - port@0 { - reg = <0>; - label = "lan5"; - }; - - port@1 { - reg = <1>; - label = "lan4"; - }; - - port@2 { - reg = <2>; - label = "lan3"; - }; - - port@3 { - reg = <3>; - label = "lan2"; - }; - - port@4 { - reg = <4>; - label = "lan1"; - }; - - port@5 { - reg = <5>; - label = "cpu"; - }; - - port@6 { - /* 88E1512 external phy */ - reg = <6>; - label = "lan6"; - fixed-link { - speed = <1000>; - full-duplex; - }; - }; - }; - }; - gpio-keys { compatible = "gpio-keys"; pinctrl-0 = <&rear_button_pins>; diff --git a/arch/arm/boot/dts/armada-388-clearfog.dtsi b/arch/arm/boot/dts/armada-388-clearfog.dtsi index 1b0d0680c8b6..0d81600ca247 100644 --- a/arch/arm/boot/dts/armada-388-clearfog.dtsi +++ b/arch/arm/boot/dts/armada-388-clearfog.dtsi @@ -93,6 +93,7 @@ bm,pool-long = <2>; bm,pool-short = <1>; buffer-manager = <&bm>; + phys = <&comphy1 
1>; phy-mode = "sgmii"; status = "okay"; }; @@ -103,6 +104,7 @@ bm,pool-short = <1>; buffer-manager = <&bm>; managed = "in-band-status"; + phys = <&comphy5 2>; phy-mode = "sgmii"; sfp = <&sfp>; status = "okay"; diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 929459c42760..96c18703e471 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -9,13 +9,15 @@ * Thomas Petazzoni */ -#include "skeleton.dtsi" #include #include #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16)) / { + #address-cells = <1>; + #size-cells = <1>; + model = "Marvell Armada 38x family SoC"; compatible = "marvell,armada380"; @@ -335,6 +337,43 @@ #clock-cells = <1>; }; + comphy: phy@18300 { + compatible = "marvell,armada-380-comphy"; + reg = <0x18300 0x100>; + #address-cells = <1>; + #size-cells = <0>; + + comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + + comphy3: phy@3 { + reg = <3>; + #phy-cells = <1>; + }; + + comphy4: phy@4 { + reg = <4>; + #phy-cells = <1>; + }; + + comphy5: phy@5 { + reg = <5>; + #phy-cells = <1>; + }; + }; + coreclk: mvebu-sar@18600 { compatible = "marvell,armada-380-core-clock"; reg = <0x18600 0x04>; diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi index f0c949831efb..b1b86934c688 100644 --- a/arch/arm/boot/dts/armada-39x.dtsi +++ b/arch/arm/boot/dts/armada-39x.dtsi @@ -7,13 +7,14 @@ * Thomas Petazzoni */ -#include "skeleton.dtsi" #include #include #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16)) / { + #address-cells = <1>; + #size-cells = <1>; model = "Marvell Armada 39x family SoC"; compatible = "marvell,armada390"; diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index f3ac7483afed..5d04dc68cf57 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts @@ -144,30 +144,32 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "U-Boot"; - reg = <0 0x800000>; - }; - partition@800000 { - label = "Linux"; - reg = <0x800000 0x800000>; - }; - partition@1000000 { - label = "Filesystem"; - reg = <0x1000000 0x3f000000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "U-Boot"; + reg = <0 0x800000>; + }; + partition@800000 { + label = "Linux"; + reg = <0x800000 0x800000>; + }; + partition@1000000 { + label = "Filesystem"; + reg = <0x1000000 0x3f000000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 1139e9469a83..b4cca507cf13 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts @@ -160,12 +160,15 @@ status = "okay"; }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; + + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts 
b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index bbbb38888bb8..87dcb502f72d 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts @@ -81,49 +81,52 @@ }; - nand@d0000 { + nand-controller@d0000 { status = "okay"; - label = "pxa3xx_nand-0"; - num-cs = <1>; - marvell,nand-keep-config; - nand-on-flash-bbt; - - partitions { - compatible = "fixed-partitions"; - #address-cells = <1>; - #size-cells = <1>; - - partition@0 { - label = "u-boot"; - reg = <0x00000000 0x000e0000>; - read-only; - }; - - partition@e0000 { - label = "u-boot-env"; - reg = <0x000e0000 0x00020000>; - read-only; - }; - - partition@100000 { - label = "u-boot-env2"; - reg = <0x00100000 0x00020000>; - read-only; - }; - - partition@120000 { - label = "zImage"; - reg = <0x00120000 0x00400000>; - }; - - partition@520000 { - label = "initrd"; - reg = <0x00520000 0x00400000>; - }; - partition@e00000 { - label = "boot"; - reg = <0x00e00000 0x3f200000>; + nand@0 { + reg = <0>; + label = "pxa3xx_nand-0"; + nand-rb = <0>; + nand-on-flash-bbt; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "u-boot"; + reg = <0x00000000 0x000e0000>; + read-only; + }; + + partition@e0000 { + label = "u-boot-env"; + reg = <0x000e0000 0x00020000>; + read-only; + }; + + partition@100000 { + label = "u-boot-env2"; + reg = <0x00100000 0x00020000>; + read-only; + }; + + partition@120000 { + label = "zImage"; + reg = <0x00120000 0x00400000>; + }; + + partition@520000 { + label = "initrd"; + reg = <0x00520000 0x00400000>; + }; + + partition@e00000 { + label = "boot"; + reg = <0x00e00000 0x3f200000>; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts index 7a2606c3b62e..8480a16919a0 100644 --- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts @@ -210,53 +210,6 @@ compatible = "pwm-fan"; pwms = <&gpio0 24 4000>; }; - - dsa { - status = "disabled"; - - compatible = "marvell,dsa"; - #address-cells = <2>; - #size-cells = <0>; - - dsa,ethernet = <&eth0>; - dsa,mii-bus = <&mdio>; - - switch@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0x0 0>; /* MDIO address 0, switch 0 in tree */ - - port@0 { - reg = <0>; - label = "lan4"; - }; - - port@1 { - reg = <1>; - label = "lan3"; - }; - - port@2 { - reg = <2>; - label = "lan2"; - }; - - port@3 { - reg = <3>; - label = "lan1"; - }; - - port@4 { - reg = <4>; - label = "internet"; - }; - - port@5 { - reg = <5>; - label = "cpu"; - }; - }; - }; }; &pciec { diff --git a/arch/arm/boot/dts/artpec6.dtsi b/arch/arm/boot/dts/artpec6.dtsi index 3e4115c2cd75..037157e6c5ee 100644 --- a/arch/arm/boot/dts/artpec6.dtsi +++ b/arch/arm/boot/dts/artpec6.dtsi @@ -43,9 +43,10 @@ #include #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; compatible = "axis,artpec6"; interrupt-parent = <&intc>; diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts b/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts index bdfd8c9f3a7c..521afbea2c5b 100644 --- a/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts +++ b/arch/arm/boot/dts/aspeed-bmc-arm-stardragon4800-rep2.dts @@ -173,6 +173,16 @@ }; }; }; + + dps650ab@58 { + compatible = "delta,dps650ab"; + reg = <0x58>; + }; + + dps650ab@59 { + compatible = "delta,dps650ab"; + reg = <0x59>; + }; }; &i2c9 { diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts index f8e7b71af7e6..4c2dcac738e8 100644 --- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts +++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts @@ -21,6 +21,17 @@ memory@80000000 { reg = <0x80000000 0x20000000>; }; + + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>, + <&adc 4>, <&adc 5>, <&adc 6>; + }; + + iio-hwmon-battery { + compatible = "iio-hwmon"; + io-channels = <&adc 7>; + }; }; &fmc { @@ -43,6 +54,16 @@ }; }; +&lpc_snoop { + status = "okay"; + snoop-ports = <0x80>; +}; + +&lpc_ctrl { + // Enable lpc clock + status = "okay"; +}; + &uart1 { // Host Console status = "okay"; @@ -51,11 +72,33 @@ &pinctrl_rxd1_default>; }; +&uart2 { + // SoL Host Console + status = "okay"; +}; + +&uart3 { + // SoL BMC Console + status = "okay"; +}; + &uart5 { // BMC Console status = "okay"; }; +&kcs2 { + // BMC KCS channel 2 + status = "okay"; + kcs_addr = <0xca8>; +}; + +&kcs3 { + // BMC KCS channel 3 + status = "okay"; + kcs_addr = <0xca2>; +}; + &mac0 { status = "okay"; @@ -64,6 +107,10 @@ use-ncsi; }; +&adc { + status = "okay"; +}; + &i2c0 { status = "okay"; //Airmax Conn B, CPU0 PIROM, CPU1 PIROM @@ -122,6 +169,10 @@ &i2c8 { status = "okay"; + tmp421@1f { + compatible = "ti,tmp421"; + reg = <0x1f>; + }; //Mezz Sensor SMBus }; @@ -140,7 +191,7 @@ }; fan@1 { - reg = <0x00>; - aspeed,fan-tach-ch = /bits/ 8 <0x01>; + reg = <0x01>; + aspeed,fan-tach-ch = /bits/ 8 <0x02>; }; }; diff --git a/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts b/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts new file mode 100644 index 000000000000..2337ee23f5c4 --- /dev/null +++ b/arch/arm/boot/dts/aspeed-bmc-inspur-on5263m5.dts @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Inspur Corporation +/dts-v1/; + +#include "aspeed-g5.dtsi" +#include + +/ { + model = "ON5263M5 BMC"; + compatible = "inspur,on5263m5-bmc", "aspeed,ast2500"; + + chosen { + stdout-path = &uart5; + bootargs = "earlyprintk"; + }; + + memory { + reg = <0x80000000 0x20000000>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + vga_memory: framebuffer@9f000000 { + no-map; + reg = <0x9f000000 0x01000000>; + }; + }; + + leds { + compatible = "gpio-leds"; + bmc_alive { + label = "bmc_alive"; + gpios = <&gpio ASPEED_GPIO(I, 1) GPIO_ACTIVE_LOW>; + linux,default-trigger = "timer"; + }; + }; + + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>, + <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>; + }; + +}; + +&fmc { + status = "okay"; + flash@0 { + status = "okay"; + m25p,fast-read; + label = "bmc"; +#include "openbmc-flash-layout.dtsi" + }; +}; + +&spi1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_spi1_default>; + + flash@0 { + status = "okay"; + m25p,fast-read; + label = "pnor"; + }; +}; + +&uart5 { + status = "okay"; +}; + +&mac0 { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_rmii1_default>; + use-ncsi; +}; + +&mac1 { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>; +}; + +&i2c6 { + status = "okay"; + + tmp421@4e { + compatible = "ti,tmp421"; + reg = <0x4e>; + }; + + tmp112@48 { + compatible = "ti,tmp112"; + reg = <0x48>; + }; + + eeprom@54 { + compatible = "atmel,24c64"; + reg = <0x54>; + pagesize = <32>; + }; +}; + +&i2c7 { + status = "okay"; + + adm1278@11 { + compatible = "adi,adm1278"; + reg = <0x11>; + }; +}; + +&gfx { + 
status = "okay"; +}; + +&pinctrl { + aspeed,external-nodes = <&gfx &lhc>; +}; + +&pwm_tacho { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default>; + + fan@0 { + reg = <0x00>; + aspeed,fan-tach-ch = /bits/ 8 <0x00 0x01>; + }; + + fan@1 { + reg = <0x01>; + aspeed,fan-tach-ch = /bits/ 8 <0x02 0x03>; + }; +}; + +&adc { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts index 9aa1d4467453..b854ac0bae9a 100644 --- a/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts +++ b/arch/arm/boot/dts/aspeed-bmc-opp-palmetto.dts @@ -169,6 +169,11 @@ &i2c3 { status = "okay"; + + occ-hwmon@50 { + compatible = "ibm,p8-occ-hwmon"; + reg = <0x50>; + }; }; &i2c4 { diff --git a/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts b/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts index 385c0f4b69ee..0d7c6339da46 100644 --- a/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts +++ b/arch/arm/boot/dts/aspeed-bmc-quanta-q71l.dts @@ -116,6 +116,10 @@ status = "okay"; }; +&lpc_ctrl { + status = "okay"; +}; + &lpc_snoop { status = "okay"; snoop-ports = <0x80>; @@ -134,6 +138,10 @@ pinctrl-0 = <&pinctrl_rgmii2_default &pinctrl_mdio2_default>; }; +&uart1 { + status = "okay"; +}; + &uart5 { status = "okay"; }; diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi index 69f6b9d2e7e7..9549f867aa1e 100644 --- a/arch/arm/boot/dts/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed-g4.dtsi @@ -197,6 +197,7 @@ gpio-ranges = <&pinctrl 0 0 220>; clocks = <&syscon ASPEED_CLK_APB>; interrupt-controller; + #interrupt-cells = <2>; }; timer: timer@1e782000 { diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi index d107459fc0f8..85ed9dbec196 100644 --- a/arch/arm/boot/dts/aspeed-g5.dtsi +++ b/arch/arm/boot/dts/aspeed-g5.dtsi @@ -47,6 +47,13 @@ reg = <0x80000000 0>; }; + edac: sdram@1e6e0000 { + compatible = "aspeed,ast2500-sdram-edac"; + reg = <0x1e6e0000 0x174>; + interrupts = <0>; + status = "disabled"; + }; + ahb { compatible = "simple-bus"; #address-cells = <1>; @@ -250,6 +257,7 @@ gpio-ranges = <&pinctrl 0 0 220>; clocks = <&syscon ASPEED_CLK_APB>; interrupt-controller; + #interrupt-cells = <2>; }; timer: timer@1e782000 { @@ -330,8 +338,32 @@ ranges = <0x0 0x1e789000 0x1000>; lpc_bmc: lpc-bmc@0 { - compatible = "aspeed,ast2500-lpc-bmc"; + compatible = "aspeed,ast2500-lpc-bmc", "simple-mfd", "syscon"; reg = <0x0 0x80>; + reg-io-width = <4>; + + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x80>; + + kcs1: kcs1@0 { + compatible = "aspeed,ast2500-kcs-bmc"; + interrupts = <8>; + kcs_chan = <1>; + status = "disabled"; + }; + kcs2: kcs2@0 { + compatible = "aspeed,ast2500-kcs-bmc"; + interrupts = <8>; + kcs_chan = <2>; + status = "disabled"; + }; + kcs3: kcs3@0 { + compatible = "aspeed,ast2500-kcs-bmc"; + interrupts = <8>; + kcs_chan = <3>; + status = "disabled"; + }; }; lpc_host: lpc-host@80 { @@ -343,6 +375,13 @@ #size-cells = <1>; ranges = <0x0 0x80 0x1e0>; + kcs4: kcs4@0 { + compatible = "aspeed,ast2500-kcs-bmc"; + interrupts = <8>; + kcs_chan = <4>; + status = "disabled"; + }; + lpc_ctrl: lpc-ctrl@0 { compatible = "aspeed,ast2500-lpc-ctrl"; reg = <0x0 0x80>; diff --git a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts index 0f6d335125e2..f245944bd5d7 100644 --- a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts +++ b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts @@ -22,7 +22,7 @@ wakeup { label = "Wakeup"; linux,code = 
<10>; - gpio-key,wakeup; + wakeup-source; gpios = <&pioB 27 GPIO_ACTIVE_LOW>; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi index cf0087b4c9e1..33a159c0163f 100644 --- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi +++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi @@ -62,6 +62,20 @@ ahb { apb { + qspi1: spi@f0024000 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_qspi1_default>; + + flash@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <80000000>; + spi-tx-bus-width = <4>; + spi-rx-bus-width = <4>; + m25p,fast-read; + }; + }; + macb0: ethernet@f8008000 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_macb0_default>; @@ -78,6 +92,22 @@ pinctrl@fc038000 { + pinctrl_qspi1_default: qspi1_default { + sck_cs { + pinmux = , + ; + bias-disable; + }; + + data { + pinmux = , + , + , + ; + bias-pull-up; + }; + }; + pinctrl_macb0_default: macb0_default { pinmux = , , diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts index 4a258867ddf1..a48180555ef5 100644 --- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts +++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts @@ -109,6 +109,10 @@ status = "okay"; }; + qspi1: spi@f0024000 { + status = "okay"; + }; + spi0: spi@f8000000 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_spi0_default>; diff --git a/arch/arm/boot/dts/at91-wb45n.dts b/arch/arm/boot/dts/at91-wb45n.dts index 5b9512a6c89c..54d130c92185 100644 --- a/arch/arm/boot/dts/at91-wb45n.dts +++ b/arch/arm/boot/dts/at91-wb45n.dts @@ -22,7 +22,7 @@ label = "IRQBTN"; linux,code = <99>; gpios = <&pioB 18 GPIO_ACTIVE_LOW>; - gpio-key,wakeup = <1>; + wakeup-source; }; }; }; diff --git a/arch/arm/boot/dts/at91-wb50n.dts b/arch/arm/boot/dts/at91-wb50n.dts index 8cecc7051a86..a5e45bb95c04 100644 --- a/arch/arm/boot/dts/at91-wb50n.dts +++ b/arch/arm/boot/dts/at91-wb50n.dts @@ -23,7 +23,7 @@ label = "BTNESC"; linux,code = <1>; /* ESC button */ gpios = <&pioA 10 GPIO_ACTIVE_LOW>; - gpio-key,wakeup = <1>; + wakeup-source; }; irqbtn@31 { @@ -31,7 +31,7 @@ label = "IRQBTN"; linux,code = <99>; /* SysReq button */ gpios = <&pioE 31 GPIO_ACTIVE_LOW>; - gpio-key,wakeup = <1>; + wakeup-source; }; }; diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi index 2ad69a7fbc00..5a882a053816 100644 --- a/arch/arm/boot/dts/at91rm9200.dtsi +++ b/arch/arm/boot/dts/at91rm9200.dtsi @@ -10,13 +10,14 @@ * Licensed under GPLv2 or later. */ -#include "skeleton.dtsi" #include #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91RM9200 family SoC"; compatible = "atmel,at91rm9200"; interrupt-parent = <&aic>; @@ -49,6 +50,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi index 7cd9c3bc4dfb..3b58b94b53c9 100644 --- a/arch/arm/boot/dts/at91sam9260.dtsi +++ b/arch/arm/boot/dts/at91sam9260.dtsi @@ -8,13 +8,14 @@ * Licensed under GPLv2 or later. 
*/ -#include "skeleton.dtsi" #include #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91SAM9260 family SoC"; compatible = "atmel,at91sam9260"; interrupt-parent = <&aic>; @@ -46,6 +47,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi index 01d700b63b45..a907a1fdd24c 100644 --- a/arch/arm/boot/dts/at91sam9261.dtsi +++ b/arch/arm/boot/dts/at91sam9261.dtsi @@ -6,13 +6,14 @@ * Licensed under GPLv2 only. */ -#include "skeleton.dtsi" #include #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91SAM9261 family SoC"; compatible = "atmel,at91sam9261"; interrupt-parent = <&aic>; @@ -43,6 +44,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi index c5766da4e54e..3fb63d81f18e 100644 --- a/arch/arm/boot/dts/at91sam9263.dtsi +++ b/arch/arm/boot/dts/at91sam9263.dtsi @@ -6,13 +6,14 @@ * Licensed under GPLv2 only. */ -#include "skeleton.dtsi" #include #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91SAM9263 family SoC"; compatible = "atmel,at91sam9263"; interrupt-parent = <&aic>; @@ -45,6 +46,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index d16db1fa7e15..f36819607131 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -9,7 +9,6 @@ * Licensed under GPLv2 or later. */ -#include "skeleton.dtsi" #include #include #include @@ -17,6 +16,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91SAM9G45 family SoC"; compatible = "atmel,at91sam9g45"; interrupt-parent = <&aic>; @@ -51,6 +52,7 @@ }; memory { + device_type = "memory"; reg = <0x70000000 0x10000000>; }; diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi index 37cb81f457b5..f71d65e6e510 100644 --- a/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/arch/arm/boot/dts/at91sam9n12.dtsi @@ -7,7 +7,6 @@ * Licensed under GPLv2 or later. */ -#include "skeleton.dtsi" #include #include #include @@ -15,6 +14,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91SAM9N12 SoC"; compatible = "atmel,at91sam9n12"; interrupt-parent = <&aic>; @@ -47,6 +48,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x10000000>; }; diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi index 3862ff2f26e0..6b5777f3c20b 100644 --- a/arch/arm/boot/dts/at91sam9rl.dtsi +++ b/arch/arm/boot/dts/at91sam9rl.dtsi @@ -7,7 +7,6 @@ * Licensed under GPLv2 or later. */ -#include "skeleton.dtsi" #include #include #include @@ -15,6 +14,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91SAM9RL family SoC"; compatible = "atmel,at91sam9rl", "atmel,at91sam9"; interrupt-parent = <&aic>; @@ -48,6 +49,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 07443a387a8f..79c4956d3902 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -9,7 +9,6 @@ * Licensed under GPLv2 or later. 
*/ -#include "skeleton.dtsi" #include #include #include @@ -17,6 +16,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel AT91SAM9x5 family SoC"; compatible = "atmel,at91sam9x5"; interrupt-parent = <&aic>; @@ -49,6 +50,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x10000000>; }; diff --git a/arch/arm/boot/dts/atlas6-evb.dts b/arch/arm/boot/dts/atlas6-evb.dts index ab042ca8dea1..40882419309d 100644 --- a/arch/arm/boot/dts/atlas6-evb.dts +++ b/arch/arm/boot/dts/atlas6-evb.dts @@ -15,6 +15,7 @@ compatible = "sirf,atlas6-cb", "sirf,atlas6"; memory { + device_type = "memory"; reg = <0x00000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi index 29598667420b..5587b98032a3 100644 --- a/arch/arm/boot/dts/atlas6.dtsi +++ b/arch/arm/boot/dts/atlas6.dtsi @@ -6,7 +6,6 @@ * Licensed under GPLv2 or later. */ -/include/ "skeleton.dtsi" / { compatible = "sirf,atlas6"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi index 83449b33de6b..f3de9af35b4d 100644 --- a/arch/arm/boot/dts/atlas7.dtsi +++ b/arch/arm/boot/dts/atlas7.dtsi @@ -6,7 +6,6 @@ * Licensed under GPLv2 or later. */ -/include/ "skeleton.dtsi" / { compatible = "sirf,atlas7"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/axm55xx.dtsi b/arch/arm/boot/dts/axm55xx.dtsi index 47799f59faa5..2a93d3ee3b66 100644 --- a/arch/arm/boot/dts/axm55xx.dtsi +++ b/arch/arm/boot/dts/axm55xx.dtsi @@ -12,9 +12,9 @@ #include #include -#include "skeleton64.dtsi" - / { + #address-cells = <2>; + #size-cells = <2>; interrupt-parent = <&gic>; aliases { diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 253df7170a4e..5f7b46503a51 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -34,9 +34,9 @@ #include #include -#include "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "brcm,cygnus"; model = "Broadcom Cygnus SoC"; interrupt-parent = <&gic>; @@ -45,6 +45,11 @@ ethernet0 = ð0; }; + memory { + device_type = "memory"; + reg = <0 0>; + }; + cpus { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi index 0d2538b46139..6925b30c2253 100644 --- a/arch/arm/boot/dts/bcm-nsp.dtsi +++ b/arch/arm/boot/dts/bcm-nsp.dtsi @@ -34,9 +34,9 @@ #include #include -#include "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "brcm,nsp"; model = "Broadcom Northstar Plus SoC"; interrupt-parent = <&gic>; diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index db7cded1b7ad..b99c2e579622 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi @@ -16,9 +16,9 @@ #include "dt-bindings/clock/bcm281xx.h" -#include "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; model = "BCM11351 SoC"; compatible = "brcm,bcm11351"; interrupt-parent = <&gic>; diff --git a/arch/arm/boot/dts/bcm21664-garnet.dts b/arch/arm/boot/dts/bcm21664-garnet.dts index e87cb26ddf84..8b045cfab64b 100644 --- a/arch/arm/boot/dts/bcm21664-garnet.dts +++ b/arch/arm/boot/dts/bcm21664-garnet.dts @@ -22,6 +22,7 @@ compatible = "brcm,bcm21664-garnet", "brcm,bcm21664"; memory { + device_type = "memory"; reg = <0x80000000 0x40000000>; /* 1 GB */ }; diff --git a/arch/arm/boot/dts/bcm21664.dtsi b/arch/arm/boot/dts/bcm21664.dtsi index 266f2611dc22..758daa334148 100644 --- a/arch/arm/boot/dts/bcm21664.dtsi +++ b/arch/arm/boot/dts/bcm21664.dtsi 
@@ -16,9 +16,9 @@ #include "dt-bindings/clock/bcm21664.h" -#include "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; model = "BCM21664 SoC"; compatible = "brcm,bcm21664"; interrupt-parent = <&gic>; diff --git a/arch/arm/boot/dts/bcm23550-sparrow.dts b/arch/arm/boot/dts/bcm23550-sparrow.dts index 4d525ccb48c8..1c66b15f3013 100644 --- a/arch/arm/boot/dts/bcm23550-sparrow.dts +++ b/arch/arm/boot/dts/bcm23550-sparrow.dts @@ -46,6 +46,7 @@ }; memory { + device_type = "memory"; reg = <0x80000000 0x20000000>; /* 512 MB */ }; }; diff --git a/arch/arm/boot/dts/bcm23550.dtsi b/arch/arm/boot/dts/bcm23550.dtsi index a7a643f38385..701198f5f498 100644 --- a/arch/arm/boot/dts/bcm23550.dtsi +++ b/arch/arm/boot/dts/bcm23550.dtsi @@ -36,9 +36,9 @@ /* BCM23550 and BCM21664 have almost identical clocks */ #include "dt-bindings/clock/bcm21664.h" -#include "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; model = "BCM23550 SoC"; compatible = "brcm,bcm23550"; interrupt-parent = <&gic>; diff --git a/arch/arm/boot/dts/bcm28155-ap.dts b/arch/arm/boot/dts/bcm28155-ap.dts index 9ce91dd60cb6..fbfca83bd28f 100644 --- a/arch/arm/boot/dts/bcm28155-ap.dts +++ b/arch/arm/boot/dts/bcm28155-ap.dts @@ -22,6 +22,7 @@ compatible = "brcm,bcm28155-ap", "brcm,bcm11351"; memory { + device_type = "memory"; reg = <0x80000000 0x40000000>; /* 1 GB */ }; diff --git a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts index 2cd9c5e4f892..db8a6017f220 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-a-plus.dts @@ -31,8 +31,8 @@ * "FOO" = GPIO line named "FOO" on the schematic * "FOO_N" = GPIO line named "FOO" on schematic, active low */ - gpio-line-names = "SDA0", - "SCL0", + gpio-line-names = "ID_SDA", + "ID_SCL", "SDA1", "SCL1", "GPIO_GCLK", diff --git a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts index cfbdaacbaeba..1e40d672b055 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-b-plus.dts @@ -33,8 +33,8 @@ * "FOO" = GPIO line named "FOO" on the schematic * "FOO_N" = GPIO line named "FOO" on schematic, active low */ - gpio-line-names = "SDA0", - "SCL0", + gpio-line-names = "ID_SDA", + "ID_SCL", "SDA1", "SCL1", "GPIO_GCLK", diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts index 644d907bafbb..ba0167df6c5f 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts @@ -25,8 +25,6 @@ wifi_pwrseq: wifi-pwrseq { compatible = "mmc-pwrseq-simple"; - pinctrl-names = "default"; - pinctrl-0 = <&wl_on>; reset-gpios = <&gpio 41 GPIO_ACTIVE_LOW>; }; }; @@ -40,8 +38,8 @@ * "FOO" = GPIO line named "FOO" on the schematic * "FOO_N" = GPIO line named "FOO" on schematic, active low */ - gpio-line-names = "GPIO0", - "GPIO1", + gpio-line-names = "ID_SDA", + "ID_SCL", "SDA1", "SCL1", "GPIO_GCLK", @@ -98,11 +96,6 @@ "SD_DATA3_R"; pinctrl-0 = <&gpioout &alt0>; - - wl_on: wl-on { - brcm,pins = <41>; - brcm,function = ; - }; }; &hdmi { diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero.dts b/arch/arm/boot/dts/bcm2835-rpi-zero.dts index 00323ba8f7de..3b35a8a4a55f 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero.dts @@ -28,8 +28,8 @@ * "FOO" = GPIO line named "FOO" on the schematic * "FOO_N" = GPIO line named "FOO" on schematic, active low */ - gpio-line-names = "SDA0", - "SCL0", + gpio-line-names = "ID_SDA", + "ID_SCL", "SDA1", "SCL1", "GPIO_GCLK", 
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi index 29f970f864dc..715d50c64529 100644 --- a/arch/arm/boot/dts/bcm2835-rpi.dtsi +++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi @@ -1,7 +1,7 @@ #include / { - memory { + memory@0 { device_type = "memory"; reg = <0 0x10000000>; }; @@ -19,8 +19,6 @@ soc { firmware: firmware { compatible = "raspberrypi,bcm2835-firmware", "simple-bus"; - #address-cells = <0>; - #size-cells = <0>; mboxes = <&mailbox>; }; @@ -87,10 +85,6 @@ power-domains = <&power RPI_POWER_DOMAIN_USB>; }; -&v3d { - power-domains = <&power RPI_POWER_DOMAIN_V3D>; -}; - &hdmi { power-domains = <&power RPI_POWER_DOMAIN_HDMI>; status = "okay"; diff --git a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts index ac4408b34b58..7b4e651bafdd 100644 --- a/arch/arm/boot/dts/bcm2836-rpi-2-b.dts +++ b/arch/arm/boot/dts/bcm2836-rpi-2-b.dts @@ -9,7 +9,7 @@ compatible = "raspberrypi,2-model-b", "brcm,bcm2836"; model = "Raspberry Pi 2 Model B"; - memory { + memory@0 { reg = <0 0x40000000>; }; @@ -28,6 +28,72 @@ }; &gpio { + /* + * Taken from rpi_SCH_2b_1p2_reduced.pdf and + * the official GPU firmware DT blob. + * + * Legend: + * "NC" = not connected (no rail from the SoC) + * "FOO" = GPIO line named "FOO" on the schematic + * "FOO_N" = GPIO line named "FOO" on schematic, active low + */ + gpio-line-names = "ID_SDA", + "ID_SCL", + "SDA1", + "SCL1", + "GPIO_GCLK", + "GPIO5", + "GPIO6", + "SPI_CE1_N", + "SPI_CE0_N", + "SPI_MISO", + "SPI_MOSI", + "SPI_SCLK", + "GPIO12", + "GPIO13", + /* Serial port */ + "TXD0", + "RXD0", + "GPIO16", + "GPIO17", + "GPIO18", + "GPIO19", + "GPIO20", + "GPIO21", + "GPIO22", + "GPIO23", + "GPIO24", + "GPIO25", + "GPIO26", + "GPIO27", + "SDA0", + "SCL0", + "", /* GPIO30 */ + "LAN_RUN", + "CAM_GPIO1", + "", /* GPIO33 */ + "", /* GPIO34 */ + "PWR_LOW_N", + "", /* GPIO36 */ + "", /* GPIO37 */ + "USB_LIMIT", + "", /* GPIO39 */ + "PWM0_OUT", + "CAM_GPIO0", + "SMPS_SCL", + "SMPS_SDA", + "ETHCLK", + "PWM1_OUT", + "HDMI_HPD_N", + "STATUS_LED", + /* Used by SD Card */ + "SD_CLK_R", + "SD_CMD_R", + "SD_DATA0_R", + "SD_DATA1_R", + "SD_DATA2_R", + "SD_DATA3_R"; + pinctrl-0 = <&gpioout &alt0 &i2s_alt0>; /* I2S interface */ diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts new file mode 100644 index 000000000000..7f4437a8eedb --- /dev/null +++ b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; +#include "bcm2837.dtsi" +#include "bcm2836-rpi.dtsi" +#include "bcm283x-rpi-usb-host.dtsi" + +/ { + compatible = "raspberrypi,3-model-a-plus", "brcm,bcm2837"; + model = "Raspberry Pi 3 Model A+"; + + chosen { + /* 8250 auxiliary UART instead of pl011 */ + stdout-path = "serial1:115200n8"; + }; + + memory@0 { + reg = <0 0x20000000>; + }; + + leds { + act { + gpios = <&gpio 29 GPIO_ACTIVE_HIGH>; + }; + + pwr { + label = "PWR"; + gpios = <&expgpio 2 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&firmware { + expgpio: gpio { + compatible = "raspberrypi,firmware-gpio"; + gpio-controller; + #gpio-cells = <2>; + gpio-line-names = "", + "BT_WL_ON", + "STATUS_LED_R", + "", + "", + "CAM_GPIO0", + "CAM_GPIO1", + ""; + status = "okay"; + }; +}; + +&gpio { + /* + * This is mostly based on the official GPU firmware DT blob. 
+ * + * Legend: + * "NC" = not connected (no rail from the SoC) + * "FOO" = GPIO line named "FOO" on the schematic + * "FOO_N" = GPIO line named "FOO" on schematic, active low + */ + gpio-line-names = "ID_SDA", + "ID_SCL", + "SDA1", + "SCL1", + "GPIO_GCLK", + "GPIO5", + "GPIO6", + "SPI_CE1_N", + "SPI_CE0_N", + "SPI_MISO", + "SPI_MOSI", + "SPI_SCLK", + "GPIO12", + "GPIO13", + /* Serial port */ + "TXD1", + "RXD1", + "GPIO16", + "GPIO17", + "GPIO18", + "GPIO19", + "GPIO20", + "GPIO21", + "GPIO22", + "GPIO23", + "GPIO24", + "GPIO25", + "GPIO26", + "GPIO27", + "HDMI_HPD_N", + "STATUS_LED_G", + /* Used by BT module */ + "CTS0", + "RTS0", + "TXD0", + "RXD0", + /* Used by Wifi */ + "SD1_CLK", + "SD1_CMD", + "SD1_DATA0", + "SD1_DATA1", + "SD1_DATA2", + "SD1_DATA3", + "PWM0_OUT", + "PWM1_OUT", + "", /* GPIO42 */ + "WIFI_CLK", + "SDA0", + "SCL0", + "SMPS_SCL", + "SMPS_SDA", + /* Used by SD Card */ + "SD_CLK_R", + "SD_CMD_R", + "SD_DATA0_R", + "SD_DATA1_R", + "SD_DATA2_R", + "SD_DATA3_R"; +}; + +&hdmi { + hpd-gpios = <&gpio 28 GPIO_ACTIVE_LOW>; +}; + +&pwm { + pinctrl-names = "default"; + pinctrl-0 = <&pwm0_gpio40 &pwm1_gpio41>; + status = "okay"; +}; + +/* + * SDHCI is used to control the SDIO for wireless + * + * WL_REG_ON and BT_REG_ON of the CYW43455 Wifi/BT module are driven + * by a single GPIO. We can't give GPIO control to one of the drivers, + * otherwise the other part would get unexpectedly disturbed. + */ +&sdhci { + #address-cells = <1>; + #size-cells = <0>; + pinctrl-names = "default"; + pinctrl-0 = <&emmc_gpio34>; + status = "okay"; + bus-width = <4>; + non-removable; + + brcmf: wifi@1 { + reg = <1>; + compatible = "brcm,bcm4329-fmac"; + }; +}; + +/* SDHOST is used to drive the SD card */ +&sdhost { + pinctrl-names = "default"; + pinctrl-0 = <&sdhost_gpio48>; + status = "okay"; + bus-width = <4>; +}; + +/* uart0 communicates with the BT module */ +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_ctsrts_gpio30 &uart0_gpio32 &gpclk2_gpio43>; + status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + max-speed = <2000000>; + }; +}; + +/* uart1 is mapped to the pin header */ +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&uart1_gpio14>; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts index 42bb09044cc7..c6fa34c24100 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts @@ -14,7 +14,7 @@ stdout-path = "serial1:115200n8"; }; - memory { + memory@0 { reg = <0 0x40000000>; }; @@ -42,7 +42,7 @@ #gpio-cells = <2>; gpio-line-names = "BT_ON", "WL_ON", - "STATUS_LED", + "STATUS_LED_R", "LAN_RUN", "", "CAM_GPIO0", @@ -52,6 +52,76 @@ }; }; +&gpio { + /* + * Taken from rpi_SCH_3bplus_1p0_reduced.pdf and + * the official GPU firmware DT blob. 
+ * + * Legend: + * "NC" = not connected (no rail from the SoC) + * "FOO" = GPIO line named "FOO" on the schematic + * "FOO_N" = GPIO line named "FOO" on schematic, active low + */ + gpio-line-names = "ID_SDA", + "ID_SCL", + "SDA1", + "SCL1", + "GPIO_GCLK", + "GPIO5", + "GPIO6", + "SPI_CE1_N", + "SPI_CE0_N", + "SPI_MISO", + "SPI_MOSI", + "SPI_SCLK", + "GPIO12", + "GPIO13", + /* Serial port */ + "TXD1", + "RXD1", + "GPIO16", + "GPIO17", + "GPIO18", + "GPIO19", + "GPIO20", + "GPIO21", + "GPIO22", + "GPIO23", + "GPIO24", + "GPIO25", + "GPIO26", + "GPIO27", + "HDMI_HPD_N", + "STATUS_LED_G", + /* Used by BT module */ + "CTS0", + "RTS0", + "TXD0", + "RXD0", + /* Used by Wifi */ + "SD1_CLK", + "SD1_CMD", + "SD1_DATA0", + "SD1_DATA1", + "SD1_DATA2", + "SD1_DATA3", + "PWM0_OUT", + "PWM1_OUT", + "ETHCLK", + "WIFI_CLK", + "SDA0", + "SCL0", + "SMPS_SCL", + "SMPS_SDA", + /* Used by SD Card */ + "SD_CLK_R", + "SD_CMD_R", + "SD_DATA0_R", + "SD_DATA1_R", + "SD_DATA2_R", + "SD_DATA3_R"; +}; + &hdmi { hpd-gpios = <&gpio 28 GPIO_ACTIVE_LOW>; }; diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts index 0c155dd4f396..ce71f578c51a 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts @@ -14,7 +14,7 @@ stdout-path = "serial1:115200n8"; }; - memory { + memory@0 { reg = <0 0x40000000>; }; @@ -39,7 +39,7 @@ "WL_ON", "STATUS_LED", "LAN_RUN", - "HPD_N", + "HDMI_HPD_N", "CAM_GPIO0", "CAM_GPIO1", "PWR_LOW_N"; @@ -47,6 +47,76 @@ }; }; +&gpio { + /* + * Taken from rpi_SCH_3b_1p2_reduced.pdf and + * the official GPU firmware DT blob. + * + * Legend: + * "NC" = not connected (no rail from the SoC) + * "FOO" = GPIO line named "FOO" on the schematic + * "FOO_N" = GPIO line named "FOO" on schematic, active low + */ + gpio-line-names = "ID_SDA", + "ID_SCL", + "SDA1", + "SCL1", + "GPIO_GCLK", + "GPIO5", + "GPIO6", + "SPI_CE1_N", + "SPI_CE0_N", + "SPI_MISO", + "SPI_MOSI", + "SPI_SCLK", + "GPIO12", + "GPIO13", + /* Serial port */ + "TXD1", + "RXD1", + "GPIO16", + "GPIO17", + "GPIO18", + "GPIO19", + "GPIO20", + "GPIO21", + "GPIO22", + "GPIO23", + "GPIO24", + "GPIO25", + "GPIO26", + "GPIO27", + "", /* GPIO 28 */ + "LAN_RUN_BOOT", + /* Used by BT module */ + "CTS0", + "RTS0", + "TXD0", + "RXD0", + /* Used by Wifi */ + "SD1_CLK", + "SD1_CMD", + "SD1_DATA0", + "SD1_DATA1", + "SD1_DATA2", + "SD1_DATA3", + "PWM0_OUT", + "PWM1_OUT", + "ETHCLK", + "WIFI_CLK", + "SDA0", + "SCL0", + "SMPS_SCL", + "SMPS_SDA", + /* Used by SD Card */ + "SD_CLK_R", + "SD_CMD_R", + "SD_DATA0_R", + "SD_DATA1_R", + "SD_DATA2_R", + "SD_DATA3_R"; +}; + &pwm { pinctrl-names = "default"; pinctrl-0 = <&pwm0_gpio40 &pwm1_gpio41>; diff --git a/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi b/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi index 4a89a1885a3d..81399b2c5af9 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi +++ b/arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi @@ -4,7 +4,7 @@ #include "bcm2836-rpi.dtsi" / { - memory { + memory@0 { reg = <0 0x40000000>; }; diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 31b29646b14c..9777644c6c2b 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi @@ -3,6 +3,7 @@ #include #include #include +#include /* firmware-provided startup stubs live here, where the secondary CPUs are * spinning. 
@@ -120,9 +121,18 @@ #interrupt-cells = <2>; }; - watchdog@7e100000 { - compatible = "brcm,bcm2835-pm-wdt"; - reg = <0x7e100000 0x28>; + pm: watchdog@7e100000 { + compatible = "brcm,bcm2835-pm", "brcm,bcm2835-pm-wdt"; + #power-domain-cells = <1>; + #reset-cells = <1>; + reg = <0x7e100000 0x114>, + <0x7e00a000 0x24>; + clocks = <&clocks BCM2835_CLOCK_V3D>, + <&clocks BCM2835_CLOCK_PERI_IMAGE>, + <&clocks BCM2835_CLOCK_H264>, + <&clocks BCM2835_CLOCK_ISP>; + clock-names = "v3d", "peri_image", "h264", "isp"; + system-power-controller; }; clocks: cprman@7e101000 { @@ -629,6 +639,7 @@ compatible = "brcm,bcm2835-v3d"; reg = <0x7ec00000 0x1000>; interrupts = <1 10>; + power-domains = <&pm BCM2835_POWER_DOMAIN_GRAFX_V3D>; }; vc4: gpu { diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts index 76a2bab3bc6f..fe842f2f1ca7 100644 --- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts +++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts @@ -20,6 +20,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts index 69e3570e03dd..6fcbb0509ba0 100644 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts @@ -20,6 +20,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts index 0f6f0fe13bfb..b3e8cc90b13f 100644 --- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts +++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts @@ -20,6 +20,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts b/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts index f77089744996..fdeaa895512f 100644 --- a/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts +++ b/arch/arm/boot/dts/bcm47081-luxul-xap-1410.dts @@ -16,6 +16,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts index 4d427863756f..0d510cb15ec3 100644 --- a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts +++ b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts @@ -17,6 +17,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts index 189cc3dcd6ef..962e89edba11 100644 --- a/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts +++ b/arch/arm/boot/dts/bcm47081-tplink-archer-c5-v2.dts @@ -16,6 +16,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts index 03c1ab188576..658a56ff8a5c 100644 --- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts +++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts @@ -20,6 +20,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts index 36efe410dcd7..5fd47eec4407 100644 --- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts +++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts @@ -17,6 +17,7 @@ }; memory { + 
device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts index 3e5e9972cd97..6604be6ff0a0 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts @@ -17,6 +17,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x18000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts b/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts index 7fd85475893d..567ebbd5a0e9 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xap-1610.dts @@ -16,6 +16,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts index 7acbecd42950..ac2d136ed334 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts @@ -17,6 +17,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x18000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts index f4558d9d2769..74371e821b1a 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts @@ -17,6 +17,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts index bdad7267255a..b44af63ee310 100644 --- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts +++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts @@ -17,6 +17,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x18000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts index 30719380b6c0..eebc0d43e220 100644 --- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts +++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts @@ -17,6 +17,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000 0x88000000 0x18000000>; }; diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts new file mode 100644 index 000000000000..ec09c0426d16 --- /dev/null +++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT +/* + * Copyright (C) 2017 Hamster Tian + * Copyright (C) 2019 Hao Dong + */ + +/dts-v1/; + +#include "bcm47094.dtsi" +#include "bcm5301x-nand-cs0-bch4.dtsi" + +/ { + compatible = "phicomm,k3", "brcm,bcm47094", "brcm,bcm4708"; + model = "Phicomm K3"; + + memory { + reg = <0x00000000 0x08000000 + 0x88000000 0x18000000>; + }; + + gpio-keys { + compatible = "gpio-keys"; + #address-cells = <1>; + #size-cells = <0>; + + restart { + label = "Reset"; + linux,code = ; + gpios = <&chipcommon 17 GPIO_ACTIVE_LOW>; + }; + }; +}; + +&uart1 { + status = "okay"; +}; + +&usb3_phy { + status = "okay"; +}; + +&nandcs { + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "boot"; + reg = <0x0000000 0x0080000>; + read-only; + }; + + partition@80000 { + label = "nvram"; + reg = <0x0080000 0x0100000>; + }; + + partition@180000{ + label = "phicomm"; + reg = <0x0180000 0x0280000>; + read-only; + }; + + partition@400000 { + label = "firmware"; + reg = <0x0400000 
0x7C00000>; + compatible = "brcm,trx"; + }; + }; +}; diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts index 74c83b0ca54e..eb59508578e4 100644 --- a/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts +++ b/arch/arm/boot/dts/bcm47189-luxul-xap-1440.dts @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT /* * Copyright 2017 Luxul Inc. - * - * Licensed under the ISC license. */ /dts-v1/; @@ -17,6 +16,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts index 214df18f3a75..4c71f5e95e00 100644 --- a/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts +++ b/arch/arm/boot/dts/bcm47189-luxul-xap-810.dts @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT /* * Copyright 2017 Luxul Inc. - * - * Licensed under the ISC license. */ /dts-v1/; @@ -17,6 +16,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts index e15e2a1e9d8c..5ad53ea52d0a 100644 --- a/arch/arm/boot/dts/bcm47189-tenda-ac9.dts +++ b/arch/arm/boot/dts/bcm47189-tenda-ac9.dts @@ -16,6 +16,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index fd7af943fb0b..ac5266ee8d4c 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -13,9 +13,10 @@ #include #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; interrupt-parent = <&gic>; chipcommonA { diff --git a/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts b/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts index 431cda514230..2e7fda9b998c 100644 --- a/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts +++ b/arch/arm/boot/dts/bcm53340-ubnt-unifi-switch8.dts @@ -20,6 +20,7 @@ }; memory@0 { + device_type = "memory"; reg = <0x00000000 0x08000000>, <0x68000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm53573.dtsi b/arch/arm/boot/dts/bcm53573.dtsi index 5054fa9eb0d0..b29695bd4855 100644 --- a/arch/arm/boot/dts/bcm53573.dtsi +++ b/arch/arm/boot/dts/bcm53573.dtsi @@ -7,9 +7,10 @@ #include #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; interrupt-parent = <&gic>; aliases { diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi index f59764008b9c..e6a41e1b27fd 100644 --- a/arch/arm/boot/dts/bcm63138.dtsi +++ b/arch/arm/boot/dts/bcm63138.dtsi @@ -6,9 +6,9 @@ #include #include -#include "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "brcm,bcm63138"; model = "Broadcom BCM63138 DSL SoC"; interrupt-parent = <&gic>; diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi index c859aa6f358c..504a63236a5e 100644 --- a/arch/arm/boot/dts/bcm7445.dtsi +++ b/arch/arm/boot/dts/bcm7445.dtsi @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include -#include "skeleton.dtsi" - / { #address-cells = <2>; #size-cells = <2>; diff --git a/arch/arm/boot/dts/bcm947189acdbmr.dts b/arch/arm/boot/dts/bcm947189acdbmr.dts index ef263412fea5..4991700ae6b0 100644 --- a/arch/arm/boot/dts/bcm947189acdbmr.dts +++ b/arch/arm/boot/dts/bcm947189acdbmr.dts @@ -18,6 +18,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/bcm953012er.dts 
b/arch/arm/boot/dts/bcm953012er.dts index 17f63c7a0437..250a1d6f2d05 100644 --- a/arch/arm/boot/dts/bcm953012er.dts +++ b/arch/arm/boot/dts/bcm953012er.dts @@ -40,6 +40,7 @@ compatible = "brcm,bcm953012er", "brcm,brcm53012", "brcm,bcm4708"; memory { + device_type = "memory"; reg = <0x00000000 0x8000000>; }; diff --git a/arch/arm/boot/dts/bcm953012hr.dts b/arch/arm/boot/dts/bcm953012hr.dts index 11b0f5ed99e6..9140be7ec053 100644 --- a/arch/arm/boot/dts/bcm953012hr.dts +++ b/arch/arm/boot/dts/bcm953012hr.dts @@ -46,6 +46,7 @@ }; memory@80000000 { + device_type = "memory"; reg = <0x80000000 0x10000000>; }; }; diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts index e798055d6989..52c4c6c9d3f1 100644 --- a/arch/arm/boot/dts/bcm953012k.dts +++ b/arch/arm/boot/dts/bcm953012k.dts @@ -44,6 +44,7 @@ }; memory { + device_type = "memory"; reg = <0x80000000 0x10000000>; }; }; diff --git a/arch/arm/boot/dts/cx92755.dtsi b/arch/arm/boot/dts/cx92755.dtsi index a5a23c376418..d2e8f36f8c60 100644 --- a/arch/arm/boot/dts/cx92755.dtsi +++ b/arch/arm/boot/dts/cx92755.dtsi @@ -44,9 +44,9 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "cnxt,cx92755"; interrupt-parent = <&intc>; diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts index 3a2fa6e035a3..26f453dc8370 100644 --- a/arch/arm/boot/dts/da850-lcdk.dts +++ b/arch/arm/boot/dts/da850-lcdk.dts @@ -74,12 +74,16 @@ simple-audio-card,name = "DA850-OMAPL138 LCDK"; simple-audio-card,widgets = "Line", "Line In", - "Line", "Line Out"; + "Line", "Line Out", + "Microphone", "Mic Jack"; simple-audio-card,routing = "LINE1L", "Line In", "LINE1R", "Line In", "Line Out", "LLOUT", - "Line Out", "RLOUT"; + "Line Out", "RLOUT", + "MIC3L", "Mic Jack", + "MIC3R", "Mic Jack", + "Mic Jack", "Mic Bias"; simple-audio-card,format = "dsp_b"; simple-audio-card,bitclock-master = <&link0_codec>; simple-audio-card,frame-master = <&link0_codec>; @@ -250,6 +254,8 @@ #sound-dai-cells = <0>; compatible = "ti,tlv320aic3106"; reg = <0x18>; + adc-settle-ms = <40>; + ai3x-micbias-vg = <1>; /* 2.0V */ status = "okay"; /* Regulators */ diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index 47aa53ba6b92..559659b399d0 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi @@ -476,7 +476,7 @@ clocksource: timer@20000 { compatible = "ti,da830-timer"; reg = <0x20000 0x1000>; - interrupts = <12>, <13>; + interrupts = <21>, <22>; interrupt-names = "tint12", "tint34"; clocks = <&pll0_auxclk>; }; diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi index 601c57afd4fe..95de9f214c14 100644 --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@ -222,6 +222,30 @@ #interrupt-cells = <2>; }; + gpio3: gpio@1ac000 { + compatible = "ti,omap4-gpio"; + ti,hwmods = "gpio3"; + ti,gpio-always-on; + reg = <0x1ac000 0x2000>; + interrupts = <32>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + gpio4: gpio@1ae000 { + compatible = "ti,omap4-gpio"; + ti,hwmods = "gpio4"; + ti,gpio-always-on; + reg = <0x1ae000 0x2000>; + interrupts = <62>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + i2c2: i2c@2a000 { compatible = "ti,omap4-i2c"; #address-cells = <1>; @@ -240,10 +264,48 @@ ti,spi-num-cs = <4>; ti,hwmods = "mcspi1"; dmas = <&edma 16 0 &edma 17 0 - &edma 18 0 &edma 19 0>; + &edma 18 0 &edma 19 0 + &edma 
20 0 &edma 21 0 + &edma 22 0 &edma 23 0>; + + dma-names = "tx0", "rx0", "tx1", "rx1", + "tx2", "rx2", "tx3", "rx3"; + }; + + mcspi2: spi@1a0000 { + compatible = "ti,omap4-mcspi"; + reg = <0x1a0000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <125>; + ti,spi-num-cs = <4>; + ti,hwmods = "mcspi2"; + dmas = <&edma 42 0 &edma 43 0 + &edma 44 0 &edma 45 0>; dma-names = "tx0", "rx0", "tx1", "rx1"; }; + /* Board must configure dmas with edma_xbar for EDMA */ + mcspi3: spi@1a2000 { + compatible = "ti,omap4-mcspi"; + reg = <0x1a2000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <126>; + ti,spi-num-cs = <4>; + ti,hwmods = "mcspi3"; + }; + + mcspi4: spi@1a4000 { + compatible = "ti,omap4-mcspi"; + reg = <0x1a4000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = <127>; + ti,spi-num-cs = <4>; + ti,hwmods = "mcspi4"; + }; + timer1: timer@2e000 { compatible = "ti,dm814-timer"; reg = <0x2e000 0x2000>; @@ -343,6 +405,12 @@ #size-cells = <1>; ranges = <0 0 0x800>; + phy_gmii_sel: phy-gmii-sel { + compatible = "ti,dm814-phy-gmii-sel"; + reg = <0x650 0x4>; + #phy-cells = <1>; + }; + scm_clocks: clocks { #address-cells = <1>; #size-cells = <0>; @@ -549,17 +617,14 @@ cpsw_emac0: slave@4a100200 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 1>; + }; cpsw_emac1: slave@4a100300 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; - }; - - phy_sel: cpsw-phy-sel@48140650 { - compatible = "ti,am3352-cpsw-phy-sel"; - reg= <0x48140650 0x4>; - reg-names = "gmii-sel"; + phys = <&phy_gmii_sel 2>; }; }; diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi index 250ad0535e8c..2e8a3977219f 100644 --- a/arch/arm/boot/dts/dove.dtsi +++ b/arch/arm/boot/dts/dove.dtsi @@ -1,12 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/include/ "skeleton.dtsi" - #include #include #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16)) / { + #address-cells = <1>; + #size-cells = <1>; compatible = "marvell,dove"; model = "Marvell Armada 88AP510 SoC"; interrupt-parent = <&intc>; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index bb45cb7fc3b6..414f1cd68733 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -77,18 +77,18 @@ }; }; + phy_gmii_sel: phy-gmii-sel { + compatible = "ti,dra7xx-phy-gmii-sel"; + reg = <0x554 0x4>; + #phy-cells = <1>; + }; + scm_conf_clocks: clocks { #address-cells = <1>; #size-cells = <0>; }; }; - phy_sel: cpsw-phy-sel@554 { - compatible = "ti,dra7xx-cpsw-phy-sel"; - reg= <0x554 0x4>; - reg-names = "gmii-sel"; - }; - dra7_pmx_core: pinmux@1400 { compatible = "ti,dra7-padconf", "pinctrl-single"; @@ -3099,7 +3099,6 @@ ; ranges = <0 0 0x4000>; syscon = <&scm_conf>; - cpsw-phy-sel = <&phy_sel>; status = "disabled"; davinci_mdio: mdio@1000 { @@ -3114,11 +3113,13 @@ cpsw_emac0: slave@200 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 1>; }; cpsw_emac1: slave@300 { /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; + phys = <&phy_gmii_sel 2>; }; }; }; diff --git a/arch/arm/boot/dts/ep7209.dtsi b/arch/arm/boot/dts/ep7209.dtsi index aaf1261d2ee4..0e74222a5eae 100644 --- a/arch/arm/boot/dts/ep7209.dtsi +++ b/arch/arm/boot/dts/ep7209.dtsi @@ -6,11 +6,11 @@ /dts-v1/; -#include "skeleton.dtsi" - #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Cirrus Logic EP7209"; compatible = "cirrus,ep7209"; diff --git a/arch/arm/boot/dts/ep7211-edb7211.dts 
b/arch/arm/boot/dts/ep7211-edb7211.dts index bc9d5b697452..3475c7777cbc 100644 --- a/arch/arm/boot/dts/ep7211-edb7211.dts +++ b/arch/arm/boot/dts/ep7211-edb7211.dts @@ -12,6 +12,7 @@ compatible = "cirrus,edb7211", "cirrus,ep7211", "cirrus,ep7209"; memory { + device_type = "memory"; reg = <0xc0000000 0x02000000>; }; diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi index 608d17454179..5892a9f7622f 100644 --- a/arch/arm/boot/dts/exynos3250.dtsi +++ b/arch/arm/boot/dts/exynos3250.dtsi @@ -168,6 +168,9 @@ interrupt-controller; #interrupt-cells = <3>; interrupt-parent = <&gic>; + clock-names = "clkout8"; + clocks = <&cmu CLK_FIN_PLL>; + #clock-cells = <1>; }; mipi_phy: video-phy { diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi index 3a9eb1e91c45..08d3a0a7b4eb 100644 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi @@ -49,7 +49,7 @@ }; emmc_pwrseq: pwrseq { - pinctrl-0 = <&sd1_cd>; + pinctrl-0 = <&emmc_rstn>; pinctrl-names = "default"; compatible = "mmc-pwrseq-emmc"; reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>; @@ -165,12 +165,6 @@ cpu0-supply = <&buck2_reg>; }; -/* RSTN signal for eMMC */ -&sd1_cd { - samsung,pin-pud = ; - samsung,pin-drv = ; -}; - &pinctrl_1 { gpio_power_key: power_key { samsung,pins = "gpx1-3"; @@ -188,6 +182,11 @@ samsung,pins = "gpx3-7"; samsung,pin-pud = ; }; + + emmc_rstn: emmc-rstn { + samsung,pins = "gpk1-2"; + samsung,pin-pud = ; + }; }; &ehci { @@ -390,7 +389,6 @@ regulator-name = "LDO20_1.8V"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; - regulator-boot-on; }; ldo21_reg: LDO21 { diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts index 348556fcdd9d..a2251581f6b6 100644 --- a/arch/arm/boot/dts/exynos4412-odroidx.dts +++ b/arch/arm/boot/dts/exynos4412-odroidx.dts @@ -53,7 +53,7 @@ regulator-name = "p3v3_en"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - gpio = <&gpa1 1 GPIO_ACTIVE_LOW>; + gpio = <&gpa1 1 GPIO_ACTIVE_HIGH>; enable-active-high; regulator-always-on; }; diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts index 2ca9319f48f2..dc6fa6fe83f1 100644 --- a/arch/arm/boot/dts/exynos5250-arndale.dts +++ b/arch/arm/boot/dts/exynos5250-arndale.dts @@ -23,7 +23,7 @@ }; chosen { - bootargs = "console=ttySAC2,115200"; + stdout-path = "serial2:115200n8"; }; gpio_keys { @@ -100,7 +100,7 @@ regulator-name = "VDD_33ON_2.8V"; regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; - gpio = <&gpx1 1 GPIO_ACTIVE_LOW>; + gpio = <&gpx1 1 GPIO_ACTIVE_HIGH>; enable-active-high; }; diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi index bf09eab90f8a..25d95de15c9b 100644 --- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi @@ -36,6 +36,11 @@ }; }; +&adc { + vdd-supply = <&ldo4_reg>; + status = "okay"; +}; + &bus_wcore { devfreq-events = <&nocp_mem0_0>, <&nocp_mem0_1>, <&nocp_mem1_0>, <&nocp_mem1_1>; @@ -468,7 +473,7 @@ buck8_reg: BUCK8 { regulator-name = "vdd_1.8v_ldo"; regulator-min-microvolt = <800000>; - regulator-max-microvolt = <1500000>; + regulator-max-microvolt = <2000000>; regulator-always-on; regulator-boot-on; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi index 
e84544b220b9..51a843bd65ed 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi @@ -24,7 +24,9 @@ "Headphone Jack", "MICBIAS", "IN1", "Headphone Jack", "Speakers", "SPKL", - "Speakers", "SPKR"; + "Speakers", "SPKR", + "I2S Playback", "Mixer DAI TX", + "HiFi Playback", "Mixer DAI TX"; assigned-clocks = <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, @@ -51,7 +53,7 @@ <196608000>; cpu { - sound-dai = <&i2s0 0>; + sound-dai = <&i2s0 0>, <&i2s0 1>; }; codec { sound-dai = <&hdmi>, <&max98090>; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index b299e541cac0..5f195ad7e467 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi @@ -362,11 +362,6 @@ }; }; -&adc { - vdd-supply = <&ldo4_reg>; - status = "okay"; -}; - &hdmi { status = "okay"; ddc = <&i2c_2>; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts index 0db935f2b836..c19b5a51ca44 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-lite.dts @@ -18,6 +18,14 @@ compatible = "hardkernel,odroid-xu3-lite", "samsung,exynos5800", "samsung,exynos5"; }; +&arm_a7_pmu { + status = "disabled"; +}; + +&arm_a15_pmu { + status = "disabled"; +}; + &pwm { /* * PWM 0 -- fan diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts index 122174ea9e0a..892d389d6d09 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts +++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts @@ -33,6 +33,8 @@ compatible = "samsung,odroid-xu3-audio"; model = "Odroid-XU4"; + samsung,audio-routing = "I2S Playback", "Mixer DAI TX"; + assigned-clocks = <&clock CLK_MOUT_EPLL>, <&clock CLK_MOUT_MAU_EPLL>, <&clock CLK_MOUT_USER_MAU_EPLL>, @@ -58,7 +60,7 @@ <196608000>; cpu { - sound-dai = <&i2s0 0>; + sound-dai = <&i2s0 0>, <&i2s0 1>; }; codec { diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts index cc0c3cf89eaa..592111c8d6fd 100644 --- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts +++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts @@ -443,7 +443,7 @@ }; display-controller@6a000000 { - status = "disabled"; + status = "okay"; port@0 { reg = <0>; diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 26ff5d419bfc..3652f5556b29 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -40,7 +40,7 @@ spi2 = &cspi3; }; - aitc: aitc-interrupt-controller@e0000000 { + aitc: aitc-interrupt-controller@10040000 { compatible = "fsl,imx27-aitc", "fsl,avic"; interrupt-controller; #interrupt-cells = <1>; diff --git a/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts b/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts index 2967a748d859..a2eea58510dc 100644 --- a/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts +++ b/arch/arm/boot/dts/imx51-digi-connectcore-jsk.dts @@ -21,12 +21,28 @@ }; }; +&esdhc1 { + status = "okay"; +}; + &owire { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_owire>; status = "okay"; }; +&pmic { + fsl,mc13xxx-uses-rtc; + + regulators { + vcoincell_reg: vcoincell { + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-always-on; + }; + }; +}; + &uart1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart1>; diff --git a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi 
b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi index 82d8df097ef1..d90ba5fe4f1b 100644 --- a/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi +++ b/arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi @@ -37,7 +37,6 @@ reg = <0>; interrupt-parent = <&gpio1>; interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; - fsl,mc13xxx-uses-rtc; regulators { sw1_reg: sw1 { @@ -142,16 +141,17 @@ pwgt2spi_reg: pwgt2spi { regulator-always-on; }; - - vcoincell_reg: vcoincell { - regulator-min-microvolt = <3000000>; - regulator-max-microvolt = <3000000>; - regulator-always-on; - }; }; }; }; +&esdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_esdhc1>; + max-frequency = <50000000>; + bus-width = <1>; +}; + &esdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc2>; @@ -174,9 +174,12 @@ }; &i2c2 { - pinctrl-names = "default"; + pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c2>; + pinctrl-1 = <&pinctrl_i2c2_gpio>; clock-frequency = <400000>; + scl-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + sda-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; status = "okay"; mma7455l@1d { @@ -241,6 +244,14 @@ >; }; + pinctrl_esdhc1: esdhc1grp { + fsl,pins = < + MX51_PAD_SD1_CLK__SD1_CLK 0x400021d5 + MX51_PAD_SD1_CMD__SD1_CMD 0x400020d5 + MX51_PAD_SD1_DATA0__SD1_DATA0 0x400020d5 + >; + }; + pinctrl_esdhc2: esdhc2grp { fsl,pins = < MX51_PAD_SD2_CMD__SD2_CMD 0x400020d5 @@ -282,6 +293,13 @@ >; }; + pinctrl_i2c2_gpio: i2c2gpiogrp { + fsl,pins = < + MX51_PAD_GPIO1_2__GPIO1_2 0x400001ed + MX51_PAD_GPIO1_3__GPIO1_3 0x400001ed + >; + }; + pinctrl_nfc: nfcgrp { fsl,pins = < MX51_PAD_NANDF_D0__NANDF_D0 0x80000000 diff --git a/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi new file mode 100644 index 000000000000..fb01fa6e4224 --- /dev/null +++ b/arch/arm/boot/dts/imx6-logicpd-baseboard.dtsi @@ -0,0 +1,555 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Logic PD, Inc. 
+ +/ { + keyboard { + compatible = "gpio-keys"; + + btn0 { + gpios = <&pcf8575 0 GPIO_ACTIVE_LOW>; + label = "btn0"; + linux,code = ; + debounce-interval = <10>; + wakeup-source; + }; + + btn1 { + gpios = <&pcf8575 1 GPIO_ACTIVE_LOW>; + label = "btn1"; + linux,code = ; + debounce-interval = <10>; + wakeup-source; + }; + + btn2 { + gpios = <&pcf8575 2 GPIO_ACTIVE_LOW>; + label = "btn2"; + linux,code = ; + debounce-interval = <10>; + wakeup-source; + }; + + btn3 { + gpios = <&pcf8575 3 GPIO_ACTIVE_LOW>; + label = "btn3"; + linux,code = ; + debounce-interval = <10>; + wakeup-source; + }; + + }; + + leds { + compatible = "gpio-leds"; + + gen-led0 { + label = "led0"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_led0>; + gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "cpu0"; + }; + + gen-led1 { + label = "led1"; + gpios = <&pcf8575 8 GPIO_ACTIVE_HIGH>; + }; + + gen-led2 { + label = "led2"; + gpios = <&pcf8575 9 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + + gen-led3 { + label = "led3"; + gpios = <&pcf8575 10 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "default-on"; + }; + }; + + reg_usb_otg_vbus: regulator-otg-vbus { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usb_otg>; + compatible = "regulator-fixed"; + regulator-name = "usb_otg_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio4 15 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + reg_usb_h1_vbus: regulator-usb-h1-vbus { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_usb_h1_vbus>; + compatible = "regulator-fixed"; + regulator-name = "usb_h1_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio7 12 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + reg_3v3: regulator-3v3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_3v3>; + compatible = "regulator-fixed"; + regulator-name = "reg_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio1 26 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + }; + + reg_enet: regulator-ethernet { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_enet>; + compatible = "regulator-fixed"; + regulator-name = "ethernet-supply"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio3 31 GPIO_ACTIVE_HIGH>; + startup-delay-us = <70000>; + enable-active-high; + vin-supply = <&sw4_reg>; + }; + + reg_audio: regulator-audio { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_audio>; + compatible = "regulator-fixed"; + regulator-name = "3v3_aud"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio1 29 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <®_3v3>; + }; + + reg_hdmi: regulator-hdmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_hdmi>; + compatible = "regulator-fixed"; + regulator-name = "hdmi-supply"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>; + enable-active-high; + vin-supply = <®_3v3>; + }; + + reg_uart3: regulator-uart3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_uart3>; + compatible = "regulator-fixed"; + regulator-name = "uart3-supply"; + gpio = <&gpio1 28 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + vin-supply = <®_3v3>; + }; + + reg_1v8: regulator-1v8 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_1v8>; + compatible = "regulator-fixed"; + 
regulator-name = "1v8-supply"; + gpio = <&gpio3 30 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + vin-supply = <®_3v3>; + }; + + reg_pcie: regulator-pcie { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_pcie>; + regulator-name = "mpcie_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + reg_mipi: regulator-mipi { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_reg_mipi>; + regulator-name = "mipi_pwr_en"; + regulator-min-microvolt = <2800000>; + regulator-max-microvolt = <2800000>; + gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + sound { + compatible = "fsl,imx-audio-wm8962"; + model = "wm8962-audio"; + ssi-controller = <&ssi2>; + audio-codec = <&wm8962>; + audio-routing = + "Headphone Jack", "HPOUTL", + "Headphone Jack", "HPOUTR", + "Ext Spk", "SPKOUTL", + "Ext Spk", "SPKOUTR", + "AMIC", "MICBIAS", + "IN3R", "AMIC"; + mux-int-port = <2>; + mux-ext-port = <4>; + }; +}; + +&audmux { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_audmux>; + status = "okay"; +}; + +&ecspi1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ecspi1>; + status = "disabled"; +}; + +&fec { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet>; + phy-mode = "rgmii"; + phy-reset-duration = <10>; + phy-reset-gpios = <&gpio1 24 GPIO_ACTIVE_LOW>; + phy-supply = <®_enet>; + interrupt-parent = <&gpio1>; + interrupts = <25 IRQ_TYPE_EDGE_FALLING>; + status = "okay"; +}; + +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c1>; + clock-frequency = <400000>; + status = "okay"; + + wm8962: audio-codec@1a { + compatible = "wlf,wm8962"; + reg = <0x1a>; + clocks = <&clks IMX6QDL_CLK_CKO>; + clock-names = "xclk"; + DCVDD-supply = <®_audio>; + DBVDD-supply = <®_audio>; + AVDD-supply = <®_audio>; + CPVDD-supply = <®_audio>; + MICVDD-supply = <®_audio>; + PLLVDD-supply = <®_audio>; + SPKVDD1-supply = <®_audio>; + SPKVDD2-supply = <®_audio>; + gpio-cfg = < + 0x0000 /* 0:Default */ + 0x0000 /* 1:Default */ + 0x0013 /* 2:FN_DMICCLK */ + 0x0000 /* 3:Default */ + 0x8014 /* 4:FN_DMICCDAT */ + 0x0000 /* 5:Default */ + >; + }; +}; + +&i2c3 { + ov5640: camera@10 { + compatible = "ovti,ov5640"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ov5640>; + reg = <0x10>; + clocks = <&clks IMX6QDL_CLK_CKO>; + clock-names = "xclk"; + DOVDD-supply = <®_mipi>; + AVDD-supply = <®_mipi>; + DVDD-supply = <®_mipi>; + reset-gpios = <&gpio3 26 GPIO_ACTIVE_LOW>; + powerdown-gpios = <&gpio3 27 GPIO_ACTIVE_HIGH>; + + port { + ov5640_to_mipi_csi2: endpoint { + remote-endpoint = <&mipi_csi2_in>; + clock-lanes = <0>; + data-lanes = <1 2>; + }; + }; + }; + + pcf8575: gpio@20 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcf8574>; + compatible = "nxp,pcf8575"; + reg = <0x20>; + interrupt-parent = <&gpio6>; + interrupts = <31 IRQ_TYPE_EDGE_FALLING>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + lines-initial-states = <0x0710>; + wakeup-source; + }; +}; + +&ipu1_csi1_from_mipi_vc1 { + clock-lanes = <0>; + data-lanes = <1 2>; +}; + +&mipi_csi { + status = "okay"; + + port@0 { + reg = <0>; + + mipi_csi2_in: endpoint { + remote-endpoint = <&ov5640_to_mipi_csi2>; + clock-lanes = <0>; + data-lanes = <1 2>; + }; + }; +}; + +&pcie { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcie>; + reset-gpio = <&gpio1 9 GPIO_ACTIVE_LOW>; + vpcie-supply = <®_pcie>; + status 
= "okay"; +}; + +&pwm3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm3>; +}; + +&ssi2 { + status = "okay"; +}; + +&uart3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart3>; + status = "okay"; +}; + +&usbh1 { + vbus-supply = <®_usb_h1_vbus>; + status = "okay"; +}; + +&usbotg { + vbus-supply = <®_usb_otg_vbus>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbotg>; + disable-over-current; + dr_mode = "otg"; + status = "okay"; +}; + +&usdhc2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc2>; + pinctrl-1 = <&pinctrl_usdhc2_100mhz>; + pinctrl-2 = <&pinctrl_usdhc2_200mhz>; + vmmc-supply = <®_3v3>; + no-1-8-v; + keep-power-in-suspend; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + status = "okay"; +}; + +&iomuxc { + pinctrl_audmux: audmuxgrp { + fsl,pins = < + MX6QDL_PAD_DISP0_DAT20__AUD4_TXC 0x130b0 + MX6QDL_PAD_DISP0_DAT21__AUD4_TXD 0x110b0 + MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0 + MX6QDL_PAD_DISP0_DAT23__AUD4_RXD 0x130b0 + >; + }; + + pinctrl_ecspi1: ecspi1grp { + fsl,pins = < + MX6QDL_PAD_KEY_COL0__ECSPI1_SCLK 0x100b1 + MX6QDL_PAD_KEY_ROW0__ECSPI1_MOSI 0x100b1 + MX6QDL_PAD_KEY_COL1__ECSPI1_MISO 0x100b1 + MX6QDL_PAD_KEY_ROW1__ECSPI1_SS0 0x100b1 + >; + }; + + pinctrl_enet: enetgrp { + fsl,pins = < + MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0 + MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 + MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030 + MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030 + MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030 + MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030 + MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030 + MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x100b0 + MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030 + MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x4001b0a8 + MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030 + MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x13030 + MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x13030 + MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030 + MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 + MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x13030 + MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b0b0 /* ENET_INT */ + MX6QDL_PAD_ENET_RX_ER__GPIO1_IO24 0x1b0b0 /* ETHR_nRST */ + >; + }; + + pinctrl_i2c1: i2c1grp { + fsl,pins = < + MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1 + MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1 + >; + }; + + pinctrl_led0: led0grp { + fsl,pins = < + MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x1b0b0 + >; + }; + + pinctrl_ov5640: ov5640grp { + fsl,pins = < + MX6QDL_PAD_EIM_D26__GPIO3_IO26 0x1b0b1 + MX6QDL_PAD_EIM_D27__GPIO3_IO27 0x1b0b1 + >; + }; + + pinctrl_pcf8574: pcf8575grp { + fsl,pins = < + MX6QDL_PAD_EIM_BCLK__GPIO6_IO31 0x1b0b0 + >; + }; + + pinctrl_pcie: pciegrp { + fsl,pins = < + MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0 + MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0 + >; + }; + + pinctrl_pwm3: pwm3grp { + fsl,pins = < + MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1 + >; + }; + + pinctrl_reg_1v8: reg1v8grp { + fsl,pins = < + MX6QDL_PAD_EIM_D30__GPIO3_IO30 0x1b0b0 + >; + }; + + pinctrl_reg_3v3: reg3v3grp { + fsl,pins = < + MX6QDL_PAD_ENET_RXD1__GPIO1_IO26 0x1b0b0 + >; + }; + + pinctrl_reg_audio: reg-audiogrp { + fsl,pins = < + MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x1b0b0 + >; + }; + + pinctrl_reg_enet: reg-enetgrp { + fsl,pins = < + MX6QDL_PAD_EIM_D31__GPIO3_IO31 0x1b0b0 + >; + }; + + pinctrl_reg_hdmi: reg-hdmigrp { + fsl,pins = < + MX6QDL_PAD_EIM_D20__GPIO3_IO20 0x1b0b0 + >; + }; + + pinctrl_reg_mipi: reg-mipigrp { + fsl,pins = ; + }; + + pinctrl_reg_pcie: reg-pciegrp { + fsl,pins = < + MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b0b0 + >; + }; + + pinctrl_reg_uart3: reguart3grp { + fsl,pins = < + MX6QDL_PAD_ENET_TX_EN__GPIO1_IO28 0x1b0b0 + >; + }; + + 
pinctrl_reg_usb_h1_vbus: usbh1grp { + fsl,pins = < + MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b0b0 + >; + }; + + pinctrl_reg_usb_otg: reg-usb-otggrp { + fsl,pins = < + MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x1b0b0 + >; + }; + + pinctrl_uart3: uart3grp { + fsl,pins = < + MX6QDL_PAD_EIM_D23__UART3_CTS_B 0x1b0b1 + MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1 + MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1 + MX6QDL_PAD_EIM_EB3__UART3_RTS_B 0x1b0b1 + >; + }; + + pinctrl_usbotg: usbotggrp { + fsl,pins = < + MX6QDL_PAD_GPIO_1__USB_OTG_ID 0xd17059 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */ + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17069 + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10069 + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17069 + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17069 + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17069 + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17069 + >; + }; + + pinctrl_usdhc2_100mhz: h100-usdhc2-100mhz { + fsl,pins = < + MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */ + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9 + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9 + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9 + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9 + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9 + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170b9 + >; + }; + + pinctrl_usdhc2_200mhz: h100-usdhc2-200mhz { + fsl,pins = < + MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* CD */ + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9 + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9 + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9 + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9 + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9 + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x170f9 + >; + }; + +}; diff --git a/arch/arm/boot/dts/imx6-logicpd-som.dtsi b/arch/arm/boot/dts/imx6-logicpd-som.dtsi new file mode 100644 index 000000000000..7ceae3573248 --- /dev/null +++ b/arch/arm/boot/dts/imx6-logicpd-som.dtsi @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Logic PD, Inc. 
+ +#include +#include + +/ { + chosen { + stdout-path = &uart1; + }; + + memory@10000000 { + device_type = "memory"; + reg = <0x10000000 0x80000000>; + }; + + reg_wl18xx_vmmc: regulator-wl18xx { + compatible = "regulator-fixed"; + regulator-name = "vwl1837"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio7 0 GPIO_ACTIVE_HIGH>; + startup-delay-us = <70000>; + enable-active-high; + }; +}; + +&clks { + assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, + <&clks IMX6QDL_CLK_LDB_DI1_SEL>; + assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>, + <&clks IMX6QDL_CLK_PLL3_USB_OTG>; +}; + +&gpmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpmi_nand>; + nand-on-flash-bbt; + status = "okay"; +}; + +&i2c3 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c3>; + status = "okay"; + + pfuze100: pmic@8 { + compatible = "fsl,pfuze100"; + reg = <0x08>; + + regulators { + sw1a_reg: sw1ab { + regulator-min-microvolt = <725000>; + regulator-max-microvolt = <1450000>; + regulator-name = "vddcore"; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <6250>; + }; + + sw1c_reg: sw1c { + regulator-min-microvolt = <725000>; + regulator-max-microvolt = <1450000>; + regulator-name = "vddsoc"; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <6250>; + }; + + sw2_reg: sw2 { + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "gen_3v3"; + regulator-boot-on; + }; + + sw3a_reg: sw3a { + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; + regulator-name = "sw3a_vddr"; + regulator-boot-on; + regulator-always-on; + }; + + sw3b_reg: sw3b { + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; + regulator-name = "sw3b_vddr"; + regulator-boot-on; + regulator-always-on; + }; + + sw4_reg: sw4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-name = "gen_rgmii"; + }; + + swbst_reg: swbst { + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5150000>; + regulator-name = "gen_5v0"; + }; + + snvs_reg: vsnvs { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "gen_vsns"; + regulator-boot-on; + regulator-always-on; + }; + + vref_reg: vrefddr { + regulator-boot-on; + regulator-always-on; + }; + + vgen1_reg: vgen1 { + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-name = "gen_1v5"; + }; + + vgen2_reg: vgen2 { + regulator-name = "vgen2"; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1550000>; + }; + + vgen3_reg: vgen3 { + regulator-name = "gen_vadj_0"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + }; + + vgen4_reg: vgen4 { + regulator-name = "gen_1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + vgen5_reg: vgen5 { + regulator-name = "gen_vadj_1"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vgen6_reg: vgen6 { + regulator-name = "gen_2v5"; + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <2500000>; + regulator-always-on; + }; + + coin_reg: coin { + regulator-min-microvolt = <2500000>; + regulator-max-microvolt = <3000000>; + regulator-always-on; + }; + }; + }; + + temperature-sensor@49 { + compatible = "ti,tmp102"; + reg = <0x49>; + interrupt-parent = <&gpio6>; + 
interrupts = <15 IRQ_TYPE_LEVEL_LOW>; + #thermal-sensor-cells = <1>; + }; + + temperature-sensor@4a { + compatible = "ti,tmp102"; + reg = <0x4a>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_tempsense>; + interrupt-parent = <&gpio6>; + interrupts = <15 IRQ_TYPE_LEVEL_LOW>; + #thermal-sensor-cells = <1>; + }; + + eeprom@51 { + compatible = "atmel,24c64"; + pagesize = <32>; + read-only; /* Manufacturing EEPROM programmed at factory */ + reg = <0x51>; + }; + + eeprom@52 { + compatible = "atmel,24c64"; + pagesize = <32>; + reg = <0x52>; + }; +}; + +/* Reroute power feeding the CPU to come from the external PMIC */ +®_arm +{ + vin-supply = <&sw1a_reg>; +}; + +®_soc +{ + vin-supply = <&sw1c_reg>; +}; + +&iomuxc { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hog>; + + pinctrl_gpmi_nand: gpmi-nandgrp { + fsl,pins = < + MX6QDL_PAD_NANDF_CLE__NAND_CLE 0x0b0b1 + MX6QDL_PAD_NANDF_ALE__NAND_ALE 0x0b0b1 + MX6QDL_PAD_NANDF_WP_B__NAND_WP_B 0x0b0b1 + MX6QDL_PAD_NANDF_RB0__NAND_READY_B 0x0b000 + MX6QDL_PAD_NANDF_CS0__NAND_CE0_B 0x0b0b1 + MX6QDL_PAD_SD4_CMD__NAND_RE_B 0x0b0b1 + MX6QDL_PAD_SD4_CLK__NAND_WE_B 0x0b0b1 + MX6QDL_PAD_NANDF_D0__NAND_DATA00 0x0b0b1 + MX6QDL_PAD_NANDF_D1__NAND_DATA01 0x0b0b1 + MX6QDL_PAD_NANDF_D2__NAND_DATA02 0x0b0b1 + MX6QDL_PAD_NANDF_D3__NAND_DATA03 0x0b0b1 + MX6QDL_PAD_NANDF_D4__NAND_DATA04 0x0b0b1 + MX6QDL_PAD_NANDF_D5__NAND_DATA05 0x0b0b1 + MX6QDL_PAD_NANDF_D6__NAND_DATA06 0x0b0b1 + MX6QDL_PAD_NANDF_D7__NAND_DATA07 0x0b0b1 + >; + }; + + pinctrl_hog: hoggrp { + fsl,pins = < /* Enable ARM Debugger */ + MX6QDL_PAD_CSI0_MCLK__ARM_TRACE_CTL 0x1b0b0 + MX6QDL_PAD_CSI0_PIXCLK__ARM_EVENTO 0x1b0b0 + MX6QDL_PAD_CSI0_VSYNC__ARM_TRACE00 0x1b0b0 + MX6QDL_PAD_CSI0_DATA_EN__ARM_TRACE_CLK 0x1b0b0 + MX6QDL_PAD_CSI0_DAT4__ARM_TRACE01 0x1b0b0 + MX6QDL_PAD_CSI0_DAT5__ARM_TRACE02 0x1b0b0 + MX6QDL_PAD_CSI0_DAT6__ARM_TRACE03 0x1b0b0 + MX6QDL_PAD_CSI0_DAT7__ARM_TRACE04 0x1b0b0 + MX6QDL_PAD_CSI0_DAT8__ARM_TRACE05 0x1b0b0 + MX6QDL_PAD_CSI0_DAT9__ARM_TRACE06 0x1b0b0 + MX6QDL_PAD_CSI0_DAT10__ARM_TRACE07 0x1b0b0 + MX6QDL_PAD_CSI0_DAT11__ARM_TRACE08 0x1b0b0 + MX6QDL_PAD_CSI0_DAT12__ARM_TRACE09 0x1b0b0 + MX6QDL_PAD_CSI0_DAT13__ARM_TRACE10 0x1b0b0 + MX6QDL_PAD_CSI0_DAT14__ARM_TRACE11 0x1b0b0 + MX6QDL_PAD_CSI0_DAT15__ARM_TRACE12 0x1b0b0 + MX6QDL_PAD_CSI0_DAT16__ARM_TRACE13 0x1b0b0 + MX6QDL_PAD_CSI0_DAT17__ARM_TRACE14 0x1b0b0 + MX6QDL_PAD_CSI0_DAT18__ARM_TRACE15 0x1b0b0 + MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0 + >; + }; + + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1 + MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1 + >; + }; + + pinctrl_tempsense: tempsensegrp { + fsl,pins = < + MX6QDL_PAD_NANDF_CS2__GPIO6_IO15 0x1b0b0 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX6QDL_PAD_SD3_DAT6__UART1_RX_DATA 0x1b0b1 + MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA 0x1b0b1 + >; + }; + + pinctrl_uart2: uart2grp { + fsl,pins = < + MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x13059 /* BT_EN */ + MX6QDL_PAD_SD4_DAT4__UART2_RX_DATA 0x1b0b1 + MX6QDL_PAD_SD4_DAT5__UART2_RTS_B 0x1b0b1 + MX6QDL_PAD_SD4_DAT6__UART2_CTS_B 0x1b0b1 + MX6QDL_PAD_SD4_DAT7__UART2_TX_DATA 0x1b0b1 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX6QDL_PAD_SD1_CMD__SD1_CMD 0x170B9 + MX6QDL_PAD_SD1_CLK__SD1_CLK 0x100B9 + MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x170B9 + MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x170B9 + MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x170B9 + MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x170B9 + >; + }; + + pinctrl_usdhc3: usdhc3grp { + fsl,pins = < + MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17049 + MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10049 + 
MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17049 + MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17049 + MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17049 + MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17049 + MX6QDL_PAD_SD3_DAT4__GPIO7_IO01 0x130b0 /* WL_IRQ */ + MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x17059 /* WLAN_EN */ + >; + }; +}; + +&snvs_poweroff { + status = "okay"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + status = "okay"; +}; + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart2>; + uart-has-rtscts; + status = "okay"; + + bluetooth { + compatible = "ti,wl1837-st"; + enable-gpios = <&gpio7 8 GPIO_ACTIVE_HIGH>; + }; +}; + +&usdhc1 { + pinctrl-names = "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc1>; + non-removable; + keep-power-in-suspend; + wakeup-source; + vmmc-supply = <&sw2_reg>; + status = "okay"; +}; + +&usdhc3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc3>; + non-removable; + cap-power-off-card; + keep-power-in-suspend; + wakeup-source; + vmmc-supply = <®_wl18xx_vmmc>; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + wlcore: wlcore@2 { + compatible = "ti,wl1837"; + reg = <2>; + interrupt-parent = <&gpio7>; + interrupts = <1 IRQ_TYPE_LEVEL_HIGH>; + tcxo-clock-frequency = <26000000>; + }; +}; diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts index d5f7a1703aae..9a5d6c94cca4 100644 --- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts +++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts @@ -228,10 +228,11 @@ &weim { status = "okay"; - /* weim memory map: 32MB on CS0, 32MB on CS1, 32MB on CS2 */ + /* weim memory map: 32MB on CS0, CS1, CS2 and CS3 */ ranges = <0 0 0x08000000 0x02000000 1 0 0x0a000000 0x02000000 - 2 0 0x0c000000 0x02000000>; + 2 0 0x0c000000 0x02000000 + 3 0 0x0e000000 0x02000000>; /* SRAM on Colibri nEXT_CS0 */ sram@0,0 { diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi new file mode 100644 index 000000000000..b715ab0fa1ff --- /dev/null +++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2015-2018 Y Soft Corporation, a.s. 
+ +#include +#include +#include + +/ { + backlight: backlight { + compatible = "pwm-backlight"; + pwms = <&pwm1 0 500000 PWM_POLARITY_INVERTED>; + brightness-levels = <0 32 64 128 255>; + default-brightness-level = <32>; + num-interpolated-steps = <8>; + power-supply = <&sw2_reg>; + status = "disabled"; + }; + + lcd_display: display { + compatible = "fsl,imx-parallel-display"; + #address-cells = <1>; + #size-cells = <0>; + interface-pix-fmt = "rgb24"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ipu1>; + status = "disabled"; + + port@0 { + reg = <0>; + + lcd_display_in: endpoint { + remote-endpoint = <&ipu1_di0_disp0>; + }; + }; + + port@1 { + reg = <1>; + + lcd_display_out: endpoint { + remote-endpoint = <&lcd_panel_in>; + }; + }; + }; + + panel: panel { + compatible = "dataimage,scf0700c48ggu18"; + power-supply = <&sw2_reg>; + status = "disabled"; + + port { + lcd_panel_in: endpoint { + remote-endpoint = <&lcd_display_out>; + }; + }; + }; + + reg_pcie: regulator-pcie { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcie_reg>; + regulator-name = "MPCIE_3V3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio3 19 GPIO_ACTIVE_HIGH>; + enable-active-high; + status = "disabled"; + }; + + reg_usb_h1_vbus: regulator-usb-h1-vbus { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbh1_vbus>; + regulator-name = "usb_h1_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio1 29 GPIO_ACTIVE_HIGH>; + enable-active-high; + status = "disabled"; + }; + + reg_usb_otg_vbus: regulator-usb-otg-vbus { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbotg_vbus>; + regulator-name = "usb_otg_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>; + enable-active-high; + status = "okay"; + }; +}; + +&fec { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet>; + phy-mode = "rgmii-id"; + phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; + phy-reset-duration = <20>; + phy-supply = <&sw2_reg>; + phy-handle = <ðphy0>; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + phy_port2: phy@1 { + reg = <1>; + }; + + phy_port3: phy@2 { + reg = <2>; + }; + + switch@0 { + compatible = "qca,qca8334"; + reg = <0>; + + switch_ports: ports { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: port@0 { + reg = <0>; + label = "cpu"; + phy-mode = "rgmii"; + ethernet = <&fec>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + + port@2 { + reg = <2>; + label = "eth2"; + phy-handle = <&phy_port2>; + }; + + port@3 { + reg = <3>; + label = "eth1"; + phy-handle = <&phy_port3>; + }; + }; + }; + }; +}; + +&hdmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hdmi_cec>; + ddc-i2c-bus = <&i2c2>; + status = "disabled"; +}; + +&i2c2 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c2>; + status = "okay"; + + pmic@8 { + compatible = "fsl,pfuze200"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pmic>; + reg = <0x8>; + + regulators { + sw1a_reg: sw1ab { + regulator-min-microvolt = <300000>; + regulator-max-microvolt = <1875000>; + regulator-boot-on; + regulator-always-on; + regulator-ramp-delay = <6250>; + }; + + sw2_reg: sw2 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <3300000>; + regulator-boot-on; + regulator-always-on; + }; + + sw3a_reg: sw3a { 
+ regulator-min-microvolt = <400000>; + regulator-max-microvolt = <1975000>; + regulator-boot-on; + regulator-always-on; + }; + + sw3b_reg: sw3b { + regulator-min-microvolt = <400000>; + regulator-max-microvolt = <1975000>; + regulator-boot-on; + regulator-always-on; + }; + + swbst_reg: swbst { + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5150000>; + }; + + vgen1_reg: vgen1 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1550000>; + }; + + vgen2_reg: vgen2 { + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1550000>; + }; + + vgen3_reg: vgen3 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vgen4_reg: vgen4 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vgen5_reg: vgen5 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vgen6_reg: vgen6 { + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + + vref_reg: vrefddr { + regulator-boot-on; + regulator-always-on; + }; + + vsnvs_reg: vsnvs { + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <3000000>; + regulator-boot-on; + regulator-always-on; + }; + }; + }; + + leds: led-controller@30 { + compatible = "ti,lp5562"; + reg = <0x30>; + clock-mode = /bits/ 8 <1>; + status = "disabled"; + + chan0 { + chan-name = "R"; + led-cur = /bits/ 8 <0x20>; + max-cur = /bits/ 8 <0x60>; + }; + + chan1 { + chan-name = "G"; + led-cur = /bits/ 8 <0x20>; + max-cur = /bits/ 8 <0x60>; + }; + + chan2 { + chan-name = "B"; + led-cur = /bits/ 8 <0x20>; + max-cur = /bits/ 8 <0x60>; + }; + + chan3 { + chan-name = "W"; + led-cur = /bits/ 8 <0x0>; + max-cur = /bits/ 8 <0x0>; + }; + }; + + eeprom@57 { + compatible = "atmel,24c128"; + reg = <0x57>; + pagesize = <64>; + status = "okay"; + }; + + touchscreen: touchscreen@5c { + compatible = "pixcir,pixcir_tangoc"; + reg = <0x5c>; + pinctrl-0 = <&pinctrl_touch>; + interrupt-parent = <&gpio4>; + interrupts = <5 IRQ_TYPE_EDGE_FALLING>; + attb-gpio = <&gpio4 5 GPIO_ACTIVE_HIGH>; + reset-gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>; + touchscreen-size-x = <800>; + touchscreen-size-y = <480>; + status = "disabled"; + }; +}; + +&i2c3 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c3>; + status = "disabled"; + + oled: oled@3d { + compatible = "solomon,ssd1305fb-i2c"; + reg = <0x3d>; + solomon,height = <64>; + solomon,width = <128>; + solomon,page-offset = <0>; + solomon,prechargep2 = <15>; + reset-gpios = <&gpio_oled 1 GPIO_ACTIVE_LOW>; + vbat-supply = <&sw2_reg>; + status = "disabled"; + }; + + gpio_oled: gpio@41 { + compatible = "nxp,pca9536"; + gpio-controller; + #gpio-cells = <2>; + reg = <0x41>; + vcc-supply = <&sw2_reg>; + status = "disabled"; + }; +}; + +&iomuxc { + pinctrl_enet: enetgrp { + fsl,pins = < + MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b020 + MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b020 + MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b020 + MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b020 + MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b020 + MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b020 + MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b020 + MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b020 + MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b020 + MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b020 + MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b020 + MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b020 + MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b020 + MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 
0x1b020 + MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b010 + MX6QDL_PAD_GPIO_16__ENET_REF_CLK 0x1b010 + MX6QDL_PAD_ENET_CRS_DV__GPIO1_IO25 0x1b098 + >; + }; + + pinctrl_hdmi_cec: hdmicecgrp { + fsl,pins = < + MX6QDL_PAD_EIM_A25__HDMI_TX_CEC_LINE 0x1b898 + >; + }; + + pinctrl_i2c2: i2c2grp { + fsl,pins = < + MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b899 + MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b899 + >; + }; + + pinctrl_i2c3: i2c3grp { + fsl,pins = < + MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b899 + MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b899 + >; + }; + + pinctrl_ipu1: ipu1grp { + fsl,pins = < + MX6QDL_PAD_DI0_DISP_CLK__IPU1_DI0_DISP_CLK 0x10 + MX6QDL_PAD_DI0_PIN2__IPU1_DI0_PIN02 0x10 + MX6QDL_PAD_DI0_PIN3__IPU1_DI0_PIN03 0x10 + MX6QDL_PAD_DISP0_DAT0__IPU1_DISP0_DATA00 0x10 + MX6QDL_PAD_DISP0_DAT1__IPU1_DISP0_DATA01 0x10 + MX6QDL_PAD_DISP0_DAT2__IPU1_DISP0_DATA02 0x10 + MX6QDL_PAD_DISP0_DAT3__IPU1_DISP0_DATA03 0x10 + MX6QDL_PAD_DISP0_DAT4__IPU1_DISP0_DATA04 0x10 + MX6QDL_PAD_DISP0_DAT5__IPU1_DISP0_DATA05 0x10 + MX6QDL_PAD_DISP0_DAT6__IPU1_DISP0_DATA06 0x10 + MX6QDL_PAD_DISP0_DAT7__IPU1_DISP0_DATA07 0x10 + MX6QDL_PAD_DISP0_DAT8__IPU1_DISP0_DATA08 0x10 + MX6QDL_PAD_DISP0_DAT9__IPU1_DISP0_DATA09 0x10 + MX6QDL_PAD_DISP0_DAT10__IPU1_DISP0_DATA10 0x10 + MX6QDL_PAD_DISP0_DAT11__IPU1_DISP0_DATA11 0x10 + MX6QDL_PAD_DISP0_DAT12__IPU1_DISP0_DATA12 0x10 + MX6QDL_PAD_DISP0_DAT13__IPU1_DISP0_DATA13 0x10 + MX6QDL_PAD_DISP0_DAT14__IPU1_DISP0_DATA14 0x10 + MX6QDL_PAD_DISP0_DAT15__IPU1_DISP0_DATA15 0x10 + MX6QDL_PAD_DISP0_DAT16__IPU1_DISP0_DATA16 0x10 + MX6QDL_PAD_DISP0_DAT17__IPU1_DISP0_DATA17 0x10 + MX6QDL_PAD_DISP0_DAT18__IPU1_DISP0_DATA18 0x10 + MX6QDL_PAD_DISP0_DAT19__IPU1_DISP0_DATA19 0x10 + MX6QDL_PAD_DISP0_DAT20__IPU1_DISP0_DATA20 0x10 + MX6QDL_PAD_DISP0_DAT21__IPU1_DISP0_DATA21 0x10 + MX6QDL_PAD_DISP0_DAT22__IPU1_DISP0_DATA22 0x10 + MX6QDL_PAD_DISP0_DAT23__IPU1_DISP0_DATA23 0x10 + >; + }; + + pinctrl_pcie: pciegrp { + fsl,pins = < + MX6QDL_PAD_GPIO_17__GPIO7_IO12 0x1b098 + MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x1b098 + MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x1b098 + >; + }; + + pinctrl_pcie_reg: pciereggrp { + fsl,pins = < + MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x1b098 + >; + }; + + pinctrl_pmic: pmicgrp { + fsl,pins = < + MX6QDL_PAD_GPIO_18__GPIO7_IO13 0x1b098 + >; + }; + + pinctrl_pwm1: pwm1grp { + fsl,pins = < + MX6QDL_PAD_GPIO_9__PWM1_OUT 0x8 + >; + }; + + pinctrl_touch: touchgrp { + fsl,pins = < + MX6QDL_PAD_GPIO_19__GPIO4_IO05 0x1b098 + MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x1b098 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0a8 + MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0a8 + >; + }; + + pinctrl_usbh1: usbh1grp { + fsl,pins = < + MX6QDL_PAD_EIM_D30__USB_H1_OC 0x1b098 + >; + }; + + pinctrl_usbh1_vbus: usbh1-vbus { + fsl,pins = < + MX6QDL_PAD_ENET_TXD1__GPIO1_IO29 0x98 + >; + }; + + pinctrl_usbotg: usbotggrp { + fsl,pins = < + MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x1b098 + MX6QDL_PAD_EIM_D21__USB_OTG_OC 0x1b098 + >; + }; + + pinctrl_usbotg_vbus: usbotg-vbus { + fsl,pins = < + MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x98 + >; + }; + + pinctrl_usdhc3: usdhc3grp { + fsl,pins = < + MX6QDL_PAD_EIM_A16__GPIO2_IO22 0x1b018 + MX6QDL_PAD_SD3_RST__GPIO7_IO08 0x1b018 + MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059 + MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059 + MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059 + MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 + MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 + MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 + >; + }; + + pinctrl_usdhc4: usdhc4grp { + fsl,pins = < + MX6QDL_PAD_SD4_CMD__SD4_CMD 0x1f069 + 
MX6QDL_PAD_SD4_CLK__SD4_CLK 0x10069 + MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17069 + MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17069 + MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17069 + MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17069 + MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17069 + MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17069 + MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17069 + MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17069 + >; + }; + + pinctrl_wdog: wdoggrp { + fsl,pins = < + MX6QDL_PAD_GPIO_1__WDOG2_B 0x1b0b0 + >; + }; +}; + +&ipu1_di0_disp0 { + remote-endpoint = <&lcd_display_in>; +}; + +&pcie { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pcie>; + reset-gpio = <&gpio7 12 GPIO_ACTIVE_LOW>; + vpcie-supply = <®_pcie>; + status = "disabled"; +}; + +&pwm1 { + #pwm-cells = <3>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm1>; + status = "disabled"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + status = "okay"; +}; + +&usbh1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbh1>; + vbus-supply = <®_usb_h1_vbus>; + status = "disabled"; +}; + +&usbotg { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbotg>; + vbus-supply = <®_usb_otg_vbus>; + srp-disable; + hnp-disable; + adp-disable; + status = "okay"; +}; + +&usbphy1 { + fsl,tx-d-cal = <106>; + status = "okay"; +}; + +&usbphy2 { + fsl,tx-d-cal = <109>; + status = "disabled"; +}; + +&usdhc3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc3>; + bus-width = <4>; + cd-gpios = <&gpio7 8 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 22 GPIO_ACTIVE_HIGH>; + no-1-8-v; + keep-power-in-suspend; + wakeup-source; + vmmc-supply = <&sw2_reg>; + status = "disabled"; +}; + +&usdhc4 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc4>; + bus-width = <8>; + non-removable; + no-1-8-v; + keep-power-in-suspend; + vmmc-supply = <&sw2_reg>; + status = "okay"; +}; + +&wdog1 { + status = "disabled"; +}; + +&wdog2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_wdog>; + fsl,ext-reset-output; + status = "okay"; +}; diff --git a/arch/arm/boot/dts/imx6dl-yapp4-draco.dts b/arch/arm/boot/dts/imx6dl-yapp4-draco.dts new file mode 100644 index 000000000000..a38c407fd837 --- /dev/null +++ b/arch/arm/boot/dts/imx6dl-yapp4-draco.dts @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2015-2018 Y Soft Corporation, a.s. + +/dts-v1/; + +#include "imx6dl.dtsi" +#include "imx6dl-yapp4-common.dtsi" + +/ { + model = "Y Soft IOTA Draco i.MX6Solo board"; + compatible = "ysoft,imx6dl-yapp4-draco", "fsl,imx6dl"; + + memory@10000000 { + device_type = "memory"; + reg = <0x10000000 0x20000000>; + }; +}; + +&backlight { + status = "okay"; +}; + +&lcd_display { + status = "okay"; +}; + +&leds { + status = "okay"; +}; + +&panel { + status = "okay"; +}; + +&pwm1 { + status = "okay"; +}; + +®_usb_h1_vbus { + status = "okay"; +}; + +&touchscreen { + status = "okay"; +}; + +&usbh1 { + status = "okay"; +}; + +&usbphy2 { + status = "okay"; +}; + +&usdhc3 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/imx6dl-yapp4-hydra.dts b/arch/arm/boot/dts/imx6dl-yapp4-hydra.dts new file mode 100644 index 000000000000..f97927064750 --- /dev/null +++ b/arch/arm/boot/dts/imx6dl-yapp4-hydra.dts @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2015-2018 Y Soft Corporation, a.s. 
+ +/dts-v1/; + +#include "imx6dl.dtsi" +#include "imx6dl-yapp4-common.dtsi" + +/ { + model = "Y Soft IOTA Hydra i.MX6DualLite board"; + compatible = "ysoft,imx6dl-yapp4-hydra", "fsl,imx6dl"; + + memory@10000000 { + device_type = "memory"; + reg = <0x10000000 0x80000000>; + }; +}; + +&gpio_oled { + status = "okay"; +}; + +&hdmi { + status = "okay"; +}; + +&i2c3 { + status = "okay"; +}; + +&leds { + status = "okay"; +}; + +&oled { + status = "okay"; +}; + +&pcie { + status = "okay"; +}; + +®_pcie { + status = "okay"; +}; + +&usdhc3 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts new file mode 100644 index 000000000000..0d594e4bd559 --- /dev/null +++ b/arch/arm/boot/dts/imx6dl-yapp4-ursa.dts @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2015-2018 Y Soft Corporation, a.s. + +/dts-v1/; + +#include "imx6dl.dtsi" +#include "imx6dl-yapp4-common.dtsi" + +/ { + model = "Y Soft IOTA Ursa i.MX6Solo board"; + compatible = "ysoft,imx6dl-yapp4-ursa", "fsl,imx6dl"; + + memory@10000000 { + device_type = "memory"; + reg = <0x10000000 0x20000000>; + }; +}; + +&backlight { + status = "okay"; +}; + +&lcd_display { + status = "okay"; +}; + +&panel { + status = "okay"; +}; + +&pwm1 { + status = "okay"; +}; + +®_usb_h1_vbus { + status = "okay"; +}; + +&switch_ports { + /delete-node/ port@2; +}; + +&touchscreen { + status = "okay"; +}; + +&usbh1 { + status = "okay"; +}; + +&usbphy2 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts new file mode 100644 index 000000000000..45eb0b7f75f8 --- /dev/null +++ b/arch/arm/boot/dts/imx6q-logicpd.dts @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Logic PD, Inc. 
+ +/dts-v1/; +#include "imx6q.dtsi" +#include "imx6-logicpd-som.dtsi" +#include "imx6-logicpd-baseboard.dtsi" + +/ { + model = "Logic PD i.MX6QD SOM-M3"; + compatible = "fsl,imx6q"; + + backlight: backlight-lvds { + compatible = "pwm-backlight"; + pwms = <&pwm3 0 20000>; + brightness-levels = <0 4 8 16 32 64 128 255>; + default-brightness-level = <6>; + power-supply = <®_lcd>; + }; + + panel-lvds0 { + compatible = "okaya,rs800480t-7x0gp"; + + port { + panel_in_lvds0: endpoint { + remote-endpoint = <&lvds0_out>; + }; + }; + }; + + reg_lcd: regulator-lcd { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lcd_reg>; + compatible = "regulator-fixed"; + regulator-name = "lcd_panel_pwr"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio4 17 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + vin-supply = <®_3v3>; + startup-delay-us = <500000>; + }; + + reg_lcd_reset: regulator-lcd-reset { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lcd_reset>; + compatible = "regulator-fixed"; + regulator-name = "nLCD_RESET"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + vin-supply = <®_lcd>; + }; +}; + +&clks { + assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, + <&clks IMX6QDL_CLK_LDB_DI1_SEL>, + <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>, + <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>; + assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, + <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>, + <&clks IMX6QDL_CLK_PLL2_PFD2_396M>, + <&clks IMX6QDL_CLK_PLL2_PFD2_396M>; +}; + +&hdmi { + ddc-i2c-bus = <&i2c3>; + status = "okay"; +}; + +&ldb { + status = "okay"; + + lvds-channel@0 { + fsl,data-mapping = "spwg"; + fsl,data-width = <24>; + status = "okay"; + + port@4 { + reg = <4>; + lvds0_out: endpoint { + remote-endpoint = <&panel_in_lvds0>; + }; + }; + }; + +}; + +&pwm3 { + status = "okay"; +}; + +®_hdmi { + regulator-always-on; /* Without this, the level shifter on HDMI doesn't turn on */ +}; + +&iomuxc { + pinctrl_lcd_reg: lcdreg { + fsl,pins = < + MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x100b0 /* R_LCD_PANEL_PWR */ + >; + }; + + pinctrl_lcd_reset: lcdreset { + fsl,pins = < + MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x100b0 /* LCD_nRESET */ + >; + }; + + pinctrl_touchscreen: touchscreengrp { + fsl,pins = < + MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x1b0b0 /* TOUCH_nPINTDAV */ + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6q-pistachio.dts b/arch/arm/boot/dts/imx6q-pistachio.dts index 5edf858c8b86..a31b17eaf51c 100644 --- a/arch/arm/boot/dts/imx6q-pistachio.dts +++ b/arch/arm/boot/dts/imx6q-pistachio.dts @@ -103,7 +103,7 @@ power { label = "Power Button"; gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; - gpio-key,wakeup; + wakeup-source; linux,code = ; }; }; diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts index 279b15e9ae2e..2ce8399a10ba 100644 --- a/arch/arm/boot/dts/imx6q-tbs2910.dts +++ b/arch/arm/boot/dts/imx6q-tbs2910.dts @@ -1,49 +1,6 @@ -/* - * Copyright 2014 Soeren Moch - * - * This file is dual-licensed: you can use it either under the terms - * of the GPL or the X11 license, at your option. Note that this dual - * licensing only applies to this file, and not this project as a - * whole. 
- * - * a) This file is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; either version 2 of - * the License, or (at your option) any later version. - * - * This file is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public - * License along with this file; if not, write to the Free - * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, - * MA 02110-1301 USA - * - * Or, alternatively, - * - * b) Permission is hereby granted, free of charge, to any person - * obtaining a copy of this software and associated documentation - * files (the "Software"), to deal in the Software without - * restriction, including without limitation the rights to use, - * copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following - * conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES - * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT - * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ +// SPDX-License-Identifier: GPL-2.0+ OR MIT +// +// Copyright 2014 Soeren Moch /dts-v1/; diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi index 8380f1b26826..7c4ad541c3f5 100644 --- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi +++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi @@ -332,11 +332,17 @@ id = <0>; blocks = <0x5>; irq-trigger = <0x1>; + /* 3.25 MHz ADC clock speed */ + st,adc-freq = <1>; + /* 12-bit ADC */ + st,mod-12b = <1>; + /* internal ADC reference */ + st,ref-sel = <0>; + /* ADC converstion time: 80 clocks */ + st,sample-time = <4>; stmpe_touchscreen { compatible = "st,stmpe-ts"; - /* 3.25 MHz ADC clock speed */ - st,adc-freq = <1>; /* 8 sample average control */ st,ave-ctrl = <3>; /* 7 length fractional part in z */ @@ -346,17 +352,17 @@ * current limit value */ st,i-drive = <1>; - /* 12-bit ADC */ - st,mod-12b = <1>; - /* internal ADC reference */ - st,ref-sel = <0>; - /* ADC converstion time: 80 clocks */ - st,sample-time = <4>; /* 1 ms panel driver settling time */ st,settling = <3>; /* 5 ms touch detect interrupt delay */ st,touch-det-delay = <5>; }; + + stmpe_adc { + compatible = "st,stmpe-adc"; + /* forbid to use ADC channels 3-0 (touch) */ + st,norequest-mask = <0x0F>; + }; }; }; @@ -369,8 +375,8 @@ pinctrl-names = "default", "recovery"; pinctrl-0 = <&pinctrl_i2c3>; pinctrl-1 = <&pinctrl_i2c3_recovery>; - scl-gpios = <&gpio3 17 GPIO_ACTIVE_HIGH>; - sda-gpios = <&gpio3 18 GPIO_ACTIVE_HIGH>; + scl-gpios = <&gpio3 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + sda-gpios = <&gpio3 18 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/imx6qdl-colibri.dtsi index 87e15e7cb32b..1beac22266ed 100644 --- a/arch/arm/boot/dts/imx6qdl-colibri.dtsi +++ b/arch/arm/boot/dts/imx6qdl-colibri.dtsi @@ -262,11 +262,17 @@ id = <0>; blocks = <0x5>; irq-trigger = <0x1>; + /* 3.25 MHz ADC clock speed */ + st,adc-freq = <1>; + /* 12-bit ADC */ + st,mod-12b = <1>; + /* internal ADC reference */ + st,ref-sel = <0>; + /* ADC converstion time: 80 clocks */ + st,sample-time = <4>; stmpe_touchscreen { compatible = "st,stmpe-ts"; - /* 3.25 MHz ADC clock speed */ - st,adc-freq = <1>; /* 8 sample average control */ st,ave-ctrl = <3>; /* 7 length fractional part in z */ @@ -276,17 +282,17 @@ * current limit value */ st,i-drive = <1>; - /* 12-bit ADC */ - st,mod-12b = <1>; - /* internal ADC reference */ - st,ref-sel = <0>; - /* ADC converstion time: 80 clocks */ - st,sample-time = <4>; /* 1 ms panel driver settling time */ st,settling = <3>; /* 5 ms touch detect interrupt delay */ st,touch-det-delay = <5>; }; + + stmpe_adc { + compatible = "st,stmpe-adc"; + /* forbid to use ADC channels 3-0 (touch) */ + st,norequest-mask = <0x0F>; + }; }; }; @@ -298,8 +304,8 @@ pinctrl-names = "default", "recovery"; pinctrl-0 = <&pinctrl_i2c3>; pinctrl-1 = <&pinctrl_i2c3_recovery>; - scl-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; - sda-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; + scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 1b50b01e9bac..433bf09a1954 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -89,10 +89,23 @@ &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; + phy-handle = <ðphy>; phy-mode = "rgmii"; phy-reset-gpios = <&gpio3 
23 GPIO_ACTIVE_LOW>; phy-supply = <&vdd_eth_io_reg>; status = "disabled"; + + fec_mdio: mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + txc-skew-ps = <1680>; + rxc-skew-ps = <1860>; + }; + }; }; &gpmi { @@ -117,6 +130,7 @@ reg = <0x58>; interrupt-parent = <&gpio2>; interrupts = <9 IRQ_TYPE_LEVEL_LOW>; /* active-low GPIO2_9 */ + interrupt-controller; regulators { vddcore_reg: bcore1 { diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index 8930aec6464c..a0705066ccba 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -64,7 +64,6 @@ regulator-max-microvolt = <3300000>; gpio = <&gpio2 31 GPIO_ACTIVE_HIGH>; enable-active-high; - regulator-always-on; }; gpio-keys { @@ -250,6 +249,8 @@ pinctrl-0 = <&pinctrl_i2c1_mma8451_int>; interrupt-parent = <&gpio1>; interrupts = <18 IRQ_TYPE_LEVEL_LOW>; + vdd-supply = <®_sensors>; + vddio-supply = <®_sensors>; }; ov5642: camera@3c { @@ -440,6 +441,8 @@ pinctrl-0 = <&pinctrl_i2c3_mag3110_int>; interrupt-parent = <&gpio3>; interrupts = <16 IRQ_TYPE_EDGE_RISING>; + vdd-supply = <®_sensors>; + vddio-supply = <®_sensors>; }; light-sensor@44 { @@ -449,6 +452,7 @@ pinctrl-0 = <&pinctrl_i2c3_isl29023_int>; interrupt-parent = <&gpio3>; interrupts = <9 IRQ_TYPE_EDGE_FALLING>; + vcc-supply = <®_sensors>; }; }; diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi index e7524e73efb4..4b4813f176cd 100644 --- a/arch/arm/boot/dts/imx6sl.dtsi +++ b/arch/arm/boot/dts/imx6sl.dtsi @@ -338,7 +338,7 @@ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm"; reg = <0x02080000 0x4000>; interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clks IMX6SL_CLK_PWM1>, + clocks = <&clks IMX6SL_CLK_PERCLK>, <&clks IMX6SL_CLK_PWM1>; clock-names = "ipg", "per"; }; @@ -348,7 +348,7 @@ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm"; reg = <0x02084000 0x4000>; interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clks IMX6SL_CLK_PWM2>, + clocks = <&clks IMX6SL_CLK_PERCLK>, <&clks IMX6SL_CLK_PWM2>; clock-names = "ipg", "per"; }; @@ -358,7 +358,7 @@ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm"; reg = <0x02088000 0x4000>; interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clks IMX6SL_CLK_PWM3>, + clocks = <&clks IMX6SL_CLK_PERCLK>, <&clks IMX6SL_CLK_PWM3>; clock-names = "ipg", "per"; }; @@ -368,7 +368,7 @@ compatible = "fsl,imx6sl-pwm", "fsl,imx27-pwm"; reg = <0x0208c000 0x4000>; interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clks IMX6SL_CLK_PWM4>, + clocks = <&clks IMX6SL_CLK_PERCLK>, <&clks IMX6SL_CLK_PWM4>; clock-names = "ipg", "per"; }; diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index d8163705363e..4a31a415f88e 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts @@ -309,7 +309,7 @@ pinctrl-2 = <&pinctrl_usdhc3_200mhz>; cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; keep-power-in-suspend; - enable-sdio-wakeup; + wakeup-source; vmmc-supply = <®_sd3_vmmc>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 272ff6133ec1..5b16e65f7696 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -467,7 +467,7 @@ }; gpt: gpt@2098000 { - compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; + compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; reg = <0x02098000 0x4000>; interrupts = ; clocks = <&clks IMX6SX_CLK_GPT_BUS>, @@ -785,6 +785,18 @@ clocks = <&clks 
IMX6SX_CLK_GPU>; }; + pd_disp: power-domain@2 { + reg = <2>; + #power-domain-cells = <0>; + clocks = <&clks IMX6SX_CLK_PXP_AXI>, + <&clks IMX6SX_CLK_DISPLAY_AXI>, + <&clks IMX6SX_CLK_LCDIF1_PIX>, + <&clks IMX6SX_CLK_LCDIF_APB>, + <&clks IMX6SX_CLK_LCDIF2_PIX>, + <&clks IMX6SX_CLK_CSI>, + <&clks IMX6SX_CLK_VADC>; + }; + pd_pci: power-domain@3 { reg = <3>; #power-domain-cells = <0>; @@ -1205,6 +1217,7 @@ interrupts = ; clocks = <&clks IMX6SX_CLK_PXP_AXI>; clock-names = "axi"; + power-domains = <&pd_disp>; status = "disabled"; }; @@ -1226,6 +1239,7 @@ <&clks IMX6SX_CLK_LCDIF_APB>, <&clks IMX6SX_CLK_DISPLAY_AXI>; clock-names = "pix", "axi", "disp_axi"; + power-domains = <&pd_disp>; status = "disabled"; }; @@ -1237,6 +1251,7 @@ <&clks IMX6SX_CLK_LCDIF_APB>, <&clks IMX6SX_CLK_DISPLAY_AXI>; clock-names = "pix", "axi", "disp_axi"; + power-domains = <&pd_disp>; status = "disabled"; }; @@ -1246,6 +1261,7 @@ clocks = <&clks IMX6SX_CLK_VADC>, <&clks IMX6SX_CLK_CSI>; clock-names = "vadc", "csi"; + power-domains = <&pd_disp>; status = "disabled"; }; }; @@ -1370,7 +1386,8 @@ <&clks IMX6SX_CLK_PCIE_REF_125M>, <&clks IMX6SX_CLK_DISPLAY_AXI>; clock-names = "pcie", "pcie_bus", "pcie_phy", "pcie_inbound_axi"; - power-domains = <&pd_pci>; + power-domains = <&pd_disp>, <&pd_pci>; + power-domain-names = "pcie", "pcie_phy"; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/imx6ul-phytec-pcl063.dtsi b/arch/arm/boot/dts/imx6ul-phytec-pcl063.dtsi new file mode 100644 index 000000000000..fc2997449b49 --- /dev/null +++ b/arch/arm/boot/dts/imx6ul-phytec-pcl063.dtsi @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 PHYTEC Messtechnik GmbH + * Author: Christian Hemp + */ + +#include +#include +#include +#include "imx6ul.dtsi" + +/ { + model = "Phytec phyCORE i.MX6 UltraLite"; + compatible = "phytec,imx6ul-pcl063", "fsl,imx6ul"; + + chosen { + stdout-path = &uart1; + }; + + /* + * Set the minimum memory size here and + * let the bootloader set the real size. 
+ */ + memory { + device_type = "memory"; + reg = <0x80000000 0x8000000>; + }; + + gpio_leds_som: leds { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpioleds_som>; + compatible = "gpio-leds"; + + led_green { + label = "phycore:green"; + gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + }; + }; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet1>; + phy-mode = "rmii"; + phy-handle = <ðphy0>; + status = "okay"; + + mdio: mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@1 { + reg = <1>; + interrupt-parent = <&gpio1>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + micrel,led-mode = <1>; + clocks = <&clks IMX6UL_CLK_ENET_REF>; + clock-names = "rmii-ref"; + }; + }; +}; + +&gpmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpmi_nand>; + nand-on-flash-bbt; + status = "okay"; +}; + +&i2c1 { + pinctrl-names = "default"; + pinctrl-0 =<&pinctrl_i2c1>; + clock-frequency = <100000>; + status = "okay"; + + eeprom@52 { + compatible = "catalyst,24c32", "atmel,24c32"; + reg = <0x52>; + }; +}; + +&snvs_poweroff { + status = "okay"; +}; + +&uart1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart1>; + status = "okay"; +}; + +&iomuxc { + pinctrl_enet1: enet1grp { + fsl,pins = < + MX6UL_PAD_GPIO1_IO07__ENET1_MDC 0x1b0b0 + MX6UL_PAD_GPIO1_IO06__ENET1_MDIO 0x1b0b0 + MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN 0x1b0b0 + MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER 0x1b0b0 + MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00 0x1b0b0 + MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01 0x1b0b0 + MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN 0x1b0b0 + MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00 0x1b0b0 + MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01 0x1b0b0 + MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1 0x4001b031 + MX6UL_PAD_GPIO1_IO02__GPIO1_IO02 0x17059 + >; + }; + + pinctrl_gpioleds_som: gpioledssomgrp { + fsl,pins = ; + }; + + pinctrl_gpmi_nand: gpminandgrp { + fsl,pins = < + MX6UL_PAD_NAND_CLE__RAWNAND_CLE 0x0b0b1 + MX6UL_PAD_NAND_ALE__RAWNAND_ALE 0x0b0b1 + MX6UL_PAD_NAND_WP_B__RAWNAND_WP_B 0x0b0b1 + MX6UL_PAD_NAND_READY_B__RAWNAND_READY_B 0x0b000 + MX6UL_PAD_NAND_CE0_B__RAWNAND_CE0_B 0x0b0b1 + MX6UL_PAD_NAND_RE_B__RAWNAND_RE_B 0x0b0b1 + MX6UL_PAD_NAND_WE_B__RAWNAND_WE_B 0x0b0b1 + MX6UL_PAD_NAND_DATA00__RAWNAND_DATA00 0x0b0b1 + MX6UL_PAD_NAND_DATA01__RAWNAND_DATA01 0x0b0b1 + MX6UL_PAD_NAND_DATA02__RAWNAND_DATA02 0x0b0b1 + MX6UL_PAD_NAND_DATA03__RAWNAND_DATA03 0x0b0b1 + MX6UL_PAD_NAND_DATA04__RAWNAND_DATA04 0x0b0b1 + MX6UL_PAD_NAND_DATA05__RAWNAND_DATA05 0x0b0b1 + MX6UL_PAD_NAND_DATA06__RAWNAND_DATA06 0x0b0b1 + MX6UL_PAD_NAND_DATA07__RAWNAND_DATA07 0x0b0b1 + >; + }; + + pinctrl_i2c1: i2cgrp { + fsl,pins = < + MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0 + MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0 + >; + }; + + pinctrl_uart1: uart1grp { + fsl,pins = < + MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1 + MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1 + >; + }; + +}; diff --git a/arch/arm/boot/dts/imx6ul-phytec-peb-eval-01.dtsi b/arch/arm/boot/dts/imx6ul-phytec-peb-eval-01.dtsi new file mode 100644 index 000000000000..e2f38f39a6ad --- /dev/null +++ b/arch/arm/boot/dts/imx6ul-phytec-peb-eval-01.dtsi @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 PHYTEC Messtechnik + * Author: Christian Hemp + */ + +#include + +/ { + gpio_keys: gpio-keys { + compatible = "gpio-key"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_gpio_keys>; + status = "disabled"; + + power { + label = "Power Button"; + gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; + linux,code = ; + 
wakeup-source; + }; + }; + + user_leds: leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_user_leds>; + status = "disabled"; + + led_yellow { + gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "default-on"; + }; + + led_red { + gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "default-on"; + }; + }; +}; + +&iomuxc { + pinctrl_gpio_keys: gpio_keysgrp { + fsl,pins = < + MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00 0x79 + >; + }; + + pinctrl_user_leds: user_ledsgrp { + fsl,pins = < + MX6UL_PAD_JTAG_MOD__GPIO1_IO10 0x79 + MX6UL_PAD_GPIO1_IO01__GPIO1_IO01 0x79 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin-full.dts b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin-full.dts new file mode 100644 index 000000000000..b6a1407a9d44 --- /dev/null +++ b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin-full.dts @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 PHYTEC Messtechnik GmbH + * Author: Christian Hemp + */ + +/dts-v1/; +#include "imx6ul-phytec-pcl063.dtsi" +#include "imx6ul-phytec-phyboard-segin.dtsi" +#include "imx6ul-phytec-peb-eval-01.dtsi" + +/ { + model = "Phytec phyBOARD-Segin i.MX6 UltraLite Full Featured"; + compatible = "phytec,imx6ul-pbacd10", "phytec,imx6ul-pcl063", "fsl,imx6ul"; +}; + +&adc1 { + status = "okay"; +}; + +&can1 { + status = "okay"; +}; + +&tlv320 { + status = "okay"; +}; + +&ecspi3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_ecspi3>; + cs-gpios = <&gpio1 20 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&fec2 { + status = "okay"; +}; + +&i2c_rtc { + status = "okay"; +}; + +&reg_can1_en { + status = "okay"; +}; + +&reg_sound_1v8 { + status = "okay"; +}; + +&reg_sound_3v3 { + status = "okay"; +}; + +&sai2 { + status = "okay"; +}; + +&sound { + status = "okay"; +}; + +&uart5 { + status = "okay"; +}; + +&usbotg1 { + status = "okay"; +}; + +&usbotg2 { + status = "okay"; +}; + +&usdhc1 { + status = "okay"; +}; + +&iomuxc { + pinctrl_ecspi3: ecspi3grp { + fsl,pins = < + MX6UL_PAD_UART2_RTS_B__ECSPI3_MISO 0x10b0 + MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI 0x10b0 + MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK 0x10b0 + MX6UL_PAD_UART2_TX_DATA__GPIO1_IO20 0x10b0 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin.dtsi b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin.dtsi new file mode 100644 index 000000000000..7bf439a77d2c --- /dev/null +++ b/arch/arm/boot/dts/imx6ul-phytec-phyboard-segin.dtsi @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016 PHYTEC Messtechnik GmbH + * Author: Christian Hemp + */ + +/ { + model = "Phytec phyBOARD-Segin i.MX6 UltraLite"; + compatible = "phytec,imx6ul-pbacd-10", "phytec,imx6ul-pcl063", "fsl,imx6ul"; + + aliases { + rtc0 = &i2c_rtc; + rtc1 = &snvs_rtc; + }; + + reg_sound_1v8: regulator-1v8 { + compatible = "regulator-fixed"; + regulator-name = "i2s-audio-1v8"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + status = "disabled"; + }; + + reg_sound_3v3: regulator-3v3 { + compatible = "regulator-fixed"; + regulator-name = "i2s-audio-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + status = "disabled"; + }; + + reg_can1_en: regulator-can1 { + compatible = "regulator-fixed"; + pinctrl-names = "default"; + pinctrl-0 = <&princtrl_flexcan1_en>; + regulator-name = "Can"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio5 2 GPIO_ACTIVE_HIGH>; + enable-active-high; + status = "disabled"; + }; +
reg_adc1_vref_3v3: regulator-vref-3v3 { + compatible = "regulator-fixed"; + regulator-name = "vref-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + sound: sound { + compatible = "simple-audio-card"; + simple-audio-card,name = "phyBOARD-Segin-TLV320AIC3007"; + simple-audio-card,format = "i2s"; + simple-audio-card,bitclock-master = <&dailink_master>; + simple-audio-card,frame-master = <&dailink_master>; + simple-audio-card,widgets = + "Line", "Line In", + "Line", "Line Out", + "Speaker", "Speaker"; + simple-audio-card,routing = + "Line Out", "LLOUT", + "Line Out", "RLOUT", + "Speaker", "SPOP", + "Speaker", "SPOM", + "LINE1L", "Line In", + "LINE1R", "Line In"; + status = "disabled"; + + simple-audio-card,cpu { + sound-dai = <&sai2>; + }; + + dailink_master: simple-audio-card,codec { + sound-dai = <&tlv320>; + clocks = <&clks IMX6UL_CLK_SAI2>; + }; + }; + +}; + +&adc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_adc1>; + vref-supply = <&reg_adc1_vref_3v3>; + /* + * driver can not separate a specific channel so we request 4 channels + * here - we need only the fourth channel + */ + num-channels = <4>; + status = "disabled"; +}; + +&can1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_flexcan1>; + xceiver-supply = <&reg_can1_en>; + status = "disabled"; +}; + +&clks { + assigned-clocks = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>; + assigned-clock-rates = <786432000>; +}; + +&fec2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enet2>; + phy-mode = "rmii"; + phy-handle = <&ethphy1>; + status = "disabled"; +}; + +&i2c1 { + tlv320: codec@18 { + compatible = "ti,tlv320aic3007"; + #sound-dai-cells = <0>; + reg = <0x18>; + AVDD-supply = <&reg_sound_3v3>; + IOVDD-supply = <&reg_sound_3v3>; + DRVDD-supply = <&reg_sound_3v3>; + DVDD-supply = <&reg_sound_1v8>; + status = "disabled"; + }; + + stmpe: touchscreen@44 { + compatible = "st,stmpe811"; + reg = <0x44>; + interrupts = <3 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&gpio5>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_stmpe>; + status = "disabled"; + + touchscreen { + compatible = "st,stmpe-ts"; + st,sample-time = <4>; + st,mod-12b = <1>; + st,ref-sel = <0>; + st,adc-freq = <1>; + st,ave-ctrl = <1>; + st,touch-det-delay = <2>; + st,settling = <2>; + st,fraction-z = <7>; + st,i-drive = <1>; + touchscreen-inverted-x = <1>; + touchscreen-inverted-y = <1>; + }; + }; + + i2c_rtc: rtc@68 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_rtc_int>; + compatible = "microcrystal,rv4162"; + reg = <0x68>; + interrupt-parent = <&gpio5>; + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; + status = "disabled"; + }; +}; + +&mdio { + ethphy1: ethernet-phy@2 { + reg = <2>; + micrel,led-mode = <1>; + clocks = <&clks IMX6UL_CLK_ENET2_REF>; + clock-names = "rmii-ref"; + }; +}; + +&pwm3 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pwm3>; + status = "disabled"; +}; + +&sai2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_sai2>; + assigned-clocks = <&clks IMX6UL_CLK_SAI2_SEL>, + <&clks IMX6UL_CLK_SAI2>; + assigned-clock-parents = <&clks IMX6UL_CLK_PLL4_AUDIO_DIV>; + assigned-clock-rates = <0>, <19200000>; + fsl,sai-mclk-direction-output; + status = "disabled"; +}; + +&uart5 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart5>; + uart-has-rtscts; + status = "disabled"; +}; + +&usbotg1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usb_otg1_id>; + dr_mode = "otg"; + status = "disabled"; +}; + +&usbotg2 { + dr_mode = "host"; + disable-over-current; + status = "disabled"; +}; + +&usdhc1 { + pinctrl-names
= "default", "state_100mhz", "state_200mhz"; + pinctrl-0 = <&pinctrl_usdhc1>; + pinctrl-1 = <&pinctrl_usdhc1_100mhz>; + pinctrl-2 = <&pinctrl_usdhc1_200mhz>; + cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>; + no-1-8-v; + keep-power-in-suspend; + wakeup-source; + status = "disabled"; +}; + +&iomuxc { + pinctrl_adc1: adc1grp { + fsl,pins = < + MX6UL_PAD_GPIO1_IO03__GPIO1_IO03 0xb0 + >; + }; + + pinctrl_enet2: enet2grp { + fsl,pins = < + MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN 0x1b0b0 + MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER 0x1b0b0 + MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00 0x1b0b0 + MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01 0x1b0b0 + MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN 0x1b0b0 + MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00 0x1b0b0 + MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01 0x1b0b0 + MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2 0x4001b031 + >; + }; + + pinctrl_flexcan1: flexcan1 { + fsl,pins = < + MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX 0x0b0b0 + MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX 0x0b0b0 + >; + }; + + princtrl_flexcan1_en: flexcan1engrp { + fsl,pins = < + MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02 0x17059 + >; + }; + + pinctrl_pwm3: pwm3grp { + fsl,pins = < + MX6UL_PAD_GPIO1_IO04__PWM3_OUT 0x0b0b0 + >; + }; + + pinctrl_rtc_int: rtcintgrp { + fsl,pins = < + MX6UL_PAD_SNVS_TAMPER1__GPIO5_IO01 0x17059 + >; + }; + + pinctrl_sai2: sai2grp { + fsl,pins = < + MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK 0x17088 + MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC 0x17088 + MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA 0x11088 + MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA 0x11088 + MX6UL_PAD_JTAG_TMS__SAI2_MCLK 0x17088 + >; + }; + + pinctrl_stmpe: stmpegrp { + fsl,pins = < + MX6UL_PAD_SNVS_TAMPER3__GPIO5_IO03 0x17059 + >; + }; + + pinctrl_uart5: uart5grp { + fsl,pins = < + MX6UL_PAD_UART5_TX_DATA__UART5_DCE_TX 0x1b0b1 + MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX 0x1b0b1 + MX6UL_PAD_GPIO1_IO08__UART5_DCE_RTS 0x1b0b1 + MX6UL_PAD_GPIO1_IO09__UART5_DCE_CTS 0x1b0b1 + >; + }; + + pinctrl_usb_otg1_id: usbotg1idgrp { + fsl,pins = < + MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID 0x17059 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x17059 + MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x10059 + MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x17059 + MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x17059 + MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x17059 + MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x17059 + MX6UL_PAD_UART1_RTS_B__GPIO1_IO19 0x17059 + >; + }; + + pinctrl_usdhc1_100mhz: usdhc1grp100mhz { + fsl,pins = < + MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170b9 + MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100b9 + MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170b9 + MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170b9 + MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170b9 + MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9 + >; + }; + + pinctrl_usdhc1_200mhz: usdhc1grp200mhz { + fsl,pins = < + MX6UL_PAD_SD1_CMD__USDHC1_CMD 0x170f9 + MX6UL_PAD_SD1_CLK__USDHC1_CLK 0x100f9 + MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170f9 + MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170f9 + MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170f9 + MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9 + >; + }; +}; diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi index 6c63a7384611..9ad1da159768 100644 --- a/arch/arm/boot/dts/imx6ull-colibri.dtsi +++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi @@ -94,16 +94,16 @@ pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c1>; pinctrl-1 = <&pinctrl_i2c1_gpio>; - sda-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; - scl-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>; + sda-gpios = <&gpio1 29 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + scl-gpios = <&gpio1 28 (GPIO_ACTIVE_HIGH 
| GPIO_OPEN_DRAIN)>; }; &i2c2 { pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c2>; pinctrl-1 = <&pinctrl_i2c2_gpio>; - sda-gpios = <&gpio1 31 GPIO_ACTIVE_LOW>; - scl-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; + sda-gpios = <&gpio1 31 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; + scl-gpios = <&gpio1 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "okay"; ad7879@2c { diff --git a/arch/arm/boot/dts/imx6ull.dtsi b/arch/arm/boot/dts/imx6ull.dtsi index f3668fe69eac..22e4a307fa59 100644 --- a/arch/arm/boot/dts/imx6ull.dtsi +++ b/arch/arm/boot/dts/imx6ull.dtsi @@ -30,6 +30,18 @@ >; }; +&ocotp { + compatible = "fsl,imx6ull-ocotp", "syscon"; +}; + +&usdhc1 { + compatible = "fsl,imx6ull-usdhc", "fsl,imx6sx-usdhc"; +}; + +&usdhc2 { + compatible = "fsl,imx6ull-usdhc", "fsl,imx6sx-usdhc"; +}; + / { soc { aips3: aips-bus@2200000 { diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index 6b298e388f4b..6eb98e7c568d 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi @@ -96,6 +96,14 @@ }; }; +&aips2 { + pcie_phy: pcie-phy@306d0000 { + compatible = "fsl,imx7d-pcie-phy"; + reg = <0x306d0000 0x10000>; + status = "disabled"; + }; +}; + &aips3 { usbotg2: usb@30b20000 { compatible = "fsl,imx7d-usb", "fsl,imx27-usb"; @@ -173,6 +181,7 @@ <&src IMX7_RESET_PCIE_CTRL_APPS_EN>, <&src IMX7_RESET_PCIE_CTRL_APPS_TURNOFF>; reset-names = "pciephy", "apps", "turnoff"; + fsl,imx7d-pcie-phy = <&pcie_phy>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index 931b2754b099..fca6e50f37c8 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -199,9 +199,13 @@ assigned-clock-parents = <&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>; }; - smc1: smc1@40410000 { + smc1: clock-controller@40410000 { compatible = "fsl,imx7ulp-smc1"; reg = <0x40410000 0x1000>; + #clock-cells = <1>; + clocks = <&scg1 IMX7ULP_CLK_CORE_DIV>, + <&scg1 IMX7ULP_CLK_HSRUN_CORE_DIV>; + clock-names = "divcore", "hsrun_divcore"; }; pcc3: clock-controller@40b30000 { @@ -343,4 +347,17 @@ gpio-ranges = <&iomuxc1 0 96 32>; }; }; + + m4aips1: bus@41080000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x41080000 0x80000>; + ranges; + + sim: sim@410a3000 { + compatible = "fsl,imx7ulp-sim", "syscon"; + reg = <0x410a3000 0x1000>; + }; + }; }; diff --git a/arch/arm/boot/dts/integrator.dtsi b/arch/arm/boot/dts/integrator.dtsi index 4d58638d104b..1612a869a4f7 100644 --- a/arch/arm/boot/dts/integrator.dtsi +++ b/arch/arm/boot/dts/integrator.dtsi @@ -3,9 +3,15 @@ * SoC core Device Tree for the ARM Integrator platforms */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; + + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + core-module@10000000 { compatible = "arm,core-module-integrator", "syscon", "simple-mfd"; reg = <0x10000000 0x200>; diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts index a185ab8759fa..01fa229e1bd0 100644 --- a/arch/arm/boot/dts/integratorcp.dts +++ b/arch/arm/boot/dts/integratorcp.dts @@ -192,6 +192,43 @@ interrupts = <27>; }; + bridge { + compatible = "ti,ths8134a", "ti,ths8134"; + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + vga_bridge_in: endpoint { + remote-endpoint = <&clcd_pads_vga_dac>; + }; + }; + + port@1 { + reg = <1>; + + vga_bridge_out: endpoint { + remote-endpoint = <&vga_con_in>; + }; + }; + }; + }; + + vga { + compatible = 
"vga-connector"; + + port { + vga_con_in: endpoint { + remote-endpoint = <&vga_bridge_out>; + }; + }; + }; + fpga { /* * These PrimeCells are at the same location and using @@ -254,39 +291,27 @@ interrupts = <22>; clocks = <&auxosc>, <&pclk>; clock-names = "clcdclk", "apb_pclk"; + /* 640x480 16bpp @ 25.175MHz is 36827428 bytes/s */ + max-memory-bandwidth = <40000000>; - port { - /* - * The VGA connected is implemented with a - * THS8134A triple DAC that can be run in 24bit - * or 16bit RGB mode. - */ - clcd_pads: endpoint { - remote-endpoint = <&clcd_panel>; - arm,pl11x,tft-r0g0b0-pads = <1 7 13>; - }; - }; - - panel { - compatible = "panel-dpi"; - - port { - clcd_panel: endpoint { - remote-endpoint = <&clcd_pads>; - }; - }; - - /* Standard 640x480 VGA timings */ - panel-timing { - clock-frequency = <25175000>; - hactive = <640>; - hback-porch = <48>; - hfront-porch = <16>; - hsync-len = <96>; - vactive = <480>; - vback-porch = <33>; - vfront-porch = <10>; - vsync-len = <2>; + /* + * This port is routed through a PLD (Programmable + * Logic Device) that routes the output from the CLCD + * (after transformations) to the VGA DAC and also an + * external panel connector. The PLD is essential for + * supporting RGB565/BGR565. + * + * The signals from the port thus reaches two endpoints. + * The PLD is managed through a few special bits in the + * FPGA "sysreg". + * + * This arrangement can be clearly seen in + * ARM DUI 0225D, page 3-41, figure 3-19. + */ + port@0 { + clcd_pads_vga_dac: endpoint { + remote-endpoint = <&vga_bridge_in>; + arm,pl11x,tft-r0g0b0-pads = <0 8 16>; }; }; }; diff --git a/arch/arm/boot/dts/kirkwood-dir665.dts b/arch/arm/boot/dts/kirkwood-dir665.dts index 31ceacd841de..b3ad3f607d31 100644 --- a/arch/arm/boot/dts/kirkwood-dir665.dts +++ b/arch/arm/boot/dts/kirkwood-dir665.dts @@ -190,53 +190,6 @@ gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; }; }; - - dsa { - status = "disabled"; - - compatible = "marvell,dsa"; - #address-cells = <2>; - #size-cells = <0>; - - dsa,ethernet = <ð0port>; - dsa,mii-bus = <&mdio>; - - switch@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0 0>; /* MDIO address 0, switch 0 in tree */ - - port@0 { - reg = <0>; - label = "lan4"; - }; - - port@1 { - reg = <1>; - label = "lan3"; - }; - - port@2 { - reg = <2>; - label = "lan2"; - }; - - port@3 { - reg = <3>; - label = "lan1"; - }; - - port@4 { - reg = <4>; - label = "wan"; - }; - - port@6 { - reg = <6>; - label = "cpu"; - }; - }; - }; }; &mdio { diff --git a/arch/arm/boot/dts/kirkwood-linksys-viper.dts b/arch/arm/boot/dts/kirkwood-linksys-viper.dts index a7d659b7145a..2f9660f3b457 100644 --- a/arch/arm/boot/dts/kirkwood-linksys-viper.dts +++ b/arch/arm/boot/dts/kirkwood-linksys-viper.dts @@ -66,53 +66,6 @@ gpios = <&gpio0 14 GPIO_ACTIVE_HIGH>; }; }; - - dsa { - status = "disabled"; - - compatible = "marvell,dsa"; - #address-cells = <2>; - #size-cells = <0>; - - dsa,ethernet = <ð0port>; - dsa,mii-bus = <&mdio>; - - switch@16,0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <16 0>; /* MDIO address 16, switch 0 in tree */ - - port@0 { - reg = <0>; - label = "ethernet1"; - }; - - port@1 { - reg = <1>; - label = "ethernet2"; - }; - - port@2 { - reg = <2>; - label = "ethernet3"; - }; - - port@3 { - reg = <3>; - label = "ethernet4"; - }; - - port@4 { - reg = <4>; - label = "internet"; - }; - - port@5 { - reg = <5>; - label = "cpu"; - }; - }; - }; }; &pinctrl { diff --git a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts index 
86d532916d56..2e1a75348908 100644 --- a/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts +++ b/arch/arm/boot/dts/kirkwood-mv88f6281gtw-ge.dts @@ -107,53 +107,6 @@ gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; }; }; - - dsa { - status = "disabled"; - - compatible = "marvell,dsa"; - #address-cells = <1>; - #size-cells = <0>; - - dsa,ethernet = <ð0port>; - dsa,mii-bus = <&mdio>; - - switch@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0 0>; /* MDIO address 0, switch 0 in tree */ - - port@0 { - reg = <0>; - label = "lan1"; - }; - - port@1 { - reg = <1>; - label = "lan2"; - }; - - port@2 { - reg = <2>; - label = "lan3"; - }; - - port@3 { - reg = <3>; - label = "lan4"; - }; - - port@4 { - reg = <4>; - label = "wan"; - }; - - port@5 { - reg = <5>; - label = "cpu"; - }; - }; - }; }; &mdio { diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts index a9fee2c2bcaf..9d88301daf0e 100644 --- a/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts +++ b/arch/arm/boot/dts/kirkwood-rd88f6281-z0.dts @@ -16,15 +16,6 @@ model = "Marvell RD88f6281 Reference design, with Z0 SoC"; compatible = "marvell,rd88f6281-z0", "marvell,rd88f6281","marvell,kirkwood-88f6281", "marvell,kirkwood"; - dsa { - switch@0 { - reg = <0 0>; /* MDIO address 0, switch 0 in tree */ - port@4 { - reg = <4>; - label = "wan"; - }; - }; - }; }; ð1 { diff --git a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi index 0f22f0e6f56b..f1f8eee132e8 100644 --- a/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi +++ b/arch/arm/boot/dts/kirkwood-rd88f6281.dtsi @@ -48,47 +48,6 @@ cd-gpios = <&gpio0 28 GPIO_ACTIVE_HIGH>; /* No WP GPIO */ }; - }; - - dsa { - status = "disabled"; - - compatible = "marvell,dsa"; - #address-cells = <2>; - #size-cells = <0>; - - dsa,ethernet = <ð0port>; - dsa,mii-bus = <&mdio>; - - switch@0 { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - label = "lan1"; - }; - - port@1 { - reg = <1>; - label = "lan2"; - }; - - port@2 { - reg = <2>; - label = "lan3"; - }; - - port@3 { - reg = <3>; - label = "lan4"; - }; - - port@5 { - reg = <5>; - label = "cpu"; - }; - }; }; }; diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi index 81c7eda2c442..2161e23bd98e 100644 --- a/arch/arm/boot/dts/kirkwood.dtsi +++ b/arch/arm/boot/dts/kirkwood.dtsi @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -/include/ "skeleton.dtsi" #include #include #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16)) / { + #address-cells = <1>; + #size-cells = <1>; compatible = "marvell,kirkwood"; interrupt-parent = <&intc>; diff --git a/arch/arm/boot/dts/lpc3250-ea3250.dts b/arch/arm/boot/dts/lpc3250-ea3250.dts index 58ea0a4e7afa..f46a11827ef6 100644 --- a/arch/arm/boot/dts/lpc3250-ea3250.dts +++ b/arch/arm/boot/dts/lpc3250-ea3250.dts @@ -17,64 +17,70 @@ / { model = "Embedded Artists LPC3250 board based on NXP LPC3250"; compatible = "ea,ea3250", "nxp,lpc3250"; - #address-cells = <1>; - #size-cells = <1>; - memory { + memory@80000000 { device_type = "memory"; reg = <0x80000000 0x4000000>; }; - gpio_keys { + gpio-keys { compatible = "gpio-keys"; - #address-cells = <1>; - #size-cells = <0>; autorepeat; - button@21 { + + button { label = "Interrupt Key"; linux,code = <103>; gpios = <&gpio 4 1 0>; /* GPI_P3 1 */ }; + key1 { label = "KEY1"; linux,code = <1>; gpios = <&pca9532 0 0>; }; + key2 { label = "KEY2"; linux,code = <2>; gpios = <&pca9532 1 0>; }; + key3 { label = "KEY3"; linux,code = <3>; gpios = <&pca9532 2 0>; }; + 
key4 { + label = "KEY4"; + linux,code = <4>; + gpios = <&pca9532 3 0>; + }; + + joy0 { + label = "Joystick Key 0"; + linux,code = <10>; + gpios = <&gpio 2 0 0>; /* P2.0 */ + }; + + joy1 { + label = "Joystick Key 1"; + linux,code = <11>; + gpios = <&gpio 2 1 0>; /* P2.1 */ + }; + + joy2 { + label = "Joystick Key 2"; + linux,code = <12>; + gpios = <&gpio 2 2 0>; /* P2.2 */ + }; + + joy3 { + label = "Joystick Key 3"; + linux,code = <13>; + gpios = <&gpio 2 3 0>; /* P2.3 */ + }; + + joy4 { + label = "Joystick Key 4"; + linux,code = <14>; diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts index 1e1c2f517a82..ebd19258e22b 100644 --- a/arch/arm/boot/dts/lpc3250-phy3250.dts +++ b/arch/arm/boot/dts/lpc3250-phy3250.dts @@ -1,6 +1,7 @@ /* * PHYTEC phyCORE-LPC3250 board * + * Copyright (C) 2015-2019 Vladimir Zapolskiy * Copyright 2012 Roland Stigge * * The code contained herein is licensed under the GNU General Public @@ -17,45 +18,12 @@ / { model = "PHYTEC phyCORE-LPC3250 board based on NXP LPC3250"; compatible = "phytec,phy3250", "nxp,lpc3250"; - #address-cells = <1>; - #size-cells = <1>; - memory { + memory@80000000 { device_type = "memory"; reg = <0x80000000 0x4000000>; }; - regulators { - backlight_reg: regulator@0 { - compatible = "regulator-fixed"; - regulator-name = "backlight_reg"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - gpio = <&gpio 5 4 0>; - enable-active-high; - regulator-boot-on; - }; - - lcd_reg: regulator@1 { - compatible = "regulator-fixed"; - regulator-name = "lcd_reg"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - gpio = <&gpio 5 0 0>; - enable-active-high; - regulator-boot-on; - }; - - sd_reg: regulator@2 { - compatible = "regulator-fixed"; - regulator-name = "sd_reg"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - gpio = <&gpio 5 5 0>; - enable-active-high; - }; - }; - leds { compatible = "gpio-leds"; @@ -69,10 +37,59 @@ linux,default-trigger = "heartbeat"; }; }; + + panel: panel { + compatible = "sharp,lq035q7db03"; + power-supply = <&reg_lcd>; + + port { + panel_input: endpoint { + remote-endpoint = <&cldc_output>; + }; + }; + }; + + reg_backlight: regulator-backlight { + compatible = "regulator-fixed"; + regulator-name = "backlight"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + gpio = <&gpio 5 4 0>; + enable-active-high; + regulator-boot-on; + }; + + reg_lcd: regulator-lcd { + compatible = "regulator-fixed"; + regulator-name = "lcd"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + gpio = <&gpio 5 0 0>; + enable-active-high; + regulator-boot-on; + }; + + reg_sd: regulator-sd { + compatible = "regulator-fixed"; + regulator-name = "sd"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + gpio = <&gpio 5 5 0>; + enable-active-high; + regulator-boot-on; + }; +}; &clcd { + max-memory-bandwidth = <18710000>; status = "okay"; + + port { + cldc_output: endpoint { + remote-endpoint = <&panel_input>; + arm,pl11x,tft-r0g0b0-pads = <0 8 16>; + }; + }; }; &i2c1 { @@ -130,7 +147,7 @@ cd-gpios = <&gpio 3 1 0>; cd-inverted; bus-width = <4>; - vmmc-supply = <&sd_reg>; + vmmc-supply = <&reg_sd>; status = "okay"; }; diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi index b7303a4e4236..20b38f4ade37 100644 --- a/arch/arm/boot/dts/lpc32xx.dtsi +++ b/arch/arm/boot/dts/lpc32xx.dtsi @@ -11,12 +11,12 @@ * http://www.gnu.org/copyleft/gpl.html */ -#include "skeleton.dtsi" - #include #include / { +
#address-cells = <1>; + #size-cells = <1>; compatible = "nxp,lpc3220"; interrupt-parent = <&mic>; @@ -139,11 +139,11 @@ }; clcd: clcd@31040000 { - compatible = "arm,pl110", "arm,primecell"; + compatible = "arm,pl111", "arm,primecell"; reg = <0x31040000 0x1000>; interrupts = <14 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clk LPC32XX_CLK_LCD>; - clock-names = "apb_pclk"; + clocks = <&clk LPC32XX_CLK_LCD>, <&clk LPC32XX_CLK_LCD>; + clock-names = "clcdclk", "apb_pclk"; status = "disabled"; }; @@ -230,7 +230,7 @@ status = "disabled"; }; - i2s1: i2s@2009C000 { + i2s1: i2s@2009c000 { compatible = "nxp,lpc3220-i2s"; reg = <0x2009C000 0x1000>; }; @@ -273,7 +273,7 @@ status = "disabled"; }; - i2c1: i2c@400A0000 { + i2c1: i2c@400a0000 { compatible = "nxp,pnx-i2c"; reg = <0x400A0000 0x100>; interrupt-parent = <&sic1>; @@ -284,7 +284,7 @@ clocks = <&clk LPC32XX_CLK_I2C1>; }; - i2c2: i2c@400A8000 { + i2c2: i2c@400a8000 { compatible = "nxp,pnx-i2c"; reg = <0x400A8000 0x100>; interrupt-parent = <&sic1>; @@ -295,7 +295,7 @@ clocks = <&clk LPC32XX_CLK_I2C2>; }; - mpwm: mpwm@400E8000 { + mpwm: mpwm@400e8000 { compatible = "nxp,lpc3220-motor-pwm"; reg = <0x400E8000 0x78>; status = "disabled"; @@ -394,7 +394,7 @@ #gpio-cells = <3>; /* bank, pin, flags */ }; - timer4: timer@4002C000 { + timer4: timer@4002c000 { compatible = "nxp,lpc3220-timer"; reg = <0x4002C000 0x1000>; interrupts = <3 IRQ_TYPE_LEVEL_LOW>; @@ -412,7 +412,7 @@ status = "disabled"; }; - watchdog: watchdog@4003C000 { + watchdog: watchdog@4003c000 { compatible = "nxp,pnx4008-wdt"; reg = <0x4003C000 0x1000>; clocks = <&clk LPC32XX_CLK_WDOG>; @@ -451,7 +451,7 @@ status = "disabled"; }; - timer1: timer@4004C000 { + timer1: timer@4004c000 { compatible = "nxp,lpc3220-timer"; reg = <0x4004C000 0x1000>; interrupts = <17 IRQ_TYPE_LEVEL_LOW>; @@ -462,7 +462,9 @@ key: key@40050000 { compatible = "nxp,lpc3220-key"; reg = <0x40050000 0x1000>; - interrupts = <54 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk LPC32XX_CLK_KEY>; + interrupt-parent = <&sic1>; + interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; @@ -475,7 +477,7 @@ status = "disabled"; }; - pwm1: pwm@4005C000 { + pwm1: pwm@4005c000 { compatible = "nxp,lpc3220-pwm"; reg = <0x4005C000 0x4>; clocks = <&clk LPC32XX_CLK_PWM1>; @@ -484,7 +486,7 @@ status = "disabled"; }; - pwm2: pwm@4005C004 { + pwm2: pwm@4005c004 { compatible = "nxp,lpc3220-pwm"; reg = <0x4005C004 0x4>; clocks = <&clk LPC32XX_CLK_PWM2>; diff --git a/arch/arm/boot/dts/lpc4350-hitex-eval.dts b/arch/arm/boot/dts/lpc4350-hitex-eval.dts index 8b973f537d3a..93d0c2e99e7c 100644 --- a/arch/arm/boot/dts/lpc4350-hitex-eval.dts +++ b/arch/arm/boot/dts/lpc4350-hitex-eval.dts @@ -40,8 +40,6 @@ pca_buttons { compatible = "gpio-keys-polled"; - #address-cells = <1>; - #size-cells = <0>; poll-interval = <100>; autorepeat; diff --git a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts index 02b23fa29d75..224f80a4a31d 100644 --- a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts +++ b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts @@ -57,8 +57,6 @@ compatible = "gpio-keys-polled"; pinctrl-names = "default"; pinctrl-0 = <&gpio_joystick_pins>; - #address-cells = <1>; - #size-cells = <0>; poll-interval = <100>; autorepeat; diff --git a/arch/arm/boot/dts/lpc4357-myd-lpc4357.dts b/arch/arm/boot/dts/lpc4357-myd-lpc4357.dts new file mode 100644 index 000000000000..1f84654df50c --- /dev/null +++ b/arch/arm/boot/dts/lpc4357-myd-lpc4357.dts @@ -0,0 +1,619 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* + * MYIR Tech 
MYD-LPC4357 Development Board with 800x480 7" TFT panel + * + * Copyright (C) 2016-2018 Vladimir Zapolskiy + */ + +/dts-v1/; + +#include "lpc18xx.dtsi" +#include "lpc4357.dtsi" + +#include + +/ { + model = "MYIR Tech LPC4357 Development Board"; + compatible = "myir,myd-lpc4357", "nxp,lpc4357"; + + chosen { + stdout-path = "serial3:115200n8"; + }; + + memory@28000000 { + device_type = "memory"; + reg = <0x28000000 0x2000000>; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&led_pins>; + + led1 { + gpios = <&gpio LPC_GPIO(6,15) GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led2 { + gpios = <&gpio LPC_GPIO(6,16) GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led3 { + gpios = <&gpio LPC_GPIO(6,17) GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led4 { + gpios = <&gpio LPC_GPIO(6,10) GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led5 { + gpios = <&gpio LPC_GPIO(7,14) GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + + led6 { + gpios = <&gpio LPC_GPIO(6,14) GPIO_ACTIVE_LOW>; + default-state = "off"; + }; + }; + + panel: panel { + compatible = "innolux,at070tn92"; + + port { + panel_input: endpoint { + remote-endpoint = <&lcdc_output>; + }; + }; + }; + + vcc: vcc_fixed { + compatible = "regulator-fixed"; + regulator-name = "vcc-supply"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + vmmc: vmmc_fixed { + compatible = "regulator-fixed"; + regulator-name = "vmmc-supply"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; +}; + +&pinctrl { + can0_pins: can0-pins { + can_rd_cfg { + pins = "p3_1"; + function = "can0"; + input-enable; + }; + + can_td_cfg { + pins = "p3_2"; + function = "can0"; + }; + }; + + can1_pins: can1-pins { + can_rd_cfg { + pins = "pe_1"; + function = "can1"; + input-enable; + }; + + can_td_cfg { + pins = "pe_0"; + function = "can1"; + }; + }; + + emc_pins: emc-pins { + emc_addr0_22_cfg { + pins = "p2_9", "p2_10", "p2_11", "p2_12", + "p2_13", "p1_0", "p1_1", "p1_2", + "p2_8", "p2_7", "p2_6", "p2_2", + "p2_1", "p2_0", "p6_8", "p6_7", + "pd_16", "pd_15", "pe_0", "pe_1", + "pe_2", "pe_3", "pe_4"; + function = "emc"; + slew-rate = <1>; + bias-disable; + }; + + emc_data0_15_cfg { + pins = "p1_7", "p1_8", "p1_9", "p1_10", + "p1_11", "p1_12", "p1_13", "p1_14", + "p5_4", "p5_5", "p5_6", "p5_7", + "p5_0", "p5_1", "p5_2", "p5_3"; + function = "emc"; + input-enable; + input-schmitt-disable; + slew-rate = <1>; + bias-disable; + }; + + emc_we_oe_cfg { + pins = "p1_6", "p1_3"; + function = "emc"; + slew-rate = <1>; + bias-disable; + }; + + emc_cs0_cfg { + pins = "p1_5"; + function = "emc"; + slew-rate = <1>; + bias-disable; + }; + + emc_sdram_dqm0_1_cfg { + pins = "p6_12", "p6_10"; + function = "emc"; + slew-rate = <1>; + bias-disable; + }; + + emc_sdram_ras_cas_cfg { + pins = "p6_5", "p6_4"; + function = "emc"; + slew-rate = <1>; + bias-disable; + }; + + emc_sdram_dycs0_cfg { + pins = "p6_9"; + function = "emc"; + slew-rate = <1>; + bias-disable; + }; + + emc_sdram_cke_cfg { + pins = "p6_11"; + function = "emc"; + slew-rate = <1>; + bias-disable; + }; + + emc_sdram_clock_cfg { + pins = "clk0"; + function = "emc"; + input-enable; + input-schmitt-disable; + slew-rate = <1>; + bias-disable; + }; + }; + + enet_rmii_pins: enet-rmii-pins { + enet_rmii_rxd_cfg { + pins = "p1_15", "p0_0"; + function = "enet"; + input-enable; + input-schmitt-disable; + slew-rate = <1>; + bias-disable; + }; + + enet_rmii_txd_cfg { + pins = "p1_18", "p1_20"; + function = "enet"; + slew-rate 
= <1>; + bias-disable; + }; + + enet_rmii_rx_dv_cfg { + pins = "p1_16"; + function = "enet"; + input-enable; + input-schmitt-disable; + bias-disable; + }; + + enet_mdio_cfg { + pins = "p1_17"; + function = "enet"; + input-enable; + input-schmitt-disable; + bias-disable; + }; + + enet_mdc_cfg { + pins = "pc_1"; + function = "enet"; + slew-rate = <1>; + bias-disable; + }; + + enet_rmii_tx_en_cfg { + pins = "p0_1"; + function = "enet"; + bias-disable; + }; + + enet_ref_clk_cfg { + pins = "p1_19"; + function = "enet"; + slew-rate = <1>; + input-enable; + input-schmitt-disable; + bias-disable; + }; + }; + + i2c0_pins: i2c0-pins { + i2c0_pins_cfg { + pins = "i2c0_scl", "i2c0_sda"; + function = "i2c0"; + input-enable; + }; + }; + + i2c1_pins: i2c1-pins { + i2c1_pins_cfg { + pins = "pe_15", "pe_13"; + function = "i2c1"; + input-enable; + }; + }; + + lcd_pins: lcd-pins { + lcd_vd0_23_cfg { + pins = "p4_1", "p4_4", "p4_3", "p4_2", + "p8_7", "p8_6", "p8_5", "p8_4", + "p7_5", "p4_8", "p4_10", "p4_9", + "p8_3", "pb_6", "pb_5", "pb_4", + "p7_4", "p7_3", "p7_2", "p7_1", + "pb_3", "pb_2", "pb_1", "pb_0"; + function = "lcd"; + }; + + lcd_vsync_en_dclk_lp_pwr_cfg { + pins = "p4_5", "p4_6", "p4_7", "p7_6", "p7_7"; + function = "lcd"; + }; + }; + + led_pins: led-pins { + led_1_6_cfg { + pins = "pd_1", "pd_2", "pd_3", "pc_11", "pe_14", "pd_0"; + function = "gpio"; + bias-pull-down; + }; + }; + + sdmmc_pins: sdmmc-pins { + sdmmc_clk_cfg { + pins = "pc_0"; + function = "sdmmc"; + slew-rate = <1>; + bias-pull-down; + }; + + sdmmc_cmd_dat0_3_cfg { + pins = "pc_4", "pc_5", "pc_6", "pc_7", "pc_10"; + function = "sdmmc"; + input-enable; + input-schmitt-disable; + slew-rate = <1>; + bias-disable; + }; + + sdmmc_cd_cfg { + pins = "pc_8"; + function = "sdmmc"; + input-enable; + bias-pull-down; + }; + }; + + spifi_pins: spifi-pins { + spifi_sck_cfg { + pins = "p3_3"; + function = "spifi"; + input-enable; + input-schmitt-disable; + slew-rate = <1>; + bias-disable; + }; + + spifi_mosi_miso_sio2_sio3_cfg { + pins = "p3_7", "p3_6", "p3_5", "p3_4"; + function = "spifi"; + input-enable; + input-schmitt-disable; + slew-rate = <1>; + bias-disable; + }; + + spifi_cs_cfg { + pins = "p3_8"; + function = "spifi"; + bias-disable; + }; + }; + + ssp1_pins: ssp1-pins { + ssp1_sck_cfg { + pins = "pf_4"; + function = "ssp1"; + slew-rate = <1>; + bias-pull-down; + }; + + ssp1_miso_cfg { + pins = "pf_6"; + function = "ssp1"; + input-enable; + input-schmitt-disable; + slew-rate = <1>; + bias-pull-down; + }; + + ssp1_mosi_cfg { + pins = "pf_7"; + function = "ssp1"; + slew-rate = <1>; + bias-pull-down; + }; + + ssp1_ssel_cfg { + pins = "pf_5"; + function = "gpio"; + bias-disable; + }; + }; + + uart0_pins: uart0-pins { + uart0_rxd_cfg { + pins = "pf_11"; + function = "uart0"; + input-enable; + input-schmitt-disable; + bias-disable; + }; + + uart0_clk_dir_txd_cfg { + pins = "pf_8", "pf_9", "pf_10"; + function = "uart0"; + bias-pull-down; + }; + }; + + uart1_pins: uart1-pins { + uart1_rxd_cfg { + pins = "pc_14"; + function = "uart1"; + bias-disable; + input-enable; + input-schmitt-disable; + }; + + uart1_dtr_txd_cfg { + pins = "pc_12", "pc_13"; + function = "uart1"; + bias-pull-down; + }; + }; + + uart2_pins: uart2-pins { + uart2_rxd_cfg { + pins = "pa_2"; + function = "uart2"; + bias-disable; + input-enable; + input-schmitt-disable; + }; + + uart2_txd_cfg { + pins = "pa_1"; + function = "uart2"; + bias-pull-down; + }; + }; + + uart3_pins: uart3-pins { + uart3_rx_cfg { + pins = "p2_4"; + function = "uart3"; + bias-disable; + input-enable; + 
input-schmitt-disable; + }; + + uart3_tx_cfg { + pins = "p2_3"; + function = "uart3"; + bias-pull-down; + }; + }; + + usb0_pins: usb0-pins { + usb0_pwr_enable_cfg { + pins = "p6_3"; + function = "usb0"; + }; + + usb0_pwr_fault_cfg { + pins = "p8_0"; + function = "usb0"; + bias-disable; + input-enable; + }; + }; +}; + +&adc1 { + status = "okay"; + vref-supply = <&vcc>; +}; + +&can0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&can0_pins>; +}; + +/* Pin conflict with EMC, muxed by JP5 and JP6 */ +&can1 { + status = "disabled"; + pinctrl-names = "default"; + pinctrl-0 = <&can1_pins>; +}; + +&emc { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&emc_pins>; + + cs0 { + #address-cells = <2>; + #size-cells = <1>; + ranges; + + mpmc,cs = <0>; + mpmc,memory-width = <16>; + mpmc,byte-lane-low; + mpmc,write-enable-delay = <0>; + mpmc,output-enable-delay = <0>; + mpmc,read-access-delay = <70>; + mpmc,page-mode-read-delay = <70>; + + /* SST/Microchip SST39VF1601 */ + flash@0,0 { + compatible = "cfi-flash"; + reg = <0 0 0x400000>; + bank-width = <2>; + }; + }; +}; + +&enet_tx_clk { + clock-frequency = <50000000>; +}; + +&i2c0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; + clock-frequency = <400000>; +}; + +&i2c1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; + clock-frequency = <400000>; + + sensor@49 { + compatible = "lm75"; + reg = <0x49>; + }; + + eeprom@50 { + compatible = "atmel,24c512"; + reg = <0x50>; + }; +}; + +&lcdc { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&lcd_pins>; + + max-memory-bandwidth = <92240000>; + + port { + lcdc_output: endpoint { + remote-endpoint = <&panel_input>; + arm,pl11x,tft-r0g0b0-pads = <0 8 16>; + }; + }; +}; + +&mac { + status = "okay"; + phy-mode = "rmii"; + pinctrl-names = "default"; + pinctrl-0 = <&enet_rmii_pins>; + phy-handle = <&phy1>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + + phy1: ethernet-phy@1 { + reg = <1>; + }; + }; +}; + +&mmcsd { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_pins>; + bus-width = <4>; + vmmc-supply = <&vmmc>; +}; + +/* Pin conflict with SSP0, the latter is routed to J17 pin header */ +&spifi { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&spifi_pins>; + + /* Atmel AT25DF321A */ + flash { + compatible = "jedec,spi-nor"; + spi-max-frequency = <51000000>; + spi-cpol; + spi-cpha; + }; +}; + +&ssp1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&ssp1_pins>; + num-cs = <1>; + cs-gpios = <&gpio LPC_GPIO(7,19) GPIO_ACTIVE_LOW>; +}; + +/* Routed to J17 pin header */ +&uart0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins>; +}; + +/* RS485 */ +&uart1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart1_pins>; +}; + +/* Routed to J17 pin header */ +&uart2 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart2_pins>; +}; + +&uart3 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart3_pins>; +}; + +&usb0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&usb0_pins>; +}; diff --git a/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts b/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts index 6a83f30029ea..ba1ddd93b8f8 100644 --- a/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts +++ b/arch/arm/boot/dts/ls1021a-moxa-uc-8410a.dts @@ -18,6 +18,7 @@ / { model = "Moxa UC-8410A"; + compatible = "fsl,ls1021a-moxa-uc-8410a", "fsl,ls1021a"; aliases { 
enet0_rgmii_phy = &rgmii_phy0; diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts index 923a25760516..ca60730dda40 100644 --- a/arch/arm/boot/dts/ls1021a-qds.dts +++ b/arch/arm/boot/dts/ls1021a-qds.dts @@ -51,6 +51,7 @@ / { model = "LS1021A QDS Board"; + compatible = "fsl,ls1021a-qds", "fsl,ls1021a"; aliases { enet0_rgmii_phy = &rgmii_phy1; diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts index 8b48c3c7cd21..97e1fb7ea932 100644 --- a/arch/arm/boot/dts/ls1021a-twr.dts +++ b/arch/arm/boot/dts/ls1021a-twr.dts @@ -51,6 +51,7 @@ / { model = "LS1021A TWR Board"; + compatible = "fsl,ls1021a-twr", "fsl,ls1021a"; aliases { enet2_rgmii_phy = &rgmii_phy1; diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi index ed0941292172..b4f2723ecd86 100644 --- a/arch/arm/boot/dts/ls1021a.dtsi +++ b/arch/arm/boot/dts/ls1021a.dtsi @@ -45,11 +45,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "skeleton64.dtsi" #include #include / { + #address-cells = <2>; + #size-cells = <2>; compatible = "fsl,ls1021a"; interrupt-parent = <&gic>; @@ -88,6 +89,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0 0x0 0x0>; + }; + sysclk: sysclk { compatible = "fixed-clock"; #clock-cells = <0>; @@ -125,6 +131,13 @@ interrupt-parent = <&gic>; ranges; + ddr: memory-controller@1080000 { + compatible = "fsl,qoriq-memory-controller"; + reg = <0x0 0x1080000 0x0 0x1000>; + interrupts = ; + big-endian; + }; + gic: interrupt-controller@1400000 { compatible = "arm,gic-400", "arm,cortex-a7-gic"; #interrupt-cells = <3>; @@ -706,6 +719,7 @@ fsl,tmr-fiper1 = <999999995>; fsl,tmr-fiper2 = <99990>; fsl,max-adj = <499999999>; + fsl,extts-fifo; }; enet0: ethernet@2d10000 { @@ -811,6 +825,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; pcie@3400000 { @@ -824,6 +839,7 @@ #size-cells = <2>; device_type = "pci"; num-lanes = <4>; + num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -848,6 +864,7 @@ #size-cells = <2>; device_type = "pci"; num-lanes = <4>; + num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index e4645f612712..6f54a8897574 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -47,9 +47,10 @@ #include #include -/include/ "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; interrupt-parent = <&gic>; L2: l2-cache-controller@c4200000 { @@ -72,6 +73,13 @@ #size-cells = <1>; ranges = <0x0 0xc1100000 0x200000>; + hhi: system-controller@4000 { + compatible = "amlogic,meson-hhi-sysctrl", + "simple-mfd", + "syscon"; + reg = <0x4000 0x400>; + }; + assist: assist@7c00 { compatible = "amlogic,meson-mx-assist", "syscon"; reg = <0x7c00 0x200>; @@ -274,7 +282,7 @@ compatible = "amlogic,meson6-dwmac", "snps,dwmac"; reg = <0xc9410000 0x10000 0xc1108108 0x4>; - interrupts = ; + interrupts = ; interrupt-names = "macirq"; status = "disabled"; }; diff --git a/arch/arm/boot/dts/meson6-atv1200.dts b/arch/arm/boot/dts/meson6-atv1200.dts index fc48cff71ddf..997e69c5963e 100644 --- 
a/arch/arm/boot/dts/meson6-atv1200.dts +++ b/arch/arm/boot/dts/meson6-atv1200.dts @@ -61,6 +61,7 @@ }; memory { + device_type = "memory"; reg = <0x40000000 0x80000000>; }; }; diff --git a/arch/arm/boot/dts/meson6.dtsi b/arch/arm/boot/dts/meson6.dtsi index ca978ab952cd..65585255910a 100644 --- a/arch/arm/boot/dts/meson6.dtsi +++ b/arch/arm/boot/dts/meson6.dtsi @@ -70,6 +70,14 @@ }; }; + apb2: bus@d0000000 { + compatible = "simple-bus"; + reg = <0xd0000000 0x40000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0xd0000000 0x40000>; + }; + xtal: xtal-clk { compatible = "fixed-clock"; clock-frequency = <24000000>; diff --git a/arch/arm/boot/dts/meson8-minix-neo-x8.dts b/arch/arm/boot/dts/meson8-minix-neo-x8.dts index 55fb090a40ef..8686abd5de7f 100644 --- a/arch/arm/boot/dts/meson8-minix-neo-x8.dts +++ b/arch/arm/boot/dts/meson8-minix-neo-x8.dts @@ -57,6 +57,7 @@ }; memory { + device_type = "memory"; reg = <0x40000000 0x80000000>; }; diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index e5cd325d7ea8..a9781243453e 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -166,6 +166,32 @@ }; }; + gpu_opp_table: gpu-opp-table { + compatible = "operating-points-v2"; + + opp-182150000 { + opp-hz = /bits/ 64 <182150000>; + opp-microvolt = <1150000>; + }; + opp-318750000 { + opp-hz = /bits/ 64 <318750000>; + opp-microvolt = <1150000>; + }; + opp-425000000 { + opp-hz = /bits/ 64 <425000000>; + opp-microvolt = <1150000>; + }; + opp-510000000 { + opp-hz = /bits/ 64 <510000000>; + opp-microvolt = <1150000>; + }; + opp-637500000 { + opp-hz = /bits/ 64 <637500000>; + opp-microvolt = <1150000>; + turbo-mode; + }; + }; + pmu { compatible = "arm,cortex-a9-pmu"; interrupts = , @@ -201,6 +227,46 @@ no-map; }; }; + + apb: bus@d0000000 { + compatible = "simple-bus"; + reg = <0xd0000000 0x200000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0xd0000000 0x200000>; + + mali: gpu@c0000 { + compatible = "amlogic,meson8-mali", "arm,mali-450"; + reg = <0xc0000 0x40000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "gp", "gpmmu", "pp", "pmu", + "pp0", "ppmmu0", "pp1", "ppmmu1", + "pp2", "ppmmu2", "pp4", "ppmmu4", + "pp5", "ppmmu5", "pp6", "ppmmu6"; + resets = <&reset RESET_MALI>; + clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>; + clock-names = "bus", "core"; + operating-points-v2 = <&gpu_opp_table>; + switch-delay = <0xffff>; + }; + }; }; /* end of / */ &aobus { @@ -261,13 +327,6 @@ }; &cbus { - clkc: clock-controller@4000 { - #clock-cells = <1>; - #reset-cells = <1>; - compatible = "amlogic,meson8-clkc"; - reg = <0x8000 0x4>, <0x4000 0x400>; - }; - reset: reset-controller@4404 { compatible = "amlogic,meson8b-reset"; reg = <0x4404 0x9c>; @@ -390,6 +449,11 @@ compatible = "amlogic,meson8-efuse"; clocks = <&clkc CLKID_EFUSE>; clock-names = "core"; + + temperature_calib: calib@1f4 { + /* only the upper two bytes are relevant */ + reg = <0x1f4 0x4>; + }; }; ðmac { @@ -402,6 +466,14 @@ status = "okay"; }; +&hhi { + clkc: clock-controller { + compatible = "amlogic,meson8-clkc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; +}; + &hwrng { compatible = "amlogic,meson8-rng", "amlogic,meson-rng"; clocks = <&clkc CLKID_RNG0>; @@ -469,6 +541,9 @@ clocks = <&clkc CLKID_XTAL>, <&clkc CLKID_SAR_ADC>; clock-names = "clkin", "core"; + amlogic,hhi-sysctrl = <&hhi>; + nvmem-cells = <&temperature_calib>; + nvmem-cell-names = "temperature_calib"; }; &sdio { diff --git 
a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts index 0872f6e3abf5..3ca9638fad09 100644 --- a/arch/arm/boot/dts/meson8b-ec100.dts +++ b/arch/arm/boot/dts/meson8b-ec100.dts @@ -23,6 +23,7 @@ }; memory { + device_type = "memory"; reg = <0x40000000 0x40000000>; }; @@ -64,6 +65,11 @@ timeout-ms = <20000>; }; + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&saradc 8>; + }; + leds { compatible = "gpio-leds"; @@ -83,6 +89,9 @@ }; usb_vbus: regulator-usb-vbus { + /* + * Silergy SY6288CCAC-GP 2A Power Distribution Switch. + */ compatible = "regulator-fixed"; regulator-name = "USB_VBUS"; @@ -90,11 +99,20 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + vin-supply = <&vcc_5v>; + + /* + * signal name from the schematics: USB_PWR_EN + */ gpio = <&gpio_ao GPIOAO_5 GPIO_ACTIVE_HIGH>; enable-active-high; }; vcc_5v: regulator-vcc5v { + /* + * supplied by the main power input which called PWR_5V_STB + * in the schematics + */ compatible = "regulator-fixed"; regulator-name = "VCC5V"; @@ -102,6 +120,9 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; + /* + * signal name from the schematics: 3V3_5V_EN + */ gpio = <&gpio GPIODV_29 GPIO_ACTIVE_LOW>; regulator-boot-on; @@ -109,12 +130,18 @@ }; vcck: regulator-vcck { + /* + * Silergy SY8089AAC-GP 2A continuous, 3A peak, 1MHz + * Synchronous Step Down Regulator. + */ compatible = "pwm-regulator"; regulator-name = "VCCK"; regulator-min-microvolt = <860000>; regulator-max-microvolt = <1140000>; + vin-supply = <&vcc_5v>; + pwms = <&pwm_cd 0 1148 0>; pwm-dutycycle-range = <100 0>; @@ -123,19 +150,66 @@ }; vcc_1v8: regulator-vcc1v8 { + /* + * ABLIC S-1339D18-M5001-GP + */ compatible = "regulator-fixed"; regulator-name = "VCC1V8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + + vin-supply = <&vcc_3v3>; }; vcc_3v3: regulator-vcc3v3 { + /* + * Silergy SY8089AAC-GP 2A continuous, 3A peak, 1MHz + * Synchronous Step Down Regulator. Also called + * VDDIO_AO3.3V in the schematics. + */ compatible = "regulator-fixed"; regulator-name = "VCC3V3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; + + vin-supply = <&vcc_5v>; + }; + + vcc_ddr3: regulator-vcc-ddr3 { + /* + * Silergy SY8089AAC-GP 2A continuous, 3A peak, 1MHz + * Synchronous Step Down Regulator. Also called + * DDR3_1.5V in the schematics. + */ + compatible = "regulator-fixed"; + + regulator-name = "VCC_DDR3_1V5"; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + + vin-supply = <&vcc_5v>; + + regulator-boot-on; + regulator-always-on; + }; + + vcc_rtc: regulator-vcc-rtc { + /* + * Global Mixed-mode Technology Inc. G918T12U-GP + */ + compatible = "regulator-fixed"; + + regulator-name = "VCC_RTC"; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + + /* + * When the board is powered then the input is VCC3V3, + * otherwise power is taken from the coin cell battery. 
+ */ + vin-supply = <&vcc_3v3>; }; }; @@ -164,6 +238,10 @@ eth_phy0: ethernet-phy@0 { /* IC Plus IP101A/G (0x02430c54) */ reg = <0>; + icplus,select-interrupt; + interrupt-parent = <&gpio_intc>; + /* GPIOH_3 */ + interrupts = <17 IRQ_TYPE_LEVEL_LOW>; }; }; }; @@ -205,13 +283,62 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; }; +&gpio_ao { + gpio-line-names = "Linux_TX", "Linux_RX", + "SLP_S5_N", "USB2_OC_FLAG#", + "HUB_RST", "USB_PWR_EN", + "I2S_IN", "SLP_S1_N", + "TCK", "TMS", "TDI", "TDO", + "HDMI_CEC", "5640_IRQ", + "MUTE", "S805_TEST#"; +}; + +&gpio { + gpio-line-names = /* Bank GPIOX */ + "WIFI_SD_D0", "WIFI_SD_D1", "WIFI_SD_D2", + "WIFI_SD_D3", "BTPCM_DOUT", "BTPCM_DIN", + "BTPCM_SYNC", "BTPCM_CLK", "WIFI_SD_CLK", + "WIFI_SD_CMD", "WIFI_32K", "WIFI_PWREN", + "UART_B_TX", "UART_B_RX", "UART_B_CTS_N", + "UART_B_RTS_N", "BT_EN", "WIFI_WAKE_HOST", + /* Bank GPIOY */ + "", "", "", "", "", "", "", "", "", "", + "", "", + /* Bank GPIODV */ + "VCCK_PWM_C", "I2C_SDA_A", "I2C_SCL_A", + "I2C_SDA_B", "I2C_SCL_B", "VDDEE_PWM_D", + "VDDEE_PWM 3V3_5V_EN", + /* Bank GPIOH */ + "HDMI_HPD", "HDMI_I2C_SDA", "HDMI_I2C_SCL", + "RMII_IRQ", "RMII_RST#", "RMII_TXD1", + "RMII_TXD0", "AV_select_1", "AV_select_2", + "MCU_Control_S", + /* Bank CARD */ + "SD_D1_B", "SD_D0_B", "SD_CLK_8726MX", + "SD_CMD_8726MX", "SD_D3_B", "SD_D2_B", + "CARD_EN_DET (CARD_DET)", + /* Bank BOOT */ + "NAND_D0 (EMMC)", "NAND_D1 (EMMC)", + "NAND_D2 (EMMC)", "NAND_D3 (EMMC)", + "NAND_D4 (EMMC)", "NAND_D5 (EMMC)", + "NAND_D6 (EMMC)", "NAND_D7 (EMMC)", + "NAND_CS1 (EMMC)", "NAND_CS2 iNAND_RS1 (EMMC)", + "NAND_nR/B iNAND_CMD (EMMC)", "NAND_ALE (EMMC)", + "NAND_CLE (EMMC)", "nRE_S1 NAND_nRE (EMMC)", + "nWE_S1 NAND_nWE (EMMC)", "", "", "SPI_CS", + /* Bank DIF */ + "RMII_RXD1", "RMII_RXD0", "RMII_CRS_DV", + "RMII_50M_IN", "GPIODIF_4", "GPIODIF_5", + "RMII_TXEN", "CPUETH_25MOUT", "RMII_MDC", + "RMII_MDIO"; +}; + &pwm_cd { status = "okay"; pinctrl-0 = <&pwm_c1_pins>; diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts index 5c9b76af8d42..08ddd7fb0bf8 100644 --- a/arch/arm/boot/dts/meson8b-mxq.dts +++ b/arch/arm/boot/dts/meson8b-mxq.dts @@ -60,6 +60,7 @@ }; memory { + device_type = "memory"; reg = <0x40000000 0x40000000>; }; }; diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts index 58669abda259..3b0e0f8fbc23 100644 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts @@ -62,6 +62,7 @@ }; memory { + device_type = "memory"; reg = <0x40000000 0x40000000>; }; @@ -118,6 +119,11 @@ 1800000 1>; }; + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&saradc 8>; + }; + vcc_1v8: regulator-vcc-1v8 { /* * RICHTEK RT9179 configured for a fixed output voltage of @@ -221,7 +227,6 @@ /* Realtek RTL8211F (0x001cc916) */ eth_phy: ethernet-phy@0 { reg = <0>; - eee-broken-1000t; interrupt-parent = <&gpio_intc>; /* GPIOH_3 */ interrupts = <17 IRQ_TYPE_LEVEL_LOW>; @@ -273,8 +278,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&tflash_vdd>; vqmmc-supply = <&tf_io>; diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index 22d775460767..fe84a8c3ce81 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -158,6 +158,32 @@ }; }; + gpu_opp_table: gpu-opp-table { + compatible = 
"operating-points-v2"; + + opp-255000000 { + opp-hz = /bits/ 64 <255000000>; + opp-microvolt = <1150000>; + }; + opp-364300000 { + opp-hz = /bits/ 64 <364300000>; + opp-microvolt = <1150000>; + }; + opp-425000000 { + opp-hz = /bits/ 64 <425000000>; + opp-microvolt = <1150000>; + }; + opp-510000000 { + opp-hz = /bits/ 64 <510000000>; + opp-microvolt = <1150000>; + }; + opp-637500000 { + opp-hz = /bits/ 64 <637500000>; + opp-microvolt = <1150000>; + turbo-mode; + }; + }; + pmu { compatible = "arm,cortex-a5-pmu"; interrupts = , @@ -178,6 +204,34 @@ no-map; }; }; + + apb: bus@d0000000 { + compatible = "simple-bus"; + reg = <0xd0000000 0x200000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0xd0000000 0x200000>; + + mali: gpu@c0000 { + compatible = "amlogic,meson8b-mali", "arm,mali-450"; + reg = <0xc0000 0x40000>; + interrupts = , + , + , + , + , + , + , + ; + interrupt-names = "gp", "gpmmu", "pp", "pmu", + "pp0", "ppmmu0", "pp1", "ppmmu1"; + resets = <&reset RESET_MALI>; + clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>; + clock-names = "bus", "core"; + operating-points-v2 = <&gpu_opp_table>; + switch-delay = <0xffff>; + }; + }; }; /* end of / */ &aobus { @@ -222,13 +276,6 @@ }; &cbus { - clkc: clock-controller@4000 { - #clock-cells = <1>; - #reset-cells = <1>; - compatible = "amlogic,meson8b-clkc"; - reg = <0x8000 0x4>, <0x4000 0x400>; - }; - reset: reset-controller@4404 { compatible = "amlogic,meson8b-reset"; reg = <0x4404 0x9c>; @@ -270,9 +317,7 @@ groups = "eth_tx_clk", "eth_tx_en", "eth_txd1_0", - "eth_txd1_1", "eth_txd0_0", - "eth_txd0_1", "eth_rx_clk", "eth_rx_dv", "eth_rxd1", @@ -281,7 +326,9 @@ "eth_mdc", "eth_ref_clk", "eth_txd2", - "eth_txd3"; + "eth_txd3", + "eth_rxd3", + "eth_rxd2"; function = "ethernet"; bias-disable; }; @@ -360,6 +407,11 @@ compatible = "amlogic,meson8b-efuse"; clocks = <&clkc CLKID_EFUSE>; clock-names = "core"; + + temperature_calib: calib@1f4 { + /* only the upper two bytes are relevant */ + reg = <0x1f4 0x4>; + }; }; ðmac { @@ -383,6 +435,14 @@ status = "okay"; }; +&hhi { + clkc: clock-controller { + compatible = "amlogic,meson8-clkc"; + #clock-cells = <1>; + #reset-cells = <1>; + }; +}; + &hwrng { compatible = "amlogic,meson8b-rng", "amlogic,meson-rng"; clocks = <&clkc CLKID_RNG0>; @@ -450,6 +510,9 @@ clocks = <&clkc CLKID_XTAL>, <&clkc CLKID_SAR_ADC>; clock-names = "clkin", "core"; + amlogic,hhi-sysctrl = <&hhi>; + nvmem-cells = <&temperature_calib>; + nvmem-cell-names = "temperature_calib"; }; &sdio { diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts index f5853610b20b..29d830ae4bf4 100644 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts @@ -28,6 +28,7 @@ }; memory { + device_type = "memory"; reg = <0x40000000 0x80000000>; }; @@ -44,6 +45,11 @@ }; }; + iio-hwmon { + compatible = "iio-hwmon"; + io-channels = <&saradc 8>; + }; + vcc_3v3: regulator-vcc3v3 { compatible = "regulator-fixed"; regulator-name = "VCC3V3"; @@ -206,8 +212,7 @@ cap-sd-highspeed; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm/boot/dts/meson8m2.dtsi b/arch/arm/boot/dts/meson8m2.dtsi index d1a28c2adac5..bb87b251e16d 100644 --- a/arch/arm/boot/dts/meson8m2.dtsi +++ b/arch/arm/boot/dts/meson8m2.dtsi @@ -50,6 +50,10 @@ }; }; +&saradc { + compatible = "amlogic,meson8m2-saradc", "amlogic,meson-saradc"; +}; + &wdt { compatible = "amlogic,meson8m2-wdt", 
"amlogic,meson8b-wdt"; }; diff --git a/arch/arm/boot/dts/milbeaut-m10v-evb.dts b/arch/arm/boot/dts/milbeaut-m10v-evb.dts new file mode 100644 index 000000000000..614f60c6b0a2 --- /dev/null +++ b/arch/arm/boot/dts/milbeaut-m10v-evb.dts @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Socionext Milbeaut M10V Evaluation Board */ +/dts-v1/; +#include "milbeaut-m10v.dtsi" + +/ { + model = "Socionext M10V EVB"; + compatible = "socionext,milbeaut-m10v-evb", "socionext,sc2000a"; + + aliases { + serial0 = &uart1; + }; + + chosen { + bootargs = "rootwait earlycon"; + stdout-path = "serial0:115200n8"; + }; + + clocks { + uclk40xi: uclk40xi { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <40000000>; + }; + }; + + memory@40000000 { + device_type = "memory"; + reg = <0x40000000 0x80000000>; + }; + +}; diff --git a/arch/arm/boot/dts/milbeaut-m10v.dtsi b/arch/arm/boot/dts/milbeaut-m10v.dtsi new file mode 100644 index 000000000000..aa7c6caeb750 --- /dev/null +++ b/arch/arm/boot/dts/milbeaut-m10v.dtsi @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +/ { + compatible = "socionext,sc2000a"; + interrupt-parent = <&gic>; + #address-cells = <1>; + #size-cells = <1>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + enable-method = "socionext,milbeaut-m10v-smp"; + cpu@f00 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0xf00>; + }; + cpu@f01 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0xf01>; + }; + cpu@f02 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0xf02>; + }; + cpu@f03 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0xf03>; + }; + }; + + timer { /* The Generic Timer */ + compatible = "arm,armv7-timer"; + interrupts = , + , + , + ; + clock-frequency = <40000000>; + always-on; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + interrupt-parent = <&gic>; + + gic: interrupt-controller@1d000000 { + compatible = "arm,cortex-a7-gic"; + interrupt-controller; + #interrupt-cells = <3>; + reg = <0x1d001000 0x1000>, + <0x1d002000 0x1000>; /* CPU I/f base and size */ + }; + + timer@1e000050 { /* 32-bit Reload Timers */ + compatible = "socionext,milbeaut-timer"; + reg = <0x1e000050 0x20>; + interrupts = <0 91 4>; + }; + + uart1: serial@1e700010 { /* PE4, PE5 */ + /* Enable this as ttyUSI0 */ + compatible = "socionext,milbeaut-usio-uart"; + reg = <0x1e700010 0x10>; + interrupts = <0 141 0x4>, <0 149 0x4>; + interrupt-names = "rx", "tx"; + }; + + }; + + sram@0 { + compatible = "mmio-sram"; + reg = <0x0 0x10000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x0 0x10000>; + smp-sram@f100 { + compatible = "socionext,milbeaut-smp-sram"; + reg = <0xf100 0x20>; + }; + }; +}; diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts index 350208c5e1ed..3da038ba5733 100644 --- a/arch/arm/boot/dts/mmp2-brownstone.dts +++ b/arch/arm/boot/dts/mmp2-brownstone.dts @@ -19,6 +19,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x08000000>; }; diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi index ee03e0846740..f02fb97f515c 100644 --- a/arch/arm/boot/dts/mmp2.dtsi +++ b/arch/arm/boot/dts/mmp2.dtsi @@ -7,10 +7,12 @@ * publishhed by the Free Software Foundation. 
*/ -#include "skeleton.dtsi" #include / { + #address-cells = <1>; + #size-cells = <1>; + aliases { serial0 = &uart1; serial1 = &uart2; diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index ddc7a7bb33c0..f57acf8f66b9 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi @@ -105,7 +105,7 @@ interrupts-extended = < &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 - &cpcap 48 1 + &cpcap 48 0 >; interrupt-names = "id_ground", "id_float", "se0conn", "vbusvld", diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi index da7b3237bfe9..cbf17656bcc7 100644 --- a/arch/arm/boot/dts/moxart.dtsi +++ b/arch/arm/boot/dts/moxart.dtsi @@ -5,10 +5,11 @@ * Licensed under GPLv2 or later. */ -/include/ "skeleton.dtsi" #include / { + #address-cells = <1>; + #size-cells = <1>; compatible = "moxa,moxart"; model = "MOXART"; interrupt-parent = <&intc>; diff --git a/arch/arm/boot/dts/mps2.dtsi b/arch/arm/boot/dts/mps2.dtsi index 23467390558d..96fb5a5cf4d3 100644 --- a/arch/arm/boot/dts/mps2.dtsi +++ b/arch/arm/boot/dts/mps2.dtsi @@ -171,7 +171,7 @@ uart0: serial@4000 { compatible = "arm,mps2-uart"; reg = <0x4000 0x1000>; - interrupts = <0 1 12>; + interrupts = <0>, <1>, <12>; clocks = <&sysclk>; status = "disabled"; }; @@ -179,7 +179,7 @@ uart1: serial@5000 { compatible = "arm,mps2-uart"; reg = <0x5000 0x1000>; - interrupts = <2 3 12>; + interrupts = <2>, <3>, <12>; clocks = <&sysclk>; status = "disabled"; }; @@ -187,7 +187,7 @@ uart2: serial@6000 { compatible = "arm,mps2-uart"; reg = <0x6000 0x1000>; - interrupts = <4 5 12>; + interrupts = <4>, <5>, <12>; clocks = <&sysclk>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/mt2701-evb.dts b/arch/arm/boot/dts/mt2701-evb.dts index be0edb3dae6c..88f8fd22302a 100644 --- a/arch/arm/boot/dts/mt2701-evb.dts +++ b/arch/arm/boot/dts/mt2701-evb.dts @@ -13,6 +13,7 @@ compatible = "mediatek,mt2701-evb", "mediatek,mt2701"; memory { + device_type = "memory"; reg = <0 0x80000000 0 0x40000000>; }; diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi index 180377e56ef4..51e1305c6471 100644 --- a/arch/arm/boot/dts/mt2701.dtsi +++ b/arch/arm/boot/dts/mt2701.dtsi @@ -12,10 +12,11 @@ #include #include #include -#include "skeleton64.dtsi" #include "mt2701-pinfunc.h" / { + #address-cells = <2>; + #size-cells = <2>; compatible = "mediatek,mt2701"; interrupt-parent = <&cirq>; diff --git a/arch/arm/boot/dts/mt6580-evbp1.dts b/arch/arm/boot/dts/mt6580-evbp1.dts index ca137897ed60..755a0774a8ee 100644 --- a/arch/arm/boot/dts/mt6580-evbp1.dts +++ b/arch/arm/boot/dts/mt6580-evbp1.dts @@ -22,6 +22,7 @@ }; memory { + device_type = "memory"; reg = <0x80000000 0x20000000>; }; }; diff --git a/arch/arm/boot/dts/mt6580.dtsi b/arch/arm/boot/dts/mt6580.dtsi index 2bdc5ed12fca..9e17698c0609 100644 --- a/arch/arm/boot/dts/mt6580.dtsi +++ b/arch/arm/boot/dts/mt6580.dtsi @@ -7,7 +7,6 @@ #include #include -#include "skeleton.dtsi" / { compatible = "mediatek,mt6580"; diff --git a/arch/arm/boot/dts/mt6589-aquaris5.dts b/arch/arm/boot/dts/mt6589-aquaris5.dts index 7bbaa1279a26..1e7079a3b449 100644 --- a/arch/arm/boot/dts/mt6589-aquaris5.dts +++ b/arch/arm/boot/dts/mt6589-aquaris5.dts @@ -18,6 +18,7 @@ }; memory { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; diff --git a/arch/arm/boot/dts/mt6589.dtsi b/arch/arm/boot/dts/mt6589.dtsi index 28df8495686a..f3ccb70c0779 100644 --- 
a/arch/arm/boot/dts/mt6589.dtsi +++ b/arch/arm/boot/dts/mt6589.dtsi @@ -7,9 +7,10 @@ #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; compatible = "mediatek,mt6589"; interrupt-parent = <&sysirq>; diff --git a/arch/arm/boot/dts/mt6592-evb.dts b/arch/arm/boot/dts/mt6592-evb.dts index 02849f6548e3..5e00c1cca2d1 100644 --- a/arch/arm/boot/dts/mt6592-evb.dts +++ b/arch/arm/boot/dts/mt6592-evb.dts @@ -13,7 +13,7 @@ compatible = "mediatek,mt6592-evb", "mediatek,mt6592"; memory { + device_type = "memory"; reg = <0x80000000 0x40000000>; }; }; - diff --git a/arch/arm/boot/dts/mt6592.dtsi b/arch/arm/boot/dts/mt6592.dtsi index 8696ac891d60..3716f8db951c 100644 --- a/arch/arm/boot/dts/mt6592.dtsi +++ b/arch/arm/boot/dts/mt6592.dtsi @@ -7,9 +7,10 @@ #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; compatible = "mediatek,mt6592"; interrupt-parent = <&sysirq>; diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index 98f115966391..a79f0b6c3429 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -187,17 +187,26 @@ cooling-maps { map0 { trip = <&cpu_passive>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; map1 { trip = <&cpu_active>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; map2 { trip = <&cpu_hot>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; }; }; diff --git a/arch/arm/boot/dts/mt8127-moose.dts b/arch/arm/boot/dts/mt8127-moose.dts index 308829b2da86..560687af87dc 100644 --- a/arch/arm/boot/dts/mt8127-moose.dts +++ b/arch/arm/boot/dts/mt8127-moose.dts @@ -13,6 +13,7 @@ compatible = "mediatek,mt8127-moose", "mediatek,mt8127"; memory { + device_type = "memory"; reg = <0 0x80000000 0 0x40000000>; }; }; diff --git a/arch/arm/boot/dts/mt8127.dtsi b/arch/arm/boot/dts/mt8127.dtsi index 3adfc6f7859c..aced173c2a52 100644 --- a/arch/arm/boot/dts/mt8127.dtsi +++ b/arch/arm/boot/dts/mt8127.dtsi @@ -7,9 +7,10 @@ #include #include -#include "skeleton64.dtsi" / { + #address-cells = <2>; + #size-cells = <2>; compatible = "mediatek,mt8127"; interrupt-parent = <&sysirq>; diff --git a/arch/arm/boot/dts/mt8135-evbp1.dts b/arch/arm/boot/dts/mt8135-evbp1.dts index 0ace7a40a60d..f6147fe62f41 100644 --- a/arch/arm/boot/dts/mt8135-evbp1.dts +++ b/arch/arm/boot/dts/mt8135-evbp1.dts @@ -13,6 +13,7 @@ compatible = "mediatek,mt8135-evbp1", "mediatek,mt8135"; memory { + device_type = "memory"; reg = <0 0x80000000 0 0x40000000>; }; }; diff --git a/arch/arm/boot/dts/mt8135.dtsi b/arch/arm/boot/dts/mt8135.dtsi index 688069dc1533..0e4e835026db 100644 --- a/arch/arm/boot/dts/mt8135.dtsi +++ b/arch/arm/boot/dts/mt8135.dtsi @@ -9,10 +9,11 @@ #include #include #include -#include "skeleton64.dtsi" #include "mt8135-pinfunc.h" / { + #address-cells = <2>; + #size-cells = <2>; compatible = "mediatek,mt8135"; interrupt-parent = <&sysirq>; diff --git 
a/arch/arm/boot/dts/nspire.dtsi b/arch/arm/boot/dts/nspire.dtsi index 1a5ae4cd107f..5a3c1f9d1832 100644 --- a/arch/arm/boot/dts/nspire.dtsi +++ b/arch/arm/boot/dts/nspire.dtsi @@ -9,9 +9,9 @@ * */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; interrupt-parent = <&intc>; cpus { diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi index 4c1227d1e79b..17c89df6ce6b 100644 --- a/arch/arm/boot/dts/omap3-evm-common.dtsi +++ b/arch/arm/boot/dts/omap3-evm-common.dtsi @@ -122,6 +122,7 @@ }; &mmc2 { + interrupts-extended = <&intc 86 &omap3_pmx_core 0x12e>; vmmc-supply = <&wl12xx_vmmc>; non-removable; bus-width = <4>; @@ -132,8 +133,10 @@ wlcore: wlcore@2 { compatible = "ti,wl1271"; reg = <2>; - interrupt-parent = <&gpio5>; - interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 149 */ + /* gpio_149 with uart1_rts pad as wakeirq */ + interrupts-extended = <&gpio5 21 IRQ_TYPE_EDGE_RISING>, + <&omap3_pmx_core 0x14e>; + interrupt-names = "irq", "wakeup"; ref-clock-frequency = <38400000>; }; }; diff --git a/arch/arm/boot/dts/omap3-evm-processor-common.dtsi b/arch/arm/boot/dts/omap3-evm-processor-common.dtsi index ce7f42f9448c..b4109f48ec18 100644 --- a/arch/arm/boot/dts/omap3-evm-processor-common.dtsi +++ b/arch/arm/boot/dts/omap3-evm-processor-common.dtsi @@ -86,6 +86,10 @@ OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */ OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */ OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */ + OMAP3_CORE1_IOPAD(0x2164, PIN_OUTPUT | MUX_MODE1) /* sdmmc2_dat4.sdmmc2_dir_dat0 */ + OMAP3_CORE1_IOPAD(0x2166, PIN_OUTPUT | MUX_MODE1) /* sdmmc2_dat5.sdmmc2_dir_dat1 */ + OMAP3_CORE1_IOPAD(0x2168, PIN_OUTPUT | MUX_MODE1) /* sdmmc2_dat6.sdmmc2_dir_cmd */ + OMAP3_CORE1_IOPAD(0x216a, PIN_INPUT | MUX_MODE1) /* sdmmc2_dat7.sdmmc2_clkin */ >; }; @@ -127,9 +131,13 @@ >; }; + /* + * Note that gpio_150 pulled high with internal pull to prevent wlcore + * reset on return from off mode in idle. 
+ */ wl12xx_gpio: pinmux_wl12xx_gpio { pinctrl-single,pins = < - OMAP3_CORE1_IOPAD(0x2180, PIN_OUTPUT | MUX_MODE4) /* uart1_cts.gpio_150 */ + OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE7) /* uart1_cts.gpio_150 */ OMAP3_CORE1_IOPAD(0x217e, PIN_INPUT | MUX_MODE4) /* uart1_rts.gpio_149 */ >; }; diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index e53d32691308..04f2b53d4d3d 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -32,6 +32,14 @@ display1 = &tv0; }; + ldo_3v3: fixedregulator { + compatible = "regulator-fixed"; + regulator-name = "ldo_3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + /* fixed 26MHz oscillator */ hfclk_26m: oscillator { #clock-cells = <0>; @@ -116,6 +124,7 @@ spi-cpol; spi-cpha; + backlight= <&backlight>; label = "lcd"; port { lcd_in: endpoint { @@ -125,7 +134,7 @@ }; }; - backlight { + backlight: backlight { compatible = "pwm-backlight"; pwms = <&pwm11 0 12000000 0>; pwm-names = "backlight"; @@ -224,6 +233,15 @@ }; }; +&omap3_pmx_wkup { + gpio1_pins: pinmux_gpio1_pins { + pinctrl-single,pins = < + OMAP3_WKUP_IOPAD(0x2a14, PIN_INPUT | PIN_OFF_WAKEUPENABLE | MUX_MODE4) /* sys_boot5.gpio_7 */ + OMAP3_WKUP_IOPAD(0x2a1a, PIN_INPUT | PIN_OFF_WAKEUPENABLE | MUX_MODE4) /* sys_clkout.gpio_10 */ + >; + }; +}; + &omap3_pmx_core { pinctrl-names = "default"; pinctrl-0 = < @@ -312,6 +330,12 @@ >; }; + gps_pins: pinmux_gps_pins { + pinctrl-single,pins = < + OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT_PULLDOWN | MUX_MODE4) /* gpio145 */ + >; + }; + hdq_pins: hdq_pins { pinctrl-single,pins = < OMAP3_CORE1_IOPAD(0x21c6, PIN_INPUT_PULLUP | MUX_MODE0) /* i2c3_sda.hdq */ @@ -636,6 +660,11 @@ status = "disabled"; }; +&gpio1 { + pinctrl-names = "default"; + pinctrl-0 = <&gpio1_pins>; +}; + &uart1 { pinctrl-names = "default"; pinctrl-0 = <&uart1_pins>; @@ -644,6 +673,14 @@ &uart2 { pinctrl-names = "default"; pinctrl-0 = <&uart2_pins>; + gnss: gnss { + compatible = "wi2wi,w2sg0004"; + pinctrl-names = "default"; + pinctrl-0 = <&gps_pins>; + sirf,onoff-gpios = <&gpio5 17 GPIO_ACTIVE_HIGH>; + lna-supply = <&vsim>; + vcc-supply = <&ldo_3v3>; + }; }; &uart3 { @@ -714,11 +751,7 @@ vdda-supply = <&vdac>; - #address-cells = <1>; - #size-cells = <0>; - port { - reg = <0>; venc_out: endpoint { remote-endpoint = <&opa_in>; ti,channels = <1>; diff --git a/arch/arm/boot/dts/omap3-gta04a5.dts b/arch/arm/boot/dts/omap3-gta04a5.dts index bd232b1b24cb..223b47ac596e 100644 --- a/arch/arm/boot/dts/omap3-gta04a5.dts +++ b/arch/arm/boot/dts/omap3-gta04a5.dts @@ -82,7 +82,7 @@ /* * for WL183x module see - * http://lxr.free-electrons.com/source/Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt + * Documentation/devicetree/bindings/net/wireless/ti,wlcore.txt */ &wifi_pwrseq { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 182a53991c90..826920e6b878 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -814,7 +814,7 @@ /* For debugging, it is often good idea to remove this GPIO. It means you can remove back cover (to reboot by removing battery) and still use the MMC card. 
*/ - cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ + cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ }; /* most boards use vaux3, only some old versions use vmmc2 instead */ diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 0d9b85317529..e142e6c70a59 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi @@ -370,6 +370,19 @@ compatible = "ti,omap2-onenand"; reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ + /* + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported + * bootloader set values when booted with v4.19 using both N950 + * and N9 devices (OneNAND Manufacturer: Samsung): + * + * gpmc cs0 before gpmc_cs_program_settings: + * cs0 GPMC_CS_CONFIG1: 0xfd001202 + * cs0 GPMC_CS_CONFIG2: 0x00181800 + * cs0 GPMC_CS_CONFIG3: 0x00030300 + * cs0 GPMC_CS_CONFIG4: 0x18001804 + * cs0 GPMC_CS_CONFIG5: 0x03171d1d + * cs0 GPMC_CS_CONFIG6: 0x97080000 + */ gpmc,sync-read; gpmc,sync-write; gpmc,burst-length = <16>; @@ -379,26 +392,27 @@ gpmc,device-width = <2>; gpmc,mux-add-data = <2>; gpmc,cs-on-ns = <0>; - gpmc,cs-rd-off-ns = <87>; - gpmc,cs-wr-off-ns = <87>; + gpmc,cs-rd-off-ns = <122>; + gpmc,cs-wr-off-ns = <122>; gpmc,adv-on-ns = <0>; - gpmc,adv-rd-off-ns = <10>; - gpmc,adv-wr-off-ns = <10>; - gpmc,oe-on-ns = <15>; - gpmc,oe-off-ns = <87>; + gpmc,adv-rd-off-ns = <15>; + gpmc,adv-wr-off-ns = <15>; + gpmc,oe-on-ns = <20>; + gpmc,oe-off-ns = <122>; gpmc,we-on-ns = <0>; - gpmc,we-off-ns = <87>; - gpmc,rd-cycle-ns = <112>; - gpmc,wr-cycle-ns = <112>; - gpmc,access-ns = <81>; + gpmc,we-off-ns = <122>; + gpmc,rd-cycle-ns = <148>; + gpmc,wr-cycle-ns = <148>; + gpmc,access-ns = <117>; gpmc,page-burst-access-ns = <15>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,wait-monitoring-ns = <0>; - gpmc,clk-activation-ns = <5>; - gpmc,wr-data-mux-bus-ns = <30>; - gpmc,wr-access-ns = <81>; - gpmc,sync-clk-ps = <15000>; + gpmc,clk-activation-ns = <10>; + gpmc,wr-data-mux-bus-ns = <40>; + gpmc,wr-access-ns = <117>; + + gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? 
*/ /* * MTD partition table corresponding to Nokia's MeeGo 1.2 diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 04758a2a87f0..e21ec929f096 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts @@ -359,20 +359,24 @@ &mmc3 { vmmc-supply = <&wl12xx_vmmc>; + /* uart2_tx.sdmmc3_dat1 pad as wakeirq */ interrupts-extended = <&wakeupgen GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH &omap4_pmx_core 0xde>; - + interrupt-names = "irq", "wakeup"; non-removable; bus-width = <4>; cap-power-off-card; + keep-power-in-suspend; #address-cells = <1>; #size-cells = <0>; wlcore: wlcore@2 { compatible = "ti,wl1285", "ti,wl1283"; reg = <2>; - interrupt-parent = <&gpio4>; - interrupts = <4 IRQ_TYPE_EDGE_RISING>; /* gpio100 */ + /* gpio_100 with gpmc_wait2 pad as wakeirq */ + interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>, + <&omap4_pmx_core 0x4e>; + interrupt-names = "irq", "wakeup"; ref-clock-frequency = <26000000>; tcxo-clock-frequency = <26000000>; }; @@ -644,6 +648,17 @@ }; }; +/* Configure pwm clock source for timers 8 & 9 */ +&timer8 { + assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + +&timer9 { + assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; + assigned-clock-parents = <&sys_clkin_ck>; +}; + /* * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for * uart1 wakeirq. diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi index 27895c1604b9..926f018823a4 100644 --- a/arch/arm/boot/dts/omap4-panda-common.dtsi +++ b/arch/arm/boot/dts/omap4-panda-common.dtsi @@ -485,8 +485,10 @@ wlcore: wlcore@2 { compatible = "ti,wl1271"; reg = <2>; - interrupt-parent = <&gpio2>; - interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 53 */ + /* gpio_53 with gpmc_ncs3 pad as wakeup */ + interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_RISING>, + <&omap4_pmx_core 0x3a>; + interrupt-names = "irq", "wakeup"; ref-clock-frequency = <38400000>; }; }; diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts index 9dc7ec7655cb..c88817bdcc56 100644 --- a/arch/arm/boot/dts/omap4-sdp.dts +++ b/arch/arm/boot/dts/omap4-sdp.dts @@ -26,6 +26,9 @@ }; vdd_eth: fixedregulator-vdd-eth { + pinctrl-names = "default"; + pinctrl-0 = <&enet_enable_gpio>; + compatible = "regulator-fixed"; regulator-name = "VDD_ETH"; regulator-min-microvolt = <3300000>; @@ -352,6 +355,29 @@ OMAP4_IOPAD(0x152, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc5_dat3.sdmmc5_dat3 */ >; }; + + /* gpio_48 for ENET_ENABLE */ + enet_enable_gpio: pinmux_enet_enable_gpio { + pinctrl-single,pins = < + OMAP4_IOPAD(0x070, PIN_OUTPUT_PULLDOWN | MUX_MODE3) /* gpmc_a24.gpio_48 */ + >; + }; + + ks8851_pins: pinmux_ks8851_pins { + pinctrl-single,pins = < + /* ENET_INT */ + OMAP4_IOPAD(0x054, PIN_INPUT_PULLUP | MUX_MODE3) /* gpmc_ad10.gpio_34 */ + /* + * Misterious pin which makes the ethernet working + * The legacy board file requested this pin on boot + * (ETH_KS8851_QUART) and set it to high, similarly to + * the ENET_ENABLE pin. + * We could use gpio-hog to keep it high, but let's use + * it as a reset GPIO for ks8851. 
+ */ + OMAP4_IOPAD(0x13a, PIN_OUTPUT_PULLUP | MUX_MODE3) /* mcspi1_cs1.gpio_138 */ + >; + }; }; &i2c1 { @@ -452,12 +478,16 @@ pinctrl-0 = <&mcspi1_pins>; eth@0 { + pinctrl-names = "default"; + pinctrl-0 = <&ks8851_pins>; + compatible = "ks8851"; spi-max-frequency = <24000000>; reg = <0>; interrupt-parent = <&gpio2>; interrupts = <2 IRQ_TYPE_LEVEL_LOW>; /* gpio line 34 */ vdd-supply = <&vdd_eth>; + reset-gpios = <&gpio5 10 GPIO_ACTIVE_HIGH>; }; }; diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index bc853ebeda22..61a06f6add3c 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -317,7 +317,8 @@ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { pinctrl-single,pins = < - OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) >; }; @@ -385,7 +386,8 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = ; /* IRQ_SYS_1N */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = ; reg = <0x48>; interrupt-controller; #interrupt-cells = <2>; @@ -651,7 +653,8 @@ pinctrl-names = "default"; pinctrl-0 = <&twl6040_pins>; - interrupts = ; /* IRQ_SYS_2N cascaded to gic */ + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = ; /* audpwron gpio defined in the board specific dts */ diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 5e21fb430a65..e78d3718f145 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts @@ -181,6 +181,13 @@ OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ >; }; + + palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { + pinctrl-single,pins = < + /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) + >; + }; }; &omap5_pmx_core { @@ -414,8 +421,11 @@ palmas: palmas@48 { compatible = "ti,palmas"; - interrupts = ; /* IRQ_SYS_1N */ reg = <0x48>; + pinctrl-0 = <&palmas_sys_nirq_pins>; + pinctrl-names = "default"; + /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ + interrupts = ; interrupt-controller; #interrupt-cells = <2>; ti,system-power-controller; diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi index 9c7e309d9c2c..0960348002ad 100644 --- a/arch/arm/boot/dts/omap5-l4.dtsi +++ b/arch/arm/boot/dts/omap5-l4.dtsi @@ -1046,8 +1046,6 @@ , ; ti,syss-mask = <1>; - ti,no-reset-on-init; - ti,no-idle-on-init; /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; clock-names = "fck"; diff --git a/arch/arm/boot/dts/orion5x-lacie-d2-network.dts b/arch/arm/boot/dts/orion5x-lacie-d2-network.dts index 8c2449da6f00..422958d13d42 100644 --- a/arch/arm/boot/dts/orion5x-lacie-d2-network.dts +++ b/arch/arm/boot/dts/orion5x-lacie-d2-network.dts @@ -19,6 +19,7 @@ compatible = "lacie,d2-network", "marvell,orion5x-88f5182", "marvell,orion5x"; memory { + device_type = "memory"; reg = <0x00000000 0x4000000>; /* 64 MB */ }; diff --git a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts index b545d0f228a5..0043e0040153 100644 --- a/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts +++ b/arch/arm/boot/dts/orion5x-lacie-ethernet-disk-mini-v2.dts @@ -25,6 +25,7 @@ compatible = 
"lacie,ethernet-disk-mini-v2", "marvell,orion5x-88f5182", "marvell,orion5x"; memory { + device_type = "memory"; reg = <0x00000000 0x4000000>; /* 64 MB */ }; diff --git a/arch/arm/boot/dts/orion5x-lswsgl.dts b/arch/arm/boot/dts/orion5x-lswsgl.dts index 0d97ded66257..2fbc17d6dfa4 100644 --- a/arch/arm/boot/dts/orion5x-lswsgl.dts +++ b/arch/arm/boot/dts/orion5x-lswsgl.dts @@ -55,6 +55,7 @@ compatible = "buffalo,lswsgl", "marvell,orion5x-88f5182", "marvell,orion5x"; memory { + device_type = "memory"; reg = <0x00000000 0x8000000>; /* 128 MB */ }; diff --git a/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts b/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts index 0324cb54939d..0ca6208a267d 100644 --- a/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts +++ b/arch/arm/boot/dts/orion5x-maxtor-shared-storage-2.dts @@ -19,6 +19,7 @@ compatible = "maxtor,shared-storage-2", "marvell,orion5x-88f5182", "marvell,orion5x"; memory { + device_type = "memory"; reg = <0x00000000 0x4000000>; /* 64 MB */ }; diff --git a/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts b/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts index 9f6ae4e1de06..ea081afa469d 100644 --- a/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts +++ b/arch/arm/boot/dts/orion5x-netgear-wnr854t.dts @@ -21,6 +21,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x2000000>; /* 32 MB */ }; diff --git a/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts b/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts index d1817af53e0b..487324f7c54e 100644 --- a/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts +++ b/arch/arm/boot/dts/orion5x-rd88f5182-nas.dts @@ -16,6 +16,7 @@ compatible = "marvell,rd-88f5182-nas", "marvell,orion5x-88f5182", "marvell,orion5x"; memory { + device_type = "memory"; reg = <0x00000000 0x4000000>; /* 64 MB */ }; diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index fbccfbbab223..61e631b3fd8b 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi @@ -6,11 +6,11 @@ * warranty of any kind, whether express or implied. 
*/ -#include "skeleton.dtsi" - #define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16)) / { + #address-cells = <1>; + #size-cells = <1>; model = "Marvell Orion5x SoC"; compatible = "marvell,orion5x"; interrupt-parent = <&intc>; diff --git a/arch/arm/boot/dts/ox810se.dtsi b/arch/arm/boot/dts/ox810se.dtsi index c2b48a1838eb..3a26650de4eb 100644 --- a/arch/arm/boot/dts/ox810se.dtsi +++ b/arch/arm/boot/dts/ox810se.dtsi @@ -6,11 +6,12 @@ * Licensed under GPLv2 or later */ -/include/ "skeleton.dtsi" #include #include / { + #address-cells = <1>; + #size-cells = <1>; compatible = "oxsemi,ox810se"; cpus { @@ -25,6 +26,7 @@ }; memory { + device_type = "memory"; /* Max 256MB @ 0x48000000 */ reg = <0x48000000 0x10000000>; }; diff --git a/arch/arm/boot/dts/ox820.dtsi b/arch/arm/boot/dts/ox820.dtsi index 085bbd33eadc..f3239586f38d 100644 --- a/arch/arm/boot/dts/ox820.dtsi +++ b/arch/arm/boot/dts/ox820.dtsi @@ -6,12 +6,13 @@ * Licensed under GPLv2 or later */ -/include/ "skeleton.dtsi" #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; compatible = "oxsemi,ox820"; cpus { @@ -35,6 +36,7 @@ }; memory { + device_type = "memory"; /* Max 512MB @ 0x60000000 */ reg = <0x60000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi index a1266cf8776c..291a28f34762 100644 --- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi +++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi @@ -10,7 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ -/include/ "skeleton.dtsi" / { model = "Picochip picoXcell PC3X2"; compatible = "picochip,pc3x2"; diff --git a/arch/arm/boot/dts/picoxcell-pc3x3.dtsi b/arch/arm/boot/dts/picoxcell-pc3x3.dtsi index d78cd207eca1..bf9a39ea76b0 100644 --- a/arch/arm/boot/dts/picoxcell-pc3x3.dtsi +++ b/arch/arm/boot/dts/picoxcell-pc3x3.dtsi @@ -10,7 +10,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ -/include/ "skeleton.dtsi" / { model = "Picochip picoXcell PC3X3"; compatible = "picochip,pc3x3"; diff --git a/arch/arm/boot/dts/prima2-evb.dts b/arch/arm/boot/dts/prima2-evb.dts index 57286b4e7b87..55594b3bbc99 100644 --- a/arch/arm/boot/dts/prima2-evb.dts +++ b/arch/arm/boot/dts/prima2-evb.dts @@ -15,6 +15,7 @@ compatible = "sirf,prima2", "sirf,prima2-cb"; memory { + device_type = "memory"; reg = <0x00000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi index 1ca1a9aa953f..54d4f8850e22 100644 --- a/arch/arm/boot/dts/prima2.dtsi +++ b/arch/arm/boot/dts/prima2.dtsi @@ -6,7 +6,6 @@ * Licensed under GPLv2 or later. */ -/include/ "skeleton.dtsi" / { compatible = "sirf,prima2"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/pxa168.dtsi b/arch/arm/boot/dts/pxa168.dtsi index b899e25cbb1b..7137f3550183 100644 --- a/arch/arm/boot/dts/pxa168.dtsi +++ b/arch/arm/boot/dts/pxa168.dtsi @@ -7,10 +7,12 @@ * publishhed by the Free Software Foundation. */ -#include "skeleton.dtsi" #include / { + #address-cells = <1>; + #size-cells = <1>; + aliases { serial0 = &uart1; serial1 = &uart2; diff --git a/arch/arm/boot/dts/pxa2xx.dtsi b/arch/arm/boot/dts/pxa2xx.dtsi index e83879d97aea..bd6bf6d9300f 100644 --- a/arch/arm/boot/dts/pxa2xx.dtsi +++ b/arch/arm/boot/dts/pxa2xx.dtsi @@ -6,7 +6,6 @@ * Licensed under GPLv2 or later. 
*/ -#include "skeleton.dtsi" #include "dt-bindings/clock/pxa-clock.h" #define PMGROUP(pin) #pin @@ -29,6 +28,8 @@ } / { + #address-cells = <1>; + #size-cells = <1>; model = "Marvell PXA2xx family SoC"; compatible = "marvell,pxa2xx"; interrupt-parent = <&pxairq>; diff --git a/arch/arm/boot/dts/pxa910.dtsi b/arch/arm/boot/dts/pxa910.dtsi index 0868f6729be1..c88553a8ee29 100644 --- a/arch/arm/boot/dts/pxa910.dtsi +++ b/arch/arm/boot/dts/pxa910.dtsi @@ -7,10 +7,12 @@ * publishhed by the Free Software Foundation. */ -#include "skeleton.dtsi" #include / { + #address-cells = <1>; + #size-cells = <1>; + aliases { serial0 = &uart1; serial1 = &uart2; diff --git a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts index 497bb065eb9d..4e6c50d45cb2 100644 --- a/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts +++ b/arch/arm/boot/dts/qcom-apq8060-dragonboard.dts @@ -93,9 +93,8 @@ vdd-supply = <&pm8058_l14>; // 2.85V aset-gpios = <&pm8058_gpio 35 GPIO_ACTIVE_LOW>; capella,aset-resistance-ohms = <100000>; - /* GPIO34 has interrupt 225 on the PM8058 */ /* Trig on both edges - getting close or far away */ - interrupts-extended = <&pm8058 225 IRQ_TYPE_EDGE_BOTH>; + interrupts-extended = <&pm8058_gpio 34 IRQ_TYPE_EDGE_BOTH>; /* MPP05 analog input to the XOADC */ io-channels = <&xoadc 0x00 0x05>; io-channel-names = "aout"; @@ -515,9 +514,8 @@ ak8975@c { compatible = "asahi-kasei,ak8975"; reg = <0x0c>; - /* FIXME: GPIO33 has interrupt 224 on the PM8058 */ - interrupt-parent = <&pm8058>; - interrupts = <224 IRQ_TYPE_EDGE_RISING>; + interrupt-parent = <&pm8058_gpio>; + interrupts = <33 IRQ_TYPE_EDGE_RISING>; pinctrl-names = "default"; pinctrl-0 = <&dragon_ak8975_gpios>; vid-supply = <&pm8058_lvs0>; // 1.8V @@ -526,9 +524,8 @@ bmp085@77 { compatible = "bosch,bmp085"; reg = <0x77>; - /* FIXME: GPIO16 has interrupt 207 on the PM8058 */ - interrupt-parent = <&pm8058>; - interrupts = <207 IRQ_TYPE_EDGE_RISING>; + interrupt-parent = <&pm8058_gpio>; + interrupts = <16 IRQ_TYPE_EDGE_RISING>; reset-gpios = <&tlmm 86 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&dragon_bmp085_gpios>; @@ -539,12 +536,11 @@ compatible = "invensense,mpu3050"; reg = <0x68>; /* - * GPIO17 has interrupt 208 on the - * PM8058, it is pulled high by a 10k + * GPIO17 is pulled high by a 10k * resistor to VLOGIC so needs to be * active low/falling edge. */ - interrupts-extended = <&pm8058 208 IRQ_TYPE_EDGE_FALLING>; + interrupts-extended = <&pm8058_gpio 17 IRQ_TYPE_EDGE_FALLING>; pinctrl-names = "default"; pinctrl-0 = <&dragon_mpu3050_gpios>; vlogic-supply = <&pm8058_lvs0>; // 1.8V @@ -589,11 +585,10 @@ compatible = "smsc,lan9221", "smsc,lan9115"; reg = <2 0x0 0x100>; /* - * GPIO7 has interrupt 198 on the PM8058 * The second interrupt is the PME interrupt * for network wakeup, connected to the TLMM. 
*/ - interrupts-extended = <&pm8058 198 IRQ_TYPE_EDGE_FALLING>, + interrupts-extended = <&pm8058_gpio 7 IRQ_TYPE_EDGE_FALLING>, <&tlmm 29 IRQ_TYPE_EDGE_RISING>; reset-gpios = <&tlmm 30 GPIO_ACTIVE_LOW>; vdd33a-supply = <&dragon_veth>; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index 48c3cf427610..bd6907db615b 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -#include "skeleton.dtsi" #include #include #include @@ -10,6 +9,8 @@ #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Qualcomm APQ8064"; compatible = "qcom,apq8064"; interrupt-parent = <&intc>; @@ -94,6 +95,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + thermal-zones { cpu-thermal0 { polling-delay-passive = <250>; @@ -705,50 +711,8 @@ compatible = "qcom,pm8921-gpio", "qcom,ssbi-gpio"; reg = <0x150>; - interrupts = <192 IRQ_TYPE_NONE>, - <193 IRQ_TYPE_NONE>, - <194 IRQ_TYPE_NONE>, - <195 IRQ_TYPE_NONE>, - <196 IRQ_TYPE_NONE>, - <197 IRQ_TYPE_NONE>, - <198 IRQ_TYPE_NONE>, - <199 IRQ_TYPE_NONE>, - <200 IRQ_TYPE_NONE>, - <201 IRQ_TYPE_NONE>, - <202 IRQ_TYPE_NONE>, - <203 IRQ_TYPE_NONE>, - <204 IRQ_TYPE_NONE>, - <205 IRQ_TYPE_NONE>, - <206 IRQ_TYPE_NONE>, - <207 IRQ_TYPE_NONE>, - <208 IRQ_TYPE_NONE>, - <209 IRQ_TYPE_NONE>, - <210 IRQ_TYPE_NONE>, - <211 IRQ_TYPE_NONE>, - <212 IRQ_TYPE_NONE>, - <213 IRQ_TYPE_NONE>, - <214 IRQ_TYPE_NONE>, - <215 IRQ_TYPE_NONE>, - <216 IRQ_TYPE_NONE>, - <217 IRQ_TYPE_NONE>, - <218 IRQ_TYPE_NONE>, - <219 IRQ_TYPE_NONE>, - <220 IRQ_TYPE_NONE>, - <221 IRQ_TYPE_NONE>, - <222 IRQ_TYPE_NONE>, - <223 IRQ_TYPE_NONE>, - <224 IRQ_TYPE_NONE>, - <225 IRQ_TYPE_NONE>, - <226 IRQ_TYPE_NONE>, - <227 IRQ_TYPE_NONE>, - <228 IRQ_TYPE_NONE>, - <229 IRQ_TYPE_NONE>, - <230 IRQ_TYPE_NONE>, - <231 IRQ_TYPE_NONE>, - <232 IRQ_TYPE_NONE>, - <233 IRQ_TYPE_NONE>, - <234 IRQ_TYPE_NONE>, - <235 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; gpio-controller; #gpio-cells = <2>; diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi index 899f28533ed7..0a0fb147ebb9 100644 --- a/arch/arm/boot/dts/qcom-apq8084.dtsi +++ b/arch/arm/boot/dts/qcom-apq8084.dtsi @@ -1,12 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -#include "skeleton.dtsi" - #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Qualcomm APQ 8084"; compatible = "qcom,apq8084"; interrupt-parent = <&intc>; @@ -87,6 +87,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + firmware { scm { compatible = "qcom,scm"; diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi index 2d56008d8d6b..9e75f97770ce 100644 --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi @@ -13,12 +13,14 @@ /dts-v1/; -#include "skeleton.dtsi" #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; + model = "Qualcomm Technologies, Inc. 
IPQ4019"; compatible = "qcom,ipq4019"; interrupt-parent = <&intc>; @@ -133,6 +135,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + pmu { compatible = "arm,cortex-a7-pmu"; interrupts = ; - interrupts = ; + interrupts = ; interrupt-names = "msi"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; diff --git a/arch/arm/boot/dts/qcom-ipq8064.dtsi b/arch/arm/boot/dts/qcom-ipq8064.dtsi index f793cd1ad6d0..16c0da97932c 100644 --- a/arch/arm/boot/dts/qcom-ipq8064.dtsi +++ b/arch/arm/boot/dts/qcom-ipq8064.dtsi @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -#include "skeleton.dtsi" #include #include #include @@ -11,6 +10,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Qualcomm IPQ8064"; compatible = "qcom,ipq8064"; interrupt-parent = <&intc>; @@ -45,6 +46,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + cpu-pmu { compatible = "qcom,krait-pmu"; interrupts = ; }; }; diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi index c852b69229c9..02afc6a42005 100644 --- a/arch/arm/boot/dts/qcom-mdm9615.dtsi +++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi @@ -45,8 +45,6 @@ /dts-v1/; -/include/ "skeleton.dtsi" - #include #include #include @@ -54,6 +52,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Qualcomm MDM9615"; compatible = "qcom,mdm9615"; interrupt-parent = <&intc>; @@ -323,13 +323,8 @@ pmicgpio: gpio@150 { compatible = "qcom,pm8018-gpio", "qcom,ssbi-gpio"; - interrupt-parent = <&pmicintc>; - interrupts = <24 IRQ_TYPE_NONE>, - <25 IRQ_TYPE_NONE>, - <26 IRQ_TYPE_NONE>, - <27 IRQ_TYPE_NONE>, - <28 IRQ_TYPE_NONE>, - <29 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; gpio-controller; #gpio-cells = <2>; }; diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi index 70698941f64c..65a994f0e09b 100644 --- a/arch/arm/boot/dts/qcom-msm8660.dtsi +++ b/arch/arm/boot/dts/qcom-msm8660.dtsi @@ -1,14 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "skeleton.dtsi" - #include #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Qualcomm MSM8660"; compatible = "qcom,msm8660"; interrupt-parent = <&intc>; @@ -39,6 +39,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + cpu-pmu { compatible = "qcom,scorpion-mp-pmu"; interrupts = <1 9 0x304>; @@ -133,6 +138,7 @@ #address-cells = <1>; #size-cells = <1>; ranges; + status = "disabled"; syscon-tcsr = <&tcsr>; @@ -140,7 +146,7 @@ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; reg = <0x16540000 0x1000>, <0x16500000 0x1000>; - interrupts = ; + interrupts = ; clocks = <&gcc GSBI6_UART_CLK>, <&gcc GSBI6_H_CLK>; clock-names = "core", "iface"; status = "disabled"; @@ -149,7 +155,7 @@ gsbi6_i2c: i2c@16580000 { compatible = "qcom,i2c-qup-v1.1.1"; reg = <0x16580000 0x1000>; - interrupts = ; + interrupts = ; clocks = <&gcc GSBI6_QUP_CLK>, <&gcc GSBI6_H_CLK>; clock-names = "core", "iface"; #address-cells = <1>; @@ -167,6 +173,7 @@ #address-cells = <1>; #size-cells = <1>; ranges; + status = "disabled"; syscon-tcsr = <&tcsr>; @@ -174,7 +181,7 @@ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; reg = <0x16640000 0x1000>, <0x16600000 0x1000>; - interrupts = ; + interrupts = ; clocks = <&gcc GSBI7_UART_CLK>, <&gcc GSBI7_H_CLK>; clock-names = "core", "iface"; status = "disabled"; @@ -183,7 +190,7 @@ gsbi7_i2c: i2c@16680000 { compatible = "qcom,i2c-qup-v1.1.1"; reg = <0x16680000 0x1000>; - interrupts = ; + 
interrupts = ; clocks = <&gcc GSBI7_QUP_CLK>, <&gcc GSBI7_H_CLK>; clock-names = "core", "iface"; #address-cells = <1>; @@ -207,7 +214,7 @@ gsbi8_i2c: i2c@19880000 { compatible = "qcom,i2c-qup-v1.1.1"; reg = <0x19880000 0x1000>; - interrupts = ; + interrupts = ; clocks = <&gcc GSBI8_QUP_CLK>, <&gcc GSBI8_H_CLK>; clock-names = "core", "iface"; #address-cells = <1>; @@ -232,7 +239,7 @@ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm"; reg = <0x19c40000 0x1000>, <0x19c00000 0x1000>; - interrupts = <0 195 IRQ_TYPE_NONE>; + interrupts = <0 195 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GSBI12_UART_CLK>, <&gcc GSBI12_H_CLK>; clock-names = "core", "iface"; status = "disabled"; @@ -241,7 +248,7 @@ gsbi12_i2c: i2c@19c80000 { compatible = "qcom,i2c-qup-v1.1.1"; reg = <0x19c80000 0x1000>; - interrupts = <0 196 IRQ_TYPE_NONE>; + interrupts = <0 196 IRQ_TYPE_LEVEL_HIGH>; clocks = <&gcc GSBI12_QUP_CLK>, <&gcc GSBI12_H_CLK>; clock-names = "core", "iface"; #address-cells = <1>; @@ -285,51 +292,8 @@ compatible = "qcom,pm8058-gpio", "qcom,ssbi-gpio"; reg = <0x150>; - interrupt-parent = <&pm8058>; - interrupts = <192 IRQ_TYPE_NONE>, - <193 IRQ_TYPE_NONE>, - <194 IRQ_TYPE_NONE>, - <195 IRQ_TYPE_NONE>, - <196 IRQ_TYPE_NONE>, - <197 IRQ_TYPE_NONE>, - <198 IRQ_TYPE_NONE>, - <199 IRQ_TYPE_NONE>, - <200 IRQ_TYPE_NONE>, - <201 IRQ_TYPE_NONE>, - <202 IRQ_TYPE_NONE>, - <203 IRQ_TYPE_NONE>, - <204 IRQ_TYPE_NONE>, - <205 IRQ_TYPE_NONE>, - <206 IRQ_TYPE_NONE>, - <207 IRQ_TYPE_NONE>, - <208 IRQ_TYPE_NONE>, - <209 IRQ_TYPE_NONE>, - <210 IRQ_TYPE_NONE>, - <211 IRQ_TYPE_NONE>, - <212 IRQ_TYPE_NONE>, - <213 IRQ_TYPE_NONE>, - <214 IRQ_TYPE_NONE>, - <215 IRQ_TYPE_NONE>, - <216 IRQ_TYPE_NONE>, - <217 IRQ_TYPE_NONE>, - <218 IRQ_TYPE_NONE>, - <219 IRQ_TYPE_NONE>, - <220 IRQ_TYPE_NONE>, - <221 IRQ_TYPE_NONE>, - <222 IRQ_TYPE_NONE>, - <223 IRQ_TYPE_NONE>, - <224 IRQ_TYPE_NONE>, - <225 IRQ_TYPE_NONE>, - <226 IRQ_TYPE_NONE>, - <227 IRQ_TYPE_NONE>, - <228 IRQ_TYPE_NONE>, - <229 IRQ_TYPE_NONE>, - <230 IRQ_TYPE_NONE>, - <231 IRQ_TYPE_NONE>, - <232 IRQ_TYPE_NONE>, - <233 IRQ_TYPE_NONE>, - <234 IRQ_TYPE_NONE>, - <235 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; gpio-controller; #gpio-cells = <2>; diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi index 1733d8f40ab1..f2aeaccdc1ad 100644 --- a/arch/arm/boot/dts/qcom-msm8960.dtsi +++ b/arch/arm/boot/dts/qcom-msm8960.dtsi @@ -1,14 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "skeleton.dtsi" - #include #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Qualcomm MSM8960"; compatible = "qcom,msm8960"; interrupt-parent = <&intc>; @@ -44,6 +44,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + cpu-pmu { compatible = "qcom,krait-pmu"; interrupts = <1 10 0x304>; diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts index 51444c53fc72..b3b04736a159 100644 --- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts +++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts @@ -220,6 +220,20 @@ }; }; }; + + vreg_wlan: wlan-regulator { + compatible = "regulator-fixed"; + + regulator-name = "wl-reg"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + gpio = <&msmgpio 26 GPIO_ACTIVE_HIGH>; + enable-active-high; + + pinctrl-names = "default"; + pinctrl-0 = <&wlan_regulator_pin>; + }; }; &soc { @@ -242,6 +256,30 @@ }; }; + sdhc2_pin_a: sdhc2-pin-active { + clk { + 
pins = "sdc2_clk"; + drive-strength = <6>; + bias-disable; + }; + + cmd-data { + pins = "sdc2_cmd", "sdc2_data"; + drive-strength = <6>; + bias-pull-up; + }; + }; + + i2c1_pins: i2c1 { + mux { + pins = "gpio2", "gpio3"; + function = "blsp_i2c1"; + + drive-strength = <2>; + bias-disable; + }; + }; + i2c3_pins: i2c3 { mux { pins = "gpio10", "gpio11"; @@ -283,6 +321,32 @@ pinctrl-0 = <&sdhc1_pin_a>; }; + sdhci@f98a4900 { + status = "ok"; + + max-frequency = <100000000>; + bus-width = <4>; + non-removable; + vmmc-supply = <&vreg_wlan>; + vqmmc-supply = <&pm8941_s3>; + + pinctrl-names = "default"; + pinctrl-0 = <&sdhc2_pin_a>; + + #address-cells = <1>; + #size-cells = <0>; + + bcrmf@1 { + compatible = "brcm,bcm4339-fmac", "brcm,bcm4329-fmac"; + reg = <1>; + + brcm,drive-strength = <10>; + + pinctrl-names = "default"; + pinctrl-0 = <&wlan_sleep_clk_pin>; + }; + }; + gpio-keys { compatible = "gpio-keys"; input-name = "gpio-keys"; @@ -342,6 +406,24 @@ }; }; + i2c@f9923000 { + status = "ok"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; + clock-frequency = <100000>; + qcom,src-freq = <50000000>; + + charger: bq24192@6b { + compatible = "ti,bq24192"; + reg = <0x6b>; + interrupts-extended = <&spmi_bus 0 0xd5 0 IRQ_TYPE_EDGE_FALLING>; + + omit-battery-class; + + usb_otg_vbus: usb-otg-vbus { }; + }; + }; + i2c@f9925000 { status = "ok"; pinctrl-names = "default"; @@ -359,6 +441,31 @@ amstaos,proximity-diodes = <0>; }; }; + + usb@f9a55000 { + status = "ok"; + + phys = <&usb_hs1_phy>; + phy-select = <&tcsr 0xb000 0>; + + extcon = <&charger>, <&usb_id>; + vbus-supply = <&usb_otg_vbus>; + + hnp-disable; + srp-disable; + adp-disable; + + ulpi { + phy@a { + status = "ok"; + + v1p8-supply = <&pm8941_l6>; + v3p3-supply = <&pm8941_l24>; + + qcom,init-seq = /bits/ 8 <0x1 0x64>; + }; + }; + }; }; &spmi_bus { @@ -371,6 +478,29 @@ bias-pull-up; power-source = ; }; + + wlan_sleep_clk_pin: wl-sleep-clk { + pins = "gpio16"; + function = "func2"; + + output-high; + power-source = ; + }; + + wlan_regulator_pin: wl-reg-active { + pins = "gpio17"; + function = "normal"; + + bias-disable; + power-source = ; + }; + + otg { + gpio-hog; + gpios = <35 GPIO_ACTIVE_HIGH>; + output-high; + line-name = "otg-gpio"; + }; }; }; }; diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi index ca266a5f021d..45b5c8ef0374 100644 --- a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi @@ -6,9 +6,10 @@ #include #include #include -#include "skeleton.dtsi" / { + #address-cells = <1>; + #size-cells = <1>; model = "Qualcomm MSM8974"; compatible = "qcom,msm8974"; interrupt-parent = <&intc>; @@ -130,6 +131,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + thermal-zones { cpu-thermal0 { polling-delay-passive = <250>; @@ -706,6 +712,17 @@ interrupts = ; }; + i2c@f9923000 { + status = "disabled"; + compatible = "qcom,i2c-qup-v2.1.1"; + reg = <0xf9923000 0x1000>; + interrupts = <0 95 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + #address-cells = <1>; + #size-cells = <0>; + }; + i2c@f9924000 { status = "disabled"; compatible = "qcom,i2c-qup-v2.1.1"; diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi index 2515c5c217ac..f198480c8ef4 100644 --- a/arch/arm/boot/dts/qcom-pm8941.dtsi +++ b/arch/arm/boot/dts/qcom-pm8941.dtsi @@ -63,43 +63,10 @@ compatible = "qcom,pm8941-gpio", "qcom,spmi-gpio"; reg = <0xc000>; gpio-controller; + gpio-ranges = 
<&pm8941_gpios 0 0 36>; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>, - <0 0xc4 0 IRQ_TYPE_NONE>, - <0 0xc5 0 IRQ_TYPE_NONE>, - <0 0xc6 0 IRQ_TYPE_NONE>, - <0 0xc7 0 IRQ_TYPE_NONE>, - <0 0xc8 0 IRQ_TYPE_NONE>, - <0 0xc9 0 IRQ_TYPE_NONE>, - <0 0xca 0 IRQ_TYPE_NONE>, - <0 0xcb 0 IRQ_TYPE_NONE>, - <0 0xcc 0 IRQ_TYPE_NONE>, - <0 0xcd 0 IRQ_TYPE_NONE>, - <0 0xce 0 IRQ_TYPE_NONE>, - <0 0xcf 0 IRQ_TYPE_NONE>, - <0 0xd0 0 IRQ_TYPE_NONE>, - <0 0xd1 0 IRQ_TYPE_NONE>, - <0 0xd2 0 IRQ_TYPE_NONE>, - <0 0xd3 0 IRQ_TYPE_NONE>, - <0 0xd4 0 IRQ_TYPE_NONE>, - <0 0xd5 0 IRQ_TYPE_NONE>, - <0 0xd6 0 IRQ_TYPE_NONE>, - <0 0xd7 0 IRQ_TYPE_NONE>, - <0 0xd8 0 IRQ_TYPE_NONE>, - <0 0xd9 0 IRQ_TYPE_NONE>, - <0 0xda 0 IRQ_TYPE_NONE>, - <0 0xdb 0 IRQ_TYPE_NONE>, - <0 0xdc 0 IRQ_TYPE_NONE>, - <0 0xdd 0 IRQ_TYPE_NONE>, - <0 0xde 0 IRQ_TYPE_NONE>, - <0 0xdf 0 IRQ_TYPE_NONE>, - <0 0xe0 0 IRQ_TYPE_NONE>, - <0 0xe1 0 IRQ_TYPE_NONE>, - <0 0xe2 0 IRQ_TYPE_NONE>, - <0 0xe3 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; boost_bypass_n_pin: boost-bypass { pins = "gpio21"; diff --git a/arch/arm/boot/dts/qcom-pma8084.dtsi b/arch/arm/boot/dts/qcom-pma8084.dtsi index aac7e73b6872..8f5ea7add20f 100644 --- a/arch/arm/boot/dts/qcom-pma8084.dtsi +++ b/arch/arm/boot/dts/qcom-pma8084.dtsi @@ -32,28 +32,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>, - <0 0xc4 0 IRQ_TYPE_NONE>, - <0 0xc5 0 IRQ_TYPE_NONE>, - <0 0xc6 0 IRQ_TYPE_NONE>, - <0 0xc7 0 IRQ_TYPE_NONE>, - <0 0xc8 0 IRQ_TYPE_NONE>, - <0 0xc9 0 IRQ_TYPE_NONE>, - <0 0xca 0 IRQ_TYPE_NONE>, - <0 0xcb 0 IRQ_TYPE_NONE>, - <0 0xcc 0 IRQ_TYPE_NONE>, - <0 0xcd 0 IRQ_TYPE_NONE>, - <0 0xce 0 IRQ_TYPE_NONE>, - <0 0xcf 0 IRQ_TYPE_NONE>, - <0 0xd0 0 IRQ_TYPE_NONE>, - <0 0xd1 0 IRQ_TYPE_NONE>, - <0 0xd2 0 IRQ_TYPE_NONE>, - <0 0xd3 0 IRQ_TYPE_NONE>, - <0 0xd4 0 IRQ_TYPE_NONE>, - <0 0xd5 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; pma8084_mpps: mpps@a000 { diff --git a/arch/arm/boot/dts/r7s9210-rza2mevb.dts b/arch/arm/boot/dts/r7s9210-rza2mevb.dts new file mode 100644 index 000000000000..991e09de1219 --- /dev/null +++ b/arch/arm/boot/dts/r7s9210-rza2mevb.dts @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the RZA2MEVB board + * + * Copyright (C) 2018 Renesas Electronics + * + */ + +/dts-v1/; +#include "r7s9210.dtsi" +#include +#include + +/ { + model = "RZA2MEVB"; + compatible = "renesas,rza2mevb", "renesas,r7s9210"; + + aliases { + serial0 = &scif4; + }; + + chosen { + bootargs = "ignore_loglevel"; + stdout-path = "serial0:115200n8"; + }; + + memory@40000000 { + device_type = "memory"; + reg = <0x40000000 0x00800000>; /* HyperRAM */ + }; + + lbsc { + #address-cells = <1>; + #size-cells = <1>; + }; + + leds { + compatible = "gpio-leds"; + + red { + gpios = <&pinctrl RZA2_PIN(PORT6, 0) GPIO_ACTIVE_HIGH>; + }; + green { + gpios = <&pinctrl RZA2_PIN(PORTC, 1) GPIO_ACTIVE_HIGH>; + }; + }; +}; + +/* EXTAL */ +&extal_clk { + clock-frequency = <24000000>; /* 24MHz */ +}; + +/* RTC_X1 */ +&rtc_x1_clk { + clock-frequency = <32768>; +}; + +&pinctrl { + /* Serial Console */ + scif4_pins: serial4 { + pinmux = , /* TxD4 */ + ; /* RxD4 */ + }; +}; + +/* High resolution System tick timers */ +&ostm0 { + status = "okay"; +}; + +&ostm1 { + status = "okay"; +}; + +/* Serial Console */ +&scif4 { + 
pinctrl-names = "default"; + pinctrl-0 = <&scif4_pins>; + + status = "okay"; +}; diff --git a/arch/arm/boot/dts/r7s9210.dtsi b/arch/arm/boot/dts/r7s9210.dtsi new file mode 100644 index 000000000000..22baa96f5974 --- /dev/null +++ b/arch/arm/boot/dts/r7s9210.dtsi @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the R7S9210 SoC + * + * Copyright (C) 2018 Renesas Electronics Corporation + * + */ + +#include +#include + +/ { + compatible = "renesas,r7s9210"; + interrupt-parent = <&gic>; + #address-cells = <1>; + #size-cells = <1>; + + /* External clocks */ + extal_clk: extal { + #clock-cells = <0>; + compatible = "fixed-clock"; + /* Value must be set by board */ + clock-frequency = <0>; + }; + + rtc_x1_clk: rtc_x1 { + #clock-cells = <0>; + compatible = "fixed-clock"; + /* If clk present, value (32678) must be set by board */ + clock-frequency = <0>; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a9"; + reg = <0>; + clock-frequency = <528000000>; + next-level-cache = <&L2>; + }; + }; + + soc { + compatible = "simple-bus"; + interrupt-parent = <&gic>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + L2: cache-controller@1f003000 { + compatible = "arm,pl310-cache"; + reg = <0x1f003000 0x1000>; + interrupts = ; + arm,early-bresp-disable; + arm,full-line-zero-disable; + cache-unified; + cache-level = <2>; + }; + + scif0: serial@e8007000 { + compatible = "renesas,scif-r7s9210"; + reg = <0xe8007000 0x18>; + interrupts = , + , + , + , + , + ; + interrupt-names = "eri", "rxi", "txi", + "bri", "dri", "tei"; + clocks = <&cpg CPG_MOD 47>; + clock-names = "fck"; + power-domains = <&cpg>; + status = "disabled"; + }; + + scif1: serial@e8007800 { + compatible = "renesas,scif-r7s9210"; + reg = <0xe8007800 0x18>; + interrupts = , + , + , + , + , + ; + interrupt-names = "eri", "rxi", "txi", + "bri", "dri", "tei"; + clocks = <&cpg CPG_MOD 46>; + clock-names = "fck"; + power-domains = <&cpg>; + status = "disabled"; + }; + + scif2: serial@e8008000 { + compatible = "renesas,scif-r7s9210"; + reg = <0xe8008000 0x18>; + interrupts = , + , + , + , + , + ; + interrupt-names = "eri", "rxi", "txi", + "bri", "dri", "tei"; + clocks = <&cpg CPG_MOD 45>; + clock-names = "fck"; + power-domains = <&cpg>; + status = "disabled"; + }; + + scif3: serial@e8008800 { + compatible = "renesas,scif-r7s9210"; + reg = <0xe8008800 0x18>; + interrupts = , + , + , + , + , + ; + interrupt-names = "eri", "rxi", "txi", + "bri", "dri", "tei"; + clocks = <&cpg CPG_MOD 44>; + clock-names = "fck"; + power-domains = <&cpg>; + status = "disabled"; + }; + + scif4: serial@e8009000 { + compatible = "renesas,scif-r7s9210"; + reg = <0xe8009000 0x18>; + interrupts = , + , + , + , + , + ; + interrupt-names = "eri", "rxi", "txi", + "bri", "dri", "tei"; + clocks = <&cpg CPG_MOD 43>; + clock-names = "fck"; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm0: timer@e803b000 { + compatible = "renesas,r7s9210-ostm", "renesas,ostm"; + reg = <0xe803b000 0x30>; + interrupts = ; + clocks = <&cpg CPG_MOD 36>; + clock-names = "ostm0"; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm1: timer@e803c000 { + compatible = "renesas,r7s9210-ostm", "renesas,ostm"; + reg = <0xe803c000 0x30>; + interrupts = ; + clocks = <&cpg CPG_MOD 35>; + clock-names = "ostm1"; + power-domains = <&cpg>; + status = "disabled"; + }; + + ostm2: timer@e803d000 { + compatible = "renesas,r7s9210-ostm", "renesas,ostm"; + reg = <0xe803d000 0x30>; + 
interrupts = ; + clocks = <&cpg CPG_MOD 34>; + clock-names = "ostm2"; + power-domains = <&cpg>; + status = "disabled"; + }; + + gic: interrupt-controller@e8221000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0xe8221000 0x1000>, + <0xe8222000 0x1000>; + }; + + cpg: clock-controller@fcfe0010 { + compatible = "renesas,r7s9210-cpg-mssr"; + reg = <0xfcfe0010 0x455>; + clocks = <&extal_clk>; + clock-names = "extal"; + #clock-cells = <2>; + #power-domain-cells = <0>; + }; + + wdt: watchdog@fcfe7000 { + compatible = "renesas,r7s9210-wdt", "renesas,rza-wdt"; + reg = <0xfcfe7000 0x26>; + interrupts = ; + clocks = <&cpg CPG_CORE R7S9210_CLK_P0>; + }; + + bsid: chipid@fcfe8004 { + compatible = "renesas,bsid"; + reg = <0xfcfe8004 4>; + }; + + pinctrl: pin-controller@fcffe000 { + compatible = "renesas,r7s9210-pinctrl"; + reg = <0xfcffe000 0x1000>; + + gpio-controller; + #gpio-cells = <2>; + gpio-ranges = <&pinctrl 0 0 176>; + }; + }; +}; diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index 3cc33f7ff7fe..de981d629bdd 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi @@ -15,25 +15,6 @@ #address-cells = <2>; #size-cells = <2>; - aliases { - i2c0 = &i2c0; - i2c1 = &i2c1; - i2c2 = &i2c2; - i2c3 = &i2c3; - i2c4 = &i2c4; - i2c5 = &i2c5; - i2c6 = &iic0; - i2c7 = &iic1; - i2c8 = &iic3; - spi0 = &qspi; - spi1 = &msiof0; - spi2 = &msiof1; - spi3 = &msiof2; - vin0 = &vin0; - vin1 = &vin1; - vin2 = &vin2; - }; - /* * The external audio clocks are configured as 0 Hz fixed frequency * clocks by default. @@ -154,6 +135,16 @@ #size-cells = <2>; ranges; + rwdt: watchdog@e6020000 { + compatible = "renesas,r8a7743-wdt", + "renesas,rcar-gen2-wdt"; + reg = <0 0xe6020000 0 0x0c>; + clocks = <&cpg CPG_MOD 402>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 402>; + status = "disabled"; + }; + gpio0: gpio@e6050000 { compatible = "renesas,gpio-r8a7743", "renesas,rcar-gen2-gpio"; @@ -310,16 +301,6 @@ reg = <0 0xe6160000 0 0x100>; }; - rwdt: watchdog@e6020000 { - compatible = "renesas,r8a7743-wdt", - "renesas,rcar-gen2-wdt"; - reg = <0 0xe6020000 0 0x0c>; - clocks = <&cpg CPG_MOD 402>; - power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; - resets = <&cpg 402>; - status = "disabled"; - }; - sysc: system-controller@e6180000 { compatible = "renesas,r8a7743-sysc"; reg = <0 0xe6180000 0 0x200>; @@ -564,9 +545,7 @@ /* doesn't need pinmux */ #address-cells = <1>; #size-cells = <0>; - compatible = "renesas,iic-r8a7743", - "renesas,rcar-gen2-iic", - "renesas,rmobile-iic"; + compatible = "renesas,iic-r8a7743"; reg = <0 0xe60b0000 0 0x425>; interrupts = ; clocks = <&cpg CPG_MOD 926>; @@ -1681,15 +1660,12 @@ du: display@feb00000 { compatible = "renesas,du-r8a7743"; - reg = <0 0xfeb00000 0 0x40000>, - <0 0xfeb90000 0 0x1c>; - reg-names = "du", "lvds.0"; + reg = <0 0xfeb00000 0 0x40000>; interrupts = , ; clocks = <&cpg CPG_MOD 724>, - <&cpg CPG_MOD 723>, - <&cpg CPG_MOD 726>; - clock-names = "du.0", "du.1", "lvds.0"; + <&cpg CPG_MOD 723>; + clock-names = "du.0", "du.1"; status = "disabled"; ports { @@ -1704,6 +1680,33 @@ port@1 { reg = <1>; du_out_lvds0: endpoint { + remote-endpoint = <&lvds0_in>; + }; + }; + }; + }; + + lvds0: lvds@feb90000 { + compatible = "renesas,r8a7743-lvds"; + reg = <0 0xfeb90000 0 0x1c>; + clocks = <&cpg CPG_MOD 726>; + power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; + resets = <&cpg 726>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { 
+ reg = <0>; + lvds0_in: endpoint { + remote-endpoint = <&du_out_lvds0>; + }; + }; + port@1 { + reg = <1>; + lvds0_out: endpoint { }; }; }; diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi index 04148d608fc4..fa74a262107b 100644 --- a/arch/arm/boot/dts/r8a7744.dtsi +++ b/arch/arm/boot/dts/r8a7744.dtsi @@ -998,6 +998,54 @@ status = "disabled"; }; + msiof0: spi@e6e20000 { + compatible = "renesas,msiof-r8a7744", + "renesas,rcar-gen2-msiof"; + reg = <0 0xe6e20000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 000>; + dmas = <&dmac0 0x51>, <&dmac0 0x52>, + <&dmac1 0x51>, <&dmac1 0x52>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; + #address-cells = <1>; + #size-cells = <0>; + resets = <&cpg 000>; + status = "disabled"; + }; + + msiof1: spi@e6e10000 { + compatible = "renesas,msiof-r8a7744", + "renesas,rcar-gen2-msiof"; + reg = <0 0xe6e10000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 208>; + dmas = <&dmac0 0x55>, <&dmac0 0x56>, + <&dmac1 0x55>, <&dmac1 0x56>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; + #address-cells = <1>; + #size-cells = <0>; + resets = <&cpg 208>; + status = "disabled"; + }; + + msiof2: spi@e6e00000 { + compatible = "renesas,msiof-r8a7744", + "renesas,rcar-gen2-msiof"; + reg = <0 0xe6e00000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 205>; + dmas = <&dmac0 0x41>, <&dmac0 0x42>, + <&dmac1 0x41>, <&dmac1 0x42>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; + #address-cells = <1>; + #size-cells = <0>; + resets = <&cpg 205>; + status = "disabled"; + }; + pwm0: pwm@e6e30000 { compatible = "renesas,pwm-r8a7744", "renesas,pwm-rcar"; reg = <0 0xe6e30000 0 0x8>; @@ -1068,54 +1116,6 @@ status = "disabled"; }; - msiof0: spi@e6e20000 { - compatible = "renesas,msiof-r8a7744", - "renesas,rcar-gen2-msiof"; - reg = <0 0xe6e20000 0 0x0064>; - interrupts = ; - clocks = <&cpg CPG_MOD 000>; - dmas = <&dmac0 0x51>, <&dmac0 0x52>, - <&dmac1 0x51>, <&dmac1 0x52>; - dma-names = "tx", "rx", "tx", "rx"; - power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; - #address-cells = <1>; - #size-cells = <0>; - resets = <&cpg 000>; - status = "disabled"; - }; - - msiof1: spi@e6e10000 { - compatible = "renesas,msiof-r8a7744", - "renesas,rcar-gen2-msiof"; - reg = <0 0xe6e10000 0 0x0064>; - interrupts = ; - clocks = <&cpg CPG_MOD 208>; - dmas = <&dmac0 0x55>, <&dmac0 0x56>, - <&dmac1 0x55>, <&dmac1 0x56>; - dma-names = "tx", "rx", "tx", "rx"; - power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; - #address-cells = <1>; - #size-cells = <0>; - resets = <&cpg 208>; - status = "disabled"; - }; - - msiof2: spi@e6e00000 { - compatible = "renesas,msiof-r8a7744", - "renesas,rcar-gen2-msiof"; - reg = <0 0xe6e00000 0 0x0064>; - interrupts = ; - clocks = <&cpg CPG_MOD 205>; - dmas = <&dmac0 0x41>, <&dmac0 0x42>, - <&dmac1 0x41>, <&dmac1 0x42>; - dma-names = "tx", "rx", "tx", "rx"; - power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; - #address-cells = <1>; - #size-cells = <0>; - resets = <&cpg 205>; - status = "disabled"; - }; - can0: can@e6e80000 { compatible = "renesas,can-r8a7744", "renesas,rcar-gen2-can"; @@ -1589,33 +1589,6 @@ resets = <&cpg 408>; }; - vsp@fe928000 { - compatible = "renesas,vsp1"; - reg = <0 0xfe928000 0 0x8000>; - interrupts = ; - clocks = <&cpg CPG_MOD 131>; - power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; - resets = <&cpg 131>; - }; - - vsp@fe930000 { - compatible = "renesas,vsp1"; - reg = <0 0xfe930000 0 0x8000>; - interrupts = ; - clocks = 
<&cpg CPG_MOD 128>; - power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; - resets = <&cpg 128>; - }; - - vsp@fe938000 { - compatible = "renesas,vsp1"; - reg = <0 0xfe938000 0 0x8000>; - interrupts = ; - clocks = <&cpg CPG_MOD 127>; - power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; - resets = <&cpg 127>; - }; - pciec: pcie@fe000000 { compatible = "renesas,pcie-r8a7744", "renesas,pcie-rcar-gen2"; @@ -1644,9 +1617,42 @@ status = "disabled"; }; + vsp@fe928000 { + compatible = "renesas,vsp1"; + reg = <0 0xfe928000 0 0x8000>; + interrupts = ; + clocks = <&cpg CPG_MOD 131>; + power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; + resets = <&cpg 131>; + }; + + vsp@fe930000 { + compatible = "renesas,vsp1"; + reg = <0 0xfe930000 0 0x8000>; + interrupts = ; + clocks = <&cpg CPG_MOD 128>; + power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; + resets = <&cpg 128>; + }; + + vsp@fe938000 { + compatible = "renesas,vsp1"; + reg = <0 0xfe938000 0 0x8000>; + interrupts = ; + clocks = <&cpg CPG_MOD 127>; + power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; + resets = <&cpg 127>; + }; + du: display@feb00000 { - reg = <0 0xfeb00000 0 0x40000>, - <0 0xfeb90000 0 0x1c>; + compatible = "renesas,du-r8a7744"; + reg = <0 0xfeb00000 0 0x40000>; + interrupts = , + ; + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>; + clock-names = "du.0", "du.1"; + status = "disabled"; ports { #address-cells = <1>; @@ -1660,10 +1666,36 @@ port@1 { reg = <1>; du_out_lvds0: endpoint { + remote-endpoint = <&lvds0_in>; + }; + }; + }; + }; + + lvds0: lvds@feb90000 { + compatible = "renesas,r8a7744-lvds"; + reg = <0 0xfeb90000 0 0x1c>; + clocks = <&cpg CPG_MOD 726>; + power-domains = <&sysc R8A7744_PD_ALWAYS_ON>; + resets = <&cpg 726>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + lvds0_in: endpoint { + remote-endpoint = <&du_out_lvds0>; + }; + }; + port@1 { + reg = <1>; + lvds0_out: endpoint { }; }; }; - /* placeholder */ }; prr: chipid@ff000044 { diff --git a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts index 40b7f98d6013..77d18242ef59 100644 --- a/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts +++ b/arch/arm/boot/dts/r8a77470-iwg23s-sbc.dts @@ -84,12 +84,30 @@ clock-frequency = <20000000>; }; +&i2c3 { + pinctrl-0 = <&i2c3_pins>; + pinctrl-names = "default"; + + status = "okay"; + clock-frequency = <400000>; + + rtc@51 { + compatible = "nxp,pcf85263"; + reg = <0x51>; + }; +}; + &pfc { avb_pins: avb { groups = "avb_mdio", "avb_gmii_tx_rx"; function = "avb"; }; + i2c3_pins: i2c3 { + groups = "i2c3_c"; + function = "i2c3"; + }; + mmc_pins_uhs: mmc_uhs { groups = "mmc_data8", "mmc_ctrl"; function = "mmc"; diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi index 05db0ccad7a6..10d996d2941f 100644 --- a/arch/arm/boot/dts/r8a7778.dtsi +++ b/arch/arm/boot/dts/r8a7778.dtsi @@ -367,6 +367,30 @@ status = "disabled"; }; + hscif0: serial@ffe48000 { + compatible = "renesas,hscif-r8a7778", + "renesas,rcar-gen1-hscif", "renesas,hscif"; + reg = <0xffe48000 96>; + interrupts = ; + clocks = <&mstp0_clks R8A7778_CLK_HSCIF0>, + <&cpg_clocks R8A7778_CLK_S>, <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + power-domains = <&cpg_clocks>; + status = "disabled"; + }; + + hscif1: serial@ffe49000 { + compatible = "renesas,hscif-r8a7778", + "renesas,rcar-gen1-hscif", "renesas,hscif"; + reg = <0xffe49000 96>; + interrupts = ; + clocks = <&mstp0_clks R8A7778_CLK_HSCIF1>, + <&cpg_clocks R8A7778_CLK_S>, <&scif_clk>; + clock-names = "fck", "brg_int", 
"scif_clk"; + power-domains = <&cpg_clocks>; + status = "disabled"; + }; + mmcif: mmc@ffe4e000 { compatible = "renesas,mmcif-r8a7778", "renesas,sh-mmcif"; reg = <0xffe4e000 0x100>; @@ -535,6 +559,8 @@ <&cpg_clocks R8A7778_CLK_P>, <&cpg_clocks R8A7778_CLK_P>, <&cpg_clocks R8A7778_CLK_P>, + <&cpg_clocks R8A7778_CLK_S>, + <&cpg_clocks R8A7778_CLK_S>, <&cpg_clocks R8A7778_CLK_P>, <&cpg_clocks R8A7778_CLK_P>, <&cpg_clocks R8A7778_CLK_P>, @@ -551,6 +577,7 @@ R8A7778_CLK_SCIF0 R8A7778_CLK_SCIF1 R8A7778_CLK_SCIF2 R8A7778_CLK_SCIF3 R8A7778_CLK_SCIF4 R8A7778_CLK_SCIF5 + R8A7778_CLK_HSCIF0 R8A7778_CLK_HSCIF1 R8A7778_CLK_TMU0 R8A7778_CLK_TMU1 R8A7778_CLK_TMU2 R8A7778_CLK_SSI0 R8A7778_CLK_SSI1 R8A7778_CLK_SSI2 @@ -560,6 +587,7 @@ clock-output-names = "i2c0", "i2c1", "i2c2", "i2c3", "scif0", "scif1", "scif2", "scif3", "scif4", "scif5", + "hscif0", "hscif1", "tmu0", "tmu1", "tmu2", "ssi0", "ssi1", "ssi2", "ssi3", "sru", "hspi"; }; diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi index 3bc133d9489c..3ff259207527 100644 --- a/arch/arm/boot/dts/r8a7779.dtsi +++ b/arch/arm/boot/dts/r8a7779.dtsi @@ -287,6 +287,32 @@ status = "disabled"; }; + hscif0: serial@ffe48000 { + compatible = "renesas,hscif-r8a7779", + "renesas,rcar-gen1-hscif", "renesas,hscif"; + reg = <0xffe48000 96>; + interrupts = ; + clocks = <&mstp0_clks R8A7779_CLK_HSCIF0>, + <&cpg_clocks R8A7779_CLK_S>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + power-domains = <&cpg_clocks>; + status = "disabled"; + }; + + hscif1: serial@ffe49000 { + compatible = "renesas,hscif-r8a7779", + "renesas,rcar-gen1-hscif", "renesas,hscif"; + reg = <0xffe49000 96>; + interrupts = ; + clocks = <&mstp0_clks R8A7779_CLK_HSCIF1>, + <&cpg_clocks R8A7779_CLK_S>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + power-domains = <&cpg_clocks>; + status = "disabled"; + }; + pfc: pin-controller@fffc0000 { compatible = "renesas,pfc-r8a7779"; reg = <0xfffc0000 0x23c>; diff --git a/arch/arm/boot/dts/r8a7790-stout.dts b/arch/arm/boot/dts/r8a7790-stout.dts index 629da4cee1b9..7a7d3b84d1a6 100644 --- a/arch/arm/boot/dts/r8a7790-stout.dts +++ b/arch/arm/boot/dts/r8a7790-stout.dts @@ -94,9 +94,8 @@ status = "okay"; clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 722>, - <&cpg CPG_MOD 726>, <&cpg CPG_MOD 725>, <&osc1_clk>; - clock-names = "du.0", "du.1", "du.2", "lvds.0", "lvds.1", "dclkin.0"; + clock-names = "du.0", "du.1", "du.2", "dclkin.0"; ports { port@0 { @@ -104,11 +103,21 @@ remote-endpoint = <&adv7511_in>; }; }; + }; +}; + +&lvds0 { + ports { port@1 { lvds_connector0: endpoint { }; }; - port@2 { + }; +}; + +&lvds1 { + ports { + port@1 { lvds_connector1: endpoint { }; }; diff --git a/arch/arm/boot/dts/rk3036-kylin.dts b/arch/arm/boot/dts/rk3036-kylin.dts index 0fd19f9723df..0173eb11ec28 100644 --- a/arch/arm/boot/dts/rk3036-kylin.dts +++ b/arch/arm/boot/dts/rk3036-kylin.dts @@ -310,7 +310,6 @@ }; &i2s { - #sound-dai-cells = <0>; status = "okay"; }; diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi index d560fc4051c5..59c90863b0e7 100644 --- a/arch/arm/boot/dts/rk3036.dtsi +++ b/arch/arm/boot/dts/rk3036.dtsi @@ -289,6 +289,7 @@ dma-names = "tx", "rx"; pinctrl-names = "default"; pinctrl-0 = <&i2s_bus>; + #sound-dai-cells = <0>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts index 1c925f20dba0..0a56a2f1bc4d 100644 --- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts +++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts @@ -171,7 
+171,6 @@ pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus4>; bus-width = <4>; - disable-wp; }; &pwm3 { diff --git a/arch/arm/boot/dts/rk3066a-mk808.dts b/arch/arm/boot/dts/rk3066a-mk808.dts index b6a8a82d219e..9d2216d71f70 100644 --- a/arch/arm/boot/dts/rk3066a-mk808.dts +++ b/arch/arm/boot/dts/rk3066a-mk808.dts @@ -101,7 +101,6 @@ &mmc1 { bus-width = <4>; - disable-wp; non-removable; pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_bus4>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts index cd126b927ba8..949fa800582d 100644 --- a/arch/arm/boot/dts/rk3066a-rayeager.dts +++ b/arch/arm/boot/dts/rk3066a-rayeager.dts @@ -147,7 +147,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_rst>; @@ -309,7 +308,6 @@ &mmc1 { bus-width = <4>; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&sd1_clk>, <&sd1_cmd>, <&sd1_bus4>; diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi index 30dc8af0bdcb..653127a377fa 100644 --- a/arch/arm/boot/dts/rk3066a.dtsi +++ b/arch/arm/boot/dts/rk3066a.dtsi @@ -44,6 +44,11 @@ }; }; + display-subsystem { + compatible = "rockchip,display-subsystem"; + ports = <&vop0_out>, <&vop1_out>; + }; + sram: sram@10080000 { compatible = "mmio-sram"; reg = <0x10080000 0x10000>; @@ -57,6 +62,48 @@ }; }; + vop0: vop@1010c000 { + compatible = "rockchip,rk3066-vop"; + reg = <0x1010c000 0x19c>; + interrupts = ; + clocks = <&cru ACLK_LCDC0>, + <&cru DCLK_LCDC0>, + <&cru HCLK_LCDC0>; + clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; + power-domains = <&power RK3066_PD_VIO>; + resets = <&cru SRST_LCDC0_AXI>, + <&cru SRST_LCDC0_AHB>, + <&cru SRST_LCDC0_DCLK>; + reset-names = "axi", "ahb", "dclk"; + status = "disabled"; + + vop0_out: port { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + + vop1: vop@1010e000 { + compatible = "rockchip,rk3066-vop"; + reg = <0x1010e000 0x19c>; + interrupts = ; + clocks = <&cru ACLK_LCDC1>, + <&cru DCLK_LCDC1>, + <&cru HCLK_LCDC1>; + clock-names = "aclk_vop", "dclk_vop", "hclk_vop"; + power-domains = <&power RK3066_PD_VIO>; + resets = <&cru SRST_LCDC1_AXI>, + <&cru SRST_LCDC1_AHB>, + <&cru SRST_LCDC1_DCLK>; + reset-names = "axi", "ahb", "dclk"; + status = "disabled"; + + vop1_out: port { + #address-cells = <1>; + #size-cells = <0>; + }; + }; + i2s0: i2s@10118000 { compatible = "rockchip,rk3066-i2s"; reg = <0x10118000 0x2000>; @@ -669,6 +716,7 @@ <&cru SCLK_CIF0>, <&cru ACLK_CIF0>, <&cru HCLK_CIF0>, + <&cru HCLK_HDMI>, <&cru ACLK_IPP>, <&cru HCLK_IPP>, <&cru ACLK_RGA>, diff --git a/arch/arm/boot/dts/rk3188-bqedison2qc.dts b/arch/arm/boot/dts/rk3188-bqedison2qc.dts index a7477a09fbe8..c8b62bbd6a4a 100644 --- a/arch/arm/boot/dts/rk3188-bqedison2qc.dts +++ b/arch/arm/boot/dts/rk3188-bqedison2qc.dts @@ -227,7 +227,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd>; @@ -408,6 +407,21 @@ &i2c2 { clock-frequency = <400000>; status = "okay"; + + ft5606: touchscreen@3e { + compatible = "edt,edt-ft5506"; + reg = <0x3e>; + interrupt-parent = <&gpio1>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&tp_int &tp_rst>; + reset-gpios = <&gpio0 RK_PB6 GPIO_ACTIVE_LOW>; + touchscreen-inverted-y; + /* hw ts resolution does not match display */ + touchscreen-size-y = <1024>; + touchscreen-size-x = <768>; + touchscreen-swapped-x-y; + }; }; &i2c3 { @@ -446,7 +460,6 @@ &mmc1 { bus-width 
= <4>; cap-sd-highspeed; - cap-mmc-highspeed; keep-power-in-suspend; mmc-pwrseq = <&sdio_pwrseq>; non-removable; @@ -526,7 +539,7 @@ }; cif1_pdn: cif1-pdn { - rockchip,pins = <3 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>; + rockchip,pins = <3 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; }; cif_avdd_en: cif-avdd-en { diff --git a/arch/arm/boot/dts/rk3188-px3-evb.dts b/arch/arm/boot/dts/rk3188-px3-evb.dts index 9ae65c767c90..c0eaa9c5490b 100644 --- a/arch/arm/boot/dts/rk3188-px3-evb.dts +++ b/arch/arm/boot/dts/rk3188-px3-evb.dts @@ -62,7 +62,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_rst>; diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 4acb501dd3f8..3ed49898f4b2 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi @@ -719,7 +719,6 @@ pm_qos = <&qos_lcdc0>, <&qos_lcdc1>, <&qos_cif0>, - <&qos_cif1>, <&qos_ipp>, <&qos_rga>; }; diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts index 4df7accc3ad7..350497a3ca86 100644 --- a/arch/arm/boot/dts/rk3229-evb.dts +++ b/arch/arm/boot/dts/rk3229-evb.dts @@ -137,7 +137,6 @@ &emmc { cap-mmc-highspeed; - disable-wp; non-removable; status = "okay"; }; diff --git a/arch/arm/boot/dts/rk3288-fennec.dts b/arch/arm/boot/dts/rk3288-fennec.dts index b1b56dfdfdba..29af26e6d442 100644 --- a/arch/arm/boot/dts/rk3288-fennec.dts +++ b/arch/arm/boot/dts/rk3288-fennec.dts @@ -37,7 +37,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>; diff --git a/arch/arm/boot/dts/rk3288-firefly-reload.dts b/arch/arm/boot/dts/rk3288-firefly-reload.dts index 58ea8bed040a..3a646c5f4fcf 100644 --- a/arch/arm/boot/dts/rk3288-firefly-reload.dts +++ b/arch/arm/boot/dts/rk3288-firefly-reload.dts @@ -254,7 +254,6 @@ bus-width = <4>; cap-sd-highspeed; cap-sdio-irq; - disable-wp; mmc-pwrseq = <&sdio_pwrseq>; non-removable; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/rk3288-miqi.dts b/arch/arm/boot/dts/rk3288-miqi.dts index 504ab1177aa7..fb7365b604bb 100644 --- a/arch/arm/boot/dts/rk3288-miqi.dts +++ b/arch/arm/boot/dts/rk3288-miqi.dts @@ -87,7 +87,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk>, <&emmc_cmd>, <&emmc_pwr>, <&emmc_bus8>; diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts index 596435e03132..6a51940398b5 100644 --- a/arch/arm/boot/dts/rk3288-popmetal.dts +++ b/arch/arm/boot/dts/rk3288-popmetal.dts @@ -109,7 +109,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; mmc-ddr-1_8v; mmc-hs200-1_8v; non-removable; diff --git a/arch/arm/boot/dts/rk3288-rock2-square.dts b/arch/arm/boot/dts/rk3288-rock2-square.dts index 6a30cadad88a..5b7e1c9e92e1 100644 --- a/arch/arm/boot/dts/rk3288-rock2-square.dts +++ b/arch/arm/boot/dts/rk3288-rock2-square.dts @@ -133,7 +133,6 @@ bus-width = <4>; cap-sd-highspeed; cap-sdio-irq; - disable-wp; mmc-pwrseq = <&sdio_pwrseq>; non-removable; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/rk3288-tinker-s.dts b/arch/arm/boot/dts/rk3288-tinker-s.dts index 37093922b482..d97da89bcd51 100644 --- a/arch/arm/boot/dts/rk3288-tinker-s.dts +++ b/arch/arm/boot/dts/rk3288-tinker-s.dts @@ -15,7 +15,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd 
&emmc_pwr &emmc_bus8>; diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index d8bf939a3aff..0bc2409f6903 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi @@ -10,6 +10,10 @@ #include "rk3288.dtsi" / { + chosen { + stdout-path = "serial2:115200n8"; + }; + /* * The default coreboot on veyron devices ignores memory@0 nodes * and would instead create another memory node. diff --git a/arch/arm/boot/dts/rk3288-vyasa.dts b/arch/arm/boot/dts/rk3288-vyasa.dts index 4856a9fc0aea..40b232eb5011 100644 --- a/arch/arm/boot/dts/rk3288-vyasa.dts +++ b/arch/arm/boot/dts/rk3288-vyasa.dts @@ -121,7 +121,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_pwr &emmc_bus8>; diff --git a/arch/arm/boot/dts/rv1108-elgin-r1.dts b/arch/arm/boot/dts/rv1108-elgin-r1.dts new file mode 100644 index 000000000000..1c4507b66fdd --- /dev/null +++ b/arch/arm/boot/dts/rv1108-elgin-r1.dts @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) + +/* + * Copyright (C) 2018 O.S. Systems Software LTDA. + */ + +/dts-v1/; + +#include "rv1108.dtsi" + +/ { + model = "Elgin RV1108 R1 board"; + compatible = "elgin,rv1108-r1", "rockchip,rv1108"; + + memory@60000000 { + device_type = "memory"; + reg = <0x60000000 0x08000000>; + }; + + chosen { + stdout-path = "serial2:1500000n8"; + }; + + vcc_sys: vsys-regulator { + compatible = "regulator-fixed"; + regulator-name = "vsys"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-boot-on; + }; +}; + +&cpu0 { + cpu-supply = <&vdd_core>; +}; + +&emmc { + bus-width = <8>; + cap-mmc-highspeed; + disable-wp; + no-sd; + no-sdio; + non-removable; + mmc-ddr-1_8v; + mmc-hs200-1_8v; + pinctrl-names = "default"; + pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>; + status = "okay"; +}; + +&gmac { + clock_in_out = "output"; + pinctrl-names = "default"; + pinctrl-0 = <&rmii_pins>; + snps,reset-gpio = <&gpio1 RK_PC1 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + status = "okay"; +}; + +&i2c0 { + clock-frequency = <400000>; + i2c-scl-rising-time-ns = <275>; + i2c-scl-falling-time-ns = <16>; + status = "okay"; + + rk805: pmic@18 { + compatible = "rockchip,rk805"; + reg = <0x18>; + interrupt-parent = <&gpio0>; + interrupts = ; + rockchip,system-power-controller; + + vcc1-supply = <&vcc_sys>; + vcc2-supply = <&vcc_sys>; + vcc3-supply = <&vcc_sys>; + vcc4-supply = <&vcc_sys>; + vcc5-supply = <&vdd_buck2>; + vcc6-supply = <&vdd_buck2>; + + regulators { + vdd_core: DCDC_REG1 { + regulator-name= "vdd_core"; + regulator-min-microvolt = <700000>; + regulator-max-microvolt = <1500000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <900000>; + }; + }; + + vdd_buck2: DCDC_REG2 { + regulator-name= "vdd_buck2"; + regulator-min-microvolt = <2200000>; + regulator-max-microvolt = <2200000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name= "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_io: DCDC_REG4 { + regulator-name= "vcc_io"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; + }; + }; + + 
vdd_10: LDO_REG1 { + regulator-name= "vdd_10"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_18: LDO_REG2 { + regulator-name= "vcc_18"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd10_pmu: LDO_REG3 { + regulator-name= "vdd10_pmu"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; + }; + }; + }; + }; +}; + +&spi { + pinctrl-names = "default"; + pinctrl-0 = <&spim1_clk &spim1_cs0 &spim1_tx &spim1_rx>; + status = "okay"; + + dh2228fv: dac@0 { + compatible = "rohm,dh2228fv"; + reg = <0>; + spi-max-frequency = <24000000>; + spi-cpha; + spi-cpol; + }; +}; + +&u2phy { + status = "okay"; + + u2phy_host: host-port { + status = "okay"; + }; + + u2phy_otg: otg-port { + status = "okay"; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer>; + status = "okay"; +}; + +&uart2 { + status = "okay"; +}; + +&usb_host_ehci { + status = "okay"; +}; + +&usb_host_ohci { + status = "okay"; +}; + +&usb_otg { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/rv1108-evb.dts b/arch/arm/boot/dts/rv1108-evb.dts index 203d83e3bbf5..30f3d0470ad9 100644 --- a/arch/arm/boot/dts/rv1108-evb.dts +++ b/arch/arm/boot/dts/rv1108-evb.dts @@ -97,8 +97,8 @@ regulator-always-on; regulator-boot-on; regulator-state-mem { - regulator-state-enabled; - regulator-state-uv = <900000>; + regulator-on-in-suspend; + regulator-suspend-microvolt = <900000>; }; }; @@ -107,7 +107,7 @@ regulator-min-microvolt = <700000>; regulator-max-microvolt = <2000000>; regulator-state-mem { - regulator-state-disabled; + regulator-off-in-suspend; }; }; @@ -116,7 +116,7 @@ regulator-always-on; regulator-boot-on; regulator-state-mem { - regulator-state-enabled; + regulator-on-in-suspend; }; }; @@ -127,8 +127,8 @@ regulator-always-on; regulator-boot-on; regulator-state-mem { - regulator-state-enabled; - regulator-state-uv = <3300000>; + regulator-on-in-suspend; + regulator-suspend-microvolt = <3300000>; }; }; @@ -139,7 +139,7 @@ regulator-always-on; regulator-boot-on; regulator-state-mem { - regulator-state-disabled; + regulator-off-in-suspend; }; }; @@ -150,7 +150,7 @@ regulator-always-on; regulator-boot-on; regulator-state-mem { - regulator-state-disabled; + regulator-off-in-suspend; }; }; @@ -161,8 +161,8 @@ regulator-always-on; regulator-boot-on; regulator-state-mem { - regulator-state-enabled; - regulator-state-uv = <1000000>; + regulator-on-in-suspend; + regulator-suspend-microvolt = <1000000>; }; }; }; diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi index d31370ff28f4..f47ac86d2852 100644 --- a/arch/arm/boot/dts/rv1108.dtsi +++ b/arch/arm/boot/dts/rv1108.dtsi @@ -207,6 +207,7 @@ clocks = <&cru SCLK_SPI>, <&cru PCLK_SPI>; clock-names = "spiclk", "apb_pclk"; dmas = <&pdma 8>, <&pdma 9>; + dma-names = "tx", "rx"; #dma-cells = <2>; #address-cells = <1>; #size-cells = <0>; @@ -833,6 +834,42 @@ }; }; + spim0 { + spim0_clk: spim0-clk { + rockchip,pins = <1 RK_PD0 RK_FUNC_2 &pcfg_pull_up>; + }; + + spim0_cs0: spim0-cs0 { + rockchip,pins = <1 RK_PD1 RK_FUNC_2 &pcfg_pull_up>; + }; + + spim0_tx: spim0-tx { + rockchip,pins = <1 RK_PD3 RK_FUNC_2 &pcfg_pull_up>; + }; 
+ + spim0_rx: spim0-rx { + rockchip,pins = <1 RK_PD2 RK_FUNC_2 &pcfg_pull_up>; + }; + }; + + spim1 { + spim1_clk: spim1-clk { + rockchip,pins = <0 RK_PA3 RK_FUNC_1 &pcfg_pull_up>; + }; + + spim1_cs0: spim1-cs0 { + rockchip,pins = <0 RK_PA4 RK_FUNC_1 &pcfg_pull_up>; + }; + + spim1_rx: spim1-rx { + rockchip,pins = <0 RK_PB0 RK_FUNC_1 &pcfg_pull_up>; + }; + + spim1_tx: spim1-tx { + rockchip,pins = <0 RK_PA7 RK_FUNC_1 &pcfg_pull_up>; + }; + }; + tsadc { otp_out: otp-out { rockchip,pins = <0 RK_PB7 RK_FUNC_1 &pcfg_pull_none>; diff --git a/arch/arm/boot/dts/s3c2416-smdk2416.dts b/arch/arm/boot/dts/s3c2416-smdk2416.dts index 5164386aff3a..cb371bf72f64 100644 --- a/arch/arm/boot/dts/s3c2416-smdk2416.dts +++ b/arch/arm/boot/dts/s3c2416-smdk2416.dts @@ -19,9 +19,12 @@ clocks { compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <0>; - xti: xti { + xti: xti@0 { compatible = "fixed-clock"; + reg = <0>; clock-frequency = <12000000>; clock-output-names = "xti"; #clock-cells = <0>; diff --git a/arch/arm/boot/dts/s5pv210-aries.dtsi b/arch/arm/boot/dts/s5pv210-aries.dtsi index 575094ea7024..8ff70b856334 100644 --- a/arch/arm/boot/dts/s5pv210-aries.dtsi +++ b/arch/arm/boot/dts/s5pv210-aries.dtsi @@ -23,6 +23,31 @@ 0x50000000 0x08000000>; }; + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + mfc_left: region@43000000 { + compatible = "shared-dma-pool"; + no-map; + reg = <0x43000000 0x2000000>; + }; + + mfc_right: region@51000000 { + compatible = "shared-dma-pool"; + no-map; + reg = <0x51000000 0x2000000>; + }; + }; + + vibrator_pwr: regulator-fixed-0 { + compatible = "regulator-fixed"; + regulator-name = "vibrator-en"; + enable-active-high; + gpio = <&gpj1 1 GPIO_ACTIVE_HIGH>; + }; + wifi_pwrseq: wifi-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpg1 2 GPIO_ACTIVE_LOW>; @@ -296,6 +321,22 @@ reg = <0x36>; }; }; + + vibrator: pwm-vibrator { + compatible = "pwm-vibrator"; + pwms = <&pwm 1 44642 0>; + pwm-names = "enable"; + vcc-supply = <&vibrator_pwr>; + pinctrl-names = "default"; + pinctrl-0 = <&pwm1_out>; + }; + + poweroff: syscon-poweroff { + compatible = "syscon-poweroff"; + regmap = <&pmu_syscon>; + offset = <0x681c>; /* PS_HOLD_CONTROL */ + value = <0x5200>; + }; }; &fimd { @@ -329,6 +370,27 @@ status = "okay"; }; +&i2c2 { + samsung,i2c-sda-delay = <100>; + samsung,i2c-max-bus-freq = <400000>; + samsung,i2c-slave-addr = <0x10>; + status = "okay"; + + touchscreen@4a { + compatible = "atmel,maxtouch"; + reg = <0x4a>; + interrupt-parent = <&gpj0>; + interrupts = <5 IRQ_TYPE_EDGE_FALLING>; + pinctrl-names = "default"; + pinctrl-0 = <&ts_irq>; + reset-gpios = <&gpj1 3 GPIO_ACTIVE_HIGH>; + }; +}; + +&mfc { + memory-region = <&mfc_left>, <&mfc_right>; +}; + &pinctrl0 { wlan_bt_en: wlan-bt-en { samsung,pins = "gpb-5"; @@ -350,6 +412,13 @@ samsung,pin-drv = ; }; + bt_host_wake: bt-host-wake { + samsung,pins = "gph2-5"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; + tf_detect: tf-detect { samsung,pins = "gph3-4"; samsung,pin-function = ; @@ -362,6 +431,17 @@ samsung,pin-function = ; samsung,pin-pud = ; }; + + ts_irq: ts-irq { + samsung,pins = "gpj0-5"; + samsung,pin-function = ; + samsung,pin-pud = ; + samsung,pin-drv = ; + }; +}; + +&pwm { + samsung,pwm-outputs = <1>; }; &sdhci1 { @@ -399,6 +479,16 @@ &uart0 { status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + max-speed = <115200>; + pinctrl-names = "default"; + pinctrl-0 = <&uart0_data &uart0_fctl &bt_host_wake>; + shutdown-gpios = <&gpb 3 
GPIO_ACTIVE_HIGH>; + device-wakeup-gpios = <&gpg3 4 GPIO_ACTIVE_HIGH>; + host-wakeup-gpios = <&gph2 5 GPIO_ACTIVE_HIGH>; + }; }; &uart1 { diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts index ccf761b1babf..07a8d9bbe5b8 100644 --- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts +++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts @@ -11,13 +11,6 @@ chosen { stdout-path = &uart2; - /* - * It's hard to change those parameters in stock bootloader, - * since it requires special hardware/cable. - * Let's hardocde bootargs for now, till u-boot port is finished, - * with which it should be easier. - */ - bootargs = "root=/dev/mmcblk1p1 rw rootwait ignore_loglevel earlyprintk"; }; gpio-keys { diff --git a/arch/arm/boot/dts/s5pv210-galaxys.dts b/arch/arm/boot/dts/s5pv210-galaxys.dts index 842276749717..cf161bbfbacf 100644 --- a/arch/arm/boot/dts/s5pv210-galaxys.dts +++ b/arch/arm/boot/dts/s5pv210-galaxys.dts @@ -11,13 +11,6 @@ chosen { stdout-path = &uart2; - /* - * It's hard to change those parameters in stock bootloader, - * since it requires special hardware/cable. - * Let's hardocde bootargs for now, till u-boot port is finished, - * with which it should be easier. - */ - bootargs = "root=/dev/mmcblk2p1 rw rootwait ignore_loglevel earlyprintk"; }; nand_pwrseq: nand-pwrseq { diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi index 12eac8930eac..a44d5eb56bed 100644 --- a/arch/arm/boot/dts/s5pv210.dtsi +++ b/arch/arm/boot/dts/s5pv210.dtsi @@ -25,6 +25,8 @@ aliases { csis0 = &csis0; + dmc0 = &dmc0; + dmc1 = &dmc1; fimc0 = &fimc0; fimc1 = &fimc1; fimc2 = &fimc2; @@ -78,7 +80,7 @@ }; }; - onenand: onenand@b0000000 { + onenand: onenand@b0600000 { compatible = "samsung,s5pv210-onenand"; reg = <0xb0600000 0x2000>, <0xb0000000 0x20000>, @@ -511,7 +513,7 @@ }; fimd: fimd@f8000000 { - compatible = "samsung,exynos4210-fimd"; + compatible = "samsung,s5pv210-fimd"; interrupt-parent = <&vic2>; reg = <0xf8000000 0x20000>; interrupt-names = "fifo", "vsync", "lcd_sys"; @@ -521,6 +523,16 @@ status = "disabled"; }; + dmc0: dmc@f0000000 { + compatible = "samsung,s5pv210-dmc"; + reg = <0xf0000000 0x1000>; + }; + + dmc1: dmc@f1400000 { + compatible = "samsung,s5pv210-dmc"; + reg = <0xf1400000 0x1000>; + }; + g2d: g2d@fa000000 { compatible = "samsung,s5pv210-g2d"; reg = <0xfa000000 0x1000>; @@ -542,6 +554,15 @@ #dma-requests = <1>; }; + rotator: rotator@fa300000 { + compatible = "samsung,s5pv210-rotator"; + reg = <0xfa300000 0x1000>; + interrupt-parent = <&vic2>; + interrupts = <4>; + clocks = <&clocks CLK_ROTATOR>; + clock-names = "rotator"; + }; + i2c1: i2c@fab00000 { compatible = "samsung,s3c2440-i2c"; reg = <0xfab00000 0x1000>; diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index dc2280d9127f..d159ee42ef29 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -43,13 +43,14 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "skeleton.dtsi" #include #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel SAMA5D2 family SoC"; compatible = "atmel,sama5d2"; interrupt-parent = <&aic>; @@ -113,6 +114,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 1408fa4a62e4..02198772eb81 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -8,7 +8,6 @@ * Licensed under GPLv2 or later. 
*/ -#include "skeleton.dtsi" #include #include #include @@ -16,6 +15,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel SAMA5D3 family SoC"; compatible = "atmel,sama5d3", "atmel,sama5"; interrupt-parent = <&aic>; @@ -56,6 +57,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x8000000>; }; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 2604fd07dd53..6c1e41f94549 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -43,7 +43,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "skeleton.dtsi" #include #include #include @@ -51,6 +50,8 @@ #include / { + #address-cells = <1>; + #size-cells = <1>; model = "Atmel SAMA5D4 family SoC"; compatible = "atmel,sama5d4"; interrupt-parent = <&aic>; @@ -90,6 +91,7 @@ }; memory { + device_type = "memory"; reg = <0x20000000 0x20000000>; }; diff --git a/arch/arm/boot/dts/skeleton.dtsi b/arch/arm/boot/dts/skeleton.dtsi deleted file mode 100644 index 34eda68d9ea2..000000000000 --- a/arch/arm/boot/dts/skeleton.dtsi +++ /dev/null @@ -1,18 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This file is deprecated, and will be removed once existing users have been - * updated. New dts{,i} files should *not* include skeleton.dtsi, and should - * instead explicitly provide the below nodes only as required. - * - * Skeleton device tree; the bare minimum needed to boot; just include and - * add a compatible value. The bootloader will typically populate the memory - * node. - */ - -/ { - #address-cells = <1>; - #size-cells = <1>; - chosen { }; - aliases { }; - memory { device_type = "memory"; reg = <0 0>; }; -}; diff --git a/arch/arm/boot/dts/skeleton64.dtsi b/arch/arm/boot/dts/skeleton64.dtsi deleted file mode 100644 index 54e637752b9d..000000000000 --- a/arch/arm/boot/dts/skeleton64.dtsi +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Skeleton device tree in the 64 bits version; the bare minimum - * needed to boot; just include and add a compatible value. The - * bootloader will typically populate the memory node. 
- */ - -/ { - #address-cells = <2>; - #size-cells = <2>; - chosen { }; - aliases { }; - memory { device_type = "memory"; reg = <0 0 0 0>; }; -}; diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index dcb8fba3d709..ec1966480f2f 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi @@ -84,6 +84,7 @@ #dma-requests = <32>; clocks = <&l4_main_clk>; clock-names = "apb_pclk"; + resets = <&rst DMA_RESET>; }; }; @@ -100,6 +101,7 @@ reg = <0xffc00000 0x1000>; interrupts = <0 131 4>, <0 132 4>, <0 133 4>, <0 134 4>; clocks = <&can0_clk>; + resets = <&rst CAN0_RESET>; status = "disabled"; }; @@ -108,6 +110,7 @@ reg = <0xffc01000 0x1000>; interrupts = <0 135 4>, <0 136 4>, <0 137 4>, <0 138 4>; clocks = <&can1_clk>; + resets = <&rst CAN1_RESET>; status = "disabled"; }; @@ -585,6 +588,7 @@ compatible = "snps,dw-apb-gpio"; reg = <0xff708000 0x1000>; clocks = <&l4_mp_clk>; + resets = <&rst GPIO0_RESET>; status = "disabled"; porta: gpio-controller@0 { @@ -605,6 +609,7 @@ compatible = "snps,dw-apb-gpio"; reg = <0xff709000 0x1000>; clocks = <&l4_mp_clk>; + resets = <&rst GPIO1_RESET>; status = "disabled"; portb: gpio-controller@0 { @@ -625,6 +630,7 @@ compatible = "snps,dw-apb-gpio"; reg = <0xff70a000 0x1000>; clocks = <&l4_mp_clk>; + resets = <&rst GPIO2_RESET>; status = "disabled"; portc: gpio-controller@0 { @@ -735,6 +741,7 @@ #size-cells = <0>; clocks = <&l4_mp_clk>, <&sdmmc_clk_divided>; clock-names = "biu", "ciu"; + resets = <&rst SDMMC_RESET>; status = "disabled"; }; @@ -748,6 +755,7 @@ interrupts = <0x0 0x90 0x4>; clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>; clock-names = "nand", "nand_x", "ecc"; + resets = <&rst NAND_RESET>; status = "disabled"; }; @@ -767,6 +775,7 @@ cdns,fifo-width = <4>; cdns,trigger-address = <0x00000000>; clocks = <&qspi_clk>; + resets = <&rst QSPI_RESET>; status = "disabled"; }; @@ -785,6 +794,7 @@ sdr: sdr@ffc25000 { compatible = "altr,sdr-ctl", "syscon"; reg = <0xffc25000 0x1000>; + resets = <&rst SDR_RESET>; }; sdramedac { @@ -801,6 +811,7 @@ interrupts = <0 154 4>; num-cs = <4>; clocks = <&spi_m_clk>; + resets = <&rst SPIM0_RESET>; status = "disabled"; }; @@ -812,6 +823,7 @@ interrupts = <0 155 4>; num-cs = <4>; clocks = <&spi_m_clk>; + resets = <&rst SPIM1_RESET>; status = "disabled"; }; @@ -878,6 +890,7 @@ dmas = <&pdma 28>, <&pdma 29>; dma-names = "tx", "rx"; + resets = <&rst UART0_RESET>; }; uart1: serial1@ffc03000 { @@ -890,6 +903,7 @@ dmas = <&pdma 30>, <&pdma 31>; dma-names = "tx", "rx"; + resets = <&rst UART1_RESET>; }; usbphy0: usbphy { @@ -929,6 +943,7 @@ reg = <0xffd02000 0x1000>; interrupts = <0 171 4>; clocks = <&osc1>; + resets = <&rst L4WD0_RESET>; status = "disabled"; }; @@ -937,6 +952,7 @@ reg = <0xffd03000 0x1000>; interrupts = <0 172 4>; clocks = <&osc1>; + resets = <&rst L4WD1_RESET>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi index e41fa23481c3..ae24599d5829 100644 --- a/arch/arm/boot/dts/socfpga_arria10.dtsi +++ b/arch/arm/boot/dts/socfpga_arria10.dtsi @@ -470,6 +470,7 @@ tx-fifo-depth = <4096>; rx-fifo-depth = <16384>; clocks = <&l4_mp_clk>; + resets = <&rst EMAC2_RESET>; clock-names = "stmmaceth"; snps,axi-config = <&socfpga_axi_setup>; status = "disabled"; @@ -480,6 +481,7 @@ #size-cells = <0>; compatible = "snps,dw-apb-gpio"; reg = <0xffc02900 0x100>; + resets = <&rst GPIO0_RESET>; status = "disabled"; porta: gpio-controller@0 { @@ -499,6 +501,7 @@ #size-cells = <0>; compatible = "snps,dw-apb-gpio"; reg = 
<0xffc02a00 0x100>; + resets = <&rst GPIO1_RESET>; status = "disabled"; portb: gpio-controller@0 { @@ -518,6 +521,7 @@ #size-cells = <0>; compatible = "snps,dw-apb-gpio"; reg = <0xffc02b00 0x100>; + resets = <&rst GPIO2_RESET>; status = "disabled"; portc: gpio-controller@0 { @@ -548,6 +552,7 @@ reg = <0xffc02200 0x100>; interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; + resets = <&rst I2C0_RESET>; status = "disabled"; }; @@ -558,6 +563,7 @@ reg = <0xffc02300 0x100>; interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; + resets = <&rst I2C1_RESET>; status = "disabled"; }; @@ -568,6 +574,7 @@ reg = <0xffc02400 0x100>; interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; + resets = <&rst I2C2_RESET>; status = "disabled"; }; @@ -578,6 +585,7 @@ reg = <0xffc02500 0x100>; interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; + resets = <&rst I2C3_RESET>; status = "disabled"; }; @@ -588,6 +596,7 @@ reg = <0xffc02600 0x100>; interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sp_clk>; + resets = <&rst I2C4_RESET>; status = "disabled"; }; @@ -600,6 +609,7 @@ num-cs = <4>; /*32bit_access;*/ clocks = <&spi_m_clk>; + resets = <&rst SPIM0_RESET>; status = "disabled"; }; @@ -614,6 +624,7 @@ tx-dma-channel = <&pdma 16>; rx-dma-channel = <&pdma 17>; clocks = <&spi_m_clk>; + resets = <&rst SPIM1_RESET>; status = "disabled"; }; @@ -642,6 +653,7 @@ fifo-depth = <0x400>; clocks = <&l4_mp_clk>, <&sdmmc_clk>; clock-names = "biu", "ciu"; + resets = <&rst SDMMC_RESET>; status = "disabled"; }; @@ -655,6 +667,7 @@ interrupts = <0 99 4>; clocks = <&nand_clk>, <&nand_x_clk>, <&nand_ecc_clk>; clock-names = "nand", "nand_x", "ecc"; + resets = <&rst NAND_RESET>; status = "disabled"; }; @@ -739,6 +752,7 @@ cdns,fifo-width = <4>; cdns,trigger-address = <0x00000000>; clocks = <&qspi_clk>; + resets = <&rst QSPI_RESET>; status = "disabled"; }; @@ -815,6 +829,7 @@ reg-shift = <2>; reg-io-width = <4>; clocks = <&l4_sp_clk>; + resets = <&rst UART0_RESET>; status = "disabled"; }; @@ -825,6 +840,7 @@ reg-shift = <2>; reg-io-width = <4>; clocks = <&l4_sp_clk>; + resets = <&rst UART1_RESET>; status = "disabled"; }; @@ -865,6 +881,7 @@ reg = <0xffd00200 0x100>; interrupts = <0 119 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sys_free_clk>; + resets = <&rst L4WD0_RESET>; status = "disabled"; }; @@ -873,6 +890,7 @@ reg = <0xffd00300 0x100>; interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>; clocks = <&l4_sys_free_clk>; + resets = <&rst L4WD1_RESET>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/socfpga_cyclone5_chameleon96.dts b/arch/arm/boot/dts/socfpga_cyclone5_chameleon96.dts new file mode 100644 index 000000000000..f6561766d83f --- /dev/null +++ b/arch/arm/boot/dts/socfpga_cyclone5_chameleon96.dts @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Device Tree file for the Chameleon96 + * + * Copyright (c) 2018 Manivannan Sadhasivam + */ + +#include + +#include "socfpga_cyclone5.dtsi" + +/ { + model = "Novetech Chameleon96"; + compatible = "novtech,chameleon96", "altr,socfpga-cyclone5", "altr,socfpga"; + + chosen { + bootargs = "earlyprintk"; + stdout-path = "serial0:115200n8"; + }; + + memory@0 { + name = "memory"; + device_type = "memory"; + reg = <0x0 0x20000000>; /* 512MB */ + }; + + regulator_3_3v: 3-3-v-regulator { + compatible = "regulator-fixed"; + regulator-name = "3.3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + leds { + compatible = "gpio-leds"; + + user_led1 { + label = "green:user1"; + gpios = <&porta 14 
GPIO_ACTIVE_LOW>; + linux,default-trigger = "heartbeat"; + }; + + user_led2 { + label = "green:user2"; + gpios = <&porta 22 GPIO_ACTIVE_LOW>; + linux,default-trigger = "mmc0"; + }; + + user_led3 { + label = "green:user3"; + gpios = <&porta 25 GPIO_ACTIVE_LOW>; + linux,default-trigger = "none"; + }; + + user_led4 { + label = "green:user4"; + gpios = <&portb 3 GPIO_ACTIVE_LOW>; + panic-indicator; + linux,default-trigger = "none"; + }; + }; +}; + +&gpio0 { + status = "okay"; +}; + +&gpio1 { + status = "okay"; +}; + +&i2c0 { + /* On Low speed expansion */ + label = "LS-I2C0"; + status = "okay"; +}; + +&i2c1 { + /* On Low speed expansion */ + label = "LS-I2C1"; + status = "okay"; +}; + +&i2c2 { + status = "okay"; +}; + +&i2c3 { + /* On High speed expansion */ + label = "HS-I2C2"; + status = "okay"; +}; + +&mmc0 { + vmmc-supply = <®ulator_3_3v>; + vqmmc-supply = <®ulator_3_3v>; + status = "okay"; +}; + +&spi0 { + /* On High speed expansion */ + label = "HS-SPI1"; + status = "okay"; +}; + +&spi1 { + /* On Low speed expansion */ + label = "LS-SPI0"; + status = "okay"; +}; + +&uart0 { + /* On Low speed expansion */ + label = "LS-UART1"; + status = "okay"; +}; + +&uart1 { + /* On Low speed expansion */ + label = "LS-UART0"; + status = "okay"; +}; + +&usbphy0 { + status = "okay"; +}; + +&usb1 { + status = "okay"; +}; diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index 086b4b333249..390df643a174 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -11,9 +11,9 @@ * http://www.gnu.org/copyleft/gpl.html */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; interrupt-parent = <&gic>; cpus { diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi index 118135d75899..c47380763cae 100644 --- a/arch/arm/boot/dts/spear3xx.dtsi +++ b/arch/arm/boot/dts/spear3xx.dtsi @@ -11,9 +11,9 @@ * http://www.gnu.org/copyleft/gpl.html */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; interrupt-parent = <&vic>; cpus { diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi index 00166eb9be86..0a634fb07452 100644 --- a/arch/arm/boot/dts/spear600.dtsi +++ b/arch/arm/boot/dts/spear600.dtsi @@ -9,9 +9,9 @@ * http://www.gnu.org/copyleft/gpl.html */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "st,spear600"; cpus { diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts index 12afdc7467e7..04066f9cb8a3 100644 --- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts +++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Device Tree for the ST-Ericsson Nomadik S8815 board - * Produced by Calao Systems + * Device Tree for the ST Microelectronics Nomadik NHK8815 board */ /dts-v1/; @@ -182,43 +181,12 @@ pinctrl-names = "default"; pinctrl-0 = <&clcd_24bit_mux>; port { - nomadik_clcd_pads: endpoint { + nomadik_clcd: endpoint { remote-endpoint = <&nomadik_clcd_panel>; arm,pl11x,tft-r0g0b0-pads = <16 8 0>; }; }; - /* - * WVGA connector 21 - * WVGA (800x480): 4.3" TPG110 TDO43MTEA2 24-bit RGB - * with TPO touch screen. 
- */ - panel { - compatible = "tpo,tpg110", "panel-dpi"; - grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>; - scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; - scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>; - sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>; - backlight = <&bl>; - - port { - nomadik_clcd_panel: endpoint { - remote-endpoint = <&nomadik_clcd_pads>; - }; - }; - - panel-timing { - clock-frequency = <33200000>; - hactive = <800>; - hback-porch = <216>; - hfront-porch = <40>; - hsync-len = <1>; - vactive = <480>; - vback-porch = <35>; - vfront-porch = <10>; - vsync-len = <1>; - }; - }; }; /* Activate RX/TX and CTS/RTS on UART 0 */ @@ -233,6 +201,55 @@ }; }; + spi { + compatible = "spi-gpio"; + #address-cells = <1>; + #size-cells = <0>; + + /* + * As we're dealing with 3wire SPI, we only define SCK + * and MOSI (in the spec MOSI is called "SDA"). + */ + gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>; + gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>; + /* + * It's not actually active high, but the frameworks assume + * the polarity of the passed-in GPIO is "normal" (active + * high) then actively drives the line low to select the + * chip. + */ + cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; + num-chipselects = <1>; + + /* + * WVGA connector 21 + * WVGA (800x480): 4.3" TPG110 TDO43MTEA2 24-bit RGB + * with TPO touch screen. + */ + panel: display@0 { + /* + * The TPO display driver is connected to a + * 5.7" OSD OSD057VA01CT TFT display. + */ + compatible = "tpo,tpg110"; + reg = <0>; + spi-3wire; + /* 320 ns min period ~= 3 MHz */ + spi-max-frequency = <3000000>; + /* Width and height from the OSD data sheet */ + width-mm = <116>; + height-mm = <87>; + grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>; + backlight = <&bl>; + + port { + nomadik_clcd_panel: endpoint { + remote-endpoint = <&nomadik_clcd>; + }; + }; + }; + }; + bl: backlight { compatible = "pwm-backlight"; pwms = <&stmpe0_pwm 0 500000>; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index fca76a696d9d..f78b4eabd68c 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -4,13 +4,13 @@ */ #include -#include "skeleton.dtsi" / { #address-cells = <1>; #size-cells = <1>; memory { + device_type = "memory"; reg = <0x00000000 0x04000000>, <0x08000000 0x04000000>; }; diff --git a/arch/arm/boot/dts/ste-u300.dts b/arch/arm/boot/dts/ste-u300.dts index 1bd1aba3322f..f4e7660fead7 100644 --- a/arch/arm/boot/dts/ste-u300.dts +++ b/arch/arm/boot/dts/ste-u300.dts @@ -4,7 +4,6 @@ */ /dts-v1/; -/include/ "skeleton.dtsi" / { model = "ST-Ericsson U300"; @@ -22,6 +21,7 @@ }; memory { + device_type = "memory"; reg = <0x48000000 0x03c00000>; }; diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts index ed7d7f46465e..73ea84df7bf4 100644 --- a/arch/arm/boot/dts/stm32429i-eval.dts +++ b/arch/arm/boot/dts/stm32429i-eval.dts @@ -61,6 +61,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x2000000>; }; diff --git a/arch/arm/boot/dts/stm32746g-eval.dts b/arch/arm/boot/dts/stm32746g-eval.dts index 8c081eaf20fe..d90b0d1e18c7 100644 --- a/arch/arm/boot/dts/stm32746g-eval.dts +++ b/arch/arm/boot/dts/stm32746g-eval.dts @@ -55,6 +55,7 @@ }; memory { + device_type = "memory"; reg = <0xc0000000 0x2000000>; }; diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts index 5ceb2cf3777f..e19d0fe7dbda 100644 --- a/arch/arm/boot/dts/stm32f429-disco.dts +++ b/arch/arm/boot/dts/stm32f429-disco.dts @@ -60,6 +60,7 @@ }; 
memory { + device_type = "memory"; reg = <0x90000000 0x800000>; }; diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi index 8d6f028ae285..588b6ef94e93 100644 --- a/arch/arm/boot/dts/stm32f429.dtsi +++ b/arch/arm/boot/dts/stm32f429.dtsi @@ -45,12 +45,14 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "skeleton.dtsi" #include "armv7-m.dtsi" #include #include / { + #address-cells = <1>; + #size-cells = <1>; + clocks { clk_hse: clk-hse { #clock-cells = <0>; @@ -314,6 +316,26 @@ status = "disabled"; }; + spi2: spi@40003800 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "st,stm32f4-spi"; + reg = <0x40003800 0x400>; + interrupts = <36>; + clocks = <&rcc 0 STM32F4_APB1_CLOCK(SPI2)>; + status = "disabled"; + }; + + spi3: spi@40003c00 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "st,stm32f4-spi"; + reg = <0x40003c00 0x400>; + interrupts = <51>; + clocks = <&rcc 0 STM32F4_APB1_CLOCK(SPI3)>; + status = "disabled"; + }; + usart2: serial@40004400 { compatible = "st,stm32-uart"; reg = <0x40004400 0x400>; @@ -523,6 +545,26 @@ status = "disabled"; }; + spi1: spi@40013000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "st,stm32f4-spi"; + reg = <0x40013000 0x400>; + interrupts = <35>; + clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI1)>; + status = "disabled"; + }; + + spi4: spi@40013400 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "st,stm32f4-spi"; + reg = <0x40013400 0x400>; + interrupts = <84>; + clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI4)>; + status = "disabled"; + }; + syscfg: system-config@40013800 { compatible = "syscon"; reg = <0x40013800 0x400>; @@ -587,6 +629,26 @@ }; }; + spi5: spi@40015000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "st,stm32f4-spi"; + reg = <0x40015000 0x400>; + interrupts = <85>; + clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI5)>; + status = "disabled"; + }; + + spi6: spi@40015400 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "st,stm32f4-spi"; + reg = <0x40015400 0x400>; + interrupts = <86>; + clocks = <&rcc 0 STM32F4_APB2_CLOCK(SPI6)>; + status = "disabled"; + }; + pwrcfg: power-config@40007000 { compatible = "syscon"; reg = <0x40007000 0x400>; diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts index 7937b43d7788..a3ff04940aec 100644 --- a/arch/arm/boot/dts/stm32f469-disco.dts +++ b/arch/arm/boot/dts/stm32f469-disco.dts @@ -61,6 +61,7 @@ }; memory { + device_type = "memory"; reg = <0x00000000 0x1000000>; }; diff --git a/arch/arm/boot/dts/stm32f746-disco.dts b/arch/arm/boot/dts/stm32f746-disco.dts index e3a7bd338d61..0ba9c5b08ab9 100644 --- a/arch/arm/boot/dts/stm32f746-disco.dts +++ b/arch/arm/boot/dts/stm32f746-disco.dts @@ -56,6 +56,7 @@ }; memory { + device_type = "memory"; reg = <0xC0000000 0x800000>; }; diff --git a/arch/arm/boot/dts/stm32f746.dtsi b/arch/arm/boot/dts/stm32f746.dtsi index f48d06a80d1d..a25b7000a3a1 100644 --- a/arch/arm/boot/dts/stm32f746.dtsi +++ b/arch/arm/boot/dts/stm32f746.dtsi @@ -40,12 +40,14 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include "skeleton.dtsi" #include "armv7-m.dtsi" #include #include / { + #address-cells = <1>; + #size-cells = <1>; + clocks { clk_hse: clk-hse { #clock-cells = <0>; diff --git a/arch/arm/boot/dts/stm32f769-disco.dts b/arch/arm/boot/dts/stm32f769-disco.dts index 483d896e2bc1..3c7216844a9b 100644 --- a/arch/arm/boot/dts/stm32f769-disco.dts +++ b/arch/arm/boot/dts/stm32f769-disco.dts @@ -56,6 +56,7 @@ }; memory { + device_type = "memory"; reg = <0xC0000000 0x1000000>; }; diff --git a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi index 24be8e63dec8..980b2769caf9 100644 --- a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi +++ b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi @@ -173,6 +173,21 @@ }; }; + ethernet_rmii: rmii@0 { + pins { + pinmux = , + , + , + , + , + , + , + , + ; + slew-rate = <2>; + }; + }; + usart1_pins: usart1@0 { pins1 { pinmux = ; /* USART1_TX */ diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi index cbdd69ca9e7a..5cac79ebebb1 100644 --- a/arch/arm/boot/dts/stm32h743.dtsi +++ b/arch/arm/boot/dts/stm32h743.dtsi @@ -40,13 +40,15 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include "skeleton.dtsi" #include "armv7-m.dtsi" #include #include #include / { + #address-cells = <1>; + #size-cells = <1>; + clocks { clk_hse: clk-hse { #clock-cells = <0>; @@ -511,6 +513,19 @@ status = "disabled"; }; }; + + mac: ethernet@40028000 { + compatible = "st,stm32-dwmac", "snps,dwmac-4.10a"; + reg = <0x40028000 0x8000>; + reg-names = "stmmaceth"; + interrupts = <61>; + interrupt-names = "macirq"; + clock-names = "stmmaceth", "mac-clk-tx", "mac-clk-rx"; + clocks = <&rcc ETH1MAC_CK>, <&rcc ETH1TX_CK>, <&rcc ETH1RX_CK>; + st,syscon = <&syscfg 0x4>; + snps,pbl = <8>; + status = "disabled"; + }; }; }; diff --git a/arch/arm/boot/dts/stm32h743i-disco.dts b/arch/arm/boot/dts/stm32h743i-disco.dts index 45e088c55741..dd06c8f3d09a 100644 --- a/arch/arm/boot/dts/stm32h743i-disco.dts +++ b/arch/arm/boot/dts/stm32h743i-disco.dts @@ -54,6 +54,7 @@ }; memory { + device_type = "memory"; reg = <0xd0000000 0x2000000>; }; @@ -66,6 +67,23 @@ clock-frequency = <25000000>; }; +&mac { + status = "disabled"; + pinctrl-0 = <ðernet_rmii>; + pinctrl-names = "default"; + phy-mode = "rmii"; + phy-handle = <&phy0>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + &usart2 { pinctrl-0 = <&usart2_pins>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts index 3f8e0c4a998d..ebc3f0933f5c 100644 --- a/arch/arm/boot/dts/stm32h743i-eval.dts +++ b/arch/arm/boot/dts/stm32h743i-eval.dts @@ -54,6 +54,7 @@ }; memory { + device_type = "memory"; reg = <0xd0000000 0x2000000>; }; @@ -104,6 +105,23 @@ status = "okay"; }; +&mac { + status = "disabled"; + pinctrl-0 = <ðernet_rmii>; + pinctrl-names = "default"; + phy-mode = "rmii"; + phy-handle = <&phy0>; + + mdio0 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,dwmac-mdio"; + phy0: ethernet-phy@0 { + reg = <0>; + }; + }; +}; + &usart1 { pinctrl-0 = <&usart1_pins>; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi index c4851271e810..9ec4694e93a7 100644 --- a/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi +++ b/arch/arm/boot/dts/stm32mp157-pinctrl.dtsi @@ -246,6 +246,13 @@ }; }; + m_can1_sleep_pins_a: m_can1-sleep@0 { + pins { + pinmux = , /* CAN1_TX */ + ; /* CAN1_RX */ + }; + }; + 
pwm2_pins_a: pwm2-0 { pins { pinmux = ; /* TIM2_CH4 */ diff --git a/arch/arm/boot/dts/stm32mp157c-ed1.dts b/arch/arm/boot/dts/stm32mp157c-ed1.dts index f77bea49c079..d66edb0c66cd 100644 --- a/arch/arm/boot/dts/stm32mp157c-ed1.dts +++ b/arch/arm/boot/dts/stm32mp157c-ed1.dts @@ -17,6 +17,7 @@ }; memory@c0000000 { + device_type = "memory"; reg = <0xC0000000 0x40000000>; }; @@ -49,6 +50,10 @@ }; }; +&dts { + status = "okay"; +}; + &i2c4 { pinctrl-names = "default"; pinctrl-0 = <&i2c4_pins_a>; @@ -72,6 +77,9 @@ &timers6 { status = "okay"; + /* spare dmas for other usage */ + /delete-property/dmas; + /delete-property/dma-names; timer@5 { status = "okay"; }; diff --git a/arch/arm/boot/dts/stm32mp157c-ev1.dts b/arch/arm/boot/dts/stm32mp157c-ev1.dts index 063ee8ac5dcb..b6aca40b9b90 100644 --- a/arch/arm/boot/dts/stm32mp157c-ev1.dts +++ b/arch/arm/boot/dts/stm32mp157c-ev1.dts @@ -124,8 +124,9 @@ }; &m_can1 { - pinctrl-names = "default"; + pinctrl-names = "default", "sleep"; pinctrl-0 = <&m_can1_pins_a>; + pinctrl-1 = <&m_can1_sleep_pins_a>; status = "okay"; }; @@ -161,6 +162,9 @@ }; &timers2 { + /* spare dmas for other usage (un-delete to enable pwm capture) */ + /delete-property/dmas; + /delete-property/dma-names; status = "disabled"; pwm { pinctrl-0 = <&pwm2_pins_a>; @@ -173,6 +177,8 @@ }; &timers8 { + /delete-property/dmas; + /delete-property/dma-names; status = "disabled"; pwm { pinctrl-0 = <&pwm8_pins_a>; @@ -185,6 +191,8 @@ }; &timers12 { + /delete-property/dmas; + /delete-property/dma-names; status = "disabled"; pwm { pinctrl-0 = <&pwm12_pins_a>; diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi index 8bf1c17f8cef..f8bbfff5950b 100644 --- a/arch/arm/boot/dts/stm32mp157c.dtsi +++ b/arch/arm/boot/dts/stm32mp157c.dtsi @@ -84,6 +84,31 @@ }; }; + thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&dts>; + + trips { + cpu_alert1: cpu-alert1 { + temperature = <85000>; + hysteresis = <0>; + type = "passive"; + }; + + cpu-crit { + temperature = <120000>; + hysteresis = <0>; + type = "critical"; + }; + }; + + cooling-maps { + }; + }; + }; + soc { compatible = "simple-bus"; #address-cells = <1>; @@ -98,6 +123,12 @@ reg = <0x40000000 0x400>; clocks = <&rcc TIM2_K>; clock-names = "int"; + dmas = <&dmamux1 18 0x400 0x1>, + <&dmamux1 19 0x400 0x1>, + <&dmamux1 20 0x400 0x1>, + <&dmamux1 21 0x400 0x1>, + <&dmamux1 22 0x400 0x1>; + dma-names = "ch1", "ch2", "ch3", "ch4", "up"; status = "disabled"; pwm { @@ -119,6 +150,13 @@ reg = <0x40001000 0x400>; clocks = <&rcc TIM3_K>; clock-names = "int"; + dmas = <&dmamux1 23 0x400 0x1>, + <&dmamux1 24 0x400 0x1>, + <&dmamux1 25 0x400 0x1>, + <&dmamux1 26 0x400 0x1>, + <&dmamux1 27 0x400 0x1>, + <&dmamux1 28 0x400 0x1>; + dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig"; status = "disabled"; pwm { @@ -140,6 +178,11 @@ reg = <0x40002000 0x400>; clocks = <&rcc TIM4_K>; clock-names = "int"; + dmas = <&dmamux1 29 0x400 0x1>, + <&dmamux1 30 0x400 0x1>, + <&dmamux1 31 0x400 0x1>, + <&dmamux1 32 0x400 0x1>; + dma-names = "ch1", "ch2", "ch3", "ch4"; status = "disabled"; pwm { @@ -161,6 +204,13 @@ reg = <0x40003000 0x400>; clocks = <&rcc TIM5_K>; clock-names = "int"; + dmas = <&dmamux1 55 0x400 0x1>, + <&dmamux1 56 0x400 0x1>, + <&dmamux1 57 0x400 0x1>, + <&dmamux1 58 0x400 0x1>, + <&dmamux1 59 0x400 0x1>, + <&dmamux1 60 0x400 0x1>; + dma-names = "ch1", "ch2", "ch3", "ch4", "up", "trig"; status = "disabled"; pwm { @@ -182,6 +232,8 @@ reg = <0x40004000 0x400>; clocks = 
<&rcc TIM6_K>; clock-names = "int"; + dmas = <&dmamux1 69 0x400 0x1>; + dma-names = "up"; status = "disabled"; timer@5 { @@ -198,6 +250,8 @@ reg = <0x40005000 0x400>; clocks = <&rcc TIM7_K>; clock-names = "int"; + dmas = <&dmamux1 70 0x400 0x1>; + dma-names = "up"; status = "disabled"; timer@6 { @@ -465,6 +519,15 @@ reg = <0x44000000 0x400>; clocks = <&rcc TIM1_K>; clock-names = "int"; + dmas = <&dmamux1 11 0x400 0x1>, + <&dmamux1 12 0x400 0x1>, + <&dmamux1 13 0x400 0x1>, + <&dmamux1 14 0x400 0x1>, + <&dmamux1 15 0x400 0x1>, + <&dmamux1 16 0x400 0x1>, + <&dmamux1 17 0x400 0x1>; + dma-names = "ch1", "ch2", "ch3", "ch4", + "up", "trig", "com"; status = "disabled"; pwm { @@ -486,6 +549,15 @@ reg = <0x44001000 0x400>; clocks = <&rcc TIM8_K>; clock-names = "int"; + dmas = <&dmamux1 47 0x400 0x1>, + <&dmamux1 48 0x400 0x1>, + <&dmamux1 49 0x400 0x1>, + <&dmamux1 50 0x400 0x1>, + <&dmamux1 51 0x400 0x1>, + <&dmamux1 52 0x400 0x1>, + <&dmamux1 53 0x400 0x1>; + dma-names = "ch1", "ch2", "ch3", "ch4", + "up", "trig", "com"; status = "disabled"; pwm { @@ -543,6 +615,11 @@ reg = <0x44006000 0x400>; clocks = <&rcc TIM15_K>; clock-names = "int"; + dmas = <&dmamux1 105 0x400 0x1>, + <&dmamux1 106 0x400 0x1>, + <&dmamux1 107 0x400 0x1>, + <&dmamux1 108 0x400 0x1>; + dma-names = "ch1", "up", "trig", "com"; status = "disabled"; pwm { @@ -564,6 +641,9 @@ reg = <0x44007000 0x400>; clocks = <&rcc TIM16_K>; clock-names = "int"; + dmas = <&dmamux1 109 0x400 0x1>, + <&dmamux1 110 0x400 0x1>; + dma-names = "ch1", "up"; status = "disabled"; pwm { @@ -584,6 +664,9 @@ reg = <0x44008000 0x400>; clocks = <&rcc TIM17_K>; clock-names = "int"; + dmas = <&dmamux1 111 0x400 0x1>, + <&dmamux1 112 0x400 0x1>; + dma-names = "ch1", "up"; status = "disabled"; pwm { @@ -684,14 +767,14 @@ m_can1: can@4400e000 { compatible = "bosch,m_can"; - reg = <0x4400e000 0x400>, <0x44011000 0x2800>; + reg = <0x4400e000 0x400>, <0x44011000 0x1400>; reg-names = "m_can", "message_ram"; interrupts = , ; interrupt-names = "int0", "int1"; clocks = <&rcc CK_HSE>, <&rcc FDCAN_K>; clock-names = "hclk", "cclk"; - bosch,mram-cfg = <0x0 0 0 32 0 0 2 2>; + bosch,mram-cfg = <0x1400 0 0 32 0 0 2 2>; status = "disabled"; }; @@ -908,6 +991,16 @@ status = "disabled"; }; + dts: thermal@50028000 { + compatible = "st,stm32-thermal"; + reg = <0x50028000 0x100>; + interrupts = ; + clocks = <&rcc TMPSENS>; + clock-names = "pclk"; + #thermal-sensor-cells = <0>; + status = "disabled"; + }; + cryp1: cryp@54001000 { compatible = "st,stm32mp1-cryp"; reg = <0x54001000 0x400>; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 5d46bb0139fa..73c3ac42095f 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -184,6 +184,26 @@ status = "disabled"; }; + pmu { + compatible = "arm,cortex-a8-pmu"; + interrupts = <3>; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + /* Address must be kept in the lower 256 MiBs of DRAM for VE. 
*/ + default-pool { + compatible = "shared-dma-pool"; + size = <0x6000000>; + alloc-ranges = <0x4a000000 0x6000000>; + reusable; + linux,cma-default; + }; + }; + soc { compatible = "simple-bus"; #address-cells = <1>; @@ -224,6 +244,19 @@ status = "disabled"; }; }; + + sram_c: sram@1d00000 { + compatible = "mmio-sram"; + reg = <0x01d00000 0xd0000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x01d00000 0xd0000>; + + ve_sram: sram-section@0 { + compatible = "allwinner,sun4i-a10-sram-c1"; + reg = <0x000000 0x80000>; + }; + }; }; dma: dma-controller@1c02000 { @@ -394,6 +427,17 @@ }; }; + video-codec@1c0e000 { + compatible = "allwinner,sun4i-a10-video-engine"; + reg = <0x01c0e000 0x1000>; + clocks = <&ccu CLK_AHB_VE>, <&ccu CLK_VE>, + <&ccu CLK_DRAM_VE>; + clock-names = "ahb", "mod", "ram"; + resets = <&ccu RST_VE>; + interrupts = <53>; + allwinner,sram = <&ve_sram 1>; + }; + mmc0: mmc@1c0f000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c0f000 0x1000>; diff --git a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts index a89f29fa3e40..7257f39b31ce 100644 --- a/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts +++ b/arch/arm/boot/dts/sun5i-a13-q8-tablet.dts @@ -49,14 +49,15 @@ compatible = "allwinner,q8-a13", "allwinner,sun5i-a13"; panel: panel { - compatible = "urt,umsh-8596md-t", "simple-panel"; + compatible = "bananapi,s070wv20-ct16", "simple-panel"; + power-supply = <®_vcc3v3>; + enable-gpios = <&axp_gpio 0 GPIO_ACTIVE_HIGH>; /* AXP GPIO0 */ + backlight = <&backlight>; #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; - /* TODO: lcd panel uses axp gpio0 as enable pin */ - backlight = <&backlight>; #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi index 6202aabedbfe..5b1f0e198eb6 100644 --- a/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi +++ b/arch/arm/boot/dts/sun5i-reference-design-tablet.dtsi @@ -54,7 +54,7 @@ pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>; brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>; default-brightness-level = <8>; - /* TODO: backlight uses axp gpio1 as enable pin */ + enable-gpios = <&axp_gpio 1 GPIO_ACTIVE_HIGH>; /* AXP GPIO1 */ }; chosen { diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 353d90f99b40..13304b8c5139 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -216,6 +216,7 @@ #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <24000000>; + clock-output-names = "osc24M"; }; osc32k: clk-32k { diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts index 556b1b591c5d..81bc85d398c1 100644 --- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts +++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts @@ -191,6 +191,11 @@ }; &pio { + vcc-pa-supply = <®_vcc3v3>; + vcc-pc-supply = <®_vcc3v3>; + vcc-pe-supply = <®_vcc3v3>; + vcc-pf-supply = <®_vcc3v3>; + vcc-pg-supply = <®_vcc3v3>; gpio-line-names = /* PA */ "ERXD3", "ERXD2", "ERXD1", "ERXD0", "ETXD3", diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index a9c123de5d2c..43fe215e83ea 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -68,6 +68,12 @@ }; }; + de: display-engine { + /* compatible gets set in SoC specific dtsi file */ + allwinner,pipelines = <&fe0>; + status = "disabled"; + }; + timer { compatible = "arm,armv7-timer"; interrupts = , 
@@ -155,6 +161,55 @@ #dma-cells = <1>; }; + nfc: nand@1c03000 { + compatible = "allwinner,sun4i-a10-nand"; + reg = <0x01c03000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>; + clock-names = "ahb", "mod"; + resets = <&ccu RST_BUS_NAND>; + reset-names = "ahb"; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + tcon0: lcd-controller@1c0c000 { + /* compatible gets set in SoC specific dtsi file */ + reg = <0x01c0c000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_LCD>, + <&ccu CLK_LCD_CH0>; + clock-names = "ahb", + "tcon-ch0"; + clock-output-names = "tcon-pixel-clock"; + resets = <&ccu RST_BUS_LCD>; + reset-names = "lcd"; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + tcon0_in: port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + tcon0_in_drc0: endpoint@0 { + reg = <0>; + remote-endpoint = <&drc0_out_tcon0>; + }; + }; + + tcon0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + }; + }; + }; + mmc0: mmc@1c0f000 { compatible = "allwinner,sun7i-a20-mmc"; reg = <0x01c0f000 0x1000>; @@ -214,21 +269,6 @@ #size-cells = <0>; }; - nfc: nand@1c03000 { - compatible = "allwinner,sun4i-a10-nand"; - reg = <0x01c03000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>; - clock-names = "ahb", "mod"; - resets = <&ccu RST_BUS_NAND>; - reset-names = "ahb"; - pinctrl-names = "default"; - pinctrl-0 = <&nand_pins &nand_pins_cs0 &nand_pins_rb0>; - status = "disabled"; - #address-cells = <1>; - #size-cells = <0>; - }; - usb_otg: usb@1c19000 { /* compatible gets set in SoC specific dtsi file */ reg = <0x01c19000 0x0400>; @@ -572,6 +612,111 @@ interrupts = ; }; + fe0: display-frontend@1e00000 { + /* compatible gets set in SoC specific dtsi file */ + reg = <0x01e00000 0x20000>; + interrupts = ; + clocks = <&ccu CLK_BUS_DE_FE>, <&ccu CLK_DE_FE>, + <&ccu CLK_DRAM_DE_FE>; + clock-names = "ahb", "mod", + "ram"; + resets = <&ccu RST_BUS_DE_FE>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + fe0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + fe0_out_be0: endpoint@0 { + reg = <0>; + remote-endpoint = <&be0_in_fe0>; + }; + }; + }; + }; + + be0: display-backend@1e60000 { + /* compatible gets set in SoC specific dtsi file */ + reg = <0x01e60000 0x10000>; + interrupts = ; + clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_DRAM_DE_BE>; + clock-names = "ahb", "mod", + "ram"; + resets = <&ccu RST_BUS_DE_BE>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + be0_in: port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + be0_in_fe0: endpoint@0 { + reg = <0>; + remote-endpoint = <&fe0_out_be0>; + }; + }; + + be0_out: port@1 { + #address-cells = <1>; + #size-cells = <0>; + reg = <1>; + + be0_out_drc0: endpoint@0 { + reg = <0>; + remote-endpoint = <&drc0_in_be0>; + }; + }; + }; + }; + + drc0: drc@1e70000 { + /* compatible gets set in SoC specific dtsi file */ + reg = <0x01e70000 0x10000>; + interrupts = ; + clocks = <&ccu CLK_BUS_DRC>, <&ccu CLK_DRC>, + <&ccu CLK_DRAM_DRC>; + clock-names = "ahb", "mod", "ram"; + resets = <&ccu RST_BUS_DRC>; + + assigned-clocks = <&ccu CLK_DRC>; + assigned-clock-rates = <300000000>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + drc0_in: port@0 { + #address-cells = <1>; + #size-cells = <0>; + reg = <0>; + + drc0_in_be0: endpoint@0 { + reg = <0>; + remote-endpoint = <&be0_out_drc0>; + }; + }; + + drc0_out: port@1 { + #address-cells = <1>; + #size-cells = 
<0>; + reg = <1>; + + drc0_out_tcon0: endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon0_in_drc0>; + }; + }; + }; + }; + rtc: rtc@1f00000 { compatible = "allwinner,sun8i-a23-rtc"; reg = <0x01f00000 0x400>; diff --git a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts index b6958e8f2f01..d4dab7c28398 100644 --- a/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts +++ b/arch/arm/boot/dts/sun8i-a23-q8-tablet.dts @@ -61,3 +61,7 @@ "Headset Mic", "HBIAS"; status = "okay"; }; + +&panel { + compatible = "bananapi,s070wv20-ct16", "simple-panel"; +}; diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index d00055e9eef5..a5e884a8b2ae 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi @@ -62,10 +62,26 @@ }; }; +&be0 { + compatible = "allwinner,sun8i-a23-display-backend"; +}; + &ccu { compatible = "allwinner,sun8i-a23-ccu"; }; +&de { + compatible = "allwinner,sun8i-a23-display-engine"; +}; + +&drc0 { + compatible = "allwinner,sun8i-a23-drc"; +}; + +&fe0 { + compatible = "allwinner,sun8i-a23-display-frontend"; +}; + &pio { compatible = "allwinner,sun8i-a23-pinctrl"; interrupts = , @@ -73,6 +89,10 @@ ; }; +&tcon0 { + compatible = "allwinner,sun8i-a23-tcon"; +}; + &usb_otg { compatible = "allwinner,sun6i-a31-musb"; }; diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi index 626152c30f50..1111a6498102 100644 --- a/arch/arm/boot/dts/sun8i-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a33.dtsi @@ -159,12 +159,6 @@ }; }; - de: display-engine { - compatible = "allwinner,sun8i-a33-display-engine"; - allwinner,pipelines = <&fe0>; - status = "disabled"; - }; - iio-hwmon { compatible = "iio-hwmon"; io-channels = <&ths>; @@ -209,47 +203,6 @@ }; soc { - tcon0: lcd-controller@1c0c000 { - compatible = "allwinner,sun8i-a33-tcon"; - reg = <0x01c0c000 0x1000>; - interrupts = ; - clocks = <&ccu CLK_BUS_LCD>, - <&ccu CLK_LCD_CH0>; - clock-names = "ahb", - "tcon-ch0"; - clock-output-names = "tcon-pixel-clock"; - resets = <&ccu RST_BUS_LCD>; - reset-names = "lcd"; - status = "disabled"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - tcon0_in: port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - - tcon0_in_drc0: endpoint@0 { - reg = <0>; - remote-endpoint = <&drc0_out_tcon0>; - }; - }; - - tcon0_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - tcon0_out_dsi: endpoint@1 { - reg = <1>; - remote-endpoint = <&dsi_in_tcon0>; - }; - }; - }; - }; - video-codec@1c0e000 { compatible = "allwinner,sun8i-a33-video-engine"; reg = <0x01c0e000 0x1000>; @@ -339,115 +292,6 @@ status = "disabled"; #phy-cells = <0>; }; - - fe0: display-frontend@1e00000 { - compatible = "allwinner,sun8i-a33-display-frontend"; - reg = <0x01e00000 0x20000>; - interrupts = ; - clocks = <&ccu CLK_BUS_DE_FE>, <&ccu CLK_DE_FE>, - <&ccu CLK_DRAM_DE_FE>; - clock-names = "ahb", "mod", - "ram"; - resets = <&ccu RST_BUS_DE_FE>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - fe0_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - fe0_out_be0: endpoint@0 { - reg = <0>; - remote-endpoint = <&be0_in_fe0>; - }; - }; - }; - }; - - be0: display-backend@1e60000 { - compatible = "allwinner,sun8i-a33-display-backend"; - reg = <0x01e60000 0x10000>, <0x01e80000 0x1000>; - reg-names = "be", "sat"; - interrupts = ; - clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>, - <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_BUS_SAT>; - clock-names = "ahb", "mod", - "ram", "sat"; - resets = <&ccu 
RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>; - reset-names = "be", "sat"; - assigned-clocks = <&ccu CLK_DE_BE>; - assigned-clock-rates = <300000000>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - be0_in: port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - - be0_in_fe0: endpoint@0 { - reg = <0>; - remote-endpoint = <&fe0_out_be0>; - }; - }; - - be0_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - be0_out_drc0: endpoint@0 { - reg = <0>; - remote-endpoint = <&drc0_in_be0>; - }; - }; - }; - }; - - drc0: drc@1e70000 { - compatible = "allwinner,sun8i-a33-drc"; - reg = <0x01e70000 0x10000>; - interrupts = ; - clocks = <&ccu CLK_BUS_DRC>, <&ccu CLK_DRC>, - <&ccu CLK_DRAM_DRC>; - clock-names = "ahb", "mod", "ram"; - resets = <&ccu RST_BUS_DRC>; - - assigned-clocks = <&ccu CLK_DRC>; - assigned-clock-rates = <300000000>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - drc0_in: port@0 { - #address-cells = <1>; - #size-cells = <0>; - reg = <0>; - - drc0_in_be0: endpoint@0 { - reg = <0>; - remote-endpoint = <&be0_out_drc0>; - }; - }; - - drc0_out: port@1 { - #address-cells = <1>; - #size-cells = <0>; - reg = <1>; - - drc0_out_tcon0: endpoint@0 { - reg = <0>; - remote-endpoint = <&tcon0_in_drc0>; - }; - }; - }; - }; }; thermal-zones { @@ -524,10 +368,37 @@ }; }; +&be0 { + compatible = "allwinner,sun8i-a33-display-backend"; + /* A33 has an extra "SAT" module packed inside the display backend */ + reg = <0x01e60000 0x10000>, <0x01e80000 0x1000>; + reg-names = "be", "sat"; + clocks = <&ccu CLK_BUS_DE_BE>, <&ccu CLK_DE_BE>, + <&ccu CLK_DRAM_DE_BE>, <&ccu CLK_BUS_SAT>; + clock-names = "ahb", "mod", + "ram", "sat"; + resets = <&ccu RST_BUS_DE_BE>, <&ccu RST_BUS_SAT>; + reset-names = "be", "sat"; + assigned-clocks = <&ccu CLK_DE_BE>; + assigned-clock-rates = <300000000>; +}; + &ccu { compatible = "allwinner,sun8i-a33-ccu"; }; +&de { + compatible = "allwinner,sun8i-a33-display-engine"; +}; + +&drc0 { + compatible = "allwinner,sun8i-a33-drc"; +}; + +&fe0 { + compatible = "allwinner,sun8i-a33-display-frontend"; +}; + &mali { operating-points-v2 = <&mali_opp_table>; }; @@ -544,6 +415,17 @@ }; +&tcon0 { + compatible = "allwinner,sun8i-a33-tcon"; +}; + +&tcon0_out { + tcon0_out_dsi: endpoint@1 { + reg = <1>; + remote-endpoint = <&dsi_in_tcon0>; + }; +}; + &usb_otg { compatible = "allwinner,sun8i-a33-musb"; }; diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts index 1c012a4def16..9c006fc18821 100644 --- a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts +++ b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts @@ -154,6 +154,10 @@ #include "axp81x.dtsi" +&ac_power_supply { + status = "okay"; +}; + ®_aldo1 { regulator-always-on; regulator-min-microvolt = <1800000>; diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts index 7d30d3e530fb..838be7b3715f 100644 --- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts +++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts @@ -237,6 +237,14 @@ #include "axp81x.dtsi" +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + ®_aldo1 { regulator-always-on; regulator-min-microvolt = <1800000>; diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts index a5a9f5a0603e..fcbec3d7ccd7 100644 --- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts +++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts @@ 
-247,6 +247,14 @@ #include "axp81x.dtsi" +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + ®_aldo1 { regulator-always-on; regulator-min-microvolt = <1800000>; diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index 5d23667dc2d2..25540b7694d5 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts @@ -53,7 +53,7 @@ aliases { serial0 = &uart0; - /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */ + ethernet0 = &emac; ethernet1 = &sdiowifi; }; diff --git a/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts index 65cba1050802..4ec94d72f021 100644 --- a/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts +++ b/arch/arm/boot/dts/sun8i-h3-nanopi-m1-plus.dts @@ -67,6 +67,21 @@ pinctrl-names = "default"; reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */ }; + + connector { + compatible = "hdmi-connector"; + type = "a"; + + port { + hdmi_con_in: endpoint { + remote-endpoint = <&hdmi_out_con>; + }; + }; + }; +}; + +&de { + status = "okay"; }; &ehci1 { @@ -94,6 +109,16 @@ }; }; +&hdmi { + status = "okay"; +}; + +&hdmi_out { + hdmi_out_con: endpoint { + remote-endpoint = <&hdmi_con_in>; + }; +}; + &ir { pinctrl-names = "default"; pinctrl-0 = <&ir_pins_a>; diff --git a/arch/arm/boot/dts/sun8i-q8-common.dtsi b/arch/arm/boot/dts/sun8i-q8-common.dtsi index 719ad769b837..53104f4ccacc 100644 --- a/arch/arm/boot/dts/sun8i-q8-common.dtsi +++ b/arch/arm/boot/dts/sun8i-q8-common.dtsi @@ -49,6 +49,26 @@ ethernet0 = &sdio_wifi; }; + panel: panel { + /* Tablet dts should provide panel compatible */ + backlight = <&backlight>; + enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */ + power-supply = <®_dc1sw>; + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + panel_input: endpoint@0 { + reg = <0>; + remote-endpoint = <&tcon0_out_lcd>; + }; + }; + }; + wifi_pwrseq: wifi_pwrseq { compatible = "mmc-pwrseq-simple"; /* @@ -64,6 +84,10 @@ }; }; +&de { + status = "okay"; +}; + &ehci0 { status = "okay"; }; @@ -90,6 +114,19 @@ }; }; +&tcon0 { + pinctrl-names = "default"; + pinctrl-0 = <&lcd_rgb666_pins>; + status = "okay"; +}; + +&tcon0_out { + tcon0_out_lcd: endpoint@0 { + reg = <0>; + remote-endpoint = <&panel_input>; + }; +}; + &usbphy { usb1_vbus-supply = <®_dldo1>; }; diff --git a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts index 438b7b44dab3..c488aaacbd68 100644 --- a/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts +++ b/arch/arm/boot/dts/sun8i-r40-bananapi-m2-ultra.dts @@ -102,6 +102,8 @@ wifi_pwrseq: wifi_pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&pio 6 10 GPIO_ACTIVE_LOW>; /* PG10 WIFI_EN */ + clocks = <&ccu CLK_OUTA>; + clock-names = "ext_clock"; }; }; @@ -196,6 +198,11 @@ status = "okay"; }; +&pio { + pinctrl-names = "default"; + pinctrl-0 = <&clk_out_a_pin>; +}; + ®_aldo2 { regulator-always-on; regulator-min-microvolt = <2500000>; @@ -250,12 +257,27 @@ regulator-name = "vcc-wifi-io"; }; +/* + * Our WiFi chip needs both DLDO2 and DLDO3 to be powered at the same + * time, with the two being in sync, to be able to meet maximum power + * consumption during transmits. Since this is not really supported + * right now, just use the two as always on, and we will fix it later. 
+ */ + &reg_dldo2 { + regulator-always-on; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; regulator-name = "vcc-wifi"; }; +&reg_dldo3 { + regulator-always-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc-wifi-2"; +}; + &reg_dldo4 { regulator-min-microvolt = <2500000>; regulator-max-microvolt = <2500000>; @@ -278,6 +300,25 @@ status = "okay"; }; +&uart3 { + pinctrl-names = "default"; + pinctrl-0 = <&uart3_pg_pins>, <&uart3_rts_cts_pg_pins>; + uart-has-rtscts; + status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + clocks = <&ccu CLK_OUTA>; + clock-names = "lpo"; + vbat-supply = <&reg_dldo2>; + vddio-supply = <&reg_dldo1>; + device-wakeup-gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */ + /* TODO host wake line connected to PMIC GPIO pins */ + shutdown-gpios = <&pio 7 12 GPIO_ACTIVE_HIGH>; /* PH12 */ + max-speed = <1500000>; + }; +}; + &usbphy { usb1_vbus-supply = <&reg_vcc5v0>; usb2_vbus-supply = <&reg_vcc5v0>; diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi index 89762dbefe42..06b685869f52 100644 --- a/arch/arm/boot/dts/sun8i-r40.dtsi +++ b/arch/arm/boot/dts/sun8i-r40.dtsi @@ -342,6 +342,11 @@ #interrupt-cells = <3>; #gpio-cells = <3>; + clk_out_a_pin: clk-out-a-pin { + pins = "PI12"; + function = "clk_out_a"; + }; + gmac_rgmii_pins: gmac-rgmii-pins { pins = "PA0", "PA1", "PA2", "PA3", "PA4", "PA5", "PA6", "PA7", @@ -389,6 +394,16 @@ pins = "PB22", "PB23"; function = "uart0"; }; + + uart3_pg_pins: uart3-pg-pins { + pins = "PG6", "PG7"; + function = "uart3"; + }; + + uart3_rts_cts_pg_pins: uart3-rts-cts-pg-pins { + pins = "PG8", "PG9"; + function = "uart3"; + }; }; wdt: watchdog@1c20c90 { diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts index 85da85faf869..28c034928d67 100644 --- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts +++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts @@ -133,6 +133,19 @@ status = "okay"; }; +&gmac { + pinctrl-names = "default"; + pinctrl-0 = <&gmac_rgmii_pins>; + phy = <&phy1>; + phy-mode = "rgmii"; + phy-supply = <&reg_cldo1>; + status = "okay"; + + phy1: ethernet-phy@1 { + reg = <1>; + }; +}; + &i2c3 { pinctrl-names = "default"; pinctrl-0 = <&i2c3_pins>; @@ -183,10 +196,26 @@ clocks = <&ac100_rtc 0>; }; +&pio { + vcc-pa-supply = <&reg_ldo_io1>; + vcc-pb-supply = <&reg_aldo2>; + vcc-pc-supply = <&reg_dcdc1>; + vcc-pd-supply = <&reg_dc1sw>; + vcc-pe-supply = <&reg_eldo2>; + vcc-pf-supply = <&reg_dcdc1>; + vcc-pg-supply = <&reg_ldo_io0>; + vcc-ph-supply = <&reg_dcdc1>; +}; + &r_ir { status = "okay"; }; +&r_pio { + vcc-pl-supply = <&reg_dldo2>; + vcc-pm-supply = <&reg_eldo3>; +}; + &r_rsb { status = "okay"; @@ -217,6 +246,10 @@ /* unused */ }; + reg_dc1sw: dc1sw { + regulator-name = "vcc-pd"; + }; + reg_dc5ldo: dc5ldo { regulator-always-on; regulator-min-microvolt = <800000>; @@ -271,7 +304,6 @@ }; reg_dldo2: dldo2 { - regulator-always-on; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; regulator-name = "vcc-pl"; @@ -290,14 +322,12 @@ }; reg_eldo3: eldo3 { - regulator-always-on; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; regulator-name = "vcc-pm-codec-io1"; }; reg_ldo_io0: ldo_io0 { - regulator-always-on; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; regulator-name = "vcc-pg"; @@ -385,6 +415,14 @@ */ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; + /* + * The PHY requires 20ms after all voltages + * are applied until core logic 
is ready and + * 30ms after the reset pin is de-asserted. + * Set a 100ms delay to account for PMIC + * ramp time and board traces. + */ + regulator-enable-ramp-delay = <100000>; regulator-name = "vcc-gmac-phy"; }; diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts index 58a199b0e494..864715ec3cb0 100644 --- a/arch/arm/boot/dts/sun9i-a80-optimus.dts +++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts @@ -120,6 +120,19 @@ status = "okay"; }; +&gmac { + pinctrl-names = "default"; + pinctrl-0 = <&gmac_rgmii_pins>; + phy = <&phy1>; + phy-mode = "rgmii"; + phy-supply = <&reg_cldo1>; + status = "okay"; + + phy1: ethernet-phy@1 { + reg = <1>; + }; +}; + &mmc0 { pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins>; @@ -172,10 +185,26 @@ clocks = <&ac100_rtc 0>; }; +&pio { + vcc-pa-supply = <&reg_ldo_io1>; + vcc-pb-supply = <&reg_aldo2>; + vcc-pc-supply = <&reg_dcdc1>; + vcc-pd-supply = <&reg_dcdc1>; + vcc-pe-supply = <&reg_eldo2>; + vcc-pf-supply = <&reg_dcdc1>; + vcc-pg-supply = <&reg_ldo_io0>; + vcc-ph-supply = <&reg_dcdc1>; +}; + &r_ir { status = "okay"; }; +&r_pio { + vcc-pl-supply = <&reg_dldo2>; + vcc-pm-supply = <&reg_eldo3>; +}; + &r_rsb { status = "okay"; @@ -213,6 +242,10 @@ regulator-name = "vdd-cpus-09-usbh"; }; + dc1sw { + /* unused */ + }; + reg_dcdc1: dcdc1 { regulator-always-on; regulator-min-microvolt = <3000000>; @@ -260,7 +293,6 @@ }; reg_dldo2: dldo2 { - regulator-always-on; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; regulator-name = "vcc-pl"; @@ -279,14 +311,12 @@ }; reg_eldo3: eldo3 { - regulator-always-on; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; regulator-name = "vcc-pm-codec-io1"; }; reg_ldo_io0: ldo_io0 { - regulator-always-on; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3000000>; regulator-name = "vcc-pg"; @@ -374,6 +404,14 @@ */ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; + /* + * The PHY requires 20ms after all voltages + * are applied until core logic is ready and + * 30ms after the reset pin is de-asserted. + * Set a 100ms delay to account for PMIC + * ramp time and board traces. + */ + regulator-enable-ramp-delay = <100000>; regulator-name = "vcc-gmac-phy"; }; diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi index d9532fb1ef65..6fb292e0b662 100644 --- a/arch/arm/boot/dts/sun9i-a80.dtsi +++ b/arch/arm/boot/dts/sun9i-a80.dtsi @@ -56,6 +56,10 @@ #size-cells = <2>; interrupt-parent = <&gic>; + aliases { + ethernet0 = &gmac; + }; + cpus { #address-cells = <1>; #size-cells = <0>; @@ -183,6 +187,37 @@ clock-output-names = "osc32k"; }; + /* + * The following two are dummy clocks, placeholders + * used in the gmac_tx clock. The gmac driver will + * choose one parent depending on the PHY interface + * mode, using clk_set_rate auto-reparenting. + * + * The actual TX clock rate is not controlled by the + * gmac_tx clock. 
+ */ + mii_phy_tx_clk: mii_phy_tx_clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <25000000>; + clock-output-names = "mii_phy_tx"; + }; + + gmac_int_tx_clk: gmac_int_tx_clk { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <125000000>; + clock-output-names = "gmac_int_tx"; + }; + + gmac_tx_clk: clk@800030 { + #clock-cells = <0>; + compatible = "allwinner,sun7i-a20-gmac-clk"; + reg = <0x00800030 0x4>; + clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>; + clock-output-names = "gmac_tx"; + }; + cpus_clk: clk@8001410 { compatible = "allwinner,sun9i-a80-cpus-clk"; reg = <0x08001410 0x4>; @@ -283,6 +318,23 @@ }; }; + gmac: ethernet@830000 { + compatible = "allwinner,sun7i-a20-gmac"; + reg = <0x00830000 0x1054>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&ccu CLK_BUS_GMAC>, <&gmac_tx_clk>; + clock-names = "stmmaceth", "allwinner_gmac_tx"; + resets = <&ccu RST_BUS_GMAC>; + reset-names = "stmmaceth"; + snps,pbl = <2>; + snps,fixed-burst; + snps,force_sf_dma_mode; + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + ehci0: usb@a00000 { compatible = "allwinner,sun9i-a80-ehci", "generic-ehci"; reg = <0x00a00000 0x100>; @@ -948,6 +1000,19 @@ #size-cells = <0>; #gpio-cells = <3>; + gmac_rgmii_pins: gmac-rgmii-pins { + allwinner,pins = "PA0", "PA1", "PA2", "PA3", + "PA4", "PA5", "PA7", "PA8", + "PA9", "PA10", "PA12", "PA13", + "PA15", "PA16", "PA17"; + allwinner,function = "gmac"; + /* + * data lines in RGMII mode use DDR mode + * and need a higher signal drive strength + */ + drive-strength = <40>; + }; + i2c3_pins: i2c3-pins { pins = "PG10", "PG11"; function = "i2c3"; diff --git a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi index b3283aeb5b7d..3bed375b9c03 100644 --- a/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi +++ b/arch/arm/boot/dts/sunxi-bananapi-m2-plus.dtsi @@ -103,6 +103,8 @@ compatible = "mmc-pwrseq-simple"; pinctrl-names = "default"; reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */ + clocks = <&rtc 1>; + clock-names = "ext_clock"; }; }; @@ -215,7 +217,19 @@ &uart1 { pinctrl-names = "default"; pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>; - status = "okay"; + uart-has-rtscts; + status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + clocks = <&rtc 1>; + clock-names = "lpo"; + vbat-supply = <®_vcc3v3>; + vddio-supply = <®_vcc3v3>; + device-wakeup-gpios = <&pio 6 13 GPIO_ACTIVE_HIGH>; /* PG13 */ + host-wakeup-gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */ + shutdown-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */ + }; }; &usb_otg { diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi index a4c757c0b741..d74a6cbbfdf4 100644 --- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi +++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi @@ -740,8 +740,7 @@ }; csi: camera@1cb0000 { - compatible = "allwinner,sun8i-h3-csi", - "allwinner,sun6i-a31-csi"; + compatible = "allwinner,sun8i-h3-csi"; reg = <0x01cb0000 0x1000>; interrupts = ; clocks = <&ccu CLK_BUS_CSI>, diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts index 1788556b4977..97a5c3504bbe 100644 --- a/arch/arm/boot/dts/tegra114-dalmore.dts +++ b/arch/arm/boot/dts/tegra114-dalmore.dts @@ -1087,7 +1087,7 @@ status = "okay"; spi-max-frequency = <25000000>; spi-flash@0 { - compatible = "winbond,w25q32dw"; + compatible = "winbond,w25q32dw", "jedec,spi-nor"; reg = <0>; spi-max-frequency = <20000000>; }; diff --git 
a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts index 9151b3ebb839..33bbb1c5285d 100644 --- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts +++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts @@ -1656,7 +1656,7 @@ status = "okay"; spi-max-frequency = <25000000>; spi-flash@0 { - compatible = "winbond,w25q32dw"; + compatible = "winbond,w25q32dw", "jedec,spi-nor"; reg = <0>; spi-max-frequency = <20000000>; }; diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi index d5f11d6d987e..a1acd872bcf2 100644 --- a/arch/arm/boot/dts/tegra124-nyan.dtsi +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi @@ -13,10 +13,25 @@ stdout-path = "serial0:115200n8"; }; - memory@80000000 { + /* + * Note that recent version of the device tree compiler (starting with + * version 1.4.2) warn about this node containing a reg property, but + * missing a unit-address. However, the bootloader on these Chromebook + * devices relies on the full name of this node to be exactly /memory. + * Adding the unit-address causes the bootloader to create a /memory + * node and write the memory bank configuration to that node, which in + * turn leads the kernel to believe that the device has 2 GiB of + * memory instead of the amount detected by the bootloader. + * + * The name of this node is effectively ABI and must not be changed. + */ + memory { + device_type = "memory"; reg = <0x0 0x80000000 0x0 0x80000000>; }; + /delete-node/ memory@80000000; + host1x@50000000 { hdmi@54280000 { status = "okay"; @@ -355,7 +370,7 @@ spi-max-frequency = <25000000>; flash@0 { - compatible = "winbond,w25q32dw"; + compatible = "winbond,w25q32dw", "jedec,spi-nor"; spi-max-frequency = <25000000>; reg = <0>; }; diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts index 82d139648ef1..4882b61fb680 100644 --- a/arch/arm/boot/dts/tegra124-venice2.dts +++ b/arch/arm/boot/dts/tegra124-venice2.dts @@ -879,7 +879,7 @@ status = "okay"; spi-max-frequency = <25000000>; spi-flash@0 { - compatible = "winbond,w25q32dw"; + compatible = "winbond,w25q32dw", "jedec,spi-nor"; reg = <0>; spi-max-frequency = <20000000>; }; diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index 9eb26dc15f6b..3e5ac096d85e 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts @@ -287,7 +287,7 @@ status = "okay"; spi-max-frequency = <48000000>; spi-flash@0 { - compatible = "winbond,w25q80bl"; + compatible = "winbond,w25q80bl", "jedec,spi-nor"; reg = <0>; spi-max-frequency = <48000000>; }; diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index dcad6d6128cf..8c942e60703e 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -616,17 +616,14 @@ }; mc: memory-controller@7000f000 { - compatible = "nvidia,tegra20-mc"; - reg = <0x7000f000 0x024 - 0x7000f03c 0x3c4>; + compatible = "nvidia,tegra20-mc-gart"; + reg = <0x7000f000 0x400 /* controller registers */ + 0x58000000 0x02000000>; /* GART aperture */ + clocks = <&tegra_car TEGRA20_CLK_MC>; + clock-names = "mc"; interrupts = ; #reset-cells = <1>; - }; - - iommu@7000f024 { - compatible = "nvidia,tegra20-gart"; - reg = <0x7000f024 0x00000018 /* controller registers */ - 0x58000000 0x02000000>; /* GART aperture */ + #iommu-cells = <0>; }; memory-controller@7000f400 { diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts index b0d40ac8ac6e..a3b0f3555cd2 100644 --- 
a/arch/arm/boot/dts/tegra30-beaver.dts +++ b/arch/arm/boot/dts/tegra30-beaver.dts @@ -1886,7 +1886,7 @@ status = "okay"; spi-max-frequency = <25000000>; spi-flash@1 { - compatible = "winbond,w25q32"; + compatible = "winbond,w25q32", "jedec,spi-nor"; reg = <1>; spi-max-frequency = <20000000>; }; diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi index fb9222b479d2..7ce61edd52f5 100644 --- a/arch/arm/boot/dts/tegra30-cardhu.dtsi +++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi @@ -360,7 +360,7 @@ status = "okay"; spi-max-frequency = <25000000>; spi-flash@1 { - compatible = "winbond,w25q32"; + compatible = "winbond,w25q32", "jedec,spi-nor"; reg = <1>; spi-max-frequency = <20000000>; }; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 6f4f60ba5429..269e6bf99ccb 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; -/include/ "skeleton.dtsi" / { model = "ARM Versatile AB"; @@ -21,6 +20,7 @@ }; memory { + device_type = "memory"; reg = <0x0 0x08000000>; }; diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi index a9569d15de41..d3963e9eaf48 100644 --- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi @@ -133,7 +133,7 @@ mmci@50000 { compatible = "arm,pl180", "arm,primecell"; reg = <0x050000 0x1000>; - interrupts = <9 10>; + interrupts = <9>, <10>; cd-gpios = <&v2m_mmc_gpios 0 0>; wp-gpios = <&v2m_mmc_gpios 1 0>; max-frequency = <12000000>; diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi index fd42e1194179..798c97aff7fa 100644 --- a/arch/arm/boot/dts/vexpress-v2m.dtsi +++ b/arch/arm/boot/dts/vexpress-v2m.dtsi @@ -133,7 +133,7 @@ mmci@5000 { compatible = "arm,pl180", "arm,primecell"; reg = <0x05000 0x1000>; - interrupts = <9 10>; + interrupts = <9>, <10>; cd-gpios = <&v2m_mmc_gpios 0 0>; wp-gpios = <&v2m_mmc_gpios 1 0>; max-frequency = <12000000>; diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index a2ccacd07f4f..00cd9f5bef2e 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -42,6 +42,7 @@ cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <990>; }; cpu1: cpu@1 { @@ -51,6 +52,7 @@ cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <990>; }; cpu2: cpu@2 { @@ -60,6 +62,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + dynamic-power-coefficient = <133>; }; cpu3: cpu@3 { @@ -69,6 +72,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + dynamic-power-coefficient = <133>; }; cpu4: cpu@4 { @@ -78,6 +82,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + dynamic-power-coefficient = <133>; }; idle-states { diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts index 689c8930dce3..3fa0cbe456db 100644 --- a/arch/arm/boot/dts/vf610-bk4.dts +++ b/arch/arm/boot/dts/vf610-bk4.dts @@ -60,6 +60,29 @@ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; + + spi-gpio { + compatible = "spi-gpio"; + pinctrl-0 = 
<&pinctrl_gpio_spi>; + pinctrl-names = "default"; + #address-cells = <1>; + #size-cells = <0>; + /* PTD12 ->RPIO[91] */ + sck-gpios = <&gpio2 27 GPIO_ACTIVE_LOW>; + /* PTD10 ->RPIO[89] */ + miso-gpios = <&gpio2 25 GPIO_ACTIVE_HIGH>; + num-chipselects = <0>; + + gpio@0 { + compatible = "pisosr-gpio"; + reg = <0>; + gpio-controller; + #gpio-cells = <2>; + /* PTB18 -> RGPIO[40] */ + load-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; + spi-max-frequency = <100000>; + }; + }; }; &adc0 { @@ -110,11 +133,11 @@ bus-num = <3>; status = "okay"; spi-slave; + #address-cells = <0>; - slave@0 { + slave { compatible = "lwn,bk4"; spi-max-frequency = <30000000>; - reg = <0>; }; }; @@ -431,6 +454,14 @@ >; }; + pinctrl_gpio_spi: pinctrl-gpio-spi { + fsl,pins = < + VF610_PAD_PTB18__GPIO_40 0x1183 + VF610_PAD_PTD10__GPIO_89 0x1183 + VF610_PAD_PTD12__GPIO_91 0x1183 + >; + }; + pinctrl_i2c2: i2c2grp { fsl,pins = < VF610_PAD_PTA22__I2C2_SCL 0x34df diff --git a/arch/arm/boot/dts/vf610-zii-cfu1.dts b/arch/arm/boot/dts/vf610-zii-cfu1.dts index 7cdcc5fe8282..445c7dc306b2 100644 --- a/arch/arm/boot/dts/vf610-zii-cfu1.dts +++ b/arch/arm/boot/dts/vf610-zii-cfu1.dts @@ -207,7 +207,7 @@ }; &i2c0 { - clock-frequency = <100000>; + clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c0>; status = "okay"; diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts new file mode 100644 index 000000000000..2b10672fadbd --- /dev/null +++ b/arch/arm/boot/dts/vf610-zii-ssmb-dtu.dts @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) + +/* + * Device tree file for ZII's SSMB DTU board + * + * SSMB - SPU3 Switch Management Board + * DTU - Digital Tapping Unit + * + * Copyright (C) 2015-2019 Zodiac Inflight Innovations + * + * Based on an original 'vf610-twr.dts' which is Copyright 2015, + * Freescale Semiconductor, Inc. 
+ */ + +/dts-v1/; +#include "vf610.dtsi" + +/ { + model = "ZII VF610 SSMB DTU Board"; + compatible = "zii,vf610dtu", "zii,vf610dev", "fsl,vf610"; + + chosen { + stdout-path = &uart0; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x20000000>; + }; + + gpio-leds { + compatible = "gpio-leds"; + pinctrl-0 = <&pinctrl_leds_debug>; + pinctrl-names = "default"; + + led-debug { + label = "zii:green:debug1"; + gpios = <&gpio2 18 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "heartbeat"; + max-brightness = <1>; + }; + }; + + reg_vcc_3v3_mcu: regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_3v3_mcu"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; +}; + +&adc0 { + vref-supply = <®_vcc_3v3_mcu>; + status = "okay"; +}; + +&adc1 { + vref-supply = <®_vcc_3v3_mcu>; + status = "okay"; +}; + +&edma0 { + status = "okay"; +}; + +&edma1 { + status = "okay"; +}; + +&esdhc0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_esdhc0>; + bus-width = <8>; + non-removable; + no-1-8-v; + keep-power-in-suspend; + status = "okay"; +}; + +&esdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_esdhc1>; + bus-width = <4>; + status = "okay"; +}; + +&fec1 { + phy-mode = "rmii"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + status = "okay"; + + fixed-link { + speed = <100>; + full-duplex; + }; + + mdio1: mdio { + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + switch0: switch0@0 { + compatible = "marvell,mv88e6190"; + pinctrl-0 = <&pinctrl_gpio_switch0>; + pinctrl-names = "default"; + reg = <0>; + eeprom-length = <65536>; + reset-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>; + interrupt-parent = <&gpio3>; + interrupts = <2 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + #interrupt-cells = <2>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "cpu"; + ethernet = <&fec1>; + + fixed-link { + speed = <100>; + full-duplex; + }; + }; + + port@1 { + reg = <1>; + label = "eth_cu_100_3"; + }; + + port@5 { + reg = <5>; + label = "eth_cu_1000_4"; + }; + + port@6 { + reg = <6>; + label = "eth_cu_1000_5"; + }; + + port@8 { + reg = <8>; + label = "eth_cu_1000_1"; + }; + + port@9 { + reg = <9>; + label = "eth_cu_1000_2"; + phy-handle = <&phy9>; + phy-mode = "sgmii"; + managed = "in-band-status"; + }; + }; + + mdio1 { + compatible = "marvell,mv88e6xxx-mdio-external"; + #address-cells = <1>; + #size-cells = <0>; + + phy9: phy9@0 { + compatible = "ethernet-phy-ieee802.3-c45"; + pinctrl-0 = <&pinctrl_gpio_phy9>; + pinctrl-names = "default"; + interrupt-parent = <&gpio2>; + interrupts = <30 IRQ_TYPE_LEVEL_LOW>; + reg = <0>; + }; + }; + }; + }; +}; + +&i2c0 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_i2c0>; + status = "okay"; + + gpio6: gpio-expander@22 { + compatible = "nxp,pca9554"; + reg = <0x22>; + gpio-controller; + #gpio-cells = <2>; + }; + + /* On SSMB */ + temperature-sensor@48 { + compatible = "national,lm75"; + reg = <0x48>; + }; + + /* On DSB */ + temperature-sensor@4d { + compatible = "national,lm75"; + reg = <0x4d>; + }; + + eeprom@50 { + compatible = "atmel,24c04"; + reg = <0x50>; + label = "nameplate"; + }; + + eeprom@52 { + compatible = "atmel,24c04"; + reg = <0x52>; + }; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_uart0>; + status = "okay"; +}; + +&iomuxc { + pinctrl_dspi1: dspi1grp { + fsl,pins = < + VF610_PAD_PTD5__DSPI1_CS0 0x1182 + VF610_PAD_PTD4__DSPI1_CS1 0x1182 + VF610_PAD_PTC6__DSPI1_SIN 0x1181 + 
VF610_PAD_PTC7__DSPI1_SOUT 0x1182 + VF610_PAD_PTC8__DSPI1_SCK 0x1182 + >; + }; + + pinctrl_esdhc0: esdhc0grp { + fsl,pins = < + VF610_PAD_PTC0__ESDHC0_CLK 0x31ef + VF610_PAD_PTC1__ESDHC0_CMD 0x31ef + VF610_PAD_PTC2__ESDHC0_DAT0 0x31ef + VF610_PAD_PTC3__ESDHC0_DAT1 0x31ef + VF610_PAD_PTC4__ESDHC0_DAT2 0x31ef + VF610_PAD_PTC5__ESDHC0_DAT3 0x31ef + VF610_PAD_PTD23__ESDHC0_DAT4 0x31ef + VF610_PAD_PTD22__ESDHC0_DAT5 0x31ef + VF610_PAD_PTD21__ESDHC0_DAT6 0x31ef + VF610_PAD_PTD20__ESDHC0_DAT7 0x31ef + >; + }; + + pinctrl_esdhc1: esdhc1grp { + fsl,pins = < + VF610_PAD_PTA24__ESDHC1_CLK 0x31ef + VF610_PAD_PTA25__ESDHC1_CMD 0x31ef + VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef + VF610_PAD_PTA27__ESDHC1_DAT1 0x31ef + VF610_PAD_PTA28__ESDHC1_DATA2 0x31ef + VF610_PAD_PTA29__ESDHC1_DAT3 0x31ef + >; + }; + + pinctrl_fec1: fec1grp { + fsl,pins = < + VF610_PAD_PTA6__RMII_CLKIN 0x30d1 + VF610_PAD_PTC9__ENET_RMII1_MDC 0x30d2 + VF610_PAD_PTC10__ENET_RMII1_MDIO 0x30d3 + VF610_PAD_PTC11__ENET_RMII1_CRS 0x30d1 + VF610_PAD_PTC12__ENET_RMII1_RXD1 0x30d1 + VF610_PAD_PTC13__ENET_RMII1_RXD0 0x30d1 + VF610_PAD_PTC14__ENET_RMII1_RXER 0x30d1 + VF610_PAD_PTC15__ENET_RMII1_TXD1 0x30d2 + VF610_PAD_PTC16__ENET_RMII1_TXD0 0x30d2 + VF610_PAD_PTC17__ENET_RMII1_TXEN 0x30d2 + >; + }; + + pinctrl_gpio_phy9: pinctrl-gpio-phy9 { + fsl,pins = < + VF610_PAD_PTB24__GPIO_94 0x219d + >; + }; + + pinctrl_gpio_switch0: pinctrl-gpio-switch0 { + fsl,pins = < + VF610_PAD_PTE2__GPIO_107 0x31c2 + VF610_PAD_PTB28__GPIO_98 0x219d + >; + }; + + pinctrl_i2c0: i2c0grp { + fsl,pins = < + VF610_PAD_PTB14__I2C0_SCL 0x37ff + VF610_PAD_PTB15__I2C0_SDA 0x37ff + >; + }; + + pinctrl_i2c1: i2c1grp { + fsl,pins = < + VF610_PAD_PTB16__I2C1_SCL 0x37ff + VF610_PAD_PTB17__I2C1_SDA 0x37ff + >; + }; + + pinctrl_leds_debug: pinctrl-leds-debug { + fsl,pins = < + VF610_PAD_PTD3__GPIO_82 0x31c2 + >; + }; + + pinctrl_uart0: uart0grp { + fsl,pins = < + VF610_PAD_PTB10__UART0_TX 0x21a2 + VF610_PAD_PTB11__UART0_RX 0x21a1 + >; + }; +}; diff --git a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts index 757af56e8ee7..0d9fe5ac83a3 100644 --- a/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts +++ b/arch/arm/boot/dts/vf610-zii-ssmb-spu3.dts @@ -99,6 +99,8 @@ non-removable; no-1-8-v; keep-power-in-suspend; + no-sdio; + no-sd; status = "okay"; }; @@ -106,6 +108,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; bus-width = <4>; + no-sdio; status = "okay"; }; diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi index 1929ad390d88..8b5af039b072 100644 --- a/arch/arm/boot/dts/vt8500.dtsi +++ b/arch/arm/boot/dts/vt8500.dtsi @@ -6,9 +6,9 @@ * Licensed under GPLv2 or later */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "via,vt8500"; cpus { @@ -21,6 +21,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + aliases { serial0 = &uart0; serial1 = &uart1; diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi index e9ef539e13d3..cca6747304c4 100644 --- a/arch/arm/boot/dts/wm8505.dtsi +++ b/arch/arm/boot/dts/wm8505.dtsi @@ -6,9 +6,9 @@ * Licensed under GPLv2 or later */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "wm,wm8505"; cpus { @@ -21,6 +21,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + aliases { serial0 = &uart0; serial1 = &uart1; diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi index e12213d16693..00d01769a68f 100644 --- 
a/arch/arm/boot/dts/wm8650.dtsi +++ b/arch/arm/boot/dts/wm8650.dtsi @@ -6,9 +6,9 @@ * Licensed under GPLv2 or later */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "wm,wm8650"; cpus { @@ -21,6 +21,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + aliases { serial0 = &uart0; serial1 = &uart1; diff --git a/arch/arm/boot/dts/wm8750.dtsi b/arch/arm/boot/dts/wm8750.dtsi index 46d076d7302b..54d8f7d9bb33 100644 --- a/arch/arm/boot/dts/wm8750.dtsi +++ b/arch/arm/boot/dts/wm8750.dtsi @@ -6,9 +6,9 @@ * Licensed under GPLv2 or later */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "wm,wm8750"; cpus { @@ -21,6 +21,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + aliases { serial0 = &uart0; serial1 = &uart1; diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi index 8fbccfbe75f3..c572d777077f 100644 --- a/arch/arm/boot/dts/wm8850.dtsi +++ b/arch/arm/boot/dts/wm8850.dtsi @@ -6,9 +6,9 @@ * Licensed under GPLv2 or later */ -/include/ "skeleton.dtsi" - / { + #address-cells = <1>; + #size-cells = <1>; compatible = "wm,wm8850"; cpus { @@ -22,6 +22,11 @@ }; }; + memory { + device_type = "memory"; + reg = <0x0 0x0>; + }; + aliases { serial0 = &uart0; serial1 = &uart1; diff --git a/arch/arm/boot/dts/zx296702-ad1.dts b/arch/arm/boot/dts/zx296702-ad1.dts index eedd3fcbc002..bd9400840023 100644 --- a/arch/arm/boot/dts/zx296702-ad1.dts +++ b/arch/arm/boot/dts/zx296702-ad1.dts @@ -14,6 +14,7 @@ }; memory { + device_type = "memory"; reg = <0x50000000 0x20000000>; }; }; diff --git a/arch/arm/boot/dts/zx296702.dtsi b/arch/arm/boot/dts/zx296702.dtsi index 240e7a23d81f..afd98de029be 100644 --- a/arch/arm/boot/dts/zx296702.dtsi +++ b/arch/arm/boot/dts/zx296702.dtsi @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 -#include "skeleton.dtsi" #include #include / { + #address-cells = <1>; + #size-cells = <1>; + cpus { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/zynq-zturn.dts b/arch/arm/boot/dts/zynq-zturn.dts index b38704657960..5ec616ebca08 100644 --- a/arch/arm/boot/dts/zynq-zturn.dts +++ b/arch/arm/boot/dts/zynq-zturn.dts @@ -54,7 +54,7 @@ label = "K1"; gpios = <&gpio0 0x32 0x1>; linux,code = <0x66>; - gpio-key,wakeup; + wakeup-source; autorepeat; }; }; diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig index 8e17e7ed1f02..53864316bee1 100644 --- a/arch/arm/configs/axm55xx_defconfig +++ b/arch/arm/configs/axm55xx_defconfig @@ -155,10 +155,6 @@ CONFIG_PMBUS=y CONFIG_SENSORS_LTC2978=y CONFIG_WATCHDOG=y CONFIG_ARM_SP805_WATCHDOG=y -CONFIG_FB=y -CONFIG_FB_ARMCLCD=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_HID_A4TECH=y diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig index bb6a35fb1dd7..dcf7610cfe55 100644 --- a/arch/arm/configs/bcm2835_defconfig +++ b/arch/arm/configs/bcm2835_defconfig @@ -91,6 +91,8 @@ CONFIG_THERMAL=y CONFIG_BCM2835_THERMAL=y CONFIG_WATCHDOG=y CONFIG_BCM2835_WDT=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y CONFIG_DRM=y CONFIG_DRM_VC4=y CONFIG_FB_SIMPLE=y @@ -129,6 +131,7 @@ CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=y CONFIG_STAGING=y CONFIG_SND_BCM2835=m +CONFIG_VIDEO_BCM2835=m CONFIG_MAILBOX=y CONFIG_BCM2835_MBOX=y # CONFIG_IOMMU_SUPPORT is not set @@ -158,7 +161,6 @@ CONFIG_PRINTK_TIME=y CONFIG_BOOT_PRINTK_DELAY=y CONFIG_DYNAMIC_DEBUG=y 
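
The sunxi board hunks earlier in this section (sun8i-r40-bananapi-m2-ultra and sunxi-bananapi-m2-plus) describe the on-board Broadcom Bluetooth controller as a serdev child of the UART it is wired to, so the kernel can manage its low-power clock, supplies and wake-up lines without a userspace hciattach step. A condensed sketch of that pattern, taken from the Bananapi M2 Plus hunk; the phandles, GPIO offsets and supplies are board-specific:

&uart1 {
    uart-has-rtscts;
    status = "okay";

    bluetooth {
        compatible = "brcm,bcm43438-bt";
        /* 32.768 kHz low-power clock fed from the SoC RTC */
        clocks = <&rtc 1>;
        clock-names = "lpo";
        vbat-supply = <&reg_vcc3v3>;
        vddio-supply = <&reg_vcc3v3>;
        device-wakeup-gpios = <&pio 6 13 GPIO_ACTIVE_HIGH>; /* PG13 */
        host-wakeup-gpios = <&pio 6 11 GPIO_ACTIVE_HIGH>;   /* PG11 */
        shutdown-gpios = <&pio 6 12 GPIO_ACTIVE_HIGH>;      /* PG12 */
    };
};
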
CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_UNUSED_SYMBOLS=y CONFIG_DEBUG_MEMORY_INIT=y diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig index c6dcd6e4f4e6..419b73564f29 100644 --- a/arch/arm/configs/cns3420vb_defconfig +++ b/arch/arm/configs/cns3420vb_defconfig @@ -60,7 +60,6 @@ CONFIG_EXT2_FS_XATTR=y CONFIG_AUTOFS4_FS=y CONFIG_FSCACHE=y CONFIG_TMPFS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_ARM_UNWIND is not set diff --git a/arch/arm/configs/efm32_defconfig b/arch/arm/configs/efm32_defconfig index 860d27138e6f..ee42158f41ec 100644 --- a/arch/arm/configs/efm32_defconfig +++ b/arch/arm/configs/efm32_defconfig @@ -94,7 +94,6 @@ CONFIG_ROMFS_BACKED_BY_MTD=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig index cd27d651463c..eabb784cf7da 100644 --- a/arch/arm/configs/eseries_pxa_defconfig +++ b/arch/arm/configs/eseries_pxa_defconfig @@ -103,7 +103,6 @@ CONFIG_NFS_V3=y CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_CRYPTO_CBC=m CONFIG_CRYPTO_PCBC=m diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig index 553777ac2814..ef9aae89907d 100644 --- a/arch/arm/configs/gemini_defconfig +++ b/arch/arm/configs/gemini_defconfig @@ -87,6 +87,5 @@ CONFIG_TMPFS_POSIX_ACL=y CONFIG_ROMFS_FS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig index 69cb8f1efcea..747550c7af2f 100644 --- a/arch/arm/configs/integrator_defconfig +++ b/arch/arm/configs/integrator_defconfig @@ -27,6 +27,7 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y CONFIG_CPUFREQ_DT=y +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -53,13 +54,17 @@ CONFIG_E100=y CONFIG_SMC91X=y # CONFIG_KEYBOARD_ATKBD is not set # CONFIG_SERIO_SERPORT is not set -CONFIG_FB=y +CONFIG_DRM=y +CONFIG_DRM_DUMB_VGA_DAC=y +CONFIG_DRM_PL111=y CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_ARMCLCD=y CONFIG_FB_MATROX=y CONFIG_FB_MATROX_MILLENIUM=y CONFIG_FB_MATROX_MYSTIQUE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_VGA_CONSOLE is not set +CONFIG_LOGO=y CONFIG_MMC=y CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig index 23df2518203d..e3d5e15d66d1 100644 --- a/arch/arm/configs/lpc18xx_defconfig +++ b/arch/arm/configs/lpc18xx_defconfig @@ -1,5 +1,6 @@ CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-" CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_BLK_DEV_INITRD=y # CONFIG_RD_BZIP2 is not set # CONFIG_RD_LZMA is not set @@ -17,22 +18,20 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_EMBEDDED=y # CONFIG_VM_EVENT_COUNTERS is not set # CONFIG_SLUB_DEBUG is not set -# CONFIG_LBDAF is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set # CONFIG_MMU is not set -CONFIG_ARM_SINGLE_ARMV7M=y CONFIG_ARCH_LPC18XX=y 
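
The stm32mp157c hunks at the start of this section add DMA channels to every timer in the SoC dtsi and then delete them again in the board files, so the shared DMAMUX channels stay free for other peripherals ("spare dmas for other usage"). The board-level override pattern, condensed from the stm32mp157c-ed1 hunk:

&timers6 {
    /* spare dmas for other usage */
    /delete-property/dmas;
    /delete-property/dma-names;
    status = "okay";

    timer@5 {
        status = "okay";
    };
};
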
CONFIG_SET_MEM_PARAM=y CONFIG_DRAM_BASE=0x28000000 CONFIG_DRAM_SIZE=0x02000000 CONFIG_FLASH_MEM_BASE=0x1b000000 CONFIG_FLASH_SIZE=0x00080000 -CONFIG_PREEMPT=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_ARM_APPENDED_DTB=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set CONFIG_BINFMT_FLAT=y CONFIG_BINFMT_ZFLAT=y CONFIG_BINFMT_SHARED_FLAT=y @@ -69,7 +68,6 @@ CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_LOWLEVEL is not set CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set # CONFIG_NET_VENDOR_FARADAY is not set @@ -90,7 +88,6 @@ CONFIG_STMMAC_ETH=y CONFIG_SMSC_PHY=y # CONFIG_USB_NET_DRIVERS is not set # CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GPIO=y @@ -101,13 +98,11 @@ CONFIG_KEYBOARD_GPIO_POLLED=y # CONFIG_UNIX98_PTYS is not set # CONFIG_LEGACY_PTYS is not set CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_DEVKMEM is not set CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set -CONFIG_I2C=y CONFIG_I2C_LPC2K=y CONFIG_SPI=y CONFIG_SPI_PL022=y @@ -121,8 +116,10 @@ CONFIG_WATCHDOG=y CONFIG_LPC18XX_WATCHDOG=y CONFIG_REGULATOR=y CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_FB=y -CONFIG_FB_ARMCLCD=y +CONFIG_DRM=y +CONFIG_DRM_PL111=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y @@ -144,15 +141,14 @@ CONFIG_AMBA_PL08X=y CONFIG_LPC18XX_DMAMUX=y CONFIG_MEMORY=y CONFIG_ARM_PL172_MPMC=y -CONFIG_PWM=y -CONFIG_PWM_LPC18XX_SCT=y CONFIG_IIO=y CONFIG_MMA7455_I2C=y CONFIG_LPC18XX_ADC=y CONFIG_LPC18XX_DAC=y CONFIG_IIO_SYSFS_TRIGGER=y +CONFIG_PWM=y +CONFIG_PWM_LPC18XX_SCT=y CONFIG_PHY_LPC18XX_USB_OTG=y -CONFIG_NVMEM=y CONFIG_NVMEM_LPC18XX_EEPROM=y CONFIG_EXT2_FS=y # CONFIG_FILE_LOCKING is not set @@ -160,9 +156,10 @@ CONFIG_EXT2_FS=y # CONFIG_INOTIFY_USER is not set CONFIG_JFFS2_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y CONFIG_MAGIC_SYSRQ=y @@ -170,5 +167,3 @@ CONFIG_MAGIC_SYSRQ=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_LL=y CONFIG_EARLY_PRINTK=y -CONFIG_CRC_ITU_T=y -CONFIG_CRC7=y diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig index 0b54b4024e51..e752fb704df0 100644 --- a/arch/arm/configs/lpc32xx_defconfig +++ b/arch/arm/configs/lpc32xx_defconfig @@ -1,6 +1,7 @@ CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 @@ -11,13 +12,7 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL_SYSCALL=y CONFIG_EMBEDDED=y CONFIG_SLAB=y -CONFIG_JUMP_LABEL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y CONFIG_ARCH_LPC32XX=y -CONFIG_PREEMPT=y CONFIG_AEABI=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 @@ -26,6 +21,11 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y CONFIG_CMDLINE="console=ttyS0,115200n81 root=/dev/ram0" CONFIG_CPU_IDLE=y CONFIG_VFP=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 
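
The stm32mp157c.dtsi hunk near the top of this section registers the new on-die temperature sensor (the "dts" node, compatible "st,stm32-thermal", #thermal-sensor-cells = <0>) and binds it to a CPU thermal zone. Reassembled from that hunk; temperatures are in millidegrees Celsius, the "critical" trip triggers an emergency shutdown, and the "passive" trip is meant to drive the (still empty) cooling-maps:

thermal-zones {
    cpu_thermal: cpu-thermal {
        polling-delay-passive = <0>;
        polling-delay = <0>;
        thermal-sensors = <&dts>;

        trips {
            cpu_alert1: cpu-alert1 {
                temperature = <85000>;   /* 85 C */
                hysteresis = <0>;
                type = "passive";
            };

            cpu-crit {
                temperature = <120000>;  /* 120 C */
                hysteresis = <0>;
                type = "critical";
            };
        };

        cooling-maps {
        };
    };
};
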
CONFIG_NET=y CONFIG_PACKET=y @@ -56,6 +56,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=1 CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_SRAM=y CONFIG_EEPROM_AT24=y CONFIG_EEPROM_AT25=y CONFIG_SCSI=y @@ -75,9 +76,6 @@ CONFIG_LPC_ENET=y # CONFIG_NET_VENDOR_STMICRO is not set CONFIG_SMSC_PHY=y # CONFIG_WLAN is not set -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=240 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320 CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GPIO=y @@ -92,48 +90,39 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_HS_LPC32XX=y +CONFIG_SERIAL_HS_LPC32XX_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_I2C=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_PNX=y CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_EM=y CONFIG_GPIO_GENERIC_PLATFORM=y -CONFIG_GPIO_PL061=y -CONFIG_GPIO_ADP5588=y -CONFIG_GPIO_ADNP=y -CONFIG_GPIO_MAX7300=y -CONFIG_GPIO_MAX732X=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCF857X=y -CONFIG_GPIO_74X164=y -CONFIG_GPIO_MAX7301=y -CONFIG_GPIO_MC33880=y -CONFIG_PINCTRL_MCP23S08=y -CONFIG_PINCTRL_SX150X=y CONFIG_SENSORS_DS620=y CONFIG_SENSORS_MAX6639=y CONFIG_WATCHDOG=y CONFIG_PNX4008_WATCHDOG=y -CONFIG_FB=y -CONFIG_FB_ARMCLCD=y +CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_DRM=y +CONFIG_DRM_PANEL_SIMPLE=y +CONFIG_DRM_PL111=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y -CONFIG_SND_SEQUENCER=y -CONFIG_SND_MIXER_OSS=y -CONFIG_SND_PCM_OSS=y -CONFIG_SND_SEQUENCER_OSS=y # CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set CONFIG_SND_DEBUG=y CONFIG_SND_DEBUG_VERBOSE=y +CONFIG_SND_SEQUENCER=y # CONFIG_SND_DRIVERS is not set # CONFIG_SND_ARM is not set # CONFIG_SND_SPI is not set @@ -146,7 +135,6 @@ CONFIG_USB_LPC32XX=y CONFIG_USB_MASS_STORAGE=m CONFIG_USB_G_SERIAL=m CONFIG_MMC=y -# CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_MMC_ARMMMCI=y CONFIG_MMC_SPI=y CONFIG_NEW_LEDS=y @@ -169,10 +157,10 @@ CONFIG_RTC_DRV_LPC32XX=y CONFIG_DMADEVICES=y CONFIG_AMBA_PL08X=y CONFIG_STAGING=y -CONFIG_LPC32XX_ADC=y CONFIG_MEMORY=y CONFIG_ARM_PL172_MPMC=y CONFIG_IIO=y +CONFIG_LPC32XX_ADC=y CONFIG_MAX517=y CONFIG_PWM=y CONFIG_PWM_LPC32XX=y @@ -186,18 +174,27 @@ CONFIG_JFFS2_FS_WBUF_VERIFY=y CONFIG_UBIFS_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V4=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y +CONFIG_CRYPTO_ANSI_CPRNG=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CRC_CCITT=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y CONFIG_DEBUG_INFO=y +CONFIG_GDB_SCRIPTS=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=5 # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set # CONFIG_FTRACE is not set -# CONFIG_ARM_UNWIND is not set CONFIG_DEBUG_LL=y CONFIG_EARLY_PRINTK=y -CONFIG_CRYPTO_ANSI_CPRNG=y -# CONFIG_CRYPTO_HW is not set -CONFIG_CRC_CCITT=y diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig new file mode 100644 index 000000000000..7c07f9893a0f --- /dev/null +++ b/arch/arm/configs/milbeaut_m10v_defconfig @@ -0,0 +1,119 @@ +CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CGROUPS=y +CONFIG_BLK_DEV_INITRD=y 
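
The sun4i-a10.dtsi hunk earlier in this section pairs the new video-engine node with a default CMA pool placed where the VE can reach it; the hunk's own comment states the constraint (the address must stay in the lower 256 MiB of DRAM). Condensed from that hunk, with the size written out (0x6000000 is 96 MiB):

reserved-memory {
    #address-cells = <1>;
    #size-cells = <1>;
    ranges;

    /* Address must be kept in the lower 256 MiBs of DRAM for VE. */
    default-pool {
        compatible = "shared-dma-pool";
        size = <0x6000000>;                     /* 96 MiB */
        alloc-ranges = <0x4a000000 0x6000000>;
        reusable;
        linux,cma-default;
    };
};
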
+CONFIG_EMBEDDED=y +CONFIG_PERF_EVENTS=y +CONFIG_ARCH_MILBEAUT=y +CONFIG_ARCH_MILBEAUT_M10V=y +CONFIG_ARM_THUMBEE=y +# CONFIG_VDSO is not set +# CONFIG_CACHE_L2X0 is not set +CONFIG_ARM_ERRATA_430973=y +CONFIG_ARM_ERRATA_720789=y +CONFIG_ARM_ERRATA_754322=y +CONFIG_ARM_ERRATA_754327=y +CONFIG_ARM_ERRATA_764369=y +CONFIG_ARM_ERRATA_775420=y +CONFIG_ARM_ERRATA_798181=y +CONFIG_SMP=y +# CONFIG_SMP_ON_UP is not set +# CONFIG_ARM_CPU_TOPOLOGY is not set +CONFIG_HAVE_ARM_ARCH_TIMER=y +CONFIG_NR_CPUS=16 +CONFIG_THUMB2_KERNEL=y +# CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11 is not set +# CONFIG_ARM_PATCH_IDIV is not set +CONFIG_HIGHMEM=y +CONFIG_FORCE_MAX_ZONEORDER=12 +CONFIG_SECCOMP=y +CONFIG_KEXEC=y +CONFIG_EFI=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPUFREQ_DT=y +CONFIG_QORIQ_CPUFREQ=y +CONFIG_CPU_IDLE=y +CONFIG_ARM_CPUIDLE=y +CONFIG_VFP=y +CONFIG_NEON=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_EFI_VARS=m +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_ARM_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM_NEON=m +CONFIG_CRYPTO_SHA1_ARM_CE=m +CONFIG_CRYPTO_SHA2_ARM_CE=m +CONFIG_CRYPTO_SHA512_ARM=m +CONFIG_CRYPTO_AES_ARM=m +CONFIG_CRYPTO_AES_ARM_BS=m +CONFIG_CRYPTO_AES_ARM_CE=m +CONFIG_CRYPTO_GHASH_ARM_CE=m +CONFIG_CRYPTO_CRC32_ARM_CE=m +CONFIG_CRYPTO_CHACHA20_NEON=m +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_CMDLINE_PARTITION=y +CONFIG_CMA=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_OF_OVERLAY=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=65536 +CONFIG_SRAM=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_MATRIXKMAP=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_DEV_BUS=y +# CONFIG_HW_RANDOM is not set +CONFIG_GPIOLIB=y +CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +# CONFIG_HID is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_SYNC_FILE=y +# CONFIG_VIRTIO_MENU is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_SOC_BRCMSTB=y +CONFIG_MEMORY=y +# CONFIG_ARM_PMU is not set +CONFIG_EXT4_FS=y +CONFIG_AUTOFS4_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_NTFS_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_KEYS=y +CONFIG_CRYPTO_MANAGER=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SEQIV=m +# CONFIG_CRYPTO_ECHAINIV is not set +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CRC_CCITT=m +CONFIG_CRC_ITU_T=m +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig index 88ea02e7ba19..d95a8059d30b 100644 --- a/arch/arm/configs/mini2440_defconfig +++ b/arch/arm/configs/mini2440_defconfig @@ -298,7 +298,6 @@ CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_FS=y diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig index 2da0d9ee2107..078228a19339 100644 --- a/arch/arm/configs/moxart_defconfig +++ 
b/arch/arm/configs/moxart_defconfig @@ -125,7 +125,6 @@ CONFIG_CONFIGFS_FS=y CONFIG_JFFS2_FS=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_PAGEALLOC=y CONFIG_DEBUG_OBJECTS=y diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig index 0bcdec7cc169..1d923dbb9928 100644 --- a/arch/arm/configs/mps2_defconfig +++ b/arch/arm/configs/mps2_defconfig @@ -100,7 +100,6 @@ CONFIG_ROOT_NFS=y CONFIG_NLS=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 5bee34a7ff2e..c75051b9392c 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -54,6 +54,8 @@ CONFIG_SOC_VF610=y CONFIG_ARCH_KEYSTONE=y CONFIG_ARCH_MEDIATEK=y CONFIG_ARCH_MESON=y +CONFIG_ARCH_MILBEAUT=y +CONFIG_ARCH_MILBEAUT_M10V=y CONFIG_ARCH_MVEBU=y CONFIG_MACH_ARMADA_370=y CONFIG_MACH_ARMADA_375=y @@ -76,6 +78,7 @@ CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_EMEV2=y CONFIG_ARCH_R7S72100=y +CONFIG_ARCH_R7S9210=y CONFIG_ARCH_R8A73A4=y CONFIG_ARCH_R8A7740=y CONFIG_ARCH_R8A7743=y @@ -404,6 +407,7 @@ CONFIG_SPI_XILINX=y CONFIG_SPI_SPIDEV=y CONFIG_SPMI=y CONFIG_PINCTRL_AS3722=y +CONFIG_PINCTRL_RZA2=y CONFIG_PINCTRL_PALMAS=y CONFIG_PINCTRL_APQ8064=y CONFIG_PINCTRL_APQ8084=y @@ -631,6 +635,7 @@ CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m CONFIG_DRM_DUMB_VGA_DAC=m CONFIG_DRM_NXP_PTN3460=m CONFIG_DRM_PARADE_PS8622=m +CONFIG_DRM_SII902X=m CONFIG_DRM_SII9234=m CONFIG_DRM_TOSHIBA_TC358764=m CONFIG_DRM_I2C_ADV7511=m @@ -641,7 +646,7 @@ CONFIG_DRM_STM_DSI=m CONFIG_DRM_VC4=m CONFIG_DRM_ETNAVIV=m CONFIG_DRM_MXSFB=m -CONFIG_FB_ARMCLCD=y +CONFIG_DRM_PL111=m CONFIG_FB_EFI=y CONFIG_FB_WM8505=y CONFIG_FB_SH_MOBILE_LCDC=y @@ -826,6 +831,7 @@ CONFIG_RTC_DRV_MAX8997=m CONFIG_RTC_DRV_MAX77686=y CONFIG_RTC_DRV_RK808=m CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_PCF85363=m CONFIG_RTC_DRV_BQ32K=m CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_PALMAS=y @@ -1028,3 +1034,5 @@ CONFIG_CRYPTO_AES_ARM_CE=m CONFIG_CRYPTO_GHASH_ARM_CE=m CONFIG_CRYPTO_CRC32_ARM_CE=m CONFIG_CRYPTO_CHACHA20_NEON=m +CONFIG_GCC_PLUGINS=y +CONFIG_GCC_PLUGIN_STRUCTLEAK=y diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig index 0ac44acd5bc4..5f4c6aaa07f6 100644 --- a/arch/arm/configs/nhk8815_defconfig +++ b/arch/arm/configs/nhk8815_defconfig @@ -3,6 +3,7 @@ CONFIG_SYSVIPC=y CONFIG_NO_HZ_IDLE=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -10,16 +11,16 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y CONFIG_SLAB=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set # CONFIG_ARCH_MULTI_V7 is not set CONFIG_ARCH_NOMADIK=y CONFIG_MACH_NOMADIK_8815NHK=y -CONFIG_PREEMPT=y CONFIG_AEABI=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -49,12 +50,12 @@ CONFIG_MTD=y CONFIG_MTD_TESTS=m CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y -CONFIG_MTD_NAND_ECC_SMC=y -CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_FSMC=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_GENERIC=y +CONFIG_MTD_NAND_ECC_SMC=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_FSMC=y CONFIG_BLK_DEV_LOOP=y 
CONFIG_BLK_DEV_CRYPTOLOOP=y CONFIG_BLK_DEV_RAM=y @@ -75,7 +76,6 @@ CONFIG_PPP_MPPE=m CONFIG_PPPOE=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m -# CONFIG_INPUT_MOUSEDEV is not set CONFIG_INPUT_EVDEV=y # CONFIG_KEYBOARD_ATKBD is not set CONFIG_KEYBOARD_GPIO=y @@ -88,13 +88,22 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_I2C_CHARDEV=y CONFIG_I2C_GPIO=y -CONFIG_DEBUG_GPIO=y +CONFIG_SPI=y +CONFIG_SPI_GPIO=y CONFIG_GPIO_STMPE=y # CONFIG_HWMON is not set CONFIG_MFD_STMPE=y +CONFIG_MFD_STW481X=y CONFIG_REGULATOR=y +CONFIG_DRM=y +CONFIG_DRM_PANEL_TPO_TPG110=y +CONFIG_DRM_PL111=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_PWM=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_LOGO=y CONFIG_MMC=y -# CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_MMC_ARMMMCI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -105,6 +114,11 @@ CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_PL031=y CONFIG_DMADEVICES=y CONFIG_AMBA_PL08X=y +CONFIG_IIO=y +CONFIG_IIO_BUFFER=y +CONFIG_IIO_ST_ACCEL_3AXIS=y +CONFIG_PWM=y +CONFIG_PWM_STMPE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y CONFIG_FUSE_FS=y @@ -121,13 +135,12 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DES=y CONFIG_DEBUG_INFO=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_SCHED_DEBUG is not set # CONFIG_DEBUG_PREEMPT is not set # CONFIG_DEBUG_BUGVERBOSE is not set -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/arm/configs/nuc910_defconfig b/arch/arm/configs/nuc910_defconfig index a72653645f9d..c0d152c02fba 100644 --- a/arch/arm/configs/nuc910_defconfig +++ b/arch/arm/configs/nuc910_defconfig @@ -47,7 +47,6 @@ CONFIG_ROMFS_FS=y CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_CRC32 is not set diff --git a/arch/arm/configs/nuc950_defconfig b/arch/arm/configs/nuc950_defconfig index 614a0a28d0b4..8dde1186c2ef 100644 --- a/arch/arm/configs/nuc950_defconfig +++ b/arch/arm/configs/nuc950_defconfig @@ -64,6 +64,5 @@ CONFIG_ROMFS_FS=y CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y diff --git a/arch/arm/configs/nuc960_defconfig b/arch/arm/configs/nuc960_defconfig index b84bbd216153..6bb784f8eb5b 100644 --- a/arch/arm/configs/nuc960_defconfig +++ b/arch/arm/configs/nuc960_defconfig @@ -53,7 +53,6 @@ CONFIG_ROMFS_FS=y CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_CRC32 is not set diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 9c6f436d1b12..3f03ec6d2644 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -25,16 +25,6 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_SLAB=y CONFIG_PROFILING=y -CONFIG_OPROFILE=y -CONFIG_KPROBES=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y CONFIG_ARCH_MULTI_V6=y CONFIG_POWER_AVS_OMAP=y CONFIG_POWER_AVS_OMAP_CLASS3=y @@ -48,15 +38,8 @@ CONFIG_SOC_AM43XX=y CONFIG_SOC_DRA7XX=y 
CONFIG_ARM_THUMBEE=y CONFIG_ARM_ERRATA_411920=y -CONFIG_PCI=y -CONFIG_PCI_MSI=y -CONFIG_PCI_DRA7XX_EP=y -CONFIG_PCI_ENDPOINT=y -CONFIG_PCI_ENDPOINT_CONFIGFS=y -CONFIG_PCI_EPF_TEST=m CONFIG_SMP=y CONFIG_NR_CPUS=2 -CONFIG_CMA=y CONFIG_SECCOMP=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 @@ -74,8 +57,27 @@ CONFIG_CPUFREQ_DT=m CONFIG_ARM_TI_CPUFREQ=y CONFIG_CPU_IDLE=y CONFIG_KERNEL_MODE_NEON=y -CONFIG_BINFMT_MISC=y CONFIG_PM_DEBUG=y +CONFIG_ARM_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM_NEON=m +CONFIG_CRYPTO_SHA256_ARM=m +CONFIG_CRYPTO_SHA512_ARM=m +CONFIG_CRYPTO_AES_ARM=m +CONFIG_CRYPTO_AES_ARM_BS=m +CONFIG_CRYPTO_GHASH_ARM_CE=m +CONFIG_CRYPTO_CHACHA20_NEON=m +CONFIG_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_BINFMT_MISC=y +CONFIG_CMA=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -118,6 +120,12 @@ CONFIG_AF_RXRPC=m CONFIG_RXKAD=y CONFIG_CFG80211=m CONFIG_MAC80211=m +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCI_DRA7XX_EP=y +CONFIG_PCI_ENDPOINT=y +CONFIG_PCI_ENDPOINT_CONFIGFS=y +CONFIG_PCI_EPF_TEST=m CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y @@ -132,13 +140,13 @@ CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_PHYSMAP=y CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_M25P80=m +CONFIG_MTD_ONENAND=y +CONFIG_MTD_ONENAND_VERIFY_WRITE=y +CONFIG_MTD_ONENAND_OMAP2=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_ECC_BCH=y CONFIG_MTD_NAND_OMAP2=y CONFIG_MTD_NAND_OMAP_BCH=y -CONFIG_MTD_ONENAND=y -CONFIG_MTD_ONENAND_VERIFY_WRITE=y -CONFIG_MTD_ONENAND_OMAP2=y CONFIG_MTD_SPI_NOR=m CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=y @@ -155,7 +163,6 @@ CONFIG_SATA_AHCI_PLATFORM=y CONFIG_AHCI_DM816=m CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set # CONFIG_NET_VENDOR_CIRRUS is not set CONFIG_DM9000=y @@ -283,11 +290,9 @@ CONFIG_HWMON=m CONFIG_SENSORS_GPIO_FAN=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_TMP102=m -CONFIG_THERMAL=m CONFIG_THERMAL_GOV_FAIR_SHARE=y CONFIG_THERMAL_GOV_USER_SPACE=y CONFIG_CPU_THERMAL=y -CONFIG_TI_SOC_THERMAL=m CONFIG_TI_THERMAL=y CONFIG_OMAP4_THERMAL=y CONFIG_OMAP5_THERMAL=y @@ -381,13 +386,12 @@ CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_DEBUG=y CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m -CONFIG_SND_SOC_TLV320AIC3X=m CONFIG_SND_SOC_DAVINCI_MCASP=m CONFIG_SND_SOC_NOKIA_RX51=m -CONFIG_SND_SOC_OMAP_HDMI=m -CONFIG_SND_SOC_OMAP_ABE_TWL6040=m CONFIG_SND_SOC_OMAP3_PANDORA=m CONFIG_SND_SOC_OMAP3_TWL4030=m +CONFIG_SND_SOC_OMAP_ABE_TWL6040=m +CONFIG_SND_SOC_OMAP_HDMI=m CONFIG_SND_SOC_CPCAP=m CONFIG_SND_SOC_TLV320AIC23_I2C=m CONFIG_SND_SIMPLE_CARD=m @@ -475,8 +479,6 @@ CONFIG_RTC_DRV_PALMAS=m CONFIG_RTC_DRV_OMAP=m CONFIG_RTC_DRV_CPCAP=m CONFIG_DMADEVICES=y -CONFIG_DMA_OMAP=y -CONFIG_TI_EDMA=y CONFIG_OMAP_IOMMU=y CONFIG_REMOTEPROC=m CONFIG_OMAP_REMOTEPROC=m @@ -531,24 +533,8 @@ CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_INFO_SPLIT=y -CONFIG_DEBUG_INFO_DWARF4=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_SCHEDSTATS=y -CONFIG_PROVE_LOCKING=y -# CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_SECURITY=y CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_ARM_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM_NEON=m -CONFIG_CRYPTO_SHA256_ARM=m -CONFIG_CRYPTO_SHA512_ARM=m -CONFIG_CRYPTO_AES_ARM=m -CONFIG_CRYPTO_AES_ARM_BS=m -CONFIG_CRYPTO_GHASH_ARM_CE=m -CONFIG_CRYPTO_CHACHA20_NEON=m CONFIG_CRC_CCITT=y 
CONFIG_CRC_T10DIF=y CONFIG_CRC_ITU_T=y @@ -557,3 +543,10 @@ CONFIG_LIBCRC32C=y CONFIG_FONTS=y CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_SPLIT=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_SCHEDSTATS=y +# CONFIG_DEBUG_BUGVERBOSE is not set diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig index 6bb506edb1f5..d4654755b09c 100644 --- a/arch/arm/configs/pxa_defconfig +++ b/arch/arm/configs/pxa_defconfig @@ -66,9 +66,6 @@ CONFIG_MACH_MIOA701=y CONFIG_PXA_EZX=y CONFIG_MACH_MP900C=y CONFIG_ARCH_PXA_PALM=y -CONFIG_MACH_RAUMFELD_RC=y -CONFIG_MACH_RAUMFELD_CONNECTOR=y -CONFIG_MACH_RAUMFELD_SPEAKER=y CONFIG_PXA_SHARPSL=y CONFIG_MACH_POODLE=y CONFIG_MACH_CORGI=y @@ -482,7 +479,6 @@ CONFIG_SND_PCM_OSS=m CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_VERBOSE_PRINTK=y CONFIG_SND_DEBUG=y -CONFIG_SND_PXA2XX_AC97=m CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m CONFIG_SND_ATMEL_SOC=m @@ -498,7 +494,6 @@ CONFIG_SND_PXA2XX_SOC_E800=m CONFIG_SND_PXA2XX_SOC_EM_X270=m CONFIG_SND_PXA2XX_SOC_PALM27X=y CONFIG_SND_SOC_ZYLONITE=m -CONFIG_SND_SOC_RAUMFELD=m CONFIG_SND_PXA2XX_SOC_HX4700=m CONFIG_SND_PXA2XX_SOC_MAGICIAN=m CONFIG_SND_PXA2XX_SOC_MIOA701=m diff --git a/arch/arm/configs/raumfeld_defconfig b/arch/arm/configs/raumfeld_defconfig deleted file mode 100644 index 2dd56e9a484e..000000000000 --- a/arch/arm/configs/raumfeld_defconfig +++ /dev/null @@ -1,197 +0,0 @@ -# CONFIG_LOCALVERSION_AUTO is not set -# CONFIG_SWAP is not set -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_LBDAF is not set -# CONFIG_BLK_DEV_BSG is not set -CONFIG_ARCH_PXA=y -CONFIG_MACH_RAUMFELD_RC=y -CONFIG_MACH_RAUMFELD_CONNECTOR=y -CONFIG_MACH_RAUMFELD_SPEAKER=y -CONFIG_NO_HZ=y -CONFIG_AEABI=y -# CONFIG_OABI_COMPAT is not set -CONFIG_CMDLINE="console=ttyS0,115200 rw" -CONFIG_CPU_FREQ=y -CONFIG_CPU_IDLE=y -CONFIG_PM=y -CONFIG_APM_EMULATION=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_PNP=y -CONFIG_SYN_COOKIES=y -CONFIG_IPV6=y -CONFIG_CFG80211=y -CONFIG_MAC80211=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_MTD=y -CONFIG_MTD_BLOCK=y -CONFIG_NFTL=y -CONFIG_NFTL_RW=y -CONFIG_MTD_BLOCK2MTD=y -CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_MARVELL=y -CONFIG_MTD_UBI=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_ISL29003=y -CONFIG_IIO=y -CONFIG_AD5446=y -CONFIG_SCSI=y -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_SG=y -CONFIG_NETDEVICES=y -CONFIG_NET_ETHERNET=y -CONFIG_SMSC911X=y -CONFIG_LIBERTAS=y -CONFIG_LIBERTAS_SDIO=m -CONFIG_USB_USBNET=y -# CONFIG_USB_NET_AX8817X is not set -# CONFIG_USB_NET_NET1080 is not set -CONFIG_USB_NET_MCS7830=y -# CONFIG_USB_NET_CDC_SUBSET is not set -# CONFIG_USB_NET_ZAURUS is not set -CONFIG_INPUT_EVDEV=y -CONFIG_KEYBOARD_GPIO=y -# CONFIG_INPUT_MOUSE is not set -CONFIG_INPUT_TOUCHSCREEN=y -CONFIG_TOUCHSCREEN_EETI=m -CONFIG_INPUT_MISC=y -CONFIG_INPUT_GPIO_ROTARY_ENCODER=y -CONFIG_SERIAL_PXA=y -CONFIG_SERIAL_PXA_CONSOLE=y -CONFIG_HW_RANDOM=y -CONFIG_I2C=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_PXA=y -CONFIG_SPI=y -CONFIG_SPI_DEBUG=y -CONFIG_SPI_GPIO=y -CONFIG_SPI_SPIDEV=y -CONFIG_DEBUG_GPIO=y -CONFIG_W1_MASTER_GPIO=m -CONFIG_POWER_SUPPLY=y -CONFIG_PDA_POWER=y -CONFIG_BATTERY_DS2760=m -CONFIG_SENSORS_LIS3_SPI=y -CONFIG_REGULATOR=y -CONFIG_REGULATOR_DEBUG=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_MAX8660=y -CONFIG_FB=y -CONFIG_FB_PXA=y -CONFIG_BACKLIGHT_LCD_SUPPORT=y -# CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_GENERIC is not set -CONFIG_BACKLIGHT_PWM=y -# 
CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -# CONFIG_LOGO_LINUX_CLUT224 is not set -CONFIG_SOUND=y -CONFIG_SND=y -# CONFIG_SND_DRIVERS is not set -# CONFIG_SND_USB is not set -CONFIG_SND_SOC=y -CONFIG_SND_PXA2XX_SOC=y -CONFIG_SND_SOC_RAUMFELD=y -CONFIG_HID_DRAGONRISE=y -CONFIG_HID_GYRATION=y -CONFIG_HID_TWINHAN=y -CONFIG_HID_NTRIG=y -CONFIG_HID_PANTHERLORD=y -CONFIG_HID_PETALYNX=y -CONFIG_HID_SAMSUNG=y -CONFIG_HID_SONY=y -CONFIG_HID_SUNPLUS=y -CONFIG_HID_GREENASIA=y -CONFIG_HID_SMARTJOYPLUS=y -CONFIG_HID_TOPSEED=y -CONFIG_HID_THRUSTMASTER=y -CONFIG_HID_ZEROPLUS=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_MON=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_STORAGE=y -CONFIG_USB_STORAGE_FREECOM=y -CONFIG_USB_STORAGE_ISD200=y -CONFIG_USB_STORAGE_USBAT=y -CONFIG_USB_STORAGE_SDDR09=y -CONFIG_USB_STORAGE_SDDR55=y -CONFIG_MMC=y -CONFIG_MMC_PXA=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_GPIO=y -CONFIG_LEDS_LT3593=y -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_BACKLIGHT=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_PXA=y -CONFIG_DMADEVICES=y -CONFIG_UIO=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XIP=y -CONFIG_EXT3_FS=y -CONFIG_FSCACHE=y -CONFIG_FSCACHE_STATS=y -CONFIG_CACHEFILES=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_UBIFS_FS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_ROOT_NFS=y -CONFIG_NFS_FSCACHE=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=y -CONFIG_NLS_CODEPAGE_775=y -CONFIG_NLS_CODEPAGE_850=y -CONFIG_NLS_CODEPAGE_852=y -CONFIG_NLS_CODEPAGE_855=y -CONFIG_NLS_CODEPAGE_857=y -CONFIG_NLS_CODEPAGE_860=y -CONFIG_NLS_CODEPAGE_861=y -CONFIG_NLS_CODEPAGE_862=y -CONFIG_NLS_CODEPAGE_863=y -CONFIG_NLS_CODEPAGE_864=y -CONFIG_NLS_CODEPAGE_865=y -CONFIG_NLS_CODEPAGE_866=y -CONFIG_NLS_CODEPAGE_869=y -CONFIG_NLS_CODEPAGE_936=y -CONFIG_NLS_CODEPAGE_950=y -CONFIG_NLS_CODEPAGE_932=y -CONFIG_NLS_CODEPAGE_949=y -CONFIG_NLS_CODEPAGE_874=y -CONFIG_NLS_ISO8859_8=y -CONFIG_NLS_CODEPAGE_1250=y -CONFIG_NLS_CODEPAGE_1251=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_NLS_ISO8859_2=y -CONFIG_NLS_ISO8859_3=y -CONFIG_NLS_ISO8859_4=y -CONFIG_NLS_ISO8859_5=y -CONFIG_NLS_ISO8859_6=y -CONFIG_NLS_ISO8859_7=y -CONFIG_NLS_ISO8859_9=y -CONFIG_NLS_ISO8859_13=y -CONFIG_NLS_ISO8859_14=y -CONFIG_NLS_ISO8859_15=y -CONFIG_NLS_KOI8_R=y -CONFIG_NLS_KOI8_U=y -CONFIG_NLS_UTF8=y -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_INFO=y -CONFIG_DEBUG_USER=y -CONFIG_DEBUG_LL=y -# CONFIG_CRYPTO_ANSI_CPRNG is not set -# CONFIG_CRYPTO_HW is not set diff --git a/arch/arm/configs/s5pv210_defconfig b/arch/arm/configs/s5pv210_defconfig index a077597369f1..fd4f28aabda6 100644 --- a/arch/arm/configs/s5pv210_defconfig +++ b/arch/arm/configs/s5pv210_defconfig @@ -1,24 +1,30 @@ CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT=y CONFIG_CGROUPS=y CONFIG_SYSFS_DEPRECATED=y CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_KALLSYMS_ALL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_BLK_DEV_BSG is not set -CONFIG_PARTITION_ADVANCED=y -CONFIG_BSD_DISKLABEL=y -CONFIG_SOLARIS_X86_PARTITION=y CONFIG_ARCH_S5PV210=y CONFIG_VMSPLIT_2G=y -CONFIG_PREEMPT=y CONFIG_ARM_APPENDED_DTB=y CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x20800000,8M console=ttySAC1,115200 init=/linuxrc" +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_GOV_USERSPACE=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m 
CONFIG_VFP=y CONFIG_NEON=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_SOLARIS_X86_PARTITION=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -27,6 +33,11 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y +CONFIG_BT=m +CONFIG_BT_RFCOMM=y +CONFIG_BT_BNEP=y +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCM=y CONFIG_CFG80211=m CONFIG_MAC80211=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" @@ -44,21 +55,35 @@ CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_GPIO=y # CONFIG_INPUT_MOUSE is not set CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ATMEL_MXT=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_PWM_VIBRA=m CONFIG_SERIAL_8250=y CONFIG_SERIAL_SAMSUNG=y CONFIG_SERIAL_SAMSUNG_CONSOLE=y +CONFIG_SERIAL_DEV_BUS=y CONFIG_HW_RANDOM=y CONFIG_I2C_GPIO=y +CONFIG_I2C_S3C2410=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_SYSCON_POWEROFF=y CONFIG_POWER_SUPPLY=y CONFIG_BATTERY_MAX17040=y # CONFIG_HWMON is not set CONFIG_MFD_MAX8998=y CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_MAX8998=y +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m +CONFIG_VIDEO_SAMSUNG_S5P_MFC=m CONFIG_DRM=y CONFIG_DRM_EXYNOS=y CONFIG_DRM_EXYNOS_FIMD=y CONFIG_DRM_EXYNOS_DPI=y +CONFIG_DRM_EXYNOS_ROTATOR=y CONFIG_USB=y CONFIG_USB_OTG=y CONFIG_USB_EHCI_HCD=y @@ -72,6 +97,9 @@ CONFIG_MMC_SDHCI_S3C=y CONFIG_MMC_SDHCI_S3C_DMA=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_MAX8998=m +CONFIG_DMADEVICES=y +CONFIG_PWM=y +CONFIG_PWM_SAMSUNG=y CONFIG_PHY_SAMSUNG_USB2=m CONFIG_PHY_S5PV210_USB2=y CONFIG_EXT2_FS=y @@ -87,6 +115,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y +CONFIG_CRC_CCITT=y CONFIG_DEBUG_INFO=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y @@ -96,7 +125,3 @@ CONFIG_DEBUG_SPINLOCK=y CONFIG_DEBUG_MUTEXES=y CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_USER=y -CONFIG_DEBUG_LL=y -CONFIG_DEBUG_S3C_UART1=y -CONFIG_EARLY_PRINTK=y -CONFIG_CRC_CCITT=y diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig index 9e5a5ade6cab..9b0efac101ab 100644 --- a/arch/arm/configs/shmobile_defconfig +++ b/arch/arm/configs/shmobile_defconfig @@ -8,29 +8,8 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_PERF_EVENTS=y CONFIG_SLAB=y CONFIG_ARCH_RENESAS=y -CONFIG_ARCH_EMEV2=y -CONFIG_ARCH_R7S72100=y -CONFIG_ARCH_R8A73A4=y -CONFIG_ARCH_R8A7740=y -CONFIG_ARCH_R8A7743=y -CONFIG_ARCH_R8A7744=y -CONFIG_ARCH_R8A7745=y -CONFIG_ARCH_R8A77470=y -CONFIG_ARCH_R8A7778=y -CONFIG_ARCH_R8A7779=y -CONFIG_ARCH_R8A7790=y -CONFIG_ARCH_R8A7791=y -CONFIG_ARCH_R8A7792=y -CONFIG_ARCH_R8A7793=y -CONFIG_ARCH_R8A7794=y -CONFIG_ARCH_R9A06G032=y -CONFIG_ARCH_SH73A0=y CONFIG_PL310_ERRATA_588369=y CONFIG_ARM_ERRATA_754322=y -CONFIG_PCI=y -CONFIG_PCI_MSI=y -CONFIG_PCI_RCAR_GEN2=y -CONFIG_PCIE_RCAR=y CONFIG_SMP=y CONFIG_SCHED_MC=y CONFIG_NR_CPUS=8 @@ -58,6 +37,10 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_CAN=y CONFIG_CAN_RCAR=y +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCI_RCAR_GEN2=y +CONFIG_PCIE_RCAR=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y @@ -91,7 +74,6 @@ CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_8250_EM=y CONFIG_SERIAL_SH_SCI=y CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y CONFIG_I2C_DEMUX_PINCTRL=y CONFIG_I2C_EMEV2=y CONFIG_I2C_GPIO=y @@ -104,6 +86,7 @@ CONFIG_SPI_RSPI=y CONFIG_SPI_SH_MSIOF=y CONFIG_SPI_SH_HSPI=y CONFIG_PINCTRL_RZA1=y +CONFIG_PINCTRL_RZA2=y CONFIG_GPIO_EM=y CONFIG_GPIO_RCAR=y CONFIG_GPIO_PCF857X=y @@ -177,17 +160,35 @@ 
CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_RS5C372=y +CONFIG_RTC_DRV_PCF85363=y CONFIG_RTC_DRV_BQ32K=y CONFIG_RTC_DRV_S35390A=y CONFIG_RTC_DRV_RX8581=y CONFIG_RTC_DRV_DA9063=y CONFIG_DMADEVICES=y -CONFIG_SH_DMAE=y CONFIG_RCAR_DMAC=y CONFIG_RENESAS_USB_DMAC=y CONFIG_STAGING=y CONFIG_STAGING_BOARD=y # CONFIG_IOMMU_SUPPORT is not set +CONFIG_ARCH_EMEV2=y +CONFIG_ARCH_R7S72100=y +CONFIG_ARCH_R7S9210=y +CONFIG_ARCH_R8A73A4=y +CONFIG_ARCH_R8A7740=y +CONFIG_ARCH_R8A7743=y +CONFIG_ARCH_R8A7744=y +CONFIG_ARCH_R8A7745=y +CONFIG_ARCH_R8A77470=y +CONFIG_ARCH_R8A7778=y +CONFIG_ARCH_R8A7779=y +CONFIG_ARCH_R8A7790=y +CONFIG_ARCH_R8A7791=y +CONFIG_ARCH_R8A7792=y +CONFIG_ARCH_R8A7793=y +CONFIG_ARCH_R8A7794=y +CONFIG_ARCH_R9A06G032=y +CONFIG_ARCH_SH73A0=y CONFIG_IIO=y CONFIG_AK8975=y CONFIG_PWM=y @@ -211,4 +212,3 @@ CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_KERNEL=y -# CONFIG_ARM_UNWIND is not set diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig index 371fca4e1ab7..08d1b3e11d68 100644 --- a/arch/arm/configs/socfpga_defconfig +++ b/arch/arm/configs/socfpga_defconfig @@ -9,27 +9,20 @@ CONFIG_NAMESPACES=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y CONFIG_PROFILING=y -CONFIG_OPROFILE=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_LBDAF is not set -# CONFIG_BLK_DEV_BSG is not set -# CONFIG_IOSCHED_DEADLINE is not set -# CONFIG_IOSCHED_CFQ is not set CONFIG_ARCH_SOCFPGA=y CONFIG_ARM_THUMBEE=y -CONFIG_PCI=y -CONFIG_PCI_MSI=y -CONFIG_PCIE_ALTERA=y -CONFIG_PCIE_ALTERA_MSI=y CONFIG_SMP=y CONFIG_NR_CPUS=2 -CONFIG_AEABI=y CONFIG_HIGHMEM=y CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_VFP=y CONFIG_NEON=y +CONFIG_OPROFILE=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y @@ -48,6 +41,10 @@ CONFIG_CAN=y CONFIG_CAN_C_CAN=y CONFIG_CAN_C_CAN_PLATFORM=y CONFIG_CAN_DEBUG_DEVICES=y +CONFIG_PCI=y +CONFIG_PCI_MSI=y +CONFIG_PCIE_ALTERA=y +CONFIG_PCIE_ALTERA_MSI=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y @@ -60,7 +57,7 @@ CONFIG_MTD_SPI_NOR=y # CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set CONFIG_SPI_CADENCE_QUADSPI=y CONFIG_OF_OVERLAY=y -CONFIG_OF_CONFIGFS=y +CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=2 CONFIG_BLK_DEV_RAM_SIZE=8192 @@ -131,12 +128,12 @@ CONFIG_DMADEVICES=y CONFIG_PL330_DMA=y CONFIG_DMATEST=m CONFIG_FPGA=y -CONFIG_FPGA_REGION=y CONFIG_FPGA_MGR_SOCFPGA=y CONFIG_FPGA_MGR_SOCFPGA_A10=y CONFIG_FPGA_BRIDGE=y CONFIG_SOCFPGA_FPGA_BRIDGE=y CONFIG_ALTERA_FREEZE_BRIDGE=y +CONFIG_FPGA_REGION=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig index 2c5e8df33191..f1b52fb3461b 100644 --- a/arch/arm/configs/spear3xx_defconfig +++ b/arch/arm/configs/spear3xx_defconfig @@ -52,8 +52,10 @@ CONFIG_GPIO_PL061=y # CONFIG_HWMON is not set CONFIG_WATCHDOG=y CONFIG_ARM_SP805_WATCHDOG=y -CONFIG_FB=y -CONFIG_FB_ARMCLCD=y +CONFIG_DRM=y +CONFIG_DRM_PL111=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_OHCI_HCD=y diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig index ba805b757a8d..0258ba891376 100644 --- a/arch/arm/configs/stm32_defconfig +++ b/arch/arm/configs/stm32_defconfig @@ -80,7 +80,6 @@ CONFIG_EXT3_FS=y CONFIG_NLS=y CONFIG_PRINTK_TIME=y 
CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S index ba8e6a32fdc9..bc53bcaa772e 100644 --- a/arch/arm/crypto/aes-ce-core.S +++ b/arch/arm/crypto/aes-ce-core.S @@ -317,25 +317,27 @@ ENTRY(ce_aes_ctr_encrypt) .Lctrloop: vmov q0, q6 bl aes_encrypt - subs r4, r4, #1 - bmi .Lctrtailblock @ blocks < 0 means tail block - vld1.8 {q3}, [r1]! - veor q3, q0, q3 - vst1.8 {q3}, [r0]! adds r6, r6, #1 @ increment BE ctr rev ip, r6 vmov s27, ip bcs .Lctrcarry - teq r4, #0 + +.Lctrcarrydone: + subs r4, r4, #1 + bmi .Lctrtailblock @ blocks < 0 means tail block + vld1.8 {q3}, [r1]! + veor q3, q0, q3 + vst1.8 {q3}, [r0]! bne .Lctrloop + .Lctrout: - vst1.8 {q6}, [r5] + vst1.8 {q6}, [r5] @ return next CTR value pop {r4-r6, pc} .Lctrtailblock: - vst1.8 {q0}, [r0, :64] @ return just the key stream - pop {r4-r6, pc} + vst1.8 {q0}, [r0, :64] @ return the key stream + b .Lctrout .Lctrcarry: .irp sreg, s26, s25, s24 @@ -344,11 +346,9 @@ ENTRY(ce_aes_ctr_encrypt) adds ip, ip, #1 rev ip, ip vmov \sreg, ip - bcc 0f + bcc .Lctrcarrydone .endr -0: teq r4, #0 - beq .Lctrout - b .Lctrloop + b .Lctrcarrydone ENDPROC(ce_aes_ctr_encrypt) /* diff --git a/arch/arm/crypto/crct10dif-ce-core.S b/arch/arm/crypto/crct10dif-ce-core.S index ce45ba0c0687..86be258a803f 100644 --- a/arch/arm/crypto/crct10dif-ce-core.S +++ b/arch/arm/crypto/crct10dif-ce-core.S @@ -2,12 +2,14 @@ // Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions // // Copyright (C) 2016 Linaro Ltd +// Copyright (C) 2019 Google LLC // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // +// Derived from the x86 version: // // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions // @@ -54,19 +56,11 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // -// Function API: -// UINT16 crc_t10dif_pcl( -// UINT16 init_crc, //initial CRC value, 16 bits -// const unsigned char *buf, //buffer pointer to calculate CRC on -// UINT64 len //buffer length in bytes (64-bit data) -// ); -// // Reference paper titled "Fast CRC Computation for Generic // Polynomials Using PCLMULQDQ Instruction" // URL: http://www.intel.com/content/dam/www/public/us/en/documents // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // -// #include #include @@ -78,13 +72,14 @@ #endif .text + .arch armv7-a .fpu crypto-neon-fp-armv8 - arg1_low32 .req r0 - arg2 .req r1 - arg3 .req r2 + init_crc .req r0 + buf .req r1 + len .req r2 - qzr .req q13 + fold_consts_ptr .req ip q0l .req d0 q0h .req d1 @@ -102,82 +97,35 @@ q6h .req d13 q7l .req d14 q7h .req d15 - -ENTRY(crc_t10dif_pmull) - vmov.i8 qzr, #0 // init zero register - - // adjust the 16-bit initial_crc value, scale it to 32 bits - lsl arg1_low32, arg1_low32, #16 - - // check if smaller than 256 - cmp arg3, #256 - - // for sizes less than 128, we can't fold 64B at a time... - blt _less_than_128 - - // load the initial crc value - // crc value does not need to be byte-reflected, but it needs - // to be moved to the high part of the register. - // because data will be byte-reflected and will align with - // initial crc at correct place. 
- vmov s0, arg1_low32 // initial crc - vext.8 q10, qzr, q0, #4 - - // receive the initial 64B data, xor the initial crc value - vld1.64 {q0-q1}, [arg2, :128]! - vld1.64 {q2-q3}, [arg2, :128]! - vld1.64 {q4-q5}, [arg2, :128]! - vld1.64 {q6-q7}, [arg2, :128]! -CPU_LE( vrev64.8 q0, q0 ) -CPU_LE( vrev64.8 q1, q1 ) -CPU_LE( vrev64.8 q2, q2 ) -CPU_LE( vrev64.8 q3, q3 ) -CPU_LE( vrev64.8 q4, q4 ) -CPU_LE( vrev64.8 q5, q5 ) -CPU_LE( vrev64.8 q6, q6 ) -CPU_LE( vrev64.8 q7, q7 ) - - vswp d0, d1 - vswp d2, d3 - vswp d4, d5 - vswp d6, d7 - vswp d8, d9 - vswp d10, d11 - vswp d12, d13 - vswp d14, d15 - - // XOR the initial_crc value - veor.8 q0, q0, q10 - - adr ip, rk3 - vld1.64 {q10}, [ip, :128] // xmm10 has rk3 and rk4 - - // - // we subtract 256 instead of 128 to save one instruction from the loop - // - sub arg3, arg3, #256 - - // at this section of the code, there is 64*x+y (0<=y<64) bytes of - // buffer. The _fold_64_B_loop will fold 64B at a time - // until we have 64+y Bytes of buffer - - - // fold 64B at a time. This section of the code folds 4 vector - // registers in parallel -_fold_64_B_loop: - - .macro fold64, reg1, reg2 - vld1.64 {q11-q12}, [arg2, :128]! - - vmull.p64 q8, \reg1\()h, d21 - vmull.p64 \reg1, \reg1\()l, d20 - vmull.p64 q9, \reg2\()h, d21 - vmull.p64 \reg2, \reg2\()l, d20 - -CPU_LE( vrev64.8 q11, q11 ) -CPU_LE( vrev64.8 q12, q12 ) - vswp d22, d23 - vswp d24, d25 + q8l .req d16 + q8h .req d17 + q9l .req d18 + q9h .req d19 + q10l .req d20 + q10h .req d21 + q11l .req d22 + q11h .req d23 + q12l .req d24 + q12h .req d25 + + FOLD_CONSTS .req q10 + FOLD_CONST_L .req q10l + FOLD_CONST_H .req q10h + + // Fold reg1, reg2 into the next 32 data bytes, storing the result back + // into reg1, reg2. + .macro fold_32_bytes, reg1, reg2 + vld1.64 {q11-q12}, [buf]! + + vmull.p64 q8, \reg1\()h, FOLD_CONST_H + vmull.p64 \reg1, \reg1\()l, FOLD_CONST_L + vmull.p64 q9, \reg2\()h, FOLD_CONST_H + vmull.p64 \reg2, \reg2\()l, FOLD_CONST_L + +CPU_LE( vrev64.8 q11, q11 ) +CPU_LE( vrev64.8 q12, q12 ) + vswp q11l, q11h + vswp q12l, q12h veor.8 \reg1, \reg1, q8 veor.8 \reg2, \reg2, q9 @@ -185,242 +133,248 @@ CPU_LE( vrev64.8 q12, q12 ) veor.8 \reg2, \reg2, q12 .endm - fold64 q0, q1 - fold64 q2, q3 - fold64 q4, q5 - fold64 q6, q7 - - subs arg3, arg3, #128 - - // check if there is another 64B in the buffer to be able to fold - bge _fold_64_B_loop - - // at this point, the buffer pointer is pointing at the last y Bytes - // of the buffer the 64B of folded data is in 4 of the vector - // registers: v0, v1, v2, v3 - - // fold the 8 vector registers to 1 vector register with different - // constants - - adr ip, rk9 - vld1.64 {q10}, [ip, :128]! - - .macro fold16, reg, rk - vmull.p64 q8, \reg\()l, d20 - vmull.p64 \reg, \reg\()h, d21 - .ifnb \rk - vld1.64 {q10}, [ip, :128]! + // Fold src_reg into dst_reg, optionally loading the next fold constants + .macro fold_16_bytes, src_reg, dst_reg, load_next_consts + vmull.p64 q8, \src_reg\()l, FOLD_CONST_L + vmull.p64 \src_reg, \src_reg\()h, FOLD_CONST_H + .ifnb \load_next_consts + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! 
.endif - veor.8 q7, q7, q8 - veor.8 q7, q7, \reg + veor.8 \dst_reg, \dst_reg, q8 + veor.8 \dst_reg, \dst_reg, \src_reg .endm - fold16 q0, rk11 - fold16 q1, rk13 - fold16 q2, rk15 - fold16 q3, rk17 - fold16 q4, rk19 - fold16 q5, rk1 - fold16 q6 - - // instead of 64, we add 48 to the loop counter to save 1 instruction - // from the loop instead of a cmp instruction, we use the negative - // flag with the jl instruction - adds arg3, arg3, #(128-16) - blt _final_reduction_for_128 - - // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 - // and the rest is in memory. We can fold 16 bytes at a time if y>=16 - // continue folding 16B at a time - -_16B_reduction_loop: - vmull.p64 q8, d14, d20 - vmull.p64 q7, d15, d21 - veor.8 q7, q7, q8 + .macro __adrl, out, sym + movw \out, #:lower16:\sym + movt \out, #:upper16:\sym + .endm - vld1.64 {q0}, [arg2, :128]! -CPU_LE( vrev64.8 q0, q0 ) - vswp d0, d1 - veor.8 q7, q7, q0 - subs arg3, arg3, #16 - - // instead of a cmp instruction, we utilize the flags with the - // jge instruction equivalent of: cmp arg3, 16-16 - // check if there is any more 16B in the buffer to be able to fold - bge _16B_reduction_loop - - // now we have 16+z bytes left to reduce, where 0<= z < 16. - // first, we reduce the data in the xmm7 register - -_final_reduction_for_128: - // check if any more data to fold. If not, compute the CRC of - // the final 128 bits - adds arg3, arg3, #16 - beq _128_done - - // here we are getting data that is less than 16 bytes. - // since we know that there was data before the pointer, we can - // offset the input pointer before the actual point, to receive - // exactly 16 bytes. after that the registers need to be adjusted. -_get_last_two_regs: - add arg2, arg2, arg3 - sub arg2, arg2, #16 - vld1.64 {q1}, [arg2] -CPU_LE( vrev64.8 q1, q1 ) - vswp d2, d3 - - // get rid of the extra data that was loaded before - // load the shift constant - adr ip, tbl_shf_table + 16 - sub ip, ip, arg3 - vld1.8 {q0}, [ip] - - // shift v2 to the left by arg3 bytes - vtbl.8 d4, {d14-d15}, d0 - vtbl.8 d5, {d14-d15}, d1 - - // shift v7 to the right by 16-arg3 bytes - vmov.i8 q9, #0x80 - veor.8 q0, q0, q9 - vtbl.8 d18, {d14-d15}, d0 - vtbl.8 d19, {d14-d15}, d1 - - // blend - vshr.s8 q0, q0, #7 // convert to 8-bit mask - vbsl.8 q0, q2, q1 - - // fold 16 Bytes - vmull.p64 q8, d18, d20 - vmull.p64 q7, d19, d21 +// +// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// +ENTRY(crc_t10dif_pmull) + + // For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp len, #256 + blt .Lless_than_256_bytes + + __adrl fold_consts_ptr, .Lfold_across_128_bytes_consts + + // Load the first 128 data bytes. Byte swapping is necessary to make + // the bit order match the polynomial coefficient order. + vld1.64 {q0-q1}, [buf]! + vld1.64 {q2-q3}, [buf]! + vld1.64 {q4-q5}, [buf]! + vld1.64 {q6-q7}, [buf]! +CPU_LE( vrev64.8 q0, q0 ) +CPU_LE( vrev64.8 q1, q1 ) +CPU_LE( vrev64.8 q2, q2 ) +CPU_LE( vrev64.8 q3, q3 ) +CPU_LE( vrev64.8 q4, q4 ) +CPU_LE( vrev64.8 q5, q5 ) +CPU_LE( vrev64.8 q6, q6 ) +CPU_LE( vrev64.8 q7, q7 ) + vswp q0l, q0h + vswp q1l, q1h + vswp q2l, q2h + vswp q3l, q3h + vswp q4l, q4h + vswp q5l, q5h + vswp q6l, q6h + vswp q7l, q7h + + // XOR the first 16 data *bits* with the initial CRC value. + vmov.i8 q8h, #0 + vmov.u16 q8h[3], init_crc + veor q0h, q0h, q8h + + // Load the constants for folding across 128 bytes. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! 
+ + // Subtract 128 for the 128 data bytes just consumed. Subtract another + // 128 to simplify the termination condition of the following loop. + sub len, len, #256 + + // While >= 128 data bytes remain (not counting q0-q7), fold the 128 + // bytes q0-q7 into them, storing the result back into q0-q7. +.Lfold_128_bytes_loop: + fold_32_bytes q0, q1 + fold_32_bytes q2, q3 + fold_32_bytes q4, q5 + fold_32_bytes q6, q7 + subs len, len, #128 + bge .Lfold_128_bytes_loop + + // Now fold the 112 bytes in q0-q6 into the 16 bytes in q7. + + // Fold across 64 bytes. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + fold_16_bytes q0, q4 + fold_16_bytes q1, q5 + fold_16_bytes q2, q6 + fold_16_bytes q3, q7, 1 + // Fold across 32 bytes. + fold_16_bytes q4, q6 + fold_16_bytes q5, q7, 1 + // Fold across 16 bytes. + fold_16_bytes q6, q7 + + // Add 128 to get the correct number of data bytes remaining in 0...127 + // (not counting q7), following the previous extra subtraction by 128. + // Then subtract 16 to simplify the termination condition of the + // following loop. + adds len, len, #(128-16) + + // While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7 + // into them, storing the result back into q7. + blt .Lfold_16_bytes_loop_done +.Lfold_16_bytes_loop: + vmull.p64 q8, q7l, FOLD_CONST_L + vmull.p64 q7, q7h, FOLD_CONST_H veor.8 q7, q7, q8 + vld1.64 {q0}, [buf]! +CPU_LE( vrev64.8 q0, q0 ) + vswp q0l, q0h veor.8 q7, q7, q0 - -_128_done: - // compute crc of a 128-bit value - vldr d20, rk5 - vldr d21, rk6 // rk5 and rk6 in xmm10 - - // 64b fold - vext.8 q0, qzr, q7, #8 - vmull.p64 q7, d15, d20 + subs len, len, #16 + bge .Lfold_16_bytes_loop + +.Lfold_16_bytes_loop_done: + // Add 16 to get the correct number of data bytes remaining in 0...15 + // (not counting q7), following the previous extra subtraction by 16. + adds len, len, #16 + beq .Lreduce_final_16_bytes + +.Lhandle_partial_segment: + // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first + // 16 bytes are in q7 and the rest are the remaining data in 'buf'. To + // do this without needing a fold constant for each possible 'len', + // redivide the bytes into a first chunk of 'len' bytes and a second + // chunk of 16 bytes, then fold the first chunk into the second. + + // q0 = last 16 original data bytes + add buf, buf, len + sub buf, buf, #16 + vld1.64 {q0}, [buf] +CPU_LE( vrev64.8 q0, q0 ) + vswp q0l, q0h + + // q1 = high order part of second chunk: q7 left-shifted by 'len' bytes. + __adrl r3, .Lbyteshift_table + 16 + sub r3, r3, len + vld1.8 {q2}, [r3] + vtbl.8 q1l, {q7l-q7h}, q2l + vtbl.8 q1h, {q7l-q7h}, q2h + + // q3 = first chunk: q7 right-shifted by '16-len' bytes. + vmov.i8 q3, #0x80 + veor.8 q2, q2, q3 + vtbl.8 q3l, {q7l-q7h}, q2l + vtbl.8 q3h, {q7l-q7h}, q2h + + // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes. + vshr.s8 q2, q2, #7 + + // q2 = second chunk: 'len' bytes from q0 (low-order bytes), + // then '16-len' bytes from q1 (high-order bytes). + vbsl.8 q2, q1, q0 + + // Fold the first chunk into the second chunk, storing the result in q7. + vmull.p64 q0, q3l, FOLD_CONST_L + vmull.p64 q7, q3h, FOLD_CONST_H veor.8 q7, q7, q0 + veor.8 q7, q7, q2 + +.Lreduce_final_16_bytes: + // Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC. + + // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! + + // Fold the high 64 bits into the low 64 bits, while also multiplying by + // x^64. 
This produces a 128-bit value congruent to x^64 * M(x) and + // whose low 48 bits are 0. + vmull.p64 q0, q7h, FOLD_CONST_H // high bits * x^48 * (x^80 mod G(x)) + veor.8 q0h, q0h, q7l // + low bits * x^64 + + // Fold the high 32 bits into the low 96 bits. This produces a 96-bit + // value congruent to x^64 * M(x) and whose low 48 bits are 0. + vmov.i8 q1, #0 + vmov s4, s3 // extract high 32 bits + vmov s3, s5 // zero high 32 bits + vmull.p64 q1, q1l, FOLD_CONST_L // high 32 bits * x^48 * (x^48 mod G(x)) + veor.8 q0, q0, q1 // + low bits + + // Load G(x) and floor(x^48 / G(x)). + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128] + + // Use Barrett reduction to compute the final CRC value. + vmull.p64 q1, q0h, FOLD_CONST_H // high 32 bits * floor(x^48 / G(x)) + vshr.u64 q1l, q1l, #32 // /= x^32 + vmull.p64 q1, q1l, FOLD_CONST_L // *= G(x) + vshr.u64 q0l, q0l, #48 + veor.8 q0l, q0l, q1l // + low 16 nonzero bits + // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0. + + vmov.u16 r0, q0l[0] + bx lr - // 32b fold - vext.8 q0, q7, qzr, #12 - vmov s31, s3 - vmull.p64 q0, d0, d21 - veor.8 q7, q0, q7 - - // barrett reduction -_barrett: - vldr d20, rk7 - vldr d21, rk8 - - vmull.p64 q0, d15, d20 - vext.8 q0, qzr, q0, #12 - vmull.p64 q0, d1, d21 - vext.8 q0, qzr, q0, #12 - veor.8 q7, q7, q0 - vmov r0, s29 +.Lless_than_256_bytes: + // Checksumming a buffer of length 16...255 bytes -_cleanup: - // scale the result back to 16 bits - lsr r0, r0, #16 - bx lr + __adrl fold_consts_ptr, .Lfold_across_16_bytes_consts -_less_than_128: - teq arg3, #0 - beq _cleanup + // Load the first 16 data bytes. + vld1.64 {q7}, [buf]! +CPU_LE( vrev64.8 q7, q7 ) + vswp q7l, q7h - vmov.i8 q0, #0 - vmov s3, arg1_low32 // get the initial crc value + // XOR the first 16 data *bits* with the initial CRC value. + vmov.i8 q0h, #0 + vmov.u16 q0h[3], init_crc + veor.8 q7h, q7h, q0h - vld1.64 {q7}, [arg2, :128]! -CPU_LE( vrev64.8 q7, q7 ) - vswp d14, d15 - veor.8 q7, q7, q0 + // Load the fold-across-16-bytes constants. + vld1.64 {FOLD_CONSTS}, [fold_consts_ptr, :128]! 
- cmp arg3, #16 - beq _128_done // exactly 16 left - blt _less_than_16_left - - // now if there is, load the constants - vldr d20, rk1 - vldr d21, rk2 // rk1 and rk2 in xmm10 - - // check if there is enough buffer to be able to fold 16B at a time - subs arg3, arg3, #32 - addlt arg3, arg3, #16 - blt _get_last_two_regs - b _16B_reduction_loop - -_less_than_16_left: - // shl r9, 4 - adr ip, tbl_shf_table + 16 - sub ip, ip, arg3 - vld1.8 {q0}, [ip] - vmov.i8 q9, #0x80 - veor.8 q0, q0, q9 - vtbl.8 d18, {d14-d15}, d0 - vtbl.8 d15, {d14-d15}, d1 - vmov d14, d18 - b _128_done + cmp len, #16 + beq .Lreduce_final_16_bytes // len == 16 + subs len, len, #32 + addlt len, len, #16 + blt .Lhandle_partial_segment // 17 <= len <= 31 + b .Lfold_16_bytes_loop // 32 <= len <= 255 ENDPROC(crc_t10dif_pmull) -// precomputed constants -// these constants are precomputed from the poly: -// 0x8bb70000 (0x8bb7 scaled to 32 bits) + .section ".rodata", "a" .align 4 -// Q = 0x18BB70000 -// rk1 = 2^(32*3) mod Q << 32 -// rk2 = 2^(32*5) mod Q << 32 -// rk3 = 2^(32*15) mod Q << 32 -// rk4 = 2^(32*17) mod Q << 32 -// rk5 = 2^(32*3) mod Q << 32 -// rk6 = 2^(32*2) mod Q << 32 -// rk7 = floor(2^64/Q) -// rk8 = Q - -rk3: .quad 0x9d9d000000000000 -rk4: .quad 0x7cf5000000000000 -rk5: .quad 0x2d56000000000000 -rk6: .quad 0x1368000000000000 -rk7: .quad 0x00000001f65a57f8 -rk8: .quad 0x000000018bb70000 -rk9: .quad 0xceae000000000000 -rk10: .quad 0xbfd6000000000000 -rk11: .quad 0x1e16000000000000 -rk12: .quad 0x713c000000000000 -rk13: .quad 0xf7f9000000000000 -rk14: .quad 0x80a6000000000000 -rk15: .quad 0x044c000000000000 -rk16: .quad 0xe658000000000000 -rk17: .quad 0xad18000000000000 -rk18: .quad 0xa497000000000000 -rk19: .quad 0x6ee3000000000000 -rk20: .quad 0xe7b5000000000000 -rk1: .quad 0x2d56000000000000 -rk2: .quad 0x06df000000000000 - -tbl_shf_table: -// use these values for shift constants for the tbl/tbx instruction -// different alignments result in values as shown: -// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 -// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 +// Fold constants precomputed from the polynomial 0x18bb7 +// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 // x^(8*128) mod G(x) + .quad 0x0000000000002295 // x^(8*128+64) mod G(x) +// .Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 // x^(4*128) mod G(x) + .quad 0x000000000000dd31 // x^(4*128+64) mod G(x) +// .Lfold_across_32_bytes_consts: + .quad 0x000000000000857d // x^(2*128) mod G(x) + .quad 0x0000000000007acc // x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 
0x000000000000a010 // x^(1*128) mod G(x) + .quad 0x0000000000001faa // x^(1*128+64) mod G(x) +// .Lfinal_fold_consts: + .quad 0x1368000000000000 // x^48 * (x^48 mod G(x)) + .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x)) +// .Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 // G(x) + .quad 0x00000001f65a57f8 // floor(x^48 / G(x)) + +// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - +// len] is the index vector to shift left by 'len' bytes, and is also {0x80, +// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 diff --git a/arch/arm/crypto/crct10dif-ce-glue.c b/arch/arm/crypto/crct10dif-ce-glue.c index d428355cf38d..3d6b800b8396 100644 --- a/arch/arm/crypto/crct10dif-ce-glue.c +++ b/arch/arm/crypto/crct10dif-ce-glue.c @@ -21,7 +21,7 @@ #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U -asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u32 len); +asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len); static int crct10dif_init(struct shash_desc *desc) { @@ -35,26 +35,15 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (!may_use_simd()) { - *crc = crc_t10dif_generic(*crc, data, length); + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull(*crc, data, length); + kernel_neon_end(); } else { - if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); - - *crc = crc_t10dif_generic(*crc, data, l); - - length -= l; - data += l; - } - if (length > 0) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } + *crc = crc_t10dif_generic(*crc, data, length); } + return 0; } diff --git a/arch/arm/crypto/sha256-armv4.pl b/arch/arm/crypto/sha256-armv4.pl index b9ec44060ed3..a03cf4dfb781 100644 --- a/arch/arm/crypto/sha256-armv4.pl +++ b/arch/arm/crypto/sha256-armv4.pl @@ -212,10 +212,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped index 3b58300d611c..054aae0edfce 100644 --- a/arch/arm/crypto/sha256-core.S_shipped +++ b/arch/arm/crypto/sha256-core.S_shipped @@ -93,10 +93,11 @@ K256: .global sha256_block_data_order .type sha256_block_data_order,%function sha256_block_data_order: +.Lsha256_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha256_block_data_order #else - adr r3,sha256_block_data_order + adr r3,.Lsha256_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-armv4.pl b/arch/arm/crypto/sha512-armv4.pl index fb5d15048c0b..788c17b56ecc 100644 --- a/arch/arm/crypto/sha512-armv4.pl +++ b/arch/arm/crypto/sha512-armv4.pl @@ -274,10 +274,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: 
+.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped index b1c334a49cda..710ea309769e 100644 --- a/arch/arm/crypto/sha512-core.S_shipped +++ b/arch/arm/crypto/sha512-core.S_shipped @@ -141,10 +141,11 @@ WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817) .global sha512_block_data_order .type sha512_block_data_order,%function sha512_block_data_order: +.Lsha512_block_data_order: #if __ARM_ARCH__<7 sub r3,pc,#8 @ sha512_block_data_order #else - adr r3,sha512_block_data_order + adr r3,.Lsha512_block_data_order #endif #if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) ldr r12,.LOPENSSL_armcap diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h index 0bd530702118..f6f485f4744e 100644 --- a/arch/arm/include/asm/arch_gicv3.h +++ b/arch/arm/include/asm/arch_gicv3.h @@ -34,6 +34,7 @@ #define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5) #define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7) #define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3) +#define ICC_RPR __ACCESS_CP15(c12, 0, c11, 3) #define __ICC_AP0Rx(x) __ACCESS_CP15(c12, 0, c8, 4 | x) #define ICC_AP0R0 __ICC_AP0Rx(0) @@ -245,6 +246,21 @@ static inline void gic_write_bpr1(u32 val) write_sysreg(val, ICC_BPR1); } +static inline u32 gic_read_pmr(void) +{ + return read_sysreg(ICC_PMR); +} + +static inline void gic_write_pmr(u32 val) +{ + write_sysreg(val, ICC_PMR); +} + +static inline u32 gic_read_rpr(void) +{ + return read_sysreg(ICC_RPR); +} + /* * Even in 32bit systems that use LPAE, there is no guarantee that the I/O * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't @@ -347,5 +363,22 @@ static inline void gits_write_vpendbaser(u64 val, void * __iomem addr) #define gits_read_vpendbaser(c) __gic_readq_nonatomic(c) +static inline bool gic_prio_masking_enabled(void) +{ + return false; +} + +static inline void gic_pmr_mask_irqs(void) +{ + /* Should not get called. */ + WARN_ON_ONCE(true); +} + +static inline void gic_arch_enable_irqs(void) +{ + /* Should not get called. 
*/ + WARN_ON_ONCE(true); +} + #endif /* !__ASSEMBLY__ */ #endif /* !__ASM_ARCH_GICV3_H */ diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 31d3b96f0f4b..03ba90ffc0f8 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -96,15 +96,6 @@ static inline unsigned long dma_max_pfn(struct device *dev) } #define dma_max_pfn(dev) dma_max_pfn(dev) -#define arch_setup_dma_ops arch_setup_dma_ops -extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent); - -#ifdef CONFIG_MMU -#define arch_teardown_dma_ops arch_teardown_dma_ops -extern void arch_teardown_dma_ops(struct device *dev); -#endif - /* do not use this function in a driver */ static inline bool is_device_dma_coherent(struct device *dev) { diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b6..46d41140df27 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h @@ -25,7 +25,6 @@ #ifndef __ASSEMBLY__ struct irqaction; struct pt_regs; -extern void migrate_irqs(void); extern void asm_do_IRQ(unsigned int, struct pt_regs *); void handle_IRQ(unsigned int, struct pt_regs *); diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ca56537b61bc..50e89869178a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -48,6 +48,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -147,6 +148,13 @@ struct kvm_cpu_context { typedef struct kvm_cpu_context kvm_cpu_context_t; +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; +}; + struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -186,6 +194,8 @@ struct kvm_vcpu_arch { /* Cache some mmu pages needed inside spinlock regions */ struct kvm_mmu_memory_cache mmu_page_cache; + struct vcpu_reset_state reset_state; + /* Detect first run of a vcpu */ bool has_run_once; }; diff --git a/arch/arm/include/asm/kvm_ras.h b/arch/arm/include/asm/kvm_ras.h new file mode 100644 index 000000000000..e9577292dfe4 --- /dev/null +++ b/arch/arm/include/asm/kvm_ras.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 - Arm Ltd */ + +#ifndef __ARM_KVM_RAS_H__ +#define __ARM_KVM_RAS_H__ + +#include + +static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr) +{ + return -1; +} + +#endif /* __ARM_KVM_RAS_H__ */ diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index c4b1d4fb1797..de2089501b8b 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h @@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) #define S2_PMD_MASK PMD_MASK #define S2_PMD_SIZE PMD_SIZE +static inline bool kvm_stage2_has_pmd(struct kvm *kvm) +{ + return true; +} + #endif /* __ARM_S2_PGTABLE_H_ */ diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 8e76db83c498..66f6a3ae68d2 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -38,11 +38,6 @@ static inline void harden_branch_predictor(void) extern unsigned int user_debug; -static inline int handle_guest_sea(phys_addr_t addr, unsigned int esr) -{ - return -1; -} - #endif /* !__ASSEMBLY__ */ #endif /* __ASM_ARM_SYSTEM_MISC_H */ 
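Stepping back to the CRC-T10DIF rewrite earlier in this patch: the fold-constant table and the Barrett-reduction comments added to arch/arm/crypto/crct10dif-ce-core.S can be restated compactly in equation form. The following is only a summary of what those in-file comments already say, not additional patch content.

\[
G(x) = x^{16} + x^{15} + x^{11} + x^{9} + x^{8} + x^{7} + x^{5} + x^{4} + x^{2} + x + 1
\]

Folding the working registers over the next $N$ data bytes multiplies by $x^{8N} \bmod G(x)$, which is why the constant table stores the pairs $\bigl(x^{8N} \bmod G(x),\; x^{8N+64} \bmod G(x)\bigr)$ for $N = 128, 64, 32, 16$. The value finally returned for a message $M(x)$ is

\[
\mathrm{crc} = \bigl(x^{16}\, M(x)\bigr) \bmod G(x),
\]

computed by Barrett reduction with the two remaining precomputed constants, $G(x)$ itself and $\lfloor x^{48} / G(x) \rfloor$.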
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 42aa4a22803c..ae5a0df5316e 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -59,7 +59,6 @@ extern int __put_user_bad(void); * Note that this is actually 0x1,0000,0000 */ #define KERNEL_DS 0x00000000 -#define get_ds() (KERNEL_DS) #ifdef CONFIG_MMU diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 88ef2ce1f69a..7a39e77984ef 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -26,10 +26,10 @@ #define __ARCH_WANT_SYS_SIGPROCMASK #define __ARCH_WANT_SYS_OLD_MMAP #define __ARCH_WANT_SYS_OLD_SELECT -#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_UTIME32 #if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) -#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_TIME32 #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_ALARM @@ -45,7 +45,6 @@ * Unimplemented (or alternatively implemented) syscalls */ #define __IGNORE_fadvise64_64 -#define __IGNORE_migrate_pages #ifdef __ARM_EABI__ /* diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229..844861368cd5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) return nr_irqs; } #endif - -#ifdef CONFIG_HOTPLUG_CPU -static bool migrate_one_irq(struct irq_desc *desc) -{ - struct irq_data *d = irq_desc_get_irq_data(desc); - const struct cpumask *affinity = irq_data_get_affinity_mask(d); - struct irq_chip *c; - bool ret = false; - - /* - * If this is a per-CPU interrupt, or the affinity does not - * include this CPU, then we have nothing to do. - */ - if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) - return false; - - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { - affinity = cpu_online_mask; - ret = true; - } - - c = irq_data_get_irq_chip(d); - if (!c->irq_set_affinity) - pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) - cpumask_copy(irq_data_get_affinity_mask(d), affinity); - - return ret; -} - -/* - * The current CPU has been marked offline. Migrate IRQs off this CPU. - * If the affinity settings do not allow other CPUs, force them onto any - * available CPU. - * - * Note: we must iterate over all IRQs, whether they have an attached - * action structure or not, as we need to get chained interrupts too. 
- */ -void migrate_irqs(void) -{ - unsigned int i; - struct irq_desc *desc; - unsigned long flags; - - local_irq_save(flags); - - for_each_irq_desc(i, desc) { - bool affinity_broken; - - raw_spin_lock(&desc->lock); - affinity_broken = migrate_one_irq(desc); - raw_spin_unlock(&desc->lock); - - if (affinity_broken) - pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", - i, smp_processor_id()); - } - - local_irq_restore(flags); -} -#endif /* CONFIG_HOTPLUG_CPU */ diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 16601d1442d1..72cc0862a30e 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -150,7 +150,7 @@ void __show_regs(struct pt_regs *regs) if ((domain & domain_mask(DOMAIN_USER)) == domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) segment = "none"; - else if (fs == get_ds()) + else if (fs == KERNEL_DS) segment = "kernel"; else segment = "user"; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 375b13f7e780..5d78b6ac0429 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -867,6 +867,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) boot_alias_start = phys_to_idmap(start); if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes\n", + __func__, sizeof(*res)); res->name = "System RAM (boot alias)"; res->start = boot_alias_start; res->end = phys_to_idmap(end); @@ -875,6 +878,9 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) } res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*res)); res->name = "System RAM"; res->start = start; res->end = end; diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3bf82232b1be..1d6f5ea522f4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -254,7 +254,7 @@ int __cpu_disable(void) /* * OK - migrate IRQs away from this CPU */ - migrate_irqs(); + irq_migrate_all_off_this_cpu(); /* * Flush user cache and TLB mappings, and then remove this CPU diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 92ab36f38795..acd054a42ba2 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -317,10 +317,10 @@ struct oabi_sembuf { asmlinkage long sys_oabi_semtimedop(int semid, struct oabi_sembuf __user *tsops, unsigned nsops, - const struct timespec __user *timeout) + const struct old_timespec32 __user *timeout) { struct sembuf *sops; - struct timespec local_timeout; + struct old_timespec32 local_timeout; long err; int i; @@ -350,7 +350,7 @@ asmlinkage long sys_oabi_semtimedop(int semid, } else { mm_segment_t fs = get_fs(); set_fs(KERNEL_DS); - err = sys_semtimedop(semid, sops, nsops, timeout); + err = sys_semtimedop_time32(semid, sops, nsops, timeout); set_fs(fs); } kfree(sops); @@ -375,7 +375,7 @@ asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, return sys_oabi_semtimedop(first, (struct oabi_sembuf __user *)ptr, second, - (const struct timespec __user *)fifth); + (const struct old_timespec32 __user *)fifth); default: return sys_ipc(call, first, second, third, ptr, fifth); } diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 222c1635bc7a..e8bd288fd5be 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) 
reset_coproc_regs(vcpu, table, num); for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu_cp15(vcpu, num) == 0x42424242) - panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); + WARN(vcpu_cp15(vcpu, num) == 0x42424242, + "Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index 5ed0c3ee33d6..e53327912adc 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. + */ + if (READ_ONCE(vcpu->arch.reset_state.reset)) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (target_pc & 1) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } + /* Reset arch_timer context */ return kvm_timer_vcpu_reset(vcpu); } diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index a067adf9f1ee..4ef1e55f4a0b 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -167,6 +167,7 @@ config ARCH_BCM2835 select BCM2835_TIMER select PINCTRL select PINCTRL_BCM2835 + select MFD_CORE help This enables support for the Broadcom BCM2835 and BCM2836 SoCs. This SoC is used in the Raspberry Pi and Roku 2 devices. diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c index 318394ed5c7a..95a11d5b3587 100644 --- a/arch/arm/mach-cns3xxx/pcie.c +++ b/arch/arm/mach-cns3xxx/pcie.c @@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus, } else /* remote PCI bus */ base = cnspci->cfg1_regs + ((busno & 0xf) << 20); - return base + (where & 0xffc) + (devfn << 12); + return base + where + (devfn << 12); } static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, @@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, u32 mask = (0x1ull << (size * 8)) - 1; int shift = (where % 4) * 8; - ret = pci_generic_config_read32(bus, devfn, where, size, val); + ret = pci_generic_config_read(bus, devfn, where, size, val); if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && (where & 0xffc) == PCI_CLASS_REVISION) diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index da8a039d65f9..5a59cebc7d0a 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -1,13 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 if ARCH_DAVINCI -config AINTC - bool - -config CP_INTC - bool - select IRQ_DOMAIN - config ARCH_DAVINCI_DMx bool @@ -17,17 +10,17 @@ comment "DaVinci Core Type" config ARCH_DAVINCI_DM644x bool "DaVinci 644x based system" - select AINTC + select DAVINCI_AINTC select ARCH_DAVINCI_DMx config ARCH_DAVINCI_DM355 bool "DaVinci 355 based system" - select AINTC + select DAVINCI_AINTC select ARCH_DAVINCI_DMx config ARCH_DAVINCI_DM646x bool "DaVinci 646x based system" - select AINTC + select DAVINCI_AINTC select ARCH_DAVINCI_DMx config ARCH_DAVINCI_DA830 @@ -36,20 +29,20 @@ config ARCH_DAVINCI_DA830 select ARCH_DAVINCI_DA8XX # needed on silicon revs 1.0, 1.1: select CPU_DCACHE_WRITETHROUGH if !CPU_DCACHE_DISABLE - select CP_INTC + select DAVINCI_CP_INTC config 
ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" depends on !ARCH_DAVINCI_DMx || (AUTO_ZRELADDR && ARM_PATCH_PHYS_VIRT) select ARCH_DAVINCI_DA8XX - select CP_INTC + select DAVINCI_CP_INTC config ARCH_DAVINCI_DA8XX bool config ARCH_DAVINCI_DM365 bool "DaVinci 365 based system" - select AINTC + select DAVINCI_AINTC select ARCH_DAVINCI_DMx comment "DaVinci Board Type" diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile index 93d271b4d84b..f76a8482784f 100644 --- a/arch/arm/mach-davinci/Makefile +++ b/arch/arm/mach-davinci/Makefile @@ -18,9 +18,6 @@ obj-$(CONFIG_ARCH_DAVINCI_DM365) += dm365.o devices.o obj-$(CONFIG_ARCH_DAVINCI_DA830) += da830.o devices-da8xx.o usb-da8xx.o obj-$(CONFIG_ARCH_DAVINCI_DA850) += da850.o devices-da8xx.o usb-da8xx.o -obj-$(CONFIG_AINTC) += irq.o -obj-$(CONFIG_CP_INTC) += cp_intc.o - # Board specific obj-$(CONFIG_MACH_DA8XX_DT) += da8xx-dt.o pdata-quirks.o obj-$(CONFIG_MACH_DAVINCI_EVM) += board-dm644x-evm.o diff --git a/arch/arm/mach-davinci/asp.h b/arch/arm/mach-davinci/asp.h index 495aa6907cbc..d0ecd1d0f084 100644 --- a/arch/arm/mach-davinci/asp.h +++ b/arch/arm/mach-davinci/asp.h @@ -49,9 +49,9 @@ #define DAVINCI_DA830_DMA_MCASP2_AXEVT 5 /* Interrupts */ -#define DAVINCI_ASP0_RX_INT IRQ_MBRINT -#define DAVINCI_ASP0_TX_INT IRQ_MBXINT -#define DAVINCI_ASP1_RX_INT IRQ_MBRINT -#define DAVINCI_ASP1_TX_INT IRQ_MBXINT +#define DAVINCI_ASP0_RX_INT DAVINCI_INTC_IRQ(IRQ_MBRINT) +#define DAVINCI_ASP0_TX_INT DAVINCI_INTC_IRQ(IRQ_MBXINT) +#define DAVINCI_ASP1_RX_INT DAVINCI_INTC_IRQ(IRQ_MBRINT) +#define DAVINCI_ASP1_TX_INT DAVINCI_INTC_IRQ(IRQ_MBXINT) #endif /* __ASM_ARCH_DAVINCI_ASP_H */ diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index c4da635ee4ce..ff097ecfa451 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -36,10 +36,11 @@ #include #include -#include "cp_intc.h" #include #include +#include "irqs.h" + #define DA830_EVM_PHY_ID "" /* * USB1 VBUS is controlled by GPIO1[15], over-current is reported on GPIO2[4]. 
@@ -52,62 +53,19 @@ static const short da830_evm_usb11_pins[] = { -1 }; -static da8xx_ocic_handler_t da830_evm_usb_ocic_handler; - -static int da830_evm_usb_set_power(unsigned port, int on) -{ - gpio_set_value(ON_BD_USB_DRV, on); - return 0; -} - -static int da830_evm_usb_get_power(unsigned port) -{ - return gpio_get_value(ON_BD_USB_DRV); -} - -static int da830_evm_usb_get_oci(unsigned port) -{ - return !gpio_get_value(ON_BD_USB_OVC); -} - -static irqreturn_t da830_evm_usb_ocic_irq(int, void *); - -static int da830_evm_usb_ocic_notify(da8xx_ocic_handler_t handler) -{ - int irq = gpio_to_irq(ON_BD_USB_OVC); - int error = 0; - - if (handler != NULL) { - da830_evm_usb_ocic_handler = handler; - - error = request_irq(irq, da830_evm_usb_ocic_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "OHCI over-current indicator", NULL); - if (error) - pr_err("%s: could not request IRQ to watch over-current indicator changes\n", - __func__); - } else - free_irq(irq, NULL); - - return error; -} +static struct gpiod_lookup_table da830_evm_usb_gpio_lookup = { + .dev_id = "ohci-da8xx", + .table = { + GPIO_LOOKUP("davinci_gpio", ON_BD_USB_DRV, "vbus", 0), + GPIO_LOOKUP("davinci_gpio", ON_BD_USB_OVC, "oc", 0), + }, +}; static struct da8xx_ohci_root_hub da830_evm_usb11_pdata = { - .set_power = da830_evm_usb_set_power, - .get_power = da830_evm_usb_get_power, - .get_oci = da830_evm_usb_get_oci, - .ocic_notify = da830_evm_usb_ocic_notify, - /* TPS2065 switch @ 5V */ .potpgt = (3 + 1) / 2, /* 3 ms max */ }; -static irqreturn_t da830_evm_usb_ocic_irq(int irq, void *dev_id) -{ - da830_evm_usb_ocic_handler(&da830_evm_usb11_pdata, 1); - return IRQ_HANDLED; -} - static __init void da830_evm_usb_init(void) { int ret; @@ -142,21 +100,7 @@ static __init void da830_evm_usb_init(void) return; } - ret = gpio_request(ON_BD_USB_DRV, "ON_BD_USB_DRV"); - if (ret) { - pr_err("%s: failed to request GPIO for USB 1.1 port power control: %d\n", - __func__, ret); - return; - } - gpio_direction_output(ON_BD_USB_DRV, 0); - - ret = gpio_request(ON_BD_USB_OVC, "ON_BD_USB_OVC"); - if (ret) { - pr_err("%s: failed to request GPIO for USB 1.1 port over-current indicator: %d\n", - __func__, ret); - return; - } - gpio_direction_input(ON_BD_USB_OVC); + gpiod_add_lookup_table(&da830_evm_usb_gpio_lookup); ret = da8xx_register_usb11(&da830_evm_usb11_pdata); if (ret) @@ -457,12 +401,9 @@ static struct nvmem_cell_lookup da830_evm_nvmem_cell_lookup = { .con_id = "mac-address", }; -static struct at24_platform_data da830_evm_i2c_eeprom_info = { - .byte_len = SZ_256K / 8, - .page_size = 64, - .flags = AT24_FLAG_ADDR16, - .setup = davinci_get_mac_addr, - .context = (void *)0x7f00, +static const struct property_entry da830_evm_i2c_eeprom_properties[] = { + PROPERTY_ENTRY_U32("pagesize", 64), + { } }; static int __init da830_evm_ui_expander_setup(struct i2c_client *client, @@ -496,7 +437,7 @@ static struct pcf857x_platform_data __initdata da830_evm_ui_expander_info = { static struct i2c_board_info __initdata da830_evm_i2c_devices[] = { { I2C_BOARD_INFO("24c256", 0x50), - .platform_data = &da830_evm_i2c_eeprom_info, + .properties = da830_evm_i2c_eeprom_properties, }, { I2C_BOARD_INFO("tlv320aic3x", 0x18), @@ -693,7 +634,7 @@ static void __init da830_evm_map_io(void) MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM") .atag_offset = 0x100, .map_io = da830_evm_map_io, - .init_irq = cp_intc_init, + .init_irq = da830_init_irq, .init_time = da830_init_time, .init_machine = da830_evm_init, .init_late = davinci_init_late, diff --git 
a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index 44bca048dfd0..1fdc9283a8c5 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c @@ -43,9 +43,10 @@ #include #include -#include "cp_intc.h" #include #include + +#include "irqs.h" #include "sram.h" #include @@ -150,32 +151,6 @@ static struct spi_board_info da850evm_spi_info[] = { }, }; -#ifdef CONFIG_MTD -static void da850_evm_m25p80_notify_add(struct mtd_info *mtd) -{ - char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; - size_t retlen; - - if (!strcmp(mtd->name, "MAC-Address")) { - mtd_read(mtd, 0, ETH_ALEN, &retlen, mac_addr); - if (retlen == ETH_ALEN) - pr_info("Read MAC addr from SPI Flash: %pM\n", - mac_addr); - } -} - -static struct mtd_notifier da850evm_spi_notifier = { - .add = da850_evm_m25p80_notify_add, -}; - -static void da850_evm_setup_mac_addr(void) -{ - register_mtd_user(&da850evm_spi_notifier); -} -#else -static void da850_evm_setup_mac_addr(void) { } -#endif - static struct mtd_partition da850_evm_norflash_partition[] = { { .name = "bootloaders + env", @@ -1064,6 +1039,17 @@ static const short da850_evm_rmii_pins[] = { -1 }; +static struct gpiod_hog da850_evm_emac_gpio_hogs[] = { + { + .chip_label = "davinci_gpio", + .chip_hwnum = DA850_MII_MDIO_CLKEN_PIN, + .line_name = "mdio_clk_en", + .lflags = 0, + /* dflags set in da850_evm_config_emac() */ + }, + { } +}; + static int __init da850_evm_config_emac(void) { void __iomem *cfg_chip3_base; @@ -1102,14 +1088,9 @@ static int __init da850_evm_config_emac(void) if (ret) pr_warn("%s:GPIO(2,6) mux setup failed\n", __func__); - ret = gpio_request(DA850_MII_MDIO_CLKEN_PIN, "mdio_clk_en"); - if (ret) { - pr_warn("Cannot open GPIO %d\n", DA850_MII_MDIO_CLKEN_PIN); - return ret; - } - - /* Enable/Disable MII MDIO clock */ - gpio_direction_output(DA850_MII_MDIO_CLKEN_PIN, rmii_en); + da850_evm_emac_gpio_hogs[0].dflags = rmii_en ? 
GPIOD_OUT_HIGH + : GPIOD_OUT_LOW; + gpiod_add_hogs(da850_evm_emac_gpio_hogs); soc_info->emac_pdata->phy_id = DA850_EVM_PHY_ID; @@ -1494,8 +1475,6 @@ static __init void da850_evm_init(void) if (ret) pr_warn("%s: SATA registration failed: %d\n", __func__, ret); - da850_evm_setup_mac_addr(); - ret = da8xx_register_rproc(); if (ret) pr_warn("%s: dsp/rproc registration failed: %d\n", @@ -1521,7 +1500,7 @@ static void __init da850_evm_map_io(void) MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM") .atag_offset = 0x100, .map_io = da850_evm_map_io, - .init_irq = cp_intc_init, + .init_irq = da850_init_irq, .init_time = da850_init_time, .init_machine = da850_evm_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index f7fa960c23e3..64d81fc86f14 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c @@ -438,7 +438,7 @@ static __init void dm355_evm_init(void) MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM") .atag_offset = 0x100, .map_io = dm355_evm_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm355_init_irq, .init_time = dm355_init_time, .init_machine = dm355_evm_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index 0fdf1d03eb11..b9e9950dd300 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c @@ -273,7 +273,7 @@ static __init void dm355_leopard_init(void) MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard") .atag_offset = 0x100, .map_io = dm355_leopard_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm355_init_irq, .init_time = dm355_init_time, .init_machine = dm355_leopard_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index e3b0b701e395..150a36f333df 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include @@ -225,18 +225,15 @@ static struct nvmem_cell_lookup davinci_nvmem_cell_lookup = { .con_id = "mac-address", }; -static struct at24_platform_data eeprom_info = { - .byte_len = (256*1024) / 8, - .page_size = 64, - .flags = AT24_FLAG_ADDR16, - .setup = davinci_get_mac_addr, - .context = (void *)0x7f00, +static const struct property_entry eeprom_properties[] = { + PROPERTY_ENTRY_U32("pagesize", 64), + { } }; static struct i2c_board_info i2c_info[] = { { I2C_BOARD_INFO("24c256", 0x50), - .platform_data = &eeprom_info, + .properties = eeprom_properties, }, { I2C_BOARD_INFO("tlv320aic3x", 0x18), @@ -834,7 +831,7 @@ static __init void dm365_evm_init(void) MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM") .atag_offset = 0x100, .map_io = dm365_evm_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm365_init_irq, .init_time = dm365_init_time, .init_machine = dm365_evm_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index b80c4ee76217..de15f782816e 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -16,8 +16,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -36,9 +36,10 @@ #include #include -#include -#include #include +#include + +#include #include #include #include @@ -46,6 +47,7 @@ #include 
#include "davinci.h" +#include "irqs.h" #define DM644X_EVM_PHY_ID "davinci_mdio-0:01" #define LXT971_PHY_ID (0x001378e2) @@ -532,12 +534,9 @@ static struct nvmem_cell_lookup dm644evm_nvmem_cell_lookup = { .con_id = "mac-address", }; -static struct at24_platform_data eeprom_info = { - .byte_len = (256*1024) / 8, - .page_size = 64, - .flags = AT24_FLAG_ADDR16, - .setup = davinci_get_mac_addr, - .context = (void *)0x7f00, +static const struct property_entry eeprom_properties[] = { + PROPERTY_ENTRY_U32("pagesize", 64), + { } }; /* @@ -647,7 +646,7 @@ static struct i2c_board_info __initdata i2c_info[] = { }, { I2C_BOARD_INFO("24c256", 0x50), - .platform_data = &eeprom_info, + .properties = eeprom_properties, }, { I2C_BOARD_INFO("tlv320aic33", 0x1b), @@ -889,7 +888,7 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM") /* Maintainer: MontaVista Software */ .atag_offset = 0x100, .map_io = davinci_evm_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm644x_init_irq, .init_time = dm644x_init_time, .init_machine = davinci_evm_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index 8d5be6dd2019..4600b617f9b4 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include @@ -44,10 +44,10 @@ #include #include -#include #include #include "davinci.h" +#include "irqs.h" #define NAND_BLOCK_SIZE SZ_128K @@ -364,12 +364,9 @@ static struct nvmem_cell_lookup dm646x_evm_nvmem_cell_lookup = { .con_id = "mac-address", }; -static struct at24_platform_data eeprom_info = { - .byte_len = (256*1024) / 8, - .page_size = 64, - .flags = AT24_FLAG_ADDR16, - .setup = davinci_get_mac_addr, - .context = (void *)0x7f00, +static const struct property_entry eeprom_properties[] = { + PROPERTY_ENTRY_U32("pagesize", 64), + { } }; #endif @@ -440,7 +437,7 @@ static void evm_init_cpld(void) static struct i2c_board_info __initdata i2c_info[] = { { I2C_BOARD_INFO("24c256", 0x50), - .platform_data = &eeprom_info, + .properties = eeprom_properties, }, { I2C_BOARD_INFO("pcf8574a", 0x38), @@ -863,7 +860,7 @@ static __init void evm_init(void) MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM") .atag_offset = 0x100, .map_io = davinci_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm646x_init_irq, .init_time = dm646x_evm_init_time, .init_machine = evm_init, .init_late = davinci_init_late, @@ -873,7 +870,7 @@ MACHINE_END MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") .atag_offset = 0x100, .map_io = davinci_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm646x_init_irq, .init_time = dm6467t_evm_init_time, .init_machine = evm_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 8df16e81b69e..dfce421c0579 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c @@ -14,11 +14,13 @@ #include #include #include +#include #include +#include +#include #include #include #include -#include #include #include #include @@ -27,7 +29,6 @@ #include #include #include -#include "cp_intc.h" #include #include #include @@ -117,11 +118,15 @@ static void mityomapl138_cpufreq_init(const char *partnum) static void mityomapl138_cpufreq_init(const char *partnum) { } #endif -static void read_factory_config(struct nvmem_device *nvmem, void *context) +static int read_factory_config(struct notifier_block 
*nb, + unsigned long event, void *data) { int ret; const char *partnum = NULL; - struct davinci_soc_info *soc_info = &davinci_soc_info; + struct nvmem_device *nvmem = data; + + if (strcmp(nvmem_dev_name(nvmem), "1-00500") != 0) + return NOTIFY_DONE; if (!IS_BUILTIN(CONFIG_NVMEM)) { pr_warn("Factory Config not available without CONFIG_NVMEM\n"); @@ -147,21 +152,20 @@ static void read_factory_config(struct nvmem_device *nvmem, void *context) goto bad_config; } - pr_info("Found MAC = %pM\n", factory_config.mac); - if (is_valid_ether_addr(factory_config.mac)) - memcpy(soc_info->emac_pdata->mac_addr, - factory_config.mac, ETH_ALEN); - else - pr_warn("Invalid MAC found in factory config block\n"); - partnum = factory_config.partnum; pr_info("Part Number = %s\n", partnum); bad_config: /* default maximum speed is valid for all platforms */ mityomapl138_cpufreq_init(partnum); + + return NOTIFY_STOP; } +static struct notifier_block mityomapl138_nvmem_notifier = { + .notifier_call = read_factory_config, +}; + /* * We don't define a cell for factory config as it will be accessed from the * board file using the nvmem notifier chain. @@ -187,12 +191,10 @@ static struct nvmem_cell_lookup mityomapl138_nvmem_cell_lookup = { .con_id = "mac-address", }; -static struct at24_platform_data mityomapl138_fd_chip = { - .byte_len = 256, - .page_size = 8, - .flags = AT24_FLAG_READONLY | AT24_FLAG_IRUGO, - .setup = read_factory_config, - .context = NULL, +static const struct property_entry mityomapl138_fd_chip_properties[] = { + PROPERTY_ENTRY_U32("pagesize", 8), + PROPERTY_ENTRY_BOOL("read-only"), + { } }; static struct davinci_i2c_platform_data mityomap_i2c_0_pdata = { @@ -321,7 +323,7 @@ static struct i2c_board_info __initdata mityomap_tps65023_info[] = { }, { I2C_BOARD_INFO("24c02", 0x50), - .platform_data = &mityomapl138_fd_chip, + .properties = mityomapl138_fd_chip_properties, }, }; @@ -569,6 +571,7 @@ static void __init mityomapl138_init(void) davinci_serial_init(da8xx_serial_device); + nvmem_register_notifier(&mityomapl138_nvmem_notifier); nvmem_add_cell_table(&mityomapl138_nvmem_cell_table); nvmem_add_cell_lookups(&mityomapl138_nvmem_cell_lookup, 1); @@ -624,7 +627,7 @@ static void __init mityomapl138_map_io(void) MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808") .atag_offset = 0x100, .map_io = mityomapl138_map_io, - .init_irq = cp_intc_init, + .init_irq = da850_init_irq, .init_time = da850_init_time, .init_machine = mityomapl138_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index efdaa27241c5..ce99f782811a 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -231,7 +231,7 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2") /* Maintainer: Neuros Technologies */ .atag_offset = 0x100, .map_io = davinci_ntosd2_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm644x_init_irq, .init_time = dm644x_init_time, .init_machine = davinci_ntosd2_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index 94c4f126ef86..0896af2bed24 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -27,7 +27,6 @@ #include #include -#include "cp_intc.h" #include #include @@ -294,66 +293,24 @@ static int omapl138_hawk_register_aemif(void) return platform_device_register(&omapl138_hawk_aemif_device); } -static irqreturn_t omapl138_hawk_usb_ocic_irq(int 
irq, void *dev_id); -static da8xx_ocic_handler_t hawk_usb_ocic_handler; - static const short da850_hawk_usb11_pins[] = { DA850_GPIO2_4, DA850_GPIO6_13, -1 }; -static int hawk_usb_set_power(unsigned port, int on) -{ - gpio_set_value(DA850_USB1_VBUS_PIN, on); - return 0; -} - -static int hawk_usb_get_power(unsigned port) -{ - return gpio_get_value(DA850_USB1_VBUS_PIN); -} - -static int hawk_usb_get_oci(unsigned port) -{ - return !gpio_get_value(DA850_USB1_OC_PIN); -} - -static int hawk_usb_ocic_notify(da8xx_ocic_handler_t handler) -{ - int irq = gpio_to_irq(DA850_USB1_OC_PIN); - int error = 0; - - if (handler != NULL) { - hawk_usb_ocic_handler = handler; - - error = request_irq(irq, omapl138_hawk_usb_ocic_irq, - IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, - "OHCI over-current indicator", NULL); - if (error) - pr_err("%s: could not request IRQ to watch " - "over-current indicator changes\n", __func__); - } else { - free_irq(irq, NULL); - } - return error; -} +static struct gpiod_lookup_table hawk_usb_gpio_lookup = { + .dev_id = "ohci-da8xx", + .table = { + GPIO_LOOKUP("davinci_gpio", DA850_USB1_VBUS_PIN, "vbus", 0), + GPIO_LOOKUP("davinci_gpio", DA850_USB1_OC_PIN, "oc", 0), + }, +}; static struct da8xx_ohci_root_hub omapl138_hawk_usb11_pdata = { - .set_power = hawk_usb_set_power, - .get_power = hawk_usb_get_power, - .get_oci = hawk_usb_get_oci, - .ocic_notify = hawk_usb_ocic_notify, /* TPS2087 switch @ 5V */ .potpgt = (3 + 1) / 2, /* 3 ms max */ }; -static irqreturn_t omapl138_hawk_usb_ocic_irq(int irq, void *dev_id) -{ - hawk_usb_ocic_handler(&omapl138_hawk_usb11_pdata, 1); - return IRQ_HANDLED; -} - static __init void omapl138_hawk_usb_init(void) { int ret; @@ -374,34 +331,13 @@ static __init void omapl138_hawk_usb_init(void) pr_warn("%s: USB PHY registration failed: %d\n", __func__, ret); - ret = gpio_request_one(DA850_USB1_VBUS_PIN, - GPIOF_DIR_OUT, "USB1 VBUS"); - if (ret < 0) { - pr_err("%s: failed to request GPIO for USB 1.1 port " - "power control: %d\n", __func__, ret); - return; - } - - ret = gpio_request_one(DA850_USB1_OC_PIN, - GPIOF_DIR_IN, "USB1 OC"); - if (ret < 0) { - pr_err("%s: failed to request GPIO for USB 1.1 port " - "over-current indicator: %d\n", __func__, ret); - goto usb11_setup_oc_fail; - } + gpiod_add_lookup_table(&hawk_usb_gpio_lookup); ret = da8xx_register_usb11(&omapl138_hawk_usb11_pdata); - if (ret) { + if (ret) pr_warn("%s: USB 1.1 registration failed: %d\n", __func__, ret); - goto usb11_setup_fail; - } return; - -usb11_setup_fail: - gpio_free(DA850_USB1_OC_PIN); -usb11_setup_oc_fail: - gpio_free(DA850_USB1_VBUS_PIN); } static __init void omapl138_hawk_init(void) @@ -462,7 +398,7 @@ static void __init omapl138_hawk_map_io(void) MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard") .atag_offset = 0x100, .map_io = omapl138_hawk_map_io, - .init_irq = cp_intc_init, + .init_irq = da850_init_irq, .init_time = da850_init_time, .init_machine = omapl138_hawk_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index 792bb84d5011..bcdefde2f401 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c @@ -26,7 +26,7 @@ #include #include #include -#include +#include #include #include #include @@ -92,16 +92,15 @@ static struct platform_device davinci_sffsdr_nandflash_device = { .resource = davinci_sffsdr_nandflash_resource, }; -static struct at24_platform_data eeprom_info = { - .byte_len = (64*1024) / 8, - .page_size = 32, - .flags = AT24_FLAG_ADDR16, 
+static const struct property_entry eeprom_properties[] = { + PROPERTY_ENTRY_U32("pagesize", 32), + { } }; static struct i2c_board_info __initdata i2c_info[] = { { - I2C_BOARD_INFO("24lc64", 0x50), - .platform_data = &eeprom_info, + I2C_BOARD_INFO("24c64", 0x50), + .properties = eeprom_properties, }, /* Other I2C devices: * MSP430, addr 0x23 (not used) @@ -153,7 +152,7 @@ static __init void davinci_sffsdr_init(void) MACHINE_START(SFFSDR, "Lyrtech SFFSDR") .atag_offset = 0x100, .map_io = davinci_sffsdr_map_io, - .init_irq = davinci_irq_init, + .init_irq = dm644x_init_irq, .init_time = dm644x_init_time, .init_machine = davinci_sffsdr_init, .init_late = davinci_init_late, diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c index e1d0f0d841ff..ae61d19f9b3a 100644 --- a/arch/arm/mach-davinci/common.c +++ b/arch/arm/mach-davinci/common.c @@ -23,24 +23,6 @@ struct davinci_soc_info davinci_soc_info; EXPORT_SYMBOL(davinci_soc_info); -void __iomem *davinci_intc_base; -int davinci_intc_type; - -void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context) -{ - char *mac_addr = davinci_soc_info.emac_pdata->mac_addr; - off_t offset = (off_t)context; - - if (!IS_BUILTIN(CONFIG_NVMEM)) { - pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n"); - return; - } - - /* Read MAC addr from EEPROM */ - if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) - pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); -} - static int __init davinci_init_id(struct davinci_soc_info *soc_info) { int i; diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c deleted file mode 100644 index 94085d21018e..000000000000 --- a/arch/arm/mach-davinci/cp_intc.c +++ /dev/null @@ -1,215 +0,0 @@ -/* - * TI Common Platform Interrupt Controller (cp_intc) driver - * - * Author: Steve Chen - * Copyright (C) 2008-2009, MontaVista Software, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include "cp_intc.h" - -static inline unsigned int cp_intc_read(unsigned offset) -{ - return __raw_readl(davinci_intc_base + offset); -} - -static inline void cp_intc_write(unsigned long value, unsigned offset) -{ - __raw_writel(value, davinci_intc_base + offset); -} - -static void cp_intc_ack_irq(struct irq_data *d) -{ - cp_intc_write(d->hwirq, CP_INTC_SYS_STAT_IDX_CLR); -} - -/* Disable interrupt */ -static void cp_intc_mask_irq(struct irq_data *d) -{ - /* XXX don't know why we need to disable nIRQ here... 
*/ - cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR); - cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_CLR); - cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET); -} - -/* Enable interrupt */ -static void cp_intc_unmask_irq(struct irq_data *d) -{ - cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_SET); -} - -static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type) -{ - unsigned reg = BIT_WORD(d->hwirq); - unsigned mask = BIT_MASK(d->hwirq); - unsigned polarity = cp_intc_read(CP_INTC_SYS_POLARITY(reg)); - unsigned type = cp_intc_read(CP_INTC_SYS_TYPE(reg)); - - switch (flow_type) { - case IRQ_TYPE_EDGE_RISING: - polarity |= mask; - type |= mask; - break; - case IRQ_TYPE_EDGE_FALLING: - polarity &= ~mask; - type |= mask; - break; - case IRQ_TYPE_LEVEL_HIGH: - polarity |= mask; - type &= ~mask; - break; - case IRQ_TYPE_LEVEL_LOW: - polarity &= ~mask; - type &= ~mask; - break; - default: - return -EINVAL; - } - - cp_intc_write(polarity, CP_INTC_SYS_POLARITY(reg)); - cp_intc_write(type, CP_INTC_SYS_TYPE(reg)); - - return 0; -} - -static struct irq_chip cp_intc_irq_chip = { - .name = "cp_intc", - .irq_ack = cp_intc_ack_irq, - .irq_mask = cp_intc_mask_irq, - .irq_unmask = cp_intc_unmask_irq, - .irq_set_type = cp_intc_set_irq_type, - .flags = IRQCHIP_SKIP_SET_WAKE, -}; - -static struct irq_domain *cp_intc_domain; - -static int cp_intc_host_map(struct irq_domain *h, unsigned int virq, - irq_hw_number_t hw) -{ - pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw); - - irq_set_chip(virq, &cp_intc_irq_chip); - irq_set_probe(virq); - irq_set_handler(virq, handle_edge_irq); - return 0; -} - -static const struct irq_domain_ops cp_intc_host_ops = { - .map = cp_intc_host_map, - .xlate = irq_domain_xlate_onetwocell, -}; - -int __init cp_intc_of_init(struct device_node *node, struct device_node *parent) -{ - u32 num_irq = davinci_soc_info.intc_irq_num; - u8 *irq_prio = davinci_soc_info.intc_irq_prios; - u32 *host_map = davinci_soc_info.intc_host_map; - unsigned num_reg = BITS_TO_LONGS(num_irq); - int i, irq_base; - - davinci_intc_type = DAVINCI_INTC_TYPE_CP_INTC; - if (node) { - davinci_intc_base = of_iomap(node, 0); - if (of_property_read_u32(node, "ti,intc-size", &num_irq)) - pr_warn("unable to get intc-size, default to %d\n", - num_irq); - } else { - davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_8K); - } - if (WARN_ON(!davinci_intc_base)) - return -EINVAL; - - cp_intc_write(0, CP_INTC_GLOBAL_ENABLE); - - /* Disable all host interrupts */ - cp_intc_write(0, CP_INTC_HOST_ENABLE(0)); - - /* Disable system interrupts */ - for (i = 0; i < num_reg; i++) - cp_intc_write(~0, CP_INTC_SYS_ENABLE_CLR(i)); - - /* Set to normal mode, no nesting, no priority hold */ - cp_intc_write(0, CP_INTC_CTRL); - cp_intc_write(0, CP_INTC_HOST_CTRL); - - /* Clear system interrupt status */ - for (i = 0; i < num_reg; i++) - cp_intc_write(~0, CP_INTC_SYS_STAT_CLR(i)); - - /* Enable nIRQ (what about nFIQ?) */ - cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET); - - /* - * Priority is determined by host channel: lower channel number has - * higher priority i.e. channel 0 has highest priority and channel 31 - * had the lowest priority. - */ - num_reg = (num_irq + 3) >> 2; /* 4 channels per register */ - if (irq_prio) { - unsigned j, k; - u32 val; - - for (k = i = 0; i < num_reg; i++) { - for (val = j = 0; j < 4; j++, k++) { - val >>= 8; - if (k < num_irq) - val |= irq_prio[k] << 24; - } - - cp_intc_write(val, CP_INTC_CHAN_MAP(i)); - } - } else { - /* - * Default everything to channel 15 if priority not specified. 
- * Note that channel 0-1 are mapped to nFIQ and channels 2-31 - * are mapped to nIRQ. - */ - for (i = 0; i < num_reg; i++) - cp_intc_write(0x0f0f0f0f, CP_INTC_CHAN_MAP(i)); - } - - if (host_map) - for (i = 0; host_map[i] != -1; i++) - cp_intc_write(host_map[i], CP_INTC_HOST_MAP(i)); - - irq_base = irq_alloc_descs(-1, 0, num_irq, 0); - if (irq_base < 0) { - pr_warn("Couldn't allocate IRQ numbers\n"); - irq_base = 0; - } - - /* create a legacy host */ - cp_intc_domain = irq_domain_add_legacy(node, num_irq, - irq_base, 0, &cp_intc_host_ops, NULL); - - if (!cp_intc_domain) { - pr_err("cp_intc: failed to allocate irq host!\n"); - return -EINVAL; - } - - /* Enable global interrupt */ - cp_intc_write(1, CP_INTC_GLOBAL_ENABLE); - - return 0; -} - -void __init cp_intc_init(void) -{ - cp_intc_of_init(NULL, NULL); -} - -IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", cp_intc_of_init); diff --git a/arch/arm/mach-davinci/cp_intc.h b/arch/arm/mach-davinci/cp_intc.h deleted file mode 100644 index 827bbe9baed4..000000000000 --- a/arch/arm/mach-davinci/cp_intc.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * TI Common Platform Interrupt Controller (cp_intc) definitions - * - * Author: Steve Chen - * Copyright (C) 2008-2009, MontaVista Software, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ -#ifndef __ASM_HARDWARE_CP_INTC_H -#define __ASM_HARDWARE_CP_INTC_H - -#define CP_INTC_REV 0x00 -#define CP_INTC_CTRL 0x04 -#define CP_INTC_HOST_CTRL 0x0C -#define CP_INTC_GLOBAL_ENABLE 0x10 -#define CP_INTC_GLOBAL_NESTING_LEVEL 0x1C -#define CP_INTC_SYS_STAT_IDX_SET 0x20 -#define CP_INTC_SYS_STAT_IDX_CLR 0x24 -#define CP_INTC_SYS_ENABLE_IDX_SET 0x28 -#define CP_INTC_SYS_ENABLE_IDX_CLR 0x2C -#define CP_INTC_GLOBAL_WAKEUP_ENABLE 0x30 -#define CP_INTC_HOST_ENABLE_IDX_SET 0x34 -#define CP_INTC_HOST_ENABLE_IDX_CLR 0x38 -#define CP_INTC_PACING_PRESCALE 0x40 -#define CP_INTC_VECTOR_BASE 0x50 -#define CP_INTC_VECTOR_SIZE 0x54 -#define CP_INTC_VECTOR_NULL 0x58 -#define CP_INTC_PRIO_IDX 0x80 -#define CP_INTC_PRIO_VECTOR 0x84 -#define CP_INTC_SECURE_ENABLE 0x90 -#define CP_INTC_SECURE_PRIO_IDX 0x94 -#define CP_INTC_PACING_PARAM(n) (0x0100 + (n << 4)) -#define CP_INTC_PACING_DEC(n) (0x0104 + (n << 4)) -#define CP_INTC_PACING_MAP(n) (0x0108 + (n << 4)) -#define CP_INTC_SYS_RAW_STAT(n) (0x0200 + (n << 2)) -#define CP_INTC_SYS_STAT_CLR(n) (0x0280 + (n << 2)) -#define CP_INTC_SYS_ENABLE_SET(n) (0x0300 + (n << 2)) -#define CP_INTC_SYS_ENABLE_CLR(n) (0x0380 + (n << 2)) -#define CP_INTC_CHAN_MAP(n) (0x0400 + (n << 2)) -#define CP_INTC_HOST_MAP(n) (0x0800 + (n << 2)) -#define CP_INTC_HOST_PRIO_IDX(n) (0x0900 + (n << 2)) -#define CP_INTC_SYS_POLARITY(n) (0x0D00 + (n << 2)) -#define CP_INTC_SYS_TYPE(n) (0x0D80 + (n << 2)) -#define CP_INTC_WAKEUP_ENABLE(n) (0x0E00 + (n << 2)) -#define CP_INTC_DEBUG_SELECT(n) (0x0F00 + (n << 2)) -#define CP_INTC_SYS_SECURE_ENABLE(n) (0x1000 + (n << 2)) -#define CP_INTC_HOST_NESTING_LEVEL(n) (0x1100 + (n << 2)) -#define CP_INTC_HOST_ENABLE(n) (0x1500 + (n << 2)) -#define CP_INTC_HOST_PRIO_VECTOR(n) (0x1600 + (n << 2)) -#define CP_INTC_VECTOR_ADDR(n) (0x2000 + (n << 2)) - -void cp_intc_init(void); -int cp_intc_of_init(struct device_node *, struct device_node *); - -#endif /* __ASM_HARDWARE_CP_INTC_H */ diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c index 2cc9fe4c3a91..63511f638ce4 100644 --- a/arch/arm/mach-davinci/da830.c +++ 
b/arch/arm/mach-davinci/da830.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -19,9 +20,9 @@ #include #include #include -#include #include +#include "irqs.h" #include "mux.h" /* Offsets of the 8 compare registers on the da830 */ @@ -623,101 +624,6 @@ const short da830_eqep1_pins[] __initconst = { -1 }; -/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */ -static u8 da830_default_priorities[DA830_N_CP_INTC_IRQ] = { - [IRQ_DA8XX_COMMTX] = 7, - [IRQ_DA8XX_COMMRX] = 7, - [IRQ_DA8XX_NINT] = 7, - [IRQ_DA8XX_EVTOUT0] = 7, - [IRQ_DA8XX_EVTOUT1] = 7, - [IRQ_DA8XX_EVTOUT2] = 7, - [IRQ_DA8XX_EVTOUT3] = 7, - [IRQ_DA8XX_EVTOUT4] = 7, - [IRQ_DA8XX_EVTOUT5] = 7, - [IRQ_DA8XX_EVTOUT6] = 7, - [IRQ_DA8XX_EVTOUT7] = 7, - [IRQ_DA8XX_CCINT0] = 7, - [IRQ_DA8XX_CCERRINT] = 7, - [IRQ_DA8XX_TCERRINT0] = 7, - [IRQ_DA8XX_AEMIFINT] = 7, - [IRQ_DA8XX_I2CINT0] = 7, - [IRQ_DA8XX_MMCSDINT0] = 7, - [IRQ_DA8XX_MMCSDINT1] = 7, - [IRQ_DA8XX_ALLINT0] = 7, - [IRQ_DA8XX_RTC] = 7, - [IRQ_DA8XX_SPINT0] = 7, - [IRQ_DA8XX_TINT12_0] = 7, - [IRQ_DA8XX_TINT34_0] = 7, - [IRQ_DA8XX_TINT12_1] = 7, - [IRQ_DA8XX_TINT34_1] = 7, - [IRQ_DA8XX_UARTINT0] = 7, - [IRQ_DA8XX_KEYMGRINT] = 7, - [IRQ_DA830_MPUERR] = 7, - [IRQ_DA8XX_CHIPINT0] = 7, - [IRQ_DA8XX_CHIPINT1] = 7, - [IRQ_DA8XX_CHIPINT2] = 7, - [IRQ_DA8XX_CHIPINT3] = 7, - [IRQ_DA8XX_TCERRINT1] = 7, - [IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7, - [IRQ_DA8XX_C0_RX_PULSE] = 7, - [IRQ_DA8XX_C0_TX_PULSE] = 7, - [IRQ_DA8XX_C0_MISC_PULSE] = 7, - [IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7, - [IRQ_DA8XX_C1_RX_PULSE] = 7, - [IRQ_DA8XX_C1_TX_PULSE] = 7, - [IRQ_DA8XX_C1_MISC_PULSE] = 7, - [IRQ_DA8XX_MEMERR] = 7, - [IRQ_DA8XX_GPIO0] = 7, - [IRQ_DA8XX_GPIO1] = 7, - [IRQ_DA8XX_GPIO2] = 7, - [IRQ_DA8XX_GPIO3] = 7, - [IRQ_DA8XX_GPIO4] = 7, - [IRQ_DA8XX_GPIO5] = 7, - [IRQ_DA8XX_GPIO6] = 7, - [IRQ_DA8XX_GPIO7] = 7, - [IRQ_DA8XX_GPIO8] = 7, - [IRQ_DA8XX_I2CINT1] = 7, - [IRQ_DA8XX_LCDINT] = 7, - [IRQ_DA8XX_UARTINT1] = 7, - [IRQ_DA8XX_MCASPINT] = 7, - [IRQ_DA8XX_ALLINT1] = 7, - [IRQ_DA8XX_SPINT1] = 7, - [IRQ_DA8XX_UHPI_INT1] = 7, - [IRQ_DA8XX_USB_INT] = 7, - [IRQ_DA8XX_IRQN] = 7, - [IRQ_DA8XX_RWAKEUP] = 7, - [IRQ_DA8XX_UARTINT2] = 7, - [IRQ_DA8XX_DFTSSINT] = 7, - [IRQ_DA8XX_EHRPWM0] = 7, - [IRQ_DA8XX_EHRPWM0TZ] = 7, - [IRQ_DA8XX_EHRPWM1] = 7, - [IRQ_DA8XX_EHRPWM1TZ] = 7, - [IRQ_DA830_EHRPWM2] = 7, - [IRQ_DA830_EHRPWM2TZ] = 7, - [IRQ_DA8XX_ECAP0] = 7, - [IRQ_DA8XX_ECAP1] = 7, - [IRQ_DA8XX_ECAP2] = 7, - [IRQ_DA830_EQEP0] = 7, - [IRQ_DA830_EQEP1] = 7, - [IRQ_DA830_T12CMPINT0_0] = 7, - [IRQ_DA830_T12CMPINT1_0] = 7, - [IRQ_DA830_T12CMPINT2_0] = 7, - [IRQ_DA830_T12CMPINT3_0] = 7, - [IRQ_DA830_T12CMPINT4_0] = 7, - [IRQ_DA830_T12CMPINT5_0] = 7, - [IRQ_DA830_T12CMPINT6_0] = 7, - [IRQ_DA830_T12CMPINT7_0] = 7, - [IRQ_DA830_T12CMPINT0_1] = 7, - [IRQ_DA830_T12CMPINT1_1] = 7, - [IRQ_DA830_T12CMPINT2_1] = 7, - [IRQ_DA830_T12CMPINT3_1] = 7, - [IRQ_DA830_T12CMPINT4_1] = 7, - [IRQ_DA830_T12CMPINT5_1] = 7, - [IRQ_DA830_T12CMPINT6_1] = 7, - [IRQ_DA830_T12CMPINT7_1] = 7, - [IRQ_DA8XX_ARMCLKSTOPREQ] = 7, -}; - static struct map_desc da830_io_desc[] = { { .virtual = IO_VIRT, @@ -772,17 +678,17 @@ int __init da830_register_gpio(void) static struct davinci_timer_instance da830_timer_instance[2] = { { .base = DA8XX_TIMER64P0_BASE, - .bottom_irq = IRQ_DA8XX_TINT12_0, - .top_irq = IRQ_DA8XX_TINT34_0, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_0), + .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_0), .cmp_off = DA830_CMP12_0, - .cmp_irq = IRQ_DA830_T12CMPINT0_0, + .cmp_irq = 
DAVINCI_INTC_IRQ(IRQ_DA830_T12CMPINT0_0), }, { .base = DA8XX_TIMER64P1_BASE, - .bottom_irq = IRQ_DA8XX_TINT12_1, - .top_irq = IRQ_DA8XX_TINT34_1, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_1), + .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_1), .cmp_off = DA830_CMP12_0, - .cmp_irq = IRQ_DA830_T12CMPINT0_1, + .cmp_irq = DAVINCI_INTC_IRQ(IRQ_DA830_T12CMPINT0_1), }, }; @@ -806,10 +712,6 @@ static const struct davinci_soc_info davinci_soc_info_da830 = { .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120, .pinmux_pins = da830_pins, .pinmux_pins_num = ARRAY_SIZE(da830_pins), - .intc_base = DA8XX_CP_INTC_BASE, - .intc_type = DAVINCI_INTC_TYPE_CP_INTC, - .intc_irq_prios = da830_default_priorities, - .intc_irq_num = DA830_N_CP_INTC_IRQ, .timer_info = &da830_timer_info, .emac_pdata = &da8xx_emac_pdata, }; @@ -822,6 +724,20 @@ void __init da830_init(void) WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"); } +static const struct davinci_cp_intc_config da830_cp_intc_config = { + .reg = { + .start = DA8XX_CP_INTC_BASE, + .end = DA8XX_CP_INTC_BASE + SZ_8K - 1, + .flags = IORESOURCE_MEM, + }, + .num_irqs = DA830_N_CP_INTC_IRQ, +}; + +void __init da830_init_irq(void) +{ + davinci_cp_intc_init(&da830_cp_intc_config); +} + void __init da830_init_time(void) { void __iomem *pll; diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c index e7b78df2bfef..67ab71ba3ad3 100644 --- a/arch/arm/mach-davinci/da850.c +++ b/arch/arm/mach-davinci/da850.c @@ -18,9 +18,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -29,13 +31,12 @@ #include #include -#include #include #include -#include #include #include +#include "irqs.h" #include "mux.h" #define DA850_PLL1_BASE 0x01e1a000 @@ -298,111 +299,6 @@ const short da850_vpif_display_pins[] __initconst = { -1 }; -/* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */ -static u8 da850_default_priorities[DA850_N_CP_INTC_IRQ] = { - [IRQ_DA8XX_COMMTX] = 7, - [IRQ_DA8XX_COMMRX] = 7, - [IRQ_DA8XX_NINT] = 7, - [IRQ_DA8XX_EVTOUT0] = 7, - [IRQ_DA8XX_EVTOUT1] = 7, - [IRQ_DA8XX_EVTOUT2] = 7, - [IRQ_DA8XX_EVTOUT3] = 7, - [IRQ_DA8XX_EVTOUT4] = 7, - [IRQ_DA8XX_EVTOUT5] = 7, - [IRQ_DA8XX_EVTOUT6] = 7, - [IRQ_DA8XX_EVTOUT7] = 7, - [IRQ_DA8XX_CCINT0] = 7, - [IRQ_DA8XX_CCERRINT] = 7, - [IRQ_DA8XX_TCERRINT0] = 7, - [IRQ_DA8XX_AEMIFINT] = 7, - [IRQ_DA8XX_I2CINT0] = 7, - [IRQ_DA8XX_MMCSDINT0] = 7, - [IRQ_DA8XX_MMCSDINT1] = 7, - [IRQ_DA8XX_ALLINT0] = 7, - [IRQ_DA8XX_RTC] = 7, - [IRQ_DA8XX_SPINT0] = 7, - [IRQ_DA8XX_TINT12_0] = 7, - [IRQ_DA8XX_TINT34_0] = 7, - [IRQ_DA8XX_TINT12_1] = 7, - [IRQ_DA8XX_TINT34_1] = 7, - [IRQ_DA8XX_UARTINT0] = 7, - [IRQ_DA8XX_KEYMGRINT] = 7, - [IRQ_DA850_MPUADDRERR0] = 7, - [IRQ_DA8XX_CHIPINT0] = 7, - [IRQ_DA8XX_CHIPINT1] = 7, - [IRQ_DA8XX_CHIPINT2] = 7, - [IRQ_DA8XX_CHIPINT3] = 7, - [IRQ_DA8XX_TCERRINT1] = 7, - [IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7, - [IRQ_DA8XX_C0_RX_PULSE] = 7, - [IRQ_DA8XX_C0_TX_PULSE] = 7, - [IRQ_DA8XX_C0_MISC_PULSE] = 7, - [IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7, - [IRQ_DA8XX_C1_RX_PULSE] = 7, - [IRQ_DA8XX_C1_TX_PULSE] = 7, - [IRQ_DA8XX_C1_MISC_PULSE] = 7, - [IRQ_DA8XX_MEMERR] = 7, - [IRQ_DA8XX_GPIO0] = 7, - [IRQ_DA8XX_GPIO1] = 7, - [IRQ_DA8XX_GPIO2] = 7, - [IRQ_DA8XX_GPIO3] = 7, - [IRQ_DA8XX_GPIO4] = 7, - [IRQ_DA8XX_GPIO5] = 7, - [IRQ_DA8XX_GPIO6] = 7, - [IRQ_DA8XX_GPIO7] = 7, - [IRQ_DA8XX_GPIO8] = 7, - [IRQ_DA8XX_I2CINT1] = 7, - [IRQ_DA8XX_LCDINT] = 7, - [IRQ_DA8XX_UARTINT1] = 7, - [IRQ_DA8XX_MCASPINT] = 7, - [IRQ_DA8XX_ALLINT1] = 7, - [IRQ_DA8XX_SPINT1] = 7, - 
[IRQ_DA8XX_UHPI_INT1] = 7, - [IRQ_DA8XX_USB_INT] = 7, - [IRQ_DA8XX_IRQN] = 7, - [IRQ_DA8XX_RWAKEUP] = 7, - [IRQ_DA8XX_UARTINT2] = 7, - [IRQ_DA8XX_DFTSSINT] = 7, - [IRQ_DA8XX_EHRPWM0] = 7, - [IRQ_DA8XX_EHRPWM0TZ] = 7, - [IRQ_DA8XX_EHRPWM1] = 7, - [IRQ_DA8XX_EHRPWM1TZ] = 7, - [IRQ_DA850_SATAINT] = 7, - [IRQ_DA850_TINTALL_2] = 7, - [IRQ_DA8XX_ECAP0] = 7, - [IRQ_DA8XX_ECAP1] = 7, - [IRQ_DA8XX_ECAP2] = 7, - [IRQ_DA850_MMCSDINT0_1] = 7, - [IRQ_DA850_MMCSDINT1_1] = 7, - [IRQ_DA850_T12CMPINT0_2] = 7, - [IRQ_DA850_T12CMPINT1_2] = 7, - [IRQ_DA850_T12CMPINT2_2] = 7, - [IRQ_DA850_T12CMPINT3_2] = 7, - [IRQ_DA850_T12CMPINT4_2] = 7, - [IRQ_DA850_T12CMPINT5_2] = 7, - [IRQ_DA850_T12CMPINT6_2] = 7, - [IRQ_DA850_T12CMPINT7_2] = 7, - [IRQ_DA850_T12CMPINT0_3] = 7, - [IRQ_DA850_T12CMPINT1_3] = 7, - [IRQ_DA850_T12CMPINT2_3] = 7, - [IRQ_DA850_T12CMPINT3_3] = 7, - [IRQ_DA850_T12CMPINT4_3] = 7, - [IRQ_DA850_T12CMPINT5_3] = 7, - [IRQ_DA850_T12CMPINT6_3] = 7, - [IRQ_DA850_T12CMPINT7_3] = 7, - [IRQ_DA850_RPIINT] = 7, - [IRQ_DA850_VPIFINT] = 7, - [IRQ_DA850_CCINT1] = 7, - [IRQ_DA850_CCERRINT1] = 7, - [IRQ_DA850_TCERRINT2] = 7, - [IRQ_DA850_TINTALL_3] = 7, - [IRQ_DA850_MCBSP0RINT] = 7, - [IRQ_DA850_MCBSP0XINT] = 7, - [IRQ_DA850_MCBSP1RINT] = 7, - [IRQ_DA850_MCBSP1XINT] = 7, - [IRQ_DA8XX_ARMCLKSTOPREQ] = 7, -}; - static struct map_desc da850_io_desc[] = { { .virtual = IO_VIRT, @@ -439,23 +335,23 @@ static struct davinci_id da850_ids[] = { static struct davinci_timer_instance da850_timer_instance[4] = { { .base = DA8XX_TIMER64P0_BASE, - .bottom_irq = IRQ_DA8XX_TINT12_0, - .top_irq = IRQ_DA8XX_TINT34_0, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_0), + .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_0), }, { .base = DA8XX_TIMER64P1_BASE, - .bottom_irq = IRQ_DA8XX_TINT12_1, - .top_irq = IRQ_DA8XX_TINT34_1, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT12_1), + .top_irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_TINT34_1), }, { .base = DA850_TIMER64P2_BASE, - .bottom_irq = IRQ_DA850_TINT12_2, - .top_irq = IRQ_DA850_TINT34_2, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT12_2), + .top_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT34_2), }, { .base = DA850_TIMER64P3_BASE, - .bottom_irq = IRQ_DA850_TINT12_3, - .top_irq = IRQ_DA850_TINT34_3, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT12_3), + .top_irq = DAVINCI_INTC_IRQ(IRQ_DA850_TINT34_3), }, }; @@ -658,8 +554,8 @@ static struct platform_device da850_vpif_dev = { static struct resource da850_vpif_display_resource[] = { { - .start = IRQ_DA850_VPIFINT, - .end = IRQ_DA850_VPIFINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), + .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .flags = IORESOURCE_IRQ, }, }; @@ -677,13 +573,13 @@ static struct platform_device da850_vpif_display_dev = { static struct resource da850_vpif_capture_resource[] = { { - .start = IRQ_DA850_VPIFINT, - .end = IRQ_DA850_VPIFINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), + .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA850_VPIFINT, - .end = IRQ_DA850_VPIFINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), + .end = DAVINCI_INTC_IRQ(IRQ_DA850_VPIFINT), .flags = IORESOURCE_IRQ, }, }; @@ -738,10 +634,6 @@ static const struct davinci_soc_info davinci_soc_info_da850 = { .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120, .pinmux_pins = da850_pins, .pinmux_pins_num = ARRAY_SIZE(da850_pins), - .intc_base = DA8XX_CP_INTC_BASE, - .intc_type = DAVINCI_INTC_TYPE_CP_INTC, - .intc_irq_prios = da850_default_priorities, - .intc_irq_num = DA850_N_CP_INTC_IRQ, .timer_info = &da850_timer_info, .emac_pdata 
= &da8xx_emac_pdata, .sram_dma = DA8XX_SHARED_RAM_BASE, @@ -760,6 +652,20 @@ void __init da850_init(void) WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module"); } +static const struct davinci_cp_intc_config da850_cp_intc_config = { + .reg = { + .start = DA8XX_CP_INTC_BASE, + .end = DA8XX_CP_INTC_BASE + SZ_8K - 1, + .flags = IORESOURCE_MEM, + }, + .num_irqs = DA850_N_CP_INTC_IRQ, +}; + +void __init da850_init_irq(void) +{ + davinci_cp_intc_init(&da850_cp_intc_config); +} + void __init da850_init_time(void) { void __iomem *pll0; diff --git a/arch/arm/mach-davinci/davinci.h b/arch/arm/mach-davinci/davinci.h index db4c95ef4d5c..56c1835c42e5 100644 --- a/arch/arm/mach-davinci/davinci.h +++ b/arch/arm/mach-davinci/davinci.h @@ -88,6 +88,7 @@ int davinci_init_wdt(void); /* DM355 function declarations */ void dm355_init(void); void dm355_init_time(void); +void dm355_init_irq(void); void dm355_register_clocks(void); void dm355_init_spi0(unsigned chipselect_mask, const struct spi_board_info *info, unsigned len); @@ -97,6 +98,7 @@ int dm355_gpio_register(void); /* DM365 function declarations */ void dm365_init(void); +void dm365_init_irq(void); void dm365_init_time(void); void dm365_register_clocks(void); void dm365_init_asp(void); @@ -110,6 +112,7 @@ int dm365_gpio_register(void); /* DM644x function declarations */ void dm644x_init(void); +void dm644x_init_irq(void); void dm644x_init_devices(void); void dm644x_init_time(void); void dm644x_register_clocks(void); @@ -119,6 +122,7 @@ int dm644x_gpio_register(void); /* DM646x function declarations */ void dm646x_init(void); +void dm646x_init_irq(void); void dm646x_init_time(unsigned long ref_clk_rate, unsigned long aux_clkin_rate); void dm646x_register_clocks(void); void dm646x_init_mcasp0(struct snd_platform_data *pdata); diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index cf78da5ab054..b8dc674e06bc 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c @@ -28,6 +28,7 @@ #include "asp.h" #include "cpuidle.h" +#include "irqs.h" #include "sram.h" #define DA8XX_TPCC_BASE 0x01c00000 @@ -64,7 +65,7 @@ void __iomem *da8xx_syscfg1_base; static struct plat_serial8250_port da8xx_serial0_pdata[] = { { .mapbase = DA8XX_UART0_BASE, - .irq = IRQ_DA8XX_UARTINT0, + .irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_UARTINT0), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -77,7 +78,7 @@ static struct plat_serial8250_port da8xx_serial0_pdata[] = { static struct plat_serial8250_port da8xx_serial1_pdata[] = { { .mapbase = DA8XX_UART1_BASE, - .irq = IRQ_DA8XX_UARTINT1, + .irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_UARTINT1), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -90,7 +91,7 @@ static struct plat_serial8250_port da8xx_serial1_pdata[] = { static struct plat_serial8250_port da8xx_serial2_pdata[] = { { .mapbase = DA8XX_UART2_BASE, - .irq = IRQ_DA8XX_UARTINT2, + .irq = DAVINCI_INTC_IRQ(IRQ_DA8XX_UARTINT2), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -171,12 +172,12 @@ static struct resource da8xx_edma0_resources[] = { }, { .name = "edma3_ccint", - .start = IRQ_DA8XX_CCINT0, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_CCINT0), .flags = IORESOURCE_IRQ, }, { .name = "edma3_ccerrint", - .start = IRQ_DA8XX_CCERRINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_CCERRINT), .flags = IORESOURCE_IRQ, }, }; @@ -196,12 +197,12 @@ static struct resource da850_edma1_resources[] = { }, { .name = "edma3_ccint", - .start = 
IRQ_DA850_CCINT1, + .start = DAVINCI_INTC_IRQ(IRQ_DA850_CCINT1), .flags = IORESOURCE_IRQ, }, { .name = "edma3_ccerrint", - .start = IRQ_DA850_CCERRINT1, + .start = DAVINCI_INTC_IRQ(IRQ_DA850_CCERRINT1), .flags = IORESOURCE_IRQ, }, }; @@ -306,8 +307,8 @@ static struct resource da8xx_i2c_resources0[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DA8XX_I2CINT0, - .end = IRQ_DA8XX_I2CINT0, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT0), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT0), .flags = IORESOURCE_IRQ, }, }; @@ -326,8 +327,8 @@ static struct resource da8xx_i2c_resources1[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DA8XX_I2CINT1, - .end = IRQ_DA8XX_I2CINT1, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT1), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_I2CINT1), .flags = IORESOURCE_IRQ, }, }; @@ -382,23 +383,23 @@ static struct resource da8xx_emac_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DA8XX_C0_RX_THRESH_PULSE, - .end = IRQ_DA8XX_C0_RX_THRESH_PULSE, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_THRESH_PULSE), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_THRESH_PULSE), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_C0_RX_PULSE, - .end = IRQ_DA8XX_C0_RX_PULSE, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_PULSE), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_RX_PULSE), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_C0_TX_PULSE, - .end = IRQ_DA8XX_C0_TX_PULSE, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_TX_PULSE), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_TX_PULSE), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_C0_MISC_PULSE, - .end = IRQ_DA8XX_C0_MISC_PULSE, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_MISC_PULSE), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_C0_MISC_PULSE), .flags = IORESOURCE_IRQ, }, }; @@ -470,7 +471,7 @@ static struct resource da830_mcasp1_resources[] = { }, { .name = "common", - .start = IRQ_DA8XX_MCASPINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MCASPINT), .flags = IORESOURCE_IRQ, }, }; @@ -505,7 +506,7 @@ static struct resource da830_mcasp2_resources[] = { }, { .name = "common", - .start = IRQ_DA8XX_MCASPINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MCASPINT), .flags = IORESOURCE_IRQ, }, }; @@ -540,7 +541,7 @@ static struct resource da850_mcasp_resources[] = { }, { .name = "common", - .start = IRQ_DA8XX_MCASPINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MCASPINT), .flags = IORESOURCE_IRQ, }, }; @@ -588,43 +589,43 @@ static struct resource da8xx_pruss_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DA8XX_EVTOUT0, - .end = IRQ_DA8XX_EVTOUT0, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT0), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_EVTOUT1, - .end = IRQ_DA8XX_EVTOUT1, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT1), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT1), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_EVTOUT2, - .end = IRQ_DA8XX_EVTOUT2, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT2), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT2), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_EVTOUT3, - .end = IRQ_DA8XX_EVTOUT3, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT3), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT3), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_EVTOUT4, - .end = IRQ_DA8XX_EVTOUT4, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT4), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT4), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_EVTOUT5, - .end = IRQ_DA8XX_EVTOUT5, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT5), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT5), .flags = IORESOURCE_IRQ, }, { - .start = 
IRQ_DA8XX_EVTOUT6, - .end = IRQ_DA8XX_EVTOUT6, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT6), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT6), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_EVTOUT7, - .end = IRQ_DA8XX_EVTOUT7, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT7), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_EVTOUT7), .flags = IORESOURCE_IRQ, }, }; @@ -674,8 +675,8 @@ static struct resource da8xx_lcdc_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { /* interrupt */ - .start = IRQ_DA8XX_LCDINT, - .end = IRQ_DA8XX_LCDINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_LCDINT), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_LCDINT), .flags = IORESOURCE_IRQ, }, }; @@ -700,48 +701,48 @@ static struct resource da8xx_gpio_resources[] = { .flags = IORESOURCE_MEM, }, { /* interrupt */ - .start = IRQ_DA8XX_GPIO0, - .end = IRQ_DA8XX_GPIO0, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO0), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO1, - .end = IRQ_DA8XX_GPIO1, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO1), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO1), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO2, - .end = IRQ_DA8XX_GPIO2, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO2), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO2), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO3, - .end = IRQ_DA8XX_GPIO3, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO3), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO3), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO4, - .end = IRQ_DA8XX_GPIO4, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO4), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO4), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO5, - .end = IRQ_DA8XX_GPIO5, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO5), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO5), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO6, - .end = IRQ_DA8XX_GPIO6, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO6), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO6), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO7, - .end = IRQ_DA8XX_GPIO7, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO7), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO7), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DA8XX_GPIO8, - .end = IRQ_DA8XX_GPIO8, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO8), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_GPIO8), .flags = IORESOURCE_IRQ, }, }; @@ -766,8 +767,8 @@ static struct resource da8xx_mmcsd0_resources[] = { .flags = IORESOURCE_MEM, }, { /* interrupt */ - .start = IRQ_DA8XX_MMCSDINT0, - .end = IRQ_DA8XX_MMCSDINT0, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_MMCSDINT0), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_MMCSDINT0), .flags = IORESOURCE_IRQ, }, }; @@ -793,8 +794,8 @@ static struct resource da850_mmcsd1_resources[] = { .flags = IORESOURCE_MEM, }, { /* interrupt */ - .start = IRQ_DA850_MMCSDINT0_1, - .end = IRQ_DA850_MMCSDINT0_1, + .start = DAVINCI_INTC_IRQ(IRQ_DA850_MMCSDINT0_1), + .end = DAVINCI_INTC_IRQ(IRQ_DA850_MMCSDINT0_1), .flags = IORESOURCE_IRQ, }, }; @@ -845,8 +846,8 @@ static struct resource da8xx_rproc_resources[] = { .flags = IORESOURCE_MEM, }, { /* dsp irq */ - .start = IRQ_DA8XX_CHIPINT0, - .end = IRQ_DA8XX_CHIPINT0, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_CHIPINT0), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_CHIPINT0), .flags = IORESOURCE_IRQ, }, }; @@ -936,13 +937,13 @@ static struct resource da8xx_rtc_resources[] = { .flags = IORESOURCE_MEM, }, { /* timer irq */ - .start = IRQ_DA8XX_RTC, - .end = IRQ_DA8XX_RTC, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC), .flags = IORESOURCE_IRQ, }, { /* 
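
The hunks above and below all apply one mechanical rule: every legacy IRQ_* number handed to the platform-device layer is wrapped in DAVINCI_INTC_IRQ(), the offset macro this series adds to mach/common.h further down. A minimal sketch of the pattern, assuming a hypothetical peripheral (FOO_BASE and IRQ_FOO are placeholders, not symbols from the patch):

static struct resource foo_resources[] = {
	{
		.start	= FOO_BASE,
		.end	= FOO_BASE + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* was: .start/.end = IRQ_FOO */
		.start	= DAVINCI_INTC_IRQ(IRQ_FOO),
		.end	= DAVINCI_INTC_IRQ(IRQ_FOO),
		.flags	= IORESOURCE_IRQ,
	},
};
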
alarm irq */ - .start = IRQ_DA8XX_RTC, - .end = IRQ_DA8XX_RTC, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_RTC), .flags = IORESOURCE_IRQ, }, }; @@ -1009,8 +1010,8 @@ static struct resource da8xx_spi0_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = IRQ_DA8XX_SPINT0, - .end = IRQ_DA8XX_SPINT0, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT0), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT0), .flags = IORESOURCE_IRQ, }, }; @@ -1022,8 +1023,8 @@ static struct resource da8xx_spi1_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = IRQ_DA8XX_SPINT1, - .end = IRQ_DA8XX_SPINT1, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT1), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_SPINT1), .flags = IORESOURCE_IRQ, }, }; @@ -1103,7 +1104,7 @@ static struct resource da850_sata_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DA850_SATAINT, + .start = DAVINCI_INTC_IRQ(IRQ_DA850_SATAINT), .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c index e8dbbb7479ab..40bd8029e457 100644 --- a/arch/arm/mach-davinci/devices.c +++ b/arch/arm/mach-davinci/devices.c @@ -11,21 +11,20 @@ #include #include +#include +#include +#include #include #include #include #include -#include -#include #include #include -#include #include -#include - #include "davinci.h" +#include "irqs.h" #define DAVINCI_I2C_BASE 0x01C21000 #define DAVINCI_ATA_BASE 0x01C66000 @@ -56,7 +55,7 @@ static struct resource i2c_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_I2C, + .start = DAVINCI_INTC_IRQ(IRQ_I2C), .flags = IORESOURCE_IRQ, }, }; @@ -84,8 +83,8 @@ static struct resource ide_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_IDE, - .end = IRQ_IDE, + .start = DAVINCI_INTC_IRQ(IRQ_IDE), + .end = DAVINCI_INTC_IRQ(IRQ_IDE), .flags = IORESOURCE_IRQ, }, }; @@ -133,11 +132,11 @@ static struct resource mmcsd0_resources[] = { }, /* IRQs: MMC/SD, then SDIO */ { - .start = IRQ_MMCINT, + .start = DAVINCI_INTC_IRQ(IRQ_MMCINT), .flags = IORESOURCE_IRQ, }, { /* different on dm355 */ - .start = IRQ_SDIOINT, + .start = DAVINCI_INTC_IRQ(IRQ_SDIOINT), .flags = IORESOURCE_IRQ, }, }; @@ -163,10 +162,10 @@ static struct resource mmcsd1_resources[] = { }, /* IRQs: MMC/SD, then SDIO */ { - .start = IRQ_DM355_MMCINT1, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_MMCINT1), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM355_SDIOINT1, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_SDIOINT1), .flags = IORESOURCE_IRQ, }, }; @@ -219,7 +218,8 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config) mmcsd1_resources[0].start = DM365_MMCSD1_BASE; mmcsd1_resources[0].end = DM365_MMCSD1_BASE + SZ_4K - 1; - mmcsd1_resources[2].start = IRQ_DM365_SDIOINT1; + mmcsd1_resources[2].start = DAVINCI_INTC_IRQ( + IRQ_DM365_SDIOINT1); davinci_mmcsd1_device.name = "da830-mmc"; } else break; @@ -230,7 +230,8 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config) if (cpu_is_davinci_dm355()) { mmcsd0_resources[0].start = DM355_MMCSD0_BASE; mmcsd0_resources[0].end = DM355_MMCSD0_BASE + SZ_4K - 1; - mmcsd0_resources[2].start = IRQ_DM355_SDIOINT0; + mmcsd0_resources[2].start = DAVINCI_INTC_IRQ( + IRQ_DM355_SDIOINT0); /* expose all 6 MMC0 signals: CLK, CMD, DATA[0..3] */ davinci_cfg_reg(DM355_MMCSD0); @@ -241,7 +242,8 @@ void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config) mmcsd0_resources[0].start = DM365_MMCSD0_BASE; mmcsd0_resources[0].end = DM365_MMCSD0_BASE + SZ_4K - 1; - mmcsd0_resources[2].start = 
IRQ_DM365_SDIOINT0; + mmcsd0_resources[2].start = DAVINCI_INTC_IRQ( + IRQ_DM365_SDIOINT0); davinci_mmcsd0_device.name = "da830-mmc"; } else if (cpu_is_davinci_dm644x()) { /* REVISIT: should this be in board-init code? */ @@ -313,13 +315,13 @@ int davinci_gpio_register(struct resource *res, int size, void *pdata) struct davinci_timer_instance davinci_timer_instance[2] = { { .base = DAVINCI_TIMER0_BASE, - .bottom_irq = IRQ_TINT0_TINT12, - .top_irq = IRQ_TINT0_TINT34, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_TINT0_TINT12), + .top_irq = DAVINCI_INTC_IRQ(IRQ_TINT0_TINT34), }, { .base = DAVINCI_TIMER1_BASE, - .bottom_irq = IRQ_TINT1_TINT12, - .top_irq = IRQ_TINT1_TINT34, + .bottom_irq = DAVINCI_INTC_IRQ(IRQ_TINT1_TINT12), + .top_irq = DAVINCI_INTC_IRQ(IRQ_TINT1_TINT34), }, }; diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 4c6e0bef4509..4a482445b9a2 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -26,13 +27,13 @@ #include #include -#include #include #include #include #include "asp.h" #include "davinci.h" +#include "irqs.h" #include "mux.h" #define DM355_UART2_BASE (IO_PHYS + 0x206000) @@ -53,7 +54,7 @@ static struct resource dm355_spi0_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DM355_SPINT0_0, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_SPINT0_0), .flags = IORESOURCE_IRQ, }, }; @@ -273,12 +274,12 @@ static struct resource edma_resources[] = { }, { .name = "edma3_ccint", - .start = IRQ_CCINT0, + .start = DAVINCI_INTC_IRQ(IRQ_CCINT0), .flags = IORESOURCE_IRQ, }, { .name = "edma3_ccerrint", - .start = IRQ_CCERRINT, + .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT), .flags = IORESOURCE_IRQ, }, /* not using (or muxing) TC*_ERR */ @@ -358,13 +359,13 @@ static struct platform_device dm355_vpss_device = { static struct resource vpfe_resources[] = { { - .start = IRQ_VDINT0, - .end = IRQ_VDINT0, + .start = DAVINCI_INTC_IRQ(IRQ_VDINT0), + .end = DAVINCI_INTC_IRQ(IRQ_VDINT0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_VDINT1, - .end = IRQ_VDINT1, + .start = DAVINCI_INTC_IRQ(IRQ_VDINT1), + .end = DAVINCI_INTC_IRQ(IRQ_VDINT1), .flags = IORESOURCE_IRQ, }, }; @@ -422,8 +423,8 @@ static struct platform_device dm355_osd_dev = { static struct resource dm355_venc_resources[] = { { - .start = IRQ_VENCINT, - .end = IRQ_VENCINT, + .start = DAVINCI_INTC_IRQ(IRQ_VENCINT), + .end = DAVINCI_INTC_IRQ(IRQ_VENCINT), .flags = IORESOURCE_IRQ, }, /* venc registers io space */ @@ -442,8 +443,8 @@ static struct resource dm355_venc_resources[] = { static struct resource dm355_v4l2_disp_resources[] = { { - .start = IRQ_VENCINT, - .end = IRQ_VENCINT, + .start = DAVINCI_INTC_IRQ(IRQ_VENCINT), + .end = DAVINCI_INTC_IRQ(IRQ_VENCINT), .flags = IORESOURCE_IRQ, }, /* venc registers io space */ @@ -547,38 +548,38 @@ static struct resource dm355_gpio_resources[] = { .flags = IORESOURCE_MEM, }, { /* interrupt */ - .start = IRQ_DM355_GPIOBNK0, - .end = IRQ_DM355_GPIOBNK0, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK0), + .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM355_GPIOBNK1, - .end = IRQ_DM355_GPIOBNK1, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK1), + .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK1), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM355_GPIOBNK2, - .end = IRQ_DM355_GPIOBNK2, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK2), + .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK2), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM355_GPIOBNK3, - 
.end = IRQ_DM355_GPIOBNK3, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK3), + .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK3), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM355_GPIOBNK4, - .end = IRQ_DM355_GPIOBNK4, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK4), + .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK4), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM355_GPIOBNK5, - .end = IRQ_DM355_GPIOBNK5, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK5), + .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK5), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM355_GPIOBNK6, - .end = IRQ_DM355_GPIOBNK6, + .start = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK6), + .end = DAVINCI_INTC_IRQ(IRQ_DM355_GPIOBNK6), .flags = IORESOURCE_IRQ, }, }; @@ -632,7 +633,7 @@ static struct davinci_timer_info dm355_timer_info = { static struct plat_serial8250_port dm355_serial0_platform_data[] = { { .mapbase = DAVINCI_UART0_BASE, - .irq = IRQ_UARTINT0, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -645,7 +646,7 @@ static struct plat_serial8250_port dm355_serial0_platform_data[] = { static struct plat_serial8250_port dm355_serial1_platform_data[] = { { .mapbase = DAVINCI_UART1_BASE, - .irq = IRQ_UARTINT1, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -658,7 +659,7 @@ static struct plat_serial8250_port dm355_serial1_platform_data[] = { static struct plat_serial8250_port dm355_serial2_platform_data[] = { { .mapbase = DM355_UART2_BASE, - .irq = IRQ_DM355_UARTINT2, + .irq = DAVINCI_INTC_IRQ(IRQ_DM355_UARTINT2), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -704,10 +705,6 @@ static const struct davinci_soc_info davinci_soc_info_dm355 = { .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE, .pinmux_pins = dm355_pins, .pinmux_pins_num = ARRAY_SIZE(dm355_pins), - .intc_base = DAVINCI_ARM_INTC_BASE, - .intc_type = DAVINCI_INTC_TYPE_AINTC, - .intc_irq_prios = dm355_default_priorities, - .intc_irq_num = DAVINCI_N_AINTC_IRQ, .timer_info = &dm355_timer_info, .sram_dma = 0x00010000, .sram_len = SZ_32K, @@ -793,6 +790,21 @@ int __init dm355_init_video(struct vpfe_config *vpfe_cfg, return 0; } +static const struct davinci_aintc_config dm355_aintc_config = { + .reg = { + .start = DAVINCI_ARM_INTC_BASE, + .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1, + .flags = IORESOURCE_MEM, + }, + .num_irqs = 64, + .prios = dm355_default_priorities, +}; + +void __init dm355_init_irq(void) +{ + davinci_aintc_init(&dm355_aintc_config); +} + static int __init dm355_init_devices(void) { struct platform_device *edma_pdev; diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 01fb2b0c82de..8e0a77315add 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -31,13 +32,13 @@ #include #include -#include #include #include #include #include "asp.h" #include "davinci.h" +#include "irqs.h" #include "mux.h" #define DM365_REF_FREQ 24000000 /* 24 MHz on the DM365 EVM */ @@ -224,7 +225,7 @@ static struct resource dm365_spi0_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DM365_SPIINT0_0, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_SPIINT0_0), .flags = IORESOURCE_IRQ, }, }; @@ -266,43 +267,43 @@ static struct resource dm365_gpio_resources[] = { .flags = IORESOURCE_MEM, }, { /* interrupt */ - .start = IRQ_DM365_GPIO0, - .end = IRQ_DM365_GPIO0, + .start = 
DAVINCI_INTC_IRQ(IRQ_DM365_GPIO0), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_GPIO1, - .end = IRQ_DM365_GPIO1, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO1), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO1), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_GPIO2, - .end = IRQ_DM365_GPIO2, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO2), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO2), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_GPIO3, - .end = IRQ_DM365_GPIO3, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO3), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO3), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_GPIO4, - .end = IRQ_DM365_GPIO4, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO4), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO4), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_GPIO5, - .end = IRQ_DM365_GPIO5, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO5), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO5), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_GPIO6, - .end = IRQ_DM365_GPIO6, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO6), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO6), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_GPIO7, - .end = IRQ_DM365_GPIO7, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO7), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_GPIO7), .flags = IORESOURCE_IRQ, }, }; @@ -336,23 +337,23 @@ static struct resource dm365_emac_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DM365_EMAC_RXTHRESH, - .end = IRQ_DM365_EMAC_RXTHRESH, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXTHRESH), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXTHRESH), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_EMAC_RXPULSE, - .end = IRQ_DM365_EMAC_RXPULSE, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXPULSE), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_RXPULSE), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_EMAC_TXPULSE, - .end = IRQ_DM365_EMAC_TXPULSE, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_TXPULSE), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_TXPULSE), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM365_EMAC_MISCPULSE, - .end = IRQ_DM365_EMAC_MISCPULSE, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_MISCPULSE), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_EMAC_MISCPULSE), .flags = IORESOURCE_IRQ, }, }; @@ -518,12 +519,12 @@ static struct resource edma_resources[] = { }, { .name = "edma3_ccint", - .start = IRQ_CCINT0, + .start = DAVINCI_INTC_IRQ(IRQ_CCINT0), .flags = IORESOURCE_IRQ, }, { .name = "edma3_ccerrint", - .start = IRQ_CCERRINT, + .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT), .flags = IORESOURCE_IRQ, }, /* not using TC*_ERR */ @@ -597,7 +598,7 @@ static struct resource dm365_rtc_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DM365_RTCINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_RTCINT), .flags = IORESOURCE_IRQ, }, }; @@ -627,8 +628,8 @@ static struct resource dm365_ks_resources[] = { }, { /* interrupt */ - .start = IRQ_DM365_KEYINT, - .end = IRQ_DM365_KEYINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM365_KEYINT), + .end = DAVINCI_INTC_IRQ(IRQ_DM365_KEYINT), .flags = IORESOURCE_IRQ, }, }; @@ -669,7 +670,7 @@ static struct davinci_timer_info dm365_timer_info = { static struct plat_serial8250_port dm365_serial0_platform_data[] = { { .mapbase = DAVINCI_UART0_BASE, - .irq = IRQ_UARTINT0, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -682,7 +683,7 @@ static struct plat_serial8250_port dm365_serial0_platform_data[] = { static struct plat_serial8250_port dm365_serial1_platform_data[] = { { .mapbase = 
DM365_UART1_BASE, - .irq = IRQ_UARTINT1, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -721,10 +722,6 @@ static const struct davinci_soc_info davinci_soc_info_dm365 = { .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE, .pinmux_pins = dm365_pins, .pinmux_pins_num = ARRAY_SIZE(dm365_pins), - .intc_base = DAVINCI_ARM_INTC_BASE, - .intc_type = DAVINCI_INTC_TYPE_AINTC, - .intc_irq_prios = dm365_default_priorities, - .intc_irq_num = DAVINCI_N_AINTC_IRQ, .timer_info = &dm365_timer_info, .emac_pdata = &dm365_emac_pdata, .sram_dma = 0x00010000, @@ -822,13 +819,13 @@ static struct platform_device dm365_vpss_device = { static struct resource vpfe_resources[] = { { - .start = IRQ_VDINT0, - .end = IRQ_VDINT0, + .start = DAVINCI_INTC_IRQ(IRQ_VDINT0), + .end = DAVINCI_INTC_IRQ(IRQ_VDINT0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_VDINT1, - .end = IRQ_VDINT1, + .start = DAVINCI_INTC_IRQ(IRQ_VDINT1), + .end = DAVINCI_INTC_IRQ(IRQ_VDINT1), .flags = IORESOURCE_IRQ, }, }; @@ -909,8 +906,8 @@ static struct platform_device dm365_osd_dev = { static struct resource dm365_venc_resources[] = { { - .start = IRQ_VENCINT, - .end = IRQ_VENCINT, + .start = DAVINCI_INTC_IRQ(IRQ_VENCINT), + .end = DAVINCI_INTC_IRQ(IRQ_VENCINT), .flags = IORESOURCE_IRQ, }, /* venc registers io space */ @@ -929,8 +926,8 @@ static struct resource dm365_venc_resources[] = { static struct resource dm365_v4l2_disp_resources[] = { { - .start = IRQ_VENCINT, - .end = IRQ_VENCINT, + .start = DAVINCI_INTC_IRQ(IRQ_VENCINT), + .end = DAVINCI_INTC_IRQ(IRQ_VENCINT), .flags = IORESOURCE_IRQ, }, /* venc registers io space */ @@ -1052,6 +1049,21 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg, return 0; } +static const struct davinci_aintc_config dm365_aintc_config = { + .reg = { + .start = DAVINCI_ARM_INTC_BASE, + .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1, + .flags = IORESOURCE_MEM, + }, + .num_irqs = 64, + .prios = dm365_default_priorities, +}; + +void __init dm365_init_irq(void) +{ + davinci_aintc_init(&dm365_aintc_config); +} + static int __init dm365_init_devices(void) { struct platform_device *edma_pdev; diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 38f92b7d413e..cecc7ceb8d34 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -23,13 +24,13 @@ #include #include -#include #include #include #include #include "asp.h" #include "davinci.h" +#include "irqs.h" #include "mux.h" /* @@ -59,8 +60,8 @@ static struct resource dm644x_emac_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_EMACINT, - .end = IRQ_EMACINT, + .start = DAVINCI_INTC_IRQ(IRQ_EMACINT), + .end = DAVINCI_INTC_IRQ(IRQ_EMACINT), .flags = IORESOURCE_IRQ, }, }; @@ -260,12 +261,12 @@ static struct resource edma_resources[] = { }, { .name = "edma3_ccint", - .start = IRQ_CCINT0, + .start = DAVINCI_INTC_IRQ(IRQ_CCINT0), .flags = IORESOURCE_IRQ, }, { .name = "edma3_ccerrint", - .start = IRQ_CCERRINT, + .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT), .flags = IORESOURCE_IRQ, }, /* not using TC*_ERR */ @@ -330,13 +331,13 @@ static struct platform_device dm644x_vpss_device = { static struct resource dm644x_vpfe_resources[] = { { - .start = IRQ_VDINT0, - .end = IRQ_VDINT0, + .start = DAVINCI_INTC_IRQ(IRQ_VDINT0), + .end = DAVINCI_INTC_IRQ(IRQ_VDINT0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_VDINT1, - .end = IRQ_VDINT1, + .start = DAVINCI_INTC_IRQ(IRQ_VDINT1), + .end 
= DAVINCI_INTC_IRQ(IRQ_VDINT1), .flags = IORESOURCE_IRQ, }, }; @@ -442,8 +443,8 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type, static struct resource dm644x_v4l2_disp_resources[] = { { - .start = IRQ_VENCINT, - .end = IRQ_VENCINT, + .start = DAVINCI_INTC_IRQ(IRQ_VENCINT), + .end = DAVINCI_INTC_IRQ(IRQ_VENCINT), .flags = IORESOURCE_IRQ, }, }; @@ -491,28 +492,28 @@ static struct resource dm644_gpio_resources[] = { .flags = IORESOURCE_MEM, }, { /* interrupt */ - .start = IRQ_GPIOBNK0, - .end = IRQ_GPIOBNK0, + .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK0), + .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_GPIOBNK1, - .end = IRQ_GPIOBNK1, + .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK1), + .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK1), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_GPIOBNK2, - .end = IRQ_GPIOBNK2, + .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK2), + .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK2), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_GPIOBNK3, - .end = IRQ_GPIOBNK3, + .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK3), + .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK3), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_GPIOBNK4, - .end = IRQ_GPIOBNK4, + .start = DAVINCI_INTC_IRQ(IRQ_GPIOBNK4), + .end = DAVINCI_INTC_IRQ(IRQ_GPIOBNK4), .flags = IORESOURCE_IRQ, }, }; @@ -573,7 +574,7 @@ static struct davinci_timer_info dm644x_timer_info = { static struct plat_serial8250_port dm644x_serial0_platform_data[] = { { .mapbase = DAVINCI_UART0_BASE, - .irq = IRQ_UARTINT0, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -586,7 +587,7 @@ static struct plat_serial8250_port dm644x_serial0_platform_data[] = { static struct plat_serial8250_port dm644x_serial1_platform_data[] = { { .mapbase = DAVINCI_UART1_BASE, - .irq = IRQ_UARTINT1, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -599,7 +600,7 @@ static struct plat_serial8250_port dm644x_serial1_platform_data[] = { static struct plat_serial8250_port dm644x_serial2_platform_data[] = { { .mapbase = DAVINCI_UART2_BASE, - .irq = IRQ_UARTINT2, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT2), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM, @@ -645,10 +646,6 @@ static const struct davinci_soc_info davinci_soc_info_dm644x = { .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE, .pinmux_pins = dm644x_pins, .pinmux_pins_num = ARRAY_SIZE(dm644x_pins), - .intc_base = DAVINCI_ARM_INTC_BASE, - .intc_type = DAVINCI_INTC_TYPE_AINTC, - .intc_irq_prios = dm644x_default_priorities, - .intc_irq_num = DAVINCI_N_AINTC_IRQ, .timer_info = &dm644x_timer_info, .emac_pdata = &dm644x_emac_pdata, .sram_dma = 0x00008000, @@ -729,6 +726,21 @@ int __init dm644x_init_video(struct vpfe_config *vpfe_cfg, return 0; } +static const struct davinci_aintc_config dm644x_aintc_config = { + .reg = { + .start = DAVINCI_ARM_INTC_BASE, + .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1, + .flags = IORESOURCE_MEM, + }, + .num_irqs = 64, + .prios = dm644x_default_priorities, +}; + +void __init dm644x_init_irq(void) +{ + davinci_aintc_init(&dm644x_aintc_config); +} + void __init dm644x_init_devices(void) { struct platform_device *edma_pdev; diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 7dc54b2a610f..f33392f77a03 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -24,13 +25,13 @@ #include 
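
Each converted SoC file now describes its ARM interrupt controller (AINTC) as plain data and exposes a small init helper instead of filling the intc_* fields of davinci_soc_info: dm355, dm365 and dm644x above, dm646x below, differing only in the priority table they pass. A generic sketch of the pattern; dmxxx is a placeholder, and the machine-descriptor note is an assumption about how board files consume the helper rather than something shown in these hunks:

static const struct davinci_aintc_config dmxxx_aintc_config = {
	.reg = {
		.start	= DAVINCI_ARM_INTC_BASE,
		.end	= DAVINCI_ARM_INTC_BASE + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	.num_irqs	= 64,
	.prios		= dmxxx_default_priorities,
};

void __init dmxxx_init_irq(void)
{
	davinci_aintc_init(&dmxxx_aintc_config);
}

/* presumably wired up by board code as the machine descriptor's .init_irq */
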
#include -#include #include #include #include #include "asp.h" #include "davinci.h" +#include "irqs.h" #include "mux.h" #define DAVINCI_VPIF_BASE (0x01C12000) @@ -62,23 +63,23 @@ static struct resource dm646x_emac_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DM646X_EMACRXTHINT, - .end = IRQ_DM646X_EMACRXTHINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXTHINT), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXTHINT), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM646X_EMACRXINT, - .end = IRQ_DM646X_EMACRXINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXINT), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACRXINT), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM646X_EMACTXINT, - .end = IRQ_DM646X_EMACTXINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACTXINT), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACTXINT), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM646X_EMACMISCINT, - .end = IRQ_DM646X_EMACMISCINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACMISCINT), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_EMACMISCINT), .flags = IORESOURCE_IRQ, }, }; @@ -273,12 +274,12 @@ static struct resource edma_resources[] = { }, { .name = "edma3_ccint", - .start = IRQ_CCINT0, + .start = DAVINCI_INTC_IRQ(IRQ_CCINT0), .flags = IORESOURCE_IRQ, }, { .name = "edma3_ccerrint", - .start = IRQ_CCERRINT, + .start = DAVINCI_INTC_IRQ(IRQ_CCERRINT), .flags = IORESOURCE_IRQ, }, /* not using TC*_ERR */ @@ -315,12 +316,12 @@ static struct resource dm646x_mcasp0_resources[] = { }, { .name = "tx", - .start = IRQ_DM646X_MCASP0TXINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_MCASP0TXINT), .flags = IORESOURCE_IRQ, }, { .name = "rx", - .start = IRQ_DM646X_MCASP0RXINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_MCASP0RXINT), .flags = IORESOURCE_IRQ, }, }; @@ -341,7 +342,7 @@ static struct resource dm646x_mcasp1_resources[] = { }, { .name = "tx", - .start = IRQ_DM646X_MCASP1TXINT, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_MCASP1TXINT), .flags = IORESOURCE_IRQ, }, }; @@ -388,13 +389,13 @@ static struct platform_device vpif_dev = { static struct resource vpif_display_resource[] = { { - .start = IRQ_DM646X_VP_VERTINT2, - .end = IRQ_DM646X_VP_VERTINT2, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT2), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT2), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM646X_VP_VERTINT3, - .end = IRQ_DM646X_VP_VERTINT3, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT3), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT3), .flags = IORESOURCE_IRQ, }, }; @@ -412,13 +413,13 @@ static struct platform_device vpif_display_dev = { static struct resource vpif_capture_resource[] = { { - .start = IRQ_DM646X_VP_VERTINT0, - .end = IRQ_DM646X_VP_VERTINT0, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT0), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM646X_VP_VERTINT1, - .end = IRQ_DM646X_VP_VERTINT1, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT1), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_VP_VERTINT1), .flags = IORESOURCE_IRQ, }, }; @@ -441,18 +442,18 @@ static struct resource dm646x_gpio_resources[] = { .flags = IORESOURCE_MEM, }, { /* interrupt */ - .start = IRQ_DM646X_GPIOBNK0, - .end = IRQ_DM646X_GPIOBNK0, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK0), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK0), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM646X_GPIOBNK1, - .end = IRQ_DM646X_GPIOBNK1, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK1), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK1), .flags = IORESOURCE_IRQ, }, { - .start = IRQ_DM646X_GPIOBNK2, - .end = 
IRQ_DM646X_GPIOBNK2, + .start = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK2), + .end = DAVINCI_INTC_IRQ(IRQ_DM646X_GPIOBNK2), .flags = IORESOURCE_IRQ, }, }; @@ -513,7 +514,7 @@ static struct davinci_timer_info dm646x_timer_info = { static struct plat_serial8250_port dm646x_serial0_platform_data[] = { { .mapbase = DAVINCI_UART0_BASE, - .irq = IRQ_UARTINT0, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT0), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM32, @@ -526,7 +527,7 @@ static struct plat_serial8250_port dm646x_serial0_platform_data[] = { static struct plat_serial8250_port dm646x_serial1_platform_data[] = { { .mapbase = DAVINCI_UART1_BASE, - .irq = IRQ_UARTINT1, + .irq = DAVINCI_INTC_IRQ(IRQ_UARTINT1), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM32, @@ -539,7 +540,7 @@ static struct plat_serial8250_port dm646x_serial1_platform_data[] = { static struct plat_serial8250_port dm646x_serial2_platform_data[] = { { .mapbase = DAVINCI_UART2_BASE, - .irq = IRQ_DM646X_UARTINT2, + .irq = DAVINCI_INTC_IRQ(IRQ_DM646X_UARTINT2), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = UPIO_MEM32, @@ -585,10 +586,6 @@ static const struct davinci_soc_info davinci_soc_info_dm646x = { .pinmux_base = DAVINCI_SYSTEM_MODULE_BASE, .pinmux_pins = dm646x_pins, .pinmux_pins_num = ARRAY_SIZE(dm646x_pins), - .intc_base = DAVINCI_ARM_INTC_BASE, - .intc_type = DAVINCI_INTC_TYPE_AINTC, - .intc_irq_prios = dm646x_default_priorities, - .intc_irq_num = DAVINCI_N_AINTC_IRQ, .timer_info = &dm646x_timer_info, .emac_pdata = &dm646x_emac_pdata, .sram_dma = 0x10010000, @@ -690,6 +687,21 @@ void __init dm646x_register_clocks(void) platform_device_register(&dm646x_pll2_device); } +static const struct davinci_aintc_config dm646x_aintc_config = { + .reg = { + .start = DAVINCI_ARM_INTC_BASE, + .end = DAVINCI_ARM_INTC_BASE + SZ_4K - 1, + .flags = IORESOURCE_MEM, + }, + .num_irqs = 64, + .prios = dm646x_default_priorities, +}; + +void __init dm646x_init_irq(void) +{ + davinci_aintc_init(&dm646x_aintc_config); +} + static int __init dm646x_init_devices(void) { int ret = 0; diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h index b577e13a9c23..9526e5da0d33 100644 --- a/arch/arm/mach-davinci/include/mach/common.h +++ b/arch/arm/mach-davinci/include/mach/common.h @@ -17,11 +17,12 @@ #include #include -void davinci_timer_init(struct clk *clk); +#include + +#define DAVINCI_INTC_START NR_IRQS +#define DAVINCI_INTC_IRQ(_irqnum) (DAVINCI_INTC_START + (_irqnum)) -extern void davinci_irq_init(void); -extern void __iomem *davinci_intc_base; -extern int davinci_intc_type; +void davinci_timer_init(struct clk *clk); struct davinci_timer_instance { u32 base; @@ -57,11 +58,6 @@ struct davinci_soc_info { u32 pinmux_base; const struct mux_config *pinmux_pins; unsigned long pinmux_pins_num; - u32 intc_base; - int intc_type; - u8 *intc_irq_prios; - unsigned long intc_irq_num; - u32 *intc_host_map; struct davinci_timer_info *timer_info; int gpio_type; u32 gpio_base; diff --git a/arch/arm/mach-davinci/include/mach/cpufreq.h b/arch/arm/mach-davinci/include/mach/cpufreq.h deleted file mode 100644 index 3c089cfb6cd6..000000000000 --- a/arch/arm/mach-davinci/include/mach/cpufreq.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * TI DaVinci CPUFreq platform support. - * - * Copyright (C) 2009 Texas Instruments, Inc. 
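
The two macros added to mach/common.h above are the whole trick behind this conversion: DAVINCI_INTC_IRQ() simply offsets a hardware interrupt number by NR_IRQS, presumably so the statically numbered platform resources land in a range the new irqchip driver can claim without colliding with Linux IRQ numbers already in use. A worked example, using the IRQ_VDINT0/IRQ_VDINT1 values (0 and 1) visible in the irqs.h rename further down:

#define DAVINCI_INTC_START		NR_IRQS
#define DAVINCI_INTC_IRQ(_irqnum)	(DAVINCI_INTC_START + (_irqnum))

/*
 * DAVINCI_INTC_IRQ(IRQ_VDINT0) == NR_IRQS + 0
 * DAVINCI_INTC_IRQ(IRQ_VDINT1) == NR_IRQS + 1
 */
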
http://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef _MACH_DAVINCI_CPUFREQ_H -#define _MACH_DAVINCI_CPUFREQ_H - -#include - -struct davinci_cpufreq_config { - struct cpufreq_frequency_table *freq_table; - int (*set_voltage) (unsigned int index); - int (*init) (void); -}; - -#endif diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h index ab4a57f433f4..1618b30661a9 100644 --- a/arch/arm/mach-davinci/include/mach/da8xx.h +++ b/arch/arm/mach-davinci/include/mach/da8xx.h @@ -88,10 +88,12 @@ extern unsigned int da850_max_speed; #define DA8XX_ARM_RAM_BASE 0xffff0000 void da830_init(void); +void da830_init_irq(void); void da830_init_time(void); void da830_register_clocks(void); void da850_init(void); +void da850_init_irq(void); void da850_init_time(void); void da850_register_clocks(void); diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S deleted file mode 100644 index cf5f573eb5fd..000000000000 --- a/arch/arm/mach-davinci/include/mach/entry-macro.S +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Low-level IRQ helper macros for TI DaVinci-based platforms - * - * Author: Kevin Hilman, MontaVista Software, Inc. - * - * 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. - */ -#include - - .macro get_irqnr_preamble, base, tmp - ldr \base, =davinci_intc_base - ldr \base, [\base] - .endm - - .macro get_irqnr_and_base, irqnr, irqstat, base, tmp -#if defined(CONFIG_AINTC) && defined(CONFIG_CP_INTC) - ldr \tmp, =davinci_intc_type - ldr \tmp, [\tmp] - cmp \tmp, #DAVINCI_INTC_TYPE_CP_INTC - beq 1001f -#endif -#if defined(CONFIG_AINTC) - ldr \tmp, [\base, #0x14] - movs \tmp, \tmp, lsr #2 - sub \irqnr, \tmp, #1 - b 1002f -#endif -#if defined(CONFIG_CP_INTC) -1001: ldr \irqnr, [\base, #0x80] /* get irq number */ - mov \tmp, \irqnr, lsr #31 - and \irqnr, \irqnr, #0xff /* irq is in bits 0-9 */ - and \tmp, \tmp, #0x1 - cmp \tmp, #0x1 -#endif -1002: - .endm diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c deleted file mode 100644 index 952dc126c390..000000000000 --- a/arch/arm/mach-davinci/irq.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Interrupt handler for DaVinci boards. - * - * Copyright (C) 2006 Texas Instruments. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define FIQ_REG0_OFFSET 0x0000 -#define FIQ_REG1_OFFSET 0x0004 -#define IRQ_REG0_OFFSET 0x0008 -#define IRQ_REG1_OFFSET 0x000C -#define IRQ_ENT_REG0_OFFSET 0x0018 -#define IRQ_ENT_REG1_OFFSET 0x001C -#define IRQ_INCTL_REG_OFFSET 0x0020 -#define IRQ_EABASE_REG_OFFSET 0x0024 -#define IRQ_INTPRI0_REG_OFFSET 0x0030 -#define IRQ_INTPRI7_REG_OFFSET 0x004C - -static inline void davinci_irq_writel(unsigned long value, int offset) -{ - __raw_writel(value, davinci_intc_base + offset); -} - -static __init void -davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) -{ - struct irq_chip_generic *gc; - struct irq_chip_type *ct; - - gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); - if (!gc) { - pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", - __func__, irq_start); - return; - } - - ct = gc->chip_types; - ct->chip.irq_ack = irq_gc_ack_set_bit; - ct->chip.irq_mask = irq_gc_mask_clr_bit; - ct->chip.irq_unmask = irq_gc_mask_set_bit; - - ct->regs.ack = IRQ_REG0_OFFSET; - ct->regs.mask = IRQ_ENT_REG0_OFFSET; - irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, - IRQ_NOREQUEST | IRQ_NOPROBE, 0); -} - -/* ARM Interrupt Controller Initialization */ -void __init davinci_irq_init(void) -{ - unsigned i, j; - const u8 *davinci_def_priorities = davinci_soc_info.intc_irq_prios; - - davinci_intc_type = DAVINCI_INTC_TYPE_AINTC; - davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_4K); - if (WARN_ON(!davinci_intc_base)) - return; - - /* Clear all interrupt requests */ - davinci_irq_writel(~0x0, FIQ_REG0_OFFSET); - davinci_irq_writel(~0x0, FIQ_REG1_OFFSET); - davinci_irq_writel(~0x0, IRQ_REG0_OFFSET); - davinci_irq_writel(~0x0, IRQ_REG1_OFFSET); - - /* Disable all interrupts */ - davinci_irq_writel(0x0, IRQ_ENT_REG0_OFFSET); - davinci_irq_writel(0x0, IRQ_ENT_REG1_OFFSET); - - /* Interrupts disabled immediately, IRQ entry reflects all */ - davinci_irq_writel(0x0, IRQ_INCTL_REG_OFFSET); - - /* we don't use the hardware vector table, just its entry addresses */ - davinci_irq_writel(0, IRQ_EABASE_REG_OFFSET); - - /* Clear all interrupt requests */ - davinci_irq_writel(~0x0, FIQ_REG0_OFFSET); - davinci_irq_writel(~0x0, FIQ_REG1_OFFSET); - davinci_irq_writel(~0x0, IRQ_REG0_OFFSET); - davinci_irq_writel(~0x0, IRQ_REG1_OFFSET); - - for (i = IRQ_INTPRI0_REG_OFFSET; i <= IRQ_INTPRI7_REG_OFFSET; i += 4) { - u32 pri; - - for (j = 0, pri = 0; j < 32; j += 4, davinci_def_priorities++) - pri |= (*davinci_def_priorities & 0x07) << j; - davinci_irq_writel(pri, i); - } - - for (i = 0, j = 0; i < davinci_soc_info.intc_irq_num; i += 32, j += 0x04) - davinci_alloc_gc(davinci_intc_base + j, i, 32); - - irq_set_handler(IRQ_TINT1_TINT34, handle_level_irq); -} diff --git a/arch/arm/mach-davinci/include/mach/irqs.h b/arch/arm/mach-davinci/irqs.h similarity index 98% rename from arch/arm/mach-davinci/include/mach/irqs.h rename to arch/arm/mach-davinci/irqs.h index edb2ca62321a..8f9fc7a56ce8 100644 --- a/arch/arm/mach-davinci/include/mach/irqs.h +++ b/arch/arm/mach-davinci/irqs.h @@ -30,9 +30,6 @@ /* Base address */ #define DAVINCI_ARM_INTC_BASE 0x01C48000 -#define DAVINCI_INTC_TYPE_AINTC 0 -#define DAVINCI_INTC_TYPE_CP_INTC 1 - /* Interrupt lines */ #define IRQ_VDINT0 
0 #define IRQ_VDINT1 1 @@ -404,6 +401,5 @@ /* da850 currently has the most gpio pins (144) */ #define DAVINCI_N_GPIO 144 /* da850 currently has the most irqs so use DA850_N_CP_INTC_IRQ */ -#define NR_IRQS (DA850_N_CP_INTC_IRQ + DAVINCI_N_GPIO) #endif /* __ASM_ARCH_IRQS_H */ diff --git a/arch/arm/mach-davinci/usb-da8xx.c b/arch/arm/mach-davinci/usb-da8xx.c index c17ce66a3d95..25f21ee86f1a 100644 --- a/arch/arm/mach-davinci/usb-da8xx.c +++ b/arch/arm/mach-davinci/usb-da8xx.c @@ -18,7 +18,8 @@ #include #include #include -#include + +#include "irqs.h" #define DA8XX_USB0_BASE 0x01e00000 #define DA8XX_USB1_BASE 0x01e25000 @@ -70,7 +71,7 @@ static struct resource da8xx_usb20_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_DA8XX_USB_INT, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_USB_INT), .flags = IORESOURCE_IRQ, .name = "mc", }, @@ -105,8 +106,8 @@ static struct resource da8xx_usb11_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = IRQ_DA8XX_IRQN, - .end = IRQ_DA8XX_IRQN, + .start = DAVINCI_INTC_IRQ(IRQ_DA8XX_IRQN), + .end = DAVINCI_INTC_IRQ(IRQ_DA8XX_IRQN), .flags = IORESOURCE_IRQ, }, }; diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c index 31ed7aa47227..dd8db61cdd1c 100644 --- a/arch/arm/mach-davinci/usb.c +++ b/arch/arm/mach-davinci/usb.c @@ -2,16 +2,16 @@ /* * USB */ +#include #include #include -#include - +#include #include #include -#include #include -#include + +#include "irqs.h" #define DAVINCI_USB_OTG_BASE 0x01c64000 @@ -38,7 +38,7 @@ static struct resource usb_resources[] = { .flags = IORESOURCE_MEM, }, { - .start = IRQ_USBINT, + .start = DAVINCI_INTC_IRQ(IRQ_USBINT), .flags = IORESOURCE_IRQ, .name = "mc" }, @@ -70,8 +70,9 @@ void __init davinci_setup_usb(unsigned mA, unsigned potpgt_ms) if (cpu_is_davinci_dm646x()) { /* Override the defaults as DM6467 uses different IRQs. 
*/ - usb_dev.resource[1].start = IRQ_DM646X_USBINT; - usb_dev.resource[2].start = IRQ_DM646X_USBDMAINT; + usb_dev.resource[1].start = DAVINCI_INTC_IRQ(IRQ_DM646X_USBINT); + usb_dev.resource[2].start = DAVINCI_INTC_IRQ( + IRQ_DM646X_USBDMAINT); } else /* other devices don't have dedicated CPPI IRQ */ usb_dev.num_resources = 2; diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c index c6a533699b00..85b74ac943f0 100644 --- a/arch/arm/mach-ep93xx/ts72xx.c +++ b/arch/arm/mach-ep93xx/ts72xx.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index c39ffd2e2fe6..b6da7edbbd2f 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -336,9 +336,9 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) /* wait max 10 ms until cpu1 is on */ while (exynos_cpu_power_state(core_id) != S5P_CORE_LOCAL_PWR_EN) { - if (timeout-- == 0) + if (timeout == 0) break; - + timeout--; mdelay(1); } diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 8af2f7e91d13..35ff620537e6 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -29,9 +29,10 @@ obj-$(CONFIG_SOC_IMX6SL) += cpuidle-imx6sl.o obj-$(CONFIG_SOC_IMX6SLL) += cpuidle-imx6sx.o obj-$(CONFIG_SOC_IMX6SX) += cpuidle-imx6sx.o obj-$(CONFIG_SOC_IMX6UL) += cpuidle-imx6sx.o +obj-$(CONFIG_SOC_IMX7ULP) += cpuidle-imx7ulp.o endif -ifdef CONFIG_SND_IMX_SOC +ifdef CONFIG_SND_SOC_IMX_PCM_FIQ obj-y += ssi-fiq.o obj-y += ssi-fiq-ksym.o endif diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index bc915e5b4d56..c51764a85fd7 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h @@ -72,6 +72,15 @@ enum mxc_cpu_pwr_mode { STOP_POWER_OFF, /* STOP + SRPG */ }; +enum ulp_cpu_pwr_mode { + ULP_PM_HSRUN, /* High speed run mode */ + ULP_PM_RUN, /* Run mode */ + ULP_PM_WAIT, /* Wait mode */ + ULP_PM_STOP, /* Stop mode */ + ULP_PM_VLPS, /* Very low power stop mode */ + ULP_PM_VLLS, /* very low leakage stop mode */ +}; + void imx_enable_cpu(int cpu, bool enable); void imx_set_cpu_jump(int cpu, void *jump_addr); u32 imx_get_cpu_arg(int cpu); @@ -98,6 +107,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode); void imx6_set_int_mem_clk_lpm(bool enable); void imx6sl_set_wait_clk(bool enter); int imx_mmdc_get_ddr_type(void); +int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode); void imx_cpu_die(unsigned int cpu); int imx_cpu_kill(unsigned int cpu); diff --git a/arch/arm/mach-imx/cpuidle-imx7ulp.c b/arch/arm/mach-imx/cpuidle-imx7ulp.c new file mode 100644 index 000000000000..ca86c967d19e --- /dev/null +++ b/arch/arm/mach-imx/cpuidle-imx7ulp.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017-2018 NXP + * Anson Huang + */ + +#include +#include +#include + +#include "common.h" +#include "cpuidle.h" + +static int imx7ulp_enter_wait(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + if (index == 1) + imx7ulp_set_lpm(ULP_PM_WAIT); + else + imx7ulp_set_lpm(ULP_PM_STOP); + + cpu_do_idle(); + + imx7ulp_set_lpm(ULP_PM_RUN); + + return index; +} + +static struct cpuidle_driver imx7ulp_cpuidle_driver = { + .name = "imx7ulp_cpuidle", + .owner = THIS_MODULE, + .states = { + /* WFI */ + ARM_CPUIDLE_WFI_STATE, + /* WAIT */ + { + .exit_latency = 50, + .target_residency = 75, + .enter = imx7ulp_enter_wait, + .name = "WAIT", + .desc = "PSTOP2", + }, + /* STOP */ + { + .exit_latency = 100, + .target_residency = 150, + .enter = imx7ulp_enter_wait, + .name = "STOP", + .desc = "PSTOP1", + }, + }, + .state_count = 3, + .safe_state_index = 0, +}; + +int __init imx7ulp_cpuidle_init(void) +{ + return cpuidle_register(&imx7ulp_cpuidle_driver, NULL); +} diff --git a/arch/arm/mach-imx/cpuidle.h b/arch/arm/mach-imx/cpuidle.h index f9140128ba05..7694c8f810a4 100644 --- a/arch/arm/mach-imx/cpuidle.h +++ b/arch/arm/mach-imx/cpuidle.h @@ -15,6 +15,7 @@ extern int imx5_cpuidle_init(void); extern int imx6q_cpuidle_init(void); extern int imx6sl_cpuidle_init(void); extern int imx6sx_cpuidle_init(void); +extern int imx7ulp_cpuidle_init(void); #else static inline int imx5_cpuidle_init(void) { @@ -32,4 +33,8 @@ static inline int imx6sx_cpuidle_init(void) { return 0; } +static inline int imx7ulp_cpuidle_init(void) +{ + return 0; +} #endif diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c index 5169dfba9718..07d4fcfe5c2e 100644 --- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c +++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c @@ -258,8 +258,7 @@ static void __init visstrim_analog_camera_init(void) return; dma_declare_coherent_memory(&pdev->dev, mx2_camera_base, - mx2_camera_base, MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_EXCLUSIVE); + mx2_camera_base, MX2_CAMERA_BUF_SIZE); } static void __init visstrim_reserve(void) @@ -445,8 +444,7 @@ static void __init visstrim_coda_init(void) dma_declare_coherent_memory(&pdev->dev, mx2_camera_base + MX2_CAMERA_BUF_SIZE, mx2_camera_base + MX2_CAMERA_BUF_SIZE, - MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_EXCLUSIVE); + MX2_CAMERA_BUF_SIZE); } /* DMA deinterlace */ @@ -465,8 +463,7 @@ static void __init visstrim_deinterlace_init(void) dma_declare_coherent_memory(&pdev->dev, mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, mx2_camera_base + 2 * MX2_CAMERA_BUF_SIZE, - MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_EXCLUSIVE); + MX2_CAMERA_BUF_SIZE); } /* Emma-PrP for format conversion */ @@ -485,8 +482,7 @@ static void __init visstrim_emmaprp_init(void) */ ret = dma_declare_coherent_memory(&pdev->dev, mx2_camera_base, mx2_camera_base, - MX2_CAMERA_BUF_SIZE, - DMA_MEMORY_EXCLUSIVE); + MX2_CAMERA_BUF_SIZE); if (ret) pr_err("Failed to declare memory for emmaprp\n"); } diff --git a/arch/arm/mach-imx/mach-imx7ulp.c b/arch/arm/mach-imx/mach-imx7ulp.c index 33937ebf66b5..11ac71aaf965 100644 --- a/arch/arm/mach-imx/mach-imx7ulp.c +++ b/arch/arm/mach-imx/mach-imx7ulp.c @@ -6,17 +6,57 @@ */ #include +#include #include +#include #include #include "common.h" +#include "cpuidle.h" #include "hardware.h" +#define SIM_JTAG_ID_REG 0x8c + +static void __init imx7ulp_set_revision(void) +{ + struct regmap *sim; + u32 revision; + + sim = syscon_regmap_lookup_by_compatible("fsl,imx7ulp-sim"); + if (IS_ERR(sim)) { + pr_warn("failed to find 
fsl,imx7ulp-sim regmap!\n"); + return; + } + + if (regmap_read(sim, SIM_JTAG_ID_REG, &revision)) { + pr_warn("failed to read sim regmap!\n"); + return; + } + + /* + * bit[31:28] of JTAG_ID register defines revision as below from B0: + * 0001 B0 + * 0010 B1 + */ + switch (revision >> 28) { + case 1: + imx_set_soc_revision(IMX_CHIP_REVISION_2_0); + break; + case 2: + imx_set_soc_revision(IMX_CHIP_REVISION_2_1); + break; + default: + imx_set_soc_revision(IMX_CHIP_REVISION_1_0); + break; + } +} + static void __init imx7ulp_init_machine(void) { imx7ulp_pm_init(); mxc_set_cpu_type(MXC_CPU_IMX7ULP); + imx7ulp_set_revision(); of_platform_default_populate(NULL, NULL, imx_soc_device_init()); } @@ -25,7 +65,13 @@ static const char *const imx7ulp_dt_compat[] __initconst = { NULL, }; +static void __init imx7ulp_init_late(void) +{ + imx7ulp_cpuidle_init(); +} + DT_MACHINE_START(IMX7ulp, "Freescale i.MX7ULP (Device Tree)") .init_machine = imx7ulp_init_machine, .dt_compat = imx7ulp_dt_compat, + .init_late = imx7ulp_init_late, MACHINE_END diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c index 2e1e540f2e5a..d278fb672d40 100644 --- a/arch/arm/mach-imx/mach-mx21ads.c +++ b/arch/arm/mach-imx/mach-mx21ads.c @@ -205,7 +205,6 @@ static struct regulator_init_data mx21ads_lcd_regulator_init_data = { static struct fixed_voltage_config mx21ads_lcd_regulator_pdata = { .supply_name = "LCD", .microvolts = 3300000, - .enable_high = 1, .init_data = &mx21ads_lcd_regulator_init_data, }; diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c index f5e04047ed13..6dd7f57c332f 100644 --- a/arch/arm/mach-imx/mach-mx27ads.c +++ b/arch/arm/mach-imx/mach-mx27ads.c @@ -237,7 +237,7 @@ static struct fixed_voltage_config mx27ads_lcd_regulator_pdata = { static struct gpiod_lookup_table mx27ads_lcd_regulator_gpiod_table = { .dev_id = "reg-fixed-voltage.0", /* Let's hope ID 0 is what we get */ .table = { - GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("LCD", 0, NULL, GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c index 643a3d749703..fe50f4cf00a7 100644 --- a/arch/arm/mach-imx/mach-mx31moboard.c +++ b/arch/arm/mach-imx/mach-mx31moboard.c @@ -475,8 +475,7 @@ static int __init mx31moboard_init_cam(void) ret = dma_declare_coherent_memory(&pdev->dev, mx3_camera_base, mx3_camera_base, - MX3_CAMERA_BUF_SIZE, - DMA_MEMORY_EXCLUSIVE); + MX3_CAMERA_BUF_SIZE); if (ret) goto err; diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index e49e06834516..fce4b426c379 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -294,13 +294,7 @@ static int mmdc_pmu_event_init(struct perf_event *event) return -EOPNOTSUPP; } - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) + if (event->attr.sample_period) return -EINVAL; if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS) @@ -456,6 +450,7 @@ static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc, .start = mmdc_pmu_event_start, .stop = mmdc_pmu_event_stop, .read = mmdc_pmu_event_update, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }, .mmdc_base = mmdc_base, .dev = dev, diff --git a/arch/arm/mach-imx/pm-imx7ulp.c b/arch/arm/mach-imx/pm-imx7ulp.c index cf6a380c2b8d..7b2f7387e662 100644 --- a/arch/arm/mach-imx/pm-imx7ulp.c +++ b/arch/arm/mach-imx/pm-imx7ulp.c @@ -9,21 +9,60 @@ #include #include 
+#include "common.h" + #define SMC_PMCTRL 0x10 #define BP_PMCTRL_PSTOPO 16 #define PSTOPO_PSTOP3 0x3 +#define PSTOPO_PSTOP2 0x2 +#define PSTOPO_PSTOP1 0x1 +#define BP_PMCTRL_RUNM 8 +#define RUNM_RUN 0 +#define BP_PMCTRL_STOPM 0 +#define STOPM_STOP 0 + +#define BM_PMCTRL_PSTOPO (3 << BP_PMCTRL_PSTOPO) +#define BM_PMCTRL_RUNM (3 << BP_PMCTRL_RUNM) +#define BM_PMCTRL_STOPM (7 << BP_PMCTRL_STOPM) + +static void __iomem *smc1_base; + +int imx7ulp_set_lpm(enum ulp_cpu_pwr_mode mode) +{ + u32 val = readl_relaxed(smc1_base + SMC_PMCTRL); + + /* clear all */ + val &= ~(BM_PMCTRL_RUNM | BM_PMCTRL_STOPM | BM_PMCTRL_PSTOPO); + + switch (mode) { + case ULP_PM_RUN: + /* system/bus clock enabled */ + val |= PSTOPO_PSTOP3 << BP_PMCTRL_PSTOPO; + break; + case ULP_PM_WAIT: + /* system clock disabled, bus clock enabled */ + val |= PSTOPO_PSTOP2 << BP_PMCTRL_PSTOPO; + break; + case ULP_PM_STOP: + /* system/bus clock disabled */ + val |= PSTOPO_PSTOP1 << BP_PMCTRL_PSTOPO; + break; + default: + return -EINVAL; + } + + writel_relaxed(val, smc1_base + SMC_PMCTRL); + + return 0; +} void __init imx7ulp_pm_init(void) { struct device_node *np; - void __iomem *smc1_base; np = of_find_compatible_node(NULL, NULL, "fsl,imx7ulp-smc1"); smc1_base = of_iomap(np, 0); WARN_ON(!smc1_base); - /* Partial Stop mode 3 with system/bus clock enabled */ - writel_relaxed(PSTOPO_PSTOP3 << BP_PMCTRL_PSTOPO, - smc1_base + SMC_PMCTRL); - iounmap(smc1_base); + imx7ulp_set_lpm(ULP_PM_RUN); } diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 3b73813c6b04..23e8c93515d4 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c @@ -75,8 +75,7 @@ void __init n2100_map_io(void) /* * N2100 PCI. */ -static int __init -n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { int irq; diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c index e48cc06c2aec..b3be60a8e467 100644 --- a/arch/arm/mach-lpc32xx/phy3250.c +++ b/arch/arm/mach-lpc32xx/phy3250.c @@ -45,73 +45,6 @@ #include #include "common.h" -/* - * AMBA LCD controller - */ -static struct clcd_panel conn_lcd_panel = { - .mode = { - .name = "QVGA portrait", - .refresh = 60, - .xres = 240, - .yres = 320, - .pixclock = 191828, - .left_margin = 22, - .right_margin = 11, - .upper_margin = 2, - .lower_margin = 1, - .hsync_len = 5, - .vsync_len = 2, - .sync = 0, - .vmode = FB_VMODE_NONINTERLACED, - }, - .width = -1, - .height = -1, - .tim2 = (TIM2_IVS | TIM2_IHS), - .cntl = (CNTL_BGR | CNTL_LCDTFT | CNTL_LCDVCOMP(1) | - CNTL_LCDBPP16_565), - .bpp = 16, -}; -#define PANEL_SIZE (3 * SZ_64K) - -static int lpc32xx_clcd_setup(struct clcd_fb *fb) -{ - dma_addr_t dma; - - fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, PANEL_SIZE, &dma, - GFP_KERNEL); - if (!fb->fb.screen_base) { - printk(KERN_ERR "CLCD: unable to map framebuffer\n"); - return -ENOMEM; - } - - fb->fb.fix.smem_start = dma; - fb->fb.fix.smem_len = PANEL_SIZE; - fb->panel = &conn_lcd_panel; - - return 0; -} - -static int lpc32xx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma) -{ - return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base, - fb->fb.fix.smem_start, fb->fb.fix.smem_len); -} - -static void lpc32xx_clcd_remove(struct clcd_fb *fb) -{ - dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base, - fb->fb.fix.smem_start); -} - -static struct clcd_board lpc32xx_clcd_data = { - .name = "Phytec LCD", - .check = clcdfb_check, - .decode = clcdfb_decode, - .setup = 
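
As read off the defines in this hunk, imx7ulp_set_lpm() clears RUNM, STOPM and PSTOPO (the RUN and STOP encodings used here are both 0) and then programs only the PSTOPO field. A condensed summary of the register layout and the mode mapping implied by the switch statement; field positions are derived from the BP_/BM_ macros in the patch, not from a datasheet:

/*
 * SMC_PMCTRL (offset 0x10), fields touched here:
 *   PSTOPO  bits [17:16]  partial stop option
 *   RUNM    bits [9:8]    run mode
 *   STOPM   bits [2:0]    stop mode
 *
 * imx7ulp_set_lpm() mapping:
 *   ULP_PM_RUN  -> PSTOPO = PSTOP3  (system/bus clock enabled)
 *   ULP_PM_WAIT -> PSTOPO = PSTOP2  (system clock disabled, bus clock enabled)
 *   ULP_PM_STOP -> PSTOPO = PSTOP1  (system/bus clock disabled)
 *   anything else -> -EINVAL
 */
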
lpc32xx_clcd_setup, - .mmap = lpc32xx_clcd_mmap, - .remove = lpc32xx_clcd_remove, -}; - static struct pl08x_channel_data pl08x_slave_channels[] = { { .bus_id = "nand-slc", @@ -148,11 +81,6 @@ static struct pl08x_platform_data pl08x_pd = { .mem_buses = PL08X_AHB1, }; -static struct mmci_platform_data lpc32xx_mmci_data = { - .ocr_mask = MMC_VDD_30_31 | MMC_VDD_31_32 | - MMC_VDD_32_33 | MMC_VDD_33_34, -}; - static struct lpc32xx_slc_platform_data lpc32xx_slc_data = { .dma_filter = pl08x_filter_id, }; @@ -164,10 +92,7 @@ static struct lpc32xx_mlc_platform_data lpc32xx_mlc_data = { static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = { OF_DEV_AUXDATA("arm,pl022", 0x20084000, "dev:ssp0", NULL), OF_DEV_AUXDATA("arm,pl022", 0x2008C000, "dev:ssp1", NULL), - OF_DEV_AUXDATA("arm,pl110", 0x31040000, "dev:clcd", &lpc32xx_clcd_data), OF_DEV_AUXDATA("arm,pl080", 0x31000000, "pl08xdmac", &pl08x_pd), - OF_DEV_AUXDATA("arm,pl18x", 0x20098000, "20098000.sd", - &lpc32xx_mmci_data), OF_DEV_AUXDATA("nxp,lpc3220-slc", 0x20020000, "20020000.flash", &lpc32xx_slc_data), OF_DEV_AUXDATA("nxp,lpc3220-mlc", 0x200a8000, "200a8000.flash", @@ -177,15 +102,6 @@ static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = { static void __init lpc3250_machine_init(void) { - u32 tmp; - - /* Setup LCD muxing to RGB565 */ - tmp = __raw_readl(LPC32XX_CLKPWR_LCDCLK_CTRL) & - ~(LPC32XX_CLKPWR_LCDCTRL_LCDTYPE_MSK | - LPC32XX_CLKPWR_LCDCTRL_PSCALE_MSK); - tmp |= LPC32XX_CLKPWR_LCDCTRL_LCDTYPE_TFT16; - __raw_writel(tmp, LPC32XX_CLKPWR_LCDCLK_CTRL); - lpc32xx_serial_init(); /* Test clock needed for UDA1380 initial init */ diff --git a/arch/arm/mach-lpc32xx/pm.c b/arch/arm/mach-lpc32xx/pm.c index 62471570d586..32bca351a73b 100644 --- a/arch/arm/mach-lpc32xx/pm.c +++ b/arch/arm/mach-lpc32xx/pm.c @@ -86,17 +86,10 @@ static int lpc32xx_pm_enter(suspend_state_t state) void *iram_swap_area; /* Allocate some space for temporary IRAM storage */ - iram_swap_area = kmalloc(lpc32xx_sys_suspend_sz, GFP_KERNEL); - if (!iram_swap_area) { - printk(KERN_ERR - "PM Suspend: cannot allocate memory to save portion " - "of SRAM\n"); + iram_swap_area = kmemdup((void *)TEMP_IRAM_AREA, + lpc32xx_sys_suspend_sz, GFP_KERNEL); + if (!iram_swap_area) return -ENOMEM; - } - - /* Backup a small area of IRAM used for the suspend code */ - memcpy(iram_swap_area, (void *) TEMP_IRAM_AREA, - lpc32xx_sys_suspend_sz); /* * Copy code to suspend system into IRAM. 
The suspend code diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig index 91cc461f7b04..11ed264f0731 100644 --- a/arch/arm/mach-mediatek/Kconfig +++ b/arch/arm/mach-mediatek/Kconfig @@ -26,6 +26,10 @@ config MACH_MT7623 bool "MediaTek MT7623 SoCs support" default ARCH_MEDIATEK +config MACH_MT7629 + bool "MediaTek MT7629 SoCs support" + default ARCH_MEDIATEK + config MACH_MT8127 bool "MediaTek MT8127 SoCs support" default ARCH_MEDIATEK diff --git a/arch/arm/mach-mediatek/mediatek.c b/arch/arm/mach-mediatek/mediatek.c index 6910b4e0d913..b6a81ba1ce32 100644 --- a/arch/arm/mach-mediatek/mediatek.c +++ b/arch/arm/mach-mediatek/mediatek.c @@ -30,7 +30,6 @@ static void __init mediatek_timer_init(void) if (of_machine_is_compatible("mediatek,mt6589") || of_machine_is_compatible("mediatek,mt7623") || - of_machine_is_compatible("mediatek,mt7623a") || of_machine_is_compatible("mediatek,mt8135") || of_machine_is_compatible("mediatek,mt8127")) { /* turn on GPT6 which ungates arch timer clocks */ @@ -50,7 +49,7 @@ static const char * const mediatek_board_dt_compat[] = { "mediatek,mt6589", "mediatek,mt6592", "mediatek,mt7623", - "mediatek,mt7623a", + "mediatek,mt7629", "mediatek,mt8127", "mediatek,mt8135", NULL, diff --git a/arch/arm/mach-mediatek/platsmp.c b/arch/arm/mach-mediatek/platsmp.c index 6882ff07aaa6..c9d7c0458452 100644 --- a/arch/arm/mach-mediatek/platsmp.c +++ b/arch/arm/mach-mediatek/platsmp.c @@ -60,7 +60,7 @@ static const struct of_device_id mtk_tz_smp_boot_infos[] __initconst = { static const struct of_device_id mtk_smp_boot_infos[] __initconst = { { .compatible = "mediatek,mt6589", .data = &mtk_mt6589_boot }, { .compatible = "mediatek,mt7623", .data = &mtk_mt7623_boot }, - { .compatible = "mediatek,mt7623a", .data = &mtk_mt7623_boot }, + { .compatible = "mediatek,mt7629", .data = &mtk_mt7623_boot }, {}, }; diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig index b16831697183..15e9cb75738e 100644 --- a/arch/arm/mach-meson/Kconfig +++ b/arch/arm/mach-meson/Kconfig @@ -9,7 +9,6 @@ menuconfig ARCH_MESON select PINCTRL select PINCTRL_MESON select COMMON_CLK - select COMMON_CLK_AMLOGIC select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP diff --git a/arch/arm/mach-milbeaut/Kconfig b/arch/arm/mach-milbeaut/Kconfig new file mode 100644 index 000000000000..6a576fd8521e --- /dev/null +++ b/arch/arm/mach-milbeaut/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +menuconfig ARCH_MILBEAUT + bool "Socionext Milbeaut SoCs" + depends on ARCH_MULTI_V7 + select ARM_GIC + help + This enables support for Socionext Milbeaut SoCs + +if ARCH_MILBEAUT + +config ARCH_MILBEAUT_M10V + bool "Milbeaut SC2000/M10V platform" + select ARM_ARCH_TIMER + select MILBEAUT_TIMER + select PINCTRL + select PINCTRL_MILBEAUT + help + Support for Socionext's MILBEAUT M10V based systems + +endif diff --git a/arch/arm/mach-milbeaut/Makefile b/arch/arm/mach-milbeaut/Makefile new file mode 100644 index 000000000000..ce5ea062047a --- /dev/null +++ b/arch/arm/mach-milbeaut/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SMP) += platsmp.o diff --git a/arch/arm/mach-milbeaut/platsmp.c b/arch/arm/mach-milbeaut/platsmp.c new file mode 100644 index 000000000000..591543c81399 --- /dev/null +++ b/arch/arm/mach-milbeaut/platsmp.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright: (C) 2018 Socionext Inc. + * Copyright: (C) 2015 Linaro Ltd. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define M10V_MAX_CPU 4 +#define KERNEL_UNBOOT_FLAG 0x12345678 + +static void __iomem *m10v_smp_base; + +static int m10v_boot_secondary(unsigned int l_cpu, struct task_struct *idle) +{ + unsigned int mpidr, cpu, cluster; + + if (!m10v_smp_base) + return -ENXIO; + + mpidr = cpu_logical_map(l_cpu); + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + + if (cpu >= M10V_MAX_CPU) + return -EINVAL; + + pr_info("%s: cpu %u l_cpu %u cluster %u\n", + __func__, cpu, l_cpu, cluster); + + writel(__pa_symbol(secondary_startup), m10v_smp_base + cpu * 4); + arch_send_wakeup_ipi_mask(cpumask_of(l_cpu)); + + return 0; +} + +static void m10v_smp_init(unsigned int max_cpus) +{ + unsigned int mpidr, cpu, cluster; + struct device_node *np; + + np = of_find_compatible_node(NULL, NULL, "socionext,milbeaut-smp-sram"); + if (!np) + return; + + m10v_smp_base = of_iomap(np, 0); + if (!m10v_smp_base) + return; + + mpidr = read_cpuid_mpidr(); + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + pr_info("MCPM boot on cpu_%u cluster_%u\n", cpu, cluster); + + for (cpu = 0; cpu < M10V_MAX_CPU; cpu++) + writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4); +} + +static void m10v_cpu_die(unsigned int l_cpu) +{ + gic_cpu_if_down(0); + v7_exit_coherency_flush(louis); + wfi(); +} + +static int m10v_cpu_kill(unsigned int l_cpu) +{ + unsigned int mpidr, cpu; + + mpidr = cpu_logical_map(l_cpu); + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + + writel(KERNEL_UNBOOT_FLAG, m10v_smp_base + cpu * 4); + + return 1; +} + +static struct smp_operations m10v_smp_ops __initdata = { + .smp_prepare_cpus = m10v_smp_init, + .smp_boot_secondary = m10v_boot_secondary, + .cpu_die = m10v_cpu_die, + .cpu_kill = m10v_cpu_kill, +}; +CPU_METHOD_OF_DECLARE(m10v_smp, "socionext,milbeaut-m10v-smp", &m10v_smp_ops); + +static int m10v_pm_valid(suspend_state_t state) +{ + return (state == PM_SUSPEND_STANDBY) || (state == PM_SUSPEND_MEM); +} + +typedef void (*phys_reset_t)(unsigned long); +static phys_reset_t phys_reset; + +static int m10v_die(unsigned long arg) +{ + setup_mm_for_reboot(); + asm("wfi"); + /* Boot just like a secondary */ + phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); + phys_reset(virt_to_phys(cpu_resume)); + + return 0; +} + +static int m10v_pm_enter(suspend_state_t state) +{ + switch (state) { + case PM_SUSPEND_STANDBY: + asm("wfi"); + break; + case PM_SUSPEND_MEM: + cpu_pm_enter(); + cpu_suspend(0, m10v_die); + cpu_pm_exit(); + break; + } + return 0; +} + +static const struct platform_suspend_ops m10v_pm_ops = { + .valid = m10v_pm_valid, + .enter = m10v_pm_enter, +}; + +struct clk *m10v_clclk_register(struct device *cpu_dev); + +static int __init m10v_pm_init(void) +{ + if (of_machine_is_compatible("socionext,milbeaut-evb")) + suspend_set_ops(&m10v_pm_ops); + + return 0; +} +late_initcall(m10v_pm_init); diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c index a04e249c654b..d2560fb1e835 100644 --- a/arch/arm/mach-mmp/brownstone.c +++ b/arch/arm/mach-mmp/brownstone.c @@ -149,7 +149,6 @@ static struct regulator_init_data brownstone_v_5vp_data = { static struct fixed_voltage_config brownstone_v_5vp = { .supply_name = "v_5vp", .microvolts = 5000000, - .enable_high = 1, .enabled_at_boot = 1, .init_data = &brownstone_v_5vp_data, }; diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index 
c4c0a8ea11e4..be30c3c061b4 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c @@ -267,7 +267,6 @@ static struct fixed_voltage_config modem_nreset_config = { .supply_name = "modem_nreset", .microvolts = 3300000, .startup_delay = 25000, - .enable_high = 1, .enabled_at_boot = 1, .init_data = &modem_nreset_data, }; @@ -533,7 +532,6 @@ static struct regulator_init_data keybrd_pwr_initdata = { static struct fixed_voltage_config keybrd_pwr_config = { .supply_name = "keybrd_pwr", .microvolts = 5000000, - .enable_high = 1, .init_data = &keybrd_pwr_initdata, }; diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c index eb41db78cd47..10848f573d37 100644 --- a/arch/arm/mach-omap1/board-nokia770.c +++ b/arch/arm/mach-omap1/board-nokia770.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -25,7 +26,6 @@ #include #include #include -#include #include #include @@ -217,18 +217,19 @@ static inline void nokia770_mmc_init(void) #endif #if IS_ENABLED(CONFIG_I2C_CBUS_GPIO) -static struct i2c_cbus_platform_data nokia770_cbus_data = { - .clk_gpio = OMAP_MPUIO(9), - .dat_gpio = OMAP_MPUIO(10), - .sel_gpio = OMAP_MPUIO(11), +static struct gpiod_lookup_table nokia770_cbus_gpio_table = { + .dev_id = "i2c-cbus-gpio.2", + .table = { + GPIO_LOOKUP_IDX("mpuio", 9, NULL, 0, 0), /* clk */ + GPIO_LOOKUP_IDX("mpuio", 10, NULL, 1, 0), /* dat */ + GPIO_LOOKUP_IDX("mpuio", 11, NULL, 2, 0), /* sel */ + { }, + }, }; static struct platform_device nokia770_cbus_device = { .name = "i2c-cbus-gpio", .id = 2, - .dev = { - .platform_data = &nokia770_cbus_data, - }, }; static struct i2c_board_info nokia770_i2c_board_info_2[] __initdata = { @@ -257,6 +258,7 @@ static void __init nokia770_cbus_init(void) nokia770_i2c_board_info_2[1].irq = gpio_to_irq(tahvo_irq_gpio); i2c_register_board_info(2, nokia770_i2c_board_info_2, ARRAY_SIZE(nokia770_i2c_board_info_2)); + gpiod_add_lookup_table(&nokia770_cbus_gpio_table); platform_device_register(&nokia770_cbus_device); } #else /* CONFIG_I2C_CBUS_GPIO */ diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index a8b291f00109..dae514c8276a 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c @@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && (cx->mpu_logic_state == PWRDM_POWER_OFF); + /* Enter broadcast mode for periodic timers */ + tick_broadcast_enable(); + + /* Enter broadcast mode for one-shot timers */ tick_broadcast_enter(); /* @@ -218,15 +222,6 @@ fail: return index; } -/* - * For each cpu, setup the broadcast timer because local timers - * stops for the states above C1. 
- */ -static void omap_setup_broadcast_timer(void *arg) -{ - tick_broadcast_enable(); -} - static struct cpuidle_driver omap4_idle_driver = { .name = "omap4_idle", .owner = THIS_MODULE, @@ -319,8 +314,5 @@ int __init omap4_idle_init(void) if (!cpu_clkdm[0] || !cpu_clkdm[1]) return -ENODEV; - /* Configure the broadcast timer on each cpu */ - on_each_cpu(omap_setup_broadcast_timer, NULL, 1); - return cpuidle_register(idle_driver, cpu_online_mask); } diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index f86b72d1d59e..1444b4b4bd9f 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) u32 enable_mask, enable_shift; u32 pipd_mask, pipd_shift; u32 reg; + int ret; if (dsi_id == 0) { enable_mask = OMAP4_DSI1_LANEENABLE_MASK; @@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) return -ENODEV; } - regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®); + ret = regmap_read(omap4_dsi_mux_syscon, + OMAP4_DSIPHY_SYSCON_OFFSET, + ®); + if (ret) + return ret; reg &= ~enable_mask; reg &= ~pipd_mask; diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index fc5fb776a710..17558be4bf0a 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c @@ -50,6 +50,9 @@ #define OMAP4_NR_BANKS 4 #define OMAP4_NR_IRQS 128 +#define SYS_NIRQ1_EXT_SYS_IRQ_1 7 +#define SYS_NIRQ2_EXT_SYS_IRQ_2 119 + static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_RAW_SPINLOCK(wakeupgen_lock); @@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d) irq_chip_unmask_parent(d); } +/* + * The sys_nirq pins bypass peripheral modules and are wired directly + * to MPUSS wakeupgen. They get automatically inverted for GIC. 
+ */ +static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type) +{ + bool inverted = false; + + switch (type) { + case IRQ_TYPE_LEVEL_LOW: + type &= ~IRQ_TYPE_LEVEL_MASK; + type |= IRQ_TYPE_LEVEL_HIGH; + inverted = true; + break; + case IRQ_TYPE_EDGE_FALLING: + type &= ~IRQ_TYPE_EDGE_BOTH; + type |= IRQ_TYPE_EDGE_RISING; + inverted = true; + break; + default: + break; + } + + if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 && + d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2) + pr_warn("wakeupgen: irq%li polarity inverted in dts\n", + d->hwirq); + + return irq_chip_set_type_parent(d, type); +} + #ifdef CONFIG_HOTPLUG_CPU static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); @@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = { .irq_mask = wakeupgen_mask, .irq_unmask = wakeupgen_unmask, .irq_retrigger = irq_chip_retrigger_hierarchy, - .irq_set_type = irq_chip_set_type_parent, + .irq_set_type = wakeupgen_irq_set_type, .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, #ifdef CONFIG_SMP .irq_set_affinity = irq_chip_set_affinity_parent, diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c index 8e44e2728620..debcd88ab971 100644 --- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c @@ -432,6 +432,13 @@ static struct omap_hwmod dm81xx_i2c2_hwmod = { .class = &i2c_class, }; +static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c2 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_i2c2_hwmod, + .clk = "sysclk6_ck", + .user = OCP_USER_MPU, +}; + static struct omap_hwmod_class_sysconfig dm81xx_elm_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, @@ -443,13 +450,6 @@ static struct omap_hwmod_class_sysconfig dm81xx_elm_sysc = { .sysc_fields = &omap_hwmod_sysc_type1, }; -static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c2 = { - .master = &dm81xx_l4_ls_hwmod, - .slave = &dm81xx_i2c2_hwmod, - .clk = "sysclk6_ck", - .user = OCP_USER_MPU, -}; - static struct omap_hwmod_class dm81xx_elm_hwmod_class = { .name = "elm", .sysc = &dm81xx_elm_sysc, @@ -539,6 +539,58 @@ static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio2 = { .user = OCP_USER_MPU, }; +static struct omap_hwmod_opt_clk gpio3_opt_clks[] = { + { .role = "dbclk", .clk = "sysclk18_ck" }, +}; + +static struct omap_hwmod dm81xx_gpio3_hwmod = { + .name = "gpio3", + .clkdm_name = "alwon_l3s_clkdm", + .class = &dm81xx_gpio_hwmod_class, + .main_clk = "sysclk6_ck", + .prcm = { + .omap4 = { + .clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL, + .modulemode = MODULEMODE_SWCTRL, + }, + }, + .opt_clks = gpio3_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(gpio3_opt_clks), +}; + +static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio3 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_gpio3_hwmod, + .clk = "sysclk6_ck", + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_opt_clk gpio4_opt_clks[] = { + { .role = "dbclk", .clk = "sysclk18_ck" }, +}; + +static struct omap_hwmod dm81xx_gpio4_hwmod = { + .name = "gpio4", + .clkdm_name = "alwon_l3s_clkdm", + .class = &dm81xx_gpio_hwmod_class, + .main_clk = "sysclk6_ck", + .prcm = { + .omap4 = { + .clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL, + .modulemode = MODULEMODE_SWCTRL, + }, + }, + .opt_clks = gpio4_opt_clks, + .opt_clks_cnt = ARRAY_SIZE(gpio4_opt_clks), +}; + +static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio4 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_gpio4_hwmod, + .clk = "sysclk6_ck", + .user = OCP_USER_MPU, +}; + static struct omap_hwmod_class_sysconfig dm81xx_gpmc_sysc = { .rev_offs = 
0x0, .sysc_offs = 0x10, @@ -1133,6 +1185,45 @@ static struct omap_hwmod dm81xx_mcspi1_hwmod = { .class = &dm816x_mcspi_class, }; +static struct omap_hwmod dm81xx_mcspi2_hwmod = { + .name = "mcspi2", + .clkdm_name = "alwon_l3s_clkdm", + .main_clk = "sysclk10_ck", + .prcm = { + .omap4 = { + .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, + .modulemode = MODULEMODE_SWCTRL, + }, + }, + .class = &dm816x_mcspi_class, +}; + +static struct omap_hwmod dm81xx_mcspi3_hwmod = { + .name = "mcspi3", + .clkdm_name = "alwon_l3s_clkdm", + .main_clk = "sysclk10_ck", + .prcm = { + .omap4 = { + .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, + .modulemode = MODULEMODE_SWCTRL, + }, + }, + .class = &dm816x_mcspi_class, +}; + +static struct omap_hwmod dm81xx_mcspi4_hwmod = { + .name = "mcspi4", + .clkdm_name = "alwon_l3s_clkdm", + .main_clk = "sysclk10_ck", + .prcm = { + .omap4 = { + .clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL, + .modulemode = MODULEMODE_SWCTRL, + }, + }, + .class = &dm816x_mcspi_class, +}; + static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi1 = { .master = &dm81xx_l4_ls_hwmod, .slave = &dm81xx_mcspi1_hwmod, @@ -1140,6 +1231,27 @@ static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi1 = { .user = OCP_USER_MPU, }; +static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi2 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_mcspi2_hwmod, + .clk = "sysclk6_ck", + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi3 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_mcspi3_hwmod, + .clk = "sysclk6_ck", + .user = OCP_USER_MPU, +}; + +static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi4 = { + .master = &dm81xx_l4_ls_hwmod, + .slave = &dm81xx_mcspi4_hwmod, + .clk = "sysclk6_ck", + .user = OCP_USER_MPU, +}; + static struct omap_hwmod_class_sysconfig dm81xx_mailbox_sysc = { .rev_offs = 0x000, .sysc_offs = 0x010, @@ -1378,8 +1490,13 @@ static struct omap_hwmod_ocp_if *dm814x_hwmod_ocp_ifs[] __initdata = { &dm81xx_l4_ls__i2c2, &dm81xx_l4_ls__gpio1, &dm81xx_l4_ls__gpio2, + &dm81xx_l4_ls__gpio3, + &dm81xx_l4_ls__gpio4, &dm81xx_l4_ls__elm, &dm81xx_l4_ls__mcspi1, + &dm81xx_l4_ls__mcspi2, + &dm81xx_l4_ls__mcspi3, + &dm81xx_l4_ls__mcspi4, &dm814x_l4_ls__mmc1, &dm814x_l4_ls__mmc2, &ti81xx_l4_ls__rtc, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 8a5b6ed4ec36..a2ecc5e69abb 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -330,7 +330,6 @@ static struct fixed_voltage_config pandora_vwlan = { .supply_name = "vwlan", .microvolts = 1800000, /* 1.8V */ .startup_delay = 50000, /* 50ms */ - .enable_high = 1, .init_data = &pandora_vmmc3, }; diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c index 83a7ec4c16d0..c67f92bfa30e 100644 --- a/arch/arm/mach-orion5x/common.c +++ b/arch/arm/mach-orion5x/common.c @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c index a3c1336d30c9..c65ab7db36ad 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c index 252efe29bd1a..76b8138d9d79 100644 --- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c @@ -17,7 +17,7 @@ 
#include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c index f4f1dbe1d91d..5f388a1ed1e4 100644 --- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c +++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c index d162d4c7f85d..83589a28a491 100644 --- a/arch/arm/mach-orion5x/wnr854t-setup.c +++ b/arch/arm/mach-orion5x/wnr854t-setup.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c index 9250bb2e429c..cea08d4a2597 100644 --- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c +++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index dc8e4f4b7ade..8839c72fdee3 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -454,24 +454,6 @@ config MACH_TREO680 Say Y here if you intend to run this kernel on Palm Treo 680 smartphone. -config MACH_RAUMFELD_RC - bool "Raumfeld Controller" - select CPU_PXA300 - select POWER_SUPPLY - select PXA3xx - -config MACH_RAUMFELD_CONNECTOR - bool "Raumfeld Connector" - select CPU_PXA300 - select POWER_SUPPLY - select PXA3xx - -config MACH_RAUMFELD_SPEAKER - bool "Raumfeld Speaker" - select CPU_PXA300 - select POWER_SUPPLY - select PXA3xx - config PXA_SHARPSL bool "SHARP Zaurus SL-5600, SL-C7xx and SL-Cxx00 Models" select SHARP_PARAM diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index 0a8e9611052f..f70728930c4f 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile @@ -86,9 +86,6 @@ obj-$(CONFIG_MACH_POODLE) += poodle.o obj-$(CONFIG_MACH_TOSA) += tosa.o obj-$(CONFIG_MACH_ICONTROL) += icontrol.o mxm8x10.o obj-$(CONFIG_ARCH_PXA_ESERIES) += eseries.o -obj-$(CONFIG_MACH_RAUMFELD_RC) += raumfeld.o -obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o -obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o obj-$(CONFIG_MACH_ZIPIT2) += z2.o obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c index fa8e7dd4d898..4401dfcd7e68 100644 --- a/arch/arm/mach-pxa/cm-x255.c +++ b/arch/arm/mach-pxa/cm-x255.c @@ -98,7 +98,7 @@ static unsigned long cmx255_pin_config[] = { }; #if defined(CONFIG_SPI_PXA2XX) -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index f7081a50dc67..279eeca7add0 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c @@ -313,7 +313,7 @@ static inline void cmx270_init_mmc(void) {} #endif #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master cm_x270_spi_info = { +static struct pxa2xx_spi_controller cm_x270_spi_info = { .num_chipselect = 1, .enable_dma = 1, }; diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index c9732cace5e3..7ecf559bd71c 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -530,7 +530,7 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = { }; #if IS_ENABLED(CONFIG_SPI_PXA2XX) 
-static struct pxa2xx_spi_master corgi_spi_info = { +static struct pxa2xx_spi_controller corgi_spi_info = { .num_chipselect = 3, }; diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index a24783a03827..524d6093e0c7 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -1065,7 +1065,7 @@ struct platform_device pxa93x_device_gpio = { /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ -void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info) +void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info) { struct platform_device *pd; diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 32c1edeb3f14..fa3adb073a0f 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -689,7 +689,7 @@ static inline void em_x270_init_lcd(void) {} #endif #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master em_x270_spi_info = { +static struct pxa2xx_spi_controller em_x270_spi_info = { .num_chipselect = 1, }; @@ -703,7 +703,7 @@ static struct tdo24m_platform_data em_x270_tdo24m_pdata = { .model = TDO35S, }; -static struct pxa2xx_spi_master em_x270_spi_2_info = { +static struct pxa2xx_spi_controller em_x270_spi_2_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -976,7 +976,6 @@ static struct fixed_voltage_config camera_dummy_config = { .supply_name = "camera_vdd", .input_supply = "vcc cam", .microvolts = 2800000, - .enable_high = 0, .init_data = &camera_dummy_initdata, }; diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c index 565965e9acc7..5e110e70ce5a 100644 --- a/arch/arm/mach-pxa/ezx.c +++ b/arch/arm/mach-pxa/ezx.c @@ -714,7 +714,6 @@ static struct regulator_init_data camera_regulator_initdata = { static struct fixed_voltage_config camera_regulator_config = { .supply_name = "camera_vdd", .microvolts = 2800000, - .enable_high = 0, .init_data = &camera_regulator_initdata, }; @@ -730,7 +729,7 @@ static struct gpiod_lookup_table camera_supply_gpiod_table = { .dev_id = "reg-fixed-voltage.1", .table = { GPIO_LOOKUP("gpio-pxa", GPIO50_nCAM_EN, - NULL, GPIO_ACTIVE_HIGH), + NULL, GPIO_ACTIVE_LOW), { }, }, }; diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index b79b757fdd41..1d6b1d2fb6a9 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -629,7 +630,7 @@ static struct spi_board_info tsc2046_board_info[] __initdata = { }, }; -static struct pxa2xx_spi_master pxa_ssp2_master_info = { +static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -702,9 +703,7 @@ static struct regulator_init_data bq24022_init_data = { .consumer_supplies = bq24022_consumers, }; -static struct gpio bq24022_gpios[] = { - { GPIO96_HX4700_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, -}; +static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, @@ -714,12 +713,10 @@ static struct gpio_regulator_state bq24022_states[] = { static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", - .enable_gpio = GPIO72_HX4700_BQ24022_nCHARGE_EN, - .enable_high = 0, .enabled_at_boot = 0, - .gpios = bq24022_gpios, - .nr_gpios = ARRAY_SIZE(bq24022_gpios), + .gflags = bq24022_gpiod_gflags, + 
.ngpios = ARRAY_SIZE(bq24022_gpiod_gflags), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), @@ -736,6 +733,17 @@ static struct platform_device bq24022 = { }, }; +static struct gpiod_lookup_table bq24022_gpiod_table = { + .dev_id = "gpio-regulator", + .table = { + GPIO_LOOKUP("gpio-pxa", GPIO96_HX4700_BQ24022_ISET2, + NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO72_HX4700_BQ24022_nCHARGE_EN, + "enable", GPIO_ACTIVE_LOW), + { }, + }, +}; + /* * StrataFlash */ @@ -878,6 +886,7 @@ static void __init hx4700_init(void) pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); + gpiod_add_lookup_table(&bq24022_gpiod_table); platform_add_devices(devices, ARRAY_SIZE(devices)); pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup)); diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c index cbaf4f6edcda..7e30452e3840 100644 --- a/arch/arm/mach-pxa/icontrol.c +++ b/arch/arm/mach-pxa/icontrol.c @@ -115,12 +115,12 @@ static struct spi_board_info mcp251x_board_info[] = { } }; -static struct pxa2xx_spi_master pxa_ssp3_spi_master_info = { +static struct pxa2xx_spi_controller pxa_ssp3_spi_master_info = { .num_chipselect = 2, .enable_dma = 1 }; -static struct pxa2xx_spi_master pxa_ssp4_spi_master_info = { +static struct pxa2xx_spi_controller pxa_ssp4_spi_master_info = { .num_chipselect = 2, .enable_dma = 1 }; diff --git a/arch/arm/mach-pxa/include/mach/pxa25x-udc.h b/arch/arm/mach-pxa/include/mach/pxa25x-udc.h deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c index 39db4898dc4a..464b8bd2bcb9 100644 --- a/arch/arm/mach-pxa/littleton.c +++ b/arch/arm/mach-pxa/littleton.c @@ -191,7 +191,7 @@ static inline void littleton_init_lcd(void) {}; #endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */ #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master littleton_spi_info = { +static struct pxa2xx_spi_controller littleton_spi_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index a1391e113ef4..c1bd0d544981 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c @@ -197,7 +197,7 @@ static struct platform_device sa1111_device = { * (to J5) and poking board registers (as done below). Else it's only useful * for the temperature sensors. 
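The hx4700 and magician hunks follow the same conversion: GPIO numbers and their active-high/active-low polarity move out of the regulator platform data and into a gpiod_lookup_table keyed by the consumer's device name, which board code registers with gpiod_add_lookup_table(). A minimal sketch of that pattern, with hypothetical names ("example-device", pin 42) and reusing the "gpio-pxa" chip label seen elsewhere in this patch:

#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
#include <linux/init.h>

static struct gpiod_lookup_table example_gpiod_table = {
	.dev_id = "example-device",	/* must match dev_name() of the consumer */
	.table = {
		/* active-low "enable" line, pin 42 on the "gpio-pxa" chip */
		GPIO_LOOKUP("gpio-pxa", 42, "enable", GPIO_ACTIVE_LOW),
		{ },
	},
};

static void __init example_board_init(void)
{
	gpiod_add_lookup_table(&example_gpiod_table);
	/* The consumer driver later calls gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	 * the GPIO core applies the polarity recorded in the table. */
}
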
*/ -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index 08b079653c3f..75abc21083eb 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -645,9 +645,8 @@ static struct regulator_init_data bq24022_init_data = { .consumer_supplies = bq24022_consumers, }; -static struct gpio bq24022_gpios[] = { - { EGPIO_MAGICIAN_BQ24022_ISET2, GPIOF_OUT_INIT_LOW, "bq24022_iset2" }, -}; + +static enum gpiod_flags bq24022_gpiod_gflags[] = { GPIOD_OUT_LOW }; static struct gpio_regulator_state bq24022_states[] = { { .value = 100000, .gpios = (0 << 0) }, @@ -657,12 +656,10 @@ static struct gpio_regulator_state bq24022_states[] = { static struct gpio_regulator_config bq24022_info = { .supply_name = "bq24022", - .enable_gpio = GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, - .enable_high = 0, .enabled_at_boot = 1, - .gpios = bq24022_gpios, - .nr_gpios = ARRAY_SIZE(bq24022_gpios), + .gflags = bq24022_gpiod_gflags, + .ngpios = ARRAY_SIZE(bq24022_gpiod_gflags), .states = bq24022_states, .nr_states = ARRAY_SIZE(bq24022_states), @@ -679,6 +676,17 @@ static struct platform_device bq24022 = { }, }; +static struct gpiod_lookup_table bq24022_gpiod_table = { + .dev_id = "gpio-regulator", + .table = { + GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2, + NULL, GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN, + "enable", GPIO_ACTIVE_LOW), + { }, + }, +}; + /* * fixed regulator for ads7846 */ @@ -932,7 +940,7 @@ struct pxa2xx_spi_chip tsc2046_chip_info = { .gpio_cs = GPIO14_MAGICIAN_TSC2046_CS, }; -static struct pxa2xx_spi_master magician_spi_info = { +static struct pxa2xx_spi_controller magician_spi_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -1027,6 +1035,7 @@ static void __init magician_init(void) regulator_register_always_on(0, "power", pwm_backlight_supply, ARRAY_SIZE(pwm_backlight_supply), 5000000); + gpiod_add_lookup_table(&bq24022_gpiod_table); platform_add_devices(ARRAY_AND_SIZE(devices)); } diff --git a/arch/arm/mach-pxa/pcm027.c b/arch/arm/mach-pxa/pcm027.c index ccca9f7575c3..e2e613449660 100644 --- a/arch/arm/mach-pxa/pcm027.c +++ b/arch/arm/mach-pxa/pcm027.c @@ -132,7 +132,7 @@ static struct platform_device smc91x_device = { /* * SPI host and devices */ -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c index c2a43d4cfd3e..9450a523cd0b 100644 --- a/arch/arm/mach-pxa/poodle.c +++ b/arch/arm/mach-pxa/poodle.c @@ -196,7 +196,7 @@ struct platform_device poodle_locomo_device = { EXPORT_SYMBOL(poodle_locomo_device); #if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE) -static struct pxa2xx_spi_master poodle_spi_info = { +static struct pxa2xx_spi_controller poodle_spi_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c deleted file mode 100644 index e1db072756f2..000000000000 --- a/arch/arm/mach-pxa/raumfeld.c +++ /dev/null @@ -1,1187 +0,0 @@ -/* - * arch/arm/mach-pxa/raumfeld.c - * - * Support for the following Raumfeld devices: - * - * * Controller - * * Connector - * * Speaker S/M - * - * See http://www.raumfeld.com for details. 
- * - * Copyright (c) 2009 Daniel Mack - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include - -#include "pxa300.h" -#include -#include -#include -#include - -#include "generic.h" -#include "devices.h" - -/* common GPIO definitions */ - -/* inputs */ -#define GPIO_ON_OFF (14) -#define GPIO_VOLENC_A (19) -#define GPIO_VOLENC_B (20) -#define GPIO_CHARGE_DONE (23) -#define GPIO_CHARGE_IND (27) -#define GPIO_TOUCH_IRQ (32) -#define GPIO_ETH_IRQ (40) -#define GPIO_SPI_MISO (98) -#define GPIO_ACCEL_IRQ (104) -#define GPIO_RESCUE_BOOT (115) -#define GPIO_DOCK_DETECT (116) -#define GPIO_KEY1 (117) -#define GPIO_KEY2 (118) -#define GPIO_KEY3 (119) -#define GPIO_CHARGE_USB_OK (112) -#define GPIO_CHARGE_DC_OK (101) -#define GPIO_CHARGE_USB_SUSP (102) - -/* outputs */ -#define GPIO_SHUTDOWN_SUPPLY (16) -#define GPIO_SHUTDOWN_BATT (18) -#define GPIO_CHRG_PEN2 (31) -#define GPIO_TFT_VA_EN (33) -#define GPIO_SPDIF_CS (34) -#define GPIO_LED2 (35) -#define GPIO_LED1 (36) -#define GPIO_SPDIF_RESET (38) -#define GPIO_SPI_CLK (95) -#define GPIO_MCLK_DAC_CS (96) -#define GPIO_SPI_MOSI (97) -#define GPIO_W1_PULLUP_ENABLE (105) -#define GPIO_DISPLAY_ENABLE (106) -#define GPIO_MCLK_RESET (111) -#define GPIO_W2W_RESET (113) -#define GPIO_W2W_PDN (114) -#define GPIO_CODEC_RESET (120) -#define GPIO_AUDIO_VA_ENABLE (124) -#define GPIO_ACCEL_CS (125) -#define GPIO_ONE_WIRE (126) - -/* - * GPIO configurations - */ -static mfp_cfg_t raumfeld_controller_pin_config[] __initdata = { - /* UART1 */ - GPIO77_UART1_RXD, - GPIO78_UART1_TXD, - GPIO79_UART1_CTS, - GPIO81_UART1_DSR, - GPIO83_UART1_DTR, - GPIO84_UART1_RTS, - - /* UART3 */ - GPIO110_UART3_RXD, - - /* USB Host */ - GPIO0_2_USBH_PEN, - GPIO1_2_USBH_PWR, - - /* I2C */ - GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT, - GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT, - - /* SPI */ - GPIO34_GPIO, /* SPDIF_CS */ - GPIO96_GPIO, /* MCLK_CS */ - GPIO125_GPIO, /* ACCEL_CS */ - - /* MMC */ - GPIO3_MMC1_DAT0, - GPIO4_MMC1_DAT1, - GPIO5_MMC1_DAT2, - GPIO6_MMC1_DAT3, - GPIO7_MMC1_CLK, - GPIO8_MMC1_CMD, - - /* One-wire */ - GPIO126_GPIO | MFP_LPM_FLOAT, - GPIO105_GPIO | MFP_PULL_LOW | MFP_LPM_PULL_LOW, - - /* CHRG_USB_OK */ - GPIO101_GPIO | MFP_PULL_HIGH, - /* CHRG_USB_OK */ - GPIO112_GPIO | MFP_PULL_HIGH, - /* CHRG_USB_SUSP */ - GPIO102_GPIO, - /* DISPLAY_ENABLE */ - GPIO106_GPIO, - /* DOCK_DETECT */ - GPIO116_GPIO | MFP_LPM_FLOAT | MFP_PULL_FLOAT, - - /* LCD */ - GPIO54_LCD_LDD_0, - GPIO55_LCD_LDD_1, - GPIO56_LCD_LDD_2, - GPIO57_LCD_LDD_3, - GPIO58_LCD_LDD_4, - GPIO59_LCD_LDD_5, - GPIO60_LCD_LDD_6, - GPIO61_LCD_LDD_7, - GPIO62_LCD_LDD_8, - GPIO63_LCD_LDD_9, - GPIO64_LCD_LDD_10, - GPIO65_LCD_LDD_11, - GPIO66_LCD_LDD_12, - GPIO67_LCD_LDD_13, - GPIO68_LCD_LDD_14, - GPIO69_LCD_LDD_15, - GPIO70_LCD_LDD_16, - GPIO71_LCD_LDD_17, - GPIO72_LCD_FCLK, - GPIO73_LCD_LCLK, - GPIO74_LCD_PCLK, - GPIO75_LCD_BIAS, -}; - -static mfp_cfg_t raumfeld_connector_pin_config[] __initdata = { - /* UART1 */ - GPIO77_UART1_RXD, - GPIO78_UART1_TXD, - GPIO79_UART1_CTS, - GPIO81_UART1_DSR, - GPIO83_UART1_DTR, - GPIO84_UART1_RTS, - - /* UART3 */ - GPIO110_UART3_RXD, 
- - /* USB Host */ - GPIO0_2_USBH_PEN, - GPIO1_2_USBH_PWR, - - /* I2C */ - GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT, - GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT, - - /* SPI */ - GPIO34_GPIO, /* SPDIF_CS */ - GPIO96_GPIO, /* MCLK_CS */ - GPIO125_GPIO, /* ACCEL_CS */ - - /* MMC */ - GPIO3_MMC1_DAT0, - GPIO4_MMC1_DAT1, - GPIO5_MMC1_DAT2, - GPIO6_MMC1_DAT3, - GPIO7_MMC1_CLK, - GPIO8_MMC1_CMD, - - /* Ethernet */ - GPIO1_nCS2, /* CS */ - GPIO40_GPIO | MFP_PULL_HIGH, /* IRQ */ - - /* SSP for I2S */ - GPIO85_SSP1_SCLK, - GPIO89_SSP1_EXTCLK, - GPIO86_SSP1_FRM, - GPIO87_SSP1_TXD, - GPIO88_SSP1_RXD, - GPIO90_SSP1_SYSCLK, - - /* SSP2 for S/PDIF */ - GPIO25_SSP2_SCLK, - GPIO26_SSP2_FRM, - GPIO27_SSP2_TXD, - GPIO29_SSP2_EXTCLK, - - /* LEDs */ - GPIO35_GPIO | MFP_LPM_PULL_LOW, - GPIO36_GPIO | MFP_LPM_DRIVE_HIGH, -}; - -static mfp_cfg_t raumfeld_speaker_pin_config[] __initdata = { - /* UART1 */ - GPIO77_UART1_RXD, - GPIO78_UART1_TXD, - GPIO79_UART1_CTS, - GPIO81_UART1_DSR, - GPIO83_UART1_DTR, - GPIO84_UART1_RTS, - - /* UART3 */ - GPIO110_UART3_RXD, - - /* USB Host */ - GPIO0_2_USBH_PEN, - GPIO1_2_USBH_PWR, - - /* I2C */ - GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT, - GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT, - - /* SPI */ - GPIO34_GPIO, /* SPDIF_CS */ - GPIO96_GPIO, /* MCLK_CS */ - GPIO125_GPIO, /* ACCEL_CS */ - - /* MMC */ - GPIO3_MMC1_DAT0, - GPIO4_MMC1_DAT1, - GPIO5_MMC1_DAT2, - GPIO6_MMC1_DAT3, - GPIO7_MMC1_CLK, - GPIO8_MMC1_CMD, - - /* Ethernet */ - GPIO1_nCS2, /* CS */ - GPIO40_GPIO | MFP_PULL_HIGH, /* IRQ */ - - /* SSP for I2S */ - GPIO85_SSP1_SCLK, - GPIO89_SSP1_EXTCLK, - GPIO86_SSP1_FRM, - GPIO87_SSP1_TXD, - GPIO88_SSP1_RXD, - GPIO90_SSP1_SYSCLK, - - /* LEDs */ - GPIO35_GPIO | MFP_LPM_PULL_LOW, - GPIO36_GPIO | MFP_LPM_DRIVE_HIGH, -}; - -/* - * SMSC LAN9220 Ethernet - */ - -static struct resource smc91x_resources[] = { - { - .start = PXA3xx_CS2_PHYS, - .end = PXA3xx_CS2_PHYS + 0xfffff, - .flags = IORESOURCE_MEM, - }, - { - .start = PXA_GPIO_TO_IRQ(GPIO_ETH_IRQ), - .end = PXA_GPIO_TO_IRQ(GPIO_ETH_IRQ), - .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING, - } -}; - -static struct smsc911x_platform_config raumfeld_smsc911x_config = { - .phy_interface = PHY_INTERFACE_MODE_MII, - .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, - .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, - .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, -}; - -static struct platform_device smc91x_device = { - .name = "smsc911x", - .id = -1, - .num_resources = ARRAY_SIZE(smc91x_resources), - .resource = smc91x_resources, - .dev = { - .platform_data = &raumfeld_smsc911x_config, - } -}; - -/** - * NAND - */ - -static struct mtd_partition raumfeld_nand_partitions[] = { - { - .name = "Bootloader", - .offset = 0, - .size = 0xa0000, - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, - { - .name = "BootloaderEnvironment", - .offset = 0xa0000, - .size = 0x20000, - }, - { - .name = "BootloaderSplashScreen", - .offset = 0xc0000, - .size = 0x60000, - }, - { - .name = "UBI", - .offset = 0x120000, - .size = MTDPART_SIZ_FULL, - }, -}; - -static struct pxa3xx_nand_platform_data raumfeld_nand_info = { - .keep_config = 1, - .parts = raumfeld_nand_partitions, - .nr_parts = ARRAY_SIZE(raumfeld_nand_partitions), -}; - -/** - * USB (OHCI) support - */ - -static struct pxaohci_platform_data raumfeld_ohci_info = { - .port_mode = PMM_GLOBAL_MODE, - .flags = ENABLE_PORT1, -}; - -/** - * Rotary encoder input device - */ - -static struct gpiod_lookup_table raumfeld_rotary_gpios_table = { - .dev_id = "rotary-encoder.0", - 
.table = { - GPIO_LOOKUP_IDX("gpio-0", - GPIO_VOLENC_A, NULL, 0, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("gpio-0", - GPIO_VOLENC_B, NULL, 1, GPIO_ACTIVE_HIGH), - { }, - }, -}; - -static const struct property_entry raumfeld_rotary_properties[] __initconst = { - PROPERTY_ENTRY_U32("rotary-encoder,steps-per-period", 24), - PROPERTY_ENTRY_U32("linux,axis", REL_X), - PROPERTY_ENTRY_U32("rotary-encoder,relative_axis", 1), - { }, -}; - -static struct platform_device rotary_encoder_device = { - .name = "rotary-encoder", - .id = 0, -}; - -/** - * GPIO buttons - */ - -static struct gpio_keys_button gpio_keys_button[] = { - { - .code = KEY_F1, - .type = EV_KEY, - .gpio = GPIO_KEY1, - .active_low = 1, - .wakeup = 0, - .debounce_interval = 5, /* ms */ - .desc = "Button 1", - }, - { - .code = KEY_F2, - .type = EV_KEY, - .gpio = GPIO_KEY2, - .active_low = 1, - .wakeup = 0, - .debounce_interval = 5, /* ms */ - .desc = "Button 2", - }, - { - .code = KEY_F3, - .type = EV_KEY, - .gpio = GPIO_KEY3, - .active_low = 1, - .wakeup = 0, - .debounce_interval = 5, /* ms */ - .desc = "Button 3", - }, - { - .code = KEY_F4, - .type = EV_KEY, - .gpio = GPIO_RESCUE_BOOT, - .active_low = 0, - .wakeup = 0, - .debounce_interval = 5, /* ms */ - .desc = "rescue boot button", - }, - { - .code = KEY_F5, - .type = EV_KEY, - .gpio = GPIO_DOCK_DETECT, - .active_low = 1, - .wakeup = 0, - .debounce_interval = 5, /* ms */ - .desc = "dock detect", - }, - { - .code = KEY_F6, - .type = EV_KEY, - .gpio = GPIO_ON_OFF, - .active_low = 0, - .wakeup = 0, - .debounce_interval = 5, /* ms */ - .desc = "on_off button", - }, -}; - -static struct gpio_keys_platform_data gpio_keys_platform_data = { - .buttons = gpio_keys_button, - .nbuttons = ARRAY_SIZE(gpio_keys_button), - .rep = 0, -}; - -static struct platform_device raumfeld_gpio_keys_device = { - .name = "gpio-keys", - .id = -1, - .dev = { - .platform_data = &gpio_keys_platform_data, - } -}; - -/** - * GPIO LEDs - */ - -static struct gpio_led raumfeld_leds[] = { - { - .name = "raumfeld:1", - .gpio = GPIO_LED1, - .active_low = 1, - .default_state = LEDS_GPIO_DEFSTATE_ON, - }, - { - .name = "raumfeld:2", - .gpio = GPIO_LED2, - .active_low = 0, - .default_state = LEDS_GPIO_DEFSTATE_OFF, - } -}; - -static struct gpio_led_platform_data raumfeld_led_platform_data = { - .leds = raumfeld_leds, - .num_leds = ARRAY_SIZE(raumfeld_leds), -}; - -static struct platform_device raumfeld_led_device = { - .name = "leds-gpio", - .id = -1, - .dev = { - .platform_data = &raumfeld_led_platform_data, - }, -}; - -/** - * One-wire (W1 bus) support - */ - -static void w1_enable_external_pullup(int enable) -{ - gpio_set_value(GPIO_W1_PULLUP_ENABLE, enable); - msleep(100); -} - -static struct gpiod_lookup_table raumfeld_w1_gpiod_table = { - .dev_id = "w1-gpio", - .table = { - GPIO_LOOKUP_IDX("gpio-pxa", GPIO_ONE_WIRE, NULL, 0, - GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - }, -}; - -static struct w1_gpio_platform_data w1_gpio_platform_data = { - .enable_external_pullup = w1_enable_external_pullup, -}; - -static struct platform_device raumfeld_w1_gpio_device = { - .name = "w1-gpio", - .dev = { - .platform_data = &w1_gpio_platform_data - } -}; - -static void __init raumfeld_w1_init(void) -{ - int ret = gpio_request(GPIO_W1_PULLUP_ENABLE, - "W1 external pullup enable"); - - if (ret < 0) - pr_warn("Unable to request GPIO_W1_PULLUP_ENABLE\n"); - else - gpio_direction_output(GPIO_W1_PULLUP_ENABLE, 0); - - gpiod_add_lookup_table(&raumfeld_w1_gpiod_table); - platform_device_register(&raumfeld_w1_gpio_device); -} - -/** - * Framebuffer 
device - */ - -static struct pwm_lookup raumfeld_pwm_lookup[] = { - PWM_LOOKUP("pxa27x-pwm.0", 0, "pwm-backlight", NULL, 10000, - PWM_POLARITY_NORMAL), -}; - -/* PWM controlled backlight */ -static struct platform_pwm_backlight_data raumfeld_pwm_backlight_data = { - .max_brightness = 100, - .dft_brightness = 100, - .enable_gpio = -1, -}; - -static struct platform_device raumfeld_pwm_backlight_device = { - .name = "pwm-backlight", - .dev = { - .parent = &pxa27x_device_pwm0.dev, - .platform_data = &raumfeld_pwm_backlight_data, - } -}; - -/* LT3593 controlled backlight */ -static struct gpio_led raumfeld_lt3593_led = { - .name = "backlight", - .gpio = mfp_to_gpio(MFP_PIN_GPIO17), - .default_state = LEDS_GPIO_DEFSTATE_ON, -}; - -static struct gpio_led_platform_data raumfeld_lt3593_platform_data = { - .leds = &raumfeld_lt3593_led, - .num_leds = 1, -}; - -static struct platform_device raumfeld_lt3593_device = { - .name = "leds-lt3593", - .id = -1, - .dev = { - .platform_data = &raumfeld_lt3593_platform_data, - }, -}; - -static struct pxafb_mode_info sharp_lq043t3dx02_mode = { - .pixclock = 111000, - .xres = 480, - .yres = 272, - .bpp = 16, - .hsync_len = 41, - .left_margin = 2, - .right_margin = 1, - .vsync_len = 10, - .upper_margin = 3, - .lower_margin = 1, - .sync = 0, -}; - -static struct pxafb_mach_info raumfeld_sharp_lcd_info = { - .modes = &sharp_lq043t3dx02_mode, - .num_modes = 1, - .video_mem_size = 0x400000, - .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, -#ifdef CONFIG_PXA3XX_GCU - .acceleration_enabled = 1, -#endif -}; - -static void __init raumfeld_lcd_init(void) -{ - int ret; - - ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable"); - if (ret < 0) - pr_warn("Unable to request GPIO_TFT_VA_EN\n"); - else - gpio_direction_output(GPIO_TFT_VA_EN, 1); - - msleep(100); - - ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable"); - if (ret < 0) - pr_warn("Unable to request GPIO_DISPLAY_ENABLE\n"); - else - gpio_direction_output(GPIO_DISPLAY_ENABLE, 1); - - /* Hardware revision 2 has the backlight regulator controlled - * by an LT3593, earlier and later devices use PWM for that. 
*/ - if ((system_rev & 0xff) == 2) { - platform_device_register(&raumfeld_lt3593_device); - } else { - mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; - pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); - pwm_add_table(raumfeld_pwm_lookup, - ARRAY_SIZE(raumfeld_pwm_lookup)); - platform_device_register(&raumfeld_pwm_backlight_device); - } - - pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); - platform_device_register(&pxa3xx_device_gcu); -} - -/** - * SPI devices - */ - -static struct spi_gpio_platform_data raumfeld_spi_platform_data = { - .num_chipselect = 3, -}; - -static struct platform_device raumfeld_spi_device = { - .name = "spi_gpio", - .id = 0, - .dev = { - .platform_data = &raumfeld_spi_platform_data, - } -}; - -static struct gpiod_lookup_table raumfeld_spi_gpiod_table = { - .dev_id = "spi_gpio", - .table = { - GPIO_LOOKUP("gpio-0", GPIO_SPI_CLK, - "sck", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("gpio-0", GPIO_SPI_MOSI, - "mosi", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP("gpio-0", GPIO_SPI_MISO, - "miso", GPIO_ACTIVE_HIGH), - GPIO_LOOKUP_IDX("gpio-0", GPIO_SPDIF_CS, - "cs", 0, GPIO_ACTIVE_HIGH), - GPIO_LOOKUP_IDX("gpio-0", GPIO_ACCEL_CS, - "cs", 1, GPIO_ACTIVE_HIGH), - GPIO_LOOKUP_IDX("gpio-0", GPIO_MCLK_DAC_CS, - "cs", 2, GPIO_ACTIVE_HIGH), - { }, - }, -}; - -static struct lis3lv02d_platform_data lis3_pdata = { - .click_flags = LIS3_CLICK_SINGLE_X | - LIS3_CLICK_SINGLE_Y | - LIS3_CLICK_SINGLE_Z, - .irq_cfg = LIS3_IRQ1_CLICK | LIS3_IRQ2_CLICK, - .wakeup_flags = LIS3_WAKEUP_X_LO | LIS3_WAKEUP_X_HI | - LIS3_WAKEUP_Y_LO | LIS3_WAKEUP_Y_HI | - LIS3_WAKEUP_Z_LO | LIS3_WAKEUP_Z_HI, - .wakeup_thresh = 10, - .click_thresh_x = 10, - .click_thresh_y = 10, - .click_thresh_z = 10, -}; - -#define SPI_AK4104 \ -{ \ - .modalias = "ak4104-codec", \ - .max_speed_hz = 10000, \ - .bus_num = 0, \ - .chip_select = 0, \ -} - -#define SPI_LIS3 \ -{ \ - .modalias = "lis3lv02d_spi", \ - .max_speed_hz = 1000000, \ - .bus_num = 0, \ - .chip_select = 1, \ - .platform_data = &lis3_pdata, \ - .irq = PXA_GPIO_TO_IRQ(GPIO_ACCEL_IRQ), \ -} - -#define SPI_DAC7512 \ -{ \ - .modalias = "dac7512", \ - .max_speed_hz = 1000000, \ - .bus_num = 0, \ - .chip_select = 2, \ -} - -static struct spi_board_info connector_spi_devices[] __initdata = { - SPI_AK4104, - SPI_DAC7512, -}; - -static struct spi_board_info speaker_spi_devices[] __initdata = { - SPI_DAC7512, -}; - -static struct spi_board_info controller_spi_devices[] __initdata = { - SPI_LIS3, -}; - -/** - * MMC for Marvell Libertas 8688 via SDIO - */ - -static int raumfeld_mci_init(struct device *dev, irq_handler_t isr, void *data) -{ - gpio_set_value(GPIO_W2W_RESET, 1); - gpio_set_value(GPIO_W2W_PDN, 1); - - return 0; -} - -static void raumfeld_mci_exit(struct device *dev, void *data) -{ - gpio_set_value(GPIO_W2W_RESET, 0); - gpio_set_value(GPIO_W2W_PDN, 0); -} - -static struct pxamci_platform_data raumfeld_mci_platform_data = { - .init = raumfeld_mci_init, - .exit = raumfeld_mci_exit, - .detect_delay_ms = 200, -}; - -/* - * External power / charge logic - */ - -static int power_supply_init(struct device *dev) -{ - return 0; -} - -static void power_supply_exit(struct device *dev) -{ -} - -static int raumfeld_is_ac_online(void) -{ - return !gpio_get_value(GPIO_CHARGE_DC_OK); -} - -static int raumfeld_is_usb_online(void) -{ - return 0; -} - -static char *raumfeld_power_supplicants[] = { "ds2760-battery.0" }; - -static void raumfeld_power_signal_charged(void) -{ - struct power_supply *psy = - power_supply_get_by_name(raumfeld_power_supplicants[0]); - - if (psy) { - 
power_supply_set_battery_charged(psy); - power_supply_put(psy); - } -} - -static int raumfeld_power_resume(void) -{ - /* check if GPIO_CHARGE_DONE went low while we were sleeping */ - if (!gpio_get_value(GPIO_CHARGE_DONE)) - raumfeld_power_signal_charged(); - - return 0; -} - -static struct pda_power_pdata power_supply_info = { - .init = power_supply_init, - .is_ac_online = raumfeld_is_ac_online, - .is_usb_online = raumfeld_is_usb_online, - .exit = power_supply_exit, - .supplied_to = raumfeld_power_supplicants, - .num_supplicants = ARRAY_SIZE(raumfeld_power_supplicants), - .resume = raumfeld_power_resume, -}; - -static struct resource power_supply_resources[] = { - { - .name = "ac", - .flags = IORESOURCE_IRQ | - IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, - .start = GPIO_CHARGE_DC_OK, - .end = GPIO_CHARGE_DC_OK, - }, -}; - -static irqreturn_t charge_done_irq(int irq, void *dev_id) -{ - raumfeld_power_signal_charged(); - return IRQ_HANDLED; -} - -static struct platform_device raumfeld_power_supply = { - .name = "pda-power", - .id = -1, - .dev = { - .platform_data = &power_supply_info, - }, - .resource = power_supply_resources, - .num_resources = ARRAY_SIZE(power_supply_resources), -}; - -static void __init raumfeld_power_init(void) -{ - int ret; - - /* Set PEN2 high to enable maximum charge current */ - ret = gpio_request(GPIO_CHRG_PEN2, "CHRG_PEN2"); - if (ret < 0) - pr_warn("Unable to request GPIO_CHRG_PEN2\n"); - else - gpio_direction_output(GPIO_CHRG_PEN2, 1); - - ret = gpio_request(GPIO_CHARGE_DC_OK, "CABLE_DC_OK"); - if (ret < 0) - pr_warn("Unable to request GPIO_CHARGE_DC_OK\n"); - - ret = gpio_request(GPIO_CHARGE_USB_SUSP, "CHARGE_USB_SUSP"); - if (ret < 0) - pr_warn("Unable to request GPIO_CHARGE_USB_SUSP\n"); - else - gpio_direction_output(GPIO_CHARGE_USB_SUSP, 0); - - power_supply_resources[0].start = gpio_to_irq(GPIO_CHARGE_DC_OK); - power_supply_resources[0].end = gpio_to_irq(GPIO_CHARGE_DC_OK); - - ret = request_irq(gpio_to_irq(GPIO_CHARGE_DONE), - &charge_done_irq, IORESOURCE_IRQ_LOWEDGE, - "charge_done", NULL); - - if (ret < 0) - printk(KERN_ERR "%s: unable to register irq %d\n", __func__, - GPIO_CHARGE_DONE); - else - platform_device_register(&raumfeld_power_supply); -} - -/* Fixed regulator for AUDIO_VA, 0-0048 maps to the cs4270 codec device */ - -static struct regulator_consumer_supply audio_va_consumer_supply = - REGULATOR_SUPPLY("va", "0-0048"); - -static struct regulator_init_data audio_va_initdata = { - .consumer_supplies = &audio_va_consumer_supply, - .num_consumer_supplies = 1, - .constraints = { - .valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, -}; - -static struct fixed_voltage_config audio_va_config = { - .supply_name = "audio_va", - .microvolts = 5000000, - .enable_high = 1, - .enabled_at_boot = 0, - .init_data = &audio_va_initdata, -}; - -static struct platform_device audio_va_device = { - .name = "reg-fixed-voltage", - .id = 0, - .dev = { - .platform_data = &audio_va_config, - }, -}; - -static struct gpiod_lookup_table audio_va_gpiod_table = { - .dev_id = "reg-fixed-voltage.0", - .table = { - GPIO_LOOKUP("gpio-pxa", GPIO_AUDIO_VA_ENABLE, - NULL, GPIO_ACTIVE_HIGH), - { }, - }, -}; - -/* Dummy supplies for Codec's VD/VLC */ - -static struct regulator_consumer_supply audio_dummy_supplies[] = { - REGULATOR_SUPPLY("vd", "0-0048"), - REGULATOR_SUPPLY("vlc", "0-0048"), -}; - -static struct regulator_init_data audio_dummy_initdata = { - .consumer_supplies = audio_dummy_supplies, - .num_consumer_supplies = ARRAY_SIZE(audio_dummy_supplies), - .constraints = { - 
.valid_ops_mask = REGULATOR_CHANGE_STATUS, - }, -}; - -static struct fixed_voltage_config audio_dummy_config = { - .supply_name = "audio_vd", - .microvolts = 3300000, - .init_data = &audio_dummy_initdata, -}; - -static struct platform_device audio_supply_dummy_device = { - .name = "reg-fixed-voltage", - .id = 1, - .dev = { - .platform_data = &audio_dummy_config, - }, -}; - -static struct platform_device *audio_regulator_devices[] = { - &audio_va_device, - &audio_supply_dummy_device, -}; - -/** - * Regulator support via MAX8660 - */ - -static struct regulator_consumer_supply vcc_mmc_supply = - REGULATOR_SUPPLY("vmmc", "pxa2xx-mci.0"); - -static struct regulator_init_data vcc_mmc_init_data = { - .constraints = { - .min_uV = 3300000, - .max_uV = 3300000, - .valid_modes_mask = REGULATOR_MODE_NORMAL, - .valid_ops_mask = REGULATOR_CHANGE_STATUS | - REGULATOR_CHANGE_VOLTAGE | - REGULATOR_CHANGE_MODE, - }, - .consumer_supplies = &vcc_mmc_supply, - .num_consumer_supplies = 1, -}; - -static struct max8660_subdev_data max8660_v6_subdev_data = { - .id = MAX8660_V6, - .name = "vmmc", - .platform_data = &vcc_mmc_init_data, -}; - -static struct max8660_platform_data max8660_pdata = { - .subdevs = &max8660_v6_subdev_data, - .num_subdevs = 1, -}; - -/** - * I2C devices - */ - -static struct i2c_board_info raumfeld_pwri2c_board_info = { - .type = "max8660", - .addr = 0x34, - .platform_data = &max8660_pdata, -}; - -static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = { - .type = "cs4270", - .addr = 0x48, -}; - -static struct gpiod_lookup_table raumfeld_controller_gpios_table = { - .dev_id = "0-000a", - .table = { - GPIO_LOOKUP("gpio-pxa", - GPIO_TOUCH_IRQ, "attn", GPIO_ACTIVE_HIGH), - { }, - }, -}; - -static const struct resource raumfeld_controller_resources[] __initconst = { - { - .start = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ), - .end = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ), - .flags = IORESOURCE_IRQ | IRQF_TRIGGER_HIGH, - }, -}; - -static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = { - .type = "eeti_ts", - .addr = 0x0a, - .resources = raumfeld_controller_resources, - .num_resources = ARRAY_SIZE(raumfeld_controller_resources), -}; - -static struct platform_device *raumfeld_common_devices[] = { - &raumfeld_gpio_keys_device, - &raumfeld_led_device, - &raumfeld_spi_device, -}; - -static void __init raumfeld_audio_init(void) -{ - int ret; - - ret = gpio_request(GPIO_CODEC_RESET, "cs4270 reset"); - if (ret < 0) - pr_warn("unable to request GPIO_CODEC_RESET\n"); - else - gpio_direction_output(GPIO_CODEC_RESET, 1); - - ret = gpio_request(GPIO_SPDIF_RESET, "ak4104 s/pdif reset"); - if (ret < 0) - pr_warn("unable to request GPIO_SPDIF_RESET\n"); - else - gpio_direction_output(GPIO_SPDIF_RESET, 1); - - ret = gpio_request(GPIO_MCLK_RESET, "MCLK reset"); - if (ret < 0) - pr_warn("unable to request GPIO_MCLK_RESET\n"); - else - gpio_direction_output(GPIO_MCLK_RESET, 1); - - gpiod_add_lookup_table(&audio_va_gpiod_table); - platform_add_devices(ARRAY_AND_SIZE(audio_regulator_devices)); -} - -static void __init raumfeld_common_init(void) -{ - int ret; - - /* The on/off button polarity has changed after revision 1 */ - if ((system_rev & 0xff) > 1) { - int i; - - for (i = 0; i < ARRAY_SIZE(gpio_keys_button); i++) - if (!strcmp(gpio_keys_button[i].desc, "on_off button")) - gpio_keys_button[i].active_low = 1; - } - - enable_irq_wake(IRQ_WAKEUP0); - - pxa3xx_set_nand_info(&raumfeld_nand_info); - pxa3xx_set_i2c_power_info(NULL); - pxa_set_ohci_info(&raumfeld_ohci_info); - 
pxa_set_mci_info(&raumfeld_mci_platform_data); - pxa_set_i2c_info(NULL); - pxa_set_ffuart_info(NULL); - - ret = gpio_request(GPIO_W2W_RESET, "Wi2Wi reset"); - if (ret < 0) - pr_warn("Unable to request GPIO_W2W_RESET\n"); - else - gpio_direction_output(GPIO_W2W_RESET, 0); - - ret = gpio_request(GPIO_W2W_PDN, "Wi2Wi powerup"); - if (ret < 0) - pr_warn("Unable to request GPIO_W2W_PDN\n"); - else - gpio_direction_output(GPIO_W2W_PDN, 0); - - /* this can be used to switch off the device */ - ret = gpio_request(GPIO_SHUTDOWN_SUPPLY, "supply shutdown"); - if (ret < 0) - pr_warn("Unable to request GPIO_SHUTDOWN_SUPPLY\n"); - else - gpio_direction_output(GPIO_SHUTDOWN_SUPPLY, 0); - - gpiod_add_lookup_table(&raumfeld_spi_gpiod_table); - platform_add_devices(ARRAY_AND_SIZE(raumfeld_common_devices)); - i2c_register_board_info(1, &raumfeld_pwri2c_board_info, 1); -} - -static void __init __maybe_unused raumfeld_controller_init(void) -{ - int ret; - - pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_controller_pin_config)); - - gpiod_add_lookup_table(&raumfeld_rotary_gpios_table); - device_add_properties(&rotary_encoder_device.dev, - raumfeld_rotary_properties); - platform_device_register(&rotary_encoder_device); - - spi_register_board_info(ARRAY_AND_SIZE(controller_spi_devices)); - - gpiod_add_lookup_table(&raumfeld_controller_gpios_table); - i2c_register_board_info(0, &raumfeld_controller_i2c_board_info, 1); - - ret = gpio_request(GPIO_SHUTDOWN_BATT, "battery shutdown"); - if (ret < 0) - pr_warn("Unable to request GPIO_SHUTDOWN_BATT\n"); - else - gpio_direction_output(GPIO_SHUTDOWN_BATT, 0); - - raumfeld_common_init(); - raumfeld_power_init(); - raumfeld_lcd_init(); - raumfeld_w1_init(); -} - -static void __init __maybe_unused raumfeld_connector_init(void) -{ - pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_connector_pin_config)); - spi_register_board_info(ARRAY_AND_SIZE(connector_spi_devices)); - i2c_register_board_info(0, &raumfeld_connector_i2c_board_info, 1); - - platform_device_register(&smc91x_device); - - raumfeld_audio_init(); - raumfeld_common_init(); -} - -static void __init __maybe_unused raumfeld_speaker_init(void) -{ - pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_speaker_pin_config)); - spi_register_board_info(ARRAY_AND_SIZE(speaker_spi_devices)); - i2c_register_board_info(0, &raumfeld_connector_i2c_board_info, 1); - - platform_device_register(&smc91x_device); - - gpiod_add_lookup_table(&raumfeld_rotary_gpios_table); - device_add_properties(&rotary_encoder_device.dev, - raumfeld_rotary_properties); - platform_device_register(&rotary_encoder_device); - - raumfeld_audio_init(); - raumfeld_common_init(); -} - -/* physical memory regions */ -#define RAUMFELD_SDRAM_BASE 0xa0000000 /* SDRAM region */ - -#ifdef CONFIG_MACH_RAUMFELD_RC -MACHINE_START(RAUMFELD_RC, "Raumfeld Controller") - .atag_offset = 0x100, - .init_machine = raumfeld_controller_init, - .map_io = pxa3xx_map_io, - .nr_irqs = PXA_NR_IRQS, - .init_irq = pxa3xx_init_irq, - .handle_irq = pxa3xx_handle_irq, - .init_time = pxa_timer_init, - .restart = pxa_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_RAUMFELD_CONNECTOR -MACHINE_START(RAUMFELD_CONNECTOR, "Raumfeld Connector") - .atag_offset = 0x100, - .init_machine = raumfeld_connector_init, - .map_io = pxa3xx_map_io, - .nr_irqs = PXA_NR_IRQS, - .init_irq = pxa3xx_init_irq, - .handle_irq = pxa3xx_handle_irq, - .init_time = pxa_timer_init, - .restart = pxa_restart, -MACHINE_END -#endif - -#ifdef CONFIG_MACH_RAUMFELD_SPEAKER -MACHINE_START(RAUMFELD_SPEAKER, "Raumfeld Speaker") - .atag_offset = 
0x100, - .init_machine = raumfeld_speaker_init, - .map_io = pxa3xx_map_io, - .nr_irqs = PXA_NR_IRQS, - .init_irq = pxa3xx_init_irq, - .handle_irq = pxa3xx_handle_irq, - .init_time = pxa_timer_init, - .restart = pxa_restart, -MACHINE_END -#endif diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 306818e2cf54..8dac824a85df 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -572,7 +572,7 @@ static struct spi_board_info spitz_spi_devices[] = { }, }; -static struct pxa2xx_spi_master spitz_spi_info = { +static struct pxa2xx_spi_controller spitz_spi_info = { .num_chipselect = 3, }; diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c index e0d6c872270a..c28d19b126a7 100644 --- a/arch/arm/mach-pxa/stargate2.c +++ b/arch/arm/mach-pxa/stargate2.c @@ -337,15 +337,15 @@ static struct platform_device stargate2_flash_device = { .num_resources = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_0_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_0_info = { .num_chipselect = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_1_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_1_info = { .num_chipselect = 1, }; -static struct pxa2xx_spi_master pxa_ssp_master_2_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_2_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index e8a93c088c35..7439798d58e4 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c @@ -813,7 +813,7 @@ static struct platform_device tosa_bt_device = { .dev.platform_data = &tosa_bt_data, }; -static struct pxa2xx_spi_master pxa_ssp_master_info = { +static struct pxa2xx_spi_controller pxa_ssp_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/z2.c b/arch/arm/mach-pxa/z2.c index e2353e75bb28..ad082e11e2a4 100644 --- a/arch/arm/mach-pxa/z2.c +++ b/arch/arm/mach-pxa/z2.c @@ -607,12 +607,12 @@ static struct spi_board_info spi_board_info[] __initdata = { }, }; -static struct pxa2xx_spi_master pxa_ssp1_master_info = { +static struct pxa2xx_spi_controller pxa_ssp1_master_info = { .num_chipselect = 1, .enable_dma = 1, }; -static struct pxa2xx_spi_master pxa_ssp2_master_info = { +static struct pxa2xx_spi_controller pxa_ssp2_master_info = { .num_chipselect = 1, }; diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index c411f79d4cb5..3fd1119c14d5 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -391,7 +391,7 @@ static struct platform_device zeus_sram_device = { }; /* SPI interface on SSP3 */ -static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = { +static struct pxa2xx_spi_controller pxa2xx_spi_ssp3_master_info = { .num_chipselect = 1, .enable_dma = 1, }; @@ -426,7 +426,7 @@ static struct gpiod_lookup_table can_regulator_gpiod_table = { .dev_id = "reg-fixed-voltage.0", .table = { GPIO_LOOKUP("gpio-pxa", ZEUS_CAN_SHDN_GPIO, - NULL, GPIO_ACTIVE_HIGH), + NULL, GPIO_ACTIVE_LOW), { }, }, }; @@ -547,7 +547,6 @@ static struct regulator_init_data zeus_ohci_regulator_data = { static struct fixed_voltage_config zeus_ohci_regulator_config = { .supply_name = "vbus2", .microvolts = 5000000, /* 5.0V */ - .enable_high = 1, .startup_delay = 0, .init_data = &zeus_ohci_regulator_data, }; diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c index 058ce73137e8..5d819b6ea428 100644 --- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c +++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c @@ -65,16 +65,16 @@ static int 
osiris_dvs_notify(struct notifier_block *nb, switch (val) { case CPUFREQ_PRECHANGE: - if (old_dvs & !new_dvs || - cur_dvs & !new_dvs) { + if ((old_dvs && !new_dvs) || + (cur_dvs && !new_dvs)) { pr_debug("%s: exiting dvs\n", __func__); cur_dvs = false; gpio_set_value(OSIRIS_GPIO_DVS, 1); } break; case CPUFREQ_POSTCHANGE: - if (!old_dvs & new_dvs || - !cur_dvs & new_dvs) { + if ((!old_dvs && new_dvs) || + (!cur_dvs && new_dvs)) { pr_debug("entering dvs\n"); cur_dvs = true; gpio_set_value(OSIRIS_GPIO_DVS, 0); diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index dfa42496ec27..d09c3f236186 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -469,7 +469,6 @@ static struct regulator_consumer_supply assabet_cf_vcc_consumers[] = { static struct fixed_voltage_config assabet_cf_vcc_pdata __initdata = { .supply_name = "cf-power", .microvolts = 3300000, - .enable_high = 1, }; static struct gpiod_lookup_table assabet_cf_vcc_gpio_table = { diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c index 406487e76a5c..c7fb9a73e4c5 100644 --- a/arch/arm/mach-sa1100/simpad.c +++ b/arch/arm/mach-sa1100/simpad.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c index 8e50daa99151..dc526ef2e9b3 100644 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c +++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c @@ -40,6 +40,7 @@ struct regulator_quirk { struct list_head list; const struct of_device_id *id; + struct device_node *np; struct of_phandle_args irq_args; struct i2c_msg i2c_msg; bool shared; /* IRQ line is shared */ @@ -101,6 +102,9 @@ static int regulator_quirk_notify(struct notifier_block *nb, if (!pos->shared) continue; + if (pos->np->parent != client->dev.parent->of_node) + continue; + dev_info(&client->dev, "clearing %s@0x%02x interrupts\n", pos->id->compatible, pos->i2c_msg.addr); @@ -165,6 +169,7 @@ static int __init rcar_gen2_regulator_quirk(void) memcpy(&quirk->i2c_msg, id->data, sizeof(quirk->i2c_msg)); quirk->id = id; + quirk->np = np; quirk->i2c_msg.addr = addr; ret = of_irq_parse_one(np, 0, argsa); diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c index afd98971d903..816da0eb6616 100644 --- a/arch/arm/mach-socfpga/socfpga.c +++ b/arch/arm/mach-socfpga/socfpga.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -32,8 +33,6 @@ void __iomem *rst_manager_base_addr; void __iomem *sdr_ctl_base_addr; unsigned long socfpga_cpu1start_addr; -extern void __init socfpga_reset_init(void); - static void __init socfpga_sysmgr_init(void) { struct device_node *np; diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c index 8a7f301839c2..933b6930f024 100644 --- a/arch/arm/mach-sunxi/sunxi.c +++ b/arch/arm/mach-sunxi/sunxi.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -37,7 +38,6 @@ static const char * const sun6i_board_dt_compat[] = { NULL, }; -extern void __init sun6i_reset_init(void); static void __init sun6i_timer_init(void) { of_clk_init(NULL); diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c index 028e50c6383f..a32c3b631484 100644 --- a/arch/arm/mach-tango/pm.c +++ b/arch/arm/mach-tango/pm.c @@ -3,6 +3,7 @@ #include #include #include "smc.h" +#include "pm.h" static int tango_pm_powerdown(unsigned long arg) { @@ -24,10 +25,7 @@ static const struct platform_suspend_ops 
tango_pm_ops = { .valid = suspend_valid_only_mem, }; -static int __init tango_pm_init(void) +void __init tango_pm_init(void) { suspend_set_ops(&tango_pm_ops); - return 0; } - -late_initcall(tango_pm_init); diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h new file mode 100644 index 000000000000..35ea705a0ee2 --- /dev/null +++ b/arch/arm/mach-tango/pm.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifdef CONFIG_SUSPEND +void __init tango_pm_init(void); +#else +#define tango_pm_init NULL +#endif diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c index 677dd7b5efd9..824f90737b04 100644 --- a/arch/arm/mach-tango/setup.c +++ b/arch/arm/mach-tango/setup.c @@ -2,6 +2,7 @@ #include #include #include "smc.h" +#include "pm.h" static void tango_l2c_write(unsigned long val, unsigned int reg) { @@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT") .dt_compat = tango_dt_compat, .l2c_aux_mask = ~0, .l2c_write_sec = tango_l2c_write, + .init_late = tango_pm_init, MACHINE_END diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h index 9e5b2f869fc8..9bc291e76887 100644 --- a/arch/arm/mach-tegra/iomap.h +++ b/arch/arm/mach-tegra/iomap.h @@ -79,15 +79,24 @@ #define TEGRA_PMC_BASE 0x7000E400 #define TEGRA_PMC_SIZE SZ_256 +#define TEGRA_MC_BASE 0x7000F000 +#define TEGRA_MC_SIZE SZ_1K + #define TEGRA_EMC_BASE 0x7000F400 #define TEGRA_EMC_SIZE SZ_1K +#define TEGRA114_MC_BASE 0x70019000 +#define TEGRA114_MC_SIZE SZ_4K + #define TEGRA_EMC0_BASE 0x7001A000 #define TEGRA_EMC0_SIZE SZ_2K #define TEGRA_EMC1_BASE 0x7001A800 #define TEGRA_EMC1_SIZE SZ_2K +#define TEGRA124_MC_BASE 0x70019000 +#define TEGRA124_MC_SIZE SZ_4K + #define TEGRA124_EMC_BASE 0x7001B000 #define TEGRA124_EMC_SIZE SZ_2K diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S index 5c8e638ee51a..dedeebfccc55 100644 --- a/arch/arm/mach-tegra/sleep-tegra20.S +++ b/arch/arm/mach-tegra/sleep-tegra20.S @@ -32,7 +32,6 @@ #define EMC_CFG 0xc #define EMC_ADR_CFG 0x10 -#define EMC_REFRESH 0x70 #define EMC_NOP 0xdc #define EMC_SELF_REF 0xe0 #define EMC_REQ_CTRL 0x2b0 @@ -397,7 +396,6 @@ padload_done: mov r1, #1 str r1, [r0, #EMC_NOP] str r1, [r0, #EMC_NOP] - str r1, [r0, #EMC_REFRESH] emc_device_mask r1, r0 diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S index dd4a67dabd91..d0b4c486ddbf 100644 --- a/arch/arm/mach-tegra/sleep-tegra30.S +++ b/arch/arm/mach-tegra/sleep-tegra30.S @@ -29,7 +29,6 @@ #define EMC_CFG 0xc #define EMC_ADR_CFG 0x10 #define EMC_TIMING_CONTROL 0x28 -#define EMC_REFRESH 0x70 #define EMC_NOP 0xdc #define EMC_SELF_REF 0xe0 #define EMC_MRW 0xe8 @@ -45,6 +44,8 @@ #define EMC_XM2VTTGENPADCTRL 0x310 #define EMC_XM2VTTGENPADCTRL2 0x314 +#define MC_EMEM_ARB_CFG 0x90 + #define PMC_CTRL 0x0 #define PMC_CTRL_SIDE_EFFECT_LP0 (1 << 14) /* enter LP0 when CPU pwr gated */ @@ -419,6 +420,22 @@ _pll_m_c_x_done: movweq r0, #:lower16:TEGRA124_EMC_BASE movteq r0, #:upper16:TEGRA124_EMC_BASE + cmp r10, #TEGRA30 + moveq r2, #0x20 + movweq r4, #:lower16:TEGRA_MC_BASE + movteq r4, #:upper16:TEGRA_MC_BASE + cmp r10, #TEGRA114 + moveq r2, #0x34 + movweq r4, #:lower16:TEGRA114_MC_BASE + movteq r4, #:upper16:TEGRA114_MC_BASE + cmp r10, #TEGRA124 + moveq r2, #0x20 + movweq r4, #:lower16:TEGRA124_MC_BASE + movteq r4, #:upper16:TEGRA124_MC_BASE + + ldr r1, [r5, r2] @ restore MC_EMEM_ARB_CFG + str r1, [r4, #MC_EMEM_ARB_CFG] + exit_self_refresh: ldr r1, [r5, #0xC] @ restore EMC_XM2VTTGENPADCTRL str r1, [r0, #EMC_XM2VTTGENPADCTRL] 
@@ -459,7 +476,6 @@ emc_wait_auto_cal_onetime: cmp r10, #TEGRA30 streq r1, [r0, #EMC_NOP] streq r1, [r0, #EMC_NOP] - streq r1, [r0, #EMC_REFRESH] emc_device_mask r1, r0 @@ -521,6 +537,8 @@ zcal_done: ldr r1, [r5, #0x0] @ restore EMC_CFG str r1, [r0, #EMC_CFG] + emc_timing_update r1, r0 + /* Tegra114 had dual EMC channel, now config the other one */ cmp r10, #TEGRA114 bne __no_dual_emc_chanl @@ -546,6 +564,7 @@ tegra30_sdram_pad_address: .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14 .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18 .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c + .word TEGRA_MC_BASE + MC_EMEM_ARB_CFG @0x20 tegra30_sdram_pad_address_end: tegra114_sdram_pad_address: @@ -562,6 +581,7 @@ tegra114_sdram_pad_address: .word TEGRA_EMC1_BASE + EMC_AUTO_CAL_INTERVAL @0x28 .word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL @0x2c .word TEGRA_EMC1_BASE + EMC_XM2VTTGENPADCTRL2 @0x30 + .word TEGRA114_MC_BASE + MC_EMEM_ARB_CFG @0x34 tegra114_sdram_pad_adress_end: tegra124_sdram_pad_address: @@ -573,6 +593,7 @@ tegra124_sdram_pad_address: .word TEGRA_PMC_BASE + PMC_IO_DPD_STATUS @0x14 .word TEGRA_CLK_RESET_BASE + CLK_RESET_CLK_SOURCE_MSELECT @0x18 .word TEGRA_CLK_RESET_BASE + CLK_RESET_SCLK_BURST @0x1c + .word TEGRA124_MC_BASE + MC_EMEM_ARB_CFG @0x20 tegra124_sdram_pad_address_end: tegra30_sdram_pad_size: diff --git a/arch/arm/mm/cache-l2x0-pmu.c b/arch/arm/mm/cache-l2x0-pmu.c index afe5b4c7b164..99bcd074916a 100644 --- a/arch/arm/mm/cache-l2x0-pmu.c +++ b/arch/arm/mm/cache-l2x0-pmu.c @@ -314,14 +314,6 @@ static int l2x0_pmu_event_init(struct perf_event *event) event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -544,6 +536,7 @@ static __init int l2x0_pmu_init(void) .del = l2x0_pmu_event_del, .event_init = l2x0_pmu_event_init, .attr_groups = l2x0_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; l2x0_pmu_reset(); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f1e2922e447c..c6aab9c36ff1 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -188,6 +188,7 @@ const struct dma_map_ops arm_dma_ops = { .unmap_page = arm_dma_unmap_page, .map_sg = arm_dma_map_sg, .unmap_sg = arm_dma_unmap_sg, + .map_resource = dma_direct_map_resource, .sync_single_for_cpu = arm_dma_sync_single_for_cpu, .sync_single_for_device = arm_dma_sync_single_for_device, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, @@ -211,6 +212,7 @@ const struct dma_map_ops arm_coherent_dma_ops = { .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, + .map_resource = dma_direct_map_resource, .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_coherent_dma_ops); @@ -2390,4 +2392,6 @@ void arch_teardown_dma_ops(struct device *dev) return; arm_teardown_iommu_dma_ops(dev); + /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ + set_dma_ops(dev, NULL); } diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 478ea8b7db87..15dddfe43319 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -205,7 +205,11 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) BUG_ON(!arm_memblock_steal_permitted); - phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, align); + if (!phys) + 
panic("Failed to steal %pa bytes at %pS\n", + &size, (void *)_RET_IP_); + memblock_free(phys, size); memblock_remove(phys, size); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index f5cc1ccfea3d..f3ce34113f89 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -719,16 +719,15 @@ EXPORT_SYMBOL(phys_mem_access_prot); #define vectors_base() (vectors_high() ? 0xffff0000 : 0) -static void __init *early_alloc_aligned(unsigned long sz, unsigned long align) -{ - void *ptr = __va(memblock_phys_alloc(sz, align)); - memset(ptr, 0, sz); - return ptr; -} - static void __init *early_alloc(unsigned long sz) { - return early_alloc_aligned(sz, sz); + void *ptr = memblock_alloc(sz, sz); + + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, sz, sz); + + return ptr; } static void *__init late_alloc(unsigned long sz) @@ -1000,7 +999,10 @@ void __init iotable_init(struct map_desc *io_desc, int nr) if (!nr) return; - svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm)); + svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm)); + if (!svm) + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", + __func__, sizeof(*svm) * nr, __alignof__(*svm)); for (md = io_desc; nr; md++, nr--) { create_mapping(md); @@ -1022,7 +1024,10 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, struct vm_struct *vm; struct static_vm *svm; - svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); + svm = memblock_alloc(sizeof(*svm), __alignof__(*svm)); + if (!svm) + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", + __func__, sizeof(*svm), __alignof__(*svm)); vm = &svm->vm; vm->addr = (void *)addr; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 25b3ee85066e..c8bfbbfdfcc3 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1083,12 +1083,17 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src, /* Arithmatic Operation */ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, - const u8 rn, struct jit_ctx *ctx, u8 op) { + const u8 rn, struct jit_ctx *ctx, u8 op, + bool is_jmp64) { switch (op) { case BPF_JSET: - emit(ARM_AND_R(ARM_IP, rt, rn), ctx); - emit(ARM_AND_R(ARM_LR, rd, rm), ctx); - emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + if (is_jmp64) { + emit(ARM_AND_R(ARM_IP, rt, rn), ctx); + emit(ARM_AND_R(ARM_LR, rd, rm), ctx); + emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + } else { + emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx); + } break; case BPF_JEQ: case BPF_JNE: @@ -1096,18 +1101,25 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, case BPF_JGE: case BPF_JLE: case BPF_JLT: - emit(ARM_CMP_R(rd, rm), ctx); - _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + if (is_jmp64) { + emit(ARM_CMP_R(rd, rm), ctx); + /* Only compare low halve if high halve are equal. 
*/ + _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + } else { + emit(ARM_CMP_R(rt, rn), ctx); + } break; case BPF_JSLE: case BPF_JSGT: emit(ARM_CMP_R(rn, rt), ctx); - emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); + if (is_jmp64) + emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); break; case BPF_JSLT: case BPF_JSGE: emit(ARM_CMP_R(rt, rn), ctx); - emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); + if (is_jmp64) + emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); break; } } @@ -1615,6 +1627,17 @@ exit: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* Setup source registers */ rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); @@ -1641,6 +1664,17 @@ exit: case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: if (off == 0) break; rm = tmp2[0]; @@ -1652,7 +1686,8 @@ go_jmp: rd = arm_bpf_get_reg64(dst, tmp, ctx); /* Check for the condition */ - emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code)); + emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code), + BPF_CLASS(code) == BPF_JMP); /* Setup JUMP instruction */ jmp_offset = bpf2a32_offset(i+off, i, ctx); diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index f4e58bcdaa43..13a05f759552 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -62,6 +62,7 @@ #define ARM_INST_ADDS_I 0x02900000 #define ARM_INST_AND_R 0x00000000 +#define ARM_INST_ANDS_R 0x00100000 #define ARM_INST_AND_I 0x02000000 #define ARM_INST_BIC_R 0x01c00000 @@ -172,6 +173,7 @@ #define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm) #define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) +#define ARM_ANDS_R(rd, rn, rm) _AL3_R(ARM_INST_ANDS, rd, rn, rm) #define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) #define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm) diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index a2399fd66e97..a6c81ce00f52 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -18,7 +18,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ed36dcab80f1..f51919974183 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c @@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) if (ssp == NULL) return -ENODEV; - iounmap(ssp->mmio_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); @@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) list_del(&ssp->node); mutex_unlock(&ssp_lock); - kfree(ssp); return 0; } diff --git a/arch/arm/probes/kprobes/opt-arm.c 
b/arch/arm/probes/kprobes/opt-arm.c index 2c118a6ab358..0dc23fc227ed 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c @@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or } /* Copy arch-dep-instance from template. */ - memcpy(code, (unsigned char *)optprobe_template_entry, + memcpy(code, (unsigned long *)&optprobe_template_entry, TMPL_END_IDX * sizeof(kprobe_opcode_t)); /* Adjust buffer according to instruction. */ diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 8edf93b4490f..9016f4081bb9 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -24,7 +24,7 @@ 10 common unlink sys_unlink 11 common execve sys_execve 12 common chdir sys_chdir -13 oabi time sys_time +13 oabi time sys_time32 14 common mknod sys_mknod 15 common chmod sys_chmod 16 common lchown sys_lchown16 @@ -36,12 +36,12 @@ 22 oabi umount sys_oldumount 23 common setuid sys_setuid16 24 common getuid sys_getuid16 -25 oabi stime sys_stime +25 oabi stime sys_stime32 26 common ptrace sys_ptrace 27 oabi alarm sys_alarm # 28 was sys_fstat 29 common pause sys_pause -30 oabi utime sys_utime +30 oabi utime sys_utime32 # 31 was sys_stty # 32 was sys_gtty 33 common access sys_access @@ -137,7 +137,7 @@ 121 common setdomainname sys_setdomainname 122 common uname sys_newuname # 123 was sys_modify_ldt -124 common adjtimex sys_adjtimex +124 common adjtimex sys_adjtimex_time32 125 common mprotect sys_mprotect 126 common sigprocmask sys_sigprocmask # 127 was sys_create_module @@ -174,8 +174,8 @@ 158 common sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep +161 common sched_rr_get_interval sys_sched_rr_get_interval_time32 +162 common nanosleep sys_nanosleep_time32 163 common mremap sys_mremap 164 common setresuid sys_setresuid16 165 common getresuid sys_getresuid16 @@ -190,7 +190,7 @@ 174 common rt_sigaction sys_rt_sigaction 175 common rt_sigprocmask sys_rt_sigprocmask 176 common rt_sigpending sys_rt_sigpending -177 common rt_sigtimedwait sys_rt_sigtimedwait +177 common rt_sigtimedwait sys_rt_sigtimedwait_time32 178 common rt_sigqueueinfo sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend 180 common pread64 sys_pread64 sys_oabi_pread64 @@ -254,12 +254,12 @@ 237 common fremovexattr sys_fremovexattr 238 common tkill sys_tkill 239 common sendfile64 sys_sendfile64 -240 common futex sys_futex +240 common futex sys_futex_time32 241 common sched_setaffinity sys_sched_setaffinity 242 common sched_getaffinity sys_sched_getaffinity 243 common io_setup sys_io_setup 244 common io_destroy sys_io_destroy -245 common io_getevents sys_io_getevents +245 common io_getevents sys_io_getevents_time32 246 common io_submit sys_io_submit 247 common io_cancel sys_io_cancel 248 common exit_group sys_exit_group @@ -272,26 +272,26 @@ # 255 for get_thread_area 256 common set_tid_address sys_set_tid_address 257 common timer_create sys_timer_create -258 common timer_settime sys_timer_settime -259 common timer_gettime sys_timer_gettime +258 common timer_settime sys_timer_settime32 +259 common timer_gettime sys_timer_gettime32 260 common timer_getoverrun sys_timer_getoverrun 261 common timer_delete sys_timer_delete -262 common clock_settime sys_clock_settime -263 common clock_gettime sys_clock_gettime -264 common clock_getres sys_clock_getres 
-265 common clock_nanosleep sys_clock_nanosleep +262 common clock_settime sys_clock_settime32 +263 common clock_gettime sys_clock_gettime32 +264 common clock_getres sys_clock_getres_time32 +265 common clock_nanosleep sys_clock_nanosleep_time32 266 common statfs64 sys_statfs64_wrapper 267 common fstatfs64 sys_fstatfs64_wrapper 268 common tgkill sys_tgkill -269 common utimes sys_utimes +269 common utimes sys_utimes_time32 270 common arm_fadvise64_64 sys_arm_fadvise64_64 271 common pciconfig_iobase sys_pciconfig_iobase 272 common pciconfig_read sys_pciconfig_read 273 common pciconfig_write sys_pciconfig_write 274 common mq_open sys_mq_open 275 common mq_unlink sys_mq_unlink -276 common mq_timedsend sys_mq_timedsend -277 common mq_timedreceive sys_mq_timedreceive +276 common mq_timedsend sys_mq_timedsend_time32 +277 common mq_timedreceive sys_mq_timedreceive_time32 278 common mq_notify sys_mq_notify 279 common mq_getsetattr sys_mq_getsetattr 280 common waitid sys_waitid @@ -314,19 +314,19 @@ 297 common recvmsg sys_recvmsg 298 common semop sys_semop sys_oabi_semop 299 common semget sys_semget -300 common semctl sys_semctl +300 common semctl sys_old_semctl 301 common msgsnd sys_msgsnd 302 common msgrcv sys_msgrcv 303 common msgget sys_msgget -304 common msgctl sys_msgctl +304 common msgctl sys_old_msgctl 305 common shmat sys_shmat 306 common shmdt sys_shmdt 307 common shmget sys_shmget -308 common shmctl sys_shmctl +308 common shmctl sys_old_shmctl 309 common add_key sys_add_key 310 common request_key sys_request_key 311 common keyctl sys_keyctl -312 common semtimedop sys_semtimedop sys_oabi_semtimedop +312 common semtimedop sys_semtimedop_time32 sys_oabi_semtimedop 313 common vserver 314 common ioprio_set sys_ioprio_set 315 common ioprio_get sys_ioprio_get @@ -340,7 +340,7 @@ 323 common mkdirat sys_mkdirat 324 common mknodat sys_mknodat 325 common fchownat sys_fchownat -326 common futimesat sys_futimesat +326 common futimesat sys_futimesat_time32 327 common fstatat64 sys_fstatat64 sys_oabi_fstatat64 328 common unlinkat sys_unlinkat 329 common renameat sys_renameat @@ -349,8 +349,8 @@ 332 common readlinkat sys_readlinkat 333 common fchmodat sys_fchmodat 334 common faccessat sys_faccessat -335 common pselect6 sys_pselect6 -336 common ppoll sys_ppoll +335 common pselect6 sys_pselect6_time32 +336 common ppoll sys_ppoll_time32 337 common unshare sys_unshare 338 common set_robust_list sys_set_robust_list 339 common get_robust_list sys_get_robust_list @@ -362,13 +362,13 @@ 345 common getcpu sys_getcpu 346 common epoll_pwait sys_epoll_pwait 347 common kexec_load sys_kexec_load -348 common utimensat sys_utimensat +348 common utimensat sys_utimensat_time32 349 common signalfd sys_signalfd 350 common timerfd_create sys_timerfd_create 351 common eventfd sys_eventfd 352 common fallocate sys_fallocate -353 common timerfd_settime sys_timerfd_settime -354 common timerfd_gettime sys_timerfd_gettime +353 common timerfd_settime sys_timerfd_settime32 +354 common timerfd_gettime sys_timerfd_gettime32 355 common signalfd4 sys_signalfd4 356 common eventfd2 sys_eventfd2 357 common epoll_create1 sys_epoll_create1 @@ -379,14 +379,14 @@ 362 common pwritev sys_pwritev 363 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 364 common perf_event_open sys_perf_event_open -365 common recvmmsg sys_recvmmsg +365 common recvmmsg sys_recvmmsg_time32 366 common accept4 sys_accept4 367 common fanotify_init sys_fanotify_init 368 common fanotify_mark sys_fanotify_mark 369 common prlimit64 sys_prlimit64 370 common name_to_handle_at 
sys_name_to_handle_at 371 common open_by_handle_at sys_open_by_handle_at -372 common clock_adjtime sys_clock_adjtime +372 common clock_adjtime sys_clock_adjtime32 373 common syncfs sys_syncfs 374 common sendmmsg sys_sendmmsg 375 common setns sys_setns @@ -413,4 +413,27 @@ 396 common pkey_free sys_pkey_free 397 common statx sys_statx 398 common rseq sys_rseq -399 common io_pgetevents sys_io_pgetevents +399 common io_pgetevents sys_io_pgetevents_time32 +400 common migrate_pages sys_migrate_pages +401 common kexec_file_load sys_kexec_file_load +# 402 is unused +403 common clock_gettime64 sys_clock_gettime +404 common clock_settime64 sys_clock_settime +405 common clock_adjtime64 sys_clock_adjtime +406 common clock_getres_time64 sys_clock_getres +407 common clock_nanosleep_time64 sys_clock_nanosleep +408 common timer_gettime64 sys_timer_gettime +409 common timer_settime64 sys_timer_settime +410 common timerfd_gettime64 sys_timerfd_gettime +411 common timerfd_settime64 sys_timerfd_settime +412 common utimensat_time64 sys_utimensat +413 common pselect6_time64 sys_pselect6 +414 common ppoll_time64 sys_ppoll +416 common io_pgetevents_time64 sys_io_pgetevents +417 common recvmmsg_time64 sys_recvmmsg +418 common mq_timedsend_time64 sys_mq_timedsend +419 common mq_timedreceive_time64 sys_mq_timedreceive +420 common semtimedop_time64 sys_semtimedop +421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait +422 common futex_time64 sys_futex +423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S index b0b80c0f09f3..b11bba542fac 100644 --- a/arch/arm/xen/hypercall.S +++ b/arch/arm/xen/hypercall.S @@ -113,8 +113,7 @@ ENTRY(privcmd_call) /* * Disable userspace access from kernel. This is fine to do it - * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is - * called before. + * unconditionally as no set_fs(KERNEL_DS) is called before. */ uaccess_disable r4 diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index cb44aa290e73..e1d44b903dfc 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a4168d366127..117b2541ef3d 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -22,12 +22,14 @@ config ARM64 select ARCH_HAS_KCOV select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYSCALL_WRAPPER + select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK if !PREEMPT @@ -137,7 +139,6 @@ config ARM64 select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER select HAVE_GCC_PLUGINS - select HAVE_GENERIC_DMA_COHERENT select HAVE_HW_BREAKPOINT if PERF_EVENTS select HAVE_IRQ_TIME_ACCOUNTING select HAVE_MEMBLOCK_NODE_MAP if NUMA @@ -163,7 +164,6 @@ config ARM64 select NEED_SG_DMA_LENGTH select OF select OF_EARLY_FLATTREE - select OF_RESERVED_MEM select PCI_DOMAINS_GENERIC if PCI select PCI_ECAM if (ACPI && PCI) select PCI_SYSCALL if PCI @@ -643,6 +643,25 @@ config QCOM_FALKOR_ERRATUM_E1041 If unsure, say Y. 
+config FUJITSU_ERRATUM_010001 + bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly" + default y + help + This option adds workaround for Fujitsu-A64FX erratum E#010001. + On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory + accesses may cause undefined fault (Data abort, DFSC=0b111111). + This fault occurs under a specific hardware condition when a + load/store instruction performs an address translation using: + case-1 TTBR0_EL1 with TCR_EL1.NFD0 == 1. + case-2 TTBR0_EL2 with TCR_EL2.NFD0 == 1. + case-3 TTBR1_EL1 with TCR_EL1.NFD1 == 1. + case-4 TTBR1_EL2 with TCR_EL2.NFD1 == 1. + + The workaround is to ensure these bits are clear in TCR_ELx. + The workaround only affect the Fujitsu-A64FX. + + If unsure, say Y. + endmenu @@ -792,8 +811,7 @@ config SCHED_SMT config NR_CPUS int "Maximum number of CPUs (2-4096)" range 2 4096 - # These have to remain sorted largest to smallest - default "64" + default "256" config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" @@ -1328,6 +1346,20 @@ config ARM64_MODULE_PLTS bool select HAVE_MOD_ARCH_SPECIFIC +config ARM64_PSEUDO_NMI + bool "Support for NMI-like interrupts" + select CONFIG_ARM_GIC_V3 + help + Adds support for mimicking Non-Maskable Interrupts through the use of + GIC interrupt priority. This support requires version 3 or later of + Arm GIC. + + This high priority configuration for interrupts needs to be + explicitly enabled by setting the kernel parameter + "irqchip.gicv3_pseudo_nmi" to 1. + + If unsure, say N + config RELOCATABLE bool help @@ -1467,6 +1499,10 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC +config ARCH_ENABLE_HUGEPAGE_MIGRATION + def_bool y + depends on HUGETLB_PAGE && MIGRATION + menu "Power management options" source "kernel/power/Kconfig" diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 251ecf34cb02..70498a033cf5 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -52,6 +52,11 @@ config ARCH_BERLIN help This enables support for Marvell Berlin SoC Family +config ARCH_BITMAIN + bool "Bitmain SoC Platforms" + help + This enables support for the Bitmain SoC Family. + config ARCH_BRCMSTB bool "Broadcom Set-Top-Box SoCs" select BRCMSTB_L2_IRQ @@ -112,12 +117,13 @@ config ARCH_MESON bool "Amlogic Platforms" select PINCTRL select PINCTRL_MESON - select COMMON_CLK_AMLOGIC select COMMON_CLK_GXBB select COMMON_CLK_AXG + select COMMON_CLK_G12A select MESON_IRQ_GPIO help - This enables support for the Amlogic S905 SoCs. + This enables support for the arm64 based Amlogic SoCs + such as the s905, S905X/D, S912, A113X/D or S905X/D2 config ARCH_MVEBU bool "Marvell EBU SoC Family" @@ -145,7 +151,11 @@ config ARCH_MVEBU config ARCH_MXC bool "ARMv8 based NXP i.MX SoC family" select ARM64_ERRATUM_843419 - select ARM64_ERRATUM_845719 + select ARM64_ERRATUM_845719 if COMPAT + select IMX_GPCV2 + select IMX_GPCV2_PM_DOMAINS + select PM + select PM_GENERIC_DOMAINS help This enables support for the ARMv8 based SoCs in the NXP i.MX family. 
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index 4690364d584b..5bc7533a12c7 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -7,6 +7,7 @@ subdir-y += amd subdir-y += amlogic subdir-y += apm subdir-y += arm +subdir-y += bitmain subdir-y += broadcom subdir-y += cavium subdir-y += exynos diff --git a/arch/arm64/boot/dts/actions/s700-cubieboard7.dts b/arch/arm64/boot/dts/actions/s700-cubieboard7.dts index 28f3f4a0f7f0..63e375cd9eb4 100644 --- a/arch/arm64/boot/dts/actions/s700-cubieboard7.dts +++ b/arch/arm64/boot/dts/actions/s700-cubieboard7.dts @@ -30,6 +30,59 @@ }; }; +&i2c0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_default>; +}; + +&i2c1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_default>; +}; + +&i2c2 { + status = "disabled"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_default>; +}; + +&pinctrl { + i2c0_default: i2c0_default { + pinmux { + groups = "i2c0_mfp"; + function = "i2c0"; + }; + pinconf { + pins = "i2c0_sclk", "i2c0_sdata"; + bias-pull-up; + }; + }; + + i2c1_default: i2c1_default { + pinmux { + groups = "i2c1_dummy"; + function = "i2c1"; + }; + pinconf { + pins = "i2c1_sclk", "i2c1_sdata"; + bias-pull-up; + }; + }; + + i2c2_default: i2c2_default { + pinmux { + groups = "i2c2_dummy"; + function = "i2c2"; + }; + pinconf { + pins = "i2c2_sclk", "i2c2_sdata"; + bias-pull-up; + }; + }; +}; + &timer { clocks = <&hosc>; }; diff --git a/arch/arm64/boot/dts/actions/s700.dtsi b/arch/arm64/boot/dts/actions/s700.dtsi index 192c7b39c8c1..2006ad5424fa 100644 --- a/arch/arm64/boot/dts/actions/s700.dtsi +++ b/arch/arm64/boot/dts/actions/s700.dtsi @@ -5,6 +5,7 @@ #include #include +#include / { compatible = "actions,s700"; @@ -18,28 +19,28 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; }; cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; }; cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; }; @@ -172,6 +173,47 @@ reg = <0x0 0xe0168000 0x0 0x1000>; clocks = <&hosc>, <&losc>; #clock-cells = <1>; + #reset-cells = <1>; + }; + + i2c0: i2c@e0170000 { + compatible = "actions,s700-i2c"; + reg = <0 0xe0170000 0 0x1000>; + clocks = <&cmu CLK_I2C0>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c1: i2c@e0174000 { + compatible = "actions,s700-i2c"; + reg = <0 0xe0174000 0 0x1000>; + clocks = <&cmu CLK_I2C1>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c2: i2c@e0178000 { + compatible = "actions,s700-i2c"; + reg = <0 0xe0178000 0 0x1000>; + clocks = <&cmu CLK_I2C2>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c3: i2c@e017c000 { + compatible = "actions,s700-i2c"; + reg = <0 0xe017c000 0 0x1000>; + clocks = <&cmu CLK_I2C3>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; }; sps: power-controller@e01b0100 { @@ -186,5 +228,21 @@ interrupts = ; interrupt-names = "timer1"; }; + + pinctrl: pinctrl@e01b0000 { + compatible = 
"actions,s700-pinctrl"; + reg = <0x0 0xe01b0000 0x0 0x1000>; + clocks = <&cmu CLK_GPIO>; + gpio-controller; + gpio-ranges = <&pinctrl 0 0 136>; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = , + , + , + , + ; + }; }; }; diff --git a/arch/arm64/boot/dts/actions/s900.dtsi b/arch/arm64/boot/dts/actions/s900.dtsi index 491ddccc9038..df3a68a3ac97 100644 --- a/arch/arm64/boot/dts/actions/s900.dtsi +++ b/arch/arm64/boot/dts/actions/s900.dtsi @@ -5,6 +5,7 @@ #include #include +#include / { compatible = "actions,s900"; @@ -18,28 +19,28 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; }; cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; }; cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; }; @@ -184,6 +185,7 @@ reg = <0x0 0xe0160000 0x0 0x1000>; clocks = <&hosc>, <&losc>; #clock-cells = <1>; + #reset-cells = <1>; }; i2c0: i2c@e0170000 { @@ -253,6 +255,14 @@ gpio-controller; gpio-ranges = <&pinctrl 0 0 146>; #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = , + , + , + , + , + ; }; timer: timer@e0228000 { diff --git a/arch/arm64/boot/dts/al/alpine-v2.dtsi b/arch/arm64/boot/dts/al/alpine-v2.dtsi index 5b7bef684256..d5e7e2bb4e6c 100644 --- a/arch/arm64/boot/dts/al/alpine-v2.dtsi +++ b/arch/arm64/boot/dts/al/alpine-v2.dtsi @@ -47,28 +47,28 @@ #size-cells = <0>; cpu@0 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; device_type = "cpu"; reg = <0x0 0x0>; enable-method = "psci"; }; cpu@1 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; device_type = "cpu"; reg = <0x0 0x1>; enable-method = "psci"; }; cpu@2 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; device_type = "cpu"; reg = <0x0 0x2>; enable-method = "psci"; }; cpu@3 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; device_type = "cpu"; reg = <0x0 0x3>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts index 9d0afd7d50ec..7793ebb5d2b8 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts @@ -222,6 +222,14 @@ #include "axp803.dtsi" +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + ®_aldo1 { /* * This regulator also drives the PE pingroup GPIOs, diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts index 31884dbc8838..f4e78531f639 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts @@ -186,6 +186,10 @@ #include "axp803.dtsi" +&ac_power_supply { + status = "okay"; +}; + ®_aldo2 { regulator-always-on; regulator-min-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts index f7a4bccaa5d4..01a9a52edae4 100644 --- 
a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts @@ -177,6 +177,14 @@ #include "axp803.dtsi" +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + ®_aldo1 { regulator-always-on; regulator-min-microvolt = <2800000>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index b0c64f75792c..510f661229dc 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts @@ -188,11 +188,20 @@ reg = <0x3a3>; interrupt-parent = <&r_intc>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; + x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ }; }; #include "axp803.dtsi" +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + ®_aldo1 { regulator-min-microvolt = <2800000>; regulator-max-microvolt = <2800000>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts index 216f2f5db5ef..c0b9cc7a6b3a 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts @@ -169,6 +169,14 @@ #include "axp803.dtsi" +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + ®_aldo2 { regulator-always-on; regulator-min-microvolt = <1800000>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts index c455b24dd079..7b7b14ba58e6 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts @@ -145,6 +145,14 @@ #include "axp803.dtsi" +&ac_power_supply { + status = "okay"; +}; + +&battery_power_supply { + status = "okay"; +}; + ®_aldo1 { regulator-always-on; regulator-min-microvolt = <2800000>; @@ -239,7 +247,7 @@ }; /* - * The A64 chip cannot work without this regulator off, although + * The A64 chip cannot work with this regulator off, although * it seems to be only driving the AR100 core. * Maybe we don't still know well about CPUs domain. 
*/ diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 837a03dee875..e628d063931b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi @@ -84,7 +84,7 @@ #size-cells = <0>; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0>; enable-method = "psci"; @@ -92,7 +92,7 @@ }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <1>; enable-method = "psci"; @@ -100,7 +100,7 @@ }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <2>; enable-method = "psci"; @@ -108,7 +108,7 @@ }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <3>; enable-method = "psci"; @@ -142,6 +142,15 @@ clock-output-names = "ext-osc32k"; }; + pmu { + compatible = "arm,cortex-a53-pmu"; + interrupts = , + , + , + ; + interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>; + }; + psci { compatible = "arm,psci-0.2"; method = "smc"; @@ -191,6 +200,7 @@ timer { compatible = "arm,armv8-timer"; + allwinner,erratum-unknown1; interrupts = , ; clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, <&ccu CLK_DRAM_VE>; @@ -549,6 +559,12 @@ interrupt-controller; #interrupt-cells = <3>; + csi_pins: csi-pins { + pins = "PE0", "PE2", "PE3", "PE4", "PE5", "PE6", + "PE7", "PE8", "PE9", "PE10", "PE11"; + function = "csi"; + }; + i2c0_pins: i2c0_pins { pins = "PH0", "PH1"; function = "i2c0"; @@ -916,6 +932,20 @@ status = "disabled"; }; + csi: csi@1cb0000 { + compatible = "allwinner,sun50i-a64-csi"; + reg = <0x01cb0000 0x1000>; + interrupts = ; + clocks = <&ccu CLK_BUS_CSI>, + <&ccu CLK_CSI_SCLK>, + <&ccu CLK_DRAM_CSI>; + clock-names = "bus", "mod", "ram"; + resets = <&ccu RST_BUS_CSI>; + pinctrl-names = "default"; + pinctrl-0 = <&csi_pins>; + status = "disabled"; + }; + hdmi: hdmi@1ee0000 { compatible = "allwinner,sun50i-a64-dw-hdmi", "allwinner,sun8i-a83t-dw-hdmi"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts index 95e113ce8699..d68bdfea2271 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-libretech-all-h3-cc.dts @@ -12,3 +12,7 @@ model = "Libre Computer Board ALL-H3-CC H5"; compatible = "libretech,all-h3-cc-h5", "allwinner,sun50i-h5"; }; + +&mmc2 { + mmc-ddr-3_3v; +}; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi index c22621b4b8e9..96acafd3a852 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5.dtsi @@ -48,28 +48,28 @@ #size-cells = <0>; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0>; enable-method = "psci"; }; cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <1>; enable-method = "psci"; }; cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <2>; enable-method = "psci"; }; cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <3>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi 
b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index d93a7add67e7..c9e861a50a63 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -22,28 +22,28 @@ #size-cells = <0>; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0>; enable-method = "psci"; }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <1>; enable-method = "psci"; }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <2>; enable-method = "psci"; }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <3>; enable-method = "psci"; @@ -167,6 +167,20 @@ reg = <0x0000 0x1e000>; }; }; + + sram_c1: sram@1a00000 { + compatible = "mmio-sram"; + reg = <0x01a00000 0x200000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x01a00000 0x200000>; + + ve_sram: sram-section@0 { + compatible = "allwinner,sun50i-h6-sram-c1", + "allwinner,sun4i-a10-sram-c1"; + reg = <0x000000 0x200000>; + }; + }; }; ccu: clock@3001000 { @@ -178,17 +192,6 @@ #reset-cells = <1>; }; - gic: interrupt-controller@3021000 { - compatible = "arm,gic-400"; - reg = <0x03021000 0x1000>, - <0x03022000 0x2000>, - <0x03024000 0x2000>, - <0x03026000 0x2000>; - interrupts = ; - interrupt-controller; - #interrupt-cells = <3>; - }; - pio: pinctrl@300b000 { compatible = "allwinner,sun50i-h6-pinctrl"; reg = <0x0300b000 0x400>; @@ -239,6 +242,17 @@ }; }; + gic: interrupt-controller@3021000 { + compatible = "arm,gic-400"; + reg = <0x03021000 0x1000>, + <0x03022000 0x2000>, + <0x03024000 0x2000>, + <0x03026000 0x2000>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <3>; + }; + mmc0: mmc@4020000 { compatible = "allwinner,sun50i-h6-mmc", "allwinner,sun50i-a64-mmc"; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index b2c9bb664595..7c649f6b14cb 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi @@ -42,28 +42,28 @@ #size-cells = <0>; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; enable-method = "psci"; reg = <0x0>; }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; enable-method = "psci"; reg = <0x1>; }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; enable-method = "psci"; reg = <0x2>; }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; enable-method = "psci"; reg = <0x3>; @@ -161,6 +161,7 @@ tx-fifo-depth = <16384>; rx-fifo-depth = <16384>; snps,multicast-filter-bins = <256>; + iommus = <&smmu 1>; status = "disabled"; }; @@ -177,6 +178,7 @@ tx-fifo-depth = <16384>; rx-fifo-depth = <16384>; snps,multicast-filter-bins = <256>; + iommus = <&smmu 2>; status = "disabled"; }; @@ -193,6 +195,7 @@ tx-fifo-depth = <16384>; rx-fifo-depth = <16384>; snps,multicast-filter-bins = <256>; + iommus = <&smmu 3>; status = "disabled"; }; @@ -303,6 +306,7 @@ clocks = <&clkmgr STRATIX10_L4_MP_CLK>, <&clkmgr STRATIX10_SDMMC_CLK>; clock-names = "biu", "ciu"; + iommus = <&smmu 5>; status = "disabled"; }; @@ -336,6 +340,29 @@ reg = <0xffd11000 0x1000>; }; + smmu: 
iommu@fa000000 { + compatible = "arm,mmu-500", "arm,smmu-v2"; + reg = <0xfa000000 0x40000>; + #global-interrupts = <2>; + #iommu-cells = <1>; + clocks = <&clkmgr STRATIX10_L4_MAIN_CLK>; + clock-names = "iommu"; + interrupt-parent = <&intc>; + interrupts = <0 128 4>, /* Global Secure Fault */ + <0 129 4>, /* Global Non-secure Fault */ + /* Non-secure Context Interrupts (32) */ + <0 138 4>, <0 139 4>, <0 140 4>, <0 141 4>, + <0 142 4>, <0 143 4>, <0 144 4>, <0 145 4>, + <0 146 4>, <0 147 4>, <0 148 4>, <0 149 4>, + <0 150 4>, <0 151 4>, <0 152 4>, <0 153 4>, + <0 154 4>, <0 155 4>, <0 156 4>, <0 157 4>, + <0 158 4>, <0 159 4>, <0 160 4>, <0 161 4>, + <0 162 4>, <0 163 4>, <0 164 4>, <0 165 4>, + <0 166 4>, <0 167 4>, <0 168 4>, <0 169 4>; + stream-match-mask = <0x7ff0>; + status = "disabled"; + }; + spi0: spi@ffda4000 { compatible = "snps,dw-apb-ssi"; #address-cells = <1>; @@ -445,6 +472,7 @@ resets = <&rst USB0_RESET>, <&rst USB0_OCP_RESET>; reset-names = "dwc2", "dwc2-ecc"; clocks = <&clkmgr STRATIX10_USB_CLK>; + iommus = <&smmu 6>; status = "disabled"; }; @@ -457,6 +485,7 @@ resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>; reset-names = "dwc2", "dwc2-ecc"; clocks = <&clkmgr STRATIX10_USB_CLK>; + iommus = <&smmu 7>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile index f12efa27c636..0821fed4c074 100644 --- a/arch/arm64/boot/dts/amlogic/Makefile +++ b/arch/arm64/boot/dts/amlogic/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb dtb-$(CONFIG_ARCH_MESON) += meson-g12a-u200.dtb +dtb-$(CONFIG_ARCH_MESON) += meson-g12a-x96-max.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nanopi-k2.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-nexbox-a95x.dtb dtb-$(CONFIG_ARCH_MESON) += meson-gxbb-odroidc2.dtb diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts index 824eba98db2c..75fe1a2c49d0 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts +++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts @@ -95,6 +95,13 @@ sound-name-prefix = "MIC"; }; + spdif_dir: audio-codec-4 { + #sound-dai-cells = <0>; + compatible = "linux,spdif-dir"; + status = "okay"; + sound-name-prefix = "DIR"; + }; + emmc_pwrseq: emmc-pwrseq { compatible = "mmc-pwrseq-emmc"; reset-gpios = <&gpio BOOT_9 GPIO_ACTIVE_LOW>; @@ -249,6 +256,9 @@ "TODDR_A IN 2", "TDMIN_C OUT", "TODDR_B IN 2", "TDMIN_C OUT", "TODDR_C IN 2", "TDMIN_C OUT", + "TODDR_A IN 3", "SPDIFIN Capture", + "TODDR_B IN 3", "SPDIFIN Capture", + "TODDR_C IN 3", "SPDIFIN Capture", "TODDR_A IN 4", "PDM Capture", "TODDR_B IN 4", "PDM Capture", "TODDR_C IN 4", "PDM Capture", @@ -326,6 +336,14 @@ }; dai-link-8 { + sound-dai = <&spdifin>; + + codec { + sound-dai = <&spdif_dir>; + }; + }; + + dai-link-9 { sound-dai = <&pdm>; codec { @@ -446,7 +464,8 @@ bus-width = <4>; cap-sd-highspeed; - max-frequency = <100000000>; + sd-uhs-sdr104; + max-frequency = <200000000>; non-removable; disable-wp; @@ -469,9 +488,8 @@ pinctrl-names = "default", "clk-gate"; bus-width = <8>; - cap-sd-highspeed; cap-mmc-highspeed; - max-frequency = <180000000>; + max-frequency = <200000000>; non-removable; disable-wp; mmc-ddr-1_8v; @@ -483,6 +501,12 @@ vqmmc-supply = <&vddio_boot>; }; +&spdifin { + pinctrl-0 = <&spdif_in_a19_pins>; + pinctrl-names = "default"; + status = "okay"; +}; + &spdifout { pinctrl-0 = <&spdif_out_a20_pins>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi 
b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi index fffd55787981..34704fecf756 100644 --- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi @@ -68,7 +68,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&l2>; @@ -77,7 +77,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&l2>; @@ -86,7 +86,7 @@ cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&l2>; @@ -95,7 +95,7 @@ cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&l2>; @@ -111,6 +111,14 @@ compatible = "amlogic,meson-gxbb-sm"; }; + efuse: efuse { + compatible = "amlogic,meson-gxbb-efuse"; + clocks = <&clkc CLKID_EFUSE>; + #address-cells = <1>; + #size-cells = <1>; + read-only; + }; + psci { compatible = "arm,psci-1.0"; method = "smc"; @@ -1260,6 +1268,18 @@ status = "disabled"; }; + spdifin: audio-controller@400 { + compatible = "amlogic,axg-spdifin"; + reg = <0x0 0x400 0x0 0x30>; + #sound-dai-cells = <0>; + sound-name-prefix = "SPDIFIN"; + interrupts = ; + clocks = <&clkc_audio AUD_CLKID_SPDIFIN>, + <&clkc_audio AUD_CLKID_SPDIFIN_CLK>; + clock-names = "pclk", "refclk"; + status = "disabled"; + }; + spdifout: audio-controller@480 { compatible = "amlogic,axg-spdifout"; reg = <0x0 0x480 0x0 0x50>; @@ -1590,6 +1610,11 @@ status = "disabled"; }; + clk_msr: clock-measure@18000 { + compatible = "amlogic,meson-axg-clk-measure"; + reg = <0x0 0x18000 0x0 0x10>; + }; + i2c3: i2c@1c000 { compatible = "amlogic,meson-axg-i2c"; reg = <0x0 0x1c000 0x0 0x20>; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts new file mode 100644 index 000000000000..c62d3d5706ff --- /dev/null +++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2018 BayLibre SAS. All rights reserved. 
+ */ + +/dts-v1/; + +#include "meson-g12a.dtsi" + +/ { + compatible = "amediatech,x96-max", "amlogic,u200", "amlogic,g12a"; + model = "Shenzhen Amediatech Technology Co., Ltd X96 Max"; + + aliases { + serial0 = &uart_AO; + }; + chosen { + stdout-path = "serial0:115200n8"; + }; + memory@0 { + device_type = "memory"; + reg = <0x0 0x0 0x0 0x40000000>; + }; +}; + +&uart_AO { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi index 3b82a975c663..17c6217f8a84 100644 --- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi @@ -20,7 +20,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&l2>; @@ -28,7 +28,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&l2>; @@ -36,7 +36,7 @@ cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&l2>; @@ -44,7 +44,7 @@ cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&l2>; @@ -78,20 +78,41 @@ #size-cells = <2>; ranges; - periphs: periphs@ff634000 { + apb: bus@ff600000 { compatible = "simple-bus"; - reg = <0x0 0xff634000 0x0 0x2000>; + reg = <0x0 0xff600000 0x0 0x200000>; #address-cells = <2>; #size-cells = <2>; - ranges = <0x0 0x0 0x0 0xff634000 0x0 0x2000>; - }; + ranges = <0x0 0x0 0x0 0xff600000 0x0 0x200000>; + + periphs: bus@34400 { + compatible = "simple-bus"; + reg = <0x0 0x34400 0x0 0x400>; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x0 0x0 0x0 0x34400 0x0 0x400>; + }; - hiubus: bus@ff63c000 { - compatible = "simple-bus"; - reg = <0x0 0xff63c000 0x0 0x1c00>; - #address-cells = <2>; - #size-cells = <2>; - ranges = <0x0 0x0 0x0 0xff63c000 0x0 0x1c00>; + hiu: bus@3c000 { + compatible = "simple-bus"; + reg = <0x0 0x3c000 0x0 0x1400>; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0x0 0x0 0x0 0x3c000 0x0 0x1400>; + + hhi: system-controller@0 { + compatible = "amlogic,meson-gx-hhi-sysctrl", + "simple-mfd", "syscon"; + reg = <0 0 0 0x400>; + + clkc: clock-controller { + compatible = "amlogic,g12a-clkc"; + #clock-cells = <1>; + clocks = <&xtal>; + clock-names = "xtal"; + }; + }; + }; }; aobus: bus@ff800000 { @@ -102,7 +123,8 @@ ranges = <0x0 0x0 0x0 0xff800000 0x0 0x100000>; uart_AO: serial@3000 { - compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; + compatible = "amlogic,meson-gx-uart", + "amlogic,meson-ao-uart"; reg = <0x0 0x3000 0x0 0x18>; interrupts = ; clocks = <&xtal>, <&xtal>, <&xtal>; @@ -111,7 +133,8 @@ }; uart_AO_B: serial@4000 { - compatible = "amlogic,meson-gx-uart", "amlogic,meson-ao-uart"; + compatible = "amlogic,meson-gx-uart", + "amlogic,meson-ao-uart"; reg = <0x0 0x4000 0x0 0x18>; interrupts = ; clocks = <&xtal>, <&xtal>, <&xtal>; @@ -135,18 +158,15 @@ cbus: bus@ffd00000 { compatible = "simple-bus"; - reg = <0x0 0xffd00000 0x0 0x25000>; + reg = <0x0 0xffd00000 0x0 0x100000>; #address-cells = <2>; #size-cells = <2>; - ranges = <0x0 0x0 0x0 0xffd00000 0x0 0x25000>; - }; + ranges = <0x0 0x0 0x0 0xffd00000 0x0 0x100000>; - apb: apb@ffe00000 { - compatible = "simple-bus"; - reg = <0x0 0xffe00000 0x0 
0x200000>; - #address-cells = <2>; - #size-cells = <2>; - ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x200000>; + clk_msr: clock-measure@18000 { + compatible = "amlogic,meson-g12a-clk-measure"; + reg = <0x0 0x18000 0x0 0x10>; + }; }; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index e14e0ce7e89f..016641a41694 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi @@ -187,8 +187,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi index 44c5c51ff1fa..6772709b9e19 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi @@ -50,13 +50,35 @@ }; }; + chosen { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + simplefb_cvbs: framebuffer-cvbs { + compatible = "amlogic,simple-framebuffer", + "simple-framebuffer"; + amlogic,pipeline = "vpu-cvbs"; + power-domains = <&pwrc_vpu>; + status = "disabled"; + }; + + simplefb_hdmi: framebuffer-hdmi { + compatible = "amlogic,simple-framebuffer", + "simple-framebuffer"; + amlogic,pipeline = "vpu-hdmi"; + power-domains = <&pwrc_vpu>; + status = "disabled"; + }; + }; + cpus { #address-cells = <0x2>; #size-cells = <0x0>; cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&l2>; @@ -65,7 +87,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&l2>; @@ -74,7 +96,7 @@ cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&l2>; @@ -83,7 +105,7 @@ cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&l2>; @@ -510,6 +532,7 @@ interrupts = ; #address-cells = <1>; #size-cells = <0>; + amlogic,canvas = <&canvas>; /* CVBS VDAC output port */ cvbs_vdac_port: port@0 { diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 8cd50b75171d..ade2ee09ae96 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts @@ -305,8 +305,7 @@ max-frequency = <200000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddio_ao3v3>; vqmmc-supply = <&vddio_tf>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index 4cf7f6e80c6a..25105ac96d55 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts @@ -238,8 +238,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts 
b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 2e1cd5e3a246..1cc9dc68ef00 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -258,8 +258,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&tflash_vdd>; vqmmc-supply = <&tf_io>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index ce862266b9aa..0be0f2a5d2fe 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi @@ -196,8 +196,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 93a4acf2c46c..ad4d50bd9d77 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi @@ -154,8 +154,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; }; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index ec09bb5792b7..2d2db783c44c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi @@ -211,8 +211,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vcc_3v3>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi index a7b883ced0a8..a60d3652beee 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi @@ -784,6 +784,12 @@ resets = <&reset RESET_SD_EMMC_C>; }; +&simplefb_hdmi { + clocks = <&clkc CLKID_HDMI_PCLK>, + <&clkc CLKID_CLK81>, + <&clkc CLKID_GCLK_VENCI_INT0>; +}; + &spicc { clocks = <&clkc CLKID_SPICC>; clock-names = "core"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index f1c410e2da2b..796baea7a0bf 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts @@ -131,8 +131,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index db293440e4ca..255cede7b447 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts @@ -238,8 +238,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vcc_3v3>; vqmmc-supply = <&vcc_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts 
b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 6739697be1de..9cbdb85fb591 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts @@ -183,8 +183,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_card>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index a1b31013ab6e..bc811a2faf42 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi @@ -137,8 +137,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index d5c3d78aafeb..3093ae421b17 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi @@ -787,6 +787,12 @@ resets = <&reset RESET_SD_EMMC_C>; }; +&simplefb_hdmi { + clocks = <&clkc CLKID_HDMI_PCLK>, + <&clkc CLKID_CLK81>, + <&clkc CLKID_GCLK_VENCI_INT0>; +}; + &spicc { clocks = <&clkc CLKID_SPICC>; clock-names = "core"; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 3c3a667a8df8..3f086ed7de05 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts @@ -356,8 +356,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index f7a1cffab4a8..8acfd40090d2 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts @@ -147,8 +147,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 7212dc4531e4..7fa20a8ede17 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts @@ -170,8 +170,7 @@ max-frequency = <100000000>; disable-wp; - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; - cd-inverted; + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; vmmc-supply = <&vddao_3v3>; vqmmc-supply = <&vddio_boot>; diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi index 247888d68a3a..ed3a3d5adf31 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxm.dtsi @@ -44,7 +44,7 @@ cpu4: cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x100>; enable-method = "psci"; next-level-cache = <&l2>; @@ -53,7 +53,7 @@ cpu5: cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x101>; enable-method 
= "psci"; next-level-cache = <&l2>; @@ -62,7 +62,7 @@ cpu6: cpu@102 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x102>; enable-method = "psci"; next-level-cache = <&l2>; @@ -71,7 +71,7 @@ cpu7: cpu@103 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x103>; enable-method = "psci"; next-level-cache = <&l2>; diff --git a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi index d8ecd1661461..7faea28a37b0 100644 --- a/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi +++ b/arch/arm64/boot/dts/apm/apm-shadowcat.dtsi @@ -21,7 +21,7 @@ cpu@0 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x000>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -31,7 +31,7 @@ }; cpu@1 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x001>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -41,7 +41,7 @@ }; cpu@100 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x100>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -51,7 +51,7 @@ }; cpu@101 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x101>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -61,7 +61,7 @@ }; cpu@200 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x200>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -71,7 +71,7 @@ }; cpu@201 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x201>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -81,7 +81,7 @@ }; cpu@300 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x300>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -91,7 +91,7 @@ }; cpu@301 { device_type = "cpu"; - compatible = "apm,strega", "arm,armv8"; + compatible = "apm,strega"; reg = <0x0 0x301>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 00e82b8e9a19..94d637d17262 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -21,7 +21,7 @@ cpu@0 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x000>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -29,7 +29,7 @@ }; cpu@1 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x001>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -37,7 +37,7 @@ }; cpu@100 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x100>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -45,7 +45,7 @@ }; cpu@101 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x101>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -53,7 +53,7 @@ }; cpu@200 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x200>; 
enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -61,7 +61,7 @@ }; cpu@201 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x201>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -69,7 +69,7 @@ }; cpu@300 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x300>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; @@ -77,7 +77,7 @@ }; cpu@301 { device_type = "cpu"; - compatible = "apm,potenza", "arm,armv8"; + compatible = "apm,potenza"; reg = <0x0 0x301>; enable-method = "spin-table"; cpu-release-addr = <0x1 0x0000fff8>; diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile index 5b45144b371a..800da2e84f3f 100644 --- a/arch/arm64/boot/dts/arm/Makefile +++ b/arch/arm64/boot/dts/arm/Makefile @@ -5,3 +5,4 @@ dtb-$(CONFIG_ARCH_VEXPRESS) += \ dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb juno-r2.dtb dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb +dtb-$(CONFIG_ARCH_VEXPRESS) += fvp-base-revc.dtb diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi index 851abf34fc80..15fe81738e94 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv2.dtsi @@ -14,6 +14,6 @@ <0x0 0x2c002000 0 0x2000>, <0x0 0x2c004000 0 0x2000>, <0x0 0x2c006000 0 0x2000>; - interrupts = <1 9 0xf04>; + interrupts = ; }; }; diff --git a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi index 91fc5c60d88b..f2c75c756039 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8-gicv3.dtsi @@ -17,7 +17,7 @@ <0x0 0x2c000000 0x0 0x2000>, <0x0 0x2c010000 0x0 0x2000>, <0x0 0x2c02f000 0x0 0x2000>; - interrupts = <1 9 4>; + interrupts = ; its: its@2f020000 { compatible = "arm,gic-v3-its"; diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dtsi b/arch/arm64/boot/dts/arm/foundation-v8.dtsi index e080277d27ae..3f78373f708a 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dtsi +++ b/arch/arm64/boot/dts/arm/foundation-v8.dtsi @@ -7,6 +7,8 @@ /dts-v1/; +#include + /memreserve/ 0x80000000 0x00010000; / { @@ -67,26 +69,26 @@ timer { compatible = "arm,armv8-timer"; - interrupts = <1 13 0xf08>, - <1 14 0xf08>, - <1 11 0xf08>, - <1 10 0xf08>; + interrupts = , + , + , + ; clock-frequency = <100000000>; }; pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 60 4>, - <0 61 4>, - <0 62 4>, - <0 63 4>; + interrupts = , + , + , + ; }; watchdog@2a440000 { compatible = "arm,sbsa-gwdt"; reg = <0x0 0x2a440000 0 0x1000>, <0x0 0x2a450000 0 0x1000>; - interrupts = <0 27 4>; + interrupts = ; timeout-sec = <30>; }; @@ -105,49 +107,49 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 63>; - interrupt-map = <0 0 0 &gic 0 0 0 0 4>, - <0 0 1 &gic 0 0 0 1 4>, - <0 0 2 &gic 0 0 0 2 4>, - <0 0 3 &gic 0 0 0 3 4>, - <0 0 4 &gic 0 0 0 4 4>, - <0 0 5 &gic 0 0 0 5 4>, - <0 0 6 &gic 0 0 0 6 4>, - <0 0 7 &gic 0 0 0 7 4>, - <0 0 8 &gic 0 0 0 8 4>, - <0 0 9 &gic 0 0 0 9 4>, - <0 0 10 &gic 0 0 0 10 4>, - <0 0 11 &gic 0 0 0 11 4>, - <0 0 12 &gic 0 0 0 12 4>, - <0 0 13 &gic 0 0 0 13 4>, - <0 0 14 &gic 0 0 0 14 4>, - <0 0 15 &gic 0 0 0 15 4>, - <0 0 16 &gic 0 0 0 16 4>, - <0 0 17 &gic 0 0 0 17 4>, - <0 0 18 &gic 0 0 0 18 4>, - <0 0 19 &gic 0 0 0 19 4>, - <0 0 20 &gic 0 0 0 20 4>, - <0 0 21 &gic 0 0 0 21 
4>, - <0 0 22 &gic 0 0 0 22 4>, - <0 0 23 &gic 0 0 0 23 4>, - <0 0 24 &gic 0 0 0 24 4>, - <0 0 25 &gic 0 0 0 25 4>, - <0 0 26 &gic 0 0 0 26 4>, - <0 0 27 &gic 0 0 0 27 4>, - <0 0 28 &gic 0 0 0 28 4>, - <0 0 29 &gic 0 0 0 29 4>, - <0 0 30 &gic 0 0 0 30 4>, - <0 0 31 &gic 0 0 0 31 4>, - <0 0 32 &gic 0 0 0 32 4>, - <0 0 33 &gic 0 0 0 33 4>, - <0 0 34 &gic 0 0 0 34 4>, - <0 0 35 &gic 0 0 0 35 4>, - <0 0 36 &gic 0 0 0 36 4>, - <0 0 37 &gic 0 0 0 37 4>, - <0 0 38 &gic 0 0 0 38 4>, - <0 0 39 &gic 0 0 0 39 4>, - <0 0 40 &gic 0 0 0 40 4>, - <0 0 41 &gic 0 0 0 41 4>, - <0 0 42 &gic 0 0 0 42 4>; + interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, + <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, + <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, + <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, + <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, + <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, + <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, + <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, + <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, + <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, + <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, + <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, + <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, + <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, + <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, + <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, + <0 0 36 &gic 0 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, + <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, + <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, + <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; ethernet@2,02000000 { compatible = "smsc,lan91c111"; diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts new file mode 100644 index 000000000000..687707020ec1 --- /dev/null +++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Ltd. 
Fast Models + * + * Architecture Envelope Model (AEM) ARMv8-A + * ARMAEMv8AMPCT + * + * FVP Base RevC + */ + +/dts-v1/; + +#include + +/memreserve/ 0x80000000 0x00010000; + +#include "rtsm_ve-motherboard.dtsi" +#include "rtsm_ve-motherboard-rs2.dtsi" + +/ { + model = "FVP Base RevC"; + compatible = "arm,fvp-base-revc", "arm,vexpress"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + chosen { }; + + aliases { + serial0 = &v2m_serial0; + serial1 = &v2m_serial1; + serial2 = &v2m_serial2; + serial3 = &v2m_serial3; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x000>; + enable-method = "psci"; + }; + cpu1: cpu@100 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x100>; + enable-method = "psci"; + }; + cpu2: cpu@200 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x200>; + enable-method = "psci"; + }; + cpu3: cpu@300 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x300>; + enable-method = "psci"; + }; + cpu4: cpu@10000 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x10000>; + enable-method = "psci"; + }; + cpu5: cpu@10100 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x10100>; + enable-method = "psci"; + }; + cpu6: cpu@10200 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x10200>; + enable-method = "psci"; + }; + cpu7: cpu@10300 { + device_type = "cpu"; + compatible = "arm,armv8"; + reg = <0x0 0x10300>; + enable-method = "psci"; + }; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x00000000 0x80000000 0 0x80000000>, + <0x00000008 0x80000000 0 0x80000000>; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + /* Chipselect 2,00000000 is physically at 0x18000000 */ + vram: vram@18000000 { + /* 8 MB of designated video RAM */ + compatible = "shared-dma-pool"; + reg = <0x00000000 0x18000000 0 0x00800000>; + no-map; + }; + }; + + gic: interrupt-controller@2f000000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + interrupt-controller; + reg = <0x0 0x2f000000 0 0x10000>, // GICD + <0x0 0x2f100000 0 0x200000>, // GICR + <0x0 0x2c000000 0 0x2000>, // GICC + <0x0 0x2c010000 0 0x2000>, // GICH + <0x0 0x2c02f000 0 0x2000>; // GICV + interrupts = ; + + its: its@2f020000 { + #msi-cells = <1>; + compatible = "arm,gic-v3-its"; + reg = <0x0 0x2f020000 0x0 0x20000>; // GITS + msi-controller; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + spe-pmu { + compatible = "arm,statistical-profiling-extension-v1"; + interrupts = ; + }; + + pci: pci@40000000 { + #address-cells = <0x3>; + #size-cells = <0x2>; + #interrupt-cells = <0x1>; + compatible = "pci-host-ecam-generic"; + device_type = "pci"; + bus-range = <0x0 0x1>; + reg = <0x0 0x40000000 0x0 0x10000000>; + ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>; + interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map-mask = <0x0 0x0 0x0 0x7>; + msi-map = <0x0 &its 0x0 0x10000>; + iommu-map = <0x0 &smmu 0x0 0x10000>; + + dma-coherent; + }; + + smmu: smmu@2b400000 { 
+ compatible = "arm,smmu-v3"; + reg = <0x0 0x2b400000 0x0 0x100000>; + interrupts = , + , + , + ; + interrupt-names = "eventq", "priq", "cmdq-sync", "gerror"; + dma-coherent; + #iommu-cells = <1>; + msi-parent = <&its 0x10000>; + }; + + panel { + compatible = "arm,rtsm-display", "panel-dpi"; + port { + panel_in: endpoint { + remote-endpoint = <&clcd_pads>; + }; + }; + + panel-timing { + clock-frequency = <63500127>; + hactive = <1024>; + hback-porch = <152>; + hfront-porch = <48>; + hsync-len = <104>; + vactive = <768>; + vback-porch = <23>; + vfront-porch = <3>; + vsync-len = <4>; + }; + }; + + smb@8000000 { + compatible = "simple-bus"; + + #address-cells = <2>; + #size-cells = <1>; + ranges = <0 0 0 0x08000000 0x04000000>, + <1 0 0 0x14000000 0x04000000>, + <2 0 0 0x18000000 0x04000000>, + <3 0 0 0x1c000000 0x04000000>, + <4 0 0 0x0c000000 0x04000000>, + <5 0 0 0x10000000 0x04000000>; + + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 63>; + interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic 0 0 GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic 0 0 GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic 0 0 GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic 0 0 GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic 0 0 GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic 0 0 GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic 0 0 GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic 0 0 GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic 0 0 GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic 0 0 GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic 0 0 GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic 0 0 GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, + <0 0 13 &gic 0 0 GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, + <0 0 14 &gic 0 0 GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, + <0 0 15 &gic 0 0 GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, + <0 0 16 &gic 0 0 GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, + <0 0 17 &gic 0 0 GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, + <0 0 18 &gic 0 0 GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <0 0 19 &gic 0 0 GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, + <0 0 20 &gic 0 0 GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + <0 0 21 &gic 0 0 GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, + <0 0 22 &gic 0 0 GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, + <0 0 23 &gic 0 0 GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, + <0 0 24 &gic 0 0 GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <0 0 25 &gic 0 0 GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, + <0 0 26 &gic 0 0 GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, + <0 0 27 &gic 0 0 GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <0 0 28 &gic 0 0 GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0 0 29 &gic 0 0 GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0 0 30 &gic 0 0 GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0 0 31 &gic 0 0 GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, + <0 0 32 &gic 0 0 GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, + <0 0 33 &gic 0 0 GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, + <0 0 34 &gic 0 0 GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <0 0 35 &gic 0 0 GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, + <0 0 36 &gic 0 0 GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <0 0 37 &gic 0 0 GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, + <0 0 38 &gic 0 0 GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, + <0 0 39 &gic 0 0 GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, + <0 0 40 &gic 0 0 GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <0 0 41 &gic 0 0 GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <0 0 42 &gic 0 0 GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>, + <0 0 43 &gic 0 0 GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>, + <0 0 44 &gic 0 0 GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; + + motherboard { + iofpga@3,00000000 { + clcd@1f0000 { + max-memory-bandwidth = <130000000>; /* 16bpp @ 63.5MHz */ + }; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 
ed774ee8f659..7446e0dc154d 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -18,7 +18,7 @@ status = "disabled"; frame@2a830000 { frame-number = <1>; - interrupts = <0 60 4>; + interrupts = ; reg = <0x0 0x2a830000 0x0 0x10000>; }; }; @@ -220,6 +220,41 @@ }; }; + replicator@20120000 { + compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; + reg = <0 0x20120000 0 0x1000>; + + clocks = <&soc_smc50mhz>; + clock-names = "apb_pclk"; + power-domains = <&scpi_devpd 0>; + + out-ports { + #address-cells = <1>; + #size-cells = <0>; + + /* replicator output ports */ + port@0 { + reg = <0>; + replicator_out_port0: endpoint { + remote-endpoint = <&tpiu_in_port>; + }; + }; + + port@1 { + reg = <1>; + replicator_out_port1: endpoint { + remote-endpoint = <&etr_in_port>; + }; + }; + }; + in-ports { + port { + replicator_in_port0: endpoint { + }; + }; + }; + }; + cpu_debug0: cpu-debug@22010000 { compatible = "arm,coresight-cpu-debug", "arm,primecell"; reg = <0x0 0x22010000 0x0 0x1000>; @@ -452,41 +487,6 @@ }; }; - replicator@20120000 { - compatible = "arm,coresight-dynamic-replicator", "arm,primecell"; - reg = <0 0x20120000 0 0x1000>; - - clocks = <&soc_smc50mhz>; - clock-names = "apb_pclk"; - power-domains = <&scpi_devpd 0>; - - out-ports { - #address-cells = <1>; - #size-cells = <0>; - - /* replicator output ports */ - port@0 { - reg = <0>; - replicator_out_port0: endpoint { - remote-endpoint = <&tpiu_in_port>; - }; - }; - - port@1 { - reg = <1>; - replicator_out_port1: endpoint { - remote-endpoint = <&etr_in_port>; - }; - }; - }; - in-ports { - port { - replicator_in_port0: endpoint { - }; - }; - }; - }; - sram: sram@2e000000 { compatible = "arm,juno-sram-ns", "mmio-sram"; reg = <0x0 0x2e000000 0x0 0x8000>; @@ -520,10 +520,10 @@ <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 7>; - interrupt-map = <0 0 0 1 &gic 0 0 0 136 4>, - <0 0 0 2 &gic 0 0 0 137 4>, - <0 0 0 3 &gic 0 0 0 138 4>, - <0 0 0 4 &gic 0 0 0 139 4>; + interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 2 &gic 0 0 GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 3 &gic 0 0 GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>, + <0 0 0 4 &gic 0 0 GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; msi-parent = <&v2m_0>; status = "disabled"; iommu-map-mask = <0x0>; /* RC has no means to output PCI RID */ @@ -787,19 +787,19 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 15>; - interrupt-map = <0 0 0 &gic 0 0 0 68 IRQ_TYPE_LEVEL_HIGH>, - <0 0 1 &gic 0 0 0 69 IRQ_TYPE_LEVEL_HIGH>, - <0 0 2 &gic 0 0 0 70 IRQ_TYPE_LEVEL_HIGH>, - <0 0 3 &gic 0 0 0 160 IRQ_TYPE_LEVEL_HIGH>, - <0 0 4 &gic 0 0 0 161 IRQ_TYPE_LEVEL_HIGH>, - <0 0 5 &gic 0 0 0 162 IRQ_TYPE_LEVEL_HIGH>, - <0 0 6 &gic 0 0 0 163 IRQ_TYPE_LEVEL_HIGH>, - <0 0 7 &gic 0 0 0 164 IRQ_TYPE_LEVEL_HIGH>, - <0 0 8 &gic 0 0 0 165 IRQ_TYPE_LEVEL_HIGH>, - <0 0 9 &gic 0 0 0 166 IRQ_TYPE_LEVEL_HIGH>, - <0 0 10 &gic 0 0 0 167 IRQ_TYPE_LEVEL_HIGH>, - <0 0 11 &gic 0 0 0 168 IRQ_TYPE_LEVEL_HIGH>, - <0 0 12 &gic 0 0 0 169 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 0 &gic 0 0 GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic 0 0 GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic 0 0 GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic 0 0 GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic 0 0 GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic 0 0 GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic 0 0 GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic 0 0 GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic 0 0 GIC_SPI 165 
IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic 0 0 GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic 0 0 GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>; }; site2: tlx@60000000 { @@ -809,6 +809,6 @@ ranges = <0 0 0x60000000 0x10000000>; #interrupt-cells = <1>; interrupt-map-mask = <0 0>; - interrupt-map = <0 0 &gic 0 0 0 168 IRQ_TYPE_LEVEL_HIGH>; + interrupt-map = <0 0 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; }; }; diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts index b2b7ced633cf..5f290090b0cf 100644 --- a/arch/arm64/boot/dts/arm/juno-r1.dts +++ b/arch/arm64/boot/dts/arm/juno-r1.dts @@ -85,7 +85,7 @@ }; A57_0: cpu@0 { - compatible = "arm,cortex-a57","arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x0 0x0>; device_type = "cpu"; enable-method = "psci"; @@ -102,7 +102,7 @@ }; A57_1: cpu@1 { - compatible = "arm,cortex-a57","arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x0 0x1>; device_type = "cpu"; enable-method = "psci"; @@ -119,7 +119,7 @@ }; A53_0: cpu@100 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x100>; device_type = "cpu"; enable-method = "psci"; @@ -136,7 +136,7 @@ }; A53_1: cpu@101 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x101>; device_type = "cpu"; enable-method = "psci"; @@ -153,7 +153,7 @@ }; A53_2: cpu@102 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x102>; device_type = "cpu"; enable-method = "psci"; @@ -170,7 +170,7 @@ }; A53_3: cpu@103 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x103>; device_type = "cpu"; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts index ab77adb4f3c2..305300dd521c 100644 --- a/arch/arm64/boot/dts/arm/juno-r2.dts +++ b/arch/arm64/boot/dts/arm/juno-r2.dts @@ -85,7 +85,7 @@ }; A72_0: cpu@0 { - compatible = "arm,cortex-a72","arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x0>; device_type = "cpu"; enable-method = "psci"; @@ -99,10 +99,11 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <450>; }; A72_1: cpu@1 { - compatible = "arm,cortex-a72","arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x1>; device_type = "cpu"; enable-method = "psci"; @@ -116,10 +117,11 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <450>; }; A53_0: cpu@100 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x100>; device_type = "cpu"; enable-method = "psci"; @@ -133,10 +135,11 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A53_1: cpu@101 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x101>; device_type = "cpu"; enable-method = "psci"; @@ -150,10 +153,11 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A53_2: cpu@102 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x102>; device_type = "cpu"; enable-method = "psci"; @@ -167,10 +171,11 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 
&CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A53_3: cpu@103 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x103>; device_type = "cpu"; enable-method = "psci"; @@ -184,6 +189,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A72_L2: l2-cache0 { diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index 08d4ba1716c3..f00cffbd032c 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -84,7 +84,7 @@ }; A57_0: cpu@0 { - compatible = "arm,cortex-a57","arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x0 0x0>; device_type = "cpu"; enable-method = "psci"; @@ -98,10 +98,11 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <530>; }; A57_1: cpu@1 { - compatible = "arm,cortex-a57","arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x0 0x1>; device_type = "cpu"; enable-method = "psci"; @@ -115,10 +116,11 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <530>; }; A53_0: cpu@100 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x100>; device_type = "cpu"; enable-method = "psci"; @@ -132,10 +134,11 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A53_1: cpu@101 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x101>; device_type = "cpu"; enable-method = "psci"; @@ -149,10 +152,11 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A53_2: cpu@102 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x102>; device_type = "cpu"; enable-method = "psci"; @@ -166,10 +170,11 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A53_3: cpu@103 { - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x103>; device_type = "cpu"; enable-method = "psci"; @@ -183,6 +188,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A57_L2: l2-cache0 { diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts index fe4fda473c0a..6e685d883303 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts +++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts @@ -10,6 +10,8 @@ /dts-v1/; +#include + /memreserve/ 0x80000000 0x00010000; #include "rtsm_ve-motherboard.dtsi" @@ -101,24 +103,24 @@ <0x0 0x2c002000 0 0x2000>, <0x0 0x2c004000 0 0x2000>, <0x0 0x2c006000 0 0x2000>; - interrupts = <1 9 0xf04>; + interrupts = ; }; timer { compatible = "arm,armv8-timer"; - interrupts = <1 13 0xf08>, - <1 14 0xf08>, - <1 11 0xf08>, - <1 10 0xf08>; + interrupts = , + , + , + ; clock-frequency = <100000000>; }; pmu { compatible = "arm,armv8-pmuv3"; - interrupts = <0 60 4>, - <0 61 4>, - <0 62 4>, - <0 63 4>; + interrupts = , + , + , + ; }; panel { @@ -144,48 +146,48 @@ #interrupt-cells = <1>; interrupt-map-mask = <0 0 63>; - interrupt-map = <0 0 0 &gic 0 0 4>, - <0 0 1 
&gic 0 1 4>, - <0 0 2 &gic 0 2 4>, - <0 0 3 &gic 0 3 4>, - <0 0 4 &gic 0 4 4>, - <0 0 5 &gic 0 5 4>, - <0 0 6 &gic 0 6 4>, - <0 0 7 &gic 0 7 4>, - <0 0 8 &gic 0 8 4>, - <0 0 9 &gic 0 9 4>, - <0 0 10 &gic 0 10 4>, - <0 0 11 &gic 0 11 4>, - <0 0 12 &gic 0 12 4>, - <0 0 13 &gic 0 13 4>, - <0 0 14 &gic 0 14 4>, - <0 0 15 &gic 0 15 4>, - <0 0 16 &gic 0 16 4>, - <0 0 17 &gic 0 17 4>, - <0 0 18 &gic 0 18 4>, - <0 0 19 &gic 0 19 4>, - <0 0 20 &gic 0 20 4>, - <0 0 21 &gic 0 21 4>, - <0 0 22 &gic 0 22 4>, - <0 0 23 &gic 0 23 4>, - <0 0 24 &gic 0 24 4>, - <0 0 25 &gic 0 25 4>, - <0 0 26 &gic 0 26 4>, - <0 0 27 &gic 0 27 4>, - <0 0 28 &gic 0 28 4>, - <0 0 29 &gic 0 29 4>, - <0 0 30 &gic 0 30 4>, - <0 0 31 &gic 0 31 4>, - <0 0 32 &gic 0 32 4>, - <0 0 33 &gic 0 33 4>, - <0 0 34 &gic 0 34 4>, - <0 0 35 &gic 0 35 4>, - <0 0 36 &gic 0 36 4>, - <0 0 37 &gic 0 37 4>, - <0 0 38 &gic 0 38 4>, - <0 0 39 &gic 0 39 4>, - <0 0 40 &gic 0 40 4>, - <0 0 41 &gic 0 41 4>, - <0 0 42 &gic 0 42 4>; + interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, + <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, + <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, + <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, + <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, + <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, + <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, + <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, + <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, + <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, + <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, + <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, + <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, + <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, + <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, + <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, + <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, + <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, + <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, + <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, + <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, + <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, + <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, + <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>, + <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, + <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, + <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, + <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>, + <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>, + <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>, + <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>, + <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>, + <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>, + <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>, + <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>, + <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, + <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>, + <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, + <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>; }; }; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi new file mode 100644 index 000000000000..57b0b9d7f3fa --- /dev/null +++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard-rs2.dtsi @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Ltd. 
Fast Models + * + * "rs2" extension for the v2m motherboard + */ +/ { + smb@8000000 { + motherboard { + arm,v2m-memory-map = "rs2"; + + iofpga@3,00000000 { + virtio-p9@140000 { + compatible = "virtio,mmio"; + reg = <0x140000 0x200>; + interrupts = <43>; + }; + + virtio-net@150000 { + compatible = "virtio,mmio"; + reg = <0x150000 0x200>; + interrupts = <44>; + }; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi index b25f3cbd3da8..454cf6c44c49 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi +++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi @@ -86,7 +86,7 @@ mmci@50000 { compatible = "arm,pl180", "arm,primecell"; reg = <0x050000 0x1000>; - interrupts = <9 10>; + interrupts = <9>, <10>; cd-gpios = <&v2m_sysreg 0 0>; wp-gpios = <&v2m_sysreg 1 0>; max-frequency = <12000000>; @@ -167,6 +167,12 @@ clock-names = "timclken1", "timclken2", "apb_pclk"; }; + virtio-block@130000 { + compatible = "virtio,mmio"; + reg = <0x130000 0x200>; + interrupts = <42>; + }; + rtc@170000 { compatible = "arm,pl031", "arm,primecell"; reg = <0x170000 0x1000>; @@ -193,12 +199,6 @@ }; }; }; - - virtio-block@130000 { - compatible = "virtio,mmio"; - reg = <0x130000 0x200>; - interrupts = <42>; - }; }; v2m_fixed_3v3: v2m-3v3 { diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts index 8981c3d2ff18..22383c26bb03 100644 --- a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts +++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts @@ -43,14 +43,14 @@ cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0>; next-level-cache = <&L2_0>; }; cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 1>; next-level-cache = <&L2_0>; }; diff --git a/arch/arm64/boot/dts/bitmain/Makefile b/arch/arm64/boot/dts/bitmain/Makefile new file mode 100644 index 000000000000..be90a6071be0 --- /dev/null +++ b/arch/arm64/boot/dts/bitmain/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0+ + +dtb-$(CONFIG_ARCH_BITMAIN) += bm1880-sophon-edge.dtb diff --git a/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts new file mode 100644 index 000000000000..6a3255597138 --- /dev/null +++ b/arch/arm64/boot/dts/bitmain/bm1880-sophon-edge.dts @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam + */ + +/dts-v1/; + +#include "bm1880.dtsi" + +/ { + compatible = "bitmain,sophon-edge", "bitmain,bm1880"; + model = "Sophon Edge"; + + aliases { + serial0 = &uart0; + serial1 = &uart2; + serial2 = &uart1; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x1 0x00000000 0x0 0x40000000>; // 1GB + }; + + uart_clk: uart-clk { + compatible = "fixed-clock"; + clock-frequency = <500000000>; + #clock-cells = <0>; + }; +}; + +&uart0 { + status = "okay"; + clocks = <&uart_clk>; +}; + +&uart1 { + status = "okay"; + clocks = <&uart_clk>; +}; + +&uart2 { + status = "okay"; + clocks = <&uart_clk>; +}; diff --git a/arch/arm64/boot/dts/bitmain/bm1880.dtsi b/arch/arm64/boot/dts/bitmain/bm1880.dtsi new file mode 100644 index 000000000000..55a4769e0de2 --- /dev/null +++ b/arch/arm64/boot/dts/bitmain/bm1880.dtsi @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 Linaro Ltd. + * Author: Manivannan Sadhasivam + */ + +#include + +/ { + compatible = "bitmain,bm1880"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x0>; + enable-method = "psci"; + }; + + cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a53"; + reg = <0x1>; + enable-method = "psci"; + }; + }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + secmon@100000000 { + reg = <0x1 0x00000000 0x0 0x20000>; + no-map; + }; + + jpu@130000000 { + reg = <0x1 0x30000000 0x0 0x08000000>; // 128M + no-map; + }; + + vpu@138000000 { + reg = <0x1 0x38000000 0x0 0x08000000>; // 128M + no-map; + }; + }; + + psci { + compatible = "arm,psci-0.2"; + method = "smc"; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , + , + , + ; + }; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + gic: interrupt-controller@50001000 { + compatible = "arm,gic-400"; + reg = <0x0 0x50001000 0x0 0x1000>, + <0x0 0x50002000 0x0 0x2000>; + interrupts = ; + interrupt-controller; + #interrupt-cells = <3>; + }; + + uart0: serial@58018000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x58018000 0x0 0x2000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@5801A000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x5801a000 0x0 0x2000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart2: serial@5801C000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x5801c000 0x0 0x2000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart3: serial@5801E000 { + compatible = "snps,dw-apb-uart"; + reg = <0x0 0x5801e000 0x0 0x2000>; + interrupts = ; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile index 667ca989c11b..d1d31ccad758 100644 --- a/arch/arm64/boot/dts/broadcom/Makefile +++ b/arch/arm64/boot/dts/broadcom/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-b.dtb \ +dtb-$(CONFIG_ARCH_BCM2835) += bcm2837-rpi-3-a-plus.dtb \ + bcm2837-rpi-3-b.dtb \ bcm2837-rpi-3-b-plus.dtb \ bcm2837-rpi-cm3-io3.dtb diff --git a/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-a-plus.dts 
b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-a-plus.dts new file mode 100644 index 000000000000..f0ec56a1c4d7 --- /dev/null +++ b/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-a-plus.dts @@ -0,0 +1,2 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "arm/bcm2837-rpi-3-a-plus.dts" diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index ea854f689fda..15f7b0ed3836 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -47,7 +47,7 @@ A57_0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0 0>; enable-method = "psci"; next-level-cache = <&CLUSTER0_L2>; @@ -55,7 +55,7 @@ A57_1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0 1>; enable-method = "psci"; next-level-cache = <&CLUSTER0_L2>; @@ -63,7 +63,7 @@ A57_2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0 2>; enable-method = "psci"; next-level-cache = <&CLUSTER0_L2>; @@ -71,7 +71,7 @@ A57_3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0 3>; enable-method = "psci"; next-level-cache = <&CLUSTER0_L2>; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index cfeaa855bd05..35c4670c00d1 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -44,7 +44,7 @@ cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x0>; enable-method = "psci"; next-level-cache = <&CLUSTER0_L2>; @@ -52,7 +52,7 @@ cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&CLUSTER0_L2>; @@ -60,7 +60,7 @@ cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x100>; enable-method = "psci"; next-level-cache = <&CLUSTER1_L2>; @@ -68,7 +68,7 @@ cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x101>; enable-method = "psci"; next-level-cache = <&CLUSTER1_L2>; @@ -76,7 +76,7 @@ cpu@200 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x200>; enable-method = "psci"; next-level-cache = <&CLUSTER2_L2>; @@ -84,7 +84,7 @@ cpu@201 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x201>; enable-method = "psci"; next-level-cache = <&CLUSTER2_L2>; @@ -92,7 +92,7 @@ cpu@300 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x300>; enable-method = "psci"; next-level-cache = <&CLUSTER3_L2>; @@ -100,7 +100,7 @@ cpu@301 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x301>; enable-method = "psci"; next-level-cache = <&CLUSTER3_L2>; diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi index 1a9103b269cb..e0a71795261b 100644 --- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi @@ -64,289 +64,289 @@ cpu@0 { 
device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x000>; enable-method = "psci"; }; cpu@1 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x001>; enable-method = "psci"; }; cpu@2 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x002>; enable-method = "psci"; }; cpu@3 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x003>; enable-method = "psci"; }; cpu@4 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x004>; enable-method = "psci"; }; cpu@5 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x005>; enable-method = "psci"; }; cpu@6 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x006>; enable-method = "psci"; }; cpu@7 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x007>; enable-method = "psci"; }; cpu@8 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x008>; enable-method = "psci"; }; cpu@9 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x009>; enable-method = "psci"; }; cpu@a { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x00a>; enable-method = "psci"; }; cpu@b { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x00b>; enable-method = "psci"; }; cpu@c { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x00c>; enable-method = "psci"; }; cpu@d { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x00d>; enable-method = "psci"; }; cpu@e { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x00e>; enable-method = "psci"; }; cpu@f { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x00f>; enable-method = "psci"; }; cpu@100 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x100>; enable-method = "psci"; }; cpu@101 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x101>; enable-method = "psci"; }; cpu@102 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x102>; enable-method = "psci"; }; cpu@103 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x103>; enable-method = "psci"; }; cpu@104 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x104>; enable-method = "psci"; }; cpu@105 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x105>; enable-method = "psci"; }; cpu@106 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x106>; enable-method = "psci"; }; 
cpu@107 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x107>; enable-method = "psci"; }; cpu@108 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x108>; enable-method = "psci"; }; cpu@109 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x109>; enable-method = "psci"; }; cpu@10a { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x10a>; enable-method = "psci"; }; cpu@10b { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x10b>; enable-method = "psci"; }; cpu@10c { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x10c>; enable-method = "psci"; }; cpu@10d { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x10d>; enable-method = "psci"; }; cpu@10e { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x10e>; enable-method = "psci"; }; cpu@10f { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x10f>; enable-method = "psci"; }; cpu@200 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x200>; enable-method = "psci"; }; cpu@201 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x201>; enable-method = "psci"; }; cpu@202 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x202>; enable-method = "psci"; }; cpu@203 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x203>; enable-method = "psci"; }; cpu@204 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x204>; enable-method = "psci"; }; cpu@205 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x205>; enable-method = "psci"; }; cpu@206 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x206>; enable-method = "psci"; }; cpu@207 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x207>; enable-method = "psci"; }; cpu@208 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x208>; enable-method = "psci"; }; cpu@209 { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x209>; enable-method = "psci"; }; cpu@20a { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x20a>; enable-method = "psci"; }; cpu@20b { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x20b>; enable-method = "psci"; }; cpu@20c { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x20c>; enable-method = "psci"; }; cpu@20d { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 
0x20d>; enable-method = "psci"; }; cpu@20e { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x20e>; enable-method = "psci"; }; cpu@20f { device_type = "cpu"; - compatible = "cavium,thunder", "arm,armv8"; + compatible = "cavium,thunder"; reg = <0x0 0x20f>; enable-method = "psci"; }; diff --git a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi index ff5c4c47b22b..0b7c935a4778 100644 --- a/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi +++ b/arch/arm64/boot/dts/cavium/thunder2-99xx.dtsi @@ -27,28 +27,28 @@ cpu@0 { device_type = "cpu"; - compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8"; + compatible = "cavium,thunder2", "brcm,vulcan"; reg = <0x0 0x0>; enable-method = "psci"; }; cpu@1 { device_type = "cpu"; - compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8"; + compatible = "cavium,thunder2", "brcm,vulcan"; reg = <0x0 0x1>; enable-method = "psci"; }; cpu@2 { device_type = "cpu"; - compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8"; + compatible = "cavium,thunder2", "brcm,vulcan"; reg = <0x0 0x2>; enable-method = "psci"; }; cpu@3 { device_type = "cpu"; - compatible = "cavium,thunder2", "brcm,vulcan", "arm,armv8"; + compatible = "cavium,thunder2", "brcm,vulcan"; reg = <0x0 0x3>; enable-method = "psci"; }; diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi index e7cd3b67d818..a04e80327b6e 100644 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi @@ -29,7 +29,7 @@ cpu0: cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; enable-method = "psci"; reg = <0x100>; clock-frequency = <1300000000>; @@ -41,7 +41,7 @@ cpu1: cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; enable-method = "psci"; reg = <0x101>; clock-frequency = <1300000000>; @@ -51,7 +51,7 @@ cpu2: cpu@102 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; enable-method = "psci"; reg = <0x102>; clock-frequency = <1300000000>; @@ -61,7 +61,7 @@ cpu3: cpu@103 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; enable-method = "psci"; reg = <0x103>; clock-frequency = <1300000000>; @@ -71,7 +71,7 @@ cpu4: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; enable-method = "psci"; reg = <0x0>; clock-frequency = <1900000000>; @@ -83,7 +83,7 @@ cpu5: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; enable-method = "psci"; reg = <0x1>; clock-frequency = <1900000000>; @@ -93,7 +93,7 @@ cpu6: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; enable-method = "psci"; reg = <0x2>; clock-frequency = <1900000000>; @@ -103,7 +103,7 @@ cpu7: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; enable-method = "psci"; reg = <0x3>; clock-frequency = <1900000000>; diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi index 75ad724c487e..967558a93d82 100644 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi @@ -34,28 +34,28 @@ cpu_atlas0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + 
compatible = "arm,cortex-a57"; reg = <0x0>; enable-method = "psci"; }; cpu_atlas1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x1>; enable-method = "psci"; }; cpu_atlas2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x2>; enable-method = "psci"; }; cpu_atlas3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x3>; enable-method = "psci"; }; diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile index f9be2426f83c..13604e558dc1 100644 --- a/arch/arm64/boot/dts/freescale/Makefile +++ b/arch/arm64/boot/dts/freescale/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frdm.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-frwy.dtb +dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-oxalis.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-qds.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1012a-rdb.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-ls1028a-qds.dtb @@ -20,3 +21,4 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb +dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts b/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts new file mode 100644 index 000000000000..7c726267ec8f --- /dev/null +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a-oxalis.dts @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Device Tree file for Oxalis + * + * Copyright (c) 2019 Manivannan Sadhasivam + * + */ + +/dts-v1/; + +#include "fsl-ls1012a.dtsi" + +/ { + model = "Oxalis"; + compatible = "ebs-systart,oxalis", "fsl,ls1012a"; + + sys_mclk: clock-mclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <25000000>; + }; + + reg_1p8v: regulator-1p8v { + compatible = "regulator-fixed"; + regulator-name = "1P8V"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + }; + + sound { + compatible = "simple-audio-card"; + simple-audio-card,format = "i2s"; + simple-audio-card,widgets = + "Microphone", "Microphone Jack", + "Headphone", "Headphone Jack", + "Speaker", "Speaker Ext", + "Line", "Line In Jack"; + simple-audio-card,routing = + "MIC_IN", "Microphone Jack", + "Microphone Jack", "Mic Bias", + "LINE_IN", "Line In Jack", + "Headphone Jack", "HP_OUT", + "Speaker Ext", "LINE_OUT"; + + simple-audio-card,cpu { + sound-dai = <&sai2>; + frame-master; + bitclock-master; + }; + + simple-audio-card,codec { + sound-dai = <&codec>; + frame-master; + bitclock-master; + system-clock-frequency = <25000000>; + }; + }; +}; + +&duart0 { + status = "okay"; +}; + +&duart1 { + status = "okay"; +}; + +&esdhc1 { + status = "okay"; +}; + +&i2c0 { + status = "okay"; + + codec: audio-codec@a { + #sound-dai-cells = <0>; + compatible = "fsl,sgtl5000"; + reg = <0xa>; + VDDA-supply = <®_1p8v>; + VDDIO-supply = <®_1p8v>; + clocks = <&sys_mclk>; + }; +}; + +&i2c1 { + status = "okay"; +}; + +&sai2 { + status = "okay"; +}; + +&sata { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi index 816f3a4537e3..1ce0042b2a14 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi @@ -446,6 +446,7 @@ 
dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; sata: sata@3200000 { @@ -486,6 +487,7 @@ #size-cells = <2>; device_type = "pci"; num-lanes = <4>; + num-viewport = <2>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts index fdeb4176fc33..f86b054a74ae 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts @@ -71,3 +71,20 @@ &duart1 { status = "okay"; }; + +&enetc_port0 { + phy-handle = <&sgmii_phy0>; + phy-connection-type = "sgmii"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + sgmii_phy0: ethernet-phy@2 { + reg = <0x2>; + }; + }; +}; + +&enetc_port1 { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index a8cf92af05fb..2896bbcfa3bb 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -335,5 +335,40 @@ , , , ; }; + + pcie@1f0000000 { /* Integrated Endpoint Root Complex */ + compatible = "pci-host-ecam-generic"; + reg = <0x01 0xf0000000 0x0 0x100000>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + msi-parent = <&its>; + device_type = "pci"; + bus-range = <0x0 0x0>; + dma-coherent; + msi-map = <0 &its 0x17 0xe>; + iommu-map = <0 &smmu 0x17 0xe>; + /* PF0-6 BAR0 - non-prefetchable memory */ + ranges = <0x82000000 0x0 0x00000000 0x1 0xf8000000 0x0 0x160000 + /* PF0-6 BAR2 - prefetchable memory */ + 0xc2000000 0x0 0x00000000 0x1 0xf8160000 0x0 0x070000 + /* PF0: VF0-1 BAR0 - non-prefetchable memory */ + 0x82000000 0x0 0x00000000 0x1 0xf81d0000 0x0 0x020000 + /* PF0: VF0-1 BAR2 - prefetchable memory */ + 0xc2000000 0x0 0x00000000 0x1 0xf81f0000 0x0 0x020000 + /* PF1: VF0-1 BAR0 - non-prefetchable memory */ + 0x82000000 0x0 0x00000000 0x1 0xf8210000 0x0 0x020000 + /* PF1: VF0-1 BAR2 - prefetchable memory */ + 0xc2000000 0x0 0x00000000 0x1 0xf8230000 0x0 0x020000>; + + enetc_port0: ethernet@0,0 { + compatible = "fsl,enetc"; + reg = <0x000000 0 0 0 0>; + }; + enetc_port1: ethernet@0,1 { + compatible = "fsl,enetc"; + reg = <0x000100 0 0 0 0>; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts index 8a500940f124..1aac81da7e37 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a-qds.dts @@ -137,7 +137,7 @@ &qspi { status = "okay"; - qflash0: s25fl128s@0 { + qflash0: flash@0 { compatible = "spansion,m25p80"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi index 70057b4e46e8..6fd6116509cc 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi @@ -611,6 +611,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; usb1: usb3@3000000 { @@ -620,6 +621,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; usb2: 
usb3@3100000 { @@ -629,6 +631,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; sata: sata@3200000 { @@ -675,6 +678,7 @@ device_type = "pci"; dma-coherent; num-lanes = <4>; + num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -701,6 +705,7 @@ device_type = "pci"; dma-coherent; num-lanes = <2>; + num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -727,6 +732,7 @@ device_type = "pci"; dma-coherent; num-lanes = <2>; + num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts index 2f220ec4947b..eec62c63dafe 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-qds.dts @@ -165,7 +165,7 @@ &qspi { status = "okay"; - qflash0: s25fl128s@0 { + qflash0: flash@0 { compatible = "spansion,m25p80"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts index 07c665c6e0dc..6a6514d0e5a9 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a-rdb.dts @@ -101,7 +101,7 @@ &qspi { status = "okay"; - qflash0: s25fs512s@0 { + qflash0: flash@0 { compatible = "spansion,m25p80"; #address-cells = <1>; #size-cells = <1>; @@ -111,7 +111,7 @@ reg = <0>; }; - qflash1: s25fs512s@1 { + qflash1: flash@1 { compatible = "spansion,m25p80"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi index 9a2106e60e19..cb7185014d3a 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi @@ -202,6 +202,7 @@ compatible = "fsl,ifc", "simple-bus"; reg = <0x0 0x1530000 0x0 0x10000>; interrupts = ; + status = "disabled"; }; qspi: spi@1550000 { @@ -424,6 +425,7 @@ reg = <0x00 0x21c0500 0x0 0x100>; interrupts = ; clocks = <&clockgen 4 1>; + status = "disabled"; }; duart1: serial@21c0600 { @@ -431,6 +433,7 @@ reg = <0x00 0x21c0600 0x0 0x100>; interrupts = ; clocks = <&clockgen 4 1>; + status = "disabled"; }; duart2: serial@21d0500 { @@ -438,6 +441,7 @@ reg = <0x0 0x21d0500 0x0 0x100>; interrupts = ; clocks = <&clockgen 4 1>; + status = "disabled"; }; duart3: serial@21d0600 { @@ -445,6 +449,7 @@ reg = <0x0 0x21d0600 0x0 0x100>; interrupts = ; clocks = <&clockgen 4 1>; + status = "disabled"; }; gpio0: gpio@2300000 { @@ -572,6 +577,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; usb1: usb@3000000 { @@ -581,6 +587,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; usb2: usb@3100000 { @@ -590,6 +597,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = 
<0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; sata: sata@3200000 { @@ -644,6 +652,7 @@ device_type = "pci"; dma-coherent; num-lanes = <4>; + num-viewport = <8>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -657,6 +666,17 @@ status = "disabled"; }; + pcie_ep@3400000 { + compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep"; + reg = <0x00 0x03400000 0x0 0x00100000 + 0x40 0x00000000 0x8 0x00000000>; + reg-names = "regs", "addr_space"; + num-ib-windows = <6>; + num-ob-windows = <8>; + num-lanes = <2>; + status = "disabled"; + }; + pcie@3500000 { compatible = "fsl,ls1046a-pcie"; reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ @@ -670,6 +690,7 @@ device_type = "pci"; dma-coherent; num-lanes = <2>; + num-viewport = <8>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -683,6 +704,17 @@ status = "disabled"; }; + pcie_ep@3500000 { + compatible = "fsl,ls1046a-pcie-ep","fsl,ls-pcie-ep"; + reg = <0x00 0x03500000 0x0 0x00100000 + 0x48 0x00000000 0x8 0x00000000>; + reg-names = "regs", "addr_space"; + num-ib-windows = <6>; + num-ob-windows = <8>; + num-lanes = <2>; + status = "disabled"; + }; + pcie@3600000 { compatible = "fsl,ls1046a-pcie"; reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ @@ -696,6 +728,7 @@ device_type = "pci"; dma-coherent; num-lanes = <2>; + num-viewport = <8>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -709,6 +742,17 @@ status = "disabled"; }; + pcie_ep@3600000 { + compatible = "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"; + reg = <0x00 0x03600000 0x0 0x00100000 + 0x50 0x00000000 0x8 0x00000000>; + reg-names = "regs", "addr_space"; + num-ib-windows = <6>; + num-ob-windows = <8>; + num-lanes = <2>; + status = "disabled"; + }; + qdma: dma-controller@8380000 { compatible = "fsl,ls1046a-qdma", "fsl,ls1021a-qdma"; reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */ @@ -729,7 +773,6 @@ queue-sizes = <64 64>; big-endian; }; - }; reserved-memory { diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi index de93b42b1f51..661137ffa319 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1088a.dtsi @@ -377,6 +377,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; status = "disabled"; }; @@ -452,6 +453,7 @@ device_type = "pci"; dma-coherent; num-lanes = <4>; + num-viewport = <256>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -477,6 +479,7 @@ device_type = "pci"; dma-coherent; num-lanes = <4>; + num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -502,6 +505,7 @@ device_type = "pci"; dma-coherent; num-lanes = <8>; + 
num-viewport = <6>; bus-range = <0x0 0xff>; ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ @@ -515,6 +519,96 @@ status = "disabled"; }; + smmu: iommu@5000000 { + compatible = "arm,mmu-500"; + reg = <0 0x5000000 0 0x800000>; + #iommu-cells = <1>; + stream-match-mask = <0x7C00>; + #global-interrupts = <12>; + // global secure fault + interrupts = , + // combined secure + , + // global non-secure fault + , + // combined non-secure + , + // performance counter interrupts 0-7 + , + , + , + , + , + , + , + , + // per context interrupt, 64 interrupts + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + }; + cluster1_core0_watchdog: wdt@c000000 { compatible = "arm,sp805-wdt", "arm,primecell"; reg = <0x0 0xc000000 0x0 0x1000>; @@ -576,6 +670,8 @@ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ msi-parent = <&its>; + iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */ + dma-coherent; #address-cells = <3>; #size-cells = <1>; @@ -649,5 +745,4 @@ method = "smc"; }; }; - }; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi index 6d6ca166f86b..d7e78dcd153d 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi @@ -627,6 +627,7 @@ device_type = "pci"; dma-coherent; num-lanes = <4>; + num-viewport = <6>; bus-range = <0x0 0xff>; msi-parent = <&its>; #interrupt-cells = <1>; @@ -648,6 +649,7 @@ device_type = "pci"; dma-coherent; num-lanes = <4>; + num-viewport = <6>; bus-range = <0x0 0xff>; msi-parent = <&its>; #interrupt-cells = <1>; @@ -669,6 +671,7 @@ device_type = "pci"; dma-coherent; num-lanes = <8>; + num-viewport = <256>; bus-range = <0x0 0xff>; msi-parent = <&its>; #interrupt-cells = <1>; @@ -690,6 +693,7 @@ device_type = "pci"; dma-coherent; num-lanes = <4>; + num-viewport = <6>; bus-range = <0x0 0xff>; msi-parent = <&its>; #interrupt-cells = <1>; @@ -727,6 +731,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; usb1: usb3@3110000 { @@ -737,6 +742,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; }; ccn@4000000 { diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts index 6481e5f20e69..9df37b159415 100644 --- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts +++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-rdb.dts @@ -50,6 +50,32 @@ status = "okay"; }; +&fspi { + status = "okay"; + + mt35xu512aba0: flash@0 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "spansion,m25p80"; + m25p,fast-read; + spi-max-frequency = <50000000>; + reg = <0>; + spi-rx-bus-width = <8>; + spi-tx-bus-width = <8>; + }; + + mt35xu512aba1: flash@1 { + #address-cells = <1>; + #size-cells = <1>; + compatible = "spansion,m25p80"; + m25p,fast-read; + spi-max-frequency = <50000000>; + reg = <1>; + spi-rx-bus-width = <8>; + spi-tx-bus-width = <8>; + }; +}; + &i2c0 { status = "okay"; diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi 
b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi index a79f5c1ea56d..fe87204850b5 100644 --- a/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi @@ -398,6 +398,7 @@ #address-cells = <2>; #size-cells = <2>; ranges; + dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>; crypto: crypto@8000000 { compatible = "fsl,sec-v5.0", "fsl,sec-v4.0"; @@ -542,6 +543,19 @@ status = "disabled"; }; + fspi: spi@20c0000 { + compatible = "nxp,lx2160a-fspi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0 0x20c0000 0x0 0x10000>, + <0x0 0x20000000 0x0 0x10000000>; + reg-names = "fspi_base", "fspi_mmap"; + interrupts = ; + clocks = <&clockgen 4 3>, <&clockgen 4 3>; + clock-names = "fspi_en", "fspi"; + status = "disabled"; + }; + esdhc0: esdhc@2140000 { compatible = "fsl,esdhc"; reg = <0x0 0x2140000 0x0 0x10000>; @@ -658,6 +672,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; status = "disabled"; }; @@ -668,6 +683,7 @@ dr_mode = "host"; snps,quirk-frame-length-adjustment = <0x20>; snps,dis_rxdet_inp3_quirk; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; status = "disabled"; }; @@ -762,5 +778,122 @@ ; dma-coherent; }; + + fsl_mc: fsl-mc@80c000000 { + compatible = "fsl,qoriq-mc"; + reg = <0x00000008 0x0c000000 0 0x40>, + <0x00000000 0x08340000 0 0x40000>; + msi-parent = <&its>; + /* iommu-map property is fixed up by u-boot */ + iommu-map = <0 &smmu 0 0>; + dma-coherent; + #address-cells = <3>; + #size-cells = <1>; + + /* + * Region type 0x0 - MC portals + * Region type 0x1 - QBMAN portals + */ + ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 + 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; + + /* + * Define the maximum number of MACs present on the SoC. 
+ */ + dpmacs { + #address-cells = <1>; + #size-cells = <0>; + + dpmac1: dpmac@1 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x1>; + }; + + dpmac2: dpmac@2 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x2>; + }; + + dpmac3: dpmac@3 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x3>; + }; + + dpmac4: dpmac@4 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x4>; + }; + + dpmac5: dpmac@5 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x5>; + }; + + dpmac6: dpmac@6 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x6>; + }; + + dpmac7: dpmac@7 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x7>; + }; + + dpmac8: dpmac@8 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x8>; + }; + + dpmac9: dpmac@9 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x9>; + }; + + dpmac10: dpmac@a { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0xa>; + }; + + dpmac11: dpmac@b { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0xb>; + }; + + dpmac12: dpmac@c { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0xc>; + }; + + dpmac13: dpmac@d { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0xd>; + }; + + dpmac14: dpmac@e { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0xe>; + }; + + dpmac15: dpmac@f { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0xf>; + }; + + dpmac16: dpmac@10 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x10>; + }; + + dpmac17: dpmac@11 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x11>; + }; + + dpmac18: dpmac@12 { + compatible = "fsl,qoriq-mc-dpmac"; + reg = <0x12>; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h new file mode 100644 index 000000000000..e25f7fcd7997 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h @@ -0,0 +1,629 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2017-2018 NXP + */ + +#ifndef __DTS_IMX8MM_PINFUNC_H +#define __DTS_IMX8MM_PINFUNC_H + +/* + * The pin function ID is a tuple of + * + */ + +#define MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x028 0x290 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_ENET_PHY_REF_CLK_ROOT 0x028 0x290 0x4C0 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_ANAMIX_REF_CLK_32K 0x028 0x290 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_CCMSRCGPCMIX_EXT_CLK1 0x028 0x290 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO00_SJC_FAIL 0x028 0x290 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_GPIO1_IO1 0x02C 0x294 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_PWM1_OUT 0x02C 0x294 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_ANAMIX_REF_CLK_24M 0x02C 0x294 0x4BC 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_CCMSRCGPCMIX_EXT_CLK2 0x02C 0x294 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO01_SJC_ACTIVE 0x02C 0x294 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_GPIO1_IO2 0x030 0x298 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0x030 0x298 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_WDOG1_WDOG_ANY 0x030 0x298 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO02_SJC_DE_B 0x030 0x298 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x034 0x29C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_USDHC1_VSELECT 0x034 0x29C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_SDMA1_EXT_EVENT0 0x034 0x29C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_ANAMIX_XTAL_OK 0x034 0x29C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO03_SJC_DONE 0x034 0x29C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x038 0x2A0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x038 0x2A0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_SDMA1_EXT_EVENT1 
0x038 0x2A0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_ANAMIX_XTAL_OK_LV 0x038 0x2A0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO04_USDHC1_TEST_TRIG 0x038 0x2A0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x03C 0x2A4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_M4_NMI 0x03C 0x2A4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_PMIC_READY 0x03C 0x2A4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_CCMSRCGPCMIX_INT_BOOT 0x03C 0x2A4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO05_USDHC2_TEST_TRIG 0x03C 0x2A4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x040 0x2A8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_ENET1_MDC 0x040 0x2A8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_USDHC1_CD_B 0x040 0x2A8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_CCMSRCGPCMIX_EXT_CLK3 0x040 0x2A8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO06_ECSPI1_TEST_TRIG 0x040 0x2A8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x044 0x2AC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_ENET1_MDIO 0x044 0x2AC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_USDHC1_WP 0x044 0x2AC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_CCMSRCGPCMIX_EXT_CLK4 0x044 0x2AC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO07_ECSPI2_TEST_TRIG 0x044 0x2AC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_GPIO1_IO8 0x048 0x2B0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_ENET1_1588_EVENT0_IN 0x048 0x2B0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_USDHC2_RESET_B 0x048 0x2B0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_CCMSRCGPCMIX_WAIT 0x048 0x2B0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO08_QSPI_TEST_TRIG 0x048 0x2B0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x04C 0x2B4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_ENET1_1588_EVENT0_OUT 0x04C 0x2B4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_SDMA2_EXT_EVENT0 0x04C 0x2B4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_CCMSRCGPCMIX_STOP 0x04C 0x2B4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO09_RAWNAND_TEST_TRIG 0x04C 0x2B4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x050 0x2B8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO10_USB1_OTG_ID 0x050 0x2B8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO10_OCOTP_CTRL_WRAPPER_FUSE_LATCHED 0x050 0x2B8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x054 0x2BC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_USB2_OTG_ID 0x054 0x2BC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_PMIC_READY 0x054 0x2BC 0x4BC 0x5 0x1 +#define MX8MM_IOMUXC_GPIO1_IO11_CCMSRCGPCMIX_OUT0 0x054 0x2BC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO11_CAAM_WRAPPER_RNG_OSC_OBS 0x054 0x2BC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x058 0x2C0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x058 0x2C0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_SDMA2_EXT_EVENT1 0x058 0x2C0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_CCMSRCGPCMIX_OUT1 0x058 0x2C0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO12_CSU_CSU_ALARM_AUT0 0x058 0x2C0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_GPIO1_IO13 0x05C 0x2C4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x05C 0x2C4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_PWM2_OUT 0x05C 0x2C4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_CCMSRCGPCMIX_OUT2 0x05C 0x2C4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO13_CSU_CSU_ALARM_AUT1 0x05C 0x2C4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_GPIO1_IO14 0x060 0x2C8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_USB2_OTG_PWR 0x060 0x2C8 0x000 0x1 0x0 +#define 
MX8MM_IOMUXC_GPIO1_IO14_PWM3_OUT 0x060 0x2C8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_CCMSRCGPCMIX_CLKO1 0x060 0x2C8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO14_CSU_CSU_ALARM_AUT2 0x060 0x2C8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x064 0x2CC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_USB2_OTG_OC 0x064 0x2CC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_PWM4_OUT 0x064 0x2CC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_CCMSRCGPCMIX_CLKO2 0x064 0x2CC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_GPIO1_IO15_CSU_CSU_INT_DEB 0x064 0x2CC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x068 0x2D0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_MDC_GPIO1_IO16 0x068 0x2D0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x06C 0x2D4 0x4C0 0x0 0x1 +#define MX8MM_IOMUXC_ENET_MDIO_GPIO1_IO17 0x06C 0x2D4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x070 0x2D8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD3_GPIO1_IO18 0x070 0x2D8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x074 0x2DC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD2_ENET1_TX_CLK 0x074 0x2DC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ENET_TD2_GPIO1_IO19 0x074 0x2DC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x078 0x2E0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD1_GPIO1_IO20 0x078 0x2E0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x07C 0x2E4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TD0_GPIO1_IO21 0x07C 0x2E4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x080 0x2E8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TX_CTL_GPIO1_IO22 0x080 0x2E8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x084 0x2EC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_TXC_ENET1_TX_ER 0x084 0x2EC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ENET_TXC_GPIO1_IO23 0x084 0x2EC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x088 0x2F0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RX_CTL_GPIO1_IO24 0x088 0x2F0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x08C 0x2F4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RXC_ENET1_RX_ER 0x08C 0x2F4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ENET_RXC_GPIO1_IO25 0x08C 0x2F4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x090 0x2F8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD0_GPIO1_IO26 0x090 0x2F8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x094 0x2FC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD1_GPIO1_IO27 0x094 0x2FC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x098 0x300 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD2_GPIO1_IO28 0x098 0x300 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x09C 0x304 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ENET_RD3_GPIO1_IO29 0x09C 0x304 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x0A0 0x308 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_CLK_GPIO2_IO0 0x0A0 0x308 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA2_GPIO2_IO4 0x0B0 0x318 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x0B4 0x31C 0x000 0x0 0x0 +#define 
MX8MM_IOMUXC_SD1_DATA3_GPIO2_IO5 0x0B4 0x31C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA4_USDHC1_DATA4 0x0B8 0x320 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA4_GPIO2_IO6 0x0B8 0x320 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA5_USDHC1_DATA5 0x0BC 0x324 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA5_GPIO2_IO7 0x0BC 0x324 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA6_USDHC1_DATA6 0x0C0 0x328 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA6_GPIO2_IO8 0x0C0 0x328 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_DATA7_USDHC1_DATA7 0x0C4 0x32C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_DATA7_GPIO2_IO9 0x0C4 0x32C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0x0C8 0x330 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_RESET_B_GPIO2_IO10 0x0C8 0x330 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x0CC 0x334 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD1_STROBE_GPIO2_IO11 0x0CC 0x334 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CD_B_USDHC2_CD_B 0x0D0 0x338 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x0D0 0x338 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0D4 0x33C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_CLK_GPIO2_IO13 0x0D4 0x33C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CLK_CCMSRCGPCMIX_OBSERVE0 0x0D4 0x33C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_CLK_OBSERVE_MUX_OUT0 0x0D4 0x33C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0D8 0x340 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_CMD_GPIO2_IO14 0x0D8 0x340 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_CMD_CCMSRCGPCMIX_OBSERVE1 0x0D8 0x340 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_CMD_OBSERVE_MUX_OUT1 0x0D8 0x340 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0DC 0x344 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_GPIO2_IO15 0x0DC 0x344 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_CCMSRCGPCMIX_OBSERVE2 0x0DC 0x344 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_DATA0_OBSERVE_MUX_OUT2 0x0DC 0x344 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x0E0 0x348 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_GPIO2_IO16 0x0E0 0x348 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_CCMSRCGPCMIX_WAIT 0x0E0 0x348 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_DATA1_OBSERVE_MUX_OUT3 0x0E0 0x348 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x0E4 0x34C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_GPIO2_IO17 0x0E4 0x34C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_CCMSRCGPCMIX_STOP 0x0E4 0x34C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_DATA2_OBSERVE_MUX_OUT4 0x0E4 0x34C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x0E8 0x350 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_DATA3_GPIO2_IO18 0x0E8 0x350 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_DATA3_CCMSRCGPCMIX_EARLY_RESET 0x0E8 0x350 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_RESET_B_USDHC2_RESET_B 0x0EC 0x354 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x0EC 0x354 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_RESET_B_CCMSRCGPCMIX_SYSTEM_RESET 0x0EC 0x354 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SD2_WP_USDHC2_WP 0x0F0 0x358 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SD2_WP_GPIO2_IO20 0x0F0 0x358 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SD2_WP_SIM_M_HMASTLOCK 0x0F0 0x358 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_ALE_RAWNAND_ALE 0x0F4 0x35C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x0F4 0x35C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_ALE_GPIO3_IO0 0x0F4 0x35C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_ALE_SIM_M_HPROT0 0x0F4 0x35C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_RAWNAND_CE0_B 0x0F8 0x360 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x0F8 0x360 
0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_GPIO3_IO1 0x0F8 0x360 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE0_B_SIM_M_HPROT1 0x0F8 0x360 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_RAWNAND_CE1_B 0x0FC 0x364 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_QSPI_A_SS1_B 0x0FC 0x364 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x0FC 0x364 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x0FC 0x364 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE1_B_SIM_M_HPROT2 0x0FC 0x364 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_RAWNAND_CE2_B 0x100 0x368 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_QSPI_B_SS0_B 0x100 0x368 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x100 0x368 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_GPIO3_IO3 0x100 0x368 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE2_B_SIM_M_HPROT3 0x100 0x368 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_RAWNAND_CE3_B 0x104 0x36C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_QSPI_B_SS1_B 0x104 0x36C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x104 0x36C 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_GPIO3_IO4 0x104 0x36C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CE3_B_SIM_M_HADDR0 0x104 0x36C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_CLE_RAWNAND_CLE 0x108 0x370 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_CLE_QSPI_B_SCLK 0x108 0x370 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x108 0x370 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x108 0x370 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_CLE_SIM_M_HADDR1 0x108 0x370 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_RAWNAND_DATA00 0x10C 0x374 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x10C 0x374 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_GPIO3_IO6 0x10C 0x374 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA00_SIM_M_HADDR2 0x10C 0x374 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_RAWNAND_DATA01 0x110 0x378 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x110 0x378 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7 0x110 0x378 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA01_SIM_M_HADDR3 0x110 0x378 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_RAWNAND_DATA02 0x114 0x37C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x114 0x37C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_GPIO3_IO8 0x114 0x37C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA02_SIM_M_HADDR4 0x114 0x37C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_RAWNAND_DATA03 0x118 0x380 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x118 0x380 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_GPIO3_IO9 0x118 0x380 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA03_SIM_M_HADDR5 0x118 0x380 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_RAWNAND_DATA04 0x11C 0x384 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_QSPI_B_DATA0 0x11C 0x384 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x11C 0x384 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_GPIO3_IO10 0x11C 0x384 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA04_SIM_M_HADDR6 0x11C 0x384 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_RAWNAND_DATA05 0x120 0x388 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_QSPI_B_DATA1 0x120 0x388 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x120 0x388 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_GPIO3_IO11 0x120 0x388 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA05_SIM_M_HADDR7 0x120 0x388 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_RAWNAND_DATA06 0x124 0x38C 0x000 0x0 0x0 +#define 
MX8MM_IOMUXC_NAND_DATA06_QSPI_B_DATA2 0x124 0x38C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x124 0x38C 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_GPIO3_IO12 0x124 0x38C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA06_SIM_M_HADDR8 0x124 0x38C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_RAWNAND_DATA07 0x128 0x390 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_QSPI_B_DATA3 0x128 0x390 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x128 0x390 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_GPIO3_IO13 0x128 0x390 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DATA07_SIM_M_HADDR9 0x128 0x390 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_DQS_RAWNAND_DQS 0x12C 0x394 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_DQS_QSPI_A_DQS 0x12C 0x394 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_DQS_GPIO3_IO14 0x12C 0x394 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_DQS_SIM_M_HADDR10 0x12C 0x394 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_RAWNAND_RE_B 0x130 0x398 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_QSPI_B_DQS 0x130 0x398 0x000 0x1 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x130 0x398 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x130 0x398 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_RE_B_SIM_M_HADDR11 0x130 0x398 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_READY_B_RAWNAND_READY_B 0x134 0x39C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_READY_B_GPIO3_IO16 0x134 0x39C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_READY_B_SIM_M_HADDR12 0x134 0x39C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_RAWNAND_WE_B 0x138 0x3A0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x138 0x3A0 0x000 0x12 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_GPIO3_IO17 0x138 0x3A0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_WE_B_SIM_M_HADDR13 0x138 0x3A0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_RAWNAND_WP_B 0x13C 0x3A4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x13C 0x3A4 0x000 0x2 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_GPIO3_IO18 0x13C 0x3A4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_NAND_WP_B_SIM_M_HADDR14 0x13C 0x3A4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI5_RXFS_SAI5_RX_SYNC 0x140 0x3A8 0x4E4 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXFS_SAI1_TX_DATA0 0x140 0x3A8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXFS_GPIO3_IO19 0x140 0x3A8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_SAI5_RX_BCLK 0x144 0x3AC 0x4D0 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_SAI1_TX_DATA1 0x144 0x3AC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_PDM_CLK 0x144 0x3AC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x144 0x3AC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_SAI5_RX_DATA0 0x148 0x3B0 0x4D4 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_SAI1_TX_DATA2 0x148 0x3B0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_PDM_DATA0 0x148 0x3B0 0x534 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXD0_GPIO3_IO21 0x148 0x3B0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI5_RX_DATA1 0x14C 0x3B4 0x4D8 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI1_TX_DATA3 0x14C 0x3B4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI1_TX_SYNC 0x14C 0x3B4 0x4CC 0x2 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_SAI5_TX_SYNC 0x14C 0x3B4 0x4EC 0x3 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_PDM_DATA1 0x14C 0x3B4 0x538 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXD1_GPIO3_IO22 0x14C 0x3B4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI5_RX_DATA2 0x150 0x3B8 0x4DC 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI1_TX_DATA4 0x150 0x3B8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI1_TX_SYNC 0x150 0x3B8 0x4CC 0x2 0x1 +#define MX8MM_IOMUXC_SAI5_RXD2_SAI5_TX_BCLK 0x150 0x3B8 0x4E8 0x3 0x0 +#define 
MX8MM_IOMUXC_SAI5_RXD2_PDM_DATA2 0x150 0x3B8 0x53c 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXD2_GPIO3_IO23 0x150 0x3B8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI5_RX_DATA3 0x154 0x3BC 0x4E0 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI1_TX_DATA5 0x154 0x3BC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI1_TX_SYNC 0x154 0x3BC 0x4CC 0x2 0x2 +#define MX8MM_IOMUXC_SAI5_RXD3_SAI5_TX_DATA0 0x154 0x3BC 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_PDM_DATA3 0x154 0x3BC 0x540 0x4 0x0 +#define MX8MM_IOMUXC_SAI5_RXD3_GPIO3_IO24 0x154 0x3BC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_SAI5_MCLK 0x158 0x3C0 0x52C 0x0 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_SAI1_TX_BCLK 0x158 0x3C0 0x4C8 0x1 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_SAI4_MCLK 0x158 0x3C0 0x000 0x2 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_GPIO3_IO25 0x158 0x3C0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI5_MCLK_CCMSRCGPCMIX_TESTER_ACK 0x158 0x3C0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_SAI1_RX_SYNC 0x15C 0x3C4 0x4C4 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_SAI5_RX_SYNC 0x15C 0x3C4 0x4E4 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXFS_CORESIGHT_TRACE_CLK 0x15C 0x3C4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_GPIO4_IO0 0x15C 0x3C4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXFS_SIM_M_HADDR15 0x15C 0x3C4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_SAI1_RX_BCLK 0x160 0x3C8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_SAI5_RX_BCLK 0x160 0x3C8 0x4D0 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXC_CORESIGHT_TRACE_CTL 0x160 0x3C8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_GPIO4_IO1 0x160 0x3C8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXC_SIM_M_HADDR16 0x160 0x3C8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_SAI1_RX_DATA0 0x164 0x3CC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_SAI5_RX_DATA0 0x164 0x3CC 0x4D4 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXD0_PDM_DATA0 0x164 0x3CC 0x534 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD0_CORESIGHT_TRACE0 0x164 0x3CC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_GPIO4_IO2 0x164 0x3CC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_CCMSRCGPCMIX_BOOT_CFG0 0x164 0x3CC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD0_SIM_M_HADDR17 0x164 0x3CC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_SAI1_RX_DATA1 0x168 0x3D0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_SAI5_RX_DATA1 0x168 0x3D0 0x4D8 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXD1_PDM_DATA1 0x168 0x3D0 0x538 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD1_CORESIGHT_TRACE1 0x168 0x3D0 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_GPIO4_IO3 0x168 0x3D0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_CCMSRCGPCMIX_BOOT_CFG1 0x168 0x3D0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD1_SIM_M_HADDR18 0x168 0x3D0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_SAI1_RX_DATA2 0x16C 0x3D4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_SAI5_RX_DATA2 0x16C 0x3D4 0x4DC 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_RXD2_PDM_DATA2 0x16C 0x3D4 0x53C 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD2_CORESIGHT_TRACE2 0x16C 0x3D4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_GPIO4_IO4 0x16C 0x3D4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_CCMSRCGPCMIX_BOOT_CFG2 0x16C 0x3D4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD2_SIM_M_HADDR19 0x16C 0x3D4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_SAI1_RX_DATA3 0x170 0x3D8 0x4E0 0x0 0x1 +#define MX8MM_IOMUXC_SAI1_RXD3_SAI5_RX_DATA3 0x170 0x3D8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_PDM_DATA3 0x170 0x3D8 0x540 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD3_CORESIGHT_TRACE3 0x170 0x3D8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_GPIO4_IO5 0x170 0x3D8 0x000 0x5 0x0 +#define 
MX8MM_IOMUXC_SAI1_RXD3_CCMSRCGPCMIX_BOOT_CFG3 0x170 0x3D8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD3_SIM_M_HADDR20 0x170 0x3D8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SAI1_RX_DATA4 0x174 0x3DC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SAI6_TX_BCLK 0x174 0x3DC 0x51C 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SAI6_RX_BCLK 0x174 0x3DC 0x510 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_CORESIGHT_TRACE4 0x174 0x3DC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_GPIO4_IO6 0x174 0x3DC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_CCMSRCGPCMIX_BOOT_CFG4 0x174 0x3DC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD4_SIM_M_HADDR21 0x174 0x3DC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI1_RX_DATA5 0x178 0x3E0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI6_TX_DATA0 0x178 0x3E0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI6_RX_DATA0 0x178 0x3E0 0x514 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SAI1_RX_SYNC 0x178 0x3E0 0x4C4 0x3 0x1 +#define MX8MM_IOMUXC_SAI1_RXD5_CORESIGHT_TRACE5 0x178 0x3E0 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_GPIO4_IO7 0x178 0x3E0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_CCMSRCGPCMIX_BOOT_CFG5 0x178 0x3E0 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD5_SIM_M_HADDR22 0x178 0x3E0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SAI1_RX_DATA6 0x17C 0x3E4 0x520 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SAI6_TX_SYNC 0x17C 0x3E4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SAI6_RX_SYNC 0x17C 0x3E4 0x518 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_CORESIGHT_TRACE6 0x17C 0x3E4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_GPIO4_IO8 0x17C 0x3E4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_CCMSRCGPCMIX_BOOT_CFG6 0x17C 0x3E4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD6_SIM_M_HADDR23 0x17C 0x3E4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_RX_DATA7 0x180 0x3E8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI6_MCLK 0x180 0x3E8 0x530 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_TX_SYNC 0x180 0x3E8 0x4CC 0x2 0x4 +#define MX8MM_IOMUXC_SAI1_RXD7_SAI1_TX_DATA4 0x180 0x3E8 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_CORESIGHT_TRACE7 0x180 0x3E8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_GPIO4_IO9 0x180 0x3E8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_CCMSRCGPCMIX_BOOT_CFG7 0x180 0x3E8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_RXD7_SIM_M_HADDR24 0x180 0x3E8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXFS_SAI1_TX_SYNC 0x184 0x3EC 0x4CC 0x0 0x3 +#define MX8MM_IOMUXC_SAI1_TXFS_SAI5_TX_SYNC 0x184 0x3EC 0x4EC 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXFS_CORESIGHT_EVENTO 0x184 0x3EC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXFS_GPIO4_IO10 0x184 0x3EC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXFS_SIM_M_HADDR25 0x184 0x3EC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXC_SAI1_TX_BCLK 0x188 0x3F0 0x4C8 0x0 0x1 +#define MX8MM_IOMUXC_SAI1_TXC_SAI5_TX_BCLK 0x188 0x3F0 0x4E8 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXC_CORESIGHT_EVENTI 0x188 0x3F0 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXC_GPIO4_IO11 0x188 0x3F0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXC_SIM_M_HADDR26 0x188 0x3F0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_SAI1_TX_DATA0 0x18C 0x3F4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_SAI5_TX_DATA0 0x18C 0x3F4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_CORESIGHT_TRACE8 0x18C 0x3F4 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_GPIO4_IO12 0x18C 0x3F4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_CCMSRCGPCMIX_BOOT_CFG8 0x18C 0x3F4 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD0_SIM_M_HADDR27 0x18C 0x3F4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_SAI1_TX_DATA1 0x190 0x3F8 0x000 0x0 0x0 
+#define MX8MM_IOMUXC_SAI1_TXD1_SAI5_TX_DATA1 0x190 0x3F8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_CORESIGHT_TRACE9 0x190 0x3F8 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_GPIO4_IO13 0x190 0x3F8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_CCMSRCGPCMIX_BOOT_CFG9 0x190 0x3F8 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD1_SIM_M_HADDR28 0x190 0x3F8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_SAI1_TX_DATA2 0x194 0x3FC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_SAI5_TX_DATA2 0x194 0x3FC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_CORESIGHT_TRACE10 0x194 0x3FC 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_GPIO4_IO14 0x194 0x3FC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_CCMSRCGPCMIX_BOOT_CFG10 0x194 0x3FC 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD2_SIM_M_HADDR29 0x194 0x3FC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_SAI1_TX_DATA3 0x198 0x400 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_SAI5_TX_DATA3 0x198 0x400 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_CORESIGHT_TRACE11 0x198 0x400 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_GPIO4_IO15 0x198 0x400 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_CCMSRCGPCMIX_BOOT_CFG11 0x198 0x400 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD3_SIM_M_HADDR30 0x198 0x400 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_SAI1_TX_DATA4 0x19C 0x404 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_SAI6_RX_BCLK 0x19C 0x404 0x510 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD4_SAI6_TX_BCLK 0x19C 0x404 0x51C 0x2 0x1 +#define MX8MM_IOMUXC_SAI1_TXD4_CORESIGHT_TRACE12 0x19C 0x404 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_GPIO4_IO16 0x19C 0x404 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_CCMSRCGPCMIX_BOOT_CFG12 0x19C 0x404 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD4_SIM_M_HADDR31 0x19C 0x404 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_SAI1_TX_DATA5 0x1A0 0x408 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_SAI6_RX_DATA0 0x1A0 0x408 0x514 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD5_SAI6_TX_DATA0 0x1A0 0x408 0x000 0x2 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_CORESIGHT_TRACE13 0x1A0 0x408 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_GPIO4_IO17 0x1A0 0x408 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_CCMSRCGPCMIX_BOOT_CFG13 0x1A0 0x408 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD5_SIM_M_HBURST0 0x1A0 0x408 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_SAI1_TX_DATA6 0x1A4 0x40C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_SAI6_RX_SYNC 0x1A4 0x40C 0x518 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD6_SAI6_TX_SYNC 0x1A4 0x40C 0x520 0x2 0x1 +#define MX8MM_IOMUXC_SAI1_TXD6_CORESIGHT_TRACE14 0x1A4 0x40C 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_GPIO4_IO18 0x1A4 0x40C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_CCMSRCGPCMIX_BOOT_CFG14 0x1A4 0x40C 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD6_SIM_M_HBURST1 0x1A4 0x40C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_SAI1_TX_DATA7 0x1A8 0x410 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_SAI6_MCLK 0x1A8 0x410 0x530 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_TXD7_PDM_CLK 0x1A8 0x410 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_CORESIGHT_TRACE15 0x1A8 0x410 0x000 0x4 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_GPIO4_IO19 0x1A8 0x410 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_CCMSRCGPCMIX_BOOT_CFG15 0x1A8 0x410 0x000 0x6 0x0 +#define MX8MM_IOMUXC_SAI1_TXD7_SIM_M_HBURST2 0x1A8 0x410 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_SAI1_MCLK 0x1AC 0x414 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_SAI5_MCLK 0x1AC 0x414 0x52C 0x1 0x1 +#define MX8MM_IOMUXC_SAI1_MCLK_SAI1_TX_BCLK 0x1AC 0x414 0x4C8 0x2 0x2 +#define MX8MM_IOMUXC_SAI1_MCLK_PDM_CLK 0x1AC 
0x414 0x000 0x3 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_GPIO4_IO20 0x1AC 0x414 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI1_MCLK_SIM_M_HRESP 0x1AC 0x414 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_RXFS_SAI2_RX_SYNC 0x1B0 0x418 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_RXFS_SAI5_TX_SYNC 0x1B0 0x418 0x4EC 0x1 0x2 +#define MX8MM_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x1B0 0x418 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_RXFS_SIM_M_HSIZE0 0x1B0 0x418 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_RXC_SAI2_RX_BCLK 0x1B4 0x41C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_RXC_SAI5_TX_BCLK 0x1B4 0x41C 0x4E8 0x1 0x2 +#define MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x1B4 0x41C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_RXC_SIM_M_HSIZE1 0x1B4 0x41C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0x1B8 0x420 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_SAI5_TX_DATA0 0x1B8 0x420 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_GPIO4_IO23 0x1B8 0x420 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_RXD0_SIM_M_HSIZE2 0x1B8 0x420 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0x1BC 0x424 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_SAI5_TX_DATA1 0x1BC 0x424 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_GPIO4_IO24 0x1BC 0x424 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_TXFS_SIM_M_HWRITE 0x1BC 0x424 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0x1C0 0x428 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_SAI5_TX_DATA2 0x1C0 0x428 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_GPIO4_IO25 0x1C0 0x428 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_TXC_SIM_M_HREADYOUT 0x1C0 0x428 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0x1C4 0x42C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_SAI5_TX_DATA3 0x1C4 0x42C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_GPIO4_IO26 0x1C4 0x42C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_TXD0_TPSMP_CLK 0x1C4 0x42C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI2_MCLK_SAI2_MCLK 0x1C8 0x430 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI2_MCLK_SAI5_MCLK 0x1C8 0x430 0x52C 0x1 0x2 +#define MX8MM_IOMUXC_SAI2_MCLK_GPIO4_IO27 0x1C8 0x430 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI2_MCLK_TPSMP_HDATA_DIR 0x1C8 0x430 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_SAI3_RX_SYNC 0x1CC 0x434 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_GPT1_CAPTURE1 0x1CC 0x434 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_SAI5_RX_SYNC 0x1CC 0x434 0x4E4 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0x1D4 0x43C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_GPT1_COMPARE1 0x1D4 0x43C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_SAI5_RX_DATA0 0x1D4 0x43C 0x4D4 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0 +#define 
MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0x1DC 0x444 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_TXC_GPT1_COMPARE2 0x1DC 0x444 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_TXC_SAI5_RX_DATA2 0x1DC 0x444 0x4DC 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_TXC_GPIO5_IO0 0x1DC 0x444 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_TXC_TPSMP_HDATA2 0x1DC 0x444 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0x1E0 0x448 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_GPT1_COMPARE3 0x1E0 0x448 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_SAI5_RX_DATA3 0x1E0 0x448 0x4E0 0x2 0x2 +#define MX8MM_IOMUXC_SAI3_TXD_GPIO5_IO1 0x1E0 0x448 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_TXD_TPSMP_HDATA3 0x1E0 0x448 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_SAI3_MCLK 0x1E4 0x44C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_PWM4_OUT 0x1E4 0x44C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_SAI5_MCLK 0x1E4 0x44C 0x52C 0x2 0x3 +#define MX8MM_IOMUXC_SAI3_MCLK_GPIO5_IO2 0x1E4 0x44C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SAI3_MCLK_TPSMP_HDATA4 0x1E4 0x44C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_SPDIF1_OUT 0x1E8 0x450 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_PWM3_OUT 0x1E8 0x450 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_GPIO5_IO3 0x1E8 0x450 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SPDIF_TX_TPSMP_HDATA5 0x1E8 0x450 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_SPDIF1_IN 0x1EC 0x454 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT 0x1EC 0x454 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_GPIO5_IO4 0x1EC 0x454 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SPDIF_RX_TPSMP_HDATA6 0x1EC 0x454 0x000 0x7 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_SPDIF1_EXT_CLK 0x1F0 0x458 0x000 0x0 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_PWM1_OUT 0x1F0 0x458 0x000 0x1 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0x1F0 0x458 0x000 0x5 0x0 +#define MX8MM_IOMUXC_SPDIF_EXT_CLK_TPSMP_HDATA7 0x1F0 0x458 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x1F4 0x45C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x1F4 0x45C 0x504 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DTE_TX 0x1F4 0x45C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_GPIO5_IO6 0x1F4 0x45C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_SCLK_TPSMP_HDATA8 0x1F4 0x45C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x1F8 0x460 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x1F8 0x460 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DTE_RX 0x1F8 0x460 0x504 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI1_MOSI_GPIO5_IO7 0x1F8 0x460 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_MOSI_TPSMP_HDATA9 0x1F8 0x460 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x1FC 0x464 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x1FC 0x464 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_UART3_DTE_RTS_B 0x1FC 0x464 0x500 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_GPIO5_IO8 0x1FC 0x464 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_MISO_TPSMP_HDATA10 0x1FC 0x464 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_ECSPI1_SS0 0x200 0x468 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x200 0x468 0x500 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI1_SS0_UART3_DTE_CTS_B 0x200 0x468 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x200 0x468 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI1_SS0_TPSMP_HDATA11 0x200 0x468 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x204 0x46C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_UART4_DCE_RX 0x204 0x46C 0x50C 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_UART4_DTE_TX 0x204 0x46C 0x000 0x1 0x0 +#define 
MX8MM_IOMUXC_ECSPI2_SCLK_GPIO5_IO10 0x204 0x46C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI2_SCLK_TPSMP_HDATA12 0x204 0x46C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x208 0x470 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_UART4_DCE_TX 0x208 0x470 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_UART4_DTE_RX 0x208 0x470 0x50C 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI2_MOSI_GPIO5_IO11 0x208 0x470 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI2_MOSI_TPSMP_HDATA13 0x208 0x470 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x20C 0x474 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_UART4_DCE_CTS_B 0x20C 0x474 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_UART4_DTE_RTS_B 0x20C 0x474 0x508 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_GPIO5_IO12 0x20C 0x474 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI2_MISO_TPSMP_HDATA14 0x20C 0x474 0x000 0x7 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_ECSPI2_SS0 0x210 0x478 0x000 0x0 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_UART4_DCE_RTS_B 0x210 0x478 0x508 0x1 0x1 +#define MX8MM_IOMUXC_ECSPI2_SS0_UART4_DTE_CTS_B 0x210 0x478 0x000 0x1 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x210 0x478 0x000 0x5 0x0 +#define MX8MM_IOMUXC_ECSPI2_SS0_TPSMP_HDATA15 0x210 0x478 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x214 0x47C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_ENET1_MDC 0x214 0x47C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_GPIO5_IO14 0x214 0x47C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C1_SCL_TPSMP_HDATA16 0x214 0x47C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x218 0x480 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C1_SDA_ENET1_MDIO 0x218 0x480 0x4C0 0x1 0x2 +#define MX8MM_IOMUXC_I2C1_SDA_GPIO5_IO15 0x218 0x480 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C1_SDA_TPSMP_HDATA17 0x218 0x480 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x21C 0x484 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_ENET1_1588_EVENT1_IN 0x21C 0x484 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_GPIO5_IO16 0x21C 0x484 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C2_SCL_TPSMP_HDATA18 0x21C 0x484 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x220 0x488 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_ENET1_1588_EVENT1_OUT 0x220 0x488 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_GPIO5_IO17 0x220 0x488 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C2_SDA_TPSMP_HDATA19 0x220 0x488 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x224 0x48C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_PWM4_OUT 0x224 0x48C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_GPT2_CLK 0x224 0x48C 0x000 0x2 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_GPIO5_IO18 0x224 0x48C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C3_SCL_TPSMP_HDATA20 0x224 0x48C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x228 0x490 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_PWM3_OUT 0x228 0x490 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_GPT3_CLK 0x228 0x490 0x000 0x2 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_GPIO5_IO19 0x228 0x490 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C3_SDA_TPSMP_HDATA21 0x228 0x490 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x22C 0x494 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_PWM2_OUT 0x22C 0x494 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_PCIE1_CLKREQ_B 0x22C 0x494 0x524 0x12 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_GPIO5_IO20 0x22C 0x494 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C4_SCL_TPSMP_HDATA22 0x22C 0x494 0x000 0x7 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x230 0x498 0x000 0x0 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_PWM1_OUT 0x230 0x498 0x000 0x1 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_PCIE2_CLKREQ_B 0x230 0x498 
0x528 0x2 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_GPIO5_IO21 0x230 0x498 0x000 0x5 0x0 +#define MX8MM_IOMUXC_I2C4_SDA_TPSMP_HDATA23 0x230 0x498 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x234 0x49C 0x4F4 0x0 0x0 +#define MX8MM_IOMUXC_UART1_RXD_UART1_DTE_TX 0x234 0x49C 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART1_RXD_ECSPI3_SCLK 0x234 0x49C 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0 +#define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x23C 0x4A4 0x4FC 0x0 0x0 +#define MX8MM_IOMUXC_UART2_RXD_UART2_DTE_TX 0x23C 0x4A4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART2_RXD_ECSPI3_MISO 0x23C 0x4A4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART2_RXD_GPIO5_IO24 0x23C 0x4A4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART2_RXD_TPSMP_HDATA26 0x23C 0x4A4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x240 0x4A8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART2_TXD_UART2_DTE_RX 0x240 0x4A8 0x4FC 0x0 0x1 +#define MX8MM_IOMUXC_UART2_TXD_ECSPI3_SS0 0x240 0x4A8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART2_TXD_GPIO5_IO25 0x240 0x4A8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART2_TXD_TPSMP_HDATA27 0x240 0x4A8 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x244 0x4AC 0x504 0x0 0x2 +#define MX8MM_IOMUXC_UART3_RXD_UART3_DTE_TX 0x244 0x4AC 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x244 0x4AC 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART3_RXD_UART1_DTE_RTS_B 0x244 0x4AC 0x4F0 0x1 0x0 +#define MX8MM_IOMUXC_UART3_RXD_GPIO5_IO26 0x244 0x4AC 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART3_RXD_TPSMP_HDATA28 0x244 0x4AC 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x248 0x4B0 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART3_TXD_UART3_DTE_RX 0x248 0x4B0 0x504 0x0 0x3 +#define MX8MM_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x248 0x4B0 0x4F0 0x1 0x1 +#define MX8MM_IOMUXC_UART3_TXD_UART1_DTE_CTS_B 0x248 0x4B0 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART3_TXD_GPIO5_IO27 0x248 0x4B0 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART3_TXD_TPSMP_HDATA29 0x248 0x4B0 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x24C 0x4B4 0x50C 0x0 0x2 +#define MX8MM_IOMUXC_UART4_RXD_UART4_DTE_TX 0x24C 0x4B4 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART4_RXD_UART2_DCE_CTS_B 0x24C 0x4B4 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART4_RXD_UART2_DTE_RTS_B 0x24C 0x4B4 0x4F8 0x1 0x0 +#define MX8MM_IOMUXC_UART4_RXD_PCIE1_CLKREQ_B 0x24C 0x4B4 0x524 0x2 0x1 +#define MX8MM_IOMUXC_UART4_RXD_GPIO5_IO28 0x24C 0x4B4 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART4_RXD_TPSMP_HDATA30 0x24C 0x4B4 0x000 0x7 0x0 +#define MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x250 0x4B8 0x000 0x0 0x0 +#define MX8MM_IOMUXC_UART4_TXD_UART4_DTE_RX 0x250 0x4B8 0x50C 0x0 0x3 +#define MX8MM_IOMUXC_UART4_TXD_UART2_DCE_RTS_B 0x250 0x4B8 0x4F8 0x1 0x1 +#define MX8MM_IOMUXC_UART4_TXD_UART2_DTE_CTS_B 0x250 0x4B8 0x000 0x1 0x0 +#define MX8MM_IOMUXC_UART4_TXD_PCIE2_CLKREQ_B 0x250 0x4B8 0x528 0x2 0x1 +#define MX8MM_IOMUXC_UART4_TXD_GPIO5_IO29 0x250 0x4B8 0x000 0x5 0x0 +#define MX8MM_IOMUXC_UART4_TXD_TPSMP_HDATA31 0x250 0x4B8 0x000 0x7 0x0 + +#endif /* __DTS_IMX8MM_PINFUNC_H */ diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts 
b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts index 64acccc4bfcb..54737bf1772f 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts @@ -37,7 +37,19 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_fec1>; phy-mode = "rgmii-id"; + phy-handle = <ðphy0>; + fsl,magic-packet; status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; + }; }; &i2c1 { @@ -137,6 +149,29 @@ status = "okay"; }; +&usb3_phy1 { + status = "okay"; +}; + +&usb_dwc3_1 { + dr_mode = "host"; + status = "okay"; +}; + +&qspi0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_qspi>; + status = "okay"; + + n25q256a: flash@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <1>; + compatible = "micron,n25q256a", "jedec,spi-nor"; + spi-max-frequency = <29000000>; + }; +}; + &usdhc1 { pinctrl-names = "default", "state_100mhz", "state_200mhz"; pinctrl-0 = <&pinctrl_usdhc1>; @@ -195,6 +230,18 @@ >; }; + pinctrl_qspi: qspigrp { + fsl,pins = < + MX8MQ_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x82 + MX8MQ_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x82 + MX8MQ_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x82 + MX8MQ_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x82 + MX8MQ_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x82 + MX8MQ_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x82 + + >; + }; + pinctrl_reg_usdhc2: regusdhc2grpgpio { fsl,pins = < MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41 @@ -227,34 +274,34 @@ pinctrl_usdhc1_100mhz: usdhc1-100grp { fsl,pins = < - MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 - MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 - MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 - MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 - MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 - MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 - MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 - MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 - MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 - MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 - MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 >; }; pinctrl_usdhc1_200mhz: usdhc1-200grp { fsl,pins = < - MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 - MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 - MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 - MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 - MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 - MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 - MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 - MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 - MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 - MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 - MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 + MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f + MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf + MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf + MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf + MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf + MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf + MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf + MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf + MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf + MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf + MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 >; 
}; diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 8e9d6d5ed7b2..9155bd4784eb 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -5,13 +5,13 @@ */ #include +#include #include #include #include "imx8mq-pinfunc.h" / { - /* This should really be the GPC, but we need a driver for this first */ - interrupt-parent = <&gic>; + interrupt-parent = <&gpc>; #address-cells = <2>; #size-cells = <2>; @@ -25,6 +25,9 @@ serial1 = &uart2; serial2 = &uart3; serial3 = &uart4; + spi0 = &ecspi1; + spi1 = &ecspi2; + spi2 = &ecspi3; }; ckil: clock-ckil { @@ -117,6 +120,13 @@ }; }; + pmu { + compatible = "arm,cortex-a53-pmu"; + interrupts = ; + interrupt-parent = <&gic>; + interrupt-affinity = <&A53_0>, <&A53_1>, <&A53_2>, <&A53_3>; + }; + psci { compatible = "arm,psci-1.0"; method = "smc"; @@ -137,6 +147,7 @@ #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x0 0x0 0x3e000000>; + dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>; bus@30000000 { /* AIPS1 */ compatible = "fsl,imx8mq-aips-bus", "simple-bus"; @@ -199,6 +210,30 @@ #interrupt-cells = <2>; }; + wdog1: watchdog@30280000 { + compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; + reg = <0x30280000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_WDOG1_ROOT>; + status = "disabled"; + }; + + wdog2: watchdog@30290000 { + compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; + reg = <0x30290000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_WDOG2_ROOT>; + status = "disabled"; + }; + + wdog3: watchdog@302a0000 { + compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; + reg = <0x302a0000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_WDOG3_ROOT>; + status = "disabled"; + }; + iomuxc: iomuxc@30330000 { compatible = "fsl,imx8mq-iomuxc"; reg = <0x30330000 0x10000>; @@ -215,6 +250,20 @@ interrupts = ; }; + snvs: snvs@30370000 { + compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd"; + reg = <0x30370000 0x10000>; + + snvs_rtc: snvs-rtc-lp{ + compatible = "fsl,sec-v4.0-mon-rtc-lp"; + regmap =<&snvs>; + offset = <0x34>; + interrupts = , + ; + }; + + }; + clk: clock-controller@30380000 { compatible = "fsl,imx8mq-ccm"; reg = <0x30380000 0x10000>; @@ -229,43 +278,172 @@ "clk_ext3", "clk_ext4"; }; - wdog1: watchdog@30280000 { - compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; - reg = <0x30280000 0x10000>; - interrupts = ; - clocks = <&clk IMX8MQ_CLK_WDOG1_ROOT>; + gpc: gpc@303a0000 { + compatible = "fsl,imx8mq-gpc"; + reg = <0x303a0000 0x10000>; + interrupt-parent = <&gic>; + interrupt-controller; + #interrupt-cells = <3>; + + pgc { + #address-cells = <1>; + #size-cells = <0>; + + pgc_mipi: power-domain@0 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_pcie1: power-domain@1 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_otg1: power-domain@2 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_otg2: power-domain@3 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_ddr1: power-domain@4 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_gpu: power-domain@5 { + #power-domain-cells = <0>; + reg = ; + clocks = <&clk IMX8MQ_CLK_GPU_ROOT>, + <&clk IMX8MQ_CLK_GPU_SHADER_DIV>, + <&clk IMX8MQ_CLK_GPU_AXI>, + <&clk IMX8MQ_CLK_GPU_AHB>; + }; + + pgc_vpu: power-domain@6 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_disp: power-domain@7 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_mipi_csi1: power-domain@8 { + #power-domain-cells = <0>; + reg = ; + }; + + pgc_mipi_csi2: power-domain@9 { + #power-domain-cells = <0>; + reg = ; 
+ }; + + pgc_pcie2: power-domain@a { + #power-domain-cells = <0>; + reg = ; + }; + }; + }; + }; + + bus@30400000 { /* AIPS2 */ + compatible = "fsl,imx8mq-aips-bus", "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x30400000 0x30400000 0x400000>; + + pwm1: pwm@30660000 { + compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm"; + reg = <0x30660000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_PWM1_ROOT>, + <&clk IMX8MQ_CLK_PWM1_ROOT>; + clock-names = "ipg", "per"; + #pwm-cells = <2>; status = "disabled"; }; - wdog2: watchdog@30290000 { - compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; - reg = <0x30290000 0x10000>; - interrupts = ; - clocks = <&clk IMX8MQ_CLK_WDOG2_ROOT>; + pwm2: pwm@30670000 { + compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm"; + reg = <0x30670000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_PWM2_ROOT>, + <&clk IMX8MQ_CLK_PWM2_ROOT>; + clock-names = "ipg", "per"; + #pwm-cells = <2>; status = "disabled"; }; - wdog3: watchdog@302a0000 { - compatible = "fsl,imx8mq-wdt", "fsl,imx21-wdt"; - reg = <0x302a0000 0x10000>; - interrupts = ; - clocks = <&clk IMX8MQ_CLK_WDOG3_ROOT>; + pwm3: pwm@30680000 { + compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm"; + reg = <0x30680000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_PWM3_ROOT>, + <&clk IMX8MQ_CLK_PWM3_ROOT>; + clock-names = "ipg", "per"; + #pwm-cells = <2>; status = "disabled"; }; - }; - bus@30400000 { /* AIPS2 */ - compatible = "fsl,imx8mq-aips-bus", "simple-bus"; - #address-cells = <1>; - #size-cells = <1>; - ranges = <0x30400000 0x30400000 0x400000>; + pwm4: pwm@30690000 { + compatible = "fsl,imx8mq-pwm", "fsl,imx27-pwm"; + reg = <0x30690000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_PWM4_ROOT>, + <&clk IMX8MQ_CLK_PWM4_ROOT>; + clock-names = "ipg", "per"; + #pwm-cells = <2>; + status = "disabled"; + }; }; bus@30800000 { /* AIPS3 */ compatible = "fsl,imx8mq-aips-bus", "simple-bus"; #address-cells = <1>; #size-cells = <1>; - ranges = <0x30800000 0x30800000 0x400000>; + ranges = <0x30800000 0x30800000 0x400000>, + <0x08000000 0x08000000 0x10000000>; + + ecspi1: spi@30820000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,imx8mq-ecspi", "fsl,imx51-ecspi"; + reg = <0x30820000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_ECSPI1_ROOT>, + <&clk IMX8MQ_CLK_ECSPI1_ROOT>; + clock-names = "ipg", "per"; + status = "disabled"; + }; + + ecspi2: spi@30830000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,imx8mq-ecspi", "fsl,imx51-ecspi"; + reg = <0x30830000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_ECSPI2_ROOT>, + <&clk IMX8MQ_CLK_ECSPI2_ROOT>; + clock-names = "ipg", "per"; + status = "disabled"; + }; + + ecspi3: spi@30840000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,imx8mq-ecspi", "fsl,imx51-ecspi"; + reg = <0x30840000 0x10000>; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_ECSPI3_ROOT>, + <&clk IMX8MQ_CLK_ECSPI3_ROOT>; + clock-names = "ipg", "per"; + status = "disabled"; + }; uart1: serial@30860000 { compatible = "fsl,imx8mq-uart", @@ -360,6 +538,8 @@ <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, <&clk IMX8MQ_CLK_USDHC1_ROOT>; clock-names = "ipg", "ahb", "per"; + assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>; + assigned-clock-rates = <400000000>; fsl,tuning-start-tap = <20>; fsl,tuning-step = <2>; bus-width = <4>; @@ -381,6 +561,20 @@ status = "disabled"; }; + qspi0: spi@30bb0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,imx8mq-qspi", "fsl,imx7d-qspi"; + reg = <0x30bb0000 0x10000>, + <0x08000000 
0x10000000>; + reg-names = "QuadSPI", "QuadSPI-memory"; + interrupts = ; + clocks = <&clk IMX8MQ_CLK_QSPI_ROOT>, + <&clk IMX8MQ_CLK_QSPI_ROOT>; + clock-names = "qspi_en", "qspi"; + status = "disabled"; + }; + fec1: ethernet@30be0000 { compatible = "fsl,imx8mq-fec", "fsl,imx6sx-fec"; reg = <0x30be0000 0x10000>; @@ -400,6 +594,70 @@ }; }; + usb_dwc3_0: usb@38100000 { + compatible = "fsl,imx8mq-dwc3", "snps,dwc3"; + reg = <0x38100000 0x10000>; + clocks = <&clk IMX8MQ_CLK_USB_BUS>, + <&clk IMX8MQ_CLK_USB_CORE_REF>, + <&clk IMX8MQ_CLK_USB1_CTRL_ROOT>; + clock-names = "bus_early", "ref", "suspend"; + assigned-clocks = <&clk IMX8MQ_CLK_USB_BUS>, + <&clk IMX8MQ_CLK_USB_CORE_REF>; + assigned-clock-parents = <&clk IMX8MQ_SYS2_PLL_500M>, + <&clk IMX8MQ_SYS1_PLL_100M>; + assigned-clock-rates = <500000000>, <100000000>; + interrupts = ; + phys = <&usb3_phy0>, <&usb3_phy0>; + phy-names = "usb2-phy", "usb3-phy"; + power-domains = <&pgc_otg1>; + usb3-resume-missing-cas; + status = "disabled"; + }; + + usb3_phy0: usb-phy@381f0040 { + compatible = "fsl,imx8mq-usb-phy"; + reg = <0x381f0040 0x40>; + clocks = <&clk IMX8MQ_CLK_USB1_PHY_ROOT>; + clock-names = "phy"; + assigned-clocks = <&clk IMX8MQ_CLK_USB_PHY_REF>; + assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_100M>; + assigned-clock-rates = <100000000>; + #phy-cells = <0>; + status = "disabled"; + }; + + usb_dwc3_1: usb@38200000 { + compatible = "fsl,imx8mq-dwc3", "snps,dwc3"; + reg = <0x38200000 0x10000>; + clocks = <&clk IMX8MQ_CLK_USB_BUS>, + <&clk IMX8MQ_CLK_USB_CORE_REF>, + <&clk IMX8MQ_CLK_USB2_CTRL_ROOT>; + clock-names = "bus_early", "ref", "suspend"; + assigned-clocks = <&clk IMX8MQ_CLK_USB_BUS>, + <&clk IMX8MQ_CLK_USB_CORE_REF>; + assigned-clock-parents = <&clk IMX8MQ_SYS2_PLL_500M>, + <&clk IMX8MQ_SYS1_PLL_100M>; + assigned-clock-rates = <500000000>, <100000000>; + interrupts = ; + phys = <&usb3_phy1>, <&usb3_phy1>; + phy-names = "usb2-phy", "usb3-phy"; + power-domains = <&pgc_otg2>; + usb3-resume-missing-cas; + status = "disabled"; + }; + + usb3_phy1: usb-phy@382f0040 { + compatible = "fsl,imx8mq-usb-phy"; + reg = <0x382f0040 0x40>; + clocks = <&clk IMX8MQ_CLK_USB2_PHY_ROOT>; + clock-names = "phy"; + assigned-clocks = <&clk IMX8MQ_CLK_USB_PHY_REF>; + assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_100M>; + assigned-clock-rates = <100000000>; + #phy-cells = <0>; + status = "disabled"; + }; + gic: interrupt-controller@38800000 { compatible = "arm,gic-v3"; reg = <0x38800000 0x10000>, /* GIC Dist */ diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts new file mode 100644 index 000000000000..03aad66545c5 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2017~2018 NXP + */ + +/dts-v1/; + +#include "imx8qxp.dtsi" + +/ { + model = "Freescale i.MX8QXP MEK"; + compatible = "fsl,imx8qxp-mek", "fsl,imx8qxp"; + + chosen { + stdout-path = &adma_lpuart0; + }; + + memory@80000000 { + device_type = "memory"; + reg = <0x00000000 0x80000000 0 0x40000000>; + }; + + reg_usdhc2_vmmc: usdhc2-vmmc { + compatible = "regulator-fixed"; + regulator-name = "SD1_SPWR"; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; +}; + +&adma_lpuart0 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_lpuart0>; + status = "okay"; +}; + +&fec1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_fec1>; + phy-mode = "rgmii-id"; + phy-handle = 
<ðphy0>; + fsl,magic-packet; + status = "okay"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy0: ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <0>; + }; + + ethphy1: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <1>; + }; + }; +}; + +&usdhc1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc1>; + bus-width = <8>; + no-sd; + no-sdio; + non-removable; + status = "okay"; +}; + +&usdhc2 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc2>; + bus-width = <4>; + vmmc-supply = <®_usdhc2_vmmc>; + cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>; + wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>; + status = "okay"; +}; + +&iomuxc { + pinctrl_fec1: fec1grp { + fsl,pins = < + IMX8QXP_ENET0_MDC_CONN_ENET0_MDC 0x06000020 + IMX8QXP_ENET0_MDIO_CONN_ENET0_MDIO 0x06000020 + IMX8QXP_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL 0x06000020 + IMX8QXP_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC 0x06000020 + IMX8QXP_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 0x06000020 + IMX8QXP_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 0x06000020 + IMX8QXP_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 0x06000020 + IMX8QXP_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 0x06000020 + IMX8QXP_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC 0x06000020 + IMX8QXP_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL 0x06000020 + IMX8QXP_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 0x06000020 + IMX8QXP_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 0x06000020 + IMX8QXP_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 0x06000020 + IMX8QXP_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 0x06000020 + >; + }; + + pinctrl_lpuart0: lpuart0grp { + fsl,pins = < + IMX8QXP_UART0_RX_ADMA_UART0_RX 0x06000020 + IMX8QXP_UART0_TX_ADMA_UART0_TX 0x06000020 + >; + }; + + pinctrl_usdhc1: usdhc1grp { + fsl,pins = < + IMX8QXP_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041 + IMX8QXP_EMMC0_CMD_CONN_EMMC0_CMD 0x00000021 + IMX8QXP_EMMC0_DATA0_CONN_EMMC0_DATA0 0x00000021 + IMX8QXP_EMMC0_DATA1_CONN_EMMC0_DATA1 0x00000021 + IMX8QXP_EMMC0_DATA2_CONN_EMMC0_DATA2 0x00000021 + IMX8QXP_EMMC0_DATA3_CONN_EMMC0_DATA3 0x00000021 + IMX8QXP_EMMC0_DATA4_CONN_EMMC0_DATA4 0x00000021 + IMX8QXP_EMMC0_DATA5_CONN_EMMC0_DATA5 0x00000021 + IMX8QXP_EMMC0_DATA6_CONN_EMMC0_DATA6 0x00000021 + IMX8QXP_EMMC0_DATA7_CONN_EMMC0_DATA7 0x00000021 + IMX8QXP_EMMC0_STROBE_CONN_EMMC0_STROBE 0x00000041 + >; + }; + + pinctrl_usdhc2: usdhc2grp { + fsl,pins = < + IMX8QXP_USDHC1_CLK_CONN_USDHC1_CLK 0x06000041 + IMX8QXP_USDHC1_CMD_CONN_USDHC1_CMD 0x00000021 + IMX8QXP_USDHC1_DATA0_CONN_USDHC1_DATA0 0x00000021 + IMX8QXP_USDHC1_DATA1_CONN_USDHC1_DATA1 0x00000021 + IMX8QXP_USDHC1_DATA2_CONN_USDHC1_DATA2 0x00000021 + IMX8QXP_USDHC1_DATA3_CONN_USDHC1_DATA3 0x00000021 + IMX8QXP_USDHC1_VSELECT_CONN_USDHC1_VSELECT 0x00000021 + >; + }; +}; diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi new file mode 100644 index 000000000000..4c3dd95ed488 --- /dev/null +++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi @@ -0,0 +1,446 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017-2018 NXP + * Dong Aisheng + */ + +#include +#include +#include +#include +#include + +/ { + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + + aliases { + mmc0 = &usdhc1; + mmc1 = &usdhc2; + mmc2 = &usdhc3; + serial0 = &adma_lpuart0; + }; + + cpus { + #address-cells = <2>; + #size-cells = <0>; + + /* We have 1 clusters with 4 Cortex-A35 cores */ + A35_0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a35"; + reg = <0x0 0x0>; + enable-method = "psci"; + next-level-cache = <&A35_L2>; + }; + + A35_1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a35"; + reg = <0x0 0x1>; + enable-method = "psci"; + next-level-cache = <&A35_L2>; + }; + + A35_2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a35"; + reg = <0x0 0x2>; + enable-method = "psci"; + next-level-cache = <&A35_L2>; + }; + + A35_3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a35"; + reg = <0x0 0x3>; + enable-method = "psci"; + next-level-cache = <&A35_L2>; + }; + + A35_L2: l2-cache0 { + compatible = "cache"; + }; + }; + + gic: interrupt-controller@51a00000 { + compatible = "arm,gic-v3"; + reg = <0x0 0x51a00000 0 0x10000>, /* GIC Dist */ + <0x0 0x51b00000 0 0xc0000>; /* GICR (RD_base + SGI_base) */ + #interrupt-cells = <3>; + interrupt-controller; + interrupts = ; + }; + + pmu { + compatible = "arm,armv8-pmuv3"; + interrupts = ; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + scu { + compatible = "fsl,imx-scu"; + mbox-names = "tx0", "tx1", "tx2", "tx3", + "rx0", "rx1", "rx2", "rx3"; + mboxes = <&lsio_mu1 0 0 + &lsio_mu1 0 1 + &lsio_mu1 0 2 + &lsio_mu1 0 3 + &lsio_mu1 1 0 + &lsio_mu1 1 1 + &lsio_mu1 1 2 + &lsio_mu1 1 3>; + + clk: clock-controller { + compatible = "fsl,imx8qxp-clk"; + #clock-cells = <1>; + clocks = <&xtal32k &xtal24m>; + clock-names = "xtal_32KHz", "xtal_24Mhz"; + }; + + iomuxc: pinctrl { + compatible = "fsl,imx8qxp-iomuxc"; + }; + + pd: imx8qx-pd { + compatible = "fsl,imx8qxp-scu-pd"; + #power-domain-cells = <1>; + }; + + rtc: rtc { + compatible = "fsl,imx8qxp-sc-rtc"; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts = , /* Physical Secure */ + , /* Physical Non-Secure */ + , /* Virtual */ + ; /* Hypervisor */ + }; + + xtal32k: clock-xtal32k { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "xtal_32KHz"; + }; + + xtal24m: clock-xtal24m { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <24000000>; + clock-output-names = "xtal_24MHz"; + }; + + adma_subsys: bus@59000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x59000000 0x0 0x59000000 0x2000000>; + + adma_lpcg: clock-controller@59000000 { + compatible = "fsl,imx8qxp-lpcg-adma"; + reg = <0x59000000 0x2000000>; + #clock-cells = <1>; + }; + + adma_lpuart0: serial@5a060000 { + compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart"; + reg = <0x5a060000 0x1000>; + interrupts = ; + interrupt-parent = <&gic>; + clocks = <&adma_lpcg IMX_ADMA_LPCG_UART0_BAUD_CLK>; + clock-names = "ipg"; + power-domains = <&pd IMX_SC_R_UART_0>; + status = "disabled"; + }; + + adma_i2c0: i2c@5a800000 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x5a800000 0x4000>; + interrupts = ; + interrupt-parent = <&gic>; + clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C0_CLK>; + clock-names = "per"; + assigned-clocks = <&clk IMX_ADMA_I2C0_CLK>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_0>; + status = 
"disabled"; + }; + + adma_i2c1: i2c@5a810000 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x5a810000 0x4000>; + interrupts = ; + interrupt-parent = <&gic>; + clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C1_CLK>; + clock-names = "per"; + assigned-clocks = <&clk IMX_ADMA_I2C1_CLK>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_1>; + status = "disabled"; + }; + + adma_i2c2: i2c@5a820000 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x5a820000 0x4000>; + interrupts = ; + interrupt-parent = <&gic>; + clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C2_CLK>; + clock-names = "per"; + assigned-clocks = <&clk IMX_ADMA_I2C2_CLK>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_2>; + status = "disabled"; + }; + + adma_i2c3: i2c@5a830000 { + compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c"; + reg = <0x5a830000 0x4000>; + interrupts = ; + interrupt-parent = <&gic>; + clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C3_CLK>; + clock-names = "per"; + assigned-clocks = <&clk IMX_ADMA_I2C3_CLK>; + assigned-clock-rates = <24000000>; + power-domains = <&pd IMX_SC_R_I2C_3>; + status = "disabled"; + }; + }; + + conn_subsys: bus@5b000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x5b000000 0x0 0x5b000000 0x1000000>; + + conn_lpcg: clock-controller@5b200000 { + compatible = "fsl,imx8qxp-lpcg-conn"; + reg = <0x5b200000 0xb0000>; + #clock-cells = <1>; + }; + + usdhc1: mmc@5b010000 { + compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; + interrupt-parent = <&gic>; + interrupts = ; + reg = <0x5b010000 0x10000>; + clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC0_IPG_CLK>, + <&conn_lpcg IMX_CONN_LPCG_SDHC0_PER_CLK>, + <&conn_lpcg IMX_CONN_LPCG_SDHC0_HCLK>; + clock-names = "ipg", "per", "ahb"; + assigned-clocks = <&clk IMX_CONN_SDHC0_CLK>; + assigned-clock-rates = <200000000>; + power-domains = <&pd IMX_SC_R_SDHC_0>; + status = "disabled"; + }; + + usdhc2: mmc@5b020000 { + compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; + interrupt-parent = <&gic>; + interrupts = ; + reg = <0x5b020000 0x10000>; + clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC1_IPG_CLK>, + <&conn_lpcg IMX_CONN_LPCG_SDHC1_PER_CLK>, + <&conn_lpcg IMX_CONN_LPCG_SDHC1_HCLK>; + clock-names = "ipg", "per", "ahb"; + assigned-clocks = <&clk IMX_CONN_SDHC1_CLK>; + assigned-clock-rates = <200000000>; + power-domains = <&pd IMX_SC_R_SDHC_1>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; + status = "disabled"; + }; + + usdhc3: mmc@5b030000 { + compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc"; + interrupt-parent = <&gic>; + interrupts = ; + reg = <0x5b030000 0x10000>; + clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC2_IPG_CLK>, + <&conn_lpcg IMX_CONN_LPCG_SDHC2_PER_CLK>, + <&conn_lpcg IMX_CONN_LPCG_SDHC2_HCLK>; + clock-names = "ipg", "per", "ahb"; + assigned-clocks = <&clk IMX_CONN_SDHC2_CLK>; + assigned-clock-rates = <200000000>; + power-domains = <&pd IMX_SC_R_SDHC_2>; + status = "disabled"; + }; + + fec1: ethernet@5b040000 { + compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec"; + reg = <0x5b040000 0x10000>; + interrupts = , + , + , + ; + clocks = <&conn_lpcg IMX_CONN_LPCG_ENET0_IPG_CLK>, + <&conn_lpcg IMX_CONN_LPCG_ENET0_AHB_CLK>, + <&conn_lpcg IMX_CONN_LPCG_ENET0_TX_CLK>, + <&conn_lpcg IMX_CONN_LPCG_ENET0_ROOT_CLK>; + clock-names = "ipg", "ahb", "enet_clk_ref", "ptp"; + fsl,num-tx-queues=<3>; + fsl,num-rx-queues=<3>; + power-domains = <&pd IMX_SC_R_ENET_0>; + status = "disabled"; + }; + + fec2: ethernet@5b050000 { + compatible = "fsl,imx8qxp-fec", 
"fsl,imx6sx-fec"; + reg = <0x5b050000 0x10000>; + interrupts = , + , + , + ; + clocks = <&conn_lpcg IMX_CONN_LPCG_ENET1_IPG_CLK>, + <&conn_lpcg IMX_CONN_LPCG_ENET1_AHB_CLK>, + <&conn_lpcg IMX_CONN_LPCG_ENET1_TX_CLK>, + <&conn_lpcg IMX_CONN_LPCG_ENET1_ROOT_CLK>; + clock-names = "ipg", "ahb", "enet_clk_ref", "ptp"; + fsl,num-tx-queues=<3>; + fsl,num-rx-queues=<3>; + power-domains = <&pd IMX_SC_R_ENET_1>; + status = "disabled"; + }; + }; + + lsio_subsys: bus@5d000000 { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x5d000000 0x0 0x5d000000 0x1000000>; + + lsio_lpcg: clock-controller@5d400000 { + compatible = "fsl,imx8qxp-lpcg-lsio"; + reg = <0x5d400000 0x400000>; + #clock-cells = <1>; + }; + + lsio_mu0: mailbox@5d1b0000 { + compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; + reg = <0x5d1b0000 0x10000>; + interrupts = ; + #mbox-cells = <0>; + status = "disabled"; + }; + + lsio_mu1: mailbox@5d1c0000 { + compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; + reg = <0x5d1c0000 0x10000>; + interrupts = ; + #mbox-cells = <2>; + }; + + lsio_mu3: mailbox@5d1e0000 { + compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; + reg = <0x5d1e0000 0x10000>; + interrupts = ; + #mbox-cells = <0>; + status = "disabled"; + }; + + lsio_mu4: mailbox@5d1f0000 { + compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu"; + reg = <0x5d1f0000 0x10000>; + interrupts = ; + #mbox-cells = <0>; + status = "disabled"; + }; + + lsio_gpio0: gpio@5d080000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d080000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_0>; + }; + + lsio_gpio1: gpio@5d090000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d090000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_1>; + }; + + lsio_gpio2: gpio@5d0a0000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d0a0000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_2>; + }; + + lsio_gpio3: gpio@5d0b0000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d0b0000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_3>; + }; + + lsio_gpio4: gpio@5d0c0000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d0c0000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_4>; + }; + + lsio_gpio5: gpio@5d0d0000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d0d0000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_5>; + }; + + lsio_gpio6: gpio@5d0e0000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d0e0000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd IMX_SC_R_GPIO_6>; + }; + + lsio_gpio7: gpio@5d0f0000 { + compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio"; + reg = <0x5d0f0000 0x10000>; + interrupts = ; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + power-domains = <&pd 
IMX_SC_R_GPIO_7>; + }; + }; +}; diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts index 46435466f1ab..e035cf195b19 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts @@ -581,8 +581,7 @@ sd-uhs-sdr50; sd-uhs-sdr104; disable-wp; - cd-inverted; - cd-gpios = <&gpio25 3 0>; + cd-gpios = <&gpio25 3 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&sd_pmx_func &sd_clk_cfg_func diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi index 20ae40df61d5..2f19e0e5b7cf 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi @@ -56,7 +56,7 @@ }; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x0>; enable-method = "psci"; @@ -70,7 +70,7 @@ }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x1>; enable-method = "psci"; @@ -83,7 +83,7 @@ }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x2>; enable-method = "psci"; @@ -96,7 +96,7 @@ }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x3>; enable-method = "psci"; @@ -109,7 +109,7 @@ }; cpu4: cpu@100 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x100>; enable-method = "psci"; @@ -123,7 +123,7 @@ }; cpu5: cpu@101 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x101>; enable-method = "psci"; @@ -136,7 +136,7 @@ }; cpu6: cpu@102 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x102>; enable-method = "psci"; @@ -149,7 +149,7 @@ }; cpu7: cpu@103 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x103>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi index a5bd6d80b226..2ed06e4588b8 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3670.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi3670.dtsi @@ -56,56 +56,56 @@ }; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x0>; enable-method = "psci"; }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x1>; enable-method = "psci"; }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x2>; enable-method = "psci"; }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x3>; enable-method = "psci"; }; cpu4: cpu@100 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x100>; enable-method = "psci"; }; cpu5: cpu@101 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x101>; enable-method = "psci"; }; cpu6: cpu@102 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x102>; enable-method = "psci"; 
}; cpu7: cpu@103 { - compatible = "arm,cortex-a73", "arm,armv8"; + compatible = "arm,cortex-a73"; device_type = "cpu"; reg = <0x0 0x103>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts index 32716c96b457..c563d3eb2d98 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts +++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200-poplar.dts @@ -1,10 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * DTS File for HiSilicon Poplar Development Board * * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. - * - * Released under the GPLv2 only. - * SPDX-License-Identifier: GPL-2.0 */ /dts-v1/; diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi index 7c0fddd7c8cf..13821a0ff524 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi @@ -1,10 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * DTS File for HiSilicon Hi3798cv200 SoC. * * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. - * - * Released under the GPLv2 only. - * SPDX-License-Identifier: GPL-2.0 */ #include diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts index 610235028cc7..c14205cd6bf5 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts @@ -118,6 +118,7 @@ reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>; clocks = <&pmic>; clock-names = "ext_clock"; + post-power-on-delay-ms = <10>; power-off-delay-us = <10>; }; @@ -300,7 +301,6 @@ dwmmc_0: dwmmc0@f723d000 { cap-mmc-highspeed; - mmc-hs200-1_8v; non-removable; bus-width = <0x8>; vmmc-supply = <&ldo19>; diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi index aec9e371c2a7..108e2a4227f6 100644 --- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi @@ -81,7 +81,7 @@ }; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x0>; enable-method = "psci"; @@ -94,7 +94,7 @@ }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x1>; enable-method = "psci"; @@ -107,7 +107,7 @@ }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x2>; enable-method = "psci"; @@ -120,7 +120,7 @@ }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x3>; enable-method = "psci"; @@ -133,7 +133,7 @@ }; cpu4: cpu@100 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x100>; enable-method = "psci"; @@ -146,7 +146,7 @@ }; cpu5: cpu@101 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x101>; enable-method = "psci"; @@ -159,7 +159,7 @@ }; cpu6: cpu@102 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x102>; enable-method = "psci"; @@ -172,7 +172,7 @@ }; cpu7: cpu@103 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0 0x103>; enable-method = "psci"; @@ -319,6 +319,8 @@ clock-names = "uartclk", "apb_pclk"; pinctrl-names = "default"; pinctrl-0 = <&uart1_pmx_func 
&uart1_cfg_func1 &uart1_cfg_func2>; + dmas = <&dma0 8 &dma0 9>; + dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/hisilicon/hip05.dtsi b/arch/arm64/boot/dts/hisilicon/hip05.dtsi index 4b472a302cd8..d321edc09c3f 100644 --- a/arch/arm64/boot/dts/hisilicon/hip05.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip05.dtsi @@ -87,7 +87,7 @@ cpu0: cpu@20000 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20000>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -95,7 +95,7 @@ cpu1: cpu@20001 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20001>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -103,7 +103,7 @@ cpu2: cpu@20002 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20002>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -111,7 +111,7 @@ cpu3: cpu@20003 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20003>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -119,7 +119,7 @@ cpu4: cpu@20100 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20100>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -127,7 +127,7 @@ cpu5: cpu@20101 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20101>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -135,7 +135,7 @@ cpu6: cpu@20102 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20102>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -143,7 +143,7 @@ cpu7: cpu@20103 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20103>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -151,7 +151,7 @@ cpu8: cpu@20200 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20200>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -159,7 +159,7 @@ cpu9: cpu@20201 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20201>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -167,7 +167,7 @@ cpu10: cpu@20202 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20202>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -175,7 +175,7 @@ cpu11: cpu@20203 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20203>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -183,7 +183,7 @@ cpu12: cpu@20300 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20300>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -191,7 +191,7 @@ cpu13: cpu@20301 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20301>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -199,7 +199,7 @@ cpu14: cpu@20302 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20302>; enable-method = "psci"; next-level-cache = 
<&cluster3_l2>; @@ -207,7 +207,7 @@ cpu15: cpu@20303 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x20303>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; diff --git a/arch/arm64/boot/dts/hisilicon/hip06.dtsi b/arch/arm64/boot/dts/hisilicon/hip06.dtsi index d78a6a755d03..56625587b6de 100644 --- a/arch/arm64/boot/dts/hisilicon/hip06.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip06.dtsi @@ -87,7 +87,7 @@ cpu0: cpu@10000 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10000>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -95,7 +95,7 @@ cpu1: cpu@10001 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10001>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -103,7 +103,7 @@ cpu2: cpu@10002 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10002>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -111,7 +111,7 @@ cpu3: cpu@10003 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10003>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -119,7 +119,7 @@ cpu4: cpu@10100 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10100>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -127,7 +127,7 @@ cpu5: cpu@10101 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10101>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -135,7 +135,7 @@ cpu6: cpu@10102 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10102>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -143,7 +143,7 @@ cpu7: cpu@10103 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10103>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -151,7 +151,7 @@ cpu8: cpu@10200 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10200>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -159,7 +159,7 @@ cpu9: cpu@10201 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10201>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -167,7 +167,7 @@ cpu10: cpu@10202 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10202>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -175,7 +175,7 @@ cpu11: cpu@10203 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10203>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -183,7 +183,7 @@ cpu12: cpu@10300 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10300>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -191,7 +191,7 @@ cpu13: cpu@10301 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10301>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -199,7 +199,7 @@ cpu14: cpu@10302 { device_type = "cpu"; - compatible = "arm,cortex-a57", 
"arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10302>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -207,7 +207,7 @@ cpu15: cpu@10303 { device_type = "cpu"; - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x10303>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; diff --git a/arch/arm64/boot/dts/hisilicon/hip07.dtsi b/arch/arm64/boot/dts/hisilicon/hip07.dtsi index c33adefc3061..28bd4389441f 100644 --- a/arch/arm64/boot/dts/hisilicon/hip07.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip07.dtsi @@ -270,7 +270,7 @@ cpu0: cpu@10000 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10000>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -279,7 +279,7 @@ cpu1: cpu@10001 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10001>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -288,7 +288,7 @@ cpu2: cpu@10002 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10002>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -297,7 +297,7 @@ cpu3: cpu@10003 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10003>; enable-method = "psci"; next-level-cache = <&cluster0_l2>; @@ -306,7 +306,7 @@ cpu4: cpu@10100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10100>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -315,7 +315,7 @@ cpu5: cpu@10101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10101>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -324,7 +324,7 @@ cpu6: cpu@10102 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10102>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -333,7 +333,7 @@ cpu7: cpu@10103 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10103>; enable-method = "psci"; next-level-cache = <&cluster1_l2>; @@ -342,7 +342,7 @@ cpu8: cpu@10200 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10200>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -351,7 +351,7 @@ cpu9: cpu@10201 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10201>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -360,7 +360,7 @@ cpu10: cpu@10202 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10202>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -369,7 +369,7 @@ cpu11: cpu@10203 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10203>; enable-method = "psci"; next-level-cache = <&cluster2_l2>; @@ -378,7 +378,7 @@ cpu12: cpu@10300 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10300>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -387,7 +387,7 @@ cpu13: cpu@10301 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10301>; enable-method = "psci"; next-level-cache = 
<&cluster3_l2>; @@ -396,7 +396,7 @@ cpu14: cpu@10302 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10302>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -405,7 +405,7 @@ cpu15: cpu@10303 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x10303>; enable-method = "psci"; next-level-cache = <&cluster3_l2>; @@ -414,7 +414,7 @@ cpu16: cpu@30000 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30000>; enable-method = "psci"; next-level-cache = <&cluster4_l2>; @@ -423,7 +423,7 @@ cpu17: cpu@30001 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30001>; enable-method = "psci"; next-level-cache = <&cluster4_l2>; @@ -432,7 +432,7 @@ cpu18: cpu@30002 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30002>; enable-method = "psci"; next-level-cache = <&cluster4_l2>; @@ -441,7 +441,7 @@ cpu19: cpu@30003 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30003>; enable-method = "psci"; next-level-cache = <&cluster4_l2>; @@ -450,7 +450,7 @@ cpu20: cpu@30100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30100>; enable-method = "psci"; next-level-cache = <&cluster5_l2>; @@ -459,7 +459,7 @@ cpu21: cpu@30101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30101>; enable-method = "psci"; next-level-cache = <&cluster5_l2>; @@ -468,7 +468,7 @@ cpu22: cpu@30102 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30102>; enable-method = "psci"; next-level-cache = <&cluster5_l2>; @@ -477,7 +477,7 @@ cpu23: cpu@30103 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30103>; enable-method = "psci"; next-level-cache = <&cluster5_l2>; @@ -486,7 +486,7 @@ cpu24: cpu@30200 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30200>; enable-method = "psci"; next-level-cache = <&cluster6_l2>; @@ -495,7 +495,7 @@ cpu25: cpu@30201 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30201>; enable-method = "psci"; next-level-cache = <&cluster6_l2>; @@ -504,7 +504,7 @@ cpu26: cpu@30202 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30202>; enable-method = "psci"; next-level-cache = <&cluster6_l2>; @@ -513,7 +513,7 @@ cpu27: cpu@30203 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30203>; enable-method = "psci"; next-level-cache = <&cluster6_l2>; @@ -522,7 +522,7 @@ cpu28: cpu@30300 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30300>; enable-method = "psci"; next-level-cache = <&cluster7_l2>; @@ -531,7 +531,7 @@ cpu29: cpu@30301 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30301>; enable-method = "psci"; next-level-cache = <&cluster7_l2>; @@ -540,7 +540,7 @@ cpu30: cpu@30302 { device_type = "cpu"; - compatible = "arm,cortex-a72", 
"arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30302>; enable-method = "psci"; next-level-cache = <&cluster7_l2>; @@ -549,7 +549,7 @@ cpu31: cpu@30303 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x30303>; enable-method = "psci"; next-level-cache = <&cluster7_l2>; @@ -558,7 +558,7 @@ cpu32: cpu@50000 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50000>; enable-method = "psci"; next-level-cache = <&cluster8_l2>; @@ -567,7 +567,7 @@ cpu33: cpu@50001 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50001>; enable-method = "psci"; next-level-cache = <&cluster8_l2>; @@ -576,7 +576,7 @@ cpu34: cpu@50002 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50002>; enable-method = "psci"; next-level-cache = <&cluster8_l2>; @@ -585,7 +585,7 @@ cpu35: cpu@50003 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50003>; enable-method = "psci"; next-level-cache = <&cluster8_l2>; @@ -594,7 +594,7 @@ cpu36: cpu@50100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50100>; enable-method = "psci"; next-level-cache = <&cluster9_l2>; @@ -603,7 +603,7 @@ cpu37: cpu@50101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50101>; enable-method = "psci"; next-level-cache = <&cluster9_l2>; @@ -612,7 +612,7 @@ cpu38: cpu@50102 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50102>; enable-method = "psci"; next-level-cache = <&cluster9_l2>; @@ -621,7 +621,7 @@ cpu39: cpu@50103 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50103>; enable-method = "psci"; next-level-cache = <&cluster9_l2>; @@ -630,7 +630,7 @@ cpu40: cpu@50200 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50200>; enable-method = "psci"; next-level-cache = <&cluster10_l2>; @@ -639,7 +639,7 @@ cpu41: cpu@50201 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50201>; enable-method = "psci"; next-level-cache = <&cluster10_l2>; @@ -648,7 +648,7 @@ cpu42: cpu@50202 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50202>; enable-method = "psci"; next-level-cache = <&cluster10_l2>; @@ -657,7 +657,7 @@ cpu43: cpu@50203 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50203>; enable-method = "psci"; next-level-cache = <&cluster10_l2>; @@ -666,7 +666,7 @@ cpu44: cpu@50300 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50300>; enable-method = "psci"; next-level-cache = <&cluster11_l2>; @@ -675,7 +675,7 @@ cpu45: cpu@50301 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50301>; enable-method = "psci"; next-level-cache = <&cluster11_l2>; @@ -684,7 +684,7 @@ cpu46: cpu@50302 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50302>; enable-method = "psci"; next-level-cache = 
<&cluster11_l2>; @@ -693,7 +693,7 @@ cpu47: cpu@50303 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x50303>; enable-method = "psci"; next-level-cache = <&cluster11_l2>; @@ -702,7 +702,7 @@ cpu48: cpu@70000 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70000>; enable-method = "psci"; next-level-cache = <&cluster12_l2>; @@ -711,7 +711,7 @@ cpu49: cpu@70001 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70001>; enable-method = "psci"; next-level-cache = <&cluster12_l2>; @@ -720,7 +720,7 @@ cpu50: cpu@70002 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70002>; enable-method = "psci"; next-level-cache = <&cluster12_l2>; @@ -729,7 +729,7 @@ cpu51: cpu@70003 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70003>; enable-method = "psci"; next-level-cache = <&cluster12_l2>; @@ -738,7 +738,7 @@ cpu52: cpu@70100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70100>; enable-method = "psci"; next-level-cache = <&cluster13_l2>; @@ -747,7 +747,7 @@ cpu53: cpu@70101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70101>; enable-method = "psci"; next-level-cache = <&cluster13_l2>; @@ -756,7 +756,7 @@ cpu54: cpu@70102 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70102>; enable-method = "psci"; next-level-cache = <&cluster13_l2>; @@ -765,7 +765,7 @@ cpu55: cpu@70103 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70103>; enable-method = "psci"; next-level-cache = <&cluster13_l2>; @@ -774,7 +774,7 @@ cpu56: cpu@70200 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70200>; enable-method = "psci"; next-level-cache = <&cluster14_l2>; @@ -783,7 +783,7 @@ cpu57: cpu@70201 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70201>; enable-method = "psci"; next-level-cache = <&cluster14_l2>; @@ -792,7 +792,7 @@ cpu58: cpu@70202 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70202>; enable-method = "psci"; next-level-cache = <&cluster14_l2>; @@ -801,7 +801,7 @@ cpu59: cpu@70203 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70203>; enable-method = "psci"; next-level-cache = <&cluster14_l2>; @@ -810,7 +810,7 @@ cpu60: cpu@70300 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70300>; enable-method = "psci"; next-level-cache = <&cluster15_l2>; @@ -819,7 +819,7 @@ cpu61: cpu@70301 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70301>; enable-method = "psci"; next-level-cache = <&cluster15_l2>; @@ -828,7 +828,7 @@ cpu62: cpu@70302 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70302>; enable-method = "psci"; next-level-cache = <&cluster15_l2>; @@ -837,7 +837,7 @@ cpu63: cpu@70303 { device_type = "cpu"; - compatible = 
"arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x70303>; enable-method = "psci"; next-level-cache = <&cluster15_l2>; diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi index 4bde7b6f2b11..c8dc9c20fba3 100644 --- a/arch/arm64/boot/dts/lg/lg1312.dtsi +++ b/arch/arm64/boot/dts/lg/lg1312.dtsi @@ -21,27 +21,27 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; next-level-cache = <&L2_0>; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&L2_0>; }; cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&L2_0>; }; cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&L2_0>; diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi index 16ced1ff1ad3..82c6645b58b7 100644 --- a/arch/arm64/boot/dts/lg/lg1313.dtsi +++ b/arch/arm64/boot/dts/lg/lg1313.dtsi @@ -21,27 +21,27 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; next-level-cache = <&L2_0>; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; next-level-cache = <&L2_0>; }; cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; next-level-cache = <&L2_0>; }; cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; next-level-cache = <&L2_0>; diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile index 2eff1f927471..caed4334f27d 100644 --- a/arch/arm64/boot/dts/marvell/Makefile +++ b/arch/arm64/boot/dts/marvell/Makefile @@ -2,6 +2,7 @@ # Mvebu SoC Family dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-db.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-espressobin.dtb +dtb-$(CONFIG_ARCH_MVEBU) += armada-3720-uDPU.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-7040-db.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-clearfog-gt-8k.dtb dtb-$(CONFIG_ARCH_MVEBU) += armada-8040-db.dtb diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts index 846003bb480c..6be019e1888e 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts @@ -46,11 +46,16 @@ /* J9 */ &pcie0 { status = "okay"; + phys = <&comphy1 0>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>; }; /* J6 */ &sata { status = "okay"; + phys = <&comphy2 0>; + phy-names = "sata-phy"; }; /* J1 */ @@ -156,6 +161,11 @@ reg = <0>; label = "cpu"; ethernet = <ð0>; + phy-mode = "rgmii-id"; + fixed-link { + speed = <1000>; + full-duplex; + }; }; port@1 { @@ -196,6 +206,8 @@ }; ð0 { + pinctrl-names = "default"; + pinctrl-0 = <&rgmii_pins>, <&smi_pins>; phy-mode = "rgmii-id"; status = "okay"; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts new file mode 
100644 index 000000000000..bd4aab6092e0 --- /dev/null +++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dts @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Device tree for the uDPU board. + * Based on Marvell Armada 3720 development board (DB-88F3720-DDR3) + * Copyright (C) 2016 Marvell + * Copyright (C) 2019 Methode Electronics + * Copyright (C) 2019 Telus + * + * Vladimir Vid + */ + +/dts-v1/; + +#include +#include "armada-372x.dtsi" + +/ { + model = "Methode uDPU Board"; + compatible = "methode,udpu", "marvell,armada3720"; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + memory@0 { + device_type = "memory"; + reg = <0x00000000 0x00000000 0x00000000 0x20000000>; + }; + + leds { + pinctrl-names = "default"; + compatible = "gpio-leds"; + + power1 { + label = "udpu:green:power"; + gpios = <&gpionb 11 GPIO_ACTIVE_LOW>; + }; + + power2 { + label = "udpu:red:power"; + gpios = <&gpionb 12 GPIO_ACTIVE_LOW>; + }; + + network1 { + label = "udpu:green:network"; + gpios = <&gpionb 13 GPIO_ACTIVE_LOW>; + }; + + network2 { + label = "udpu:red:network"; + gpios = <&gpionb 14 GPIO_ACTIVE_LOW>; + }; + + alarm1 { + label = "udpu:green:alarm"; + gpios = <&gpionb 15 GPIO_ACTIVE_LOW>; + }; + + alarm2 { + label = "udpu:red:alarm"; + gpios = <&gpionb 16 GPIO_ACTIVE_LOW>; + }; + }; + + sfp_eth0: sfp-eth0 { + compatible = "sff,sfp"; + i2c-bus = <&i2c0>; + los-gpio = <&gpiosb 2 GPIO_ACTIVE_HIGH>; + mod-def0-gpio = <&gpiosb 3 GPIO_ACTIVE_LOW>; + tx-disable-gpio = <&gpiosb 4 GPIO_ACTIVE_HIGH>; + tx-fault-gpio = <&gpiosb 5 GPIO_ACTIVE_HIGH>; + }; + + sfp_eth1: sfp-eth1 { + compatible = "sff,sfp"; + i2c-bus = <&i2c1>; + los-gpio = <&gpiosb 7 GPIO_ACTIVE_HIGH>; + mod-def0-gpio = <&gpiosb 8 GPIO_ACTIVE_LOW>; + tx-disable-gpio = <&gpiosb 9 GPIO_ACTIVE_HIGH>; + tx-fault-gpio = <&gpiosb 10 GPIO_ACTIVE_HIGH>; + }; +}; + +&sdhci0 { + status = "okay"; + bus-width = <8>; + mmc-ddr-1_8v; + mmc-hs400-1_8v; + marvell,pad-type = "fixed-1-8v"; + non-removable; + no-sd; + no-sdio; +}; + +&spi0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&spi_quad_pins>; + + m25p80@0 { + compatible = "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <54000000>; + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + /* only bootloader is located on the SPI */ + partition@0 { + label = "uboot"; + reg = <0 0x400000>; + }; + }; + }; +}; + +&i2c0 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c1_pins>; +}; + +&i2c1 { + status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&i2c2_pins>; + + lm75@48 { + status = "okay"; + compatible = "lm75"; + reg = <0x48>; + }; + + lm75@49 { + status = "okay"; + compatible = "lm75"; + reg = <0x49>; + }; +}; + +&eth0 { + phy-mode = "sgmii"; + status = "okay"; + managed = "in-band-status"; + sfp = <&sfp_eth0>; +}; + +&eth1 { + phy-mode = "sgmii"; + status = "okay"; + managed = "in-band-status"; + sfp = <&sfp_eth1>; +}; + +&usb3 { + status = "okay"; +}; + +&uart0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-372x.dtsi b/arch/arm64/boot/dts/marvell/armada-372x.dtsi index 6800945a88ad..5ce55bdbb995 100644 --- a/arch/arm64/boot/dts/marvell/armada-372x.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-372x.dtsi @@ -18,7 +18,7 @@ cpus { cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x1>; clocks = <&nb_periph_clk 16>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index e05594ea15fb..f43c43168b00 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -42,7 +42,7 @@ #size-cells = <0>; cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0>; clocks = <&nb_periph_clk 16>; enable-method = "psci"; @@ -247,6 +247,35 @@ reg = <0x14000 0x60>; }; + comphy: phy@18300 { + compatible = "marvell,comphy-a3700"; + reg = <0x18300 0x300>, + <0x1F000 0x400>, + <0x5C000 0x400>, + <0xe0178 0x8>; + reg-names = "comphy", + "lane1_pcie_gbe", + "lane0_usb3_gbe", + "lane2_sata_usb3"; + #address-cells = <1>; + #size-cells = <0>; + + comphy0: phy@0 { + reg = <0>; + #phy-cells = <1>; + }; + + comphy1: phy@1 { + reg = <1>; + #phy-cells = <1>; + }; + + comphy2: phy@2 { + reg = <2>; + #phy-cells = <1>; + }; + }; + pinctrl_sb: pinctrl@18800 { compatible = "marvell,armada3710-sb-pinctrl", "syscon", "simple-mfd"; @@ -271,11 +300,25 @@ function = "mii"; }; + smi_pins: smi-pins { + groups = "smi"; + function = "smi"; + }; + sdio_pins: sdio-pins { groups = "sdio_sb"; function = "sdio"; }; + pcie_reset_pins: pcie-reset-pins { + groups = "pcie1"; + function = "pcie"; + }; + + pcie_clkreq_pins: pcie-clkreq-pins { + groups = "pcie1_clkreq"; + function = "pcie"; + }; }; eth0: ethernet@30000 { @@ -305,18 +348,50 @@ compatible = "marvell,armada3700-xhci", "generic-xhci"; reg = <0x58000 0x4000>; + marvell,usb-misc-reg = <&usb32_syscon>; interrupts = ; clocks = <&sb_periph_clk 12>; + phys = <&comphy0 0>, <&usb2_utmi_otg_phy>; + phy-names = "usb3-phy", "usb2-utmi-otg-phy"; status = "disabled"; }; + usb2_utmi_otg_phy: phy@5d000 { + compatible = "marvell,a3700-utmi-otg-phy"; + reg = <0x5d000 0x800>; + marvell,usb-misc-reg = <&usb32_syscon>; + #phy-cells = <0>; + }; + + usb32_syscon: system-controller@5d800 { + compatible = "marvell,armada-3700-usb2-host-device-misc", + "syscon"; + reg = <0x5d800 0x800>; + }; + usb2: usb@5e000 { compatible = "marvell,armada-3700-ehci"; - reg = <0x5e000 0x2000>; + reg = <0x5e000 0x1000>; + marvell,usb-misc-reg = <&usb2_syscon>; interrupts = ; + phys = <&usb2_utmi_host_phy>; + phy-names = "usb2-utmi-host-phy"; status = "disabled"; }; + usb2_utmi_host_phy: phy@5f000 { + compatible = "marvell,a3700-utmi-host-phy"; + reg = <0x5f000 0x800>; + marvell,usb-misc-reg = <&usb2_syscon>; + #phy-cells = <0>; + }; + + usb2_syscon: system-controller@5f800 { + compatible = "marvell,armada-3700-usb2-host-misc", + "syscon"; + reg = <0x5f800 0x800>; + }; + xor@60900 { compatible = "marvell,armada-3700-xor"; reg = <0x60900 0x100>, @@ -368,8 +443,9 @@ sata: sata@e0000 { compatible = "marvell,armada-3700-ahci"; - reg = <0xe0000 0x2000>; + reg = <0xe0000 0x178>; interrupts = ; + clocks = <&nb_periph_clk 1>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts index 412efdb46e7c..d20d84ce7ca8 100644 --- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts @@ -66,8 +66,6 @@ status = "okay"; spi-flash@0 { - #address-cells = <1>; - #size-cells = <1>; compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <10000000>; @@ -169,8 +167,6 @@ status = "okay"; spi-flash@0 { - #address-cells = <0x1>; - #size-cells = <0x1>; compatible = "jedec,spi-nor"; reg = <0x0>; spi-max-frequency = <20000000>; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts 
b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index 5b4a9609e31f..2468762283a5 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts @@ -351,7 +351,7 @@ reg = <0>; pinctrl-names = "default"; pinctrl-0 = <&cp0_copper_eth_phy_reset>; - reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>; + reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>; reset-assert-us = <10000>; }; diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts index 1bac437369a1..9f4f939ab65f 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts @@ -81,8 +81,6 @@ status = "okay"; spi-flash@0 { - #address-cells = <1>; - #size-cells = <1>; compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <10000000>; @@ -214,8 +212,6 @@ status = "okay"; spi-flash@0 { - #address-cells = <0x1>; - #size-cells = <0x1>; compatible = "jedec,spi-nor"; reg = <0x0>; spi-max-frequency = <20000000>; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi index d3c0636558ff..861fd21922c4 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806-dual.dtsi @@ -17,13 +17,13 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x000>; enable-method = "psci"; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x001>; enable-method = "psci"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi index 01ea662afba8..2baafe12ebd4 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi @@ -17,25 +17,25 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x000>; enable-method = "psci"; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x001>; enable-method = "psci"; }; cpu2: cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x100>; enable-method = "psci"; }; cpu3: cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x101>; enable-method = "psci"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 7f799cb5668e..91dad7e4ee59 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi @@ -283,6 +283,8 @@ ap_thermal: thermal-sensor@80 { compatible = "marvell,armada-ap806-thermal"; reg = <0x80 0x10>; + interrupt-parent = <&sei>; + interrupts = <18>; #thermal-sensor-cells = <1>; }; }; @@ -293,16 +295,26 @@ * The thermal IP features one internal sensor plus, if applicable, one * remote channel wired to one sensor per CPU. * + * Only one thermal zone per AP/CP may trigger interrupts at a time, the + * first one that will have a critical trip point will be chosen. + * * The cooling maps are always empty as there are no cooling devices. 
*/ thermal-zones { ap_thermal_ic: ap-thermal-ic { - polling-delay-passive = <1000>; - polling-delay = <1000>; + polling-delay-passive = <0>; /* Interrupt driven */ + polling-delay = <0>; /* Interrupt driven */ thermal-sensors = <&ap_thermal 0>; - trips { }; + trips { + ap_crit: ap-crit { + temperature = <100000>; /* mC degrees */ + hysteresis = <2000>; /* mC degrees */ + type = "critical"; + }; + }; + cooling-maps { }; }; diff --git a/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi index b788cb63caf2..d1a7143ef3d4 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap810-ap0-octa-core.dtsi @@ -15,49 +15,49 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x000>; enable-method = "psci"; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x001>; enable-method = "psci"; }; cpu2: cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x100>; enable-method = "psci"; }; cpu3: cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x101>; enable-method = "psci"; }; cpu4: cpu@200 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x200>; enable-method = "psci"; }; cpu5: cpu@201 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x201>; enable-method = "psci"; }; cpu6: cpu@300 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x300>; enable-method = "psci"; }; cpu7: cpu@301 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x301>; enable-method = "psci"; }; diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi index b9d9f31e3ba1..4d6e4a097f72 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi @@ -28,12 +28,19 @@ */ thermal-zones { CP110_LABEL(thermal_ic): CP110_NODE_NAME(thermal-ic) { - polling-delay-passive = <1000>; - polling-delay = <1000>; + polling-delay-passive = <0>; /* Interrupt driven */ + polling-delay = <0>; /* Interrupt driven */ thermal-sensors = <&CP110_LABEL(thermal) 0>; - trips { }; + trips { + CP110_LABEL(crit): crit { + temperature = <100000>; /* mC degrees */ + hysteresis = <2000>; /* mC degrees */ + type = "critical"; + }; + }; + cooling-maps { }; }; }; @@ -259,6 +266,8 @@ CP110_LABEL(thermal): thermal-sensor@70 { compatible = "marvell,armada-cp110-thermal"; reg = <0x70 0x10>; + interrupts-extended = + <&CP110_LABEL(icu_sei) 116 IRQ_TYPE_LEVEL_HIGH>; #thermal-sensor-cells = <1>; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts index 4ce9d6ca0bf7..2b91daf5c1a6 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts @@ -6,6 +6,7 @@ */ /dts-v1/; +#include #include "mt2712e.dtsi" / { @@ -39,6 +40,53 @@ regulator-max-microvolt = <1000000>; }; + extcon_usb: extcon_iddig { + compatible = "linux,extcon-usb-gpio"; + id-gpio = <&pio 12 GPIO_ACTIVE_HIGH>; + }; + + extcon_usb1: extcon_iddig1 { + compatible = "linux,extcon-usb-gpio"; + 
id-gpio = <&pio 14 GPIO_ACTIVE_HIGH>; + }; + + usb_p0_vbus: regulator@2 { + compatible = "regulator-fixed"; + regulator-name = "p0_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 13 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + usb_p1_vbus: regulator@3 { + compatible = "regulator-fixed"; + regulator-name = "p1_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 15 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + usb_p2_vbus: regulator@4 { + compatible = "regulator-fixed"; + regulator-name = "p2_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 16 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + usb_p3_vbus: regulator@5 { + compatible = "regulator-fixed"; + regulator-name = "p3_vbus"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + gpio = <&pio 17 GPIO_ACTIVE_HIGH>; + enable-active-high; + regulator-always-on; + }; + }; &auxadc { @@ -57,7 +105,57 @@ proc-supply = <&cpus_fixed_vproc1>; }; +&pio { + usb0_id_pins_float: usb0_iddig { + pins_iddig { + pinmux = ; + bias-pull-up; + }; + }; + + usb1_id_pins_float: usb1_iddig { + pins_iddig { + pinmux = ; + bias-pull-up; + }; + }; +}; + +&ssusb { + vbus-supply = <&usb_p0_vbus>; + extcon = <&extcon_usb>; + dr_mode = "otg"; + wakeup-source; + mediatek,u3p-dis-msk = <0x1>; + //enable-manual-drd; + //maximum-speed = "full-speed"; + pinctrl-names = "default"; + pinctrl-0 = <&usb0_id_pins_float>; + status = "okay"; +}; + +&ssusb1 { + vbus-supply = <&usb_p1_vbus>; + extcon = <&extcon_usb1>; + dr_mode = "otg"; + //mediatek,u3p-dis-msk = <0x1>; + enable-manual-drd; + wakeup-source; + //maximum-speed = "full-speed"; + pinctrl-names = "default"; + pinctrl-0 = <&usb1_id_pins_float>; + status = "okay"; +}; + &uart0 { status = "okay"; }; +&usb_host0 { + vbus-supply = <&usb_p2_vbus>; + status = "okay"; +}; + +&usb_host1 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi index ee627a7c7b45..976d92a94738 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include "mt2712-pinfunc.h" @@ -312,12 +314,33 @@ status = "disabled"; }; + iommu0: iommu@10205000 { + compatible = "mediatek,mt2712-m4u"; + reg = <0 0x10205000 0 0x1000>; + interrupts = ; + clocks = <&infracfg CLK_INFRA_M4U>; + clock-names = "bclk"; + mediatek,larbs = <&larb0 &larb1 &larb2 + &larb3 &larb6>; + #iommu-cells = <1>; + }; + apmixedsys: syscon@10209000 { compatible = "mediatek,mt2712-apmixedsys", "syscon"; reg = <0 0x10209000 0 0x1000>; #clock-cells = <1>; }; + iommu1: iommu@1020a000 { + compatible = "mediatek,mt2712-m4u"; + reg = <0 0x1020a000 0 0x1000>; + interrupts = ; + clocks = <&infracfg CLK_INFRA_M4U>; + clock-names = "bclk"; + mediatek,larbs = <&larb4 &larb5 &larb7>; + #iommu-cells = <1>; + }; + mcucfg: syscon@10220000 { compatible = "mediatek,mt2712-mcucfg", "syscon"; reg = <0 0x10220000 0 0x1000>; @@ -395,6 +418,210 @@ status = "disabled"; }; + pwm: pwm@11006000 { + compatible = "mediatek,mt2712-pwm"; + reg = <0 0x11006000 0 0x1000>; + #pwm-cells = <2>; + interrupts = ; + clocks = <&topckgen CLK_TOP_PWM_SEL>, + <&pericfg CLK_PERI_PWM>, + <&pericfg CLK_PERI_PWM0>, + <&pericfg CLK_PERI_PWM1>, + <&pericfg CLK_PERI_PWM2>, + <&pericfg CLK_PERI_PWM3>, + <&pericfg CLK_PERI_PWM4>, + <&pericfg CLK_PERI_PWM5>, + <&pericfg 
CLK_PERI_PWM6>, + <&pericfg CLK_PERI_PWM7>; + clock-names = "top", + "main", + "pwm1", + "pwm2", + "pwm3", + "pwm4", + "pwm5", + "pwm6", + "pwm7", + "pwm8"; + status = "disabled"; + }; + + i2c0: i2c@11007000 { + compatible = "mediatek,mt2712-i2c"; + reg = <0 0x11007000 0 0x90>, + <0 0x11000180 0 0x80>; + interrupts = ; + clock-div = <4>; + clocks = <&pericfg CLK_PERI_I2C0>, + <&pericfg CLK_PERI_AP_DMA>; + clock-names = "main", + "dma"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c1: i2c@11008000 { + compatible = "mediatek,mt2712-i2c"; + reg = <0 0x11008000 0 0x90>, + <0 0x11000200 0 0x80>; + interrupts = ; + clock-div = <4>; + clocks = <&pericfg CLK_PERI_I2C1>, + <&pericfg CLK_PERI_AP_DMA>; + clock-names = "main", + "dma"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c2: i2c@11009000 { + compatible = "mediatek,mt2712-i2c"; + reg = <0 0x11009000 0 0x90>, + <0 0x11000280 0 0x80>; + interrupts = ; + clock-div = <4>; + clocks = <&pericfg CLK_PERI_I2C2>, + <&pericfg CLK_PERI_AP_DMA>; + clock-names = "main", + "dma"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi0: spi@1100a000 { + compatible = "mediatek,mt2712-spi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x1100a000 0 0x100>; + interrupts = ; + clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>, + <&topckgen CLK_TOP_SPI_SEL>, + <&pericfg CLK_PERI_SPI0>; + clock-names = "parent-clk", "sel-clk", "spi-clk"; + status = "disabled"; + }; + + nandc: nfi@1100e000 { + compatible = "mediatek,mt2712-nfc"; + reg = <0 0x1100e000 0 0x1000>; + interrupts = ; + clocks = <&topckgen CLK_TOP_NFI2X_EN>, <&pericfg CLK_PERI_NFI>; + clock-names = "nfi_clk", "pad_clk"; + ecc-engine = <&bch>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + bch: ecc@1100f000 { + compatible = "mediatek,mt2712-ecc"; + reg = <0 0x1100f000 0 0x1000>; + interrupts = ; + clocks = <&topckgen CLK_TOP_NFI1X_CK_EN>; + clock-names = "nfiecc_clk"; + status = "disabled"; + }; + + i2c3: i2c@11010000 { + compatible = "mediatek,mt2712-i2c"; + reg = <0 0x11010000 0 0x90>, + <0 0x11000300 0 0x80>; + interrupts = ; + clock-div = <4>; + clocks = <&pericfg CLK_PERI_I2C3>, + <&pericfg CLK_PERI_AP_DMA>; + clock-names = "main", + "dma"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c4: i2c@11011000 { + compatible = "mediatek,mt2712-i2c"; + reg = <0 0x11011000 0 0x90>, + <0 0x11000380 0 0x80>; + interrupts = ; + clock-div = <4>; + clocks = <&pericfg CLK_PERI_I2C4>, + <&pericfg CLK_PERI_AP_DMA>; + clock-names = "main", + "dma"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + i2c5: i2c@11013000 { + compatible = "mediatek,mt2712-i2c"; + reg = <0 0x11013000 0 0x90>, + <0 0x11000100 0 0x80>; + interrupts = ; + clock-div = <4>; + clocks = <&pericfg CLK_PERI_I2C5>, + <&pericfg CLK_PERI_AP_DMA>; + clock-names = "main", + "dma"; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + spi2: spi@11015000 { + compatible = "mediatek,mt2712-spi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x11015000 0 0x100>; + interrupts = ; + clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>, + <&topckgen CLK_TOP_SPI_SEL>, + <&pericfg CLK_PERI_SPI2>; + clock-names = "parent-clk", "sel-clk", "spi-clk"; + status = "disabled"; + }; + + spi3: spi@11016000 { + compatible = "mediatek,mt2712-spi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x11016000 0 0x100>; + interrupts = ; + clocks = <&topckgen 
CLK_TOP_UNIVPLL2_D4>, + <&topckgen CLK_TOP_SPI_SEL>, + <&pericfg CLK_PERI_SPI3>; + clock-names = "parent-clk", "sel-clk", "spi-clk"; + status = "disabled"; + }; + + spi4: spi@10012000 { + compatible = "mediatek,mt2712-spi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x10012000 0 0x100>; + interrupts = ; + clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>, + <&topckgen CLK_TOP_SPI_SEL>, + <&infracfg CLK_INFRA_AO_SPI0>; + clock-names = "parent-clk", "sel-clk", "spi-clk"; + status = "disabled"; + }; + + spi5: spi@11018000 { + compatible = "mediatek,mt2712-spi"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x11018000 0 0x100>; + interrupts = ; + clocks = <&topckgen CLK_TOP_UNIVPLL2_D4>, + <&topckgen CLK_TOP_SPI_SEL>, + <&pericfg CLK_PERI_SPI5>; + clock-names = "parent-clk", "sel-clk", "spi-clk"; + status = "disabled"; + }; + uart4: serial@11019000 { compatible = "mediatek,mt2712-uart", "mediatek,mt6577-uart"; @@ -405,6 +632,228 @@ status = "disabled"; }; + mmc0: mmc@11230000 { + compatible = "mediatek,mt2712-mmc"; + reg = <0 0x11230000 0 0x1000>; + interrupts = ; + clocks = <&pericfg CLK_PERI_MSDC30_0>, + <&pericfg CLK_PERI_MSDC50_0_HCLK_EN>, + <&pericfg CLK_PERI_MSDC30_0_QTR_EN>, + <&pericfg CLK_PERI_MSDC50_0_EN>; + clock-names = "source", "hclk", "bus_clk", "source_cg"; + status = "disabled"; + }; + + mmc1: mmc@11240000 { + compatible = "mediatek,mt2712-mmc"; + reg = <0 0x11240000 0 0x1000>; + interrupts = ; + clocks = <&pericfg CLK_PERI_MSDC30_1>, + <&topckgen CLK_TOP_AXI_SEL>, + <&pericfg CLK_PERI_MSDC30_1_EN>; + clock-names = "source", "hclk", "source_cg"; + status = "disabled"; + }; + + mmc2: mmc@11250000 { + compatible = "mediatek,mt2712-mmc"; + reg = <0 0x11250000 0 0x1000>; + interrupts = ; + clocks = <&pericfg CLK_PERI_MSDC30_2>, + <&topckgen CLK_TOP_AXI_SEL>, + <&pericfg CLK_PERI_MSDC30_2_EN>; + clock-names = "source", "hclk", "source_cg"; + status = "disabled"; + }; + + ssusb: usb@11271000 { + compatible = "mediatek,mt2712-mtu3", "mediatek,mtu3"; + reg = <0 0x11271000 0 0x3000>, + <0 0x11280700 0 0x0100>; + reg-names = "mac", "ippc"; + interrupts = ; + phys = <&u2port0 PHY_TYPE_USB2>, + <&u2port1 PHY_TYPE_USB2>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_USB>; + clocks = <&topckgen CLK_TOP_USB30_SEL>; + clock-names = "sys_ck"; + mediatek,syscon-wakeup = <&pericfg 0x510 2>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "disabled"; + + usb_host0: xhci@11270000 { + compatible = "mediatek,mt2712-xhci", + "mediatek,mtk-xhci"; + reg = <0 0x11270000 0 0x1000>; + reg-names = "mac"; + interrupts = ; + power-domains = <&scpsys MT2712_POWER_DOMAIN_USB>; + clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>; + clock-names = "sys_ck", "ref_ck"; + status = "disabled"; + }; + }; + + u3phy0: usb-phy@11290000 { + compatible = "mediatek,mt2712-u3phy"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "okay"; + + u2port0: usb-phy@11290000 { + reg = <0 0x11290000 0 0x700>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + + u2port1: usb-phy@11298000 { + reg = <0 0x11298000 0 0x700>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + + u3port0: usb-phy@11298700 { + reg = <0 0x11298700 0 0x900>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + }; + + ssusb1: usb@112c1000 { + compatible = "mediatek,mt2712-mtu3", "mediatek,mtu3"; + reg = <0 0x112c1000 0 0x3000>, + <0 0x112d0700 0 0x0100>; + reg-names = "mac", "ippc"; + interrupts = 
; + phys = <&u2port2 PHY_TYPE_USB2>, + <&u2port3 PHY_TYPE_USB2>, + <&u3port1 PHY_TYPE_USB3>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_USB2>; + clocks = <&topckgen CLK_TOP_USB30_SEL>; + clock-names = "sys_ck"; + mediatek,syscon-wakeup = <&pericfg 0x514 2>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "disabled"; + + usb_host1: xhci@112c0000 { + compatible = "mediatek,mt2712-xhci", + "mediatek,mtk-xhci"; + reg = <0 0x112c0000 0 0x1000>; + reg-names = "mac"; + interrupts = ; + power-domains = <&scpsys MT2712_POWER_DOMAIN_USB2>; + clocks = <&topckgen CLK_TOP_USB30_SEL>, <&clk26m>; + clock-names = "sys_ck", "ref_ck"; + status = "disabled"; + }; + }; + + u3phy1: usb-phy@112e0000 { + compatible = "mediatek,mt2712-u3phy"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + status = "okay"; + + u2port2: usb-phy@112e0000 { + reg = <0 0x112e0000 0 0x700>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + + u2port3: usb-phy@112e8000 { + reg = <0 0x112e8000 0 0x700>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + + u3port1: usb-phy@112e8700 { + reg = <0 0x112e8700 0 0x900>; + clocks = <&clk26m>; + clock-names = "ref"; + #phy-cells = <1>; + status = "okay"; + }; + }; + + pcie: pcie@11700000 { + compatible = "mediatek,mt2712-pcie"; + device_type = "pci"; + reg = <0 0x11700000 0 0x1000>, + <0 0x112ff000 0 0x1000>; + reg-names = "port0", "port1"; + #address-cells = <3>; + #size-cells = <2>; + interrupts = , + ; + clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>, + <&topckgen CLK_TOP_PE2_MAC_P1_SEL>, + <&pericfg CLK_PERI_PCIE0>, + <&pericfg CLK_PERI_PCIE1>; + clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1"; + phys = <&u3port0 PHY_TYPE_PCIE>, <&u3port1 PHY_TYPE_PCIE>; + phy-names = "pcie-phy0", "pcie-phy1"; + bus-range = <0x00 0xff>; + ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>; + + pcie0: pcie@0,0 { + device_type = "pci"; + status = "disabled"; + reg = <0x0000 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges; + num-lanes = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc0 0>, + <0 0 0 2 &pcie_intc0 1>, + <0 0 0 3 &pcie_intc0 2>, + <0 0 0 4 &pcie_intc0 3>; + pcie_intc0: interrupt-controller { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + }; + + pcie1: pcie@1,0 { + device_type = "pci"; + status = "disabled"; + reg = <0x0800 0 0 0 0>; + #address-cells = <3>; + #size-cells = <2>; + #interrupt-cells = <1>; + ranges; + num-lanes = <1>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc1 0>, + <0 0 0 2 &pcie_intc1 1>, + <0 0 0 3 &pcie_intc1 2>, + <0 0 0 4 &pcie_intc1 3>; + pcie_intc1: interrupt-controller { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + }; + }; + }; + mfgcfg: syscon@13000000 { compatible = "mediatek,mt2712-mfgcfg", "syscon"; reg = <0 0x13000000 0 0x1000>; @@ -417,12 +866,85 @@ #clock-cells = <1>; }; + larb0: larb@14021000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x14021000 0 0x1000>; + mediatek,smi = <&smi_common0>; + mediatek,larb-id = <0>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>; + clocks = <&mmsys CLK_MM_SMI_LARB0>, + <&mmsys CLK_MM_SMI_LARB0>; + clock-names = "apb", "smi"; + }; + + smi_common0: smi@14022000 { + compatible = "mediatek,mt2712-smi-common"; + reg = <0 0x14022000 0 0x1000>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>; + clocks = <&mmsys CLK_MM_SMI_COMMON>, + 
<&mmsys CLK_MM_SMI_COMMON>; + clock-names = "apb", "smi"; + }; + + larb4: larb@14027000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x14027000 0 0x1000>; + mediatek,smi = <&smi_common1>; + mediatek,larb-id = <4>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>; + clocks = <&mmsys CLK_MM_SMI_LARB4>, + <&mmsys CLK_MM_SMI_LARB4>; + clock-names = "apb", "smi"; + }; + + larb5: larb@14030000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x14030000 0 0x1000>; + mediatek,smi = <&smi_common1>; + mediatek,larb-id = <5>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>; + clocks = <&mmsys CLK_MM_SMI_LARB5>, + <&mmsys CLK_MM_SMI_LARB5>; + clock-names = "apb", "smi"; + }; + + smi_common1: smi@14031000 { + compatible = "mediatek,mt2712-smi-common"; + reg = <0 0x14031000 0 0x1000>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>; + clocks = <&mmsys CLK_MM_SMI_COMMON1>, + <&mmsys CLK_MM_SMI_COMMON1>; + clock-names = "apb", "smi"; + }; + + larb7: larb@14032000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x14032000 0 0x1000>; + mediatek,smi = <&smi_common1>; + mediatek,larb-id = <7>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_MM>; + clocks = <&mmsys CLK_MM_SMI_LARB7>, + <&mmsys CLK_MM_SMI_LARB7>; + clock-names = "apb", "smi"; + }; + imgsys: syscon@15000000 { compatible = "mediatek,mt2712-imgsys", "syscon"; reg = <0 0x15000000 0 0x1000>; #clock-cells = <1>; }; + larb2: larb@15001000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x15001000 0 0x1000>; + mediatek,smi = <&smi_common0>; + mediatek,larb-id = <2>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_ISP>; + clocks = <&imgsys CLK_IMG_SMI_LARB2>, + <&imgsys CLK_IMG_SMI_LARB2>; + clock-names = "apb", "smi"; + }; + bdpsys: syscon@15010000 { compatible = "mediatek,mt2712-bdpsys", "syscon"; reg = <0 0x15010000 0 0x1000>; @@ -435,12 +957,45 @@ #clock-cells = <1>; }; + larb1: larb@16010000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x16010000 0 0x1000>; + mediatek,smi = <&smi_common0>; + mediatek,larb-id = <1>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_VDEC>; + clocks = <&vdecsys CLK_VDEC_CKEN>, + <&vdecsys CLK_VDEC_LARB1_CKEN>; + clock-names = "apb", "smi"; + }; + vencsys: syscon@18000000 { compatible = "mediatek,mt2712-vencsys", "syscon"; reg = <0 0x18000000 0 0x1000>; #clock-cells = <1>; }; + larb3: larb@18001000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x18001000 0 0x1000>; + mediatek,smi = <&smi_common0>; + mediatek,larb-id = <3>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_VENC>; + clocks = <&vencsys CLK_VENC_SMI_COMMON_CON>, + <&vencsys CLK_VENC_VENC>; + clock-names = "apb", "smi"; + }; + + larb6: larb@18002000 { + compatible = "mediatek,mt2712-smi-larb"; + reg = <0 0x18002000 0 0x1000>; + mediatek,smi = <&smi_common0>; + mediatek,larb-id = <6>; + power-domains = <&scpsys MT2712_POWER_DOMAIN_VENC>; + clocks = <&vencsys CLK_VENC_SMI_COMMON_CON>, + <&vencsys CLK_VENC_VENC>; + clock-names = "apb", "smi"; + }; + jpgdecsys: syscon@19000000 { compatible = "mediatek,mt2712-jpgdecsys", "syscon"; reg = <0 0x19000000 0 0x1000>; diff --git a/arch/arm64/boot/dts/mediatek/mt6797-evb.dts b/arch/arm64/boot/dts/mediatek/mt6797-evb.dts index c79109c65409..237e869a5fa1 100644 --- a/arch/arm64/boot/dts/mediatek/mt6797-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt6797-evb.dts @@ -33,4 +33,6 @@ &uart0 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart0_pins_a>; }; diff --git a/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts 
b/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts index 742938a1a548..13939d55b85b 100644 --- a/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts +++ b/arch/arm64/boot/dts/mediatek/mt6797-x20-dev.dts @@ -30,4 +30,6 @@ &uart1 { status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&uart1_pins_a>; }; diff --git a/arch/arm64/boot/dts/mediatek/mt6797.dtsi b/arch/arm64/boot/dts/mediatek/mt6797.dtsi index 4beaa71107d7..2b2a69c7567f 100644 --- a/arch/arm64/boot/dts/mediatek/mt6797.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt6797.dtsi @@ -14,6 +14,7 @@ #include #include #include +#include / { compatible = "mediatek,mt6797"; @@ -129,6 +130,33 @@ #clock-cells = <1>; }; + pio: pinctrl@10005000 { + compatible = "mediatek,mt6797-pinctrl"; + reg = <0 0x10005000 0 0x1000>, + <0 0x10002000 0 0x400>, + <0 0x10002400 0 0x400>, + <0 0x10002800 0 0x400>, + <0 0x10002C00 0 0x400>; + reg-names = "gpio", "iocfgl", "iocfgb", + "iocfgr", "iocfgt"; + gpio-controller; + #gpio-cells = <2>; + + uart0_pins_a: uart0 { + pins0 { + pinmux = , + ; + }; + }; + + uart1_pins_a: uart1 { + pins1 { + pinmux = , + ; + }; + }; + }; + scpsys: scpsys@10006000 { compatible = "mediatek,mt6797-scpsys"; #power-domain-cells = <1>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi index 8fc4aa77f012..4b1f5ae710eb 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi @@ -70,7 +70,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; clocks = <&infracfg CLK_INFRA_MUX1_SEL>, <&apmixedsys CLK_APMIXED_MAIN_CORE_EN>; @@ -84,7 +84,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; clocks = <&infracfg CLK_INFRA_MUX1_SEL>, <&apmixedsys CLK_APMIXED_MAIN_CORE_EN>; @@ -170,17 +170,20 @@ cooling-maps { map0 { trip = <&cpu_passive>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; map1 { trip = <&cpu_active>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; map2 { trip = <&cpu_hot>; - cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 412ffd4d426b..c3c360161c5d 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -276,12 +276,14 @@ cooling-maps { map@0 { trip = <&target>; - cooling-device = <&cpu0 0 0>; + cooling-device = <&cpu0 0 0>, + <&cpu1 0 0>; contribution = <3072>; }; map@1 { trip = <&target>; - cooling-device = <&cpu2 0 0>; + cooling-device = <&cpu2 0 0>, + <&cpu3 0 0>; contribution = <1024>; }; }; diff --git a/arch/arm64/boot/dts/nvidia/Makefile b/arch/arm64/boot/dts/nvidia/Makefile index 7c13d7df484e..6b8ab5568481 100644 --- a/arch/arm64/boot/dts/nvidia/Makefile +++ b/arch/arm64/boot/dts/nvidia/Makefile @@ -4,5 +4,6 @@ dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-0000.dtb dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2371-2180.dtb dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2571.dtb dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-smaug.dtb 
+dtb-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-p2894-0050-a08.dtb dtb-$(CONFIG_ARCH_TEGRA_186_SOC) += tegra186-p2771-0000.dtb dtb-$(CONFIG_ARCH_TEGRA_194_SOC) += tegra194-p2972-0000.dtb diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi index fa5a7c4bc807..631a7f77c386 100644 --- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi @@ -1082,13 +1082,13 @@ cpu@0 { device_type = "cpu"; - compatible = "nvidia,denver", "arm,armv8"; + compatible = "nvidia,denver"; reg = <0>; }; cpu@1 { device_type = "cpu"; - compatible = "nvidia,denver", "arm,armv8"; + compatible = "nvidia,denver"; reg = <1>; }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts index 65487eee2ce6..31457f32e4d0 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts @@ -26,7 +26,8 @@ reg = <0x74>; interrupt-parent = <&gpio>; - interrupts = ; + interrupts = ; #gpio-cells = <2>; gpio-controller; @@ -37,7 +38,8 @@ reg = <0x77>; interrupt-parent = <&gpio>; - interrupts = ; + interrupts = ; #gpio-cells = <2>; gpio-controller; @@ -52,6 +54,7 @@ }; hda@3510000 { + nvidia,model = "jetson-tx2-hda"; status = "okay"; }; @@ -108,7 +111,8 @@ hdmi-supply = <&vdd_hdmi>; nvidia,ddc-i2c-bus = <&ddc>; - nvidia,hpd-gpio = <&gpio TEGRA_MAIN_GPIO(P, 1) GPIO_ACTIVE_LOW>; + nvidia,hpd-gpio = <&gpio TEGRA186_MAIN_GPIO(P, 1) + GPIO_ACTIVE_LOW>; }; dpaux@155c0000 { @@ -121,7 +125,7 @@ power { label = "Power"; - gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 0) + gpios = <&gpio_aon TEGRA186_AON_GPIO(FF, 0) GPIO_ACTIVE_LOW>; linux,input-type = ; linux,code = ; @@ -132,7 +136,7 @@ volume-up { label = "Volume Up"; - gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 1) + gpios = <&gpio_aon TEGRA186_AON_GPIO(FF, 1) GPIO_ACTIVE_LOW>; linux,input-type = ; linux,code = ; @@ -141,7 +145,7 @@ volume-down { label = "Volume Down"; - gpios = <&gpio_aon TEGRA_AON_GPIO(FF, 2) + gpios = <&gpio_aon TEGRA186_AON_GPIO(FF, 2) GPIO_ACTIVE_LOW>; linux,input-type = ; linux,code = ; @@ -158,7 +162,8 @@ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - gpio = <&gpio TEGRA_MAIN_GPIO(P, 6) GPIO_ACTIVE_HIGH>; + gpio = <&gpio TEGRA186_MAIN_GPIO(P, 6) + GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&vdd_3v3_sys>; diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi index b539561e7877..89a2da46efae 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi @@ -34,7 +34,8 @@ ethernet@2490000 { status = "okay"; - phy-reset-gpios = <&gpio TEGRA_MAIN_GPIO(M, 4) GPIO_ACTIVE_LOW>; + phy-reset-gpios = <&gpio TEGRA186_MAIN_GPIO(M, 4) + GPIO_ACTIVE_LOW>; phy-handle = <&phy>; phy-mode = "rgmii"; @@ -46,7 +47,8 @@ compatible = "ethernet-phy-ieee802.3-c22"; reg = <0x0>; interrupt-parent = <&gpio>; - interrupts = ; + interrupts = ; }; }; }; @@ -91,8 +93,8 @@ /* SDMMC1 (SD/MMC) */ sdhci@3400000 { - cd-gpios = <&gpio TEGRA_MAIN_GPIO(P, 5) GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio TEGRA_MAIN_GPIO(P, 4) GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio TEGRA186_MAIN_GPIO(P, 5) GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio TEGRA186_MAIN_GPIO(P, 4) GPIO_ACTIVE_HIGH>; vqmmc-supply = <&vddio_sdmmc1>; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi index 22815db4a3ed..bb2045be8814 100644 --- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi +++ 
b/arch/arm64/boot/dts/nvidia/tegra186.dtsi @@ -136,7 +136,7 @@ }; gen1_i2c: i2c@3160000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x03160000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -149,7 +149,7 @@ }; cam_i2c: i2c@3180000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x03180000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -163,7 +163,7 @@ /* shares pads with dpaux1 */ dp_aux_ch1_i2c: i2c@3190000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x03190000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -177,7 +177,7 @@ /* controlled by BPMP, should not be enabled */ pwr_i2c: i2c@31a0000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x031a0000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -191,7 +191,7 @@ /* shares pads with dpaux0 */ dp_aux_ch0_i2c: i2c@31b0000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x031b0000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -204,7 +204,7 @@ }; gen7_i2c: i2c@31c0000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x031c0000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -217,7 +217,7 @@ }; gen9_i2c: i2c@31e0000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x031e0000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -315,10 +315,13 @@ nvidia,pad-autocal-pull-down-offset-hs400 = <0x05>; nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x0a>; nvidia,pad-autocal-pull-down-offset-1v8-timeout = <0x0a>; + nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x0a>; + nvidia,pad-autocal-pull-down-offset-3v3-timeout = <0x0a>; nvidia,default-tap = <0x5>; nvidia,default-trim = <0x9>; nvidia,dqs-trim = <63>; mmc-hs400-1_8v; + supports-cqe; status = "disabled"; }; @@ -375,7 +378,7 @@ }; gen2_i2c: i2c@c240000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x0c240000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -388,7 +391,7 @@ }; gen8_i2c: i2c@c250000 { - compatible = "nvidia,tegra186-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra186-i2c", "nvidia,tegra210-i2c"; reg = <0x0 0x0c250000 0x0 0x10000>; interrupts = ; #address-cells = <1>; @@ -982,37 +985,37 @@ #size-cells = <0>; cpu@0 { - compatible = "nvidia,tegra186-denver", "arm,armv8"; + compatible = "nvidia,tegra186-denver"; device_type = "cpu"; reg = <0x000>; }; cpu@1 { - compatible = "nvidia,tegra186-denver", "arm,armv8"; + compatible = "nvidia,tegra186-denver"; device_type = "cpu"; reg = <0x001>; }; cpu@2 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; device_type = "cpu"; reg = <0x100>; }; cpu@3 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; device_type = "cpu"; reg = <0x101>; }; cpu@4 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; device_type = "cpu"; reg = <0x102>; }; cpu@5 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = 
"arm,cortex-a57"; device_type = "cpu"; reg = <0x103>; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi index 22a1c267aed9..246c1ebbd055 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi @@ -10,7 +10,7 @@ aliases { sdhci0 = "/cbb/sdhci@3460000"; sdhci1 = "/cbb/sdhci@3400000"; - serial0 = &uartb; + serial0 = &tcu; i2c0 = "/bpmp/i2c"; i2c1 = "/cbb/i2c@3160000"; i2c2 = "/cbb/i2c@c240000"; diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts index adf351010ff5..b62e96945846 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts @@ -11,20 +11,21 @@ compatible = "nvidia,p2972-0000", "nvidia,tegra194"; cbb { - /* SDMMC1 (SD/MMC) */ - sdhci@3400000 { + ddc: i2c@31c0000 { status = "okay"; }; - ddc: i2c@31c0000 { + /* SDMMC1 (SD/MMC) */ + sdhci@3400000 { status = "okay"; }; - pwm@c340000 { + hda@3510000 { + nvidia,model = "jetson-xavier-hda"; status = "okay"; }; - hda@3510000 { + pwm@c340000 { status = "okay"; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi index 6dfa1ca0b851..c77ca211fa8f 100644 --- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi @@ -303,6 +303,17 @@ clock-names = "sdhci"; resets = <&bpmp TEGRA194_RESET_SDMMC1>; reset-names = "sdhci"; + nvidia,pad-autocal-pull-up-offset-3v3-timeout = + <0x07>; + nvidia,pad-autocal-pull-down-offset-3v3-timeout = + <0x07>; + nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x06>; + nvidia,pad-autocal-pull-down-offset-1v8-timeout = + <0x07>; + nvidia,pad-autocal-pull-up-offset-sdr104 = <0x00>; + nvidia,pad-autocal-pull-down-offset-sdr104 = <0x00>; + nvidia,default-tap = <0x9>; + nvidia,default-trim = <0x5>; status = "disabled"; }; @@ -314,6 +325,18 @@ clock-names = "sdhci"; resets = <&bpmp TEGRA194_RESET_SDMMC3>; reset-names = "sdhci"; + nvidia,pad-autocal-pull-up-offset-1v8 = <0x00>; + nvidia,pad-autocal-pull-down-offset-1v8 = <0x7a>; + nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x07>; + nvidia,pad-autocal-pull-down-offset-3v3-timeout = + <0x07>; + nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x06>; + nvidia,pad-autocal-pull-down-offset-1v8-timeout = + <0x07>; + nvidia,pad-autocal-pull-up-offset-sdr104 = <0x00>; + nvidia,pad-autocal-pull-down-offset-sdr104 = <0x00>; + nvidia,default-tap = <0x9>; + nvidia,default-trim = <0x5>; status = "disabled"; }; @@ -323,8 +346,24 @@ interrupts = ; clocks = <&bpmp TEGRA194_CLK_SDMMC4>; clock-names = "sdhci"; + assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>, + <&bpmp TEGRA194_CLK_PLLC4>; + assigned-clock-parents = + <&bpmp TEGRA194_CLK_PLLC4>; resets = <&bpmp TEGRA194_RESET_SDMMC4>; reset-names = "sdhci"; + nvidia,pad-autocal-pull-up-offset-hs400 = <0x00>; + nvidia,pad-autocal-pull-down-offset-hs400 = <0x00>; + nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x0a>; + nvidia,pad-autocal-pull-down-offset-1v8-timeout = + <0x0a>; + nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x0a>; + nvidia,pad-autocal-pull-down-offset-3v3-timeout = + <0x0a>; + nvidia,default-tap = <0x8>; + nvidia,default-trim = <0x14>; + nvidia,dqs-trim = <40>; + supports-cqe; status = "disabled"; }; @@ -367,10 +406,35 @@ }; hsp_top0: hsp@3c00000 { - compatible = "nvidia,tegra186-hsp"; + compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp"; reg = <0x03c00000 0xa0000>; - interrupts = ; - 
interrupt-names = "doorbell"; + interrupts = , + , + , + , + , + , + , + , + ; + interrupt-names = "doorbell", "shared0", "shared1", "shared2", + "shared3", "shared4", "shared5", "shared6", + "shared7"; + #mbox-cells = <2>; + }; + + hsp_aon: hsp@c150000 { + compatible = "nvidia,tegra194-hsp", "nvidia,tegra186-hsp"; + reg = <0x0c150000 0xa0000>; + interrupts = , + , + , + ; + /* + * Shared interrupt 0 is routed only to AON/SPE, so + * we only have 4 shared interrupts for the CCPLEX. + */ + interrupt-names = "shared1", "shared2", "shared3", "shared4"; #mbox-cells = <2>; }; @@ -871,56 +935,56 @@ #size-cells = <0>; cpu@0 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x10000>; enable-method = "psci"; }; cpu@1 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x10001>; enable-method = "psci"; }; cpu@2 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x100>; enable-method = "psci"; }; cpu@3 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x101>; enable-method = "psci"; }; cpu@4 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x200>; enable-method = "psci"; }; cpu@5 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x201>; enable-method = "psci"; }; cpu@6 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x10300>; enable-method = "psci"; }; cpu@7 { - compatible = "nvidia,tegra194-carmel", "arm,armv8"; + compatible = "nvidia,tegra194-carmel"; device_type = "cpu"; reg = <0x10301>; enable-method = "psci"; @@ -933,6 +997,13 @@ method = "smc"; }; + tcu: tcu { + compatible = "nvidia,tegra194-tcu"; + mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_SM TEGRA_HSP_SM_RX(0)>, + <&hsp_aon TEGRA_HSP_MBOX_TYPE_SM TEGRA_HSP_SM_TX(1)>; + mbox-names = "rx", "tx"; + }; + thermal-zones { cpu { thermal-sensors = <&{/bpmp/thermal} diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts index 37e3c46e753f..9fad0d27278e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2371-2180.dts @@ -78,4 +78,25 @@ }; }; }; + + clock@70110000 { + status = "okay"; + + nvidia,cf = <6>; + nvidia,ci = <0>; + nvidia,cg = <2>; + nvidia,droop-ctrl = <0x00000f00>; + nvidia,force-mode = <1>; + nvidia,sample-rate = <25000>; + + nvidia,pwm-min-microvolts = <708000>; + nvidia,pwm-period-nanoseconds = <2500>; /* 2.5us */ + nvidia,pwm-to-pmic; + nvidia,pwm-tristate-microvolts = <1000000>; + nvidia,pwm-voltage-step-microvolts = <19200>; + + pinctrl-names = "dvfs_pwm_enable", "dvfs_pwm_disable"; + pinctrl-0 = <&dvfs_pwm_active_state>; + pinctrl-1 = <&dvfs_pwm_inactive_state>; + }; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi index a96e6ee70c21..95e890d8a119 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi @@ -1278,6 +1278,20 @@ nvidia,open-drain = ; }; }; + + dvfs_pwm_active_state: dvfs_pwm_active { + dvfs_pwm_pbb1 { + nvidia,pins = "dvfs_pwm_pbb1"; + nvidia,tristate = ; + }; + }; + + 
dvfs_pwm_inactive_state: dvfs_pwm_inactive { + dvfs_pwm_pbb1 { + nvidia,pins = "dvfs_pwm_pbb1"; + nvidia,tristate = ; + }; + }; }; pwm@7000a000 { @@ -1303,6 +1317,16 @@ clock-frequency = <100000>; }; + sata@70020000 { + status = "okay"; + phys = <&{/padctl@7009f000/pads/sata/lanes/sata-0}>; + }; + + hda@70030000 { + nvidia,model = "jetson-tx1-hda"; + status = "okay"; + }; + usb@70090000 { phys = <&{/padctl@7009f000/pads/usb2/lanes/usb2-0}>, <&{/padctl@7009f000/pads/usb2/lanes/usb2-1}>, @@ -1325,15 +1349,6 @@ status = "okay"; }; - sata@70020000 { - status = "okay"; - phys = <&{/padctl@7009f000/pads/sata/lanes/sata-0}>; - }; - - hda@70030000 { - status = "okay"; - }; - padctl@7009f000 { status = "okay"; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2894-0050-a08.dts b/arch/arm64/boot/dts/nvidia/tegra210-p2894-0050-a08.dts new file mode 100644 index 000000000000..7ffb351b5882 --- /dev/null +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2894-0050-a08.dts @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +/dts-v1/; + +#include "tegra210-p2894.dtsi" + +/ { + model = "NVIDIA Shield TV"; + compatible = "nvidia,p2894-0050-a08", "nvidia,darcy", "nvidia,tegra210"; +}; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi new file mode 100644 index 000000000000..3ddf173ccc18 --- /dev/null +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi @@ -0,0 +1,1858 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include "tegra210.dtsi" + +/ { + aliases { + serial0 = &uarta; + }; + + chosen { + bootargs = "earlycon"; + stdout-path = "serial0:115200n8"; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x80000000 0x0 0xc0000000>; + }; + + pinmux: pinmux@700008d4 { + status = "okay"; + pinctrl-names = "boot"; + pinctrl-0 = <&state_boot>; + + state_boot: pinmux { + pex_l0_rst_n_pa0 { + nvidia,pins = "pex_l0_rst_n_pa0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + pex_l0_clkreq_n_pa1 { + nvidia,pins = "pex_l0_clkreq_n_pa1"; + nvidia,function = "pe0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + pex_wake_n_pa2 { + nvidia,pins = "pex_wake_n_pa2"; + nvidia,function = "pe"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + pex_l1_rst_n_pa3 { + nvidia,pins = "pex_l1_rst_n_pa3"; + nvidia,function = "pe1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + pex_l1_clkreq_n_pa4 { + nvidia,pins = "pex_l1_clkreq_n_pa4"; + nvidia,function = "pe1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + sata_led_active_pa5 { + nvidia,pins = "sata_led_active_pa5"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pa6 { + nvidia,pins = "pa6"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap1_fs_pb0 { + nvidia,pins = "dap1_fs_pb0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap1_din_pb1 { + nvidia,pins = "dap1_din_pb1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap1_dout_pb2 { + 
nvidia,pins = "dap1_dout_pb2"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap1_sclk_pb3 { + nvidia,pins = "dap1_sclk_pb3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi2_mosi_pb4 { + nvidia,pins = "spi2_mosi_pb4"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi2_miso_pb5 { + nvidia,pins = "spi2_miso_pb5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi2_sck_pb6 { + nvidia,pins = "spi2_sck_pb6"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi2_cs0_pb7 { + nvidia,pins = "spi2_cs0_pb7"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi1_mosi_pc0 { + nvidia,pins = "spi1_mosi_pc0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi1_miso_pc1 { + nvidia,pins = "spi1_miso_pc1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi1_sck_pc2 { + nvidia,pins = "spi1_sck_pc2"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi1_cs0_pc3 { + nvidia,pins = "spi1_cs0_pc3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi1_cs1_pc4 { + nvidia,pins = "spi1_cs1_pc4"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi4_sck_pc5 { + nvidia,pins = "spi4_sck_pc5"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi4_cs0_pc6 { + nvidia,pins = "spi4_cs0_pc6"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi4_mosi_pc7 { + nvidia,pins = "spi4_mosi_pc7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spi4_miso_pd0 { + nvidia,pins = "spi4_miso_pd0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart3_tx_pd1 { + nvidia,pins = "uart3_tx_pd1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart3_rx_pd2 { + nvidia,pins = "uart3_rx_pd2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart3_rts_pd3 { + nvidia,pins = "uart3_rts_pd3"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart3_cts_pd4 { + nvidia,pins = "uart3_cts_pd4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dmic1_clk_pe0 { + nvidia,pins = "dmic1_clk_pe0"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dmic1_dat_pe1 { + nvidia,pins = "dmic1_dat_pe1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + 
nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dmic2_clk_pe2 { + nvidia,pins = "dmic2_clk_pe2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dmic2_dat_pe3 { + nvidia,pins = "dmic2_dat_pe3"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dmic3_clk_pe4 { + nvidia,pins = "dmic3_clk_pe4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dmic3_dat_pe5 { + nvidia,pins = "dmic3_dat_pe5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pe6 { + nvidia,pins = "pe6"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pe7 { + nvidia,pins = "pe7"; + nvidia,function = "pwm3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + gen3_i2c_scl_pf0 { + nvidia,pins = "gen3_i2c_scl_pf0"; + nvidia,function = "i2c3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + gen3_i2c_sda_pf1 { + nvidia,pins = "gen3_i2c_sda_pf1"; + nvidia,function = "i2c3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + uart2_tx_pg0 { + nvidia,pins = "uart2_tx_pg0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart2_rx_pg1 { + nvidia,pins = "uart2_rx_pg1"; + nvidia,function = "uartb"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart2_rts_pg2 { + nvidia,pins = "uart2_rts_pg2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart2_cts_pg3 { + nvidia,pins = "uart2_cts_pg3"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + wifi_en_ph0 { + nvidia,pins = "wifi_en_ph0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + wifi_rst_ph1 { + nvidia,pins = "wifi_rst_ph1"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + wifi_wake_ap_ph2 { + nvidia,pins = "wifi_wake_ap_ph2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + ap_wake_bt_ph3 { + nvidia,pins = "ap_wake_bt_ph3"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + bt_rst_ph4 { + nvidia,pins = "bt_rst_ph4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + bt_wake_ap_ph5 { + nvidia,pins = "bt_wake_ap_ph5"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + ph6 { + nvidia,pins = "ph6"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + ap_wake_nfc_ph7 { + nvidia,pins = "ap_wake_nfc_ph7"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + nfc_en_pi0 { + nvidia,pins = "nfc_en_pi0"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + nfc_int_pi1 { + nvidia,pins = "nfc_int_pi1"; + nvidia,function = "rsvd0"; + 
nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + gps_en_pi2 { + nvidia,pins = "gps_en_pi2"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + gps_rst_pi3 { + nvidia,pins = "gps_rst_pi3"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart4_tx_pi4 { + nvidia,pins = "uart4_tx_pi4"; + nvidia,function = "uartd"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart4_rx_pi5 { + nvidia,pins = "uart4_rx_pi5"; + nvidia,function = "uartd"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart4_rts_pi6 { + nvidia,pins = "uart4_rts_pi6"; + nvidia,function = "uartd"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart4_cts_pi7 { + nvidia,pins = "uart4_cts_pi7"; + nvidia,function = "uartd"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + gen1_i2c_sda_pj0 { + nvidia,pins = "gen1_i2c_sda_pj0"; + nvidia,function = "i2c1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + gen1_i2c_scl_pj1 { + nvidia,pins = "gen1_i2c_scl_pj1"; + nvidia,function = "i2c1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + gen2_i2c_scl_pj2 { + nvidia,pins = "gen2_i2c_scl_pj2"; + nvidia,function = "i2c2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + gen2_i2c_sda_pj3 { + nvidia,pins = "gen2_i2c_sda_pj3"; + nvidia,function = "i2c2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + dap4_fs_pj4 { + nvidia,pins = "dap4_fs_pj4"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap4_din_pj5 { + nvidia,pins = "dap4_din_pj5"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap4_dout_pj6 { + nvidia,pins = "dap4_dout_pj6"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap4_sclk_pj7 { + nvidia,pins = "dap4_sclk_pj7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk0 { + nvidia,pins = "pk0"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk1 { + nvidia,pins = "pk1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk2 { + nvidia,pins = "pk2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk3 { + nvidia,pins = "pk3"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk4 { + nvidia,pins = "pk4"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk5 { + nvidia,pins = "pk5"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk6 
{ + nvidia,pins = "pk6"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pk7 { + nvidia,pins = "pk7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pl0 { + nvidia,pins = "pl0"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pl1 { + nvidia,pins = "pl1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc1_clk_pm0 { + nvidia,pins = "sdmmc1_clk_pm0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc1_cmd_pm1 { + nvidia,pins = "sdmmc1_cmd_pm1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc1_dat3_pm2 { + nvidia,pins = "sdmmc1_dat3_pm2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc1_dat2_pm3 { + nvidia,pins = "sdmmc1_dat2_pm3"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc1_dat1_pm4 { + nvidia,pins = "sdmmc1_dat1_pm4"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc1_dat0_pm5 { + nvidia,pins = "sdmmc1_dat0_pm5"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc3_clk_pp0 { + nvidia,pins = "sdmmc3_clk_pp0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc3_cmd_pp1 { + nvidia,pins = "sdmmc3_cmd_pp1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc3_dat3_pp2 { + nvidia,pins = "sdmmc3_dat3_pp2"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc3_dat2_pp3 { + nvidia,pins = "sdmmc3_dat2_pp3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc3_dat1_pp4 { + nvidia,pins = "sdmmc3_dat1_pp4"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + sdmmc3_dat0_pp5 { + nvidia,pins = "sdmmc3_dat0_pp5"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam1_mclk_ps0 { + nvidia,pins = "cam1_mclk_ps0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam2_mclk_ps1 { + nvidia,pins = "cam2_mclk_ps1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam_i2c_scl_ps2 { + nvidia,pins = "cam_i2c_scl_ps2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + cam_i2c_sda_ps3 { + nvidia,pins = "cam_i2c_sda_ps3"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + cam_rst_ps4 { + nvidia,pins = "cam_rst_ps4"; + 
nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam_af_en_ps5 { + nvidia,pins = "cam_af_en_ps5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam_flash_en_ps6 { + nvidia,pins = "cam_flash_en_ps6"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam1_pwdn_ps7 { + nvidia,pins = "cam1_pwdn_ps7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam2_pwdn_pt0 { + nvidia,pins = "cam2_pwdn_pt0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cam1_strobe_pt1 { + nvidia,pins = "cam1_strobe_pt1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart1_tx_pu0 { + nvidia,pins = "uart1_tx_pu0"; + nvidia,function = "uarta"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart1_rx_pu1 { + nvidia,pins = "uart1_rx_pu1"; + nvidia,function = "uarta"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart1_rts_pu2 { + nvidia,pins = "uart1_rts_pu2"; + nvidia,function = "uarta"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + uart1_cts_pu3 { + nvidia,pins = "uart1_cts_pu3"; + nvidia,function = "uarta"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + lcd_bl_pwm_pv0 { + nvidia,pins = "lcd_bl_pwm_pv0"; + nvidia,function = "pwm0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + lcd_bl_en_pv1 { + nvidia,pins = "lcd_bl_en_pv1"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + lcd_rst_pv2 { + nvidia,pins = "lcd_rst_pv2"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + lcd_gpio1_pv3 { + nvidia,pins = "lcd_gpio1_pv3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + lcd_gpio2_pv4 { + nvidia,pins = "lcd_gpio2_pv4"; + nvidia,function = "pwm1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + ap_ready_pv5 { + nvidia,pins = "ap_ready_pv5"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + touch_rst_pv6 { + nvidia,pins = "touch_rst_pv6"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + touch_clk_pv7 { + nvidia,pins = "touch_clk_pv7"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + modem_wake_ap_px0 { + nvidia,pins = "modem_wake_ap_px0"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + touch_int_px1 { + nvidia,pins = "touch_int_px1"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + motion_int_px2 { + nvidia,pins = "motion_int_px2"; + nvidia,function = "rsvd0"; + nvidia,pull = ; 
+ nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + als_prox_int_px3 { + nvidia,pins = "als_prox_int_px3"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + temp_alert_px4 { + nvidia,pins = "temp_alert_px4"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + button_power_on_px5 { + nvidia,pins = "button_power_on_px5"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + button_vol_up_px6 { + nvidia,pins = "button_vol_up_px6"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + button_vol_down_px7 { + nvidia,pins = "button_vol_down_px7"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + button_slide_sw_py0 { + nvidia,pins = "button_slide_sw_py0"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + button_home_py1 { + nvidia,pins = "button_home_py1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + lcd_te_py2 { + nvidia,pins = "lcd_te_py2"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pwr_i2c_scl_py3 { + nvidia,pins = "pwr_i2c_scl_py3"; + nvidia,function = "i2cpmu"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + pwr_i2c_sda_py4 { + nvidia,pins = "pwr_i2c_sda_py4"; + nvidia,function = "i2cpmu"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + clk_32k_out_py5 { + nvidia,pins = "clk_32k_out_py5"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pz0 { + nvidia,pins = "pz0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pz1 { + nvidia,pins = "pz1"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pz2 { + nvidia,pins = "pz2"; + nvidia,function = "rsvd2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pz3 { + nvidia,pins = "pz3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pz4 { + nvidia,pins = "pz4"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pz5 { + nvidia,pins = "pz5"; + nvidia,function = "soc"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap2_fs_paa0 { + nvidia,pins = "dap2_fs_paa0"; + nvidia,function = "i2s2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap2_sclk_paa1 { + nvidia,pins = "dap2_sclk_paa1"; + nvidia,function = "i2s2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap2_din_paa2 { + nvidia,pins = "dap2_din_paa2"; + nvidia,function = "i2s2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dap2_dout_paa3 { + nvidia,pins = "dap2_dout_paa3"; + nvidia,function = "i2s2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = 
; + nvidia,open-drain = ; + }; + aud_mclk_pbb0 { + nvidia,pins = "aud_mclk_pbb0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dvfs_pwm_pbb1 { + nvidia,pins = "dvfs_pwm_pbb1"; + nvidia,function = "cldvfs"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + dvfs_clk_pbb2 { + nvidia,pins = "dvfs_clk_pbb2"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + gpio_x1_aud_pbb3 { + nvidia,pins = "gpio_x1_aud_pbb3"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + gpio_x3_aud_pbb4 { + nvidia,pins = "gpio_x3_aud_pbb4"; + nvidia,function = "rsvd0"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + hdmi_cec_pcc0 { + nvidia,pins = "hdmi_cec_pcc0"; + nvidia,function = "cec"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + hdmi_int_dp_hpd_pcc1 { + nvidia,pins = "hdmi_int_dp_hpd_pcc1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + spdif_out_pcc2 { + nvidia,pins = "spdif_out_pcc2"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + spdif_in_pcc3 { + nvidia,pins = "spdif_in_pcc3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + usb_vbus_en0_pcc4 { + nvidia,pins = "usb_vbus_en0_pcc4"; + nvidia,function = "usb"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + usb_vbus_en1_pcc5 { + nvidia,pins = "usb_vbus_en1_pcc5"; + nvidia,function = "usb"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + dp_hpd0_pcc6 { + nvidia,pins = "dp_hpd0_pcc6"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pcc7 { + nvidia,pins = "pcc7"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + nvidia,io-hv = ; + }; + spi2_cs1_pdd0 { + nvidia,pins = "spi2_cs1_pdd0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + qspi_sck_pee0 { + nvidia,pins = "qspi_sck_pee0"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + qspi_cs_n_pee1 { + nvidia,pins = "qspi_cs_n_pee1"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + qspi_io0_pee2 { + nvidia,pins = "qspi_io0_pee2"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + qspi_io1_pee3 { + nvidia,pins = "qspi_io1_pee3"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + qspi_io2_pee4 { + nvidia,pins = "qspi_io2_pee4"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + qspi_io3_pee5 { + nvidia,pins = "qspi_io3_pee5"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; 
+ }; + core_pwr_req { + nvidia,pins = "core_pwr_req"; + nvidia,function = "core"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + cpu_pwr_req { + nvidia,pins = "cpu_pwr_req"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + pwr_int_n { + nvidia,pins = "pwr_int_n"; + nvidia,function = "pmi"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + clk_32k_in { + nvidia,pins = "clk_32k_in"; + nvidia,function = "clk"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + jtag_rtck { + nvidia,pins = "jtag_rtck"; + nvidia,function = "jtag"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + clk_req { + nvidia,pins = "clk_req"; + nvidia,function = "rsvd1"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + shutdown { + nvidia,pins = "shutdown"; + nvidia,function = "shutdown"; + nvidia,pull = ; + nvidia,tristate = ; + nvidia,enable-input = ; + nvidia,open-drain = ; + }; + }; + }; + + serial@70006000 { + status = "okay"; + }; + + i2c@7000d000 { + status = "okay"; + clock-frequency = <400000>; + + max77620: max77620@3c { + compatible = "maxim,max77620"; + reg = <0x3c>; + interrupts = ; + + #interrupt-cells = <2>; + interrupt-controller; + + gpio-controller; + #gpio-cells = <2>; + + pinctrl-names = "default"; + pinctrl-0 = <&max77620_default>; + + max77620_default: pinmux@0 { + pin_gpio0 { + pins = "gpio0"; + function = "gpio"; + }; + + pin_gpio1 { + pins = "gpio1"; + function = "fps-out"; + drive-push-pull = <1>; + maxim,active-fps-source = ; + maxim,active-fps-power-up-slot = <7>; + maxim,active-fps-power-down-slot = <0>; + }; + + pin_gpio2_3 { + pins = "gpio2", "gpio3"; + function = "fps-out"; + drive-open-drain = <1>; + maxim,active-fps-source = ; + }; + + pin_gpio4 { + pins = "gpio4"; + function = "32k-out1"; + }; + + pin_gpio5_6_7 { + pins = "gpio5", "gpio6", "gpio7"; + function = "gpio"; + drive-push-pull = <1>; + }; + + pin_gpio2 { + maxim,active-fps-source = ; + }; + + pin_gpio3 { + maxim,active-fps-source = ; + }; + }; + + spmic-default-output-high { + gpio-hog; + output-high; + gpios = <2 GPIO_ACTIVE_HIGH 7 GPIO_ACTIVE_HIGH>; + }; + + fps { + #address-cells = <1>; + #size-cells = <0>; + + fps0 { + reg = <0>; + maxim,fps-event-source = ; + }; + + fps1 { + reg = <1>; + maxim,fps-event-source = ; + maxim,device-state-on-disabled-event = ; + }; + + fps2 { + reg = <2>; + maxim,fps-event-source = ; + }; + }; + + regulators { + in-ldo0-1-supply = <&max77620_sd2>; + in-ldo7-8-supply = <&max77620_sd2>; + + max77620_sd0: sd0 { + regulator-name = "vdd-core"; + regulator-enable-ramp-delay = <146>; + regulator-min-microvolt = <600000>; + regulator-max-microvolt = <1400000>; + regulator-ramp-delay = <27500>; + regulator-always-on; + regulator-boot-on; + + maxim,active-fps-power-up-slot = <0>; + maxim,active-fps-source = ; + }; + + max77620_sd1: sd1 { + regulator-name = "vddio-ddr"; + regulator-enable-ramp-delay = <130>; + regulator-ramp-delay = <27500>; + regulator-always-on; + regulator-boot-on; + + maxim,active-fps-source = ; + }; + + max77620_sd2: sd2 { + regulator-name = "vdd-pre-reg"; + regulator-enable-ramp-delay = <176>; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-ramp-delay = <27500>; + regulator-always-on; + regulator-boot-on; + + 
maxim,active-fps-source = ; + maxim,suspend-fps-source = ; + }; + + max77620_sd3: sd3 { + regulator-name = "vdd-1v8"; + regulator-enable-ramp-delay = <242>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-ramp-delay = <27500>; + regulator-always-on; + regulator-boot-on; + + maxim,active-fps-source = ; + }; + + max77620_ldo0: ldo0 { + regulator-name = "avdd-sys"; + regulator-enable-ramp-delay = <26>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + regulator-ramp-delay = <100000>; + regulator-boot-on; + + maxim,active-fps-source = ; + }; + + max77620_ldo1: ldo1 { + regulator-name = "vdd-pex"; + regulator-enable-ramp-delay = <22>; + regulator-min-microvolt = <1075000>; + regulator-max-microvolt = <1075000>; + regulator-ramp-delay = <100000>; + regulator-always-on; + + maxim,active-fps-source = ; + }; + + max77620_ldo2: ldo2 { + regulator-name = "vddio-sdmmc3"; + regulator-enable-ramp-delay = <62>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-ramp-delay = <100000>; + + maxim,active-fps-source = ; + }; + + max77620_ldo3: ldo3 { + regulator-name = "vdd-3v3-eth"; + regulator-enable-ramp-delay = <50>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-ramp-delay = <100000>; + regulator-always-on; + regulator-boot-on; + + maxim,active-fps-source = ; + }; + + max77620_ldo4: ldo4 { + regulator-name = "vdd-rtc"; + regulator-enable-ramp-delay = <22>; + regulator-min-microvolt = <850000>; + regulator-max-microvolt = <850000>; + regulator-ramp-delay = <100000>; + regulator-always-on; + regulator-boot-on; + + maxim,active-fps-source = ; + }; + + max77620_ldo5: ldo5 { + regulator-name = "avdd-ts-hv"; + regulator-enable-ramp-delay = <62>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-ramp-delay = <100000>; + + maxim,active-fps-source = ; + }; + + max77620_ldo6: ldo6 { + regulator-name = "vdd-ts"; + regulator-enable-ramp-delay = <36>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-ramp-delay = <100000>; + regulator-boot-on; + + maxim,active-fps-source = ; + }; + + max77620_ldo7: ldo7 { + regulator-name = "vdd-gen-pll-edp"; + regulator-enable-ramp-delay = <24>; + regulator-min-microvolt = <1050000>; + regulator-max-microvolt = <1050000>; + regulator-ramp-delay = <100000>; + regulator-always-on; + regulator-boot-on; + + maxim,active-fps-source = ; + maxim,suspend-fps-source = ; + }; + + max77620_ldo8: ldo8 { + regulator-name = "vdd-hdmi-dp"; + regulator-enable-ramp-delay = <22>; + regulator-min-microvolt = <1050000>; + regulator-max-microvolt = <1050000>; + regulator-ramp-delay = <100000>; + regulator-always-on; + regulator-boot-on; + + maxim,active-fps-source = ; + }; + }; + }; + }; + + pmc@7000e400 { + nvidia,invert-interrupt; + nvidia,suspend-mode = <0>; + nvidia,cpu-pwr-good-time = <0>; + nvidia,cpu-pwr-off-time = <0>; + nvidia,core-pwr-good-time = <4587 3876>; + nvidia,core-pwr-off-time = <39065>; + nvidia,core-power-req-active-high; + nvidia,sys-clock-req-active-high; + status = "okay"; + }; + + sdhci@700b0600 { + bus-width = <8>; + non-removable; + status = "okay"; + }; + + clocks { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <0>; + + clk32k_in: clock@0 { + compatible = "fixed-clock"; + reg = <0>; + #clock-cells = <0>; + clock-frequency = <32768>; + }; + }; + + gpio-keys { + compatible = "gpio-keys"; + status = "okay"; + + power { + 
debounce-interval = <30>; + gpios = <&gpio TEGRA_GPIO(X, 7) GPIO_ACTIVE_LOW>; + label = "Power"; + linux,code = ; + wakeup-event-action = ; + wakeup-source; + }; + }; + + cpus { + cpu@0 { + enable-method = "psci"; + }; + + cpu@1 { + enable-method = "psci"; + }; + + cpu@2 { + enable-method = "psci"; + }; + + cpu@3 { + enable-method = "psci"; + }; + }; + + psci { + compatible = "arm,psci-1.0"; + method = "smc"; + }; + + regulators { + compatible = "simple-bus"; + device_type = "fixed-regulators"; + #address-cells = <1>; + #size-cells = <0>; + + battery_reg: regulator@0 { + compatible = "regulator-fixed"; + reg = <0>; + regulator-name = "vdd-ac-bat"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + }; + + vdd_3v3: regulator@1 { + compatible = "regulator-fixed"; + reg = <1>; + regulator-name = "vdd-3v3"; + regulator-enable-ramp-delay = <160>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + + gpio = <&max77620 3 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + max77620_gpio7: regulator@2 { + compatible = "regulator-fixed"; + reg = <2>; + regulator-name = "max77620-gpio7"; + regulator-enable-ramp-delay = <240>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + vin-supply = <&max77620_ldo0>; + regulator-always-on; + regulator-boot-on; + + gpio = <&max77620 7 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + lcd_bl_en: regulator@3 { + compatible = "regulator-fixed"; + reg = <3>; + regulator-name = "lcd-bl-en"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-boot-on; + + gpio = <&gpio TEGRA_GPIO(V, 1) GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + en_vdd_sd: regulator@4 { + compatible = "regulator-fixed"; + reg = <4>; + regulator-name = "en-vdd-sd"; + regulator-enable-ramp-delay = <472>; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vdd_3v3>; + + gpio = <&gpio TEGRA_GPIO(Z, 4) GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + en_vdd_cam: regulator@5 { + compatible = "regulator-fixed"; + reg = <5>; + regulator-name = "en-vdd-cam"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + + gpio = <&gpio TEGRA_GPIO(S, 4) GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + vdd_sys_boost: regulator@6 { + compatible = "regulator-fixed"; + reg = <6>; + regulator-name = "vdd-sys-boost"; + regulator-enable-ramp-delay = <3090>; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-always-on; + + gpio = <&max77620 1 GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + vdd_hdmi: regulator@7 { + compatible = "regulator-fixed"; + reg = <7>; + regulator-name = "vdd-hdmi"; + regulator-enable-ramp-delay = <468>; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vdd_sys_boost>; + regulator-boot-on; + + gpio = <&gpio TEGRA_GPIO(CC, 7) GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + en_vdd_cpu_fixed: regulator@8 { + compatible = "regulator-fixed"; + reg = <8>; + regulator-name = "vdd-cpu-fixed"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; + }; + + vdd_aux_3v3: regulator@9 { + compatible = "regulator-fixed"; + reg = <9>; + regulator-name = "aux-3v3"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + vdd_snsr_pm: regulator@10 { + compatible = "regulator-fixed"; + reg = <10>; + regulator-name = "snsr_pm"; + 
regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + enable-active-high; + }; + + vdd_usb_5v0: regulator@11 { + compatible = "regulator-fixed"; + reg = <11>; + status = "disabled"; + regulator-name = "vdd-usb-5v0"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vdd_3v3>; + + enable-active-high; + }; + + vdd_cdc_1v2_aud: regulator@101 { + compatible = "regulator-fixed"; + reg = <101>; + status = "disabled"; + regulator-name = "vdd_cdc_1v2_aud"; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <1200000>; + startup-delay-us = <250000>; + + enable-active-high; + }; + + vdd_disp_3v0: regulator@12 { + compatible = "regulator-fixed"; + reg = <12>; + regulator-name = "vdd-disp-3v0"; + regulator-enable-ramp-delay = <232>; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-always-on; + + gpio = <&gpio TEGRA_GPIO(I, 3) GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + vdd_fan: regulator@13 { + compatible = "regulator-fixed"; + reg = <13>; + regulator-name = "vdd-fan"; + regulator-enable-ramp-delay = <284>; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + + gpio = <&gpio TEGRA_GPIO(E, 4) GPIO_ACTIVE_HIGH>; + enable-active-high; + }; + + usb_vbus1: regulator@14 { + compatible = "regulator-fixed"; + reg = <14>; + regulator-name = "usb-vbus1"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + + gpio = <&gpio TEGRA_GPIO(CC, 5) GPIO_ACTIVE_HIGH>; + enable-active-high; + gpio-open-drain; + }; + + usb_vbus2: regulator@15 { + compatible = "regulator-fixed"; + reg = <15>; + regulator-name = "usb-vbus2"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + + gpio = <&gpio TEGRA_GPIO(CC, 4) GPIO_ACTIVE_HIGH>; + enable-active-high; + gpio-open-drain; + }; + + vdd_3v3_eth: regulator@16 { + compatible = "regulator-fixed"; + reg = <16>; + regulator-name = "vdd-3v3-eth-a02"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + + gpio = <&gpio TEGRA_GPIO(D, 4) GPIO_ACTIVE_HIGH>; + enable-active-high; + gpio-open-drain; + }; + }; +}; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts index 43cae4798870..a4b8f668a6d4 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts +++ b/arch/arm64/boot/dts/nvidia/tegra210-smaug.dts @@ -1340,10 +1340,29 @@ status = "okay"; clock-frequency = <1000000>; + max77621_cpu: max77621@1b { + compatible = "maxim,max77621"; + reg = <0x1b>; + interrupt-parent = <&gpio>; + interrupts = ; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1231250>; + regulator-name = "PPVAR_CPU"; + regulator-ramp-delay = <12500>; + maxim,dvs-default-state = <1>; + maxim,enable-active-discharge; + maxim,enable-bias-control; + maxim,enable-etr; + maxim,enable-gpio = <&max77620 5 0>; + maxim,externally-enable; + }; + max77620: max77620@3c { compatible = "maxim,max77620"; reg = <0x3c>; - interrupts = <0 86 IRQ_TYPE_NONE>; + interrupts = ; #interrupt-cells = <2>; interrupt-controller; @@ -1679,6 +1698,18 @@ status = "okay"; }; + clock@70110000 { + status = "okay"; + nvidia,cf = <6>; + nvidia,ci = <0>; + nvidia,cg = <2>; + nvidia,droop-ctrl = <0x00000f00>; + nvidia,force-mode = <1>; + nvidia,i2c-fs-rate = <400000>; + nvidia,sample-rate = <12500>; + vdd-cpu-supply = <&max77621_cpu>; + }; + aconnect@702c0000 { 
status = "okay"; @@ -1724,7 +1755,6 @@ gpio-keys { compatible = "gpio-keys"; - gpio-keys,name = "gpio-keys"; power { label = "Power"; diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi index 2205d66b0443..6574396d2257 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -469,13 +470,55 @@ apbmisc@70000800 { compatible = "nvidia,tegra210-apbmisc", "nvidia,tegra20-apbmisc"; reg = <0x0 0x70000800 0x0 0x64>, /* Chip revision */ - <0x0 0x7000e864 0x0 0x04>; /* Strapping options */ + <0x0 0x70000008 0x0 0x04>; /* Strapping options */ }; pinmux: pinmux@700008d4 { compatible = "nvidia,tegra210-pinmux"; reg = <0x0 0x700008d4 0x0 0x29c>, /* Pad control registers */ <0x0 0x70003000 0x0 0x294>; /* Mux registers */ + sdmmc1_3v3_drv: sdmmc1-3v3-drv { + sdmmc1 { + nvidia,pins = "drive_sdmmc1"; + nvidia,pull-down-strength = <0x8>; + nvidia,pull-up-strength = <0x8>; + }; + }; + sdmmc1_1v8_drv: sdmmc1-1v8-drv { + sdmmc1 { + nvidia,pins = "drive_sdmmc1"; + nvidia,pull-down-strength = <0x4>; + nvidia,pull-up-strength = <0x3>; + }; + }; + sdmmc2_1v8_drv: sdmmc2-1v8-drv { + sdmmc2 { + nvidia,pins = "drive_sdmmc2"; + nvidia,pull-down-strength = <0x10>; + nvidia,pull-up-strength = <0x10>; + }; + }; + sdmmc3_3v3_drv: sdmmc3-3v3-drv { + sdmmc3 { + nvidia,pins = "drive_sdmmc3"; + nvidia,pull-down-strength = <0x8>; + nvidia,pull-up-strength = <0x8>; + }; + }; + sdmmc3_1v8_drv: sdmmc3-1v8-drv { + sdmmc3 { + nvidia,pins = "drive_sdmmc3"; + nvidia,pull-down-strength = <0x4>; + nvidia,pull-up-strength = <0x3>; + }; + }; + sdmmc4_1v8_drv: sdmmc4-1v8-drv { + sdmmc4 { + nvidia,pins = "drive_sdmmc4"; + nvidia,pull-down-strength = <0x10>; + nvidia,pull-up-strength = <0x10>; + }; + }; }; /* @@ -554,7 +597,7 @@ }; i2c@7000c000 { - compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c"; reg = <0x0 0x7000c000 0x0 0x100>; interrupts = ; #address-cells = <1>; @@ -569,7 +612,7 @@ }; i2c@7000c400 { - compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c"; reg = <0x0 0x7000c400 0x0 0x100>; interrupts = ; #address-cells = <1>; @@ -584,7 +627,7 @@ }; i2c@7000c500 { - compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c"; reg = <0x0 0x7000c500 0x0 0x100>; interrupts = ; #address-cells = <1>; @@ -599,7 +642,7 @@ }; i2c@7000c700 { - compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c"; reg = <0x0 0x7000c700 0x0 0x100>; interrupts = ; #address-cells = <1>; @@ -617,7 +660,7 @@ }; i2c@7000d000 { - compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c"; reg = <0x0 0x7000d000 0x0 0x100>; interrupts = ; #address-cells = <1>; @@ -632,7 +675,7 @@ }; i2c@7000d100 { - compatible = "nvidia,tegra210-i2c", "nvidia,tegra114-i2c"; + compatible = "nvidia,tegra210-i2c", "nvidia,tegra124-i2c"; reg = <0x0 0x7000d100 0x0 0x100>; interrupts = ; #address-cells = <1>; @@ -1050,9 +1093,12 @@ clock-names = "sdhci"; resets = <&tegra_car 14>; reset-names = "sdhci"; - pinctrl-names = "sdmmc-3v3", "sdmmc-1v8"; + pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", + "sdmmc-3v3-drv", "sdmmc-1v8-drv"; pinctrl-0 = <&sdmmc1_3v3>; pinctrl-1 = <&sdmmc1_1v8>; + pinctrl-2 = <&sdmmc1_3v3_drv>; + pinctrl-3 
= <&sdmmc1_1v8_drv>; nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>; nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>; nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>; @@ -1075,6 +1121,8 @@ clock-names = "sdhci"; resets = <&tegra_car 9>; reset-names = "sdhci"; + pinctrl-names = "sdmmc-1v8-drv"; + pinctrl-0 = <&sdmmc2_1v8_drv>; nvidia,pad-autocal-pull-up-offset-1v8 = <0x05>; nvidia,pad-autocal-pull-down-offset-1v8 = <0x05>; nvidia,default-tap = <0x8>; @@ -1090,9 +1138,12 @@ clock-names = "sdhci"; resets = <&tegra_car 69>; reset-names = "sdhci"; - pinctrl-names = "sdmmc-3v3", "sdmmc-1v8"; + pinctrl-names = "sdmmc-3v3", "sdmmc-1v8", + "sdmmc-3v3-drv", "sdmmc-1v8-drv"; pinctrl-0 = <&sdmmc3_3v3>; pinctrl-1 = <&sdmmc3_1v8>; + pinctrl-2 = <&sdmmc3_3v3_drv>; + pinctrl-3 = <&sdmmc3_1v8_drv>; nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>; nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>; nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>; @@ -1110,6 +1161,9 @@ clock-names = "sdhci"; resets = <&tegra_car 15>; reset-names = "sdhci"; + pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv"; + pinctrl-0 = <&sdmmc4_1v8_drv>; + pinctrl-1 = <&sdmmc4_1v8_drv>; nvidia,pad-autocal-pull-up-offset-1v8 = <0x05>; nvidia,pad-autocal-pull-down-offset-1v8 = <0x05>; nvidia,default-tap = <0x8>; @@ -1131,6 +1185,24 @@ #nvidia,mipi-calibrate-cells = <1>; }; + dfll: clock@70110000 { + compatible = "nvidia,tegra210-dfll"; + reg = <0 0x70110000 0 0x100>, /* DFLL control */ + <0 0x70110000 0 0x100>, /* I2C output control */ + <0 0x70110100 0 0x100>, /* Integrated I2C controller */ + <0 0x70110200 0 0x100>; /* Look-up table RAM */ + interrupts = ; + clocks = <&tegra_car TEGRA210_CLK_DFLL_SOC>, + <&tegra_car TEGRA210_CLK_DFLL_REF>, + <&tegra_car TEGRA210_CLK_I2C5>; + clock-names = "soc", "ref", "i2c"; + resets = <&tegra_car TEGRA210_RST_DFLL_DVCO>; + reset-names = "dvco"; + #clock-cells = <0>; + clock-output-names = "dfllCPU_out"; + status = "disabled"; + }; + aconnect@702c0000 { compatible = "nvidia,tegra210-aconnect"; clocks = <&tegra_car TEGRA210_CLK_APE>, @@ -1285,6 +1357,12 @@ device_type = "cpu"; compatible = "arm,cortex-a57"; reg = <0>; + clocks = <&tegra_car TEGRA210_CLK_CCLK_G>, + <&tegra_car TEGRA210_CLK_PLL_X>, + <&tegra_car TEGRA210_CLK_PLL_P_OUT4>, + <&dfll>; + clock-names = "cpu_g", "pll_x", "pll_p", "dfll"; + clock-latency = <300000>; }; cpu@1 { diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi index 46feedf7c989..134617d87a1a 100644 --- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi +++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi @@ -644,6 +644,8 @@ l11 { regulator-min-microvolt = <1750000>; regulator-max-microvolt = <3337000>; + regulator-allow-set-load; + regulator-system-load = <200000>; }; l12 { diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi index 18226980f7c3..aea1dbc3f53e 100644 --- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi @@ -441,7 +441,7 @@ CPU0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0>; next-level-cache = <&L2_0>; enable-method = "psci"; @@ -449,7 +449,7 @@ CPU1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; enable-method = "psci"; reg = <0x1>; next-level-cache = <&L2_0>; @@ -457,7 +457,7 @@ CPU2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; enable-method = "psci"; reg = <0x2>; 
next-level-cache = <&L2_0>; @@ -465,7 +465,7 @@ CPU3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; enable-method = "psci"; reg = <0x3>; next-level-cache = <&L2_0>; diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index c5348c3da5a2..0803ca8c02da 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -106,48 +106,48 @@ CPU0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0>; next-level-cache = <&L2_0>; enable-method = "psci"; cpu-idle-states = <&CPU_SPC>; - clocks = <&apcs 0>; + clocks = <&apcs>; operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; }; CPU1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x1>; next-level-cache = <&L2_0>; enable-method = "psci"; cpu-idle-states = <&CPU_SPC>; - clocks = <&apcs 0>; + clocks = <&apcs>; operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; }; CPU2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x2>; next-level-cache = <&L2_0>; enable-method = "psci"; cpu-idle-states = <&CPU_SPC>; - clocks = <&apcs 0>; + clocks = <&apcs>; operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; }; CPU3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x3>; next-level-cache = <&L2_0>; enable-method = "psci"; cpu-idle-states = <&CPU_SPC>; - clocks = <&apcs 0>; + clocks = <&apcs>; operating-points-v2 = <&cpu_opp_table>; #cooling-cells = <2>; }; diff --git a/arch/arm64/boot/dts/qcom/msm8992.dtsi b/arch/arm64/boot/dts/qcom/msm8992.dtsi index cf5cacdd624d..50cefb822d6d 100644 --- a/arch/arm64/boot/dts/qcom/msm8992.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8992.dtsi @@ -38,7 +38,7 @@ CPU0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; next-level-cache = <&L2_0>; L2_0: l2-cache { diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi index f33c41d01c86..6a4049aae0c3 100644 --- a/arch/arm64/boot/dts/qcom/msm8994.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi @@ -40,7 +40,7 @@ CPU0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0>; next-level-cache = <&L2_0>; L2_0: l2-cache { diff --git a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi index 8d5114d16d09..131878db9852 100644 --- a/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996-pins.dtsi @@ -139,7 +139,7 @@ }; pinconf { - pins = "gpio4", "gpiio5", "gpio6", "gpio7"; + pins = "gpio4", "gpio5", "gpio6", "gpio7"; drive-strength = <2>; bias-disable; }; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 99b7495455a6..c761269caf80 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -306,6 +306,40 @@ #clock-cells = <1>; }; + rpmpd: power-controller { + compatible = "qcom,msm8996-rpmpd"; + #power-domain-cells = <1>; + operating-points-v2 = <&rpmpd_opp_table>; + + rpmpd_opp_table: opp-table { + compatible = "operating-points-v2"; + + rpmpd_opp1: opp1 { + opp-level = <1>; + }; + + rpmpd_opp2: opp2 { + opp-level = <2>; + }; + + rpmpd_opp3: opp3 { + 
opp-level = <3>; + }; + + rpmpd_opp4: opp4 { + opp-level = <4>; + }; + + rpmpd_opp5: opp5 { + opp-level = <5>; + }; + + rpmpd_opp6: opp6 { + opp-level = <6>; + }; + }; + }; + pm8994-regulators { compatible = "qcom,rpm-pm8994-regulators"; @@ -404,7 +438,7 @@ }; intc: interrupt-controller@9bc0000 { - compatible = "arm,gic-v3"; + compatible = "qcom,msm8996-gic-v3", "arm,gic-v3"; #interrupt-cells = <3>; interrupt-controller; #redistributor-regions = <1>; @@ -966,7 +1000,7 @@ clock-names = "iface", "bus"; #iommu-cells = <1>; - status = "ok"; + status = "disabled"; }; camss: camss@a00000 { diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi index 50e9033aa7f6..f0901067b043 100644 --- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi @@ -65,6 +65,13 @@ status = "okay"; }; +&qusb2phy { + status = "okay"; + + vdda-pll-supply = <&vreg_l12a_1p8>; + vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; +}; + &rpm_requests { pm8998-regulators { compatible = "qcom,rpm-pm8998-regulators"; @@ -192,6 +199,8 @@ vreg_l21a_2p95: l21 { regulator-min-microvolt = <2960000>; regulator-max-microvolt = <2960000>; + regulator-allow-set-load; + regulator-system-load = <800000>; }; vreg_l22a_2p85: l22 { regulator-min-microvolt = <2864000>; @@ -257,3 +266,18 @@ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; }; + +&usb3 { + status = "okay"; +}; + +&usb3_dwc3 { + dr_mode = "host"; /* Force to host until we have Type-C hooked up */ +}; + +&usb3phy { + status = "okay"; + + vdda-phy-supply = <&vreg_l1a_0p875>; + vdda-pll-supply = <&vreg_l2a_1p2>; +}; diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi index 8d41b69ec2da..3fd0769fe648 100644 --- a/arch/arm64/boot/dts/qcom/msm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi @@ -3,6 +3,7 @@ #include #include +#include #include / { @@ -37,7 +38,7 @@ }; memory@86200000 { - reg = <0x0 0x86200000 0x0 0x2600000>; + reg = <0x0 0x86200000 0x0 0x2d00000>; no-map; }; @@ -266,6 +267,11 @@ rpm_requests: rpm-requests { compatible = "qcom,rpm-msm8998"; qcom,glink-channels = "rpm_requests"; + + rpmcc: clock-controller { + compatible = "qcom,rpmcc-msm8998", "qcom,rpmcc"; + #clock-cells = <1>; + }; }; }; @@ -540,6 +546,11 @@ reg = <0x780000 0x621c>; #address-cells = <1>; #size-cells = <1>; + + qusb2_hstx_trim: hstx-trim@423a { + reg = <0x423a 0x1>; + bits = <0 4>; + }; }; gcc: clock-controller@100000 { @@ -607,6 +618,93 @@ #mbox-cells = <1>; }; + usb3: usb@a8f8800 { + compatible = "qcom,msm8998-dwc3", "qcom,dwc3"; + reg = <0x0a8f8800 0x400>; + status = "disabled"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + clocks = <&gcc GCC_CFG_NOC_USB3_AXI_CLK>, + <&gcc GCC_USB30_MASTER_CLK>, + <&gcc GCC_AGGRE1_USB3_AXI_CLK>, + <&gcc GCC_USB30_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_SLEEP_CLK>; + clock-names = "cfg_noc", "core", "iface", "mock_utmi", + "sleep"; + + assigned-clocks = <&gcc GCC_USB30_MOCK_UTMI_CLK>, + <&gcc GCC_USB30_MASTER_CLK>; + assigned-clock-rates = <19200000>, <120000000>; + + interrupts = , + ; + interrupt-names = "hs_phy_irq", "ss_phy_irq"; + + power-domains = <&gcc USB_30_GDSC>; + + resets = <&gcc GCC_USB_30_BCR>; + + usb3_dwc3: dwc3@a800000 { + compatible = "snps,dwc3"; + reg = <0x0a800000 0xcd00>; + interrupts = ; + snps,dis_u2_susphy_quirk; + snps,dis_enblslpm_quirk; + phys = <&qusb2phy>, <&usb1_ssphy>; + phy-names = "usb2-phy", "usb3-phy"; + 
snps,has-lpm-erratum; + snps,hird-threshold = /bits/ 8 <0x10>; + }; + }; + + usb3phy: phy@c010000 { + compatible = "qcom,msm8998-qmp-usb3-phy"; + reg = <0x0c010000 0x18c>; + status = "disabled"; + #clock-cells = <1>; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + clocks = <&gcc GCC_USB3_PHY_AUX_CLK>, + <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_USB3_CLKREF_CLK>; + clock-names = "aux", "cfg_ahb", "ref"; + + resets = <&gcc GCC_USB3_PHY_BCR>, + <&gcc GCC_USB3PHY_PHY_BCR>; + reset-names = "phy", "common"; + + usb1_ssphy: lane@c010200 { + reg = <0xc010200 0x128>, + <0xc010400 0x200>, + <0xc010c00 0x20c>, + <0xc010600 0x128>, + <0xc010800 0x200>; + #phy-cells = <0>; + clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>; + clock-names = "pipe0"; + clock-output-names = "usb3_phy_pipe_clk_src"; + }; + }; + + qusb2phy: phy@c012000 { + compatible = "qcom,msm8998-qusb2-phy"; + reg = <0x0c012000 0x2a8>; + status = "disabled"; + #phy-cells = <0>; + + clocks = <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&gcc GCC_RX1_USB2_CLKREF_CLK>; + clock-names = "cfg_ahb", "ref"; + + resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>; + + nvmem-cells = <&qusb2_hstx_trim>; + }; + sdhc2: sdhci@c0a4900 { compatible = "qcom,sdhci-msm-v4"; reg = <0xc0a4900 0x314>, <0xc0a4000 0x800>; @@ -624,6 +722,186 @@ status = "disabled"; }; + blsp1_i2c1: i2c@c175000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c175000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp1_i2c2: i2c@c176000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c176000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp1_i2c3: i2c@c177000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c177000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp1_i2c4: i2c@c178000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c178000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp1_i2c5: i2c@c179000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c179000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP1_QUP5_I2C_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp1_i2c6: i2c@c17a000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c17a000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>, + <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp2_i2c0: i2c@c1b5000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c1b5000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP2_QUP1_I2C_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + 
#address-cells = <1>; + #size-cells = <0>; + }; + + blsp2_i2c1: i2c@c1b6000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c1b6000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP2_QUP2_I2C_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp2_i2c2: i2c@c1b7000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c1b7000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP2_QUP3_I2C_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp2_i2c3: i2c@c1b8000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c1b8000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP2_QUP4_I2C_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp2_i2c4: i2c@c1b9000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c1b9000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP2_QUP5_I2C_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + + blsp2_i2c5: i2c@c1ba000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x0c175000 0x600>; + interrupts = ; + + clocks = <&gcc GCC_BLSP2_QUP6_I2C_APPS_CLK>, + <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + clock-frequency = <400000>; + + status = "disabled"; + #address-cells = <1>; + #size-cells = <0>; + }; + blsp2_uart1: serial@c1b0000 { compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; reg = <0xc1b0000 0x1000>; diff --git a/arch/arm64/boot/dts/qcom/pm8005.dtsi b/arch/arm64/boot/dts/qcom/pm8005.dtsi index 4d5aca3eeb69..c0ddf128136c 100644 --- a/arch/arm64/boot/dts/qcom/pm8005.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8005.dtsi @@ -16,10 +16,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi index 15a37cbcd216..9dd2df1cbf47 100644 --- a/arch/arm64/boot/dts/qcom/pm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi @@ -32,6 +32,12 @@ bias-pull-up; linux,code = ; }; + + watchdog { + compatible = "qcom,pm8916-wdt"; + interrupts = <0x0 0x8 6 IRQ_TYPE_EDGE_RISING>; + timeout-sec = <60>; + }; }; pm8916_gpios: gpios@c000 { diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi index f1025a50c227..43cb5ea14089 100644 --- a/arch/arm64/boot/dts/qcom/pm8998.dtsi +++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi @@ -94,32 +94,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>, - <0 0xc4 0 IRQ_TYPE_NONE>, - <0 0xc5 0 IRQ_TYPE_NONE>, - <0 0xc6 0 IRQ_TYPE_NONE>, - <0 0xc7 0 IRQ_TYPE_NONE>, - <0 0xc8 0 IRQ_TYPE_NONE>, - <0 0xc9 0 IRQ_TYPE_NONE>, - <0 0xca 0 IRQ_TYPE_NONE>, - <0 0xcb 0 IRQ_TYPE_NONE>, - <0 0xcc 0 IRQ_TYPE_NONE>, - <0 0xcd 0 IRQ_TYPE_NONE>, - <0 0xce 0 IRQ_TYPE_NONE>, - <0 0xcf 0 IRQ_TYPE_NONE>, - <0 0xd0 0 IRQ_TYPE_NONE>, - <0 0xd1 0 IRQ_TYPE_NONE>, - <0 0xd2 0 IRQ_TYPE_NONE>, - <0 0xd3 0 IRQ_TYPE_NONE>, - <0 
0xd4 0 IRQ_TYPE_NONE>, - <0 0xd5 0 IRQ_TYPE_NONE>, - <0 0xd6 0 IRQ_TYPE_NONE>, - <0 0xd7 0 IRQ_TYPE_NONE>, - <0 0xd8 0 IRQ_TYPE_NONE>, - <0 0xd9 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi index dae1cdc23f54..3aee10e3f921 100644 --- a/arch/arm64/boot/dts/qcom/pmi8994.dtsi +++ b/arch/arm64/boot/dts/qcom/pmi8994.dtsi @@ -15,16 +15,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <2 0xc0 0 IRQ_TYPE_NONE>, - <2 0xc1 0 IRQ_TYPE_NONE>, - <2 0xc2 0 IRQ_TYPE_NONE>, - <2 0xc3 0 IRQ_TYPE_NONE>, - <2 0xc4 0 IRQ_TYPE_NONE>, - <2 0xc5 0 IRQ_TYPE_NONE>, - <2 0xc6 0 IRQ_TYPE_NONE>, - <2 0xc7 0 IRQ_TYPE_NONE>, - <2 0xc8 0 IRQ_TYPE_NONE>, - <2 0xc9 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi index da3285e216e2..051f57e7d6ac 100644 --- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi +++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi @@ -14,20 +14,8 @@ reg = <0xc000>; gpio-controller; #gpio-cells = <2>; - interrupts = <0 0xc0 0 IRQ_TYPE_NONE>, - <0 0xc1 0 IRQ_TYPE_NONE>, - <0 0xc2 0 IRQ_TYPE_NONE>, - <0 0xc3 0 IRQ_TYPE_NONE>, - <0 0xc4 0 IRQ_TYPE_NONE>, - <0 0xc5 0 IRQ_TYPE_NONE>, - <0 0xc6 0 IRQ_TYPE_NONE>, - <0 0xc7 0 IRQ_TYPE_NONE>, - <0 0xc8 0 IRQ_TYPE_NONE>, - <0 0xc9 0 IRQ_TYPE_NONE>, - <0 0xca 0 IRQ_TYPE_NONE>, - <0 0xcb 0 IRQ_TYPE_NONE>, - <0 0xcc 0 IRQ_TYPE_NONE>, - <0 0xcd 0 IRQ_TYPE_NONE>; + interrupt-controller; + #interrupt-cells = <2>; }; }; diff --git a/arch/arm64/boot/dts/qcom/pms405.dtsi b/arch/arm64/boot/dts/qcom/pms405.dtsi index ad2b62dfc9f6..1bb836d1e8aa 100644 --- a/arch/arm64/boot/dts/qcom/pms405.dtsi +++ b/arch/arm64/boot/dts/qcom/pms405.dtsi @@ -3,6 +3,32 @@ #include #include +#include +#include + +/ { + thermal-zones { + pms405 { + polling-delay-passive = <250>; + polling-delay = <1000>; + + thermal-sensors = <&pms405_temp>; + + trips { + pms405_alert0: pms405-alert0 { + temperature = <105000>; + hysteresis = <2000>; + type = "passive"; + }; + pms405_crit: pms405-crit { + temperature = <125000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + }; + }; +}; &spmi_bus { pms405_0: pms405@0 { @@ -45,6 +71,59 @@ }; }; + pms405_temp: temp-alarm@2400 { + compatible = "qcom,spmi-temp-alarm"; + reg = <0x2400>; + interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>; + io-channels = <&pms405_adc ADC5_DIE_TEMP>; + io-channel-names = "thermal"; + #thermal-sensor-cells = <0>; + }; + + pms405_adc: adc@3100 { + compatible = "qcom,pms405-adc", "qcom,spmi-adc-rev2"; + reg = <0x3100>; + interrupts = <0x0 0x31 0x0 IRQ_TYPE_EDGE_RISING>; + #address-cells = <1>; + #size-cells = <0>; + #io-channel-cells = <1>; + + ref_gnd { + reg = ; + qcom,pre-scaling = <1 1>; + }; + + vref_1p25 { + reg = ; + qcom,pre-scaling = <1 1>; + }; + + vph_pwr { + reg = ; + qcom,pre-scaling = <1 3>; + }; + + die_temp { + reg = ; + qcom,pre-scaling = <1 1>; + }; + + xo_therm_100k_pu { + reg = ; + qcom,pre-scaling = <1 1>; + }; + + amux_thm1_100k_pu { + reg = ; + qcom,pre-scaling = <1 1>; + }; + + amux_thm3_100k_pu { + reg = ; + qcom,pre-scaling = <1 1>; + }; + }; + rtc@6000 { compatible = "qcom,pm8941-rtc"; reg = <0x6000>; diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi index a39924efebe4..50b3589c7f15 100644 --- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi @@ -127,6 +127,7 @@ status = "ok"; 
mmc-ddr-1_8v; + mmc-hs400-1_8v; bus-width = <8>; non-removable; @@ -186,3 +187,21 @@ }; }; }; + +&wifi { + status = "okay"; +}; + +/* PINCTRL - additions to nodes defined in qcs404.dtsi */ + +&blsp1_uart2_default { + rx { + drive-strength = <2>; + bias-disable; + }; + + tx { + drive-strength = <2>; + bias-disable; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/qcs404.dtsi b/arch/arm64/boot/dts/qcom/qcs404.dtsi index 9b5c16562bbe..e8fd26633d57 100644 --- a/arch/arm64/boot/dts/qcom/qcs404.dtsi +++ b/arch/arm64/boot/dts/qcom/qcs404.dtsi @@ -3,6 +3,7 @@ #include #include +#include / { interrupt-parent = <&intc>; @@ -224,6 +225,11 @@ rpm_requests: glink-channel { compatible = "qcom,rpm-qcs404"; qcom,glink-channels = "rpm_requests"; + + rpmcc: clock-controller { + compatible = "qcom,rpmcc-qcs404"; + #clock-cells = <1>; + }; }; }; @@ -272,6 +278,105 @@ #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; + + blsp1_i2c0_default: blsp1-i2c0-default { + pins = "gpio32", "gpio33"; + function = "blsp_i2c0"; + }; + + blsp1_i2c1_default: blsp1-i2c1-default { + pins = "gpio24", "gpio25"; + function = "blsp_i2c1"; + }; + + blsp1_i2c2_default: blsp1-i2c2-default { + sda { + pins = "gpio19"; + function = "blsp_i2c_sda_a2"; + }; + + scl { + pins = "gpio20"; + function = "blsp_i2c_scl_a2"; + }; + }; + + blsp1_i2c3_default: blsp1-i2c3-default { + pins = "gpio84", "gpio85"; + function = "blsp_i2c3"; + }; + + blsp1_i2c4_default: blsp1-i2c4-default { + pins = "gpio117", "gpio118"; + function = "blsp_i2c4"; + }; + + blsp1_uart0_default: blsp1-uart0-default { + pins = "gpio30", "gpio31", "gpio32", "gpio33"; + function = "blsp_uart0"; + }; + + blsp1_uart1_default: blsp1-uart1-default { + pins = "gpio22", "gpio23"; + function = "blsp_uart1"; + }; + + blsp1_uart2_default: blsp1-uart2-default { + rx { + pins = "gpio18"; + function = "blsp_uart_rx_a2"; + }; + + tx { + pins = "gpio17"; + function = "blsp_uart_tx_a2"; + }; + }; + + blsp1_uart3_default: blsp1-uart3-default { + pins = "gpio82", "gpio83", "gpio84", "gpio85"; + function = "blsp_uart3"; + }; + + blsp2_i2c0_default: blsp2-i2c0-default { + pins = "gpio28", "gpio29"; + function = "blsp_i2c5"; + }; + + blsp1_spi0_default: blsp1-spi0-default { + pins = "gpio30", "gpio31", "gpio32", "gpio33"; + function = "blsp_spi0"; + }; + + blsp1_spi1_default: blsp1-spi1-default { + pins = "gpio22", "gpio23", "gpio24", "gpio25"; + function = "blsp_spi1"; + }; + + blsp1_spi2_default: blsp1-spi2-default { + pins = "gpio17", "gpio18", "gpio19", "gpio20"; + function = "blsp_spi2"; + }; + + blsp1_spi3_default: blsp1-spi3-default { + pins = "gpio82", "gpio83", "gpio84", "gpio85"; + function = "blsp_spi3"; + }; + + blsp1_spi4_default: blsp1-spi4-default { + pins = "gpio37", "gpio38", "gpio117", "gpio118"; + function = "blsp_spi4"; + }; + + blsp2_spi0_default: blsp2-spi0-default { + pins = "gpio26", "gpio27", "gpio28", "gpio29"; + function = "blsp_spi5"; + }; + + blsp2_uart0_default: blsp2-uart0-default { + pins = "gpio26", "gpio27", "gpio28", "gpio29"; + function = "blsp_uart5"; + }; }; gcc: clock-controller@1800000 { @@ -335,6 +440,32 @@ status = "okay"; }; + blsp1_uart0: serial@78af000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x078af000 0x200>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_UART0_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + dmas = <&blsp1_dma 1>, <&blsp1_dma 0>; + dma-names = "rx", "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_uart0_default>; + status = "disabled"; + }; + + blsp1_uart1: 
serial@78b0000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x078b0000 0x200>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + dmas = <&blsp1_dma 3>, <&blsp1_dma 2>; + dma-names = "rx", "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_uart1_default>; + status = "disabled"; + }; + blsp1_uart2: serial@78b1000 { compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; reg = <0x078b1000 0x200>; @@ -343,9 +474,237 @@ clock-names = "core", "iface"; dmas = <&blsp1_dma 5>, <&blsp1_dma 4>; dma-names = "rx", "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_uart2_default>; status = "okay"; }; + wifi: wifi@a000000 { + compatible = "qcom,wcn3990-wifi"; + reg = <0xa000000 0x800000>; + reg-names = "membase"; + memory-region = <&wlan_msa_mem>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + ; + status = "disabled"; + }; + + blsp1_uart3: serial@78b2000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x078b2000 0x200>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>; + clock-names = "core", "iface"; + dmas = <&blsp1_dma 7>, <&blsp1_dma 6>; + dma-names = "rx", "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_uart3_default>; + status = "disabled"; + }; + + blsp1_i2c0: i2c@78b5000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x078b5000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP0_I2C_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_i2c0_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_spi0: spi@78b5000 { + compatible = "qcom,spi-qup-v2.2.1"; + reg = <0x078b5000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP0_SPI_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_spi0_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_i2c1: i2c@78b6000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x078b6000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_i2c1_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_spi1: spi@78b6000 { + compatible = "qcom,spi-qup-v2.2.1"; + reg = <0x078b6000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_spi1_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_i2c2: i2c@78b7000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x078b7000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_i2c2_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_spi2: spi@78b7000 { + compatible = "qcom,spi-qup-v2.2.1"; + reg = <0x078b7000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_spi2_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_i2c3: i2c@78b8000 { + compatible = 
"qcom,i2c-qup-v2.2.1"; + reg = <0x078b8000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP3_I2C_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_i2c3_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_spi3: spi@78b8000 { + compatible = "qcom,spi-qup-v2.2.1"; + reg = <0x078b8000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_spi3_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_i2c4: i2c@78b9000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x078b9000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_i2c4_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp1_spi4: spi@78b9000 { + compatible = "qcom,spi-qup-v2.2.1"; + reg = <0x078b9000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp1_spi4_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp2_dma: dma@7ac4000 { + compatible = "qcom,bam-v1.7.0"; + reg = <0x07ac4000 0x17000>; + interrupts = ; + clocks = <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "bam_clk"; + #dma-cells = <1>; + qcom,controlled-remotely = <1>; + qcom,ee = <0>; + status = "disabled"; + }; + + blsp2_uart0: serial@7aef000 { + compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm"; + reg = <0x07aef000 0x200>; + interrupts = ; + clocks = <&gcc GCC_BLSP2_UART0_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>; + clock-names = "core", "iface"; + dmas = <&blsp2_dma 1>, <&blsp2_dma 0>; + dma-names = "rx", "tx"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp2_uart0_default>; + status = "disabled"; + }; + + blsp2_i2c0: i2c@7af5000 { + compatible = "qcom,i2c-qup-v2.2.1"; + reg = <0x07af5000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP2_AHB_CLK>, + <&gcc GCC_BLSP2_QUP0_I2C_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp2_i2c0_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + blsp2_spi0: spi@7af5000 { + compatible = "qcom,spi-qup-v2.2.1"; + reg = <0x07af5000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP2_AHB_CLK>, + <&gcc GCC_BLSP2_QUP0_SPI_APPS_CLK>; + clock-names = "iface", "core"; + pinctrl-names = "default"; + pinctrl-0 = <&blsp2_spi0_default>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + intc: interrupt-controller@b000000 { compatible = "qcom,msm-qgic2"; interrupt-controller; diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts index b3def0358177..af8c6a2445a2 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts @@ -7,6 +7,7 @@ /dts-v1/; +#include #include #include "sdm845.dtsi" @@ -346,7 +347,9 @@ &gcc { protected-clocks = , , - ; + , + , + ; }; &i2c10 { @@ -358,14 +361,36 @@ status = "okay"; }; -&tlmm { - gpio-reserved-ranges = <0 4>, <81 4>; +&sdhc_2 { + status = "okay"; + + pinctrl-names = "default"; + pinctrl-0 = <&sdc2_clk &sdc2_cmd &sdc2_data &sd_card_det_n>; + + vmmc-supply = <&vreg_l21a_2p95>; + vqmmc-supply = <&vddpx_2>; + + cd-gpios = 
<&tlmm 126 GPIO_ACTIVE_LOW>; }; &uart9 { status = "okay"; }; +&ufs_mem_hc { + status = "okay"; + + vcc-supply = <&vreg_l20a_2p95>; + vcc-max-microamp = <600000>; +}; + +&ufs_mem_phy { + status = "okay"; + + vdda-phy-supply = <&vdda_ufs1_core>; + vdda-pll-supply = <&vdda_ufs1_1p2>; +}; + &usb_1 { status = "okay"; }; @@ -427,6 +452,14 @@ vdda-pll-supply = <&vdda_usb2_ss_core>; }; +&wifi { + status = "okay"; + vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>; + vdd-1.8-xo-supply = <&vreg_l7a_1p8>; + vdd-1.3-rfa-supply = <&vreg_l17a_1p3>; + vdd-3.3-ch0-supply = <&vreg_l25a_3p3>; +}; + /* PINCTRL - additions to nodes defined in sdm845.dtsi */ &qup_i2c10_default { @@ -450,3 +483,48 @@ bias-pull-up; }; }; + +&tlmm { + gpio-reserved-ranges = <0 4>, <81 4>; + + sdc2_clk: sdc2-clk { + pinconf { + pins = "sdc2_clk"; + bias-disable; + + /* + * It seems that mmc_test reports errors if drive + * strength is not 16 on clk, cmd, and data pins. + */ + drive-strength = <16>; + }; + }; + + sdc2_cmd: sdc2-cmd { + pinconf { + pins = "sdc2_cmd"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + sdc2_data: sdc2-data { + pinconf { + pins = "sdc2_data"; + bias-pull-up; + drive-strength = <16>; + }; + }; + + sd_card_det_n: sd-card-det-n { + pinmux { + pins = "gpio126"; + function = "gpio"; + }; + + pinconf { + pins = "gpio126"; + bias-pull-up; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index c27cbd3bcb0a..5308f1671824 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -7,12 +7,17 @@ #include #include +#include +#include #include +#include #include #include #include +#include #include #include +#include / { interrupt-parent = <&intc>; @@ -88,6 +93,21 @@ reg = <0 0x86200000 0 0x2d00000>; no-map; }; + + wlan_msa_mem: memory@96700000 { + reg = <0 0x96700000 0 0x100000>; + no-map; + }; + + mpss_region: memory@8e000000 { + reg = <0 0x8e000000 0 0x7800000>; + no-map; + }; + + mba_region: memory@96500000 { + reg = <0 0x96500000 0 0x200000>; + no-map; + }; }; cpus { @@ -99,6 +119,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x0>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; next-level-cache = <&L2_0>; L2_0: l2-cache { compatible = "cache"; @@ -114,6 +136,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x100>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; next-level-cache = <&L2_100>; L2_100: l2-cache { compatible = "cache"; @@ -126,6 +150,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x200>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; next-level-cache = <&L2_200>; L2_200: l2-cache { compatible = "cache"; @@ -138,6 +164,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x300>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 0>; + #cooling-cells = <2>; next-level-cache = <&L2_300>; L2_300: l2-cache { compatible = "cache"; @@ -150,6 +178,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x400>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 1>; + #cooling-cells = <2>; next-level-cache = <&L2_400>; L2_400: l2-cache { compatible = "cache"; @@ -162,6 +192,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x500>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 1>; + #cooling-cells = <2>; next-level-cache = <&L2_500>; L2_500: l2-cache { compatible = "cache"; @@ -174,6 +206,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x600>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 1>; + 
#cooling-cells = <2>; next-level-cache = <&L2_600>; L2_600: l2-cache { compatible = "cache"; @@ -186,6 +220,8 @@ compatible = "qcom,kryo385"; reg = <0x0 0x700>; enable-method = "psci"; + qcom,freq-domain = <&cpufreq_hw 1>; + #cooling-cells = <2>; next-level-cache = <&L2_700>; L2_700: l2-cache { compatible = "cache"; @@ -222,6 +258,12 @@ }; }; + firmware { + scm { + compatible = "qcom,scm-sdm845", "qcom,scm"; + }; + }; + tcsr_mutex: hwlock { compatible = "qcom,tcsr-mutex"; syscon = <&tcsr_mutex_regs 0 0x1000>; @@ -328,14 +370,15 @@ }; soc: soc { - #address-cells = <1>; - #size-cells = <1>; - ranges = <0 0 0 0xffffffff>; + #address-cells = <2>; + #size-cells = <2>; + ranges = <0 0 0 0 0x10 0>; + dma-ranges = <0 0 0 0 0x10 0>; compatible = "simple-bus"; gcc: clock-controller@100000 { compatible = "qcom,gcc-sdm845"; - reg = <0x100000 0x1f0000>; + reg = <0 0x00100000 0 0x1f0000>; #clock-cells = <1>; #reset-cells = <1>; #power-domain-cells = <1>; @@ -343,7 +386,7 @@ qfprom@784000 { compatible = "qcom,qfprom"; - reg = <0x784000 0x8ff>; + reg = <0 0x00784000 0 0x8ff>; #address-cells = <1>; #size-cells = <1>; @@ -360,25 +403,25 @@ rng: rng@793000 { compatible = "qcom,prng-ee"; - reg = <0x00793000 0x1000>; + reg = <0 0x00793000 0 0x1000>; clocks = <&gcc GCC_PRNG_AHB_CLK>; clock-names = "core"; }; qupv3_id_0: geniqup@8c0000 { compatible = "qcom,geni-se-qup"; - reg = <0x8c0000 0x6000>; + reg = <0 0x008c0000 0 0x6000>; clock-names = "m-ahb", "s-ahb"; clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; status = "disabled"; i2c0: i2c@880000 { compatible = "qcom,geni-i2c"; - reg = <0x880000 0x4000>; + reg = <0 0x00880000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>; pinctrl-names = "default"; @@ -391,7 +434,7 @@ spi0: spi@880000 { compatible = "qcom,geni-spi"; - reg = <0x880000 0x4000>; + reg = <0 0x00880000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>; pinctrl-names = "default"; @@ -404,7 +447,7 @@ uart0: serial@880000 { compatible = "qcom,geni-uart"; - reg = <0x880000 0x4000>; + reg = <0 0x00880000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>; pinctrl-names = "default"; @@ -415,7 +458,7 @@ i2c1: i2c@884000 { compatible = "qcom,geni-i2c"; - reg = <0x884000 0x4000>; + reg = <0 0x00884000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>; pinctrl-names = "default"; @@ -428,7 +471,7 @@ spi1: spi@884000 { compatible = "qcom,geni-spi"; - reg = <0x884000 0x4000>; + reg = <0 0x00884000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>; pinctrl-names = "default"; @@ -441,7 +484,7 @@ uart1: serial@884000 { compatible = "qcom,geni-uart"; - reg = <0x884000 0x4000>; + reg = <0 0x00884000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>; pinctrl-names = "default"; @@ -452,7 +495,7 @@ i2c2: i2c@888000 { compatible = "qcom,geni-i2c"; - reg = <0x888000 0x4000>; + reg = <0 0x00888000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>; pinctrl-names = "default"; @@ -465,7 +508,7 @@ spi2: spi@888000 { compatible = "qcom,geni-spi"; - reg = <0x888000 0x4000>; + reg = <0 0x00888000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>; pinctrl-names = "default"; @@ -478,7 +521,7 @@ uart2: serial@888000 { compatible = "qcom,geni-uart"; - reg = <0x888000 0x4000>; + reg = <0 0x00888000 0 0x4000>; clock-names = "se"; clocks = <&gcc 
GCC_QUPV3_WRAP0_S2_CLK>; pinctrl-names = "default"; @@ -489,7 +532,7 @@ i2c3: i2c@88c000 { compatible = "qcom,geni-i2c"; - reg = <0x88c000 0x4000>; + reg = <0 0x0088c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>; pinctrl-names = "default"; @@ -502,7 +545,7 @@ spi3: spi@88c000 { compatible = "qcom,geni-spi"; - reg = <0x88c000 0x4000>; + reg = <0 0x0088c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>; pinctrl-names = "default"; @@ -515,7 +558,7 @@ uart3: serial@88c000 { compatible = "qcom,geni-uart"; - reg = <0x88c000 0x4000>; + reg = <0 0x0088c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>; pinctrl-names = "default"; @@ -526,7 +569,7 @@ i2c4: i2c@890000 { compatible = "qcom,geni-i2c"; - reg = <0x890000 0x4000>; + reg = <0 0x00890000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>; pinctrl-names = "default"; @@ -539,7 +582,7 @@ spi4: spi@890000 { compatible = "qcom,geni-spi"; - reg = <0x890000 0x4000>; + reg = <0 0x00890000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>; pinctrl-names = "default"; @@ -552,7 +595,7 @@ uart4: serial@890000 { compatible = "qcom,geni-uart"; - reg = <0x890000 0x4000>; + reg = <0 0x00890000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>; pinctrl-names = "default"; @@ -563,7 +606,7 @@ i2c5: i2c@894000 { compatible = "qcom,geni-i2c"; - reg = <0x894000 0x4000>; + reg = <0 0x00894000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; pinctrl-names = "default"; @@ -576,7 +619,7 @@ spi5: spi@894000 { compatible = "qcom,geni-spi"; - reg = <0x894000 0x4000>; + reg = <0 0x00894000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; pinctrl-names = "default"; @@ -589,7 +632,7 @@ uart5: serial@894000 { compatible = "qcom,geni-uart"; - reg = <0x894000 0x4000>; + reg = <0 0x00894000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>; pinctrl-names = "default"; @@ -600,7 +643,7 @@ i2c6: i2c@898000 { compatible = "qcom,geni-i2c"; - reg = <0x898000 0x4000>; + reg = <0 0x00898000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>; pinctrl-names = "default"; @@ -613,7 +656,7 @@ spi6: spi@898000 { compatible = "qcom,geni-spi"; - reg = <0x898000 0x4000>; + reg = <0 0x00898000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>; pinctrl-names = "default"; @@ -626,7 +669,7 @@ uart6: serial@898000 { compatible = "qcom,geni-uart"; - reg = <0x898000 0x4000>; + reg = <0 0x00898000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>; pinctrl-names = "default"; @@ -637,7 +680,7 @@ i2c7: i2c@89c000 { compatible = "qcom,geni-i2c"; - reg = <0x89c000 0x4000>; + reg = <0 0x0089c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>; pinctrl-names = "default"; @@ -650,7 +693,7 @@ spi7: spi@89c000 { compatible = "qcom,geni-spi"; - reg = <0x89c000 0x4000>; + reg = <0 0x0089c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>; pinctrl-names = "default"; @@ -663,7 +706,7 @@ uart7: serial@89c000 { compatible = "qcom,geni-uart"; - reg = <0x89c000 0x4000>; + reg = <0 0x0089c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>; pinctrl-names = "default"; @@ -675,18 +718,18 @@ qupv3_id_1: geniqup@ac0000 { compatible = "qcom,geni-se-qup"; - reg = <0xac0000 0x6000>; + reg = <0 0x00ac0000 0 0x6000>; clock-names = "m-ahb", "s-ahb"; clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, <&gcc 
GCC_QUPV3_WRAP_1_S_AHB_CLK>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; status = "disabled"; i2c8: i2c@a80000 { compatible = "qcom,geni-i2c"; - reg = <0xa80000 0x4000>; + reg = <0 0x00a80000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>; pinctrl-names = "default"; @@ -699,7 +742,7 @@ spi8: spi@a80000 { compatible = "qcom,geni-spi"; - reg = <0xa80000 0x4000>; + reg = <0 0x00a80000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>; pinctrl-names = "default"; @@ -712,7 +755,7 @@ uart8: serial@a80000 { compatible = "qcom,geni-uart"; - reg = <0xa80000 0x4000>; + reg = <0 0x00a80000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>; pinctrl-names = "default"; @@ -723,7 +766,7 @@ i2c9: i2c@a84000 { compatible = "qcom,geni-i2c"; - reg = <0xa84000 0x4000>; + reg = <0 0x00a84000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>; pinctrl-names = "default"; @@ -736,7 +779,7 @@ spi9: spi@a84000 { compatible = "qcom,geni-spi"; - reg = <0xa84000 0x4000>; + reg = <0 0x00a84000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>; pinctrl-names = "default"; @@ -749,7 +792,7 @@ uart9: serial@a84000 { compatible = "qcom,geni-debug-uart"; - reg = <0xa84000 0x4000>; + reg = <0 0x00a84000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>; pinctrl-names = "default"; @@ -760,7 +803,7 @@ i2c10: i2c@a88000 { compatible = "qcom,geni-i2c"; - reg = <0xa88000 0x4000>; + reg = <0 0x00a88000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>; pinctrl-names = "default"; @@ -773,7 +816,7 @@ spi10: spi@a88000 { compatible = "qcom,geni-spi"; - reg = <0xa88000 0x4000>; + reg = <0 0x00a88000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>; pinctrl-names = "default"; @@ -786,7 +829,7 @@ uart10: serial@a88000 { compatible = "qcom,geni-uart"; - reg = <0xa88000 0x4000>; + reg = <0 0x00a88000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>; pinctrl-names = "default"; @@ -797,7 +840,7 @@ i2c11: i2c@a8c000 { compatible = "qcom,geni-i2c"; - reg = <0xa8c000 0x4000>; + reg = <0 0x00a8c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>; pinctrl-names = "default"; @@ -810,7 +853,7 @@ spi11: spi@a8c000 { compatible = "qcom,geni-spi"; - reg = <0xa8c000 0x4000>; + reg = <0 0x00a8c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>; pinctrl-names = "default"; @@ -823,7 +866,7 @@ uart11: serial@a8c000 { compatible = "qcom,geni-uart"; - reg = <0xa8c000 0x4000>; + reg = <0 0x00a8c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>; pinctrl-names = "default"; @@ -834,7 +877,7 @@ i2c12: i2c@a90000 { compatible = "qcom,geni-i2c"; - reg = <0xa90000 0x4000>; + reg = <0 0x00a90000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>; pinctrl-names = "default"; @@ -847,7 +890,7 @@ spi12: spi@a90000 { compatible = "qcom,geni-spi"; - reg = <0xa90000 0x4000>; + reg = <0 0x00a90000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>; pinctrl-names = "default"; @@ -860,7 +903,7 @@ uart12: serial@a90000 { compatible = "qcom,geni-uart"; - reg = <0xa90000 0x4000>; + reg = <0 0x00a90000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>; pinctrl-names = "default"; @@ -871,7 +914,7 @@ i2c13: i2c@a94000 { compatible = "qcom,geni-i2c"; - reg = <0xa94000 0x4000>; + reg = <0 0x00a94000 0 0x4000>; clock-names = 
"se"; clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>; pinctrl-names = "default"; @@ -884,7 +927,7 @@ spi13: spi@a94000 { compatible = "qcom,geni-spi"; - reg = <0xa94000 0x4000>; + reg = <0 0x00a94000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>; pinctrl-names = "default"; @@ -897,7 +940,7 @@ uart13: serial@a94000 { compatible = "qcom,geni-uart"; - reg = <0xa94000 0x4000>; + reg = <0 0x00a94000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>; pinctrl-names = "default"; @@ -908,7 +951,7 @@ i2c14: i2c@a98000 { compatible = "qcom,geni-i2c"; - reg = <0xa98000 0x4000>; + reg = <0 0x00a98000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>; pinctrl-names = "default"; @@ -921,7 +964,7 @@ spi14: spi@a98000 { compatible = "qcom,geni-spi"; - reg = <0xa98000 0x4000>; + reg = <0 0x00a98000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>; pinctrl-names = "default"; @@ -934,7 +977,7 @@ uart14: serial@a98000 { compatible = "qcom,geni-uart"; - reg = <0xa98000 0x4000>; + reg = <0 0x00a98000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S6_CLK>; pinctrl-names = "default"; @@ -945,7 +988,7 @@ i2c15: i2c@a9c000 { compatible = "qcom,geni-i2c"; - reg = <0xa9c000 0x4000>; + reg = <0 0x00a9c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>; pinctrl-names = "default"; @@ -958,7 +1001,7 @@ spi15: spi@a9c000 { compatible = "qcom,geni-spi"; - reg = <0xa9c000 0x4000>; + reg = <0 0x00a9c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>; pinctrl-names = "default"; @@ -971,7 +1014,7 @@ uart15: serial@a9c000 { compatible = "qcom,geni-uart"; - reg = <0xa9c000 0x4000>; + reg = <0 0x00a9c000 0 0x4000>; clock-names = "se"; clocks = <&gcc GCC_QUPV3_WRAP1_S7_CLK>; pinctrl-names = "default"; @@ -981,19 +1024,121 @@ }; }; + ufs_mem_hc: ufshc@1d84000 { + compatible = "qcom,sdm845-ufshc", "qcom,ufshc", + "jedec,ufs-2.0"; + reg = <0 0x01d84000 0 0x2500>; + interrupts = ; + phys = <&ufs_mem_phy_lanes>; + phy-names = "ufsphy"; + lanes-per-direction = <2>; + power-domains = <&gcc UFS_PHY_GDSC>; + + iommus = <&apps_smmu 0x100 0xf>; + + clock-names = + "core_clk", + "bus_aggr_clk", + "iface_clk", + "core_clk_unipro", + "ref_clk", + "tx_lane0_sync_clk", + "rx_lane0_sync_clk", + "rx_lane1_sync_clk"; + clocks = + <&gcc GCC_UFS_PHY_AXI_CLK>, + <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>, + <&gcc GCC_UFS_PHY_AHB_CLK>, + <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>, + <&rpmhcc RPMH_CXO_CLK>, + <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>, + <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>; + freq-table-hz = + <50000000 200000000>, + <0 0>, + <0 0>, + <37500000 150000000>, + <0 0>, + <0 0>, + <0 0>, + <0 0>; + + status = "disabled"; + }; + + ufs_mem_phy: phy@1d87000 { + compatible = "qcom,sdm845-qmp-ufs-phy"; + reg = <0 0x01d87000 0 0x18c>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + clock-names = "ref", + "ref_aux"; + clocks = <&gcc GCC_UFS_MEM_CLKREF_CLK>, + <&gcc GCC_UFS_PHY_PHY_AUX_CLK>; + + status = "disabled"; + + ufs_mem_phy_lanes: lanes@1d87400 { + reg = <0 0x01d87400 0 0x108>, + <0 0x01d87600 0 0x1e0>, + <0 0x01d87c00 0 0x1dc>, + <0 0x01d87800 0 0x108>, + <0 0x01d87a00 0 0x1e0>; + #phy-cells = <0>; + }; + }; + tcsr_mutex_regs: syscon@1f40000 { compatible = "syscon"; - reg = <0x1f40000 0x40000>; + reg = <0 0x01f40000 0 0x40000>; }; tlmm: pinctrl@3400000 { compatible = "qcom,sdm845-pinctrl"; - reg = <0x03400000 0xc00000>; + reg = <0 0x03400000 0 0xc00000>; interrupts = ; gpio-controller; 
#gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; + gpio-ranges = <&tlmm 0 0 150>; + + qspi_clk: qspi-clk { + pinmux { + pins = "gpio95"; + function = "qspi_clk"; + }; + }; + + qspi_cs0: qspi-cs0 { + pinmux { + pins = "gpio90"; + function = "qspi_cs"; + }; + }; + + qspi_cs1: qspi-cs1 { + pinmux { + pins = "gpio89"; + function = "qspi_cs"; + }; + }; + + qspi_data01: qspi-data01 { + pinmux-data { + pins = "gpio91", "gpio92"; + function = "qspi_data"; + }; + }; + + qspi_data12: qspi-data12 { + pinmux-data { + pins = "gpio93", "gpio94"; + function = "qspi_data"; + }; + }; qup_i2c0_default: qup-i2c0-default { pinmux { @@ -1348,9 +1493,47 @@ }; }; + gpucc: clock-controller@5090000 { + compatible = "qcom,sdm845-gpucc"; + reg = <0 0x05090000 0 0x9000>; + #clock-cells = <1>; + #reset-cells = <1>; + #power-domain-cells = <1>; + clocks = <&rpmhcc RPMH_CXO_CLK>; + clock-names = "xo"; + }; + + sdhc_2: sdhci@8804000 { + compatible = "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5"; + reg = <0 0x08804000 0 0x1000>; + + interrupts = , + ; + interrupt-names = "hc_irq", "pwr_irq"; + + clocks = <&gcc GCC_SDCC2_AHB_CLK>, + <&gcc GCC_SDCC2_APPS_CLK>; + clock-names = "iface", "core"; + iommus = <&apps_smmu 0xa0 0xf>; + + status = "disabled"; + }; + + qspi: spi@88df000 { + compatible = "qcom,sdm845-qspi", "qcom,qspi-v1"; + reg = <0 0x088df000 0 0x600>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>, + <&gcc GCC_QSPI_CORE_CLK>; + clock-names = "iface", "core"; + status = "disabled"; + }; + usb_1_hsphy: phy@88e2000 { compatible = "qcom,sdm845-qusb2-phy"; - reg = <0x88e2000 0x400>; + reg = <0 0x088e2000 0 0x400>; status = "disabled"; #phy-cells = <0>; @@ -1365,7 +1548,7 @@ usb_2_hsphy: phy@88e3000 { compatible = "qcom,sdm845-qusb2-phy"; - reg = <0x88e3000 0x400>; + reg = <0 0x088e3000 0 0x400>; status = "disabled"; #phy-cells = <0>; @@ -1380,13 +1563,13 @@ usb_1_qmpphy: phy@88e9000 { compatible = "qcom,sdm845-qmp-usb3-phy"; - reg = <0x88e9000 0x18c>, - <0x88e8000 0x10>; + reg = <0 0x088e9000 0 0x18c>, + <0 0x088e8000 0 0x10>; reg-names = "reg-base", "dp_com"; status = "disabled"; #clock-cells = <1>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, @@ -1399,11 +1582,13 @@ <&gcc GCC_USB3_PHY_PRIM_BCR>; reset-names = "phy", "common"; - usb_1_ssphy: lane@88e9200 { - reg = <0x88e9200 0x128>, - <0x88e9400 0x200>, - <0x88e9c00 0x218>, - <0x88e9a00 0x100>; + usb_1_ssphy: lanes@88e9200 { + reg = <0 0x088e9200 0 0x128>, + <0 0x088e9400 0 0x200>, + <0 0x088e9c00 0 0x218>, + <0 0x088e9600 0 0x128>, + <0 0x088e9800 0 0x200>, + <0 0x088e9a00 0 0x100>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>; clock-names = "pipe0"; @@ -1413,11 +1598,11 @@ usb_2_qmpphy: phy@88eb000 { compatible = "qcom,sdm845-qmp-usb3-uni-phy"; - reg = <0x88eb000 0x18c>; + reg = <0 0x088eb000 0 0x18c>; status = "disabled"; #clock-cells = <1>; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>, @@ -1431,10 +1616,10 @@ reset-names = "phy", "common"; usb_2_ssphy: lane@88eb200 { - reg = <0x88eb200 0x128>, - <0x88eb400 0x1fc>, - <0x88eb800 0x218>, - <0x88e9600 0x70>; + reg = <0 0x088eb200 0 0x128>, + <0 0x088eb400 0 0x1fc>, + <0 0x088eb800 0 0x218>, + <0 0x088eb600 0 0x70>; #phy-cells = <0>; clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>; clock-names = "pipe0"; @@ -1444,11 +1629,12 @@ usb_1: usb@a6f8800 { 
compatible = "qcom,sdm845-dwc3", "qcom,dwc3"; - reg = <0xa6f8800 0x400>; + reg = <0 0x0a6f8800 0 0x400>; status = "disabled"; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; + dma-ranges; clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>, <&gcc GCC_USB30_PRIM_MASTER_CLK>, @@ -1475,8 +1661,9 @@ usb_1_dwc3: dwc3@a600000 { compatible = "snps,dwc3"; - reg = <0xa600000 0xcd00>; + reg = <0 0x0a600000 0 0xcd00>; interrupts = ; + iommus = <&apps_smmu 0x740 0>; snps,dis_u2_susphy_quirk; snps,dis_enblslpm_quirk; phys = <&usb_1_hsphy>, <&usb_1_ssphy>; @@ -1486,11 +1673,12 @@ usb_2: usb@a8f8800 { compatible = "qcom,sdm845-dwc3", "qcom,dwc3"; - reg = <0xa8f8800 0x400>; + reg = <0 0x0a8f8800 0 0x400>; status = "disabled"; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; + dma-ranges; clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>, <&gcc GCC_USB30_SEC_MASTER_CLK>, @@ -1517,8 +1705,9 @@ usb_2_dwc3: dwc3@a800000 { compatible = "snps,dwc3"; - reg = <0xa800000 0xcd00>; + reg = <0 0x0a800000 0 0xcd00>; interrupts = ; + iommus = <&apps_smmu 0x760 0>; snps,dis_u2_susphy_quirk; snps,dis_enblslpm_quirk; phys = <&usb_2_hsphy>, <&usb_2_ssphy>; @@ -1526,43 +1715,260 @@ }; }; + videocc: clock-controller@ab00000 { + compatible = "qcom,sdm845-videocc"; + reg = <0 0x0ab00000 0 0x10000>; + #clock-cells = <1>; + #power-domain-cells = <1>; + #reset-cells = <1>; + }; + + mdss: mdss@ae00000 { + compatible = "qcom,sdm845-mdss"; + reg = <0 0x0ae00000 0 0x1000>; + reg-names = "mdss"; + + power-domains = <&dispcc MDSS_GDSC>; + + clocks = <&gcc GCC_DISP_AHB_CLK>, + <&gcc GCC_DISP_AXI_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>; + clock-names = "iface", "bus", "core"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>; + assigned-clock-rates = <300000000>; + + interrupts = ; + interrupt-controller; + #interrupt-cells = <1>; + + iommus = <&apps_smmu 0x880 0x8>, + <&apps_smmu 0xc80 0x8>; + + status = "disabled"; + + #address-cells = <2>; + #size-cells = <2>; + ranges; + + mdss_mdp: mdp@ae01000 { + compatible = "qcom,sdm845-dpu"; + reg = <0 0x0ae01000 0 0x8f000>, + <0 0x0aeb0000 0 0x2008>; + reg-names = "mdp", "vbif"; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_AXI_CLK>, + <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + clock-names = "iface", "bus", "core", "vsync"; + + assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>, + <&dispcc DISP_CC_MDSS_VSYNC_CLK>; + assigned-clock-rates = <300000000>, + <19200000>; + + interrupt-parent = <&mdss>; + interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dpu_intf1_out: endpoint { + remote-endpoint = <&dsi0_in>; + }; + }; + + port@1 { + reg = <1>; + dpu_intf2_out: endpoint { + remote-endpoint = <&dsi1_in>; + }; + }; + }; + }; + + dsi0: dsi@ae94000 { + compatible = "qcom,mdss-dsi-ctrl"; + reg = <0 0x0ae94000 0 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK>, + <&dispcc DISP_CC_MDSS_BYTE0_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK0_CLK>, + <&dispcc DISP_CC_MDSS_ESC0_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + + phys = <&dsi0_phy>; + phy-names = "dsi"; + + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + 
#size-cells = <0>; + + port@0 { + reg = <0>; + dsi0_in: endpoint { + remote-endpoint = <&dpu_intf1_out>; + }; + }; + + port@1 { + reg = <1>; + dsi0_out: endpoint { + }; + }; + }; + }; + + dsi0_phy: dsi-phy@ae94400 { + compatible = "qcom,dsi-phy-10nm"; + reg = <0 0x0ae94400 0 0x200>, + <0 0x0ae94600 0 0x280>, + <0 0x0ae94a00 0 0x1e0>; + reg-names = "dsi_phy", + "dsi_phy_lane", + "dsi_pll"; + + #clock-cells = <1>; + #phy-cells = <0>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>; + clock-names = "iface"; + + status = "disabled"; + }; + + dsi1: dsi@ae96000 { + compatible = "qcom,mdss-dsi-ctrl"; + reg = <0 0x0ae96000 0 0x400>; + reg-names = "dsi_ctrl"; + + interrupt-parent = <&mdss>; + interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + + clocks = <&dispcc DISP_CC_MDSS_BYTE1_CLK>, + <&dispcc DISP_CC_MDSS_BYTE1_INTF_CLK>, + <&dispcc DISP_CC_MDSS_PCLK1_CLK>, + <&dispcc DISP_CC_MDSS_ESC1_CLK>, + <&dispcc DISP_CC_MDSS_AHB_CLK>, + <&dispcc DISP_CC_MDSS_AXI_CLK>; + clock-names = "byte", + "byte_intf", + "pixel", + "core", + "iface", + "bus"; + + phys = <&dsi1_phy>; + phy-names = "dsi"; + + status = "disabled"; + + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + dsi1_in: endpoint { + remote-endpoint = <&dpu_intf2_out>; + }; + }; + + port@1 { + reg = <1>; + dsi1_out: endpoint { + }; + }; + }; + }; + + dsi1_phy: dsi-phy@ae96400 { + compatible = "qcom,dsi-phy-10nm"; + reg = <0 0x0ae96400 0 0x200>, + <0 0x0ae96600 0 0x280>, + <0 0x0ae96a00 0 0x10e>; + reg-names = "dsi_phy", + "dsi_phy_lane", + "dsi_pll"; + + #clock-cells = <1>; + #phy-cells = <0>; + + clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>; + clock-names = "iface"; + + status = "disabled"; + }; + }; + dispcc: clock-controller@af00000 { compatible = "qcom,sdm845-dispcc"; - reg = <0xaf00000 0x10000>; + reg = <0 0x0af00000 0 0x10000>; #clock-cells = <1>; #reset-cells = <1>; #power-domain-cells = <1>; }; + pdc_reset: reset-controller@b2e0000 { + compatible = "qcom,sdm845-pdc-global"; + reg = <0 0x0b2e0000 0 0x20000>; + #reset-cells = <1>; + }; + tsens0: thermal-sensor@c263000 { compatible = "qcom,sdm845-tsens", "qcom,tsens-v2"; - reg = <0xc263000 0x1ff>, /* TM */ - <0xc222000 0x1ff>; /* SROT */ + reg = <0 0x0c263000 0 0x1ff>, /* TM */ + <0 0x0c222000 0 0x1ff>; /* SROT */ #qcom,sensors = <13>; #thermal-sensor-cells = <1>; }; tsens1: thermal-sensor@c265000 { compatible = "qcom,sdm845-tsens", "qcom,tsens-v2"; - reg = <0xc265000 0x1ff>, /* TM */ - <0xc223000 0x1ff>; /* SROT */ + reg = <0 0x0c265000 0 0x1ff>, /* TM */ + <0 0x0c223000 0 0x1ff>; /* SROT */ #qcom,sensors = <8>; #thermal-sensor-cells = <1>; }; aoss_reset: reset-controller@c2a0000 { compatible = "qcom,sdm845-aoss-cc"; - reg = <0xc2a0000 0x31000>; + reg = <0 0x0c2a0000 0 0x31000>; #reset-cells = <1>; }; spmi_bus: spmi@c440000 { compatible = "qcom,spmi-pmic-arb"; - reg = <0xc440000 0x1100>, - <0xc600000 0x2000000>, - <0xe600000 0x100000>, - <0xe700000 0xa0000>, - <0xc40a000 0x26000>; + reg = <0 0x0c440000 0 0x1100>, + <0 0x0c600000 0 0x2000000>, + <0 0x0e600000 0 0x100000>, + <0 0x0e700000 0 0xa0000>, + <0 0x0c40a000 0 0x26000>; reg-names = "core", "chnls", "obsrvr", "intr", "cnfg"; interrupt-names = "periph_irq"; interrupts = ; @@ -1575,18 +1981,98 @@ cell-index = <0>; }; + apps_smmu: iommu@15000000 { + compatible = "qcom,sdm845-smmu-500", "arm,mmu-500"; + reg = <0 0x15000000 0 0x80000>; + #iommu-cells = <2>; + #global-interrupts = <1>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , 
+ , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + }; + + lpasscc: clock-controller@17014000 { + compatible = "qcom,sdm845-lpasscc"; + reg = <0 0x17014000 0 0x1f004>, <0 0x17300000 0 0x200>; + reg-names = "cc", "qdsp6ss"; + #clock-cells = <1>; + status = "disabled"; + }; + apss_shared: mailbox@17990000 { compatible = "qcom,sdm845-apss-shared"; - reg = <0x17990000 0x1000>; + reg = <0 0x17990000 0 0x1000>; #mbox-cells = <1>; }; apps_rsc: rsc@179c0000 { label = "apps_rsc"; compatible = "qcom,rpmh-rsc"; - reg = <0x179c0000 0x10000>, - <0x179d0000 0x10000>, - <0x179e0000 0x10000>; + reg = <0 0x179c0000 0 0x10000>, + <0 0x179d0000 0 0x10000>, + <0 0x179e0000 0 0x10000>; reg-names = "drv-0", "drv-1", "drv-2"; interrupts = , , @@ -1602,85 +2088,175 @@ compatible = "qcom,sdm845-rpmh-clk"; #clock-cells = <1>; }; + + rpmhpd: power-controller { + compatible = "qcom,sdm845-rpmhpd"; + #power-domain-cells = <1>; + operating-points-v2 = <&rpmhpd_opp_table>; + + rpmhpd_opp_table: opp-table { + compatible = "operating-points-v2"; + + rpmhpd_opp_ret: opp1 { + opp-level = <16>; + }; + + rpmhpd_opp_min_svs: opp2 { + opp-level = <48>; + }; + + rpmhpd_opp_low_svs: opp3 { + opp-level = <64>; + }; + + rpmhpd_opp_svs: opp4 { + opp-level = <128>; + }; + + rpmhpd_opp_svs_l1: opp5 { + opp-level = <192>; + }; + + rpmhpd_opp_nom: opp6 { + opp-level = <256>; + }; + + rpmhpd_opp_nom_l1: opp7 { + opp-level = <320>; + }; + + rpmhpd_opp_nom_l2: opp8 { + opp-level = <336>; + }; + + rpmhpd_opp_turbo: opp9 { + opp-level = <384>; + }; + + rpmhpd_opp_turbo_l1: opp10 { + opp-level = <416>; + }; + }; + }; + + rsc_hlos: interconnect { + compatible = "qcom,sdm845-rsc-hlos"; + #interconnect-cells = <1>; + }; }; intc: interrupt-controller@17a00000 { compatible = "arm,gic-v3"; - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; #interrupt-cells = <3>; interrupt-controller; - reg = <0x17a00000 0x10000>, /* GICD */ - <0x17a60000 0x100000>; /* GICR * 8 */ + reg = <0 0x17a00000 0 0x10000>, /* GICD */ + <0 0x17a60000 0 0x100000>; /* GICR * 8 */ interrupts = ; gic-its@17a40000 { compatible = "arm,gic-v3-its"; msi-controller; #msi-cells = <1>; - reg = <0x17a40000 0x20000>; + reg = <0 0x17a40000 0 0x20000>; status = "disabled"; }; }; timer@17c90000 { - #address-cells = <1>; - #size-cells = <1>; + #address-cells = <2>; + #size-cells = <2>; ranges; compatible = "arm,armv7-timer-mem"; - reg = <0x17c90000 0x1000>; + reg = <0 0x17c90000 0 0x1000>; frame@17ca0000 { frame-number = <0>; interrupts = , ; - reg = <0x17ca0000 0x1000>, - <0x17cb0000 0x1000>; + reg = <0 0x17ca0000 0 0x1000>, + <0 0x17cb0000 0 0x1000>; }; frame@17cc0000 { frame-number = <1>; interrupts = ; - reg = <0x17cc0000 0x1000>; + reg = <0 0x17cc0000 0 0x1000>; status = "disabled"; }; frame@17cd0000 { frame-number = <2>; interrupts = ; - reg = <0x17cd0000 0x1000>; + reg = <0 0x17cd0000 0 0x1000>; status = "disabled"; }; frame@17ce0000 { frame-number = <3>; interrupts = ; - reg = <0x17ce0000 0x1000>; + reg = <0 0x17ce0000 0 0x1000>; status = "disabled"; }; frame@17cf0000 { frame-number = <4>; interrupts = ; - reg = <0x17cf0000 0x1000>; + reg = <0 0x17cf0000 0 0x1000>; status = "disabled"; }; frame@17d00000 { frame-number = <5>; interrupts = ; - reg = <0x17d00000 0x1000>; + reg = <0 0x17d00000 0 0x1000>; status = "disabled"; }; frame@17d10000 { frame-number = <6>; interrupts = ; - reg = <0x17d10000 0x1000>; + reg = <0 
0x17d10000 0 0x1000>; status = "disabled"; }; }; + + cpufreq_hw: cpufreq@17d43000 { + compatible = "qcom,cpufreq-hw"; + reg = <0 0x17d43000 0 0x1400>, <0 0x17d45800 0 0x1400>; + reg-names = "freq-domain0", "freq-domain1"; + + clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>; + clock-names = "xo", "alternate"; + + #freq-domain-cells = <1>; + }; + + wifi: wifi@18800000 { + compatible = "qcom,wcn3990-wifi"; + status = "disabled"; + reg = <0 0x18800000 0 0x800000>; + reg-names = "membase"; + memory-region = <&wlan_msa_mem>; + clock-names = "cxo_ref_clk_pin"; + clocks = <&rpmhcc RPMH_RF_CLK2>; + interrupts = + , + , + , + , + , + , + , + , + , + , + , + ; + iommus = <&apps_smmu 0x0040 0x1>; + }; }; thermal-zones { @@ -1691,18 +2267,41 @@ thermal-sensors = <&tsens0 1>; trips { - cpu_alert0: trip0 { - temperature = <75000>; + cpu0_alert0: trip-point@0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu0_alert1: trip-point@1 { + temperature = <95000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit0: trip1 { + cpu0_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu0_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu0_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu1-thermal { @@ -1712,18 +2311,41 @@ thermal-sensors = <&tsens0 2>; trips { - cpu_alert1: trip0 { - temperature = <75000>; + cpu1_alert0: trip-point@0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu1_alert1: trip-point@1 { + temperature = <95000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit1: trip1 { + cpu1_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu1_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu1_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu2-thermal { @@ -1733,18 +2355,41 @@ thermal-sensors = <&tsens0 3>; trips { - cpu_alert2: trip0 { - temperature = <75000>; + cpu2_alert0: trip-point@0 { + temperature = <90000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit2: trip1 { + cpu2_alert1: trip-point@1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu2_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu2_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu2_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu3-thermal { @@ -1754,18 
+2399,41 @@ thermal-sensors = <&tsens0 4>; trips { - cpu_alert3: trip0 { - temperature = <75000>; + cpu3_alert0: trip-point@0 { + temperature = <90000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit3: trip1 { + cpu3_alert1: trip-point@1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu3_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu3_alert0>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu3_alert1>; + cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu4-thermal { @@ -1775,18 +2443,41 @@ thermal-sensors = <&tsens0 7>; trips { - cpu_alert4: trip0 { - temperature = <75000>; + cpu4_alert0: trip-point@0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu4_alert1: trip-point@1 { + temperature = <95000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit4: trip1 { + cpu4_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu4_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu4_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu5-thermal { @@ -1796,18 +2487,41 @@ thermal-sensors = <&tsens0 8>; trips { - cpu_alert5: trip0 { - temperature = <75000>; + cpu5_alert0: trip-point@0 { + temperature = <90000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu5_alert1: trip-point@1 { + temperature = <95000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit5: trip1 { + cpu5_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu5_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu5_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu6-thermal { @@ -1817,18 +2531,41 @@ thermal-sensors = <&tsens0 9>; trips { - cpu_alert6: trip0 { - temperature = <75000>; + cpu6_alert0: trip-point@0 { + temperature = <90000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit6: trip1 { + cpu6_alert1: trip-point@1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu6_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu6_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu6_alert1>; + cooling-device = <&CPU4 
THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; cpu7-thermal { @@ -1838,18 +2575,41 @@ thermal-sensors = <&tsens0 10>; trips { - cpu_alert7: trip0 { - temperature = <75000>; + cpu7_alert0: trip-point@0 { + temperature = <90000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit7: trip1 { + cpu7_alert1: trip-point@1 { + temperature = <95000>; + hysteresis = <2000>; + type = "passive"; + }; + + cpu7_crit: cpu_crit { temperature = <110000>; hysteresis = <1000>; type = "critical"; }; }; + + cooling-maps { + map0 { + trip = <&cpu7_alert0>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + map1 { + trip = <&cpu7_alert1>; + cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>, + <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; + }; + }; }; }; }; diff --git a/arch/arm64/boot/dts/realtek/rtd1295.dtsi b/arch/arm64/boot/dts/realtek/rtd1295.dtsi index 8d9ac05d17dc..41d7858da826 100644 --- a/arch/arm64/boot/dts/realtek/rtd1295.dtsi +++ b/arch/arm64/boot/dts/realtek/rtd1295.dtsi @@ -17,28 +17,28 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; next-level-cache = <&l2>; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; next-level-cache = <&l2>; }; cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; next-level-cache = <&l2>; }; cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; next-level-cache = <&l2>; }; diff --git a/arch/arm64/boot/dts/renesas/Makefile b/arch/arm64/boot/dts/renesas/Makefile index a8ce6594342d..6cde526547e4 100644 --- a/arch/arm64/boot/dts/renesas/Makefile +++ b/arch/arm64/boot/dts/renesas/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +dtb-$(CONFIG_ARCH_R8A774C0) += r8a774c0-cat874.dtb r8a774c0-ek874.dtb dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-x.dtb r8a7795-h3ulcb.dtb dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-h3ulcb-kf.dtb dtb-$(CONFIG_ARCH_R8A7795) += r8a7795-salvator-xs.dtb diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi new file mode 100644 index 000000000000..14db66755a89 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/cat875.dtsi @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the Silicon Linux sub board for CAT874 (CAT875) + * + * Copyright (C) 2019 Renesas Electronics Corp. 
+ */ + +/ { + model = "Silicon Linux sub board for CAT874 (CAT875)"; + + aliases { + ethernet0 = &avb; + }; +}; + +&avb { + pinctrl-0 = <&avb_pins>; + pinctrl-names = "default"; + renesas,no-ether-link; + phy-handle = <&phy0>; + phy-mode = "rgmii"; + status = "okay"; + + phy0: ethernet-phy@0 { + rxc-skew-ps = <1500>; + reg = <0>; + interrupt-parent = <&gpio2>; + interrupts = <21 IRQ_TYPE_LEVEL_LOW>; + reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>; + }; +}; + +&pciec0 { + status = "okay"; +}; + +&pfc { + avb_pins: avb { + mux { + groups = "avb_mii"; + function = "avb"; + }; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi index 20745a8528c5..ef3cff2dd1b6 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi @@ -61,7 +61,7 @@ #size-cells = <0>; a57_0: cpu@0 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x0>; device_type = "cpu"; power-domains = <&sysc R8A774A1_PD_CA57_CPU0>; @@ -71,7 +71,7 @@ }; a57_1: cpu@1 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x1>; device_type = "cpu"; power-domains = <&sysc R8A774A1_PD_CA57_CPU1>; @@ -81,7 +81,7 @@ }; a53_0: cpu@100 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x100>; device_type = "cpu"; power-domains = <&sysc R8A774A1_PD_CA53_CPU0>; @@ -91,7 +91,7 @@ }; a53_1: cpu@101 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x101>; device_type = "cpu"; power-domains = <&sysc R8A774A1_PD_CA53_CPU1>; @@ -101,7 +101,7 @@ }; a53_2: cpu@102 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x102>; device_type = "cpu"; power-domains = <&sysc R8A774A1_PD_CA53_CPU2>; @@ -111,7 +111,7 @@ }; a53_3: cpu@103 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x103>; device_type = "cpu"; power-domains = <&sysc R8A774A1_PD_CA53_CPU3>; @@ -599,7 +599,7 @@ hsusb: usb@e6590000 { compatible = "renesas,usbhs-r8a774a1", "renesas,rcar-gen3-usbhs"; - reg = <0 0xe6590000 0 0x100>; + reg = <0 0xe6590000 0 0x200>; interrupts = ; clocks = <&cpg CPG_MOD 704>; dmas = <&usb_dmac0 0>, <&usb_dmac0 1>, @@ -1011,6 +1011,9 @@ <&cpg CPG_CORE R8A774A1_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts new file mode 100644 index 000000000000..96ee0d2c6357 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the Silicon Linux RZ/G2E 96board platform (CAT874) + * + * Copyright (C) 2019 Renesas Electronics Corp. + */ + +/dts-v1/; +#include "r8a774c0.dtsi" +#include + +/ { + model = "Silicon Linux RZ/G2E 96board platform (CAT874)"; + compatible = "si-linux,cat874", "renesas,r8a774c0"; + + aliases { + serial0 = &scif2; + }; + + chosen { + bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp"; + stdout-path = "serial0:115200n8"; + }; + + memory@48000000 { + device_type = "memory"; + /* first 128MB is reserved for secure area. 
*/ + reg = <0x0 0x48000000 0x0 0x78000000>; + }; + + vcc_sdhi0: regulator-vcc-sdhi0 { + compatible = "regulator-fixed"; + + regulator-name = "SDHI0 Vcc"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + }; + + vccq_sdhi0: regulator-vccq-sdhi0 { + compatible = "regulator-gpio"; + + regulator-name = "SDHI0 VccQ"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + + gpios = <&gpio3 13 GPIO_ACTIVE_HIGH>; + gpios-states = <1>; + states = <3300000 1 + 1800000 0>; + }; +}; + +&extal_clk { + clock-frequency = <48000000>; +}; + +&pcie_bus_clk { + clock-frequency = <100000000>; +}; + +&pciec0 { + /* Map all possible DDR as inbound ranges */ + dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000>; +}; + +&pfc { + scif2_pins: scif2 { + groups = "scif2_data_a"; + function = "scif2"; + }; + + sdhi0_pins: sd0 { + groups = "sdhi0_data4", "sdhi0_ctrl"; + function = "sdhi0"; + power-source = <3300>; + }; + + sdhi0_pins_uhs: sd0_uhs { + groups = "sdhi0_data4", "sdhi0_ctrl"; + function = "sdhi0"; + power-source = <1800>; + }; +}; + +&scif2 { + pinctrl-0 = <&scif2_pins>; + pinctrl-names = "default"; + + status = "okay"; +}; + +&sdhi0 { + pinctrl-0 = <&sdhi0_pins>; + pinctrl-1 = <&sdhi0_pins_uhs>; + pinctrl-names = "default", "state_uhs"; + + vmmc-supply = <&vcc_sdhi0>; + vqmmc-supply = <&vccq_sdhi0>; + cd-gpios = <&gpio3 12 GPIO_ACTIVE_LOW>; + bus-width = <4>; + sd-uhs-sdr50; + sd-uhs-sdr104; + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874.dts new file mode 100644 index 000000000000..e7b6619ab224 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874.dts @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the Silicon Linux RZ/G2E evaluation kit (EK874) + * + * Copyright (C) 2019 Renesas Electronics Corp. + */ + +#include "r8a774c0-cat874.dts" +#include "cat875.dtsi" + +/ { + model = "Silicon Linux RZ/G2E evaluation kit EK874 (CAT874 + CAT875)"; + compatible = "si-linux,cat875", "si-linux,cat874", "renesas,r8a774c0"; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi new file mode 100644 index 000000000000..61a0afb74e63 --- /dev/null +++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi @@ -0,0 +1,1911 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Device Tree Source for the RZ/G2E (R8A774C0) SoC + * + * Copyright (C) 2018 Renesas Electronics Corp. + */ + +#include +#include +#include + +/ { + compatible = "renesas,r8a774c0"; + #address-cells = <2>; + #size-cells = <2>; + + /* + * The external audio clocks are configured as 0 Hz fixed frequency + * clocks by default. + * Boards that provide audio clocks should override them. 
+ */ + audio_clk_a: audio_clk_a { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + audio_clk_b: audio_clk_b { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + audio_clk_c: audio_clk_c { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + /* External CAN clock - to be overridden by boards that provide it */ + can_clk: can { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + cluster1_opp: opp_table10 { + compatible = "operating-points-v2"; + opp-shared; + opp-800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <820000>; + clock-latency-ns = <300000>; + }; + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <820000>; + clock-latency-ns = <300000>; + }; + opp-1200000000 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <820000>; + clock-latency-ns = <300000>; + opp-suspend; + }; + }; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + + a53_0: cpu@0 { + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <0>; + device_type = "cpu"; + power-domains = <&sysc R8A774C0_PD_CA53_CPU0>; + next-level-cache = <&L2_CA53>; + enable-method = "psci"; + clocks =<&cpg CPG_CORE R8A774C0_CLK_Z2>; + operating-points-v2 = <&cluster1_opp>; + }; + + a53_1: cpu@1 { + compatible = "arm,cortex-a53", "arm,armv8"; + reg = <1>; + device_type = "cpu"; + power-domains = <&sysc R8A774C0_PD_CA53_CPU1>; + next-level-cache = <&L2_CA53>; + enable-method = "psci"; + clocks =<&cpg CPG_CORE R8A774C0_CLK_Z2>; + operating-points-v2 = <&cluster1_opp>; + }; + + L2_CA53: cache-controller-0 { + compatible = "cache"; + power-domains = <&sysc R8A774C0_PD_CA53_SCU>; + cache-unified; + cache-level = <2>; + }; + }; + + extal_clk: extal { + compatible = "fixed-clock"; + #clock-cells = <0>; + /* This value must be overridden by the board */ + clock-frequency = <0>; + }; + + /* External PCIe clock - can be overridden by the board */ + pcie_bus_clk: pcie_bus { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + pmu_a53 { + compatible = "arm,cortex-a53-pmu"; + interrupts-extended = <&gic GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>, + <&gic GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>; + interrupt-affinity = <&a53_0>, <&a53_1>; + }; + + psci { + compatible = "arm,psci-1.0", "arm,psci-0.2"; + method = "smc"; + }; + + /* External SCIF clock - to be overridden by boards that provide it */ + scif_clk: scif { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + soc: soc { + compatible = "simple-bus"; + interrupt-parent = <&gic>; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + rwdt: watchdog@e6020000 { + compatible = "renesas,r8a774c0-wdt", + "renesas,rcar-gen3-wdt"; + reg = <0 0xe6020000 0 0x0c>; + clocks = <&cpg CPG_MOD 402>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 402>; + status = "disabled"; + }; + + gpio0: gpio@e6050000 { + compatible = "renesas,gpio-r8a774c0", + "renesas,rcar-gen3-gpio"; + reg = <0 0xe6050000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 0 18>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 912>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 912>; + }; + + gpio1: gpio@e6051000 { + compatible = "renesas,gpio-r8a774c0", + "renesas,rcar-gen3-gpio"; + reg = <0 0xe6051000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 32 23>; + 
#interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 911>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 911>; + }; + + gpio2: gpio@e6052000 { + compatible = "renesas,gpio-r8a774c0", + "renesas,rcar-gen3-gpio"; + reg = <0 0xe6052000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 64 26>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 910>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 910>; + }; + + gpio3: gpio@e6053000 { + compatible = "renesas,gpio-r8a774c0", + "renesas,rcar-gen3-gpio"; + reg = <0 0xe6053000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 96 16>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 909>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 909>; + }; + + gpio4: gpio@e6054000 { + compatible = "renesas,gpio-r8a774c0", + "renesas,rcar-gen3-gpio"; + reg = <0 0xe6054000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 128 11>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 908>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 908>; + }; + + gpio5: gpio@e6055000 { + compatible = "renesas,gpio-r8a774c0", + "renesas,rcar-gen3-gpio"; + reg = <0 0xe6055000 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 160 20>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 907>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 907>; + }; + + gpio6: gpio@e6055400 { + compatible = "renesas,gpio-r8a774c0", + "renesas,rcar-gen3-gpio"; + reg = <0 0xe6055400 0 0x50>; + interrupts = ; + #gpio-cells = <2>; + gpio-controller; + gpio-ranges = <&pfc 0 192 18>; + #interrupt-cells = <2>; + interrupt-controller; + clocks = <&cpg CPG_MOD 906>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 906>; + }; + + pfc: pin-controller@e6060000 { + compatible = "renesas,pfc-r8a774c0"; + reg = <0 0xe6060000 0 0x508>; + }; + + cmt0: timer@e60f0000 { + compatible = "renesas,r8a774c0-cmt0", + "renesas,rcar-gen3-cmt0"; + reg = <0 0xe60f0000 0 0x1004>; + interrupts = , + ; + clocks = <&cpg CPG_MOD 303>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 303>; + status = "disabled"; + }; + + cmt1: timer@e6130000 { + compatible = "renesas,r8a774c0-cmt1", + "renesas,rcar-gen3-cmt1"; + reg = <0 0xe6130000 0 0x1004>; + interrupts = , + , + , + , + , + , + , + ; + clocks = <&cpg CPG_MOD 302>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 302>; + status = "disabled"; + }; + + cmt2: timer@e6140000 { + compatible = "renesas,r8a774c0-cmt1", + "renesas,rcar-gen3-cmt1"; + reg = <0 0xe6140000 0 0x1004>; + interrupts = , + , + , + , + , + , + , + ; + clocks = <&cpg CPG_MOD 301>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 301>; + status = "disabled"; + }; + + cmt3: timer@e6148000 { + compatible = "renesas,r8a774c0-cmt1", + "renesas,rcar-gen3-cmt1"; + reg = <0 0xe6148000 0 0x1004>; + interrupts = , + , + , + , + , + , + , + ; + clocks = <&cpg CPG_MOD 300>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 300>; + status = "disabled"; + }; + + cpg: clock-controller@e6150000 { + compatible = "renesas,r8a774c0-cpg-mssr"; + reg = <0 0xe6150000 0 0x1000>; + clocks = 
<&extal_clk>; + clock-names = "extal"; + #clock-cells = <2>; + #power-domain-cells = <0>; + #reset-cells = <1>; + }; + + rst: reset-controller@e6160000 { + compatible = "renesas,r8a774c0-rst"; + reg = <0 0xe6160000 0 0x0200>; + }; + + sysc: system-controller@e6180000 { + compatible = "renesas,r8a774c0-sysc"; + reg = <0 0xe6180000 0 0x0400>; + #power-domain-cells = <1>; + }; + + thermal: thermal@e6190000 { + compatible = "renesas,thermal-r8a774c0"; + reg = <0 0xe6190000 0 0x10>, <0 0xe6190100 0 0x38>; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 522>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 522>; + #thermal-sensor-cells = <0>; + }; + + intc_ex: interrupt-controller@e61c0000 { + compatible = "renesas,intc-ex-r8a774c0", "renesas,irqc"; + #interrupt-cells = <2>; + interrupt-controller; + reg = <0 0xe61c0000 0 0x200>; + interrupts = ; + clocks = <&cpg CPG_MOD 407>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 407>; + }; + + tmu0: timer@e61e0000 { + compatible = "renesas,tmu-r8a774c0", "renesas,tmu"; + reg = <0 0xe61e0000 0 0x30>; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 125>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 125>; + status = "disabled"; + }; + + tmu1: timer@e6fc0000 { + compatible = "renesas,tmu-r8a774c0", "renesas,tmu"; + reg = <0 0xe6fc0000 0 0x30>; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 124>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 124>; + status = "disabled"; + }; + + tmu2: timer@e6fd0000 { + compatible = "renesas,tmu-r8a774c0", "renesas,tmu"; + reg = <0 0xe6fd0000 0 0x30>; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 123>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 123>; + status = "disabled"; + }; + + tmu3: timer@e6fe0000 { + compatible = "renesas,tmu-r8a774c0", "renesas,tmu"; + reg = <0 0xe6fe0000 0 0x30>; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 122>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 122>; + status = "disabled"; + }; + + tmu4: timer@ffc00000 { + compatible = "renesas,tmu-r8a774c0", "renesas,tmu"; + reg = <0 0xffc00000 0 0x30>; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 121>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 121>; + status = "disabled"; + }; + + i2c0: i2c@e6500000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe6500000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 931>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 931>; + dmas = <&dmac1 0x91>, <&dmac1 0x90>, + <&dmac2 0x91>, <&dmac2 0x90>; + dma-names = "tx", "rx", "tx", "rx"; + i2c-scl-internal-delay-ns = <110>; + status = "disabled"; + }; + + i2c1: i2c@e6508000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe6508000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 930>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 930>; + dmas = <&dmac1 0x93>, <&dmac1 0x92>, + <&dmac2 0x93>, <&dmac2 0x92>; + dma-names = "tx", "rx", "tx", "rx"; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c2: i2c@e6510000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe6510000 0 0x40>; + interrupts = ; + clocks = 
<&cpg CPG_MOD 929>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 929>; + dmas = <&dmac1 0x95>, <&dmac1 0x94>, + <&dmac2 0x95>, <&dmac2 0x94>; + dma-names = "tx", "rx", "tx", "rx"; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c3: i2c@e66d0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe66d0000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 928>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 928>; + dmas = <&dmac0 0x97>, <&dmac0 0x96>; + dma-names = "tx", "rx"; + i2c-scl-internal-delay-ns = <110>; + status = "disabled"; + }; + + i2c4: i2c@e66d8000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe66d8000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 927>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 927>; + dmas = <&dmac0 0x99>, <&dmac0 0x98>; + dma-names = "tx", "rx"; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c5: i2c@e66e0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe66e0000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 919>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 919>; + dmas = <&dmac0 0x9b>, <&dmac0 0x9a>; + dma-names = "tx", "rx"; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c6: i2c@e66e8000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe66e8000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 918>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 918>; + dmas = <&dmac0 0x9d>, <&dmac0 0x9c>; + dma-names = "tx", "rx"; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c7: i2c@e6690000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,i2c-r8a774c0", + "renesas,rcar-gen3-i2c"; + reg = <0 0xe6690000 0 0x40>; + interrupts = ; + clocks = <&cpg CPG_MOD 1003>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 1003>; + i2c-scl-internal-delay-ns = <6>; + status = "disabled"; + }; + + i2c_dvfs: i2c@e60b0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,iic-r8a774c0"; + reg = <0 0xe60b0000 0 0x15>; + interrupts = ; + clocks = <&cpg CPG_MOD 926>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 926>; + dmas = <&dmac0 0x11>, <&dmac0 0x10>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + hscif0: serial@e6540000 { + compatible = "renesas,hscif-r8a774c0", + "renesas,rcar-gen3-hscif", + "renesas,hscif"; + reg = <0 0xe6540000 0 0x60>; + interrupts = ; + clocks = <&cpg CPG_MOD 520>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x31>, <&dmac1 0x30>, + <&dmac2 0x31>, <&dmac2 0x30>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 520>; + status = "disabled"; + }; + + hscif1: serial@e6550000 { + compatible = "renesas,hscif-r8a774c0", + "renesas,rcar-gen3-hscif", + "renesas,hscif"; + reg = <0 0xe6550000 0 0x60>; + interrupts = ; + clocks = <&cpg CPG_MOD 519>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x33>, <&dmac1 0x32>, + <&dmac2 0x33>, <&dmac2 0x32>; + dma-names = "tx", "rx", "tx", "rx"; + 
power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 519>; + status = "disabled"; + }; + + hscif2: serial@e6560000 { + compatible = "renesas,hscif-r8a774c0", + "renesas,rcar-gen3-hscif", + "renesas,hscif"; + reg = <0 0xe6560000 0 0x60>; + interrupts = ; + clocks = <&cpg CPG_MOD 518>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x35>, <&dmac1 0x34>, + <&dmac2 0x35>, <&dmac2 0x34>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 518>; + status = "disabled"; + }; + + hscif3: serial@e66a0000 { + compatible = "renesas,hscif-r8a774c0", + "renesas,rcar-gen3-hscif", + "renesas,hscif"; + reg = <0 0xe66a0000 0 0x60>; + interrupts = ; + clocks = <&cpg CPG_MOD 517>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac0 0x37>, <&dmac0 0x36>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 517>; + status = "disabled"; + }; + + hscif4: serial@e66b0000 { + compatible = "renesas,hscif-r8a774c0", + "renesas,rcar-gen3-hscif", + "renesas,hscif"; + reg = <0 0xe66b0000 0 0x60>; + interrupts = ; + clocks = <&cpg CPG_MOD 516>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac0 0x39>, <&dmac0 0x38>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 516>; + status = "disabled"; + }; + + hsusb: usb@e6590000 { + compatible = "renesas,usbhs-r8a774c0", + "renesas,rcar-gen3-usbhs"; + reg = <0 0xe6590000 0 0x200>; + interrupts = ; + clocks = <&cpg CPG_MOD 704>, <&cpg CPG_MOD 703>; + dmas = <&usb_dmac0 0>, <&usb_dmac0 1>, + <&usb_dmac1 0>, <&usb_dmac1 1>; + dma-names = "ch0", "ch1", "ch2", "ch3"; + renesas,buswait = <11>; + phys = <&usb2_phy0>; + phy-names = "usb"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 704>, <&cpg 703>; + status = "disabled"; + }; + + usb_dmac0: dma-controller@e65a0000 { + compatible = "renesas,r8a774c0-usb-dmac", + "renesas,usb-dmac"; + reg = <0 0xe65a0000 0 0x100>; + interrupts = ; + interrupt-names = "ch0", "ch1"; + clocks = <&cpg CPG_MOD 330>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 330>; + #dma-cells = <1>; + dma-channels = <2>; + }; + + usb_dmac1: dma-controller@e65b0000 { + compatible = "renesas,r8a774c0-usb-dmac", + "renesas,usb-dmac"; + reg = <0 0xe65b0000 0 0x100>; + interrupts = ; + interrupt-names = "ch0", "ch1"; + clocks = <&cpg CPG_MOD 331>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 331>; + #dma-cells = <1>; + dma-channels = <2>; + }; + + dmac0: dma-controller@e6700000 { + compatible = "renesas,dmac-r8a774c0", + "renesas,rcar-dmac"; + reg = <0 0xe6700000 0 0x10000>; + interrupts = ; + interrupt-names = "error", + "ch0", "ch1", "ch2", "ch3", + "ch4", "ch5", "ch6", "ch7", + "ch8", "ch9", "ch10", "ch11", + "ch12", "ch13", "ch14", "ch15"; + clocks = <&cpg CPG_MOD 219>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 219>; + #dma-cells = <1>; + dma-channels = <16>; + iommus = <&ipmmu_ds0 0>, <&ipmmu_ds0 1>, + <&ipmmu_ds0 2>, <&ipmmu_ds0 3>, + <&ipmmu_ds0 4>, <&ipmmu_ds0 5>, + <&ipmmu_ds0 6>, <&ipmmu_ds0 7>, + <&ipmmu_ds0 8>, <&ipmmu_ds0 9>, + <&ipmmu_ds0 10>, <&ipmmu_ds0 11>, + <&ipmmu_ds0 12>, <&ipmmu_ds0 13>, + <&ipmmu_ds0 14>, <&ipmmu_ds0 15>; + }; + + dmac1: dma-controller@e7300000 { + compatible = "renesas,dmac-r8a774c0", + 
"renesas,rcar-dmac"; + reg = <0 0xe7300000 0 0x10000>; + interrupts = ; + interrupt-names = "error", + "ch0", "ch1", "ch2", "ch3", + "ch4", "ch5", "ch6", "ch7", + "ch8", "ch9", "ch10", "ch11", + "ch12", "ch13", "ch14", "ch15"; + clocks = <&cpg CPG_MOD 218>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 218>; + #dma-cells = <1>; + dma-channels = <16>; + iommus = <&ipmmu_ds1 0>, <&ipmmu_ds1 1>, + <&ipmmu_ds1 2>, <&ipmmu_ds1 3>, + <&ipmmu_ds1 4>, <&ipmmu_ds1 5>, + <&ipmmu_ds1 6>, <&ipmmu_ds1 7>, + <&ipmmu_ds1 8>, <&ipmmu_ds1 9>, + <&ipmmu_ds1 10>, <&ipmmu_ds1 11>, + <&ipmmu_ds1 12>, <&ipmmu_ds1 13>, + <&ipmmu_ds1 14>, <&ipmmu_ds1 15>; + }; + + dmac2: dma-controller@e7310000 { + compatible = "renesas,dmac-r8a774c0", + "renesas,rcar-dmac"; + reg = <0 0xe7310000 0 0x10000>; + interrupts = ; + interrupt-names = "error", + "ch0", "ch1", "ch2", "ch3", + "ch4", "ch5", "ch6", "ch7", + "ch8", "ch9", "ch10", "ch11", + "ch12", "ch13", "ch14", "ch15"; + clocks = <&cpg CPG_MOD 217>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 217>; + #dma-cells = <1>; + dma-channels = <16>; + iommus = <&ipmmu_ds1 16>, <&ipmmu_ds1 17>, + <&ipmmu_ds1 18>, <&ipmmu_ds1 19>, + <&ipmmu_ds1 20>, <&ipmmu_ds1 21>, + <&ipmmu_ds1 22>, <&ipmmu_ds1 23>, + <&ipmmu_ds1 24>, <&ipmmu_ds1 25>, + <&ipmmu_ds1 26>, <&ipmmu_ds1 27>, + <&ipmmu_ds1 28>, <&ipmmu_ds1 29>, + <&ipmmu_ds1 30>, <&ipmmu_ds1 31>; + }; + + ipmmu_ds0: mmu@e6740000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xe6740000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 0>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + ipmmu_ds1: mmu@e7740000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xe7740000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 1>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + ipmmu_hc: mmu@e6570000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xe6570000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 2>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + ipmmu_mm: mmu@e67b0000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xe67b0000 0 0x1000>; + interrupts = , + ; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + ipmmu_mp: mmu@ec670000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xec670000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 4>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + ipmmu_pv0: mmu@fd800000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xfd800000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 6>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + ipmmu_vc0: mmu@fe6b0000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xfe6b0000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 12>; + power-domains = <&sysc R8A774C0_PD_A3VC>; + #iommu-cells = <1>; + }; + + ipmmu_vi0: mmu@febd0000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xfebd0000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 14>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + ipmmu_vp0: mmu@fe990000 { + compatible = "renesas,ipmmu-r8a774c0"; + reg = <0 0xfe990000 0 0x1000>; + renesas,ipmmu-main = <&ipmmu_mm 16>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + #iommu-cells = <1>; + }; + + avb: ethernet@e6800000 { + compatible = "renesas,etheravb-r8a774c0", + "renesas,etheravb-rcar-gen3"; + reg = <0 0xe6800000 0 0x800>; + interrupts 
= , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + interrupt-names = "ch0", "ch1", "ch2", "ch3", + "ch4", "ch5", "ch6", "ch7", + "ch8", "ch9", "ch10", "ch11", + "ch12", "ch13", "ch14", "ch15", + "ch16", "ch17", "ch18", "ch19", + "ch20", "ch21", "ch22", "ch23", + "ch24"; + clocks = <&cpg CPG_MOD 812>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 812>; + phy-mode = "rgmii"; + iommus = <&ipmmu_ds0 16>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + can0: can@e6c30000 { + compatible = "renesas,can-r8a774c0", + "renesas,rcar-gen3-can"; + reg = <0 0xe6c30000 0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 916>, <&can_clk>; + clock-names = "clkp1", "can_clk"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 916>; + status = "disabled"; + }; + + can1: can@e6c38000 { + compatible = "renesas,can-r8a774c0", + "renesas,rcar-gen3-can"; + reg = <0 0xe6c38000 0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 915>, <&can_clk>; + clock-names = "clkp1", "can_clk"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 915>; + status = "disabled"; + }; + + pwm0: pwm@e6e30000 { + compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar"; + reg = <0 0xe6e30000 0 0x8>; + clocks = <&cpg CPG_MOD 523>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 523>; + #pwm-cells = <2>; + status = "disabled"; + }; + + pwm1: pwm@e6e31000 { + compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar"; + reg = <0 0xe6e31000 0 0x8>; + clocks = <&cpg CPG_MOD 523>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 523>; + #pwm-cells = <2>; + status = "disabled"; + }; + + pwm2: pwm@e6e32000 { + compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar"; + reg = <0 0xe6e32000 0 0x8>; + clocks = <&cpg CPG_MOD 523>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 523>; + #pwm-cells = <2>; + status = "disabled"; + }; + + pwm3: pwm@e6e33000 { + compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar"; + reg = <0 0xe6e33000 0 0x8>; + clocks = <&cpg CPG_MOD 523>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 523>; + #pwm-cells = <2>; + status = "disabled"; + }; + + pwm4: pwm@e6e34000 { + compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar"; + reg = <0 0xe6e34000 0 0x8>; + clocks = <&cpg CPG_MOD 523>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 523>; + #pwm-cells = <2>; + status = "disabled"; + }; + + pwm5: pwm@e6e35000 { + compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar"; + reg = <0 0xe6e35000 0 0x8>; + clocks = <&cpg CPG_MOD 523>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 523>; + #pwm-cells = <2>; + status = "disabled"; + }; + + pwm6: pwm@e6e36000 { + compatible = "renesas,pwm-r8a774c0", "renesas,pwm-rcar"; + reg = <0 0xe6e36000 0 0x8>; + clocks = <&cpg CPG_MOD 523>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 523>; + #pwm-cells = <2>; + status = "disabled"; + }; + + scif0: serial@e6e60000 { + compatible = "renesas,scif-r8a774c0", + "renesas,rcar-gen3-scif", "renesas,scif"; + reg = <0 0xe6e60000 0 64>; + interrupts = ; + clocks = <&cpg CPG_MOD 207>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x51>, <&dmac1 0x50>, + <&dmac2 0x51>, <&dmac2 0x50>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 207>; + status = "disabled"; + }; + + scif1: 
serial@e6e68000 { + compatible = "renesas,scif-r8a774c0", + "renesas,rcar-gen3-scif", "renesas,scif"; + reg = <0 0xe6e68000 0 64>; + interrupts = ; + clocks = <&cpg CPG_MOD 206>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x53>, <&dmac1 0x52>, + <&dmac2 0x53>, <&dmac2 0x52>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 206>; + status = "disabled"; + }; + + scif2: serial@e6e88000 { + compatible = "renesas,scif-r8a774c0", + "renesas,rcar-gen3-scif", "renesas,scif"; + reg = <0 0xe6e88000 0 64>; + interrupts = ; + clocks = <&cpg CPG_MOD 310>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 310>; + status = "disabled"; + }; + + scif3: serial@e6c50000 { + compatible = "renesas,scif-r8a774c0", + "renesas,rcar-gen3-scif", "renesas,scif"; + reg = <0 0xe6c50000 0 64>; + interrupts = ; + clocks = <&cpg CPG_MOD 204>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac0 0x57>, <&dmac0 0x56>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 204>; + status = "disabled"; + }; + + scif4: serial@e6c40000 { + compatible = "renesas,scif-r8a774c0", + "renesas,rcar-gen3-scif", "renesas,scif"; + reg = <0 0xe6c40000 0 64>; + interrupts = ; + clocks = <&cpg CPG_MOD 203>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac0 0x59>, <&dmac0 0x58>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 203>; + status = "disabled"; + }; + + scif5: serial@e6f30000 { + compatible = "renesas,scif-r8a774c0", + "renesas,rcar-gen3-scif", "renesas,scif"; + reg = <0 0xe6f30000 0 64>; + interrupts = ; + clocks = <&cpg CPG_MOD 202>, + <&cpg CPG_CORE R8A774C0_CLK_S3D1C>, + <&scif_clk>; + clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x5b>, <&dmac1 0x5a>, + <&dmac2 0x5b>, <&dmac2 0x5a>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 202>; + status = "disabled"; + }; + + msiof0: spi@e6e90000 { + compatible = "renesas,msiof-r8a774c0", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6e90000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 211>; + dmas = <&dmac1 0x41>, <&dmac1 0x40>, + <&dmac2 0x41>, <&dmac2 0x40>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 211>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + msiof1: spi@e6ea0000 { + compatible = "renesas,msiof-r8a774c0", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6ea0000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 210>; + dmas = <&dmac1 0x43>, <&dmac1 0x42>, + <&dmac2 0x43>, <&dmac2 0x42>; + dma-names = "tx", "rx", "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 210>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + msiof2: spi@e6c00000 { + compatible = "renesas,msiof-r8a774c0", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6c00000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 209>; + dmas = <&dmac0 0x45>, <&dmac0 0x44>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + 
resets = <&cpg 209>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + msiof3: spi@e6c10000 { + compatible = "renesas,msiof-r8a774c0", + "renesas,rcar-gen3-msiof"; + reg = <0 0xe6c10000 0 0x0064>; + interrupts = ; + clocks = <&cpg CPG_MOD 208>; + dmas = <&dmac0 0x47>, <&dmac0 0x46>; + dma-names = "tx", "rx"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 208>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + }; + + vin4: video@e6ef4000 { + compatible = "renesas,vin-r8a774c0"; + reg = <0 0xe6ef4000 0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 807>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 807>; + renesas,id = <4>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin4csi40: endpoint@2 { + reg = <2>; + remote-endpoint= <&csi40vin4>; + }; + }; + }; + }; + + vin5: video@e6ef5000 { + compatible = "renesas,vin-r8a774c0"; + reg = <0 0xe6ef5000 0 0x1000>; + interrupts = ; + clocks = <&cpg CPG_MOD 806>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 806>; + renesas,id = <5>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + vin5csi40: endpoint@2 { + reg = <2>; + remote-endpoint= <&csi40vin5>; + }; + }; + }; + }; + + rcar_sound: sound@ec500000 { + /* + * #sound-dai-cells is required + * + * Single DAI : #sound-dai-cells = <0>; <&rcar_sound>; + * Multi DAI : #sound-dai-cells = <1>; <&rcar_sound N>; + */ + /* + * #clock-cells is required for audio_clkout0/1/2/3 + * + * clkout : #clock-cells = <0>; <&rcar_sound>; + * clkout0/1/2/3: #clock-cells = <1>; <&rcar_sound N>; + */ + compatible = "renesas,rcar_sound-r8a774c0", + "renesas,rcar_sound-gen3"; + reg = <0 0xec500000 0 0x1000>, /* SCU */ + <0 0xec5a0000 0 0x100>, /* ADG */ + <0 0xec540000 0 0x1000>, /* SSIU */ + <0 0xec541000 0 0x280>, /* SSI */ + <0 0xec760000 0 0x200>; /* Audio DMAC peri peri*/ + reg-names = "scu", "adg", "ssiu", "ssi", "audmapp"; + + clocks = <&cpg CPG_MOD 1005>, + <&cpg CPG_MOD 1006>, <&cpg CPG_MOD 1007>, + <&cpg CPG_MOD 1008>, <&cpg CPG_MOD 1009>, + <&cpg CPG_MOD 1010>, <&cpg CPG_MOD 1011>, + <&cpg CPG_MOD 1012>, <&cpg CPG_MOD 1013>, + <&cpg CPG_MOD 1014>, <&cpg CPG_MOD 1015>, + <&cpg CPG_MOD 1022>, <&cpg CPG_MOD 1023>, + <&cpg CPG_MOD 1024>, <&cpg CPG_MOD 1025>, + <&cpg CPG_MOD 1026>, <&cpg CPG_MOD 1027>, + <&cpg CPG_MOD 1028>, <&cpg CPG_MOD 1029>, + <&cpg CPG_MOD 1030>, <&cpg CPG_MOD 1031>, + <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>, + <&cpg CPG_MOD 1020>, <&cpg CPG_MOD 1021>, + <&cpg CPG_MOD 1019>, <&cpg CPG_MOD 1018>, + <&audio_clk_a>, <&audio_clk_b>, + <&audio_clk_c>, + <&cpg CPG_CORE R8A774C0_CLK_ZA2>; + clock-names = "ssi-all", + "ssi.9", "ssi.8", "ssi.7", "ssi.6", + "ssi.5", "ssi.4", "ssi.3", "ssi.2", + "ssi.1", "ssi.0", + "src.9", "src.8", "src.7", "src.6", + "src.5", "src.4", "src.3", "src.2", + "src.1", "src.0", + "mix.1", "mix.0", + "ctu.1", "ctu.0", + "dvc.0", "dvc.1", + "clk_a", "clk_b", "clk_c", "clk_i"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 1005>, + <&cpg 1006>, <&cpg 1007>, + <&cpg 1008>, <&cpg 1009>, + <&cpg 1010>, <&cpg 1011>, + <&cpg 1012>, <&cpg 1013>, + <&cpg 1014>, <&cpg 1015>; + reset-names = "ssi-all", + "ssi.9", "ssi.8", "ssi.7", "ssi.6", + "ssi.5", "ssi.4", "ssi.3", "ssi.2", + "ssi.1", "ssi.0"; + status = "disabled"; + + rcar_sound,dvc { + dvc0: 
dvc-0 { + dmas = <&audma0 0xbc>; + dma-names = "tx"; + }; + dvc1: dvc-1 { + dmas = <&audma0 0xbe>; + dma-names = "tx"; + }; + }; + + rcar_sound,mix { + mix0: mix-0 { }; + mix1: mix-1 { }; + }; + + rcar_sound,ctu { + ctu00: ctu-0 { }; + ctu01: ctu-1 { }; + ctu02: ctu-2 { }; + ctu03: ctu-3 { }; + ctu10: ctu-4 { }; + ctu11: ctu-5 { }; + ctu12: ctu-6 { }; + ctu13: ctu-7 { }; + }; + + rcar_sound,src { + src0: src-0 { + interrupts = ; + dmas = <&audma0 0x85>, <&audma0 0x9a>; + dma-names = "rx", "tx"; + }; + src1: src-1 { + interrupts = ; + dmas = <&audma0 0x87>, <&audma0 0x9c>; + dma-names = "rx", "tx"; + }; + src2: src-2 { + interrupts = ; + dmas = <&audma0 0x89>, <&audma0 0x9e>; + dma-names = "rx", "tx"; + }; + src3: src-3 { + interrupts = ; + dmas = <&audma0 0x8b>, <&audma0 0xa0>; + dma-names = "rx", "tx"; + }; + src4: src-4 { + interrupts = ; + dmas = <&audma0 0x8d>, <&audma0 0xb0>; + dma-names = "rx", "tx"; + }; + src5: src-5 { + interrupts = ; + dmas = <&audma0 0x8f>, <&audma0 0xb2>; + dma-names = "rx", "tx"; + }; + src6: src-6 { + interrupts = ; + dmas = <&audma0 0x91>, <&audma0 0xb4>; + dma-names = "rx", "tx"; + }; + src7: src-7 { + interrupts = ; + dmas = <&audma0 0x93>, <&audma0 0xb6>; + dma-names = "rx", "tx"; + }; + src8: src-8 { + interrupts = ; + dmas = <&audma0 0x95>, <&audma0 0xb8>; + dma-names = "rx", "tx"; + }; + src9: src-9 { + interrupts = ; + dmas = <&audma0 0x97>, <&audma0 0xba>; + dma-names = "rx", "tx"; + }; + }; + + rcar_sound,ssi { + ssi0: ssi-0 { + interrupts = ; + dmas = <&audma0 0x01>, <&audma0 0x02>, + <&audma0 0x15>, <&audma0 0x16>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi1: ssi-1 { + interrupts = ; + dmas = <&audma0 0x03>, <&audma0 0x04>, + <&audma0 0x49>, <&audma0 0x4a>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi2: ssi-2 { + interrupts = ; + dmas = <&audma0 0x05>, <&audma0 0x06>, + <&audma0 0x63>, <&audma0 0x64>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi3: ssi-3 { + interrupts = ; + dmas = <&audma0 0x07>, <&audma0 0x08>, + <&audma0 0x6f>, <&audma0 0x70>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi4: ssi-4 { + interrupts = ; + dmas = <&audma0 0x09>, <&audma0 0x0a>, + <&audma0 0x71>, <&audma0 0x72>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi5: ssi-5 { + interrupts = ; + dmas = <&audma0 0x0b>, <&audma0 0x0c>, + <&audma0 0x73>, <&audma0 0x74>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi6: ssi-6 { + interrupts = ; + dmas = <&audma0 0x0d>, <&audma0 0x0e>, + <&audma0 0x75>, <&audma0 0x76>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi7: ssi-7 { + interrupts = ; + dmas = <&audma0 0x0f>, <&audma0 0x10>, + <&audma0 0x79>, <&audma0 0x7a>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi8: ssi-8 { + interrupts = ; + dmas = <&audma0 0x11>, <&audma0 0x12>, + <&audma0 0x7b>, <&audma0 0x7c>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + ssi9: ssi-9 { + interrupts = ; + dmas = <&audma0 0x13>, <&audma0 0x14>, + <&audma0 0x7d>, <&audma0 0x7e>; + dma-names = "rx", "tx", "rxu", "txu"; + }; + }; + }; + + audma0: dma-controller@ec700000 { + compatible = "renesas,dmac-r8a774c0", + "renesas,rcar-dmac"; + reg = <0 0xec700000 0 0x10000>; + interrupts = ; + interrupt-names = "error", + "ch0", "ch1", "ch2", "ch3", + "ch4", "ch5", "ch6", "ch7", + "ch8", "ch9", "ch10", "ch11", + "ch12", "ch13", "ch14", "ch15"; + clocks = <&cpg CPG_MOD 502>; + clock-names = "fck"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 502>; + #dma-cells = <1>; + dma-channels = <16>; + iommus = <&ipmmu_mp 0>, <&ipmmu_mp 1>, + <&ipmmu_mp 2>, 
<&ipmmu_mp 3>, + <&ipmmu_mp 4>, <&ipmmu_mp 5>, + <&ipmmu_mp 6>, <&ipmmu_mp 7>, + <&ipmmu_mp 8>, <&ipmmu_mp 9>, + <&ipmmu_mp 10>, <&ipmmu_mp 11>, + <&ipmmu_mp 12>, <&ipmmu_mp 13>, + <&ipmmu_mp 14>, <&ipmmu_mp 15>; + }; + + xhci0: usb@ee000000 { + compatible = "renesas,xhci-r8a774c0", + "renesas,rcar-gen3-xhci"; + reg = <0 0xee000000 0 0xc00>; + interrupts = ; + clocks = <&cpg CPG_MOD 328>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 328>; + status = "disabled"; + }; + + usb3_peri0: usb@ee020000 { + compatible = "renesas,r8a774c0-usb3-peri", + "renesas,rcar-gen3-usb3-peri"; + reg = <0 0xee020000 0 0x400>; + interrupts = ; + clocks = <&cpg CPG_MOD 328>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 328>; + status = "disabled"; + }; + + ohci0: usb@ee080000 { + compatible = "generic-ohci"; + reg = <0 0xee080000 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>; + phys = <&usb2_phy0>; + phy-names = "usb"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 703>, <&cpg 704>; + status = "disabled"; + }; + + ehci0: usb@ee080100 { + compatible = "generic-ehci"; + reg = <0 0xee080100 0 0x100>; + interrupts = ; + clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>; + phys = <&usb2_phy0>; + phy-names = "usb"; + companion = <&ohci0>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 703>, <&cpg 704>; + status = "disabled"; + }; + + usb2_phy0: usb-phy@ee080200 { + compatible = "renesas,usb2-phy-r8a774c0", + "renesas,rcar-gen3-usb2-phy"; + reg = <0 0xee080200 0 0x700>; + interrupts = ; + clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 703>, <&cpg 704>; + #phy-cells = <0>; + status = "disabled"; + }; + + sdhi0: sd@ee100000 { + compatible = "renesas,sdhi-r8a774c0", + "renesas,rcar-gen3-sdhi"; + reg = <0 0xee100000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 314>; + max-frequency = <200000000>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 314>; + status = "disabled"; + }; + + sdhi1: sd@ee120000 { + compatible = "renesas,sdhi-r8a774c0", + "renesas,rcar-gen3-sdhi"; + reg = <0 0xee120000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 313>; + max-frequency = <200000000>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 313>; + status = "disabled"; + }; + + sdhi3: sd@ee160000 { + compatible = "renesas,sdhi-r8a774c0", + "renesas,rcar-gen3-sdhi"; + reg = <0 0xee160000 0 0x2000>; + interrupts = ; + clocks = <&cpg CPG_MOD 311>; + max-frequency = <200000000>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 311>; + status = "disabled"; + }; + + gic: interrupt-controller@f1010000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0x0 0xf1010000 0 0x1000>, + <0x0 0xf1020000 0 0x20000>, + <0x0 0xf1040000 0 0x20000>, + <0x0 0xf1060000 0 0x20000>; + interrupts = ; + clocks = <&cpg CPG_MOD 408>; + clock-names = "clk"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 408>; + }; + + pciec0: pcie@fe000000 { + compatible = "renesas,pcie-r8a774c0", + "renesas,pcie-rcar-gen3"; + reg = <0 0xfe000000 0 0x80000>; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x00 0xff>; + device_type = "pci"; + ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000 + 0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000 + 0x02000000 0 0x30000000 0 0x30000000 0 0x08000000 + 0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>; + 
/* Map all possible DDR as inbound ranges */ + dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>; + interrupts = , + , + ; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>; + clock-names = "pcie", "pcie_bus"; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 319>; + status = "disabled"; + }; + + vspb0: vsp@fe960000 { + compatible = "renesas,vsp2"; + reg = <0 0xfe960000 0 0x8000>; + interrupts = ; + clocks = <&cpg CPG_MOD 626>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 626>; + renesas,fcp = <&fcpvb0>; + }; + + fcpvb0: fcp@fe96f000 { + compatible = "renesas,fcpv"; + reg = <0 0xfe96f000 0 0x200>; + clocks = <&cpg CPG_MOD 607>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 607>; + iommus = <&ipmmu_vp0 5>; + }; + + vspi0: vsp@fe9a0000 { + compatible = "renesas,vsp2"; + reg = <0 0xfe9a0000 0 0x8000>; + interrupts = ; + clocks = <&cpg CPG_MOD 631>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 631>; + renesas,fcp = <&fcpvi0>; + }; + + fcpvi0: fcp@fe9af000 { + compatible = "renesas,fcpv"; + reg = <0 0xfe9af000 0 0x200>; + clocks = <&cpg CPG_MOD 611>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 611>; + iommus = <&ipmmu_vp0 8>; + }; + + vspd0: vsp@fea20000 { + compatible = "renesas,vsp2"; + reg = <0 0xfea20000 0 0x7000>; + interrupts = ; + clocks = <&cpg CPG_MOD 623>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 623>; + renesas,fcp = <&fcpvd0>; + }; + + fcpvd0: fcp@fea27000 { + compatible = "renesas,fcpv"; + reg = <0 0xfea27000 0 0x200>; + clocks = <&cpg CPG_MOD 603>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 603>; + iommus = <&ipmmu_vi0 8>; + }; + + vspd1: vsp@fea28000 { + compatible = "renesas,vsp2"; + reg = <0 0xfea28000 0 0x7000>; + interrupts = ; + clocks = <&cpg CPG_MOD 622>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 622>; + renesas,fcp = <&fcpvd1>; + }; + + fcpvd1: fcp@fea2f000 { + compatible = "renesas,fcpv"; + reg = <0 0xfea2f000 0 0x200>; + clocks = <&cpg CPG_MOD 602>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 602>; + iommus = <&ipmmu_vi0 9>; + }; + + csi40: csi2@feaa0000 { + compatible = "renesas,r8a774c0-csi2", + "renesas,rcar-gen3-csi2"; + reg = <0 0xfeaa0000 0 0x10000>; + interrupts = ; + clocks = <&cpg CPG_MOD 716>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 716>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@1 { + #address-cells = <1>; + #size-cells = <0>; + + reg = <1>; + + csi40vin4: endpoint@0 { + reg = <0>; + remote-endpoint = <&vin4csi40>; + }; + csi40vin5: endpoint@1 { + reg = <1>; + remote-endpoint = <&vin5csi40>; + }; + }; + }; + }; + + du: display@feb00000 { + compatible = "renesas,du-r8a774c0"; + reg = <0 0xfeb00000 0 0x80000>; + interrupts = , + ; + clocks = <&cpg CPG_MOD 724>, + <&cpg CPG_MOD 723>; + clock-names = "du.0", "du.1"; + vsps = <&vspd0 0 &vspd1 0>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + du_out_rgb: endpoint { + }; + }; + + port@1 { + reg = <1>; + du_out_lvds0: endpoint { + remote-endpoint = <&lvds0_in>; + }; + }; + + port@2 { + reg = <2>; + du_out_lvds1: endpoint { + remote-endpoint = <&lvds1_in>; + }; + }; + }; + }; + + lvds0: lvds-encoder@feb90000 { + compatible = "renesas,r8a774c0-lvds"; + reg = <0 
0xfeb90000 0 0x20>; + clocks = <&cpg CPG_MOD 727>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 727>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + lvds0_in: endpoint { + remote-endpoint = <&du_out_lvds0>; + }; + }; + + port@1 { + reg = <1>; + lvds0_out: endpoint { + }; + }; + }; + }; + + lvds1: lvds-encoder@feb90100 { + compatible = "renesas,r8a774c0-lvds"; + reg = <0 0xfeb90100 0 0x20>; + clocks = <&cpg CPG_MOD 727>; + power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>; + resets = <&cpg 726>; + status = "disabled"; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + lvds1_in: endpoint { + remote-endpoint = <&du_out_lvds1>; + }; + }; + + port@1 { + reg = <1>; + lvds1_out: endpoint { + }; + }; + }; + }; + + prr: chipid@fff00044 { + compatible = "renesas,prr"; + reg = <0 0xfff00044 0 4>; + }; + }; + + thermal-zones { + cpu-thermal { + polling-delay-passive = <250>; + polling-delay = <1000>; + thermal-sensors = <&thermal>; + + trips { + cpu-crit { + temperature = <120000>; + hysteresis = <2000>; + type = "critical"; + }; + }; + + cooling-maps { + }; + }; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, + <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, + <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, + <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>; + }; + + /* External USB clocks - can be overridden by the board */ + usb3s0_clk: usb3s0 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; + + usb_extal_clk: usb_extal { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + }; +}; diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi index af9605d5db27..abeac3059383 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi @@ -149,7 +149,7 @@ }; a57_0: cpu@0 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x0>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA57_CPU0>; @@ -162,7 +162,7 @@ }; a57_1: cpu@1 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x1>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA57_CPU1>; @@ -175,7 +175,7 @@ }; a57_2: cpu@2 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x2>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA57_CPU2>; @@ -188,7 +188,7 @@ }; a57_3: cpu@3 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x3>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA57_CPU3>; @@ -201,7 +201,7 @@ }; a53_0: cpu@100 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x100>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA53_CPU0>; @@ -213,7 +213,7 @@ }; a53_1: cpu@101 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x101>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA53_CPU1>; @@ -225,7 +225,7 @@ }; a53_2: cpu@102 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x102>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA53_CPU2>; @@ -237,7 +237,7 @@ }; a53_3: cpu@103 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; 
reg = <0x103>; device_type = "cpu"; power-domains = <&sysc R8A7795_PD_CA53_CPU3>; @@ -2174,53 +2174,53 @@ rcar_sound,ssi { ssi0: ssi-0 { interrupts = ; - dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x01>, <&audma1 0x02>; + dma-names = "rx", "tx"; }; ssi1: ssi-1 { interrupts = ; - dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x03>, <&audma1 0x04>; + dma-names = "rx", "tx"; }; ssi2: ssi-2 { interrupts = ; - dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x05>, <&audma1 0x06>; + dma-names = "rx", "tx"; }; ssi3: ssi-3 { interrupts = ; - dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x07>, <&audma1 0x08>; + dma-names = "rx", "tx"; }; ssi4: ssi-4 { interrupts = ; - dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x09>, <&audma1 0x0a>; + dma-names = "rx", "tx"; }; ssi5: ssi-5 { interrupts = ; - dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x0b>, <&audma1 0x0c>; + dma-names = "rx", "tx"; }; ssi6: ssi-6 { interrupts = ; - dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x0d>, <&audma1 0x0e>; + dma-names = "rx", "tx"; }; ssi7: ssi-7 { interrupts = ; - dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x0f>, <&audma1 0x10>; + dma-names = "rx", "tx"; }; ssi8: ssi-8 { interrupts = ; - dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x11>, <&audma1 0x12>; + dma-names = "rx", "tx"; }; ssi9: ssi-9 { interrupts = ; - dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x13>, <&audma1 0x14>; + dma-names = "rx", "tx"; }; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts index 8860be65342e..31f12059355e 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts +++ b/arch/arm64/boot/dts/renesas/r8a7796-salvator-xs.dts @@ -29,11 +29,10 @@ clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&cpg CPG_MOD 722>, - <&cpg CPG_MOD 727>, <&versaclock6 1>, <&x21_clk>, <&versaclock6 2>; - clock-names = "du.0", "du.1", "du.2", "lvds.0", + clock-names = "du.0", "du.1", "du.2", "dclkin.0", "dclkin.1", "dclkin.2"; }; diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi index afedbf5728ec..cdf784899cf8 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi @@ -154,7 +154,7 @@ }; a57_0: cpu@0 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x0>; device_type = "cpu"; power-domains = <&sysc R8A7796_PD_CA57_CPU0>; @@ -167,7 +167,7 @@ }; a57_1: cpu@1 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x1>; device_type = "cpu"; power-domains = <&sysc R8A7796_PD_CA57_CPU1>; @@ -180,7 +180,7 @@ }; a53_0: cpu@100 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = 
"arm,cortex-a53"; reg = <0x100>; device_type = "cpu"; power-domains = <&sysc R8A7796_PD_CA53_CPU0>; @@ -192,7 +192,7 @@ }; a53_1: cpu@101 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x101>; device_type = "cpu"; power-domains = <&sysc R8A7796_PD_CA53_CPU1>; @@ -204,7 +204,7 @@ }; a53_2: cpu@102 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x102>; device_type = "cpu"; power-domains = <&sysc R8A7796_PD_CA53_CPU2>; @@ -216,7 +216,7 @@ }; a53_3: cpu@103 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x103>; device_type = "cpu"; power-domains = <&sysc R8A7796_PD_CA53_CPU3>; @@ -1262,6 +1262,9 @@ <&cpg CPG_CORE R8A7796_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; @@ -2110,53 +2113,53 @@ rcar_sound,ssi { ssi0: ssi-0 { interrupts = ; - dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x01>, <&audma1 0x02>; + dma-names = "rx", "tx"; }; ssi1: ssi-1 { interrupts = ; - dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x03>, <&audma1 0x04>; + dma-names = "rx", "tx"; }; ssi2: ssi-2 { interrupts = ; - dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x05>, <&audma1 0x06>; + dma-names = "rx", "tx"; }; ssi3: ssi-3 { interrupts = ; - dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x07>, <&audma1 0x08>; + dma-names = "rx", "tx"; }; ssi4: ssi-4 { interrupts = ; - dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x09>, <&audma1 0x0a>; + dma-names = "rx", "tx"; }; ssi5: ssi-5 { interrupts = ; - dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x0b>, <&audma1 0x0c>; + dma-names = "rx", "tx"; }; ssi6: ssi-6 { interrupts = ; - dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x0d>, <&audma1 0x0e>; + dma-names = "rx", "tx"; }; ssi7: ssi-7 { interrupts = ; - dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x0f>, <&audma1 0x10>; + dma-names = "rx", "tx"; }; ssi8: ssi-8 { interrupts = ; - dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x11>, <&audma1 0x12>; + dma-names = "rx", "tx"; }; ssi9: ssi-9 { interrupts = ; - dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>; - dma-names = "rx", "tx", "rxu", "txu"; + dmas = <&audma0 0x13>, <&audma1 0x14>; + dma-names = "rx", "tx"; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi index 6dc9b1fef830..9763d108e183 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi @@ -105,7 +105,7 @@ #size-cells = <0>; a57_0: cpu@0 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = 
<0x0>; device_type = "cpu"; power-domains = <&sysc R8A77965_PD_CA57_CPU0>; @@ -116,7 +116,7 @@ }; a57_1: cpu@1 { - compatible = "arm,cortex-a57", "arm,armv8"; + compatible = "arm,cortex-a57"; reg = <0x1>; device_type = "cpu"; power-domains = <&sysc R8A77965_PD_CA57_CPU1>; @@ -1068,6 +1068,9 @@ <&cpg CPG_CORE R8A77965_CLK_S3D1>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; diff --git a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts index 0dbcb4cccc18..15cc9fed2e16 100644 --- a/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts +++ b/arch/arm64/boot/dts/renesas/r8a77970-v3msk.dts @@ -108,6 +108,8 @@ phy0: ethernet-phy@0 { rxc-skew-ps = <1500>; reg = <0>; + interrupt-parent = <&gpio1>; + interrupts = <17 IRQ_TYPE_LEVEL_LOW>; }; }; diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi index 563428d1cdc2..5b6164d4b8e3 100644 --- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi @@ -37,7 +37,7 @@ a53_0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0>; clocks = <&cpg CPG_CORE R8A77970_CLK_Z2>; power-domains = <&sysc R8A77970_PD_CA53_CPU0>; @@ -47,7 +47,7 @@ a53_1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <1>; clocks = <&cpg CPG_CORE R8A77970_CLK_Z2>; power-domains = <&sysc R8A77970_PD_CA53_CPU1>; diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi index 5bd9b2547c36..4081622d548a 100644 --- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi @@ -38,7 +38,7 @@ a53_0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0>; clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>; power-domains = <&sysc R8A77980_PD_CA53_CPU0>; @@ -48,7 +48,7 @@ a53_1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <1>; clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>; power-domains = <&sysc R8A77980_PD_CA53_CPU1>; @@ -58,7 +58,7 @@ a53_2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <2>; clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>; power-domains = <&sysc R8A77980_PD_CA53_CPU2>; @@ -68,7 +68,7 @@ a53_3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <3>; clocks = <&cpg CPG_CORE R8A77980_CLK_Z2>; power-domains = <&sysc R8A77980_PD_CA53_CPU3>; diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts index 62bdddcbbae7..144c0820cf60 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts +++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts @@ -39,6 +39,16 @@ clock-frequency = <11289600>; }; + backlight: backlight { + compatible = "pwm-backlight"; + pwms = <&pwm3 0 50000>; + + brightness-levels = <512 511 505 494 473 440 392 327 241 133 0>; + default-brightness-level = <10>; + + power-supply = <®_12p0v>; + }; + cvbs-in { compatible = "composite-video-connector"; label = "CVBS IN"; @@ -159,16 +169,13 @@ }; rsnd_ak4613: sound { - compatible = "simple-scu-audio-card"; + 
compatible = "simple-audio-card"; simple-audio-card,name = "rsnd-ak4613"; simple-audio-card,format = "left_j"; simple-audio-card,bitclock-master = <&sndcpu>; simple-audio-card,frame-master = <&sndcpu>; - simple-audio-card,prefix = "ak4613"; - simple-audio-card,routing = "ak4613 Playback", "DAI0 Playback", - "DAI0 Capture", "ak4613 Capture"; sndcpu: simple-audio-card,cpu { sound-dai = <&rcar_sound>; }; @@ -184,6 +191,15 @@ clock-frequency = <24576000>; }; + reg_12p0v: regulator2 { + compatible = "regulator-fixed"; + regulator-name = "D12.0V"; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + regulator-boot-on; + regulator-always-on; + }; + x13_clk: x13 { compatible = "fixed-clock"; #clock-cells = <0>; @@ -248,7 +264,6 @@ pinctrl-names = "default"; renesas,no-ether-link; phy-handle = <&phy0>; - phy-mode = "rgmii-txid"; status = "okay"; phy0: ethernet-phy@0 { @@ -680,6 +695,7 @@ vmmc-supply = <®_3p3v>; vqmmc-supply = <®_1p8v>; mmc-hs200-1_8v; + mmc-hs400-1_8v; bus-width = <8>; non-removable; status = "okay"; diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi index b2f606e286ce..a69faa60ea4d 100644 --- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi @@ -55,26 +55,51 @@ clock-frequency = <0>; }; + cluster1_opp: opp_table10 { + compatible = "operating-points-v2"; + opp-shared; + opp-800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <820000>; + clock-latency-ns = <300000>; + }; + opp-1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <820000>; + clock-latency-ns = <300000>; + }; + opp-1200000000 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <820000>; + clock-latency-ns = <300000>; + opp-suspend; + }; + }; + cpus { #address-cells = <1>; #size-cells = <0>; a53_0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0>; device_type = "cpu"; power-domains = <&sysc R8A77990_PD_CA53_CPU0>; next-level-cache = <&L2_CA53>; enable-method = "psci"; + clocks =<&cpg CPG_CORE R8A77990_CLK_Z2>; + operating-points-v2 = <&cluster1_opp>; }; a53_1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <1>; device_type = "cpu"; power-domains = <&sysc R8A77990_PD_CA53_CPU1>; next-level-cache = <&L2_CA53>; enable-method = "psci"; + clocks =<&cpg CPG_CORE R8A77990_CLK_Z2>; + operating-points-v2 = <&cluster1_opp>; }; L2_CA53: cache-controller-0 { @@ -240,6 +265,74 @@ resets = <&cpg 906>; }; + pfc: pin-controller@e6060000 { + compatible = "renesas,pfc-r8a77990"; + reg = <0 0xe6060000 0 0x508>; + }; + + i2c_dvfs: i2c@e60b0000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "renesas,iic-r8a77990"; + reg = <0 0xe60b0000 0 0x15>; + interrupts = ; + clocks = <&cpg CPG_MOD 926>; + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; + resets = <&cpg 926>; + dmas = <&dmac0 0x11>, <&dmac0 0x10>; + dma-names = "tx", "rx"; + status = "disabled"; + }; + + cpg: clock-controller@e6150000 { + compatible = "renesas,r8a77990-cpg-mssr"; + reg = <0 0xe6150000 0 0x1000>; + clocks = <&extal_clk>; + clock-names = "extal"; + #clock-cells = <2>; + #power-domain-cells = <0>; + #reset-cells = <1>; + }; + + rst: reset-controller@e6160000 { + compatible = "renesas,r8a77990-rst"; + reg = <0 0xe6160000 0 0x0200>; + }; + + sysc: system-controller@e6180000 { + compatible = "renesas,r8a77990-sysc"; + reg = <0 0xe6180000 0 0x0400>; + #power-domain-cells = <1>; + }; + + thermal: thermal@e6190000 { + 
compatible = "renesas,thermal-r8a77990"; + reg = <0 0xe6190000 0 0x10>, <0 0xe6190100 0 0x38>; + interrupts = , + , + ; + clocks = <&cpg CPG_MOD 522>; + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; + resets = <&cpg 522>; + #thermal-sensor-cells = <0>; + }; + + intc_ex: interrupt-controller@e61c0000 { + compatible = "renesas,intc-ex-r8a77990", "renesas,irqc"; + #interrupt-cells = <2>; + interrupt-controller; + reg = <0 0xe61c0000 0 0x200>; + interrupts = ; + clocks = <&cpg CPG_MOD 407>; + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; + resets = <&cpg 407>; + }; + i2c0: i2c@e6500000 { #address-cells = <1>; #size-cells = <0>; @@ -369,74 +462,6 @@ status = "disabled"; }; - pfc: pin-controller@e6060000 { - compatible = "renesas,pfc-r8a77990"; - reg = <0 0xe6060000 0 0x508>; - }; - - i2c_dvfs: i2c@e60b0000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "renesas,iic-r8a77990"; - reg = <0 0xe60b0000 0 0x15>; - interrupts = ; - clocks = <&cpg CPG_MOD 926>; - power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; - resets = <&cpg 926>; - dmas = <&dmac0 0x11>, <&dmac0 0x10>; - dma-names = "tx", "rx"; - status = "disabled"; - }; - - cpg: clock-controller@e6150000 { - compatible = "renesas,r8a77990-cpg-mssr"; - reg = <0 0xe6150000 0 0x1000>; - clocks = <&extal_clk>; - clock-names = "extal"; - #clock-cells = <2>; - #power-domain-cells = <0>; - #reset-cells = <1>; - }; - - rst: reset-controller@e6160000 { - compatible = "renesas,r8a77990-rst"; - reg = <0 0xe6160000 0 0x0200>; - }; - - sysc: system-controller@e6180000 { - compatible = "renesas,r8a77990-sysc"; - reg = <0 0xe6180000 0 0x0400>; - #power-domain-cells = <1>; - }; - - thermal: thermal@e6190000 { - compatible = "renesas,thermal-r8a77990"; - reg = <0 0xe6190000 0 0x10>, <0 0xe6190100 0 0x38>; - interrupts = , - , - ; - clocks = <&cpg CPG_MOD 522>; - power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; - resets = <&cpg 522>; - #thermal-sensor-cells = <0>; - }; - - intc_ex: interrupt-controller@e61c0000 { - compatible = "renesas,intc-ex-r8a77990", "renesas,irqc"; - #interrupt-cells = <2>; - interrupt-controller; - reg = <0 0xe61c0000 0 0x200>; - interrupts = ; - clocks = <&cpg CPG_MOD 407>; - power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; - resets = <&cpg 407>; - }; - hscif0: serial@e6540000 { compatible = "renesas,hscif-r8a77990", "renesas,rcar-gen3-hscif", @@ -993,7 +1018,9 @@ <&cpg CPG_CORE R8A77990_CLK_S3D1C>, <&scif_clk>; clock-names = "fck", "brg_int", "scif_clk"; - + dmas = <&dmac1 0x13>, <&dmac1 0x12>, + <&dmac2 0x13>, <&dmac2 0x12>; + dma-names = "tx", "rx", "tx", "rx"; power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; resets = <&cpg 310>; status = "disabled"; @@ -1526,6 +1553,33 @@ resets = <&cpg 408>; }; + pciec0: pcie@fe000000 { + compatible = "renesas,pcie-r8a77990", + "renesas,pcie-rcar-gen3"; + reg = <0 0xfe000000 0 0x80000>; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x00 0xff>; + device_type = "pci"; + ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000 + 0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000 + 0x02000000 0 0x30000000 0 0x30000000 0 0x08000000 + 0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>; + /* Map all possible DDR as inbound ranges */ + dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>; + interrupts = , + , + ; + #interrupt-cells = <1>; + interrupt-map-mask = <0 0 0 0>; + interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>; + clock-names = "pcie", "pcie_bus"; + power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; + 
resets = <&cpg 319>; + status = "disabled"; + }; + vspb0: vsp@fe960000 { compatible = "renesas,vsp2"; reg = <0 0xfe960000 0 0x8000>; @@ -1724,33 +1778,6 @@ }; }; - pciec0: pcie@fe000000 { - compatible = "renesas,pcie-r8a77990", - "renesas,pcie-rcar-gen3"; - reg = <0 0xfe000000 0 0x80000>; - #address-cells = <3>; - #size-cells = <2>; - bus-range = <0x00 0xff>; - device_type = "pci"; - ranges = <0x01000000 0 0x00000000 0 0xfe100000 0 0x00100000 - 0x02000000 0 0xfe200000 0 0xfe200000 0 0x00200000 - 0x02000000 0 0x30000000 0 0x30000000 0 0x08000000 - 0x42000000 0 0x38000000 0 0x38000000 0 0x08000000>; - /* Map all possible DDR as inbound ranges */ - dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x40000000>; - interrupts = , - , - ; - #interrupt-cells = <1>; - interrupt-map-mask = <0 0 0 0>; - interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&cpg CPG_MOD 319>, <&pcie_bus_clk>; - clock-names = "pcie", "pcie_bus"; - power-domains = <&sysc R8A77990_PD_ALWAYS_ON>; - resets = <&cpg 319>; - status = "disabled"; - }; - prr: chipid@fff00044 { compatible = "renesas,prr"; reg = <0 0xfff00044 0 4>; diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts index 89df9bc844c0..db2bed1751b8 100644 --- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts +++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts @@ -28,8 +28,8 @@ compatible = "pwm-backlight"; pwms = <&pwm1 0 50000>; - brightness-levels = <256 128 64 16 8 4 0>; - default-brightness-level = <6>; + brightness-levels = <512 511 505 494 473 440 392 327 241 133 0>; + default-brightness-level = <10>; power-supply = <®_12p0v>; enable-gpios = <&gpio4 0 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi index 8530d9fc1371..5bf3af246e14 100644 --- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi @@ -27,7 +27,7 @@ #size-cells = <0>; a53_0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0>; device_type = "cpu"; power-domains = <&sysc R8A77995_PD_CA53_CPU0>; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index f66d990b92f1..a225c2457274 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -764,6 +764,7 @@ vqmmc-supply = <®_1p8v>; bus-width = <8>; mmc-hs200-1_8v; + mmc-hs400-1_8v; non-removable; fixed-emmc-driver-type = <1>; status = "okay"; diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi index 1b316d79df88..7a09576b3112 100644 --- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi @@ -6,11 +6,38 @@ * Copyright (C) 2017 Cogent Embedded, Inc. 
*/ +/* + * SSI-PCM3168A + * aplay -D plughw:0,2 xxx.wav + * arecord -D plughw:0,3 xxx.wav + */ + / { aliases { serial1 = &hscif0; serial2 = &scif1; }; + + clksndsel: clksndsel { + #clock-cells = <0>; + compatible = "gpio-mux-clock"; + clocks = <&cs2000>, <&audio_clk_a>; /* clk8snd, clksnd */ + select-gpios = <&gpio_exp_75 13 GPIO_ACTIVE_HIGH>; + }; + + snd_3p3v: regulator-snd_3p3v { + compatible = "regulator-fixed"; + regulator-name = "snd-3.3v"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + }; + + snd_vcc5v: regulator-snd_vcc5v { + compatible = "regulator-fixed"; + regulator-name = "snd-vcc5v"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + }; }; &can0 { @@ -44,6 +71,7 @@ }; &i2c2 { + /* U11 */ gpio_exp_74: gpio@74 { compatible = "ti,tca9539"; reg = <0x74>; @@ -53,6 +81,13 @@ interrupt-parent = <&gpio6>; interrupts = <8 IRQ_TYPE_EDGE_FALLING>; + audio_out_off { + gpio-hog; + gpios = <0 GPIO_ACTIVE_HIGH>; /* P00 */ + output-high; + line-name = "Audio_Out_OFF"; + }; + hub_pwen { gpio-hog; gpios = <6 GPIO_ACTIVE_HIGH>; @@ -80,8 +115,16 @@ output-high; line-name = "OTG EXTLPn"; }; + + snd_rst { + gpio-hog; + gpios = <15 GPIO_ACTIVE_HIGH>; /* P17 */ + output-high; + line-name = "SND_RST"; + }; }; + /* U5 */ gpio_exp_75: gpio@75 { compatible = "ti,tca9539"; reg = <0x75>; @@ -98,6 +141,48 @@ #size-cells = <0>; reg = <0x71>; reset-gpios = <&gpio5 3 GPIO_ACTIVE_LOW>; + + /* Audio_SDA, Audio_SCL */ + i2c@7 { + #address-cells = <1>; + #size-cells = <0>; + reg = <7>; + + pcm3168a: audio-codec@44 { + #sound-dai-cells = <0>; + compatible = "ti,pcm3168a"; + reg = <0x44>; + clocks = <&clksndsel>; + clock-names = "scki"; + + VDD1-supply = <&snd_3p3v>; + VDD2-supply = <&snd_3p3v>; + VCCAD1-supply = <&snd_vcc5v>; + VCCAD2-supply = <&snd_vcc5v>; + VCCDA1-supply = <&snd_vcc5v>; + VCCDA2-supply = <&snd_vcc5v>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + mclk-fs = <512>; + port@0 { + reg = <0>; + pcm3168a_endpoint_p: endpoint { + remote-endpoint = <&rsnd_for_pcm3168a_play>; + clocks = <&clksndsel>; + }; + }; + port@1 { + reg = <1>; + pcm3168a_endpoint_c: endpoint { + remote-endpoint = <&rsnd_for_pcm3168a_capture>; + clocks = <&clksndsel>; + }; + }; + }; + }; + }; }; }; @@ -173,6 +258,11 @@ groups = "usb0"; function = "usb0"; }; + + sound_pcm_pins: sound-pcm { + groups = "ssi349_ctrl", "ssi3_data", "ssi4_data"; + function = "ssi"; + }; }; &scif1 { @@ -193,3 +283,51 @@ &xhci0 { status = "okay"; }; + +&sound_card { + dais = <&rsnd_port0 /* ak4613 */ + &rsnd_port1 /* HDMI0 */ + &rsnd_port2 /* pcm3168a playback */ + &rsnd_port3 /* pcm3168a capture */ + >; +}; + +&rcar_sound { + pinctrl-0 = <&sound_pins + &sound_clk_pins + &sound_pcm_pins>; + + ports { + /* rsnd_port0/1 are on salvator-common */ + rsnd_port2: port@2 { + reg = <2>; + rsnd_for_pcm3168a_play: endpoint { + remote-endpoint = <&pcm3168a_endpoint_p>; + + dai-format = "i2s"; + bitclock-master = <&rsnd_for_pcm3168a_play>; + frame-master = <&rsnd_for_pcm3168a_play>; + dai-tdm-slot-num = <8>; + + playback = <&ssi3>; + }; + }; + rsnd_port3: port@3 { + reg = <3>; + rsnd_for_pcm3168a_capture: endpoint { + remote-endpoint = <&pcm3168a_endpoint_c>; + + dai-format = "i2s"; + bitclock-master = <&rsnd_for_pcm3168a_capture>; + frame-master = <&rsnd_for_pcm3168a_capture>; + dai-tdm-slot-num = <6>; + + capture = <&ssi4>; + }; + }; + }; +}; + +&ssi4 { + shared-pin; +}; diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index de694fdae067..e70e1bac2be4 
100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -6,6 +6,14 @@ * Copyright (C) 2016 Cogent Embedded, Inc. */ +/* + * SSI-AK4613 + * aplay -D plughw:0,0 xxx.wav + * arecord -D plughw:0,0 xxx.wav + * SSI-HDMI + * aplay -D plughw:0,1 xxx.wav + */ + #include #include @@ -83,20 +91,13 @@ regulator-always-on; }; - rsnd_ak4613: sound { - compatible = "simple-audio-card"; - - simple-audio-card,format = "left_j"; - simple-audio-card,bitclock-master = <&sndcpu>; - simple-audio-card,frame-master = <&sndcpu>; - - sndcpu: simple-audio-card,cpu { - sound-dai = <&rcar_sound>; - }; + sound_card: sound { + compatible = "audio-graph-card"; + label = "rcar-sound"; - sndcodec: simple-audio-card,codec { - sound-dai = <&ak4613>; - }; + dais = <&rsnd_port0 /* ak4613 */ + &rsnd_port1 /* HDMI0 */ + >; }; vcc_sdhi0: regulator-vcc-sdhi0 { @@ -182,6 +183,12 @@ remote-endpoint = <&hdmi0_con>; }; }; + port@2 { + reg = <2>; + dw_hdmi0_snd_in: endpoint { + remote-endpoint = <&rsnd_for_hdmi>; + }; + }; }; }; @@ -211,6 +218,12 @@ asahi-kasei,out4-single-end; asahi-kasei,out5-single-end; asahi-kasei,out6-single-end; + + port { + ak4613_endpoint: endpoint { + remote-endpoint = <&rsnd_for_ak4613>; + }; + }; }; cs2000: clk-multiplier@4f { @@ -384,10 +397,33 @@ <&audio_clk_c>, <&cpg CPG_CORE CPG_AUDIO_CLK_I>; - rcar_sound,dai { - dai0 { - playback = <&ssi0 &src0 &dvc0>; - capture = <&ssi1 &src1 &dvc1>; + ports { + #address-cells = <1>; + #size-cells = <0>; + rsnd_port0: port@0 { + reg = <0>; + rsnd_for_ak4613: endpoint { + remote-endpoint = <&ak4613_endpoint>; + + dai-format = "left_j"; + bitclock-master = <&rsnd_for_ak4613>; + frame-master = <&rsnd_for_ak4613>; + + playback = <&ssi0 &src0 &dvc0>; + capture = <&ssi1 &src1 &dvc1>; + }; + }; + rsnd_port1: port@1 { + reg = <1>; + rsnd_for_hdmi: endpoint { + remote-endpoint = <&dw_hdmi0_snd_in>; + + dai-format = "i2s"; + bitclock-master = <&rsnd_for_hdmi>; + frame-master = <&rsnd_for_hdmi>; + + playback = <&ssi2>; + }; }; }; }; @@ -427,6 +463,7 @@ vqmmc-supply = <®_1p8v>; bus-width = <8>; mmc-hs200-1_8v; + mmc-hs400-1_8v; non-removable; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile index de0c406c20cc..1b28fa72ea0b 100644 --- a/arch/arm64/boot/dts/rockchip/Makefile +++ b/arch/arm64/boot/dts/rockchip/Makefile @@ -16,8 +16,11 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-bob.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-kevin.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-inx.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-kd.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopc-t4.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-roc-pc.dtb +dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rock-pi-4.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rock960.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-rockpro64.dtb dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire.dtb diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi index 9aa8d5ef9e45..eb992d60e6ba 100644 --- a/arch/arm64/boot/dts/rockchip/px30.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30.dtsi @@ -40,7 +40,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a35", "arm,armv8"; + compatible = "arm,cortex-a35"; reg = <0x0 0x0>; enable-method = "psci"; clocks = <&cru ARMCLK>; @@ -52,7 +52,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a35", "arm,armv8"; + compatible = 
"arm,cortex-a35"; reg = <0x0 0x1>; enable-method = "psci"; clocks = <&cru ARMCLK>; @@ -64,7 +64,7 @@ cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a35", "arm,armv8"; + compatible = "arm,cortex-a35"; reg = <0x0 0x2>; enable-method = "psci"; clocks = <&cru ARMCLK>; @@ -76,7 +76,7 @@ cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a35", "arm,armv8"; + compatible = "arm,cortex-a35"; reg = <0x0 0x3>; enable-method = "psci"; clocks = <&cru ARMCLK>; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts index 99d0d9912950..33c44e857247 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts @@ -104,6 +104,7 @@ phy-mode = "rgmii"; pinctrl-names = "default"; pinctrl-0 = <&rgmiim1_pins>; + snps,force_thresh_dma_mode; snps,reset-gpio = <&gpio1 RK_PC2 GPIO_ACTIVE_LOW>; snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index bd937d68ca3b..2157a528276b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -40,6 +40,7 @@ pinctrl-0 = <&usb30_host_drv>; regulator-name = "vcc_host_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; @@ -51,6 +52,7 @@ pinctrl-0 = <&usb20_host_drv>; regulator-name = "vcc_host1_5v"; regulator-always-on; + regulator-boot-on; vin-supply = <&vcc_sys>; }; @@ -66,7 +68,8 @@ sound { compatible = "audio-graph-card"; label = "rockchip,rk3328"; - dais = <&spdif_p0>; + dais = <&i2s1_p0 + &spdif_p0>; }; spdif-dit { @@ -81,6 +84,16 @@ }; }; +&codec { + status = "okay"; + + port@0 { + codec_p0_0: endpoint { + remote-endpoint = <&i2s1_p0_0>; + }; + }; +}; + &cpu0 { cpu-supply = <&vdd_arm>; }; @@ -243,6 +256,18 @@ }; }; +&i2s1 { + status = "okay"; + + i2s1_p0: port { + i2s1_p0_0: endpoint { + dai-format = "i2s"; + mclk-fs = <256>; + remote-endpoint = <&codec_p0_0>; + }; + }; +}; + &io_domains { status = "okay"; @@ -290,7 +315,6 @@ &spdif { pinctrl-0 = <&spdifm0_tx>; status = "okay"; - #sound-dai-cells = <0>; spdif_p0: port { spdif_p0_0: endpoint { diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index ecd7f19c3542..84f14b132e8f 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -37,7 +37,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; clocks = <&cru ARMCLK>; #cooling-cells = <2>; @@ -49,7 +49,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; clocks = <&cru ARMCLK>; #cooling-cells = <2>; @@ -61,7 +61,7 @@ cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; clocks = <&cru ARMCLK>; #cooling-cells = <2>; @@ -73,7 +73,7 @@ cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; clocks = <&cru ARMCLK>; #cooling-cells = <2>; @@ -184,6 +184,7 @@ clock-names = "i2s_clk", "i2s_hclk"; dmas = <&dmac 11>, <&dmac 12>; dma-names = "tx", "rx"; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -195,6 +196,7 @@ clock-names = "i2s_clk", "i2s_hclk"; dmas = <&dmac 14>, <&dmac 15>; dma-names = "tx", "rx"; + #sound-dai-cells = <0>; status = 
"disabled"; }; @@ -206,6 +208,7 @@ clock-names = "i2s_clk", "i2s_hclk"; dmas = <&dmac 0>, <&dmac 1>; dma-names = "tx", "rx"; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -219,6 +222,7 @@ dma-names = "tx"; pinctrl-names = "default"; pinctrl-0 = <&spdifm2_tx>; + #sound-dai-cells = <0>; status = "disabled"; }; @@ -672,6 +676,16 @@ }; }; + codec: codec@ff410000 { + compatible = "rockchip,rk3328-codec"; + reg = <0x0 0xff410000 0x0 0x1000>; + clocks = <&cru PCLK_ACODECPHY>, <&cru SCLK_I2S1>; + clock-names = "pclk", "mclk"; + rockchip,grf = <&grf>; + #sound-dai-cells = <0>; + status = "disabled"; + }; + hdmiphy: phy@ff430000 { compatible = "rockchip,rk3328-hdmi-phy"; reg = <0x0 0xff430000 0x0 0x10000>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi index 4de089149c50..e96eb62f362b 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi @@ -116,7 +116,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; mmc-pwrseq = <&emmc_pwrseq>; non-removable; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts index 6b9b1ac1994c..8fa550cbd1a4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts @@ -78,7 +78,6 @@ bus-width = <8>; cap-mmc-highspeed; clock-frequency = <150000000>; - disable-wp; non-removable; vmmc-supply = <&vcc_io>; vqmmc-supply = <&vcc18_flash>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi index 1315972412df..1b35d612b660 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi @@ -139,7 +139,6 @@ &emmc { bus-width = <8>; clock-frequency = <150000000>; - disable-wp; mmc-hs200-1_8v; non-removable; vmmc-supply = <&vcc33_io>; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts index 96147d93dd1d..f5aa3cad67c5 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts @@ -161,7 +161,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; mmc-pwrseq = <&emmc_pwrseq>; mmc-hs200-1_2v; mmc-hs200-1_8v; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts index fc1bf078a41f..41edcfd53184 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts @@ -48,7 +48,6 @@ bus-width = <8>; cap-mmc-highspeed; clock-frequency = <150000000>; - disable-wp; mmc-hs200-1_8v; no-sdio; no-sd; diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts index 7452bedf1a7e..d34064c65f10 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts +++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts @@ -149,7 +149,6 @@ &emmc { bus-width = <8>; cap-mmc-highspeed; - disable-wp; mmc-pwrseq = <&emmc_pwrseq>; non-removable; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 7014d10b954c..06e7c31d7d07 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi @@ -73,7 +73,7 @@ cpu_l0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; 
#cooling-cells = <2>; /* min followed by max */ @@ -81,7 +81,7 @@ cpu_l1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ @@ -89,7 +89,7 @@ cpu_l2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ @@ -97,7 +97,7 @@ cpu_l3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ @@ -105,7 +105,7 @@ cpu_b0: cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x100>; enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ @@ -113,7 +113,7 @@ cpu_b1: cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x101>; enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ @@ -121,7 +121,7 @@ cpu_b2: cpu@102 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x102>; enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ @@ -129,7 +129,7 @@ cpu_b3: cpu@103 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x103>; enable-method = "psci"; #cooling-cells = <2>; /* min followed by max */ diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index 1ee0dc0d9f10..d1cf404b8708 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts @@ -22,7 +22,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index c400be64170e..931640e9aed4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -200,6 +200,19 @@ pinctrl-0 = <&bl_en>; pwm-delay-us = <10000>; }; + + gpio_keys: gpio-keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + pinctrl-0 = <&bt_host_wake_l>; + + wake_on_bt: wake-on-bt { + label = "Wake-on-Bluetooth"; + gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; + linux,code = ; + wakeup-source; + }; + }; }; &ppvar_bigcpu { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 81e73103fa78..15e254a77391 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts @@ -43,7 +43,7 @@ backlight = <&backlight>; power-supply = <&pp3300_disp>; - ports { + port { panel_in_edp: endpoint { remote-endpoint = <&edp_out_panel>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi index fc50b3ef758c..62ea7d6a7d4a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi @@ -175,6 +175,21 @@ pinctrl-0 = <&dmic_en>; wakeup-delay-ms = <250>; }; + + gpio_keys: gpio-keys { + compatible = "gpio-keys"; + pinctrl-names = "default"; + 
pinctrl-0 = <&pen_eject_odl>; + + pen-insert { + label = "Pen Insert"; + /* Insert = low, eject = high */ + gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; + linux,code = ; + linux,input-type = ; + wakeup-source; + }; + }; }; /* pp900_s0 aliases */ @@ -328,20 +343,6 @@ camera: &i2c7 { <400000000>; }; -&gpio_keys { - pinctrl-names = "default"; - pinctrl-0 = <&bt_host_wake_l>, <&pen_eject_odl>; - - pen-insert { - label = "Pen Insert"; - /* Insert = low, eject = high */ - gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; - linux,code = ; - linux,input-type = ; - wakeup-source; - }; -}; - &i2c_tunnel { google,remote-bus = <0>; }; @@ -437,8 +438,19 @@ camera: &i2c7 { status = "okay"; }; -&wake_on_bt { - gpios = <&gpio1 2 GPIO_ACTIVE_LOW>; +&usb_host0_ohci { + #address-cells = <1>; + #size-cells = <0>; + + qca_bt: bluetooth@1 { + compatible = "usbcf3,e300", "usb4ca,301a"; + reg = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&bt_host_wake_l>; + interrupt-parent = <&gpio1>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "wakeup"; + }; }; /* PINCTRL OVERRIDES */ @@ -455,7 +467,7 @@ camera: &i2c7 { }; &bt_host_wake_l { - rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_up>; + rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_none>; }; &ec_ap_int_l { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index ea607a601a86..da03fa9c5662 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -269,19 +269,6 @@ #clock-cells = <0>; }; - gpio_keys: gpio-keys { - compatible = "gpio-keys"; - pinctrl-names = "default"; - pinctrl-0 = <&bt_host_wake_l>; - - wake_on_bt: wake-on-bt { - label = "Wake-on-Bluetooth"; - gpios = <&gpio0 3 GPIO_ACTIVE_LOW>; - linux,code = ; - wakeup-source; - }; - }; - max98357a: max98357a { compatible = "maxim,max98357a"; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts new file mode 100644 index 000000000000..84433cf02be9 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopc-t4.dts @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * FriendlyElec NanoPC-T4 board device tree source + * + * Copyright (c) 2018 FriendlyElec Computer Tech. Co., Ltd. + * (http://www.friendlyarm.com) + * + * Copyright (c) 2018 Collabora Ltd. 
+ */ + +/dts-v1/; +#include "rk3399-nanopi4.dtsi" + +/ { + model = "FriendlyElec NanoPC-T4"; + compatible = "friendlyarm,nanopc-t4", "rockchip,rk3399"; + + vcc12v0_sys: vcc12v0-sys { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-max-microvolt = <12000000>; + regulator-min-microvolt = <12000000>; + regulator-name = "vcc12v0_sys"; + }; + + vcc5v0_host0: vcc5v0-host0 { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-name = "vcc5v0_host0"; + vin-supply = <&vcc5v0_sys>; + }; + + adc-keys { + compatible = "adc-keys"; + io-channels = <&saradc 1>; + io-channel-names = "buttons"; + keyup-threshold-microvolt = <1800000>; + poll-interval = <100>; + + recovery { + label = "Recovery"; + linux,code = ; + press-threshold-microvolt = <18000>; + }; + }; + + ir-receiver { + compatible = "gpio-ir-receiver"; + gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&ir_rx>; + }; +}; + +&pinctrl { + ir { + ir_rx: ir-rx { + /* external pullup to VCC3V3_SYS, despite being 1.8V :/ */ + rockchip,pins = <0 RK_PA6 RK_FUNC_1 &pcfg_pull_none>; + }; + }; +}; + +&sdhci { + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; +}; + +&u2phy0_host { + phy-supply = <&vcc5v0_host0>; +}; + +&u2phy1_host { + phy-supply = <&vcc5v0_host0>; +}; + +&vcc5v0_sys { + vin-supply = <&vcc12v0_sys>; +}; + +&vcc3v3_sys { + vin-supply = <&vcc12v0_sys>; +}; + +&vbus_typec { + enable-active-high; + gpios = <&gpio4 RK_PD2 GPIO_ACTIVE_HIGH>; + vin-supply = <&vcc5v0_sys>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4.dts new file mode 100644 index 000000000000..60358ab8c7df --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-m4.dts @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * FriendlyElec NanoPi M4 board device tree source + * + * Copyright (c) 2018 FriendlyElec Computer Tech. Co., Ltd. + * (http://www.friendlyarm.com) + * + * Copyright (c) 2018 Collabora Ltd. + * Copyright (c) 2019 Arm Ltd. 
+ */ + +/dts-v1/; +#include "rk3399-nanopi4.dtsi" + +/ { + model = "FriendlyElec NanoPi M4"; + compatible = "friendlyarm,nanopi-m4", "rockchip,rk3399"; + + vdd_5v: vdd-5v { + compatible = "regulator-fixed"; + regulator-name = "vdd_5v"; + regulator-always-on; + regulator-boot-on; + }; + + vcc5v0_core: vcc5v0-core { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_core"; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vdd_5v>; + }; + + vcc5v0_usb1: vcc5v0-usb1 { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_usb1"; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_usb2: vcc5v0-usb2 { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_usb2"; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc5v0_sys>; + }; +}; + +&vcc3v3_sys { + vin-supply = <&vcc5v0_core>; +}; + +&u2phy0_host { + phy-supply = <&vcc5v0_usb1>; +}; + +&u2phy1_host { + phy-supply = <&vcc5v0_usb2>; +}; + +&vbus_typec { + regulator-always-on; + vin-supply = <&vdd_5v>; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi new file mode 100644 index 000000000000..d325e117287b --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * RK3399-based FriendlyElec boards device tree source + * + * Copyright (c) 2016 Fuzhou Rockchip Electronics Co., Ltd + * + * Copyright (c) 2018 FriendlyElec Computer Tech. Co., Ltd. + * (http://www.friendlyarm.com) + * + * Copyright (c) 2018 Collabora Ltd. + * Copyright (c) 2019 Arm Ltd. + */ + +/dts-v1/; +#include +#include "rk3399.dtsi" +#include "rk3399-opp.dtsi" + +/ { + chosen { + stdout-path = "serial2:1500000n8"; + }; + + clkin_gmac: external-gmac-clock { + compatible = "fixed-clock"; + clock-frequency = <125000000>; + clock-output-names = "clkin_gmac"; + #clock-cells = <0>; + }; + + vcc3v3_sys: vcc3v3-sys { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc3v3_sys"; + }; + + vcc5v0_sys: vcc5v0-sys { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-name = "vcc5v0_sys"; + vin-supply = <&vdd_5v>; + }; + + /* switched by pmic_sleep */ + vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 { + compatible = "regulator-fixed"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc1v8_s3"; + vin-supply = <&vcc_1v8>; + }; + + vcc3v0_sd: vcc3v0-sd { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc0_pwr_h>; + regulator-always-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc3v0_sd"; + vin-supply = <&vcc3v3_sys>; + }; + + vbus_typec: vbus-typec { + compatible = "regulator-fixed"; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + regulator-name = "vbus_typec"; + }; + + gpio-keys { + compatible = "gpio-keys"; + autorepeat; + pinctrl-names = "default"; + pinctrl-0 = <&power_key>; + + power { + debounce-interval = <100>; + gpios = <&gpio0 RK_PA5 GPIO_ACTIVE_LOW>; + label = "GPIO Key Power"; + linux,code = ; + wakeup-source; + }; + }; + + leds: gpio-leds { + compatible = 
"gpio-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&leds_gpio>; + + status { + gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>; + label = "status_led"; + linux,default-trigger = "heartbeat"; + }; + }; + + sdio_pwrseq: sdio-pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&rk808 1>; + clock-names = "ext_clock"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_reg_on_h>; + reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; + }; +}; + +&cpu_b0 { + cpu-supply = <&vdd_cpu_b>; +}; + +&cpu_b1 { + cpu-supply = <&vdd_cpu_b>; +}; + +&cpu_l0 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l1 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l2 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l3 { + cpu-supply = <&vdd_cpu_l>; +}; + +&emmc_phy { + status = "okay"; +}; + +&gmac { + assigned-clock-parents = <&clkin_gmac>; + assigned-clocks = <&cru SCLK_RMII_SRC>; + clock_in_out = "input"; + pinctrl-names = "default"; + pinctrl-0 = <&rgmii_pins>; + phy-mode = "rgmii"; + phy-supply = <&vcc3v3_s3>; + snps,reset-active-low; + snps,reset-delays-us = <0 10000 50000>; + snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>; + tx_delay = <0x28>; + rx_delay = <0x11>; + status = "okay"; +}; + +&gpu { + mali-supply = <&vdd_gpu>; + status = "okay"; +}; + +&hdmi { + ddc-i2c-bus = <&i2c7>; + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_cec>; + status = "okay"; +}; + +&i2c0 { + clock-frequency = <400000>; + i2c-scl-rising-time-ns = <160>; + i2c-scl-falling-time-ns = <30>; + status = "okay"; + + vdd_cpu_b: regulator@40 { + compatible = "silergy,syr827"; + reg = <0x40>; + fcs,suspend-voltage-selector = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&cpu_b_sleep>; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1500000>; + regulator-name = "vdd_cpu_b"; + regulator-ramp-delay = <1000>; + vin-supply = <&vcc3v3_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_gpu: regulator@41 { + compatible = "silergy,syr828"; + reg = <0x41>; + fcs,suspend-voltage-selector = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&gpu_sleep>; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1500000>; + regulator-name = "vdd_gpu"; + regulator-ramp-delay = <1000>; + vin-supply = <&vcc3v3_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + rk808: pmic@1b { + compatible = "rockchip,rk808"; + reg = <0x1b>; + clock-output-names = "xin32k", "rtc_clko_wifi"; + #clock-cells = <1>; + interrupt-parent = <&gpio1>; + interrupts = <21 IRQ_TYPE_LEVEL_LOW>; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l>; + rockchip,system-power-controller; + wakeup-source; + + vcc1-supply = <&vcc3v3_sys>; + vcc2-supply = <&vcc3v3_sys>; + vcc3-supply = <&vcc3v3_sys>; + vcc4-supply = <&vcc3v3_sys>; + vcc6-supply = <&vcc3v3_sys>; + vcc7-supply = <&vcc3v3_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc3v3_sys>; + vcc10-supply = <&vcc3v3_sys>; + vcc11-supply = <&vcc3v3_sys>; + vcc12-supply = <&vcc3v3_sys>; + vddio-supply = <&vcc_3v0>; + + regulators { + vdd_center: DCDC_REG1 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-name = "vdd_center"; + regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_l: DCDC_REG2 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-name = "vdd_cpu_l"; + 
regulator-ramp-delay = <6001>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-always-on; + regulator-boot-on; + regulator-name = "vcc_ddr"; + + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_1v8: DCDC_REG4 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc_1v8"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc1v8_cam: LDO_REG1 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc1v8_cam"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v0_touch: LDO_REG2 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc3v0_touch"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc1v8_pmupll: LDO_REG3 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcc1v8_pmupll"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc_sdio: LDO_REG4 { + regulator-always-on; + regulator-boot-on; + regulator-init-microvolt = <3000000>; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <3300000>; + regulator-name = "vcc_sdio"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3000000>; + }; + }; + + vcca3v0_codec: LDO_REG5 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcca3v0_codec"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v5: LDO_REG6 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-name = "vcc_1v5"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1500000>; + }; + }; + + vcca1v8_codec: LDO_REG7 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-name = "vcca1v8_codec"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v0: LDO_REG8 { + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-name = "vcc_3v0"; + + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3000000>; + }; + }; + + vcc3v3_s3: SWITCH_REG1 { + regulator-always-on; + regulator-boot-on; + regulator-name = "vcc3v3_s3"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc3v3_s0: SWITCH_REG2 { + regulator-always-on; + regulator-boot-on; + regulator-name = "vcc3v3_s0"; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; +}; + +&i2c1 { + clock-frequency = <200000>; + i2c-scl-rising-time-ns = <150>; + i2c-scl-falling-time-ns = <30>; + status = "okay"; +}; + +&i2c2 { + status = "okay"; +}; + +&i2c4 { + clock-frequency = <400000>; + i2c-scl-rising-time-ns = <160>; + i2c-scl-falling-time-ns = <30>; + status = "okay"; + + fusb0: typec-portc@22 { + compatible = "fcs,fusb302"; + reg = <0x22>; + interrupt-parent = <&gpio1>; + interrupts = ; + pinctrl-names = "default"; + pinctrl-0 = <&fusb0_int>; + 
vbus-supply = <&vbus_typec>; + }; +}; + +&i2c7 { + status = "okay"; +}; + +&io_domains { + bt656-supply = <&vcc_1v8>; + audio-supply = <&vcca1v8_codec>; + sdmmc-supply = <&vcc_sdio>; + gpio1830-supply = <&vcc_3v0>; + status = "okay"; +}; + +&pcie_phy { + assigned-clock-parents = <&cru SCLK_PCIEPHY_REF100M>; + assigned-clock-rates = <100000000>; + assigned-clocks = <&cru SCLK_PCIEPHY_REF>; + status = "okay"; +}; + +&pcie0 { + ep-gpios = <&gpio2 RK_PA4 GPIO_ACTIVE_HIGH>; + max-link-speed = <2>; + num-lanes = <4>; + status = "okay"; +}; + +&pinctrl { + fusb30x { + fusb0_int: fusb0-int { + rockchip,pins = <1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + gpio-leds { + leds_gpio: leds-gpio { + rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pmic { + cpu_b_sleep: cpu-b-sleep { + rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + gpu_sleep: gpu-sleep { + rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + pmic_int_l: pmic-int-l { + rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + rockchip-key { + power_key: power-key { + rockchip,pins = <0 RK_PA5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + sdio { + bt_host_wake_l: bt-host-wake-l { + rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_reg_on_h: bt-reg-on-h { + /* external pullup to VCC1V8_PMUPLL */ + rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_wake_l: bt-wake-l { + rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + wifi_reg_on_h: wifi-reg_on-h { + rockchip,pins = <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + sdmmc { + sdmmc0_det_l: sdmmc0-det-l { + rockchip,pins = <0 RK_PA7 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + sdmmc0_pwr_h: sdmmc0-pwr-h { + rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&pmu_io_domains { + pmu1830-supply = <&vcc_3v0>; + status = "okay"; +}; + +&pwm0 { + status = "okay"; +}; + +&pwm1 { + status = "okay"; +}; + +&pwm2 { + pinctrl-names = "active"; + pinctrl-0 = <&pwm2_pin_pull_down>; + status = "okay"; +}; + +&saradc { + vref-supply = <&vcca1v8_s3>; + status = "okay"; +}; + +&sdhci { + bus-width = <8>; + mmc-hs200-1_8v; + non-removable; + status = "okay"; +}; + +&sdio0 { + bus-width = <4>; + cap-sd-highspeed; + cap-sdio-irq; + keep-power-in-suspend; + mmc-pwrseq = <&sdio_pwrseq>; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>; + sd-uhs-sdr104; + status = "okay"; +}; + +&sdmmc { + bus-width = <4>; + cap-sd-highspeed; + cap-mmc-highspeed; + cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; + disable-wp; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_bus4 &sdmmc_clk &sdmmc_cmd &sdmmc0_det_l>; + sd-uhs-sdr104; + vmmc-supply = <&vcc3v0_sd>; + vqmmc-supply = <&vcc_sdio>; + status = "okay"; +}; + +&tcphy0 { + status = "okay"; +}; + +&tcphy1 { + status = "okay"; +}; + +&tsadc { + /* tshut mode 0:CRU 1:GPIO */ + rockchip,hw-tshut-mode = <1>; + /* tshut polarity 0:LOW 1:HIGH */ + rockchip,hw-tshut-polarity = <1>; + status = "okay"; +}; + +&u2phy0 { + status = "okay"; +}; + +&u2phy0_host { + status = "okay"; +}; + +&u2phy0_otg { + status = "okay"; +}; + +&u2phy1 { + status = "okay"; +}; + +&u2phy1_host { + status = "okay"; +}; + +&u2phy1_otg { + status = "okay"; +}; + +&uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer &uart0_rts &uart0_cts>; + status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + clocks = <&rk808 1>; + clock-names = "lpo"; + device-wakeup-gpios = <&gpio2 RK_PD2 GPIO_ACTIVE_HIGH>; + 
host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>; + shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>; + max-speed = <4000000>; + pinctrl-names = "default"; + pinctrl-0 = <&bt_reg_on_h &bt_host_wake_l &bt_wake_l>; + vbat-supply = <&vcc3v3_sys>; + vddio-supply = <&vcc_1v8>; + }; +}; + +&uart2 { + status = "okay"; +}; + +&usbdrd3_0 { + status = "okay"; +}; + +&usbdrd3_1 { + status = "okay"; +}; + +&usbdrd_dwc3_0 { + status = "okay"; +}; + +&usbdrd_dwc3_1 { + dr_mode = "host"; + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; + +&vopb { + status = "okay"; +}; + +&vopb_mmu { + status = "okay"; +}; + +&vopl { + status = "okay"; +}; + +&vopl_mmu { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts new file mode 100644 index 000000000000..4a543f2117d4 --- /dev/null +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Copyright (c) 2019 Akash Gajjar + * Copyright (c) 2019 Pragnesh Patel + */ + +/dts-v1/; +#include +#include +#include "rk3399.dtsi" +#include "rk3399-opp.dtsi" + +/ { + model = "Radxa ROCK Pi 4"; + compatible = "radxa,rockpi4", "rockchip,rk3399"; + + chosen { + stdout-path = "serial2:1500000n8"; + }; + + clkin_gmac: external-gmac-clock { + compatible = "fixed-clock"; + clock-frequency = <125000000>; + clock-output-names = "clkin_gmac"; + #clock-cells = <0>; + }; + + vcc12v_dcin: dc-12v { + compatible = "regulator-fixed"; + regulator-name = "vcc12v_dcin"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + + vcc5v0_sys: vcc-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v_dcin>; + }; + + vcc3v3_pcie: vcc3v3-pcie-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio2 RK_PD2 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pcie_pwr_en>; + regulator-name = "vcc3v3_pcie"; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc5v0_sys>; + }; + + vcc3v3_sys: vcc3v3-sys { + compatible = "regulator-fixed"; + regulator-name = "vcc3v3_sys"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_host: vcc5v0-host-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_host_en>; + regulator-name = "vcc5v0_host"; + regulator-always-on; + vin-supply = <&vcc5v0_sys>; + }; + + vcc5v0_typec: vcc5v0-typec-regulator { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio1 RK_PA3 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&vcc5v0_typec_en>; + regulator-name = "vcc5v0_typec"; + regulator-always-on; + vin-supply = <&vcc5v0_sys>; + }; + + vcc_lan: vcc3v3-phy-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_lan"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_log: vdd-log { + 
compatible = "pwm-regulator"; + pwms = <&pwm2 0 25000 1>; + regulator-name = "vdd_log"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <800000>; + regulator-max-microvolt = <1400000>; + vin-supply = <&vcc5v0_sys>; + }; +}; + +&cpu_l0 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l1 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l2 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_l3 { + cpu-supply = <&vdd_cpu_l>; +}; + +&cpu_b0 { + cpu-supply = <&vdd_cpu_b>; +}; + +&cpu_b1 { + cpu-supply = <&vdd_cpu_b>; +}; + +&emmc_phy { + status = "okay"; +}; + +&gmac { + assigned-clocks = <&cru SCLK_RMII_SRC>; + assigned-clock-parents = <&clkin_gmac>; + clock_in_out = "input"; + phy-supply = <&vcc_lan>; + phy-mode = "rgmii"; + pinctrl-names = "default"; + pinctrl-0 = <&rgmii_pins>; + snps,reset-gpio = <&gpio3 RK_PB7 GPIO_ACTIVE_LOW>; + snps,reset-active-low; + snps,reset-delays-us = <0 10000 50000>; + tx_delay = <0x28>; + rx_delay = <0x11>; + status = "okay"; +}; + +&hdmi { + pinctrl-names = "default"; + pinctrl-0 = <&hdmi_cec>; + status = "okay"; +}; + +&i2c0 { + clock-frequency = <400000>; + i2c-scl-rising-time-ns = <168>; + i2c-scl-falling-time-ns = <4>; + status = "okay"; + + rk808: pmic@1b { + compatible = "rockchip,rk808"; + reg = <0x1b>; + interrupt-parent = <&gpio1>; + interrupts = <21 IRQ_TYPE_LEVEL_LOW>; + #clock-cells = <1>; + clock-output-names = "xin32k", "rk808-clkout2"; + pinctrl-names = "default"; + pinctrl-0 = <&pmic_int_l>; + rockchip,system-power-controller; + wakeup-source; + + vcc1-supply = <&vcc5v0_sys>; + vcc2-supply = <&vcc5v0_sys>; + vcc3-supply = <&vcc5v0_sys>; + vcc4-supply = <&vcc5v0_sys>; + vcc6-supply = <&vcc5v0_sys>; + vcc7-supply = <&vcc5v0_sys>; + vcc8-supply = <&vcc3v3_sys>; + vcc9-supply = <&vcc5v0_sys>; + vcc10-supply = <&vcc5v0_sys>; + vcc11-supply = <&vcc5v0_sys>; + vcc12-supply = <&vcc3v3_sys>; + vddio-supply = <&vcc_1v8>; + + regulators { + vdd_center: DCDC_REG1 { + regulator-name = "vdd_center"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_cpu_l: DCDC_REG2 { + regulator-name = "vdd_cpu_l"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <750000>; + regulator-max-microvolt = <1350000>; + regulator-ramp-delay = <6001>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_ddr: DCDC_REG3 { + regulator-name = "vcc_ddr"; + regulator-always-on; + regulator-boot-on; + regulator-state-mem { + regulator-on-in-suspend; + }; + }; + + vcc_1v8: DCDC_REG4 { + regulator-name = "vcc_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc1v8_codec: LDO_REG1 { + regulator-name = "vcc1v8_codec"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc1v8_hdmi: LDO_REG2 { + regulator-name = "vcc1v8_hdmi"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcca_1v8: LDO_REG3 { + regulator-name = "vcca_1v8"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1800000>; + 
regulator-max-microvolt = <1800000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1800000>; + }; + }; + + vcc_sdio: LDO_REG4 { + regulator-name = "vcc_sdio"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3000000>; + }; + }; + + vcca3v0_codec: LDO_REG5 { + regulator-name = "vcca3v0_codec"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_1v5: LDO_REG6 { + regulator-name = "vcc_1v5"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <1500000>; + regulator-max-microvolt = <1500000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <1500000>; + }; + }; + + vcc0v9_hdmi: LDO_REG7 { + regulator-name = "vcc0v9_hdmi"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_3v0: LDO_REG8 { + regulator-name = "vcc_3v0"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3000000>; + regulator-max-microvolt = <3000000>; + regulator-state-mem { + regulator-on-in-suspend; + regulator-suspend-microvolt = <3000000>; + }; + }; + + vcc_cam: SWITCH_REG1 { + regulator-name = "vcc_cam"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vcc_mipi: SWITCH_REG2 { + regulator-name = "vcc_mipi"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + }; + }; + + vdd_cpu_b: regulator@40 { + compatible = "silergy,syr827"; + reg = <0x40>; + fcs,suspend-voltage-selector = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&vsel1_gpio>; + regulator-name = "vdd_cpu_b"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1500000>; + regulator-ramp-delay = <1000>; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc5v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; + + vdd_gpu: regulator@41 { + compatible = "silergy,syr828"; + reg = <0x41>; + fcs,suspend-voltage-selector = <1>; + pinctrl-names = "default"; + pinctrl-0 = <&vsel2_gpio>; + regulator-name = "vdd_gpu"; + regulator-min-microvolt = <712500>; + regulator-max-microvolt = <1500000>; + regulator-ramp-delay = <1000>; + regulator-always-on; + regulator-boot-on; + vin-supply = <&vcc5v0_sys>; + + regulator-state-mem { + regulator-off-in-suspend; + }; + }; +}; + +&i2c1 { + i2c-scl-rising-time-ns = <300>; + i2c-scl-falling-time-ns = <15>; + status = "okay"; +}; + +&i2c3 { + i2c-scl-rising-time-ns = <450>; + i2c-scl-falling-time-ns = <15>; + status = "okay"; +}; + +&i2c4 { + i2c-scl-rising-time-ns = <600>; + i2c-scl-falling-time-ns = <20>; + status = "okay"; +}; + +&i2s0 { + rockchip,playback-channels = <8>; + rockchip,capture-channels = <8>; + status = "okay"; +}; + +&i2s1 { + rockchip,playback-channels = <2>; + rockchip,capture-channels = <2>; + status = "okay"; +}; + +&i2s2 { + status = "okay"; +}; + +&io_domains { + status = "okay"; + + bt656-supply = <&vcc_3v0>; + audio-supply = <&vcc_3v0>; + sdmmc-supply = 
<&vcc_sdio>; + gpio1830-supply = <&vcc_3v0>; +}; + +&pmu_io_domains { + status = "okay"; + + pmu1830-supply = <&vcc_3v0>; +}; + +&pinctrl { + pcie { + pcie_pwr_en: pcie-pwr-en { + rockchip,pins = <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + pmic { + pmic_int_l: pmic-int-l { + rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>; + }; + + vsel1_gpio: vsel1-gpio { + rockchip,pins = <1 RK_PC1 RK_FUNC_GPIO &pcfg_pull_down>; + }; + + vsel2_gpio: vsel2-gpio { + rockchip,pins = <1 RK_PB6 RK_FUNC_GPIO &pcfg_pull_down>; + }; + }; + + usb-typec { + vcc5v0_typec_en: vcc5v0-typec-en { + rockchip,pins = <1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>; + }; + }; + + usb2 { + vcc5v0_host_en: vcc5v0-host-en { + rockchip,pins = <4 RK_PD1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; +}; + +&pwm2 { + status = "okay"; +}; + +&saradc { + status = "okay"; + + vref-supply = <&vcc_1v8>; +}; + +&sdmmc { + bus-width = <4>; + cap-mmc-highspeed; + cap-sd-highspeed; + cd-gpios = <&gpio0 RK_PA7 GPIO_ACTIVE_LOW>; + disable-wp; + max-frequency = <150000000>; + pinctrl-names = "default"; + pinctrl-0 = <&sdmmc_clk &sdmmc_cd &sdmmc_cmd &sdmmc_bus4>; + status = "okay"; +}; + +&sdhci { + bus-width = <8>; + mmc-hs400-1_8v; + mmc-hs400-enhanced-strobe; + non-removable; + status = "okay"; +}; + +&tcphy0 { + status = "okay"; +}; + +&tcphy1 { + status = "okay"; +}; + +&tsadc { + status = "okay"; + + /* tshut mode 0:CRU 1:GPIO */ + rockchip,hw-tshut-mode = <1>; + /* tshut polarity 0:LOW 1:HIGH */ + rockchip,hw-tshut-polarity = <1>; +}; + +&u2phy0 { + status = "okay"; + + u2phy0_otg: otg-port { + status = "okay"; + }; + + u2phy0_host: host-port { + phy-supply = <&vcc5v0_host>; + status = "okay"; + }; +}; + +&u2phy1 { + status = "okay"; + + u2phy1_otg: otg-port { + status = "okay"; + }; + + u2phy1_host: host-port { + phy-supply = <&vcc5v0_host>; + status = "okay"; + }; +}; + +&uart2 { + status = "okay"; +}; + +&usb_host0_ehci { + status = "okay"; +}; + +&usb_host0_ohci { + status = "okay"; +}; + +&usb_host1_ehci { + status = "okay"; +}; + +&usb_host1_ohci { + status = "okay"; +}; + +&usbdrd3_0 { + status = "okay"; +}; + +&usbdrd_dwc3_0 { + status = "okay"; + dr_mode = "otg"; +}; + +&usbdrd3_1 { + status = "okay"; +}; + +&usbdrd_dwc3_1 { + status = "okay"; + dr_mode = "host"; +}; + +&vopb { + status = "okay"; +}; + +&vopb_mmu { + status = "okay"; +}; + +&vopl { + status = "okay"; +}; + +&vopl_mmu { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi index 56abbb08c133..2927db4dda9d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi @@ -9,6 +9,15 @@ #include "rk3399-opp.dtsi" / { + sdio_pwrseq: sdio-pwrseq { + compatible = "mmc-pwrseq-simple"; + clocks = <&rk808 1>; + clock-names = "ext_clock"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_enable_h>; + reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; + }; + vcc1v8_s0: vcc1v8-s0 { compatible = "regulator-fixed"; regulator-name = "vcc1v8_s0"; @@ -94,6 +103,10 @@ status = "okay"; }; +&hdmi_sound { + status = "okay"; +}; + &i2c0 { clock-frequency = <400000>; i2c-scl-rising-time-ns = <168>; @@ -336,6 +349,10 @@ status = "okay"; }; +&i2s2 { + status = "okay"; +}; + &io_domains { bt656-supply = <&vcc1v8_s0>; /* bt656_gpio2ab_ms */ audio-supply = <&vcc1v8_s0>; /* audio_gpio3d4a_ms */ @@ -362,6 +379,20 @@ }; &pinctrl { + bt { + bt_enable_h: bt-enable-h { + rockchip,pins = <0 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_host_wake_l: 
bt-host-wake-l { + rockchip,pins = <0 RK_PA4 RK_FUNC_GPIO &pcfg_pull_none>; + }; + + bt_wake_l: bt-wake-l { + rockchip,pins = <2 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + sdmmc { sdmmc_bus1: sdmmc-bus1 { rockchip,pins = @@ -387,6 +418,26 @@ }; }; + sdio0 { + sdio0_bus4: sdio0-bus4 { + rockchip,pins = + <2 20 RK_FUNC_1 &pcfg_pull_up_20ma>, + <2 21 RK_FUNC_1 &pcfg_pull_up_20ma>, + <2 22 RK_FUNC_1 &pcfg_pull_up_20ma>, + <2 23 RK_FUNC_1 &pcfg_pull_up_20ma>; + }; + + sdio0_cmd: sdio0-cmd { + rockchip,pins = + <2 24 RK_FUNC_1 &pcfg_pull_up_20ma>; + }; + + sdio0_clk: sdio0-clk { + rockchip,pins = + <2 25 RK_FUNC_1 &pcfg_pull_none_20ma>; + }; + }; + pmic { pmic_int_l: pmic-int-l { rockchip,pins = @@ -403,6 +454,19 @@ <1 14 RK_FUNC_GPIO &pcfg_pull_down>; }; }; + + sdio-pwrseq { + wifi_enable_h: wifi-enable-h { + rockchip,pins = + <0 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; + + wifi { + wifi_host_wake_l: wifi-host-wake-l { + rockchip,pins = <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; }; &pwm2 { @@ -413,6 +477,32 @@ status = "okay"; }; +&sdio0 { + bus-width = <4>; + clock-frequency = <50000000>; + cap-sdio-irq; + cap-sd-highspeed; + keep-power-in-suspend; + mmc-pwrseq = <&sdio_pwrseq>; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk>; + sd-uhs-sdr104; + #address-cells = <1>; + #size-cells = <0>; + status = "okay"; + + brcmf: wifi@1 { + compatible = "brcm,bcm4329-fmac"; + reg = <1>; + interrupt-parent = <&gpio0>; + interrupts = ; + interrupt-names = "host-wake"; + pinctrl-names = "default"; + pinctrl-0 = <&wifi_host_wake_l>; + }; +}; + &sdhci { bus-width = <8>; mmc-hs400-1_8v; @@ -437,10 +527,28 @@ status = "okay"; }; +&tsadc { + rockchip,hw-tshut-mode = <1>; + rockchip,hw-tshut-polarity = <1>; + rockchip,hw-tshut-temp = <110000>; + status = "okay"; +}; + &uart0 { pinctrl-names = "default"; - pinctrl-0 = <&uart0_xfer &uart0_cts>; + pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>; status = "okay"; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + clocks = <&rk808 1>; + clock-names = "ext_clock"; + device-wakeup-gpios = <&gpio2 RK_PD3 GPIO_ACTIVE_HIGH>; + host-wakeup-gpios = <&gpio0 RK_PA4 GPIO_ACTIVE_HIGH>; + shutdown-gpios = <&gpio0 RK_PB1 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&bt_host_wake_l &bt_wake_l &bt_enable_h>; + }; }; &uart2 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts index be78172abc09..1f2394e0587d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts @@ -25,15 +25,6 @@ #clock-cells = <0>; }; - dc_12v: dc-12v { - compatible = "regulator-fixed"; - regulator-name = "dc_12v"; - regulator-always-on; - regulator-boot-on; - regulator-min-microvolt = <12000000>; - regulator-max-microvolt = <12000000>; - }; - gpio-keys { compatible = "gpio-keys"; autorepeat; @@ -83,6 +74,15 @@ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>; }; + vcc12v_dcin: vcc12v-dcin { + compatible = "regulator-fixed"; + regulator-name = "vcc12v_dcin"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <12000000>; + regulator-max-microvolt = <12000000>; + }; + /* switched by pmic_sleep */ vcc1v8_s3: vcca1v8_s3: vcc1v8-s3 { compatible = "regulator-fixed"; @@ -103,7 +103,7 @@ regulator-name = "vcc3v3_pcie"; regulator-always-on; regulator-boot-on; - vin-supply = <&dc_12v>; + vin-supply = <&vcc12v_dcin>; }; vcc3v3_sys: vcc3v3-sys { @@ -113,7 +113,7 @@ regulator-boot-on; 
regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; - vin-supply = <&vcc_sys>; + vin-supply = <&vcc5v0_sys>; }; /* Actually 3 regulators (host0, 1, 2) controlled by the same gpio */ @@ -125,7 +125,7 @@ pinctrl-0 = <&vcc5v0_host_en>; regulator-name = "vcc5v0_host"; regulator-always-on; - vin-supply = <&vcc_sys>; + vin-supply = <&vcc5v0_usb>; }; vcc5v0_typec: vcc5v0-typec-regulator { @@ -136,17 +136,27 @@ pinctrl-0 = <&vcc5v0_typec_en>; regulator-name = "vcc5v0_typec"; regulator-always-on; - vin-supply = <&vcc_sys>; + vin-supply = <&vcc5v0_usb>; }; - vcc_sys: vcc-sys { + vcc5v0_sys: vcc5v0-sys { compatible = "regulator-fixed"; - regulator-name = "vcc_sys"; + regulator-name = "vcc5v0_sys"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; - vin-supply = <&dc_12v>; + vin-supply = <&vcc12v_dcin>; + }; + + vcc5v0_usb: vcc5v0-usb { + compatible = "regulator-fixed"; + regulator-name = "vcc5v0_usb"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <5000000>; + regulator-max-microvolt = <5000000>; + vin-supply = <&vcc12v_dcin>; }; vdd_log: vdd-log { @@ -157,7 +167,7 @@ regulator-boot-on; regulator-min-microvolt = <800000>; regulator-max-microvolt = <1400000>; - vin-supply = <&vcc_sys>; + vin-supply = <&vcc5v0_sys>; }; }; @@ -212,6 +222,11 @@ status = "okay"; }; +&gpu { + mali-supply = <&vdd_gpu>; + status = "okay"; +}; + &i2c0 { clock-frequency = <400000>; i2c-scl-rising-time-ns = <168>; @@ -230,18 +245,18 @@ rockchip,system-power-controller; wakeup-source; - vcc1-supply = <&vcc_sys>; - vcc2-supply = <&vcc_sys>; - vcc3-supply = <&vcc_sys>; - vcc4-supply = <&vcc_sys>; - vcc6-supply = <&vcc_sys>; - vcc7-supply = <&vcc_sys>; + vcc1-supply = <&vcc5v0_sys>; + vcc2-supply = <&vcc5v0_sys>; + vcc3-supply = <&vcc5v0_sys>; + vcc4-supply = <&vcc5v0_sys>; + vcc6-supply = <&vcc5v0_sys>; + vcc7-supply = <&vcc5v0_sys>; vcc8-supply = <&vcc3v3_sys>; - vcc9-supply = <&vcc_sys>; - vcc10-supply = <&vcc_sys>; - vcc11-supply = <&vcc_sys>; + vcc9-supply = <&vcc5v0_sys>; + vcc10-supply = <&vcc5v0_sys>; + vcc11-supply = <&vcc5v0_sys>; vcc12-supply = <&vcc3v3_sys>; - vddio-supply = <&vcc1v8_pmu>; + vddio-supply = <&vcca_1v8>; regulators { vdd_center: DCDC_REG1 { @@ -311,8 +326,8 @@ }; }; - vcc1v8_pmu: LDO_REG3 { - regulator-name = "vcc1v8_pmu"; + vcca_1v8: LDO_REG3 { + regulator-name = "vcca_1v8"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <1800000>; @@ -413,7 +428,7 @@ regulator-ramp-delay = <1000>; regulator-always-on; regulator-boot-on; - vin-supply = <&vcc_sys>; + vin-supply = <&vcc5v0_sys>; regulator-state-mem { regulator-off-in-suspend; @@ -432,7 +447,7 @@ regulator-ramp-delay = <1000>; regulator-always-on; regulator-boot-on; - vin-supply = <&vcc_sys>; + vin-supply = <&vcc5v0_sys>; regulator-state-mem { regulator-off-in-suspend; @@ -522,12 +537,6 @@ }; }; - lcd-panel { - lcd_panel_reset: lcd-panel-reset { - rockchip,pins = <4 RK_PD6 RK_FUNC_GPIO &pcfg_pull_up>; - }; - }; - pcie { pcie_pwr_en: pcie-pwr-en { rockchip,pins = <1 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index 0b8f1edbd746..808ea77f951d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts @@ -91,7 +91,7 @@ pinctrl-0 = <&lcd_panel_reset>; power-supply = <&vcc3v3_s0>; - ports { + port { panel_in_edp: endpoint { 
remote-endpoint = <&edp_out_panel>; }; @@ -219,7 +219,6 @@ cap-sd-highspeed; cap-sdio-irq; clock-frequency = <50000000>; - disable-wp; keep-power-in-suspend; max-frequency = <50000000>; mmc-pwrseq = <&sdio_pwrseq>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 6cc1c9fa4ea6..db9d948c0b03 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -68,7 +68,7 @@ cpu_l0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; clocks = <&cru ARMCLKL>; @@ -79,7 +79,7 @@ cpu_l1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; clocks = <&cru ARMCLKL>; @@ -90,7 +90,7 @@ cpu_l2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; clocks = <&cru ARMCLKL>; @@ -101,7 +101,7 @@ cpu_l3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; clocks = <&cru ARMCLKL>; @@ -112,7 +112,7 @@ cpu_b0: cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x100>; enable-method = "psci"; clocks = <&cru ARMCLKB>; @@ -123,7 +123,7 @@ cpu_b1: cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0x0 0x101>; enable-method = "psci"; clocks = <&cru ARMCLKB>; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi index 31ba52b14e99..a3cd475b48d2 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld11.dtsi @@ -33,7 +33,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x000>; clocks = <&sys_clk 33>; enable-method = "psci"; @@ -42,7 +42,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x001>; clocks = <&sys_clk 33>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts index d7ae28afef7d..9ca692ed1b2b 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts @@ -145,10 +145,10 @@ }; }; -&nand { +&usb { status = "okay"; }; -&usb { +&nand { status = "okay"; }; diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi index 4a0c46cb11cd..017f6328c191 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi @@ -43,7 +43,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0 0x000>; clocks = <&sys_clk 32>; enable-method = "psci"; @@ -53,7 +53,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a72", "arm,armv8"; + compatible = "arm,cortex-a72"; reg = <0 0x001>; clocks = <&sys_clk 32>; enable-method = "psci"; @@ -63,7 +63,7 @@ cpu2: cpu@100 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x100>; clocks = <&sys_clk 33>; enable-method = 
"psci"; @@ -73,7 +73,7 @@ cpu3: cpu@101 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x101>; clocks = <&sys_clk 33>; enable-method = "psci"; @@ -869,6 +869,53 @@ }; }; + pcie: pcie@66000000 { + compatible = "socionext,uniphier-pcie", "snps,dw-pcie"; + status = "disabled"; + reg-names = "dbi", "link", "config"; + reg = <0x66000000 0x1000>, <0x66010000 0x10000>, + <0x2fff0000 0x10000>; + #address-cells = <3>; + #size-cells = <2>; + clocks = <&sys_clk 24>; + resets = <&sys_rst 24>; + num-lanes = <1>; + num-viewport = <1>; + bus-range = <0x0 0xff>; + device_type = "pci"; + ranges = + /* downstream I/O */ + <0x81000000 0 0x00000000 0x2ffe0000 0 0x00010000>, + /* non-prefetchable memory */ + <0x82000000 0 0x20000000 0x20000000 0 0x0ffe0000>; + #interrupt-cells = <1>; + interrupt-names = "dma", "msi"; + interrupts = <0 224 4>, <0 225 4>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc 0>, /* INTA */ + <0 0 0 2 &pcie_intc 1>, /* INTB */ + <0 0 0 3 &pcie_intc 2>, /* INTC */ + <0 0 0 4 &pcie_intc 3>; /* INTD */ + phy-names = "pcie-phy"; + phys = <&pcie_phy>; + + pcie_intc: legacy-interrupt-controller { + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&gic>; + interrupts = <0 226 4>; + }; + }; + + pcie_phy: phy@66038000 { + compatible = "socionext,uniphier-ld20-pcie-phy"; + reg = <0x66038000 0x4000>; + #phy-cells = <0>; + clocks = <&sys_clk 24>; + resets = <&sys_rst 24>; + socionext,syscon = <&soc_glue>; + }; + nand: nand@68000000 { compatible = "socionext,uniphier-denali-nand-v5b"; status = "disabled"; diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts index a41f7cac952a..1965e4dfe4a4 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3-ref.dts @@ -101,14 +101,18 @@ }; }; -&nand { +&usb0 { status = "okay"; }; -&usb0 { +&usb1 { status = "okay"; }; -&usb1 { +&pcie { + status = "okay"; +}; + +&nand { status = "okay"; }; diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi index 4f57c9e9d7a8..bb97abe1a55f 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi @@ -39,7 +39,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x000>; clocks = <&sys_clk 33>; enable-method = "psci"; @@ -48,7 +48,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x001>; clocks = <&sys_clk 33>; enable-method = "psci"; @@ -57,7 +57,7 @@ cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x002>; clocks = <&sys_clk 33>; enable-method = "psci"; @@ -66,7 +66,7 @@ cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0 0x003>; clocks = <&sys_clk 33>; enable-method = "psci"; @@ -727,6 +727,53 @@ }; }; + pcie: pcie@66000000 { + compatible = "socionext,uniphier-pcie", "snps,dw-pcie"; + status = "disabled"; + reg-names = "dbi", "link", "config"; + reg = <0x66000000 0x1000>, <0x66010000 0x10000>, + <0x2fff0000 0x10000>; + #address-cells = <3>; + #size-cells = <2>; + clocks = <&sys_clk 24>; + resets = <&sys_rst 24>; + num-lanes = <1>; + num-viewport = <1>; + bus-range = <0x0 0xff>; + 
device_type = "pci"; + ranges = + /* downstream I/O */ + <0x81000000 0 0x00000000 0x2ffe0000 0 0x00010000>, + /* non-prefetchable memory */ + <0x82000000 0 0x20000000 0x20000000 0 0x0ffe0000>; + #interrupt-cells = <1>; + interrupt-names = "dma", "msi"; + interrupts = <0 224 4>, <0 225 4>; + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc 0>, /* INTA */ + <0 0 0 2 &pcie_intc 1>, /* INTB */ + <0 0 0 3 &pcie_intc 2>, /* INTC */ + <0 0 0 4 &pcie_intc 3>; /* INTD */ + phy-names = "pcie-phy"; + phys = <&pcie_phy>; + + pcie_intc: legacy-interrupt-controller { + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&gic>; + interrupts = <0 226 4>; + }; + }; + + pcie_phy: phy@66038000 { + compatible = "socionext,uniphier-pxs3-pcie-phy"; + reg = <0x66038000 0x4000>; + #phy-cells = <0>; + clocks = <&sys_clk 24>; + resets = <&sys_rst 24>; + socionext,syscon = <&soc_glue>; + }; + nand: nand@68000000 { compatible = "socionext,uniphier-denali-nand-v5b"; status = "disabled"; diff --git a/arch/arm64/boot/dts/sprd/sc2731.dtsi b/arch/arm64/boot/dts/sprd/sc2731.dtsi index 82bd642d770b..e15409f55f43 100644 --- a/arch/arm64/boot/dts/sprd/sc2731.dtsi +++ b/arch/arm64/boot/dts/sprd/sc2731.dtsi @@ -13,12 +13,18 @@ spi-max-frequency = <26000000>; interrupts = ; interrupt-controller; - #interrupt-cells = <2>; + #interrupt-cells = <1>; #address-cells = <1>; #size-cells = <0>; + charger@0 { + compatible = "sprd,sc2731-charger"; + reg = <0x0>; + monitored-battery = <&bat>; + }; + led-controller@200 { - compatible = "sprd,sc27xx-bltc", "sprd,sc2731-bltc"; + compatible = "sprd,sc2731-bltc"; reg = <0x200>; #address-cells = <1>; #size-cells = <0>; @@ -40,17 +46,17 @@ }; rtc@280 { - compatible = "sprd,sc27xx-rtc", "sprd,sc2731-rtc"; + compatible = "sprd,sc2731-rtc"; reg = <0x280>; interrupt-parent = <&sc2731_pmic>; - interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <2>; }; pmic_eic: gpio@300 { - compatible = "sprd,sc27xx-eic"; + compatible = "sprd,sc2731-eic"; reg = <0x300>; interrupt-parent = <&sc2731_pmic>; - interrupts = <5 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <5>; gpio-controller; #gpio-cells = <2>; interrupt-controller; @@ -58,29 +64,57 @@ }; efuse@380 { - compatible = "sprd,sc27xx-efuse", "sprd,sc2731-efuse"; + compatible = "sprd,sc2731-efuse"; reg = <0x380>; #address-cells = <1>; #size-cells = <1>; hwlocks = <&hwlock 12>; + + fgu_calib: calib@6 { + reg = <0x6 0x2>; + bits = <0 9>; + }; + + adc_big_scale: calib@24 { + reg = <0x24 0x2>; + }; + + adc_small_scale: calib@26 { + reg = <0x26 0x2>; + }; }; pmic_adc: adc@480 { - compatible = "sprd,sc27xx-adc", "sprd,sc2731-adc"; + compatible = "sprd,sc2731-adc"; reg = <0x480>; interrupt-parent = <&sc2731_pmic>; - interrupts = <0 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <0>; #io-channel-cells = <1>; hwlocks = <&hwlock 4>; + nvmem-cell-names = "big_scale_calib", "small_scale_calib"; + nvmem-cells = <&adc_big_scale>, <&adc_small_scale>; + }; + + fgu@a00 { + compatible = "sprd,sc2731-fgu"; + reg = <0xa00>; + bat-detect-gpio = <&pmic_eic 9 GPIO_ACTIVE_HIGH>; + io-channels = <&pmic_adc 3>, <&pmic_adc 6>; + io-channel-names = "bat-temp", "charge-vol"; + monitored-battery = <&bat>; + nvmem-cell-names = "fgu_calib"; + nvmem-cells = <&fgu_calib>; + interrupt-parent = <&sc2731_pmic>; + interrupts = <4>; }; vibrator@ec8 { - compatible = "sprd,sc27xx-vibrator", "sprd,sc2731-vibrator"; + compatible = "sprd,sc2731-vibrator"; reg = <0xec8>; }; regulators { - compatible = "sprd,sc27xx-regulator"; + compatible = "sprd,sc2731-regulator"; vddarm0: BUCK_CPU0 
{ regulator-name = "vddarm0"; diff --git a/arch/arm64/boot/dts/sprd/sc9836.dtsi b/arch/arm64/boot/dts/sprd/sc9836.dtsi index 4bcdbb709c01..286d7173f94f 100644 --- a/arch/arm64/boot/dts/sprd/sc9836.dtsi +++ b/arch/arm64/boot/dts/sprd/sc9836.dtsi @@ -18,28 +18,28 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; }; cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; }; cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; }; cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; }; diff --git a/arch/arm64/boot/dts/sprd/sc9860.dtsi b/arch/arm64/boot/dts/sprd/sc9860.dtsi index 5f57bf055cde..b25d19977170 100644 --- a/arch/arm64/boot/dts/sprd/sc9860.dtsi +++ b/arch/arm64/boot/dts/sprd/sc9860.dtsi @@ -50,7 +50,7 @@ CPU0: cpu@530000 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530000>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; @@ -58,7 +58,7 @@ CPU1: cpu@530001 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530001>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; @@ -66,7 +66,7 @@ CPU2: cpu@530002 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530002>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; @@ -74,7 +74,7 @@ CPU3: cpu@530003 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530003>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; @@ -82,7 +82,7 @@ CPU4: cpu@530100 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530100>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; @@ -90,7 +90,7 @@ CPU5: cpu@530101 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530101>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; @@ -98,7 +98,7 @@ CPU6: cpu@530102 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530102>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; @@ -106,7 +106,7 @@ CPU7: cpu@530103 { device_type = "cpu"; - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x530103>; enable-method = "psci"; cpu-idle-states = <&CORE_PD &CLUSTER_PD>; diff --git a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts index 985ebb5d157e..6b95fd94cee3 100644 --- a/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts +++ b/arch/arm64/boot/dts/sprd/sp9860g-1h10.dts @@ -39,6 +39,22 @@ #size-cells = <2>; ranges; }; + + bat: battery { + compatible = "simple-battery"; + charge-full-design-microamp-hours = <1900000>; + charge-term-current-microamp = <120000>; + constant_charge_voltage_max_microvolt = <4350000>; + internal-resistance-micro-ohms = <250000>; + ocv-capacity-celsius = <20>; + ocv-capacity-table-0 = <4185000 100>, <4113000 95>, <4066000 90>, + <4022000 85>, 
<3983000 80>, <3949000 75>, + <3917000 70>, <3889000 65>, <3864000 60>, + <3835000 55>, <3805000 50>, <3787000 45>, + <3777000 40>, <3773000 35>, <3770000 30>, + <3765000 25>, <3752000 20>, <3724000 15>, + <3680000 10>, <3605000 5>, <3400000 0>; + }; }; &uart0 { diff --git a/arch/arm64/boot/dts/synaptics/as370.dtsi b/arch/arm64/boot/dts/synaptics/as370.dtsi index 7331acf3874e..addeb0efc616 100644 --- a/arch/arm64/boot/dts/synaptics/as370.dtsi +++ b/arch/arm64/boot/dts/synaptics/as370.dtsi @@ -23,7 +23,7 @@ #size-cells = <0>; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0>; enable-method = "psci"; @@ -32,7 +32,7 @@ }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x1>; enable-method = "psci"; @@ -41,7 +41,7 @@ }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x2>; enable-method = "psci"; @@ -50,7 +50,7 @@ }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x3>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi index 216767e2edf6..15625b99e336 100644 --- a/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi +++ b/arch/arm64/boot/dts/synaptics/berlin4ct.dtsi @@ -27,7 +27,7 @@ #size-cells = <0>; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x0>; enable-method = "psci"; @@ -36,7 +36,7 @@ }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x1>; enable-method = "psci"; @@ -45,7 +45,7 @@ }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x2>; enable-method = "psci"; @@ -54,7 +54,7 @@ }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; reg = <0x3>; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 272cf8fc8d30..752455269fab 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -6,6 +6,26 @@ */ &cbass_main { + msmc_ram: sram@70000000 { + compatible = "mmio-sram"; + reg = <0x0 0x70000000 0x0 0x200000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x70000000 0x200000>; + + atf-sram@0 { + reg = <0x0 0x20000>; + }; + + sysfw-sram@f0000 { + reg = <0xf0000 0x10000>; + }; + + l3cache-sram@100000 { + reg = <0x100000 0x100000>; + }; + }; + gic500: interrupt-controller@1800000 { compatible = "arm,gic-v3"; #address-cells = <2>; @@ -191,4 +211,102 @@ #address-cells = <1>; #size-cells = <0>; }; + + sdhci0: sdhci@4f80000 { + compatible = "ti,am654-sdhci-5.1"; + reg = <0x0 0x4f80000 0x0 0x260>, <0x0 0x4f90000 0x0 0x134>; + power-domains = <&k3_pds 47>; + clocks = <&k3_clks 47 0>, <&k3_clks 47 1>; + clock-names = "clk_ahb", "clk_xin"; + interrupts = ; + mmc-ddr-1_8v; + mmc-hs200-1_8v; + ti,otap-del-sel = <0x2>; + ti,trm-icp = <0x8>; + dma-coherent; + }; + + scm_conf: scm_conf@100000 { + compatible = "syscon", "simple-mfd"; + reg = <0 0x00100000 0 0x1c000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x00100000 0x1c000>; + }; + + dwc3_0: dwc3@4000000 { + compatible = "ti,am654-dwc3"; + reg = <0x0 
0x4000000 0x0 0x4000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x4000000 0x20000>; + interrupts = ; + dma-coherent; + power-domains = <&k3_pds 151>; + assigned-clocks = <&k3_clks 151 2>, <&k3_clks 151 7>; + assigned-clock-parents = <&k3_clks 151 4>, /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */ + <&k3_clks 151 9>; /* set PIPE3_TXB_CLK to CLK_12M_RC/256 (for HS only) */ + + usb0: usb@10000 { + compatible = "snps,dwc3"; + reg = <0x10000 0x10000>; + interrupts = , + , + ; + interrupt-names = "peripheral", + "host", + "otg"; + maximum-speed = "high-speed"; + dr_mode = "otg"; + phys = <&usb0_phy>; + phy-names = "usb2-phy"; + snps,dis_u3_susphy_quirk; + }; + }; + + usb0_phy: phy@4100000 { + compatible = "ti,am654-usb2", "ti,omap-usb2"; + reg = <0x0 0x4100000 0x0 0x54>; + syscon-phy-power = <&scm_conf 0x4000>; + clocks = <&k3_clks 151 0>, <&k3_clks 151 1>; + clock-names = "wkupclk", "refclk"; + #phy-cells = <0>; + }; + + dwc3_1: dwc3@4020000 { + compatible = "ti,am654-dwc3"; + reg = <0x0 0x4020000 0x0 0x4000>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x0 0x4020000 0x20000>; + interrupts = ; + dma-coherent; + power-domains = <&k3_pds 152>; + assigned-clocks = <&k3_clks 152 2>; + assigned-clock-parents = <&k3_clks 152 4>; /* set REF_CLK to 20MHz i.e. PER0_PLL/48 */ + + usb1: usb@10000 { + compatible = "snps,dwc3"; + reg = <0x10000 0x10000>; + interrupts = , + , + ; + interrupt-names = "peripheral", + "host", + "otg"; + maximum-speed = "high-speed"; + dr_mode = "otg"; + phys = <&usb1_phy>; + phy-names = "usb2-phy"; + }; + }; + + usb1_phy: phy@4110000 { + compatible = "ti,am654-usb2", "ti,omap-usb2"; + reg = <0x0 0x4110000 0x0 0x54>; + syscon-phy-power = <&scm_conf 0x4020>; + clocks = <&k3_clks 152 0>, <&k3_clks 152 1>; + clock-names = "wkupclk", "refclk"; + #phy-cells = <0>; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi index 593f718e8fb5..6f7d2b316ded 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi @@ -57,4 +57,34 @@ #address-cells = <1>; #size-cells = <0>; }; + + tscadc0: tscadc@40200000 { + compatible = "ti,am654-tscadc", "ti,am3359-tscadc"; + reg = <0x0 0x40200000 0x0 0x1000>; + interrupts = ; + clocks = <&k3_clks 0 2>; + assigned-clocks = <&k3_clks 0 2>; + assigned-clock-rates = <60000000>; + clock-names = "adc_tsc_fck"; + + adc { + #io-channel-cells = <1>; + compatible = "ti,am654-adc", "ti,am3359-adc"; + }; + }; + + tscadc1: tscadc@40210000 { + compatible = "ti,am654-tscadc", "ti,am3359-tscadc"; + reg = <0x0 0x40210000 0x0 0x1000>; + interrupts = ; + clocks = <&k3_clks 1 2>; + assigned-clocks = <&k3_clks 1 2>; + assigned-clock-rates = <60000000>; + clock-names = "adc_tsc_fck"; + + adc { + #io-channel-cells = <1>; + compatible = "ti,am654-adc", "ti,am3359-adc"; + }; + }; }; diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts index e41fc3a5987b..cf1aa276a1ea 100644 --- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts +++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts @@ -69,6 +69,29 @@ AM65X_IOPAD(0x01bc, PIN_OUTPUT, 0) /* (AG13) SPI0_CS0 */ >; }; + + main_mmc0_pins_default: main-mmc0-pins-default { + pinctrl-single,pins = < + AM65X_IOPAD(0x01a8, PIN_INPUT_PULLDOWN, 0) /* (B25) MMC0_CLK */ + AM65X_IOPAD(0x01ac, PIN_INPUT_PULLUP, 0) /* (B27) MMC0_CMD */ + AM65X_IOPAD(0x01a4, PIN_INPUT_PULLUP, 0) /* (A26) MMC0_DAT0 */ + AM65X_IOPAD(0x01a0, PIN_INPUT_PULLUP, 0) /* (E25) MMC0_DAT1 */ + 
AM65X_IOPAD(0x019c, PIN_INPUT_PULLUP, 0) /* (C26) MMC0_DAT2 */ + AM65X_IOPAD(0x0198, PIN_INPUT_PULLUP, 0) /* (A25) MMC0_DAT3 */ + AM65X_IOPAD(0x0194, PIN_INPUT_PULLUP, 0) /* (E24) MMC0_DAT4 */ + AM65X_IOPAD(0x0190, PIN_INPUT_PULLUP, 0) /* (A24) MMC0_DAT5 */ + AM65X_IOPAD(0x018c, PIN_INPUT_PULLUP, 0) /* (B26) MMC0_DAT6 */ + AM65X_IOPAD(0x0188, PIN_INPUT_PULLUP, 0) /* (D25) MMC0_DAT7 */ + AM65X_IOPAD(0x01b4, PIN_INPUT_PULLUP, 0) /* (A23) MMC0_SDCD */ + AM65X_IOPAD(0x01b0, PIN_INPUT, 0) /* (C25) MMC0_DS */ + >; + }; + + usb1_pins_default: usb1_pins_default { + pinctrl-single,pins = < + AM65X_IOPAD(0x02c0, PIN_OUTPUT, 0) /* (AC8) USB1_DRVVBUS */ + >; + }; }; &main_pmx1 { @@ -163,3 +186,45 @@ #size-cells= <1>; }; }; + +&sdhci0 { + pinctrl-names = "default"; + pinctrl-0 = <&main_mmc0_pins_default>; + bus-width = <8>; + non-removable; + ti,driver-strength-ohm = <50>; +}; + +&dwc3_1 { + status = "okay"; +}; + +&usb1_phy { + status = "okay"; +}; + +&usb1 { + pinctrl-names = "default"; + pinctrl-0 = <&usb1_pins_default>; + dr_mode = "otg"; +}; + +&dwc3_0 { + status = "disabled"; +}; + +&usb0_phy { + status = "disabled"; +}; + +&tscadc0 { + adc { + ti,adc-channels = <0 1 2 3 4 5 6 7>; + }; +}; + +&tscadc1 { + adc { + ti,adc-channels = <0 1 2 3 4 5 6 7>; + }; +}; diff --git a/arch/arm64/boot/dts/ti/k3-am654.dtsi b/arch/arm64/boot/dts/ti/k3-am654.dtsi index 2affa6f6617e..b221abf43ac2 100644 --- a/arch/arm64/boot/dts/ti/k3-am654.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am654.dtsi @@ -34,7 +34,7 @@ }; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x000>; device_type = "cpu"; enable-method = "psci"; @@ -48,7 +48,7 @@ }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x001>; device_type = "cpu"; enable-method = "psci"; @@ -62,7 +62,7 @@ }; cpu2: cpu@100 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x100>; device_type = "cpu"; enable-method = "psci"; @@ -76,7 +76,7 @@ }; cpu3: cpu@101 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x101>; device_type = "cpu"; enable-method = "psci"; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts index 13a0a028df98..e5699d0d91e4 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts @@ -101,6 +101,7 @@ sdio_pwrseq: sdio-pwrseq { compatible = "mmc-pwrseq-simple"; reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */ + post-power-on-delay-ms = <10>; }; }; diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi index fa4fd777d90e..9aa67340a4d8 100644 --- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi +++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi @@ -22,7 +22,7 @@ #size-cells = <0>; cpu0: cpu@0 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; enable-method = "psci"; operating-points-v2 = <&cpu_opp_table>; @@ -31,7 +31,7 @@ }; cpu1: cpu@1 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; enable-method = "psci"; reg = <0x1>; @@ -40,7 +40,7 @@ }; cpu2: cpu@2 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; enable-method = "psci"; reg = <0x2>; @@ -49,7 +49,7 @@ }; cpu3: cpu@3 { - compatible = "arm,cortex-a53", "arm,armv8"; + compatible = "arm,cortex-a53"; device_type = "cpu"; 
enable-method = "psci"; reg = <0x3>; diff --git a/arch/arm64/boot/dts/zte/zx296718.dtsi b/arch/arm64/boot/dts/zte/zx296718.dtsi index 6eef64761009..cc54837ff4ba 100644 --- a/arch/arm64/boot/dts/zte/zx296718.dtsi +++ b/arch/arm64/boot/dts/zte/zx296718.dtsi @@ -86,7 +86,7 @@ cpu0: cpu@0 { device_type = "cpu"; - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x0>; enable-method = "psci"; clocks = <&topcrm A53_GATE>; @@ -95,7 +95,7 @@ cpu1: cpu@1 { device_type = "cpu"; - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x1>; enable-method = "psci"; clocks = <&topcrm A53_GATE>; @@ -104,7 +104,7 @@ cpu2: cpu@2 { device_type = "cpu"; - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x2>; enable-method = "psci"; clocks = <&topcrm A53_GATE>; @@ -113,7 +113,7 @@ cpu3: cpu@3 { device_type = "cpu"; - compatible = "arm,cortex-a53","arm,armv8"; + compatible = "arm,cortex-a53"; reg = <0x0 0x3>; enable-method = "psci"; clocks = <&topcrm A53_GATE>; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index c8432e24207e..2d9c39033c1a 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -43,6 +43,7 @@ CONFIG_ARCH_HISI=y CONFIG_ARCH_MEDIATEK=y CONFIG_ARCH_MESON=y CONFIG_ARCH_MVEBU=y +CONFIG_ARCH_MXC=y CONFIG_ARCH_QCOM=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_R8A774A1=y @@ -113,6 +114,8 @@ CONFIG_ARM_SCPI_PROTOCOL=y CONFIG_RASPBERRYPI_FIRMWARE=y CONFIG_TI_SCI_PROTOCOL=y CONFIG_EFI_CAPSULE_LOADER=y +CONFIG_IMX_SCU=y +CONFIG_IMX_SCU_PD=y CONFIG_ACPI=y CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI_GHES=y @@ -245,6 +248,7 @@ CONFIG_NET_XGENE=y CONFIG_ATL1C=m CONFIG_MACB=y CONFIG_THUNDER_NIC_PF=y +CONFIG_FEC=y CONFIG_HIX5HD2_GMAC=y CONFIG_HNS_DSAF=y CONFIG_HNS_ENET=y @@ -319,6 +323,9 @@ CONFIG_SERIAL_MESON_CONSOLE=y CONFIG_SERIAL_SAMSUNG=y CONFIG_SERIAL_SAMSUNG_CONSOLE=y CONFIG_SERIAL_TEGRA=y +CONFIG_SERIAL_TEGRA_TCU=y +CONFIG_SERIAL_IMX=y +CONFIG_SERIAL_IMX_CONSOLE=y CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y @@ -326,6 +333,8 @@ CONFIG_SERIAL_QCOM_GENI=y CONFIG_SERIAL_QCOM_GENI_CONSOLE=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y +CONFIG_SERIAL_FSL_LPUART=y +CONFIG_SERIAL_FSL_LPUART_CONSOLE=y CONFIG_SERIAL_MVEBU_UART=y CONFIG_SERIAL_DEV_BUS=y CONFIG_VIRTIO_CONSOLE=y @@ -339,6 +348,7 @@ CONFIG_I2C_MUX=y CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_BCM2835=m CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=m CONFIG_I2C_IMX=y CONFIG_I2C_MESON=y CONFIG_I2C_MV64XXX=y @@ -362,9 +372,12 @@ CONFIG_SPI_ROCKCHIP=y CONFIG_SPI_QUP=y CONFIG_SPI_S3C64XX=y CONFIG_SPI_SPIDEV=m +CONFIG_SPI_NXP_FLEXSPI=y CONFIG_SPMI=y CONFIG_PINCTRL_SINGLE=y CONFIG_PINCTRL_MAX77620=y +CONFIG_PINCTRL_IMX8MQ=y +CONFIG_PINCTRL_IMX8QXP=y CONFIG_PINCTRL_IPQ8074=y CONFIG_PINCTRL_MSM8916=y CONFIG_PINCTRL_MSM8994=y @@ -401,6 +414,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y CONFIG_CPU_THERMAL=y CONFIG_THERMAL_EMULATION=y CONFIG_ROCKCHIP_THERMAL=m +CONFIG_RCAR_THERMAL=y CONFIG_RCAR_GEN3_THERMAL=y CONFIG_ARMADA_THERMAL=y CONFIG_BCM2835_THERMAL=m @@ -412,6 +426,7 @@ CONFIG_UNIPHIER_THERMAL=y CONFIG_WATCHDOG=y CONFIG_ARM_SP805_WATCHDOG=y CONFIG_S3C2410_WATCHDOG=y +CONFIG_IMX2_WDT=y CONFIG_MESON_GXBB_WATCHDOG=m CONFIG_MESON_WATCHDOG=m CONFIG_RENESAS_WDT=y @@ -437,6 +452,8 @@ CONFIG_REGULATOR_GPIO=y CONFIG_REGULATOR_HI6421V530=y CONFIG_REGULATOR_HI655X=y CONFIG_REGULATOR_MAX77620=y +CONFIG_REGULATOR_MAX8973=y +CONFIG_REGULATOR_PFUZE100=y CONFIG_REGULATOR_PWM=y 
CONFIG_REGULATOR_QCOM_RPMH=y CONFIG_REGULATOR_QCOM_SMD_RPM=y @@ -455,12 +472,14 @@ CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y # CONFIG_DVB_NET is not set +CONFIG_V4L_PLATFORM_DRIVERS=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=m CONFIG_V4L_MEM2MEM_DRIVERS=y CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m CONFIG_VIDEO_SAMSUNG_S5P_MFC=m CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m +CONFIG_VIDEO_SUN6I_CSI=m CONFIG_VIDEO_RENESAS_FCP=m CONFIG_VIDEO_RENESAS_VSP1=m CONFIG_DRM=m @@ -485,13 +504,16 @@ CONFIG_DRM_SUN8I_DW_HDMI=m CONFIG_DRM_SUN8I_MIXER=m CONFIG_DRM_TEGRA=m CONFIG_DRM_PANEL_SIMPLE=m +CONFIG_DRM_SII902X=m CONFIG_DRM_I2C_ADV7511=m CONFIG_DRM_VC4=m CONFIG_DRM_HISI_HIBMC=m CONFIG_DRM_HISI_KIRIN=m CONFIG_DRM_MESON=m +CONFIG_DRM_PL111=m CONFIG_FB=y -CONFIG_FB_ARMCLCD=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_GENERIC=m CONFIG_BACKLIGHT_PWM=m CONFIG_BACKLIGHT_LP855X=m @@ -510,6 +532,7 @@ CONFIG_SND_MESON_AXG_SOUND_CARD=m CONFIG_SND_SOC_SAMSUNG=y CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_AK4613=m +CONFIG_SND_SOC_PCM3168A_I2C=m CONFIG_SND_SIMPLE_CARD=m CONFIG_SND_AUDIO_GRAPH_CARD=m CONFIG_SND_SOC_ES7134=m @@ -551,6 +574,7 @@ CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_OF_ARASAN=y CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_MMC_SDHCI_CADENCE=y +CONFIG_MMC_SDHCI_ESDHC_IMX=y CONFIG_MMC_SDHCI_TEGRA=y CONFIG_MMC_SDHCI_F_SDH30=y CONFIG_MMC_MESON_GX=y @@ -590,6 +614,7 @@ CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_SUN6I=y CONFIG_RTC_DRV_ARMADA38X=y CONFIG_RTC_DRV_TEGRA=y +CONFIG_RTC_DRV_IMX_SC=m CONFIG_RTC_DRV_XGENE=y CONFIG_DMADEVICES=y CONFIG_DMA_BCM2835=m @@ -617,6 +642,8 @@ CONFIG_COMMON_CLK_CS2000_CP=y CONFIG_COMMON_CLK_S2MPS11=y CONFIG_CLK_QORIQ=y CONFIG_COMMON_CLK_PWM=y +CONFIG_CLK_IMX8MQ=y +CONFIG_CLK_IMX8QXP=y CONFIG_TI_SCI_CLK=y CONFIG_COMMON_CLK_QCOM=y CONFIG_QCOM_CLK_SMD_RPM=y @@ -631,6 +658,7 @@ CONFIG_SDM_GCC_845=y CONFIG_HWSPINLOCK=y CONFIG_HWSPINLOCK_QCOM=y CONFIG_ARM_MHU=y +CONFIG_IMX_MBOX=y CONFIG_PLATFORM_MHU=y CONFIG_BCM2835_MBOX=y CONFIG_TI_MESSAGE_MANAGER=y @@ -648,6 +676,7 @@ CONFIG_RPMSG_QCOM_GLINK_RPM=y CONFIG_RPMSG_QCOM_GLINK_SMEM=m CONFIG_RPMSG_QCOM_SMD=y CONFIG_RASPBERRYPI_POWER=y +CONFIG_IMX_GPCV2_PM_DOMAINS=y CONFIG_QCOM_COMMAND_DB=y CONFIG_QCOM_GENI_SE=y CONFIG_QCOM_GLINK_SSR=m @@ -693,6 +722,7 @@ CONFIG_PHY_HISI_INNO_USB2=y CONFIG_PHY_MVEBU_CP110_COMPHY=y CONFIG_PHY_QCOM_QMP=m CONFIG_PHY_QCOM_USB_HS=y +CONFIG_PHY_RCAR_GEN3_PCIE=y CONFIG_PHY_RCAR_GEN3_USB2=y CONFIG_PHY_RCAR_GEN3_USB3=m CONFIG_PHY_ROCKCHIP_EMMC=y diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S index e3a375c4cb83..1b151442dac1 100644 --- a/arch/arm64/crypto/aes-ce-ccm-core.S +++ b/arch/arm64/crypto/aes-ce-ccm-core.S @@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data) beq 10f ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */ b 7b -8: mov w7, w8 +8: cbz w8, 91f + mov w7, w8 add w8, w8, #16 9: ext v1.16b, v1.16b, v1.16b, #1 adds w7, w7, #1 bne 9b - eor v0.16b, v0.16b, v1.16b +91: eor v0.16b, v0.16b, v1.16b st1 {v0.16b}, [x0] 10: str w8, [x3] ret diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index 68b11aa690e4..5fc6f51908fd 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], abytes -= added; } - while (abytes > AES_BLOCK_SIZE) { + while (abytes >= AES_BLOCK_SIZE) { __aes_arm64_encrypt(key->key_enc, mac, mac, num_rounds(key)); crypto_xor(mac, in, 
AES_BLOCK_SIZE); @@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[], num_rounds(key)); crypto_xor(mac, in, abytes); *macp = abytes; - } else { - *macp = 0; } } } @@ -255,7 +253,7 @@ static int ccm_encrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_encrypt(&walk, req, true); + err = skcipher_walk_aead_encrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { @@ -313,7 +311,7 @@ static int ccm_decrypt(struct aead_request *req) /* preserve the original iv for the final round */ memcpy(buf, req->iv, AES_BLOCK_SIZE); - err = skcipher_walk_aead_decrypt(&walk, req, true); + err = skcipher_walk_aead_decrypt(&walk, req, false); if (may_use_simd()) { while (walk.nbytes) { diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S index 67700045a0e0..4c7ce231963c 100644 --- a/arch/arm64/crypto/aes-modes.S +++ b/arch/arm64/crypto/aes-modes.S @@ -320,8 +320,7 @@ AES_ENTRY(aes_ctr_encrypt) .Lctrtailblock: st1 {v0.16b}, [x0] - ldp x29, x30, [sp], #16 - ret + b .Lctrout .Lctrcarry: umov x7, v4.d[0] /* load upper word of ctr */ diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S index e613a87f8b53..8432c8d0dea6 100644 --- a/arch/arm64/crypto/aes-neonbs-core.S +++ b/arch/arm64/crypto/aes-neonbs-core.S @@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 ) 8: next_ctr v0 st1 {v0.16b}, [x24] - cbz x23, 0f + cbz x23, .Lctr_done cond_yield_neon 98b b 99b -0: frame_pop +.Lctr_done: + frame_pop ret /* * If we are handling the tail of the input (x6 != NULL), return the * final keystream block back to the caller. */ +0: cbz x25, 8b + st1 {v0.16b}, [x25] + b 8b 1: cbz x25, 8b st1 {v1.16b}, [x25] b 8b diff --git a/arch/arm64/crypto/chacha-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S index 021bb9e9784b..706c4e10e9e2 100644 --- a/arch/arm64/crypto/chacha-neon-core.S +++ b/arch/arm64/crypto/chacha-neon-core.S @@ -158,8 +158,8 @@ ENTRY(hchacha_block_neon) mov w3, w2 bl chacha_permute - st1 {v0.16b}, [x1], #16 - st1 {v3.16b}, [x1] + st1 {v0.4s}, [x1], #16 + st1 {v3.4s}, [x1] ldp x29, x30, [sp], #16 ret @@ -532,6 +532,10 @@ ENTRY(chacha_4block_xor_neon) add v3.4s, v3.4s, v19.4s add a2, a2, w8 add a3, a3, w9 +CPU_BE( rev a0, a0 ) +CPU_BE( rev a1, a1 ) +CPU_BE( rev a2, a2 ) +CPU_BE( rev a3, a3 ) ld4r {v24.4s-v27.4s}, [x0], #16 ld4r {v28.4s-v31.4s}, [x0] @@ -552,6 +556,10 @@ ENTRY(chacha_4block_xor_neon) add v7.4s, v7.4s, v23.4s add a6, a6, w8 add a7, a7, w9 +CPU_BE( rev a4, a4 ) +CPU_BE( rev a5, a5 ) +CPU_BE( rev a6, a6 ) +CPU_BE( rev a7, a7 ) // x8[0-3] += s2[0] // x9[0-3] += s2[1] @@ -569,6 +577,10 @@ ENTRY(chacha_4block_xor_neon) add v11.4s, v11.4s, v27.4s add a10, a10, w8 add a11, a11, w9 +CPU_BE( rev a8, a8 ) +CPU_BE( rev a9, a9 ) +CPU_BE( rev a10, a10 ) +CPU_BE( rev a11, a11 ) // x12[0-3] += s3[0] // x13[0-3] += s3[1] @@ -586,6 +598,10 @@ ENTRY(chacha_4block_xor_neon) add v15.4s, v15.4s, v31.4s add a14, a14, w8 add a15, a15, w9 +CPU_BE( rev a12, a12 ) +CPU_BE( rev a13, a13 ) +CPU_BE( rev a14, a14 ) +CPU_BE( rev a15, a15 ) // interleave 32-bit words in state n, n+1 ldp w6, w7, [x2], #64 diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S index 9e82e8e8ed05..e545b42e6a46 100644 --- a/arch/arm64/crypto/crct10dif-ce-core.S +++ b/arch/arm64/crypto/crct10dif-ce-core.S @@ -2,12 +2,14 @@ // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions // // Copyright (C) 
2016 Linaro Ltd +// Copyright (C) 2019 Google LLC // // This program is free software; you can redistribute it and/or modify // it under the terms of the GNU General Public License version 2 as // published by the Free Software Foundation. // +// Derived from the x86 version: // // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions // @@ -54,19 +56,11 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // -// Function API: -// UINT16 crc_t10dif_pcl( -// UINT16 init_crc, //initial CRC value, 16 bits -// const unsigned char *buf, //buffer pointer to calculate CRC on -// UINT64 len //buffer length in bytes (64-bit data) -// ); -// // Reference paper titled "Fast CRC Computation for Generic // Polynomials Using PCLMULQDQ Instruction" // URL: http://www.intel.com/content/dam/www/public/us/en/documents // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // -// #include #include @@ -74,14 +68,14 @@ .text .cpu generic+crypto - arg1_low32 .req w19 - arg2 .req x20 - arg3 .req x21 + init_crc .req w19 + buf .req x20 + len .req x21 + fold_consts_ptr .req x22 - vzr .req v13 + fold_consts .req v10 ad .req v14 - bd .req v10 k00_16 .req v15 k32_48 .req v16 @@ -143,11 +137,11 @@ __pmull_p8_core: ext t5.8b, ad.8b, ad.8b, #2 // A2 ext t6.8b, ad.8b, ad.8b, #3 // A3 - pmull t4.8h, t4.8b, bd.8b // F = A1*B + pmull t4.8h, t4.8b, fold_consts.8b // F = A1*B pmull t8.8h, ad.8b, bd1.8b // E = A*B1 - pmull t5.8h, t5.8b, bd.8b // H = A2*B + pmull t5.8h, t5.8b, fold_consts.8b // H = A2*B pmull t7.8h, ad.8b, bd2.8b // G = A*B2 - pmull t6.8h, t6.8b, bd.8b // J = A3*B + pmull t6.8h, t6.8b, fold_consts.8b // J = A3*B pmull t9.8h, ad.8b, bd3.8b // I = A*B3 pmull t3.8h, ad.8b, bd4.8b // K = A*B4 b 0f @@ -157,11 +151,11 @@ __pmull_p8_core: tbl t5.16b, {ad.16b}, perm2.16b // A2 tbl t6.16b, {ad.16b}, perm3.16b // A3 - pmull2 t4.8h, t4.16b, bd.16b // F = A1*B + pmull2 t4.8h, t4.16b, fold_consts.16b // F = A1*B pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1 - pmull2 t5.8h, t5.16b, bd.16b // H = A2*B + pmull2 t5.8h, t5.16b, fold_consts.16b // H = A2*B pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2 - pmull2 t6.8h, t6.16b, bd.16b // J = A3*B + pmull2 t6.8h, t6.16b, fold_consts.16b // J = A3*B pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3 pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4 @@ -203,14 +197,14 @@ __pmull_p8_core: ENDPROC(__pmull_p8_core) .macro __pmull_p8, rq, ad, bd, i - .ifnc \bd, v10 + .ifnc \bd, fold_consts .err .endif mov ad.16b, \ad\().16b .ifb \i - pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B + pmull \rq\().8h, \ad\().8b, \bd\().8b // D = A*B .else - pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B + pmull2 \rq\().8h, \ad\().16b, \bd\().16b // D = A*B .endif bl .L__pmull_p8_core\i @@ -219,17 +213,19 @@ ENDPROC(__pmull_p8_core) eor \rq\().16b, \rq\().16b, t6.16b .endm - .macro fold64, p, reg1, reg2 - ldp q11, q12, [arg2], #0x20 + // Fold reg1, reg2 into the next 32 data bytes, storing the result back + // into reg1, reg2. 
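	// Roughly speaking, each fold treats a 16-byte register as a 128-bit
	// polynomial over GF(2): pmull/pmull2 carry-less multiply its low and
	// high 64-bit halves by the two halves of fold_consts (powers of x
	// reduced mod G(x)), and the XORs below merge the products into the
	// freshly loaded data, so the accumulated remainder is re-expressed
	// further along the message without changing its value mod G(x).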
+ .macro fold_32_bytes, p, reg1, reg2 + ldp q11, q12, [buf], #0x20 - __pmull_\p v8, \reg1, v10, 2 - __pmull_\p \reg1, \reg1, v10 + __pmull_\p v8, \reg1, fold_consts, 2 + __pmull_\p \reg1, \reg1, fold_consts CPU_LE( rev64 v11.16b, v11.16b ) CPU_LE( rev64 v12.16b, v12.16b ) - __pmull_\p v9, \reg2, v10, 2 - __pmull_\p \reg2, \reg2, v10 + __pmull_\p v9, \reg2, fold_consts, 2 + __pmull_\p \reg2, \reg2, fold_consts CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 ) CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) @@ -240,15 +236,16 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) eor \reg2\().16b, \reg2\().16b, v12.16b .endm - .macro fold16, p, reg, rk - __pmull_\p v8, \reg, v10 - __pmull_\p \reg, \reg, v10, 2 - .ifnb \rk - ldr_l q10, \rk, x8 - __pmull_pre_\p v10 + // Fold src_reg into dst_reg, optionally loading the next fold constants + .macro fold_16_bytes, p, src_reg, dst_reg, load_next_consts + __pmull_\p v8, \src_reg, fold_consts + __pmull_\p \src_reg, \src_reg, fold_consts, 2 + .ifnb \load_next_consts + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts .endif - eor v7.16b, v7.16b, v8.16b - eor v7.16b, v7.16b, \reg\().16b + eor \dst_reg\().16b, \dst_reg\().16b, v8.16b + eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b .endm .macro __pmull_p64, rd, rn, rm, n @@ -260,40 +257,27 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 ) .endm .macro crc_t10dif_pmull, p - frame_push 3, 128 + frame_push 4, 128 - mov arg1_low32, w0 - mov arg2, x1 - mov arg3, x2 - - movi vzr.16b, #0 // init zero register + mov init_crc, w0 + mov buf, x1 + mov len, x2 __pmull_init_\p - // adjust the 16-bit initial_crc value, scale it to 32 bits - lsl arg1_low32, arg1_low32, #16 - - // check if smaller than 256 - cmp arg3, #256 - - // for sizes less than 128, we can't fold 64B at a time... - b.lt .L_less_than_128_\@ + // For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp len, #256 + b.lt .Lless_than_256_bytes_\@ - // load the initial crc value - // crc value does not need to be byte-reflected, but it needs - // to be moved to the high part of the register. - // because data will be byte-reflected and will align with - // initial crc at correct place. - movi v10.16b, #0 - mov v10.s[3], arg1_low32 // initial crc - - // receive the initial 64B data, xor the initial crc value - ldp q0, q1, [arg2] - ldp q2, q3, [arg2, #0x20] - ldp q4, q5, [arg2, #0x40] - ldp q6, q7, [arg2, #0x60] - add arg2, arg2, #0x80 + adr_l fold_consts_ptr, .Lfold_across_128_bytes_consts + // Load the first 128 data bytes. Byte swapping is necessary to make + // the bit order match the polynomial coefficient order. + ldp q0, q1, [buf] + ldp q2, q3, [buf, #0x20] + ldp q4, q5, [buf, #0x40] + ldp q6, q7, [buf, #0x60] + add buf, buf, #0x80 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( rev64 v1.16b, v1.16b ) CPU_LE( rev64 v2.16b, v2.16b ) @@ -302,7 +286,6 @@ CPU_LE( rev64 v4.16b, v4.16b ) CPU_LE( rev64 v5.16b, v5.16b ) CPU_LE( rev64 v6.16b, v6.16b ) CPU_LE( rev64 v7.16b, v7.16b ) - CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 ) @@ -312,36 +295,29 @@ CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 ) CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) - // XOR the initial_crc value - eor v0.16b, v0.16b, v10.16b - - ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4 - // type of pmull instruction - // will determine which constant to use - __pmull_pre_\p v10 + // XOR the first 16 data *bits* with the initial CRC value. 
+ movi v8.16b, #0 + mov v8.h[7], init_crc + eor v0.16b, v0.16b, v8.16b - // - // we subtract 256 instead of 128 to save one instruction from the loop - // - sub arg3, arg3, #256 + // Load the constants for folding across 128 bytes. + ld1 {fold_consts.2d}, [fold_consts_ptr] + __pmull_pre_\p fold_consts - // at this section of the code, there is 64*x+y (0<=y<64) bytes of - // buffer. The _fold_64_B_loop will fold 64B at a time - // until we have 64+y Bytes of buffer + // Subtract 128 for the 128 data bytes just consumed. Subtract another + // 128 to simplify the termination condition of the following loop. + sub len, len, #256 - // fold 64B at a time. This section of the code folds 4 vector - // registers in parallel -.L_fold_64_B_loop_\@: + // While >= 128 data bytes remain (not counting v0-v7), fold the 128 + // bytes v0-v7 into them, storing the result back into v0-v7. +.Lfold_128_bytes_loop_\@: + fold_32_bytes \p, v0, v1 + fold_32_bytes \p, v2, v3 + fold_32_bytes \p, v4, v5 + fold_32_bytes \p, v6, v7 - fold64 \p, v0, v1 - fold64 \p, v2, v3 - fold64 \p, v4, v5 - fold64 \p, v6, v7 - - subs arg3, arg3, #128 - - // check if there is another 64B in the buffer to be able to fold - b.lt .L_fold_64_B_end_\@ + subs len, len, #128 + b.lt .Lfold_128_bytes_loop_done_\@ if_will_cond_yield_neon stp q0, q1, [sp, #.Lframe_local_offset] @@ -353,228 +329,207 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) ldp q2, q3, [sp, #.Lframe_local_offset + 32] ldp q4, q5, [sp, #.Lframe_local_offset + 64] ldp q6, q7, [sp, #.Lframe_local_offset + 96] - ldr_l q10, rk3, x8 - movi vzr.16b, #0 // init zero register + ld1 {fold_consts.2d}, [fold_consts_ptr] __pmull_init_\p - __pmull_pre_\p v10 + __pmull_pre_\p fold_consts endif_yield_neon - b .L_fold_64_B_loop_\@ - -.L_fold_64_B_end_\@: - // at this point, the buffer pointer is pointing at the last y Bytes - // of the buffer the 64B of folded data is in 4 of the vector - // registers: v0, v1, v2, v3 - - // fold the 8 vector registers to 1 vector register with different - // constants - - ldr_l q10, rk9, x8 - __pmull_pre_\p v10 - - fold16 \p, v0, rk11 - fold16 \p, v1, rk13 - fold16 \p, v2, rk15 - fold16 \p, v3, rk17 - fold16 \p, v4, rk19 - fold16 \p, v5, rk1 - fold16 \p, v6 - - // instead of 64, we add 48 to the loop counter to save 1 instruction - // from the loop instead of a cmp instruction, we use the negative - // flag with the jl instruction - adds arg3, arg3, #(128-16) - b.lt .L_final_reduction_for_128_\@ - - // now we have 16+y bytes left to reduce. 16 Bytes is in register v7 - // and the rest is in memory. We can fold 16 bytes at a time if y>=16 - // continue folding 16B at a time - -.L_16B_reduction_loop_\@: - __pmull_\p v8, v7, v10 - __pmull_\p v7, v7, v10, 2 + b .Lfold_128_bytes_loop_\@ + +.Lfold_128_bytes_loop_done_\@: + + // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7. + + // Fold across 64 bytes. + add fold_consts_ptr, fold_consts_ptr, #16 + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + fold_16_bytes \p, v0, v4 + fold_16_bytes \p, v1, v5 + fold_16_bytes \p, v2, v6 + fold_16_bytes \p, v3, v7, 1 + // Fold across 32 bytes. + fold_16_bytes \p, v4, v6 + fold_16_bytes \p, v5, v7, 1 + // Fold across 16 bytes. + fold_16_bytes \p, v6, v7 + + // Add 128 to get the correct number of data bytes remaining in 0...127 + // (not counting v7), following the previous extra subtraction by 128. + // Then subtract 16 to simplify the termination condition of the + // following loop. 
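	// E.g. for a 300-byte buffer: 128 bytes are loaded up front and len
	// becomes 300 - 256 = 44; one pass of the 128-byte loop leaves
	// len = -84, the adds below restores it to 28, the 16-byte loop then
	// runs twice (len = 12, then -4), and the final adds leaves len = 12,
	// i.e. a 12-byte partial segment to handle.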
+ adds len, len, #(128-16) + + // While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7 + // into them, storing the result back into v7. + b.lt .Lfold_16_bytes_loop_done_\@ +.Lfold_16_bytes_loop_\@: + __pmull_\p v8, v7, fold_consts + __pmull_\p v7, v7, fold_consts, 2 eor v7.16b, v7.16b, v8.16b - - ldr q0, [arg2], #16 + ldr q0, [buf], #16 CPU_LE( rev64 v0.16b, v0.16b ) CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) eor v7.16b, v7.16b, v0.16b - subs arg3, arg3, #16 - - // instead of a cmp instruction, we utilize the flags with the - // jge instruction equivalent of: cmp arg3, 16-16 - // check if there is any more 16B in the buffer to be able to fold - b.ge .L_16B_reduction_loop_\@ - - // now we have 16+z bytes left to reduce, where 0<= z < 16. - // first, we reduce the data in the xmm7 register - -.L_final_reduction_for_128_\@: - // check if any more data to fold. If not, compute the CRC of - // the final 128 bits - adds arg3, arg3, #16 - b.eq .L_128_done_\@ - - // here we are getting data that is less than 16 bytes. - // since we know that there was data before the pointer, we can - // offset the input pointer before the actual point, to receive - // exactly 16 bytes. after that the registers need to be adjusted. -.L_get_last_two_regs_\@: - add arg2, arg2, arg3 - ldr q1, [arg2, #-16] -CPU_LE( rev64 v1.16b, v1.16b ) -CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 ) - - // get rid of the extra data that was loaded before - // load the shift constant - adr_l x4, tbl_shf_table + 16 - sub x4, x4, arg3 - ld1 {v0.16b}, [x4] - - // shift v2 to the left by arg3 bytes - tbl v2.16b, {v7.16b}, v0.16b - - // shift v7 to the right by 16-arg3 bytes - movi v9.16b, #0x80 - eor v0.16b, v0.16b, v9.16b - tbl v7.16b, {v7.16b}, v0.16b - - // blend - sshr v0.16b, v0.16b, #7 // convert to 8-bit mask - bsl v0.16b, v2.16b, v1.16b - - // fold 16 Bytes - __pmull_\p v8, v7, v10 - __pmull_\p v7, v7, v10, 2 - eor v7.16b, v7.16b, v8.16b - eor v7.16b, v7.16b, v0.16b + subs len, len, #16 + b.ge .Lfold_16_bytes_loop_\@ + +.Lfold_16_bytes_loop_done_\@: + // Add 16 to get the correct number of data bytes remaining in 0...15 + // (not counting v7), following the previous extra subtraction by 16. + adds len, len, #16 + b.eq .Lreduce_final_16_bytes_\@ + +.Lhandle_partial_segment_\@: + // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first + // 16 bytes are in v7 and the rest are the remaining data in 'buf'. To + // do this without needing a fold constant for each possible 'len', + // redivide the bytes into a first chunk of 'len' bytes and a second + // chunk of 16 bytes, then fold the first chunk into the second. + + // v0 = last 16 original data bytes + add buf, buf, len + ldr q0, [buf, #-16] +CPU_LE( rev64 v0.16b, v0.16b ) +CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 ) -.L_128_done_\@: - // compute crc of a 128-bit value - ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10 - __pmull_pre_\p v10 + // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes. + adr_l x4, .Lbyteshift_table + 16 + sub x4, x4, len + ld1 {v2.16b}, [x4] + tbl v1.16b, {v7.16b}, v2.16b - // 64b fold - ext v0.16b, vzr.16b, v7.16b, #8 - mov v7.d[0], v7.d[1] - __pmull_\p v7, v7, v10 - eor v7.16b, v7.16b, v0.16b + // v3 = first chunk: v7 right-shifted by '16-len' bytes. 
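	// (Both shifts reuse the same window of .Lbyteshift_table: tbl returns
	// 0x00 for any index whose top bit is set, so the XOR with 0x80 below
	// flips which byte positions are selected from v7 and which are zeroed,
	// turning the left-shift index vector into the complementary
	// right-shift one.)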
+ movi v3.16b, #0x80 + eor v2.16b, v2.16b, v3.16b + tbl v3.16b, {v7.16b}, v2.16b - // 32b fold - ext v0.16b, v7.16b, vzr.16b, #4 - mov v7.s[3], vzr.s[0] - __pmull_\p v0, v0, v10, 2 - eor v7.16b, v7.16b, v0.16b + // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes. + sshr v2.16b, v2.16b, #7 - // barrett reduction - ldr_l q10, rk7, x8 - __pmull_pre_\p v10 - mov v0.d[0], v7.d[1] + // v2 = second chunk: 'len' bytes from v0 (low-order bytes), + // then '16-len' bytes from v1 (high-order bytes). + bsl v2.16b, v1.16b, v0.16b - __pmull_\p v0, v0, v10 - ext v0.16b, vzr.16b, v0.16b, #12 - __pmull_\p v0, v0, v10, 2 - ext v0.16b, vzr.16b, v0.16b, #12 + // Fold the first chunk into the second chunk, storing the result in v7. + __pmull_\p v0, v3, fold_consts + __pmull_\p v7, v3, fold_consts, 2 eor v7.16b, v7.16b, v0.16b - mov w0, v7.s[1] - -.L_cleanup_\@: - // scale the result back to 16 bits - lsr x0, x0, #16 + eor v7.16b, v7.16b, v2.16b + +.Lreduce_final_16_bytes_\@: + // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC. + + movi v2.16b, #0 // init zero register + + // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + + // Fold the high 64 bits into the low 64 bits, while also multiplying by + // x^64. This produces a 128-bit value congruent to x^64 * M(x) and + // whose low 48 bits are 0. + ext v0.16b, v2.16b, v7.16b, #8 + __pmull_\p v7, v7, fold_consts, 2 // high bits * x^48 * (x^80 mod G(x)) + eor v0.16b, v0.16b, v7.16b // + low bits * x^64 + + // Fold the high 32 bits into the low 96 bits. This produces a 96-bit + // value congruent to x^64 * M(x) and whose low 48 bits are 0. + ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits + mov v0.s[3], v2.s[0] // zero high 32 bits + __pmull_\p v1, v1, fold_consts // high 32 bits * x^48 * (x^48 mod G(x)) + eor v0.16b, v0.16b, v1.16b // + low bits + + // Load G(x) and floor(x^48 / G(x)). + ld1 {fold_consts.2d}, [fold_consts_ptr] + __pmull_pre_\p fold_consts + + // Use Barrett reduction to compute the final CRC value. + __pmull_\p v1, v0, fold_consts, 2 // high 32 bits * floor(x^48 / G(x)) + ushr v1.2d, v1.2d, #32 // /= x^32 + __pmull_\p v1, v1, fold_consts // *= G(x) + ushr v0.2d, v0.2d, #48 + eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits + // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0. + + umov w0, v0.h[0] frame_pop ret -.L_less_than_128_\@: - cbz arg3, .L_cleanup_\@ +.Lless_than_256_bytes_\@: + // Checksumming a buffer of length 16...255 bytes - movi v0.16b, #0 - mov v0.s[3], arg1_low32 // get the initial crc value + adr_l fold_consts_ptr, .Lfold_across_16_bytes_consts - ldr q7, [arg2], #0x10 + // Load the first 16 data bytes. + ldr q7, [buf], #0x10 CPU_LE( rev64 v7.16b, v7.16b ) CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 ) - eor v7.16b, v7.16b, v0.16b // xor the initial crc value - - cmp arg3, #16 - b.eq .L_128_done_\@ // exactly 16 left - b.lt .L_less_than_16_left_\@ - - ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10 - __pmull_pre_\p v10 - - // update the counter. subtract 32 instead of 16 to save one - // instruction from the loop - subs arg3, arg3, #32 - b.ge .L_16B_reduction_loop_\@ - - add arg3, arg3, #16 - b .L_get_last_two_regs_\@ - -.L_less_than_16_left_\@: - // shl r9, 4 - adr_l x0, tbl_shf_table + 16 - sub x0, x0, arg3 - ld1 {v0.16b}, [x0] - movi v9.16b, #0x80 - eor v0.16b, v0.16b, v9.16b - tbl v7.16b, {v7.16b}, v0.16b - b .L_128_done_\@ + + // XOR the first 16 data *bits* with the initial CRC value. 
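+	// (The block in v7 holds the data in most-significant-byte-first order,
+	// so placing init_crc in v0.h[7] lines the 16-bit CRC up with the first
+	// 16 bits of the message.)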
+ movi v0.16b, #0 + mov v0.h[7], init_crc + eor v7.16b, v7.16b, v0.16b + + // Load the fold-across-16-bytes constants. + ld1 {fold_consts.2d}, [fold_consts_ptr], #16 + __pmull_pre_\p fold_consts + + cmp len, #16 + b.eq .Lreduce_final_16_bytes_\@ // len == 16 + subs len, len, #32 + b.ge .Lfold_16_bytes_loop_\@ // 32 <= len <= 255 + add len, len, #16 + b .Lhandle_partial_segment_\@ // 17 <= len <= 31 .endm +// +// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// ENTRY(crc_t10dif_pmull_p8) crc_t10dif_pmull p8 ENDPROC(crc_t10dif_pmull_p8) .align 5 +// +// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len); +// +// Assumes len >= 16. +// ENTRY(crc_t10dif_pmull_p64) crc_t10dif_pmull p64 ENDPROC(crc_t10dif_pmull_p64) -// precomputed constants -// these constants are precomputed from the poly: -// 0x8bb70000 (0x8bb7 scaled to 32 bits) .section ".rodata", "a" .align 4 -// Q = 0x18BB70000 -// rk1 = 2^(32*3) mod Q << 32 -// rk2 = 2^(32*5) mod Q << 32 -// rk3 = 2^(32*15) mod Q << 32 -// rk4 = 2^(32*17) mod Q << 32 -// rk5 = 2^(32*3) mod Q << 32 -// rk6 = 2^(32*2) mod Q << 32 -// rk7 = floor(2^64/Q) -// rk8 = Q - -rk1: .octa 0x06df0000000000002d56000000000000 -rk3: .octa 0x7cf50000000000009d9d000000000000 -rk5: .octa 0x13680000000000002d56000000000000 -rk7: .octa 0x000000018bb7000000000001f65a57f8 -rk9: .octa 0xbfd6000000000000ceae000000000000 -rk11: .octa 0x713c0000000000001e16000000000000 -rk13: .octa 0x80a6000000000000f7f9000000000000 -rk15: .octa 0xe658000000000000044c000000000000 -rk17: .octa 0xa497000000000000ad18000000000000 -rk19: .octa 0xe7b50000000000006ee3000000000000 - -tbl_shf_table: -// use these values for shift constants for the tbl/tbx instruction -// different alignments result in values as shown: -// DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -// DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -// DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -// DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -// DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -// DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -// DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -// DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 -// DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -// DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -// DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -// DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -// DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -// DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -// DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 +// Fold constants precomputed from the polynomial 0x18bb7 +// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 // x^(8*128) mod G(x) + .quad 0x0000000000002295 // x^(8*128+64) mod G(x) +// .Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 // x^(4*128) mod G(x) + .quad 0x000000000000dd31 // x^(4*128+64) mod G(x) +// .Lfold_across_32_bytes_consts: + .quad 0x000000000000857d // x^(2*128) mod G(x) + .quad 0x0000000000007acc // x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 // x^(1*128) mod G(x) + .quad 0x0000000000001faa // x^(1*128+64) mod G(x) +// .Lfinal_fold_consts: + .quad 0x1368000000000000 // x^48 * (x^48 mod G(x)) + .quad 
0x2d56000000000000 // x^48 * (x^80 mod G(x)) +// .Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 // G(x) + .quad 0x00000001f65a57f8 // floor(x^48 / G(x)) + +// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - +// len] is the index vector to shift left by 'len' bytes, and is also {0x80, +// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c index b461d62023f2..dd325829ee44 100644 --- a/arch/arm64/crypto/crct10dif-ce-glue.c +++ b/arch/arm64/crypto/crct10dif-ce-glue.c @@ -22,10 +22,8 @@ #define CRC_T10DIF_PMULL_CHUNK_SIZE 16U -asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len); -asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len); - -static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len); +asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len); +asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len); static int crct10dif_init(struct shash_desc *desc) { @@ -35,30 +33,33 @@ static int crct10dif_init(struct shash_desc *desc) return 0; } -static int crct10dif_update(struct shash_desc *desc, const u8 *data, +static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data, unsigned int length) { u16 *crc = shash_desc_ctx(desc); - unsigned int l; - if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) { - l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE - - ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)); + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p8(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); + } - *crc = crc_t10dif_generic(*crc, data, l); + return 0; +} - length -= l; - data += l; - } +static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data, + unsigned int length) +{ + u16 *crc = shash_desc_ctx(desc); - if (length > 0) { - if (may_use_simd()) { - kernel_neon_begin(); - *crc = crc_t10dif_pmull(*crc, data, length); - kernel_neon_end(); - } else { - *crc = crc_t10dif_generic(*crc, data, length); - } + if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) { + kernel_neon_begin(); + *crc = crc_t10dif_pmull_p64(*crc, data, length); + kernel_neon_end(); + } else { + *crc = crc_t10dif_generic(*crc, data, length); } return 0; @@ -72,10 +73,22 @@ static int crct10dif_final(struct shash_desc *desc, u8 *out) return 0; } -static struct shash_alg crc_t10dif_alg = { +static struct shash_alg crc_t10dif_alg[] = {{ .digestsize = CRC_T10DIF_DIGEST_SIZE, .init = crct10dif_init, - .update = crct10dif_update, + .update = crct10dif_update_pmull_p8, + .final = crct10dif_final, + .descsize = CRC_T10DIF_DIGEST_SIZE, + + .base.cra_name = "crct10dif", + .base.cra_driver_name = "crct10dif-arm64-neon", + .base.cra_priority = 100, + .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, + .base.cra_module = THIS_MODULE, +}, { + .digestsize = CRC_T10DIF_DIGEST_SIZE, + .init = crct10dif_init, + .update = crct10dif_update_pmull_p64, .final = crct10dif_final, .descsize = CRC_T10DIF_DIGEST_SIZE, @@ -84,21 +97,25 @@ static struct shash_alg crc_t10dif_alg = { .base.cra_priority = 200, .base.cra_blocksize = CRC_T10DIF_BLOCK_SIZE, .base.cra_module = THIS_MODULE, -}; +}}; 
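The fold constants in the .rodata table above are, as their comments state, simple functions of G(x) = 0x18bb7. As a cross-check they can be recomputed with plain GF(2) polynomial arithmetic; the standalone C sketch below (illustrative only, helper names made up, not part of this patch) derives x^n mod G(x) and floor(x^48 / G(x)) and should print the values listed in the table. In the bit-per-coefficient encoding used here, the x^48 factor in the final-fold constants is just a left shift by 48 bits, which is why those two .quad values have their low 48 bits clear.

/*
 * Recompute the CRC-T10DIF fold constants from G(x) = 0x18bb7.
 * Standalone userspace sketch, not part of the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define G_POLY	0x18bb7u	/* x^16 + x^15 + x^11 + ... + x + 1 */

/* x^n mod G(x) over GF(2): multiply by x n times, reduce when degree hits 16 */
static uint32_t xn_mod_g(unsigned int n)
{
	uint32_t r = 1;

	while (n--) {
		r <<= 1;
		if (r & 0x10000)
			r ^= G_POLY;
	}
	return r;
}

/* floor(x^n / G(x)) over GF(2): the quotient of the same long division */
static uint64_t xn_div_g(unsigned int n)
{
	uint64_t q = 0;
	uint32_t r = 1;

	while (n--) {
		r <<= 1;
		q <<= 1;
		if (r & 0x10000) {
			r ^= G_POLY;
			q |= 1;
		}
	}
	return q;
}

int main(void)
{
	unsigned int i;

	for (i = 8; i >= 1; i /= 2)	/* 128-, 64-, 32- and 16-byte folds */
		printf("x^(%u*128) mod G = 0x%04x   x^(%u*128+64) mod G = 0x%04x\n",
		       i, (unsigned)xn_mod_g(i * 128),
		       i, (unsigned)xn_mod_g(i * 128 + 64));
	printf("x^48 * (x^48 mod G) = 0x%016llx\n",
	       (unsigned long long)xn_mod_g(48) << 48);
	printf("x^48 * (x^80 mod G) = 0x%016llx\n",
	       (unsigned long long)xn_mod_g(80) << 48);
	printf("floor(x^48 / G)     = 0x%llx\n",
	       (unsigned long long)xn_div_g(48));
	return 0;
}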
static int __init crc_t10dif_mod_init(void) { if (elf_hwcap & HWCAP_PMULL) - crc_t10dif_pmull = crc_t10dif_pmull_p64; + return crypto_register_shashes(crc_t10dif_alg, + ARRAY_SIZE(crc_t10dif_alg)); else - crc_t10dif_pmull = crc_t10dif_pmull_p8; - - return crypto_register_shash(&crc_t10dif_alg); + /* only register the first array element */ + return crypto_register_shash(crc_t10dif_alg); } static void __exit crc_t10dif_mod_exit(void) { - crypto_unregister_shash(&crc_t10dif_alg); + if (elf_hwcap & HWCAP_PMULL) + crypto_unregister_shashes(crc_t10dif_alg, + ARRAY_SIZE(crc_t10dif_alg)); + else + crypto_unregister_shash(crc_t10dif_alg); } module_cpu_feature_match(ASIMD, crc_t10dif_mod_init); diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index 067d8937d5af..791ad422c427 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -60,10 +60,6 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src, struct ghash_key const *k, const char *head); -static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src, - struct ghash_key const *k, - const char *head); - asmlinkage void pmull_gcm_encrypt(int blocks, u64 dg[], u8 dst[], const u8 src[], struct ghash_key const *k, u8 ctr[], u32 const rk[], int rounds, @@ -87,11 +83,15 @@ static int ghash_init(struct shash_desc *desc) } static void ghash_do_update(int blocks, u64 dg[], const char *src, - struct ghash_key *key, const char *head) + struct ghash_key *key, const char *head, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + struct ghash_key const *k, + const char *head)) { if (likely(may_use_simd())) { kernel_neon_begin(); - pmull_ghash_update(blocks, dg, src, key, head); + simd_update(blocks, dg, src, key, head); kernel_neon_end(); } else { be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; @@ -119,8 +119,12 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src, /* avoid hogging the CPU for too long */ #define MAX_BLOCKS (SZ_64K / GHASH_BLOCK_SIZE) -static int ghash_update(struct shash_desc *desc, const u8 *src, - unsigned int len) +static int __ghash_update(struct shash_desc *desc, const u8 *src, + unsigned int len, + void (*simd_update)(int blocks, u64 dg[], + const char *src, + struct ghash_key const *k, + const char *head)) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -146,7 +150,8 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, int chunk = min(blocks, MAX_BLOCKS); ghash_do_update(chunk, ctx->digest, src, key, - partial ? ctx->buf : NULL); + partial ? 
ctx->buf : NULL, + simd_update); blocks -= chunk; src += chunk * GHASH_BLOCK_SIZE; @@ -158,7 +163,19 @@ static int ghash_update(struct shash_desc *desc, const u8 *src, return 0; } -static int ghash_final(struct shash_desc *desc, u8 *dst) +static int ghash_update_p8(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + return __ghash_update(desc, src, len, pmull_ghash_update_p8); +} + +static int ghash_update_p64(struct shash_desc *desc, const u8 *src, + unsigned int len) +{ + return __ghash_update(desc, src, len, pmull_ghash_update_p64); +} + +static int ghash_final_p8(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; @@ -168,7 +185,28 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); - ghash_do_update(1, ctx->digest, ctx->buf, key, NULL); + ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p8); + } + put_unaligned_be64(ctx->digest[1], dst); + put_unaligned_be64(ctx->digest[0], dst + 8); + + *ctx = (struct ghash_desc_ctx){}; + return 0; +} + +static int ghash_final_p64(struct shash_desc *desc, u8 *dst) +{ + struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); + unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; + + if (partial) { + struct ghash_key *key = crypto_shash_ctx(desc->tfm); + + memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); + + ghash_do_update(1, ctx->digest, ctx->buf, key, NULL, + pmull_ghash_update_p64); } put_unaligned_be64(ctx->digest[1], dst); put_unaligned_be64(ctx->digest[0], dst + 8); @@ -224,7 +262,21 @@ static int ghash_setkey(struct crypto_shash *tfm, return __ghash_setkey(key, inkey, keylen); } -static struct shash_alg ghash_alg = { +static struct shash_alg ghash_alg[] = {{ + .base.cra_name = "ghash", + .base.cra_driver_name = "ghash-neon", + .base.cra_priority = 100, + .base.cra_blocksize = GHASH_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ghash_key), + .base.cra_module = THIS_MODULE, + + .digestsize = GHASH_DIGEST_SIZE, + .init = ghash_init, + .update = ghash_update_p8, + .final = ghash_final_p8, + .setkey = ghash_setkey, + .descsize = sizeof(struct ghash_desc_ctx), +}, { .base.cra_name = "ghash", .base.cra_driver_name = "ghash-ce", .base.cra_priority = 200, @@ -234,11 +286,11 @@ static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, - .update = ghash_update, - .final = ghash_final, + .update = ghash_update_p64, + .final = ghash_final_p64, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), -}; +}}; static int num_rounds(struct crypto_aes_ctx *ctx) { @@ -301,7 +353,8 @@ static void gcm_update_mac(u64 dg[], const u8 *src, int count, u8 buf[], int blocks = count / GHASH_BLOCK_SIZE; ghash_do_update(blocks, dg, src, &ctx->ghash_key, - *buf_count ? buf : NULL); + *buf_count ? 
buf : NULL, + pmull_ghash_update_p64); src += blocks * GHASH_BLOCK_SIZE; count %= GHASH_BLOCK_SIZE; @@ -345,7 +398,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[]) if (buf_count) { memset(&buf[buf_count], 0, GHASH_BLOCK_SIZE - buf_count); - ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL); + ghash_do_update(1, dg, buf, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); } } @@ -358,7 +412,8 @@ static void gcm_final(struct aead_request *req, struct gcm_aes_ctx *ctx, lengths.a = cpu_to_be64(req->assoclen * 8); lengths.b = cpu_to_be64(cryptlen * 8); - ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL); + ghash_do_update(1, dg, (void *)&lengths, &ctx->ghash_key, NULL, + pmull_ghash_update_p64); put_unaligned_be64(dg[1], mac); put_unaligned_be64(dg[0], mac + 8); @@ -434,7 +489,7 @@ static int gcm_encrypt(struct aead_request *req) ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg, walk.dst.virt.addr, &ctx->ghash_key, - NULL); + NULL, pmull_ghash_update_p64); err = skcipher_walk_done(&walk, walk.nbytes % (2 * AES_BLOCK_SIZE)); @@ -469,7 +524,8 @@ static int gcm_encrypt(struct aead_request *req) memcpy(buf, dst, nbytes); memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); - ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head, + pmull_ghash_update_p64); err = skcipher_walk_done(&walk, 0); } @@ -558,7 +614,8 @@ static int gcm_decrypt(struct aead_request *req) u8 *src = walk.src.virt.addr; ghash_do_update(blocks, dg, walk.src.virt.addr, - &ctx->ghash_key, NULL); + &ctx->ghash_key, NULL, + pmull_ghash_update_p64); do { __aes_arm64_encrypt(ctx->aes_key.key_enc, @@ -602,7 +659,8 @@ static int gcm_decrypt(struct aead_request *req) memcpy(buf, src, nbytes); memset(buf + nbytes, 0, GHASH_BLOCK_SIZE - nbytes); - ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head); + ghash_do_update(!!nbytes, dg, buf, &ctx->ghash_key, head, + pmull_ghash_update_p64); crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, iv, walk.nbytes); @@ -650,26 +708,30 @@ static int __init ghash_ce_mod_init(void) return -ENODEV; if (elf_hwcap & HWCAP_PMULL) - pmull_ghash_update = pmull_ghash_update_p64; - + ret = crypto_register_shashes(ghash_alg, + ARRAY_SIZE(ghash_alg)); else - pmull_ghash_update = pmull_ghash_update_p8; + /* only register the first array element */ + ret = crypto_register_shash(ghash_alg); - ret = crypto_register_shash(&ghash_alg); if (ret) return ret; if (elf_hwcap & HWCAP_PMULL) { ret = crypto_register_aead(&gcm_aes_alg); if (ret) - crypto_unregister_shash(&ghash_alg); + crypto_unregister_shashes(ghash_alg, + ARRAY_SIZE(ghash_alg)); } return ret; } static void __exit ghash_ce_mod_exit(void) { - crypto_unregister_shash(&ghash_alg); + if (elf_hwcap & HWCAP_PMULL) + crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg)); + else + crypto_unregister_shash(ghash_alg); crypto_unregister_aead(&gcm_aes_alg); } diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 2def77ec14be..7628efbe6c12 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -18,6 +18,7 @@ #include #include +#include #include #include @@ -110,9 +111,10 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu) static inline void arch_fix_phys_package_id(int num, u32 slot) { } void __init acpi_init_cpus(void); - +int apei_claim_sea(struct pt_regs *regs); #else static inline void acpi_init_cpus(void) { } +static inline int apei_claim_sea(struct pt_regs *regs) { return -ENOENT; } #endif /* 
CONFIG_ACPI */ #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index 4b650ec1d7dd..b9f8d787eea9 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -14,8 +14,6 @@ #include #include -extern int alternatives_applied; - struct alt_instr { s32 orig_offset; /* offset to original instruction */ s32 alt_offset; /* offset to replacement instruction */ @@ -27,7 +25,9 @@ struct alt_instr { typedef void (*alternative_cb_t)(struct alt_instr *alt, __le32 *origptr, __le32 *updptr, int nr_inst); +void __init apply_boot_alternatives(void); void __init apply_alternatives_all(void); +bool alternative_is_applied(u16 cpufeature); #ifdef CONFIG_MODULES void apply_alternatives_module(void *start, size_t length); diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h index e278f94df0c9..14b41ddc68ba 100644 --- a/arch/arm64/include/asm/arch_gicv3.h +++ b/arch/arm64/include/asm/arch_gicv3.h @@ -22,6 +22,7 @@ #ifndef __ASSEMBLY__ +#include #include #include #include @@ -114,6 +115,21 @@ static inline void gic_write_bpr1(u32 val) write_sysreg_s(val, SYS_ICC_BPR1_EL1); } +static inline u32 gic_read_pmr(void) +{ + return read_sysreg_s(SYS_ICC_PMR_EL1); +} + +static inline void gic_write_pmr(u32 val) +{ + write_sysreg_s(val, SYS_ICC_PMR_EL1); +} + +static inline u32 gic_read_rpr(void) +{ + return read_sysreg_s(SYS_ICC_RPR_EL1); +} + #define gic_read_typer(c) readq_relaxed(c) #define gic_write_irouter(v, c) writeq_relaxed(v, c) #define gic_read_lpir(c) readq_relaxed(c) @@ -140,5 +156,21 @@ static inline void gic_write_bpr1(u32 val) #define gits_write_vpendbaser(v, c) writeq_relaxed(v, c) #define gits_read_vpendbaser(c) readq_relaxed(c) +static inline bool gic_prio_masking_enabled(void) +{ + return system_uses_irq_prio_masking(); +} + +static inline void gic_pmr_mask_irqs(void) +{ + BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF); + gic_write_pmr(GIC_PRIO_IRQOFF); +} + +static inline void gic_arch_enable_irqs(void) +{ + asm volatile ("msr daifclr, #2" : : : "memory"); +} + #endif /* __ASSEMBLY__ */ #endif /* __ASM_ARCH_GICV3_H */ diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h index 4128bec033f6..f74909ba29bd 100644 --- a/arch/arm64/include/asm/asm-uaccess.h +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -24,7 +24,7 @@ .endm .macro __uaccess_ttbr0_enable, tmp1, tmp2 - get_thread_info \tmp1 + get_current_task \tmp1 ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 mrs \tmp2, ttbr1_el1 extr \tmp2, \tmp2, \tmp1, #48 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 4feb6119c3c9..c5308d01e228 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -62,16 +63,8 @@ .endm /* - * Enable and disable interrupts. + * Save/restore interrupts. */ - .macro disable_irq - msr daifset, #2 - .endm - - .macro enable_irq - msr daifclr, #2 - .endm - .macro save_and_disable_irq, flags mrs \flags, daif msr daifset, #2 @@ -536,9 +529,9 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm /* - * Return the current thread_info. + * Return the current task_struct. 
*/ - .macro get_thread_info, rd + .macro get_current_task, rd mrs \rd, sp_el0 .endm @@ -604,6 +597,25 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU #endif .endm +/* + * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU. + */ + .macro tcr_clear_errata_bits, tcr, tmp1, tmp2 +#ifdef CONFIG_FUJITSU_ERRATUM_010001 + mrs \tmp1, midr_el1 + + mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK + and \tmp1, \tmp1, \tmp2 + mov_q \tmp2, MIDR_FUJITSU_ERRATUM_010001 + cmp \tmp1, \tmp2 + b.ne 10f + + mov_q \tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001 + bic \tcr, \tcr, \tmp2 +10: +#endif /* CONFIG_FUJITSU_ERRATUM_010001 */ + .endm + /** * Errata workaround prior to disable MMU. Insert an ISB immediately prior * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. @@ -721,7 +733,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .macro if_will_cond_yield_neon #ifdef CONFIG_PREEMPT - get_thread_info x0 + get_current_task x0 ldr x0, [x0, #TSK_TI_PREEMPT] sub x0, x0, #PREEMPT_DISABLE_OFFSET cbz x0, .Lyield_\@ diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 9bca54dda75c..1f4e9ee641c9 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -42,124 +42,131 @@ #define ATOMIC_INIT(i) { (i) } -#define atomic_read(v) READ_ONCE((v)->counter) -#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) - -#define atomic_add_return_relaxed atomic_add_return_relaxed -#define atomic_add_return_acquire atomic_add_return_acquire -#define atomic_add_return_release atomic_add_return_release -#define atomic_add_return atomic_add_return - -#define atomic_sub_return_relaxed atomic_sub_return_relaxed -#define atomic_sub_return_acquire atomic_sub_return_acquire -#define atomic_sub_return_release atomic_sub_return_release -#define atomic_sub_return atomic_sub_return - -#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed -#define atomic_fetch_add_acquire atomic_fetch_add_acquire -#define atomic_fetch_add_release atomic_fetch_add_release -#define atomic_fetch_add atomic_fetch_add - -#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed -#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire -#define atomic_fetch_sub_release atomic_fetch_sub_release -#define atomic_fetch_sub atomic_fetch_sub - -#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed -#define atomic_fetch_and_acquire atomic_fetch_and_acquire -#define atomic_fetch_and_release atomic_fetch_and_release -#define atomic_fetch_and atomic_fetch_and - -#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed -#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire -#define atomic_fetch_andnot_release atomic_fetch_andnot_release -#define atomic_fetch_andnot atomic_fetch_andnot - -#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed -#define atomic_fetch_or_acquire atomic_fetch_or_acquire -#define atomic_fetch_or_release atomic_fetch_or_release -#define atomic_fetch_or atomic_fetch_or - -#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed -#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire -#define atomic_fetch_xor_release atomic_fetch_xor_release -#define atomic_fetch_xor atomic_fetch_xor - -#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) -#define atomic_xchg_acquire(v, new) xchg_acquire(&((v)->counter), (new)) -#define atomic_xchg_release(v, new) xchg_release(&((v)->counter), (new)) -#define atomic_xchg(v, new) xchg(&((v)->counter), (new)) - -#define atomic_cmpxchg_relaxed(v, old, new) \ - 
cmpxchg_relaxed(&((v)->counter), (old), (new)) -#define atomic_cmpxchg_acquire(v, old, new) \ - cmpxchg_acquire(&((v)->counter), (old), (new)) -#define atomic_cmpxchg_release(v, old, new) \ - cmpxchg_release(&((v)->counter), (old), (new)) -#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new)) - -#define atomic_andnot atomic_andnot +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire +#define arch_atomic_add_return_release arch_atomic_add_return_release +#define arch_atomic_add_return arch_atomic_add_return + +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire +#define arch_atomic_sub_return_release arch_atomic_sub_return_release +#define arch_atomic_sub_return arch_atomic_sub_return + +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire +#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release +#define arch_atomic_fetch_add arch_atomic_fetch_add + +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release +#define arch_atomic_fetch_sub arch_atomic_fetch_sub + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire +#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release +#define arch_atomic_fetch_and arch_atomic_fetch_and + +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot + +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire +#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release +#define arch_atomic_fetch_or arch_atomic_fetch_or + +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release +#define arch_atomic_fetch_xor arch_atomic_fetch_xor + +#define arch_atomic_xchg_relaxed(v, new) \ + arch_xchg_relaxed(&((v)->counter), (new)) +#define arch_atomic_xchg_acquire(v, new) \ + arch_xchg_acquire(&((v)->counter), (new)) +#define arch_atomic_xchg_release(v, new) \ + arch_xchg_release(&((v)->counter), (new)) +#define arch_atomic_xchg(v, new) \ + arch_xchg(&((v)->counter), (new)) + +#define arch_atomic_cmpxchg_relaxed(v, old, new) \ + arch_cmpxchg_relaxed(&((v)->counter), (old), (new)) +#define arch_atomic_cmpxchg_acquire(v, old, new) \ + arch_cmpxchg_acquire(&((v)->counter), (old), (new)) +#define arch_atomic_cmpxchg_release(v, old, new) \ + arch_cmpxchg_release(&((v)->counter), (old), (new)) +#define arch_atomic_cmpxchg(v, old, new) \ + arch_cmpxchg(&((v)->counter), (old), (new)) + +#define arch_atomic_andnot arch_atomic_andnot /* - * 64-bit atomic operations. + * 64-bit arch_atomic operations. 
*/ -#define ATOMIC64_INIT ATOMIC_INIT -#define atomic64_read atomic_read -#define atomic64_set atomic_set - -#define atomic64_add_return_relaxed atomic64_add_return_relaxed -#define atomic64_add_return_acquire atomic64_add_return_acquire -#define atomic64_add_return_release atomic64_add_return_release -#define atomic64_add_return atomic64_add_return - -#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed -#define atomic64_sub_return_acquire atomic64_sub_return_acquire -#define atomic64_sub_return_release atomic64_sub_return_release -#define atomic64_sub_return atomic64_sub_return - -#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed -#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire -#define atomic64_fetch_add_release atomic64_fetch_add_release -#define atomic64_fetch_add atomic64_fetch_add - -#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed -#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire -#define atomic64_fetch_sub_release atomic64_fetch_sub_release -#define atomic64_fetch_sub atomic64_fetch_sub - -#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed -#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire -#define atomic64_fetch_and_release atomic64_fetch_and_release -#define atomic64_fetch_and atomic64_fetch_and - -#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed -#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire -#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release -#define atomic64_fetch_andnot atomic64_fetch_andnot - -#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed -#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire -#define atomic64_fetch_or_release atomic64_fetch_or_release -#define atomic64_fetch_or atomic64_fetch_or - -#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed -#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire -#define atomic64_fetch_xor_release atomic64_fetch_xor_release -#define atomic64_fetch_xor atomic64_fetch_xor - -#define atomic64_xchg_relaxed atomic_xchg_relaxed -#define atomic64_xchg_acquire atomic_xchg_acquire -#define atomic64_xchg_release atomic_xchg_release -#define atomic64_xchg atomic_xchg - -#define atomic64_cmpxchg_relaxed atomic_cmpxchg_relaxed -#define atomic64_cmpxchg_acquire atomic_cmpxchg_acquire -#define atomic64_cmpxchg_release atomic_cmpxchg_release -#define atomic64_cmpxchg atomic_cmpxchg - -#define atomic64_andnot atomic64_andnot - -#define atomic64_dec_if_positive atomic64_dec_if_positive +#define ATOMIC64_INIT ATOMIC_INIT +#define arch_atomic64_read arch_atomic_read +#define arch_atomic64_set arch_atomic_set + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire +#define arch_atomic64_add_return_release arch_atomic64_add_return_release +#define arch_atomic64_add_return arch_atomic64_add_return + +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire +#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release +#define arch_atomic64_sub_return arch_atomic64_sub_return + +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release +#define arch_atomic64_fetch_add arch_atomic64_fetch_add + +#define 
arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release +#define arch_atomic64_fetch_and arch_atomic64_fetch_and + +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot + +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release +#define arch_atomic64_fetch_or arch_atomic64_fetch_or + +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor + +#define arch_atomic64_xchg_relaxed arch_atomic_xchg_relaxed +#define arch_atomic64_xchg_acquire arch_atomic_xchg_acquire +#define arch_atomic64_xchg_release arch_atomic_xchg_release +#define arch_atomic64_xchg arch_atomic_xchg + +#define arch_atomic64_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed +#define arch_atomic64_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#define arch_atomic64_cmpxchg_release arch_atomic_cmpxchg_release +#define arch_atomic64_cmpxchg arch_atomic_cmpxchg + +#define arch_atomic64_andnot arch_atomic64_andnot + +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive + +#include #endif #endif diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index af7b99005453..e321293e0c89 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -39,7 +39,7 @@ #define ATOMIC_OP(op, asm_op) \ __LL_SC_INLINE void \ -__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ +__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ { \ unsigned long tmp; \ int result; \ @@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "Ir" (i)); \ } \ -__LL_SC_EXPORT(atomic_##op); +__LL_SC_EXPORT(arch_atomic_##op); #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE int \ -__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ +__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ { \ unsigned long tmp; \ int result; \ @@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v)) \ \ return result; \ } \ -__LL_SC_EXPORT(atomic_##op##_return##name); +__LL_SC_EXPORT(arch_atomic_##op##_return##name); #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE int \ -__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ +__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ { \ unsigned long tmp; \ int val, result; \ @@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v)) \ \ return result; \ } \ -__LL_SC_EXPORT(atomic_fetch_##op##name); 
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name); #define ATOMIC_OPS(...) \ ATOMIC_OP(__VA_ARGS__) \ @@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor) #define ATOMIC64_OP(op, asm_op) \ __LL_SC_INLINE void \ -__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ +__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \ { \ long result; \ unsigned long tmp; \ @@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : "Ir" (i)); \ } \ -__LL_SC_EXPORT(atomic64_##op); +__LL_SC_EXPORT(arch_atomic64_##op); #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE long \ -__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ +__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\ { \ long result; \ unsigned long tmp; \ @@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \ \ return result; \ } \ -__LL_SC_EXPORT(atomic64_##op##_return##name); +__LL_SC_EXPORT(arch_atomic64_##op##_return##name); #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ __LL_SC_INLINE long \ -__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ +__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \ { \ long result, val; \ unsigned long tmp; \ @@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \ \ return result; \ } \ -__LL_SC_EXPORT(atomic64_fetch_##op##name); +__LL_SC_EXPORT(arch_atomic64_fetch_##op##name); #define ATOMIC64_OPS(...) \ ATOMIC64_OP(__VA_ARGS__) \ @@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor) #undef ATOMIC64_OP __LL_SC_INLINE long -__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) +__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) { long result; unsigned long tmp; @@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) return result; } -__LL_SC_EXPORT(atomic64_dec_if_positive); +__LL_SC_EXPORT(arch_atomic64_dec_if_positive); #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ __LL_SC_INLINE u##sz \ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index a424355240c5..9256a3921e4b 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -25,9 +25,9 @@ #error "please don't include this file directly" #endif -#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op) +#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op) #define ATOMIC_OP(op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd) #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline int atomic_fetch_##op##name(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd) #undef ATOMIC_FETCH_OPS #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) 
\ -static inline int atomic_add_return##name(int i, atomic_t *v) \ +static inline int arch_atomic_add_return##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") #undef ATOMIC_OP_ADD_RETURN -static inline void atomic_and(int i, atomic_t *v) +static inline void arch_atomic_and(int i, atomic_t *v) { register int w0 asm ("w0") = i; register atomic_t *x1 asm ("x1") = v; @@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v) } #define ATOMIC_FETCH_OP_AND(name, mb, cl...) \ -static inline int atomic_fetch_and##name(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory") #undef ATOMIC_FETCH_OP_AND -static inline void atomic_sub(int i, atomic_t *v) +static inline void arch_atomic_sub(int i, atomic_t *v) { register int w0 asm ("w0") = i; register atomic_t *x1 asm ("x1") = v; @@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v) } #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ -static inline int atomic_sub_return##name(int i, atomic_t *v) \ +static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") #undef ATOMIC_OP_SUB_RETURN #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \ -static inline int atomic_fetch_sub##name(int i, atomic_t *v) \ +static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ register int w0 asm ("w0") = i; \ register atomic_t *x1 asm ("x1") = v; \ @@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB( , al, "memory") #undef ATOMIC_FETCH_OP_SUB #undef __LL_SC_ATOMIC -#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op) +#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op) #define ATOMIC64_OP(op, asm_op) \ -static inline void atomic64_##op(long i, atomic64_t *v) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd) #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd) #undef ATOMIC64_FETCH_OPS #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ -static inline long atomic64_add_return##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") #undef ATOMIC64_OP_ADD_RETURN -static inline void atomic64_and(long i, atomic64_t *v) +static inline void arch_atomic64_and(long i, atomic64_t *v) { register long x0 asm ("x0") = i; register atomic64_t *x1 asm ("x1") = v; @@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v) } #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) 
\ -static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v) \ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") #undef ATOMIC64_FETCH_OP_AND -static inline void atomic64_sub(long i, atomic64_t *v) +static inline void arch_atomic64_sub(long i, atomic64_t *v) { register long x0 asm ("x0") = i; register atomic64_t *x1 asm ("x1") = v; @@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) } #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \ -static inline long atomic64_sub_return##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") #undef ATOMIC64_OP_SUB_RETURN #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ -static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \ +static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v) \ { \ register long x0 asm ("x0") = i; \ register atomic64_t *x1 asm ("x1") = v; \ @@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory") #undef ATOMIC64_FETCH_OP_SUB -static inline long atomic64_dec_if_positive(atomic64_t *v) +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) { register long x0 asm ("x0") = (long)v; diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 3f9376f1c409..e6ea0f42e097 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -110,10 +110,10 @@ __XCHG_GEN(_mb) }) /* xchg */ -#define xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__) -#define xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__) -#define xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) -#define xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) +#define arch_xchg_relaxed(...) __xchg_wrapper( , __VA_ARGS__) +#define arch_xchg_acquire(...) __xchg_wrapper(_acq, __VA_ARGS__) +#define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) +#define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) #define __CMPXCHG_GEN(sfx) \ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ @@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb) }) /* cmpxchg */ -#define cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__) -#define cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__) -#define cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__) -#define cmpxchg(...) __cmpxchg_wrapper( _mb, __VA_ARGS__) -#define cmpxchg_local cmpxchg_relaxed +#define arch_cmpxchg_relaxed(...) __cmpxchg_wrapper( , __VA_ARGS__) +#define arch_cmpxchg_acquire(...) __cmpxchg_wrapper(_acq, __VA_ARGS__) +#define arch_cmpxchg_release(...) __cmpxchg_wrapper(_rel, __VA_ARGS__) +#define arch_cmpxchg(...) 
__cmpxchg_wrapper( _mb, __VA_ARGS__) +#define arch_cmpxchg_local arch_cmpxchg_relaxed /* cmpxchg64 */ -#define cmpxchg64_relaxed cmpxchg_relaxed -#define cmpxchg64_acquire cmpxchg_acquire -#define cmpxchg64_release cmpxchg_release -#define cmpxchg64 cmpxchg -#define cmpxchg64_local cmpxchg_local +#define arch_cmpxchg64_relaxed arch_cmpxchg_relaxed +#define arch_cmpxchg64_acquire arch_cmpxchg_acquire +#define arch_cmpxchg64_release arch_cmpxchg_release +#define arch_cmpxchg64 arch_cmpxchg +#define arch_cmpxchg64_local arch_cmpxchg_local /* cmpxchg_double */ #define system_has_cmpxchg_double() 1 @@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb) VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \ }) -#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ -({\ - int __ret;\ - __cmpxchg_double_check(ptr1, ptr2); \ - __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ - (unsigned long)(n1), (unsigned long)(n2), \ - ptr1); \ - __ret; \ +#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ +({ \ + int __ret; \ + __cmpxchg_double_check(ptr1, ptr2); \ + __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ + (unsigned long)(n1), (unsigned long)(n2), \ + ptr1); \ + __ret; \ }) -#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ -({\ - int __ret;\ - __cmpxchg_double_check(ptr1, ptr2); \ - __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ - (unsigned long)(n1), (unsigned long)(n2), \ - ptr1); \ - __ret; \ +#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ +({ \ + int __ret; \ + __cmpxchg_double_check(ptr1, ptr2); \ + __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ + (unsigned long)(n1), (unsigned long)(n2), \ + ptr1); \ + __ret; \ }) #define __CMPWAIT_CASE(w, sfx, sz) \ diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 82e9099834ae..f6a76e43f39e 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -60,7 +60,8 @@ #define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39 #define ARM64_HAS_GENERIC_AUTH_ARCH 40 #define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41 +#define ARM64_HAS_IRQ_PRIO_MASKING 42 -#define ARM64_NCAPS 42 +#define ARM64_NCAPS 43 #endif /* __ASM_CPUCAPS_H */ diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index dfcfba725d72..e505e1fbd2b9 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -391,6 +391,10 @@ extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; +/* ARM64 CAPS + alternative_cb */ +#define ARM64_NPATCHABLE (ARM64_NCAPS + 1) +extern DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); + #define for_each_available_cap(cap) \ for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS) @@ -612,6 +616,12 @@ static inline bool system_supports_generic_auth(void) cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF)); } +static inline bool system_uses_irq_prio_masking(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING); +} + #define ARM64_SSBD_UNKNOWN -1 #define ARM64_SSBD_FORCE_DISABLE 0 #define ARM64_SSBD_KERNEL 1 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 951ed1a4e5c9..2afb1338b48a 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -76,6 +76,7 @@ #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define 
ARM_CPU_IMP_NVIDIA 0x4E +#define ARM_CPU_IMP_FUJITSU 0x46 #define ARM_CPU_PART_AEM_V8 0xD0F #define ARM_CPU_PART_FOUNDATION 0xD00 @@ -104,6 +105,8 @@ #define NVIDIA_CPU_PART_DENVER 0x003 #define NVIDIA_CPU_PART_CARMEL 0x004 +#define FUJITSU_CPU_PART_A64FX 0x001 + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) @@ -122,6 +125,12 @@ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) +#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) + +/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ +#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX +#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_VARIANT(1)) +#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0) #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h index 8d91f2233135..db452aa9e651 100644 --- a/arch/arm64/include/asm/daifflags.h +++ b/arch/arm64/include/asm/daifflags.h @@ -18,8 +18,11 @@ #include +#include + #define DAIF_PROCCTX 0 #define DAIF_PROCCTX_NOIRQ PSR_I_BIT +#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT) /* mask/save/unmask/restore all exceptions, including interrupts. */ static inline void local_daif_mask(void) @@ -36,31 +39,61 @@ static inline unsigned long local_daif_save(void) { unsigned long flags; - flags = arch_local_save_flags(); + flags = read_sysreg(daif); + + if (system_uses_irq_prio_masking()) { + /* If IRQs are masked with PMR, reflect it in the flags */ + if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF) + flags |= PSR_I_BIT; + } local_daif_mask(); return flags; } -static inline void local_daif_unmask(void) -{ - trace_hardirqs_on(); - asm volatile( - "msr daifclr, #0xf // local_daif_unmask" - : - : - : "memory"); -} - static inline void local_daif_restore(unsigned long flags) { - if (!arch_irqs_disabled_flags(flags)) + bool irq_disabled = flags & PSR_I_BIT; + + if (!irq_disabled) { trace_hardirqs_on(); - arch_local_irq_restore(flags); + if (system_uses_irq_prio_masking()) + arch_local_irq_enable(); + } else if (!(flags & PSR_A_BIT)) { + /* + * If interrupts are disabled but we can take + * asynchronous errors, we can take NMIs + */ + if (system_uses_irq_prio_masking()) { + flags &= ~PSR_I_BIT; + /* + * There has been concern that the write to daif + * might be reordered before this write to PMR. + * From the ARM ARM DDI 0487D.a, section D1.7.1 + * "Accessing PSTATE fields": + * Writes to the PSTATE fields have side-effects on + * various aspects of the PE operation. All of these + * side-effects are guaranteed: + * - Not to be visible to earlier instructions in + * the execution stream. + * - To be visible to later instructions in the + * execution stream + * + * Also, writes to PMR are self-synchronizing, so no + * interrupts with a lower priority than PMR is signaled + * to the PE after the write. + * + * So we don't need additional synchronization here. 
+ */ + arch_local_irq_disable(); + } + } + + write_sysreg(flags, daif); - if (arch_irqs_disabled_flags(flags)) + if (irq_disabled) trace_hardirqs_off(); } diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index 95dbf3ef735a..de98191e4c7d 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -29,15 +29,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return NULL; } -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent); -#define arch_setup_dma_ops arch_setup_dma_ops - -#ifdef CONFIG_IOMMU_DMA -void arch_teardown_dma_ops(struct device *dev); -#define arch_teardown_dma_ops arch_teardown_dma_ops -#endif - /* * Do not use this function in a driver, it is only provided for * arch/arm/mm/xen.c, which is used by arm64 as well. diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 7ed320895d1f..c9e9a6978e73 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -44,6 +44,17 @@ efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) +/* + * Even when Linux uses IRQ priorities for IRQ disabling, EFI does not. + * And EFI shouldn't really play around with priority masking as it is not aware + * which priorities the OS has assigned to its interrupts. + */ +#define arch_efi_save_flags(state_flags) \ + ((void)((state_flags) = read_sysreg(daif))) + +#define arch_efi_restore_flags(state_flags) write_sysreg(state_flags, daif) + + /* arch specific definitions used by the stub code */ /* diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index ec1e6d6fa14c..f987b8a8f325 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -55,7 +55,11 @@ enum fixed_addresses { #ifdef CONFIG_ACPI_APEI_GHES /* Used for GHES mapping from assorted contexts */ FIX_APEI_GHES_IRQ, - FIX_APEI_GHES_NMI, + FIX_APEI_GHES_SEA, +#ifdef CONFIG_ARM_SDE_INTERFACE + FIX_APEI_GHES_SDEI_NORMAL, + FIX_APEI_GHES_SDEI_CRITICAL, +#endif #endif /* CONFIG_ACPI_APEI_GHES */ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 1473fc2f7ab7..89691c86640a 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -17,8 +17,12 @@ #define __ASM_HARDIRQ_H #include +#include #include +#include #include +#include +#include #define NR_IPI 7 @@ -37,6 +41,33 @@ u64 smp_irq_stat_cpu(unsigned int cpu); #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 +struct nmi_ctx { + u64 hcr; +}; + +DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts); + +#define arch_nmi_enter() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + nmi_ctx->hcr = read_sysreg(hcr_el2); \ + if (!(nmi_ctx->hcr & HCR_TGE)) { \ + write_sysreg(nmi_ctx->hcr | HCR_TGE, hcr_el2); \ + isb(); \ + } \ + } \ + } while (0) + +#define arch_nmi_exit() \ + do { \ + if (is_kernel_in_hyp_mode()) { \ + struct nmi_ctx *nmi_ctx = this_cpu_ptr(&nmi_contexts); \ + if (!(nmi_ctx->hcr & HCR_TGE)) \ + write_sysreg(nmi_ctx->hcr, hcr_el2); \ + } \ + } while (0) + static inline void ack_bad_irq(unsigned int irq) { extern unsigned long irq_err_count; diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index fb6609875455..c6a07a3b433e 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ 
b/arch/arm64/include/asm/hugetlb.h @@ -20,6 +20,11 @@ #include +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION +#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported +extern bool arch_hugetlb_migration_supported(struct hstate *h); +#endif + #define __HAVE_ARCH_HUGE_PTEP_GET static inline pte_t huge_ptep_get(pte_t *ptep) { diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index ee723835c1f4..8bb7210ac286 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -121,6 +121,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) : "memory"); \ }) +#define __io_par(v) __iormb(v) #define __iowmb() wmb() #define mmiowb() do { } while (0) diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 24692edf1a69..43d8366c1e87 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -18,7 +18,9 @@ #ifdef __KERNEL__ +#include #include +#include /* * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and @@ -36,33 +38,27 @@ /* * CPU interrupt mask handling. */ -static inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - asm volatile( - "mrs %0, daif // arch_local_irq_save\n" - "msr daifset, #2" - : "=r" (flags) - : - : "memory"); - return flags; -} - static inline void arch_local_irq_enable(void) { - asm volatile( - "msr daifclr, #2 // arch_local_irq_enable" - : + asm volatile(ALTERNATIVE( + "msr daifclr, #2 // arch_local_irq_enable\n" + "nop", + "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0\n" + "dsb sy", + ARM64_HAS_IRQ_PRIO_MASKING) : + : "r" ((unsigned long) GIC_PRIO_IRQON) : "memory"); } static inline void arch_local_irq_disable(void) { - asm volatile( - "msr daifset, #2 // arch_local_irq_disable" - : + asm volatile(ALTERNATIVE( + "msr daifset, #2 // arch_local_irq_disable", + "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0", + ARM64_HAS_IRQ_PRIO_MASKING) : + : "r" ((unsigned long) GIC_PRIO_IRQOFF) : "memory"); } @@ -71,12 +67,44 @@ static inline void arch_local_irq_disable(void) */ static inline unsigned long arch_local_save_flags(void) { + unsigned long daif_bits; unsigned long flags; - asm volatile( - "mrs %0, daif // arch_local_save_flags" - : "=r" (flags) - : + + daif_bits = read_sysreg(daif); + + /* + * The asm is logically equivalent to: + * + * if (system_uses_irq_prio_masking()) + * flags = (daif_bits & PSR_I_BIT) ? 
+ * GIC_PRIO_IRQOFF : + * read_sysreg_s(SYS_ICC_PMR_EL1); + * else + * flags = daif_bits; + */ + asm volatile(ALTERNATIVE( + "mov %0, %1\n" + "nop\n" + "nop", + "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n" + "ands %1, %1, " __stringify(PSR_I_BIT) "\n" + "csel %0, %0, %2, eq", + ARM64_HAS_IRQ_PRIO_MASKING) + : "=&r" (flags), "+r" (daif_bits) + : "r" ((unsigned long) GIC_PRIO_IRQOFF) : "memory"); + + return flags; +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + flags = arch_local_save_flags(); + + arch_local_irq_disable(); + return flags; } @@ -85,16 +113,32 @@ static inline unsigned long arch_local_save_flags(void) */ static inline void arch_local_irq_restore(unsigned long flags) { - asm volatile( - "msr daif, %0 // arch_local_irq_restore" - : - : "r" (flags) - : "memory"); + asm volatile(ALTERNATIVE( + "msr daif, %0\n" + "nop", + "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0\n" + "dsb sy", + ARM64_HAS_IRQ_PRIO_MASKING) + : "+r" (flags) + : + : "memory"); } static inline int arch_irqs_disabled_flags(unsigned long flags) { - return flags & PSR_I_BIT; + int res; + + asm volatile(ALTERNATIVE( + "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n" + "nop", + "cmp %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n" + "cset %w0, ls", + ARM64_HAS_IRQ_PRIO_MASKING) + : "=&r" (res) + : "r" ((int) flags) + : "memory"); + + return res; } #endif #endif diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..222af1d2c3e4 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -48,6 +49,7 @@ #define KVM_REQ_SLEEP \ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); @@ -208,6 +210,13 @@ struct kvm_cpu_context { typedef struct kvm_cpu_context kvm_cpu_context_t; +struct vcpu_reset_state { + unsigned long pc; + unsigned long r0; + bool be; + bool reset; +}; + struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; @@ -297,6 +306,9 @@ struct kvm_vcpu_arch { /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; + /* Additional reset state */ + struct vcpu_reset_state reset_state; + /* True when deferrable sysregs are loaded on the physical CPU, * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ bool sysregs_loaded_on_cpu; @@ -474,10 +486,25 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) static inline void kvm_arm_vhe_guest_enter(void) { local_daif_mask(); + + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. + */ + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON); + dsb(sy); + } } static inline void kvm_arm_vhe_guest_exit(void) { + /* + * local_daif_restore() takes care to properly restore PSTATE.DAIF + * and the GIC PMR if the host is using IRQ priorities. 
+ */ local_daif_restore(DAIF_PROCCTX_NOIRQ); /* diff --git a/arch/arm64/include/asm/kvm_ras.h b/arch/arm64/include/asm/kvm_ras.h new file mode 100644 index 000000000000..8ac6ee77437c --- /dev/null +++ b/arch/arm64/include/asm/kvm_ras.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 - Arm Ltd */ + +#ifndef __ARM64_KVM_RAS_H__ +#define __ARM64_KVM_RAS_H__ + +#include +#include +#include + +#include + +/* + * Was this synchronous external abort a RAS notification? + * Returns '0' for errors handled by some RAS subsystem, or -ENOENT. + */ +static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr) +{ + /* apei_claim_sea(NULL) expects to mask interrupts itself */ + lockdep_assert_irqs_enabled(); + + return apei_claim_sea(NULL); +} + +#endif /* __ARM64_KVM_RAS_H__ */ diff --git a/arch/arm64/include/asm/memblock.h b/arch/arm64/include/asm/memblock.h deleted file mode 100644 index 6afeed2467f1..000000000000 --- a/arch/arm64/include/asm/memblock.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#ifndef __ASM_MEMBLOCK_H -#define __ASM_MEMBLOCK_H - -extern void arm64_memblock_init(void); - -#endif diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e1ec947e7c0c..290195168bb3 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -80,11 +80,7 @@ */ #ifdef CONFIG_KASAN #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) -#ifdef CONFIG_KASAN_EXTRA -#define KASAN_THREAD_SHIFT 2 -#else #define KASAN_THREAD_SHIFT 1 -#endif /* CONFIG_KASAN_EXTRA */ #else #define KASAN_SHADOW_SIZE (0) #define KASAN_THREAD_SHIFT 0 @@ -316,8 +312,9 @@ static inline void *phys_to_virt(phys_addr_t x) #define page_to_virt(page) ({ \ unsigned long __addr = \ ((__page_to_voff(page)) | PAGE_OFFSET); \ - __addr = __tag_set(__addr, page_kasan_tag(page)); \ - ((void *)__addr); \ + unsigned long __addr_tag = \ + __tag_set(__addr, page_kasan_tag(page)); \ + ((void *)__addr_tag); \ }) #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) @@ -332,6 +329,17 @@ static inline void *phys_to_virt(phys_addr_t x) #define virt_addr_valid(kaddr) \ (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) +/* + * Given that the GIC architecture permits ITS implementations that can only be + * configured with a LPI table address once, GICv3 systems with many CPUs may + * end up reserving a lot of different regions after a kexec for their LPI + * tables (one per CPU), as we are forced to reuse the same memory after kexec + * (and thus reserve it persistently with EFI beforehand) + */ +#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS) +# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) +#endif + #include #endif diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 3e8063f4f9d3..67ef25d037ea 100644 --- a/arch/arm64/include/asm/mmu.h +++ 
b/arch/arm64/include/asm/mmu.h @@ -129,6 +129,7 @@ static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void) static inline void arm64_apply_bp_hardening(void) { } #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */ +extern void arm64_memblock_init(void); extern void paging_init(void); extern void bootmem_init(void); extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h index 2ba6c6b9541f..71abfc7612b2 100644 --- a/arch/arm64/include/asm/neon-intrinsics.h +++ b/arch/arm64/include/asm/neon-intrinsics.h @@ -36,4 +36,8 @@ #include #endif +#ifdef CONFIG_CC_IS_CLANG +#pragma clang diagnostic ignored "-Wincompatible-pointer-types" +#endif + #endif /* __ASM_NEON_INTRINSICS_H */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index e9b0a7d75184..a69259cc1f16 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -302,6 +302,7 @@ #define TCR_TBI1 (UL(1) << 38) #define TCR_HA (UL(1) << 39) #define TCR_HD (UL(1) << 40) +#define TCR_NFD0 (UL(1) << 53) #define TCR_NFD1 (UL(1) << 54) /* diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index f1a7ab18faf3..5d9ce62bdebd 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -191,6 +191,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) memset(regs, 0, sizeof(*regs)); forget_syscall(regs); regs->pc = pc; + + if (system_uses_irq_prio_masking()) + regs->pmr_save = GIC_PRIO_IRQON; } static inline void start_thread(struct pt_regs *regs, unsigned long pc, diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h index 6afd8476c60c..9e948a93d26c 100644 --- a/arch/arm64/include/asm/ptdump.h +++ b/arch/arm64/include/asm/ptdump.h @@ -34,13 +34,10 @@ struct ptdump_info { void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info); #ifdef CONFIG_ARM64_PTDUMP_DEBUGFS -int ptdump_debugfs_register(struct ptdump_info *info, const char *name); +void ptdump_debugfs_register(struct ptdump_info *info, const char *name); #else -static inline int ptdump_debugfs_register(struct ptdump_info *info, - const char *name) -{ - return 0; -} +static inline void ptdump_debugfs_register(struct ptdump_info *info, + const char *name) { } #endif void ptdump_check_wx(void); #endif /* CONFIG_ARM64_PTDUMP_CORE */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index fce22c4b2f73..ec60174c8c18 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -19,12 +19,26 @@ #ifndef __ASM_PTRACE_H #define __ASM_PTRACE_H +#include + #include /* Current Exception Level values, as contained in CurrentEL */ #define CurrentEL_EL1 (1 << 2) #define CurrentEL_EL2 (2 << 2) +/* + * PMR values used to mask/unmask interrupts. + * + * GIC priority masking works as follows: if an IRQ's priority is a higher value + * than the value held in PMR, that IRQ is masked. Lowering the value of PMR + * means masking more IRQs (or at least that the same IRQs remain masked). + * + * To mask interrupts, we clear the most significant bit of PMR. 
+ */ +#define GIC_PRIO_IRQON 0xf0 +#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80) + /* Additional SPSR bits not exposed in the UABI */ #define PSR_IL_BIT (1 << 20) @@ -167,7 +181,8 @@ struct pt_regs { #endif u64 orig_addr_limit; - u64 unused; // maintain 16 byte alignment + /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */ + u64 pmr_save; u64 stackframe[2]; }; @@ -202,8 +217,13 @@ static inline void forget_syscall(struct pt_regs *regs) #define processor_mode(regs) \ ((regs)->pstate & PSR_MODE_MASK) -#define interrupts_enabled(regs) \ - (!((regs)->pstate & PSR_I_BIT)) +#define irqs_priority_unmasked(regs) \ + (system_uses_irq_prio_masking() ? \ + (regs)->pmr_save == GIC_PRIO_IRQON : \ + true) + +#define interrupts_enabled(regs) \ + (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs)) #define fast_interrupts_enabled(regs) \ (!((regs)->pstate & PSR_F_BIT)) diff --git a/arch/arm64/include/asm/sync_bitops.h b/arch/arm64/include/asm/sync_bitops.h index eee31a9f72a5..e9c1a02c2154 100644 --- a/arch/arm64/include/asm/sync_bitops.h +++ b/arch/arm64/include/asm/sync_bitops.h @@ -15,13 +15,13 @@ * ops which are SMP safe even on a UP kernel. */ -#define sync_set_bit(nr, p) set_bit(nr, p) -#define sync_clear_bit(nr, p) clear_bit(nr, p) -#define sync_change_bit(nr, p) change_bit(nr, p) -#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) -#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) -#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) -#define sync_test_bit(nr, addr) test_bit(nr, addr) -#define sync_cmpxchg cmpxchg +#define sync_set_bit(nr, p) set_bit(nr, p) +#define sync_clear_bit(nr, p) clear_bit(nr, p) +#define sync_change_bit(nr, p) change_bit(nr, p) +#define sync_test_and_set_bit(nr, p) test_and_set_bit(nr, p) +#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p) +#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p) +#define sync_test_bit(nr, addr) test_bit(nr, addr) +#define arch_sync_cmpxchg arch_cmpxchg #endif diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h index 0e2a0ecaf484..32693f34f431 100644 --- a/arch/arm64/include/asm/system_misc.h +++ b/arch/arm64/include/asm/system_misc.h @@ -46,8 +46,6 @@ extern void __show_regs(struct pt_regs *); extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); -int handle_guest_sea(phys_addr_t addr, unsigned int esr); - #endif /* __ASSEMBLY__ */ #endif /* __ASM_SYSTEM_MISC_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index bbca68b54732..eb3ef73e07cf 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -79,7 +79,6 @@ void arch_release_task_struct(struct task_struct *tsk); * TIF_SIGPENDING - signal pending * TIF_NEED_RESCHED - rescheduling necessary * TIF_NOTIFY_RESUME - callback before returning to user - * TIF_USEDFPU - FPU was used by this task this quantum (SMP) */ #define TIF_SIGPENDING 0 #define TIF_NEED_RESCHED 1 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 547d7a0c9d05..e5d5f31c6d36 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -34,7 +34,6 @@ #include #include -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) @@ -268,7 +267,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) : "+r" (err), "=&r" (x) \ : "r" (addr), "i" (-EFAULT)) 
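Pulling together the GIC_PRIO_* values and the interrupts_enabled() change from the ptrace.h hunk above: GIC_PRIO_IRQOFF clears bit 7 of GIC_PRIO_IRQON, i.e. 0xf0 & ~0x80 == 0x70, so dropping PMR to 0x70 masks ordinary IRQs while, by design, leaving room for pseudo-NMIs programmed at a numerically lower (higher) priority. A plain-C restatement of the new test, assuming the kernel definitions introduced in this series (the ex_ name is illustrative only):

static inline bool ex_interrupts_enabled(const struct pt_regs *regs)
{
        bool prio_unmasked = system_uses_irq_prio_masking() ?
                             regs->pmr_save == GIC_PRIO_IRQON : true;

        return !(regs->pstate & PSR_I_BIT) && prio_unmasked;
}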
-#define __get_user_err(x, ptr, err) \ +#define __raw_get_user(x, ptr, err) \ do { \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ @@ -297,28 +296,22 @@ do { \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_check(x, ptr, err) \ -({ \ +#define __get_user_error(x, ptr, err) \ +do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ - __get_user_err((x), __p, (err)); \ + __raw_get_user((x), __p, (err)); \ } else { \ (x) = 0; (err) = -EFAULT; \ } \ -}) - -#define __get_user_error(x, ptr, err) \ -({ \ - __get_user_check((x), (ptr), (err)); \ - (void)0; \ -}) +} while (0) #define __get_user(x, ptr) \ ({ \ int __gu_err = 0; \ - __get_user_check((x), (ptr), __gu_err); \ + __get_user_error((x), (ptr), __gu_err); \ __gu_err; \ }) @@ -338,7 +331,7 @@ do { \ : "+r" (err) \ : "r" (x), "r" (addr), "i" (-EFAULT)) -#define __put_user_err(x, ptr, err) \ +#define __raw_put_user(x, ptr, err) \ do { \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ @@ -366,28 +359,22 @@ do { \ uaccess_disable_not_uao(); \ } while (0) -#define __put_user_check(x, ptr, err) \ -({ \ +#define __put_user_error(x, ptr, err) \ +do { \ __typeof__(*(ptr)) __user *__p = (ptr); \ might_fault(); \ if (access_ok(__p, sizeof(*__p))) { \ __p = uaccess_mask_ptr(__p); \ - __put_user_err((x), __p, (err)); \ + __raw_put_user((x), __p, (err)); \ } else { \ (err) = -EFAULT; \ } \ -}) - -#define __put_user_error(x, ptr, err) \ -({ \ - __put_user_check((x), (ptr), (err)); \ - (void)0; \ -}) +} while (0) #define __put_user(x, ptr) \ ({ \ int __pu_err = 0; \ - __put_user_check((x), (ptr), __pu_err); \ + __put_user_error((x), (ptr), __pu_err); \ __pu_err; \ }) diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index a7b1fc58ffdf..d1dd93436e1e 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5) #define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800) -#define __NR_compat_syscalls 400 +#define __NR_compat_syscalls 424 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 04ee190b90fe..5590f2623690 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -270,7 +270,7 @@ __SYSCALL(__NR_uname, sys_newuname) /* 123 was sys_modify_ldt */ __SYSCALL(123, sys_ni_syscall) #define __NR_adjtimex 124 -__SYSCALL(__NR_adjtimex, compat_sys_adjtimex) +__SYSCALL(__NR_adjtimex, sys_adjtimex_time32) #define __NR_mprotect 125 __SYSCALL(__NR_mprotect, sys_mprotect) #define __NR_sigprocmask 126 @@ -344,9 +344,9 @@ __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) #define __NR_sched_get_priority_min 160 __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) #define __NR_sched_rr_get_interval 161 -__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval) +__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval_time32) #define __NR_nanosleep 162 -__SYSCALL(__NR_nanosleep, compat_sys_nanosleep) +__SYSCALL(__NR_nanosleep, sys_nanosleep_time32) #define __NR_mremap 163 __SYSCALL(__NR_mremap, sys_mremap) #define __NR_setresuid 164 @@ -376,7 +376,7 @@ __SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask) #define __NR_rt_sigpending 176 __SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending) #define __NR_rt_sigtimedwait 177 -__SYSCALL(__NR_rt_sigtimedwait, 
compat_sys_rt_sigtimedwait) +__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait_time32) #define __NR_rt_sigqueueinfo 178 __SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo) #define __NR_rt_sigsuspend 179 @@ -502,7 +502,7 @@ __SYSCALL(__NR_tkill, sys_tkill) #define __NR_sendfile64 239 __SYSCALL(__NR_sendfile64, sys_sendfile64) #define __NR_futex 240 -__SYSCALL(__NR_futex, compat_sys_futex) +__SYSCALL(__NR_futex, sys_futex_time32) #define __NR_sched_setaffinity 241 __SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity) #define __NR_sched_getaffinity 242 @@ -512,7 +512,7 @@ __SYSCALL(__NR_io_setup, compat_sys_io_setup) #define __NR_io_destroy 244 __SYSCALL(__NR_io_destroy, sys_io_destroy) #define __NR_io_getevents 245 -__SYSCALL(__NR_io_getevents, compat_sys_io_getevents) +__SYSCALL(__NR_io_getevents, sys_io_getevents_time32) #define __NR_io_submit 246 __SYSCALL(__NR_io_submit, compat_sys_io_submit) #define __NR_io_cancel 247 @@ -538,21 +538,21 @@ __SYSCALL(__NR_set_tid_address, sys_set_tid_address) #define __NR_timer_create 257 __SYSCALL(__NR_timer_create, compat_sys_timer_create) #define __NR_timer_settime 258 -__SYSCALL(__NR_timer_settime, compat_sys_timer_settime) +__SYSCALL(__NR_timer_settime, sys_timer_settime32) #define __NR_timer_gettime 259 -__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime) +__SYSCALL(__NR_timer_gettime, sys_timer_gettime32) #define __NR_timer_getoverrun 260 __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) #define __NR_timer_delete 261 __SYSCALL(__NR_timer_delete, sys_timer_delete) #define __NR_clock_settime 262 -__SYSCALL(__NR_clock_settime, compat_sys_clock_settime) +__SYSCALL(__NR_clock_settime, sys_clock_settime32) #define __NR_clock_gettime 263 -__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime) +__SYSCALL(__NR_clock_gettime, sys_clock_gettime32) #define __NR_clock_getres 264 -__SYSCALL(__NR_clock_getres, compat_sys_clock_getres) +__SYSCALL(__NR_clock_getres, sys_clock_getres_time32) #define __NR_clock_nanosleep 265 -__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep) +__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep_time32) #define __NR_statfs64 266 __SYSCALL(__NR_statfs64, compat_sys_aarch32_statfs64) #define __NR_fstatfs64 267 @@ -560,7 +560,7 @@ __SYSCALL(__NR_fstatfs64, compat_sys_aarch32_fstatfs64) #define __NR_tgkill 268 __SYSCALL(__NR_tgkill, sys_tgkill) #define __NR_utimes 269 -__SYSCALL(__NR_utimes, compat_sys_utimes) +__SYSCALL(__NR_utimes, sys_utimes_time32) #define __NR_arm_fadvise64_64 270 __SYSCALL(__NR_arm_fadvise64_64, compat_sys_aarch32_fadvise64_64) #define __NR_pciconfig_iobase 271 @@ -574,9 +574,9 @@ __SYSCALL(__NR_mq_open, compat_sys_mq_open) #define __NR_mq_unlink 275 __SYSCALL(__NR_mq_unlink, sys_mq_unlink) #define __NR_mq_timedsend 276 -__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend) +__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend_time32) #define __NR_mq_timedreceive 277 -__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive) +__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive_time32) #define __NR_mq_notify 278 __SYSCALL(__NR_mq_notify, compat_sys_mq_notify) #define __NR_mq_getsetattr 279 @@ -622,7 +622,7 @@ __SYSCALL(__NR_semop, sys_semop) #define __NR_semget 299 __SYSCALL(__NR_semget, sys_semget) #define __NR_semctl 300 -__SYSCALL(__NR_semctl, compat_sys_semctl) +__SYSCALL(__NR_semctl, compat_sys_old_semctl) #define __NR_msgsnd 301 __SYSCALL(__NR_msgsnd, compat_sys_msgsnd) #define __NR_msgrcv 302 @@ -630,7 +630,7 @@ __SYSCALL(__NR_msgrcv, compat_sys_msgrcv) #define 
__NR_msgget 303 __SYSCALL(__NR_msgget, sys_msgget) #define __NR_msgctl 304 -__SYSCALL(__NR_msgctl, compat_sys_msgctl) +__SYSCALL(__NR_msgctl, compat_sys_old_msgctl) #define __NR_shmat 305 __SYSCALL(__NR_shmat, compat_sys_shmat) #define __NR_shmdt 306 @@ -638,7 +638,7 @@ __SYSCALL(__NR_shmdt, sys_shmdt) #define __NR_shmget 307 __SYSCALL(__NR_shmget, sys_shmget) #define __NR_shmctl 308 -__SYSCALL(__NR_shmctl, compat_sys_shmctl) +__SYSCALL(__NR_shmctl, compat_sys_old_shmctl) #define __NR_add_key 309 __SYSCALL(__NR_add_key, sys_add_key) #define __NR_request_key 310 @@ -646,7 +646,7 @@ __SYSCALL(__NR_request_key, sys_request_key) #define __NR_keyctl 311 __SYSCALL(__NR_keyctl, compat_sys_keyctl) #define __NR_semtimedop 312 -__SYSCALL(__NR_semtimedop, compat_sys_semtimedop) +__SYSCALL(__NR_semtimedop, sys_semtimedop_time32) #define __NR_vserver 313 __SYSCALL(__NR_vserver, sys_ni_syscall) #define __NR_ioprio_set 314 @@ -674,7 +674,7 @@ __SYSCALL(__NR_mknodat, sys_mknodat) #define __NR_fchownat 325 __SYSCALL(__NR_fchownat, sys_fchownat) #define __NR_futimesat 326 -__SYSCALL(__NR_futimesat, compat_sys_futimesat) +__SYSCALL(__NR_futimesat, sys_futimesat_time32) #define __NR_fstatat64 327 __SYSCALL(__NR_fstatat64, sys_fstatat64) #define __NR_unlinkat 328 @@ -692,9 +692,9 @@ __SYSCALL(__NR_fchmodat, sys_fchmodat) #define __NR_faccessat 334 __SYSCALL(__NR_faccessat, sys_faccessat) #define __NR_pselect6 335 -__SYSCALL(__NR_pselect6, compat_sys_pselect6) +__SYSCALL(__NR_pselect6, compat_sys_pselect6_time32) #define __NR_ppoll 336 -__SYSCALL(__NR_ppoll, compat_sys_ppoll) +__SYSCALL(__NR_ppoll, compat_sys_ppoll_time32) #define __NR_unshare 337 __SYSCALL(__NR_unshare, sys_unshare) #define __NR_set_robust_list 338 @@ -718,7 +718,7 @@ __SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait) #define __NR_kexec_load 347 __SYSCALL(__NR_kexec_load, compat_sys_kexec_load) #define __NR_utimensat 348 -__SYSCALL(__NR_utimensat, compat_sys_utimensat) +__SYSCALL(__NR_utimensat, sys_utimensat_time32) #define __NR_signalfd 349 __SYSCALL(__NR_signalfd, compat_sys_signalfd) #define __NR_timerfd_create 350 @@ -728,9 +728,9 @@ __SYSCALL(__NR_eventfd, sys_eventfd) #define __NR_fallocate 352 __SYSCALL(__NR_fallocate, compat_sys_aarch32_fallocate) #define __NR_timerfd_settime 353 -__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime) +__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime32) #define __NR_timerfd_gettime 354 -__SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime) +__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime32) #define __NR_signalfd4 355 __SYSCALL(__NR_signalfd4, compat_sys_signalfd4) #define __NR_eventfd2 356 @@ -752,7 +752,7 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo) #define __NR_perf_event_open 364 __SYSCALL(__NR_perf_event_open, sys_perf_event_open) #define __NR_recvmmsg 365 -__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg) +__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg_time32) #define __NR_accept4 366 __SYSCALL(__NR_accept4, sys_accept4) #define __NR_fanotify_init 367 @@ -766,7 +766,7 @@ __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) #define __NR_open_by_handle_at 371 __SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at) #define __NR_clock_adjtime 372 -__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime) +__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime32) #define __NR_syncfs 373 __SYSCALL(__NR_syncfs, sys_syncfs) #define __NR_sendmmsg 374 @@ -821,6 +821,51 @@ __SYSCALL(__NR_statx, sys_statx) __SYSCALL(__NR_rseq, sys_rseq) #define __NR_io_pgetevents 399 
__SYSCALL(__NR_io_pgetevents, compat_sys_io_pgetevents) +#define __NR_migrate_pages 400 +__SYSCALL(__NR_migrate_pages, compat_sys_migrate_pages) +#define __NR_kexec_file_load 401 +__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load) +/* 402 is unused */ +#define __NR_clock_gettime64 403 +__SYSCALL(__NR_clock_gettime64, sys_clock_gettime) +#define __NR_clock_settime64 404 +__SYSCALL(__NR_clock_settime64, sys_clock_settime) +#define __NR_clock_adjtime64 405 +__SYSCALL(__NR_clock_adjtime64, sys_clock_adjtime) +#define __NR_clock_getres_time64 406 +__SYSCALL(__NR_clock_getres_time64, sys_clock_getres) +#define __NR_clock_nanosleep_time64 407 +__SYSCALL(__NR_clock_nanosleep_time64, sys_clock_nanosleep) +#define __NR_timer_gettime64 408 +__SYSCALL(__NR_timer_gettime64, sys_timer_gettime) +#define __NR_timer_settime64 409 +__SYSCALL(__NR_timer_settime64, sys_timer_settime) +#define __NR_timerfd_gettime64 410 +__SYSCALL(__NR_timerfd_gettime64, sys_timerfd_gettime) +#define __NR_timerfd_settime64 411 +__SYSCALL(__NR_timerfd_settime64, sys_timerfd_settime) +#define __NR_utimensat_time64 412 +__SYSCALL(__NR_utimensat_time64, sys_utimensat) +#define __NR_pselect6_time64 413 +__SYSCALL(__NR_pselect6_time64, compat_sys_pselect6_time64) +#define __NR_ppoll_time64 414 +__SYSCALL(__NR_ppoll_time64, compat_sys_ppoll_time64) +#define __NR_io_pgetevents_time64 416 +__SYSCALL(__NR_io_pgetevents_time64, sys_io_pgetevents) +#define __NR_recvmmsg_time64 417 +__SYSCALL(__NR_recvmmsg_time64, compat_sys_recvmmsg_time64) +#define __NR_mq_timedsend_time64 418 +__SYSCALL(__NR_mq_timedsend_time64, sys_mq_timedsend) +#define __NR_mq_timedreceive_time64 419 +__SYSCALL(__NR_mq_timedreceive_time64, sys_mq_timedreceive) +#define __NR_semtimedop_time64 420 +__SYSCALL(__NR_semtimedop_time64, sys_semtimedop) +#define __NR_rt_sigtimedwait_time64 421 +__SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64) +#define __NR_futex_time64 422 +__SYSCALL(__NR_futex_time64, sys_futex) +#define __NR_sched_rr_get_interval_time64 423 +__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval) /* * Please add new compat syscalls above this comment and update diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h index 28d77c9ed531..d78623acb649 100644 --- a/arch/arm64/include/uapi/asm/ptrace.h +++ b/arch/arm64/include/uapi/asm/ptrace.h @@ -233,6 +233,19 @@ struct user_pac_mask { __u64 insn_mask; }; +/* pointer authentication keys (NT_ARM_PACA_KEYS, NT_ARM_PACG_KEYS) */ + +struct user_pac_address_keys { + __uint128_t apiakey; + __uint128_t apibkey; + __uint128_t apdakey; + __uint128_t apdbkey; +}; + +struct user_pac_generic_keys { + __uint128_t apgakey; +}; + #endif /* __ASSEMBLY__ */ #endif /* _UAPI__ASM_PTRACE_H */ diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h index dae1584cf017..4703d218663a 100644 --- a/arch/arm64/include/uapi/asm/unistd.h +++ b/arch/arm64/include/uapi/asm/unistd.h @@ -17,5 +17,7 @@ #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS #include diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index 44e3c351e1ea..803f0494dd3e 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -27,8 +27,10 @@ #include #include +#include #include #include +#include #include #include @@ -256,3 +258,32 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr) return __pgprot(PROT_NORMAL_NC); return __pgprot(PROT_DEVICE_nGnRnE); 
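With the entries above, a compat (AArch32) task sees both the reworked *_time32 numbers and the new 64-bit time interface at numbers 400-423. A hedged, userspace-side illustration of calling the new interface directly from 32-bit code; 403 is __NR_clock_gettime64 from the table, the struct merely mirrors the kernel's __kernel_timespec layout, and the ex_ names are made up:

#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

struct ex_timespec64 {
        int64_t   tv_sec;
        long long tv_nsec;      /* 64-bit slot even on 32-bit ABIs */
};

static long ex_clock_gettime64(int clockid, struct ex_timespec64 *ts)
{
        /* Compiled as 32-bit code, 403 resolves to the table entry added above */
        return syscall(403 /* __NR_clock_gettime64 */, clockid, ts);
}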
} + +/* + * Claim Synchronous External Aborts as a firmware first notification. + * + * Used by KVM and the arch do_sea handler. + * @regs may be NULL when called from process context. + */ +int apei_claim_sea(struct pt_regs *regs) +{ + int err = -ENOENT; + unsigned long current_flags; + + if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) + return err; + + current_flags = arch_local_save_flags(); + + /* + * SEA can interrupt SError, mask it and describe this as an NMI so + * that APEI defers the handling. + */ + local_daif_restore(DAIF_ERRCTX); + nmi_enter(); + err = ghes_notify_sea(); + nmi_exit(); + local_daif_restore(current_flags); + + return err; +} diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index b5d603992d40..a9b467763153 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -32,13 +32,23 @@ #define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset) #define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset) -int alternatives_applied; +static int all_alternatives_applied; + +static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS); struct alt_region { struct alt_instr *begin; struct alt_instr *end; }; +bool alternative_is_applied(u16 cpufeature) +{ + if (WARN_ON(cpufeature >= ARM64_NCAPS)) + return false; + + return test_bit(cpufeature, applied_alternatives); +} + /* * Check if the target PC is within an alternative block. */ @@ -145,7 +155,8 @@ static void clean_dcache_range_nopatch(u64 start, u64 end) } while (cur += d_size, cur < end); } -static void __apply_alternatives(void *alt_region, bool is_module) +static void __apply_alternatives(void *alt_region, bool is_module, + unsigned long *feature_mask) { struct alt_instr *alt; struct alt_region *region = alt_region; @@ -155,6 +166,9 @@ static void __apply_alternatives(void *alt_region, bool is_module) for (alt = region->begin; alt < region->end; alt++) { int nr_inst; + if (!test_bit(alt->cpufeature, feature_mask)) + continue; + /* Use ARM64_CB_PATCH as an unconditional patch */ if (alt->cpufeature < ARM64_CB_PATCH && !cpus_have_cap(alt->cpufeature)) @@ -192,6 +206,12 @@ static void __apply_alternatives(void *alt_region, bool is_module) dsb(ish); __flush_icache_all(); isb(); + + /* Ignore ARM64_CB bit from feature mask */ + bitmap_or(applied_alternatives, applied_alternatives, + feature_mask, ARM64_NCAPS); + bitmap_and(applied_alternatives, applied_alternatives, + cpu_hwcaps, ARM64_NCAPS); } } @@ -208,14 +228,19 @@ static int __apply_alternatives_multi_stop(void *unused) /* We always have a CPU 0 at this point (__init) */ if (smp_processor_id()) { - while (!READ_ONCE(alternatives_applied)) + while (!READ_ONCE(all_alternatives_applied)) cpu_relax(); isb(); } else { - BUG_ON(alternatives_applied); - __apply_alternatives(®ion, false); + DECLARE_BITMAP(remaining_capabilities, ARM64_NPATCHABLE); + + bitmap_complement(remaining_capabilities, boot_capabilities, + ARM64_NPATCHABLE); + + BUG_ON(all_alternatives_applied); + __apply_alternatives(®ion, false, remaining_capabilities); /* Barriers provided by the cache flushing */ - WRITE_ONCE(alternatives_applied, 1); + WRITE_ONCE(all_alternatives_applied, 1); } return 0; @@ -227,6 +252,24 @@ void __init apply_alternatives_all(void) stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask); } +/* + * This is called very early in the boot process (directly after we run + * a feature detect on the boot CPU). No need to worry about other CPUs + * here. 
+ */ +void __init apply_boot_alternatives(void) +{ + struct alt_region region = { + .begin = (struct alt_instr *)__alt_instructions, + .end = (struct alt_instr *)__alt_instructions_end, + }; + + /* If called on non-boot cpu things could go wrong */ + WARN_ON(smp_processor_id() != 0); + + __apply_alternatives(®ion, false, &boot_capabilities[0]); +} + #ifdef CONFIG_MODULES void apply_alternatives_module(void *start, size_t length) { @@ -234,7 +277,10 @@ void apply_alternatives_module(void *start, size_t length) .begin = start, .end = start + length, }; + DECLARE_BITMAP(all_capabilities, ARM64_NPATCHABLE); + + bitmap_fill(all_capabilities, ARM64_NPATCHABLE); - __apply_alternatives(®ion, true); + __apply_alternatives(®ion, true, &all_capabilities[0]); } #endif diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 65b8afc84466..7f40dcbdd51d 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -53,13 +53,9 @@ int main(void) DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); BLANK(); DEFINE(S_X0, offsetof(struct pt_regs, regs[0])); - DEFINE(S_X1, offsetof(struct pt_regs, regs[1])); DEFINE(S_X2, offsetof(struct pt_regs, regs[2])); - DEFINE(S_X3, offsetof(struct pt_regs, regs[3])); DEFINE(S_X4, offsetof(struct pt_regs, regs[4])); - DEFINE(S_X5, offsetof(struct pt_regs, regs[5])); DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); - DEFINE(S_X7, offsetof(struct pt_regs, regs[7])); DEFINE(S_X8, offsetof(struct pt_regs, regs[8])); DEFINE(S_X10, offsetof(struct pt_regs, regs[10])); DEFINE(S_X12, offsetof(struct pt_regs, regs[12])); @@ -73,14 +69,11 @@ int main(void) DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); DEFINE(S_SP, offsetof(struct pt_regs, sp)); -#ifdef CONFIG_COMPAT - DEFINE(S_COMPAT_SP, offsetof(struct pt_regs, compat_sp)); -#endif DEFINE(S_PSTATE, offsetof(struct pt_regs, pstate)); DEFINE(S_PC, offsetof(struct pt_regs, pc)); - DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0)); DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); + DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save)); DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe)); DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); BLANK(); @@ -93,7 +86,6 @@ int main(void) BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); - DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); BLANK(); @@ -110,25 +102,18 @@ int main(void) BLANK(); DEFINE(VDSO_CS_CYCLE_LAST, offsetof(struct vdso_data, cs_cycle_last)); DEFINE(VDSO_RAW_TIME_SEC, offsetof(struct vdso_data, raw_time_sec)); - DEFINE(VDSO_RAW_TIME_NSEC, offsetof(struct vdso_data, raw_time_nsec)); DEFINE(VDSO_XTIME_CLK_SEC, offsetof(struct vdso_data, xtime_clock_sec)); - DEFINE(VDSO_XTIME_CLK_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); DEFINE(VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); DEFINE(VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); DEFINE(VDSO_WTM_CLK_SEC, offsetof(struct vdso_data, wtm_clock_sec)); - DEFINE(VDSO_WTM_CLK_NSEC, offsetof(struct vdso_data, wtm_clock_nsec)); DEFINE(VDSO_TB_SEQ_COUNT, offsetof(struct vdso_data, tb_seq_count)); DEFINE(VDSO_CS_MONO_MULT, offsetof(struct vdso_data, cs_mono_mult)); - DEFINE(VDSO_CS_RAW_MULT, offsetof(struct vdso_data, cs_raw_mult)); DEFINE(VDSO_CS_SHIFT, offsetof(struct vdso_data, cs_shift)); 
DEFINE(VDSO_TZ_MINWEST, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(VDSO_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime)); DEFINE(VDSO_USE_SYSCALL, offsetof(struct vdso_data, use_syscall)); BLANK(); DEFINE(TVAL_TV_SEC, offsetof(struct timeval, tv_sec)); - DEFINE(TVAL_TV_USEC, offsetof(struct timeval, tv_usec)); DEFINE(TSPEC_TV_SEC, offsetof(struct timespec, tv_sec)); - DEFINE(TSPEC_TV_NSEC, offsetof(struct timespec, tv_nsec)); BLANK(); DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest)); DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime)); @@ -142,13 +127,9 @@ int main(void) DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); - DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs)); - DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2])); - DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); #endif #ifdef CONFIG_CPU_PM - DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx)); DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask)); DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff)); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index f6d84e2c92fe..e24e94d28767 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -54,6 +54,9 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); EXPORT_SYMBOL(cpu_hwcaps); static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS]; +/* Need also bit for ARM64_CB_PATCH */ +DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE); + /* * Flag to indicate if we have computed the system wide * capabilities based on the boot time active CPUs. This @@ -1118,7 +1121,7 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to * do anything here. 
*/ - if (!alternatives_applied) + if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN)) write_sysreg(read_sysreg(tpidr_el1), tpidr_el2); } #endif @@ -1203,11 +1206,27 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap) } #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PSEUDO_NMI +static bool enable_pseudo_nmi; + +static int __init early_enable_pseudo_nmi(char *p) +{ + return strtobool(p, &enable_pseudo_nmi); +} +early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi); + +static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry, + int scope) +{ + return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope); +} +#endif + static const struct arm64_cpu_capabilities arm64_features[] = { { .desc = "GIC system register CPU interface", .capability = ARM64_HAS_SYSREG_GIC_CPUIF, - .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, .matches = has_useable_gicv3_cpuif, .sys_reg = SYS_ID_AA64PFR0_EL1, .field_pos = ID_AA64PFR0_GIC_SHIFT, @@ -1480,6 +1499,21 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, }, #endif /* CONFIG_ARM64_PTR_AUTH */ +#ifdef CONFIG_ARM64_PSEUDO_NMI + { + /* + * Depends on having GICv3 + */ + .desc = "IRQ priority masking", + .capability = ARM64_HAS_IRQ_PRIO_MASKING, + .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE, + .matches = can_use_gic_priorities, + .sys_reg = SYS_ID_AA64PFR0_EL1, + .field_pos = ID_AA64PFR0_GIC_SHIFT, + .sign = FTR_UNSIGNED, + .min_field_value = 1, + }, +#endif {}, }; @@ -1654,6 +1688,9 @@ static void update_cpu_capabilities(u16 scope_mask) if (caps->desc) pr_info("detected: %s\n", caps->desc); cpus_set_cap(caps->capability); + + if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU)) + set_bit(caps->capability, boot_capabilities); } } diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 0ec0c46b2c0c..c50a7a75f2e0 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -185,7 +185,7 @@ alternative_cb_end .else add x21, sp, #S_FRAME_SIZE - get_thread_info tsk + get_current_task tsk /* Save the task's original addr_limit and set USER_DS */ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] str x20, [sp, #S_ORIG_ADDR_LIMIT] @@ -249,6 +249,12 @@ alternative_else_nop_endif msr sp_el0, tsk .endif + /* Save pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + mrs_s x20, SYS_ICC_PMR_EL1 + str x20, [sp, #S_PMR_SAVE] +alternative_else_nop_endif + /* * Registers that may be useful after this macro is invoked: * @@ -269,6 +275,14 @@ alternative_else_nop_endif /* No need to restore UAO, it will be restored from SPSR_EL1 */ .endif + /* Restore pmr */ +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + ldr x20, [sp, #S_PMR_SAVE] + msr_s SYS_ICC_PMR_EL1, x20 + /* Ensure priority change is seen by redistributor */ + dsb sy +alternative_else_nop_endif + ldp x21, x22, [sp, #S_PC] // load ELR, SPSR .if \el == 0 ct_user_enter @@ -603,32 +617,52 @@ el1_irq: kernel_entry 1 enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_ARM64_PSEUDO_NMI +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + ldr x20, [sp, #S_PMR_SAVE] +alternative_else + mov x20, #GIC_PRIO_IRQON +alternative_endif + cmp x20, #GIC_PRIO_IRQOFF + /* Irqs were disabled, don't trace */ + b.ls 1f +#endif bl trace_hardirqs_off +1: #endif irq_handler #ifdef CONFIG_PREEMPT ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count - cbnz x24, 1f // preempt count != 0 - bl el1_preempt +alternative_if ARM64_HAS_IRQ_PRIO_MASKING + /* + * DA_F were cleared at 
start of handling. If anything is set in DAIF, + * we come back from an NMI, so skip preemption + */ + mrs x0, daif + orr x24, x24, x0 +alternative_else_nop_endif + cbnz x24, 1f // preempt count != 0 || NMI return path + bl preempt_schedule_irq // irq en/disable is done inside 1: #endif #ifdef CONFIG_TRACE_IRQFLAGS +#ifdef CONFIG_ARM64_PSEUDO_NMI + /* + * if IRQs were disabled when we received the interrupt, we have an NMI + * and we are not re-enabling interrupt upon eret. Skip tracing. + */ + cmp x20, #GIC_PRIO_IRQOFF + b.ls 1f +#endif bl trace_hardirqs_on +1: #endif + kernel_exit 1 ENDPROC(el1_irq) -#ifdef CONFIG_PREEMPT -el1_preempt: - mov x24, lr -1: bl preempt_schedule_irq // irq en/disable is done inside - ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS - tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? - ret x24 -#endif - /* * EL0 mode handlers. */ @@ -1070,7 +1104,7 @@ ENTRY(ret_from_fork) cbz x19, 1f // not a kernel thread mov x0, x20 blr x19 -1: get_thread_info tsk +1: get_current_task tsk b ret_to_user ENDPROC(ret_from_fork) NOKPROBE(ret_from_fork) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 15d79a8e5e5e..eecf7927dab0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -539,8 +539,7 @@ set_hcr: /* GICv3 system register access */ mrs x0, id_aa64pfr0_el1 ubfx x0, x0, #24, #4 - cmp x0, #1 - b.ne 3f + cbz x0, 3f mrs_s x0, SYS_ICC_SRE_EL2 orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 29cdc99688f3..9859e1178e6b 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -299,8 +299,10 @@ int swsusp_arch_suspend(void) dcache_clean_range(__idmap_text_start, __idmap_text_end); /* Clean kvm setup code to PoC? */ - if (el2_reset_needed()) + if (el2_reset_needed()) { dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); + dcache_clean_range(__hyp_text_start, __hyp_text_end); + } /* make the crash dump kernel image protected again */ crash_post_resume(); diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index e1261fbaa374..17f325ba831e 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S @@ -28,6 +28,8 @@ #include .text + .pushsection .hyp.text, "ax" + .align 11 ENTRY(__hyp_stub_vectors) diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 780a12f59a8f..92fa81798fb9 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -33,6 +33,9 @@ unsigned long irq_err_count; +/* Only access this in an NMI enter/exit */ +DEFINE_PER_CPU(struct nmi_ctx, nmi_contexts); + DEFINE_PER_CPU(unsigned long *, irq_stack_ptr); int arch_show_interrupts(struct seq_file *p, int prec) diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index ba6b41790fcd..b09b6f75f759 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * we end up running with module randomization disabled. */ module_alloc_base = (u64)_etext - MODULES_VSIZE; + __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); /* * Try to map the FDT early. 
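The el1_irq changes above use the saved PMR value to tell a pseudo-NMI apart from an ordinary interrupt: the 'b.ls' compare skips irq tracing (and the DAIF check skips preemption) when PMR was already at or below GIC_PRIO_IRQOFF on entry. Roughly, as plain C (illustrative helper, assuming the pt_regs/pmr_save definitions from this series):

static inline bool ex_entered_as_nmi(const struct pt_regs *regs)
{
        /* 'b.ls' above is an unsigned <= compare against GIC_PRIO_IRQOFF */
        return system_uses_irq_prio_masking() &&
               regs->pmr_save <= GIC_PRIO_IRQOFF;
}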
If this fails, we simply bail, diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index ce46c4cdf368..691854b77c7f 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c @@ -244,27 +244,33 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_brk_fn) static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + compiled_break = 1; kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) { - if (!kgdb_single_step) + if (user_mode(regs) || !kgdb_single_step) return DBG_HOOK_ERROR; kgdb_handle_exception(1, SIGTRAP, 0, regs); - return 0; + return DBG_HOOK_HANDLED; } NOKPROBE_SYMBOL(kgdb_step_brk_fn); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index aa9c94113700..66b5d697d943 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -321,7 +321,7 @@ void crash_post_resume(void) * but does not hold any data of loaded kernel image. * * Note that all the pages in crash dump kernel memory have been initially - * marked as Reserved in kexec_reserve_crashkres_pages(). + * marked as Reserved as memory was allocated via memblock_reserve(). * * In hibernation, the pages which are Reserved and yet "nosave" are excluded * from the hibernation iamge. crash_is_nosave() does thich check for crash @@ -361,7 +361,6 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) for (addr = begin; addr < end; addr += PAGE_SIZE) { page = phys_to_page(addr); - ClearPageReserved(page); free_reserved_page(page); } } diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index f2c211a6229b..58871333737a 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -120,10 +120,12 @@ static int create_dtb(struct kimage *image, { void *buf; size_t buf_size; + size_t cmdline_len; int ret; + cmdline_len = cmdline ? strlen(cmdline) : 0; buf_size = fdt_totalsize(initial_boot_params) - + strlen(cmdline) + DTB_EXTRA_SPACE; + + cmdline_len + DTB_EXTRA_SPACE; for (;;) { buf = vmalloc(buf_size); diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1620a371b1f5..4addb38bc250 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -810,7 +810,7 @@ static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc, } /* - * Add an event filter to a given event. This will only work for PMUv2 PMUs. + * Add an event filter to a given event. 
*/ static int armv8pmu_set_event_filter(struct hw_perf_event *event, struct perf_event_attr *attr) diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 2a5b338b2542..7fb6f3aa5ceb 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -450,6 +450,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); int retval; + if (user_mode(regs)) + return DBG_HOOK_ERROR; + /* return error if this is not our step */ retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); @@ -466,6 +469,9 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) int __kprobes kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) { + if (user_mode(regs)) + return DBG_HOOK_ERROR; + kprobe_handler(regs); return DBG_HOOK_HANDLED; } @@ -478,13 +484,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr) addr < (unsigned long)__entry_text_end) || (addr >= (unsigned long)__idmap_text_start && addr < (unsigned long)__idmap_text_end) || + (addr >= (unsigned long)__hyp_text_start && + addr < (unsigned long)__hyp_text_end) || !!search_exception_tables(addr)) return true; if (!is_kernel_in_hyp_mode()) { - if ((addr >= (unsigned long)__hyp_text_start && - addr < (unsigned long)__hyp_text_end) || - (addr >= (unsigned long)__hyp_idmap_text_start && + if ((addr >= (unsigned long)__hyp_idmap_text_start && addr < (unsigned long)__hyp_idmap_text_end)) return true; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index a0f985a6ac50..3767fb21a5b8 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -51,6 +51,7 @@ #include #include +#include #include #include #include @@ -74,6 +75,50 @@ EXPORT_SYMBOL_GPL(pm_power_off); void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); +static void __cpu_do_idle(void) +{ + dsb(sy); + wfi(); +} + +static void __cpu_do_idle_irqprio(void) +{ + unsigned long pmr; + unsigned long daif_bits; + + daif_bits = read_sysreg(daif); + write_sysreg(daif_bits | PSR_I_BIT, daif); + + /* + * Unmask PMR before going idle to make sure interrupts can + * be raised. + */ + pmr = gic_read_pmr(); + gic_write_pmr(GIC_PRIO_IRQON); + + __cpu_do_idle(); + + gic_write_pmr(pmr); + write_sysreg(daif_bits, daif); +} + +/* + * cpu_do_idle() + * + * Idle the processor (wait for interrupt). + * + * If the CPU supports priority masking we must do additional work to + * ensure that interrupts are not masked at the PMR (because the core will + * not wake up if we block the wake up signal in the interrupt controller). + */ +void cpu_do_idle(void) +{ + if (system_uses_irq_prio_masking()) + __cpu_do_idle_irqprio(); + else + __cpu_do_idle(); +} + /* * This is our default idle handler. 
*/ @@ -232,6 +277,9 @@ void __show_regs(struct pt_regs *regs) printk("sp : %016llx\n", sp); + if (system_uses_irq_prio_masking()) + printk("pmr_save: %08llx\n", regs->pmr_save); + i = top_reg; while (i >= 0) { @@ -363,6 +411,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start, if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) childregs->pstate |= PSR_SSBS_BIT; + if (system_uses_irq_prio_masking()) + childregs->pmr_save = GIC_PRIO_IRQON; + p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9dce33b0e260..b82e0a9b3da3 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -979,6 +979,131 @@ static int pac_mask_get(struct task_struct *target, return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1); } + +#ifdef CONFIG_CHECKPOINT_RESTORE +static __uint128_t pac_key_to_user(const struct ptrauth_key *key) +{ + return (__uint128_t)key->hi << 64 | key->lo; +} + +static struct ptrauth_key pac_key_from_user(__uint128_t ukey) +{ + struct ptrauth_key key = { + .lo = (unsigned long)ukey, + .hi = (unsigned long)(ukey >> 64), + }; + + return key; +} + +static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys, + const struct ptrauth_keys *keys) +{ + ukeys->apiakey = pac_key_to_user(&keys->apia); + ukeys->apibkey = pac_key_to_user(&keys->apib); + ukeys->apdakey = pac_key_to_user(&keys->apda); + ukeys->apdbkey = pac_key_to_user(&keys->apdb); +} + +static void pac_address_keys_from_user(struct ptrauth_keys *keys, + const struct user_pac_address_keys *ukeys) +{ + keys->apia = pac_key_from_user(ukeys->apiakey); + keys->apib = pac_key_from_user(ukeys->apibkey); + keys->apda = pac_key_from_user(ukeys->apdakey); + keys->apdb = pac_key_from_user(ukeys->apdbkey); +} + +static int pac_address_keys_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_address_keys user_keys; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); +} + +static int pac_address_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_address_keys user_keys; + int ret; + + if (!system_supports_address_auth()) + return -EINVAL; + + pac_address_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_address_keys_from_user(keys, &user_keys); + + return 0; +} + +static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys, + const struct ptrauth_keys *keys) +{ + ukeys->apgakey = pac_key_to_user(&keys->apga); +} + +static void pac_generic_keys_from_user(struct ptrauth_keys *keys, + const struct user_pac_generic_keys *ukeys) +{ + keys->apga = pac_key_from_user(ukeys->apgakey); +} + +static int pac_generic_keys_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_generic_keys user_keys; + + if (!system_supports_generic_auth()) + return -EINVAL; 
+ + pac_generic_keys_to_user(&user_keys, keys); + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); +} + +static int pac_generic_keys_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct ptrauth_keys *keys = &target->thread.keys_user; + struct user_pac_generic_keys user_keys; + int ret; + + if (!system_supports_generic_auth()) + return -EINVAL; + + pac_generic_keys_to_user(&user_keys, keys); + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &user_keys, 0, -1); + if (ret) + return ret; + pac_generic_keys_from_user(keys, &user_keys); + + return 0; +} +#endif /* CONFIG_CHECKPOINT_RESTORE */ #endif /* CONFIG_ARM64_PTR_AUTH */ enum aarch64_regset { @@ -995,6 +1120,10 @@ enum aarch64_regset { #endif #ifdef CONFIG_ARM64_PTR_AUTH REGSET_PAC_MASK, +#ifdef CONFIG_CHECKPOINT_RESTORE + REGSET_PACA_KEYS, + REGSET_PACG_KEYS, +#endif #endif }; @@ -1074,6 +1203,24 @@ static const struct user_regset aarch64_regsets[] = { .get = pac_mask_get, /* this cannot be set dynamically */ }, +#ifdef CONFIG_CHECKPOINT_RESTORE + [REGSET_PACA_KEYS] = { + .core_note_type = NT_ARM_PACA_KEYS, + .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .get = pac_address_keys_get, + .set = pac_address_keys_set, + }, + [REGSET_PACG_KEYS] = { + .core_note_type = NT_ARM_PACG_KEYS, + .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t), + .size = sizeof(__uint128_t), + .align = sizeof(__uint128_t), + .get = pac_generic_keys_get, + .set = pac_generic_keys_set, + }, +#endif #endif }; @@ -1702,19 +1849,20 @@ void syscall_trace_exit(struct pt_regs *regs) } /* - * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a - * We also take into account DIT (bit 24), which is not yet documented, and - * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be - * allocated an EL0 meaning in future. + * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. + * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is + * not described in ARM DDI 0487D.a. + * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may + * be allocated an EL0 meaning in future. * Userspace cannot use these until they have an architectural meaning. * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. * We also reserve IL for the kernel; SS is handled dynamically. 
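The two regsets added above expose the pointer-authentication keys to ptrace for checkpoint/restore. A hedged userspace-side sketch of reading the address keys; it assumes a kernel and <linux/elf.h> that define NT_ARM_PACA_KEYS as registered here, and the ex_ struct simply mirrors user_pac_address_keys:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <linux/elf.h>

struct ex_pac_address_keys {
        __uint128_t apiakey;
        __uint128_t apibkey;
        __uint128_t apdakey;
        __uint128_t apdbkey;
};

static long ex_read_pac_address_keys(pid_t pid, struct ex_pac_address_keys *keys)
{
        struct iovec iov = {
                .iov_base = keys,
                .iov_len  = sizeof(*keys),
        };

        /* Tracee must be stopped; fails with EINVAL if address auth is absent */
        return ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_PACA_KEYS, &iov);
}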
*/ #define SPSR_EL1_AARCH64_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ - GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ + GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5)) #define SPSR_EL1_AARCH32_RES0_BITS \ - (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) + (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) static int valid_compat_regs(struct user_pt_regs *regs) { diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..f8482fe5a190 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -58,7 +58,6 @@ #include #include #include -#include #include #include #include @@ -209,6 +208,7 @@ static void __init request_standard_resources(void) struct memblock_region *region; struct resource *res; unsigned long i = 0; + size_t res_size; kernel_code.start = __pa_symbol(_text); kernel_code.end = __pa_symbol(__init_begin - 1); @@ -216,9 +216,10 @@ static void __init request_standard_resources(void) kernel_data.end = __pa_symbol(_end - 1); num_standard_resources = memblock.memory.cnt; - standard_resources = memblock_alloc_low(num_standard_resources * - sizeof(*standard_resources), - SMP_CACHE_BYTES); + res_size = num_standard_resources * sizeof(*standard_resources); + standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES); + if (!standard_resources) + panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); for_each_memblock(memory, region) { res = &standard_resources[i++]; @@ -313,7 +314,6 @@ void __init setup_arch(char **cmdline_p) arm64_memblock_init(); paging_init(); - efi_apply_persistent_mem_reservations(); acpi_table_upgrade(); @@ -340,6 +340,9 @@ void __init setup_arch(char **cmdline_p) smp_init_cpus(); smp_build_mpidr_hash(); + /* Init percpu seeds for random tags after cpus are set up. */ + kasan_init_tags(); + #ifdef CONFIG_ARM64_SW_TTBR0_PAN /* * Make sure init_thread_info.ttbr0 always generates translation diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 1598d6f7200a..824de7038967 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -180,6 +181,24 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) return ret; } +static void init_gic_priority_masking(void) +{ + u32 cpuflags; + + if (WARN_ON(!gic_enable_sre())) + return; + + cpuflags = read_sysreg(daif); + + WARN_ON(!(cpuflags & PSR_I_BIT)); + + gic_write_pmr(GIC_PRIO_IRQOFF); + + /* We can only unmask PSR.I if we can take aborts */ + if (!(cpuflags & PSR_A_BIT)) + write_sysreg(cpuflags & ~PSR_I_BIT, daif); +} + /* * This is the secondary CPU boot entry. We're using this CPUs * idle thread stack, but a set of temporary page tables. @@ -206,6 +225,9 @@ asmlinkage notrace void secondary_start_kernel(void) */ cpu_uninstall_idmap(); + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); + preempt_disable(); trace_hardirqs_off(); @@ -419,6 +441,17 @@ void __init smp_prepare_boot_cpu(void) */ jump_label_init(); cpuinfo_store_boot_cpu(); + + /* + * We now know enough about the boot CPU to apply the + * alternatives that cannot wait until interrupt handling + * and/or scheduling is enabled. 
+ */ + apply_boot_alternatives(); + + /* Conditionally switch to GIC PMR for interrupt masking */ + if (system_uses_irq_prio_masking()) + init_gic_priority_masking(); } static u64 __init of_get_cpu_mpidr(struct device_node *dn) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 4e2fb877f8d5..8ad119c3f665 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -898,13 +898,17 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr) asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr) { - nmi_enter(); + const bool was_in_nmi = in_nmi(); + + if (!was_in_nmi) + nmi_enter(); /* non-RAS errors are not containable */ if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr)) arm64_serror_panic(regs, esr); - nmi_exit(); + if (!was_in_nmi) + nmi_exit(); } void __pte_error(const char *file, int line, unsigned long val) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..3563fe655cd5 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -22,7 +22,9 @@ #include +#include #include +#include #include #include #include @@ -107,6 +109,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) write_sysreg(kvm_get_hyp_vector(), vbar_el1); } +NOKPROBE_SYMBOL(activate_traps_vhe); static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) { @@ -154,6 +157,7 @@ static void deactivate_traps_vhe(void) write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); write_sysreg(vectors, vbar_el1); } +NOKPROBE_SYMBOL(deactivate_traps_vhe); static void __hyp_text __deactivate_traps_nvhe(void) { @@ -513,6 +517,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) return exit_code; } +NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); /* Switch to the guest for legacy non-VHE systems */ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) @@ -521,6 +526,17 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) struct kvm_cpu_context *guest_ctxt; u64 exit_code; + /* + * Having IRQs masked via PMR when entering the guest means the GIC + * will not signal the CPU of interrupts of lower priority, and the + * only way to get out will be via guest exceptions. + * Naturally, we want to avoid this. 
+ */ + if (system_uses_irq_prio_masking()) { + gic_write_pmr(GIC_PRIO_IRQON); + dsb(sy); + } + vcpu = kern_hyp_va(vcpu); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); @@ -573,6 +589,10 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) */ __debug_switch_to_host(vcpu); + /* Returning to host will clear PSR.I, remask PMR if needed */ + if (system_uses_irq_prio_masking()) + gic_write_pmr(GIC_PRIO_IRQOFF); + return exit_code; } @@ -620,6 +640,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, read_sysreg_el2(esr), read_sysreg_el2(far), read_sysreg(hpfar_el2), par, vcpu); } +NOKPROBE_SYMBOL(__hyp_call_panic_vhe); void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) { diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 68d6f7c3b237..b426e2cf973c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include @@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_host_state_vhe); void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_save_common_state(ctxt); __sysreg_save_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe); static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) { @@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe); void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) { __sysreg_restore_common_state(ctxt); __sysreg_restore_el2_return_state(ctxt); } +NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe); void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) { diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index b72a3dd56204..f16a5f8ff2b4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -32,6 +32,7 @@ #include #include #include +#include #include /* Maximum phys_shift supported for any VM on this host */ @@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) * This function finds the right table above and sets the registers on * the virtual CPU struct to their architecturally defined reset * values. + * + * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT + * ioctl or as part of handling a request issued by another VCPU in the PSCI + * handling code. In the first case, the VCPU will not be loaded, and in the + * second case the VCPU will be loaded. Because this function operates purely + * on the memory-backed values of system registers, we want to do a full put if + * we were loaded (handling a request) and load the values back at the end of + * the function. Otherwise we leave the state alone. In both cases, we + * disable preemption around the vcpu reset as we would otherwise race with + * preempt notifiers which also call put/load.
*/ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) { const struct kvm_regs *cpu_reset; + int ret = -EINVAL; + bool loaded; + + preempt_disable(); + loaded = (vcpu->cpu != -1); + if (loaded) + kvm_arch_vcpu_put(vcpu); switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { if (!cpu_has_32bit_el1()) - return -EINVAL; + goto out; cpu_reset = &default_regs_reset32; } else { cpu_reset = &default_regs_reset; @@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset system registers */ kvm_reset_sys_regs(vcpu); + /* + * Additional reset state handling that PSCI may have imposed on us. + * Must be done after all the sys_reg reset. + */ + if (vcpu->arch.reset_state.reset) { + unsigned long target_pc = vcpu->arch.reset_state.pc; + + /* Gracefully handle Thumb2 entry point */ + if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { + target_pc &= ~1UL; + vcpu_set_thumb(vcpu); + } + + /* Propagate caller endianness */ + if (vcpu->arch.reset_state.be) + kvm_vcpu_set_be(vcpu); + + *vcpu_pc(vcpu) = target_pc; + vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); + + vcpu->arch.reset_state.reset = false; + } + /* Reset PMU */ kvm_pmu_vcpu_reset(vcpu); @@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; /* Reset timer */ - return kvm_timer_vcpu_reset(vcpu); + ret = kvm_timer_vcpu_reset(vcpu); +out: + if (loaded) + kvm_arch_vcpu_load(vcpu, smp_processor_id()); + preempt_enable(); + return ret; } void kvm_set_ipa_limit(void) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3e37228ae4e..c936aa40c3f4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, return read_zero(vcpu, p); } -static bool trap_undef(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) +/* + * ARMv8.1 mandates at least a trivial LORegion implementation, where all the + * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 + * system, these registers should UNDEF. LORID_EL1 being a RO register, we + * treat it separately. 
+ */ +static bool trap_loregion(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) { - kvm_inject_undefined(vcpu); - return false; + u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1, + (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); + + if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { + kvm_inject_undefined(vcpu); + return false; + } + + if (p->is_write && sr == SYS_LORID_EL1) + return write_to_read_only(vcpu, p, r); + + return trap_raz_wi(vcpu, p, r); } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, @@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) if (val & ptrauth_mask) kvm_debug("ptrauth unsupported for guests, suppressing\n"); val &= ~ptrauth_mask; - } else if (id == SYS_ID_AA64MMFR1_EL1) { - if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) - kvm_debug("LORegions unsupported for guests, suppressing\n"); - - val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); } return val; @@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, - { SYS_DESC(SYS_LORSA_EL1), trap_undef }, - { SYS_DESC(SYS_LOREA_EL1), trap_undef }, - { SYS_DESC(SYS_LORN_EL1), trap_undef }, - { SYS_DESC(SYS_LORC_EL1), trap_undef }, - { SYS_DESC(SYS_LORID_EL1), trap_undef }, + { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, + { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, + { SYS_DESC(SYS_LORN_EL1), trap_loregion }, + { SYS_DESC(SYS_LORC_EL1), trap_loregion }, + { SYS_DESC(SYS_LORID_EL1), trap_loregion }, { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, @@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) table = get_target_table(vcpu->arch.target, true, &num); reset_sys_reg_descs(vcpu, table, num); - for (num = 1; num < NR_SYS_REGS; num++) - if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) - panic("Didn't reset __vcpu_sys_reg(%zi)", num); + for (num = 1; num < NR_SYS_REGS; num++) { + if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, + "Didn't reset __vcpu_sys_reg(%zi)\n", num)) + break; + } } diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index fcb1f2a6d7c6..14fe23cd5932 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, } -static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) +static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, + unsigned long end) { - pte_t *ptep = pte_offset_kernel(pmdp, 0UL); - unsigned long addr; - unsigned i; + unsigned long addr = start; + pte_t *ptep = pte_offset_kernel(pmdp, start); - for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { - addr = start + i * PAGE_SIZE; + do { note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); - } + } while (ptep++, addr += PAGE_SIZE, addr != end); } -static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) +static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, + unsigned long end) { - pmd_t *pmdp = pmd_offset(pudp, 0UL); - unsigned long addr; - unsigned i; + unsigned long next, addr = start; + pmd_t *pmdp = pmd_offset(pudp, start); - for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { + do { pmd_t pmd = READ_ONCE(*pmdp); + next = pmd_addr_end(addr, end); - addr = start + i * PMD_SIZE; if (pmd_none(pmd) || pmd_sect(pmd)) { 
note_page(st, addr, 3, pmd_val(pmd)); } else { BUG_ON(pmd_bad(pmd)); - walk_pte(st, pmdp, addr); + walk_pte(st, pmdp, addr, next); } - } + } while (pmdp++, addr = next, addr != end); } -static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) +static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, + unsigned long end) { - pud_t *pudp = pud_offset(pgdp, 0UL); - unsigned long addr; - unsigned i; + unsigned long next, addr = start; + pud_t *pudp = pud_offset(pgdp, start); - for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { + do { pud_t pud = READ_ONCE(*pudp); + next = pud_addr_end(addr, end); - addr = start + i * PUD_SIZE; if (pud_none(pud) || pud_sect(pud)) { note_page(st, addr, 2, pud_val(pud)); } else { BUG_ON(pud_bad(pud)); - walk_pmd(st, pudp, addr); + walk_pmd(st, pudp, addr, next); } - } + } while (pudp++, addr = next, addr != end); } static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start) { - pgd_t *pgdp = pgd_offset(mm, 0UL); - unsigned i; - unsigned long addr; + unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0; + unsigned long next, addr = start; + pgd_t *pgdp = pgd_offset(mm, start); - for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { + do { pgd_t pgd = READ_ONCE(*pgdp); + next = pgd_addr_end(addr, end); - addr = start + i * PGDIR_SIZE; if (pgd_none(pgd)) { note_page(st, addr, 1, pgd_val(pgd)); } else { BUG_ON(pgd_bad(pgd)); - walk_pud(st, pgdp, addr); + walk_pud(st, pgdp, addr, next); } - } + } while (pgdp++, addr = next, addr != end); } void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) @@ -407,7 +406,7 @@ void ptdump_check_wx(void) static int ptdump_init(void) { ptdump_initialize(); - return ptdump_debugfs_register(&kernel_ptdump_info, - "kernel_page_tables"); + ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); + return 0; } device_initcall(ptdump_init); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index efb7b2cbead5..1a7e92ab69eb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -18,6 +18,7 @@ * along with this program. If not, see . */ +#include #include #include #include @@ -33,6 +34,7 @@ #include #include +#include #include #include #include @@ -47,8 +49,6 @@ #include #include -#include - struct fault_info { int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); @@ -643,19 +643,10 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) inf = esr_to_fault_info(esr); /* - * Synchronous aborts may interrupt code which had interrupts masked. - * Before calling out into the wider kernel tell the interested - * subsystems. + * Return value ignored as we rely on signal merging. + * Future patches will make this more robust. 
*/ - if (IS_ENABLED(CONFIG_ACPI_APEI_SEA)) { - if (interrupts_enabled(regs)) - nmi_enter(); - - ghes_notify_sea(); - - if (interrupts_enabled(regs)) - nmi_exit(); - } + apei_claim_sea(regs); if (esr & ESR_ELx_FnV) siaddr = NULL; @@ -733,11 +724,6 @@ static const struct fault_info fault_info[] = { { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, }; -int handle_guest_sea(phys_addr_t addr, unsigned int esr) -{ - return ghes_notify_sea(); -} - asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -824,11 +810,12 @@ void __init hook_debug_fault_code(int nr, debug_fault_info[nr].name = name; } -asmlinkage int __exception do_debug_exception(unsigned long addr, +asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, struct pt_regs *regs) { const struct fault_info *inf = esr_to_debug_fault_info(esr); + unsigned long pc = instruction_pointer(regs); int rv; /* @@ -838,14 +825,14 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, if (interrupts_enabled(regs)) trace_hardirqs_off(); - if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs))) + if (user_mode(regs) && !is_ttbr0_addr(pc)) arm64_apply_bp_hardening(); - if (!inf->fn(addr, esr, regs)) { + if (!inf->fn(addr_if_watchpoint, esr, regs)) { rv = 1; } else { arm64_notify_die(inf->name, regs, - inf->sig, inf->code, (void __user *)addr, esr); + inf->sig, inf->code, (void __user *)pc, esr); rv = 0; } diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 30695a868107..5c9073bace83 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c @@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len) __clean_dcache_area_pou(kaddr, len); __flush_icache_all(); } else { - flush_icache_range(addr, addr + len); + /* + * Don't issue kick_all_cpus_sync() after I-cache invalidation + * for user mappings. + */ + __flush_icache_range(addr, addr + len); } } diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 28cbc22d7e30..6b4a47b3adf4 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -27,6 +27,26 @@ #include #include +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION +bool arch_hugetlb_migration_supported(struct hstate *h) +{ + size_t pagesize = huge_page_size(h); + + switch (pagesize) { +#ifdef CONFIG_ARM64_4K_PAGES + case PUD_SIZE: +#endif + case PMD_SIZE: + case CONT_PMD_SIZE: + case CONT_PTE_SIZE: + return true; + } + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + return false; +} +#endif + int pmd_huge(pmd_t pmd) { return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 7205a9085b4d..6bc135042f5e 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -118,35 +118,10 @@ static void __init reserve_crashkernel(void) crashk_res.start = crash_base; crashk_res.end = crash_base + crash_size - 1; } - -static void __init kexec_reserve_crashkres_pages(void) -{ -#ifdef CONFIG_HIBERNATION - phys_addr_t addr; - struct page *page; - - if (!crashk_res.end) - return; - - /* - * To reduce the size of hibernation image, all the pages are - * marked as Reserved initially. 
- */ - for (addr = crashk_res.start; addr < (crashk_res.end + 1); - addr += PAGE_SIZE) { - page = phys_to_page(addr); - SetPageReserved(page); - } -#endif -} #else static void __init reserve_crashkernel(void) { } - -static void __init kexec_reserve_crashkres_pages(void) -{ -} #endif /* CONFIG_KEXEC_CORE */ #ifdef CONFIG_CRASH_DUMP @@ -285,24 +260,6 @@ int pfn_valid(unsigned long pfn) } EXPORT_SYMBOL(pfn_valid); -#ifndef CONFIG_SPARSEMEM -static void __init arm64_memory_present(void) -{ -} -#else -static void __init arm64_memory_present(void) -{ - struct memblock_region *reg; - - for_each_memblock(memory, reg) { - int nid = memblock_get_region_node(reg); - - memory_present(nid, memblock_region_memory_base_pfn(reg), - memblock_region_memory_end_pfn(reg)); - } -} -#endif - static phys_addr_t memory_limit = PHYS_ADDR_MAX; /* @@ -489,7 +446,7 @@ void __init bootmem_init(void) * Sparsemem tries to allocate bootmem in memory_present(), so must be * done after the fixed reservations. */ - arm64_memory_present(); + memblocks_present(); sparse_init(); zone_sizes_init(min, max); @@ -586,8 +543,6 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ memblock_free_all(); - kexec_reserve_crashkres_pages(); - mem_init_print_info(NULL); /* diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 4b55b15707a3..296de39ddee5 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -40,6 +40,11 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_KASAN, node); + if (!p) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", + __func__, PAGE_SIZE, PAGE_SIZE, node, + __pa(MAX_DMA_ADDRESS)); + return __pa(p); } @@ -48,6 +53,11 @@ static phys_addr_t __init kasan_alloc_raw_page(int node) void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_KASAN, node); + if (!p) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", + __func__, PAGE_SIZE, PAGE_SIZE, node, + __pa(MAX_DMA_ADDRESS)); + return __pa(p); } @@ -252,8 +262,6 @@ void __init kasan_init(void) memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); - kasan_init_tags(); - /* At this point kasan is fully initialized. Enable error messages */ init_task.kasan_depth = 0; pr_info("KernelAddressSanitizer initialized\n"); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b6f5aa52ac67..e97f018ff740 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include @@ -104,6 +103,8 @@ static phys_addr_t __init early_pgtable_alloc(void) void *ptr; phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + if (!phys) + panic("Failed to allocate page table page\n"); /* * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE @@ -655,10 +656,6 @@ static void __init map_kernel(pgd_t *pgdp) kasan_copy_shadow(pgdp); } -/* - * paging_init() sets up the page tables, initialises the zone memory - * maps and sets up the zero page. 
- */ void __init paging_init(void) { pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir)); diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index ae34e3a1cef1..06a6f264f2dd 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -120,7 +120,7 @@ static void __init setup_node_to_cpumask_map(void) } /* cpumask_of_node() will now work */ - pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); + pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } /* @@ -237,6 +237,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) pr_info("Initmem setup node %d []\n", nid); nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + if (!nd_pa) + panic("Cannot allocate %zu bytes for node %d data\n", + nd_size, nid); + nd = __va(nd_pa); /* report and initialize */ diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 73886a5f1f30..aa0817c9c4c3 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -55,17 +55,6 @@ #define MAIR(attr, mt) ((attr) << ((mt) * 8)) -/* - * cpu_do_idle() - * - * Idle the processor (wait for interrupt). - */ -ENTRY(cpu_do_idle) - dsb sy // WFI may enter a low-power mode - wfi - ret -ENDPROC(cpu_do_idle) - #ifdef CONFIG_CPU_PM /** * cpu_do_suspend - save CPU registers context @@ -456,6 +445,7 @@ ENTRY(__cpu_setup) ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS + tcr_clear_errata_bits x10, x9, x5 #ifdef CONFIG_ARM64_USER_VA_BITS_52 ldr_l x9, vabits_user diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c index 24d786fc3a4c..064163f25592 100644 --- a/arch/arm64/mm/ptdump_debugfs.c +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -12,10 +12,7 @@ static int ptdump_show(struct seq_file *m, void *v) } DEFINE_SHOW_ATTRIBUTE(ptdump); -int ptdump_debugfs_register(struct ptdump_info *info, const char *name) +void ptdump_debugfs_register(struct ptdump_info *info, const char *name) { - struct dentry *pe; - pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); - return pe ? 
0 : -ENOMEM; - + debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); } diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 1542df00b23c..aaddc0217e73 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -362,7 +362,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, const s16 off = insn->off; const s32 imm = insn->imm; const int i = insn - ctx->prog->insnsi; - const bool is64 = BPF_CLASS(code) == BPF_ALU64; + const bool is64 = BPF_CLASS(code) == BPF_ALU64 || + BPF_CLASS(code) == BPF_JMP; const bool isdw = BPF_SIZE(code) == BPF_DW; u8 jmp_cond; s32 jmp_offset; @@ -559,7 +560,17 @@ emit_bswap_uxt: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: - emit(A64_CMP(1, dst, src), ctx); + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + emit(A64_CMP(is64, dst, src), ctx); emit_cond_jmp: jmp_offset = bpf2a64_offset(i + off, i, ctx); check_imm19(jmp_offset); @@ -601,7 +612,8 @@ emit_cond_jmp: emit(A64_B_(jmp_cond, jmp_offset), ctx); break; case BPF_JMP | BPF_JSET | BPF_X: - emit(A64_TST(1, dst, src), ctx); + case BPF_JMP32 | BPF_JSET | BPF_X: + emit(A64_TST(is64, dst, src), ctx); goto emit_cond_jmp; /* IF (dst COND imm) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_K: @@ -614,12 +626,23 @@ emit_cond_jmp: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: - emit_a64_mov_i(1, tmp, imm, ctx); - emit(A64_CMP(1, dst, tmp), ctx); + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_CMP(is64, dst, tmp), ctx); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: - emit_a64_mov_i(1, tmp, imm, ctx); - emit(A64_TST(1, dst, tmp), ctx); + case BPF_JMP32 | BPF_JSET | BPF_K: + emit_a64_mov_i(is64, tmp, imm, ctx); + emit(A64_TST(is64, dst, tmp), ctx); goto emit_cond_jmp; /* function call */ case BPF_JMP | BPF_CALL: diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 456e154674d1..e5cd3c5f8399 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -6,6 +6,7 @@ config C6X def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select CLKDEV_LOOKUP diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 33a2c94fed0d..63b4a1705182 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -30,6 +30,7 @@ generic-y += pgalloc.h generic-y += preempt.h generic-y += segment.h generic-y += serial.h +generic-y += shmparam.h generic-y += tlbflush.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += 
shmparam.h generic-y += ucontext.h diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h index 6b2fe792de9d..79b724c39d9b 100644 --- a/arch/c6x/include/uapi/asm/unistd.h +++ b/arch/c6x/include/uapi/asm/unistd.h @@ -17,7 +17,9 @@ #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_TIME32_SYSCALLS /* Use the standard ABI for syscalls. */ #include diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c index 75b79571732c..0d3701bc88f6 100644 --- a/arch/c6x/mm/dma-coherent.c +++ b/arch/c6x/mm/dma-coherent.c @@ -121,8 +121,6 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr, */ void __init coherent_mem_init(phys_addr_t start, u32 size) { - phys_addr_t bitmap_phys; - if (!size) return; @@ -138,11 +136,12 @@ void __init coherent_mem_init(phys_addr_t start, u32 size) if (dma_size & (PAGE_SIZE - 1)) ++dma_pages; - bitmap_phys = memblock_phys_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long), - sizeof(long)); - - dma_bitmap = phys_to_virt(bitmap_phys); - memset(dma_bitmap, 0, dma_pages * PAGE_SIZE); + dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long), + sizeof(long)); + if (!dma_bitmap) + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", + __func__, BITS_TO_LONGS(dma_pages) * sizeof(long), + sizeof(long)); } static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size, diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c index af5ada0520be..fe582c3a1794 100644 --- a/arch/c6x/mm/init.c +++ b/arch/c6x/mm/init.c @@ -40,7 +40,9 @@ void __init paging_init(void) empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE, PAGE_SIZE); - memset((void *)empty_zero_page, 0, PAGE_SIZE); + if (!empty_zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); /* * Set up user data space diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 398113c845f5..725a115759c9 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -1,5 +1,6 @@ config CSKY def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_USE_BUILTIN_BSWAP @@ -30,7 +31,6 @@ config CSKY select HAVE_ARCH_TRACEHOOK select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_GRAPH_TRACER - select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZO select HAVE_KERNEL_LZMA @@ -42,7 +42,6 @@ config CSKY select MODULES_USE_ELF_RELA if MODULES select OF select OF_EARLY_FLATTREE - select OF_RESERVED_MEM select PERF_USE_VMALLOC if CPU_CK610 select RTC_LIB select TIMER_OF diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index edfcbb25fd9f..dcea277c09ae 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h @@ -45,8 +45,8 @@ ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) #define pte_clear(mm, addr, ptep) set_pte((ptep), \ - (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) -#define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) + (((unsigned int) addr & PAGE_OFFSET) ? 
__pte(_PAGE_GLOBAL) : __pte(0))) +#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ @@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte) #define pgd_index(address) ((address) >> PGDIR_SHIFT) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); + /* * Macro to make mark a page protection value as "uncacheable". Note * that "protection" is really a misnomer here as the protection value diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 8f454810514f..21e0bd5293dd 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h @@ -49,7 +49,7 @@ struct thread_struct { }; #define INIT_THREAD { \ - .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ + .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ .sr = DEFAULT_PSR_VALUE, \ } @@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p); #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) #define task_pt_regs(p) \ - ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) + ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) #define cpu_relax() barrier() diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h index ffdc4c47ff43..db2640d5f575 100644 --- a/arch/csky/include/asm/segment.h +++ b/arch/csky/include/asm/segment.h @@ -9,7 +9,6 @@ typedef struct { } mm_segment_t; #define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF }) -#define get_ds() KERNEL_DS #define USER_DS ((mm_segment_t) { 0x80000000UL }) #define get_fs() (current_thread_info()->addr_limit) diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h index 224c9a9ab45b..ec60e49cea66 100644 --- a/arch/csky/include/uapi/asm/unistd.h +++ b/arch/csky/include/uapi/asm/unistd.h @@ -2,6 +2,8 @@ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS #include #define __NR_set_thread_area (__NR_arch_specific_syscall + 0) diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c index 659253e9989c..d67f9777cfd9 100644 --- a/arch/csky/kernel/dumpstack.c +++ b/arch/csky/kernel/dumpstack.c @@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack) if (task) stack = (unsigned long *)thread_saved_fp(task); else +#ifdef CONFIG_STACKTRACE + asm volatile("mov %0, r8\n":"=r"(stack)::"memory"); +#else stack = (unsigned long *)&stack; +#endif } show_trace(stack); diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 57f1afe19a52..f2f12fff36f7 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target, static const struct user_regset csky_regsets[] = { [REGSET_GPR] = { .core_note_type = NT_PRSTATUS, - .n = ELF_NGREG, + .n = sizeof(struct pt_regs) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = &gpr_get, diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index ddc4dd79f282..b07a534b3062 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c @@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) { unsigned long mask = 1 << cpu; - secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; + secondary_stack = + (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; secondary_hint = mfcr("cr31"); secondary_ccr = mfcr("cr18"); diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c index 53b1bfa4c462..3317b774f6dc 100644 --- a/arch/csky/mm/highmem.c +++ b/arch/csky/mm/highmem.c @@ -141,6 +141,11 @@ static void __init fixrange_init(unsigned long start, unsigned long end, for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { if (pmd_none(*pmd)) { pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, + PAGE_SIZE); + set_pmd(pmd, __pmd(__pa(pte))); BUG_ON(pte != pte_offset_kernel(pmd, 0)); } diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c index cb7c03e5cd21..8473b6bdf512 100644 --- a/arch/csky/mm/ioremap.c +++ b/arch/csky/mm/ioremap.c @@ -46,3 +46,17 @@ void iounmap(void __iomem *addr) vunmap((void *)((unsigned long)addr & PAGE_MASK)); } EXPORT_SYMBOL(iounmap); + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) { + vma_prot.pgprot |= _PAGE_SO; + return pgprot_noncached(vma_prot); + } else if (file->f_flags & O_SYNC) { + return pgprot_noncached(vma_prot); + } + + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 6472a0685470..c071da34e081 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config H8300 def_bool y + select ARCH_32BIT_OFF_T select GENERIC_ATOMIC64 select HAVE_UID16 select VIRT_TO_BUS diff --git a/arch/h8300/configs/edosk2674_defconfig b/arch/h8300/configs/edosk2674_defconfig index 29fda12d5da9..23791dcf6c25 100644 --- a/arch/h8300/configs/edosk2674_defconfig +++ b/arch/h8300/configs/edosk2674_defconfig @@ -45,5 +45,4 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_SYSFS is not set # CONFIG_MISC_FILESYSTEMS is not set CONFIG_DEBUG_INFO=y -# 
CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/h8300/configs/h8300h-sim_defconfig b/arch/h8300/configs/h8300h-sim_defconfig index 80624f46b0ed..7fc9c2f0acc0 100644 --- a/arch/h8300/configs/h8300h-sim_defconfig +++ b/arch/h8300/configs/h8300h-sim_defconfig @@ -45,5 +45,4 @@ CONFIG_SERIAL_SH_SCI_EARLYCON=y # CONFIG_SYSFS is not set # CONFIG_MISC_FILESYSTEMS is not set CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/h8300/configs/h8s-sim_defconfig b/arch/h8300/configs/h8s-sim_defconfig index 29fda12d5da9..23791dcf6c25 100644 --- a/arch/h8300/configs/h8s-sim_defconfig +++ b/arch/h8300/configs/h8s-sim_defconfig @@ -45,5 +45,4 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y # CONFIG_SYSFS is not set # CONFIG_MISC_FILESYSTEMS is not set CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index cd400d353d18..961c1dc064e1 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -40,6 +40,7 @@ generic-y += preempt.h generic-y += scatterlist.h generic-y += sections.h generic-y += serial.h +generic-y += shmparam.h generic-y += sizes.h generic-y += spinlock.h generic-y += timex.h diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h index 9adbf7e1aaa3..a407978f9f9f 100644 --- a/arch/h8300/include/asm/segment.h +++ b/arch/h8300/include/asm/segment.h @@ -33,12 +33,6 @@ static inline mm_segment_t get_fs(void) return USER_DS; } -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - #define segment_eq(a, b) ((a).seg == (b).seg) #endif /* __ASSEMBLY__ */ diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/h8300/include/uapi/asm/Kbuild +++ b/arch/h8300/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h index 628195823816..eb7bc0012af5 100644 --- a/arch/h8300/include/uapi/asm/unistd.h +++ b/arch/h8300/include/uapi/asm/unistd.h @@ -2,5 +2,7 @@ #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS #include diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 6519252ac4db..0f04a5e9aa4f 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -68,7 +68,9 @@ void __init paging_init(void) * to a couple of allocated pages. */ empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE); - memset((void *)empty_zero_page, 0, PAGE_SIZE); + if (!empty_zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); /* * Set up SFC/DFC registers (user data space). 
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index fb2fbfcfc532..ac441680dcc0 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -4,6 +4,7 @@ comment "Linux Kernel Configuration for Hexagon" config HEXAGON def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_PREEMPT select HAVE_OPROFILE diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 47c4da3d64a4..b25fd42aa0f4 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -30,6 +30,7 @@ generic-y += rwsem.h generic-y += sections.h generic-y += segment.h generic-y += serial.h +generic-y += shmparam.h generic-y += sizes.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild index 61d955c1747a..c1b06dcf6cf8 100644 --- a/arch/hexagon/include/uapi/asm/Kbuild +++ b/arch/hexagon/include/uapi/asm/Kbuild @@ -1,4 +1,3 @@ include include/uapi/asm-generic/Kbuild.asm -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h index c91ca7d02461..432c4db1b623 100644 --- a/arch/hexagon/include/uapi/asm/unistd.h +++ b/arch/hexagon/include/uapi/asm/unistd.h @@ -30,9 +30,11 @@ #define sys_mmap2 sys_mmap_pgoff #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_EXECVE #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_SYS_VFORK #define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_TIME32_SYSCALLS #include diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 306d469e43da..89782ad3fb88 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -48,7 +48,6 @@ #define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ #define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */ -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index 0b08ebd2dfde..9ba6110b10b9 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h @@ -12,20 +12,6 @@ #define NR_syscalls __NR_syscalls /* length of syscall table */ -/* - * The following defines stop scripts/checksyscalls.sh from complaining about - * unimplemented system calls. Glibc provides for each of these by using - * more modern equivalent system calls. - */ -#define __IGNORE_fork /* clone() */ -#define __IGNORE_time /* gettimeofday() */ -#define __IGNORE_alarm /* setitimer(ITIMER_REAL, ... 
*/ -#define __IGNORE_pause /* rt_sigprocmask(), rt_sigsuspend() */ -#define __IGNORE_utime /* utimes() */ -#define __IGNORE_getpgrp /* getpgid() */ -#define __IGNORE_vfork /* clone() */ -#define __IGNORE_umount2 /* umount() */ - #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_SYS_UTIME diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild index 5b819e53c397..b71c5f787783 100644 --- a/arch/ia64/include/uapi/asm/Kbuild +++ b/arch/ia64/include/uapi/asm/Kbuild @@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_64.h generic-y += kvm_para.h +generic-y += socket.h diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h deleted file mode 100644 index c872c4e6bafb..000000000000 --- a/arch/ia64/include/uapi/asm/socket.h +++ /dev/null @@ -1,120 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ASM_IA64_SOCKET_H -#define _ASM_IA64_SOCKET_H - -/* - * Socket related defines. - * - * Based on . - * - * Modified 1998-2000 - * David Mosberger-Tang , Hewlett-Packard Co - */ - -#include - -/* For setsockopt(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -#define SO_REUSEPORT 15 -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 -#define SO_GET_FILTER SO_ATTACH_FILTER - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#define SO_LOCK_FILTER 44 - -#define SO_SELECT_ERR_QUEUE 45 - -#define SO_BUSY_POLL 46 - -#define SO_MAX_PACING_RATE 47 - -#define SO_BPF_EXTENSIONS 48 - -#define SO_INCOMING_CPU 49 - -#define SO_ATTACH_BPF 50 -#define SO_DETACH_BPF SO_DETACH_FILTER - -#define SO_ATTACH_REUSEPORT_CBPF 51 -#define SO_ATTACH_REUSEPORT_EBPF 52 - -#define SO_CNX_ADVICE 53 - -#define SCM_TIMESTAMPING_OPT_STATS 54 - -#define SO_MEMINFO 55 - -#define SO_INCOMING_NAPI_ID 56 - -#define SO_COOKIE 57 - -#define SCM_TIMESTAMPING_PKTINFO 58 - -#define SO_PEERGROUPS 59 - -#define SO_ZEROCOPY 60 - -#define SO_TXTIME 61 -#define SCM_TXTIME SO_TXTIME - -#endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index b2513922dcb5..013e0bcacc39 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -15,6 +15,8 @@ #define __NR_Linux 1024 
+#define __NR_umount __NR_umount2 + #include #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 91bd1e129379..5cabb3fd159f 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -359,11 +359,6 @@ typedef struct ia64_state_log_s static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; -#define IA64_LOG_ALLOCATE(it, size) \ - {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \ - (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); \ - ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \ - (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);} #define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock) #define IA64_LOG_LOCK(it) spin_lock_irqsave(&ia64_state_log[it].isl_lock, s) #define IA64_LOG_UNLOCK(it) spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s) @@ -378,6 +373,19 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; #define IA64_LOG_CURR_BUFFER(it) (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])) #define IA64_LOG_COUNT(it) ia64_state_log[it].isl_count +static inline void ia64_log_allocate(int it, u64 size) +{ + ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = + (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); + if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]) + panic("%s: Failed to allocate %llu bytes\n", __func__, size); + + ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = + (ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES); + if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]) + panic("%s: Failed to allocate %llu bytes\n", __func__, size); +} + /* * ia64_log_init * Reset the OS ia64 log buffer @@ -399,9 +407,7 @@ ia64_log_init(int sal_info_type) return; // set up OS data structures to hold error info - IA64_LOG_ALLOCATE(sal_info_type, max_size); - memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size); - memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size); + ia64_log_allocate(sal_info_type, max_size); } /* @@ -1835,8 +1841,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, /* Caller prevents this from being called after init */ static void * __ref mca_bootmem(void) { - return memblock_alloc_from(sizeof(struct ia64_mca_cpu), - KERNEL_STACK_SIZE, 0); + return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE); } /* Do per-CPU MCA-related initialization. 
*/ diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c index 92c376279c6d..1315da6c7aeb 100644 --- a/arch/ia64/kernel/numa.c +++ b/arch/ia64/kernel/numa.c @@ -74,7 +74,7 @@ void __init build_cpu_to_node_map(void) cpumask_clear(&node_to_cpu_mask[node]); for_each_possible_early_cpu(cpu) { - node = -1; + node = NUMA_NO_NODE; for (i = 0; i < NR_CPUS; ++i) if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) { node = node_cpuid[i].nid; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 46bff1661836..7a969f4c3534 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -583,17 +583,6 @@ pfm_put_task(struct task_struct *task) if (task != current) put_task_struct(task); } -static inline void -pfm_reserve_page(unsigned long a) -{ - SetPageReserved(vmalloc_to_page((void *)a)); -} -static inline void -pfm_unreserve_page(unsigned long a) -{ - ClearPageReserved(vmalloc_to_page((void*)a)); -} - static inline unsigned long pfm_protect_ctx_ctxsw(pfm_context_t *x) { @@ -816,44 +805,6 @@ pfm_reset_msgq(pfm_context_t *ctx) DPRINT(("ctx=%p msgq reset\n", ctx)); } -static void * -pfm_rvmalloc(unsigned long size) -{ - void *mem; - unsigned long addr; - - size = PAGE_ALIGN(size); - mem = vzalloc(size); - if (mem) { - //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem); - addr = (unsigned long)mem; - while (size > 0) { - pfm_reserve_page(addr); - addr+=PAGE_SIZE; - size-=PAGE_SIZE; - } - } - return mem; -} - -static void -pfm_rvfree(void *mem, unsigned long size) -{ - unsigned long addr; - - if (mem) { - DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size)); - addr = (unsigned long) mem; - while ((long) size > 0) { - pfm_unreserve_page(addr); - addr+=PAGE_SIZE; - size-=PAGE_SIZE; - } - vfree(mem); - } - return; -} - static pfm_context_t * pfm_context_alloc(int ctx_flags) { @@ -1498,7 +1449,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx) /* * free the buffer */ - pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size); + vfree(ctx->ctx_smpl_hdr); ctx->ctx_smpl_hdr = NULL; ctx->ctx_smpl_size = 0UL; @@ -2137,7 +2088,7 @@ doit: * All memory free operations (especially for vmalloc'ed memory) * MUST be done with interrupts ENABLED. */ - if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size); + vfree(smpl_buf_addr); /* * return the memory used by the context @@ -2266,10 +2217,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t /* * We do the easy to undo allocations first. 
- * - * pfm_rvmalloc(), clears the buffer, so there is no leak */ - smpl_buf = pfm_rvmalloc(size); + smpl_buf = vzalloc(size); if (smpl_buf == NULL) { DPRINT(("Can't allocate sampling buffer\n")); return -ENOMEM; @@ -2346,7 +2295,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t error: vm_area_free(vma); error_kmem: - pfm_rvfree(smpl_buf, size); + vfree(smpl_buf); return -ENOMEM; } diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl index b22203b40bfe..ab9cda5f6136 100644 --- a/arch/ia64/kernel/syscalls/syscall.tbl +++ b/arch/ia64/kernel/syscalls/syscall.tbl @@ -29,7 +29,7 @@ 17 common getpid sys_getpid 18 common getppid sys_getppid 19 common mount sys_mount -20 common umount sys_umount +20 common umount2 sys_umount 21 common setuid sys_setuid 22 common getuid sys_getuid 23 common geteuid sys_geteuid @@ -335,3 +335,12 @@ 323 common copy_file_range sys_copy_file_range 324 common preadv2 sys_preadv2 325 common pwritev2 sys_pwritev2 +326 common statx sys_statx +327 common io_pgetevents sys_io_pgetevents +328 common perf_event_open sys_perf_event_open +329 common seccomp sys_seccomp +330 common pkey_mprotect sys_pkey_mprotect +331 common pkey_alloc sys_pkey_alloc +332 common pkey_free sys_pkey_free +333 common rseq sys_rseq +# 334 through 423 are reserved to sync up with other architectures diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 6e447234205c..d29fb6b9fa33 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -84,9 +84,13 @@ skip: static inline void alloc_per_cpu_data(void) { - cpu_data = memblock_alloc_from(PERCPU_PAGE_SIZE * num_possible_cpus(), - PERCPU_PAGE_SIZE, + size_t size = PERCPU_PAGE_SIZE * num_possible_cpus(); + + cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); + if (!cpu_data) + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", + __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); } /** diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 8a965784340c..05490dd073e6 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -227,7 +227,7 @@ void __init setup_per_cpu_areas(void) * CPUs are put into groups according to node. Walk cpu_map * and create new groups at node boundaries. 
*/ - prev_node = -1; + prev_node = NUMA_NO_NODE; ai->nr_groups = 0; for (unit = 0; unit < nr_units; unit++) { cpu = cpu_map[unit]; @@ -435,7 +435,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) { void *ptr = NULL; u8 best = 0xff; - int bestnode = -1, node, anynode = 0; + int bestnode = NUMA_NO_NODE, node, anynode = 0; for_each_online_node(node) { if (node_isset(node, memory_less_mask)) @@ -447,13 +447,17 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) anynode = node; } - if (bestnode == -1) + if (bestnode == NUMA_NO_NODE) bestnode = anynode; ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, bestnode); + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n", + __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode, + __pa(MAX_DMA_ADDRESS)); return ptr; } diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 29d841525ca1..e49200e31750 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -444,23 +444,45 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg) for (address = start_page; address < end_page; address += PAGE_SIZE) { pgd = pgd_offset_k(address); - if (pgd_none(*pgd)) - pgd_populate(&init_mm, pgd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)); + if (pgd_none(*pgd)) { + pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); + if (!pud) + goto err_alloc; + pgd_populate(&init_mm, pgd, pud); + } pud = pud_offset(pgd, address); - if (pud_none(*pud)) - pud_populate(&init_mm, pud, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)); + if (pud_none(*pud)) { + pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); + if (!pmd) + goto err_alloc; + pud_populate(&init_mm, pud, pmd); + } pmd = pmd_offset(pud, address); - if (pmd_none(*pmd)) - pmd_populate_kernel(&init_mm, pmd, memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)); + if (pmd_none(*pmd)) { + pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); + if (!pte) + goto err_alloc; + pmd_populate_kernel(&init_mm, pmd, pte); + } pte = pte_offset_kernel(pmd, address); - if (pte_none(*pte)) - set_pte(pte, pfn_pte(__pa(memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node)) >> PAGE_SHIFT, + if (pte_none(*pte)) { + void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, + node); + if (!page) + goto err_alloc; + set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT, PAGE_KERNEL)); + } } return 0; + +err_alloc: + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n", + __func__, PAGE_SIZE, PAGE_SIZE, node); + return -ENOMEM; } struct memmap_init_callback_data { diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c index 9340bcb4f29c..5fc89aabdce1 100644 --- a/arch/ia64/mm/tlb.c +++ b/arch/ia64/mm/tlb.c @@ -61,8 +61,14 @@ mmu_context_init (void) { ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, SMP_CACHE_BYTES); + if (!ia64_ctx.bitmap) + panic("%s: Failed to allocate %u bytes\n", __func__, + (ia64_ctx.max_ctx + 1) >> 3); ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3, SMP_CACHE_BYTES); + if (!ia64_ctx.flushmap) + panic("%s: Failed to allocate %u bytes\n", __func__, + (ia64_ctx.max_ctx + 1) >> 3); } /* diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index 8df13d0d96fa..d46847323ef6 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c @@ -394,6 +394,9 @@ void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node) hubdev_info = (struct hubdev_info *)memblock_alloc_node(size, 
SMP_CACHE_BYTES, node); + if (!hubdev_info) + panic("%s: Failed to allocate %d bytes align=0x%x nid=%d\n", + __func__, size, SMP_CACHE_BYTES, node); npda->pdinfo = (void *)hubdev_info; } diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index a6d40a2c5bff..e6a5049ef503 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -513,6 +513,10 @@ static void __init sn_init_pdas(char **cmdline_p) nodepdaindr[cnode] = memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, cnode); + if (!nodepdaindr[cnode]) + panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n", + __func__, sizeof(nodepda_t), SMP_CACHE_BYTES, + cnode); memset(nodepdaindr[cnode]->phys_cpuid, -1, sizeof(nodepdaindr[cnode]->phys_cpuid)); spin_lock_init(&nodepdaindr[cnode]->ptc_lock); @@ -521,9 +525,15 @@ static void __init sn_init_pdas(char **cmdline_p) /* * Allocate & initialize nodepda for TIOs. For now, put them on node 0. */ - for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) + for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) { nodepdaindr[cnode] = memblock_alloc_node(sizeof(nodepda_t), SMP_CACHE_BYTES, 0); + if (!nodepdaindr[cnode]) + panic("%s: Failed to allocate %lu bytes align=0x%x nid=%d\n", + __func__, sizeof(nodepda_t), SMP_CACHE_BYTES, + cnode); + } + /* * Now copy the array of nodepda pointers to each nodepda. diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index e173ea2ff395..b54206408f91 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -2,6 +2,7 @@ config M68K bool default y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA select ARCH_MIGHT_HAVE_PC_PARPORT if ISA select ARCH_NO_COHERENT_DMA_MMAP if !MMU diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine index 328ba83d735b..c01e103492fd 100644 --- a/arch/m68k/Kconfig.machine +++ b/arch/m68k/Kconfig.machine @@ -16,6 +16,7 @@ config ATARI bool "Atari support" depends on MMU select MMU_MOTOROLA if MMU + select HAVE_ARCH_NVRAM_OPS help This option enables support for the 68000-based Atari series of computers (including the TT, Falcon and Medusa). 
If you plan to use @@ -26,6 +27,7 @@ config MAC bool "Macintosh support" depends on MMU select MMU_MOTOROLA if MMU + select HAVE_ARCH_NVRAM_OPS help This option enables support for the Apple Macintosh series of computers (yes, there is experimental support now, at least for part diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile index f00ca53f8c14..482513b9af2c 100644 --- a/arch/m68k/Makefile +++ b/arch/m68k/Makefile @@ -58,7 +58,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200) cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200) KBUILD_AFLAGS += $(cpuflags-y) -KBUILD_CFLAGS += $(cpuflags-y) -pipe +KBUILD_CFLAGS += $(cpuflags-y) + +KBUILD_CFLAGS += -pipe -ffreestanding + ifdef CONFIG_MMU # without -fno-strength-reduce the 53c7xx.c driver fails ;-( KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2 diff --git a/arch/m68k/apollo/Makefile b/arch/m68k/apollo/Makefile index 76a057962c38..01856a858fda 100644 --- a/arch/m68k/apollo/Makefile +++ b/arch/m68k/apollo/Makefile @@ -1,5 +1,5 @@ # -# Makefile for Linux arch/m68k/amiga source directory +# Makefile for Linux arch/m68k/apollo source directory # obj-y := config.o dn_ints.o diff --git a/arch/m68k/atari/Makefile b/arch/m68k/atari/Makefile index 0cac723306f9..0b86bb6cfa87 100644 --- a/arch/m68k/atari/Makefile +++ b/arch/m68k/atari/Makefile @@ -6,3 +6,5 @@ obj-y := config.o time.o debug.o ataints.o stdma.o \ atasound.o stram.o obj-$(CONFIG_ATARI_KBD_CORE) += atakeyb.o + +obj-$(CONFIG_NVRAM:m=y) += nvram.o diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c index bd96702a1ad0..4fcc4b1df1c0 100644 --- a/arch/m68k/atari/config.c +++ b/arch/m68k/atari/config.c @@ -148,7 +148,7 @@ int __init atari_parse_bootinfo(const struct bi_record *record) /* Parse the Atari-specific switches= option. */ static int __init atari_switches_setup(char *str) { - char switches[strlen(str) + 1]; + char switches[COMMAND_LINE_SIZE]; char *p; int ovsc_shift; char *args = switches; diff --git a/arch/m68k/atari/nvram.c b/arch/m68k/atari/nvram.c new file mode 100644 index 000000000000..7000d2443aa3 --- /dev/null +++ b/arch/m68k/atari/nvram.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * CMOS/NV-RAM driver for Atari. Adapted from drivers/char/nvram.c. + * Copyright (C) 1997 Roman Hodek + * idea by and with help from Richard Jelinek + * Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com) + * Further contributions from Cesar Barros, Erik Gilling, Tim Hockin and + * Wim Van Sebroeck. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NVRAM_BYTES 50 + +/* It is worth noting that these functions all access bytes of general + * purpose memory in the NVRAM - that is to say, they all add the + * NVRAM_FIRST_BYTE offset. Pass them offsets into NVRAM as if you did not + * know about the RTC cruft. + */ + +/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with + * rtc_lock held. Due to the index-port/data-port design of the RTC, we + * don't want two different things trying to get to it at once. (e.g. the + * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) 
+ */ + +static unsigned char __nvram_read_byte(int i) +{ + return CMOS_READ(NVRAM_FIRST_BYTE + i); +} + +/* This races nicely with trying to read with checksum checking */ +static void __nvram_write_byte(unsigned char c, int i) +{ + CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); +} + +/* On Ataris, the checksum is over all bytes except the checksum bytes + * themselves; these are at the very end. + */ +#define ATARI_CKS_RANGE_START 0 +#define ATARI_CKS_RANGE_END 47 +#define ATARI_CKS_LOC 48 + +static int __nvram_check_checksum(void) +{ + int i; + unsigned char sum = 0; + + for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) && + (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff)); +} + +static void __nvram_set_checksum(void) +{ + int i; + unsigned char sum = 0; + + for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + __nvram_write_byte(~sum, ATARI_CKS_LOC); + __nvram_write_byte(sum, ATARI_CKS_LOC + 1); +} + +long atari_nvram_set_checksum(void) +{ + spin_lock_irq(&rtc_lock); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + return 0; +} + +long atari_nvram_initialize(void) +{ + loff_t i; + + spin_lock_irq(&rtc_lock); + for (i = 0; i < NVRAM_BYTES; ++i) + __nvram_write_byte(0, i); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + return 0; +} + +ssize_t atari_nvram_read(char *buf, size_t count, loff_t *ppos) +{ + char *p = buf; + loff_t i; + + spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + *p = __nvram_read_byte(i); + spin_unlock_irq(&rtc_lock); + + *ppos = i; + return p - buf; +} + +ssize_t atari_nvram_write(char *buf, size_t count, loff_t *ppos) +{ + char *p = buf; + loff_t i; + + spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + __nvram_write_byte(*p, i); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + + *ppos = i; + return p - buf; +} + +ssize_t atari_nvram_get_size(void) +{ + return NVRAM_BYTES; +} + +#ifdef CONFIG_PROC_FS +static struct { + unsigned char val; + const char *name; +} boot_prefs[] = { + { 0x80, "TOS" }, + { 0x40, "ASV" }, + { 0x20, "NetBSD (?)" }, + { 0x10, "Linux" }, + { 0x00, "unspecified" }, +}; + +static const char * const languages[] = { + "English (US)", + "German", + "French", + "English (UK)", + "Spanish", + "Italian", + "6 (undefined)", + "Swiss (French)", + "Swiss (German)", +}; + +static const char * const dateformat[] = { + "MM%cDD%cYY", + "DD%cMM%cYY", + "YY%cMM%cDD", + "YY%cDD%cMM", + "4 (undefined)", + "5 (undefined)", + "6 (undefined)", + "7 (undefined)", +}; + +static const char * const colors[] = { + "2", "4", "16", "256", "65536", "??", "??", "??" +}; + +static void atari_nvram_proc_read(unsigned char *nvram, struct seq_file *seq, + void *offset) +{ + int checksum; + int i; + unsigned int vmode; + + spin_lock_irq(&rtc_lock); + checksum = __nvram_check_checksum(); + spin_unlock_irq(&rtc_lock); + + seq_printf(seq, "Checksum status : %svalid\n", checksum ? 
"" : "not "); + + seq_puts(seq, "Boot preference : "); + for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) + if (nvram[1] == boot_prefs[i].val) { + seq_printf(seq, "%s\n", boot_prefs[i].name); + break; + } + if (i < 0) + seq_printf(seq, "0x%02x (undefined)\n", nvram[1]); + + seq_printf(seq, "SCSI arbitration : %s\n", + (nvram[16] & 0x80) ? "on" : "off"); + seq_puts(seq, "SCSI host ID : "); + if (nvram[16] & 0x80) + seq_printf(seq, "%d\n", nvram[16] & 7); + else + seq_puts(seq, "n/a\n"); + + if (!MACH_IS_FALCON) + return; + + seq_puts(seq, "OS language : "); + if (nvram[6] < ARRAY_SIZE(languages)) + seq_printf(seq, "%s\n", languages[nvram[6]]); + else + seq_printf(seq, "%u (undefined)\n", nvram[6]); + seq_puts(seq, "Keyboard language: "); + if (nvram[7] < ARRAY_SIZE(languages)) + seq_printf(seq, "%s\n", languages[nvram[7]]); + else + seq_printf(seq, "%u (undefined)\n", nvram[7]); + seq_puts(seq, "Date format : "); + seq_printf(seq, dateformat[nvram[8] & 7], + nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/'); + seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12); + seq_puts(seq, "Boot delay : "); + if (nvram[10] == 0) + seq_puts(seq, "default\n"); + else + seq_printf(seq, "%ds%s\n", nvram[10], + nvram[10] < 8 ? ", no memory test" : ""); + + vmode = (nvram[14] << 8) | nvram[15]; + seq_printf(seq, + "Video mode : %s colors, %d columns, %s %s monitor\n", + colors[vmode & 7], vmode & 8 ? 80 : 40, + vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC"); + seq_printf(seq, + " %soverscan, compat. mode %s%s\n", + vmode & 64 ? "" : "no ", vmode & 128 ? "on" : "off", + vmode & 256 ? + (vmode & 16 ? ", line doubling" : ", half screen") : ""); +} + +static int nvram_proc_read(struct seq_file *seq, void *offset) +{ + unsigned char contents[NVRAM_BYTES]; + int i; + + spin_lock_irq(&rtc_lock); + for (i = 0; i < NVRAM_BYTES; ++i) + contents[i] = __nvram_read_byte(i); + spin_unlock_irq(&rtc_lock); + + atari_nvram_proc_read(contents, seq, offset); + + return 0; +} + +static int __init atari_nvram_init(void) +{ + if (!(MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK))) + return -ENODEV; + + if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) { + pr_err("nvram: can't create /proc/driver/nvram\n"); + return -ENOMEM; + } + + return 0; +} +device_initcall(atari_nvram_init); +#endif /* CONFIG_PROC_FS */ diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c index 6ffc204eb07d..6152f9f631d2 100644 --- a/arch/m68k/atari/stram.c +++ b/arch/m68k/atari/stram.c @@ -97,6 +97,10 @@ void __init atari_stram_reserve_pages(void *start_mem) pr_debug("atari_stram pool: kernel in ST-RAM, using alloc_bootmem!\n"); stram_pool.start = (resource_size_t)memblock_alloc_low(pool_size, PAGE_SIZE); + if (!stram_pool.start) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, pool_size, PAGE_SIZE); + stram_pool.end = stram_pool.start + pool_size - 1; request_resource(&iomem_resource, &stram_pool); stram_virt_offset = 0; diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c index 908d58347790..b4103b6bfdeb 100644 --- a/arch/m68k/coldfire/device.c +++ b/arch/m68k/coldfire/device.c @@ -14,11 +14,14 @@ #include #include #include +#include #include #include #include #include #include +#include +#include /* * All current ColdFire parts contain from 2, 3, 4 or 10 UARTS. 
@@ -476,6 +479,81 @@ static struct platform_device mcf_i2c5 = { #endif /* MCFI2C_BASE5 */ #endif /* IS_ENABLED(CONFIG_I2C_IMX) */ +#if IS_ENABLED(CONFIG_MCF_EDMA) + +static const struct dma_slave_map mcf_edma_map[] = { + { "dreq0", "rx-tx", MCF_EDMA_FILTER_PARAM(0) }, + { "dreq1", "rx-tx", MCF_EDMA_FILTER_PARAM(1) }, + { "uart.0", "rx", MCF_EDMA_FILTER_PARAM(2) }, + { "uart.0", "tx", MCF_EDMA_FILTER_PARAM(3) }, + { "uart.1", "rx", MCF_EDMA_FILTER_PARAM(4) }, + { "uart.1", "tx", MCF_EDMA_FILTER_PARAM(5) }, + { "uart.2", "rx", MCF_EDMA_FILTER_PARAM(6) }, + { "uart.2", "tx", MCF_EDMA_FILTER_PARAM(7) }, + { "timer0", "rx-tx", MCF_EDMA_FILTER_PARAM(8) }, + { "timer1", "rx-tx", MCF_EDMA_FILTER_PARAM(9) }, + { "timer2", "rx-tx", MCF_EDMA_FILTER_PARAM(10) }, + { "timer3", "rx-tx", MCF_EDMA_FILTER_PARAM(11) }, + { "fsl-dspi.0", "rx", MCF_EDMA_FILTER_PARAM(12) }, + { "fsl-dspi.0", "tx", MCF_EDMA_FILTER_PARAM(13) }, + { "fsl-dspi.1", "rx", MCF_EDMA_FILTER_PARAM(14) }, + { "fsl-dspi.1", "tx", MCF_EDMA_FILTER_PARAM(15) }, +}; + +static struct mcf_edma_platform_data mcf_edma_data = { + .dma_channels = 64, + .slave_map = mcf_edma_map, + .slavecnt = ARRAY_SIZE(mcf_edma_map), +}; + +static struct resource mcf_edma_resources[] = { + { + .start = MCFEDMA_BASE, + .end = MCFEDMA_BASE + MCFEDMA_SIZE - 1, + .flags = IORESOURCE_MEM, + }, + { + .start = MCFEDMA_IRQ_INTR0, + .end = MCFEDMA_IRQ_INTR0 + 15, + .flags = IORESOURCE_IRQ, + .name = "edma-tx-00-15", + }, + { + .start = MCFEDMA_IRQ_INTR16, + .end = MCFEDMA_IRQ_INTR16 + 39, + .flags = IORESOURCE_IRQ, + .name = "edma-tx-16-55", + }, + { + .start = MCFEDMA_IRQ_INTR56, + .end = MCFEDMA_IRQ_INTR56, + .flags = IORESOURCE_IRQ, + .name = "edma-tx-56-63", + }, + { + .start = MCFEDMA_IRQ_ERR, + .end = MCFEDMA_IRQ_ERR, + .flags = IORESOURCE_IRQ, + .name = "edma-err", + }, +}; + +static u64 mcf_edma_dmamask = DMA_BIT_MASK(32); + +static struct platform_device mcf_edma = { + .name = "mcf-edma", + .id = 0, + .num_resources = ARRAY_SIZE(mcf_edma_resources), + .resource = mcf_edma_resources, + .dev = { + .dma_mask = &mcf_edma_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &mcf_edma_data, + } +}; + +#endif /* IS_ENABLED(CONFIG_MCF_EDMA) */ + static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, #if IS_ENABLED(CONFIG_FEC) @@ -505,6 +583,9 @@ static struct platform_device *mcf_devices[] __initdata = { &mcf_i2c5, #endif #endif +#if IS_ENABLED(CONFIG_MCF_EDMA) + &mcf_edma, +#endif }; /* diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c index ad1185c68df7..6b3ab583c698 100644 --- a/arch/m68k/coldfire/m5272.c +++ b/arch/m68k/coldfire/m5272.c @@ -127,7 +127,7 @@ static struct fixed_phy_status nettel_fixed_phy_status __initdata = { static int __init init_BSP(void) { m5272_uarts_init(); - fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status, -1); + fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status); return 0; } diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c index 55392af845fb..5bd24c9b865d 100644 --- a/arch/m68k/coldfire/m5441x.c +++ b/arch/m68k/coldfire/m5441x.c @@ -137,6 +137,8 @@ struct clk *mcf_clks[] = { static struct clk * const enable_clks[] __initconst = { /* make sure these clocks are enabled */ + &__clk_0_15, /* dspi.1 */ + &__clk_0_17, /* eDMA */ &__clk_0_18, /* intc0 */ &__clk_0_19, /* intc0 */ &__clk_0_20, /* intc0 */ @@ -157,8 +159,6 @@ static struct clk * const disable_clks[] __initconst = { &__clk_0_8, /* can.0 */ &__clk_0_9, /* can.1 */ &__clk_0_14, /* i2c.1 */ - &__clk_0_15, /* 
dspi.1 */ - &__clk_0_17, /* eDMA */ &__clk_0_22, /* i2c.0 */ &__clk_0_23, /* dspi.0 */ &__clk_0_28, /* tmr.1 */ diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig index 131b4101ae5d..0857cdbfde0c 100644 --- a/arch/m68k/configs/amcore_defconfig +++ b/arch/m68k/configs/amcore_defconfig @@ -55,27 +55,7 @@ CONFIG_MTD_UCLINUX=y CONFIG_MTD_PLATRAM=y CONFIG_BLK_DEV_RAM=y CONFIG_NETDEVICES=y -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_CADENCE is not set -# CONFIG_NET_VENDOR_BROADCOM is not set CONFIG_DM9000=y -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_WLAN is not set # CONFIG_INPUT is not set # CONFIG_SERIO is not set @@ -108,7 +88,6 @@ CONFIG_ROMFS_FS=y CONFIG_ROMFS_BACKED_BY_BOTH=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_PANIC_ON_OOPS=y diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index bfd4648e76e3..525421ae277d 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -38,7 +38,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -304,7 +303,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -342,7 +340,6 @@ CONFIG_BLK_DEV_GAYLE=y CONFIG_BLK_DEV_BUDDHA=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -399,44 +396,12 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_A2065=y CONFIG_ARIADNE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_XSURF100=y CONFIG_HYDRA=y CONFIG_APNE=y CONFIG_ZORRO8390=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not 
set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -612,6 +577,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -621,6 +587,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 81112af1e478..db0e654a88d5 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -34,7 +34,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -300,7 +299,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -327,7 +325,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -378,35 +375,6 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -569,6 +537,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -578,6 +547,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 6d4b6023a2f0..1451168eb789 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -41,7 +41,6 @@ 
CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -307,7 +306,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -342,7 +340,6 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_FALCON_IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -394,37 +391,9 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_ATARILANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_NE2000=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -590,6 +559,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -599,6 +569,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 3306dff09d3c..b0d3609f5bb3 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -31,7 +31,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -297,7 +296,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -324,7 +322,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -376,35 +373,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# 
CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_BVME6000_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -561,6 +530,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -570,6 +540,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index c15e15b68d39..4ed7c151347c 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -33,7 +33,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -299,7 +298,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -326,7 +324,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -377,36 +374,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_HPLANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -571,6 +539,7 @@ CONFIG_CRYPTO_LRW=m 
CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -580,6 +549,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 1a0ce0d11267..0dc544e1ce1f 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -32,7 +32,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -301,7 +300,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -333,7 +331,6 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_MAC_IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -393,39 +390,10 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_MACMACE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set CONFIG_MACSONIC=y -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_MAC8390=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -593,6 +561,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -602,6 +571,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 9758839b74bd..5a7b7b0d6e72 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -52,7 +52,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -321,7 +320,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y 
-CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -366,7 +364,6 @@ CONFIG_BLK_DEV_MAC_IDE=y CONFIG_BLK_DEV_Q40IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -437,9 +434,6 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_A2065=y CONFIG_ARIADNE=y CONFIG_ATARILANCE=y @@ -447,43 +441,17 @@ CONFIG_HPLANCE=y CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_MAC89x0=y -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_BVME6000_NET=y CONFIG_MVME16x_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set CONFIG_MACSONIC=y -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_XSURF100=y CONFIG_HYDRA=y CONFIG_MAC8390=y CONFIG_NE2000=y CONFIG_APNE=y CONFIG_ZORRO8390=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -675,6 +643,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -684,6 +653,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index f5526731ccdb..71eb9be1803b 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -30,7 +30,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -296,7 +295,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -323,7 +321,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -375,36 +372,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_MVME147_NET=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# 
CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -561,6 +529,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -570,6 +539,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 5db58ff4b107..ea2ebd4241c0 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -31,7 +31,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -297,7 +296,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -324,7 +322,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -376,35 +373,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_MVME16x_NET=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -561,6 +530,7 @@ CONFIG_CRYPTO_LRW=m 
CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -570,6 +540,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index b645230da128..cef6dc47c725 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -32,7 +32,6 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -298,7 +297,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -332,7 +330,6 @@ CONFIG_BLK_DEV_IDECD=y CONFIG_BLK_DEV_Q40IDE=y CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -383,40 +380,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CIRRUS is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set CONFIG_NE2000=y -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m @@ -584,6 +548,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -593,6 +558,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/stmark2_defconfig b/arch/m68k/configs/stmark2_defconfig index 3d07b1de7eb0..69f23c7b0497 100644 --- a/arch/m68k/configs/stmark2_defconfig +++ b/arch/m68k/configs/stmark2_defconfig @@ -76,7 +76,6 @@ CONFIG_GPIO_GENERIC_PLATFORM=y CONFIG_FSCACHE=y # CONFIG_PROC_SYSCTL is not set CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_SLUB_DEBUG_ON=y CONFIG_PANIC_ON_OOPS=y diff --git 
a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 4afe2100947e..69f2282dc4e9 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -28,7 +28,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -294,7 +293,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -321,7 +319,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -373,36 +370,8 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is not set CONFIG_SUN3LANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_SUN3_82586=y -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -563,6 +532,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -572,6 +542,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index bd22893d0dc3..e91267e868b2 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -28,7 +28,6 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y -CONFIG_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_MQ_IOSCHED_KYBER=m CONFIG_IOSCHED_BFQ=m @@ -294,7 +293,6 @@ CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m # CONFIG_BATMAN_ADV_BATMAN_V is not set CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m @@ -321,7 +319,6 @@ CONFIG_ATA_OVER_ETH=m CONFIG_DUMMY_IRQ=m CONFIG_RAID_ATTRS=m CONFIG_SCSI=y -# CONFIG_SCSI_MQ_DEFAULT is not set CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_CHR_DEV_OSST=m @@ -373,36 +370,7 @@ CONFIG_MACSEC=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_AMAZON is 
not set CONFIG_SUN3LANCE=y -# CONFIG_NET_VENDOR_AQUANTIA is not set -# CONFIG_NET_VENDOR_ARC is not set -# CONFIG_NET_VENDOR_AURORA is not set -# CONFIG_NET_VENDOR_BROADCOM is not set -# CONFIG_NET_VENDOR_CADENCE is not set -# CONFIG_NET_VENDOR_CAVIUM is not set -# CONFIG_NET_VENDOR_CORTINA is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -# CONFIG_NET_VENDOR_HUAWEI is not set -# CONFIG_NET_VENDOR_INTEL is not set -# CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETRONOME is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RENESAS is not set -# CONFIG_NET_VENDOR_ROCKER is not set -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SOLARFLARE is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_VIA is not set -# CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -563,6 +531,7 @@ CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_OFB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_ADIANTUM=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -572,6 +541,7 @@ CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SM3=m +CONFIG_CRYPTO_STREEBOG=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 38049357d6d3..40712e49381b 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c @@ -155,18 +155,22 @@ out: static int __init nfhd_init(void) { u32 blocks, bsize; + int ret; int i; nfhd_id = nf_get_id("XHDI"); if (!nfhd_id) return -ENODEV; - major_num = register_blkdev(major_num, "nfhd"); - if (major_num <= 0) { + ret = register_blkdev(major_num, "nfhd"); + if (ret < 0) { pr_warn("nfhd: unable to get major number\n"); - return major_num; + return ret; } + if (!major_num) + major_num = ret; + for (i = NFHD_DEV_OFFSET; i < 24; i++) { if (nfhd_get_capacity(i, 0, &blocks, &bsize)) continue; diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 9f1dd26903e3..95f8f631c4df 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h generic-y += percpu.h generic-y += preempt.h generic-y += sections.h +generic-y += shmparam.h generic-y += spinlock.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/m68k/include/asm/a.out-core.h b/arch/m68k/include/asm/a.out-core.h deleted file mode 100644 index ae91ea6bb303..000000000000 --- a/arch/m68k/include/asm/a.out-core.h +++ /dev/null @@ -1,68 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#ifndef _ASM_A_OUT_CORE_H -#define _ASM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include -#include -#include - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) -{ - struct switch_stack *sw; - -/* changed the size calculations - should hopefully work better. lbt */ - dump->magic = CMAGIC; - dump->start_code = 0; - dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long) (current->mm->brk + - (PAGE_SIZE-1))) >> PAGE_SHIFT; - dump->u_dsize -= dump->u_tsize; - dump->u_ssize = 0; - - if (dump->start_stack < TASK_SIZE) - dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; - - dump->u_ar0 = offsetof(struct user, regs); - sw = ((struct switch_stack *)regs) - 1; - dump->regs.d1 = regs->d1; - dump->regs.d2 = regs->d2; - dump->regs.d3 = regs->d3; - dump->regs.d4 = regs->d4; - dump->regs.d5 = regs->d5; - dump->regs.d6 = sw->d6; - dump->regs.d7 = sw->d7; - dump->regs.a0 = regs->a0; - dump->regs.a1 = regs->a1; - dump->regs.a2 = regs->a2; - dump->regs.a3 = sw->a3; - dump->regs.a4 = sw->a4; - dump->regs.a5 = sw->a5; - dump->regs.a6 = sw->a6; - dump->regs.d0 = regs->d0; - dump->regs.orig_d0 = regs->orig_d0; - dump->regs.stkadj = regs->stkadj; - dump->regs.sr = regs->sr; - dump->regs.pc = regs->pc; - dump->regs.fmtvec = (regs->format << 12) | regs->vector; - /* dump floating point stuff */ - dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp); -} - -#endif /* __KERNEL__ */ -#endif /* _ASM_A_OUT_CORE_H */ diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h index 9000b249d225..533008262b69 100644 --- a/arch/m68k/include/asm/atarihw.h +++ b/arch/m68k/include/asm/atarihw.h @@ -33,6 +33,12 @@ extern int atari_dont_touch_floppy_select; extern int atari_SCC_reset_done; +extern ssize_t atari_nvram_read(char *, size_t, loff_t *); +extern ssize_t atari_nvram_write(char *, size_t, loff_t *); +extern ssize_t atari_nvram_get_size(void); +extern long atari_nvram_set_checksum(void); +extern long atari_nvram_initialize(void); + /* convenience macros for testing machine type */ #define MACH_IS_ST ((atari_mch_cookie >> 16) == ATARI_MCH_ST) #define MACH_IS_STE ((atari_mch_cookie >> 16) == ATARI_MCH_STE && \ diff --git a/arch/m68k/include/asm/m5441xsim.h b/arch/m68k/include/asm/m5441xsim.h index c87556d5581c..4892f314ff38 100644 --- a/arch/m68k/include/asm/m5441xsim.h +++ b/arch/m68k/include/asm/m5441xsim.h @@ -282,6 +282,21 @@ * DSPI module. */ #define MCFDSPI_BASE0 0xfc05c000 +#define MCFDSPI_BASE1 0xfC03c000 #define MCF_IRQ_DSPI0 (MCFINT0_VECBASE + MCFINT0_DSPI0) +#define MCF_IRQ_DSPI1 (MCFINT1_VECBASE + MCFINT1_DSPI1) +/* + * eDMA module. 
+ */ +#define MCFEDMA_BASE 0xfc044000 +#define MCFEDMA_SIZE 0x4000 +#define MCFINT0_EDMA_INTR0 8 +#define MCFINT0_EDMA_ERR 24 +#define MCFEDMA_EDMA_INTR16 8 +#define MCFEDMA_EDMA_INTR56 0 +#define MCFEDMA_IRQ_INTR0 (MCFINT0_VECBASE + MCFINT0_EDMA_INTR0) +#define MCFEDMA_IRQ_INTR16 (MCFINT1_VECBASE + MCFEDMA_EDMA_INTR16) +#define MCFEDMA_IRQ_INTR56 (MCFINT2_VECBASE + MCFEDMA_EDMA_INTR56) +#define MCFEDMA_IRQ_ERR (MCFINT0_VECBASE + MCFINT0_EDMA_ERR) #endif /* m5441xsim_h */ diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h index 08cee11180e6..d9a08bed4b12 100644 --- a/arch/m68k/include/asm/macintosh.h +++ b/arch/m68k/include/asm/macintosh.h @@ -19,6 +19,10 @@ extern void mac_init_IRQ(void); extern void mac_irq_enable(struct irq_data *data); extern void mac_irq_disable(struct irq_data *data); +extern unsigned char mac_pram_read_byte(int); +extern void mac_pram_write_byte(unsigned char, int); +extern ssize_t mac_pram_get_size(void); + /* * Macintosh Table */ diff --git a/arch/m68k/include/asm/macints.h b/arch/m68k/include/asm/macints.h index cddb2d3ea49b..4da172bd048c 100644 --- a/arch/m68k/include/asm/macints.h +++ b/arch/m68k/include/asm/macints.h @@ -121,7 +121,4 @@ #define SLOT2IRQ(x) (x + 47) #define IRQ2SLOT(x) (x - 47) -#define INT_CLK 24576 /* CLK while int_clk =2.456MHz and divide = 100 */ -#define INT_TICKS 246 /* to make sched_time = 99.902... HZ */ - #endif /* asm/macints.h */ diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index 0b4cc1e079b5..c6686559e9b7 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -45,16 +45,9 @@ static inline void set_fs(mm_segment_t val) : /* no outputs */ : "r" (val.seg) : "memory"); } -static inline mm_segment_t get_ds(void) -{ - /* return the supervisor data space code */ - return KERNEL_DS; -} - #else #define USER_DS MAKE_MM_SEG(TASK_SIZE) #define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 49d5de18646b..2e0047cf86f8 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -15,8 +15,8 @@ #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_TIME32 +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index b8b3525271fa..960bf1e4be53 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild @@ -2,4 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generic-y += kvm_para.h -generic-y += shmparam.h diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index ad0195cbe042..528484feff80 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -37,13 +38,14 @@ #ifdef CONFIG_AMIGA #include #endif -#ifdef CONFIG_ATARI #include +#ifdef CONFIG_ATARI #include #endif #ifdef CONFIG_SUN3X #include #endif +#include #include #if !FPSTATESIZE || !NR_IRQS @@ -547,3 +549,81 @@ static int __init adb_probe_sync_enable (char *str) { __setup("adb_sync", adb_probe_sync_enable); #endif /* 
CONFIG_ADB */ + +#if IS_ENABLED(CONFIG_NVRAM) +#ifdef CONFIG_MAC +static unsigned char m68k_nvram_read_byte(int addr) +{ + if (MACH_IS_MAC) + return mac_pram_read_byte(addr); + return 0xff; +} + +static void m68k_nvram_write_byte(unsigned char val, int addr) +{ + if (MACH_IS_MAC) + mac_pram_write_byte(val, addr); +} +#endif /* CONFIG_MAC */ + +#ifdef CONFIG_ATARI +static ssize_t m68k_nvram_read(char *buf, size_t count, loff_t *ppos) +{ + if (MACH_IS_ATARI) + return atari_nvram_read(buf, count, ppos); + else if (MACH_IS_MAC) + return nvram_read_bytes(buf, count, ppos); + return -EINVAL; +} + +static ssize_t m68k_nvram_write(char *buf, size_t count, loff_t *ppos) +{ + if (MACH_IS_ATARI) + return atari_nvram_write(buf, count, ppos); + else if (MACH_IS_MAC) + return nvram_write_bytes(buf, count, ppos); + return -EINVAL; +} + +static long m68k_nvram_set_checksum(void) +{ + if (MACH_IS_ATARI) + return atari_nvram_set_checksum(); + return -EINVAL; +} + +static long m68k_nvram_initialize(void) +{ + if (MACH_IS_ATARI) + return atari_nvram_initialize(); + return -EINVAL; +} +#endif /* CONFIG_ATARI */ + +static ssize_t m68k_nvram_get_size(void) +{ + if (MACH_IS_ATARI) + return atari_nvram_get_size(); + else if (MACH_IS_MAC) + return mac_pram_get_size(); + return -ENODEV; +} + +/* Atari device drivers call .read (to get checksum validation) whereas + * Mac and PowerMac device drivers just use .read_byte. + */ +const struct nvram_ops arch_nvram_ops = { +#ifdef CONFIG_MAC + .read_byte = m68k_nvram_read_byte, + .write_byte = m68k_nvram_write_byte, +#endif +#ifdef CONFIG_ATARI + .read = m68k_nvram_read, + .write = m68k_nvram_write, + .set_checksum = m68k_nvram_set_checksum, + .initialize = m68k_nvram_initialize, +#endif + .get_size = m68k_nvram_get_size, +}; +EXPORT_SYMBOL(arch_nvram_ops); +#endif /* CONFIG_NVRAM */ diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index e2a9421c5797..87e7f3639839 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -651,7 +651,8 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec, regs->vector = formatvec & 0xfff; } else { struct switch_stack *sw = (struct switch_stack *)regs - 1; - unsigned long buf[fsize / 2]; /* yes, twice as much */ + /* yes, twice as much as max(sizeof(frame.un.fmt)) */ + unsigned long buf[sizeof(((struct frame *)0)->un) / 2]; /* that'll make sure that expansion won't crap over data */ if (copy_from_user(buf + fsize / 4, fp, fsize)) diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl index 1a95c4a1bc0d..125c14178979 100644 --- a/arch/m68k/kernel/syscalls/syscall.tbl +++ b/arch/m68k/kernel/syscalls/syscall.tbl @@ -20,7 +20,7 @@ 10 common unlink sys_unlink 11 common execve sys_execve 12 common chdir sys_chdir -13 common time sys_time +13 common time sys_time32 14 common mknod sys_mknod 15 common chmod sys_chmod 16 common chown sys_chown16 @@ -32,12 +32,12 @@ 22 common umount sys_oldumount 23 common setuid sys_setuid16 24 common getuid sys_getuid16 -25 common stime sys_stime +25 common stime sys_stime32 26 common ptrace sys_ptrace 27 common alarm sys_alarm 28 common oldfstat sys_fstat 29 common pause sys_pause -30 common utime sys_utime +30 common utime sys_utime32 # 31 was stty # 32 was gtty 33 common access sys_access @@ -131,7 +131,7 @@ 121 common setdomainname sys_setdomainname 122 common uname sys_newuname 123 common cacheflush sys_cacheflush -124 common adjtimex sys_adjtimex +124 common adjtimex sys_adjtimex_time32 125 common mprotect sys_mprotect 126 
common sigprocmask sys_sigprocmask 127 common create_module sys_ni_syscall @@ -168,8 +168,8 @@ 158 common sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep +161 common sched_rr_get_interval sys_sched_rr_get_interval_time32 +162 common nanosleep sys_nanosleep_time32 163 common mremap sys_mremap 164 common setresuid sys_setresuid16 165 common getresuid sys_getresuid16 @@ -184,7 +184,7 @@ 174 common rt_sigaction sys_rt_sigaction 175 common rt_sigprocmask sys_rt_sigprocmask 176 common rt_sigpending sys_rt_sigpending -177 common rt_sigtimedwait sys_rt_sigtimedwait +177 common rt_sigtimedwait sys_rt_sigtimedwait_time32 178 common rt_sigqueueinfo sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend 180 common pread64 sys_pread64 @@ -242,7 +242,7 @@ 232 common removexattr sys_removexattr 233 common lremovexattr sys_lremovexattr 234 common fremovexattr sys_fremovexattr -235 common futex sys_futex +235 common futex sys_futex_time32 236 common sendfile64 sys_sendfile64 237 common mincore sys_mincore 238 common madvise sys_madvise @@ -250,7 +250,7 @@ 240 common readahead sys_readahead 241 common io_setup sys_io_setup 242 common io_destroy sys_io_destroy -243 common io_getevents sys_io_getevents +243 common io_getevents sys_io_getevents_time32 244 common io_submit sys_io_submit 245 common io_cancel sys_io_cancel 246 common fadvise64 sys_fadvise64 @@ -262,26 +262,26 @@ 252 common remap_file_pages sys_remap_file_pages 253 common set_tid_address sys_set_tid_address 254 common timer_create sys_timer_create -255 common timer_settime sys_timer_settime -256 common timer_gettime sys_timer_gettime +255 common timer_settime sys_timer_settime32 +256 common timer_gettime sys_timer_gettime32 257 common timer_getoverrun sys_timer_getoverrun 258 common timer_delete sys_timer_delete -259 common clock_settime sys_clock_settime -260 common clock_gettime sys_clock_gettime -261 common clock_getres sys_clock_getres -262 common clock_nanosleep sys_clock_nanosleep +259 common clock_settime sys_clock_settime32 +260 common clock_gettime sys_clock_gettime32 +261 common clock_getres sys_clock_getres_time32 +262 common clock_nanosleep sys_clock_nanosleep_time32 263 common statfs64 sys_statfs64 264 common fstatfs64 sys_fstatfs64 265 common tgkill sys_tgkill -266 common utimes sys_utimes +266 common utimes sys_utimes_time32 267 common fadvise64_64 sys_fadvise64_64 268 common mbind sys_mbind 269 common get_mempolicy sys_get_mempolicy 270 common set_mempolicy sys_set_mempolicy 271 common mq_open sys_mq_open 272 common mq_unlink sys_mq_unlink -273 common mq_timedsend sys_mq_timedsend -274 common mq_timedreceive sys_mq_timedreceive +273 common mq_timedsend sys_mq_timedsend_time32 +274 common mq_timedreceive sys_mq_timedreceive_time32 275 common mq_notify sys_mq_notify 276 common mq_getsetattr sys_mq_getsetattr 277 common waitid sys_waitid @@ -299,7 +299,7 @@ 289 common mkdirat sys_mkdirat 290 common mknodat sys_mknodat 291 common fchownat sys_fchownat -292 common futimesat sys_futimesat +292 common futimesat sys_futimesat_time32 293 common fstatat64 sys_fstatat64 294 common unlinkat sys_unlinkat 295 common renameat sys_renameat @@ -308,8 +308,8 @@ 298 common readlinkat sys_readlinkat 299 common fchmodat sys_fchmodat 300 common faccessat sys_faccessat -301 common pselect6 sys_pselect6 -302 common ppoll sys_ppoll +301 common pselect6 
sys_pselect6_time32 +302 common ppoll sys_ppoll_time32 303 common unshare sys_unshare 304 common set_robust_list sys_set_robust_list 305 common get_robust_list sys_get_robust_list @@ -323,13 +323,13 @@ 313 common kexec_load sys_kexec_load 314 common getcpu sys_getcpu 315 common epoll_pwait sys_epoll_pwait -316 common utimensat sys_utimensat +316 common utimensat sys_utimensat_time32 317 common signalfd sys_signalfd 318 common timerfd_create sys_timerfd_create 319 common eventfd sys_eventfd 320 common fallocate sys_fallocate -321 common timerfd_settime sys_timerfd_settime -322 common timerfd_gettime sys_timerfd_gettime +321 common timerfd_settime sys_timerfd_settime32 +322 common timerfd_gettime sys_timerfd_gettime32 323 common signalfd4 sys_signalfd4 324 common eventfd2 sys_eventfd2 325 common epoll_create1 sys_epoll_create1 @@ -349,7 +349,7 @@ 339 common prlimit64 sys_prlimit64 340 common name_to_handle_at sys_name_to_handle_at 341 common open_by_handle_at sys_open_by_handle_at -342 common clock_adjtime sys_clock_adjtime +342 common clock_adjtime sys_clock_adjtime32 343 common syncfs sys_syncfs 344 common setns sys_setns 345 common process_vm_readv sys_process_vm_readv @@ -378,7 +378,7 @@ 368 common recvfrom sys_recvfrom 369 common recvmsg sys_recvmsg 370 common shutdown sys_shutdown -371 common recvmmsg sys_recvmmsg +371 common recvmmsg sys_recvmmsg_time32 372 common sendmmsg sys_sendmmsg 373 common userfaultfd sys_userfaultfd 374 common membarrier sys_membarrier @@ -387,3 +387,39 @@ 377 common preadv2 sys_preadv2 378 common pwritev2 sys_pwritev2 379 common statx sys_statx +380 common seccomp sys_seccomp +381 common pkey_mprotect sys_pkey_mprotect +382 common pkey_alloc sys_pkey_alloc +383 common pkey_free sys_pkey_free +384 common rseq sys_rseq +# room for arch specific calls +393 common semget sys_semget +394 common semctl sys_semctl +395 common shmget sys_shmget +396 common shmctl sys_shmctl +397 common shmat sys_shmat +398 common shmdt sys_shmdt +399 common msgget sys_msgget +400 common msgsnd sys_msgsnd +401 common msgrcv sys_msgrcv +402 common msgctl sys_msgctl +403 common clock_gettime64 sys_clock_gettime +404 common clock_settime64 sys_clock_settime +405 common clock_adjtime64 sys_clock_adjtime +406 common clock_getres_time64 sys_clock_getres +407 common clock_nanosleep_time64 sys_clock_nanosleep +408 common timer_gettime64 sys_timer_gettime +409 common timer_settime64 sys_timer_settime +410 common timerfd_gettime64 sys_timerfd_gettime +411 common timerfd_settime64 sys_timerfd_settime +412 common utimensat_time64 sys_utimensat +413 common pselect6_time64 sys_pselect6 +414 common ppoll_time64 sys_ppoll +416 common io_pgetevents_time64 sys_io_pgetevents +417 common recvmmsg_time64 sys_recvmmsg +418 common mq_timedsend_time64 sys_mq_timedsend +419 common mq_timedreceive_time64 sys_mq_timedreceive +420 common semtimedop_time64 sys_semtimedop +421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait +422 common futex_time64 sys_futex +423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index 71c4735a31ee..90f4e9ca1276 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -36,8 +36,9 @@ static void (*rom_reset)(void); +#if IS_ENABLED(CONFIG_NVRAM) #ifdef CONFIG_ADB_CUDA -static __u8 cuda_read_pram(int offset) +static unsigned char cuda_pram_read_byte(int offset) { struct adb_request req; @@ -49,7 +50,7 @@ static __u8 cuda_read_pram(int offset) return req.reply[3]; } -static void cuda_write_pram(int offset, 
__u8 data) +static void cuda_pram_write_byte(unsigned char data, int offset) { struct adb_request req; @@ -62,29 +63,29 @@ static void cuda_write_pram(int offset, __u8 data) #endif /* CONFIG_ADB_CUDA */ #ifdef CONFIG_ADB_PMU -static __u8 pmu_read_pram(int offset) +static unsigned char pmu_pram_read_byte(int offset) { struct adb_request req; - if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM, - (offset >> 8) & 0xFF, offset & 0xFF) < 0) + if (pmu_request(&req, NULL, 3, PMU_READ_XPRAM, + offset & 0xFF, 1) < 0) return 0; - while (!req.complete) - pmu_poll(); - return req.reply[3]; + pmu_wait_complete(&req); + + return req.reply[0]; } -static void pmu_write_pram(int offset, __u8 data) +static void pmu_pram_write_byte(unsigned char data, int offset) { struct adb_request req; - if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM, - (offset >> 8) & 0xFF, offset & 0xFF, data) < 0) + if (pmu_request(&req, NULL, 4, PMU_WRITE_XPRAM, + offset & 0xFF, 1, data) < 0) return; - while (!req.complete) - pmu_poll(); + pmu_wait_complete(&req); } #endif /* CONFIG_ADB_PMU */ +#endif /* CONFIG_NVRAM */ /* * VIA PRAM/RTC access routines @@ -93,7 +94,7 @@ static void pmu_write_pram(int offset, __u8 data) * the RTC should be enabled. */ -static __u8 via_pram_readbyte(void) +static __u8 via_rtc_recv(void) { int i, reg; __u8 data; @@ -120,7 +121,7 @@ static __u8 via_pram_readbyte(void) return data; } -static void via_pram_writebyte(__u8 data) +static void via_rtc_send(__u8 data) { int i, reg, bit; @@ -136,6 +137,31 @@ static void via_pram_writebyte(__u8 data) } } +/* + * These values can be found in Inside Macintosh vol. III ch. 2 + * which has a description of the RTC chip in the original Mac. + */ + +#define RTC_FLG_READ BIT(7) +#define RTC_FLG_WRITE_PROTECT BIT(7) +#define RTC_CMD_READ(r) (RTC_FLG_READ | (r << 2)) +#define RTC_CMD_WRITE(r) (r << 2) +#define RTC_REG_SECONDS_0 0 +#define RTC_REG_SECONDS_1 1 +#define RTC_REG_SECONDS_2 2 +#define RTC_REG_SECONDS_3 3 +#define RTC_REG_WRITE_PROTECT 13 + +/* + * Inside Mac has no information about two-byte RTC commands but + * the MAME/MESS source code has the essentials. + */ + +#define RTC_REG_XPRAM 14 +#define RTC_CMD_XPRAM_READ (RTC_CMD_READ(RTC_REG_XPRAM) << 8) +#define RTC_CMD_XPRAM_WRITE (RTC_CMD_WRITE(RTC_REG_XPRAM) << 8) +#define RTC_CMD_XPRAM_ARG(a) (((a & 0xE0) << 3) | ((a & 0x1F) << 2)) + /* * Execute a VIA PRAM/RTC command. For read commands * data should point to a one-byte buffer for the @@ -145,29 +171,33 @@ static void via_pram_writebyte(__u8 data) * This function disables all interrupts while running. 
*/ -static void via_pram_command(int command, __u8 *data) +static void via_rtc_command(int command, __u8 *data) { unsigned long flags; int is_read; local_irq_save(flags); + /* The least significant bits must be 0b01 according to Inside Mac */ + + command = (command & ~3) | 1; + /* Enable the RTC and make sure the strobe line is high */ via1[vBufB] = (via1[vBufB] | VIA1B_vRTCClk) & ~VIA1B_vRTCEnb; if (command & 0xFF00) { /* extended (two-byte) command */ - via_pram_writebyte((command & 0xFF00) >> 8); - via_pram_writebyte(command & 0xFF); - is_read = command & 0x8000; + via_rtc_send((command & 0xFF00) >> 8); + via_rtc_send(command & 0xFF); + is_read = command & (RTC_FLG_READ << 8); } else { /* one-byte command */ - via_pram_writebyte(command); - is_read = command & 0x80; + via_rtc_send(command); + is_read = command & RTC_FLG_READ; } if (is_read) { - *data = via_pram_readbyte(); + *data = via_rtc_recv(); } else { - via_pram_writebyte(*data); + via_rtc_send(*data); } /* All done, disable the RTC */ @@ -177,14 +207,30 @@ static void via_pram_command(int command, __u8 *data) local_irq_restore(flags); } -static __u8 via_read_pram(int offset) +#if IS_ENABLED(CONFIG_NVRAM) +static unsigned char via_pram_read_byte(int offset) { - return 0; + unsigned char temp; + + via_rtc_command(RTC_CMD_XPRAM_READ | RTC_CMD_XPRAM_ARG(offset), &temp); + + return temp; } -static void via_write_pram(int offset, __u8 data) +static void via_pram_write_byte(unsigned char data, int offset) { + unsigned char temp; + + temp = 0x55; + via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp); + + temp = data; + via_rtc_command(RTC_CMD_XPRAM_WRITE | RTC_CMD_XPRAM_ARG(offset), &temp); + + temp = 0x55 | RTC_FLG_WRITE_PROTECT; + via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp); } +#endif /* CONFIG_NVRAM */ /* * Return the current time in seconds since January 1, 1904. 
@@ -201,10 +247,10 @@ static time64_t via_read_time(void) } result, last_result; int count = 1; - via_pram_command(0x81, &last_result.cdata[3]); - via_pram_command(0x85, &last_result.cdata[2]); - via_pram_command(0x89, &last_result.cdata[1]); - via_pram_command(0x8D, &last_result.cdata[0]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0), &last_result.cdata[3]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1), &last_result.cdata[2]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2), &last_result.cdata[1]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3), &last_result.cdata[0]); /* * The NetBSD guys say to loop until you get the same reading @@ -212,10 +258,14 @@ static time64_t via_read_time(void) */ while (1) { - via_pram_command(0x81, &result.cdata[3]); - via_pram_command(0x85, &result.cdata[2]); - via_pram_command(0x89, &result.cdata[1]); - via_pram_command(0x8D, &result.cdata[0]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_0), + &result.cdata[3]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_1), + &result.cdata[2]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_2), + &result.cdata[1]); + via_rtc_command(RTC_CMD_READ(RTC_REG_SECONDS_3), + &result.cdata[0]); if (result.idata == last_result.idata) return (time64_t)result.idata - RTC_OFFSET; @@ -254,18 +304,18 @@ static void via_set_rtc_time(struct rtc_time *tm) /* Clear the write protect bit */ temp = 0x55; - via_pram_command(0x35, &temp); + via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp); data.idata = lower_32_bits(time + RTC_OFFSET); - via_pram_command(0x01, &data.cdata[3]); - via_pram_command(0x05, &data.cdata[2]); - via_pram_command(0x09, &data.cdata[1]); - via_pram_command(0x0D, &data.cdata[0]); + via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_0), &data.cdata[3]); + via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_1), &data.cdata[2]); + via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_2), &data.cdata[1]); + via_rtc_command(RTC_CMD_WRITE(RTC_REG_SECONDS_3), &data.cdata[0]); /* Set the write protect bit */ - temp = 0xD5; - via_pram_command(0x35, &temp); + temp = 0x55 | RTC_FLG_WRITE_PROTECT; + via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp); } static void via_shutdown(void) @@ -326,66 +376,58 @@ static void cuda_shutdown(void) *------------------------------------------------------------------- */ -void mac_pram_read(int offset, __u8 *buffer, int len) +#if IS_ENABLED(CONFIG_NVRAM) +unsigned char mac_pram_read_byte(int addr) { - __u8 (*func)(int); - int i; - switch (macintosh_config->adb_type) { case MAC_ADB_IOP: case MAC_ADB_II: case MAC_ADB_PB1: - func = via_read_pram; - break; + return via_pram_read_byte(addr); #ifdef CONFIG_ADB_CUDA case MAC_ADB_EGRET: case MAC_ADB_CUDA: - func = cuda_read_pram; - break; + return cuda_pram_read_byte(addr); #endif #ifdef CONFIG_ADB_PMU case MAC_ADB_PB2: - func = pmu_read_pram; - break; + return pmu_pram_read_byte(addr); #endif default: - return; - } - for (i = 0 ; i < len ; i++) { - buffer[i] = (*func)(offset++); + return 0xFF; } } -void mac_pram_write(int offset, __u8 *buffer, int len) +void mac_pram_write_byte(unsigned char val, int addr) { - void (*func)(int, __u8); - int i; - switch (macintosh_config->adb_type) { case MAC_ADB_IOP: case MAC_ADB_II: case MAC_ADB_PB1: - func = via_write_pram; + via_pram_write_byte(val, addr); break; #ifdef CONFIG_ADB_CUDA case MAC_ADB_EGRET: case MAC_ADB_CUDA: - func = cuda_write_pram; + cuda_pram_write_byte(val, addr); break; #endif #ifdef CONFIG_ADB_PMU case MAC_ADB_PB2: - func = pmu_write_pram; + pmu_pram_write_byte(val, addr); 
break; #endif default: - return; - } - for (i = 0 ; i < len ; i++) { - (*func)(offset++, buffer[i]); + break; } } +ssize_t mac_pram_get_size(void) +{ + return 256; +} +#endif /* CONFIG_NVRAM */ + void mac_poweroff(void) { if (oss_present) { @@ -410,9 +452,8 @@ void mac_poweroff(void) void mac_reset(void) { - if (macintosh_config->adb_type == MAC_ADB_II) { - unsigned long flags; - + if (macintosh_config->adb_type == MAC_ADB_II && + macintosh_config->ident != MAC_MODEL_SE30) { /* need ROMBASE in booter */ /* indeed, plus need to MAP THE ROM !! */ @@ -422,17 +463,8 @@ void mac_reset(void) /* works on some */ rom_reset = (void *) (mac_bi_data.rombase + 0xa); - if (macintosh_config->ident == MAC_MODEL_SE30) { - /* - * MSch: Machines known to crash on ROM reset ... - */ - } else { - local_irq_save(flags); - - rom_reset(); - - local_irq_restore(flags); - } + local_irq_disable(); + rom_reset(); #ifdef CONFIG_ADB_CUDA } else if (macintosh_config->adb_type == MAC_ADB_EGRET || macintosh_config->adb_type == MAC_ADB_CUDA) { diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c index acdabbeecfd2..0b0289459173 100644 --- a/arch/m68k/mac/via.c +++ b/arch/m68k/mac/via.c @@ -189,7 +189,6 @@ void __init via_init(void) /* * SE/30: disable video IRQ - * XXX: testing for SE/30 VBL */ if (macintosh_config->ident == MAC_MODEL_SE30) { @@ -197,13 +196,18 @@ void __init via_init(void) via1[vBufB] |= 0x40; } - /* - * Set the RTC bits to a known state: all lines to outputs and - * RTC disabled (yes that's 0 to enable and 1 to disable). - */ - - via1[vDirB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData); - via1[vBufB] |= (VIA1B_vRTCEnb | VIA1B_vRTCClk); + switch (macintosh_config->adb_type) { + case MAC_ADB_IOP: + case MAC_ADB_II: + case MAC_ADB_PB1: + /* + * Set the RTC bits to a known state: all lines to outputs and + * RTC disabled (yes that's 0 to enable and 1 to disable). + */ + via1[vDirB] |= VIA1B_vRTCEnb | VIA1B_vRTCClk | VIA1B_vRTCData; + via1[vBufB] |= VIA1B_vRTCEnb | VIA1B_vRTCClk; + break; + } /* Everything below this point is VIA2/RBV only... */ diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 933c33e76a48..8868a4c9adae 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -94,6 +94,9 @@ void __init paging_init(void) high_memory = (void *) end_mem; empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!empty_zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); /* * Set up SFC/DFC registers (user data space). 
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 0de4999a3810..6cb1e41d58d0 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -44,7 +44,9 @@ void __init paging_init(void) int i; empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE); - memset((void *) empty_zero_page, 0, PAGE_SIZE); + if (!empty_zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); pg_dir = swapper_pg_dir; memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); @@ -52,6 +54,9 @@ void __init paging_init(void) size = num_pages * sizeof(pte_t); size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE); + if (!next_pgtable) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, size, PAGE_SIZE); bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK; pg_dir += PAGE_OFFSET >> PGDIR_SHIFT; diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c index b86a2e21693b..227c04fe60d2 100644 --- a/arch/m68k/mm/memory.c +++ b/arch/m68k/mm/memory.c @@ -51,7 +51,7 @@ void __init init_pointer_table(unsigned long ptable) pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp)); /* unreserve the page so it's possible to free that page */ - PD_PAGE(dp)->flags &= ~(1 << PG_reserved); + __ClearPageReserved(PD_PAGE(dp)); init_page_count(PD_PAGE(dp)); return; diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 3f3d0bf36091..356601bf96d9 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -55,6 +55,9 @@ static pte_t * __init kernel_page_table(void) pte_t *ptablep; ptablep = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!ptablep) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); clear_page(ptablep); __flush_page_to_ram(ptablep); @@ -96,6 +99,9 @@ static pmd_t * __init kernel_ptr_table(void) if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) { last_pgtable = (pmd_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!last_pgtable) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); clear_page(last_pgtable); __flush_page_to_ram(last_pgtable); @@ -278,6 +284,9 @@ void __init paging_init(void) * to a couple of allocated pages */ empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!empty_zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); /* * Set up SFC/DFC registers diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c index f736db48a2e1..eca1c46bb90a 100644 --- a/arch/m68k/mm/sun3mmu.c +++ b/arch/m68k/mm/sun3mmu.c @@ -46,6 +46,9 @@ void __init paging_init(void) unsigned long size; empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!empty_zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); address = PAGE_OFFSET; pg_dir = swapper_pg_dir; @@ -56,6 +59,9 @@ void __init paging_init(void) size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE); + if (!next_pgtable) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, size, PAGE_SIZE); bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK; /* Map whole memory from PAGE_OFFSET (0x0E000000) */ diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index 4d64711d3d47..399f3d06125f 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c @@ -269,6 +269,9 @@ void __init dvma_init(void) iommu_use = 
memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long), SMP_CACHE_BYTES); + if (!iommu_use) + panic("%s: Failed to allocate %zu bytes\n", __func__, + IOMMU_TOTAL_ENTRIES * sizeof(unsigned long)); dvma_unmap_iommu(DVMA_START, DVMA_SIZE); diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 58aff2653d86..a51b965b3b82 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -1,5 +1,6 @@ config MICROBLAZE def_bool y + select ARCH_32BIT_OFF_T select ARCH_NO_SWAP select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU select ARCH_HAS_GCOV_PROFILE_ALL diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 9c7d1d25bf3d..791cc8d54d0a 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -26,6 +26,7 @@ generic-y += parport.h generic-y += percpu.h generic-y += preempt.h generic-y += serial.h +generic-y += shmparam.h generic-y += syscalls.h generic-y += topology.h generic-y += trace_clock.h diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index dbfea093a7c7..bff2a71c828a 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -42,7 +42,6 @@ # define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) # endif -# define get_ds() (KERNEL_DS) # define get_fs() (current_thread_info()->addr_limit) # define set_fs(val) (current_thread_info()->addr_limit = (val)) diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index 9b7c2c4eaf12..d79d35ac6253 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -21,8 +21,8 @@ #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_TIME32 +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index 28823e3db825..97823ec46e97 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild @@ -2,5 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl index a24d09e937dd..8ee3a8c18498 100644 --- a/arch/microblaze/kernel/syscalls/syscall.tbl +++ b/arch/microblaze/kernel/syscalls/syscall.tbl @@ -20,7 +20,7 @@ 10 common unlink sys_unlink 11 common execve sys_execve 12 common chdir sys_chdir -13 common time sys_time +13 common time sys_time32 14 common mknod sys_mknod 15 common chmod sys_chmod 16 common lchown sys_lchown @@ -32,12 +32,12 @@ 22 common umount sys_oldumount 23 common setuid sys_setuid 24 common getuid sys_getuid -25 common stime sys_stime +25 common stime sys_stime32 26 common ptrace sys_ptrace 27 common alarm sys_alarm 28 common oldfstat sys_ni_syscall 29 common pause sys_pause -30 common utime sys_utime +30 common utime sys_utime32 31 common stty sys_ni_syscall 32 common gtty sys_ni_syscall 33 common access sys_access @@ -131,7 +131,7 @@ 121 common setdomainname sys_setdomainname 122 common uname sys_newuname 123 common modify_ldt sys_ni_syscall -124 common adjtimex sys_adjtimex +124 common adjtimex sys_adjtimex_time32 125 common mprotect sys_mprotect 126 common sigprocmask sys_sigprocmask 127 common 
create_module sys_ni_syscall @@ -168,8 +168,8 @@ 158 common sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep +161 common sched_rr_get_interval sys_sched_rr_get_interval_time32 +162 common nanosleep sys_nanosleep_time32 163 common mremap sys_mremap 164 common setresuid sys_setresuid 165 common getresuid sys_getresuid @@ -184,7 +184,7 @@ 174 common rt_sigaction sys_rt_sigaction 175 common rt_sigprocmask sys_rt_sigprocmask 176 common rt_sigpending sys_rt_sigpending -177 common rt_sigtimedwait sys_rt_sigtimedwait +177 common rt_sigtimedwait sys_rt_sigtimedwait_time32 178 common rt_sigqueueinfo sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend 180 common pread64 sys_pread64 @@ -247,14 +247,14 @@ 237 common fremovexattr sys_fremovexattr 238 common tkill sys_tkill 239 common sendfile64 sys_sendfile64 -240 common futex sys_futex +240 common futex sys_futex_time32 241 common sched_setaffinity sys_sched_setaffinity 242 common sched_getaffinity sys_sched_getaffinity 243 common set_thread_area sys_ni_syscall 244 common get_thread_area sys_ni_syscall 245 common io_setup sys_io_setup 246 common io_destroy sys_io_destroy -247 common io_getevents sys_io_getevents +247 common io_getevents sys_io_getevents_time32 248 common io_submit sys_io_submit 249 common io_cancel sys_io_cancel 250 common fadvise64 sys_fadvise64 @@ -267,18 +267,18 @@ 257 common remap_file_pages sys_remap_file_pages 258 common set_tid_address sys_set_tid_address 259 common timer_create sys_timer_create -260 common timer_settime sys_timer_settime -261 common timer_gettime sys_timer_gettime +260 common timer_settime sys_timer_settime32 +261 common timer_gettime sys_timer_gettime32 262 common timer_getoverrun sys_timer_getoverrun 263 common timer_delete sys_timer_delete -264 common clock_settime sys_clock_settime -265 common clock_gettime sys_clock_gettime -266 common clock_getres sys_clock_getres -267 common clock_nanosleep sys_clock_nanosleep +264 common clock_settime sys_clock_settime32 +265 common clock_gettime sys_clock_gettime32 +266 common clock_getres sys_clock_getres_time32 +267 common clock_nanosleep sys_clock_nanosleep_time32 268 common statfs64 sys_statfs64 269 common fstatfs64 sys_fstatfs64 270 common tgkill sys_tgkill -271 common utimes sys_utimes +271 common utimes sys_utimes_time32 272 common fadvise64_64 sys_fadvise64_64 273 common vserver sys_ni_syscall 274 common mbind sys_mbind @@ -286,8 +286,8 @@ 276 common set_mempolicy sys_set_mempolicy 277 common mq_open sys_mq_open 278 common mq_unlink sys_mq_unlink -279 common mq_timedsend sys_mq_timedsend -280 common mq_timedreceive sys_mq_timedreceive +279 common mq_timedsend sys_mq_timedsend_time32 +280 common mq_timedreceive sys_mq_timedreceive_time32 281 common mq_notify sys_mq_notify 282 common mq_getsetattr sys_mq_getsetattr 283 common kexec_load sys_kexec_load @@ -306,7 +306,7 @@ 296 common mkdirat sys_mkdirat 297 common mknodat sys_mknodat 298 common fchownat sys_fchownat -299 common futimesat sys_futimesat +299 common futimesat sys_futimesat_time32 300 common fstatat64 sys_fstatat64 301 common unlinkat sys_unlinkat 302 common renameat sys_renameat @@ -315,8 +315,8 @@ 305 common readlinkat sys_readlinkat 306 common fchmodat sys_fchmodat 307 common faccessat sys_faccessat -308 common pselect6 sys_pselect6 -309 common ppoll sys_ppoll +308 common pselect6 
sys_pselect6_time32 +309 common ppoll sys_ppoll_time32 310 common unshare sys_unshare 311 common set_robust_list sys_set_robust_list 312 common get_robust_list sys_get_robust_list @@ -327,23 +327,23 @@ 317 common move_pages sys_move_pages 318 common getcpu sys_getcpu 319 common epoll_pwait sys_epoll_pwait -320 common utimensat sys_utimensat +320 common utimensat sys_utimensat_time32 321 common signalfd sys_signalfd 322 common timerfd_create sys_timerfd_create 323 common eventfd sys_eventfd 324 common fallocate sys_fallocate -325 common semtimedop sys_semtimedop -326 common timerfd_settime sys_timerfd_settime -327 common timerfd_gettime sys_timerfd_gettime -328 common semctl sys_semctl +325 common semtimedop sys_semtimedop_time32 +326 common timerfd_settime sys_timerfd_settime32 +327 common timerfd_gettime sys_timerfd_gettime32 +328 common semctl sys_old_semctl 329 common semget sys_semget 330 common semop sys_semop -331 common msgctl sys_msgctl +331 common msgctl sys_old_msgctl 332 common msgget sys_msgget 333 common msgrcv sys_msgrcv 334 common msgsnd sys_msgsnd 335 common shmat sys_shmat -336 common shmctl sys_shmctl +336 common shmctl sys_old_shmctl 337 common shmdt sys_shmdt 338 common shmget sys_shmget 339 common signalfd4 sys_signalfd4 @@ -374,13 +374,13 @@ 364 common pwritev sys_pwritev 365 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo 366 common perf_event_open sys_perf_event_open -367 common recvmmsg sys_recvmmsg +367 common recvmmsg sys_recvmmsg_time32 368 common fanotify_init sys_fanotify_init 369 common fanotify_mark sys_fanotify_mark 370 common prlimit64 sys_prlimit64 371 common name_to_handle_at sys_name_to_handle_at 372 common open_by_handle_at sys_open_by_handle_at -373 common clock_adjtime sys_clock_adjtime +373 common clock_adjtime sys_clock_adjtime32 374 common syncfs sys_syncfs 375 common setns sys_setns 376 common sendmmsg sys_sendmmsg @@ -406,5 +406,26 @@ 396 common pkey_alloc sys_pkey_alloc 397 common pkey_free sys_pkey_free 398 common statx sys_statx -399 common io_pgetevents sys_io_pgetevents +399 common io_pgetevents sys_io_pgetevents_time32 400 common rseq sys_rseq +# 401 and 402 are unused +403 common clock_gettime64 sys_clock_gettime +404 common clock_settime64 sys_clock_settime +405 common clock_adjtime64 sys_clock_adjtime +406 common clock_getres_time64 sys_clock_getres +407 common clock_nanosleep_time64 sys_clock_nanosleep +408 common timer_gettime64 sys_timer_gettime +409 common timer_settime64 sys_timer_settime +410 common timerfd_gettime64 sys_timerfd_gettime +411 common timerfd_settime64 sys_timerfd_settime +412 common utimensat_time64 sys_utimensat +413 common pselect6_time64 sys_pselect6 +414 common ppoll_time64 sys_ppoll +416 common io_pgetevents_time64 sys_io_pgetevents +417 common recvmmsg_time64 sys_recvmmsg +418 common mq_timedsend_time64 sys_mq_timedsend +419 common mq_timedreceive_time64 sys_mq_timedreceive +420 common semtimedop_time64 sys_semtimedop +421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait +422 common futex_time64 sys_futex +423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index b17fd8aafd64..7e97d44f6538 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -363,8 +363,9 @@ void __init *early_get_page(void) * Mem start + kernel_tlb -> here is limit * because of mem mapping from head.S */ - return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, - memory_start + kernel_tlb)); + return memblock_alloc_try_nid_raw(PAGE_SIZE, 
PAGE_SIZE, + MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb, + NUMA_NO_NODE); } #endif /* CONFIG_MMU */ @@ -373,12 +374,14 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) { void *p; - if (mem_init_done) + if (mem_init_done) { p = kzalloc(size, mask); - else { + } else { p = memblock_alloc(size, SMP_CACHE_BYTES); - if (p) - memset(p, 0, size); + if (!p) + panic("%s: Failed to allocate %zu bytes\n", + __func__, size); } + return p; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 0d14f51d0002..4a5f5b0ee9a9 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2,6 +2,7 @@ config MIPS bool default y + select ARCH_32BIT_OFF_T if !64BIT select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT select ARCH_CLOCKSOURCE_DATA select ARCH_DISCARD_MEMBLOCK @@ -56,7 +57,6 @@ config MIPS select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER - select HAVE_GENERIC_DMA_COHERENT select HAVE_IDE select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK @@ -206,7 +206,6 @@ config ATH79 select COMMON_CLK select CLKDEV_LOOKUP select IRQ_MIPS_CPU - select MIPS_MACHINE select SYS_HAS_CPU_MIPS32_R2 select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_32BIT_KERNEL @@ -391,7 +390,7 @@ config MACH_INGENIC select GPIOLIB select COMMON_CLK select GENERIC_IRQ_CHIP - select BUILTIN_DTB + select BUILTIN_DTB if MIPS_NO_APPENDED_DTB select USE_OF select LIBFDT @@ -676,6 +675,7 @@ config SGI_IP27 select DEFAULT_SGI_PARTITION select SYS_HAS_EARLY_PRINTK select HAVE_PCI + select IRQ_MIPS_CPU select NR_CPUS_DEFAULT_64 select SYS_HAS_CPU_R10000 select SYS_SUPPORTS_64BIT_KERNEL @@ -1118,13 +1118,13 @@ config DMA_MAYBE_COHERENT config DMA_PERDEV_COHERENT bool + select ARCH_HAS_SETUP_DMA_OPS select DMA_NONCOHERENT config DMA_NONCOHERENT bool select ARCH_HAS_DMA_MMAP_PGPROT select ARCH_HAS_SYNC_DMA_FOR_DEVICE - select ARCH_HAS_SYNC_DMA_FOR_CPU select NEED_DMA_MAP_STATE select ARCH_HAS_DMA_COHERENT_TO_PFN select DMA_NONCOHERENT_CACHE_SYNC @@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT please say 'N' here. If you want a high-performance kernel to run on new Loongson 3 machines only, please say 'Y' here. +config CPU_LOONGSON3_WORKAROUNDS + bool "Old Loongson 3 LLSC Workarounds" + default y if SMP + depends on CPU_LOONGSON3 + help + Loongson 3 processors have the llsc issues which require workarounds. + Without workarounds the system may hang unexpectedly. + + Newer Loongson 3 will fix these issues and no workarounds are needed. + The workarounds have no significant side effect on them but may + decrease the performance of the system so this option should be + disabled unless the kernel is intended to be run on old systems. + + If unsure, please say Y. 
+ config CPU_LOONGSON2E bool "Loongson 2E" depends on SYS_HAS_CPU_LOONGSON2E @@ -1541,6 +1556,7 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_HUGEPAGES select CPU_SUPPORTS_MSA select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32 select HAVE_KVM @@ -1866,7 +1882,7 @@ config CPU_LOONGSON2 config CPU_LOONGSON1 bool select CPU_MIPS32 - select CPU_MIPSR1 + select CPU_MIPSR2 select CPU_HAS_PREFETCH select CPU_HAS_LOAD_STORE_LR select CPU_SUPPORTS_32BIT_KERNEL @@ -1928,9 +1944,11 @@ config SYS_HAS_CPU_MIPS32_R3_5 config SYS_HAS_CPU_MIPS32_R5 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_MIPS32_R6 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_MIPS64_R1 bool @@ -1940,6 +1958,7 @@ config SYS_HAS_CPU_MIPS64_R2 config SYS_HAS_CPU_MIPS64_R6 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_R3000 bool @@ -1976,6 +1995,7 @@ config SYS_HAS_CPU_R8000 config SYS_HAS_CPU_R10000 bool + select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT config SYS_HAS_CPU_RM7000 bool @@ -2004,6 +2024,7 @@ config SYS_HAS_CPU_BMIPS4380 config SYS_HAS_CPU_BMIPS5000 bool select SYS_HAS_CPU_BMIPS + select ARCH_HAS_SYNC_DMA_FOR_CPU config SYS_HAS_CPU_XLR bool diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 5b174c3d0de3..8f4486c4415b 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -233,6 +233,8 @@ toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc) cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp) cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP +toolchain-ginv := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mginv) +cflags-$(toolchain-ginv) += -DTOOLCHAIN_SUPPORTS_GINV # # Firmware support diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c index f09262e0a72f..10ff07b7721e 100644 --- a/arch/mips/ar7/platform.c +++ b/arch/mips/ar7/platform.c @@ -683,7 +683,7 @@ static int __init ar7_register_devices(void) if (ar7_has_high_cpmac()) { res = fixed_phy_add(PHY_POLL, cpmac_high.id, - &fixed_phy_status, -1); + &fixed_phy_status); if (!res) { cpmac_get_mac(1, cpmac_high_data.dev_addr); @@ -696,7 +696,7 @@ static int __init ar7_register_devices(void) } else cpmac_low_data.phy_mask = 0xffffffff; - res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status, -1); + res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status); if (!res) { cpmac_get_mac(0, cpmac_low_data.dev_addr); res = platform_device_register(&cpmac_low); diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig index 191c3910eac5..7367416642cb 100644 --- a/arch/mips/ath79/Kconfig +++ b/arch/mips/ath79/Kconfig @@ -1,79 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 if ATH79 -menu "Atheros AR71XX/AR724X/AR913X machine selection" - -config ATH79_MACH_AP121 - bool "Atheros AP121 reference board" - select SOC_AR933X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros AP121 reference board. - -config ATH79_MACH_AP136 - bool "Atheros AP136 reference board" - select SOC_QCA955X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros AP136 reference board. 
- -config ATH79_MACH_AP81 - bool "Atheros AP81 reference board" - select SOC_AR913X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros AP81 reference board. - -config ATH79_MACH_DB120 - bool "Atheros DB120 reference board" - select SOC_AR934X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - select ATH79_DEV_WMAC - help - Say 'Y' here if you want your kernel to support the - Atheros DB120 reference board. - -config ATH79_MACH_PB44 - bool "Atheros PB44 reference board" - select SOC_AR71XX - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - select ATH79_DEV_USB - help - Say 'Y' here if you want your kernel to support the - Atheros PB44 reference board. - -config ATH79_MACH_UBNT_XM - bool "Ubiquiti Networks XM (rev 1.0) board" - select SOC_AR724X - select ATH79_DEV_GPIO_BUTTONS - select ATH79_DEV_LEDS_GPIO - select ATH79_DEV_SPI - help - Say 'Y' here if you want your kernel to support the - Ubiquiti Networks XM (rev 1.0) board. - -endmenu - config SOC_AR71XX select HAVE_PCI def_bool n diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile index fcc382cfc770..e18d9a2ecf62 100644 --- a/arch/mips/ath79/Makefile +++ b/arch/mips/ath79/Makefile @@ -8,27 +8,6 @@ # under the terms of the GNU General Public License version 2 as published # by the Free Software Foundation. -obj-y := prom.o setup.o irq.o common.o clock.o +obj-y := prom.o setup.o common.o clock.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_PCI) += pci.o - -# -# Devices -# -obj-y += dev-common.o -obj-$(CONFIG_ATH79_DEV_GPIO_BUTTONS) += dev-gpio-buttons.o -obj-$(CONFIG_ATH79_DEV_LEDS_GPIO) += dev-leds-gpio.o -obj-$(CONFIG_ATH79_DEV_SPI) += dev-spi.o -obj-$(CONFIG_ATH79_DEV_USB) += dev-usb.o -obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o - -# -# Machines -# -obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o -obj-$(CONFIG_ATH79_MACH_AP136) += mach-ap136.o -obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o -obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o -obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o -obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c index cf9158e3c2d9..d4ca97e2ec6c 100644 --- a/arch/mips/ath79/clock.c +++ b/arch/mips/ath79/clock.c @@ -26,7 +26,6 @@ #include #include #include "common.h" -#include "machtypes.h" #define AR71XX_BASE_FREQ 40000000 #define AR724X_BASE_FREQ 40000000 @@ -37,24 +36,63 @@ static struct clk_onecell_data clk_data = { .clk_num = ARRAY_SIZE(clks), }; -static struct clk *__init ath79_add_sys_clkdev( - const char *id, unsigned long rate) +static const char * const clk_names[ATH79_CLK_END] = { + [ATH79_CLK_CPU] = "cpu", + [ATH79_CLK_DDR] = "ddr", + [ATH79_CLK_AHB] = "ahb", + [ATH79_CLK_REF] = "ref", + [ATH79_CLK_MDIO] = "mdio", +}; + +static const char * __init ath79_clk_name(int type) { - struct clk *clk; - int err; + BUG_ON(type >= ARRAY_SIZE(clk_names) || !clk_names[type]); + return clk_names[type]; +} - clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate); +static void __init __ath79_set_clk(int type, const char *name, struct clk *clk) +{ if (IS_ERR(clk)) - panic("failed to allocate %s clock structure", id); + panic("failed to allocate %s clock structure", clk_names[type]); - err = clk_register_clkdev(clk, id, NULL); - if (err) - panic("unable to register %s clock device", id); + 
clks[type] = clk; + clk_register_clkdev(clk, name, NULL); +} +static struct clk * __init ath79_set_clk(int type, unsigned long rate) +{ + const char *name = ath79_clk_name(type); + struct clk *clk; + + clk = clk_register_fixed_rate(NULL, name, NULL, 0, rate); + __ath79_set_clk(type, name, clk); return clk; } -static void __init ar71xx_clocks_init(void) +static struct clk * __init ath79_set_ff_clk(int type, const char *parent, + unsigned int mult, unsigned int div) +{ + const char *name = ath79_clk_name(type); + struct clk *clk; + + clk = clk_register_fixed_factor(NULL, name, parent, 0, mult, div); + __ath79_set_clk(type, name, clk); + return clk; +} + +static unsigned long __init ath79_setup_ref_clk(unsigned long rate) +{ + struct clk *clk = clks[ATH79_CLK_REF]; + + if (clk) + rate = clk_get_rate(clk); + else + clk = ath79_set_clk(ATH79_CLK_REF, rate); + + return rate; +} + +static void __init ar71xx_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -64,9 +102,9 @@ static void __init ar71xx_clocks_init(void) u32 freq; u32 div; - ref_rate = AR71XX_BASE_FREQ; + ref_rate = ath79_setup_ref_clk(AR71XX_BASE_FREQ); - pll = ath79_pll_rr(AR71XX_PLL_REG_CPU_CONFIG); + pll = __raw_readl(pll_base + AR71XX_PLL_REG_CPU_CONFIG); div = ((pll >> AR71XX_PLL_FB_SHIFT) & AR71XX_PLL_FB_MASK) + 1; freq = div * ref_rate; @@ -80,31 +118,17 @@ static void __init ar71xx_clocks_init(void) div = (((pll >> AR71XX_AHB_DIV_SHIFT) & AR71XX_AHB_DIV_MASK) + 1) * 2; ahb_rate = cpu_rate / div; - ath79_add_sys_clkdev("ref", ref_rate); - clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate); - clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate); - clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ahb", NULL); - clk_add_alias("uart", NULL, "ahb", NULL); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -static struct clk * __init ath79_reg_ffclk(const char *name, - const char *parent_name, unsigned int mult, unsigned int div) +static void __init ar724x_clocks_init(void __iomem *pll_base) { - struct clk *clk; - - clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div); - if (IS_ERR(clk)) - panic("failed to allocate %s clock structure", name); - - return clk; -} - -static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base) -{ - u32 pll; u32 mult, div, ddr_div, ahb_div; + u32 pll; + + ath79_setup_ref_clk(AR71XX_BASE_FREQ); pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG); @@ -114,30 +138,14 @@ static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base) ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1; ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2; - clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", mult, div); - clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", mult, div * ddr_div); - clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", mult, div * ahb_div); -} - -static void __init ar724x_clocks_init(void) -{ - struct clk *ref_clk; - - ref_clk = ath79_add_sys_clkdev("ref", AR724X_BASE_FREQ); - - ar724x_clk_init(ref_clk, ath79_pll_base); - - /* just make happy plat_time_init() from arch/mips/ath79/setup.c */ - clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL); - clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL); - clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL); - - clk_add_alias("wdt", NULL, "ahb", NULL); - clk_add_alias("uart", NULL, 
"ahb", NULL); + ath79_set_ff_clk(ATH79_CLK_CPU, "ref", mult, div); + ath79_set_ff_clk(ATH79_CLK_DDR, "ref", mult, div * ddr_div); + ath79_set_ff_clk(ATH79_CLK_AHB, "ref", mult, div * ahb_div); } -static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base) +static void __init ar933x_clocks_init(void __iomem *pll_base) { + unsigned long ref_rate; u32 clock_ctrl; u32 ref_div; u32 ninit_mul; @@ -146,6 +154,15 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base) u32 cpu_div; u32 ddr_div; u32 ahb_div; + u32 t; + + t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); + if (t & AR933X_BOOTSTRAP_REF_CLK_40) + ref_rate = (40 * 1000 * 1000); + else + ref_rate = (25 * 1000 * 1000); + + ath79_setup_ref_clk(ref_rate); clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG); if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) { @@ -186,37 +203,12 @@ static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base) AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1; } - clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", - ninit_mul, ref_div * out_div * cpu_div); - clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", - ninit_mul, ref_div * out_div * ddr_div); - clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", - ninit_mul, ref_div * out_div * ahb_div); -} - -static void __init ar933x_clocks_init(void) -{ - struct clk *ref_clk; - unsigned long ref_rate; - u32 t; - - t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); - if (t & AR933X_BOOTSTRAP_REF_CLK_40) - ref_rate = (40 * 1000 * 1000); - else - ref_rate = (25 * 1000 * 1000); - - ref_clk = ath79_add_sys_clkdev("ref", ref_rate); - - ar9330_clk_init(ref_clk, ath79_pll_base); - - /* just make happy plat_time_init() from arch/mips/ath79/setup.c */ - clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL); - clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL); - clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL); - - clk_add_alias("wdt", NULL, "ahb", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + ath79_set_ff_clk(ATH79_CLK_CPU, "ref", ninit_mul, + ref_div * out_div * cpu_div); + ath79_set_ff_clk(ATH79_CLK_DDR, "ref", ninit_mul, + ref_div * out_div * ddr_div); + ath79_set_ff_clk(ATH79_CLK_AHB, "ref", ninit_mul, + ref_div * out_div * ahb_div); } static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac, @@ -239,7 +231,7 @@ static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac, return ret; } -static void __init ar934x_clocks_init(void) +static void __init ar934x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -258,6 +250,8 @@ static void __init ar934x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; + ref_rate = ath79_setup_ref_clk(ref_rate); + pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG); if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) { out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) & @@ -270,7 +264,7 @@ static void __init ar934x_clocks_init(void) AR934X_SRIF_DPLL1_REFDIV_MASK; frac = 1 << 18; } else { - pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG); + pll = __raw_readl(pll_base + AR934X_PLL_CPU_CONFIG_REG); out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & AR934X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) & @@ -297,7 +291,7 @@ static void __init ar934x_clocks_init(void) AR934X_SRIF_DPLL1_REFDIV_MASK; frac = 1 << 18; } else { - pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + AR934X_PLL_DDR_CONFIG_REG); out_div = (pll >> 
AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & AR934X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) & @@ -312,7 +306,7 @@ static void __init ar934x_clocks_init(void) ddr_pll = ar934x_get_pll_freq(ref_rate, ref_div, nint, nfrac, frac, out_div); - clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + AR934X_PLL_CPU_DDR_CLK_CTRL_REG); postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK; @@ -344,18 +338,18 @@ static void __init ar934x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate); - clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate); - clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + clk_ctrl = __raw_readl(pll_base + AR934X_PLL_SWITCH_CLOCK_CONTROL_REG); + if (clk_ctrl & AR934X_PLL_SWITCH_CLOCK_CONTROL_MDIO_CLK_SEL) + ath79_set_clk(ATH79_CLK_MDIO, 100 * 1000 * 1000); iounmap(dpll_base); } -static void __init qca953x_clocks_init(void) +static void __init qca953x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -371,7 +365,9 @@ static void __init qca953x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; - pll = ath79_pll_rr(QCA953X_PLL_CPU_CONFIG_REG); + ref_rate = ath79_setup_ref_clk(ref_rate); + + pll = __raw_readl(pll_base + QCA953X_PLL_CPU_CONFIG_REG); out_div = (pll >> QCA953X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & QCA953X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA953X_PLL_CPU_CONFIG_REFDIV_SHIFT) & @@ -385,7 +381,7 @@ static void __init qca953x_clocks_init(void) cpu_pll += frac * (ref_rate >> 6) / ref_div; cpu_pll /= (1 << out_div); - pll = ath79_pll_rr(QCA953X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + QCA953X_PLL_DDR_CONFIG_REG); out_div = (pll >> QCA953X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & QCA953X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA953X_PLL_DDR_CONFIG_REFDIV_SHIFT) & @@ -399,7 +395,7 @@ static void __init qca953x_clocks_init(void) ddr_pll += frac * (ref_rate >> 6) / (ref_div << 4); ddr_pll /= (1 << out_div); - clk_ctrl = ath79_pll_rr(QCA953X_PLL_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + QCA953X_PLL_CLK_CTRL_REG); postdiv = (clk_ctrl >> QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) & QCA953X_PLL_CLK_CTRL_CPU_POST_DIV_MASK; @@ -431,16 +427,12 @@ static void __init qca953x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - ath79_add_sys_clkdev("cpu", cpu_rate); - ath79_add_sys_clkdev("ddr", ddr_rate); - ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -static void __init qca955x_clocks_init(void) +static void __init qca955x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -456,7 +448,9 @@ static void __init qca955x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; - pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG); + ref_rate = ath79_setup_ref_clk(ref_rate); + + pll = __raw_readl(pll_base + QCA955X_PLL_CPU_CONFIG_REG); out_div = (pll >> 
QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) & @@ -470,7 +464,7 @@ static void __init qca955x_clocks_init(void) cpu_pll += frac * ref_rate / (ref_div * (1 << 6)); cpu_pll /= (1 << out_div); - pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + QCA955X_PLL_DDR_CONFIG_REG); out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) & @@ -484,7 +478,7 @@ static void __init qca955x_clocks_init(void) ddr_pll += frac * ref_rate / (ref_div * (1 << 10)); ddr_pll /= (1 << out_div); - clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + QCA955X_PLL_CLK_CTRL_REG); postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) & QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK; @@ -516,16 +510,12 @@ static void __init qca955x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate); - clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate); - clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -static void __init qca956x_clocks_init(void) +static void __init qca956x_clocks_init(void __iomem *pll_base) { unsigned long ref_rate; unsigned long cpu_rate; @@ -551,13 +541,15 @@ static void __init qca956x_clocks_init(void) else ref_rate = 25 * 1000 * 1000; - pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG_REG); + ref_rate = ath79_setup_ref_clk(ref_rate); + + pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG_REG); out_div = (pll >> QCA956X_PLL_CPU_CONFIG_OUTDIV_SHIFT) & QCA956X_PLL_CPU_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA956X_PLL_CPU_CONFIG_REFDIV_SHIFT) & QCA956X_PLL_CPU_CONFIG_REFDIV_MASK; - pll = ath79_pll_rr(QCA956X_PLL_CPU_CONFIG1_REG); + pll = __raw_readl(pll_base + QCA956X_PLL_CPU_CONFIG1_REG); nint = (pll >> QCA956X_PLL_CPU_CONFIG1_NINT_SHIFT) & QCA956X_PLL_CPU_CONFIG1_NINT_MASK; hfrac = (pll >> QCA956X_PLL_CPU_CONFIG1_NFRAC_H_SHIFT) & @@ -570,12 +562,12 @@ static void __init qca956x_clocks_init(void) cpu_pll += (hfrac >> 13) * ref_rate / ref_div; cpu_pll /= (1 << out_div); - pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG_REG); + pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG_REG); out_div = (pll >> QCA956X_PLL_DDR_CONFIG_OUTDIV_SHIFT) & QCA956X_PLL_DDR_CONFIG_OUTDIV_MASK; ref_div = (pll >> QCA956X_PLL_DDR_CONFIG_REFDIV_SHIFT) & QCA956X_PLL_DDR_CONFIG_REFDIV_MASK; - pll = ath79_pll_rr(QCA956X_PLL_DDR_CONFIG1_REG); + pll = __raw_readl(pll_base + QCA956X_PLL_DDR_CONFIG1_REG); nint = (pll >> QCA956X_PLL_DDR_CONFIG1_NINT_SHIFT) & QCA956X_PLL_DDR_CONFIG1_NINT_MASK; hfrac = (pll >> QCA956X_PLL_DDR_CONFIG1_NFRAC_H_SHIFT) & @@ -588,7 +580,7 @@ static void __init qca956x_clocks_init(void) ddr_pll += (hfrac >> 13) * ref_rate / ref_div; ddr_pll /= (1 << out_div); - clk_ctrl = ath79_pll_rr(QCA956X_PLL_CLK_CTRL_REG); + clk_ctrl = __raw_readl(pll_base + QCA956X_PLL_CLK_CTRL_REG); postdiv = (clk_ctrl >> QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) & QCA956X_PLL_CLK_CTRL_CPU_POST_DIV_MASK; @@ -620,72 +612,19 @@ static void __init qca956x_clocks_init(void) else ahb_rate = cpu_pll / (postdiv + 1); - ath79_add_sys_clkdev("ref", ref_rate); - ath79_add_sys_clkdev("cpu", 
cpu_rate); - ath79_add_sys_clkdev("ddr", ddr_rate); - ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ref", NULL); - clk_add_alias("uart", NULL, "ref", NULL); -} - -void __init ath79_clocks_init(void) -{ - if (soc_is_ar71xx()) - ar71xx_clocks_init(); - else if (soc_is_ar724x() || soc_is_ar913x()) - ar724x_clocks_init(); - else if (soc_is_ar933x()) - ar933x_clocks_init(); - else if (soc_is_ar934x()) - ar934x_clocks_init(); - else if (soc_is_qca953x()) - qca953x_clocks_init(); - else if (soc_is_qca955x()) - qca955x_clocks_init(); - else if (soc_is_qca956x() || soc_is_tp9343()) - qca956x_clocks_init(); - else - BUG(); -} - -unsigned long __init -ath79_get_sys_clk_rate(const char *id) -{ - struct clk *clk; - unsigned long rate; - - clk = clk_get(NULL, id); - if (IS_ERR(clk)) - panic("unable to get %s clock, err=%d", id, (int) PTR_ERR(clk)); - - rate = clk_get_rate(clk); - clk_put(clk); - - return rate; + ath79_set_clk(ATH79_CLK_CPU, cpu_rate); + ath79_set_clk(ATH79_CLK_DDR, ddr_rate); + ath79_set_clk(ATH79_CLK_AHB, ahb_rate); } -#ifdef CONFIG_OF static void __init ath79_clocks_init_dt(struct device_node *np) -{ - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); -} - -CLK_OF_DECLARE(ar7100, "qca,ar7100-pll", ath79_clocks_init_dt); -CLK_OF_DECLARE(ar7240, "qca,ar7240-pll", ath79_clocks_init_dt); -CLK_OF_DECLARE(ar9340, "qca,ar9340-pll", ath79_clocks_init_dt); -CLK_OF_DECLARE(ar9550, "qca,qca9550-pll", ath79_clocks_init_dt); - -static void __init ath79_clocks_init_dt_ng(struct device_node *np) { struct clk *ref_clk; void __iomem *pll_base; ref_clk = of_clk_get(np, 0); - if (IS_ERR(ref_clk)) { - pr_err("%pOF: of_clk_get failed\n", np); - goto err; - } + if (!IS_ERR(ref_clk)) + clks[ATH79_CLK_REF] = ref_clk; pll_base = of_iomap(np, 0); if (!pll_base) { @@ -693,14 +632,24 @@ static void __init ath79_clocks_init_dt_ng(struct device_node *np) goto err_clk; } - if (of_device_is_compatible(np, "qca,ar9130-pll")) - ar724x_clk_init(ref_clk, pll_base); + if (of_device_is_compatible(np, "qca,ar7100-pll")) + ar71xx_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,ar7240-pll") || + of_device_is_compatible(np, "qca,ar9130-pll")) + ar724x_clocks_init(pll_base); else if (of_device_is_compatible(np, "qca,ar9330-pll")) - ar9330_clk_init(ref_clk, pll_base); - else { - pr_err("%pOF: could not find any appropriate clk_init()\n", np); - goto err_iounmap; - } + ar933x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,ar9340-pll")) + ar934x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,qca9530-pll")) + qca953x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,qca9550-pll")) + qca955x_clocks_init(pll_base); + else if (of_device_is_compatible(np, "qca,qca9560-pll")) + qca956x_clocks_init(pll_base); + + if (!clks[ATH79_CLK_MDIO]) + clks[ATH79_CLK_MDIO] = clks[ATH79_CLK_REF]; if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) { pr_err("%pOF: could not register clk provider\n", np); @@ -714,10 +663,13 @@ err_iounmap: err_clk: clk_put(ref_clk); - -err: - return; } -CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt_ng); -CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt_ng); -#endif + +CLK_OF_DECLARE(ar7100_clk, "qca,ar7100-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar7240_clk, "qca,ar7240-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt); 
+CLK_OF_DECLARE(ar9340_clk, "qca,ar9340-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9530_clk, "qca,qca9530-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9550_clk, "qca,qca9550-pll", ath79_clocks_init_dt); +CLK_OF_DECLARE(ar9560_clk, "qca,qca9560-pll", ath79_clocks_init_dt); diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h index 870c6b2e97e8..25b96f59e8e8 100644 --- a/arch/mips/ath79/common.h +++ b/arch/mips/ath79/common.h @@ -19,11 +19,6 @@ #define ATH79_MEM_SIZE_MIN (2 * 1024 * 1024) #define ATH79_MEM_SIZE_MAX (256 * 1024 * 1024) -void ath79_clocks_init(void); -unsigned long ath79_get_sys_clk_rate(const char *id); - void ath79_ddr_ctrl_init(void); -void ath79_gpio_init(void); - #endif /* __ATH79_COMMON_H */ diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c deleted file mode 100644 index 9d0172a4dc69..000000000000 --- a/arch/mips/ath79/dev-common.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common devices - * - * Copyright (C) 2008-2011 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * Parts of this file are based on Atheros' 2.6.15 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include "common.h" -#include "dev-common.h" - -static struct resource ath79_uart_resources[] = { - { - .start = AR71XX_UART_BASE, - .end = AR71XX_UART_BASE + AR71XX_UART_SIZE - 1, - .flags = IORESOURCE_MEM, - }, -}; - -#define AR71XX_UART_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP) -static struct plat_serial8250_port ath79_uart_data[] = { - { - .mapbase = AR71XX_UART_BASE, - .irq = ATH79_MISC_IRQ(3), - .flags = AR71XX_UART_FLAGS, - .iotype = UPIO_MEM32, - .regshift = 2, - }, { - /* terminating entry */ - } -}; - -static struct platform_device ath79_uart_device = { - .name = "serial8250", - .id = PLAT8250_DEV_PLATFORM, - .resource = ath79_uart_resources, - .num_resources = ARRAY_SIZE(ath79_uart_resources), - .dev = { - .platform_data = ath79_uart_data - }, -}; - -static struct resource ar933x_uart_resources[] = { - { - .start = AR933X_UART_BASE, - .end = AR933X_UART_BASE + AR71XX_UART_SIZE - 1, - .flags = IORESOURCE_MEM, - }, - { - .start = ATH79_MISC_IRQ(3), - .end = ATH79_MISC_IRQ(3), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ar933x_uart_device = { - .name = "ar933x-uart", - .id = -1, - .resource = ar933x_uart_resources, - .num_resources = ARRAY_SIZE(ar933x_uart_resources), -}; - -void __init ath79_register_uart(void) -{ - unsigned long uart_clk_rate; - - uart_clk_rate = ath79_get_sys_clk_rate("uart"); - - if (soc_is_ar71xx() || - soc_is_ar724x() || - soc_is_ar913x() || - soc_is_ar934x() || - soc_is_qca955x()) { - ath79_uart_data[0].uartclk = uart_clk_rate; - platform_device_register(&ath79_uart_device); - } else if (soc_is_ar933x()) { - platform_device_register(&ar933x_uart_device); - } else { - BUG(); - } -} - -void __init ath79_register_wdt(void) -{ - struct resource res; - - memset(&res, 0, sizeof(res)); - - res.flags = IORESOURCE_MEM; - res.start = AR71XX_RESET_BASE + AR71XX_RESET_REG_WDOG_CTRL; - res.end = res.start + 0x8 - 1; - - platform_device_register_simple("ath79-wdt", -1, &res, 1); -} - -static struct ath79_gpio_platform_data ath79_gpio_pdata; - -static struct resource ath79_gpio_resources[] = { - { - .flags = IORESOURCE_MEM, - .start = 
AR71XX_GPIO_BASE, - .end = AR71XX_GPIO_BASE + AR71XX_GPIO_SIZE - 1, - }, - { - .start = ATH79_MISC_IRQ(2), - .end = ATH79_MISC_IRQ(2), - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ath79_gpio_device = { - .name = "ath79-gpio", - .id = -1, - .resource = ath79_gpio_resources, - .num_resources = ARRAY_SIZE(ath79_gpio_resources), - .dev = { - .platform_data = &ath79_gpio_pdata - }, -}; - -void __init ath79_gpio_init(void) -{ - if (soc_is_ar71xx()) { - ath79_gpio_pdata.ngpios = AR71XX_GPIO_COUNT; - } else if (soc_is_ar7240()) { - ath79_gpio_pdata.ngpios = AR7240_GPIO_COUNT; - } else if (soc_is_ar7241() || soc_is_ar7242()) { - ath79_gpio_pdata.ngpios = AR7241_GPIO_COUNT; - } else if (soc_is_ar913x()) { - ath79_gpio_pdata.ngpios = AR913X_GPIO_COUNT; - } else if (soc_is_ar933x()) { - ath79_gpio_pdata.ngpios = AR933X_GPIO_COUNT; - } else if (soc_is_ar934x()) { - ath79_gpio_pdata.ngpios = AR934X_GPIO_COUNT; - ath79_gpio_pdata.oe_inverted = 1; - } else if (soc_is_qca955x()) { - ath79_gpio_pdata.ngpios = QCA955X_GPIO_COUNT; - ath79_gpio_pdata.oe_inverted = 1; - } else { - BUG(); - } - - platform_device_register(&ath79_gpio_device); -} diff --git a/arch/mips/ath79/dev-common.h b/arch/mips/ath79/dev-common.h deleted file mode 100644 index 0f514e1affce..000000000000 --- a/arch/mips/ath79/dev-common.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common devices - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_COMMON_H -#define _ATH79_DEV_COMMON_H - -void ath79_register_uart(void); -void ath79_register_wdt(void); - -#endif /* _ATH79_DEV_COMMON_H */ diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c deleted file mode 100644 index 366b35fb164d..000000000000 --- a/arch/mips/ath79/dev-gpio-buttons.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X GPIO button support - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include "linux/init.h" -#include "linux/slab.h" -#include - -#include "dev-gpio-buttons.h" - -void __init ath79_register_gpio_keys_polled(int id, - unsigned poll_interval, - unsigned nbuttons, - struct gpio_keys_button *buttons) -{ - struct platform_device *pdev; - struct gpio_keys_platform_data pdata; - struct gpio_keys_button *p; - int err; - - p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL); - if (!p) - return; - - pdev = platform_device_alloc("gpio-keys-polled", id); - if (!pdev) - goto err_free_buttons; - - memset(&pdata, 0, sizeof(pdata)); - pdata.poll_interval = poll_interval; - pdata.nbuttons = nbuttons; - pdata.buttons = p; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto err_put_pdev; - - err = platform_device_add(pdev); - if (err) - goto err_put_pdev; - - return; - -err_put_pdev: - platform_device_put(pdev); - -err_free_buttons: - kfree(p); -} diff --git a/arch/mips/ath79/dev-gpio-buttons.h b/arch/mips/ath79/dev-gpio-buttons.h deleted file mode 100644 index 481847ac1cba..000000000000 --- a/arch/mips/ath79/dev-gpio-buttons.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X GPIO button support - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_GPIO_BUTTONS_H -#define _ATH79_DEV_GPIO_BUTTONS_H - -#include -#include - -void ath79_register_gpio_keys_polled(int id, - unsigned poll_interval, - unsigned nbuttons, - struct gpio_keys_button *buttons); - -#endif /* _ATH79_DEV_GPIO_BUTTONS_H */ diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c deleted file mode 100644 index dcb1debcefb8..000000000000 --- a/arch/mips/ath79/dev-leds-gpio.c +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common GPIO LEDs support - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include -#include -#include - -#include "dev-leds-gpio.h" - -void __init ath79_register_leds_gpio(int id, - unsigned num_leds, - struct gpio_led *leds) -{ - struct platform_device *pdev; - struct gpio_led_platform_data pdata; - struct gpio_led *p; - int err; - - p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL); - if (!p) - return; - - pdev = platform_device_alloc("leds-gpio", id); - if (!pdev) - goto err_free_leds; - - memset(&pdata, 0, sizeof(pdata)); - pdata.num_leds = num_leds; - pdata.leds = p; - - err = platform_device_add_data(pdev, &pdata, sizeof(pdata)); - if (err) - goto err_put_pdev; - - err = platform_device_add(pdev); - if (err) - goto err_put_pdev; - - return; - -err_put_pdev: - platform_device_put(pdev); - -err_free_leds: - kfree(p); -} diff --git a/arch/mips/ath79/dev-leds-gpio.h b/arch/mips/ath79/dev-leds-gpio.h deleted file mode 100644 index 6e5d8851ebcf..000000000000 --- a/arch/mips/ath79/dev-leds-gpio.h +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X common GPIO LEDs support - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_LEDS_GPIO_H -#define _ATH79_DEV_LEDS_GPIO_H - -#include - -void ath79_register_leds_gpio(int id, - unsigned num_leds, - struct gpio_led *leds); - -#endif /* _ATH79_DEV_LEDS_GPIO_H */ diff --git a/arch/mips/ath79/dev-spi.c b/arch/mips/ath79/dev-spi.c deleted file mode 100644 index aa30163efbfd..000000000000 --- a/arch/mips/ath79/dev-spi.c +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X SPI controller device - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include -#include -#include "dev-spi.h" - -static struct resource ath79_spi_resources[] = { - { - .start = AR71XX_SPI_BASE, - .end = AR71XX_SPI_BASE + AR71XX_SPI_SIZE - 1, - .flags = IORESOURCE_MEM, - }, -}; - -static struct platform_device ath79_spi_device = { - .name = "ath79-spi", - .id = -1, - .resource = ath79_spi_resources, - .num_resources = ARRAY_SIZE(ath79_spi_resources), -}; - -void __init ath79_register_spi(struct ath79_spi_platform_data *pdata, - struct spi_board_info const *info, - unsigned n) -{ - spi_register_board_info(info, n); - ath79_spi_device.dev.platform_data = pdata; - platform_device_register(&ath79_spi_device); -} diff --git a/arch/mips/ath79/dev-spi.h b/arch/mips/ath79/dev-spi.h deleted file mode 100644 index d732565ca736..000000000000 --- a/arch/mips/ath79/dev-spi.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X SPI controller device - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#ifndef _ATH79_DEV_SPI_H -#define _ATH79_DEV_SPI_H - -#include -#include - -void ath79_register_spi(struct ath79_spi_platform_data *pdata, - struct spi_board_info const *info, - unsigned n); - -#endif /* _ATH79_DEV_SPI_H */ diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c deleted file mode 100644 index 8227265bcc2d..000000000000 --- a/arch/mips/ath79/dev-usb.c +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Atheros AR7XXX/AR9XXX USB Host Controller device - * - * Copyright (C) 2008-2011 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * Parts of this file are based on Atheros' 2.6.15 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include "common.h" -#include "dev-usb.h" - -static u64 ath79_usb_dmamask = DMA_BIT_MASK(32); - -static struct usb_ohci_pdata ath79_ohci_pdata = { -}; - -static struct usb_ehci_pdata ath79_ehci_pdata_v1 = { - .has_synopsys_hc_bug = 1, -}; - -static struct usb_ehci_pdata ath79_ehci_pdata_v2 = { - .caps_offset = 0x100, - .has_tt = 1, -}; - -static void __init ath79_usb_register(const char *name, int id, - unsigned long base, unsigned long size, - int irq, const void *data, - size_t data_size) -{ - struct resource res[2]; - struct platform_device *pdev; - - memset(res, 0, sizeof(res)); - - res[0].flags = IORESOURCE_MEM; - res[0].start = base; - res[0].end = base + size - 1; - - res[1].flags = IORESOURCE_IRQ; - res[1].start = irq; - res[1].end = irq; - - pdev = platform_device_register_resndata(NULL, name, id, - res, ARRAY_SIZE(res), - data, data_size); - - if (IS_ERR(pdev)) { - pr_err("ath79: unable to register USB at %08lx, err=%d\n", - base, (int) PTR_ERR(pdev)); - return; - } - - pdev->dev.dma_mask = &ath79_usb_dmamask; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); -} - -#define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \ - AR71XX_RESET_USB_PHY | \ - AR71XX_RESET_USB_OHCI_DLL) - -static void __init ath79_usb_setup(void) -{ - void __iomem *usb_ctrl_base; - - ath79_device_reset_set(AR71XX_USB_RESET_MASK); - mdelay(1000); - ath79_device_reset_clear(AR71XX_USB_RESET_MASK); - - usb_ctrl_base = ioremap(AR71XX_USB_CTRL_BASE, AR71XX_USB_CTRL_SIZE); - - /* Turning on the Buff and Desc swap bits */ - __raw_writel(0xf0000, usb_ctrl_base + AR71XX_USB_CTRL_REG_CONFIG); - - /* WAR for HW bug. Here it adjusts the duration between two SOFS */ - __raw_writel(0x20c00, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ); - - iounmap(usb_ctrl_base); - - mdelay(900); - - ath79_usb_register("ohci-platform", -1, - AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE, - ATH79_MISC_IRQ(6), - &ath79_ohci_pdata, sizeof(ath79_ohci_pdata)); - - ath79_usb_register("ehci-platform", -1, - AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1)); -} - -static void __init ar7240_usb_setup(void) -{ - void __iomem *usb_ctrl_base; - - ath79_device_reset_clear(AR7240_RESET_OHCI_DLL); - ath79_device_reset_set(AR7240_RESET_USB_HOST); - - mdelay(1000); - - ath79_device_reset_set(AR7240_RESET_OHCI_DLL); - ath79_device_reset_clear(AR7240_RESET_USB_HOST); - - usb_ctrl_base = ioremap(AR7240_USB_CTRL_BASE, AR7240_USB_CTRL_SIZE); - - /* WAR for HW bug. 
Here it adjusts the duration between two SOFS */ - __raw_writel(0x3, usb_ctrl_base + AR71XX_USB_CTRL_REG_FLADJ); - - iounmap(usb_ctrl_base); - - ath79_usb_register("ohci-platform", -1, - AR7240_OHCI_BASE, AR7240_OHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ohci_pdata, sizeof(ath79_ohci_pdata)); -} - -static void __init ar724x_usb_setup(void) -{ - ath79_device_reset_set(AR724X_RESET_USBSUS_OVERRIDE); - mdelay(10); - - ath79_device_reset_clear(AR724X_RESET_USB_HOST); - mdelay(10); - - ath79_device_reset_clear(AR724X_RESET_USB_PHY); - mdelay(10); - - ath79_usb_register("ehci-platform", -1, - AR724X_EHCI_BASE, AR724X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init ar913x_usb_setup(void) -{ - ath79_device_reset_set(AR913X_RESET_USBSUS_OVERRIDE); - mdelay(10); - - ath79_device_reset_clear(AR913X_RESET_USB_HOST); - mdelay(10); - - ath79_device_reset_clear(AR913X_RESET_USB_PHY); - mdelay(10); - - ath79_usb_register("ehci-platform", -1, - AR913X_EHCI_BASE, AR913X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init ar933x_usb_setup(void) -{ - ath79_device_reset_set(AR933X_RESET_USBSUS_OVERRIDE); - mdelay(10); - - ath79_device_reset_clear(AR933X_RESET_USB_HOST); - mdelay(10); - - ath79_device_reset_clear(AR933X_RESET_USB_PHY); - mdelay(10); - - ath79_usb_register("ehci-platform", -1, - AR933X_EHCI_BASE, AR933X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init ar934x_usb_setup(void) -{ - u32 bootstrap; - - bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); - if (bootstrap & AR934X_BOOTSTRAP_USB_MODE_DEVICE) - return; - - ath79_device_reset_set(AR934X_RESET_USBSUS_OVERRIDE); - udelay(1000); - - ath79_device_reset_clear(AR934X_RESET_USB_PHY); - udelay(1000); - - ath79_device_reset_clear(AR934X_RESET_USB_PHY_ANALOG); - udelay(1000); - - ath79_device_reset_clear(AR934X_RESET_USB_HOST); - udelay(1000); - - ath79_usb_register("ehci-platform", -1, - AR934X_EHCI_BASE, AR934X_EHCI_SIZE, - ATH79_CPU_IRQ(3), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -static void __init qca955x_usb_setup(void) -{ - ath79_usb_register("ehci-platform", 0, - QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE, - ATH79_IP3_IRQ(0), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); - - ath79_usb_register("ehci-platform", 1, - QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE, - ATH79_IP3_IRQ(1), - &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2)); -} - -void __init ath79_register_usb(void) -{ - if (soc_is_ar71xx()) - ath79_usb_setup(); - else if (soc_is_ar7240()) - ar7240_usb_setup(); - else if (soc_is_ar7241() || soc_is_ar7242()) - ar724x_usb_setup(); - else if (soc_is_ar913x()) - ar913x_usb_setup(); - else if (soc_is_ar933x()) - ar933x_usb_setup(); - else if (soc_is_ar934x()) - ar934x_usb_setup(); - else if (soc_is_qca955x()) - qca955x_usb_setup(); - else - BUG(); -} diff --git a/arch/mips/ath79/dev-usb.h b/arch/mips/ath79/dev-usb.h deleted file mode 100644 index 4b86a69ca080..000000000000 --- a/arch/mips/ath79/dev-usb.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X USB Host Controller support - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#ifndef _ATH79_DEV_USB_H -#define _ATH79_DEV_USB_H - -void ath79_register_usb(void); - -#endif /* _ATH79_DEV_USB_H */ diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c deleted file mode 100644 index da190b1b87ce..000000000000 --- a/arch/mips/ath79/dev-wmac.c +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Atheros AR913X/AR933X SoC built-in WMAC device support - * - * Copyright (C) 2010-2011 Jaiganesh Narayanan - * Copyright (C) 2008-2011 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * Parts of this file are based on Atheros 2.6.15/2.6.31 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include - -#include -#include -#include "dev-wmac.h" - -static struct ath9k_platform_data ath79_wmac_data; - -static struct resource ath79_wmac_resources[] = { - { - /* .start and .end fields are filled dynamically */ - .flags = IORESOURCE_MEM, - }, { - /* .start and .end fields are filled dynamically */ - .flags = IORESOURCE_IRQ, - }, -}; - -static struct platform_device ath79_wmac_device = { - .name = "ath9k", - .id = -1, - .resource = ath79_wmac_resources, - .num_resources = ARRAY_SIZE(ath79_wmac_resources), - .dev = { - .platform_data = &ath79_wmac_data, - }, -}; - -static void __init ar913x_wmac_setup(void) -{ - /* reset the WMAC */ - ath79_device_reset_set(AR913X_RESET_AMBA2WMAC); - mdelay(10); - - ath79_device_reset_clear(AR913X_RESET_AMBA2WMAC); - mdelay(10); - - ath79_wmac_resources[0].start = AR913X_WMAC_BASE; - ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2); - ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2); -} - - -static int ar933x_wmac_reset(void) -{ - ath79_device_reset_set(AR933X_RESET_WMAC); - ath79_device_reset_clear(AR933X_RESET_WMAC); - - return 0; -} - -static int ar933x_r1_get_wmac_revision(void) -{ - return ath79_soc_rev; -} - -static void __init ar933x_wmac_setup(void) -{ - u32 t; - - ar933x_wmac_reset(); - - ath79_wmac_device.name = "ar933x_wmac"; - - ath79_wmac_resources[0].start = AR933X_WMAC_BASE; - ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2); - ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2); - - t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); - if (t & AR933X_BOOTSTRAP_REF_CLK_40) - ath79_wmac_data.is_clk_25mhz = false; - else - ath79_wmac_data.is_clk_25mhz = true; - - if (ath79_soc_rev == 1) - ath79_wmac_data.get_mac_revision = ar933x_r1_get_wmac_revision; - - ath79_wmac_data.external_reset = ar933x_wmac_reset; -} - -static void ar934x_wmac_setup(void) -{ - u32 t; - - ath79_wmac_device.name = "ar934x_wmac"; - - ath79_wmac_resources[0].start = AR934X_WMAC_BASE; - ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1); - ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1); - - t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); - if (t & AR934X_BOOTSTRAP_REF_CLK_40) - ath79_wmac_data.is_clk_25mhz = false; - else - ath79_wmac_data.is_clk_25mhz = true; -} - -static void qca955x_wmac_setup(void) -{ - u32 t; - - ath79_wmac_device.name = "qca955x_wmac"; - - ath79_wmac_resources[0].start = QCA955X_WMAC_BASE; - ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1; - ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1); - ath79_wmac_resources[1].end 
= ATH79_IP2_IRQ(1); - - t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP); - if (t & QCA955X_BOOTSTRAP_REF_CLK_40) - ath79_wmac_data.is_clk_25mhz = false; - else - ath79_wmac_data.is_clk_25mhz = true; -} - -void __init ath79_register_wmac(u8 *cal_data) -{ - if (soc_is_ar913x()) - ar913x_wmac_setup(); - else if (soc_is_ar933x()) - ar933x_wmac_setup(); - else if (soc_is_ar934x()) - ar934x_wmac_setup(); - else if (soc_is_qca955x()) - qca955x_wmac_setup(); - else - BUG(); - - if (cal_data) - memcpy(ath79_wmac_data.eeprom_data, cal_data, - sizeof(ath79_wmac_data.eeprom_data)); - - platform_device_register(&ath79_wmac_device); -} diff --git a/arch/mips/ath79/dev-wmac.h b/arch/mips/ath79/dev-wmac.h deleted file mode 100644 index c9cd8709f090..000000000000 --- a/arch/mips/ath79/dev-wmac.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Atheros AR913X/AR933X SoC built-in WMAC device support - * - * Copyright (C) 2008-2011 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_DEV_WMAC_H -#define _ATH79_DEV_WMAC_H - -void ath79_register_wmac(u8 *cal_data); - -#endif /* _ATH79_DEV_WMAC_H */ diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c deleted file mode 100644 index 2dfff1f19004..000000000000 --- a/arch/mips/ath79/irq.c +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Atheros AR71xx/AR724x/AR913x specific interrupt handling - * - * Copyright (C) 2010-2011 Jaiganesh Narayanan - * Copyright (C) 2008-2011 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include "common.h" -#include "machtypes.h" - - -static void ar934x_ip2_irq_dispatch(struct irq_desc *desc) -{ - u32 status; - - status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS); - - if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) { - ath79_ddr_wb_flush(3); - generic_handle_irq(ATH79_IP2_IRQ(0)); - } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) { - ath79_ddr_wb_flush(4); - generic_handle_irq(ATH79_IP2_IRQ(1)); - } else { - spurious_interrupt(); - } -} - -static void ar934x_ip2_irq_init(void) -{ - int i; - - for (i = ATH79_IP2_IRQ_BASE; - i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) - irq_set_chip_and_handler(i, &dummy_irq_chip, - handle_level_irq); - - irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch); -} - -static void qca955x_ip2_irq_dispatch(struct irq_desc *desc) -{ - u32 status; - - status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS); - status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL; - - if (status == 0) { - spurious_interrupt(); - return; - } - - if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP2_IRQ(0)); - } - - if (status & QCA955X_EXT_INT_WMAC_ALL) { - /* TODO: flush DDR? 
*/ - generic_handle_irq(ATH79_IP2_IRQ(1)); - } -} - -static void qca955x_ip3_irq_dispatch(struct irq_desc *desc) -{ - u32 status; - - status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS); - status &= QCA955X_EXT_INT_PCIE_RC2_ALL | - QCA955X_EXT_INT_USB1 | - QCA955X_EXT_INT_USB2; - - if (status == 0) { - spurious_interrupt(); - return; - } - - if (status & QCA955X_EXT_INT_USB1) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP3_IRQ(0)); - } - - if (status & QCA955X_EXT_INT_USB2) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP3_IRQ(1)); - } - - if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) { - /* TODO: flush DDR? */ - generic_handle_irq(ATH79_IP3_IRQ(2)); - } -} - -static void qca955x_irq_init(void) -{ - int i; - - for (i = ATH79_IP2_IRQ_BASE; - i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++) - irq_set_chip_and_handler(i, &dummy_irq_chip, - handle_level_irq); - - irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch); - - for (i = ATH79_IP3_IRQ_BASE; - i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++) - irq_set_chip_and_handler(i, &dummy_irq_chip, - handle_level_irq); - - irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch); -} - -void __init arch_init_irq(void) -{ - unsigned irq_wb_chan2 = -1; - unsigned irq_wb_chan3 = -1; - bool misc_is_ar71xx; - - if (mips_machtype == ATH79_MACH_GENERIC_OF) { - irqchip_init(); - return; - } - - if (soc_is_ar71xx() || soc_is_ar724x() || - soc_is_ar913x() || soc_is_ar933x()) { - irq_wb_chan2 = 3; - irq_wb_chan3 = 2; - } else if (soc_is_ar934x()) { - irq_wb_chan3 = 2; - } - - ath79_cpu_irq_init(irq_wb_chan2, irq_wb_chan3); - - if (soc_is_ar71xx() || soc_is_ar913x()) - misc_is_ar71xx = true; - else if (soc_is_ar724x() || - soc_is_ar933x() || - soc_is_ar934x() || - soc_is_qca955x()) - misc_is_ar71xx = false; - else - BUG(); - ath79_misc_irq_init( - ath79_reset_base + AR71XX_RESET_REG_MISC_INT_STATUS, - ATH79_CPU_IRQ(6), ATH79_MISC_IRQ_BASE, misc_is_ar71xx); - - if (soc_is_ar934x()) - ar934x_ip2_irq_init(); - else if (soc_is_qca955x()) - qca955x_irq_init(); -} diff --git a/arch/mips/ath79/mach-ap121.c b/arch/mips/ath79/mach-ap121.c deleted file mode 100644 index 1bf73f2a069d..000000000000 --- a/arch/mips/ath79/mach-ap121.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Atheros AP121 board support - * - * Copyright (C) 2011 Gabor Juhos - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "dev-wmac.h" - -#define AP121_GPIO_LED_WLAN 0 -#define AP121_GPIO_LED_USB 1 - -#define AP121_GPIO_BTN_JUMPSTART 11 -#define AP121_GPIO_BTN_RESET 12 - -#define AP121_KEYS_POLL_INTERVAL 20 /* msecs */ -#define AP121_KEYS_DEBOUNCE_INTERVAL (3 * AP121_KEYS_POLL_INTERVAL) - -#define AP121_CAL_DATA_ADDR 0x1fff1000 - -static struct gpio_led ap121_leds_gpio[] __initdata = { - { - .name = "ap121:green:usb", - .gpio = AP121_GPIO_LED_USB, - .active_low = 0, - }, - { - .name = "ap121:green:wlan", - .gpio = AP121_GPIO_LED_WLAN, - .active_low = 0, - }, -}; - -static struct gpio_keys_button ap121_gpio_keys[] __initdata = { - { - .desc = "jumpstart button", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP121_GPIO_BTN_JUMPSTART, - .active_low = 1, - }, - { - .desc = "reset button", - .type = EV_KEY, - .code = KEY_RESTART, - .debounce_interval = AP121_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP121_GPIO_BTN_RESET, - .active_low = 1, - } -}; - -static struct spi_board_info ap121_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "mx25l1606e", - } -}; - -static struct ath79_spi_platform_data ap121_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -static void __init ap121_setup(void) -{ - u8 *cal_data = (u8 *) KSEG1ADDR(AP121_CAL_DATA_ADDR); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(ap121_leds_gpio), - ap121_leds_gpio); - ath79_register_gpio_keys_polled(-1, AP121_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ap121_gpio_keys), - ap121_gpio_keys); - - ath79_register_spi(&ap121_spi_data, ap121_spi_info, - ARRAY_SIZE(ap121_spi_info)); - ath79_register_usb(); - ath79_register_wmac(cal_data); -} - -MIPS_MACHINE(ATH79_MACH_AP121, "AP121", "Atheros AP121 reference board", - ap121_setup); diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c deleted file mode 100644 index 07eac58c3641..000000000000 --- a/arch/mips/ath79/mach-ap136.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Qualcomm Atheros AP136 reference board support - * - * Copyright (c) 2012 Qualcomm Atheros - * Copyright (c) 2012-2013 Gabor Juhos - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * - */ - -#include -#include - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "dev-wmac.h" -#include "pci.h" - -#define AP136_GPIO_LED_STATUS_RED 14 -#define AP136_GPIO_LED_STATUS_GREEN 19 -#define AP136_GPIO_LED_USB 4 -#define AP136_GPIO_LED_WLAN_2G 13 -#define AP136_GPIO_LED_WLAN_5G 12 -#define AP136_GPIO_LED_WPS_RED 15 -#define AP136_GPIO_LED_WPS_GREEN 20 - -#define AP136_GPIO_BTN_WPS 16 -#define AP136_GPIO_BTN_RFKILL 21 - -#define AP136_KEYS_POLL_INTERVAL 20 /* msecs */ -#define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL) - -#define AP136_WMAC_CALDATA_OFFSET 0x1000 -#define AP136_PCIE_CALDATA_OFFSET 0x5000 - -static struct gpio_led ap136_leds_gpio[] __initdata = { - { - .name = "qca:green:status", - .gpio = AP136_GPIO_LED_STATUS_GREEN, - .active_low = 1, - }, - { - .name = "qca:red:status", - .gpio = AP136_GPIO_LED_STATUS_RED, - .active_low = 1, - }, - { - .name = "qca:green:wps", - .gpio = AP136_GPIO_LED_WPS_GREEN, - .active_low = 1, - }, - { - .name = "qca:red:wps", - .gpio = AP136_GPIO_LED_WPS_RED, - .active_low = 1, - }, - { - .name = "qca:red:wlan-2g", - .gpio = AP136_GPIO_LED_WLAN_2G, - .active_low = 1, - }, - { - .name = "qca:red:usb", - .gpio = AP136_GPIO_LED_USB, - .active_low = 1, - } -}; - -static struct gpio_keys_button ap136_gpio_keys[] __initdata = { - { - .desc = "WPS button", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP136_GPIO_BTN_WPS, - .active_low = 1, - }, - { - .desc = "RFKILL button", - .type = EV_KEY, - .code = KEY_RFKILL, - .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP136_GPIO_BTN_RFKILL, - .active_low = 1, - }, -}; - -static struct spi_board_info ap136_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "mx25l6405d", - } -}; - -static struct ath79_spi_platform_data ap136_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -#ifdef CONFIG_PCI -static struct ath9k_platform_data ap136_ath9k_data; - -static int ap136_pci_plat_dev_init(struct pci_dev *dev) -{ - if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0) - dev->dev.platform_data = &ap136_ath9k_data; - - return 0; -} - -static void __init ap136_pci_init(u8 *eeprom) -{ - memcpy(ap136_ath9k_data.eeprom_data, eeprom, - sizeof(ap136_ath9k_data.eeprom_data)); - - ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init); - ath79_register_pci(); -} -#else -static inline void ap136_pci_init(u8 *eeprom) {} -#endif /* CONFIG_PCI */ - -static void __init ap136_setup(void) -{ - u8 *art = (u8 *) KSEG1ADDR(0x1fff0000); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio), - ap136_leds_gpio); - ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ap136_gpio_keys), - ap136_gpio_keys); - ath79_register_spi(&ap136_spi_data, ap136_spi_info, - ARRAY_SIZE(ap136_spi_info)); - ath79_register_usb(); - ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET); - ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET); -} - -MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010", - "Atheros AP136-010 reference board", - ap136_setup); diff --git a/arch/mips/ath79/mach-ap81.c b/arch/mips/ath79/mach-ap81.c deleted file mode 100644 index 1c78d497f930..000000000000 --- a/arch/mips/ath79/mach-ap81.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Atheros AP81 board support - * - * Copyright (C) 2009-2010 Gabor Juhos - * Copyright (C) 2009 Imre Kaloz - * - * This program is free software; you can 
redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include "machtypes.h" -#include "dev-wmac.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" - -#define AP81_GPIO_LED_STATUS 1 -#define AP81_GPIO_LED_AOSS 3 -#define AP81_GPIO_LED_WLAN 6 -#define AP81_GPIO_LED_POWER 14 - -#define AP81_GPIO_BTN_SW4 12 -#define AP81_GPIO_BTN_SW1 21 - -#define AP81_KEYS_POLL_INTERVAL 20 /* msecs */ -#define AP81_KEYS_DEBOUNCE_INTERVAL (3 * AP81_KEYS_POLL_INTERVAL) - -#define AP81_CAL_DATA_ADDR 0x1fff1000 - -static struct gpio_led ap81_leds_gpio[] __initdata = { - { - .name = "ap81:green:status", - .gpio = AP81_GPIO_LED_STATUS, - .active_low = 1, - }, { - .name = "ap81:amber:aoss", - .gpio = AP81_GPIO_LED_AOSS, - .active_low = 1, - }, { - .name = "ap81:green:wlan", - .gpio = AP81_GPIO_LED_WLAN, - .active_low = 1, - }, { - .name = "ap81:green:power", - .gpio = AP81_GPIO_LED_POWER, - .active_low = 1, - } -}; - -static struct gpio_keys_button ap81_gpio_keys[] __initdata = { - { - .desc = "sw1", - .type = EV_KEY, - .code = BTN_0, - .debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP81_GPIO_BTN_SW1, - .active_low = 1, - } , { - .desc = "sw4", - .type = EV_KEY, - .code = BTN_1, - .debounce_interval = AP81_KEYS_DEBOUNCE_INTERVAL, - .gpio = AP81_GPIO_BTN_SW4, - .active_low = 1, - } -}; - -static struct spi_board_info ap81_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "m25p64", - } -}; - -static struct ath79_spi_platform_data ap81_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -static void __init ap81_setup(void) -{ - u8 *cal_data = (u8 *) KSEG1ADDR(AP81_CAL_DATA_ADDR); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(ap81_leds_gpio), - ap81_leds_gpio); - ath79_register_gpio_keys_polled(-1, AP81_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ap81_gpio_keys), - ap81_gpio_keys); - ath79_register_spi(&ap81_spi_data, ap81_spi_info, - ARRAY_SIZE(ap81_spi_info)); - ath79_register_wmac(cal_data); - ath79_register_usb(); -} - -MIPS_MACHINE(ATH79_MACH_AP81, "AP81", "Atheros AP81 reference board", - ap81_setup); diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c deleted file mode 100644 index 9423f5aed287..000000000000 --- a/arch/mips/ath79/mach-db120.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Atheros DB120 reference board support - * - * Copyright (c) 2011 Qualcomm Atheros - * Copyright (c) 2011 Gabor Juhos - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * - */ - -#include -#include - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "dev-wmac.h" -#include "pci.h" - -#define DB120_GPIO_LED_WLAN_5G 12 -#define DB120_GPIO_LED_WLAN_2G 13 -#define DB120_GPIO_LED_STATUS 14 -#define DB120_GPIO_LED_WPS 15 - -#define DB120_GPIO_BTN_WPS 16 - -#define DB120_KEYS_POLL_INTERVAL 20 /* msecs */ -#define DB120_KEYS_DEBOUNCE_INTERVAL (3 * DB120_KEYS_POLL_INTERVAL) - -#define DB120_WMAC_CALDATA_OFFSET 0x1000 -#define DB120_PCIE_CALDATA_OFFSET 0x5000 - -static struct gpio_led db120_leds_gpio[] __initdata = { - { - .name = "db120:green:status", - .gpio = DB120_GPIO_LED_STATUS, - .active_low = 1, - }, - { - .name = "db120:green:wps", - .gpio = DB120_GPIO_LED_WPS, - .active_low = 1, - }, - { - .name = "db120:green:wlan-5g", - .gpio = DB120_GPIO_LED_WLAN_5G, - .active_low = 1, - }, - { - .name = "db120:green:wlan-2g", - .gpio = DB120_GPIO_LED_WLAN_2G, - .active_low = 1, - }, -}; - -static struct gpio_keys_button db120_gpio_keys[] __initdata = { - { - .desc = "WPS button", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL, - .gpio = DB120_GPIO_BTN_WPS, - .active_low = 1, - }, -}; - -static struct spi_board_info db120_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "s25sl064a", - } -}; - -static struct ath79_spi_platform_data db120_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -#ifdef CONFIG_PCI -static struct ath9k_platform_data db120_ath9k_data; - -static int db120_pci_plat_dev_init(struct pci_dev *dev) -{ - switch (PCI_SLOT(dev->devfn)) { - case 0: - dev->dev.platform_data = &db120_ath9k_data; - break; - } - - return 0; -} - -static void __init db120_pci_init(u8 *eeprom) -{ - memcpy(db120_ath9k_data.eeprom_data, eeprom, - sizeof(db120_ath9k_data.eeprom_data)); - - ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init); - ath79_register_pci(); -} -#else -static inline void db120_pci_init(u8 *eeprom) {} -#endif /* CONFIG_PCI */ - -static void __init db120_setup(void) -{ - u8 *art = (u8 *) KSEG1ADDR(0x1fff0000); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio), - db120_leds_gpio); - ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL, - ARRAY_SIZE(db120_gpio_keys), - db120_gpio_keys); - ath79_register_spi(&db120_spi_data, db120_spi_info, - ARRAY_SIZE(db120_spi_info)); - ath79_register_usb(); - ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET); - db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET); -} - -MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board", - db120_setup); diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c deleted file mode 100644 index 75fb96ca61db..000000000000 --- a/arch/mips/ath79/mach-pb44.c +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Atheros PB44 reference board support - * - * Copyright (C) 2009-2010 Gabor Juhos - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "dev-usb.h" -#include "pci.h" - -#define PB44_GPIO_I2C_SCL 0 -#define PB44_GPIO_I2C_SDA 1 - -#define PB44_GPIO_EXP_BASE 16 -#define PB44_GPIO_SW_RESET (PB44_GPIO_EXP_BASE + 6) -#define PB44_GPIO_SW_JUMP (PB44_GPIO_EXP_BASE + 8) -#define PB44_GPIO_LED_JUMP1 (PB44_GPIO_EXP_BASE + 9) -#define PB44_GPIO_LED_JUMP2 (PB44_GPIO_EXP_BASE + 10) - -#define PB44_KEYS_POLL_INTERVAL 20 /* msecs */ -#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL) - -static struct gpiod_lookup_table pb44_i2c_gpiod_table = { - .dev_id = "i2c-gpio.0", - .table = { - GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA, - NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SCL, - NULL, 1, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN), - }, -}; - -static struct platform_device pb44_i2c_gpio_device = { - .name = "i2c-gpio", - .id = 0, - .dev = { - .platform_data = NULL, - } -}; - -static struct pcf857x_platform_data pb44_pcf857x_data = { - .gpio_base = PB44_GPIO_EXP_BASE, -}; - -static struct i2c_board_info pb44_i2c_board_info[] __initdata = { - { - I2C_BOARD_INFO("pcf8575", 0x20), - .platform_data = &pb44_pcf857x_data, - }, -}; - -static struct gpio_led pb44_leds_gpio[] __initdata = { - { - .name = "pb44:amber:jump1", - .gpio = PB44_GPIO_LED_JUMP1, - .active_low = 1, - }, { - .name = "pb44:green:jump2", - .gpio = PB44_GPIO_LED_JUMP2, - .active_low = 1, - }, -}; - -static struct gpio_keys_button pb44_gpio_keys[] __initdata = { - { - .desc = "soft_reset", - .type = EV_KEY, - .code = KEY_RESTART, - .debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL, - .gpio = PB44_GPIO_SW_RESET, - .active_low = 1, - } , { - .desc = "jumpstart", - .type = EV_KEY, - .code = KEY_WPS_BUTTON, - .debounce_interval = PB44_KEYS_DEBOUNCE_INTERVAL, - .gpio = PB44_GPIO_SW_JUMP, - .active_low = 1, - } -}; - -static struct spi_board_info pb44_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "m25p64", - }, -}; - -static struct ath79_spi_platform_data pb44_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -static void __init pb44_init(void) -{ - gpiod_add_lookup_table(&pb44_i2c_gpiod_table); - i2c_register_board_info(0, pb44_i2c_board_info, - ARRAY_SIZE(pb44_i2c_board_info)); - platform_device_register(&pb44_i2c_gpio_device); - - ath79_register_leds_gpio(-1, ARRAY_SIZE(pb44_leds_gpio), - pb44_leds_gpio); - ath79_register_gpio_keys_polled(-1, PB44_KEYS_POLL_INTERVAL, - ARRAY_SIZE(pb44_gpio_keys), - pb44_gpio_keys); - ath79_register_spi(&pb44_spi_data, pb44_spi_info, - ARRAY_SIZE(pb44_spi_info)); - ath79_register_usb(); - ath79_register_pci(); -} - -MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board", - pb44_init); diff --git a/arch/mips/ath79/mach-ubnt-xm.c b/arch/mips/ath79/mach-ubnt-xm.c deleted file mode 100644 index 4a3c60694c75..000000000000 --- a/arch/mips/ath79/mach-ubnt-xm.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Ubiquiti Networks XM (rev 1.0) board support - * - * Copyright (C) 2011 René Bolldorf - * - * Derived from: mach-pb44.c - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#include -#include -#include - -#include - -#include "machtypes.h" -#include "dev-gpio-buttons.h" -#include "dev-leds-gpio.h" -#include "dev-spi.h" -#include "pci.h" - -#define UBNT_XM_GPIO_LED_L1 0 -#define UBNT_XM_GPIO_LED_L2 1 -#define UBNT_XM_GPIO_LED_L3 11 -#define UBNT_XM_GPIO_LED_L4 7 - -#define UBNT_XM_GPIO_BTN_RESET 12 - -#define UBNT_XM_KEYS_POLL_INTERVAL 20 -#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL) - -#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000) - -static struct gpio_led ubnt_xm_leds_gpio[] __initdata = { - { - .name = "ubnt-xm:red:link1", - .gpio = UBNT_XM_GPIO_LED_L1, - .active_low = 0, - }, { - .name = "ubnt-xm:orange:link2", - .gpio = UBNT_XM_GPIO_LED_L2, - .active_low = 0, - }, { - .name = "ubnt-xm:green:link3", - .gpio = UBNT_XM_GPIO_LED_L3, - .active_low = 0, - }, { - .name = "ubnt-xm:green:link4", - .gpio = UBNT_XM_GPIO_LED_L4, - .active_low = 0, - }, -}; - -static struct gpio_keys_button ubnt_xm_gpio_keys[] __initdata = { - { - .desc = "reset", - .type = EV_KEY, - .code = KEY_RESTART, - .debounce_interval = UBNT_XM_KEYS_DEBOUNCE_INTERVAL, - .gpio = UBNT_XM_GPIO_BTN_RESET, - .active_low = 1, - } -}; - -static struct spi_board_info ubnt_xm_spi_info[] = { - { - .bus_num = 0, - .chip_select = 0, - .max_speed_hz = 25000000, - .modalias = "mx25l6405d", - } -}; - -static struct ath79_spi_platform_data ubnt_xm_spi_data = { - .bus_num = 0, - .num_chipselect = 1, -}; - -#ifdef CONFIG_PCI -static struct ath9k_platform_data ubnt_xm_eeprom_data; - -static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev) -{ - switch (PCI_SLOT(dev->devfn)) { - case 0: - dev->dev.platform_data = &ubnt_xm_eeprom_data; - break; - } - - return 0; -} - -static void __init ubnt_xm_pci_init(void) -{ - memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR, - sizeof(ubnt_xm_eeprom_data.eeprom_data)); - - ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init); - ath79_register_pci(); -} -#else -static inline void ubnt_xm_pci_init(void) {} -#endif /* CONFIG_PCI */ - -static void __init ubnt_xm_init(void) -{ - ath79_register_leds_gpio(-1, ARRAY_SIZE(ubnt_xm_leds_gpio), - ubnt_xm_leds_gpio); - - ath79_register_gpio_keys_polled(-1, UBNT_XM_KEYS_POLL_INTERVAL, - ARRAY_SIZE(ubnt_xm_gpio_keys), - ubnt_xm_gpio_keys); - - ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info, - ARRAY_SIZE(ubnt_xm_spi_info)); - - ubnt_xm_pci_init(); -} - -MIPS_MACHINE(ATH79_MACH_UBNT_XM, - "UBNT-XM", - "Ubiquiti Networks XM (rev 1.0) board", - ubnt_xm_init); diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h deleted file mode 100644 index a13db3d15c8f..000000000000 --- a/arch/mips/ath79/machtypes.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Atheros AR71XX/AR724X/AR913X machine type definitions - * - * Copyright (C) 2008-2010 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- */ - -#ifndef _ATH79_MACHTYPE_H -#define _ATH79_MACHTYPE_H - -#include - -enum ath79_mach_type { - ATH79_MACH_GENERIC_OF = -1, /* Device tree board */ - ATH79_MACH_GENERIC = 0, - ATH79_MACH_AP121, /* Atheros AP121 reference board */ - ATH79_MACH_AP136_010, /* Atheros AP136-010 reference board */ - ATH79_MACH_AP81, /* Atheros AP81 reference board */ - ATH79_MACH_DB120, /* Atheros DB120 reference board */ - ATH79_MACH_PB44, /* Atheros PB44 reference board */ - ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */ -}; - -#endif /* _ATH79_MACHTYPE_H */ diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c deleted file mode 100644 index b816cb4a25ff..000000000000 --- a/arch/mips/ath79/pci.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Atheros AR71XX/AR724X specific PCI setup code - * - * Copyright (C) 2011 René Bolldorf - * Copyright (C) 2008-2011 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * Parts of this file are based on Atheros' 2.6.15 BSP - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include "pci.h" - -static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev); -static const struct ath79_pci_irq *ath79_pci_irq_map; -static unsigned ath79_pci_nr_irqs; - -static const struct ath79_pci_irq ar71xx_pci_irq_map[] = { - { - .slot = 17, - .pin = 1, - .irq = ATH79_PCI_IRQ(0), - }, { - .slot = 18, - .pin = 1, - .irq = ATH79_PCI_IRQ(1), - }, { - .slot = 19, - .pin = 1, - .irq = ATH79_PCI_IRQ(2), - } -}; - -static const struct ath79_pci_irq ar724x_pci_irq_map[] = { - { - .slot = 0, - .pin = 1, - .irq = ATH79_PCI_IRQ(0), - } -}; - -static const struct ath79_pci_irq qca955x_pci_irq_map[] = { - { - .bus = 0, - .slot = 0, - .pin = 1, - .irq = ATH79_PCI_IRQ(0), - }, - { - .bus = 1, - .slot = 0, - .pin = 1, - .irq = ATH79_PCI_IRQ(1), - }, -}; - -int pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin) -{ - int irq = -1; - int i; - - if (ath79_pci_nr_irqs == 0 || - ath79_pci_irq_map == NULL) { - if (soc_is_ar71xx()) { - ath79_pci_irq_map = ar71xx_pci_irq_map; - ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map); - } else if (soc_is_ar724x() || - soc_is_ar9342() || - soc_is_ar9344()) { - ath79_pci_irq_map = ar724x_pci_irq_map; - ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map); - } else if (soc_is_qca955x()) { - ath79_pci_irq_map = qca955x_pci_irq_map; - ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map); - } else { - pr_crit("pci %s: invalid irq map\n", - pci_name((struct pci_dev *) dev)); - return irq; - } - } - - for (i = 0; i < ath79_pci_nr_irqs; i++) { - const struct ath79_pci_irq *entry; - - entry = &ath79_pci_irq_map[i]; - if (entry->bus == dev->bus->number && - entry->slot == slot && - entry->pin == pin) { - irq = entry->irq; - break; - } - } - - if (irq < 0) - pr_crit("pci %s: no irq found for pin %u\n", - pci_name((struct pci_dev *) dev), pin); - else - pr_info("pci %s: using irq %d for pin %u\n", - pci_name((struct pci_dev *) dev), irq, pin); - - return irq; -} - -int pcibios_plat_dev_init(struct pci_dev *dev) -{ - if (ath79_pci_plat_dev_init) - return ath79_pci_plat_dev_init(dev); - - return 0; -} - -void __init ath79_pci_set_irq_map(unsigned nr_irqs, - const struct ath79_pci_irq *map) -{ - ath79_pci_nr_irqs = nr_irqs; - ath79_pci_irq_map = map; -} - -void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev)) -{ - 
ath79_pci_plat_dev_init = func; -} - -static struct platform_device * -ath79_register_pci_ar71xx(void) -{ - struct platform_device *pdev; - struct resource res[4]; - - memset(res, 0, sizeof(res)); - - res[0].name = "cfg_base"; - res[0].flags = IORESOURCE_MEM; - res[0].start = AR71XX_PCI_CFG_BASE; - res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1; - - res[1].flags = IORESOURCE_IRQ; - res[1].start = ATH79_CPU_IRQ(2); - res[1].end = ATH79_CPU_IRQ(2); - - res[2].name = "io_base"; - res[2].flags = IORESOURCE_IO; - res[2].start = 0; - res[2].end = 0; - - res[3].name = "mem_base"; - res[3].flags = IORESOURCE_MEM; - res[3].start = AR71XX_PCI_MEM_BASE; - res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1; - - pdev = platform_device_register_simple("ar71xx-pci", -1, - res, ARRAY_SIZE(res)); - return pdev; -} - -static struct platform_device * -ath79_register_pci_ar724x(int id, - unsigned long cfg_base, - unsigned long ctrl_base, - unsigned long crp_base, - unsigned long mem_base, - unsigned long mem_size, - unsigned long io_base, - int irq) -{ - struct platform_device *pdev; - struct resource res[6]; - - memset(res, 0, sizeof(res)); - - res[0].name = "cfg_base"; - res[0].flags = IORESOURCE_MEM; - res[0].start = cfg_base; - res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1; - - res[1].name = "ctrl_base"; - res[1].flags = IORESOURCE_MEM; - res[1].start = ctrl_base; - res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1; - - res[2].flags = IORESOURCE_IRQ; - res[2].start = irq; - res[2].end = irq; - - res[3].name = "mem_base"; - res[3].flags = IORESOURCE_MEM; - res[3].start = mem_base; - res[3].end = mem_base + mem_size - 1; - - res[4].name = "io_base"; - res[4].flags = IORESOURCE_IO; - res[4].start = io_base; - res[4].end = io_base; - - res[5].name = "crp_base"; - res[5].flags = IORESOURCE_MEM; - res[5].start = crp_base; - res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1; - - pdev = platform_device_register_simple("ar724x-pci", id, - res, ARRAY_SIZE(res)); - return pdev; -} - -int __init ath79_register_pci(void) -{ - struct platform_device *pdev = NULL; - - if (soc_is_ar71xx()) { - pdev = ath79_register_pci_ar71xx(); - } else if (soc_is_ar724x()) { - pdev = ath79_register_pci_ar724x(-1, - AR724X_PCI_CFG_BASE, - AR724X_PCI_CTRL_BASE, - AR724X_PCI_CRP_BASE, - AR724X_PCI_MEM_BASE, - AR724X_PCI_MEM_SIZE, - 0, - ATH79_CPU_IRQ(2)); - } else if (soc_is_ar9342() || - soc_is_ar9344()) { - u32 bootstrap; - - bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP); - if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0) - return -ENODEV; - - pdev = ath79_register_pci_ar724x(-1, - AR724X_PCI_CFG_BASE, - AR724X_PCI_CTRL_BASE, - AR724X_PCI_CRP_BASE, - AR724X_PCI_MEM_BASE, - AR724X_PCI_MEM_SIZE, - 0, - ATH79_IP2_IRQ(0)); - } else if (soc_is_qca9558()) { - pdev = ath79_register_pci_ar724x(0, - QCA955X_PCI_CFG_BASE0, - QCA955X_PCI_CTRL_BASE0, - QCA955X_PCI_CRP_BASE0, - QCA955X_PCI_MEM_BASE0, - QCA955X_PCI_MEM_SIZE, - 0, - ATH79_IP2_IRQ(0)); - - pdev = ath79_register_pci_ar724x(1, - QCA955X_PCI_CFG_BASE1, - QCA955X_PCI_CTRL_BASE1, - QCA955X_PCI_CRP_BASE1, - QCA955X_PCI_MEM_BASE1, - QCA955X_PCI_MEM_SIZE, - 1, - ATH79_IP3_IRQ(2)); - } else { - /* No PCI support */ - return -ENODEV; - } - - if (!pdev) - pr_err("unable to register PCI controller device\n"); - - return pdev ? 
0 : -ENODEV; -} diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h deleted file mode 100644 index 1d00a3803c37..000000000000 --- a/arch/mips/ath79/pci.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Atheros AR71XX/AR724X PCI support - * - * Copyright (C) 2011 René Bolldorf - * Copyright (C) 2008-2011 Gabor Juhos - * Copyright (C) 2008 Imre Kaloz - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - */ - -#ifndef _ATH79_PCI_H -#define _ATH79_PCI_H - -struct ath79_pci_irq { - int bus; - u8 slot; - u8 pin; - int irq; -}; - -#ifdef CONFIG_PCI -void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map); -void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev)); -int ath79_register_pci(void); -#else -static inline void -ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {} -static inline void -ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {} -static inline int ath79_register_pci(void) { return 0; } -#endif - -#endif /* _ATH79_PCI_H */ diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c index 9728abcb18fa..4a70c5de8c92 100644 --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -31,8 +32,6 @@ #include #include #include "common.h" -#include "dev-common.h" -#include "machtypes.h" #define ATH79_SYS_TYPE_LEN 64 @@ -235,25 +234,21 @@ void __init plat_mem_setup(void) else if (fw_passed_dtb) __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb)); - if (mips_machtype != ATH79_MACH_GENERIC_OF) { - ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, - AR71XX_RESET_SIZE); - ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, - AR71XX_PLL_SIZE); - ath79_detect_sys_type(); - ath79_ddr_ctrl_init(); + ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, + AR71XX_RESET_SIZE); + ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE, + AR71XX_PLL_SIZE); + ath79_detect_sys_type(); + ath79_ddr_ctrl_init(); - detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); - - /* OF machines should use the reset driver */ - _machine_restart = ath79_restart; - } + detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); + _machine_restart = ath79_restart; _machine_halt = ath79_halt; pm_power_off = ath79_halt; } -static void __init ath79_of_plat_time_init(void) +void __init plat_time_init(void) { struct device_node *np; struct clk *clk; @@ -283,61 +278,12 @@ static void __init ath79_of_plat_time_init(void) clk_put(clk); } -void __init plat_time_init(void) -{ - unsigned long cpu_clk_rate; - unsigned long ahb_clk_rate; - unsigned long ddr_clk_rate; - unsigned long ref_clk_rate; - - if (IS_ENABLED(CONFIG_OF) && mips_machtype == ATH79_MACH_GENERIC_OF) { - ath79_of_plat_time_init(); - return; - } - - ath79_clocks_init(); - - cpu_clk_rate = ath79_get_sys_clk_rate("cpu"); - ahb_clk_rate = ath79_get_sys_clk_rate("ahb"); - ddr_clk_rate = ath79_get_sys_clk_rate("ddr"); - ref_clk_rate = ath79_get_sys_clk_rate("ref"); - - pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n", - cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000, - ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000, - ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000, - ref_clk_rate / 1000000, (ref_clk_rate / 1000) % 1000); - - mips_hpt_frequency = cpu_clk_rate / 2; -} - -static int __init ath79_setup(void) +void __init 
arch_init_irq(void) { - if (mips_machtype == ATH79_MACH_GENERIC_OF) - return 0; - - ath79_gpio_init(); - ath79_register_uart(); - ath79_register_wdt(); - - mips_machine_setup(); - - return 0; + irqchip_init(); } -arch_initcall(ath79_setup); - void __init device_tree_init(void) { unflatten_and_copy_device_tree(); } - -MIPS_MACHINE(ATH79_MACH_GENERIC, - "Generic", - "Generic AR71XX/AR724X/AR913X based board", - NULL); - -MIPS_MACHINE(ATH79_MACH_GENERIC_OF, - "DTB", - "Generic AR71XX/AR724X/AR913X based board (DT)", - NULL); diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c index 977990a609ba..67b6a78d670b 100644 --- a/arch/mips/bcm47xx/buttons.c +++ b/arch/mips/bcm47xx/buttons.c @@ -147,7 +147,7 @@ bcm47xx_buttons_buffalo_whr_g125[] __initconst = { static const struct gpio_keys_button bcm47xx_buttons_buffalo_whr_g54s[] __initconst = { BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON), - BCM47XX_GPIO_KEY(4, KEY_RESTART), + BCM47XX_GPIO_KEY_H(4, KEY_RESTART), BCM47XX_GPIO_KEY(5, BTN_0), /* Router / AP mode swtich */ }; diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c index d85fcdac8bf0..167c42c71e79 100644 --- a/arch/mips/bcm47xx/leds.c +++ b/arch/mips/bcm47xx/leds.c @@ -152,11 +152,11 @@ bcm47xx_leds_buffalo_whr_g125[] __initconst = { static const struct gpio_led bcm47xx_leds_buffalo_whr_g54s[] __initconst = { - BCM47XX_GPIO_LED(1, "unk", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(2, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(3, "unk", "internal", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(6, "unk", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), - BCM47XX_GPIO_LED(7, "unk", "diag", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(1, "green", "bridge", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(2, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(3, "green", "internal", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(6, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF), + BCM47XX_GPIO_LED(7, "red", "diag", 1, LEDS_GPIO_DEFSTATE_OFF), }; static const struct gpio_led diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index fe3773539eff..82627c264964 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -274,7 +274,7 @@ static int __init bcm47xx_register_bus_complete(void) bcm47xx_leds_register(); bcm47xx_workarounds(); - fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status, -1); + fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status); return 0; } device_initcall(bcm47xx_register_bus_complete); diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c index 07b4c65a88a4..8e73d65f3480 100644 --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = { static int shared_device_registered; +static u64 enet_dmamask = DMA_BIT_MASK(32); + static struct resource enet0_res[] = { { .start = -1, /* filled at runtime */ @@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = { .resource = enet0_res, .dev = { .platform_data = &enet0_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = { .resource = enet1_res, .dev = { .platform_data = &enet1_pd, + .dma_mask = &enet_dmamask, + .coherent_dma_mask = DMA_BIT_MASK(32), }, }; @@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = { .resource = enetsw_res, .dev = { .platform_data = &enetsw_pd, + .dma_mask = &enet_dmamask, + 
.coherent_dma_mask = DMA_BIT_MASK(32), }, }; diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 35704c28a28b..3ce4dd578370 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -115,7 +115,7 @@ endif targets += vmlinux.its.S quiet_cmd_its_cat = CAT $@ - cmd_its_cat = cat $(filter-out $(PHONY), $^) >$@ + cmd_its_cat = cat $(real-prereqs) >$@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)) FORCE $(call if_changed,its_cat) diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts index 0fa3dd1819ff..dda0559cef50 100644 --- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts +++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts @@ -180,14 +180,28 @@ ethernet@0 { phy-handle = <&phy2>; cavium,alt-phy-handle = <&phy100>; + rx-delay = <0>; + tx-delay = <0>; + fixed-link { + speed = <1000>; + full-duplex; + }; }; ethernet@1 { phy-handle = <&phy3>; cavium,alt-phy-handle = <&phy101>; + rx-delay = <0>; + tx-delay = <0>; + fixed-link { + speed = <1000>; + full-duplex; + }; }; ethernet@2 { phy-handle = <&phy4>; cavium,alt-phy-handle = <&phy102>; + rx-delay = <0>; + tx-delay = <0>; }; ethernet@3 { compatible = "cavium,octeon-3860-pip-port"; diff --git a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts index 243e5dc444fb..962f37fbc7db 100644 --- a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts +++ b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts @@ -33,12 +33,18 @@ interface@0 { ethernet@0 { phy-handle = <&phy7>; + rx-delay = <0>; + tx-delay = <0x10>; }; ethernet@1 { phy-handle = <&phy6>; + rx-delay = <0>; + tx-delay = <0x10>; }; ethernet@2 { phy-handle = <&phy5>; + rx-delay = <0>; + tx-delay = <0x10>; }; }; }; diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 50cff3cbcc6d..4f7b1fa31cf5 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts @@ -76,7 +76,7 @@ status = "okay"; pinctrl-names = "default"; - pinctrl-0 = <&pins_uart2>; + pinctrl-0 = <&pins_uart3>; }; &uart4 { @@ -196,9 +196,9 @@ bias-disable; }; - pins_uart2: uart2 { - function = "uart2"; - groups = "uart2-data", "uart2-hwflow"; + pins_uart3: uart3 { + function = "uart3"; + groups = "uart3-data", "uart3-hwflow"; bias-disable; }; diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 6fb16fd24035..2beb78a62b7d 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi @@ -161,7 +161,7 @@ #dma-cells = <2>; interrupt-parent = <&intc>; - interrupts = <29>; + interrupts = <20>; clocks = <&cgu JZ4740_CLK_DMA>; diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts index 2152b7ba65fb..cc8dbea0911f 100644 --- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts +++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts @@ -90,11 +90,11 @@ interrupts = <0>; }; - axi_i2c: i2c@10A00000 { + axi_i2c: i2c@10a00000 { compatible = "xlnx,xps-iic-2.00.a"; interrupt-parent = <&axi_intc>; interrupts = <4>; - reg = < 0x10A00000 0x10000 >; + reg = < 0x10a00000 0x10000 >; clocks = <&ext>; xlnx,clk-freq = <0x5f5e100>; xlnx,family = "Artix7"; @@ -106,9 +106,9 @@ #address-cells = <1>; #size-cells = <0>; - ad7420@4B { + ad7420@4b { compatible = "adi,adt7420"; - reg = <0x4B>; + reg = <0x4b>; }; } ; }; diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index e8eb60ed99f2..11d5a4e90736 
100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -245,6 +245,9 @@ void __init plat_swiotlb_setup(void) swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT; octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE); + if (!octeon_swiotlb) + panic("%s: Failed to allocate %zu bytes align=%lx\n", + __func__, swiotlbsize, PAGE_SIZE); if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM) panic("Cannot allocate SWIOTLB buffer"); diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index ab8362e04461..2e2d45bc850d 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -31,6 +31,7 @@ * network ports from the rest of the cvmx-helper files. */ +#include #include #include @@ -210,56 +211,18 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port) { cvmx_helper_link_info_t result; + WARN(!octeon_is_simulation(), + "Using deprecated link status - please update your DT"); + /* Unless we fix it later, all links are defaulted to down */ result.u64 = 0; - /* - * This switch statement should handle all ports that either don't use - * Marvell PHYS, or don't support in-band status. - */ - switch (cvmx_sysinfo_get()->board_type) { - case CVMX_BOARD_TYPE_SIM: + if (octeon_is_simulation()) { /* The simulator gives you a simulated 1Gbps full duplex link */ result.s.link_up = 1; result.s.full_duplex = 1; result.s.speed = 1000; return result; - case CVMX_BOARD_TYPE_EBH3100: - case CVMX_BOARD_TYPE_CN3010_EVB_HS5: - case CVMX_BOARD_TYPE_CN3005_EVB_HS5: - case CVMX_BOARD_TYPE_CN3020_EVB_HS5: - /* Port 1 on these boards is always Gigabit */ - if (ipd_port == 1) { - result.s.link_up = 1; - result.s.full_duplex = 1; - result.s.speed = 1000; - return result; - } - /* Fall through to the generic code below */ - break; - case CVMX_BOARD_TYPE_CUST_NB5: - /* Port 1 on these boards is always Gigabit */ - if (ipd_port == 1) { - result.s.link_up = 1; - result.s.full_duplex = 1; - result.s.speed = 1000; - return result; - } - break; - case CVMX_BOARD_TYPE_BBGW_REF: - /* Port 1 on these boards is always Gigabit */ - if (ipd_port == 2) { - /* Port 2 is not hooked up */ - result.u64 = 0; - return result; - } else { - /* Ports 0 and 1 connect to the switch */ - result.s.link_up = 1; - result.s.full_duplex = 1; - result.s.speed = 1000; - return result; - } - break; } if (OCTEON_IS_MODEL(OCTEON_CN3XXX) @@ -357,45 +320,6 @@ int __cvmx_helper_board_interface_probe(int interface, int supported_ports) return supported_ports; } -/** - * Enable packet input/output from the hardware. This function is - * called after by cvmx_helper_packet_hardware_enable() to - * perform board specific initialization. For most boards - * nothing is needed. 
- * - * @interface: Interface to enable - * - * Returns Zero on success, negative on failure - */ -int __cvmx_helper_board_hardware_enable(int interface) -{ - if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) { - if (interface == 0) { - /* Different config for switch port */ - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0); - /* - * Boards with gigabit WAN ports need a - * different setting that is compatible with - * 100 Mbit settings - */ - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), - 0xc); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), - 0xc); - } - } else if (cvmx_sysinfo_get()->board_type == - CVMX_BOARD_TYPE_UBNT_E100) { - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface), 0); - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface), 0x10); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0); - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0x10); - cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(2, interface), 0); - cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(2, interface), 0x10); - } - return 0; -} - /** * Get the clock type used for the USB block based on board type. * Used by the USB code for auto configuration of clock type. diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 38e0444e57e8..de391541d6f7 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c @@ -30,6 +30,7 @@ * Helper functions for common, but complicated tasks. * */ +#include #include #include @@ -43,7 +44,6 @@ #include #include -#include #include /* Port count per interface */ @@ -317,22 +317,6 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface) return CVMX_HELPER_INTERFACE_MODE_DISABLED; } - if (interface == 0 - && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5 - && cvmx_sysinfo_get()->board_rev_major == 1) { - /* - * Lie about interface type of CN3005 board. This - * board has a switch on port 1 like the other - * evaluation boards, but it is connected over RGMII - * instead of GMII. Report GMII mode so that the - * speed is forced to 1 Gbit full duplex. Other than - * some initial configuration (which does not use the - * output of this function) there is no difference in - * setup between GMII and RGMII modes. 
- */ - return CVMX_HELPER_INTERFACE_MODE_GMII; - } - /* Interface 1 is always disabled on CN31XX and CN30XX */ if ((interface == 1) && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX) @@ -778,7 +762,6 @@ static int __cvmx_helper_packet_hardware_enable(int interface) result = __cvmx_helper_loop_enable(interface); break; } - result |= __cvmx_helper_board_hardware_enable(interface); return result; } @@ -1026,7 +1009,6 @@ int cvmx_helper_initialize_packet_io_global(void) int result = 0; int interface; union cvmx_l2c_cfg l2c_cfg; - union cvmx_smix_en smix_en; const int num_interfaces = cvmx_helper_get_number_of_interfaces(); /* @@ -1046,24 +1028,6 @@ int cvmx_helper_initialize_packet_io_global(void) l2c_cfg.s.rfb_arb_mode = 0; cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64); - /* Make sure SMI/MDIO is enabled so we can query PHYs */ - smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0)); - if (!smix_en.s.en) { - smix_en.s.en = 1; - cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64); - } - - /* Newer chips actually have two SMI/MDIO interfaces */ - if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && - !OCTEON_IS_MODEL(OCTEON_CN58XX) && - !OCTEON_IS_MODEL(OCTEON_CN50XX)) { - smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1)); - if (!smix_en.s.en) { - smix_en.s.en = 1; - cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64); - } - } - cvmx_pko_initialize_global(); for (interface = 0; interface < num_interfaces; interface++) { result |= cvmx_helper_interface_probe(interface); @@ -1136,6 +1100,7 @@ cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port) if (index == 0) result = __cvmx_helper_rgmii_link_get(ipd_port); else { + WARN(1, "Using deprecated link status - please update your DT"); result.s.full_duplex = 1; result.s.link_up = 1; result.s.speed = 1000; diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c index 2d68a39f1443..13f6c7716b1e 100644 --- a/arch/mips/cavium-octeon/oct_ilm.c +++ b/arch/mips/cavium-octeon/oct_ilm.c @@ -63,31 +63,11 @@ static int reset_statistics(void *data, u64 value) DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n"); -static int init_debufs(void) +static void init_debugfs(void) { - struct dentry *show_dentry; dir = debugfs_create_dir("oct_ilm", 0); - if (!dir) { - pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n"); - return -1; - } - - show_dentry = debugfs_create_file("statistics", 0222, dir, NULL, - &oct_ilm_ops); - if (!show_dentry) { - pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n"); - return -1; - } - - show_dentry = debugfs_create_file("reset", 0222, dir, NULL, - &reset_statistics_ops); - if (!show_dentry) { - pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n"); - return -1; - } - - return 0; - + debugfs_create_file("statistics", 0222, dir, NULL, &oct_ilm_ops); + debugfs_create_file("reset", 0222, dir, NULL, &reset_statistics_ops); } static void init_latency_info(struct latency_info *li, int startup) @@ -169,11 +149,7 @@ static __init int oct_ilm_module_init(void) int rc; int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM; - rc = init_debufs(); - if (rc) { - WARN(1, "Could not create debugfs entries"); - return rc; - } + init_debugfs(); rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD, "oct_ilm", 0); diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c index 1f9ba60f7375..51685f893eab 100644 --- a/arch/mips/cavium-octeon/octeon-platform.c +++ b/arch/mips/cavium-octeon/octeon-platform.c @@ -458,6 +458,23 @@ static bool __init 
octeon_has_88e1145(void) !OCTEON_IS_MODEL(OCTEON_CN56XX); } +static bool __init octeon_has_fixed_link(int ipd_port) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + case CVMX_BOARD_TYPE_CN3010_EVB_HS5: + case CVMX_BOARD_TYPE_CN3020_EVB_HS5: + case CVMX_BOARD_TYPE_CUST_NB5: + case CVMX_BOARD_TYPE_EBH3100: + /* Port 1 on these boards is always gigabit. */ + return ipd_port == 1; + case CVMX_BOARD_TYPE_BBGW_REF: + /* Ports 0 and 1 connect to the switch. */ + return ipd_port == 0 || ipd_port == 1; + } + return false; +} + static void __init octeon_fdt_set_phy(int eth, int phy_addr) { const __be32 *phy_handle; @@ -586,12 +603,52 @@ static void __init octeon_fdt_rm_ethernet(int node) fdt_nop_node(initial_boot_params, node); } +static void __init _octeon_rx_tx_delay(int eth, int rx_delay, int tx_delay) +{ + fdt_setprop_inplace_cell(initial_boot_params, eth, "rx-delay", + rx_delay); + fdt_setprop_inplace_cell(initial_boot_params, eth, "tx-delay", + tx_delay); +} + +static void __init octeon_rx_tx_delay(int eth, int iface, int port) +{ + switch (cvmx_sysinfo_get()->board_type) { + case CVMX_BOARD_TYPE_CN3005_EVB_HS5: + if (iface == 0) { + if (port == 0) { + /* + * Boards with gigabit WAN ports need a + * different setting that is compatible with + * 100 Mbit settings + */ + _octeon_rx_tx_delay(eth, 0xc, 0x0c); + return; + } else if (port == 1) { + /* Different config for switch port. */ + _octeon_rx_tx_delay(eth, 0x0, 0x0); + return; + } + } + break; + case CVMX_BOARD_TYPE_UBNT_E100: + if (iface == 0 && port <= 2) { + _octeon_rx_tx_delay(eth, 0x0, 0x10); + return; + } + break; + } + fdt_nop_property(initial_boot_params, eth, "rx-delay"); + fdt_nop_property(initial_boot_params, eth, "tx-delay"); +} + static void __init octeon_fdt_pip_port(int iface, int i, int p, int max) { char name_buffer[20]; int eth; int phy_addr; int ipd_port; + int fixed_link; snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p); eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer); @@ -609,6 +666,13 @@ static void __init octeon_fdt_pip_port(int iface, int i, int p, int max) phy_addr = cvmx_helper_board_get_mii_address(ipd_port); octeon_fdt_set_phy(eth, phy_addr); + + fixed_link = fdt_subnode_offset(initial_boot_params, eth, "fixed-link"); + if (fixed_link < 0) + WARN_ON(octeon_has_fixed_link(ipd_port)); + else if (!octeon_has_fixed_link(ipd_port)) + fdt_nop_node(initial_boot_params, fixed_link); + octeon_rx_tx_delay(eth, i, p); } static void __init octeon_fdt_pip_iface(int pip, int idx) diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig index 7c138dab87df..5d80521e5d5a 100644 --- a/arch/mips/configs/generic_defconfig +++ b/arch/mips/configs/generic_defconfig @@ -59,7 +59,7 @@ CONFIG_HID_MONTEREY=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y +CONFIG_FS_ENCRYPTION=y CONFIG_FANOTIFY=y CONFIG_FUSE_FS=y CONFIG_CUSE=y diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig index c3cac29e8414..2bb02ea9fb4e 100644 --- a/arch/mips/configs/xway_defconfig +++ b/arch/mips/configs/xway_defconfig @@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_LANTIQ=y CONFIG_PCI_LANTIQ=y -CONFIG_XRX200_PHY_FW=y CONFIG_CPU_MIPS32_R2=y CONFIG_MIPS_VPE_LOADER=y CONFIG_NR_CPUS=2 diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index f15d5db5dd67..87b86cdf126a 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ 
-3,7 +3,6 @@ generated-y += syscall_table_32_o32.h generated-y += syscall_table_64_n32.h generated-y += syscall_table_64_n64.h generated-y += syscall_table_64_o32.h -generic-(CONFIG_GENERIC_CSUM) += checksum.h generic-y += current.h generic-y += device.h generic-y += dma-contiguous.h diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 43fcd35e2957..94096299fc56 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ if (kernel_uses_llsc) { \ int temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ @@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ if (kernel_uses_llsc) { \ long temp; \ \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set "MIPS_ISA_LEVEL" \n" \ diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index a5eb1bb199a7..b865e317a14f 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -105,6 +105,20 @@ */ #define STYPE_SYNC_MB 0x10 +/* + * stype 0x14 - A completion barrier specific to global invalidations + * + * When a sync instruction of this type completes any preceding GINVI or GINVT + * operation has been globalized & completed on all coherent CPUs. Anything + * that the GINV* instruction should invalidate will have been invalidated on + * all coherent CPUs when this instruction completes. It is implementation + * specific whether the GINV* instructions themselves will ensure completion, + * or this sync type will. + * + * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3) + * this sync type also requires that previous SYNCI operations have completed. + */ +#define STYPE_GINV 0x14 #ifdef CONFIG_CPU_HAS_SYNC #define __sync() \ @@ -222,6 +236,47 @@ #define __smp_mb__before_atomic() __smp_mb__before_llsc() #define __smp_mb__after_atomic() smp_llsc_mb() +/* + * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, + * store or pref) in between an ll & sc can cause the sc instruction to + * erroneously succeed, breaking atomicity. Whilst it's unusual to write code + * containing such sequences, this bug bites harder than we might otherwise + * expect due to reordering & speculation: + * + * 1) A memory access appearing prior to the ll in program order may actually + * be executed after the ll - this is the reordering case. + * + * In order to avoid this we need to place a memory barrier (ie. 
a sync + * instruction) prior to every ll instruction, in between it & any earlier + * memory access instructions. Many of these cases are already covered by + * smp_mb__before_llsc() but for the remaining cases, typically ones in + * which multiple CPUs may operate on a memory location but ordering is not + * usually guaranteed, we use loongson_llsc_mb() below. + * + * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. + * + * 2) If a conditional branch exists between an ll & sc with a target outside + * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg() + * or similar, then misprediction of the branch may allow speculative + * execution of memory accesses from outside of the ll-sc loop. + * + * In order to avoid this we need a memory barrier (ie. a sync instruction) + * at each affected branch target, for which we also use loongson_llsc_mb() + * defined below. + * + * This case affects all current Loongson 3 CPUs. + */ +#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ +#define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") +#else +#define loongson_llsc_mb() do { } while (0) +#endif + +static inline void sync_ginv(void) +{ + asm volatile("sync\t%0" :: "i"(STYPE_GINV)); +} + #include #endif /* __ASM_BARRIER_H */ diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index c4675957b21b..830c93a010c3 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { + loongson_llsc_mb(); do { __asm__ __volatile__( " " __LL "%0, %1 # set_bit \n" @@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" @@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) : "ir" (~(1UL << bit))); #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { + loongson_llsc_mb(); do { __asm__ __volatile__( " " __LL "%0, %1 # clear_bit \n" @@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" @@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); unsigned long temp; + loongson_llsc_mb(); do { __asm__ __volatile__( " .set push \n" diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h index 4812d1fed0c2..d687b40b9fbb 100644 --- a/arch/mips/include/asm/cacheflush.h +++ b/arch/mips/include/asm/cacheflush.h @@ -25,7 +25,6 @@ * * MIPS specific flush operations: * - * - flush_cache_sigtramp() flush signal trampoline * - flush_icache_all() flush the entire instruction cache * - flush_data_cache_page() flushes a page from the data cache * - __flush_icache_user_range(start, end) flushes range of user instructions @@ -110,7 +109,6 @@ extern void copy_from_user_page(struct 
vm_area_struct *vma, struct page *page, unsigned long vaddr, void *dst, const void *src, unsigned long len); -extern void (*flush_cache_sigtramp)(unsigned long addr); extern void (*flush_icache_all)(void); extern void (*local_flush_data_cache_page)(void * addr); extern void (*flush_data_cache_page)(unsigned long addr); diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 638de0c25249..f345a873742d 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -36,6 +36,8 @@ */ extern unsigned long __cmpxchg_called_with_bad_pointer(void) __compiletime_error("Bad argument size for cmpxchg"); +extern unsigned long __cmpxchg64_unsupported(void) + __compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false"); extern unsigned long __xchg_called_with_bad_pointer(void) __compiletime_error("Bad argument size for xchg"); @@ -204,12 +206,102 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, cmpxchg((ptr), (o), (n)); \ }) #else -#include -#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) -#ifndef CONFIG_SMP -#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) -#endif -#endif + +# include +# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) + +# ifdef CONFIG_SMP + +static inline unsigned long __cmpxchg64(volatile void *ptr, + unsigned long long old, + unsigned long long new) +{ + unsigned long long tmp, ret; + unsigned long flags; + + /* + * The assembly below has to combine 32 bit values into a 64 bit + * register, and split 64 bit values from one register into two. If we + * were to take an interrupt in the middle of this we'd only save the + * least significant 32 bits of each register & probably clobber the + * most significant 32 bits of the 64 bit values we're using. In order + * to avoid this we must disable interrupts. + */ + local_irq_save(flags); + + asm volatile( + " .set push \n" + " .set " MIPS_ISA_ARCH_LEVEL " \n" + /* Load 64 bits from ptr */ + "1: lld %L0, %3 # __cmpxchg64 \n" + /* + * Split the 64 bit value we loaded into the 2 registers that hold the + * ret variable. + */ + " dsra %M0, %L0, 32 \n" + " sll %L0, %L0, 0 \n" + /* + * Compare ret against old, breaking out of the loop if they don't + * match. + */ + " bne %M0, %M4, 2f \n" + " bne %L0, %L4, 2f \n" + /* + * Combine the 32 bit halves from the 2 registers that hold the new + * variable into a single 64 bit register. + */ +# if MIPS_ISA_REV >= 2 + " move %L1, %L5 \n" + " dins %L1, %M5, 32, 32 \n" +# else + " dsll %L1, %L5, 32 \n" + " dsrl %L1, %L1, 32 \n" + " .set noat \n" + " dsll $at, %M5, 32 \n" + " or %L1, %L1, $at \n" + " .set at \n" +# endif + /* Attempt to store new at ptr */ + " scd %L1, %2 \n" + /* If we failed, loop! */ + "\t" __scbeqz " %L1, 1b \n" + " .set pop \n" + "2: \n" + : "=&r"(ret), + "=&r"(tmp), + "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr) + : GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr), + "r" (old), + "r" (new) + : "memory"); + + local_irq_restore(flags); + return ret; +} + +# define cmpxchg64(ptr, o, n) ({ \ + unsigned long long __old = (__typeof__(*(ptr)))(o); \ + unsigned long long __new = (__typeof__(*(ptr)))(n); \ + __typeof__(*(ptr)) __res; \ + \ + /* \ + * We can only use cmpxchg64 if we know that the CPU supports \ + * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported \ + * will cause a build error unless cpu_has_64bits is a \ + * compile-time constant 1. 
\ + */ \ + if (cpu_has_64bits && kernel_uses_llsc) \ + __res = __cmpxchg64((ptr), __old, __new); \ + else \ + __res = __cmpxchg64_unsupported(); \ + \ + __res; \ +}) + +# else /* !CONFIG_SMP */ +# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) +# endif /* !CONFIG_SMP */ +#endif /* !CONFIG_64BIT */ #undef __scbeqz diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 701e525641b8..6998a9796499 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -590,6 +590,19 @@ # define cpu_has_mipsmt_pertccounters 0 #endif /* CONFIG_MIPS_MT_SMP */ +/* + * We only enable MMID support for configurations which natively support 64 bit + * atomics because getting good performance from the allocator relies upon + * efficient atomic64_*() functions. + */ +#ifndef cpu_has_mmid +# ifdef CONFIG_GENERIC_ATOMIC64 +# define cpu_has_mmid 0 +# else +# define cpu_has_mmid __isa_ge_and_opt(6, MIPS_CPU_MMID) +# endif +#endif + /* * Guest capabilities */ diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 532b49b1dbb3..6ad7d3cabd91 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -422,6 +422,7 @@ enum cpu_type_enum { MBIT_ULL(55) /* CPU shares FTLB entries with another */ #define MIPS_CPU_MT_PER_TC_PERF_COUNTERS \ MBIT_ULL(56) /* CPU has perf counters implemented per TC (MIPSMT ASE) */ +#define MIPS_CPU_MMID MBIT_ULL(57) /* CPU supports MemoryMapIDs */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h index 20dfaad3a55d..34de7b17b41b 100644 --- a/arch/mips/include/asm/dma-mapping.h +++ b/arch/mips/include/asm/dma-mapping.h @@ -15,14 +15,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) #endif } -#define arch_setup_dma_ops arch_setup_dma_ops -static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, const struct iommu_ops *iommu, - bool coherent) -{ -#ifdef CONFIG_DMA_PERDEV_COHERENT - dev->dma_coherent = coherent; -#endif -} - #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index c14d798f3888..b83b0397462d 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -50,6 +50,7 @@ "i" (-EFAULT) \ : "memory"); \ } else if (cpu_has_llsc) { \ + loongson_llsc_mb(); \ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ @@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { + loongson_llsc_mb(); __asm__ __volatile__( "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" @@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); + loongson_llsc_mb(); } else return -ENOSYS; diff --git a/arch/mips/include/asm/ginvt.h b/arch/mips/include/asm/ginvt.h new file mode 100644 index 000000000000..49c6dbe37338 --- /dev/null +++ b/arch/mips/include/asm/ginvt.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __MIPS_ASM_GINVT_H__ +#define __MIPS_ASM_GINVT_H__ + +#include + +enum ginvt_type { + GINVT_FULL, + GINVT_VA, + GINVT_MMID, +}; + +#ifdef TOOLCHAIN_SUPPORTS_GINV +# define _ASM_SET_GINV ".set ginv\n" +#else +_ASM_MACRO_1R1I(ginvt, rs, type, + _ASM_INSN_IF_MIPS(0x7c0000bd | (__rs << 21) | (\\type << 8)) + _ASM_INSN32_IF_MM(0x0000717c | (__rs << 16) | (\\type << 9))); +# define 
_ASM_SET_GINV +#endif + +static inline void ginvt(unsigned long addr, enum ginvt_type type) +{ + asm volatile( + ".set push\n" + _ASM_SET_GINV + " ginvt %0, %1\n" + ".set pop" + : /* no outputs */ + : "r"(addr), "i"(type) + : "memory"); +} + +static inline void ginvt_full(void) +{ + ginvt(0, GINVT_FULL); +} + +static inline void ginvt_va(unsigned long addr) +{ + addr &= PAGE_MASK << 1; + ginvt(addr, GINVT_VA); +} + +static inline void ginvt_mmid(void) +{ + ginvt(0, GINVT_MMID); +} + +static inline void ginvt_va_mmid(unsigned long addr) +{ + addr &= PAGE_MASK << 1; + ginvt(addr, GINVT_VA | GINVT_MMID); +} + +#endif /* __MIPS_ASM_GINVT_H__ */ diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 9d3610be2323..f0b862a83816 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -41,7 +41,7 @@ static inline unsigned long arch_local_irq_save(void) " .set push \n" " .set reorder \n" " .set noat \n" -#if defined(CONFIG_CPU_LOONGSON3) +#if defined(CONFIG_CPU_LOONGSON3) || defined (CONFIG_CPU_LOONGSON1) " mfc0 %[flags], $12 \n" " di \n" #else diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h index 73dcd63b8243..47e8827e9564 100644 --- a/arch/mips/include/asm/mach-ath79/ath79.h +++ b/arch/mips/include/asm/mach-ath79/ath79.h @@ -178,8 +178,4 @@ static inline u32 ath79_reset_rr(unsigned reg) void ath79_device_reset_set(u32 mask); void ath79_device_reset_clear(u32 mask); -void ath79_cpu_irq_init(unsigned irq_wb_chan2, unsigned irq_wb_chan3); -void ath79_misc_irq_init(void __iomem *regs, int irq, - int irq_base, bool is_ar71xx); - #endif /* __ASM_MACH_ATH79_H */ diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h index b0b7261ff3ad..fd91c58aaf7d 100644 --- a/arch/mips/include/asm/mach-ip27/irq.h +++ b/arch/mips/include/asm/mach-ip27/irq.h @@ -10,13 +10,15 @@ #ifndef __ASM_MACH_IP27_IRQ_H #define __ASM_MACH_IP27_IRQ_H -/* - * A hardwired interrupt number is completely stupid for this system - a - * large configuration might have thousands if not tenthousands of - * interrupts. 
- */ #define NR_IRQS 256 #include_next +#define IP27_HUB_PEND0_IRQ (MIPS_CPU_IRQ_BASE + 2) +#define IP27_HUB_PEND1_IRQ (MIPS_CPU_IRQ_BASE + 3) +#define IP27_RT_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 4) + +#define IP27_HUB_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) +#define IP27_HUB_IRQ_COUNT 128 + #endif /* __ASM_MACH_IP27_IRQ_H */ diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h index 2ed3094dee07..1cd6a23a84f2 100644 --- a/arch/mips/include/asm/mach-ip27/mmzone.h +++ b/arch/mips/include/asm/mach-ip27/mmzone.h @@ -8,20 +8,11 @@ #define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr)) -#define LEVELS_PER_SLICE 128 - -struct slice_data { - unsigned long irq_enable_mask[2]; - int level_to_irq[LEVELS_PER_SLICE]; -}; - struct hub_data { kern_vars_t kern_vars; DECLARE_BITMAP(h_bigwin_used, HUB_NUM_BIG_WINDOW); cpumask_t h_cpus; unsigned long slice_map; - unsigned long irq_alloc_mask[2]; - struct slice_data slice[2]; }; struct node_data { diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h index 8f8fa43ba095..15d1de2300fe 100644 --- a/arch/mips/include/asm/mach-loongson32/platform.h +++ b/arch/mips/include/asm/mach-loongson32/platform.h @@ -17,19 +17,15 @@ extern struct platform_device ls1x_uart_pdev; extern struct platform_device ls1x_cpufreq_pdev; -extern struct platform_device ls1x_dma_pdev; extern struct platform_device ls1x_eth0_pdev; extern struct platform_device ls1x_eth1_pdev; extern struct platform_device ls1x_ehci_pdev; extern struct platform_device ls1x_gpio0_pdev; extern struct platform_device ls1x_gpio1_pdev; -extern struct platform_device ls1x_nand_pdev; extern struct platform_device ls1x_rtc_pdev; extern struct platform_device ls1x_wdt_pdev; void __init ls1x_clk_init(void); -void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata); -void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata); void __init ls1x_rtc_set_extclk(struct platform_device *pdev); void __init ls1x_serial_set_uartclk(struct platform_device *pdev); diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 402b80af91aa..1e6966e8527e 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -667,6 +667,7 @@ #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) #define MIPS_CONF5_CA2 (_ULCAST_(1) << 14) +#define MIPS_CONF5_MI (_ULCAST_(1) << 17) #define MIPS_CONF5_CRCP (_ULCAST_(1) << 18) #define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) #define MIPS_CONF5_EVA (_ULCAST_(1) << 28) @@ -1247,6 +1248,13 @@ __asm__(".macro parse_r var r\n\t" ENC \ ".endm") +/* Instructions with 1 register operand & 1 immediate operand */ +#define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \ + __asm__(".macro " #OP " " #R1 ", " #I2 "\n\t" \ + "parse_r __" #R1 ", \\" #R1 "\n\t" \ + ENC \ + ".endm") + /* Instructions with 2 register operands */ #define _ASM_MACRO_2R(OP, R1, R2, ENC) \ __asm__(".macro " #OP " " #R1 ", " #R2 "\n\t" \ @@ -1603,6 +1611,9 @@ do { \ #define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3) #define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val) +#define read_c0_memorymapid() __read_32bit_c0_register($4, 5) +#define write_c0_memorymapid(val) __write_32bit_c0_register($4, 5, val) + #define read_c0_pagemask() __read_32bit_c0_register($5, 0) #define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val) diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index 
88a108ce62c1..5df0238f639b 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -7,7 +7,11 @@ #include typedef struct { - u64 asid[NR_CPUS]; + union { + u64 asid[NR_CPUS]; + atomic64_t mmid; + }; + void *vdso; /* lock to be held whilst modifying fp_bd_emupage_allocmap */ diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index a589585be21b..cddead91acd4 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -17,8 +17,10 @@ #include #include +#include #include #include +#include #include #include #include @@ -72,6 +74,19 @@ extern unsigned long pgd_current[]; TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ +/* + * The ginvt instruction will invalidate wired entries when its type field + * targets anything other than the entire TLB. That means that if we were to + * allow the kernel to create wired entries with the MMID of current->active_mm + * then those wired entries could be invalidated when we later use ginvt to + * invalidate TLB entries with that MMID. + * + * In order to prevent ginvt from trashing wired entries, we reserve one MMID + * for use by the kernel when creating wired entries. This MMID will never be + * assigned to a struct mm, and we'll never target it with a ginvt instruction. + */ +#define MMID_KERNEL_WIRED 0 + /* * All unused by hardware upper bits will be considered * as a software asid extension. @@ -88,7 +103,23 @@ static inline u64 asid_first_version(unsigned int cpu) return ~asid_version_mask(cpu) + 1; } -#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) +static inline u64 cpu_context(unsigned int cpu, const struct mm_struct *mm) +{ + if (cpu_has_mmid) + return atomic64_read(&mm->context.mmid); + + return mm->context.asid[cpu]; +} + +static inline void set_cpu_context(unsigned int cpu, + struct mm_struct *mm, u64 ctx) +{ + if (cpu_has_mmid) + atomic64_set(&mm->context.mmid, ctx); + else + mm->context.asid[cpu] = ctx; +} + #define asid_cache(cpu) (cpu_data[cpu].asid_cache) #define cpu_asid(cpu, mm) \ (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu])) @@ -97,21 +128,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) { } - -/* Normal, classic MIPS get_new_mmu_context */ -static inline void -get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) -{ - u64 asid = asid_cache(cpu); - - if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { - if (cpu_has_vtag_icache) - flush_icache_all(); - local_flush_tlb_all(); /* start new asid cycle */ - } - - cpu_context(cpu, mm) = asid_cache(cpu) = asid; -} +extern void get_new_mmu_context(struct mm_struct *mm); +extern void check_mmu_context(struct mm_struct *mm); +extern void check_switch_mmu_context(struct mm_struct *mm); /* * Initialize the context related info for a new mm_struct @@ -122,8 +141,12 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int i; - for_each_possible_cpu(i) - cpu_context(i, mm) = 0; + if (cpu_has_mmid) { + set_cpu_context(0, mm, 0); + } else { + for_each_possible_cpu(i) + set_cpu_context(i, mm, 0); + } mm->context.bd_emupage_allocmap = NULL; spin_lock_init(&mm->context.bd_emupage_lock); @@ -140,11 +163,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, local_irq_save(flags); htw_stop(); - /* Check if our ASID is of an older version and thus invalid */ - if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu)) - get_new_mmu_context(next, cpu); - 
write_c0_entryhi(cpu_asid(cpu, next)); - TLBMISS_HANDLER_SETUP_PGD(next->pgd); + check_switch_mmu_context(next); /* * Mark current->active_mm as not "active" anymore. @@ -166,55 +185,55 @@ static inline void destroy_context(struct mm_struct *mm) dsemul_mm_cleanup(mm); } +#define activate_mm(prev, next) switch_mm(prev, next, current) #define deactivate_mm(tsk, mm) do { } while (0) -/* - * After we have set current->mm to a new value, this activates - * the context for the new mm so we see the new mappings. - */ -static inline void -activate_mm(struct mm_struct *prev, struct mm_struct *next) -{ - unsigned long flags; - unsigned int cpu = smp_processor_id(); - - local_irq_save(flags); - - htw_stop(); - /* Unconditionally get a new ASID. */ - get_new_mmu_context(next, cpu); - - write_c0_entryhi(cpu_asid(cpu, next)); - TLBMISS_HANDLER_SETUP_PGD(next->pgd); - - /* mark mmu ownership change */ - cpumask_clear_cpu(cpu, mm_cpumask(prev)); - cpumask_set_cpu(cpu, mm_cpumask(next)); - htw_start(); - - local_irq_restore(flags); -} - -/* - * If mm is currently active_mm, we can't really drop it. Instead, - * we will get a new one for it. - */ static inline void -drop_mmu_context(struct mm_struct *mm, unsigned cpu) +drop_mmu_context(struct mm_struct *mm) { unsigned long flags; + unsigned int cpu; + u32 old_mmid; + u64 ctx; local_irq_save(flags); - htw_stop(); - if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { - get_new_mmu_context(mm, cpu); + cpu = smp_processor_id(); + ctx = cpu_context(cpu, mm); + + if (!ctx) { + /* no-op */ + } else if (cpu_has_mmid) { + /* + * Globally invalidating TLB entries associated with the MMID + * is pretty cheap using the GINVT instruction, so we'll do + * that rather than incur the overhead of allocating a new + * MMID. The latter would be especially difficult since MMIDs + * are global & other CPUs may be actively using ctx. + */ + htw_stop(); + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(ctx & cpu_asid_mask(&cpu_data[cpu])); + mtc0_tlbw_hazard(); + ginvt_mmid(); + sync_ginv(); + write_c0_memorymapid(old_mmid); + instruction_hazard(); + htw_start(); + } else if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { + /* + * mm is currently active, so we can't really drop it. + * Instead we bump the ASID. + */ + htw_stop(); + get_new_mmu_context(mm); write_c0_entryhi(cpu_asid(cpu, mm)); + htw_start(); } else { /* will get a new context next time */ - cpu_context(cpu, mm) = 0; + set_cpu_context(cpu, mm, 0); } - htw_start(); + local_irq_restore(flags); } diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h index b4d19c21b62c..d7fdcf0a0088 100644 --- a/arch/mips/include/asm/octeon/cvmx-helper-board.h +++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h @@ -119,18 +119,6 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port); extern int __cvmx_helper_board_interface_probe(int interface, int supported_ports); -/** - * Enable packet input/output from the hardware. This function is - * called after by cvmx_helper_packet_hardware_enable() to - * perform board specific initialization. For most boards - * nothing is needed. 
- * - * @interface: Interface to enable - * - * Returns Zero on success, negative on failure - */ -extern int __cvmx_helper_board_hardware_enable(int interface); - enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(void); #endif /* __CVMX_HELPER_BOARD_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h deleted file mode 100644 index 7a928230b0c0..000000000000 --- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h +++ /dev/null @@ -1,276 +0,0 @@ -/***********************license start*************** - * Author: Cavium Networks - * - * Contact: support@caviumnetworks.com - * This file is part of the OCTEON SDK - * - * Copyright (c) 2003-2012 Cavium Networks - * - * This file is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, Version 2, as - * published by the Free Software Foundation. - * - * This file is distributed in the hope that it will be useful, but - * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or - * NONINFRINGEMENT. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License - * along with this file; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * or visit http://www.gnu.org/licenses/. - * - * This file may also be available under a different license from Cavium. - * Contact Cavium Networks for more information - ***********************license end**************************************/ - -#ifndef __CVMX_SMIX_DEFS_H__ -#define __CVMX_SMIX_DEFS_H__ - -static inline uint64_t CVMX_SMIX_CLK(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003818ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001818ull) + (offset) * 256; -} - -static inline uint64_t CVMX_SMIX_CMD(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003800ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001800ull) + (offset) * 256; -} - -static inline 
uint64_t CVMX_SMIX_EN(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003820ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001820ull) + (offset) * 256; -} - -static inline uint64_t CVMX_SMIX_RD_DAT(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003810ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001810ull) + (offset) * 256; -} - -static inline uint64_t CVMX_SMIX_WR_DAT(unsigned long offset) -{ - switch (cvmx_get_octeon_family()) { - case OCTEON_CN30XX & OCTEON_FAMILY_MASK: - case OCTEON_CN50XX & OCTEON_FAMILY_MASK: - case OCTEON_CN38XX & OCTEON_FAMILY_MASK: - case OCTEON_CN31XX & OCTEON_FAMILY_MASK: - case OCTEON_CN58XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256; - case OCTEON_CNF71XX & OCTEON_FAMILY_MASK: - case OCTEON_CN61XX & OCTEON_FAMILY_MASK: - case OCTEON_CN56XX & OCTEON_FAMILY_MASK: - case OCTEON_CN66XX & OCTEON_FAMILY_MASK: - case OCTEON_CN52XX & OCTEON_FAMILY_MASK: - case OCTEON_CN63XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256; - case OCTEON_CN68XX & OCTEON_FAMILY_MASK: - return CVMX_ADD_IO_SEG(0x0001180000003808ull) + (offset) * 128; - } - return CVMX_ADD_IO_SEG(0x0001180000001808ull) + (offset) * 256; -} - -union cvmx_smix_clk { - uint64_t u64; - struct cvmx_smix_clk_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_25_63:39; - uint64_t mode:1; - uint64_t reserved_21_23:3; - uint64_t sample_hi:5; - uint64_t sample_mode:1; - uint64_t reserved_14_14:1; - uint64_t clk_idle:1; - uint64_t preamble:1; - uint64_t sample:4; - uint64_t phase:8; -#else - uint64_t phase:8; - uint64_t sample:4; - uint64_t preamble:1; - uint64_t clk_idle:1; - uint64_t reserved_14_14:1; - uint64_t sample_mode:1; - uint64_t sample_hi:5; - uint64_t reserved_21_23:3; - uint64_t mode:1; - uint64_t reserved_25_63:39; -#endif - } s; - struct cvmx_smix_clk_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_21_63:43; - uint64_t sample_hi:5; - uint64_t sample_mode:1; - uint64_t reserved_14_14:1; - uint64_t clk_idle:1; - uint64_t preamble:1; - uint64_t sample:4; - uint64_t phase:8; -#else - uint64_t phase:8; 
- uint64_t sample:4; - uint64_t preamble:1; - uint64_t clk_idle:1; - uint64_t reserved_14_14:1; - uint64_t sample_mode:1; - uint64_t sample_hi:5; - uint64_t reserved_21_63:43; -#endif - } cn30xx; -}; - -union cvmx_smix_cmd { - uint64_t u64; - struct cvmx_smix_cmd_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_18_63:46; - uint64_t phy_op:2; - uint64_t reserved_13_15:3; - uint64_t phy_adr:5; - uint64_t reserved_5_7:3; - uint64_t reg_adr:5; -#else - uint64_t reg_adr:5; - uint64_t reserved_5_7:3; - uint64_t phy_adr:5; - uint64_t reserved_13_15:3; - uint64_t phy_op:2; - uint64_t reserved_18_63:46; -#endif - } s; - struct cvmx_smix_cmd_cn30xx { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_17_63:47; - uint64_t phy_op:1; - uint64_t reserved_13_15:3; - uint64_t phy_adr:5; - uint64_t reserved_5_7:3; - uint64_t reg_adr:5; -#else - uint64_t reg_adr:5; - uint64_t reserved_5_7:3; - uint64_t phy_adr:5; - uint64_t reserved_13_15:3; - uint64_t phy_op:1; - uint64_t reserved_17_63:47; -#endif - } cn30xx; -}; - -union cvmx_smix_en { - uint64_t u64; - struct cvmx_smix_en_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_1_63:63; - uint64_t en:1; -#else - uint64_t en:1; - uint64_t reserved_1_63:63; -#endif - } s; -}; - -union cvmx_smix_rd_dat { - uint64_t u64; - struct cvmx_smix_rd_dat_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_18_63:46; - uint64_t pending:1; - uint64_t val:1; - uint64_t dat:16; -#else - uint64_t dat:16; - uint64_t val:1; - uint64_t pending:1; - uint64_t reserved_18_63:46; -#endif - } s; -}; - -union cvmx_smix_wr_dat { - uint64_t u64; - struct cvmx_smix_wr_dat_s { -#ifdef __BIG_ENDIAN_BITFIELD - uint64_t reserved_18_63:46; - uint64_t pending:1; - uint64_t val:1; - uint64_t dat:16; -#else - uint64_t dat:16; - uint64_t val:1; - uint64_t pending:1; - uint64_t reserved_18_63:46; -#endif - } s; -}; - -#endif diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h index 3206245d1ed6..23574c27eb40 100644 --- a/arch/mips/include/asm/pci/bridge.h +++ b/arch/mips/include/asm/pci/bridge.h @@ -45,18 +45,21 @@ #ifndef __ASSEMBLY__ -/* - * All accesses to bridge hardware registers must be done - * using 32-bit loads and stores. 
- */ -typedef u32 bridgereg_t; +#define ATE_V 0x01 +#define ATE_CO 0x02 +#define ATE_PREC 0x04 +#define ATE_PREF 0x08 +#define ATE_BAR 0x10 + +#define ATE_PFNSHIFT 12 +#define ATE_TIDSHIFT 8 +#define ATE_RMFSHIFT 48 -typedef u64 bridge_ate_t; +#define mkate(xaddr, xid, attr) (((xaddr) & 0x0000fffffffff000ULL) | \ + ((xid)<sysdata)) -extern void register_bridge_irq(unsigned int irq); -extern int request_bridge_irq(struct bridge_controller *bc); +#define bridge_read(bc, reg) __raw_readl(&bc->base->reg) +#define bridge_write(bc, reg, val) __raw_writel(val, &bc->base->reg) +#define bridge_set(bc, reg, val) \ + __raw_writel(__raw_readl(&bc->base->reg) | (val), &bc->base->reg) +#define bridge_clr(bc, reg, val) \ + __raw_writel(__raw_readl(&bc->base->reg) & ~(val), &bc->base->reg) + +extern int request_bridge_irq(struct bridge_controller *bc, int pin); extern struct pci_ops bridge_pci_ops; diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 57933fc8fd98..4ccb465ef3f2 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -17,6 +17,7 @@ #include #endif +#include #include #include @@ -204,49 +205,11 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ -#ifdef CONFIG_SMP - /* - * For SMP, multiple CPUs can race, so we need to do - * this atomically. - */ - unsigned long page_global = _PAGE_GLOBAL; - unsigned long tmp; - - if (kernel_uses_llsc && R10000_LLSC_WAR) { - __asm__ __volatile__ ( - " .set push \n" - " .set arch=r4000 \n" - " .set noreorder \n" - "1:" __LL "%[tmp], %[buddy] \n" - " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqzl %[tmp], 1b \n" - " nop \n" - "2: \n" - " .set pop \n" - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) - : [global] "r" (page_global)); - } else if (kernel_uses_llsc) { - __asm__ __volatile__ ( - " .set push \n" - " .set "MIPS_ISA_ARCH_LEVEL" \n" - " .set noreorder \n" - "1:" __LL "%[tmp], %[buddy] \n" - " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqz %[tmp], 1b \n" - " nop \n" - "2: \n" - " .set pop \n" - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) - : [global] "r" (page_global)); - } -#else /* !CONFIG_SMP */ - if (pte_none(*buddy)) - pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; -#endif /* CONFIG_SMP */ +# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32) + cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL); +# else + cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL); +# endif } #endif } diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h index b7123f9c0785..65618ff1280c 100644 --- a/arch/mips/include/asm/smp-ops.h +++ b/arch/mips/include/asm/smp-ops.h @@ -29,6 +29,7 @@ struct plat_smp_ops { int (*boot_secondary)(int cpu, struct task_struct *idle); void (*smp_setup)(void); void (*prepare_cpus)(unsigned int max_cpus); + void (*prepare_boot_cpu)(void); #ifdef CONFIG_HOTPLUG_CPU int (*cpu_disable)(void); void (*cpu_die)(unsigned int cpu); diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h index 66814f8ba8e8..837d23e24976 100644 --- a/arch/mips/include/asm/sn/addrs.h +++ b/arch/mips/include/asm/sn/addrs.h @@ -27,16 +27,11 @@ #ifndef __ASSEMBLY__ -#define PS_UINT_CAST (unsigned long) #define UINT64_CAST (unsigned long) -#define HUBREG_CAST (volatile hubreg_t *) - #else /* __ASSEMBLY__ */ -#define PS_UINT_CAST #define UINT64_CAST -#define HUBREG_CAST #endif /* 
__ASSEMBLY__ */ @@ -256,41 +251,22 @@ * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S(). * They're always safe. */ -#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x))) -#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \ - 0x800000 + (_x))) -#ifdef CONFIG_SGI_IP27 -#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \ - 0x800000 + (_x))) -#endif /* CONFIG_SGI_IP27 */ +#define LOCAL_HUB_ADDR(_x) (IALIAS_BASE + (_x)) +#define REMOTE_HUB_ADDR(_n, _x) ((NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x))) #ifndef __ASSEMBLY__ -#define HUB_L(_a) *(_a) -#define HUB_S(_a, _d) *(_a) = (_d) +#define LOCAL_HUB_PTR(_x) ((u64 *)LOCAL_HUB_ADDR((_x))) +#define REMOTE_HUB_PTR(_n, _x) ((u64 *)REMOTE_HUB_ADDR((_n), (_x))) -#define LOCAL_HUB_L(_r) HUB_L(LOCAL_HUB_ADDR(_r)) -#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d)) -#define REMOTE_HUB_L(_n, _r) HUB_L(REMOTE_HUB_ADDR((_n), (_r))) -#define REMOTE_HUB_S(_n, _r, _d) HUB_S(REMOTE_HUB_ADDR((_n), (_r)), (_d)) -#define REMOTE_HUB_PI_L(_n, _sn, _r) HUB_L(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r))) -#define REMOTE_HUB_PI_S(_n, _sn, _r, _d) HUB_S(REMOTE_HUB_PI_ADDR((_n), (_sn), (_r)), (_d)) +#define LOCAL_HUB_L(_r) __raw_readq(LOCAL_HUB_PTR(_r)) +#define LOCAL_HUB_S(_r, _d) __raw_writeq((_d), LOCAL_HUB_PTR(_r)) +#define REMOTE_HUB_L(_n, _r) __raw_readq(REMOTE_HUB_PTR((_n), (_r))) +#define REMOTE_HUB_S(_n, _r, _d) __raw_writeq((_d), \ + REMOTE_HUB_PTR((_n), (_r))) #endif /* !__ASSEMBLY__ */ -/* - * The following macros are used to get to a hub/bridge register, given - * the base of the register space. - */ -#define HUB_REG_PTR(_base, _off) \ - (HUBREG_CAST((__psunsigned_t)(_base) + (__psunsigned_t)(_off))) - -#define HUB_REG_PTR_L(_base, _off) \ - HUB_L(HUB_REG_PTR((_base), (_off))) - -#define HUB_REG_PTR_S(_base, _off, _data) \ - HUB_S(HUB_REG_PTR((_base), (_off)), (_data)) - /* * Software structure locations -- permanently fixed * See diagram in kldir.h @@ -387,44 +363,14 @@ #define SYMMON_STK_END(nasid) (SYMMON_STK_ADDR(nasid, 0) + KLD_SYMMON_STK(nasid)->size) -/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a - * relocatable program - */ -#define UNIX_DEBUG_LOADADDR 0x300000 -#define SYMMON_LOADADDR(nasid) \ - TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000)) - -#define FREEMEM_OFFSET(nasid) KLD_FREEMEM(nasid)->offset -#define FREEMEM_ADDR(nasid) SYMMON_STK_END(nasid) -/* - * XXX - * Fix this. FREEMEM_ADDR should be aware of if symmon is loaded. 
- * Also, it should take into account what prom thinks to be a safe - * address - PHYS_TO_K0(NODE_OFFSET(nasid) + FREEMEM_OFFSET(nasid)) - */ -#define FREEMEM_SIZE(nasid) KLD_FREEMEM(nasid)->size - -#define PI_ERROR_OFFSET(nasid) KLD_PI_ERROR(nasid)->offset -#define PI_ERROR_ADDR(nasid) \ - TO_NODE_UNCAC((nasid), PI_ERROR_OFFSET(nasid)) -#define PI_ERROR_SIZE(nasid) KLD_PI_ERROR(nasid)->size - #define NODE_OFFSET_TO_K0(_nasid, _off) \ PHYS_TO_K0((NODE_OFFSET(_nasid) + (_off)) | CAC_BASE) #define NODE_OFFSET_TO_K1(_nasid, _off) \ TO_UNCAC((NODE_OFFSET(_nasid) + (_off)) | UNCAC_BASE) -#define K0_TO_NODE_OFFSET(_k0addr) \ - ((__psunsigned_t)(_k0addr) & NODE_ADDRSPACE_MASK) #define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer #define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size -#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer -#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size - -#define GPDA_ADDR(nasid) TO_NODE_CAC(nasid, GPDA_OFFSET) - #endif /* !__ASSEMBLY__ */ diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h index 471e6870d876..3f1fb1454749 100644 --- a/arch/mips/include/asm/sn/arch.h +++ b/arch/mips/include/asm/sn/arch.h @@ -17,8 +17,6 @@ #include #endif -typedef u64 hubreg_t; - #define cputonasid(cpu) (sn_cpu_info[(cpu)].p_nasid) #define cputoslice(cpu) (sn_cpu_info[(cpu)].p_slice) #define makespnum(_nasid, _slice) \ diff --git a/arch/mips/include/asm/sn/io.h b/arch/mips/include/asm/sn/io.h index d5174d04538c..211f1e83b523 100644 --- a/arch/mips/include/asm/sn/io.h +++ b/arch/mips/include/asm/sn/io.h @@ -44,7 +44,7 @@ IIO_ITTE_PUT((nasid), HUB_PIO_MAP_TO_MEM, \ (bigwin), IIO_ITTE_INVALID_WIDGET, 0) -#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_ADDR((nasid), IIO_ITTE(bigwin)) +#define IIO_ITTE_GET(nasid, bigwin) REMOTE_HUB_PTR((nasid), IIO_ITTE(bigwin)) /* * Macro which takes the widget number, and returns the diff --git a/arch/mips/include/asm/sn/sn0/addrs.h b/arch/mips/include/asm/sn/sn0/addrs.h index 6b53070f400f..f13df84edfdd 100644 --- a/arch/mips/include/asm/sn/sn0/addrs.h +++ b/arch/mips/include/asm/sn/sn0/addrs.h @@ -134,11 +134,6 @@ #define CALIAS_BASE CAC_BASE - - -#define BRIDGE_REG_PTR(_base, _off) ((volatile bridgereg_t *) \ - ((__psunsigned_t)(_base) + (__psunsigned_t)(_off))) - #define SN0_WIDGET_BASE(_nasid, _wid) (NODE_SWIN_BASE((_nasid), (_wid))) /* Turn on sable logging for the processors whose bits are set. 
*/ diff --git a/arch/mips/include/asm/tlbflush.h b/arch/mips/include/asm/tlbflush.h index 40a361092491..9789e7a32def 100644 --- a/arch/mips/include/asm/tlbflush.h +++ b/arch/mips/include/asm/tlbflush.h @@ -14,7 +14,6 @@ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages */ extern void local_flush_tlb_all(void); -extern void local_flush_tlb_mm(struct mm_struct *mm); extern void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void local_flush_tlb_kernel_range(unsigned long start, @@ -23,6 +22,8 @@ extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); extern void local_flush_tlb_one(unsigned long vaddr); +#include + #ifdef CONFIG_SMP extern void flush_tlb_all(void); @@ -36,7 +37,7 @@ extern void flush_tlb_one(unsigned long vaddr); #else /* CONFIG_SMP */ #define flush_tlb_all() local_flush_tlb_all() -#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_mm(mm) drop_mmu_context(mm) #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end) #define flush_tlb_kernel_range(vmaddr,end) \ local_flush_tlb_kernel_range(vmaddr, end) diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index d43c1dc6ef15..62b298c50905 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -69,7 +69,6 @@ extern u64 __ua_limit; #define USER_DS ((mm_segment_t) { __UA_LIMIT }) #endif -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h index b23d74a601b3..071053ece677 100644 --- a/arch/mips/include/asm/unistd.h +++ b/arch/mips/include/asm/unistd.h @@ -45,29 +45,16 @@ #define __ARCH_WANT_SYS_SIGPROCMASK # ifdef CONFIG_32BIT # define __ARCH_WANT_STAT64 -# define __ARCH_WANT_SYS_TIME +# define __ARCH_WANT_SYS_TIME32 # endif # ifdef CONFIG_MIPS32_O32 -# define __ARCH_WANT_COMPAT_SYS_TIME +# define __ARCH_WANT_SYS_TIME32 # endif #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_CLONE /* whitelists for checksyscalls */ -#define __IGNORE_select -#define __IGNORE_vfork -#define __IGNORE_time -#define __IGNORE_uselib #define __IGNORE_fadvise64_64 -#define __IGNORE_getdents64 -#if _MIPS_SIM == _MIPS_SIM_NABI32 -#define __IGNORE_truncate64 -#define __IGNORE_ftruncate64 -#define __IGNORE_stat64 -#define __IGNORE_lstat64 -#define __IGNORE_fstat64 -#define __IGNORE_fstatat64 -#endif #endif /* !__ASSEMBLY__ */ diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h index 3035ca499cd8..c2b40969eb1f 100644 --- a/arch/mips/include/uapi/asm/mman.h +++ b/arch/mips/include/uapi/asm/mman.h @@ -27,9 +27,7 @@ /* * Flags for mmap */ -#define MAP_SHARED 0x001 /* Share changes */ -#define MAP_PRIVATE 0x002 /* Changes are private */ -#define MAP_SHARED_VALIDATE 0x003 /* share + validate extension flags */ +/* 0x01 - 0x03 are defined in linux/mman.h */ #define MAP_TYPE 0x00f /* Mask for type of mapping */ #define MAP_FIXED 0x010 /* Interpret addr exactly */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 71370fb3ceef..eb9f33f8a8b3 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -11,6 +11,7 @@ #define _UAPI_ASM_SOCKET_H #include +#include /* * For setsockopt(2) @@ -38,8 +39,8 @@ #define SO_RCVBUF 0x1002 /* Receive buffer. 
*/ #define SO_SNDLOWAT 0x1003 /* send low-water mark */ #define SO_RCVLOWAT 0x1004 /* receive low-water mark */ -#define SO_SNDTIMEO 0x1005 /* send timeout */ -#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_SNDTIMEO_OLD 0x1005 /* send timeout */ +#define SO_RCVTIMEO_OLD 0x1006 /* receive timeout */ #define SO_ACCEPTCONN 0x1009 #define SO_PROTOCOL 0x1028 /* protocol type */ #define SO_DOMAIN 0x1029 /* domain/socket family */ @@ -65,21 +66,14 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 30 #define SO_SNDBUFFORCE 31 #define SO_RCVBUFFORCE 33 #define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #define SO_MARK 36 -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 40 #define SO_WIFI_STATUS 41 @@ -126,4 +120,41 @@ #define SO_TXTIME 61 #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? 
SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c index afb40f8bce96..7e63c54eb8d2 100644 --- a/arch/mips/jz4740/setup.c +++ b/arch/mips/jz4740/setup.c @@ -31,7 +31,6 @@ #define JZ4740_EMC_SDRAM_CTRL 0x80 - static void __init jz4740_detect_mem(void) { void __iomem *jz_emc_base; @@ -66,15 +65,22 @@ static unsigned long __init get_board_mach_type(const void *fdt) void __init plat_mem_setup(void) { int offset; + void *dtb; jz4740_reset_init(); - __dt_setup_arch(__dtb_start); - offset = fdt_path_offset(__dtb_start, "/memory"); + if (__dtb_start != __dtb_end) + dtb = __dtb_start; + else + dtb = (void *)fw_passed_dtb; + + __dt_setup_arch(dtb); + + offset = fdt_path_offset(dtb, "/memory"); if (offset < 0) jz4740_detect_mem(); - mips_machtype = get_board_mach_type(__dtb_start); + mips_machtype = get_board_mach_type(dtb); } void __init device_tree_init(void) diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c index 0b9535bc2c53..6b2a4a902a98 100644 --- a/arch/mips/kernel/cmpxchg.c +++ b/arch/mips/kernel/cmpxchg.c @@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, unsigned long new, unsigned int size) { - u32 mask, old32, new32, load32; + u32 mask, old32, new32, load32, load; volatile u32 *ptr32; unsigned int shift; - u8 load; /* Check that ptr is naturally aligned */ WARN_ON((unsigned long)ptr & (size - 1)); diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 95b18a194f53..d5e335e6846a 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -872,10 +872,19 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) static inline unsigned int decode_config5(struct cpuinfo_mips *c) { - unsigned int config5; + unsigned int config5, max_mmid_width; + unsigned long asid_mask; config5 = read_c0_config5(); config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); + + if (cpu_has_mips_r6) { + if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid) + config5 |= MIPS_CONF5_MI; + else + config5 &= ~MIPS_CONF5_MI; + } + write_c0_config5(config5); if (config5 & MIPS_CONF5_EVA) @@ -894,6 +903,50 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) if (config5 & MIPS_CONF5_CRCP) elf_hwcap |= HWCAP_MIPS_CRC32; + if (cpu_has_mips_r6) { + /* Ensure the write to config5 above takes effect */ + back_to_back_c0_hazard(); + + /* Check whether we successfully enabled MMID support */ + config5 = read_c0_config5(); + if (config5 & MIPS_CONF5_MI) + c->options |= MIPS_CPU_MMID; + + /* + * Warn if we've hardcoded cpu_has_mmid to a value unsuitable + * for the CPU we're running on, or if CPUs in an SMP system + * have inconsistent MMID support. + */ + WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI)); + + if (cpu_has_mmid) { + write_c0_memorymapid(~0ul); + back_to_back_c0_hazard(); + asid_mask = read_c0_memorymapid(); + + /* + * We maintain a bitmap to track MMID allocation, and + * need a sensible upper bound on the size of that + * bitmap. The initial CPU with MMID support (I6500) + * supports 16 bit MMIDs, which gives us an 8KiB + * bitmap. The architecture recommends that hardware + * support 32 bit MMIDs, which would give us a 512MiB + * bitmap - that's too big in most cases. 
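/*
 * Illustrative aside, not part of the patch above: the 8KiB / 512MiB figures
 * quoted in this comment follow from tracking one allocation bit per possible
 * MMID value, so the bitmap needs 2^width / 8 bytes. The helper name below is
 * made up purely for this standalone sketch.
 */
#include <stdio.h>

static unsigned long long mmid_bitmap_bytes(unsigned int width_bits)
{
	/* one bit per MMID value, rounded up to whole bytes */
	return ((1ULL << width_bits) + 7) / 8;
}

int main(void)
{
	printf("16-bit MMIDs -> %llu bytes (8 KiB)\n", mmid_bitmap_bytes(16));
	printf("32-bit MMIDs -> %llu bytes (512 MiB)\n", mmid_bitmap_bytes(32));
	return 0;
}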
+ * + * Cap MMID width at 16 bits for now & we can revisit + * this if & when hardware supports anything wider. + */ + max_mmid_width = 16; + if (asid_mask > GENMASK(max_mmid_width - 1, 0)) { + pr_info("Capping MMID width at %d bits", + max_mmid_width); + asid_mask = GENMASK(max_mmid_width - 1, 0); + } + + set_cpu_asid_mask(c, asid_mask); + } + } + return config5 & MIPS_CONF_M; } diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 2ea0ec95efe9..4b5e1f2bfbce 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -86,7 +86,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return -EFAULT; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); @@ -111,7 +111,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, ip -= 4; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); @@ -135,7 +135,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, return -EFAULT; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); set_fs(old_fs); diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index ba150c755fcc..85b6c60f285d 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void) void __init init_IRQ(void) { int i; + unsigned int order = get_order(IRQ_STACK_SIZE); for (i = 0; i < NR_IRQS; i++) irq_set_noprobe(i); @@ -62,8 +63,7 @@ void __init init_IRQ(void) arch_init_irq(); for_each_possible_cpu(i) { - int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE; - void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages); + void *s = (void *)__get_free_pages(GFP_KERNEL, order); irq_stack[i] = s; pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i, diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 149100e1bc7c..6e574c02e4c3 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -212,7 +212,7 @@ void kgdb_call_nmi_hook(void *ignored) mm_segment_t old_fs; old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); kgdb_nmicallback(raw_smp_processor_id(), NULL); @@ -318,7 +318,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, /* Kernel mode. Set correct address limit */ old_fs = get_fs(); - set_fs(get_ds()); + set_fs(KERNEL_DS); if (atomic_read(&kgdb_active) != -1) kgdb_nmicallback(smp_processor_id(), regs); diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 8f5bd04f320a..537e8d091874 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -382,8 +382,8 @@ void mips_cm_error_report(void) sc_bit ? 
"True" : "False", cm2_cmd[cmd_bits], sport_bits); } - pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error, - cm2_causes[cause], buf); + pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error, + cm2_causes[cause], buf); pr_err("CM_ADDR =%08llx\n", cm_addr); pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]); } else { /* CM3 */ @@ -457,5 +457,5 @@ void mips_cm_error_report(void) } /* reprime cause register */ - write_gcr_error_cause(0); + write_gcr_error_cause(cm_error); } diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index c50c89a978f1..b4d210bfcdae 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -2351,23 +2351,10 @@ DEFINE_SHOW_ATTRIBUTE(mipsr2_clear); static int __init mipsr2_init_debugfs(void) { - struct dentry *mipsr2_emul; - - if (!mips_debugfs_dir) - return -ENODEV; - - mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, - mips_debugfs_dir, NULL, - &mipsr2_emul_fops); - if (!mipsr2_emul) - return -ENOMEM; - - mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, - mips_debugfs_dir, NULL, - &mipsr2_clear_fops); - if (!mipsr2_emul) - return -ENOMEM; - + debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL, + &mipsr2_emul_fops); + debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir, + NULL, &mipsr2_clear_fops); return 0; } diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 6829a064aac8..339870ed92f7 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) static int get_frame_info(struct mips_frame_info *info) { bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); - union mips_instruction insn, *ip, *ip_end; + union mips_instruction insn, *ip; const unsigned int max_insns = 128; unsigned int last_insn_size = 0; unsigned int i; @@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info) if (!ip) goto err; - ip_end = (void *)ip + info->func_size; - - for (i = 0; i < max_insns && ip < ip_end; i++) { + for (i = 0; i < max_insns; i++) { ip = (void *)ip + last_insn_size; + if (is_mmips && mm_insn_16bit(ip->halfword[0])) { insn.word = ip->halfword[0] << 16; last_insn_size = 2; diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c index 2703f218202e..0a9bd7b0983b 100644 --- a/arch/mips/kernel/segment.c +++ b/arch/mips/kernel/segment.c @@ -95,18 +95,9 @@ static const struct file_operations segments_fops = { static int __init segments_info(void) { - struct dentry *segments; - - if (cpu_has_segments) { - if (!mips_debugfs_dir) - return -ENODEV; - - segments = debugfs_create_file("segments", S_IRUGO, - mips_debugfs_dir, NULL, - &segments_fops); - if (!segments) - return -ENOMEM; - } + if (cpu_has_segments) + debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL, + &segments_fops); return 0; } diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 8c6c48ed786a..8d1dc6c71173 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -384,7 +384,8 @@ static void __init bootmem_init(void) init_initrd(); reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end)); - memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT); + memblock_reserve(PHYS_OFFSET, + (reserved_end << PAGE_SHIFT) - PHYS_OFFSET); /* * max_low_pfn is not a number of pages. 
The number of pages @@ -918,6 +919,9 @@ static void __init resource_init(void) end = HIGHMEM_START - 1; res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct resource)); res->start = start; res->end = end; @@ -1010,12 +1014,7 @@ unsigned long fw_passed_dtb; struct dentry *mips_debugfs_dir; static int __init debugfs_mips(void) { - struct dentry *d; - - d = debugfs_create_dir("mips", NULL); - if (!d) - return -ENOMEM; - mips_debugfs_dir = d; + mips_debugfs_dir = debugfs_create_dir("mips", NULL); return 0; } arch_initcall(debugfs_mips); diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index d84b9066b465..bc4bb3c6bd00 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -39,6 +39,7 @@ #include #include +#include #include #include #include @@ -443,6 +444,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) /* preload SMP state for boot cpu */ void smp_prepare_boot_cpu(void) { + if (mp_ops->prepare_boot_cpu) + mp_ops->prepare_boot_cpu(); set_cpu_possible(0, true); set_cpu_online(0, true); } @@ -482,12 +485,21 @@ static void flush_tlb_all_ipi(void *info) void flush_tlb_all(void) { + if (cpu_has_mmid) { + htw_stop(); + ginvt_full(); + sync_ginv(); + instruction_hazard(); + htw_start(); + return; + } + on_each_cpu(flush_tlb_all_ipi, NULL, 1); } static void flush_tlb_mm_ipi(void *mm) { - local_flush_tlb_mm((struct mm_struct *)mm); + drop_mmu_context((struct mm_struct *)mm); } /* @@ -530,17 +542,22 @@ void flush_tlb_mm(struct mm_struct *mm) { preempt_disable(); - if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + if (cpu_has_mmid) { + /* + * No need to worry about other CPUs - the ginvt in + * drop_mmu_context() will be globalized. + */ + } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { smp_on_other_tlbs(flush_tlb_mm_ipi, mm); } else { unsigned int cpu; for_each_online_cpu(cpu) { if (cpu != smp_processor_id() && cpu_context(cpu, mm)) - cpu_context(cpu, mm) = 0; + set_cpu_context(cpu, mm, 0); } } - local_flush_tlb_mm(mm); + drop_mmu_context(mm); preempt_enable(); } @@ -561,9 +578,26 @@ static void flush_tlb_range_ipi(void *info) void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; + unsigned long addr; + u32 old_mmid; preempt_disable(); - if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + if (cpu_has_mmid) { + htw_stop(); + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(cpu_asid(0, mm)); + mtc0_tlbw_hazard(); + addr = round_down(start, PAGE_SIZE * 2); + end = round_up(end, PAGE_SIZE * 2); + do { + ginvt_va_mmid(addr); + sync_ginv(); + addr += PAGE_SIZE * 2; + } while (addr < end); + write_c0_memorymapid(old_mmid); + instruction_hazard(); + htw_start(); + } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = start, @@ -571,6 +605,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l }; smp_on_other_tlbs(flush_tlb_range_ipi, &fd); + local_flush_tlb_range(vma, start, end); } else { unsigned int cpu; int exec = vma->vm_flags & VM_EXEC; @@ -583,10 +618,10 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l * mm has been completely unused by that CPU. 
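/*
 * Illustrative aside, not part of the patch: in the cpu_has_mmid branch of
 * flush_tlb_range() above, the range is widened to PAGE_SIZE * 2 boundaries
 * because a MIPS TLB entry maps an even/odd pair of pages, so one globalized
 * invalidate per pair covers a whole entry. A simplified standalone sketch of
 * that address walk; the rounding helpers and constants here are stand-ins,
 * not kernel code.
 */
#include <stdio.h>

#define PAGE_SIZE_DEMO 4096UL

static unsigned long round_down_to(unsigned long x, unsigned long a)
{
	return x & ~(a - 1);	/* a must be a power of two */
}

static unsigned long round_up_to(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long start = 0x12345678, end = 0x12349000; /* arbitrary VA range */
	unsigned long step = PAGE_SIZE_DEMO * 2;
	unsigned long addr = round_down_to(start, step);
	unsigned long stop = round_up_to(end, step);

	for (; addr < stop; addr += step)
		printf("invalidate pair at %#lx\n", addr); /* ginvt_va_mmid() in the patch */
	return 0;
}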
*/ if (cpu != smp_processor_id() && cpu_context(cpu, mm)) - cpu_context(cpu, mm) = !exec; + set_cpu_context(cpu, mm, !exec); } + local_flush_tlb_range(vma, start, end); } - local_flush_tlb_range(vma, start, end); preempt_enable(); } @@ -616,14 +651,28 @@ static void flush_tlb_page_ipi(void *info) void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { + u32 old_mmid; + preempt_disable(); - if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) { + if (cpu_has_mmid) { + htw_stop(); + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(cpu_asid(0, vma->vm_mm)); + mtc0_tlbw_hazard(); + ginvt_va_mmid(page); + sync_ginv(); + write_c0_memorymapid(old_mmid); + instruction_hazard(); + htw_start(); + } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || + (current->mm != vma->vm_mm)) { struct flush_tlb_data fd = { .vma = vma, .addr1 = page, }; smp_on_other_tlbs(flush_tlb_page_ipi, &fd); + local_flush_tlb_page(vma, page); } else { unsigned int cpu; @@ -635,10 +684,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) * by that CPU. */ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) - cpu_context(cpu, vma->vm_mm) = 1; + set_cpu_context(cpu, vma->vm_mm, 1); } + local_flush_tlb_page(vma, page); } - local_flush_tlb_page(vma, page); preempt_enable(); } diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c index eaed550e79a2..ab4e3e1b138d 100644 --- a/arch/mips/kernel/spinlock_test.c +++ b/arch/mips/kernel/spinlock_test.c @@ -118,23 +118,10 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); static int __init spinlock_test(void) { - struct dentry *d; - - if (!mips_debugfs_dir) - return -ENODEV; - - d = debugfs_create_file("spin_single", S_IRUGO, - mips_debugfs_dir, NULL, - &fops_ss); - if (!d) - return -ENOMEM; - - d = debugfs_create_file("spin_multi", S_IRUGO, - mips_debugfs_dir, NULL, - &fops_multi); - if (!d) - return -ENOMEM; - + debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, + &fops_ss); + debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, + &fops_multi); return 0; } device_initcall(spinlock_test); diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index 53d5862649ae..15f4117900ee 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -37,11 +37,11 @@ 27 n32 madvise sys_madvise 28 n32 shmget sys_shmget 29 n32 shmat sys_shmat -30 n32 shmctl compat_sys_shmctl +30 n32 shmctl compat_sys_old_shmctl 31 n32 dup sys_dup 32 n32 dup2 sys_dup2 33 n32 pause sys_pause -34 n32 nanosleep compat_sys_nanosleep +34 n32 nanosleep sys_nanosleep_time32 35 n32 getitimer compat_sys_getitimer 36 n32 setitimer compat_sys_setitimer 37 n32 alarm sys_alarm @@ -71,12 +71,12 @@ 61 n32 uname sys_newuname 62 n32 semget sys_semget 63 n32 semop sys_semop -64 n32 semctl compat_sys_semctl +64 n32 semctl compat_sys_old_semctl 65 n32 shmdt sys_shmdt 66 n32 msgget sys_msgget 67 n32 msgsnd compat_sys_msgsnd 68 n32 msgrcv compat_sys_msgrcv -69 n32 msgctl compat_sys_msgctl +69 n32 msgctl compat_sys_old_msgctl 70 n32 fcntl compat_sys_fcntl 71 n32 flock sys_flock 72 n32 fsync sys_fsync @@ -133,11 +133,11 @@ 123 n32 capget sys_capget 124 n32 capset sys_capset 125 n32 rt_sigpending compat_sys_rt_sigpending -126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait +126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait_time32 127 n32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo 128 n32 rt_sigsuspend 
compat_sys_rt_sigsuspend 129 n32 sigaltstack compat_sys_sigaltstack -130 n32 utime compat_sys_utime +130 n32 utime sys_utime32 131 n32 mknod sys_mknod 132 n32 personality sys_32_personality 133 n32 ustat compat_sys_ustat @@ -152,7 +152,7 @@ 142 n32 sched_getscheduler sys_sched_getscheduler 143 n32 sched_get_priority_max sys_sched_get_priority_max 144 n32 sched_get_priority_min sys_sched_get_priority_min -145 n32 sched_rr_get_interval compat_sys_sched_rr_get_interval +145 n32 sched_rr_get_interval sys_sched_rr_get_interval_time32 146 n32 mlock sys_mlock 147 n32 munlock sys_munlock 148 n32 mlockall sys_mlockall @@ -161,7 +161,7 @@ 151 n32 pivot_root sys_pivot_root 152 n32 _sysctl compat_sys_sysctl 153 n32 prctl sys_prctl -154 n32 adjtimex compat_sys_adjtimex +154 n32 adjtimex sys_adjtimex_time32 155 n32 setrlimit compat_sys_setrlimit 156 n32 chroot sys_chroot 157 n32 sync sys_sync @@ -202,7 +202,7 @@ 191 n32 fremovexattr sys_fremovexattr 192 n32 tkill sys_tkill 193 n32 reserved193 sys_ni_syscall -194 n32 futex compat_sys_futex +194 n32 futex sys_futex_time32 195 n32 sched_setaffinity compat_sys_sched_setaffinity 196 n32 sched_getaffinity compat_sys_sched_getaffinity 197 n32 cacheflush sys_cacheflush @@ -210,7 +210,7 @@ 199 n32 sysmips __sys_sysmips 200 n32 io_setup compat_sys_io_setup 201 n32 io_destroy sys_io_destroy -202 n32 io_getevents compat_sys_io_getevents +202 n32 io_getevents sys_io_getevents_time32 203 n32 io_submit compat_sys_io_submit 204 n32 io_cancel sys_io_cancel 205 n32 exit_group sys_exit_group @@ -223,29 +223,29 @@ 212 n32 fcntl64 compat_sys_fcntl64 213 n32 set_tid_address sys_set_tid_address 214 n32 restart_syscall sys_restart_syscall -215 n32 semtimedop compat_sys_semtimedop +215 n32 semtimedop sys_semtimedop_time32 216 n32 fadvise64 sys_fadvise64_64 217 n32 statfs64 compat_sys_statfs64 218 n32 fstatfs64 compat_sys_fstatfs64 219 n32 sendfile64 sys_sendfile64 220 n32 timer_create compat_sys_timer_create -221 n32 timer_settime compat_sys_timer_settime -222 n32 timer_gettime compat_sys_timer_gettime +221 n32 timer_settime sys_timer_settime32 +222 n32 timer_gettime sys_timer_gettime32 223 n32 timer_getoverrun sys_timer_getoverrun 224 n32 timer_delete sys_timer_delete -225 n32 clock_settime compat_sys_clock_settime -226 n32 clock_gettime compat_sys_clock_gettime -227 n32 clock_getres compat_sys_clock_getres -228 n32 clock_nanosleep compat_sys_clock_nanosleep +225 n32 clock_settime sys_clock_settime32 +226 n32 clock_gettime sys_clock_gettime32 +227 n32 clock_getres sys_clock_getres_time32 +228 n32 clock_nanosleep sys_clock_nanosleep_time32 229 n32 tgkill sys_tgkill -230 n32 utimes compat_sys_utimes +230 n32 utimes sys_utimes_time32 231 n32 mbind compat_sys_mbind 232 n32 get_mempolicy compat_sys_get_mempolicy 233 n32 set_mempolicy compat_sys_set_mempolicy 234 n32 mq_open compat_sys_mq_open 235 n32 mq_unlink sys_mq_unlink -236 n32 mq_timedsend compat_sys_mq_timedsend -237 n32 mq_timedreceive compat_sys_mq_timedreceive +236 n32 mq_timedsend sys_mq_timedsend_time32 +237 n32 mq_timedreceive sys_mq_timedreceive_time32 238 n32 mq_notify compat_sys_mq_notify 239 n32 mq_getsetattr compat_sys_mq_getsetattr 240 n32 vserver sys_ni_syscall @@ -263,7 +263,7 @@ 252 n32 mkdirat sys_mkdirat 253 n32 mknodat sys_mknodat 254 n32 fchownat sys_fchownat -255 n32 futimesat compat_sys_futimesat +255 n32 futimesat sys_futimesat_time32 256 n32 newfstatat sys_newfstatat 257 n32 unlinkat sys_unlinkat 258 n32 renameat sys_renameat @@ -272,8 +272,8 @@ 261 n32 readlinkat sys_readlinkat 262 n32 fchmodat 
sys_fchmodat 263 n32 faccessat sys_faccessat -264 n32 pselect6 compat_sys_pselect6 -265 n32 ppoll compat_sys_ppoll +264 n32 pselect6 compat_sys_pselect6_time32 +265 n32 ppoll compat_sys_ppoll_time32 266 n32 unshare sys_unshare 267 n32 splice sys_splice 268 n32 sync_file_range sys_sync_file_range @@ -287,14 +287,14 @@ 276 n32 epoll_pwait compat_sys_epoll_pwait 277 n32 ioprio_set sys_ioprio_set 278 n32 ioprio_get sys_ioprio_get -279 n32 utimensat compat_sys_utimensat +279 n32 utimensat sys_utimensat_time32 280 n32 signalfd compat_sys_signalfd 281 n32 timerfd sys_ni_syscall 282 n32 eventfd sys_eventfd 283 n32 fallocate sys_fallocate 284 n32 timerfd_create sys_timerfd_create -285 n32 timerfd_gettime compat_sys_timerfd_gettime -286 n32 timerfd_settime compat_sys_timerfd_settime +285 n32 timerfd_gettime sys_timerfd_gettime32 +286 n32 timerfd_settime sys_timerfd_settime32 287 n32 signalfd4 compat_sys_signalfd4 288 n32 eventfd2 sys_eventfd2 289 n32 epoll_create1 sys_epoll_create1 @@ -306,14 +306,14 @@ 295 n32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 296 n32 perf_event_open sys_perf_event_open 297 n32 accept4 sys_accept4 -298 n32 recvmmsg compat_sys_recvmmsg +298 n32 recvmmsg compat_sys_recvmmsg_time32 299 n32 getdents64 sys_getdents64 300 n32 fanotify_init sys_fanotify_init 301 n32 fanotify_mark sys_fanotify_mark 302 n32 prlimit64 sys_prlimit64 303 n32 name_to_handle_at sys_name_to_handle_at 304 n32 open_by_handle_at sys_open_by_handle_at -305 n32 clock_adjtime compat_sys_clock_adjtime +305 n32 clock_adjtime sys_clock_adjtime32 306 n32 syncfs sys_syncfs 307 n32 sendmmsg compat_sys_sendmmsg 308 n32 setns sys_setns @@ -341,3 +341,24 @@ 330 n32 statx sys_statx 331 n32 rseq sys_rseq 332 n32 io_pgetevents compat_sys_io_pgetevents +# 333 through 402 are unassigned to sync up with generic numbers +403 n32 clock_gettime64 sys_clock_gettime +404 n32 clock_settime64 sys_clock_settime +405 n32 clock_adjtime64 sys_clock_adjtime +406 n32 clock_getres_time64 sys_clock_getres +407 n32 clock_nanosleep_time64 sys_clock_nanosleep +408 n32 timer_gettime64 sys_timer_gettime +409 n32 timer_settime64 sys_timer_settime +410 n32 timerfd_gettime64 sys_timerfd_gettime +411 n32 timerfd_settime64 sys_timerfd_settime +412 n32 utimensat_time64 sys_utimensat +413 n32 pselect6_time64 compat_sys_pselect6_time64 +414 n32 ppoll_time64 compat_sys_ppoll_time64 +416 n32 io_pgetevents_time64 sys_io_pgetevents +417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64 +418 n32 mq_timedsend_time64 sys_mq_timedsend +419 n32 mq_timedreceive_time64 sys_mq_timedreceive +420 n32 semtimedop_time64 sys_semtimedop +421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64 +422 n32 futex_time64 sys_futex +423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl index a8286ccbb66c..c85502e67b44 100644 --- a/arch/mips/kernel/syscalls/syscall_n64.tbl +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl @@ -37,7 +37,7 @@ 27 n64 madvise sys_madvise 28 n64 shmget sys_shmget 29 n64 shmat sys_shmat -30 n64 shmctl sys_shmctl +30 n64 shmctl sys_old_shmctl 31 n64 dup sys_dup 32 n64 dup2 sys_dup2 33 n64 pause sys_pause @@ -71,12 +71,12 @@ 61 n64 uname sys_newuname 62 n64 semget sys_semget 63 n64 semop sys_semop -64 n64 semctl sys_semctl +64 n64 semctl sys_old_semctl 65 n64 shmdt sys_shmdt 66 n64 msgget sys_msgget 67 n64 msgsnd sys_msgsnd 68 n64 msgrcv sys_msgrcv -69 n64 msgctl sys_msgctl +69 n64 msgctl sys_old_msgctl 70 n64 fcntl sys_fcntl 71 n64 flock 
sys_flock 72 n64 fsync sys_fsync @@ -337,3 +337,4 @@ 326 n64 statx sys_statx 327 n64 rseq sys_rseq 328 n64 io_pgetevents sys_io_pgetevents +# 329 through 423 are reserved to sync up with other architectures diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 3d5a47b80d2b..2e063d0f837e 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -20,7 +20,7 @@ 10 o32 unlink sys_unlink 11 o32 execve sys_execve compat_sys_execve 12 o32 chdir sys_chdir -13 o32 time sys_time compat_sys_time +13 o32 time sys_time32 14 o32 mknod sys_mknod 15 o32 chmod sys_chmod 16 o32 lchown sys_lchown @@ -33,13 +33,13 @@ 22 o32 umount sys_oldumount 23 o32 setuid sys_setuid 24 o32 getuid sys_getuid -25 o32 stime sys_stime compat_sys_stime +25 o32 stime sys_stime32 26 o32 ptrace sys_ptrace compat_sys_ptrace 27 o32 alarm sys_alarm # 28 was sys_fstat 28 o32 unused28 sys_ni_syscall 29 o32 pause sys_pause -30 o32 utime sys_utime compat_sys_utime +30 o32 utime sys_utime32 31 o32 stty sys_ni_syscall 32 o32 gtty sys_ni_syscall 33 o32 access sys_access @@ -135,7 +135,7 @@ 121 o32 setdomainname sys_setdomainname 122 o32 uname sys_newuname 123 o32 modify_ldt sys_ni_syscall -124 o32 adjtimex sys_adjtimex compat_sys_adjtimex +124 o32 adjtimex sys_adjtimex_time32 125 o32 mprotect sys_mprotect 126 o32 sigprocmask sys_sigprocmask compat_sys_sigprocmask 127 o32 create_module sys_ni_syscall @@ -176,8 +176,8 @@ 162 o32 sched_yield sys_sched_yield 163 o32 sched_get_priority_max sys_sched_get_priority_max 164 o32 sched_get_priority_min sys_sched_get_priority_min -165 o32 sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval -166 o32 nanosleep sys_nanosleep compat_sys_nanosleep +165 o32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +166 o32 nanosleep sys_nanosleep_time32 167 o32 mremap sys_mremap 168 o32 accept sys_accept 169 o32 bind sys_bind @@ -208,7 +208,7 @@ 194 o32 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 195 o32 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 196 o32 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending -197 o32 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +197 o32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 198 o32 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 199 o32 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 200 o32 pread64 sys_pread64 sys_32_pread @@ -249,12 +249,12 @@ 235 o32 fremovexattr sys_fremovexattr 236 o32 tkill sys_tkill 237 o32 sendfile64 sys_sendfile64 -238 o32 futex sys_futex compat_sys_futex +238 o32 futex sys_futex_time32 239 o32 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 240 o32 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity 241 o32 io_setup sys_io_setup compat_sys_io_setup 242 o32 io_destroy sys_io_destroy -243 o32 io_getevents sys_io_getevents compat_sys_io_getevents +243 o32 io_getevents sys_io_getevents_time32 244 o32 io_submit sys_io_submit compat_sys_io_submit 245 o32 io_cancel sys_io_cancel 246 o32 exit_group sys_exit_group @@ -269,23 +269,23 @@ 255 o32 statfs64 sys_statfs64 compat_sys_statfs64 256 o32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 257 o32 timer_create sys_timer_create compat_sys_timer_create -258 o32 timer_settime sys_timer_settime compat_sys_timer_settime -259 o32 timer_gettime sys_timer_gettime compat_sys_timer_gettime +258 o32 timer_settime sys_timer_settime32 +259 o32 
timer_gettime sys_timer_gettime32 260 o32 timer_getoverrun sys_timer_getoverrun 261 o32 timer_delete sys_timer_delete -262 o32 clock_settime sys_clock_settime compat_sys_clock_settime -263 o32 clock_gettime sys_clock_gettime compat_sys_clock_gettime -264 o32 clock_getres sys_clock_getres compat_sys_clock_getres -265 o32 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +262 o32 clock_settime sys_clock_settime32 +263 o32 clock_gettime sys_clock_gettime32 +264 o32 clock_getres sys_clock_getres_time32 +265 o32 clock_nanosleep sys_clock_nanosleep_time32 266 o32 tgkill sys_tgkill -267 o32 utimes sys_utimes compat_sys_utimes +267 o32 utimes sys_utimes_time32 268 o32 mbind sys_mbind compat_sys_mbind 269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy 270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy 271 o32 mq_open sys_mq_open compat_sys_mq_open 272 o32 mq_unlink sys_mq_unlink -273 o32 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend -274 o32 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +273 o32 mq_timedsend sys_mq_timedsend_time32 +274 o32 mq_timedreceive sys_mq_timedreceive_time32 275 o32 mq_notify sys_mq_notify compat_sys_mq_notify 276 o32 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 277 o32 vserver sys_ni_syscall @@ -303,7 +303,7 @@ 289 o32 mkdirat sys_mkdirat 290 o32 mknodat sys_mknodat 291 o32 fchownat sys_fchownat -292 o32 futimesat sys_futimesat compat_sys_futimesat +292 o32 futimesat sys_futimesat_time32 293 o32 fstatat64 sys_fstatat64 sys_newfstatat 294 o32 unlinkat sys_unlinkat 295 o32 renameat sys_renameat @@ -312,8 +312,8 @@ 298 o32 readlinkat sys_readlinkat 299 o32 fchmodat sys_fchmodat 300 o32 faccessat sys_faccessat -301 o32 pselect6 sys_pselect6 compat_sys_pselect6 -302 o32 ppoll sys_ppoll compat_sys_ppoll +301 o32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +302 o32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 303 o32 unshare sys_unshare 304 o32 splice sys_splice 305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range @@ -327,14 +327,14 @@ 313 o32 epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait 314 o32 ioprio_set sys_ioprio_set 315 o32 ioprio_get sys_ioprio_get -316 o32 utimensat sys_utimensat compat_sys_utimensat +316 o32 utimensat sys_utimensat_time32 317 o32 signalfd sys_signalfd compat_sys_signalfd 318 o32 timerfd sys_ni_syscall 319 o32 eventfd sys_eventfd 320 o32 fallocate sys_fallocate sys32_fallocate 321 o32 timerfd_create sys_timerfd_create -322 o32 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime -323 o32 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime +322 o32 timerfd_gettime sys_timerfd_gettime32 +323 o32 timerfd_settime sys_timerfd_settime32 324 o32 signalfd4 sys_signalfd4 compat_sys_signalfd4 325 o32 eventfd2 sys_eventfd2 326 o32 epoll_create1 sys_epoll_create1 @@ -346,13 +346,13 @@ 332 o32 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 333 o32 perf_event_open sys_perf_event_open 334 o32 accept4 sys_accept4 -335 o32 recvmmsg sys_recvmmsg compat_sys_recvmmsg +335 o32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 336 o32 fanotify_init sys_fanotify_init 337 o32 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 338 o32 prlimit64 sys_prlimit64 339 o32 name_to_handle_at sys_name_to_handle_at 340 o32 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at -341 o32 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +341 o32 clock_adjtime sys_clock_adjtime32 342 o32 syncfs sys_syncfs 343 
o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg 344 o32 setns sys_setns @@ -379,4 +379,35 @@ 365 o32 pkey_free sys_pkey_free 366 o32 statx sys_statx 367 o32 rseq sys_rseq -368 o32 io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents +368 o32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +# room for arch specific calls +393 o32 semget sys_semget +394 o32 semctl sys_semctl compat_sys_semctl +395 o32 shmget sys_shmget +396 o32 shmctl sys_shmctl compat_sys_shmctl +397 o32 shmat sys_shmat compat_sys_shmat +398 o32 shmdt sys_shmdt +399 o32 msgget sys_msgget +400 o32 msgsnd sys_msgsnd compat_sys_msgsnd +401 o32 msgrcv sys_msgrcv compat_sys_msgrcv +402 o32 msgctl sys_msgctl compat_sys_msgctl +403 o32 clock_gettime64 sys_clock_gettime sys_clock_gettime +404 o32 clock_settime64 sys_clock_settime sys_clock_settime +405 o32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime +406 o32 clock_getres_time64 sys_clock_getres sys_clock_getres +407 o32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep +408 o32 timer_gettime64 sys_timer_gettime sys_timer_gettime +409 o32 timer_settime64 sys_timer_settime sys_timer_settime +410 o32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime +411 o32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime +412 o32 utimensat_time64 sys_utimensat sys_utimensat +413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend +419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive +420 o32 semtimedop_time64 sys_semtimedop sys_semtimedop +421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 o32 futex_time64 sys_futex sys_futex +423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c91097f7b32f..98ca55d62201 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1077,7 +1077,7 @@ asmlinkage void do_tr(struct pt_regs *regs) seg = get_fs(); if (!user_mode(regs)) - set_fs(get_ds()); + set_fs(KERNEL_DS); prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; @@ -2223,7 +2223,9 @@ void per_cpu_trap_init(bool is_boot_cpu) cp0_fdc_irq = -1; } - if (!cpu_data[cpu].asid_cache) + if (cpu_has_mmid) + cpu_data[cpu].asid_cache = 0; + else if (!cpu_data[cpu].asid_cache) cpu_data[cpu].asid_cache = asid_first_version(cpu); mmgrab(&init_mm); @@ -2291,7 +2293,10 @@ void __init trap_init(void) phys_addr_t ebase_pa; ebase = (unsigned long) - memblock_alloc_from(size, 1 << fls(size), 0); + memblock_alloc(size, 1 << fls(size)); + if (!ebase) + panic("%s: Failed to allocate %lu bytes align=0x%x\n", + __func__, size, 1 << fls(size)); /* * Try to ensure ebase resides in KSeg0 if possible. 
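/*
 * Illustrative aside, not part of the patch: the ebase allocation above asks
 * memblock for an alignment of 1 << fls(size). For a non-zero size that is
 * the smallest power of two strictly greater than size, which presumably
 * keeps the exception vector base naturally aligned. Tiny standalone check of
 * that identity; fls_demo() is a stand-in reimplementation for illustration.
 */
#include <assert.h>

static int fls_demo(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;	/* 1-based index of the most significant set bit, 0 for x == 0 */
}

int main(void)
{
	assert((1UL << fls_demo(0x400)) == 0x800);	/* exact power of two -> doubled  */
	assert((1UL << fls_demo(0x480)) == 0x800);	/* otherwise next power of two up */
	return 0;
}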
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 595ca9c85111..76e33f940971 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -89,6 +89,7 @@ #include #include #include +#include #include #define STR(x) __STR(x) @@ -2374,18 +2375,10 @@ sigbus: #ifdef CONFIG_DEBUG_FS static int __init debugfs_unaligned(void) { - struct dentry *d; - - if (!mips_debugfs_dir) - return -ENODEV; - d = debugfs_create_u32("unaligned_instructions", S_IRUGO, - mips_debugfs_dir, &unaligned_instructions); - if (!d) - return -ENOMEM; - d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, - mips_debugfs_dir, &unaligned_action); - if (!d) - return -ENOMEM; + debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir, + &unaligned_instructions); + debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, + mips_debugfs_dir, &unaligned_action); return 0; } arch_initcall(debugfs_unaligned); diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index ec9ed23bca7f..0074427b04fb 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -1016,10 +1016,10 @@ static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu, */ preempt_disable(); cpu = smp_processor_id(); - get_new_mmu_context(kern_mm, cpu); + get_new_mmu_context(kern_mm); for_each_possible_cpu(i) if (i != cpu) - cpu_context(i, kern_mm) = 0; + set_cpu_context(i, kern_mm, 0); preempt_enable(); } kvm_write_c0_guest_entryhi(cop0, entryhi); @@ -1090,8 +1090,8 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, if (i == cpu) continue; if (user) - cpu_context(i, user_mm) = 0; - cpu_context(i, kern_mm) = 0; + set_cpu_context(i, user_mm, 0); + set_cpu_context(i, kern_mm, 0); } preempt_enable(); diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 3734cd58895e..6d0517ac18e5 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -1723,6 +1723,11 @@ static int __init kvm_mips_init(void) { int ret; + if (cpu_has_mmid) { + pr_warn("KVM does not yet support MMIDs. KVM Disabled\n"); + return -EOPNOTSUPP; + } + ret = kvm_mips_entry_setup(); if (ret) return ret; diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index 6a0d7040d882..73daa6ad33af 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -1056,11 +1056,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) */ if (current->flags & PF_VCPU) { mm = KVM_GUEST_KERNEL_MODE(vcpu) ? 
kern_mm : user_mm; - if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu)) - get_new_mmu_context(mm, cpu); - write_c0_entryhi(cpu_asid(cpu, mm)); - TLBMISS_HANDLER_SETUP_PGD(mm->pgd); + check_switch_mmu_context(mm); kvm_mips_suspend_mm(cpu); ehb(); } @@ -1074,11 +1070,7 @@ static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) if (current->flags & PF_VCPU) { /* Restore normal Linux process memory map */ - if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu))) - get_new_mmu_context(current->mm, cpu); - write_c0_entryhi(cpu_asid(cpu, current->mm)); - TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd); + check_switch_mmu_context(current->mm); kvm_mips_resume_mm(cpu); ehb(); } @@ -1106,14 +1098,14 @@ static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN); kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER); for_each_possible_cpu(i) { - cpu_context(i, kern_mm) = 0; - cpu_context(i, user_mm) = 0; + set_cpu_context(i, kern_mm, 0); + set_cpu_context(i, user_mm, 0); } /* Generate new ASID for current mode */ if (reload_asid) { mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; - get_new_mmu_context(mm, cpu); + get_new_mmu_context(mm); htw_stop(); write_c0_entryhi(cpu_asid(cpu, mm)); TLBMISS_HANDLER_SETUP_PGD(mm->pgd); @@ -1219,7 +1211,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run, if (gasid != vcpu->arch.last_user_gasid) { kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER); for_each_possible_cpu(i) - cpu_context(i, user_mm) = 0; + set_cpu_context(i, user_mm, 0); vcpu->arch.last_user_gasid = gasid; } } @@ -1228,9 +1220,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run, * Check if ASID is stale. This may happen due to a TLB flush request or * a lazy user MM invalidation. */ - if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu)) - get_new_mmu_context(mm, cpu); + check_mmu_context(mm); } static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) @@ -1266,11 +1256,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) cpu = smp_processor_id(); /* Restore normal Linux process memory map */ - if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu))) - get_new_mmu_context(current->mm, cpu); - write_c0_entryhi(cpu_asid(cpu, current->mm)); - TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd); + check_switch_mmu_context(current->mm); kvm_mips_resume_mm(cpu); htw_start(); diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 74805035edc8..dde20887a70d 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -2454,10 +2454,10 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu) * Root ASID dealiases guest GPA mappings in the root TLB. * Allocate new root ASID if needed. 
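/*
 * Illustrative aside, not part of the patch: the open-coded test that these
 * KVM hunks fold into check_mmu_context()/check_switch_mmu_context() compares
 * the generation ("version") bits of a per-mm ASID against the CPU's current
 * generation; if they differ, the cached ASID is stale and a new one must be
 * allocated. Constants and names below are made up for this standalone sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define ASID_BITS_DEMO		8UL	/* low bits: the ASID value itself */
#define ASID_VERSION_MASK_DEMO	(~((1UL << ASID_BITS_DEMO) - 1))	/* high bits: generation */

static bool asid_is_stale(unsigned long mm_asid, unsigned long cpu_asid_cache)
{
	/* differing version bits mean the cached ASID belongs to an old generation */
	return ((mm_asid ^ cpu_asid_cache) & ASID_VERSION_MASK_DEMO) != 0;
}

int main(void)
{
	printf("%d\n", asid_is_stale(0x305, 0x342));	/* 0: same generation 0x3 */
	printf("%d\n", asid_is_stale(0x305, 0x401));	/* 1: generation moved on */
	return 0;
}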
*/ - if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask) - || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) & - asid_version_mask(cpu)) - get_new_mmu_context(gpa_mm, cpu); + if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) + get_new_mmu_context(gpa_mm); + else + check_mmu_context(gpa_mm); } } diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig index 188de95d6dbd..6c6802e482c9 100644 --- a/arch/mips/lantiq/Kconfig +++ b/arch/mips/lantiq/Kconfig @@ -52,8 +52,4 @@ config PCI_LANTIQ bool "PCI Support" depends on SOC_XWAY && PCI -config XRX200_PHY_FW - bool "XRX200 PHY firmware loader" - depends on SOC_XWAY - endif diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c index 577ec81b557d..3deab9a77718 100644 --- a/arch/mips/lantiq/xway/vmmc.c +++ b/arch/mips/lantiq/xway/vmmc.c @@ -31,8 +31,8 @@ static int vmmc_probe(struct platform_device *pdev) dma_addr_t dma; cp1_base = - (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE, - &dma, GFP_ATOMIC)); + (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE, + &dma, GFP_KERNEL)); gpio_count = of_gpio_count(pdev->dev.of_node); while (gpio_count > 0) { diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 781ad96b78c4..83ed37298e66 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -73,12 +74,13 @@ static inline const char *msk2str(unsigned int mask) static void dump_tlb(int first, int last) { - unsigned long s_entryhi, entryhi, asid; + unsigned long s_entryhi, entryhi, asid, mmid; unsigned long long entrylo0, entrylo1, pa; unsigned int s_index, s_pagemask, s_guestctl1 = 0; unsigned int pagemask, guestctl1 = 0, c0, c1, i; unsigned long asidmask = cpu_asid_mask(¤t_cpu_data); int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4); + unsigned long uninitialized_var(s_mmid); #ifdef CONFIG_32BIT bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA); int pwidth = xpa ? 11 : 8; @@ -92,7 +94,12 @@ static void dump_tlb(int first, int last) s_pagemask = read_c0_pagemask(); s_entryhi = read_c0_entryhi(); s_index = read_c0_index(); - asid = s_entryhi & asidmask; + + if (cpu_has_mmid) + asid = s_mmid = read_c0_memorymapid(); + else + asid = s_entryhi & asidmask; + if (cpu_has_guestid) s_guestctl1 = read_c0_guestctl1(); @@ -105,6 +112,12 @@ static void dump_tlb(int first, int last) entryhi = read_c0_entryhi(); entrylo0 = read_c0_entrylo0(); entrylo1 = read_c0_entrylo1(); + + if (cpu_has_mmid) + mmid = read_c0_memorymapid(); + else + mmid = entryhi & asidmask; + if (cpu_has_guestid) guestctl1 = read_c0_guestctl1(); @@ -124,8 +137,7 @@ static void dump_tlb(int first, int last) * leave only a single G bit set after a machine check exception * due to duplicate TLB entry. 
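/*
 * Illustrative aside, not part of the patch: the dump_tlb() condition changed
 * just below decides whether a TLB entry belongs to the current address
 * space - an entry is shown only if it is global (G set in either EntryLo) or
 * its ASID/MMID matches the one currently in use. Standalone restatement of
 * that predicate; the macro and function names here are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define ENTRYLO_G_DEMO 0x1ULL

static bool entry_is_relevant(unsigned long long lo0, unsigned long long lo1,
			      unsigned long entry_id, unsigned long current_id)
{
	/* global entries match every address space; others must match the id */
	return ((lo0 | lo1) & ENTRYLO_G_DEMO) || entry_id == current_id;
}

int main(void)
{
	printf("%d\n", entry_is_relevant(0x1, 0x0, 0x22, 0x10));	/* 1: global     */
	printf("%d\n", entry_is_relevant(0x0, 0x0, 0x10, 0x10));	/* 1: id matches */
	printf("%d\n", entry_is_relevant(0x0, 0x0, 0x22, 0x10));	/* 0: skipped    */
	return 0;
}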
*/ - if (!((entrylo0 | entrylo1) & ENTRYLO_G) && - (entryhi & asidmask) != asid) + if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid)) continue; /* @@ -138,7 +150,7 @@ static void dump_tlb(int first, int last) pr_cont("va=%0*lx asid=%0*lx", vwidth, (entryhi & ~0x1fffUL), - asidwidth, entryhi & asidmask); + asidwidth, mmid); if (cpu_has_guestid) pr_cont(" gid=%02lx", (guestctl1 & MIPS_GCTL1_RID) diff --git a/arch/mips/loongson32/Kconfig b/arch/mips/loongson32/Kconfig index 462b126f45aa..6dacc1438906 100644 --- a/arch/mips/loongson32/Kconfig +++ b/arch/mips/loongson32/Kconfig @@ -15,7 +15,6 @@ config LOONGSON1_LS1B select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM - select SYS_SUPPORTS_MIPS16 select SYS_HAS_EARLY_PRINTK select USE_GENERIC_EARLY_PRINTK_8250 select COMMON_CLK @@ -31,7 +30,6 @@ config LOONGSON1_LS1C select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_HIGHMEM - select SYS_SUPPORTS_MIPS16 select SYS_HAS_EARLY_PRINTK select USE_GENERIC_EARLY_PRINTK_8250 select COMMON_CLK diff --git a/arch/mips/loongson32/Platform b/arch/mips/loongson32/Platform index a0dbb3b2f2de..333215593092 100644 --- a/arch/mips/loongson32/Platform +++ b/arch/mips/loongson32/Platform @@ -1,4 +1,4 @@ -cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32 -Wa,--trap +cflags-$(CONFIG_CPU_LOONGSON1) += -march=mips32r2 -Wa,--trap platform-$(CONFIG_MACH_LOONGSON32) += loongson32/ cflags-$(CONFIG_MACH_LOONGSON32) += -I$(srctree)/arch/mips/include/asm/mach-loongson32 -load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80100000 +load-$(CONFIG_CPU_LOONGSON1) += 0xffffffff80200000 diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c index ac584c5823d0..0bf355c8bcb2 100644 --- a/arch/mips/loongson32/common/platform.c +++ b/arch/mips/loongson32/common/platform.c @@ -81,42 +81,6 @@ struct platform_device ls1x_cpufreq_pdev = { }, }; -/* DMA */ -static struct resource ls1x_dma_resources[] = { - [0] = { - .start = LS1X_DMAC_BASE, - .end = LS1X_DMAC_BASE + SZ_4 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = LS1X_DMA0_IRQ, - .end = LS1X_DMA0_IRQ, - .flags = IORESOURCE_IRQ, - }, - [2] = { - .start = LS1X_DMA1_IRQ, - .end = LS1X_DMA1_IRQ, - .flags = IORESOURCE_IRQ, - }, - [3] = { - .start = LS1X_DMA2_IRQ, - .end = LS1X_DMA2_IRQ, - .flags = IORESOURCE_IRQ, - }, -}; - -struct platform_device ls1x_dma_pdev = { - .name = "ls1x-dma", - .id = -1, - .num_resources = ARRAY_SIZE(ls1x_dma_resources), - .resource = ls1x_dma_resources, -}; - -void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata) -{ - ls1x_dma_pdev.dev.platform_data = pdata; -} - /* Synopsys Ethernet GMAC */ static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = { .phy_mask = 0, @@ -291,33 +255,6 @@ struct platform_device ls1x_gpio1_pdev = { .resource = ls1x_gpio1_resources, }; -/* NAND Flash */ -static struct resource ls1x_nand_resources[] = { - [0] = { - .start = LS1X_NAND_BASE, - .end = LS1X_NAND_BASE + SZ_32 - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - /* DMA channel 0 is dedicated to NAND */ - .start = LS1X_DMA_CHANNEL0, - .end = LS1X_DMA_CHANNEL0, - .flags = IORESOURCE_DMA, - }, -}; - -struct platform_device ls1x_nand_pdev = { - .name = "ls1x-nand", - .id = -1, - .num_resources = ARRAY_SIZE(ls1x_nand_resources), - .resource = ls1x_nand_resources, -}; - -void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata) -{ - ls1x_nand_pdev.dev.platform_data = pdata; -} - /* USB EHCI */ static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32); diff 
--git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c index 01aceaace314..447b15fc0a2b 100644 --- a/arch/mips/loongson32/ls1b/board.c +++ b/arch/mips/loongson32/ls1b/board.c @@ -16,30 +16,6 @@ #include #include -struct plat_ls1x_dma ls1x_dma_pdata = { - .nr_channels = 3, -}; - -static struct mtd_partition ls1x_nand_parts[] = { - { - .name = "kernel", - .offset = 0, - .size = SZ_16M, - }, - { - .name = "rootfs", - .offset = MTDPART_OFS_APPEND, - .size = MTDPART_SIZ_FULL, - }, -}; - -struct plat_ls1x_nand ls1x_nand_pdata = { - .parts = ls1x_nand_parts, - .nr_parts = ARRAY_SIZE(ls1x_nand_parts), - .hold_cycle = 0x2, - .wait_cycle = 0xc, -}; - static const struct gpio_led ls1x_gpio_leds[] __initconst = { { .name = "LED9", @@ -64,13 +40,11 @@ static const struct gpio_led_platform_data ls1x_led_pdata __initconst = { static struct platform_device *ls1b_platform_devices[] __initdata = { &ls1x_uart_pdev, &ls1x_cpufreq_pdev, - &ls1x_dma_pdev, &ls1x_eth0_pdev, &ls1x_eth1_pdev, &ls1x_ehci_pdev, &ls1x_gpio0_pdev, &ls1x_gpio1_pdev, - &ls1x_nand_pdev, &ls1x_rtc_pdev, &ls1x_wdt_pdev, }; @@ -78,8 +52,6 @@ static struct platform_device *ls1b_platform_devices[] __initdata = { static int __init ls1b_platform_init(void) { ls1x_serial_set_uartclk(&ls1x_uart_pdev); - ls1x_dma_set_platdata(&ls1x_dma_pdata); - ls1x_nand_set_platdata(&ls1x_nand_pdata); gpio_led_register_device(-1, &ls1x_led_pdata); diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index 0fce4608aa88..c1a4d4dc4665 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform @@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS endif cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap + +# +# Some versions of binutils, not currently mainline as of 2019/02/04, support +# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction +# to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a +# description). +# +# We disable this in order to prevent the assembler meddling with the +# instruction that labels refer to, ie. if we label an ll instruction: +# +# 1: ll v0, 0(a0) +# +# ...then with the assembler fix applied the label may actually point at a sync +# instruction inserted by the assembler, and if we were using the label in an +# exception table the table would no longer contain the address of the ll +# instruction. +# +# Avoid this by explicitly disabling that assembler behaviour. If upstream +# binutils does not merge support for the flag then we can revisit & remove +# this later - for now it ensures vendor toolchains don't cause problems. +# +cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) + # # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a # as MIPS64 R2; older versions as just R1. This leaves the possibility open diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c index a60715e11306..b26892ce871c 100644 --- a/arch/mips/loongson64/common/reset.c +++ b/arch/mips/loongson64/common/reset.c @@ -59,7 +59,12 @@ static void loongson_poweroff(void) { #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE mach_prepare_shutdown(); - unreachable(); + + /* + * It needs a wait loop here, but mips/kernel/reset.c already calls + * a generic delay loop, machine_hang(), so simply return. 
+ */ + return; #else void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c index 58798f527356..387724860fa6 100644 --- a/arch/mips/math-emu/me-debugfs.c +++ b/arch/mips/math-emu/me-debugfs.c @@ -189,32 +189,21 @@ static int __init debugfs_fpuemu(void) { struct dentry *fpuemu_debugfs_base_dir; struct dentry *fpuemu_debugfs_inst_dir; - struct dentry *d, *reset_file; - - if (!mips_debugfs_dir) - return -ENODEV; fpuemu_debugfs_base_dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir); - if (!fpuemu_debugfs_base_dir) - return -ENOMEM; - reset_file = debugfs_create_file("fpuemustats_clear", 0444, - mips_debugfs_dir, NULL, - &fpuemustats_clear_fops); - if (!reset_file) - return -ENOMEM; + debugfs_create_file("fpuemustats_clear", 0444, mips_debugfs_dir, NULL, + &fpuemustats_clear_fops); #define FPU_EMU_STAT_OFFSET(m) \ offsetof(struct mips_fpu_emulator_stats, m) #define FPU_STAT_CREATE(m) \ do { \ - d = debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \ + debugfs_create_file(#m, 0444, fpuemu_debugfs_base_dir, \ (void *)FPU_EMU_STAT_OFFSET(m), \ &fops_fpuemu_stat); \ - if (!d) \ - return -ENOMEM; \ } while (0) FPU_STAT_CREATE(emulated); @@ -233,8 +222,6 @@ do { \ fpuemu_debugfs_inst_dir = debugfs_create_dir("instructions", fpuemu_debugfs_base_dir); - if (!fpuemu_debugfs_inst_dir) - return -ENOMEM; #define FPU_STAT_CREATE_EX(m) \ do { \ @@ -242,11 +229,9 @@ do { \ \ adjust_instruction_counter_name(name, #m); \ \ - d = debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \ + debugfs_create_file(name, 0444, fpuemu_debugfs_inst_dir, \ (void *)FPU_EMU_STAT_OFFSET(m), \ &fops_fpuemu_stat); \ - if (!d) \ - return -ENOMEM; \ } while (0) FPU_STAT_CREATE_EX(abs_s); diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 3e5bb203c95a..f34d7ff5eb60 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -3,9 +3,19 @@ # Makefile for the Linux/MIPS-specific parts of the memory manager. # -obj-y += cache.o extable.o fault.o \ - gup.o init.o mmap.o page.o page-funcs.o \ - pgtable.o tlbex.o tlbex-fault.o tlb-funcs.o +obj-y += cache.o +obj-y += context.o +obj-y += extable.o +obj-y += fault.o +obj-y += gup.o +obj-y += init.o +obj-y += mmap.o +obj-y += page.o +obj-y += page-funcs.o +obj-y += pgtable.o +obj-y += tlbex.o +obj-y += tlbex-fault.o +obj-y += tlb-funcs.o ifdef CONFIG_CPU_MICROMIPS obj-y += uasm-micromips.o diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index 0e45b061e514..8064821e9805 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -127,23 +127,6 @@ static void octeon_flush_icache_range(unsigned long start, unsigned long end) } -/** - * Flush the icache for a trampoline. These are used for interrupt - * and exception hooking. 
- * - * @addr: Address to flush - */ -static void octeon_flush_cache_sigtramp(unsigned long addr) -{ - struct vm_area_struct *vma; - - down_read(¤t->mm->mmap_sem); - vma = find_vma(current->mm, addr); - octeon_flush_icache_all_cores(vma); - up_read(¤t->mm->mmap_sem); -} - - /** * Flush a range out of a vma * @@ -289,7 +272,6 @@ void octeon_cache_init(void) flush_cache_mm = octeon_flush_cache_mm; flush_cache_page = octeon_flush_cache_page; flush_cache_range = octeon_flush_cache_range; - flush_cache_sigtramp = octeon_flush_cache_sigtramp; flush_icache_all = octeon_flush_icache_all; flush_data_cache_page = octeon_flush_data_cache_page; flush_icache_range = octeon_flush_icache_range; diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c index 01848cdf2074..0ca401ddf3b7 100644 --- a/arch/mips/mm/c-r3k.c +++ b/arch/mips/mm/c-r3k.c @@ -274,30 +274,6 @@ static void r3k_flush_data_cache_page(unsigned long addr) { } -static void r3k_flush_cache_sigtramp(unsigned long addr) -{ - unsigned long flags; - - pr_debug("csigtramp[%08lx]\n", addr); - - flags = read_c0_status(); - - write_c0_status(flags&~ST0_IEC); - - /* Fill the TLB to avoid an exception with caches isolated. */ - asm( "lw\t$0, 0x000(%0)\n\t" - "lw\t$0, 0x004(%0)\n\t" - : : "r" (addr) ); - - write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC); - - asm( "sb\t$0, 0x000(%0)\n\t" - "sb\t$0, 0x004(%0)\n\t" - : : "r" (addr) ); - - write_c0_status(flags); -} - static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size) { BUG(); @@ -331,7 +307,6 @@ void r3k_cache_init(void) __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range; - flush_cache_sigtramp = r3k_flush_cache_sigtramp; local_flush_data_cache_page = local_r3k_flush_data_cache_page; flush_data_cache_page = r3k_flush_data_cache_page; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index d0b64df51eb2..5166e38cd1c6 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -540,6 +540,9 @@ static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type) unsigned int i; const cpumask_t *mask = cpu_present_mask; + if (cpu_has_mmid) + return cpu_context(0, mm) != 0; + /* cpu_sibling_map[] undeclared when !CONFIG_SMP */ #ifdef CONFIG_SMP /* @@ -697,10 +700,7 @@ static inline void local_r4k_flush_cache_page(void *args) } if (exec) { if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } else vaddr ? r4k_blast_icache_page(addr) : r4k_blast_icache_user_page(addr); @@ -937,119 +937,6 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) } #endif /* CONFIG_DMA_NONCOHERENT */ -struct flush_cache_sigtramp_args { - struct mm_struct *mm; - struct page *page; - unsigned long addr; -}; - -/* - * While we're protected against bad userland addresses we don't care - * very much about what happens in that case. Usually a segmentation - * fault will dump the process later on anyway ... - */ -static void local_r4k_flush_cache_sigtramp(void *args) -{ - struct flush_cache_sigtramp_args *fcs_args = args; - unsigned long addr = fcs_args->addr; - struct page *page = fcs_args->page; - struct mm_struct *mm = fcs_args->mm; - int map_coherent = 0; - void *vaddr; - - unsigned long ic_lsize = cpu_icache_line_size(); - unsigned long dc_lsize = cpu_dcache_line_size(); - unsigned long sc_lsize = cpu_scache_line_size(); - - /* - * If owns no valid ASID yet, cannot possibly have gotten - * this page into the cache. 
- */ - if (!has_valid_asid(mm, R4K_HIT)) - return; - - if (mm == current->active_mm) { - vaddr = NULL; - } else { - /* - * Use kmap_coherent or kmap_atomic to do flushes for - * another ASID than the current one. - */ - map_coherent = (cpu_has_dc_aliases && - page_mapcount(page) && - !Page_dcache_dirty(page)); - if (map_coherent) - vaddr = kmap_coherent(page, addr); - else - vaddr = kmap_atomic(page); - addr = (unsigned long)vaddr + (addr & ~PAGE_MASK); - } - - R4600_HIT_CACHEOP_WAR_IMPL; - if (!cpu_has_ic_fills_f_dc) { - if (dc_lsize) - vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1)) - : protected_writeback_dcache_line( - addr & ~(dc_lsize - 1)); - if (!cpu_icache_snoops_remote_store && scache_size) - vaddr ? flush_scache_line(addr & ~(sc_lsize - 1)) - : protected_writeback_scache_line( - addr & ~(sc_lsize - 1)); - } - if (ic_lsize) - vaddr ? flush_icache_line(addr & ~(ic_lsize - 1)) - : protected_flush_icache_line(addr & ~(ic_lsize - 1)); - - if (vaddr) { - if (map_coherent) - kunmap_coherent(); - else - kunmap_atomic(vaddr); - } - - if (MIPS4K_ICACHE_REFILL_WAR) { - __asm__ __volatile__ ( - ".set push\n\t" - ".set noat\n\t" - ".set "MIPS_ISA_LEVEL"\n\t" -#ifdef CONFIG_32BIT - "la $at,1f\n\t" -#endif -#ifdef CONFIG_64BIT - "dla $at,1f\n\t" -#endif - "cache %0,($at)\n\t" - "nop; nop; nop\n" - "1:\n\t" - ".set pop" - : - : "i" (Hit_Invalidate_I)); - } - if (MIPS_CACHE_SYNC_WAR) - __asm__ __volatile__ ("sync"); -} - -static void r4k_flush_cache_sigtramp(unsigned long addr) -{ - struct flush_cache_sigtramp_args args; - int npages; - - down_read(¤t->mm->mmap_sem); - - npages = get_user_pages_fast(addr, 1, 0, &args.page); - if (npages < 1) - goto out; - - args.mm = current->mm; - args.addr = addr; - - r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args); - - put_page(args.page); -out: - up_read(¤t->mm->mmap_sem); -} - static void r4k_flush_icache_all(void) { if (cpu_has_vtag_icache) @@ -1978,7 +1865,6 @@ void r4k_cache_init(void) __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; - flush_cache_sigtramp = r4k_flush_cache_sigtramp; flush_icache_all = r4k_flush_icache_all; local_flush_data_cache_page = local_r4k_flush_data_cache_page; flush_data_cache_page = r4k_flush_data_cache_page; @@ -2033,7 +1919,6 @@ void r4k_cache_init(void) /* I$ fills from D$ just by emptying the write buffers */ flush_cache_page = (void *)b5k_instruction_hazard; flush_cache_range = (void *)b5k_instruction_hazard; - flush_cache_sigtramp = (void *)b5k_instruction_hazard; local_flush_data_cache_page = (void *)b5k_instruction_hazard; flush_data_cache_page = (void *)b5k_instruction_hazard; flush_icache_range = (void *)b5k_instruction_hazard; @@ -2052,7 +1937,6 @@ void r4k_cache_init(void) flush_cache_mm = (void *)cache_noop; flush_cache_page = (void *)cache_noop; flush_cache_range = (void *)cache_noop; - flush_cache_sigtramp = (void *)cache_noop; flush_icache_all = (void *)cache_noop; flush_data_cache_page = (void *)cache_noop; local_flush_data_cache_page = (void *)cache_noop; diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c index 5f6c099a9457..b7c8a9d79c35 100644 --- a/arch/mips/mm/c-tx39.c +++ b/arch/mips/mm/c-tx39.c @@ -290,25 +290,6 @@ static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) } } -static void tx39_flush_cache_sigtramp(unsigned long addr) -{ - unsigned long ic_lsize = current_cpu_data.icache.linesz; - unsigned long dc_lsize = current_cpu_data.dcache.linesz; - unsigned long config; - unsigned long flags; - - protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); 
- - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - protected_flush_icache_line(addr & ~(ic_lsize - 1)); - write_c0_conf(config); - local_irq_restore(flags); -} - static __init void tx39_probe_cache(void) { unsigned long config; @@ -368,7 +349,6 @@ void tx39_cache_init(void) flush_icache_range = (void *) tx39h_flush_icache_all; local_flush_icache_range = (void *) tx39h_flush_icache_all; - flush_cache_sigtramp = (void *) tx39h_flush_icache_all; local_flush_data_cache_page = (void *) tx39h_flush_icache_all; flush_data_cache_page = (void *) tx39h_flush_icache_all; @@ -397,7 +377,6 @@ void tx39_cache_init(void) __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range; - flush_cache_sigtramp = tx39_flush_cache_sigtramp; local_flush_data_cache_page = local_tx39_flush_data_cache_page; flush_data_cache_page = tx39_flush_data_cache_page; diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 55099fbff4e6..3da216988672 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -47,7 +47,6 @@ void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); /* MIPS specific cache operations */ -void (*flush_cache_sigtramp)(unsigned long addr); void (*local_flush_data_cache_page)(void * addr); void (*flush_data_cache_page)(unsigned long addr); void (*flush_icache_all)(void); diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c new file mode 100644 index 000000000000..b25564090939 --- /dev/null +++ b/arch/mips/mm/context.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +static DEFINE_RAW_SPINLOCK(cpu_mmid_lock); + +static atomic64_t mmid_version; +static unsigned int num_mmids; +static unsigned long *mmid_map; + +static DEFINE_PER_CPU(u64, reserved_mmids); +static cpumask_t tlb_flush_pending; + +static bool asid_versions_eq(int cpu, u64 a, u64 b) +{ + return ((a ^ b) & asid_version_mask(cpu)) == 0; +} + +void get_new_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu; + u64 asid; + + /* + * This function is specific to ASIDs, and should not be called when + * MMIDs are in use. + */ + if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid)) + return; + + cpu = smp_processor_id(); + asid = asid_cache(cpu); + + if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { + if (cpu_has_vtag_icache) + flush_icache_all(); + local_flush_tlb_all(); /* start new asid cycle */ + } + + set_cpu_context(cpu, mm, asid); + asid_cache(cpu) = asid; +} +EXPORT_SYMBOL_GPL(get_new_mmu_context); + +void check_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + + /* + * This function is specific to ASIDs, and should not be called when + * MMIDs are in use. 
+ */ + if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid)) + return; + + /* Check if our ASID is of an older version and thus invalid */ + if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu))) + get_new_mmu_context(mm); +} +EXPORT_SYMBOL_GPL(check_mmu_context); + +static void flush_context(void) +{ + u64 mmid; + int cpu; + + /* Update the list of reserved MMIDs and the MMID bitmap */ + bitmap_clear(mmid_map, 0, num_mmids); + + /* Reserve an MMID for kmap/wired entries */ + __set_bit(MMID_KERNEL_WIRED, mmid_map); + + for_each_possible_cpu(cpu) { + mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0); + + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * MMID, as this is the only trace we have of + * the process it is still running. + */ + if (mmid == 0) + mmid = per_cpu(reserved_mmids, cpu); + + __set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map); + per_cpu(reserved_mmids, cpu) = mmid; + } + + /* + * Queue a TLB invalidation for each CPU to perform on next + * context-switch + */ + cpumask_setall(&tlb_flush_pending); +} + +static bool check_update_reserved_mmid(u64 mmid, u64 newmmid) +{ + bool hit; + int cpu; + + /* + * Iterate over the set of reserved MMIDs looking for a match. + * If we find one, then we can update our mm to use newmmid + * (i.e. the same MMID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old MMID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved MMID in a future + * generation. + */ + hit = false; + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_mmids, cpu) == mmid) { + hit = true; + per_cpu(reserved_mmids, cpu) = newmmid; + } + } + + return hit; +} + +static u64 get_new_mmid(struct mm_struct *mm) +{ + static u32 cur_idx = MMID_KERNEL_WIRED + 1; + u64 mmid, version, mmid_mask; + + mmid = cpu_context(0, mm); + version = atomic64_read(&mmid_version); + mmid_mask = cpu_asid_mask(&boot_cpu_data); + + if (!asid_versions_eq(0, mmid, 0)) { + u64 newmmid = version | (mmid & mmid_mask); + + /* + * If our current MMID was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (check_update_reserved_mmid(mmid, newmmid)) { + mmid = newmmid; + goto set_context; + } + + /* + * We had a valid MMID in a previous life, so try to re-use + * it if possible. 
+ */ + if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) { + mmid = newmmid; + goto set_context; + } + } + + /* Allocate a free MMID */ + mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx); + if (mmid != num_mmids) + goto reserve_mmid; + + /* We're out of MMIDs, so increment the global version */ + version = atomic64_add_return_relaxed(asid_first_version(0), + &mmid_version); + + /* Note currently active MMIDs & mark TLBs as requiring flushes */ + flush_context(); + + /* We have more MMIDs than CPUs, so this will always succeed */ + mmid = find_first_zero_bit(mmid_map, num_mmids); + +reserve_mmid: + __set_bit(mmid, mmid_map); + cur_idx = mmid; + mmid |= version; +set_context: + set_cpu_context(0, mm, mmid); + return mmid; +} + +void check_switch_mmu_context(struct mm_struct *mm) +{ + unsigned int cpu = smp_processor_id(); + u64 ctx, old_active_mmid; + unsigned long flags; + + if (!cpu_has_mmid) { + check_mmu_context(mm); + write_c0_entryhi(cpu_asid(cpu, mm)); + goto setup_pgd; + } + + /* + * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's + * unnecessary. + * + * The memory ordering here is subtle. If our active_mmids is non-zero + * and the MMID matches the current version, then we update the CPU's + * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover + * means that either: + * + * - We get a zero back from the cmpxchg and end up waiting on + * cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises + * with the rollover and so we are forced to see the updated + * generation. + * + * - We get a valid MMID back from the cmpxchg, which means the + * relaxed xchg in flush_context will treat us as reserved + * because atomic RmWs are totally ordered for a given location. + */ + ctx = cpu_context(cpu, mm); + old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache); + if (!old_active_mmid || + !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) || + !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) { + raw_spin_lock_irqsave(&cpu_mmid_lock, flags); + + ctx = cpu_context(cpu, mm); + if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version))) + ctx = get_new_mmid(mm); + + WRITE_ONCE(cpu_data[cpu].asid_cache, ctx); + raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags); + } + + /* + * Invalidate the local TLB if needed. Note that we must only clear our + * bit in tlb_flush_pending after this is complete, so that the + * cpu_has_shared_ftlb_entries case below isn't misled. + */ + if (cpumask_test_cpu(cpu, &tlb_flush_pending)) { + if (cpu_has_vtag_icache) + flush_icache_all(); + local_flush_tlb_all(); + cpumask_clear_cpu(cpu, &tlb_flush_pending); + } + + write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data)); + + /* + * If this CPU shares FTLB entries with its siblings and one or more of + * those siblings hasn't yet invalidated its TLB following a version + * increase then we need to invalidate any TLB entries for our MMID + * that we might otherwise pick up from a sibling. + * + * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in + * CONFIG_SMP=n kernels. + */ +#ifdef CONFIG_SMP + if (cpu_has_shared_ftlb_entries && + cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) { + /* Ensure we operate on the new MMID */ + mtc0_tlbw_hazard(); + + /* + * Invalidate all TLB entries associated with the new + * MMID, and wait for the invalidation to complete. 
+ */ + ginvt_mmid(); + sync_ginv(); + } +#endif + +setup_pgd: + TLBMISS_HANDLER_SETUP_PGD(mm->pgd); +} +EXPORT_SYMBOL_GPL(check_switch_mmu_context); + +static int mmid_init(void) +{ + if (!cpu_has_mmid) + return 0; + + /* + * Expect allocation after rollover to fail if we don't have at least + * one more MMID than CPUs. + */ + num_mmids = asid_first_version(0); + WARN_ON(num_mmids <= num_possible_cpus()); + + atomic64_set(&mmid_version, asid_first_version(0)); + mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map), + GFP_KERNEL); + if (!mmid_map) + panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids); + + /* Reserve an MMID for kmap/wired entries */ + __set_bit(MMID_KERNEL_WIRED, mmid_map); + + pr_info("MMID allocator initialised with %u entries\n", num_mmids); + return 0; +} +early_initcall(mmid_init); diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index cb38461391cb..f9549d2fbea3 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -120,13 +120,8 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size, if (PageHighMem(page)) { void *addr; - if (offset + len > PAGE_SIZE) { - if (offset >= PAGE_SIZE) { - page += offset >> PAGE_SHIFT; - offset &= ~PAGE_MASK; - } + if (offset + len > PAGE_SIZE) len = PAGE_SIZE - offset; - } addr = kmap_atomic(page); dma_sync_virt(addr + offset, len, dir); @@ -145,12 +140,14 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, dma_sync_phys(paddr, size, dir); } +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, size_t size, enum dma_data_direction dir) { if (cpu_needs_post_dma_flush(dev)) dma_sync_phys(paddr, size, dir); } +#endif void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) @@ -159,3 +156,11 @@ void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size, dma_sync_virt(vaddr, size, direction); } + +#ifdef CONFIG_DMA_PERDEV_COHERENT +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, + const struct iommu_ops *iommu, bool coherent) +{ + dev->dma_coherent = coherent; +} +#endif diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index b521d8e2d359..bbb196ad5f26 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -84,6 +84,7 @@ void setup_zero_pages(void) static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) { enum fixed_addresses idx; + unsigned int uninitialized_var(old_mmid); unsigned long vaddr, flags, entrylo; unsigned long old_ctx; pte_t pte; @@ -110,6 +111,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) write_c0_entryhi(vaddr & (PAGE_MASK << 1)); write_c0_entrylo0(entrylo); write_c0_entrylo1(entrylo); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(MMID_KERNEL_WIRED); + } #ifdef CONFIG_XPA if (cpu_has_xpa) { entrylo = (pte.pte_low & _PFNX_MASK); @@ -124,6 +129,8 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) tlb_write_indexed(); tlbw_use_hazard(); write_c0_entryhi(old_ctx); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); local_irq_restore(flags); return (void*) vaddr; @@ -245,6 +252,11 @@ void __init fixrange_init(unsigned long start, unsigned long end, if (pmd_none(*pmd)) { pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, + PAGE_SIZE); + set_pmd(pmd, 
__pmd((unsigned long)pte)); BUG_ON(pte != pte_offset_kernel(pmd, 0)); } diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c index 2a116084216f..9507421de335 100644 --- a/arch/mips/mm/sc-debugfs.c +++ b/arch/mips/mm/sc-debugfs.c @@ -55,20 +55,11 @@ static const struct file_operations sc_prefetch_fops = { static int __init sc_debugfs_init(void) { - struct dentry *dir, *file; - - if (!mips_debugfs_dir) - return -ENODEV; + struct dentry *dir; dir = debugfs_create_dir("l2cache", mips_debugfs_dir); - if (IS_ERR(dir)) - return PTR_ERR(dir); - - file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, - NULL, &sc_prefetch_fops); - if (!file) - return -ENOMEM; - + debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir, NULL, + &sc_prefetch_fops); return 0; } late_initcall(sc_debugfs_init); diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index 6f589e0112ce..50f207591b6d 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -67,18 +67,6 @@ void local_flush_tlb_all(void) local_irq_restore(flags); } -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) { -#ifdef DEBUG_TLB - printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm)); -#endif - drop_mmu_context(mm, cpu); - } -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -117,7 +105,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, } write_c0_entryhi(oldpid); } else { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } local_irq_restore(flags); } diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 0596505770db..c13e46ced425 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -104,23 +104,6 @@ void local_flush_tlb_all(void) } EXPORT_SYMBOL(local_flush_tlb_all); -/* All entries common to a mm share an asid. To effectively flush - these entries, we just bump the asid. */ -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu; - - preempt_disable(); - - cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) { - drop_mmu_context(mm, cpu); - } - - preempt_enable(); -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -137,14 +120,23 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, if (size <= (current_cpu_data.tlbsizeftlbsets ? 
current_cpu_data.tlbsize / 8 : current_cpu_data.tlbsize / 2)) { - int oldpid = read_c0_entryhi(); + unsigned long old_entryhi, uninitialized_var(old_mmid); int newpid = cpu_asid(cpu, mm); + old_entryhi = read_c0_entryhi(); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(newpid); + } + htw_stop(); while (start < end) { int idx; - write_c0_entryhi(start | newpid); + if (cpu_has_mmid) + write_c0_entryhi(start); + else + write_c0_entryhi(start | newpid); start += (PAGE_SIZE << 1); mtc0_tlbw_hazard(); tlb_probe(); @@ -160,10 +152,12 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, tlb_write_indexed(); } tlbw_use_hazard(); - write_c0_entryhi(oldpid); + write_c0_entryhi(old_entryhi); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); htw_start(); } else { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); } flush_micro_tlb(); local_irq_restore(flags); @@ -220,15 +214,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) int cpu = smp_processor_id(); if (cpu_context(cpu, vma->vm_mm) != 0) { - unsigned long flags; - int oldpid, newpid, idx; + unsigned long uninitialized_var(old_mmid); + unsigned long flags, old_entryhi; + int idx; - newpid = cpu_asid(cpu, vma->vm_mm); page &= (PAGE_MASK << 1); local_irq_save(flags); - oldpid = read_c0_entryhi(); + old_entryhi = read_c0_entryhi(); htw_stop(); - write_c0_entryhi(page | newpid); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_entryhi(page); + write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm)); + } else { + write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm)); + } mtc0_tlbw_hazard(); tlb_probe(); tlb_probe_hazard(); @@ -244,7 +244,9 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) tlbw_use_hazard(); finish: - write_c0_entryhi(oldpid); + write_c0_entryhi(old_entryhi); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); htw_start(); flush_micro_tlb_vm(vma); local_irq_restore(flags); @@ -307,9 +309,13 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) local_irq_save(flags); htw_stop(); - pid = read_c0_entryhi() & cpu_asid_mask(¤t_cpu_data); address &= (PAGE_MASK << 1); - write_c0_entryhi(address | pid); + if (cpu_has_mmid) { + write_c0_entryhi(address); + } else { + pid = read_c0_entryhi() & cpu_asid_mask(¤t_cpu_data); + write_c0_entryhi(address | pid); + } pgdp = pgd_offset(vma->vm_mm, address); mtc0_tlbw_hazard(); tlb_probe(); @@ -375,12 +381,17 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, #ifdef CONFIG_XPA panic("Broken for XPA kernels"); #else + unsigned int uninitialized_var(old_mmid); unsigned long flags; unsigned long wired; unsigned long old_pagemask; unsigned long old_ctx; local_irq_save(flags); + if (cpu_has_mmid) { + old_mmid = read_c0_memorymapid(); + write_c0_memorymapid(MMID_KERNEL_WIRED); + } /* Save old context and create impossible VPN2 value */ old_ctx = read_c0_entryhi(); htw_stop(); @@ -398,6 +409,8 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, tlbw_use_hazard(); write_c0_entryhi(old_ctx); + if (cpu_has_mmid) + write_c0_memorymapid(old_mmid); tlbw_use_hazard(); /* What is the hazard here? 
*/ htw_start(); write_c0_pagemask(old_pagemask); diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index e86e2e55ad3e..c1e9e144007e 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c @@ -50,14 +50,6 @@ void local_flush_tlb_all(void) local_irq_restore(flags); } -void local_flush_tlb_mm(struct mm_struct *mm) -{ - int cpu = smp_processor_id(); - - if (cpu_context(cpu, mm) != 0) - drop_mmu_context(mm, cpu); -} - void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -75,7 +67,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, local_irq_save(flags); if (size > TFP_TLB_SIZE / 2) { - drop_mmu_context(mm, cpu); + drop_mmu_context(mm); goto out_restore; } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 37b1cb246332..65b6e85447b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, * to mimic that here by taking a load/istream page * fault. */ + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(p, 0); UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); uasm_i_jr(p, ptr); @@ -1646,6 +1648,8 @@ static void iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) { #ifdef CONFIG_SMP + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(p, 0); # ifdef CONFIG_PHYS_ADDR_T_64BIT if (cpu_has_64bits) uasm_i_lld(p, pte, 0, ptr); @@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void) #endif uasm_l_nopage_tlbl(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_0 & 1) { @@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void) #endif uasm_l_nopage_tlbs(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { @@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void) #endif uasm_l_nopage_tlbm(&l, p); + if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) + uasm_i_sync(&p, 0); build_restore_work_registers(&p); #ifdef CONFIG_CPU_MICROMIPS if ((unsigned long)tlb_do_page_fault_1 & 1) { diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..0effd3cba9a7 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -79,8 +79,6 @@ enum reg_val_type { REG_64BIT_32BIT, /* 32-bit compatible, need truncation for 64-bit ops. */ REG_32BIT, - /* 32-bit zero extended. */ - REG_32BIT_ZERO_EX, /* 32-bit no sign/zero extension needed. */ REG_32BIT_POS }; @@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) const struct bpf_prog *prog = ctx->skf; int stack_adjust = ctx->stack_size; int store_offset = stack_adjust - 8; + enum reg_val_type td; int r0 = MIPS_R_V0; - if (dest_reg == MIPS_R_RA && - get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) + if (dest_reg == MIPS_R_RA) { /* Don't let zero extended value escape. 
*/ - emit_instr(ctx, sll, r0, r0, 0); + td = get_reg_val_type(ctx, prog->len, BPF_REG_0); + if (td == REG_64BIT) + emit_instr(ctx, sll, r0, r0, 0); + } if (ctx->flags & EBPF_SAVE_RA) { emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); @@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (dst < 0) return dst; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) + if (td == REG_64BIT) /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); if (insn->imm == 1) { @@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (src < 0 || dst < 0) return -EINVAL; td = get_reg_val_type(ctx, this_idx, insn->dst_reg); - if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { + if (td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } did_move = false; ts = get_reg_val_type(ctx, this_idx, insn->src_reg); - if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { + if (ts == REG_64BIT) { int tmp_reg = MIPS_R_AT; if (bpf_op == BPF_MOV) { @@ -1254,8 +1255,7 @@ jeq_common: if (insn->imm == 64 && td == REG_32BIT) emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); - if (insn->imm != 64 && - (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { + if (insn->imm != 64 && td == REG_64BIT) { /* sign extend */ emit_instr(ctx, sll, dst, dst, 0); } @@ -1819,7 +1819,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* Update the icache */ flush_icache_range((unsigned long)ctx.target, - (unsigned long)(ctx.target + ctx.idx * sizeof(u32))); + (unsigned long)&ctx.target[ctx.idx]); if (bpf_jit_enable > 1) /* Dump JIT code */ diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 8185a2bfaf09..c4f976593061 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o # # These are still pretty much in the old state, watch, go blind. # +obj-$(CONFIG_ATH79) += fixup-ath79.o obj-$(CONFIG_LASAT) += pci-lasat.o obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o diff --git a/arch/mips/pci/fixup-ath79.c b/arch/mips/pci/fixup-ath79.c new file mode 100644 index 000000000000..9e651a4af05e --- /dev/null +++ b/arch/mips/pci/fixup-ath79.c @@ -0,0 +1,21 @@ +/* + * Copyright (C) 2018 John Crispin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include +//#include +#include + +int pcibios_plat_dev_init(struct pci_dev *dev) +{ + return PCIBIOS_SUCCESSFUL; +} + +int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return of_irq_parse_and_map_pci(dev, slot, pin); +} diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c index a1d2c4ae0d1b..df95b0da08f2 100644 --- a/arch/mips/pci/ops-bridge.c +++ b/arch/mips/pci/ops-bridge.c @@ -44,7 +44,7 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); volatile void *addr; @@ -56,11 +56,11 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; @@ -73,21 +73,16 @@ static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn, return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to look at the wrong register. + * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) { *value = emulate_ioc3_cfg(where, size); return PCIBIOS_SUCCESSFUL; } - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... - */ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; if (get_dbe(cf, (u32 *) addr)) @@ -104,7 +99,7 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int busno = bus->number; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); @@ -112,19 +107,19 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn, u32 cf, shift, mask; int res; - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; if (get_dbe(cf, (u32 *) addr)) return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; if (size == 1) @@ -136,22 +131,17 @@ static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn, return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to look at the wrong register. 
+ * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) { *value = emulate_ioc3_cfg(where, size); return PCIBIOS_SUCCESSFUL; } - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... - */ - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | where]; if (get_dbe(cf, (u32 *) addr)) @@ -177,7 +167,7 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); volatile void *addr; @@ -189,11 +179,11 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)]; @@ -210,19 +200,14 @@ static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to touch the wrong register. + * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) return PCIBIOS_SUCCESSFUL; - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... - */ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; if (get_dbe(cf, (u32 *) addr)) @@ -243,7 +228,7 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { struct bridge_controller *bc = BRIDGE_CONTROLLER(bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(devfn); int fn = PCI_FUNC(devfn); int busno = bus->number; @@ -251,17 +236,17 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn, u32 cf, shift, mask, smask; int res; - bridge->b_pci_cfg = (busno << 16) | (slot << 11); + bridge_write(bc, b_pci_cfg, (busno << 16) | (slot << 11)); addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID]; if (get_dbe(cf, (u32 *) addr)) return PCIBIOS_DEVICE_NOT_FOUND; /* - * IOC3 is fucking fucked beyond belief ... Don't even give the + * IOC3 is broken beyond belief ... Don't even give the * generic PCI code a chance to look at it for real ... */ if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16))) - goto oh_my_gawd; + goto is_ioc3; addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))]; @@ -278,19 +263,14 @@ static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn, return PCIBIOS_SUCCESSFUL; -oh_my_gawd: +is_ioc3: /* - * IOC3 is fucking fucked beyond belief ... Don't even give the - * generic PCI code a chance to touch the wrong register. + * IOC3 special handling */ if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) return PCIBIOS_SUCCESSFUL; - /* - * IOC3 is fucking fucked beyond belief ... Don't try to access - * anything but 32-bit words ... 
- */ addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2]; if (get_dbe(cf, (u32 *) addr)) diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c index c94a66070a60..3c177b4d0609 100644 --- a/arch/mips/pci/pci-ip27.c +++ b/arch/mips/pci/pci-ip27.c @@ -23,23 +23,12 @@ */ #define MAX_PCI_BUSSES 40 -/* - * Max #PCI devices (like scsi controllers) we handle on a bus. - */ -#define MAX_DEVICES_PER_PCIBUS 8 - /* * XXX: No kmalloc available when we do our crosstalk scan, * we should try to move it later in the boot process. */ static struct bridge_controller bridges[MAX_PCI_BUSSES]; -/* - * Translate from irq to software PCI bus number and PCI slot. - */ -struct bridge_controller *irq_to_bridge[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; -int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS]; - extern struct pci_ops bridge_pci_ops; int bridge_probe(nasid_t nasid, int widget_id, int masterwid) @@ -47,7 +36,6 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid) unsigned long offset = NODE_OFFSET(nasid); struct bridge_controller *bc; static int num_bridges = 0; - bridge_t *bridge; int slot; pci_set_flags(PCI_PROBE_ONLY); @@ -78,7 +66,6 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid) bc->io.end = ~0UL; bc->io.flags = IORESOURCE_IO; - bc->irq_cpu = smp_processor_id(); bc->widget_id = widget_id; bc->nasid = nasid; @@ -87,45 +74,43 @@ int bridge_probe(nasid_t nasid, int widget_id, int masterwid) /* * point to this bridge */ - bridge = (bridge_t *) RAW_NODE_SWIN_BASE(nasid, widget_id); + bc->base = (struct bridge_regs *)RAW_NODE_SWIN_BASE(nasid, widget_id); /* * Clear all pending interrupts. */ - bridge->b_int_rst_stat = BRIDGE_IRR_ALL_CLR; + bridge_write(bc, b_int_rst_stat, BRIDGE_IRR_ALL_CLR); /* * Until otherwise set up, assume all interrupts are from slot 0 */ - bridge->b_int_device = 0x0; + bridge_write(bc, b_int_device, 0x0); /* * swap pio's to pci mem and io space (big windows) */ - bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP | - BRIDGE_CTRL_MEM_SWAP; + bridge_set(bc, b_wid_control, BRIDGE_CTRL_IO_SWAP | + BRIDGE_CTRL_MEM_SWAP); #ifdef CONFIG_PAGE_SIZE_4KB - bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE; + bridge_clr(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE); #else /* 16kB or larger */ - bridge->b_wid_control |= BRIDGE_CTRL_PAGE_SIZE; + bridge_set(bc, b_wid_control, BRIDGE_CTRL_PAGE_SIZE); #endif /* * Hmm... IRIX sets additional bits in the address which * are documented as reserved in the bridge docs. 
*/ - bridge->b_wid_int_upper = 0x8000 | (masterwid << 16); - bridge->b_wid_int_lower = 0x01800090; /* PI_INT_PEND_MOD off*/ - bridge->b_dir_map = (masterwid << 20); /* DMA */ - bridge->b_int_enable = 0; + bridge_write(bc, b_wid_int_upper, 0x8000 | (masterwid << 16)); + bridge_write(bc, b_wid_int_lower, 0x01800090); /* PI_INT_PEND_MOD off*/ + bridge_write(bc, b_dir_map, (masterwid << 20)); /* DMA */ + bridge_write(bc, b_int_enable, 0); for (slot = 0; slot < 8; slot ++) { - bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR; + bridge_set(bc, b_device[slot].reg, BRIDGE_DEV_SWAP_DIR); bc->pci_int[slot] = -1; } - bridge->b_wid_tflush; /* wait until Bridge PIO complete */ - - bc->base = bridge; + bridge_read(bc, b_wid_tflush); /* wait until Bridge PIO complete */ register_pci_controller(&bc->pc); @@ -168,16 +153,12 @@ int pcibios_plat_dev_init(struct pci_dev *dev) irq = bc->pci_int[slot]; if (irq == -1) { - irq = request_bridge_irq(bc); + irq = request_bridge_irq(bc, slot); if (irq < 0) return irq; bc->pci_int[slot] = irq; } - - irq_to_bridge[irq] = bc; - irq_to_slot[irq] = slot; - dev->irq = irq; return 0; @@ -206,7 +187,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr) static inline void pci_disable_swapping(struct pci_dev *dev) { struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); - bridge_t *bridge = bc->base; + struct bridge_regs *bridge = bc->base; int slot = PCI_SLOT(dev->devfn); /* Turn off byte swapping */ diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 5017d5843c5a..fc29b85cfa92 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void) if (octeon_has_feature(OCTEON_FEATURE_PCIE)) return 0; + if (!octeon_is_pci_host()) { + pr_notice("Not in host mode, PCI Controller not initialized\n"); + return 0; + } + /* Point pcibios_map_irq() to the PCI version of it */ octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; @@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void) else octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; - if (!octeon_is_pci_host()) { - pr_notice("Not in host mode, PCI Controller not initialized\n"); - return 0; - } - /* PCI I/O and PCI MEM values */ set_io_port_base(OCTEON_PCI_IOSPACE_BASE); ioport_resource.start = 0; diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c index e1fa5972a81d..648f5eb2ba68 100644 --- a/arch/mips/ralink/bootrom.c +++ b/arch/mips/ralink/bootrom.c @@ -35,13 +35,7 @@ static const struct file_operations bootrom_file_ops = { static int bootrom_setup(void) { - if (!debugfs_create_file("bootrom", 0444, - NULL, NULL, &bootrom_file_ops)) { - pr_err("Failed to create bootrom debugfs file\n"); - - return -EINVAL; - } - + debugfs_create_file("bootrom", 0444, NULL, NULL, &bootrom_file_ops); return 0; } diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile index 73502fda13ee..27c14ede191e 100644 --- a/arch/mips/sgi-ip27/Makefile +++ b/arch/mips/sgi-ip27/Makefile @@ -3,10 +3,9 @@ # Makefile for the IP27 specific kernel interface routines under Linux. 
# -obj-y := ip27-berr.o ip27-irq.o ip27-irqno.o ip27-init.o ip27-klconfig.o \ +obj-y := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \ ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \ ip27-hubio.o ip27-xtalk.o obj-$(CONFIG_EARLY_PRINTK) += ip27-console.o -obj-$(CONFIG_PCI) += ip27-irq-pci.o obj-$(CONFIG_SMP) += ip27-smp.o diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c index 2abe016a0ffc..68fb0cb89d9d 100644 --- a/arch/mips/sgi-ip27/ip27-hubio.c +++ b/arch/mips/sgi-ip27/ip27-hubio.c @@ -63,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, * after we write it. */ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); - (void) HUB_L(IIO_ITTE_GET(nasid, i)); + __raw_readq(IIO_ITTE_GET(nasid, i)); return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); } @@ -135,7 +135,7 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits) **/ static void hub_set_piomode(nasid_t nasid) { - hubreg_t ii_iowa; + u64 ii_iowa; hubii_wcr_t ii_wcr; unsigned i; diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c index e501c43c02db..6074efeff894 100644 --- a/arch/mips/sgi-ip27/ip27-init.c +++ b/arch/mips/sgi-ip27/ip27-init.c @@ -52,13 +52,10 @@ EXPORT_SYMBOL_GPL(sn_cpu_info); extern void pcibr_setup(cnodeid_t); -extern void xtalk_probe_node(cnodeid_t nid); - static void per_hub_init(cnodeid_t cnode) { struct hub_data *hub = hub_data(cnode); nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); - int i; cpumask_set_cpu(smp_processor_id(), &hub->h_cpus); @@ -71,7 +68,6 @@ static void per_hub_init(cnodeid_t cnode) REMOTE_HUB_S(nasid, IIO_ICTO, 0xff); hub_rtc_init(cnode); - xtalk_probe_node(cnode); #ifdef CONFIG_REPLICATE_EXHANDLERS /* @@ -90,24 +86,6 @@ static void per_hub_init(cnodeid_t cnode) __flush_cache_all(); } #endif - - /* - * Some interrupts are reserved by hardware or by software convention. - * Mark these as reserved right away so they won't be used accidentally - * later. - */ - for (i = 0; i <= BASE_PCI_IRQ; i++) { - __set_bit(i, hub->irq_alloc_mask); - LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i); - } - - __set_bit(IP_PEND0_6_63, hub->irq_alloc_mask); - LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63); - - for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) { - __set_bit(i, hub->irq_alloc_mask); - LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i); - } } void per_cpu_init(void) @@ -116,8 +94,6 @@ void per_cpu_init(void) int slice = LOCAL_HUB_L(PI_CPU_NUM); cnodeid_t cnode = get_compact_nodeid(); struct hub_data *hub = hub_data(cnode); - struct slice_data *si = hub->slice + slice; - int i; if (test_and_set_bit(slice, &hub->slice_map)) return; @@ -126,22 +102,14 @@ void per_cpu_init(void) per_hub_init(cnode); - for (i = 0; i < LEVELS_PER_SLICE; i++) - si->level_to_irq[i] = -1; - - /* - * We use this so we can find the local hub's data as fast as only - * possible. - */ - cpu_data[cpu].data = si; - cpu_time_init(); install_ipi(); /* Install our NMI handler if symmon hasn't installed one. 
*/ install_cpu_nmi_handler(cputoslice(cpu)); - set_c0_status(SRB_DEV0 | SRB_DEV1); + enable_percpu_irq(IP27_HUB_PEND0_IRQ, IRQ_TYPE_NONE); + enable_percpu_irq(IP27_HUB_PEND1_IRQ, IRQ_TYPE_NONE); } /* @@ -177,7 +145,7 @@ extern void ip27_reboot_setup(void); void __init plat_mem_setup(void) { - hubreg_t p, e, n_mode; + u64 p, e, n_mode; nasid_t nid; ip27_reboot_setup(); @@ -215,7 +183,6 @@ void __init plat_mem_setup(void) #endif ioc3_eth_init(); - per_cpu_init(); set_io_port_base(IO_BASE); } diff --git a/arch/mips/sgi-ip27/ip27-irq-pci.c b/arch/mips/sgi-ip27/ip27-irq-pci.c deleted file mode 100644 index cd449e90b917..000000000000 --- a/arch/mips/sgi-ip27/ip27-irq-pci.c +++ /dev/null @@ -1,266 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * ip27-irq.c: Highlevel interrupt handling for IP27 architecture. - * - * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org) - * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 1999 - 2001 Kanoj Sarcar - */ - -#undef DEBUG - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -/* - * Linux has a controller-independent x86 interrupt architecture. - * every controller has a 'controller-template', that is used - * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the appropriate - * controller. Thus drivers need not be aware of the - * interrupt-controller. - * - * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC, - * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC. - * (IO-APICs assumed to be messaging to Pentium local-APICs) - * - * the code is designed to be easily extended with new/different - * interrupt controllers, without having to do assembly magic. 
- */ - -extern struct bridge_controller *irq_to_bridge[]; -extern int irq_to_slot[]; - -/* - * use these macros to get the encoded nasid and widget id - * from the irq value - */ -#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)] -#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i] - -static inline int alloc_level(int cpu, int irq) -{ - struct hub_data *hub = hub_data(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - int level; - - level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE); - if (level >= LEVELS_PER_SLICE) - panic("Cpu %d flooded with devices", cpu); - - __set_bit(level, hub->irq_alloc_mask); - si->level_to_irq[level] = irq; - - return level; -} - -static inline int find_level(cpuid_t *cpunum, int irq) -{ - int cpu, i; - - for_each_online_cpu(cpu) { - struct slice_data *si = cpu_data[cpu].data; - - for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++) - if (si->level_to_irq[i] == irq) { - *cpunum = cpu; - - return i; - } - } - - panic("Could not identify cpu/level for irq %d", irq); -} - -static int intr_connect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - set_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -static int intr_disconnect_level(int cpu, int bit) -{ - nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - struct slice_data *si = cpu_data[cpu].data; - - clear_bit(bit, si->irq_enable_mask); - - if (!cputoslice(cpu)) { - REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]); - } else { - REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]); - REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]); - } - - return 0; -} - -/* Startup one of the (PCI ...) IRQs routes over a bridge. */ -static unsigned int startup_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc; - bridgereg_t device; - bridge_t *bridge; - int pin, swlevel; - cpuid_t cpu; - - pin = SLOT_FROM_PCI_IRQ(d->irq); - bc = IRQ_TO_BRIDGE(d->irq); - bridge = bc->base; - - pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin); - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8)); - bridge->b_int_enable |= (1 << pin); - bridge->b_int_enable |= 0x7ffffe00; /* more stuff in int_enable */ - - /* - * Enable sending of an interrupt clear packt to the hub on a high to - * low transition of the interrupt pin. - * - * IRIX sets additional bits in the address which are documented as - * reserved in the bridge docs. - */ - bridge->b_int_mode |= (1UL << pin); - - /* - * We assume the bridge to have a 1:1 mapping between devices - * (slots) and intr pins. - */ - device = bridge->b_int_device; - device &= ~(7 << (pin*3)); - device |= (pin << (pin*3)); - bridge->b_int_device = device; - - bridge->b_wid_tflush; - - intr_connect_level(cpu, swlevel); - - return 0; /* Never anything pending. */ -} - -/* Shutdown one of the (PCI ...) IRQs routes over a bridge. 
*/ -static void shutdown_bridge_irq(struct irq_data *d) -{ - struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq); - bridge_t *bridge = bc->base; - int pin, swlevel; - cpuid_t cpu; - - pr_debug("bridge_shutdown: irq 0x%x\n", d->irq); - pin = SLOT_FROM_PCI_IRQ(d->irq); - - /* - * map irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - swlevel = find_level(&cpu, d->irq); - intr_disconnect_level(cpu, swlevel); - - bridge->b_int_enable &= ~(1 << pin); - bridge->b_wid_tflush; -} - -static inline void enable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_connect_level(cpu, swlevel); -} - -static inline void disable_bridge_irq(struct irq_data *d) -{ - cpuid_t cpu; - int swlevel; - - swlevel = find_level(&cpu, d->irq); /* Criminal offence */ - intr_disconnect_level(cpu, swlevel); -} - -static struct irq_chip bridge_irq_type = { - .name = "bridge", - .irq_startup = startup_bridge_irq, - .irq_shutdown = shutdown_bridge_irq, - .irq_mask = disable_bridge_irq, - .irq_unmask = enable_bridge_irq, -}; - -void register_bridge_irq(unsigned int irq) -{ - irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); -} - -int request_bridge_irq(struct bridge_controller *bc) -{ - int irq = allocate_irqno(); - int swlevel, cpu; - nasid_t nasid; - - if (irq < 0) - return irq; - - /* - * "map" irq to a swlevel greater than 6 since the first 6 bits - * of INT_PEND0 are taken - */ - cpu = bc->irq_cpu; - swlevel = alloc_level(cpu, irq); - if (unlikely(swlevel < 0)) { - free_irqno(irq); - - return -EAGAIN; - } - - /* Make sure it's not already pending when we connect it. */ - nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); - REMOTE_HUB_CLR_INTR(nasid, swlevel); - - intr_connect_level(cpu, swlevel); - - register_bridge_irq(irq); - - return irq; -} diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 0dde6164a06f..710a59764b01 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c @@ -7,67 +7,234 @@ * Copyright (C) 1999 - 2001 Kanoj Sarcar */ -#undef DEBUG - -#include -#include -#include -#include -#include -#include #include +#include #include -#include -#include -#include #include -#include -#include #include -#include #include -#include - -#include +#include +#include #include #include #include #include #include -/* - * Linux has a controller-independent x86 interrupt architecture. - * every controller has a 'controller-template', that is used - * by the main code to do the right thing. Each driver-visible - * interrupt source is transparently wired to the appropriate - * controller. Thus drivers need not be aware of the - * interrupt-controller. - * - * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC, - * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC. - * (IO-APICs assumed to be messaging to Pentium local-APICs) - * - * the code is designed to be easily extended with new/different - * interrupt controllers, without having to do assembly magic. 
- */ +struct hub_irq_data { + struct bridge_controller *bc; + u64 *irq_mask[2]; + cpuid_t cpu; + int bit; + int pin; +}; -extern asmlinkage void ip27_irq(void); +static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT); -/* - * Find first bit set - */ -static int ms1bit(unsigned long x) +static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask); + +static inline int alloc_level(void) +{ + int level; + +again: + level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT); + if (level >= IP27_HUB_IRQ_COUNT) + return -ENOSPC; + + if (test_and_set_bit(level, hub_irq_map)) + goto again; + + return level; +} + +static void enable_hub_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); + + set_bit(hd->bit, mask); + __raw_writeq(mask[0], hd->irq_mask[0]); + __raw_writeq(mask[1], hd->irq_mask[1]); +} + +static void disable_hub_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); + + clear_bit(hd->bit, mask); + __raw_writeq(mask[0], hd->irq_mask[0]); + __raw_writeq(mask[1], hd->irq_mask[1]); +} + +static unsigned int startup_bridge_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + struct bridge_controller *bc; + nasid_t nasid; + u32 device; + int pin; + + if (!hd) + return -EINVAL; + + pin = hd->pin; + bc = hd->bc; + + nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(hd->cpu)); + bridge_write(bc, b_int_addr[pin].addr, + (0x20000 | hd->bit | (nasid << 8))); + bridge_set(bc, b_int_enable, (1 << pin)); + bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */ + + /* + * Enable sending of an interrupt clear packt to the hub on a high to + * low transition of the interrupt pin. + * + * IRIX sets additional bits in the address which are documented as + * reserved in the bridge docs. + */ + bridge_set(bc, b_int_mode, (1UL << pin)); + + /* + * We assume the bridge to have a 1:1 mapping between devices + * (slots) and intr pins. + */ + device = bridge_read(bc, b_int_device); + device &= ~(7 << (pin*3)); + device |= (pin << (pin*3)); + bridge_write(bc, b_int_device, device); + + bridge_read(bc, b_wid_tflush); + + enable_hub_irq(d); + + return 0; /* Never anything pending. */ +} + +static void shutdown_bridge_irq(struct irq_data *d) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + struct bridge_controller *bc; + int pin = hd->pin; + + if (!hd) + return; + + disable_hub_irq(d); + + bc = hd->bc; + bridge_clr(bc, b_int_enable, (1 << pin)); + bridge_read(bc, b_wid_tflush); +} + +static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask) +{ + nasid_t nasid; + int cpu; + + cpu = cpumask_first_and(mask, cpu_online_mask); + nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); + hd->cpu = cpu; + if (!cputoslice(cpu)) { + hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A); + hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A); + } else { + hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B); + hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B); + } + + /* Make sure it's not already pending when we connect it. 
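For reference, the allocation and dispatch scheme used by the reworked ip27-irq.c can be modelled in ordinary C: interrupt levels come out of a shared bitmap claimed with an atomic test-and-set that retries on a lost race (as alloc_level() does with hub_irq_map), and the chained handlers dispatch the lowest bit that is both pending and enabled. The sketch below is a hedged user-space model of that pattern only, not the kernel code; NLEVELS, IRQ_BASE and the printed output are illustrative stand-ins.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NLEVELS 64			/* stands in for IP27_HUB_IRQ_COUNT */
#define IRQ_BASE 64			/* stands in for IP27_HUB_IRQ_BASE */

static _Atomic uint64_t level_map;	/* stands in for hub_irq_map */

/* Claim the first free level; retry if another CPU wins the race,
 * the way alloc_level() retries after test_and_set_bit(). */
static int alloc_level(void)
{
	for (;;) {
		uint64_t word = atomic_load(&level_map);

		if (word == UINT64_MAX)
			return -1;			/* no free level */

		int level = __builtin_ctzll(~word);	/* first zero bit */
		uint64_t bit = (uint64_t)1 << level;

		if (!(atomic_fetch_or(&level_map, bit) & bit))
			return level;			/* claimed it first */
		/* lost the race for this bit, scan again */
	}
}

/* Dispatch the lowest source that is pending *and* enabled, the way
 * ip27_do_irq_mask0() uses __ffs(pend0 & mask[0]) + IP27_HUB_IRQ_BASE. */
static void dispatch(uint64_t pending, uint64_t enabled)
{
	uint64_t pend = pending & enabled;

	if (!pend)
		return;
	printf("handle irq %d\n", IRQ_BASE + __builtin_ctzll(pend));
}

int main(void)
{
	printf("level %d\n", alloc_level());	/* 0 */
	printf("level %d\n", alloc_level());	/* 1 */
	dispatch(0x48, 0x0f);			/* bit 3 is lowest pending+enabled */
	return 0;
}
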
*/ + REMOTE_HUB_CLR_INTR(nasid, hd->bit); +} + +static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask, + bool force) +{ + struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); + + if (!hd) + return -EINVAL; + + if (irqd_is_started(d)) + disable_hub_irq(d); + + setup_hub_mask(hd, mask); + + if (irqd_is_started(d)) + startup_bridge_irq(d); + + irq_data_update_effective_affinity(d, cpumask_of(hd->cpu)); + + return 0; +} + +static struct irq_chip hub_irq_type = { + .name = "HUB", + .irq_startup = startup_bridge_irq, + .irq_shutdown = shutdown_bridge_irq, + .irq_mask = disable_hub_irq, + .irq_unmask = enable_hub_irq, + .irq_set_affinity = set_affinity_hub_irq, +}; + +int request_bridge_irq(struct bridge_controller *bc, int pin) { - int b = 0, s; + struct hub_irq_data *hd; + struct hub_data *hub; + struct irq_desc *desc; + int swlevel; + int irq; + + hd = kzalloc(sizeof(*hd), GFP_KERNEL); + if (!hd) + return -ENOMEM; + + swlevel = alloc_level(); + if (unlikely(swlevel < 0)) { + kfree(hd); + return -EAGAIN; + } + irq = swlevel + IP27_HUB_IRQ_BASE; + + hd->bc = bc; + hd->bit = swlevel; + hd->pin = pin; + irq_set_chip_data(irq, hd); + + /* use CPU connected to nearest hub */ + hub = hub_data(NASID_TO_COMPACT_NODEID(bc->nasid)); + setup_hub_mask(hd, &hub->h_cpus); - s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s; - s = 8; if (x >> 8 == 0) s = 0; b += s; x >>= s; - s = 4; if (x >> 4 == 0) s = 0; b += s; x >>= s; - s = 2; if (x >> 2 == 0) s = 0; b += s; x >>= s; - s = 1; if (x >> 1 == 0) s = 0; b += s; + desc = irq_to_desc(irq); + desc->irq_common_data.node = bc->nasid; + cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus); - return b; + return irq; +} + +void ip27_hub_irq_init(void) +{ + int i; + + for (i = IP27_HUB_IRQ_BASE; + i < (IP27_HUB_IRQ_BASE + IP27_HUB_IRQ_COUNT); i++) + irq_set_chip_and_handler(i, &hub_irq_type, handle_level_irq); + + /* + * Some interrupts are reserved by hardware or by software convention. + * Mark these as reserved right away so they won't be used accidentally + * later. + */ + for (i = 0; i <= BASE_PCI_IRQ; i++) + set_bit(i, hub_irq_map); + + set_bit(IP_PEND0_6_63, hub_irq_map); + + for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) + set_bit(i, hub_irq_map); } /* @@ -82,23 +249,19 @@ static int ms1bit(unsigned long x) * Kanoj 05.13.00 */ -static void ip27_do_irq_mask0(void) +static void ip27_do_irq_mask0(struct irq_desc *desc) { - int irq, swlevel; - hubreg_t pend0, mask0; cpuid_t cpu = smp_processor_id(); - int pi_int_mask0 = - (cputoslice(cpu) == 0) ? 
PI_INT_MASK0_A : PI_INT_MASK0_B; + unsigned long *mask = per_cpu(irq_enable_mask, cpu); + u64 pend0; /* copied from Irix intpend0() */ pend0 = LOCAL_HUB_L(PI_INT_PEND0); - mask0 = LOCAL_HUB_L(pi_int_mask0); - pend0 &= mask0; /* Pick intrs we should look at */ + pend0 &= mask[0]; /* Pick intrs we should look at */ if (!pend0) return; - swlevel = ms1bit(pend0); #ifdef CONFIG_SMP if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ); @@ -108,106 +271,66 @@ static void ip27_do_irq_mask0(void) scheduler_ipi(); } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); - irq_enter(); generic_smp_call_function_interrupt(); - irq_exit(); } else #endif - { - /* "map" swlevel to irq */ - struct slice_data *si = cpu_data[cpu].data; - - irq = si->level_to_irq[swlevel]; - do_IRQ(irq); - } + generic_handle_irq(__ffs(pend0) + IP27_HUB_IRQ_BASE); LOCAL_HUB_L(PI_INT_PEND0); } -static void ip27_do_irq_mask1(void) +static void ip27_do_irq_mask1(struct irq_desc *desc) { - int irq, swlevel; - hubreg_t pend1, mask1; cpuid_t cpu = smp_processor_id(); - int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B; - struct slice_data *si = cpu_data[cpu].data; + unsigned long *mask = per_cpu(irq_enable_mask, cpu); + u64 pend1; /* copied from Irix intpend0() */ pend1 = LOCAL_HUB_L(PI_INT_PEND1); - mask1 = LOCAL_HUB_L(pi_int_mask1); - pend1 &= mask1; /* Pick intrs we should look at */ + pend1 &= mask[1]; /* Pick intrs we should look at */ if (!pend1) return; - swlevel = ms1bit(pend1); - /* "map" swlevel to irq */ - irq = si->level_to_irq[swlevel]; - LOCAL_HUB_CLR_INTR(swlevel); - do_IRQ(irq); + generic_handle_irq(__ffs(pend1) + IP27_HUB_IRQ_BASE + 64); LOCAL_HUB_L(PI_INT_PEND1); } -static void ip27_prof_timer(void) -{ - panic("CPU %d got a profiling interrupt", smp_processor_id()); -} - -static void ip27_hub_error(void) -{ - panic("CPU %d got a hub error interrupt", smp_processor_id()); -} - -asmlinkage void plat_irq_dispatch(void) -{ - unsigned long pending = read_c0_cause() & read_c0_status(); - extern unsigned int rt_timer_irq; - - if (pending & CAUSEF_IP4) - do_IRQ(rt_timer_irq); - else if (pending & CAUSEF_IP2) /* PI_INT_PEND_0 or CC_PEND_{A|B} */ - ip27_do_irq_mask0(); - else if (pending & CAUSEF_IP3) /* PI_INT_PEND_1 */ - ip27_do_irq_mask1(); - else if (pending & CAUSEF_IP5) - ip27_prof_timer(); - else if (pending & CAUSEF_IP6) - ip27_hub_error(); -} - -void __init arch_init_irq(void) -{ -} - void install_ipi(void) { - int slice = LOCAL_HUB_L(PI_CPU_NUM); int cpu = smp_processor_id(); - struct slice_data *si = cpu_data[cpu].data; - struct hub_data *hub = hub_data(cpu_to_node(cpu)); + unsigned long *mask = per_cpu(irq_enable_mask, cpu); + int slice = LOCAL_HUB_L(PI_CPU_NUM); int resched, call; resched = CPU_RESCHED_A_IRQ + slice; - __set_bit(resched, hub->irq_alloc_mask); - __set_bit(resched, si->irq_enable_mask); + set_bit(resched, mask); LOCAL_HUB_CLR_INTR(resched); call = CPU_CALL_A_IRQ + slice; - __set_bit(call, hub->irq_alloc_mask); - __set_bit(call, si->irq_enable_mask); + set_bit(call, mask); LOCAL_HUB_CLR_INTR(call); if (slice == 0) { - LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]); - LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]); + LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]); + LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]); } else { - LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]); - 
LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]); + LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]); + LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]); } } + +void __init arch_init_irq(void) +{ + mips_cpu_irq_init(); + ip27_hub_irq_init(); + + irq_set_percpu_devid(IP27_HUB_PEND0_IRQ); + irq_set_chained_handler(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0); + irq_set_percpu_devid(IP27_HUB_PEND1_IRQ); + irq_set_chained_handler(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1); +} diff --git a/arch/mips/sgi-ip27/ip27-irqno.c b/arch/mips/sgi-ip27/ip27-irqno.c deleted file mode 100644 index 957ab58e1c00..000000000000 --- a/arch/mips/sgi-ip27/ip27-irqno.c +++ /dev/null @@ -1,48 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ -#include -#include -#include - -#include - -static DECLARE_BITMAP(irq_map, NR_IRQS); - -int allocate_irqno(void) -{ - int irq; - -again: - irq = find_first_zero_bit(irq_map, NR_IRQS); - - if (irq >= NR_IRQS) - return -ENOSPC; - - if (test_and_set_bit(irq, irq_map)) - goto again; - - return irq; -} - -/* - * Allocate the 16 legacy interrupts for i8259 devices. This happens early - * in the kernel initialization so treating allocation failure as BUG() is - * ok. - */ -void __init alloc_legacy_irqno(void) -{ - int i; - - for (i = 0; i <= 16; i++) - BUG_ON(test_and_set_bit(i, irq_map)); -} - -void free_irqno(unsigned int irq) -{ - smp_mb__before_atomic(); - clear_bit(irq, irq_map); - smp_mb__after_atomic(); -} diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 813d13f92957..fb077a947575 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -44,7 +44,7 @@ static int is_fine_dirmode(void) return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE; } -static hubreg_t get_region(cnodeid_t cnode) +static u64 get_region(cnodeid_t cnode) { if (fine_mode) return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_FINEREG_SHFT; @@ -52,9 +52,9 @@ static hubreg_t get_region(cnodeid_t cnode) return COMPACT_TO_NASID_NODEID(cnode) >> NASID_TO_COARSEREG_SHFT; } -static hubreg_t region_mask; +static u64 region_mask; -static void gen_region_mask(hubreg_t *region_mask) +static void gen_region_mask(u64 *region_mask) { cnodeid_t cnode; @@ -154,11 +154,11 @@ static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) } if (router_a == NULL) { - printk("node_distance: router_a NULL\n"); + pr_info("node_distance: router_a NULL\n"); return -1; } if (router_b == NULL) { - printk("node_distance: router_b NULL\n"); + pr_info("node_distance: router_b NULL\n"); return -1; } @@ -203,17 +203,17 @@ static void __init dump_topology(void) klrou_t *router; cnodeid_t row, col; - printk("************** Topology ********************\n"); + pr_info("************** Topology ********************\n"); - printk(" "); + pr_info(" "); for_each_online_node(col) - printk("%02d ", col); - printk("\n"); + pr_cont("%02d ", col); + pr_cont("\n"); for_each_online_node(row) { - printk("%02d ", row); + pr_info("%02d ", row); for_each_online_node(col) - printk("%2d ", node_distance(row, col)); - printk("\n"); + pr_cont("%2d ", node_distance(row, col)); + pr_cont("\n"); } for_each_online_node(cnode) { @@ -230,7 +230,7 @@ static void __init dump_topology(void) do { if (brd->brd_flags & DUPLICATE_BOARD) continue; - printk("Router %d:", router_num); + pr_cont("Router %d:", router_num); router_num++; router = 
(klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]); @@ -244,11 +244,11 @@ static void __init dump_topology(void) router->rou_port[port].port_offset); if (dest_brd->brd_type == KLTYPE_IP27) - printk(" %d", dest_brd->brd_nasid); + pr_cont(" %d", dest_brd->brd_nasid); if (dest_brd->brd_type == KLTYPE_ROUTER) - printk(" r"); + pr_cont(" r"); } - printk("\n"); + pr_cont("\n"); } while ( (brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)) ); } @@ -373,7 +373,7 @@ static void __init szmem(void) if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) > (slot0sz << PAGE_SHIFT)) { - printk("Ignoring slot %d onwards on node %d\n", + pr_info("Ignoring slot %d onwards on node %d\n", slot, node); slot = MAX_MEM_SLOTS; continue; diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c index 8ac2bfa35fb6..3aae388561d9 100644 --- a/arch/mips/sgi-ip27/ip27-nmi.c +++ b/arch/mips/sgi-ip27/ip27-nmi.c @@ -62,75 +62,75 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice) (TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) + slice * IP27_NMI_KREGS_CPU_SIZE); - printk("NMI nasid %d: slice %d\n", nasid, slice); + pr_emerg("NMI nasid %d: slice %d\n", nasid, slice); /* * Saved main processor registers */ for (i = 0; i < 32; ) { if ((i % 4) == 0) - printk("$%2d :", i); - printk(" %016lx", nr->gpr[i]); + pr_emerg("$%2d :", i); + pr_cont(" %016lx", nr->gpr[i]); i++; if ((i % 4) == 0) - printk("\n"); + pr_cont("\n"); } - printk("Hi : (value lost)\n"); - printk("Lo : (value lost)\n"); + pr_emerg("Hi : (value lost)\n"); + pr_emerg("Lo : (value lost)\n"); /* * Saved cp0 registers */ - printk("epc : %016lx %pS\n", nr->epc, (void *) nr->epc); - printk("%s\n", print_tainted()); - printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc); - printk("ra : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]); - printk("Status: %08lx ", nr->sr); + pr_emerg("epc : %016lx %pS\n", nr->epc, (void *)nr->epc); + pr_emerg("%s\n", print_tainted()); + pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc); + pr_emerg("ra : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]); + pr_emerg("Status: %08lx ", nr->sr); if (nr->sr & ST0_KX) - printk("KX "); + pr_cont("KX "); if (nr->sr & ST0_SX) - printk("SX "); + pr_cont("SX "); if (nr->sr & ST0_UX) - printk("UX "); + pr_cont("UX "); switch (nr->sr & ST0_KSU) { case KSU_USER: - printk("USER "); + pr_cont("USER "); break; case KSU_SUPERVISOR: - printk("SUPERVISOR "); + pr_cont("SUPERVISOR "); break; case KSU_KERNEL: - printk("KERNEL "); + pr_cont("KERNEL "); break; default: - printk("BAD_MODE "); + pr_cont("BAD_MODE "); break; } if (nr->sr & ST0_ERL) - printk("ERL "); + pr_cont("ERL "); if (nr->sr & ST0_EXL) - printk("EXL "); + pr_cont("EXL "); if (nr->sr & ST0_IE) - printk("IE "); - printk("\n"); + pr_cont("IE "); + pr_cont("\n"); - printk("Cause : %08lx\n", nr->cause); - printk("PrId : %08x\n", read_c0_prid()); - printk("BadVA : %016lx\n", nr->badva); - printk("CErr : %016lx\n", nr->cache_err); - printk("NMI_SR: %016lx\n", nr->nmi_sr); + pr_emerg("Cause : %08lx\n", nr->cause); + pr_emerg("PrId : %08x\n", read_c0_prid()); + pr_emerg("BadVA : %016lx\n", nr->badva); + pr_emerg("CErr : %016lx\n", nr->cache_err); + pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr); - printk("\n"); + pr_emerg("\n"); } void nmi_dump_hub_irq(nasid_t nasid, int slice) { - hubreg_t mask0, mask1, pend0, pend1; + u64 mask0, mask1, pend0, pend1; if (slice == 0) { /* Slice A */ mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A); @@ -143,9 +143,9 @@ void nmi_dump_hub_irq(nasid_t nasid, int 
slice) pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0); pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1); - printk("PI_INT_MASK0: %16Lx PI_INT_MASK1: %16Lx\n", mask0, mask1); - printk("PI_INT_PEND0: %16Lx PI_INT_PEND1: %16Lx\n", pend0, pend1); - printk("\n\n"); + pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1); + pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1); + pr_emerg("\n\n"); } /* diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index 545446dfe7fa..20b81209c6b8 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -177,7 +177,7 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action) ip27_send_ipi_single(i, action); } -static void ip27_init_secondary(void) +static void ip27_init_cpu(void) { per_cpu_init(); } @@ -235,9 +235,10 @@ static void __init ip27_prepare_cpus(unsigned int max_cpus) const struct plat_smp_ops ip27_smp_ops = { .send_ipi_single = ip27_send_ipi_single, .send_ipi_mask = ip27_send_ipi_mask, - .init_secondary = ip27_init_secondary, + .init_secondary = ip27_init_cpu, .smp_finish = ip27_smp_finish, .boot_secondary = ip27_boot_secondary, .smp_setup = ip27_smp_setup, .prepare_cpus = ip27_prepare_cpus, + .prepare_boot_cpu = ip27_init_cpu, }; diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index 9d55247533a5..9b4b9ac621a3 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c @@ -38,20 +38,6 @@ #include #include -static void enable_rt_irq(struct irq_data *d) -{ -} - -static void disable_rt_irq(struct irq_data *d) -{ -} - -static struct irq_chip rt_irq_type = { - .name = "SN HUB RT timer", - .irq_mask = disable_rt_irq, - .irq_unmask = enable_rt_irq, -}; - static int rt_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned int cpu = smp_processor_id(); @@ -65,8 +51,6 @@ static int rt_next_event(unsigned long delta, struct clock_event_device *evt) return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? 
-ETIME : 0; } -unsigned int rt_timer_irq; - static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent); static DEFINE_PER_CPU(char [11], hub_rt_name); @@ -87,6 +71,7 @@ static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id) struct irqaction hub_rt_irqaction = { .handler = hub_rt_counter_handler, + .percpu_dev_id = &hub_rt_clockevent, .flags = IRQF_PERCPU | IRQF_TIMER, .name = "hub-rt", }; @@ -107,7 +92,6 @@ void hub_rt_clock_event_init(void) unsigned int cpu = smp_processor_id(); struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); unsigned char *name = per_cpu(hub_rt_name, cpu); - int irq = rt_timer_irq; sprintf(name, "hub-rt %d", cpu); cd->name = name; @@ -118,29 +102,19 @@ void hub_rt_clock_event_init(void) cd->min_delta_ns = clockevent_delta2ns(0x300, cd); cd->min_delta_ticks = 0x300; cd->rating = 200; - cd->irq = irq; + cd->irq = IP27_RT_TIMER_IRQ; cd->cpumask = cpumask_of(cpu); cd->set_next_event = rt_next_event; clockevents_register_device(cd); + + enable_percpu_irq(IP27_RT_TIMER_IRQ, IRQ_TYPE_NONE); } static void __init hub_rt_clock_event_global_init(void) { - int irq; - - do { - smp_wmb(); - irq = rt_timer_irq; - if (irq) - break; - - irq = allocate_irqno(); - if (irq < 0) - panic("Allocation of irq number for timer failed"); - } while (xchg(&rt_timer_irq, irq)); - - irq_set_chip_and_handler(irq, &rt_irq_type, handle_percpu_irq); - setup_irq(irq, &hub_rt_irqaction); + irq_set_handler(IP27_RT_TIMER_IRQ, handle_percpu_devid_irq); + irq_set_percpu_devid(IP27_RT_TIMER_IRQ); + setup_percpu_irq(IP27_RT_TIMER_IRQ, &hub_rt_irqaction); } static u64 hub_rt_read(struct clocksource *cs) @@ -194,8 +168,6 @@ void cpu_time_init(void) panic("No information about myself?"); printk("CPU %d clock is %dMHz.\n", smp_processor_id(), cpu->cpu_speed); - - set_c0_status(SRB_TIMOCLK); } void hub_rtc_init(cnodeid_t cnode) diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c index 4fe5678ba74d..ce06aaa115ae 100644 --- a/arch/mips/sgi-ip27/ip27-xtalk.c +++ b/arch/mips/sgi-ip27/ip27-xtalk.c @@ -99,7 +99,7 @@ static int xbow_probe(nasid_t nasid) return 0; } -void xtalk_probe_node(cnodeid_t nid) +static void xtalk_probe_node(cnodeid_t nid) { volatile u64 hubreg; nasid_t nasid; @@ -133,3 +133,14 @@ void xtalk_probe_node(cnodeid_t nid) break; } } + +static int __init xtalk_init(void) +{ + cnodeid_t cnode; + + for_each_online_node(cnode) + xtalk_probe_node(cnode); + + return 0; +} +arch_initcall(xtalk_init); diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index f6fd340e39c2..0ede4deb8181 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -8,6 +8,7 @@ ccflags-vdso := \ $(filter -E%,$(KBUILD_CFLAGS)) \ $(filter -mmicromips,$(KBUILD_CFLAGS)) \ $(filter -march=%,$(KBUILD_CFLAGS)) \ + $(filter -m%-float,$(KBUILD_CFLAGS)) \ -D__VDSO__ ifdef CONFIG_CC_IS_CLANG @@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 +$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) @@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE $(call cmd,force_checksrc) $(call if_changed_rule,cc_o_c) -$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 +$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE $(call if_changed_dep,cpp_lds_S) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 
dda1906bba11..addb7f5f5264 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -5,6 +5,7 @@ config NDS32 def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_WANT_FRAME_POINTERS if FTRACE diff --git a/arch/nds32/configs/defconfig b/arch/nds32/configs/defconfig index 2546d8770785..65ce9259081b 100644 --- a/arch/nds32/configs/defconfig +++ b/arch/nds32/configs/defconfig @@ -74,7 +74,7 @@ CONFIG_GENERIC_PHY=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y +CONFIG_FS_ENCRYPTION=y CONFIG_FUSE_FS=y CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h index 53dcb49b0b12..116598b47c4d 100644 --- a/arch/nds32/include/asm/uaccess.h +++ b/arch/nds32/include/asm/uaccess.h @@ -37,7 +37,6 @@ extern int fixup_exception(struct pt_regs *regs); #define KERNEL_DS ((mm_segment_t) { ~0UL }) #define USER_DS ((mm_segment_t) {TASK_SIZE - 1}) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define user_addr_max get_fs diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h index c2c3a3e34083..4ec8f543103f 100644 --- a/arch/nds32/include/uapi/asm/unistd.h +++ b/arch/nds32/include/uapi/asm/unistd.h @@ -3,6 +3,8 @@ #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYNC_FILE_RANGE2 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS /* Use the standard ABI for syscalls */ #include diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c index ab7ab46234b1..9712fd474f2c 100644 --- a/arch/nds32/kernel/process.c +++ b/arch/nds32/kernel/process.c @@ -121,7 +121,7 @@ void show_regs(struct pt_regs *regs) regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]); pr_info(" IRQs o%s Segment %s\n", interrupts_enabled(regs) ? "n" : "ff", - segment_eq(get_fs(), get_ds())? "kernel" : "user"); + segment_eq(get_fs(), KERNEL_DS)? "kernel" : "user"); } EXPORT_SYMBOL(show_regs); diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index 253f79fc7196..1d03633f89a9 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c @@ -78,8 +78,10 @@ static void __init map_ram(void) } /* Alloc one page for holding PTE's... 
*/ - pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); - memset(pte, 0, PAGE_SIZE); + pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); /* Fill the newly allocated page with PTE'S */ @@ -111,8 +113,10 @@ static void __init fixedrange_init(void) pgd = swapper_pg_dir + pgd_index(vaddr); pud = pud_offset(pgd, vaddr); pmd = pmd_offset(pud, vaddr); - fixmap_pmd_p = (pmd_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); - memset(fixmap_pmd_p, 0, PAGE_SIZE); + fixmap_pmd_p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!fixmap_pmd_p) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE)); #ifdef CONFIG_HIGHMEM @@ -124,8 +128,10 @@ static void __init fixedrange_init(void) pgd = swapper_pg_dir + pgd_index(vaddr); pud = pud_offset(pgd, vaddr); pmd = pmd_offset(pud, vaddr); - pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); - memset(pte, 0, PAGE_SIZE); + pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE)); pkmap_page_table = pte; #endif /* CONFIG_HIGHMEM */ @@ -150,8 +156,10 @@ void __init paging_init(void) fixedrange_init(); /* allocate space for empty_zero_page */ - zero_page = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); - memset(zero_page, 0, PAGE_SIZE); + zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); zone_sizes_init(); empty_zero_page = virt_to_page(zero_page); diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 532343eebf89..4ef15a61b7bc 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config NIOS2 def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_SWAP @@ -122,7 +123,6 @@ config NIOS2_CMDLINE_IGNORE_DTB config NIOS2_PASS_CMDLINE bool "Passed kernel command line from u-boot" - default n help Use bootargs env variable from u-boot for kernel command line. will override "Default kernel command string". 
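The early-boot allocations converted above share one shape: memblock_alloc() already returns zeroed, virtually addressed memory (so the old __va() and memset() go away), but it can return NULL, hence the explicit panic. A small sketch of that recurring pattern, against the in-kernel memblock API and not buildable as a standalone program; early_zeroed_page() is a hypothetical wrapper named here only to make the check explicit, it is not part of this series.

#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/init.h>

static void * __init early_zeroed_page(void)
{
	void *p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	return p;	/* already zeroed by memblock_alloc() */
}
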
diff --git a/arch/nios2/configs/10m50_defconfig b/arch/nios2/configs/10m50_defconfig index c601c8ff1ae6..7977ab7e2ca6 100644 --- a/arch/nios2/configs/10m50_defconfig +++ b/arch/nios2/configs/10m50_defconfig @@ -77,4 +77,3 @@ CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y CONFIG_SUNRPC_DEBUG=y CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/nios2/configs/3c120_defconfig b/arch/nios2/configs/3c120_defconfig index fce33588d55c..ceb97cd85ac1 100644 --- a/arch/nios2/configs/3c120_defconfig +++ b/arch/nios2/configs/3c120_defconfig @@ -74,4 +74,3 @@ CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y CONFIG_SUNRPC_DEBUG=y CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index db4f7d179220..95237b7f6fc1 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -232,7 +232,6 @@ static inline void pte_clear(struct mm_struct *mm, pte_val(null) = (addr >> PAGE_SHIFT) & 0xf; set_pte_at(mm, addr, ptep, null); - flush_tlb_one(addr); } /* diff --git a/arch/nios2/include/asm/tlbflush.h b/arch/nios2/include/asm/tlbflush.h index e19652fca1c6..b4bf487b9832 100644 --- a/arch/nios2/include/asm/tlbflush.h +++ b/arch/nios2/include/asm/tlbflush.h @@ -26,21 +26,32 @@ struct mm_struct; * * - flush_tlb_all() flushes all processes TLB entries * - flush_tlb_mm(mm) flushes the specified mm context TLB entries - * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_page(vma, address) flushes a page * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + * - flush_tlb_kernel_page(address) flushes a kernel page + * + * - reload_tlb_page(vma, address, pte) flushes the TLB for address like + * flush_tlb_page, then replaces it with a TLB for pte. 
*/ extern void flush_tlb_all(void); extern void flush_tlb_mm(struct mm_struct *mm); extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); -extern void flush_tlb_one(unsigned long vaddr); static inline void flush_tlb_page(struct vm_area_struct *vma, - unsigned long addr) + unsigned long address) { - flush_tlb_one(addr); + flush_tlb_range(vma, address, address + PAGE_SIZE); } +static inline void flush_tlb_kernel_page(unsigned long address) +{ + flush_tlb_kernel_range(address, address + PAGE_SIZE); +} + +extern void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, + pte_t pte); + #endif /* _ASM_NIOS2_TLBFLUSH_H */ diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index e0ea10806491..e83f831a76f9 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -26,7 +26,6 @@ #define USER_DS MAKE_MM_SEG(0x80000000UL) #define KERNEL_DS MAKE_MM_SEG(0) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(seg) (current_thread_info()->addr_limit = (seg)) diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h index d9948d88790b..0b4bb1d41b28 100644 --- a/arch/nios2/include/uapi/asm/unistd.h +++ b/arch/nios2/include/uapi/asm/unistd.h @@ -20,6 +20,8 @@ #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_TIME32_SYSCALLS /* Use the standard ABI for syscalls */ #include diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c index bf2f55d10a4d..4e704046a150 100644 --- a/arch/nios2/kernel/nios2_ksyms.c +++ b/arch/nios2/kernel/nios2_ksyms.c @@ -9,12 +9,20 @@ #include #include +#include +#include + /* string functions */ EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); +/* memory management */ + +EXPORT_SYMBOL(empty_zero_page); +EXPORT_SYMBOL(flush_icache_range); + /* * libgcc functions - functions that are used internally by the * compiler... 
(prototypes are not correct though, but that @@ -31,3 +39,7 @@ DECLARE_EXPORT(__udivsi3); DECLARE_EXPORT(__umoddi3); DECLARE_EXPORT(__umodsi3); DECLARE_EXPORT(__muldi3); +DECLARE_EXPORT(__ucmpdi2); +DECLARE_EXPORT(__lshrdi3); +DECLARE_EXPORT(__ashldi3); +DECLARE_EXPORT(__ashrdi3); diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 506f6e1c86d5..65de1bd6a760 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c @@ -198,12 +198,15 @@ void flush_dcache_page(struct page *page) EXPORT_SYMBOL(flush_dcache_page); void update_mmu_cache(struct vm_area_struct *vma, - unsigned long address, pte_t *pte) + unsigned long address, pte_t *ptep) { - unsigned long pfn = pte_pfn(*pte); + pte_t pte = *ptep; + unsigned long pfn = pte_pfn(pte); struct page *page; struct address_space *mapping; + reload_tlb_page(vma, address, pte); + if (!pfn_valid(pfn)) return; diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 24fd84cf6006..6a2e716b959f 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include @@ -271,7 +270,7 @@ vmalloc_fault: if (!pte_present(*pte_k)) goto no_context; - flush_tlb_one(address); + flush_tlb_kernel_page(address); return; } } diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c index cf10326aab1c..7fea59e53f94 100644 --- a/arch/nios2/mm/tlb.c +++ b/arch/nios2/mm/tlb.c @@ -23,10 +23,6 @@ ((((1UL << (cpuinfo.tlb_ptr_sz - cpuinfo.tlb_num_ways_log2))) - 1) \ << PAGE_SHIFT) -/* Used as illegal PHYS_ADDR for TLB mappings - */ -#define MAX_PHYS_ADDR 0 - static void get_misc_and_pid(unsigned long *misc, unsigned long *pid) { *misc = RDCTL(CTL_TLBMISC); @@ -35,28 +31,23 @@ static void get_misc_and_pid(unsigned long *misc, unsigned long *pid) } /* - * All entries common to a mm share an asid. To effectively flush these - * entries, we just bump the asid. + * This provides a PTEADDR value for addr that will cause a TLB miss + * (fast TLB miss). TLB invalidation replaces entries with this value. */ -void flush_tlb_mm(struct mm_struct *mm) +static unsigned long pteaddr_invalid(unsigned long addr) { - if (current->mm == mm) - flush_tlb_all(); - else - memset(&mm->context, 0, sizeof(mm_context_t)); + return ((addr | 0xC0000000UL) >> PAGE_SHIFT) << 2; } /* * This one is only used for pages with the global bit set so we don't care * much about the ASID. */ -void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid) +static void replace_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, unsigned long tlbacc) { unsigned int way; unsigned long org_misc, pid_misc; - pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr); - /* remember pid/way until we return. 
*/ get_misc_and_pid(&org_misc, &pid_misc); @@ -67,30 +58,48 @@ void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid) unsigned long tlbmisc; unsigned long pid; - tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT); + tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT); WRCTL(CTL_TLBMISC, tlbmisc); + pteaddr = RDCTL(CTL_PTEADDR); + if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT)) + continue; + tlbmisc = RDCTL(CTL_TLBMISC); pid = (tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK; - if (((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) && - pid == mmu_pid) { - unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE + - ((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) + - (addr & TLB_INDEX_MASK); - pr_debug("Flush entry by writing %#lx way=%dl pid=%ld\n", - vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT)); - - WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2); - tlbmisc = pid_misc | TLBMISC_WE | - (way << TLBMISC_WAY_SHIFT); - WRCTL(CTL_TLBMISC, tlbmisc); - WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); - } + if (pid != mmu_pid) + continue; + + tlbmisc = (mmu_pid << TLBMISC_PID_SHIFT) | TLBMISC_WE | + (way << TLBMISC_WAY_SHIFT); + WRCTL(CTL_TLBMISC, tlbmisc); + if (tlbacc == 0) + WRCTL(CTL_PTEADDR, pteaddr_invalid(addr)); + WRCTL(CTL_TLBACC, tlbacc); + /* + * There should be only a single entry that maps a + * particular {address,pid} so break after a match. + */ + break; } WRCTL(CTL_TLBMISC, org_misc); } +static void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid) +{ + pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr); + + replace_tlb_one_pid(addr, mmu_pid, 0); +} + +static void reload_tlb_one_pid(unsigned long addr, unsigned long mmu_pid, pte_t pte) +{ + pr_debug("Reload tlb-entry for vaddr=%#lx\n", addr); + + replace_tlb_one_pid(addr, mmu_pid, pte_val(pte)); +} + void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { @@ -102,19 +111,18 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, } } -void flush_tlb_kernel_range(unsigned long start, unsigned long end) +void reload_tlb_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { - while (start < end) { - flush_tlb_one(start); - start += PAGE_SIZE; - } + unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context); + + reload_tlb_one_pid(addr, mmu_pid, pte); } /* * This one is only used for pages with the global bit set so we don't care * much about the ASID. 
*/ -void flush_tlb_one(unsigned long addr) +static void flush_tlb_one(unsigned long addr) { unsigned int way; unsigned long org_misc, pid_misc; @@ -130,30 +138,33 @@ void flush_tlb_one(unsigned long addr) unsigned long pteaddr; unsigned long tlbmisc; - tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT); + tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT); WRCTL(CTL_TLBMISC, tlbmisc); - pteaddr = RDCTL(CTL_PTEADDR); - tlbmisc = RDCTL(CTL_TLBMISC); - if ((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) { - unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE + - ((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) + - (addr & TLB_INDEX_MASK); + pteaddr = RDCTL(CTL_PTEADDR); + if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT)) + continue; - pr_debug("Flush entry by writing %#lx way=%dl pid=%ld\n", - vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT)); + pr_debug("Flush entry by writing way=%dl pid=%ld\n", + way, (pid_misc >> TLBMISC_PID_SHIFT)); - tlbmisc = pid_misc | TLBMISC_WE | - (way << TLBMISC_WAY_SHIFT); - WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2); - WRCTL(CTL_TLBMISC, tlbmisc); - WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); - } + tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT); + WRCTL(CTL_TLBMISC, tlbmisc); + WRCTL(CTL_PTEADDR, pteaddr_invalid(addr)); + WRCTL(CTL_TLBACC, 0); } WRCTL(CTL_TLBMISC, org_misc); } +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + while (start < end) { + flush_tlb_one(start); + start += PAGE_SIZE; + } +} + void dump_tlb_line(unsigned long line) { unsigned int way; @@ -177,7 +188,7 @@ void dump_tlb_line(unsigned long line) tlbmisc = RDCTL(CTL_TLBMISC); tlbacc = RDCTL(CTL_TLBACC); - if ((tlbacc << PAGE_SHIFT) != (MAX_PHYS_ADDR & PAGE_MASK)) { + if ((tlbacc << PAGE_SHIFT) != 0) { pr_debug("-- way:%02x vpn:0x%08lx phys:0x%08lx pid:0x%02lx flags:%c%c%c%c%c\n", way, (pteaddr << (PAGE_SHIFT-2)), @@ -203,8 +214,9 @@ void dump_tlb(void) dump_tlb_line(i); } -void flush_tlb_pid(unsigned long pid) +void flush_tlb_pid(unsigned long mmu_pid) { + unsigned long addr = 0; unsigned int line; unsigned int way; unsigned long org_misc, pid_misc; @@ -213,55 +225,65 @@ void flush_tlb_pid(unsigned long pid) get_misc_and_pid(&org_misc, &pid_misc); for (line = 0; line < cpuinfo.tlb_num_lines; line++) { - WRCTL(CTL_PTEADDR, line << 2); + WRCTL(CTL_PTEADDR, pteaddr_invalid(addr)); for (way = 0; way < cpuinfo.tlb_num_ways; way++) { - unsigned long pteaddr; unsigned long tlbmisc; - unsigned long tlbacc; + unsigned long pid; - tlbmisc = pid_misc | TLBMISC_RD | - (way << TLBMISC_WAY_SHIFT); + tlbmisc = TLBMISC_RD | (way << TLBMISC_WAY_SHIFT); WRCTL(CTL_TLBMISC, tlbmisc); - pteaddr = RDCTL(CTL_PTEADDR); tlbmisc = RDCTL(CTL_TLBMISC); - tlbacc = RDCTL(CTL_TLBACC); - - if (((tlbmisc>>TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK) - == pid) { - tlbmisc = pid_misc | TLBMISC_WE | - (way << TLBMISC_WAY_SHIFT); - WRCTL(CTL_TLBMISC, tlbmisc); - WRCTL(CTL_TLBACC, - (MAX_PHYS_ADDR >> PAGE_SHIFT)); - } + pid = (tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK; + if (pid != mmu_pid) + continue; + + tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT); + WRCTL(CTL_TLBMISC, tlbmisc); + WRCTL(CTL_TLBACC, 0); } - WRCTL(CTL_TLBMISC, org_misc); + addr += PAGE_SIZE; + } + + WRCTL(CTL_TLBMISC, org_misc); +} + +/* + * All entries common to a mm share an asid. To effectively flush these + * entries, we just bump the asid. 
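The pteaddr_invalid() trick introduced for nios2 above can be demonstrated in isolation: an invalidated TLB way gets a PTEADDR whose virtual page number lies in the 0xC0000000 I/O region, so the VPN comparison used when scanning ways, ((pteaddr >> 2) & 0xfffff) == (addr >> PAGE_SHIFT), can never match a normal mapped address again and lookups take the fast miss path. A minimal stand-alone model follows; the example address is made up and PAGE_SHIFT = 12 assumes the usual 4 KiB pages.

#include <stdio.h>

#define PAGE_SHIFT 12

/* Same computation as the pteaddr_invalid() added in arch/nios2/mm/tlb.c */
static unsigned long pteaddr_invalid(unsigned long addr)
{
	return ((addr | 0xC0000000UL) >> PAGE_SHIFT) << 2;
}

int main(void)
{
	unsigned long addr = 0x00402000UL;		/* example user address */
	unsigned long pteaddr = pteaddr_invalid(addr);
	unsigned long stored_vpn = (pteaddr >> 2) & 0xfffff;

	printf("addr vpn   = 0x%05lx\n", addr >> PAGE_SHIFT);
	printf("stored vpn = 0x%05lx\n", stored_vpn);
	printf("match: %s\n",
	       stored_vpn == (addr >> PAGE_SHIFT) ? "yes" : "no (entry is dead)");
	return 0;
}
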
+ */ +void flush_tlb_mm(struct mm_struct *mm) +{ + if (current->mm == mm) { + unsigned long mmu_pid = get_pid_from_context(&mm->context); + flush_tlb_pid(mmu_pid); + } else { + memset(&mm->context, 0, sizeof(mm_context_t)); } } void flush_tlb_all(void) { - int i; - unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE; + unsigned long addr = 0; + unsigned int line; unsigned int way; - unsigned long org_misc, pid_misc, tlbmisc; + unsigned long org_misc, pid_misc; /* remember pid/way until we return */ get_misc_and_pid(&org_misc, &pid_misc); - pid_misc |= TLBMISC_WE; + + /* Start at way 0, way is auto-incremented after each TLBACC write */ + WRCTL(CTL_TLBMISC, TLBMISC_WE); /* Map each TLB entry to physcal address 0 with no-access and a bad ptbase */ - for (way = 0; way < cpuinfo.tlb_num_ways; way++) { - tlbmisc = pid_misc | (way << TLBMISC_WAY_SHIFT); - for (i = 0; i < cpuinfo.tlb_num_lines; i++) { - WRCTL(CTL_PTEADDR, ((vaddr) >> PAGE_SHIFT) << 2); - WRCTL(CTL_TLBMISC, tlbmisc); - WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT)); - vaddr += 1UL << 12; - } + for (line = 0; line < cpuinfo.tlb_num_lines; line++) { + WRCTL(CTL_PTEADDR, pteaddr_invalid(addr)); + for (way = 0; way < cpuinfo.tlb_num_ways; way++) + WRCTL(CTL_TLBACC, 0); + + addr += PAGE_SIZE; } /* restore pid/way */ @@ -270,6 +292,10 @@ void flush_tlb_all(void) void set_mmu_pid(unsigned long pid) { - WRCTL(CTL_TLBMISC, (RDCTL(CTL_TLBMISC) & TLBMISC_WAY) | - ((pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT)); + unsigned long tlbmisc; + + tlbmisc = RDCTL(CTL_TLBMISC); + tlbmisc = (tlbmisc & TLBMISC_WAY); + tlbmisc |= (pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT; + WRCTL(CTL_TLBMISC, tlbmisc); } diff --git a/arch/nios2/platform/Kconfig.platform b/arch/nios2/platform/Kconfig.platform index 74c1aaf588b8..c72074f8bdd9 100644 --- a/arch/nios2/platform/Kconfig.platform +++ b/arch/nios2/platform/Kconfig.platform @@ -17,7 +17,6 @@ comment "Device tree" config NIOS2_DTB_AT_PHYS_ADDR bool "DTB at physical address" - default n help When enabled you can select a physical address to load the dtb from. Normally this address is passed by a bootloader such as u-boot but @@ -37,7 +36,6 @@ config NIOS2_DTB_PHYS_ADDR config NIOS2_DTB_SOURCE_BOOL bool "Compile and link device tree into kernel image" - default n help This allows you to specify a dts (device tree source) file which will be compiled and linked into the kernel image. @@ -62,21 +60,18 @@ config NIOS2_ARCH_REVISION config NIOS2_HW_MUL_SUPPORT bool "Enable MUL instruction" - default n help Set to true if you configured the Nios II to include the MUL instruction. This will enable the -mhw-mul compiler flag. config NIOS2_HW_MULX_SUPPORT bool "Enable MULX instruction" - default n help Set to true if you configured the Nios II to include the MULX instruction. Enables the -mhw-mulx compiler flag. config NIOS2_HW_DIV_SUPPORT bool "Enable DIV instruction" - default n help Set to true if you configured the Nios II to include the DIV instruction. Enables the -mhw-div compiler flag. @@ -84,7 +79,6 @@ config NIOS2_HW_DIV_SUPPORT config NIOS2_BMX_SUPPORT bool "Enable BMX instructions" depends on NIOS2_ARCH_REVISION = 2 - default n help Set to true if you configured the Nios II R2 to include the BMX Bit Manipulation Extension instructions. Enables @@ -93,7 +87,6 @@ config NIOS2_BMX_SUPPORT config NIOS2_CDX_SUPPORT bool "Enable CDX instructions" depends on NIOS2_ARCH_REVISION = 2 - default n help Set to true if you configured the Nios II R2 to include the CDX Bit Manipulation Extension instructions. 
Enables @@ -101,13 +94,11 @@ config NIOS2_CDX_SUPPORT config NIOS2_FPU_SUPPORT bool "Custom floating point instr support" - default n help Enables the -mcustom-fpu-cfg=60-1 compiler flag. config NIOS2_CI_SWAB_SUPPORT bool "Byteswap custom instruction" - default n help Use the byteswap (endian converter) Nios II custom instruction provided by Altera and which can be enabled in QSYS builder. This accelerates diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 09ab59e942ae..a5e361fbb75a 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -6,6 +6,7 @@ config OPENRISC def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_DEVICE select OF select OF_EARLY_FLATTREE diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig index a73aa90501be..d8ff4f8ffb88 100644 --- a/arch/openrisc/configs/or1ksim_defconfig +++ b/arch/openrisc/configs/or1ksim_defconfig @@ -54,5 +54,4 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_DNOTIFY is not set CONFIG_TMPFS=y CONFIG_NFS_FS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set diff --git a/arch/openrisc/configs/simple_smp_defconfig b/arch/openrisc/configs/simple_smp_defconfig index b6e3c7e158e7..64278992df9c 100644 --- a/arch/openrisc/configs/simple_smp_defconfig +++ b/arch/openrisc/configs/simple_smp_defconfig @@ -61,6 +61,5 @@ CONFIG_SERIAL_OF_PLATFORM=y CONFIG_TMPFS=y CONFIG_NFS_FS=y CONFIG_XZ_DEC=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_RCU_TRACE is not set diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index eb87cd8327c8..1f04844b6b82 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -34,6 +34,7 @@ generic-y += qrwlock_types.h generic-y += qrwlock.h generic-y += sections.h generic-y += segment.h +generic-y += shmparam.h generic-y += string.h generic-y += switch_to.h generic-y += topology.h diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index a44682c8adc3..45afd9ab78c1 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -42,7 +42,6 @@ */ #define KERNEL_DS (~0UL) -#define get_ds() (KERNEL_DS) #define USER_DS (TASK_SIZE) #define get_fs() (current_thread_info()->addr_limit) diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += shmparam.h generic-y += ucontext.h diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h index ec37df18d8ed..566f8c4f8047 100644 --- a/arch/openrisc/include/uapi/asm/unistd.h +++ b/arch/openrisc/include/uapi/asm/unistd.h @@ -21,8 +21,10 @@ #define __ARCH_WANT_RENAMEAT #define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SET_GET_RLIMIT #define __ARCH_WANT_SYS_FORK #define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_TIME32_SYSCALLS #include diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index d157310eb377..caeb4184e8a6 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -105,7 +105,10 @@ static void __init map_ram(void) } /* Alloc one page for holding PTE's... 
*/ - pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); + pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate page for PTEs\n", + __func__); set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte))); /* Fill the newly allocated page with PTE'S */ diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c index 270d1c9bc0d6..a8509950dbbc 100644 --- a/arch/openrisc/mm/ioremap.c +++ b/arch/openrisc/mm/ioremap.c @@ -123,12 +123,13 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm) pte_t *pte; if (likely(mem_init_done)) { - pte = (pte_t *) __get_free_page(GFP_KERNEL); + pte = (pte_t *)get_zeroed_page(GFP_KERNEL); } else { - pte = (pte_t *) __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE)); + pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); } - if (pte) - clear_page(pte); return pte; } diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 7ca2c3ebad64..c8e621296092 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config PARISC def_bool y + select ARCH_32BIT_OFF_T if !64BIT select ARCH_MIGHT_HAVE_PC_PARPORT select HAVE_IDE select HAVE_OPROFILE diff --git a/arch/parisc/boot/Makefile b/arch/parisc/boot/Makefile index cad68a584884..41cce0706f80 100644 --- a/arch/parisc/boot/Makefile +++ b/arch/parisc/boot/Makefile @@ -2,12 +2,6 @@ # Makefile for the linux parisc-specific parts of the boot image creator. # -COMPILE_VERSION := __linux_compile_version_id__`hostname | \ - tr -c '[0-9A-Za-z]' '_'`__`date | \ - tr -c '[0-9A-Za-z]' '_'`_t - -ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I. - targets := image targets += bzImage subdir- := compressed diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h index 44a9f97194aa..d5bd94247371 100644 --- a/arch/parisc/include/asm/dma-mapping.h +++ b/arch/parisc/include/asm/dma-mapping.h @@ -2,8 +2,6 @@ #ifndef _PARISC_DMA_MAPPING_H #define _PARISC_DMA_MAPPING_H -#include - /* ** We need to support 4 different coherent dma models with one binary: ** @@ -28,48 +26,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return hppa_dma_ops; } -static inline void * -parisc_walk_tree(struct device *dev) -{ - struct device *otherdev; - if(likely(dev->platform_data != NULL)) - return dev->platform_data; - /* OK, just traverse the bus to find it */ - for(otherdev = dev->parent; otherdev; - otherdev = otherdev->parent) { - if(otherdev->platform_data) { - dev->platform_data = otherdev->platform_data; - break; - } - } - return dev->platform_data; -} - -#define GET_IOC(dev) ({ \ - void *__pdata = parisc_walk_tree(dev); \ - __pdata ? 
HBA_DATA(__pdata)->iommu : NULL; \ -}) - -#ifdef CONFIG_IOMMU_CCIO -struct parisc_device; -struct ioc; -void * ccio_get_iommu(const struct parisc_device *dev); -int ccio_request_resource(const struct parisc_device *dev, - struct resource *res); -int ccio_allocate_resource(const struct parisc_device *dev, - struct resource *res, unsigned long size, - unsigned long min, unsigned long max, unsigned long align); -#else /* !CONFIG_IOMMU_CCIO */ -#define ccio_get_iommu(dev) NULL -#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res) -#define ccio_allocate_resource(dev, res, size, min, max, align) \ - allocate_resource(&iomem_resource, res, size, min, max, \ - align, NULL, NULL) -#endif /* !CONFIG_IOMMU_CCIO */ - -#ifdef CONFIG_IOMMU_SBA -struct parisc_device; -void * sba_get_iommu(struct parisc_device *dev); -#endif - #endif diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index 1a1235a9d533..7f7039516e53 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -22,6 +22,7 @@ typedef struct { unsigned int irq_stack_usage; #ifdef CONFIG_SMP unsigned int irq_resched_count; + unsigned int irq_call_count; #endif unsigned int irq_unaligned_count; unsigned int irq_fpassist_count; diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h index afe493b23d04..30a8315d5c07 100644 --- a/arch/parisc/include/asm/io.h +++ b/arch/parisc/include/asm/io.h @@ -311,6 +311,15 @@ extern void outsl (unsigned long port, const void *src, unsigned long count); * value for either 32 or 64 bit mode */ #define F_EXTEND(x) ((unsigned long)((x) | (0xffffffff00000000ULL))) +#define ioread64 ioread64 +#define ioread64be ioread64be +#define iowrite64 iowrite64 +#define iowrite64be iowrite64be +extern u64 ioread64(void __iomem *addr); +extern u64 ioread64be(void __iomem *addr); +extern void iowrite64(u64 val, void __iomem *addr); +extern void iowrite64be(u64 val, void __iomem *addr); + #include /* diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 3328fd17c19d..f14465b84de4 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h @@ -56,7 +56,7 @@ struct pci_hba_data { #define DINO_MAX_LMMIO_RESOURCES 3 unsigned long lmmio_space_offset; /* CPU view - PCI view */ - void * iommu; /* IOMMU this device is under */ + struct ioc *iommu; /* IOMMU this device is under */ /* REVISIT - spinlock to protect resources? */ #define HBA_NAME_SIZE 16 @@ -66,8 +66,6 @@ struct pci_hba_data { char gmmio_name[HBA_NAME_SIZE]; }; -#define HBA_DATA(d) ((struct pci_hba_data *) (d)) - /* ** We support 2^16 I/O ports per HBA. 
These are set up in the form ** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 5b187d40d604..19bb2e46cd36 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -44,6 +44,7 @@ int pdc_model_sysmodel(char *name); int pdc_model_cpuid(unsigned long *cpu_id); int pdc_model_versions(unsigned long *versions, int id); int pdc_model_capabilities(unsigned long *capabilities); +int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no); int pdc_cache_info(struct pdc_cache_info *cache); int pdc_spaceid_bits(unsigned long *space_bits); #ifndef CONFIG_PA20 diff --git a/arch/parisc/include/asm/pdcpat.h b/arch/parisc/include/asm/pdcpat.h index bce9ee1c1c99..24355ed1453a 100644 --- a/arch/parisc/include/asm/pdcpat.h +++ b/arch/parisc/include/asm/pdcpat.h @@ -67,6 +67,10 @@ #define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */ +/* PDC PAT COMPLEX */ + +#define PDC_PAT_COMPLEX 66L + /* PDC PAT CPU -- CPU configuration within the protection domain */ #define PDC_PAT_CPU 67L diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 30ac2865ea73..ebbb9ffe038c 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -16,7 +16,6 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h index c2c2afb28941..b0838dc4dfee 100644 --- a/arch/parisc/include/asm/unistd.h +++ b/arch/parisc/include/asm/unistd.h @@ -10,11 +10,7 @@ #define SYS_ify(syscall_name) __NR_##syscall_name -#define __IGNORE_select /* newselect */ #define __IGNORE_fadvise64 /* fadvise64_64 */ -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free #ifndef ASM_LINE_SEP # define ASM_LINE_SEP ; @@ -156,10 +152,8 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_COMPAT_SYS_TIME +#define __ARCH_WANT_SYS_TIME32 #define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL -#define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL @@ -174,6 +168,11 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \ #define __ARCH_WANT_SYS_CLONE #define __ARCH_WANT_COMPAT_SYS_SENDFILE +#ifdef CONFIG_64BIT +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME +#endif + #endif /* __ASSEMBLY__ */ #undef STR diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index 870fbf8c7088..c98162f494db 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -10,9 +10,7 @@ #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ -#define MAP_SHARED 0x01 /* Share changes */ -#define MAP_PRIVATE 0x02 /* Changes are private */ -#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */ +/* 0x01 - 0x03 are defined in linux/mman.h */ #define MAP_TYPE 0x2b /* Mask for type of mapping, includes bits 0x08 and 0x20 */ #define MAP_FIXED 0x04 /* Interpret addr exactly */ 
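The asm/io.h hunk above declares 64-bit MMIO accessors for parisc with the usual ioread/iowrite signatures. A brief usage sketch, kernel-style rather than standalone; the base address, register offset and enable bit are made up for illustration, only the accessor signatures follow the declarations in this hunk.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>

static int demo_mmio_rmw(void)
{
	void __iomem *regs = ioremap(0xf8001000UL, 0x100);	/* hypothetical device */
	u64 status;

	if (!regs)
		return -ENOMEM;

	status = ioread64(regs + 0x18);		/* hypothetical status register */
	iowrite64(status | 0x1, regs + 0x18);	/* set a hypothetical enable bit */

	iounmap(regs);
	return 0;
}
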
#define MAP_ANONYMOUS 0x10 /* don't use a file */ diff --git a/arch/parisc/include/uapi/asm/pdc.h b/arch/parisc/include/uapi/asm/pdc.h index 593eeb573138..15211723ebf5 100644 --- a/arch/parisc/include/uapi/asm/pdc.h +++ b/arch/parisc/include/uapi/asm/pdc.h @@ -60,6 +60,8 @@ #define PDC_MODEL_NVA_UNSUPPORTED (3 << 4) #define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */ #define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */ +#define PDC_MODEL_GET_PLATFORM_INFO 10 /* returns platform info */ +#define PDC_MODEL_GET_INSTALL_KERNEL 11 /* returns kernel for installation */ #define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */ #define PA90_INSTRUCTION_SET 0x8 @@ -99,7 +101,7 @@ #define PDC_TOD 9 /* time-of-day clock (TOD) */ #define PDC_TOD_READ 0 /* read TOD */ #define PDC_TOD_WRITE 1 /* write TOD */ - +#define PDC_TOD_CALIBRATE 2 /* calibrate timers */ #define PDC_STABLE 10 /* stable storage (sprockets) */ #define PDC_STABLE_READ 0 @@ -109,15 +111,22 @@ #define PDC_STABLE_INITIALIZE 4 #define PDC_NVOLATILE 11 /* often not implemented */ +#define PDC_NVOLATILE_READ 0 +#define PDC_NVOLATILE_WRITE 1 +#define PDC_NVOLATILE_RETURN_SIZE 2 +#define PDC_NVOLATILE_VERIFY_CONTENTS 3 +#define PDC_NVOLATILE_INITIALIZE 4 #define PDC_ADD_VALID 12 /* Memory validation PDC call */ #define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */ +#define PDC_DEBUG 14 /* Obsolete */ + #define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */ #define PDC_PROC 16 /* (sprockets) */ -#define PDC_CONFIG 16 /* (sprockets) */ +#define PDC_CONFIG 17 /* (sprockets) */ #define PDC_CONFIG_DECONFIG 0 #define PDC_CONFIG_DRECONFIG 1 #define PDC_CONFIG_DRETURN_CONFIG 2 @@ -167,6 +176,15 @@ #define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */ #define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */ +#define PDC_ALLOC 24 /* allocate static storage for PDC & IODC */ + +#define PDC_CRASH_PREP 25 /* Prepare system for crash dump */ +#define PDC_CRASH_DUMP 0 /* Do platform specific preparations for dump */ +#define PDC_CRASH_LOG_CEC_ERROR 1 /* Dump hardware registers */ + +#define PDC_SCSI_PARMS 26 /* Get and set SCSI parameters */ +#define PDC_SCSI_GET_PARMS 0 /* Get SCSI parameters for I/O device */ +#define PDC_SCSI_SET_PARMS 1 /* Set SCSI parameters for I/O device */ /* HVERSION dependent */ @@ -260,6 +278,10 @@ #define PDC_PCI_READ_MON_TYPE 15 #define PDC_PCI_WRITE_MON_TYPE 16 +#define PDC_RELOCATE 149 /* (sprockets) */ +#define PDC_RELOCATE_GET_RELOCINFO 0 +#define PDC_RELOCATE_CHECKSUM 1 +#define PDC_RELOCATE_RELOCATE 2 /* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */ #define PDC_INITIATOR 163 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index 061b9cf2a779..16e428f03526 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -3,6 +3,7 @@ #define _UAPI_ASM_SOCKET_H #include +#include /* For setsockopt(2) */ #define SOL_SOCKET 0xffff @@ -21,8 +22,8 @@ #define SO_RCVBUFFORCE 0x100b #define SO_SNDLOWAT 0x1003 #define SO_RCVLOWAT 0x1004 -#define SO_SNDTIMEO 0x1005 -#define SO_RCVTIMEO 0x1006 +#define SO_SNDTIMEO_OLD 0x1005 +#define SO_RCVTIMEO_OLD 0x1006 #define SO_ERROR 0x1007 #define SO_TYPE 0x1008 #define SO_PROTOCOL 0x1028 @@ -34,10 +35,6 @@ #define SO_BSDCOMPAT 0x400e #define SO_PASSCRED 0x4010 #define SO_PEERCRED 0x4011 -#define SO_TIMESTAMP 0x4012 -#define SCM_TIMESTAMP SO_TIMESTAMP -#define SO_TIMESTAMPNS 0x4013 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS 
/* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x4016 @@ -58,9 +55,6 @@ #define SO_MARK 0x401f -#define SO_TIMESTAMPING 0x4020 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 0x4021 #define SO_WIFI_STATUS 0x4022 @@ -107,4 +101,40 @@ #define SO_TXTIME 0x4036 #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 0x4037 + +#define SO_TIMESTAMP_OLD 0x4012 +#define SO_TIMESTAMPNS_OLD 0x4013 +#define SO_TIMESTAMPING_OLD 0x4020 + +#define SO_TIMESTAMP_NEW 0x4038 +#define SO_TIMESTAMPNS_NEW 0x4039 +#define SO_TIMESTAMPING_NEW 0x403A + +#define SO_RCVTIMEO_NEW 0x4040 +#define SO_SNDTIMEO_NEW 0x4041 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index e6f3b49f2fd7..7a17551ea31e 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -568,6 +568,30 @@ int pdc_model_capabilities(unsigned long *capabilities) return retval; } +/** + * pdc_model_platform_info - Returns machine product and serial number. + * @orig_prod_num: Return buffer for original product number. + * @current_prod_num: Return buffer for current product number. + * @serial_no: Return buffer for serial number. + * + * Returns strings containing the original and current product numbers and the + * serial number of the system. + */ +int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, + char *serial_no) +{ + int retval; + unsigned long flags; + + spin_lock_irqsave(&pdc_lock, flags); + retval = mem_pdc_call(PDC_MODEL, PDC_MODEL_GET_PLATFORM_INFO, + __pa(orig_prod_num), __pa(current_prod_num), __pa(serial_no)); + convert_to_wide(pdc_result); + spin_unlock_irqrestore(&pdc_lock, flags); + + return retval; +} + /** * pdc_cache_info - Return cache and TLB information. * @cache_info: The return buffer. 
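The pdc_model_platform_info() wrapper added above is consumed later in this series from collect_boot_cpu_data() in processor.c; purely as a minimal sketch of the calling convention (the 64-byte buffers mirror the processor.c hunk further down, and the print_platform_info() helper name is an illustration, not part of the patch):

	/* Sketch only: query PDC for product and serial strings.
	 * Assumes 64-byte caller-provided buffers, as used by the
	 * processor.c change in this same merge. */
	static void __init print_platform_info(void)
	{
		char orig_prod_num[64], current_prod_num[64], serial_no[64];

		if (pdc_model_platform_info(orig_prod_num, current_prod_num,
					    serial_no) != PDC_OK)
			return;	/* call failed or not implemented by firmware */

		pr_info("product %s, original product %s, S/N: %s\n",
			current_prod_num, orig_prod_num, serial_no);
	}
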
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 0ca254085a66..23040a67583e 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -117,7 +117,10 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest) return -EINVAL; /* whatever mask they set, we just allow one CPU */ - cpu_dest = cpumask_first_and(dest, cpu_online_mask); + cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1), + dest, cpu_online_mask); + if (cpu_dest >= nr_cpu_ids) + cpu_dest = cpumask_first_and(dest, cpu_online_mask); return cpu_dest; } @@ -175,10 +178,16 @@ int arch_show_interrupts(struct seq_file *p, int prec) # endif #endif #ifdef CONFIG_SMP - seq_printf(p, "%*s: ", prec, "RES"); - for_each_online_cpu(j) - seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); - seq_puts(p, " Rescheduling interrupts\n"); + if (num_online_cpus() > 1) { + seq_printf(p, "%*s: ", prec, "RES"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); + seq_puts(p, " Rescheduling interrupts\n"); + seq_printf(p, "%*s: ", prec, "CAL"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); + seq_puts(p, " Function call interrupts\n"); + } #endif seq_printf(p, "%*s: ", prec, "UAH"); for_each_online_cpu(j) diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 82bd0d0927ce..7f4d042856b5 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -242,6 +242,7 @@ static int __init processor_probe(struct parisc_device *dev) void __init collect_boot_cpu_data(void) { unsigned long cr16_seed; + char orig_prod_num[64], current_prod_num[64], serial_no[64]; memset(&boot_cpu_data, 0, sizeof(boot_cpu_data)); @@ -301,6 +302,14 @@ void __init collect_boot_cpu_data(void) _parisc_requires_coherency = (boot_cpu_data.cpu_type == mako) || (boot_cpu_data.cpu_type == mako2); #endif + + if (pdc_model_platform_info(orig_prod_num, current_prod_num, serial_no) == PDC_OK) { + printk(KERN_INFO "product %s, original product %s, S/N: %s\n", + current_prod_num, orig_prod_num, serial_no); + add_device_randomness(orig_prod_num, strlen(orig_prod_num)); + add_device_randomness(current_prod_num, strlen(current_prod_num)); + add_device_randomness(serial_no, strlen(serial_no)); + } } diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2582df1c529b..0964c236e3e5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, long do_syscall_trace_enter(struct pt_regs *regs) { - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) { + if (test_thread_flag(TIF_SYSCALL_TRACE)) { + int rc = tracehook_report_syscall_entry(regs); + /* - * Tracing decided this syscall should not happen or the - * debugger stored an invalid system call number. Skip - * the system call and the system call restart handling. + * As tracesys_next does not set %r28 to -ENOSYS + * when %r20 is set to -1, initialize it here. */ - regs->gr[20] = -1UL; - goto out; + regs->gr[28] = -ENOSYS; + + if (rc) { + /* + * A nonzero return code from + * tracehook_report_syscall_entry() tells us + * to prevent the syscall execution. Skip + * the syscall call and the syscall restart handling. + * + * Note that the tracer may also just change + * regs->gr[20] to an invalid syscall number, + * that is handled by tracesys_next. 
+ */ + regs->gr[20] = -1UL; + return -1; + } } /* Do the secure computing check after ptrace. */ @@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) regs->gr[24] & 0xffffffff, regs->gr[23] & 0xffffffff); -out: /* * Sign extend the syscall number to 64bit since it may have been * modified by a compat ptrace call diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index f2cf86ac279b..15dd9e21be7e 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -40,6 +40,7 @@ #include #include +#include #include #include #include diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 5e26dbede5fc..d9e2d69c9e48 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -155,6 +155,7 @@ ipi_interrupt(int irq, void *dev_id) case IPI_CALL_FUNC: smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); + inc_irq_stat(irq_call_count); generic_smp_call_function_interrupt(); break; diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 9bbd2f9f56c8..b26766c6647d 100644 --- a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -20,7 +20,8 @@ 10 common unlink sys_unlink 11 common execve sys_execve compat_sys_execve 12 common chdir sys_chdir -13 common time sys_time compat_sys_time +13 32 time sys_time32 +13 64 time sys_time 14 common mknod sys_mknod 15 common chmod sys_chmod 16 common lchown sys_lchown @@ -32,12 +33,14 @@ 22 common bind sys_bind 23 common setuid sys_setuid 24 common getuid sys_getuid -25 common stime sys_stime compat_sys_stime +25 32 stime sys_stime32 +25 64 stime sys_stime 26 common ptrace sys_ptrace compat_sys_ptrace 27 common alarm sys_alarm 28 common fstat sys_newfstat compat_sys_newfstat 29 common pause sys_pause -30 common utime sys_utime compat_sys_utime +30 32 utime sys_utime32 +30 64 utime sys_utime 31 common connect sys_connect 32 common listen sys_listen 33 common access sys_access @@ -133,7 +136,8 @@ 121 common setdomainname sys_setdomainname 122 common sendfile sys_sendfile compat_sys_sendfile 123 common recvfrom sys_recvfrom -124 common adjtimex sys_adjtimex compat_sys_adjtimex +124 32 adjtimex sys_adjtimex_time32 +124 64 adjtimex sys_adjtimex 125 common mprotect sys_mprotect 126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask # 127 was create_module @@ -171,8 +175,10 @@ 158 common sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep compat_sys_nanosleep +161 32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +161 64 sched_rr_get_interval sys_sched_rr_get_interval +162 32 nanosleep sys_nanosleep_time32 +162 64 nanosleep sys_nanosleep 163 common mremap sys_mremap 164 common setresuid sys_setresuid 165 common getresuid sys_getresuid @@ -187,7 +193,8 @@ 174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending -177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +177 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +177 64 rt_sigtimedwait sys_rt_sigtimedwait 178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend 
compat_sys_rt_sigsuspend 180 common chown sys_chown @@ -223,14 +230,16 @@ 207 64 readahead sys_readahead 208 common tkill sys_tkill 209 common sendfile64 sys_sendfile64 compat_sys_sendfile64 -210 common futex sys_futex compat_sys_futex +210 32 futex sys_futex_time32 +210 64 futex sys_futex 211 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 212 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity # 213 was set_thread_area # 214 was get_thread_area 215 common io_setup sys_io_setup compat_sys_io_setup 216 common io_destroy sys_io_destroy -217 common io_getevents sys_io_getevents compat_sys_io_getevents +217 32 io_getevents sys_io_getevents_time32 +217 64 io_getevents sys_io_getevents 218 common io_submit sys_io_submit compat_sys_io_submit 219 common io_cancel sys_io_cancel # 220 was alloc_hugepages @@ -241,11 +250,14 @@ 225 common epoll_ctl sys_epoll_ctl 226 common epoll_wait sys_epoll_wait 227 common remap_file_pages sys_remap_file_pages -228 common semtimedop sys_semtimedop compat_sys_semtimedop +228 32 semtimedop sys_semtimedop_time32 +228 64 semtimedop sys_semtimedop 229 common mq_open sys_mq_open compat_sys_mq_open 230 common mq_unlink sys_mq_unlink -231 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend -232 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +231 32 mq_timedsend sys_mq_timedsend_time32 +231 64 mq_timedsend sys_mq_timedsend +232 32 mq_timedreceive sys_mq_timedreceive_time32 +232 64 mq_timedreceive sys_mq_timedreceive 233 common mq_notify sys_mq_notify compat_sys_mq_notify 234 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 235 common waitid sys_waitid compat_sys_waitid @@ -265,14 +277,20 @@ 248 common lremovexattr sys_lremovexattr 249 common fremovexattr sys_fremovexattr 250 common timer_create sys_timer_create compat_sys_timer_create -251 common timer_settime sys_timer_settime compat_sys_timer_settime -252 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +251 32 timer_settime sys_timer_settime32 +251 64 timer_settime sys_timer_settime +252 32 timer_gettime sys_timer_gettime32 +252 64 timer_gettime sys_timer_gettime 253 common timer_getoverrun sys_timer_getoverrun 254 common timer_delete sys_timer_delete -255 common clock_settime sys_clock_settime compat_sys_clock_settime -256 common clock_gettime sys_clock_gettime compat_sys_clock_gettime -257 common clock_getres sys_clock_getres compat_sys_clock_getres -258 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +255 32 clock_settime sys_clock_settime32 +255 64 clock_settime sys_clock_settime +256 32 clock_gettime sys_clock_gettime32 +256 64 clock_gettime sys_clock_gettime +257 32 clock_getres sys_clock_getres_time32 +257 64 clock_getres sys_clock_getres +258 32 clock_nanosleep sys_clock_nanosleep_time32 +258 64 clock_nanosleep sys_clock_nanosleep 259 common tgkill sys_tgkill 260 common mbind sys_mbind compat_sys_mbind 261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy @@ -287,13 +305,16 @@ 270 common inotify_add_watch sys_inotify_add_watch 271 common inotify_rm_watch sys_inotify_rm_watch 272 common migrate_pages sys_migrate_pages -273 common pselect6 sys_pselect6 compat_sys_pselect6 -274 common ppoll sys_ppoll compat_sys_ppoll +273 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +273 64 pselect6 sys_pselect6 +274 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +274 64 ppoll sys_ppoll 275 common openat sys_openat compat_sys_openat 276 common mkdirat sys_mkdirat 277 
common mknodat sys_mknodat 278 common fchownat sys_fchownat -279 common futimesat sys_futimesat compat_sys_futimesat +279 32 futimesat sys_futimesat_time32 +279 64 futimesat sys_futimesat 280 common fstatat64 sys_fstatat64 281 common unlinkat sys_unlinkat 282 common renameat sys_renameat @@ -316,15 +337,18 @@ 298 common statfs64 sys_statfs64 compat_sys_statfs64 299 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 300 common kexec_load sys_kexec_load compat_sys_kexec_load -301 common utimensat sys_utimensat compat_sys_utimensat +301 32 utimensat sys_utimensat_time32 +301 64 utimensat sys_utimensat 302 common signalfd sys_signalfd compat_sys_signalfd # 303 was timerfd 304 common eventfd sys_eventfd 305 32 fallocate parisc_fallocate 305 64 fallocate sys_fallocate 306 common timerfd_create sys_timerfd_create -307 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime -308 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +307 32 timerfd_settime sys_timerfd_settime32 +307 64 timerfd_settime sys_timerfd_settime +308 32 timerfd_gettime sys_timerfd_gettime32 +308 64 timerfd_gettime sys_timerfd_gettime 309 common signalfd4 sys_signalfd4 compat_sys_signalfd4 310 common eventfd2 sys_eventfd2 311 common epoll_create1 sys_epoll_create1 @@ -335,12 +359,14 @@ 316 common pwritev sys_pwritev compat_sys_pwritev 317 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 318 common perf_event_open sys_perf_event_open -319 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +319 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +319 64 recvmmsg sys_recvmmsg 320 common accept4 sys_accept4 321 common prlimit64 sys_prlimit64 322 common fanotify_init sys_fanotify_init 323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark -324 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +324 32 clock_adjtime sys_clock_adjtime32 +324 64 clock_adjtime sys_clock_adjtime 325 common name_to_handle_at sys_name_to_handle_at 326 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at 327 common syncfs sys_syncfs @@ -352,7 +378,8 @@ 333 common finit_module sys_finit_module 334 common sched_setattr sys_sched_setattr 335 common sched_getattr sys_sched_getattr -336 common utimes sys_utimes compat_sys_utimes +336 32 utimes sys_utimes_time32 +336 64 utimes sys_utimes 337 common renameat2 sys_renameat2 338 common seccomp sys_seccomp 339 common getrandom sys_getrandom @@ -366,4 +393,30 @@ 347 common preadv2 sys_preadv2 compat_sys_preadv2 348 common pwritev2 sys_pwritev2 compat_sys_pwritev2 349 common statx sys_statx -350 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents +350 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +350 64 io_pgetevents sys_io_pgetevents +351 common pkey_mprotect sys_pkey_mprotect +352 common pkey_alloc sys_pkey_alloc +353 common pkey_free sys_pkey_free +354 common rseq sys_rseq +# 355 through 402 are unassigned to sync up with generic numbers +403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime +404 32 clock_settime64 sys_clock_settime sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime +409 32 timer_settime64 sys_timer_settime sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime 
sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 472a818e8c17..7e1ccafadf57 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -218,7 +218,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err) return; } - oops_in_progress = 1; + bust_spinlocks(1); oops_enter(); @@ -396,7 +396,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o { static DEFINE_SPINLOCK(terminate_lock); - oops_in_progress = 1; + bust_spinlocks(1); set_eiem(0); local_irq_disable(); diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index 4b19e6e64fb7..0195aec657e2 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c @@ -48,11 +48,15 @@ struct iomap_ops { unsigned int (*read16be)(void __iomem *); unsigned int (*read32)(void __iomem *); unsigned int (*read32be)(void __iomem *); + u64 (*read64)(void __iomem *); + u64 (*read64be)(void __iomem *); void (*write8)(u8, void __iomem *); void (*write16)(u16, void __iomem *); void (*write16be)(u16, void __iomem *); void (*write32)(u32, void __iomem *); void (*write32be)(u32, void __iomem *); + void (*write64)(u64, void __iomem *); + void (*write64be)(u64, void __iomem *); void (*read8r)(void __iomem *, void *, unsigned long); void (*read16r)(void __iomem *, void *, unsigned long); void (*read32r)(void __iomem *, void *, unsigned long); @@ -171,6 +175,16 @@ static unsigned int iomem_read32be(void __iomem *addr) return __raw_readl(addr); } +static u64 iomem_read64(void __iomem *addr) +{ + return readq(addr); +} + +static u64 iomem_read64be(void __iomem *addr) +{ + return __raw_readq(addr); +} + static void iomem_write8(u8 datum, void __iomem *addr) { writeb(datum, addr); @@ -196,6 +210,16 @@ static void iomem_write32be(u32 datum, void __iomem *addr) __raw_writel(datum, addr); } +static void iomem_write64(u64 datum, void __iomem *addr) +{ + writel(datum, addr); +} + +static void iomem_write64be(u64 datum, void __iomem *addr) +{ + __raw_writel(datum, addr); +} + static void iomem_read8r(void __iomem *addr, void *dst, unsigned long count) { while (count--) { @@ -250,11 +274,15 @@ static const struct iomap_ops iomem_ops = { .read16be = iomem_read16be, .read32 = iomem_read32, .read32be = iomem_read32be, + .read64 = iomem_read64, + .read64be = iomem_read64be, .write8 = iomem_write8, .write16 = iomem_write16, .write16be = iomem_write16be, .write32 = iomem_write32, .write32be = iomem_write32be, + .write64 = iomem_write64, + .write64be = iomem_write64be, .read8r = iomem_read8r, .read16r = iomem_read16r, .read32r = iomem_read32r, @@ -304,6 +332,20 @@ unsigned int ioread32be(void __iomem *addr) return *((u32 *)addr); } +u64 ioread64(void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) + return iomap_ops[ADDR_TO_REGION(addr)]->read64(addr); + return 
le64_to_cpup((u64 *)addr); +} + +u64 ioread64be(void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) + return iomap_ops[ADDR_TO_REGION(addr)]->read64be(addr); + return *((u64 *)addr); +} + void iowrite8(u8 datum, void __iomem *addr) { if (unlikely(INDIRECT_ADDR(addr))) { @@ -349,6 +391,24 @@ void iowrite32be(u32 datum, void __iomem *addr) } } +void iowrite64(u64 datum, void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) { + iomap_ops[ADDR_TO_REGION(addr)]->write64(datum, addr); + } else { + *((u64 *)addr) = cpu_to_le64(datum); + } +} + +void iowrite64be(u64 datum, void __iomem *addr) +{ + if (unlikely(INDIRECT_ADDR(addr))) { + iomap_ops[ADDR_TO_REGION(addr)]->write64be(datum, addr); + } else { + *((u64 *)addr) = datum; + } +} + /* Repeating interfaces */ void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) @@ -449,11 +509,15 @@ EXPORT_SYMBOL(ioread16); EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); +EXPORT_SYMBOL(ioread64); +EXPORT_SYMBOL(ioread64be); EXPORT_SYMBOL(iowrite8); EXPORT_SYMBOL(iowrite16); EXPORT_SYMBOL(iowrite16be); EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); +EXPORT_SYMBOL(iowrite64); +EXPORT_SYMBOL(iowrite64be); EXPORT_SYMBOL(ioread8_rep); EXPORT_SYMBOL(ioread16_rep); EXPORT_SYMBOL(ioread32_rep); diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 059187a3ded7..d0b166256f1a 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -79,36 +79,6 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; int npmem_ranges __read_mostly; -/* - * get_memblock() allocates pages via memblock. - * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it - * doesn't allocate from bottom to top which is needed because we only created - * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code. - */ -static void * __init get_memblock(unsigned long size) -{ - static phys_addr_t search_addr __initdata; - phys_addr_t phys; - - if (!search_addr) - search_addr = PAGE_ALIGN(__pa((unsigned long) &_end)); - search_addr = ALIGN(search_addr, size); - while (!memblock_is_region_memory(search_addr, size) || - memblock_is_region_reserved(search_addr, size)) { - search_addr += size; - } - phys = search_addr; - - if (phys) - memblock_reserve(phys, size); - else - panic("get_memblock() failed.\n"); - - memset(__va(phys), 0, size); - - return __va(phys); -} - #ifdef CONFIG_64BIT #define MAX_MEM (~0UL) #else /* !CONFIG_64BIT */ @@ -321,6 +291,13 @@ static void __init setup_bootmem(void) max_pfn = start_pfn + npages; } + /* + * We can't use memblock top-down allocations because we only + * created the initial mapping up to KERNEL_INITIAL_SIZE in + * the assembly bootup code. + */ + memblock_set_bottom_up(true); + /* IOMMU is always used to access "high mem" on those boxes * that can support enough mem that a PCI device couldn't * directly DMA to any physical addresses. 
@@ -442,7 +419,10 @@ static void __init map_pages(unsigned long start_vaddr, */ if (!pmd) { - pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER); + pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER, + PAGE_SIZE << PMD_ORDER); + if (!pmd) + panic("pmd allocation failed.\n"); pmd = (pmd_t *) __pa(pmd); } @@ -461,7 +441,10 @@ static void __init map_pages(unsigned long start_vaddr, pg_table = (pte_t *)pmd_address(*pmd); if (!pg_table) { - pg_table = (pte_t *) get_memblock(PAGE_SIZE); + pg_table = memblock_alloc(PAGE_SIZE, + PAGE_SIZE); + if (!pg_table) + panic("page table allocation failed\n"); pg_table = (pte_t *) __pa(pg_table); } @@ -700,7 +683,10 @@ static void __init pagetable_init(void) } #endif - empty_zero_page = get_memblock(PAGE_SIZE); + empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!empty_zero_page) + panic("zero page allocation failed.\n"); + } static void __init gateway_init(void) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2890d36eb531..2d0be82c3061 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -119,21 +119,19 @@ config GENERIC_HWEIGHT bool default y -config ARCH_HAS_DMA_SET_COHERENT_MASK - bool - config PPC bool default y # # Please keep this list sorted alphabetically. # + select ARCH_32BIT_OFF_T if PPC32 select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEVMEM_IS_ALLOWED - select ARCH_HAS_DMA_SET_COHERENT_MASK select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_KCOV select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_PMEM_API if PPC64 select ARCH_HAS_PTE_SPECIAL @@ -178,6 +176,7 @@ config PPC select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select HAVE_ARCH_NVRAM_OPS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_CBPF_JIT if !PPC64 @@ -201,7 +200,7 @@ config PPC select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK select HAVE_KERNEL_GZIP - select HAVE_KERNEL_XZ if PPC_BOOK3S + select HAVE_KERNEL_XZ if PPC_BOOK3S || 44x select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES @@ -220,7 +219,7 @@ config PPC select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if SMP select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN + select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING @@ -233,7 +232,6 @@ config PPC select NEED_SG_DMA_LENGTH select OF select OF_EARLY_FLATTREE - select OF_RESERVED_MEM select OLD_SIGACTION if PPC32 select OLD_SIGSUSPEND select PCI_DOMAINS if PCI @@ -241,6 +239,7 @@ config PPC select RTC_LIB select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE + select THREAD_INFO_IN_TASK select VIRT_TO_BUS if !PPC64 # # Please keep this list sorted alphabetically. 
@@ -251,9 +250,6 @@ config PPC_BARRIER_NOSPEC default y depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E -config GENERIC_CSUM - def_bool n - config EARLY_PRINTK bool default y @@ -274,11 +270,6 @@ config SYSVIPC_COMPAT depends on COMPAT && SYSVIPC default y -# All PPC32s use generic nvram driver through ppc_md -config GENERIC_NVRAM - bool - default y if PPC32 - config SCHED_OMIT_FRAME_POINTER bool default y @@ -478,9 +469,6 @@ config ARCH_CPU_PROBE_RELEASE config ARCH_ENABLE_MEMORY_HOTPLUG def_bool y -config ARCH_HAS_WALK_MEMORY - def_bool y - config ARCH_ENABLE_MEMORY_HOTREMOVE def_bool y @@ -696,7 +684,7 @@ config PPC_16K_PAGES config PPC_64K_PAGES bool "64k page size" - depends on !PPC_FSL_BOOK3E && (44x || PPC_BOOK3S_64 || PPC_BOOK3E_64) + depends on 44x || PPC_BOOK3S_64 select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64 config PPC_256K_PAGES @@ -714,6 +702,13 @@ config PPC_256K_PAGES endchoice +config PPC_PAGE_SHIFT + int + default 18 if PPC_256K_PAGES + default 16 if PPC_64K_PAGES + default 14 if PPC_16K_PAGES + default 12 + config THREAD_SHIFT int "Thread shift" if EXPERT range 13 15 @@ -724,6 +719,59 @@ config THREAD_SHIFT Used to define the stack size. The default is almost always what you want. Only change this if you know what you are doing. +config ETEXT_SHIFT_BOOL + bool "Set custom etext alignment" if STRICT_KERNEL_RWX && \ + (PPC_BOOK3S_32 || PPC_8xx) + depends on ADVANCED_OPTIONS + help + This option allows you to set the kernel end of text alignment. When + RAM is mapped by blocks, the alignment needs to fit the size and + number of possible blocks. The default should be OK for most configs. + + Say N here unless you know what you are doing. + +config ETEXT_SHIFT + int "_etext shift" if ETEXT_SHIFT_BOOL + range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 + range 19 23 if STRICT_KERNEL_RWX && PPC_8xx + default 17 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 + default 19 if STRICT_KERNEL_RWX && PPC_8xx + default PPC_PAGE_SHIFT + help + On Book3S 32 (603+), IBATs are used to map kernel text. + Smaller is the alignment, greater is the number of necessary IBATs. + + On 8xx, large pages (512kb or 8M) are used to map kernel linear + memory. Aligning to 8M reduces TLB misses as only 8M pages are used + in that case. + +config DATA_SHIFT_BOOL + bool "Set custom data alignment" if STRICT_KERNEL_RWX && \ + (PPC_BOOK3S_32 || PPC_8xx) + depends on ADVANCED_OPTIONS + help + This option allows you to set the kernel data alignment. When + RAM is mapped by blocks, the alignment needs to fit the size and + number of possible blocks. The default should be OK for most configs. + + Say N here unless you know what you are doing. + +config DATA_SHIFT + int "Data shift" if DATA_SHIFT_BOOL + default 24 if STRICT_KERNEL_RWX && PPC64 + range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 + range 19 23 if STRICT_KERNEL_RWX && PPC_8xx + default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32 + default 23 if STRICT_KERNEL_RWX && PPC_8xx + default PPC_PAGE_SHIFT + help + On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO. + Smaller is the alignment, greater is the number of necessary DBATs. + + On 8xx, large pages (512kb or 8M) are used to map kernel linear + memory. Aligning to 8M reduces TLB misses as only 8M pages are used + in that case. 
+ config FORCE_MAX_ZONEORDER int "Maximum zone order" range 8 9 if PPC64 && PPC_64K_PAGES @@ -890,6 +938,7 @@ config FSL_SOC config FSL_PCI bool + select ARCH_HAS_DMA_SET_MASK select PPC_INDIRECT_PCI select PCI_QUIRKS diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index f4961fbcb48d..4e00cb0a5464 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -361,10 +361,6 @@ config PPC_PTDUMP If you are unsure, say N. -config PPC_HTDUMP - def_bool y - depends on PPC_PTDUMP && PPC_BOOK3S_64 - config PPC_FAST_ENDIAN_SWITCH bool "Deprecated fast endian-switch syscall" depends on DEBUG_KERNEL && PPC_BOOK3S_64 diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 488c9edffa58..7de49889bd5d 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -213,9 +213,9 @@ endif asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1) KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr) -KBUILD_AFLAGS += -Iarch/$(ARCH) $(AFLAGS-y) +KBUILD_AFLAGS += $(AFLAGS-y) KBUILD_CFLAGS += $(call cc-option,-msoft-float) -KBUILD_CFLAGS += -pipe -Iarch/$(ARCH) $(CFLAGS-y) +KBUILD_CFLAGS += -pipe $(CFLAGS-y) CPP = $(CC) -E $(KBUILD_CFLAGS) CHECKFLAGS += -m$(BITS) -D__powerpc__ -D__powerpc$(BITS)__ @@ -427,6 +427,13 @@ else endif endif +ifdef CONFIG_SMP +prepare: task_cpu_prepare + +task_cpu_prepare: prepare0 + $(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h)) +endif + # Check toolchain versions: # - gcc-4.6 is the minimum kernel-wide version so nothing required. checkbin: diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 0e8dadd011bc..73d1f3562978 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -218,7 +218,7 @@ quiet_cmd_bootas = BOOTAS $@ cmd_bootas = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< quiet_cmd_bootar = BOOTAR $@ - cmd_bootar = $(BOOTAR) $(BOOTARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ + cmd_bootar = $(BOOTAR) $(BOOTARFLAGS) $@.$$$$ $(real-prereqs); mv $@.$$$$ $@ $(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE $(call if_changed_dep,bootcc) diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile index fb335d05aae8..1cbc0e4ce857 100644 --- a/arch/powerpc/boot/dts/Makefile +++ b/arch/powerpc/boot/dts/Makefile @@ -4,3 +4,4 @@ subdir-y += fsl dtstree := $(srctree)/$(src) dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +dtb-$(CONFIG_XILINX_VIRTEX440_GENERIC_BOARD) += virtex440-ml507.dtb virtex440-ml510.dtb diff --git a/arch/powerpc/boot/dts/akebono.dts b/arch/powerpc/boot/dts/akebono.dts index 8a7a10139bc9..cd9d66041a3f 100644 --- a/arch/powerpc/boot/dts/akebono.dts +++ b/arch/powerpc/boot/dts/akebono.dts @@ -40,7 +40,7 @@ d-cache-size = <32768>; dcr-controller; dcr-access-method = "native"; - status = "ok"; + status = "okay"; }; cpu@1 { device_type = "cpu"; diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts index b0b26d8d68a2..64eaf7e09d22 100644 --- a/arch/powerpc/boot/dts/bluestone.dts +++ b/arch/powerpc/boot/dts/bluestone.dts @@ -109,7 +109,7 @@ OCM: ocm@400040000 { compatible = "ibm,ocm"; - status = "ok"; + status = "okay"; cell-index = <1>; /* configured in U-Boot */ reg = <4 0x00040000 0x8000>; /* 32K */ diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts index a04a4fcfde63..b6d87b9c2cef 100644 --- a/arch/powerpc/boot/dts/currituck.dts +++ 
b/arch/powerpc/boot/dts/currituck.dts @@ -39,7 +39,7 @@ d-cache-size = <32768>; dcr-controller; dcr-access-method = "native"; - status = "ok"; + status = "okay"; }; cpu@1 { device_type = "cpu"; diff --git a/arch/powerpc/boot/dts/iss4xx-mpic.dts b/arch/powerpc/boot/dts/iss4xx-mpic.dts index f7063198b2dc..c9f90f1a9c8e 100644 --- a/arch/powerpc/boot/dts/iss4xx-mpic.dts +++ b/arch/powerpc/boot/dts/iss4xx-mpic.dts @@ -43,7 +43,7 @@ d-cache-size = <32768>; dcr-controller; dcr-access-method = "native"; - status = "ok"; + status = "okay"; }; cpu@1 { device_type = "cpu"; diff --git a/arch/powerpc/boot/dts/wii.dts b/arch/powerpc/boot/dts/wii.dts index 104b1d6d5695..c406bdb4f36f 100644 --- a/arch/powerpc/boot/dts/wii.dts +++ b/arch/powerpc/boot/dts/wii.dts @@ -14,6 +14,7 @@ /dts-v1/; #include +#include /* * This is commented-out for now. @@ -187,6 +188,11 @@ "DEBUG0", "DEBUG1", "DEBUG2", "DEBUG3", "DEBUG4", "DEBUG5", "DEBUG6", "DEBUG7"; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <10>; + interrupt-parent = <&PIC1>; + /* * This is commented out while a standard binding * for i2c over gpio is defined. @@ -235,5 +241,21 @@ panic-indicator; }; }; + + gpio-keys { + compatible = "gpio-keys"; + + power { + label = "Power Button"; + gpios = <&GPIO 0 GPIO_ACTIVE_HIGH>; + linux,code = ; + }; + + eject { + label = "Eject Button"; + gpios = <&GPIO 6 GPIO_ACTIVE_HIGH>; + linux,code = ; + }; + }; }; diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index c2b1c4404683..e4bfb1101c0e 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig @@ -120,6 +120,5 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_CRYPTO_HW is not set diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 53687c3a70c4..7c6baf6df139 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -1123,7 +1123,6 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_DEBUG_INFO=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_UNUSED_SYMBOLS=y CONFIG_HEADERS_CHECK=y CONFIG_MAGIC_SYSRQ=y diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index 1d911f68a23b..296584e6dd55 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -23,8 +23,8 @@ #include /* SMP */ -extern struct thread_info *current_set[NR_CPUS]; -extern struct thread_info *secondary_ti; +extern struct task_struct *current_set[NR_CPUS]; +extern struct task_struct *secondary_current; void start_secondary(void *unused); /* kexec */ @@ -37,13 +37,11 @@ void kexec_copy_flush(struct kimage *image); extern struct static_key hcall_tracepoint_key; void __trace_hcall_entry(unsigned long opcode, unsigned long *args); void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf); -/* OPAL tracing */ -#ifdef CONFIG_JUMP_LABEL -extern struct static_key opal_tracepoint_key; -#endif -void __trace_opal_entry(unsigned long opcode, unsigned long *args); -void __trace_opal_exit(long opcode, unsigned long retval); +/* OPAL */ +int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3, + int64_t a4, int64_t a5, int64_t a6, int64_t a7, + int64_t opcode, uint64_t msr); /* VMX copying */ int enter_vmx_usercopy(void); diff --git 
a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h index 0c261ba2c826..5cb588395fdc 100644 --- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h @@ -92,6 +92,8 @@ typedef struct { unsigned long vdso_base; } mm_context_t; +void update_bats(void); + /* patch sites */ extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2; extern s32 patch__hash_page_B, patch__hash_page_C; diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 49d76adb9bc5..aa8406b8f7ba 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -174,7 +174,18 @@ static inline bool pte_user(pte_t pte) * of RAM. -- Cort */ #define VMALLOC_OFFSET (0x1000000) /* 16M */ + +/* + * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules + * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear + * memory shall not share segments. + */ +#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES) +#define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \ + ~(VMALLOC_OFFSET - 1)) +#else #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#endif #define VMALLOC_END ioremap_bot #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 247aff9cc6ba..54b7af6cd27f 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -40,22 +40,36 @@ #else #define H_PUD_CACHE_INDEX (H_PUD_INDEX_SIZE) #endif + /* - * Define the address range of the kernel non-linear virtual area + * Define the address range of the kernel non-linear virtual area. In contrast + * to the linear mapping, this is managed using the kernel page tables and then + * inserted into the hash page table to actually take effect, similarly to user + * mappings. */ #define H_KERN_VIRT_START ASM_CONST(0xD000000000000000) -#define H_KERN_VIRT_SIZE ASM_CONST(0x0000400000000000) /* 64T */ /* - * The vmalloc space starts at the beginning of that region, and - * occupies half of it on hash CPUs and a quarter of it on Book3E - * (we keep a quarter for the virtual memmap) + * Allow virtual mapping of one context size. 
+ * 512TB for 64K page size + * 64TB for 4K page size + */ +#define H_KERN_VIRT_SIZE (1UL << MAX_EA_BITS_PER_CONTEXT) + +/* + * 8TB IO mapping size + */ +#define H_KERN_IO_SIZE ASM_CONST(0x80000000000) /* 8T */ + +/* + * The vmalloc space starts at the beginning of the kernel non-linear virtual + * region, and occupies 504T (64K) or 56T (4K) */ -#define H_VMALLOC_START H_KERN_VIRT_START -#define H_VMALLOC_SIZE ASM_CONST(0x380000000000) /* 56T */ -#define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE) +#define H_VMALLOC_START H_KERN_VIRT_START +#define H_VMALLOC_SIZE (H_KERN_VIRT_SIZE - H_KERN_IO_SIZE) +#define H_VMALLOC_END (H_VMALLOC_START + H_VMALLOC_SIZE) -#define H_KERN_IO_START H_VMALLOC_END +#define H_KERN_IO_START H_VMALLOC_END /* * Region IDs diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h index 5b0177733994..66c1e4f88d65 100644 --- a/arch/powerpc/include/asm/book3s/64/hugetlb.h +++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h @@ -13,6 +13,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); +extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t pte); + static inline int hstate_get_psize(struct hstate *hstate) { unsigned long shift; @@ -42,4 +46,12 @@ static inline bool gigantic_page_supported(void) /* hugepd entry valid bit */ #define HUGEPD_VAL_BITS (0x8000000000000000UL) +#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start +extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); + +#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit +extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t new_pte); #endif diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 12e522807f9f..a28a28079edb 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h @@ -23,7 +23,7 @@ */ #include #include -#include +#include #include /* diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 9c1173283b96..138bc2ecc0c4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -111,7 +111,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) { - pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS); + *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS); } static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) @@ -138,7 +138,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) { - pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS); + *pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS); } static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, @@ -176,13 +176,13 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) { - pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS); + *pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS); } static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t 
pte_page) { - pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS); + *pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS); } static inline pgtable_t pmd_pgtable(pmd_t pmd) diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2e6ada28da64..581f91be9dd4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -811,7 +811,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, return hash__set_pte_at(mm, addr, ptep, pte, percpu); } -#define _PAGE_CACHE_CTL (_PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) +#define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) #define pgprot_noncached pgprot_noncached static inline pgprot_t pgprot_noncached(pgprot_t prot) @@ -851,11 +851,6 @@ static inline bool pte_ci(pte_t pte) return false; } -static inline void pmd_set(pmd_t *pmdp, unsigned long val) -{ - *pmdp = __pmd(val); -} - static inline void pmd_clear(pmd_t *pmdp) { *pmdp = __pmd(0); @@ -887,11 +882,6 @@ static inline int pmd_bad(pmd_t pmd) return hash__pmd_bad(pmd); } -static inline void pud_set(pud_t *pudp, unsigned long val) -{ - *pudp = __pud(val); -} - static inline void pud_clear(pud_t *pudp) { *pudp = __pud(0); @@ -904,7 +894,7 @@ static inline int pud_none(pud_t pud) static inline int pud_present(pud_t pud) { - return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); + return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); } extern struct page *pud_page(pud_t pud); @@ -934,10 +924,6 @@ static inline bool pud_access_permitted(pud_t pud, bool write) } #define pgd_write(pgd) pte_write(pgd_pte(pgd)) -static inline void pgd_set(pgd_t *pgdp, unsigned long val) -{ - *pgdp = __pgd(val); -} static inline void pgd_clear(pgd_t *pgdp) { @@ -951,7 +937,7 @@ static inline int pgd_none(pgd_t pgd) static inline int pgd_present(pgd_t pgd) { - return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); + return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); } static inline pte_t pgd_pte(pgd_t pgd) @@ -1258,21 +1244,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, #define pmd_move_must_withdraw pmd_move_must_withdraw struct spinlock; -static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, - struct spinlock *old_pmd_ptl, - struct vm_area_struct *vma) -{ - if (radix_enabled()) - return false; - /* - * Archs like ppc64 use pgtable to store per pmd - * specific information. So when we switch the pmd, - * we should also withdraw and deposit the pgtable - */ - return true; -} - - +extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma); +/* + * Hash translation mode use the deposited table to store hash pte + * slot information. 
+ */ #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit static inline bool arch_needs_pgtable_deposit(void) { @@ -1314,6 +1292,24 @@ static inline int pud_pfn(pud_t pud) BUILD_BUG(); return 0; } +#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION +pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *); +void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long, + pte_t *, pte_t, pte_t); + +/* + * Returns true for a R -> RW upgrade of pte + */ +static inline bool is_pte_rw_upgrade(unsigned long old_val, unsigned long new_val) +{ + if (!(old_val & _PAGE_READ)) + return false; + + if ((!(old_val & _PAGE_WRITE)) && (new_val & _PAGE_WRITE)) + return true; + + return false; +} #endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */ diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 7d1a3d1543fc..5ab134eeed20 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h @@ -127,6 +127,10 @@ extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep pte_t entry, unsigned long address, int psize); +extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t pte); + static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr, unsigned long set) { diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 671316f9e95d..05147cecb8df 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h @@ -13,8 +13,32 @@ static inline int mmu_get_ap(int psize) #ifdef CONFIG_PPC_RADIX_MMU extern void radix__tlbiel_all(unsigned int action); +extern void radix__flush_tlb_lpid_page(unsigned int lpid, + unsigned long addr, + unsigned long page_size); +extern void radix__flush_pwc_lpid(unsigned int lpid); +extern void radix__flush_tlb_lpid(unsigned int lpid); +extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid); #else static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); }; +static inline void radix__flush_tlb_lpid_page(unsigned int lpid, + unsigned long addr, + unsigned long page_size) +{ + WARN_ON(1); +} +static inline void radix__flush_pwc_lpid(unsigned int lpid) +{ + WARN_ON(1); +} +static inline void radix__flush_tlb_lpid(unsigned int lpid) +{ + WARN_ON(1); +} +static inline void radix__local_flush_tlb_lpid_guest(unsigned int lpid) +{ + WARN_ON(1); +} #endif extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, @@ -49,12 +73,6 @@ extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr); extern void radix__flush_tlb_all(void); -extern void radix__flush_tlb_lpid_page(unsigned int lpid, - unsigned long addr, - unsigned long page_size); -extern void radix__flush_pwc_lpid(unsigned int lpid); -extern void radix__flush_tlb_lpid(unsigned int lpid); extern void radix__local_flush_tlb_lpid(unsigned int lpid); -extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid); #endif diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index a78a57e5058d..72a65d744a28 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -9,9 +9,6 @@ * 2 of the License, or (at your option) any later version. 
*/ -#ifdef CONFIG_GENERIC_CSUM -#include -#else #include #include /* @@ -217,6 +214,5 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, __u32 len, __u8 proto, __wsum sum); -#endif #endif /* __KERNEL__ */ #endif diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index 0245bfcaac32..a130be13ee83 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h @@ -19,6 +19,11 @@ struct iommu_table; * drivers/macintosh/macio_asic.c */ struct dev_archdata { + /* + * Set to %true if the dma_iommu_ops are requested to use a direct + * window instead of dynamically mapping memory. + */ + bool iommu_bypass : 1; /* * These two used to be a union. However, with the hybrid ops we need * both so here we store both a DMA offset for direct mappings and @@ -33,9 +38,6 @@ struct dev_archdata { #ifdef CONFIG_IOMMU_API void *iommu_domain; #endif -#ifdef CONFIG_SWIOTLB - dma_addr_t max_direct_dma_addr; -#endif #ifdef CONFIG_PPC64 struct pci_dn *pci_data; #endif @@ -54,6 +56,4 @@ struct pdev_archdata { u64 dma_mask; }; -#define ARCH_HAS_DMA_GET_REQUIRED_MASK - #endif /* _ASM_POWERPC_DEVICE_H */ diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h index 7702875aabb7..a2912b47102c 100644 --- a/arch/powerpc/include/asm/dma-direct.h +++ b/arch/powerpc/include/asm/dma-direct.h @@ -4,26 +4,24 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { -#ifdef CONFIG_SWIOTLB - struct dev_archdata *sd = &dev->archdata; - - if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr) - return false; -#endif - if (!dev->dma_mask) return false; - return addr + size - 1 <= *dev->dma_mask; + return addr + size - 1 <= + min_not_zero(*dev->dma_mask, dev->bus_dma_mask); } static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) { - return paddr + get_dma_offset(dev); + if (!dev) + return paddr + PCI_DRAM_OFFSET; + return paddr + dev->archdata.dma_offset; } static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr) { - return daddr - get_dma_offset(dev); + if (!dev) + return daddr - PCI_DRAM_OFFSET; + return daddr - dev->archdata.dma_offset; } #endif /* ASM_POWERPC_DMA_DIRECT_H */ diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index ebf66809f2d3..565d6f74b189 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -1,74 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2004 IBM - * - * Implements the generic device dma API for powerpc. - * the pci and vio busses */ #ifndef _ASM_DMA_MAPPING_H #define _ASM_DMA_MAPPING_H -#ifdef __KERNEL__ - -#include -#include -/* need struct page definitions */ -#include -#include -#include -#include -#include - -/* Some dma direct funcs must be visible for use in other dma_ops */ -extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - unsigned long attrs); -extern void __dma_nommu_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs); -extern int dma_nommu_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t handle, - size_t size, unsigned long attrs); - -#ifdef CONFIG_NOT_COHERENT_CACHE -/* - * DMA-consistent mapping functions for PowerPCs that don't support - * cache snooping. 
These allocate/free a region of uncached mapped - * memory space for use with DMA devices. Alternatively, you could - * allocate the space "normally" and use the cache management functions - * to ensure it is consistent. - */ -struct device; -extern void __dma_sync(void *vaddr, size_t size, int direction); -extern void __dma_sync_page(struct page *page, unsigned long offset, - size_t size, int direction); -extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr); - -#else /* ! CONFIG_NOT_COHERENT_CACHE */ -/* - * Cache coherent cores. - */ - -#define __dma_sync(addr, size, rw) ((void)0) -#define __dma_sync_page(pg, off, sz, rw) ((void)0) - -#endif /* ! CONFIG_NOT_COHERENT_CACHE */ - -static inline unsigned long device_to_mask(struct device *dev) -{ - if (dev->dma_mask && *dev->dma_mask) - return *dev->dma_mask; - /* Assume devices without mask can take 32 bit addresses */ - return 0xfffffffful; -} - -/* - * Available generic sets of operations - */ -#ifdef CONFIG_PPC64 -extern struct dma_map_ops dma_iommu_ops; -#endif -extern const struct dma_map_ops dma_nommu_ops; static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) { @@ -80,31 +15,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) return NULL; } -/* - * get_dma_offset() - * - * Get the dma offset on configurations where the dma address can be determined - * from the physical address by looking at a simple offset. Direct dma and - * swiotlb use this function, but it is typically not used by implementations - * with an iommu. - */ -static inline dma_addr_t get_dma_offset(struct device *dev) -{ - if (dev) - return dev->archdata.dma_offset; - - return PCI_DRAM_OFFSET; -} - -static inline void set_dma_offset(struct device *dev, dma_addr_t off) -{ - if (dev) - dev->archdata.dma_offset = off; -} - -#define HAVE_ARCH_DMA_SET_MASK 1 - -extern u64 __dma_get_required_mask(struct device *dev); - -#endif /* __KERNEL__ */ #endif /* _ASM_DMA_MAPPING_H */ diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index 8b596d096ebe..94cfcf33030a 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -219,7 +219,8 @@ struct eeh_ops { }; extern int eeh_subsystem_flags; -extern int eeh_max_freezes; +extern u32 eeh_max_freezes; +extern bool eeh_debugfs_no_recover; extern struct eeh_ops *eeh_ops; extern raw_spinlock_t confirm_error_lock; @@ -293,14 +294,14 @@ void eeh_add_device_late(struct pci_dev *); void eeh_add_device_tree_late(struct pci_bus *); void eeh_add_sysfs_files(struct pci_bus *); void eeh_remove_device(struct pci_dev *); -int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state); +int eeh_unfreeze_pe(struct eeh_pe *pe); int eeh_pe_reset_and_recover(struct eeh_pe *pe); int eeh_dev_open(struct pci_dev *pdev); void eeh_dev_release(struct pci_dev *pdev); struct eeh_pe *eeh_iommu_group_to_pe(struct iommu_group *group); int eeh_pe_set_option(struct eeh_pe *pe, int option); int eeh_pe_get_state(struct eeh_pe *pe); -int eeh_pe_reset(struct eeh_pe *pe, int option); +int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed); int eeh_pe_configure(struct eeh_pe *pe); int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func, unsigned long addr, unsigned long mask); @@ -460,6 +461,9 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf, eeh_check_failure(addr); } + +void eeh_cache_debugfs_init(void); + #endif /* CONFIG_PPC64 */ #endif /* __KERNEL__ */ #endif /* _POWERPC_EEH_H */ diff --git 
a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h index 9884e872686f..6d0412b846ac 100644 --- a/arch/powerpc/include/asm/eeh_event.h +++ b/arch/powerpc/include/asm/eeh_event.h @@ -33,6 +33,7 @@ struct eeh_event { int eeh_event_init(void); int eeh_send_failure_event(struct eeh_pe *pe); +int __eeh_send_failure_event(struct eeh_pe *pe); void eeh_remove_event(struct eeh_pe *pe, bool force); void eeh_handle_normal_event(struct eeh_pe *pe); void eeh_handle_special_event(void); diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 3b4767ed3ec5..937bb630093f 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -671,7 +671,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) #define RUNLATCH_ON \ BEGIN_FTR_SECTION \ - CURRENT_THREAD_INFO(r3, r1); \ + ld r3, PACA_THREAD_INFO(r13); \ ld r4,TI_LOCAL_FLAGS(r3); \ andi. r0,r4,_TLF_RUNLATCH; \ beql ppc64_runlatch_on_trampoline; \ @@ -721,7 +721,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CTRL) #ifdef CONFIG_PPC_970_NAP #define FINISH_NAP \ BEGIN_FTR_SECTION \ - CURRENT_THREAD_INFO(r11, r1); \ + ld r11, PACA_THREAD_INFO(r13); \ ld r9,TI_LOCAL_FLAGS(r11); \ andi. r10,r9,_TLF_NAPPING; \ bnel power4_fixup_nap; \ diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h index 3fdc54df63c9..464a7519ed64 100644 --- a/arch/powerpc/include/asm/hvsi.h +++ b/arch/powerpc/include/asm/hvsi.h @@ -64,7 +64,7 @@ struct hvsi_priv { unsigned int inbuf_len; /* data in input buffer */ unsigned char inbuf[HVSI_INBUF_SIZE]; unsigned int inbuf_cur; /* Cursor in input buffer */ - unsigned int inbuf_pktlen; /* packet lenght from cursor */ + unsigned int inbuf_pktlen; /* packet length from cursor */ atomic_t seqno; /* packet sequence number */ unsigned int opened:1; /* driver opened */ unsigned int established:1; /* protocol established */ diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 7f19fbd3ba55..4b73847e9b95 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -783,8 +783,10 @@ extern void __iounmap_at(void *ea, unsigned long size); #define mmio_read16be(addr) readw_be(addr) #define mmio_read32be(addr) readl_be(addr) +#define mmio_read64be(addr) readq_be(addr) #define mmio_write16be(val, addr) writew_be(val, addr) #define mmio_write32be(val, addr) writel_be(val, addr) +#define mmio_write64be(val, addr) writeq_be(val, addr) #define mmio_insb(addr, dst, count) readsb(addr, dst, count) #define mmio_insw(addr, dst, count) readsw(addr, dst, count) #define mmio_insl(addr, dst, count) readsl(addr, dst, count) diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 17524d222a7b..0ac52392ed99 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -237,6 +237,7 @@ static inline void iommu_del_device(struct device *dev) } #endif /* !CONFIG_IOMMU_API */ +u64 dma_iommu_get_required_mask(struct device *dev); #else static inline void *get_iommu_table_base(struct device *dev) @@ -318,5 +319,21 @@ extern void iommu_release_ownership(struct iommu_table *tbl); extern enum dma_data_direction iommu_tce_direction(unsigned long tce); extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir); +#ifdef CONFIG_PPC_CELL_NATIVE +extern bool iommu_fixed_is_weak; +#else +#define iommu_fixed_is_weak false +#endif + +extern const struct dma_map_ops dma_iommu_ops; + +static inline unsigned long device_to_mask(struct device *dev) +{ + if 
(dev->dma_mask && *dev->dma_mask) + return *dev->dma_mask; + /* Assume devices without mask can take 32 bit addresses */ + return 0xfffffffful; +} + #endif /* __KERNEL__ */ #endif /* _ASM_IOMMU_H */ diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h index 3dbd47f2bffe..abad50a745db 100644 --- a/arch/powerpc/include/asm/ipic.h +++ b/arch/powerpc/include/asm/ipic.h @@ -69,10 +69,7 @@ enum ipic_mcp_irq { IPIC_MCP_MU = 7, }; -extern void ipic_set_highest_priority(unsigned int irq); extern void ipic_set_default_priority(void); -extern void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq); -extern void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq); extern u32 ipic_get_mcp_status(void); extern void ipic_clear_mcp_status(u32 mask); diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index ee39ce56b2a2..c91a60cda4fa 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -48,23 +48,19 @@ struct pt_regs; * Per-cpu stacks for handling critical, debug and machine check * level interrupts. */ -extern struct thread_info *critirq_ctx[NR_CPUS]; -extern struct thread_info *dbgirq_ctx[NR_CPUS]; -extern struct thread_info *mcheckirq_ctx[NR_CPUS]; -extern void exc_lvl_ctx_init(void); -#else -#define exc_lvl_ctx_init() +extern void *critirq_ctx[NR_CPUS]; +extern void *dbgirq_ctx[NR_CPUS]; +extern void *mcheckirq_ctx[NR_CPUS]; #endif /* * Per-cpu stacks for handling hard and soft interrupts. */ -extern struct thread_info *hardirq_ctx[NR_CPUS]; -extern struct thread_info *softirq_ctx[NR_CPUS]; +extern void *hardirq_ctx[NR_CPUS]; +extern void *softirq_ctx[NR_CPUS]; -extern void irq_ctx_init(void); -extern void call_do_softirq(struct thread_info *tp); -extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp); +void call_do_softirq(void *sp); +void call_do_irq(struct pt_regs *regs, void *sp); extern void do_IRQ(struct pt_regs *regs); extern void __init init_IRQ(void); extern void __do_irq(struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index eb0d79f0ca45..a6c8548ed9fa 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -141,6 +141,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu); extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); +extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags); extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu); extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu); @@ -632,7 +633,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target, unsigned int yield_count); long kvmppc_h_random(struct kvm_vcpu *vcpu); void kvmhv_commence_exit(int trap); -long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu); +void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu); void kvmppc_subcore_enter_guest(void); void kvmppc_subcore_exit_guest(void); long kvmppc_realmode_hmi_handler(void); diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index 47a03b9b528b..5070df19d463 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -21,6 +21,7 @@ #include #include +#include #ifdef CONFIG_LIVEPATCH static inline int klp_check_compiler_support(void) @@ -43,13 +44,13 @@ static inline unsigned long klp_get_ftrace_location(unsigned long 
faddr) return ftrace_location_range(faddr, faddr + 16); } -static inline void klp_init_thread_info(struct thread_info *ti) +static inline void klp_init_thread_info(struct task_struct *p) { /* + 1 to account for STACK_END_MAGIC */ - ti->livepatch_sp = (unsigned long *)(ti + 1) + 1; + task_thread_info(p)->livepatch_sp = end_of_stack(p) + 1; } #else -static void klp_init_thread_info(struct thread_info *ti) { } +static inline void klp_init_thread_info(struct task_struct *p) { } #endif /* CONFIG_LIVEPATCH */ #endif /* _ASM_POWERPC_LIVEPATCH_H */ diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 8311869005fa..2f0ca6560e47 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h @@ -47,9 +47,7 @@ struct machdep_calls { #endif #endif /* CONFIG_PPC64 */ - /* Platform set_dma_mask and dma_get_required_mask overrides */ - int (*dma_set_mask)(struct device *dev, u64 dma_mask); - u64 (*dma_get_required_mask)(struct device *dev); + void (*dma_set_mask)(struct device *dev, u64 dma_mask); int (*probe)(void); void (*setup_arch)(void); /* Optional, may be NULL */ diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index a8b8903e1844..17996bc9382b 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -209,7 +209,7 @@ extern int get_mce_event(struct machine_check_event *mce, bool release); extern void release_mce_event(void); extern void machine_check_queue_event(void); extern void machine_check_print_event_info(struct machine_check_event *evt, - bool user_mode); + bool user_mode, bool in_guest); #ifdef CONFIG_PPC_BOOK3S_64 void flush_and_reload_slb(void); #endif /* CONFIG_PPC_BOOK3S_64 */ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 25607604a7a5..d34ad1657d7b 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -289,6 +289,17 @@ static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address) } #endif /* CONFIG_PPC_MEM_KEYS */ +#ifdef CONFIG_STRICT_KERNEL_RWX +static inline bool strict_kernel_rwx_enabled(void) +{ + return rodata_enabled; +} +#else +static inline bool strict_kernel_rwx_enabled(void) +{ + return false; +} +#endif #endif /* !__ASSEMBLY__ */ /* The kernel use the constants below to index in the page sizes array. 
@@ -356,6 +367,8 @@ extern void early_init_mmu_secondary(void); extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size); static inline void mmu_early_init_devtree(void) { } + +extern void *abatron_pteptrs[2]; #endif /* __ASSEMBLY__ */ #endif diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index bd9ba8defd72..84b4cfe73edd 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -14,4 +14,6 @@ extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace #endif +extern void hv_nmi_check_nonrecoverable(struct pt_regs *regs); + #endif /* _ASM_NMI_H */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index b0f764c827c0..0a1a3fc54e54 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -231,9 +231,10 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) } /* patch sites */ -extern s32 patch__itlbmiss_linmem_top; +extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8; extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp; extern s32 patch__fixupdar_linmem_top; +extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8; extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2; extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3; diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h index 09a518bb7c03..629a5cdcc865 100644 --- a/arch/powerpc/include/asm/nvram.h +++ b/arch/powerpc/include/asm/nvram.h @@ -78,9 +78,6 @@ extern int pmac_get_partition(int partition); extern u8 pmac_xpram_read(int xpaddr); extern void pmac_xpram_write(int xpaddr, u8 data); -/* Synchronize NVRAM */ -extern void nvram_sync(void); - /* Initialize NVRAM OS partition */ extern int __init nvram_init_os_partition(struct nvram_os_partition *part); @@ -98,10 +95,4 @@ extern int nvram_write_os_partition(struct nvram_os_partition *part, unsigned int err_type, unsigned int error_log_cnt); -/* Determine NVRAM size */ -extern ssize_t nvram_get_size(void); - -/* Normal access to NVRAM */ -extern unsigned char nvram_read_byte(int i); -extern void nvram_write_byte(unsigned char c, int i); #endif /* _ASM_POWERPC_NVRAM_H */ diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 5c5ea2413413..ed870468ef6f 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h @@ -20,20 +20,11 @@ /* * On regular PPC32 page size is 4K (but we support 4K/16K/64K/256K pages - * on PPC44x). For PPC64 we support either 4K or 64K software + * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software * page size. When using 64K pages however, whether we are really supporting * 64K pages in HW or not is irrelevant to those definitions. 
*/ -#if defined(CONFIG_PPC_256K_PAGES) -#define PAGE_SHIFT 18 -#elif defined(CONFIG_PPC_64K_PAGES) -#define PAGE_SHIFT 16 -#elif defined(CONFIG_PPC_16K_PAGES) -#define PAGE_SHIFT 14 -#else -#define PAGE_SHIFT 12 -#endif - +#define PAGE_SHIFT CONFIG_PPC_PAGE_SHIFT #define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT) #ifndef __ASSEMBLY__ @@ -326,7 +317,6 @@ struct page; extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p); -extern int page_is_ram(unsigned long pfn); extern int devmem_is_allowed(unsigned long pfn); #ifdef CONFIG_PPC_SMLPAR diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index aee4fcc24990..fc188e0e9179 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h @@ -10,6 +10,7 @@ #include #include #include +#include struct device_node; @@ -19,6 +20,8 @@ struct device_node; struct pci_controller_ops { void (*dma_dev_setup)(struct pci_dev *pdev); void (*dma_bus_setup)(struct pci_bus *bus); + bool (*iommu_bypass_supported)(struct pci_dev *pdev, + u64 mask); int (*probe_mode)(struct pci_bus *bus); @@ -43,9 +46,6 @@ struct pci_controller_ops { void (*teardown_msi_irqs)(struct pci_dev *pdev); #endif - int (*dma_set_mask)(struct pci_dev *pdev, u64 dma_mask); - u64 (*dma_get_required_mask)(struct pci_dev *pdev); - void (*shutdown)(struct pci_controller *hose); }; @@ -265,7 +265,7 @@ extern int pcibios_map_io_space(struct pci_bus *bus); #ifdef CONFIG_NUMA #define PHB_SET_NODE(PHB, NODE) ((PHB)->node = (NODE)) #else -#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = -1) +#define PHB_SET_NODE(PHB, NODE) ((PHB)->node = NUMA_NO_NODE) #endif #endif /* CONFIG_PPC64 */ @@ -274,6 +274,8 @@ extern int pcibios_map_io_space(struct pci_bus *bus); extern struct pci_controller *pci_find_hose_for_OF_device( struct device_node* node); +extern struct pci_controller *pci_find_controller_for_domain(int domain_nr); + /* Fill up host controller resources from the OF node */ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose, struct device_node *dev, int primary); diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index 0c72f1897063..6a1861a6301e 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h @@ -52,10 +52,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) #ifdef CONFIG_PCI extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); -extern const struct dma_map_ops *get_pci_dma_ops(void); #else /* CONFIG_PCI */ #define set_pci_dma_ops(d) -#define get_pci_dma_ops() NULL #endif #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index dad1d27e196d..505550fb2935 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -66,7 +66,6 @@ extern unsigned long empty_zero_page[]; extern pgd_t swapper_pg_dir[]; -int dma_pfn_limit_to_zone(u64 pfn_limit); extern void paging_init(void); /* diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h index 2f3ff7a27881..05b552418519 100644 --- a/arch/powerpc/include/asm/powernv.h +++ b/arch/powerpc/include/asm/powernv.h @@ -23,6 +23,8 @@ extern int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea, unsigned long *flags, unsigned long *status, int count); +void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val); + void pnv_tm_init(void); #else static 
inline void powernv_set_nmmu_ptcr(unsigned long ptcr) { } @@ -40,7 +42,6 @@ static inline int pnv_npu2_handle_fault(struct npu_context *context, } static inline void pnv_tm_init(void) { } -static inline void pnv_power9_force_smt4(void) { } #endif #endif /* _ASM_POWERNV_H */ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 19a8834e0398..c5698a523bb1 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -326,6 +326,7 @@ #define PPC_INST_ADDI 0x38000000 #define PPC_INST_ADDIS 0x3c000000 #define PPC_INST_ADD 0x7c000214 +#define PPC_INST_ADDC 0x7c000014 #define PPC_INST_SUB 0x7c000050 #define PPC_INST_BLR 0x4e800020 #define PPC_INST_BLRL 0x4e800021 @@ -334,9 +335,13 @@ #define PPC_INST_MULLW 0x7c0001d6 #define PPC_INST_MULHWU 0x7c000016 #define PPC_INST_MULLI 0x1c000000 +#define PPC_INST_MADDHD 0x10000030 +#define PPC_INST_MADDHDU 0x10000031 +#define PPC_INST_MADDLD 0x10000033 #define PPC_INST_DIVWU 0x7c000396 #define PPC_INST_DIVD 0x7c0003d2 #define PPC_INST_RLWINM 0x54000000 +#define PPC_INST_RLWINM_DOT 0x54000001 #define PPC_INST_RLWIMI 0x50000000 #define PPC_INST_RLDICL 0x78000000 #define PPC_INST_RLDICR 0x78000004 @@ -376,6 +381,7 @@ /* macros to insert fields into opcodes */ #define ___PPC_RA(a) (((a) & 0x1f) << 16) #define ___PPC_RB(b) (((b) & 0x1f) << 11) +#define ___PPC_RC(c) (((c) & 0x1f) << 6) #define ___PPC_RS(s) (((s) & 0x1f) << 21) #define ___PPC_RT(t) ___PPC_RS(t) #define ___PPC_R(r) (((r) & 0x1) << 16) @@ -395,7 +401,7 @@ #define __PPC_WS(w) (((w) & 0x1f) << 11) #define __PPC_SH(s) __PPC_WS(s) #define __PPC_SH64(s) (__PPC_SH(s) | (((s) & 0x20) >> 4)) -#define __PPC_MB(s) (((s) & 0x1f) << 6) +#define __PPC_MB(s) ___PPC_RC(s) #define __PPC_ME(s) (((s) & 0x1f) << 1) #define __PPC_MB64(s) (__PPC_MB(s) | ((s) & 0x20)) #define __PPC_ME64(s) __PPC_MB64(s) @@ -437,6 +443,15 @@ #define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_INST_STQCX | \ ___PPC_RT(t) | ___PPC_RA(a) | \ ___PPC_RB(b)) +#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHD | \ + ___PPC_RT(t) | ___PPC_RA(a) | \ + ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHDU | \ + ___PPC_RT(t) | ___PPC_RA(a) | \ + ___PPC_RB(b) | ___PPC_RC(c)) +#define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDLD | \ + ___PPC_RT(t) | ___PPC_RA(a) | \ + ___PPC_RB(b) | ___PPC_RC(c)) #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ ___PPC_RB(b)) #define PPC_MSGSYNC stringify_in_c(.long PPC_INST_MSGSYNC) diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index f67da277d652..f191ef0d2a0a 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h @@ -53,13 +53,13 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev); struct eeh_dev *eeh_addr_cache_get_dev(unsigned long addr); void eeh_slot_error_detail(struct eeh_pe *pe, int severity); int eeh_pci_enable(struct eeh_pe *pe, int function); -int eeh_pe_reset_full(struct eeh_pe *pe); +int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed); void eeh_save_bars(struct eeh_dev *edev); int rtas_write_config(struct pci_dn *, int where, int size, u32 val); int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); void eeh_pe_state_mark(struct eeh_pe *pe, int state); void eeh_pe_mark_isolated(struct eeh_pe *pe); -void eeh_pe_state_clear(struct eeh_pe *pe, int state); +void eeh_pe_state_clear(struct eeh_pe *pe, int state, bool include_passed); void 
eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state); void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode); diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index ee58526cb6c2..3351bcf42f2d 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -40,7 +40,7 @@ #ifndef __ASSEMBLY__ #include -#include +#include #include #include @@ -77,105 +77,15 @@ extern int _chrp_type; #ifdef __KERNEL__ -struct task_struct; -void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); -void release_thread(struct task_struct *); - -#ifdef CONFIG_PPC32 - -#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START -#error User TASK_SIZE overlaps with KERNEL_START address -#endif -#define TASK_SIZE (CONFIG_TASK_SIZE) - -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) -#endif - #ifdef CONFIG_PPC64 -/* - * 64-bit user address space can have multiple limits - * For now supported values are: - */ -#define TASK_SIZE_64TB (0x0000400000000000UL) -#define TASK_SIZE_128TB (0x0000800000000000UL) -#define TASK_SIZE_512TB (0x0002000000000000UL) -#define TASK_SIZE_1PB (0x0004000000000000UL) -#define TASK_SIZE_2PB (0x0008000000000000UL) -/* - * With 52 bits in the address we can support - * upto 4PB of range. - */ -#define TASK_SIZE_4PB (0x0010000000000000UL) - -/* - * For now 512TB is only supported with book3s and 64K linux page size. - */ -#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES) -/* - * Max value currently used: - */ -#define TASK_SIZE_USER64 TASK_SIZE_4PB -#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB -#define TASK_CONTEXT_SIZE TASK_SIZE_512TB +#include #else -#define TASK_SIZE_USER64 TASK_SIZE_64TB -#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB -/* - * We don't need to allocate extended context ids for 4K page size, because - * we limit the max effective address on this config to 64TB. - */ -#define TASK_CONTEXT_SIZE TASK_SIZE_64TB +#include #endif -/* - * 32-bit user address space is 4GB - 1 page - * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT - */ -#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE)) - -#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ - TASK_SIZE_USER32 : TASK_SIZE_USER64) -#define TASK_SIZE TASK_SIZE_OF(current) -/* This decides where the kernel will search for a free chunk of vm - * space during mmap's. - */ -#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4)) -#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4)) - -#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \ - TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 ) -#endif - -/* - * Initial task size value for user applications. For book3s 64 we start - * with 128TB and conditionally enable upto 512TB - */ -#ifdef CONFIG_PPC_BOOK3S_64 -#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \ - TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64) -#else -#define DEFAULT_MAP_WINDOW TASK_SIZE -#endif - -#ifdef __powerpc64__ - -#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64 -#define STACK_TOP_USER32 TASK_SIZE_USER32 - -#define STACK_TOP (is_32bit_task() ? 
\ - STACK_TOP_USER32 : STACK_TOP_USER64) - -#define STACK_TOP_MAX TASK_SIZE_USER64 - -#else /* __powerpc64__ */ - -#define STACK_TOP TASK_SIZE -#define STACK_TOP_MAX STACK_TOP - -#endif /* __powerpc64__ */ +struct task_struct; +void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); +void release_thread(struct task_struct *); typedef struct { unsigned long seg; @@ -250,6 +160,9 @@ struct thread_struct { #ifdef CONFIG_PPC32 void *pgdir; /* root of page-table tree */ unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ +#ifdef CONFIG_PPC_RTAS + unsigned long rtas_sp; /* stack pointer for when in RTAS */ +#endif #endif /* Debug Registers */ struct debug_reg debug; @@ -357,8 +270,7 @@ struct thread_struct { #define ARCH_MIN_TASKALIGN 16 #define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack) -#define INIT_SP_LIMIT \ - (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack) +#define INIT_SP_LIMIT ((unsigned long)&init_stack) #ifdef CONFIG_SPE #define SPEFSCR_INIT \ diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 0b8a735b6d85..64271e562fed 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -157,7 +157,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data); #define current_pt_regs() \ - ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE) - 1) + ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1) /* * We use the least-significant bit of the trap field to indicate * whether we have saved the full set of registers, or only a diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 1c98ef1f2d5b..c5b2aff0ce8e 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -1062,7 +1062,7 @@ * - SPRG9 debug exception scratch * * All 32-bit: - * - SPRG3 current thread_info pointer + * - SPRG3 current thread_struct physical addr pointer * (virtual on BookE, physical on others) * * 32-bit classic: @@ -1167,7 +1167,7 @@ #ifdef CONFIG_PPC_BOOK3S_32 #define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 -#define SPRN_SPRG_RTAS SPRN_SPRG2 +#define SPRN_SPRG_PGDIR SPRN_SPRG2 #define SPRN_SPRG_603_LRU SPRN_SPRG4 #endif @@ -1425,6 +1425,11 @@ static inline void msr_check_and_clear(unsigned long bits) #define mfsrin(v) ({unsigned int rval; \ asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \ rval;}) + +static inline void mtsrin(u32 val, u32 idx) +{ + asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx)); +} #endif #define proc_trap() asm volatile("trap") diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h index e335a8f846af..4a1664a8658d 100644 --- a/arch/powerpc/include/asm/sections.h +++ b/arch/powerpc/include/asm/sections.h @@ -17,6 +17,13 @@ extern char __end_interrupts[]; extern char __prom_init_toc_start[]; extern char __prom_init_toc_end[]; +#ifdef CONFIG_PPC_POWERNV +extern char start_real_trampolines[]; +extern char end_real_trampolines[]; +extern char start_virt_trampolines[]; +extern char end_virt_trampolines[]; +#endif + static inline int in_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end) diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 41695745032c..0de717e16dd6 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -83,7 +83,22 @@ int is_cpu_dead(unsigned 
int cpu); /* 32-bit */ extern int smp_hw_index[]; -#define raw_smp_processor_id() (current_thread_info()->cpu) +/* + * This is particularly ugly: it appears we can't actually get the definition + * of task_struct here, but we need access to the CPU this task is running on. + * Instead of using task_struct we're using _TASK_CPU which is extracted from + * asm-offsets.h by kbuild to get the current processor ID. + * + * This also needs to be safeguarded when building asm-offsets.s because at + * that time _TASK_CPU is not defined yet. It could have been guarded by + * _TASK_CPU itself, but we want the build to fail if _TASK_CPU is missing + * when building something else than asm-offsets.s + */ +#ifdef GENERATING_ASM_OFFSETS +#define raw_smp_processor_id() (0) +#else +#define raw_smp_processor_id() (*(unsigned int *)((void *)current + _TASK_CPU)) +#endif #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()]) static inline int get_hard_smp_processor_id(int cpu) diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h index f65ecf57b66c..b7d082c0ec25 100644 --- a/arch/powerpc/include/asm/swiotlb.h +++ b/arch/powerpc/include/asm/swiotlb.h @@ -13,12 +13,7 @@ #include -extern const struct dma_map_ops powerpc_swiotlb_dma_ops; - extern unsigned int ppc_swiotlb_enable; -int __init swiotlb_setup_bus_notifier(void); - -extern void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev); #ifdef CONFIG_SWIOTLB void swiotlb_detect_4g(void); diff --git a/arch/powerpc/include/asm/task_size_32.h b/arch/powerpc/include/asm/task_size_32.h new file mode 100644 index 000000000000..de7290ee770f --- /dev/null +++ b/arch/powerpc/include/asm/task_size_32.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_TASK_SIZE_32_H +#define _ASM_POWERPC_TASK_SIZE_32_H + +#if CONFIG_TASK_SIZE > CONFIG_KERNEL_START +#error User TASK_SIZE overlaps with KERNEL_START address +#endif + +#define TASK_SIZE (CONFIG_TASK_SIZE) + +/* + * This decides where the kernel will search for a free chunk of vm space during + * mmap's. + */ +#define TASK_UNMAPPED_BASE (TASK_SIZE / 8 * 3) + +#define DEFAULT_MAP_WINDOW TASK_SIZE +#define STACK_TOP TASK_SIZE +#define STACK_TOP_MAX STACK_TOP + +#endif /* _ASM_POWERPC_TASK_SIZE_32_H */ diff --git a/arch/powerpc/include/asm/task_size_64.h b/arch/powerpc/include/asm/task_size_64.h new file mode 100644 index 000000000000..eab4779f6b84 --- /dev/null +++ b/arch/powerpc/include/asm/task_size_64.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_TASK_SIZE_64_H +#define _ASM_POWERPC_TASK_SIZE_64_H + +/* + * 64-bit user address space can have multiple limits + * For now supported values are: + */ +#define TASK_SIZE_64TB (0x0000400000000000UL) +#define TASK_SIZE_128TB (0x0000800000000000UL) +#define TASK_SIZE_512TB (0x0002000000000000UL) +#define TASK_SIZE_1PB (0x0004000000000000UL) +#define TASK_SIZE_2PB (0x0008000000000000UL) + +/* + * With 52 bits in the address we can support up to 4PB of range. + */ +#define TASK_SIZE_4PB (0x0010000000000000UL) + +/* + * For now 512TB is only supported with book3s and 64K linux page size. 
+ */ +#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES) +/* + * Max value currently used: + */ +#define TASK_SIZE_USER64 TASK_SIZE_4PB +#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_128TB +#define TASK_CONTEXT_SIZE TASK_SIZE_512TB +#else +#define TASK_SIZE_USER64 TASK_SIZE_64TB +#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB + +/* + * We don't need to allocate extended context ids for 4K page size, because we + * limit the max effective address on this config to 64TB. + */ +#define TASK_CONTEXT_SIZE TASK_SIZE_64TB +#endif + +/* + * 32-bit user address space is 4GB - 1 page + * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT + */ +#define TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE)) + +#define TASK_SIZE_OF(tsk) \ + (test_tsk_thread_flag(tsk, TIF_32BIT) ? TASK_SIZE_USER32 : \ + TASK_SIZE_USER64) + +#define TASK_SIZE TASK_SIZE_OF(current) + +#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4)) +#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4)) + +/* + * This decides where the kernel will search for a free chunk of vm space during + * mmap's. + */ +#define TASK_UNMAPPED_BASE \ + ((is_32bit_task()) ? TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64) + +/* + * Initial task size value for user applications. For book3s 64 we start + * with 128TB and conditionally enable upto 512TB + */ +#ifdef CONFIG_PPC_BOOK3S_64 +#define DEFAULT_MAP_WINDOW \ + ((is_32bit_task()) ? TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64) +#else +#define DEFAULT_MAP_WINDOW TASK_SIZE +#endif + +#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64 +#define STACK_TOP_USER32 TASK_SIZE_USER32 +#define STACK_TOP_MAX TASK_SIZE_USER64 +#define STACK_TOP (is_32bit_task() ? STACK_TOP_USER32 : STACK_TOP_USER64) + +#endif /* _ASM_POWERPC_TASK_SIZE_64_H */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 544cac0474cb..8e1d0195ac36 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -17,12 +17,6 @@ #define THREAD_SIZE (1 << THREAD_SHIFT) -#ifdef CONFIG_PPC64 -#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT) -#else -#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT) -#endif - #ifndef __ASSEMBLY__ #include #include @@ -34,8 +28,6 @@ * low level task data. 
*/ struct thread_info { - struct task_struct *task; /* main task structure */ - int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ unsigned long local_flags; /* private flags for thread */ @@ -58,8 +50,6 @@ struct thread_info { */ #define INIT_THREAD_INFO(tsk) \ { \ - .task = &tsk, \ - .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .flags = 0, \ } @@ -67,15 +57,6 @@ struct thread_info { #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) /* how to get the thread information struct from C */ -static inline struct thread_info *current_thread_info(void) -{ - unsigned long val; - - asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val)); - - return (struct thread_info *)val; -} - extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #ifdef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index a4a718dbfec6..f85e2b01c3df 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -132,6 +132,8 @@ static inline void shared_proc_topology_init(void) {} #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) #define topology_core_id(cpu) (cpu_to_core_id(cpu)) + +int dlpar_cpu_readd(int cpu); #endif #endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index e3a731793ea2..4d6d905e9138 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -28,7 +28,6 @@ #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) #endif -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index a3c35e6d6ffb..f44dbc65e38e 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -29,8 +29,8 @@ #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_TIME32 +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 @@ -45,8 +45,8 @@ #define __ARCH_WANT_OLD_STAT #endif #ifdef CONFIG_PPC64 -#define __ARCH_WANT_COMPAT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME32 +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_SYS_NEWFSTATAT #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index 94de465e0920..12aa0c43e775 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -11,8 +11,8 @@ #define SO_RCVLOWAT 16 #define SO_SNDLOWAT 17 -#define SO_RCVTIMEO 18 -#define SO_SNDTIMEO 19 +#define SO_RCVTIMEO_OLD 18 +#define SO_SNDTIMEO_OLD 19 #define SO_PASSCRED 20 #define SO_PEERCRED 21 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index cb7f0bb9ee71..cddadccf551d 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -36,7 +36,7 @@ obj-y := cputable.o ptrace.o syscalls.o \ process.o systbl.o idle.o \ signal.o sysfs.o cacheinfo.o time.o \ prom.o traps.o setup-common.o \ - udbg.o misc.o io.o dma.o misc_$(BITS).o \ + udbg.o misc.o io.o misc_$(BITS).o \ of_platform.o prom_parse.o obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ signal_64.o ptrace32.o \ @@ -105,6 +105,7 @@ 
obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o +obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o pci64-$(CONFIG_PPC64) += pci_dn.o pci-hotplug.o isa-bridge.o obj-$(CONFIG_PCI) += pci_$(BITS).o $(pci64-y) \ @@ -142,19 +143,29 @@ endif obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o -# Disable GCOV & sanitizers in odd or sensitive code +# Disable GCOV, KCOV & sanitizers in odd or sensitive code GCOV_PROFILE_prom_init.o := n +KCOV_INSTRUMENT_prom_init.o := n UBSAN_SANITIZE_prom_init.o := n GCOV_PROFILE_machine_kexec_64.o := n +KCOV_INSTRUMENT_machine_kexec_64.o := n UBSAN_SANITIZE_machine_kexec_64.o := n GCOV_PROFILE_machine_kexec_32.o := n +KCOV_INSTRUMENT_machine_kexec_32.o := n UBSAN_SANITIZE_machine_kexec_32.o := n GCOV_PROFILE_kprobes.o := n +KCOV_INSTRUMENT_kprobes.o := n UBSAN_SANITIZE_kprobes.o := n GCOV_PROFILE_kprobes-ftrace.o := n +KCOV_INSTRUMENT_kprobes-ftrace.o := n UBSAN_SANITIZE_kprobes-ftrace.o := n UBSAN_SANITIZE_vdso.o := n +# Necessary for booting with kcov enabled on book3e machines +KCOV_INSTRUMENT_cputable.o := n +KCOV_INSTRUMENT_setup_64.o := n +KCOV_INSTRUMENT_paca.o := n + extra-$(CONFIG_PPC_FPU) += fpu.o extra-$(CONFIG_ALTIVEC) += vector.o extra-$(CONFIG_PPC64) += entry_64.o diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 9ffc72ded73a..86a61e5f8285 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -13,6 +13,8 @@ * 2 of the License, or (at your option) any later version. */ +#define GENERATING_ASM_OFFSETS /* asm/smp.h */ + #include #include #include @@ -90,10 +92,15 @@ int main(void) DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); #else - OFFSET(THREAD_INFO, task_struct, stack); - DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16)); OFFSET(KSP_LIMIT, thread_struct, ksp_limit); +#ifdef CONFIG_PPC_RTAS + OFFSET(RTAS_SP, thread_struct, rtas_sp); +#endif #endif /* CONFIG_PPC64 */ + OFFSET(TASK_STACK, task_struct, stack); +#ifdef CONFIG_SMP + OFFSET(TASK_CPU, task_struct, cpu); +#endif #ifdef CONFIG_LIVEPATCH OFFSET(TI_livepatch_sp, thread_info, livepatch_sp); @@ -161,8 +168,6 @@ int main(void) OFFSET(TI_FLAGS, thread_info, flags); OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags); OFFSET(TI_PREEMPT, thread_info, preempt_count); - OFFSET(TI_TASK, thread_info, task); - OFFSET(TI_CPU, thread_info, cpu); #ifdef CONFIG_PPC64 OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size); @@ -177,6 +182,8 @@ int main(void) OFFSET(PACAPROCSTART, paca_struct, cpu_start); OFFSET(PACAKSAVE, paca_struct, kstack); OFFSET(PACACURRENT, paca_struct, __current); + DEFINE(PACA_THREAD_INFO, offsetof(struct paca_struct, __current) + + offsetof(struct task_struct, thread_info)); OFFSET(PACASAVEDMSR, paca_struct, saved_msr); OFFSET(PACAR1, paca_struct, saved_r1); OFFSET(PACATOC, paca_struct, kernel_toc); diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 8c069e96c478..6f1c11e0691f 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -24,6 +24,10 @@ BEGIN_MMU_FTR_SECTION li r10,0 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) + lis r10, (swapper_pg_dir - PAGE_OFFSET)@h + ori r10, r10, (swapper_pg_dir - PAGE_OFFSET)@l + mtspr SPRN_SPRG_PGDIR, r10 + BEGIN_FTR_SECTION bl 
__init_fpu_registers END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 9c9bcaae2f75..09231ef06d01 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -6,12 +6,31 @@ * busses using the iommu infrastructure */ +#include +#include #include /* * Generic iommu implementation */ +/* + * The coherent mask may be smaller than the real mask, check if we can + * really use a direct window. + */ +static inline bool dma_iommu_alloc_bypass(struct device *dev) +{ + return dev->archdata.iommu_bypass && !iommu_fixed_is_weak && + dma_direct_supported(dev, dev->coherent_dma_mask); +} + +static inline bool dma_iommu_map_bypass(struct device *dev, + unsigned long attrs) +{ + return dev->archdata.iommu_bypass && + (!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING)); +} + /* Allocates a contiguous real buffer and creates mappings over it. * Returns the virtual address of the buffer and sets dma_handle * to the dma address (mapping) of the first page. @@ -20,6 +39,8 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) { + if (dma_iommu_alloc_bypass(dev)) + return dma_direct_alloc(dev, size, dma_handle, flag, attrs); return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, dma_handle, dev->coherent_dma_mask, flag, dev_to_node(dev)); @@ -29,7 +50,11 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { - iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); + if (dma_iommu_alloc_bypass(dev)) + dma_direct_free(dev, size, vaddr, dma_handle, attrs); + else + iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, + dma_handle); } /* Creates TCEs for a user provided buffer. 
The user buffer must be @@ -42,6 +67,9 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, enum dma_data_direction direction, unsigned long attrs) { + if (dma_iommu_map_bypass(dev, attrs)) + return dma_direct_map_page(dev, page, offset, size, direction, + attrs); return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, size, device_to_mask(dev), direction, attrs); } @@ -51,8 +79,9 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction, unsigned long attrs) { - iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, - attrs); + if (!dma_iommu_map_bypass(dev, attrs)) + iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, + direction, attrs); } @@ -60,6 +89,8 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, unsigned long attrs) { + if (dma_iommu_map_bypass(dev, attrs)) + return dma_direct_map_sg(dev, sglist, nelems, direction, attrs); return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, device_to_mask(dev), direction, attrs); } @@ -68,10 +99,20 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction direction, unsigned long attrs) { - ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, + if (!dma_iommu_map_bypass(dev, attrs)) + ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, direction, attrs); } +static bool dma_iommu_bypass_supported(struct device *dev, u64 mask) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *phb = pci_bus_to_host(pdev->bus); + + return phb->controller_ops.iommu_bypass_supported && + phb->controller_ops.iommu_bypass_supported(pdev, mask); +} + /* We support DMA to/from any memory page via the iommu */ int dma_iommu_dma_supported(struct device *dev, u64 mask) { @@ -83,32 +124,48 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask) return 0; } + if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) { + dev->archdata.iommu_bypass = true; + dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); + return 1; + } + if (tbl->it_offset > (mask >> tbl->it_page_shift)) { dev_info(dev, "Warning: IOMMU offset too big for device mask\n"); dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n", mask, tbl->it_offset << tbl->it_page_shift); return 0; - } else - return 1; + } + + dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); + dev->archdata.iommu_bypass = false; + return 1; } -static u64 dma_iommu_get_required_mask(struct device *dev) +u64 dma_iommu_get_required_mask(struct device *dev) { struct iommu_table *tbl = get_iommu_table_base(dev); u64 mask; + if (!tbl) return 0; + if (dev_is_pci(dev)) { + u64 bypass_mask = dma_direct_get_required_mask(dev); + + if (dma_iommu_bypass_supported(dev, bypass_mask)) + return bypass_mask; + } + mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1); mask += mask - 1; return mask; } -struct dma_map_ops dma_iommu_ops = { +const struct dma_map_ops dma_iommu_ops = { .alloc = dma_iommu_alloc_coherent, .free = dma_iommu_free_coherent, - .mmap = dma_nommu_mmap_coherent, .map_sg = dma_iommu_map_sg, .unmap_sg = dma_iommu_unmap_sg, .dma_supported = dma_iommu_dma_supported, diff --git a/arch/powerpc/kernel/dma-mask.c b/arch/powerpc/kernel/dma-mask.c new file mode 100644 index 000000000000..ffbbbc432612 --- /dev/null +++ b/arch/powerpc/kernel/dma-mask.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier:
GPL-2.0 + +#include +#include +#include + +void arch_dma_set_mask(struct device *dev, u64 dma_mask) +{ + if (ppc_md.dma_set_mask) + ppc_md.dma_set_mask(dev, dma_mask); +} +EXPORT_SYMBOL(arch_dma_set_mask); diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 7d5fc9751622..132d61c91629 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -10,101 +10,12 @@ * option) any later version. * */ - -#include #include -#include -#include -#include -#include - #include #include -#include unsigned int ppc_swiotlb_enable; -static u64 swiotlb_powerpc_get_required(struct device *dev) -{ - u64 end, mask, max_direct_dma_addr = dev->archdata.max_direct_dma_addr; - - end = memblock_end_of_DRAM(); - if (max_direct_dma_addr && end > max_direct_dma_addr) - end = max_direct_dma_addr; - end += get_dma_offset(dev); - - mask = 1ULL << (fls64(end) - 1); - mask += mask - 1; - - return mask; -} - -/* - * At the moment, all platforms that use this code only require - * swiotlb to be used if we're operating on HIGHMEM. Since - * we don't ever call anything other than map_sg, unmap_sg, - * map_page, and unmap_page on highmem, use normal dma_ops - * for everything else. - */ -const struct dma_map_ops powerpc_swiotlb_dma_ops = { - .alloc = __dma_nommu_alloc_coherent, - .free = __dma_nommu_free_coherent, - .mmap = dma_nommu_mmap_coherent, - .map_sg = dma_direct_map_sg, - .unmap_sg = dma_direct_unmap_sg, - .dma_supported = swiotlb_dma_supported, - .map_page = dma_direct_map_page, - .unmap_page = dma_direct_unmap_page, - .sync_single_for_cpu = dma_direct_sync_single_for_cpu, - .sync_single_for_device = dma_direct_sync_single_for_device, - .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, - .sync_sg_for_device = dma_direct_sync_sg_for_device, - .get_required_mask = swiotlb_powerpc_get_required, -}; - -void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev) -{ - struct pci_controller *hose; - struct dev_archdata *sd; - - hose = pci_bus_to_host(pdev->bus); - sd = &pdev->dev.archdata; - sd->max_direct_dma_addr = - hose->dma_window_base_cur + hose->dma_window_size; -} - -static int ppc_swiotlb_bus_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - struct dev_archdata *sd; - - /* We are only intereted in device addition */ - if (action != BUS_NOTIFY_ADD_DEVICE) - return 0; - - sd = &dev->archdata; - sd->max_direct_dma_addr = 0; - - /* May need to bounce if the device can't address all of DRAM */ - if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM()) - set_dma_ops(dev, &powerpc_swiotlb_dma_ops); - - return NOTIFY_DONE; -} - -static struct notifier_block ppc_swiotlb_plat_bus_notifier = { - .notifier_call = ppc_swiotlb_bus_notify, - .priority = 0, -}; - -int __init swiotlb_setup_bus_notifier(void) -{ - bus_register_notifier(&platform_bus_type, - &ppc_swiotlb_plat_bus_notifier); - return 0; -} - void __init swiotlb_detect_4g(void) { if ((memblock_end_of_DRAM() - 1) > 0xffffffff) diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c deleted file mode 100644 index b1903ebb2e9c..000000000000 --- a/arch/powerpc/kernel/dma.c +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation - * - * Provide default implementations of the DMA mapping callbacks for - * directly mapped busses. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Generic direct DMA implementation - * - * This implementation supports a per-device offset that can be applied if - * the address at which memory is visible to devices is not 0. Platform code - * can set archdata.dma_data to an unsigned long holding the offset. By - * default the offset is PCI_DRAM_OFFSET. - */ - -static u64 __maybe_unused get_pfn_limit(struct device *dev) -{ - u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1; - struct dev_archdata __maybe_unused *sd = &dev->archdata; - -#ifdef CONFIG_SWIOTLB - if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops) - pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT); -#endif - - return pfn; -} - -static int dma_nommu_dma_supported(struct device *dev, u64 mask) -{ -#ifdef CONFIG_PPC64 - u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1); - - /* Limit fits in the mask, we are good */ - if (mask >= limit) - return 1; - -#ifdef CONFIG_FSL_SOC - /* - * Freescale gets another chance via ZONE_DMA, however - * that will have to be refined if/when they support iommus - */ - return 1; -#endif - /* Sorry ... */ - return 0; -#else - return 1; -#endif -} - -#ifndef CONFIG_NOT_COHERENT_CACHE -void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - unsigned long attrs) -{ - void *ret; - struct page *page; - int node = dev_to_node(dev); -#ifdef CONFIG_FSL_SOC - u64 pfn = get_pfn_limit(dev); - int zone; - - /* - * This code should be OK on other platforms, but we have drivers that - * don't set coherent_dma_mask. As a workaround we just ifdef it. This - * whole routine needs some serious cleanup. - */ - - zone = dma_pfn_limit_to_zone(pfn); - if (zone < 0) { - dev_err(dev, "%s: No suitable zone for pfn %#llx\n", - __func__, pfn); - return NULL; - } - - switch (zone) { -#ifdef CONFIG_ZONE_DMA - case ZONE_DMA: - flag |= GFP_DMA; - break; -#endif - }; -#endif /* CONFIG_FSL_SOC */ - - page = alloc_pages_node(node, flag, get_order(size)); - if (page == NULL) - return NULL; - ret = page_address(page); - memset(ret, 0, size); - *dma_handle = __pa(ret) + get_dma_offset(dev); - - return ret; -} - -void __dma_nommu_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) -{ - free_pages((unsigned long)vaddr, get_order(size)); -} -#endif /* !CONFIG_NOT_COHERENT_CACHE */ - -static void *dma_nommu_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - unsigned long attrs) -{ - struct iommu_table *iommu; - - /* The coherent mask may be smaller than the real mask, check if - * we can really use the direct ops - */ - if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask)) - return __dma_nommu_alloc_coherent(dev, size, dma_handle, - flag, attrs); - - /* Ok we can't ... do we have an iommu ? 
If not, fail */ - iommu = get_iommu_table_base(dev); - if (!iommu) - return NULL; - - /* Try to use the iommu */ - return iommu_alloc_coherent(dev, iommu, size, dma_handle, - dev->coherent_dma_mask, flag, - dev_to_node(dev)); -} - -static void dma_nommu_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) -{ - struct iommu_table *iommu; - - /* See comments in dma_nommu_alloc_coherent() */ - if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask)) - return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle, - attrs); - /* Maybe we used an iommu ... */ - iommu = get_iommu_table_base(dev); - - /* If we hit that we should have never allocated in the first - * place so how come we are freeing ? - */ - if (WARN_ON(!iommu)) - return; - iommu_free_coherent(iommu, size, vaddr, dma_handle); -} - -int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs) -{ - unsigned long pfn; - -#ifdef CONFIG_NOT_COHERENT_CACHE - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr); -#else - pfn = page_to_pfn(virt_to_page(cpu_addr)); -#endif - return remap_pfn_range(vma, vma->vm_start, - pfn + vma->vm_pgoff, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); -} - -static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) { - sg->dma_address = sg_phys(sg) + get_dma_offset(dev); - sg->dma_length = sg->length; - - if (attrs & DMA_ATTR_SKIP_CPU_SYNC) - continue; - - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); - } - - return nents; -} - -static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); -} - -static u64 dma_nommu_get_required_mask(struct device *dev) -{ - u64 end, mask; - - end = memblock_end_of_DRAM() + get_dma_offset(dev); - - mask = 1ULL << (fls64(end) - 1); - mask += mask - 1; - - return mask; -} - -static inline dma_addr_t dma_nommu_map_page(struct device *dev, - struct page *page, - unsigned long offset, - size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - __dma_sync_page(page, offset, size, dir); - - return page_to_phys(page) + offset + get_dma_offset(dev); -} - -static inline void dma_nommu_unmap_page(struct device *dev, - dma_addr_t dma_address, - size_t size, - enum dma_data_direction direction, - unsigned long attrs) -{ - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) - __dma_sync(bus_to_virt(dma_address), size, direction); -} - -#ifdef CONFIG_NOT_COHERENT_CACHE -static inline void dma_nommu_sync_sg(struct device *dev, - struct scatterlist *sgl, int nents, - enum dma_data_direction direction) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sgl, sg, nents, i) - __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); -} - -static inline void dma_nommu_sync_single(struct device *dev, - dma_addr_t dma_handle, size_t size, - enum dma_data_direction direction) -{ - __dma_sync(bus_to_virt(dma_handle), size, direction); -} -#endif - -const struct dma_map_ops dma_nommu_ops = { - .alloc = dma_nommu_alloc_coherent, - .free = 
dma_nommu_free_coherent, - .mmap = dma_nommu_mmap_coherent, - .map_sg = dma_nommu_map_sg, - .unmap_sg = dma_nommu_unmap_sg, - .dma_supported = dma_nommu_dma_supported, - .map_page = dma_nommu_map_page, - .unmap_page = dma_nommu_unmap_page, - .get_required_mask = dma_nommu_get_required_mask, -#ifdef CONFIG_NOT_COHERENT_CACHE - .sync_single_for_cpu = dma_nommu_sync_single, - .sync_single_for_device = dma_nommu_sync_single, - .sync_sg_for_cpu = dma_nommu_sync_sg, - .sync_sg_for_device = dma_nommu_sync_sg, -#endif -}; -EXPORT_SYMBOL(dma_nommu_ops); - -int dma_set_coherent_mask(struct device *dev, u64 mask) -{ - if (!dma_supported(dev, mask)) { - /* - * We need to special case the direct DMA ops which can - * support a fallback for coherent allocations. There - * is no dma_op->set_coherent_mask() so we have to do - * things the hard way: - */ - if (get_dma_ops(dev) != &dma_nommu_ops || - get_iommu_table_base(dev) == NULL || - !dma_iommu_dma_supported(dev, mask)) - return -EIO; - } - dev->coherent_dma_mask = mask; - return 0; -} -EXPORT_SYMBOL(dma_set_coherent_mask); - -int dma_set_mask(struct device *dev, u64 dma_mask) -{ - if (ppc_md.dma_set_mask) - return ppc_md.dma_set_mask(dev, dma_mask); - - if (dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(dev); - struct pci_controller *phb = pci_bus_to_host(pdev->bus); - if (phb->controller_ops.dma_set_mask) - return phb->controller_ops.dma_set_mask(pdev, dma_mask); - } - - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - *dev->dma_mask = dma_mask; - return 0; -} -EXPORT_SYMBOL(dma_set_mask); - -u64 __dma_get_required_mask(struct device *dev) -{ - const struct dma_map_ops *dma_ops = get_dma_ops(dev); - - if (unlikely(dma_ops == NULL)) - return 0; - - if (dma_ops->get_required_mask) - return dma_ops->get_required_mask(dev); - - return DMA_BIT_MASK(8 * sizeof(dma_addr_t)); -} - -u64 dma_get_required_mask(struct device *dev) -{ - if (ppc_md.dma_get_required_mask) - return ppc_md.dma_get_required_mask(dev); - - if (dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(dev); - struct pci_controller *phb = pci_bus_to_host(pdev->bus); - if (phb->controller_ops.dma_get_required_mask) - return phb->controller_ops.dma_get_required_mask(pdev); - } - - return __dma_get_required_mask(dev); -} -EXPORT_SYMBOL_GPL(dma_get_required_mask); - -static int __init dma_init(void) -{ -#ifdef CONFIG_IBMVIO - dma_debug_add_bus(&vio_bus_type); -#endif - - return 0; -} -fs_initcall(dma_init); - diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 8be3721d9302..c66fd3ce6478 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -666,8 +666,10 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f) m = &dt_cpu_feature_match_table[i]; if (!strcmp(f->name, m->name)) { known = true; - if (m->enable(f)) + if (m->enable(f)) { + cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask; break; + } pr_info("not enabling: %s (disabled or unsupported by kernel)\n", f->name); @@ -675,17 +677,12 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f) } } - if (!known && enable_unknown) { - if (!feat_try_enable_unknown(f)) { - pr_info("not enabling: %s (unknown and unsupported by kernel)\n", - f->name); - return false; - } + if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) { + pr_info("not enabling: %s (unknown and unsupported by kernel)\n", + f->name); + return false; } - if (m->cpu_ftr_bit_mask) - cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask; 
- if (known) pr_debug("enabling: %s\n", f->name); else @@ -813,7 +810,6 @@ static int __init process_cpufeatures_node(unsigned long node, int len; f = &dt_cpu_features[i]; - memset(f, 0, sizeof(struct dt_cpu_feature)); f->node = node; @@ -1008,7 +1004,12 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char /* Count and allocate space for cpu features */ of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes, &nr_dt_cpu_features); - dt_cpu_features = __va(memblock_phys_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE)); + dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE); + if (!dt_cpu_features) + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", + __func__, + sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, + PAGE_SIZE); cpufeatures_setup_start(isa); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index ae05203eb4de..289c0b37d845 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -109,7 +109,14 @@ EXPORT_SYMBOL(eeh_subsystem_flags); * frozen count in last hour exceeds this limit, the PE will * be forced to be offline permanently. */ -int eeh_max_freezes = 5; +u32 eeh_max_freezes = 5; + +/* + * Controls whether a recovery event should be scheduled when an + * isolated device is discovered. This is only really useful for + * debugging problems with the EEH core. + */ +bool eeh_debugfs_no_recover; /* Platform dependent EEH operations */ struct eeh_ops *eeh_ops = NULL; @@ -823,15 +830,15 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat switch (state) { case pcie_deassert_reset: eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); - eeh_unfreeze_pe(pe, false); + eeh_unfreeze_pe(pe); if (!(pe->type & EEH_PE_VF)) - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true); eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); - eeh_pe_state_clear(pe, EEH_PE_ISOLATED); + eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true); break; case pcie_hot_reset: eeh_pe_mark_isolated(pe); - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true); eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); if (!(pe->type & EEH_PE_VF)) @@ -840,7 +847,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat break; case pcie_warm_reset: eeh_pe_mark_isolated(pe); - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, true); eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); if (!(pe->type & EEH_PE_VF)) @@ -848,7 +855,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); break; default: - eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true); return -EINVAL; }; @@ -877,6 +884,24 @@ static void *eeh_set_dev_freset(struct eeh_dev *edev, void *flag) return NULL; } +static void eeh_pe_refreeze_passed(struct eeh_pe *root) +{ + struct eeh_pe *pe; + int state; + + eeh_for_each_pe(root, pe) { + if (eeh_pe_passed(pe)) { + state = eeh_ops->get_state(pe, NULL); + if (state & + (EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED)) { + pr_info("EEH: Passed-through PE PHB#%x-PE#%x was thawed by reset, re-freezing for safety.\n", + pe->phb->global_number, pe->addr); + eeh_pe_set_option(pe, 
EEH_OPT_FREEZE_PE); + } + } + } +} + /** * eeh_pe_reset_full - Complete a full reset process on the indicated PE * @pe: EEH PE @@ -889,12 +914,12 @@ static void *eeh_set_dev_freset(struct eeh_dev *edev, void *flag) * * This function will attempt to reset a PE three times before failing. */ -int eeh_pe_reset_full(struct eeh_pe *pe) +int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed) { int reset_state = (EEH_PE_RESET | EEH_PE_CFG_BLOCKED); int type = EEH_RESET_HOT; unsigned int freset = 0; - int i, state, ret; + int i, state = 0, ret; /* * Determine the type of reset to perform - hot or fundamental. @@ -911,32 +936,42 @@ int eeh_pe_reset_full(struct eeh_pe *pe) /* Make three attempts at resetting the bus */ for (i = 0; i < 3; i++) { - ret = eeh_pe_reset(pe, type); - if (ret) - break; - - ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE); - if (ret) - break; + ret = eeh_pe_reset(pe, type, include_passed); + if (!ret) + ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE, + include_passed); + if (ret) { + ret = -EIO; + pr_warn("EEH: Failure %d resetting PHB#%x-PE#%x (attempt %d)\n\n", + state, pe->phb->global_number, pe->addr, i + 1); + continue; + } + if (i) + pr_warn("EEH: PHB#%x-PE#%x: Successful reset (attempt %d)\n", + pe->phb->global_number, pe->addr, i + 1); /* Wait until the PE is in a functioning state */ state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); if (state < 0) { - pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x", - __func__, pe->phb->global_number, pe->addr); + pr_warn("EEH: Unrecoverable slot failure on PHB#%x-PE#%x", + pe->phb->global_number, pe->addr); ret = -ENOTRECOVERABLE; break; } if (eeh_state_active(state)) break; - - /* Set error in case this is our last attempt */ - ret = -EIO; - pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n", - __func__, state, pe->phb->global_number, pe->addr, (i + 1)); + else + pr_warn("EEH: PHB#%x-PE#%x: Slot inactive after reset: 0x%x (attempt %d)\n", + pe->phb->global_number, pe->addr, state, i + 1); } - eeh_pe_state_clear(pe, reset_state); + /* Resetting the PE may have unfrozen child PEs. 
If those PEs have been + * (potentially) passed through to a guest, re-freeze them: + */ + if (!include_passed) + eeh_pe_refreeze_passed(pe); + + eeh_pe_state_clear(pe, reset_state, true); return ret; } @@ -1309,7 +1344,7 @@ void eeh_remove_device(struct pci_dev *dev) edev->mode &= ~EEH_DEV_SYSFS; } -int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state) +int eeh_unfreeze_pe(struct eeh_pe *pe) { int ret; @@ -1327,10 +1362,6 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state) return ret; } - /* Clear software isolated state */ - if (sw_state && (pe->state & EEH_PE_ISOLATED)) - eeh_pe_state_clear(pe, EEH_PE_ISOLATED); - return ret; } @@ -1382,7 +1413,10 @@ static int eeh_pe_change_owner(struct eeh_pe *pe) } } - return eeh_unfreeze_pe(pe, true); + ret = eeh_unfreeze_pe(pe); + if (!ret) + eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true); + return ret; } /** @@ -1612,13 +1646,12 @@ int eeh_pe_get_state(struct eeh_pe *pe) } EXPORT_SYMBOL_GPL(eeh_pe_get_state); -static int eeh_pe_reenable_devices(struct eeh_pe *pe) +static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed) { struct eeh_dev *edev, *tmp; struct pci_dev *pdev; int ret = 0; - /* Restore config space */ eeh_pe_restore_bars(pe); /* @@ -1639,7 +1672,14 @@ static int eeh_pe_reenable_devices(struct eeh_pe *pe) } /* The PE is still in frozen state */ - return eeh_unfreeze_pe(pe, true); + if (include_passed || !eeh_pe_passed(pe)) { + ret = eeh_unfreeze_pe(pe); + } else + pr_info("EEH: Note: Leaving passthrough PHB#%x-PE#%x frozen.\n", + pe->phb->global_number, pe->addr); + if (!ret) + eeh_pe_state_clear(pe, EEH_PE_ISOLATED, include_passed); + return ret; } @@ -1652,7 +1692,7 @@ static int eeh_pe_reenable_devices(struct eeh_pe *pe) * indicated type, either fundamental reset or hot reset. * PE reset is the most important part for error recovery. */ -int eeh_pe_reset(struct eeh_pe *pe, int option) +int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed) { int ret = 0; @@ -1666,11 +1706,11 @@ int eeh_pe_reset(struct eeh_pe *pe, int option) switch (option) { case EEH_RESET_DEACTIVATE: ret = eeh_ops->reset(pe, option); - eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); + eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED, include_passed); if (ret) break; - ret = eeh_pe_reenable_devices(pe); + ret = eeh_pe_reenable_devices(pe, include_passed); break; case EEH_RESET_HOT: case EEH_RESET_FUNDAMENTAL: @@ -1796,22 +1836,64 @@ static int eeh_enable_dbgfs_get(void *data, u64 *val) return 0; } -static int eeh_freeze_dbgfs_set(void *data, u64 val) -{ - eeh_max_freezes = val; - return 0; -} +DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get, + eeh_enable_dbgfs_set, "0x%llx\n"); -static int eeh_freeze_dbgfs_get(void *data, u64 *val) +static ssize_t eeh_force_recover_write(struct file *filp, + const char __user *user_buf, + size_t count, loff_t *ppos) { - *val = eeh_max_freezes; - return 0; + struct pci_controller *hose; + uint32_t phbid, pe_no; + struct eeh_pe *pe; + char buf[20]; + int ret; + + ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); + if (!ret) + return -EFAULT; + + /* + * When PE is NULL the event is a "special" event. Rather than + * recovering a specific PE it forces the EEH core to scan for failed + * PHBs and recovers each. This needs to be done before any device + * recoveries can occur. 
+ */ + if (!strncmp(buf, "hwcheck", 7)) { + __eeh_send_failure_event(NULL); + return count; + } + + ret = sscanf(buf, "%x:%x", &phbid, &pe_no); + if (ret != 2) + return -EINVAL; + + hose = pci_find_controller_for_domain(phbid); + if (!hose) + return -ENODEV; + + /* Retrieve PE */ + pe = eeh_pe_get(hose, pe_no, 0); + if (!pe) + return -ENODEV; + + /* + * We don't do any state checking here since the detection + * process is async to the recovery process. The recovery + * thread *should* not break even if we schedule a recovery + * from an odd state (e.g. PE removed, or recovery of a + * non-isolated PE) + */ + __eeh_send_failure_event(pe); + + return ret < 0 ? ret : count; } -DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get, - eeh_enable_dbgfs_set, "0x%llx\n"); -DEFINE_DEBUGFS_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get, - eeh_freeze_dbgfs_set, "0x%llx\n"); +static const struct file_operations eeh_force_recover_fops = { + .open = simple_open, + .llseek = no_llseek, + .write = eeh_force_recover_write, +}; #endif static int __init eeh_init_proc(void) @@ -1822,9 +1904,15 @@ static int __init eeh_init_proc(void) debugfs_create_file_unsafe("eeh_enable", 0600, powerpc_debugfs_root, NULL, &eeh_enable_dbgfs_ops); - debugfs_create_file_unsafe("eeh_max_freezes", 0600, - powerpc_debugfs_root, NULL, - &eeh_freeze_dbgfs_ops); + debugfs_create_u32("eeh_max_freezes", 0600, + powerpc_debugfs_root, &eeh_max_freezes); + debugfs_create_bool("eeh_disable_recovery", 0600, + powerpc_debugfs_root, + &eeh_debugfs_no_recover); + debugfs_create_file_unsafe("eeh_force_recover", 0600, + powerpc_debugfs_root, NULL, + &eeh_force_recover_fops); + eeh_cache_debugfs_init(); #endif } diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index 201943d54a6e..9c68f0837385 100644 --- a/arch/powerpc/kernel/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -113,7 +114,7 @@ static void eeh_addr_cache_print(struct pci_io_addr_cache *cache) while (n) { struct pci_io_addr_range *piar; piar = rb_entry(n, struct pci_io_addr_range, rb_node); - pr_debug("PCI: %s addr range %d [%pap-%pap]: %s\n", + pr_info("PCI: %s addr range %d [%pap-%pap]: %s\n", (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt, &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev)); cnt++; @@ -157,10 +158,8 @@ eeh_addr_cache_insert(struct pci_dev *dev, resource_size_t alo, piar->pcidev = dev; piar->flags = flags; -#ifdef DEBUG pr_debug("PIAR: insert range=[%pap:%pap] dev=%s\n", &alo, &ahi, pci_name(dev)); -#endif rb_link_node(&piar->rb_node, parent, p); rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root); @@ -240,6 +239,8 @@ restart: piar = rb_entry(n, struct pci_io_addr_range, rb_node); if (piar->pcidev == dev) { + pr_debug("PIAR: remove range=[%pap:%pap] dev=%s\n", + &piar->addr_lo, &piar->addr_hi, pci_name(dev)); rb_erase(n, &pci_io_addr_cache_root.rb_root); kfree(piar); goto restart; @@ -298,9 +299,30 @@ void eeh_addr_cache_build(void) eeh_addr_cache_insert_dev(dev); eeh_sysfs_add_device(dev); } +} -#ifdef DEBUG - /* Verify tree built up above, echo back the list of addrs. 
*/ - eeh_addr_cache_print(&pci_io_addr_cache_root); -#endif +static int eeh_addr_cache_show(struct seq_file *s, void *v) +{ + struct pci_io_addr_range *piar; + struct rb_node *n; + + spin_lock(&pci_io_addr_cache_root.piar_lock); + for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) { + piar = rb_entry(n, struct pci_io_addr_range, rb_node); + + seq_printf(s, "%s addr range [%pap-%pap]: %s\n", + (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", + &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev)); + } + spin_unlock(&pci_io_addr_cache_root.piar_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache); + +void eeh_cache_debugfs_init(void) +{ + debugfs_create_file_unsafe("eeh_address_cache", 0400, + powerpc_debugfs_root, NULL, + &eeh_addr_cache_fops); } diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 99eab7bc7edc..89623962c727 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -510,22 +510,11 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) * support EEH. So we just care about PCI devices for * simplicity here. */ - if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) - return NULL; - - /* - * We rely on count-based pcibios_release_device() to - * detach permanently offlined PEs. Unfortunately, that's - * not reliable enough. We might have the permanently - * offlined PEs attached, but we needn't take care of - * them and their child devices. - */ - if (eeh_dev_removed(edev)) + if (!eeh_edev_actionable(edev) || + (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) return NULL; if (rmv_data) { - if (eeh_pe_passed(edev->pe)) - return NULL; driver = eeh_pcid_get(dev); if (driver) { if (driver->err_handler && @@ -539,8 +528,8 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata) } /* Remove it from PCI subsystem */ - pr_debug("EEH: Removing %s without EEH sensitive driver\n", - pci_name(dev)); + pr_info("EEH: Removing %s without EEH sensitive driver\n", + pci_name(dev)); edev->mode |= EEH_DEV_DISCONNECTED; if (rmv_data) rmv_data->removed_dev_count++; @@ -591,34 +580,22 @@ static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata) * PE reset (for 3 times), we try to clear the frozen state * for 3 times as well. */ -static void *__eeh_clear_pe_frozen_state(struct eeh_pe *pe, void *flag) +static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed) { - bool clear_sw_state = *(bool *)flag; - int i, rc = 1; - - for (i = 0; rc && i < 3; i++) - rc = eeh_unfreeze_pe(pe, clear_sw_state); + struct eeh_pe *pe; + int i; - /* Stop immediately on any errors */ - if (rc) { - pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n", - __func__, rc, pe->phb->global_number, pe->addr); - return (void *)pe; + eeh_for_each_pe(root, pe) { + if (include_passed || !eeh_pe_passed(pe)) { + for (i = 0; i < 3; i++) + if (!eeh_unfreeze_pe(pe)) + break; + if (i >= 3) + return -EIO; + } } - - return NULL; -} - -static int eeh_clear_pe_frozen_state(struct eeh_pe *pe, - bool clear_sw_state) -{ - void *rc; - - rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state); - if (!rc) - eeh_pe_state_clear(pe, EEH_PE_ISOLATED); - - return rc ? 
-EIO : 0; + eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed); + return 0; } int eeh_pe_reset_and_recover(struct eeh_pe *pe) @@ -636,16 +613,16 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL); /* Issue reset */ - ret = eeh_pe_reset_full(pe); + ret = eeh_pe_reset_full(pe, true); if (ret) { - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true); return ret; } /* Unfreeze the PE */ ret = eeh_clear_pe_frozen_state(pe, true); if (ret) { - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true); return ret; } @@ -653,7 +630,7 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL); /* Clear recovery mode */ - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true); return 0; } @@ -676,6 +653,11 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, time64_t tstamp; int cnt, rc; struct eeh_dev *edev; + struct eeh_pe *tmp_pe; + bool any_passed = false; + + eeh_for_each_pe(pe, tmp_pe) + any_passed |= eeh_pe_passed(tmp_pe); /* pcibios will clear the counter; save the value */ cnt = pe->freeze_count; @@ -688,7 +670,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * into pci_hp_add_devices(). */ eeh_pe_state_mark(pe, EEH_PE_KEEP); - if (driver_eeh_aware || (pe->type & EEH_PE_VF)) { + if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) { eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data); } else { pci_lock_rescan_remove(); @@ -705,7 +687,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, * config accesses. So we prefer to block them. However, controlled * PCI config accesses initiated from EEH itself are allowed. */ - rc = eeh_pe_reset_full(pe); + rc = eeh_pe_reset_full(pe, false); if (rc) return rc; @@ -744,11 +726,11 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, eeh_add_virt_device(edev); } else { if (!driver_eeh_aware) - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); pci_hp_add_devices(bus); } } - eeh_pe_state_clear(pe, EEH_PE_KEEP); + eeh_pe_state_clear(pe, EEH_PE_KEEP, true); pe->tstamp = tstamp; pe->freeze_count = cnt; @@ -900,7 +882,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) * is still in frozen state. Clear it before * resuming the PE. 
*/ - eeh_pe_state_clear(pe, EEH_PE_ISOLATED); + eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true); result = PCI_ERS_RESULT_RECOVERED; } } @@ -977,7 +959,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); } else { - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED); pci_lock_rescan_remove(); @@ -987,7 +969,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe) return; } } - eeh_pe_state_clear(pe, EEH_PE_RECOVERING); + eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true); } /** @@ -1069,7 +1051,7 @@ void eeh_handle_special_event(void) continue; /* Notify all devices to be down */ - eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); + eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true); eeh_set_channel_state(pe, pci_channel_io_perm_failure); eeh_pe_report( "error_detected(permanent failure)", pe, diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index 227e57f980df..539aca055d70 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -121,7 +121,7 @@ int eeh_event_init(void) * the actual event will be delivered in a normal context * (from a workqueue). */ -int eeh_send_failure_event(struct eeh_pe *pe) +int __eeh_send_failure_event(struct eeh_pe *pe) { unsigned long flags; struct eeh_event *event; @@ -144,6 +144,20 @@ int eeh_send_failure_event(struct eeh_pe *pe) return 0; } +int eeh_send_failure_event(struct eeh_pe *pe) +{ + /* + * If we've manually supressed recovery events via debugfs + * then just drop it on the floor. + */ + if (eeh_debugfs_no_recover) { + pr_err("EEH: Event dropped due to no_recover setting\n"); + return 0; + } + + return __eeh_send_failure_event(pe); +} + /** * eeh_remove_event - Remove EEH event from the queue * @pe: Event binding to the PE diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 6fa2032e0594..8b578891f27c 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -657,62 +657,52 @@ void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode) } /** - * __eeh_pe_state_clear - Clear state for the PE + * eeh_pe_state_clear - Clear state for the PE * @data: EEH PE - * @flag: state + * @state: state + * @include_passed: include passed-through devices? * * The function is used to clear the indicated state from the * given PE. Besides, we also clear the check count of the PE * as well. */ -static void *__eeh_pe_state_clear(struct eeh_pe *pe, void *flag) +void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed) { - int state = *((int *)flag); + struct eeh_pe *pe; struct eeh_dev *edev, *tmp; struct pci_dev *pdev; - /* Keep the state of permanently removed PE intact */ - if (pe->state & EEH_PE_REMOVED) - return NULL; + eeh_for_each_pe(root, pe) { + /* Keep the state of permanently removed PE intact */ + if (pe->state & EEH_PE_REMOVED) + continue; - pe->state &= ~state; + if (!include_passed && eeh_pe_passed(pe)) + continue; - /* - * Special treatment on clearing isolated state. Clear - * check count since last isolation and put all affected - * devices to normal state. - */ - if (!(state & EEH_PE_ISOLATED)) - return NULL; + pe->state &= ~state; - pe->check_count = 0; - eeh_pe_for_each_dev(pe, edev, tmp) { - pdev = eeh_dev_to_pci_dev(edev); - if (!pdev) + /* + * Special treatment on clearing isolated state. Clear + * check count since last isolation and put all affected + * devices to normal state. 
+ */ + if (!(state & EEH_PE_ISOLATED)) continue; - pdev->error_state = pci_channel_io_normal; - } - - /* Unblock PCI config access if required */ - if (pe->state & EEH_PE_CFG_RESTRICTED) - pe->state &= ~EEH_PE_CFG_BLOCKED; + pe->check_count = 0; + eeh_pe_for_each_dev(pe, edev, tmp) { + pdev = eeh_dev_to_pci_dev(edev); + if (!pdev) + continue; - return NULL; -} + pdev->error_state = pci_channel_io_normal; + } -/** - * eeh_pe_state_clear - Clear state for the PE and its children - * @pe: PE - * @state: state to be cleared - * - * When the PE and its children has been recovered from error, - * we need clear the error state for that. The function is used - * for the purpose. - */ -void eeh_pe_state_clear(struct eeh_pe *pe, int state) -{ - eeh_pe_traverse(pe, __eeh_pe_state_clear, &state); + /* Unblock PCI config access if required */ + if (pe->state & EEH_PE_CFG_RESTRICTED) + pe->state &= ~EEH_PE_CFG_BLOCKED; + } } /* diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c index deed906dd8f1..3fa04dda1737 100644 --- a/arch/powerpc/kernel/eeh_sysfs.c +++ b/arch/powerpc/kernel/eeh_sysfs.c @@ -82,8 +82,9 @@ static ssize_t eeh_pe_state_store(struct device *dev, if (!(edev->pe->state & EEH_PE_ISOLATED)) return count; - if (eeh_unfreeze_pe(edev->pe, true)) + if (eeh_unfreeze_pe(edev->pe)) return -EIO; + eeh_pe_state_clear(edev->pe, EEH_PE_ISOLATED, true); return count; } diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 0768dfd8a64e..b61cfd29c76f 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -97,14 +97,11 @@ crit_transfer_to_handler: mfspr r0,SPRN_SRR1 stw r0,_SRR1(r11) - /* set the stack limit to the current stack - * and set the limit to protect the thread_info - * struct - */ + /* set the stack limit to the current stack */ mfspr r8,SPRN_SPRG_THREAD lwz r0,KSP_LIMIT(r8) stw r0,SAVED_KSP_LIMIT(r11) - rlwimi r0,r1,0,0,(31-THREAD_SHIFT) + rlwinm r0,r1,0,0,(31 - THREAD_SHIFT) stw r0,KSP_LIMIT(r8) /* fall through */ #endif @@ -121,14 +118,11 @@ crit_transfer_to_handler: mfspr r0,SPRN_SRR1 stw r0,crit_srr1@l(0) - /* set the stack limit to the current stack - * and set the limit to protect the thread_info - * struct - */ + /* set the stack limit to the current stack */ mfspr r8,SPRN_SPRG_THREAD lwz r0,KSP_LIMIT(r8) stw r0,saved_ksp_limit@l(0) - rlwimi r0,r1,0,0,(31-THREAD_SHIFT) + rlwinm r0,r1,0,0,(31 - THREAD_SHIFT) stw r0,KSP_LIMIT(r8) /* fall through */ #endif @@ -157,7 +151,6 @@ transfer_to_handler: stw r2,_XER(r11) mfspr r12,SPRN_SPRG_THREAD addi r2,r12,-THREAD - tovirt(r2,r2) /* set r2 to current */ beq 2f /* if from user, fix up THREAD.regs */ addi r11,r1,STACK_FRAME_OVERHEAD stw r11,PT_REGS(r12) @@ -166,6 +159,9 @@ transfer_to_handler: internal debug mode bit to do this. */ lwz r12,THREAD_DBCR0(r12) andis. 
r12,r12,DBCR0_IDM@h +#endif + ACCOUNT_CPU_USER_ENTRY(r2, r11, r12) +#if defined(CONFIG_40x) || defined(CONFIG_BOOKE) beq+ 3f /* From user and task is ptraced - load up global dbcr0 */ li r12,-1 /* clear all pending debug events */ @@ -174,8 +170,7 @@ transfer_to_handler: tophys(r11,r11) addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP - CURRENT_THREAD_INFO(r9, r1) - lwz r9,TI_CPU(r9) + lwz r9,TASK_CPU(r2) slwi r9,r9,3 add r11,r11,r9 #endif @@ -185,11 +180,6 @@ transfer_to_handler: addi r12,r12,-1 stw r12,4(r11) #endif -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - CURRENT_THREAD_INFO(r9, r1) - tophys(r9, r9) - ACCOUNT_CPU_USER_ENTRY(r9, r11, r12) -#endif b 3f @@ -201,9 +191,7 @@ transfer_to_handler: ble- stack_ovf /* then the kernel stack overflowed */ 5: #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) - CURRENT_THREAD_INFO(r9, r1) - tophys(r9,r9) /* check local flags */ - lwz r12,TI_LOCAL_FLAGS(r9) + lwz r12,TI_LOCAL_FLAGS(r2) mtcrf 0x01,r12 bt- 31-TLF_NAPPING,4f bt- 31-TLF_SLEEPING,7f @@ -212,6 +200,7 @@ transfer_to_handler: transfer_to_handler_cont: 3: mflr r9 + tovirt(r2, r2) /* set r2 to current */ lwz r11,0(r9) /* virtual address of handler */ lwz r9,4(r9) /* where to go when done */ #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) @@ -275,11 +264,11 @@ reenable_mmu: /* re-enable mmu so we can */ #if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500) 4: rlwinm r12,r12,0,~_TLF_NAPPING - stw r12,TI_LOCAL_FLAGS(r9) + stw r12,TI_LOCAL_FLAGS(r2) b power_save_ppc32_restore 7: rlwinm r12,r12,0,~_TLF_SLEEPING - stw r12,TI_LOCAL_FLAGS(r9) + stw r12,TI_LOCAL_FLAGS(r2) lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */ rlwinm r9,r9,0,~MSR_EE lwz r12,_LINK(r11) /* and return to address in LR */ @@ -351,8 +340,7 @@ _GLOBAL(DoSyscall) mtmsr r11 1: #endif /* CONFIG_TRACE_IRQFLAGS */ - CURRENT_THREAD_INFO(r10, r1) - lwz r11,TI_FLAGS(r10) + lwz r11,TI_FLAGS(r2) andi. r11,r11,_TIF_SYSCALL_DOTRACE bne- syscall_dotrace syscall_dotrace_cont: @@ -385,13 +373,12 @@ ret_from_syscall: lwz r3,GPR3(r1) #endif mr r6,r3 - CURRENT_THREAD_INFO(r12, r1) /* disable interrupts so current_thread_info()->flags can't change */ LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ /* Note: We don't bother telling lockdep about it */ SYNC MTMSRD(r10) - lwz r9,TI_FLAGS(r12) + lwz r9,TI_FLAGS(r2) li r8,-MAX_ERRNO andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK) bne- syscall_exit_work @@ -438,8 +425,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE andi. r4,r8,MSR_PR beq 3f - CURRENT_THREAD_INFO(r4, r1) - ACCOUNT_CPU_USER_EXIT(r4, r5, r7) + ACCOUNT_CPU_USER_EXIT(r2, r5, r7) 3: #endif lwz r4,_LINK(r1) @@ -532,7 +518,7 @@ syscall_exit_work: /* Clear per-syscall TIF flags if any are set. */ li r11,_TIF_PERSYSCALL_MASK - addi r12,r12,TI_FLAGS + addi r12,r2,TI_FLAGS 3: lwarx r8,0,r12 andc r8,r8,r11 #ifdef CONFIG_IBM405_ERR77 @@ -540,7 +526,6 @@ syscall_exit_work: #endif stwcx. r8,0,r12 bne- 3b - subi r12,r12,TI_FLAGS 4: /* Anything which requires enabling interrupts? */ andi. 
r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP) @@ -745,6 +730,9 @@ fast_exception_return: mtcr r10 lwz r10,_LINK(r11) mtlr r10 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + li r10, 0 + stw r10, 8(r11) REST_GPR(10, r11) #if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) mtspr SPRN_NRI, r0 @@ -819,8 +807,7 @@ ret_from_except: user_exc_return: /* r10 contains MSR_KERNEL here */ /* Check current_thread_info()->flags */ - CURRENT_THREAD_INFO(r9, r1) - lwz r9,TI_FLAGS(r9) + lwz r9,TI_FLAGS(r2) andi. r0,r9,_TIF_USER_WORK_MASK bne do_work @@ -832,18 +819,14 @@ restore_user: andis. r10,r0,DBCR0_IDM@h bnel- load_dbcr0 #endif -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE - CURRENT_THREAD_INFO(r9, r1) - ACCOUNT_CPU_USER_EXIT(r9, r10, r11) -#endif + ACCOUNT_CPU_USER_EXIT(r2, r10, r11) b restore /* N.B. the only way to get here is from the beq following ret_from_except. */ resume_kernel: /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ - CURRENT_THREAD_INFO(r9, r1) - lwz r8,TI_FLAGS(r9) + lwz r8,TI_FLAGS(r2) andis. r0,r8,_TIF_EMULATE_STACK_STORE@h beq+ 1f @@ -869,7 +852,7 @@ resume_kernel: /* Clear _TIF_EMULATE_STACK_STORE flag */ lis r11,_TIF_EMULATE_STACK_STORE@h - addi r5,r9,TI_FLAGS + addi r5,r2,TI_FLAGS 0: lwarx r8,0,r5 andc r8,r8,r11 #ifdef CONFIG_IBM405_ERR77 @@ -881,7 +864,7 @@ resume_kernel: #ifdef CONFIG_PREEMPT /* check current_thread_info->preempt_count */ - lwz r0,TI_PREEMPT(r9) + lwz r0,TI_PREEMPT(r2) cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ bne restore andi. r8,r8,_TIF_NEED_RESCHED @@ -897,8 +880,7 @@ resume_kernel: bl trace_hardirqs_off #endif 1: bl preempt_schedule_irq - CURRENT_THREAD_INFO(r9, r1) - lwz r3,TI_FLAGS(r9) + lwz r3,TI_FLAGS(r2) andi. r0,r3,_TIF_NEED_RESCHED bne- 1b #ifdef CONFIG_TRACE_IRQFLAGS @@ -982,6 +964,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) mtcrf 0xFF,r10 mtlr r11 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + li r10, 0 + stw r10, 8(r1) /* * Once we put values in SRR0 and SRR1, we are in a state * where exceptions are not recoverable, since taking an @@ -997,9 +982,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) .globl exc_exit_restart exc_exit_restart: lwz r12,_NIP(r1) -#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS) - mtspr SPRN_NRI, r0 -#endif mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r9 REST_4GPRS(9, r1) @@ -1021,6 +1003,9 @@ exc_exit_restart_end: mtlr r11 lwz r10,_CCR(r1) mtcrf 0xff,r10 + /* Clear the exception_marker on the stack to avoid confusing stacktrace */ + li r10, 0 + stw r10, 8(r1) REST_2GPRS(9, r1) .globl exc_exit_restart exc_exit_restart: @@ -1166,10 +1151,6 @@ ret_from_debug_exc: mfspr r9,SPRN_SPRG_THREAD lwz r10,SAVED_KSP_LIMIT(r1) stw r10,KSP_LIMIT(r9) - lwz r9,THREAD_INFO-THREAD(r9) - CURRENT_THREAD_INFO(r10, r1) - lwz r10,TI_PREEMPT(r10) - stw r10,TI_PREEMPT(r9) RESTORE_xSRR(SRR0,SRR1); RESTORE_xSRR(CSRR0,CSRR1); RESTORE_MMU_REGS; @@ -1201,8 +1182,7 @@ load_dbcr0: lis r11,global_dbcr0@ha addi r11,r11,global_dbcr0@l #ifdef CONFIG_SMP - CURRENT_THREAD_INFO(r9, r1) - lwz r9,TI_CPU(r9) + lwz r9,TASK_CPU(r2) slwi r9,r9,3 add r11,r11,r9 #endif @@ -1242,8 +1222,7 @@ recheck: LOAD_MSR_KERNEL(r10,MSR_KERNEL) SYNC MTMSRD(r10) /* disable interrupts */ - CURRENT_THREAD_INFO(r9, r1) - lwz r9,TI_FLAGS(r9) + lwz r9,TI_FLAGS(r2) andi. r0,r9,_TIF_NEED_RESCHED bne- do_resched andi. r0,r9,_TIF_USER_WORK_MASK @@ -1292,10 +1271,13 @@ BEGIN_FTR_SECTION END_FTR_SECTION_IFSET(CPU_FTR_601) lwz r3,_TRAP(r1) andi. 
r0,r3,1 - beq 4f + beq 5f SAVE_NVGPRS(r1) rlwinm r3,r3,0,0,30 stw r3,_TRAP(r1) +5: mfspr r2,SPRN_SPRG_THREAD + addi r2,r2,-THREAD + tovirt(r2,r2) /* set back r2 to current */ 4: addi r3,r1,STACK_FRAME_OVERHEAD bl unrecoverable_exception /* shouldn't return */ @@ -1335,7 +1317,7 @@ _GLOBAL(enter_rtas) MTMSRD(r0) /* don't get trashed */ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR) mtlr r6 - mtspr SPRN_SPRG_RTAS,r7 + stw r7, THREAD + RTAS_SP(r2) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 RFI @@ -1344,7 +1326,8 @@ _GLOBAL(enter_rtas) lwz r9,8(r9) /* original msr value */ addi r1,r1,INT_FRAME_SIZE li r0,0 - mtspr SPRN_SPRG_RTAS,r0 + tophys(r7, r2) + stw r0, THREAD + RTAS_SP(r7) mtspr SPRN_SRR0,r8 mtspr SPRN_SRR1,r9 RFI /* return to caller */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 435927f549c4..15c67d2c0534 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -166,7 +166,7 @@ system_call: /* label this so stack traces look sane */ li r10,IRQS_ENABLED std r10,SOFTE(r1) - CURRENT_THREAD_INFO(r11, r1) + ld r11, PACA_THREAD_INFO(r13) ld r10,TI_FLAGS(r11) andi. r11,r10,_TIF_SYSCALL_DOTRACE bne .Lsyscall_dotrace /* does not return */ @@ -213,7 +213,7 @@ system_call: /* label this so stack traces look sane */ ld r3,RESULT(r1) #endif - CURRENT_THREAD_INFO(r12, r1) + ld r12, PACA_THREAD_INFO(r13) ld r8,_MSR(r1) #ifdef CONFIG_PPC_BOOK3S @@ -236,18 +236,14 @@ system_call_exit: /* * Disable interrupts so current_thread_info()->flags can't change, * and so that we don't get interrupted after loading SRR0/1. + * + * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we + * could fault on the load of the TI_FLAGS below. */ #ifdef CONFIG_PPC_BOOK3E wrteei 0 #else - /* - * For performance reasons we clear RI the same time that we - * clear EE. We only need to clear RI just before we restore r13 - * below, but batching it with EE saves us one expensive mtmsrd call. - * We have to be careful to restore RI if we branch anywhere from - * here (eg syscall_exit_work). - */ - li r11,0 + li r11,MSR_RI mtmsrd r11,1 #endif /* CONFIG_PPC_BOOK3E */ @@ -263,15 +259,7 @@ system_call_exit: bne 3f #endif 2: addi r3,r1,STACK_FRAME_OVERHEAD -#ifdef CONFIG_PPC_BOOK3S - li r10,MSR_RI - mtmsrd r10,1 /* Restore RI */ -#endif bl restore_math -#ifdef CONFIG_PPC_BOOK3S - li r11,0 - mtmsrd r11,1 -#endif ld r8,_MSR(r1) ld r3,RESULT(r1) li r11,-MAX_ERRNO @@ -287,6 +275,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) andi. r6,r8,MSR_PR ld r4,_LINK(r1) +#ifdef CONFIG_PPC_BOOK3S + /* + * Clear MSR_RI, MSR_EE is already and remains disabled. We could do + * this later, but testing shows that doing it here causes less slow + * down than doing it closer to the rfid. + */ + li r11,0 + mtmsrd r11,1 +#endif + beq- 1f ACCOUNT_CPU_USER_EXIT(r13, r11, r12) @@ -348,7 +346,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) /* Repopulate r9 and r10 for the syscall path */ addi r9,r1,STACK_FRAME_OVERHEAD - CURRENT_THREAD_INFO(r10, r1) + ld r10, PACA_THREAD_INFO(r13) ld r10,TI_FLAGS(r10) cmpldi r0,NR_syscalls @@ -363,10 +361,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) b .Lsyscall_exit .Lsyscall_exit_work: -#ifdef CONFIG_PPC_BOOK3S - li r10,MSR_RI - mtmsrd r10,1 /* Restore RI */ -#endif /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr. If TIF_NOERROR is set, just save r3 as it is. 
*/ @@ -695,7 +689,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 2: #endif /* CONFIG_PPC_BOOK3S_64 */ - CURRENT_THREAD_INFO(r7, r8) /* base of new stack */ + clrrdi r7, r8, THREAD_SHIFT /* base of new stack */ /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE because we don't need to leave the 288-byte ABI gap at the top of the kernel stack. */ @@ -746,7 +740,7 @@ _GLOBAL(ret_from_except_lite) mtmsrd r10,1 /* Update machine state */ #endif /* CONFIG_PPC_BOOK3E */ - CURRENT_THREAD_INFO(r9, r1) + ld r9, PACA_THREAD_INFO(r13) ld r3,_MSR(r1) #ifdef CONFIG_PPC_BOOK3E ld r10,PACACURRENT(r13) @@ -860,7 +854,7 @@ resume_kernel: 1: bl preempt_schedule_irq /* Re-test flags and eventually loop */ - CURRENT_THREAD_INFO(r9, r1) + ld r9, PACA_THREAD_INFO(r13) ld r4,TI_FLAGS(r9) andi. r0,r4,_TIF_NEED_RESCHED bne 1b @@ -1002,6 +996,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) ld r2,_NIP(r1) mtspr SPRN_SRR0,r2 + /* + * Leaving a stale exception_marker on the stack can confuse + * the reliable stack unwinder later on. Clear it. + */ + li r2,0 + std r2,STACK_FRAME_OVERHEAD-16(r1) + ld r0,GPR0(r1) ld r2,GPR2(r1) ld r3,GPR3(r1) diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S index 52ca2471ee1a..d252f4663a23 100644 --- a/arch/powerpc/kernel/epapr_hcalls.S +++ b/arch/powerpc/kernel/epapr_hcalls.S @@ -21,10 +21,9 @@ #ifndef CONFIG_PPC64 /* epapr_ev_idle() was derived from e500_idle() */ _GLOBAL(epapr_ev_idle) - CURRENT_THREAD_INFO(r3, r1) - PPC_LL r4, TI_LOCAL_FLAGS(r3) /* set napping bit */ + PPC_LL r4, TI_LOCAL_FLAGS(r2) /* set napping bit */ ori r4, r4,_TLF_NAPPING /* so when we take an exception */ - PPC_STL r4, TI_LOCAL_FLAGS(r3) /* it will return to our caller */ + PPC_STL r4, TI_LOCAL_FLAGS(r2) /* it will return to our caller */ wrteei 1 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index afb638778f44..49381f32b374 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -77,17 +77,6 @@ special_reg_save: andi. r3,r3,MSR_PR bnelr - /* Copy info into temporary exception thread info */ - ld r11,PACAKSAVE(r13) - CURRENT_THREAD_INFO(r11, r11) - CURRENT_THREAD_INFO(r12, r1) - ld r10,TI_FLAGS(r11) - std r10,TI_FLAGS(r12) - ld r10,TI_PREEMPT(r11) - std r10,TI_PREEMPT(r12) - ld r10,TI_TASK(r11) - std r10,TI_TASK(r12) - /* * Advance to the next TLB exception frame for handler * types that don't do it automatically. @@ -349,6 +338,7 @@ ret_from_mc_except: #define GEN_BTB_FLUSH #define CRIT_BTB_FLUSH #define DBG_BTB_FLUSH +#define MC_BTB_FLUSH #define GDBELL_BTB_FLUSH #endif @@ -504,7 +494,7 @@ exc_##n##_bad_stack: \ * interrupts happen before the wait instruction. */ #define CHECK_NAPPING() \ - CURRENT_THREAD_INFO(r11, r1); \ + ld r11, PACA_THREAD_INFO(r13); \ ld r10,TI_LOCAL_FLAGS(r11); \ andi. 
r9,r10,_TLF_NAPPING; \ beq+ 1f; \ diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 9e253ce27e08..a5b8fbae56a0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -68,6 +68,14 @@ OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900) OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000) OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900) OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000) + +#ifdef CONFIG_PPC_POWERNV + .globl start_real_trampolines + .globl end_real_trampolines + .globl start_virt_trampolines + .globl end_virt_trampolines +#endif + #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * Data area reserved for FWNMI option. @@ -566,8 +574,36 @@ EXC_COMMON_BEGIN(mce_return) RFI_TO_KERNEL b . -EXC_REAL(data_access, 0x300, 0x80) -EXC_VIRT(data_access, 0x4300, 0x80, 0x300) +EXC_REAL_BEGIN(data_access, 0x300, 0x80) +SET_SCRATCH0(r13) /* save r13 */ +EXCEPTION_PROLOG_0(PACA_EXGEN) + b tramp_real_data_access +EXC_REAL_END(data_access, 0x300, 0x80) + +TRAMP_REAL_BEGIN(tramp_real_data_access) +EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x300) + /* + * DAR/DSISR must be read before setting MSR[RI], because + * a d-side MCE will clobber those registers so is not + * recoverable if they are live. + */ + mfspr r10,SPRN_DAR + mfspr r11,SPRN_DSISR + std r10,PACA_EXGEN+EX_DAR(r13) + stw r11,PACA_EXGEN+EX_DSISR(r13) +EXCEPTION_PROLOG_2(data_access_common, EXC_STD) + +EXC_VIRT_BEGIN(data_access, 0x4300, 0x80) +SET_SCRATCH0(r13) /* save r13 */ +EXCEPTION_PROLOG_0(PACA_EXGEN) +EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x300) + mfspr r10,SPRN_DAR + mfspr r11,SPRN_DSISR + std r10,PACA_EXGEN+EX_DAR(r13) + stw r11,PACA_EXGEN+EX_DSISR(r13) +EXCEPTION_PROLOG_2_RELON(data_access_common, EXC_STD) +EXC_VIRT_END(data_access, 0x4300, 0x80) + TRAMP_KVM_SKIP(PACA_EXGEN, 0x300) EXC_COMMON_BEGIN(data_access_common) @@ -575,11 +611,8 @@ EXC_COMMON_BEGIN(data_access_common) * Here r13 points to the paca, r9 contains the saved CR, * SRR0 and SRR1 are saved in r11 and r12, * r9 - r13 are saved in paca->exgen. 
+ * EX_DAR and EX_DSISR have saved DAR/DSISR */ - mfspr r10,SPRN_DAR - std r10,PACA_EXGEN+EX_DAR(r13) - mfspr r10,SPRN_DSISR - stw r10,PACA_EXGEN+EX_DSISR(r13) EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) RECONCILE_IRQ_STATE(r10, r11) ld r12,_MSR(r1) @@ -596,18 +629,29 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX) EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80) -EXCEPTION_PROLOG(PACA_EXSLB, data_access_slb_common, EXC_STD, KVMTEST_PR, 0x380); +SET_SCRATCH0(r13) /* save r13 */ +EXCEPTION_PROLOG_0(PACA_EXSLB) + b tramp_real_data_access_slb EXC_REAL_END(data_access_slb, 0x380, 0x80) +TRAMP_REAL_BEGIN(tramp_real_data_access_slb) +EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) + mfspr r10,SPRN_DAR + std r10,PACA_EXSLB+EX_DAR(r13) +EXCEPTION_PROLOG_2(data_access_slb_common, EXC_STD) + EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80) -EXCEPTION_RELON_PROLOG(PACA_EXSLB, data_access_slb_common, EXC_STD, NOTEST, 0x380); +SET_SCRATCH0(r13) /* save r13 */ +EXCEPTION_PROLOG_0(PACA_EXSLB) +EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380) + mfspr r10,SPRN_DAR + std r10,PACA_EXSLB+EX_DAR(r13) +EXCEPTION_PROLOG_2_RELON(data_access_slb_common, EXC_STD) EXC_VIRT_END(data_access_slb, 0x4380, 0x80) TRAMP_KVM_SKIP(PACA_EXSLB, 0x380) EXC_COMMON_BEGIN(data_access_slb_common) - mfspr r10,SPRN_DAR - std r10,PACA_EXSLB+EX_DAR(r13) EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB) ld r4,PACA_EXSLB+EX_DAR(r13) std r4,_DAR(r1) @@ -703,14 +747,30 @@ TRAMP_KVM_HV(PACA_EXGEN, 0x500) EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ) -EXC_REAL(alignment, 0x600, 0x100) -EXC_VIRT(alignment, 0x4600, 0x100, 0x600) -TRAMP_KVM(PACA_EXGEN, 0x600) -EXC_COMMON_BEGIN(alignment_common) +EXC_REAL_BEGIN(alignment, 0x600, 0x100) +SET_SCRATCH0(r13) /* save r13 */ +EXCEPTION_PROLOG_0(PACA_EXGEN) +EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x600) mfspr r10,SPRN_DAR + mfspr r11,SPRN_DSISR std r10,PACA_EXGEN+EX_DAR(r13) - mfspr r10,SPRN_DSISR - stw r10,PACA_EXGEN+EX_DSISR(r13) + stw r11,PACA_EXGEN+EX_DSISR(r13) +EXCEPTION_PROLOG_2(alignment_common, EXC_STD) +EXC_REAL_END(alignment, 0x600, 0x100) + +EXC_VIRT_BEGIN(alignment, 0x4600, 0x100) +SET_SCRATCH0(r13) /* save r13 */ +EXCEPTION_PROLOG_0(PACA_EXGEN) +EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x600) + mfspr r10,SPRN_DAR + mfspr r11,SPRN_DSISR + std r10,PACA_EXGEN+EX_DAR(r13) + stw r11,PACA_EXGEN+EX_DSISR(r13) +EXCEPTION_PROLOG_2_RELON(alignment_common, EXC_STD) +EXC_VIRT_END(alignment, 0x4600, 0x100) + +TRAMP_KVM(PACA_EXGEN, 0x600) +EXC_COMMON_BEGIN(alignment_common) EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) ld r3,PACA_EXGEN+EX_DAR(r13) lwz r4,PACA_EXGEN+EX_DSISR(r13) @@ -1629,7 +1689,7 @@ do_hash_page: ori r0,r0,DSISR_BAD_FAULT_64S@l and. r0,r4,r0 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ - CURRENT_THREAD_INFO(r11, r1) + ld r11, PACA_THREAD_INFO(r13) lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ bne 77f /* then don't call hash_page now */ diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 05b08db3901d..ce6a972f2584 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -261,7 +261,7 @@ __secondary_hold_acknowledge: tophys(r11,r1); /* use tophys(r1) if kernel */ \ beq 1f; \ mfspr r11,SPRN_SPRG_THREAD; \ - lwz r11,THREAD_INFO-THREAD(r11); \ + lwz r11,TASK_STACK-THREAD(r11); \ addi r11,r11,THREAD_SIZE; \ tophys(r11,r11); \ 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. 
frame */ @@ -352,9 +352,8 @@ i##n: \ * registers that might have bad values includes all the GPRs * and all the BATs. We indicate that we are in RTAS by putting * a non-zero value, the address of the exception frame to use, - * in SPRG2. The machine check handler checks SPRG2 and uses its - * value if it is non-zero. If we ever needed to free up SPRG2, - * we could use a field in the thread_info or thread_struct instead. + * in thread.rtas_sp. The machine check handler checks thread.rtas_sp + * and uses its value if it is non-zero. * (Other exception handlers assume that r1 is a valid kernel stack * pointer when we take an exception from supervisor mode.) * -- paulus. @@ -365,16 +364,15 @@ i##n: \ mtspr SPRN_SPRG_SCRATCH1,r11 mfcr r10 #ifdef CONFIG_PPC_CHRP - mfspr r11,SPRN_SPRG_RTAS - cmpwi 0,r11,0 - bne 7f + mfspr r11, SPRN_SPRG_THREAD + lwz r11, RTAS_SP(r11) + cmpwi cr1, r11, 0 + bne cr1, 7f #endif /* CONFIG_PPC_CHRP */ EXCEPTION_PROLOG_1 7: EXCEPTION_PROLOG_2 addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_PPC_CHRP - mfspr r4,SPRN_SPRG_RTAS - cmpwi cr1,r4,0 bne cr1,1f #endif EXC_XFER_STD(0x200, machine_check_exception) @@ -500,18 +498,22 @@ InstructionTLBMiss: */ /* Get PTE (linux-style) and check access */ mfspr r3,SPRN_IMISS +#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) lis r1,PAGE_OFFSET@h /* check if kernel address */ cmplw 0,r1,r3 - mfspr r2,SPRN_SPRG_THREAD - li r1,_PAGE_USER|_PAGE_PRESENT|_PAGE_EXEC /* low addresses tested as user */ - lwz r2,PGDIR(r2) +#endif + mfspr r2, SPRN_SPRG_PGDIR +#ifdef CONFIG_SWAP + li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC +#else + li r1,_PAGE_PRESENT | _PAGE_EXEC +#endif +#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) bge- 112f - mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ - lis r2,swapper_pg_dir@ha /* if kernel address, use */ - addi r2,r2,swapper_pg_dir@l /* kernel page table */ -112: tophys(r2,r2) - rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ +#endif +112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- InstructionAddressInvalid /* return if no mapping */ @@ -519,20 +521,10 @@ InstructionTLBMiss: lwz r0,0(r2) /* get linux-style pte */ andc. r1,r1,r0 /* check access & ~permission */ bne- InstructionAddressInvalid /* return if access not permitted */ - ori r0,r0,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ - /* - * NOTE! We are assuming this is not an SMP system, otherwise - * we would need to update the pte atomically with lwarx/stwcx. - */ - stw r0,0(r2) /* update PTE (accessed bit) */ /* Convert linux-style PTE to low word of PPC-style PTE */ - rlwinm r1,r0,32-10,31,31 /* _PAGE_RW -> PP lsb */ - rlwinm r2,r0,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ - and r1,r1,r2 /* writable if _RW and _DIRTY */ rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */ - rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */ - ori r1,r1,0xe04 /* clear out reserved bits */ - andc r1,r0,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ + ori r1, r1, 0xe05 /* clear out reserved bits */ + andc r1, r0, r1 /* PP = user? 
2 : 0 */ BEGIN_FTR_SECTION rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) @@ -576,16 +568,16 @@ DataLoadTLBMiss: mfspr r3,SPRN_DMISS lis r1,PAGE_OFFSET@h /* check if kernel address */ cmplw 0,r1,r3 - mfspr r2,SPRN_SPRG_THREAD - li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ - lwz r2,PGDIR(r2) + mfspr r2, SPRN_SPRG_PGDIR +#ifdef CONFIG_SWAP + li r1, _PAGE_PRESENT | _PAGE_ACCESSED +#else + li r1, _PAGE_PRESENT +#endif bge- 112f - mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ - lis r2,swapper_pg_dir@ha /* if kernel address, use */ - addi r2,r2,swapper_pg_dir@l /* kernel page table */ -112: tophys(r2,r2) - rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ +112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- DataAddressInvalid /* return if no mapping */ @@ -593,20 +585,16 @@ DataLoadTLBMiss: lwz r0,0(r2) /* get linux-style pte */ andc. r1,r1,r0 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ - ori r0,r0,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ /* * NOTE! We are assuming this is not an SMP system, otherwise * we would need to update the pte atomically with lwarx/stwcx. */ - stw r0,0(r2) /* update PTE (accessed bit) */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwinm r1,r0,32-10,31,31 /* _PAGE_RW -> PP lsb */ - rlwinm r2,r0,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ - and r1,r1,r2 /* writable if _RW and _DIRTY */ rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */ rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */ ori r1,r1,0xe04 /* clear out reserved bits */ - andc r1,r0,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ + andc r1,r0,r1 /* PP = user? rw? 2: 3: 0 */ BEGIN_FTR_SECTION rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) @@ -660,16 +648,16 @@ DataStoreTLBMiss: mfspr r3,SPRN_DMISS lis r1,PAGE_OFFSET@h /* check if kernel address */ cmplw 0,r1,r3 - mfspr r2,SPRN_SPRG_THREAD - li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ - lwz r2,PGDIR(r2) + mfspr r2, SPRN_SPRG_PGDIR +#ifdef CONFIG_SWAP + li r1, _PAGE_RW | _PAGE_PRESENT | _PAGE_ACCESSED +#else + li r1, _PAGE_RW | _PAGE_PRESENT +#endif bge- 112f - mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */ - rlwimi r1,r2,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ - lis r2,swapper_pg_dir@ha /* if kernel address, use */ - addi r2,r2,swapper_pg_dir@l /* kernel page table */ -112: tophys(r2,r2) - rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ + lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ + addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ +112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ lwz r2,0(r2) /* get pmd entry */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- DataAddressInvalid /* return if no mapping */ @@ -677,12 +665,10 @@ DataStoreTLBMiss: lwz r0,0(r2) /* get linux-style pte */ andc. r1,r1,r0 /* check access & ~permission */ bne- DataAddressInvalid /* return if access not permitted */ - ori r0,r0,_PAGE_ACCESSED|_PAGE_DIRTY /* * NOTE! 
We are assuming this is not an SMP system, otherwise * we would need to update the pte atomically with lwarx/stwcx. */ - stw r0,0(r2) /* update PTE (accessed/dirty bits) */ /* Convert linux-style PTE to low word of PPC-style PTE */ rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */ li r1,0xe05 /* clear out reserved bits & PP lsb */ @@ -845,12 +831,12 @@ __secondary_start: bl init_idle_6xx #endif /* CONFIG_PPC_BOOK3S_32 */ - /* get current_thread_info and current */ - lis r1,secondary_ti@ha - tophys(r1,r1) - lwz r1,secondary_ti@l(r1) - tophys(r2,r1) - lwz r2,TI_TASK(r2) + /* get current's stack and current */ + lis r2,secondary_current@ha + tophys(r2,r2) + lwz r2,secondary_current@l(r2) + tophys(r1,r2) + lwz r1,TASK_STACK(r1) /* stack */ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD @@ -865,8 +851,10 @@ __secondary_start: tophys(r4,r2) addi r4,r4,THREAD /* phys address of our thread_struct */ mtspr SPRN_SPRG_THREAD,r4 +#ifdef CONFIG_PPC_RTAS li r3,0 - mtspr SPRN_SPRG_RTAS,r3 /* 0 => not in RTAS */ + stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ +#endif /* enable MMU and jump to start_secondary */ li r4,MSR_KERNEL @@ -950,8 +938,10 @@ start_here: tophys(r4,r2) addi r4,r4,THREAD /* init task's THREAD */ mtspr SPRN_SPRG_THREAD,r4 +#ifdef CONFIG_PPC_RTAS li r3,0 - mtspr SPRN_SPRG_RTAS,r3 /* 0 => not in RTAS */ + stw r3, RTAS_SP(r4) /* 0 => not in RTAS */ +#endif /* stack */ lis r1,init_thread_union@ha @@ -1022,15 +1012,16 @@ _ENTRY(switch_mmu_context) li r0,NUM_USER_SEGMENTS mtctr r0 + lwz r4, MM_PGD(r4) #ifdef CONFIG_BDI_SWITCH /* Context switch the PTE pointer for the Abatron BDI2000. * The PGDIR is passed as second argument. */ - lwz r4,MM_PGD(r4) - lis r5, KERNELBASE@h - lwz r5, 0xf0(r5) - stw r4, 0x4(r5) + lis r5, abatron_pteptrs@ha + stw r4, abatron_pteptrs@l + 0x4(r5) #endif + tophys(r4, r4) + mtspr SPRN_SPRG_PGDIR, r4 li r4,0 isync 3: @@ -1105,6 +1096,41 @@ BEGIN_MMU_FTR_SECTION END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) blr +_ENTRY(update_bats) + lis r4, 1f@h + ori r4, r4, 1f@l + tophys(r4, r4) + mfmsr r6 + mflr r7 + li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR) + rlwinm r0, r6, 0, ~MSR_RI + rlwinm r0, r0, 0, ~MSR_EE + mtmsr r0 + mtspr SPRN_SRR0, r4 + mtspr SPRN_SRR1, r3 + SYNC + RFI +1: bl clear_bats + lis r3, BATS@ha + addi r3, r3, BATS@l + tophys(r3, r3) + LOAD_BAT(0, r3, r4, r5) + LOAD_BAT(1, r3, r4, r5) + LOAD_BAT(2, r3, r4, r5) + LOAD_BAT(3, r3, r4, r5) +BEGIN_MMU_FTR_SECTION + LOAD_BAT(4, r3, r4, r5) + LOAD_BAT(5, r3, r4, r5) + LOAD_BAT(6, r3, r4, r5) + LOAD_BAT(7, r3, r4, r5) +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS) + li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI) + mtmsr r3 + mtspr SPRN_SRR0, r7 + mtspr SPRN_SRR1, r6 + SYNC + RFI + flush_tlbs: lis r10, 0x40 1: addic. r10, r10, -0x1000 diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index b19d78410511..a9c934f2319b 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -115,7 +115,7 @@ _ENTRY(saved_ksp_limit) andi. 
r11,r11,MSR_PR; \ beq 1f; \ mfspr r1,SPRN_SPRG_THREAD; /* if from user, start at top of */\ - lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ + lwz r1,TASK_STACK-THREAD(r1); /* this thread's kernel stack */\ addi r1,r1,THREAD_SIZE; \ 1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ tophys(r11,r1); \ @@ -158,7 +158,7 @@ _ENTRY(saved_ksp_limit) beq 1f; \ /* COMING FROM USER MODE */ \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ - lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + lwz r11,TASK_STACK-THREAD(r11); /* this thread's kernel stack */\ 1: addi r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc an excpt frm */\ tophys(r11,r11); \ stw r10,_CCR(r11); /* save various registers */\ @@ -953,9 +953,8 @@ _GLOBAL(set_context) /* Context switch the PTE pointer for the Abatron BDI2000. * The PGDIR is the second parameter. */ - lis r5, KERNELBASE@h - lwz r5, 0xf0(r5) - stw r4, 0x4(r5) + lis r5, abatron_pteptrs@ha + stw r4, abatron_pteptrs@l + 0x4(r5) #endif sync mtspr SPRN_PID,r3 diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index bf23c19c92d6..37117ab11584 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S @@ -1019,10 +1019,10 @@ _GLOBAL(start_secondary_47x) /* Now we can get our task struct and real stack pointer */ - /* Get current_thread_info and current */ - lis r1,secondary_ti@ha - lwz r1,secondary_ti@l(r1) - lwz r2,TI_TASK(r1) + /* Get current's stack and current */ + lis r2,secondary_current@ha + lwz r2,secondary_current@l(r2) + lwz r1,TASK_STACK(r2) /* Current stack pointer */ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 4898e9491a1c..3fad8d499767 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -801,21 +801,19 @@ __secondary_start: /* Set thread priority to MEDIUM */ HMT_MEDIUM - /* Initialize the kernel stack */ - LOAD_REG_ADDR(r3, current_set) - sldi r28,r24,3 /* get current_set[cpu#] */ - ldx r14,r3,r28 - addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD - std r14,PACAKSAVE(r13) - - /* Do early setup for that CPU (SLB and hash table pointer) */ + /* + * Do early setup for this CPU, in particular initialising the MMU so we + * can turn it on below. This is a call to C, which is OK, we're still + * running on the emergency stack. + */ bl early_setup_secondary /* - * setup the new stack pointer, but *don't* use this until - * translation is on. + * The primary has initialized our kernel stack for us in the paca, grab + * it and put it in r1. We must *not* use it until we turn on the MMU + * below, because it may not be inside the RMO. */ - mr r1, r14 + ld r1, PACAKSAVE(r13) /* Clear backchain so we get nice backtraces */ li r7,0 diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 20cc816b3508..03c73b4c6435 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -142,7 +142,7 @@ instruction_counter: tophys(r11,r1); /* use tophys(r1) if kernel */ \ beq 1f; \ mfspr r11,SPRN_SPRG_THREAD; \ - lwz r11,THREAD_INFO-THREAD(r11); \ + lwz r11,TASK_STACK-THREAD(r11); \ addi r11,r11,THREAD_SIZE; \ tophys(r11,r11); \ 1: subi r11,r11,INT_FRAME_SIZE /* alloc exc. 
frame */ @@ -292,6 +292,17 @@ SystemCall: */ EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD) +/* Called from DataStoreTLBMiss when perf TLB misses events are activated */ +#ifdef CONFIG_PERF_EVENTS + patch_site 0f, patch__dtlbmiss_perf +0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) + addi r10, r10, 1 + stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) + mfspr r10, SPRN_SPRG_SCRATCH0 + mfspr r11, SPRN_SPRG_SCRATCH1 + rfi +#endif + . = 0x1100 /* * For the MPC8xx, this is a software tablewalk to load the instruction @@ -337,8 +348,8 @@ InstructionTLBMiss: rlwinm r10, r10, 16, 0xfff8 cmpli cr0, r10, PAGE_OFFSET@h #ifndef CONFIG_PIN_TLB_TEXT - /* It is assumed that kernel code fits into the first 8M page */ -0: cmpli cr7, r10, (PAGE_OFFSET + 0x0800000)@h + /* It is assumed that kernel code fits into the first 32M */ +0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h patch_site 0b, patch__itlbmiss_linmem_top #endif #endif @@ -405,10 +416,20 @@ InstructionTLBMiss: #ifndef CONFIG_PIN_TLB_TEXT ITLBMissLinear: mtcr r11 +#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23 + patch_site 0f, patch__itlbmiss_linmem_top8 + + mfspr r10, SPRN_SRR0 +0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha + rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K + ori r11, r11, MI_PS512K | MI_SVALID + rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */ +#else /* Set 8M byte page and mark it valid */ li r11, MI_PS8MEG | MI_SVALID - mtspr SPRN_MI_TWC, r11 rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ +#endif + mtspr SPRN_MI_TWC, r11 ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ _PAGE_PRESENT mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ @@ -434,7 +455,7 @@ DataStoreTLBMiss: #ifndef CONFIG_PIN_TLB_IMMR cmpli cr6, r10, VIRT_IMMR_BASE@h #endif -0: cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h +0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h patch_site 0b, patch__dtlbmiss_linmem_top mfspr r10, SPRN_M_TWB /* Get level 1 table */ @@ -494,16 +515,6 @@ DataStoreTLBMiss: rfi patch_site 0b, patch__dtlbmiss_exit_1 -#ifdef CONFIG_PERF_EVENTS - patch_site 0f, patch__dtlbmiss_perf -0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) - addi r10, r10, 1 - stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0) - mfspr r10, SPRN_SPRG_SCRATCH0 - mfspr r11, SPRN_SPRG_SCRATCH1 - rfi -#endif - DTLBMissIMMR: mtcr r11 /* Set 512k byte guarded page and mark it valid */ @@ -525,10 +536,29 @@ DTLBMissIMMR: DTLBMissLinear: mtcr r11 + rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ +#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_DATA_SHIFT < 23 + patch_site 0f, patch__dtlbmiss_romem_top8 + +0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha + rlwinm r11, r11, 0, 0xff800000 + neg r10, r11 + or r11, r11, r10 + rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K + ori r11, r11, MI_PS512K | MI_SVALID + mfspr r10, SPRN_MD_EPN + rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */ +#else /* Set 8M byte page and mark it valid */ li r11, MD_PS8MEG | MD_SVALID +#endif mtspr SPRN_MD_TWC, r11 - rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */ +#ifdef CONFIG_STRICT_KERNEL_RWX + patch_site 0f, patch__dtlbmiss_romem_top + +0: subis r11, r10, 0 + rlwimi r10, r11, 11, _PAGE_RO +#endif ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \ _PAGE_PRESENT mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ @@ -551,11 +581,11 @@ InstructionTLBError: mr r4,r12 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */ andis. 
r10,r9,SRR1_ISI_NOPT@h - beq+ 1f + beq+ .Litlbie tlbie r4 -itlbie: /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ -1: EXC_XFER_LITE(0x400, handle_page_fault) +.Litlbie: + EXC_XFER_LITE(0x400, handle_page_fault) /* This is the data TLB error on the MPC8xx. This could be due to * many reasons, including a dirty update to a pte. We bail out to @@ -577,10 +607,10 @@ DARFixed:/* Return from dcbx instruction bug workaround */ stw r5,_DSISR(r11) mfspr r4,SPRN_DAR andis. r10,r5,DSISR_NOHPTE@h - beq+ 1f + beq+ .Ldtlbie tlbie r4 -dtlbie: -1: li r10,RPN_PATTERN +.Ldtlbie: + li r10,RPN_PATTERN mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ /* 0x300 is DataAccess exception, needed by bad_page_fault() */ EXC_XFER_LITE(0x300, handle_page_fault) @@ -603,8 +633,8 @@ DataBreakpoint: mtspr SPRN_SPRG_SCRATCH1, r11 mfcr r10 mfspr r11, SPRN_SRR0 - cmplwi cr0, r11, (dtlbie - PAGE_OFFSET)@l - cmplwi cr7, r11, (itlbie - PAGE_OFFSET)@l + cmplwi cr0, r11, (.Ldtlbie - PAGE_OFFSET)@l + cmplwi cr7, r11, (.Litlbie - PAGE_OFFSET)@l beq- cr0, 11f beq- cr7, 11f EXCEPTION_PROLOG_1 @@ -886,28 +916,11 @@ initial_mmu: mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */ tlbia /* Invalidate all TLB entries */ -#ifdef CONFIG_PIN_TLB_TEXT - lis r8, MI_RSV4I@h - ori r8, r8, 0x1c00 - - mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ -#endif - #ifdef CONFIG_PIN_TLB_DATA oris r10, r10, MD_RSV4I@h mtspr SPRN_MD_CTR, r10 /* Set data TLB control */ #endif - /* Now map the lower 8 Meg into the ITLB. */ - lis r8, KERNELBASE@h /* Create vaddr for TLB */ - ori r8, r8, MI_EVALID /* Mark it valid */ - mtspr SPRN_MI_EPN, r8 - li r8, MI_PS8MEG /* Set 8M byte page */ - ori r8, r8, MI_SVALID /* Make it valid */ - mtspr SPRN_MI_TWC, r8 - li r8, MI_BOOTINIT /* Create RPN for address 0 */ - mtspr SPRN_MI_RPN, r8 /* Store TLB entry */ - lis r8, MI_APG_INIT@h /* Set protection modes */ ori r8, r8, MI_APG_INIT@l mtspr SPRN_MI_AP, r8 @@ -937,6 +950,34 @@ initial_mmu: mtspr SPRN_MD_RPN, r8 #endif + /* Now map the lower RAM (up to 32 Mbytes) into the ITLB. */ +#ifdef CONFIG_PIN_TLB_TEXT + lis r8, MI_RSV4I@h + ori r8, r8, 0x1c00 +#endif + li r9, 4 /* up to 4 pages of 8M */ + mtctr r9 + lis r9, KERNELBASE@h /* Create vaddr for TLB */ + li r10, MI_PS8MEG | MI_SVALID /* Set 8M byte page */ + li r11, MI_BOOTINIT /* Create RPN for address 0 */ + lis r12, _einittext@h + ori r12, r12, _einittext@l +1: +#ifdef CONFIG_PIN_TLB_TEXT + mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ + addi r8, r8, 0x100 +#endif + + ori r0, r9, MI_EVALID /* Mark it valid */ + mtspr SPRN_MI_EPN, r0 + mtspr SPRN_MI_TWC, r10 + mtspr SPRN_MI_RPN, r11 /* Store TLB entry */ + addis r9, r9, 0x80 + addis r11, r11, 0x80 + + cmpl cr0, r9, r12 + bdnzf gt, 1b + /* Since the cache is enabled according to the information we * just loaded into the TLB, invalidate and enable the caches here. * We should probably check/set other modes....later. @@ -989,5 +1030,6 @@ swapper_pg_dir: /* Room for two PTE table poiners, usually the kernel and current user * pointer to their respective root page table (pgdir). 
*/ + .globl abatron_pteptrs abatron_pteptrs: .space 8 diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 306e26c073a0..1b22a8dea399 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h @@ -55,7 +55,7 @@ END_BTB_FLUSH_SECTION beq 1f; \ BOOKE_CLEAR_BTB(r11) \ /* if from user, start at top of this thread's kernel stack */ \ - lwz r11, THREAD_INFO-THREAD(r10); \ + lwz r11, TASK_STACK - THREAD(r10); \ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \ 1 : subi r11, r11, INT_FRAME_SIZE; /* Allocate exception frame */ \ stw r13, _CCR(r11); /* save various registers */ \ @@ -142,7 +142,7 @@ END_BTB_FLUSH_SECTION BOOKE_CLEAR_BTB(r10) \ andi. r11,r11,MSR_PR; \ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ - lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\ addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ beq 1f; \ /* COMING FROM USER MODE */ \ @@ -155,13 +155,7 @@ END_BTB_FLUSH_SECTION stw r10,GPR11(r11); \ b 2f; \ /* COMING FROM PRIV MODE */ \ -1: lwz r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r11); \ - lwz r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r11); \ - stw r9,TI_FLAGS-EXC_LVL_FRAME_OVERHEAD(r8); \ - stw r10,TI_PREEMPT-EXC_LVL_FRAME_OVERHEAD(r8); \ - lwz r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r11); \ - stw r9,TI_TASK-EXC_LVL_FRAME_OVERHEAD(r8); \ - mr r11,r8; \ +1: mr r11, r8; \ 2: mfspr r8,SPRN_SPRG_RSCRATCH_##exc_level; \ stw r12,GPR12(r11); /* save various registers */\ mflr r10; \ diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 2386ce2a9c6e..1881127682e9 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S @@ -243,8 +243,9 @@ set_ivor: li r0,0 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) - CURRENT_THREAD_INFO(r22, r1) - stw r24, TI_CPU(r22) +#ifdef CONFIG_SMP + stw r24, TASK_CPU(r2) +#endif bl early_init @@ -717,8 +718,7 @@ finish_tlb_load: /* Get the next_tlbcam_idx percpu var */ #ifdef CONFIG_SMP - lwz r12, THREAD_INFO-THREAD(r12) - lwz r15, TI_CPU(r12) + lwz r15, TASK_CPU-THREAD(r12) lis r14, __per_cpu_offset@h ori r14, r14, __per_cpu_offset@l rlwinm r15, r15, 2, 0, 29 @@ -1089,10 +1089,10 @@ __secondary_start: mr r4,r24 /* Why? 
*/ bl call_setup_cpu - /* get current_thread_info and current */ - lis r1,secondary_ti@ha - lwz r1,secondary_ti@l(r1) - lwz r2,TI_TASK(r1) + /* get current's stack and current */ + lis r2,secondary_current@ha + lwz r2,secondary_current@l(r2) + lwz r1,TASK_STACK(r2) /* stack */ addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S index ff026c9d3cab..c5e7f5bb2e66 100644 --- a/arch/powerpc/kernel/idle_6xx.S +++ b/arch/powerpc/kernel/idle_6xx.S @@ -136,10 +136,9 @@ BEGIN_FTR_SECTION DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - CURRENT_THREAD_INFO(r9, r1) - lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ + lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */ ori r8,r8,_TLF_NAPPING /* so when we take an exception */ - stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ + stw r8,TI_LOCAL_FLAGS(r2) /* it will return to our caller */ mfmsr r7 ori r7,r7,MSR_EE oris r7,r7,MSR_POW@h @@ -159,8 +158,7 @@ _GLOBAL(power_save_ppc32_restore) stw r9,_NIP(r11) /* make it do a blr */ #ifdef CONFIG_SMP - CURRENT_THREAD_INFO(r12, r11) - lwz r11,TI_CPU(r12) /* get cpu number * 4 */ + lwz r11,TASK_CPU(r2) /* get cpu number * 4 */ slwi r11,r11,2 #else li r11,0 diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index 4e0d94d02030..31e732c378ad 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -63,7 +63,7 @@ _GLOBAL(\name) 1: /* Let's set the _TLF_NAPPING flag so interrupts make us return * to the right spot */ - CURRENT_THREAD_INFO(r11, r1) + ld r11, PACACURRENT(r13) ld r10,TI_LOCAL_FLAGS(r11) ori r10,r10,_TLF_NAPPING std r10,TI_LOCAL_FLAGS(r11) diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S index 583e55ac7d26..69dfcd2ca011 100644 --- a/arch/powerpc/kernel/idle_e500.S +++ b/arch/powerpc/kernel/idle_e500.S @@ -22,10 +22,9 @@ .text _GLOBAL(e500_idle) - CURRENT_THREAD_INFO(r3, r1) - lwz r4,TI_LOCAL_FLAGS(r3) /* set napping bit */ + lwz r4,TI_LOCAL_FLAGS(r2) /* set napping bit */ ori r4,r4,_TLF_NAPPING /* so when we take an exception */ - stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */ + stw r4,TI_LOCAL_FLAGS(r2) /* it will return to our caller */ #ifdef CONFIG_PPC_E500MC wrteei 1 @@ -88,8 +87,7 @@ _GLOBAL(power_save_ppc32_restore) stw r9,_NIP(r11) /* make it do a blr */ #ifdef CONFIG_SMP - CURRENT_THREAD_INFO(r12, r1) - lwz r11,TI_CPU(r12) /* get cpu number * 4 */ + lwz r11,TASK_CPU(r2) /* get cpu number * 4 */ slwi r11,r11,2 #else li r11,0 diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index a09b3c7ca176..a2fdb0a34b75 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -68,7 +68,7 @@ BEGIN_FTR_SECTION DSSALL sync END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) - CURRENT_THREAD_INFO(r9, r1) + ld r9, PACA_THREAD_INFO(r13) ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */ ori r8,r8,_TLF_NAPPING /* so when we take an exception */ std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */ diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 916ddc4aac44..8a936723c791 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -618,9 +618,8 @@ static inline void check_stack_overflow(void) sp = current_stack_pointer() & (THREAD_SIZE-1); /* check for stack overflow: is there less than 2KB free? 
*/ - if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { - pr_err("do_IRQ: stack overflow: %ld\n", - sp - sizeof(struct thread_info)); + if (unlikely(sp < 2048)) { + pr_err("do_IRQ: stack overflow: %ld\n", sp); dump_stack(); } #endif @@ -660,36 +659,21 @@ void __do_irq(struct pt_regs *regs) void do_IRQ(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - struct thread_info *curtp, *irqtp, *sirqtp; + void *cursp, *irqsp, *sirqsp; /* Switch to the irq stack to handle this */ - curtp = current_thread_info(); - irqtp = hardirq_ctx[raw_smp_processor_id()]; - sirqtp = softirq_ctx[raw_smp_processor_id()]; + cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1)); + irqsp = hardirq_ctx[raw_smp_processor_id()]; + sirqsp = softirq_ctx[raw_smp_processor_id()]; /* Already there ? */ - if (unlikely(curtp == irqtp || curtp == sirqtp)) { + if (unlikely(cursp == irqsp || cursp == sirqsp)) { __do_irq(regs); set_irq_regs(old_regs); return; } - - /* Prepare the thread_info in the irq stack */ - irqtp->task = curtp->task; - irqtp->flags = 0; - - /* Copy the preempt_count so that the [soft]irq checks work. */ - irqtp->preempt_count = curtp->preempt_count; - /* Switch stack and call */ - call_do_irq(regs, irqtp); - - /* Restore stack limit */ - irqtp->task = NULL; - - /* Copy back updates to the thread_info */ - if (irqtp->flags) - set_bits(irqtp->flags, &curtp->flags); + call_do_irq(regs, irqsp); set_irq_regs(old_regs); } @@ -698,90 +682,20 @@ void __init init_IRQ(void) { if (ppc_md.init_IRQ) ppc_md.init_IRQ(); - - exc_lvl_ctx_init(); - - irq_ctx_init(); } #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) -struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; -struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; -struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; - -void exc_lvl_ctx_init(void) -{ - struct thread_info *tp; - int i, cpu_nr; - - for_each_possible_cpu(i) { -#ifdef CONFIG_PPC64 - cpu_nr = i; -#else -#ifdef CONFIG_SMP - cpu_nr = get_hard_smp_processor_id(i); -#else - cpu_nr = 0; -#endif +void *critirq_ctx[NR_CPUS] __read_mostly; +void *dbgirq_ctx[NR_CPUS] __read_mostly; +void *mcheckirq_ctx[NR_CPUS] __read_mostly; #endif - memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); - tp = critirq_ctx[cpu_nr]; - tp->cpu = cpu_nr; - tp->preempt_count = 0; - -#ifdef CONFIG_BOOKE - memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE); - tp = dbgirq_ctx[cpu_nr]; - tp->cpu = cpu_nr; - tp->preempt_count = 0; - - memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE); - tp = mcheckirq_ctx[cpu_nr]; - tp->cpu = cpu_nr; - tp->preempt_count = HARDIRQ_OFFSET; -#endif - } -} -#endif - -struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; -struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; - -void irq_ctx_init(void) -{ - struct thread_info *tp; - int i; - - for_each_possible_cpu(i) { - memset((void *)softirq_ctx[i], 0, THREAD_SIZE); - tp = softirq_ctx[i]; - tp->cpu = i; - klp_init_thread_info(tp); - - memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); - tp = hardirq_ctx[i]; - tp->cpu = i; - klp_init_thread_info(tp); - } -} +void *softirq_ctx[NR_CPUS] __read_mostly; +void *hardirq_ctx[NR_CPUS] __read_mostly; void do_softirq_own_stack(void) { - struct thread_info *curtp, *irqtp; - - curtp = current_thread_info(); - irqtp = softirq_ctx[smp_processor_id()]; - irqtp->task = curtp->task; - irqtp->flags = 0; - call_do_softirq(irqtp); - irqtp->task = NULL; - - /* Set any flag that may have been set on the - * alternate stack - */ - if (irqtp->flags) - set_bits(irqtp->flags, 
&curtp->flags); + call_do_softirq(softirq_ctx[smp_processor_id()]); } irq_hw_number_t virq_to_hw(unsigned int virq) @@ -827,11 +741,6 @@ int irq_choose_cpu(const struct cpumask *mask) } #endif -int arch_early_irq_init(void) -{ - return 0; -} - #ifdef CONFIG_PPC64 static int __init setup_noirqdistrib(char *str) { diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index e1865565f0ae..7dd55eb1259d 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c @@ -151,41 +151,13 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) return 1; } -static DEFINE_PER_CPU(struct thread_info, kgdb_thread_info); static int kgdb_singlestep(struct pt_regs *regs) { - struct thread_info *thread_info, *exception_thread_info; - struct thread_info *backup_current_thread_info = - this_cpu_ptr(&kgdb_thread_info); - if (user_mode(regs)) return 0; - /* - * On Book E and perhaps other processors, singlestep is handled on - * the critical exception stack. This causes current_thread_info() - * to fail, since it it locates the thread_info by masking off - * the low bits of the current stack pointer. We work around - * this issue by copying the thread_info from the kernel stack - * before calling kgdb_handle_exception, and copying it back - * afterwards. On most processors the copy is avoided since - * exception_thread_info == thread_info. - */ - thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1)); - exception_thread_info = current_thread_info(); - - if (thread_info != exception_thread_info) { - /* Save the original current_thread_info. */ - memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info); - memcpy(exception_thread_info, thread_info, sizeof *thread_info); - } - kgdb_handle_exception(0, SIGTRAP, 0, regs); - if (thread_info != exception_thread_info) - /* Restore current_thread_info lastly. */ - memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); - return 1; } diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index a0f6f45005bd..75692c327ba0 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -317,10 +317,8 @@ void default_machine_kexec(struct kimage *image) * We setup preempt_count to avoid using VMX in memcpy. * XXX: the task struct will likely be invalid once we do the copy! */ - kexec_stack.thread_info.task = current_thread_info()->task; - kexec_stack.thread_info.flags = 0; - kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET; - kexec_stack.thread_info.cpu = current_thread_info()->cpu; + current_thread_info()->flags = 0; + current_thread_info()->preempt_count = HARDIRQ_OFFSET; /* We need a static PACA, too; copy this CPU's PACA over and switch to * it. 
Also poison per_cpu_offset and NULL lppaca to catch anyone using diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index bd933a75f0bc..b5fec1f9751a 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -31,6 +31,7 @@ #include #include +#include static DEFINE_PER_CPU(int, mce_nest_count); static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event); @@ -301,13 +302,13 @@ static void machine_check_process_queued_event(struct irq_work *work) while (__this_cpu_read(mce_queue_count) > 0) { index = __this_cpu_read(mce_queue_count) - 1; evt = this_cpu_ptr(&mce_event_queue[index]); - machine_check_print_event_info(evt, false); + machine_check_print_event_info(evt, false, false); __this_cpu_dec(mce_queue_count); } } void machine_check_print_event_info(struct machine_check_event *evt, - bool user_mode) + bool user_mode, bool in_guest) { const char *level, *sevstr, *subtype; static const char *mc_ue_types[] = { @@ -387,7 +388,9 @@ void machine_check_print_event_info(struct machine_check_event *evt, evt->disposition == MCE_DISPOSITION_RECOVERED ? "Recovered" : "Not recovered"); - if (user_mode) { + if (in_guest) { + printk("%s Guest NIP: %016llx\n", level, evt->srr0); + } else if (user_mode) { printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level, evt->srr0, current->pid, current->comm); } else { @@ -488,6 +491,8 @@ long machine_check_early(struct pt_regs *regs) { long handled = 0; + hv_nmi_check_nonrecoverable(regs); + /* * See if platform is capable of handling machine check. */ diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 57d2ffb2d45c..0dda4f8e3d7a 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -46,11 +46,10 @@ _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) lwz r10,THREAD+KSP_LIMIT(r2) - addi r11,r3,THREAD_INFO_GAP + stw r3, THREAD+KSP_LIMIT(r2) stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) mr r1,r3 stw r10,8(r1) - stw r11,THREAD+KSP_LIMIT(r2) bl __do_softirq lwz r10,8(r1) lwz r1,0(r1) @@ -60,17 +59,16 @@ _GLOBAL(call_do_softirq) blr /* - * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); + * void call_do_irq(struct pt_regs *regs, void *sp); */ _GLOBAL(call_do_irq) mflr r0 stw r0,4(r1) lwz r10,THREAD+KSP_LIMIT(r2) - addi r11,r4,THREAD_INFO_GAP + stw r4, THREAD+KSP_LIMIT(r2) stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) mr r1,r4 stw r10,8(r1) - stw r11,THREAD+KSP_LIMIT(r2) bl __do_irq lwz r10,8(r1) lwz r1,0(r1) @@ -183,10 +181,13 @@ _GLOBAL(low_choose_750fx_pll) or r4,r4,r5 mtspr SPRN_HID1,r4 +#ifdef CONFIG_SMP /* Store new HID1 image */ - CURRENT_THREAD_INFO(r6, r1) - lwz r6,TI_CPU(r6) + lwz r6,TASK_CPU(r2) slwi r6,r6,2 +#else + li r6, 0 +#endif addis r6,r6,nap_save_hid1@ha stw r4,nap_save_hid1@l(r6) @@ -599,7 +600,7 @@ EXPORT_SYMBOL(__bswapdi2) #ifdef CONFIG_SMP _GLOBAL(start_secondary_resume) /* Reset stack */ - CURRENT_THREAD_INFO(r1, r1) + rlwinm r1, r1, 0, 0, 31 - THREAD_SHIFT addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD li r3,0 stw r3,0(r1) /* Zero the stack frame pointer */ diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 38b03a330cd2..244d2462e781 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c @@ -7,12 +7,6 @@ * 2 of the License, or (at your option) any later version. * * /dev/nvram driver for PPC64 - * - * This perhaps should live in drivers/char - * - * TODO: Split the /dev/nvram part (that one can use - * drivers/char/generic_nvram.c) from the arch & partition - * parsing code. 
*/ #include @@ -714,137 +708,6 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, spin_unlock_irqrestore(&lock, flags); } -static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) -{ - if (ppc_md.nvram_size == NULL) - return -ENODEV; - return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, - ppc_md.nvram_size()); -} - - -static ssize_t dev_nvram_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t ret; - char *tmp = NULL; - ssize_t size; - - if (!ppc_md.nvram_size) { - ret = -ENODEV; - goto out; - } - - size = ppc_md.nvram_size(); - if (size < 0) { - ret = size; - goto out; - } - - if (*ppos >= size) { - ret = 0; - goto out; - } - - count = min_t(size_t, count, size - *ppos); - count = min(count, PAGE_SIZE); - - tmp = kmalloc(count, GFP_KERNEL); - if (!tmp) { - ret = -ENOMEM; - goto out; - } - - ret = ppc_md.nvram_read(tmp, count, ppos); - if (ret <= 0) - goto out; - - if (copy_to_user(buf, tmp, ret)) - ret = -EFAULT; - -out: - kfree(tmp); - return ret; - -} - -static ssize_t dev_nvram_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t ret; - char *tmp = NULL; - ssize_t size; - - ret = -ENODEV; - if (!ppc_md.nvram_size) - goto out; - - ret = 0; - size = ppc_md.nvram_size(); - if (*ppos >= size || size < 0) - goto out; - - count = min_t(size_t, count, size - *ppos); - count = min(count, PAGE_SIZE); - - tmp = memdup_user(buf, count); - if (IS_ERR(tmp)) { - ret = PTR_ERR(tmp); - goto out; - } - - ret = ppc_md.nvram_write(tmp, count, ppos); - - kfree(tmp); -out: - return ret; -} - -static long dev_nvram_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - switch(cmd) { -#ifdef CONFIG_PPC_PMAC - case OBSOLETE_PMAC_NVRAM_GET_OFFSET: - printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); - /* fall through */ - case IOC_NVRAM_GET_OFFSET: { - int part, offset; - - if (!machine_is(powermac)) - return -EINVAL; - if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) - return -EFAULT; - if (part < pmac_nvram_OF || part > pmac_nvram_NR) - return -EINVAL; - offset = pmac_get_partition(part); - if (offset < 0) - return offset; - if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0) - return -EFAULT; - return 0; - } -#endif /* CONFIG_PPC_PMAC */ - default: - return -EINVAL; - } -} - -static const struct file_operations nvram_fops = { - .owner = THIS_MODULE, - .llseek = dev_nvram_llseek, - .read = dev_nvram_read, - .write = dev_nvram_write, - .unlocked_ioctl = dev_nvram_ioctl, -}; - -static struct miscdevice nvram_dev = { - NVRAM_MINOR, - "nvram", - &nvram_fops -}; - - #ifdef DEBUG_NVRAM static void __init nvram_print_partitions(char * label) { @@ -992,6 +855,8 @@ loff_t __init nvram_create_partition(const char *name, int sig, long size = 0; int rc; + BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16); + /* Convert sizes from bytes to blocks */ req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; @@ -1192,22 +1057,3 @@ int __init nvram_scan_partitions(void) kfree(header); return err; } - -static int __init nvram_init(void) -{ - int rc; - - BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16); - - if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0) - return -ENODEV; - - rc = misc_register(&nvram_dev); - if (rc != 0) { - printk(KERN_ERR "nvram_init: failed to register device\n"); - return rc; - } - - return rc; -} -device_initcall(nvram_init); diff --git 
a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 913bfca09c4f..e7382abee868 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -27,7 +28,7 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align, unsigned long limit, int cpu) { - unsigned long pa; + void *ptr; int nid; /* @@ -36,23 +37,21 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align, * which will put its paca in the right place. */ if (cpu == boot_cpuid) { - nid = -1; + nid = NUMA_NO_NODE; memblock_set_bottom_up(true); } else { nid = early_cpu_to_node(cpu); } - pa = memblock_alloc_base_nid(size, align, limit, nid, MEMBLOCK_NONE); - if (!pa) { - pa = memblock_alloc_base(size, align, limit); - if (!pa) - panic("cannot allocate paca data"); - } + ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, + limit, nid); + if (!ptr) + panic("cannot allocate paca data"); if (cpu == boot_cpuid) memblock_set_bottom_up(false); - return __va(pa); + return ptr; } #ifdef CONFIG_PPC_PSERIES @@ -118,7 +117,6 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit) } s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu); - memset(s, 0, sizeof(*s)); s->persistent = cpu_to_be32(SLB_NUM_BOLTED); s->buffer_length = cpu_to_be32(sizeof(*s)); @@ -198,7 +196,11 @@ void __init allocate_paca_ptrs(void) paca_nr_cpu_ids = nr_cpu_ids; paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids; - paca_ptrs = __va(memblock_phys_alloc(paca_ptrs_size, SMP_CACHE_BYTES)); + paca_ptrs = memblock_alloc_raw(paca_ptrs_size, SMP_CACHE_BYTES); + if (!paca_ptrs) + panic("Failed to allocate %d bytes for paca pointers\n", + paca_ptrs_size); + memset(paca_ptrs, 0x88, paca_ptrs_size); } @@ -222,7 +224,6 @@ void __init allocate_paca(int cpu) paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES, limit, cpu); paca_ptrs[cpu] = paca; - memset(paca, 0, sizeof(struct paca_struct)); initialise_paca(paca, cpu); #ifdef CONFIG_PPC_PSERIES diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 88e4f69a09e5..ff4b7539cbdf 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -62,19 +63,13 @@ resource_size_t isa_mem_base; EXPORT_SYMBOL(isa_mem_base); -static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops; +static const struct dma_map_ops *pci_dma_ops; void set_pci_dma_ops(const struct dma_map_ops *dma_ops) { pci_dma_ops = dma_ops; } -const struct dma_map_ops *get_pci_dma_ops(void) -{ - return pci_dma_ops; -} -EXPORT_SYMBOL(get_pci_dma_ops); - /* * This function should run under locking protection, specifically * hose_spinlock. @@ -132,7 +127,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev) int nid = of_node_to_nid(dev); if (nid < 0 || !node_online(nid)) - nid = -1; + nid = NUMA_NO_NODE; PHB_SET_NODE(phb, nid); } @@ -357,6 +352,17 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) return NULL; } +struct pci_controller *pci_find_controller_for_domain(int domain_nr) +{ + struct pci_controller *hose; + + list_for_each_entry(hose, &hose_list, list_node) + if (hose->global_number == domain_nr) + return hose; + + return NULL; +} + /* * Reads the interrupt pin to determine if interrupt is use by card. 
* If the interrupt is used, then gets the interrupt line from the @@ -972,7 +978,7 @@ static void pcibios_setup_device(struct pci_dev *dev) /* Hook up default DMA ops */ set_dma_ops(&dev->dev, pci_dma_ops); - set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); + dev->dev.archdata.dma_offset = PCI_DRAM_OFFSET; /* Additional platform DMA/iommu setup */ phb = pci_bus_to_host(dev->bus); diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index d3f04f2d8249..0417fda13636 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -205,6 +205,9 @@ pci_create_OF_bus_map(void) of_prop = memblock_alloc(sizeof(struct property) + 256, SMP_CACHE_BYTES); + if (!of_prop) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(struct property) + 256); dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ce393df243aa..dd9e0d5386ee 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -176,7 +176,7 @@ static void __giveup_fpu(struct task_struct *tsk) save_fpu(tsk); msr = tsk->thread.regs->msr; - msr &= ~MSR_FP; + msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); #ifdef CONFIG_VSX if (cpu_has_feature(CPU_FTR_VSX)) msr &= ~MSR_VSX; @@ -1231,8 +1231,8 @@ struct task_struct *__switch_to(struct task_struct *prev, batch->active = 1; } - if (current_thread_info()->task->thread.regs) { - restore_math(current_thread_info()->task->thread.regs); + if (current->thread.regs) { + restore_math(current->thread.regs); /* * The copy-paste buffer can only store into foreign real @@ -1242,7 +1242,7 @@ struct task_struct *__switch_to(struct task_struct *prev, * mappings, we must issue a cp_abort to clear any state and * prevent snooping, corruption or a covert channel. */ - if (current_thread_info()->task->thread.used_vas) + if (current->thread.used_vas) asm volatile(PPC_CP_ABORT); } #endif /* CONFIG_PPC_BOOK3S_64 */ @@ -1634,7 +1634,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; struct thread_info *ti = task_thread_info(p); - klp_init_thread_info(ti); + klp_init_thread_info(p); /* Copy registers */ sp -= sizeof(struct pt_regs); @@ -1691,8 +1691,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, sp -= STACK_FRAME_OVERHEAD; p->thread.ksp = sp; #ifdef CONFIG_PPC32 - p->thread.ksp_limit = (unsigned long)task_stack_page(p) + - _ALIGN_UP(sizeof(struct thread_info), 16); + p->thread.ksp_limit = (unsigned long)end_of_stack(p); #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT p->thread.ptrace_bps[0] = NULL; @@ -1995,21 +1994,14 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, unsigned long stack_page; unsigned long cpu = task_cpu(p); - /* - * Avoid crashing if the stack has overflowed and corrupted - * task_cpu(p), which is in the thread_info struct. 
- */ - if (cpu < NR_CPUS && cpu_possible(cpu)) { - stack_page = (unsigned long) hardirq_ctx[cpu]; - if (sp >= stack_page + sizeof(struct thread_struct) - && sp <= stack_page + THREAD_SIZE - nbytes) - return 1; - - stack_page = (unsigned long) softirq_ctx[cpu]; - if (sp >= stack_page + sizeof(struct thread_struct) - && sp <= stack_page + THREAD_SIZE - nbytes) - return 1; - } + stack_page = (unsigned long)hardirq_ctx[cpu]; + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) + return 1; + + stack_page = (unsigned long)softirq_ctx[cpu]; + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) + return 1; + return 0; } @@ -2018,8 +2010,10 @@ int validate_sp(unsigned long sp, struct task_struct *p, { unsigned long stack_page = (unsigned long)task_stack_page(p); - if (sp >= stack_page + sizeof(struct thread_struct) - && sp <= stack_page + THREAD_SIZE - nbytes) + if (sp < THREAD_SIZE) + return 0; + + if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes) return 1; return valid_irq_stack(sp, p, nbytes); @@ -2027,7 +2021,7 @@ int validate_sp(unsigned long sp, struct task_struct *p, EXPORT_SYMBOL(validate_sp); -unsigned long get_wchan(struct task_struct *p) +static unsigned long __get_wchan(struct task_struct *p) { unsigned long ip, sp; int count = 0; @@ -2053,6 +2047,20 @@ unsigned long get_wchan(struct task_struct *p) return 0; } +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long ret; + + if (!try_get_task_stack(p)) + return 0; + + ret = __get_wchan(p); + + put_task_stack(p); + + return ret; +} + static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; void show_stack(struct task_struct *tsk, unsigned long *stack) @@ -2067,9 +2075,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) int curr_frame = 0; #endif - sp = (unsigned long) stack; if (tsk == NULL) tsk = current; + + if (!try_get_task_stack(tsk)) + return; + + sp = (unsigned long) stack; if (sp == 0) { if (tsk == current) sp = current_stack_pointer(); @@ -2081,7 +2093,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) printk("Call Trace:\n"); do { if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) - return; + break; stack = (unsigned long *) sp; newsp = stack[0]; @@ -2121,6 +2133,8 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) sp = newsp; } while (count++ < kstack_depth_to_print); + + put_task_stack(tsk); } #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 4181ec715f88..4221527b082f 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -126,7 +126,10 @@ static void __init move_device_tree(void) if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || !memblock_is_memory(start + size - 1) || overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) { - p = __va(memblock_phys_alloc(size, PAGE_SIZE)); + p = memblock_alloc_raw(size, PAGE_SIZE); + if (!p) + panic("Failed to allocate %lu bytes to move device tree\n", + size); memcpy(p, initial_boot_params, size); initial_boot_params = p; DBG("Moved device tree to 0x%px\n", p); diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index cdd5d1d3ae41..d9ac7d94656e 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -274,6 +275,8 @@ static int set_user_trap(struct task_struct *task, unsigned long trap) */ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data) { + 
unsigned int regs_max; + if ((task->thread.regs == NULL) || !data) return -EIO; @@ -297,7 +300,9 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data) } #endif - if (regno < (sizeof(struct user_pt_regs) / sizeof(unsigned long))) { + regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long); + if (regno < regs_max) { + regno = array_index_nospec(regno, regs_max); *data = ((unsigned long *)task->thread.regs)[regno]; return 0; } @@ -321,6 +326,7 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data) return set_user_dscr(task, data); if (regno <= PT_MAX_PUT_REG) { + regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1); ((unsigned long *)task->thread.regs)[regno] = data; return 0; } @@ -561,6 +567,7 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, /* * Copy out only the low-order word of vrsave. */ + int start, end; union { elf_vrreg_t reg; u32 word; @@ -569,8 +576,10 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset, vrsave.word = target->thread.vrsave; + start = 33 * sizeof(vector128); + end = start + sizeof(vrsave); ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave, - 33 * sizeof(vector128), -1); + start, end); } return ret; @@ -608,6 +617,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, /* * We use only the first word of vrsave. */ + int start, end; union { elf_vrreg_t reg; u32 word; @@ -616,8 +626,10 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, vrsave.word = target->thread.vrsave; + start = 33 * sizeof(vector128); + end = start + sizeof(vrsave); ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave, - 33 * sizeof(vector128), -1); + start, end); if (!ret) target->thread.vrsave = vrsave.word; } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index de35bd8f047f..fbc676160adf 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -1187,7 +1187,11 @@ void __init rtas_initialize(void) ibm_suspend_me_token = rtas_token("ibm,suspend-me"); } #endif - rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region); + rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE, + 0, rtas_region); + if (!rtas_rmo_buf) + panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n", + PAGE_SIZE, &rtas_region); #ifdef CONFIG_RTAS_ERROR_LOGGING rtas_last_error_token = rtas_token("rtas-last-error"); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index ca00fbb97cf8..2e5dfb6e0823 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -459,8 +459,11 @@ void __init smp_setup_cpu_maps(void) DBG("smp_setup_cpu_maps()\n"); - cpu_to_phys_id = __va(memblock_phys_alloc(nr_cpu_ids * sizeof(u32), __alignof__(u32))); - memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32)); + cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32), + __alignof__(u32)); + if (!cpu_to_phys_id) + panic("%s: Failed to allocate %zu bytes align=0x%zx\n", + __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32)); for_each_node_by_type(dn, "cpu") { const __be32 *intserv; @@ -634,7 +637,7 @@ void probe_machine(void) } /* What can we do if we didn't find ? 
*/ if (machine_id >= &__machine_desc_end) { - DBG("No suitable machine found !\n"); + pr_err("No suitable machine description found !\n"); for (;;); } @@ -791,7 +794,6 @@ void arch_setup_pdev_archdata(struct platform_device *pdev) { pdev->archdata.dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->archdata.dma_mask; - set_dma_ops(&pdev->dev, &dma_nommu_ops); } static __init void print_system_info(void) @@ -938,7 +940,7 @@ void __init setup_arch(char **cmdline_p) /* Reserve large chunks of memory for use by CMA for KVM. */ kvm_cma_reserve(); - klp_init_thread_info(&init_thread_info); + klp_init_thread_info(&init_task); init_mm.start_code = (unsigned long)_stext; init_mm.end_code = (unsigned long) _etext; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 947f904688b0..4a65e08a6042 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -147,41 +148,6 @@ static int __init ppc_setup_l3cr(char *str) } __setup("l3cr=", ppc_setup_l3cr); -#ifdef CONFIG_GENERIC_NVRAM - -/* Generic nvram hooks used by drivers/char/gen_nvram.c */ -unsigned char nvram_read_byte(int addr) -{ - if (ppc_md.nvram_read_val) - return ppc_md.nvram_read_val(addr); - return 0xff; -} -EXPORT_SYMBOL(nvram_read_byte); - -void nvram_write_byte(unsigned char val, int addr) -{ - if (ppc_md.nvram_write_val) - ppc_md.nvram_write_val(addr, val); -} -EXPORT_SYMBOL(nvram_write_byte); - -ssize_t nvram_get_size(void) -{ - if (ppc_md.nvram_size) - return ppc_md.nvram_size(); - return -1; -} -EXPORT_SYMBOL(nvram_get_size); - -void nvram_sync(void) -{ - if (ppc_md.nvram_sync) - ppc_md.nvram_sync(); -} -EXPORT_SYMBOL(nvram_sync); - -#endif /* CONFIG_NVRAM */ - static int __init ppc_init(void) { /* clear the progress line */ @@ -196,6 +162,17 @@ static int __init ppc_init(void) } arch_initcall(ppc_init); +static void *__init alloc_stack(void) +{ + void *ptr = memblock_alloc(THREAD_SIZE, THREAD_SIZE); + + if (!ptr) + panic("cannot allocate %d bytes for stack at %pS\n", + THREAD_SIZE, (void *)_RET_IP_); + + return ptr; +} + void __init irqstack_early_init(void) { unsigned int i; @@ -203,10 +180,8 @@ void __init irqstack_early_init(void) /* interrupt stacks must be in lowmem, we get that for free on ppc32 * as the memblock is limited to lowmem by default */ for_each_possible_cpu(i) { - softirq_ctx[i] = (struct thread_info *) - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); - hardirq_ctx[i] = (struct thread_info *) - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); + softirq_ctx[i] = alloc_stack(); + hardirq_ctx[i] = alloc_stack(); } } @@ -224,13 +199,10 @@ void __init exc_lvl_early_init(void) hw_cpu = 0; #endif - critirq_ctx[hw_cpu] = (struct thread_info *) - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); + critirq_ctx[hw_cpu] = alloc_stack(); #ifdef CONFIG_BOOKE - dbgirq_ctx[hw_cpu] = (struct thread_info *) - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); - mcheckirq_ctx[hw_cpu] = (struct thread_info *) - __va(memblock_phys_alloc(THREAD_SIZE, THREAD_SIZE)); + dbgirq_ctx[hw_cpu] = alloc_stack(); + mcheckirq_ctx[hw_cpu] = alloc_stack(); #endif } } diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 236c1151a3a7..ba404dd9ce1d 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -634,19 +634,17 @@ __init u64 ppc64_bolted_size(void) static void *__init alloc_stack(unsigned long limit, int cpu) { - unsigned long pa; + void *ptr; 
BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16); - pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit, - early_cpu_to_node(cpu), MEMBLOCK_NONE); - if (!pa) { - pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); - if (!pa) - panic("cannot allocate stacks"); - } + ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE, + MEMBLOCK_LOW_LIMIT, limit, + early_cpu_to_node(cpu)); + if (!ptr) + panic("cannot allocate stacks"); - return __va(pa); + return ptr; } void __init irqstack_early_init(void) @@ -691,24 +689,6 @@ void __init exc_lvl_early_init(void) } #endif -/* - * Emergency stacks are used for a range of things, from asynchronous - * NMIs (system reset, machine check) to synchronous, process context. - * We set preempt_count to zero, even though that isn't necessarily correct. To - * get the right value we'd need to copy it from the previous thread_info, but - * doing that might fault causing more problems. - * TODO: what to do with accounting? - */ -static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu) -{ - ti->task = NULL; - ti->cpu = cpu; - ti->preempt_count = 0; - ti->local_flags = 0; - ti->flags = 0; - klp_init_thread_info(ti); -} - /* * Stack space used when we detect a bad kernel stack pointer, and * early in SMP boots before relocation is enabled. Exclusive emergency @@ -736,25 +716,14 @@ void __init emergency_stack_init(void) limit = min(ppc64_bolted_size(), ppc64_rma_size); for_each_possible_cpu(i) { - struct thread_info *ti; - - ti = alloc_stack(limit, i); - memset(ti, 0, THREAD_SIZE); - emerg_stack_init_thread_info(ti, i); - paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE; + paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE; #ifdef CONFIG_PPC_BOOK3S_64 /* emergency stack for NMI exception handling. */ - ti = alloc_stack(limit, i); - memset(ti, 0, THREAD_SIZE); - emerg_stack_init_thread_info(ti, i); - paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE; + paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE; /* emergency stack for machine check exception handling. */ - ti = alloc_stack(limit, i); - memset(ti, 0, THREAD_SIZE); - emerg_stack_init_thread_info(ti, i); - paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE; + paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE; #endif } } @@ -933,8 +902,13 @@ static void __ref init_fallback_flush(void) * hardware prefetch runoff. We don't have a recipe for load patterns to * reliably avoid the prefetcher. 
*/ - l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); - memset(l1d_flush_fallback_area, 0, l1d_size * 2); + l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2, + l1d_size, MEMBLOCK_LOW_LIMIT, + limit, NUMA_NO_NODE); + if (!l1d_flush_fallback_area) + panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n", + __func__, l1d_size * 2, l1d_size, &limit); + for_each_possible_cpu(cpu) { struct paca_struct *paca = paca_ptrs[cpu]; diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 3f15edf25a0d..e784342bdaa1 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -75,7 +76,7 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 }; #endif -struct thread_info *secondary_ti; +struct task_struct *secondary_current; bool has_big_cores; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); @@ -358,13 +359,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) * NMI IPIs may not be recoverable, so should not be used as ongoing part of * a running system. They can be used for crash, debug, halt/reboot, etc. * - * NMI IPIs are globally single threaded. No more than one in progress at - * any time. - * * The IPI call waits with interrupts disabled until all targets enter the - * NMI handler, then the call returns. + * NMI handler, then returns. Subsequent IPIs can be issued before targets + * have returned from their handlers, so there is no guarantee about + * concurrency or re-entrancy. * - * No new NMI can be initiated until targets exit the handler. + * A new NMI can be issued before all targets exit the handler. * * The IPI call may time out without all targets entering the NMI handler. * In that case, there is some logic to recover (and ignore subsequent @@ -375,7 +375,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0); static struct cpumask nmi_ipi_pending_mask; -static int nmi_ipi_busy_count = 0; +static bool nmi_ipi_busy = false; static void (*nmi_ipi_function)(struct pt_regs *) = NULL; static void nmi_ipi_lock_start(unsigned long *flags) @@ -414,7 +414,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags) */ int smp_handle_nmi_ipi(struct pt_regs *regs) { - void (*fn)(struct pt_regs *); + void (*fn)(struct pt_regs *) = NULL; unsigned long flags; int me = raw_smp_processor_id(); int ret = 0; @@ -425,29 +425,17 @@ int smp_handle_nmi_ipi(struct pt_regs *regs) * because the caller may have timed out. */ nmi_ipi_lock_start(&flags); - if (!nmi_ipi_busy_count) - goto out; - if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask)) - goto out; - - fn = nmi_ipi_function; - if (!fn) - goto out; - - cpumask_clear_cpu(me, &nmi_ipi_pending_mask); - nmi_ipi_busy_count++; - nmi_ipi_unlock(); - - ret = 1; - - fn(regs); - - nmi_ipi_lock(); - if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */ - nmi_ipi_busy_count--; -out: + if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) { + cpumask_clear_cpu(me, &nmi_ipi_pending_mask); + fn = READ_ONCE(nmi_ipi_function); + WARN_ON_ONCE(!fn); + ret = 1; + } nmi_ipi_unlock_end(&flags); + if (fn) + fn(regs); + return ret; } @@ -473,9 +461,10 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe) * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS. * - fn is the target callback function. 
* - delay_us > 0 is the delay before giving up waiting for targets to - * complete executing the handler, == 0 specifies indefinite delay. + * begin executing the handler, == 0 specifies indefinite delay. */ -int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe) +static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), + u64 delay_us, bool safe) { unsigned long flags; int me = raw_smp_processor_id(); @@ -487,31 +476,33 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool if (unlikely(!smp_ops)) return 0; - /* Take the nmi_ipi_busy count/lock with interrupts hard disabled */ nmi_ipi_lock_start(&flags); - while (nmi_ipi_busy_count) { + while (nmi_ipi_busy) { nmi_ipi_unlock_end(&flags); - spin_until_cond(nmi_ipi_busy_count == 0); + spin_until_cond(!nmi_ipi_busy); nmi_ipi_lock_start(&flags); } - + nmi_ipi_busy = true; nmi_ipi_function = fn; + WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask)); + if (cpu < 0) { /* ALL_OTHERS */ cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask); cpumask_clear_cpu(me, &nmi_ipi_pending_mask); } else { - /* cpumask starts clear */ cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); } - nmi_ipi_busy_count++; + nmi_ipi_unlock(); + /* Interrupts remain hard disabled */ + do_smp_send_nmi_ipi(cpu, safe); nmi_ipi_lock(); - /* nmi_ipi_busy_count is held here, so unlock/lock is okay */ + /* nmi_ipi_busy is set here, so unlock/lock is okay */ while (!cpumask_empty(&nmi_ipi_pending_mask)) { nmi_ipi_unlock(); udelay(1); @@ -523,29 +514,15 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool } } - while (nmi_ipi_busy_count > 1) { - nmi_ipi_unlock(); - udelay(1); - nmi_ipi_lock(); - if (delay_us) { - delay_us--; - if (!delay_us) - break; - } - } - if (!cpumask_empty(&nmi_ipi_pending_mask)) { /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */ ret = 0; cpumask_clear(&nmi_ipi_pending_mask); } - if (nmi_ipi_busy_count > 1) { - /* Timeout waiting for CPUs to execute fn */ - ret = 0; - nmi_ipi_busy_count = 1; - } - nmi_ipi_busy_count--; + nmi_ipi_function = NULL; + nmi_ipi_busy = false; + nmi_ipi_unlock_end(&flags); return ret; @@ -613,17 +590,8 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) static void nmi_stop_this_cpu(struct pt_regs *regs) { /* - * This is a special case because it never returns, so the NMI IPI - * handling would never mark it as done, which makes any later - * smp_send_nmi_ipi() call spin forever. Mark it done now. - * * IRQs are already hard disabled by the smp_handle_nmi_ipi. 
*/ - nmi_ipi_lock(); - if (nmi_ipi_busy_count > 1) - nmi_ipi_busy_count--; - nmi_ipi_unlock(); - spin_begin(); while (1) spin_cpu_relax(); @@ -663,7 +631,7 @@ void smp_send_stop(void) } #endif /* CONFIG_NMI_IPI */ -struct thread_info *current_set[NR_CPUS]; +struct task_struct *current_set[NR_CPUS]; static void smp_store_cpu_info(int id) { @@ -928,7 +896,7 @@ void smp_prepare_boot_cpu(void) paca_ptrs[boot_cpuid]->__current = current; #endif set_numa_node(numa_cpu_lookup_table[boot_cpuid]); - current_set[boot_cpuid] = task_thread_info(current); + current_set[boot_cpuid] = current; } #ifdef CONFIG_HOTPLUG_CPU @@ -1013,14 +981,13 @@ static bool secondaries_inhibited(void) static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) { - struct thread_info *ti = task_thread_info(idle); - #ifdef CONFIG_PPC64 paca_ptrs[cpu]->__current = idle; - paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; + paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + + THREAD_SIZE - STACK_FRAME_OVERHEAD; #endif - ti->cpu = cpu; - secondary_ti = current_set[cpu] = ti; + idle->cpu = cpu; + secondary_current = current_set[cpu] = idle; } int __cpu_up(unsigned int cpu, struct task_struct *tidle) diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index e2c50b55138f..1e2276963f6d 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c @@ -67,12 +67,17 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long sp; + if (!try_get_task_stack(tsk)) + return; + if (tsk == current) sp = current_stack_pointer(); else sp = tsk->thread.ksp; save_context_stack(trace, sp, tsk, 0); + + put_task_stack(tsk); } EXPORT_SYMBOL_GPL(save_stack_trace_tsk); @@ -84,25 +89,21 @@ save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) EXPORT_SYMBOL_GPL(save_stack_trace_regs); #ifdef CONFIG_HAVE_RELIABLE_STACKTRACE -int -save_stack_trace_tsk_reliable(struct task_struct *tsk, - struct stack_trace *trace) +/* + * This function returns an error if it detects any unreliable features of the + * stack. Otherwise it guarantees that the stack trace is reliable. + * + * If the task is not 'current', the caller *must* ensure the task is inactive. + */ +static int __save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace) { unsigned long sp; + unsigned long newsp; unsigned long stack_page = (unsigned long)task_stack_page(tsk); unsigned long stack_end; int graph_idx = 0; - - /* - * The last frame (unwinding first) may not yet have saved - * its LR onto the stack. - */ - int firstframe = 1; - - if (tsk == current) - sp = current_stack_pointer(); - else - sp = tsk->thread.ksp; + bool firstframe; stack_end = stack_page + THREAD_SIZE; if (!is_idle_task(tsk)) { @@ -129,40 +130,53 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk, stack_end -= STACK_FRAME_OVERHEAD; } + if (tsk == current) + sp = current_stack_pointer(); + else + sp = tsk->thread.ksp; + if (sp < stack_page + sizeof(struct thread_struct) || sp > stack_end - STACK_FRAME_MIN_SIZE) { - return 1; + return -EINVAL; } - for (;;) { + for (firstframe = true; sp != stack_end; + firstframe = false, sp = newsp) { unsigned long *stack = (unsigned long *) sp; - unsigned long newsp, ip; + unsigned long ip; /* sanity check: ABI requires SP to be aligned 16 bytes. */ if (sp & 0xF) - return 1; - - /* Mark stacktraces with exception frames as unreliable. 
*/ - if (sp <= stack_end - STACK_INT_FRAME_SIZE && - stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { - return 1; - } + return -EINVAL; newsp = stack[0]; /* Stack grows downwards; unwinder may only go up. */ if (newsp <= sp) - return 1; + return -EINVAL; if (newsp != stack_end && newsp > stack_end - STACK_FRAME_MIN_SIZE) { - return 1; /* invalid backlink, too far up. */ + return -EINVAL; /* invalid backlink, too far up. */ + } + + /* + * We can only trust the bottom frame's backlink, the + * rest of the frame may be uninitialized, continue to + * the next. + */ + if (firstframe) + continue; + + /* Mark stacktraces with exception frames as unreliable. */ + if (sp <= stack_end - STACK_INT_FRAME_SIZE && + stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { + return -EINVAL; } /* Examine the saved LR: it must point into kernel code. */ ip = stack[STACK_FRAME_LR_SAVE]; - if (!firstframe && !__kernel_text_address(ip)) - return 1; - firstframe = 0; + if (!__kernel_text_address(ip)) + return -EINVAL; /* * FIXME: IMHO these tests do not belong in @@ -175,25 +189,37 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk, * as unreliable. */ if (ip == (unsigned long)kretprobe_trampoline) - return 1; + return -EINVAL; #endif + if (trace->nr_entries >= trace->max_entries) + return -E2BIG; if (!trace->skip) trace->entries[trace->nr_entries++] = ip; else trace->skip--; + } + return 0; +} - if (newsp == stack_end) - break; +int save_stack_trace_tsk_reliable(struct task_struct *tsk, + struct stack_trace *trace) +{ + int ret; - if (trace->nr_entries >= trace->max_entries) - return -E2BIG; + /* + * If the task doesn't have a stack (e.g., a zombie), the stack is + * "reliably" empty. + */ + if (!try_get_task_stack(tsk)) + return 0; - sp = newsp; - } - return 0; + ret = __save_stack_trace_tsk_reliable(tsk, trace); + + put_task_stack(tsk); + + return ret; } -EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable); #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */ #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index e6982ab21816..e52a8878c2fb 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c @@ -123,7 +123,7 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, (u64)len_high << 32 | len_low, advice); } -long sys_switch_endian(void) +SYSCALL_DEFINE0(switch_endian) { struct thread_info *ti; diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index db3bbb8744af..b18abb0c3dae 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -20,7 +20,9 @@ 10 common unlink sys_unlink 11 nospu execve sys_execve compat_sys_execve 12 common chdir sys_chdir -13 common time sys_time compat_sys_time +13 32 time sys_time32 +13 64 time sys_time +13 spu time sys_time 14 common mknod sys_mknod 15 common chmod sys_chmod 16 common lchown sys_lchown @@ -36,14 +38,17 @@ 22 spu umount sys_ni_syscall 23 common setuid sys_setuid 24 common getuid sys_getuid -25 common stime sys_stime compat_sys_stime +25 32 stime sys_stime32 +25 64 stime sys_stime +25 spu stime sys_stime 26 nospu ptrace sys_ptrace compat_sys_ptrace 27 common alarm sys_alarm 28 32 oldfstat sys_fstat sys_ni_syscall 28 64 oldfstat sys_ni_syscall 28 spu oldfstat sys_ni_syscall 29 nospu pause sys_pause -30 nospu utime sys_utime compat_sys_utime +30 32 utime sys_utime32 +30 64 utime sys_utime 31 common stty sys_ni_syscall 32 common gtty 
sys_ni_syscall 33 common access sys_access @@ -157,7 +162,9 @@ 121 common setdomainname sys_setdomainname 122 common uname sys_newuname 123 common modify_ldt sys_ni_syscall -124 common adjtimex sys_adjtimex compat_sys_adjtimex +124 32 adjtimex sys_adjtimex_time32 +124 64 adjtimex sys_adjtimex +124 spu adjtimex sys_adjtimex 125 common mprotect sys_mprotect 126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask 126 64 sigprocmask sys_ni_syscall @@ -198,8 +205,12 @@ 158 common sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep compat_sys_nanosleep +161 32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +161 64 sched_rr_get_interval sys_sched_rr_get_interval +161 spu sched_rr_get_interval sys_sched_rr_get_interval +162 32 nanosleep sys_nanosleep_time32 +162 64 nanosleep sys_nanosleep +162 spu nanosleep sys_nanosleep 163 common mremap sys_mremap 164 common setresuid sys_setresuid 165 common getresuid sys_getresuid @@ -213,7 +224,8 @@ 173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending -176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +176 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +176 64 rt_sigtimedwait sys_rt_sigtimedwait 177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 179 common pread64 sys_pread64 compat_sys_pread64 @@ -260,7 +272,9 @@ 218 common removexattr sys_removexattr 219 common lremovexattr sys_lremovexattr 220 common fremovexattr sys_fremovexattr -221 common futex sys_futex compat_sys_futex +221 32 futex sys_futex_time32 +221 64 futex sys_futex +221 spu futex sys_futex 222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity # 224 unused @@ -268,7 +282,9 @@ 226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64 227 common io_setup sys_io_setup compat_sys_io_setup 228 common io_destroy sys_io_destroy -229 common io_getevents sys_io_getevents compat_sys_io_getevents +229 32 io_getevents sys_io_getevents_time32 +229 64 io_getevents sys_io_getevents +229 spu io_getevents sys_io_getevents 230 common io_submit sys_io_submit compat_sys_io_submit 231 common io_cancel sys_io_cancel 232 nospu set_tid_address sys_set_tid_address @@ -280,19 +296,33 @@ 238 common epoll_wait sys_epoll_wait 239 common remap_file_pages sys_remap_file_pages 240 common timer_create sys_timer_create compat_sys_timer_create -241 common timer_settime sys_timer_settime compat_sys_timer_settime -242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +241 32 timer_settime sys_timer_settime32 +241 64 timer_settime sys_timer_settime +241 spu timer_settime sys_timer_settime +242 32 timer_gettime sys_timer_gettime32 +242 64 timer_gettime sys_timer_gettime +242 spu timer_gettime sys_timer_gettime 243 common timer_getoverrun sys_timer_getoverrun 244 common timer_delete sys_timer_delete -245 common clock_settime sys_clock_settime compat_sys_clock_settime -246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime -247 common clock_getres sys_clock_getres compat_sys_clock_getres 
-248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +245 32 clock_settime sys_clock_settime32 +245 64 clock_settime sys_clock_settime +245 spu clock_settime sys_clock_settime +246 32 clock_gettime sys_clock_gettime32 +246 64 clock_gettime sys_clock_gettime +246 spu clock_gettime sys_clock_gettime +247 32 clock_getres sys_clock_getres_time32 +247 64 clock_getres sys_clock_getres +247 spu clock_getres sys_clock_getres +248 32 clock_nanosleep sys_clock_nanosleep_time32 +248 64 clock_nanosleep sys_clock_nanosleep +248 spu clock_nanosleep sys_clock_nanosleep 249 32 swapcontext ppc_swapcontext ppc32_swapcontext 249 64 swapcontext ppc64_swapcontext 249 spu swapcontext sys_ni_syscall 250 common tgkill sys_tgkill -251 common utimes sys_utimes compat_sys_utimes +251 32 utimes sys_utimes_time32 +251 64 utimes sys_utimes +251 spu utimes sys_utimes 252 common statfs64 sys_statfs64 compat_sys_statfs64 253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 254 32 fadvise64_64 ppc_fadvise64_64 @@ -308,8 +338,10 @@ 261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy 262 nospu mq_open sys_mq_open compat_sys_mq_open 263 nospu mq_unlink sys_mq_unlink -264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend -265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +264 32 mq_timedsend sys_mq_timedsend_time32 +264 64 mq_timedsend sys_mq_timedsend +265 32 mq_timedreceive sys_mq_timedreceive_time32 +265 64 mq_timedreceive sys_mq_timedreceive 266 nospu mq_notify sys_mq_notify compat_sys_mq_notify 267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 268 nospu kexec_load sys_kexec_load compat_sys_kexec_load @@ -324,8 +356,10 @@ 277 nospu inotify_rm_watch sys_inotify_rm_watch 278 nospu spu_run sys_spu_run 279 nospu spu_create sys_spu_create -280 nospu pselect6 sys_pselect6 compat_sys_pselect6 -281 nospu ppoll sys_ppoll compat_sys_ppoll +280 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +280 64 pselect6 sys_pselect6 +281 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +281 64 ppoll sys_ppoll 282 common unshare sys_unshare 283 common splice sys_splice 284 common tee sys_tee @@ -334,7 +368,9 @@ 287 common mkdirat sys_mkdirat 288 common mknodat sys_mknodat 289 common fchownat sys_fchownat -290 common futimesat sys_futimesat compat_sys_futimesat +290 32 futimesat sys_futimesat_time32 +290 64 futimesat sys_futimesat +290 spu utimesat sys_futimesat 291 32 fstatat64 sys_fstatat64 291 64 newfstatat sys_newfstatat 291 spu newfstatat sys_newfstatat @@ -350,15 +386,21 @@ 301 common move_pages sys_move_pages compat_sys_move_pages 302 common getcpu sys_getcpu 303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait -304 common utimensat sys_utimensat compat_sys_utimensat +304 32 utimensat sys_utimensat_time32 +304 64 utimensat sys_utimensat +304 spu utimensat sys_utimensat 305 common signalfd sys_signalfd compat_sys_signalfd 306 common timerfd_create sys_timerfd_create 307 common eventfd sys_eventfd 308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2 309 nospu fallocate sys_fallocate compat_sys_fallocate 310 nospu subpage_prot sys_subpage_prot -311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime -312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +311 32 timerfd_settime sys_timerfd_settime32 +311 64 timerfd_settime sys_timerfd_settime +311 spu timerfd_settime sys_timerfd_settime +312 32 timerfd_gettime sys_timerfd_gettime32 +312 64 timerfd_gettime sys_timerfd_gettime 
+312 spu timerfd_gettime sys_timerfd_gettime 313 common signalfd4 sys_signalfd4 compat_sys_signalfd4 314 common eventfd2 sys_eventfd2 315 common epoll_create1 sys_epoll_create1 @@ -389,11 +431,15 @@ 340 common getsockopt sys_getsockopt compat_sys_getsockopt 341 common sendmsg sys_sendmsg compat_sys_sendmsg 342 common recvmsg sys_recvmsg compat_sys_recvmsg -343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +343 64 recvmmsg sys_recvmmsg +343 spu recvmmsg sys_recvmmsg 344 common accept4 sys_accept4 345 common name_to_handle_at sys_name_to_handle_at 346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at -347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +347 32 clock_adjtime sys_clock_adjtime32 +347 64 clock_adjtime sys_clock_adjtime +347 spu clock_adjtime sys_clock_adjtime 348 common syncfs sys_syncfs 349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 350 common setns sys_setns @@ -414,6 +460,7 @@ 363 spu switch_endian sys_ni_syscall 364 common userfaultfd sys_userfaultfd 365 common membarrier sys_membarrier +# 366-377 originally left for IPC, now unused 378 nospu mlock2 sys_mlock2 379 nospu copy_file_range sys_copy_file_range 380 common preadv2 sys_preadv2 compat_sys_preadv2 @@ -424,4 +471,37 @@ 385 nospu pkey_free sys_pkey_free 386 nospu pkey_mprotect sys_pkey_mprotect 387 nospu rseq sys_rseq -388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents +388 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +388 64 io_pgetevents sys_io_pgetevents +# room for arch specific syscalls +392 64 semtimedop sys_semtimedop +393 common semget sys_semget +394 common semctl sys_semctl compat_sys_semctl +395 common shmget sys_shmget +396 common shmctl sys_shmctl compat_sys_shmctl +397 common shmat sys_shmat compat_sys_shmat +398 common shmdt sys_shmdt +399 common msgget sys_msgget +400 common msgsnd sys_msgsnd compat_sys_msgsnd +401 common msgrcv sys_msgrcv compat_sys_msgrcv +402 common msgctl sys_msgctl compat_sys_msgctl +403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime +404 32 clock_settime64 sys_clock_settime sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime +409 32 timer_settime64 sys_timer_settime sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval diff --git a/arch/powerpc/kernel/syscalls/syscalltbl.sh b/arch/powerpc/kernel/syscalls/syscalltbl.sh index fd620490a542..f7393a7b18aa 100644 --- a/arch/powerpc/kernel/syscalls/syscalltbl.sh +++ 
b/arch/powerpc/kernel/syscalls/syscalltbl.sh @@ -13,10 +13,10 @@ emit() { t_entry="$3" while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall, )\n" "${t_nxt}" + printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" t_nxt=$((t_nxt+1)) done - printf "__SYSCALL(%s,%s, )\n" "${t_nxt}" "${t_entry}" + printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" } grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 23265a28740b..02f28faba125 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S @@ -25,11 +25,11 @@ .globl sys_call_table sys_call_table: #ifdef CONFIG_PPC64 -#define __SYSCALL(nr, entry, nargs) .8byte DOTSYM(entry) +#define __SYSCALL(nr, entry) .8byte DOTSYM(entry) #include #undef __SYSCALL #else -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL(nr, entry) .long entry #include #undef __SYSCALL #endif @@ -38,7 +38,7 @@ sys_call_table: .globl compat_sys_call_table compat_sys_call_table: #define compat_sys_sigsuspend sys_sigsuspend -#define __SYSCALL(nr, entry, nargs) .8byte DOTSYM(entry) +#define __SYSCALL(nr, entry) .8byte DOTSYM(entry) #include #undef __SYSCALL #endif diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 3646affae963..bc0503ef9c9c 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -57,7 +57,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile index b1725ad3e13d..858503775c58 100644 --- a/arch/powerpc/kernel/trace/Makefile +++ b/arch/powerpc/kernel/trace/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_TRACING) += trace_clock.o obj-$(CONFIG_PPC64) += $(obj64-y) obj-$(CONFIG_PPC32) += $(obj32-y) -# Disable GCOV & sanitizers in odd or sensitive code +# Disable GCOV, KCOV & sanitizers in odd or sensitive code GCOV_PROFILE_ftrace.o := n +KCOV_INSTRUMENT_ftrace.o := n UBSAN_SANITIZE_ftrace.o := n diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index 32476a6e4e9c..01b1224add49 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S @@ -229,7 +229,7 @@ ftrace_call: * - r0, r11 & r12 are free */ livepatch_handler: - CURRENT_THREAD_INFO(r12, r1) + ld r12, PACA_THREAD_INFO(r13) /* Allocate 3 x 8 bytes */ ld r11, TI_livepatch_sp(r12) @@ -256,7 +256,7 @@ livepatch_handler: * restore it. */ - CURRENT_THREAD_INFO(r12, r1) + ld r12, PACA_THREAD_INFO(r13) ld r11, TI_livepatch_sp(r12) @@ -273,7 +273,7 @@ livepatch_handler: ld r2, -24(r11) /* Pop livepatch stack frame */ - CURRENT_THREAD_INFO(r12, r1) + ld r12, PACA_THREAD_INFO(r13) subi r11, r11, 24 std r11, TI_livepatch_sp(r12) diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 64936b60d521..a21200c6aaea 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -257,24 +257,17 @@ static int __die(const char *str, struct pt_regs *regs, long err) { printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter); - if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) - printk("LE "); - else - printk("BE "); - - if (IS_ENABLED(CONFIG_PREEMPT)) - pr_cont("PREEMPT "); - - if (IS_ENABLED(CONFIG_SMP)) - pr_cont("SMP NR_CPUS=%d ", NR_CPUS); - - if (debug_pagealloc_enabled()) - pr_cont("DEBUG_PAGEALLOC "); - - if (IS_ENABLED(CONFIG_NUMA)) - pr_cont("NUMA "); - - pr_cont("%s\n", ppc_md.name ? 
ppc_md.name : ""); + printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s%s %s\n", + IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE", + PAGE_SIZE / 1024, + early_radix_enabled() ? " MMU=Radix" : "", + early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ? " MMU=Hash" : "", + IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "", + IS_ENABLED(CONFIG_SMP) ? " SMP" : "", + IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "", + debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "", + IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "", + ppc_md.name ? ppc_md.name : ""); if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP) return 1; @@ -376,16 +369,101 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) force_sig_fault(signr, code, (void __user *)addr, current); } +/* + * The interrupt architecture has a quirk in that the HV interrupts excluding + * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing + * that an interrupt handler must do is save off a GPR into a scratch register, + * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch. + * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing + * that it is non-reentrant, which leads to random data corruption. + * + * The solution is for NMI interrupts in HV mode to check if they originated + * from these critical HV interrupt regions. If so, then mark them not + * recoverable. + * + * An alternative would be for HV NMIs to use SPRG for scratch to avoid the + * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux + * guests should always have MSR[RI]=0 when its scratch SPRG is in use, so + * that would work. However any other guest OS that may have the SPRG live + * and MSR[RI]=1 could encounter silent corruption. + * + * Builds that do not support KVM could take this second option to increase + * the recoverability of NMIs. + */ +void hv_nmi_check_nonrecoverable(struct pt_regs *regs) +{ +#ifdef CONFIG_PPC_POWERNV + unsigned long kbase = (unsigned long)_stext; + unsigned long nip = regs->nip; + + if (!(regs->msr & MSR_RI)) + return; + if (!(regs->msr & MSR_HV)) + return; + if (regs->msr & MSR_PR) + return; + + /* + * Now test if the interrupt has hit a range that may be using + * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The + * problem ranges all run un-relocated. Test real and virt modes + * at the same time by droping the high bit of the nip (virt mode + * entry points still have the +0x4000 offset). + */ + nip &= ~0xc000000000000000ULL; + if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600)) + goto nonrecoverable; + if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00)) + goto nonrecoverable; + if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0)) + goto nonrecoverable; + if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0)) + goto nonrecoverable; + + /* Trampoline code runs un-relocated so subtract kbase. */ + if (nip >= (unsigned long)(start_real_trampolines - kbase) && + nip < (unsigned long)(end_real_trampolines - kbase)) + goto nonrecoverable; + if (nip >= (unsigned long)(start_virt_trampolines - kbase) && + nip < (unsigned long)(end_virt_trampolines - kbase)) + goto nonrecoverable; + return; + +nonrecoverable: + regs->msr &= ~MSR_RI; +#endif +} + void system_reset_exception(struct pt_regs *regs) { + unsigned long hsrr0, hsrr1; + bool nested = in_nmi(); + bool saved_hsrrs = false; + /* * Avoid crashes in case of nested NMI exceptions. 
Recoverability * is determined by RI and in_nmi */ - bool nested = in_nmi(); if (!nested) nmi_enter(); + /* + * System reset can interrupt code where HSRRs are live and MSR[RI]=1. + * The system reset interrupt itself may clobber HSRRs (e.g., to call + * OPAL), so save them here and restore them before returning. + * + * Machine checks don't need to save HSRRs, as the real mode handler + * is careful to avoid them, and the regular handler is not delivered + * as an NMI. + */ + if (cpu_has_feature(CPU_FTR_HVMODE)) { + hsrr0 = mfspr(SPRN_HSRR0); + hsrr1 = mfspr(SPRN_HSRR1); + saved_hsrrs = true; + } + + hv_nmi_check_nonrecoverable(regs); + __this_cpu_inc(irq_stat.sreset_irqs); /* See if any machine dependent calls */ @@ -433,6 +511,11 @@ out: if (!(regs->msr & MSR_RI)) nmi_panic(regs, "Unrecoverable System Reset"); + if (saved_hsrrs) { + mtspr(SPRN_HSRR0, hsrr0); + mtspr(SPRN_HSRR1, hsrr1); + } + if (!nested) nmi_exit(); @@ -763,15 +846,15 @@ void machine_check_exception(struct pt_regs *regs) if (check_io_access(regs)) goto bail; - /* Must die if the interrupt is not recoverable */ - if (!(regs->msr & MSR_RI)) - nmi_panic(regs, "Unrecoverable Machine check"); - if (!nested) nmi_exit(); die("Machine check", regs, SIGBUS); + /* Must die if the interrupt is not recoverable */ + if (!(regs->msr & MSR_RI)) + nmi_panic(regs, "Unrecoverable Machine check"); + return; bail: @@ -1542,8 +1625,8 @@ bail: void StackOverflow(struct pt_regs *regs) { - printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n", - current, regs->gpr[1]); + pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n", + current->comm, task_pid_nr(current), regs->gpr[1]); debugger(regs); show_regs(regs); panic("kernel stack overflow"); diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 7cc38b5b58bc..8db4891acdaf 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -74,7 +74,7 @@ void __init udbg_early_init(void) #endif #ifdef CONFIG_PPC_EARLY_DEBUG - console_loglevel = 10; + console_loglevel = CONSOLE_LOGLEVEL_DEBUG; register_early_udbg_console(); #endif diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 7725a9714736..a31b6234fcd7 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -798,7 +798,6 @@ static int __init vdso_init(void) BUG_ON(vdso32_pagelist == NULL); for (i = 0; i < vdso32_pages; i++) { struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); - ClearPageReserved(pg); get_page(pg); vdso32_pagelist[i] = pg; } @@ -812,7 +811,6 @@ static int __init vdso_init(void) BUG_ON(vdso64_pagelist == NULL); for (i = 0; i < vdso64_pages; i++) { struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); - ClearPageReserved(pg); get_page(pg); vdso64_pagelist[i] = pg; } diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 50112d4473bb..ce199f6e4256 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile @@ -23,6 +23,7 @@ targets := $(obj-vdso32) vdso32.so vdso32.so.dbg obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) GCOV_PROFILE := n +KCOV_INSTRUMENT := n UBSAN_SANITIZE := n ccflags-y := -shared -fno-common -fno-builtin diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 69cecb346269..28e7d112aa2f 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile @@ -9,6 +9,7 @@ targets := $(obj-vdso64) vdso64.so vdso64.so.dbg obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) GCOV_PROFILE 
:= n +KCOV_INSTRUMENT := n UBSAN_SANITIZE := n ccflags-y := -shared -fno-common -fno-builtin diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index ad1c77f71f54..060a1acd7c6d 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -12,11 +12,8 @@ #include #include -#if defined(CONFIG_STRICT_KERNEL_RWX) && !defined(CONFIG_PPC32) -#define STRICT_ALIGN_SIZE (1 << 24) -#else -#define STRICT_ALIGN_SIZE PAGE_SIZE -#endif +#define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT) +#define ETEXT_ALIGN_SIZE (1 << CONFIG_ETEXT_SHIFT) ENTRY(_stext) @@ -86,11 +83,11 @@ SECTIONS #ifdef CONFIG_PPC64 /* - * BLOCK(0) overrides the default output section alignment because + * ALIGN(0) overrides the default output section alignment because * this needs to start right after .head.text in order for fixed * section placement to work. */ - .text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) { + .text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) { #ifdef CONFIG_LD_HEAD_STUB_CATCH KEEP(*(.linker_stub_catch)); . = . ; @@ -131,7 +128,7 @@ SECTIONS } :kernel - . = ALIGN(PAGE_SIZE); + . = ALIGN(ETEXT_ALIGN_SIZE); _etext = .; PROVIDE32 (etext = .); @@ -319,6 +316,7 @@ SECTIONS *(.sdata2) *(.got.plt) *(.got) *(.plt) + *(.branch_lt) } #else .data : AT(ADDR(.data) - LOAD_OFFSET) { diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 64f1135e7732..3223aec88b2c 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -10,11 +10,6 @@ common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o -CFLAGS_e500_mmu.o := -I. -CFLAGS_e500_mmu_host.o := -I. -CFLAGS_emulate.o := -I. -CFLAGS_emulate_loadstore.o := -I. - common-objs-y += powerpc.o emulate_loadstore.o obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index bd1a677dd9e4..9a7dadbe1f17 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -192,6 +192,13 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) } EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio); +void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags) +{ + /* might as well deliver this straight away */ + kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags); +} +EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check); + void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) { /* might as well deliver this straight away */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 5a066fc299e1..a3d5318f5d1e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1215,6 +1215,22 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_MACHINE_CHECK: + /* Print the MCE event to host console. */ + machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); + + /* + * If the guest can do FWNMI, exit to userspace so it can + * deliver a FWNMI to the guest. + * Otherwise we synthesize a machine check for the guest + * so that it knows that the machine check occurred. 
+ */ + if (!vcpu->kvm->arch.fwnmi_enabled) { + ulong flags = vcpu->arch.shregs.msr & 0x083c0000; + kvmppc_core_queue_machine_check(vcpu, flags); + r = RESUME_GUEST; + break; + } + /* Exit to guest with KVM_EXIT_NMI as exit reason */ run->exit_reason = KVM_EXIT_NMI; run->hw.hardware_exit_reason = vcpu->arch.trap; @@ -1227,8 +1243,6 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; r = RESUME_HOST; - /* Print the MCE event to host console. */ - machine_check_print_event_info(&vcpu->arch.mce_evt, false); break; case BOOK3S_INTERRUPT_PROGRAM: { @@ -1392,7 +1406,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) /* Pass the machine check to the L1 guest */ r = RESUME_HOST; /* Print the MCE event to host console. */ - machine_check_print_event_info(&vcpu->arch.mce_evt, false); + machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); break; /* * We get these next two if the guest accesses a page which it thinks @@ -3455,6 +3469,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long host_dscr = mfspr(SPRN_DSCR); unsigned long host_tidr = mfspr(SPRN_TIDR); unsigned long host_iamr = mfspr(SPRN_IAMR); + unsigned long host_amr = mfspr(SPRN_AMR); s64 dec; u64 tb; int trap, save_pmu; @@ -3571,13 +3586,15 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, mtspr(SPRN_PSPB, 0); mtspr(SPRN_WORT, 0); - mtspr(SPRN_AMR, 0); mtspr(SPRN_UAMOR, 0); mtspr(SPRN_DSCR, host_dscr); mtspr(SPRN_TIDR, host_tidr); mtspr(SPRN_IAMR, host_iamr); mtspr(SPRN_PSPB, 0); + if (host_amr != vcpu->arch.amr) + mtspr(SPRN_AMR, host_amr); + msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); store_fp_state(&vcpu->arch.fp); #ifdef CONFIG_ALTIVEC diff --git a/arch/powerpc/kvm/book3s_hv_hmi.c b/arch/powerpc/kvm/book3s_hv_hmi.c index e3f738eb1cac..64b5011475c7 100644 --- a/arch/powerpc/kvm/book3s_hv_hmi.c +++ b/arch/powerpc/kvm/book3s_hv_hmi.c @@ -24,6 +24,7 @@ #include #include #include +#include void wait_for_subcore_guest_exit(void) { diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index 0787f12c1a1b..8c24c3bea0bf 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -66,10 +66,8 @@ static void reload_slb(struct kvm_vcpu *vcpu) /* * On POWER7, see if we can handle a machine check that occurred inside * the guest in real mode, without switching to the host partition. - * - * Returns: 0 => exit guest, 1 => deliver machine check to guest */ -static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) +static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) { unsigned long srr1 = vcpu->arch.shregs.msr; struct machine_check_event mce_evt; @@ -111,52 +109,24 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) } /* - * See if we have already handled the condition in the linux host. - * We assume that if the condition is recovered then linux host - * will have generated an error log event that we will pick - * up and log later. - * Don't release mce event now. We will queue up the event so that - * we can log the MCE event info on host console. + * Now get the event and stash it in the vcpu struct so it can + * be handled by the primary thread in virtual mode. We can't + * call machine_check_queue_event() here if we are running on + * an offline secondary thread. 
*/ - if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE)) - goto out; - - if (mce_evt.version == MCE_V1 && - (mce_evt.severity == MCE_SEV_NO_ERROR || - mce_evt.disposition == MCE_DISPOSITION_RECOVERED)) - handled = 1; - -out: - /* - * For guest that supports FWNMI capability, hook the MCE event into - * vcpu structure. We are going to exit the guest with KVM_EXIT_NMI - * exit reason. On our way to exit we will pull this event from vcpu - * structure and print it from thread 0 of the core/subcore. - * - * For guest that does not support FWNMI capability (old QEMU): - * We are now going enter guest either through machine check - * interrupt (for unhandled errors) or will continue from - * current HSRR0 (for handled errors) in guest. Hence - * queue up the event so that we can log it from host console later. - */ - if (vcpu->kvm->arch.fwnmi_enabled) { - /* - * Hook up the mce event on to vcpu structure. - * First clear the old event. - */ - memset(&vcpu->arch.mce_evt, 0, sizeof(vcpu->arch.mce_evt)); - if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) { - vcpu->arch.mce_evt = mce_evt; - } - } else - machine_check_queue_event(); + if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) { + if (handled && mce_evt.version == MCE_V1) + mce_evt.disposition = MCE_DISPOSITION_RECOVERED; + } else { + memset(&mce_evt, 0, sizeof(mce_evt)); + } - return handled; + vcpu->arch.mce_evt = mce_evt; } -long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) +void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu) { - return kvmppc_realmode_mc_power7(vcpu); + kvmppc_realmode_mc_power7(vcpu); } /* Check if dynamic split is in force and return subcore size accordingly. */ diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 9b8d50a7cbaf..25043b50cb30 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -58,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) #define STACK_SLOT_DAWR (SFS-56) #define STACK_SLOT_DAWRX (SFS-64) #define STACK_SLOT_HFSCR (SFS-72) +#define STACK_SLOT_AMR (SFS-80) +#define STACK_SLOT_UAMOR (SFS-88) /* the following is used by the P9 short path */ #define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */ @@ -726,11 +728,9 @@ BEGIN_FTR_SECTION mfspr r5, SPRN_TIDR mfspr r6, SPRN_PSSCR mfspr r7, SPRN_PID - mfspr r8, SPRN_IAMR std r5, STACK_SLOT_TID(r1) std r6, STACK_SLOT_PSSCR(r1) std r7, STACK_SLOT_PID(r1) - std r8, STACK_SLOT_IAMR(r1) mfspr r5, SPRN_HFSCR std r5, STACK_SLOT_HFSCR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) @@ -738,11 +738,18 @@ BEGIN_FTR_SECTION mfspr r5, SPRN_CIABR mfspr r6, SPRN_DAWR mfspr r7, SPRN_DAWRX + mfspr r8, SPRN_IAMR std r5, STACK_SLOT_CIABR(r1) std r6, STACK_SLOT_DAWR(r1) std r7, STACK_SLOT_DAWRX(r1) + std r8, STACK_SLOT_IAMR(r1) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + mfspr r5, SPRN_AMR + std r5, STACK_SLOT_AMR(r1) + mfspr r6, SPRN_UAMOR + std r6, STACK_SLOT_UAMOR(r1) + BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ @@ -1631,22 +1638,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_PSPB, r0 mtspr SPRN_WORT, r0 BEGIN_FTR_SECTION - mtspr SPRN_IAMR, r0 mtspr SPRN_TCSCR, r0 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */ li r0, 1 sldi r0, r0, 31 mtspr SPRN_MMCRS, r0 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) -8: - /* Save and reset AMR and UAMOR before turning on the MMU */ + /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */ + ld r8, STACK_SLOT_IAMR(r1) + mtspr SPRN_IAMR, 
r8 + +8: /* Power7 jumps back in here */ mfspr r5,SPRN_AMR mfspr r6,SPRN_UAMOR std r5,VCPU_AMR(r9) std r6,VCPU_UAMOR(r9) - li r6,0 - mtspr SPRN_AMR,r6 + ld r5,STACK_SLOT_AMR(r1) + ld r6,STACK_SLOT_UAMOR(r1) + mtspr SPRN_AMR, r5 mtspr SPRN_UAMOR, r6 /* Switch DSCR back to host value */ @@ -1746,11 +1756,9 @@ BEGIN_FTR_SECTION ld r5, STACK_SLOT_TID(r1) ld r6, STACK_SLOT_PSSCR(r1) ld r7, STACK_SLOT_PID(r1) - ld r8, STACK_SLOT_IAMR(r1) mtspr SPRN_TIDR, r5 mtspr SPRN_PSSCR, r6 mtspr SPRN_PID, r7 - mtspr SPRN_IAMR, r8 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) #ifdef CONFIG_PPC_RADIX_MMU @@ -2826,49 +2834,15 @@ kvm_cede_exit: #endif /* CONFIG_KVM_XICS */ 3: b guest_exit_cont - /* Try to handle a machine check in real mode */ + /* Try to do machine check recovery in real mode */ machine_check_realmode: mr r3, r9 /* get vcpu pointer */ bl kvmppc_realmode_machine_check nop + /* all machine checks go to virtual mode for further handling */ ld r9, HSTATE_KVM_VCPU(r13) li r12, BOOK3S_INTERRUPT_MACHINE_CHECK - /* - * For the guest that is FWNMI capable, deliver all the MCE errors - * (handled/unhandled) by exiting the guest with KVM_EXIT_NMI exit - * reason. This new approach injects machine check errors in guest - * address space to guest with additional information in the form - * of RTAS event, thus enabling guest kernel to suitably handle - * such errors. - * - * For the guest that is not FWNMI capable (old QEMU) fallback - * to old behaviour for backward compatibility: - * Deliver unhandled/fatal (e.g. UE) MCE errors to guest either - * through machine check interrupt (set HSRR0 to 0x200). - * For handled errors (no-fatal), just go back to guest execution - * with current HSRR0. - * if we receive machine check with MSR(RI=0) then deliver it to - * guest as machine check causing guest to crash. - */ - ld r11, VCPU_MSR(r9) - rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */ - bne guest_exit_cont /* if so, exit to host */ - /* Check if guest is capable of handling NMI exit */ - ld r10, VCPU_KVM(r9) - lbz r10, KVM_FWNMI(r10) - cmpdi r10, 1 /* FWNMI capable? */ - beq guest_exit_cont /* if so, exit with KVM_EXIT_NMI. */ - - /* if not, fall through for backward compatibility. */ - andi. r10, r11, MSR_RI /* check for unrecoverable exception */ - beq 1f /* Deliver a machine check to guest */ - ld r10, VCPU_PC(r9) - cmpdi r3, 0 /* Did we handle MCE ? */ - bne 2f /* Continue guest execution. */ - /* If not, deliver a machine check. SRR0/1 are already set */ -1: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK - bl kvmppc_msr_interrupt -2: b fast_interrupt_c_return + b guest_exit_cont /* * Call C code to handle a HMI in real mode. 
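Editorial aside, not part of the patch: taken together, the book3s_hv_ras.c and book3s_hv.c hunks above move all guest machine-check decisions out of real mode, which is what the "all machine checks go to virtual mode for further handling" comment refers to. A condensed sketch of the resulting flow, paraphrased from those hunks only (locking and the remaining error paths omitted):

        /* Real mode, kvmppc_realmode_mc_power7(): only stash the event. */
        if (get_mce_event(&mce_evt, MCE_EVENT_RELEASE)) {
                if (handled && mce_evt.version == MCE_V1)
                        mce_evt.disposition = MCE_DISPOSITION_RECOVERED;
        } else {
                memset(&mce_evt, 0, sizeof(mce_evt));
        }
        vcpu->arch.mce_evt = mce_evt;

        /* Virtual mode, kvmppc_handle_exit_hv(): print, then pick delivery. */
        machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
        if (!vcpu->kvm->arch.fwnmi_enabled) {
                /* Old QEMU: synthesize a machine check inside the guest. */
                ulong flags = vcpu->arch.shregs.msr & 0x083c0000;
                kvmppc_core_queue_machine_check(vcpu, flags);
                r = RESUME_GUEST;
        } else {
                /* FWNMI-capable guest: exit to userspace with KVM_EXIT_NMI. */
                run->exit_reason = KVM_EXIT_NMI;
                r = RESUME_HOST;
        }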
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 3bf9fc6fd36c..79396e184bca 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -30,7 +30,8 @@ obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ obj64-$(CONFIG_SMP) += locks.o obj64-$(CONFIG_ALTIVEC) += vmx-helper.o -obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o +obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \ + test_emulate_step_exec_instr.o obj-y += checksum_$(BITS).o checksum_wrappers.o \ string_$(BITS).o memcmp_$(BITS).o diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index dedf88a76f58..ce180870bd52 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c @@ -15,6 +15,9 @@ void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask) p = kzalloc(size, mask); else { p = memblock_alloc(size, SMP_CACHE_BYTES); + if (!p) + panic("%s: Failed to allocate %zu bytes\n", __func__, + size); } return p; } diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index d81568f783e5..3d33fb509ef4 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1169,7 +1169,7 @@ static nokprobe_inline int trap_compare(long v1, long v2) int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, unsigned int instr) { - unsigned int opcode, ra, rb, rd, spr, u; + unsigned int opcode, ra, rb, rc, rd, spr, u; unsigned long int imm; unsigned long int val, val2; unsigned int mb, me, sh; @@ -1292,6 +1292,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, rd = (instr >> 21) & 0x1f; ra = (instr >> 16) & 0x1f; rb = (instr >> 11) & 0x1f; + rc = (instr >> 6) & 0x1f; switch (opcode) { #ifdef __powerpc64__ @@ -1305,6 +1306,38 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, goto trap; return 1; +#ifdef __powerpc64__ + case 4: + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + + switch (instr & 0x3f) { + case 48: /* maddhd */ + asm volatile(PPC_MADDHD(%0, %1, %2, %3) : + "=r" (op->val) : "r" (regs->gpr[ra]), + "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); + goto compute_done; + + case 49: /* maddhdu */ + asm volatile(PPC_MADDHDU(%0, %1, %2, %3) : + "=r" (op->val) : "r" (regs->gpr[ra]), + "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); + goto compute_done; + + case 51: /* maddld */ + asm volatile(PPC_MADDLD(%0, %1, %2, %3) : + "=r" (op->val) : "r" (regs->gpr[ra]), + "r" (regs->gpr[rb]), "r" (regs->gpr[rc])); + goto compute_done; + } + + /* + * There are other instructions from ISA 3.0 with the same + * primary opcode which do not have emulation support yet. 
+ */ + return -1; +#endif + case 7: /* mulli */ op->val = regs->gpr[ra] * (short) instr; goto compute_done; @@ -1671,10 +1704,23 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, (int) regs->gpr[rb]; goto arith_done; - +#ifdef __powerpc64__ + case 265: /* modud */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + op->val = regs->gpr[ra] % regs->gpr[rb]; + goto compute_done; +#endif case 266: /* add */ op->val = regs->gpr[ra] + regs->gpr[rb]; goto arith_done; + + case 267: /* moduw */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + op->val = (unsigned int) regs->gpr[ra] % + (unsigned int) regs->gpr[rb]; + goto compute_done; #ifdef __powerpc64__ case 457: /* divdu */ op->val = regs->gpr[ra] / regs->gpr[rb]; @@ -1695,6 +1741,42 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, (int) regs->gpr[rb]; goto arith_done; + case 755: /* darn */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + switch (ra & 0x3) { + case 0: + /* 32-bit conditioned */ + asm volatile(PPC_DARN(%0, 0) : "=r" (op->val)); + goto compute_done; + + case 1: + /* 64-bit conditioned */ + asm volatile(PPC_DARN(%0, 1) : "=r" (op->val)); + goto compute_done; + + case 2: + /* 64-bit raw */ + asm volatile(PPC_DARN(%0, 2) : "=r" (op->val)); + goto compute_done; + } + + return -1; +#ifdef __powerpc64__ + case 777: /* modsd */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + op->val = (long int) regs->gpr[ra] % + (long int) regs->gpr[rb]; + goto compute_done; +#endif + case 779: /* modsw */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + op->val = (int) regs->gpr[ra] % + (int) regs->gpr[rb]; + goto compute_done; + /* * Logical instructions @@ -1764,6 +1846,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, case 506: /* popcntd */ do_popcnt(regs, op, regs->gpr[rd], 64); goto logical_done_nocc; +#endif + case 538: /* cnttzw */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + val = (unsigned int) regs->gpr[rd]; + op->val = (val ? __builtin_ctz(val) : 32); + goto logical_done; +#ifdef __powerpc64__ + case 570: /* cnttzd */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + val = regs->gpr[rd]; + op->val = (val ? __builtin_ctzl(val) : 64); + goto logical_done; #endif case 922: /* extsh */ op->val = (signed short) regs->gpr[rd]; @@ -1866,6 +1962,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, op->xerval &= ~XER_CA; set_ca32(op, op->xerval & XER_CA); goto logical_done; + + case 890: /* extswsli with sh_5 = 0 */ + case 891: /* extswsli with sh_5 = 1 */ + if (!cpu_has_feature(CPU_FTR_ARCH_300)) + return -1; + op->type = COMPUTE + SETREG; + sh = rb | ((instr & 2) << 4); + val = (signed int) regs->gpr[rd]; + if (sh) + op->val = ROTATE(val, sh) & MASK64(0, 63 - sh); + else + op->val = val; + goto logical_done; + #endif /* __powerpc64__ */ /* diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c index 6c47daa61614..9992c1ea7a1d 100644 --- a/arch/powerpc/lib/test_emulate_step.c +++ b/arch/powerpc/lib/test_emulate_step.c @@ -1,5 +1,5 @@ /* - * Simple sanity test for emulate_step load/store instructions. + * Simple sanity tests for instruction emulation infrastructure. * * Copyright IBM Corp. 
2016 * @@ -14,6 +14,7 @@ #include #include #include +#include #define IMM_L(i) ((uintptr_t)(i) & 0xffff) @@ -48,7 +49,20 @@ ___PPC_RA(a) | ___PPC_RB(b)) #define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) #define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) +#define TEST_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b) | 0x1) +#define TEST_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b) | 0x1) + +#define MAX_SUBTESTS 16 +#define IGNORE_GPR(n) (0x1UL << (n)) +#define IGNORE_XER (0x1UL << 32) +#define IGNORE_CCR (0x1UL << 33) static void __init init_pt_regs(struct pt_regs *regs) { @@ -72,9 +86,15 @@ static void __init init_pt_regs(struct pt_regs *regs) msr_cached = true; } -static void __init show_result(char *ins, char *result) +static void __init show_result(char *mnemonic, char *result) { - pr_info("%-14s : %s\n", ins, result); + pr_info("%-14s : %s\n", mnemonic, result); +} + +static void __init show_result_with_descr(char *mnemonic, char *descr, + char *result) +{ + pr_info("%-14s : %-50s %s\n", mnemonic, descr, result); } static void __init test_ld(void) @@ -426,7 +446,7 @@ static void __init test_lxvd2x_stxvd2x(void) } #endif /* CONFIG_VSX */ -static int __init test_emulate_step(void) +static void __init run_tests_load_store(void) { test_ld(); test_lwz(); @@ -437,6 +457,513 @@ static int __init test_emulate_step(void) test_lfdx_stfdx(); test_lvx_stvx(); test_lxvd2x_stxvd2x(); +} + +struct compute_test { + char *mnemonic; + struct { + char *descr; + unsigned long flags; + unsigned int instr; + struct pt_regs regs; + } subtests[MAX_SUBTESTS + 1]; +}; + +static struct compute_test compute_tests[] = { + { + .mnemonic = "nop", + .subtests = { + { + .descr = "R0 = LONG_MAX", + .instr = PPC_INST_NOP, + .regs = { + .gpr[0] = LONG_MAX, + } + } + } + }, + { + .mnemonic = "add", + .subtests = { + { + .descr = "RA = LONG_MIN, RB = LONG_MIN", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = LONG_MIN, RB = LONG_MAX", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MAX, RB = LONG_MAX", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = LONG_MAX, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = ULONG_MAX", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = ULONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = 0x1", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = 0x1, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MIN", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MIN, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MAX", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = INT_MAX, RB = INT_MAX", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = INT_MAX, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = UINT_MAX", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = UINT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = 0x1", + .instr = TEST_ADD(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = 0x1, + } + 
} + } + }, + { + .mnemonic = "add.", + .subtests = { + { + .descr = "RA = LONG_MIN, RB = LONG_MIN", + .flags = IGNORE_CCR, + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = LONG_MIN, RB = LONG_MAX", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MAX, RB = LONG_MAX", + .flags = IGNORE_CCR, + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = LONG_MAX, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = ULONG_MAX", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = ULONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = 0x1", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = 0x1, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MIN", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MIN, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MAX", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = INT_MAX, RB = INT_MAX", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = INT_MAX, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = UINT_MAX", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = UINT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = 0x1", + .instr = TEST_ADD_DOT(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = 0x1, + } + } + } + }, + { + .mnemonic = "addc", + .subtests = { + { + .descr = "RA = LONG_MIN, RB = LONG_MIN", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = LONG_MIN, RB = LONG_MAX", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MAX, RB = LONG_MAX", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = LONG_MAX, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = ULONG_MAX", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = ULONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = 0x1", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = 0x1, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MIN", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MIN, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MAX", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = INT_MAX, RB = INT_MAX", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = INT_MAX, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = UINT_MAX", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = UINT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = 0x1", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = 0x1, + } + }, + { + .descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN", + .instr = TEST_ADDC(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN | (uint)INT_MIN, + .gpr[22] = LONG_MIN | (uint)INT_MIN, + } + } + } + }, + { + .mnemonic = "addc.", + .subtests = { + { + .descr = "RA = LONG_MIN, RB = LONG_MIN", + .flags = IGNORE_CCR, + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MIN, + } + }, + { + .descr = "RA = LONG_MIN, RB = 
LONG_MAX", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = LONG_MAX, RB = LONG_MAX", + .flags = IGNORE_CCR, + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = LONG_MAX, + .gpr[22] = LONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = ULONG_MAX", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = ULONG_MAX, + } + }, + { + .descr = "RA = ULONG_MAX, RB = 0x1", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = ULONG_MAX, + .gpr[22] = 0x1, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MIN", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MIN, + } + }, + { + .descr = "RA = INT_MIN, RB = INT_MAX", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = INT_MIN, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = INT_MAX, RB = INT_MAX", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = INT_MAX, + .gpr[22] = INT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = UINT_MAX", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = UINT_MAX, + } + }, + { + .descr = "RA = UINT_MAX, RB = 0x1", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = UINT_MAX, + .gpr[22] = 0x1, + } + }, + { + .descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN", + .instr = TEST_ADDC_DOT(20, 21, 22), + .regs = { + .gpr[21] = LONG_MIN | (uint)INT_MIN, + .gpr[22] = LONG_MIN | (uint)INT_MIN, + } + } + } + } +}; + +static int __init emulate_compute_instr(struct pt_regs *regs, + unsigned int instr) +{ + struct instruction_op op; + + if (!regs || !instr) + return -EINVAL; + + if (analyse_instr(&op, regs, instr) != 1 || + GETTYPE(op.type) != COMPUTE) { + pr_info("emulation failed, instruction = 0x%08x\n", instr); + return -EFAULT; + } + + emulate_update_regs(regs, &op); + return 0; +} + +static int __init execute_compute_instr(struct pt_regs *regs, + unsigned int instr) +{ + extern int exec_instr(struct pt_regs *regs); + extern s32 patch__exec_instr; + + if (!regs || !instr) + return -EINVAL; + + /* Patch the NOP with the actual instruction */ + patch_instruction_site(&patch__exec_instr, instr); + if (exec_instr(regs)) { + pr_info("execution failed, instruction = 0x%08x\n", instr); + return -EFAULT; + } + + return 0; +} + +#define gpr_mismatch(gprn, exp, got) \ + pr_info("GPR%u mismatch, exp = 0x%016lx, got = 0x%016lx\n", \ + gprn, exp, got) + +#define reg_mismatch(name, exp, got) \ + pr_info("%s mismatch, exp = 0x%016lx, got = 0x%016lx\n", \ + name, exp, got) + +static void __init run_tests_compute(void) +{ + unsigned long flags; + struct compute_test *test; + struct pt_regs *regs, exp, got; + unsigned int i, j, k, instr; + bool ignore_gpr, ignore_xer, ignore_ccr, passed; + + for (i = 0; i < ARRAY_SIZE(compute_tests); i++) { + test = &compute_tests[i]; + + for (j = 0; j < MAX_SUBTESTS && test->subtests[j].descr; j++) { + instr = test->subtests[j].instr; + flags = test->subtests[j].flags; + regs = &test->subtests[j].regs; + ignore_xer = flags & IGNORE_XER; + ignore_ccr = flags & IGNORE_CCR; + passed = true; + + memcpy(&exp, regs, sizeof(struct pt_regs)); + memcpy(&got, regs, sizeof(struct pt_regs)); + + /* + * Set a compatible MSR value explicitly to ensure + * that XER and CR bits are updated appropriately + */ + exp.msr = MSR_KERNEL; + got.msr = MSR_KERNEL; + + if (emulate_compute_instr(&got, instr) || + execute_compute_instr(&exp, instr)) { + passed = false; + goto print; + } + + /* 
Verify GPR values */ + for (k = 0; k < 32; k++) { + ignore_gpr = flags & IGNORE_GPR(k); + if (!ignore_gpr && exp.gpr[k] != got.gpr[k]) { + passed = false; + gpr_mismatch(k, exp.gpr[k], got.gpr[k]); + } + } + + /* Verify LR value */ + if (exp.link != got.link) { + passed = false; + reg_mismatch("LR", exp.link, got.link); + } + + /* Verify XER value */ + if (!ignore_xer && exp.xer != got.xer) { + passed = false; + reg_mismatch("XER", exp.xer, got.xer); + } + + /* Verify CR value */ + if (!ignore_ccr && exp.ccr != got.ccr) { + passed = false; + reg_mismatch("CR", exp.ccr, got.ccr); + } + +print: + show_result_with_descr(test->mnemonic, + test->subtests[j].descr, + passed ? "PASS" : "FAIL"); + } + } +} + +static int __init test_emulate_step(void) +{ + printk(KERN_INFO "Running instruction emulation self-tests ...\n"); + run_tests_load_store(); + run_tests_compute(); return 0; } diff --git a/arch/powerpc/lib/test_emulate_step_exec_instr.S b/arch/powerpc/lib/test_emulate_step_exec_instr.S new file mode 100644 index 000000000000..1580f34f4f4f --- /dev/null +++ b/arch/powerpc/lib/test_emulate_step_exec_instr.S @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Non-emulated single-stepping support (currently limited to basic integer + * computations) used to validate the instruction emulation infrastructure. + * + * Copyright (C) 2019 IBM Corporation + */ + +#include +#include +#include +#include + +/* int exec_instr(struct pt_regs *regs) */ +_GLOBAL(exec_instr) + + /* + * Stack frame layout (INT_FRAME_SIZE bytes) + * In-memory pt_regs (SP + STACK_FRAME_OVERHEAD) + * Scratch space (SP + 8) + * Back chain (SP + 0) + */ + + /* + * Allocate a new stack frame with enough space to hold the register + * states in an in-memory pt_regs and also create the back chain to + * the caller's stack frame. + */ + stdu r1, -INT_FRAME_SIZE(r1) + + /* + * Save non-volatile GPRs on stack. This includes TOC pointer (GPR2) + * and local variables (GPR14 to GPR31). The register for the pt_regs + * parameter (GPR3) is saved additionally to ensure that the resulting + * register state can still be saved even if GPR3 gets overwritten + * when loading the initial register state for the test instruction. + * The stack pointer (GPR1) and the thread pointer (GPR13) are not + * saved as these should not be modified anyway. + */ + SAVE_2GPRS(2, r1) + SAVE_NVGPRS(r1) + + /* + * Save LR on stack to ensure that the return address is available + * even if it gets overwritten by the test instruction. + */ + mflr r0 + std r0, _LINK(r1) + + /* + * Save CR on stack. For simplicity, the entire register is saved + * even though only fields 2 to 4 are non-volatile. + */ + mfcr r0 + std r0, _CCR(r1) + + /* + * Load register state for the test instruction without touching the + * critical non-volatile registers. The register state is passed as a + * pointer to a pt_regs instance. + */ + subi r31, r3, GPR0 + + /* Load LR from pt_regs */ + ld r0, _LINK(r31) + mtlr r0 + + /* Load CR from pt_regs */ + ld r0, _CCR(r31) + mtcr r0 + + /* Load XER from pt_regs */ + ld r0, _XER(r31) + mtxer r0 + + /* Load GPRs from pt_regs */ + REST_GPR(0, r31) + REST_10GPRS(2, r31) + REST_GPR(12, r31) + REST_NVGPRS(r31) + + /* Placeholder for the test instruction */ +1: nop + patch_site 1b patch__exec_instr + + /* + * Since GPR3 is overwritten, temporarily restore it back to its + * original state, i.e. the pointer to pt_regs, to ensure that the + * resulting register state can be saved. 
Before doing this, a copy + * of it is created in the scratch space which is used later on to + * save it to pt_regs. + */ + std r3, 8(r1) + REST_GPR(3, r1) + + /* Save resulting GPR state to pt_regs */ + subi r3, r3, GPR0 + SAVE_GPR(0, r3) + SAVE_GPR(2, r3) + SAVE_8GPRS(4, r3) + SAVE_GPR(12, r3) + SAVE_NVGPRS(r3) + + /* Save resulting LR to pt_regs */ + mflr r0 + std r0, _LINK(r3) + + /* Save resulting CR to pt_regs */ + mfcr r0 + std r0, _CCR(r3) + + /* Save resulting XER to pt_regs */ + mfxer r0 + std r0, _XER(r3) + + /* Restore resulting GPR3 from scratch space and save it to pt_regs */ + ld r0, 8(r1) + std r0, GPR3(r3) + + /* Set return value to denote execution success */ + li r3, 0 + + /* Continue */ + b 3f + + /* Set return value to denote execution failure */ +2: li r3, -EFAULT + + /* Restore the non-volatile GPRs from stack */ +3: REST_GPR(2, r1) + REST_NVGPRS(r1) + + /* Restore LR from stack to be able to return */ + ld r0, _LINK(r1) + mtlr r0 + + /* Restore CR from stack */ + ld r0, _CCR(r1) + mtcr r0 + + /* Tear down stack frame */ + addi r1, r1, INT_FRAME_SIZE + + /* Return */ + blr + + /* Setup exception table */ + EX_TABLE(1b, 2b) + +_ASM_NOKPROBE_SYMBOL(exec_instr) diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index 494df26c5988..a8794032f15f 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile @@ -17,4 +17,4 @@ obj-$(CONFIG_SPE) += math_efp.o CFLAGS_fabs.o = -fno-builtin-fabs CFLAGS_math.o = -fno-builtin-fabs -ccflags-y = -I. -Iinclude/math-emu -w +ccflags-y = -w diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 61ac468c87c6..b9cf6f8764b0 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c @@ -93,7 +93,7 @@ void __init MMU_init_hw(void) #define LARGE_PAGE_SIZE_16M (1<<24) #define LARGE_PAGE_SIZE_4M (1<<22) -unsigned long __init mmu_mapin_ram(unsigned long top) +unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long v, s, mapped; phys_addr_t p; diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index ea2b9af08a48..aad127acdbaa 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c @@ -170,7 +170,7 @@ void __init MMU_init_hw(void) flush_instruction_cache(); } -unsigned long __init mmu_mapin_ram(unsigned long top) +unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long addr; unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1); diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c index bfa503cff351..fe1f6443d57f 100644 --- a/arch/powerpc/mm/8xx_mmu.c +++ b/arch/powerpc/mm/8xx_mmu.c @@ -66,26 +66,22 @@ unsigned long p_block_mapped(phys_addr_t pa) void __init MMU_init_hw(void) { /* PIN up to the 3 first 8Mb after IMMR in DTLB table */ -#ifdef CONFIG_PIN_TLB_DATA - unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000; - unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY; -#ifdef CONFIG_PIN_TLB_IMMR - int i = 29; -#else - int i = 28; -#endif - unsigned long addr = 0; - unsigned long mem = total_lowmem; - - for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) { - mtspr(SPRN_MD_CTR, ctr | (i << 8)); - mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID); - mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID); - mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT); - addr += LARGE_PAGE_SIZE_8M; - mem -= LARGE_PAGE_SIZE_8M; + if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) { + unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000; + unsigned long flags = 0xf0 | MD_SPS16K | 
_PAGE_SH | _PAGE_DIRTY; + int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28; + unsigned long addr = 0; + unsigned long mem = total_lowmem; + + for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) { + mtspr(SPRN_MD_CTR, ctr | (i << 8)); + mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID); + mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID); + mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT); + addr += LARGE_PAGE_SIZE_8M; + mem -= LARGE_PAGE_SIZE_8M; + } } -#endif } static void __init mmu_mapin_immr(void) @@ -98,26 +94,36 @@ static void __init mmu_mapin_immr(void) map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG); } -static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped) +static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped) { modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16); } -unsigned long __init mmu_mapin_ram(unsigned long top) +static void mmu_patch_addis(s32 *site, long simm) +{ + unsigned int instr = *(unsigned int *)patch_site_addr(site); + + instr &= 0xffff0000; + instr |= ((unsigned long)simm) >> 16; + patch_instruction_site(site, instr); +} + +unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { unsigned long mapped; if (__map_without_ltlbs) { mapped = 0; mmu_mapin_immr(); -#ifndef CONFIG_PIN_TLB_IMMR - patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP); -#endif -#ifndef CONFIG_PIN_TLB_TEXT - mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0); -#endif + if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR)) + patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP); + if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) + mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0); } else { mapped = top & ~(LARGE_PAGE_SIZE_8M - 1); + if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) + mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, + _ALIGN(__pa(_einittext), 8 << 20)); } mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped); @@ -138,6 +144,26 @@ unsigned long __init mmu_mapin_ram(unsigned long top) return mapped; } +void mmu_mark_initmem_nx(void) +{ + if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23) + mmu_patch_addis(&patch__itlbmiss_linmem_top8, + -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1))); + if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) + mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext)); +} + +#ifdef CONFIG_STRICT_KERNEL_RWX +void mmu_mark_rodata_ro(void) +{ + if (CONFIG_DATA_SHIFT < 23) + mmu_patch_addis(&patch__dtlbmiss_romem_top8, + -__pa(((unsigned long)_sinittext) & + ~(LARGE_PAGE_SIZE_8M - 1))); + mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext)); +} +#endif + void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { @@ -146,8 +172,8 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base, */ BUG_ON(first_memblock_base != 0); - /* 8xx can only access 24MB at the moment */ - memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000)); + /* 8xx can only access 32MB at the moment */ + memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000)); } /* @@ -162,14 +188,11 @@ void set_context(unsigned long id, pgd_t *pgd) { s16 offset = (s16)(__pa(swapper_pg_dir)); -#ifdef CONFIG_BDI_SWITCH - pgd_t **ptr = *(pgd_t ***)(KERNELBASE + 0xf0); - /* Context switch the PTE pointer for the Abatron BDI2000. * The PGDIR is passed as second argument. 
*/ - *(ptr + 1) = pgd; -#endif + if (IS_ENABLED(CONFIG_BDI_SWITCH)) + abatron_pteptrs[1] = pgd; /* Register M_TWB will contain base address of level 1 table minus the * lower part of the kernel PGDIR base address, so that all accesses to diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index f965fc33a8b7..d52ec118e09d 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -45,13 +45,10 @@ obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o obj-$(CONFIG_HIGHMEM) += highmem.o obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o -obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o -ifdef CONFIG_PPC_PTDUMP -obj-$(CONFIG_4xx) += dump_linuxpagetables-generic.o -obj-$(CONFIG_PPC_8xx) += dump_linuxpagetables-8xx.o -obj-$(CONFIG_PPC_BOOK3E_MMU) += dump_linuxpagetables-generic.o -obj-$(CONFIG_PPC_BOOK3S_32) += dump_linuxpagetables-generic.o dump_bats.o dump_sr.o -obj-$(CONFIG_PPC_BOOK3S_64) += dump_linuxpagetables-book3s64.o -endif -obj-$(CONFIG_PPC_HTDUMP) += dump_hashpagetable.o +obj-$(CONFIG_PPC_PTDUMP) += ptdump/ obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o + +# Disable kcov instrumentation on sensitive code +# This is necessary for booting with kcov enabled on book3e machines +KCOV_INSTRUMENT_tlb_nohash.o := n +KCOV_INSTRUMENT_fsl_booke_mmu.o := n diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index e955539686a4..b5d2658c26af 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -151,8 +152,8 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi * Allocate DMA-coherent memory space and return both the kernel remapped * virtual and bus address for that space. */ -void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp, unsigned long attrs) { struct page *page; struct ppc_vm_region *c; @@ -253,7 +254,7 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, /* * free a page as defined by the above mapping. */ -void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr, +void arch_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { struct ppc_vm_region *c; @@ -313,7 +314,7 @@ void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr, /* * make an area consistent. */ -void __dma_sync(void *vaddr, size_t size, int direction) +static void __dma_sync(void *vaddr, size_t size, int direction) { unsigned long start = (unsigned long)vaddr; unsigned long end = start + size; @@ -339,7 +340,6 @@ void __dma_sync(void *vaddr, size_t size, int direction) break; } } -EXPORT_SYMBOL(__dma_sync); #ifdef CONFIG_HIGHMEM /* @@ -386,28 +386,42 @@ static inline void __dma_sync_page_highmem(struct page *page, * __dma_sync_page makes memory consistent. 
identical to __dma_sync, but * takes a struct page instead of a virtual address */ -void __dma_sync_page(struct page *page, unsigned long offset, - size_t size, int direction) +static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir) { + struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); + unsigned offset = paddr & ~PAGE_MASK; + #ifdef CONFIG_HIGHMEM - __dma_sync_page_highmem(page, offset, size, direction); + __dma_sync_page_highmem(page, offset, size, dir); #else unsigned long start = (unsigned long)page_address(page) + offset; - __dma_sync((void *)start, size, direction); + __dma_sync((void *)start, size, dir); #endif } -EXPORT_SYMBOL(__dma_sync_page); + +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + __dma_sync_page(paddr, size, dir); +} + +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir) +{ + __dma_sync_page(paddr, size, dir); +} /* - * Return the PFN for a given cpu virtual address returned by - * __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent() + * Return the PFN for a given cpu virtual address returned by arch_dma_alloc. */ -unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr) +long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr, + dma_addr_t dma_addr) { /* This should always be populated, so we don't test every * level. If that fails, we'll have a nice crash which * will be as good as a BUG_ON() */ + unsigned long cpu_addr = (unsigned long)vaddr; pgd_t *pgd = pgd_offset_k(cpu_addr); pud_t *pud = pud_offset(pgd, cpu_addr); pmd_t *pmd = pmd_offset(pud, cpu_addr); diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index 080d49b26c3a..210cbc1faf63 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c @@ -221,7 +221,7 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun) #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" #endif -unsigned long __init mmu_mapin_ram(unsigned long top) +unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) { return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; } diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S index 1e2df3e9f9ea..1f13494efb2b 100644 --- a/arch/powerpc/mm/hash_low_32.S +++ b/arch/powerpc/mm/hash_low_32.S @@ -47,14 +47,13 @@ mmu_hash_lock: * Returns to the caller if the access is illegal or there is no * mapping for the address. Otherwise it places an appropriate PTE * in the hash table and returns from the exception. - * Uses r0, r3 - r8, r10, ctr, lr. + * Uses r0, r3 - r6, r8, r10, ctr, lr. 
*/ .text _GLOBAL(hash_page) - tophys(r7,0) /* gets -KERNELBASE into r7 */ #ifdef CONFIG_SMP - addis r8,r7,mmu_hash_lock@h - ori r8,r8,mmu_hash_lock@l + lis r8, (mmu_hash_lock - PAGE_OFFSET)@h + ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l lis r0,0x0fff b 10f 11: lwz r6,0(r8) @@ -70,14 +69,13 @@ _GLOBAL(hash_page) /* Get PTE (linux-style) and check access */ lis r0,KERNELBASE@h /* check if kernel address */ cmplw 0,r4,r0 - mfspr r8,SPRN_SPRG_THREAD /* current task's THREAD (phys) */ ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */ - lwz r5,PGDIR(r8) /* virt page-table root */ + mfspr r5, SPRN_SPRG_PGDIR /* virt page-table root */ blt+ 112f /* assume user more likely */ lis r5,swapper_pg_dir@ha /* if kernel address, use */ addi r5,r5,swapper_pg_dir@l /* kernel page table */ rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */ -112: add r5,r5,r7 /* convert to phys addr */ +112: tophys(r5, r5) #ifndef CONFIG_PTE_64BIT rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */ lwz r8,0(r5) /* get pmd entry */ @@ -144,25 +142,24 @@ retry: #ifdef CONFIG_SMP eieio - addis r8,r7,mmu_hash_lock@ha + lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 - stw r0,mmu_hash_lock@l(r8) + stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) #endif /* Return from the exception */ lwz r5,_CTR(r11) mtctr r5 lwz r0,GPR0(r11) - lwz r7,GPR7(r11) lwz r8,GPR8(r11) b fast_exception_return #ifdef CONFIG_SMP hash_page_out: eieio - addis r8,r7,mmu_hash_lock@ha + lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha li r0,0 - stw r0,mmu_hash_lock@l(r8) + stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8) blr #endif /* CONFIG_SMP */ @@ -186,8 +183,7 @@ _GLOBAL(add_hash_page) add r3,r3,r0 /* note create_hpte trims to 24 bits */ #ifdef CONFIG_SMP - CURRENT_THREAD_INFO(r8, r1) /* use cpu number to make tag */ - lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */ + lwz r8,TASK_CPU(r2) /* to go in mmu_hash_lock */ oris r8,r8,12 #endif /* CONFIG_SMP */ @@ -208,11 +204,9 @@ _GLOBAL(add_hash_page) SYNC_601 isync - tophys(r7,0) - #ifdef CONFIG_SMP - addis r6,r7,mmu_hash_lock@ha - addi r6,r6,mmu_hash_lock@l + lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha + addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l 10: lwarx r0,0,r6 /* take the mmu_hash_lock */ cmpi 0,r0,0 bne- 11f @@ -257,8 +251,8 @@ _GLOBAL(add_hash_page) 9: #ifdef CONFIG_SMP - addis r6,r7,mmu_hash_lock@ha - addi r6,r6,mmu_hash_lock@l + lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha + addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l eieio li r0,0 stw r0,0(r6) /* clear mmu_hash_lock */ @@ -278,10 +272,8 @@ _GLOBAL(add_hash_page) * It is designed to be called with the MMU either on or off. * r3 contains the VSID, r4 contains the virtual address, * r5 contains the linux PTE, r6 contains the old value of the - * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the - * offset to be added to addresses (0 if the MMU is on, - * -KERNELBASE if it is off). r10 contains the upper half of - * the PTE if CONFIG_PTE_64BIT. + * linux PTE (before setting _PAGE_HASHPTE). r10 contains the + * upper half of the PTE if CONFIG_PTE_64BIT. * On SMP, the caller should have the mmu_hash_lock held. * We assume that the caller has (or will) set the _PAGE_HASHPTE * bit in the linux PTE in memory. 
The value passed in r6 should @@ -342,7 +334,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) patch_site 1f, patch__hash_page_A1 patch_site 2f, patch__hash_page_A2 /* Get the address of the primary PTE group in the hash table (r3) */ -0: addis r0,r7,Hash_base@h /* base address of hash table */ +0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */ 1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r3,r3,r0 /* make primary hash */ @@ -356,10 +348,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) beq+ 10f /* no PTE: go look for an empty slot */ tlbie r4 - addis r4,r7,htab_hash_searches@ha - lwz r6,htab_hash_searches@l(r4) + lis r4, (htab_hash_searches - PAGE_OFFSET)@ha + lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4) addi r6,r6,1 /* count how many searches we do */ - stw r6,htab_hash_searches@l(r4) + stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4) /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ mtctr r0 @@ -391,10 +383,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) beq+ found_empty /* update counter of times that the primary PTEG is full */ - addis r4,r7,primary_pteg_full@ha - lwz r6,primary_pteg_full@l(r4) + lis r4, (primary_pteg_full - PAGE_OFFSET)@ha + lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4) addi r6,r6,1 - stw r6,primary_pteg_full@l(r4) + stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4) patch_site 0f, patch__hash_page_C /* Search the secondary PTEG for an empty slot */ @@ -428,8 +420,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT) * lockup here but that shouldn't happen */ -1: addis r4,r7,next_slot@ha /* get next evict slot */ - lwz r6,next_slot@l(r4) +1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */ + lwz r6, (next_slot - PAGE_OFFSET)@l(r4) addi r6,r6,HPTE_SIZE /* search for candidate */ andi. r6,r6,7*HPTE_SIZE stw r6,next_slot@l(r4) @@ -501,8 +493,6 @@ htab_hash_searches: * We assume that there is a hash table in use (Hash != 0). 
*/ _GLOBAL(flush_hash_pages) - tophys(r7,0) - /* * We disable interrupts here, even on UP, because we want * the _PAGE_HASHPTE bit to be a reliable indication of @@ -547,11 +537,9 @@ _GLOBAL(flush_hash_pages) SET_V(r11) /* set V (valid) bit */ #ifdef CONFIG_SMP - addis r9,r7,mmu_hash_lock@ha - addi r9,r9,mmu_hash_lock@l - CURRENT_THREAD_INFO(r8, r1) - add r8,r8,r7 - lwz r8,TI_CPU(r8) + lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha + addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l + lwz r8,TASK_CPU(r2) oris r8,r8,9 10: lwarx r0,0,r9 cmpi 0,r0,0 @@ -584,7 +572,7 @@ _GLOBAL(flush_hash_pages) patch_site 1f, patch__flush_hash_A1 patch_site 2f, patch__flush_hash_A2 /* Get the address of the primary PTE group in the hash table (r3) */ -0: addis r8,r7,Hash_base@h /* base address of hash table */ +0: lis r8, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */ 1: rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ 2: rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ xor r8,r0,r8 /* make primary hash */ @@ -646,8 +634,7 @@ EXPORT_SYMBOL(flush_hash_pages) */ _GLOBAL(_tlbie) #ifdef CONFIG_SMP - CURRENT_THREAD_INFO(r8, r1) - lwz r8,TI_CPU(r8) + lwz r8,TASK_CPU(r2) oris r8,r8,11 mfmsr r10 SYNC @@ -684,8 +671,7 @@ _GLOBAL(_tlbie) */ _GLOBAL(_tlbia) #if defined(CONFIG_SMP) - CURRENT_THREAD_INFO(r8, r1) - lwz r8,TI_CPU(r8) + lwz r8,TASK_CPU(r2) oris r8,r8,10 mfmsr r10 SYNC diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 0cc7fbc3bd1c..0a4f939a8161 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -882,8 +882,12 @@ static void __init htab_initialize(void) } #endif /* CONFIG_PPC_CELL */ - table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, - limit); + table = memblock_phys_alloc_range(htab_size_bytes, + htab_size_bytes, + 0, limit); + if (!table) + panic("ERROR: Failed to allocate %pa bytes below %pa\n", + &htab_size_bytes, &limit); DBG("Hash table allocated at %lx, size: %lx\n", table, htab_size_bytes); @@ -908,9 +912,12 @@ static void __init htab_initialize(void) #ifdef CONFIG_DEBUG_PAGEALLOC if (debug_pagealloc_enabled()) { linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; - linear_map_hash_slots = __va(memblock_alloc_base( - linear_map_hash_count, 1, ppc64_rma_size)); - memset(linear_map_hash_slots, 0, linear_map_hash_count); + linear_map_hash_slots = memblock_alloc_try_nid( + linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT, + ppc64_rma_size, NUMA_NO_NODE); + if (!linear_map_hash_slots) + panic("%s: Failed to allocate %lu bytes max_addr=%pa\n", + __func__, linear_map_hash_count, &ppc64_rma_size); } #endif /* CONFIG_DEBUG_PAGEALLOC */ @@ -1889,12 +1896,12 @@ static int hpt_order_set(void *data, u64 val) return mmu_hash_ops.resize_hpt(val); } -DEFINE_SIMPLE_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n"); static int __init hash64_debugfs(void) { - if (!debugfs_create_file("hpt_order", 0600, powerpc_debugfs_root, - NULL, &fops_hpt_order)) { + if (!debugfs_create_file_unsafe("hpt_order", 0600, powerpc_debugfs_root, + NULL, &fops_hpt_order)) { pr_err("lpar: unable to create hpt_order debugsfs file\n"); } diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index 2e6a8f9345d3..b0d9209d9a86 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c @@ -26,7 +26,7 @@ int __hash_page_huge(unsigned long ea, unsigned long 
access, unsigned long vsid, real_pte_t rpte; unsigned long vpn; unsigned long old_pte, new_pte; - unsigned long rflags, pa, sz; + unsigned long rflags, pa; long slot, offset; BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); @@ -73,7 +73,6 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, offset = PTRS_PER_PMD; rpte = __real_pte(__pte(old_pte), ptep, offset); - sz = ((1UL) << shift); if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) /* No CPU has hugepages but lacks no execute, so we * don't need to worry about that case */ @@ -121,3 +120,28 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, *ptep = __pte(new_pte & ~H_PAGE_BUSY); return 0; } + +pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + unsigned long pte_val; + /* + * Clear the _PAGE_PRESENT so that no hardware parallel update is + * possible. Also keep the pte_present true so that we don't take + * wrong fault. + */ + pte_val = pte_update(vma->vm_mm, addr, ptep, + _PAGE_PRESENT, _PAGE_INVALID, 1); + + return __pte(pte_val); +} + +void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte) +{ + + if (radix_enabled()) + return radix__huge_ptep_modify_prot_commit(vma, addr, ptep, + old_pte, pte); + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); +} diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 2486bee0f93e..cab06331c0c0 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include #include #include #include @@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (high_limit - len >= addr && + if (high_limit - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -83,10 +84,27 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, */ info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; - info.low_limit = PAGE_SIZE; + info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; return vm_unmapped_area(&info); } + +void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t pte) +{ + struct mm_struct *mm = vma->vm_mm; + + /* + * To avoid NMMU hang while relaxing access we need to flush the tlb before + * we set the new value. 
+ */ + if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) && + (atomic_read(&mm->context.copros) > 0)) + radix__flush_hugetlb_page(vma, addr); + + set_huge_pte_at(vma->vm_mm, addr, ptep, pte); +} diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 3e59e5d64b01..41a3513cadc9 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -108,12 +108,8 @@ static void __init MMU_setup(void) __map_without_bats = 1; __map_without_ltlbs = 1; } -#ifdef CONFIG_STRICT_KERNEL_RWX - if (rodata_enabled) { - __map_without_bats = 1; + if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx)) __map_without_ltlbs = 1; - } -#endif } /* diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index a5091c034747..a4c155af1597 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -274,7 +274,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, for (; start < end; start += page_size) { unsigned long nr_pages, addr; - struct page *section_base; struct page *page; /* @@ -290,7 +289,6 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, continue; page = pfn_to_page(addr >> PAGE_SHIFT); - section_base = pfn_to_page(vmemmap_section_start(start)); nr_pages = 1 << page_order; base_pfn = PHYS_PFN(addr); diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 33cc6f676fa6..f6787f90e158 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -69,22 +69,14 @@ pte_t *kmap_pte; EXPORT_SYMBOL(kmap_pte); pgprot_t kmap_prot; EXPORT_SYMBOL(kmap_prot); -#define TOP_ZONE ZONE_HIGHMEM static inline pte_t *virt_to_kpte(unsigned long vaddr) { return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr), vaddr); } -#else -#define TOP_ZONE ZONE_NORMAL #endif -int page_is_ram(unsigned long pfn) -{ - return memblock_is_memory(__pfn_to_phys(pfn)); -} - pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { @@ -176,34 +168,6 @@ int __meminit arch_remove_memory(int nid, u64 start, u64 size, #endif #endif /* CONFIG_MEMORY_HOTPLUG */ -/* - * walk_memory_resource() needs to make sure there is no holes in a given - * memory range. PPC64 does not maintain the memory layout in /proc/iomem. - * Instead it maintains it in memblock.memory structures. Walk through the - * memory regions, find holes and callback for contiguous regions. - */ -int -walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, - void *arg, int (*func)(unsigned long, unsigned long, void *)) -{ - struct memblock_region *reg; - unsigned long end_pfn = start_pfn + nr_pages; - unsigned long tstart, tend; - int ret = -1; - - for_each_memblock(memory, reg) { - tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); - tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); - if (tstart >= tend) - continue; - ret = (*func)(tstart, tend - tstart, arg); - if (ret) - break; - } - return ret; -} -EXPORT_SYMBOL_GPL(walk_system_ram_range); - #ifndef CONFIG_NEED_MULTIPLE_NODES void __init mem_topology_setup(void) { @@ -261,25 +225,6 @@ static int __init mark_nonram_nosave(void) */ static unsigned long max_zone_pfns[MAX_NR_ZONES]; -/* - * Find the least restrictive zone that is entirely below the - * specified pfn limit. Returns < 0 if no suitable zone is found. - * - * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit - * systems -- the DMA limit can be higher than any possible real pfn. 
- */ -int dma_pfn_limit_to_zone(u64 pfn_limit) -{ - int i; - - for (i = TOP_ZONE; i >= 0; i--) { - if (max_zone_pfns[i] <= pfn_limit) - return i; - } - - return -EPERM; -} - /* * paging_init() sets up the page tables - in fact we've already done this. */ @@ -585,3 +530,9 @@ int devmem_is_allowed(unsigned long pfn) return 0; } #endif /* CONFIG_STRICT_DEVMEM */ + +/* + * This is defined in kernel/resource.c but only powerpc needs to export it, for + * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed. + */ +EXPORT_SYMBOL_GPL(walk_system_ram_range); diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c index a712a650a8b6..e7a9c4f6bfca 100644 --- a/arch/powerpc/mm/mmu_context_iommu.c +++ b/arch/powerpc/mm/mmu_context_iommu.c @@ -21,6 +21,7 @@ #include #include #include +#include static DEFINE_MUTEX(mem_list_mutex); @@ -34,8 +35,18 @@ struct mm_iommu_table_group_mem_t { atomic64_t mapped; unsigned int pageshift; u64 ua; /* userspace address */ - u64 entries; /* number of entries in hpas[] */ - u64 *hpas; /* vmalloc'ed */ + u64 entries; /* number of entries in hpas/hpages[] */ + /* + * in mm_iommu_get we temporarily use this to store + * struct page address. + * + * We need to convert ua to hpa in real mode. Make it + * simpler by storing physical address. + */ + union { + struct page **hpages; /* vmalloc'ed */ + phys_addr_t *hpas; + }; #define MM_IOMMU_TABLE_INVALID_HPA ((uint64_t)-1) u64 dev_hpa; /* Device memory base address */ }; @@ -80,64 +91,13 @@ bool mm_iommu_preregistered(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(mm_iommu_preregistered); -/* - * Taken from alloc_migrate_target with changes to remove CMA allocations - */ -struct page *new_iommu_non_cma_page(struct page *page, unsigned long private) -{ - gfp_t gfp_mask = GFP_USER; - struct page *new_page; - - if (PageCompound(page)) - return NULL; - - if (PageHighMem(page)) - gfp_mask |= __GFP_HIGHMEM; - - /* - * We don't want the allocation to force an OOM if possibe - */ - new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN); - return new_page; -} - -static int mm_iommu_move_page_from_cma(struct page *page) -{ - int ret = 0; - LIST_HEAD(cma_migrate_pages); - - /* Ignore huge pages for now */ - if (PageCompound(page)) - return -EBUSY; - - lru_add_drain(); - ret = isolate_lru_page(page); - if (ret) - return ret; - - list_add(&page->lru, &cma_migrate_pages); - put_page(page); /* Drop the gup reference */ - - ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page, - NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE); - if (ret) { - if (!list_empty(&cma_migrate_pages)) - putback_movable_pages(&cma_migrate_pages); - } - - return 0; -} - static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, - unsigned long entries, unsigned long dev_hpa, - struct mm_iommu_table_group_mem_t **pmem) + unsigned long entries, unsigned long dev_hpa, + struct mm_iommu_table_group_mem_t **pmem) { struct mm_iommu_table_group_mem_t *mem; - long i, j, ret = 0, locked_entries = 0; + long i, ret, locked_entries = 0; unsigned int pageshift; - unsigned long flags; - unsigned long cur_ua; - struct page *page = NULL; mutex_lock(&mem_list_mutex); @@ -187,62 +147,43 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, goto unlock_exit; } + down_read(&mm->mmap_sem); + ret = get_user_pages_longterm(ua, entries, FOLL_WRITE, mem->hpages, NULL); + up_read(&mm->mmap_sem); + if (ret != entries) { + /* free the reference taken */ + for (i = 0; i < ret; i++) + put_page(mem->hpages[i]); + + 
vfree(mem->hpas); + kfree(mem); + ret = -EFAULT; + goto unlock_exit; + } + + pageshift = PAGE_SHIFT; for (i = 0; i < entries; ++i) { - cur_ua = ua + (i << PAGE_SHIFT); - if (1 != get_user_pages_fast(cur_ua, - 1/* pages */, 1/* iswrite */, &page)) { - ret = -EFAULT; - for (j = 0; j < i; ++j) - put_page(pfn_to_page(mem->hpas[j] >> - PAGE_SHIFT)); - vfree(mem->hpas); - kfree(mem); - goto unlock_exit; - } + struct page *page = mem->hpages[i]; + /* - * If we get a page from the CMA zone, since we are going to - * be pinning these entries, we might as well move them out - * of the CMA zone if possible. NOTE: faulting in + migration - * can be expensive. Batching can be considered later + * Allow to use larger than 64k IOMMU pages. Only do that + * if we are backed by hugetlb. */ - if (is_migrate_cma_page(page)) { - if (mm_iommu_move_page_from_cma(page)) - goto populate; - if (1 != get_user_pages_fast(cur_ua, - 1/* pages */, 1/* iswrite */, - &page)) { - ret = -EFAULT; - for (j = 0; j < i; ++j) - put_page(pfn_to_page(mem->hpas[j] >> - PAGE_SHIFT)); - vfree(mem->hpas); - kfree(mem); - goto unlock_exit; - } - } -populate: - pageshift = PAGE_SHIFT; - if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) { - pte_t *pte; + if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) { struct page *head = compound_head(page); - unsigned int compshift = compound_order(head); - unsigned int pteshift; - - local_irq_save(flags); /* disables as well */ - pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift); - - /* Double check it is still the same pinned page */ - if (pte && pte_page(*pte) == head && - pteshift == compshift + PAGE_SHIFT) - pageshift = max_t(unsigned int, pteshift, - PAGE_SHIFT); - local_irq_restore(flags); + + pageshift = compound_order(head) + PAGE_SHIFT; } mem->pageshift = min(mem->pageshift, pageshift); + /* + * We don't need struct page reference any more, switch + * to physical address. 
+ */ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; } good_exit: + ret = 0; atomic64_set(&mem->mapped, 1); mem->used = 1; mem->ua = ua; diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 22d71a58167f..1945c5f19f5e 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c @@ -461,10 +461,19 @@ void __init mmu_context_init(void) * Allocate the maps used by context management */ context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); + if (!context_map) + panic("%s: Failed to allocate %zu bytes\n", __func__, + CTX_MAP_SIZE); context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), SMP_CACHE_BYTES); + if (!context_mm) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(void *) * (LAST_CONTEXT + 1)); #ifdef CONFIG_SMP stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); + if (!stale_map[boot_cpuid]) + panic("%s: Failed to allocate %zu bytes\n", __func__, + CTX_MAP_SIZE); cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, "powerpc/mmu/ctx:prepare", diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index c4a717da65eb..74ff61dabcb1 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h @@ -130,7 +130,7 @@ extern void wii_memory_fixups(void); */ #ifdef CONFIG_PPC32 extern void MMU_init_hw(void); -extern unsigned long mmu_mapin_ram(unsigned long top); +unsigned long mmu_mapin_ram(unsigned long base, unsigned long top); #endif #ifdef CONFIG_PPC_FSL_BOOK3E @@ -165,3 +165,11 @@ unsigned long p_block_mapped(phys_addr_t pa); static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; } static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } #endif + +#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) +void mmu_mark_initmem_nx(void); +void mmu_mark_rodata_ro(void); +#else +static inline void mmu_mark_initmem_nx(void) { } +static inline void mmu_mark_rodata_ro(void) { } +#endif diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 87f0dd004295..f976676004ad 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -84,7 +84,7 @@ static void __init setup_node_to_cpumask_map(void) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ - dbg("Node to cpumask map for %d nodes\n", nr_node_ids); + dbg("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init fake_numa_create_new_node(unsigned long end_pfn, @@ -215,7 +215,7 @@ static void initialize_distance_lookup_table(int nid, */ static int associativity_to_nid(const __be32 *associativity) { - int nid = -1; + int nid = NUMA_NO_NODE; if (min_common_depth == -1) goto out; @@ -225,7 +225,7 @@ static int associativity_to_nid(const __be32 *associativity) /* POWER4 LPAR uses 0xffff as invalid node */ if (nid == 0xffff || nid >= MAX_NUMNODES) - nid = -1; + nid = NUMA_NO_NODE; if (nid > 0 && of_read_number(associativity, 1) >= distance_ref_points_depth) { @@ -244,7 +244,7 @@ out: */ static int of_node_to_nid_single(struct device_node *device) { - int nid = -1; + int nid = NUMA_NO_NODE; const __be32 *tmp; tmp = of_get_associativity(device); @@ -256,7 +256,7 @@ static int of_node_to_nid_single(struct device_node *device) /* Walk the device tree upwards, looking for an associativity id */ int of_node_to_nid(struct device_node *device) { - int nid = -1; + int nid = NUMA_NO_NODE; of_node_get(device); while (device) { @@ -454,7 +454,7 @@ static int of_drconf_to_nid_single(struct drmem_lmb *lmb) */ 
static int numa_setup_cpu(unsigned long lcpu) { - int nid = -1; + int nid = NUMA_NO_NODE; struct device_node *cpu; /* @@ -788,6 +788,10 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) int tnid; nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + if (!nd_pa) + panic("Cannot allocate %zu bytes for node %d data\n", + nd_size, nid); + nd = __va(nd_pa); /* report and initialize */ @@ -930,7 +934,7 @@ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr) { struct drmem_lmb *lmb; unsigned long lmb_size; - int nid = -1; + int nid = NUMA_NO_NODE; lmb_size = drmem_lmb_size(); @@ -960,7 +964,7 @@ static int hot_add_drconf_scn_to_nid(unsigned long scn_addr) static int hot_add_node_scn_to_nid(unsigned long scn_addr) { struct device_node *memory; - int nid = -1; + int nid = NUMA_NO_NODE; for_each_node_by_type(memory, "memory") { unsigned long start, size; @@ -1460,13 +1464,6 @@ static void reset_topology_timer(void) #ifdef CONFIG_SMP -static void stage_topology_update(int core_id) -{ - cpumask_or(&cpu_associativity_changes_mask, - &cpu_associativity_changes_mask, cpu_sibling_mask(core_id)); - reset_topology_timer(); -} - static int dt_update_callback(struct notifier_block *nb, unsigned long action, void *data) { @@ -1479,7 +1476,7 @@ static int dt_update_callback(struct notifier_block *nb, !of_prop_cmp(update->prop->name, "ibm,associativity")) { u32 core_id; of_property_read_u32(update->dn, "reg", &core_id); - stage_topology_update(core_id); + rc = dlpar_cpu_readd(core_id); rc = NOTIFY_OK; } break; diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c index e0ccf36714b2..1032ef7aaf62 100644 --- a/arch/powerpc/mm/pgtable-book3e.c +++ b/arch/powerpc/mm/pgtable-book3e.c @@ -57,12 +57,16 @@ void vmemmap_remove_mapping(unsigned long start, static __ref void *early_alloc_pgtable(unsigned long size) { - void *pt; + void *ptr; - pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS))); - memset(pt, 0, size); + ptr = memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT, + __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE); - return pt; + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%lx\n", + __func__, size, size, __pa(MAX_DMA_ADDRESS)); + + return ptr; } /* diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index f3c31f5e1026..a4341aba0af4 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c @@ -195,11 +195,11 @@ void __init mmu_partition_table_init(void) unsigned long ptcr; BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large."); - partition_tb = __va(memblock_alloc_base(patb_size, patb_size, - MEMBLOCK_ALLOC_ANYWHERE)); - /* Initialize the Partition Table with no entries */ - memset((void *)partition_tb, 0, patb_size); + partition_tb = memblock_alloc(patb_size, patb_size); + if (!partition_tb) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, patb_size, patb_size); /* * update partition table control register, @@ -400,3 +400,50 @@ void arch_report_meminfo(struct seq_file *m) atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); } #endif /* CONFIG_PROC_FS */ + +pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep) +{ + unsigned long pte_val; + + /* + * Clear the _PAGE_PRESENT so that no hardware parallel update is + * possible. Also keep the pte_present true so that we don't take + * wrong fault. 
+ */ + pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0); + + return __pte(pte_val); + +} + +void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte) +{ + if (radix_enabled()) + return radix__ptep_modify_prot_commit(vma, addr, + ptep, old_pte, pte); + set_pte_at(vma->vm_mm, addr, ptep, pte); +} + +/* + * For hash translation mode, we use the deposited table to store hash slot + * information and they are stored at PTRS_PER_PMD offset from related pmd + * location. Hence a pmd move requires deposit and withdraw. + * + * For radix translation with split pmd ptl, we store the deposited table in the + * pmd page. Hence if we have different pmd page we need to withdraw during pmd + * move. + * + * With hash we use deposited table always irrespective of anon or not. + * With radix we use deposited table only for anonymous mapping. + */ +int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, + struct spinlock *old_pmd_ptl, + struct vm_area_struct *vma) +{ + if (radix_enabled()) + return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); + + return true; +} diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 931156069a81..154472a28c77 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -51,26 +51,22 @@ static int native_register_process_table(unsigned long base, unsigned long pg_sz static __ref void *early_alloc_pgtable(unsigned long size, int nid, unsigned long region_start, unsigned long region_end) { - unsigned long pa = 0; - void *pt; + phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT; + phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE; + void *ptr; - if (region_start || region_end) /* has region hint */ - pa = memblock_alloc_range(size, size, region_start, region_end, - MEMBLOCK_NONE); - else if (nid != -1) /* has node hint */ - pa = memblock_alloc_base_nid(size, size, - MEMBLOCK_ALLOC_ANYWHERE, - nid, MEMBLOCK_NONE); + if (region_start) + min_addr = region_start; + if (region_end) + max_addr = region_end; - if (!pa) - pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE); + ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid); - BUG_ON(!pa); + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n", + __func__, size, size, nid, &min_addr, &max_addr); - pt = __va(pa); - memset(pt, 0, size); - - return pt; + return ptr; } static int early_map_kernel_page(unsigned long ea, unsigned long pa, @@ -1063,3 +1059,21 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, } /* See ptesync comment in radix__set_pte_at */ } + +void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t old_pte, pte_t pte) +{ + struct mm_struct *mm = vma->vm_mm; + + /* + * To avoid NMMU hang while relaxing access we need to flush the tlb before + * we set the new value. We need to do this only for radix, because hash + * translation does flush when updating the linux pte. 
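The start/commit pair added above is the hook pair that generic code such as mprotect's change_pte_range() is expected to drive: start invalidates the PTE (clearing _PAGE_PRESENT while keeping it logically present via _PAGE_INVALID), the caller computes the new protections, and commit publishes them, with the radix variant flushing the TLB first when coprocessors are attached. A rough sketch of that calling sequence, assuming the caller already holds the PTE lock (example_change_prot is an invented name, not part of this patch):

	static void example_change_prot(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pgprot_t newprot)
	{
		pte_t oldpte, newpte;

		/* Invalidate the entry so no concurrent hardware update can race us. */
		oldpte = ptep_modify_prot_start(vma, addr, ptep);

		/* Compute the new protection bits while the entry is invalid. */
		newpte = pte_modify(oldpte, newprot);

		/* Publish the result; radix flushes the TLB first if copros are attached. */
		ptep_modify_prot_commit(vma, addr, ptep, oldpte, newpte);
	}
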
+ */ + if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) && + (atomic_read(&mm->context.copros) > 0)) + radix__flush_tlb_page(vma, addr); + + set_pte_at(mm, addr, ptep, pte); +} diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index ded71126ce4c..6e56a6240bfa 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -254,26 +254,20 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) void __init mapin_ram(void) { - unsigned long s, top; - -#ifndef CONFIG_WII - top = total_lowmem; - s = mmu_mapin_ram(top); - __mapin_ram_chunk(s, top); -#else - if (!wii_hole_size) { - s = mmu_mapin_ram(total_lowmem); - __mapin_ram_chunk(s, total_lowmem); - } else { - top = wii_hole_start; - s = mmu_mapin_ram(top); - __mapin_ram_chunk(s, top); - - top = memblock_end_of_DRAM(); - s = wii_mmu_mapin_mem2(top); - __mapin_ram_chunk(s, top); + struct memblock_region *reg; + + for_each_memblock(memory, reg) { + phys_addr_t base = reg->base; + phys_addr_t top = min(base + reg->size, total_lowmem); + + if (base >= top) + continue; + base = mmu_mapin_ram(base, top); + if (IS_ENABLED(CONFIG_BDI_SWITCH)) + __mapin_ram_chunk(reg->base, top); + else + __mapin_ram_chunk(base, top); } -#endif } /* Scan the real Linux page tables and return a PTE pointer for @@ -359,7 +353,10 @@ void mark_initmem_nx(void) unsigned long numpages = PFN_UP((unsigned long)_einittext) - PFN_DOWN((unsigned long)_sinittext); - change_page_attr(page, numpages, PAGE_KERNEL); + if (v_block_mapped((unsigned long)_stext) + 1) + mmu_mark_initmem_nx(); + else + change_page_attr(page, numpages, PAGE_KERNEL); } #ifdef CONFIG_STRICT_KERNEL_RWX @@ -368,6 +365,11 @@ void mark_rodata_ro(void) struct page *page; unsigned long numpages; + if (v_block_mapped((unsigned long)_sinittext)) { + mmu_mark_rodata_ro(); + return; + } + page = virt_to_page(_stext); numpages = PFN_UP((unsigned long)_etext) - PFN_DOWN((unsigned long)_stext); diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index 3f4193201ee7..f29d2f118b44 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "mmu_decl.h" @@ -73,45 +74,171 @@ unsigned long p_block_mapped(phys_addr_t pa) return 0; } -unsigned long __init mmu_mapin_ram(unsigned long top) +static int find_free_bat(void) { - unsigned long tot, bl, done; - unsigned long max_size = (256<<20); + int b; + + if (cpu_has_feature(CPU_FTR_601)) { + for (b = 0; b < 4; b++) { + struct ppc_bat *bat = BATS[b]; + + if (!(bat[0].batl & 0x40)) + return b; + } + } else { + int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; + + for (b = 0; b < n; b++) { + struct ppc_bat *bat = BATS[b]; + + if (!(bat[1].batu & 3)) + return b; + } + } + return -1; +} + +static unsigned int block_size(unsigned long base, unsigned long top) +{ + unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20; + unsigned int base_shift = (fls(base) - 1) & 31; + unsigned int block_shift = (fls(top - base) - 1) & 31; + + return min3(max_size, 1U << base_shift, 1U << block_shift); +} + +/* + * Set up one of the IBAT (block address translation) register pairs. + * The parameters are not checked; in particular size must be a power + * of 2 between 128k and 256M. + * Only for 603+ ... 
+ */ +static void setibat(int index, unsigned long virt, phys_addr_t phys, + unsigned int size, pgprot_t prot) +{ + unsigned int bl = (size >> 17) - 1; + int wimgxpp; + struct ppc_bat *bat = BATS[index]; + unsigned long flags = pgprot_val(prot); + + if (!cpu_has_feature(CPU_FTR_NEED_COHERENT)) + flags &= ~_PAGE_COHERENT; + + wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX); + bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */ + bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp; + if (flags & _PAGE_USER) + bat[0].batu |= 1; /* Vp = 1 */ +} + +static void clearibat(int index) +{ + struct ppc_bat *bat = BATS[index]; + + bat[0].batu = 0; + bat[0].batl = 0; +} + +static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top) +{ + int idx; + + while ((idx = find_free_bat()) != -1 && base != top) { + unsigned int size = block_size(base, top); + + if (size < 128 << 10) + break; + setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X); + base += size; + } + + return base; +} + +unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top) +{ + int done; + unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET; if (__map_without_bats) { - printk(KERN_DEBUG "RAM mapped without BATs\n"); - return 0; + pr_debug("RAM mapped without BATs\n"); + return base; + } + + if (!strict_kernel_rwx_enabled() || base >= border || top <= border) + return __mmu_mapin_ram(base, top); + + done = __mmu_mapin_ram(base, border); + if (done != border - base) + return done; + + return done + __mmu_mapin_ram(border, top); +} + +void mmu_mark_initmem_nx(void) +{ + int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4; + int i; + unsigned long base = (unsigned long)_stext - PAGE_OFFSET; + unsigned long top = (unsigned long)_etext - PAGE_OFFSET; + unsigned long size; + + if (cpu_has_feature(CPU_FTR_601)) + return; + + for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) { + size = block_size(base, top); + setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); + base += size; + } + if (base < top) { + size = block_size(base, top); + size = max(size, 128UL << 10); + if ((top - base) > size) { + if (strict_kernel_rwx_enabled()) + pr_warn("Kernel _etext not properly aligned\n"); + size <<= 1; + } + setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT); + base += size; } + for (; i < nb; i++) + clearibat(i); - /* Set up BAT2 and if necessary BAT3 to cover RAM. */ + update_bats(); - /* Make sure we don't map a block larger than the - smallest alignment of the physical address. */ - tot = top; - for (bl = 128<<10; bl < max_size; bl <<= 1) { - if (bl * 2 > tot) + for (i = TASK_SIZE >> 28; i < 16; i++) { + /* Do not set NX on VM space for modules */ + if (IS_ENABLED(CONFIG_MODULES) && + (VMALLOC_START & 0xf0000000) == i << 28) break; + mtsrin(mfsrin(i << 28) | 0x10000000, i << 28); } +} + +void mmu_mark_rodata_ro(void) +{ + int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 
8 : 4; + int i; - setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X); - done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1; - if ((done < tot) && !bat_addrs[3].limit) { - /* use BAT3 to cover a bit more */ - tot -= done; - for (bl = 128<<10; bl < max_size; bl <<= 1) - if (bl * 2 > tot) - break; - setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X); - done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1; + if (cpu_has_feature(CPU_FTR_601)) + return; + + for (i = 0; i < nb; i++) { + struct ppc_bat *bat = BATS[i]; + + if (bat_addrs[i].start < (unsigned long)__init_begin) + bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX; } - return done; + update_bats(); } /* * Set up one of the I/D BAT (block address translation) register pairs. * The parameters are not checked; in particular size must be a power * of 2 between 128k and 256M. + * On 603+, only set IBAT when _PAGE_EXEC is set */ void __init setbat(int index, unsigned long virt, phys_addr_t phys, unsigned int size, pgprot_t prot) @@ -138,11 +265,12 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys, bat[1].batu |= 1; /* Vp = 1 */ if (flags & _PAGE_GUARDED) { /* G bit must be zero in IBATs */ - bat[0].batu = bat[0].batl = 0; - } else { - /* make IBAT same as DBAT */ - bat[0] = bat[1]; + flags &= ~_PAGE_EXEC; } + if (flags & _PAGE_EXEC) + bat[0] = bat[1]; + else + bat[0].batu = bat[0].batl = 0; } else { /* 601 cpu */ if (bl > BL_8M) @@ -211,8 +339,10 @@ void __init MMU_init_hw(void) * Find some memory for the hash table. */ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); - Hash = __va(memblock_phys_alloc(Hash_size, Hash_size)); - memset(Hash, 0, Hash_size); + Hash = memblock_alloc(Hash_size, Hash_size); + if (!Hash) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, Hash_size, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size); @@ -231,7 +361,8 @@ void __init MMU_init_hw(void) if (lg_n_hpteg > 16) mb2 = 16 - LG_HPTEG_SIZE; - modify_instruction_site(&patch__hash_page_A0, 0xffff, (unsigned int)Hash >> 16); + modify_instruction_site(&patch__hash_page_A0, 0xffff, + ((unsigned int)Hash - PAGE_OFFSET) >> 16); modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6); modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6); modify_instruction_site(&patch__hash_page_B, 0xffff, hmask); @@ -240,7 +371,8 @@ void __init MMU_init_hw(void) /* * Patch up the instructions in hashtable.S:flush_hash_page */ - modify_instruction_site(&patch__flush_hash_A0, 0xffff, (unsigned int)Hash >> 16); + modify_instruction_site(&patch__flush_hash_A0, 0xffff, + ((unsigned int)Hash - PAGE_OFFSET) >> 16); modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6); modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6); modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask); diff --git a/arch/powerpc/mm/dump_linuxpagetables-8xx.c b/arch/powerpc/mm/ptdump/8xx.c similarity index 97% rename from arch/powerpc/mm/dump_linuxpagetables-8xx.c rename to arch/powerpc/mm/ptdump/8xx.c index ab9e3f24db2f..9e2d8e847d6e 100644 --- a/arch/powerpc/mm/dump_linuxpagetables-8xx.c +++ b/arch/powerpc/mm/ptdump/8xx.c @@ -7,7 +7,7 @@ #include #include -#include "dump_linuxpagetables.h" +#include "ptdump.h" static const struct flag_info flag_array[] = { { diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile new file mode 100644 index 000000000000..712762be3cb1 --- /dev/null +++ b/arch/powerpc/mm/ptdump/Makefile @@ -0,0 +1,9 
@@ +# SPDX-License-Identifier: GPL-2.0 + +obj-y += ptdump.o + +obj-$(CONFIG_4xx) += shared.o +obj-$(CONFIG_PPC_8xx) += 8xx.o +obj-$(CONFIG_PPC_BOOK3E_MMU) += shared.o +obj-$(CONFIG_PPC_BOOK3S_32) += shared.o bats.o segment_regs.o +obj-$(CONFIG_PPC_BOOK3S_64) += book3s64.o hashpagetable.o diff --git a/arch/powerpc/mm/dump_bats.c b/arch/powerpc/mm/ptdump/bats.c similarity index 100% rename from arch/powerpc/mm/dump_bats.c rename to arch/powerpc/mm/ptdump/bats.c diff --git a/arch/powerpc/mm/dump_linuxpagetables-book3s64.c b/arch/powerpc/mm/ptdump/book3s64.c similarity index 98% rename from arch/powerpc/mm/dump_linuxpagetables-book3s64.c rename to arch/powerpc/mm/ptdump/book3s64.c index ed6fcf78256e..0dfca72cb9bd 100644 --- a/arch/powerpc/mm/dump_linuxpagetables-book3s64.c +++ b/arch/powerpc/mm/ptdump/book3s64.c @@ -7,7 +7,7 @@ #include #include -#include "dump_linuxpagetables.h" +#include "ptdump.h" static const struct flag_info flag_array[] = { { diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c similarity index 99% rename from arch/powerpc/mm/dump_hashpagetable.c rename to arch/powerpc/mm/ptdump/hashpagetable.c index 869294695048..b430e4e08af6 100644 --- a/arch/powerpc/mm/dump_hashpagetable.c +++ b/arch/powerpc/mm/ptdump/hashpagetable.c @@ -342,7 +342,7 @@ static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize) /* Look in secondary table */ if (slot == -1) - slot = base_hpte_find(ea, psize, true, &v, &r); + slot = base_hpte_find(ea, psize, false, &v, &r); /* No entry found */ if (slot == -1) diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/ptdump/ptdump.c similarity index 94% rename from arch/powerpc/mm/dump_linuxpagetables.c rename to arch/powerpc/mm/ptdump/ptdump.c index 6aa41669ac1a..37138428ab55 100644 --- a/arch/powerpc/mm/dump_linuxpagetables.c +++ b/arch/powerpc/mm/ptdump/ptdump.c @@ -28,7 +28,7 @@ #include #include -#include "dump_linuxpagetables.h" +#include "ptdump.h" #ifdef CONFIG_PPC32 #define KERN_VIRT_START 0 @@ -143,14 +143,19 @@ static void dump_addr(struct pg_state *st, unsigned long addr) unsigned long delta; #ifdef CONFIG_PPC64 - seq_printf(st->seq, "0x%016lx-0x%016lx ", st->start_address, addr-1); - seq_printf(st->seq, "0x%016lx ", st->start_pa); +#define REG "0x%016lx" #else - seq_printf(st->seq, "0x%08lx-0x%08lx ", st->start_address, addr - 1); - seq_printf(st->seq, "0x%08lx ", st->start_pa); +#define REG "0x%08lx" #endif - delta = (addr - st->start_address) >> 10; + seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1); + if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) { + seq_printf(st->seq, "[" REG "]", st->start_pa); + delta = PAGE_SIZE >> 10; + } else { + seq_printf(st->seq, " " REG " ", st->start_pa); + delta = (addr - st->start_address) >> 10; + } /* Work out what appropriate unit to use */ while (!(delta & 1023) && unit[1]) { delta >>= 10; @@ -184,7 +189,8 @@ static void note_page(struct pg_state *st, unsigned long addr, */ } else if (flag != st->current_flags || level != st->level || addr >= st->marker[1].start_address || - pa != st->last_pa + PAGE_SIZE) { + (pa != st->last_pa + PAGE_SIZE && + (pa != st->start_pa || st->start_pa != st->last_pa))) { /* Check the PTE flags */ if (st->current_flags) { diff --git a/arch/powerpc/mm/dump_linuxpagetables.h b/arch/powerpc/mm/ptdump/ptdump.h similarity index 100% rename from arch/powerpc/mm/dump_linuxpagetables.h rename to arch/powerpc/mm/ptdump/ptdump.h diff --git a/arch/powerpc/mm/dump_sr.c 
b/arch/powerpc/mm/ptdump/segment_regs.c similarity index 100% rename from arch/powerpc/mm/dump_sr.c rename to arch/powerpc/mm/ptdump/segment_regs.c diff --git a/arch/powerpc/mm/dump_linuxpagetables-generic.c b/arch/powerpc/mm/ptdump/shared.c similarity index 97% rename from arch/powerpc/mm/dump_linuxpagetables-generic.c rename to arch/powerpc/mm/ptdump/shared.c index 3fe98a0974c6..f7ed2f187cb0 100644 --- a/arch/powerpc/mm/dump_linuxpagetables-generic.c +++ b/arch/powerpc/mm/ptdump/shared.c @@ -7,7 +7,7 @@ #include #include -#include "dump_linuxpagetables.h" +#include "ptdump.h" static const struct flag_info flag_array[] = { { diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index bc3914d54e26..5986df48359b 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -69,6 +69,11 @@ static void assert_slb_presence(bool present, unsigned long ea) if (!cpu_has_feature(CPU_FTR_ARCH_206)) return; + /* + * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware + * ignores all other bits from 0-27, so just clear them all. + */ + ea &= ~((1UL << 28) - 1); asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); WARN_ON(present == (tmp == 0)); diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 06898c13901d..aec91dbcdc0b 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -377,6 +378,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT); unsigned long addr, found, prev; struct vm_unmapped_area_info info; + unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr); info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; @@ -393,7 +395,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, if (high_limit > DEFAULT_MAP_WINDOW) addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW; - while (addr > PAGE_SIZE) { + while (addr > min_addr) { info.high_limit = addr; if (!slice_scan_available(addr - 1, available, 0, &addr)) continue; @@ -405,8 +407,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm, * Check if we need to reduce the range, or if we can * extend it to cover the previous available slice. */ - if (addr < PAGE_SIZE) - addr = PAGE_SIZE; + if (addr < min_addr) + addr = min_addr; else if (slice_scan_available(addr - 1, available, 0, &prev)) { addr = prev; goto prev_slice; @@ -528,7 +530,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len, addr = _ALIGN_UP(addr, page_size); slice_dbg(" aligned addr=%lx\n", addr); /* Ignore hint if it's too large or overlaps a VMA */ - if (addr > high_limit - len || + if (addr > high_limit - len || addr < mmap_min_addr || !slice_area_is_free(mm, addr, len)) addr = 0; } diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index ae5d568e267f..ac23dc1c6535 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c @@ -302,7 +302,7 @@ void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, * This function as well as __local_flush_tlb_page() must only be called * for user contexts. 
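The __flush_tlb_page() change immediately below drops the outer unlikely() around WARN_ON(). That annotation was redundant: the generic WARN_ON() already evaluates its condition under unlikely(), roughly as in this simplified rendering of the asm-generic definition:

	#define WARN_ON(condition) ({				\
		int __ret_warn_on = !!(condition);		\
		if (unlikely(__ret_warn_on))			\
			__WARN();				\
		unlikely(__ret_warn_on);			\
	})
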
*/ - if (unlikely(WARN_ON(!mm))) + if (WARN_ON(!mm)) return; preempt_disable(); diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index c2d5192ed64f..549e9490ff2a 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -165,6 +165,10 @@ #define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \ ___PPC_RS(a) | __PPC_SH(i) | \ __PPC_MB(mb) | __PPC_ME(me)) +#define PPC_RLWINM_DOT(d, a, i, mb, me) EMIT(PPC_INST_RLWINM_DOT | \ + ___PPC_RA(d) | ___PPC_RS(a) | \ + __PPC_SH(i) | __PPC_MB(mb) | \ + __PPC_ME(me)) #define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \ ___PPC_RS(a) | __PPC_SH(i) | \ __PPC_MB(mb) | __PPC_ME(me)) diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h index 6f4daacad296..dc50a8d4b3b9 100644 --- a/arch/powerpc/net/bpf_jit32.h +++ b/arch/powerpc/net/bpf_jit32.h @@ -106,9 +106,8 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); } while (0) #else #define PPC_BPF_LOAD_CPU(r) \ - do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \ - PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \ - offsetof(struct thread_info, cpu)); \ + do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4); \ + PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \ } while(0) #endif #else diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 7ce57657d3b8..4194d3cfb60c 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -768,36 +768,58 @@ emit_clear: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_X: true_cond = COND_GT; goto cond_branch; case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_X: true_cond = COND_LT; goto cond_branch; case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_X: true_cond = COND_GE; goto cond_branch; case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_X: true_cond = COND_LE; goto cond_branch; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_X: true_cond = COND_EQ; goto cond_branch; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_X: true_cond = COND_NE; goto cond_branch; case BPF_JMP | BPF_JSET | BPF_K: case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_X: true_cond = COND_NE; /* Fall through */ @@ -809,18 +831,44 @@ cond_branch: case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | 
BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: /* unsigned comparison */ - PPC_CMPLD(dst_reg, src_reg); + if (BPF_CLASS(code) == BPF_JMP32) + PPC_CMPLW(dst_reg, src_reg); + else + PPC_CMPLD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* signed comparison */ - PPC_CMPD(dst_reg, src_reg); + if (BPF_CLASS(code) == BPF_JMP32) + PPC_CMPW(dst_reg, src_reg); + else + PPC_CMPD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSET | BPF_X: - PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg); + case BPF_JMP32 | BPF_JSET | BPF_X: + if (BPF_CLASS(code) == BPF_JMP) { + PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, + src_reg); + } else { + int tmp_reg = b2p[TMP_REG_1]; + + PPC_AND(tmp_reg, dst_reg, src_reg); + PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, + 31); + } break; case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K: @@ -828,43 +876,87 @@ cond_branch: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + /* * Need sign-extended load, so only positive * values can be used as imm in cmpldi */ - if (imm >= 0 && imm < 32768) - PPC_CMPLDI(dst_reg, imm); - else { + if (imm >= 0 && imm < 32768) { + if (is_jmp32) + PPC_CMPLWI(dst_reg, imm); + else + PPC_CMPLDI(dst_reg, imm); + } else { /* sign-extending load */ PPC_LI32(b2p[TMP_REG_1], imm); /* ... 
but unsigned comparison */ - PPC_CMPLD(dst_reg, b2p[TMP_REG_1]); + if (is_jmp32) + PPC_CMPLW(dst_reg, + b2p[TMP_REG_1]); + else + PPC_CMPLD(dst_reg, + b2p[TMP_REG_1]); } break; + } case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + { + bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32; + /* * signed comparison, so any 16-bit value * can be used in cmpdi */ - if (imm >= -32768 && imm < 32768) - PPC_CMPDI(dst_reg, imm); - else { + if (imm >= -32768 && imm < 32768) { + if (is_jmp32) + PPC_CMPWI(dst_reg, imm); + else + PPC_CMPDI(dst_reg, imm); + } else { PPC_LI32(b2p[TMP_REG_1], imm); - PPC_CMPD(dst_reg, b2p[TMP_REG_1]); + if (is_jmp32) + PPC_CMPW(dst_reg, + b2p[TMP_REG_1]); + else + PPC_CMPD(dst_reg, + b2p[TMP_REG_1]); } break; + } case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: /* andi does not sign-extend the immediate */ if (imm >= 0 && imm < 32768) /* PPC_ANDI is _only/always_ dot-form */ PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm); else { - PPC_LI32(b2p[TMP_REG_1], imm); - PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, - b2p[TMP_REG_1]); + int tmp_reg = b2p[TMP_REG_1]; + + PPC_LI32(tmp_reg, imm); + if (BPF_CLASS(code) == BPF_JMP) { + PPC_AND_DOT(tmp_reg, dst_reg, + tmp_reg); + } else { + PPC_AND(tmp_reg, dst_reg, + tmp_reg); + PPC_RLWINM_DOT(tmp_reg, tmp_reg, + 0, 0, 31); + } } break; } @@ -1093,6 +1185,7 @@ skip_codegen_passes: bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE)); if (!fp->is_func || extra_pass) { + bpf_prog_fill_jited_linfo(fp, addrs); out_addrs: kfree(addrs); kfree(jit_data); diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index 72238eedc360..d2b8e6061933 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c @@ -1306,15 +1306,6 @@ static int h_24x7_event_init(struct perf_event *event) return -EINVAL; } - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - /* no branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; @@ -1577,6 +1568,7 @@ static struct pmu h_24x7_pmu = { .start_txn = h_24x7_event_start_txn, .commit_txn = h_24x7_event_commit_txn, .cancel_txn = h_24x7_event_cancel_txn, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int hv_24x7_init(void) diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 43fabb3cae0f..735e77b09cdb 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -232,15 +232,6 @@ static int h_gpci_event_init(struct perf_event *event) return -EINVAL; } - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - /* no branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; @@ -285,6 +276,7 @@ static struct pmu h_gpci_pmu = { .start = h_gpci_event_start, .stop = h_gpci_event_stop, .read = h_gpci_event_update, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int hv_gpci_init(void) diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index f292a3f284f1..b1c37cc3fa98 100644 --- 
a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -473,15 +473,6 @@ static int nest_imc_event_init(struct perf_event *event) if (event->hw.sample_period) return -EINVAL; - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -748,15 +739,6 @@ static int core_imc_event_init(struct perf_event *event) if (event->hw.sample_period) return -EINVAL; - /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -1069,6 +1051,7 @@ static int update_pmu_ops(struct imc_pmu *pmu) pmu->pmu.stop = imc_event_stop; pmu->pmu.read = imc_event_update; pmu->pmu.attr_groups = pmu->attr_groups; + pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group; switch (pmu->domain) { diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h index 7de344b7d9cc..063c9d9f2516 100644 --- a/arch/powerpc/perf/power9-events-list.h +++ b/arch/powerpc/perf/power9-events-list.h @@ -97,3 +97,27 @@ EVENT(PM_MRK_DTLB_MISS_64K, 0x3d156) EVENT(PM_DTLB_MISS_16M, 0x4c056) EVENT(PM_DTLB_MISS_1G, 0x4c05a) EVENT(PM_MRK_DTLB_MISS_16M, 0x4c15e) + +/* + * Memory Access Events + * + * Primary PMU event used here is PM_MRK_INST_CMPL (0x401e0) + * To enable capturing of memory profiling, these MMCRA bits + * needs to be programmed and corresponding raw event format + * encoding. 
+ * + * MMCRA bits encoding needed are + * SM (Sampling Mode) + * EM (Eligibility for Random Sampling) + * TECE (Threshold Event Counter Event) + * TS (Threshold Start Event) + * TE (Threshold End Event) + * + * Corresponding Raw Encoding bits: + * sample [EM,SM] + * thresh_sel (TECE) + * thresh start (TS) + * thresh end (TE) + */ +EVENT(MEM_LOADS, 0x34340401e0) +EVENT(MEM_STORES, 0x343c0401e0) diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c index 0ff9c43733e9..030544e35959 100644 --- a/arch/powerpc/perf/power9-pmu.c +++ b/arch/powerpc/perf/power9-pmu.c @@ -160,6 +160,8 @@ GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL); GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL); GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1); GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN); +GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS); +GENERIC_EVENT_ATTR(mem-stores, MEM_STORES); CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN); CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1); @@ -185,6 +187,8 @@ static struct attribute *power9_events_attr[] = { GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL), GENERIC_EVENT_PTR(PM_LD_REF_L1), GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN), + GENERIC_EVENT_PTR(MEM_LOADS), + GENERIC_EVENT_PTR(MEM_STORES), CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN), CACHE_EVENT_PTR(PM_LD_REF_L1), CACHE_EVENT_PTR(PM_L1_PREF), diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 4a9a72d01c3c..35be81fd2dc2 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -180,6 +180,7 @@ config CURRITUCK depends on PPC_47x select SWIOTLB select 476FPE + select FORCE_PCI select PPC4xx_PCI_EXPRESS help This option enables support for the IBM Currituck (476fpe) evaluation board diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c index e55933f9cd55..a5e61e5c16e2 100644 --- a/arch/powerpc/platforms/44x/ppc476.c +++ b/arch/powerpc/platforms/44x/ppc476.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c index f467247fd1c4..18422dbd061a 100644 --- a/arch/powerpc/platforms/44x/warp.c +++ b/arch/powerpc/platforms/44x/warp.c @@ -47,7 +47,7 @@ static int __init warp_probe(void) if (!of_machine_is_compatible("pika,warp")) return 0; - /* For __dma_nommu_alloc_coherent */ + /* For arch_dma_alloc */ ISA_DMA_THRESHOLD = ~0L; return 1; diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index 3d1ecd211776..8137f77abad5 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S @@ -26,13 +26,13 @@ #define SS_MSR 0x74 #define SS_SDR1 0x78 #define SS_LR 0x7c -#define SS_SPRG 0x80 /* 4 SPRGs */ -#define SS_DBAT 0x90 /* 8 DBATs */ -#define SS_IBAT 0xd0 /* 8 IBATs */ -#define SS_TB 0x110 -#define SS_CR 0x118 -#define SS_GPREG 0x11c /* r12-r31 */ -#define STATE_SAVE_SIZE 0x16c +#define SS_SPRG 0x80 /* 8 SPRGs */ +#define SS_DBAT 0xa0 /* 8 DBATs */ +#define SS_IBAT 0xe0 /* 8 IBATs */ +#define SS_TB 0x120 +#define SS_CR 0x128 +#define SS_GPREG 0x12c /* r12-r31 */ +#define STATE_SAVE_SIZE 0x17c .section .data .align 5 @@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep) stw r7, SS_SPRG+12(r3) stw r8, SS_SDR1(r3) + mfspr r4, SPRN_SPRG4 + mfspr r5, SPRN_SPRG5 + mfspr r6, SPRN_SPRG6 + mfspr r7, SPRN_SPRG7 + + stw r4, SS_SPRG+16(r3) + stw r5, SS_SPRG+20(r3) + stw r6, SS_SPRG+24(r3) + stw r7, SS_SPRG+28(r3) + mfspr r4, 
SPRN_DBAT0U mfspr r5, SPRN_DBAT0L mfspr r6, SPRN_DBAT1U @@ -493,6 +503,16 @@ mpc83xx_deep_resume: mtspr SPRN_IBAT7U, r6 mtspr SPRN_IBAT7L, r7 + lwz r4, SS_SPRG+16(r3) + lwz r5, SS_SPRG+20(r3) + lwz r6, SS_SPRG+24(r3) + lwz r7, SS_SPRG+28(r3) + + mtspr SPRN_SPRG4, r4 + mtspr SPRN_SPRG5, r5 + mtspr SPRN_SPRG6, r6 + mtspr SPRN_SPRG7, r7 + lwz r4, SS_SPRG+0(r3) lwz r5, SS_SPRG+4(r3) lwz r6, SS_SPRG+8(r3) diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c index b0dac307bebf..785e9641220d 100644 --- a/arch/powerpc/platforms/85xx/corenet_generic.c +++ b/arch/powerpc/platforms/85xx/corenet_generic.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -223,7 +224,3 @@ define_machine(corenet_generic) { }; machine_arch_initcall(corenet_generic, corenet_gen_publish_devices); - -#ifdef CONFIG_SWIOTLB -machine_arch_initcall(corenet_generic, swiotlb_setup_bus_notifier); -#endif diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c index f29c6f0909f3..c64fa2483ea9 100644 --- a/arch/powerpc/platforms/85xx/ge_imp3a.c +++ b/arch/powerpc/platforms/85xx/ge_imp3a.c @@ -202,8 +202,6 @@ static int __init ge_imp3a_probe(void) machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices); -machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier); - define_machine(ge_imp3a) { .name = "GE_IMP3A", .probe = ge_imp3a_probe, diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c index 94a7f92c858f..94194bad4954 100644 --- a/arch/powerpc/platforms/85xx/mpc8536_ds.c +++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c @@ -57,8 +57,6 @@ static void __init mpc8536_ds_setup_arch(void) machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices); -machine_arch_initcall(mpc8536_ds, swiotlb_setup_bus_notifier); - /* * Called very early, device-tree isn't unflattened */ diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index dc9e035cc637..b7e29ce1f266 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c @@ -174,10 +174,6 @@ machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices); machine_arch_initcall(mpc8572_ds, mpc85xx_common_publish_devices); machine_arch_initcall(p2020_ds, mpc85xx_common_publish_devices); -machine_arch_initcall(mpc8544_ds, swiotlb_setup_bus_notifier); -machine_arch_initcall(mpc8572_ds, swiotlb_setup_bus_notifier); -machine_arch_initcall(p2020_ds, swiotlb_setup_bus_notifier); - /* * Called very early, device-tree isn't unflattened */ diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index d7e440e6dba3..80939a425de5 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -367,10 +367,6 @@ machine_arch_initcall(mpc8568_mds, mpc85xx_publish_devices); machine_arch_initcall(mpc8569_mds, mpc85xx_publish_devices); machine_arch_initcall(p1021_mds, mpc85xx_common_publish_devices); -machine_arch_initcall(mpc8568_mds, swiotlb_setup_bus_notifier); -machine_arch_initcall(mpc8569_mds, swiotlb_setup_bus_notifier); -machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier); - static void __init mpc85xx_mds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c index 78d13b364cd6..33ca373322e1 100644 --- a/arch/powerpc/platforms/85xx/p1010rdb.c +++ 
b/arch/powerpc/platforms/85xx/p1010rdb.c @@ -55,7 +55,6 @@ static void __init p1010_rdb_setup_arch(void) } machine_arch_initcall(p1010_rdb, mpc85xx_common_publish_devices); -machine_arch_initcall(p1010_rdb, swiotlb_setup_bus_notifier); /* * Called very early, device-tree isn't unflattened diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 9fb57f78cdbe..1f1af0557470 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -548,8 +548,6 @@ static void __init p1022_ds_setup_arch(void) machine_arch_initcall(p1022_ds, mpc85xx_common_publish_devices); -machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier); - /* * Called very early, device-tree isn't unflattened */ diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c index 276e00ab3dde..fd9e3e7ef234 100644 --- a/arch/powerpc/platforms/85xx/p1022_rdk.c +++ b/arch/powerpc/platforms/85xx/p1022_rdk.c @@ -128,8 +128,6 @@ static void __init p1022_rdk_setup_arch(void) machine_arch_initcall(p1022_rdk, mpc85xx_common_publish_devices); -machine_arch_initcall(p1022_rdk, swiotlb_setup_bus_notifier); - /* * Called very early, device-tree isn't unflattened */ diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c index 27631c607f3d..c52c8f9e8385 100644 --- a/arch/powerpc/platforms/85xx/qemu_e500.c +++ b/arch/powerpc/platforms/85xx/qemu_e500.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include "smp.h" diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c index 17c6cd3d02e6..775a92353c83 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c @@ -121,7 +121,6 @@ static int __init declare_of_platform_devices(void) return 0; } machine_arch_initcall(mpc86xx_hpcn, declare_of_platform_devices); -machine_arch_initcall(mpc86xx_hpcn, swiotlb_setup_bus_notifier); define_machine(mpc86xx_hpcn) { .name = "MPC86xx HPCN", diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 8c7464c3f27f..842b2c7e156a 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -153,6 +153,11 @@ config E300C3_CPU bool "e300c3 (831x)" depends on PPC_BOOK3S_32 +config G4_CPU + bool "G4 (74xx)" + depends on PPC_BOOK3S_32 + select ALTIVEC + endchoice config TARGET_CPU_BOOL @@ -171,6 +176,7 @@ config TARGET_CPU default "860" if 860_CPU default "e300c2" if E300C2_CPU default "e300c3" if E300C3_CPU + default "G4" if G4_CPU config PPC_BOOK3S def_bool y @@ -402,6 +408,9 @@ config NOT_COHERENT_CACHE bool depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \ GAMECUBE_COMMON || AMIGAONE + select ARCH_HAS_DMA_COHERENT_TO_PFN + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_SYNC_DMA_FOR_CPU default n if PPC_47x default y diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index af2a3c15e0ec..54e012e1f720 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -544,9 +544,10 @@ static struct cbe_iommu *cell_iommu_for_node(int nid) static unsigned long cell_dma_nommu_offset; static unsigned long dma_iommu_fixed_base; +static bool cell_iommu_enabled; /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ -static int iommu_fixed_is_weak; +bool iommu_fixed_is_weak; static struct iommu_table *cell_get_iommu_table(struct device *dev) { @@ 
-568,102 +569,19 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) return &window->table; } -/* A coherent allocation implies strong ordering */ - -static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - unsigned long attrs) -{ - if (iommu_fixed_is_weak) - return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), - size, dma_handle, - device_to_mask(dev), flag, - dev_to_node(dev)); - else - return dma_nommu_ops.alloc(dev, size, dma_handle, flag, - attrs); -} - -static void dma_fixed_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle, - unsigned long attrs) -{ - if (iommu_fixed_is_weak) - iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, - dma_handle); - else - dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs); -} - -static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction direction, - unsigned long attrs) -{ - if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) - return dma_nommu_ops.map_page(dev, page, offset, size, - direction, attrs); - else - return iommu_map_page(dev, cell_get_iommu_table(dev), page, - offset, size, device_to_mask(dev), - direction, attrs); -} - -static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, - size_t size, enum dma_data_direction direction, - unsigned long attrs) -{ - if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) - dma_nommu_ops.unmap_page(dev, dma_addr, size, direction, - attrs); - else - iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size, - direction, attrs); -} - -static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) - return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs); - else - return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, - nents, device_to_mask(dev), - direction, attrs); -} - -static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction direction, - unsigned long attrs) -{ - if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) - dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs); - else - ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, - direction, attrs); -} - -static int dma_suported_and_switch(struct device *dev, u64 dma_mask); - -static const struct dma_map_ops dma_iommu_fixed_ops = { - .alloc = dma_fixed_alloc_coherent, - .free = dma_fixed_free_coherent, - .map_sg = dma_fixed_map_sg, - .unmap_sg = dma_fixed_unmap_sg, - .dma_supported = dma_suported_and_switch, - .map_page = dma_fixed_map_page, - .unmap_page = dma_fixed_unmap_page, -}; +static u64 cell_iommu_get_fixed_address(struct device *dev); static void cell_dma_dev_setup(struct device *dev) { - if (get_pci_dma_ops() == &dma_iommu_ops) + if (cell_iommu_enabled) { + u64 addr = cell_iommu_get_fixed_address(dev); + + if (addr != OF_BAD_ADDR) + dev->archdata.dma_offset = addr + dma_iommu_fixed_base; set_iommu_table_base(dev, cell_get_iommu_table(dev)); - else if (get_pci_dma_ops() == &dma_nommu_ops) - set_dma_offset(dev, cell_dma_nommu_offset); - else - BUG(); + } else { + dev->archdata.dma_offset = cell_dma_nommu_offset; + } } static void cell_pci_dma_dev_setup(struct pci_dev *dev) @@ -680,11 +598,9 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, if (action != 
BUS_NOTIFY_ADD_DEVICE) return 0; - /* We use the PCI DMA ops */ - dev->dma_ops = get_pci_dma_ops(); - + if (cell_iommu_enabled) + dev->dma_ops = &dma_iommu_ops; cell_dma_dev_setup(dev); - return 0; } @@ -809,7 +725,6 @@ static int __init cell_iommu_init_disabled(void) unsigned long base = 0, size; /* When no iommu is present, we use direct DMA ops */ - set_pci_dma_ops(&dma_nommu_ops); /* First make sure all IOC translation is turned off */ cell_disable_iommus(); @@ -894,7 +809,11 @@ static u64 cell_iommu_get_fixed_address(struct device *dev) const u32 *ranges = NULL; int i, len, best, naddr, nsize, pna, range_size; + /* We can be called for platform devices that have no of_node */ np = of_node_get(dev->of_node); + if (!np) + goto out; + while (1) { naddr = of_n_addr_cells(np); nsize = of_n_size_cells(np); @@ -945,27 +864,10 @@ out: return dev_addr; } -static int dma_suported_and_switch(struct device *dev, u64 dma_mask) +static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask) { - if (dma_mask == DMA_BIT_MASK(64) && - cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) { - u64 addr = cell_iommu_get_fixed_address(dev) + - dma_iommu_fixed_base; - dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n"); - dev_dbg(dev, "iommu: fixed addr = %llx\n", addr); - set_dma_ops(dev, &dma_iommu_fixed_ops); - set_dma_offset(dev, addr); - return 1; - } - - if (dma_iommu_dma_supported(dev, dma_mask)) { - dev_dbg(dev, "iommu: not 64-bit, using default ops\n"); - set_dma_ops(dev, get_pci_dma_ops()); - cell_dma_dev_setup(dev); - return 1; - } - - return 0; + return mask == DMA_BIT_MASK(64) && + cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR; } static void insert_16M_pte(unsigned long addr, unsigned long *ptab, @@ -1119,9 +1021,8 @@ static int __init cell_iommu_fixed_mapping_init(void) cell_iommu_setup_window(iommu, np, dbase, dsize, 0); } - dma_iommu_ops.dma_supported = dma_suported_and_switch; - set_pci_dma_ops(&dma_iommu_ops); - + cell_pci_controller_ops.iommu_bypass_supported = + cell_pci_iommu_bypass_supported; return 0; } @@ -1142,7 +1043,7 @@ static int __init setup_iommu_fixed(char *str) pciep = of_find_node_by_type(NULL, "pcie-endpoint"); if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) - iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING; + iommu_fixed_is_weak = true; of_node_put(pciep); @@ -1150,26 +1051,6 @@ static int __init setup_iommu_fixed(char *str) } __setup("iommu_fixed=", setup_iommu_fixed); -static u64 cell_dma_get_required_mask(struct device *dev) -{ - const struct dma_map_ops *dma_ops; - - if (!dev->dma_mask) - return 0; - - if (!iommu_fixed_disabled && - cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) - return DMA_BIT_MASK(64); - - dma_ops = get_dma_ops(dev); - if (dma_ops->get_required_mask) - return dma_ops->get_required_mask(dev); - - WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops); - - return DMA_BIT_MASK(64); -} - static int __init cell_iommu_init(void) { struct device_node *np; @@ -1186,10 +1067,9 @@ static int __init cell_iommu_init(void) /* Setup various callbacks */ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; - ppc_md.dma_get_required_mask = cell_dma_get_required_mask; if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) - goto bail; + goto done; /* Create an iommu for each /axon node. 
*/ for_each_node_by_name(np, "axon") { @@ -1206,10 +1086,10 @@ static int __init cell_iommu_init(void) continue; cell_iommu_init_one(np, SPIDER_DMA_OFFSET); } - + done: /* Setup default PCI iommu ops */ set_pci_dma_ops(&dma_iommu_ops); - + cell_iommu_enabled = true; bail: /* Register callbacks on OF platform device addition/removal * to handle linking them to the right DMA operations diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index 125f2a5f02de..b5f35cbe9e21 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -34,7 +34,7 @@ */ static void *spu_syscall_table[] = { -#define __SYSCALL(nr, entry, nargs) entry, +#define __SYSCALL(nr, entry) entry, #include #undef __SYSCALL }; diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index 263413a34823..b95d6afc39b5 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -26,7 +26,6 @@ #include #include #include -#include #include diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index ae8123edddc6..48c2477e7e2a 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -2338,9 +2338,8 @@ static int spufs_switch_log_open(struct inode *inode, struct file *file) goto out; } - ctx->switch_log = kmalloc(sizeof(struct switch_log) + - SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry), - GFP_KERNEL); + ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log, + SWITCH_LOG_BUFSIZE), GFP_KERNEL); if (!ctx->switch_log) { rc = -ENOMEM; diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile index 4b3bfadc70fa..dc3465cc8bc6 100644 --- a/arch/powerpc/platforms/chrp/Makefile +++ b/arch/powerpc/platforms/chrp/Makefile @@ -1,3 +1,3 @@ obj-y += setup.o time.o pegasos_eth.o pci.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_NVRAM) += nvram.o +obj-$(CONFIG_NVRAM:m=y) += nvram.o diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index 791b86398e1d..37ac20ccbb19 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c @@ -24,7 +24,7 @@ static unsigned int nvram_size; static unsigned char nvram_buf[4]; static DEFINE_SPINLOCK(nvram_lock); -static unsigned char chrp_nvram_read(int addr) +static unsigned char chrp_nvram_read_val(int addr) { unsigned int done; unsigned long flags; @@ -46,7 +46,7 @@ static unsigned char chrp_nvram_read(int addr) return ret; } -static void chrp_nvram_write(int addr, unsigned char val) +static void chrp_nvram_write_val(int addr, unsigned char val) { unsigned int done; unsigned long flags; @@ -64,6 +64,11 @@ static void chrp_nvram_write(int addr, unsigned char val) spin_unlock_irqrestore(&nvram_lock, flags); } +static ssize_t chrp_nvram_size(void) +{ + return nvram_size; +} + void __init chrp_nvram_init(void) { struct device_node *nvram; @@ -85,8 +90,9 @@ void __init chrp_nvram_init(void) printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size); of_node_put(nvram); - ppc_md.nvram_read_val = chrp_nvram_read; - ppc_md.nvram_write_val = chrp_nvram_write; + ppc_md.nvram_read_val = chrp_nvram_read_val; + ppc_md.nvram_write_val = chrp_nvram_write_val; + ppc_md.nvram_size = chrp_nvram_size; return; } diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 9438fa0fc355..fcf6f2342ef4 100644 --- 
a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -549,7 +549,7 @@ static void __init chrp_init_IRQ(void) static void __init chrp_init2(void) { -#ifdef CONFIG_NVRAM +#if IS_ENABLED(CONFIG_NVRAM) chrp_nvram_init(); #endif diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index ecf703ee3a76..235fe81aa2b1 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c @@ -54,10 +54,6 @@ static void __iomem *hw_ctrl; static void __iomem *hw_gpio; -unsigned long wii_hole_start; -unsigned long wii_hole_size; - - static int __init page_aligned(unsigned long x) { return !(x & (PAGE_SIZE-1)); @@ -69,26 +65,6 @@ void __init wii_memory_fixups(void) BUG_ON(memblock.memory.cnt != 2); BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base)); - - /* determine hole */ - wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE); - wii_hole_size = p[1].base - wii_hole_start; -} - -unsigned long __init wii_mmu_mapin_mem2(unsigned long top) -{ - unsigned long delta, size, bl; - unsigned long max_size = (256<<20); - - /* MEM2 64MB@0x10000000 */ - delta = wii_hole_start + wii_hole_size; - size = top - delta; - for (bl = 128<<10; bl < max_size; bl <<= 1) { - if (bl * 2 > size) - break; - } - setbat(4, PAGE_OFFSET+delta, delta, bl, PAGE_KERNEL_X); - return delta + bl; } static void __noreturn wii_spin(void) diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index f2971522fb4a..044c6089462c 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) */ if (dev->vendor == 0x1959 && dev->device == 0xa007 && !firmware_has_feature(FW_FEATURE_LPAR)) { - dev->dev.dma_ops = &dma_nommu_ops; + dev->dev.dma_ops = NULL; /* * Set the coherent DMA mask to prevent the iommu * being used unnecessarily @@ -208,7 +208,12 @@ static int __init iob_init(struct device_node *dn) pr_debug(" -> %s\n", __func__); /* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */ - iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000)); + iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21, + MEMBLOCK_LOW_LIMIT, 0x80000000, + NUMA_NO_NODE); + if (!iob_l2_base) + panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n", + __func__, 1UL << 21, 1UL << 21, 0x80000000); pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base); @@ -269,4 +274,3 @@ void __init iommu_init_early_pasemi(void) pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi; set_pci_dma_ops(&dma_iommu_ops); } - diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index c0532999f854..46dd463faaa7 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -411,55 +411,6 @@ out: return !!(srr1 & 0x2); } -#ifdef CONFIG_PCMCIA -static int pcmcia_notify(struct notifier_block *nb, unsigned long action, - void *data) -{ - struct device *dev = data; - struct device *parent; - struct pcmcia_device *pdev = to_pcmcia_dev(dev); - - /* We are only intereted in device addition */ - if (action != BUS_NOTIFY_ADD_DEVICE) - return 0; - - parent = pdev->socket->dev.parent; - - /* We know electra_cf devices will always have of_node set, since - * electra_cf is an of_platform driver. 
- */ - if (!parent->of_node) - return 0; - - if (!of_device_is_compatible(parent->of_node, "electra-cf")) - return 0; - - /* We use the direct ops for localbus */ - dev->dma_ops = &dma_nommu_ops; - - return 0; -} - -static struct notifier_block pcmcia_notifier = { - .notifier_call = pcmcia_notify, -}; - -static inline void pasemi_pcmcia_init(void) -{ - extern struct bus_type pcmcia_bus_type; - - bus_register_notifier(&pcmcia_bus_type, &pcmcia_notifier); -} - -#else - -static inline void pasemi_pcmcia_init(void) -{ -} - -#endif - - static const struct of_device_id pasemi_bus_ids[] = { /* Unfortunately needed for legacy firmwares */ { .type = "localbus", }, @@ -472,8 +423,6 @@ static const struct of_device_id pasemi_bus_ids[] = { static int __init pasemi_publish_devices(void) { - pasemi_pcmcia_init(); - /* Publish OF platform devices for SDC and other non-PCI devices */ of_platform_bus_probe(NULL, pasemi_bus_ids, NULL); diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index 923bfb340433..20ebf35d7913 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile @@ -15,7 +15,5 @@ obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really # CONFIG_NVRAM=y obj-$(CONFIG_NVRAM:m=y) += nvram.o -# ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff -obj-$(CONFIG_PPC64) += nvram.o obj-$(CONFIG_PPC32) += bootx_init.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index ae54d7fe68f3..86989c5779c2 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -147,6 +147,11 @@ static ssize_t core99_nvram_size(void) static volatile unsigned char __iomem *nvram_addr; static int nvram_mult; +static ssize_t ppc32_nvram_size(void) +{ + return NVRAM_SIZE; +} + static unsigned char direct_nvram_read_byte(int addr) { return in_8(&nvram_data[(addr & (NVRAM_SIZE - 1)) * nvram_mult]); @@ -514,6 +519,9 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) return -EINVAL; } nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); + if (!nvram_image) + panic("%s: Failed to allocate %u bytes\n", __func__, + NVRAM_SIZE); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ @@ -590,21 +598,25 @@ int __init pmac_nvram_init(void) nvram_mult = 1; ppc_md.nvram_read_val = direct_nvram_read_byte; ppc_md.nvram_write_val = direct_nvram_write_byte; + ppc_md.nvram_size = ppc32_nvram_size; } else if (nvram_naddrs == 1) { nvram_data = ioremap(r1.start, s1); nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE; ppc_md.nvram_read_val = direct_nvram_read_byte; ppc_md.nvram_write_val = direct_nvram_write_byte; + ppc_md.nvram_size = ppc32_nvram_size; } else if (nvram_naddrs == 2) { nvram_addr = ioremap(r1.start, s1); nvram_data = ioremap(r2.start, s2); ppc_md.nvram_read_val = indirect_nvram_read_byte; ppc_md.nvram_write_val = indirect_nvram_write_byte; + ppc_md.nvram_size = ppc32_nvram_size; } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { #ifdef CONFIG_ADB_PMU nvram_naddrs = -1; ppc_md.nvram_read_val = pmu_nvram_read_byte; ppc_md.nvram_write_val = pmu_nvram_write_byte; + ppc_md.nvram_size = ppc32_nvram_size; #endif /* CONFIG_ADB_PMU */ } else { printk(KERN_ERR "Incompatible type of NVRAM\n"); diff --git a/arch/powerpc/platforms/powermac/setup.c 
b/arch/powerpc/platforms/powermac/setup.c index 2e8221e20ee8..b7efcf336589 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -316,8 +316,7 @@ static void __init pmac_setup_arch(void) find_via_pmu(); smu_init(); -#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \ - defined(CONFIG_PPC64) +#if IS_ENABLED(CONFIG_NVRAM) pmac_nvram_init(); #endif #ifdef CONFIG_PPC32 diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index f157e3d071f2..b36ddee17c87 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c @@ -68,7 +68,7 @@ long __init pmac_time_init(void) { s32 delta = 0; -#ifdef CONFIG_NVRAM +#if defined(CONFIG_NVRAM) && defined(CONFIG_PPC32) int dst; delta = ((s32)pmac_xpram_read(PMAC_XPRAM_MACHINE_LOC + 0x9)) << 16; diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index b540ce8eec55..da2e99efbd04 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += setup.o opal-wrappers.o opal.o opal-async.o idle.o -obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o +obj-y += setup.o opal-call.o opal-wrappers.o opal.o opal-async.o +obj-y += idle.o opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o obj-y += opal-kmsg.o opal-powercap.o opal-psr.o opal-sensor-groups.o @@ -11,7 +11,6 @@ obj-$(CONFIG_CXL_BASE) += pci-cxl.o obj-$(CONFIG_EEH) += eeh-powernv.o obj-$(CONFIG_PPC_SCOM) += opal-xscom.o obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o -obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o obj-$(CONFIG_OPAL_PRD) += opal-prd.o obj-$(CONFIG_PERF_EVENTS) += opal-imc.o obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c index 35f699ebb662..e52f9b06dd9c 100644 --- a/arch/powerpc/platforms/powernv/idle.c +++ b/arch/powerpc/platforms/powernv/idle.c @@ -458,7 +458,8 @@ EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #ifdef CONFIG_HOTPLUG_CPU -static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) + +void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) { u64 pir = get_hard_smp_processor_id(cpu); @@ -481,20 +482,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu) { unsigned long srr1; u32 idle_states = pnv_get_supported_cpuidle_states(); - u64 lpcr_val; - - /* - * We don't want to take decrementer interrupts while we are - * offline, so clear LPCR:PECE1. We keep PECE2 (and - * LPCR_PECE_HVEE on P9) enabled as to let IPIs in. - * - * If the CPU gets woken up by a special wakeup, ensure that - * the SLW engine sets LPCR with decrementer bit cleared, else - * the CPU will come back to the kernel due to a spurious - * wakeup. - */ - lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; - pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); __ppc64_runlatch_off(); @@ -526,16 +513,6 @@ unsigned long pnv_cpu_offline(unsigned int cpu) __ppc64_runlatch_on(); - /* - * Re-enable decrementer interrupts in LPCR. - * - * Further, we want stop states to be woken up by decrementer - * for non-hotplug cases. So program the LPCR via stop api as - * well. 
- */ - lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1; - pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); - return srr1; } #endif diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 84d038ed3882..248a38ad25c7 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -223,7 +224,7 @@ static int memtrace_online(void) ent = &memtrace_array[i]; /* We have onlined this chunk previously */ - if (ent->nid == -1) + if (ent->nid == NUMA_NO_NODE) continue; /* Remove from io mappings */ @@ -257,7 +258,7 @@ static int memtrace_online(void) */ debugfs_remove_recursive(ent->dir); pr_info("Added trace memory back to node %d\n", ent->nid); - ent->size = ent->start = ent->nid = -1; + ent->size = ent->start = ent->nid = NUMA_NO_NODE; } if (ret) return ret; diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 3f58c7dbd581..dc23d9d2a7d9 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c @@ -28,10 +28,6 @@ */ static DEFINE_SPINLOCK(npu_context_lock); -/* - * Other types of TCE cache invalidation are not functional in the - * hardware. - */ static struct pci_dev *get_pci_dev(struct device_node *dn) { struct pci_dn *pdn = PCI_DN(dn); @@ -220,7 +216,7 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe) * their parent device so drivers shouldn't be doing DMA * operations directly on these devices. */ - set_dma_ops(&npe->pdev->dev, NULL); + set_dma_ops(&npe->pdev->dev, &dma_dummy_ops); } /* @@ -917,15 +913,6 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn, mmio_invalidate(npu_context, 0, ~0UL); } -static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address, - pte_t pte) -{ - struct npu_context *npu_context = mn_to_npu_context(mn); - mmio_invalidate(npu_context, address, PAGE_SIZE); -} - static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm, unsigned long start, unsigned long end) @@ -936,7 +923,6 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { .release = pnv_npu2_mn_release, - .change_pte = pnv_npu2_mn_change_pte, .invalidate_range = pnv_npu2_mn_invalidate_range, }; diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c new file mode 100644 index 000000000000..578757d403ab --- /dev/null +++ b/arch/powerpc/platforms/powernv/opal-call.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#ifdef CONFIG_TRACEPOINTS +/* + * Since the tracing code might execute OPAL calls we need to guard against + * recursion. 
+ */ +static DEFINE_PER_CPU(unsigned int, opal_trace_depth); + +static void __trace_opal_entry(s64 a0, s64 a1, s64 a2, s64 a3, + s64 a4, s64 a5, s64 a6, s64 a7, + unsigned long opcode) +{ + unsigned int *depth; + unsigned long args[8]; + + depth = this_cpu_ptr(&opal_trace_depth); + + if (*depth) + return; + + args[0] = a0; + args[1] = a1; + args[2] = a2; + args[3] = a3; + args[4] = a4; + args[5] = a5; + args[6] = a6; + args[7] = a7; + + (*depth)++; + trace_opal_entry(opcode, &args[0]); + (*depth)--; +} + +static void __trace_opal_exit(unsigned long opcode, unsigned long retval) +{ + unsigned int *depth; + + depth = this_cpu_ptr(&opal_trace_depth); + + if (*depth) + return; + + (*depth)++; + trace_opal_exit(opcode, retval); + (*depth)--; +} + +static DEFINE_STATIC_KEY_FALSE(opal_tracepoint_key); + +int opal_tracepoint_regfunc(void) +{ + static_branch_inc(&opal_tracepoint_key); + return 0; +} + +void opal_tracepoint_unregfunc(void) +{ + static_branch_dec(&opal_tracepoint_key); +} + +static s64 __opal_call_trace(s64 a0, s64 a1, s64 a2, s64 a3, + s64 a4, s64 a5, s64 a6, s64 a7, + unsigned long opcode, unsigned long msr) +{ + s64 ret; + + __trace_opal_entry(a0, a1, a2, a3, a4, a5, a6, a7, opcode); + ret = __opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); + __trace_opal_exit(opcode, ret); + + return ret; +} + +#define DO_TRACE (static_branch_unlikely(&opal_tracepoint_key)) + +#else /* CONFIG_TRACEPOINTS */ + +static s64 __opal_call_trace(s64 a0, s64 a1, s64 a2, s64 a3, + s64 a4, s64 a5, s64 a6, s64 a7, + unsigned long opcode, unsigned long msr) +{ +} + +#define DO_TRACE false +#endif /* CONFIG_TRACEPOINTS */ + +static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3, + int64_t a4, int64_t a5, int64_t a6, int64_t a7, int64_t opcode) +{ + unsigned long flags; + unsigned long msr = mfmsr(); + bool mmu = (msr & (MSR_IR|MSR_DR)); + int64_t ret; + + msr &= ~MSR_EE; + + if (unlikely(!mmu)) + return __opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); + + local_save_flags(flags); + hard_irq_disable(); + + if (DO_TRACE) { + ret = __opal_call_trace(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); + } else { + ret = __opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); + } + + local_irq_restore(flags); + + return ret; +} + +#define OPAL_CALL(name, opcode) \ +int64_t name(int64_t a0, int64_t a1, int64_t a2, int64_t a3, \ + int64_t a4, int64_t a5, int64_t a6, int64_t a7) \ +{ \ + return opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode); \ +} + +OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL); +OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE); +OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ); +OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE); +OPAL_CALL(opal_rtc_read, OPAL_RTC_READ); +OPAL_CALL(opal_rtc_write, OPAL_RTC_WRITE); +OPAL_CALL(opal_cec_power_down, OPAL_CEC_POWER_DOWN); +OPAL_CALL(opal_cec_reboot, OPAL_CEC_REBOOT); +OPAL_CALL(opal_cec_reboot2, OPAL_CEC_REBOOT2); +OPAL_CALL(opal_read_nvram, OPAL_READ_NVRAM); +OPAL_CALL(opal_write_nvram, OPAL_WRITE_NVRAM); +OPAL_CALL(opal_handle_interrupt, OPAL_HANDLE_INTERRUPT); +OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS); +OPAL_CALL(opal_pci_set_hub_tce_memory, OPAL_PCI_SET_HUB_TCE_MEMORY); +OPAL_CALL(opal_pci_set_phb_tce_memory, OPAL_PCI_SET_PHB_TCE_MEMORY); +OPAL_CALL(opal_pci_config_read_byte, OPAL_PCI_CONFIG_READ_BYTE); +OPAL_CALL(opal_pci_config_read_half_word, OPAL_PCI_CONFIG_READ_HALF_WORD); +OPAL_CALL(opal_pci_config_read_word, OPAL_PCI_CONFIG_READ_WORD); +OPAL_CALL(opal_pci_config_write_byte, 
OPAL_PCI_CONFIG_WRITE_BYTE); +OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD); +OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD); +OPAL_CALL(opal_set_xive, OPAL_SET_XIVE); +OPAL_CALL(opal_get_xive, OPAL_GET_XIVE); +OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER); +OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS); +OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR); +OPAL_CALL(opal_pci_eeh_freeze_set, OPAL_PCI_EEH_FREEZE_SET); +OPAL_CALL(opal_pci_err_inject, OPAL_PCI_ERR_INJECT); +OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC); +OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE); +OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW); +OPAL_CALL(opal_pci_map_pe_mmio_window, OPAL_PCI_MAP_PE_MMIO_WINDOW); +OPAL_CALL(opal_pci_set_phb_table_memory, OPAL_PCI_SET_PHB_TABLE_MEMORY); +OPAL_CALL(opal_pci_set_pe, OPAL_PCI_SET_PE); +OPAL_CALL(opal_pci_set_peltv, OPAL_PCI_SET_PELTV); +OPAL_CALL(opal_pci_set_mve, OPAL_PCI_SET_MVE); +OPAL_CALL(opal_pci_set_mve_enable, OPAL_PCI_SET_MVE_ENABLE); +OPAL_CALL(opal_pci_get_xive_reissue, OPAL_PCI_GET_XIVE_REISSUE); +OPAL_CALL(opal_pci_set_xive_reissue, OPAL_PCI_SET_XIVE_REISSUE); +OPAL_CALL(opal_pci_set_xive_pe, OPAL_PCI_SET_XIVE_PE); +OPAL_CALL(opal_get_xive_source, OPAL_GET_XIVE_SOURCE); +OPAL_CALL(opal_get_msi_32, OPAL_GET_MSI_32); +OPAL_CALL(opal_get_msi_64, OPAL_GET_MSI_64); +OPAL_CALL(opal_start_cpu, OPAL_START_CPU); +OPAL_CALL(opal_query_cpu_status, OPAL_QUERY_CPU_STATUS); +OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL); +OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW); +OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL); +OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET); +OPAL_CALL(opal_pci_get_hub_diag_data, OPAL_PCI_GET_HUB_DIAG_DATA); +OPAL_CALL(opal_pci_get_phb_diag_data, OPAL_PCI_GET_PHB_DIAG_DATA); +OPAL_CALL(opal_pci_fence_phb, OPAL_PCI_FENCE_PHB); +OPAL_CALL(opal_pci_reinit, OPAL_PCI_REINIT); +OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR); +OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS); +OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS); +OPAL_CALL(opal_get_dpo_status, OPAL_GET_DPO_STATUS); +OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED); +OPAL_CALL(opal_pci_next_error, OPAL_PCI_NEXT_ERROR); +OPAL_CALL(opal_pci_poll, OPAL_PCI_POLL); +OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI); +OPAL_CALL(opal_pci_get_phb_diag_data2, OPAL_PCI_GET_PHB_DIAG_DATA2); +OPAL_CALL(opal_xscom_read, OPAL_XSCOM_READ); +OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE); +OPAL_CALL(opal_lpc_read, OPAL_LPC_READ); +OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE); +OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU); +OPAL_CALL(opal_reinit_cpus, OPAL_REINIT_CPUS); +OPAL_CALL(opal_read_elog, OPAL_ELOG_READ); +OPAL_CALL(opal_send_ack_elog, OPAL_ELOG_ACK); +OPAL_CALL(opal_get_elog_size, OPAL_ELOG_SIZE); +OPAL_CALL(opal_resend_pending_logs, OPAL_ELOG_RESEND); +OPAL_CALL(opal_write_elog, OPAL_ELOG_WRITE); +OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); +OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); +OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); +OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE); +OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN); +OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT); +OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO); +OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2); +OPAL_CALL(opal_dump_read, OPAL_DUMP_READ); +OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK); 
+OPAL_CALL(opal_get_msg, OPAL_GET_MSG); +OPAL_CALL(opal_write_oppanel_async, OPAL_WRITE_OPPANEL_ASYNC); +OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION); +OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND); +OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT); +OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ); +OPAL_CALL(opal_get_param, OPAL_GET_PARAM); +OPAL_CALL(opal_set_param, OPAL_SET_PARAM); +OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI); +OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE); +OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG); +OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); +OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); +OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE); +OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO); +OPAL_CALL(opal_tpo_read, OPAL_READ_TPO); +OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND); +OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV); +OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST); +OPAL_CALL(opal_flash_read, OPAL_FLASH_READ); +OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE); +OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE); +OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG); +OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR); +OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR); +OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH); +OPAL_CALL(opal_get_device_tree, OPAL_GET_DEVICE_TREE); +OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE); +OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE); +OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE); +OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR); +OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR); +OPAL_CALL(opal_int_eoi, OPAL_INT_EOI); +OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR); +OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL); +OPAL_CALL(opal_nmmu_set_ptcr, OPAL_NMMU_SET_PTCR); +OPAL_CALL(opal_xive_reset, OPAL_XIVE_RESET); +OPAL_CALL(opal_xive_get_irq_info, OPAL_XIVE_GET_IRQ_INFO); +OPAL_CALL(opal_xive_get_irq_config, OPAL_XIVE_GET_IRQ_CONFIG); +OPAL_CALL(opal_xive_set_irq_config, OPAL_XIVE_SET_IRQ_CONFIG); +OPAL_CALL(opal_xive_get_queue_info, OPAL_XIVE_GET_QUEUE_INFO); +OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO); +OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE); +OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK); +OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK); +OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ); +OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ); +OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO); +OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO); +OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC); +OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP); +OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET); +OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT); +OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT); +OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR); +OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT); +OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START); +OPAL_CALL(opal_imc_counters_stop, OPAL_IMC_COUNTERS_STOP); +OPAL_CALL(opal_pci_set_p2p, OPAL_PCI_SET_P2P); +OPAL_CALL(opal_get_powercap, OPAL_GET_POWERCAP); +OPAL_CALL(opal_set_powercap, OPAL_SET_POWERCAP); +OPAL_CALL(opal_get_power_shift_ratio, OPAL_GET_POWER_SHIFT_RATIO); +OPAL_CALL(opal_set_power_shift_ratio, OPAL_SET_POWER_SHIFT_RATIO); +OPAL_CALL(opal_sensor_group_clear, OPAL_SENSOR_GROUP_CLEAR); 
+OPAL_CALL(opal_quiesce, OPAL_QUIESCE); +OPAL_CALL(opal_npu_spa_setup, OPAL_NPU_SPA_SETUP); +OPAL_CALL(opal_npu_spa_clear_cache, OPAL_NPU_SPA_CLEAR_CACHE); +OPAL_CALL(opal_npu_tl_set, OPAL_NPU_TL_SET); +OPAL_CALL(opal_pci_get_pbcq_tunnel_bar, OPAL_PCI_GET_PBCQ_TUNNEL_BAR); +OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR); +OPAL_CALL(opal_sensor_read_u64, OPAL_SENSOR_READ_U64); +OPAL_CALL(opal_sensor_group_enable, OPAL_SENSOR_GROUP_ENABLE); +OPAL_CALL(opal_nx_coproc_init, OPAL_NX_COPROC_INIT); diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index acd3206dfae3..06628c71cef6 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -98,7 +98,7 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, } static struct bin_attribute opal_msglog_attr = { - .attr = {.name = "msglog", .mode = 0444}, + .attr = {.name = "msglog", .mode = 0400}, .read = opal_msglog_read }; diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index f4875fe3f8ff..7d2052d8af9d 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -17,317 +17,51 @@ #include #include - .section ".text" - -#ifdef CONFIG_TRACEPOINTS -#ifdef CONFIG_JUMP_LABEL -#define OPAL_BRANCH(LABEL) \ - ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key) -#else - - .section ".toc","aw" - - .globl opal_tracepoint_refcount -opal_tracepoint_refcount: - .8byte 0 - - .section ".text" - -/* - * We branch around this in early init by using an unconditional cpu - * feature. - */ -#define OPAL_BRANCH(LABEL) \ -BEGIN_FTR_SECTION; \ - b 1f; \ -END_FTR_SECTION(0, 1); \ - ld r11,opal_tracepoint_refcount@toc(r2); \ - cmpdi r11,0; \ - bne- LABEL; \ -1: - -#endif - -#else -#define OPAL_BRANCH(LABEL) -#endif + .section ".text" /* - * DO_OPAL_CALL assumes: - * r0 = opal call token - * r12 = msr - * LR has been saved + * r3-r10 - OPAL call arguments + * STK_PARAM(R11) - OPAL opcode + * STK_PARAM(R12) - MSR to restore */ -#define DO_OPAL_CALL() \ - mfcr r11; \ - stw r11,8(r1); \ - li r11,0; \ - ori r11,r11,MSR_EE; \ - std r12,PACASAVEDMSR(r13); \ - andc r12,r12,r11; \ - mtmsrd r12,1; \ - LOAD_REG_ADDR(r11,opal_return); \ - mtlr r11; \ - li r11,MSR_DR|MSR_IR|MSR_LE;\ - andc r12,r12,r11; \ - mtspr SPRN_HSRR1,r12; \ - LOAD_REG_ADDR(r11,opal); \ - ld r12,8(r11); \ - ld r2,0(r11); \ - mtspr SPRN_HSRR0,r12; \ +_GLOBAL_TOC(__opal_call) + mflr r0 + std r0,PPC_LR_STKOFF(r1) + ld r12,STK_PARAM(R12)(r1) + li r0,MSR_IR|MSR_DR|MSR_LE + andc r12,r12,r0 + LOAD_REG_ADDR(r11, opal_return) + mtlr r11 + LOAD_REG_ADDR(r11, opal) + ld r2,0(r11) + ld r11,8(r11) + mtspr SPRN_HSRR0,r11 + mtspr SPRN_HSRR1,r12 + /* set token to r0 */ + ld r0,STK_PARAM(R11)(r1) hrfid - -#define OPAL_CALL(name, token) \ - _GLOBAL_TOC(name); \ - mfmsr r12; \ - mflr r0; \ - andi. r11,r12,MSR_IR|MSR_DR; \ - std r0,PPC_LR_STKOFF(r1); \ - li r0,token; \ - beq opal_real_call; \ - OPAL_BRANCH(opal_tracepoint_entry) \ - DO_OPAL_CALL() - - opal_return: /* - * Fixup endian on OPAL return... we should be able to simplify - * this by instead converting the below trampoline to a set of - * bytes (always BE) since MSR:LE will end up fixed up as a side - * effect of the rfid. + * Restore MSR on OPAL return. The MSR is set to big-endian. 
*/ - FIXUP_ENDIAN_HV - ld r2,PACATOC(r13); - lwz r4,8(r1); - ld r5,PPC_LR_STKOFF(r1); - ld r6,PACASAVEDMSR(r13); - mtcr r4; - mtspr SPRN_HSRR0,r5; - mtspr SPRN_HSRR1,r6; - hrfid - -opal_real_call: - mfcr r11 - stw r11,8(r1) - /* Set opal return address */ - LOAD_REG_ADDR(r11, opal_return_realmode) - mtlr r11 - li r11,MSR_LE - andc r12,r12,r11 - mtspr SPRN_HSRR1,r12 - LOAD_REG_ADDR(r11,opal) - ld r12,8(r11) - ld r2,0(r11) - mtspr SPRN_HSRR0,r12 - hrfid - -opal_return_realmode: - FIXUP_ENDIAN_HV - ld r2,PACATOC(r13); - lwz r11,8(r1); - ld r12,PPC_LR_STKOFF(r1) - mtcr r11; - mtlr r12 - blr - -#ifdef CONFIG_TRACEPOINTS -opal_tracepoint_entry: - stdu r1,-STACKFRAMESIZE(r1) - std r0,STK_REG(R23)(r1) - std r3,STK_REG(R24)(r1) - std r4,STK_REG(R25)(r1) - std r5,STK_REG(R26)(r1) - std r6,STK_REG(R27)(r1) - std r7,STK_REG(R28)(r1) - std r8,STK_REG(R29)(r1) - std r9,STK_REG(R30)(r1) - std r10,STK_REG(R31)(r1) - mr r3,r0 - addi r4,r1,STK_REG(R24) - bl __trace_opal_entry - ld r0,STK_REG(R23)(r1) - ld r3,STK_REG(R24)(r1) - ld r4,STK_REG(R25)(r1) - ld r5,STK_REG(R26)(r1) - ld r6,STK_REG(R27)(r1) - ld r7,STK_REG(R28)(r1) - ld r8,STK_REG(R29)(r1) - ld r9,STK_REG(R30)(r1) - ld r10,STK_REG(R31)(r1) - - /* setup LR so we return via tracepoint_return */ - LOAD_REG_ADDR(r11,opal_tracepoint_return) - std r11,16(r1) - - mfmsr r12 - DO_OPAL_CALL() - -opal_tracepoint_return: - std r3,STK_REG(R31)(r1) - mr r4,r3 - ld r3,STK_REG(R23)(r1) - bl __trace_opal_exit - ld r3,STK_REG(R31)(r1) - addi r1,r1,STACKFRAMESIZE - ld r0,16(r1) +#ifdef __BIG_ENDIAN__ + ld r11,STK_PARAM(R12)(r1) + mtmsrd r11 +#else + /* Endian can only be switched with rfi, must byte reverse MSR load */ + .short 0x4039 /* li r10,STK_PARAM(R12) */ + .byte (STK_PARAM(R12) >> 8) & 0xff + .byte STK_PARAM(R12) & 0xff + + .long 0x280c6a7d /* ldbrx r11,r10,r1 */ + .long 0x05009f42 /* bcl 20,31,$+4 */ + .long 0xa602487d /* mflr r10 */ + .long 0x14004a39 /* addi r10,r10,20 */ + .long 0xa64b5a7d /* mthsrr0 r10 */ + .long 0xa64b7b7d /* mthsrr1 r11 */ + .long 0x2402004c /* hrfid */ +#endif + ld r2,PACATOC(r13) + ld r0,PPC_LR_STKOFF(r1) mtlr r0 blr -#endif - - -OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL); -OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE); -OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ); -OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE); -OPAL_CALL(opal_rtc_read, OPAL_RTC_READ); -OPAL_CALL(opal_rtc_write, OPAL_RTC_WRITE); -OPAL_CALL(opal_cec_power_down, OPAL_CEC_POWER_DOWN); -OPAL_CALL(opal_cec_reboot, OPAL_CEC_REBOOT); -OPAL_CALL(opal_cec_reboot2, OPAL_CEC_REBOOT2); -OPAL_CALL(opal_read_nvram, OPAL_READ_NVRAM); -OPAL_CALL(opal_write_nvram, OPAL_WRITE_NVRAM); -OPAL_CALL(opal_handle_interrupt, OPAL_HANDLE_INTERRUPT); -OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS); -OPAL_CALL(opal_pci_set_hub_tce_memory, OPAL_PCI_SET_HUB_TCE_MEMORY); -OPAL_CALL(opal_pci_set_phb_tce_memory, OPAL_PCI_SET_PHB_TCE_MEMORY); -OPAL_CALL(opal_pci_config_read_byte, OPAL_PCI_CONFIG_READ_BYTE); -OPAL_CALL(opal_pci_config_read_half_word, OPAL_PCI_CONFIG_READ_HALF_WORD); -OPAL_CALL(opal_pci_config_read_word, OPAL_PCI_CONFIG_READ_WORD); -OPAL_CALL(opal_pci_config_write_byte, OPAL_PCI_CONFIG_WRITE_BYTE); -OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD); -OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD); -OPAL_CALL(opal_set_xive, OPAL_SET_XIVE); -OPAL_CALL(opal_get_xive, OPAL_GET_XIVE); -OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER); -OPAL_CALL(opal_pci_eeh_freeze_status, 
OPAL_PCI_EEH_FREEZE_STATUS); -OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR); -OPAL_CALL(opal_pci_eeh_freeze_set, OPAL_PCI_EEH_FREEZE_SET); -OPAL_CALL(opal_pci_err_inject, OPAL_PCI_ERR_INJECT); -OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC); -OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE); -OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW); -OPAL_CALL(opal_pci_map_pe_mmio_window, OPAL_PCI_MAP_PE_MMIO_WINDOW); -OPAL_CALL(opal_pci_set_phb_table_memory, OPAL_PCI_SET_PHB_TABLE_MEMORY); -OPAL_CALL(opal_pci_set_pe, OPAL_PCI_SET_PE); -OPAL_CALL(opal_pci_set_peltv, OPAL_PCI_SET_PELTV); -OPAL_CALL(opal_pci_set_mve, OPAL_PCI_SET_MVE); -OPAL_CALL(opal_pci_set_mve_enable, OPAL_PCI_SET_MVE_ENABLE); -OPAL_CALL(opal_pci_get_xive_reissue, OPAL_PCI_GET_XIVE_REISSUE); -OPAL_CALL(opal_pci_set_xive_reissue, OPAL_PCI_SET_XIVE_REISSUE); -OPAL_CALL(opal_pci_set_xive_pe, OPAL_PCI_SET_XIVE_PE); -OPAL_CALL(opal_get_xive_source, OPAL_GET_XIVE_SOURCE); -OPAL_CALL(opal_get_msi_32, OPAL_GET_MSI_32); -OPAL_CALL(opal_get_msi_64, OPAL_GET_MSI_64); -OPAL_CALL(opal_start_cpu, OPAL_START_CPU); -OPAL_CALL(opal_query_cpu_status, OPAL_QUERY_CPU_STATUS); -OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL); -OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW); -OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL); -OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET); -OPAL_CALL(opal_pci_get_hub_diag_data, OPAL_PCI_GET_HUB_DIAG_DATA); -OPAL_CALL(opal_pci_get_phb_diag_data, OPAL_PCI_GET_PHB_DIAG_DATA); -OPAL_CALL(opal_pci_fence_phb, OPAL_PCI_FENCE_PHB); -OPAL_CALL(opal_pci_reinit, OPAL_PCI_REINIT); -OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR); -OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS); -OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS); -OPAL_CALL(opal_get_dpo_status, OPAL_GET_DPO_STATUS); -OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED); -OPAL_CALL(opal_pci_next_error, OPAL_PCI_NEXT_ERROR); -OPAL_CALL(opal_pci_poll, OPAL_PCI_POLL); -OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI); -OPAL_CALL(opal_pci_get_phb_diag_data2, OPAL_PCI_GET_PHB_DIAG_DATA2); -OPAL_CALL(opal_xscom_read, OPAL_XSCOM_READ); -OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE); -OPAL_CALL(opal_lpc_read, OPAL_LPC_READ); -OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE); -OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU); -OPAL_CALL(opal_reinit_cpus, OPAL_REINIT_CPUS); -OPAL_CALL(opal_read_elog, OPAL_ELOG_READ); -OPAL_CALL(opal_send_ack_elog, OPAL_ELOG_ACK); -OPAL_CALL(opal_get_elog_size, OPAL_ELOG_SIZE); -OPAL_CALL(opal_resend_pending_logs, OPAL_ELOG_RESEND); -OPAL_CALL(opal_write_elog, OPAL_ELOG_WRITE); -OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); -OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); -OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); -OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE); -OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN); -OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT); -OPAL_CALL(opal_dump_info, OPAL_DUMP_INFO); -OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2); -OPAL_CALL(opal_dump_read, OPAL_DUMP_READ); -OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK); -OPAL_CALL(opal_get_msg, OPAL_GET_MSG); -OPAL_CALL(opal_write_oppanel_async, OPAL_WRITE_OPPANEL_ASYNC); -OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION); -OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND); -OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT); -OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ); -OPAL_CALL(opal_get_param, OPAL_GET_PARAM); 
-OPAL_CALL(opal_set_param, OPAL_SET_PARAM); -OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI); -OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE); -OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG); -OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); -OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); -OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE); -OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO); -OPAL_CALL(opal_tpo_read, OPAL_READ_TPO); -OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND); -OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV); -OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST); -OPAL_CALL(opal_flash_read, OPAL_FLASH_READ); -OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE); -OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE); -OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG); -OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR); -OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR); -OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH); -OPAL_CALL(opal_get_device_tree, OPAL_GET_DEVICE_TREE); -OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE); -OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE); -OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE); -OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR); -OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR); -OPAL_CALL(opal_int_eoi, OPAL_INT_EOI); -OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR); -OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL); -OPAL_CALL(opal_nmmu_set_ptcr, OPAL_NMMU_SET_PTCR); -OPAL_CALL(opal_xive_reset, OPAL_XIVE_RESET); -OPAL_CALL(opal_xive_get_irq_info, OPAL_XIVE_GET_IRQ_INFO); -OPAL_CALL(opal_xive_get_irq_config, OPAL_XIVE_GET_IRQ_CONFIG); -OPAL_CALL(opal_xive_set_irq_config, OPAL_XIVE_SET_IRQ_CONFIG); -OPAL_CALL(opal_xive_get_queue_info, OPAL_XIVE_GET_QUEUE_INFO); -OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO); -OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE); -OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK); -OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK); -OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ); -OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ); -OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO); -OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO); -OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC); -OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP); -OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET); -OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT); -OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT); -OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR); -OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT); -OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START); -OPAL_CALL(opal_imc_counters_stop, OPAL_IMC_COUNTERS_STOP); -OPAL_CALL(opal_pci_set_p2p, OPAL_PCI_SET_P2P); -OPAL_CALL(opal_get_powercap, OPAL_GET_POWERCAP); -OPAL_CALL(opal_set_powercap, OPAL_SET_POWERCAP); -OPAL_CALL(opal_get_power_shift_ratio, OPAL_GET_POWER_SHIFT_RATIO); -OPAL_CALL(opal_set_power_shift_ratio, OPAL_SET_POWER_SHIFT_RATIO); -OPAL_CALL(opal_sensor_group_clear, OPAL_SENSOR_GROUP_CLEAR); -OPAL_CALL(opal_quiesce, OPAL_QUIESCE); -OPAL_CALL(opal_npu_spa_setup, OPAL_NPU_SPA_SETUP); -OPAL_CALL(opal_npu_spa_clear_cache, OPAL_NPU_SPA_CLEAR_CACHE); -OPAL_CALL(opal_npu_tl_set, OPAL_NPU_TL_SET); -OPAL_CALL(opal_pci_get_pbcq_tunnel_bar, OPAL_PCI_GET_PBCQ_TUNNEL_BAR); -OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR); -OPAL_CALL(opal_sensor_read_u64, OPAL_SENSOR_READ_U64); 
-OPAL_CALL(opal_sensor_group_enable, OPAL_SENSOR_GROUP_ENABLE); -OPAL_CALL(opal_nx_coproc_init, OPAL_NX_COPROC_INIT); diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 79586f127521..2b0eca104f86 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -171,8 +170,10 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node, /* * Allocate a buffer to hold the MC recoverable ranges. */ - mc_recoverable_range =__va(memblock_phys_alloc(size, __alignof__(u64))); - memset(mc_recoverable_range, 0, size); + mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); + if (!mc_recoverable_range) + panic("%s: Failed to allocate %u bytes align=0x%lx\n", + __func__, size, __alignof__(u64)); for (i = 0; i < mc_recoverable_range_len; i++) { mc_recoverable_range[i].start_addr = @@ -587,7 +588,7 @@ int opal_machine_check(struct pt_regs *regs) evt.version); return 0; } - machine_check_print_event_info(&evt, user_mode(regs)); + machine_check_print_event_info(&evt, user_mode(regs), false); if (opal_recover_mce(regs, &evt)) return 1; diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c index 697449afb3f7..e28f03e1eb5e 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c @@ -313,7 +313,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, page_shift); tbl->it_level_size = 1ULL << (level_shift - 3); tbl->it_indirect_levels = levels - 1; - tbl->it_allocated_size = total_allocated; tbl->it_userspace = uas; tbl->it_nid = nid; diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7db3119f8a5b..3ead4c237ed0 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) pnv_pci_ioda2_setup_dma_pe(phb, pe); #ifdef CONFIG_IOMMU_API + iommu_register_group(&pe->table_group, + pe->phb->hose->global_number, pe->pe_number); pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); #endif } @@ -1746,7 +1748,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev pe = &phb->ioda.pe_array[pdn->pe_number]; WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); - set_dma_offset(&pdev->dev, pe->tce_bypass_base); + pdev->dev.archdata.dma_offset = pe->tce_bypass_base; set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); /* * Note: iommu_add_device() will fail here as @@ -1756,31 +1758,6 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev */ } -static bool pnv_pci_ioda_pe_single_vendor(struct pnv_ioda_pe *pe) -{ - unsigned short vendor = 0; - struct pci_dev *pdev; - - if (pe->device_count == 1) - return true; - - /* pe->pdev should be set if it's a single device, pe->pbus if not */ - if (!pe->pbus) - return true; - - list_for_each_entry(pdev, &pe->pbus->devices, bus_list) { - if (!vendor) { - vendor = pdev->vendor; - continue; - } - - if (pdev->vendor != vendor) - return false; - } - - return true; -} - /* * Reconfigure TVE#0 to be usable as 64-bit DMA space. 
* @@ -1850,88 +1827,45 @@ err: return -EIO; } -static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask) +static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev, + u64 dma_mask) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; - uint64_t top; - bool bypass = false; - s64 rc; if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) return -ENODEV; pe = &phb->ioda.pe_array[pdn->pe_number]; if (pe->tce_bypass_enabled) { - top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; - bypass = (dma_mask >= top); + u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; + if (dma_mask >= top) + return true; } - if (bypass) { - dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); - set_dma_ops(&pdev->dev, &dma_nommu_ops); - } else { - /* - * If the device can't set the TCE bypass bit but still wants - * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to - * bypass the 32-bit region and be usable for 64-bit DMAs. - * The device needs to be able to address all of this space. - */ - if (dma_mask >> 32 && - dma_mask > (memory_hotplug_max() + (1ULL << 32)) && - pnv_pci_ioda_pe_single_vendor(pe) && - phb->model == PNV_PHB_MODEL_PHB3) { - /* Configure the bypass mode */ - rc = pnv_pci_ioda_dma_64bit_bypass(pe); - if (rc) - return rc; - /* 4GB offset bypasses 32-bit space */ - set_dma_offset(&pdev->dev, (1ULL << 32)); - set_dma_ops(&pdev->dev, &dma_nommu_ops); - } else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) { - /* - * Fail the request if a DMA mask between 32 and 64 bits - * was requested but couldn't be fulfilled. Ideally we - * would do this for 64-bits but historically we have - * always fallen back to 32-bits. - */ - return -ENOMEM; - } else { - dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n"); - set_dma_ops(&pdev->dev, &dma_iommu_ops); - } + /* + * If the device can't set the TCE bypass bit but still wants + * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to + * bypass the 32-bit region and be usable for 64-bit DMAs. + * The device needs to be able to address all of this space. 
+ */ + if (dma_mask >> 32 && + dma_mask > (memory_hotplug_max() + (1ULL << 32)) && + /* pe->pdev should be set if it's a single device, pe->pbus if not */ + (pe->device_count == 1 || !pe->pbus) && + phb->model == PNV_PHB_MODEL_PHB3) { + /* Configure the bypass mode */ + s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe); + if (rc) + return rc; + /* 4GB offset bypasses 32-bit space */ + pdev->dev.archdata.dma_offset = (1ULL << 32); + return true; } - *pdev->dev.dma_mask = dma_mask; - /* Update peer npu devices */ - pnv_npu_try_dma_set_bypass(pdev, bypass); - - return 0; -} - -static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev) -{ - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pci_get_pdn(pdev); - struct pnv_ioda_pe *pe; - u64 end, mask; - - if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) - return 0; - - pe = &phb->ioda.pe_array[pdn->pe_number]; - if (!pe->tce_bypass_enabled) - return __dma_get_required_mask(&pdev->dev); - - - end = pe->tce_bypass_base + memblock_end_of_DRAM(); - mask = 1ULL << (fls64(end) - 1); - mask += mask - 1; - - return mask; + return false; } static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) @@ -1940,7 +1874,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) list_for_each_entry(dev, &bus->devices, bus_list) { set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); - set_dma_offset(&dev->dev, pe->tce_bypass_base); + dev->dev.archdata.dma_offset = pe->tce_bypass_base; if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) pnv_ioda_setup_bus_dma(pe, dev->subordinate); @@ -2592,8 +2526,13 @@ static long pnv_pci_ioda2_create_table_userspace( int num, __u32 page_shift, __u64 window_size, __u32 levels, struct iommu_table **ptbl) { - return pnv_pci_ioda2_create_table(table_group, + long ret = pnv_pci_ioda2_create_table(table_group, num, page_shift, window_size, levels, true, ptbl); + + if (!ret) + (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size( + page_shift, window_size, levels); + return ret; } static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) @@ -3659,6 +3598,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose) static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .dma_dev_setup = pnv_pci_dma_dev_setup, .dma_bus_setup = pnv_pci_dma_bus_setup, + .iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported, .setup_msi_irqs = pnv_setup_msi_irqs, .teardown_msi_irqs = pnv_teardown_msi_irqs, .enable_device_hook = pnv_pci_enable_device_hook, @@ -3666,19 +3606,9 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .window_alignment = pnv_pci_window_alignment, .setup_bridge = pnv_pci_setup_bridge, .reset_secondary_bus = pnv_pci_reset_secondary_bus, - .dma_set_mask = pnv_pci_ioda_dma_set_mask, - .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask, .shutdown = pnv_pci_ioda_shutdown, }; -static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask) -{ - dev_err_once(&npdev->dev, - "%s operation unsupported for NVLink devices\n", - __func__); - return -EPERM; -} - static const struct pci_controller_ops pnv_npu_ioda_controller_ops = { .dma_dev_setup = pnv_pci_dma_dev_setup, .setup_msi_irqs = pnv_setup_msi_irqs, @@ -3686,7 +3616,6 @@ static const struct pci_controller_ops pnv_npu_ioda_controller_ops = { .enable_device_hook = pnv_pci_enable_device_hook, .window_alignment = pnv_pci_window_alignment, .reset_secondary_bus = 
pnv_pci_reset_secondary_bus, - .dma_set_mask = pnv_npu_dma_set_mask, .shutdown = pnv_pci_ioda_shutdown, .disable_device = pnv_npu_disable_device, }; @@ -3728,6 +3657,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, pr_debug(" PHB-ID : 0x%016llx\n", phb_id); phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES); + if (!phb) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*phb)); /* Allocate PCI controller */ phb->hose = hose = pcibios_alloc_controller(np); @@ -3774,6 +3706,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES); + if (!phb->diag_data) + panic("%s: Failed to allocate %u bytes\n", __func__, + phb->diag_data_size); /* Parse 32-bit and IO ranges (if any) */ pci_process_bridge_OF_ranges(hose, np, !hose->global_number); @@ -3833,6 +3768,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, pemap_off = size; size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); aux = memblock_alloc(size, SMP_CACHE_BYTES); + if (!aux) + panic("%s: Failed to allocate %lu bytes\n", __func__, size); phb->ioda.pe_alloc = aux; phb->ioda.m64_segmap = aux + m64map_off; phb->ioda.m32_segmap = aux + m32map_off; @@ -3944,9 +3881,12 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, * shutdown PCI devices correctly. We already got IODA table * cleaned out. So we have to issue PHB reset to stop all PCI * transactions from previous kernel. The ppc_pci_reset_phbs - * kernel parameter will force this reset too. + * kernel parameter will force this reset too. Additionally, + * if the IODA reset above failed then use a bigger hammer. + * This can happen if we get a PHB fatal error in very early + * boot. */ - if (is_kdump_kernel() || pci_reset_phbs) { + if (is_kdump_kernel() || pci_reset_phbs || rc) { pr_info(" Issue PHB reset ...\n"); pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 45fb70b4bfa7..ef9448a907c6 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, return 0; pe = &phb->ioda.pe_array[pdn->pe_number]; + if (!pe->table_group.group) + return 0; iommu_add_device(&pe->table_group, dev); return 0; case BUS_NOTIFY_DEL_DEVICE: diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 0d354e19ef92..db09c7022635 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "powernv.h" @@ -153,6 +154,7 @@ static void pnv_smp_cpu_kill_self(void) { unsigned int cpu; unsigned long srr1, wmask; + u64 lpcr_val; /* Standard hot unplug procedure */ /* @@ -174,6 +176,19 @@ static void pnv_smp_cpu_kill_self(void) if (cpu_has_feature(CPU_FTR_ARCH_207S)) wmask = SRR1_WAKEMASK_P8; + /* + * We don't want to take decrementer interrupts while we are + * offline, so clear LPCR:PECE1. We keep PECE2 (and + * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in. + * + * If the CPU gets woken up by a special wakeup, ensure that + * the SLW engine sets LPCR with decrementer bit cleared, else + * the CPU will come back to the kernel due to a spurious + * wakeup. 
+ */ + lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); + while (!generic_check_cpu_restart(cpu)) { /* * Clear IPI flag, since we don't handle IPIs while @@ -246,6 +261,16 @@ static void pnv_smp_cpu_kill_self(void) } + /* + * Re-enable decrementer interrupts in LPCR. + * + * Further, we want stop states to be woken up by decrementer + * for non-hotplug cases. So program the LPCR via stop api as + * well. + */ + lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1; + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); + DBG("CPU%d coming online...\n", cpu); } diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index e7075aaff1bb..59587b75493d 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -354,9 +354,7 @@ static int ps3_setup_storage_dev(const struct ps3_repository_device *repo, repo->dev_index, repo->dev_type, port, blk_size, num_blocks, num_regions); - p = kzalloc(sizeof(struct ps3_storage_device) + - num_regions * sizeof(struct ps3_storage_region), - GFP_KERNEL); + p = kzalloc(struct_size(p, regions, num_regions), GFP_KERNEL); if (!p) { result = -ENOMEM; goto fail_malloc; diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index f5387ad82279..4d65c5380020 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -205,11 +205,11 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = { * 3) The number of seconds from 1970 to 2000. */ -struct saved_params { +static struct saved_params { unsigned int valid; s64 rtc_diff; unsigned int av_multi_out; -} static saved_params; +} saved_params; static struct property property_rtc_diff = { .name = "linux,rtc_diff", diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 658bfab3350b..4ce5458eb0f8 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -127,6 +127,9 @@ static void __init prealloc(struct ps3_prealloc *p) return; p->address = memblock_alloc(p->size, p->align); + if (!p->address) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index 5cc35d6b94b6..7c227e784247 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -37,12 +37,12 @@ static struct device ps3_system_bus = { }; /* FIXME: need device usage counters! 
*/ -struct { +static struct { struct mutex mutex; int sb_11; /* usb 0 */ int sb_12; /* usb 0 */ int gpu; -} static usage_hack; +} usage_hack; static int ps3_is_device(struct ps3_system_bus_device *dev, u64 bus_id, u64 dev_id) diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 2f8e62163602..97feb6e79f1a 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -802,6 +802,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add) return rc; } +int dlpar_cpu_readd(int cpu) +{ + struct device_node *dn; + struct device *dev; + u32 drc_index; + int rc; + + dev = get_cpu_device(cpu); + dn = dev->of_node; + + rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index); + + rc = dlpar_cpu_remove_by_index(drc_index); + if (!rc) + rc = dlpar_cpu_add(drc_index); + + return rc; +} + int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) { u32 count, drc_index; diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c index 5b4a56131904..84e8ec4011ba 100644 --- a/arch/powerpc/platforms/pseries/ibmebus.c +++ b/arch/powerpc/platforms/pseries/ibmebus.c @@ -261,8 +261,7 @@ static char *ibmebus_chomp(const char *in, size_t count) return out; } -static ssize_t ibmebus_store_probe(struct bus_type *bus, - const char *buf, size_t count) +static ssize_t probe_store(struct bus_type *bus, const char *buf, size_t count) { struct device_node *dn = NULL; struct device *dev; @@ -298,10 +297,9 @@ out: return rc; return count; } -static BUS_ATTR(probe, 0200, NULL, ibmebus_store_probe); +static BUS_ATTR_WO(probe); -static ssize_t ibmebus_store_remove(struct bus_type *bus, - const char *buf, size_t count) +static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count) { struct device *dev; char *path; @@ -325,7 +323,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, return -ENODEV; } } -static BUS_ATTR(remove, 0200, NULL, ibmebus_store_remove); +static BUS_ATTR_WO(remove); static struct attribute *ibmbus_bus_attrs[] = { &bus_attr_probe.attr, diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 8fc8fe0b9848..36eb1ddbac69 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -978,7 +978,7 @@ static phys_addr_t ddw_memory_hotplug_max(void) * pdn: the parent pe node with the ibm,dma_window property * Future: also check if we can remap the base window for our base page size * - * returns the dma offset for use by dma_set_mask + * returns the dma offset for use by the direct mapped DMA code. 
*/ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) { @@ -1198,87 +1198,37 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) iommu_add_device(pci->table_group, &dev->dev); } -static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) +static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask) { - bool ddw_enabled = false; - struct device_node *pdn, *dn; - struct pci_dev *pdev; + struct device_node *dn = pci_device_to_OF_node(pdev), *pdn; const __be32 *dma_window = NULL; - u64 dma_offset; - - if (!dev->dma_mask) - return -EIO; - - if (!dev_is_pci(dev)) - goto check_mask; - - pdev = to_pci_dev(dev); /* only attempt to use a new window if 64-bit DMA is requested */ - if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { - dn = pci_device_to_OF_node(pdev); - dev_dbg(dev, "node is %pOF\n", dn); + if (dma_mask < DMA_BIT_MASK(64)) + return false; - /* - * the device tree might contain the dma-window properties - * per-device and not necessarily for the bus. So we need to - * search upwards in the tree until we either hit a dma-window - * property, OR find a parent with a table already allocated. - */ - for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group; - pdn = pdn->parent) { - dma_window = of_get_property(pdn, "ibm,dma-window", NULL); - if (dma_window) - break; - } - if (pdn && PCI_DN(pdn)) { - dma_offset = enable_ddw(pdev, pdn); - if (dma_offset != 0) { - dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset); - set_dma_offset(dev, dma_offset); - set_dma_ops(dev, &dma_nommu_ops); - ddw_enabled = true; - } - } - } + dev_dbg(&pdev->dev, "node is %pOF\n", dn); - /* fall back on iommu ops */ - if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) { - dev_info(dev, "Restoring 32-bit DMA via iommu\n"); - set_dma_ops(dev, &dma_iommu_ops); + /* + * the device tree might contain the dma-window properties + * per-device and not necessarily for the bus. So we need to + * search upwards in the tree until we either hit a dma-window + * property, OR find a parent with a table already allocated. 
+ */ + for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group; + pdn = pdn->parent) { + dma_window = of_get_property(pdn, "ibm,dma-window", NULL); + if (dma_window) + break; } -check_mask: - if (!dma_supported(dev, dma_mask)) - return -EIO; - - *dev->dma_mask = dma_mask; - return 0; -} - -static u64 dma_get_required_mask_pSeriesLP(struct device *dev) -{ - if (!dev->dma_mask) - return 0; - - if (!disable_ddw && dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(dev); - struct device_node *dn; - - dn = pci_device_to_OF_node(pdev); - - /* search upwards for ibm,dma-window */ - for (; dn && PCI_DN(dn) && !PCI_DN(dn)->table_group; - dn = dn->parent) - if (of_get_property(dn, "ibm,dma-window", NULL)) - break; - /* if there is a ibm,ddw-applicable property require 64 bits */ - if (dn && PCI_DN(dn) && - of_get_property(dn, "ibm,ddw-applicable", NULL)) - return DMA_BIT_MASK(64); + if (pdn && PCI_DN(pdn)) { + pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn); + if (pdev->dev.archdata.dma_offset) + return true; } - return dma_iommu_ops.get_required_mask(dev); + return false; } static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, @@ -1373,8 +1323,9 @@ void iommu_init_early_pSeries(void) if (firmware_has_feature(FW_FEATURE_LPAR)) { pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP; pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP; - ppc_md.dma_set_mask = dma_set_mask_pSeriesLP; - ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP; + if (!disable_ddw) + pseries_pci_controller_ops.iommu_bypass_supported = + iommu_bypass_supported_pSeriesLP; } else { pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries; pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries; diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index 794487313cc8..e73c7e30efe6 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c @@ -475,6 +475,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) splpar_dispatch_data(m); seq_printf(m, "purr=%ld\n", get_purr()); + seq_printf(m, "tbr=%ld\n", mftb()); } else { /* non SPLPAR case */ seq_printf(m, "system_active_processors=%d\n", diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 69cedc1b3b8a..1136a38ff039 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c @@ -7,8 +7,6 @@ * 2 of the License, or (at your option) any later version. 
* * /dev/nvram driver for PPC64 - * - * This perhaps should live in drivers/char */ diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 7d6457ab5d34..bba281b1fe1b 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) { unsigned long ret[PLPAR_HCALL_BUFSIZE]; uint64_t rc, token; + uint64_t saved = 0; /* * When the hypervisor cannot map all the requested memory in a single @@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p) rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, p->blocks, BIND_ANY_ADDR, token); token = ret[0]; + if (!saved) + saved = ret[1]; cond_resched(); } while (rc == H_BUSY); @@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) return -ENXIO; } - p->bound_addr = ret[1]; + p->bound_addr = saved; dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 41f62ca27c63..e4f0dfd4ae33 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -130,8 +130,13 @@ static void __init fwnmi_init(void) * It will be used in real mode mce handler, hence it needs to be * below RMA. */ - mce_data_buf = __va(memblock_alloc_base(RTAS_ERROR_LOG_MAX * nr_cpus, - RTAS_ERROR_LOG_MAX, ppc64_rma_size)); + mce_data_buf = memblock_alloc_try_nid_raw(RTAS_ERROR_LOG_MAX * nr_cpus, + RTAS_ERROR_LOG_MAX, MEMBLOCK_LOW_LIMIT, + ppc64_rma_size, NUMA_NO_NODE); + if (!mce_data_buf) + panic("Failed to allocate %d bytes below %pa for MCE buffer\n", + RTAS_ERROR_LOG_MAX * nr_cpus, &ppc64_rma_size); + for_each_possible_cpu(i) { paca_ptrs[i]->mce_data_buf = mce_data_buf + (RTAS_ERROR_LOG_MAX * i); @@ -140,8 +145,13 @@ static void __init fwnmi_init(void) #ifdef CONFIG_PPC_BOOK3S_64 /* Allocate per cpu slb area to save old slb contents during MCE */ size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus; - slb_ptr = __va(memblock_alloc_base(size, sizeof(struct slb_entry), - ppc64_rma_size)); + slb_ptr = memblock_alloc_try_nid_raw(size, sizeof(struct slb_entry), + MEMBLOCK_LOW_LIMIT, ppc64_rma_size, + NUMA_NO_NODE); + if (!slb_ptr) + panic("Failed to allocate %zu bytes below %pa for slb area\n", + size, &ppc64_rma_size); + for_each_possible_cpu(i) paca_ptrs[i]->mce_faulty_slbs = slb_ptr + (mmu_slb_size * i); #endif diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 1fad4649735b..141795275ccb 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -492,7 +492,9 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, return NULL; } - ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs); + ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, + dma_handle, dev->coherent_dma_mask, flag, + dev_to_node(dev)); if (unlikely(ret == NULL)) { vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); atomic_inc(&viodev->cmo.allocs_failed); @@ -507,8 +509,7 @@ static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, { struct vio_dev *viodev = to_vio_dev(dev); - dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs); - + iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); } @@ -518,22 +519,22 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, 
unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; + struct iommu_table *tbl = get_iommu_table_base(dev); dma_addr_t ret = DMA_MAPPING_ERROR; - tbl = get_iommu_table_base(dev); - if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { - atomic_inc(&viodev->cmo.allocs_failed); - return ret; - } - - ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); - if (unlikely(dma_mapping_error(dev, ret))) { - vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); - atomic_inc(&viodev->cmo.allocs_failed); - } - + if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) + goto out_fail; + ret = iommu_map_page(dev, tbl, page, offset, size, device_to_mask(dev), + direction, attrs); + if (unlikely(ret == DMA_MAPPING_ERROR)) + goto out_deallocate; return ret; + +out_deallocate: + vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); +out_fail: + atomic_inc(&viodev->cmo.allocs_failed); + return DMA_MAPPING_ERROR; } static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, @@ -542,11 +543,9 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; - - tbl = get_iommu_table_base(dev); - dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); + struct iommu_table *tbl = get_iommu_table_base(dev); + iommu_unmap_page(tbl, dma_handle, size, direction, attrs); vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); } @@ -555,34 +554,32 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; + struct iommu_table *tbl = get_iommu_table_base(dev); struct scatterlist *sgl; int ret, count; size_t alloc_size = 0; - tbl = get_iommu_table_base(dev); for_each_sg(sglist, sgl, nelems, count) alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); - if (vio_cmo_alloc(viodev, alloc_size)) { - atomic_inc(&viodev->cmo.allocs_failed); - return 0; - } - - ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs); - - if (unlikely(!ret)) { - vio_cmo_dealloc(viodev, alloc_size); - atomic_inc(&viodev->cmo.allocs_failed); - return ret; - } + if (vio_cmo_alloc(viodev, alloc_size)) + goto out_fail; + ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, device_to_mask(dev), + direction, attrs); + if (unlikely(!ret)) + goto out_deallocate; for_each_sg(sglist, sgl, ret, count) alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); if (alloc_size) vio_cmo_dealloc(viodev, alloc_size); - return ret; + +out_deallocate: + vio_cmo_dealloc(viodev, alloc_size); +out_fail: + atomic_inc(&viodev->cmo.allocs_failed); + return 0; } static void vio_dma_iommu_unmap_sg(struct device *dev, @@ -591,40 +588,27 @@ static void vio_dma_iommu_unmap_sg(struct device *dev, unsigned long attrs) { struct vio_dev *viodev = to_vio_dev(dev); - struct iommu_table *tbl; + struct iommu_table *tbl = get_iommu_table_base(dev); struct scatterlist *sgl; size_t alloc_size = 0; int count; - tbl = get_iommu_table_base(dev); for_each_sg(sglist, sgl, nelems, count) alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl)); - dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs); - + ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs); vio_cmo_dealloc(viodev, alloc_size); } -static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask) -{ - return dma_iommu_ops.dma_supported(dev, 
mask); -} - -static u64 vio_dma_get_required_mask(struct device *dev) -{ - return dma_iommu_ops.get_required_mask(dev); -} - static const struct dma_map_ops vio_dma_mapping_ops = { .alloc = vio_dma_iommu_alloc_coherent, .free = vio_dma_iommu_free_coherent, - .mmap = dma_nommu_mmap_coherent, .map_sg = vio_dma_iommu_map_sg, .unmap_sg = vio_dma_iommu_unmap_sg, .map_page = vio_dma_iommu_map_page, .unmap_page = vio_dma_iommu_unmap_page, - .dma_supported = vio_dma_iommu_dma_supported, - .get_required_mask = vio_dma_get_required_mask, + .dma_supported = dma_iommu_dma_supported, + .get_required_mask = dma_iommu_get_required_mask, }; /** @@ -1715,3 +1699,10 @@ int vio_disable_interrupts(struct vio_dev *dev) } EXPORT_SYMBOL(vio_disable_interrupts); #endif /* CONFIG_PPC_PSERIES */ + +static int __init vio_init(void) +{ + dma_debug_add_bus(&vio_bus_type); + return 0; +} +fs_initcall(vio_init); diff --git a/arch/powerpc/sysdev/6xx-suspend.S b/arch/powerpc/sysdev/6xx-suspend.S index cf48e9cb2575..6c4aec25c4ba 100644 --- a/arch/powerpc/sysdev/6xx-suspend.S +++ b/arch/powerpc/sysdev/6xx-suspend.S @@ -29,10 +29,9 @@ _GLOBAL(mpc6xx_enter_standby) ori r5, r5, ret_from_standby@l mtlr r5 - CURRENT_THREAD_INFO(r5, r1) - lwz r6, TI_LOCAL_FLAGS(r5) + lwz r6, TI_LOCAL_FLAGS(r2) ori r6, r6, _TLF_SLEEPING - stw r6, TI_LOCAL_FLAGS(r5) + stw r6, TI_LOCAL_FLAGS(r2) mfmsr r5 ori r5, r5, MSR_EE diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index a5b40d1460f1..2a751795ec1e 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -251,8 +251,11 @@ static void allocate_dart(void) * 16MB (1 << 24) alignment. We allocate a full 16Mb chuck since we * will blow up an entire large page anyway in the kernel mapping. */ - dart_tablebase = __va(memblock_alloc_base(1UL<<24, - 1UL<<24, 0x80000000L)); + dart_tablebase = memblock_alloc_try_nid_raw(SZ_16M, SZ_16M, + MEMBLOCK_LOW_LIMIT, SZ_2G, + NUMA_NO_NODE); + if (!dart_tablebase) + panic("Failed to allocate 16MB below 2GB for DART table\n"); /* There is no point scanning the DART space for leaks*/ kmemleak_no_scan((void *)dart_tablebase); @@ -262,6 +265,9 @@ static void allocate_dart(void) * prefetching into invalid pages and corrupting data */ tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE); + if (!tmp) + panic("DART: table allocation failed\n"); + dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK); @@ -360,13 +366,6 @@ static void iommu_table_dart_setup(void) set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map); } -static void pci_dma_dev_setup_dart(struct pci_dev *dev) -{ - if (dart_is_u4) - set_dma_offset(&dev->dev, DART_U4_BYPASS_BASE); - set_iommu_table_base(&dev->dev, &iommu_table_dart); -} - static void pci_dma_bus_setup_dart(struct pci_bus *bus) { if (!iommu_table_dart_inited) { @@ -390,27 +389,18 @@ static bool dart_device_on_pcie(struct device *dev) return false; } -static int dart_dma_set_mask(struct device *dev, u64 dma_mask) +static void pci_dma_dev_setup_dart(struct pci_dev *dev) { - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - - /* U4 supports a DART bypass, we use it for 64-bit capable - * devices to improve performances. However, that only works - * for devices connected to U4 own PCIe interface, not bridged - * through hypertransport. We need the device to support at - * least 40 bits of addresses. 
- */ - if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) { - dev_info(dev, "Using 64-bit DMA iommu bypass\n"); - set_dma_ops(dev, &dma_nommu_ops); - } else { - dev_info(dev, "Using 32-bit DMA via iommu\n"); - set_dma_ops(dev, &dma_iommu_ops); - } + if (dart_is_u4 && dart_device_on_pcie(&dev->dev)) + dev->dev.archdata.dma_offset = DART_U4_BYPASS_BASE; + set_iommu_table_base(&dev->dev, &iommu_table_dart); +} - *dev->dma_mask = dma_mask; - return 0; +static bool iommu_bypass_supported_dart(struct pci_dev *dev, u64 mask) +{ + return dart_is_u4 && + dart_device_on_pcie(&dev->dev) && + mask >= DMA_BIT_MASK(40); } void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) @@ -428,26 +418,20 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops) /* Initialize the DART HW */ if (dart_init(dn) != 0) - goto bail; - - /* Setup bypass if supported */ - if (dart_is_u4) - ppc_md.dma_set_mask = dart_dma_set_mask; + return; + /* + * U4 supports a DART bypass, we use it for 64-bit capable devices to + * improve performance. However, that only works for devices connected + * to the U4 own PCIe interface, not bridged through hypertransport. + * We need the device to support at least 40 bits of addresses. + */ controller_ops->dma_dev_setup = pci_dma_dev_setup_dart; controller_ops->dma_bus_setup = pci_dma_bus_setup_dart; + controller_ops->iommu_bypass_supported = iommu_bypass_supported_dart; /* Setup pci_dma ops */ set_pci_dma_ops(&dma_iommu_ops); - return; - - bail: - /* If init failed, use direct iommu and null setup functions */ - controller_ops->dma_dev_setup = NULL; - controller_ops->dma_bus_setup = NULL; - - /* Setup pci_dma ops */ - set_pci_dma_ops(&dma_nommu_ops); } #ifdef CONFIG_PM diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 918be816b097..f49aec251a5a 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -114,33 +115,33 @@ static struct pci_ops fsl_indirect_pcie_ops = static u64 pci64_dma_offset; #ifdef CONFIG_SWIOTLB +static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev) +{ + struct pci_controller *hose = pci_bus_to_host(pdev->bus); + + pdev->dev.bus_dma_mask = + hose->dma_window_base_cur + hose->dma_window_size; +} + static void setup_swiotlb_ops(struct pci_controller *hose) { - if (ppc_swiotlb_enable) { + if (ppc_swiotlb_enable) hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb; - set_pci_dma_ops(&powerpc_swiotlb_dma_ops); - } } #else static inline void setup_swiotlb_ops(struct pci_controller *hose) {} #endif -static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask) +static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask) { - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) - return -EIO; - /* * Fix up PCI devices that are able to DMA to the large inbound * mapping that allows addressing any RAM address from across PCI. 
*/ if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) { - set_dma_ops(dev, &dma_nommu_ops); - set_dma_offset(dev, pci64_dma_offset); + dev->bus_dma_mask = 0; + dev->archdata.dma_offset = pci64_dma_offset; } - - *dev->dma_mask = dma_mask; - return 0; } static int setup_one_atmu(struct ccsr_pci __iomem *pci, diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index 8030a0f55e96..fd129c8ecceb 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c @@ -771,21 +771,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) return ipic; } -void ipic_set_highest_priority(unsigned int virq) -{ - struct ipic *ipic = ipic_from_irq(virq); - unsigned int src = virq_to_hw(virq); - u32 temp; - - temp = ipic_read(ipic->regs, IPIC_SICFR); - - /* clear and set HPI */ - temp &= 0x7f000000; - temp |= (src & 0x7f) << 24; - - ipic_write(ipic->regs, IPIC_SICFR, temp); -} - void ipic_set_default_priority(void) { ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT); @@ -796,26 +781,6 @@ void ipic_set_default_priority(void) ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT); } -void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq) -{ - struct ipic *ipic = primary_ipic; - u32 temp; - - temp = ipic_read(ipic->regs, IPIC_SERMR); - temp |= (1 << (31 - mcp_irq)); - ipic_write(ipic->regs, IPIC_SERMR, temp); -} - -void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq) -{ - struct ipic *ipic = primary_ipic; - u32 temp; - - temp = ipic_read(ipic->regs, IPIC_SERMR); - temp &= (1 << (31 - mcp_irq)); - ipic_write(ipic->regs, IPIC_SERMR, temp); -} - u32 ipic_get_mcp_status(void) { return primary_ipic ? ipic_read(primary_ipic->regs, IPIC_SERSR) : 0; diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index d45450f6666a..51a679a1c403 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -129,6 +129,9 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, bmp->bitmap = kzalloc(size, GFP_KERNEL); else { bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); + if (!bmp->bitmap) + panic("%s: Failed to allocate %u bytes\n", __func__, + size); /* the bitmap won't be freed from memblock allocator */ kmemleak_not_leak(bmp->bitmap); } diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c index 1fd0717ade02..1f1af12f23e2 100644 --- a/arch/powerpc/sysdev/tsi108_dev.c +++ b/arch/powerpc/sysdev/tsi108_dev.c @@ -51,7 +51,7 @@ phys_addr_t get_csrbase(void) const void *prop = of_get_property(tsi, "reg", &size); tsi108_csr_base = of_translate_address(tsi, prop); of_node_put(tsi); - }; + } return tsi108_csr_base; } diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 94a69a62f5db..70a8f9e31a2d 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -442,7 +442,7 @@ static void xive_dec_target_count(int cpu) struct xive_cpu *xc = per_cpu(xive_cpu, cpu); struct xive_q *q = &xc->queue[xive_irq_priority]; - if (unlikely(WARN_ON(cpu < 0 || !xc))) { + if (WARN_ON(cpu < 0 || !xc)) { pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); return; } diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index 878f9c1d3615..3050f9323254 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile @@ -5,6 +5,7 @@ subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header) GCOV_PROFILE := n +KCOV_INSTRUMENT := n UBSAN_SANITIZE := n # Disable ftrace for the entire 
directory diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c index 9deea5ee13f6..27f1e6415036 100644 --- a/arch/powerpc/xmon/ppc-dis.c +++ b/arch/powerpc/xmon/ppc-dis.c @@ -158,7 +158,7 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr) dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7 | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2 - | PPC_OPCODE_VSX | PPC_OPCODE_VSX3), + | PPC_OPCODE_VSX | PPC_OPCODE_VSX3); /* Get the major opcode of the insn. */ opcode = NULL; diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 757b8499aba2..a0f44f992360 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c @@ -2997,7 +2997,7 @@ static void show_task(struct task_struct *tsk) printf("%px %016lx %6d %6d %c %2d %s\n", tsk, tsk->thread.ksp, tsk->pid, rcu_dereference(tsk->parent)->pid, - state, task_thread_info(tsk)->cpu, + state, task_cpu(tsk), tsk->comm); } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index feeeaa60697c..eb56c82d8aa1 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -32,7 +32,6 @@ config RISCV select HAVE_MEMBLOCK_NODE_MAP select HAVE_DMA_CONTIGUOUS select HAVE_FUTEX_CMPXCHG if FUTEX - select HAVE_GENERIC_DMA_COHERENT select HAVE_PERF_EVENTS select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN @@ -49,6 +48,7 @@ config RISCV select RISCV_TIMER select GENERIC_IRQ_MULTI_HANDLER select ARCH_HAS_PTE_SPECIAL + select HAVE_EBPF_JIT if 64BIT config MMU def_bool y @@ -89,21 +89,21 @@ config GENERIC_CSUM config GENERIC_HWEIGHT def_bool y +config FIX_EARLYCON_MEM + def_bool y + config PGTABLE_LEVELS int default 3 if 64BIT default 2 -config HAVE_KPROBES - def_bool n - menu "Platform type" choice prompt "Base ISA" default ARCH_RV64I help - This selects the base ISA that this kernel will traget and must match + This selects the base ISA that this kernel will target and must match the target platform. 
config ARCH_RV32I diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 4b594f2e4f7e..c6342e638ef7 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -77,7 +77,7 @@ KBUILD_IMAGE := $(boot)/Image.gz head-y := arch/riscv/kernel/head.o -core-y += arch/riscv/kernel/ arch/riscv/mm/ +core-y += arch/riscv/kernel/ arch/riscv/mm/ arch/riscv/net/ libs-y += arch/riscv/lib/ diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index f399659d3b8d..2fd3461e50ab 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_BPF_SYSCALL=y CONFIG_SMP=y -CONFIG_PCI=y -CONFIG_PCIE_XILINX=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_NET=y @@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NETLINK_DIAG=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCIE_XILINX=y CONFIG_DEVTMPFS=y CONFIG_BLK_DEV_LOOP=y CONFIG_VIRTIO_BLK=y @@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y CONFIG_USB_UAS=y CONFIG_VIRTIO_MMIO=y CONFIG_SIFIVE_PLIC=y -CONFIG_RAS=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_AUTOFS4_FS=y @@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y CONFIG_ROOT_NFS=y CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_DEV_VIRTIO=y CONFIG_PRINTK_TIME=y # CONFIG_RCU_TRACE is not set diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h new file mode 100644 index 000000000000..57afe604b495 --- /dev/null +++ b/arch/riscv/include/asm/fixmap.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + */ + +#ifndef _ASM_RISCV_FIXMAP_H +#define _ASM_RISCV_FIXMAP_H + +#include +#include +#include +#include + +/* + * Here we define all the compile-time 'special' virtual addresses. + * The point is to have a constant address at compile time, but to + * set the physical address only in the boot process. + * + * These 'compile-time allocated' memory buffers are page-sized. Use + * set_fixmap(idx,phys) to associate physical memory with fixmap indices. + */ +enum fixed_addresses { + FIX_HOLE, + FIX_EARLYCON_MEM_BASE, + __end_of_fixed_addresses +}; + +#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) +#define FIXADDR_TOP (PAGE_OFFSET) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#define FIXMAP_PAGE_IO PAGE_KERNEL + +#define __early_set_fixmap __set_fixmap + +#define __late_set_fixmap __set_fixmap +#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR) + +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t prot); + +#include + +#endif /* _ASM_RISCV_FIXMAP_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h index b269451e7e85..1d9c1376dc64 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h @@ -163,20 +163,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * doesn't define any ordering between the memory space and the I/O space. 
*/ #define __io_br() do {} while (0) -#define __io_ar() __asm__ __volatile__ ("fence i,r" : : : "memory"); +#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory"); #define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory"); #define __io_aw() do {} while (0) -#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(); __v; }) -#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(); __v; }) -#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(); __v; }) +#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) +#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; }) +#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; }) #define writeb(v,c) ({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); }) #define writew(v,c) ({ __io_bw(); writew_cpu((v),(c)); __io_aw(); }) #define writel(v,c) ({ __io_bw(); writel_cpu((v),(c)); __io_aw(); }) #ifdef CONFIG_64BIT -#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(); __v; }) +#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; }) #define writeq(v,c) ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); }) #endif @@ -198,20 +198,20 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * writes. */ #define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory"); -#define __io_par() __asm__ __volatile__ ("fence i,ior" : : : "memory"); +#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory"); #define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); #define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); -#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; }) -#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; }) -#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(); __v; }) +#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; }) +#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; }) +#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; }) #define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); }) #define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); }) #define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); }) #ifdef CONFIG_64BIT -#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(); __v; }) +#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; }) #define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); }) #endif @@ -254,16 +254,16 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) afence; \ } -__io_reads_ins(reads, u8, b, __io_br(), __io_ar()) -__io_reads_ins(reads, u16, w, __io_br(), __io_ar()) -__io_reads_ins(reads, u32, l, __io_br(), __io_ar()) +__io_reads_ins(reads, u8, b, __io_br(), __io_ar(addr)) +__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr)) +__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr)) #define readsb(addr, buffer, count) __readsb(addr, buffer, count) #define readsw(addr, buffer, count) __readsw(addr, buffer, count) #define readsl(addr, buffer, count) __readsl(addr, buffer, count) -__io_reads_ins(ins, u8, b, __io_pbr(), 
__io_par()) -__io_reads_ins(ins, u16, w, __io_pbr(), __io_par()) -__io_reads_ins(ins, u32, l, __io_pbr(), __io_par()) +__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr)) +__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr)) +__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr)) #define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count) #define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count) #define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count) @@ -283,10 +283,10 @@ __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) #define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count) #ifdef CONFIG_64BIT -__io_reads_ins(reads, u64, q, __io_br(), __io_ar()) +__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr)) #define readsq(addr, buffer, count) __readsq(addr, buffer, count) -__io_reads_ins(ins, u64, q, __io_pbr(), __io_par()) +__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr)) #define insq(addr, buffer, count) __insq((void __iomem *)addr, buffer, count) __io_writes_outs(writes, u64, q, __io_bw(), __io_aw()) diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 06cfbb3aacbb..2a546a52f02a 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -80,7 +80,7 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) -#ifdef CONFIG_64BITS +#ifdef CONFIG_64BIT #define PTE_FMT "%016lx" #else #define PTE_FMT "%08lx" diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 2fa2942be221..470755cb7558 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -35,6 +35,12 @@ #define _PAGE_SPECIAL _PAGE_SOFT #define _PAGE_TABLE _PAGE_PRESENT +/* + * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to + * distinguish them from swapped out pages + */ +#define _PAGE_PROT_NONE _PAGE_READ + #define _PAGE_PFN_SHIFT 10 /* Set of bits to preserve across pte_modify() */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 16301966d65b..1141364d990e 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -44,7 +44,7 @@ /* Page protection bits */ #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) -#define PAGE_NONE __pgprot(0) +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE) #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) @@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; static inline int pmd_present(pmd_t pmd) { - return (pmd_val(pmd) & _PAGE_PRESENT); + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pmd_none(pmd_t pmd) @@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) static inline int pte_present(pte_t pte) { - return (pte_val(pte) & _PAGE_PRESENT); + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } static inline int pte_none(pte_t pte) @@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, * * Format of swap PTE: * bit 0: _PAGE_PRESENT (zero) - * bit 1: reserved for future use (zero) + * bit 1: _PAGE_PROT_NONE (zero) * bits 2 to 6: swap type * bits 7 to XLEN-1: swap offset */ @@ -404,6 +404,7 @@ static inline int 
ptep_clear_flush_young(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) /* FIXME */ #endif +extern void setup_bootmem(void); extern void paging_init(void); static inline void pgtable_cache_init(void) diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 0531f49af5c3..ce70bceb8872 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -22,7 +22,7 @@ * This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) #define STACK_TOP TASK_SIZE #define STACK_TOP_MAX STACK_TOP diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h index 41aa73b476f4..636a934f013a 100644 --- a/arch/riscv/include/asm/smp.h +++ b/arch/riscv/include/asm/smp.h @@ -19,16 +19,17 @@ #include #define INVALID_HARTID ULONG_MAX + +struct seq_file; +extern unsigned long boot_cpu_hartid; + +#ifdef CONFIG_SMP /* * Mapping between linux logical cpu index and hartid. */ extern unsigned long __cpuid_to_hartid_map[NR_CPUS]; #define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu] -struct seq_file; - -#ifdef CONFIG_SMP - /* print IPI stats */ void show_ipi_stats(struct seq_file *p, int prec); @@ -58,7 +59,14 @@ static inline void show_ipi_stats(struct seq_file *p, int prec) static inline int riscv_hartid_to_cpuid(int hartid) { - return 0; + if (hartid == boot_cpu_hartid) + return 0; + + return -1; +} +static inline unsigned long cpuid_to_hartid_map(int cpu) +{ + return boot_cpu_hartid; } static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in, diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 637b896894fc..a00168b980d2 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -41,7 +41,6 @@ #define KERNEL_DS (~0UL) #define USER_DS (TASK_SIZE) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) static inline void set_fs(mm_segment_t fs) diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h index 1f3bd3ebbb0d..0e2eeeb1fd27 100644 --- a/arch/riscv/include/uapi/asm/unistd.h +++ b/arch/riscv/include/uapi/asm/unistd.h @@ -17,6 +17,7 @@ #ifdef __LP64__ #define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SET_GET_RLIMIT #endif /* __LP64__ */ #include diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 6a92a2fe198e..dac98348c6a3 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c @@ -39,6 +39,7 @@ void asm_offsets(void) OFFSET(TASK_STACK, task_struct, stack); OFFSET(TASK_TI, task_struct, thread_info); OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); + OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index f8fa2c63aa89..cf2fca12414a 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -17,44 +17,36 @@ #include /* - * Returns the hart ID of the given device tree node, or -1 if the device tree - * node isn't a RISC-V hart. + * Returns the hart ID of the given device tree node, or -ENODEV if the node + * isn't an enabled and valid RISC-V hart node. 
*/ int riscv_of_processor_hartid(struct device_node *node) { - const char *isa, *status; + const char *isa; u32 hart; if (!of_device_is_compatible(node, "riscv")) { pr_warn("Found incompatible CPU\n"); - return -(ENODEV); + return -ENODEV; } if (of_property_read_u32(node, "reg", &hart)) { pr_warn("Found CPU without hart ID\n"); - return -(ENODEV); - } - if (hart >= NR_CPUS) { - pr_info("Found hart ID %d, which is above NR_CPUs. Disabling this hart\n", hart); - return -(ENODEV); + return -ENODEV; } - if (of_property_read_string(node, "status", &status)) { - pr_warn("CPU with hartid=%d has no \"status\" property\n", hart); - return -(ENODEV); - } - if (strcmp(status, "okay")) { - pr_info("CPU with hartid=%d has a non-okay status of \"%s\"\n", hart, status); - return -(ENODEV); + if (!of_device_is_available(node)) { + pr_info("CPU with hartid=%d is not available\n", hart); + return -ENODEV; } if (of_property_read_string(node, "riscv,isa", &isa)) { pr_warn("CPU with hartid=%d has no \"riscv,isa\" property\n", hart); - return -(ENODEV); + return -ENODEV; } if (isa[0] != 'r' || isa[1] != 'v') { pr_warn("CPU with hartid=%d has an invalid ISA of \"%s\"\n", hart, isa); - return -(ENODEV); + return -ENODEV; } return hart; @@ -106,7 +98,7 @@ static void print_isa(struct seq_file *f, const char *orig_isa) * a bit of info describing what went wrong. */ if (isa[0] != '\0') - pr_info("unsupported ISA \"%s\" in device tree", orig_isa); + pr_info("unsupported ISA \"%s\" in device tree\n", orig_isa); } static void print_mmu(struct seq_file *f, const char *mmu_type) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index a6e369edbbd7..bc29b010b722 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -20,6 +20,7 @@ #include #include #include +#include unsigned long elf_hwcap __read_mostly; #ifdef CONFIG_FPU @@ -28,7 +29,7 @@ bool has_fpu __read_mostly; void riscv_fill_hwcap(void) { - struct device_node *node = NULL; + struct device_node *node; const char *isa; size_t i; static unsigned long isa2hwcap[256] = {0}; @@ -42,36 +43,39 @@ void riscv_fill_hwcap(void) elf_hwcap = 0; - /* - * We don't support running Linux on hertergenous ISA systems. For - * now, we just check the ISA of the first "okay" processor. - */ - while ((node = of_find_node_by_type(node, "cpu"))) - if (riscv_of_processor_hartid(node) >= 0) - break; - if (!node) { - pr_warning("Unable to find \"cpu\" devicetree entry"); - return; - } + for_each_of_cpu_node(node) { + unsigned long this_hwcap = 0; - if (of_property_read_string(node, "riscv,isa", &isa)) { - pr_warning("Unable to find \"riscv,isa\" devicetree entry"); - of_node_put(node); - return; - } - of_node_put(node); + if (riscv_of_processor_hartid(node) < 0) + continue; + + if (of_property_read_string(node, "riscv,isa", &isa)) { + pr_warn("Unable to find \"riscv,isa\" devicetree entry\n"); + continue; + } - for (i = 0; i < strlen(isa); ++i) - elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])]; + for (i = 0; i < strlen(isa); ++i) + this_hwcap |= isa2hwcap[(unsigned char)(isa[i])]; + + /* + * All "okay" hart should have same isa. Set HWCAP based on + * common capabilities of every "okay" hart, in case they don't + * have. + */ + if (elf_hwcap) + elf_hwcap &= this_hwcap; + else + elf_hwcap = this_hwcap; + } /* We don't support systems with F but without D, so mask those out * here. 
*/ if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) { - pr_info("This kernel does not support systems with F but not D"); + pr_info("This kernel does not support systems with F but not D\n"); elf_hwcap &= ~COMPAT_HWCAP_ISA_F; } - pr_info("elf_hwcap is 0x%lx", elf_hwcap); + pr_info("elf_hwcap is 0x%lx\n", elf_hwcap); #ifdef CONFIG_FPU if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 355166f57205..fd9b57c8b4ce 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -144,6 +144,10 @@ _save_context: REG_L x2, PT_SP(sp) .endm +#if !IS_ENABLED(CONFIG_PREEMPT) +.set resume_kernel, restore_all +#endif + ENTRY(handle_exception) SAVE_ALL @@ -228,7 +232,7 @@ ret_from_exception: REG_L s0, PT_SSTATUS(sp) csrc sstatus, SR_SIE andi s0, s0, SR_SPP - bnez s0, restore_all + bnez s0, resume_kernel resume_userspace: /* Interrupts must be disabled here so flags are checked atomically */ @@ -250,6 +254,18 @@ restore_all: RESTORE_ALL sret +#if IS_ENABLED(CONFIG_PREEMPT) +resume_kernel: + REG_L s0, TASK_TI_PREEMPT_COUNT(tp) + bnez s0, restore_all +need_resched: + REG_L s0, TASK_TI_FLAGS(tp) + andi s0, s0, _TIF_NEED_RESCHED + beqz s0, restore_all + call preempt_schedule_irq + j need_resched +#endif + work_pending: /* Enter slow path for supplementary processing */ la ra, ret_from_exception diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index a840b7d074f7..b94d8db5ddcc 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -32,7 +32,7 @@ static int ftrace_check_current_call(unsigned long hook_pos, * return must be -EINVAL on failed comparison */ if (memcmp(expected, replaced, sizeof(replaced))) { - pr_err("%p: expected (%08x %08x) but get (%08x %08x)", + pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n", (void *)hook_pos, expected[0], expected[1], replaced[0], replaced[1]); return -EINVAL; diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 6e079e94b638..ecb654f6a79e 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -61,95 +60,9 @@ EXPORT_SYMBOL(empty_zero_page); atomic_t hart_lottery; unsigned long boot_cpu_hartid; -unsigned long __cpuid_to_hartid_map[NR_CPUS] = { - [0 ... 
NR_CPUS-1] = INVALID_HARTID -}; - -void __init smp_setup_processor_id(void) -{ - cpuid_to_hartid_map(0) = boot_cpu_hartid; -} - -#ifdef CONFIG_BLK_DEV_INITRD -static void __init setup_initrd(void) -{ - unsigned long size; - - if (initrd_start >= initrd_end) { - printk(KERN_INFO "initrd not found or empty"); - goto disable; - } - if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { - printk(KERN_ERR "initrd extends beyond end of memory"); - goto disable; - } - - size = initrd_end - initrd_start; - memblock_reserve(__pa(initrd_start), size); - initrd_below_start_ok = 1; - - printk(KERN_INFO "Initial ramdisk at: 0x%p (%lu bytes)\n", - (void *)(initrd_start), size); - return; -disable: - pr_cont(" - disabling initrd\n"); - initrd_start = 0; - initrd_end = 0; -} -#endif /* CONFIG_BLK_DEV_INITRD */ - -pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; -pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); - -#ifndef __PAGETABLE_PMD_FOLDED -#define NUM_SWAPPER_PMDS ((uintptr_t)-PAGE_OFFSET >> PGDIR_SHIFT) -pmd_t swapper_pmd[PTRS_PER_PMD*((-PAGE_OFFSET)/PGDIR_SIZE)] __page_aligned_bss; -pmd_t trampoline_pmd[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); -#endif - -asmlinkage void __init setup_vm(void) -{ - extern char _start; - uintptr_t i; - uintptr_t pa = (uintptr_t) &_start; - pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_EXEC); - - va_pa_offset = PAGE_OFFSET - pa; - pfn_base = PFN_DOWN(pa); - - /* Sanity check alignment and size */ - BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); - BUG_ON((pa % (PAGE_SIZE * PTRS_PER_PTE)) != 0); - -#ifndef __PAGETABLE_PMD_FOLDED - trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] = - pfn_pgd(PFN_DOWN((uintptr_t)trampoline_pmd), - __pgprot(_PAGE_TABLE)); - trampoline_pmd[0] = pfn_pmd(PFN_DOWN(pa), prot); - - for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) { - size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i; - swapper_pg_dir[o] = - pfn_pgd(PFN_DOWN((uintptr_t)swapper_pmd) + i, - __pgprot(_PAGE_TABLE)); - } - for (i = 0; i < ARRAY_SIZE(swapper_pmd); i++) - swapper_pmd[i] = pfn_pmd(PFN_DOWN(pa + i * PMD_SIZE), prot); -#else - trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] = - pfn_pgd(PFN_DOWN(pa), prot); - - for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) { - size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i; - swapper_pg_dir[o] = - pfn_pgd(PFN_DOWN(pa + i * PGDIR_SIZE), prot); - } -#endif -} - void __init parse_dtb(unsigned int hartid, void *dtb) { - if (!early_init_dt_scan(__va(dtb))) + if (early_init_dt_scan(__va(dtb))) return; pr_err("No DTB passed to the kernel\n"); @@ -159,60 +72,17 @@ void __init parse_dtb(unsigned int hartid, void *dtb) #endif } -static void __init setup_bootmem(void) -{ - struct memblock_region *reg; - phys_addr_t mem_size = 0; - - /* Find the memory region containing the kernel */ - for_each_memblock(memory, reg) { - phys_addr_t vmlinux_end = __pa(_end); - phys_addr_t end = reg->base + reg->size; - - if (reg->base <= vmlinux_end && vmlinux_end <= end) { - /* - * Reserve from the start of the region to the end of - * the kernel - */ - memblock_reserve(reg->base, vmlinux_end - reg->base); - mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); - } - } - BUG_ON(mem_size == 0); - - set_max_mapnr(PFN_DOWN(mem_size)); - max_low_pfn = memblock_end_of_DRAM(); - -#ifdef CONFIG_BLK_DEV_INITRD - setup_initrd(); -#endif /* CONFIG_BLK_DEV_INITRD */ - - early_init_fdt_reserve_self(); - early_init_fdt_scan_reserved_mem(); - memblock_allow_resize(); - memblock_dump_all(); - - 
for_each_memblock(memory, reg) { - unsigned long start_pfn = memblock_region_memory_base_pfn(reg); - unsigned long end_pfn = memblock_region_memory_end_pfn(reg); - - memblock_set_node(PFN_PHYS(start_pfn), - PFN_PHYS(end_pfn - start_pfn), - &memblock.memory, 0); - } -} - void __init setup_arch(char **cmdline_p) { - *cmdline_p = boot_command_line; - - parse_early_param(); - init_mm.start_code = (unsigned long) _stext; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; + *cmdline_p = boot_command_line; + + parse_early_param(); + setup_bootmem(); paging_init(); unflatten_device_tree(); @@ -231,4 +101,3 @@ void __init setup_arch(char **cmdline_p) riscv_fill_hwcap(); } - diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 246635eac7bb..0c41d07ec281 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -36,6 +36,15 @@ enum ipi_message_type { IPI_MAX }; +unsigned long __cpuid_to_hartid_map[NR_CPUS] = { + [0 ... NR_CPUS-1] = INVALID_HARTID +}; + +void __init smp_setup_processor_id(void) +{ + cpuid_to_hartid_map(0) = boot_cpu_hartid; +} + /* A collection of single bit ipi messages. */ static struct { unsigned long stats[IPI_MAX] ____cacheline_aligned; @@ -51,7 +60,6 @@ int riscv_hartid_to_cpuid(int hartid) return i; pr_err("Couldn't find cpu id for hartid [%d]\n", hartid); - BUG(); return i; } diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index fc185ecabb0a..eb533b5c2c8c 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -39,6 +39,7 @@ void *__cpu_up_stack_pointer[NR_CPUS]; void *__cpu_up_task_pointer[NR_CPUS]; +static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { @@ -50,30 +51,31 @@ void __init smp_prepare_cpus(unsigned int max_cpus) void __init setup_smp(void) { - struct device_node *dn = NULL; + struct device_node *dn; int hart; bool found_boot_cpu = false; int cpuid = 1; - while ((dn = of_find_node_by_type(dn, "cpu"))) { + for_each_of_cpu_node(dn) { hart = riscv_of_processor_hartid(dn); - if (hart < 0) { - of_node_put(dn); + if (hart < 0) continue; - } if (hart == cpuid_to_hartid_map(0)) { BUG_ON(found_boot_cpu); found_boot_cpu = 1; - of_node_put(dn); continue; } + if (cpuid >= NR_CPUS) { + pr_warn("Invalid cpuid [%d] for hartid [%d]\n", + cpuid, hart); + break; + } cpuid_to_hartid_map(cpuid) = hart; set_cpu_possible(cpuid, true); set_cpu_present(cpuid, true); cpuid++; - of_node_put(dn); } BUG_ON(!found_boot_cpu); @@ -81,6 +83,7 @@ void __init setup_smp(void) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { + int ret = 0; int hartid = cpuid_to_hartid_map(cpu); tidle->thread_info.cpu = cpu; @@ -96,10 +99,16 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) task_stack_page(tidle) + THREAD_SIZE); WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle); - while (!cpu_online(cpu)) - cpu_relax(); + lockdep_assert_held(&cpu_running); + wait_for_completion_timeout(&cpu_running, + msecs_to_jiffies(1000)); + + if (!cpu_online(cpu)) { + pr_crit("CPU%u: failed to come online\n", cpu); + ret = -EIO; + } - return 0; + return ret; } void __init smp_cpus_done(unsigned int max_cpus) @@ -125,6 +134,7 @@ asmlinkage void __init smp_callin(void) * a local TLB flush right now just in case. */ local_flush_tlb_all(); + complete(&cpu_running); /* * Disable preemption before enabling interrupts, so we don't try to * schedule a CPU that hasn't actually started yet. 
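The smpboot.c hunk above replaces the boot CPU's busy-wait on cpu_online() with a completion that the secondary hart signals from smp_callin(), bounded by a one-second timeout. The stand-alone sketch below only illustrates that hand-off pattern and is not part of the patch: the demo_* names are invented for the example, and unlike the patch it keys its failure path off the timeout return value instead of re-checking cpu_online(). The point of the change is the same either way: the boot CPU no longer spins in cpu_relax() and can fail gracefully with -EIO if the hart never shows up.

/*
 * Illustrative sketch of the completion-based CPU bring-up hand-off
 * (demo_* identifiers are hypothetical, not from the patch).
 */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static DECLARE_COMPLETION(demo_cpu_running);

/* Boot CPU side: wait at most one second for the secondary CPU to check in. */
static int demo_wait_for_secondary(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&demo_cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to come online\n", cpu);
		return -EIO;
	}
	return 0;
}

/* Secondary CPU side: signal the boot CPU once this CPU is running C code. */
static void demo_secondary_entry(void)
{
	complete(&demo_cpu_running);
}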
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c index 582cb153eb24..0cd044122234 100644 --- a/arch/riscv/kernel/vdso.c +++ b/arch/riscv/kernel/vdso.c @@ -54,7 +54,6 @@ static int __init vdso_init(void) struct page *pg; pg = virt_to_page(vdso_start + (i << PAGE_SHIFT)); - ClearPageReserved(pg); vdso_pagelist[i] = pg; } vdso_pagelist[i] = virt_to_page(vdso_data); diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index eed1c137f618..fec62b24df89 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -2,9 +2,11 @@ # Symbols present in the vdso vdso-syms = rt_sigreturn +ifdef CONFIG_64BIT vdso-syms += gettimeofday vdso-syms += clock_gettime vdso-syms += clock_getres +endif vdso-syms += getcpu vdso-syms += flush_icache diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 1e1395d63dab..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -18,8 +18,6 @@ #include #include -#define MAX_BYTES_PER_LONG 0x10 - OUTPUT_ARCH(riscv) ENTRY(_start) @@ -76,6 +74,8 @@ SECTIONS *(.sbss*) } + BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) + EXCEPTION_TABLE(0x10) NOTES @@ -83,10 +83,6 @@ SECTIONS *(.rel.dyn*) } - BSS_SECTION(MAX_BYTES_PER_LONG, - MAX_BYTES_PER_LONG, - MAX_BYTES_PER_LONG) - _end = .; STABS_DEBUG diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 1d9bfaff60bc..b379a75ac6a6 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -17,7 +17,9 @@ #include #include #include +#include +#include #include #include #include @@ -28,7 +30,8 @@ static void __init zone_sizes_init(void) unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; #ifdef CONFIG_ZONE_DMA32 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, + (unsigned long) PFN_PHYS(max_low_pfn))); #endif max_zone_pfns[ZONE_NORMAL] = max_low_pfn; @@ -65,7 +68,159 @@ void free_initmem(void) } #ifdef CONFIG_BLK_DEV_INITRD -void free_initrd_mem(unsigned long start, unsigned long end) +static void __init setup_initrd(void) { + unsigned long size; + + if (initrd_start >= initrd_end) { + pr_info("initrd not found or empty"); + goto disable; + } + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { + pr_err("initrd extends beyond end of memory"); + goto disable; + } + + size = initrd_end - initrd_start; + memblock_reserve(__pa(initrd_start), size); + initrd_below_start_ok = 1; + + pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", + (void *)(initrd_start), size); + return; +disable: + pr_cont(" - disabling initrd\n"); + initrd_start = 0; + initrd_end = 0; +} + +void __init free_initrd_mem(unsigned long start, unsigned long end) +{ + free_reserved_area((void *)start, (void *)end, -1, "initrd"); } #endif /* CONFIG_BLK_DEV_INITRD */ + +void __init setup_bootmem(void) +{ + struct memblock_region *reg; + phys_addr_t mem_size = 0; + + /* Find the memory region containing the kernel */ + for_each_memblock(memory, reg) { + phys_addr_t vmlinux_end = __pa(_end); + phys_addr_t end = reg->base + reg->size; + + if (reg->base <= vmlinux_end && vmlinux_end <= end) { + /* + * Reserve from the start of the region to the end of + * the kernel + */ + memblock_reserve(reg->base, vmlinux_end - reg->base); + mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET); + } + } + BUG_ON(mem_size == 0); + + set_max_mapnr(PFN_DOWN(mem_size)); + max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); + +#ifdef CONFIG_BLK_DEV_INITRD + setup_initrd(); +#endif /* 
CONFIG_BLK_DEV_INITRD */ + + early_init_fdt_reserve_self(); + early_init_fdt_scan_reserved_mem(); + memblock_allow_resize(); + memblock_dump_all(); + + for_each_memblock(memory, reg) { + unsigned long start_pfn = memblock_region_memory_base_pfn(reg); + unsigned long end_pfn = memblock_region_memory_end_pfn(reg); + + memblock_set_node(PFN_PHYS(start_pfn), + PFN_PHYS(end_pfn - start_pfn), + &memblock.memory, 0); + } +} + +pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; +pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); + +#ifndef __PAGETABLE_PMD_FOLDED +#define NUM_SWAPPER_PMDS ((uintptr_t)-PAGE_OFFSET >> PGDIR_SHIFT) +pmd_t swapper_pmd[PTRS_PER_PMD*((-PAGE_OFFSET)/PGDIR_SIZE)] __page_aligned_bss; +pmd_t trampoline_pmd[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); +pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; +#endif + +pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; + +void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) +{ + unsigned long addr = __fix_to_virt(idx); + pte_t *ptep; + + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); + + ptep = &fixmap_pte[pte_index(addr)]; + + if (pgprot_val(prot)) { + set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot)); + } else { + pte_clear(&init_mm, addr, ptep); + local_flush_tlb_page(addr); + } +} + +asmlinkage void __init setup_vm(void) +{ + extern char _start; + uintptr_t i; + uintptr_t pa = (uintptr_t) &_start; + pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_EXEC); + + va_pa_offset = PAGE_OFFSET - pa; + pfn_base = PFN_DOWN(pa); + + /* Sanity check alignment and size */ + BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0); + BUG_ON((pa % (PAGE_SIZE * PTRS_PER_PTE)) != 0); + +#ifndef __PAGETABLE_PMD_FOLDED + trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] = + pfn_pgd(PFN_DOWN((uintptr_t)trampoline_pmd), + __pgprot(_PAGE_TABLE)); + trampoline_pmd[0] = pfn_pmd(PFN_DOWN(pa), prot); + + for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) { + size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i; + + swapper_pg_dir[o] = + pfn_pgd(PFN_DOWN((uintptr_t)swapper_pmd) + i, + __pgprot(_PAGE_TABLE)); + } + for (i = 0; i < ARRAY_SIZE(swapper_pmd); i++) + swapper_pmd[i] = pfn_pmd(PFN_DOWN(pa + i * PMD_SIZE), prot); + + swapper_pg_dir[(FIXADDR_START >> PGDIR_SHIFT) % PTRS_PER_PGD] = + pfn_pgd(PFN_DOWN((uintptr_t)fixmap_pmd), + __pgprot(_PAGE_TABLE)); + fixmap_pmd[(FIXADDR_START >> PMD_SHIFT) % PTRS_PER_PMD] = + pfn_pmd(PFN_DOWN((uintptr_t)fixmap_pte), + __pgprot(_PAGE_TABLE)); +#else + trampoline_pg_dir[(PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD] = + pfn_pgd(PFN_DOWN(pa), prot); + + for (i = 0; i < (-PAGE_OFFSET)/PGDIR_SIZE; ++i) { + size_t o = (PAGE_OFFSET >> PGDIR_SHIFT) % PTRS_PER_PGD + i; + + swapper_pg_dir[o] = + pfn_pgd(PFN_DOWN(pa + i * PGDIR_SIZE), prot); + } + + swapper_pg_dir[(FIXADDR_START >> PGDIR_SHIFT) % PTRS_PER_PGD] = + pfn_pgd(PFN_DOWN((uintptr_t)fixmap_pte), + __pgprot(_PAGE_TABLE)); +#endif +} diff --git a/arch/riscv/net/Makefile b/arch/riscv/net/Makefile new file mode 100644 index 000000000000..a132220cc582 --- /dev/null +++ b/arch/riscv/net/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c new file mode 100644 index 000000000000..80b12aa5e10d --- /dev/null +++ b/arch/riscv/net/bpf_jit_comp.c @@ -0,0 +1,1602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* BPF JIT compiler for RV64G + * + * Copyright(c) 2019 Björn Töpel + * + */ + +#include +#include +#include + +enum { + 
RV_REG_ZERO = 0, /* The constant value 0 */ + RV_REG_RA = 1, /* Return address */ + RV_REG_SP = 2, /* Stack pointer */ + RV_REG_GP = 3, /* Global pointer */ + RV_REG_TP = 4, /* Thread pointer */ + RV_REG_T0 = 5, /* Temporaries */ + RV_REG_T1 = 6, + RV_REG_T2 = 7, + RV_REG_FP = 8, + RV_REG_S1 = 9, /* Saved registers */ + RV_REG_A0 = 10, /* Function argument/return values */ + RV_REG_A1 = 11, /* Function arguments */ + RV_REG_A2 = 12, + RV_REG_A3 = 13, + RV_REG_A4 = 14, + RV_REG_A5 = 15, + RV_REG_A6 = 16, + RV_REG_A7 = 17, + RV_REG_S2 = 18, /* Saved registers */ + RV_REG_S3 = 19, + RV_REG_S4 = 20, + RV_REG_S5 = 21, + RV_REG_S6 = 22, + RV_REG_S7 = 23, + RV_REG_S8 = 24, + RV_REG_S9 = 25, + RV_REG_S10 = 26, + RV_REG_S11 = 27, + RV_REG_T3 = 28, /* Temporaries */ + RV_REG_T4 = 29, + RV_REG_T5 = 30, + RV_REG_T6 = 31, +}; + +#define RV_REG_TCC RV_REG_A6 +#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */ + +static const int regmap[] = { + [BPF_REG_0] = RV_REG_A5, + [BPF_REG_1] = RV_REG_A0, + [BPF_REG_2] = RV_REG_A1, + [BPF_REG_3] = RV_REG_A2, + [BPF_REG_4] = RV_REG_A3, + [BPF_REG_5] = RV_REG_A4, + [BPF_REG_6] = RV_REG_S1, + [BPF_REG_7] = RV_REG_S2, + [BPF_REG_8] = RV_REG_S3, + [BPF_REG_9] = RV_REG_S4, + [BPF_REG_FP] = RV_REG_S5, + [BPF_REG_AX] = RV_REG_T0, +}; + +enum { + RV_CTX_F_SEEN_TAIL_CALL = 0, + RV_CTX_F_SEEN_CALL = RV_REG_RA, + RV_CTX_F_SEEN_S1 = RV_REG_S1, + RV_CTX_F_SEEN_S2 = RV_REG_S2, + RV_CTX_F_SEEN_S3 = RV_REG_S3, + RV_CTX_F_SEEN_S4 = RV_REG_S4, + RV_CTX_F_SEEN_S5 = RV_REG_S5, + RV_CTX_F_SEEN_S6 = RV_REG_S6, +}; + +struct rv_jit_context { + struct bpf_prog *prog; + u32 *insns; /* RV insns */ + int ninsns; + int epilogue_offset; + int *offset; /* BPF to RV */ + unsigned long flags; + int stack_size; +}; + +struct rv_jit_data { + struct bpf_binary_header *header; + u8 *image; + struct rv_jit_context ctx; +}; + +static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx) +{ + u8 reg = regmap[bpf_reg]; + + switch (reg) { + case RV_CTX_F_SEEN_S1: + case RV_CTX_F_SEEN_S2: + case RV_CTX_F_SEEN_S3: + case RV_CTX_F_SEEN_S4: + case RV_CTX_F_SEEN_S5: + case RV_CTX_F_SEEN_S6: + __set_bit(reg, &ctx->flags); + } + return reg; +}; + +static bool seen_reg(int reg, struct rv_jit_context *ctx) +{ + switch (reg) { + case RV_CTX_F_SEEN_CALL: + case RV_CTX_F_SEEN_S1: + case RV_CTX_F_SEEN_S2: + case RV_CTX_F_SEEN_S3: + case RV_CTX_F_SEEN_S4: + case RV_CTX_F_SEEN_S5: + case RV_CTX_F_SEEN_S6: + return test_bit(reg, &ctx->flags); + } + return false; +} + +static void mark_call(struct rv_jit_context *ctx) +{ + __set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags); +} + +static bool seen_call(struct rv_jit_context *ctx) +{ + return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags); +} + +static void mark_tail_call(struct rv_jit_context *ctx) +{ + __set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags); +} + +static bool seen_tail_call(struct rv_jit_context *ctx) +{ + return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags); +} + +static u8 rv_tail_call_reg(struct rv_jit_context *ctx) +{ + mark_tail_call(ctx); + + if (seen_call(ctx)) { + __set_bit(RV_CTX_F_SEEN_S6, &ctx->flags); + return RV_REG_S6; + } + return RV_REG_A6; +} + +static void emit(const u32 insn, struct rv_jit_context *ctx) +{ + if (ctx->insns) + ctx->insns[ctx->ninsns] = insn; + + ctx->ninsns++; +} + +static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode) +{ + return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (rd << 7) | opcode; +} + +static u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode) 
+{ + return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | + opcode; +} + +static u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode) +{ + u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f; + + return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (imm4_0 << 7) | opcode; +} + +static u32 rv_sb_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode) +{ + u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4); + u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10); + + return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | + (imm4_1 << 7) | opcode; +} + +static u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode) +{ + return (imm31_12 << 12) | (rd << 7) | opcode; +} + +static u32 rv_uj_insn(u32 imm20_1, u8 rd, u8 opcode) +{ + u32 imm; + + imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) | + ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11); + + return (imm << 12) | (rd << 7) | opcode; +} + +static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1, + u8 funct3, u8 rd, u8 opcode) +{ + u8 funct7 = (funct5 << 2) | (aq << 1) | rl; + + return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode); +} + +static u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b); +} + +static u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x13); +} + +static u32 rv_addw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b); +} + +static u32 rv_add(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 0, rd, 0x33); +} + +static u32 rv_subw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b); +} + +static u32 rv_sub(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33); +} + +static u32 rv_and(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 7, rd, 0x33); +} + +static u32 rv_or(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 6, rd, 0x33); +} + +static u32 rv_xor(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 4, rd, 0x33); +} + +static u32 rv_mulw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b); +} + +static u32 rv_mul(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 0, rd, 0x33); +} + +static u32 rv_divuw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b); +} + +static u32 rv_divu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 5, rd, 0x33); +} + +static u32 rv_remuw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b); +} + +static u32 rv_remu(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(1, rs2, rs1, 7, rd, 0x33); +} + +static u32 rv_sllw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b); +} + +static u32 rv_sll(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 1, rd, 0x33); +} + +static u32 rv_srlw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b); +} + +static u32 rv_srl(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0, rs2, rs1, 5, rd, 0x33); +} + +static u32 rv_sraw(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b); +} + +static u32 rv_sra(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33); +} + +static u32 rv_lui(u8 rd, u32 imm31_12) +{ + return rv_u_insn(imm31_12, rd, 0x37); +} + +static u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 1, rd, 0x13); +} + +static u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 7, rd, 0x13); +} + +static u32 
rv_ori(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 6, rd, 0x13); +} + +static u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 4, rd, 0x13); +} + +static u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b); +} + +static u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b); +} + +static u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x13); +} + +static u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b); +} + +static u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13); +} + +static u32 rv_jal(u8 rd, u32 imm20_1) +{ + return rv_uj_insn(imm20_1, rd, 0x6f); +} + +static u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0) +{ + return rv_i_insn(imm11_0, rs1, 0, rd, 0x67); +} + +static u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 0, 0x63); +} + +static u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 6, 0x63); +} + +static u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 7, 0x63); +} + +static u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 1, 0x63); +} + +static u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 4, 0x63); +} + +static u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1) +{ + return rv_sb_insn(imm12_1, rs2, rs1, 5, 0x63); +} + +static u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23); +} + +static u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23); +} + +static u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23); +} + +static u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2) +{ + return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23); +} + +static u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 4, rd, 0x03); +} + +static u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 5, rd, 0x03); +} + +static u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 6, rd, 0x03); +} + +static u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1) +{ + return rv_i_insn(imm11_0, rs1, 3, rd, 0x03); +} + +static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) +{ + return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f); +} + +static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) +{ + return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f); +} + +static bool is_12b_int(s64 val) +{ + return -(1 << 11) <= val && val < (1 << 11); +} + +static bool is_13b_int(s64 val) +{ + return -(1 << 12) <= val && val < (1 << 12); +} + +static bool is_21b_int(s64 val) +{ + return -(1L << 20) <= val && val < (1L << 20); +} + +static bool is_32b_int(s64 val) +{ + return -(1L << 31) <= val && val < (1L << 31); +} + +static int is_12b_check(int off, int insn) +{ + if (!is_12b_int(off)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + insn, (int)off); + return -1; + } + return 0; +} + +static int is_13b_check(int off, int insn) +{ + if (!is_13b_int(off)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + insn, (int)off); + return -1; + } + return 0; +} + +static int is_21b_check(int off, int insn) +{ + if (!is_21b_int(off)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + insn, (int)off); + return -1; + } + return 0; +} + +static void 
emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) +{ + /* Note that the immediate from the add is sign-extended, + * which means that we need to compensate this by adding 2^12, + * when the 12th bit is set. A simpler way of doing this, and + * getting rid of the check, is to just add 2**11 before the + * shift. The "Loading a 32-Bit constant" example from the + * "Computer Organization and Design, RISC-V edition" book by + * Patterson/Hennessy highlights this fact. + * + * This also means that we need to process LSB to MSB. + */ + s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff; + int shift; + + if (is_32b_int(val)) { + if (upper) + emit(rv_lui(rd, upper), ctx); + + if (!upper) { + emit(rv_addi(rd, RV_REG_ZERO, lower), ctx); + return; + } + + emit(rv_addiw(rd, rd, lower), ctx); + return; + } + + shift = __ffs(upper); + upper >>= shift; + shift += 12; + + emit_imm(rd, upper, ctx); + + emit(rv_slli(rd, rd, shift), ctx); + if (lower) + emit(rv_addi(rd, rd, lower), ctx); +} + +static int rv_offset(int bpf_to, int bpf_from, struct rv_jit_context *ctx) +{ + int from = ctx->offset[bpf_from] - 1, to = ctx->offset[bpf_to]; + + return (to - from) << 2; +} + +static int epilogue_offset(struct rv_jit_context *ctx) +{ + int to = ctx->epilogue_offset, from = ctx->ninsns; + + return (to - from) << 2; +} + +static void __build_epilogue(u8 reg, struct rv_jit_context *ctx) +{ + int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8; + + if (seen_reg(RV_REG_RA, ctx)) { + emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + if (seen_reg(RV_REG_S1, ctx)) { + emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S2, ctx)) { + emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S3, ctx)) { + emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S4, ctx)) { + emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S5, ctx)) { + emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S6, ctx)) { + emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx); + store_offset -= 8; + } + + emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx); + /* Set return value. */ + emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx); + emit(rv_jalr(RV_REG_ZERO, reg, 0), ctx); +} + +static void emit_zext_32(u8 reg, struct rv_jit_context *ctx) +{ + emit(rv_slli(reg, reg, 32), ctx); + emit(rv_srli(reg, reg, 32), ctx); +} + +static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) +{ + int tc_ninsn, off, start_insn = ctx->ninsns; + u8 tcc = rv_tail_call_reg(ctx); + + /* a0: &ctx + * a1: &array + * a2: index + * + * if (index >= array->map.max_entries) + * goto out; + */ + tc_ninsn = insn ? 
ctx->offset[insn] - ctx->offset[insn - 1] : + ctx->offset[0]; + emit_zext_32(RV_REG_A2, ctx); + + off = offsetof(struct bpf_array, map.max_entries); + if (is_12b_check(off, insn)) + return -1; + emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + if (is_13b_check(off, insn)) + return -1; + emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx); + + /* if (--TCC < 0) + * goto out; + */ + emit(rv_addi(RV_REG_T1, tcc, -1), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + if (is_13b_check(off, insn)) + return -1; + emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx); + + /* prog = array->ptrs[index]; + * if (!prog) + * goto out; + */ + emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx); + off = offsetof(struct bpf_array, ptrs); + if (is_12b_check(off, insn)) + return -1; + emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx); + off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + if (is_13b_check(off, insn)) + return -1; + emit(rv_beq(RV_REG_T2, RV_REG_ZERO, off >> 1), ctx); + + /* goto *(prog->bpf_func + 4); */ + off = offsetof(struct bpf_prog, bpf_func); + if (is_12b_check(off, insn)) + return -1; + emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx); + emit(rv_addi(RV_REG_T3, RV_REG_T3, 4), ctx); + emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx); + __build_epilogue(RV_REG_T3, ctx); + return 0; +} + +static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, + struct rv_jit_context *ctx) +{ + u8 code = insn->code; + + switch (code) { + case BPF_JMP | BPF_JA: + case BPF_JMP | BPF_CALL: + case BPF_JMP | BPF_EXIT: + case BPF_JMP | BPF_TAIL_CALL: + break; + default: + *rd = bpf_to_rv_reg(insn->dst_reg, ctx); + } + + if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) || + code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) || + code & BPF_LDX || code & BPF_STX) + *rs = bpf_to_rv_reg(insn->src_reg, ctx); +} + +static int rv_offset_check(int *rvoff, s16 off, int insn, + struct rv_jit_context *ctx) +{ + *rvoff = rv_offset(insn + off, insn, ctx); + return is_13b_check(*rvoff, insn); +} + +static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) +{ + emit(rv_addi(RV_REG_T2, *rd, 0), ctx); + emit_zext_32(RV_REG_T2, ctx); + emit(rv_addi(RV_REG_T1, *rs, 0), ctx); + emit_zext_32(RV_REG_T1, ctx); + *rd = RV_REG_T2; + *rs = RV_REG_T1; +} + +static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) +{ + emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); + emit(rv_addiw(RV_REG_T1, *rs, 0), ctx); + *rd = RV_REG_T2; + *rs = RV_REG_T1; +} + +static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) +{ + emit(rv_addi(RV_REG_T2, *rd, 0), ctx); + emit_zext_32(RV_REG_T2, ctx); + emit_zext_32(RV_REG_T1, ctx); + *rd = RV_REG_T2; +} + +static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) +{ + emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); + *rd = RV_REG_T2; +} + +static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + bool extra_pass) +{ + bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || + BPF_CLASS(insn->code) == BPF_JMP; + int rvoff, i = insn - ctx->prog->insnsi; + u8 rd = -1, rs = -1, code = insn->code; + s16 off = insn->off; + s32 imm = insn->imm; + + init_regs(&rd, &rs, insn, ctx); + + switch (code) { + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_X: + emit(is64 ? 
rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + + /* dst = dst OP src */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit(rv_and(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit(rv_or(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(rv_xor(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: + emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx); + break; + + /* dst = -dst */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + emit(is64 ? 
rv_sub(rd, RV_REG_ZERO, rd) : + rv_subw(rd, RV_REG_ZERO, rd), ctx); + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + { + int shift = 64 - imm; + + emit(rv_slli(rd, rd, shift), ctx); + emit(rv_srli(rd, rd, shift), ctx); + break; + } + case BPF_ALU | BPF_END | BPF_FROM_BE: + emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + if (imm == 16) + goto out_be; + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + if (imm == 32) + goto out_be; + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); + + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); + emit(rv_srli(rd, rd, 8), ctx); +out_be: + emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + + emit(rv_addi(rd, RV_REG_T2, 0), ctx); + break; + + /* dst = imm */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_imm(rd, imm, ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + + /* dst = dst OP imm */ + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + if (is_12b_int(imm)) { + emit(is64 ? rv_addi(rd, rd, imm) : + rv_addiw(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_add(rd, rd, RV_REG_T1) : + rv_addw(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + if (is_12b_int(-imm)) { + emit(is64 ? rv_addi(rd, rd, -imm) : + rv_addiw(rd, rd, -imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? 
rv_sub(rd, rd, RV_REG_T1) : + rv_subw(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + if (is_12b_int(imm)) { + emit(rv_andi(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(rv_and(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + if (is_12b_int(imm)) { + emit(rv_ori(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(rv_or(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + if (is_12b_int(imm)) { + emit(rv_xori(rd, rd, imm), ctx); + } else { + emit_imm(RV_REG_T1, imm, ctx); + emit(rv_xor(rd, rd, RV_REG_T1), ctx); + } + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_mul(rd, rd, RV_REG_T1) : + rv_mulw(rd, rd, RV_REG_T1), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_divu(rd, rd, RV_REG_T1) : + rv_divuw(rd, rd, RV_REG_T1), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: + emit_imm(RV_REG_T1, imm, ctx); + emit(is64 ? rv_remu(rd, rd, RV_REG_T1) : + rv_remuw(rd, rd, RV_REG_T1), ctx); + if (!is64) + emit_zext_32(rd, ctx); + break; + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + emit(is64 ? 
rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx); + break; + + /* JUMP off */ + case BPF_JMP | BPF_JA: + rvoff = rv_offset(i + off, i, ctx); + if (!is_21b_int(rvoff)) { + pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", + i, rvoff); + return -1; + } + + emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* IF (dst COND src) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_beq(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bltu(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bltu(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bgeu(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bgeu(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bne(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_blt(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_blt(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bge(rd, rs, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_sext_32_rd_rs(&rd, &rs, ctx); + emit(rv_bge(rs, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + if (!is64) + emit_zext_32_rd_rs(&rd, &rs, ctx); + emit(rv_and(RV_REG_T1, rd, rs), ctx); + emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* IF (dst COND imm) JUMP off */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_beq(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bltu(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: 
+ if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bltu(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bgeu(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bgeu(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_bne(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_blt(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_blt(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_bge(rd, RV_REG_T1, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_sext_32_rd(&rd, ctx); + emit(rv_bge(RV_REG_T1, rd, rvoff >> 1), ctx); + break; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + if (rv_offset_check(&rvoff, off, i, ctx)) + return -1; + emit_imm(RV_REG_T1, imm, ctx); + if (!is64) + emit_zext_32_rd_t1(&rd, ctx); + emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx); + emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* function call */ + case BPF_JMP | BPF_CALL: + { + bool fixed; + int i, ret; + u64 addr; + + mark_call(ctx); + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr, + &fixed); + if (ret < 0) + return ret; + if (fixed) { + emit_imm(RV_REG_T1, addr, ctx); + } else { + i = ctx->ninsns; + emit_imm(RV_REG_T1, addr, ctx); + for (i = ctx->ninsns - i; i < 8; i++) { + /* nop */ + emit(rv_addi(RV_REG_ZERO, RV_REG_ZERO, 0), + ctx); + } + } + emit(rv_jalr(RV_REG_RA, RV_REG_T1, 0), ctx); + rd = bpf_to_rv_reg(BPF_REG_0, ctx); + emit(rv_addi(rd, RV_REG_A0, 0), ctx); + break; + } + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(i, ctx)) + return -1; + break; + + /* function return */ + case BPF_JMP | BPF_EXIT: + if (i == ctx->prog->len - 1) + break; + + rvoff = epilogue_offset(ctx); + if (is_21b_check(rvoff, i)) + return -1; + emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx); + break; + + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + struct bpf_insn insn1 = insn[1]; + u64 imm64; + + imm64 = (u64)insn1.imm << 32 | (u32)imm; + emit_imm(rd, imm64, ctx); + return 1; + } + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + if 
(is_12b_int(off)) { + emit(rv_lbu(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_lbu(rd, 0, RV_REG_T1), ctx); + break; + case BPF_LDX | BPF_MEM | BPF_H: + if (is_12b_int(off)) { + emit(rv_lhu(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_lhu(rd, 0, RV_REG_T1), ctx); + break; + case BPF_LDX | BPF_MEM | BPF_W: + if (is_12b_int(off)) { + emit(rv_lwu(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_lwu(rd, 0, RV_REG_T1), ctx); + break; + case BPF_LDX | BPF_MEM | BPF_DW: + if (is_12b_int(off)) { + emit(rv_ld(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit(rv_ld(rd, 0, RV_REG_T1), ctx); + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sb(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + + case BPF_ST | BPF_MEM | BPF_H: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sh(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + case BPF_ST | BPF_MEM | BPF_W: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sw(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + case BPF_ST | BPF_MEM | BPF_DW: + emit_imm(RV_REG_T1, imm, ctx); + if (is_12b_int(off)) { + emit(rv_sd(rd, off, RV_REG_T1), ctx); + break; + } + + emit_imm(RV_REG_T2, off, ctx); + emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx); + break; + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_B: + if (is_12b_int(off)) { + emit(rv_sb(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sb(RV_REG_T1, 0, rs), ctx); + break; + case BPF_STX | BPF_MEM | BPF_H: + if (is_12b_int(off)) { + emit(rv_sh(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sh(RV_REG_T1, 0, rs), ctx); + break; + case BPF_STX | BPF_MEM | BPF_W: + if (is_12b_int(off)) { + emit(rv_sw(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sw(RV_REG_T1, 0, rs), ctx); + break; + case BPF_STX | BPF_MEM | BPF_DW: + if (is_12b_int(off)) { + emit(rv_sd(rd, off, rs), ctx); + break; + } + + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit(rv_sd(RV_REG_T1, 0, rs), ctx); + break; + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + if (off) { + if (is_12b_int(off)) { + emit(rv_addi(RV_REG_T1, rd, off), ctx); + } else { + emit_imm(RV_REG_T1, off, ctx); + emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + } + + rd = RV_REG_T1; + } + + emit(BPF_SIZE(code) == BPF_W ? 
+ rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) : + rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx); + break; + default: + pr_err("bpf-jit: unknown opcode %02x\n", code); + return -EINVAL; + } + + return 0; +} + +static void build_prologue(struct rv_jit_context *ctx) +{ + int stack_adjust = 0, store_offset, bpf_stack_adjust; + + if (seen_reg(RV_REG_RA, ctx)) + stack_adjust += 8; + stack_adjust += 8; /* RV_REG_FP */ + if (seen_reg(RV_REG_S1, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S2, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S3, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S4, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S5, ctx)) + stack_adjust += 8; + if (seen_reg(RV_REG_S6, ctx)) + stack_adjust += 8; + + stack_adjust = round_up(stack_adjust, 16); + bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); + stack_adjust += bpf_stack_adjust; + + store_offset = stack_adjust - 8; + + /* First instruction is always setting the tail-call-counter + * (TCC) register. This instruction is skipped for tail calls. + */ + emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx); + + emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx); + + if (seen_reg(RV_REG_RA, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx); + store_offset -= 8; + } + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx); + store_offset -= 8; + if (seen_reg(RV_REG_S1, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S2, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S3, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S4, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S5, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx); + store_offset -= 8; + } + if (seen_reg(RV_REG_S6, ctx)) { + emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx); + store_offset -= 8; + } + + emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx); + + if (bpf_stack_adjust) + emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx); + + /* Program contains calls and tail calls, so RV_REG_TCC needs + * to be saved across calls. 
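 * A rough pseudo-C sketch of the tail-call pattern this counter guards
 * (for orientation only; the emit_bpf_tail_call() body is not part of
 * this hunk):
 *
 *	if (index >= array->map.max_entries)
 *		goto out;
 *	if (--tcc < 0)
 *		goto out;
 *	prog = array->ptrs[index];
 *	if (!prog)
 *		goto out;
 *	goto prog->bpf_func + prologue_size;	// skips the TCC set-up insn
 *
 * Because tcc lives in a register for the whole program, it also has to
 * survive helper calls, hence the copy to RV_REG_TCC_SAVED right below.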
+ */ + if (seen_tail_call(ctx) && seen_call(ctx)) + emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx); + + ctx->stack_size = stack_adjust; +} + +static void build_epilogue(struct rv_jit_context *ctx) +{ + __build_epilogue(RV_REG_RA, ctx); +} + +static int build_body(struct rv_jit_context *ctx, bool extra_pass) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + ret = emit_insn(insn, ctx, extra_pass); + if (ret > 0) { + i++; + if (ctx->insns == NULL) + ctx->offset[i] = ctx->ninsns; + continue; + } + if (ctx->insns == NULL) + ctx->offset[i] = ctx->ninsns; + if (ret) + return ret; + } + return 0; +} + +static void bpf_fill_ill_insns(void *area, unsigned int size) +{ + memset(area, 0, size); +} + +static void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + bool tmp_blinded = false, extra_pass = false; + struct bpf_prog *tmp, *orig_prog = prog; + struct rv_jit_data *jit_data; + struct rv_jit_context *ctx; + unsigned int image_size; + + if (!prog->jit_requested) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + + ctx = &jit_data->ctx; + + if (ctx->offset) { + extra_pass = true; + image_size = sizeof(u32) * ctx->ninsns; + goto skip_init_ctx; + } + + ctx->prog = prog; + ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + if (!ctx->offset) { + prog = orig_prog; + goto out_offset; + } + + /* First pass generates the ctx->offset, but does not emit an image. */ + if (build_body(ctx, extra_pass)) { + prog = orig_prog; + goto out_offset; + } + build_prologue(ctx); + ctx->epilogue_offset = ctx->ninsns; + build_epilogue(ctx); + + /* Allocate image, now that we know the size. */ + image_size = sizeof(u32) * ctx->ninsns; + jit_data->header = bpf_jit_binary_alloc(image_size, &jit_data->image, + sizeof(u32), + bpf_fill_ill_insns); + if (!jit_data->header) { + prog = orig_prog; + goto out_offset; + } + + /* Second, real pass, that actually emits the image. */ + ctx->insns = (u32 *)jit_data->image; +skip_init_ctx: + ctx->ninsns = 0; + + build_prologue(ctx); + if (build_body(ctx, extra_pass)) { + bpf_jit_binary_free(jit_data->header); + prog = orig_prog; + goto out_offset; + } + build_epilogue(ctx); + + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, image_size, 2, ctx->insns); + + prog->bpf_func = (void *)ctx->insns; + prog->jited = 1; + prog->jited_len = image_size; + + bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); + + if (!prog->is_func || extra_pass) { +out_offset: + kfree(ctx->offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? 
+ tmp : orig_prog); + return prog; +} diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index ed554b09eb3f..b6e3d0653002 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -75,6 +75,7 @@ config S390 select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_STRICT_MODULE_RWX + select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_INLINE_READ_LOCK @@ -379,6 +380,7 @@ config COMPAT select COMPAT_BINFMT_ELF if BINFMT_ELF select ARCH_WANT_OLD_COMPAT_IPC select COMPAT_OLD_SIGACTION + select HAVE_UID16 depends on MULTIUSER help Select this option if you want to enable your system kernel to diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile index d5ad724f5c96..11ca8795b74a 100644 --- a/arch/s390/boot/Makefile +++ b/arch/s390/boot/Makefile @@ -58,7 +58,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE $(Q)$(MAKE) $(build)=$(obj)/compressed $@ quiet_cmd_ar = AR $@ - cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^) + cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(real-prereqs) $(obj)/startup.a: $(OBJECTS) FORCE $(call if_changed,ar) @@ -67,6 +67,6 @@ install: $(CONFIGURE) $(obj)/bzImage sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \ System.map "$(INSTALL_PATH)" -chkbss := $(OBJECTS) -chkbss-target := $(obj)/startup.a +chkbss := $(obj-y) +chkbss-target := startup.a include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c index d592e0d90d9f..f902215e9cd9 100644 --- a/arch/s390/boot/als.c +++ b/arch/s390/boot/als.c @@ -7,6 +7,7 @@ #include #include #include +#include "boot.h" /* * The code within this file will be called very early. It may _not_ @@ -58,7 +59,7 @@ static void u16_to_decimal(char *str, u16 val) *str = '\0'; } -static void print_missing_facilities(void) +void print_missing_facilities(void) { static char als_str[80] = "Missing facilities: "; unsigned long val; @@ -90,7 +91,6 @@ static void print_missing_facilities(void) } strcat(als_str, "\n"); sclp_early_printk(als_str); - sclp_early_printk("See Principles of Operations for facility bits\n"); } static void facility_mismatch(void) @@ -98,6 +98,7 @@ static void facility_mismatch(void) sclp_early_printk("The Linux kernel requires more recent processor hardware\n"); print_machine_type(); print_missing_facilities(); + sclp_early_printk("See Principles of Operations for facility bits\n"); disabled_wait(0x8badcccc); } @@ -105,20 +106,7 @@ void verify_facilities(void) { int i; - for (i = 0; i < ARRAY_SIZE(S390_lowcore.stfle_fac_list); i++) - S390_lowcore.stfle_fac_list[i] = 0; - asm volatile( - " stfl 0(0)\n" - : "=m" (S390_lowcore.stfl_fac_list)); - S390_lowcore.stfle_fac_list[0] = (u64)S390_lowcore.stfl_fac_list << 32; - if (S390_lowcore.stfl_fac_list & 0x01000000) { - register unsigned long reg0 asm("0") = ARRAY_SIZE(als) - 1; - - asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */ - : "+d" (reg0) - : "a" (&S390_lowcore.stfle_fac_list) - : "memory", "cc"); - } + __stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list)); for (i = 0; i < ARRAY_SIZE(als); i++) { if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) facility_mismatch(); diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h index fc41e2277ea8..82bc06346e05 100644 --- a/arch/s390/boot/boot.h +++ b/arch/s390/boot/boot.h @@ -6,6 +6,8 @@ void startup_kernel(void); void detect_memory(void); void store_ipl_parmblock(void); void 
setup_boot_command_line(void); +void parse_boot_command_line(void); void setup_memory_end(void); +void print_missing_facilities(void); #endif /* BOOT_BOOT_H */ diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index b1bdd15e3429..fa529c5b4486 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -63,6 +63,6 @@ OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section $(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE $(call if_changed,objcopy) -chkbss := $(filter-out $(obj)/piggy.o $(obj)/info.o,$(OBJECTS)) -chkbss-target := $(obj)/vmlinux.bin +chkbss := $(filter-out piggy.o info.o, $(obj-y)) +chkbss-target := vmlinux.bin include $(srctree)/arch/s390/scripts/Makefile.chkbss diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index 9dab596be98e..36beb56de021 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 +#include #include #include #include #include #include #include +#include #include "boot.h" char __bootdata(early_command_line)[COMMAND_LINE_SIZE]; @@ -143,8 +145,66 @@ void setup_boot_command_line(void) append_ipl_block_parm(); } +static void modify_facility(unsigned long nr, bool clear) +{ + if (clear) + __clear_facility(nr, S390_lowcore.stfle_fac_list); + else + __set_facility(nr, S390_lowcore.stfle_fac_list); +} + +static void check_cleared_facilities(void) +{ + unsigned long als[] = { FACILITIES_ALS }; + int i; + + for (i = 0; i < ARRAY_SIZE(als); i++) { + if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) { + sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n"); + print_missing_facilities(); + break; + } + } +} + +static void modify_fac_list(char *str) +{ + unsigned long val, endval; + char *endp; + bool clear; + + while (*str) { + clear = false; + if (*str == '!') { + clear = true; + str++; + } + val = simple_strtoull(str, &endp, 0); + if (str == endp) + break; + str = endp; + if (*str == '-') { + str++; + endval = simple_strtoull(str, &endp, 0); + if (str == endp) + break; + str = endp; + while (val <= endval) { + modify_facility(val, clear); + val++; + } + } else { + modify_facility(val, clear); + } + if (*str != ',') + break; + str++; + } + check_cleared_facilities(); +} + static char command_line_buf[COMMAND_LINE_SIZE] __section(.data); -static void parse_mem_opt(void) +void parse_boot_command_line(void) { char *param, *val; bool enabled; @@ -165,12 +225,14 @@ static void parse_mem_opt(void) if (!rc && !enabled) noexec_disabled = 1; } + + if (!strcmp(param, "facilities")) + modify_fac_list(val); } } void setup_memory_end(void) { - parse_mem_opt(); #ifdef CONFIG_CRASH_DUMP if (!OLDMEM_BASE && early_ipl_block_valid && early_ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP && diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 4d441317cdeb..bdfc5549a299 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -53,6 +53,7 @@ void startup_kernel(void) sclp_early_read_info(); store_ipl_parmblock(); setup_boot_command_line(); + parse_boot_command_line(); setup_memory_end(); detect_memory(); if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) { diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c index 25aca07898ba..b11e8108773a 100644 --- a/arch/s390/boot/string.c +++ b/arch/s390/boot/string.c @@ -2,6 +2,7 @@ #include #include #include +#undef CONFIG_KASAN #include "../lib/string.c" int strncmp(const char *cs, const 
char *ct, size_t count) diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index c69cb04b7a59..9824c7bad9d4 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -500,7 +500,6 @@ CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y CONFIG_JBD2_DEBUG=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y @@ -520,6 +519,7 @@ CONFIG_BTRFS_DEBUG=y CONFIG_NILFS2_FS=m CONFIG_FS_DAX=y CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FS_ENCRYPTION=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA_NETLINK_INTERFACE=y diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 32f539dc9c19..4fcbe5792744 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -497,7 +497,6 @@ CONFIG_S390_AP_IOMMU=y CONFIG_EXT4_FS=y CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_ENCRYPTION=y CONFIG_JBD2_DEBUG=y CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y @@ -515,6 +514,7 @@ CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_NILFS2_FS=m CONFIG_FS_DAX=y CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FS_ENCRYPTION=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA_NETLINK_INTERFACE=y diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 5346b5a80bb6..0d15383d0ff1 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c @@ -38,7 +38,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, /* check for weak keys */ if (!des_ekey(tmp, key) && - (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -228,7 +228,7 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key, if (!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) && crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2], DES_KEY_SIZE)) && - (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index 52348e0a812e..05f3f9aee5fc 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h @@ -43,7 +43,7 @@ int hypfs_diag0c_init(void); void hypfs_diag0c_exit(void); /* Set Partition-Resource Parameter */ -int hypfs_sprp_init(void); +void hypfs_sprp_init(void); void hypfs_sprp_exit(void); /* debugfs interface */ @@ -69,9 +69,9 @@ struct hypfs_dbfs_file { struct dentry *dentry; }; -extern int hypfs_dbfs_init(void); +extern void hypfs_dbfs_init(void); extern void hypfs_dbfs_exit(void); -extern int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df); +extern void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df); extern void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df); #endif /* _HYPFS_H_ */ diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c index b9bdf5c1918e..f4c7dbfaf8ee 100644 --- a/arch/s390/hypfs/hypfs_dbfs.c +++ b/arch/s390/hypfs/hypfs_dbfs.c @@ -78,14 +78,11 @@ static const struct file_operations dbfs_ops = { .unlocked_ioctl = dbfs_ioctl, }; -int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) +void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df) { df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, &dbfs_ops); - if (IS_ERR(df->dentry)) - return PTR_ERR(df->dentry); mutex_init(&df->lock); - return 0; } void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df) 
@@ -93,10 +90,9 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df) debugfs_remove(df->dentry); } -int hypfs_dbfs_init(void) +void hypfs_dbfs_init(void) { dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); - return PTR_ERR_OR_ZERO(dbfs_dir); } void hypfs_dbfs_exit(void) diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 3452e18bb1ca..f0bc4dc3e9bf 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -440,11 +440,10 @@ __init int hypfs_diag_init(void) pr_err("The hardware system does not support hypfs\n"); return -ENODATA; } - if (diag204_info_type == DIAG204_INFO_EXT) { - rc = hypfs_dbfs_create_file(&dbfs_file_d204); - if (rc) - return rc; - } + + if (diag204_info_type == DIAG204_INFO_EXT) + hypfs_dbfs_create_file(&dbfs_file_d204); + if (MACHINE_IS_LPAR) { rc = diag224_get_name_table(); if (rc) { diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index cebf05150cc1..72e3140fafb5 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -54,8 +54,7 @@ static void *diag0c_store(unsigned int *count) if (!cpu_vec) goto fail_put_online_cpus; /* Note: Diag 0c needs 8 byte alignment and real storage */ - diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) + - cpu_count * sizeof(struct hypfs_diag0c_entry), + diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count), GFP_KERNEL | GFP_DMA); if (!diag0c_data) goto fail_kfree_cpu_vec; @@ -125,7 +124,8 @@ int __init hypfs_diag0c_init(void) { if (!MACHINE_IS_VM) return 0; - return hypfs_dbfs_create_file(&dbfs_file_0c); + hypfs_dbfs_create_file(&dbfs_file_0c); + return 0; } /* diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c index 601b70786dc8..7d9fb496d155 100644 --- a/arch/s390/hypfs/hypfs_sprp.c +++ b/arch/s390/hypfs/hypfs_sprp.c @@ -137,11 +137,11 @@ static struct hypfs_dbfs_file hypfs_sprp_file = { .unlocked_ioctl = hypfs_sprp_ioctl, }; -int hypfs_sprp_init(void) +void hypfs_sprp_init(void) { if (!sclp.has_sprp) - return 0; - return hypfs_dbfs_create_file(&hypfs_sprp_file); + return; + hypfs_dbfs_create_file(&hypfs_sprp_file); } void hypfs_sprp_exit(void) diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index c4b7b681e055..42f2375c203e 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -279,7 +279,8 @@ int hypfs_vm_init(void) guest_query = local_guest; else return -EACCES; - return hypfs_dbfs_create_file(&dbfs_file_2fc); + hypfs_dbfs_create_file(&dbfs_file_2fc); + return 0; } void hypfs_vm_exit(void) diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index c681329fdeec..ccad1398abd4 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -456,9 +456,8 @@ static int __init hypfs_init(void) { int rc; - rc = hypfs_dbfs_init(); - if (rc) - return rc; + hypfs_dbfs_init(); + if (hypfs_diag_init()) { rc = -ENODATA; goto fail_dbfs_exit; @@ -467,10 +466,7 @@ static int __init hypfs_init(void) rc = -ENODATA; goto fail_hypfs_diag_exit; } - if (hypfs_sprp_init()) { - rc = -ENODATA; - goto fail_hypfs_vm_exit; - } + hypfs_sprp_init(); if (hypfs_diag0c_init()) { rc = -ENODATA; goto fail_hypfs_sprp_exit; @@ -489,7 +485,6 @@ fail_hypfs_diag0c_exit: hypfs_diag0c_exit(); fail_hypfs_sprp_exit: hypfs_sprp_exit(); -fail_hypfs_vm_exit: hypfs_vm_exit(); fail_hypfs_diag_exit: hypfs_diag_exit(); diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h new file mode 100644 index 000000000000..649b9fc60685 --- /dev/null +++ 
b/arch/s390/include/asm/cpu_mcf.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Counter facility support definitions for the Linux perf + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner + */ +#ifndef _ASM_S390_CPU_MCF_H +#define _ASM_S390_CPU_MCF_H + +#include +#include + +enum cpumf_ctr_set { + CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */ + CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */ + CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */ + CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */ + CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */ + + /* Maximum number of counter sets */ + CPUMF_CTR_SET_MAX, +}; + +#define CPUMF_LCCTL_ENABLE_SHIFT 16 +#define CPUMF_LCCTL_ACTCTL_SHIFT 0 +static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = { + [CPUMF_CTR_SET_BASIC] = 0x02, + [CPUMF_CTR_SET_USER] = 0x04, + [CPUMF_CTR_SET_CRYPTO] = 0x08, + [CPUMF_CTR_SET_EXT] = 0x01, + [CPUMF_CTR_SET_MT_DIAG] = 0x20, +}; + +static inline void ctr_set_enable(u64 *state, int ctr_set) +{ + *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT; +} +static inline void ctr_set_disable(u64 *state, int ctr_set) +{ + *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT); +} +static inline void ctr_set_start(u64 *state, int ctr_set) +{ + *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT; +} +static inline void ctr_set_stop(u64 *state, int ctr_set) +{ + *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT); +} + +static inline void ctr_set_multiple_enable(u64 *state, u64 ctrsets) +{ + *state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT; +} + +static inline void ctr_set_multiple_disable(u64 *state, u64 ctrsets) +{ + *state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT); +} + +static inline void ctr_set_multiple_start(u64 *state, u64 ctrsets) +{ + *state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT; +} + +static inline void ctr_set_multiple_stop(u64 *state, u64 ctrsets) +{ + *state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT); +} + +static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest) +{ + switch (set) { + case CPUMF_CTR_SET_BASIC: + return stcctm(BASIC, range, dest); + case CPUMF_CTR_SET_USER: + return stcctm(PROBLEM_STATE, range, dest); + case CPUMF_CTR_SET_CRYPTO: + return stcctm(CRYPTO_ACTIVITY, range, dest); + case CPUMF_CTR_SET_EXT: + return stcctm(EXTENDED, range, dest); + case CPUMF_CTR_SET_MT_DIAG: + return stcctm(MT_DIAG_CLEARING, range, dest); + case CPUMF_CTR_SET_MAX: + return 3; + } + return 3; +} + +struct cpu_cf_events { + struct cpumf_ctr_info info; + atomic_t ctr_set[CPUMF_CTR_SET_MAX]; + atomic64_t alert; + u64 state, tx_state; + unsigned int flags; + unsigned int txn_flags; +}; +DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events); + +bool kernel_cpumcf_avail(void); +int __kernel_cpumcf_begin(void); +unsigned long kernel_cpumcf_alert(int clear); +void __kernel_cpumcf_end(void); + +static inline int kernel_cpumcf_begin(void) +{ + if (!cpum_cf_avail()) + return -ENODEV; + + preempt_disable(); + return __kernel_cpumcf_begin(); +} +static inline void kernel_cpumcf_end(void) +{ + __kernel_cpumcf_end(); + preempt_enable(); +} + +/* Return true if store counter set multiple instruction is available */ +static inline int stccm_avail(void) +{ + return test_facility(142); +} + +#endif /* _ASM_S390_CPU_MCF_H */ diff --git a/arch/s390/include/asm/cpu_mf-insn.h b/arch/s390/include/asm/cpu_mf-insn.h new file mode 100644 index 000000000000..a68b362e0964 --- /dev/null +++ b/arch/s390/include/asm/cpu_mf-insn.h @@ -0,0 
+1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Support for CPU-MF instructions + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner + */ +#ifndef _ASM_S390_CPU_MF_INSN_H +#define _ASM_S390_CPU_MF_INSN_H + +#ifdef __ASSEMBLY__ + +/* Macro to generate the STCCTM instruction with a customized + * M3 field designating the counter set. + */ +.macro STCCTM r1 m3 db2 + .insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2 +.endm + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index bf2cbff926ef..ae3e3221d4b5 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -12,6 +12,8 @@ #include #include +asm(".include \"asm/cpu_mf-insn.h\"\n"); + #define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */ #define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */ #define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */ @@ -209,17 +211,25 @@ static inline int ecctr(u64 ctr, u64 *val) return cc; } -/* Store CPU counter multiple for the MT utilization counter set */ -static inline int stcctm5(u64 num, u64 *val) +/* Store CPU counter multiple for a particular counter set */ +enum stcctm_ctr_set { + EXTENDED = 0, + BASIC = 1, + PROBLEM_STATE = 2, + CRYPTO_ACTIVITY = 3, + MT_DIAG = 5, + MT_DIAG_CLEARING = 9, /* clears loss-of-MT-ctr-data alert */ +}; +static inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest) { int cc; asm volatile ( - " .insn rsy,0xeb0000000017,%2,5,%1\n" + " STCCTM %2,%3,%1\n" " ipm %0\n" " srl %0,28\n" : "=d" (cc) - : "Q" (*val), "d" (num) + : "Q" (*dest), "d" (range), "i" (set) : "cc", "memory"); return cc; } diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index cdbaad50c7c7..19562be22b7e 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -32,6 +32,7 @@ enum diag_stat_enum { DIAG_STAT_X2FC, DIAG_STAT_X304, DIAG_STAT_X308, + DIAG_STAT_X318, DIAG_STAT_X500, NR_DIAG_STAT }; @@ -293,6 +294,17 @@ struct diag26c_mac_resp { u8 res[2]; } __aligned(8); +#define CPNC_LINUX 0x4 +union diag318_info { + unsigned long val; + struct { + unsigned int cpnc : 8; + unsigned int cpvc_linux : 24; + unsigned char cpvc_distro[3]; + unsigned char zero; + }; +}; + int diag204(unsigned long subcode, unsigned long size, void *addr); int diag224(void *ptr); int diag26c(void *req, void *resp, enum diag26c_sc subcode); diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h index 8ea270fdc7fb..5a3c95b11952 100644 --- a/arch/s390/include/asm/ftrace.h +++ b/arch/s390/include/asm/ftrace.h @@ -81,5 +81,30 @@ static inline void ftrace_generate_call_insn(struct ftrace_insn *insn, #endif } +/* + * Even though the system call numbers are identical for s390/s390x a + * different system call table is used for compat tasks. This may lead + * to e.g. incorrect or missing trace event sysfs files. + * Therefore simply do not trace compat system calls at all. + * See kernel/trace/trace_syscalls.c. + */ +#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS +static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) +{ + return is_compat_task(); +} + +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + /* + * Skip __s390_ and __s390x_ prefix - due to compat wrappers + * and aliasing some symbols of 64 bit system call functions + * may get the __s390_ prefix instead of the __s390x_ prefix. 
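 * For example, with a trace event named "sys_read" both spellings match:
 *	strcmp("__s390x_sys_read" + 8, "sys_read") == 0	("__s390x_" is 8 chars)
 *	strcmp("__s390_sys_read"  + 7, "sys_read") == 0	("__s390_" is 7 chars)
 * (the symbol names here only illustrate the two offsets used in the
 * return statement below).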
+ */ + return !strcmp(sym + 7, name) || !strcmp(sym + 8, name); +} + #endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_FTRACE_H */ diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index e2d3e6c43395..e548ec1ec12c 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -10,6 +10,12 @@ #define JUMP_LABEL_NOP_SIZE 6 #define JUMP_LABEL_NOP_OFFSET 2 +#if __GNUC__ < 9 +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "X" +#else +#define JUMP_LABEL_STATIC_KEY_CONSTRAINT "jdd" +#endif + /* * We use a brcl 0,2 instruction for jump labels at compile time so it * can be easily distinguished from a hotpatch generated instruction. @@ -20,9 +26,9 @@ static inline bool arch_static_branch(struct static_key *key, bool branch) ".pushsection __jump_table,\"aw\"\n" ".balign 8\n" ".long 0b-.,%l[label]-.\n" - ".quad %0-.\n" + ".quad %0+%1-.\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: return true; @@ -34,9 +40,9 @@ static inline bool arch_static_branch_jump(struct static_key *key, bool branch) ".pushsection __jump_table,\"aw\"\n" ".balign 8\n" ".long 0b-.,%l[label]-.\n" - ".quad %0-.\n" + ".quad %0+%1-.\n" ".popsection\n" - : : "X" (&((char *)key)[branch]) : : label); + : : JUMP_LABEL_STATIC_KEY_CONSTRAINT (key), "i" (branch) : : label); return false; label: return true; diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 10fe982f2b4b..4e0efebc56a9 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -148,7 +148,6 @@ struct zpci_dev { enum pci_bus_speed max_bus_speed; struct dentry *debugfs_dev; - struct dentry *debugfs_perf; struct s390_domain *s390_domain; /* s390 IOMMU domain data */ }; diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index b9c0e361748b..560d8f766ddf 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -12,7 +12,6 @@ #include #include -#include /* Per-CPU flags for PMU states */ #define PMU_F_RESERVED 0x1000 @@ -55,6 +54,7 @@ struct perf_sf_sde_regs { #define PERF_CPUM_SF_MAX_CTR 2 #define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */ #define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */ +#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */ #define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */ #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */ #define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \ diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 063732414dfb..76dc344edb8c 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1069,8 +1069,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION -pte_t ptep_modify_prot_start(struct mm_struct *, unsigned long, pte_t *); -void ptep_modify_prot_commit(struct mm_struct *, unsigned long, pte_t *, pte_t); +pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *); +void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long, + pte_t *, pte_t, pte_t); #define __HAVE_ARCH_PTEP_CLEAR_FLUSH static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/s390/include/asm/pnet.h b/arch/s390/include/asm/pnet.h index 6e278584f8f1..5739276b458d 100644 --- a/arch/s390/include/asm/pnet.h +++ 
b/arch/s390/include/asm/pnet.h @@ -11,13 +11,5 @@ #include #include -#define PNETIDS_LEN 64 /* Total utility string length in bytes - * to cover up to 4 PNETIDs of 16 bytes - * for up to 4 device ports - */ -#define MAX_PNETID_LEN 16 /* Max.length of a single port PNETID */ -#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN) - /* Max. # of ports with a PNETID */ - int pnet_id_by_dev_port(struct device *dev, unsigned short port, u8 *pnetid); #endif /* _ASM_S390_PNET_H */ diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index d46edde7e458..db5ef22c46e4 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -361,8 +361,8 @@ struct qdio_initialize { unsigned long); int scan_threshold; unsigned long int_parm; - void **input_sbal_addr_array; - void **output_sbal_addr_array; + struct qdio_buffer **input_sbal_addr_array; + struct qdio_buffer **output_sbal_addr_array; struct qdio_outbuf_state *output_sbal_state_array; }; diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 0cd4bda85eb1..ef4c9dec06a4 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -78,6 +78,7 @@ struct sclp_info { unsigned char has_skey : 1; unsigned char has_kss : 1; unsigned char has_gisaf : 1; + unsigned char has_diag318 : 1; unsigned int ibc; unsigned int mtid; unsigned int mtid_cp; diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index 116cc15a4b8a..70d87db54e62 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -12,15 +12,21 @@ #include #endif -#define __HAVE_ARCH_MEMCHR /* inline & arch function */ -#define __HAVE_ARCH_MEMCMP /* arch function */ #define __HAVE_ARCH_MEMCPY /* gcc builtin & arch function */ #define __HAVE_ARCH_MEMMOVE /* gcc builtin & arch function */ -#define __HAVE_ARCH_MEMSCAN /* inline & arch function */ #define __HAVE_ARCH_MEMSET /* gcc builtin & arch function */ #define __HAVE_ARCH_MEMSET16 /* arch function */ #define __HAVE_ARCH_MEMSET32 /* arch function */ #define __HAVE_ARCH_MEMSET64 /* arch function */ + +void *memcpy(void *dest, const void *src, size_t n); +void *memset(void *s, int c, size_t n); +void *memmove(void *dest, const void *src, size_t n); + +#ifndef CONFIG_KASAN +#define __HAVE_ARCH_MEMCHR /* inline & arch function */ +#define __HAVE_ARCH_MEMCMP /* arch function */ +#define __HAVE_ARCH_MEMSCAN /* inline & arch function */ #define __HAVE_ARCH_STRCAT /* inline & arch function */ #define __HAVE_ARCH_STRCMP /* arch function */ #define __HAVE_ARCH_STRCPY /* inline & arch function */ @@ -35,9 +41,6 @@ /* Prototypes for non-inlined arch strings functions. 
*/ int memcmp(const void *s1, const void *s2, size_t n); -void *memcpy(void *dest, const void *src, size_t n); -void *memset(void *s, int c, size_t n); -void *memmove(void *dest, const void *src, size_t n); int strcmp(const char *s1, const char *s2); size_t strlcat(char *dest, const char *src, size_t n); size_t strlcpy(char *dest, const char *src, size_t size); @@ -45,6 +48,7 @@ char *strncat(char *dest, const char *src, size_t n); char *strncpy(char *dest, const char *src, size_t n); char *strrchr(const char *s, int c); char *strstr(const char *s1, const char *s2); +#endif /* !CONFIG_KASAN */ #undef __HAVE_ARCH_STRCHR #undef __HAVE_ARCH_STRNCHR @@ -95,6 +99,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t count) #if !defined(IN_ARCH_STRING_C) && (!defined(CONFIG_FORTIFY_SOURCE) || defined(__NO_FORTIFY)) +#ifdef __HAVE_ARCH_MEMCHR static inline void *memchr(const void * s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -109,7 +114,9 @@ static inline void *memchr(const void * s, int c, size_t n) : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); return (void *) ret; } +#endif +#ifdef __HAVE_ARCH_MEMSCAN static inline void *memscan(void *s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -121,7 +128,9 @@ static inline void *memscan(void *s, int c, size_t n) : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); return (void *) ret; } +#endif +#ifdef __HAVE_ARCH_STRCAT static inline char *strcat(char *dst, const char *src) { register int r0 asm("0") = 0; @@ -137,7 +146,9 @@ static inline char *strcat(char *dst, const char *src) : "d" (r0), "0" (0) : "cc", "memory" ); return ret; } +#endif +#ifdef __HAVE_ARCH_STRCPY static inline char *strcpy(char *dst, const char *src) { register int r0 asm("0") = 0; @@ -150,7 +161,9 @@ static inline char *strcpy(char *dst, const char *src) : "cc", "memory"); return ret; } +#endif +#ifdef __HAVE_ARCH_STRLEN static inline size_t strlen(const char *s) { register unsigned long r0 asm("0") = 0; @@ -162,7 +175,9 @@ static inline size_t strlen(const char *s) : "+d" (r0), "+a" (tmp) : : "cc", "memory"); return r0 - (unsigned long) s; } +#endif +#ifdef __HAVE_ARCH_STRNLEN static inline size_t strnlen(const char * s, size_t n) { register int r0 asm("0") = 0; @@ -175,6 +190,7 @@ static inline size_t strnlen(const char * s, size_t n) : "+a" (end), "+a" (tmp) : "d" (r0) : "cc", "memory"); return end - s; } +#endif #else /* IN_ARCH_STRING_C */ void *memchr(const void * s, int c, size_t n); void *memscan(void *s, int c, size_t n); diff --git a/arch/s390/include/asm/syscall_wrapper.h b/arch/s390/include/asm/syscall_wrapper.h new file mode 100644 index 000000000000..5596c5c625d2 --- /dev/null +++ b/arch/s390/include/asm/syscall_wrapper.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * syscall_wrapper.h - s390 specific wrappers to syscall definitions + * + */ + +#ifndef _ASM_S390_SYSCALL_WRAPPER_H +#define _ASM_S390_SYSCALL_WRAPPER_H + +#ifdef CONFIG_COMPAT +#define __SC_COMPAT_TYPE(t, a) \ + __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a + +#define __SC_COMPAT_CAST(t, a) \ +({ \ + long __ReS = a; \ + \ + BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \ + !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t) && \ + !__TYPE_IS_LL(t)); \ + if (__TYPE_IS_L(t)) \ + __ReS = (s32)a; \ + if (__TYPE_IS_UL(t)) \ + __ReS = (u32)a; \ + if (__TYPE_IS_PTR(t)) \ + __ReS = a & 0x7fffffff; \ + if (__TYPE_IS_LL(t)) \ + return -ENOSYS; \ + (t)__ReS; \ +}) + +#define __S390_SYS_STUBx(x, name, ...) 
\ + asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\ + ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \ + asmlinkage long __s390_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))\ + { \ + long ret = __s390x_sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__));\ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ + } + +/* + * To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias + * named __s390x_sys_*() + */ +#define COMPAT_SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __s390_compat_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__s390_compat__sys_##sname, ERRNO); \ + asmlinkage long __s390_compat_sys_##sname(void) + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __s390x_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ + asmlinkage long __s390_sys_##sname(void) \ + __attribute__((alias(__stringify(__s390x_sys_##sname)))); \ + asmlinkage long __s390x_sys_##sname(void) + +#define COND_SYSCALL(name) \ + cond_syscall(__s390x_sys_##name); \ + cond_syscall(__s390_sys_##name) + +#define SYS_NI(name) \ + SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \ + SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers) + +#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ + asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + asmlinkage long __s390_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ + __attribute__((alias(__stringify(__se_compat_sys##name)))); \ + ALLOW_ERROR_INJECTION(compat_sys##name, ERRNO); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ + } \ + __diag_pop(); \ + static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +/* + * As some compat syscalls may not be implemented, we need to expand + * COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in + * kernel/time/posix-stubs.c to cover this case as well. + */ +#define COND_SYSCALL_COMPAT(name) \ + cond_syscall(__s390_compat_sys_##name) + +#define COMPAT_SYS_NI(name) \ + SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers) + +#else /* CONFIG_COMPAT */ + +#define __S390_SYS_STUBx(x, fullname, name, ...) + +#define SYSCALL_DEFINE0(sname) \ + SYSCALL_METADATA(_##sname, 0); \ + asmlinkage long __s390x_sys_##sname(void); \ + ALLOW_ERROR_INJECTION(__s390x_sys_##sname, ERRNO); \ + asmlinkage long __s390x_sys_##sname(void) + +#define COND_SYSCALL(name) \ + cond_syscall(__s390x_sys_##name) + +#define SYS_NI(name) \ + SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); + +#endif /* CONFIG_COMPAT */ + +#define __SYSCALL_DEFINEx(x, name, ...) 
\ + __diag_push(); \ + __diag_ignore(GCC, 8, "-Wattribute-alias", \ + "Type aliasing is used to sanitize syscall arguments");\ + asmlinkage long __s390x_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \ + __attribute__((alias(__stringify(__se_sys##name)))); \ + ALLOW_ERROR_INJECTION(__s390x_sys##name, ERRNO); \ + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ + __S390_SYS_STUBx(x, name, __VA_ARGS__) \ + asmlinkage long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \ + { \ + long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \ + __MAP(x,__SC_TEST,__VA_ARGS__); \ + return ret; \ + } \ + __diag_pop(); \ + static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) + +#endif /* _ASM_X86_SYSCALL_WRAPPER_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index bd2545977ad3..007fcb9aeeb8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -31,7 +31,6 @@ #define USER_DS (2) #define USER_DS_SACF (3) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.mm_segment) #define segment_eq(a,b) (((a) & 2) == ((b) & 2)) diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h index a1fbf15d53aa..b6755685c7b8 100644 --- a/arch/s390/include/asm/unistd.h +++ b/arch/s390/include/asm/unistd.h @@ -10,11 +10,6 @@ #include #include -#define __IGNORE_time -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free - #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_SYS_ALARM @@ -33,7 +28,7 @@ #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK # ifdef CONFIG_COMPAT -# define __ARCH_WANT_COMPAT_SYS_TIME +# define __ARCH_WANT_SYS_TIME32 # define __ARCH_WANT_SYS_UTIME32 # endif #define __ARCH_WANT_SYS_FORK diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h index 266a72320e05..0c05a673811c 100644 --- a/arch/s390/include/asm/vx-insn.h +++ b/arch/s390/include/asm/vx-insn.h @@ -363,23 +363,23 @@ .endm /* VECTOR LOAD MULTIPLE */ -.macro VLM vfrom, vto, disp, base +.macro VLM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom VX_NUM v3, \vto GR_NUM b2, \base /* Base register */ .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | (\disp) - MRXBOPC 0, 0x36, v1, v3 + MRXBOPC \hint, 0x36, v1, v3 .endm /* VECTOR STORE MULTIPLE */ -.macro VSTM vfrom, vto, disp, base +.macro VSTM vfrom, vto, disp, base, hint=3 VX_NUM v1, \vfrom VX_NUM v3, \vto GR_NUM b2, \base /* Base register */ .word 0xE700 | ((v1&15) << 4) | (v3&15) .word (b2 << 12) | (\disp) - MRXBOPC 0, 0x3E, v1, v3 + MRXBOPC \hint, 0x3E, v1, v3 .endm /* VECTOR PERMUTE */ diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index da3e0d48abbc..6b0f30b14642 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h +generic-y += socket.h diff --git a/arch/s390/include/uapi/asm/posix_types.h b/arch/s390/include/uapi/asm/posix_types.h index 2a3fc638414b..1913613e71b6 100644 --- a/arch/s390/include/uapi/asm/posix_types.h +++ b/arch/s390/include/uapi/asm/posix_types.h @@ -20,6 +20,12 @@ typedef long __kernel_ssize_t; typedef unsigned short __kernel_old_dev_t; #define __kernel_old_dev_t __kernel_old_dev_t +#ifdef __KERNEL__ +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; 
+#define __kernel_old_uid_t __kernel_old_uid_t +#endif + #ifndef __s390x__ typedef unsigned long __kernel_ino_t; diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h deleted file mode 100644 index 39d901476ee5..000000000000 --- a/arch/s390/include/uapi/asm/socket.h +++ /dev/null @@ -1,117 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * S390 version - * - * Derived from "include/asm-i386/socket.h" - */ - -#ifndef _ASM_SOCKET_H -#define _ASM_SOCKET_H - -#include - -/* For setsockopt(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -#define SO_REUSEPORT 15 -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 -#define SO_GET_FILTER SO_ATTACH_FILTER - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 - -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#define SO_LOCK_FILTER 44 - -#define SO_SELECT_ERR_QUEUE 45 - -#define SO_BUSY_POLL 46 - -#define SO_MAX_PACING_RATE 47 - -#define SO_BPF_EXTENSIONS 48 - -#define SO_INCOMING_CPU 49 - -#define SO_ATTACH_BPF 50 -#define SO_DETACH_BPF SO_DETACH_FILTER - -#define SO_ATTACH_REUSEPORT_CBPF 51 -#define SO_ATTACH_REUSEPORT_EBPF 52 - -#define SO_CNX_ADVICE 53 - -#define SCM_TIMESTAMPING_OPT_STATS 54 - -#define SO_MEMINFO 55 - -#define SO_INCOMING_NAPI_ID 56 - -#define SO_COOKIE 57 - -#define SCM_TIMESTAMPING_PKTINFO 58 - -#define SO_PEERGROUPS 59 - -#define SO_ZEROCOPY 60 - -#define SO_TXTIME 61 -#define SCM_TXTIME SO_TXTIME - -#endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index e216e116a9a9..8a62c7f72e1b 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -65,7 +65,7 @@ obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o -obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y) +obj-$(CONFIG_COMPAT) += $(compat-obj-y) obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o @@ -77,8 +77,10 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o -obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o 
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf_common.o +obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf.o perf_cpum_sf.o obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o +obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o obj-$(CONFIG_TRACEPOINTS) += trace.o diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 8ac38d51ed7d..f9d418d1b619 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -58,245 +57,13 @@ #include "compat_linux.h" -/* For this source file, we want overflow handling. */ - -#undef high2lowuid -#undef high2lowgid -#undef low2highuid -#undef low2highgid -#undef SET_UID16 -#undef SET_GID16 -#undef NEW_TO_OLD_UID -#undef NEW_TO_OLD_GID -#undef SET_OLDSTAT_UID -#undef SET_OLDSTAT_GID -#undef SET_STAT_UID -#undef SET_STAT_GID - -#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid) -#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid) -#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid) -#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid) -#define SET_UID16(var, uid) var = high2lowuid(uid) -#define SET_GID16(var, gid) var = high2lowgid(gid) -#define NEW_TO_OLD_UID(uid) high2lowuid(uid) -#define NEW_TO_OLD_GID(gid) high2lowgid(gid) -#define SET_OLDSTAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid) -#define SET_OLDSTAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid) -#define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid) -#define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid) - -COMPAT_SYSCALL_DEFINE3(s390_chown16, const char __user *, filename, - u16, user, u16, group) -{ - return ksys_chown(filename, low2highuid(user), low2highgid(group)); -} - -COMPAT_SYSCALL_DEFINE3(s390_lchown16, const char __user *, - filename, u16, user, u16, group) -{ - return ksys_lchown(filename, low2highuid(user), low2highgid(group)); -} - -COMPAT_SYSCALL_DEFINE3(s390_fchown16, unsigned int, fd, u16, user, u16, group) -{ - return ksys_fchown(fd, low2highuid(user), low2highgid(group)); -} - -COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid) -{ - return sys_setregid(low2highgid(rgid), low2highgid(egid)); -} - -COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid) -{ - return sys_setgid(low2highgid(gid)); -} - -COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid) -{ - return sys_setreuid(low2highuid(ruid), low2highuid(euid)); -} - -COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid) -{ - return sys_setuid(low2highuid(uid)); -} - -COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid) -{ - return sys_setresuid(low2highuid(ruid), low2highuid(euid), - low2highuid(suid)); -} - -COMPAT_SYSCALL_DEFINE3(s390_getresuid16, u16 __user *, ruidp, - u16 __user *, euidp, u16 __user *, suidp) -{ - const struct cred *cred = current_cred(); - int retval; - u16 ruid, euid, suid; - - ruid = high2lowuid(from_kuid_munged(cred->user_ns, cred->uid)); - euid = high2lowuid(from_kuid_munged(cred->user_ns, cred->euid)); - suid = high2lowuid(from_kuid_munged(cred->user_ns, cred->suid)); - - if (!(retval = put_user(ruid, ruidp)) && - !(retval = put_user(euid, euidp))) - retval = put_user(suid, suidp); - - return retval; -} - -COMPAT_SYSCALL_DEFINE3(s390_setresgid16, u16, rgid, u16, egid, u16, sgid) -{ - return sys_setresgid(low2highgid(rgid), low2highgid(egid), - low2highgid(sgid)); -} - -COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp, 
- u16 __user *, egidp, u16 __user *, sgidp) -{ - const struct cred *cred = current_cred(); - int retval; - u16 rgid, egid, sgid; - - rgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->gid)); - egid = high2lowgid(from_kgid_munged(cred->user_ns, cred->egid)); - sgid = high2lowgid(from_kgid_munged(cred->user_ns, cred->sgid)); - - if (!(retval = put_user(rgid, rgidp)) && - !(retval = put_user(egid, egidp))) - retval = put_user(sgid, sgidp); - - return retval; -} - -COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid) -{ - return sys_setfsuid(low2highuid(uid)); -} - -COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid) -{ - return sys_setfsgid(low2highgid(gid)); -} - -static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info) -{ - struct user_namespace *user_ns = current_user_ns(); - int i; - u16 group; - kgid_t kgid; - - for (i = 0; i < group_info->ngroups; i++) { - kgid = group_info->gid[i]; - group = (u16)from_kgid_munged(user_ns, kgid); - if (put_user(group, grouplist+i)) - return -EFAULT; - } - - return 0; -} - -static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist) -{ - struct user_namespace *user_ns = current_user_ns(); - int i; - u16 group; - kgid_t kgid; - - for (i = 0; i < group_info->ngroups; i++) { - if (get_user(group, grouplist+i)) - return -EFAULT; - - kgid = make_kgid(user_ns, (gid_t)group); - if (!gid_valid(kgid)) - return -EINVAL; - - group_info->gid[i] = kgid; - } - - return 0; -} - -COMPAT_SYSCALL_DEFINE2(s390_getgroups16, int, gidsetsize, u16 __user *, grouplist) -{ - const struct cred *cred = current_cred(); - int i; - - if (gidsetsize < 0) - return -EINVAL; - - get_group_info(cred->group_info); - i = cred->group_info->ngroups; - if (gidsetsize) { - if (i > gidsetsize) { - i = -EINVAL; - goto out; - } - if (groups16_to_user(grouplist, cred->group_info)) { - i = -EFAULT; - goto out; - } - } -out: - put_group_info(cred->group_info); - return i; -} - -COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplist) -{ - struct group_info *group_info; - int retval; - - if (!may_setgroups()) - return -EPERM; - if ((unsigned)gidsetsize > NGROUPS_MAX) - return -EINVAL; - - group_info = groups_alloc(gidsetsize); - if (!group_info) - return -ENOMEM; - retval = groups16_from_user(group_info, grouplist); - if (retval) { - put_group_info(group_info); - return retval; - } - - groups_sort(group_info); - retval = set_current_groups(group_info); - put_group_info(group_info); - - return retval; -} - -COMPAT_SYSCALL_DEFINE0(s390_getuid16) -{ - return high2lowuid(from_kuid_munged(current_user_ns(), current_uid())); -} - -COMPAT_SYSCALL_DEFINE0(s390_geteuid16) -{ - return high2lowuid(from_kuid_munged(current_user_ns(), current_euid())); -} - -COMPAT_SYSCALL_DEFINE0(s390_getgid16) -{ - return high2lowgid(from_kgid_munged(current_user_ns(), current_gid())); -} - -COMPAT_SYSCALL_DEFINE0(s390_getegid16) -{ - return high2lowgid(from_kgid_munged(current_user_ns(), current_egid())); -} - #ifdef CONFIG_SYSVIPC COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second, compat_ulong_t, third, compat_uptr_t, ptr) { if (call >> 16) /* hack for backward compatibility */ return -EINVAL; - return compat_sys_ipc(call, first, second, third, ptr, third); + return compat_ksys_ipc(call, first, second, third, ptr, third); } #endif diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c deleted file mode 100644 index 48c4ce668244..000000000000 --- a/arch/s390/kernel/compat_wrapper.c +++ /dev/null 
@@ -1,186 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Compat system call wrappers. - * - * Copyright IBM Corp. 2014 - */ - -#include -#include -#include "entry.h" - -#define COMPAT_SYSCALL_WRAP1(name, ...) \ - COMPAT_SYSCALL_WRAPx(1, _##name, __VA_ARGS__) -#define COMPAT_SYSCALL_WRAP2(name, ...) \ - COMPAT_SYSCALL_WRAPx(2, _##name, __VA_ARGS__) -#define COMPAT_SYSCALL_WRAP3(name, ...) \ - COMPAT_SYSCALL_WRAPx(3, _##name, __VA_ARGS__) -#define COMPAT_SYSCALL_WRAP4(name, ...) \ - COMPAT_SYSCALL_WRAPx(4, _##name, __VA_ARGS__) -#define COMPAT_SYSCALL_WRAP5(name, ...) \ - COMPAT_SYSCALL_WRAPx(5, _##name, __VA_ARGS__) -#define COMPAT_SYSCALL_WRAP6(name, ...) \ - COMPAT_SYSCALL_WRAPx(6, _##name, __VA_ARGS__) - -#define __SC_COMPAT_TYPE(t, a) \ - __typeof(__builtin_choose_expr(sizeof(t) > 4, 0L, (t)0)) a - -#define __SC_COMPAT_CAST(t, a) \ -({ \ - long __ReS = a; \ - \ - BUILD_BUG_ON((sizeof(t) > 4) && !__TYPE_IS_L(t) && \ - !__TYPE_IS_UL(t) && !__TYPE_IS_PTR(t)); \ - if (__TYPE_IS_L(t)) \ - __ReS = (s32)a; \ - if (__TYPE_IS_UL(t)) \ - __ReS = (u32)a; \ - if (__TYPE_IS_PTR(t)) \ - __ReS = a & 0x7fffffff; \ - (t)__ReS; \ -}) - -/* - * The COMPAT_SYSCALL_WRAP macro generates system call wrappers to be used by - * compat tasks. These wrappers will only be used for system calls where only - * the system call arguments need sign or zero extension or zeroing of the upper - * 33 bits of pointers. - * Note: since the wrapper function will afterwards call a system call which - * again performs zero and sign extension for all system call arguments with - * a size of less than eight bytes, these compat wrappers only touch those - * system call arguments with a size of eight bytes ((unsigned) long and - * pointers). Zero and sign extension for e.g. int parameters will be done by - * the regular system call wrappers. - */ -#define COMPAT_SYSCALL_WRAPx(x, name, ...) 
\ -asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \ -asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__));\ -asmlinkage long notrace compat_sys##name(__MAP(x,__SC_COMPAT_TYPE,__VA_ARGS__)) \ -{ \ - return sys##name(__MAP(x,__SC_COMPAT_CAST,__VA_ARGS__)); \ -} - -COMPAT_SYSCALL_WRAP2(creat, const char __user *, pathname, umode_t, mode); -COMPAT_SYSCALL_WRAP2(link, const char __user *, oldname, const char __user *, newname); -COMPAT_SYSCALL_WRAP1(unlink, const char __user *, pathname); -COMPAT_SYSCALL_WRAP1(chdir, const char __user *, filename); -COMPAT_SYSCALL_WRAP3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev); -COMPAT_SYSCALL_WRAP2(chmod, const char __user *, filename, umode_t, mode); -COMPAT_SYSCALL_WRAP1(oldumount, char __user *, name); -COMPAT_SYSCALL_WRAP2(access, const char __user *, filename, int, mode); -COMPAT_SYSCALL_WRAP2(rename, const char __user *, oldname, const char __user *, newname); -COMPAT_SYSCALL_WRAP2(mkdir, const char __user *, pathname, umode_t, mode); -COMPAT_SYSCALL_WRAP1(rmdir, const char __user *, pathname); -COMPAT_SYSCALL_WRAP1(pipe, int __user *, fildes); -COMPAT_SYSCALL_WRAP1(brk, unsigned long, brk); -COMPAT_SYSCALL_WRAP2(signal, int, sig, __sighandler_t, handler); -COMPAT_SYSCALL_WRAP1(acct, const char __user *, name); -COMPAT_SYSCALL_WRAP2(umount, char __user *, name, int, flags); -COMPAT_SYSCALL_WRAP1(chroot, const char __user *, filename); -COMPAT_SYSCALL_WRAP3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask); -COMPAT_SYSCALL_WRAP2(sethostname, char __user *, name, int, len); -COMPAT_SYSCALL_WRAP2(symlink, const char __user *, old, const char __user *, new); -COMPAT_SYSCALL_WRAP3(readlink, const char __user *, path, char __user *, buf, int, bufsiz); -COMPAT_SYSCALL_WRAP1(uselib, const char __user *, library); -COMPAT_SYSCALL_WRAP2(swapon, const char __user *, specialfile, int, swap_flags); -COMPAT_SYSCALL_WRAP4(reboot, int, magic1, int, magic2, unsigned int, cmd, void __user *, arg); -COMPAT_SYSCALL_WRAP2(munmap, unsigned long, addr, size_t, len); -COMPAT_SYSCALL_WRAP3(syslog, int, type, char __user *, buf, int, len); -COMPAT_SYSCALL_WRAP1(swapoff, const char __user *, specialfile); -COMPAT_SYSCALL_WRAP2(setdomainname, char __user *, name, int, len); -COMPAT_SYSCALL_WRAP1(newuname, struct new_utsname __user *, name); -COMPAT_SYSCALL_WRAP3(mprotect, unsigned long, start, size_t, len, unsigned long, prot); -COMPAT_SYSCALL_WRAP3(init_module, void __user *, umod, unsigned long, len, const char __user *, uargs); -COMPAT_SYSCALL_WRAP2(delete_module, const char __user *, name_user, unsigned int, flags); -COMPAT_SYSCALL_WRAP4(quotactl, unsigned int, cmd, const char __user *, special, qid_t, id, void __user *, addr); -COMPAT_SYSCALL_WRAP2(bdflush, int, func, long, data); -COMPAT_SYSCALL_WRAP3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2); -COMPAT_SYSCALL_WRAP5(llseek, unsigned int, fd, unsigned long, high, unsigned long, low, loff_t __user *, result, unsigned int, whence); -COMPAT_SYSCALL_WRAP3(msync, unsigned long, start, size_t, len, int, flags); -COMPAT_SYSCALL_WRAP2(mlock, unsigned long, start, size_t, len); -COMPAT_SYSCALL_WRAP2(munlock, unsigned long, start, size_t, len); -COMPAT_SYSCALL_WRAP2(sched_setparam, pid_t, pid, struct sched_param __user *, param); -COMPAT_SYSCALL_WRAP2(sched_getparam, pid_t, pid, struct sched_param __user *, param); -COMPAT_SYSCALL_WRAP3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param); 
-COMPAT_SYSCALL_WRAP5(mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr); -COMPAT_SYSCALL_WRAP3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout); -COMPAT_SYSCALL_WRAP5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5); -COMPAT_SYSCALL_WRAP2(getcwd, char __user *, buf, unsigned long, size); -COMPAT_SYSCALL_WRAP2(capget, cap_user_header_t, header, cap_user_data_t, dataptr); -COMPAT_SYSCALL_WRAP2(capset, cap_user_header_t, header, const cap_user_data_t, data); -COMPAT_SYSCALL_WRAP3(lchown, const char __user *, filename, uid_t, user, gid_t, group); -COMPAT_SYSCALL_WRAP2(getgroups, int, gidsetsize, gid_t __user *, grouplist); -COMPAT_SYSCALL_WRAP2(setgroups, int, gidsetsize, gid_t __user *, grouplist); -COMPAT_SYSCALL_WRAP3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid); -COMPAT_SYSCALL_WRAP3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid); -COMPAT_SYSCALL_WRAP3(chown, const char __user *, filename, uid_t, user, gid_t, group); -COMPAT_SYSCALL_WRAP2(pivot_root, const char __user *, new_root, const char __user *, put_old); -COMPAT_SYSCALL_WRAP3(mincore, unsigned long, start, size_t, len, unsigned char __user *, vec); -COMPAT_SYSCALL_WRAP3(madvise, unsigned long, start, size_t, len, int, behavior); -COMPAT_SYSCALL_WRAP5(setxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags); -COMPAT_SYSCALL_WRAP5(lsetxattr, const char __user *, path, const char __user *, name, const void __user *, value, size_t, size, int, flags); -COMPAT_SYSCALL_WRAP5(fsetxattr, int, fd, const char __user *, name, const void __user *, value, size_t, size, int, flags); -COMPAT_SYSCALL_WRAP3(getdents64, unsigned int, fd, struct linux_dirent64 __user *, dirent, unsigned int, count); -COMPAT_SYSCALL_WRAP4(getxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size); -COMPAT_SYSCALL_WRAP4(lgetxattr, const char __user *, path, const char __user *, name, void __user *, value, size_t, size); -COMPAT_SYSCALL_WRAP4(fgetxattr, int, fd, const char __user *, name, void __user *, value, size_t, size); -COMPAT_SYSCALL_WRAP3(listxattr, const char __user *, path, char __user *, list, size_t, size); -COMPAT_SYSCALL_WRAP3(llistxattr, const char __user *, path, char __user *, list, size_t, size); -COMPAT_SYSCALL_WRAP3(flistxattr, int, fd, char __user *, list, size_t, size); -COMPAT_SYSCALL_WRAP2(removexattr, const char __user *, path, const char __user *, name); -COMPAT_SYSCALL_WRAP2(lremovexattr, const char __user *, path, const char __user *, name); -COMPAT_SYSCALL_WRAP2(fremovexattr, int, fd, const char __user *, name); -COMPAT_SYSCALL_WRAP1(set_tid_address, int __user *, tidptr); -COMPAT_SYSCALL_WRAP4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event); -COMPAT_SYSCALL_WRAP4(epoll_wait, int, epfd, struct epoll_event __user *, events, int, maxevents, int, timeout); -COMPAT_SYSCALL_WRAP1(io_destroy, aio_context_t, ctx); -COMPAT_SYSCALL_WRAP3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, struct io_event __user *, result); -COMPAT_SYSCALL_WRAP1(mq_unlink, const char __user *, name); -COMPAT_SYSCALL_WRAP5(add_key, const char __user *, tp, const char __user *, dsc, const void __user *, pld, size_t, len, key_serial_t, id); -COMPAT_SYSCALL_WRAP4(request_key, const char __user *, tp, const char __user *, dsc, 
const char __user *, info, key_serial_t, id); -COMPAT_SYSCALL_WRAP5(remap_file_pages, unsigned long, start, unsigned long, size, unsigned long, prot, unsigned long, pgoff, unsigned long, flags); -COMPAT_SYSCALL_WRAP3(inotify_add_watch, int, fd, const char __user *, path, u32, mask); -COMPAT_SYSCALL_WRAP3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode); -COMPAT_SYSCALL_WRAP4(mknodat, int, dfd, const char __user *, filename, umode_t, mode, unsigned, dev); -COMPAT_SYSCALL_WRAP5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag); -COMPAT_SYSCALL_WRAP3(unlinkat, int, dfd, const char __user *, pathname, int, flag); -COMPAT_SYSCALL_WRAP4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname); -COMPAT_SYSCALL_WRAP5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags); -COMPAT_SYSCALL_WRAP3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname); -COMPAT_SYSCALL_WRAP4(readlinkat, int, dfd, const char __user *, path, char __user *, buf, int, bufsiz); -COMPAT_SYSCALL_WRAP3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode); -COMPAT_SYSCALL_WRAP3(faccessat, int, dfd, const char __user *, filename, int, mode); -COMPAT_SYSCALL_WRAP1(unshare, unsigned long, unshare_flags); -COMPAT_SYSCALL_WRAP6(splice, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); -COMPAT_SYSCALL_WRAP4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags); -COMPAT_SYSCALL_WRAP3(getcpu, unsigned __user *, cpu, unsigned __user *, node, struct getcpu_cache __user *, cache); -COMPAT_SYSCALL_WRAP2(pipe2, int __user *, fildes, int, flags); -COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags); -COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls); -COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim); -COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag); -COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, idx1, unsigned long, idx2); -COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags); -COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); -COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags); -COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags); -COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, void __user *, uargs) -COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) -COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags) -COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size); -COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length); -COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length); 
-COMPAT_SYSCALL_WRAP4(socketpair, int, family, int, type, int, protocol, int __user *, usockvec); -COMPAT_SYSCALL_WRAP3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen); -COMPAT_SYSCALL_WRAP3(connect, int, fd, struct sockaddr __user *, uservaddr, int, addrlen); -COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags); -COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); -COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); -COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); -COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); -COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); -COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb); -COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); -COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags); -COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags) -COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig) diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 97eae3871868..f96a5857bbfd 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -61,6 +61,9 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu) struct save_area *sa; sa = (void *) memblock_phys_alloc(sizeof(*sa), 8); + if (!sa) + panic("Failed to allocate save area\n"); + if (is_boot_cpu) list_add(&sa->list, &dump_save_areas); else diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index d374f9b218b4..0ebf08c3b35e 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1056,12 +1056,6 @@ int debug_register_view(debug_info_t *id, struct debug_view *view) mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, id, &debug_file_ops); - if (!pde) { - pr_err("Registering view %s/%s failed due to out of " - "memory\n", id->name, view->name); - rc = -1; - goto out; - } spin_lock_irqsave(&id->lock, flags); for (i = 0; i < DEBUG_MAX_VIEWS; i++) { if (!id->views[i]) diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 53a5316cc4b7..7edaa733a77f 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -45,6 +45,7 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = { [DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" }, [DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" }, [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" }, + [DIAG_STAT_X318] = { .code = 0x318, .name = "CP Name and Version Codes" }, [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" }, }; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index a8c7789b246b..d6edf45f93b9 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -164,8 +164,6 @@ static noinline __init void setup_lowcore_early(void) static noinline __init void setup_facility_list(void) { - 
stfle(S390_lowcore.stfle_fac_list, - ARRAY_SIZE(S390_lowcore.stfle_fac_list)); memcpy(S390_lowcore.alt_stfle_fac_list, S390_lowcore.stfle_fac_list, sizeof(S390_lowcore.alt_stfle_fac_list)); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 39191a0feed1..583d65ef5007 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -1512,7 +1512,7 @@ cleanup_critical: .quad .Lsie_skip - .Lsie_entry #endif .section .rodata, "a" -#define SYSCALL(esame,emu) .long esame +#define SYSCALL(esame,emu) .long __s390x_ ## esame .globl sys_call_table sys_call_table: #include "asm/syscall_table.h" @@ -1520,7 +1520,7 @@ sys_call_table: #ifdef CONFIG_COMPAT -#define SYSCALL(esame,emu) .long emu +#define SYSCALL(esame,emu) .long __s390_ ## emu .globl sys_call_table_emu sys_call_table_emu: #include "asm/syscall_table.h" diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 57bba24b1c27..56491e636eab 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -27,8 +27,6 @@ ENTRY(startup_continue) mvc 0(16,%r1),__LC_BOOT_CLOCK larl %r13,.LPG1 # get base lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers - lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area - # move IPL device to lowcore larl %r0,boot_vdso_data stg %r0,__LC_VDSO_PER_CPU # diff --git a/arch/s390/kernel/kdebugfs.c b/arch/s390/kernel/kdebugfs.c index 2c46bd6c6fd2..33130c7daf55 100644 --- a/arch/s390/kernel/kdebugfs.c +++ b/arch/s390/kernel/kdebugfs.c @@ -9,8 +9,6 @@ EXPORT_SYMBOL(arch_debugfs_dir); static int __init arch_kdebugfs_init(void) { arch_debugfs_dir = debugfs_create_dir("s390", NULL); - if (IS_ERR(arch_debugfs_dir)) - arch_debugfs_dir = NULL; return 0; } postcore_initcall(arch_kdebugfs_init); diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index d5523adeddbf..e1c54d28713a 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -10,73 +10,11 @@ #include #include -#include #include #include #include #include -#include -#include -#include - -enum cpumf_ctr_set { - CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */ - CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */ - CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */ - CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */ - CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */ - - /* Maximum number of counter sets */ - CPUMF_CTR_SET_MAX, -}; - -#define CPUMF_LCCTL_ENABLE_SHIFT 16 -#define CPUMF_LCCTL_ACTCTL_SHIFT 0 -static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = { - [CPUMF_CTR_SET_BASIC] = 0x02, - [CPUMF_CTR_SET_USER] = 0x04, - [CPUMF_CTR_SET_CRYPTO] = 0x08, - [CPUMF_CTR_SET_EXT] = 0x01, - [CPUMF_CTR_SET_MT_DIAG] = 0x20, -}; - -static void ctr_set_enable(u64 *state, int ctr_set) -{ - *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT; -} -static void ctr_set_disable(u64 *state, int ctr_set) -{ - *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT); -} -static void ctr_set_start(u64 *state, int ctr_set) -{ - *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT; -} -static void ctr_set_stop(u64 *state, int ctr_set) -{ - *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT); -} - -/* Local CPUMF event structure */ -struct cpu_hw_events { - struct cpumf_ctr_info info; - atomic_t ctr_set[CPUMF_CTR_SET_MAX]; - u64 state, tx_state; - unsigned int flags; - unsigned int txn_flags; -}; -static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { - .ctr_set = { - [CPUMF_CTR_SET_BASIC] = 
ATOMIC_INIT(0), - [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0), - [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0), - [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0), - [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0), - }, - .state = 0, - .flags = 0, - .txn_flags = 0, -}; +#include static enum cpumf_ctr_set get_counter_set(u64 event) { @@ -98,11 +36,11 @@ static enum cpumf_ctr_set get_counter_set(u64 event) static int validate_ctr_version(const struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuhw; + struct cpu_cf_events *cpuhw; int err = 0; u16 mtdiag_ctl; - cpuhw = &get_cpu_var(cpu_hw_events); + cpuhw = &get_cpu_var(cpu_cf_events); /* check required version for counter sets */ switch (hwc->config_base) { @@ -135,7 +73,7 @@ static int validate_ctr_version(const struct hw_perf_event *hwc) * Thus, the counters can only be used if SMT is on and the * counter set is enabled and active. */ - mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG]; + mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG]; if (!((cpuhw->info.auth_ctl & mtdiag_ctl) && (cpuhw->info.enable_ctl & mtdiag_ctl) && (cpuhw->info.act_ctl & mtdiag_ctl))) @@ -143,28 +81,28 @@ static int validate_ctr_version(const struct hw_perf_event *hwc) break; } - put_cpu_var(cpu_hw_events); + put_cpu_var(cpu_cf_events); return err; } static int validate_ctr_auth(const struct hw_perf_event *hwc) { - struct cpu_hw_events *cpuhw; + struct cpu_cf_events *cpuhw; u64 ctrs_state; int err = 0; - cpuhw = &get_cpu_var(cpu_hw_events); + cpuhw = &get_cpu_var(cpu_cf_events); /* Check authorization for cpu counter sets. * If the particular CPU counter set is not authorized, * return with -ENOENT in order to fall back to other * PMUs that might suffice the event request. */ - ctrs_state = cpumf_state_ctl[hwc->config_base]; + ctrs_state = cpumf_ctr_ctl[hwc->config_base]; if (!(ctrs_state & cpuhw->info.auth_ctl)) err = -ENOENT; - put_cpu_var(cpu_hw_events); + put_cpu_var(cpu_cf_events); return err; } @@ -175,7 +113,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc) */ static void cpumf_pmu_enable(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); int err; if (cpuhw->flags & PMU_F_ENABLED) @@ -198,7 +136,7 @@ static void cpumf_pmu_enable(struct pmu *pmu) */ static void cpumf_pmu_disable(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); int err; u64 inactive; @@ -222,86 +160,13 @@ static atomic_t num_events = ATOMIC_INIT(0); /* Used to avoid races in calling reserve/release_cpumf_hardware */ static DEFINE_MUTEX(pmc_reserve_mutex); -/* CPU-measurement alerts for the counter facility */ -static void cpumf_measurement_alert(struct ext_code ext_code, - unsigned int alert, unsigned long unused) -{ - struct cpu_hw_events *cpuhw; - - if (!(alert & CPU_MF_INT_CF_MASK)) - return; - - inc_irq_stat(IRQEXT_CMC); - cpuhw = this_cpu_ptr(&cpu_hw_events); - - /* Measurement alerts are shared and might happen when the PMU - * is not reserved. Ignore these alerts in this case. 
*/ - if (!(cpuhw->flags & PMU_F_RESERVED)) - return; - - /* counter authorization change alert */ - if (alert & CPU_MF_INT_CF_CACA) - qctri(&cpuhw->info); - - /* loss of counter data alert */ - if (alert & CPU_MF_INT_CF_LCDA) - pr_err("CPU[%i] Counter data was lost\n", smp_processor_id()); - - /* loss of MT counter data alert */ - if (alert & CPU_MF_INT_CF_MTDA) - pr_warn("CPU[%i] MT counter data was lost\n", - smp_processor_id()); -} - -#define PMC_INIT 0 -#define PMC_RELEASE 1 -static void setup_pmc_cpu(void *flags) -{ - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); - - switch (*((int *) flags)) { - case PMC_INIT: - memset(&cpuhw->info, 0, sizeof(cpuhw->info)); - qctri(&cpuhw->info); - cpuhw->flags |= PMU_F_RESERVED; - break; - - case PMC_RELEASE: - cpuhw->flags &= ~PMU_F_RESERVED; - break; - } - - /* Disable CPU counter sets */ - lcctl(0); -} - -/* Initialize the CPU-measurement facility */ -static int reserve_pmc_hardware(void) -{ - int flags = PMC_INIT; - - on_each_cpu(setup_pmc_cpu, &flags, 1); - irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); - - return 0; -} - -/* Release the CPU-measurement facility */ -static void release_pmc_hardware(void) -{ - int flags = PMC_RELEASE; - - on_each_cpu(setup_pmc_cpu, &flags, 1); - irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); -} - /* Release the PMU if event is the last perf event */ static void hw_perf_event_destroy(struct perf_event *event) { if (!atomic_add_unless(&num_events, -1, 1)) { mutex_lock(&pmc_reserve_mutex); if (atomic_dec_return(&num_events) == 0) - release_pmc_hardware(); + __kernel_cpumcf_end(); mutex_unlock(&pmc_reserve_mutex); } } @@ -332,7 +197,7 @@ static int __hw_perf_event_init(struct perf_event *event) struct perf_event_attr *attr = &event->attr; struct hw_perf_event *hwc = &event->hw; enum cpumf_ctr_set set; - int err; + int err = 0; u64 ev; switch (attr->type) { @@ -402,12 +267,14 @@ static int __hw_perf_event_init(struct perf_event *event) /* Initialize for using the CPU-measurement counter facility */ if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); - if (atomic_read(&num_events) == 0 && reserve_pmc_hardware()) + if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin()) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); } + if (err) + return err; event->destroy = hw_perf_event_destroy; /* Finally, validate version and authorization of the counter set */ @@ -488,7 +355,7 @@ static void cpumf_pmu_read(struct perf_event *event) static void cpumf_pmu_start(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct hw_perf_event *hwc = &event->hw; if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) @@ -519,7 +386,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags) static void cpumf_pmu_stop(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct hw_perf_event *hwc = &event->hw; if (!(hwc->state & PERF_HES_STOPPED)) { @@ -540,7 +407,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags) static int cpumf_pmu_add(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); /* Check authorization for the counter set to which this * counter belongs. 
@@ -564,7 +431,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) static void cpumf_pmu_del(struct perf_event *event, int flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); cpumf_pmu_stop(event, PERF_EF_UPDATE); @@ -592,7 +459,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags) */ static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ @@ -612,7 +479,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) static void cpumf_pmu_cancel_txn(struct pmu *pmu) { unsigned int txn_flags; - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ @@ -633,7 +500,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu) */ static int cpumf_pmu_commit_txn(struct pmu *pmu) { - struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); u64 state; WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ @@ -671,54 +538,17 @@ static struct pmu cpumf_pmu = { .cancel_txn = cpumf_pmu_cancel_txn, }; -static int cpumf_pmf_setup(unsigned int cpu, int flags) -{ - local_irq_disable(); - setup_pmc_cpu(&flags); - local_irq_enable(); - return 0; -} - -static int s390_pmu_online_cpu(unsigned int cpu) -{ - return cpumf_pmf_setup(cpu, PMC_INIT); -} - -static int s390_pmu_offline_cpu(unsigned int cpu) -{ - return cpumf_pmf_setup(cpu, PMC_RELEASE); -} - static int __init cpumf_pmu_init(void) { int rc; - if (!cpum_cf_avail()) + if (!kernel_cpumcf_avail()) return -ENODEV; - /* clear bit 15 of cr0 to unauthorize problem-state to - * extract measurement counters */ - ctl_clear_bit(0, 48); - - /* register handler for measurement-alert interruptions */ - rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, - cpumf_measurement_alert); - if (rc) { - pr_err("Registering for CPU-measurement alerts " - "failed with rc=%i\n", rc); - return rc; - } - cpumf_pmu.attr_groups = cpumf_cf_event_group(); rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); - if (rc) { + if (rc) pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); - unregister_external_irq(EXT_IRQ_MEASURE_ALERT, - cpumf_measurement_alert); - return rc; - } - return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE, - "perf/s390/cf:online", - s390_pmu_online_cpu, s390_pmu_offline_cpu); + return rc; } -early_initcall(cpumf_pmu_init); +subsys_initcall(cpumf_pmu_init); diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c new file mode 100644 index 000000000000..3bced89caffb --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_common.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CPU-Measurement Counter Facility Support - Common Layer + * + * Copyright IBM Corp. 
2019 + * Author(s): Hendrik Brueckner + */ +#define KMSG_COMPONENT "cpum_cf_common" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Per-CPU event structure for the counter facility */ +DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = { + .ctr_set = { + [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0), + [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0), + }, + .alert = ATOMIC64_INIT(0), + .state = 0, + .flags = 0, + .txn_flags = 0, +}; +/* Indicator whether the CPU-Measurement Counter Facility Support is ready */ +static bool cpum_cf_initalized; + +/* CPU-measurement alerts for the counter facility */ +static void cpumf_measurement_alert(struct ext_code ext_code, + unsigned int alert, unsigned long unused) +{ + struct cpu_cf_events *cpuhw; + + if (!(alert & CPU_MF_INT_CF_MASK)) + return; + + inc_irq_stat(IRQEXT_CMC); + cpuhw = this_cpu_ptr(&cpu_cf_events); + + /* Measurement alerts are shared and might happen when the PMU + * is not reserved. Ignore these alerts in this case. */ + if (!(cpuhw->flags & PMU_F_RESERVED)) + return; + + /* counter authorization change alert */ + if (alert & CPU_MF_INT_CF_CACA) + qctri(&cpuhw->info); + + /* loss of counter data alert */ + if (alert & CPU_MF_INT_CF_LCDA) + pr_err("CPU[%i] Counter data was lost\n", smp_processor_id()); + + /* loss of MT counter data alert */ + if (alert & CPU_MF_INT_CF_MTDA) + pr_warn("CPU[%i] MT counter data was lost\n", + smp_processor_id()); + + /* store alert for special handling by in-kernel users */ + atomic64_or(alert, &cpuhw->alert); +} + +#define PMC_INIT 0 +#define PMC_RELEASE 1 +static void cpum_cf_setup_cpu(void *flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + + switch (*((int *) flags)) { + case PMC_INIT: + memset(&cpuhw->info, 0, sizeof(cpuhw->info)); + qctri(&cpuhw->info); + cpuhw->flags |= PMU_F_RESERVED; + break; + + case PMC_RELEASE: + cpuhw->flags &= ~PMU_F_RESERVED; + break; + } + + /* Disable CPU counter sets */ + lcctl(0); +} + +bool kernel_cpumcf_avail(void) +{ + return cpum_cf_initalized; +} +EXPORT_SYMBOL(kernel_cpumcf_avail); + + +/* Reserve/release functions for sharing perf hardware */ +static DEFINE_SPINLOCK(cpumcf_owner_lock); +static void *cpumcf_owner; + +/* Initialize the CPU-measurement counter facility */ +int __kernel_cpumcf_begin(void) +{ + int flags = PMC_INIT; + int err = 0; + + spin_lock(&cpumcf_owner_lock); + if (cpumcf_owner) + err = -EBUSY; + else + cpumcf_owner = __builtin_return_address(0); + spin_unlock(&cpumcf_owner_lock); + if (err) + return err; + + on_each_cpu(cpum_cf_setup_cpu, &flags, 1); + irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + return 0; +} +EXPORT_SYMBOL(__kernel_cpumcf_begin); + +/* Obtain the CPU-measurement alerts for the counter facility */ +unsigned long kernel_cpumcf_alert(int clear) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + unsigned long alert; + + alert = atomic64_read(&cpuhw->alert); + if (clear) + atomic64_set(&cpuhw->alert, 0); + + return alert; +} +EXPORT_SYMBOL(kernel_cpumcf_alert); + +/* Release the CPU-measurement counter facility */ +void __kernel_cpumcf_end(void) +{ + int flags = PMC_RELEASE; + + on_each_cpu(cpum_cf_setup_cpu, &flags, 1); + irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); + + spin_lock(&cpumcf_owner_lock); + cpumcf_owner = NULL; + spin_unlock(&cpumcf_owner_lock); +} 
+EXPORT_SYMBOL(__kernel_cpumcf_end); + +static int cpum_cf_setup(unsigned int cpu, int flags) +{ + local_irq_disable(); + cpum_cf_setup_cpu(&flags); + local_irq_enable(); + return 0; +} + +static int cpum_cf_online_cpu(unsigned int cpu) +{ + return cpum_cf_setup(cpu, PMC_INIT); +} + +static int cpum_cf_offline_cpu(unsigned int cpu) +{ + return cpum_cf_setup(cpu, PMC_RELEASE); +} + +static int __init cpum_cf_init(void) +{ + int rc; + + if (!cpum_cf_avail()) + return -ENODEV; + + /* clear bit 15 of cr0 to unauthorize problem-state to + * extract measurement counters */ + ctl_clear_bit(0, 48); + + /* register handler for measurement-alert interruptions */ + rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, + cpumf_measurement_alert); + if (rc) { + pr_err("Registering for CPU-measurement alerts " + "failed with rc=%i\n", rc); + return rc; + } + + rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE, + "perf/s390/cf:online", + cpum_cf_online_cpu, cpum_cf_offline_cpu); + if (!rc) + cpum_cf_initalized = true; + + return rc; +} +early_initcall(cpum_cf_init); diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c new file mode 100644 index 000000000000..c6fad208c2fa --- /dev/null +++ b/arch/s390/kernel/perf_cpum_cf_diag.c @@ -0,0 +1,693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance event support for s390x - CPU-measurement Counter Sets + * + * Copyright IBM Corp. 2019 + * Author(s): Hendrik Brueckner + * Thomas Richer + */ +#define KMSG_COMPONENT "cpum_cf_diag" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */ + +static unsigned int cf_diag_cpu_speed; +static debug_info_t *cf_diag_dbg; + +struct cf_diag_csd { /* Counter set data per CPU */ + size_t used; /* Bytes used in data/start */ + unsigned char start[PAGE_SIZE]; /* Counter set at event start */ + unsigned char data[PAGE_SIZE]; /* Counter set at event delete */ +}; +DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd); + +/* Counter sets are stored as data stream in a page sized memory buffer and + * exported to user space via raw data attached to the event sample data. + * Each counter set starts with an eight byte header consisting of: + * - a two byte eye catcher (0xfeef) + * - a one byte counter set number + * - a two byte counter set size (indicates the number of counters in this set) + * - a three byte reserved value (must be zero) to make the header the same + * size as a counter value. + * All counter values are eight byte in size. + * + * All counter sets are followed by a 64 byte trailer. + * The trailer consists of a: + * - flag field indicating valid fields when corresponding bit set + * - the counter facility first and second version number + * - the CPU speed if nonzero + * - the time stamp the counter sets have been collected + * - the time of day (TOD) base value + * - the machine type. + * + * The counter sets are saved when the process is prepared to be executed on a + * CPU and saved again when the process is going to be removed from a CPU. + * The difference of both counter sets are calculated and stored in the event + * sample data area. 
+ */ + +struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */ + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int set:16; /* 16-31 Counter set identifier */ + unsigned int ctr:16; /* 32-47 Number of stored counters */ + unsigned int res1:16; /* 48-63 Reserved */ +}; + +struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */ + /* 0 - 7 */ + union { + struct { + unsigned int clock_base:1; /* TOD clock base set */ + unsigned int speed:1; /* CPU speed set */ + /* Measurement alerts */ + unsigned int mtda:1; /* Loss of MT ctr. data alert */ + unsigned int caca:1; /* Counter auth. change alert */ + unsigned int lcda:1; /* Loss of counter data alert */ + }; + unsigned long flags; /* 0-63 All indicators */ + }; + /* 8 - 15 */ + unsigned int cfvn:16; /* 64-79 Ctr First Version */ + unsigned int csvn:16; /* 80-95 Ctr Second Version */ + unsigned int cpu_speed:32; /* 96-127 CPU speed */ + /* 16 - 23 */ + unsigned long timestamp; /* 128-191 Timestamp (TOD) */ + /* 24 - 55 */ + union { + struct { + unsigned long progusage1; + unsigned long progusage2; + unsigned long progusage3; + unsigned long tod_base; + }; + unsigned long progusage[4]; + }; + /* 56 - 63 */ + unsigned int mach_type:16; /* Machine type */ + unsigned int res1:16; /* Reserved */ + unsigned int res2:32; /* Reserved */ +}; + +/* Create the trailer data at the end of a page. */ +static void cf_diag_trailer(struct cf_trailer_entry *te) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cpuid cpuid; + + te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */ + te->csvn = cpuhw->info.csvn; + + get_cpu_id(&cpuid); /* Machine type */ + te->mach_type = cpuid.machine; + te->cpu_speed = cf_diag_cpu_speed; + if (te->cpu_speed) + te->speed = 1; + te->clock_base = 1; /* Save clock base */ + memcpy(&te->tod_base, &tod_clock_base[1], 8); + store_tod_clock((__u64 *)&te->timestamp); +} + +/* + * Change the CPUMF state to active. + * Enable and activate the CPU-counter sets according + * to the per-cpu control state. + */ +static void cf_diag_enable(struct pmu *pmu) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + int err; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s pmu %p cpu %d flags %#x state %#llx\n", + __func__, pmu, smp_processor_id(), cpuhw->flags, + cpuhw->state); + if (cpuhw->flags & PMU_F_ENABLED) + return; + + err = lcctl(cpuhw->state); + if (err) { + pr_err("Enabling the performance measuring unit " + "failed with rc=%x\n", err); + return; + } + cpuhw->flags |= PMU_F_ENABLED; +} + +/* + * Change the CPUMF state to inactive. + * Disable and enable (inactive) the CPU-counter sets according + * to the per-cpu control state. 
+ */ +static void cf_diag_disable(struct pmu *pmu) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + u64 inactive; + int err; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s pmu %p cpu %d flags %#x state %#llx\n", + __func__, pmu, smp_processor_id(), cpuhw->flags, + cpuhw->state); + if (!(cpuhw->flags & PMU_F_ENABLED)) + return; + + inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); + err = lcctl(inactive); + if (err) { + pr_err("Disabling the performance measuring unit " + "failed with rc=%x\n", err); + return; + } + cpuhw->flags &= ~PMU_F_ENABLED; +} + +/* Number of perf events counting hardware events */ +static atomic_t cf_diag_events = ATOMIC_INIT(0); + +/* Release the PMU if event is the last perf event */ +static void cf_diag_perf_event_destroy(struct perf_event *event) +{ + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d cf_diag_events %d\n", + __func__, event, event->cpu, + atomic_read(&cf_diag_events)); + if (atomic_dec_return(&cf_diag_events) == 0) + __kernel_cpumcf_end(); +} + +/* Setup the event. Test for authorized counter sets and only include counter + * sets which are authorized at the time of the setup. Including unauthorized + * counter sets results in a specification exception (and panic). + */ +static int __hw_perf_event_init(struct perf_event *event) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct perf_event_attr *attr = &event->attr; + enum cpumf_ctr_set i; + int err = 0; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d authorized %#x\n", __func__, + event, event->cpu, cpuhw->info.auth_ctl); + + event->hw.config = attr->config; + event->hw.config_base = 0; + local64_set(&event->count, 0); + + /* Add all authorized counter sets to config_base */ + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) + if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i]) + event->hw.config_base |= cpumf_ctr_ctl[i]; + + /* No authorized counter sets, nothing to count/sample */ + if (!event->hw.config_base) { + err = -EINVAL; + goto out; + } + + /* Set sample_period to indicate sampling */ + event->hw.sample_period = attr->sample_period; + local64_set(&event->hw.period_left, event->hw.sample_period); + event->hw.last_period = event->hw.sample_period; +out: + debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n", + __func__, err, event->hw.config_base); + return err; +} + +static int cf_diag_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + int err = -ENOENT; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d config %#llx " + "sample_type %#llx cf_diag_events %d\n", __func__, + event, event->cpu, attr->config, attr->sample_type, + atomic_read(&cf_diag_events)); + + if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG || + event->attr.type != PERF_TYPE_RAW) + goto out; + + /* Raw events are used to access counters directly, + * hence do not permit excludes. + * This event is useless without PERF_SAMPLE_RAW to return counter set + * values as raw data.
+ */ + if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv || + !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) { + err = -EOPNOTSUPP; + goto out; + } + + /* Initialize for using the CPU-measurement counter facility */ + if (atomic_inc_return(&cf_diag_events) == 1) { + if (__kernel_cpumcf_begin()) { + atomic_dec(&cf_diag_events); + err = -EBUSY; + goto out; + } + } + event->destroy = cf_diag_perf_event_destroy; + + err = __hw_perf_event_init(event); + if (unlikely(err)) + event->destroy(event); +out: + debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err); + return err; +} + +static void cf_diag_read(struct perf_event *event) +{ + debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event); +} + +/* Return the maximum possible counter set size (in number of 8 byte counters) + * depending on type and model number. + */ +static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset, + struct cpumf_ctr_info *info) +{ + size_t ctrset_size = 0; + + switch (ctrset) { + case CPUMF_CTR_SET_BASIC: + if (info->cfvn >= 1) + ctrset_size = 6; + break; + case CPUMF_CTR_SET_USER: + if (info->cfvn == 1) + ctrset_size = 6; + else if (info->cfvn >= 3) + ctrset_size = 2; + break; + case CPUMF_CTR_SET_CRYPTO: + ctrset_size = 16; + break; + case CPUMF_CTR_SET_EXT: + if (info->csvn == 1) + ctrset_size = 32; + else if (info->csvn == 2) + ctrset_size = 48; + else if (info->csvn >= 3) + ctrset_size = 128; + break; + case CPUMF_CTR_SET_MT_DIAG: + if (info->csvn > 3) + ctrset_size = 48; + break; + case CPUMF_CTR_SET_MAX: + break; + } + + return ctrset_size; +} + +/* Calculate memory needed to store all counter sets together with header and + * trailer data. This is independent of the counter set authorization which + * can vary depending on the configuration. + */ +static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info) +{ + size_t max_size = sizeof(struct cf_trailer_entry); + enum cpumf_ctr_set i; + + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { + size_t size = cf_diag_ctrset_size(i, info); + + if (size) + max_size += size * sizeof(u64) + + sizeof(struct cf_ctrset_entry); + } + debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__, + max_size); + + return max_size; +} + +/* Read a counter set. The counter set number determines which counter set and + * the CPUM-CF first and second version number determine the number of + * available counters in this counter set. + * Each counter set starts with a header containing the counter set number and + * the number of 8 byte counters. + * + * The function returns the number of bytes occupied by this counter set + * including the header. + * If there is no counter in the counter set, this counter set is useless and + * zero is returned in this case.
+ */ +static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset, + size_t room) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + size_t ctrset_size, need = 0; + int rc = 3; /* Assume write failure */ + + ctrdata->def = CF_DIAG_CTRSET_DEF; + ctrdata->set = ctrset; + ctrdata->res1 = 0; + ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info); + + if (ctrset_size) { /* Save data */ + need = ctrset_size * sizeof(u64) + sizeof(*ctrdata); + if (need <= room) + rc = ctr_stcctm(ctrset, ctrset_size, + (u64 *)(ctrdata + 1)); + if (rc != 3) + ctrdata->ctr = ctrset_size; + else + need = 0; + } + + debug_sprintf_event(cf_diag_dbg, 6, + "%s ctrset %d ctrset_size %zu cfvn %d csvn %d" + " need %zd rc:%d\n", + __func__, ctrset, ctrset_size, cpuhw->info.cfvn, + cpuhw->info.csvn, need, rc); + return need; +} + +/* Read out all counter sets and save them in the provided data buffer. + * The last 64 byte host an artificial trailer entry. + */ +static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth) +{ + struct cf_trailer_entry *trailer; + size_t offset = 0, done; + int i; + + memset(data, 0, sz); + sz -= sizeof(*trailer); /* Always room for trailer */ + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { + struct cf_ctrset_entry *ctrdata = data + offset; + + if (!(auth & cpumf_ctr_ctl[i])) + continue; /* Counter set not authorized */ + + done = cf_diag_getctrset(ctrdata, i, sz - offset); + offset += done; + debug_sprintf_event(cf_diag_dbg, 6, + "%s ctrset %d offset %zu done %zu\n", + __func__, i, offset, done); + } + trailer = data + offset; + cf_diag_trailer(trailer); + return offset + sizeof(*trailer); +} + +/* Calculate the difference for each counter in a counter set. */ +static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters) +{ + for (; --counters >= 0; ++pstart, ++pstop) + if (*pstop >= *pstart) + *pstop -= *pstart; + else + *pstop = *pstart - *pstop; +} + +/* Scan the counter sets and calculate the difference of each counter + * in each set. The result is the increment of each counter during the + * period the counter set has been activated. + * + * Return true on success. + */ +static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth) +{ + struct cf_trailer_entry *trailer_start, *trailer_stop; + struct cf_ctrset_entry *ctrstart, *ctrstop; + size_t offset = 0; + + auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1; + do { + ctrstart = (struct cf_ctrset_entry *)(csd->start + offset); + ctrstop = (struct cf_ctrset_entry *)(csd->data + offset); + + if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) { + pr_err("cpum_cf_diag counter set compare error " + "in set %i\n", ctrstart->set); + return 0; + } + auth &= ~cpumf_ctr_ctl[ctrstart->set]; + if (ctrstart->def == CF_DIAG_CTRSET_DEF) { + cf_diag_diffctrset((u64 *)(ctrstart + 1), + (u64 *)(ctrstop + 1), ctrstart->ctr); + offset += ctrstart->ctr * sizeof(u64) + + sizeof(*ctrstart); + } + debug_sprintf_event(cf_diag_dbg, 6, + "%s set %d ctr %d offset %zu auth %lx\n", + __func__, ctrstart->set, ctrstart->ctr, + offset, auth); + } while (ctrstart->def && auth); + + /* Save time_stamp from start of event in stop's trailer */ + trailer_start = (struct cf_trailer_entry *)(csd->start + offset); + trailer_stop = (struct cf_trailer_entry *)(csd->data + offset); + trailer_stop->progusage[0] = trailer_start->timestamp; + + return 1; +} + +/* Create perf event sample with the counter sets as raw data. 
The sample + * is then pushed to the event subsystem and the function checks for + * possible event overflows. If an event overflow occurs, the PMU is + * stopped. + * + * Return non-zero if an event overflow occurred. + */ +static int cf_diag_push_sample(struct perf_event *event, + struct cf_diag_csd *csd) +{ + struct perf_sample_data data; + struct perf_raw_record raw; + struct pt_regs regs; + int overflow; + + /* Setup perf sample */ + perf_sample_data_init(&data, 0, event->hw.last_period); + memset(®s, 0, sizeof(regs)); + memset(&raw, 0, sizeof(raw)); + + if (event->attr.sample_type & PERF_SAMPLE_CPU) + data.cpu_entry.cpu = event->cpu; + if (event->attr.sample_type & PERF_SAMPLE_RAW) { + raw.frag.size = csd->used; + raw.frag.data = csd->data; + raw.size = csd->used; + data.raw = &raw; + } + + overflow = perf_event_overflow(event, &data, ®s); + debug_sprintf_event(cf_diag_dbg, 6, + "%s event %p cpu %d sample_type %#llx raw %d " + "ov %d\n", __func__, event, event->cpu, + event->attr.sample_type, raw.size, overflow); + if (overflow) + event->pmu->stop(event, 0); + + perf_event_update_userpage(event); + return overflow; +} + +static void cf_diag_start(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd); + struct hw_perf_event *hwc = &event->hw; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x hwc-state %#x\n", + __func__, event, event->cpu, flags, hwc->state); + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + /* (Re-)enable and activate all counter sets */ + lcctl(0); /* Reset counter sets */ + hwc->state = 0; + ctr_set_multiple_enable(&cpuhw->state, hwc->config_base); + lcctl(cpuhw->state); /* Enable counter sets */ + csd->used = cf_diag_getctr(csd->start, sizeof(csd->start), + event->hw.config_base); + ctr_set_multiple_start(&cpuhw->state, hwc->config_base); + /* Function cf_diag_enable() starts the counter sets. 
*/ +} + +static void cf_diag_stop(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd); + struct hw_perf_event *hwc = &event->hw; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x hwc-state %#x\n", + __func__, event, event->cpu, flags, hwc->state); + + /* Deactivate all counter sets */ + ctr_set_multiple_stop(&cpuhw->state, hwc->config_base); + local64_inc(&event->count); + csd->used = cf_diag_getctr(csd->data, sizeof(csd->data), + event->hw.config_base); + if (cf_diag_diffctr(csd, event->hw.config_base)) + cf_diag_push_sample(event, csd); + hwc->state |= PERF_HES_STOPPED; +} + +static int cf_diag_add(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + int err = 0; + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x cpuhw:%p\n", + __func__, event, event->cpu, flags, cpuhw); + + if (cpuhw->flags & PMU_F_IN_USE) { + err = -EAGAIN; + goto out; + } + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + cpuhw->flags |= PMU_F_IN_USE; + if (flags & PERF_EF_START) + cf_diag_start(event, PERF_EF_RELOAD); +out: + debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err); + return err; +} + +static void cf_diag_del(struct perf_event *event, int flags) +{ + struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); + + debug_sprintf_event(cf_diag_dbg, 5, + "%s event %p cpu %d flags %#x\n", + __func__, event, event->cpu, flags); + + cf_diag_stop(event, PERF_EF_UPDATE); + ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base); + ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base); + cpuhw->flags &= ~PMU_F_IN_USE; +} + +CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG); + +static struct attribute *cf_diag_events_attr[] = { + CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG), + NULL, +}; + +PMU_FORMAT_ATTR(event, "config:0-63"); + +static struct attribute *cf_diag_format_attr[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group cf_diag_events_group = { + .name = "events", + .attrs = cf_diag_events_attr, +}; +static struct attribute_group cf_diag_format_group = { + .name = "format", + .attrs = cf_diag_format_attr, +}; +static const struct attribute_group *cf_diag_attr_groups[] = { + &cf_diag_events_group, + &cf_diag_format_group, + NULL, +}; + +/* Performance monitoring unit for s390x */ +static struct pmu cf_diag = { + .task_ctx_nr = perf_sw_context, + .pmu_enable = cf_diag_enable, + .pmu_disable = cf_diag_disable, + .event_init = cf_diag_event_init, + .add = cf_diag_add, + .del = cf_diag_del, + .start = cf_diag_start, + .stop = cf_diag_stop, + .read = cf_diag_read, + + .attr_groups = cf_diag_attr_groups +}; + +/* Get the CPU speed, try sampling facility first and CPU attributes second. */ +static void cf_diag_get_cpu_speed(void) +{ + if (cpum_sf_avail()) { /* Sampling facility first */ + struct hws_qsi_info_block si; + + memset(&si, 0, sizeof(si)); + if (!qsi(&si)) { + cf_diag_cpu_speed = si.cpu_speed; + return; + } + } + + if (test_facility(34)) { /* CPU speed extract static part */ + unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0); + + if (mhz != -1UL) + cf_diag_cpu_speed = mhz & 0xffffffff; + } +} + +/* Initialize the counter set PMU to generate complete counter set data as + * event raw data. This relies on the CPU Measurement Counter Facility device + * already being loaded and initialized. 
+ */ +static int __init cf_diag_init(void) +{ + struct cpumf_ctr_info info; + size_t need; + int rc; + + if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info)) + return -ENODEV; + cf_diag_get_cpu_speed(); + + /* Make sure the counter set data fits into predefined buffer. */ + need = cf_diag_ctrset_maxsize(&info); + if (need > sizeof(((struct cf_diag_csd *)0)->start)) { + pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n", + need); + return -ENOMEM; + } + + /* Setup s390dbf facility */ + cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128); + if (!cf_diag_dbg) { + pr_err("Registration of s390dbf(cpum_cf_diag) failed\n"); + return -ENOMEM; + } + debug_register_view(cf_diag_dbg, &debug_sprintf_view); + + rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW); + if (rc) { + debug_unregister_view(cf_diag_dbg, &debug_sprintf_view); + debug_unregister(cf_diag_dbg); + pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n", + rc); + } + return rc; +} +arch_initcall(cf_diag_init); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index d63fb3c56b8a..b45238c89728 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -6,6 +6,7 @@ #include #include +#include /* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */ diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index bfabeb1889cc..1266194afb02 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb) /* * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling - * @cpu: On which to allocate, -1 means current + * @event: Event the buffer is setup for, event->cpu == -1 means current * @pages: Array of pointers to buffer pages passed from perf core * @nr_pages: Total pages * @snapshot: Flag for snapshot mode @@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb) * * Return the private AUX buffer structure if success or NULL if fails. 
*/ -static void *aux_buffer_setup(int cpu, void **pages, int nr_pages, - bool snapshot) +static void *aux_buffer_setup(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct sf_buffer *sfb; struct aux_buffer *aux; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7ed90a759135..2c642af526ce 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -369,7 +369,7 @@ void __init arch_call_rest_init(void) : : [_frame] "a" (frame)); } -static void __init setup_lowcore(void) +static void __init setup_lowcore_dat_off(void) { struct lowcore *lc; @@ -378,21 +378,22 @@ static void __init setup_lowcore(void) */ BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE); lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc)); + if (!lc) + panic("%s: Failed to allocate %zu bytes align=%zx\n", + __func__, sizeof(*lc), sizeof(*lc)); + lc->restart_psw.mask = PSW_KERNEL_BITS; lc->restart_psw.addr = (unsigned long) restart_int_handler; - lc->external_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->external_new_psw.addr = (unsigned long) ext_int_handler; lc->svc_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; + PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; lc->svc_new_psw.addr = (unsigned long) system_call; - lc->program_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->program_new_psw.addr = (unsigned long) pgm_check_handler; lc->mcck_new_psw.mask = PSW_KERNEL_BITS; lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; - lc->io_new_psw.mask = PSW_KERNEL_BITS | - PSW_MASK_DAT | PSW_MASK_MCHECK; + lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK; lc->io_new_psw.addr = (unsigned long) io_int_handler; lc->clock_comparator = clock_comparator_max; lc->nodat_stack = ((unsigned long) &init_thread_union) @@ -422,6 +423,9 @@ static void __init setup_lowcore(void) * all CPUs in case *one* of them does a PSW restart.
*/ restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE); + if (!restart_stack) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, THREAD_SIZE, THREAD_SIZE); restart_stack += STACK_INIT_OFFSET; /* @@ -452,6 +456,16 @@ static void __init setup_lowcore(void) lowcore_ptr[0] = lc; } +static void __init setup_lowcore_dat_on(void) +{ + __ctl_clear_bit(0, 28); + S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT; + S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT; + __ctl_set_bit(0, 28); +} + static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, @@ -488,6 +502,9 @@ static void __init setup_resources(void) for_each_memblock(memory, reg) { res = memblock_alloc(sizeof(*res), 8); + if (!res) + panic("%s: Failed to allocate %zu bytes align=0x%x\n", + __func__, sizeof(*res), 8); res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; res->name = "System RAM"; @@ -502,6 +519,9 @@ static void __init setup_resources(void) continue; if (std_res->end > res->end) { sub_res = memblock_alloc(sizeof(*sub_res), 8); + if (!sub_res) + panic("%s: Failed to allocate %zu bytes align=0x%x\n", + __func__, sizeof(*sub_res), 8); *sub_res = *std_res; sub_res->end = res->end; std_res->start = res->end + 1; @@ -794,18 +814,9 @@ static void __init reserve_kernel(void) { unsigned long start_pfn = PFN_UP(__pa(_end)); -#ifdef CONFIG_DMA_API_DEBUG - /* - * DMA_API_DEBUG code stumbles over addresses from the - * range [PARMAREA_END, _stext]. Mark the memory as reserved - * so it is not used for CONFIG_DMA_API_DEBUG=y. - */ - memblock_reserve(0, PFN_PHYS(start_pfn)); -#else memblock_reserve(0, PARMAREA_END); memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn) - (unsigned long)_stext); -#endif } static void __init setup_memory(void) @@ -968,6 +979,9 @@ static void __init setup_randomness(void) vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + if (!vmms) + panic("Failed to allocate memory for sysinfo structure\n"); + if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); memblock_free((unsigned long) vmms, PAGE_SIZE); @@ -989,6 +1003,25 @@ static void __init setup_task_size(void) arch_task_struct_size = task_size; } +/* + * Issue diagnose 318 to set the control program name and + * version codes. + */ +static void __init setup_control_program_code(void) +{ + union diag318_info diag318_info = { + .cpnc = CPNC_LINUX, + .cpvc_linux = 0, + .cpvc_distro = {0}, + }; + + if (!sclp.has_diag318) + return; + + diag_stat_inc(DIAG_STAT_X318); + asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val)); +} + /* * Setup function called from init/main.c just after the banner * was printed. @@ -1033,6 +1066,7 @@ void __init setup_arch(char **cmdline_p) os_info_init(); setup_ipl(); setup_task_size(); + setup_control_program_code(); /* Do some memory reservations *before* memory is added to memblock */ reserve_memory_end(); @@ -1072,7 +1106,7 @@ void __init setup_arch(char **cmdline_p) #endif setup_resources(); - setup_lowcore(); + setup_lowcore_dat_off(); smp_fill_possible_mask(); cpu_detect_mhz_feature(); cpu_init(); @@ -1085,6 +1119,12 @@ void __init setup_arch(char **cmdline_p) */ paging_init(); + /* + * After paging_init created the kernel page table, the new PSWs + * in lowcore can now run with DAT enabled. 
+ */ + setup_lowcore_dat_on(); + /* Setup default console */ conmode_default(); set_preferred_console(); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b198ece2aad6..3fe1c77c361b 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -656,7 +656,11 @@ void __init smp_save_dump_cpus(void) /* No previous system present, normal boot. */ return; /* Allocate a page as dumping area for the store status sigps */ - page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31); + page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31); + if (!page) + panic("ERROR: Failed to allocate %lx bytes below %lx\n", + PAGE_SIZE, 1UL << 31); + /* Set multi-threading state to the previous system. */ pcpu_set_smt(sclp.mtid_prev); boot_cpu_addr = stap(); @@ -766,6 +770,9 @@ void __init smp_detect_cpus(void) /* Get CPU information */ info = memblock_alloc(sizeof(*info), 8); + if (!info) + panic("%s: Failed to allocate %zu bytes align=0x%x\n", + __func__, sizeof(*info), 8); smp_get_core_info(info, 1); /* Find boot CPU type */ if (sclp.has_core_type) { diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index 537f97fde37f..993100c31d65 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S @@ -30,10 +30,10 @@ .section .text ENTRY(swsusp_arch_suspend) lg %r1,__LC_NODAT_STACK - aghi %r1,-STACK_FRAME_OVERHEAD stmg %r6,%r15,__SF_GPRS(%r1) + aghi %r1,-STACK_FRAME_OVERHEAD stg %r15,__SF_BACKCHAIN(%r1) - lgr %r1,%r15 + lgr %r15,%r1 /* Store FPU registers */ brasl %r14,save_fpu_regs @@ -124,13 +124,13 @@ ENTRY(swsusp_arch_resume) lghi %r2,1 brasl %r14,arch_set_page_states - /* Deactivate DAT */ - stnsm __SF_EMPTY(%r15),0xfb - /* Set prefix page to zero */ xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) spx __SF_EMPTY(%r15) + /* Deactivate DAT */ + stnsm __SF_EMPTY(%r15),0xfb + /* Restore saved image */ larl %r1,restore_pblist lg %r1,0(%r1) diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 31cefe0c28c0..202fa73ac167 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -58,6 +58,7 @@ out: return error; } +#ifdef CONFIG_SYSVIPC /* * sys_ipc() is the de-multiplexer for the SysV IPC calls. */ @@ -74,19 +75,28 @@ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second, * Therefore we can call the generic variant by simply passing the * third parameter also as fifth parameter. 
*/ - return sys_ipc(call, first, second, third, ptr, third); + return ksys_ipc(call, first, second, third, ptr, third); } +#endif /* CONFIG_SYSVIPC */ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) { - unsigned int ret; + unsigned int ret = current->personality; if (personality(current->personality) == PER_LINUX32 && personality(personality) == PER_LINUX) personality |= PER_LINUX32; - ret = sys_personality(personality); + + if (personality != 0xffffffff) + set_personality(personality); + if (personality(ret) == PER_LINUX32) ret &= ~PER_LINUX32; return ret; } + +SYSCALL_DEFINE0(ni_syscall) +{ + return -ENOSYS; +} diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 022fc099b628..02579f95f391 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -15,86 +15,86 @@ 5 common open sys_open compat_sys_open 6 common close sys_close sys_close 7 common restart_syscall sys_restart_syscall sys_restart_syscall -8 common creat sys_creat compat_sys_creat -9 common link sys_link compat_sys_link -10 common unlink sys_unlink compat_sys_unlink +8 common creat sys_creat sys_creat +9 common link sys_link sys_link +10 common unlink sys_unlink sys_unlink 11 common execve sys_execve compat_sys_execve -12 common chdir sys_chdir compat_sys_chdir -13 32 time - compat_sys_time -14 common mknod sys_mknod compat_sys_mknod -15 common chmod sys_chmod compat_sys_chmod -16 32 lchown - compat_sys_s390_lchown16 +12 common chdir sys_chdir sys_chdir +13 32 time - sys_time32 +14 common mknod sys_mknod sys_mknod +15 common chmod sys_chmod sys_chmod +16 32 lchown - sys_lchown16 19 common lseek sys_lseek compat_sys_lseek 20 common getpid sys_getpid sys_getpid 21 common mount sys_mount compat_sys_mount -22 common umount sys_oldumount compat_sys_oldumount -23 32 setuid - compat_sys_s390_setuid16 -24 32 getuid - compat_sys_s390_getuid16 -25 32 stime - compat_sys_stime +22 common umount sys_oldumount sys_oldumount +23 32 setuid - sys_setuid16 +24 32 getuid - sys_getuid16 +25 32 stime - sys_stime32 26 common ptrace sys_ptrace compat_sys_ptrace 27 common alarm sys_alarm sys_alarm 29 common pause sys_pause sys_pause -30 common utime sys_utime compat_sys_utime -33 common access sys_access compat_sys_access +30 common utime sys_utime sys_utime32 +33 common access sys_access sys_access 34 common nice sys_nice sys_nice 36 common sync sys_sync sys_sync 37 common kill sys_kill sys_kill -38 common rename sys_rename compat_sys_rename -39 common mkdir sys_mkdir compat_sys_mkdir -40 common rmdir sys_rmdir compat_sys_rmdir +38 common rename sys_rename sys_rename +39 common mkdir sys_mkdir sys_mkdir +40 common rmdir sys_rmdir sys_rmdir 41 common dup sys_dup sys_dup -42 common pipe sys_pipe compat_sys_pipe +42 common pipe sys_pipe sys_pipe 43 common times sys_times compat_sys_times -45 common brk sys_brk compat_sys_brk -46 32 setgid - compat_sys_s390_setgid16 -47 32 getgid - compat_sys_s390_getgid16 -48 common signal sys_signal compat_sys_signal -49 32 geteuid - compat_sys_s390_geteuid16 -50 32 getegid - compat_sys_s390_getegid16 -51 common acct sys_acct compat_sys_acct -52 common umount2 sys_umount compat_sys_umount +45 common brk sys_brk sys_brk +46 32 setgid - sys_setgid16 +47 32 getgid - sys_getgid16 +48 common signal sys_signal sys_signal +49 32 geteuid - sys_geteuid16 +50 32 getegid - sys_getegid16 +51 common acct sys_acct sys_acct +52 common umount2 sys_umount sys_umount 54 common ioctl sys_ioctl compat_sys_ioctl 55 common fcntl sys_fcntl 
compat_sys_fcntl 57 common setpgid sys_setpgid sys_setpgid 60 common umask sys_umask sys_umask -61 common chroot sys_chroot compat_sys_chroot +61 common chroot sys_chroot sys_chroot 62 common ustat sys_ustat compat_sys_ustat 63 common dup2 sys_dup2 sys_dup2 64 common getppid sys_getppid sys_getppid 65 common getpgrp sys_getpgrp sys_getpgrp 66 common setsid sys_setsid sys_setsid 67 common sigaction sys_sigaction compat_sys_sigaction -70 32 setreuid - compat_sys_s390_setreuid16 -71 32 setregid - compat_sys_s390_setregid16 -72 common sigsuspend sys_sigsuspend compat_sys_sigsuspend +70 32 setreuid - sys_setreuid16 +71 32 setregid - sys_setregid16 +72 common sigsuspend sys_sigsuspend sys_sigsuspend 73 common sigpending sys_sigpending compat_sys_sigpending -74 common sethostname sys_sethostname compat_sys_sethostname +74 common sethostname sys_sethostname sys_sethostname 75 common setrlimit sys_setrlimit compat_sys_setrlimit 76 32 getrlimit - compat_sys_old_getrlimit 77 common getrusage sys_getrusage compat_sys_getrusage 78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday 79 common settimeofday sys_settimeofday compat_sys_settimeofday -80 32 getgroups - compat_sys_s390_getgroups16 -81 32 setgroups - compat_sys_s390_setgroups16 -83 common symlink sys_symlink compat_sys_symlink -85 common readlink sys_readlink compat_sys_readlink -86 common uselib sys_uselib compat_sys_uselib -87 common swapon sys_swapon compat_sys_swapon -88 common reboot sys_reboot compat_sys_reboot +80 32 getgroups - sys_getgroups16 +81 32 setgroups - sys_setgroups16 +83 common symlink sys_symlink sys_symlink +85 common readlink sys_readlink sys_readlink +86 common uselib sys_uselib sys_uselib +87 common swapon sys_swapon sys_swapon +88 common reboot sys_reboot sys_reboot 89 common readdir - compat_sys_old_readdir 90 common mmap sys_old_mmap compat_sys_s390_old_mmap -91 common munmap sys_munmap compat_sys_munmap +91 common munmap sys_munmap sys_munmap 92 common truncate sys_truncate compat_sys_truncate 93 common ftruncate sys_ftruncate compat_sys_ftruncate 94 common fchmod sys_fchmod sys_fchmod -95 32 fchown - compat_sys_s390_fchown16 +95 32 fchown - sys_fchown16 96 common getpriority sys_getpriority sys_getpriority 97 common setpriority sys_setpriority sys_setpriority 99 common statfs sys_statfs compat_sys_statfs 100 common fstatfs sys_fstatfs compat_sys_fstatfs 101 32 ioperm - - 102 common socketcall sys_socketcall compat_sys_socketcall -103 common syslog sys_syslog compat_sys_syslog +103 common syslog sys_syslog sys_syslog 104 common setitimer sys_setitimer compat_sys_setitimer 105 common getitimer sys_getitimer compat_sys_getitimer 106 common stat sys_newstat compat_sys_newstat @@ -104,76 +104,76 @@ 111 common vhangup sys_vhangup sys_vhangup 112 common idle - - 114 common wait4 sys_wait4 compat_sys_wait4 -115 common swapoff sys_swapoff compat_sys_swapoff +115 common swapoff sys_swapoff sys_swapoff 116 common sysinfo sys_sysinfo compat_sys_sysinfo 117 common ipc sys_s390_ipc compat_sys_s390_ipc 118 common fsync sys_fsync sys_fsync 119 common sigreturn sys_sigreturn compat_sys_sigreturn -120 common clone sys_clone compat_sys_clone -121 common setdomainname sys_setdomainname compat_sys_setdomainname -122 common uname sys_newuname compat_sys_newuname -124 common adjtimex sys_adjtimex compat_sys_adjtimex -125 common mprotect sys_mprotect compat_sys_mprotect +120 common clone sys_clone sys_clone +121 common setdomainname sys_setdomainname sys_setdomainname +122 common uname sys_newuname sys_newuname +124 common 
adjtimex sys_adjtimex sys_adjtimex_time32 +125 common mprotect sys_mprotect sys_mprotect 126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask 127 common create_module - - -128 common init_module sys_init_module compat_sys_init_module -129 common delete_module sys_delete_module compat_sys_delete_module +128 common init_module sys_init_module sys_init_module +129 common delete_module sys_delete_module sys_delete_module 130 common get_kernel_syms - - -131 common quotactl sys_quotactl compat_sys_quotactl +131 common quotactl sys_quotactl sys_quotactl 132 common getpgid sys_getpgid sys_getpgid 133 common fchdir sys_fchdir sys_fchdir -134 common bdflush sys_bdflush compat_sys_bdflush -135 common sysfs sys_sysfs compat_sys_sysfs +134 common bdflush sys_bdflush sys_bdflush +135 common sysfs sys_sysfs sys_sysfs 136 common personality sys_s390_personality sys_s390_personality 137 common afs_syscall - - -138 32 setfsuid - compat_sys_s390_setfsuid16 -139 32 setfsgid - compat_sys_s390_setfsgid16 -140 32 _llseek - compat_sys_llseek +138 32 setfsuid - sys_setfsuid16 +139 32 setfsgid - sys_setfsgid16 +140 32 _llseek - sys_llseek 141 common getdents sys_getdents compat_sys_getdents 142 32 _newselect - compat_sys_select 142 64 select sys_select - 143 common flock sys_flock sys_flock -144 common msync sys_msync compat_sys_msync +144 common msync sys_msync sys_msync 145 common readv sys_readv compat_sys_readv 146 common writev sys_writev compat_sys_writev 147 common getsid sys_getsid sys_getsid 148 common fdatasync sys_fdatasync sys_fdatasync 149 common _sysctl sys_sysctl compat_sys_sysctl -150 common mlock sys_mlock compat_sys_mlock -151 common munlock sys_munlock compat_sys_munlock +150 common mlock sys_mlock sys_mlock +151 common munlock sys_munlock sys_munlock 152 common mlockall sys_mlockall sys_mlockall 153 common munlockall sys_munlockall sys_munlockall -154 common sched_setparam sys_sched_setparam compat_sys_sched_setparam -155 common sched_getparam sys_sched_getparam compat_sys_sched_getparam -156 common sched_setscheduler sys_sched_setscheduler compat_sys_sched_setscheduler +154 common sched_setparam sys_sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam sys_sched_getparam +156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler 157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler 158 common sched_yield sys_sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep compat_sys_nanosleep -163 common mremap sys_mremap compat_sys_mremap -164 32 setresuid - compat_sys_s390_setresuid16 -165 32 getresuid - compat_sys_s390_getresuid16 +161 common sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32 +162 common nanosleep sys_nanosleep sys_nanosleep_time32 +163 common mremap sys_mremap sys_mremap +164 32 setresuid - sys_setresuid16 +165 32 getresuid - sys_getresuid16 167 common query_module - - -168 common poll sys_poll compat_sys_poll +168 common poll sys_poll sys_poll 169 common nfsservctl - - -170 32 setresgid - compat_sys_s390_setresgid16 -171 32 getresgid - compat_sys_s390_getresgid16 -172 common prctl sys_prctl compat_sys_prctl +170 32 setresgid - sys_setresgid16 +171 32 getresgid - sys_getresgid16 +172 common prctl sys_prctl 
sys_prctl 173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn 174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending -177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32 178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 180 common pread64 sys_pread64 compat_sys_s390_pread64 181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64 -182 32 chown - compat_sys_s390_chown16 -183 common getcwd sys_getcwd compat_sys_getcwd -184 common capget sys_capget compat_sys_capget -185 common capset sys_capset compat_sys_capset +182 32 chown - sys_chown16 +183 common getcwd sys_getcwd sys_getcwd +184 common capget sys_capget sys_capget +185 common capset sys_capset sys_capset 186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack 187 common sendfile sys_sendfile64 compat_sys_sendfile 188 common getpmsg - - @@ -187,7 +187,7 @@ 195 32 stat64 - compat_sys_s390_stat64 196 32 lstat64 - compat_sys_s390_lstat64 197 32 fstat64 - compat_sys_s390_fstat64 -198 32 lchown32 - compat_sys_lchown +198 32 lchown32 - sys_lchown 198 64 lchown sys_lchown - 199 32 getuid32 - sys_getuid 199 64 getuid sys_getuid - @@ -201,21 +201,21 @@ 203 64 setreuid sys_setreuid - 204 32 setregid32 - sys_setregid 204 64 setregid sys_setregid - -205 32 getgroups32 - compat_sys_getgroups +205 32 getgroups32 - sys_getgroups 205 64 getgroups sys_getgroups - -206 32 setgroups32 - compat_sys_setgroups +206 32 setgroups32 - sys_setgroups 206 64 setgroups sys_setgroups - 207 32 fchown32 - sys_fchown 207 64 fchown sys_fchown - 208 32 setresuid32 - sys_setresuid 208 64 setresuid sys_setresuid - -209 32 getresuid32 - compat_sys_getresuid +209 32 getresuid32 - sys_getresuid 209 64 getresuid sys_getresuid - 210 32 setresgid32 - sys_setresgid 210 64 setresgid sys_setresgid - -211 32 getresgid32 - compat_sys_getresgid +211 32 getresgid32 - sys_getresgid 211 64 getresgid sys_getresgid - -212 32 chown32 - compat_sys_chown +212 32 chown32 - sys_chown 212 64 chown sys_chown - 213 32 setuid32 - sys_setuid 213 64 setuid sys_setuid - @@ -225,169 +225,204 @@ 215 64 setfsuid sys_setfsuid - 216 32 setfsgid32 - sys_setfsgid 216 64 setfsgid sys_setfsgid - -217 common pivot_root sys_pivot_root compat_sys_pivot_root -218 common mincore sys_mincore compat_sys_mincore -219 common madvise sys_madvise compat_sys_madvise -220 common getdents64 sys_getdents64 compat_sys_getdents64 +217 common pivot_root sys_pivot_root sys_pivot_root +218 common mincore sys_mincore sys_mincore +219 common madvise sys_madvise sys_madvise +220 common getdents64 sys_getdents64 sys_getdents64 221 32 fcntl64 - compat_sys_fcntl64 222 common readahead sys_readahead compat_sys_s390_readahead 223 32 sendfile64 - compat_sys_sendfile64 -224 common setxattr sys_setxattr compat_sys_setxattr -225 common lsetxattr sys_lsetxattr compat_sys_lsetxattr -226 common fsetxattr sys_fsetxattr compat_sys_fsetxattr -227 common getxattr sys_getxattr compat_sys_getxattr -228 common lgetxattr sys_lgetxattr compat_sys_lgetxattr -229 common fgetxattr sys_fgetxattr compat_sys_fgetxattr -230 common listxattr sys_listxattr compat_sys_listxattr -231 common llistxattr sys_llistxattr compat_sys_llistxattr -232 common flistxattr sys_flistxattr compat_sys_flistxattr -233 
common removexattr sys_removexattr compat_sys_removexattr -234 common lremovexattr sys_lremovexattr compat_sys_lremovexattr -235 common fremovexattr sys_fremovexattr compat_sys_fremovexattr +224 common setxattr sys_setxattr sys_setxattr +225 common lsetxattr sys_lsetxattr sys_lsetxattr +226 common fsetxattr sys_fsetxattr sys_fsetxattr +227 common getxattr sys_getxattr sys_getxattr +228 common lgetxattr sys_lgetxattr sys_lgetxattr +229 common fgetxattr sys_fgetxattr sys_fgetxattr +230 common listxattr sys_listxattr sys_listxattr +231 common llistxattr sys_llistxattr sys_llistxattr +232 common flistxattr sys_flistxattr sys_flistxattr +233 common removexattr sys_removexattr sys_removexattr +234 common lremovexattr sys_lremovexattr sys_lremovexattr +235 common fremovexattr sys_fremovexattr sys_fremovexattr 236 common gettid sys_gettid sys_gettid 237 common tkill sys_tkill sys_tkill -238 common futex sys_futex compat_sys_futex +238 common futex sys_futex sys_futex_time32 239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity 240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity 241 common tgkill sys_tgkill sys_tgkill 243 common io_setup sys_io_setup compat_sys_io_setup -244 common io_destroy sys_io_destroy compat_sys_io_destroy -245 common io_getevents sys_io_getevents compat_sys_io_getevents +244 common io_destroy sys_io_destroy sys_io_destroy +245 common io_getevents sys_io_getevents sys_io_getevents_time32 246 common io_submit sys_io_submit compat_sys_io_submit -247 common io_cancel sys_io_cancel compat_sys_io_cancel +247 common io_cancel sys_io_cancel sys_io_cancel 248 common exit_group sys_exit_group sys_exit_group 249 common epoll_create sys_epoll_create sys_epoll_create -250 common epoll_ctl sys_epoll_ctl compat_sys_epoll_ctl -251 common epoll_wait sys_epoll_wait compat_sys_epoll_wait -252 common set_tid_address sys_set_tid_address compat_sys_set_tid_address +250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl +251 common epoll_wait sys_epoll_wait sys_epoll_wait +252 common set_tid_address sys_set_tid_address sys_set_tid_address 253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64 254 common timer_create sys_timer_create compat_sys_timer_create -255 common timer_settime sys_timer_settime compat_sys_timer_settime -256 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +255 common timer_settime sys_timer_settime sys_timer_settime32 +256 common timer_gettime sys_timer_gettime sys_timer_gettime32 257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun 258 common timer_delete sys_timer_delete sys_timer_delete -259 common clock_settime sys_clock_settime compat_sys_clock_settime -260 common clock_gettime sys_clock_gettime compat_sys_clock_gettime -261 common clock_getres sys_clock_getres compat_sys_clock_getres -262 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +259 common clock_settime sys_clock_settime sys_clock_settime32 +260 common clock_gettime sys_clock_gettime sys_clock_gettime32 +261 common clock_getres sys_clock_getres sys_clock_getres_time32 +262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32 264 32 fadvise64_64 - compat_sys_s390_fadvise64_64 265 common statfs64 sys_statfs64 compat_sys_statfs64 266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 -267 common remap_file_pages sys_remap_file_pages compat_sys_remap_file_pages +267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages 268 common mbind sys_mbind compat_sys_mbind 269 common 
get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy 270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy 271 common mq_open sys_mq_open compat_sys_mq_open -272 common mq_unlink sys_mq_unlink compat_sys_mq_unlink -273 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend -274 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +272 common mq_unlink sys_mq_unlink sys_mq_unlink +273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32 +274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32 275 common mq_notify sys_mq_notify compat_sys_mq_notify 276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 277 common kexec_load sys_kexec_load compat_sys_kexec_load -278 common add_key sys_add_key compat_sys_add_key -279 common request_key sys_request_key compat_sys_request_key +278 common add_key sys_add_key sys_add_key +279 common request_key sys_request_key sys_request_key 280 common keyctl sys_keyctl compat_sys_keyctl 281 common waitid sys_waitid compat_sys_waitid 282 common ioprio_set sys_ioprio_set sys_ioprio_set 283 common ioprio_get sys_ioprio_get sys_ioprio_get 284 common inotify_init sys_inotify_init sys_inotify_init -285 common inotify_add_watch sys_inotify_add_watch compat_sys_inotify_add_watch +285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch 286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch 287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages 288 common openat sys_openat compat_sys_openat -289 common mkdirat sys_mkdirat compat_sys_mkdirat -290 common mknodat sys_mknodat compat_sys_mknodat -291 common fchownat sys_fchownat compat_sys_fchownat -292 common futimesat sys_futimesat compat_sys_futimesat +289 common mkdirat sys_mkdirat sys_mkdirat +290 common mknodat sys_mknodat sys_mknodat +291 common fchownat sys_fchownat sys_fchownat +292 common futimesat sys_futimesat sys_futimesat_time32 293 32 fstatat64 - compat_sys_s390_fstatat64 293 64 newfstatat sys_newfstatat - -294 common unlinkat sys_unlinkat compat_sys_unlinkat -295 common renameat sys_renameat compat_sys_renameat -296 common linkat sys_linkat compat_sys_linkat -297 common symlinkat sys_symlinkat compat_sys_symlinkat -298 common readlinkat sys_readlinkat compat_sys_readlinkat -299 common fchmodat sys_fchmodat compat_sys_fchmodat -300 common faccessat sys_faccessat compat_sys_faccessat -301 common pselect6 sys_pselect6 compat_sys_pselect6 -302 common ppoll sys_ppoll compat_sys_ppoll -303 common unshare sys_unshare compat_sys_unshare +294 common unlinkat sys_unlinkat sys_unlinkat +295 common renameat sys_renameat sys_renameat +296 common linkat sys_linkat sys_linkat +297 common symlinkat sys_symlinkat sys_symlinkat +298 common readlinkat sys_readlinkat sys_readlinkat +299 common fchmodat sys_fchmodat sys_fchmodat +300 common faccessat sys_faccessat sys_faccessat +301 common pselect6 sys_pselect6 compat_sys_pselect6_time32 +302 common ppoll sys_ppoll compat_sys_ppoll_time32 +303 common unshare sys_unshare sys_unshare 304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list -306 common splice sys_splice compat_sys_splice +306 common splice sys_splice sys_splice 307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range -308 common tee sys_tee compat_sys_tee +308 common tee sys_tee sys_tee 309 common vmsplice sys_vmsplice compat_sys_vmsplice 310 common move_pages sys_move_pages 
compat_sys_move_pages -311 common getcpu sys_getcpu compat_sys_getcpu +311 common getcpu sys_getcpu sys_getcpu 312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait -313 common utimes sys_utimes compat_sys_utimes +313 common utimes sys_utimes sys_utimes_time32 314 common fallocate sys_fallocate compat_sys_s390_fallocate -315 common utimensat sys_utimensat compat_sys_utimensat +315 common utimensat sys_utimensat sys_utimensat_time32 316 common signalfd sys_signalfd compat_sys_signalfd 317 common timerfd - - 318 common eventfd sys_eventfd sys_eventfd 319 common timerfd_create sys_timerfd_create sys_timerfd_create -320 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime -321 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32 +321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32 322 common signalfd4 sys_signalfd4 compat_sys_signalfd4 323 common eventfd2 sys_eventfd2 sys_eventfd2 324 common inotify_init1 sys_inotify_init1 sys_inotify_init1 -325 common pipe2 sys_pipe2 compat_sys_pipe2 +325 common pipe2 sys_pipe2 sys_pipe2 326 common dup3 sys_dup3 sys_dup3 327 common epoll_create1 sys_epoll_create1 sys_epoll_create1 328 common preadv sys_preadv compat_sys_preadv 329 common pwritev sys_pwritev compat_sys_pwritev 330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo -331 common perf_event_open sys_perf_event_open compat_sys_perf_event_open +331 common perf_event_open sys_perf_event_open sys_perf_event_open 332 common fanotify_init sys_fanotify_init sys_fanotify_init 333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark -334 common prlimit64 sys_prlimit64 compat_sys_prlimit64 -335 common name_to_handle_at sys_name_to_handle_at compat_sys_name_to_handle_at +334 common prlimit64 sys_prlimit64 sys_prlimit64 +335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at 336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at -337 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32 338 common syncfs sys_syncfs sys_syncfs 339 common setns sys_setns sys_setns 340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv 341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr -343 common kcmp sys_kcmp compat_sys_kcmp -344 common finit_module sys_finit_module compat_sys_finit_module -345 common sched_setattr sys_sched_setattr compat_sys_sched_setattr -346 common sched_getattr sys_sched_getattr compat_sys_sched_getattr -347 common renameat2 sys_renameat2 compat_sys_renameat2 -348 common seccomp sys_seccomp compat_sys_seccomp -349 common getrandom sys_getrandom compat_sys_getrandom -350 common memfd_create sys_memfd_create compat_sys_memfd_create -351 common bpf sys_bpf compat_sys_bpf -352 common s390_pci_mmio_write sys_s390_pci_mmio_write compat_sys_s390_pci_mmio_write -353 common s390_pci_mmio_read sys_s390_pci_mmio_read compat_sys_s390_pci_mmio_read +343 common kcmp sys_kcmp sys_kcmp +344 common finit_module sys_finit_module sys_finit_module +345 common sched_setattr sys_sched_setattr sys_sched_setattr +346 common sched_getattr sys_sched_getattr sys_sched_getattr +347 common renameat2 sys_renameat2 sys_renameat2 +348 common seccomp sys_seccomp sys_seccomp +349 common getrandom sys_getrandom sys_getrandom +350 common 
memfd_create sys_memfd_create sys_memfd_create +351 common bpf sys_bpf sys_bpf +352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write +353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read 354 common execveat sys_execveat compat_sys_execveat 355 common userfaultfd sys_userfaultfd sys_userfaultfd 356 common membarrier sys_membarrier sys_membarrier -357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32 358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 359 common socket sys_socket sys_socket -360 common socketpair sys_socketpair compat_sys_socketpair -361 common bind sys_bind compat_sys_bind -362 common connect sys_connect compat_sys_connect +360 common socketpair sys_socketpair sys_socketpair +361 common bind sys_bind sys_bind +362 common connect sys_connect sys_connect 363 common listen sys_listen sys_listen -364 common accept4 sys_accept4 compat_sys_accept4 +364 common accept4 sys_accept4 sys_accept4 365 common getsockopt sys_getsockopt compat_sys_getsockopt 366 common setsockopt sys_setsockopt compat_sys_setsockopt -367 common getsockname sys_getsockname compat_sys_getsockname -368 common getpeername sys_getpeername compat_sys_getpeername -369 common sendto sys_sendto compat_sys_sendto +367 common getsockname sys_getsockname sys_getsockname +368 common getpeername sys_getpeername sys_getpeername +369 common sendto sys_sendto sys_sendto 370 common sendmsg sys_sendmsg compat_sys_sendmsg 371 common recvfrom sys_recvfrom compat_sys_recvfrom 372 common recvmsg sys_recvmsg compat_sys_recvmsg 373 common shutdown sys_shutdown sys_shutdown -374 common mlock2 sys_mlock2 compat_sys_mlock2 -375 common copy_file_range sys_copy_file_range compat_sys_copy_file_range +374 common mlock2 sys_mlock2 sys_mlock2 +375 common copy_file_range sys_copy_file_range sys_copy_file_range 376 common preadv2 sys_preadv2 compat_sys_preadv2 377 common pwritev2 sys_pwritev2 compat_sys_pwritev2 -378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage -379 common statx sys_statx compat_sys_statx -380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi -381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load +378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage +379 common statx sys_statx sys_statx +380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi +381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load 382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents -383 common rseq sys_rseq compat_sys_rseq +383 common rseq sys_rseq sys_rseq +384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect +385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc +386 common pkey_free sys_pkey_free sys_pkey_free +# room for arch specific syscalls +392 64 semtimedop sys_semtimedop - +393 common semget sys_semget sys_semget +394 common semctl sys_semctl compat_sys_semctl +395 common shmget sys_shmget sys_shmget +396 common shmctl sys_shmctl compat_sys_shmctl +397 common shmat sys_shmat compat_sys_shmat +398 common shmdt sys_shmdt sys_shmdt +399 common msgget sys_msgget sys_msgget +400 common msgsnd sys_msgsnd compat_sys_msgsnd +401 common msgrcv sys_msgrcv compat_sys_msgrcv +402 common msgctl sys_msgctl compat_sys_msgctl +403 32 clock_gettime64 - sys_clock_gettime +404 32 clock_settime64 - sys_clock_settime +405 32 clock_adjtime64 - sys_clock_adjtime +406 32 clock_getres_time64 - sys_clock_getres +407 32 clock_nanosleep_time64 - 
sys_clock_nanosleep +408 32 timer_gettime64 - sys_timer_gettime +409 32 timer_settime64 - sys_timer_settime +410 32 timerfd_gettime64 - sys_timerfd_gettime +411 32 timerfd_settime64 - sys_timerfd_settime +412 32 utimensat_time64 - sys_utimensat +413 32 pselect6_time64 - compat_sys_pselect6_time64 +414 32 ppoll_time64 - compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 - sys_io_pgetevents +417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 - sys_mq_timedsend +419 32 mq_timedreceive_time64 - sys_mq_timedreceive +420 32 semtimedop_time64 - sys_semtimedop +421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 - sys_futex +423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c index 12f80d1f0415..2ac3c9b56a13 100644 --- a/arch/s390/kernel/sysinfo.c +++ b/arch/s390/kernel/sysinfo.c @@ -545,8 +545,6 @@ static __init int stsi_init_debugfs(void) int lvl, i; stsi_root = debugfs_create_dir("stsi", arch_debugfs_dir); - if (IS_ERR_OR_NULL(stsi_root)) - return 0; lvl = stsi(NULL, 0, 0, 0); if (lvl > 0) stsi_0_0_0 = lvl; diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 8992b04c0ade..8964a3f60aad 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -520,6 +520,9 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info, nr_masks = max(nr_masks, 1); for (i = 0; i < nr_masks; i++) { mask->next = memblock_alloc(sizeof(*mask->next), 8); + if (!mask->next) + panic("%s: Failed to allocate %zu bytes align=0x%x\n", + __func__, sizeof(*mask->next), 8); mask = mask->next; } } @@ -538,6 +541,9 @@ void __init topology_init_early(void) if (!MACHINE_HAS_TOPOLOGY) goto out; tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!tl_info) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); info = tl_info; store_topology(info); pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n", diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 4ff354887db4..e7920a68a12e 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -291,7 +291,6 @@ static int __init vdso_init(void) BUG_ON(vdso32_pagelist == NULL); for (i = 0; i < vdso32_pages - 1; i++) { struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); - ClearPageReserved(pg); get_page(pg); vdso32_pagelist[i] = pg; } @@ -309,7 +308,6 @@ static int __init vdso_init(void) BUG_ON(vdso64_pagelist == NULL); for (i = 0; i < vdso64_pages - 1; i++) { struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); - ClearPageReserved(pg); get_page(pg); vdso64_pagelist[i] = pg; } diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index f24395a01918..98f850e00008 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -69,7 +69,7 @@ static void update_mt_scaling(void) u64 delta, fac, mult, div; int i; - stcctm5(smp_cpu_mtid + 1, cycles_new); + stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new); cycles_old = this_cpu_ptr(mt_cycles); fac = 1; mult = div = 0; @@ -432,6 +432,6 @@ void vtime_init(void) __this_cpu_write(mt_scaling_jiffies, jiffies); __this_cpu_write(mt_scaling_mult, 1); __this_cpu_write(mt_scaling_div, 1); - stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles)); + stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles)); } } diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a153257bf7d9..d62fa148558b 100644 --- a/arch/s390/kvm/vsie.c +++ 
b/arch/s390/kvm/vsie.c @@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) scb_s->crycbd = 0; apie_h = vcpu->arch.sie_block->eca & ECA_APIE; - if (!apie_h && !key_msk) + if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0)) return 0; if (!crycb_addr) diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index a10e11f7a5f7..0e30e6e43b0c 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -43,11 +43,13 @@ static inline char *__strnend(const char *s, size_t n) * * returns the length of @s */ +#ifdef __HAVE_ARCH_STRLEN size_t strlen(const char *s) { return __strend(s) - s; } EXPORT_SYMBOL(strlen); +#endif /** * strnlen - Find the length of a length-limited string @@ -56,11 +58,13 @@ EXPORT_SYMBOL(strlen); * * returns the minimum of the length of @s and @n */ +#ifdef __HAVE_ARCH_STRNLEN size_t strnlen(const char *s, size_t n) { return __strnend(s, n) - s; } EXPORT_SYMBOL(strnlen); +#endif /** * strcpy - Copy a %NUL terminated string @@ -69,6 +73,7 @@ EXPORT_SYMBOL(strnlen); * * returns a pointer to @dest */ +#ifdef __HAVE_ARCH_STRCPY char *strcpy(char *dest, const char *src) { register int r0 asm("0") = 0; @@ -81,6 +86,7 @@ char *strcpy(char *dest, const char *src) return ret; } EXPORT_SYMBOL(strcpy); +#endif /** * strlcpy - Copy a %NUL terminated string into a sized buffer @@ -93,6 +99,7 @@ EXPORT_SYMBOL(strcpy); * of course, the buffer size is zero). It does not pad * out the result like strncpy() does. */ +#ifdef __HAVE_ARCH_STRLCPY size_t strlcpy(char *dest, const char *src, size_t size) { size_t ret = __strend(src) - src; @@ -105,6 +112,7 @@ size_t strlcpy(char *dest, const char *src, size_t size) return ret; } EXPORT_SYMBOL(strlcpy); +#endif /** * strncpy - Copy a length-limited, %NUL-terminated string @@ -115,6 +123,7 @@ EXPORT_SYMBOL(strlcpy); * The result is not %NUL-terminated if the source exceeds * @n bytes. */ +#ifdef __HAVE_ARCH_STRNCPY char *strncpy(char *dest, const char *src, size_t n) { size_t len = __strnend(src, n) - src; @@ -123,6 +132,7 @@ char *strncpy(char *dest, const char *src, size_t n) return dest; } EXPORT_SYMBOL(strncpy); +#endif /** * strcat - Append one %NUL-terminated string to another @@ -131,6 +141,7 @@ EXPORT_SYMBOL(strncpy); * * returns a pointer to @dest */ +#ifdef __HAVE_ARCH_STRCAT char *strcat(char *dest, const char *src) { register int r0 asm("0") = 0; @@ -146,6 +157,7 @@ char *strcat(char *dest, const char *src) return ret; } EXPORT_SYMBOL(strcat); +#endif /** * strlcat - Append a length-limited, %NUL-terminated string to another @@ -153,6 +165,7 @@ EXPORT_SYMBOL(strcat); * @src: The string to append to it * @n: The size of the destination buffer. */ +#ifdef __HAVE_ARCH_STRLCAT size_t strlcat(char *dest, const char *src, size_t n) { size_t dsize = __strend(dest) - dest; @@ -170,6 +183,7 @@ size_t strlcat(char *dest, const char *src, size_t n) return res; } EXPORT_SYMBOL(strlcat); +#endif /** * strncat - Append a length-limited, %NUL-terminated string to another @@ -182,6 +196,7 @@ EXPORT_SYMBOL(strlcat); * Note that in contrast to strncpy, strncat ensures the result is * terminated. 
*/ +#ifdef __HAVE_ARCH_STRNCAT char *strncat(char *dest, const char *src, size_t n) { size_t len = __strnend(src, n) - src; @@ -192,6 +207,7 @@ char *strncat(char *dest, const char *src, size_t n) return dest; } EXPORT_SYMBOL(strncat); +#endif /** * strcmp - Compare two strings @@ -202,6 +218,7 @@ EXPORT_SYMBOL(strncat); * < 0 if @s1 is less than @s2 * > 0 if @s1 is greater than @s2 */ +#ifdef __HAVE_ARCH_STRCMP int strcmp(const char *s1, const char *s2) { register int r0 asm("0") = 0; @@ -219,12 +236,14 @@ int strcmp(const char *s1, const char *s2) return ret; } EXPORT_SYMBOL(strcmp); +#endif /** * strrchr - Find the last occurrence of a character in a string * @s: The string to be searched * @c: The character to search for */ +#ifdef __HAVE_ARCH_STRRCHR char *strrchr(const char *s, int c) { size_t len = __strend(s) - s; @@ -237,6 +256,7 @@ char *strrchr(const char *s, int c) return NULL; } EXPORT_SYMBOL(strrchr); +#endif static inline int clcle(const char *s1, unsigned long l1, const char *s2, unsigned long l2) @@ -261,6 +281,7 @@ static inline int clcle(const char *s1, unsigned long l1, * @s1: The string to be searched * @s2: The string to search for */ +#ifdef __HAVE_ARCH_STRSTR char *strstr(const char *s1, const char *s2) { int l1, l2; @@ -280,6 +301,7 @@ char *strstr(const char *s1, const char *s2) return NULL; } EXPORT_SYMBOL(strstr); +#endif /** * memchr - Find a character in an area of memory. @@ -290,6 +312,7 @@ EXPORT_SYMBOL(strstr); * returns the address of the first occurrence of @c, or %NULL * if @c is not found */ +#ifdef __HAVE_ARCH_MEMCHR void *memchr(const void *s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -304,6 +327,7 @@ void *memchr(const void *s, int c, size_t n) return (void *) ret; } EXPORT_SYMBOL(memchr); +#endif /** * memcmp - Compare two areas of memory @@ -311,6 +335,7 @@ EXPORT_SYMBOL(memchr); * @s2: Another area of memory * @count: The size of the area. */ +#ifdef __HAVE_ARCH_MEMCMP int memcmp(const void *s1, const void *s2, size_t n) { int ret; @@ -321,6 +346,7 @@ int memcmp(const void *s1, const void *s2, size_t n) return ret; } EXPORT_SYMBOL(memcmp); +#endif /** * memscan - Find a character in an area of memory. 
@@ -331,6 +357,7 @@ EXPORT_SYMBOL(memcmp); * returns the address of the first occurrence of @c, or 1 byte past * the area if @c is not found */ +#ifdef __HAVE_ARCH_MEMSCAN void *memscan(void *s, int c, size_t n) { register int r0 asm("0") = (char) c; @@ -342,3 +369,4 @@ void *memscan(void *s, int c, size_t n) return (void *) ret; } EXPORT_SYMBOL(memscan); +#endif diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index eba2def3414d..0b5622714c12 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -28,12 +28,7 @@ #include #include -#define DCSS_LOADSHR 0x00 -#define DCSS_LOADNSR 0x04 #define DCSS_PURGESEG 0x08 -#define DCSS_FINDSEG 0x0c -#define DCSS_LOADNOLY 0x10 -#define DCSS_SEGEXT 0x18 #define DCSS_LOADSHRX 0x20 #define DCSS_LOADNSRX 0x24 #define DCSS_FINDSEGX 0x2c @@ -53,20 +48,6 @@ struct qout64 { struct qrange range[6]; }; -struct qrange_old { - unsigned int start; /* last byte type */ - unsigned int end; /* last byte reserved */ -}; - -/* output area format for the Diag x'64' old subcode x'18' */ -struct qout64_old { - int segstart; - int segend; - int segcnt; - int segrcnt; - struct qrange_old range[6]; -}; - struct qin64 { char qopcode; char rsrv1[3]; @@ -95,52 +76,10 @@ static DEFINE_MUTEX(dcss_lock); static LIST_HEAD(dcss_list); static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", "EW/EN-MIXED" }; -static int loadshr_scode, loadnsr_scode; -static int segext_scode, purgeseg_scode; -static int scode_set; - -/* set correct Diag x'64' subcodes. */ -static int -dcss_set_subcodes(void) -{ - char *name = kmalloc(8, GFP_KERNEL | GFP_DMA); - unsigned long rx, ry; - int rc; - - if (name == NULL) - return -ENOMEM; - - rx = (unsigned long) name; - ry = DCSS_FINDSEGX; - - strcpy(name, "dummy"); - diag_stat_inc(DIAG_STAT_X064); - asm volatile( - " diag %0,%1,0x64\n" - "0: ipm %2\n" - " srl %2,28\n" - " j 2f\n" - "1: la %2,3\n" - "2:\n" - EX_TABLE(0b, 1b) - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory"); - - kfree(name); - /* Diag x'64' new subcodes are supported, set to new subcodes */ - if (rc != 3) { - loadshr_scode = DCSS_LOADSHRX; - loadnsr_scode = DCSS_LOADNSRX; - purgeseg_scode = DCSS_PURGESEG; - segext_scode = DCSS_SEGEXTX; - return 0; - } - /* Diag x'64' new subcodes are not supported, set to old subcodes */ - loadshr_scode = DCSS_LOADNOLY; - loadnsr_scode = DCSS_LOADNSR; - purgeseg_scode = DCSS_PURGESEG; - segext_scode = DCSS_SEGEXT; - return 0; -} +static int loadshr_scode = DCSS_LOADSHRX; +static int loadnsr_scode = DCSS_LOADNSRX; +static int purgeseg_scode = DCSS_PURGESEG; +static int segext_scode = DCSS_SEGEXTX; /* * Create the 8 bytes, ebcdic VM segment name from @@ -196,32 +135,15 @@ dcss_diag(int *func, void *parameter, unsigned long rx, ry; int rc; - if (scode_set == 0) { - rc = dcss_set_subcodes(); - if (rc < 0) - return rc; - scode_set = 1; - } rx = (unsigned long) parameter; ry = (unsigned long) *func; - /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ diag_stat_inc(DIAG_STAT_X064); - if (*func > DCSS_SEGEXT) - asm volatile( - " diag %0,%1,0x64\n" - " ipm %2\n" - " srl %2,28\n" - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); - /* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */ - else - asm volatile( - " sam31\n" - " diag %0,%1,0x64\n" - " sam64\n" - " ipm %2\n" - " srl %2,28\n" - : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); + asm volatile( + " diag %0,%1,0x64\n" + " ipm %2\n" + " srl %2,28\n" + : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc"); *ret1 = rx; *ret2 = ry; return rc; @@ -271,31 
+193,6 @@ query_segment_type (struct dcss_segment *seg) goto out_free; } - /* Only old format of output area of Diagnose x'64' is supported, - copy data for the new format. */ - if (segext_scode == DCSS_SEGEXT) { - struct qout64_old *qout_old; - qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA); - if (qout_old == NULL) { - rc = -ENOMEM; - goto out_free; - } - memcpy(qout_old, qout, sizeof(struct qout64_old)); - qout->segstart = (unsigned long) qout_old->segstart; - qout->segend = (unsigned long) qout_old->segend; - qout->segcnt = qout_old->segcnt; - qout->segrcnt = qout_old->segrcnt; - - if (qout->segcnt > 6) - qout->segrcnt = 6; - for (i = 0; i < qout->segrcnt; i++) { - qout->range[i].start = - (unsigned long) qout_old->range[i].start; - qout->range[i].end = - (unsigned long) qout_old->range[i].end; - } - kfree(qout_old); - } if (qout->segcnt > 6) { rc = -EOPNOTSUPP; goto out_free; @@ -410,11 +307,9 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long if (rc < 0) goto out_free; - if (loadshr_scode == DCSS_LOADSHRX) { - if (segment_overlaps_others(seg)) { - rc = -EBUSY; - goto out_free; - } + if (segment_overlaps_others(seg)) { + rc = -EBUSY; + goto out_free; } rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1); @@ -472,11 +367,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long *addr = seg->start_addr; *end = seg->end; if (do_nonshared) - pr_info("DCSS %s of range %p to %p and type %s loaded as " + pr_info("DCSS %s of range %px to %px and type %s loaded as " "exclusive-writable\n", name, (void*) seg->start_addr, (void*) seg->end, segtype_string[seg->vm_segtype]); else { - pr_info("DCSS %s of range %p to %p and type %s loaded in " + pr_info("DCSS %s of range %px to %px and type %s loaded in " "shared access mode\n", name, (void*) seg->start_addr, (void*) seg->end, segtype_string[seg->vm_segtype]); } diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c index bac5c27d11fc..01892dcf4029 100644 --- a/arch/s390/mm/kasan_init.c +++ b/arch/s390/mm/kasan_init.c @@ -226,8 +226,6 @@ static void __init kasan_enable_dat(void) static void __init kasan_early_detect_facilities(void) { - __stfle(S390_lowcore.stfle_fac_list, - ARRAY_SIZE(S390_lowcore.stfle_fac_list)); if (test_facility(8)) { has_edat = true; __ctl_set_bit(0, 23); diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 0a7627cdb34e..687f2a4d3459 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -29,14 +29,6 @@ static unsigned long stack_maxrandom_size(void) return STACK_RND_MASK << PAGE_SHIFT; } -/* - * Top of mmap area (just below the process stack). - * - * Leave at least a ~32 MB hole. - */ -#define MIN_GAP (32*1024*1024) -#define MAX_GAP (STACK_TOP/6*5) - static inline int mmap_is_legacy(struct rlimit *rlim_stack) { if (current->personality & ADDR_COMPAT_LAYOUT) @@ -60,13 +52,26 @@ static inline unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack) { unsigned long gap = rlim_stack->rlim_cur; + unsigned long pad = stack_maxrandom_size() + stack_guard_gap; + unsigned long gap_min, gap_max; + + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; + + /* + * Top of mmap area (just below the process stack). + * Leave at least a ~32 MB hole. 
+ */ + gap_min = 32 * 1024 * 1024UL; + gap_max = (STACK_TOP / 6) * 5; + + if (gap < gap_min) + gap = gap_min; + else if (gap > gap_max) + gap = gap_max; - if (gap < MIN_GAP) - gap = MIN_GAP; - else if (gap > MAX_GAP) - gap = MAX_GAP; - gap &= PAGE_MASK; - return STACK_TOP - stack_maxrandom_size() - rnd - gap; + return PAGE_ALIGN(STACK_TOP - gap - rnd); } unsigned long diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f2cc7da473e4..8485d6dc2754 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -301,12 +301,13 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr, } EXPORT_SYMBOL(ptep_xchg_lazy); -pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, +pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pgste_t pgste; pte_t old; int nodat; + struct mm_struct *mm = vma->vm_mm; preempt_disable(); pgste = ptep_xchg_start(mm, addr, ptep); @@ -318,12 +319,12 @@ pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, } return old; } -EXPORT_SYMBOL(ptep_modify_prot_start); -void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) +void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte) { pgste_t pgste; + struct mm_struct *mm = vma->vm_mm; if (!MACHINE_HAS_NX) pte_val(pte) &= ~_PAGE_NOEXEC; @@ -337,7 +338,6 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, } preempt_enable(); } -EXPORT_SYMBOL(ptep_modify_prot_commit); static inline void pmdp_idte_local(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 3ff758eeb71d..51dd0267d014 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1110,103 +1110,145 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i mask = 0xf000; /* j */ goto branch_oc; case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */ + case BPF_JMP32 | BPF_JSGT | BPF_K: /* ((s32) dst > (s32) imm) */ mask = 0x2000; /* jh */ goto branch_ks; case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */ + case BPF_JMP32 | BPF_JSLT | BPF_K: /* ((s32) dst < (s32) imm) */ mask = 0x4000; /* jl */ goto branch_ks; case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */ + case BPF_JMP32 | BPF_JSGE | BPF_K: /* ((s32) dst >= (s32) imm) */ mask = 0xa000; /* jhe */ goto branch_ks; case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */ + case BPF_JMP32 | BPF_JSLE | BPF_K: /* ((s32) dst <= (s32) imm) */ mask = 0xc000; /* jle */ goto branch_ks; case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */ + case BPF_JMP32 | BPF_JGT | BPF_K: /* ((u32) dst_reg > (u32) imm) */ mask = 0x2000; /* jh */ goto branch_ku; case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */ + case BPF_JMP32 | BPF_JLT | BPF_K: /* ((u32) dst_reg < (u32) imm) */ mask = 0x4000; /* jl */ goto branch_ku; case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */ + case BPF_JMP32 | BPF_JGE | BPF_K: /* ((u32) dst_reg >= (u32) imm) */ mask = 0xa000; /* jhe */ goto branch_ku; case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */ + case BPF_JMP32 | BPF_JLE | BPF_K: /* ((u32) dst_reg <= (u32) imm) */ mask = 0xc000; /* jle */ goto branch_ku; case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */ + case BPF_JMP32 | BPF_JNE | BPF_K: /* ((u32) dst_reg != (u32) imm) */ mask = 0x7000; /* jne */ goto branch_ku; case BPF_JMP | BPF_JEQ | BPF_K: /* (dst_reg == 
imm) */ + case BPF_JMP32 | BPF_JEQ | BPF_K: /* ((u32) dst_reg == (u32) imm) */ mask = 0x8000; /* je */ goto branch_ku; case BPF_JMP | BPF_JSET | BPF_K: /* (dst_reg & imm) */ + case BPF_JMP32 | BPF_JSET | BPF_K: /* ((u32) dst_reg & (u32) imm) */ mask = 0x7000; /* jnz */ - /* lgfi %w1,imm (load sign extend imm) */ - EMIT6_IMM(0xc0010000, REG_W1, imm); - /* ngr %w1,%dst */ - EMIT4(0xb9800000, REG_W1, dst_reg); + if (BPF_CLASS(insn->code) == BPF_JMP32) { + /* llilf %w1,imm (load zero extend imm) */ + EMIT6_IMM(0xc00f0000, REG_W1, imm); + /* nr %w1,%dst */ + EMIT2(0x1400, REG_W1, dst_reg); + } else { + /* lgfi %w1,imm (load sign extend imm) */ + EMIT6_IMM(0xc0010000, REG_W1, imm); + /* ngr %w1,%dst */ + EMIT4(0xb9800000, REG_W1, dst_reg); + } goto branch_oc; case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */ + case BPF_JMP32 | BPF_JSGT | BPF_X: /* ((s32) dst > (s32) src) */ mask = 0x2000; /* jh */ goto branch_xs; case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */ + case BPF_JMP32 | BPF_JSLT | BPF_X: /* ((s32) dst < (s32) src) */ mask = 0x4000; /* jl */ goto branch_xs; case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */ + case BPF_JMP32 | BPF_JSGE | BPF_X: /* ((s32) dst >= (s32) src) */ mask = 0xa000; /* jhe */ goto branch_xs; case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */ + case BPF_JMP32 | BPF_JSLE | BPF_X: /* ((s32) dst <= (s32) src) */ mask = 0xc000; /* jle */ goto branch_xs; case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */ + case BPF_JMP32 | BPF_JGT | BPF_X: /* ((u32) dst > (u32) src) */ mask = 0x2000; /* jh */ goto branch_xu; case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */ + case BPF_JMP32 | BPF_JLT | BPF_X: /* ((u32) dst < (u32) src) */ mask = 0x4000; /* jl */ goto branch_xu; case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */ + case BPF_JMP32 | BPF_JGE | BPF_X: /* ((u32) dst >= (u32) src) */ mask = 0xa000; /* jhe */ goto branch_xu; case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */ + case BPF_JMP32 | BPF_JLE | BPF_X: /* ((u32) dst <= (u32) src) */ mask = 0xc000; /* jle */ goto branch_xu; case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */ + case BPF_JMP32 | BPF_JNE | BPF_X: /* ((u32) dst != (u32) src) */ mask = 0x7000; /* jne */ goto branch_xu; case BPF_JMP | BPF_JEQ | BPF_X: /* (dst == src) */ + case BPF_JMP32 | BPF_JEQ | BPF_X: /* ((u32) dst == (u32) src) */ mask = 0x8000; /* je */ goto branch_xu; case BPF_JMP | BPF_JSET | BPF_X: /* (dst & src) */ + case BPF_JMP32 | BPF_JSET | BPF_X: /* ((u32) dst & (u32) src) */ + { + bool is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; + mask = 0x7000; /* jnz */ - /* ngrk %w1,%dst,%src */ - EMIT4_RRF(0xb9e40000, REG_W1, dst_reg, src_reg); + /* nrk or ngrk %w1,%dst,%src */ + EMIT4_RRF((is_jmp32 ? 0xb9f40000 : 0xb9e40000), + REG_W1, dst_reg, src_reg); goto branch_oc; branch_ks: + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; /* lgfi %w1,imm (load sign extend imm) */ EMIT6_IMM(0xc0010000, REG_W1, imm); - /* cgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, 0x0064, dst_reg, REG_W1, i, off, mask); + /* crj or cgrj %dst,%w1,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, REG_W1, i, off, mask); break; branch_ku: + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; /* lgfi %w1,imm (load sign extend imm) */ EMIT6_IMM(0xc0010000, REG_W1, imm); - /* clgrj %dst,%w1,mask,off */ - EMIT6_PCREL(0xec000000, 0x0065, dst_reg, REG_W1, i, off, mask); + /* clrj or clgrj %dst,%w1,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 
0x0077 : 0x0065), + dst_reg, REG_W1, i, off, mask); break; branch_xs: - /* cgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, 0x0064, dst_reg, src_reg, i, off, mask); + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; + /* crj or cgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), + dst_reg, src_reg, i, off, mask); break; branch_xu: - /* clgrj %dst,%src,mask,off */ - EMIT6_PCREL(0xec000000, 0x0065, dst_reg, src_reg, i, off, mask); + is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; + /* clrj or clgrj %dst,%src,mask,off */ + EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), + dst_reg, src_reg, i, off, mask); break; branch_oc: /* brc mask,jmp_off (branch instruction needs 4 bytes) */ jmp_off = addrs[i + off + 1] - (addrs[i + 1] - 4); EMIT4_PCREL(0xa7040000 | mask << 8, jmp_off); break; + } default: /* too complex, give up */ pr_err("Unknown opcode %02x\n", insn->code); return -1; diff --git a/arch/s390/net/pnet.c b/arch/s390/net/pnet.c index e22f1b10a6c7..79211bec0fc8 100644 --- a/arch/s390/net/pnet.c +++ b/arch/s390/net/pnet.c @@ -12,6 +12,15 @@ #include #include #include +#include + +#define PNETIDS_LEN 64 /* Total utility string length in bytes + * to cover up to 4 PNETIDs of 16 bytes + * for up to 4 device ports + */ +#define MAX_PNETID_LEN 16 /* Max.length of a single port PNETID */ +#define MAX_PNETID_PORTS (PNETIDS_LEN / MAX_PNETID_LEN) + /* Max. # of ports with a PNETID */ /* * Get the PNETIDs from a device. @@ -40,6 +49,7 @@ static int pnet_ids_by_device(struct device *dev, u8 *pnetids) if (!util_str) return -ENOMEM; memcpy(pnetids, util_str, PNETIDS_LEN); + EBCASC(pnetids, PNETIDS_LEN); kfree(util_str); return 0; } @@ -47,6 +57,7 @@ static int pnet_ids_by_device(struct device *dev, u8 *pnetids) struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); memcpy(pnetids, zdev->util_str, sizeof(zdev->util_str)); + EBCASC(pnetids, sizeof(zdev->util_str)); return 0; } return -EOPNOTSUPP; diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index bfba273c32c0..71a12a4f4906 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c @@ -313,6 +313,9 @@ static void __ref create_core_to_node_map(void) int i; emu_cores = memblock_alloc(sizeof(*emu_cores), 8); + if (!emu_cores) + panic("%s: Failed to allocate %zu bytes align=0x%x\n", + __func__, sizeof(*emu_cores), 8); for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++) emu_cores->to_node_id[i] = NODE_ID_FREE; } diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index d31bde0870d8..8eb9e9743f5d 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -57,18 +57,6 @@ EXPORT_SYMBOL(__node_distance); int numa_debug_enabled; -/* - * alloc_node_data() - Allocate node data - */ -static __init pg_data_t *alloc_node_data(void) -{ - pg_data_t *res; - - res = (pg_data_t *) memblock_phys_alloc(sizeof(pg_data_t), 8); - memset(res, 0, sizeof(pg_data_t)); - return res; -} - /* * numa_setup_memory() - Assign bootmem to nodes * @@ -104,8 +92,12 @@ static void __init numa_setup_memory(void) } while (cur_base < end_of_dram); /* Allocate and fill out node_data */ - for (nid = 0; nid < MAX_NUMNODES; nid++) - NODE_DATA(nid) = alloc_node_data(); + for (nid = 0; nid < MAX_NUMNODES; nid++) { + NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8); + if (!NODE_DATA(nid)) + panic("%s: Failed to allocate %zu bytes align=0x%x\n", + __func__, sizeof(pg_data_t), 8); + } for_each_online_node(nid) { unsigned long start_pfn, end_pfn; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 
a966d7bfac57..dc9bc82c072c 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -285,7 +285,7 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, struct zpci_dev *zdev = to_zpci(pdev); int idx; - if (!pci_resource_len(pdev, bar)) + if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT) return NULL; idx = zdev->bars[bar].map_idx; @@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq) if (ai == -1UL) break; inc_irq_stat(IRQIO_MSI); + airq_iv_lock(aibv, ai); generic_handle_irq(airq_iv_get_data(aibv, ai)); + airq_iv_unlock(aibv, ai); } } } @@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) zdev->aisb = aisb; /* Create adapter interrupt vector */ - zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); + zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); if (!zdev->aibv) return -ENOMEM; @@ -482,6 +484,15 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) } } +#ifdef CONFIG_PCI_IOV +static struct resource iov_res = { + .name = "PCI IOV res", + .start = 0, + .end = -1, + .flags = IORESOURCE_MEM, +}; +#endif + static void zpci_map_resources(struct pci_dev *pdev) { resource_size_t len; @@ -495,6 +506,17 @@ static void zpci_map_resources(struct pci_dev *pdev) (resource_size_t __force) pci_iomap(pdev, i, 0); pdev->resource[i].end = pdev->resource[i].start + len - 1; } + +#ifdef CONFIG_PCI_IOV + i = PCI_IOV_RESOURCES; + + for (; i < PCI_SRIOV_NUM_BARS + PCI_IOV_RESOURCES; i++) { + len = pci_resource_len(pdev, i); + if (!len) + continue; + pdev->resource[i].parent = &iov_res; + } +#endif } static void zpci_unmap_resources(struct pci_dev *pdev) diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c index 04388a254ffb..6b48ca7760a7 100644 --- a/arch/s390/pci/pci_debug.c +++ b/arch/s390/pci/pci_debug.c @@ -172,21 +172,14 @@ static const struct file_operations debugfs_pci_perf_fops = { void zpci_debug_init_device(struct zpci_dev *zdev, const char *name) { zdev->debugfs_dev = debugfs_create_dir(name, debugfs_root); - if (IS_ERR(zdev->debugfs_dev)) - zdev->debugfs_dev = NULL; - - zdev->debugfs_perf = debugfs_create_file("statistics", - S_IFREG | S_IRUGO | S_IWUSR, - zdev->debugfs_dev, zdev, - &debugfs_pci_perf_fops); - if (IS_ERR(zdev->debugfs_perf)) - zdev->debugfs_perf = NULL; + + debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR, + zdev->debugfs_dev, zdev, &debugfs_pci_perf_fops); } void zpci_debug_exit_device(struct zpci_dev *zdev) { - debugfs_remove(zdev->debugfs_perf); - debugfs_remove(zdev->debugfs_dev); + debugfs_remove_recursive(zdev->debugfs_dev); } int __init zpci_debug_init(void) diff --git a/arch/s390/scripts/Makefile.chkbss b/arch/s390/scripts/Makefile.chkbss index 9bba2c14e0ca..cd7e8f4419f5 100644 --- a/arch/s390/scripts/Makefile.chkbss +++ b/arch/s390/scripts/Makefile.chkbss @@ -1,23 +1,20 @@ # SPDX-License-Identifier: GPL-2.0 +chkbss-target ?= built-in.a +$(obj)/$(chkbss-target): chkbss + +chkbss-files := $(addsuffix .chkbss, $(chkbss)) +clean-files += $(chkbss-files) + +PHONY += chkbss +chkbss: $(addprefix $(obj)/, $(chkbss-files)) + quiet_cmd_chkbss = CHKBSS $< -define cmd_chkbss - rm -f $@; \ + cmd_chkbss = \ if ! 
$(OBJDUMP) -j .bss -w -h $< | awk 'END { if ($$3) exit 1 }'; then \ echo "error: $< .bss section is not empty" >&2; exit 1; \ fi; \ touch $@; -endef - -chkbss-target ?= $(obj)/built-in.a -ifneq (,$(findstring /,$(chkbss))) -chkbss-files := $(patsubst %, %.chkbss, $(chkbss)) -else -chkbss-files := $(patsubst %, $(obj)/%.chkbss, $(chkbss)) -endif - -$(chkbss-target): $(chkbss-files) -targets += $(notdir $(chkbss-files)) -%.o.chkbss: %.o +$(obj)/%.o.chkbss: $(obj)/%.o $(call cmd,chkbss) diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index a9c36f95744a..b1c91ea9a958 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -7,11 +7,11 @@ config SUPERH select ARCH_NO_COHERENT_DMA_MMAP if !MMU select HAVE_PATA_PLATFORM select CLKDEV_LOOKUP + select DMA_DECLARE_COHERENT select HAVE_IDE if HAS_IOPORT_MAP select HAVE_MEMBLOCK_NODE_MAP select ARCH_DISCARD_MEMBLOCK select HAVE_OPROFILE - select HAVE_GENERIC_DMA_COHERENT select HAVE_ARCH_TRACEHOOK select HAVE_PERF_EVENTS select HAVE_DEBUG_BUGVERBOSE @@ -62,6 +62,7 @@ config SUPERH config SUPERH32 def_bool "$(ARCH)" = "sh" + select ARCH_32BIT_OFF_T select HAVE_KPROBES select HAVE_KRETPROBES select HAVE_IOREMAP_PROT if MMU && !X2TLB diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c index 8f234d0435aa..8301a4378f50 100644 --- a/arch/sh/boards/mach-ap325rxa/setup.c +++ b/arch/sh/boards/mach-ap325rxa/setup.c @@ -529,9 +529,8 @@ static int __init ap325rxa_devices_setup(void) device_initialize(&ap325rxa_ceu_device.dev); arch_setup_pdev_archdata(&ap325rxa_ceu_device); dma_declare_coherent_memory(&ap325rxa_ceu_device.dev, - ceu_dma_membase, ceu_dma_membase, - ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1, - DMA_MEMORY_EXCLUSIVE); + ceu_dma_membase, ceu_dma_membase, + ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); platform_device_add(&ap325rxa_ceu_device); @@ -557,7 +556,10 @@ static void __init ap325rxa_mv_mem_reserve(void) phys_addr_t phys; phys_addr_t size = CEU_BUFFER_MEMORY_SIZE; - phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (!phys) + panic("Failed to allocate CEU memory\n"); + memblock_free(phys, size); memblock_remove(phys, size); diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 22b4106b8084..34e5414c5563 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -630,7 +630,6 @@ static struct regulator_init_data cn12_power_init_data = { static struct fixed_voltage_config cn12_power_info = { .supply_name = "CN12 SD/MMC Vdd", .microvolts = 3300000, - .enable_high = 1, .init_data = &cn12_power_init_data, }; @@ -671,7 +670,6 @@ static struct regulator_init_data sdhi0_power_init_data = { static struct fixed_voltage_config sdhi0_power_info = { .supply_name = "CN11 SD/MMC Vdd", .microvolts = 3300000, - .enable_high = 1, .init_data = &sdhi0_power_init_data, }; @@ -1440,8 +1438,7 @@ static int __init arch_setup(void) dma_declare_coherent_memory(&ecovec_ceu_devices[0]->dev, ceu0_dma_membase, ceu0_dma_membase, ceu0_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1, - DMA_MEMORY_EXCLUSIVE); + CEU_BUFFER_MEMORY_SIZE - 1); platform_device_add(ecovec_ceu_devices[0]); device_initialize(&ecovec_ceu_devices[1]->dev); @@ -1449,8 +1446,7 @@ static int __init arch_setup(void) dma_declare_coherent_memory(&ecovec_ceu_devices[1]->dev, ceu1_dma_membase, ceu1_dma_membase, ceu1_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1, - DMA_MEMORY_EXCLUSIVE); + CEU_BUFFER_MEMORY_SIZE - 1); 
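/*
 * Illustrative sketch (hypothetical device, not part of the patch) of the
 * updated dma_declare_coherent_memory() call used in the board hunks above:
 * the flags argument is gone, so DMA_MEMORY_EXCLUSIVE is no longer passed
 * and exclusive use of the pool is the only behaviour.
 */
#include <linux/dma-mapping.h>

static int example_declare_ceu_pool(struct device *dev,
				    phys_addr_t base, size_t size)
{
	/* bus address equals physical address for this 1:1 mapping */
	return dma_declare_coherent_memory(dev, base, base, size);
}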
platform_device_add(ecovec_ceu_devices[1]); gpiod_add_lookup_table(&cn12_power_gpiod_table); @@ -1480,12 +1476,18 @@ static void __init ecovec_mv_mem_reserve(void) phys_addr_t phys; phys_addr_t size = CEU_BUFFER_MEMORY_SIZE; - phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (!phys) + panic("Failed to allocate CEU0 memory\n"); + memblock_free(phys, size); memblock_remove(phys, size); ceu0_dma_membase = phys; - phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (!phys) + panic("Failed to allocate CEU1 memory\n"); + memblock_free(phys, size); memblock_remove(phys, size); ceu1_dma_membase = phys; diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c index 203d249a0a2b..1cf9a47ac90e 100644 --- a/arch/sh/boards/mach-kfr2r09/setup.c +++ b/arch/sh/boards/mach-kfr2r09/setup.c @@ -603,9 +603,8 @@ static int __init kfr2r09_devices_setup(void) device_initialize(&kfr2r09_ceu_device.dev); arch_setup_pdev_archdata(&kfr2r09_ceu_device); dma_declare_coherent_memory(&kfr2r09_ceu_device.dev, - ceu_dma_membase, ceu_dma_membase, - ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1, - DMA_MEMORY_EXCLUSIVE); + ceu_dma_membase, ceu_dma_membase, + ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); platform_device_add(&kfr2r09_ceu_device); @@ -631,7 +630,10 @@ static void __init kfr2r09_mv_mem_reserve(void) phys_addr_t phys; phys_addr_t size = CEU_BUFFER_MEMORY_SIZE; - phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (!phys) + panic("Failed to allocate CEU memory\n"); + memblock_free(phys, size); memblock_remove(phys, size); diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c index f4ad33c6d2aa..90702740f207 100644 --- a/arch/sh/boards/mach-migor/setup.c +++ b/arch/sh/boards/mach-migor/setup.c @@ -5,6 +5,7 @@ * Copyright (C) 2008 Magnus Damm */ #include +#include #include #include #include @@ -603,9 +604,8 @@ static int __init migor_devices_setup(void) device_initialize(&migor_ceu_device.dev); arch_setup_pdev_archdata(&migor_ceu_device); dma_declare_coherent_memory(&migor_ceu_device.dev, - ceu_dma_membase, ceu_dma_membase, - ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1, - DMA_MEMORY_EXCLUSIVE); + ceu_dma_membase, ceu_dma_membase, + ceu_dma_membase + CEU_BUFFER_MEMORY_SIZE - 1); platform_device_add(&migor_ceu_device); @@ -630,7 +630,10 @@ static void __init migor_mv_mem_reserve(void) phys_addr_t phys; phys_addr_t size = CEU_BUFFER_MEMORY_SIZE; - phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (!phys) + panic("Failed to allocate CEU memory\n"); + memblock_free(phys, size); memblock_remove(phys, size); diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index fdbec22ae687..3674064816c7 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -941,8 +941,7 @@ static int __init devices_setup(void) dma_declare_coherent_memory(&ms7724se_ceu_devices[0]->dev, ceu0_dma_membase, ceu0_dma_membase, ceu0_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1, - DMA_MEMORY_EXCLUSIVE); + CEU_BUFFER_MEMORY_SIZE - 1); platform_device_add(ms7724se_ceu_devices[0]); device_initialize(&ms7724se_ceu_devices[1]->dev); @@ -950,8 +949,7 @@ static int __init devices_setup(void) dma_declare_coherent_memory(&ms7724se_ceu_devices[1]->dev, 
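/*
 * Illustrative sketch (hypothetical helper, not part of the patch) of the
 * reservation idiom used by the *_mv_mem_reserve() functions above:
 * allocate a page-aligned physical range, then give it back to memblock
 * and remove it from the memory map so it can later serve as the CEU's
 * dedicated DMA pool.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>

static phys_addr_t __init example_reserve_pool(phys_addr_t size)
{
	phys_addr_t phys = memblock_phys_alloc(size, PAGE_SIZE);

	if (!phys)
		panic("Failed to allocate CEU memory\n");

	memblock_free(phys, size);	/* drop the allocation ...         */
	memblock_remove(phys, size);	/* ... and hide it from the kernel */
	return phys;
}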
ceu1_dma_membase, ceu1_dma_membase, ceu1_dma_membase + - CEU_BUFFER_MEMORY_SIZE - 1, - DMA_MEMORY_EXCLUSIVE); + CEU_BUFFER_MEMORY_SIZE - 1); platform_device_add(ms7724se_ceu_devices[1]); return platform_add_devices(ms7724se_devices, @@ -965,12 +963,18 @@ static void __init ms7724se_mv_mem_reserve(void) phys_addr_t phys; phys_addr_t size = CEU_BUFFER_MEMORY_SIZE; - phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (!phys) + panic("Failed to allocate CEU0 memory\n"); + memblock_free(phys, size); memblock_remove(phys, size); ceu0_dma_membase = phys; - phys = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_ALLOC_ANYWHERE); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (!phys) + panic("Failed to allocate CEU1 memory\n"); + memblock_free(phys, size); memblock_remove(phys, size); ceu1_dma_membase = phys; diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile index 01d0f7fb14cc..2563d1e532e2 100644 --- a/arch/sh/boot/dts/Makefile +++ b/arch/sh/boot/dts/Makefile @@ -1,3 +1,3 @@ ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") -obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o +obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o endif diff --git a/arch/sh/configs/apsh4a3a_defconfig b/arch/sh/configs/apsh4a3a_defconfig index 4710df43a5b5..6c7cdc3beb28 100644 --- a/arch/sh/configs/apsh4a3a_defconfig +++ b/arch/sh/configs/apsh4a3a_defconfig @@ -81,7 +81,6 @@ CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig index db756e099052..ef7cc31997b1 100644 --- a/arch/sh/configs/edosk7705_defconfig +++ b/arch/sh/configs/edosk7705_defconfig @@ -32,6 +32,5 @@ CONFIG_SH_PCLK_FREQ=31250000 # CONFIG_DNOTIFY is not set # CONFIG_PROC_FS is not set # CONFIG_SYSFS is not set -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_CRC32 is not set diff --git a/arch/sh/configs/espt_defconfig b/arch/sh/configs/espt_defconfig index 2985fe7c6d50..444d75947e70 100644 --- a/arch/sh/configs/espt_defconfig +++ b/arch/sh/configs/espt_defconfig @@ -111,7 +111,6 @@ CONFIG_NLS_ISO8859_15=y CONFIG_NLS_KOI8_R=y CONFIG_NLS_KOI8_U=y CONFIG_NLS_UTF8=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig index e9ee0c878ead..d16e9334cd98 100644 --- a/arch/sh/configs/sdk7786_defconfig +++ b/arch/sh/configs/sdk7786_defconfig @@ -209,7 +209,6 @@ CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig index 34094e05e892..a1cf6447dbb1 100644 --- a/arch/sh/configs/sh2007_defconfig +++ b/arch/sh/configs/sh2007_defconfig @@ -157,7 +157,6 @@ CONFIG_NLS_ISO8859_15=y CONFIG_NLS_KOI8_R=y CONFIG_NLS_KOI8_U=y CONFIG_NLS_UTF8=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig index 
d15e53647983..d04bc27aa816 100644 --- a/arch/sh/configs/sh7724_generic_defconfig +++ b/arch/sh/configs/sh7724_generic_defconfig @@ -40,6 +40,5 @@ CONFIG_UIO_PDRV_GENIRQ=y # CONFIG_PROC_FS is not set # CONFIG_SYSFS is not set # CONFIG_MISC_FILESYSTEMS is not set -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_CRC32 is not set diff --git a/arch/sh/configs/sh7763rdp_defconfig b/arch/sh/configs/sh7763rdp_defconfig index 2ef780fb9813..405bf62d22d0 100644 --- a/arch/sh/configs/sh7763rdp_defconfig +++ b/arch/sh/configs/sh7763rdp_defconfig @@ -113,7 +113,6 @@ CONFIG_NLS_ISO8859_15=y CONFIG_NLS_KOI8_R=y CONFIG_NLS_KOI8_U=y CONFIG_NLS_UTF8=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig index 742634b37c0a..e5b733c2d988 100644 --- a/arch/sh/configs/sh7770_generic_defconfig +++ b/arch/sh/configs/sh7770_generic_defconfig @@ -42,6 +42,5 @@ CONFIG_UIO_PDRV_GENIRQ=y # CONFIG_PROC_FS is not set # CONFIG_SYSFS is not set # CONFIG_MISC_FILESYSTEMS is not set -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set # CONFIG_CRC32 is not set diff --git a/arch/sh/configs/sh7785lcr_defconfig b/arch/sh/configs/sh7785lcr_defconfig index 7098828d392e..5201bb78c6f9 100644 --- a/arch/sh/configs/sh7785lcr_defconfig +++ b/arch/sh/configs/sh7785lcr_defconfig @@ -109,7 +109,6 @@ CONFIG_NFSD_V4=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y diff --git a/arch/sh/configs/ul2_defconfig b/arch/sh/configs/ul2_defconfig index 5f2921a85192..1b7412df12e0 100644 --- a/arch/sh/configs/ul2_defconfig +++ b/arch/sh/configs/ul2_defconfig @@ -82,7 +82,6 @@ CONFIG_NFSD=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_CRYPTO_MICHAEL_MIC=y # CONFIG_CRYPTO_ANSI_CPRNG is not set diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig index 7d5591b7c088..f891045e633a 100644 --- a/arch/sh/configs/urquell_defconfig +++ b/arch/sh/configs/urquell_defconfig @@ -136,7 +136,6 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_932=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c index dfdbd05b6eb1..7be8694c0d13 100644 --- a/arch/sh/drivers/pci/fixups-dreamcast.c +++ b/arch/sh/drivers/pci/fixups-dreamcast.c @@ -63,8 +63,7 @@ static void gapspci_fixup_resources(struct pci_dev *dev) BUG_ON(dma_declare_coherent_memory(&dev->dev, res.start, region.start, - resource_size(&res), - DMA_MEMORY_EXCLUSIVE)); + resource_size(&res))); break; default: printk("PCI: Failed resource fixup\n"); diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 101c13c0c6ad..33d1d28057cb 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -26,7 +26,6 @@ typedef struct { #define segment_eq(a, b) ((a).seg == (b).seg) -#define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git 
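/*
 * Illustrative note (hypothetical function, not part of the patch) on the
 * get_ds() removal above: the few places that temporarily widen the user
 * address limit now name KERNEL_DS directly instead of get_ds().
 */
#include <linux/uaccess.h>

static void example_with_kernel_ds(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);		/* was: set_fs(get_ds()) */
	/* ... access kernel buffers through the user-copy helpers ... */
	set_fs(old_fs);
}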
a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h index a97f93ca3bd7..9c7d9d9999c6 100644 --- a/arch/sh/include/asm/unistd.h +++ b/arch/sh/include/asm/unistd.h @@ -16,8 +16,8 @@ # define __ARCH_WANT_SYS_IPC # define __ARCH_WANT_SYS_PAUSE # define __ARCH_WANT_SYS_SIGNAL -# define __ARCH_WANT_SYS_TIME -# define __ARCH_WANT_SYS_UTIME +# define __ARCH_WANT_SYS_TIME32 +# define __ARCH_WANT_SYS_UTIME32 # define __ARCH_WANT_SYS_WAITPID # define __ARCH_WANT_SYS_SOCKETCALL # define __ARCH_WANT_SYS_FADVISE64 diff --git a/arch/sh/include/uapi/asm/unistd_32.h b/arch/sh/include/uapi/asm/unistd_32.h deleted file mode 100644 index 31c85aa251ab..000000000000 --- a/arch/sh/include/uapi/asm/unistd_32.h +++ /dev/null @@ -1,403 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef __ASM_SH_UNISTD_32_H -#define __ASM_SH_UNISTD_32_H - -/* - * Copyright (C) 1999 Niibe Yutaka - */ - -/* - * This file contains the system call numbers. - */ - -#define __NR_restart_syscall 0 -#define __NR_exit 1 -#define __NR_fork 2 -#define __NR_read 3 -#define __NR_write 4 -#define __NR_open 5 -#define __NR_close 6 -#define __NR_waitpid 7 -#define __NR_creat 8 -#define __NR_link 9 -#define __NR_unlink 10 -#define __NR_execve 11 -#define __NR_chdir 12 -#define __NR_time 13 -#define __NR_mknod 14 -#define __NR_chmod 15 -#define __NR_lchown 16 - /* 17 was sys_break */ -#define __NR_oldstat 18 -#define __NR_lseek 19 -#define __NR_getpid 20 -#define __NR_mount 21 -#define __NR_umount 22 -#define __NR_setuid 23 -#define __NR_getuid 24 -#define __NR_stime 25 -#define __NR_ptrace 26 -#define __NR_alarm 27 -#define __NR_oldfstat 28 -#define __NR_pause 29 -#define __NR_utime 30 - /* 31 was sys_stty */ - /* 32 was sys_gtty */ -#define __NR_access 33 -#define __NR_nice 34 - /* 35 was sys_ftime */ -#define __NR_sync 36 -#define __NR_kill 37 -#define __NR_rename 38 -#define __NR_mkdir 39 -#define __NR_rmdir 40 -#define __NR_dup 41 -#define __NR_pipe 42 -#define __NR_times 43 - /* 44 was sys_prof */ -#define __NR_brk 45 -#define __NR_setgid 46 -#define __NR_getgid 47 -#define __NR_signal 48 -#define __NR_geteuid 49 -#define __NR_getegid 50 -#define __NR_acct 51 -#define __NR_umount2 52 - /* 53 was sys_lock */ -#define __NR_ioctl 54 -#define __NR_fcntl 55 - /* 56 was sys_mpx */ -#define __NR_setpgid 57 - /* 58 was sys_ulimit */ - /* 59 was sys_olduname */ -#define __NR_umask 60 -#define __NR_chroot 61 -#define __NR_ustat 62 -#define __NR_dup2 63 -#define __NR_getppid 64 -#define __NR_getpgrp 65 -#define __NR_setsid 66 -#define __NR_sigaction 67 -#define __NR_sgetmask 68 -#define __NR_ssetmask 69 -#define __NR_setreuid 70 -#define __NR_setregid 71 -#define __NR_sigsuspend 72 -#define __NR_sigpending 73 -#define __NR_sethostname 74 -#define __NR_setrlimit 75 -#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */ -#define __NR_getrusage 77 -#define __NR_gettimeofday 78 -#define __NR_settimeofday 79 -#define __NR_getgroups 80 -#define __NR_setgroups 81 - /* 82 was sys_oldselect */ -#define __NR_symlink 83 -#define __NR_oldlstat 84 -#define __NR_readlink 85 -#define __NR_uselib 86 -#define __NR_swapon 87 -#define __NR_reboot 88 -#define __NR_readdir 89 -#define __NR_mmap 90 -#define __NR_munmap 91 -#define __NR_truncate 92 -#define __NR_ftruncate 93 -#define __NR_fchmod 94 -#define __NR_fchown 95 -#define __NR_getpriority 96 -#define __NR_setpriority 97 - /* 98 was sys_profil */ -#define __NR_statfs 99 -#define __NR_fstatfs 100 - /* 101 was sys_ioperm */ -#define __NR_socketcall 102 -#define 
__NR_syslog 103 -#define __NR_setitimer 104 -#define __NR_getitimer 105 -#define __NR_stat 106 -#define __NR_lstat 107 -#define __NR_fstat 108 -#define __NR_olduname 109 - /* 110 was sys_iopl */ -#define __NR_vhangup 111 - /* 112 was sys_idle */ - /* 113 was sys_vm86old */ -#define __NR_wait4 114 -#define __NR_swapoff 115 -#define __NR_sysinfo 116 -#define __NR_ipc 117 -#define __NR_fsync 118 -#define __NR_sigreturn 119 -#define __NR_clone 120 -#define __NR_setdomainname 121 -#define __NR_uname 122 -#define __NR_cacheflush 123 -#define __NR_adjtimex 124 -#define __NR_mprotect 125 -#define __NR_sigprocmask 126 - /* 127 was sys_create_module */ -#define __NR_init_module 128 -#define __NR_delete_module 129 - /* 130 was sys_get_kernel_syms */ -#define __NR_quotactl 131 -#define __NR_getpgid 132 -#define __NR_fchdir 133 -#define __NR_bdflush 134 -#define __NR_sysfs 135 -#define __NR_personality 136 - /* 137 was sys_afs_syscall */ -#define __NR_setfsuid 138 -#define __NR_setfsgid 139 -#define __NR__llseek 140 -#define __NR_getdents 141 -#define __NR__newselect 142 -#define __NR_flock 143 -#define __NR_msync 144 -#define __NR_readv 145 -#define __NR_writev 146 -#define __NR_getsid 147 -#define __NR_fdatasync 148 -#define __NR__sysctl 149 -#define __NR_mlock 150 -#define __NR_munlock 151 -#define __NR_mlockall 152 -#define __NR_munlockall 153 -#define __NR_sched_setparam 154 -#define __NR_sched_getparam 155 -#define __NR_sched_setscheduler 156 -#define __NR_sched_getscheduler 157 -#define __NR_sched_yield 158 -#define __NR_sched_get_priority_max 159 -#define __NR_sched_get_priority_min 160 -#define __NR_sched_rr_get_interval 161 -#define __NR_nanosleep 162 -#define __NR_mremap 163 -#define __NR_setresuid 164 -#define __NR_getresuid 165 - /* 166 was sys_vm86 */ - /* 167 was sys_query_module */ -#define __NR_poll 168 -#define __NR_nfsservctl 169 -#define __NR_setresgid 170 -#define __NR_getresgid 171 -#define __NR_prctl 172 -#define __NR_rt_sigreturn 173 -#define __NR_rt_sigaction 174 -#define __NR_rt_sigprocmask 175 -#define __NR_rt_sigpending 176 -#define __NR_rt_sigtimedwait 177 -#define __NR_rt_sigqueueinfo 178 -#define __NR_rt_sigsuspend 179 -#define __NR_pread64 180 -#define __NR_pwrite64 181 -#define __NR_chown 182 -#define __NR_getcwd 183 -#define __NR_capget 184 -#define __NR_capset 185 -#define __NR_sigaltstack 186 -#define __NR_sendfile 187 - /* 188 reserved for sys_getpmsg */ - /* 189 reserved for sys_putpmsg */ -#define __NR_vfork 190 -#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ -#define __NR_mmap2 192 -#define __NR_truncate64 193 -#define __NR_ftruncate64 194 -#define __NR_stat64 195 -#define __NR_lstat64 196 -#define __NR_fstat64 197 -#define __NR_lchown32 198 -#define __NR_getuid32 199 -#define __NR_getgid32 200 -#define __NR_geteuid32 201 -#define __NR_getegid32 202 -#define __NR_setreuid32 203 -#define __NR_setregid32 204 -#define __NR_getgroups32 205 -#define __NR_setgroups32 206 -#define __NR_fchown32 207 -#define __NR_setresuid32 208 -#define __NR_getresuid32 209 -#define __NR_setresgid32 210 -#define __NR_getresgid32 211 -#define __NR_chown32 212 -#define __NR_setuid32 213 -#define __NR_setgid32 214 -#define __NR_setfsuid32 215 -#define __NR_setfsgid32 216 -#define __NR_pivot_root 217 -#define __NR_mincore 218 -#define __NR_madvise 219 -#define __NR_getdents64 220 -#define __NR_fcntl64 221 - /* 222 is reserved for tux */ - /* 223 is unused */ -#define __NR_gettid 224 -#define __NR_readahead 225 -#define __NR_setxattr 226 -#define __NR_lsetxattr 227 -#define 
__NR_fsetxattr 228 -#define __NR_getxattr 229 -#define __NR_lgetxattr 230 -#define __NR_fgetxattr 231 -#define __NR_listxattr 232 -#define __NR_llistxattr 233 -#define __NR_flistxattr 234 -#define __NR_removexattr 235 -#define __NR_lremovexattr 236 -#define __NR_fremovexattr 237 -#define __NR_tkill 238 -#define __NR_sendfile64 239 -#define __NR_futex 240 -#define __NR_sched_setaffinity 241 -#define __NR_sched_getaffinity 242 - /* 243 is reserved for set_thread_area */ - /* 244 is reserved for get_thread_area */ -#define __NR_io_setup 245 -#define __NR_io_destroy 246 -#define __NR_io_getevents 247 -#define __NR_io_submit 248 -#define __NR_io_cancel 249 -#define __NR_fadvise64 250 - /* 251 is unused */ -#define __NR_exit_group 252 -#define __NR_lookup_dcookie 253 -#define __NR_epoll_create 254 -#define __NR_epoll_ctl 255 -#define __NR_epoll_wait 256 -#define __NR_remap_file_pages 257 -#define __NR_set_tid_address 258 -#define __NR_timer_create 259 -#define __NR_timer_settime (__NR_timer_create+1) -#define __NR_timer_gettime (__NR_timer_create+2) -#define __NR_timer_getoverrun (__NR_timer_create+3) -#define __NR_timer_delete (__NR_timer_create+4) -#define __NR_clock_settime (__NR_timer_create+5) -#define __NR_clock_gettime (__NR_timer_create+6) -#define __NR_clock_getres (__NR_timer_create+7) -#define __NR_clock_nanosleep (__NR_timer_create+8) -#define __NR_statfs64 268 -#define __NR_fstatfs64 269 -#define __NR_tgkill 270 -#define __NR_utimes 271 -#define __NR_fadvise64_64 272 - /* 273 is reserved for vserver */ -#define __NR_mbind 274 -#define __NR_get_mempolicy 275 -#define __NR_set_mempolicy 276 -#define __NR_mq_open 277 -#define __NR_mq_unlink (__NR_mq_open+1) -#define __NR_mq_timedsend (__NR_mq_open+2) -#define __NR_mq_timedreceive (__NR_mq_open+3) -#define __NR_mq_notify (__NR_mq_open+4) -#define __NR_mq_getsetattr (__NR_mq_open+5) -#define __NR_kexec_load 283 -#define __NR_waitid 284 -#define __NR_add_key 285 -#define __NR_request_key 286 -#define __NR_keyctl 287 -#define __NR_ioprio_set 288 -#define __NR_ioprio_get 289 -#define __NR_inotify_init 290 -#define __NR_inotify_add_watch 291 -#define __NR_inotify_rm_watch 292 - /* 293 is unused */ -#define __NR_migrate_pages 294 -#define __NR_openat 295 -#define __NR_mkdirat 296 -#define __NR_mknodat 297 -#define __NR_fchownat 298 -#define __NR_futimesat 299 -#define __NR_fstatat64 300 -#define __NR_unlinkat 301 -#define __NR_renameat 302 -#define __NR_linkat 303 -#define __NR_symlinkat 304 -#define __NR_readlinkat 305 -#define __NR_fchmodat 306 -#define __NR_faccessat 307 -#define __NR_pselect6 308 -#define __NR_ppoll 309 -#define __NR_unshare 310 -#define __NR_set_robust_list 311 -#define __NR_get_robust_list 312 -#define __NR_splice 313 -#define __NR_sync_file_range 314 -#define __NR_tee 315 -#define __NR_vmsplice 316 -#define __NR_move_pages 317 -#define __NR_getcpu 318 -#define __NR_epoll_pwait 319 -#define __NR_utimensat 320 -#define __NR_signalfd 321 -#define __NR_timerfd_create 322 -#define __NR_eventfd 323 -#define __NR_fallocate 324 -#define __NR_timerfd_settime 325 -#define __NR_timerfd_gettime 326 -#define __NR_signalfd4 327 -#define __NR_eventfd2 328 -#define __NR_epoll_create1 329 -#define __NR_dup3 330 -#define __NR_pipe2 331 -#define __NR_inotify_init1 332 -#define __NR_preadv 333 -#define __NR_pwritev 334 -#define __NR_rt_tgsigqueueinfo 335 -#define __NR_perf_event_open 336 -#define __NR_fanotify_init 337 -#define __NR_fanotify_mark 338 -#define __NR_prlimit64 339 - -/* Non-multiplexed socket family */ -#define __NR_socket 
340 -#define __NR_bind 341 -#define __NR_connect 342 -#define __NR_listen 343 -#define __NR_accept 344 -#define __NR_getsockname 345 -#define __NR_getpeername 346 -#define __NR_socketpair 347 -#define __NR_send 348 -#define __NR_sendto 349 -#define __NR_recv 350 -#define __NR_recvfrom 351 -#define __NR_shutdown 352 -#define __NR_setsockopt 353 -#define __NR_getsockopt 354 -#define __NR_sendmsg 355 -#define __NR_recvmsg 356 -#define __NR_recvmmsg 357 -#define __NR_accept4 358 -#define __NR_name_to_handle_at 359 -#define __NR_open_by_handle_at 360 -#define __NR_clock_adjtime 361 -#define __NR_syncfs 362 -#define __NR_sendmmsg 363 -#define __NR_setns 364 -#define __NR_process_vm_readv 365 -#define __NR_process_vm_writev 366 -#define __NR_kcmp 367 -#define __NR_finit_module 368 -#define __NR_sched_getattr 369 -#define __NR_sched_setattr 370 -#define __NR_renameat2 371 -#define __NR_seccomp 372 -#define __NR_getrandom 373 -#define __NR_memfd_create 374 -#define __NR_bpf 375 -#define __NR_execveat 376 -#define __NR_userfaultfd 377 -#define __NR_membarrier 378 -#define __NR_mlock2 379 -#define __NR_copy_file_range 380 -#define __NR_preadv2 381 -#define __NR_pwritev2 382 - -#ifdef __KERNEL__ -#define __NR_syscalls 383 -#endif - -#endif /* __ASM_SH_UNISTD_32_H */ diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index b9f9f1a5afdc..63d63a36f6f2 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c @@ -168,7 +168,8 @@ void __init reserve_crashkernel(void) crash_size = PAGE_ALIGN(resource_size(&crashk_res)); if (!crashk_res.start) { unsigned long max = memblock_end_of_DRAM() - memory_limit; - crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max); + crashk_res.start = memblock_phys_alloc_range(crash_size, + PAGE_SIZE, 0, max); if (!crashk_res.start) { pr_err("crashkernel allocation failed\n"); goto disable; diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl index 21ec75288562..bfda678576e4 100644 --- a/arch/sh/kernel/syscalls/syscall.tbl +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -20,7 +20,7 @@ 10 common unlink sys_unlink 11 common execve sys_execve 12 common chdir sys_chdir -13 common time sys_time +13 common time sys_time32 14 common mknod sys_mknod 15 common chmod sys_chmod 16 common lchown sys_lchown16 @@ -32,12 +32,12 @@ 22 common umount sys_oldumount 23 common setuid sys_setuid16 24 common getuid sys_getuid16 -25 common stime sys_stime +25 common stime sys_stime32 26 common ptrace sys_ptrace 27 common alarm sys_alarm 28 common oldfstat sys_fstat 29 common pause sys_pause -30 common utime sys_utime +30 common utime sys_utime32 # 31 was stty # 32 was gtty 33 common access sys_access @@ -131,7 +131,7 @@ 121 common setdomainname sys_setdomainname 122 common uname sys_newuname 123 common cacheflush sys_cacheflush -124 common adjtimex sys_adjtimex +124 common adjtimex sys_adjtimex_time32 125 common mprotect sys_mprotect 126 common sigprocmask sys_sigprocmask # 127 was create_module @@ -168,8 +168,8 @@ 158 common sched_yield sys_sched_yield 159 common sched_get_priority_max sys_sched_get_priority_max 160 common sched_get_priority_min sys_sched_get_priority_min -161 common sched_rr_get_interval sys_sched_rr_get_interval -162 common nanosleep sys_nanosleep +161 common sched_rr_get_interval sys_sched_rr_get_interval_time32 +162 common nanosleep sys_nanosleep_time32 163 common mremap sys_mremap 164 common setresuid sys_setresuid16 165 common getresuid sys_getresuid16 @@ -184,7 +184,7 @@ 174 common 
rt_sigaction sys_rt_sigaction 175 common rt_sigprocmask sys_rt_sigprocmask 176 common rt_sigpending sys_rt_sigpending -177 common rt_sigtimedwait sys_rt_sigtimedwait +177 common rt_sigtimedwait sys_rt_sigtimedwait_time32 178 common rt_sigqueueinfo sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend 180 common pread64 sys_pread_wrapper @@ -247,14 +247,14 @@ 237 common fremovexattr sys_fremovexattr 238 common tkill sys_tkill 239 common sendfile64 sys_sendfile64 -240 common futex sys_futex +240 common futex sys_futex_time32 241 common sched_setaffinity sys_sched_setaffinity 242 common sched_getaffinity sys_sched_getaffinity # 243 is reserved for set_thread_area # 244 is reserved for get_thread_area 245 common io_setup sys_io_setup 246 common io_destroy sys_io_destroy -247 common io_getevents sys_io_getevents +247 common io_getevents sys_io_getevents_time32 248 common io_submit sys_io_submit 249 common io_cancel sys_io_cancel 250 common fadvise64 sys_fadvise64 @@ -267,18 +267,18 @@ 257 common remap_file_pages sys_remap_file_pages 258 common set_tid_address sys_set_tid_address 259 common timer_create sys_timer_create -260 common timer_settime sys_timer_settime -261 common timer_gettime sys_timer_gettime +260 common timer_settime sys_timer_settime32 +261 common timer_gettime sys_timer_gettime32 262 common timer_getoverrun sys_timer_getoverrun 263 common timer_delete sys_timer_delete -264 common clock_settime sys_clock_settime -265 common clock_gettime sys_clock_gettime -266 common clock_getres sys_clock_getres -267 common clock_nanosleep sys_clock_nanosleep +264 common clock_settime sys_clock_settime32 +265 common clock_gettime sys_clock_gettime32 +266 common clock_getres sys_clock_getres_time32 +267 common clock_nanosleep sys_clock_nanosleep_time32 268 common statfs64 sys_statfs64 269 common fstatfs64 sys_fstatfs64 270 common tgkill sys_tgkill -271 common utimes sys_utimes +271 common utimes sys_utimes_time32 272 common fadvise64_64 sys_fadvise64_64_wrapper # 273 is reserved for vserver 274 common mbind sys_mbind @@ -286,8 +286,8 @@ 276 common set_mempolicy sys_set_mempolicy 277 common mq_open sys_mq_open 278 common mq_unlink sys_mq_unlink -279 common mq_timedsend sys_mq_timedsend -280 common mq_timedreceive sys_mq_timedreceive +279 common mq_timedsend sys_mq_timedsend_time32 +280 common mq_timedreceive sys_mq_timedreceive_time32 281 common mq_notify sys_mq_notify 282 common mq_getsetattr sys_mq_getsetattr 283 common kexec_load sys_kexec_load @@ -306,7 +306,7 @@ 296 common mkdirat sys_mkdirat 297 common mknodat sys_mknodat 298 common fchownat sys_fchownat -299 common futimesat sys_futimesat +299 common futimesat sys_futimesat_time32 300 common fstatat64 sys_fstatat64 301 common unlinkat sys_unlinkat 302 common renameat sys_renameat @@ -315,8 +315,8 @@ 305 common readlinkat sys_readlinkat 306 common fchmodat sys_fchmodat 307 common faccessat sys_faccessat -308 common pselect6 sys_pselect6 -309 common ppoll sys_ppoll +308 common pselect6 sys_pselect6_time32 +309 common ppoll sys_ppoll_time32 310 common unshare sys_unshare 311 common set_robust_list sys_set_robust_list 312 common get_robust_list sys_get_robust_list @@ -327,13 +327,13 @@ 317 common move_pages sys_move_pages 318 common getcpu sys_getcpu 319 common epoll_pwait sys_epoll_pwait -320 common utimensat sys_utimensat +320 common utimensat sys_utimensat_time32 321 common signalfd sys_signalfd 322 common timerfd_create sys_timerfd_create 323 common eventfd sys_eventfd 324 common fallocate sys_fallocate -325 common timerfd_settime 
sys_timerfd_settime -326 common timerfd_gettime sys_timerfd_gettime +325 common timerfd_settime sys_timerfd_settime32 +326 common timerfd_gettime sys_timerfd_gettime32 327 common signalfd4 sys_signalfd4 328 common eventfd2 sys_eventfd2 329 common epoll_create1 sys_epoll_create1 @@ -364,11 +364,11 @@ 354 common getsockopt sys_getsockopt 355 common sendmsg sys_sendmsg 356 common recvmsg sys_recvmsg -357 common recvmmsg sys_recvmmsg +357 common recvmmsg sys_recvmmsg_time32 358 common accept4 sys_accept4 359 common name_to_handle_at sys_name_to_handle_at 360 common open_by_handle_at sys_open_by_handle_at -361 common clock_adjtime sys_clock_adjtime +361 common clock_adjtime sys_clock_adjtime32 362 common syncfs sys_syncfs 363 common sendmmsg sys_sendmmsg 364 common setns sys_setns @@ -390,3 +390,39 @@ 380 common copy_file_range sys_copy_file_range 381 common preadv2 sys_preadv2 382 common pwritev2 sys_pwritev2 +383 common statx sys_statx +384 common pkey_mprotect sys_pkey_mprotect +385 common pkey_alloc sys_pkey_alloc +386 common pkey_free sys_pkey_free +387 common rseq sys_rseq +# room for arch specific syscalls +393 common semget sys_semget +394 common semctl sys_semctl +395 common shmget sys_shmget +396 common shmctl sys_shmctl +397 common shmat sys_shmat +398 common shmdt sys_shmdt +399 common msgget sys_msgget +400 common msgsnd sys_msgsnd +401 common msgrcv sys_msgrcv +402 common msgctl sys_msgctl +403 common clock_gettime64 sys_clock_gettime +404 common clock_settime64 sys_clock_settime +405 common clock_adjtime64 sys_clock_adjtime +406 common clock_getres_time64 sys_clock_getres +407 common clock_nanosleep_time64 sys_clock_nanosleep +408 common timer_gettime64 sys_timer_gettime +409 common timer_settime64 sys_timer_settime +410 common timerfd_gettime64 sys_timerfd_gettime +411 common timerfd_settime64 sys_timerfd_settime +412 common utimensat_time64 sys_utimensat +413 common pselect6_time64 sys_pselect6 +414 common ppoll_time64 sys_ppoll +416 common io_pgetevents_time64 sys_io_pgetevents +417 common recvmmsg_time64 sys_recvmmsg +418 common mq_timedsend_time64 sys_mq_timedsend +419 common mq_timedreceive_time64 sys_mq_timedreceive +420 common semtimedop_time64 sys_semtimedop +421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait +422 common futex_time64 sys_futex +423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval diff --git a/arch/sh/kernel/syscalls/syscalltbl.sh b/arch/sh/kernel/syscalls/syscalltbl.sh index 85d78d9309ad..904b8e6e625d 100644 --- a/arch/sh/kernel/syscalls/syscalltbl.sh +++ b/arch/sh/kernel/syscalls/syscalltbl.sh @@ -13,10 +13,10 @@ emit() { t_entry="$3" while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}" + printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" t_nxt=$((t_nxt+1)) done - printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}" + printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" } grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S index 96e9c54a07f5..bd1a9c544767 100644 --- a/arch/sh/kernel/syscalls_32.S +++ b/arch/sh/kernel/syscalls_32.S @@ -10,7 +10,7 @@ #include #include -#define __SYSCALL(nr, entry, nargs) .long entry +#define __SYSCALL(nr, entry) .long entry .data ENTRY(sys_call_table) #include diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index a8e5c0e00fca..70621324db41 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -128,6 +128,9 @@ static pmd_t * __init one_md_table_init(pud_t *pud) pmd_t *pmd; pmd = 
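/*
 * Userspace-side illustration (hypothetical code, not part of the patch)
 * of what the new 64-bit-time rows 403-423 added above provide on 32-bit
 * ABIs: try the clock_gettime64 syscall with a 64-bit timespec and fall
 * back to the legacy 32-bit call on older kernels.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

struct example_timespec64 {		/* mirrors __kernel_timespec */
	int64_t tv_sec;
	long long tv_nsec;
};

static int example_clock_gettime64(clockid_t clk, struct example_timespec64 *ts)
{
#ifdef __NR_clock_gettime64
	if (syscall(__NR_clock_gettime64, clk, ts) == 0)
		return 0;
	if (errno != ENOSYS)
		return -1;
#endif
	{
		struct timespec ts32;

		if (syscall(__NR_clock_gettime, clk, &ts32))
			return -1;
		ts->tv_sec = ts32.tv_sec;
		ts->tv_nsec = ts32.tv_nsec;
		return 0;
	}
}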
memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!pmd) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); pud_populate(&init_mm, pud, pmd); BUG_ON(pmd != pmd_offset(pud, 0)); } @@ -141,6 +144,9 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) pte_t *pte; pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); pmd_populate_kernel(&init_mm, pmd, pte); BUG_ON(pte != pte_offset_kernel(pmd, 0)); } @@ -192,24 +198,16 @@ void __init page_table_range_init(unsigned long start, unsigned long end, void __init allocate_pgdat(unsigned int nid) { unsigned long start_pfn, end_pfn; -#ifdef CONFIG_NEED_MULTIPLE_NODES - unsigned long phys; -#endif get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); #ifdef CONFIG_NEED_MULTIPLE_NODES - phys = __memblock_alloc_base(sizeof(struct pglist_data), - SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT); - /* Retry with all of system memory */ - if (!phys) - phys = __memblock_alloc_base(sizeof(struct pglist_data), - SMP_CACHE_BYTES, memblock_end_of_DRAM()); - if (!phys) + NODE_DATA(nid) = memblock_alloc_try_nid( + sizeof(struct pglist_data), + SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, + MEMBLOCK_ALLOC_ACCESSIBLE, nid); + if (!NODE_DATA(nid)) panic("Can't allocate pgdat for node %d\n", nid); - - NODE_DATA(nid) = __va(phys); - memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); #endif NODE_DATA(nid)->node_start_pfn = start_pfn; diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c index 830e8b3684e4..f7e4439deb17 100644 --- a/arch/sh/mm/numa.c +++ b/arch/sh/mm/numa.c @@ -41,9 +41,12 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end) __add_active_range(nid, start_pfn, end_pfn); /* Node-local pgdat */ - NODE_DATA(nid) = __va(memblock_alloc_base(sizeof(struct pglist_data), - SMP_CACHE_BYTES, end)); - memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); + NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data), + SMP_CACHE_BYTES, nid); + if (!NODE_DATA(nid)) + panic("%s: Failed to allocate %zu bytes align=0x%x nid=%d\n", + __func__, sizeof(struct pglist_data), SMP_CACHE_BYTES, + nid); NODE_DATA(nid)->node_start_pfn = start_pfn; NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index d5dd652fb8cc..40f8f4f73fe8 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -49,6 +49,7 @@ config SPARC config SPARC32 def_bool !64BIT + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select GENERIC_ATOMIC64 select CLZ_TAB diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig index 207a43a2d8b3..2d4f34c52c67 100644 --- a/arch/sparc/configs/sparc32_defconfig +++ b/arch/sparc/configs/sparc32_defconfig @@ -75,7 +75,6 @@ CONFIG_NFS_FS=y CONFIG_ROOT_NFS=y CONFIG_RPCSEC_GSS_KRB5=m CONFIG_NLS=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_SCHED_DEBUG is not set diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 4d4e1cc6402f..ea547d596fcf 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -201,7 +201,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_HUGETLBFS=y CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_LOCKUP_DETECTOR=y diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index 
56499ea39fd3..4884315daff4 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -53,7 +53,7 @@ static int des_set_key(struct crypto_tfm *tfm, const u8 *key, * weak key detection code. */ ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -209,7 +209,7 @@ static int des3_ede_set_key(struct crypto_tfm *tfm, const u8 *key, if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 5153798051fb..d6d8413eca83 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -25,7 +25,6 @@ #define KERNEL_DS ((mm_segment_t) { 0 }) #define USER_DS ((mm_segment_t) { -1 }) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 87ae9ffb1521..bf9d330073b2 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -31,7 +31,6 @@ #define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) -#define get_ds() (KERNEL_DS) #define segment_eq(a, b) ((a).seg == (b).seg) diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h index 5194d86ef72d..1e66278ba4a5 100644 --- a/arch/sparc/include/asm/unistd.h +++ b/arch/sparc/include/asm/unistd.h @@ -30,8 +30,8 @@ #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_PAUSE #define __ARCH_WANT_SYS_SIGNAL -#define __ARCH_WANT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_TIME32 +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_WAITPID #define __ARCH_WANT_SYS_SOCKETCALL #define __ARCH_WANT_SYS_FADVISE64 @@ -43,8 +43,8 @@ #ifdef __32bit_syscall_numbers__ #define __ARCH_WANT_SYS_IPC #else -#define __ARCH_WANT_COMPAT_SYS_TIME -#define __ARCH_WANT_SYS_UTIME32 +#define __ARCH_WANT_SYS_TIME +#define __ARCH_WANT_SYS_UTIME #define __ARCH_WANT_COMPAT_SYS_SENDFILE #endif @@ -59,9 +59,4 @@ #define __IGNORE_getresgid #endif -/* Sparc doesn't have protection keys. 
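/*
 * Illustrative sketch (hypothetical setup code, not part of the patch) of
 * the renamed flag checked in des_glue.c above: callers that want weak DES
 * keys rejected now set CRYPTO_TFM_REQ_FORBID_WEAK_KEYS.
 */
#include <linux/crypto.h>

static void example_forbid_weak_keys(struct crypto_tfm *tfm)
{
	crypto_tfm_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
}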
*/ -#define __IGNORE_pkey_mprotect -#define __IGNORE_pkey_alloc -#define __IGNORE_pkey_free - #endif /* _SPARC_UNISTD_H */ diff --git a/arch/sparc/include/uapi/asm/posix_types.h b/arch/sparc/include/uapi/asm/posix_types.h index fec499d6efb7..f139e0048628 100644 --- a/arch/sparc/include/uapi/asm/posix_types.h +++ b/arch/sparc/include/uapi/asm/posix_types.h @@ -19,6 +19,16 @@ typedef unsigned short __kernel_old_gid_t; typedef int __kernel_suseconds_t; #define __kernel_suseconds_t __kernel_suseconds_t +typedef long __kernel_long_t; +typedef unsigned long __kernel_ulong_t; +#define __kernel_long_t __kernel_long_t + +struct __kernel_old_timeval { + __kernel_long_t tv_sec; + __kernel_suseconds_t tv_usec; +}; +#define __kernel_old_timeval __kernel_old_timeval + #else /* sparc 32 bit */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 7ea35e5601b6..88fe4f978aca 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -3,6 +3,7 @@ #define _ASM_SOCKET_H #include +#include /* For setsockopt(2) */ #define SOL_SOCKET 0xffff @@ -20,8 +21,8 @@ #define SO_BSDCOMPAT 0x0400 #define SO_RCVLOWAT 0x0800 #define SO_SNDLOWAT 0x1000 -#define SO_RCVTIMEO 0x2000 -#define SO_SNDTIMEO 0x4000 +#define SO_RCVTIMEO_OLD 0x2000 +#define SO_SNDTIMEO_OLD 0x4000 #define SO_ACCEPTCONN 0x8000 #define SO_SNDBUF 0x1001 @@ -33,7 +34,6 @@ #define SO_PROTOCOL 0x1028 #define SO_DOMAIN 0x1029 - /* Linux specific, keep the same. */ #define SO_NO_CHECK 0x000b #define SO_PRIORITY 0x000c @@ -45,19 +45,12 @@ #define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 0x001c -#define SO_TIMESTAMP 0x001d -#define SCM_TIMESTAMP SO_TIMESTAMP #define SO_PEERSEC 0x001e #define SO_PASSSEC 0x001f -#define SO_TIMESTAMPNS 0x0021 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS #define SO_MARK 0x0022 -#define SO_TIMESTAMPING 0x0023 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - #define SO_RXQ_OVFL 0x0024 #define SO_WIFI_STATUS 0x0025 @@ -104,9 +97,47 @@ #define SO_TXTIME 0x003f #define SCM_TXTIME SO_TXTIME +#define SO_BINDTOIFINDEX 0x0041 + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 #define SO_SECURITY_ENCRYPTION_NETWORK 0x5004 +#define SO_TIMESTAMP_OLD 0x001d +#define SO_TIMESTAMPNS_OLD 0x0021 +#define SO_TIMESTAMPING_OLD 0x0023 + +#define SO_TIMESTAMP_NEW 0x0046 +#define SO_TIMESTAMPNS_NEW 0x0042 +#define SO_TIMESTAMPING_NEW 0x0043 + +#define SO_RCVTIMEO_NEW 0x0044 +#define SO_SNDTIMEO_NEW 0x0045 + +#if !defined(__KERNEL__) + + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? 
SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index b1a09080e8da..4ae7388b1bff 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -745,15 +745,12 @@ static int dma_4u_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; - if (device_mask > DMA_BIT_MASK(32)) - return 0; - if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask) + if (ali_sound_dma_hack(dev, device_mask)) return 1; -#ifdef CONFIG_PCI - if (dev_is_pci(dev)) - return pci64_dma_supported(to_pci_dev(dev), device_mask); -#endif - return 0; + + if (device_mask < iommu->dma_addr_mask) + return 0; + return 1; } static const struct dma_map_ops sun4u_dma_ops = { diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h index ddffd368e057..f6f498ba3198 100644 --- a/arch/sparc/kernel/kernel.h +++ b/arch/sparc/kernel/kernel.h @@ -45,7 +45,11 @@ void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs); /* pci.c */ -int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); +#ifdef CONFIG_PCI +int ali_sound_dma_hack(struct device *dev, u64 device_mask); +#else +#define ali_sound_dma_hack(dev, mask) (0) +#endif /* signal32.c */ void do_sigreturn32(struct pt_regs *regs); diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index bcfec6a85d23..5ed43828e078 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -956,51 +956,35 @@ void arch_teardown_msi_irq(unsigned int irq) } #endif /* !(CONFIG_PCI_MSI) */ -static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) +/* ALI sound chips generate 31-bits of DMA, a special register + * determines what bit 31 is emitted as. + */ +int ali_sound_dma_hack(struct device *dev, u64 device_mask) { + struct iommu *iommu = dev->archdata.iommu; struct pci_dev *ali_isa_bridge; u8 val; - /* ALI sound chips generate 31-bits of DMA, a special register - * determines what bit 31 is emitted as. 
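/*
 * Userspace-side illustration (hypothetical, not part of the patch) of the
 * SO_TIMESTAMP_OLD/_NEW split added to the sparc socket.h above:
 * application code keeps using SO_TIMESTAMP, which the header resolves to
 * the _NEW value when a 32-bit userland has a 64-bit time_t.
 */
#include <sys/socket.h>

static int example_enable_rx_timestamps(int fd)
{
	int on = 1;

	/* SO_TIMESTAMP picks _OLD or _NEW based on sizeof(time_t) */
	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
}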
- */ + if (!dev_is_pci(dev)) + return 0; + + if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL || + to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 || + device_mask != 0x7fffffff) + return 0; + ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); pci_read_config_byte(ali_isa_bridge, 0x7e, &val); - if (set_bit) + if (iommu->dma_addr_mask & 0x80000000) val |= 0x01; else val &= ~0x01; pci_write_config_byte(ali_isa_bridge, 0x7e, val); pci_dev_put(ali_isa_bridge); -} - -int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask) -{ - u64 dma_addr_mask; - - if (pdev == NULL) { - dma_addr_mask = 0xffffffff; - } else { - struct iommu *iommu = pdev->dev.archdata.iommu; - - dma_addr_mask = iommu->dma_addr_mask; - - if (pdev->vendor == PCI_VENDOR_ID_AL && - pdev->device == PCI_DEVICE_ID_AL_M5451 && - device_mask == 0x7fffffff) { - ali_sound_dma_hack(pdev, - (dma_addr_mask & 0x80000000) != 0); - return 1; - } - } - - if (device_mask >= (1UL << 32UL)) - return 0; - - return (device_mask & dma_addr_mask) == dma_addr_mask; + return 1; } void pci_resource_to_user(const struct pci_dev *pdev, int bar, diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c index be71ae086622..0ca08d455e80 100644 --- a/arch/sparc/kernel/pci_fire.c +++ b/arch/sparc/kernel/pci_fire.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -416,7 +417,7 @@ static int pci_fire_pbm_init(struct pci_pbm_info *pbm, struct device_node *dp = op->dev.of_node; int err; - pbm->numa_node = -1; + pbm->numa_node = NUMA_NO_NODE; pbm->pci_ops = &sun4u_pci_ops; pbm->config_space_reg_bits = 12; diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 934b97c72f7c..421aba00e6b0 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -1347,7 +1348,7 @@ static int schizo_pbm_init(struct pci_pbm_info *pbm, pbm->next = pci_pbm_root; pci_pbm_root = pbm; - pbm->numa_node = -1; + pbm->numa_node = NUMA_NO_NODE; pbm->pci_ops = &sun4u_pci_ops; pbm->config_space_reg_bits = 8; diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index fa0e42b4cbfb..a8af6023c126 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -92,7 +92,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask) prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); while (npages != 0) { - if (mask <= DMA_BIT_MASK(32)) { + if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) { num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), npages, @@ -208,7 +208,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, atu = iommu->atu; mask = dev->coherent_dma_mask; - if (mask <= DMA_BIT_MASK(32)) + if (mask <= DMA_BIT_MASK(32) || !atu) tbl = &iommu->tbl; else tbl = &atu->tbl; @@ -674,18 +674,12 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, static int dma_4v_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; - u64 dma_addr_mask = iommu->dma_addr_mask; - - if (device_mask > DMA_BIT_MASK(32)) { - if (iommu->atu) - dma_addr_mask = iommu->atu->dma_addr_mask; - else - return 0; - } - if ((device_mask & dma_addr_mask) == dma_addr_mask) + if (ali_sound_dma_hack(dev, device_mask)) return 1; - return pci64_dma_supported(to_pci_dev(dev), device_mask); + if (device_mask < iommu->dma_addr_mask) + return 0; + return 1; } static const struct dma_map_ops sun4v_dma_ops = 
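/*
 * Illustrative condensation (hypothetical helper, assuming the sparc64
 * struct iommu/struct atu layout referenced above) of the table-selection
 * rule the pci_sun4v hunks apply: use the legacy IOMMU table whenever the
 * DMA mask is 32-bit or no ATU is present, otherwise the ATU table.
 */
#include <linux/dma-mapping.h>
#include <asm/iommu.h>

static struct iommu_map_table *example_pick_tbl(struct iommu *iommu, u64 mask)
{
	if (mask <= DMA_BIT_MASK(32) || !iommu->atu)
		return &iommu->tbl;
	return &iommu->atu->tbl;
}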
{ diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index 42d7f2a7da6d..869b16c96157 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -32,9 +32,9 @@ void * __init prom_early_alloc(unsigned long size) { void *ret; - ret = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL); - if (ret != NULL) - memset(ret, 0, size); + ret = memblock_alloc(size, SMP_CACHE_BYTES); + if (!ret) + panic("%s: Failed to allocate %lu bytes\n", __func__, size); prom_early_allocated += size; diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index e897a4ded3a1..c50ff1fee0e6 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -34,16 +34,13 @@ void * __init prom_early_alloc(unsigned long size) { - unsigned long paddr = memblock_phys_alloc(size, SMP_CACHE_BYTES); - void *ret; + void *ret = memblock_alloc(size, SMP_CACHE_BYTES); - if (!paddr) { + if (!ret) { prom_printf("prom_early_alloc(%lu) failed\n", size); prom_halt(); } - ret = __va(paddr); - memset(ret, 0, size); prom_early_allocated += size; return ret; diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c index 81aa91e5c0e6..e90bcb6bad7f 100644 --- a/arch/sparc/kernel/psycho_common.c +++ b/arch/sparc/kernel/psycho_common.c @@ -5,6 +5,7 @@ */ #include #include +#include #include @@ -454,7 +455,7 @@ void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct platform_device *op struct device_node *dp = op->dev.of_node; pbm->name = dp->full_name; - pbm->numa_node = -1; + pbm->numa_node = NUMA_NO_NODE; pbm->chip_type = chip_type; pbm->chip_version = of_getintprop_default(dp, "version#", 0); pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0); diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index 41c5deb581b8..32141e1006c4 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -561,7 +562,7 @@ static void __init sbus_iommu_init(struct platform_device *op) op->dev.archdata.iommu = iommu; op->dev.archdata.stc = strbuf; - op->dev.archdata.numa_node = -1; + op->dev.archdata.numa_node = NUMA_NO_NODE; reg_base = regs + SYSIO_IOMMUREG_BASE; iommu->iommu_control = reg_base + IOMMU_CONTROL; diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 51c4d12c0853..fd2182a5c32d 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -624,8 +624,14 @@ void __init alloc_irqstack_bootmem(void) softirq_stack[i] = memblock_alloc_node(THREAD_SIZE, THREAD_SIZE, node); + if (!softirq_stack[i]) + panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n", + __func__, THREAD_SIZE, THREAD_SIZE, node); hardirq_stack[i] = memblock_alloc_node(THREAD_SIZE, THREAD_SIZE, node); + if (!hardirq_stack[i]) + panic("%s: Failed to allocate %lu bytes align=%lx nid=%d\n", + __func__, THREAD_SIZE, THREAD_SIZE, node); } } diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index f45d876983f1..a8275fea4b70 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -1628,6 +1628,8 @@ static void __init pcpu_populate_pte(unsigned long addr) pud_t *new; new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; pgd_populate(&init_mm, pgd, new); } @@ -1636,6 +1638,8 @@ static void __init pcpu_populate_pte(unsigned long addr) pmd_t *new; new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; pud_populate(&init_mm, pud, new); } 
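/*
 * Illustrative note (hypothetical struct, not part of the patch): the
 * numa_node initialisations in psycho_common.c and sbus.c above replace
 * the bare -1 with NUMA_NO_NODE from <linux/numa.h>; the same pattern in
 * a minimal form:
 */
#include <linux/numa.h>

struct example_bus { int numa_node; };

static void example_bus_init(struct example_bus *bus)
{
	bus->numa_node = NUMA_NO_NODE;	/* no node affinity known */
}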
@@ -1644,8 +1648,16 @@ static void __init pcpu_populate_pte(unsigned long addr) pte_t *new; new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; pmd_populate_kernel(&init_mm, pmd, new); } + + return; + +err_alloc: + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); } void __init setup_per_cpu_areas(void) diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 274ed0b9b3e0..9825ca6a6020 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -28,8 +28,9 @@ #include #include #include - +#include #include + #include #include @@ -344,7 +345,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second goto out; case SEMTIMEDOP: err = sys_semtimedop(first, ptr, (unsigned int)second, - (const struct timespec __user *) + (const struct __kernel_timespec __user *) (unsigned long) fifth); goto out; case SEMGET: @@ -544,6 +545,62 @@ out_unlock: return err; } +SYSCALL_DEFINE1(sparc_adjtimex, struct timex __user *, txc_p) +{ + struct timex txc; /* Local copy of parameter */ + struct __kernel_timex *kt = (void *)&txc; + int ret; + + /* Copy the user data space into the kernel copy + * structure. But bear in mind that the structures + * may change + */ + if (copy_from_user(&txc, txc_p, sizeof(struct timex))) + return -EFAULT; + + /* + * override for sparc64 specific timeval type: tv_usec + * is 32 bit wide instead of 64-bit in __kernel_timex + */ + kt->time.tv_usec = txc.time.tv_usec; + ret = do_adjtimex(kt); + txc.time.tv_usec = kt->time.tv_usec; + + return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret; +} + +SYSCALL_DEFINE2(sparc_clock_adjtime, const clockid_t, which_clock,struct timex __user *, txc_p) +{ + struct timex txc; /* Local copy of parameter */ + struct __kernel_timex *kt = (void *)&txc; + int ret; + + if (!IS_ENABLED(CONFIG_POSIX_TIMERS)) { + pr_err_once("process %d (%s) attempted a POSIX timer syscall " + "while CONFIG_POSIX_TIMERS is not set\n", + current->pid, current->comm); + + return -ENOSYS; + } + + /* Copy the user data space into the kernel copy + * structure. But bear in mind that the structures + * may change + */ + if (copy_from_user(&txc, txc_p, sizeof(struct timex))) + return -EFAULT; + + /* + * override for sparc64 specific timeval type: tv_usec + * is 32 bit wide instead of 64-bit in __kernel_timex + */ + kt->time.tv_usec = txc.time.tv_usec; + ret = do_clock_adjtime(which_clock, kt); + txc.time.tv_usec = kt->time.tv_usec; + + return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? 
-EFAULT : ret; +} + SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type, utrap_handler_t, new_p, utrap_handler_t, new_d, utrap_handler_t __user *, old_p, diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index c8c77c05ea97..b9a5a04b2d2c 100644 --- a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -44,7 +44,8 @@ 28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack 29 32 pause sys_pause 29 64 pause sys_nis_syscall -30 common utime sys_utime compat_sys_utime +30 32 utime sys_utime32 +30 64 utime sys_utime 31 32 lchown32 sys_lchown 32 32 fchown32 sys_fchown 33 common access sys_access @@ -128,7 +129,8 @@ 102 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction 103 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask 104 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending -105 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait +105 32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 +105 64 rt_sigtimedwait sys_rt_sigtimedwait 106 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 107 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 108 32 setresuid32 sys_setresuid @@ -168,11 +170,13 @@ 135 common socketpair sys_socketpair 136 common mkdir sys_mkdir 137 common rmdir sys_rmdir -138 common utimes sys_utimes compat_sys_utimes +138 32 utimes sys_utimes_time32 +138 64 utimes sys_utimes 139 common stat64 sys_stat64 compat_sys_stat64 140 common sendfile64 sys_sendfile64 141 common getpeername sys_getpeername -142 common futex sys_futex compat_sys_futex +142 32 futex sys_futex_time32 +142 64 futex sys_futex 143 common gettid sys_gettid 144 common getrlimit sys_getrlimit compat_sys_getrlimit 145 common setrlimit sys_setrlimit compat_sys_setrlimit @@ -258,7 +262,8 @@ 216 64 sigreturn sys_nis_syscall 217 common clone sys_clone 218 common ioprio_get sys_ioprio_get -219 common adjtimex sys_adjtimex compat_sys_adjtimex +219 32 adjtimex sys_adjtimex_time32 +219 64 adjtimex sys_sparc_adjtimex 220 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask 220 64 sigprocmask sys_nis_syscall 221 common create_module sys_ni_syscall @@ -271,9 +276,10 @@ 228 common setfsuid sys_setfsuid16 229 common setfsgid sys_setfsgid16 230 common _newselect sys_select compat_sys_select -231 32 time sys_time compat_sys_time +231 32 time sys_time32 232 common splice sys_splice -233 common stime sys_stime compat_sys_stime +233 32 stime sys_stime32 +233 64 stime sys_stime 234 common statfs64 sys_statfs64 compat_sys_statfs64 235 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 236 common _llseek sys_llseek @@ -288,8 +294,10 @@ 245 common sched_yield sys_sched_yield 246 common sched_get_priority_max sys_sched_get_priority_max 247 common sched_get_priority_min sys_sched_get_priority_min -248 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval -249 common nanosleep sys_nanosleep compat_sys_nanosleep +248 32 sched_rr_get_interval sys_sched_rr_get_interval_time32 +248 64 sched_rr_get_interval sys_sched_rr_get_interval +249 32 nanosleep sys_nanosleep_time32 +249 64 nanosleep sys_nanosleep 250 32 mremap sys_mremap 250 64 mremap sys_64_mremap 251 common _sysctl sys_sysctl compat_sys_sysctl @@ -298,14 +306,20 @@ 254 32 nfsservctl sys_ni_syscall sys_nis_syscall 254 64 nfsservctl sys_nis_syscall 255 common sync_file_range sys_sync_file_range compat_sys_sync_file_range -256 common clock_settime sys_clock_settime 
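/*
 * Userspace-side illustration (hypothetical, not part of the patch): the
 * sparc64-specific sys_sparc_adjtimex wrapper above preserves the existing
 * struct timex ABI, so an ordinary adjtimex(2) query is unchanged for
 * applications.
 */
#include <stdio.h>
#include <sys/timex.h>

static int example_read_clock_state(void)
{
	struct timex tx = { .modes = 0 };	/* read-only query */
	int state = adjtimex(&tx);

	if (state >= 0)
		printf("freq: %ld, clock state: %d\n", tx.freq, state);
	return state;
}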
compat_sys_clock_settime -257 common clock_gettime sys_clock_gettime compat_sys_clock_gettime -258 common clock_getres sys_clock_getres compat_sys_clock_getres -259 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep +256 32 clock_settime sys_clock_settime32 +256 64 clock_settime sys_clock_settime +257 32 clock_gettime sys_clock_gettime32 +257 64 clock_gettime sys_clock_gettime +258 32 clock_getres sys_clock_getres_time32 +258 64 clock_getres sys_clock_getres +259 32 clock_nanosleep sys_clock_nanosleep_time32 +259 64 clock_nanosleep sys_clock_nanosleep 260 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity 261 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity -262 common timer_settime sys_timer_settime compat_sys_timer_settime -263 common timer_gettime sys_timer_gettime compat_sys_timer_gettime +262 32 timer_settime sys_timer_settime32 +262 64 timer_settime sys_timer_settime +263 32 timer_gettime sys_timer_gettime32 +263 64 timer_gettime sys_timer_gettime 264 common timer_getoverrun sys_timer_getoverrun 265 common timer_delete sys_timer_delete 266 common timer_create sys_timer_create compat_sys_timer_create @@ -315,11 +329,14 @@ 269 common io_destroy sys_io_destroy 270 common io_submit sys_io_submit compat_sys_io_submit 271 common io_cancel sys_io_cancel -272 common io_getevents sys_io_getevents compat_sys_io_getevents +272 32 io_getevents sys_io_getevents_time32 +272 64 io_getevents sys_io_getevents 273 common mq_open sys_mq_open compat_sys_mq_open 274 common mq_unlink sys_mq_unlink -275 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend -276 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive +275 32 mq_timedsend sys_mq_timedsend_time32 +275 64 mq_timedsend sys_mq_timedsend +276 32 mq_timedreceive sys_mq_timedreceive_time32 +276 64 mq_timedreceive sys_mq_timedreceive 277 common mq_notify sys_mq_notify compat_sys_mq_notify 278 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr 279 common waitid sys_waitid compat_sys_waitid @@ -331,7 +348,8 @@ 285 common mkdirat sys_mkdirat 286 common mknodat sys_mknodat 287 common fchownat sys_fchownat -288 common futimesat sys_futimesat compat_sys_futimesat +288 32 futimesat sys_futimesat_time32 +288 64 futimesat sys_futimesat 289 common fstatat64 sys_fstatat64 compat_sys_fstatat64 290 common unlinkat sys_unlinkat 291 common renameat sys_renameat @@ -340,8 +358,10 @@ 294 common readlinkat sys_readlinkat 295 common fchmodat sys_fchmodat 296 common faccessat sys_faccessat -297 common pselect6 sys_pselect6 compat_sys_pselect6 -298 common ppoll sys_ppoll compat_sys_ppoll +297 32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 +297 64 pselect6 sys_pselect6 +298 32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 +298 64 ppoll sys_ppoll 299 common unshare sys_unshare 300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list 301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list @@ -353,13 +373,16 @@ 307 common move_pages sys_move_pages compat_sys_move_pages 308 common getcpu sys_getcpu 309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait -310 common utimensat sys_utimensat compat_sys_utimensat +310 32 utimensat sys_utimensat_time32 +310 64 utimensat sys_utimensat 311 common signalfd sys_signalfd compat_sys_signalfd 312 common timerfd_create sys_timerfd_create 313 common eventfd sys_eventfd 314 common fallocate sys_fallocate compat_sys_fallocate -315 common timerfd_settime sys_timerfd_settime 
compat_sys_timerfd_settime -316 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime +315 32 timerfd_settime sys_timerfd_settime32 +315 64 timerfd_settime sys_timerfd_settime +316 32 timerfd_gettime sys_timerfd_gettime32 +316 64 timerfd_gettime sys_timerfd_gettime 317 common signalfd4 sys_signalfd4 compat_sys_signalfd4 318 common eventfd2 sys_eventfd2 319 common epoll_create1 sys_epoll_create1 @@ -371,13 +394,15 @@ 325 common pwritev sys_pwritev compat_sys_pwritev 326 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo 327 common perf_event_open sys_perf_event_open -328 common recvmmsg sys_recvmmsg compat_sys_recvmmsg +328 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 +328 64 recvmmsg sys_recvmmsg 329 common fanotify_init sys_fanotify_init 330 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 331 common prlimit64 sys_prlimit64 332 common name_to_handle_at sys_name_to_handle_at 333 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at -334 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime +334 32 clock_adjtime sys_clock_adjtime32 +334 64 clock_adjtime sys_sparc_clock_adjtime 335 common syncfs sys_syncfs 336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg 337 common setns sys_setns @@ -406,4 +431,41 @@ 358 common preadv2 sys_preadv2 compat_sys_preadv2 359 common pwritev2 sys_pwritev2 compat_sys_pwritev2 360 common statx sys_statx -361 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents +361 32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents +361 64 io_pgetevents sys_io_pgetevents +362 common pkey_mprotect sys_pkey_mprotect +363 common pkey_alloc sys_pkey_alloc +364 common pkey_free sys_pkey_free +365 common rseq sys_rseq +# room for arch specific syscalls +392 64 semtimedop sys_semtimedop +393 common semget sys_semget +394 common semctl sys_semctl compat_sys_semctl +395 common shmget sys_shmget +396 common shmctl sys_shmctl compat_sys_shmctl +397 common shmat sys_shmat compat_sys_shmat +398 common shmdt sys_shmdt +399 common msgget sys_msgget +400 common msgsnd sys_msgsnd compat_sys_msgsnd +401 common msgrcv sys_msgrcv compat_sys_msgrcv +402 common msgctl sys_msgctl compat_sys_msgctl +403 32 clock_gettime64 sys_clock_gettime sys_clock_gettime +404 32 clock_settime64 sys_clock_settime sys_clock_settime +405 32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime +406 32 clock_getres_time64 sys_clock_getres sys_clock_getres +407 32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep +408 32 timer_gettime64 sys_timer_gettime sys_timer_gettime +409 32 timer_settime64 sys_timer_settime sys_timer_settime +410 32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime +411 32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime +412 32 utimensat_time64 sys_utimensat sys_utimensat +413 32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 +414 32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 +416 32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents +417 32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 +418 32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend +419 32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive +420 32 semtimedop_time64 sys_semtimedop sys_semtimedop +421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 +422 32 futex_time64 sys_futex sys_futex +423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval diff --git 
a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index d900952bfc5f..a8ff29821bdb 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -264,7 +264,7 @@ void __init mem_init(void) i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5); i += 1; sparc_valid_addr_bitmap = (unsigned long *) - memblock_alloc_from(i << 2, SMP_CACHE_BYTES, 0UL); + memblock_alloc(i << 2, SMP_CACHE_BYTES); if (sparc_valid_addr_bitmap == NULL) { prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index b4221d3727d0..f2d70ff7a284 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -976,13 +976,13 @@ static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid) { int prev_nid, new_nid; - prev_nid = -1; + prev_nid = NUMA_NO_NODE; for ( ; start < end; start += PAGE_SIZE) { for (new_nid = 0; new_nid < num_node_masks; new_nid++) { struct node_mem_mask *p = &node_masks[new_nid]; if ((start & p->mask) == p->match) { - if (prev_nid == -1) + if (prev_nid == NUMA_NO_NODE) prev_nid = new_nid; break; } @@ -1089,16 +1089,13 @@ static void __init allocate_node_data(int nid) struct pglist_data *p; unsigned long start_pfn, end_pfn; #ifdef CONFIG_NEED_MULTIPLE_NODES - unsigned long paddr; - paddr = memblock_phys_alloc_try_nid(sizeof(struct pglist_data), - SMP_CACHE_BYTES, nid); - if (!paddr) { + NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data), + SMP_CACHE_BYTES, nid); + if (!NODE_DATA(nid)) { prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid); prom_halt(); } - NODE_DATA(nid) = __va(paddr); - memset(NODE_DATA(nid), 0, sizeof(struct pglist_data)); NODE_DATA(nid)->node_id = nid; #endif @@ -1208,7 +1205,7 @@ int of_node_to_nid(struct device_node *dp) md = mdesc_grab(); count = 0; - nid = -1; + nid = NUMA_NO_NODE; mdesc_for_each_node_by_name(md, grp, "group") { if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) { nid = count; @@ -1812,6 +1809,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart, new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; alloc_bytes += PAGE_SIZE; pgd_populate(&init_mm, pgd, new); } @@ -1825,6 +1824,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart, } new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; alloc_bytes += PAGE_SIZE; pud_populate(&init_mm, pud, new); } @@ -1839,6 +1840,8 @@ static unsigned long __ref kernel_map_range(unsigned long pstart, } new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + if (!new) + goto err_alloc; alloc_bytes += PAGE_SIZE; pmd_populate_kernel(&init_mm, pmd, new); } @@ -1858,6 +1861,11 @@ static unsigned long __ref kernel_map_range(unsigned long pstart, } return alloc_bytes; + +err_alloc: + panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + return -ENOMEM; } static void __init flush_all_kernel_tsbs(void) diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index b609362e846f..aaebbc00d262 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -303,13 +303,19 @@ static void __init srmmu_nocache_init(void) bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; - srmmu_nocache_pool = memblock_alloc_from(srmmu_nocache_size, - SRMMU_NOCACHE_ALIGN_MAX, 0UL); + srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size, + SRMMU_NOCACHE_ALIGN_MAX); + if (!srmmu_nocache_pool) + panic("%s: Failed to allocate %lu bytes align=0x%x\n", + __func__, 
srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); srmmu_nocache_bitmap = - memblock_alloc_from(BITS_TO_LONGS(bitmap_bits) * sizeof(long), - SMP_CACHE_BYTES, 0UL); + memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long), + SMP_CACHE_BYTES); + if (!srmmu_nocache_bitmap) + panic("%s: Failed to allocate %zu bytes\n", __func__, + BITS_TO_LONGS(bitmap_bits) * sizeof(long)); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); @@ -467,7 +473,9 @@ static void __init sparc_context_init(int numctx) unsigned long size; size = numctx * sizeof(struct ctx_list); - ctx_list_pool = memblock_alloc_from(size, SMP_CACHE_BYTES, 0UL); + ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES); + if (!ctx_list_pool) + panic("%s: Failed to allocate %lu bytes\n", __func__, size); for (ctx = 0; ctx < numctx; ctx++) { struct ctx_list *clist; diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index d80cfb1d9430..6e5be5fb4143 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -649,6 +649,9 @@ static int __init eth_setup(char *str) } new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); + if (!new) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*new)); INIT_LIST_HEAD(&new->list); new->index = n; diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 046fa9ea0ccc..596e7056f376 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -1576,6 +1576,9 @@ static int __init vector_setup(char *str) return 1; } new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); + if (!new) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*new)); INIT_LIST_HEAD(&new->list); new->unit = n; new->arguments = str; diff --git a/arch/um/include/asm/a.out-core.h b/arch/um/include/asm/a.out-core.h deleted file mode 100644 index 995643b18309..000000000000 --- a/arch/um/include/asm/a.out-core.h +++ /dev/null @@ -1,27 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
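The memblock conversions above (sparc srmmu.c and the UML net/vector drivers) all follow the same early-boot pattern: switch from memblock_alloc_from(..., 0UL) to plain memblock_alloc(), then check the result and panic() with the caller name and the requested size, since there is no way to recover from an allocation failure this early. memblock_alloc() also hands back zeroed memory, which is why some follow-up memset() calls elsewhere in this series can be dropped. A minimal user-space analogue of the pattern, with a hypothetical xzalloc_or_die() helper standing in for memblock_alloc() plus panic():

/* Illustrative user-space analogue of the early-boot "allocate or die"
 * pattern added above; xzalloc_or_die() is a hypothetical helper, not a
 * kernel API. In the kernel the allocation is memblock_alloc() and the
 * failure path is panic(), since nothing can recover this early in boot.
 */
#include <stdio.h>
#include <stdlib.h>

static void *xzalloc_or_die(size_t size, const char *caller)
{
	void *p = calloc(1, size);	/* like memblock_alloc(), returns zeroed memory */

	if (!p) {
		/* mirrors: panic("%s: Failed to allocate %zu bytes\n", __func__, size); */
		fprintf(stderr, "%s: Failed to allocate %zu bytes\n", caller, size);
		exit(1);
	}
	return p;
}

int main(void)
{
	struct { long start, end; } *res = xzalloc_or_die(sizeof(*res), __func__);

	res->start = 0;
	res->end = 4096 - 1;
	printf("resource [%ld-%ld]\n", res->start, res->end);
	free(res);
	return 0;
}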
- */ - -#ifndef __UM_A_OUT_CORE_H -#define __UM_A_OUT_CORE_H - -#ifdef __KERNEL__ - -#include - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u) -{ -} - -#endif /* __KERNEL__ */ -#endif /* __UM_A_OUT_CORE_H */ diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c index ce169ea87e61..1dcd310cb34d 100644 --- a/arch/um/kernel/initrd.c +++ b/arch/um/kernel/initrd.c @@ -37,6 +37,8 @@ int __init read_initrd(void) } area = memblock_alloc(size, SMP_CACHE_BYTES); + if (!area) + panic("%s: Failed to allocate %llu bytes\n", __func__, size); if (load_initrd(initrd, area, size) == -1) return 0; diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 799b571a8f88..99aa11bf53d1 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -66,6 +66,10 @@ static void __init one_page_table_init(pmd_t *pmd) if (pmd_none(*pmd)) { pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + set_pmd(pmd, __pmd(_KERNPG_TABLE + (unsigned long) __pa(pte))); if (pte != pte_offset_kernel(pmd, 0)) @@ -77,6 +81,10 @@ static void __init one_md_table_init(pud_t *pud) { #ifdef CONFIG_3_LEVEL_PGTABLES pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pmd_table) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table))); if (pmd_table != pmd_offset(pud, 0)) BUG(); @@ -126,6 +134,10 @@ static void __init fixaddr_user_init( void) fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir); v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE); + if (!v) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, size, PAGE_SIZE); + memcpy((void *) v , (void *) FIXADDR_USER_START, size); p = __pa(v); for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE, @@ -146,6 +158,10 @@ void __init paging_init(void) empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!empty_zero_page) + panic("%s: Failed to allocate %lu bytes align=%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + for (i = 0; i < ARRAY_SIZE(zones_size); i++) zones_size[i] = 0; diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index c3a41bfe161b..817d82608712 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -1,10 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 config UNICORE32 def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO - select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select GENERIC_ATOMIC64 diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1372553dc0a9..1d1544b6ca74 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -28,6 +28,7 @@ generic-y += preempt.h generic-y += sections.h generic-y += segment.h generic-y += serial.h +generic-y += shmparam.h generic-y += sizes.h generic-y += syscalls.h generic-y += topology.h diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild @@ -1,5 +1,4 @@ include include/uapi/asm-generic/Kbuild.asm generic-y += kvm_para.h -generic-y += shmparam.h generic-y += 
ucontext.h diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h index 1e8fe5941b8a..54a7378a70b1 100644 --- a/arch/unicore32/include/uapi/asm/unistd.h +++ b/arch/unicore32/include/uapi/asm/unistd.h @@ -12,8 +12,10 @@ */ #define __ARCH_WANT_RENAMEAT +#define __ARCH_WANT_SET_GET_RLIMIT +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_TIME32_SYSCALLS /* Use the standard ABI for syscalls. */ #include -#define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_CLONE diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c index 4b0cb68c355a..d3239cf2e837 100644 --- a/arch/unicore32/kernel/setup.c +++ b/arch/unicore32/kernel/setup.c @@ -207,6 +207,10 @@ request_standard_resources(struct meminfo *mi) continue; res = memblock_alloc_low(sizeof(*res), SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes align=%x\n", + __func__, sizeof(*res), SMP_CACHE_BYTES); + res->name = "System RAM"; res->start = mi->bank[i].start; res->end = mi->bank[i].start + mi->bank[i].size - 1; diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c index 85ef2c624090..74b6a2e29809 100644 --- a/arch/unicore32/mm/init.c +++ b/arch/unicore32/mm/init.c @@ -274,30 +274,6 @@ void __init mem_init(void) memblock_free_all(); mem_init_print_info(NULL); - printk(KERN_NOTICE "Virtual kernel memory layout:\n" - " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" - " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" - " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" - " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" - " .init : 0x%p" " - 0x%p" " (%4d kB)\n" - " .text : 0x%p" " - 0x%p" " (%4d kB)\n" - " .data : 0x%p" " - 0x%p" " (%4d kB)\n", - - VECTORS_BASE, VECTORS_BASE + PAGE_SIZE, - DIV_ROUND_UP(PAGE_SIZE, SZ_1K), - VMALLOC_START, VMALLOC_END, - DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M), - PAGE_OFFSET, (unsigned long)high_memory, - DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M), - MODULES_VADDR, MODULES_END, - DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M), - - __init_begin, __init_end, - DIV_ROUND_UP((__init_end - __init_begin), SZ_1K), - _stext, _etext, - DIV_ROUND_UP((_etext - _stext), SZ_1K), - _sdata, _edata, - DIV_ROUND_UP((_edata - _sdata), SZ_1K)); BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); BUG_ON(TASK_SIZE > MODULES_VADDR); diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c index 040a8c279761..aa2060beb408 100644 --- a/arch/unicore32/mm/mmu.c +++ b/arch/unicore32/mm/mmu.c @@ -141,18 +141,17 @@ static void __init build_mem_type_table(void) #define vectors_base() (vectors_high() ? 0xffff0000 : 0) -static void __init *early_alloc(unsigned long sz) -{ - void *ptr = __va(memblock_phys_alloc(sz, sz)); - memset(ptr, 0, sz); - return ptr; -} - static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) { if (pmd_none(*pmd)) { - pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t)); + size_t size = PTRS_PER_PTE * sizeof(pte_t); + pte_t *pte = memblock_alloc(size, size); + + if (!pte) + panic("%s: Failed to allocate %zu bytes align=%zx\n", + __func__, size, size); + __pmd_populate(pmd, __pa(pte) | prot); } BUG_ON(pmd_bad(*pmd)); @@ -354,7 +353,10 @@ static void __init devicemaps_init(void) /* * Allocate the vector page early. 
*/ - vectors = early_alloc(PAGE_SIZE); + vectors = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!vectors) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); @@ -431,7 +433,10 @@ void __init paging_init(void) top_pmd = pmd_off_k(0xffff0000); /* allocate the zero page. */ - zero_page = early_alloc(PAGE_SIZE); + zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!zero_page) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); bootmem_init(); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 26387c7bf305..c1f9b3cf437c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -14,8 +14,6 @@ config X86_32 select ARCH_WANT_IPC_PARSE_VERSION select CLKSRC_I8253 select CLONE_BACKWARDS - select HAVE_AOUT - select HAVE_GENERIC_DMA_COHERENT select MODULES_USE_ELF_REL select OLD_SIGACTION @@ -47,6 +45,7 @@ config X86 select ACPI_LEGACY_TABLES_LOOKUP if ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ANON_INODES + select ARCH_32BIT_OFF_T if X86_32 select ARCH_CLOCKSOURCE_DATA select ARCH_CLOCKSOURCE_INIT select ARCH_DISCARD_MEMBLOCK @@ -446,12 +445,12 @@ config RETPOLINE branches. Requires a compiler with -mindirect-branch=thunk-extern support for full protection. The kernel may run slower. -config X86_RESCTRL - bool "Resource Control support" +config X86_CPU_RESCTRL + bool "x86 CPU resource control support" depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) select KERNFS help - Enable Resource Control support. + Enable x86 CPU resource control support. Provide support for the allocation and monitoring of system resources usage by the CPU. @@ -1510,6 +1509,7 @@ config AMD_MEM_ENCRYPT bool "AMD Secure Memory Encryption (SME) support" depends on X86_64 && CPU_SUP_AMD select DYNAMIC_PHYSICAL_MASK + select ARCH_USE_MEMREMAP_PROT ---help--- Say yes to enable support for the encryption of system memory. This requires an AMD processor that supports Secure Memory @@ -1529,10 +1529,6 @@ config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT If set to N, then the encryption of system memory can be activated with the mem_encrypt=on command line option. -config ARCH_USE_MEMREMAP_PROT - def_bool y - depends on AMD_MEM_ENCRYPT - # Common NUMA Features config NUMA bool "Numa Memory Allocation and Scheduler Support" @@ -2843,6 +2839,7 @@ config IA32_EMULATION config IA32_AOUT tristate "IA32 a.out support" depends on IA32_EMULATION + depends on BROKEN ---help--- Support old a.out binaries in the 32bit emulation. diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 0723dff17e6c..15d0fbe27872 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -40,16 +40,6 @@ config EARLY_PRINTK_DBGP with klogd/syslogd or the X server. You should normally say N here, unless you want to debug such a crash. You need usb debug device. -config EARLY_PRINTK_EFI - bool "Early printk via the EFI framebuffer" - depends on EFI && EARLY_PRINTK - select FONT_SUPPORT - ---help--- - Write kernel log output directly into the EFI framebuffer. - - This is useful for kernel debugging when your machine crashes very - early before the console code is initialized. 
- config EARLY_PRINTK_USB_XDBC bool "Early printk via the xHCI debug port" depends on EARLY_PRINTK && PCI diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 9c5a67d1b9c1..2d8b9d8ca4f8 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -187,7 +187,6 @@ cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc, cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) # does binutils support specific instructions? -asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1) asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) @@ -217,6 +216,11 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # Avoid indirect branches in kernel to deal with Spectre ifdef CONFIG_RETPOLINE KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) + # Additionally, avoid generating expensive indirect jumps which + # are subject to retpolines for small number of switch cases. + # clang turns off jump table generation by default when under + # retpoline builds, however, gcc does not for x86. + KBUILD_CFLAGS += $(call cc-option,--param=case-values-threshold=20) endif archscripts: scripts_basic diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 9b5adae9cc40..e2839b5c246c 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE AFLAGS_header.o += -I$(objtree)/$(obj) $(obj)/header.o: $(obj)/zoffset.h -LDFLAGS_setup.elf := -T +LDFLAGS_setup.elf := -m elf_i386 -T $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index f0515ac895a4..6b84afdd7538 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -84,6 +84,8 @@ ifdef CONFIG_X86_64 vmlinux-objs-y += $(obj)/pgtable_64.o endif +vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o + $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ diff --git a/arch/x86/boot/compressed/acpi.c b/arch/x86/boot/compressed/acpi.c new file mode 100644 index 000000000000..0ef4ad55b29b --- /dev/null +++ b/arch/x86/boot/compressed/acpi.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +#define BOOT_CTYPE_H +#include "misc.h" +#include "error.h" +#include "../string.h" + +#include +#include +#include + +/* + * Longest parameter of 'acpi=' is 'copy_dsdt', plus an extra '\0' + * for termination. + */ +#define MAX_ACPI_ARG_LENGTH 10 + +/* + * Immovable memory regions representation. Max amount of memory regions is + * MAX_NUMNODES*2. + */ +struct mem_vector immovable_mem[MAX_NUMNODES*2]; + +/* + * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex + * digits, and '\0' for termination. + */ +#define MAX_ADDR_LEN 19 + +static acpi_physical_address get_acpi_rsdp(void) +{ + acpi_physical_address addr = 0; + +#ifdef CONFIG_KEXEC + char val[MAX_ADDR_LEN] = { }; + int ret; + + ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN); + if (ret < 0) + return 0; + + if (kstrtoull(val, 16, &addr)) + return 0; +#endif + return addr; +} + +/* Search EFI system tables for RSDP. 
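The arch/x86/Makefile hunk above adds --param=case-values-threshold=20 under CONFIG_RETPOLINE. As its comment notes, GCC lowers dense switch statements to jump tables, i.e. indirect jumps, and with retpolines every indirect jump goes through a comparatively expensive thunk; raising the threshold keeps moderately sized switches as compare-and-branch chains (clang already avoids jump tables in retpoline builds). A toy classify() function, not taken from the kernel, shows the kind of code the parameter affects:

/* With default settings GCC typically lowers a dense switch like this to a
 * jump table (an indirect jmp); under retpolines that indirect jump becomes
 * a thunk, which is why the build above raises case-values-threshold.
 * Compare the output of:
 *   gcc -O2 -S switch.c
 *   gcc -O2 --param=case-values-threshold=20 -S switch.c
 * The function's behaviour is identical either way.
 */
int classify(int x)
{
	switch (x) {
	case 0:  return 10;
	case 1:  return 11;
	case 2:  return 12;
	case 3:  return 13;
	case 4:  return 14;
	case 5:  return 15;
	case 6:  return 16;
	case 7:  return 17;
	case 8:  return 18;
	case 9:  return 19;
	default: return -1;
	}
}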
*/ +static acpi_physical_address efi_get_rsdp_addr(void) +{ + acpi_physical_address rsdp_addr = 0; + +#ifdef CONFIG_EFI + unsigned long systab, systab_tables, config_tables; + unsigned int nr_tables; + struct efi_info *ei; + bool efi_64; + int size, i; + char *sig; + + ei = &boot_params->efi_info; + sig = (char *)&ei->efi_loader_signature; + + if (!strncmp(sig, EFI64_LOADER_SIGNATURE, 4)) { + efi_64 = true; + } else if (!strncmp(sig, EFI32_LOADER_SIGNATURE, 4)) { + efi_64 = false; + } else { + debug_putstr("Wrong EFI loader signature.\n"); + return 0; + } + + /* Get systab from boot params. */ +#ifdef CONFIG_X86_64 + systab = ei->efi_systab | ((__u64)ei->efi_systab_hi << 32); +#else + if (ei->efi_systab_hi || ei->efi_memmap_hi) { + debug_putstr("Error getting RSDP address: EFI system table located above 4GB.\n"); + return 0; + } + systab = ei->efi_systab; +#endif + if (!systab) + error("EFI system table not found."); + + /* Handle EFI bitness properly */ + if (efi_64) { + efi_system_table_64_t *stbl = (efi_system_table_64_t *)systab; + + config_tables = stbl->tables; + nr_tables = stbl->nr_tables; + size = sizeof(efi_config_table_64_t); + } else { + efi_system_table_32_t *stbl = (efi_system_table_32_t *)systab; + + config_tables = stbl->tables; + nr_tables = stbl->nr_tables; + size = sizeof(efi_config_table_32_t); + } + + if (!config_tables) + error("EFI config tables not found."); + + /* Get EFI tables from systab. */ + for (i = 0; i < nr_tables; i++) { + acpi_physical_address table; + efi_guid_t guid; + + config_tables += size; + + if (efi_64) { + efi_config_table_64_t *tbl = (efi_config_table_64_t *)config_tables; + + guid = tbl->guid; + table = tbl->table; + + if (!IS_ENABLED(CONFIG_X86_64) && table >> 32) { + debug_putstr("Error getting RSDP address: EFI config table located above 4GB.\n"); + return 0; + } + } else { + efi_config_table_32_t *tbl = (efi_config_table_32_t *)config_tables; + + guid = tbl->guid; + table = tbl->table; + } + + if (!(efi_guidcmp(guid, ACPI_TABLE_GUID))) + rsdp_addr = table; + else if (!(efi_guidcmp(guid, ACPI_20_TABLE_GUID))) + return table; + } +#endif + return rsdp_addr; +} + +static u8 compute_checksum(u8 *buffer, u32 length) +{ + u8 *end = buffer + length; + u8 sum = 0; + + while (buffer < end) + sum += *(buffer++); + + return sum; +} + +/* Search a block of memory for the RSDP signature. */ +static u8 *scan_mem_for_rsdp(u8 *start, u32 length) +{ + struct acpi_table_rsdp *rsdp; + u8 *address, *end; + + end = start + length; + + /* Search from given start address for the requested length */ + for (address = start; address < end; address += ACPI_RSDP_SCAN_STEP) { + /* + * Both RSDP signature and checksum must be correct. + * Note: Sometimes there exists more than one RSDP in memory; + * the valid RSDP has a valid checksum, all others have an + * invalid checksum. + */ + rsdp = (struct acpi_table_rsdp *)address; + + /* BAD Signature */ + if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) + continue; + + /* Check the standard checksum */ + if (compute_checksum((u8 *)rsdp, ACPI_RSDP_CHECKSUM_LENGTH)) + continue; + + /* Check extended checksum if table version >= 2 */ + if ((rsdp->revision >= 2) && + (compute_checksum((u8 *)rsdp, ACPI_RSDP_XCHECKSUM_LENGTH))) + continue; + + /* Signature and checksum valid, we have found a real RSDP */ + return address; + } + return NULL; +} + +/* Search RSDP address in EBDA. 
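scan_mem_for_rsdp() and compute_checksum() above encode the usual ACPI rule: a candidate RSDP counts only if both its signature and its checksum(s) are valid, because stale copies of the structure can linger in memory. The following stand-alone sketch performs the same validation over an in-memory buffer; the struct layout follows the ACPI specification, and find_rsdp()/sum_bytes() are illustrative names rather than the boot code's own:

/* Stand-alone sketch of what scan_mem_for_rsdp() above does: step through
 * memory on 16-byte boundaries, check the "RSD PTR " signature, verify that
 * the first 20 bytes (ACPI 1.0 checksum) sum to zero, and for revision >= 2
 * that the first 36 bytes (extended checksum) do as well.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct rsdp {				/* packed layout per the ACPI spec */
	char     signature[8];		/* "RSD PTR " */
	uint8_t  checksum;		/* covers the first 20 bytes */
	char     oem_id[6];
	uint8_t  revision;
	uint32_t rsdt_address;
	uint32_t length;		/* ACPI >= 2.0 fields from here on */
	uint64_t xsdt_address;
	uint8_t  extended_checksum;	/* covers the first 36 bytes */
	uint8_t  reserved[3];
} __attribute__((packed));

static uint8_t sum_bytes(const uint8_t *p, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

static const struct rsdp *find_rsdp(const uint8_t *start, size_t len)
{
	const uint8_t *p, *end = start + len;

	for (p = start; p + sizeof(struct rsdp) <= end; p += 16) {
		const struct rsdp *r = (const struct rsdp *)p;

		if (memcmp(r->signature, "RSD PTR ", 8))
			continue;
		if (sum_bytes(p, 20))				/* bad ACPI 1.0 checksum */
			continue;
		if (r->revision >= 2 && sum_bytes(p, 36))	/* bad extended checksum */
			continue;
		return r;
	}
	return NULL;
}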
*/ +static acpi_physical_address bios_get_rsdp_addr(void) +{ + unsigned long address; + u8 *rsdp; + + /* Get the location of the Extended BIOS Data Area (EBDA) */ + address = *(u16 *)ACPI_EBDA_PTR_LOCATION; + address <<= 4; + + /* + * Search EBDA paragraphs (EBDA is required to be a minimum of + * 1K length) + */ + if (address > 0x400) { + rsdp = scan_mem_for_rsdp((u8 *)address, ACPI_EBDA_WINDOW_SIZE); + if (rsdp) + return (acpi_physical_address)(unsigned long)rsdp; + } + + /* Search upper memory: 16-byte boundaries in E0000h-FFFFFh */ + rsdp = scan_mem_for_rsdp((u8 *) ACPI_HI_RSDP_WINDOW_BASE, + ACPI_HI_RSDP_WINDOW_SIZE); + if (rsdp) + return (acpi_physical_address)(unsigned long)rsdp; + + return 0; +} + +/* Return RSDP address on success, otherwise 0. */ +acpi_physical_address get_rsdp_addr(void) +{ + acpi_physical_address pa; + + pa = get_acpi_rsdp(); + + if (!pa) + pa = boot_params->acpi_rsdp_addr; + + if (!pa) + pa = efi_get_rsdp_addr(); + + if (!pa) + pa = bios_get_rsdp_addr(); + + return pa; +} + +#if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE) +/* Compute SRAT address from RSDP. */ +static unsigned long get_acpi_srat_table(void) +{ + unsigned long root_table, acpi_table; + struct acpi_table_header *header; + struct acpi_table_rsdp *rsdp; + u32 num_entries, size, len; + char arg[10]; + u8 *entry; + + rsdp = (struct acpi_table_rsdp *)(long)boot_params->acpi_rsdp_addr; + if (!rsdp) + return 0; + + /* Get ACPI root table from RSDP.*/ + if (!(cmdline_find_option("acpi", arg, sizeof(arg)) == 4 && + !strncmp(arg, "rsdt", 4)) && + rsdp->xsdt_physical_address && + rsdp->revision > 1) { + root_table = rsdp->xsdt_physical_address; + size = ACPI_XSDT_ENTRY_SIZE; + } else { + root_table = rsdp->rsdt_physical_address; + size = ACPI_RSDT_ENTRY_SIZE; + } + + if (!root_table) + return 0; + + header = (struct acpi_table_header *)root_table; + len = header->length; + if (len < sizeof(struct acpi_table_header) + size) + return 0; + + num_entries = (len - sizeof(struct acpi_table_header)) / size; + entry = (u8 *)(root_table + sizeof(struct acpi_table_header)); + + while (num_entries--) { + if (size == ACPI_RSDT_ENTRY_SIZE) + acpi_table = *(u32 *)entry; + else + acpi_table = *(u64 *)entry; + + if (acpi_table) { + header = (struct acpi_table_header *)acpi_table; + + if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_SRAT)) + return acpi_table; + } + entry += size; + } + return 0; +} + +/** + * count_immovable_mem_regions - Parse SRAT and cache the immovable + * memory regions into the immovable_mem array. + * + * Return the number of immovable memory regions on success, 0 on failure: + * + * - Too many immovable memory regions + * - ACPI off or no SRAT found + * - No immovable memory region found. 
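get_acpi_srat_table() above resolves RSDP to XSDT (8-byte entries, preferred when revision > 1) or RSDT (4-byte entries) and then compares the 4-byte signature of each listed table against "SRAT". The sketch below walks an RSDT-style root table in ordinary C; struct acpi_hdr is trimmed to the two fields the walk needs and the 36-byte header size is hard-coded, so treat it as an illustration of the traversal rather than a drop-in ACPI parser:

/* Sketch of the root-table walk in get_acpi_srat_table() above. The RSDT
 * body is an array of 32-bit physical pointers (the XSDT uses 64-bit ones);
 * each points at a table whose first 4 bytes are its signature, e.g. "SRAT".
 */
#include <stdint.h>
#include <string.h>

#define ACPI_HDR_SIZE	36	/* fixed size of struct acpi_table_header */

struct acpi_hdr {		/* only the leading fields the walk needs */
	char     signature[4];
	uint32_t length;	/* total table length, header included */
} __attribute__((packed));

/*
 * Return the physical address of the table with the given 4-byte signature,
 * or 0 if it is not listed. The decompression stub runs identity-mapped, so
 * physical addresses are dereferenced directly, as the code above does.
 */
static uint64_t find_table_rsdt(uint64_t rsdt_pa, const char *sig)
{
	const struct acpi_hdr *rsdt = (const struct acpi_hdr *)(uintptr_t)rsdt_pa;
	uint32_t nentries = (rsdt->length - ACPI_HDR_SIZE) / sizeof(uint32_t);
	const uint32_t *entry = (const uint32_t *)((const char *)rsdt + ACPI_HDR_SIZE);

	for (uint32_t i = 0; i < nentries; i++) {
		const struct acpi_hdr *t = (const struct acpi_hdr *)(uintptr_t)entry[i];

		if (entry[i] && !memcmp(t->signature, sig, 4))
			return entry[i];
	}
	return 0;
}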
+ */ +int count_immovable_mem_regions(void) +{ + unsigned long table_addr, table_end, table; + struct acpi_subtable_header *sub_table; + struct acpi_table_header *table_header; + char arg[MAX_ACPI_ARG_LENGTH]; + int num = 0; + + if (cmdline_find_option("acpi", arg, sizeof(arg)) == 3 && + !strncmp(arg, "off", 3)) + return 0; + + table_addr = get_acpi_srat_table(); + if (!table_addr) + return 0; + + table_header = (struct acpi_table_header *)table_addr; + table_end = table_addr + table_header->length; + table = table_addr + sizeof(struct acpi_table_srat); + + while (table + sizeof(struct acpi_subtable_header) < table_end) { + sub_table = (struct acpi_subtable_header *)table; + if (sub_table->type == ACPI_SRAT_TYPE_MEMORY_AFFINITY) { + struct acpi_srat_mem_affinity *ma; + + ma = (struct acpi_srat_mem_affinity *)sub_table; + if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && ma->length) { + immovable_mem[num].start = ma->base_address; + immovable_mem[num].size = ma->length; + num++; + } + + if (num >= MAX_NUMNODES*2) { + debug_putstr("Too many immovable memory regions, aborting.\n"); + return 0; + } + } + table += sub_table->length; + } + return num; +} +#endif /* CONFIG_RANDOMIZE_BASE && CONFIG_MEMORY_HOTREMOVE */ diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c index af6cda0b7900..f1add5d85da9 100644 --- a/arch/x86/boot/compressed/cmdline.c +++ b/arch/x86/boot/compressed/cmdline.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include "misc.h" -#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE || CONFIG_X86_5LEVEL - static unsigned long fs; static inline void set_fs(unsigned long seg) { @@ -30,5 +28,3 @@ int cmdline_find_option_bool(const char *option) { return __cmdline_find_option_bool(get_cmd_line_ptr(), option); } - -#endif diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 64037895b085..fafb75c6c592 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -358,8 +358,11 @@ ENTRY(startup_64) * paging_prepare() sets up the trampoline and checks if we need to * enable 5-level paging. * - * Address of the trampoline is returned in RAX. - * Non zero RDX on return means we need to enable 5-level paging. + * paging_prepare() returns a two-quadword structure which lands + * into RDX:RAX: + * - Address of the trampoline is returned in RAX. + * - Non zero RDX means trampoline needs to enable 5-level + * paging. * * RSI holds real mode data and needs to be preserved across * this function call. @@ -565,7 +568,7 @@ adjust_got: * * RDI contains the return address (might be above 4G). * ECX contains the base address of the trampoline memory. - * Non zero RDX on return means we need to enable 5-level paging. + * Non zero RDX means trampoline needs to enable 5-level paging. 
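count_immovable_mem_regions() above iterates the SRAT's variable-length subtables, each of which starts with a one-byte type and a one-byte length, keeps only memory-affinity entries whose hot-pluggable flag is clear, and caches their base and size so KASLR can avoid placing the kernel in hot-removable memory. A compressed restatement of that iteration follows; struct srat_mem is a simplified hypothetical layout, not the real struct acpi_srat_mem_affinity, so only the advance-by-length walk should be read as faithful:

/* Sketch of the SRAT subtable walk in count_immovable_mem_regions() above.
 * Field offsets in struct srat_mem are NOT the real ACPI layout; the point
 * is the "advance by sub->length" iteration over variable-length subtables.
 */
#include <stdint.h>
#include <stddef.h>

#define SRAT_TYPE_MEM_AFFINITY	1
#define SRAT_MEM_HOT_PLUGGABLE	(1u << 1)

struct subtable_hdr {
	uint8_t type;
	uint8_t length;		/* total length of this subtable */
} __attribute__((packed));

struct srat_mem {		/* hypothetical, simplified layout */
	struct subtable_hdr hdr;
	uint64_t base;
	uint64_t size;
	uint32_t flags;
} __attribute__((packed));

struct region { uint64_t start, size; };

static int collect_immovable(const uint8_t *srat, uint32_t srat_len,
			     uint32_t body_off, struct region *out, int max)
{
	const uint8_t *p = srat + body_off, *end = srat + srat_len;
	int n = 0;

	while (p + sizeof(struct subtable_hdr) < end && n < max) {
		const struct subtable_hdr *sub = (const void *)p;

		if (!sub->length)	/* malformed table, avoid looping forever */
			break;
		if (sub->type == SRAT_TYPE_MEM_AFFINITY) {
			const struct srat_mem *ma = (const void *)p;

			if (!(ma->flags & SRAT_MEM_HOT_PLUGGABLE) && ma->size) {
				out[n].start = ma->base;
				out[n].size  = ma->size;
				n++;
			}
		}
		p += sub->length;	/* subtables are variable-length */
	}
	return n;
}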
*/ ENTRY(trampoline_32bit_src) /* Set up data and stack segments */ @@ -600,6 +603,16 @@ ENTRY(trampoline_32bit_src) leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax movl %eax, %cr3 3: + /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ + pushl %ecx + pushl %edx + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_LME, %eax + wrmsr + popl %edx + popl %ecx + /* Enable PAE and LA57 (if required) paging modes */ movl $X86_CR4_PAE, %eax cmpl $0, %edx @@ -645,8 +658,6 @@ no_longmode: .data gdt64: .word gdt_end - gdt - .long 0 - .word 0 .quad 0 gdt: .word gdt_end - gdt diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index 9ed9709d9947..2e53c056ba20 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -87,10 +87,6 @@ static unsigned long get_boot_seed(void) #define KASLR_COMPRESSED_BOOT #include "../../lib/kaslr.c" -struct mem_vector { - unsigned long long start; - unsigned long long size; -}; /* Only supporting at most 4 unusable memmap regions with kaslr */ #define MAX_MEMMAP_REGIONS 4 @@ -101,6 +97,8 @@ static bool memmap_too_large; /* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */ static unsigned long long mem_limit = ULLONG_MAX; +/* Number of immovable memory regions */ +static int num_immovable_mem; enum mem_avoid_index { MEM_AVOID_ZO_RANGE = 0, @@ -417,6 +415,9 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, /* Mark the memmap regions we need to avoid */ handle_mem_options(); + /* Enumerate the immovable memory regions */ + num_immovable_mem = count_immovable_mem_regions(); + #ifdef CONFIG_X86_VERBOSE_BOOTUP /* Make sure video RAM can be used. */ add_identity_map(0, PMD_SIZE); @@ -572,9 +573,9 @@ static unsigned long slots_fetch_random(void) return 0; } -static void process_mem_region(struct mem_vector *entry, - unsigned long minimum, - unsigned long image_size) +static void __process_mem_region(struct mem_vector *entry, + unsigned long minimum, + unsigned long image_size) { struct mem_vector region, overlap; unsigned long start_orig, end; @@ -650,6 +651,56 @@ static void process_mem_region(struct mem_vector *entry, } } +static bool process_mem_region(struct mem_vector *region, + unsigned long long minimum, + unsigned long long image_size) +{ + int i; + /* + * If no immovable memory found, or MEMORY_HOTREMOVE disabled, + * use @region directly. + */ + if (!num_immovable_mem) { + __process_mem_region(region, minimum, image_size); + + if (slot_area_index == MAX_SLOT_AREA) { + debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n"); + return 1; + } + return 0; + } + +#if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI) + /* + * If immovable memory found, filter the intersection between + * immovable memory and @region. 
+ */ + for (i = 0; i < num_immovable_mem; i++) { + unsigned long long start, end, entry_end, region_end; + struct mem_vector entry; + + if (!mem_overlaps(region, &immovable_mem[i])) + continue; + + start = immovable_mem[i].start; + end = start + immovable_mem[i].size; + region_end = region->start + region->size; + + entry.start = clamp(region->start, start, end); + entry_end = clamp(region_end, start, end); + entry.size = entry_end - entry.start; + + __process_mem_region(&entry, minimum, image_size); + + if (slot_area_index == MAX_SLOT_AREA) { + debug_putstr("Aborted e820/efi memmap scan when walking immovable regions(slot_areas full)!\n"); + return 1; + } + } +#endif + return 0; +} + #ifdef CONFIG_EFI /* * Returns true if mirror region found (and must have been processed @@ -715,11 +766,8 @@ process_efi_entries(unsigned long minimum, unsigned long image_size) region.start = md->phys_addr; region.size = md->num_pages << EFI_PAGE_SHIFT; - process_mem_region(®ion, minimum, image_size); - if (slot_area_index == MAX_SLOT_AREA) { - debug_putstr("Aborted EFI scan (slot_areas full)!\n"); + if (process_mem_region(®ion, minimum, image_size)) break; - } } return true; } @@ -746,11 +794,8 @@ static void process_e820_entries(unsigned long minimum, continue; region.start = entry->addr; region.size = entry->size; - process_mem_region(®ion, minimum, image_size); - if (slot_area_index == MAX_SLOT_AREA) { - debug_putstr("Aborted e820 scan (slot_areas full)!\n"); + if (process_mem_region(®ion, minimum, image_size)) break; - } } } diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 8dd1d5ccae58..c0d6c560df69 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -351,6 +351,9 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, /* Clear flags intended for solely in-kernel use. */ boot_params->hdr.loadflags &= ~KASLR_FLAG; + /* Save RSDP address for later use. 
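The new process_mem_region() wrapper above intersects each e820/EFI candidate region with every cached immovable region by clamping both endpoints to the immovable range, and only hands the overlap to __process_mem_region(). The half-open interval arithmetic is easy to get subtly wrong, so here it is in a tiny self-contained form; clamp_u64() and intersect() are illustrative helpers, and struct mem_vector mirrors the definition moved into misc.h by this series:

/* Intersection of two [start, start+size) ranges via clamping, as in the
 * process_mem_region() hunk above. A disjoint pair yields size 0.
 */
#include <stdio.h>
#include <stdint.h>

struct mem_vector { uint64_t start, size; };

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static struct mem_vector intersect(struct mem_vector a, struct mem_vector b)
{
	uint64_t a_end = a.start + a.size;
	uint64_t b_end = b.start + b.size;
	struct mem_vector out;

	out.start = clamp_u64(a.start, b.start, b_end);
	out.size  = clamp_u64(a_end, b.start, b_end) - out.start;
	return out;
}

int main(void)
{
	struct mem_vector region = { 0x100000, 0x300000 };	/* candidate e820 region */
	struct mem_vector immov  = { 0x200000, 0x500000 };	/* immovable memory */
	struct mem_vector only   = intersect(region, immov);

	/* prints: overlap at 0x200000, size 0x200000 */
	printf("overlap at 0x%llx, size 0x%llx\n",
	       (unsigned long long)only.start, (unsigned long long)only.size);
	return 0;
}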
*/ + boot_params->acpi_rsdp_addr = get_rsdp_addr(); + sanitize_boot_params(boot_params); if (boot_params->screen_info.orig_video_mode == 7) { diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index a1d5918765f3..fd13655e0f9b 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -25,6 +25,9 @@ #include #include +#define BOOT_CTYPE_H +#include + #define BOOT_BOOT_H #include "../ctype.h" @@ -63,12 +66,14 @@ static inline void debug_puthex(const char *s) #endif -#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE /* cmdline.c */ int cmdline_find_option(const char *option, char *buffer, int bufsize); int cmdline_find_option_bool(const char *option); -#endif +struct mem_vector { + unsigned long long start; + unsigned long long size; +}; #if CONFIG_RANDOMIZE_BASE /* kaslr.c */ @@ -116,3 +121,17 @@ static inline void console_init(void) void set_sev_encryption_mask(void); #endif + +/* acpi.c */ +#ifdef CONFIG_ACPI +acpi_physical_address get_rsdp_addr(void); +#else +static inline acpi_physical_address get_rsdp_addr(void) { return 0; } +#endif + +#if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI) +extern struct mem_vector immovable_mem[MAX_NUMNODES*2]; +int count_immovable_mem_regions(void); +#else +static inline int count_immovable_mem_regions(void) { return 0; } +#endif diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h index 91f75638f6e6..6ff7e81b5628 100644 --- a/arch/x86/boot/compressed/pgtable.h +++ b/arch/x86/boot/compressed/pgtable.h @@ -6,7 +6,7 @@ #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE -#define TRAMPOLINE_32BIT_CODE_SIZE 0x60 +#define TRAMPOLINE_32BIT_CODE_SIZE 0x70 #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 9e2157371491..f8debf7aeb4c 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -1,5 +1,7 @@ +#include #include #include +#include #include "pgtable.h" #include "../string.h" @@ -37,9 +39,10 @@ int cmdline_find_option_bool(const char *option); static unsigned long find_trampoline_placement(void) { - unsigned long bios_start, ebda_start; + unsigned long bios_start = 0, ebda_start = 0; unsigned long trampoline_start; struct boot_e820_entry *entry; + char *signature; int i; /* @@ -47,8 +50,18 @@ static unsigned long find_trampoline_placement(void) * This code is based on reserve_bios_regions(). */ - ebda_start = *(unsigned short *)0x40e << 4; - bios_start = *(unsigned short *)0x413 << 10; + /* + * EFI systems may not provide legacy ROM. The memory may not be mapped + * at all. + * + * Only look for values in the legacy ROM for non-EFI system. 
+ */ + signature = (char *)&boot_params->efi_info.efi_loader_signature; + if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) && + strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) { + ebda_start = *(unsigned short *)0x40e << 4; + bios_start = *(unsigned short *)0x413 << 10; + } if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX) bios_start = BIOS_START_MAX; diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S index f491bbde8493..508cfa6828c5 100644 --- a/arch/x86/boot/compressed/vmlinux.lds.S +++ b/arch/x86/boot/compressed/vmlinux.lds.S @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include -OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) +OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) #undef i386 diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld index 96a6c7563538..0149e41d42c2 100644 --- a/arch/x86/boot/setup.ld +++ b/arch/x86/boot/setup.ld @@ -3,7 +3,7 @@ * * Linker script for the i386 setup code */ -OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") +OUTPUT_FORMAT("elf32-i386") OUTPUT_ARCH(i386) ENTRY(_start) diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c index c4428a176973..315a67b8896b 100644 --- a/arch/x86/boot/string.c +++ b/arch/x86/boot/string.c @@ -13,10 +13,14 @@ */ #include +#include +#include #include #include "ctype.h" #include "string.h" +#define KSTRTOX_OVERFLOW (1U << 31) + /* * Undef these macros so that the functions that we provide * here will have the correct names regardless of how string.h @@ -187,3 +191,140 @@ char *strchr(const char *s, int c) return NULL; return (char *)s; } + +static inline u64 __div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + union { + u64 v64; + u32 v32[2]; + } d = { dividend }; + u32 upper; + + upper = d.v32[1]; + d.v32[1] = 0; + if (upper >= divisor) { + d.v32[1] = upper / divisor; + upper %= divisor; + } + asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) : + "rm" (divisor), "0" (d.v32[0]), "1" (upper)); + return d.v64; +} + +static inline u64 __div_u64(u64 dividend, u32 divisor) +{ + u32 remainder; + + return __div_u64_rem(dividend, divisor, &remainder); +} + +static inline char _tolower(const char c) +{ + return c | 0x20; +} + +static const char *_parse_integer_fixup_radix(const char *s, unsigned int *base) +{ + if (*base == 0) { + if (s[0] == '0') { + if (_tolower(s[1]) == 'x' && isxdigit(s[2])) + *base = 16; + else + *base = 8; + } else + *base = 10; + } + if (*base == 16 && s[0] == '0' && _tolower(s[1]) == 'x') + s += 2; + return s; +} + +/* + * Convert non-negative integer string representation in explicitly given radix + * to an integer. + * Return number of characters consumed maybe or-ed with overflow bit. + * If overflow occurs, result integer (incorrect) is still returned. + * + * Don't you dare use this function. 
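Among the boot string.c additions above, __div_u64_rem() divides a 64-bit dividend by a 32-bit divisor with the 32-bit divl instruction; pre-dividing the upper 32 bits guarantees that the quotient of the remaining division fits in 32 bits, so divl cannot raise a divide-error exception. A portable restatement of the same trick, with no inline assembly and a small self-check:

/* Portable restatement of the __div_u64_rem() trick added above: divide the
 * high 32 bits first so that the second division's quotient is guaranteed to
 * fit in 32 bits (on x86-32 that second step is a single divl instruction).
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint64_t div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
	uint32_t upper = dividend >> 32;
	uint32_t quot_hi = 0;

	if (upper >= divisor) {
		quot_hi = upper / divisor;	/* high word of the quotient */
		upper %= divisor;		/* carried into the low division */
	}

	/* In the asm version this is: divl with EDX:EAX = upper:low32 */
	uint64_t lowpart = ((uint64_t)upper << 32) | (uint32_t)dividend;
	uint32_t quot_lo = (uint32_t)(lowpart / divisor);

	*remainder = (uint32_t)(lowpart % divisor);
	return ((uint64_t)quot_hi << 32) | quot_lo;
}

int main(void)
{
	uint32_t rem;
	uint64_t q = div_u64_rem(0x123456789abcdefULL, 10, &rem);

	assert(q == 0x123456789abcdefULL / 10 && rem == 0x123456789abcdefULL % 10);
	printf("q=%llu rem=%u\n", (unsigned long long)q, rem);
	return 0;
}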
+ */ +static unsigned int _parse_integer(const char *s, + unsigned int base, + unsigned long long *p) +{ + unsigned long long res; + unsigned int rv; + + res = 0; + rv = 0; + while (1) { + unsigned int c = *s; + unsigned int lc = c | 0x20; /* don't tolower() this line */ + unsigned int val; + + if ('0' <= c && c <= '9') + val = c - '0'; + else if ('a' <= lc && lc <= 'f') + val = lc - 'a' + 10; + else + break; + + if (val >= base) + break; + /* + * Check for overflow only if we are within range of + * it in the max base we support (16) + */ + if (unlikely(res & (~0ull << 60))) { + if (res > __div_u64(ULLONG_MAX - val, base)) + rv |= KSTRTOX_OVERFLOW; + } + res = res * base + val; + rv++; + s++; + } + *p = res; + return rv; +} + +static int _kstrtoull(const char *s, unsigned int base, unsigned long long *res) +{ + unsigned long long _res; + unsigned int rv; + + s = _parse_integer_fixup_radix(s, &base); + rv = _parse_integer(s, base, &_res); + if (rv & KSTRTOX_OVERFLOW) + return -ERANGE; + if (rv == 0) + return -EINVAL; + s += rv; + if (*s == '\n') + s++; + if (*s) + return -EINVAL; + *res = _res; + return 0; +} + +/** + * kstrtoull - convert a string to an unsigned long long + * @s: The start of the string. The string must be null-terminated, and may also + * include a single newline before its terminating null. The first character + * may also be a plus sign, but not a minus sign. + * @base: The number base to use. The maximum supported base is 16. If base is + * given as 0, then the base of the string is automatically detected with the + * conventional semantics - If it begins with 0x the number will be parsed as a + * hexadecimal (case insensitive), if it otherwise begins with 0, it will be + * parsed as an octal number. Otherwise it will be parsed as a decimal. + * @res: Where to write the result of the conversion on success. + * + * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. + * Used as a replacement for the obsolete simple_strtoull. Return code must + * be checked. 
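The kstrtoull() that follows is a boot-stub copy of the kernel helper with the contract documented above: base 0 autodetects 0x and leading-0 prefixes, a leading '+' and a single trailing newline are tolerated, any other trailing character is -EINVAL, and overflow is -ERANGE. A hypothetical caller sketch (the real user is get_acpi_rsdp() earlier in this series, which parses the acpi_rsdp= override in base 16):

/* Hypothetical caller sketch showing the kstrtoull() contract documented
 * above; parse_rsdp_override() is not kernel code. With base 16, inputs
 * such as "0x000e0000", "e0000" and "+e0000\n" are all accepted.
 */
#include <errno.h>

int kstrtoull(const char *s, unsigned int base, unsigned long long *res);

static int parse_rsdp_override(const char *val, unsigned long long *addr)
{
	int ret = kstrtoull(val, 16, addr);

	if (ret)	/* -EINVAL: trailing junk or no digits, -ERANGE: overflow */
		return ret;
	return 0;
}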
+ */ +int kstrtoull(const char *s, unsigned int base, unsigned long long *res) +{ + if (s[0] == '+') + s++; + return _kstrtoull(s, base, res); +} diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h index 3d78e27077f4..38d8f2f5e47e 100644 --- a/arch/x86/boot/string.h +++ b/arch/x86/boot/string.h @@ -29,4 +29,5 @@ extern unsigned int atou(const char *s); extern unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base); +int kstrtoull(const char *s, unsigned int base, unsigned long long *res); #endif /* BOOT_STRING_H */ diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 4bb95d7ad947..9f908112bbb9 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig @@ -287,7 +287,6 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set @@ -310,3 +309,5 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_CRYPTO_AES_586=y # CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_EFI_STUB=y +CONFIG_ACPI_BGRT=y diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 0fed049422a8..1d3badfda09e 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -286,7 +286,6 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=y CONFIG_PRINTK_TIME=y -# CONFIG_ENABLE_WARN_DEPRECATED is not set CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_KERNEL=y @@ -308,3 +307,6 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y # CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +CONFIG_ACPI_BGRT=y diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 2a356b948720..3ea71b871813 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad( } static void crypto_aegis128_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS128_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128_aesni_init(&state, ctx->key.bytes, 
req->iv); crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128_aesni_process_crypt(&state, req, ops); + crypto_aegis128_aesni_process_crypt(&state, &walk, ops); crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c index dbe8bb980da1..1b1b39c66c5e 100644 --- a/arch/x86/crypto/aegis128l-aesni-glue.c +++ b/arch/x86/crypto/aegis128l-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad( } static void crypto_aegis128l_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS128L_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) { + ops->crypt_blocks(state, round_down(walk->nbytes, + AEGIS128L_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv); crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis128l_aesni_process_crypt(&state, req, ops); + crypto_aegis128l_aesni_process_crypt(&state, &walk, ops); crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c index 8bebda2de92f..6227ca3220a0 100644 --- a/arch/x86/crypto/aegis256-aesni-glue.c +++ b/arch/x86/crypto/aegis256-aesni-glue.c @@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad( } static void crypto_aegis256_aesni_process_crypt( - struct aegis_state *state, struct aead_request *req, + struct aegis_state *state, struct skcipher_walk *walk, const struct aegis_crypt_ops *ops) { - struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize, base; - - ops->skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops->crypt_blocks(state, chunksize, src, dst); - - base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1); - src += base; - dst += base; - chunksize &= AEGIS256_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops->crypt_tail(state, chunksize, src, dst); + while (walk->nbytes >= AEGIS256_BLOCK_SIZE) { + ops->crypt_blocks(state, + round_down(walk->nbytes, AEGIS256_BLOCK_SIZE), + walk->src.virt.addr, walk->dst.virt.addr); + skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 
0); + if (walk->nbytes) { + ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr, + walk->dst.virt.addr); + skcipher_walk_done(walk, 0); } } @@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm); + struct skcipher_walk walk; struct aegis_state state; + ops->skcipher_walk_init(&walk, req, true); + kernel_fpu_begin(); crypto_aegis256_aesni_init(&state, ctx->key, req->iv); crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen); - crypto_aegis256_aesni_process_crypt(&state, req, ops); + crypto_aegis256_aesni_process_crypt(&state, &walk, ops); crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 1321700d6647..1e3d2102033a 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -175,26 +175,18 @@ asmlinkage void aesni_gcm_finalize(void *ctx, struct gcm_context_data *gdata, u8 *auth_tag, unsigned long auth_tag_len); -static struct aesni_gcm_tfm_s { -void (*init)(void *ctx, - struct gcm_context_data *gdata, - u8 *iv, - u8 *hash_subkey, const u8 *aad, - unsigned long aad_len); -void (*enc_update)(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, - unsigned long plaintext_len); -void (*dec_update)(void *ctx, - struct gcm_context_data *gdata, u8 *out, - const u8 *in, - unsigned long ciphertext_len); -void (*finalize)(void *ctx, - struct gcm_context_data *gdata, - u8 *auth_tag, unsigned long auth_tag_len); +static const struct aesni_gcm_tfm_s { + void (*init)(void *ctx, struct gcm_context_data *gdata, u8 *iv, + u8 *hash_subkey, const u8 *aad, unsigned long aad_len); + void (*enc_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long plaintext_len); + void (*dec_update)(void *ctx, struct gcm_context_data *gdata, u8 *out, + const u8 *in, unsigned long ciphertext_len); + void (*finalize)(void *ctx, struct gcm_context_data *gdata, + u8 *auth_tag, unsigned long auth_tag_len); } *aesni_gcm_tfm; -struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { .init = &aesni_gcm_init, .enc_update = &aesni_gcm_enc_update, .dec_update = &aesni_gcm_dec_update, @@ -243,7 +235,7 @@ asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { .init = &aesni_gcm_init_avx_gen2, .enc_update = &aesni_gcm_enc_update_avx_gen2, .dec_update = &aesni_gcm_dec_update_avx_gen2, @@ -288,7 +280,7 @@ asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, const u8 *aad, unsigned long aad_len, u8 *auth_tag, unsigned long auth_tag_len); -struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { +static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { .init = &aesni_gcm_init_avx_gen4, .enc_update = &aesni_gcm_enc_update_avx_gen4, .dec_update = &aesni_gcm_dec_update_avx_gen4, @@ -778,7 +770,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, { struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned long auth_tag_len = crypto_aead_authsize(tfm); - struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; + const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm; struct gcm_context_data data AESNI_ALIGN_ATTR; struct scatter_walk dst_sg_walk = 
{}; unsigned long left = req->cryptlen; @@ -821,11 +813,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0); } - src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); - scatterwalk_start(&src_sg_walk, src_sg); - if (req->src != req->dst) { - dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen); - scatterwalk_start(&dst_sg_walk, dst_sg); + if (left) { + src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen); + scatterwalk_start(&src_sg_walk, src_sg); + if (req->src != req->dst) { + dst_sg = scatterwalk_ffwd(dst_start, req->dst, + req->assoclen); + scatterwalk_start(&dst_sg_walk, dst_sg); + } } kernel_fpu_begin(); diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S index de04d3e98d8d..3d873e67749d 100644 --- a/arch/x86/crypto/crct10dif-pcl-asm_64.S +++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S @@ -43,609 +43,291 @@ # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -######################################################################## -# Function API: -# UINT16 crc_t10dif_pcl( -# UINT16 init_crc, //initial CRC value, 16 bits -# const unsigned char *buf, //buffer pointer to calculate CRC on -# UINT64 len //buffer length in bytes (64-bit data) -# ); # # Reference paper titled "Fast CRC Computation for Generic # Polynomials Using PCLMULQDQ Instruction" # URL: http://www.intel.com/content/dam/www/public/us/en/documents # /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf # -# #include .text -#define arg1 %rdi -#define arg2 %rsi -#define arg3 %rdx - -#define arg1_low32 %edi +#define init_crc %edi +#define buf %rsi +#define len %rdx + +#define FOLD_CONSTS %xmm10 +#define BSWAP_MASK %xmm11 + +# Fold reg1, reg2 into the next 32 data bytes, storing the result back into +# reg1, reg2. +.macro fold_32_bytes offset, reg1, reg2 + movdqu \offset(buf), %xmm9 + movdqu \offset+16(buf), %xmm12 + pshufb BSWAP_MASK, %xmm9 + pshufb BSWAP_MASK, %xmm12 + movdqa \reg1, %xmm8 + movdqa \reg2, %xmm13 + pclmulqdq $0x00, FOLD_CONSTS, \reg1 + pclmulqdq $0x11, FOLD_CONSTS, %xmm8 + pclmulqdq $0x00, FOLD_CONSTS, \reg2 + pclmulqdq $0x11, FOLD_CONSTS, %xmm13 + pxor %xmm9 , \reg1 + xorps %xmm8 , \reg1 + pxor %xmm12, \reg2 + xorps %xmm13, \reg2 +.endm + +# Fold src_reg into dst_reg. +.macro fold_16_bytes src_reg, dst_reg + movdqa \src_reg, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, \src_reg + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 + pxor %xmm8, \dst_reg + xorps \src_reg, \dst_reg +.endm -ENTRY(crc_t10dif_pcl) +# +# u16 crc_t10dif_pcl(u16 init_crc, const *u8 buf, size_t len); +# +# Assumes len >= 16. +# .align 16 +ENTRY(crc_t10dif_pcl) - # adjust the 16-bit initial_crc value, scale it to 32 bits - shl $16, arg1_low32 - - # Allocate Stack Space - mov %rsp, %rcx - sub $16*2, %rsp - # align stack to 16 byte boundary - and $~(0x10 - 1), %rsp - - # check if smaller than 256 - cmp $256, arg3 - - # for sizes less than 128, we can't fold 64B at a time... - jl _less_than_128 - - - # load the initial crc value - movd arg1_low32, %xmm10 # initial crc - - # crc value does not need to be byte-reflected, but it needs - # to be moved to the high part of the register. - # because data will be byte-reflected and will align with - # initial crc at correct place. 
- pslldq $12, %xmm10 - - movdqa SHUF_MASK(%rip), %xmm11 - # receive the initial 64B data, xor the initial crc value - movdqu 16*0(arg2), %xmm0 - movdqu 16*1(arg2), %xmm1 - movdqu 16*2(arg2), %xmm2 - movdqu 16*3(arg2), %xmm3 - movdqu 16*4(arg2), %xmm4 - movdqu 16*5(arg2), %xmm5 - movdqu 16*6(arg2), %xmm6 - movdqu 16*7(arg2), %xmm7 - - pshufb %xmm11, %xmm0 - # XOR the initial_crc value - pxor %xmm10, %xmm0 - pshufb %xmm11, %xmm1 - pshufb %xmm11, %xmm2 - pshufb %xmm11, %xmm3 - pshufb %xmm11, %xmm4 - pshufb %xmm11, %xmm5 - pshufb %xmm11, %xmm6 - pshufb %xmm11, %xmm7 - - movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4 - #imm value of pclmulqdq instruction - #will determine which constant to use - - ################################################################# - # we subtract 256 instead of 128 to save one instruction from the loop - sub $256, arg3 - - # at this section of the code, there is 64*x+y (0<=y<64) bytes of - # buffer. The _fold_64_B_loop will fold 64B at a time - # until we have 64+y Bytes of buffer - - - # fold 64B at a time. This section of the code folds 4 xmm - # registers in parallel -_fold_64_B_loop: - - # update the buffer pointer - add $128, arg2 # buf += 64# - - movdqu 16*0(arg2), %xmm9 - movdqu 16*1(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm0, %xmm8 - movdqa %xmm1, %xmm13 - pclmulqdq $0x0 , %xmm10, %xmm0 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0 , %xmm10, %xmm1 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm0 - xorps %xmm8 , %xmm0 - pxor %xmm12, %xmm1 - xorps %xmm13, %xmm1 - - movdqu 16*2(arg2), %xmm9 - movdqu 16*3(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm2, %xmm8 - movdqa %xmm3, %xmm13 - pclmulqdq $0x0, %xmm10, %xmm2 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0, %xmm10, %xmm3 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm2 - xorps %xmm8 , %xmm2 - pxor %xmm12, %xmm3 - xorps %xmm13, %xmm3 - - movdqu 16*4(arg2), %xmm9 - movdqu 16*5(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm4, %xmm8 - movdqa %xmm5, %xmm13 - pclmulqdq $0x0, %xmm10, %xmm4 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0, %xmm10, %xmm5 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm4 - xorps %xmm8 , %xmm4 - pxor %xmm12, %xmm5 - xorps %xmm13, %xmm5 - - movdqu 16*6(arg2), %xmm9 - movdqu 16*7(arg2), %xmm12 - pshufb %xmm11, %xmm9 - pshufb %xmm11, %xmm12 - movdqa %xmm6 , %xmm8 - movdqa %xmm7 , %xmm13 - pclmulqdq $0x0 , %xmm10, %xmm6 - pclmulqdq $0x11, %xmm10, %xmm8 - pclmulqdq $0x0 , %xmm10, %xmm7 - pclmulqdq $0x11, %xmm10, %xmm13 - pxor %xmm9 , %xmm6 - xorps %xmm8 , %xmm6 - pxor %xmm12, %xmm7 - xorps %xmm13, %xmm7 - - sub $128, arg3 - - # check if there is another 64B in the buffer to be able to fold - jge _fold_64_B_loop - ################################################################## - - - add $128, arg2 - # at this point, the buffer pointer is pointing at the last y Bytes - # of the buffer the 64B of folded data is in 4 of the xmm - # registers: xmm0, xmm1, xmm2, xmm3 - - - # fold the 8 xmm registers to 1 xmm register with different constants - - movdqa rk9(%rip), %xmm10 - movdqa %xmm0, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm0 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm0, %xmm7 - - movdqa rk11(%rip), %xmm10 - movdqa %xmm1, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm1 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm1, %xmm7 - - movdqa rk13(%rip), %xmm10 - movdqa %xmm2, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm2 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor 
%xmm8, %xmm7 - pxor %xmm2, %xmm7 - - movdqa rk15(%rip), %xmm10 - movdqa %xmm3, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm3 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm3, %xmm7 - - movdqa rk17(%rip), %xmm10 - movdqa %xmm4, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm4 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm4, %xmm7 - - movdqa rk19(%rip), %xmm10 - movdqa %xmm5, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm5 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - xorps %xmm5, %xmm7 - - movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2 - #imm value of pclmulqdq instruction - #will determine which constant to use - movdqa %xmm6, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm6 - pclmulqdq $0x0 , %xmm10, %xmm8 - pxor %xmm8, %xmm7 - pxor %xmm6, %xmm7 - - - # instead of 64, we add 48 to the loop counter to save 1 instruction - # from the loop instead of a cmp instruction, we use the negative - # flag with the jl instruction - add $128-16, arg3 - jl _final_reduction_for_128 - - # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7 - # and the rest is in memory. We can fold 16 bytes at a time if y>=16 - # continue folding 16B at a time - -_16B_reduction_loop: + movdqa .Lbswap_mask(%rip), BSWAP_MASK + + # For sizes less than 256 bytes, we can't fold 128 bytes at a time. + cmp $256, len + jl .Lless_than_256_bytes + + # Load the first 128 data bytes. Byte swapping is necessary to make the + # bit order match the polynomial coefficient order. + movdqu 16*0(buf), %xmm0 + movdqu 16*1(buf), %xmm1 + movdqu 16*2(buf), %xmm2 + movdqu 16*3(buf), %xmm3 + movdqu 16*4(buf), %xmm4 + movdqu 16*5(buf), %xmm5 + movdqu 16*6(buf), %xmm6 + movdqu 16*7(buf), %xmm7 + add $128, buf + pshufb BSWAP_MASK, %xmm0 + pshufb BSWAP_MASK, %xmm1 + pshufb BSWAP_MASK, %xmm2 + pshufb BSWAP_MASK, %xmm3 + pshufb BSWAP_MASK, %xmm4 + pshufb BSWAP_MASK, %xmm5 + pshufb BSWAP_MASK, %xmm6 + pshufb BSWAP_MASK, %xmm7 + + # XOR the first 16 data *bits* with the initial CRC value. + pxor %xmm8, %xmm8 + pinsrw $7, init_crc, %xmm8 + pxor %xmm8, %xmm0 + + movdqa .Lfold_across_128_bytes_consts(%rip), FOLD_CONSTS + + # Subtract 128 for the 128 data bytes just consumed. Subtract another + # 128 to simplify the termination condition of the following loop. + sub $256, len + + # While >= 128 data bytes remain (not counting xmm0-7), fold the 128 + # bytes xmm0-7 into them, storing the result back into xmm0-7. +.Lfold_128_bytes_loop: + fold_32_bytes 0, %xmm0, %xmm1 + fold_32_bytes 32, %xmm2, %xmm3 + fold_32_bytes 64, %xmm4, %xmm5 + fold_32_bytes 96, %xmm6, %xmm7 + add $128, buf + sub $128, len + jge .Lfold_128_bytes_loop + + # Now fold the 112 bytes in xmm0-xmm6 into the 16 bytes in xmm7. + + # Fold across 64 bytes. + movdqa .Lfold_across_64_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm0, %xmm4 + fold_16_bytes %xmm1, %xmm5 + fold_16_bytes %xmm2, %xmm6 + fold_16_bytes %xmm3, %xmm7 + # Fold across 32 bytes. + movdqa .Lfold_across_32_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm4, %xmm6 + fold_16_bytes %xmm5, %xmm7 + # Fold across 16 bytes. + movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS + fold_16_bytes %xmm6, %xmm7 + + # Add 128 to get the correct number of data bytes remaining in 0...127 + # (not counting xmm7), following the previous extra subtraction by 128. + # Then subtract 16 to simplify the termination condition of the + # following loop. + add $128-16, len + + # While >= 16 data bytes remain (not counting xmm7), fold the 16 bytes + # xmm7 into them, storing the result back into xmm7. 
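A note on the math behind these folds, as a sketch in the same notation as the constants further down (G(x) is the CRC-T10DIF generator polynomial): if the 128-bit accumulator holds R(x) = R_hi(x)*x^64 + R_lo(x), with the highest-degree coefficient in the most significant bit, then consuming N further data bytes multiplies its contribution by x^(8N), and

    R(x)*x^(8N) == R_hi(x)*(x^(8N+64) mod G(x)) + R_lo(x)*(x^(8N) mod G(x))    (mod G(x))

The two pclmulqdq immediates used by fold_32_bytes and fold_16_bytes, 0x11 and 0x00, compute exactly these two carry-less products; XOR-ing them into the next block of byte-swapped data keeps every intermediate value congruent to the true remainder.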
+ jl .Lfold_16_bytes_loop_done +.Lfold_16_bytes_loop: movdqa %xmm7, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm7 - pclmulqdq $0x0 , %xmm10, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 - movdqu (arg2), %xmm0 - pshufb %xmm11, %xmm0 + movdqu (buf), %xmm0 + pshufb BSWAP_MASK, %xmm0 pxor %xmm0 , %xmm7 - add $16, arg2 - sub $16, arg3 - # instead of a cmp instruction, we utilize the flags with the - # jge instruction equivalent of: cmp arg3, 16-16 - # check if there is any more 16B in the buffer to be able to fold - jge _16B_reduction_loop - - #now we have 16+z bytes left to reduce, where 0<= z < 16. - #first, we reduce the data in the xmm7 register - - -_final_reduction_for_128: - # check if any more data to fold. If not, compute the CRC of - # the final 128 bits - add $16, arg3 - je _128_done - - # here we are getting data that is less than 16 bytes. - # since we know that there was data before the pointer, we can - # offset the input pointer before the actual point, to receive - # exactly 16 bytes. after that the registers need to be adjusted. -_get_last_two_xmms: + add $16, buf + sub $16, len + jge .Lfold_16_bytes_loop + +.Lfold_16_bytes_loop_done: + # Add 16 to get the correct number of data bytes remaining in 0...15 + # (not counting xmm7), following the previous extra subtraction by 16. + add $16, len + je .Lreduce_final_16_bytes + +.Lhandle_partial_segment: + # Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first 16 + # bytes are in xmm7 and the rest are the remaining data in 'buf'. To do + # this without needing a fold constant for each possible 'len', redivide + # the bytes into a first chunk of 'len' bytes and a second chunk of 16 + # bytes, then fold the first chunk into the second. + movdqa %xmm7, %xmm2 - movdqu -16(arg2, arg3), %xmm1 - pshufb %xmm11, %xmm1 + # xmm1 = last 16 original data bytes + movdqu -16(buf, len), %xmm1 + pshufb BSWAP_MASK, %xmm1 - # get rid of the extra data that was loaded before - # load the shift constant - lea pshufb_shf_table+16(%rip), %rax - sub arg3, %rax + # xmm2 = high order part of second chunk: xmm7 left-shifted by 'len' bytes. + lea .Lbyteshift_table+16(%rip), %rax + sub len, %rax movdqu (%rax), %xmm0 - - # shift xmm2 to the left by arg3 bytes pshufb %xmm0, %xmm2 - # shift xmm7 to the right by 16-arg3 bytes - pxor mask1(%rip), %xmm0 + # xmm7 = first chunk: xmm7 right-shifted by '16-len' bytes. + pxor .Lmask1(%rip), %xmm0 pshufb %xmm0, %xmm7 + + # xmm1 = second chunk: 'len' bytes from xmm1 (low-order bytes), + # then '16-len' bytes from xmm2 (high-order bytes). pblendvb %xmm2, %xmm1 #xmm0 is implicit - # fold 16 Bytes - movdqa %xmm1, %xmm2 + # Fold the first chunk into the second chunk, storing the result in xmm7. movdqa %xmm7, %xmm8 - pclmulqdq $0x11, %xmm10, %xmm7 - pclmulqdq $0x0 , %xmm10, %xmm8 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 + pclmulqdq $0x00, FOLD_CONSTS, %xmm8 pxor %xmm8, %xmm7 - pxor %xmm2, %xmm7 + pxor %xmm1, %xmm7 -_128_done: - # compute crc of a 128-bit value - movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10 - movdqa %xmm7, %xmm0 +.Lreduce_final_16_bytes: + # Reduce the 128-bit value M(x), stored in xmm7, to the final 16-bit CRC - #64b fold - pclmulqdq $0x1, %xmm10, %xmm7 - pslldq $8 , %xmm0 - pxor %xmm0, %xmm7 + # Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'. + movdqa .Lfinal_fold_consts(%rip), FOLD_CONSTS - #32b fold + # Fold the high 64 bits into the low 64 bits, while also multiplying by + # x^64. 
This produces a 128-bit value congruent to x^64 * M(x) and + # whose low 48 bits are 0. movdqa %xmm7, %xmm0 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high bits * x^48 * (x^80 mod G(x)) + pslldq $8, %xmm0 + pxor %xmm0, %xmm7 # + low bits * x^64 - pand mask2(%rip), %xmm0 - - psrldq $12, %xmm7 - pclmulqdq $0x10, %xmm10, %xmm7 - pxor %xmm0, %xmm7 - - #barrett reduction -_barrett: - movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10 + # Fold the high 32 bits into the low 96 bits. This produces a 96-bit + # value congruent to x^64 * M(x) and whose low 48 bits are 0. movdqa %xmm7, %xmm0 - pclmulqdq $0x01, %xmm10, %xmm7 - pslldq $4, %xmm7 - pclmulqdq $0x11, %xmm10, %xmm7 + pand .Lmask2(%rip), %xmm0 # zero high 32 bits + psrldq $12, %xmm7 # extract high 32 bits + pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # high 32 bits * x^48 * (x^48 mod G(x)) + pxor %xmm0, %xmm7 # + low bits - pslldq $4, %xmm7 - pxor %xmm0, %xmm7 - pextrd $1, %xmm7, %eax + # Load G(x) and floor(x^48 / G(x)). + movdqa .Lbarrett_reduction_consts(%rip), FOLD_CONSTS -_cleanup: - # scale the result back to 16 bits - shr $16, %eax - mov %rcx, %rsp + # Use Barrett reduction to compute the final CRC value. + movdqa %xmm7, %xmm0 + pclmulqdq $0x11, FOLD_CONSTS, %xmm7 # high 32 bits * floor(x^48 / G(x)) + psrlq $32, %xmm7 # /= x^32 + pclmulqdq $0x00, FOLD_CONSTS, %xmm7 # *= G(x) + psrlq $48, %xmm0 + pxor %xmm7, %xmm0 # + low 16 nonzero bits + # Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0. + + pextrw $0, %xmm0, %eax ret -######################################################################## - .align 16 -_less_than_128: - - # check if there is enough buffer to be able to fold 16B at a time - cmp $32, arg3 - jl _less_than_32 - movdqa SHUF_MASK(%rip), %xmm11 +.Lless_than_256_bytes: + # Checksumming a buffer of length 16...255 bytes - # now if there is, load the constants - movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 + # Load the first 16 data bytes. + movdqu (buf), %xmm7 + pshufb BSWAP_MASK, %xmm7 + add $16, buf - movd arg1_low32, %xmm0 # get the initial crc value - pslldq $12, %xmm0 # align it to its correct place - movdqu (arg2), %xmm7 # load the plaintext - pshufb %xmm11, %xmm7 # byte-reflect the plaintext + # XOR the first 16 data *bits* with the initial CRC value. + pxor %xmm0, %xmm0 + pinsrw $7, init_crc, %xmm0 pxor %xmm0, %xmm7 - - # update the buffer pointer - add $16, arg2 - - # update the counter. subtract 32 instead of 16 to save one - # instruction from the loop - sub $32, arg3 - - jmp _16B_reduction_loop - - -.align 16 -_less_than_32: - # mov initial crc to the return value. this is necessary for - # zero-length buffers. - mov arg1_low32, %eax - test arg3, arg3 - je _cleanup - - movdqa SHUF_MASK(%rip), %xmm11 - - movd arg1_low32, %xmm0 # get the initial crc value - pslldq $12, %xmm0 # align it to its correct place - - cmp $16, arg3 - je _exact_16_left - jl _less_than_16_left - - movdqu (arg2), %xmm7 # load the plaintext - pshufb %xmm11, %xmm7 # byte-reflect the plaintext - pxor %xmm0 , %xmm7 # xor the initial crc value - add $16, arg2 - sub $16, arg3 - movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10 - jmp _get_last_two_xmms - - -.align 16 -_less_than_16_left: - # use stack space to load data less than 16 bytes, zero-out - # the 16B in memory first. 
- - pxor %xmm1, %xmm1 - mov %rsp, %r11 - movdqa %xmm1, (%r11) - - cmp $4, arg3 - jl _only_less_than_4 - - # backup the counter value - mov arg3, %r9 - cmp $8, arg3 - jl _less_than_8_left - - # load 8 Bytes - mov (arg2), %rax - mov %rax, (%r11) - add $8, %r11 - sub $8, arg3 - add $8, arg2 -_less_than_8_left: - - cmp $4, arg3 - jl _less_than_4_left - - # load 4 Bytes - mov (arg2), %eax - mov %eax, (%r11) - add $4, %r11 - sub $4, arg3 - add $4, arg2 -_less_than_4_left: - - cmp $2, arg3 - jl _less_than_2_left - - # load 2 Bytes - mov (arg2), %ax - mov %ax, (%r11) - add $2, %r11 - sub $2, arg3 - add $2, arg2 -_less_than_2_left: - cmp $1, arg3 - jl _zero_left - - # load 1 Byte - mov (arg2), %al - mov %al, (%r11) -_zero_left: - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - # shl r9, 4 - lea pshufb_shf_table+16(%rip), %rax - sub %r9, %rax - movdqu (%rax), %xmm0 - pxor mask1(%rip), %xmm0 - - pshufb %xmm0, %xmm7 - jmp _128_done - -.align 16 -_exact_16_left: - movdqu (arg2), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - jmp _128_done - -_only_less_than_4: - cmp $3, arg3 - jl _only_less_than_3 - - # load 3 Bytes - mov (arg2), %al - mov %al, (%r11) - - mov 1(arg2), %al - mov %al, 1(%r11) - - mov 2(arg2), %al - mov %al, 2(%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $5, %xmm7 - - jmp _barrett -_only_less_than_3: - cmp $2, arg3 - jl _only_less_than_2 - - # load 2 Bytes - mov (arg2), %al - mov %al, (%r11) - - mov 1(arg2), %al - mov %al, 1(%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $6, %xmm7 - - jmp _barrett -_only_less_than_2: - - # load 1 Byte - mov (arg2), %al - mov %al, (%r11) - - movdqa (%rsp), %xmm7 - pshufb %xmm11, %xmm7 - pxor %xmm0 , %xmm7 # xor the initial crc value - - psrldq $7, %xmm7 - - jmp _barrett - + movdqa .Lfold_across_16_bytes_consts(%rip), FOLD_CONSTS + cmp $16, len + je .Lreduce_final_16_bytes # len == 16 + sub $32, len + jge .Lfold_16_bytes_loop # 32 <= len <= 255 + add $16, len + jmp .Lhandle_partial_segment # 17 <= len <= 31 ENDPROC(crc_t10dif_pcl) .section .rodata, "a", @progbits .align 16 -# precomputed constants -# these constants are precomputed from the poly: -# 0x8bb70000 (0x8bb7 scaled to 32 bits) -# Q = 0x18BB70000 -# rk1 = 2^(32*3) mod Q << 32 -# rk2 = 2^(32*5) mod Q << 32 -# rk3 = 2^(32*15) mod Q << 32 -# rk4 = 2^(32*17) mod Q << 32 -# rk5 = 2^(32*3) mod Q << 32 -# rk6 = 2^(32*2) mod Q << 32 -# rk7 = floor(2^64/Q) -# rk8 = Q -rk1: -.quad 0x2d56000000000000 -rk2: -.quad 0x06df000000000000 -rk3: -.quad 0x9d9d000000000000 -rk4: -.quad 0x7cf5000000000000 -rk5: -.quad 0x2d56000000000000 -rk6: -.quad 0x1368000000000000 -rk7: -.quad 0x00000001f65a57f8 -rk8: -.quad 0x000000018bb70000 - -rk9: -.quad 0xceae000000000000 -rk10: -.quad 0xbfd6000000000000 -rk11: -.quad 0x1e16000000000000 -rk12: -.quad 0x713c000000000000 -rk13: -.quad 0xf7f9000000000000 -rk14: -.quad 0x80a6000000000000 -rk15: -.quad 0x044c000000000000 -rk16: -.quad 0xe658000000000000 -rk17: -.quad 0xad18000000000000 -rk18: -.quad 0xa497000000000000 -rk19: -.quad 0x6ee3000000000000 -rk20: -.quad 0xe7b5000000000000 - +# Fold constants precomputed from the polynomial 0x18bb7 +# G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0 +.Lfold_across_128_bytes_consts: + .quad 0x0000000000006123 # x^(8*128) mod G(x) + .quad 0x0000000000002295 # x^(8*128+64) mod G(x) 
+.Lfold_across_64_bytes_consts: + .quad 0x0000000000001069 # x^(4*128) mod G(x) + .quad 0x000000000000dd31 # x^(4*128+64) mod G(x) +.Lfold_across_32_bytes_consts: + .quad 0x000000000000857d # x^(2*128) mod G(x) + .quad 0x0000000000007acc # x^(2*128+64) mod G(x) +.Lfold_across_16_bytes_consts: + .quad 0x000000000000a010 # x^(1*128) mod G(x) + .quad 0x0000000000001faa # x^(1*128+64) mod G(x) +.Lfinal_fold_consts: + .quad 0x1368000000000000 # x^48 * (x^48 mod G(x)) + .quad 0x2d56000000000000 # x^48 * (x^80 mod G(x)) +.Lbarrett_reduction_consts: + .quad 0x0000000000018bb7 # G(x) + .quad 0x00000001f65a57f8 # floor(x^48 / G(x)) .section .rodata.cst16.mask1, "aM", @progbits, 16 .align 16 -mask1: -.octa 0x80808080808080808080808080808080 +.Lmask1: + .octa 0x80808080808080808080808080808080 .section .rodata.cst16.mask2, "aM", @progbits, 16 .align 16 -mask2: -.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF +.Lmask2: + .octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF + +.section .rodata.cst16.bswap_mask, "aM", @progbits, 16 +.align 16 +.Lbswap_mask: + .octa 0x000102030405060708090A0B0C0D0E0F -.section .rodata.cst16.SHUF_MASK, "aM", @progbits, 16 +.section .rodata.cst32.byteshift_table, "aM", @progbits, 32 .align 16 -SHUF_MASK: -.octa 0x000102030405060708090A0B0C0D0E0F - -.section .rodata.cst32.pshufb_shf_table, "aM", @progbits, 32 -.align 32 -pshufb_shf_table: -# use these values for shift constants for the pshufb instruction -# different alignments result in values as shown: -# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1 -# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2 -# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3 -# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4 -# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5 -# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6 -# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7 -# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8 -# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9 -# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10 -# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11 -# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12 -# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13 -# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14 -# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15 -.octa 0x8f8e8d8c8b8a89888786858483828100 -.octa 0x000e0d0c0b0a09080706050403020100 +# For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 - len] +# is the index vector to shift left by 'len' bytes, and is also {0x80, ..., +# 0x80} XOR the index vector to shift right by '16 - len' bytes. +.Lbyteshift_table: + .byte 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87 + .byte 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f + .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 + .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe , 0x0 diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c index cd4df9322501..0e785c0b2354 100644 --- a/arch/x86/crypto/crct10dif-pclmul_glue.c +++ b/arch/x86/crypto/crct10dif-pclmul_glue.c @@ -33,18 +33,12 @@ #include #include -asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf, - size_t len); +asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len); struct chksum_desc_ctx { __u16 crc; }; -/* - * Steps through buffer one byte at at time, calculates reflected - * crc using table. 
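The stale comment being removed just above referred to a table-driven, byte-at-a-time implementation. As a sketch for cross-checking the accelerated path, the same CRC can be computed bit by bit in a few lines of C (crc_t10dif_ref is a hypothetical helper, not kernel code; the polynomial is the G(x) = 0x18bb7 listed with the fold constants above, i.e. 0x8bb7 once the implicit x^16 term is dropped):

#include <stddef.h>
#include <stdint.h>

/* Bit-at-a-time CRC-T10DIF reference: polynomial 0x8bb7, MSB-first, no
 * reflection, zero init and xorout.  Intended only as a check against
 * crc_t10dif_pcl(), which documents that it assumes len >= 16. */
static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;	/* feed the next byte, MSB first */
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)(crc << 1) ^ 0x8bb7
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

For any buffer of at least 16 bytes, crc_t10dif_ref(crc, buf, len) and crc_t10dif_pcl(crc, buf, len) should agree, which is what the new "length >= 16" checks in chksum_update() and __chksum_finup() below rely on when deciding whether to take the FPU path.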
- */ - static int chksum_init(struct shash_desc *desc) { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); @@ -59,7 +53,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data, { struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); - if (irq_fpu_usable()) { + if (length >= 16 && irq_fpu_usable()) { kernel_fpu_begin(); ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); kernel_fpu_end(); @@ -79,7 +73,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out) static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len, u8 *out) { - if (irq_fpu_usable()) { + if (len >= 16 && irq_fpu_usable()) { kernel_fpu_begin(); *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len); kernel_fpu_end(); diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c index 0dccdda1eb3a..7e600f8bcdad 100644 --- a/arch/x86/crypto/morus1280_glue.c +++ b/arch/x86/crypto/morus1280_glue.c @@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad( static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state, struct morus1280_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1); - cursor_src += base; - cursor_dst += base; - chunksize &= MORUS1280_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS1280_BLOCK_SIZE) { + ops.crypt_blocks(state, walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, + MORUS1280_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus1280_ctx *ctx = crypto_aead_ctx(tfm); struct morus1280_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus1280_glue_process_crypt(&state, ops, req); + crypto_morus1280_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c index 7b58fe4d9bd1..cb3a81732016 100644 --- a/arch/x86/crypto/morus640_glue.c +++ b/arch/x86/crypto/morus640_glue.c @@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad( static void crypto_morus640_glue_process_crypt(struct morus640_state *state, struct morus640_ops ops, - struct aead_request *req) + struct skcipher_walk *walk) { - struct skcipher_walk walk; - u8 *cursor_src, *cursor_dst; - unsigned int chunksize, base; - - ops.skcipher_walk_init(&walk, req, false); - - while (walk.nbytes) { - cursor_src = walk.src.virt.addr; - cursor_dst = walk.dst.virt.addr; - chunksize = walk.nbytes; - - ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize); - - base = chunksize & ~(MORUS640_BLOCK_SIZE - 1); - cursor_src += 
base; - cursor_dst += base; - chunksize &= MORUS640_BLOCK_SIZE - 1; - - if (chunksize > 0) - ops.crypt_tail(state, cursor_src, cursor_dst, - chunksize); + while (walk->nbytes >= MORUS640_BLOCK_SIZE) { + ops.crypt_blocks(state, walk->src.virt.addr, + walk->dst.virt.addr, + round_down(walk->nbytes, MORUS640_BLOCK_SIZE)); + skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE); + } - skcipher_walk_done(&walk, 0); + if (walk->nbytes) { + ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr, + walk->nbytes); + skcipher_walk_done(walk, 0); } } @@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req, struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct morus640_ctx *ctx = crypto_aead_ctx(tfm); struct morus640_state state; + struct skcipher_walk walk; + + ops.skcipher_walk_init(&walk, req, true); kernel_fpu_begin(); ctx->ops->init(&state, &ctx->key, req->iv); crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen); - crypto_morus640_glue_process_crypt(&state, ops, req); + crypto_morus640_glue_process_crypt(&state, ops, &walk); ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen); kernel_fpu_end(); diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S index c88c670cb5fc..e6add74d78a5 100644 --- a/arch/x86/crypto/poly1305-sse2-x86_64.S +++ b/arch/x86/crypto/poly1305-sse2-x86_64.S @@ -272,6 +272,10 @@ ENTRY(poly1305_block_sse2) dec %rcx jnz .Ldoblock + # Zeroing of key material + mov %rcx,0x00(%rsp) + mov %rcx,0x08(%rsp) + add $0x10,%rsp pop %r12 pop %rbx diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 3cf7b533b3d1..8da78595d69d 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -24,7 +24,7 @@ 10 i386 unlink sys_unlink __ia32_sys_unlink 11 i386 execve sys_execve __ia32_compat_sys_execve 12 i386 chdir sys_chdir __ia32_sys_chdir -13 i386 time sys_time __ia32_compat_sys_time +13 i386 time sys_time32 __ia32_sys_time32 14 i386 mknod sys_mknod __ia32_sys_mknod 15 i386 chmod sys_chmod __ia32_sys_chmod 16 i386 lchown sys_lchown16 __ia32_sys_lchown16 @@ -36,12 +36,12 @@ 22 i386 umount sys_oldumount __ia32_sys_oldumount 23 i386 setuid sys_setuid16 __ia32_sys_setuid16 24 i386 getuid sys_getuid16 __ia32_sys_getuid16 -25 i386 stime sys_stime __ia32_compat_sys_stime +25 i386 stime sys_stime32 __ia32_sys_stime32 26 i386 ptrace sys_ptrace __ia32_compat_sys_ptrace 27 i386 alarm sys_alarm __ia32_sys_alarm 28 i386 oldfstat sys_fstat __ia32_sys_fstat 29 i386 pause sys_pause __ia32_sys_pause -30 i386 utime sys_utime __ia32_compat_sys_utime +30 i386 utime sys_utime32 __ia32_sys_utime32 31 i386 stty 32 i386 gtty 33 i386 access sys_access __ia32_sys_access @@ -135,7 +135,7 @@ 121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname 122 i386 uname sys_newuname __ia32_sys_newuname 123 i386 modify_ldt sys_modify_ldt __ia32_sys_modify_ldt -124 i386 adjtimex sys_adjtimex __ia32_compat_sys_adjtimex +124 i386 adjtimex sys_adjtimex_time32 __ia32_sys_adjtimex_time32 125 i386 mprotect sys_mprotect __ia32_sys_mprotect 126 i386 sigprocmask sys_sigprocmask __ia32_compat_sys_sigprocmask 127 i386 create_module @@ -172,8 +172,8 @@ 158 i386 sched_yield sys_sched_yield __ia32_sys_sched_yield 159 i386 sched_get_priority_max sys_sched_get_priority_max __ia32_sys_sched_get_priority_max 160 i386 sched_get_priority_min sys_sched_get_priority_min __ia32_sys_sched_get_priority_min -161 i386 sched_rr_get_interval 
sys_sched_rr_get_interval __ia32_compat_sys_sched_rr_get_interval -162 i386 nanosleep sys_nanosleep __ia32_compat_sys_nanosleep +161 i386 sched_rr_get_interval sys_sched_rr_get_interval_time32 __ia32_sys_sched_rr_get_interval_time32 +162 i386 nanosleep sys_nanosleep_time32 __ia32_sys_nanosleep_time32 163 i386 mremap sys_mremap __ia32_sys_mremap 164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16 165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16 @@ -188,7 +188,7 @@ 174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction 175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_sys_rt_sigprocmask 176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending -177 i386 rt_sigtimedwait sys_rt_sigtimedwait __ia32_compat_sys_rt_sigtimedwait +177 i386 rt_sigtimedwait sys_rt_sigtimedwait_time32 __ia32_compat_sys_rt_sigtimedwait_time32 178 i386 rt_sigqueueinfo sys_rt_sigqueueinfo __ia32_compat_sys_rt_sigqueueinfo 179 i386 rt_sigsuspend sys_rt_sigsuspend __ia32_sys_rt_sigsuspend 180 i386 pread64 sys_pread64 __ia32_compat_sys_x86_pread @@ -251,14 +251,14 @@ 237 i386 fremovexattr sys_fremovexattr __ia32_sys_fremovexattr 238 i386 tkill sys_tkill __ia32_sys_tkill 239 i386 sendfile64 sys_sendfile64 __ia32_sys_sendfile64 -240 i386 futex sys_futex __ia32_compat_sys_futex +240 i386 futex sys_futex_time32 __ia32_sys_futex_time32 241 i386 sched_setaffinity sys_sched_setaffinity __ia32_compat_sys_sched_setaffinity 242 i386 sched_getaffinity sys_sched_getaffinity __ia32_compat_sys_sched_getaffinity 243 i386 set_thread_area sys_set_thread_area __ia32_sys_set_thread_area 244 i386 get_thread_area sys_get_thread_area __ia32_sys_get_thread_area 245 i386 io_setup sys_io_setup __ia32_compat_sys_io_setup 246 i386 io_destroy sys_io_destroy __ia32_sys_io_destroy -247 i386 io_getevents sys_io_getevents __ia32_compat_sys_io_getevents +247 i386 io_getevents sys_io_getevents_time32 __ia32_sys_io_getevents_time32 248 i386 io_submit sys_io_submit __ia32_compat_sys_io_submit 249 i386 io_cancel sys_io_cancel __ia32_sys_io_cancel 250 i386 fadvise64 sys_fadvise64 __ia32_compat_sys_x86_fadvise64 @@ -271,18 +271,18 @@ 257 i386 remap_file_pages sys_remap_file_pages __ia32_sys_remap_file_pages 258 i386 set_tid_address sys_set_tid_address __ia32_sys_set_tid_address 259 i386 timer_create sys_timer_create __ia32_compat_sys_timer_create -260 i386 timer_settime sys_timer_settime __ia32_compat_sys_timer_settime -261 i386 timer_gettime sys_timer_gettime __ia32_compat_sys_timer_gettime +260 i386 timer_settime sys_timer_settime32 __ia32_sys_timer_settime32 +261 i386 timer_gettime sys_timer_gettime32 __ia32_sys_timer_gettime32 262 i386 timer_getoverrun sys_timer_getoverrun __ia32_sys_timer_getoverrun 263 i386 timer_delete sys_timer_delete __ia32_sys_timer_delete -264 i386 clock_settime sys_clock_settime __ia32_compat_sys_clock_settime -265 i386 clock_gettime sys_clock_gettime __ia32_compat_sys_clock_gettime -266 i386 clock_getres sys_clock_getres __ia32_compat_sys_clock_getres -267 i386 clock_nanosleep sys_clock_nanosleep __ia32_compat_sys_clock_nanosleep +264 i386 clock_settime sys_clock_settime32 __ia32_sys_clock_settime32 +265 i386 clock_gettime sys_clock_gettime32 __ia32_sys_clock_gettime32 +266 i386 clock_getres sys_clock_getres_time32 __ia32_sys_clock_getres_time32 +267 i386 clock_nanosleep sys_clock_nanosleep_time32 __ia32_sys_clock_nanosleep_time32 268 i386 statfs64 sys_statfs64 __ia32_compat_sys_statfs64 269 i386 fstatfs64 sys_fstatfs64 __ia32_compat_sys_fstatfs64 270 i386 tgkill sys_tgkill 
__ia32_sys_tgkill -271 i386 utimes sys_utimes __ia32_compat_sys_utimes +271 i386 utimes sys_utimes_time32 __ia32_sys_utimes_time32 272 i386 fadvise64_64 sys_fadvise64_64 __ia32_compat_sys_x86_fadvise64_64 273 i386 vserver 274 i386 mbind sys_mbind __ia32_sys_mbind @@ -290,8 +290,8 @@ 276 i386 set_mempolicy sys_set_mempolicy __ia32_sys_set_mempolicy 277 i386 mq_open sys_mq_open __ia32_compat_sys_mq_open 278 i386 mq_unlink sys_mq_unlink __ia32_sys_mq_unlink -279 i386 mq_timedsend sys_mq_timedsend __ia32_compat_sys_mq_timedsend -280 i386 mq_timedreceive sys_mq_timedreceive __ia32_compat_sys_mq_timedreceive +279 i386 mq_timedsend sys_mq_timedsend_time32 __ia32_sys_mq_timedsend_time32 +280 i386 mq_timedreceive sys_mq_timedreceive_time32 __ia32_sys_mq_timedreceive_time32 281 i386 mq_notify sys_mq_notify __ia32_compat_sys_mq_notify 282 i386 mq_getsetattr sys_mq_getsetattr __ia32_compat_sys_mq_getsetattr 283 i386 kexec_load sys_kexec_load __ia32_compat_sys_kexec_load @@ -310,7 +310,7 @@ 296 i386 mkdirat sys_mkdirat __ia32_sys_mkdirat 297 i386 mknodat sys_mknodat __ia32_sys_mknodat 298 i386 fchownat sys_fchownat __ia32_sys_fchownat -299 i386 futimesat sys_futimesat __ia32_compat_sys_futimesat +299 i386 futimesat sys_futimesat_time32 __ia32_sys_futimesat_time32 300 i386 fstatat64 sys_fstatat64 __ia32_compat_sys_x86_fstatat 301 i386 unlinkat sys_unlinkat __ia32_sys_unlinkat 302 i386 renameat sys_renameat __ia32_sys_renameat @@ -319,8 +319,8 @@ 305 i386 readlinkat sys_readlinkat __ia32_sys_readlinkat 306 i386 fchmodat sys_fchmodat __ia32_sys_fchmodat 307 i386 faccessat sys_faccessat __ia32_sys_faccessat -308 i386 pselect6 sys_pselect6 __ia32_compat_sys_pselect6 -309 i386 ppoll sys_ppoll __ia32_compat_sys_ppoll +308 i386 pselect6 sys_pselect6_time32 __ia32_compat_sys_pselect6_time32 +309 i386 ppoll sys_ppoll_time32 __ia32_compat_sys_ppoll_time32 310 i386 unshare sys_unshare __ia32_sys_unshare 311 i386 set_robust_list sys_set_robust_list __ia32_compat_sys_set_robust_list 312 i386 get_robust_list sys_get_robust_list __ia32_compat_sys_get_robust_list @@ -331,13 +331,13 @@ 317 i386 move_pages sys_move_pages __ia32_compat_sys_move_pages 318 i386 getcpu sys_getcpu __ia32_sys_getcpu 319 i386 epoll_pwait sys_epoll_pwait __ia32_sys_epoll_pwait -320 i386 utimensat sys_utimensat __ia32_compat_sys_utimensat +320 i386 utimensat sys_utimensat_time32 __ia32_sys_utimensat_time32 321 i386 signalfd sys_signalfd __ia32_compat_sys_signalfd 322 i386 timerfd_create sys_timerfd_create __ia32_sys_timerfd_create 323 i386 eventfd sys_eventfd __ia32_sys_eventfd 324 i386 fallocate sys_fallocate __ia32_compat_sys_x86_fallocate -325 i386 timerfd_settime sys_timerfd_settime __ia32_compat_sys_timerfd_settime -326 i386 timerfd_gettime sys_timerfd_gettime __ia32_compat_sys_timerfd_gettime +325 i386 timerfd_settime sys_timerfd_settime32 __ia32_sys_timerfd_settime32 +326 i386 timerfd_gettime sys_timerfd_gettime32 __ia32_sys_timerfd_gettime32 327 i386 signalfd4 sys_signalfd4 __ia32_compat_sys_signalfd4 328 i386 eventfd2 sys_eventfd2 __ia32_sys_eventfd2 329 i386 epoll_create1 sys_epoll_create1 __ia32_sys_epoll_create1 @@ -348,13 +348,13 @@ 334 i386 pwritev sys_pwritev __ia32_compat_sys_pwritev 335 i386 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo __ia32_compat_sys_rt_tgsigqueueinfo 336 i386 perf_event_open sys_perf_event_open __ia32_sys_perf_event_open -337 i386 recvmmsg sys_recvmmsg __ia32_compat_sys_recvmmsg +337 i386 recvmmsg sys_recvmmsg_time32 __ia32_compat_sys_recvmmsg_time32 338 i386 fanotify_init sys_fanotify_init 
__ia32_sys_fanotify_init 339 i386 fanotify_mark sys_fanotify_mark __ia32_compat_sys_fanotify_mark 340 i386 prlimit64 sys_prlimit64 __ia32_sys_prlimit64 341 i386 name_to_handle_at sys_name_to_handle_at __ia32_sys_name_to_handle_at 342 i386 open_by_handle_at sys_open_by_handle_at __ia32_compat_sys_open_by_handle_at -343 i386 clock_adjtime sys_clock_adjtime __ia32_compat_sys_clock_adjtime +343 i386 clock_adjtime sys_clock_adjtime32 __ia32_sys_clock_adjtime32 344 i386 syncfs sys_syncfs __ia32_sys_syncfs 345 i386 sendmmsg sys_sendmmsg __ia32_compat_sys_sendmmsg 346 i386 setns sys_setns __ia32_sys_setns @@ -396,5 +396,39 @@ 382 i386 pkey_free sys_pkey_free __ia32_sys_pkey_free 383 i386 statx sys_statx __ia32_sys_statx 384 i386 arch_prctl sys_arch_prctl __ia32_compat_sys_arch_prctl -385 i386 io_pgetevents sys_io_pgetevents __ia32_compat_sys_io_pgetevents +385 i386 io_pgetevents sys_io_pgetevents_time32 __ia32_compat_sys_io_pgetevents 386 i386 rseq sys_rseq __ia32_sys_rseq +# don't use numbers 387 through 392, add new calls at the end +393 i386 semget sys_semget __ia32_sys_semget +394 i386 semctl sys_semctl __ia32_compat_sys_semctl +395 i386 shmget sys_shmget __ia32_sys_shmget +396 i386 shmctl sys_shmctl __ia32_compat_sys_shmctl +397 i386 shmat sys_shmat __ia32_compat_sys_shmat +398 i386 shmdt sys_shmdt __ia32_sys_shmdt +399 i386 msgget sys_msgget __ia32_sys_msgget +400 i386 msgsnd sys_msgsnd __ia32_compat_sys_msgsnd +401 i386 msgrcv sys_msgrcv __ia32_compat_sys_msgrcv +402 i386 msgctl sys_msgctl __ia32_compat_sys_msgctl +403 i386 clock_gettime64 sys_clock_gettime __ia32_sys_clock_gettime +404 i386 clock_settime64 sys_clock_settime __ia32_sys_clock_settime +405 i386 clock_adjtime64 sys_clock_adjtime __ia32_sys_clock_adjtime +406 i386 clock_getres_time64 sys_clock_getres __ia32_sys_clock_getres +407 i386 clock_nanosleep_time64 sys_clock_nanosleep __ia32_sys_clock_nanosleep +408 i386 timer_gettime64 sys_timer_gettime __ia32_sys_timer_gettime +409 i386 timer_settime64 sys_timer_settime __ia32_sys_timer_settime +410 i386 timerfd_gettime64 sys_timerfd_gettime __ia32_sys_timerfd_gettime +411 i386 timerfd_settime64 sys_timerfd_settime __ia32_sys_timerfd_settime +412 i386 utimensat_time64 sys_utimensat __ia32_sys_utimensat +413 i386 pselect6_time64 sys_pselect6 __ia32_compat_sys_pselect6_time64 +414 i386 ppoll_time64 sys_ppoll __ia32_compat_sys_ppoll_time64 +416 i386 io_pgetevents_time64 sys_io_pgetevents __ia32_sys_io_pgetevents +417 i386 recvmmsg_time64 sys_recvmmsg __ia32_compat_sys_recvmmsg_time64 +418 i386 mq_timedsend_time64 sys_mq_timedsend __ia32_sys_mq_timedsend +419 i386 mq_timedreceive_time64 sys_mq_timedreceive __ia32_sys_mq_timedreceive +420 i386 semtimedop_time64 sys_semtimedop __ia32_sys_semtimedop +421 i386 rt_sigtimedwait_time64 sys_rt_sigtimedwait __ia32_compat_sys_rt_sigtimedwait_time64 +422 i386 futex_time64 sys_futex __ia32_sys_futex +423 i386 sched_rr_get_interval_time64 sys_sched_rr_get_interval __ia32_sys_sched_rr_get_interval +425 i386 io_uring_setup sys_io_uring_setup __ia32_sys_io_uring_setup +426 i386 io_uring_enter sys_io_uring_enter __ia32_sys_io_uring_enter +427 i386 io_uring_register sys_io_uring_register __ia32_sys_io_uring_register diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index f0b1709a5ffb..c768447f97ec 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -343,6 +343,11 @@ 332 common statx __x64_sys_statx 333 common io_pgetevents __x64_sys_io_pgetevents 334 common rseq 
__x64_sys_rseq +# don't use numbers 387 through 423, add new calls after the last +# 'common' entry +425 common io_uring_setup __x64_sys_io_uring_setup +426 common io_uring_enter __x64_sys_io_uring_enter +427 common io_uring_register __x64_sys_io_uring_register # # x32-specific system call numbers start at 512 to avoid cache impact @@ -361,7 +366,7 @@ 520 x32 execve __x32_compat_sys_execve/ptregs 521 x32 ptrace __x32_compat_sys_ptrace 522 x32 rt_sigpending __x32_compat_sys_rt_sigpending -523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait +523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait_time64 524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo 525 x32 sigaltstack __x32_compat_sys_sigaltstack 526 x32 timer_create __x32_compat_sys_timer_create @@ -375,7 +380,7 @@ 534 x32 preadv __x32_compat_sys_preadv64 535 x32 pwritev __x32_compat_sys_pwritev64 536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo -537 x32 recvmmsg __x32_compat_sys_recvmmsg +537 x32 recvmmsg __x32_compat_sys_recvmmsg_time64 538 x32 sendmmsg __x32_compat_sys_sendmmsg 539 x32 process_vm_readv __x32_compat_sys_process_vm_readv 540 x32 process_vm_writev __x32_compat_sys_process_vm_writev diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index d50bb4dc0650..62f317c9113a 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -253,15 +253,6 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config) return -EOPNOTSUPP; } -static const struct perf_event_attr ibs_notsupp = { - .exclude_user = 1, - .exclude_kernel = 1, - .exclude_hv = 1, - .exclude_idle = 1, - .exclude_host = 1, - .exclude_guest = 1, -}; - static int perf_ibs_init(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -282,9 +273,6 @@ static int perf_ibs_init(struct perf_event *event) if (event->pmu != &perf_ibs->pmu) return -ENOENT; - if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp)) - return -EINVAL; - if (config & ~perf_ibs->config_mask) return -EINVAL; @@ -537,6 +525,7 @@ static struct perf_ibs perf_ibs_fetch = { .start = perf_ibs_start, .stop = perf_ibs_stop, .read = perf_ibs_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }, .msr = MSR_AMD64_IBSFETCHCTL, .config_mask = IBS_FETCH_CONFIG_MASK, diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index 3210fee27e7f..7635c23f7d82 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c @@ -223,11 +223,6 @@ static int perf_iommu_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - /* IOMMU counters do not have usr/os/guest/host bits */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_host || event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -414,6 +409,7 @@ static const struct pmu iommu_pmu __initconst = { .read = perf_iommu_read, .task_ctx_nr = perf_invalid_context, .attr_groups = amd_iommu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static __init int init_one_iommu(unsigned int idx) diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c index 2aefacf5c5b2..c5ff084551c6 100644 --- a/arch/x86/events/amd/power.c +++ b/arch/x86/events/amd/power.c @@ -136,14 +136,7 @@ static int pmu_event_init(struct perf_event *event) return -ENOENT; /* Unsupported modes and filters. 
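The same conversion is applied to each of these AMD PMU drivers: the open-coded rejection of the exclude_user/exclude_kernel/... attribute bits goes away and the driver instead advertises PERF_PMU_CAP_NO_EXCLUDE, so the perf core rejects such requests in one place. A minimal sketch of what that leaves in a driver (example_* names are hypothetical; a real struct pmu also needs add/del/start/stop/read callbacks):

#include <linux/perf_event.h>

static int example_event_init(struct perf_event *event)
{
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * No per-driver exclude_* checks here: PERF_PMU_CAP_NO_EXCLUDE
	 * below tells the core to fail event creation with -EINVAL when
	 * any exclude_* bit is set.
	 */
	return 0;
}

static struct pmu example_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= example_event_init,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};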
*/ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - /* no sampling */ - event->attr.sample_period) + if (event->attr.sample_period) return -EINVAL; if (cfg != AMD_POWER_EVENTSEL_PKG) @@ -226,6 +219,7 @@ static struct pmu pmu_class = { .start = pmu_event_start, .stop = pmu_event_stop, .read = pmu_event_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static int power_cpu_exit(unsigned int cpu) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 398df6eaa109..79cfd3b30ceb 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -201,11 +201,6 @@ static int amd_uncore_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - /* NB and Last level cache counters do not have usr/os/guest/host bits */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_host || event->attr.exclude_guest) - return -EINVAL; - /* and we do not enable counter overflow interrupts */ hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB; hwc->idx = -1; @@ -307,6 +302,7 @@ static struct pmu amd_nb_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static struct pmu amd_llc_pmu = { @@ -317,6 +313,7 @@ static struct pmu amd_llc_pmu = { .start = amd_uncore_start, .stop = amd_uncore_stop, .read = amd_uncore_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 374a19712e20..e2b1447192a8 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1995,7 +1995,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu) */ static void free_fake_cpuc(struct cpu_hw_events *cpuc) { - kfree(cpuc->shared_regs); + intel_cpuc_finish(cpuc); kfree(cpuc); } @@ -2007,14 +2007,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void) cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); if (!cpuc) return ERR_PTR(-ENOMEM); - - /* only needed, if we have extra_regs */ - if (x86_pmu.extra_regs) { - cpuc->shared_regs = allocate_shared_regs(cpu); - if (!cpuc->shared_regs) - goto error; - } cpuc->is_fake = 1; + + if (intel_cpuc_prepare(cpuc, cpu)) + goto error; + return cpuc; error: free_fake_cpuc(cpuc); @@ -2278,6 +2275,19 @@ void perf_check_microcode(void) x86_pmu.check_microcode(); } +static int x86_pmu_check_period(struct perf_event *event, u64 value) +{ + if (x86_pmu.check_period && x86_pmu.check_period(event, value)) + return -EINVAL; + + if (value && x86_pmu.limit_period) { + if (x86_pmu.limit_period(event, value) > value) + return -EINVAL; + } + + return 0; +} + static struct pmu pmu = { .pmu_enable = x86_pmu_enable, .pmu_disable = x86_pmu_disable, @@ -2302,6 +2312,7 @@ static struct pmu pmu = { .event_idx = x86_pmu_event_idx, .sched_task = x86_pmu_sched_task, .task_ctx_size = sizeof(struct x86_perf_task_context), + .check_period = x86_pmu_check_period, }; void arch_perf_update_userpage(struct perf_event *event, diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index a01ef1b0f883..7cdd7b13bbda 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -77,10 +77,12 @@ static size_t buf_size(struct page *page) } static void * -bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) 
+bts_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool overwrite) { struct bts_buffer *buf; struct page *page; + int cpu = event->cpu; int node = (cpu == -1) ? cpu : cpu_to_node(cpu); unsigned long offset; size_t size = nr_pages << PAGE_SHIFT; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 40e12cfc87f6..35102ecdfc8d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "../perf_event.h" @@ -1999,6 +2000,39 @@ static void intel_pmu_nhm_enable_all(int added) intel_pmu_enable_all(added); } +static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) +{ + u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0; + + if (cpuc->tfa_shadow != val) { + cpuc->tfa_shadow = val; + wrmsrl(MSR_TSX_FORCE_ABORT, val); + } +} + +static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) +{ + /* + * We're going to use PMC3, make sure TFA is set before we touch it. + */ + if (cntr == 3 && !cpuc->is_fake) + intel_set_tfa(cpuc, true); +} + +static void intel_tfa_pmu_enable_all(int added) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + /* + * If we find PMC3 is no longer used when we enable the PMU, we can + * clear TFA. + */ + if (!test_bit(3, cpuc->active_mask)) + intel_set_tfa(cpuc, false); + + intel_pmu_enable_all(added); +} + static void enable_counter_freeze(void) { update_debugctlmsr(get_debugctlmsr() | @@ -2768,6 +2802,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc) raw_spin_unlock(&excl_cntrs->lock); } +static struct event_constraint * +dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) +{ + WARN_ON_ONCE(!cpuc->constraint_list); + + if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { + struct event_constraint *cx; + + /* + * grab pre-allocated constraint entry + */ + cx = &cpuc->constraint_list[idx]; + + /* + * initialize dynamic constraint + * with static constraint + */ + *cx = *c; + + /* + * mark constraint as dynamic + */ + cx->flags |= PERF_X86_EVENT_DYNAMIC; + c = cx; + } + + return c; +} + static struct event_constraint * intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, int idx, struct event_constraint *c) @@ -2798,27 +2861,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, * only needed when constraint has not yet * been cloned (marked dynamic) */ - if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) { - struct event_constraint *cx; - - /* - * grab pre-allocated constraint entry - */ - cx = &cpuc->constraint_list[idx]; - - /* - * initialize dynamic constraint - * with static constraint - */ - *cx = *c; - - /* - * mark constraint as dynamic, so we - * can free it later on - */ - cx->flags |= PERF_X86_EVENT_DYNAMIC; - c = cx; - } + c = dyn_constraint(cpuc, c, idx); /* * From here on, the constraint is dynamic. @@ -3206,16 +3249,27 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; - /* - * If PMU counter has PEBS enabled it is not enough to disable counter - * on a guest entry since PEBS memory write can overshoot guest entry - * and corrupt guest memory. Disabling PEBS solves the problem. 
- */ - arr[1].msr = MSR_IA32_PEBS_ENABLE; - arr[1].host = cpuc->pebs_enabled; - arr[1].guest = 0; + if (x86_pmu.flags & PMU_FL_PEBS_ALL) + arr[0].guest &= ~cpuc->pebs_enabled; + else + arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK); + *nr = 1; + + if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) { + /* + * If PMU counter has PEBS enabled it is not enough to + * disable counter on a guest entry since PEBS memory + * write can overshoot guest entry and corrupt guest + * memory. Disabling PEBS solves the problem. + * + * Don't do this if the CPU already enforces it. + */ + arr[1].msr = MSR_IA32_PEBS_ENABLE; + arr[1].host = cpuc->pebs_enabled; + arr[1].guest = 0; + *nr = 2; + } - *nr = 2; return arr; } @@ -3345,6 +3399,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, return c; } +static bool allow_tsx_force_abort = true; + +static struct event_constraint * +tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); + + /* + * Without TFA we must not use PMC3. + */ + if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) { + c = dyn_constraint(cpuc, c, idx); + c->idxmsk64 &= ~(1ULL << 3); + c->weight--; + } + + return c; +} + /* * Broadwell: * @@ -3398,7 +3472,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config) return x86_event_sysfs_show(page, config, event); } -struct intel_shared_regs *allocate_shared_regs(int cpu) +static struct intel_shared_regs *allocate_shared_regs(int cpu) { struct intel_shared_regs *regs; int i; @@ -3430,23 +3504,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu) return c; } -static int intel_pmu_cpu_prepare(int cpu) -{ - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); +int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) +{ if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { cpuc->shared_regs = allocate_shared_regs(cpu); if (!cpuc->shared_regs) goto err; } - if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); - cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); + cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); if (!cpuc->constraint_list) goto err_shared_regs; + } + if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { cpuc->excl_cntrs = allocate_excl_cntrs(cpu); if (!cpuc->excl_cntrs) goto err_constraint_list; @@ -3468,6 +3543,11 @@ err: return -ENOMEM; } +static int intel_pmu_cpu_prepare(int cpu) +{ + return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu); +} + static void flip_smm_bit(void *data) { unsigned long set = *(unsigned long *)data; @@ -3542,9 +3622,8 @@ static void intel_pmu_cpu_starting(int cpu) } } -static void free_excl_cntrs(int cpu) +static void free_excl_cntrs(struct cpu_hw_events *cpuc) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); struct intel_excl_cntrs *c; c = cpuc->excl_cntrs; @@ -3552,14 +3631,22 @@ static void free_excl_cntrs(int cpu) if (c->core_id == -1 || --c->refcnt == 0) kfree(c); cpuc->excl_cntrs = NULL; - kfree(cpuc->constraint_list); - cpuc->constraint_list = NULL; } + + kfree(cpuc->constraint_list); + cpuc->constraint_list = NULL; } static void intel_pmu_cpu_dying(int cpu) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + fini_debug_store_on_cpu(cpu); + + if (x86_pmu.counter_freezing) + disable_counter_freeze(); +} + +void intel_cpuc_finish(struct cpu_hw_events *cpuc) +{ struct intel_shared_regs *pc; pc = 
cpuc->shared_regs; @@ -3569,12 +3656,12 @@ static void intel_pmu_cpu_dying(int cpu) cpuc->shared_regs = NULL; } - free_excl_cntrs(cpu); - - fini_debug_store_on_cpu(cpu); + free_excl_cntrs(cpuc); +} - if (x86_pmu.counter_freezing) - disable_counter_freeze(); +static void intel_pmu_cpu_dead(int cpu) +{ + intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu)); } static void intel_pmu_sched_task(struct perf_event_context *ctx, @@ -3584,6 +3671,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, intel_pmu_lbr_sched_task(ctx, sched_in); } +static int intel_pmu_check_period(struct perf_event *event, u64 value) +{ + return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; +} + PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); @@ -3663,6 +3755,9 @@ static __initconst const struct x86_pmu core_pmu = { .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, + + .check_period = intel_pmu_check_period, }; static struct attribute *intel_pmu_attrs[]; @@ -3703,8 +3798,12 @@ static __initconst const struct x86_pmu intel_pmu = { .cpu_prepare = intel_pmu_cpu_prepare, .cpu_starting = intel_pmu_cpu_starting, .cpu_dying = intel_pmu_cpu_dying, + .cpu_dead = intel_pmu_cpu_dead, + .guest_get_msrs = intel_guest_get_msrs, .sched_task = intel_pmu_sched_task, + + .check_period = intel_pmu_check_period, }; static __init void intel_clovertown_quirk(void) @@ -3733,36 +3832,62 @@ static __init void intel_clovertown_quirk(void) x86_pmu.pebs_constraints = NULL; } -static int intel_snb_pebs_broken(int cpu) +static const struct x86_cpu_desc isolation_ucodes[] = { + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE, 3, 0x0000001f), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT, 1, 0x0000001e), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E, 1, 0x00000015), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 2, 0x00000037), + INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X, 4, 0x0000000a), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE, 4, 0x00000023), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E, 1, 0x00000014), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 2, 0x00000010), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 3, 0x07000009), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 4, 0x0f000009), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D, 5, 0x0e000002), + INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE, 3, 0x0000007c), + INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP, 3, 0x0000007c), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 9, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 9, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 10, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 11, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE, 12, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 10, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 11, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 12, 0x0000004e), + INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP, 13, 0x0000004e), + {} +}; + +static void intel_check_pebs_isolation(void) { - u32 rev = UINT_MAX; /* default to broken for unknown models */ + x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes); +} - switch (cpu_data(cpu).x86_model) { - case INTEL_FAM6_SANDYBRIDGE: - rev = 0x28; - break; +static __init void intel_pebs_isolation_quirk(void) +{ + 
WARN_ON_ONCE(x86_pmu.check_microcode); + x86_pmu.check_microcode = intel_check_pebs_isolation; + intel_check_pebs_isolation(); +} - case INTEL_FAM6_SANDYBRIDGE_X: - switch (cpu_data(cpu).x86_stepping) { - case 6: rev = 0x618; break; - case 7: rev = 0x70c; break; - } - } +static const struct x86_cpu_desc pebs_ucodes[] = { + INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE, 7, 0x00000028), + INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 6, 0x00000618), + INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X, 7, 0x0000070c), + {} +}; - return (cpu_data(cpu).microcode < rev); +static bool intel_snb_pebs_broken(void) +{ + return !x86_cpu_has_min_microcode_rev(pebs_ucodes); } static void intel_snb_check_microcode(void) { - int pebs_broken = 0; - int cpu; - - for_each_online_cpu(cpu) { - if ((pebs_broken = intel_snb_pebs_broken(cpu))) - break; - } - - if (pebs_broken == x86_pmu.pebs_broken) + if (intel_snb_pebs_broken() == x86_pmu.pebs_broken) return; /* @@ -3879,23 +4004,22 @@ static __init void intel_nehalem_quirk(void) } } -static bool intel_glp_counter_freezing_broken(int cpu) -{ - u32 rev = UINT_MAX; /* default to broken for unknown stepping */ - - switch (cpu_data(cpu).x86_stepping) { - case 1: - rev = 0x28; - break; - case 8: - rev = 0x6; - break; - } +static const struct x86_cpu_desc counter_freezing_ucodes[] = { + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 2, 0x0000000e), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 9, 0x0000002e), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT, 10, 0x00000008), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X, 1, 0x00000028), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 1, 0x00000028), + INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS, 8, 0x00000006), + {} +}; - return (cpu_data(cpu).microcode < rev); +static bool intel_counter_freezing_broken(void) +{ + return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes); } -static __init void intel_glp_counter_freezing_quirk(void) +static __init void intel_counter_freezing_quirk(void) { /* Check if it's already disabled */ if (disable_counter_freezing) @@ -3905,7 +4029,7 @@ static __init void intel_glp_counter_freezing_quirk(void) * If the system starts with the wrong ucode, leave the * counter-freezing feature permanently disabled. 
*/ - if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) { + if (intel_counter_freezing_broken()) { pr_info("PMU counter freezing disabled due to CPU errata," "please upgrade microcode\n"); x86_pmu.counter_freezing = false; @@ -4055,8 +4179,11 @@ static struct attribute *intel_pmu_caps_attrs[] = { NULL }; +DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort); + static struct attribute *intel_pmu_attrs[] = { &dev_attr_freeze_on_smi.attr, + NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */ NULL, }; @@ -4168,6 +4295,8 @@ __init int intel_pmu_init(void) case INTEL_FAM6_CORE2_MEROM: x86_add_quirk(intel_clovertown_quirk); + /* fall through */ + case INTEL_FAM6_CORE2_MEROM_L: case INTEL_FAM6_CORE2_PENRYN: case INTEL_FAM6_CORE2_DUNNINGTON: @@ -4256,6 +4385,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_X: + x86_add_quirk(intel_counter_freezing_quirk); memcpy(hw_cache_event_ids, glm_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs, @@ -4282,7 +4412,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_ATOM_GOLDMONT_PLUS: - x86_add_quirk(intel_glp_counter_freezing_quirk); + x86_add_quirk(intel_counter_freezing_quirk); memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs, @@ -4425,6 +4555,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_HASWELL_ULT: case INTEL_FAM6_HASWELL_GT3E: x86_add_quirk(intel_ht_bug); + x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); @@ -4456,6 +4587,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_BROADWELL_XEON_D: case INTEL_FAM6_BROADWELL_GT3E: case INTEL_FAM6_BROADWELL_X: + x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); @@ -4518,6 +4650,7 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SKYLAKE_X: case INTEL_FAM6_KABYLAKE_MOBILE: case INTEL_FAM6_KABYLAKE_DESKTOP: + x86_add_quirk(intel_pebs_isolation_quirk); x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); @@ -4549,6 +4682,15 @@ __init int intel_pmu_init(void) tsx_attr = hsw_tsx_events_attrs; intel_pmu_pebs_data_source_skl( boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); + + if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) { + x86_pmu.flags |= PMU_FL_TFA; + x86_pmu.get_event_constraints = tfa_get_event_constraints; + x86_pmu.enable_all = intel_tfa_pmu_enable_all; + x86_pmu.commit_scheduling = intel_tfa_commit_scheduling; + intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr; + } + pr_cont("Skylake events, "); name = "skylake"; break; @@ -4700,7 +4842,7 @@ static __init int fixup_ht_bug(void) hardlockup_detector_perf_restart(); for_each_online_cpu(c) - free_excl_cntrs(c); + free_excl_cntrs(&per_cpu(cpu_hw_events, c)); cpus_read_unlock(); pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n"); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index d2e780705c5a..94a4b7fc75d0 100644 --- a/arch/x86/events/intel/cstate.c +++ 
b/arch/x86/events/intel/cstate.c @@ -280,13 +280,7 @@ static int cstate_pmu_event_init(struct perf_event *event) return -ENOENT; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; if (event->cpu < 0) @@ -437,7 +431,7 @@ static struct pmu cstate_core_pmu = { .start = cstate_pmu_event_start, .stop = cstate_pmu_event_stop, .read = cstate_pmu_event_update, - .capabilities = PERF_PMU_CAP_NO_INTERRUPT, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, .module = THIS_MODULE, }; @@ -451,7 +445,7 @@ static struct pmu cstate_pkg_pmu = { .start = cstate_pmu_event_start, .stop = cstate_pmu_event_stop, .read = cstate_pmu_event_update, - .capabilities = PERF_PMU_CAP_NO_INTERRUPT, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, .module = THIS_MODULE, }; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index e9acf1d2e7b2..10c99ce1fead 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1628,6 +1628,8 @@ void __init intel_ds_init(void) x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS); x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS); x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE; + if (x86_pmu.version <= 4) + x86_pmu.pebs_no_isolation = 1; if (x86_pmu.pebs) { char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-'; int format = x86_pmu.intel_cap.pebs_format; diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index c88ed39582a1..580c1b91c454 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -931,6 +931,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort) ret = X86_BR_ZERO_CALL; break; } + /* fall through */ case 0x9a: /* call far absolute */ ret = X86_BR_CALL; break; diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 9494ca68fd9d..fb3a2f13fc70 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1114,10 +1114,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages, * Return: Our private PT buffer structure. 
*/ static void * -pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot) +pt_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct pt_buffer *buf; - int node, ret; + int node, ret, cpu = event->cpu; if (!nr_pages) return NULL; @@ -1222,7 +1223,8 @@ static int pt_event_addr_filters_validate(struct list_head *filters) static void pt_event_addr_filters_sync(struct perf_event *event) { struct perf_addr_filters_head *head = perf_event_addr_filters(event); - unsigned long msr_a, msr_b, *offs = event->addr_filters_offs; + unsigned long msr_a, msr_b; + struct perf_addr_filter_range *fr = event->addr_filter_ranges; struct pt_filters *filters = event->hw.addr_filters; struct perf_addr_filter *filter; int range = 0; @@ -1231,12 +1233,12 @@ static void pt_event_addr_filters_sync(struct perf_event *event) return; list_for_each_entry(filter, &head->list, entry) { - if (filter->path.dentry && !offs[range]) { + if (filter->path.dentry && !fr[range].start) { msr_a = msr_b = 0; } else { /* apply the offset */ - msr_a = filter->offset + offs[range]; - msr_b = filter->size + msr_a - 1; + msr_a = fr[range].start; + msr_b = msr_a + fr[range].size - 1; } filters->filter[range].msr_a = msr_a; diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 91039ffed633..94dc564146ca 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -397,13 +397,7 @@ static int rapl_pmu_event_init(struct perf_event *event) return -EINVAL; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; /* must be done before validate_group */ @@ -699,6 +693,7 @@ static int __init init_rapl_pmus(void) rapl_pmus->pmu.stop = rapl_pmu_event_stop; rapl_pmus->pmu.read = rapl_pmu_event_read; rapl_pmus->pmu.module = THIS_MODULE; + rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; return 0; } diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 27a461414b30..9fe64c01a2e5 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -695,14 +695,6 @@ static int uncore_pmu_event_init(struct perf_event *event) if (pmu->func_id < 0) return -ENOENT; - /* - * Uncore PMU does measure at all privilege level all the time. - * So it doesn't make sense to specify any exclude bits. 
- */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_hv || event->attr.exclude_idle) - return -EINVAL; - /* Sampling not supported yet */ if (hwc->sample_period) return -EINVAL; @@ -740,6 +732,7 @@ static int uncore_pmu_event_init(struct perf_event *event) /* fixed counters have event field hardcoded to zero */ hwc->config = 0ULL; } else if (is_freerunning_event(event)) { + hwc->config = event->attr.config; if (!check_valid_freerunning_event(box, event)) return -EINVAL; event->hw.idx = UNCORE_PMC_IDX_FREERUNNING; @@ -800,6 +793,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu) .stop = uncore_pmu_event_stop, .read = uncore_pmu_event_read, .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; } else { pmu->pmu = *pmu->type->pmu; diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index cb46d602a6b8..853a49a8ccf6 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -292,8 +292,8 @@ static inline unsigned int uncore_freerunning_counter(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); - unsigned int idx = uncore_freerunning_idx(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); + unsigned int idx = uncore_freerunning_idx(event->hw.config); struct intel_uncore_pmu *pmu = box->pmu; return pmu->type->freerunning[type].counter_base + @@ -377,7 +377,7 @@ static inline unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); return box->pmu->type->freerunning[type].bits; } @@ -385,7 +385,7 @@ unsigned int uncore_freerunning_bits(struct intel_uncore_box *box, static inline int uncore_num_freerunning(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); return box->pmu->type->freerunning[type].num_counters; } @@ -399,8 +399,8 @@ static inline int uncore_num_freerunning_types(struct intel_uncore_box *box, static inline bool check_valid_freerunning_event(struct intel_uncore_box *box, struct perf_event *event) { - unsigned int type = uncore_freerunning_type(event->attr.config); - unsigned int idx = uncore_freerunning_idx(event->attr.config); + unsigned int type = uncore_freerunning_type(event->hw.config); + unsigned int idx = uncore_freerunning_idx(event->hw.config); return (type < uncore_num_freerunning_types(box, event)) && (idx < uncore_num_freerunning(box, event)); diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 2593b0d7aeee..13493f43b247 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -397,13 +397,7 @@ static int snb_uncore_imc_event_init(struct perf_event *event) return -EINVAL; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; /* @@ -448,9 +442,11 @@ static int snb_uncore_imc_event_init(struct perf_event *event) /* must be done before validate_group */ event->hw.event_base = base; - event->hw.config = 
cfg; event->hw.idx = idx; + /* Convert to standard encoding format for freerunning counters */ + event->hw.config = ((cfg - 1) << 8) | 0x10ff; + /* no group validation needed, we have free running counters */ return 0; @@ -497,6 +493,7 @@ static struct pmu snb_uncore_imc_pmu = { .start = uncore_pmu_event_start, .stop = uncore_pmu_event_stop, .read = uncore_pmu_event_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; static struct intel_uncore_ops snb_uncore_imc_ops = { diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c07bee31abe8..b10e04387f38 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = { .id_table = snbep_uncore_pci_ids, }; +#define NODE_ID_MASK 0x7 + /* * build pci bus to socket mapping */ @@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); if (err) break; - nodeid = config; + nodeid = config & NODE_ID_MASK; /* get the Node ID mapping */ err = pci_read_config_dword(ubox_dev, idmap_loc, &config); if (err) diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 1b9f85abf9bc..a878e6286e4a 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -160,13 +160,7 @@ static int msr_event_init(struct perf_event *event) return -ENOENT; /* unsupported modes and filters */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.sample_period) /* no sampling */ + if (event->attr.sample_period) /* no sampling */ return -EINVAL; if (cfg >= PERF_MSR_EVENT_MAX) @@ -256,7 +250,7 @@ static struct pmu pmu_msr = { .start = msr_event_start, .stop = msr_event_stop, .read = msr_event_update, - .capabilities = PERF_PMU_CAP_NO_INTERRUPT, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, }; static int __init msr_init(void) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78d7b7031bfc..b04ae6c8775e 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -242,6 +242,11 @@ struct cpu_hw_events { struct intel_excl_cntrs *excl_cntrs; int excl_thread_id; /* 0 or 1 */ + /* + * SKL TSX_FORCE_ABORT shadow + */ + u64 tfa_shadow; + /* * AMD specific bits */ @@ -601,13 +606,14 @@ struct x86_pmu { /* * Intel DebugStore bits */ - unsigned int bts :1, - bts_active :1, - pebs :1, - pebs_active :1, - pebs_broken :1, - pebs_prec_dist :1, - pebs_no_tlb :1; + unsigned int bts :1, + bts_active :1, + pebs :1, + pebs_active :1, + pebs_broken :1, + pebs_prec_dist :1, + pebs_no_tlb :1, + pebs_no_isolation :1; int pebs_record_size; int pebs_buffer_size; void (*drain_pebs)(struct pt_regs *regs); @@ -646,6 +652,11 @@ struct x86_pmu { * Intel host/guest support (KVM) */ struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); + + /* + * Check period value for PERF_EVENT_IOC_PERIOD ioctl. 
+ */ + int (*check_period) (struct perf_event *event, u64 period); }; struct x86_perf_task_context { @@ -676,6 +687,7 @@ do { \ #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */ #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */ #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */ +#define PMU_FL_TFA 0x20 /* deal with TSX force abort */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr @@ -857,7 +869,7 @@ static inline int amd_pmu_init(void) #ifdef CONFIG_CPU_SUP_INTEL -static inline bool intel_pmu_has_bts(struct perf_event *event) +static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) { struct hw_perf_event *hwc = &event->hw; unsigned int hw_event, bts_event; @@ -868,7 +880,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); - return hw_event == bts_event && hwc->sample_period == 1; + return hw_event == bts_event && period == 1; +} + +static inline bool intel_pmu_has_bts(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + return intel_pmu_has_bts_period(event, hwc->sample_period); } int intel_pmu_save_and_restart(struct perf_event *event); @@ -877,7 +896,8 @@ struct event_constraint * x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event); -struct intel_shared_regs *allocate_shared_regs(int cpu); +extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu); +extern void intel_cpuc_finish(struct cpu_hw_events *cpuc); int intel_pmu_init(void); @@ -1013,9 +1033,13 @@ static inline int intel_pmu_init(void) return 0; } -static inline struct intel_shared_regs *allocate_shared_regs(int cpu) +static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu) +{ + return 0; +} + +static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc) { - return NULL; } static inline int is_ht_workaround_enabled(void) diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 7abb09e2eeb8..6461a16b4559 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -96,6 +96,7 @@ void __percpu **hyperv_pcpu_input_arg; EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg); u32 hv_max_vp_index; +EXPORT_SYMBOL_GPL(hv_max_vp_index); static int hv_cpu_init(unsigned int cpu) { @@ -406,6 +407,13 @@ void hyperv_cleanup(void) /* Reset our OS id */ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0); + /* + * Reset hypercall page reference before reset the page, + * let hypercall operations fail safely rather than + * panic the kernel for using invalid hypercall page + */ + hv_hypercall_pg = NULL; + /* Reset the hypercall page */ hypercall_msr.as_uint64 = 0; wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64); diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index f65b78d32f5e..3c135084e1eb 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -39,82 +39,10 @@ static int load_aout_binary(struct linux_binprm *); static int load_aout_library(struct file *); -#ifdef CONFIG_COREDUMP -static int aout_core_dump(struct coredump_params *); - -static unsigned long get_dr(int n) -{ - struct perf_event *bp = current->thread.ptrace_bps[n]; - return bp ? bp->hw.info.address : 0; -} - -/* - * fill in the user structure for a core dump.. 
- */ -static void dump_thread32(struct pt_regs *regs, struct user32 *dump) -{ - u32 fs, gs; - memset(dump, 0, sizeof(*dump)); - -/* changed the size calculations - should hopefully work better. lbt */ - dump->magic = CMAGIC; - dump->start_code = 0; - dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long) - (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; - dump->u_dsize -= dump->u_tsize; - dump->u_debugreg[0] = get_dr(0); - dump->u_debugreg[1] = get_dr(1); - dump->u_debugreg[2] = get_dr(2); - dump->u_debugreg[3] = get_dr(3); - dump->u_debugreg[6] = current->thread.debugreg6; - dump->u_debugreg[7] = current->thread.ptrace_dr7; - - if (dump->start_stack < 0xc0000000) { - unsigned long tmp; - - tmp = (unsigned long) (0xc0000000 - dump->start_stack); - dump->u_ssize = tmp >> PAGE_SHIFT; - } - - dump->regs.ebx = regs->bx; - dump->regs.ecx = regs->cx; - dump->regs.edx = regs->dx; - dump->regs.esi = regs->si; - dump->regs.edi = regs->di; - dump->regs.ebp = regs->bp; - dump->regs.eax = regs->ax; - dump->regs.ds = current->thread.ds; - dump->regs.es = current->thread.es; - savesegment(fs, fs); - dump->regs.fs = fs; - savesegment(gs, gs); - dump->regs.gs = gs; - dump->regs.orig_eax = regs->orig_ax; - dump->regs.eip = regs->ip; - dump->regs.cs = regs->cs; - dump->regs.eflags = regs->flags; - dump->regs.esp = regs->sp; - dump->regs.ss = regs->ss; - -#if 1 /* FIXME */ - dump->u_fpvalid = 0; -#else - dump->u_fpvalid = dump_fpu(regs, &dump->i387); -#endif -} - -#endif - static struct linux_binfmt aout_format = { .module = THIS_MODULE, .load_binary = load_aout_binary, .load_shlib = load_aout_library, -#ifdef CONFIG_COREDUMP - .core_dump = aout_core_dump, -#endif - .min_coredump = PAGE_SIZE }; static int set_brk(unsigned long start, unsigned long end) @@ -126,91 +54,6 @@ static int set_brk(unsigned long start, unsigned long end) return vm_brk(start, end - start); } -#ifdef CONFIG_COREDUMP -/* - * These are the only things you should do on a core-file: use only these - * macros to write out all the necessary info. - */ - -#include - -#define START_DATA(u) (u.u_tsize << PAGE_SHIFT) -#define START_STACK(u) (u.start_stack) - -/* - * Routine writes a core dump image in the current directory. - * Currently only a stub-function. - * - * Note that setuid/setgid files won't make a core-dump if the uid/gid - * changed due to the set[u|g]id. It's enforced by the "current->mm->dumpable" - * field, which also makes sure the core-dumps won't be recursive if the - * dumping of the process results in another error.. - */ - -static int aout_core_dump(struct coredump_params *cprm) -{ - mm_segment_t fs; - int has_dumped = 0; - unsigned long dump_start, dump_size; - struct user32 dump; - - fs = get_fs(); - set_fs(KERNEL_DS); - has_dumped = 1; - strncpy(dump.u_comm, current->comm, sizeof(current->comm)); - dump.u_ar0 = offsetof(struct user32, regs); - dump.signal = cprm->siginfo->si_signo; - dump_thread32(cprm->regs, &dump); - - /* - * If the size of the dump file exceeds the rlimit, then see - * what would happen if we wrote the stack, but not the data - * area. - */ - if ((dump.u_dsize + dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) - dump.u_dsize = 0; - - /* Make sure we have enough room to write the stack and data areas. 
*/ - if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) - dump.u_ssize = 0; - - /* make sure we actually have a data and stack area to dump */ - set_fs(USER_DS); - if (!access_ok((void *) (unsigned long)START_DATA(dump), - dump.u_dsize << PAGE_SHIFT)) - dump.u_dsize = 0; - if (!access_ok((void *) (unsigned long)START_STACK(dump), - dump.u_ssize << PAGE_SHIFT)) - dump.u_ssize = 0; - - set_fs(KERNEL_DS); - /* struct user */ - if (!dump_emit(cprm, &dump, sizeof(dump))) - goto end_coredump; - /* Now dump all of the user data. Include malloced stuff as well */ - if (!dump_skip(cprm, PAGE_SIZE - sizeof(dump))) - goto end_coredump; - /* now we start writing out the user space info */ - set_fs(USER_DS); - /* Dump the data area */ - if (dump.u_dsize != 0) { - dump_start = START_DATA(dump); - dump_size = dump.u_dsize << PAGE_SHIFT; - if (!dump_emit(cprm, (void *)dump_start, dump_size)) - goto end_coredump; - } - /* Now prepare to dump the stack area */ - if (dump.u_ssize != 0) { - dump_start = START_STACK(dump); - dump_size = dump.u_ssize << PAGE_SHIFT; - if (!dump_emit(cprm, (void *)dump_start, dump_size)) - goto end_coredump; - } -end_coredump: - set_fs(fs); - return has_dumped; -} -#endif /* * create_aout_tables() parses the env- and arg-strings in new user diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h deleted file mode 100644 index 7d3ece8bfb61..000000000000 --- a/arch/x86/include/asm/a.out-core.h +++ /dev/null @@ -1,67 +0,0 @@ -/* a.out coredump register dumper - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ - -#ifndef _ASM_X86_A_OUT_CORE_H -#define _ASM_X86_A_OUT_CORE_H - -#ifdef __KERNEL__ -#ifdef CONFIG_X86_32 - -#include -#include -#include - -#include - -/* - * fill in the user structure for an a.out core dump - */ -static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) -{ -/* changed the size calculations - should hopefully work better. 
lbt */ - dump->magic = CMAGIC; - dump->start_code = 0; - dump->start_stack = regs->sp & ~(PAGE_SIZE - 1); - dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT; - dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1))) - >> PAGE_SHIFT; - dump->u_dsize -= dump->u_tsize; - dump->u_ssize = 0; - aout_dump_debugregs(dump); - - if (dump->start_stack < TASK_SIZE) - dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack)) - >> PAGE_SHIFT; - - dump->regs.bx = regs->bx; - dump->regs.cx = regs->cx; - dump->regs.dx = regs->dx; - dump->regs.si = regs->si; - dump->regs.di = regs->di; - dump->regs.bp = regs->bp; - dump->regs.ax = regs->ax; - dump->regs.ds = (u16)regs->ds; - dump->regs.es = (u16)regs->es; - dump->regs.fs = (u16)regs->fs; - dump->regs.gs = get_user_gs(regs); - dump->regs.orig_ax = regs->orig_ax; - dump->regs.ip = regs->ip; - dump->regs.cs = (u16)regs->cs; - dump->regs.flags = regs->flags; - dump->regs.sp = regs->sp; - dump->regs.ss = (u16)regs->ss; - - dump->u_fpvalid = dump_fpu(regs, &dump->i387); -} - -#endif /* CONFIG_X86_32 */ -#endif /* __KERNEL__ */ -#endif /* _ASM_X86_A_OUT_CORE_H */ diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 0660e14690c8..4c74073a19cc 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -94,13 +94,12 @@ static inline int alternatives_text_reserved(void *start, void *end) #define alt_total_slen alt_end_marker"b-661b" #define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f" -#define __OLDINSTR(oldinstr, num) \ +#define OLDINSTR(oldinstr, num) \ + "# ALT: oldnstr\n" \ "661:\n\t" oldinstr "\n662:\n" \ + "# ALT: padding\n" \ ".skip -(((" alt_rlen(num) ")-(" alt_slen ")) > 0) * " \ - "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" - -#define OLDINSTR(oldinstr, num) \ - __OLDINSTR(oldinstr, num) \ + "((" alt_rlen(num) ")-(" alt_slen ")),0x90\n" \ alt_end_marker ":\n" /* @@ -116,11 +115,23 @@ static inline int alternatives_text_reserved(void *start, void *end) * additionally longer than the first replacement alternative. 
*/ #define OLDINSTR_2(oldinstr, num1, num2) \ + "# ALT: oldinstr2\n" \ "661:\n\t" oldinstr "\n662:\n" \ + "# ALT: padding2\n" \ ".skip -((" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")) > 0) * " \ "(" alt_max_short(alt_rlen(num1), alt_rlen(num2)) " - (" alt_slen ")), 0x90\n" \ alt_end_marker ":\n" +#define OLDINSTR_3(oldinsn, n1, n2, n3) \ + "# ALT: oldinstr3\n" \ + "661:\n\t" oldinsn "\n662:\n" \ + "# ALT: padding3\n" \ + ".skip -((" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \ + " - (" alt_slen ")) > 0) * " \ + "(" alt_max_short(alt_max_short(alt_rlen(n1), alt_rlen(n2)), alt_rlen(n3)) \ + " - (" alt_slen ")), 0x90\n" \ + alt_end_marker ":\n" + #define ALTINSTR_ENTRY(feature, num) \ " .long 661b - .\n" /* label */ \ " .long " b_replacement(num)"f - .\n" /* new instruction */ \ @@ -129,8 +140,9 @@ static inline int alternatives_text_reserved(void *start, void *end) " .byte " alt_rlen(num) "\n" /* replacement len */ \ " .byte " alt_pad_len "\n" /* pad len */ -#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ - b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n\t" +#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \ + "# ALT: replacement " #num "\n" \ + b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n" /* alternative assembly primitive: */ #define ALTERNATIVE(oldinstr, newinstr, feature) \ @@ -153,6 +165,19 @@ static inline int alternatives_text_reserved(void *start, void *end) ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ ".popsection\n" +#define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \ + OLDINSTR_3(oldinsn, 1, 2, 3) \ + ".pushsection .altinstructions,\"a\"\n" \ + ALTINSTR_ENTRY(feat1, 1) \ + ALTINSTR_ENTRY(feat2, 2) \ + ALTINSTR_ENTRY(feat3, 3) \ + ".popsection\n" \ + ".pushsection .altinstr_replacement, \"ax\"\n" \ + ALTINSTR_REPLACEMENT(newinsn1, feat1, 1) \ + ALTINSTR_REPLACEMENT(newinsn2, feat2, 2) \ + ALTINSTR_REPLACEMENT(newinsn3, feat3, 3) \ + ".popsection\n" + /* * Alternative instructions for different CPU types or capabilities. * diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 1908214b9125..ce92c4acc913 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -7,7 +7,6 @@ #include -#include #include #include #include diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h index baeba0567126..3417110574c1 100644 --- a/arch/x86/include/asm/cpu_device_id.h +++ b/arch/x86/include/asm/cpu_device_id.h @@ -11,4 +11,32 @@ extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match); +/* + * Match specific microcode revisions. + * + * vendor/family/model/stepping must be all set. + * + * Only checks against the boot CPU. When mixed-stepping configs are + * valid for a CPU model, add a quirk for every valid stepping and + * do the fine-tuning in the quirk handler. 
+ */ + +struct x86_cpu_desc { + __u8 x86_family; + __u8 x86_vendor; + __u8 x86_model; + __u8 x86_stepping; + __u32 x86_microcode_rev; +}; + +#define INTEL_CPU_DESC(mod, step, rev) { \ + .x86_family = 6, \ + .x86_vendor = X86_VENDOR_INTEL, \ + .x86_model = mod, \ + .x86_stepping = step, \ + .x86_microcode_rev = rev, \ +} + +extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table); + #endif diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 6d6122524711..981ff9479648 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -344,6 +344,7 @@ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ +#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 107283b1eb1e..606a4b6a9812 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -170,7 +170,6 @@ static inline bool efi_runtime_supported(void) return false; } -extern struct console early_efi_console; extern void parse_efi_setup(u64 phys_addr, u32 data_len); extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt); diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index fa2c93cb42a2..fb04a3ded7dd 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -137,37 +137,25 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) { if (IS_ENABLED(CONFIG_X86_32)) return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)); - else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) + else return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx)); - /* See comment in copy_fxregs_to_kernel() below. */ - return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx)); } static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) { - if (IS_ENABLED(CONFIG_X86_32)) { + if (IS_ENABLED(CONFIG_X86_32)) kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); - } else { - if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) { - kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); - } else { - /* See comment in copy_fxregs_to_kernel() below. */ - kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx)); - } - } + else + kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); } static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) { if (IS_ENABLED(CONFIG_X86_32)) return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); - else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) + else return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); - - /* See comment in copy_fxregs_to_kernel() below. 
*/ - return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), - "m" (*fx)); } static inline void copy_kernel_to_fregs(struct fregs_state *fx) @@ -184,34 +172,8 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) { if (IS_ENABLED(CONFIG_X86_32)) asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave)); - else if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) + else asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave)); - else { - /* Using "rex64; fxsave %0" is broken because, if the memory - * operand uses any extended registers for addressing, a second - * REX prefix will be generated (to the assembler, rex64 - * followed by semicolon is a separate instruction), and hence - * the 64-bitness is lost. - * - * Using "fxsaveq %0" would be the ideal choice, but is only - * supported starting with gas 2.16. - * - * Using, as a workaround, the properly prefixed form below - * isn't accepted by any binutils version so far released, - * complaining that the same type of prefix is used twice if - * an extended register is needed for addressing (fix submitted - * to mainline 2005-11-21). - * - * asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave)); - * - * This, however, we can work around by forcing the compiler to - * select an addressing mode that doesn't require extended - * registers. - */ - asm volatile( "rex64/fxsave (%[fx])" - : "=m" (fpu->state.fxsave) - : [fx] "R" (&fpu->state.fxsave)); - } } /* These macros all use (%edi)/(%rdi) as the single memory argument. */ @@ -414,6 +376,13 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu) { if (likely(use_xsave())) { copy_xregs_to_kernel(&fpu->state.xsave); + + /* + * AVX512 state is tracked here because its use is + * known to slow the max clock speed of the core. + */ + if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512) + fpu->avx512_timestamp = jiffies; return 1; } diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h index 202c53918ecf..2e32e178e064 100644 --- a/arch/x86/include/asm/fpu/types.h +++ b/arch/x86/include/asm/fpu/types.h @@ -302,6 +302,13 @@ struct fpu { */ unsigned char initialized; + /* + * @avx512_timestamp: + * + * Records the timestamp of AVX512 use during last context switch. + */ + unsigned long avx512_timestamp; + /* * @state: * diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h index 705dafc2d11a..2bdbbbcfa393 100644 --- a/arch/x86/include/asm/hyperv-tlfs.h +++ b/arch/x86/include/asm/hyperv-tlfs.h @@ -841,7 +841,7 @@ union hv_gpa_page_range { * count is equal with how many entries of union hv_gpa_page_range can * be populated into the input parameter page. */ -#define HV_MAX_FLUSH_REP_COUNT (PAGE_SIZE - 2 * sizeof(u64) / \ +#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) / \ sizeof(union hv_gpa_page_range)) struct hv_guest_mapping_flush_list { diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 0dd6b0f4000e..9f15384c504a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h @@ -6,7 +6,7 @@ * "Big Core" Processors (Branded as Core, Xeon, etc...) * * The "_X" parts are generally the EP and EX Xeons, or the - * "Extreme" ones, like Broadwell-E. + * "Extreme" ones, like Broadwell-E, or Atom microserver. * * While adding a new CPUID for a new microarchitecture, add a new * group to keep logically sorted out in chronological order. 
Within @@ -52,6 +52,8 @@ #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 +#define INTEL_FAM6_ICELAKE_MOBILE 0x7E + /* "Small Core" Processors (Atom) */ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ @@ -71,6 +73,7 @@ #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ +#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */ /* Xeon Phi */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4660ce90de7f..180373360e34 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -299,6 +299,7 @@ union kvm_mmu_extended_role { unsigned int cr4_smap:1; unsigned int cr4_smep:1; unsigned int cr4_la57:1; + unsigned int maxphyaddr:6; }; }; @@ -397,6 +398,7 @@ struct kvm_mmu { void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte); hpa_t root_hpa; + gpa_t root_cr3; union kvm_mmu_role mmu_role; u8 root_level; u8 shadow_root_level; diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index c1a812bd5a27..22d05e3835f0 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -48,6 +48,7 @@ #define MCI_STATUS_SYNDV BIT_ULL(53) /* synd reg. valid */ #define MCI_STATUS_DEFERRED BIT_ULL(44) /* uncorrected error, deferred exception */ #define MCI_STATUS_POISON BIT_ULL(43) /* access poisonous data */ +#define MCI_STATUS_SCRUB BIT_ULL(40) /* Error detected during scrub operation */ /* * McaX field if set indicates a given bank supports MCA extensions: @@ -307,11 +308,17 @@ enum smca_bank_types { SMCA_FP, /* Floating Point */ SMCA_L3_CACHE, /* L3 Cache */ SMCA_CS, /* Coherent Slave */ + SMCA_CS_V2, /* Coherent Slave */ SMCA_PIE, /* Power, Interrupts, etc. */ SMCA_UMC, /* Unified Memory Controller */ SMCA_PB, /* Parameter Block */ SMCA_PSP, /* Platform Security Processor */ + SMCA_PSP_V2, /* Platform Security Processor */ SMCA_SMU, /* System Management Unit */ + SMCA_SMU_V2, /* System Management Unit */ + SMCA_MP5, /* Microprocessor 5 Unit */ + SMCA_NBIO, /* Northbridge IO Unit */ + SMCA_PCIE, /* PCI Express Unit */ N_SMCA_BANK_TYPES }; diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 8e40c2446fd1..ca5bc0eacb95 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -666,6 +666,12 @@ #define MSR_IA32_TSC_DEADLINE 0x000006E0 + +#define MSR_TSX_FORCE_ABORT 0x0000010F + +#define MSR_TFA_RTM_FORCE_ABORT_BIT 0 +#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT) + /* P4/Xeon+ specific */ #define MSR_IA32_MCG_EAX 0x00000180 #define MSR_IA32_MCG_EBX 0x00000181 diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 91e4cf189914..5cc3930cb465 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -217,6 +217,8 @@ static __always_inline unsigned long long rdtsc(void) */ static __always_inline unsigned long long rdtsc_ordered(void) { + DECLARE_ARGS(val, low, high); + /* * The RDTSC instruction is not ordered relative to memory * access. The Intel SDM and the AMD APM are both vague on this @@ -227,9 +229,19 @@ static __always_inline unsigned long long rdtsc_ordered(void) * ordering guarantees as reading from a global memory location * that some other imaginary CPU is updating continuously with a * time stamp. + * + * Thus, use the preferred barrier on the respective CPU, aiming for + * RDTSCP as the default. 
*/ - barrier_nospec(); - return rdtsc(); + asm volatile(ALTERNATIVE_3("rdtsc", + "mfence; rdtsc", X86_FEATURE_MFENCE_RDTSC, + "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC, + "rdtscp", X86_FEATURE_RDTSCP) + : EAX_EDX_RET(val, low, high) + /* RDTSCP clobbers ECX with MSR_TSC_AUX. */ + :: "ecx"); + + return EAX_EDX_VAL(val, low, high); } static inline unsigned long long native_read_pmc(int counter) diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index a97f28d914d5..c25c38a05c1c 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -422,25 +422,26 @@ static inline pgdval_t pgd_val(pgd_t pgd) } #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION -static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, +static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pteval_t ret; - ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, mm, addr, ptep); + ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep); return (pte_t) { .pte = ret }; } -static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) +static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte) { + if (sizeof(pteval_t) > sizeof(long)) /* 5 arg words */ - pv_ops.mmu.ptep_modify_prot_commit(mm, addr, ptep, pte); + pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte); else PVOP_VCALL4(mmu.ptep_modify_prot_commit, - mm, addr, ptep, pte.pte); + vma, addr, ptep, pte.pte); } static inline void set_pte(pte_t *ptep, pte_t pte) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 488c59686a73..2474e434a6f7 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -55,6 +55,7 @@ struct task_struct; struct cpumask; struct flush_tlb_info; struct mmu_gather; +struct vm_area_struct; /* * Wrapper type for pointers to code which uses the non-standard @@ -254,9 +255,9 @@ struct pv_mmu_ops { pte_t *ptep, pte_t pteval); void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); - pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, + pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); - void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr, + void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte); struct paravirt_callee_save pte_val; diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 662963681ea6..e662f987dfa2 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -141,7 +142,7 @@ cpumask_of_pcibus(const struct pci_bus *bus) int node; node = __pcibus_to_node(bus); - return (node == -1) ? cpu_online_mask : + return (node == NUMA_NO_NODE) ? 
cpu_online_mask : cpumask_of_node(node); } #endif diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 40616e805292..2779ace16d23 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { - native_set_pmd(pmdp, pmd); + set_pmd(pmdp, pmd); } static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 9c85b54bf03c..0bb566315621 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -259,8 +259,7 @@ extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); #define gup_fast_permitted gup_fast_permitted -static inline bool gup_fast_permitted(unsigned long start, int nr_pages, - int write) +static inline bool gup_fast_permitted(unsigned long start, int nr_pages) { unsigned long len, end; diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 33051436c864..2bb3a648fc12 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -742,7 +742,6 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, extern void enable_sep_cpu(void); extern int sysenter_setup(void); -void early_trap_pf_init(void); /* Defined in head.S */ extern struct desc_ptr early_gdt_descr; diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index dbaed55c1c24..232f856e0db0 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -67,16 +67,30 @@ static __always_inline void refcount_dec(refcount_t *r) static __always_inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r) { - return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", + bool ret = GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO, r->refs.counter, e, "er", i, "cx"); + + if (ret) { + smp_acquire__after_ctrl_dep(); + return true; + } + + return false; } static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r) { - return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", - REFCOUNT_CHECK_LT_ZERO, - r->refs.counter, e, "cx"); + bool ret = GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", + REFCOUNT_CHECK_LT_ZERO, + r->refs.counter, e, "cx"); + + if (ret) { + smp_acquire__after_ctrl_dep(); + return true; + } + + return false; } static __always_inline __must_check diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h index 40ebddde6ac2..f6b7fe2833cc 100644 --- a/arch/x86/include/asm/resctrl_sched.h +++ b/arch/x86/include/asm/resctrl_sched.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_RESCTRL_SCHED_H #define _ASM_X86_RESCTRL_SCHED_H -#ifdef CONFIG_X86_RESCTRL +#ifdef CONFIG_X86_CPU_RESCTRL #include #include @@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void) static inline void resctrl_sched_in(void) {} -#endif /* CONFIG_X86_RESCTRL */ +#endif /* CONFIG_X86_CPU_RESCTRL */ #endif /* _ASM_X86_RESCTRL_SCHED_H */ diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 780f2b42c8ef..1954dd5552a2 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -25,7 +25,6 @@ #define KERNEL_DS MAKE_MM_SEG(-1UL) #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) 
-#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.addr_limit) static inline void set_fs(mm_segment_t fs) { @@ -35,10 +34,7 @@ static inline void set_fs(mm_segment_t fs) } #define segment_eq(a, b) ((a).seg == (b).seg) - #define user_addr_max() (current->thread.addr_limit.seg) -#define __addr_ok(addr) \ - ((unsigned long __force)(addr) < user_addr_max()) /* * Test whether a block of memory is a valid user space address. @@ -76,7 +72,7 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un #endif /** - * access_ok: - Checks if a user space pointer is valid + * access_ok - Checks if a user space pointer is valid * @addr: User space pointer to start of block to check * @size: Size of block to check * @@ -85,12 +81,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un * * Checks if a pointer to a block of memory in user space is valid. * - * Returns true (nonzero) if the memory block may be valid, false (zero) - * if it is definitely invalid. - * * Note that, depending on architecture, this function probably just * checks that the pointer is in the user space range - after calling * this function, memory access functions may still return -EFAULT. + * + * Return: true (nonzero) if the memory block may be valid, false (zero) + * if it is definitely invalid. */ #define access_ok(addr, size) \ ({ \ @@ -135,7 +131,7 @@ extern int __get_user_bad(void); __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) /** - * get_user: - Get a simple variable from user space. + * get_user - Get a simple variable from user space. * @x: Variable to store result. * @ptr: Source address, in user space. * @@ -149,7 +145,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) * @ptr must have pointer-to-simple-variable type, and the result of * dereferencing @ptr must be assignable to @x without a cast. * - * Returns zero on success, or -EFAULT on error. + * Return: zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ /* @@ -227,7 +223,7 @@ extern void __put_user_4(void); extern void __put_user_8(void); /** - * put_user: - Write a simple value into user space. + * put_user - Write a simple value into user space. * @x: Value to copy to user space. * @ptr: Destination address, in user space. * @@ -241,7 +237,7 @@ extern void __put_user_8(void); * @ptr must have pointer-to-simple-variable type, and @x must be assignable * to the result of dereferencing @ptr. * - * Returns zero on success, or -EFAULT on error. + * Return: zero on success, or -EFAULT on error. */ #define put_user(x, ptr) \ ({ \ @@ -284,7 +280,7 @@ do { \ __put_user_goto(x, ptr, "l", "k", "ir", label); \ break; \ case 8: \ - __put_user_goto_u64((__typeof__(*ptr))(x), ptr, label); \ + __put_user_goto_u64(x, ptr, label); \ break; \ default: \ __put_user_bad(); \ @@ -431,8 +427,10 @@ do { \ ({ \ __label__ __pu_label; \ int __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __pu_val; \ + __pu_val = x; \ __uaccess_begin(); \ - __put_user_size((x), (ptr), (size), __pu_label); \ + __put_user_size(__pu_val, (ptr), (size), __pu_label); \ __pu_err = 0; \ __pu_label: \ __uaccess_end(); \ @@ -501,7 +499,7 @@ struct __large_struct { unsigned long buf[100]; }; } while (0) /** - * __get_user: - Get a simple variable from user space, with less checking. + * __get_user - Get a simple variable from user space, with less checking. * @x: Variable to store result. * @ptr: Source address, in user space. 
* @@ -518,7 +516,7 @@ struct __large_struct { unsigned long buf[100]; }; * Caller must check the pointer with access_ok() before calling this * function. * - * Returns zero on success, or -EFAULT on error. + * Return: zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ @@ -526,7 +524,7 @@ struct __large_struct { unsigned long buf[100]; }; __get_user_nocheck((x), (ptr), sizeof(*(ptr))) /** - * __put_user: - Write a simple value into user space, with less checking. + * __put_user - Write a simple value into user space, with less checking. * @x: Value to copy to user space. * @ptr: Destination address, in user space. * @@ -543,7 +541,7 @@ struct __large_struct { unsigned long buf[100]; }; * Caller must check the pointer with access_ok() before calling this * function. * - * Returns zero on success, or -EFAULT on error. + * Return: zero on success, or -EFAULT on error. */ #define __put_user(x, ptr) \ diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index dc4ed8bc2382..146859efd83c 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h @@ -23,8 +23,8 @@ # include # include -# define __ARCH_WANT_COMPAT_SYS_TIME -# define __ARCH_WANT_SYS_UTIME32 +# define __ARCH_WANT_SYS_TIME +# define __ARCH_WANT_SYS_UTIME # define __ARCH_WANT_COMPAT_SYS_PREADV64 # define __ARCH_WANT_COMPAT_SYS_PWRITEV64 # define __ARCH_WANT_COMPAT_SYS_PREADV64V2 @@ -48,8 +48,8 @@ # define __ARCH_WANT_SYS_SIGPENDING # define __ARCH_WANT_SYS_SIGPROCMASK # define __ARCH_WANT_SYS_SOCKETCALL -# define __ARCH_WANT_SYS_TIME -# define __ARCH_WANT_SYS_UTIME +# define __ARCH_WANT_SYS_TIME32 +# define __ARCH_WANT_SYS_UTIME32 # define __ARCH_WANT_SYS_WAITPID # define __ARCH_WANT_SYS_FORK # define __ARCH_WANT_SYS_VFORK diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 1f86e1b0a5cd..499578f7e6d7 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -23,6 +23,12 @@ struct unwind_state { #elif defined(CONFIG_UNWINDER_FRAME_POINTER) bool got_irq; unsigned long *bp, *orig_sp, ip; + /* + * If non-NULL: The current frame is incomplete and doesn't contain a + * valid BP. When looking for the next frame, use this instead of the + * non-existent saved BP. 
+ */ + unsigned long *next_bp; struct pt_regs *regs; #else unsigned long *sp; diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc6186..8cfccc3cbbf4 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h @@ -48,7 +48,8 @@ enum { BIOS_STATUS_SUCCESS = 0, BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, BIOS_STATUS_EINVAL = -EINVAL, - BIOS_STATUS_UNAVAIL = -EBUSY + BIOS_STATUS_UNAVAIL = -EBUSY, + BIOS_STATUS_ABORT = -EINTR, }; /* Address map parameters */ @@ -140,7 +141,6 @@ enum uv_memprotect { */ extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64); extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64); -extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64); extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *, long *); extern s64 uv_bios_freq_base(u64, u64 *); @@ -151,11 +151,7 @@ extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect); extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *); extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus); -#ifdef CONFIG_EFI extern void uv_bios_init(void); -#else -void uv_bios_init(void) { } -#endif extern unsigned long sn_rtc_cycles_per_second; extern int uv_type; @@ -167,4 +163,9 @@ extern long system_serial_number; extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ +/* + * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details + */ +extern struct semaphore __efi_uv_runtime_lock; + #endif /* _ASM_X86_UV_BIOS_H */ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index ef05bea7010d..de6f0d59a24f 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -332,15 +332,11 @@ HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val, return _hypercall4(int, update_va_mapping, va, new_val.pte, new_val.pte >> 32, flags); } -extern int __must_check xen_event_channel_op_compat(int, void *); static inline int HYPERVISOR_event_channel_op(int cmd, void *arg) { - int rc = _hypercall2(int, event_channel_op, cmd, arg); - if (unlikely(rc == -ENOSYS)) - rc = xen_event_channel_op_compat(cmd, arg); - return rc; + return _hypercall2(int, event_channel_op, cmd, arg); } static inline int @@ -355,15 +351,10 @@ HYPERVISOR_console_io(int cmd, int count, char *str) return _hypercall3(int, console_io, cmd, count, str); } -extern int __must_check xen_physdev_op_compat(int, void *); - static inline int HYPERVISOR_physdev_op(int cmd, void *arg) { - int rc = _hypercall2(int, physdev_op, cmd, arg); - if (unlikely(rc == -ENOSYS)) - rc = xen_physdev_op_compat(cmd, arg); - return rc; + return _hypercall2(int, physdev_op, cmd, arg); } static inline int diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild index f6648e9928b3..efe701b7c6ce 100644 --- a/arch/x86/include/uapi/asm/Kbuild +++ b/arch/x86/include/uapi/asm/Kbuild @@ -3,3 +3,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generated-y += unistd_64.h generated-y += unistd_x32.h +generic-y += socket.h diff --git a/arch/x86/include/uapi/asm/socket.h b/arch/x86/include/uapi/asm/socket.h deleted file mode 100644 index 6b71384b9d8b..000000000000 --- a/arch/x86/include/uapi/asm/socket.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 2624de16cd7a..8dcbf6890714 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -935,6 +935,9 @@ 
static int __init acpi_parse_hpet(struct acpi_table_header *table) #define HPET_RESOURCE_NAME_SIZE 9 hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, SMP_CACHE_BYTES); + if (!hpet_res) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); hpet_res->name = (void *)&hpet_res[1]; hpet_res->flags = IORESOURCE_MEM; diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index 0c26b1b44e51..4203d4f0c68d 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -90,7 +90,7 @@ ret_point: .data ALIGN ENTRY(saved_magic) .long 0 -ENTRY(saved_eip) .long 0 +saved_eip: .long 0 # saved registers saved_idt: .long 0,0 diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 50b8ed0317a3..510fa12aab73 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -125,12 +125,12 @@ ENTRY(do_suspend_lowlevel) ENDPROC(do_suspend_lowlevel) .data -ENTRY(saved_rbp) .quad 0 -ENTRY(saved_rsi) .quad 0 -ENTRY(saved_rdi) .quad 0 -ENTRY(saved_rbx) .quad 0 +saved_rbp: .quad 0 +saved_rsi: .quad 0 +saved_rdi: .quad 0 +saved_rbx: .quad 0 -ENTRY(saved_rip) .quad 0 -ENTRY(saved_rsp) .quad 0 +saved_rip: .quad 0 +saved_rsp: .quad 0 ENTRY(saved_magic) .quad 0 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index ebeac487a20c..9a79c7808f9c 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -393,10 +394,10 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, continue; } - DPRINTK("feat: %d*32+%d, old: (%px len: %d), repl: (%px, len: %d), pad: %d", + DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d", a->cpuid >> 5, a->cpuid & 0x1f, - instr, a->instrlen, + instr, instr, a->instrlen, replacement, a->replacementlen, a->padlen); DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr); @@ -764,8 +765,8 @@ int poke_int3_handler(struct pt_regs *regs) regs->ip = (unsigned long) bp_int3_handler; return 1; - } +NOKPROBE_SYMBOL(poke_int3_handler); /** * text_poke_bp() -- update instructions on live kernel on SMP diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 2953bbf05c08..53aa234a6803 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -812,6 +812,7 @@ static int irq_polarity(int idx) return IOAPIC_POL_HIGH; case MP_IRQPOL_RESERVED: pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n"); + /* fall through */ case MP_IRQPOL_ACTIVE_LOW: default: /* Pointless default required due to do gcc stupidity */ return IOAPIC_POL_LOW; @@ -859,6 +860,7 @@ static int irq_trigger(int idx) return IOAPIC_EDGE; case MP_IRQTRIG_RESERVED: pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n"); + /* fall through */ case MP_IRQTRIG_LEVEL: default: /* Pointless default required due to do gcc stupidity */ return IOAPIC_LEVEL; @@ -2579,6 +2581,8 @@ static struct resource * __init ioapic_setup_resources(void) n *= nr_ioapics; mem = memblock_alloc(n, SMP_CACHE_BYTES); + if (!mem) + panic("%s: Failed to allocate %lu bytes\n", __func__, n); res = (void *)mem; mem += sizeof(struct resource) * nr_ioapics; @@ -2623,6 +2627,9 @@ fake_ioapic_page: #endif ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (!ioapic_phys) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, 
PAGE_SIZE); ioapic_phys = __pa(ioapic_phys); } set_fixmap_nocache(idx, ioapic_phys); diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index a555da094157..1e225528f0d7 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -1390,7 +1391,7 @@ static void __init build_socket_tables(void) } /* Set socket -> node values: */ - lnid = -1; + lnid = NUMA_NO_NODE; for_each_present_cpu(cpu) { int nid = cpu_to_node(cpu); int apicid, sockid; @@ -1521,7 +1522,7 @@ static void __init uv_system_init_hub(void) new_hub->pnode = 0xffff; new_hub->numa_blade_id = uv_node_to_blade_id(nodeid); - new_hub->memory_nid = -1; + new_hub->memory_nid = NUMA_NO_NODE; new_hub->nr_possible_cpus = 0; new_hub->nr_online_cpus = 0; } @@ -1538,7 +1539,7 @@ static void __init uv_system_init_hub(void) uv_cpu_info_per(cpu)->p_uv_hub_info = uv_hub_info_list(nodeid); uv_cpu_info_per(cpu)->blade_cpu_id = uv_cpu_hub_info(cpu)->nr_possible_cpus++; - if (uv_cpu_hub_info(cpu)->memory_nid == -1) + if (uv_cpu_hub_info(cpu)->memory_nid == NUMA_NO_NODE) uv_cpu_hub_info(cpu)->memory_nid = cpu_to_node(cpu); /* Init memoryless node: */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index b6fa0869f7aa..cfd24f9f7614 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o obj-$(CONFIG_X86_MCE) += mce/ obj-$(CONFIG_MTRR) += mtrr/ obj-$(CONFIG_MICROCODE) += microcode/ -obj-$(CONFIG_X86_RESCTRL) += resctrl/ +obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/ obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 69f6bbb41be0..01004bfb1a1b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -819,11 +819,9 @@ static void init_amd_bd(struct cpuinfo_x86 *c) static void init_amd_zn(struct cpuinfo_x86 *c) { set_cpu_cap(c, X86_FEATURE_ZEN); - /* - * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects - * all up to and including B1. - */ - if (c->x86_model <= 1 && c->x86_stepping <= 1) + + /* Fix erratum 1076: CPB feature bit not being set in CPUID. */ + if (!cpu_has(c, X86_FEATURE_CPB)) set_cpu_cap(c, X86_FEATURE_CPB); } diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 1de0f4170178..2da82eff0eb4 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -71,7 +71,7 @@ void __init check_bugs(void) * identify_boot_cpu() initialized SMT support information, let the * core code know. 
*/ - cpu_smt_check_topology_early(); + cpu_smt_check_topology(); if (!IS_ENABLED(CONFIG_SMP)) { pr_info("CPU: "); @@ -798,15 +798,25 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) if (task_spec_ssb_force_disable(task)) return -EPERM; task_clear_spec_ssb_disable(task); + task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_DISABLE: task_set_spec_ssb_disable(task); + task_clear_spec_ssb_noexec(task); task_update_spec_tif(task); break; case PR_SPEC_FORCE_DISABLE: task_set_spec_ssb_disable(task); task_set_spec_ssb_force_disable(task); + task_clear_spec_ssb_noexec(task); + task_update_spec_tif(task); + break; + case PR_SPEC_DISABLE_NOEXEC: + if (task_spec_ssb_force_disable(task)) + return -EPERM; + task_set_spec_ssb_disable(task); + task_set_spec_ssb_noexec(task); task_update_spec_tif(task); break; default: @@ -885,6 +895,8 @@ static int ssb_prctl_get(struct task_struct *task) case SPEC_STORE_BYPASS_PRCTL: if (task_spec_ssb_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; + if (task_spec_ssb_noexec(task)) + return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; if (task_spec_ssb_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index c4d1023fb0ab..395d46f78582 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -248,6 +248,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, switch (leaf) { case 1: l1 = &l1i; + /* fall through */ case 0: if (!l1->val) return; diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c index 3fed38812eea..6dd78d8235e4 100644 --- a/arch/x86/kernel/cpu/match.c +++ b/arch/x86/kernel/cpu/match.c @@ -48,3 +48,34 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match) return NULL; } EXPORT_SYMBOL(x86_match_cpu); + +static const struct x86_cpu_desc * +x86_match_cpu_with_stepping(const struct x86_cpu_desc *match) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + const struct x86_cpu_desc *m; + + for (m = match; m->x86_family | m->x86_model; m++) { + if (c->x86_vendor != m->x86_vendor) + continue; + if (c->x86 != m->x86_family) + continue; + if (c->x86_model != m->x86_model) + continue; + if (c->x86_stepping != m->x86_stepping) + continue; + return m; + } + return NULL; +} + +bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table) +{ + const struct x86_cpu_desc *res = x86_match_cpu_with_stepping(table); + + if (!res || res->x86_microcode_rev > boot_cpu_data.microcode) + return false; + + return true; +} +EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev); diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c index 89298c83de53..e64de5149e50 100644 --- a/arch/x86/kernel/cpu/mce/amd.c +++ b/arch/x86/kernel/cpu/mce/amd.c @@ -88,11 +88,17 @@ static struct smca_bank_name smca_names[] = { [SMCA_FP] = { "floating_point", "Floating Point Unit" }, [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" }, [SMCA_CS] = { "coherent_slave", "Coherent Slave" }, + [SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" }, [SMCA_PIE] = { "pie", "Power, Interrupts, etc." 
}, [SMCA_UMC] = { "umc", "Unified Memory Controller" }, [SMCA_PB] = { "param_block", "Parameter Block" }, [SMCA_PSP] = { "psp", "Platform Security Processor" }, + [SMCA_PSP_V2] = { "psp", "Platform Security Processor" }, [SMCA_SMU] = { "smu", "System Management Unit" }, + [SMCA_SMU_V2] = { "smu", "System Management Unit" }, + [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" }, + [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" }, + [SMCA_PCIE] = { "pcie", "PCI Express Unit" }, }; static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init = @@ -138,30 +144,42 @@ static struct smca_hwid smca_hwid_mcatypes[] = { { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 }, /* ZN Core (HWID=0xB0) MCA types */ - { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF }, + { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF }, { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF }, { SMCA_DE, HWID_MCATYPE(0xB0, 0x3), 0x1FF }, /* HWID 0xB0 MCATYPE 0x4 is Reserved */ - { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0x7FF }, + { SMCA_EX, HWID_MCATYPE(0xB0, 0x5), 0xFFF }, { SMCA_FP, HWID_MCATYPE(0xB0, 0x6), 0x7F }, { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF }, /* Data Fabric MCA types */ { SMCA_CS, HWID_MCATYPE(0x2E, 0x0), 0x1FF }, - { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0xF }, + { SMCA_PIE, HWID_MCATYPE(0x2E, 0x1), 0x1F }, + { SMCA_CS_V2, HWID_MCATYPE(0x2E, 0x2), 0x3FFF }, /* Unified Memory Controller MCA type */ - { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0x3F }, + { SMCA_UMC, HWID_MCATYPE(0x96, 0x0), 0xFF }, /* Parameter Block MCA type */ { SMCA_PB, HWID_MCATYPE(0x05, 0x0), 0x1 }, /* Platform Security Processor MCA type */ { SMCA_PSP, HWID_MCATYPE(0xFF, 0x0), 0x1 }, + { SMCA_PSP_V2, HWID_MCATYPE(0xFF, 0x1), 0x3FFFF }, /* System Management Unit MCA type */ { SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 }, + { SMCA_SMU_V2, HWID_MCATYPE(0x01, 0x1), 0x7FF }, + + /* Microprocessor 5 Unit MCA type */ + { SMCA_MP5, HWID_MCATYPE(0x01, 0x2), 0x3FF }, + + /* Northbridge IO Unit MCA type */ + { SMCA_NBIO, HWID_MCATYPE(0x18, 0x0), 0x1F }, + + /* PCI Express Unit MCA type */ + { SMCA_PCIE, HWID_MCATYPE(0x46, 0x0), 0x1F }, }; struct smca_bank smca_banks[MAX_NR_BANKS]; @@ -545,6 +563,40 @@ out: return offset; } +/* + * Turn off MC4_MISC thresholding banks on all family 0x15 models since + * they're not supported there. 
+ */ +void disable_err_thresholding(struct cpuinfo_x86 *c) +{ + int i; + u64 hwcr; + bool need_toggle; + u32 msrs[] = { + 0x00000413, /* MC4_MISC0 */ + 0xc0000408, /* MC4_MISC1 */ + }; + + if (c->x86 != 0x15) + return; + + rdmsrl(MSR_K7_HWCR, hwcr); + + /* McStatusWrEn has to be set */ + need_toggle = !(hwcr & BIT(18)); + + if (need_toggle) + wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); + + /* Clear CntP bit safely */ + for (i = 0; i < ARRAY_SIZE(msrs); i++) + msr_clear_bit(msrs[i], 62); + + /* restore old settings */ + if (need_toggle) + wrmsrl(MSR_K7_HWCR, hwcr); +} + /* cpu init entry point, called from mce.c with preempt off */ void mce_amd_feature_init(struct cpuinfo_x86 *c) { @@ -552,6 +604,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) unsigned int bank, block, cpu = smp_processor_id(); int offset = -1; + disable_err_thresholding(c); + for (bank = 0; bank < mca_cfg.banks; ++bank) { if (mce_flags.smca) smca_configure(bank, cpu); diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c index 1d9b3ce662a0..c038e5c00a59 100644 --- a/arch/x86/kernel/cpu/mce/apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -64,11 +64,11 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); #define CPER_CREATOR_MCE \ - UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ - 0x64, 0x90, 0xb8, 0x9d) + GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ + 0x64, 0x90, 0xb8, 0x9d) #define CPER_SECTION_TYPE_MCE \ - UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ - 0x04, 0x4a, 0x38, 0xfc) + GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ + 0x04, 0x4a, 0x38, 0xfc) /* * CPER specification (in UEFI specification 2.3 appendix N) requires @@ -135,7 +135,7 @@ retry: goto out; /* try to skip other type records in storage */ else if (rc != sizeof(rcd) || - uuid_le_cmp(rcd.hdr.creator_id, CPER_CREATOR_MCE)) + !guid_equal(&rcd.hdr.creator_id, &CPER_CREATOR_MCE)) goto retry; memcpy(m, &rcd.mce, sizeof(*m)); rc = sizeof(*m); diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 672c7225cb1b..b7fb541a4873 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, quirk_no_way_out(i, m, regs); if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { + m->bank = i; mce_read_aux(m, i); *msg = tmp; return 1; @@ -1611,36 +1612,6 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) if (c->x86 == 0x15 && c->x86_model <= 0xf) mce_flags.overflow_recov = 1; - /* - * Turn off MC4_MISC thresholding banks on those models since - * they're not supported there. 
- */ - if (c->x86 == 0x15 && - (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { - int i; - u64 hwcr; - bool need_toggle; - u32 msrs[] = { - 0x00000413, /* MC4_MISC0 */ - 0xc0000408, /* MC4_MISC1 */ - }; - - rdmsrl(MSR_K7_HWCR, hwcr); - - /* McStatusWrEn has to be set */ - need_toggle = !(hwcr & BIT(18)); - - if (need_toggle) - wrmsrl(MSR_K7_HWCR, hwcr | BIT(18)); - - /* Clear CntP bit safely */ - for (i = 0; i < ARRAY_SIZE(msrs); i++) - msr_clear_bit(msrs[i], 62); - - /* restore old settings */ - if (need_toggle) - wrmsrl(MSR_K7_HWCR, hwcr); - } } if (c->x86_vendor == X86_VENDOR_INTEL) { diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c index dc3e26e905a3..65201e180fe0 100644 --- a/arch/x86/kernel/cpu/mce/severity.c +++ b/arch/x86/kernel/cpu/mce/severity.c @@ -165,6 +165,11 @@ static struct severity { SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), KERNEL ), + MCESEV( + PANIC, "Instruction fetch error in kernel", + SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR), + KERNEL + ), #endif MCESEV( PANIC, "Action required: unknown MCACOD", diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 51adde0a0f1a..e1f3ba19ba54 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) if (!p) { return ret; } else { - if (boot_cpu_data.microcode == p->patch_id) + if (boot_cpu_data.microcode >= p->patch_id) return ret; ret = UCODE_NEW; diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index e81a2db42df7..3fa238a137d2 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -328,6 +328,18 @@ static void __init ms_hyperv_init_platform(void) # ifdef CONFIG_SMP smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu; # endif + + /* + * Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic, + * set x2apic destination mode to physcial mode when x2apic is available + * and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs + * have 8-bit APIC id. 
+ */ +# ifdef CONFIG_X86_X2APIC + if (x2apic_supported()) + x2apic_phys = 1; +# endif + #endif } diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 3668c5df90c6..5bd011737272 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -296,7 +296,7 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, unsigned long sizek) { unsigned long hole_basek, hole_sizek; - unsigned long second_basek, second_sizek; + unsigned long second_sizek; unsigned long range0_basek, range0_sizek; unsigned long range_basek, range_sizek; unsigned long chunk_sizek; @@ -304,7 +304,6 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek, hole_basek = 0; hole_sizek = 0; - second_basek = 0; second_sizek = 0; chunk_sizek = state->chunk_sizek; gran_sizek = state->gran_sizek; diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 1cabe6fd8e11..4a06c37b9cf1 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o -obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o +obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 822b7db634ee..e49b77283924 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -4,6 +4,7 @@ #include #include +#include #include #define MSR_IA32_L3_QOS_CFG 0xc81 @@ -40,6 +41,21 @@ #define RMID_VAL_ERROR BIT_ULL(63) #define RMID_VAL_UNAVAIL BIT_ULL(62) + +struct rdt_fs_context { + struct kernfs_fs_context kfc; + bool enable_cdpl2; + bool enable_cdpl3; + bool enable_mba_mbps; +}; + +static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) +{ + struct kernfs_fs_context *kfc = fc->fs_private; + + return container_of(kfc, struct rdt_fs_context, kfc); +} + DECLARE_STATIC_KEY_FALSE(rdt_enable_key); /** diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 14bed6af8377..604c0e3bcc83 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -33,13 +33,6 @@ #define CREATE_TRACE_POINTS #include "pseudo_lock_event.h" -/* - * MSR_MISC_FEATURE_CONTROL register enables the modification of hardware - * prefetcher state. Details about this register can be found in the MSR - * tables for specific platforms found in Intel's SDM. - */ -#define MSR_MISC_FEATURE_CONTROL 0x000001a4 - /* * The bits needed to disable hardware prefetching varies based on the * platform. During initialization we will discover which bits to use. 
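
Note on the resctrl mount-API conversion that follows: the rdt_fs_context structure added above embeds the generic kernfs_fs_context, and rdt_fc2context() recovers the outer structure from fc->fs_private with container_of(). A minimal stand-alone sketch of that embed-and-recover pattern, using stand-in types rather than the kernel's (illustrative only, not kernel code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kfc {			/* stand-in for struct kernfs_fs_context */
	void *root;
	unsigned long magic;
};

struct rdt_ctx {		/* stand-in for struct rdt_fs_context */
	struct kfc kfc;		/* embedded generic context; fs_private points here */
	int enable_cdpl2;
	int enable_cdpl3;
};

static struct rdt_ctx *to_rdt_ctx(struct kfc *kfc)
{
	return container_of(kfc, struct rdt_ctx, kfc);
}

int main(void)
{
	struct rdt_ctx ctx = { .enable_cdpl3 = 1 };
	struct kfc *inner = &ctx.kfc;	/* what fc->fs_private would carry */

	printf("cdpl3=%d\n", to_rdt_ctx(inner)->enable_cdpl3);
	return 0;
}

Since container_of() works for any member offset, the layout of the outer context can change without touching the recovery helper; only the final kfree() relies on the outer structure being a single allocation.
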
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 8388adf241b2..399601eda8e4 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,7 @@ #include #include #include +#include #include @@ -1858,46 +1860,6 @@ static void cdp_disable_all(void) cdpl2_disable(); } -static int parse_rdtgroupfs_options(char *data) -{ - char *token, *o = data; - int ret = 0; - - while ((token = strsep(&o, ",")) != NULL) { - if (!*token) { - ret = -EINVAL; - goto out; - } - - if (!strcmp(token, "cdp")) { - ret = cdpl3_enable(); - if (ret) - goto out; - } else if (!strcmp(token, "cdpl2")) { - ret = cdpl2_enable(); - if (ret) - goto out; - } else if (!strcmp(token, "mba_MBps")) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) - ret = set_mba_sc(true); - else - ret = -EINVAL; - if (ret) - goto out; - } else { - ret = -EINVAL; - goto out; - } - } - - return 0; - -out: - pr_err("Invalid mount option \"%s\"\n", token); - - return ret; -} - /* * We don't allow rdtgroup directories to be created anywhere * except the root directory. Thus when looking for the rdtgroup @@ -1969,13 +1931,27 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, struct kernfs_node **mon_data_kn); -static struct dentry *rdt_mount(struct file_system_type *fs_type, - int flags, const char *unused_dev_name, - void *data) +static int rdt_enable_ctx(struct rdt_fs_context *ctx) +{ + int ret = 0; + + if (ctx->enable_cdpl2) + ret = cdpl2_enable(); + + if (!ret && ctx->enable_cdpl3) + ret = cdpl3_enable(); + + if (!ret && ctx->enable_mba_mbps) + ret = set_mba_sc(true); + + return ret; +} + +static int rdt_get_tree(struct fs_context *fc) { + struct rdt_fs_context *ctx = rdt_fc2context(fc); struct rdt_domain *dom; struct rdt_resource *r; - struct dentry *dentry; int ret; cpus_read_lock(); @@ -1984,53 +1960,42 @@ static struct dentry *rdt_mount(struct file_system_type *fs_type, * resctrl file system can only be mounted once. 
*/ if (static_branch_unlikely(&rdt_enable_key)) { - dentry = ERR_PTR(-EBUSY); + ret = -EBUSY; goto out; } - ret = parse_rdtgroupfs_options(data); - if (ret) { - dentry = ERR_PTR(ret); + ret = rdt_enable_ctx(ctx); + if (ret < 0) goto out_cdp; - } closid_init(); ret = rdtgroup_create_info_dir(rdtgroup_default.kn); - if (ret) { - dentry = ERR_PTR(ret); - goto out_cdp; - } + if (ret < 0) + goto out_mba; if (rdt_mon_capable) { ret = mongroup_create_dir(rdtgroup_default.kn, NULL, "mon_groups", &kn_mongrp); - if (ret) { - dentry = ERR_PTR(ret); + if (ret < 0) goto out_info; - } kernfs_get(kn_mongrp); ret = mkdir_mondata_all(rdtgroup_default.kn, &rdtgroup_default, &kn_mondata); - if (ret) { - dentry = ERR_PTR(ret); + if (ret < 0) goto out_mongrp; - } kernfs_get(kn_mondata); rdtgroup_default.mon.mon_data_kn = kn_mondata; } ret = rdt_pseudo_lock_init(); - if (ret) { - dentry = ERR_PTR(ret); + if (ret) goto out_mondata; - } - dentry = kernfs_mount(fs_type, flags, rdt_root, - RDTGROUP_SUPER_MAGIC, NULL); - if (IS_ERR(dentry)) + ret = kernfs_get_tree(fc); + if (ret < 0) goto out_psl; if (rdt_alloc_capable) @@ -2059,14 +2024,95 @@ out_mongrp: kernfs_remove(kn_mongrp); out_info: kernfs_remove(kn_info); +out_mba: + if (ctx->enable_mba_mbps) + set_mba_sc(false); out_cdp: cdp_disable_all(); out: rdt_last_cmd_clear(); mutex_unlock(&rdtgroup_mutex); cpus_read_unlock(); + return ret; +} + +enum rdt_param { + Opt_cdp, + Opt_cdpl2, + Opt_mba_mpbs, + nr__rdt_params +}; + +static const struct fs_parameter_spec rdt_param_specs[] = { + fsparam_flag("cdp", Opt_cdp), + fsparam_flag("cdpl2", Opt_cdpl2), + fsparam_flag("mba_mpbs", Opt_mba_mpbs), + {} +}; + +static const struct fs_parameter_description rdt_fs_parameters = { + .name = "rdt", + .specs = rdt_param_specs, +}; + +static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + struct fs_parse_result result; + int opt; + + opt = fs_parse(fc, &rdt_fs_parameters, param, &result); + if (opt < 0) + return opt; - return dentry; + switch (opt) { + case Opt_cdp: + ctx->enable_cdpl3 = true; + return 0; + case Opt_cdpl2: + ctx->enable_cdpl2 = true; + return 0; + case Opt_mba_mpbs: + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return -EINVAL; + ctx->enable_mba_mbps = true; + return 0; + } + + return -EINVAL; +} + +static void rdt_fs_context_free(struct fs_context *fc) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + + kernfs_free_fs_context(fc); + kfree(ctx); +} + +static const struct fs_context_operations rdt_fs_context_ops = { + .free = rdt_fs_context_free, + .parse_param = rdt_parse_param, + .get_tree = rdt_get_tree, +}; + +static int rdt_init_fs_context(struct fs_context *fc) +{ + struct rdt_fs_context *ctx; + + ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->kfc.root = rdt_root; + ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; + fc->fs_private = &ctx->kfc; + fc->ops = &rdt_fs_context_ops; + if (fc->user_ns) + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(&init_user_ns); + fc->global = true; + return 0; } static int reset_all_ctrls(struct rdt_resource *r) @@ -2239,9 +2285,10 @@ static void rdt_kill_sb(struct super_block *sb) } static struct file_system_type rdt_fs_type = { - .name = "resctrl", - .mount = rdt_mount, - .kill_sb = rdt_kill_sb, + .name = "resctrl", + .init_fs_context = rdt_init_fs_context, + .parameters = &rdt_fs_parameters, + .kill_sb = rdt_kill_sb, }; static int mon_addfile(struct kernfs_node *parent_kn, const char 
*name, diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 50895c2f937d..2879e234e193 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -671,21 +672,18 @@ __init void e820__reallocate_tables(void) int size; size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table->nr_entries; - n = kmalloc(size, GFP_KERNEL); + n = kmemdup(e820_table, size, GFP_KERNEL); BUG_ON(!n); - memcpy(n, e820_table, size); e820_table = n; size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_kexec->nr_entries; - n = kmalloc(size, GFP_KERNEL); + n = kmemdup(e820_table_kexec, size, GFP_KERNEL); BUG_ON(!n); - memcpy(n, e820_table_kexec, size); e820_table_kexec = n; size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry)*e820_table_firmware->nr_entries; - n = kmalloc(size, GFP_KERNEL); + n = kmemdup(e820_table_firmware, size, GFP_KERNEL); BUG_ON(!n); - memcpy(n, e820_table_firmware, size); e820_table_firmware = n; } @@ -778,7 +776,7 @@ u64 __init e820__memblock_alloc_reserved(u64 size, u64 align) { u64 addr; - addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); + addr = memblock_phys_alloc(size, align); if (addr) { e820__range_update_kexec(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED); pr_info("update e820_table_kexec for e820__memblock_alloc_reserved()\n"); @@ -881,6 +879,10 @@ static int __init parse_memopt(char *p) e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1); +#ifdef CONFIG_MEMORY_HOTPLUG + max_mem_size = mem_size; +#endif + return 0; } early_param("mem", parse_memopt); @@ -1095,6 +1097,9 @@ void __init e820__reserve_resources(void) res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*res) * e820_table->nr_entries); e820_res = res; for (i = 0; i < e820_table->nr_entries; i++) { diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 374a52fa5296..9b33904251a9 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -388,10 +388,6 @@ static int __init setup_early_printk(char *buf) if (!strncmp(buf, "xen", 3)) early_console_register(&xenboot_console, keep); #endif -#ifdef CONFIG_EARLY_PRINTK_EFI - if (!strncmp(buf, "efi", 3)) - early_console_register(&early_efi_console, keep); -#endif #ifdef CONFIG_EARLY_PRINTK_USB_XDBC if (!strncmp(buf, "xdbc", 4)) early_xdbc_parse_parameter(buf + 4); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 9cc108456d0b..d7432c2b1051 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -669,7 +669,7 @@ static bool is_supported_xstate_size(unsigned int test_xstate_size) return false; } -static int init_xstate_size(void) +static int __init init_xstate_size(void) { /* Recompute the context size for enabled features: */ unsigned int possible_xstate_size; diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 8257a59704ae..ef49517f6bb2 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -49,7 +49,7 @@ int ftrace_arch_code_modify_post_process(void) union ftrace_code_union { char code[MCOUNT_INSN_SIZE]; struct { - unsigned char e8; + unsigned char op; int offset; } __attribute__((packed)); }; @@ -59,20 +59,23 @@ static int ftrace_calc_offset(long ip, long addr) return (int)(addr - ip); } -static unsigned char *ftrace_call_replace(unsigned 
long ip, unsigned long addr) +static unsigned char * +ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr) { static union ftrace_code_union calc; - calc.e8 = 0xe8; + calc.op = op; calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - /* - * No locking needed, this must be called via kstop_machine - * which in essence is like running on a uniprocessor machine. - */ return calc.code; } +static unsigned char * +ftrace_call_replace(unsigned long ip, unsigned long addr) +{ + return ftrace_text_replace(0xe8, ip, addr); +} + static inline int within(unsigned long addr, unsigned long start, unsigned long end) { @@ -269,7 +272,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return ret; } -static int is_ftrace_caller(unsigned long ip) +static nokprobe_inline int is_ftrace_caller(unsigned long ip) { if (ip == ftrace_update_func) return 1; @@ -299,6 +302,7 @@ int ftrace_int3_handler(struct pt_regs *regs) return 1; } +NOKPROBE_SYMBOL(ftrace_int3_handler); static int ftrace_write(unsigned long ip, const char *val, int size) { @@ -664,22 +668,6 @@ int __init ftrace_dyn_arch_init(void) return 0; } -#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER) -static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) -{ - static union ftrace_code_union calc; - - /* Jmp not a call (ignore the .e8) */ - calc.e8 = 0xe9; - calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr); - - /* - * ftrace external locks synchronize the access to the static variable. - */ - return calc.code; -} -#endif - /* Currently only x86_64 supports dynamic trampolines */ #ifdef CONFIG_X86_64 @@ -891,8 +879,8 @@ static void *addr_from_call(void *ptr) return NULL; /* Make sure this is a call */ - if (WARN_ON_ONCE(calc.e8 != 0xe8)) { - pr_warn("Expected e8, got %x\n", calc.e8); + if (WARN_ON_ONCE(calc.op != 0xe8)) { + pr_warn("Expected e8, got %x\n", calc.op); return NULL; } @@ -963,6 +951,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops) #ifdef CONFIG_DYNAMIC_FTRACE extern void ftrace_graph_call(void); +static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) +{ + return ftrace_text_replace(0xe9, ip, addr); +} + static int ftrace_mod_jmp(unsigned long ip, void *func) { unsigned char *new; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 34a5c1715148..ff9bfd40429e 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -261,12 +261,8 @@ static int arch_build_bp_info(struct perf_event *bp, * allow kernel breakpoints at all. 
*/ if (attr->bp_addr >= TASK_SIZE_MAX) { -#ifdef CONFIG_KPROBES if (within_kprobe_blacklist(attr->bp_addr)) return -EINVAL; -#else - return -EINVAL; -#endif } hw->type = X86_BREAKPOINT_EXECUTE; @@ -279,6 +275,7 @@ static int arch_build_bp_info(struct perf_event *bp, hw->len = X86_BREAKPOINT_LEN_X; return 0; } + /* fall through */ default: return -EINVAL; } diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 0d5efa34f359..22f60dd26460 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr, struct efi_info *current_ei = &boot_params.efi_info; struct efi_info *ei = ¶ms->efi_info; + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + if (!current_ei->efi_memmap_size) return 0; @@ -215,6 +218,9 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, params->screen_info.ext_mem_k = 0; params->alt_mem_k = 0; + /* Always fill in RSDP: it is either 0 or a valid value */ + params->acpi_rsdp_addr = boot_params.acpi_rsdp_addr; + /* Default APM info */ memset(¶ms->apm_bios_info, 0, sizeof(params->apm_bios_info)); @@ -253,7 +259,6 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, setup_efi_state(params, params_load_addr, efi_map_offset, efi_map_sz, efi_setup_data_offset); #endif - /* Setup EDD info */ memcpy(params->eddbuf, boot_params.eddbuf, EDDMAXNR * sizeof(struct edd_info)); @@ -533,9 +538,17 @@ static int bzImage64_cleanup(void *loader_data) #ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) { - return verify_pefile_signature(kernel, kernel_len, - VERIFY_USE_SECONDARY_KEYRING, - VERIFYING_KEXEC_PE_SIGNATURE); + int ret; + + ret = verify_pefile_signature(kernel, kernel_len, + VERIFY_USE_SECONDARY_KEYRING, + VERIFYING_KEXEC_PE_SIGNATURE); + if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING)) { + ret = verify_pefile_signature(kernel, kernel_len, + VERIFY_USE_PLATFORM_KEYRING, + VERIFYING_KEXEC_PE_SIGNATURE); + } + return ret; } #endif diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 5db08425063e..4ff6b4cdb941 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -467,6 +467,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ip = addr; + /* fall through */ case 'D': case 'k': /* clear the trace bit */ diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 4ba75afba527..a034cb808e7e 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1028,6 +1028,13 @@ NOKPROBE_SYMBOL(kprobe_fault_handler); int __init arch_populate_kprobe_blacklist(void) { + int ret; + + ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); + if (ret) + return ret; + return kprobe_add_area_blacklist((unsigned long)__entry_text_start, (unsigned long)__entry_text_end); } diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 6adf6e6c2933..f14262952015 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -97,6 +97,7 @@ static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) } asm ( + ".pushsection .rodata\n" "optprobe_template_func:\n" ".global optprobe_template_entry\n" "optprobe_template_entry:\n" @@ -136,8 +137,7 @@ asm ( #endif ".global 
optprobe_template_end\n" "optprobe_template_end:\n" - ".type optprobe_template_func, @function\n" - ".size optprobe_template_func, .-optprobe_template_func\n"); + ".popsection\n"); void optprobe_template_func(void); STACK_FRAME_NON_STANDARD(optprobe_template_func); diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 4c8acdfdc5a7..ceba408ea982 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -352,6 +352,8 @@ void machine_kexec(struct kimage *image) void arch_crash_save_vmcoreinfo(void) { + u64 sme_mask = sme_me_mask; + VMCOREINFO_NUMBER(phys_base); VMCOREINFO_SYMBOL(init_top_pgt); vmcoreinfo_append_str("NUMBER(pgtable_l5_enabled)=%d\n", @@ -364,6 +366,7 @@ void arch_crash_save_vmcoreinfo(void) vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE); + VMCOREINFO_NUMBER(sme_mask); } /* arch-dependent functionality related to kexec file-based syscall */ diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 90ae0ca51083..58ac7be52c7a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -255,6 +255,18 @@ void arch_setup_new_exec(void) /* If cpuid was previously disabled for this task, re-enable it. */ if (test_thread_flag(TIF_NOCPUID)) enable_cpuid(); + + /* + * Don't inherit TIF_SSBD across exec boundary when + * PR_SPEC_DISABLE_NOEXEC is used. + */ + if (test_thread_flag(TIF_SSBD) && + task_spec_ssb_noexec(current)) { + clear_thread_flag(TIF_SSBD); + task_clear_spec_ssb_disable(current); + task_clear_spec_ssb_noexec(current); + speculation_ctrl_update(task_thread_info(current)->flags); + } } static inline void switch_to_bitmap(struct thread_struct *prev, diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index e8796fcd7e5a..4bf46575568a 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -106,22 +106,22 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, void *ptr; if (!node_online(node) || !NODE_DATA(node)) { - ptr = memblock_alloc_from_nopanic(size, align, goal); + ptr = memblock_alloc_from(size, align, goal); pr_info("cpu %d has no node %d or node-local memory\n", cpu, node); pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", cpu, size, __pa(ptr)); } else { - ptr = memblock_alloc_try_nid_nopanic(size, align, goal, - MEMBLOCK_ALLOC_ACCESSIBLE, - node); + ptr = memblock_alloc_try_nid(size, align, goal, + MEMBLOCK_ALLOC_ACCESSIBLE, + node); pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n", cpu, size, node, __pa(ptr)); } return ptr; #else - return memblock_alloc_from_nopanic(size, align, goal); + return memblock_alloc_from(size, align, goal); #endif } @@ -171,7 +171,7 @@ void __init setup_per_cpu_areas(void) unsigned long delta; int rc; - pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%d\n", + pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n", NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); /* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ccd1f2a8e557..ce1a67b70168 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include @@ -149,7 +150,7 @@ static inline void smpboot_restore_warm_reset_vector(void) */ static void smp_callin(void) { - int cpuid, phys_id; + int cpuid; /* * If waken up by an INIT in an 82489DX configuration @@ -159,11 +160,6 @@ static void smp_callin(void) 
*/ cpuid = smp_processor_id(); - /* - * (This works even if the APIC is not enabled.) - */ - phys_id = read_apic_id(); - /* * the boot CPU has finished the init stage and is spinning * on callin_map until we finish. We are free to set up this @@ -841,7 +837,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* reduce the number of lines printed when booting a large cpu count system */ static void announce_cpu(int cpu, int apicid) { - static int current_node = -1; + static int current_node = NUMA_NO_NODE; int node = early_cpu_to_node(cpu); static int width, node_width; diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 9b7c4ca8f0a7..d26f9e9c3d83 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -111,6 +111,7 @@ void ist_enter(struct pt_regs *regs) /* This code is a bit fragile. Test it. */ RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); } +NOKPROBE_SYMBOL(ist_enter); void ist_exit(struct pt_regs *regs) { @@ -880,12 +881,12 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code) dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code) { - unsigned long cr0; + unsigned long cr0 = read_cr0(); RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU"); #ifdef CONFIG_MATH_EMULATION - if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) { + if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) { struct math_emu_info info = { }; cond_local_irq_enable(regs); @@ -897,7 +898,6 @@ do_device_not_available(struct pt_regs *regs, long error_code) #endif /* This should not happen. */ - cr0 = read_cr0(); if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) { /* Try to fix it up and carry on. */ write_cr0(cr0 & ~X86_CR0_TS); diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 3dc26f95d46e..9b9fd4826e7a 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -320,10 +320,14 @@ bool unwind_next_frame(struct unwind_state *state) } /* Get the next frame pointer: */ - if (state->regs) + if (state->next_bp) { + next_bp = state->next_bp; + state->next_bp = NULL; + } else if (state->regs) { next_bp = (unsigned long *)state->regs->bp; - else + } else { next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp); + } /* Move to the next frame if it's safe: */ if (!update_stack_state(state, next_bp)) @@ -398,6 +402,21 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, bp = get_frame_pointer(task, regs); + /* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer. + * That means that SP points into the middle of an incomplete frame: + * *SP is a return pointer, and *(SP-sizeof(unsigned long)) is where we + * would have written a frame pointer if we hadn't crashed. + * Pretend that the frame is complete and that BP points to it, but save + * the real BP so that we can use it when looking for the next frame. 
+ */ + if (regs && regs->ip == 0 && + (unsigned long *)kernel_stack_pointer(regs) >= first_frame) { + state->next_bp = bp; + bp = ((unsigned long *)kernel_stack_pointer(regs)) - 1; + } + /* Initialize stack info and make sure the frame data is accessible: */ get_stack_info(bp, state->task, &state->stack_info, &state->stack_mask); @@ -410,7 +429,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, */ while (!unwind_done(state) && (!on_stack(&state->stack_info, first_frame, sizeof(long)) || - state->bp < first_frame)) + (state->next_bp == NULL && state->bp < first_frame))) unwind_next_frame(state); } EXPORT_SYMBOL_GPL(__unwind_start); diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 26038eacf74a..89be1be1790c 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -113,6 +113,20 @@ static struct orc_entry *orc_ftrace_find(unsigned long ip) } #endif +/* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer, + * and we don't have unwind information for NULL. + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function + * pointer into its parent and then continue normally from there. + */ +static struct orc_entry null_orc_entry = { + .sp_offset = sizeof(long), + .sp_reg = ORC_REG_SP, + .bp_reg = ORC_REG_UNDEFINED, + .type = ORC_TYPE_CALL +}; + static struct orc_entry *orc_find(unsigned long ip) { static struct orc_entry *orc; @@ -120,6 +134,9 @@ static struct orc_entry *orc_find(unsigned long ip) if (!orc_init) return NULL; + if (ip == 0) + return &null_orc_entry; + /* For non-init vmlinux addresses, use the fast lookup table: */ if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { unsigned int idx, start, stop; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 843feb94a950..ccf03416e434 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -745,6 +745,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) * OPCODE1() of the "short" jmp which checks the same condition. */ opc1 = OPCODE2(insn) - 0x10; + /* fall through */ default: if (!is_cond_jmp_opcode(opc1)) return -ENOSYS; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 0d618ee634ac..bad8c51fee6e 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -31,7 +31,7 @@ #undef i386 /* in case the preprocessor is a 32bit one */ -OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) +OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) @@ -401,7 +401,7 @@ SECTIONS * Per-cpu symbols which need to be offset from __per_cpu_load * for the boot processor. */ -#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load +#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load INIT_PER_CPU(gdt_page); INIT_PER_CPU(irq_stack_union); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index bbffa6c54697..c07958b59f50 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? 
F(INTEL_PT) : 0; + unsigned f_la57 = 0; /* cpuid 1.edx */ const u32 kvm_cpuid_1_edx_x86_features = @@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, // TSC_ADJUST is emulated entry->ebx |= F(TSC_ADJUST); entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; + f_la57 = entry->ecx & F(LA57); cpuid_mask(&entry->ecx, CPUID_7_ECX); + /* Set LA57 based on hardware capability. */ + entry->ecx |= f_la57; entry->ecx |= f_umip; /* PKU is not yet implemented for shadow paging. */ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index da9c42349b1f..f2d1d230d5b8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, &invalid_list); mmu->root_hpa = INVALID_PAGE; } + mmu->root_cr3 = 0; } kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); @@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); } else BUG(); + vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); return 0; } @@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) { struct kvm_mmu_page *sp; u64 pdptr, pm_mask; - gfn_t root_gfn; + gfn_t root_gfn, root_cr3; int i; - root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; + root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); + root_gfn = root_cr3 >> PAGE_SHIFT; if (mmu_check_root(vcpu, root_gfn)) return 1; @@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) ++sp->root_count; spin_unlock(&vcpu->kvm->mmu_lock); vcpu->arch.mmu->root_hpa = root; - return 0; + goto set_root_cr3; } /* @@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); } +set_root_cr3: + vcpu->arch.mmu->root_cr3 = root_cr3; + return 0; } @@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, struct kvm_mmu_root_info root; struct kvm_mmu *mmu = vcpu->arch.mmu; - root.cr3 = mmu->get_cr3(vcpu); + root.cr3 = mmu->root_cr3; root.hpa = mmu->root_hpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { @@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, } mmu->root_hpa = root.hpa; + mmu->root_cr3 = root.cr3; return i < KVM_MMU_NUM_PREV_ROOTS; } @@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) ext.cr4_pse = !!is_pse(vcpu); ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); + ext.maxphyaddr = cpuid_maxphyaddr(vcpu); ext.valid = 1; @@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.root_mmu.root_cr3 = 0; vcpu->arch.root_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; + vcpu->arch.guest_mmu.root_cr3 = 0; vcpu->arch.guest_mmu.translate_gpa = translate_gpa; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 8ff20523661b..d737a51a53ca 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu) if 
(!vmx->nested.vmxon && !vmx->nested.smm.vmxon) return; + hrtimer_cancel(&vmx->nested.preemption_timer); vmx->nested.vmxon = false; vmx->nested.smm.vmxon = false; free_vpid(vmx->nested.vpid02); @@ -2472,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) return -EINVAL; + if (!nested_cpu_has_preemption_timer(vmcs12) && + nested_cpu_has_save_preemption_timer(vmcs12)) + return -EINVAL; + if (nested_cpu_has_ept(vmcs12) && !valid_ept_address(vcpu, vmcs12->ept_pointer)) return -EINVAL; @@ -5556,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, * secondary cpu-based controls. Do not include those that * depend on CPUID bits, they are added later by vmx_cpuid_update. */ - rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, - msrs->secondary_ctls_low, - msrs->secondary_ctls_high); + if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) + rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, + msrs->secondary_ctls_low, + msrs->secondary_ctls_high); + msrs->secondary_ctls_low = 0; msrs->secondary_ctls_high &= SECONDARY_EXEC_DESC | diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4341175339f3..30a6bcd735ec 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -862,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, if (!entry_only) j = find_msr(&m->host, msr); - if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { + if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || + (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { printk_once(KERN_WARNING "Not enough msr switch entries. " "Can't add msr %x\n", msr); return; @@ -1192,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) return; - /* - * First handle the simple case where no cmpxchg is necessary; just - * allow posting non-urgent interrupts. - * - * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change - * PI.NDST: pi_post_block will do it for us and the wakeup_handler - * expects the VCPU to be on the blocked_vcpu_list that matches - * PI.NDST. - */ - if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || - vcpu->cpu == cpu) { - pi_clear_sn(pi_desc); - return; - } - /* The full case. */ do { old.control = new.control = pi_desc->control; @@ -1221,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) new.sn = 0; } while (cmpxchg64(&pi_desc->control, old.control, new.control) != old.control); + + /* + * Clear SN before reading the bitmap. The VT-d firmware + * writes the bitmap and reads SN atomically (5.2.3 in the + * spec), so it doesn't really have a memory barrier that + * pairs with this, but we cannot do that and we need one. + */ + smp_mb__after_atomic(); + + if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS)) + pi_set_on(pi_desc); } /* @@ -6823,7 +6821,7 @@ static int vmx_vm_init(struct kvm *kvm) * Warn upon starting the first VM in a potentially * insecure environment. 
*/ - if (cpu_smt_control == CPU_SMT_ENABLED) + if (sched_smt_active()) pr_warn_once(L1TF_MSG_SMT); if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) pr_warn_once(L1TF_MSG_L1D); diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 99328954c2fc..0ac0a64c7790 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); } -static inline void pi_clear_sn(struct pi_desc *pi_desc) +static inline void pi_set_sn(struct pi_desc *pi_desc) { - return clear_bit(POSTED_INTR_SN, + return set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control); } -static inline void pi_set_sn(struct pi_desc *pi_desc) +static inline void pi_set_on(struct pi_desc *pi_desc) { - return set_bit(POSTED_INTR_SN, - (unsigned long *)&pi_desc->control); + set_bit(POSTED_INTR_ON, + (unsigned long *)&pi_desc->control); } static inline void pi_clear_on(struct pi_desc *pi_desc) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d27206f6c01..941f932373d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, { u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; + /* + * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED + * is returned, but our callers are not ready for that and they blindly + * call kvm_inject_page_fault. Ensure that they at least do not leak + * uninitialized kernel stack memory into cr2 and error code. + */ + memset(exception, 0, sizeof(*exception)); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } @@ -7794,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * 1) We should set ->mode before checking ->requests. Please see * the comment in kvm_vcpu_exiting_guest_mode(). * - * 2) For APICv, we should set ->mode before checking PIR.ON. This + * 2) For APICv, we should set ->mode before checking PID.ON. This * pairs with the memory barrier implicit in pi_test_and_set_on * (see vmx_deliver_posted_interrupt). 
* diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index 9119d8e41f1f..cf00ab6c6621 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -179,6 +179,8 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off) if (insn->addr_bytes == 2) return -EINVAL; + /* fall through */ + case -EDOM: case offsetof(struct pt_regs, bx): case offsetof(struct pt_regs, si): diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c index 66894675f3c8..df50451d94ef 100644 --- a/arch/x86/lib/iomem.c +++ b/arch/x86/lib/iomem.c @@ -2,8 +2,11 @@ #include #include +#define movs(type,to,from) \ + asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory") + /* Originally from i386/string.h */ -static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) +static __always_inline void rep_movs(void *to, const void *from, size_t n) { unsigned long d0, d1, d2; asm volatile("rep ; movsl\n\t" @@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) { - __iomem_memcpy(to, (const void *)from, n); + if (unlikely(!n)) + return; + + /* Align any unaligned source IO */ + if (unlikely(1 & (unsigned long)from)) { + movs("b", to, from); + n--; + } + if (n > 1 && unlikely(2 & (unsigned long)from)) { + movs("w", to, from); + n-=2; + } + rep_movs(to, (const void *)from, n); } EXPORT_SYMBOL(memcpy_fromio); void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) { - __iomem_memcpy((void *)to, (const void *) from, n); + if (unlikely(!n)) + return; + + /* Align any unaligned destination IO */ + if (unlikely(1 & (unsigned long)to)) { + movs("b", to, from); + n--; + } + if (n > 1 && unlikely(2 & (unsigned long)to)) { + movs("w", to, from); + n-=2; + } + rep_movs((void *)to, (const void *) from, n); } EXPORT_SYMBOL(memcpy_toio); diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index bfd94e7812fc..7d290777246d 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -54,13 +54,13 @@ do { \ } while (0) /** - * clear_user: - Zero a block of memory in user space. + * clear_user - Zero a block of memory in user space. * @to: Destination address, in user space. * @n: Number of bytes to zero. * * Zero a block of memory in user space. * - * Returns number of bytes that could not be cleared. + * Return: number of bytes that could not be cleared. * On success, this will be zero. */ unsigned long @@ -74,14 +74,14 @@ clear_user(void __user *to, unsigned long n) EXPORT_SYMBOL(clear_user); /** - * __clear_user: - Zero a block of memory in user space, with less checking. + * __clear_user - Zero a block of memory in user space, with less checking. * @to: Destination address, in user space. * @n: Number of bytes to zero. * * Zero a block of memory in user space. Caller must check * the specified block with access_ok() before calling this function. * - * Returns number of bytes that could not be cleared. + * Return: number of bytes that could not be cleared. * On success, this will be zero. 
*/ unsigned long diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c index 12d7e7fb4efd..19c6abf9ea31 100644 --- a/arch/x86/mm/cpu_entry_area.c +++ b/arch/x86/mm/cpu_entry_area.c @@ -52,7 +52,7 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); } -static void percpu_setup_debug_store(int cpu) +static void __init percpu_setup_debug_store(int cpu) { #ifdef CONFIG_CPU_SUP_INTEL int npages; diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index e3cdc85ce5b6..ee8f8ab46941 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -444,7 +444,6 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, int i; pud_t *start, *pud_start; pgprotval_t prot, eff; - pud_t *prev_pud = NULL; pud_start = start = (pud_t *)p4d_page_vaddr(addr); @@ -462,7 +461,6 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, } else note_page(m, st, __pgprot(0), 0, 3); - prev_pud = start; start++; } } diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 6521134057e8..3c4568f8fb28 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -117,67 +117,12 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, } EXPORT_SYMBOL_GPL(ex_handler_fprestore); -/* Helper to check whether a uaccess fault indicates a kernel bug. */ -static bool bogus_uaccess(struct pt_regs *regs, int trapnr, - unsigned long fault_addr) -{ - /* This is the normal case: #PF with a fault address in userspace. */ - if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX) - return false; - - /* - * This code can be reached for machine checks, but only if the #MC - * handler has already decided that it looks like a candidate for fixup. - * This e.g. happens when attempting to access userspace memory which - * the CPU can't access because of uncorrectable bad memory. - */ - if (trapnr == X86_TRAP_MC) - return false; - - /* - * There are two remaining exception types we might encounter here: - * - #PF for faulting accesses to kernel addresses - * - #GP for faulting accesses to noncanonical addresses - * Complain about anything else. - */ - if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) { - WARN(1, "unexpected trap %d in uaccess\n", trapnr); - return false; - } - - /* - * This is a faulting memory access in kernel space, on a kernel - * address, in a usercopy function. This can e.g. be caused by improper - * use of helpers like __put_user and by improper attempts to access - * userspace addresses in KERNEL_DS regions. - * The one (semi-)legitimate exception are probe_kernel_{read,write}(), - * which can be invoked from places like kgdb, /dev/mem (for reading) - * and privileged BPF code (for reading). - * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag - * to tell us that faulting on kernel addresses, and even noncanonical - * addresses, in a userspace accessor does not necessarily imply a - * kernel bug, root might just be doing weird stuff. - */ - if (current->kernel_uaccess_faults_ok) - return false; - - /* This is bad. Refuse the fixup so that we go into die(). 
*/ - if (trapnr == X86_TRAP_PF) { - pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n", - fault_addr); - } else { - pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n"); - } - return true; -} - __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, struct pt_regs *regs, int trapnr, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; + WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?"); regs->ip = ex_fixup_addr(fixup); return true; } @@ -188,8 +133,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup, unsigned long error_code, unsigned long fault_addr) { - if (bogus_uaccess(regs, trapnr, fault_addr)) - return false; /* Special hack for uaccess_err */ current->thread.uaccess_err = 1; regs->ip = ex_fixup_addr(fixup); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2ff25ad33233..667f1da36208 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index) return; } - addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); + addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24); #ifdef CONFIG_X86_64 addr |= ((u64)desc.base3 << 32); #endif @@ -1031,7 +1031,7 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, static void do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, - unsigned int fault) + vm_fault_t fault) { struct task_struct *tsk = current; diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 5378d10f1d31..0029604af8a4 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -705,7 +705,7 @@ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size) return arch_memremap_can_ram_remap(phys_addr, size, 0); } -#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT +#ifdef CONFIG_AMD_MEM_ENCRYPT /* Remap memory with encryption */ void __init *early_memremap_encrypted(resource_size_t phys_addr, unsigned long size) @@ -747,7 +747,7 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP); } -#endif /* CONFIG_ARCH_USE_MEMREMAP_PROT */ +#endif /* CONFIG_AMD_MEM_ENCRYPT */ static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 462fde83b515..8dc0fc0b1382 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -24,14 +24,16 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES]; static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); -static __init void *early_alloc(size_t size, int nid, bool panic) +static __init void *early_alloc(size_t size, int nid, bool should_panic) { - if (panic) - return memblock_alloc_try_nid(size, size, - __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); - else - return memblock_alloc_try_nid_nopanic(size, size, + void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); + + if (!ptr && should_panic) + panic("%pS: Failed to allocate page, nid=%d from=%lx\n", + (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS)); + + return ptr; } static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index de1851d15699..c805db6236b4 100644 --- a/arch/x86/mm/mpx.c +++ 
b/arch/x86/mm/mpx.c @@ -9,12 +9,12 @@ #include #include #include +#include #include #include #include #include -#include #include #include #include diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index 1308f5408bf7..dfb6c4df639a 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -123,7 +123,7 @@ void __init setup_node_to_cpumask_map(void) alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); /* cpumask_of_node() will now work */ - pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); + pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); } static int __init numa_add_memblk_to(int nid, u64 start, u64 end, @@ -195,15 +195,11 @@ static void __init alloc_node_data(int nid) * Allocate node data. Try node-local memory and then any node. * Never allocate in DMA zone. */ - nd_pa = memblock_phys_alloc_nid(nd_size, SMP_CACHE_BYTES, nid); + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); if (!nd_pa) { - nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES, - MEMBLOCK_ALLOC_ACCESSIBLE); - if (!nd_pa) { - pr_err("Cannot find %zu bytes in any node (initial node: %d)\n", - nd_size, nid); - return; - } + pr_err("Cannot find %zu bytes in any node (initial node: %d)\n", + nd_size, nid); + return; } nd = __va(nd_pa); @@ -866,7 +862,7 @@ const struct cpumask *cpumask_of_node(int node) { if (node >= nr_node_ids) { printk(KERN_WARNING - "cpumask_of_node(%d): node > nr_node_ids(%d)\n", + "cpumask_of_node(%d): node > nr_node_ids(%u)\n", node, nr_node_ids); dump_stack(); return cpu_none_mask; diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4f8972311a77..4c570612e24e 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn) #endif +/* + * See set_mce_nospec(). + * + * Machine check recovery code needs to change cache mode of poisoned pages to + * UC to avoid speculative access logging another error. But passing the + * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a + * speculative access. So we cheat and flip the top bit of the address. This + * works fine for the code that updates the page tables. But at the end of the + * process we need to flush the TLB and cache and the non-canonical address + * causes a #GP fault when used by the INVLPG and CLFLUSH instructions. + * + * But in the common case we already have a canonical address. This code + * will fix the top bit if needed and is a no-op otherwise. 
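The fix_addr() helper introduced in this pageattr.c hunk is the whole trick: shifting left then arithmetic-shifting right by one makes bit 63 a copy of bit 62, so an address whose top bit was deliberately flipped comes back canonical, while an already-canonical address passes through unchanged. A small stand-alone demonstration, assuming a 64-bit long with arithmetic right shift as on x86-64:

#include <stdio.h>
#include <stdint.h>

/* Same expression as the fix_addr() in the hunk; bit 63 becomes a copy of
 * bit 62. */
static unsigned long fix_addr(unsigned long addr)
{
        return (long)(addr << 1) >> 1;
}

int main(void)
{
        unsigned long canonical = 0xffff888012345000UL;      /* direct-map style */
        unsigned long flipped   = canonical & ~(1UL << 63);  /* top bit cleared  */

        printf("%#lx -> %#lx\n", flipped, fix_addr(flipped));
        printf("%#lx -> %#lx\n", canonical, fix_addr(canonical));
        return 0;
}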
+ */ +static inline unsigned long fix_addr(unsigned long addr) +{ +#ifdef CONFIG_X86_64 + return (long)(addr << 1) >> 1; +#else + return addr; +#endif +} + static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) { if (cpa->flags & CPA_PAGES_ARRAY) { @@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data) unsigned int i; for (i = 0; i < cpa->numpages; i++) - __flush_tlb_one_kernel(__cpa_addr(cpa, i)); + __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i))); } static void cpa_flush(struct cpa_data *data, int cache) @@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache) * Only flush present addresses: */ if (pte && (pte_val(*pte) & _PAGE_PRESENT)) - clflush_cache_range_opt((void *)addr, PAGE_SIZE); + clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE); } mb(); } @@ -715,7 +738,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address, { unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn; pgprot_t old_prot, new_prot, req_prot, chk_prot; - pte_t new_pte, old_pte, *tmp; + pte_t new_pte, *tmp; enum pg_level level; /* @@ -758,7 +781,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address, * Convert protection attributes to 4k-format, as cpa->mask* are set * up accordingly. */ - old_pte = *kpte; + /* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */ req_prot = pgprot_large_2_4k(old_prot); @@ -1627,29 +1650,6 @@ out: return ret; } -/* - * Machine check recovery code needs to change cache mode of poisoned - * pages to UC to avoid speculative access logging another error. But - * passing the address of the 1:1 mapping to set_memory_uc() is a fine - * way to encourage a speculative access. So we cheat and flip the top - * bit of the address. This works fine for the code that updates the - * page tables. But at the end of the process we need to flush the cache - * and the non-canonical address causes a #GP fault when used by the - * CLFLUSH instruction. - * - * But in the common case we already have a canonical address. This code - * will fix the top bit if needed and is a no-op otherwise. - */ -static inline unsigned long make_addr_canonical_again(unsigned long addr) -{ -#ifdef CONFIG_X86_64 - return (long)(addr << 1) >> 1; -#else - return addr; -#endif -} - - static int change_page_attr_set_clr(unsigned long *addr, int numpages, pgprot_t mask_set, pgprot_t mask_clr, int force_split, int in_flag, diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 999d6d8f0bef..bc4bc7b2f075 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -685,9 +685,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask, * that UV should be updated so that smp_call_function_many(), * etc, are optimal on UV. 
*/ - unsigned int cpu; - - cpu = smp_processor_id(); cpumask = uv_flush_tlb_others(cpumask, info); if (cpumask) smp_call_function_many(cpumask, flush_tlb_func_remote, diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5542303c43d9..afabf597c855 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -881,20 +881,41 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: /* cmp dst_reg, src_reg */ - EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, - add_2reg(0xC0, dst_reg, src_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: /* test dst_reg, src_reg */ - EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85, - add_2reg(0xC0, dst_reg, src_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: /* test dst_reg, imm32 */ - EMIT1(add_1mod(0x48, dst_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); goto emit_cond_jmp; @@ -908,8 +929,21 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: /* cmp dst_reg, imm8/32 */ - EMIT1(add_1mod(0x48, dst_reg)); + if (BPF_CLASS(insn->code) == BPF_JMP) + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); if (is_imm8(imm32)) EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index 8f6cc71e0848..0d9cdffce6ac 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2072,7 +2072,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSLT | BPF_X: - case BPF_JMP | BPF_JSGE | BPF_X: { + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | 
BPF_JSGE | BPF_X: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = sstk ? IA32_ECX : src_lo; @@ -2081,25 +2092,35 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } if (sstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), - STACK_VAR(src_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EBX), + STACK_VAR(src_hi)); } - /* cmp dreg_hi,sreg_hi */ - EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); - EMIT2(IA32_JNE, 2); + if (is_jmp64) { + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + } /* cmp dreg_lo,sreg_lo */ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); goto emit_cond_jmp; } - case BPF_JMP | BPF_JSET | BPF_X: { + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = sstk ? IA32_ECX : src_lo; @@ -2108,15 +2129,21 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } if (sstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), - STACK_VAR(src_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EBX), + STACK_VAR(src_hi)); } /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); @@ -2126,32 +2153,39 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); goto emit_cond_jmp; } - case BPF_JMP | BPF_JSET | BPF_K: { - u32 hi; + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; + u32 hi; if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } - hi = imm32 & (1<<31) ? (u32)~0 : 0; /* mov ecx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); - /* mov ebx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32); /* and dreg_lo,sreg_lo */ EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo)); - /* and dreg_hi,sreg_hi */ - EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); - /* or dreg_lo,dreg_hi */ - EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + if (is_jmp64) { + hi = imm32 & (1 << 31) ? 
(u32)~0 : 0; + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi); + /* and dreg_hi,sreg_hi */ + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi)); + /* or dreg_lo,dreg_hi */ + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi)); + } goto emit_cond_jmp; } case BPF_JMP | BPF_JEQ | BPF_K: @@ -2163,29 +2197,44 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSLT | BPF_K: - case BPF_JMP | BPF_JSGE | BPF_K: { - u32 hi; + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: { + bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP; u8 dreg_lo = dstk ? IA32_EAX : dst_lo; u8 dreg_hi = dstk ? IA32_EDX : dst_hi; u8 sreg_lo = IA32_ECX; u8 sreg_hi = IA32_EBX; + u32 hi; if (dstk) { EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst_lo)); - EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), - STACK_VAR(dst_hi)); + if (is_jmp64) + EMIT3(0x8B, + add_2reg(0x40, IA32_EBP, + IA32_EDX), + STACK_VAR(dst_hi)); } - hi = imm32 & (1<<31) ? (u32)~0 : 0; /* mov ecx,imm32 */ EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32); - /* mov ebx,imm32 */ - EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); - - /* cmp dreg_hi,sreg_hi */ - EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); - EMIT2(IA32_JNE, 2); + if (is_jmp64) { + hi = imm32 & (1 << 31) ? (u32)~0 : 0; + /* mov ebx,imm32 */ + EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi); + /* cmp dreg_hi,sreg_hi */ + EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi)); + EMIT2(IA32_JNE, 2); + } /* cmp dreg_lo,sreg_lo */ EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo)); diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 30a5111ae5fd..527e69b12002 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c @@ -635,6 +635,22 @@ static void quirk_no_aersid(struct pci_dev *pdev) DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid); +static void quirk_intel_th_dnv(struct pci_dev *dev) +{ + struct resource *r = &dev->resource[4]; + + /* + * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which + * appears to be 4 MB in reality. 
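A toy model of the quirk described above, just to make the arithmetic explicit: PCI resources are inclusive [start, end] ranges, so start..start+0x7ff is 0x800 bytes (2 KB) and 0..0x3fffff is 4 MB. struct resource and IORESOURCE_UNSET below are simplified stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

struct resource {
        uint64_t start, end;
        unsigned long flags;
};
#define IORESOURCE_UNSET 0x20000000   /* stand-in value */

static void quirk_fixup(struct resource *r)
{
        if (r->end == r->start + 0x7ff) {        /* 2 KB as reported */
                r->start = 0;
                r->end = 0x3fffff;               /* 4 MB actually decoded */
                r->flags |= IORESOURCE_UNSET;    /* force reassignment */
        }
}

int main(void)
{
        struct resource r = { .start = 0xfe000000, .end = 0xfe0007ff };

        printf("before: %llu bytes\n", (unsigned long long)(r.end - r.start + 1));
        quirk_fixup(&r);
        printf("after:  %llu bytes\n", (unsigned long long)(r.end - r.start + 1));
        return 0;
}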
+ */ + if (r->end == r->start + 0x7ff) { + r->start = 0; + r->end = 0x3fffff; + r->flags |= IORESOURCE_UNSET; + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv); + #ifdef CONFIG_PHYS_ADDR_T_64BIT #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8) diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile index e4dc3862d423..fe29f3f5d384 100644 --- a/arch/x86/platform/efi/Makefile +++ b/arch/x86/platform/efi/Makefile @@ -3,5 +3,4 @@ OBJECT_FILES_NON_STANDARD_efi_thunk_$(BITS).o := y OBJECT_FILES_NON_STANDARD_efi_stub_$(BITS).o := y obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o -obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c deleted file mode 100644 index 7138bc7a265c..000000000000 --- a/arch/x86/platform/efi/early_printk.c +++ /dev/null @@ -1,240 +0,0 @@ -/* - * Copyright (C) 2013 Intel Corporation; author Matt Fleming - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. - */ - -#include -#include -#include -#include -#include -#include - -static const struct font_desc *font; -static u32 efi_x, efi_y; -static void *efi_fb; -static bool early_efi_keep; - -/* - * efi earlyprintk need use early_ioremap to map the framebuffer. - * But early_ioremap is not usable for earlyprintk=efi,keep, ioremap should - * be used instead. ioremap will be available after paging_init() which is - * earlier than initcall callbacks. Thus adding this early initcall function - * early_efi_map_fb to map the whole efi framebuffer. - */ -static __init int early_efi_map_fb(void) -{ - u64 base, size; - - if (!early_efi_keep) - return 0; - - base = boot_params.screen_info.lfb_base; - if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) - base |= (u64)boot_params.screen_info.ext_lfb_base << 32; - size = boot_params.screen_info.lfb_size; - efi_fb = ioremap(base, size); - - return efi_fb ? 0 : -ENOMEM; -} -early_initcall(early_efi_map_fb); - -/* - * early_efi_map maps efi framebuffer region [start, start + len -1] - * In case earlyprintk=efi,keep we have the whole framebuffer mapped already - * so just return the offset efi_fb + start. 
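The removed early_printk.c code assembled the framebuffer physical address from two legacy screen_info fields: the 32-bit lfb_base plus, when the 64-bit-base capability is advertised, ext_lfb_base as the upper half. A user-space sketch of that composition; the struct and capability value are stand-ins, not the real screen_info definitions.

#include <stdint.h>
#include <stdio.h>

struct fake_screen_info {
        uint32_t lfb_base;
        uint32_t ext_lfb_base;
        uint32_t capabilities;
};
#define VIDEO_CAPABILITY_64BIT_BASE (1 << 1)   /* illustrative value */

static uint64_t lfb_phys_base(const struct fake_screen_info *si)
{
        uint64_t base = si->lfb_base;

        if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
                base |= (uint64_t)si->ext_lfb_base << 32;
        return base;
}

int main(void)
{
        struct fake_screen_info si = {
                .lfb_base = 0x90000000,
                .ext_lfb_base = 0x4,
                .capabilities = VIDEO_CAPABILITY_64BIT_BASE,
        };

        printf("framebuffer at %#llx\n", (unsigned long long)lfb_phys_base(&si));
        return 0;
}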
- */ -static __ref void *early_efi_map(unsigned long start, unsigned long len) -{ - u64 base; - - base = boot_params.screen_info.lfb_base; - if (boot_params.screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) - base |= (u64)boot_params.screen_info.ext_lfb_base << 32; - - if (efi_fb) - return (efi_fb + start); - else - return early_ioremap(base + start, len); -} - -static __ref void early_efi_unmap(void *addr, unsigned long len) -{ - if (!efi_fb) - early_iounmap(addr, len); -} - -static void early_efi_clear_scanline(unsigned int y) -{ - unsigned long *dst; - u16 len; - - len = boot_params.screen_info.lfb_linelength; - dst = early_efi_map(y*len, len); - if (!dst) - return; - - memset(dst, 0, len); - early_efi_unmap(dst, len); -} - -static void early_efi_scroll_up(void) -{ - unsigned long *dst, *src; - u16 len; - u32 i, height; - - len = boot_params.screen_info.lfb_linelength; - height = boot_params.screen_info.lfb_height; - - for (i = 0; i < height - font->height; i++) { - dst = early_efi_map(i*len, len); - if (!dst) - return; - - src = early_efi_map((i + font->height) * len, len); - if (!src) { - early_efi_unmap(dst, len); - return; - } - - memmove(dst, src, len); - - early_efi_unmap(src, len); - early_efi_unmap(dst, len); - } -} - -static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h) -{ - const u32 color_black = 0x00000000; - const u32 color_white = 0x00ffffff; - const u8 *src; - u8 s8; - int m; - - src = font->data + c * font->height; - s8 = *(src + h); - - for (m = 0; m < 8; m++) { - if ((s8 >> (7 - m)) & 1) - *dst = color_white; - else - *dst = color_black; - dst++; - } -} - -static void -early_efi_write(struct console *con, const char *str, unsigned int num) -{ - struct screen_info *si; - unsigned int len; - const char *s; - void *dst; - - si = &boot_params.screen_info; - len = si->lfb_linelength; - - while (num) { - unsigned int linemax; - unsigned int h, count = 0; - - for (s = str; *s && *s != '\n'; s++) { - if (count == num) - break; - count++; - } - - linemax = (si->lfb_width - efi_x) / font->width; - if (count > linemax) - count = linemax; - - for (h = 0; h < font->height; h++) { - unsigned int n, x; - - dst = early_efi_map((efi_y + h) * len, len); - if (!dst) - return; - - s = str; - n = count; - x = efi_x; - - while (n-- > 0) { - early_efi_write_char(dst + x*4, *s, h); - x += font->width; - s++; - } - - early_efi_unmap(dst, len); - } - - num -= count; - efi_x += count * font->width; - str += count; - - if (num > 0 && *s == '\n') { - efi_x = 0; - efi_y += font->height; - str++; - num--; - } - - if (efi_x + font->width > si->lfb_width) { - efi_x = 0; - efi_y += font->height; - } - - if (efi_y + font->height > si->lfb_height) { - u32 i; - - efi_y -= font->height; - early_efi_scroll_up(); - - for (i = 0; i < font->height; i++) - early_efi_clear_scanline(efi_y + i); - } - } -} - -static __init int early_efi_setup(struct console *con, char *options) -{ - struct screen_info *si; - u16 xres, yres; - u32 i; - - si = &boot_params.screen_info; - xres = si->lfb_width; - yres = si->lfb_height; - - /* - * early_efi_write_char() implicitly assumes a framebuffer with - * 32-bits per pixel. 
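The 32-bits-per-pixel assumption called out above is visible in early_efi_write_char(): each bit of a font byte becomes one 32-bit pixel, MSB first, white on black. A self-contained sketch of that per-scanline expansion:

#include <stdint.h>
#include <stdio.h>

/* One byte of font bitmap becomes eight 32-bit pixels, MSB first. */
static void render_font_row(uint32_t *dst, uint8_t bits)
{
        const uint32_t white = 0x00ffffff, black = 0x00000000;
        int m;

        for (m = 0; m < 8; m++)
                dst[m] = (bits >> (7 - m)) & 1 ? white : black;
}

int main(void)
{
        uint32_t px[8];
        int i;

        render_font_row(px, 0x5a);           /* 01011010 */
        for (i = 0; i < 8; i++)
                putchar(px[i] ? '#' : '.');
        putchar('\n');
        return 0;
}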
- */ - if (si->lfb_depth != 32) - return -ENODEV; - - font = get_default_font(xres, yres, -1, -1); - if (!font) - return -ENODEV; - - efi_y = rounddown(yres, font->height) - font->height; - for (i = 0; i < (yres - efi_y) / font->height; i++) - early_efi_scroll_up(); - - /* early_console_register will unset CON_BOOT in case ,keep */ - if (!(con->flags & CON_BOOT)) - early_efi_keep = true; - return 0; -} - -struct console early_efi_console = { - .name = "earlyefi", - .write = early_efi_write, - .setup = early_efi_setup, - .flags = CON_PRINTBUFFER, - .index = -1, -}; diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 17456a1d3f04..458a0e2bcc57 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -304,7 +304,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size) * - Not within any part of the kernel * - Not the BIOS reserved area (E820_TYPE_RESERVED, E820_TYPE_NVS, etc) */ -static bool can_free_region(u64 start, u64 size) +static __init bool can_free_region(u64 start, u64 size) { if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end)) return false; @@ -717,7 +717,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr) * "efi_mm" cannot be used to check if the page fault had occurred * in the firmware context because efi=old_map doesn't use efi_pgd. */ - if (efi_rts_work.efi_rts_id == NONE) + if (efi_rts_work.efi_rts_id == EFI_NONE) return; /* @@ -742,7 +742,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr) * because this case occurs *very* rarely and hence could be improved * on a need by basis. */ - if (efi_rts_work.efi_rts_id == RESET_SYSTEM) { + if (efi_rts_work.efi_rts_id == EFI_RESET_SYSTEM) { pr_info("efi_reset_system() buggy! Reboot through BIOS\n"); machine_real_restart(MRR_BIOS); return; diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c index 96f438d4b026..1421d5330b2c 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c @@ -44,7 +44,6 @@ static struct fixed_voltage_config bcm43xx_vmmc = { */ .microvolts = 2000000, /* 1.8V */ .startup_delay = 250 * 1000, /* 250ms */ - .enable_high = 1, /* active high */ .enabled_at_boot = 0, /* disabled at boot */ .init_data = &bcm43xx_vmmc_data, }; diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index b4ab779f1d47..ac9e7bf49b66 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -141,6 +141,9 @@ void * __init prom_early_alloc(unsigned long size) * wasted bootmem) and hand off chunks of it to callers. 
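prom_early_alloc(), quoted only in part above, amortises early allocations by grabbing a larger chunk and handing out pieces of it. Below is a rough user-space model of that pattern; chunk size and alignment handling are arbitrary here, malloc()/calloc() stands in for memblock_alloc(), and exit() stands in for the panic() the hunk adds on allocation failure.

#include <stdlib.h>
#include <stdio.h>

#define CHUNK_SIZE 4096

static char *chunk;
static size_t chunk_used;

/* Hand out pieces of one big chunk; start a new chunk when it runs out. */
static void *early_alloc(size_t size)
{
        void *res;

        if (!chunk || chunk_used + size > CHUNK_SIZE) {
                chunk = calloc(1, CHUNK_SIZE);
                if (!chunk) {
                        fprintf(stderr, "early_alloc: out of memory\n");
                        exit(1);          /* the kernel version panic()s */
                }
                chunk_used = 0;
        }
        res = chunk + chunk_used;
        chunk_used += size;
        return res;
}

int main(void)
{
        char *a = early_alloc(40), *b = early_alloc(100);

        printf("a=%p b=%p (same chunk: %d)\n", (void *)a, (void *)b,
               b == a + 40);
        return 0;
}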
*/ res = memblock_alloc(chunk_size, SMP_CACHE_BYTES); + if (!res) + panic("%s: Failed to allocate %zu bytes\n", __func__, + chunk_size); BUG_ON(!res); prom_early_allocated += chunk_size; memset(res, 0, chunk_size); diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c582..ef60d789c76e 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c @@ -29,7 +29,8 @@ struct uv_systab *uv_systab; -s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) +static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, + u64 a4, u64 a5) { struct uv_systab *tab = uv_systab; s64 ret; @@ -44,36 +45,42 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI * callback method, which uses efi_call() directly, with the kernel page tables: */ - if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags))) + if (unlikely(efi_enabled(EFI_OLD_MEMMAP))) ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5); else ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5); return ret; } -EXPORT_SYMBOL_GPL(uv_bios_call); -s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, - u64 a4, u64 a5) +s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) { - unsigned long bios_flags; s64 ret; - local_irq_save(bios_flags); - ret = uv_bios_call(which, a1, a2, a3, a4, a5); - local_irq_restore(bios_flags); + if (down_interruptible(&__efi_uv_runtime_lock)) + return BIOS_STATUS_ABORT; + + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); + up(&__efi_uv_runtime_lock); return ret; } +EXPORT_SYMBOL_GPL(uv_bios_call); -s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, +s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) { + unsigned long bios_flags; s64 ret; - preempt_disable(); - ret = uv_bios_call(which, a1, a2, a3, a4, a5); - preempt_enable(); + if (down_interruptible(&__efi_uv_runtime_lock)) + return BIOS_STATUS_ABORT; + + local_irq_save(bios_flags); + ret = __uv_bios_call(which, a1, a2, a3, a4, a5); + local_irq_restore(bios_flags); + + up(&__efi_uv_runtime_lock); return ret; } @@ -188,7 +195,6 @@ int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus) } EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target); -#ifdef CONFIG_EFI void uv_bios_init(void) { uv_systab = NULL; @@ -218,4 +224,3 @@ void uv_bios_init(void) } pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision); } -#endif diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index a4130b84d1ff..2c53b0f19329 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -2010,8 +2010,7 @@ static void make_per_cpu_thp(struct bau_control *smaster) int cpu; size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus(); - smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode); - memset(smaster->thp, 0, hpsz); + smaster->thp = kzalloc_node(hpsz, GFP_KERNEL, smaster->osnode); for_each_present_cpu(cpu) { smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode; smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id; @@ -2135,15 +2134,12 @@ static int __init summarize_uvhub_sockets(int nuvhubs, static int __init init_per_cpu(int nuvhubs, int base_part_pnode) { unsigned char *uvhub_mask; - void *vp; struct uvhub_desc *uvhub_descs; if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub()) timeout_us = 
calculate_destination_timeout(); - vp = kmalloc_array(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL); - uvhub_descs = (struct uvhub_desc *)vp; - memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); + uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL); uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask)) diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile index 4463fa72db94..f60501a384f9 100644 --- a/arch/x86/realmode/rm/Makefile +++ b/arch/x86/realmode/rm/Makefile @@ -37,8 +37,7 @@ REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y)) sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p' quiet_cmd_pasyms = PASYMS $@ - cmd_pasyms = $(NM) $(filter-out FORCE,$^) | \ - sed $(sed-pasyms) | sort | uniq > $@ + cmd_pasyms = $(NM) $(real-prereqs) | sed $(sed-pasyms) | sort | uniq > $@ targets += pasyms.h $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE @@ -47,7 +46,7 @@ $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE targets += realmode.lds $(obj)/realmode.lds: $(obj)/pasyms.h -LDFLAGS_realmode.elf := --emit-relocs -T +LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj) targets += realmode.elf diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S index df8e11e26bc3..3bb980800c58 100644 --- a/arch/x86/realmode/rm/realmode.lds.S +++ b/arch/x86/realmode/rm/realmode.lds.S @@ -9,7 +9,7 @@ #undef i386 -OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386") +OUTPUT_FORMAT("elf32-i386") OUTPUT_ARCH(i386) SECTIONS diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig index f518b4744ff8..a9e80e44178c 100644 --- a/arch/x86/um/Kconfig +++ b/arch/x86/um/Kconfig @@ -16,7 +16,7 @@ config 64BIT config X86_32 def_bool !64BIT - select HAVE_AOUT + select ARCH_32BIT_OFF_T select ARCH_WANT_IPC_PARSE_VERSION select MODULES_USE_ELF_REL select CLONE_BACKWARDS diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h index a7e47cf7ec6c..6e4c6bd62203 100644 --- a/arch/x86/xen/mmu.h +++ b/arch/x86/xen/mmu.h @@ -17,8 +17,8 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); -pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, +pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte); unsigned long xen_read_cr2_direct(void); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 0f4fe206dcc2..a21e1734fc1f 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -306,20 +306,20 @@ static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, __xen_set_pte(ptep, pteval); } -pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, +pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { /* Just return the pte as-is. 
We preserve the bits on commit */ - trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep); + trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep); return *ptep; } -void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, +void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte) { struct mmu_update u; - trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte); + trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte); xen_mc_batch(); u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; @@ -2114,10 +2114,10 @@ void __init xen_relocate_p2m(void) pt = early_memremap(pt_phys, PAGE_SIZE); clear_page(pt); for (idx_pte = 0; - idx_pte < min(n_pte, PTRS_PER_PTE); - idx_pte++) { - set_pte(pt + idx_pte, - pfn_pte(p2m_pfn, PAGE_KERNEL)); + idx_pte < min(n_pte, PTRS_PER_PTE); + idx_pte++) { + pt[idx_pte] = pfn_pte(p2m_pfn, + PAGE_KERNEL); p2m_pfn++; } n_pte -= PTRS_PER_PTE; @@ -2125,8 +2125,7 @@ void __init xen_relocate_p2m(void) make_lowmem_page_readonly(__va(pt_phys)); pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, PFN_DOWN(pt_phys)); - set_pmd(pmd + idx_pt, - __pmd(_PAGE_TABLE | pt_phys)); + pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys); pt_phys += PAGE_SIZE; } n_pt -= PTRS_PER_PMD; @@ -2134,7 +2133,7 @@ void __init xen_relocate_p2m(void) make_lowmem_page_readonly(__va(pmd_phys)); pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, PFN_DOWN(pmd_phys)); - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); + pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys); pmd_phys += PAGE_SIZE; } n_pmd -= PTRS_PER_PUD; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 055e37e43541..95ce9b5be411 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -181,8 +181,15 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn) static void * __ref alloc_p2m_page(void) { - if (unlikely(!slab_is_available())) - return memblock_alloc(PAGE_SIZE, PAGE_SIZE); + if (unlikely(!slab_is_available())) { + void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + + return ptr; + } return (void *)__get_free_page(GFP_KERNEL); } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index d5f303c0e656..548d1e0a5ba1 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -589,6 +590,14 @@ static void __init xen_align_and_add_e820_region(phys_addr_t start, if (type == E820_TYPE_RAM) { start = PAGE_ALIGN(start); end &= ~((phys_addr_t)PAGE_SIZE - 1); +#ifdef CONFIG_MEMORY_HOTPLUG + /* + * Don't allow adding memory not in E820 map while booting the + * system. Once the balloon driver is up it will remove that + * restriction again. + */ + max_mem_size = end; +#endif } e820__range_add(start, end - start, type); @@ -748,6 +757,10 @@ char * __init xen_memory_setup(void) memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); +#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON) + xen_saved_max_mem_size = max_mem_size; +#endif + op = xen_initial_domain() ? 
XENMEM_machine_memory_map : XENMEM_memory_map; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 20a0756f27ef..4b9aafe766c5 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -1,9 +1,12 @@ # SPDX-License-Identifier: GPL-2.0 config XTENSA def_bool y + select ARCH_32BIT_OFF_T select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select ARCH_NO_COHERENT_DMA_MMAP if !MMU + select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT @@ -164,7 +167,7 @@ config XTENSA_FAKE_NMI If unsure, say N. config XTENSA_UNALIGNED_USER - bool "Unaligned memory access in use space" + bool "Unaligned memory access in user space" help The Xtensa architecture currently does not handle unaligned memory accesses in hardware but through an exception handler. @@ -447,11 +450,10 @@ config USE_OF bool "Flattened Device Tree support" select OF select OF_EARLY_FLATTREE - select OF_RESERVED_MEM help Include support for flattened device tree machine descriptions. -config BUILTIN_DTB +config BUILTIN_DTB_SOURCE string "DTB to build into the kernel image" depends on OF diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile index f8052ba5aea8..0b8d00cdae7c 100644 --- a/arch/xtensa/boot/dts/Makefile +++ b/arch/xtensa/boot/dts/Makefile @@ -7,9 +7,9 @@ # # -BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o -ifneq ($(CONFIG_BUILTIN_DTB),"") -obj-$(CONFIG_OF) += $(BUILTIN_DTB) +BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o +ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") +obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE) endif # for CONFIG_OF_ALL_DTBS test diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig index 2bf964df37ba..f378e56f9ce6 100644 --- a/arch/xtensa/configs/audio_kc705_defconfig +++ b/arch/xtensa/configs/audio_kc705_defconfig @@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="kc705" +CONFIG_BUILTIN_DTB_SOURCE="kc705" # CONFIG_COMPACTION is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_PM=y diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig index 3221b7053fa3..62f32a902568 100644 --- a/arch/xtensa/configs/cadence_csp_defconfig +++ b/arch/xtensa/configs/cadence_csp_defconfig @@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y # CONFIG_PCI is not set CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="csp" +CONFIG_BUILTIN_DTB_SOURCE="csp" # CONFIG_COMPACTION is not set CONFIG_XTFPGA_LCD=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig index 985fa8546e4e..8bebe07f1060 100644 --- a/arch/xtensa/configs/generic_kc705_defconfig +++ b/arch/xtensa/configs/generic_kc705_defconfig @@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="kc705" +CONFIG_BUILTIN_DTB_SOURCE="kc705" # CONFIG_COMPACTION is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_NET=y diff --git a/arch/xtensa/configs/nommu_kc705_defconfig 
b/arch/xtensa/configs/nommu_kc705_defconfig index f3fc4f970ca8..933ab2adf434 100644 --- a/arch/xtensa/configs/nommu_kc705_defconfig +++ b/arch/xtensa/configs/nommu_kc705_defconfig @@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="kc705_nommu" +CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu" CONFIG_BINFMT_FLAT=y CONFIG_NET=y CONFIG_PACKET=y diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig index 11fed6c06a7c..e29c5b179a5b 100644 --- a/arch/xtensa/configs/smp_lx200_defconfig +++ b/arch/xtensa/configs/smp_lx200_defconfig @@ -33,11 +33,12 @@ CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set # CONFIG_PCI is not set +CONFIG_VECTORS_OFFSET=0x00002000 CONFIG_XTENSA_PLATFORM_XTFPGA=y CONFIG_CMDLINE_BOOL=y CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" CONFIG_USE_OF=y -CONFIG_BUILTIN_DTB="lx200mx" +CONFIG_BUILTIN_DTB_SOURCE="lx200mx" # CONFIG_COMPACTION is not set # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_NET=y diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index e255683cd520..d939e13e8d84 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -23,8 +23,11 @@ generic-y += mm-arch-hooks.h generic-y += param.h generic-y += percpu.h generic-y += preempt.h +generic-y += qrwlock.h +generic-y += qspinlock.h generic-y += rwsem.h generic-y += sections.h +generic-y += socket.h generic-y += topology.h generic-y += trace_clock.h generic-y += vga.h diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h index dfdf9fae1f84..7f6cf4151843 100644 --- a/arch/xtensa/include/asm/asm-uaccess.h +++ b/arch/xtensa/include/asm/asm-uaccess.h @@ -32,8 +32,6 @@ #define KERNEL_DS 0 #define USER_DS 1 -#define get_ds (KERNEL_DS) - /* * get_fs reads current->thread.current_ds into a register. * On Entry: diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h index 201e9009efd8..22a10c715c1f 100644 --- a/arch/xtensa/include/asm/cmpxchg.h +++ b/arch/xtensa/include/asm/cmpxchg.h @@ -13,6 +13,7 @@ #ifndef __ASSEMBLY__ +#include #include /* @@ -138,6 +139,28 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #define xchg(ptr,x) \ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +static inline u32 xchg_small(volatile void *ptr, u32 x, int size) +{ + int off = (unsigned long)ptr % sizeof(u32); + volatile u32 *p = ptr - off; +#ifdef __BIG_ENDIAN + int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE; +#else + int bitoff = off * BITS_PER_BYTE; +#endif + u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; + u32 oldv, newv; + u32 ret; + + do { + oldv = READ_ONCE(*p); + ret = (oldv & bitmask) >> bitoff; + newv = (oldv & ~bitmask) | (x << bitoff); + } while (__cmpxchg_u32(p, oldv, newv) != oldv); + + return ret; +} + /* * This only works if the compiler isn't horribly bad at optimizing. 
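The xchg_small() helper added in the cmpxchg.h hunk above builds 1- and 2-byte exchanges out of the hardware's 32-bit compare-and-swap: loop a CAS on the aligned word containing the target, rewriting only the masked byte lane. A portable C11 sketch of the same idea, little-endian lane math only (the kernel version also handles big-endian):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Exchange `size` bytes (1 or 2) at byte offset `off` within *word. */
static uint32_t xchg_small(_Atomic uint32_t *word, unsigned off, unsigned size,
                           uint32_t x)
{
        unsigned bitoff = off * 8;                       /* little endian */
        uint32_t mask = ((1u << (size * 8)) - 1) << bitoff;
        uint32_t oldv, newv;

        do {
                oldv = atomic_load(word);
                newv = (oldv & ~mask) | ((x << bitoff) & mask);
        } while (!atomic_compare_exchange_weak(word, &oldv, newv));

        return (oldv & mask) >> bitoff;
}

int main(void)
{
        _Atomic uint32_t word = 0x44332211;

        /* exchange the byte at offset 1 (0x22) with 0xaa */
        uint32_t old = xchg_small(&word, 1, 1, 0xaa);

        printf("old=%#x word=%#x\n", old, (unsigned)atomic_load(&word));
        return 0;
}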
* gcc-2.5.8 reportedly can't handle this, but I define that one to @@ -150,11 +173,16 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size) { switch (size) { - case 4: - return xchg_u32(ptr, x); + case 1: + return xchg_small(ptr, x, 1); + case 2: + return xchg_small(ptr, x, 2); + case 4: + return xchg_u32(ptr, x); + default: + __xchg_called_with_bad_pointer(); + return x; } - __xchg_called_with_bad_pointer(); - return x; } #endif /* __ASSEMBLY__ */ diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h index c6e1290dcbb7..584b0de6f2ca 100644 --- a/arch/xtensa/include/asm/spinlock.h +++ b/arch/xtensa/include/asm/spinlock.h @@ -12,188 +12,9 @@ #define _XTENSA_SPINLOCK_H #include -#include +#include +#include -/* - * spinlock - * - * There is at most one owner of a spinlock. There are not different - * types of spinlock owners like there are for rwlocks (see below). - * - * When trying to obtain a spinlock, the function "spins" forever, or busy- - * waits, until the lock is obtained. When spinning, presumably some other - * owner will soon give up the spinlock making it available to others. Use - * the trylock functions to avoid spinning forever. - * - * possible values: - * - * 0 nobody owns the spinlock - * 1 somebody owns the spinlock - */ - -#define arch_spin_is_locked(x) ((x)->slock != 0) - -static inline void arch_spin_lock(arch_spinlock_t *lock) -{ - unsigned long tmp; - - __asm__ __volatile__( - " movi %0, 0\n" - " wsr %0, scompare1\n" - "1: movi %0, 1\n" - " s32c1i %0, %1, 0\n" - " bnez %0, 1b\n" - : "=&a" (tmp) - : "a" (&lock->slock) - : "memory"); -} - -/* Returns 1 if the lock is obtained, 0 otherwise. */ - -static inline int arch_spin_trylock(arch_spinlock_t *lock) -{ - unsigned long tmp; - - __asm__ __volatile__( - " movi %0, 0\n" - " wsr %0, scompare1\n" - " movi %0, 1\n" - " s32c1i %0, %1, 0\n" - : "=&a" (tmp) - : "a" (&lock->slock) - : "memory"); - - return tmp == 0 ? 1 : 0; -} - -static inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - unsigned long tmp; - - __asm__ __volatile__( - " movi %0, 0\n" - " s32ri %0, %1, 0\n" - : "=&a" (tmp) - : "a" (&lock->slock) - : "memory"); -} - -/* - * rwlock - * - * Read-write locks are really a more flexible spinlock. They allow - * multiple readers but only one writer. Write ownership is exclusive - * (i.e., all other readers and writers are blocked from ownership while - * there is a write owner). These rwlocks are unfair to writers. Writers - * can be starved for an indefinite time by readers. - * - * possible values: - * - * 0 nobody owns the rwlock - * >0 one or more readers own the rwlock - * (the positive value is the actual number of readers) - * 0x80000000 one writer owns the rwlock, no other writers, no readers - */ - -static inline void arch_write_lock(arch_rwlock_t *rw) -{ - unsigned long tmp; - - __asm__ __volatile__( - " movi %0, 0\n" - " wsr %0, scompare1\n" - "1: movi %0, 1\n" - " slli %0, %0, 31\n" - " s32c1i %0, %1, 0\n" - " bnez %0, 1b\n" - : "=&a" (tmp) - : "a" (&rw->lock) - : "memory"); -} - -/* Returns 1 if the lock is obtained, 0 otherwise. */ - -static inline int arch_write_trylock(arch_rwlock_t *rw) -{ - unsigned long tmp; - - __asm__ __volatile__( - " movi %0, 0\n" - " wsr %0, scompare1\n" - " movi %0, 1\n" - " slli %0, %0, 31\n" - " s32c1i %0, %1, 0\n" - : "=&a" (tmp) - : "a" (&rw->lock) - : "memory"); - - return tmp == 0 ? 
1 : 0; -} - -static inline void arch_write_unlock(arch_rwlock_t *rw) -{ - unsigned long tmp; - - __asm__ __volatile__( - " movi %0, 0\n" - " s32ri %0, %1, 0\n" - : "=&a" (tmp) - : "a" (&rw->lock) - : "memory"); -} - -static inline void arch_read_lock(arch_rwlock_t *rw) -{ - unsigned long tmp; - unsigned long result; - - __asm__ __volatile__( - "1: l32i %1, %2, 0\n" - " bltz %1, 1b\n" - " wsr %1, scompare1\n" - " addi %0, %1, 1\n" - " s32c1i %0, %2, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (tmp) - : "a" (&rw->lock) - : "memory"); -} - -/* Returns 1 if the lock is obtained, 0 otherwise. */ - -static inline int arch_read_trylock(arch_rwlock_t *rw) -{ - unsigned long result; - unsigned long tmp; - - __asm__ __volatile__( - " l32i %1, %2, 0\n" - " addi %0, %1, 1\n" - " bltz %0, 1f\n" - " wsr %1, scompare1\n" - " s32c1i %0, %2, 0\n" - " sub %0, %0, %1\n" - "1:\n" - : "=&a" (result), "=&a" (tmp) - : "a" (&rw->lock) - : "memory"); - - return result == 0; -} - -static inline void arch_read_unlock(arch_rwlock_t *rw) -{ - unsigned long tmp1, tmp2; - - __asm__ __volatile__( - "1: l32i %1, %2, 0\n" - " addi %0, %1, -1\n" - " wsr %1, scompare1\n" - " s32c1i %0, %2, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (tmp1), "=&a" (tmp2) - : "a" (&rw->lock) - : "memory"); -} +#define smp_mb__after_spinlock() smp_mb() #endif /* _XTENSA_SPINLOCK_H */ diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h index bb1fe6c1816e..64c9389254f1 100644 --- a/arch/xtensa/include/asm/spinlock_types.h +++ b/arch/xtensa/include/asm/spinlock_types.h @@ -2,20 +2,11 @@ #ifndef __ASM_SPINLOCK_TYPES_H #define __ASM_SPINLOCK_TYPES_H -#ifndef __LINUX_SPINLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) # error "please don't include this file directly" #endif -typedef struct { - volatile unsigned int slock; -} arch_spinlock_t; - -#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } - -typedef struct { - volatile unsigned int lock; -} arch_rwlock_t; - -#define __ARCH_RW_LOCK_UNLOCKED { 0 } +#include +#include #endif diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index f333f10a7650..f092cc3f4e66 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -121,15 +121,6 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_WORK_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ _TIF_SYSCALL_TRACEPOINT) -/* - * Thread-synchronous status. - * - * This is different from the flags in that nobody else - * ever touches our thread-synchronous status, so we don't - * have to worry about atomic accesses. 
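For contrast with the generic qspinlock/qrwlock this series switches xtensa to, the removed code above documents the old convention: a spinlock word is 0 when free and 1 when owned, taken with a compare-and-set loop. A minimal C11 model of just that convention (not of the queued locks that replace it):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int slock; } arch_spinlock_t;   /* 0 = free, 1 = owned */

static void spin_lock(arch_spinlock_t *l)
{
        int expected;

        do {
                expected = 0;                /* retry until 0 -> 1 succeeds */
        } while (!atomic_compare_exchange_weak(&l->slock, &expected, 1));
}

static int spin_trylock(arch_spinlock_t *l)
{
        int expected = 0;

        return atomic_compare_exchange_strong(&l->slock, &expected, 1);
}

static void spin_unlock(arch_spinlock_t *l)
{
        atomic_store(&l->slock, 0);
}

int main(void)
{
        arch_spinlock_t l = { 0 };

        spin_lock(&l);
        printf("trylock while held: %d\n", spin_trylock(&l));
        spin_unlock(&l);
        printf("trylock when free:  %d\n", spin_trylock(&l));
        return 0;
}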
- */ -#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */ - #define THREAD_SIZE KERNEL_STACK_SIZE #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT) diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 4b2480304bc3..6792928ba84a 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -32,7 +32,6 @@ #define KERNEL_DS ((mm_segment_t) { 0 }) #define USER_DS ((mm_segment_t) { 1 }) -#define get_ds() (KERNEL_DS) #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h index 0d34629dafc5..30af4dc3ce7b 100644 --- a/arch/xtensa/include/asm/unistd.h +++ b/arch/xtensa/include/asm/unistd.h @@ -7,21 +7,9 @@ #define __ARCH_WANT_NEW_STAT #define __ARCH_WANT_STAT64 -#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_UTIME32 #define __ARCH_WANT_SYS_GETPGRP -/* - * Ignore legacy system calls in the checksyscalls.sh script - */ - -#define __IGNORE_fork /* use clone */ -#define __IGNORE_time -#define __IGNORE_alarm /* use setitimer */ -#define __IGNORE_pause -#define __IGNORE_mmap /* use mmap2 */ -#define __IGNORE_vfork /* use clone */ -#define __IGNORE_fadvise64 /* use fadvise64_64 */ - #define NR_syscalls __NR_syscalls #endif /* _XTENSA_UNISTD_H */ diff --git a/arch/xtensa/include/uapi/asm/Kbuild b/arch/xtensa/include/uapi/asm/Kbuild index 960bf1e4be53..6b43e5049ff7 100644 --- a/arch/xtensa/include/uapi/asm/Kbuild +++ b/arch/xtensa/include/uapi/asm/Kbuild @@ -2,3 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm generated-y += unistd_32.h generic-y += kvm_para.h +generic-y += socket.h diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index 58f29a9d895d..be726062412b 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -34,9 +34,7 @@ /* * Flags for mmap */ -#define MAP_SHARED 0x001 /* Share changes */ -#define MAP_PRIVATE 0x002 /* Changes are private */ -#define MAP_SHARED_VALIDATE 0x003 /* share + validate extension flags */ +/* 0x01 - 0x03 are defined in linux/mman.h */ #define MAP_TYPE 0x00f /* Mask for type of mapping */ #define MAP_FIXED 0x010 /* Interpret addr exactly */ diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h deleted file mode 100644 index 1de07a7f7680..000000000000 --- a/arch/xtensa/include/uapi/asm/socket.h +++ /dev/null @@ -1,122 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * include/asm-xtensa/socket.h - * - * Copied from i386. - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- */ - -#ifndef _XTENSA_SOCKET_H -#define _XTENSA_SOCKET_H - -#include - -/* For setsockoptions(2) */ -#define SOL_SOCKET 1 - -#define SO_DEBUG 1 -#define SO_REUSEADDR 2 -#define SO_TYPE 3 -#define SO_ERROR 4 -#define SO_DONTROUTE 5 -#define SO_BROADCAST 6 -#define SO_SNDBUF 7 -#define SO_RCVBUF 8 -#define SO_SNDBUFFORCE 32 -#define SO_RCVBUFFORCE 33 -#define SO_KEEPALIVE 9 -#define SO_OOBINLINE 10 -#define SO_NO_CHECK 11 -#define SO_PRIORITY 12 -#define SO_LINGER 13 -#define SO_BSDCOMPAT 14 -#define SO_REUSEPORT 15 -#define SO_PASSCRED 16 -#define SO_PEERCRED 17 -#define SO_RCVLOWAT 18 -#define SO_SNDLOWAT 19 -#define SO_RCVTIMEO 20 -#define SO_SNDTIMEO 21 - -/* Security levels - as per NRL IPv6 - don't actually do anything */ - -#define SO_SECURITY_AUTHENTICATION 22 -#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 -#define SO_SECURITY_ENCRYPTION_NETWORK 24 - -#define SO_BINDTODEVICE 25 - -/* Socket filtering */ - -#define SO_ATTACH_FILTER 26 -#define SO_DETACH_FILTER 27 -#define SO_GET_FILTER SO_ATTACH_FILTER - -#define SO_PEERNAME 28 -#define SO_TIMESTAMP 29 -#define SCM_TIMESTAMP SO_TIMESTAMP - -#define SO_ACCEPTCONN 30 -#define SO_PEERSEC 31 -#define SO_PASSSEC 34 -#define SO_TIMESTAMPNS 35 -#define SCM_TIMESTAMPNS SO_TIMESTAMPNS - -#define SO_MARK 36 - -#define SO_TIMESTAMPING 37 -#define SCM_TIMESTAMPING SO_TIMESTAMPING - -#define SO_PROTOCOL 38 -#define SO_DOMAIN 39 - -#define SO_RXQ_OVFL 40 - -#define SO_WIFI_STATUS 41 -#define SCM_WIFI_STATUS SO_WIFI_STATUS -#define SO_PEEK_OFF 42 - -/* Instruct lower device to use last 4-bytes of skb data as FCS */ -#define SO_NOFCS 43 - -#define SO_LOCK_FILTER 44 - -#define SO_SELECT_ERR_QUEUE 45 - -#define SO_BUSY_POLL 46 - -#define SO_MAX_PACING_RATE 47 - -#define SO_BPF_EXTENSIONS 48 - -#define SO_INCOMING_CPU 49 - -#define SO_ATTACH_BPF 50 -#define SO_DETACH_BPF SO_DETACH_FILTER - -#define SO_ATTACH_REUSEPORT_CBPF 51 -#define SO_ATTACH_REUSEPORT_EBPF 52 - -#define SO_CNX_ADVICE 53 - -#define SCM_TIMESTAMPING_OPT_STATS 54 - -#define SO_MEMINFO 55 - -#define SO_INCOMING_NAPI_ID 56 - -#define SO_COOKIE 57 - -#define SCM_TIMESTAMPING_PKTINFO 58 - -#define SO_PEERGROUPS 59 - -#define SO_ZEROCOPY 60 - -#define SO_TXTIME 61 -#define SCM_TXTIME SO_TXTIME - -#endif /* _XTENSA_SOCKET_H */ diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index da08e75100ab..7f009719304e 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S @@ -276,12 +276,13 @@ should_never_return: movi a2, cpu_start_ccount 1: + memw l32i a3, a2, 0 beqi a3, 0, 1b movi a3, 0 s32i a3, a2, 0 - memw 1: + memw l32i a3, a2, 0 beqi a3, 0, 1b wsr a3, ccount @@ -317,11 +318,13 @@ ENTRY(cpu_restart) rsr a0, prid neg a2, a0 movi a3, cpu_start_id + memw s32i a2, a3, 0 #if XCHAL_DCACHE_IS_WRITEBACK dhwbi a3, 0 #endif 1: + memw l32i a2, a3, 0 dhi a3, 0 bne a2, a0, 1b diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 74969a437a37..db278a9e80c7 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -52,8 +52,6 @@ extern void ret_from_fork(void); extern void ret_from_kernel_thread(void); -struct task_struct *current_set[NR_CPUS] = {&init_task, }; - void (*pm_power_off)(void) = NULL; EXPORT_SYMBOL(pm_power_off); @@ -321,8 +319,8 @@ unsigned long get_wchan(struct task_struct *p) /* Stack layout: sp-4: ra, sp-3: sp' */ - pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp); - sp = *(unsigned long *)sp - 3; + pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp); + sp = SPILL_SLOT(sp, 1); } while (count++ < 16); return 0; } diff --git 
a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index 932d64689bac..3699d6d3e479 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c @@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { unsigned i; - for (i = 0; i < max_cpus; ++i) + for_each_possible_cpu(i) set_cpu_present(i, true); } @@ -96,6 +96,11 @@ void __init smp_init_cpus(void) pr_info("%s: Core Count = %d\n", __func__, ncpus); pr_info("%s: Core Id = %d\n", __func__, core_id); + if (ncpus > NR_CPUS) { + ncpus = NR_CPUS; + pr_info("%s: limiting core count by %d\n", __func__, ncpus); + } + for (i = 0; i < ncpus; ++i) set_cpu_possible(i, true); } @@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) int i; #ifdef CONFIG_HOTPLUG_CPU - cpu_start_id = cpu; - system_flush_invalidate_dcache_range( - (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); + WRITE_ONCE(cpu_start_id, cpu); + /* Pairs with the third memw in the cpu_restart */ + mb(); + system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id, + sizeof(cpu_start_id)); #endif smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); @@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) ccount = get_ccount(); while (!ccount); - cpu_start_ccount = ccount; + WRITE_ONCE(cpu_start_ccount, ccount); - while (time_before(jiffies, timeout)) { + do { + /* + * Pairs with the first two memws in the + * .Lboot_secondary. + */ mb(); - if (!cpu_start_ccount) - break; - } + ccount = READ_ONCE(cpu_start_ccount); + } while (ccount && time_before(jiffies, timeout)); - if (cpu_start_ccount) { + if (ccount) { smp_call_function_single(0, mx_cpu_stop, - (void *)cpu, 1); - cpu_start_ccount = 0; + (void *)cpu, 1); + WRITE_ONCE(cpu_start_ccount, 0); return -EIO; } } @@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", __func__, cpu, idle, start_info.stack); + init_completion(&cpu_running); ret = boot_secondary(cpu, idle); if (ret == 0) { wait_for_completion_timeout(&cpu_running, @@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu) unsigned long timeout = jiffies + msecs_to_jiffies(1000); while (time_before(jiffies, timeout)) { system_invalidate_dcache_range((unsigned long)&cpu_start_id, - sizeof(cpu_start_id)); - if (cpu_start_id == -cpu) { + sizeof(cpu_start_id)); + /* Pairs with the second memw in the cpu_restart */ + mb(); + if (READ_ONCE(cpu_start_id) == -cpu) { platform_cpu_kill(cpu); return; } @@ -359,8 +372,7 @@ static void send_ipi_message(const struct cpumask *callmask, unsigned long mask = 0; for_each_cpu(index, callmask) - if (index != smp_processor_id()) - mask |= 1 << index; + mask |= 1 << index; set_er(mask, MIPISET(msg_id)); } @@ -399,22 +411,31 @@ irqreturn_t ipi_interrupt(int irq, void *dev_id) { unsigned int cpu = smp_processor_id(); struct ipi_data *ipi = &per_cpu(ipi_data, cpu); - unsigned int msg; - unsigned i; - msg = get_er(MIPICAUSE(cpu)); - for (i = 0; i < IPI_MAX; i++) - if (msg & (1 << i)) { - set_er(1 << i, MIPICAUSE(cpu)); - ++ipi->ipi_count[i]; + for (;;) { + unsigned int msg; + + msg = get_er(MIPICAUSE(cpu)); + set_er(msg, MIPICAUSE(cpu)); + + if (!msg) + break; + + if (msg & (1 << IPI_CALL_FUNC)) { + ++ipi->ipi_count[IPI_CALL_FUNC]; + generic_smp_call_function_interrupt(); + } + + if (msg & (1 << IPI_RESCHEDULE)) { + ++ipi->ipi_count[IPI_RESCHEDULE]; + scheduler_ipi(); } - if (msg & (1 << IPI_RESCHEDULE)) - scheduler_ipi(); - if (msg & (1 << IPI_CALL_FUNC)) - 
generic_smp_call_function_interrupt(); - if (msg & (1 << IPI_CPU_STOP)) - ipi_cpu_stop(cpu); + if (msg & (1 << IPI_CPU_STOP)) { + ++ipi->ipi_count[IPI_CPU_STOP]; + ipi_cpu_stop(cpu); + } + } return IRQ_HANDLED; } diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl index 69cf91b03b26..6af49929de85 100644 --- a/arch/xtensa/kernel/syscalls/syscall.tbl +++ b/arch/xtensa/kernel/syscalls/syscall.tbl @@ -72,8 +72,8 @@ 61 common fcntl64 sys_fcntl64 62 common fallocate sys_fallocate 63 common fadvise64_64 xtensa_fadvise64_64 -64 common utime sys_utime -65 common utimes sys_utimes +64 common utime sys_utime32 +65 common utimes sys_utimes_time32 66 common ioctl sys_ioctl 67 common fcntl sys_fcntl 68 common setxattr sys_setxattr @@ -103,7 +103,7 @@ 91 common madvise sys_madvise 92 common shmget sys_shmget 93 common shmat xtensa_shmat -94 common shmctl sys_shmctl +94 common shmctl sys_old_shmctl 95 common shmdt sys_shmdt # Socket Operations 96 common socket sys_socket @@ -174,15 +174,15 @@ 158 common capget sys_capget 159 common capset sys_capset 160 common ptrace sys_ptrace -161 common semtimedop sys_semtimedop +161 common semtimedop sys_semtimedop_time32 162 common semget sys_semget 163 common semop sys_semop -164 common semctl sys_semctl +164 common semctl sys_old_semctl 165 common available165 sys_ni_syscall 166 common msgget sys_msgget 167 common msgsnd sys_msgsnd 168 common msgrcv sys_msgrcv -169 common msgctl sys_msgctl +169 common msgctl sys_old_msgctl 170 common available170 sys_ni_syscall # File System 171 common umount2 sys_umount @@ -206,11 +206,11 @@ 188 common setrlimit sys_setrlimit 189 common getrlimit sys_getrlimit 190 common getrusage sys_getrusage -191 common futex sys_futex +191 common futex sys_futex_time32 192 common gettimeofday sys_gettimeofday 193 common settimeofday sys_settimeofday -194 common adjtimex sys_adjtimex -195 common nanosleep sys_nanosleep +194 common adjtimex sys_adjtimex_time32 +195 common nanosleep sys_nanosleep_time32 196 common getgroups sys_getgroups 197 common setgroups sys_setgroups 198 common sethostname sys_sethostname @@ -234,7 +234,7 @@ 215 common sched_getscheduler sys_sched_getscheduler 216 common sched_get_priority_max sys_sched_get_priority_max 217 common sched_get_priority_min sys_sched_get_priority_min -218 common sched_rr_get_interval sys_sched_rr_get_interval +218 common sched_rr_get_interval sys_sched_rr_get_interval_time32 219 common sched_yield sys_sched_yield 222 common available222 sys_ni_syscall # Signal Handling @@ -244,14 +244,14 @@ 226 common rt_sigaction sys_rt_sigaction 227 common rt_sigprocmask sys_rt_sigprocmask 228 common rt_sigpending sys_rt_sigpending -229 common rt_sigtimedwait sys_rt_sigtimedwait +229 common rt_sigtimedwait sys_rt_sigtimedwait_time32 230 common rt_sigqueueinfo sys_rt_sigqueueinfo 231 common rt_sigsuspend sys_rt_sigsuspend # Message 232 common mq_open sys_mq_open 233 common mq_unlink sys_mq_unlink -234 common mq_timedsend sys_mq_timedsend -235 common mq_timedreceive sys_mq_timedreceive +234 common mq_timedsend sys_mq_timedsend_time32 +235 common mq_timedreceive sys_mq_timedreceive_time32 236 common mq_notify sys_mq_notify 237 common mq_getsetattr sys_mq_getsetattr 238 common available238 sys_ni_syscall @@ -259,17 +259,17 @@ # IO 240 common io_destroy sys_io_destroy 241 common io_submit sys_io_submit -242 common io_getevents sys_io_getevents +242 common io_getevents sys_io_getevents_time32 243 common io_cancel sys_io_cancel -244 common clock_settime sys_clock_settime 
-245 common clock_gettime sys_clock_gettime -246 common clock_getres sys_clock_getres -247 common clock_nanosleep sys_clock_nanosleep +244 common clock_settime sys_clock_settime32 +245 common clock_gettime sys_clock_gettime32 +246 common clock_getres sys_clock_getres_time32 +247 common clock_nanosleep sys_clock_nanosleep_time32 # Timer 248 common timer_create sys_timer_create 249 common timer_delete sys_timer_delete -250 common timer_settime sys_timer_settime -251 common timer_gettime sys_timer_gettime +250 common timer_settime sys_timer_settime32 +251 common timer_gettime sys_timer_gettime32 252 common timer_getoverrun sys_timer_getoverrun # System 253 common reserved253 sys_ni_syscall @@ -291,8 +291,8 @@ 269 common tee sys_tee 270 common vmsplice sys_vmsplice 271 common available271 sys_ni_syscall -272 common pselect6 sys_pselect6 -273 common ppoll sys_ppoll +272 common pselect6 sys_pselect6_time32 +273 common ppoll sys_ppoll_time32 274 common epoll_pwait sys_epoll_pwait 275 common epoll_create1 sys_epoll_create1 276 common inotify_init sys_inotify_init @@ -316,9 +316,9 @@ 293 common linkat sys_linkat 294 common symlinkat sys_symlinkat 295 common readlinkat sys_readlinkat -296 common utimensat sys_utimensat +296 common utimensat sys_utimensat_time32 297 common fchownat sys_fchownat -298 common futimesat sys_futimesat +298 common futimesat sys_futimesat_time32 299 common fstatat64 sys_fstatat64 300 common fchmodat sys_fchmodat 301 common faccessat sys_faccessat @@ -327,14 +327,14 @@ 304 common signalfd sys_signalfd # 305 was timerfd 306 common eventfd sys_eventfd -307 common recvmmsg sys_recvmmsg +307 common recvmmsg sys_recvmmsg_time32 308 common setns sys_setns 309 common signalfd4 sys_signalfd4 310 common dup3 sys_dup3 311 common pipe2 sys_pipe2 312 common timerfd_create sys_timerfd_create -313 common timerfd_settime sys_timerfd_settime -314 common timerfd_gettime sys_timerfd_gettime +313 common timerfd_settime sys_timerfd_settime32 +314 common timerfd_gettime sys_timerfd_gettime32 315 common available315 sys_ni_syscall 316 common eventfd2 sys_eventfd2 317 common preadv sys_preadv @@ -349,7 +349,7 @@ 326 common sync_file_range2 sys_sync_file_range2 327 common perf_event_open sys_perf_event_open 328 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo -329 common clock_adjtime sys_clock_adjtime +329 common clock_adjtime sys_clock_adjtime32 330 common prlimit64 sys_prlimit64 331 common kcmp sys_kcmp 332 common finit_module sys_finit_module @@ -372,3 +372,25 @@ 349 common pkey_alloc sys_pkey_alloc 350 common pkey_free sys_pkey_free 351 common statx sys_statx +352 common rseq sys_rseq +# 353 through 402 are unassigned to sync up with generic numbers +403 common clock_gettime64 sys_clock_gettime +404 common clock_settime64 sys_clock_settime +405 common clock_adjtime64 sys_clock_adjtime +406 common clock_getres_time64 sys_clock_getres +407 common clock_nanosleep_time64 sys_clock_nanosleep +408 common timer_gettime64 sys_timer_gettime +409 common timer_settime64 sys_timer_settime +410 common timerfd_gettime64 sys_timerfd_gettime +411 common timerfd_settime64 sys_timerfd_settime +412 common utimensat_time64 sys_utimensat +413 common pselect6_time64 sys_pselect6 +414 common ppoll_time64 sys_ppoll +416 common io_pgetevents_time64 sys_io_pgetevents +417 common recvmmsg_time64 sys_recvmmsg +418 common mq_timedsend_time64 sys_mq_timedsend +419 common mq_timedreceive_time64 sys_mq_timedreceive +420 common semtimedop_time64 sys_semtimedop +421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait +422 common 
futex_time64 sys_futex +423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index fd524a54d2ab..69db8c93c1f9 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -52,14 +52,11 @@ static struct clocksource ccount_clocksource = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; -static int ccount_timer_set_next_event(unsigned long delta, - struct clock_event_device *dev); struct ccount_timer { struct clock_event_device evt; int irq_enabled; char name[24]; }; -static DEFINE_PER_CPU(struct ccount_timer, ccount_timer); static int ccount_timer_set_next_event(unsigned long delta, struct clock_event_device *dev) @@ -89,7 +86,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt) container_of(evt, struct ccount_timer, evt); if (timer->irq_enabled) { - disable_irq(evt->irq); + disable_irq_nosync(evt->irq); timer->irq_enabled = 0; } return 0; @@ -107,7 +104,30 @@ static int ccount_timer_set_oneshot(struct clock_event_device *evt) return 0; } -static irqreturn_t timer_interrupt(int irq, void *dev_id); +static DEFINE_PER_CPU(struct ccount_timer, ccount_timer) = { + .evt = { + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = ccount_timer_set_next_event, + .set_state_shutdown = ccount_timer_shutdown, + .set_state_oneshot = ccount_timer_set_oneshot, + .tick_resume = ccount_timer_set_oneshot, + }, +}; + +static irqreturn_t timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt; + + set_linux_timer(get_linux_timer()); + evt->event_handler(evt); + + /* Allow platform to do something useful (Wdog). */ + platform_heartbeat(); + + return IRQ_HANDLED; +} + static struct irqaction timer_irqaction = { .handler = timer_interrupt, .flags = IRQF_TIMER, @@ -120,14 +140,8 @@ void local_timer_setup(unsigned cpu) struct clock_event_device *clockevent = &timer->evt; timer->irq_enabled = 1; - clockevent->name = timer->name; snprintf(timer->name, sizeof(timer->name), "ccount_clockevent_%u", cpu); - clockevent->features = CLOCK_EVT_FEAT_ONESHOT; - clockevent->rating = 300; - clockevent->set_next_event = ccount_timer_set_next_event; - clockevent->set_state_shutdown = ccount_timer_shutdown; - clockevent->set_state_oneshot = ccount_timer_set_oneshot; - clockevent->tick_resume = ccount_timer_set_oneshot; + clockevent->name = timer->name; clockevent->cpumask = cpumask_of(cpu); clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT); if (WARN(!clockevent->irq, "error: can't map timer irq")) @@ -190,23 +204,6 @@ void __init time_init(void) timer_probe(); } -/* - * The timer interrupt is called HZ times per second. - */ - -irqreturn_t timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = &this_cpu_ptr(&ccount_timer)->evt; - - set_linux_timer(get_linux_timer()); - evt->event_handler(evt); - - /* Allow platform to do something useful (Wdog). */ - platform_heartbeat(); - - return IRQ_HANDLED; -} - #ifndef CONFIG_GENERIC_CALIBRATE_DELAY void calibrate_delay(void) { diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index e6fa55aa1ccb..454d53096bc9 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -420,16 +420,15 @@ void __init trap_init(void) /* Setup specific handlers. 
*/ for(i = 0; dispatch_init_table[i].cause >= 0; i++) { - int fast = dispatch_init_table[i].fast; int cause = dispatch_init_table[i].cause; void *handler = dispatch_init_table[i].handler; if (fast == 0) set_handler(default_handler, cause, handler); - if (fast && fast & USER) + if ((fast & USER) != 0) set_handler(fast_user_handler, cause, handler); - if (fast && fast & KRNL) + if ((fast & KRNL) != 0) set_handler(fast_kernel_handler, cause, handler); } diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index 1734cda6bc4a..af7152560bc3 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -45,6 +45,10 @@ static void __init populate(void *start, void *end) pmd_t *pmd = pmd_offset(pgd, vaddr); pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, n_pages * sizeof(pte_t), PAGE_SIZE); + pr_debug("%s: %p - %p\n", __func__, start, end); for (i = j = 0; i < n_pmds; ++i) { @@ -52,8 +56,10 @@ static void __init populate(void *start, void *end) for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { phys_addr_t phys = - memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, - MEMBLOCK_ALLOC_ANYWHERE); + memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + + if (!phys) + panic("Failed to allocate page table page\n"); set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); } diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index a4dcfd39bc5c..2fb7d1172228 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -32,6 +32,9 @@ static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) __func__, vaddr, n_pages); pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE); + if (!pte) + panic("%s: Failed to allocate %zu bytes align=%lx\n", + __func__, n_pages * sizeof(pte_t), PAGE_SIZE); for (i = 0; i < n_pages; ++i) pte_clear(NULL, 0, pte + i); diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index cd307767a134..4c592496a16a 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -230,11 +230,16 @@ static struct kmem_cache *bfq_pool; #define BFQ_MIN_TT (2 * NSEC_PER_MSEC) /* hw_tag detection: parallel requests threshold and min samples needed. */ -#define BFQ_HW_QUEUE_THRESHOLD 4 +#define BFQ_HW_QUEUE_THRESHOLD 3 #define BFQ_HW_QUEUE_SAMPLES 32 #define BFQQ_SEEK_THR (sector_t)(8 * 100) #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) +#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \ + (get_sdist(last_pos, rq) > \ + BFQQ_SEEK_THR && \ + (!blk_queue_nonrot(bfqd->queue) || \ + blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT)) #define BFQQ_CLOSE_THR (sector_t)(8 * 1024) #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19) @@ -623,26 +628,6 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfqq->pos_root = NULL; } -/* - * Tell whether there are active queues with different weights or - * active groups. - */ -static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd) -{ - /* - * For queue weights to differ, queue_weights_tree must contain - * at least two nodes. 
- */ - return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && - (bfqd->queue_weights_tree.rb_node->rb_left || - bfqd->queue_weights_tree.rb_node->rb_right) -#ifdef CONFIG_BFQ_GROUP_IOSCHED - ) || - (bfqd->num_groups_with_pending_reqs > 0 -#endif - ); -} - /* * The following function returns true if every queue must receive the * same share of the throughput (this condition is used when deciding @@ -651,25 +636,48 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd) * * Such a scenario occurs when: * 1) all active queues have the same weight, - * 2) all active groups at the same level in the groups tree have the same - * weight, + * 2) all active queues belong to the same I/O-priority class, * 3) all active groups at the same level in the groups tree have the same + * weight, + * 4) all active groups at the same level in the groups tree have the same * number of children. * * Unfortunately, keeping the necessary state for evaluating exactly * the last two symmetry sub-conditions above would be quite complex - * and time consuming. Therefore this function evaluates, instead, - * only the following stronger two sub-conditions, for which it is + * and time consuming. Therefore this function evaluates, instead, + * only the following stronger three sub-conditions, for which it is * much easier to maintain the needed state: * 1) all active queues have the same weight, - * 2) there are no active groups. + * 2) all active queues belong to the same I/O-priority class, + * 3) there are no active groups. * In particular, the last condition is always true if hierarchical * support or the cgroups interface are not enabled, thus no state * needs to be maintained in this case. */ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) { - return !bfq_varied_queue_weights_or_active_groups(bfqd); + /* + * For queue weights to differ, queue_weights_tree must contain + * at least two nodes. + */ + bool varied_queue_weights = !RB_EMPTY_ROOT(&bfqd->queue_weights_tree) && + (bfqd->queue_weights_tree.rb_node->rb_left || + bfqd->queue_weights_tree.rb_node->rb_right); + + bool multiple_classes_busy = + (bfqd->busy_queues[0] && bfqd->busy_queues[1]) || + (bfqd->busy_queues[0] && bfqd->busy_queues[2]) || + (bfqd->busy_queues[1] && bfqd->busy_queues[2]); + + /* + * For queue weights to differ, queue_weights_tree must contain + * at least two nodes. + */ + return !(varied_queue_weights || multiple_classes_busy +#ifdef BFQ_GROUP_IOSCHED_ENABLED + || bfqd->num_groups_with_pending_reqs > 0 +#endif + ); } /* @@ -728,15 +736,14 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq, /* * In the unlucky event of an allocation failure, we just * exit. This will cause the weight of queue to not be - * considered in bfq_varied_queue_weights_or_active_groups, - * which, in its turn, causes the scenario to be deemed - * wrongly symmetric in case bfqq's weight would have been - * the only weight making the scenario asymmetric. On the - * bright side, no unbalance will however occur when bfqq - * becomes inactive again (the invocation of this function - * is triggered by an activation of queue). In fact, - * bfq_weights_tree_remove does nothing if - * !bfqq->weight_counter. + * considered in bfq_symmetric_scenario, which, in its turn, + * causes the scenario to be deemed wrongly symmetric in case + * bfqq's weight would have been the only weight making the + * scenario asymmetric. 
On the bright side, no unbalance will + * however occur when bfqq becomes inactive again (the + * invocation of this function is triggered by an activation + * of queue). In fact, bfq_weights_tree_remove does nothing + * if !bfqq->weight_counter. */ if (unlikely(!bfqq->weight_counter)) return; @@ -747,6 +754,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq, inc_counter: bfqq->weight_counter->num_active++; + bfqq->ref++; } /* @@ -771,6 +779,7 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd, reset_entity_pointer: bfqq->weight_counter = NULL; + bfq_put_queue(bfqq); } /* @@ -782,9 +791,6 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd, { struct bfq_entity *entity = bfqq->entity.parent; - __bfq_weights_tree_remove(bfqd, bfqq, - &bfqd->queue_weights_tree); - for_each_entity(entity) { struct bfq_sched_data *sd = entity->my_sched_data; @@ -818,6 +824,15 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd, bfqd->num_groups_with_pending_reqs--; } } + + /* + * Next function is invoked last, because it causes bfqq to be + * freed if the following holds: bfqq is not in service and + * has no dispatched request. DO NOT use bfqq after the next + * function invocation. + */ + __bfq_weights_tree_remove(bfqd, bfqq, + &bfqd->queue_weights_tree); } /* @@ -873,7 +888,8 @@ static struct request *bfq_find_next_rq(struct bfq_data *bfqd, static unsigned long bfq_serv_to_charge(struct request *rq, struct bfq_queue *bfqq) { - if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1) + if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 || + !bfq_symmetric_scenario(bfqq->bfqd)) return blk_rq_sectors(rq); return blk_rq_sectors(rq) * bfq_async_charge_factor; @@ -907,8 +923,10 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, */ return; - new_budget = max_t(unsigned long, bfqq->max_budget, - bfq_serv_to_charge(next_rq, bfqq)); + new_budget = max_t(unsigned long, + max_t(unsigned long, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)), + entity->service); if (entity->budget != new_budget) { entity->budget = new_budget; bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", @@ -1011,7 +1029,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, static int bfqq_process_refs(struct bfq_queue *bfqq) { - return bfqq->ref - bfqq->allocated - bfqq->entity.on_st; + return bfqq->ref - bfqq->allocated - bfqq->entity.on_st - + (bfqq->weight_counter != NULL); } /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */ @@ -1380,7 +1399,15 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, { struct bfq_entity *entity = &bfqq->entity; - if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) { + /* + * In the next compound condition, we check also whether there + * is some budget left, because otherwise there is no point in + * trying to go on serving bfqq with this same budget: bfqq + * would be expired immediately after being selected for + * service. This would only cause useless overhead. + */ + if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time && + bfq_bfqq_budget_left(bfqq) > 0) { /* * We do not clear the flag non_blocking_wait_rq here, as * the latter is used in bfq_activate_bfqq to signal @@ -2217,14 +2244,15 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, return NULL; /* If there is only one backlogged queue, don't search. 
*/ - if (bfqd->busy_queues == 1) + if (bfq_tot_busy_queues(bfqd) == 1) return NULL; in_service_bfqq = bfqd->in_service_queue; if (in_service_bfqq && in_service_bfqq != bfqq && likely(in_service_bfqq != &bfqd->oom_bfqq) && - bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) && + bfq_rq_close_to_sector(io_struct, request, + bfqd->in_serv_last_pos) && bfqq->entity.parent == in_service_bfqq->entity.parent && bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) { new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq); @@ -2742,7 +2770,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) if ((bfqd->rq_in_driver > 0 || now_ns - bfqd->last_completion < BFQ_MIN_TT) - && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR) + && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) bfqd->sequential_samples++; bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); @@ -2764,6 +2792,8 @@ update_rate_and_reset: bfq_update_rate_reset(bfqd, rq); update_last_values: bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); + if (RQ_BFQQ(rq) == bfqd->in_service_queue) + bfqd->in_serv_last_pos = bfqd->last_position; bfqd->last_dispatch = now_ns; } @@ -3274,16 +3304,32 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, * requests, then the request pattern is isochronous * (see the comments on the function * bfq_bfqq_softrt_next_start()). Thus we can compute - * soft_rt_next_start. If, instead, the queue still - * has outstanding requests, then we have to wait for - * the completion of all the outstanding requests to - * discover whether the request pattern is actually - * isochronous. + * soft_rt_next_start. And we do it, unless bfqq is in + * interactive weight raising. We do not do it in the + * latter subcase, for the following reason. bfqq may + * be conveying the I/O needed to load a soft + * real-time application. Such an application will + * actually exhibit a soft real-time I/O pattern after + * it finally starts doing its job. But, if + * soft_rt_next_start is computed here for an + * interactive bfqq, and bfqq had received a lot of + * service before remaining with no outstanding + * request (likely to happen on a fast device), then + * soft_rt_next_start would be assigned such a high + * value that, for a very long time, bfqq would be + * prevented from being possibly considered as soft + * real time. + * + * If, instead, the queue still has outstanding + * requests, then we have to wait for the completion + * of all the outstanding requests to discover whether + * the request pattern is actually isochronous. */ - if (bfqq->dispatched == 0) + if (bfqq->dispatched == 0 && + bfqq->wr_coeff != bfqd->bfq_wr_coeff) bfqq->soft_rt_next_start = bfq_bfqq_softrt_next_start(bfqd, bfqq); - else { + else if (bfqq->dispatched > 0) { /* * Schedule an update of soft_rt_next_start to when * the task may be discovered to be isochronous. @@ -3376,53 +3422,13 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) bfq_bfqq_budget_timeout(bfqq); } -/* - * For a queue that becomes empty, device idling is allowed only if - * this function returns true for the queue. As a consequence, since - * device idling plays a critical role in both throughput boosting and - * service guarantees, the return value of this function plays a - * critical role in both these aspects as well. 
- * - * In a nutshell, this function returns true only if idling is - * beneficial for throughput or, even if detrimental for throughput, - * idling is however necessary to preserve service guarantees (low - * latency, desired throughput distribution, ...). In particular, on - * NCQ-capable devices, this function tries to return false, so as to - * help keep the drives' internal queues full, whenever this helps the - * device boost the throughput without causing any service-guarantee - * issue. - * - * In more detail, the return value of this function is obtained by, - * first, computing a number of boolean variables that take into - * account throughput and service-guarantee issues, and, then, - * combining these variables in a logical expression. Most of the - * issues taken into account are not trivial. We discuss these issues - * individually while introducing the variables. - */ -static bool bfq_better_to_idle(struct bfq_queue *bfqq) +static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, + struct bfq_queue *bfqq) { - struct bfq_data *bfqd = bfqq->bfqd; bool rot_without_queueing = !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, bfqq_sequential_and_IO_bound, - idling_boosts_thr, idling_boosts_thr_without_issues, - idling_needed_for_service_guarantees, - asymmetric_scenario; - - if (bfqd->strict_guarantees) - return true; - - /* - * Idling is performed only if slice_idle > 0. In addition, we - * do not idle if - * (a) bfqq is async - * (b) bfqq is in the idle io prio class: in this case we do - * not idle because we want to minimize the bandwidth that - * queues in this class can steal to higher-priority queues - */ - if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || - bfq_class_idle(bfqq)) - return false; + idling_boosts_thr; bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); @@ -3454,8 +3460,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) bfqq_sequential_and_IO_bound); /* - * The value of the next variable, - * idling_boosts_thr_without_issues, is equal to that of + * The return value of this function is equal to that of * idling_boosts_thr, unless a special case holds. In this * special case, described below, idling may cause problems to * weight-raised queues. @@ -3472,217 +3477,252 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq) * which enqueue several requests in advance, and further * reorder internally-queued requests. * - * For this reason, we force to false the value of - * idling_boosts_thr_without_issues if there are weight-raised - * busy queues. In this case, and if bfqq is not weight-raised, - * this guarantees that the device is not idled for bfqq (if, - * instead, bfqq is weight-raised, then idling will be - * guaranteed by another variable, see below). Combined with - * the timestamping rules of BFQ (see [1] for details), this - * behavior causes bfqq, and hence any sync non-weight-raised - * queue, to get a lower number of requests served, and thus - * to ask for a lower number of requests from the request - * pool, before the busy weight-raised queues get served - * again. This often mitigates starvation problems in the - * presence of heavy write workloads and NCQ, thereby - * guaranteeing a higher application and system responsiveness - * in these hostile scenarios. + * For this reason, we force to false the return value if + * there are weight-raised busy queues. 
In this case, and if + * bfqq is not weight-raised, this guarantees that the device + * is not idled for bfqq (if, instead, bfqq is weight-raised, + * then idling will be guaranteed by another variable, see + * below). Combined with the timestamping rules of BFQ (see + * [1] for details), this behavior causes bfqq, and hence any + * sync non-weight-raised queue, to get a lower number of + * requests served, and thus to ask for a lower number of + * requests from the request pool, before the busy + * weight-raised queues get served again. This often mitigates + * starvation problems in the presence of heavy write + * workloads and NCQ, thereby guaranteeing a higher + * application and system responsiveness in these hostile + * scenarios. */ - idling_boosts_thr_without_issues = idling_boosts_thr && + return idling_boosts_thr && bfqd->wr_busy_queues == 0; +} - /* - * There is then a case where idling must be performed not - * for throughput concerns, but to preserve service - * guarantees. - * - * To introduce this case, we can note that allowing the drive - * to enqueue more than one request at a time, and hence - * delegating de facto final scheduling decisions to the - * drive's internal scheduler, entails loss of control on the - * actual request service order. In particular, the critical - * situation is when requests from different processes happen - * to be present, at the same time, in the internal queue(s) - * of the drive. In such a situation, the drive, by deciding - * the service order of the internally-queued requests, does - * determine also the actual throughput distribution among - * these processes. But the drive typically has no notion or - * concern about per-process throughput distribution, and - * makes its decisions only on a per-request basis. Therefore, - * the service distribution enforced by the drive's internal - * scheduler is likely to coincide with the desired - * device-throughput distribution only in a completely - * symmetric scenario where: - * (i) each of these processes must get the same throughput as - * the others; - * (ii) the I/O of each process has the same properties, in - * terms of locality (sequential or random), direction - * (reads or writes), request sizes, greediness - * (from I/O-bound to sporadic), and so on. - * In fact, in such a scenario, the drive tends to treat - * the requests of each of these processes in about the same - * way as the requests of the others, and thus to provide - * each of these processes with about the same throughput - * (which is exactly the desired throughput distribution). In - * contrast, in any asymmetric scenario, device idling is - * certainly needed to guarantee that bfqq receives its - * assigned fraction of the device throughput (see [1] for - * details). - * The problem is that idling may significantly reduce - * throughput with certain combinations of types of I/O and - * devices. An important example is sync random I/O, on flash - * storage with command queueing. So, unless bfqq falls in the - * above cases where idling also boosts throughput, it would - * be important to check conditions (i) and (ii) accurately, - * so as to avoid idling when not strictly needed for service - * guarantees. - * - * Unfortunately, it is extremely difficult to thoroughly - * check condition (ii). And, in case there are active groups, - * it becomes very difficult to check condition (i) too. 
In - * fact, if there are active groups, then, for condition (i) - * to become false, it is enough that an active group contains - * more active processes or sub-groups than some other active - * group. More precisely, for condition (i) to hold because of - * such a group, it is not even necessary that the group is - * (still) active: it is sufficient that, even if the group - * has become inactive, some of its descendant processes still - * have some request already dispatched but still waiting for - * completion. In fact, requests have still to be guaranteed - * their share of the throughput even after being - * dispatched. In this respect, it is easy to show that, if a - * group frequently becomes inactive while still having - * in-flight requests, and if, when this happens, the group is - * not considered in the calculation of whether the scenario - * is asymmetric, then the group may fail to be guaranteed its - * fair share of the throughput (basically because idling may - * not be performed for the descendant processes of the group, - * but it had to be). We address this issue with the - * following bi-modal behavior, implemented in the function - * bfq_symmetric_scenario(). - * - * If there are groups with requests waiting for completion - * (as commented above, some of these groups may even be - * already inactive), then the scenario is tagged as - * asymmetric, conservatively, without checking any of the - * conditions (i) and (ii). So the device is idled for bfqq. - * This behavior matches also the fact that groups are created - * exactly if controlling I/O is a primary concern (to - * preserve bandwidth and latency guarantees). - * - * On the opposite end, if there are no groups with requests - * waiting for completion, then only condition (i) is actually - * controlled, i.e., provided that condition (i) holds, idling - * is not performed, regardless of whether condition (ii) - * holds. In other words, only if condition (i) does not hold, - * then idling is allowed, and the device tends to be - * prevented from queueing many requests, possibly of several - * processes. Since there are no groups with requests waiting - * for completion, then, to control condition (i) it is enough - * to check just whether all the queues with requests waiting - * for completion also have the same weight. - * - * Not checking condition (ii) evidently exposes bfqq to the - * risk of getting less throughput than its fair share. - * However, for queues with the same weight, a further - * mechanism, preemption, mitigates or even eliminates this - * problem. And it does so without consequences on overall - * throughput. This mechanism and its benefits are explained - * in the next three paragraphs. - * - * Even if a queue, say Q, is expired when it remains idle, Q - * can still preempt the new in-service queue if the next - * request of Q arrives soon (see the comments on - * bfq_bfqq_update_budg_for_activation). If all queues and - * groups have the same weight, this form of preemption, - * combined with the hole-recovery heuristic described in the - * comments on function bfq_bfqq_update_budg_for_activation, - * are enough to preserve a correct bandwidth distribution in - * the mid term, even without idling. In fact, even if not - * idling allows the internal queues of the device to contain - * many requests, and thus to reorder requests, we can rather - * safely assume that the internal scheduler still preserves a - * minimum of mid-term fairness. 
- * - * More precisely, this preemption-based, idleless approach - * provides fairness in terms of IOPS, and not sectors per - * second. This can be seen with a simple example. Suppose - * that there are two queues with the same weight, but that - * the first queue receives requests of 8 sectors, while the - * second queue receives requests of 1024 sectors. In - * addition, suppose that each of the two queues contains at - * most one request at a time, which implies that each queue - * always remains idle after it is served. Finally, after - * remaining idle, each queue receives very quickly a new - * request. It follows that the two queues are served - * alternatively, preempting each other if needed. This - * implies that, although both queues have the same weight, - * the queue with large requests receives a service that is - * 1024/8 times as high as the service received by the other - * queue. - * - * The motivation for using preemption instead of idling (for - * queues with the same weight) is that, by not idling, - * service guarantees are preserved (completely or at least in - * part) without minimally sacrificing throughput. And, if - * there is no active group, then the primary expectation for - * this device is probably a high throughput. - * - * We are now left only with explaining the additional - * compound condition that is checked below for deciding - * whether the scenario is asymmetric. To explain this - * compound condition, we need to add that the function - * bfq_symmetric_scenario checks the weights of only - * non-weight-raised queues, for efficiency reasons (see - * comments on bfq_weights_tree_add()). Then the fact that - * bfqq is weight-raised is checked explicitly here. More - * precisely, the compound condition below takes into account - * also the fact that, even if bfqq is being weight-raised, - * the scenario is still symmetric if all queues with requests - * waiting for completion happen to be - * weight-raised. Actually, we should be even more precise - * here, and differentiate between interactive weight raising - * and soft real-time weight raising. - * - * As a side note, it is worth considering that the above - * device-idling countermeasures may however fail in the - * following unlucky scenario: if idling is (correctly) - * disabled in a time period during which all symmetry - * sub-conditions hold, and hence the device is allowed to - * enqueue many requests, but at some later point in time some - * sub-condition stops to hold, then it may become impossible - * to let requests be served in the desired order until all - * the requests already queued in the device have been served. - */ - asymmetric_scenario = (bfqq->wr_coeff > 1 && - bfqd->wr_busy_queues < bfqd->busy_queues) || +/* + * There is a case where idling must be performed not for + * throughput concerns, but to preserve service guarantees. + * + * To introduce this case, we can note that allowing the drive + * to enqueue more than one request at a time, and hence + * delegating de facto final scheduling decisions to the + * drive's internal scheduler, entails loss of control on the + * actual request service order. In particular, the critical + * situation is when requests from different processes happen + * to be present, at the same time, in the internal queue(s) + * of the drive. In such a situation, the drive, by deciding + * the service order of the internally-queued requests, does + * determine also the actual throughput distribution among + * these processes. 
But the drive typically has no notion or + * concern about per-process throughput distribution, and + * makes its decisions only on a per-request basis. Therefore, + * the service distribution enforced by the drive's internal + * scheduler is likely to coincide with the desired + * device-throughput distribution only in a completely + * symmetric scenario where: + * (i) each of these processes must get the same throughput as + * the others; + * (ii) the I/O of each process has the same properties, in + * terms of locality (sequential or random), direction + * (reads or writes), request sizes, greediness + * (from I/O-bound to sporadic), and so on. + * In fact, in such a scenario, the drive tends to treat + * the requests of each of these processes in about the same + * way as the requests of the others, and thus to provide + * each of these processes with about the same throughput + * (which is exactly the desired throughput distribution). In + * contrast, in any asymmetric scenario, device idling is + * certainly needed to guarantee that bfqq receives its + * assigned fraction of the device throughput (see [1] for + * details). + * The problem is that idling may significantly reduce + * throughput with certain combinations of types of I/O and + * devices. An important example is sync random I/O, on flash + * storage with command queueing. So, unless bfqq falls in the + * above cases where idling also boosts throughput, it would + * be important to check conditions (i) and (ii) accurately, + * so as to avoid idling when not strictly needed for service + * guarantees. + * + * Unfortunately, it is extremely difficult to thoroughly + * check condition (ii). And, in case there are active groups, + * it becomes very difficult to check condition (i) too. In + * fact, if there are active groups, then, for condition (i) + * to become false, it is enough that an active group contains + * more active processes or sub-groups than some other active + * group. More precisely, for condition (i) to hold because of + * such a group, it is not even necessary that the group is + * (still) active: it is sufficient that, even if the group + * has become inactive, some of its descendant processes still + * have some request already dispatched but still waiting for + * completion. In fact, requests have still to be guaranteed + * their share of the throughput even after being + * dispatched. In this respect, it is easy to show that, if a + * group frequently becomes inactive while still having + * in-flight requests, and if, when this happens, the group is + * not considered in the calculation of whether the scenario + * is asymmetric, then the group may fail to be guaranteed its + * fair share of the throughput (basically because idling may + * not be performed for the descendant processes of the group, + * but it had to be). We address this issue with the + * following bi-modal behavior, implemented in the function + * bfq_symmetric_scenario(). + * + * If there are groups with requests waiting for completion + * (as commented above, some of these groups may even be + * already inactive), then the scenario is tagged as + * asymmetric, conservatively, without checking any of the + * conditions (i) and (ii). So the device is idled for bfqq. + * This behavior matches also the fact that groups are created + * exactly if controlling I/O is a primary concern (to + * preserve bandwidth and latency guarantees). 
+ * + * On the opposite end, if there are no groups with requests + * waiting for completion, then only condition (i) is actually + * controlled, i.e., provided that condition (i) holds, idling + * is not performed, regardless of whether condition (ii) + * holds. In other words, only if condition (i) does not hold, + * then idling is allowed, and the device tends to be + * prevented from queueing many requests, possibly of several + * processes. Since there are no groups with requests waiting + * for completion, then, to control condition (i) it is enough + * to check just whether all the queues with requests waiting + * for completion also have the same weight. + * + * Not checking condition (ii) evidently exposes bfqq to the + * risk of getting less throughput than its fair share. + * However, for queues with the same weight, a further + * mechanism, preemption, mitigates or even eliminates this + * problem. And it does so without consequences on overall + * throughput. This mechanism and its benefits are explained + * in the next three paragraphs. + * + * Even if a queue, say Q, is expired when it remains idle, Q + * can still preempt the new in-service queue if the next + * request of Q arrives soon (see the comments on + * bfq_bfqq_update_budg_for_activation). If all queues and + * groups have the same weight, this form of preemption, + * combined with the hole-recovery heuristic described in the + * comments on function bfq_bfqq_update_budg_for_activation, + * are enough to preserve a correct bandwidth distribution in + * the mid term, even without idling. In fact, even if not + * idling allows the internal queues of the device to contain + * many requests, and thus to reorder requests, we can rather + * safely assume that the internal scheduler still preserves a + * minimum of mid-term fairness. + * + * More precisely, this preemption-based, idleless approach + * provides fairness in terms of IOPS, and not sectors per + * second. This can be seen with a simple example. Suppose + * that there are two queues with the same weight, but that + * the first queue receives requests of 8 sectors, while the + * second queue receives requests of 1024 sectors. In + * addition, suppose that each of the two queues contains at + * most one request at a time, which implies that each queue + * always remains idle after it is served. Finally, after + * remaining idle, each queue receives very quickly a new + * request. It follows that the two queues are served + * alternatively, preempting each other if needed. This + * implies that, although both queues have the same weight, + * the queue with large requests receives a service that is + * 1024/8 times as high as the service received by the other + * queue. + * + * The motivation for using preemption instead of idling (for + * queues with the same weight) is that, by not idling, + * service guarantees are preserved (completely or at least in + * part) without minimally sacrificing throughput. And, if + * there is no active group, then the primary expectation for + * this device is probably a high throughput. + * + * We are now left only with explaining the additional + * compound condition that is checked below for deciding + * whether the scenario is asymmetric. To explain this + * compound condition, we need to add that the function + * bfq_symmetric_scenario checks the weights of only + * non-weight-raised queues, for efficiency reasons (see + * comments on bfq_weights_tree_add()). 
Then the fact that + * bfqq is weight-raised is checked explicitly here. More + * precisely, the compound condition below takes into account + * also the fact that, even if bfqq is being weight-raised, + * the scenario is still symmetric if all queues with requests + * waiting for completion happen to be + * weight-raised. Actually, we should be even more precise + * here, and differentiate between interactive weight raising + * and soft real-time weight raising. + * + * As a side note, it is worth considering that the above + * device-idling countermeasures may however fail in the + * following unlucky scenario: if idling is (correctly) + * disabled in a time period during which all symmetry + * sub-conditions hold, and hence the device is allowed to + * enqueue many requests, but at some later point in time some + * sub-condition stops to hold, then it may become impossible + * to let requests be served in the desired order until all + * the requests already queued in the device have been served. + */ +static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + return (bfqq->wr_coeff > 1 && + bfqd->wr_busy_queues < + bfq_tot_busy_queues(bfqd)) || !bfq_symmetric_scenario(bfqd); +} + +/* + * For a queue that becomes empty, device idling is allowed only if + * this function returns true for that queue. As a consequence, since + * device idling plays a critical role for both throughput boosting + * and service guarantees, the return value of this function plays a + * critical role as well. + * + * In a nutshell, this function returns true only if idling is + * beneficial for throughput or, even if detrimental for throughput, + * idling is however necessary to preserve service guarantees (low + * latency, desired throughput distribution, ...). In particular, on + * NCQ-capable devices, this function tries to return false, so as to + * help keep the drives' internal queues full, whenever this helps the + * device boost the throughput without causing any service-guarantee + * issue. + * + * Most of the issues taken into account to get the return value of + * this function are not trivial. We discuss these issues in the two + * functions providing the main pieces of information needed by this + * function. + */ +static bool bfq_better_to_idle(struct bfq_queue *bfqq) +{ + struct bfq_data *bfqd = bfqq->bfqd; + bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar; + + if (unlikely(bfqd->strict_guarantees)) + return true; /* - * Finally, there is a case where maximizing throughput is the - * best choice even if it may cause unfairness toward - * bfqq. Such a case is when bfqq became active in a burst of - * queue activations. Queues that became active during a large - * burst benefit only from throughput, as discussed in the - * comments on bfq_handle_burst. Thus, if bfqq became active - * in a burst and not idling the device maximizes throughput, - * then the device must no be idled, because not idling the - * device provides bfqq and all other queues in the burst with - * maximum benefit. Combining this and the above case, we can - * now establish when idling is actually needed to preserve - * service guarantees. + * Idling is performed only if slice_idle > 0. 
In addition, we + * do not idle if + * (a) bfqq is async + * (b) bfqq is in the idle io prio class: in this case we do + * not idle because we want to minimize the bandwidth that + * queues in this class can steal to higher-priority queues */ - idling_needed_for_service_guarantees = - asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq); + if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || + bfq_class_idle(bfqq)) + return false; + + idling_boosts_thr_with_no_issue = + idling_boosts_thr_without_issues(bfqd, bfqq); + + idling_needed_for_service_guar = + idling_needed_for_service_guarantees(bfqd, bfqq); /* - * We have now all the components we need to compute the + * We have now the two components we need to compute the * return value of the function, which is true only if idling * either boosts the throughput (without issues), or is * necessary to preserve service guarantees. */ - return idling_boosts_thr_without_issues || - idling_needed_for_service_guarantees; + return idling_boosts_thr_with_no_issue || + idling_needed_for_service_guar; } /* @@ -3934,7 +3974,7 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, * belongs to CLASS_IDLE and other queues are waiting for * service. */ - if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq))) + if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq))) goto return_rq; bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); @@ -3952,7 +3992,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) * most a call to dispatch for nothing */ return !list_empty_careful(&bfqd->dispatch) || - bfqd->busy_queues > 0; + bfq_tot_busy_queues(bfqd) > 0; } static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) @@ -4006,9 +4046,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) goto start_rq; } - bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); + bfq_log(bfqd, "dispatch requests: %d busy queues", + bfq_tot_busy_queues(bfqd)); - if (bfqd->busy_queues == 0) + if (bfq_tot_busy_queues(bfqd) == 0) goto exit; /* @@ -4488,10 +4529,7 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, struct request *rq) { bfqq->seek_history <<= 1; - bfqq->seek_history |= - get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && - (!blk_queue_nonrot(bfqd->queue) || - blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); + bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); } static void bfq_update_has_short_ttime(struct bfq_data *bfqd, @@ -4560,28 +4598,31 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); /* - * There is just this request queued: if the request - * is small and the queue is not to be expired, then - * just exit. + * There is just this request queued: if + * - the request is small, and + * - we are idling to boost throughput, and + * - the queue is not to be expired, + * then just exit. * * In this way, if the device is being idled to wait * for a new request from the in-service queue, we * avoid unplugging the device and committing the - * device to serve just a small request. On the - * contrary, we wait for the block layer to decide - * when to unplug the device: hopefully, new requests - * will be merged to this one quickly, then the device - * will be unplugged and larger requests will be - * dispatched. + * device to serve just a small request. 
In contrast + * we wait for the block layer to decide when to + * unplug the device: hopefully, new requests will be + * merged to this one quickly, then the device will be + * unplugged and larger requests will be dispatched. */ - if (small_req && !budget_timeout) + if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) && + !budget_timeout) return; /* - * A large enough request arrived, or the queue is to - * be expired: in both cases disk idling is to be - * stopped, so clear wait_request flag and reset - * timer. + * A large enough request arrived, or idling is being + * performed to preserve service guarantees, or + * finally the queue is to be expired: in all these + * cases disk idling is to be stopped, so clear + * wait_request flag and reset timer. */ bfq_clear_bfqq_wait_request(bfqq); hrtimer_try_to_cancel(&bfqd->idle_slice_timer); @@ -4607,8 +4648,6 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) bool waiting, idle_timer_disabled = false; if (new_bfqq) { - if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq) - new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1); /* * Release the request's reference to the old bfqq * and make sure one is taken to the shared queue. @@ -4751,6 +4790,8 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, static void bfq_update_hw_tag(struct bfq_data *bfqd) { + struct bfq_queue *bfqq = bfqd->in_service_queue; + bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, bfqd->rq_in_driver); @@ -4763,7 +4804,18 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd) * sum is not exact, as it's not taking into account deactivated * requests. */ - if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) + if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD) + return; + + /* + * If active queue hasn't enough requests and can idle, bfq might not + * dispatch sufficient requests to hardware. Don't zero hw_tag in this + * case + */ + if (bfqq && bfq_bfqq_has_short_ttime(bfqq) && + bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] < + BFQ_HW_QUEUE_THRESHOLD && + bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD) return; if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) @@ -4834,11 +4886,14 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) * isochronous, and both requisites for this condition to hold * are now satisfied, then compute soft_rt_next_start (see the * comments on the function bfq_bfqq_softrt_next_start()). We - * schedule this delayed check when bfqq expires, if it still - * has in-flight requests. + * do not compute soft_rt_next_start if bfqq is in interactive + * weight raising (see the comments in bfq_bfqq_expire() for + * an explanation). We schedule this delayed update when bfqq + * expires, if it still has in-flight requests. */ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && - RB_EMPTY_ROOT(&bfqq->sort_list)) + RB_EMPTY_ROOT(&bfqq->sort_list) && + bfqq->wr_coeff != bfqd->bfq_wr_coeff) bfqq->soft_rt_next_start = bfq_bfqq_softrt_next_start(bfqd, bfqq); diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 0b02bf302de0..062e1c4787f4 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -501,10 +501,11 @@ struct bfq_data { unsigned int num_groups_with_pending_reqs; /* - * Number of bfq_queues containing requests (including the - * queue in service, even if it is idling). + * Per-class (RT, BE, IDLE) number of bfq_queues containing + * requests (including the queue in service, even if it is + * idling). 
*/ - int busy_queues; + unsigned int busy_queues[3]; /* number of weight-raised busy @bfq_queues */ int wr_busy_queues; /* number of queued requests */ @@ -537,6 +538,9 @@ struct bfq_data { /* on-disk position of the last served request */ sector_t last_position; + /* position of the last served request for the in-service queue */ + sector_t in_serv_last_pos; + /* time of last request completion (ns) */ u64 last_completion; @@ -974,6 +978,7 @@ extern struct blkcg_policy blkcg_policy_bfq; struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq); struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); +unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd); struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity); struct bfq_entity *bfq_entity_of(struct rb_node *node); unsigned short bfq_ioprio_to_weight(int ioprio); diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c index 72adbbe975d5..63311d1ff1ed 100644 --- a/block/bfq-wf2q.c +++ b/block/bfq-wf2q.c @@ -44,6 +44,12 @@ static unsigned int bfq_class_idx(struct bfq_entity *entity) BFQ_DEFAULT_GRP_CLASS - 1; } +unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd) +{ + return bfqd->busy_queues[0] + bfqd->busy_queues[1] + + bfqd->busy_queues[2]; +} + static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, bool expiration); @@ -1513,7 +1519,7 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) struct bfq_sched_data *sd; struct bfq_queue *bfqq; - if (bfqd->busy_queues == 0) + if (bfq_tot_busy_queues(bfqd) == 0) return NULL; /* @@ -1665,10 +1671,7 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_clear_bfqq_busy(bfqq); - bfqd->busy_queues--; - - if (!bfqq->dispatched) - bfq_weights_tree_remove(bfqd, bfqq); + bfqd->busy_queues[bfqq->ioprio_class - 1]--; if (bfqq->wr_coeff > 1) bfqd->wr_busy_queues--; @@ -1676,6 +1679,9 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqg_stats_update_dequeue(bfqq_group(bfqq)); bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); + + if (!bfqq->dispatched) + bfq_weights_tree_remove(bfqd, bfqq); } /* @@ -1688,7 +1694,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_activate_bfqq(bfqd, bfqq); bfq_mark_bfqq_busy(bfqq); - bfqd->busy_queues++; + bfqd->busy_queues[bfqq->ioprio_class - 1]++; if (!bfqq->dispatched) if (bfqq->wr_coeff == 1) diff --git a/block/bio.c b/block/bio.c index 4db1008309ed..71a78d9fb8b7 100644 --- a/block/bio.c +++ b/block/bio.c @@ -753,6 +753,8 @@ EXPORT_SYMBOL(bio_add_pc_page); * @page: page to add * @len: length of the data to add * @off: offset of the data in @page + * @same_page: if %true only merge if the new data is in the same physical + * page as the last segment of the bio. * * Try to add the data at @page + @off to the last bvec of @bio. This is a * a useful optimisation for file systems with a block size smaller than the @@ -761,19 +763,25 @@ EXPORT_SYMBOL(bio_add_pc_page); * Return %true on success or %false on failure. 
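A condensed view of the merge criterion implemented in the __bio_try_merge_page() hunk below (the helper name here is illustrative, not from the patch): merging into the last bvec is allowed only when the new data is physically contiguous with its end, and, when same_page is set, only when it also lies in the same physical page.

	static bool can_merge_into_last_bvec(struct bio_vec *bv, struct page *page,
					     unsigned int off, bool same_page)
	{
		phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
					   bv->bv_offset + bv->bv_len - 1;
		phys_addr_t page_addr = page_to_phys(page);

		if (vec_end_addr + 1 != page_addr + off)
			return false;		/* not physically contiguous */
		if (same_page && (vec_end_addr & PAGE_MASK) != page_addr)
			return false;		/* caller insists on the same page */
		return true;
	}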
*/ bool __bio_try_merge_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int off) + unsigned int len, unsigned int off, bool same_page) { if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return false; if (bio->bi_vcnt > 0) { struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; + phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + + bv->bv_offset + bv->bv_len - 1; + phys_addr_t page_addr = page_to_phys(page); - if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) { - bv->bv_len += len; - bio->bi_iter.bi_size += len; - return true; - } + if (vec_end_addr + 1 != page_addr + off) + return false; + if (same_page && (vec_end_addr & PAGE_MASK) != page_addr) + return false; + + bv->bv_len += len; + bio->bi_iter.bi_size += len; + return true; } return false; } @@ -819,7 +827,7 @@ EXPORT_SYMBOL_GPL(__bio_add_page); int bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { - if (!__bio_try_merge_page(bio, page, len, offset)) { + if (!__bio_try_merge_page(bio, page, len, offset, false)) { if (bio_full(bio)) return 0; __bio_add_page(bio, page, len, offset); @@ -828,6 +836,40 @@ int bio_add_page(struct bio *bio, struct page *page, } EXPORT_SYMBOL(bio_add_page); +static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter) +{ + const struct bio_vec *bv = iter->bvec; + unsigned int len; + size_t size; + + if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len)) + return -EINVAL; + + len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count); + size = bio_add_page(bio, bv->bv_page, len, + bv->bv_offset + iter->iov_offset); + if (size == len) { + struct page *page; + int i; + + /* + * For the normal O_DIRECT case, we could skip grabbing this + * reference and then not have to put them again when IO + * completes. But this breaks some in-kernel users, like + * splicing to/from a loop device, where we release the pipe + * pages unconditionally. If we can fix that case, we can + * get rid of the get here and the need to call + * bio_release_pages() at IO completion time. + */ + mp_bvec_for_each_page(page, bv, i) + get_page(page); + iov_iter_advance(iter, size); + return 0; + } + + return -EINVAL; +} + #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *)) /** @@ -876,23 +918,35 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) } /** - * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio + * bio_iov_iter_get_pages - add user or kernel pages to a bio * @bio: bio to add pages to - * @iter: iov iterator describing the region to be mapped + * @iter: iov iterator describing the region to be added + * + * This takes either an iterator pointing to user memory, or one pointing to + * kernel pages (BVEC iterator). If we're adding user pages, we pin them and + * map them into the kernel. On IO completion, the caller should put those + * pages. For now, when adding kernel pages, we still grab a reference to the + * page. This isn't strictly needed for the common case, but some call paths + * end up releasing pages from eg a pipe and we can't easily control these. + * See comment in __bio_iov_bvec_add_pages(). * - * Pins pages from *iter and appends them to @bio's bvec array. The - * pages will have to be released using put_page() when done. * The function tries, but does not guarantee, to pin as many pages as - * fit into the bio, or are requested in *iter, whatever is smaller. - * If MM encounters an error pinning the requested pages, it stops. 
- * Error is returned only if 0 pages could be pinned. + * fit into the bio, or are requested in *iter, whatever is smaller. If + * MM encounters an error pinning the requested pages, it stops. Error + * is returned only if 0 pages could be pinned. */ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { + const bool is_bvec = iov_iter_is_bvec(iter); unsigned short orig_vcnt = bio->bi_vcnt; do { - int ret = __bio_iov_iter_get_pages(bio, iter); + int ret; + + if (is_bvec) + ret = __bio_iov_bvec_add_pages(bio, iter); + else + ret = __bio_iov_iter_get_pages(bio, iter); if (unlikely(ret)) return bio->bi_vcnt > orig_vcnt ? 0 : ret; @@ -1072,8 +1126,9 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) { int i; struct bio_vec *bvec; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { ssize_t ret; ret = copy_page_from_iter(bvec->bv_page, @@ -1103,8 +1158,9 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) { int i; struct bio_vec *bvec; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { ssize_t ret; ret = copy_page_to_iter(bvec->bv_page, @@ -1126,8 +1182,9 @@ void bio_free_pages(struct bio *bio) { struct bio_vec *bvec; int i; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) + bio_for_each_segment_all(bvec, bio, i, iter_all) __free_page(bvec->bv_page); } EXPORT_SYMBOL(bio_free_pages); @@ -1295,6 +1352,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, struct bio *bio; int ret; struct bio_vec *bvec; + struct bvec_iter_all iter_all; if (!iov_iter_count(iter)) return ERR_PTR(-EINVAL); @@ -1368,7 +1426,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, return bio; out_unmap: - bio_for_each_segment_all(bvec, bio, j) { + bio_for_each_segment_all(bvec, bio, j, iter_all) { put_page(bvec->bv_page); } bio_put(bio); @@ -1379,11 +1437,12 @@ static void __bio_unmap_user(struct bio *bio) { struct bio_vec *bvec; int i; + struct bvec_iter_all iter_all; /* * make sure we dirty pages we wrote to */ - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { if (bio_data_dir(bio) == READ) set_page_dirty_lock(bvec->bv_page); @@ -1475,8 +1534,9 @@ static void bio_copy_kern_endio_read(struct bio *bio) char *p = bio->bi_private; struct bio_vec *bvec; int i; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { memcpy(p, page_address(bvec->bv_page), bvec->bv_len); p += bvec->bv_len; } @@ -1585,8 +1645,9 @@ void bio_set_pages_dirty(struct bio *bio) { struct bio_vec *bvec; int i; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { if (!PageCompound(bvec->bv_page)) set_page_dirty_lock(bvec->bv_page); } @@ -1596,8 +1657,9 @@ static void bio_release_pages(struct bio *bio) { struct bio_vec *bvec; int i; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) + bio_for_each_segment_all(bvec, bio, i, iter_all) put_page(bvec->bv_page); } @@ -1644,8 +1706,9 @@ void bio_check_pages_dirty(struct bio *bio) struct bio_vec *bvec; unsigned long flags; int i; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) goto defer; } diff --git 
a/block/blk-cgroup.c b/block/blk-cgroup.c index 2bed5725aa03..77f37ef8ef06 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1269,7 +1269,7 @@ void blkcg_drain_queue(struct request_queue *q) * blkcg_exit_queue - exit and release blkcg part of request_queue * @q: request_queue being released * - * Called from blk_release_queue(). Responsible for exiting blkcg part. + * Called from blk_exit_queue(). Responsible for exiting blkcg part. */ void blkcg_exit_queue(struct request_queue *q) { diff --git a/block/blk-core.c b/block/blk-core.c index 3c5f61ceeb67..4673ebe42255 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t) kblockd_schedule_work(&q->timeout_work); } +static void blk_timeout_work(struct work_struct *work) +{ +} + /** * blk_alloc_queue_node - allocate a request queue * @gfp_mask: memory allocation flags @@ -496,8 +500,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) if (!q->stats) goto fail_stats; - q->backing_dev_info->ra_pages = - (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; + q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES; q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK; q->backing_dev_info->name = "block"; q->node = node_id; @@ -505,7 +508,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) timer_setup(&q->backing_dev_info->laptop_mode_wb_timer, laptop_mode_timer_fn, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); - INIT_WORK(&q->timeout_work, NULL); + INIT_WORK(&q->timeout_work, blk_timeout_work); INIT_LIST_HEAD(&q->icq_list); #ifdef CONFIG_BLK_CGROUP INIT_LIST_HEAD(&q->blkg_list); diff --git a/block/blk-flush.c b/block/blk-flush.c index a3fc7191c694..6e0f2d97fc6d 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error) blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error); spin_unlock_irqrestore(&fq->mq_flush_lock, flags); - blk_mq_run_hw_queue(hctx, true); + blk_mq_sched_restart(hctx); } /** diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index fc714ef402a6..2620baa1f699 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -72,6 +72,7 @@ #include #include #include +#include #include "blk-rq-qos.h" #include "blk-stat.h" @@ -591,6 +592,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) u64 now = ktime_to_ns(ktime_get()); bool issue_as_root = bio_issue_as_root_blkg(bio); bool enabled = false; + int inflight = 0; blkg = bio->bi_blkg; if (!blkg || !bio_flagged(bio, BIO_TRACKED)) @@ -601,6 +603,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) return; enabled = blk_iolatency_enabled(iolat->blkiolat); + if (!enabled) + return; + while (blkg && blkg->parent) { iolat = blkg_to_lat(blkg); if (!iolat) { @@ -609,8 +614,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) } rqw = &iolat->rq_wait; - atomic_dec(&rqw->inflight); - if (!enabled || iolat->min_lat_nsec == 0) + inflight = atomic_dec_return(&rqw->inflight); + WARN_ON_ONCE(inflight < 0); + if (iolat->min_lat_nsec == 0) goto next; iolatency_record_time(iolat, &bio->bi_issue, now, issue_as_root); @@ -754,10 +760,13 @@ int blk_iolatency_init(struct request_queue *q) return 0; } -static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) +/* + * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise + * return 0. 
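Rough shape of the enable/disable handling that the blk-iolatency hunks below implement (simplified; the real code also pins the blkg and its request_queue across blkg_conf_finish() before touching the counter): the setter only reports a 0 <-> nonzero transition of the minimum latency, and the caller applies that transition to blkiolat->enabled with the queue frozen.

	static void apply_latency_enable_sketch(struct blk_iolatency *blkiolat,
						struct request_queue *q, int enable)
	{
		if (!enable)
			return;			/* no transition, nothing to do */

		blk_mq_freeze_queue(q);
		if (enable == 1)
			atomic_inc(&blkiolat->enabled);
		else
			atomic_dec(&blkiolat->enabled);
		blk_mq_unfreeze_queue(q);
	}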
+ */ +static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) { struct iolatency_grp *iolat = blkg_to_lat(blkg); - struct blk_iolatency *blkiolat = iolat->blkiolat; u64 oldval = iolat->min_lat_nsec; iolat->min_lat_nsec = val; @@ -766,9 +775,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) BLKIOLATENCY_MAX_WIN_SIZE); if (!oldval && val) - atomic_inc(&blkiolat->enabled); + return 1; if (oldval && !val) - atomic_dec(&blkiolat->enabled); + return -1; + return 0; } static void iolatency_clear_scaling(struct blkcg_gq *blkg) @@ -800,6 +810,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, u64 lat_val = 0; u64 oldval; int ret; + int enable = 0; ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); if (ret) @@ -834,7 +845,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, blkg = ctx.blkg; oldval = iolat->min_lat_nsec; - iolatency_set_min_lat_nsec(blkg, lat_val); + enable = iolatency_set_min_lat_nsec(blkg, lat_val); + if (enable) { + WARN_ON_ONCE(!blk_get_queue(blkg->q)); + blkg_get(blkg); + } + if (oldval != iolat->min_lat_nsec) { iolatency_clear_scaling(blkg); } @@ -842,6 +858,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, ret = 0; out: blkg_conf_finish(&ctx); + if (ret == 0 && enable) { + struct iolatency_grp *tmp = blkg_to_lat(blkg); + struct blk_iolatency *blkiolat = tmp->blkiolat; + + blk_mq_freeze_queue(blkg->q); + + if (enable == 1) + atomic_inc(&blkiolat->enabled); + else if (enable == -1) + atomic_dec(&blkiolat->enabled); + else + WARN_ON_ONCE(1); + + blk_mq_unfreeze_queue(blkg->q); + + blkg_put(blkg); + blk_put_queue(blkg->q); + } return ret ?: nbytes; } @@ -977,8 +1011,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd) { struct iolatency_grp *iolat = pd_to_lat(pd); struct blkcg_gq *blkg = lat_to_blkg(iolat); + struct blk_iolatency *blkiolat = iolat->blkiolat; + int ret; - iolatency_set_min_lat_nsec(blkg, 0); + ret = iolatency_set_min_lat_nsec(blkg, 0); + if (ret == 1) + atomic_inc(&blkiolat->enabled); + if (ret == -1) + atomic_dec(&blkiolat->enabled); iolatency_clear_scaling(blkg); } diff --git a/block/blk-merge.c b/block/blk-merge.c index 71e9ac03f621..22467f475ab4 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -161,6 +161,73 @@ static inline unsigned get_max_io_size(struct request_queue *q, return sectors; } +static unsigned get_max_segment_size(struct request_queue *q, + unsigned offset) +{ + unsigned long mask = queue_segment_boundary(q); + + /* default segment boundary mask means no boundary limit */ + if (mask == BLK_SEG_BOUNDARY_MASK) + return queue_max_segment_size(q); + + return min_t(unsigned long, mask - (mask & offset) + 1, + queue_max_segment_size(q)); +} + +/* + * Split the bvec @bv into segments, and update all kinds of + * variables. + */ +static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv, + unsigned *nsegs, unsigned *last_seg_size, + unsigned *front_seg_size, unsigned *sectors) +{ + unsigned len = bv->bv_len; + unsigned total_len = 0; + unsigned new_nsegs = 0, seg_size = 0; + + /* + * Multi-page bvec may be too big to hold in one segment, so the + * current bvec has to be splitted as multiple segments. 
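For reference, the boundary arithmetic in get_max_segment_size() above works out as follows (hypothetical numbers, not from the patch):

	unsigned long mask   = 0xffff;	/* queue_segment_boundary(q): 64KB */
	unsigned int  max_sz = 65536;	/* queue_max_segment_size(q) */
	unsigned int  offset = 0x1f000;

	unsigned int room = min_t(unsigned long,
				  mask - (mask & offset) + 1, max_sz);
	/* room == 0x1000: the segment may not cross the next 64KB boundary. */

The splitting loop that follows then consumes the multi-page bvec in pieces of at most that size, counting one segment per piece.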
+ */ + while (len && new_nsegs + *nsegs < queue_max_segments(q)) { + seg_size = get_max_segment_size(q, bv->bv_offset + total_len); + seg_size = min(seg_size, len); + + new_nsegs++; + total_len += seg_size; + len -= seg_size; + + if ((bv->bv_offset + total_len) & queue_virt_boundary(q)) + break; + } + + if (!new_nsegs) + return !!len; + + /* update front segment size */ + if (!*nsegs) { + unsigned first_seg_size; + + if (new_nsegs == 1) + first_seg_size = get_max_segment_size(q, bv->bv_offset); + else + first_seg_size = queue_max_segment_size(q); + + if (*front_seg_size < first_seg_size) + *front_seg_size = first_seg_size; + } + + /* update other varibles */ + *last_seg_size = seg_size; + *nsegs += new_nsegs; + if (sectors) + *sectors += total_len >> 9; + + /* split in the middle of the bvec if len != 0 */ + return !!len; +} + static struct bio *blk_bio_segment_split(struct request_queue *q, struct bio *bio, struct bio_set *bs, @@ -174,7 +241,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, struct bio *new = NULL; const unsigned max_sectors = get_max_io_size(q, bio); - bio_for_each_segment(bv, bio, iter) { + bio_for_each_bvec(bv, bio, iter) { /* * If the queue doesn't support SG gaps and adding this * offset would create a gap, disallow it. @@ -189,8 +256,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, */ if (nsegs < queue_max_segments(q) && sectors < max_sectors) { - nsegs++; - sectors = max_sectors; + /* split in the middle of bvec */ + bv.bv_len = (max_sectors - sectors) << 9; + bvec_split_segs(q, &bv, &nsegs, + &seg_size, + &front_seg_size, + §ors); } goto split; } @@ -206,21 +277,28 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, bvprvp = &bvprv; sectors += bv.bv_len >> 9; + if (nsegs == 1 && seg_size > front_seg_size) + front_seg_size = seg_size; + continue; } new_segment: if (nsegs == queue_max_segments(q)) goto split; - if (nsegs == 1 && seg_size > front_seg_size) - front_seg_size = seg_size; - - nsegs++; bvprv = bv; bvprvp = &bvprv; - seg_size = bv.bv_len; - sectors += bv.bv_len >> 9; + if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) { + nsegs++; + seg_size = bv.bv_len; + sectors += bv.bv_len >> 9; + if (nsegs == 1 && seg_size > front_seg_size) + front_seg_size = seg_size; + } else if (bvec_split_segs(q, &bv, &nsegs, &seg_size, + &front_seg_size, §ors)) { + goto split; + } } do_split = false; @@ -233,8 +311,6 @@ split: bio = new; } - if (nsegs == 1 && seg_size > front_seg_size) - front_seg_size = seg_size; bio->bi_seg_front_size = front_seg_size; if (seg_size > bio->bi_seg_back_size) bio->bi_seg_back_size = seg_size; @@ -291,18 +367,20 @@ void blk_queue_split(struct request_queue *q, struct bio **bio) EXPORT_SYMBOL(blk_queue_split); static unsigned int __blk_recalc_rq_segments(struct request_queue *q, - struct bio *bio, - bool no_sg_merge) + struct bio *bio) { struct bio_vec bv, bvprv = { NULL }; int prev = 0; unsigned int seg_size, nr_phys_segs; + unsigned front_seg_size; struct bio *fbio, *bbio; struct bvec_iter iter; if (!bio) return 0; + front_seg_size = bio->bi_seg_front_size; + switch (bio_op(bio)) { case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: @@ -316,14 +394,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, seg_size = 0; nr_phys_segs = 0; for_each_bio(bio) { - bio_for_each_segment(bv, bio, iter) { - /* - * If SG merging is disabled, each bio vector is - * a segment - */ - if (no_sg_merge) - goto new_segment; - + bio_for_each_bvec(bv, bio, iter) { if (prev) { if (seg_size + bv.bv_len > 
queue_max_segment_size(q)) @@ -333,23 +404,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, seg_size += bv.bv_len; bvprv = bv; + + if (nr_phys_segs == 1 && seg_size > + front_seg_size) + front_seg_size = seg_size; + continue; } new_segment: - if (nr_phys_segs == 1 && seg_size > - fbio->bi_seg_front_size) - fbio->bi_seg_front_size = seg_size; - - nr_phys_segs++; bvprv = bv; prev = 1; - seg_size = bv.bv_len; + bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size, + &front_seg_size, NULL); } bbio = bio; } - if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size) - fbio->bi_seg_front_size = seg_size; + fbio->bi_seg_front_size = front_seg_size; if (seg_size > bbio->bi_seg_back_size) bbio->bi_seg_back_size = seg_size; @@ -358,33 +429,16 @@ new_segment: void blk_recalc_rq_segments(struct request *rq) { - bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE, - &rq->q->queue_flags); - - rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, - no_sg_merge); + rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio); } void blk_recount_segments(struct request_queue *q, struct bio *bio) { - unsigned short seg_cnt; - - /* estimate segment number by bi_vcnt for non-cloned bio */ - if (bio_flagged(bio, BIO_CLONED)) - seg_cnt = bio_segments(bio); - else - seg_cnt = bio->bi_vcnt; + struct bio *nxt = bio->bi_next; - if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && - (seg_cnt < queue_max_segments(q))) - bio->bi_phys_segments = seg_cnt; - else { - struct bio *nxt = bio->bi_next; - - bio->bi_next = NULL; - bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); - bio->bi_next = nxt; - } + bio->bi_next = NULL; + bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); + bio->bi_next = nxt; bio_set_flag(bio, BIO_SEG_VALID); } @@ -407,6 +461,54 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, return biovec_phys_mergeable(q, &end_bv, &nxt_bv); } +static inline struct scatterlist *blk_next_sg(struct scatterlist **sg, + struct scatterlist *sglist) +{ + if (!*sg) + return sglist; + + /* + * If the driver previously mapped a shorter list, we could see a + * termination bit prematurely unless it fully inits the sg table + * on each mapping. We KNOW that there must be more entries here + * or the driver would be buggy, so force clear the termination bit + * to avoid doing a full sg_init_table() in drivers for each command. 
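The effect of blk_bvec_map_sg() below can be pictured with hypothetical numbers (illustrative only, real code also honours the segment boundary mask): a 16KB multi-page bvec on a queue whose max segment size is 8KB produces two scatterlist entries.

	unsigned int nbytes = 16384, max_seg = 8192, nsegs = 0;

	while (nbytes) {
		unsigned int seg = min(nbytes, max_seg);

		nbytes -= seg;
		nsegs++;	/* one sg_set_page() per piece in the real code */
	}
	/* nsegs == 2 */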
+ */ + sg_unmark_end(*sg); + return sg_next(*sg); +} + +static unsigned blk_bvec_map_sg(struct request_queue *q, + struct bio_vec *bvec, struct scatterlist *sglist, + struct scatterlist **sg) +{ + unsigned nbytes = bvec->bv_len; + unsigned nsegs = 0, total = 0, offset = 0; + + while (nbytes > 0) { + unsigned seg_size; + struct page *pg; + unsigned idx; + + *sg = blk_next_sg(sg, sglist); + + seg_size = get_max_segment_size(q, bvec->bv_offset + total); + seg_size = min(nbytes, seg_size); + + offset = (total + bvec->bv_offset) % PAGE_SIZE; + idx = (total + bvec->bv_offset) / PAGE_SIZE; + pg = bvec_nth_page(bvec->bv_page, idx); + + sg_set_page(*sg, pg, seg_size, offset); + + total += seg_size; + nbytes -= seg_size; + nsegs++; + } + + return nsegs; +} + static inline void __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, struct scatterlist *sglist, struct bio_vec *bvprv, @@ -424,25 +526,12 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, (*sg)->length += nbytes; } else { new_segment: - if (!*sg) - *sg = sglist; - else { - /* - * If the driver previously mapped a shorter - * list, we could see a termination bit - * prematurely unless it fully inits the sg - * table on each mapping. We KNOW that there - * must be more entries here or the driver - * would be buggy, so force clear the - * termination bit to avoid doing a full - * sg_init_table() in drivers for each command. - */ - sg_unmark_end(*sg); - *sg = sg_next(*sg); - } - - sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); - (*nsegs)++; + if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) { + *sg = blk_next_sg(sg, sglist); + sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); + (*nsegs) += 1; + } else + (*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg); } *bvprv = *bvec; } @@ -464,7 +553,7 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, int nsegs = 0; for_each_bio(bio) - bio_for_each_segment(bvec, bio, iter) + bio_for_each_bvec(bvec, bio, iter) __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, &nsegs); diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index f8120832ca7b..ec1d18cb643c 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -115,7 +115,6 @@ static int queue_pm_only_show(void *data, struct seq_file *m) static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(STOPPED), QUEUE_FLAG_NAME(DYING), - QUEUE_FLAG_NAME(BIDI), QUEUE_FLAG_NAME(NOMERGES), QUEUE_FLAG_NAME(SAME_COMP), QUEUE_FLAG_NAME(FAIL_IO), @@ -128,11 +127,9 @@ static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(DEAD), QUEUE_FLAG_NAME(INIT_DONE), - QUEUE_FLAG_NAME(NO_SG_MERGE), QUEUE_FLAG_NAME(POLL), QUEUE_FLAG_NAME(WC), QUEUE_FLAG_NAME(FUA), - QUEUE_FLAG_NAME(FLUSH_NQ), QUEUE_FLAG_NAME(DAX), QUEUE_FLAG_NAME(STATS), QUEUE_FLAG_NAME(POLL_STATS), @@ -251,7 +248,6 @@ static const char *const alloc_policy_name[] = { static const char *const hctx_flag_name[] = { HCTX_FLAG_NAME(SHOULD_MERGE), HCTX_FLAG_NAME(TAG_SHARED), - HCTX_FLAG_NAME(SG_MERGE), HCTX_FLAG_NAME(BLOCKING), HCTX_FLAG_NAME(NO_SCHED), }; @@ -839,6 +835,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { static bool debugfs_create_files(struct dentry *parent, void *data, const struct blk_mq_debugfs_attr *attr) { + if (IS_ERR_OR_NULL(parent)) + return false; + d_inode(parent)->i_private = data; for (; attr->name; attr++) { diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 140933e4a7d1..40905539afed 100644 --- 
a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c @@ -321,7 +321,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) { struct elevator_queue *e = q->elevator; struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); - struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu); + struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); bool ret = false; enum hctx_type type; diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 2089c6c62f44..a4931fc7be8a 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -170,7 +170,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) data->ctx = blk_mq_get_ctx(data->q); data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, - data->ctx->cpu); + data->ctx); tags = blk_mq_tags_from_data(data); if (data->flags & BLK_MQ_REQ_RESERVED) bt = &tags->breserved_tags; diff --git a/block/blk-mq.c b/block/blk-mq.c index 8f5b533764ca..a9c181603cbd 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -331,7 +331,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, #if defined(CONFIG_BLK_DEV_INTEGRITY) rq->nr_integrity_segments = 0; #endif - rq->special = NULL; /* tag was already set */ rq->extra_len = 0; WRITE_ONCE(rq->deadline, 0); @@ -340,7 +339,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, rq->end_io = NULL; rq->end_io_data = NULL; - rq->next_rq = NULL; data->ctx->rq_dispatched[op_is_sync(op)]++; refcount_set(&rq->ref, 1); @@ -364,7 +362,7 @@ static struct request *blk_mq_get_request(struct request_queue *q, } if (likely(!data->hctx)) data->hctx = blk_mq_map_queue(q, data->cmd_flags, - data->ctx->cpu); + data->ctx); if (data->cmd_flags & REQ_NOWAIT) data->flags |= BLK_MQ_REQ_NOWAIT; @@ -550,8 +548,6 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error) rq_qos_done(rq->q, rq); rq->end_io(rq, error); } else { - if (unlikely(blk_bidi_rq(rq))) - blk_mq_free_request(rq->next_rq); blk_mq_free_request(rq); } } @@ -737,12 +733,20 @@ static void blk_mq_requeue_work(struct work_struct *work) spin_unlock_irq(&q->requeue_lock); list_for_each_entry_safe(rq, next, &rq_list, queuelist) { - if (!(rq->rq_flags & RQF_SOFTBARRIER)) + if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) continue; rq->rq_flags &= ~RQF_SOFTBARRIER; list_del_init(&rq->queuelist); - blk_mq_sched_insert_request(rq, true, false, false); + /* + * If RQF_DONTPREP, rq has contained some driver specific + * data, so insert it to hctx dispatch list to avoid any + * merge. + */ + if (rq->rq_flags & RQF_DONTPREP) + blk_mq_request_bypass_insert(rq, false); + else + blk_mq_sched_insert_request(rq, true, false, false); } while (!list_empty(&rq_list)) { @@ -2061,7 +2065,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, struct blk_mq_tags *tags; int node; - node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx); + node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); if (node == NUMA_NO_NODE) node = set->numa_node; @@ -2117,7 +2121,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, size_t rq_size, left; int node; - node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx); + node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); if (node == NUMA_NO_NODE) node = set->numa_node; @@ -2416,7 +2420,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) * If the cpu isn't present, the cpu is mapped to first hctx. 
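Condensed form of the software-queue remapping that the loop below performs (names from the hunks; simplified): every per-CPU ctx caches one hardware queue pointer per map type, and types without their own queues fall back to the default map.

	for (j = 0; j < HCTX_MAX_TYPES; j++) {
		if (j < set->nr_maps && set->map[j].nr_queues)
			ctx->hctxs[j] = blk_mq_map_queue_type(q, j, i);
		else
			ctx->hctxs[j] = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, i);
	}

With that cache in place, blk_mq_map_queue() further down reduces to an array lookup keyed by the request type.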
*/ for_each_possible_cpu(i) { - hctx_idx = set->map[0].mq_map[i]; + hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i]; /* unmapped hw queue can be remapped after CPU topo changed */ if (!set->tags[hctx_idx] && !__blk_mq_alloc_rq_map(set, hctx_idx)) { @@ -2426,16 +2430,19 @@ static void blk_mq_map_swqueue(struct request_queue *q) * case, remap the current ctx to hctx[0] which * is guaranteed to always have tags allocated */ - set->map[0].mq_map[i] = 0; + set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0; } ctx = per_cpu_ptr(q->queue_ctx, i); for (j = 0; j < set->nr_maps; j++) { - if (!set->map[j].nr_queues) + if (!set->map[j].nr_queues) { + ctx->hctxs[j] = blk_mq_map_queue_type(q, + HCTX_TYPE_DEFAULT, i); continue; + } hctx = blk_mq_map_queue_type(q, j, i); - + ctx->hctxs[j] = hctx; /* * If the CPU is already set in the mask, then we've * mapped this one already. This can happen if @@ -2455,6 +2462,10 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ BUG_ON(!hctx->nr_ctx); } + + for (; j < HCTX_MAX_TYPES; j++) + ctx->hctxs[j] = blk_mq_map_queue_type(q, + HCTX_TYPE_DEFAULT, i); } mutex_unlock(&q->sysfs_lock); @@ -2726,7 +2737,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, int node; struct blk_mq_hw_ctx *hctx; - node = blk_mq_hw_queue_to_node(&set->map[0], i); + node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i); /* * If the hw queue has been mapped to another numa node, * we need to realloc the hctx. If allocation fails, fallback @@ -2830,9 +2841,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, set->map[HCTX_TYPE_POLL].nr_queues) blk_queue_flag_set(QUEUE_FLAG_POLL, q); - if (!(set->flags & BLK_MQ_F_SG_MERGE)) - blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q); - q->sg_reserved_size = INT_MAX; INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); @@ -2960,7 +2968,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) return set->ops->map_queues(set); } else { BUG_ON(set->nr_maps > 1); - return blk_mq_map_queues(&set->map[0]); + return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); } } @@ -3082,6 +3090,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) if (!set) return -EINVAL; + if (q->nr_requests == nr) + return 0; + blk_mq_freeze_queue(q); blk_mq_quiesce_queue(q); @@ -3227,7 +3238,7 @@ fallback: pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n", nr_hw_queues, prev_nr_hw_queues); set->nr_hw_queues = prev_nr_hw_queues; - blk_mq_map_queues(&set->map[0]); + blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); goto fallback; } blk_mq_map_swqueue(q); diff --git a/block/blk-mq.h b/block/blk-mq.h index d943d46b0785..c11353a3749d 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -23,6 +23,7 @@ struct blk_mq_ctx { unsigned int cpu; unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; /* incremented at dispatch time */ unsigned long rq_dispatched[2]; @@ -36,7 +37,6 @@ struct blk_mq_ctx { struct kobject kobj; } ____cacheline_aligned_in_smp; -void blk_mq_freeze_queue(struct request_queue *q); void blk_mq_free_queue(struct request_queue *q); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); void blk_mq_wake_waiters(struct request_queue *q); @@ -97,26 +97,23 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue * * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue * @q: request queue * @flags: request command flags - * @cpu: CPU + * @cpu: cpu ctx */ static inline struct blk_mq_hw_ctx 
*blk_mq_map_queue(struct request_queue *q, unsigned int flags, - unsigned int cpu) + struct blk_mq_ctx *ctx) { enum hctx_type type = HCTX_TYPE_DEFAULT; - if ((flags & REQ_HIPRI) && - q->tag_set->nr_maps > HCTX_TYPE_POLL && - q->tag_set->map[HCTX_TYPE_POLL].nr_queues && - test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) type = HCTX_TYPE_POLL; - - else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && - q->tag_set->nr_maps > HCTX_TYPE_READ && - q->tag_set->map[HCTX_TYPE_READ].nr_queues) + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) type = HCTX_TYPE_READ; - return blk_mq_map_queue_type(q, type, cpu); + return ctx->hctxs[type]; } /* diff --git a/block/blk-settings.c b/block/blk-settings.c index 3e7038e475ee..6375afaedcec 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -799,15 +799,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask) } EXPORT_SYMBOL(blk_queue_update_dma_alignment); -void blk_queue_flush_queueable(struct request_queue *q, bool queueable) -{ - if (queueable) - blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q); - else - blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q); -} -EXPORT_SYMBOL_GPL(blk_queue_flush_queueable); - /** * blk_set_queue_depth - tell the block layer about the device queue depth * @q: the request queue for the device diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 590d1ef2f961..59685918167e 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -468,6 +468,9 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page, else if (val >= 0) val *= 1000ULL; + if (wbt_get_min_lat(q) == val) + return count; + /* * Ensure that the queue is idled, in case the latency update * ends up either enabling or disabling wbt completely. We can't @@ -817,21 +820,16 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) } /** - * __blk_release_queue - release a request queue when it is no longer needed + * __blk_release_queue - release a request queue * @work: pointer to the release_work member of the request queue to be released * * Description: - * blk_release_queue is the counterpart of blk_init_queue(). It should be - * called when a request queue is being released; typically when a block - * device is being de-registered. Its primary task it to free the queue - * itself. - * - * Notes: - * The low level driver must have finished any outstanding requests first - * via blk_cleanup_queue(). - * - * Although blk_release_queue() may be called with preemption disabled, - * __blk_release_queue() may sleep. + * This function is called when a block device is being unregistered. The + * process of releasing a request queue starts with blk_cleanup_queue, which + * set the appropriate flags and then calls blk_put_queue, that decrements + * the reference counter of the request queue. Once the reference counter + * of the request queue reaches zero, blk_release_queue is called to release + * all allocated resources of the request queue. 
*/ static void __blk_release_queue(struct work_struct *work) { diff --git a/block/blk.h b/block/blk.h index 848278c52030..5d636ee41663 100644 --- a/block/blk.h +++ b/block/blk.h @@ -38,7 +38,7 @@ extern struct ida blk_queue_ida; static inline struct blk_flush_queue * blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) { - return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq; + return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; } static inline void __blk_get_queue(struct request_queue *q) diff --git a/block/bounce.c b/block/bounce.c index ffb9e9ecfa7e..47eb7e936e22 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -165,11 +165,12 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool) struct bio_vec *bvec, orig_vec; int i; struct bvec_iter orig_iter = bio_orig->bi_iter; + struct bvec_iter_all iter_all; /* * free up bounce indirect pages used */ - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { orig_vec = bio_iter_iovec(bio_orig, orig_iter); if (bvec->bv_page != orig_vec.bv_page) { dec_zone_page_state(bvec->bv_page, NR_BOUNCE); @@ -313,7 +314,12 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL : &bounce_bio_set); - bio_for_each_segment_all(to, bio, i) { + /* + * Bvec table can't be updated by bio_for_each_segment_all(), + * so retrieve bvec from the table directly. This way is safe + * because the 'bio' is single-page bvec. + */ + for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) { struct page *page = to->bv_page; if (page_to_pfn(page) <= q->limits.bounce_pfn) diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 192129856342..005e2b75d775 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -51,11 +51,40 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, fmode_t mode) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); + int ret; job->request_len = hdr->request_len; job->request = memdup_user(uptr64(hdr->request), hdr->request_len); + if (IS_ERR(job->request)) + return PTR_ERR(job->request); + + if (hdr->dout_xfer_len && hdr->din_xfer_len) { + job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0); + if (IS_ERR(job->bidi_rq)) { + ret = PTR_ERR(job->bidi_rq); + goto out; + } + + ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, + uptr64(hdr->din_xferp), hdr->din_xfer_len, + GFP_KERNEL); + if (ret) + goto out_free_bidi_rq; + + job->bidi_bio = job->bidi_rq->bio; + } else { + job->bidi_rq = NULL; + job->bidi_bio = NULL; + } - return PTR_ERR_OR_ZERO(job->request); + return 0; + +out_free_bidi_rq: + if (job->bidi_rq) + blk_put_request(job->bidi_rq); +out: + kfree(job->request); + return ret; } static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr) @@ -93,7 +122,7 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr) /* we assume all request payload was transferred, residual == 0 */ hdr->dout_resid = 0; - if (rq->next_rq) { + if (job->bidi_rq) { unsigned int rsp_len = job->reply_payload.payload_len; if (WARN_ON(job->reply_payload_rcv_len > rsp_len)) @@ -111,6 +140,11 @@ static void bsg_transport_free_rq(struct request *rq) { struct bsg_job *job = blk_mq_rq_to_pdu(rq); + if (job->bidi_rq) { + blk_rq_unmap_user(job->bidi_bio); + blk_put_request(job->bidi_rq); + } + kfree(job->request); } @@ -200,7 +234,6 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) */ static bool bsg_prepare_job(struct device *dev, struct request *req) 
{ - struct request *rsp = req->next_rq; struct bsg_job *job = blk_mq_rq_to_pdu(req); int ret; @@ -211,8 +244,8 @@ static bool bsg_prepare_job(struct device *dev, struct request *req) if (ret) goto failjob_rls_job; } - if (rsp && rsp->bio) { - ret = bsg_map_buffer(&job->reply_payload, rsp); + if (job->bidi_rq) { + ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq); if (ret) goto failjob_rls_rqst_payload; } @@ -369,7 +402,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, } q->queuedata = dev; - blk_queue_flag_set(QUEUE_FLAG_BIDI, q); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); ret = bsg_register_queue(q, dev, name, &bsg_transport_ops); diff --git a/block/bsg.c b/block/bsg.c index 50e5f8f666f2..f306853c6b08 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -74,6 +74,11 @@ static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, { struct scsi_request *sreq = scsi_req(rq); + if (hdr->dout_xfer_len && hdr->din_xfer_len) { + pr_warn_once("BIDI support in bsg has been removed.\n"); + return -EOPNOTSUPP; + } + sreq->cmd_len = hdr->request_len; if (sreq->cmd_len > BLK_MAX_CDB) { sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL); @@ -114,14 +119,10 @@ static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr) hdr->response_len = len; } - if (rq->next_rq) { - hdr->dout_resid = sreq->resid_len; - hdr->din_resid = scsi_req(rq->next_rq)->resid_len; - } else if (rq_data_dir(rq) == READ) { + if (rq_data_dir(rq) == READ) hdr->din_resid = sreq->resid_len; - } else { + else hdr->dout_resid = sreq->resid_len; - } return ret; } @@ -138,32 +139,35 @@ static const struct bsg_ops bsg_scsi_ops = { .free_rq = bsg_scsi_free_rq, }; -static struct request * -bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode) +static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg) { - struct request *rq, *next_rq = NULL; + struct request *rq; + struct bio *bio; + struct sg_io_v4 hdr; int ret; - if (!q->bsg_dev.class_dev) - return ERR_PTR(-ENXIO); + if (copy_from_user(&hdr, uarg, sizeof(hdr))) + return -EFAULT; - if (hdr->guard != 'Q') - return ERR_PTR(-EINVAL); + if (!q->bsg_dev.class_dev) + return -ENXIO; - ret = q->bsg_dev.ops->check_proto(hdr); + if (hdr.guard != 'Q') + return -EINVAL; + ret = q->bsg_dev.ops->check_proto(&hdr); if (ret) - return ERR_PTR(ret); + return ret; - rq = blk_get_request(q, hdr->dout_xfer_len ? + rq = blk_get_request(q, hdr.dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); if (IS_ERR(rq)) - return rq; + return PTR_ERR(rq); - ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode); + ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode); if (ret) - goto out; + return ret; - rq->timeout = msecs_to_jiffies(hdr->timeout); + rq->timeout = msecs_to_jiffies(hdr.timeout); if (!rq->timeout) rq->timeout = q->sg_timeout; if (!rq->timeout) @@ -171,68 +175,28 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode) if (rq->timeout < BLK_MIN_SG_TIMEOUT) rq->timeout = BLK_MIN_SG_TIMEOUT; - if (hdr->dout_xfer_len && hdr->din_xfer_len) { - if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { - ret = -EOPNOTSUPP; - goto out; - } - - pr_warn_once( - "BIDI support in bsg has been deprecated and might be removed. 
" - "Please report your use case to linux-scsi@vger.kernel.org\n"); - - next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0); - if (IS_ERR(next_rq)) { - ret = PTR_ERR(next_rq); - goto out; - } - - rq->next_rq = next_rq; - ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp), - hdr->din_xfer_len, GFP_KERNEL); - if (ret) - goto out_free_nextrq; - } - - if (hdr->dout_xfer_len) { - ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp), - hdr->dout_xfer_len, GFP_KERNEL); - } else if (hdr->din_xfer_len) { - ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp), - hdr->din_xfer_len, GFP_KERNEL); + if (hdr.dout_xfer_len) { + ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp), + hdr.dout_xfer_len, GFP_KERNEL); + } else if (hdr.din_xfer_len) { + ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp), + hdr.din_xfer_len, GFP_KERNEL); } if (ret) - goto out_unmap_nextrq; - return rq; - -out_unmap_nextrq: - if (rq->next_rq) - blk_rq_unmap_user(rq->next_rq->bio); -out_free_nextrq: - if (rq->next_rq) - blk_put_request(rq->next_rq); -out: - q->bsg_dev.ops->free_rq(rq); - blk_put_request(rq); - return ERR_PTR(ret); -} + goto out_free_rq; -static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, - struct bio *bio, struct bio *bidi_bio) -{ - int ret; - - ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr); - - if (rq->next_rq) { - blk_rq_unmap_user(bidi_bio); - blk_put_request(rq->next_rq); - } + bio = rq->bio; + blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL)); + ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr); blk_rq_unmap_user(bio); + +out_free_rq: rq->q->bsg_dev.ops->free_rq(rq); blk_put_request(rq); + if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr))) + return -EFAULT; return ret; } @@ -367,31 +331,39 @@ static int bsg_release(struct inode *inode, struct file *file) return bsg_put_device(bd); } +static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg) +{ + return put_user(bd->max_queue, uarg); +} + +static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg) +{ + int queue; + + if (get_user(queue, uarg)) + return -EFAULT; + if (queue < 1) + return -EINVAL; + + spin_lock_irq(&bd->lock); + bd->max_queue = queue; + spin_unlock_irq(&bd->lock); + return 0; +} + static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct bsg_device *bd = file->private_data; - int __user *uarg = (int __user *) arg; - int ret; + void __user *uarg = (void __user *) arg; switch (cmd) { - /* - * our own ioctls - */ + /* + * Our own ioctls + */ case SG_GET_COMMAND_Q: - return put_user(bd->max_queue, uarg); - case SG_SET_COMMAND_Q: { - int queue; - - if (get_user(queue, uarg)) - return -EFAULT; - if (queue < 1) - return -EINVAL; - - spin_lock_irq(&bd->lock); - bd->max_queue = queue; - spin_unlock_irq(&bd->lock); - return 0; - } + return bsg_get_command_q(bd, uarg); + case SG_SET_COMMAND_Q: + return bsg_set_command_q(bd, uarg); /* * SCSI/sg ioctls @@ -404,36 +376,10 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case SG_GET_RESERVED_SIZE: case SG_SET_RESERVED_SIZE: case SG_EMULATED_HOST: - case SCSI_IOCTL_SEND_COMMAND: { - void __user *uarg = (void __user *) arg; + case SCSI_IOCTL_SEND_COMMAND: return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); - } - case SG_IO: { - struct request *rq; - struct bio *bio, *bidi_bio = NULL; - struct sg_io_v4 hdr; - int at_head; - - if (copy_from_user(&hdr, uarg, sizeof(hdr))) - return -EFAULT; - - rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode); - if 
(IS_ERR(rq)) - return PTR_ERR(rq); - - bio = rq->bio; - if (rq->next_rq) - bidi_bio = rq->next_rq->bio; - - at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL)); - blk_execute_rq(bd->queue, NULL, rq, at_head); - ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); - - if (copy_to_user(uarg, &hdr, sizeof(hdr))) - return -EFAULT; - - return ret; - } + case SG_IO: + return bsg_sg_io(bd->queue, file->f_mode, uarg); default: return -ENOTTY; } diff --git a/block/elevator.c b/block/elevator.c index f05e90d4e695..d6d835a08de6 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -667,8 +667,11 @@ static int __elevator_change(struct request_queue *q, const char *name) /* * Special case for mq, turn off scheduling */ - if (!strncmp(name, "none", 4)) + if (!strncmp(name, "none", 4)) { + if (!q->elevator) + return 0; return elevator_switch(q, NULL); + } strlcpy(elevator_name, name, sizeof(elevator_name)); e = elevator_get(q, strstrip(elevator_name), true); diff --git a/block/genhd.c b/block/genhd.c index 1dd8fd6613b8..703267865f14 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -365,8 +365,8 @@ int register_blkdev(unsigned int major, const char *name) } if (index == 0) { - printk("register_blkdev: failed to get major for %s\n", - name); + printk("%s: failed to get major for %s\n", + __func__, name); ret = -EBUSY; goto out; } @@ -375,8 +375,8 @@ int register_blkdev(unsigned int major, const char *name) } if (major >= BLKDEV_MAJOR_MAX) { - pr_err("register_blkdev: major requested (%u) is greater than the maximum (%u) for %s\n", - major, BLKDEV_MAJOR_MAX-1, name); + pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n", + __func__, major, BLKDEV_MAJOR_MAX-1, name); ret = -EINVAL; goto out; @@ -655,10 +655,12 @@ exit: kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); disk_part_iter_exit(&piter); - err = sysfs_create_link(&ddev->kobj, - &disk->queue->backing_dev_info->dev->kobj, - "bdi"); - WARN_ON(err); + if (disk->queue->backing_dev_info->dev) { + err = sysfs_create_link(&ddev->kobj, + &disk->queue->backing_dev_info->dev->kobj, + "bdi"); + WARN_ON(err); + } } /** diff --git a/certs/system_keyring.c b/certs/system_keyring.c index 81728717523d..c05c29ae4d5d 100644 --- a/certs/system_keyring.c +++ b/certs/system_keyring.c @@ -24,6 +24,9 @@ static struct key *builtin_trusted_keys; #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING static struct key *secondary_trusted_keys; #endif +#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING +static struct key *platform_trusted_keys; +#endif extern __initconst const u8 system_certificate_list[]; extern __initconst const unsigned long system_certificate_list_size; @@ -237,11 +240,22 @@ int verify_pkcs7_signature(const void *data, size_t len, #else trusted_keys = builtin_trusted_keys; #endif + } else if (trusted_keys == VERIFY_USE_PLATFORM_KEYRING) { +#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING + trusted_keys = platform_trusted_keys; +#else + trusted_keys = NULL; +#endif + if (!trusted_keys) { + ret = -ENOKEY; + pr_devel("PKCS#7 platform keyring is not available\n"); + goto error; + } } ret = pkcs7_validate_trust(pkcs7, trusted_keys); if (ret < 0) { if (ret == -ENOKEY) - pr_err("PKCS#7 signature not signed with a trusted key\n"); + pr_devel("PKCS#7 signature not signed with a trusted key\n"); goto error; } @@ -266,3 +280,10 @@ error: EXPORT_SYMBOL_GPL(verify_pkcs7_signature); #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ + +#ifdef CONFIG_INTEGRITY_PLATFORM_KEYRING +void __init set_platform_trusted_keys(struct key *keyring) +{ + platform_trusted_keys = keyring; +} 
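For context, a hypothetical caller of the new platform-keyring hook above could ask verify_pkcs7_signature() to trust that keyring by passing the VERIFY_USE_PLATFORM_KEYRING cookie (the buffer names and usage value here are illustrative, not from the patch):

	ret = verify_pkcs7_signature(data, data_len, sig, sig_len,
				     VERIFY_USE_PLATFORM_KEYRING,
				     VERIFYING_UNSPECIFIED_SIGNATURE,
				     NULL, NULL);
	if (ret == -ENOKEY)
		pr_devel("platform keyring unavailable or key not trusted\n");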
+#endif diff --git a/crypto/Kconfig b/crypto/Kconfig index 9511144ac7b5..bbab6bf33519 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -168,6 +168,16 @@ config CRYPTO_MANAGER_DISABLE_TESTS Disable run-time self tests that normally take place at algorithm registration. +config CRYPTO_MANAGER_EXTRA_TESTS + bool "Enable extra run-time crypto self tests" + depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS + help + Enable extra run-time self tests of registered crypto algorithms, + including randomized fuzz tests. + + This is intended for developer use only, as these tests take much + longer to run than the normal self tests. + config CRYPTO_GF128MUL tristate "GF(2^128) multiplication functions" help @@ -642,7 +652,7 @@ config CRYPTO_CRC32_PCLMUL From Intel Westmere and AMD Bulldozer processor with SSE4.2 and PCLMULQDQ supported, the processor will support CRC32 PCLMULQDQ implementation using hardware accelerated PCLMULQDQ - instruction. This option will create 'crc32-plcmul' module, + instruction. This option will create 'crc32-pclmul' module, which will enable any routine to use the CRC-32-IEEE 802.3 checksum and gain better performance as compared with the table implementation. @@ -671,7 +681,7 @@ config CRYPTO_CRCT10DIF_PCLMUL For x86_64 processors with SSE4.2 and PCLMULQDQ supported, CRC T10 DIF PCLMULQDQ computation can be hardware accelerated PCLMULQDQ instruction. This option will create - 'crct10dif-plcmul' module, which is faster when computing the + 'crct10dif-pclmul' module, which is faster when computing the crct10dif checksum as compared with the generic table implementation. config CRYPTO_CRCT10DIF_VPMSUM diff --git a/crypto/Makefile b/crypto/Makefile index 799ed5e94606..fb5bf2a3a666 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -128,7 +128,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o -obj-$(CONFIG_CRYPTO_LZO) += lzo.o +obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o obj-$(CONFIG_CRYPTO_LZ4) += lz4.o obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o obj-$(CONFIG_CRYPTO_842) += 842.o diff --git a/crypto/aead.c b/crypto/aead.c index 189c52d1f63a..4908b5e846f0 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aead *tfm, else err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY); return err; + } crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; diff --git a/crypto/aegis.h b/crypto/aegis.h index 405e025fc906..41a3090cda8e 100644 --- a/crypto/aegis.h +++ b/crypto/aegis.h @@ -1,14 +1,9 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * AEGIS common definitions * * Copyright (c) 2018 Ondrej Mosnacek * Copyright (c) 2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
*/ #ifndef _CRYPTO_AEGIS_H diff --git a/crypto/aegis128.c b/crypto/aegis128.c index c22f4414856d..3718a8341303 100644 --- a/crypto/aegis128.c +++ b/crypto/aegis128.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The AEGIS-128 Authenticated-Encryption Algorithm * * Copyright (c) 2017-2018 Ondrej Mosnacek * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #include @@ -290,19 +286,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state, const struct aegis128_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, chunksize); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c index b6fb21ebdc3e..275a8616d71b 100644 --- a/crypto/aegis128l.c +++ b/crypto/aegis128l.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The AEGIS-128L Authenticated-Encryption Algorithm * * Copyright (c) 2017-2018 Ondrej Mosnacek * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #include @@ -353,19 +349,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state, const struct aegis128l_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, chunksize); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/aegis256.c b/crypto/aegis256.c index 11f0f8ec9c7c..ecd6b7f34a2d 100644 --- a/crypto/aegis256.c +++ b/crypto/aegis256.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The AEGIS-256 Authenticated-Encryption Algorithm * * Copyright (c) 2017-2018 Ondrej Mosnacek * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. 
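The three aegis process_crypt hunks (128 and 128L above, 256 below) converge on the same walking pattern; stated once in isolation, mirroring the hunks: when more data is still to come, only a stride-aligned amount is encrypted, and the leftover is handed back to the walk.

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		ops->crypt_chunk(state, walk.dst.virt.addr,
				 walk.src.virt.addr, nbytes);

		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}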
*/ #include @@ -303,19 +299,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state, const struct aegis256_ops *ops) { struct skcipher_walk walk; - u8 *src, *dst; - unsigned int chunksize; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; - chunksize = walk.nbytes; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, chunksize); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 17eb09d222ff..edca0998b2a4 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) int af_alg_release(struct socket *sock) { - if (sock->sk) + if (sock->sk) { sock_put(sock->sk); + sock->sk = NULL; + } return 0; } EXPORT_SYMBOL_GPL(af_alg_release); @@ -302,8 +304,6 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) if (err) goto unlock; - sk2->sk_family = PF_ALG; - if (nokey || !ask->refcnt++) sock_hold(sk); ask->nokey_refcnt += nokey; @@ -380,7 +380,6 @@ static int alg_create(struct net *net, struct socket *sock, int protocol, sock->ops = &alg_proto_ops; sock_init_data(sock, sk); - sk->sk_family = PF_ALG; sk->sk_destruct = alg_sock_destruct; return 0; @@ -425,12 +424,12 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) } EXPORT_SYMBOL_GPL(af_alg_make_sg); -void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new) +static void af_alg_link_sg(struct af_alg_sgl *sgl_prev, + struct af_alg_sgl *sgl_new) { sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); } -EXPORT_SYMBOL_GPL(af_alg_link_sg); void af_alg_free_sg(struct af_alg_sgl *sgl) { @@ -441,7 +440,7 @@ void af_alg_free_sg(struct af_alg_sgl *sgl) } EXPORT_SYMBOL_GPL(af_alg_free_sg); -int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) +static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) { struct cmsghdr *cmsg; @@ -480,7 +479,6 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) return 0; } -EXPORT_SYMBOL_GPL(af_alg_cmsg_send); /** * af_alg_alloc_tsgl - allocate the TX SGL @@ -488,7 +486,7 @@ EXPORT_SYMBOL_GPL(af_alg_cmsg_send); * @sk socket of connection to user space * @return: 0 upon success, < 0 upon error */ -int af_alg_alloc_tsgl(struct sock *sk) +static int af_alg_alloc_tsgl(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; @@ -517,7 +515,6 @@ int af_alg_alloc_tsgl(struct sock *sk) return 0; } -EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); /** * aead_count_tsgl - Count number of TX SG entries @@ -532,17 +529,17 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); */ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) { - struct alg_sock *ask = alg_sk(sk); - struct af_alg_ctx *ctx = ask->private; - struct af_alg_tsgl *sgl, *tmp; + const struct alg_sock *ask = alg_sk(sk); + const struct af_alg_ctx *ctx = ask->private; + const struct af_alg_tsgl *sgl; unsigned int i; unsigned int sgl_count = 0; if (!bytes) return 0; - list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { - struct scatterlist *sg = sgl->sg; + list_for_each_entry(sgl, &ctx->tsgl_list, list) { + const struct scatterlist *sg = 
sgl->sg; for (i = 0; i < sgl->cur; i++) { size_t bytes_count; @@ -640,8 +637,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, } list_del(&sgl->list); - sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * - (MAX_SGL_ENTS + 1)); + sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1)); } if (!ctx->used) @@ -654,7 +650,7 @@ EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); * * @areq Request holding the TX and RX SGL */ -void af_alg_free_areq_sgls(struct af_alg_async_req *areq) +static void af_alg_free_areq_sgls(struct af_alg_async_req *areq) { struct sock *sk = areq->sk; struct alg_sock *ask = alg_sk(sk); @@ -683,7 +679,6 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); } } -EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); /** * af_alg_wait_for_wmem - wait for availability of writable memory @@ -692,7 +687,7 @@ EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); * @flags If MSG_DONTWAIT is set, then only report if function would sleep * @return 0 when writable memory is available, < 0 upon error */ -int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) +static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int err = -ERESTARTSYS; @@ -717,7 +712,6 @@ int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) return err; } -EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem); /** * af_alg_wmem_wakeup - wakeup caller when writable memory is available @@ -786,8 +780,7 @@ EXPORT_SYMBOL_GPL(af_alg_wait_for_data); * * @sk socket of connection to user space */ - -void af_alg_data_wakeup(struct sock *sk) +static void af_alg_data_wakeup(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; @@ -805,7 +798,6 @@ void af_alg_data_wakeup(struct sock *sk) sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } -EXPORT_SYMBOL_GPL(af_alg_data_wakeup); /** * af_alg_sendmsg - implementation of sendmsg system call handler diff --git a/crypto/ahash.c b/crypto/ahash.c index 5d320a811f75..81e2767e2164 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { unsigned int alignmask = walk->alignmask; - unsigned int nbytes = walk->entrylen; walk->data -= walk->offset; - if (nbytes && walk->offset & alignmask && !err) { - walk->offset = ALIGN(walk->offset, alignmask + 1); - nbytes = min(nbytes, - ((unsigned int)(PAGE_SIZE)) - walk->offset); - walk->entrylen -= nbytes; + if (walk->entrylen && (walk->offset & alignmask) && !err) { + unsigned int nbytes; + walk->offset = ALIGN(walk->offset, alignmask + 1); + nbytes = min(walk->entrylen, + (unsigned int)(PAGE_SIZE - walk->offset)); if (nbytes) { + walk->entrylen -= nbytes; walk->data += walk->offset; return nbytes; } @@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) if (err) return err; - if (nbytes) { + if (walk->entrylen) { walk->offset = 0; walk->pg++; return hash_walk_next(walk); @@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key, return ret; } +static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + return -ENOSYS; +} + +static void ahash_set_needkey(struct crypto_ahash *tfm) +{ + const struct hash_alg_common *alg = crypto_hash_alg_common(tfm); + + if (tfm->setkey != ahash_nosetkey && + !(alg->base.cra_flags & 
CRYPTO_ALG_OPTIONAL_KEY)) + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { @@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, else err = tfm->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + ahash_set_needkey(tfm); return err; + } crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); -static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - return -ENOSYS; -} - static inline unsigned int ahash_align_buffer_size(unsigned len, unsigned long mask) { @@ -489,8 +500,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) if (alg->setkey) { hash->setkey = alg->setkey; - if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) - crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY); + ahash_set_needkey(hash); } return 0; diff --git a/crypto/algapi.c b/crypto/algapi.c index 8b65ada33e5d..4c9c86b55738 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -494,6 +494,24 @@ out: } EXPORT_SYMBOL_GPL(crypto_register_template); +int crypto_register_templates(struct crypto_template *tmpls, int count) +{ + int i, err; + + for (i = 0; i < count; i++) { + err = crypto_register_template(&tmpls[i]); + if (err) + goto out; + } + return 0; + +out: + for (--i; i >= 0; --i) + crypto_unregister_template(&tmpls[i]); + return err; +} +EXPORT_SYMBOL_GPL(crypto_register_templates); + void crypto_unregister_template(struct crypto_template *tmpl) { struct crypto_instance *inst; @@ -523,6 +541,15 @@ void crypto_unregister_template(struct crypto_template *tmpl) } EXPORT_SYMBOL_GPL(crypto_unregister_template); +void crypto_unregister_templates(struct crypto_template *tmpls, int count) +{ + int i; + + for (i = count - 1; i >= 0; --i) + crypto_unregister_template(&tmpls[i]); +} +EXPORT_SYMBOL_GPL(crypto_unregister_templates); + static struct crypto_template *__crypto_lookup_template(const char *name) { struct crypto_template *q, *tmpl = NULL; @@ -608,6 +635,9 @@ int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg, { int err = -EAGAIN; + if (WARN_ON_ONCE(inst == NULL)) + return -EINVAL; + spawn->inst = inst; spawn->mask = mask; @@ -845,8 +875,8 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name, } EXPORT_SYMBOL_GPL(crypto_inst_setname); -void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg, - unsigned int head) +void *crypto_alloc_instance(const char *name, struct crypto_alg *alg, + unsigned int head) { struct crypto_instance *inst; char *p; @@ -869,35 +899,6 @@ err_free_inst: kfree(p); return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(crypto_alloc_instance2); - -struct crypto_instance *crypto_alloc_instance(const char *name, - struct crypto_alg *alg) -{ - struct crypto_instance *inst; - struct crypto_spawn *spawn; - int err; - - inst = crypto_alloc_instance2(name, alg, 0); - if (IS_ERR(inst)) - goto out; - - spawn = crypto_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, inst, - CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); - - if (err) - goto err_free_inst; - - return inst; - -err_free_inst: - kfree(inst); - inst = ERR_PTR(err); - -out: - return inst; -} EXPORT_SYMBOL_GPL(crypto_alloc_instance); void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) diff --git a/crypto/arc4.c b/crypto/arc4.c index f1a81925558f..6c93342e3405 100644 --- a/crypto/arc4.c +++ b/crypto/arc4.c @@ -12,14 +12,11 @@ * */ -#include -#include 
-#include #include - -#define ARC4_MIN_KEY_SIZE 1 -#define ARC4_MAX_KEY_SIZE 256 -#define ARC4_BLOCK_SIZE 1 +#include +#include +#include +#include struct arc4_ctx { u32 S[256]; @@ -50,6 +47,12 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, return 0; } +static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + return arc4_set_key(&tfm->base, in_key, key_len); +} + static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len) { @@ -92,30 +95,25 @@ static void arc4_crypt_one(struct crypto_tfm *tfm, u8 *out, const u8 *in) arc4_crypt(crypto_tfm_ctx(tfm), out, in, 1); } -static int ecb_arc4_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +static int ecb_arc4_crypt(struct skcipher_request *req) { - struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - struct blkcipher_walk walk; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - - err = blkcipher_walk_virt(desc, &walk); + err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes > 0) { - u8 *wsrc = walk.src.virt.addr; - u8 *wdst = walk.dst.virt.addr; - - arc4_crypt(ctx, wdst, wsrc, walk.nbytes); - - err = blkcipher_walk_done(desc, &walk, 0); + arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr, + walk.nbytes); + err = skcipher_walk_done(&walk, 0); } return err; } -static struct crypto_alg arc4_algs[2] = { { +static struct crypto_alg arc4_cipher = { .cra_name = "arc4", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ARC4_BLOCK_SIZE, @@ -130,34 +128,39 @@ static struct crypto_alg arc4_algs[2] = { { .cia_decrypt = arc4_crypt_one, }, }, -}, { - .cra_name = "ecb(arc4)", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = ARC4_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct arc4_ctx), - .cra_alignmask = 0, - .cra_type = &crypto_blkcipher_type, - .cra_module = THIS_MODULE, - .cra_u = { - .blkcipher = { - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - .setkey = arc4_set_key, - .encrypt = ecb_arc4_crypt, - .decrypt = ecb_arc4_crypt, - }, - }, -} }; +}; + +static struct skcipher_alg arc4_skcipher = { + .base.cra_name = "ecb(arc4)", + .base.cra_priority = 100, + .base.cra_blocksize = ARC4_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct arc4_ctx), + .base.cra_module = THIS_MODULE, + .min_keysize = ARC4_MIN_KEY_SIZE, + .max_keysize = ARC4_MAX_KEY_SIZE, + .setkey = arc4_set_key_skcipher, + .encrypt = ecb_arc4_crypt, + .decrypt = ecb_arc4_crypt, +}; static int __init arc4_init(void) { - return crypto_register_algs(arc4_algs, ARRAY_SIZE(arc4_algs)); + int err; + + err = crypto_register_alg(&arc4_cipher); + if (err) + return err; + + err = crypto_register_skcipher(&arc4_skcipher); + if (err) + crypto_unregister_alg(&arc4_cipher); + return err; } static void __exit arc4_exit(void) { - crypto_unregister_algs(arc4_algs, ARRAY_SIZE(arc4_algs)); + crypto_unregister_alg(&arc4_cipher); + crypto_unregister_skcipher(&arc4_skcipher); } module_init(arc4_init); diff --git a/crypto/cbc.c b/crypto/cbc.c index dd5f332fd566..d12efaac9230 100644 --- a/crypto/cbc.c +++ b/crypto/cbc.c @@ -18,34 +18,11 @@ #include #include #include -#include - -struct crypto_cbc_ctx { - struct crypto_cipher *child; -}; - -static int crypto_cbc_setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) -{ - 
struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_cipher *child = ctx->child; - int err; - - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) { - struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_cipher_encrypt_one(ctx->child, dst, src); + crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src); } static int crypto_cbc_encrypt(struct skcipher_request *req) @@ -56,9 +33,7 @@ static int crypto_cbc_encrypt(struct skcipher_request *req) static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) { - struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_cipher_decrypt_one(ctx->child, dst, src); + crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src); } static int crypto_cbc_decrypt(struct skcipher_request *req) @@ -78,113 +53,33 @@ static int crypto_cbc_decrypt(struct skcipher_request *req) return err; } -static int crypto_cbc_init_tfm(struct crypto_skcipher *tfm) -{ - struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); - struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 0; -} - -static void crypto_cbc_exit_tfm(struct crypto_skcipher *tfm) -{ - struct crypto_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static void crypto_cbc_free(struct skcipher_instance *inst) -{ - crypto_drop_skcipher(skcipher_instance_ctx(inst)); - kfree(inst); -} - static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; - struct crypto_spawn *spawn; struct crypto_alg *alg; - u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); - if (err) - return err; - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - - algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); - if (IS_ERR(algt)) - goto err_free_inst; - - mask = CRYPTO_ALG_TYPE_MASK | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); - err = PTR_ERR(alg); - if (IS_ERR(alg)) - goto err_free_inst; - - spawn = skcipher_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); - if (err) - goto err_put_alg; - - err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg); - if (err) - goto err_drop_spawn; + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); err = -EINVAL; if (!is_power_of_2(alg->cra_blocksize)) - goto err_drop_spawn; - - inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; - - inst->alg.ivsize = alg->cra_blocksize; - inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - - inst->alg.base.cra_ctxsize = sizeof(struct 
crypto_cbc_ctx); - - inst->alg.init = crypto_cbc_init_tfm; - inst->alg.exit = crypto_cbc_exit_tfm; + goto out_free_inst; - inst->alg.setkey = crypto_cbc_setkey; inst->alg.encrypt = crypto_cbc_encrypt; inst->alg.decrypt = crypto_cbc_decrypt; - inst->free = crypto_cbc_free; - err = skcipher_register_instance(tmpl, inst); if (err) - goto err_drop_spawn; - crypto_mod_put(alg); + goto out_free_inst; + goto out_put_alg; -out: - return err; - -err_drop_spawn: - crypto_drop_spawn(spawn); -err_put_alg: +out_free_inst: + inst->free(inst); +out_put_alg: crypto_mod_put(alg); -err_free_inst: - kfree(inst); - goto out; + return err; } static struct crypto_template crypto_cbc_tmpl = { @@ -207,5 +102,5 @@ module_init(crypto_cbc_module_init); module_exit(crypto_cbc_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("CBC block cipher algorithm"); +MODULE_DESCRIPTION("CBC block cipher mode of operation"); MODULE_ALIAS_CRYPTO("cbc"); diff --git a/crypto/ccm.c b/crypto/ccm.c index b242fd0d3262..50df8f001c1c 100644 --- a/crypto/ccm.c +++ b/crypto/ccm.c @@ -589,12 +589,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb) mac_name); } -static struct crypto_template crypto_ccm_tmpl = { - .name = "ccm", - .create = crypto_ccm_create, - .module = THIS_MODULE, -}; - static int crypto_ccm_base_create(struct crypto_template *tmpl, struct rtattr **tb) { @@ -618,12 +612,6 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl, cipher_name); } -static struct crypto_template crypto_ccm_base_tmpl = { - .name = "ccm_base", - .create = crypto_ccm_base_create, - .module = THIS_MODULE, -}; - static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { @@ -854,12 +842,6 @@ out_free_inst: goto out; } -static struct crypto_template crypto_rfc4309_tmpl = { - .name = "rfc4309", - .create = crypto_rfc4309_create, - .module = THIS_MODULE, -}; - static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { @@ -999,51 +981,37 @@ out_put_alg: return err; } -static struct crypto_template crypto_cbcmac_tmpl = { - .name = "cbcmac", - .create = cbcmac_create, - .free = shash_free_instance, - .module = THIS_MODULE, +static struct crypto_template crypto_ccm_tmpls[] = { + { + .name = "cbcmac", + .create = cbcmac_create, + .free = shash_free_instance, + .module = THIS_MODULE, + }, { + .name = "ccm_base", + .create = crypto_ccm_base_create, + .module = THIS_MODULE, + }, { + .name = "ccm", + .create = crypto_ccm_create, + .module = THIS_MODULE, + }, { + .name = "rfc4309", + .create = crypto_rfc4309_create, + .module = THIS_MODULE, + }, }; static int __init crypto_ccm_module_init(void) { - int err; - - err = crypto_register_template(&crypto_cbcmac_tmpl); - if (err) - goto out; - - err = crypto_register_template(&crypto_ccm_base_tmpl); - if (err) - goto out_undo_cbcmac; - - err = crypto_register_template(&crypto_ccm_tmpl); - if (err) - goto out_undo_base; - - err = crypto_register_template(&crypto_rfc4309_tmpl); - if (err) - goto out_undo_ccm; - -out: - return err; - -out_undo_ccm: - crypto_unregister_template(&crypto_ccm_tmpl); -out_undo_base: - crypto_unregister_template(&crypto_ccm_base_tmpl); -out_undo_cbcmac: - crypto_register_template(&crypto_cbcmac_tmpl); - goto out; + return crypto_register_templates(crypto_ccm_tmpls, + ARRAY_SIZE(crypto_ccm_tmpls)); } static void __exit crypto_ccm_module_exit(void) { - crypto_unregister_template(&crypto_rfc4309_tmpl); - crypto_unregister_template(&crypto_ccm_tmpl); - 
crypto_unregister_template(&crypto_ccm_base_tmpl); - crypto_unregister_template(&crypto_cbcmac_tmpl); + crypto_unregister_templates(crypto_ccm_tmpls, + ARRAY_SIZE(crypto_ccm_tmpls)); } module_init(crypto_ccm_module_init); diff --git a/crypto/cfb.c b/crypto/cfb.c index e81e45673498..03ac847f6d6a 100644 --- a/crypto/cfb.c +++ b/crypto/cfb.c @@ -25,28 +25,17 @@ #include #include #include -#include #include -#include - -struct crypto_cfb_ctx { - struct crypto_cipher *child; -}; static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm) { - struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; - - return crypto_cipher_blocksize(child); + return crypto_cipher_blocksize(skcipher_cipher_simple(tfm)); } static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst) { - struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_cipher_encrypt_one(ctx->child, dst, src); + crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src); } /* final encrypt and decrypt is the same */ @@ -77,12 +66,14 @@ static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk, do { crypto_cfb_encrypt_one(tfm, iv, dst); crypto_xor(dst, src, bsize); - memcpy(iv, dst, bsize); + iv = dst; src += bsize; dst += bsize; } while ((nbytes -= bsize) >= bsize); + memcpy(walk->iv, iv, bsize); + return nbytes; } @@ -162,7 +153,7 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk, const unsigned int bsize = crypto_cfb_bsize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmp[MAX_CIPHER_BLOCKSIZE]; do { @@ -172,8 +163,6 @@ static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } @@ -186,22 +175,6 @@ static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk, return crypto_cfb_decrypt_segment(walk, tfm); } -static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) -{ - struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_cipher *child = ctx->child; - int err; - - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} - static int crypto_cfb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); @@ -224,110 +197,34 @@ static int crypto_cfb_decrypt(struct skcipher_request *req) return err; } -static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm) -{ - struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); - struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 0; -} - -static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm) -{ - struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static void crypto_cfb_free(struct skcipher_instance *inst) -{ - crypto_drop_skcipher(skcipher_instance_ctx(inst)); - kfree(inst); -} - static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance 
*inst; - struct crypto_attr_type *algt; - struct crypto_spawn *spawn; struct crypto_alg *alg; - u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); - if (err) - return err; + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - - algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); - if (IS_ERR(algt)) - goto err_free_inst; - - mask = CRYPTO_ALG_TYPE_MASK | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); - err = PTR_ERR(alg); - if (IS_ERR(alg)) - goto err_free_inst; - - spawn = skcipher_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); - if (err) - goto err_put_alg; - - err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg); - if (err) - goto err_drop_spawn; - - inst->alg.base.cra_priority = alg->cra_priority; - /* we're a stream cipher independend of the crypto cra_blocksize */ + /* CFB mode is a stream cipher. */ inst->alg.base.cra_blocksize = 1; - inst->alg.base.cra_alignmask = alg->cra_alignmask; - - inst->alg.ivsize = alg->cra_blocksize; - inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - - inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx); - inst->alg.init = crypto_cfb_init_tfm; - inst->alg.exit = crypto_cfb_exit_tfm; + /* + * To simplify the implementation, configure the skcipher walk to only + * give a partial block at the very end, never earlier. + */ + inst->alg.chunksize = alg->cra_blocksize; - inst->alg.setkey = crypto_cfb_setkey; inst->alg.encrypt = crypto_cfb_encrypt; inst->alg.decrypt = crypto_cfb_decrypt; - inst->free = crypto_cfb_free; - err = skcipher_register_instance(tmpl, inst); if (err) - goto err_drop_spawn; - crypto_mod_put(alg); + inst->free(inst); -out: - return err; - -err_drop_spawn: - crypto_drop_spawn(spawn); -err_put_alg: crypto_mod_put(alg); -err_free_inst: - kfree(inst); - goto out; + return err; } static struct crypto_template crypto_cfb_tmpl = { @@ -350,5 +247,5 @@ module_init(crypto_cfb_module_init); module_exit(crypto_cfb_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("CFB block cipher algorithm"); +MODULE_DESCRIPTION("CFB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("cfb"); diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index fef11446ab1b..ed2e12e26dd8 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -701,37 +701,28 @@ static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb) return chachapoly_create(tmpl, tb, "rfc7539esp", 8); } -static struct crypto_template rfc7539_tmpl = { - .name = "rfc7539", - .create = rfc7539_create, - .module = THIS_MODULE, -}; - -static struct crypto_template rfc7539esp_tmpl = { - .name = "rfc7539esp", - .create = rfc7539esp_create, - .module = THIS_MODULE, +static struct crypto_template rfc7539_tmpls[] = { + { + .name = "rfc7539", + .create = rfc7539_create, + .module = THIS_MODULE, + }, { + .name = "rfc7539esp", + .create = rfc7539esp_create, + .module = THIS_MODULE, + }, }; static int __init chacha20poly1305_module_init(void) { - int err; - - err = crypto_register_template(&rfc7539_tmpl); - if (err) - return err; - - err = crypto_register_template(&rfc7539esp_tmpl); - if (err) - crypto_unregister_template(&rfc7539_tmpl); - - 
return err; + return crypto_register_templates(rfc7539_tmpls, + ARRAY_SIZE(rfc7539_tmpls)); } static void __exit chacha20poly1305_module_exit(void) { - crypto_unregister_template(&rfc7539esp_tmpl); - crypto_unregister_template(&rfc7539_tmpl); + crypto_unregister_templates(rfc7539_tmpls, + ARRAY_SIZE(rfc7539_tmpls)); } module_init(chacha20poly1305_module_init); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 0bae59922a80..01630a9c7e01 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -65,6 +65,10 @@ static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { return 0; } +static int null_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ return 0; } + static int null_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { return 0; } @@ -74,21 +78,18 @@ static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) memcpy(dst, src, NULL_BLOCK_SIZE); } -static int skcipher_null_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) +static int null_skcipher_crypt(struct skcipher_request *req) { - struct blkcipher_walk walk; + struct skcipher_walk walk; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt(desc, &walk); + err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes) { if (walk.src.virt.addr != walk.dst.virt.addr) memcpy(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes); - err = blkcipher_walk_done(desc, &walk, 0); + err = skcipher_walk_done(&walk, 0); } return err; @@ -109,7 +110,22 @@ static struct shash_alg digest_null = { } }; -static struct crypto_alg null_algs[3] = { { +static struct skcipher_alg skcipher_null = { + .base.cra_name = "ecb(cipher_null)", + .base.cra_driver_name = "ecb-cipher_null", + .base.cra_priority = 100, + .base.cra_blocksize = NULL_BLOCK_SIZE, + .base.cra_ctxsize = 0, + .base.cra_module = THIS_MODULE, + .min_keysize = NULL_KEY_SIZE, + .max_keysize = NULL_KEY_SIZE, + .ivsize = NULL_IV_SIZE, + .setkey = null_skcipher_setkey, + .encrypt = null_skcipher_crypt, + .decrypt = null_skcipher_crypt, +}; + +static struct crypto_alg null_algs[] = { { .cra_name = "cipher_null", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = NULL_BLOCK_SIZE, @@ -121,22 +137,6 @@ static struct crypto_alg null_algs[3] = { { .cia_setkey = null_setkey, .cia_encrypt = null_crypt, .cia_decrypt = null_crypt } } -}, { - .cra_name = "ecb(cipher_null)", - .cra_driver_name = "ecb-cipher_null", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, - .cra_blocksize = NULL_BLOCK_SIZE, - .cra_type = &crypto_blkcipher_type, - .cra_ctxsize = 0, - .cra_module = THIS_MODULE, - .cra_u = { .blkcipher = { - .min_keysize = NULL_KEY_SIZE, - .max_keysize = NULL_KEY_SIZE, - .ivsize = NULL_IV_SIZE, - .setkey = null_setkey, - .encrypt = skcipher_null_crypt, - .decrypt = skcipher_null_crypt } } }, { .cra_name = "compress_null", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, @@ -199,8 +199,14 @@ static int __init crypto_null_mod_init(void) if (ret < 0) goto out_unregister_algs; + ret = crypto_register_skcipher(&skcipher_null); + if (ret < 0) + goto out_unregister_shash; + return 0; +out_unregister_shash: + crypto_unregister_shash(&digest_null); out_unregister_algs: crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); out: @@ -209,8 +215,9 @@ out: static void __exit crypto_null_mod_fini(void) { - crypto_unregister_shash(&digest_null); crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); 
+ crypto_unregister_shash(&digest_null); + crypto_unregister_skcipher(&skcipher_null); } module_init(crypto_null_mod_init); diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c index 3e9a53233d80..a03f326a63d3 100644 --- a/crypto/crypto_user_stat.c +++ b/crypto/crypto_user_stat.c @@ -20,10 +20,6 @@ #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x)) -static DEFINE_MUTEX(crypto_cfg_mutex); - -extern struct sock *crypto_nlsk; - struct crypto_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; diff --git a/crypto/ctr.c b/crypto/ctr.c index 30f3946efc6d..ec8f8b67473a 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -17,14 +17,8 @@ #include #include #include -#include -#include #include -struct crypto_ctr_ctx { - struct crypto_cipher *child; -}; - struct crypto_rfc3686_ctx { struct crypto_skcipher *child; u8 nonce[CTR_RFC3686_NONCE_SIZE]; @@ -35,24 +29,7 @@ struct crypto_rfc3686_req_ctx { struct skcipher_request subreq CRYPTO_MINALIGN_ATTR; }; -static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key, - unsigned int keylen) -{ - struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent); - struct crypto_cipher *child = ctx->child; - int err; - - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - - return err; -} - -static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, +static void crypto_ctr_crypt_final(struct skcipher_walk *walk, struct crypto_cipher *tfm) { unsigned int bsize = crypto_cipher_blocksize(tfm); @@ -70,7 +47,7 @@ static void crypto_ctr_crypt_final(struct blkcipher_walk *walk, crypto_inc(ctrblk, bsize); } -static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, +static int crypto_ctr_crypt_segment(struct skcipher_walk *walk, struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = @@ -96,7 +73,7 @@ static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk, return nbytes; } -static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, +static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk, struct crypto_cipher *tfm) { void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = @@ -123,138 +100,77 @@ static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk, return nbytes; } -static int crypto_ctr_crypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int crypto_ctr_crypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; - unsigned int bsize = crypto_cipher_blocksize(child); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + const unsigned int bsize = crypto_cipher_blocksize(cipher); + struct skcipher_walk walk; + unsigned int nbytes; int err; - blkcipher_walk_init(&walk, dst, src, nbytes); - err = blkcipher_walk_virt_block(desc, &walk, bsize); + err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes >= bsize) { if (walk.src.virt.addr == walk.dst.virt.addr) - nbytes = crypto_ctr_crypt_inplace(&walk, child); + nbytes = crypto_ctr_crypt_inplace(&walk, cipher); else - nbytes = crypto_ctr_crypt_segment(&walk, child); + nbytes = crypto_ctr_crypt_segment(&walk, 
cipher); - err = blkcipher_walk_done(desc, &walk, nbytes); + err = skcipher_walk_done(&walk, nbytes); } if (walk.nbytes) { - crypto_ctr_crypt_final(&walk, child); - err = blkcipher_walk_done(desc, &walk, 0); + crypto_ctr_crypt_final(&walk, cipher); + err = skcipher_walk_done(&walk, 0); } return err; } -static int crypto_ctr_init_tfm(struct crypto_tfm *tfm) -{ - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - - return 0; -} - -static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm) -{ - struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb) +static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_instance *inst; - struct crypto_attr_type *algt; + struct skcipher_instance *inst; struct crypto_alg *alg; - u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); - if (err) - return ERR_PTR(err); - - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return ERR_CAST(algt); - - mask = CRYPTO_ALG_TYPE_MASK | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); - - alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask); - if (IS_ERR(alg)) - return ERR_CAST(alg); + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); /* Block size must be >= 4 bytes. */ err = -EINVAL; if (alg->cra_blocksize < 4) - goto out_put_alg; + goto out_free_inst; /* If this is false we'd fail the alignment of crypto_inc. */ if (alg->cra_blocksize % 4) - goto out_put_alg; - - inst = crypto_alloc_instance("ctr", alg); - if (IS_ERR(inst)) - goto out; - - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = 1; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_blkcipher_type; - - inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; - inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; + goto out_free_inst; - inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx); + /* CTR mode is a stream cipher. */ + inst->alg.base.cra_blocksize = 1; - inst->alg.cra_init = crypto_ctr_init_tfm; - inst->alg.cra_exit = crypto_ctr_exit_tfm; + /* + * To simplify the implementation, configure the skcipher walk to only + * give a partial block at the very end, never earlier. 
+ */ + inst->alg.chunksize = alg->cra_blocksize; - inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey; - inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt; - inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt; + inst->alg.encrypt = crypto_ctr_crypt; + inst->alg.decrypt = crypto_ctr_crypt; -out: - crypto_mod_put(alg); - return inst; + err = skcipher_register_instance(tmpl, inst); + if (err) + goto out_free_inst; + goto out_put_alg; +out_free_inst: + inst->free(inst); out_put_alg: - inst = ERR_PTR(err); - goto out; -} - -static void crypto_ctr_free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); - kfree(inst); + crypto_mod_put(alg); + return err; } -static struct crypto_template crypto_ctr_tmpl = { - .name = "ctr", - .alloc = crypto_ctr_alloc, - .free = crypto_ctr_free, - .module = THIS_MODULE, -}; - static int crypto_rfc3686_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { @@ -444,42 +360,34 @@ err_free_inst: goto out; } -static struct crypto_template crypto_rfc3686_tmpl = { - .name = "rfc3686", - .create = crypto_rfc3686_create, - .module = THIS_MODULE, +static struct crypto_template crypto_ctr_tmpls[] = { + { + .name = "ctr", + .create = crypto_ctr_create, + .module = THIS_MODULE, + }, { + .name = "rfc3686", + .create = crypto_rfc3686_create, + .module = THIS_MODULE, + }, }; static int __init crypto_ctr_module_init(void) { - int err; - - err = crypto_register_template(&crypto_ctr_tmpl); - if (err) - goto out; - - err = crypto_register_template(&crypto_rfc3686_tmpl); - if (err) - goto out_drop_ctr; - -out: - return err; - -out_drop_ctr: - crypto_unregister_template(&crypto_ctr_tmpl); - goto out; + return crypto_register_templates(crypto_ctr_tmpls, + ARRAY_SIZE(crypto_ctr_tmpls)); } static void __exit crypto_ctr_module_exit(void) { - crypto_unregister_template(&crypto_rfc3686_tmpl); - crypto_unregister_template(&crypto_ctr_tmpl); + crypto_unregister_templates(crypto_ctr_tmpls, + ARRAY_SIZE(crypto_ctr_tmpls)); } module_init(crypto_ctr_module_init); module_exit(crypto_ctr_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("CTR Counter block mode"); +MODULE_DESCRIPTION("CTR block cipher mode of operation"); MODULE_ALIAS_CRYPTO("rfc3686"); MODULE_ALIAS_CRYPTO("ctr"); diff --git a/crypto/des_generic.c b/crypto/des_generic.c index a71720544d11..1e6621665dd9 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -789,7 +789,7 @@ static int des_setkey(struct crypto_tfm *tfm, const u8 *key, /* Expand to tmp */ ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -866,7 +866,7 @@ int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key, if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/crypto/ecb.c b/crypto/ecb.c index 12011aff0971..0732715c8d91 100644 --- a/crypto/ecb.c +++ b/crypto/ecb.c @@ -11,162 +11,83 @@ */ #include +#include #include #include #include #include -#include -#include -struct crypto_ecb_ctx { - struct crypto_cipher *child; -}; - -static int crypto_ecb_setkey(struct crypto_tfm *parent, const u8 *key, - unsigned int keylen) -{ - struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(parent); - struct crypto_cipher *child = ctx->child; - int err; - - 
crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} - -static int crypto_ecb_crypt(struct blkcipher_desc *desc, - struct blkcipher_walk *walk, - struct crypto_cipher *tfm, +static int crypto_ecb_crypt(struct skcipher_request *req, + struct crypto_cipher *cipher, void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) { - int bsize = crypto_cipher_blocksize(tfm); + const unsigned int bsize = crypto_cipher_blocksize(cipher); + struct skcipher_walk walk; unsigned int nbytes; int err; - err = blkcipher_walk_virt(desc, walk); + err = skcipher_walk_virt(&walk, req, false); - while ((nbytes = walk->nbytes)) { - u8 *wsrc = walk->src.virt.addr; - u8 *wdst = walk->dst.virt.addr; + while ((nbytes = walk.nbytes) != 0) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; do { - fn(crypto_cipher_tfm(tfm), wdst, wsrc); + fn(crypto_cipher_tfm(cipher), dst, src); - wsrc += bsize; - wdst += bsize; + src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); - err = blkcipher_walk_done(desc, walk, nbytes); + err = skcipher_walk_done(&walk, nbytes); } return err; } -static int crypto_ecb_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int crypto_ecb_encrypt(struct skcipher_request *req) { - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return crypto_ecb_crypt(desc, &walk, child, - crypto_cipher_alg(child)->cia_encrypt); -} + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); -static int crypto_ecb_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) -{ - struct blkcipher_walk walk; - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_ecb_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; - - blkcipher_walk_init(&walk, dst, src, nbytes); - return crypto_ecb_crypt(desc, &walk, child, - crypto_cipher_alg(child)->cia_decrypt); + return crypto_ecb_crypt(req, cipher, + crypto_cipher_alg(cipher)->cia_encrypt); } -static int crypto_ecb_init_tfm(struct crypto_tfm *tfm) +static int crypto_ecb_decrypt(struct skcipher_request *req) { - struct crypto_instance *inst = (void *)tfm->__crt_alg; - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_cipher *cipher; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 0; + return crypto_ecb_crypt(req, cipher, + crypto_cipher_alg(cipher)->cia_decrypt); } -static void crypto_ecb_exit_tfm(struct crypto_tfm *tfm) +static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm); - crypto_free_cipher(ctx->child); -} - -static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb) -{ - struct crypto_instance *inst; + struct skcipher_instance *inst; struct crypto_alg *alg; int err; 
- err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); - if (err) - return ERR_PTR(err); - - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return ERR_CAST(alg); - - inst = crypto_alloc_instance("ecb", alg); + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); if (IS_ERR(inst)) - goto out_put_alg; - - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = alg->cra_blocksize; - inst->alg.cra_alignmask = alg->cra_alignmask; - inst->alg.cra_type = &crypto_blkcipher_type; - - inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; + return PTR_ERR(inst); - inst->alg.cra_ctxsize = sizeof(struct crypto_ecb_ctx); + inst->alg.ivsize = 0; /* ECB mode doesn't take an IV */ - inst->alg.cra_init = crypto_ecb_init_tfm; - inst->alg.cra_exit = crypto_ecb_exit_tfm; + inst->alg.encrypt = crypto_ecb_encrypt; + inst->alg.decrypt = crypto_ecb_decrypt; - inst->alg.cra_blkcipher.setkey = crypto_ecb_setkey; - inst->alg.cra_blkcipher.encrypt = crypto_ecb_encrypt; - inst->alg.cra_blkcipher.decrypt = crypto_ecb_decrypt; - -out_put_alg: + err = skcipher_register_instance(tmpl, inst); + if (err) + inst->free(inst); crypto_mod_put(alg); - return inst; -} - -static void crypto_ecb_free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); - kfree(inst); + return err; } static struct crypto_template crypto_ecb_tmpl = { .name = "ecb", - .alloc = crypto_ecb_alloc, - .free = crypto_ecb_free, + .create = crypto_ecb_create, .module = THIS_MODULE, }; @@ -184,5 +105,5 @@ module_init(crypto_ecb_module_init); module_exit(crypto_ecb_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("ECB block cipher algorithm"); +MODULE_DESCRIPTION("ECB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ecb"); diff --git a/crypto/gcm.c b/crypto/gcm.c index e438492db2ca..e1a11f529d25 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -247,7 +247,7 @@ static int gcm_hash_len(struct aead_request *req, u32 flags) struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ahash_request *ahreq = &pctx->u.ahreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; - u128 lengths; + be128 lengths; lengths.a = cpu_to_be64(req->assoclen * 8); lengths.b = cpu_to_be64(gctx->cryptlen * 8); @@ -727,12 +727,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) ctr_name, "ghash"); } -static struct crypto_template crypto_gcm_tmpl = { - .name = "gcm", - .create = crypto_gcm_create, - .module = THIS_MODULE, -}; - static int crypto_gcm_base_create(struct crypto_template *tmpl, struct rtattr **tb) { @@ -756,12 +750,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl, ctr_name, ghash_name); } -static struct crypto_template crypto_gcm_base_tmpl = { - .name = "gcm_base", - .create = crypto_gcm_base_create, - .module = THIS_MODULE, -}; - static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { @@ -989,12 +977,6 @@ out_free_inst: goto out; } -static struct crypto_template crypto_rfc4106_tmpl = { - .name = "rfc4106", - .create = crypto_rfc4106_create, - .module = THIS_MODULE, -}; - static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { @@ -1231,10 +1213,24 @@ out_free_inst: goto out; } -static struct crypto_template crypto_rfc4543_tmpl = { - .name = "rfc4543", - .create = 
crypto_rfc4543_create, - .module = THIS_MODULE, +static struct crypto_template crypto_gcm_tmpls[] = { + { + .name = "gcm_base", + .create = crypto_gcm_base_create, + .module = THIS_MODULE, + }, { + .name = "gcm", + .create = crypto_gcm_create, + .module = THIS_MODULE, + }, { + .name = "rfc4106", + .create = crypto_rfc4106_create, + .module = THIS_MODULE, + }, { + .name = "rfc4543", + .create = crypto_rfc4543_create, + .module = THIS_MODULE, + }, }; static int __init crypto_gcm_module_init(void) @@ -1247,42 +1243,19 @@ static int __init crypto_gcm_module_init(void) sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf)); - err = crypto_register_template(&crypto_gcm_base_tmpl); - if (err) - goto out; - - err = crypto_register_template(&crypto_gcm_tmpl); + err = crypto_register_templates(crypto_gcm_tmpls, + ARRAY_SIZE(crypto_gcm_tmpls)); if (err) - goto out_undo_base; + kfree(gcm_zeroes); - err = crypto_register_template(&crypto_rfc4106_tmpl); - if (err) - goto out_undo_gcm; - - err = crypto_register_template(&crypto_rfc4543_tmpl); - if (err) - goto out_undo_rfc4106; - - return 0; - -out_undo_rfc4106: - crypto_unregister_template(&crypto_rfc4106_tmpl); -out_undo_gcm: - crypto_unregister_template(&crypto_gcm_tmpl); -out_undo_base: - crypto_unregister_template(&crypto_gcm_base_tmpl); -out: - kfree(gcm_zeroes); return err; } static void __exit crypto_gcm_module_exit(void) { kfree(gcm_zeroes); - crypto_unregister_template(&crypto_rfc4543_tmpl); - crypto_unregister_template(&crypto_rfc4106_tmpl); - crypto_unregister_template(&crypto_gcm_tmpl); - crypto_unregister_template(&crypto_gcm_base_tmpl); + crypto_unregister_templates(crypto_gcm_tmpls, + ARRAY_SIZE(crypto_gcm_tmpls)); } module_init(crypto_gcm_module_init); diff --git a/crypto/keywrap.c b/crypto/keywrap.c index ec5c6a087c90..a5cfe610d8f4 100644 --- a/crypto/keywrap.c +++ b/crypto/keywrap.c @@ -56,7 +56,7 @@ * u8 *iv = data; * u8 *pt = data + crypto_skcipher_ivsize(tfm); * - * sg_init_one(&sg, ptdata, ptlen); + * sg_init_one(&sg, pt, ptlen); * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv); * * ==> After encryption, data now contains full KW result as per SP800-38F. @@ -70,8 +70,8 @@ * u8 *iv = data; * u8 *ct = data + crypto_skcipher_ivsize(tfm); * unsigned int ctlen = datalen - crypto_skcipher_ivsize(tfm); - * sg_init_one(&sg, ctdata, ctlen); - * skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv); + * sg_init_one(&sg, ct, ctlen); + * skcipher_request_set_crypt(req, &sg, &sg, ctlen, iv); * * ==> After decryption (which hopefully does not return EBADMSG), the ct * pointer now points to the plaintext of size ctlen. 
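
The hunk above corrects the usage example in keywrap.c's header comment (the scatterlist must cover `pt`/`ct`, and the decrypt request length is `ctlen`, not `ptlen`). For reference, a minimal self-contained sketch of that encryption flow through the skcipher API is shown below; the helper name `kw_wrap_example` is hypothetical, it assumes an AES cipher is available so that "kw(aes)" can be instantiated, and it hard-codes the 8-byte IV size that this patch sets for the "kw" template.

	/* Hedged illustrative sketch only -- not part of the patch. */
	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int kw_wrap_example(const u8 *key, unsigned int keylen,
				   u8 *data, unsigned int datalen)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req = NULL;
		DECLARE_CRYPTO_WAIT(wait);
		struct scatterlist sg;
		u8 *iv = data;			/* first 8 bytes: IV (SEMIBSIZE) */
		u8 *pt = data + 8;		/* plaintext follows the IV */
		unsigned int ptlen = datalen - 8;
		int err;

		tfm = crypto_alloc_skcipher("kw(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		/* As in the corrected comment: the scatterlist covers pt, not data. */
		sg_init_one(&sg, pt, ptlen);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, ptlen, iv);

		/* Wait for completion in case an async implementation was selected. */
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}

Decryption follows the same pattern with the ciphertext length (`ctlen = datalen - 8`) passed to skcipher_request_set_crypt(), matching the second fix in the hunk above.
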
@@ -87,10 +87,6 @@ #include #include -struct crypto_kw_ctx { - struct crypto_cipher *child; -}; - struct crypto_kw_block { #define SEMIBSIZE 8 __be64 A; @@ -124,16 +120,13 @@ static void crypto_kw_scatterlist_ff(struct scatter_walk *walk, } } -static int crypto_kw_decrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int crypto_kw_decrypt(struct skcipher_request *req) { - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); struct crypto_kw_block block; - struct scatterlist *lsrc, *ldst; - u64 t = 6 * ((nbytes) >> 3); + struct scatterlist *src, *dst; + u64 t = 6 * ((req->cryptlen) >> 3); unsigned int i; int ret = 0; @@ -141,27 +134,27 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, * Require at least 2 semiblocks (note, the 3rd semiblock that is * required by SP800-38F is the IV. */ - if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE) + if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) return -EINVAL; /* Place the IV into block A */ - memcpy(&block.A, desc->info, SEMIBSIZE); + memcpy(&block.A, req->iv, SEMIBSIZE); /* * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, lsrc points to src and ldst to dst. For any - * subsequent round, the code operates on dst only. + * first loop, src points to req->src and dst to req->dst. For any + * subsequent round, the code operates on req->dst only. */ - lsrc = src; - ldst = dst; + src = req->src; + dst = req->dst; for (i = 0; i < 6; i++) { struct scatter_walk src_walk, dst_walk; - unsigned int tmp_nbytes = nbytes; + unsigned int nbytes = req->cryptlen; - while (tmp_nbytes) { - /* move pointer by tmp_nbytes in the SGL */ - crypto_kw_scatterlist_ff(&src_walk, lsrc, tmp_nbytes); + while (nbytes) { + /* move pointer by nbytes in the SGL */ + crypto_kw_scatterlist_ff(&src_walk, src, nbytes); /* get the source block */ scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, false); @@ -170,21 +163,21 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, block.A ^= cpu_to_be64(t); t--; /* perform KW operation: decrypt block */ - crypto_cipher_decrypt_one(child, (u8*)&block, - (u8*)&block); + crypto_cipher_decrypt_one(cipher, (u8 *)&block, + (u8 *)&block); - /* move pointer by tmp_nbytes in the SGL */ - crypto_kw_scatterlist_ff(&dst_walk, ldst, tmp_nbytes); + /* move pointer by nbytes in the SGL */ + crypto_kw_scatterlist_ff(&dst_walk, dst, nbytes); /* Copy block->R into place */ scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, true); - tmp_nbytes -= SEMIBSIZE; + nbytes -= SEMIBSIZE; } /* we now start to operate on the dst SGL only */ - lsrc = dst; - ldst = dst; + src = req->dst; + dst = req->dst; } /* Perform authentication check */ @@ -196,15 +189,12 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc, return ret; } -static int crypto_kw_encrypt(struct blkcipher_desc *desc, - struct scatterlist *dst, struct scatterlist *src, - unsigned int nbytes) +static int crypto_kw_encrypt(struct skcipher_request *req) { - struct crypto_blkcipher *tfm = desc->tfm; - struct crypto_kw_ctx *ctx = crypto_blkcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); struct crypto_kw_block block; - 
struct scatterlist *lsrc, *ldst; + struct scatterlist *src, *dst; u64 t = 1; unsigned int i; @@ -214,7 +204,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, * This means that the dst memory must be one semiblock larger than src. * Also ensure that the given data is aligned to semiblock. */ - if (nbytes < (2 * SEMIBSIZE) || nbytes % SEMIBSIZE) + if (req->cryptlen < (2 * SEMIBSIZE) || req->cryptlen % SEMIBSIZE) return -EINVAL; /* @@ -225,26 +215,26 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, /* * src scatterlist is read-only. dst scatterlist is r/w. During the - * first loop, lsrc points to src and ldst to dst. For any - * subsequent round, the code operates on dst only. + * first loop, src points to req->src and dst to req->dst. For any + * subsequent round, the code operates on req->dst only. */ - lsrc = src; - ldst = dst; + src = req->src; + dst = req->dst; for (i = 0; i < 6; i++) { struct scatter_walk src_walk, dst_walk; - unsigned int tmp_nbytes = nbytes; + unsigned int nbytes = req->cryptlen; - scatterwalk_start(&src_walk, lsrc); - scatterwalk_start(&dst_walk, ldst); + scatterwalk_start(&src_walk, src); + scatterwalk_start(&dst_walk, dst); - while (tmp_nbytes) { + while (nbytes) { /* get the source block */ scatterwalk_copychunks(&block.R, &src_walk, SEMIBSIZE, false); /* perform KW operation: encrypt block */ - crypto_cipher_encrypt_one(child, (u8 *)&block, + crypto_cipher_encrypt_one(cipher, (u8 *)&block, (u8 *)&block); /* perform KW operation: modify IV with counter */ block.A ^= cpu_to_be64(t); @@ -254,117 +244,59 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc, scatterwalk_copychunks(&block.R, &dst_walk, SEMIBSIZE, true); - tmp_nbytes -= SEMIBSIZE; + nbytes -= SEMIBSIZE; } /* we now start to operate on the dst SGL only */ - lsrc = dst; - ldst = dst; + src = req->dst; + dst = req->dst; } /* establish the IV for the caller to pick up */ - memcpy(desc->info, &block.A, SEMIBSIZE); + memcpy(req->iv, &block.A, SEMIBSIZE); memzero_explicit(&block, sizeof(struct crypto_kw_block)); return 0; } -static int crypto_kw_setkey(struct crypto_tfm *parent, const u8 *key, - unsigned int keylen) +static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb) { - struct crypto_kw_ctx *ctx = crypto_tfm_ctx(parent); - struct crypto_cipher *child = ctx->child; + struct skcipher_instance *inst; + struct crypto_alg *alg; int err; - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} - -static int crypto_kw_init_tfm(struct crypto_tfm *tfm) -{ - struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); - struct crypto_spawn *spawn = crypto_instance_ctx(inst); - struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 0; -} - -static void crypto_kw_exit_tfm(struct crypto_tfm *tfm) -{ - struct crypto_kw_ctx *ctx = crypto_tfm_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static struct crypto_instance *crypto_kw_alloc(struct rtattr **tb) -{ - struct crypto_instance *inst = NULL; - struct crypto_alg *alg = NULL; - int err; - - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); - if (err) - return ERR_PTR(err); - - alg = crypto_get_attr_alg(tb, 
CRYPTO_ALG_TYPE_CIPHER, - CRYPTO_ALG_TYPE_MASK); - if (IS_ERR(alg)) - return ERR_CAST(alg); + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); - inst = ERR_PTR(-EINVAL); + err = -EINVAL; /* Section 5.1 requirement for KW */ if (alg->cra_blocksize != sizeof(struct crypto_kw_block)) - goto err; + goto out_free_inst; - inst = crypto_alloc_instance("kw", alg); - if (IS_ERR(inst)) - goto err; + inst->alg.base.cra_blocksize = SEMIBSIZE; + inst->alg.base.cra_alignmask = 0; + inst->alg.ivsize = SEMIBSIZE; - inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; - inst->alg.cra_priority = alg->cra_priority; - inst->alg.cra_blocksize = SEMIBSIZE; - inst->alg.cra_alignmask = 0; - inst->alg.cra_type = &crypto_blkcipher_type; - inst->alg.cra_blkcipher.ivsize = SEMIBSIZE; - inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize; + inst->alg.encrypt = crypto_kw_encrypt; + inst->alg.decrypt = crypto_kw_decrypt; - inst->alg.cra_ctxsize = sizeof(struct crypto_kw_ctx); - - inst->alg.cra_init = crypto_kw_init_tfm; - inst->alg.cra_exit = crypto_kw_exit_tfm; - - inst->alg.cra_blkcipher.setkey = crypto_kw_setkey; - inst->alg.cra_blkcipher.encrypt = crypto_kw_encrypt; - inst->alg.cra_blkcipher.decrypt = crypto_kw_decrypt; + err = skcipher_register_instance(tmpl, inst); + if (err) + goto out_free_inst; + goto out_put_alg; -err: +out_free_inst: + inst->free(inst); +out_put_alg: crypto_mod_put(alg); - return inst; -} - -static void crypto_kw_free(struct crypto_instance *inst) -{ - crypto_drop_spawn(crypto_instance_ctx(inst)); - kfree(inst); + return err; } static struct crypto_template crypto_kw_tmpl = { .name = "kw", - .alloc = crypto_kw_alloc, - .free = crypto_kw_free, + .create = crypto_kw_create, .module = THIS_MODULE, }; diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c new file mode 100644 index 000000000000..ea9c75b1db49 --- /dev/null +++ b/crypto/lzo-rle.c @@ -0,0 +1,175 @@ +/* + * Cryptographic API. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include + +struct lzorle_ctx { + void *lzorle_comp_mem; +}; + +static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) +{ + void *ctx; + + ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + return ctx; +} + +static int lzorle_init(struct crypto_tfm *tfm) +{ + struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL); + if (IS_ERR(ctx->lzorle_comp_mem)) + return -ENOMEM; + + return 0; +} + +static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx) +{ + kvfree(ctx); +} + +static void lzorle_exit(struct crypto_tfm *tfm) +{ + struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); + + lzorle_free_ctx(NULL, ctx->lzorle_comp_mem); +} + +static int __lzorle_compress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen, void *ctx) +{ + size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ + int err; + + err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); + + if (err != LZO_E_OK) + return -EINVAL; + + *dlen = tmp_len; + return 0; +} + +static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); + + return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem); +} + +static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __lzorle_compress(src, slen, dst, dlen, ctx); +} + +static int __lzorle_decompress(const u8 *src, unsigned int slen, + u8 *dst, unsigned int *dlen) +{ + int err; + size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ + + err = lzo1x_decompress_safe(src, slen, dst, &tmp_len); + + if (err != LZO_E_OK) + return -EINVAL; + + *dlen = tmp_len; + return 0; +} + +static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen) +{ + return __lzorle_decompress(src, slen, dst, dlen); +} + +static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, + unsigned int slen, u8 *dst, unsigned int *dlen, + void *ctx) +{ + return __lzorle_decompress(src, slen, dst, dlen); +} + +static struct crypto_alg alg = { + .cra_name = "lzo-rle", + .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, + .cra_ctxsize = sizeof(struct lzorle_ctx), + .cra_module = THIS_MODULE, + .cra_init = lzorle_init, + .cra_exit = lzorle_exit, + .cra_u = { .compress = { + .coa_compress = lzorle_compress, + .coa_decompress = lzorle_decompress } } +}; + +static struct scomp_alg scomp = { + .alloc_ctx = lzorle_alloc_ctx, + .free_ctx = lzorle_free_ctx, + .compress = lzorle_scompress, + .decompress = lzorle_sdecompress, + .base = { + .cra_name = "lzo-rle", + .cra_driver_name = "lzo-rle-scomp", + .cra_module = THIS_MODULE, + } +}; + +static int __init lzorle_mod_init(void) +{ + int ret; + + ret = crypto_register_alg(&alg); + if (ret) + return ret; + + ret = crypto_register_scomp(&scomp); + if (ret) { + crypto_unregister_alg(&alg); + return ret; + } + + return ret; +} + +static void __exit lzorle_mod_fini(void) +{ + crypto_unregister_alg(&alg); + crypto_unregister_scomp(&scomp); +} + +module_init(lzorle_mod_init); +module_exit(lzorle_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("LZO-RLE 
Compression Algorithm"); +MODULE_ALIAS_CRYPTO("lzo-rle"); diff --git a/crypto/morus1280.c b/crypto/morus1280.c index 3889c188f266..0747732d5b78 100644 --- a/crypto/morus1280.c +++ b/crypto/morus1280.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The MORUS-1280 Authenticated-Encryption Algorithm * * Copyright (c) 2016-2018 Ondrej Mosnacek * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #include @@ -366,18 +362,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state, const struct morus1280_ops *ops) { struct skcipher_walk walk; - u8 *dst; - const u8 *src; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, walk.nbytes); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/morus640.c b/crypto/morus640.c index da06ec2f6a80..1617a1eb8be1 100644 --- a/crypto/morus640.c +++ b/crypto/morus640.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* * The MORUS-640 Authenticated-Encryption Algorithm * * Copyright (c) 2016-2018 Ondrej Mosnacek * Copyright (C) 2017-2018 Red Hat, Inc. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. */ #include @@ -365,18 +361,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state, const struct morus640_ops *ops) { struct skcipher_walk walk; - u8 *dst; - const u8 *src; ops->skcipher_walk_init(&walk, req, false); while (walk.nbytes) { - src = walk.src.virt.addr; - dst = walk.dst.virt.addr; + unsigned int nbytes = walk.nbytes; + + if (nbytes < walk.total) + nbytes = round_down(nbytes, walk.stride); - ops->crypt_chunk(state, dst, src, walk.nbytes); + ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr, + nbytes); - skcipher_walk_done(&walk, 0); + skcipher_walk_done(&walk, walk.nbytes - nbytes); } } diff --git a/crypto/ofb.c b/crypto/ofb.c index 886631708c5e..34b6e1f426f7 100644 --- a/crypto/ofb.c +++ b/crypto/ofb.c @@ -5,9 +5,6 @@ * * Copyright (C) 2018 ARM Limited or its affiliates. * All rights reserved. - * - * Based loosely on public domain code gleaned from libtomcrypt - * (https://github.com/libtom/libtomcrypt). 
*/ #include @@ -16,189 +13,70 @@ #include #include #include -#include -#include - -struct crypto_ofb_ctx { - struct crypto_cipher *child; - int cnt; -}; - -static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) +static int crypto_ofb_crypt(struct skcipher_request *req) { - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_cipher *child = ctx->child; + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + const unsigned int bsize = crypto_cipher_blocksize(cipher); + struct skcipher_walk walk; int err; - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} - -static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx, - struct skcipher_walk *walk, - struct crypto_cipher *tfm) -{ - int bsize = crypto_cipher_blocksize(tfm); - int nbytes = walk->nbytes; - - u8 *src = walk->src.virt.addr; - u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; - - do { - if (ctx->cnt == bsize) { - if (nbytes < bsize) - break; - crypto_cipher_encrypt_one(tfm, iv, iv); - ctx->cnt = 0; - } - *dst = *src ^ iv[ctx->cnt]; - src++; - dst++; - ctx->cnt++; - } while (--nbytes); - return nbytes; -} - -static int crypto_ofb_encrypt(struct skcipher_request *req) -{ - struct skcipher_walk walk; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - unsigned int bsize; - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; - int ret = 0; + err = skcipher_walk_virt(&walk, req, false); - bsize = crypto_cipher_blocksize(child); - ctx->cnt = bsize; + while (walk.nbytes >= bsize) { + const u8 *src = walk.src.virt.addr; + u8 *dst = walk.dst.virt.addr; + u8 * const iv = walk.iv; + unsigned int nbytes = walk.nbytes; - ret = skcipher_walk_virt(&walk, req, false); + do { + crypto_cipher_encrypt_one(cipher, iv, iv); + crypto_xor_cpy(dst, src, iv, bsize); + dst += bsize; + src += bsize; + } while ((nbytes -= bsize) >= bsize); - while (walk.nbytes) { - ret = crypto_ofb_encrypt_segment(ctx, &walk, child); - ret = skcipher_walk_done(&walk, ret); + err = skcipher_walk_done(&walk, nbytes); } - return ret; -} - -/* OFB encrypt and decrypt are identical */ -static int crypto_ofb_decrypt(struct skcipher_request *req) -{ - return crypto_ofb_encrypt(req); -} - -static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm) -{ - struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 0; -} - -static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm) -{ - struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static void crypto_ofb_free(struct skcipher_instance *inst) -{ - crypto_drop_skcipher(skcipher_instance_ctx(inst)); - kfree(inst); + if (walk.nbytes) { + crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv); + crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv, + walk.nbytes); + err = skcipher_walk_done(&walk, 0); + } + return err; } static int crypto_ofb_create(struct 
crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; - struct crypto_spawn *spawn; struct crypto_alg *alg; - u32 mask; int err; - err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); - if (err) - return err; - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - - algt = crypto_get_attr_type(tb); - err = PTR_ERR(algt); - if (IS_ERR(algt)) - goto err_free_inst; - - mask = CRYPTO_ALG_TYPE_MASK | - crypto_requires_off(algt->type, algt->mask, - CRYPTO_ALG_NEED_FALLBACK); + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); - err = PTR_ERR(alg); - if (IS_ERR(alg)) - goto err_free_inst; + /* OFB mode is a stream cipher. */ + inst->alg.base.cra_blocksize = 1; - spawn = skcipher_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); - crypto_mod_put(alg); - if (err) - goto err_free_inst; - - err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg); - if (err) - goto err_drop_spawn; - - inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; - - /* We access the data as u32s when xoring. */ - inst->alg.base.cra_alignmask |= __alignof__(u32) - 1; + /* + * To simplify the implementation, configure the skcipher walk to only + * give a partial block at the very end, never earlier. + */ + inst->alg.chunksize = alg->cra_blocksize; - inst->alg.ivsize = alg->cra_blocksize; - inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - - inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx); - - inst->alg.init = crypto_ofb_init_tfm; - inst->alg.exit = crypto_ofb_exit_tfm; - - inst->alg.setkey = crypto_ofb_setkey; - inst->alg.encrypt = crypto_ofb_encrypt; - inst->alg.decrypt = crypto_ofb_decrypt; - - inst->free = crypto_ofb_free; + inst->alg.encrypt = crypto_ofb_crypt; + inst->alg.decrypt = crypto_ofb_crypt; err = skcipher_register_instance(tmpl, inst); if (err) - goto err_drop_spawn; + inst->free(inst); -out: + crypto_mod_put(alg); return err; - -err_drop_spawn: - crypto_drop_spawn(spawn); -err_free_inst: - kfree(inst); - goto out; } static struct crypto_template crypto_ofb_tmpl = { @@ -221,5 +99,5 @@ module_init(crypto_ofb_module_init); module_exit(crypto_ofb_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("OFB block cipher algorithm"); +MODULE_DESCRIPTION("OFB block cipher mode of operation"); MODULE_ALIAS_CRYPTO("ofb"); diff --git a/crypto/pcbc.c b/crypto/pcbc.c index 8aa10144407c..2fa03fc576fe 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -20,28 +20,6 @@ #include #include #include -#include -#include - -struct crypto_pcbc_ctx { - struct crypto_cipher *child; -}; - -static int crypto_pcbc_setkey(struct crypto_skcipher *parent, const u8 *key, - unsigned int keylen) -{ - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(parent); - struct crypto_cipher *child = ctx->child; - int err; - - crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) & - CRYPTO_TFM_REQ_MASK); - err = crypto_cipher_setkey(child, key, keylen); - crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) & - CRYPTO_TFM_RES_MASK); - return err; -} static int crypto_pcbc_encrypt_segment(struct skcipher_request 
*req, struct skcipher_walk *walk, @@ -51,7 +29,7 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; do { crypto_xor(iv, src, bsize); @@ -72,7 +50,7 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE]; do { @@ -84,16 +62,13 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } static int crypto_pcbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; @@ -103,10 +78,10 @@ static int crypto_pcbc_encrypt(struct skcipher_request *req) while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_encrypt_inplace(req, &walk, - child); + cipher); else nbytes = crypto_pcbc_encrypt_segment(req, &walk, - child); + cipher); err = skcipher_walk_done(&walk, nbytes); } @@ -121,7 +96,7 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; do { crypto_cipher_decrypt_one(tfm, dst, src); @@ -132,8 +107,6 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, dst += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } @@ -144,7 +117,7 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; u8 *src = walk->src.virt.addr; - u8 *iv = walk->iv; + u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32)); do { @@ -156,16 +129,13 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, src += bsize; } while ((nbytes -= bsize) >= bsize); - memcpy(walk->iv, iv, bsize); - return nbytes; } static int crypto_pcbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *child = ctx->child; + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; @@ -175,117 +145,34 @@ static int crypto_pcbc_decrypt(struct skcipher_request *req) while ((nbytes = walk.nbytes)) { if (walk.src.virt.addr == walk.dst.virt.addr) nbytes = crypto_pcbc_decrypt_inplace(req, &walk, - child); + cipher); else nbytes = crypto_pcbc_decrypt_segment(req, &walk, - child); + cipher); err = skcipher_walk_done(&walk, nbytes); } return err; } -static int crypto_pcbc_init_tfm(struct crypto_skcipher *tfm) -{ - struct skcipher_instance *inst = skcipher_alg_instance(tfm); - struct crypto_spawn *spawn = skcipher_instance_ctx(inst); - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - struct crypto_cipher *cipher; - - cipher = crypto_spawn_cipher(spawn); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - ctx->child = cipher; - return 
0; -} - -static void crypto_pcbc_exit_tfm(struct crypto_skcipher *tfm) -{ - struct crypto_pcbc_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_free_cipher(ctx->child); -} - -static void crypto_pcbc_free(struct skcipher_instance *inst) -{ - crypto_drop_skcipher(skcipher_instance_ctx(inst)); - kfree(inst); -} - static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb) { struct skcipher_instance *inst; - struct crypto_attr_type *algt; - struct crypto_spawn *spawn; struct crypto_alg *alg; int err; - algt = crypto_get_attr_type(tb); - if (IS_ERR(algt)) - return PTR_ERR(algt); - - if (((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) & - ~CRYPTO_ALG_INTERNAL) - return -EINVAL; - - inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); - if (!inst) - return -ENOMEM; - - alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER | - (algt->type & CRYPTO_ALG_INTERNAL), - CRYPTO_ALG_TYPE_MASK | - (algt->mask & CRYPTO_ALG_INTERNAL)); - err = PTR_ERR(alg); - if (IS_ERR(alg)) - goto err_free_inst; - - spawn = skcipher_instance_ctx(inst); - err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), - CRYPTO_ALG_TYPE_MASK); - if (err) - goto err_put_alg; - - err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg); - if (err) - goto err_drop_spawn; + inst = skcipher_alloc_instance_simple(tmpl, tb, &alg); + if (IS_ERR(inst)) + return PTR_ERR(inst); - inst->alg.base.cra_flags = alg->cra_flags & CRYPTO_ALG_INTERNAL; - inst->alg.base.cra_priority = alg->cra_priority; - inst->alg.base.cra_blocksize = alg->cra_blocksize; - inst->alg.base.cra_alignmask = alg->cra_alignmask; - - inst->alg.ivsize = alg->cra_blocksize; - inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; - inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; - - inst->alg.base.cra_ctxsize = sizeof(struct crypto_pcbc_ctx); - - inst->alg.init = crypto_pcbc_init_tfm; - inst->alg.exit = crypto_pcbc_exit_tfm; - - inst->alg.setkey = crypto_pcbc_setkey; inst->alg.encrypt = crypto_pcbc_encrypt; inst->alg.decrypt = crypto_pcbc_decrypt; - inst->free = crypto_pcbc_free; - err = skcipher_register_instance(tmpl, inst); if (err) - goto err_drop_spawn; + inst->free(inst); crypto_mod_put(alg); - -out: return err; - -err_drop_spawn: - crypto_drop_spawn(spawn); -err_put_alg: - crypto_mod_put(alg); -err_free_inst: - kfree(inst); - goto out; } static struct crypto_template crypto_pcbc_tmpl = { @@ -308,5 +195,5 @@ module_init(crypto_pcbc_module_init); module_exit(crypto_pcbc_module_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("PCBC block cipher algorithm"); +MODULE_DESCRIPTION("PCBC block cipher mode of operation"); MODULE_ALIAS_CRYPTO("pcbc"); diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c index cfc04e15fd97..0a6680ca8cb6 100644 --- a/crypto/rsa-pkcs1pad.c +++ b/crypto/rsa-pkcs1pad.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/crypto/seqiv.c b/crypto/seqiv.c index 64a412be255e..ed1b0e9f2436 100644 --- a/crypto/seqiv.c +++ b/crypto/seqiv.c @@ -89,13 +89,12 @@ static int seqiv_aead_encrypt(struct aead_request *req) if (unlikely(!IS_ALIGNED((unsigned long)info, crypto_aead_alignmask(geniv) + 1))) { - info = kmalloc(ivsize, req->base.flags & - CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL: - GFP_ATOMIC); + info = kmemdup(req->iv, ivsize, req->base.flags & + CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : + GFP_ATOMIC); if (!info) return -ENOMEM; - memcpy(info, req->iv, ivsize); compl = seqiv_aead_encrypt_complete; data = req; } diff --git a/crypto/shash.c b/crypto/shash.c index 44d297b82a8f..15b369c4745f 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, return err; } +static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg) +{ + if (crypto_shash_alg_has_setkey(alg) && + !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) + crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { @@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key, else err = shash->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + shash_set_needkey(tfm, shash); return err; + } crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -373,15 +382,14 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->final = shash_async_final; crt->finup = shash_async_finup; crt->digest = shash_async_digest; - crt->setkey = shash_async_setkey; + if (crypto_shash_alg_has_setkey(alg)) + crt->setkey = shash_async_setkey; crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); - if (alg->export) - crt->export = shash_async_export; - if (alg->import) - crt->import = shash_async_import; + crt->export = shash_async_export; + crt->import = shash_async_import; crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); @@ -395,9 +403,7 @@ static int crypto_shash_init_tfm(struct crypto_tfm *tfm) hash->descsize = alg->descsize; - if (crypto_shash_alg_has_setkey(alg) && - !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) - crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY); + shash_set_needkey(hash, alg); return 0; } @@ -464,6 +470,9 @@ static int shash_prepare_alg(struct shash_alg *alg) alg->statesize > HASH_MAX_STATESIZE) return -EINVAL; + if ((alg->export && !alg->import) || (alg->import && !alg->export)) + return -EINVAL; + base->cra_type = &crypto_shash_type; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags |= CRYPTO_ALG_TYPE_SHASH; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 2a969296bc24..bcf13d95f54a 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -585,6 +585,12 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg) return crypto_alg_extsize(alg); } +static void skcipher_set_needkey(struct crypto_skcipher *tfm) +{ + if (tfm->keysize) + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY); +} + static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { @@ -598,8 +604,10 @@ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm, err = crypto_blkcipher_setkey(blkcipher, key, keylen); crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) & CRYPTO_TFM_RES_MASK); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -677,8 +685,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm) skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher); skcipher->keysize = calg->cra_blkcipher.max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); return 0; } @@ -698,8 +705,10 @@ static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm, 
crypto_skcipher_set_flags(tfm, crypto_ablkcipher_get_flags(ablkcipher) & CRYPTO_TFM_RES_MASK); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -776,8 +785,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm) sizeof(struct ablkcipher_request); skcipher->keysize = calg->cra_ablkcipher.max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); return 0; } @@ -820,8 +828,10 @@ static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, else err = cipher->setkey(tfm, key, keylen); - if (err) + if (unlikely(err)) { + skcipher_set_needkey(tfm); return err; + } crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; @@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm) skcipher->ivsize = alg->ivsize; skcipher->keysize = alg->max_keysize; - if (skcipher->keysize) - crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY); + skcipher_set_needkey(skcipher); if (alg->exit) skcipher->base.exit = crypto_skcipher_exit_tfm; @@ -1058,5 +1067,136 @@ int skcipher_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(skcipher_register_instance); +static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct crypto_cipher *cipher = skcipher_cipher_simple(tfm); + int err; + + crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK); + crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) & + CRYPTO_TFM_REQ_MASK); + err = crypto_cipher_setkey(cipher, key, keylen); + crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) & + CRYPTO_TFM_RES_MASK); + return err; +} + +static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm) +{ + struct skcipher_instance *inst = skcipher_alg_instance(tfm); + struct crypto_spawn *spawn = skcipher_instance_ctx(inst); + struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); + struct crypto_cipher *cipher; + + cipher = crypto_spawn_cipher(spawn); + if (IS_ERR(cipher)) + return PTR_ERR(cipher); + + ctx->cipher = cipher; + return 0; +} + +static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm) +{ + struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm); + + crypto_free_cipher(ctx->cipher); +} + +static void skcipher_free_instance_simple(struct skcipher_instance *inst) +{ + crypto_drop_spawn(skcipher_instance_ctx(inst)); + kfree(inst); +} + +/** + * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode + * + * Allocate an skcipher_instance for a simple block cipher mode of operation, + * e.g. cbc or ecb. The instance context will have just a single crypto_spawn, + * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize, + * alignmask, and priority are set from the underlying cipher but can be + * overridden if needed. The tfm context defaults to skcipher_ctx_simple, and + * default ->setkey(), ->init(), and ->exit() methods are installed. + * + * @tmpl: the template being instantiated + * @tb: the template parameters + * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is + * returned here. It must be dropped with crypto_mod_put(). + * + * Return: a pointer to the new instance, or an ERR_PTR(). The caller still + * needs to register the instance. 
+ */ +struct skcipher_instance * +skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb, + struct crypto_alg **cipher_alg_ret) +{ + struct crypto_attr_type *algt; + struct crypto_alg *cipher_alg; + struct skcipher_instance *inst; + struct crypto_spawn *spawn; + u32 mask; + int err; + + algt = crypto_get_attr_type(tb); + if (IS_ERR(algt)) + return ERR_CAST(algt); + + if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask) + return ERR_PTR(-EINVAL); + + mask = CRYPTO_ALG_TYPE_MASK | + crypto_requires_off(algt->type, algt->mask, + CRYPTO_ALG_NEED_FALLBACK); + + cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); + if (IS_ERR(cipher_alg)) + return ERR_CAST(cipher_alg); + + inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); + if (!inst) { + err = -ENOMEM; + goto err_put_cipher_alg; + } + spawn = skcipher_instance_ctx(inst); + + err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name, + cipher_alg); + if (err) + goto err_free_inst; + + err = crypto_init_spawn(spawn, cipher_alg, + skcipher_crypto_instance(inst), + CRYPTO_ALG_TYPE_MASK); + if (err) + goto err_free_inst; + inst->free = skcipher_free_instance_simple; + + /* Default algorithm properties, can be overridden */ + inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize; + inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask; + inst->alg.base.cra_priority = cipher_alg->cra_priority; + inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize; + inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize; + inst->alg.ivsize = cipher_alg->cra_blocksize; + + /* Use skcipher_ctx_simple by default, can be overridden */ + inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple); + inst->alg.setkey = skcipher_setkey_simple; + inst->alg.init = skcipher_init_tfm_simple; + inst->alg.exit = skcipher_exit_tfm_simple; + + *cipher_alg_ret = cipher_alg; + return inst; + +err_free_inst: + kfree(inst); +err_put_cipher_alg: + crypto_mod_put(cipher_alg); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Symmetric key cipher type"); diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c index 03272a22afce..5a2eafed9c29 100644 --- a/crypto/streebog_generic.c +++ b/crypto/streebog_generic.c @@ -960,7 +960,7 @@ static int streebog_init(struct shash_desc *desc) memset(ctx, 0, sizeof(struct streebog_state)); for (i = 0; i < 8; i++) { if (digest_size == STREEBOG256_DIGEST_SIZE) - ctx->h.qword[i] = 0x0101010101010101ULL; + ctx->h.qword[i] = cpu_to_le64(0x0101010101010101ULL); } return 0; } diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index e7fb87e114a5..1ea2d5007ff5 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -76,8 +76,8 @@ static char *check[] = { "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", - "lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", - "streebog256", "streebog512", + "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", + "sha3-512", "streebog256", "streebog512", NULL }; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 0f684a414acb..8386038d67c7 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -5,6 +5,7 @@ * Copyright (c) 2002 Jean-Francois Dive * Copyright (c) 2007 Nokia Siemens Networks * Copyright (c) 2008 Herbert Xu + * Copyright (c) 2019 Google LLC * * Updated RFC4106 AES-GCM 
testing. * Authors: Aidan O'Mahony (aidan.o.mahony@intel.com) @@ -26,6 +27,8 @@ #include #include #include +#include +#include #include #include #include @@ -41,6 +44,16 @@ static bool notests; module_param(notests, bool, 0644); MODULE_PARM_DESC(notests, "disable crypto self-tests"); +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +static bool noextratests; +module_param(noextratests, bool, 0644); +MODULE_PARM_DESC(noextratests, "disable expensive crypto self-tests"); + +static unsigned int fuzz_iterations = 100; +module_param(fuzz_iterations, uint, 0644); +MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations"); +#endif + #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS /* a perfect nop */ @@ -58,18 +71,6 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) */ #define XBUFSIZE 8 -/* - * Indexes into the xbuf to simulate cross-page access. - */ -#define IDX1 32 -#define IDX2 32400 -#define IDX3 1511 -#define IDX4 8193 -#define IDX5 22222 -#define IDX6 17101 -#define IDX7 27333 -#define IDX8 3000 - /* * Used by test_cipher() */ @@ -77,10 +78,8 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) #define DECRYPT 0 struct aead_test_suite { - struct { - const struct aead_testvec *vecs; - unsigned int count; - } enc, dec; + const struct aead_testvec *vecs; + unsigned int count; }; struct cipher_test_suite { @@ -138,9 +137,6 @@ struct alg_test_desc { } suite; }; -static const unsigned int IDX[8] = { - IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 }; - static void hexdump(unsigned char *buf, unsigned int len) { print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, @@ -148,12 +144,12 @@ static void hexdump(unsigned char *buf, unsigned int len) buf, len, false); } -static int testmgr_alloc_buf(char *buf[XBUFSIZE]) +static int __testmgr_alloc_buf(char *buf[XBUFSIZE], int order) { int i; for (i = 0; i < XBUFSIZE; i++) { - buf[i] = (void *)__get_free_page(GFP_KERNEL); + buf[i] = (char *)__get_free_pages(GFP_KERNEL, order); if (!buf[i]) goto err_free_buf; } @@ -162,858 +158,1266 @@ static int testmgr_alloc_buf(char *buf[XBUFSIZE]) err_free_buf: while (i-- > 0) - free_page((unsigned long)buf[i]); + free_pages((unsigned long)buf[i], order); return -ENOMEM; } -static void testmgr_free_buf(char *buf[XBUFSIZE]) +static int testmgr_alloc_buf(char *buf[XBUFSIZE]) +{ + return __testmgr_alloc_buf(buf, 0); +} + +static void __testmgr_free_buf(char *buf[XBUFSIZE], int order) { int i; for (i = 0; i < XBUFSIZE; i++) - free_page((unsigned long)buf[i]); + free_pages((unsigned long)buf[i], order); } -static int ahash_guard_result(char *result, char c, int size) +static void testmgr_free_buf(char *buf[XBUFSIZE]) { - int i; + __testmgr_free_buf(buf, 0); +} - for (i = 0; i < size; i++) { - if (result[i] != c) - return -EINVAL; - } +#define TESTMGR_POISON_BYTE 0xfe +#define TESTMGR_POISON_LEN 16 - return 0; +static inline void testmgr_poison(void *addr, size_t len) +{ + memset(addr, TESTMGR_POISON_BYTE, len); } -static int ahash_partial_update(struct ahash_request **preq, - struct crypto_ahash *tfm, const struct hash_testvec *template, - void *hash_buff, int k, int temp, struct scatterlist *sg, - const char *algo, char *result, struct crypto_wait *wait) +/* Is the memory region still fully poisoned? 
*/ +static inline bool testmgr_is_poison(const void *addr, size_t len) { - char *state; - struct ahash_request *req; - int statesize, ret = -EINVAL; - static const unsigned char guard[] = { 0x00, 0xba, 0xad, 0x00 }; - int digestsize = crypto_ahash_digestsize(tfm); - - req = *preq; - statesize = crypto_ahash_statesize( - crypto_ahash_reqtfm(req)); - state = kmalloc(statesize + sizeof(guard), GFP_KERNEL); - if (!state) { - pr_err("alg: hash: Failed to alloc state for %s\n", algo); - goto out_nostate; - } - memcpy(state + statesize, guard, sizeof(guard)); - memset(result, 1, digestsize); - ret = crypto_ahash_export(req, state); - WARN_ON(memcmp(state + statesize, guard, sizeof(guard))); - if (ret) { - pr_err("alg: hash: Failed to export() for %s\n", algo); - goto out; - } - ret = ahash_guard_result(result, 1, digestsize); - if (ret) { - pr_err("alg: hash: Failed, export used req->result for %s\n", - algo); - goto out; - } - ahash_request_free(req); - req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!req) { - pr_err("alg: hash: Failed to alloc request for %s\n", algo); - goto out_noreq; - } - ahash_request_set_callback(req, - CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, wait); - - memcpy(hash_buff, template->plaintext + temp, - template->tap[k]); - sg_init_one(&sg[0], hash_buff, template->tap[k]); - ahash_request_set_crypt(req, sg, result, template->tap[k]); - ret = crypto_ahash_import(req, state); - if (ret) { - pr_err("alg: hash: Failed to import() for %s\n", algo); - goto out; - } - ret = ahash_guard_result(result, 1, digestsize); - if (ret) { - pr_err("alg: hash: Failed, import used req->result for %s\n", - algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_update(req), wait); - if (ret) - goto out; - *preq = req; - ret = 0; - goto out_noreq; -out: - ahash_request_free(req); -out_noreq: - kfree(state); -out_nostate: - return ret; + return memchr_inv(addr, TESTMGR_POISON_BYTE, len) == NULL; } -enum hash_test { - HASH_TEST_DIGEST, - HASH_TEST_FINAL, - HASH_TEST_FINUP +/* flush type for hash algorithms */ +enum flush_type { + /* merge with update of previous buffer(s) */ + FLUSH_TYPE_NONE = 0, + + /* update with previous buffer(s) before doing this one */ + FLUSH_TYPE_FLUSH, + + /* likewise, but also export and re-import the intermediate state */ + FLUSH_TYPE_REIMPORT, }; -static int __test_hash(struct crypto_ahash *tfm, - const struct hash_testvec *template, unsigned int tcount, - enum hash_test test_type, const int align_offset) -{ - const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)); - size_t digest_size = crypto_ahash_digestsize(tfm); - unsigned int i, j, k, temp; - struct scatterlist sg[8]; - char *result; - char *key; - struct ahash_request *req; - struct crypto_wait wait; - void *hash_buff; - char *xbuf[XBUFSIZE]; - int ret = -ENOMEM; +/* finalization function for hash algorithms */ +enum finalization_type { + FINALIZATION_TYPE_FINAL, /* use final() */ + FINALIZATION_TYPE_FINUP, /* use finup() */ + FINALIZATION_TYPE_DIGEST, /* use digest() */ +}; - result = kmalloc(digest_size, GFP_KERNEL); - if (!result) - return ret; - key = kmalloc(MAX_KEYLEN, GFP_KERNEL); - if (!key) - goto out_nobuf; - if (testmgr_alloc_buf(xbuf)) - goto out_nobuf; +#define TEST_SG_TOTAL 10000 - crypto_init_wait(&wait); +/** + * struct test_sg_division - description of a scatterlist entry + * + * This struct describes one entry of a scatterlist being constructed to check a + * crypto test vector. 
+ * + * @proportion_of_total: length of this chunk relative to the total length, + * given as a proportion out of TEST_SG_TOTAL so that it + * scales to fit any test vector + * @offset: byte offset into a 2-page buffer at which this chunk will start + * @offset_relative_to_alignmask: if true, add the algorithm's alignmask to the + * @offset + * @flush_type: for hashes, whether an update() should be done now vs. + * continuing to accumulate data + */ +struct test_sg_division { + unsigned int proportion_of_total; + unsigned int offset; + bool offset_relative_to_alignmask; + enum flush_type flush_type; +}; - req = ahash_request_alloc(tfm, GFP_KERNEL); - if (!req) { - printk(KERN_ERR "alg: hash: Failed to allocate request for " - "%s\n", algo); - goto out_noreq; +/** + * struct testvec_config - configuration for testing a crypto test vector + * + * This struct describes the data layout and other parameters with which each + * crypto test vector can be tested. + * + * @name: name of this config, logged for debugging purposes if a test fails + * @inplace: operate on the data in-place, if applicable for the algorithm type? + * @req_flags: extra request_flags, e.g. CRYPTO_TFM_REQ_MAY_SLEEP + * @src_divs: description of how to arrange the source scatterlist + * @dst_divs: description of how to arrange the dst scatterlist, if applicable + * for the algorithm type. Defaults to @src_divs if unset. + * @iv_offset: misalignment of the IV in the range [0..MAX_ALGAPI_ALIGNMASK+1], + * where 0 is aligned to a 2*(MAX_ALGAPI_ALIGNMASK+1) byte boundary + * @iv_offset_relative_to_alignmask: if true, add the algorithm's alignmask to + * the @iv_offset + * @finalization_type: what finalization function to use for hashes + */ +struct testvec_config { + const char *name; + bool inplace; + u32 req_flags; + struct test_sg_division src_divs[XBUFSIZE]; + struct test_sg_division dst_divs[XBUFSIZE]; + unsigned int iv_offset; + bool iv_offset_relative_to_alignmask; + enum finalization_type finalization_type; +}; + +#define TESTVEC_CONFIG_NAMELEN 192 + +/* + * The following are the lists of testvec_configs to test for each algorithm + * type when the basic crypto self-tests are enabled, i.e. when + * CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is unset. They aim to provide good test + * coverage, while keeping the test time much shorter than the full fuzz tests + * so that the basic tests can be enabled in a wider range of circumstances. 
+ */ + +/* Configs for skciphers and aeads */ +static const struct testvec_config default_cipher_testvec_configs[] = { + { + .name = "in-place", + .inplace = true, + .src_divs = { { .proportion_of_total = 10000 } }, + }, { + .name = "out-of-place", + .src_divs = { { .proportion_of_total = 10000 } }, + }, { + .name = "unaligned buffer, offset=1", + .src_divs = { { .proportion_of_total = 10000, .offset = 1 } }, + .iv_offset = 1, + }, { + .name = "buffer aligned only to alignmask", + .src_divs = { + { + .proportion_of_total = 10000, + .offset = 1, + .offset_relative_to_alignmask = true, + }, + }, + .iv_offset = 1, + .iv_offset_relative_to_alignmask = true, + }, { + .name = "two even aligned splits", + .src_divs = { + { .proportion_of_total = 5000 }, + { .proportion_of_total = 5000 }, + }, + }, { + .name = "uneven misaligned splits, may sleep", + .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP, + .src_divs = { + { .proportion_of_total = 1900, .offset = 33 }, + { .proportion_of_total = 3300, .offset = 7 }, + { .proportion_of_total = 4800, .offset = 18 }, + }, + .iv_offset = 3, + }, { + .name = "misaligned splits crossing pages, inplace", + .inplace = true, + .src_divs = { + { + .proportion_of_total = 7500, + .offset = PAGE_SIZE - 32 + }, { + .proportion_of_total = 2500, + .offset = PAGE_SIZE - 7 + }, + }, } - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); +}; - j = 0; - for (i = 0; i < tcount; i++) { - if (template[i].np) - continue; +static const struct testvec_config default_hash_testvec_configs[] = { + { + .name = "init+update+final aligned buffer", + .src_divs = { { .proportion_of_total = 10000 } }, + .finalization_type = FINALIZATION_TYPE_FINAL, + }, { + .name = "init+finup aligned buffer", + .src_divs = { { .proportion_of_total = 10000 } }, + .finalization_type = FINALIZATION_TYPE_FINUP, + }, { + .name = "digest aligned buffer", + .src_divs = { { .proportion_of_total = 10000 } }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "init+update+final misaligned buffer", + .src_divs = { { .proportion_of_total = 10000, .offset = 1 } }, + .finalization_type = FINALIZATION_TYPE_FINAL, + }, { + .name = "digest buffer aligned only to alignmask", + .src_divs = { + { + .proportion_of_total = 10000, + .offset = 1, + .offset_relative_to_alignmask = true, + }, + }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "init+update+update+final two even splits", + .src_divs = { + { .proportion_of_total = 5000 }, + { + .proportion_of_total = 5000, + .flush_type = FLUSH_TYPE_FLUSH, + }, + }, + .finalization_type = FINALIZATION_TYPE_FINAL, + }, { + .name = "digest uneven misaligned splits, may sleep", + .req_flags = CRYPTO_TFM_REQ_MAY_SLEEP, + .src_divs = { + { .proportion_of_total = 1900, .offset = 33 }, + { .proportion_of_total = 3300, .offset = 7 }, + { .proportion_of_total = 4800, .offset = 18 }, + }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "digest misaligned splits crossing pages", + .src_divs = { + { + .proportion_of_total = 7500, + .offset = PAGE_SIZE - 32, + }, { + .proportion_of_total = 2500, + .offset = PAGE_SIZE - 7, + }, + }, + .finalization_type = FINALIZATION_TYPE_DIGEST, + }, { + .name = "import/export", + .src_divs = { + { + .proportion_of_total = 6500, + .flush_type = FLUSH_TYPE_REIMPORT, + }, { + .proportion_of_total = 3500, + .flush_type = FLUSH_TYPE_REIMPORT, + }, + }, + .finalization_type = FINALIZATION_TYPE_FINAL, + } +}; - ret = -EINVAL; - if (WARN_ON(align_offset + template[i].psize > PAGE_SIZE)) - 
goto out; +static unsigned int count_test_sg_divisions(const struct test_sg_division *divs) +{ + unsigned int remaining = TEST_SG_TOTAL; + unsigned int ndivs = 0; - j++; - memset(result, 0, digest_size); + do { + remaining -= divs[ndivs++].proportion_of_total; + } while (remaining); - hash_buff = xbuf[0]; - hash_buff += align_offset; + return ndivs; +} - memcpy(hash_buff, template[i].plaintext, template[i].psize); - sg_init_one(&sg[0], hash_buff, template[i].psize); +static bool valid_sg_divisions(const struct test_sg_division *divs, + unsigned int count, bool *any_flushes_ret) +{ + unsigned int total = 0; + unsigned int i; - if (template[i].ksize) { - crypto_ahash_clear_flags(tfm, ~0); - if (template[i].ksize > MAX_KEYLEN) { - pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", - j, algo, template[i].ksize, MAX_KEYLEN); - ret = -EINVAL; - goto out; - } - memcpy(key, template[i].key, template[i].ksize); - ret = crypto_ahash_setkey(tfm, key, template[i].ksize); - if (ret) { - printk(KERN_ERR "alg: hash: setkey failed on " - "test %d for %s: ret=%d\n", j, algo, - -ret); - goto out; - } - } + for (i = 0; i < count && total != TEST_SG_TOTAL; i++) { + if (divs[i].proportion_of_total <= 0 || + divs[i].proportion_of_total > TEST_SG_TOTAL - total) + return false; + total += divs[i].proportion_of_total; + if (divs[i].flush_type != FLUSH_TYPE_NONE) + *any_flushes_ret = true; + } + return total == TEST_SG_TOTAL && + memchr_inv(&divs[i], 0, (count - i) * sizeof(divs[0])) == NULL; +} - ahash_request_set_crypt(req, sg, result, template[i].psize); - switch (test_type) { - case HASH_TEST_DIGEST: - ret = crypto_wait_req(crypto_ahash_digest(req), &wait); - if (ret) { - pr_err("alg: hash: digest failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - break; +/* + * Check whether the given testvec_config is valid. This isn't strictly needed + * since every testvec_config should be valid, but check anyway so that people + * don't unknowingly add broken configs that don't do what they wanted. 
+ */ +static bool valid_testvec_config(const struct testvec_config *cfg) +{ + bool any_flushes = false; - case HASH_TEST_FINAL: - memset(result, 1, digest_size); - ret = crypto_wait_req(crypto_ahash_init(req), &wait); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - ret = ahash_guard_result(result, 1, digest_size); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: used req->result\n", j, algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_update(req), &wait); - if (ret) { - pr_err("alg: hash: update failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - ret = ahash_guard_result(result, 1, digest_size); - if (ret) { - pr_err("alg: hash: update failed on test %d " - "for %s: used req->result\n", j, algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_final(req), &wait); - if (ret) { - pr_err("alg: hash: final failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - break; + if (cfg->name == NULL) + return false; - case HASH_TEST_FINUP: - memset(result, 1, digest_size); - ret = crypto_wait_req(crypto_ahash_init(req), &wait); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - ret = ahash_guard_result(result, 1, digest_size); - if (ret) { - pr_err("alg: hash: init failed on test %d " - "for %s: used req->result\n", j, algo); - goto out; - } - ret = crypto_wait_req(crypto_ahash_finup(req), &wait); - if (ret) { - pr_err("alg: hash: final failed on test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - break; - } + if (!valid_sg_divisions(cfg->src_divs, ARRAY_SIZE(cfg->src_divs), + &any_flushes)) + return false; - if (memcmp(result, template[i].digest, - crypto_ahash_digestsize(tfm))) { - printk(KERN_ERR "alg: hash: Test %d failed for %s\n", - j, algo); - hexdump(result, crypto_ahash_digestsize(tfm)); - ret = -EINVAL; - goto out; - } + if (cfg->dst_divs[0].proportion_of_total) { + if (!valid_sg_divisions(cfg->dst_divs, + ARRAY_SIZE(cfg->dst_divs), + &any_flushes)) + return false; + } else { + if (memchr_inv(cfg->dst_divs, 0, sizeof(cfg->dst_divs))) + return false; + /* defaults to dst_divs=src_divs */ } - if (test_type) - goto out; + if (cfg->iv_offset + + (cfg->iv_offset_relative_to_alignmask ? 
MAX_ALGAPI_ALIGNMASK : 0) > + MAX_ALGAPI_ALIGNMASK + 1) + return false; - j = 0; - for (i = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; + if (any_flushes && cfg->finalization_type == FINALIZATION_TYPE_DIGEST) + return false; - if (!template[i].np) - continue; + return true; +} - j++; - memset(result, 0, digest_size); +struct test_sglist { + char *bufs[XBUFSIZE]; + struct scatterlist sgl[XBUFSIZE]; + struct scatterlist sgl_saved[XBUFSIZE]; + struct scatterlist *sgl_ptr; + unsigned int nents; +}; - temp = 0; - sg_init_table(sg, template[i].np); - ret = -EINVAL; - for (k = 0; k < template[i].np; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].tap[k] > PAGE_SIZE)) - goto out; - sg_set_buf(&sg[k], - memcpy(xbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]), - template[i].plaintext + temp, - template[i].tap[k]), - template[i].tap[k]); - temp += template[i].tap[k]; - } - - if (template[i].ksize) { - if (template[i].ksize > MAX_KEYLEN) { - pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", - j, algo, template[i].ksize, MAX_KEYLEN); - ret = -EINVAL; - goto out; - } - crypto_ahash_clear_flags(tfm, ~0); - memcpy(key, template[i].key, template[i].ksize); - ret = crypto_ahash_setkey(tfm, key, template[i].ksize); - - if (ret) { - printk(KERN_ERR "alg: hash: setkey " - "failed on chunking test %d " - "for %s: ret=%d\n", j, algo, -ret); - goto out; - } - } +static int init_test_sglist(struct test_sglist *tsgl) +{ + return __testmgr_alloc_buf(tsgl->bufs, 1 /* two pages per buffer */); +} - ahash_request_set_crypt(req, sg, result, template[i].psize); - ret = crypto_wait_req(crypto_ahash_digest(req), &wait); - if (ret) { - pr_err("alg: hash: digest failed on chunking test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } +static void destroy_test_sglist(struct test_sglist *tsgl) +{ + return __testmgr_free_buf(tsgl->bufs, 1 /* two pages per buffer */); +} - if (memcmp(result, template[i].digest, - crypto_ahash_digestsize(tfm))) { - printk(KERN_ERR "alg: hash: Chunking test %d " - "failed for %s\n", j, algo); - hexdump(result, crypto_ahash_digestsize(tfm)); - ret = -EINVAL; - goto out; +/** + * build_test_sglist() - build a scatterlist for a crypto test + * + * @tsgl: the scatterlist to build. @tsgl->bufs[] contains an array of 2-page + * buffers which the scatterlist @tsgl->sgl[] will be made to point into. + * @divs: the layout specification on which the scatterlist will be based + * @alignmask: the algorithm's alignmask + * @total_len: the total length of the scatterlist to build in bytes + * @data: if non-NULL, the buffers will be filled with this data until it ends. + * Otherwise the buffers will be poisoned. In both cases, some bytes + * past the end of each buffer will be poisoned to help detect overruns. + * @out_divs: if non-NULL, the test_sg_division to which each scatterlist entry + * corresponds will be returned here. This will match @divs except + * that divisions resolving to a length of 0 are omitted as they are + * not included in the scatterlist. 
+ * + * Return: 0 or a -errno value + */ +static int build_test_sglist(struct test_sglist *tsgl, + const struct test_sg_division *divs, + const unsigned int alignmask, + const unsigned int total_len, + struct iov_iter *data, + const struct test_sg_division *out_divs[XBUFSIZE]) +{ + struct { + const struct test_sg_division *div; + size_t length; + } partitions[XBUFSIZE]; + const unsigned int ndivs = count_test_sg_divisions(divs); + unsigned int len_remaining = total_len; + unsigned int i; + + BUILD_BUG_ON(ARRAY_SIZE(partitions) != ARRAY_SIZE(tsgl->sgl)); + if (WARN_ON(ndivs > ARRAY_SIZE(partitions))) + return -EINVAL; + + /* Calculate the (div, length) pairs */ + tsgl->nents = 0; + for (i = 0; i < ndivs; i++) { + unsigned int len_this_sg = + min(len_remaining, + (total_len * divs[i].proportion_of_total + + TEST_SG_TOTAL / 2) / TEST_SG_TOTAL); + + if (len_this_sg != 0) { + partitions[tsgl->nents].div = &divs[i]; + partitions[tsgl->nents].length = len_this_sg; + tsgl->nents++; + len_remaining -= len_this_sg; } } + if (tsgl->nents == 0) { + partitions[tsgl->nents].div = &divs[0]; + partitions[tsgl->nents].length = 0; + tsgl->nents++; + } + partitions[tsgl->nents - 1].length += len_remaining; - /* partial update exercise */ - j = 0; - for (i = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; - - if (template[i].np < 2) - continue; + /* Set up the sgl entries and fill the data or poison */ + sg_init_table(tsgl->sgl, tsgl->nents); + for (i = 0; i < tsgl->nents; i++) { + unsigned int offset = partitions[i].div->offset; + void *addr; - j++; - memset(result, 0, digest_size); + if (partitions[i].div->offset_relative_to_alignmask) + offset += alignmask; - ret = -EINVAL; - hash_buff = xbuf[0]; - memcpy(hash_buff, template[i].plaintext, - template[i].tap[0]); - sg_init_one(&sg[0], hash_buff, template[i].tap[0]); - - if (template[i].ksize) { - crypto_ahash_clear_flags(tfm, ~0); - if (template[i].ksize > MAX_KEYLEN) { - pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n", - j, algo, template[i].ksize, MAX_KEYLEN); - ret = -EINVAL; - goto out; - } - memcpy(key, template[i].key, template[i].ksize); - ret = crypto_ahash_setkey(tfm, key, template[i].ksize); - if (ret) { - pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } + while (offset + partitions[i].length + TESTMGR_POISON_LEN > + 2 * PAGE_SIZE) { + if (WARN_ON(offset <= 0)) + return -EINVAL; + offset /= 2; } - ahash_request_set_crypt(req, sg, result, template[i].tap[0]); - ret = crypto_wait_req(crypto_ahash_init(req), &wait); - if (ret) { - pr_err("alg: hash: init failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } - ret = crypto_wait_req(crypto_ahash_update(req), &wait); - if (ret) { - pr_err("alg: hash: update failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } + addr = &tsgl->bufs[i][offset]; + sg_set_buf(&tsgl->sgl[i], addr, partitions[i].length); - temp = template[i].tap[0]; - for (k = 1; k < template[i].np; k++) { - ret = ahash_partial_update(&req, tfm, &template[i], - hash_buff, k, temp, &sg[0], algo, result, - &wait); - if (ret) { - pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out_noreq; - } - temp += template[i].tap[k]; - } - ret = crypto_wait_req(crypto_ahash_final(req), &wait); - if (ret) { - pr_err("alg: hash: final failed on test %d for %s: ret=%d\n", - j, algo, -ret); - goto out; - } - if (memcmp(result, 
template[i].digest, - crypto_ahash_digestsize(tfm))) { - pr_err("alg: hash: Partial Test %d failed for %s\n", - j, algo); - hexdump(result, crypto_ahash_digestsize(tfm)); - ret = -EINVAL; - goto out; + if (out_divs) + out_divs[i] = partitions[i].div; + + if (data) { + size_t copy_len, copied; + + copy_len = min(partitions[i].length, data->count); + copied = copy_from_iter(addr, copy_len, data); + if (WARN_ON(copied != copy_len)) + return -EINVAL; + testmgr_poison(addr + copy_len, partitions[i].length + + TESTMGR_POISON_LEN - copy_len); + } else { + testmgr_poison(addr, partitions[i].length + + TESTMGR_POISON_LEN); } } - ret = 0; - -out: - ahash_request_free(req); -out_noreq: - testmgr_free_buf(xbuf); -out_nobuf: - kfree(key); - kfree(result); - return ret; + sg_mark_end(&tsgl->sgl[tsgl->nents - 1]); + tsgl->sgl_ptr = tsgl->sgl; + memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0])); + return 0; } -static int test_hash(struct crypto_ahash *tfm, - const struct hash_testvec *template, - unsigned int tcount, enum hash_test test_type) +/* + * Verify that a scatterlist crypto operation produced the correct output. + * + * @tsgl: scatterlist containing the actual output + * @expected_output: buffer containing the expected output + * @len_to_check: length of @expected_output in bytes + * @unchecked_prefix_len: number of ignored bytes in @tsgl prior to real result + * @check_poison: verify that the poison bytes after each chunk are intact? + * + * Return: 0 if correct, -EINVAL if incorrect, -EOVERFLOW if buffer overrun. + */ +static int verify_correct_output(const struct test_sglist *tsgl, + const char *expected_output, + unsigned int len_to_check, + unsigned int unchecked_prefix_len, + bool check_poison) { - unsigned int alignmask; - int ret; + unsigned int i; - ret = __test_hash(tfm, template, tcount, test_type, 0); - if (ret) - return ret; + for (i = 0; i < tsgl->nents; i++) { + struct scatterlist *sg = &tsgl->sgl_ptr[i]; + unsigned int len = sg->length; + unsigned int offset = sg->offset; + const char *actual_output; - /* test unaligned buffers, check with one byte offset */ - ret = __test_hash(tfm, template, tcount, test_type, 1); - if (ret) - return ret; + if (unchecked_prefix_len) { + if (unchecked_prefix_len >= len) { + unchecked_prefix_len -= len; + continue; + } + offset += unchecked_prefix_len; + len -= unchecked_prefix_len; + unchecked_prefix_len = 0; + } + len = min(len, len_to_check); + actual_output = page_address(sg_page(sg)) + offset; + if (memcmp(expected_output, actual_output, len) != 0) + return -EINVAL; + if (check_poison && + !testmgr_is_poison(actual_output + len, TESTMGR_POISON_LEN)) + return -EOVERFLOW; + len_to_check -= len; + expected_output += len; + } + if (WARN_ON(len_to_check != 0)) + return -EINVAL; + return 0; +} - alignmask = crypto_tfm_alg_alignmask(&tfm->base); - if (alignmask) { - /* Check if alignment mask for tfm is correctly set. 
*/ - ret = __test_hash(tfm, template, tcount, test_type, - alignmask + 1); - if (ret) - return ret; - } +static bool is_test_sglist_corrupted(const struct test_sglist *tsgl) +{ + unsigned int i; - return 0; + for (i = 0; i < tsgl->nents; i++) { + if (tsgl->sgl[i].page_link != tsgl->sgl_saved[i].page_link) + return true; + if (tsgl->sgl[i].offset != tsgl->sgl_saved[i].offset) + return true; + if (tsgl->sgl[i].length != tsgl->sgl_saved[i].length) + return true; + } + return false; } -static int __test_aead(struct crypto_aead *tfm, int enc, - const struct aead_testvec *template, unsigned int tcount, - const bool diff_dst, const int align_offset) +struct cipher_test_sglists { + struct test_sglist src; + struct test_sglist dst; +}; + +static struct cipher_test_sglists *alloc_cipher_test_sglists(void) { - const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); - unsigned int i, j, k, n, temp; - int ret = -ENOMEM; - char *q; - char *key; - struct aead_request *req; - struct scatterlist *sg; - struct scatterlist *sgout; - const char *e, *d; - struct crypto_wait wait; - unsigned int authsize, iv_len; - void *input; - void *output; - void *assoc; - char *iv; - char *xbuf[XBUFSIZE]; - char *xoutbuf[XBUFSIZE]; - char *axbuf[XBUFSIZE]; + struct cipher_test_sglists *tsgls; - iv = kzalloc(MAX_IVLEN, GFP_KERNEL); - if (!iv) - return ret; - key = kmalloc(MAX_KEYLEN, GFP_KERNEL); - if (!key) - goto out_noxbuf; - if (testmgr_alloc_buf(xbuf)) - goto out_noxbuf; - if (testmgr_alloc_buf(axbuf)) - goto out_noaxbuf; - if (diff_dst && testmgr_alloc_buf(xoutbuf)) - goto out_nooutbuf; - - /* avoid "the frame size is larger than 1024 bytes" compiler warning */ - sg = kmalloc(array3_size(sizeof(*sg), 8, (diff_dst ? 4 : 2)), - GFP_KERNEL); - if (!sg) - goto out_nosg; - sgout = &sg[16]; - - if (diff_dst) - d = "-ddst"; - else - d = ""; + tsgls = kmalloc(sizeof(*tsgls), GFP_KERNEL); + if (!tsgls) + return NULL; - if (enc == ENCRYPT) - e = "encryption"; - else - e = "decryption"; + if (init_test_sglist(&tsgls->src) != 0) + goto fail_kfree; + if (init_test_sglist(&tsgls->dst) != 0) + goto fail_destroy_src; - crypto_init_wait(&wait); + return tsgls; - req = aead_request_alloc(tfm, GFP_KERNEL); - if (!req) { - pr_err("alg: aead%s: Failed to allocate request for %s\n", - d, algo); - goto out; - } +fail_destroy_src: + destroy_test_sglist(&tsgls->src); +fail_kfree: + kfree(tsgls); + return NULL; +} - aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); +static void free_cipher_test_sglists(struct cipher_test_sglists *tsgls) +{ + if (tsgls) { + destroy_test_sglist(&tsgls->src); + destroy_test_sglist(&tsgls->dst); + kfree(tsgls); + } +} - iv_len = crypto_aead_ivsize(tfm); +/* Build the src and dst scatterlists for an skcipher or AEAD test */ +static int build_cipher_test_sglists(struct cipher_test_sglists *tsgls, + const struct testvec_config *cfg, + unsigned int alignmask, + unsigned int src_total_len, + unsigned int dst_total_len, + const struct kvec *inputs, + unsigned int nr_inputs) +{ + struct iov_iter input; + int err; - for (i = 0, j = 0; i < tcount; i++) { - if (template[i].np) - continue; + iov_iter_kvec(&input, WRITE, inputs, nr_inputs, src_total_len); + err = build_test_sglist(&tsgls->src, cfg->src_divs, alignmask, + cfg->inplace ? 
+ max(dst_total_len, src_total_len) : + src_total_len, + &input, NULL); + if (err) + return err; - j++; + if (cfg->inplace) { + tsgls->dst.sgl_ptr = tsgls->src.sgl; + tsgls->dst.nents = tsgls->src.nents; + return 0; + } + return build_test_sglist(&tsgls->dst, + cfg->dst_divs[0].proportion_of_total ? + cfg->dst_divs : cfg->src_divs, + alignmask, dst_total_len, NULL, NULL); +} - /* some templates have no input data but they will - * touch input - */ - input = xbuf[0]; - input += align_offset; - assoc = axbuf[0]; +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS +static char *generate_random_sgl_divisions(struct test_sg_division *divs, + size_t max_divs, char *p, char *end, + bool gen_flushes) +{ + struct test_sg_division *div = divs; + unsigned int remaining = TEST_SG_TOTAL; - ret = -EINVAL; - if (WARN_ON(align_offset + template[i].ilen > - PAGE_SIZE || template[i].alen > PAGE_SIZE)) - goto out; + do { + unsigned int this_len; - memcpy(input, template[i].input, template[i].ilen); - memcpy(assoc, template[i].assoc, template[i].alen); - if (template[i].iv) - memcpy(iv, template[i].iv, iv_len); + if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0) + this_len = remaining; else - memset(iv, 0, iv_len); + this_len = 1 + (prandom_u32() % remaining); + div->proportion_of_total = this_len; - crypto_aead_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); - - if (template[i].klen > MAX_KEYLEN) { - pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", - d, j, algo, template[i].klen, - MAX_KEYLEN); - ret = -EINVAL; - goto out; + if (prandom_u32() % 4 == 0) + div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128); + else if (prandom_u32() % 2 == 0) + div->offset = prandom_u32() % 32; + else + div->offset = prandom_u32() % PAGE_SIZE; + if (prandom_u32() % 8 == 0) + div->offset_relative_to_alignmask = true; + + div->flush_type = FLUSH_TYPE_NONE; + if (gen_flushes) { + switch (prandom_u32() % 4) { + case 0: + div->flush_type = FLUSH_TYPE_REIMPORT; + break; + case 1: + div->flush_type = FLUSH_TYPE_FLUSH; + break; + } } - memcpy(key, template[i].key, template[i].klen); - ret = crypto_aead_setkey(tfm, key, template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n", - d, j, algo, crypto_aead_get_flags(tfm)); - goto out; - } else if (ret) - continue; + BUILD_BUG_ON(TEST_SG_TOTAL != 10000); /* for "%u.%u%%" */ + p += scnprintf(p, end - p, "%s%u.%u%%@%s+%u%s", + div->flush_type == FLUSH_TYPE_NONE ? "" : + div->flush_type == FLUSH_TYPE_FLUSH ? + " " : " ", + this_len / 100, this_len % 100, + div->offset_relative_to_alignmask ? + "alignmask" : "", + div->offset, this_len == remaining ? "" : ", "); + remaining -= this_len; + div++; + } while (remaining); + + return p; +} - authsize = abs(template[i].rlen - template[i].ilen); - ret = crypto_aead_setauthsize(tfm, authsize); - if (ret) { - pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n", - d, authsize, j, algo); - goto out; - } +/* Generate a random testvec_config for fuzz testing */ +static void generate_random_testvec_config(struct testvec_config *cfg, + char *name, size_t max_namelen) +{ + char *p = name; + char * const end = name + max_namelen; - k = !!template[i].alen; - sg_init_table(sg, k + 1); - sg_set_buf(&sg[0], assoc, template[i].alen); - sg_set_buf(&sg[k], input, - template[i].ilen + (enc ? 
authsize : 0)); - output = input; + memset(cfg, 0, sizeof(*cfg)); - if (diff_dst) { - sg_init_table(sgout, k + 1); - sg_set_buf(&sgout[0], assoc, template[i].alen); + cfg->name = name; - output = xoutbuf[0]; - output += align_offset; - sg_set_buf(&sgout[k], output, - template[i].rlen + (enc ? 0 : authsize)); - } + p += scnprintf(p, end - p, "random:"); - aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].ilen, iv); + if (prandom_u32() % 2 == 0) { + cfg->inplace = true; + p += scnprintf(p, end - p, " inplace"); + } - aead_request_set_ad(req, template[i].alen); + if (prandom_u32() % 2 == 0) { + cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP; + p += scnprintf(p, end - p, " may_sleep"); + } - ret = crypto_wait_req(enc ? crypto_aead_encrypt(req) - : crypto_aead_decrypt(req), &wait); + switch (prandom_u32() % 4) { + case 0: + cfg->finalization_type = FINALIZATION_TYPE_FINAL; + p += scnprintf(p, end - p, " use_final"); + break; + case 1: + cfg->finalization_type = FINALIZATION_TYPE_FINUP; + p += scnprintf(p, end - p, " use_finup"); + break; + default: + cfg->finalization_type = FINALIZATION_TYPE_DIGEST; + p += scnprintf(p, end - p, " use_digest"); + break; + } - switch (ret) { - case 0: - if (template[i].novrfy) { - /* verification was supposed to fail */ - pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n", - d, e, j, algo); - /* so really, we got a bad message */ - ret = -EBADMSG; - goto out; - } - break; - case -EBADMSG: - if (template[i].novrfy) - /* verification failure was expected */ - continue; - /* fall through */ - default: - pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n", - d, e, j, algo, -ret); - goto out; - } + p += scnprintf(p, end - p, " src_divs=["); + p = generate_random_sgl_divisions(cfg->src_divs, + ARRAY_SIZE(cfg->src_divs), p, end, + (cfg->finalization_type != + FINALIZATION_TYPE_DIGEST)); + p += scnprintf(p, end - p, "]"); - q = output; - if (memcmp(q, template[i].result, template[i].rlen)) { - pr_err("alg: aead%s: Test %d failed on %s for %s\n", - d, j, e, algo); - hexdump(q, template[i].rlen); - ret = -EINVAL; - goto out; - } + if (!cfg->inplace && prandom_u32() % 2 == 0) { + p += scnprintf(p, end - p, " dst_divs=["); + p = generate_random_sgl_divisions(cfg->dst_divs, + ARRAY_SIZE(cfg->dst_divs), + p, end, false); + p += scnprintf(p, end - p, "]"); } - for (i = 0, j = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; + if (prandom_u32() % 2 == 0) { + cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); + p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset); + } - if (!template[i].np) - continue; + WARN_ON_ONCE(!valid_testvec_config(cfg)); +} +#endif /* CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ - j++; +static int check_nonfinal_hash_op(const char *op, int err, + u8 *result, unsigned int digestsize, + const char *driver, unsigned int vec_num, + const struct testvec_config *cfg) +{ + if (err) { + pr_err("alg: hash: %s %s() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, op, err, vec_num, cfg->name); + return err; + } + if (!testmgr_is_poison(result, digestsize)) { + pr_err("alg: hash: %s %s() used result buffer on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + return 0; +} - if (template[i].iv) - memcpy(iv, template[i].iv, iv_len); - else - memset(iv, 0, MAX_IVLEN); +static int test_hash_vec_cfg(const char *driver, + const struct hash_testvec *vec, + unsigned int vec_num, + const 
struct testvec_config *cfg, + struct ahash_request *req, + struct test_sglist *tsgl, + u8 *hashstate) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + const unsigned int alignmask = crypto_ahash_alignmask(tfm); + const unsigned int digestsize = crypto_ahash_digestsize(tfm); + const unsigned int statesize = crypto_ahash_statesize(tfm); + const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; + const struct test_sg_division *divs[XBUFSIZE]; + DECLARE_CRYPTO_WAIT(wait); + struct kvec _input; + struct iov_iter input; + unsigned int i; + struct scatterlist *pending_sgl; + unsigned int pending_len; + u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN]; + int err; - crypto_aead_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); - if (template[i].klen > MAX_KEYLEN) { - pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n", - d, j, algo, template[i].klen, MAX_KEYLEN); - ret = -EINVAL; - goto out; + /* Set the key, if specified */ + if (vec->ksize) { + err = crypto_ahash_setkey(tfm, vec->key, vec->ksize); + if (err) { + pr_err("alg: hash: %s setkey failed with err %d on test vector %u; flags=%#x\n", + driver, err, vec_num, + crypto_ahash_get_flags(tfm)); + return err; } - memcpy(key, template[i].key, template[i].klen); - - ret = crypto_aead_setkey(tfm, key, template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n", - d, j, algo, crypto_aead_get_flags(tfm)); - goto out; - } else if (ret) - continue; + } - authsize = abs(template[i].rlen - template[i].ilen); + /* Build the scatterlist for the source data */ + _input.iov_base = (void *)vec->plaintext; + _input.iov_len = vec->psize; + iov_iter_kvec(&input, WRITE, &_input, 1, vec->psize); + err = build_test_sglist(tsgl, cfg->src_divs, alignmask, vec->psize, + &input, divs); + if (err) { + pr_err("alg: hash: %s: error preparing scatterlist for test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return err; + } - ret = -EINVAL; - sg_init_table(sg, template[i].anp + template[i].np); - if (diff_dst) - sg_init_table(sgout, template[i].anp + template[i].np); + /* Do the actual hashing */ - ret = -EINVAL; - for (k = 0, temp = 0; k < template[i].anp; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].atap[k] > PAGE_SIZE)) - goto out; - sg_set_buf(&sg[k], - memcpy(axbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]), - template[i].assoc + temp, - template[i].atap[k]), - template[i].atap[k]); - if (diff_dst) - sg_set_buf(&sgout[k], - axbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]), - template[i].atap[k]); - temp += template[i].atap[k]; - } - - for (k = 0, temp = 0; k < template[i].np; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].tap[k] > PAGE_SIZE)) - goto out; + testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); + testmgr_poison(result, digestsize + TESTMGR_POISON_LEN); - q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); - memcpy(q, template[i].input + temp, template[i].tap[k]); - sg_set_buf(&sg[template[i].anp + k], - q, template[i].tap[k]); + if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST) { + /* Just using digest() */ + ahash_request_set_callback(req, req_flags, crypto_req_done, + &wait); + ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize); + err = crypto_wait_req(crypto_ahash_digest(req), &wait); + if (err) { + pr_err("alg: hash: %s digest() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, err, vec_num, cfg->name); 
+ return err; + } + goto result_ready; + } - if (diff_dst) { - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + /* Using init(), zero or more update(), then final() or finup() */ - memset(q, 0, template[i].tap[k]); + ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); + ahash_request_set_crypt(req, NULL, result, 0); + err = crypto_wait_req(crypto_ahash_init(req), &wait); + err = check_nonfinal_hash_op("init", err, result, digestsize, + driver, vec_num, cfg); + if (err) + return err; - sg_set_buf(&sgout[template[i].anp + k], - q, template[i].tap[k]); + pending_sgl = NULL; + pending_len = 0; + for (i = 0; i < tsgl->nents; i++) { + if (divs[i]->flush_type != FLUSH_TYPE_NONE && + pending_sgl != NULL) { + /* update() with the pending data */ + ahash_request_set_callback(req, req_flags, + crypto_req_done, &wait); + ahash_request_set_crypt(req, pending_sgl, result, + pending_len); + err = crypto_wait_req(crypto_ahash_update(req), &wait); + err = check_nonfinal_hash_op("update", err, + result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + pending_sgl = NULL; + pending_len = 0; + } + if (divs[i]->flush_type == FLUSH_TYPE_REIMPORT) { + /* Test ->export() and ->import() */ + testmgr_poison(hashstate + statesize, + TESTMGR_POISON_LEN); + err = crypto_ahash_export(req, hashstate); + err = check_nonfinal_hash_op("export", err, + result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + if (!testmgr_is_poison(hashstate + statesize, + TESTMGR_POISON_LEN)) { + pr_err("alg: hash: %s export() overran state buffer on test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return -EOVERFLOW; } - n = template[i].tap[k]; - if (k == template[i].np - 1 && enc) - n += authsize; - if (offset_in_page(q) + n < PAGE_SIZE) - q[n] = 0; - - temp += template[i].tap[k]; + testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); + err = crypto_ahash_import(req, hashstate); + err = check_nonfinal_hash_op("import", err, + result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + } + if (pending_sgl == NULL) + pending_sgl = &tsgl->sgl[i]; + pending_len += tsgl->sgl[i].length; + } + + ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); + ahash_request_set_crypt(req, pending_sgl, result, pending_len); + if (cfg->finalization_type == FINALIZATION_TYPE_FINAL) { + /* finish with update() and final() */ + err = crypto_wait_req(crypto_ahash_update(req), &wait); + err = check_nonfinal_hash_op("update", err, result, digestsize, + driver, vec_num, cfg); + if (err) + return err; + err = crypto_wait_req(crypto_ahash_final(req), &wait); + if (err) { + pr_err("alg: hash: %s final() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, err, vec_num, cfg->name); + return err; } - - ret = crypto_aead_setauthsize(tfm, authsize); - if (ret) { - pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n", - d, authsize, j, algo); - goto out; + } else { + /* finish with finup() */ + err = crypto_wait_req(crypto_ahash_finup(req), &wait); + if (err) { + pr_err("alg: hash: %s finup() failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, err, vec_num, cfg->name); + return err; } + } - if (enc) { - if (WARN_ON(sg[template[i].anp + k - 1].offset + - sg[template[i].anp + k - 1].length + - authsize > PAGE_SIZE)) { - ret = -EINVAL; - goto out; - } +result_ready: + /* Check that the algorithm produced the correct digest */ + if (memcmp(result, vec->digest, digestsize) != 0) { + pr_err("alg: hash: %s test failed (wrong result) 
on test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return -EINVAL; + } + if (!testmgr_is_poison(&result[digestsize], TESTMGR_POISON_LEN)) { + pr_err("alg: hash: %s overran result buffer on test vector %u, cfg=\"%s\"\n", + driver, vec_num, cfg->name); + return -EOVERFLOW; + } + + return 0; +} + +static int test_hash_vec(const char *driver, const struct hash_testvec *vec, + unsigned int vec_num, struct ahash_request *req, + struct test_sglist *tsgl, u8 *hashstate) +{ + unsigned int i; + int err; + + for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) { + err = test_hash_vec_cfg(driver, vec, vec_num, + &default_hash_testvec_configs[i], + req, tsgl, hashstate); + if (err) + return err; + } + +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + if (!noextratests) { + struct testvec_config cfg; + char cfgname[TESTVEC_CONFIG_NAMELEN]; - if (diff_dst) - sgout[template[i].anp + k - 1].length += - authsize; - sg[template[i].anp + k - 1].length += authsize; + for (i = 0; i < fuzz_iterations; i++) { + generate_random_testvec_config(&cfg, cfgname, + sizeof(cfgname)); + err = test_hash_vec_cfg(driver, vec, vec_num, &cfg, + req, tsgl, hashstate); + if (err) + return err; } + } +#endif + return 0; +} - aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].ilen, - iv); +static int __alg_test_hash(const struct hash_testvec *vecs, + unsigned int num_vecs, const char *driver, + u32 type, u32 mask) +{ + struct crypto_ahash *tfm; + struct ahash_request *req = NULL; + struct test_sglist *tsgl = NULL; + u8 *hashstate = NULL; + unsigned int i; + int err; - aead_request_set_ad(req, template[i].alen); + tfm = crypto_alloc_ahash(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: hash: failed to allocate transform for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } - ret = crypto_wait_req(enc ? 
crypto_aead_encrypt(req) - : crypto_aead_decrypt(req), &wait); + req = ahash_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("alg: hash: failed to allocate request for %s\n", + driver); + err = -ENOMEM; + goto out; + } - switch (ret) { - case 0: - if (template[i].novrfy) { - /* verification was supposed to fail */ - pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n", - d, e, j, algo); - /* so really, we got a bad message */ - ret = -EBADMSG; - goto out; - } - break; - case -EBADMSG: - if (template[i].novrfy) - /* verification failure was expected */ - continue; - /* fall through */ - default: - pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n", - d, e, j, algo, -ret); + tsgl = kmalloc(sizeof(*tsgl), GFP_KERNEL); + if (!tsgl || init_test_sglist(tsgl) != 0) { + pr_err("alg: hash: failed to allocate test buffers for %s\n", + driver); + kfree(tsgl); + tsgl = NULL; + err = -ENOMEM; + goto out; + } + + hashstate = kmalloc(crypto_ahash_statesize(tfm) + TESTMGR_POISON_LEN, + GFP_KERNEL); + if (!hashstate) { + pr_err("alg: hash: failed to allocate hash state buffer for %s\n", + driver); + err = -ENOMEM; + goto out; + } + + for (i = 0; i < num_vecs; i++) { + err = test_hash_vec(driver, &vecs[i], i, req, tsgl, hashstate); + if (err) goto out; + } + err = 0; +out: + kfree(hashstate); + if (tsgl) { + destroy_test_sglist(tsgl); + kfree(tsgl); + } + ahash_request_free(req); + crypto_free_ahash(tfm); + return err; +} + +static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) +{ + const struct hash_testvec *template = desc->suite.hash.vecs; + unsigned int tcount = desc->suite.hash.count; + unsigned int nr_unkeyed, nr_keyed; + int err; + + /* + * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests + * first, before setting a key on the tfm. To make this easier, we + * require that the unkeyed test vectors (if any) are listed first. + */ + + for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) { + if (template[nr_unkeyed].ksize) + break; + } + for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) { + if (!template[nr_unkeyed + nr_keyed].ksize) { + pr_err("alg: hash: test vectors for %s out of order, " + "unkeyed ones must come first\n", desc->alg); + return -EINVAL; } + } - ret = -EINVAL; - for (k = 0, temp = 0; k < template[i].np; k++) { - if (diff_dst) - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); - else - q = xbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + err = 0; + if (nr_unkeyed) { + err = __alg_test_hash(template, nr_unkeyed, driver, type, mask); + template += nr_unkeyed; + } - n = template[i].tap[k]; - if (k == template[i].np - 1) - n += enc ? 
authsize : -authsize; + if (!err && nr_keyed) + err = __alg_test_hash(template, nr_keyed, driver, type, mask); - if (memcmp(q, template[i].result + temp, n)) { - pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n", - d, j, e, k, algo); - hexdump(q, n); - goto out; - } + return err; +} - q += n; - if (k == template[i].np - 1 && !enc) { - if (!diff_dst && - memcmp(q, template[i].input + - temp + n, authsize)) - n = authsize; - else - n = 0; - } else { - for (n = 0; offset_in_page(q + n) && q[n]; n++) - ; - } - if (n) { - pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", - d, j, e, k, algo, n); - hexdump(q, n); - goto out; - } +static int test_aead_vec_cfg(const char *driver, int enc, + const struct aead_testvec *vec, + unsigned int vec_num, + const struct testvec_config *cfg, + struct aead_request *req, + struct cipher_test_sglists *tsgls) +{ + struct crypto_aead *tfm = crypto_aead_reqtfm(req); + const unsigned int alignmask = crypto_aead_alignmask(tfm); + const unsigned int ivsize = crypto_aead_ivsize(tfm); + const unsigned int authsize = vec->clen - vec->plen; + const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; + const char *op = enc ? "encryption" : "decryption"; + DECLARE_CRYPTO_WAIT(wait); + u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN]; + u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) + + cfg->iv_offset + + (cfg->iv_offset_relative_to_alignmask ? alignmask : 0); + struct kvec input[2]; + int err; - temp += template[i].tap[k]; - } + /* Set the key */ + if (vec->wk) + crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + else + crypto_aead_clear_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + err = crypto_aead_setkey(tfm, vec->key, vec->klen); + if (err) { + if (vec->fail) /* expectedly failed to set key? */ + return 0; + pr_err("alg: aead: %s setkey failed with err %d on test vector %u; flags=%#x\n", + driver, err, vec_num, crypto_aead_get_flags(tfm)); + return err; + } + if (vec->fail) { + pr_err("alg: aead: %s setkey unexpectedly succeeded on test vector %u\n", + driver, vec_num); + return -EINVAL; } - ret = 0; + /* Set the authentication tag size */ + err = crypto_aead_setauthsize(tfm, authsize); + if (err) { + pr_err("alg: aead: %s setauthsize failed with err %d on test vector %u\n", + driver, err, vec_num); + return err; + } -out: - aead_request_free(req); - kfree(sg); -out_nosg: - if (diff_dst) - testmgr_free_buf(xoutbuf); -out_nooutbuf: - testmgr_free_buf(axbuf); -out_noaxbuf: - testmgr_free_buf(xbuf); -out_noxbuf: - kfree(key); - kfree(iv); - return ret; + /* The IV must be copied to a buffer, as the algorithm may modify it */ + if (WARN_ON(ivsize > MAX_IVLEN)) + return -EINVAL; + if (vec->iv) + memcpy(iv, vec->iv, ivsize); + else + memset(iv, 0, ivsize); + + /* Build the src/dst scatterlists */ + input[0].iov_base = (void *)vec->assoc; + input[0].iov_len = vec->alen; + input[1].iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext; + input[1].iov_len = enc ? vec->plen : vec->clen; + err = build_cipher_test_sglists(tsgls, cfg, alignmask, + vec->alen + (enc ? vec->plen : + vec->clen), + vec->alen + (enc ? 
vec->clen : + vec->plen), + input, 2); + if (err) { + pr_err("alg: aead: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + + /* Do the actual encryption or decryption */ + testmgr_poison(req->__ctx, crypto_aead_reqsize(tfm)); + aead_request_set_callback(req, req_flags, crypto_req_done, &wait); + aead_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr, + enc ? vec->plen : vec->clen, iv); + aead_request_set_ad(req, vec->alen); + err = crypto_wait_req(enc ? crypto_aead_encrypt(req) : + crypto_aead_decrypt(req), &wait); + + aead_request_set_tfm(req, tfm); /* TODO: get rid of this */ + + if (err) { + if (err == -EBADMSG && vec->novrfy) + return 0; + pr_err("alg: aead: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, op, err, vec_num, cfg->name); + return err; + } + if (vec->novrfy) { + pr_err("alg: aead: %s %s unexpectedly succeeded on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + + /* Check that the algorithm didn't overwrite things it shouldn't have */ + if (req->cryptlen != (enc ? vec->plen : vec->clen) || + req->assoclen != vec->alen || + req->iv != iv || + req->src != tsgls->src.sgl_ptr || + req->dst != tsgls->dst.sgl_ptr || + crypto_aead_reqtfm(req) != tfm || + req->base.complete != crypto_req_done || + req->base.flags != req_flags || + req->base.data != &wait) { + pr_err("alg: aead: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + if (req->cryptlen != (enc ? vec->plen : vec->clen)) + pr_err("alg: aead: changed 'req->cryptlen'\n"); + if (req->assoclen != vec->alen) + pr_err("alg: aead: changed 'req->assoclen'\n"); + if (req->iv != iv) + pr_err("alg: aead: changed 'req->iv'\n"); + if (req->src != tsgls->src.sgl_ptr) + pr_err("alg: aead: changed 'req->src'\n"); + if (req->dst != tsgls->dst.sgl_ptr) + pr_err("alg: aead: changed 'req->dst'\n"); + if (crypto_aead_reqtfm(req) != tfm) + pr_err("alg: aead: changed 'req->base.tfm'\n"); + if (req->base.complete != crypto_req_done) + pr_err("alg: aead: changed 'req->base.complete'\n"); + if (req->base.flags != req_flags) + pr_err("alg: aead: changed 'req->base.flags'\n"); + if (req->base.data != &wait) + pr_err("alg: aead: changed 'req->base.data'\n"); + return -EINVAL; + } + if (is_test_sglist_corrupted(&tsgls->src)) { + pr_err("alg: aead: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + if (tsgls->dst.sgl_ptr != tsgls->src.sgl && + is_test_sglist_corrupted(&tsgls->dst)) { + pr_err("alg: aead: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + + /* Check for the correct output (ciphertext or plaintext) */ + err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext, + enc ? 
vec->clen : vec->plen, + vec->alen, enc || !cfg->inplace); + if (err == -EOVERFLOW) { + pr_err("alg: aead: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + if (err) { + pr_err("alg: aead: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + + return 0; } -static int test_aead(struct crypto_aead *tfm, int enc, - const struct aead_testvec *template, unsigned int tcount) +static int test_aead_vec(const char *driver, int enc, + const struct aead_testvec *vec, unsigned int vec_num, + struct aead_request *req, + struct cipher_test_sglists *tsgls) { - unsigned int alignmask; - int ret; + unsigned int i; + int err; - /* test 'dst == src' case */ - ret = __test_aead(tfm, enc, template, tcount, false, 0); - if (ret) - return ret; + if (enc && vec->novrfy) + return 0; - /* test 'dst != src' case */ - ret = __test_aead(tfm, enc, template, tcount, true, 0); - if (ret) - return ret; + for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) { + err = test_aead_vec_cfg(driver, enc, vec, vec_num, + &default_cipher_testvec_configs[i], + req, tsgls); + if (err) + return err; + } - /* test unaligned buffers, check with one byte offset */ - ret = __test_aead(tfm, enc, template, tcount, true, 1); - if (ret) - return ret; +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + if (!noextratests) { + struct testvec_config cfg; + char cfgname[TESTVEC_CONFIG_NAMELEN]; - alignmask = crypto_tfm_alg_alignmask(&tfm->base); - if (alignmask) { - /* Check if alignment mask for tfm is correctly set. */ - ret = __test_aead(tfm, enc, template, tcount, true, - alignmask + 1); - if (ret) - return ret; + for (i = 0; i < fuzz_iterations; i++) { + generate_random_testvec_config(&cfg, cfgname, + sizeof(cfgname)); + err = test_aead_vec_cfg(driver, enc, vec, vec_num, + &cfg, req, tsgls); + if (err) + return err; + } } +#endif + return 0; +} +static int test_aead(const char *driver, int enc, + const struct aead_test_suite *suite, + struct aead_request *req, + struct cipher_test_sglists *tsgls) +{ + unsigned int i; + int err; + + for (i = 0; i < suite->count; i++) { + err = test_aead_vec(driver, enc, &suite->vecs[i], i, req, + tsgls); + if (err) + return err; + } return 0; } +static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, + u32 type, u32 mask) +{ + const struct aead_test_suite *suite = &desc->suite.aead; + struct crypto_aead *tfm; + struct aead_request *req = NULL; + struct cipher_test_sglists *tsgls = NULL; + int err; + + if (suite->count <= 0) { + pr_err("alg: aead: empty test suite for %s\n", driver); + return -EINVAL; + } + + tfm = crypto_alloc_aead(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: aead: failed to allocate transform for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + + req = aead_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("alg: aead: failed to allocate request for %s\n", + driver); + err = -ENOMEM; + goto out; + } + + tsgls = alloc_cipher_test_sglists(); + if (!tsgls) { + pr_err("alg: aead: failed to allocate test buffers for %s\n", + driver); + err = -ENOMEM; + goto out; + } + + err = test_aead(driver, ENCRYPT, suite, req, tsgls); + if (err) + goto out; + + err = test_aead(driver, DECRYPT, suite, req, tsgls); +out: + free_cipher_test_sglists(tsgls); + aead_request_free(req); + crypto_free_aead(tfm); + return err; +} + static int test_cipher(struct crypto_cipher *tfm, int enc, const struct cipher_testvec *template, 
unsigned int tcount) @@ -1037,8 +1441,6 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, j = 0; for (i = 0; i < tcount; i++) { - if (template[i].np) - continue; if (fips_enabled && template[i].fips_skip) continue; @@ -1056,7 +1458,7 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, crypto_cipher_clear_flags(tfm, ~0); if (template[i].wk) - crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); + crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); ret = crypto_cipher_setkey(tfm, template[i].key, template[i].klen); @@ -1092,288 +1494,261 @@ static int test_cipher(struct crypto_cipher *tfm, int enc, out: testmgr_free_buf(xbuf); -out_nobuf: - return ret; -} - -static int __test_skcipher(struct crypto_skcipher *tfm, int enc, - const struct cipher_testvec *template, - unsigned int tcount, - const bool diff_dst, const int align_offset) -{ - const char *algo = - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)); - unsigned int i, j, k, n, temp; - char *q; - struct skcipher_request *req; - struct scatterlist sg[8]; - struct scatterlist sgout[8]; - const char *e, *d; - struct crypto_wait wait; - const char *input, *result; - void *data; - char iv[MAX_IVLEN]; - char *xbuf[XBUFSIZE]; - char *xoutbuf[XBUFSIZE]; - int ret = -ENOMEM; - unsigned int ivsize = crypto_skcipher_ivsize(tfm); - - if (testmgr_alloc_buf(xbuf)) - goto out_nobuf; - - if (diff_dst && testmgr_alloc_buf(xoutbuf)) - goto out_nooutbuf; - - if (diff_dst) - d = "-ddst"; - else - d = ""; - - if (enc == ENCRYPT) - e = "encryption"; - else - e = "decryption"; - - crypto_init_wait(&wait); - - req = skcipher_request_alloc(tfm, GFP_KERNEL); - if (!req) { - pr_err("alg: skcipher%s: Failed to allocate request for %s\n", - d, algo); - goto out; - } - - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); - - j = 0; - for (i = 0; i < tcount; i++) { - if (template[i].np && !template[i].also_non_np) - continue; - - if (fips_enabled && template[i].fips_skip) - continue; - - if (template[i].iv && !(template[i].generates_iv && enc)) - memcpy(iv, template[i].iv, ivsize); - else - memset(iv, 0, MAX_IVLEN); - - input = enc ? template[i].ptext : template[i].ctext; - result = enc ? template[i].ctext : template[i].ptext; - j++; - ret = -EINVAL; - if (WARN_ON(align_offset + template[i].len > PAGE_SIZE)) - goto out; - - data = xbuf[0]; - data += align_offset; - memcpy(data, input, template[i].len); - - crypto_skcipher_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_skcipher_set_flags(tfm, - CRYPTO_TFM_REQ_WEAK_KEY); - - ret = crypto_skcipher_setkey(tfm, template[i].key, - template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n", - d, j, algo, crypto_skcipher_get_flags(tfm)); - goto out; - } else if (ret) - continue; - - sg_init_one(&sg[0], data, template[i].len); - if (diff_dst) { - data = xoutbuf[0]; - data += align_offset; - sg_init_one(&sgout[0], data, template[i].len); - } - - skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].len, iv); - ret = crypto_wait_req(enc ? 
crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req), &wait); - - if (ret) { - pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n", - d, e, j, algo, -ret); - goto out; - } - - q = data; - if (memcmp(q, result, template[i].len)) { - pr_err("alg: skcipher%s: Test %d failed (invalid result) on %s for %s\n", - d, j, e, algo); - hexdump(q, template[i].len); - ret = -EINVAL; - goto out; - } - - if (template[i].generates_iv && enc && - memcmp(iv, template[i].iv, crypto_skcipher_ivsize(tfm))) { - pr_err("alg: skcipher%s: Test %d failed (invalid output IV) on %s for %s\n", - d, j, e, algo); - hexdump(iv, crypto_skcipher_ivsize(tfm)); - ret = -EINVAL; - goto out; - } - } - - j = 0; - for (i = 0; i < tcount; i++) { - /* alignment tests are only done with continuous buffers */ - if (align_offset != 0) - break; - - if (!template[i].np) - continue; - - if (fips_enabled && template[i].fips_skip) - continue; - - if (template[i].iv && !(template[i].generates_iv && enc)) - memcpy(iv, template[i].iv, ivsize); - else - memset(iv, 0, MAX_IVLEN); - - input = enc ? template[i].ptext : template[i].ctext; - result = enc ? template[i].ctext : template[i].ptext; - j++; - crypto_skcipher_clear_flags(tfm, ~0); - if (template[i].wk) - crypto_skcipher_set_flags(tfm, - CRYPTO_TFM_REQ_WEAK_KEY); - - ret = crypto_skcipher_setkey(tfm, template[i].key, - template[i].klen); - if (template[i].fail == !ret) { - pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n", - d, j, algo, crypto_skcipher_get_flags(tfm)); - goto out; - } else if (ret) - continue; - - temp = 0; - ret = -EINVAL; - sg_init_table(sg, template[i].np); - if (diff_dst) - sg_init_table(sgout, template[i].np); - for (k = 0; k < template[i].np; k++) { - if (WARN_ON(offset_in_page(IDX[k]) + - template[i].tap[k] > PAGE_SIZE)) - goto out; +out_nobuf: + return ret; +} - q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]); +static int test_skcipher_vec_cfg(const char *driver, int enc, + const struct cipher_testvec *vec, + unsigned int vec_num, + const struct testvec_config *cfg, + struct skcipher_request *req, + struct cipher_test_sglists *tsgls) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const unsigned int alignmask = crypto_skcipher_alignmask(tfm); + const unsigned int ivsize = crypto_skcipher_ivsize(tfm); + const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; + const char *op = enc ? "encryption" : "decryption"; + DECLARE_CRYPTO_WAIT(wait); + u8 _iv[3 * (MAX_ALGAPI_ALIGNMASK + 1) + MAX_IVLEN]; + u8 *iv = PTR_ALIGN(&_iv[0], 2 * (MAX_ALGAPI_ALIGNMASK + 1)) + + cfg->iv_offset + + (cfg->iv_offset_relative_to_alignmask ? alignmask : 0); + struct kvec input; + int err; - memcpy(q, input + temp, template[i].tap[k]); + /* Set the key */ + if (vec->wk) + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + else + crypto_skcipher_clear_flags(tfm, + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS); + err = crypto_skcipher_setkey(tfm, vec->key, vec->klen); + if (err) { + if (vec->fail) /* expectedly failed to set key? 
*/ + return 0; + pr_err("alg: skcipher: %s setkey failed with err %d on test vector %u; flags=%#x\n", + driver, err, vec_num, crypto_skcipher_get_flags(tfm)); + return err; + } + if (vec->fail) { + pr_err("alg: skcipher: %s setkey unexpectedly succeeded on test vector %u\n", + driver, vec_num); + return -EINVAL; + } - if (offset_in_page(q) + template[i].tap[k] < PAGE_SIZE) - q[template[i].tap[k]] = 0; + /* The IV must be copied to a buffer, as the algorithm may modify it */ + if (ivsize) { + if (WARN_ON(ivsize > MAX_IVLEN)) + return -EINVAL; + if (vec->generates_iv && !enc) + memcpy(iv, vec->iv_out, ivsize); + else if (vec->iv) + memcpy(iv, vec->iv, ivsize); + else + memset(iv, 0, ivsize); + } else { + if (vec->generates_iv) { + pr_err("alg: skcipher: %s has ivsize=0 but test vector %u generates IV!\n", + driver, vec_num); + return -EINVAL; + } + iv = NULL; + } - sg_set_buf(&sg[k], q, template[i].tap[k]); - if (diff_dst) { - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + /* Build the src/dst scatterlists */ + input.iov_base = enc ? (void *)vec->ptext : (void *)vec->ctext; + input.iov_len = vec->len; + err = build_cipher_test_sglists(tsgls, cfg, alignmask, + vec->len, vec->len, &input, 1); + if (err) { + pr_err("alg: skcipher: %s %s: error preparing scatterlists for test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } - sg_set_buf(&sgout[k], q, template[i].tap[k]); + /* Do the actual encryption or decryption */ + testmgr_poison(req->__ctx, crypto_skcipher_reqsize(tfm)); + skcipher_request_set_callback(req, req_flags, crypto_req_done, &wait); + skcipher_request_set_crypt(req, tsgls->src.sgl_ptr, tsgls->dst.sgl_ptr, + vec->len, iv); + err = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : + crypto_skcipher_decrypt(req), &wait); + if (err) { + pr_err("alg: skcipher: %s %s failed with err %d on test vector %u, cfg=\"%s\"\n", + driver, op, err, vec_num, cfg->name); + return err; + } - memset(q, 0, template[i].tap[k]); - if (offset_in_page(q) + - template[i].tap[k] < PAGE_SIZE) - q[template[i].tap[k]] = 0; - } + /* Check that the algorithm didn't overwrite things it shouldn't have */ + if (req->cryptlen != vec->len || + req->iv != iv || + req->src != tsgls->src.sgl_ptr || + req->dst != tsgls->dst.sgl_ptr || + crypto_skcipher_reqtfm(req) != tfm || + req->base.complete != crypto_req_done || + req->base.flags != req_flags || + req->base.data != &wait) { + pr_err("alg: skcipher: %s %s corrupted request struct on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + if (req->cryptlen != vec->len) + pr_err("alg: skcipher: changed 'req->cryptlen'\n"); + if (req->iv != iv) + pr_err("alg: skcipher: changed 'req->iv'\n"); + if (req->src != tsgls->src.sgl_ptr) + pr_err("alg: skcipher: changed 'req->src'\n"); + if (req->dst != tsgls->dst.sgl_ptr) + pr_err("alg: skcipher: changed 'req->dst'\n"); + if (crypto_skcipher_reqtfm(req) != tfm) + pr_err("alg: skcipher: changed 'req->base.tfm'\n"); + if (req->base.complete != crypto_req_done) + pr_err("alg: skcipher: changed 'req->base.complete'\n"); + if (req->base.flags != req_flags) + pr_err("alg: skcipher: changed 'req->base.flags'\n"); + if (req->base.data != &wait) + pr_err("alg: skcipher: changed 'req->base.data'\n"); + return -EINVAL; + } + if (is_test_sglist_corrupted(&tsgls->src)) { + pr_err("alg: skcipher: %s %s corrupted src sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + if (tsgls->dst.sgl_ptr != tsgls->src.sgl && + 
is_test_sglist_corrupted(&tsgls->dst)) { + pr_err("alg: skcipher: %s %s corrupted dst sgl on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return -EINVAL; + } + + /* Check for the correct output (ciphertext or plaintext) */ + err = verify_correct_output(&tsgls->dst, enc ? vec->ctext : vec->ptext, + vec->len, 0, true); + if (err == -EOVERFLOW) { + pr_err("alg: skcipher: %s %s overran dst buffer on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } + if (err) { + pr_err("alg: skcipher: %s %s test failed (wrong result) on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + return err; + } - temp += template[i].tap[k]; - } + /* If applicable, check that the algorithm generated the correct IV */ + if (vec->iv_out && memcmp(iv, vec->iv_out, ivsize) != 0) { + pr_err("alg: skcipher: %s %s test failed (wrong output IV) on test vector %u, cfg=\"%s\"\n", + driver, op, vec_num, cfg->name); + hexdump(iv, ivsize); + return -EINVAL; + } - skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg, - template[i].len, iv); + return 0; +} - ret = crypto_wait_req(enc ? crypto_skcipher_encrypt(req) : - crypto_skcipher_decrypt(req), &wait); +static int test_skcipher_vec(const char *driver, int enc, + const struct cipher_testvec *vec, + unsigned int vec_num, + struct skcipher_request *req, + struct cipher_test_sglists *tsgls) +{ + unsigned int i; + int err; - if (ret) { - pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n", - d, e, j, algo, -ret); - goto out; - } + if (fips_enabled && vec->fips_skip) + return 0; - temp = 0; - ret = -EINVAL; - for (k = 0; k < template[i].np; k++) { - if (diff_dst) - q = xoutbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); - else - q = xbuf[IDX[k] >> PAGE_SHIFT] + - offset_in_page(IDX[k]); + for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) { + err = test_skcipher_vec_cfg(driver, enc, vec, vec_num, + &default_cipher_testvec_configs[i], + req, tsgls); + if (err) + return err; + } - if (memcmp(q, result + temp, template[i].tap[k])) { - pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n", - d, j, e, k, algo); - hexdump(q, template[i].tap[k]); - goto out; - } +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + if (!noextratests) { + struct testvec_config cfg; + char cfgname[TESTVEC_CONFIG_NAMELEN]; - q += template[i].tap[k]; - for (n = 0; offset_in_page(q + n) && q[n]; n++) - ; - if (n) { - pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n", - d, j, e, k, algo, n); - hexdump(q, n); - goto out; - } - temp += template[i].tap[k]; + for (i = 0; i < fuzz_iterations; i++) { + generate_random_testvec_config(&cfg, cfgname, + sizeof(cfgname)); + err = test_skcipher_vec_cfg(driver, enc, vec, vec_num, + &cfg, req, tsgls); + if (err) + return err; } } +#endif + return 0; +} - ret = 0; +static int test_skcipher(const char *driver, int enc, + const struct cipher_test_suite *suite, + struct skcipher_request *req, + struct cipher_test_sglists *tsgls) +{ + unsigned int i; + int err; -out: - skcipher_request_free(req); - if (diff_dst) - testmgr_free_buf(xoutbuf); -out_nooutbuf: - testmgr_free_buf(xbuf); -out_nobuf: - return ret; + for (i = 0; i < suite->count; i++) { + err = test_skcipher_vec(driver, enc, &suite->vecs[i], i, req, + tsgls); + if (err) + return err; + } + return 0; } -static int test_skcipher(struct crypto_skcipher *tfm, int enc, - const struct cipher_testvec *template, - unsigned int tcount) +static int 
alg_test_skcipher(const struct alg_test_desc *desc, + const char *driver, u32 type, u32 mask) { - unsigned int alignmask; - int ret; + const struct cipher_test_suite *suite = &desc->suite.cipher; + struct crypto_skcipher *tfm; + struct skcipher_request *req = NULL; + struct cipher_test_sglists *tsgls = NULL; + int err; - /* test 'dst == src' case */ - ret = __test_skcipher(tfm, enc, template, tcount, false, 0); - if (ret) - return ret; + if (suite->count <= 0) { + pr_err("alg: skcipher: empty test suite for %s\n", driver); + return -EINVAL; + } - /* test 'dst != src' case */ - ret = __test_skcipher(tfm, enc, template, tcount, true, 0); - if (ret) - return ret; + tfm = crypto_alloc_skcipher(driver, type, mask); + if (IS_ERR(tfm)) { + pr_err("alg: skcipher: failed to allocate transform for %s: %ld\n", + driver, PTR_ERR(tfm)); + return PTR_ERR(tfm); + } - /* test unaligned buffers, check with one byte offset */ - ret = __test_skcipher(tfm, enc, template, tcount, true, 1); - if (ret) - return ret; + req = skcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + pr_err("alg: skcipher: failed to allocate request for %s\n", + driver); + err = -ENOMEM; + goto out; + } - alignmask = crypto_tfm_alg_alignmask(&tfm->base); - if (alignmask) { - /* Check if alignment mask for tfm is correctly set. */ - ret = __test_skcipher(tfm, enc, template, tcount, true, - alignmask + 1); - if (ret) - return ret; + tsgls = alloc_cipher_test_sglists(); + if (!tsgls) { + pr_err("alg: skcipher: failed to allocate test buffers for %s\n", + driver); + err = -ENOMEM; + goto out; } - return 0; + err = test_skcipher(driver, ENCRYPT, suite, req, tsgls); + if (err) + goto out; + + err = test_skcipher(driver, DECRYPT, suite, req, tsgls); +out: + free_cipher_test_sglists(tsgls); + skcipher_request_free(req); + crypto_free_skcipher(tfm); + return err; } static int test_comp(struct crypto_comp *tfm, @@ -1713,35 +2088,6 @@ out: return err; } -static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, - u32 type, u32 mask) -{ - struct crypto_aead *tfm; - int err = 0; - - tfm = crypto_alloc_aead(driver, type, mask); - if (IS_ERR(tfm)) { - printk(KERN_ERR "alg: aead: Failed to load transform for %s: " - "%ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - if (desc->suite.aead.enc.vecs) { - err = test_aead(tfm, ENCRYPT, desc->suite.aead.enc.vecs, - desc->suite.aead.enc.count); - if (err) - goto out; - } - - if (!err && desc->suite.aead.dec.vecs) - err = test_aead(tfm, DECRYPT, desc->suite.aead.dec.vecs, - desc->suite.aead.dec.count); - -out: - crypto_free_aead(tfm); - return err; -} - static int alg_test_cipher(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -1764,28 +2110,6 @@ static int alg_test_cipher(const struct alg_test_desc *desc, return err; } -static int alg_test_skcipher(const struct alg_test_desc *desc, - const char *driver, u32 type, u32 mask) -{ - const struct cipher_test_suite *suite = &desc->suite.cipher; - struct crypto_skcipher *tfm; - int err; - - tfm = crypto_alloc_skcipher(driver, type, mask); - if (IS_ERR(tfm)) { - printk(KERN_ERR "alg: skcipher: Failed to load transform for " - "%s: %ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - err = test_skcipher(tfm, ENCRYPT, suite->vecs, suite->count); - if (!err) - err = test_skcipher(tfm, DECRYPT, suite->vecs, suite->count); - - crypto_free_skcipher(tfm); - return err; -} - static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { @@ -1824,84 +2148,30 @@ 
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, return err; } -static int __alg_test_hash(const struct hash_testvec *template, - unsigned int tcount, const char *driver, - u32 type, u32 mask) -{ - struct crypto_ahash *tfm; - int err; - - tfm = crypto_alloc_ahash(driver, type, mask); - if (IS_ERR(tfm)) { - printk(KERN_ERR "alg: hash: Failed to load transform for %s: " - "%ld\n", driver, PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - - err = test_hash(tfm, template, tcount, HASH_TEST_DIGEST); - if (!err) - err = test_hash(tfm, template, tcount, HASH_TEST_FINAL); - if (!err) - err = test_hash(tfm, template, tcount, HASH_TEST_FINUP); - crypto_free_ahash(tfm); - return err; -} - -static int alg_test_hash(const struct alg_test_desc *desc, const char *driver, - u32 type, u32 mask) -{ - const struct hash_testvec *template = desc->suite.hash.vecs; - unsigned int tcount = desc->suite.hash.count; - unsigned int nr_unkeyed, nr_keyed; - int err; - - /* - * For OPTIONAL_KEY algorithms, we have to do all the unkeyed tests - * first, before setting a key on the tfm. To make this easier, we - * require that the unkeyed test vectors (if any) are listed first. - */ - - for (nr_unkeyed = 0; nr_unkeyed < tcount; nr_unkeyed++) { - if (template[nr_unkeyed].ksize) - break; - } - for (nr_keyed = 0; nr_unkeyed + nr_keyed < tcount; nr_keyed++) { - if (!template[nr_unkeyed + nr_keyed].ksize) { - pr_err("alg: hash: test vectors for %s out of order, " - "unkeyed ones must come first\n", desc->alg); - return -EINVAL; - } - } - - err = 0; - if (nr_unkeyed) { - err = __alg_test_hash(template, nr_unkeyed, driver, type, mask); - template += nr_unkeyed; - } - - if (!err && nr_keyed) - err = __alg_test_hash(template, nr_keyed, driver, type, mask); - - return err; -} - static int alg_test_crc32c(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { struct crypto_shash *tfm; - u32 val; + __le32 val; int err; err = alg_test_hash(desc, driver, type, mask); if (err) - goto out; + return err; tfm = crypto_alloc_shash(driver, type, mask); if (IS_ERR(tfm)) { + if (PTR_ERR(tfm) == -ENOENT) { + /* + * This crc32c implementation is only available through + * ahash API, not the shash API, so the remaining part + * of the test is not applicable to it. 
+ */ + return 0; + } printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: " "%ld\n", driver, PTR_ERR(tfm)); - err = PTR_ERR(tfm); - goto out; + return PTR_ERR(tfm); } do { @@ -1911,7 +2181,7 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, shash->tfm = tfm; shash->flags = 0; - *ctx = le32_to_cpu(420553207); + *ctx = 420553207; err = crypto_shash_final(shash, (u8 *)&val); if (err) { printk(KERN_ERR "alg: crc32c: Operation failed for " @@ -1919,16 +2189,15 @@ static int alg_test_crc32c(const struct alg_test_desc *desc, break; } - if (val != ~420553207) { - printk(KERN_ERR "alg: crc32c: Test failed for %s: " - "%d\n", driver, val); + if (val != cpu_to_le32(~420553207)) { + pr_err("alg: crc32c: Test failed for %s: %u\n", + driver, le32_to_cpu(val)); err = -EINVAL; } } while (0); crypto_free_shash(tfm); -out: return err; } @@ -2094,12 +2363,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, if (vec->genkey) { /* Save party A's public key */ - a_public = kzalloc(out_len_max, GFP_KERNEL); + a_public = kmemdup(sg_virt(req->dst), out_len_max, GFP_KERNEL); if (!a_public) { err = -ENOMEM; goto free_output; } - memcpy(a_public, sg_virt(req->dst), out_len_max); } else { /* Verify calculated public key */ if (memcmp(vec->expected_a_public, sg_virt(req->dst), @@ -2112,13 +2380,12 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, } /* Calculate shared secret key by using counter part (b) public key. */ - input_buf = kzalloc(vec->b_public_size, GFP_KERNEL); + input_buf = kmemdup(vec->b_public, vec->b_public_size, GFP_KERNEL); if (!input_buf) { err = -ENOMEM; goto free_output; } - memcpy(input_buf, vec->b_public, vec->b_public_size); sg_init_one(&src, input_buf, vec->b_public_size); sg_init_one(&dst, output_buf, out_len_max); kpp_request_set_input(req, &src, vec->b_public_size); @@ -2134,12 +2401,11 @@ static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec, if (vec->genkey) { /* Save the shared secret obtained by party A */ - a_ss = kzalloc(vec->expected_ss_size, GFP_KERNEL); + a_ss = kmemdup(sg_virt(req->dst), vec->expected_ss_size, GFP_KERNEL); if (!a_ss) { err = -ENOMEM; goto free_all; } - memcpy(a_ss, sg_virt(req->dst), vec->expected_ss_size); /* * Calculate party B's shared secret by using party A's @@ -2238,6 +2504,9 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, unsigned int out_len_max, out_len = 0; int err = -ENOMEM; struct scatterlist src, dst, src_tab[2]; + const char *m, *c; + unsigned int m_size, c_size; + const char *op; if (testmgr_alloc_buf(xbuf)) return err; @@ -2259,46 +2528,72 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, err = -ENOMEM; out_len_max = crypto_akcipher_maxsize(tfm); + + /* + * First run test which do not require a private key, such as + * encrypt or verify. + */ outbuf_enc = kzalloc(out_len_max, GFP_KERNEL); if (!outbuf_enc) goto free_req; - if (WARN_ON(vecs->m_size > PAGE_SIZE)) - goto free_all; + if (!vecs->siggen_sigver_test) { + m = vecs->m; + m_size = vecs->m_size; + c = vecs->c; + c_size = vecs->c_size; + op = "encrypt"; + } else { + /* Swap args so we could keep plaintext (digest) + * in vecs->m, and cooked signature in vecs->c. 
+ */ + m = vecs->c; /* signature */ + m_size = vecs->c_size; + c = vecs->m; /* digest */ + c_size = vecs->m_size; + op = "verify"; + } - memcpy(xbuf[0], vecs->m, vecs->m_size); + if (WARN_ON(m_size > PAGE_SIZE)) + goto free_all; + memcpy(xbuf[0], m, m_size); sg_init_table(src_tab, 2); sg_set_buf(&src_tab[0], xbuf[0], 8); - sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8); + sg_set_buf(&src_tab[1], xbuf[0] + 8, m_size - 8); sg_init_one(&dst, outbuf_enc, out_len_max); - akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size, + akcipher_request_set_crypt(req, src_tab, &dst, m_size, out_len_max); akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature generation */ - crypto_akcipher_sign(req) : + /* Run asymmetric signature verification */ + crypto_akcipher_verify(req) : /* Run asymmetric encrypt */ crypto_akcipher_encrypt(req), &wait); if (err) { - pr_err("alg: akcipher: encrypt test failed. err %d\n", err); + pr_err("alg: akcipher: %s test failed. err %d\n", op, err); goto free_all; } - if (req->dst_len != vecs->c_size) { - pr_err("alg: akcipher: encrypt test failed. Invalid output len\n"); + if (req->dst_len != c_size) { + pr_err("alg: akcipher: %s test failed. Invalid output len\n", + op); err = -EINVAL; goto free_all; } /* verify that encrypted message is equal to expected */ - if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) { - pr_err("alg: akcipher: encrypt test failed. Invalid output\n"); - hexdump(outbuf_enc, vecs->c_size); + if (memcmp(c, outbuf_enc, c_size)) { + pr_err("alg: akcipher: %s test failed. Invalid output\n", op); + hexdump(outbuf_enc, c_size); err = -EINVAL; goto free_all; } - /* Don't invoke decrypt for vectors with public key */ + + /* + * Don't invoke (decrypt or sign) test which require a private key + * for vectors with only a public key. + */ if (vecs->public_key_vec) { err = 0; goto free_all; @@ -2309,37 +2604,36 @@ static int test_akcipher_one(struct crypto_akcipher *tfm, goto free_all; } - if (WARN_ON(vecs->c_size > PAGE_SIZE)) + op = vecs->siggen_sigver_test ? "sign" : "decrypt"; + if (WARN_ON(c_size > PAGE_SIZE)) goto free_all; + memcpy(xbuf[0], c, c_size); - memcpy(xbuf[0], vecs->c, vecs->c_size); - - sg_init_one(&src, xbuf[0], vecs->c_size); + sg_init_one(&src, xbuf[0], c_size); sg_init_one(&dst, outbuf_dec, out_len_max); crypto_init_wait(&wait); - akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max); + akcipher_request_set_crypt(req, &src, &dst, c_size, out_len_max); err = crypto_wait_req(vecs->siggen_sigver_test ? - /* Run asymmetric signature verification */ - crypto_akcipher_verify(req) : + /* Run asymmetric signature generation */ + crypto_akcipher_sign(req) : /* Run asymmetric decrypt */ crypto_akcipher_decrypt(req), &wait); if (err) { - pr_err("alg: akcipher: decrypt test failed. err %d\n", err); + pr_err("alg: akcipher: %s test failed. err %d\n", op, err); goto free_all; } out_len = req->dst_len; - if (out_len < vecs->m_size) { - pr_err("alg: akcipher: decrypt test failed. " - "Invalid output len %u\n", out_len); + if (out_len < m_size) { + pr_err("alg: akcipher: %s test failed. Invalid output len %u\n", + op, out_len); err = -EINVAL; goto free_all; } /* verify that decrypted message is equal to the original msg */ - if (memchr_inv(outbuf_dec, 0, out_len - vecs->m_size) || - memcmp(vecs->m, outbuf_dec + out_len - vecs->m_size, - vecs->m_size)) { - pr_err("alg: akcipher: decrypt test failed. 
Invalid output\n"); + if (memchr_inv(outbuf_dec, 0, out_len - m_size) || + memcmp(m, outbuf_dec + out_len - m_size, m_size)) { + pr_err("alg: akcipher: %s test failed. Invalid output\n", op); hexdump(outbuf_dec, out_len); err = -EINVAL; } @@ -2419,28 +2713,19 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "aegis128", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aegis128_enc_tv_template), - .dec = __VECS(aegis128_dec_tv_template), - } + .aead = __VECS(aegis128_tv_template) } }, { .alg = "aegis128l", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aegis128l_enc_tv_template), - .dec = __VECS(aegis128l_dec_tv_template), - } + .aead = __VECS(aegis128l_tv_template) } }, { .alg = "aegis256", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aegis256_enc_tv_template), - .dec = __VECS(aegis256_dec_tv_template), - } + .aead = __VECS(aegis256_tv_template) } }, { .alg = "ansi_cprng", @@ -2452,36 +2737,27 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(md5),ecb(cipher_null))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template), - .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template) - } + .aead = __VECS(hmac_md5_ecb_cipher_null_tv_template) } }, { .alg = "authenc(hmac(sha1),cbc(aes))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha1_aes_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha1),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha1_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha1),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha1_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha1),ctr(aes))", @@ -2491,10 +2767,7 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha1),ecb(cipher_null))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp), - .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp) - } + .aead = __VECS(hmac_sha1_ecb_cipher_null_tv_temp) } }, { .alg = "authenc(hmac(sha1),rfc3686(ctr(aes)))", @@ -2504,44 +2777,34 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha224),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha224_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha224),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha224_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(aes))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha256_aes_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha256_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha256_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha256),ctr(aes))", @@ -2555,18 +2818,14 @@ static const 
struct alg_test_desc alg_test_descs[] = { .alg = "authenc(hmac(sha384),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha384_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha384),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha384_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha384),ctr(aes))", @@ -2581,26 +2840,20 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha512_aes_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha512),cbc(des))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha512_des_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha512),cbc(des3_ede))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp) - } + .aead = __VECS(hmac_sha512_des3_ede_cbc_tv_temp) } }, { .alg = "authenc(hmac(sha512),ctr(aes))", @@ -2697,10 +2950,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_ccm_enc_tv_template), - .dec = __VECS(aes_ccm_dec_tv_template) - } + .aead = __VECS(aes_ccm_tv_template) } }, { .alg = "cfb(aes)", @@ -2735,6 +2985,7 @@ static const struct alg_test_desc alg_test_descs[] = { }, { .alg = "crc32", .test = alg_test_hash, + .fips_allowed = 1, .suite = { .hash = __VECS(crc32_tv_template) } @@ -3111,10 +3362,7 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_gcm_enc_tv_template), - .dec = __VECS(aes_gcm_dec_tv_template) - } + .aead = __VECS(aes_gcm_tv_template) } }, { .alg = "ghash", @@ -3309,19 +3557,13 @@ static const struct alg_test_desc alg_test_descs[] = { .alg = "morus1280", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(morus1280_enc_tv_template), - .dec = __VECS(morus1280_dec_tv_template), - } + .aead = __VECS(morus1280_tv_template) } }, { .alg = "morus640", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(morus640_enc_tv_template), - .dec = __VECS(morus640_dec_tv_template), - } + .aead = __VECS(morus640_tv_template) } }, { .alg = "nhpoly1305", @@ -3386,47 +3628,32 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_gcm_rfc4106_enc_tv_template), - .dec = __VECS(aes_gcm_rfc4106_dec_tv_template) - } + .aead = __VECS(aes_gcm_rfc4106_tv_template) } }, { .alg = "rfc4309(ccm(aes))", .test = alg_test_aead, .fips_allowed = 1, .suite = { - .aead = { - .enc = __VECS(aes_ccm_rfc4309_enc_tv_template), - .dec = __VECS(aes_ccm_rfc4309_dec_tv_template) - } + .aead = __VECS(aes_ccm_rfc4309_tv_template) } }, { .alg = "rfc4543(gcm(aes))", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(aes_gcm_rfc4543_enc_tv_template), - .dec = __VECS(aes_gcm_rfc4543_dec_tv_template), - } + .aead = __VECS(aes_gcm_rfc4543_tv_template) } }, { .alg = "rfc7539(chacha20,poly1305)", .test = alg_test_aead, .suite = { - .aead = { - .enc = __VECS(rfc7539_enc_tv_template), - .dec = __VECS(rfc7539_dec_tv_template), - } + .aead = __VECS(rfc7539_tv_template) } }, { .alg = "rfc7539esp(chacha20,poly1305)", .test = alg_test_aead, 
.suite = { - .aead = { - .enc = __VECS(rfc7539esp_enc_tv_template), - .dec = __VECS(rfc7539esp_dec_tv_template), - } + .aead = __VECS(rfc7539esp_tv_template) } }, { .alg = "rmd128", @@ -3675,18 +3902,10 @@ static const struct alg_test_desc alg_test_descs[] = { } }; -static bool alg_test_descs_checked; - -static void alg_test_descs_check_order(void) +static void alg_check_test_descs_order(void) { int i; - /* only check once */ - if (alg_test_descs_checked) - return; - - alg_test_descs_checked = true; - for (i = 1; i < ARRAY_SIZE(alg_test_descs); i++) { int diff = strcmp(alg_test_descs[i - 1].alg, alg_test_descs[i].alg); @@ -3704,6 +3923,29 @@ static void alg_test_descs_check_order(void) } } +static void alg_check_testvec_configs(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(default_cipher_testvec_configs); i++) + WARN_ON(!valid_testvec_config( + &default_cipher_testvec_configs[i])); + + for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) + WARN_ON(!valid_testvec_config( + &default_hash_testvec_configs[i])); +} + +static void testmgr_onetime_init(void) +{ + alg_check_test_descs_order(); + alg_check_testvec_configs(); + +#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS + pr_warn("alg: extra crypto tests enabled. This is intended for developer use only.\n"); +#endif +} + static int alg_find_test(const char *alg) { int start = 0; @@ -3740,7 +3982,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask) return 0; } - alg_test_descs_check_order(); + DO_ONCE(testmgr_onetime_init); if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) { char nalg[CRYPTO_MAX_ALG_NAME]; diff --git a/crypto/testmgr.h b/crypto/testmgr.h index e8f47d7b92cd..f267633cf13a 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -5,6 +5,7 @@ * Copyright (c) 2002 Jean-Francois Dive * Copyright (c) 2007 Nokia Siemens Networks * Copyright (c) 2008 Herbert Xu + * Copyright (c) 2019 Google LLC * * Updated RFC4106 AES-GCM testing. Some test vectors were taken from * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/ @@ -24,19 +25,20 @@ #ifndef _CRYPTO_TESTMGR_H #define _CRYPTO_TESTMGR_H -#define MAX_DIGEST_SIZE 64 -#define MAX_TAP 8 - -#define MAX_KEYLEN 1088 #define MAX_IVLEN 32 +/* + * hash_testvec: structure to describe a hash (message digest) test + * @key: Pointer to key (NULL if none) + * @plaintext: Pointer to source data + * @digest: Pointer to expected digest + * @psize: Length of source data in bytes + * @ksize: Length of @key in bytes (0 if no key) + */ struct hash_testvec { - /* only used with keyed hash algorithms */ const char *key; const char *plaintext; const char *digest; - unsigned short tap[MAX_TAP]; - unsigned short np; unsigned short psize; unsigned short ksize; }; @@ -45,29 +47,24 @@ struct hash_testvec { * cipher_testvec: structure to describe a symmetric cipher test * @key: Pointer to key * @klen: Length of @key in bytes - * @iv: Pointer to IV (optional for some ciphers) + * @iv: Pointer to IV. If NULL, an all-zeroes IV is used. + * @iv_out: Pointer to output IV, if applicable for the cipher. * @ptext: Pointer to plaintext * @ctext: Pointer to ciphertext * @len: Length of @ptext and @ctext in bytes * @fail: If set to one, the test need to fail - * @wk: Does the test need CRYPTO_TFM_REQ_WEAK_KEY + * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? * ( e.g. 
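alg_test() above drops the hand-rolled "checked once" flag in favour of DO_ONCE(), so testmgr_onetime_init() runs exactly once even if several threads enter alg_test() concurrently. A userspace analogue of the same idea, using pthread_once() in place of the kernel's DO_ONCE() macro:

#include <pthread.h>
#include <stdio.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;

/* Stand-in for testmgr_onetime_init(): check static tables and emit the
 * extra-tests warning exactly once, on the first test request. */
static void onetime_init(void)
{
        printf("one-time self-test setup ran\n");
}

static int alg_test_like(void)
{
        pthread_once(&init_once, onetime_init);   /* kernel: DO_ONCE(...) */
        return 0;
}

int main(void)
{
        alg_test_like();
        alg_test_like();   /* init does not run a second time */
        return 0;
}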
test needs to fail due to a weak key ) - * @np: numbers of SG to distribute data in (from 1 to MAX_TAP) - * @tap: How to distribute data in @np SGs - * @also_non_np: if set to 1, the test will be also done without - * splitting data in @np SGs * @fips_skip: Skip the test vector in FIPS mode - * @generates_iv: Encryption should ignore the given IV, and output @iv. - * Decryption takes @iv. Needed for AES Keywrap ("kw(aes)"). + * @generates_iv: Encryption should ignore the given IV, and output @iv_out. + * Decryption takes @iv_out. Needed for AES Keywrap ("kw(aes)"). */ struct cipher_testvec { const char *key; const char *iv; + const char *iv_out; const char *ptext; const char *ctext; - unsigned short tap[MAX_TAP]; - int np; - unsigned char also_non_np; bool fail; unsigned char wk; /* weak key flag */ unsigned char klen; @@ -76,23 +73,37 @@ struct cipher_testvec { bool generates_iv; }; +/* + * aead_testvec: structure to describe an AEAD test + * @key: Pointer to key + * @iv: Pointer to IV. If NULL, an all-zeroes IV is used. + * @ptext: Pointer to plaintext + * @assoc: Pointer to associated data + * @ctext: Pointer to the full authenticated ciphertext. For AEADs that + * produce a separate "ciphertext" and "authentication tag", these + * two parts are concatenated: ciphertext || tag. + * @fail: setkey() failure expected? + * @novrfy: Decryption verification failure expected? + * @wk: Does the test need CRYPTO_TFM_REQ_FORBID_WEAK_KEYS? + * (e.g. setkey() needs to fail due to a weak key) + * @klen: Length of @key in bytes + * @plen: Length of @ptext in bytes + * @alen: Length of @assoc in bytes + * @clen: Length of @ctext in bytes + */ struct aead_testvec { const char *key; const char *iv; - const char *input; + const char *ptext; const char *assoc; - const char *result; - unsigned char tap[MAX_TAP]; - unsigned char atap[MAX_TAP]; - int np; - int anp; + const char *ctext; bool fail; - unsigned char novrfy; /* ccm dec verification failure expected */ - unsigned char wk; /* weak key flag */ + unsigned char novrfy; + unsigned char wk; unsigned char klen; - unsigned short ilen; + unsigned short plen; + unsigned short clen; unsigned short alen; - unsigned short rlen; }; struct cprng_testvec { @@ -1015,8 +1026,6 @@ static const struct hash_testvec md4_tv_template[] = { .psize = 26, .digest = "\xd7\x9e\x1c\x30\x8a\xa5\xbb\xcd" "\xee\xa8\xed\x63\xdf\x41\x2d\xa9", - .np = 2, - .tap = { 13, 13 }, }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", .psize = 62, @@ -1053,8 +1062,6 @@ static const struct hash_testvec sha3_224_tv_template[] = { "\xc9\xfd\x55\x74\x49\x44\x79\xba" "\x5c\x7e\x7a\xb7\x6e\xf2\x64\xea" "\xd0\xfc\xce\x33", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1214,8 +1221,6 @@ static const struct hash_testvec sha3_256_tv_template[] = { "\x49\x10\x03\x76\xa8\x23\x5e\x2c" "\x82\xe1\xb9\x99\x8a\x99\x9e\x21" "\xdb\x32\xdd\x97\x49\x6d\x33\x76", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1382,8 +1387,6 @@ static const struct hash_testvec sha3_384_tv_template[] = { "\x9b\xfd\xbc\x32\xb9\xd4\xad\x5a" "\xa0\x4a\x1f\x07\x6e\x62\xfe\xa1" "\x9e\xef\x51\xac\xd0\x65\x7c\x22", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1558,8 +1561,6 @@ static const struct hash_testvec sha3_512_tv_template[] = { "\xba\x1b\x0d\x8d\xc7\x8c\x08\x63" 
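With the combined aead_testvec layout documented above, @ctext always carries the ciphertext followed by the authentication tag, so no separate tag-length field is needed. A small sketch of how a harness can recover the tag length, assuming clen >= plen as the new layout implies for conventional AEADs:

#include <assert.h>
#include <stddef.h>

/* Mirror of the relevant fields only (illustration, not the kernel struct). */
struct aead_vec {
        const unsigned char *ptext;
        const unsigned char *ctext;   /* ciphertext || tag */
        size_t plen, clen;
};

/* Everything past the plaintext-sized prefix of @ctext is the tag. */
static size_t tag_len(const struct aead_vec *v)
{
        assert(v->clen >= v->plen);
        return v->clen - v->plen;
}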
"\x46\xb5\x33\xb4\x9c\x03\x0d\x99" "\xa2\x7d\xaf\x11\x39\xd6\xe7\x5e", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -1729,8 +1730,6 @@ static const struct hash_testvec md5_tv_template[] = { .psize = 26, .digest = "\xc3\xfc\xd3\xd7\x61\x92\xe4\x00" "\x7d\xfb\x49\x6c\xca\x67\xe1\x3b", - .np = 2, - .tap = {13, 13} }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", .psize = 62, @@ -1791,8 +1790,6 @@ static const struct hash_testvec rmd128_tv_template[] = { .psize = 56, .digest = "\xa1\xaa\x06\x89\xd0\xfa\xfa\x2d" "\xdc\x22\xe8\x8b\x49\x13\x3a\x06", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi" "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr" @@ -1853,8 +1850,6 @@ static const struct hash_testvec rmd160_tv_template[] = { .psize = 56, .digest = "\x12\xa0\x53\x38\x4a\x9c\x0c\x88\xe4\x05" "\xa0\x6c\x27\xdc\xf4\x9a\xda\x62\xeb\x2b", - .np = 2, - .tap = { 28, 28 }, }, { .plaintext = "abcdefghbcdefghicdefghijdefghijkefghijklfghi" "jklmghijklmnhijklmnoijklmnopjklmnopqklmnopqr" @@ -1931,8 +1926,6 @@ static const struct hash_testvec rmd256_tv_template[] = { "\xc8\xd9\x12\x85\x73\xe7\xa9\x80" "\x9a\xfb\x2a\x0f\x34\xcc\xc3\x6e" "\xa9\xe7\x2f\x16\xf6\x36\x8e\x3f", - .np = 2, - .tap = { 28, 28 }, } }; @@ -1997,8 +1990,6 @@ static const struct hash_testvec rmd320_tv_template[] = { "\xb8\x4d\xf7\x69\xa5\xde\x20\x60\xe2\x59" "\xdf\x4c\x9b\xb4\xa4\x26\x8c\x0e\x93\x5b" "\xbc\x74\x70\xa9\x69\xc9\xd0\x72\xa1\xac", - .np = 2, - .tap = { 28, 28 }, } }; @@ -2012,26 +2003,11 @@ static const struct hash_testvec crct10dif_tv_template[] = { "123456789012345678901234567890123456789", .psize = 79, .digest = (u8 *)(u16 []){ 0x4b70 }, - .np = 2, - .tap = { 63, 16 }, }, { .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd" "ddddddddddddd", .psize = 56, .digest = (u8 *)(u16 []){ 0x9ce3 }, - .np = 8, - .tap = { 1, 2, 28, 7, 6, 5, 4, 3 }, - }, { - .plaintext = "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "1234567890123456789012345678901234567890" - "123456789012345678901234567890123456789", - .psize = 319, - .digest = (u8 *)(u16 []){ 0x44c6 }, }, { .plaintext = "1234567890123456789012345678901234567890" "1234567890123456789012345678901234567890" @@ -2043,8 +2019,6 @@ static const struct hash_testvec crct10dif_tv_template[] = { "123456789012345678901234567890123456789", .psize = 319, .digest = (u8 *)(u16 []){ 0x44c6 }, - .np = 4, - .tap = { 1, 255, 57, 6 }, }, { .plaintext = "\x6e\x05\x79\x10\xa7\x1b\xb2\x49" "\xe0\x54\xeb\x82\x19\x8d\x24\xbb" @@ -2510,8 +2484,6 @@ static const struct hash_testvec sha1_tv_template[] = { .psize = 56, .digest = "\x84\x98\x3e\x44\x1c\x3b\xd2\x6e\xba\xae" "\x4a\xa1\xf9\x51\x29\xe5\xe5\x46\x70\xf1", - .np = 2, - .tap = { 28, 28 } }, { .plaintext = "\xec\x29\x56\x12\x44\xed\xe7\x06" "\xb6\xeb\x30\xa1\xc3\x71\xd7\x44" @@ -2537,8 +2509,6 @@ static const struct hash_testvec sha1_tv_template[] = { .psize = 163, .digest = "\x97\x01\x11\xc4\xe7\x7b\xcc\x88\xcc\x20" "\x45\x9c\x02\xb6\x9b\x4a\xa8\xf5\x82\x17", - .np = 4, - .tap = { 63, 64, 31, 5 } }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", .psize = 64, @@ -2707,8 +2677,6 @@ static const struct hash_testvec sha224_tv_template[] = { 
"\x5D\xBA\x5D\xA1\xFD\x89\x01\x50" "\xB0\xC6\x45\x5C\xB4\xF5\x8B\x19" "\x52\x52\x25\x25", - .np = 2, - .tap = { 28, 28 } }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", .psize = 64, @@ -2878,8 +2846,6 @@ static const struct hash_testvec sha256_tv_template[] = { "\xe5\xc0\x26\x93\x0c\x3e\x60\x39" "\xa3\x3c\xe4\x59\x64\xff\x21\x67" "\xf6\xec\xed\xd4\x19\xdb\x06\xc1", - .np = 2, - .tap = { 28, 28 } }, { .plaintext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+-", .psize = 64, @@ -3075,8 +3041,6 @@ static const struct hash_testvec sha384_tv_template[] = { "\x4d\x8f\xd0\x14\xe5\x82\x82\x3a" "\x89\xe1\x6f\x9b\x2a\x7b\xbc\x1a" "\xc9\x38\xe2\xd1\x99\xe8\xbe\xa4", - .np = 4, - .tap = { 26, 26, 26, 26 } }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -3277,8 +3241,6 @@ static const struct hash_testvec sha512_tv_template[] = { "\xb2\x78\xe6\x6d\xff\x8b\x84\xfe" "\x2b\x28\x70\xf7\x42\xa5\x80\xd8" "\xed\xb4\x19\x87\x23\x28\x50\xc9", - .np = 4, - .tap = { 26, 26, 26, 26 } }, { .plaintext = "\x08\x9f\x13\xaa\x41\xd8\x4c\xe3" "\x7a\x11\x85\x1c\xb3\x27\xbe\x55" @@ -3811,8 +3773,6 @@ static const struct hash_testvec ghash_tv_template[] = .psize = 28, .digest = "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce" "\x0d\x61\x06\x27\x66\x51\xd5\xe2", - .np = 2, - .tap = {14, 14} }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", @@ -3923,8 +3883,6 @@ static const struct hash_testvec hmac_md5_tv_template[] = .psize = 28, .digest = "\x75\x0c\x78\x3e\x6a\xb0\xb5\x03" "\xea\xa8\x6e\x31\x0a\x5d\xb7\x38", - .np = 2, - .tap = {14, 14} }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 16, @@ -4002,8 +3960,6 @@ static const struct hash_testvec hmac_rmd128_tv_template[] = { .psize = 28, .digest = "\x87\x5f\x82\x88\x62\xb6\xb3\x34" "\xb4\x27\xc5\x5f\x9f\x7f\xf0\x9b", - .np = 2, - .tap = { 14, 14 }, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 16, @@ -4081,8 +4037,6 @@ static const struct hash_testvec hmac_rmd160_tv_template[] = { .psize = 28, .digest = "\xdd\xa6\xc0\x21\x3a\x48\x5a\x9e\x24\xf4" "\x74\x20\x64\xa7\xf0\x33\xb4\x3c\x40\x69", - .np = 2, - .tap = { 14, 14 }, }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 20, @@ -4161,8 +4115,6 @@ static const struct hash_testvec hmac_sha1_tv_template[] = { .psize = 28, .digest = "\xef\xfc\xdf\x6a\xe5\xeb\x2f\xa2\xd2\x74" "\x16\xd5\xf1\x84\xdf\x9c\x25\x9a\x7c\x79", - .np = 2, - .tap = { 14, 14 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa", .ksize = 20, @@ -4252,8 +4204,6 @@ static const struct hash_testvec hmac_sha224_tv_template[] = { "\x45\x69\x0f\x3a\x7e\x9e\x6d\x0f" "\x8b\xbe\xa2\xa3\x9e\x61\x48\x00" "\x8f\xd0\x5e\x44", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -4397,8 +4347,6 @@ static const struct hash_testvec hmac_sha256_tv_template[] = { "\x6a\x04\x24\x26\x08\x95\x75\xc7" "\x5a\x00\x3f\x08\x9d\x27\x39\x83" "\x9d\xec\x58\xb9\x64\xec\x38\x43", - .np = 2, - .tap = { 14, 14 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -4571,8 +4519,6 @@ static const struct hash_testvec aes_cbcmac_tv_template[] = { "\xf8\xf2\x76\x03\xac\x39\xb0\x9d", .psize = 33, .ksize = 16, - .np = 2, - .tap = { 7, 26 }, }, { .key = 
"\x2b\x7e\x15\x16\x28\xae\xd2\xa6" "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", @@ -4689,9 +4635,7 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = { "\x10\x11\x12\x13", .digest = "\x47\xf5\x1b\x45\x64\x96\x62\x15" "\xb8\x98\x5c\x63\x05\x5e\xd3\x08", - .tap = { 10, 10 }, .psize = 20, - .np = 2, .ksize = 16, }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" @@ -4714,9 +4658,7 @@ static const struct hash_testvec aes_xcbc128_tv_template[] = { "\x20\x21", .digest = "\xbe\xcb\xb3\xbc\xcd\xb5\x18\xa3" "\x06\x77\xd5\x48\x1f\xb6\xb4\xd8", - .tap = { 17, 17 }, .psize = 34, - .np = 2, .ksize = 16, } }; @@ -4799,8 +4741,6 @@ static const struct hash_testvec vmac64_aes_tv_template[] = { "abcabcabcabcabcabcabcabcabcabcabcabcabcabcabc", .psize = 316, .digest = "\x44\x92\xdf\x6c\x5c\xac\x1b\xbe", - .tap = { 1, 100, 200, 15 }, - .np = 4, }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", @@ -4905,8 +4845,6 @@ static const struct hash_testvec hmac_sha384_tv_template[] = { "\xe4\x2e\xc3\x73\x63\x22\x44\x5e" "\x8e\x22\x40\xca\x5e\x69\xe2\xc7" "\x8b\x32\x39\xec\xfa\xb2\x16\x49", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5007,8 +4945,6 @@ static const struct hash_testvec hmac_sha512_tv_template[] = { "\x6d\x03\x4f\x65\xf8\xf0\xe6\xfd" "\xca\xea\xb1\xa3\x4d\x4a\x6b\x4b" "\x63\x6e\x07\x0a\x38\xbc\xe7\x37", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5104,8 +5040,6 @@ static const struct hash_testvec hmac_sha3_224_tv_template[] = { "\x1b\x79\x86\x34\xad\x38\x68\x11" "\xc2\xcf\xc8\x5b\xfa\xf5\xd5\x2b" "\xba\xce\x5e\x66", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5193,8 +5127,6 @@ static const struct hash_testvec hmac_sha3_256_tv_template[] = { "\x35\x96\xbb\xb0\xda\x73\xb8\x87" "\xc9\x17\x1f\x93\x09\x5b\x29\x4a" "\xe8\x57\xfb\xe2\x64\x5e\x1b\xa5", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5286,8 +5218,6 @@ static const struct hash_testvec hmac_sha3_384_tv_template[] = { "\x3c\xa1\x35\x08\xa9\x32\x43\xce" "\x48\xc0\x45\xdc\x00\x7f\x26\xa2" "\x1b\x3f\x5e\x0e\x9d\xf4\xc2\x0a", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5387,8 +5317,6 @@ static const struct hash_testvec hmac_sha3_512_tv_template[] = { "\xee\x7a\x0c\x31\xd0\x22\xa9\x5e" "\x1f\xc9\x2b\xa9\xd7\x7d\xf8\x83" "\x96\x02\x75\xbe\xb4\xe6\x20\x24", - .np = 4, - .tap = { 7, 7, 7, 7 } }, { .key = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" @@ -5996,8 +5924,150 @@ static const struct hash_testvec nhpoly1305_tv_template[] = { .psize = 16, .digest = "\x04\xbf\x7f\x6a\xce\x72\xea\x6a" "\x79\xdb\xb0\xc9\x60\xf6\x12\xcc", - .np = 6, - .tap = { 4, 4, 1, 1, 1, 5 }, + }, { + .key = "\x2e\x77\x1e\x2c\x63\x76\x34\x3f" + "\x71\x08\x4f\x5a\xe3\x3d\x74\x56" + "\xc7\x98\x46\x52\xe5\x8a\xba\x0d" + "\x72\x41\x11\x15\x14\x72\x50\x8a" + "\xd5\xec\x60\x09\xdd\x71\xcc\xb9" + "\x59\x81\x65\x2d\x9e\x50\x18\xf3" + "\x32\xf3\xf1\xe7\x01\x82\x1c\xad" + "\x88\xa0\x21\x0c\x4b\x80\x5e\x62" + "\xfc\x81\xec\x52\xaa\xe4\xa5\x86" + "\xc2\xe6\x03\x11\xdc\x66\x09\x86" + "\x3c\x3b\xf0\x59\x0f\xb3\xf7\x44" + "\x24\xb7\x88\xc5\xfc\xc8\x77\x9f" + "\x8c\x44\xc4\x11\x55\xce\x7a\xa3" + "\xe0\xa2\xb8\xbf\xb5\x3d\x07\x2c" + "\x32\xb6\x6c\xfc\xb4\x42\x95\x95" + 
"\x98\x32\x81\xc4\xe7\xe2\xd9\x6a" + "\x87\xf4\xf4\x1e\x74\x7c\xb5\xcd" + "\x51\x45\x68\x38\x51\xdb\x30\x74" + "\x11\xe0\xaa\xae\x19\x8f\x15\x55" + "\xdd\x47\x4a\x35\xb9\x0c\xb4\x4e" + "\xa9\xce\x2f\xfa\x8f\xc1\x8a\x5e" + "\x5b\xec\xa5\x81\x3b\xb3\x43\x06" + "\x24\x81\xf4\x24\xe2\x21\xfa\xcb" + "\x49\xa8\xf8\xbd\x31\x4a\x5b\x2d" + "\x64\x0a\x07\xf0\x80\xc9\x0d\x81" + "\x14\x58\x54\x2b\xba\x22\x31\xba" + "\xef\x66\xc9\x49\x69\x69\x83\x0d" + "\xf2\xf9\x80\x9d\x30\x36\xfb\xe3" + "\xc0\x72\x2b\xcc\x5a\x81\x2c\x5d" + "\x3b\x5e\xf8\x2b\xd3\x14\x28\x73" + "\xf9\x1c\x70\xe6\xd8\xbb\xac\x30" + "\xf9\xd9\xa0\xe2\x33\x7c\x33\x34" + "\xa5\x6a\x77\x6d\xd5\xaf\xf4\xf3" + "\xc7\xb3\x0e\x83\x3d\xcb\x01\xcc" + "\x81\xc0\xf9\x4a\xae\x36\x92\xf7" + "\x69\x7b\x65\x01\xc3\xc8\xb8\xae" + "\x16\xd8\x30\xbb\xba\x6d\x78\x6e" + "\x0d\xf0\x7d\x84\xb7\x87\xda\x28" + "\x7a\x18\x10\x0b\x29\xec\x29\xf3" + "\xb0\x7b\xa1\x28\xbf\xbc\x2b\x2c" + "\x92\x2c\x16\xfb\x02\x39\xf9\xa6" + "\xa2\x15\x05\xa6\x72\x10\xbc\x62" + "\x4a\x6e\xb8\xb5\x5d\x59\xae\x3c" + "\x32\xd3\x68\xd7\x8e\x5a\xcd\x1b" + "\xef\xf6\xa7\x5e\x10\x51\x15\x4b" + "\x2c\xe3\xba\x70\x4f\x2c\xa0\x1c" + "\x7b\x97\xd7\xb2\xa5\x05\x17\xcc" + "\xf7\x3a\x29\x6f\xd5\x4b\xb8\x24" + "\xf4\x65\x95\x12\xc0\x86\xd1\x64" + "\x81\xdf\x46\x55\x0d\x22\x06\x77" + "\xd8\xca\x8d\xc8\x87\xc3\xfa\xb9" + "\xe1\x98\x94\xe6\x7b\xed\x65\x66" + "\x0e\xc7\x25\x15\xee\x4a\xe6\x7e" + "\xea\x1b\x58\xee\x96\xa0\x75\x9a" + "\xa3\x00\x9e\x42\xc2\x26\x20\x8c" + "\x3d\x22\x1f\x94\x3e\x74\x43\x72" + "\xe9\x1d\xa6\xa1\x6c\xa7\xb8\x03" + "\xdf\xb9\x7a\xaf\xe9\xe9\x3b\xfe" + "\xdf\x91\xc1\x01\xa8\xba\x5d\x29" + "\xa5\xe0\x98\x9b\x13\xe5\x13\x11" + "\x7c\x04\x3a\xe8\x44\x7e\x78\xfc" + "\xd6\x96\xa8\xbc\x7d\xc1\x89\x3d" + "\x75\x64\xa9\x0e\x86\x33\xfb\x73" + "\xf7\x15\xbc\x2c\x9a\x3f\x29\xce" + "\x1c\x9d\x10\x4e\x85\xe1\x77\x41" + "\x01\xe2\xbc\x88\xec\x81\xef\xc2" + "\x6a\xed\x4f\xf7\xdf\xac\x10\x71" + "\x94\xed\x71\xa4\x01\xd4\xd6\xbe" + "\xfe\x3e\xc3\x92\x6a\xf2\x2b\xb5" + "\xab\x15\x96\xb7\x88\x2c\xc2\xe1" + "\xb0\x04\x22\xe7\x3d\xa9\xc9\x7d" + "\x2c\x7c\x21\xff\x97\x86\x6b\x0c" + "\x2b\x5b\xe0\xb6\x48\x74\x8f\x24" + "\xef\x8e\xdd\x0f\x2a\x5f\xff\x33" + "\xf4\x8e\xc5\xeb\x9c\xd7\x2a\x45" + "\xf3\x50\xf1\xc0\x91\x8f\xc7\xf9" + "\x97\xc1\x3c\x9c\xf4\xed\x8a\x23" + "\x61\x5b\x40\x1a\x09\xee\x23\xa8" + "\x7c\x7a\x96\xe1\x31\x55\x3d\x12" + "\x04\x1f\x21\x78\x72\xf0\x0f\xa5" + "\x80\x58\x7c\x2f\x37\xb5\x67\x24" + "\x2f\xce\xf9\xf6\x86\x9f\xb3\x34" + "\x0c\xfe\x0a\xaf\x27\xe6\x5e\x0a" + "\x21\x44\x68\xe1\x5d\x84\x25\xae" + "\x2c\x5a\x94\x66\x9a\x3f\x0e\x5a" + "\xd0\x60\x2a\xd5\x3a\x4e\x2f\x40" + "\x87\xe9\x27\x3e\xee\x92\xe1\x07" + "\x22\x43\x52\xed\x67\x49\x13\xdd" + "\x68\xd7\x54\xc2\x76\x72\x7e\x75" + "\xaf\x24\x98\x5c\xe8\x22\xaa\x35" + "\x0f\x9a\x1c\x4c\x0b\x43\x68\x99" + "\x45\xdd\xbf\x82\xa5\x6f\x0a\xef" + "\x44\x90\x85\xe7\x57\x23\x22\x41" + "\x2e\xda\x24\x28\x65\x7f\x96\x85" + "\x9f\x4b\x0d\x43\xb9\xa8\xbd\x84" + "\xad\x0b\x09\xcc\x2c\x4a\x0c\xec" + "\x71\x58\xba\xf1\xfc\x49\x4c\xca" + "\x5c\x5d\xb2\x77\x0c\x99\xae\x1c" + "\xce\x70\x05\x5b\x73\x6b\x7c\x28" + "\x3b\xeb\x21\x3f\xa3\x71\xe1\x6a" + "\xf4\x87\xd0\xbf\x73\xaa\x0b\x0b" + "\xed\x70\xb3\xd4\xa3\xca\x76\x3a" + "\xdb\xfa\xd8\x08\x95\xec\xac\x59" + "\xd0\x79\x90\xc2\x33\x7b\xcc\x28" + "\x65\xb6\x5f\x92\xc4\xac\x23\x40" + "\xd1\x20\x44\x1f\xd7\x29\xab\x46" + "\x79\x32\xc6\x8f\x79\xe5\xaa\x2c" + "\xa6\x76\x70\x3a\x9e\x46\x3f\x8c" + "\x1a\x89\x32\x28\x61\x5c\xcf\x93" + "\x1e\xde\x9e\x98\xbe\x06\x30\x23" + "\xc4\x8b\xda\x1c\xd1\x67\x46\x93" + 
"\x9d\x41\xa2\x8c\x03\x22\xbd\x55" + "\x7e\x91\x51\x13\xdc\xcf\x5c\x1e" + "\xcb\x5d\xfb\x14\x16\x1a\x44\x56" + "\x27\x77\xfd\xed\x7d\xbd\xd1\x49" + "\x7f\x0d\xc3\x59\x48\x6b\x3c\x02" + "\x6b\xb5\xd0\x83\xd5\x81\x29\xe7" + "\xe0\xc9\x36\x23\x8d\x41\x33\x77" + "\xff\x5f\x54\xde\x4d\x3f\xd2\x4e" + "\xb6\x4d\xdd\x85\xf8\x9b\x20\x7d" + "\x39\x27\x68\x63\xd3\x8e\x61\x39" + "\xfa\xe1\xc3\x04\x74\x27\x5a\x34" + "\x7f\xec\x59\x2d\xc5\x6e\x54\x23" + "\xf5\x7b\x4b\xbe\x58\x2b\xf2\x81" + "\x93\x63\xcc\x13\xd9\x90\xbb\x6a" + "\x41\x03\x8d\x95\xeb\xbb\x5d\x06" + "\x38\x4c\x0e\xd6\xa9\x5b\x84\x97" + "\x3e\x64\x72\xe9\x96\x07\x0f\x73" + "\x6e\xc6\x3b\x32\xbe\xac\x13\x14" + "\xd0\x0a\x17\x5f\xb9\x9c\x3e\x34" + "\xd9\xec\xd6\x8f\x89\xbf\x1e\xd3" + "\xda\x80\xb2\x29\xff\x28\x96\xb3" + "\x46\x50\x5b\x15\x80\x97\xee\x1f" + "\x6c\xd8\xe8\xe0\xbd\x09\xe7\x20" + "\x8c\x23\x8e\xd9\xbb\x92\xfa\x82" + "\xaa\x0f\xb5\xf8\x78\x60\x11\xf0", + .ksize = 1088, + .plaintext = "\x0b\xb2\x31\x2d\xad\xfe\xce\xf9" + "\xec\x5d\x3d\x64\x5f\x3f\x75\x43" + "\x05\x5b\x97", + .psize = 19, + .digest = "\x5f\x02\xae\x65\x6c\x13\x21\x67" + "\x77\x9e\xc4\x43\x58\x68\xde\x8f", }, { .key = "\x65\x4d\xe3\xf8\xd2\x4c\xac\x28" "\x68\xf5\xb3\x81\x71\x4b\xa1\xfa" @@ -6267,8 +6337,6 @@ static const struct hash_testvec nhpoly1305_tv_template[] = { .psize = 1024, .digest = "\x64\x3a\xbc\xc3\x3f\x74\x40\x51" "\x6e\x56\x01\x1a\x51\xec\x36\xde", - .np = 8, - .tap = { 64, 203, 267, 28, 263, 62, 54, 83 }, }, { .key = "\x1b\x82\x2e\x1b\x17\x23\xb9\x6d" "\xdc\x9c\xda\x99\x07\xe3\x5f\xd8" @@ -6989,18 +7057,6 @@ static const struct cipher_testvec des_tv_template[] = { .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b", .len = 16, - .np = 2, - .tap = { 8, 8 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7" - "\xa3\x99\x7b\xca\xaf\x69\xa0\xf5", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" - "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b", - .len = 16, - .np = 2, - .tap = { 8, 8 } }, { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, @@ -7009,8 +7065,6 @@ static const struct cipher_testvec des_tv_template[] = { .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" "\x69\x0f\x5b\x0d\x9a\x26\x93\x9b", .len = 16, - .np = 3, - .tap = { 3, 12, 1 } }, { /* Four blocks -- for testing encryption with chunking */ .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, @@ -7023,38 +7077,6 @@ static const struct cipher_testvec des_tv_template[] = { "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90" "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b", .len = 32, - .np = 3, - .tap = { 14, 10, 8 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7" - "\x22\x33\x44\x55\x66\x77\x88\x99" - "\xca\xfe\xba\xbe\xfe\xed\xbe\xef", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" - "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b" - "\xb4\x99\x26\xf7\x1f\xe1\xd4\x90", - .len = 24, - .np = 4, - .tap = { 2, 1, 3, 18 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7" - "\x22\x33\x44\x55\x66\x77\x88\x99", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d" - "\xf7\x9c\x89\x2a\x33\x8f\x4a\x8b", - .len = 16, - .np = 5, - .tap = { 2, 2, 2, 2, 8 } - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xe7", - .ctext = "\xc9\x57\x44\x25\x6a\x5e\xd3\x1d", - .len = 8, - .np = 8, - .tap = { 1, 1, 1, 1, 1, 1, 1, 1 } }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, @@ 
-7121,9 +7143,6 @@ static const struct cipher_testvec des_tv_template[] = { "\xE1\x58\x39\x09\xB4\x8B\x40\xAC" "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A", .len = 248, - .also_non_np = 1, - .np = 3, - .tap = { 248 - 10, 2, 8 }, }, }; @@ -7132,6 +7151,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .iv_out = "\x46\x8e\x91\x15\x78\x88\xba\x68", .ptext = "\x37\x36\x35\x34\x33\x32\x31\x20" "\x4e\x6f\x77\x20\x69\x73\x20\x74" "\x68\x65\x20\x74\x69\x6d\x65\x20", @@ -7143,6 +7163,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\x12\x34\x56\x78\x90\xab\xcd\xef", + .iv_out = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c", .ptext = "\x4e\x6f\x77\x20\x69\x73\x20\x74", .ctext = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c", .len = 8, @@ -7150,6 +7171,7 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\xe5\xc7\xcd\xde\x87\x2b\xf2\x7c", + .iv_out = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f", .ptext = "\x68\x65\x20\x74\x69\x6d\x65\x20", .ctext = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f", .len = 8, @@ -7157,30 +7179,15 @@ static const struct cipher_testvec des_cbc_tv_template[] = { .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", .klen = 8, .iv = "\x43\xe9\x34\x00\x8c\x38\x9c\x0f", + .iv_out = "\x68\x37\x88\x49\x9a\x7c\x05\xf6", .ptext = "\x66\x6f\x72\x20\x61\x6c\x6c\x20", .ctext = "\x68\x37\x88\x49\x9a\x7c\x05\xf6", .len = 8, - .np = 2, - .tap = { 4, 4 }, - .also_non_np = 1, - }, { /* Copy of openssl vector for chunk testing */ - /* From OpenSSL */ - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10", - .ptext = "\x37\x36\x35\x34\x33\x32\x31\x20" - "\x4e\x6f\x77\x20\x69\x73\x20\x74" - "\x68\x65\x20\x74\x69\x6d\x65\x20", - .ctext = "\xcc\xd1\x73\xff\xab\x20\x39\xf4" - "\xac\xd8\xae\xfd\xdf\xd8\xa1\xeb" - "\x46\x8e\x91\x15\x78\x88\xba\x68", - .len = 24, - .np = 2, - .tap = { 13, 11 } }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47", + .iv_out = "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -7244,9 +7251,6 @@ static const struct cipher_testvec des_cbc_tv_template[] = { "\x82\xA9\xBD\x6A\x31\x91\x39\x11" "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63", .len = 248, - .also_non_np = 1, - .np = 3, - .tap = { 248 - 10, 2, 8 }, }, }; @@ -7255,6 +7259,7 @@ static const struct cipher_testvec des_ctr_tv_template[] = { .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -7318,13 +7323,11 @@ static const struct cipher_testvec des_ctr_tv_template[] = { "\x19\x7F\x99\x19\x53\xCE\x1D\x14" "\x69\x74\xA1\x06\x46\x0F\x4E\x75", .len = 248, - .also_non_np = 1, - .np = 3, - .tap = { 248 - 10, 2, 8 }, }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55", .klen = 8, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47", + .iv_out = "\xE7\x82\x1D\xB8\x53\x11\xAC\x66", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -7388,9 +7391,6 @@ static const struct cipher_testvec des_ctr_tv_template[] = { 
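The @iv_out values added above let the tests verify the IV an implementation leaves in the request: for CBC it is the last ciphertext block, and for CTR it is the starting counter advanced by one per block processed, wrapping on overflow. A sketch of computing the expected CTR output IV for a big-endian counter; with the 248-byte DES-CTR vector above (31 blocks), FF..FD advances to 00..1C:

#include <stdio.h>
#include <stddef.h>

/* Advance a big-endian counter of 'len' bytes by 'blocks', with wraparound,
 * the way a CTR implementation leaves its IV after finishing a request. */
static void ctr_advance(unsigned char *ctr, size_t len, unsigned long long blocks)
{
        while (blocks--) {
                for (size_t i = len; i-- > 0; )
                        if (++ctr[i] != 0)
                                break;   /* no carry into the next byte */
        }
}

int main(void)
{
        unsigned char iv[8] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFD };

        ctr_advance(iv, sizeof(iv), 248 / 8);   /* 31 DES blocks */
        /* iv is now 00 00 00 00 00 00 00 1C, the vector's iv_out */
        printf("last byte: %02X\n", iv[7]);
        return 0;
}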
"\xA5\xA6\xE7\xB0\x51\x36\x52\x37" "\x91\x45\x05\x3E\x58\xBF\x32", .len = 247, - .also_non_np = 1, - .np = 2, - .tap = { 247 - 8, 8 }, }, }; @@ -7549,9 +7549,6 @@ static const struct cipher_testvec des3_ede_tv_template[] = { "\x93\x03\xD7\x51\x09\xFA\xBE\x68" "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -7562,6 +7559,7 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = { "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8", .klen = 24, .iv = "\x7D\x33\x88\x93\x0F\x93\xB2\x42", + .iv_out = "\x6b\xfa\xb1\x91\x13\xb0\xd9\x19", .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" @@ -7602,6 +7600,7 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = { .klen = 24, .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12" "\xB7\x28\x4D\x83\x24\x59\xF2\x17", + .iv_out = "\x95\x63\x73\xA2\x44\xAC\xF8\xA5", .ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20" "\x8B\x12\x86\x69\xF0\x5B\xCF\x56" "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4" @@ -7727,9 +7726,6 @@ static const struct cipher_testvec des3_ede_cbc_tv_template[] = { "\x83\x70\xFF\x86\xE6\xAA\x0F\x1F" "\x95\x63\x73\xA2\x44\xAC\xF8\xA5", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -7739,8 +7735,8 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = { "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" "\xEB\xB4\x51\x72\xB4\x51\x72\x1F", .klen = 24, - .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" - "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x3D", .ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20" "\x8B\x12\x86\x69\xF0\x5B\xCF\x56" "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4" @@ -7866,16 +7862,13 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = { "\xFD\x51\xB0\xC6\x2C\x63\x13\x78" "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, { /* Generated with Crypto++ */ .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00" "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE" "\xEB\xB4\x51\x72\xB4\x51\x72\x1F", .klen = 24, - .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12" - "\xB7\x28\x4D\x83\x24\x59\xF2\x17", + .iv = "\xB2\xD7\x48\xED\x06\x44\xF9\x12", + .iv_out = "\xB2\xD7\x48\xED\x06\x44\xF9\x51", .ptext = "\x05\xEC\x77\xFB\x42\xD5\x59\x20" "\x8B\x12\x86\x69\xF0\x5B\xCF\x56" "\x39\xAD\x34\x9F\x66\xEA\x7D\xC4" @@ -8003,9 +7996,6 @@ static const struct cipher_testvec des3_ede_ctr_tv_template[] = { "\x32\x0F\x05\x2F\xF2\x4C\x95\x3B" "\xF2\x79\xD9", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, }; @@ -8191,9 +8181,6 @@ static const struct cipher_testvec bf_tv_template[] = { "\x56\xEB\x36\x77\x3D\xAA\xB8\xF5" "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4", .len = 504, - .also_non_np = 1, - .np = 3, - .tap = { 504 - 10, 2, 8 }, }, }; @@ -8203,6 +8190,7 @@ static const struct cipher_testvec bf_cbc_tv_template[] = { "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87", .klen = 16, .iv = "\xfe\xdc\xba\x98\x76\x54\x32\x10", + .iv_out = "\x59\xf1\x65\x2b\xd5\xff\x92\xcc", .ptext = "\x37\x36\x35\x34\x33\x32\x31\x20" "\x4e\x6f\x77\x20\x69\x73\x20\x74" "\x68\x65\x20\x74\x69\x6d\x65\x20" @@ -8219,6 +8207,7 @@ static const struct cipher_testvec bf_cbc_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8346,9 
+8335,6 @@ static const struct cipher_testvec bf_cbc_tv_template[] = { "\x93\x9B\xEE\xB5\x97\x41\xD2\xA0" "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4", .len = 504, - .also_non_np = 1, - .np = 3, - .tap = { 504 - 10, 2, 8 }, }, }; @@ -8360,6 +8346,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9E", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8494,6 +8481,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9E", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8621,9 +8609,6 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x32\x44\x96\x1C\xD8\xEB\x95\xD2" "\xF3\x71\xEF\xEB\x4E\xBB\x4D", .len = 503, - .also_non_np = 1, - .np = 2, - .tap = { 503 - 8, 8 }, }, { /* Generated with Crypto++ */ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" @@ -8631,6 +8616,7 @@ static const struct cipher_testvec bf_ctr_tv_template[] = { "\x78\xBE\x9B\x78\x55\x32\x0F\x55", .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x3C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -8922,9 +8908,6 @@ static const struct cipher_testvec tf_tv_template[] = { "\x58\x33\x9B\x78\xC7\x58\x48\x6B" "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -8933,6 +8916,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .key = zeroed_string, .klen = 16, .iv = zeroed_string, + .iv_out = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" + "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a", .ptext = zeroed_string, .ctext = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a", @@ -8942,6 +8927,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .klen = 16, .iv = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a", + .iv_out = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e" + "\x86\xcb\x08\x6b\x78\x9f\x54\x19", .ptext = zeroed_string, .ctext = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e" "\x86\xcb\x08\x6b\x78\x9f\x54\x19", @@ -8951,6 +8938,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .klen = 16, .iv = "\xd4\x91\xdb\x16\xe7\xb1\xc3\x9e" "\x86\xcb\x08\x6b\x78\x9f\x54\x19", + .iv_out = "\x05\xef\x8c\x61\xa8\x11\x58\x26" + "\x34\xba\x5c\xb7\x10\x6a\xa6\x41", .ptext = zeroed_string, .ctext = "\x05\xef\x8c\x61\xa8\x11\x58\x26" "\x34\xba\x5c\xb7\x10\x6a\xa6\x41", @@ -8959,6 +8948,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .key = zeroed_string, .klen = 16, .iv = zeroed_string, + .iv_out = "\x05\xef\x8c\x61\xa8\x11\x58\x26" + "\x34\xba\x5c\xb7\x10\x6a\xa6\x41", .ptext = zeroed_string, .ctext = "\x9f\x58\x9f\x5c\xf6\x12\x2c\x32" "\xb6\xbf\xec\x2f\x2a\xe8\xc3\x5a" @@ -8975,6 +8966,8 @@ static const struct cipher_testvec tf_cbc_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\x30\x70\x56\xA4\x37\xDD\x7C\xC0" + "\x0A\xA3\x30\x10\x26\x25\x41\x2C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9100,9 +9093,6 @@ static const 
struct cipher_testvec tf_cbc_tv_template[] = { "\x30\x70\x56\xA4\x37\xDD\x7C\xC0" "\x0A\xA3\x30\x10\x26\x25\x41\x2C", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -9115,6 +9105,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9248,6 +9240,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9381,6 +9375,8 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x84", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -9508,9 +9504,6 @@ static const struct cipher_testvec tf_ctr_tv_template[] = { "\xC5\xC9\x7F\x9E\xCF\x33\x7A\xDF" "\x6C\x82\x9D", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, }; @@ -9752,9 +9745,6 @@ static const struct cipher_testvec tf_lrw_tv_template[] = { "\x80\x18\xc4\x6c\x03\xd3\xb7\xba" "\x11\xd7\xb8\x6e\xea\xe1\x80\x30", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -10089,9 +10079,6 @@ static const struct cipher_testvec tf_xts_tv_template[] = { "\xa4\x05\x0b\xb2\xb3\xa8\x30\x97" "\x37\x30\xe1\x91\x8d\xb3\x2a\xff", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -10264,9 +10251,6 @@ static const struct cipher_testvec serpent_tv_template[] = { "\x75\x55\x9B\xFF\x36\x73\xAB\x7C" "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -10358,6 +10342,8 @@ static const struct cipher_testvec serpent_cbc_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xFC\x66\xAA\x37\xF2\x37\x39\x6B" + "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -10483,9 +10469,6 @@ static const struct cipher_testvec serpent_cbc_tv_template[] = { "\xFC\x66\xAA\x37\xF2\x37\x39\x6B" "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -10498,6 +10481,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -10631,6 +10616,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x84", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" 
"\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -10758,9 +10745,6 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { "\x40\x53\x77\x8C\x15\xF8\x8D\x13" "\x38\xE2\xE5", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, { /* Generated with Crypto++ */ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" @@ -10769,6 +10753,8 @@ static const struct cipher_testvec serpent_ctr_tv_template[] = { .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -11135,9 +11121,6 @@ static const struct cipher_testvec serpent_lrw_tv_template[] = { "\x5c\xc6\x84\xfe\x7c\xcb\x26\xfd" "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -11472,9 +11455,6 @@ static const struct cipher_testvec serpent_xts_tv_template[] = { "\xaf\x43\x0b\xc5\x20\x41\x92\x20" "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -11579,6 +11559,8 @@ static const struct cipher_testvec sm4_cbc_tv_template[] = { "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x4C\xB7\x01\x69\x51\x90\x92\x26" + "\x97\x9B\x0D\x15\xDC\x6A\x8F\x6D", .ctext = "\x78\xEB\xB1\x1C\xC4\x0B\x0A\x48" "\x31\x2A\xAE\xB2\x04\x02\x44\xCB" "\x4C\xB7\x01\x69\x51\x90\x92\x26" @@ -11594,6 +11576,8 @@ static const struct cipher_testvec sm4_cbc_tv_template[] = { "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x91\xf2\xc1\x47\x91\x1a\x41\x44" + "\x66\x5e\x1f\xa1\xd4\x0b\xae\x38", .ctext = "\x0d\x3a\x6d\xdc\x2d\x21\xc6\x98" "\x85\x72\x15\x58\x7b\x7b\xb5\x9a" "\x91\xf2\xc1\x47\x91\x1a\x41\x44" @@ -11617,6 +11601,8 @@ static const struct cipher_testvec sm4_ctr_tv_template[] = { "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x13", .ctext = "\xac\x32\x36\xcb\x97\x0c\xc2\x07" "\x91\x36\x4c\x39\x5a\x13\x42\xd1" "\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd" @@ -11640,6 +11626,8 @@ static const struct cipher_testvec sm4_ctr_tv_template[] = { "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb", .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", + .iv_out = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0A\x0B\x0C\x0D\x0E\x13", .ctext = "\x5d\xcc\xcd\x25\xb9\x5a\xb0\x74" "\x17\xa0\x85\x12\xee\x16\x0e\x2f" "\x8f\x66\x15\x21\xcb\xba\xb4\x4c" @@ -11814,9 +11802,6 @@ static const struct cipher_testvec cast6_tv_template[] = { "\x84\x52\x6D\x68\xDE\xC6\x64\xB2" "\x11\x74\x93\x57\xB4\x7E\xC6\x00", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -11829,6 +11814,8 @@ static const struct cipher_testvec cast6_cbc_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\x4D\x59\x7D\xC5\x28\x69\xFA\x92" + "\x22\x46\x89\x2D\x0F\x2B\x08\x24", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -11954,9 +11941,6 @@ static const struct cipher_testvec cast6_cbc_tv_template[] = { "\x4D\x59\x7D\xC5\x28\x69\xFA\x92" 
"\x22\x46\x89\x2D\x0F\x2B\x08\x24", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -11969,6 +11953,8 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x66", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A", @@ -11984,6 +11970,8 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -12109,9 +12097,6 @@ static const struct cipher_testvec cast6_ctr_tv_template[] = { "\x0E\x74\x33\x30\x62\xB9\x89\xDF" "\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -12255,9 +12240,6 @@ static const struct cipher_testvec cast6_lrw_tv_template[] = { "\x8D\xD9\xCD\x3B\x22\x67\x18\xC7" "\xC4\xF5\x99\x61\xBC\xBB\x5B\x46", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -12403,9 +12385,6 @@ static const struct cipher_testvec cast6_xts_tv_template[] = { "\xA1\xAC\xE8\xCF\xC6\x74\xCF\xDC" "\x22\x60\x4E\xE8\xA4\x5D\x85\xB9", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -12574,9 +12553,6 @@ static const struct cipher_testvec aes_tv_template[] = { "\x09\x79\xA0\x43\x5C\x0D\x08\x58" "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -12587,19 +12563,20 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 16, .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", + .iv_out = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + "\x27\x08\x94\x2d\xbe\x77\x18\x1a", .ptext = "Single block msg", .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a", .len = 16, - .also_non_np = 1, - .np = 8, - .tap = { 3, 2, 3, 2, 3, 1, 1, 1 }, }, { .key = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0" "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a", .klen = 16, .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", + .iv_out = "\x75\x86\x60\x2d\x25\x3c\xff\xf9" + "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1", .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" @@ -12616,6 +12593,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 24, .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .iv_out = "\x08\xb0\xe2\x79\x88\x59\x88\x81" + "\xd9\x20\xa9\xe6\x4f\x56\x15\xcd", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -12641,6 +12620,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 32, .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .iv_out = "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc" + "\xda\x6c\x19\x07\x8c\x6a\x9d\x1b", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -12666,6 +12647,8 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { .klen = 32, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47" "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42", + .iv_out = 
"\xE0\x1F\x91\xF8\x82\x96\x2D\x65" + "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -12791,9 +12774,6 @@ static const struct cipher_testvec aes_cbc_tv_template[] = { "\xE0\x1F\x91\xF8\x82\x96\x2D\x65" "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -12870,10 +12850,32 @@ static const struct cipher_testvec aes_cfb_tv_template[] = { "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" "\x20\x31\x62\x3d\x55\xb1\xe4\x71", .len = 64, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, }, }; -static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { +static const struct aead_testvec hmac_md5_ecb_cipher_null_tv_template[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -12887,12 +12889,12 @@ static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { "\x00\x00\x00\x00\x00\x00\x00\x00", .klen = 8 + 16 + 0, .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .ilen = 8, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef" + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .plen = 8, + .ctext = "\x01\x23\x45\x67\x89\xab\xcd\xef" "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", - .rlen = 8 + 16, + .clen = 8 + 16, }, { /* Input data from RFC 2410 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -12906,58 +12908,16 @@ static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = { "\x00\x00\x00\x00\x00\x00\x00\x00", .klen = 8 + 16 + 0, .iv = "", - .input = "Network Security People Have A Strange Sense Of Humor", - .ilen = 53, - .result = "Network Security People Have A Strange Sense Of Humor" - "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" - "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", - .rlen = 53 + 16, - }, -}; - -static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = { - { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 8 + 16 + 0, - .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\xaa\x42\xfe\x43\x8d\xea\xa3\x5a" - "\xb9\x3d\x9f\xb1\xa3\x8e\x9b\xae", - .ilen = 8 + 16, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .rlen = 8, - }, { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 8 + 16 + 0, - .iv = "", - .input = "Network 
Security People Have A Strange Sense Of Humor" + .ptext = "Network Security People Have A Strange Sense Of Humor", + .plen = 53, + .ctext = "Network Security People Have A Strange Sense Of Humor" "\x73\xa5\x3e\x1c\x08\x0e\x8a\x8a" "\x8e\xb5\x5f\x90\x8e\xfe\x13\x23", - .ilen = 53 + 16, - .result = "Network Security People Have A Strange Sense Of Humor", - .rlen = 53, + .clen = 53 + 16, }, }; -static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -12978,14 +12938,14 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", .alen = 16, - .input = "Single block msg", - .ilen = 16, - .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a" "\x1b\x13\xcb\xaf\x89\x5e\xe1\x2c" "\x13\xc5\x2e\xa3\xcc\xed\xdc\xb5" "\x03\x71\xa2\x06", - .rlen = 16 + 20, + .clen = 16 + 20, }, { /* RFC 3602 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13006,19 +12966,19 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", .alen = 16, - .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .ilen = 32, - .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" "\x75\x86\x60\x2d\x25\x3c\xff\xf9" "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" "\xad\x9b\x4c\x5c\x85\xe1\xda\xae" "\xee\x81\x4e\xd7\xdb\x74\xcf\x58" "\x65\x39\xf8\xde", - .rlen = 32 + 20, + .clen = 32 + 20, }, { /* RFC 3602 Case 3 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13039,9 +12999,9 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", .alen = 16, - .input = "This is a 48-byte message (exactly 3 AES blocks)", - .ilen = 48, - .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" "\x50\x69\x39\x27\x67\x72\xf8\xd5" @@ -13050,7 +13010,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xc2\xec\x0c\xf8\x7f\x05\xba\xca" "\xff\xee\x4c\xd0\x93\xe6\x36\x7f" "\x8d\x62\xf2\x1e", - .rlen = 48 + 20, + .clen = 48 + 20, }, { /* RFC 3602 Case 4 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13071,7 +13031,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", .alen = 16, - .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" @@ -13079,8 +13039,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", - .ilen = 64, - .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + .plen = 64, + 
.ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" @@ -13091,7 +13051,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x1c\x45\x57\xa9\x56\xcb\xa9\x2d" "\x18\xac\xf1\xc7\x5d\xd1\xcd\x0d" "\x1d\xbe\xc6\xe9", - .rlen = 64 + 20, + .clen = 64 + 20, }, { /* RFC 3602 Case 5 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13113,7 +13073,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xe9\x6e\x8c\x08\xab\x46\x57\x63" "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", .alen = 24, - .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" @@ -13123,8 +13083,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x30\x31\x32\x33\x34\x35\x36\x37" "\x01\x02\x03\x04\x05\x06\x07\x08" "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", - .ilen = 80, - .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" "\xa9\x45\x3e\x19\x4e\x12\x08\x49" "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" "\x33\x00\x13\xb4\x89\x8d\xc8\x56" @@ -13137,7 +13097,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x58\xc6\x84\x75\xe4\xe9\x6b\x0c" "\xe1\xc5\x0b\x73\x4d\x82\x55\xa8" "\x85\xe1\x59\xf7", - .rlen = 80 + 20, + .clen = 80 + 20, }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13159,7 +13119,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13167,8 +13127,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" @@ -13179,7 +13139,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\x73\xe3\x19\x3f\x8b\xc9\xc6\xf4" "\x5a\xf1\x5b\xa8\x98\x07\xc5\x36" "\x47\x4c\xfc\x36", - .rlen = 64 + 20, + .clen = 64 + 20, }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13202,7 +13162,7 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13210,8 +13170,8 @@ static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" @@ -13222,11 +13182,11 @@ 
static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = { "\xa3\xe8\x9b\x17\xe3\xf4\x7f\xde" "\x1b\x9f\xc6\x81\x26\x43\x4a\x87" "\x51\xee\xd6\x4e", - .rlen = 64 + 20, + .clen = 64 + 20, }, }; -static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_ecb_cipher_null_tv_temp[] = { { /* Input data from RFC 2410 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13241,13 +13201,13 @@ static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { "\x00\x00\x00\x00", .klen = 8 + 20 + 0, .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .ilen = 8, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef" + .ptext = "\x01\x23\x45\x67\x89\xab\xcd\xef", + .plen = 8, + .ctext = "\x01\x23\x45\x67\x89\xab\xcd\xef" "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" "\x99\x5e\x19\x04\xd1\x72\xef\xb8" "\x8c\x5e\xe4\x08", - .rlen = 8 + 20, + .clen = 8 + 20, }, { /* Input data from RFC 2410 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13262,63 +13222,17 @@ static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = { "\x00\x00\x00\x00", .klen = 8 + 20 + 0, .iv = "", - .input = "Network Security People Have A Strange Sense Of Humor", - .ilen = 53, - .result = "Network Security People Have A Strange Sense Of Humor" - "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" - "\x65\x47\xee\x8e\x1a\xef\x16\xf6" - "\x91\x56\xe4\xd6", - .rlen = 53 + 20, - }, -}; - -static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = { - { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00", - .klen = 8 + 20 + 0, - .iv = "", - .input = "\x01\x23\x45\x67\x89\xab\xcd\xef" - "\x40\xc3\x0a\xa1\xc9\xa0\x28\xab" - "\x99\x5e\x19\x04\xd1\x72\xef\xb8" - "\x8c\x5e\xe4\x08", - .ilen = 8 + 20, - .result = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .rlen = 8, - }, { -#ifdef __LITTLE_ENDIAN - .key = "\x08\x00" /* rta length */ - "\x01\x00" /* rta type */ -#else - .key = "\x00\x08" /* rta length */ - "\x00\x01" /* rta type */ -#endif - "\x00\x00\x00\x00" /* enc key length */ - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00", - .klen = 8 + 20 + 0, - .iv = "", - .input = "Network Security People Have A Strange Sense Of Humor" + .ptext = "Network Security People Have A Strange Sense Of Humor", + .plen = 53, + .ctext = "Network Security People Have A Strange Sense Of Humor" "\x75\x6f\x42\x1e\xf8\x50\x21\xd2" "\x65\x47\xee\x8e\x1a\xef\x16\xf6" "\x91\x56\xe4\xd6", - .ilen = 53 + 20, - .result = "Network Security People Have A Strange Sense Of Humor", - .rlen = 53, + .clen = 53 + 20, }, }; -static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha256_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13340,15 +13254,15 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", .alen = 16, - .input = "Single block msg", - .ilen = 16, - .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + .ptext = "Single block msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a" 
"\xcc\xde\x2d\x6a\xae\xf1\x0b\xcc" "\x38\x06\x38\x51\xb4\xb8\xf3\x5b" "\x5c\x34\xa6\xa3\x6e\x0b\x05\xe5" "\x6a\x6d\x44\xaa\x26\xa8\x44\xa5", - .rlen = 16 + 32, + .clen = 16 + 32, }, { /* RFC 3602 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13370,12 +13284,12 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", .alen = 16, - .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .ilen = 32, - .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" "\x75\x86\x60\x2d\x25\x3c\xff\xf9" "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" @@ -13383,7 +13297,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x0e\x06\x58\x8f\xba\xf6\x06\xda" "\x49\x69\x0d\x5b\xd4\x36\x06\x62" "\x35\x5e\x54\x58\x53\x4d\xdf\xbf", - .rlen = 32 + 32, + .clen = 32 + 32, }, { /* RFC 3602 Case 3 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13405,9 +13319,9 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", .alen = 16, - .input = "This is a 48-byte message (exactly 3 AES blocks)", - .ilen = 48, - .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" "\x50\x69\x39\x27\x67\x72\xf8\xd5" @@ -13417,7 +13331,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe7\xc6\xce\x10\x31\x2f\x9b\x1d" "\x24\x78\xfb\xbe\x02\xe0\x4f\x40" "\x10\xbd\xaa\xc6\xa7\x79\xe0\x1a", - .rlen = 48 + 32, + .clen = 48 + 32, }, { /* RFC 3602 Case 4 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13439,7 +13353,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", .alen = 16, - .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" @@ -13447,8 +13361,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", - .ilen = 64, - .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + .plen = 64, + .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" @@ -13460,7 +13374,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe0\x93\xec\xc9\x9f\xf7\xce\xd8" "\x3f\x54\xe2\x49\x39\xe3\x71\x25" "\x2b\x6c\xe9\x5d\xec\xec\x2b\x64", - .rlen = 64 + 32, + .clen = 64 + 32, }, { /* RFC 3602 Case 5 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13483,7 +13397,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe9\x6e\x8c\x08\xab\x46\x57\x63" "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", .alen = 24, - .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" 
"\x10\x11\x12\x13\x14\x15\x16\x17" @@ -13493,8 +13407,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x30\x31\x32\x33\x34\x35\x36\x37" "\x01\x02\x03\x04\x05\x06\x07\x08" "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", - .ilen = 80, - .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" "\xa9\x45\x3e\x19\x4e\x12\x08\x49" "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" "\x33\x00\x13\xb4\x89\x8d\xc8\x56" @@ -13508,7 +13422,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x3a\xd2\xe1\x03\x86\xa5\x59\xb7" "\x73\xc3\x46\x20\x2c\xb1\xef\x68" "\xbb\x8a\x32\x7e\x12\x8c\x69\xcf", - .rlen = 80 + 32, + .clen = 80 + 32, }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13531,7 +13445,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13539,8 +13453,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" @@ -13552,7 +13466,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x61\x81\x31\xea\x5b\x3d\x8e\xfb" "\xca\x71\x85\x93\xf7\x85\x55\x8b" "\x7a\xe4\x94\xca\x8b\xba\x19\x33", - .rlen = 64 + 32, + .clen = 64 + 32, }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13576,7 +13490,7 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13584,8 +13498,8 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" @@ -13597,11 +13511,11 @@ static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = { "\x8f\x74\xbd\x17\x92\x03\xbe\x8f" "\xf3\x61\xde\x1c\xe9\xdb\xcd\xd0" "\xcc\xce\xe9\x85\x57\xcf\x6f\x5f", - .rlen = 64 + 32, + .clen = 64 + 32, }, }; -static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha512_aes_cbc_tv_temp[] = { { /* RFC 3602 Case 1 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13627,9 +13541,9 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", .alen = 16, - .input = "Single block msg", - .ilen = 16, - .result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" + .ptext = "Single block 
msg", + .plen = 16, + .ctext = "\xe3\x53\x77\x9c\x10\x79\xae\xb8" "\x27\x08\x94\x2d\xbe\x77\x18\x1a" "\x3f\xdc\xad\x90\x03\x63\x5e\x68" "\xc3\x13\xdd\xa4\x5c\x4d\x54\xa7" @@ -13639,7 +13553,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe8\x9a\x7c\x06\x3d\xcb\xff\xb2" "\xfa\x20\x89\xdd\x9c\xac\x9e\x16" "\x18\x8a\xa0\x6d\x01\x6c\xa3\x3a", - .rlen = 16 + 64, + .clen = 16 + 64, }, { /* RFC 3602 Case 2 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13665,12 +13579,12 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", .alen = 16, - .input = "\x00\x01\x02\x03\x04\x05\x06\x07" + .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f", - .ilen = 32, - .result = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" + .plen = 32, + .ctext = "\xd2\x96\xcd\x94\xc2\xcc\xcf\x8a" "\x3a\x86\x30\x28\xb5\xe1\xdc\x0a" "\x75\x86\x60\x2d\x25\x3c\xff\xf9" "\x1b\x82\x66\xbe\xa6\xd6\x1a\xb1" @@ -13682,7 +13596,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x46\x32\x7c\x41\x9c\x59\x3e\xe9" "\x8f\x9f\xd4\x31\xd6\x22\xbd\xf8" "\xf7\x0a\x94\xe5\xa9\xc3\xf6\x9d", - .rlen = 32 + 64, + .clen = 32 + 64, }, { /* RFC 3602 Case 3 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13708,9 +13622,9 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb" "\xd9\xcd\x27\xd8\x25\x68\x2c\x81", .alen = 16, - .input = "This is a 48-byte message (exactly 3 AES blocks)", - .ilen = 48, - .result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" + .ptext = "This is a 48-byte message (exactly 3 AES blocks)", + .plen = 48, + .ctext = "\xd0\xa0\x2b\x38\x36\x45\x17\x53" "\xd4\x93\x66\x5d\x33\xf0\xe8\x86" "\x2d\xea\x54\xcd\xb2\x93\xab\xc7" "\x50\x69\x39\x27\x67\x72\xf8\xd5" @@ -13724,7 +13638,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x08\xea\x29\x6c\x74\x67\x3f\xb0" "\xac\x7f\x5c\x1d\xf5\xee\x22\x66" "\x27\xa6\xb6\x13\xba\xba\xf0\xc2", - .rlen = 48 + 64, + .clen = 48 + 64, }, { /* RFC 3602 Case 4 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13750,7 +13664,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c" "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9", .alen = 16, - .input = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" + .ptext = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" @@ -13758,8 +13672,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf", - .ilen = 64, - .result = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" + .plen = 64, + .ctext = "\xc3\x0e\x32\xff\xed\xc0\x77\x4e" "\x6a\xff\x6a\xf0\x86\x9f\x71\xaa" "\x0f\x3a\xf0\x7a\x9a\x31\xa9\xc6" "\x84\xdb\x20\x7e\xb0\xef\x8e\x4e" @@ -13775,7 +13689,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xbc\x6f\xed\xd5\x8d\xde\x23\x7c" "\x62\x98\x14\xd7\x2f\x37\x8d\xdf" "\xf4\x33\x80\xeb\x8e\xb4\xa4\xda", - .rlen = 64 + 64, + .clen = 64 + 64, }, { /* RFC 3602 Case 5 */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13802,7 +13716,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { 
"\xe9\x6e\x8c\x08\xab\x46\x57\x63" "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93", .alen = 24, - .input = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" + .ptext = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00" "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" @@ -13812,8 +13726,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x30\x31\x32\x33\x34\x35\x36\x37" "\x01\x02\x03\x04\x05\x06\x07\x08" "\x09\x0a\x0b\x0c\x0d\x0e\x0e\x01", - .ilen = 80, - .result = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" + .plen = 80, + .ctext = "\xf6\x63\xc2\x5d\x32\x5c\x18\xc6" "\xa9\x45\x3e\x19\x4e\x12\x08\x49" "\xa4\x87\x0b\x66\xcc\x6b\x99\x65" "\x33\x00\x13\xb4\x89\x8d\xc8\x56" @@ -13831,7 +13745,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\x92\x26\xc1\x76\x20\x11\xeb\xba" "\x62\x4f\x9a\x62\x25\xc3\x75\x80" "\xb7\x0a\x17\xf5\xd7\x94\xb4\x14", - .rlen = 80 + 64, + .clen = 80 + 64, }, { /* NIST SP800-38A F.2.3 CBC-AES192.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13858,7 +13772,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13866,8 +13780,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" + .plen = 64, + .ctext = "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d" "\x71\x78\x18\x3a\x9f\xa0\x71\xe8" "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4" "\xe5\xe7\x38\x76\x3f\x69\x14\x5a" @@ -13883,7 +13797,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xba\x03\xd5\x32\xfa\x5f\x41\x58" "\x8d\x43\x98\xa7\x94\x16\x07\x02" "\x0f\xb6\x81\x50\x28\x95\x2e\x75", - .rlen = 64 + 64, + .clen = 64 + 64, }, { /* NIST SP800-38A F.2.5 CBC-AES256.Encrypt */ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13911,7 +13825,7 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", .alen = 16, - .input = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" @@ -13919,8 +13833,8 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", - .ilen = 64, - .result = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" + .plen = 64, + .ctext = "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba" "\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6" "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d" "\x67\x9f\x77\x7b\xc6\x70\x2c\x7d" @@ -13936,11 +13850,11 @@ static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = { "\xe3\x8d\x64\xc3\x8d\xff\x7c\x8c" "\xdb\xbf\xa0\xb4\x01\xa2\xa8\xa2" "\x2c\xb1\x62\x2c\x10\xca\xf1\x21", - .rlen = 64 + 64, + .clen = 64 + 64, }, }; -static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -13959,7 +13873,7 @@ static const struct aead_testvec 
hmac_sha1_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -13975,8 +13889,8 @@ static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -13995,11 +13909,11 @@ static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = { "\x95\x16\x20\x09\xf5\x95\x19\xfd" "\x3c\xc7\xe0\x42\xc0\x14\x69\xfa" "\x5c\x44\xa9\x37", - .rlen = 128 + 20, + .clen = 128 + 20, }, }; -static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha224_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14018,7 +13932,7 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14034,8 +13948,8 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14054,11 +13968,11 @@ static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = { "\x9c\x2d\x7e\xee\x20\x34\x55\x0a" "\xce\xb5\x4e\x64\x53\xe7\xbf\x91" "\xab\xd4\xd9\xda\xc9\x12\xae\xf7", - .rlen = 128 + 24, + .clen = 128 + 24, }, }; -static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha256_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14078,7 +13992,7 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14094,8 +14008,8 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14115,11 +14029,11 @@ static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = { "\x50\xf6\x5d\xab\x4b\x51\x4e\x5e" "\xde\x63\xde\x76\x52\xde\x9f\xba" "\x90\xcf\x15\xf2\xbb\x6e\x84\x00", - .rlen = 128 + 32, + .clen = 128 + 
32, }, }; -static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha384_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14141,7 +14055,7 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14157,8 +14071,8 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14180,11 +14094,11 @@ static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = { "\x5e\x67\xb5\x74\xe7\xe7\x85\x61" "\x6a\x95\x26\x75\xcc\x53\x89\xf3" "\x74\xc9\x2a\x76\x20\xa2\x64\x62", - .rlen = 128 + 48, + .clen = 128 + 48, }, }; -static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha512_des_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14208,7 +14122,7 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14224,8 +14138,8 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" + .plen = 128, + .ctext = "\x70\xd6\xde\x64\x87\x17\xf1\xe8" "\x54\x31\x85\x37\xed\x6b\x01\x8d" "\xe3\xcc\xe0\x1d\x5e\xf3\xfe\xf1" "\x41\xaa\x33\x91\xa7\x7d\x99\x88" @@ -14249,11 +14163,11 @@ static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = { "\x97\xe2\xe3\xb8\xaa\x48\x85\xee" "\x8c\xf6\x07\x95\x1f\xa6\x6c\x96" "\x99\xc7\x5c\x8d\xd8\xb5\x68\x7b", - .rlen = 128 + 64, + .clen = 128 + 64, }, }; -static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha1_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14274,7 +14188,7 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14290,8 +14204,8 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" 
"\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14310,11 +14224,11 @@ static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = { "\x67\x6d\xb1\xf5\xb8\x10\xdc\xc6" "\x75\x86\x96\x6b\xb1\xc5\xe4\xcf" "\xd1\x60\x91\xb3", - .rlen = 128 + 20, + .clen = 128 + 20, }, }; -static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha224_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14335,7 +14249,7 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14351,8 +14265,8 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14371,11 +14285,11 @@ static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = { "\x15\x24\x7f\x5a\x45\x4a\x66\xce" "\x2b\x0b\x93\x99\x2f\x9d\x0c\x6c" "\x56\x1f\xe1\xa6\x41\xb2\x4c\xd0", - .rlen = 128 + 24, + .clen = 128 + 24, }, }; -static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha256_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14397,7 +14311,7 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14413,8 +14327,8 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14434,11 +14348,11 @@ static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = { "\x56\x38\x44\xc0\xdb\xe3\x4f\x71" "\xf7\xce\xd1\xd3\xf8\xbd\x3e\x4f" "\xca\x43\x95\xdf\x80\x61\x81\xa9", - .rlen = 128 + 32, + .clen = 128 + 32, }, }; -static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha384_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14462,7 +14376,7 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" 
"\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14478,8 +14392,8 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14501,11 +14415,11 @@ static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = { "\xa4\x32\x8a\x0b\x46\xd7\xf0\x39" "\x36\x5d\x13\x2f\x86\x10\x78\xd6" "\xd6\xbe\x5c\xb9\x15\x89\xf9\x1b", - .rlen = 128 + 48, + .clen = 128 + 48, }, }; -static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { +static const struct aead_testvec hmac_sha512_des3_ede_cbc_tv_temp[] = { { /*Generated with cryptopp*/ #ifdef __LITTLE_ENDIAN .key = "\x08\x00" /* rta length */ @@ -14531,7 +14445,7 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { .assoc = "\x00\x00\x43\x21\x00\x00\x00\x01" "\x7D\x33\x88\x93\x0F\x93\xB2\x42", .alen = 16, - .input = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" + .ptext = "\x6f\x54\x20\x6f\x61\x4d\x79\x6e" "\x53\x20\x63\x65\x65\x72\x73\x74" "\x54\x20\x6f\x6f\x4d\x20\x6e\x61" "\x20\x79\x65\x53\x72\x63\x74\x65" @@ -14547,8 +14461,8 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { "\x20\x6f\x61\x4d\x79\x6e\x53\x20" "\x63\x65\x65\x72\x73\x74\x54\x20" "\x6f\x6f\x4d\x20\x6e\x61\x0a\x79", - .ilen = 128, - .result = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" + .plen = 128, + .ctext = "\x0e\x2d\xb6\x97\x3c\x56\x33\xf4" "\x67\x17\x21\xc7\x6e\x8a\xd5\x49" "\x74\xb3\x49\x05\xc5\x1c\xd0\xed" "\x12\x56\x5c\x53\x96\xb6\x00\x7d" @@ -14572,7 +14486,7 @@ static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = { "\x2a\x74\xd4\x65\x12\xcb\x55\xf2" "\xd5\x02\x6d\xe6\xaf\xc9\x2f\xf2" "\x57\xaa\x85\xf7\xf3\x6a\xcb\xdb", - .rlen = 128 + 64, + .clen = 128 + 64, }, }; @@ -14836,9 +14750,6 @@ static const struct cipher_testvec aes_lrw_tv_template[] = { "\xcd\x7e\x2b\x5d\x43\xea\x42\xe7" "\x74\x3f\x7d\x58\x88\x75\xde\x3e", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, } }; @@ -15174,9 +15085,6 @@ static const struct cipher_testvec aes_xts_tv_template[] = { "\xc4\xf3\x6f\xfd\xa9\xfc\xea\x70" "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, } }; @@ -15187,6 +15095,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 16, .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -15211,6 +15121,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 24, .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03", .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -15236,6 +15148,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 32, .iv = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff", + .iv_out = "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" + "\xf8\xf9\xfa\xfb\xfc\xfd\xff\x03", .ptext = 
"\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" @@ -15261,6 +15175,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x1C", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -15386,9 +15302,6 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { "\xFA\x3A\x05\x4C\xFA\xD1\xFF\xFE" "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, { /* Generated with Crypto++ */ .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55" "\x0F\x32\x55\x78\x9B\xBE\x78\x9B" @@ -15397,6 +15310,8 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { .klen = 32, .iv = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47" "\xE2\x7D\x18\xD6\x71\x0C\xA7\x42", + .iv_out = "\xE7\x82\x1D\xB8\x53\x11\xAC\x47" + "\xE2\x7D\x18\xD6\x71\x0C\xA7\x62", .ptext = "\x50\xB9\x22\xAE\x17\x80\x0C\x75" "\xDE\x47\xD3\x3C\xA5\x0E\x9A\x03" "\x6C\xF8\x61\xCA\x33\xBF\x28\x91" @@ -15524,9 +15439,6 @@ static const struct cipher_testvec aes_ctr_tv_template[] = { "\xD8\xFE\xC9\x5B\x5C\x25\xE5\x76" "\xFB\xF2\x3F", .len = 499, - .also_non_np = 1, - .np = 2, - .tap = { 499 - 16, 16 }, }, }; @@ -16650,14 +16562,11 @@ static const struct cipher_testvec aes_ctr_rfc3686_tv_template[] = { "\x4b\xef\x31\x18\xea\xac\xb1\x84" "\x21\xed\xda\x86", .len = 4100, - .np = 2, - .tap = { 4064, 36 }, }, }; static const struct cipher_testvec aes_ofb_tv_template[] = { - /* From NIST Special Publication 800-38A, Appendix F.5 */ - { + { /* From NIST Special Publication 800-38A, Appendix F.5 */ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", .klen = 16, @@ -16680,33 +16589,55 @@ static const struct cipher_testvec aes_ofb_tv_template[] = { "\x30\x4c\x65\x28\xf6\x59\xc7\x78" "\x66\xa5\x10\xd9\xc1\xd6\xae\x5e", .len = 64, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\x77", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, } }; -static const struct aead_testvec aes_gcm_enc_tv_template[] = { +static const struct aead_testvec aes_gcm_tv_template[] = { { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ .key = zeroed_string, .klen = 16, - .result = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61" + .ctext = "\x58\xe2\xfc\xce\xfa\x7e\x30\x61" "\x36\x7f\x1d\x57\xa4\xe7\x45\x5a", - .rlen = 16, + .clen = 16, }, { .key = zeroed_string, .klen = 16, - .input = zeroed_string, - .ilen = 16, - .result = "\x03\x88\xda\xce\x60\xb6\xa3\x92" + .ptext = zeroed_string, + .plen = 16, + .ctext = "\x03\x88\xda\xce\x60\xb6\xa3\x92" "\xf3\x28\xc2\xb9\x71\xb2\xfe\x78" "\xab\x6e\x47\xd4\x2c\xec\x13\xbd" "\xf5\x3a\x67\xb2\x12\x57\xbd\xdf", - .rlen = 32, + .clen = 32, }, { .key = 
"\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08", .klen = 16, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16714,8 +16645,8 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .ilen = 64, - .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24" + .plen = 64, + .ctext = "\x42\x83\x1e\xc2\x21\x77\x74\x24" "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" @@ -16725,14 +16656,14 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x3d\x58\xe0\x91\x47\x3f\x59\x85" "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6" "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4", - .rlen = 80, + .clen = 80, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08", .klen = 16, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16740,12 +16671,12 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39", - .ilen = 60, + .plen = 60, .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xab\xad\xda\xd2", .alen = 20, - .result = "\x42\x83\x1e\xc2\x21\x77\x74\x24" + .ctext = "\x42\x83\x1e\xc2\x21\x77\x74\x24" "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" @@ -16755,23 +16686,23 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x3d\x58\xe0\x91" "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb" "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47", - .rlen = 76, + .clen = 76, }, { .key = zeroed_string, .klen = 24, - .result = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b" + .ctext = "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b" "\xa0\x0e\xd1\xf3\x12\x57\x24\x35", - .rlen = 16, + .clen = 16, }, { .key = zeroed_string, .klen = 24, - .input = zeroed_string, - .ilen = 16, - .result = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41" + .ptext = zeroed_string, + .plen = 16, + .ctext = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41" "\x1c\x26\x7e\x43\x84\xb0\xf6\x00" "\x2f\xf5\x8d\x80\x03\x39\x27\xab" "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" @@ -16779,7 +16710,7 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { .klen = 24, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16787,8 +16718,8 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .ilen = 64, - .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" + .plen = 64, + .ctext = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" @@ -16798,71 +16729,41 @@ static const struct aead_testvec aes_gcm_enc_tv_template[] = { 
"\xcc\xda\x27\x10\xac\xad\xe2\x56" "\x99\x24\xa7\xc8\x58\x73\x36\xbf" "\xb1\x18\x02\x4d\xb8\x67\x4a\x14", - .rlen = 80, + .clen = 80, + }, { + .key = zeroed_string, + .klen = 32, + .ctext = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9" + "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b", + .clen = 16, + }, { + .key = zeroed_string, + .klen = 32, + .ptext = zeroed_string, + .plen = 16, + .ctext = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e" + "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18" + "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0" + "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19", + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\xfe\xff\xe9\x92\x86\x65\x73\x1c", - .klen = 24, + "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08", + .klen = 32, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" "\x1c\x3c\x0c\x95\x95\x68\x09\x53" "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39", - .ilen = 60, - .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xab\xad\xda\xd2", - .alen = 20, - .result = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" - "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" - "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" - "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" - "\x7d\x77\x3d\x00\xc1\x44\xc5\x25" - "\xac\x61\x9d\x18\xc8\x4a\x3f\x47" - "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9" - "\xcc\xda\x27\x10" - "\x25\x19\x49\x8e\x80\xf1\x47\x8f" - "\x37\xba\x55\xbd\x6d\x27\x61\x8c", - .rlen = 76, - .np = 2, - .tap = { 32, 28 }, - .anp = 2, - .atap = { 8, 12 } - }, { - .key = zeroed_string, - .klen = 32, - .result = "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9" - "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b", - .rlen = 16, - } -}; - -static const struct aead_testvec aes_gcm_dec_tv_template[] = { - { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */ - .key = zeroed_string, - .klen = 32, - .input = "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e" - "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18" - "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0" - "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19", - .ilen = 32, - .result = zeroed_string, - .rlen = 16, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08", - .klen = 32, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07" + "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", + .plen = 64, + .ctext = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07" "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d" "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9" "\x75\x98\xa2\xbd\x25\x55\xd1\xaa" @@ -16872,16 +16773,7 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { "\xbc\xc9\xf6\x62\x89\x80\x15\xad" "\xb0\x94\xda\xc5\xd9\x34\x71\xbd" "\xec\x1a\x50\x22\x70\xe3\xcc\x6c", - .ilen = 80, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .rlen = 64, + .clen = 80, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" @@ -16890,22 +16782,7 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { .klen = 32, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = 
"\x52\x2d\xc1\xf0\x99\x56\x7d\x07" - "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d" - "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9" - "\x75\x98\xa2\xbd\x25\x55\xd1\xaa" - "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d" - "\xa7\xb0\x8b\x10\x56\x82\x88\x38" - "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a" - "\xbc\xc9\xf6\x62" - "\x76\xfc\x6e\xce\x0f\x4e\x17\x68" - "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b", - .ilen = 76, - .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xab\xad\xda\xd2", - .alen = 20, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" @@ -16913,77 +16790,22 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" "\xba\x63\x7b\x39", - .rlen = 60, - .np = 2, - .tap = { 48, 28 }, - .anp = 3, - .atap = { 8, 8, 4 } - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08", - .klen = 16, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24" - "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" - "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" - "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" - "\x21\xd5\x14\xb2\x54\x66\x93\x1c" - "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05" - "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97" - "\x3d\x58\xe0\x91\x47\x3f\x59\x85" - "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6" - "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4", - .ilen = 80, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .rlen = 64, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08", - .klen = 16, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x42\x83\x1e\xc2\x21\x77\x74\x24" - "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c" - "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0" - "\x35\xc1\x7e\x23\x29\xac\xa1\x2e" - "\x21\xd5\x14\xb2\x54\x66\x93\x1c" - "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05" - "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97" - "\x3d\x58\xe0\x91" - "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb" - "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47", - .ilen = 76, + .plen = 60, .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xfe\xed\xfa\xce\xde\xad\xbe\xef" "\xab\xad\xda\xd2", .alen = 20, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39", - .rlen = 60, - }, { - .key = zeroed_string, - .klen = 24, - .input = "\x98\xe7\x24\x7c\x07\xf0\xfe\x41" - "\x1c\x26\x7e\x43\x84\xb0\xf6\x00" - "\x2f\xf5\x8d\x80\x03\x39\x27\xab" - "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb", - .ilen = 32, - .result = zeroed_string, - .rlen = 16, + .ctext = "\x52\x2d\xc1\xf0\x99\x56\x7d\x07" + "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d" + "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9" + "\x75\x98\xa2\xbd\x25\x55\xd1\xaa" + "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d" + "\xa7\xb0\x8b\x10\x56\x82\x88\x38" + "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a" + "\xbc\xc9\xf6\x62" + "\x76\xfc\x6e\xce\x0f\x4e\x17\x68" + "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b", + .clen = 76, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" @@ -16991,34 +16813,20 @@ static 
const struct aead_testvec aes_gcm_dec_tv_template[] = { .klen = 24, .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" "\xde\xca\xf8\x88", - .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" - "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" - "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" - "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" - "\x7d\x77\x3d\x00\xc1\x44\xc5\x25" - "\xac\x61\x9d\x18\xc8\x4a\x3f\x47" - "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9" - "\xcc\xda\x27\x10\xac\xad\xe2\x56" - "\x99\x24\xa7\xc8\x58\x73\x36\xbf" - "\xb1\x18\x02\x4d\xb8\x67\x4a\x14", - .ilen = 80, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" + .ptext = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" "\x86\xa7\xa9\x53\x15\x34\xf7\xda" "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" "\x1c\x3c\x0c\x95\x95\x68\x09\x53" "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39\x1a\xaf\xd2\x55", - .rlen = 64, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\xfe\xff\xe9\x92\x86\x65\x73\x1c", - .klen = 24, - .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad" - "\xde\xca\xf8\x88", - .input = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" + "\xba\x63\x7b\x39", + .plen = 60, + .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xfe\xed\xfa\xce\xde\xad\xbe\xef" + "\xab\xad\xda\xd2", + .alen = 20, + .ctext = "\x39\x80\xca\x0b\x3c\x00\xe8\x41" "\xeb\x06\xfa\xc4\x87\x2a\x27\x57" "\x85\x9e\x1c\xea\xa6\xef\xd9\x84" "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c" @@ -17028,53 +16836,40 @@ static const struct aead_testvec aes_gcm_dec_tv_template[] = { "\xcc\xda\x27\x10" "\x25\x19\x49\x8e\x80\xf1\x47\x8f" "\x37\xba\x55\xbd\x6d\x27\x61\x8c", - .ilen = 76, - .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xfe\xed\xfa\xce\xde\xad\xbe\xef" - "\xab\xad\xda\xd2", - .alen = 20, - .result = "\xd9\x31\x32\x25\xf8\x84\x06\xe5" - "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a" - "\x86\xa7\xa9\x53\x15\x34\xf7\xda" - "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72" - "\x1c\x3c\x0c\x95\x95\x68\x09\x53" - "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25" - "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57" - "\xba\x63\x7b\x39", - .rlen = 60, + .clen = 76, } }; -static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { +static const struct aead_testvec aes_gcm_rfc4106_tv_template[] = { { /* Generated using Crypto++ */ .key = zeroed_string, .klen = 20, .iv = zeroed_string, - .input = zeroed_string, - .ilen = 16, + .ptext = zeroed_string, + .plen = 16, .assoc = zeroed_string, .alen = 16, - .result = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" + .ctext = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" "\x97\xFE\x4C\x23\x37\x42\x01\xE0" "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", - .rlen = 32, + .clen = 32, },{ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = zeroed_string, - .ilen = 16, + .ptext = zeroed_string, + .plen = 16, .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x01", .alen = 16, - .result = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" + .ctext = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" @@ -17082,57 +16877,57 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x00\x00\x00\x00", .klen = 20, .iv = zeroed_string, - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, + .plen = 16, 
.assoc = zeroed_string, .alen = 16, - .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + .ctext = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" "\xB1\x68\xFD\x14\x52\x64\x61\xB2", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = zeroed_string, - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, + .plen = 16, .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x00\x00\x00\x00\x00\x00\x00\x00", .alen = 16, - .result = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" + .ctext = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, + .plen = 16, .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x00\x00\x00\x00\x00\x00\x00\x01", .alen = 16, - .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + .ctext = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" "\x64\x50\xF9\x32\x13\xFB\x74\x61" "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", - .rlen = 32, + .clen = 32, }, { .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" "\x6d\x6a\x8f\x94\x67\x30\x83\x08" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" @@ -17140,11 +16935,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01" "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 64, + .plen = 64, .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" "\x00\x00\x00\x00\x00\x00\x00\x01", .alen = 16, - .result = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" + .ctext = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" "\x98\x14\xA1\x42\x37\x80\xFD\x90" "\x68\x12\x01\xA8\x91\x89\xB9\x83" @@ -17154,14 +16949,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", - .rlen = 80, + .clen = 80, }, { .key = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", - .input = "\xff\xff\xff\xff\xff\xff\xff\xff" + .ptext = "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" @@ -17185,12 +16980,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff" "\xff\xff\xff\xff\xff\xff\xff\xff", - .ilen = 192, + .plen = 192, .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" "\x89\xab\xcd\xef", .alen = 20, - .result = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" + .ctext = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" @@ -17216,14 +17011,14 @@ static const struct 
aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", - .rlen = 208, + .clen = 208, }, { /* From draft-mcgrew-gcm-test-01 */ .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" "\x2E\x44\x3B\x68", .klen = 20, .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", - .input = "\x45\x00\x00\x48\x69\x9A\x00\x00" + .ptext = "\x45\x00\x00\x48\x69\x9A\x00\x00" "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" "\x38\xD3\x01\x00\x00\x01\x00\x00" @@ -17232,12 +17027,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x69\x70\x09\x63\x79\x62\x65\x72" "\x63\x69\x74\x79\x02\x64\x6B\x00" "\x00\x21\x00\x01\x01\x02\x02\x01", - .ilen = 72, + .plen = 72, .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" "\x00\x00\x00\x00\x49\x56\xED\x7E" "\x3B\x24\x4C\xFE", .alen = 20, - .result = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" + .ctext = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" "\x34\x85\x09\xFA\x13\xCE\xAC\x34" @@ -17248,14 +17043,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x61\xBC\x17\xD7\x68\xFD\x97\x32" "\x45\x90\x18\x14\x8F\x6C\xBE\x72" "\x2F\xD0\x47\x96\x56\x2D\xFD\xB4", - .rlen = 88, + .clen = 88, }, { .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" "\x6D\x6A\x8F\x94\x67\x30\x83\x08" "\xCA\xFE\xBA\xBE", .klen = 20, .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .input = "\x45\x00\x00\x3E\x69\x8F\x00\x00" + .ptext = "\x45\x00\x00\x3E\x69\x8F\x00\x00" "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" "\xC0\xA8\x01\x01\x0A\x98\x00\x35" "\x00\x2A\x23\x43\xB2\xD0\x01\x00" @@ -17263,11 +17058,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x03\x73\x69\x70\x09\x63\x79\x62" "\x65\x72\x63\x69\x74\x79\x02\x64" "\x6B\x00\x00\x01\x00\x01\x00\x01", - .ilen = 64, + .plen = 64, .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", .alen = 16, - .result = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" + .ctext = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" "\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C" @@ -17277,7 +17072,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1" "\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4" "\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A", - .rlen = 80, + .clen = 80, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" @@ -17286,18 +17081,18 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x11\x22\x33\x44", .klen = 36, .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .input = "\x45\x00\x00\x30\x69\xA6\x40\x00" + .ptext = "\x45\x00\x00\x30\x69\xA6\x40\x00" "\x80\x06\x26\x90\xC0\xA8\x01\x02" "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" "\x70\x02\x40\x00\x20\xBF\x00\x00" "\x02\x04\x05\xB4\x01\x01\x04\x02" "\x01\x02\x02\x01", - .ilen = 52, + .plen = 52, .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" "\x01\x02\x03\x04\x05\x06\x07\x08", .alen = 16, - .result = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" + .ctext = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" "\x06\xEF\xAE\x9D\x65\xA5\xD7\x63" @@ -17306,14 +17101,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE" "\xCF\xDB\xF8\x31\x82\x4B\x4C\x49" "\x15\x95\x6C\x96", - .rlen = 68, + .clen = 68, }, { .key = 
"\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00", .klen = 20, .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .input = "\x45\x00\x00\x3C\x99\xC5\x00\x00" + .ptext = "\x45\x00\x00\x3C\x99\xC5\x00\x00" "\x80\x01\xCB\x7A\x40\x67\x93\x18" "\x01\x01\x01\x01\x08\x00\x07\x5C" "\x02\x00\x44\x00\x61\x62\x63\x64" @@ -17321,11 +17116,11 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x75\x76\x77\x61\x62\x63\x64\x65" "\x66\x67\x68\x69\x01\x02\x02\x01", - .ilen = 64, + .plen = 64, .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" "\x00\x00\x00\x00\x00\x00\x00\x00", .alen = 16, - .result = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" + .ctext = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" "\x73\x29\x09\xC3\x31\xD5\x6D\x60" "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" "\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84" @@ -17335,14 +17130,14 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3" "\xF8\x21\xD4\x96\xEE\xB0\x96\xE9" "\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D", - .rlen = 80, + .clen = 80, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x3C\x99\xC3\x00\x00" + .ptext = "\x45\x00\x00\x3C\x99\xC3\x00\x00" "\x80\x01\xCB\x7C\x40\x67\x93\x18" "\x01\x01\x01\x01\x08\x00\x08\x5C" "\x02\x00\x43\x00\x61\x62\x63\x64" @@ -17350,12 +17145,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x75\x76\x77\x61\x62\x63\x64\x65" "\x66\x67\x68\x69\x01\x02\x02\x01", - .ilen = 64, + .plen = 64, .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" + .ctext = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" "\x0D\x11\x38\xEC\x9C\x35\x79\x17" @@ -17365,29 +17160,29 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x1F\x5E\x22\x73\x95\x30\x32\x0A" "\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA" "\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48", - .rlen = 80, + .clen = 80, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x1C\x42\xA2\x00\x00" + .ptext = "\x45\x00\x00\x1C\x42\xA2\x00\x00" "\x80\x01\x44\x1F\x40\x67\x93\xB6" "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" "\x01\x02\x02\x01", - .ilen = 28, + .plen = 28, .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" + .ctext = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" "\x0E\x13\x79\xED\x36\x9F\x07\x1F" "\x35\xE0\x34\xBE\x95\xF1\x12\xE4" "\xE7\xD0\x5D\x35", - .rlen = 44, + .clen = 44, }, { .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" "\x6D\x6A\x8F\x94\x67\x30\x83\x08" @@ -17395,30 +17190,30 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xCA\xFE\xBA\xBE", .klen = 28, .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .input = "\x45\x00\x00\x28\xA4\xAD\x40\x00" + .ptext = "\x45\x00\x00\x28\xA4\xAD\x40\x00" "\x40\x06\x78\x80\x0A\x01\x03\x8F" "\x0A\x01\x06\x12\x80\x23\x06\xB8" "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" "\x50\x10\x16\xD0\x75\x68\x00\x01", - .ilen = 40, + .plen = 40, .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" 
"\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", .alen = 16, - .result = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" + .ctext = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" "\x0E\x59\x8B\x81\x22\xDE\x02\x42" "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" "\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0" "\x31\x5B\x27\x45\x21\x44\xCC\x77" "\x95\x45\x7B\x96\x52\x03\x7F\x53" "\x18\x02\x7B\x5B\x4C\xD7\xA6\x36", - .rlen = 56, + .clen = 56, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" "\xDE\xCA\xF8\x88", .klen = 20, .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .input = "\x45\x00\x00\x49\x33\xBA\x00\x00" + .ptext = "\x45\x00\x00\x49\x33\xBA\x00\x00" "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" "\x00\x35\xDD\x7B\x80\x03\x02\xD5" @@ -17428,12 +17223,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" "\x9B\x62\x66\xC0\x47\x22\xB1\x49" "\x23\x01\x01\x01", - .ilen = 76, + .plen = 76, .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" "\xCE\xFA\xCE\x74", .alen = 20, - .result = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" + .ctext = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" "\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F" @@ -17445,7 +17240,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xE7\x84\x5D\x68\x65\x1F\x57\xE6" "\x5F\x35\x4F\x75\xFF\x17\x01\x57" "\x69\x62\x34\x36", - .rlen = 92, + .clen = 92, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" @@ -17454,31 +17249,31 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x73\x61\x6C\x74", .klen = 36, .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .input = "\x45\x08\x00\x28\x73\x2C\x00\x00" + .ptext = "\x45\x08\x00\x28\x73\x2C\x00\x00" "\x40\x06\xE9\xF9\x0A\x01\x06\x12" "\x0A\x01\x03\x8F\x06\xB8\x80\x23" "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" "\x50\x10\x1F\x64\x6D\x54\x00\x01", - .ilen = 40, + .plen = 40, .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" "\x69\x76\x65\x63", .alen = 20, - .result = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" + .ctext = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" "\x9D\x52\x4A\x30\x18\xD9\xA5\x7F" "\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E" "\x45\x16\x26\xC2\x41\x57\x71\xE3" "\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35", - .rlen = 56, + .clen = 56, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x49\x33\x3E\x00\x00" + .ptext = "\x45\x00\x00\x49\x33\x3E\x00\x00" "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" "\x00\x35\xCB\x45\x80\x03\x02\x5B" @@ -17488,12 +17283,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" "\x5A\xE2\x70\xC0\x38\x99\x49\x39" "\x15\x01\x01\x01", - .ilen = 76, + .plen = 76, .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" + .ctext = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" "\x3D\x77\x84\xB6\x07\x32\x3D\x22" "\x0F\x24\xB0\xA9\x7D\x54\x18\x28" @@ -17505,7 +17300,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\xE5\x16\x09\x75\xCD\xB6\x08\xC5" "\x76\x91\x89\x60\x97\x63\xB8\xE1" "\x8C\xAA\x81\xE2", - .rlen = 92, + .clen = 92, }, { .key = 
"\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" @@ -17514,7 +17309,7 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x73\x61\x6C\x74", .klen = 36, .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .input = "\x63\x69\x73\x63\x6F\x01\x72\x75" + .ptext = "\x63\x69\x73\x63\x6F\x01\x72\x75" "\x6C\x65\x73\x01\x74\x68\x65\x01" "\x6E\x65\x74\x77\x65\x01\x64\x65" "\x66\x69\x6E\x65\x01\x74\x68\x65" @@ -17523,12 +17318,12 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x74\x77\x69\x6C\x6C\x01\x64\x65" "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" "\x72\x72\x6F\x77\x01\x02\x02\x01", - .ilen = 72, + .plen = 72, .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" "\x69\x76\x65\x63", .alen = 20, - .result = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" + .ctext = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" "\x7B\x43\xF8\x26\xFB\x56\x83\x12" "\x26\x50\x8B\xEB\xD2\xDC\xEB\x18" @@ -17539,42 +17334,42 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x12\xA4\x93\x63\x41\x23\x64\xF8" "\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B" "\x11\xE2\x4F\x30\xE4\x4C\xCC\x76", - .rlen = 88, + .clen = 88, }, { .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" "\xD9\x66\x42\x67", .klen = 20, .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .input = "\x01\x02\x02\x01", - .ilen = 4, + .ptext = "\x01\x02\x02\x01", + .plen = 4, .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" "\x43\x45\x7E\x91\x82\x44\x3B\xC6", .alen = 16, - .result = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" + .ctext = "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" "\x04\xBE\xF2\x70", - .rlen = 20, + .clen = 20, }, { .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" "\x34\x45\x56\x67\x78\x89\x9A\xAB" "\xDE\xCA\xF8\x88", .klen = 20, .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .input = "\x74\x6F\x01\x62\x65\x01\x6F\x72" + .ptext = "\x74\x6F\x01\x62\x65\x01\x6F\x72" "\x01\x6E\x6F\x74\x01\x74\x6F\x01" "\x62\x65\x00\x01", - .ilen = 20, + .plen = 20, .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" "\xCE\xFA\xCE\x74", .alen = 20, - .result = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" + .ctext = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" "\x43\x33\x21\x64\x41\x25\x03\x52" "\x43\x03\xED\x3C\x6C\x5F\x28\x38" "\x43\xAF\x8C\x3E", - .rlen = 36, + .clen = 36, }, { .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" "\x6D\x61\x72\x69\x6A\x75\x61\x6E" @@ -17583,19 +17378,19 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x74\x75\x72\x6E", .klen = 36, .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", - .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" + .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00" "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" "\x02\x00\x07\x00\x61\x62\x63\x64" "\x65\x66\x67\x68\x69\x6A\x6B\x6C" "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x01\x02\x02\x01", - .ilen = 52, + .plen = 52, .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" "\x67\x65\x74\x6D", .alen = 20, - .result = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" + .ctext = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" "\x4A\x27\x4B\x39\xB4\x9C\x3A\x86" @@ -17604,26 +17399,26 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2" "\x94\x5F\x66\x93\x68\x66\x1A\x32" "\x9F\xB4\xC0\x53", - .rlen = 68, + .clen = 68, }, { .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" 
"\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" "\x57\x69\x0E\x43", .klen = 20, .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" + .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00" "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" "\x02\x00\x07\x00\x61\x62\x63\x64" "\x65\x66\x67\x68\x69\x6A\x6B\x6C" "\x6D\x6E\x6F\x70\x71\x72\x73\x74" "\x01\x02\x02\x01", - .ilen = 52, + .plen = 52, .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" "\x10\x10\x10\x10\x4E\x28\x00\x00" "\xA2\xFC\xA1\xA3", .alen = 20, - .result = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" + .ctext = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" "\x0D\x11\x7C\xEC\x9C\x35\x79\x17" @@ -17632,7262 +17427,2757 @@ static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = { "\x63\x21\x93\x06\x84\xEE\xCA\xDB" "\x56\x91\x25\x46\xE7\xA9\x5C\x97" "\x40\xD7\xCB\x05", - .rlen = 68, + .clen = 68, }, { .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" "\x22\x43\x3C\x64", .klen = 20, .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", - .input = "\x08\x00\xC6\xCD\x02\x00\x07\x00" + .ptext = "\x08\x00\xC6\xCD\x02\x00\x07\x00" "\x61\x62\x63\x64\x65\x66\x67\x68" "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" "\x71\x72\x73\x74\x01\x02\x02\x01", - .ilen = 32, + .plen = 32, .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" "\x00\x00\x00\x07\x48\x55\xEC\x7D" "\x3A\x23\x4B\xFD", .alen = 20, - .result = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" + .ctext = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" "\xAC\xDA\x68\x94\xBC\x61\x90\x69" "\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7" "\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0", - .rlen = 48, + .clen = 48, } }; -static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = { - { /* Generated using Crypto++ */ - .key = zeroed_string, - .klen = 20, - .iv = zeroed_string, - .input = "\x03\x88\xDA\xCE\x60\xB6\xA3\x92" - "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78" - "\x97\xFE\x4C\x23\x37\x42\x01\xE0" - "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B", - .ilen = 32, - .assoc = zeroed_string, - .alen = 16, - .result = zeroed_string, - .rlen = 16, - - },{ - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18" - "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28" - "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D" - "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF", - .ilen = 32, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = zeroed_string, - .rlen = 16, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", +static const struct aead_testvec aes_gcm_rfc4543_tv_template[] = { + { /* From draft-mcgrew-gcm-test-01 */ + .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" + "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" + "\x22\x43\x3c\x64", .klen = 20, - .iv = zeroed_string, - .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" - "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" - "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C" - "\xB1\x68\xFD\x14\x52\x64\x61\xB2", - .ilen = 32, - .assoc = zeroed_string, - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", + .iv = zeroed_string, + .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 16, + .ptext 
= "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01", + .plen = 52, + .ctext = "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01\xf2\xa9\xa8\x36" + "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" + "\xe4\x09\x9a\xaa", + .clen = 68, + }, { /* nearly same as previous, but should fail */ + .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" + "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" + "\x22\x43\x3c\x64", .klen = 20, - .iv = zeroed_string, - .input = "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE" - "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC" - "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63" - "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5", - .ilen = 32, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" + .iv = zeroed_string, + .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, + .alen = 16, + .ptext = "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01", + .plen = 52, + .novrfy = 1, + .ctext = "\x45\x00\x00\x30\xda\x3a\x00\x00" + "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" + "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6a\x6b\x6c" + "\x6d\x6e\x6f\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01\xf2\xa9\xa8\x36" + "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" + "\x00\x00\x00\x00", + .clen = 68, + }, +}; +static const struct aead_testvec aes_ccm_tv_template[] = { + { /* From RFC 3610 */ + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x03\x02\x01\x00" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", + .alen = 8, + .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e", + .plen = 23, + .ctext = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2" + "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80" + "\x6d\x5f\x6b\x61\xda\xc3\x84\x17" + "\xe8\xd1\x2c\xfd\xf9\x26\xe0", + .clen = 31, }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" - "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" - "\x64\x50\xF9\x32\x13\xFB\x74\x61" - "\xF4\xED\x52\xD3\xC5\x10\x55\x3C", - .ilen = 32, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x07\x06\x05\x04" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b", + .alen = 12, + .ptext = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" + "\x14\x15\x16\x17\x18\x19\x1a\x1b" + "\x1c\x1d\x1e\x1f", + .plen = 20, + .ctext = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb" + "\x9d\x4e\x13\x12\x53\x65\x8a\xd8" + "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07" + "\x7d\x9c\x2d\x93", + 
.clen = 28, }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19" - "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29" - "\x98\x14\xA1\x42\x37\x80\xFD\x90" - "\x68\x12\x01\xA8\x91\x89\xB9\x83" - "\x5B\x11\x77\x12\x9B\xFF\x24\x89" - "\x94\x5F\x18\x12\xBA\x27\x09\x39" - "\x99\x96\x76\x42\x15\x1C\xCD\xCB" - "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD" - "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85" - "\xBD\xCF\x62\x98\x58\x14\xE5\xBD", - .ilen = 80, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 64, + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", + .alen = 8, + .ptext = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x10\x11\x12\x13\x14\x15\x16\x17" + "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" + "\x20", + .plen = 25, + .ctext = "\x82\x53\x1a\x60\xcc\x24\x94\x5a" + "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d" + "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1" + "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1" + "\x7e\x5f\x4e", + .clen = 35, }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", - .input = "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE" - "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A" - "\x44\x6D\xC3\x88\x46\x2E\xC2\x01" - "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82" - "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44" - "\x41\xA9\x82\x6F\x22\xA1\x23\x1A" - "\xA8\xE3\x16\xFD\x31\x5C\x27\x31" - "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1" - "\xCF\x07\x57\x41\x67\xD0\xC4\x42" - "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F" - "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B" - "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE" - "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C" - "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF" - "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59" - "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA" - "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25" - "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B" - "\x7E\x13\x06\x82\x08\x17\xA4\x35" - "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F" - "\xA3\x05\x38\x95\x20\x1A\x47\x04" - "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35" - "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E" - "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B" - "\x37\x08\x1C\xCF\xBA\x5D\x71\x46" - "\x80\x72\xB0\x4C\x82\x0D\x60\x3C", - .ilen = 208, - .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" - "\x89\xab\xcd\xef", - .alen = 20, - .result = "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - 
"\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .rlen = 192, + .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" + "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", + .klen = 16, + .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09" + "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b", + .alen = 12, + .ptext = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" + "\x14\x15\x16\x17\x18\x19\x1a\x1b" + "\x1c\x1d\x1e", + .plen = 19, + .ctext = "\x07\x34\x25\x94\x15\x77\x85\x15" + "\x2b\x07\x40\x98\x33\x0a\xbb\x14" + "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b" + "\x4d\x99\x99\x88\xdd", + .clen = 29, }, { - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x2E\x44\x3B\x68", - .klen = 20, - .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", - .result = "\x45\x00\x00\x48\x69\x9A\x00\x00" - "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" - "\x38\xD3\x01\x00\x00\x01\x00\x00" - "\x00\x00\x00\x00\x04\x5F\x73\x69" - "\x70\x04\x5F\x75\x64\x70\x03\x73" - "\x69\x70\x09\x63\x79\x62\x65\x72" - "\x63\x69\x74\x79\x02\x64\x6B\x00" - "\x00\x21\x00\x01\x01\x02\x02\x01", - .rlen = 72, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x00\x49\x56\xED\x7E" - "\x3B\x24\x4C\xFE", - .alen = 20, - .input = "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07" - "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76" - "\x8D\x1B\x98\x73\x66\x96\xA6\xFD" - "\x34\x85\x09\xFA\x13\xCE\xAC\x34" - "\xCF\xA2\x43\x6F\x14\xA3\xF3\xCF" - "\x65\x92\x5B\xF1\xF4\xA1\x3C\x5D" - "\x15\xB2\x1E\x18\x84\xF5\xFF\x62" - "\x47\xAE\xAB\xB7\x86\xB9\x3B\xCE" - "\x61\xBC\x17\xD7\x68\xFD\x97\x32" - "\x45\x90\x18\x14\x8F\x6C\xBE\x72" - "\x2F\xD0\x47\x96\x56\x2D\xFD\xB4", - .ilen = 88, + .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" + "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", + .klen = 16, + .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63" + "\x3c\x96\x96\x76\x6c\xfa\x00\x00", + .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb", + .alen = 8, + .ptext = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a" + "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf" + "\xb7\x9c\x70\x28\x94\x9c\xd0\xec", + .plen = 24, + .ctext = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa" + "\xa0\x72\x6c\x55\xd3\x78\x06\x12" + "\x98\xc8\x5c\x92\x81\x4a\xbc\x33" + "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a", + .clen = 32, }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xCA\xFE\xBA\xBE", - .klen = 20, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00" - "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x98\x00\x35" - "\x00\x2A\x23\x43\xB2\xD0\x01\x00" - "\x00\x01\x00\x00\x00\x00\x00\x00" - "\x03\x73\x69\x70\x09\x63\x79\x62" - "\x65\x72\x63\x69\x74\x79\x02\x64" - "\x6B\x00\x00\x01\x00\x01\x00\x01", - .rlen = 64, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .input = "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1" - "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04" - "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F" - "\x1B\xA7\x6D\x5D\x11\x4D\x2A\x5C" - "\x3D\xE8\x18\x27\xC1\x0E\x9A\x4F" - "\x51\x33\x0D\x0E\xEC\x41\x66\x42" - "\xCF\xBB\x85\xA5\xB4\x7E\x48\xA4" - "\xEC\x3B\x9B\xA9\x5D\x91\x8B\xD1" - "\x83\xB7\x0D\x3A\xA8\xBC\x6E\xE4" - "\xC3\x09\xE9\xD8\x5A\x41\xAD\x4A", - .ilen = 80, + .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" + "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", + .klen = 16, + .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70" + "\x3c\x96\x96\x76\x6c\xfa\x00\x00", + .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81" + 
"\x20\xea\x60\xc0", + .alen = 12, + .ptext = "\x64\x35\xac\xba\xfb\x11\xa8\x2e" + "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9" + "\x3a\x80\x3b\xa8\x7f", + .plen = 21, + .ctext = "\x00\x97\x69\xec\xab\xdf\x48\x62" + "\x55\x94\xc5\x92\x51\xe6\x03\x57" + "\x22\x67\x5e\x04\xc8\x47\x09\x9e" + "\x5a\xe0\x70\x45\x51", + .clen = 29, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x11\x22\x33\x44", - .klen = 36, - .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .result = "\x45\x00\x00\x30\x69\xA6\x40\x00" - "\x80\x06\x26\x90\xC0\xA8\x01\x02" - "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" - "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" - "\x70\x02\x40\x00\x20\xBF\x00\x00" - "\x02\x04\x05\xB4\x01\x01\x04\x02" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" - "\x01\x02\x03\x04\x05\x06\x07\x08", - .alen = 16, - .input = "\xFF\x42\x5C\x9B\x72\x45\x99\xDF" - "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D" - "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF" - "\x06\xEF\xAE\x9D\x65\xA5\xD7\x63" - "\x74\x8A\x63\x79\x85\x77\x1D\x34" - "\x7F\x05\x45\x65\x9F\x14\xE9\x9D" - "\xEF\x84\x2D\x8E\xB3\x35\xF4\xEE" - "\xCF\xDB\xF8\x31\x82\x4B\x4C\x49" - "\x15\x95\x6C\x96", - .ilen = 68, + .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" + "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", + .klen = 16, + .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c" + "\x3c\x96\x96\x76\x6c\xfa\x00\x00", + .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8", + .alen = 8, + .ptext = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01" + "\x8e\x5e\x67\x01\xc9\x17\x87\x65" + "\x98\x09\xd6\x7d\xbe\xdd\x18", + .plen = 23, + .ctext = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6" + "\xdb\x38\x6a\x99\xac\x1a\xef\x23" + "\xad\xe0\xb5\x29\x39\xcb\x6a\x63" + "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6" + "\xba", + .clen = 33, }, { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00", - .klen = 20, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00" - "\x80\x01\xCB\x7A\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x07\x5C" - "\x02\x00\x44\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .rlen = 64, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x46\x88\xDA\xF2\xF9\x73\xA3\x92" - "\x73\x29\x09\xC3\x31\xD5\x6D\x60" - "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F" - "\xF5\xFD\xCD\xFF\xF5\xE9\xA2\x84" - "\x45\x64\x76\x49\x27\x19\xFF\xB6" - "\x4D\xE7\xD9\xDC\xA1\xE1\xD8\x94" - "\xBC\x3B\xD5\x78\x73\xED\x4D\x18" - "\x1D\x19\xD4\xD5\xC8\xC1\x8A\xF3" - "\xF8\x21\xD4\x96\xEE\xB0\x96\xE9" - "\x8A\xD2\xB6\x9E\x47\x99\xC7\x1D", - .ilen = 80, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00" - "\x80\x01\xCB\x7C\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x08\x5C" - "\x02\x00\x43\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .rlen = 64, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0" - "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0" - "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0" - 
"\x0D\x11\x38\xEC\x9C\x35\x79\x17" - "\x65\xAC\xBD\x87\x01\xAD\x79\x84" - "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" - "\x17\x55\xE6\x66\x2B\x4C\x8D\x0D" - "\x1F\x5E\x22\x73\x95\x30\x32\x0A" - "\xE0\xD7\x31\xCC\x97\x8E\xCA\xFA" - "\xEA\xE8\x8F\x00\xE8\x0D\x6E\x48", - .ilen = 80, + /* This is taken from FIPS CAVS. */ + .key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05" + "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e", + .klen = 16, + .iv = "\x03\x96\xac\x59\x30\x07\xa1\xe2\xa2\xc7\x55\x24\0\0\0\0", + .alen = 0, + .ptext = "\x19\xc8\x81\xf6\xe9\x86\xff\x93" + "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e" + "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c" + "\x35\x2e\xad\xe0\x62\xf9\x91\xa1", + .plen = 32, + .ctext = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8" + "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1" + "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a" + "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32" + "\xda\x24\xea\xd9\xa1\x39\x98\xfd" + "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", + .clen = 48, }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00" - "\x80\x01\x44\x1F\x40\x67\x93\xB6" - "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" - "\x01\x02\x02\x01", - .rlen = 28, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0" - "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E" - "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13" - "\x0E\x13\x79\xED\x36\x9F\x07\x1F" - "\x35\xE0\x34\xBE\x95\xF1\x12\xE4" - "\xE7\xD0\x5D\x35", - .ilen = 44, + .key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0" + "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3", + .klen = 16, + .iv = "\x03\x4f\xa3\x19\xd3\x01\x5a\xd8" + "\x30\x60\x15\x56\x00\x00\x00\x00", + .assoc = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63" + "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7" + "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1" + "\x0a\x63\x09\x78\xbc\x2c\x55\xde", + .alen = 32, + .ptext = "\x87\xa3\x36\xfd\x96\xb3\x93\x78" + "\xa9\x28\x63\xba\x12\xa3\x14\x85" + "\x57\x1e\x06\xc9\x7b\x21\xef\x76" + "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e", + .plen = 32, + .ctext = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19" + "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab" + "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff" + "\x3b\xb5\xce\x53\xef\xde\xbb\x02" + "\xa9\x86\x15\x6c\x13\xfe\xda\x0a" + "\x22\xb8\x29\x3d\xd8\x39\x9a\x23", + .clen = 48, }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\xCA\xFE\xBA\xBE", - .klen = 28, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00" - "\x40\x06\x78\x80\x0A\x01\x03\x8F" - "\x0A\x01\x06\x12\x80\x23\x06\xB8" - "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" - "\x50\x10\x16\xD0\x75\x68\x00\x01", - .rlen = 40, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .input = "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4" - "\x0E\x59\x8B\x81\x22\xDE\x02\x42" - "\x09\x38\xB3\xAB\x33\xF8\x28\xE6" - "\x87\xB8\x85\x8B\x5B\xFB\xDB\xD0" - "\x31\x5B\x27\x45\x21\x44\xCC\x77" - "\x95\x45\x7B\x96\x52\x03\x7F\x53" - "\x18\x02\x7B\x5B\x4C\xD7\xA6\x36", - .ilen = 56, + .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" + "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75" + "\x53\x14\x73\x66\x8d\x88\xf6\x80", + .klen = 24, + .iv = "\x03\xa0\x20\x35\x26\xf2\x21\x8d" + "\x50\x20\xda\xe2\x00\x00\x00\x00", + .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" + "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" + "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" + "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", + .alen = 32, + .ctext = 
"\x36\xea\x7a\x70\x08\xdc\x6a\xbc" + "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b", + .clen = 16, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8\x88", - .klen = 20, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .result = "\x45\x00\x00\x49\x33\xBA\x00\x00" - "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xDD\x7B\x80\x03\x02\xD5" - "\x00\x00\x4E\x20\x00\x1E\x8C\x18" - "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" - "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" - "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" - "\x9B\x62\x66\xC0\x47\x22\xB1\x49" - "\x23\x01\x01\x01", - .rlen = 76, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .input = "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A" - "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14" - "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1" - "\xC3\x49\xC1\xD2\xFB\xEC\x16\x8F" - "\x91\x90\xFE\xEB\xAF\x2C\xB0\x19" - "\x84\xE6\x58\x63\x96\x5D\x74\x72" - "\xB7\x9D\xA3\x45\xE0\xE7\x80\x19" - "\x1F\x0D\x2F\x0E\x0F\x49\x6C\x22" - "\x6F\x21\x27\xB2\x7D\xB3\x57\x24" - "\xE7\x84\x5D\x68\x65\x1F\x57\xE6" - "\x5F\x35\x4F\x75\xFF\x17\x01\x57" - "\x69\x62\x34\x36", - .ilen = 92, + .key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42" + "\xef\x7a\xd3\xce\xfc\x84\x60\x62" + "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01", + .klen = 24, + .iv = "\x03\xd6\x3c\x8c\x86\x84\xb6\xcd" + "\xef\x09\x2e\x94\x00\x00\x00\x00", + .assoc = "\x02\x65\x78\x3c\xe9\x21\x30\x91" + "\xb1\xb9\xda\x76\x9a\x78\x6d\x95" + "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c" + "\xe3\x00\x73\x69\x84\x69\x87\x79", + .alen = 32, + .ptext = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c" + "\x43\x69\x3a\x2d\x8e\x70\xad\x7e" + "\xe0\xe5\x46\x09\x80\x89\x13\xb2" + "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b", + .plen = 32, + .ctext = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62" + "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f" + "\x9b\x6a\x09\x70\xc1\x51\x83\xc2" + "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e" + "\xc7\x79\x11\x58\xe5\x6b\x20\x40" + "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1", + .clen = 48, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C\x74", - .klen = 36, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .result = "\x45\x08\x00\x28\x73\x2C\x00\x00" - "\x40\x06\xE9\xF9\x0A\x01\x06\x12" - "\x0A\x01\x03\x8F\x06\xB8\x80\x23" - "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" - "\x50\x10\x1F\x64\x6D\x54\x00\x01", - .rlen = 40, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .input = "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B" - "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D" - "\x1F\x27\x8F\xDE\x98\xEF\x67\x54" - "\x9D\x52\x4A\x30\x18\xD9\xA5\x7F" - "\xF4\xD3\xA3\x1C\xE6\x73\x11\x9E" - "\x45\x16\x26\xC2\x41\x57\x71\xE3" - "\xB7\xEE\xBC\xA6\x14\xC8\x9B\x35", - .ilen = 56, + .key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a" + "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a" + "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e" + "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b", + .klen = 32, + .iv = "\x03\x1e\x29\x91\xad\x8e\xc1\x53" + "\x0a\xcf\x2d\xbe\x00\x00\x00\x00", + .assoc = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b" + "\x78\x2b\x94\x02\x29\x0f\x42\x27" + "\x6b\x75\xcb\x98\x34\x08\x7e\x79" + "\xe4\x3e\x49\x0d\x84\x8b\x22\x87", + .alen = 32, + .ptext = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f" + "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66" + "\xbf\x17\x99\x62\x4a\x39\x27\x1f" + "\x1d\xdc\x24\xae\x19\x2f\x98\x4c", + .plen = 32, + .ctext = "\x19\xb8\x61\x33\x45\x2b\x43\x96" + "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6" + 
"\x26\x3d\xf8\xc9\x65\x16\xa8\x9f" + "\xf0\x62\x17\x34\xf2\x1e\x8d\x75" + "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d", + .clen = 40, }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x49\x33\x3E\x00\x00" - "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xCB\x45\x80\x03\x02\x5B" - "\x00\x00\x01\xE0\x00\x1E\x8C\x18" - "\xD6\x57\x59\xD5\x22\x84\xA0\x35" - "\x2C\x71\x47\x5C\x88\x80\x39\x1C" - "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" - "\x5A\xE2\x70\xC0\x38\x99\x49\x39" - "\x15\x01\x01\x01", - .rlen = 76, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0" - "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8" - "\x3D\x77\x84\xB6\x07\x32\x3D\x22" - "\x0F\x24\xB0\xA9\x7D\x54\x18\x28" - "\x00\xCA\xDB\x0F\x68\xD9\x9E\xF0" - "\xE0\xC0\xC8\x9A\xE9\xBE\xA8\x88" - "\x4E\x52\xD6\x5B\xC1\xAF\xD0\x74" - "\x0F\x74\x24\x44\x74\x7B\x5B\x39" - "\xAB\x53\x31\x63\xAA\xD4\x55\x0E" - "\xE5\x16\x09\x75\xCD\xB6\x08\xC5" - "\x76\x91\x89\x60\x97\x63\xB8\xE1" - "\x8C\xAA\x81\xE2", - .ilen = 92, + .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c" + "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32" + "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c" + "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", + .klen = 32, + .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" + "\x43\xf6\x1e\x50\0\0\0\0", + .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" + "\x13\x02\x01\x0c\x83\x4c\x96\x35" + "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" + "\xb0\x39\x36\xe6\x8f\x57\xe0\x13", + .alen = 32, + .ptext = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6" + "\x83\x72\x07\x4f\xcf\xfa\x66\x89" + "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27" + "\x30\xdb\x75\x09\x93\xd4\x65\xe4", + .plen = 32, + .ctext = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d" + "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d" + "\xcc\x63\x44\x25\x07\x78\x4f\x9e" + "\x96\xb8\x88\xeb\xbc\x48\x1f\x06" + "\x39\xaf\x39\xac\xd8\x4a\x80\x39" + "\x7b\x72\x8a\xf7", + .clen = 44, }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C\x74", - .klen = 36, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .result = "\x63\x69\x73\x63\x6F\x01\x72\x75" - "\x6C\x65\x73\x01\x74\x68\x65\x01" - "\x6E\x65\x74\x77\x65\x01\x64\x65" - "\x66\x69\x6E\x65\x01\x74\x68\x65" - "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" - "\x67\x69\x65\x73\x01\x74\x68\x61" - "\x74\x77\x69\x6C\x6C\x01\x64\x65" - "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" - "\x72\x72\x6F\x77\x01\x02\x02\x01", - .rlen = 72, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .input = "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E" - "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E" - "\x7B\x43\xF8\x26\xFB\x56\x83\x12" - "\x26\x50\x8B\xEB\xD2\xDC\xEB\x18" - "\xD0\xA6\xDF\x10\xE5\x48\x7D\xF0" - "\x74\x11\x3E\x14\xC6\x41\x02\x4E" - "\x3E\x67\x73\xD9\x1A\x62\xEE\x42" - "\x9B\x04\x3A\x10\xE3\xEF\xE6\xB0" - "\x12\xA4\x93\x63\x41\x23\x64\xF8" - "\xC0\xCA\xC5\x87\xF2\x49\xE5\x6B" - "\x11\xE2\x4F\x30\xE4\x4C\xCC\x76", - .ilen = 88, - }, { - .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" - "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" - "\xD9\x66\x42\x67", - .klen = 20, - .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .result = "\x01\x02\x02\x01", - .rlen = 4, - .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" - "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .alen = 16, - .input = 
"\x43\x7F\x86\x6B\xCB\x3F\x69\x9F" - "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45" - "\x04\xBE\xF2\x70", - .ilen = 20, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8\x88", - .klen = 20, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72" - "\x01\x6E\x6F\x74\x01\x74\x6F\x01" - "\x62\x65\x00\x01", - .rlen = 20, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .input = "\x29\xC9\xFC\x69\xA1\x97\xD0\x38" - "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05" - "\x43\x33\x21\x64\x41\x25\x03\x52" - "\x43\x03\xED\x3C\x6C\x5F\x28\x38" - "\x43\xAF\x8C\x3E", - .ilen = 36, - }, { - .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" - "\x6D\x61\x72\x69\x6A\x75\x61\x6E" - "\x61\x61\x6E\x64\x64\x6F\x69\x74" - "\x62\x65\x66\x6F\x72\x65\x69\x61" - "\x74\x75\x72\x6E", - .klen = 36, - .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", - .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" - "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" - "\x67\x65\x74\x6D", - .alen = 20, - .input = "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC" - "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D" - "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6" - "\x4A\x27\x4B\x39\xB4\x9C\x3A\x86" - "\x4C\xD3\xD7\x8C\xA4\xAE\x68\xA3" - "\x2B\x42\x45\x8F\xB5\x7D\xBE\x82" - "\x1D\xCC\x63\xB9\xD0\x93\x7B\xA2" - "\x94\x5F\x66\x93\x68\x66\x1A\x32" - "\x9F\xB4\xC0\x53", - .ilen = 68, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E\x43", - .klen = 20, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0" - "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD" - "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21" - "\x0D\x11\x7C\xEC\x9C\x35\x79\x17" - "\x65\xAC\xBD\x87\x01\xAD\x79\x84" - "\x5B\xF9\xFE\x3F\xBA\x48\x7B\xC9" - "\x63\x21\x93\x06\x84\xEE\xCA\xDB" - "\x56\x91\x25\x46\xE7\xA9\x5C\x97" - "\x40\xD7\xCB\x05", - .ilen = 68, - }, { - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x22\x43\x3C\x64", - .klen = 20, - .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", - .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00" - "\x61\x62\x63\x64\x65\x66\x67\x68" - "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" - "\x71\x72\x73\x74\x01\x02\x02\x01", - .rlen = 32, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x07\x48\x55\xEC\x7D" - "\x3A\x23\x4B\xFD", - .alen = 20, - .input = "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C" - "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF" - "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6" - "\xAC\xDA\x68\x94\xBC\x61\x90\x69" - "\xEF\x9C\xBC\x28\xFE\x1B\x56\xA7" - "\xC4\xE0\xD5\x8C\x86\xCD\x2B\xC0", - .ilen = 48, - } -}; - -static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = { - { /* From draft-mcgrew-gcm-test-01 */ - .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" - "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" - "\x22\x43\x3c\x64", - .klen = 20, - .iv = zeroed_string, - .assoc = 
"\x00\x00\x43\x21\x00\x00\x00\x07" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .ilen = 52, - .result = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01\xf2\xa9\xa8\x36" - "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" - "\xe4\x09\x9a\xaa", - .rlen = 68, - } -}; - -static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = { - { /* From draft-mcgrew-gcm-test-01 */ - .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" - "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" - "\x22\x43\x3c\x64", - .klen = 20, - .iv = zeroed_string, - .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01\xf2\xa9\xa8\x36" - "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" - "\xe4\x09\x9a\xaa", - .ilen = 68, - .result = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - }, { /* nearly same as previous, but should fail */ - .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda" - "\x90\x6a\xc7\x3c\x36\x13\xa6\x34" - "\x22\x43\x3c\x64", - .klen = 20, - .iv = zeroed_string, - .assoc = "\x00\x00\x43\x21\x00\x00\x00\x07" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01\xf2\xa9\xa8\x36" - "\xe1\x55\x10\x6a\xa8\xdc\xd6\x18" - "\x00\x00\x00\x00", - .ilen = 68, - .novrfy = 1, - .result = "\x45\x00\x00\x30\xda\x3a\x00\x00" - "\x80\x01\xdf\x3b\xc0\xa8\x00\x05" - "\xc0\xa8\x00\x01\x08\x00\xc6\xcd" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6a\x6b\x6c" - "\x6d\x6e\x6f\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - }, -}; - -static const struct aead_testvec aes_ccm_enc_tv_template[] = { - { /* From RFC 3610 */ - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x03\x02\x01\x00" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", - .alen = 8, - .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e", - .ilen = 23, - .result = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2" - "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80" - "\x6d\x5f\x6b\x61\xda\xc3\x84\x17" - "\xe8\xd1\x2c\xfd\xf9\x26\xe0", - .rlen = 31, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x07\x06\x05\x04" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b", - .alen = 12, - .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" - 
"\x14\x15\x16\x17\x18\x19\x1a\x1b" - "\x1c\x1d\x1e\x1f", - .ilen = 20, - .result = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb" - "\x9d\x4e\x13\x12\x53\x65\x8a\xd8" - "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07" - "\x7d\x9c\x2d\x93", - .rlen = 28, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", - .alen = 8, - .input = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20", - .ilen = 25, - .result = "\x82\x53\x1a\x60\xcc\x24\x94\x5a" - "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d" - "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1" - "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1" - "\x7e\x5f\x4e", - .rlen = 35, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b", - .alen = 12, - .input = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" - "\x14\x15\x16\x17\x18\x19\x1a\x1b" - "\x1c\x1d\x1e", - .ilen = 19, - .result = "\x07\x34\x25\x94\x15\x77\x85\x15" - "\x2b\x07\x40\x98\x33\x0a\xbb\x14" - "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b" - "\x4d\x99\x99\x88\xdd", - .rlen = 29, - }, { - .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" - "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", - .klen = 16, - .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63" - "\x3c\x96\x96\x76\x6c\xfa\x00\x00", - .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb", - .alen = 8, - .input = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a" - "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf" - "\xb7\x9c\x70\x28\x94\x9c\xd0\xec", - .ilen = 24, - .result = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa" - "\xa0\x72\x6c\x55\xd3\x78\x06\x12" - "\x98\xc8\x5c\x92\x81\x4a\xbc\x33" - "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a", - .rlen = 32, - }, { - .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" - "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", - .klen = 16, - .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70" - "\x3c\x96\x96\x76\x6c\xfa\x00\x00", - .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81" - "\x20\xea\x60\xc0", - .alen = 12, - .input = "\x64\x35\xac\xba\xfb\x11\xa8\x2e" - "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9" - "\x3a\x80\x3b\xa8\x7f", - .ilen = 21, - .result = "\x00\x97\x69\xec\xab\xdf\x48\x62" - "\x55\x94\xc5\x92\x51\xe6\x03\x57" - "\x22\x67\x5e\x04\xc8\x47\x09\x9e" - "\x5a\xe0\x70\x45\x51", - .rlen = 29, - }, { - .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" - "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", - .klen = 16, - .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c" - "\x3c\x96\x96\x76\x6c\xfa\x00\x00", - .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8", - .alen = 8, - .input = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01" - "\x8e\x5e\x67\x01\xc9\x17\x87\x65" - "\x98\x09\xd6\x7d\xbe\xdd\x18", - .ilen = 23, - .result = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6" - "\xdb\x38\x6a\x99\xac\x1a\xef\x23" - "\xad\xe0\xb5\x29\x39\xcb\x6a\x63" - "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6" - "\xba", - .rlen = 33, - }, { - /* This is taken from FIPS CAVS. 
*/ - .key = "\x83\xac\x54\x66\xc2\xeb\xe5\x05" - "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e", - .klen = 16, - .iv = "\x03\x96\xac\x59\x30\x07\xa1\xe2\xa2\xc7\x55\x24\0\0\0\0", - .alen = 0, - .input = "\x19\xc8\x81\xf6\xe9\x86\xff\x93" - "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e" - "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c" - "\x35\x2e\xad\xe0\x62\xf9\x91\xa1", - .ilen = 32, - .result = "\xab\x6f\xe1\x69\x1d\x19\x99\xa8" - "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1" - "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a" - "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32" - "\xda\x24\xea\xd9\xa1\x39\x98\xfd" - "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8", - .rlen = 48, - }, { - .key = "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0" - "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3", - .klen = 16, - .iv = "\x03\x4f\xa3\x19\xd3\x01\x5a\xd8" - "\x30\x60\x15\x56\x00\x00\x00\x00", - .assoc = "\xda\xe6\x28\x9c\x45\x2d\xfd\x63" - "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7" - "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1" - "\x0a\x63\x09\x78\xbc\x2c\x55\xde", - .alen = 32, - .input = "\x87\xa3\x36\xfd\x96\xb3\x93\x78" - "\xa9\x28\x63\xba\x12\xa3\x14\x85" - "\x57\x1e\x06\xc9\x7b\x21\xef\x76" - "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e", - .ilen = 32, - .result = "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19" - "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab" - "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff" - "\x3b\xb5\xce\x53\xef\xde\xbb\x02" - "\xa9\x86\x15\x6c\x13\xfe\xda\x0a" - "\x22\xb8\x29\x3d\xd8\x39\x9a\x23", - .rlen = 48, - }, { - .key = "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1" - "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75" - "\x53\x14\x73\x66\x8d\x88\xf6\x80", - .klen = 24, - .iv = "\x03\xa0\x20\x35\x26\xf2\x21\x8d" - "\x50\x20\xda\xe2\x00\x00\x00\x00", - .assoc = "\x5b\x9e\x13\x67\x02\x5e\xef\xc1" - "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47" - "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d" - "\xd8\x9e\x2b\x56\x10\x23\x56\xe7", - .alen = 32, - .result = "\x36\xea\x7a\x70\x08\xdc\x6a\xbc" - "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b", - .rlen = 16, - }, { - .key = "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42" - "\xef\x7a\xd3\xce\xfc\x84\x60\x62" - "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01", - .klen = 24, - .iv = "\x03\xd6\x3c\x8c\x86\x84\xb6\xcd" - "\xef\x09\x2e\x94\x00\x00\x00\x00", - .assoc = "\x02\x65\x78\x3c\xe9\x21\x30\x91" - "\xb1\xb9\xda\x76\x9a\x78\x6d\x95" - "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c" - "\xe3\x00\x73\x69\x84\x69\x87\x79", - .alen = 32, - .input = "\x9f\xd2\x02\x4b\x52\x49\x31\x3c" - "\x43\x69\x3a\x2d\x8e\x70\xad\x7e" - "\xe0\xe5\x46\x09\x80\x89\x13\xb2" - "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b", - .ilen = 32, - .result = "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62" - "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f" - "\x9b\x6a\x09\x70\xc1\x51\x83\xc2" - "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e" - "\xc7\x79\x11\x58\xe5\x6b\x20\x40" - "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1", - .rlen = 48, - }, { - .key = "\xe0\x8d\x99\x71\x60\xd7\x97\x1a" - "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a" - "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e" - "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b", - .klen = 32, - .iv = "\x03\x1e\x29\x91\xad\x8e\xc1\x53" - "\x0a\xcf\x2d\xbe\x00\x00\x00\x00", - .assoc = "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b" - "\x78\x2b\x94\x02\x29\x0f\x42\x27" - "\x6b\x75\xcb\x98\x34\x08\x7e\x79" - "\xe4\x3e\x49\x0d\x84\x8b\x22\x87", - .alen = 32, - .input = "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f" - "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66" - "\xbf\x17\x99\x62\x4a\x39\x27\x1f" - "\x1d\xdc\x24\xae\x19\x2f\x98\x4c", - .ilen = 32, - .result = "\x19\xb8\x61\x33\x45\x2b\x43\x96" - "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6" - "\x26\x3d\xf8\xc9\x65\x16\xa8\x9f" - "\xf0\x62\x17\x34\xf2\x1e\x8d\x75" - "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d", - .rlen = 40, - }, { - .key = "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c" - 
"\x45\x41\xb8\xbd\x5c\xa7\xc2\x32" - "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c" - "\x09\x75\x9a\x9b\x3c\x9b\x27\x39", - .klen = 32, - .iv = "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d" - "\x43\xf6\x1e\x50\0\0\0\0", - .assoc = "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b" - "\x13\x02\x01\x0c\x83\x4c\x96\x35" - "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94" - "\xb0\x39\x36\xe6\x8f\x57\xe0\x13", - .alen = 32, - .input = "\x3b\x6c\x29\x36\xb6\xef\x07\xa6" - "\x83\x72\x07\x4f\xcf\xfa\x66\x89" - "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27" - "\x30\xdb\x75\x09\x93\xd4\x65\xe4", - .ilen = 32, - .result = "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d" - "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d" - "\xcc\x63\x44\x25\x07\x78\x4f\x9e" - "\x96\xb8\x88\xeb\xbc\x48\x1f\x06" - "\x39\xaf\x39\xac\xd8\x4a\x80\x39" - "\x7b\x72\x8a\xf7", - .rlen = 44, - }, { - .key = "\xab\xd0\xe9\x33\x07\x26\xe5\x83" - "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46" - "\xf9\x8f\xad\xe3\x02\x13\x83\x77" - "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b", - .klen = 32, - .iv = "\x03\x24\xa7\x8b\x07\xcb\xcc\x0e" - "\xe6\x33\xbf\xf5\x00\x00\x00\x00", - .assoc = "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f" - "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d" - "\xab\x90\x65\x8d\x8e\xca\x4d\x4f" - "\x16\x0c\x40\x90\x4b\xc7\x36\x73", - .alen = 32, - .input = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92" - "\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d" - "\x6c\xde\xbc\xf1\x90\xea\x6a\xb2" - "\x35\x86\x36\xaf\x5c\xfe\x4b\x3a", - .ilen = 32, - .result = "\x83\x6f\x40\x87\x72\xcf\xc1\x13" - "\xef\xbb\x80\x21\x04\x6c\x58\x09" - "\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7" - "\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf" - "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d" - "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8", - .rlen = 48, - } -}; - -static const struct aead_testvec aes_ccm_dec_tv_template[] = { - { /* From RFC 3610 */ - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x03\x02\x01\x00" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", - .alen = 8, - .input = "\x58\x8c\x97\x9a\x61\xc6\x63\xd2" - "\xf0\x66\xd0\xc2\xc0\xf9\x89\x80" - "\x6d\x5f\x6b\x61\xda\xc3\x84\x17" - "\xe8\xd1\x2c\xfd\xf9\x26\xe0", - .ilen = 31, - .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e", - .rlen = 23, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x07\x06\x05\x04" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b", - .alen = 12, - .input = "\xdc\xf1\xfb\x7b\x5d\x9e\x23\xfb" - "\x9d\x4e\x13\x12\x53\x65\x8a\xd8" - "\x6e\xbd\xca\x3e\x51\xe8\x3f\x07" - "\x7d\x9c\x2d\x93", - .ilen = 28, - .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" - "\x14\x15\x16\x17\x18\x19\x1a\x1b" - "\x1c\x1d\x1e\x1f", - .rlen = 20, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x0b\x0a\x09\x08" - "\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07", - .alen = 8, - .input = "\x82\x53\x1a\x60\xcc\x24\x94\x5a" - "\x4b\x82\x79\x18\x1a\xb5\xc8\x4d" - "\xf2\x1c\xe7\xf9\xb7\x3f\x42\xe1" - "\x97\xea\x9c\x07\xe5\x6b\x5e\xb1" - "\x7e\x5f\x4e", - .ilen = 35, - .result = "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x10\x11\x12\x13\x14\x15\x16\x17" - "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" - "\x20", - .rlen = 25, - }, { - .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" - "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf", - .klen = 16, - .iv = "\x01\x00\x00\x00\x0c\x0b\x0a\x09" - 
"\xa0\xa1\xa2\xa3\xa4\xa5\x00\x00", - .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b", - .alen = 12, - .input = "\x07\x34\x25\x94\x15\x77\x85\x15" - "\x2b\x07\x40\x98\x33\x0a\xbb\x14" - "\x1b\x94\x7b\x56\x6a\xa9\x40\x6b" - "\x4d\x99\x99\x88\xdd", - .ilen = 29, - .result = "\x0c\x0d\x0e\x0f\x10\x11\x12\x13" - "\x14\x15\x16\x17\x18\x19\x1a\x1b" - "\x1c\x1d\x1e", - .rlen = 19, - }, { - .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" - "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", - .klen = 16, - .iv = "\x01\x00\x33\x56\x8e\xf7\xb2\x63" - "\x3c\x96\x96\x76\x6c\xfa\x00\x00", - .assoc = "\x63\x01\x8f\x76\xdc\x8a\x1b\xcb", - .alen = 8, - .input = "\x4c\xcb\x1e\x7c\xa9\x81\xbe\xfa" - "\xa0\x72\x6c\x55\xd3\x78\x06\x12" - "\x98\xc8\x5c\x92\x81\x4a\xbc\x33" - "\xc5\x2e\xe8\x1d\x7d\x77\xc0\x8a", - .ilen = 32, - .result = "\x90\x20\xea\x6f\x91\xbd\xd8\x5a" - "\xfa\x00\x39\xba\x4b\xaf\xf9\xbf" - "\xb7\x9c\x70\x28\x94\x9c\xd0\xec", - .rlen = 24, - }, { - .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" - "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", - .klen = 16, - .iv = "\x01\x00\xd5\x60\x91\x2d\x3f\x70" - "\x3c\x96\x96\x76\x6c\xfa\x00\x00", - .assoc = "\xcd\x90\x44\xd2\xb7\x1f\xdb\x81" - "\x20\xea\x60\xc0", - .alen = 12, - .input = "\x00\x97\x69\xec\xab\xdf\x48\x62" - "\x55\x94\xc5\x92\x51\xe6\x03\x57" - "\x22\x67\x5e\x04\xc8\x47\x09\x9e" - "\x5a\xe0\x70\x45\x51", - .ilen = 29, - .result = "\x64\x35\xac\xba\xfb\x11\xa8\x2e" - "\x2f\x07\x1d\x7c\xa4\xa5\xeb\xd9" - "\x3a\x80\x3b\xa8\x7f", - .rlen = 21, - }, { - .key = "\xd7\x82\x8d\x13\xb2\xb0\xbd\xc3" - "\x25\xa7\x62\x36\xdf\x93\xcc\x6b", - .klen = 16, - .iv = "\x01\x00\x42\xff\xf8\xf1\x95\x1c" - "\x3c\x96\x96\x76\x6c\xfa\x00\x00", - .assoc = "\xd8\x5b\xc7\xe6\x9f\x94\x4f\xb8", - .alen = 8, - .input = "\xbc\x21\x8d\xaa\x94\x74\x27\xb6" - "\xdb\x38\x6a\x99\xac\x1a\xef\x23" - "\xad\xe0\xb5\x29\x39\xcb\x6a\x63" - "\x7c\xf9\xbe\xc2\x40\x88\x97\xc6" - "\xba", - .ilen = 33, - .result = "\x8a\x19\xb9\x50\xbc\xf7\x1a\x01" - "\x8e\x5e\x67\x01\xc9\x17\x87\x65" - "\x98\x09\xd6\x7d\xbe\xdd\x18", - .rlen = 23, - }, { - /* This is taken from FIPS CAVS. 
*/ - .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" - "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", - .klen = 16, - .iv = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab" - "\xd8\xa6\xb2\xd8\x00\x00\x00\x00", - .alen = 0, - .input = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b", - .ilen = 8, - .result = "\x00", - .rlen = 0, - .novrfy = 1, - }, { - .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" - "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", - .klen = 16, - .iv = "\x03\xaf\x94\x87\x78\x35\x82\x81" - "\x7f\x88\x94\x68\x00\x00\x00\x00", - .alen = 0, - .input = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3", - .ilen = 8, - .result = "\x00", - .rlen = 0, - }, { - .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" - "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8", - .klen = 16, - .iv = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab" - "\xd8\xa6\xb2\xd8\x00\x00\x00\x00", - .assoc = "\xf3\x94\x87\x78\x35\x82\x81\x7f" - "\x88\x94\x68\xb1\x78\x6b\x2b\xd6" - "\x04\x1f\x4e\xed\x78\xd5\x33\x66" - "\xd8\x94\x99\x91\x81\x54\x62\x57", - .alen = 32, - .input = "\xf0\x7c\x29\x02\xae\x1c\x2f\x55" - "\xd0\xd1\x3d\x1a\xa3\x6d\xe4\x0a" - "\x86\xb0\x87\x6b\x62\x33\x8c\x34" - "\xce\xab\x57\xcc\x79\x0b\xe0\x6f" - "\x5c\x3e\x48\x1f\x6c\x46\xf7\x51" - "\x8b\x84\x83\x2a\xc1\x05\xb8\xc5", - .ilen = 48, - .result = "\x50\x82\x3e\x07\xe2\x1e\xb6\xfb" - "\x33\xe4\x73\xce\xd2\xfb\x95\x79" - "\xe8\xb4\xb5\x77\x11\x10\x62\x6f" - "\x6a\x82\xd1\x13\xec\xf5\xd0\x48", - .rlen = 32, - .novrfy = 1, - }, { - .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" - "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8", - .klen = 16, - .iv = "\x03\x05\xe0\xc9\x0f\xed\x34\xea" - "\x97\xd4\x3b\xdf\x00\x00\x00\x00", - .assoc = "\x49\x5c\x50\x1f\x1d\x94\xcc\x81" - "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1" - "\xd8\x5c\x42\x68\xe0\x6c\xda\x89" - "\x05\xac\x56\xac\x1b\x2a\xd3\x86", - .alen = 32, - .input = "\x39\xbe\x7d\x15\x62\x77\xf3\x3c" - "\xad\x83\x52\x6d\x71\x03\x25\x1c" - "\xed\x81\x3a\x9a\x16\x7d\x19\x80" - "\x72\x04\x72\xd0\xf6\xff\x05\x0f" - "\xb7\x14\x30\x00\x32\x9e\xa0\xa6" - "\x9e\x5a\x18\xa1\xb8\xfe\xdb\xd3", - .ilen = 48, - .result = "\x75\x05\xbe\xc2\xd9\x1e\xde\x60" - "\x47\x3d\x8c\x7d\xbd\xb5\xd9\xb7" - "\xf2\xae\x61\x05\x8f\x82\x24\x3f" - "\x9c\x67\x91\xe1\x38\x4f\xe4\x0c", - .rlen = 32, - }, { - .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" - "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" - "\xa4\x48\x93\x39\x26\x71\x4a\xc6", - .klen = 24, - .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" - "\x57\xba\xfd\x9e\x00\x00\x00\x00", - .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" - "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" - "\xa4\xf0\x13\x05\xd1\x77\x99\x67" - "\x11\xc4\xc6\xdb\x00\x56\x36\x61", - .alen = 32, - .input = "\x71\x99\xfa\xf4\x44\x12\x68\x9b", - .ilen = 8, - .result = "\x00", - .rlen = 0, - }, { - .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" - "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" - "\x29\xa0\xba\x9e\x48\x78\xd1\xba", - .klen = 24, - .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" - "\x57\xba\xfd\x9e\x00\x00\x00\x00", - .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" - "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" - "\xa4\xf0\x13\x05\xd1\x77\x99\x67" - "\x11\xc4\xc6\xdb\x00\x56\x36\x61", - .alen = 32, - .input = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7" - "\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2" - "\x66\xca\x61\x1e\x96\x7a\x61\xb3" - "\x1c\x16\x45\x52\xba\x04\x9c\x9f" - "\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1", - .ilen = 40, - .result = "\x85\x34\x66\x42\xc8\x92\x0f\x36" - "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb" - "\x0a\x85\xcc\x02\xad\x7a\x96\xe9" - "\x65\x43\xa4\xc3\x0f\xdc\x55\x81", - .rlen = 32, - }, { - .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" - "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" - "\x29\xa0\xba\x9e\x48\x78\xd1\xba", - .klen = 
24, - .iv = "\x03\xd1\xfc\x57\x9c\xfe\xb8\x9c" - "\xad\x71\xaa\x1f\x00\x00\x00\x00", - .assoc = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6" - "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3" - "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7" - "\x3c\xa1\x52\x13\x03\x8a\x23\x3a", - .alen = 32, - .input = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00" - "\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37" - "\x8c\x26\x33\xc6\xb2\xa2\x17\xfa" - "\x64\x19\xc0\x30\xd7\xfc\x14\x6b" - "\xe3\x33\xc2\x04\xb0\x37\xbe\x3f" - "\xa9\xb4\x2d\x68\x03\xa3\x44\xef", - .ilen = 48, - .result = "\x02\x87\x4d\x28\x80\x6e\xb2\xed" - "\x99\x2a\xa8\xca\x04\x25\x45\x90" - "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c" - "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b", - .rlen = 32, - .novrfy = 1, - }, { - .key = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01" - "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c" - "\x20\x2c\xad\x30\xc2\x2b\x41\xfb" - "\x0e\x85\xbc\x33\xad\x0f\x2b\xff", - .klen = 32, - .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" - "\x57\xba\xfd\x9e\x00\x00\x00\x00", - .alen = 0, - .input = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2", - .ilen = 8, - .result = "\x00", - .rlen = 0, - }, { - .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" - "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" - "\xa4\x48\x93\x39\x26\x71\x4a\xc6" - "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb", - .klen = 32, - .iv = "\x03\x85\x34\x66\x42\xc8\x92\x0f" - "\x36\x58\xe0\x6b\x00\x00\x00\x00", - .alen = 0, - .input = "\x48\x01\x5e\x02\x24\x04\x66\x47" - "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd" - "\xa5\xa9\x87\x8d\x84\xee\x2e\x77" - "\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6" - "\x72\xc3\x8e\xf7\x70\xb1\xb2\x07" - "\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a", - .ilen = 48, - .result = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c" - "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99" - "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39" - "\x89\xd4\x75\x7a\x63\xb1\xda\x93", - .rlen = 32, - .novrfy = 1, - }, { - .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" - "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" - "\x29\xa0\xba\x9e\x48\x78\xd1\xba" - "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b", - .klen = 32, - .iv = "\x03\xcf\x76\x3f\xd9\x95\x75\x8f" - "\x44\x89\x40\x7b\x00\x00\x00\x00", - .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" - "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" - "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" - "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe", - .alen = 32, - .input = "\x48\x58\xd6\xf3\xad\x63\x58\xbf" - "\xae\xc7\x5e\xae\x83\x8f\x7b\xe4" - "\x78\x5c\x4c\x67\x71\x89\x94\xbf" - "\x47\xf1\x63\x7e\x1c\x59\xbd\xc5" - "\x7f\x44\x0a\x0c\x01\x18\x07\x92" - "\xe1\xd3\x51\xce\x32\x6d\x0c\x5b", - .ilen = 48, - .result = "\xc2\x54\xc8\xde\x78\x87\x77\x40" - "\x49\x71\xe4\xb7\xe7\xcb\x76\x61" - "\x0a\x41\xb9\xe9\xc0\x76\x54\xab" - "\x04\x49\x3b\x19\x93\x57\x25\x5d", - .rlen = 32, - }, -}; - -/* - * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all - * use a 13-byte nonce, we only support an 11-byte nonce. Worse, - * they use AD lengths which are not valid ESP header lengths. - * - * These vectors are copied/generated from the ones for rfc4106 with - * the key truncated by one byte.. 
- */ -static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = { - { /* Generated using Crypto++ */ - .key = zeroed_string, - .klen = 19, - .iv = zeroed_string, - .input = zeroed_string, - .ilen = 16, - .assoc = zeroed_string, - .alen = 16, - .result = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F" - "\x12\x50\xE8\xDE\x81\x3C\x63\x08" - "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5" - "\x27\x50\x01\xAC\x03\x33\x39\xFB", - .rlen = 32, - },{ - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = zeroed_string, - .ilen = 16, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F" - "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17" - "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA" - "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0", - .rlen = 32, - - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = zeroed_string, - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, - .assoc = zeroed_string, - .alen = 16, - .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" - "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" - "\xA1\xE2\xC2\x42\x2B\x81\x70\x40" - "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C", - .rlen = 32, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = zeroed_string, - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .result = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" - "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" - "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9" - "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E", - .rlen = 32, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 16, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" - "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" - "\x43\x8E\x76\x57\x3B\xB4\x05\xE8" - "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED", - .rlen = 32, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .input = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .ilen = 64, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .result = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" - "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" - "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4" - "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02" - "\x9B\xAB\x39\x18\xEB\x94\x34\x36" - "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49" - "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E" - "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB" - "\x02\x73\xDD\xE7\x30\x4A\x30\x54" - "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F", - .rlen = 80, - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", - .input = 
"\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .ilen = 192, - .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" - "\x89\xab\xcd\xef", - .alen = 20, - .result = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E" - "\x7C\x64\x6D\x33\x46\x77\xAC\xB1" - "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95" - "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C" - "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB" - "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01" - "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04" - "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B" - "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09" - "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79" - "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D" - "\x22\xEB\xD1\x09\x80\x3F\x5A\x70" - "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B" - "\x36\x12\x00\x89\xAA\x5D\x55\xDA" - "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6" - "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F" - "\x2B\x50\x44\x52\xC2\x10\x7D\x38" - "\x82\x64\x83\x0C\xAE\x49\xD0\xE5" - "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43" - "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC" - "\x19\x6D\x60\x66\x61\xF9\x9A\x3F" - "\x75\xFC\x38\x53\x5B\xB5\xCD\x52" - "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98" - "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E" - "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC" - "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A", - .rlen = 208, - }, { /* From draft-mcgrew-gcm-test-01 */ - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x2E\x44\x3B", - .klen = 19, - .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", - .input = "\x45\x00\x00\x48\x69\x9A\x00\x00" - "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" - "\x38\xD3\x01\x00\x00\x01\x00\x00" - "\x00\x00\x00\x00\x04\x5F\x73\x69" - "\x70\x04\x5F\x75\x64\x70\x03\x73" - "\x69\x70\x09\x63\x79\x62\x65\x72" - "\x63\x69\x74\x79\x02\x64\x6B\x00" - "\x00\x21\x00\x01\x01\x02\x02\x01", - .ilen = 72, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x00\x49\x56\xED\x7E" - "\x3B\x24\x4C\xFE", - .alen = 20, - .result = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB" - "\x83\x60\xF5\xBA\x3A\x56\x79\xE6" - "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E" - "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF" - "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0" - "\x39\xF8\x53\x6D\x31\x22\x2B\xBF" - "\x98\x81\xFC\x34\xEE\x85\x36\xCD" - "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35" - "\x18\x85\x54\xB2\xBC\xDD\x3F\x43" - "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC" - "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF", - .rlen = 88, - }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xCA\xFE\xBA", - .klen = 19, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .input = "\x45\x00\x00\x3E\x69\x8F\x00\x00" - "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x98\x00\x35" - "\x00\x2A\x23\x43\xB2\xD0\x01\x00" - "\x00\x01\x00\x00\x00\x00\x00\x00" - "\x03\x73\x69\x70\x09\x63\x79\x62" - "\x65\x72\x63\x69\x74\x79\x02\x64" - 
"\x6B\x00\x00\x01\x00\x01\x00\x01", - .ilen = 64, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .result = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8" - "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16" - "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF" - "\xF5\x71\xB6\x37\x84\xA7\xB1\x99" - "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D" - "\xEF\x25\x88\x1F\x1D\x77\x11\xFF" - "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B" - "\x00\x62\x1F\x68\x4E\x7C\xA0\x97" - "\x10\x72\x7E\x53\x13\x3B\x68\xE4" - "\x30\x99\x91\x79\x09\xEA\xFF\x6A", - .rlen = 80, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x11\x22\x33", - .klen = 35, - .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .input = "\x45\x00\x00\x30\x69\xA6\x40\x00" - "\x80\x06\x26\x90\xC0\xA8\x01\x02" - "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" - "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" - "\x70\x02\x40\x00\x20\xBF\x00\x00" - "\x02\x04\x05\xB4\x01\x01\x04\x02" - "\x01\x02\x02\x01", - .ilen = 52, - .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" - "\x01\x02\x03\x04\x05\x06\x07\x08", - .alen = 16, - .result = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F" - "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4" - "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD" - "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55" - "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10" - "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B" - "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92" - "\x23\xA6\x10\xB0\x26\xD6\xD9\x26" - "\x5A\x48\x6A\x3E", - .rlen = 68, - }, { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .input = "\x45\x00\x00\x3C\x99\xC5\x00\x00" - "\x80\x01\xCB\x7A\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x07\x5C" - "\x02\x00\x44\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .ilen = 64, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .result = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F" - "\x92\x51\x23\xA4\xC1\x5B\xF0\x10" - "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC" - "\x89\xC8\xF8\x42\x62\x95\xB7\xCB" - "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7" - "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C" - "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9" - "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE" - "\x36\x25\xC1\x10\x12\x1C\xCA\x82" - "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A", - .rlen = 80, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x3C\x99\xC3\x00\x00" - "\x80\x01\xCB\x7C\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x08\x5C" - "\x02\x00\x43\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .ilen = 64, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .result = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6" - "\x10\x60\x40\x62\x6B\x4F\x97\x8E" - "\x0B\xB2\x22\x97\xCB\x21\xE0\x90" - "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B" - "\x79\x01\x58\x50\x01\x06\xE1\xE0" - "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" - "\x30\xB8\xE5\xDF\xD7\x12\x56\x75" - "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD" - "\x97\x57\xCA\xC1\x20\xD0\x86\xB9" - "\x66\x9D\xB4\x2B\x96\x22\xAC\x67", - .rlen = 80, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - 
"\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x1C\x42\xA2\x00\x00" - "\x80\x01\x44\x1F\x40\x67\x93\xB6" - "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" - "\x01\x02\x02\x01", - .ilen = 28, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .result = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6" - "\x10\x60\xCF\x01\x6B\x4F\x97\x20" - "\xEA\xB3\x23\x94\xC9\x21\x1D\x33" - "\xA1\xE5\x90\x40\x05\x37\x45\x70" - "\xB5\xD6\x09\x0A\x23\x73\x33\xF9" - "\x08\xB4\x22\xE4", - .rlen = 44, - }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\xCA\xFE\xBA", - .klen = 27, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .input = "\x45\x00\x00\x28\xA4\xAD\x40\x00" - "\x40\x06\x78\x80\x0A\x01\x03\x8F" - "\x0A\x01\x06\x12\x80\x23\x06\xB8" - "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" - "\x50\x10\x16\xD0\x75\x68\x00\x01", - .ilen = 40, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .result = "\x05\x22\x15\xD1\x52\x56\x85\x04" - "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA" - "\xEA\x16\x37\x50\xF3\xDF\x84\x3B" - "\x2F\x32\x18\x57\x34\x2A\x8C\x23" - "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB" - "\x34\xA5\x9F\x6C\x48\x30\x1E\x22" - "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B", - .rlen = 56, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8", - .klen = 19, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .input = "\x45\x00\x00\x49\x33\xBA\x00\x00" - "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xDD\x7B\x80\x03\x02\xD5" - "\x00\x00\x4E\x20\x00\x1E\x8C\x18" - "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" - "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" - "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" - "\x9B\x62\x66\xC0\x47\x22\xB1\x49" - "\x23\x01\x01\x01", - .ilen = 76, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .result = "\x92\xD0\x53\x79\x33\x38\xD5\xF3" - "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90" - "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76" - "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00" - "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA" - "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7" - "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6" - "\x63\xC9\xDB\x05\x69\x51\x12\xAD" - "\x3E\x00\x32\x73\x86\xF2\xEE\xF5" - "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D" - "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7" - "\x12\x25\x0B\xF9", - .rlen = 92, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C", - .klen = 35, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .input = "\x45\x08\x00\x28\x73\x2C\x00\x00" - "\x40\x06\xE9\xF9\x0A\x01\x06\x12" - "\x0A\x01\x03\x8F\x06\xB8\x80\x23" - "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" - "\x50\x10\x1F\x64\x6D\x54\x00\x01", - .ilen = 40, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .result = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42" - "\x2C\x64\x87\x46\x1E\x34\x10\x05" - "\x29\x6B\xBB\x36\xE9\x69\xAD\x92" - "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D" - "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA" - "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7" - "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D", - .rlen = 56, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x49\x33\x3E\x00\x00" - 
"\x7F\x11\x91\x82\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xCB\x45\x80\x03\x02\x5B" - "\x00\x00\x01\xE0\x00\x1E\x8C\x18" - "\xD6\x57\x59\xD5\x22\x84\xA0\x35" - "\x2C\x71\x47\x5C\x88\x80\x39\x1C" - "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" - "\x5A\xE2\x70\xC0\x38\x99\x49\x39" - "\x15\x01\x01\x01", - .ilen = 76, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .result = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6" - "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86" - "\xC8\x02\xF0\xB0\x03\x09\xD9\x02" - "\xA0\xD2\x59\x04\xD1\x85\x2A\x24" - "\x1C\x67\x3E\xD8\x68\x72\x06\x94" - "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B" - "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C" - "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE" - "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5" - "\x23\xF4\x84\x40\x74\x14\x8A\x6B" - "\xDB\xD7\x67\xED\xA4\x93\xF3\x47" - "\xCC\xF7\x46\x6F", - .rlen = 92, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C", - .klen = 35, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .input = "\x63\x69\x73\x63\x6F\x01\x72\x75" - "\x6C\x65\x73\x01\x74\x68\x65\x01" - "\x6E\x65\x74\x77\x65\x01\x64\x65" - "\x66\x69\x6E\x65\x01\x74\x68\x65" - "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" - "\x67\x69\x65\x73\x01\x74\x68\x61" - "\x74\x77\x69\x6C\x6C\x01\x64\x65" - "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" - "\x72\x72\x6F\x77\x01\x02\x02\x01", - .ilen = 72, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .result = "\xEA\x15\xC4\x98\xAC\x15\x22\x37" - "\x00\x07\x1D\xBE\x60\x5D\x73\x16" - "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4" - "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A" - "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4" - "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5" - "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6" - "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C" - "\x73\xF4\xE7\x12\x84\x4C\x37\x0A" - "\x4A\x8F\x06\x37\x48\xF9\xF9\x05" - "\x55\x13\x40\xC3\xD5\x55\x3A\x3D", - .rlen = 88, - }, { - .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" - "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" - "\xD9\x66\x42", - .klen = 19, - .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .input = "\x01\x02\x02\x01", - .ilen = 4, - .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" - "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .alen = 16, - .result = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD" - "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90" - "\xF7\x61\x24\x62", - .rlen = 20, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8", - .klen = 19, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .input = "\x74\x6F\x01\x62\x65\x01\x6F\x72" - "\x01\x6E\x6F\x74\x01\x74\x6F\x01" - "\x62\x65\x00\x01", - .ilen = 20, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .result = "\xA3\xBF\x52\x52\x65\x83\xBA\x81" - "\x03\x9B\x84\xFC\x44\x8C\xBB\x81" - "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0" - "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC" - "\x17\x17\x65\xAD", - .rlen = 36, - }, { - .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" - "\x6D\x61\x72\x69\x6A\x75\x61\x6E" - "\x61\x61\x6E\x64\x64\x6F\x69\x74" - "\x62\x65\x66\x6F\x72\x65\x69\x61" - "\x74\x75\x72", - .klen = 35, - .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", - .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - 
"\x01\x02\x02\x01", - .ilen = 52, - .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" - "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" - "\x67\x65\x74\x6D", - .alen = 20, - .result = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10" - "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A" - "\x48\x59\x80\x18\x1A\x18\x1A\x04" - "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75" - "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF" - "\x16\xC3\x35\xA8\xE7\xCE\x84\x04" - "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42" - "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B" - "\x39\xDB\xC8\xDC", - .rlen = 68, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .input = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .ilen = 52, - .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .result = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6" - "\x10\x60\x54\x25\xEB\x80\x04\x93" - "\xCA\x1B\x23\x97\xCB\x21\x2E\x01" - "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B" - "\x79\x01\x58\x50\x01\x06\xE1\xE0" - "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" - "\x44\xCC\x90\xBF\x00\x94\x94\x92" - "\x20\x17\x0C\x1B\x55\xDE\x7E\x68" - "\xF4\x95\x5D\x4F", - .rlen = 68, - }, { - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x22\x43\x3C", - .klen = 19, - .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", - .input = "\x08\x00\xC6\xCD\x02\x00\x07\x00" - "\x61\x62\x63\x64\x65\x66\x67\x68" - "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" - "\x71\x72\x73\x74\x01\x02\x02\x01", - .ilen = 32, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x07\x48\x55\xEC\x7D" - "\x3A\x23\x4B\xFD", - .alen = 20, - .result = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02" - "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F" - "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF" - "\x58\x7D\xCF\x29\xA3\x41\x68\x6B" - "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F" - "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6", - .rlen = 48, - } -}; - -static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = { - { /* Generated using Crypto++ */ - .key = zeroed_string, - .klen = 19, - .iv = zeroed_string, - .result = zeroed_string, - .rlen = 16, - .assoc = zeroed_string, - .alen = 16, - .input = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F" - "\x12\x50\xE8\xDE\x81\x3C\x63\x08" - "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5" - "\x27\x50\x01\xAC\x03\x33\x39\xFB", - .ilen = 32, - },{ - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .result = zeroed_string, - .rlen = 16, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .input = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F" - "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17" - "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA" - "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0", - .ilen = 32, - - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = zeroed_string, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, - .assoc = zeroed_string, - .alen = 16, - .input = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" - "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" - "\xA1\xE2\xC2\x42\x2B\x81\x70\x40" - "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C", - .ilen = 32, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - 
"\x00\x00\x00", - .klen = 19, - .iv = zeroed_string, - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" - "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" - "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9" - "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E", - .ilen = 32, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 16, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .input = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" - "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" - "\x43\x8E\x76\x57\x3B\xB4\x05\xE8" - "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED", - .ilen = 32, - }, { - .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" - "\x6d\x6a\x8f\x94\x67\x30\x83\x08" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", - .result = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x01\x01\x01\x01\x01\x01\x01\x01", - .rlen = 64, - .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" - "\x00\x00\x00\x00\x00\x00\x00\x01", - .alen = 16, - .input = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" - "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" - "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4" - "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02" - "\x9B\xAB\x39\x18\xEB\x94\x34\x36" - "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49" - "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E" - "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB" - "\x02\x73\xDD\xE7\x30\x4A\x30\x54" - "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F", - .ilen = 80, - }, { - .key = "\x00\x01\x02\x03\x04\x05\x06\x07" - "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", - .result = "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff" - "\xff\xff\xff\xff\xff\xff\xff\xff", - .rlen = 192, - .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" - "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" - "\x89\xab\xcd\xef", - .alen = 20, - .input = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E" - "\x7C\x64\x6D\x33\x46\x77\xAC\xB1" - "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95" - "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C" - "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB" - "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01" - "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04" - "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B" - "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09" - "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79" - "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D" - 
"\x22\xEB\xD1\x09\x80\x3F\x5A\x70" - "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B" - "\x36\x12\x00\x89\xAA\x5D\x55\xDA" - "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6" - "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F" - "\x2B\x50\x44\x52\xC2\x10\x7D\x38" - "\x82\x64\x83\x0C\xAE\x49\xD0\xE5" - "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43" - "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC" - "\x19\x6D\x60\x66\x61\xF9\x9A\x3F" - "\x75\xFC\x38\x53\x5B\xB5\xCD\x52" - "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98" - "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E" - "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC" - "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A", - .ilen = 208, - }, { /* From draft-mcgrew-gcm-test-01 */ - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x2E\x44\x3B", - .klen = 19, - .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", - .result = "\x45\x00\x00\x48\x69\x9A\x00\x00" - "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" - "\x38\xD3\x01\x00\x00\x01\x00\x00" - "\x00\x00\x00\x00\x04\x5F\x73\x69" - "\x70\x04\x5F\x75\x64\x70\x03\x73" - "\x69\x70\x09\x63\x79\x62\x65\x72" - "\x63\x69\x74\x79\x02\x64\x6B\x00" - "\x00\x21\x00\x01\x01\x02\x02\x01", - .rlen = 72, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x00\x49\x56\xED\x7E" - "\x3B\x24\x4C\xFE", - .alen = 20, - .input = "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB" - "\x83\x60\xF5\xBA\x3A\x56\x79\xE6" - "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E" - "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF" - "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0" - "\x39\xF8\x53\x6D\x31\x22\x2B\xBF" - "\x98\x81\xFC\x34\xEE\x85\x36\xCD" - "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35" - "\x18\x85\x54\xB2\xBC\xDD\x3F\x43" - "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC" - "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF", - .ilen = 88, - }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xCA\xFE\xBA", - .klen = 19, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .result = "\x45\x00\x00\x3E\x69\x8F\x00\x00" - "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" - "\xC0\xA8\x01\x01\x0A\x98\x00\x35" - "\x00\x2A\x23\x43\xB2\xD0\x01\x00" - "\x00\x01\x00\x00\x00\x00\x00\x00" - "\x03\x73\x69\x70\x09\x63\x79\x62" - "\x65\x72\x63\x69\x74\x79\x02\x64" - "\x6B\x00\x00\x01\x00\x01\x00\x01", - .rlen = 64, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .input = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8" - "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16" - "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF" - "\xF5\x71\xB6\x37\x84\xA7\xB1\x99" - "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D" - "\xEF\x25\x88\x1F\x1D\x77\x11\xFF" - "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B" - "\x00\x62\x1F\x68\x4E\x7C\xA0\x97" - "\x10\x72\x7E\x53\x13\x3B\x68\xE4" - "\x30\x99\x91\x79\x09\xEA\xFF\x6A", - .ilen = 80, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x11\x22\x33", - .klen = 35, - .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .result = "\x45\x00\x00\x30\x69\xA6\x40\x00" - "\x80\x06\x26\x90\xC0\xA8\x01\x02" - "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" - "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" - "\x70\x02\x40\x00\x20\xBF\x00\x00" - "\x02\x04\x05\xB4\x01\x01\x04\x02" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" - "\x01\x02\x03\x04\x05\x06\x07\x08", - .alen = 16, - .input = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F" - "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4" - "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD" - "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55" - "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10" - "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B" - "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92" - 
"\x23\xA6\x10\xB0\x26\xD6\xD9\x26" - "\x5A\x48\x6A\x3E", - .ilen = 68, - }, { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00", - .klen = 19, - .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", - .result = "\x45\x00\x00\x3C\x99\xC5\x00\x00" - "\x80\x01\xCB\x7A\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x07\x5C" - "\x02\x00\x44\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .rlen = 64, - .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .alen = 16, - .input = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F" - "\x92\x51\x23\xA4\xC1\x5B\xF0\x10" - "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC" - "\x89\xC8\xF8\x42\x62\x95\xB7\xCB" - "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7" - "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C" - "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9" - "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE" - "\x36\x25\xC1\x10\x12\x1C\xCA\x82" - "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A", - .ilen = 80, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x3C\x99\xC3\x00\x00" - "\x80\x01\xCB\x7C\x40\x67\x93\x18" - "\x01\x01\x01\x01\x08\x00\x08\x5C" - "\x02\x00\x43\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x75\x76\x77\x61\x62\x63\x64\x65" - "\x66\x67\x68\x69\x01\x02\x02\x01", - .rlen = 64, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6" - "\x10\x60\x40\x62\x6B\x4F\x97\x8E" - "\x0B\xB2\x22\x97\xCB\x21\xE0\x90" - "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B" - "\x79\x01\x58\x50\x01\x06\xE1\xE0" - "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" - "\x30\xB8\xE5\xDF\xD7\x12\x56\x75" - "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD" - "\x97\x57\xCA\xC1\x20\xD0\x86\xB9" - "\x66\x9D\xB4\x2B\x96\x22\xAC\x67", - .ilen = 80, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x1C\x42\xA2\x00\x00" - "\x80\x01\x44\x1F\x40\x67\x93\xB6" - "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" - "\x01\x02\x02\x01", - .rlen = 28, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6" - "\x10\x60\xCF\x01\x6B\x4F\x97\x20" - "\xEA\xB3\x23\x94\xC9\x21\x1D\x33" - "\xA1\xE5\x90\x40\x05\x37\x45\x70" - "\xB5\xD6\x09\x0A\x23\x73\x33\xF9" - "\x08\xB4\x22\xE4", - .ilen = 44, - }, { - .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\x6D\x6A\x8F\x94\x67\x30\x83\x08" - "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" - "\xCA\xFE\xBA", - .klen = 27, - .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .result = "\x45\x00\x00\x28\xA4\xAD\x40\x00" - "\x40\x06\x78\x80\x0A\x01\x03\x8F" - "\x0A\x01\x06\x12\x80\x23\x06\xB8" - "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" - "\x50\x10\x16\xD0\x75\x68\x00\x01", - .rlen = 40, - .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" - "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", - .alen = 16, - .input = "\x05\x22\x15\xD1\x52\x56\x85\x04" - "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA" - "\xEA\x16\x37\x50\xF3\xDF\x84\x3B" - "\x2F\x32\x18\x57\x34\x2A\x8C\x23" - "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB" - "\x34\xA5\x9F\x6C\x48\x30\x1E\x22" - "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B", - .ilen = 56, - }, { - .key = 
"\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8", - .klen = 19, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .result = "\x45\x00\x00\x49\x33\xBA\x00\x00" - "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xDD\x7B\x80\x03\x02\xD5" - "\x00\x00\x4E\x20\x00\x1E\x8C\x18" - "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" - "\x6B\x91\xB9\x24\xB2\x80\x38\x9D" - "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" - "\x9B\x62\x66\xC0\x47\x22\xB1\x49" - "\x23\x01\x01\x01", - .rlen = 76, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .input = "\x92\xD0\x53\x79\x33\x38\xD5\xF3" - "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90" - "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76" - "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00" - "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA" - "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7" - "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6" - "\x63\xC9\xDB\x05\x69\x51\x12\xAD" - "\x3E\x00\x32\x73\x86\xF2\xEE\xF5" - "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D" - "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7" - "\x12\x25\x0B\xF9", - .ilen = 92, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C", - .klen = 35, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .result = "\x45\x08\x00\x28\x73\x2C\x00\x00" - "\x40\x06\xE9\xF9\x0A\x01\x06\x12" - "\x0A\x01\x03\x8F\x06\xB8\x80\x23" - "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" - "\x50\x10\x1F\x64\x6D\x54\x00\x01", - .rlen = 40, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .input = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42" - "\x2C\x64\x87\x46\x1E\x34\x10\x05" - "\x29\x6B\xBB\x36\xE9\x69\xAD\x92" - "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D" - "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA" - "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7" - "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D", - .ilen = 56, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x49\x33\x3E\x00\x00" - "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" - "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" - "\x00\x35\xCB\x45\x80\x03\x02\x5B" - "\x00\x00\x01\xE0\x00\x1E\x8C\x18" - "\xD6\x57\x59\xD5\x22\x84\xA0\x35" - "\x2C\x71\x47\x5C\x88\x80\x39\x1C" - "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" - "\x5A\xE2\x70\xC0\x38\x99\x49\x39" - "\x15\x01\x01\x01", - .rlen = 76, - .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6" - "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86" - "\xC8\x02\xF0\xB0\x03\x09\xD9\x02" - "\xA0\xD2\x59\x04\xD1\x85\x2A\x24" - "\x1C\x67\x3E\xD8\x68\x72\x06\x94" - "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B" - "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C" - "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE" - "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5" - "\x23\xF4\x84\x40\x74\x14\x8A\x6B" - "\xDB\xD7\x67\xED\xA4\x93\xF3\x47" - "\xCC\xF7\x46\x6F", - .ilen = 92, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\x73\x61\x6C", - .klen = 35, - .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", - .result = "\x63\x69\x73\x63\x6F\x01\x72\x75" - "\x6C\x65\x73\x01\x74\x68\x65\x01" - "\x6E\x65\x74\x77\x65\x01\x64\x65" - "\x66\x69\x6E\x65\x01\x74\x68\x65" - "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" - "\x67\x69\x65\x73\x01\x74\x68\x61" - 
"\x74\x77\x69\x6C\x6C\x01\x64\x65" - "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" - "\x72\x72\x6F\x77\x01\x02\x02\x01", - .rlen = 72, - .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" - "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" - "\x69\x76\x65\x63", - .alen = 20, - .input = "\xEA\x15\xC4\x98\xAC\x15\x22\x37" - "\x00\x07\x1D\xBE\x60\x5D\x73\x16" - "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4" - "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A" - "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4" - "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5" - "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6" - "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C" - "\x73\xF4\xE7\x12\x84\x4C\x37\x0A" - "\x4A\x8F\x06\x37\x48\xF9\xF9\x05" - "\x55\x13\x40\xC3\xD5\x55\x3A\x3D", - .ilen = 88, - }, { - .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" - "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" - "\xD9\x66\x42", - .klen = 19, - .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .result = "\x01\x02\x02\x01", - .rlen = 4, - .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" - "\x43\x45\x7E\x91\x82\x44\x3B\xC6", - .alen = 16, - .input = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD" - "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90" - "\xF7\x61\x24\x62", - .ilen = 20, - }, { - .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" - "\x34\x45\x56\x67\x78\x89\x9A\xAB" - "\xDE\xCA\xF8", - .klen = 19, - .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", - .result = "\x74\x6F\x01\x62\x65\x01\x6F\x72" - "\x01\x6E\x6F\x74\x01\x74\x6F\x01" - "\x62\x65\x00\x01", - .rlen = 20, - .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" - "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" - "\xCE\xFA\xCE\x74", - .alen = 20, - .input = "\xA3\xBF\x52\x52\x65\x83\xBA\x81" - "\x03\x9B\x84\xFC\x44\x8C\xBB\x81" - "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0" - "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC" - "\x17\x17\x65\xAD", - .ilen = 36, - }, { - .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" - "\x6D\x61\x72\x69\x6A\x75\x61\x6E" - "\x61\x61\x6E\x64\x64\x6F\x69\x74" - "\x62\x65\x66\x6F\x72\x65\x69\x61" - "\x74\x75\x72", - .klen = 35, - .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", - .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" - "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" - "\x67\x65\x74\x6D", - .alen = 20, - .input = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10" - "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A" - "\x48\x59\x80\x18\x1A\x18\x1A\x04" - "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75" - "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF" - "\x16\xC3\x35\xA8\xE7\xCE\x84\x04" - "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42" - "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B" - "\x39\xDB\xC8\xDC", - .ilen = 68, - }, { - .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" - "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" - "\x57\x69\x0E", - .klen = 19, - .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", - .result = "\x45\x00\x00\x30\xDA\x3A\x00\x00" - "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" - "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" - "\x02\x00\x07\x00\x61\x62\x63\x64" - "\x65\x66\x67\x68\x69\x6A\x6B\x6C" - "\x6D\x6E\x6F\x70\x71\x72\x73\x74" - "\x01\x02\x02\x01", - .rlen = 52, - .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" - "\x10\x10\x10\x10\x4E\x28\x00\x00" - "\xA2\xFC\xA1\xA3", - .alen = 20, - .input = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6" - "\x10\x60\x54\x25\xEB\x80\x04\x93" - "\xCA\x1B\x23\x97\xCB\x21\x2E\x01" - "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B" - "\x79\x01\x58\x50\x01\x06\xE1\xE0" - "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" - "\x44\xCC\x90\xBF\x00\x94\x94\x92" - "\x20\x17\x0C\x1B\x55\xDE\x7E\x68" - "\xF4\x95\x5D\x4F", - .ilen = 68, 
- }, { - .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" - "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" - "\x22\x43\x3C", - .klen = 19, - .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", - .result = "\x08\x00\xC6\xCD\x02\x00\x07\x00" - "\x61\x62\x63\x64\x65\x66\x67\x68" - "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" - "\x71\x72\x73\x74\x01\x02\x02\x01", - .rlen = 32, - .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" - "\x00\x00\x00\x07\x48\x55\xEC\x7D" - "\x3A\x23\x4B\xFD", - .alen = 20, - .input = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02" - "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F" - "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF" - "\x58\x7D\xCF\x29\xA3\x41\x68\x6B" - "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F" - "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6", - .ilen = 48, - } -}; - -/* - * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5. - */ -static const struct aead_testvec rfc7539_enc_tv_template[] = { - { - .key = "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f", - .klen = 32, - .iv = "\x07\x00\x00\x00\x40\x41\x42\x43" - "\x44\x45\x46\x47", - .assoc = "\x50\x51\x52\x53\xc0\xc1\xc2\xc3" - "\xc4\xc5\xc6\xc7", - .alen = 12, - .input = "\x4c\x61\x64\x69\x65\x73\x20\x61" - "\x6e\x64\x20\x47\x65\x6e\x74\x6c" - "\x65\x6d\x65\x6e\x20\x6f\x66\x20" - "\x74\x68\x65\x20\x63\x6c\x61\x73" - "\x73\x20\x6f\x66\x20\x27\x39\x39" - "\x3a\x20\x49\x66\x20\x49\x20\x63" - "\x6f\x75\x6c\x64\x20\x6f\x66\x66" - "\x65\x72\x20\x79\x6f\x75\x20\x6f" - "\x6e\x6c\x79\x20\x6f\x6e\x65\x20" - "\x74\x69\x70\x20\x66\x6f\x72\x20" - "\x74\x68\x65\x20\x66\x75\x74\x75" - "\x72\x65\x2c\x20\x73\x75\x6e\x73" - "\x63\x72\x65\x65\x6e\x20\x77\x6f" - "\x75\x6c\x64\x20\x62\x65\x20\x69" - "\x74\x2e", - .ilen = 114, - .result = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb" - "\x7b\x86\xaf\xbc\x53\xef\x7e\xc2" - "\xa4\xad\xed\x51\x29\x6e\x08\xfe" - "\xa9\xe2\xb5\xa7\x36\xee\x62\xd6" - "\x3d\xbe\xa4\x5e\x8c\xa9\x67\x12" - "\x82\xfa\xfb\x69\xda\x92\x72\x8b" - "\x1a\x71\xde\x0a\x9e\x06\x0b\x29" - "\x05\xd6\xa5\xb6\x7e\xcd\x3b\x36" - "\x92\xdd\xbd\x7f\x2d\x77\x8b\x8c" - "\x98\x03\xae\xe3\x28\x09\x1b\x58" - "\xfa\xb3\x24\xe4\xfa\xd6\x75\x94" - "\x55\x85\x80\x8b\x48\x31\xd7\xbc" - "\x3f\xf4\xde\xf0\x8e\x4b\x7a\x9d" - "\xe5\x76\xd2\x65\x86\xce\xc6\x4b" - "\x61\x16\x1a\xe1\x0b\x59\x4f\x09" - "\xe2\x6a\x7e\x90\x2e\xcb\xd0\x60" - "\x06\x91", - .rlen = 130, - }, { - .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" - "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" - "\x47\x39\x17\xc1\x40\x2b\x80\x09" - "\x9d\xca\x5c\xbc\x20\x70\x75\xc0", - .klen = 32, - .iv = "\x00\x00\x00\x00\x01\x02\x03\x04" - "\x05\x06\x07\x08", - .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" - "\x00\x00\x4e\x91", - .alen = 12, - .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74" - "\x2d\x44\x72\x61\x66\x74\x73\x20" - "\x61\x72\x65\x20\x64\x72\x61\x66" - "\x74\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x76\x61\x6c\x69" - "\x64\x20\x66\x6f\x72\x20\x61\x20" - "\x6d\x61\x78\x69\x6d\x75\x6d\x20" - "\x6f\x66\x20\x73\x69\x78\x20\x6d" - "\x6f\x6e\x74\x68\x73\x20\x61\x6e" - "\x64\x20\x6d\x61\x79\x20\x62\x65" - "\x20\x75\x70\x64\x61\x74\x65\x64" - "\x2c\x20\x72\x65\x70\x6c\x61\x63" - "\x65\x64\x2c\x20\x6f\x72\x20\x6f" - "\x62\x73\x6f\x6c\x65\x74\x65\x64" - "\x20\x62\x79\x20\x6f\x74\x68\x65" - "\x72\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x61\x74\x20\x61" - "\x6e\x79\x20\x74\x69\x6d\x65\x2e" - "\x20\x49\x74\x20\x69\x73\x20\x69" - "\x6e\x61\x70\x70\x72\x6f\x70\x72" - "\x69\x61\x74\x65\x20\x74\x6f\x20" - "\x75\x73\x65\x20\x49\x6e\x74\x65" - "\x72\x6e\x65\x74\x2d\x44\x72\x61" - 
"\x66\x74\x73\x20\x61\x73\x20\x72" - "\x65\x66\x65\x72\x65\x6e\x63\x65" - "\x20\x6d\x61\x74\x65\x72\x69\x61" - "\x6c\x20\x6f\x72\x20\x74\x6f\x20" - "\x63\x69\x74\x65\x20\x74\x68\x65" - "\x6d\x20\x6f\x74\x68\x65\x72\x20" - "\x74\x68\x61\x6e\x20\x61\x73\x20" - "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" - "\x20\x69\x6e\x20\x70\x72\x6f\x67" - "\x72\x65\x73\x73\x2e\x2f\xe2\x80" - "\x9d", - .ilen = 265, - .result = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" - "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" - "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" - "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" - "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" - "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" - "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" - "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" - "\x33\x2f\x83\x0e\x71\x0b\x97\xce" - "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" - "\x14\xad\x17\x6e\x00\x8d\x33\xbd" - "\x60\xf9\x82\xb1\xff\x37\xc8\x55" - "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" - "\xc1\x86\x32\x4e\x2b\x35\x06\x38" - "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" - "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" - "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" - "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" - "\x90\x40\xc5\xa4\x04\x33\x22\x5e" - "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" - "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" - "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" - "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" - "\x1b\x42\x22\x73\xf5\x48\x27\x1a" - "\x0b\xb2\x31\x60\x53\xfa\x76\x99" - "\x19\x55\xeb\xd6\x31\x59\x43\x4e" - "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" - "\x73\xa6\x72\x76\x27\x09\x7a\x10" - "\x49\xe6\x17\xd9\x1d\x36\x10\x94" - "\xfa\x68\xf0\xff\x77\x98\x71\x30" - "\x30\x5b\xea\xba\x2e\xda\x04\xdf" - "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" - "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" - "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" - "\x22\x39\x23\x36\xfe\xa1\x85\x1f" - "\x38", - .rlen = 281, - }, -}; - -static const struct aead_testvec rfc7539_dec_tv_template[] = { - { - .key = "\x80\x81\x82\x83\x84\x85\x86\x87" - "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" - "\x90\x91\x92\x93\x94\x95\x96\x97" - "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f", - .klen = 32, - .iv = "\x07\x00\x00\x00\x40\x41\x42\x43" - "\x44\x45\x46\x47", - .assoc = "\x50\x51\x52\x53\xc0\xc1\xc2\xc3" - "\xc4\xc5\xc6\xc7", - .alen = 12, - .input = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb" - "\x7b\x86\xaf\xbc\x53\xef\x7e\xc2" - "\xa4\xad\xed\x51\x29\x6e\x08\xfe" - "\xa9\xe2\xb5\xa7\x36\xee\x62\xd6" - "\x3d\xbe\xa4\x5e\x8c\xa9\x67\x12" - "\x82\xfa\xfb\x69\xda\x92\x72\x8b" - "\x1a\x71\xde\x0a\x9e\x06\x0b\x29" - "\x05\xd6\xa5\xb6\x7e\xcd\x3b\x36" - "\x92\xdd\xbd\x7f\x2d\x77\x8b\x8c" - "\x98\x03\xae\xe3\x28\x09\x1b\x58" - "\xfa\xb3\x24\xe4\xfa\xd6\x75\x94" - "\x55\x85\x80\x8b\x48\x31\xd7\xbc" - "\x3f\xf4\xde\xf0\x8e\x4b\x7a\x9d" - "\xe5\x76\xd2\x65\x86\xce\xc6\x4b" - "\x61\x16\x1a\xe1\x0b\x59\x4f\x09" - "\xe2\x6a\x7e\x90\x2e\xcb\xd0\x60" - "\x06\x91", - .ilen = 130, - .result = "\x4c\x61\x64\x69\x65\x73\x20\x61" - "\x6e\x64\x20\x47\x65\x6e\x74\x6c" - "\x65\x6d\x65\x6e\x20\x6f\x66\x20" - "\x74\x68\x65\x20\x63\x6c\x61\x73" - "\x73\x20\x6f\x66\x20\x27\x39\x39" - "\x3a\x20\x49\x66\x20\x49\x20\x63" - "\x6f\x75\x6c\x64\x20\x6f\x66\x66" - "\x65\x72\x20\x79\x6f\x75\x20\x6f" - "\x6e\x6c\x79\x20\x6f\x6e\x65\x20" - "\x74\x69\x70\x20\x66\x6f\x72\x20" - "\x74\x68\x65\x20\x66\x75\x74\x75" - "\x72\x65\x2c\x20\x73\x75\x6e\x73" - "\x63\x72\x65\x65\x6e\x20\x77\x6f" - "\x75\x6c\x64\x20\x62\x65\x20\x69" - "\x74\x2e", - .rlen = 114, - }, { - .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" - "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" - "\x47\x39\x17\xc1\x40\x2b\x80\x09" - "\x9d\xca\x5c\xbc\x20\x70\x75\xc0", - .klen = 32, - .iv = "\x00\x00\x00\x00\x01\x02\x03\x04" - "\x05\x06\x07\x08", - 
.assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" - "\x00\x00\x4e\x91", - .alen = 12, - .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" - "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" - "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" - "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" - "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" - "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" - "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" - "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" - "\x33\x2f\x83\x0e\x71\x0b\x97\xce" - "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" - "\x14\xad\x17\x6e\x00\x8d\x33\xbd" - "\x60\xf9\x82\xb1\xff\x37\xc8\x55" - "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" - "\xc1\x86\x32\x4e\x2b\x35\x06\x38" - "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" - "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" - "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" - "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" - "\x90\x40\xc5\xa4\x04\x33\x22\x5e" - "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" - "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" - "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" - "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" - "\x1b\x42\x22\x73\xf5\x48\x27\x1a" - "\x0b\xb2\x31\x60\x53\xfa\x76\x99" - "\x19\x55\xeb\xd6\x31\x59\x43\x4e" - "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" - "\x73\xa6\x72\x76\x27\x09\x7a\x10" - "\x49\xe6\x17\xd9\x1d\x36\x10\x94" - "\xfa\x68\xf0\xff\x77\x98\x71\x30" - "\x30\x5b\xea\xba\x2e\xda\x04\xdf" - "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" - "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" - "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" - "\x22\x39\x23\x36\xfe\xa1\x85\x1f" - "\x38", - .ilen = 281, - .result = "\x49\x6e\x74\x65\x72\x6e\x65\x74" - "\x2d\x44\x72\x61\x66\x74\x73\x20" - "\x61\x72\x65\x20\x64\x72\x61\x66" - "\x74\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x76\x61\x6c\x69" - "\x64\x20\x66\x6f\x72\x20\x61\x20" - "\x6d\x61\x78\x69\x6d\x75\x6d\x20" - "\x6f\x66\x20\x73\x69\x78\x20\x6d" - "\x6f\x6e\x74\x68\x73\x20\x61\x6e" - "\x64\x20\x6d\x61\x79\x20\x62\x65" - "\x20\x75\x70\x64\x61\x74\x65\x64" - "\x2c\x20\x72\x65\x70\x6c\x61\x63" - "\x65\x64\x2c\x20\x6f\x72\x20\x6f" - "\x62\x73\x6f\x6c\x65\x74\x65\x64" - "\x20\x62\x79\x20\x6f\x74\x68\x65" - "\x72\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x61\x74\x20\x61" - "\x6e\x79\x20\x74\x69\x6d\x65\x2e" - "\x20\x49\x74\x20\x69\x73\x20\x69" - "\x6e\x61\x70\x70\x72\x6f\x70\x72" - "\x69\x61\x74\x65\x20\x74\x6f\x20" - "\x75\x73\x65\x20\x49\x6e\x74\x65" - "\x72\x6e\x65\x74\x2d\x44\x72\x61" - "\x66\x74\x73\x20\x61\x73\x20\x72" - "\x65\x66\x65\x72\x65\x6e\x63\x65" - "\x20\x6d\x61\x74\x65\x72\x69\x61" - "\x6c\x20\x6f\x72\x20\x74\x6f\x20" - "\x63\x69\x74\x65\x20\x74\x68\x65" - "\x6d\x20\x6f\x74\x68\x65\x72\x20" - "\x74\x68\x61\x6e\x20\x61\x73\x20" - "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" - "\x20\x69\x6e\x20\x70\x72\x6f\x67" - "\x72\x65\x73\x73\x2e\x2f\xe2\x80" - "\x9d", - .rlen = 265, - }, -}; - -/* - * draft-irtf-cfrg-chacha20-poly1305 - */ -static const struct aead_testvec rfc7539esp_enc_tv_template[] = { - { - .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" - "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" - "\x47\x39\x17\xc1\x40\x2b\x80\x09" - "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" - "\x00\x00\x00\x00", - .klen = 36, - .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" - "\x00\x00\x4e\x91\x01\x02\x03\x04" - "\x05\x06\x07\x08", - .alen = 20, - .input = "\x49\x6e\x74\x65\x72\x6e\x65\x74" - "\x2d\x44\x72\x61\x66\x74\x73\x20" - "\x61\x72\x65\x20\x64\x72\x61\x66" - "\x74\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x76\x61\x6c\x69" - "\x64\x20\x66\x6f\x72\x20\x61\x20" - "\x6d\x61\x78\x69\x6d\x75\x6d\x20" - "\x6f\x66\x20\x73\x69\x78\x20\x6d" - "\x6f\x6e\x74\x68\x73\x20\x61\x6e" - "\x64\x20\x6d\x61\x79\x20\x62\x65" - 
"\x20\x75\x70\x64\x61\x74\x65\x64" - "\x2c\x20\x72\x65\x70\x6c\x61\x63" - "\x65\x64\x2c\x20\x6f\x72\x20\x6f" - "\x62\x73\x6f\x6c\x65\x74\x65\x64" - "\x20\x62\x79\x20\x6f\x74\x68\x65" - "\x72\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x61\x74\x20\x61" - "\x6e\x79\x20\x74\x69\x6d\x65\x2e" - "\x20\x49\x74\x20\x69\x73\x20\x69" - "\x6e\x61\x70\x70\x72\x6f\x70\x72" - "\x69\x61\x74\x65\x20\x74\x6f\x20" - "\x75\x73\x65\x20\x49\x6e\x74\x65" - "\x72\x6e\x65\x74\x2d\x44\x72\x61" - "\x66\x74\x73\x20\x61\x73\x20\x72" - "\x65\x66\x65\x72\x65\x6e\x63\x65" - "\x20\x6d\x61\x74\x65\x72\x69\x61" - "\x6c\x20\x6f\x72\x20\x74\x6f\x20" - "\x63\x69\x74\x65\x20\x74\x68\x65" - "\x6d\x20\x6f\x74\x68\x65\x72\x20" - "\x74\x68\x61\x6e\x20\x61\x73\x20" - "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" - "\x20\x69\x6e\x20\x70\x72\x6f\x67" - "\x72\x65\x73\x73\x2e\x2f\xe2\x80" - "\x9d", - .ilen = 265, - .result = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" - "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" - "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" - "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" - "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" - "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" - "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" - "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" - "\x33\x2f\x83\x0e\x71\x0b\x97\xce" - "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" - "\x14\xad\x17\x6e\x00\x8d\x33\xbd" - "\x60\xf9\x82\xb1\xff\x37\xc8\x55" - "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" - "\xc1\x86\x32\x4e\x2b\x35\x06\x38" - "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" - "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" - "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" - "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" - "\x90\x40\xc5\xa4\x04\x33\x22\x5e" - "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" - "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" - "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" - "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" - "\x1b\x42\x22\x73\xf5\x48\x27\x1a" - "\x0b\xb2\x31\x60\x53\xfa\x76\x99" - "\x19\x55\xeb\xd6\x31\x59\x43\x4e" - "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" - "\x73\xa6\x72\x76\x27\x09\x7a\x10" - "\x49\xe6\x17\xd9\x1d\x36\x10\x94" - "\xfa\x68\xf0\xff\x77\x98\x71\x30" - "\x30\x5b\xea\xba\x2e\xda\x04\xdf" - "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" - "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" - "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" - "\x22\x39\x23\x36\xfe\xa1\x85\x1f" - "\x38", - .rlen = 281, - }, -}; - -static const struct aead_testvec rfc7539esp_dec_tv_template[] = { - { - .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" - "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" - "\x47\x39\x17\xc1\x40\x2b\x80\x09" - "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" - "\x00\x00\x00\x00", - .klen = 36, - .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", - .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" - "\x00\x00\x4e\x91\x01\x02\x03\x04" - "\x05\x06\x07\x08", - .alen = 20, - .input = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" - "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" - "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" - "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" - "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" - "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" - "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" - "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" - "\x33\x2f\x83\x0e\x71\x0b\x97\xce" - "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" - "\x14\xad\x17\x6e\x00\x8d\x33\xbd" - "\x60\xf9\x82\xb1\xff\x37\xc8\x55" - "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" - "\xc1\x86\x32\x4e\x2b\x35\x06\x38" - "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" - "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" - "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" - "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" - "\x90\x40\xc5\xa4\x04\x33\x22\x5e" - "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" - "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" - "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" - "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" - "\x1b\x42\x22\x73\xf5\x48\x27\x1a" - 
"\x0b\xb2\x31\x60\x53\xfa\x76\x99" - "\x19\x55\xeb\xd6\x31\x59\x43\x4e" - "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" - "\x73\xa6\x72\x76\x27\x09\x7a\x10" - "\x49\xe6\x17\xd9\x1d\x36\x10\x94" - "\xfa\x68\xf0\xff\x77\x98\x71\x30" - "\x30\x5b\xea\xba\x2e\xda\x04\xdf" - "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" - "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" - "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" - "\x22\x39\x23\x36\xfe\xa1\x85\x1f" - "\x38", - .ilen = 281, - .result = "\x49\x6e\x74\x65\x72\x6e\x65\x74" - "\x2d\x44\x72\x61\x66\x74\x73\x20" - "\x61\x72\x65\x20\x64\x72\x61\x66" - "\x74\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x76\x61\x6c\x69" - "\x64\x20\x66\x6f\x72\x20\x61\x20" - "\x6d\x61\x78\x69\x6d\x75\x6d\x20" - "\x6f\x66\x20\x73\x69\x78\x20\x6d" - "\x6f\x6e\x74\x68\x73\x20\x61\x6e" - "\x64\x20\x6d\x61\x79\x20\x62\x65" - "\x20\x75\x70\x64\x61\x74\x65\x64" - "\x2c\x20\x72\x65\x70\x6c\x61\x63" - "\x65\x64\x2c\x20\x6f\x72\x20\x6f" - "\x62\x73\x6f\x6c\x65\x74\x65\x64" - "\x20\x62\x79\x20\x6f\x74\x68\x65" - "\x72\x20\x64\x6f\x63\x75\x6d\x65" - "\x6e\x74\x73\x20\x61\x74\x20\x61" - "\x6e\x79\x20\x74\x69\x6d\x65\x2e" - "\x20\x49\x74\x20\x69\x73\x20\x69" - "\x6e\x61\x70\x70\x72\x6f\x70\x72" - "\x69\x61\x74\x65\x20\x74\x6f\x20" - "\x75\x73\x65\x20\x49\x6e\x74\x65" - "\x72\x6e\x65\x74\x2d\x44\x72\x61" - "\x66\x74\x73\x20\x61\x73\x20\x72" - "\x65\x66\x65\x72\x65\x6e\x63\x65" - "\x20\x6d\x61\x74\x65\x72\x69\x61" - "\x6c\x20\x6f\x72\x20\x74\x6f\x20" - "\x63\x69\x74\x65\x20\x74\x68\x65" - "\x6d\x20\x6f\x74\x68\x65\x72\x20" - "\x74\x68\x61\x6e\x20\x61\x73\x20" - "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" - "\x20\x69\x6e\x20\x70\x72\x6f\x67" - "\x72\x65\x73\x73\x2e\x2f\xe2\x80" - "\x9d", - .rlen = 265, - }, -}; - -static const struct aead_testvec aegis128_enc_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", - .klen = 16, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03", - .assoc = "", - .alen = 0, - .input = "", - .ilen = 0, - .result = "\x07\xa5\x11\xf2\x9d\x40\xb8\x6d" - "\xda\xb8\x12\x34\x4c\x53\xd9\x72", - .rlen = 16, - }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", - .klen = 16, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09", - .assoc = "", - .alen = 0, - .input = "\x79", - .ilen = 1, - .result = "\x9e\x78\x52\xae\xcb\x9e\xe4\xd3" - "\x9a\xd7\x5d\xd7\xaa\x9a\xe9\x5a" - "\xcc", - .rlen = 17, - }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", - .klen = 16, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f", - .assoc = "", - .alen = 0, - .input = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .ilen = 15, - .result = "\xc3\x80\x83\x04\x5f\xaa\x61\xc7" - "\xca\xdd\x6f\xac\x85\x08\xb5\x35" - "\x2b\xc2\x3e\x0b\x1b\x39\x37\x2b" - "\x7a\x21\x16\xb3\xe6\x67\x66", - .rlen = 31, - }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94", - .klen = 16, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15", - .assoc = "", - .alen = 0, - .input = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .ilen = 16, - .result = "\x23\x25\x30\xe5\x6a\xb6\x36\x7d" - "\x38\xfd\x3a\xd2\xc2\x58\xa9\x11" - "\x1e\xa8\x30\x9c\x16\xa4\xdb\x65" - "\x51\x10\x16\x27\x70\x9b\x64\x29", - .rlen = 32, - }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", - .klen = 16, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" 
- "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c", - .assoc = "", - .alen = 0, - .input = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .ilen = 17, - .result = "\x2a\x8d\x56\x91\xc6\xf3\x56\xa5" - "\x1f\xf0\x89\x2e\x13\xad\xe6\xf6" - "\x46\x80\xb1\x0e\x18\x30\x40\x97" - "\x03\xdf\x64\x3c\xbe\x93\x9e\xc9" - "\x3b", - .rlen = 33, - }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", - .klen = 16, - .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22", - .assoc = "", - .alen = 0, - .input = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .ilen = 31, - .result = "\x4e\xf6\xfa\x13\xde\x43\x63\x4c" - "\xe2\x04\x3e\xe4\x85\x14\xb6\x3f" - "\xb1\x8f\x4c\xdb\x41\xa2\x14\x99" - "\xf5\x53\x0f\x73\x86\x7e\x97\xa1" - "\x4b\x56\x5b\x94\xce\xcd\x74\xcd" - "\x75\xc4\x53\x01\x89\x45\x59", - .rlen = 47, - }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", - .klen = 16, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28", - .assoc = "", - .alen = 0, - .input = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .ilen = 32, - .result = "\xa4\x9a\xb7\xfd\xa0\xd4\xd6\x47" - "\x95\xf4\x58\x38\x14\x83\x27\x01" - "\x4c\xed\x32\x2c\xf7\xd6\x31\xf7" - "\x38\x1b\x2c\xc9\xb6\x31\xce\xaa" - "\xa5\x3c\x1a\x18\x5c\xce\xb9\xdf" - "\x51\x52\x77\xf2\x5e\x85\x80\x41", - .rlen = 48, - }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .klen = 16, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e", - .assoc = "\xd5", - .alen = 1, - .input = "", - .ilen = 0, - .result = "\xfb\xd4\x83\x71\x9e\x63\xad\x60" - "\xb9\xf9\xeb\x34\x52\x49\xcf\xb7", - .rlen = 16, - }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .klen = 16, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .input = "", - .ilen = 0, - .result = "\x0c\xaf\x2e\x96\xf6\x97\x08\x71" - "\x7d\x3a\x84\xc4\x44\x57\x77\x7e", - .rlen = 16, - }, { - .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .klen = 16, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", - .alen = 16, - .input = "", - .ilen = 0, - .result = "\xc7\x87\x09\x3b\xc7\x19\x74\x22" - "\x22\xa5\x67\x10\xb2\x36\xb3\x45", - .rlen = 16, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .klen = 16, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41", - .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .input = "", - .ilen = 0, - .result = "\x02\xc6\x3b\x46\x65\xb2\xef\x91" - "\x31\xf0\x45\x48\x8a\x2a\xed\xe4", - .rlen = 16, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .klen = 16, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .input = "", - 
.ilen = 0, - .result = "\x20\x85\xa8\xd0\x91\x48\x85\xf3" - "\x5a\x16\xc0\x57\x68\x47\xdd\xcb", - .rlen = 16, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .klen = 16, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .input = "", - .ilen = 0, - .result = "\x6a\xf8\x8d\x9c\x42\x75\x35\x79" - "\xc1\x96\xbd\x31\x6e\x69\x1b\x50", - .rlen = 16, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .klen = 16, - .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54", - .assoc = "\x40", - .alen = 1, - .input = "\x4f", - .ilen = 1, - .result = "\x01\x24\xb1\xba\xf6\xd3\xdf\x83" - "\x70\x45\xe3\x2a\x9d\x5c\x63\x98" - "\x39", - .rlen = 17, - }, { - .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .klen = 16, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .input = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .ilen = 15, - .result = "\x18\x78\xc2\x6e\xe1\xf7\xe6\x8a" - "\xca\x0e\x62\x00\xa8\x21\xb5\x21" - "\x3d\x36\xdb\xf7\xcc\x31\x94\x9c" - "\x98\xbd\x71\x7a\xef\xa4\xfa", - .rlen = 31, - }, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .klen = 16, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .alen = 16, - .input = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .ilen = 16, - .result = "\xea\xd1\x81\x75\xb4\x13\x1d\x86" - "\xd4\x17\x26\xe5\xd6\x89\x39\x04" - "\xa9\x6c\xca\xac\x40\x73\xb2\x4c" - "\x9c\xb9\x0e\x79\x4c\x40\x65\xc6", - .rlen = 32, - }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .klen = 16, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .input = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .ilen = 17, - .result = "\xf4\xb2\x84\xd1\x81\xfa\x98\x1c" - "\x38\x2d\x69\x90\x1c\x71\x38\x98" - "\x9f\xe1\x19\x3b\x63\x91\xaf\x6e" - "\x4b\x07\x2c\xac\x53\xc5\xd5\xfe" - "\x93", - .rlen = 33, - }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .klen = 16, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen = 31, - .input = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .ilen = 31, - .result = "\xa0\xe7\x0a\x60\xe7\xb8\x8a\xdb" - "\x94\xd3\x93\xf2\x41\x86\x16\xdd" - "\x4c\xe8\xe7\xe0\x62\x48\x89\x40" - "\xc0\x49\x9b\x63\x32\xec\x8b\xdb" - "\xdc\xa6\xea\x2c\xc2\x7f\xf5\x04" - "\xcb\xe5\x47\xbb\xa7\xd1\x9d", - .rlen = 47, - }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .klen = 16, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - 
"\x50\xc4\xde\x82\x90\x21\x11\x73", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .input = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .ilen = 32, - .result = "\x62\xdc\x2d\x68\x2d\x71\xbb\x33" - "\x13\xdf\xc0\x46\xf6\x61\x94\xa7" - "\x60\xd3\xd4\xca\xd9\xbe\x82\xf3" - "\xf1\x5b\xa0\xfa\x15\xba\xda\xea" - "\x87\x68\x47\x08\x5d\xdd\x83\xb0" - "\x60\xf4\x93\x20\xdf\x34\x8f\xea", - .rlen = 48, - }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .klen = 16, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .input = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .ilen = 65, - .result = "\x84\xc5\x21\xab\xe1\xeb\xbb\x6d" - "\xaa\x2a\xaf\xeb\x3b\x3b\x69\xe7" - "\x2c\x47\xef\x9d\xb7\x53\x36\xb7" - "\xb6\xf5\xe5\xa8\xc9\x9e\x02\xd7" - "\x83\x88\xc2\xbd\x2f\xf9\x10\xc0" - "\xf5\xa1\x6e\xd3\x97\x64\x82\xa3" - "\xfb\xda\x2c\xb1\x94\xa1\x58\x32" - "\xe8\xd4\x39\xfc\x9e\x26\xf9\xf1" - "\x61\xe6\xae\x07\xf2\xe0\xa7\x44" - "\x96\x28\x3b\xee\x6b\xc6\x16\x31" - "\x3f", - .rlen = 81, - }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .klen = 16, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .input = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .ilen = 33, - .result = "\x8f\x23\x47\xfb\xf2\xac\x23\x83" - "\x77\x09\xac\x74\xef\xd2\x56\xae" - "\x20\x7b\x7b\xca\x45\x8e\xc8\xc2" - "\x50\xbd\xc7\x44\x1c\x54\x98\xd8" - "\x1f\xd0\x9a\x79\xaa\xf9\xe1\xb3" - "\xb4\x98\x5a\x9b\xe4\x4d\xbf\x4e" - "\x39", - .rlen = 49, - }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .klen = 16, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .alen = 16, - .input = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .ilen = 16, - .result = "\x42\xc3\x58\xfb\x29\xe2\x4a\x56" - "\xf1\xf5\xe1\x51\x55\x4b\x0a\x45" - "\x46\xb5\x8d\xac\xb6\x34\xd8\x8b" - "\xde\x20\x59\x77\xc1\x74\x90", - .rlen = 31, - }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .klen = 16, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .input = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - 
"\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .ilen = 16, - .result = "\xb2\xfb\xf6\x97\x69\x7a\xe9\xec" - "\xe2\x94\xa1\x8b\xa0\x2b\x60\x72" - "\x1d\x04\xdd\x6a\xef\x46\x8f\x68" - "\xe9\xe0\x17\x45\x70\x12", - .rlen = 30, - }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .klen = 16, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .input = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .ilen = 16, - .result = "\x47\xda\x54\x42\x51\x72\xc4\x8b" - "\xf5\x57\x0f\x2f\x49\x0e\x11\x3b" - "\x78\x93\xec\xfc\xf4\xff\xe1\x2d", - .rlen = 24, - }, -}; - -/* - * AEGIS-128 test vectors - generated via reference implementation from - * SUPERCOP (https://bench.cr.yp.to/supercop.html): - * - * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/aegis128/) - */ -static const struct aead_testvec aegis128_dec_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", - .klen = 16, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03", - .assoc = "", - .alen = 0, - .input = "\x07\xa5\x11\xf2\x9d\x40\xb8\x6d" - "\xda\xb8\x12\x34\x4c\x53\xd9\x72", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", - .klen = 16, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09", - .assoc = "", - .alen = 0, - .input = "\x9e\x78\x52\xae\xcb\x9e\xe4\xd3" - "\x9a\xd7\x5d\xd7\xaa\x9a\xe9\x5a" - "\xcc", - .ilen = 17, - .result = "\x79", - .rlen = 1, - }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", - .klen = 16, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f", - .assoc = "", - .alen = 0, - .input = "\xc3\x80\x83\x04\x5f\xaa\x61\xc7" - "\xca\xdd\x6f\xac\x85\x08\xb5\x35" - "\x2b\xc2\x3e\x0b\x1b\x39\x37\x2b" - "\x7a\x21\x16\xb3\xe6\x67\x66", - .ilen = 31, - .result = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .rlen = 15, - }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94", - .klen = 16, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15", - .assoc = "", - .alen = 0, - .input = "\x23\x25\x30\xe5\x6a\xb6\x36\x7d" - "\x38\xfd\x3a\xd2\xc2\x58\xa9\x11" - "\x1e\xa8\x30\x9c\x16\xa4\xdb\x65" - "\x51\x10\x16\x27\x70\x9b\x64\x29", - .ilen = 32, - .result = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .rlen = 16, - }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", - .klen = 16, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" - "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c", - .assoc = "", - .alen = 0, - .input = "\x2a\x8d\x56\x91\xc6\xf3\x56\xa5" - "\x1f\xf0\x89\x2e\x13\xad\xe6\xf6" - "\x46\x80\xb1\x0e\x18\x30\x40\x97" - "\x03\xdf\x64\x3c\xbe\x93\x9e\xc9" - "\x3b", - .ilen = 33, - .result = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .rlen = 17, - }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", - .klen = 16, - .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22", - .assoc = "", - .alen = 0, - .input = "\x4e\xf6\xfa\x13\xde\x43\x63\x4c" - "\xe2\x04\x3e\xe4\x85\x14\xb6\x3f" - "\xb1\x8f\x4c\xdb\x41\xa2\x14\x99" - "\xf5\x53\x0f\x73\x86\x7e\x97\xa1" 
- "\x4b\x56\x5b\x94\xce\xcd\x74\xcd" - "\x75\xc4\x53\x01\x89\x45\x59", - .ilen = 47, - .result = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .rlen = 31, - }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", - .klen = 16, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28", - .assoc = "", - .alen = 0, - .input = "\xa4\x9a\xb7\xfd\xa0\xd4\xd6\x47" - "\x95\xf4\x58\x38\x14\x83\x27\x01" - "\x4c\xed\x32\x2c\xf7\xd6\x31\xf7" - "\x38\x1b\x2c\xc9\xb6\x31\xce\xaa" - "\xa5\x3c\x1a\x18\x5c\xce\xb9\xdf" - "\x51\x52\x77\xf2\x5e\x85\x80\x41", - .ilen = 48, - .result = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .rlen = 32, - }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .klen = 16, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e", - .assoc = "\xd5", - .alen = 1, - .input = "\xfb\xd4\x83\x71\x9e\x63\xad\x60" - "\xb9\xf9\xeb\x34\x52\x49\xcf\xb7", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .klen = 16, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .input = "\x0c\xaf\x2e\x96\xf6\x97\x08\x71" - "\x7d\x3a\x84\xc4\x44\x57\x77\x7e", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .klen = 16, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", - .alen = 16, - .input = "\xc7\x87\x09\x3b\xc7\x19\x74\x22" - "\x22\xa5\x67\x10\xb2\x36\xb3\x45", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .klen = 16, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41", - .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .input = "\x02\xc6\x3b\x46\x65\xb2\xef\x91" - "\x31\xf0\x45\x48\x8a\x2a\xed\xe4", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .klen = 16, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .input = "\x20\x85\xa8\xd0\x91\x48\x85\xf3" - "\x5a\x16\xc0\x57\x68\x47\xdd\xcb", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .klen = 16, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .input = "\x6a\xf8\x8d\x9c\x42\x75\x35\x79" - "\xc1\x96\xbd\x31\x6e\x69\x1b\x50", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .klen = 16, - .iv = 
"\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54", - .assoc = "\x40", - .alen = 1, - .input = "\x01\x24\xb1\xba\xf6\xd3\xdf\x83" - "\x70\x45\xe3\x2a\x9d\x5c\x63\x98" - "\x39", - .ilen = 17, - .result = "\x4f", - .rlen = 1, - }, { - .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .klen = 16, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .input = "\x18\x78\xc2\x6e\xe1\xf7\xe6\x8a" - "\xca\x0e\x62\x00\xa8\x21\xb5\x21" - "\x3d\x36\xdb\xf7\xcc\x31\x94\x9c" - "\x98\xbd\x71\x7a\xef\xa4\xfa", - .ilen = 31, - .result = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .rlen = 15, - }, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .klen = 16, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .alen = 16, - .input = "\xea\xd1\x81\x75\xb4\x13\x1d\x86" - "\xd4\x17\x26\xe5\xd6\x89\x39\x04" - "\xa9\x6c\xca\xac\x40\x73\xb2\x4c" - "\x9c\xb9\x0e\x79\x4c\x40\x65\xc6", - .ilen = 32, - .result = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .rlen = 16, - }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .klen = 16, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .input = "\xf4\xb2\x84\xd1\x81\xfa\x98\x1c" - "\x38\x2d\x69\x90\x1c\x71\x38\x98" - "\x9f\xe1\x19\x3b\x63\x91\xaf\x6e" - "\x4b\x07\x2c\xac\x53\xc5\xd5\xfe" - "\x93", - .ilen = 33, - .result = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .rlen = 17, - }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .klen = 16, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen = 31, - .input = "\xa0\xe7\x0a\x60\xe7\xb8\x8a\xdb" - "\x94\xd3\x93\xf2\x41\x86\x16\xdd" - "\x4c\xe8\xe7\xe0\x62\x48\x89\x40" - "\xc0\x49\x9b\x63\x32\xec\x8b\xdb" - "\xdc\xa6\xea\x2c\xc2\x7f\xf5\x04" - "\xcb\xe5\x47\xbb\xa7\xd1\x9d", - .ilen = 47, - .result = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .rlen = 31, - }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .klen = 16, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .input = "\x62\xdc\x2d\x68\x2d\x71\xbb\x33" - "\x13\xdf\xc0\x46\xf6\x61\x94\xa7" - "\x60\xd3\xd4\xca\xd9\xbe\x82\xf3" - "\xf1\x5b\xa0\xfa\x15\xba\xda\xea" - "\x87\x68\x47\x08\x5d\xdd\x83\xb0" - "\x60\xf4\x93\x20\xdf\x34\x8f\xea", - .ilen = 48, - .result = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .rlen = 32, - }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .klen 
= 16, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .input = "\x84\xc5\x21\xab\xe1\xeb\xbb\x6d" - "\xaa\x2a\xaf\xeb\x3b\x3b\x69\xe7" - "\x2c\x47\xef\x9d\xb7\x53\x36\xb7" - "\xb6\xf5\xe5\xa8\xc9\x9e\x02\xd7" - "\x83\x88\xc2\xbd\x2f\xf9\x10\xc0" - "\xf5\xa1\x6e\xd3\x97\x64\x82\xa3" - "\xfb\xda\x2c\xb1\x94\xa1\x58\x32" - "\xe8\xd4\x39\xfc\x9e\x26\xf9\xf1" - "\x61\xe6\xae\x07\xf2\xe0\xa7\x44" - "\x96\x28\x3b\xee\x6b\xc6\x16\x31" - "\x3f", - .ilen = 81, - .result = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .rlen = 65, - }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .klen = 16, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .input = "\x8f\x23\x47\xfb\xf2\xac\x23\x83" - "\x77\x09\xac\x74\xef\xd2\x56\xae" - "\x20\x7b\x7b\xca\x45\x8e\xc8\xc2" - "\x50\xbd\xc7\x44\x1c\x54\x98\xd8" - "\x1f\xd0\x9a\x79\xaa\xf9\xe1\xb3" - "\xb4\x98\x5a\x9b\xe4\x4d\xbf\x4e" - "\x39", - .ilen = 49, - .result = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .rlen = 33, - }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .klen = 16, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .alen = 16, - .input = "\x42\xc3\x58\xfb\x29\xe2\x4a\x56" - "\xf1\xf5\xe1\x51\x55\x4b\x0a\x45" - "\x46\xb5\x8d\xac\xb6\x34\xd8\x8b" - "\xde\x20\x59\x77\xc1\x74\x90", - .ilen = 31, - .result = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .rlen = 16, - }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .klen = 16, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .input = "\xb2\xfb\xf6\x97\x69\x7a\xe9\xec" - "\xe2\x94\xa1\x8b\xa0\x2b\x60\x72" - "\x1d\x04\xdd\x6a\xef\x46\x8f\x68" - "\xe9\xe0\x17\x45\x70\x12", - .ilen = 30, - .result = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .rlen = 16, - }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .klen = 16, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .input = "\x47\xda\x54\x42\x51\x72\xc4\x8b" - "\xf5\x57\x0f\x2f\x49\x0e\x11\x3b" - "\x78\x93\xec\xfc\xf4\xff\xe1\x2d", - .ilen = 24, - .result = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .rlen 
= 16, - }, -}; - -/* - * AEGIS-128L test vectors - generated via reference implementation from - * SUPERCOP (https://bench.cr.yp.to/supercop.html): - * - * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/aegis128l/) - */ -static const struct aead_testvec aegis128l_enc_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", - .klen = 16, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03", - .assoc = "", - .alen = 0, - .input = "", - .ilen = 0, - .result = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6" - "\x20\x72\x78\xdd\x93\xc8\x57\xef", - .rlen = 16, - }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", - .klen = 16, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09", - .assoc = "", - .alen = 0, - .input = "\x79", - .ilen = 1, - .result = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb" - "\x40\xb3\x71\xc5\x22\x58\x31\x77" - "\x6d", - .rlen = 17, - }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", - .klen = 16, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f", - .assoc = "", - .alen = 0, - .input = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .ilen = 15, - .result = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03" - "\x2b\xee\x62\x99\x7b\x98\x13\x1f" - "\xe0\x76\x4c\x2e\x53\x99\x4f\xbe" - "\xe1\xa8\x04\x7f\xe1\x71\xbe", - .rlen = 31, - }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94", - .klen = 16, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15", - .assoc = "", - .alen = 0, - .input = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .ilen = 16, - .result = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c" - "\x9d\xb7\x8c\x9a\xdb\x1f\xd2\x2e" - "\x23\xb6\xa4\xfb\xd3\x86\xdd\xbb" - "\xde\x54\x9b\xf5\x92\x8b\x93\xc5", - .rlen = 32, - }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", - .klen = 16, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" - "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c", - .assoc = "", - .alen = 0, - .input = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .ilen = 17, - .result = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b" - "\x71\xc3\x8a\x91\x22\x4f\x1b\xd2" - "\x33\x6d\x86\xbc\xf8\x2f\x06\xf9" - "\x82\x64\xc7\x72\x00\x30\xfc\xf0" - "\xf8", - .rlen = 33, - }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", - .klen = 16, - .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22", - .assoc = "", - .alen = 0, - .input = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .ilen = 31, - .result = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5" - "\xd6\x9d\xee\xee\xcf\xaa\xaf\xe1" - "\x45\x10\x96\xe0\xbf\x55\x0f\x4c" - "\x1a\xfd\xf4\xda\x4e\x10\xde\xc9" - "\x0e\x6f\xc7\x3c\x49\x94\x41\xfc" - "\x59\x28\x88\x3c\x79\x10\x6b", - .rlen = 47, - }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", - .klen = 16, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28", - .assoc = "", - .alen = 0, - .input = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .ilen = 32, - .result = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d" - "\x1c\x9f\x06\x54\x8b\x0b\xb4\x40" 
- "\xde\x11\x59\x3e\xfd\x74\xf6\x42" - "\x97\x17\xf7\x24\xb6\x7e\xc4\xc6" - "\x06\xa3\x94\xda\x3d\x7f\x55\x0a" - "\x92\x07\x2f\xa6\xf3\x6b\x2c\xfc", - .rlen = 48, - }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .klen = 16, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e", - .assoc = "\xd5", - .alen = 1, - .input = "", - .ilen = 0, - .result = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8" - "\x0f\x9a\x15\x60\x0e\x9b\x13\x8f", - .rlen = 16, - }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .klen = 16, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .input = "", - .ilen = 0, - .result = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c" - "\x84\xfb\x26\xfb\xd5\xca\x62\x39", - .rlen = 16, - }, { - .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .klen = 16, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", - .alen = 16, - .input = "", - .ilen = 0, - .result = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1" - "\x99\x4d\x6d\xb4\x67\x32\xb0\x3a", - .rlen = 16, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .klen = 16, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41", - .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .input = "", - .ilen = 0, - .result = "\x33\xb1\x42\x97\x8e\x16\x7b\x63" - "\x06\xba\x5b\xcb\xae\x6d\x8b\x56", - .rlen = 16, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .klen = 16, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .input = "", - .ilen = 0, - .result = "\xda\x44\x08\x8c\x2a\xa5\x07\x35" - "\x0b\x54\x4e\x6d\xe3\xfd\xc4\x5f", - .rlen = 16, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .klen = 16, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .input = "", - .ilen = 0, - .result = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88" - "\x40\x7f\x7b\x19\x7a\x52\x8c\xf0", - .rlen = 16, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .klen = 16, - .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54", - .assoc = "\x40", - .alen = 1, - .input = "\x4f", - .ilen = 1, - .result = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9" - "\xa0\x98\x09\x85\xbe\x56\x8e\x79" - "\xf4", - .rlen = 17, - }, { - .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .klen = 16, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .input = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .ilen = 15, - .result = "\x99\x2e\x84\x50\x64\x5c\xab\x29" - "\x20\xba\xb9\x2f\x62\x3a\xce\x2a" - "\x75\x25\x3b\xe3\x40\xe0\x1d\xfc" - 
"\x20\x63\x0b\x49\x7e\x97\x08", - .rlen = 31, - }, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .klen = 16, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .alen = 16, - .input = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .ilen = 16, - .result = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee" - "\x78\x08\x12\xec\x09\xaf\x53\x14" - "\x90\x3e\x3d\x76\xad\x71\x21\x08" - "\x77\xe5\x4b\x15\xc2\xe6\xbc\xdb", - .rlen = 32, - }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .klen = 16, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .input = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .ilen = 17, - .result = "\xf3\xe7\x95\x86\xcf\x34\x95\x96" - "\x17\xfe\x1b\xae\x1b\x31\xf2\x1a" - "\xbd\xbc\xc9\x4e\x11\x29\x09\x5c" - "\x05\xd3\xb4\x2e\x4a\x74\x59\x49" - "\x7d", - .rlen = 33, - }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .klen = 16, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen = 31, - .input = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .ilen = 31, - .result = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24" - "\x0d\x19\x15\x61\x65\x3b\x06\x26" - "\x71\xe8\x7e\x16\xdb\x96\x01\x01" - "\x52\xcd\x49\x5b\x07\x33\x4e\xe7" - "\xaa\x91\xf5\xd5\xc6\xfe\x41\xb5" - "\xed\x90\xce\xb9\xcd\xcc\xa1", - .rlen = 47, - }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .klen = 16, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .input = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .ilen = 32, - .result = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1" - "\xbc\x0f\x35\x97\x97\x0c\x4b\x18" - "\xce\x58\xc8\x3b\xd4\x85\x93\x79" - "\xcc\x9c\xea\xc1\x73\x13\x0b\x4c" - "\xcc\x6f\x28\xf8\xa4\x4e\xb8\x56" - "\x64\x4e\x47\xce\xb2\xb4\x92\xb4", - .rlen = 48, - }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .klen = 16, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .input = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .ilen = 65, - .result = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8" - "\xc5\x78\x50\x8b\x4a\xc9\xdf\x7f" - "\x4b\xfa\xc8\x2e\x67\x43\xf3\x63" - 
"\x42\x8e\x99\x5a\x9c\x0b\x84\x77" - "\xbc\x46\x76\x48\x82\xc7\x57\x96" - "\xe1\x65\xd1\xed\x1d\xdd\x80\x24" - "\xa6\x4d\xa9\xf1\x53\x8b\x5e\x0e" - "\x26\xb9\xcc\x37\xe5\x43\xe1\x5a" - "\x8a\xd6\x8c\x5a\xe4\x95\xd1\x8d" - "\xf7\x33\x64\xc1\xd3\xf2\xfc\x35" - "\x01", - .rlen = 81, - }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .klen = 16, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .input = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .ilen = 33, - .result = "\x4c\xa9\xac\x71\xed\x10\xa6\x24" - "\xb7\xa7\xdf\x8b\xf5\xc2\x41\xcb" - "\x05\xc9\xd6\x97\xb6\x10\x7f\x17" - "\xc2\xc0\x93\xcf\xe0\x94\xfd\x99" - "\xf2\x62\x25\x28\x01\x23\x6f\x8b" - "\x04\x52\xbc\xb0\x3e\x66\x52\x90" - "\x9f", - .rlen = 49, - }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .klen = 16, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .alen = 16, - .input = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .ilen = 16, - .result = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5" - "\x96\xe6\x97\xe4\x10\xeb\x40\x95" - "\xc5\x9a\xdf\x31\xd5\xa5\xa6\xec" - "\x05\xa8\x31\x50\x11\x19\x44", - .rlen = 31, - }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .klen = 16, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .input = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .ilen = 16, - .result = "\x30\x95\x7d\xea\xdc\x62\xc0\x88" - "\xa1\xe3\x8d\x8c\xac\x04\x10\xa7" - "\xfa\xfa\x07\xbd\xa0\xf0\x36\xeb" - "\x21\x93\x2e\x31\x84\x83", - .rlen = 30, - }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .klen = 16, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .input = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .ilen = 16, - .result = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16" - "\x63\x0d\x43\xd5\x49\xca\xa8\x85" - "\x49\xc0\xae\x13\xbc\x26\x1d\x4b", - .rlen = 24, - }, -}; - -static const struct aead_testvec aegis128l_dec_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", - .klen = 16, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03", - .assoc = "", - .alen = 0, - .input = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6" - "\x20\x72\x78\xdd\x93\xc8\x57\xef", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", - .klen = 16, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09", - .assoc = "", - .alen = 0, - .input = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb" - "\x40\xb3\x71\xc5\x22\x58\x31\x77" - 
"\x6d", - .ilen = 17, - .result = "\x79", - .rlen = 1, - }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", - .klen = 16, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f", - .assoc = "", - .alen = 0, - .input = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03" - "\x2b\xee\x62\x99\x7b\x98\x13\x1f" - "\xe0\x76\x4c\x2e\x53\x99\x4f\xbe" - "\xe1\xa8\x04\x7f\xe1\x71\xbe", - .ilen = 31, - .result = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .rlen = 15, - }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94", - .klen = 16, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15", - .assoc = "", - .alen = 0, - .input = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c" - "\x9d\xb7\x8c\x9a\xdb\x1f\xd2\x2e" - "\x23\xb6\xa4\xfb\xd3\x86\xdd\xbb" - "\xde\x54\x9b\xf5\x92\x8b\x93\xc5", - .ilen = 32, - .result = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .rlen = 16, - }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", - .klen = 16, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" - "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c", - .assoc = "", - .alen = 0, - .input = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b" - "\x71\xc3\x8a\x91\x22\x4f\x1b\xd2" - "\x33\x6d\x86\xbc\xf8\x2f\x06\xf9" - "\x82\x64\xc7\x72\x00\x30\xfc\xf0" - "\xf8", - .ilen = 33, - .result = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .rlen = 17, - }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", - .klen = 16, - .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22", - .assoc = "", - .alen = 0, - .input = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5" - "\xd6\x9d\xee\xee\xcf\xaa\xaf\xe1" - "\x45\x10\x96\xe0\xbf\x55\x0f\x4c" - "\x1a\xfd\xf4\xda\x4e\x10\xde\xc9" - "\x0e\x6f\xc7\x3c\x49\x94\x41\xfc" - "\x59\x28\x88\x3c\x79\x10\x6b", - .ilen = 47, - .result = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .rlen = 31, - }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", - .klen = 16, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28", - .assoc = "", - .alen = 0, - .input = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d" - "\x1c\x9f\x06\x54\x8b\x0b\xb4\x40" - "\xde\x11\x59\x3e\xfd\x74\xf6\x42" - "\x97\x17\xf7\x24\xb6\x7e\xc4\xc6" - "\x06\xa3\x94\xda\x3d\x7f\x55\x0a" - "\x92\x07\x2f\xa6\xf3\x6b\x2c\xfc", - .ilen = 48, - .result = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .rlen = 32, - }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .klen = 16, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e", - .assoc = "\xd5", - .alen = 1, - .input = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8" - "\x0f\x9a\x15\x60\x0e\x9b\x13\x8f", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .klen = 16, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .input = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c" - "\x84\xfb\x26\xfb\xd5\xca\x62\x39", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = 
"\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .klen = 16, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", - .alen = 16, - .input = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1" - "\x99\x4d\x6d\xb4\x67\x32\xb0\x3a", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .klen = 16, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41", - .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .input = "\x33\xb1\x42\x97\x8e\x16\x7b\x63" - "\x06\xba\x5b\xcb\xae\x6d\x8b\x56", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .klen = 16, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .input = "\xda\x44\x08\x8c\x2a\xa5\x07\x35" - "\x0b\x54\x4e\x6d\xe3\xfd\xc4\x5f", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .klen = 16, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .input = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88" - "\x40\x7f\x7b\x19\x7a\x52\x8c\xf0", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .klen = 16, - .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54", - .assoc = "\x40", - .alen = 1, - .input = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9" - "\xa0\x98\x09\x85\xbe\x56\x8e\x79" - "\xf4", - .ilen = 17, - .result = "\x4f", - .rlen = 1, - }, { - .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .klen = 16, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .input = "\x99\x2e\x84\x50\x64\x5c\xab\x29" - "\x20\xba\xb9\x2f\x62\x3a\xce\x2a" - "\x75\x25\x3b\xe3\x40\xe0\x1d\xfc" - "\x20\x63\x0b\x49\x7e\x97\x08", - .ilen = 31, - .result = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .rlen = 15, - }, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .klen = 16, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .alen = 16, - .input = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee" - "\x78\x08\x12\xec\x09\xaf\x53\x14" - "\x90\x3e\x3d\x76\xad\x71\x21\x08" - "\x77\xe5\x4b\x15\xc2\xe6\xbc\xdb", - .ilen = 32, - .result = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .rlen = 16, - }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .klen = 16, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .input = 
"\xf3\xe7\x95\x86\xcf\x34\x95\x96" - "\x17\xfe\x1b\xae\x1b\x31\xf2\x1a" - "\xbd\xbc\xc9\x4e\x11\x29\x09\x5c" - "\x05\xd3\xb4\x2e\x4a\x74\x59\x49" - "\x7d", - .ilen = 33, - .result = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .rlen = 17, - }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .klen = 16, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen = 31, - .input = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24" - "\x0d\x19\x15\x61\x65\x3b\x06\x26" - "\x71\xe8\x7e\x16\xdb\x96\x01\x01" - "\x52\xcd\x49\x5b\x07\x33\x4e\xe7" - "\xaa\x91\xf5\xd5\xc6\xfe\x41\xb5" - "\xed\x90\xce\xb9\xcd\xcc\xa1", - .ilen = 47, - .result = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .rlen = 31, - }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .klen = 16, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .input = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1" - "\xbc\x0f\x35\x97\x97\x0c\x4b\x18" - "\xce\x58\xc8\x3b\xd4\x85\x93\x79" - "\xcc\x9c\xea\xc1\x73\x13\x0b\x4c" - "\xcc\x6f\x28\xf8\xa4\x4e\xb8\x56" - "\x64\x4e\x47\xce\xb2\xb4\x92\xb4", - .ilen = 48, - .result = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .rlen = 32, - }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .klen = 16, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .input = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8" - "\xc5\x78\x50\x8b\x4a\xc9\xdf\x7f" - "\x4b\xfa\xc8\x2e\x67\x43\xf3\x63" - "\x42\x8e\x99\x5a\x9c\x0b\x84\x77" - "\xbc\x46\x76\x48\x82\xc7\x57\x96" - "\xe1\x65\xd1\xed\x1d\xdd\x80\x24" - "\xa6\x4d\xa9\xf1\x53\x8b\x5e\x0e" - "\x26\xb9\xcc\x37\xe5\x43\xe1\x5a" - "\x8a\xd6\x8c\x5a\xe4\x95\xd1\x8d" - "\xf7\x33\x64\xc1\xd3\xf2\xfc\x35" - "\x01", - .ilen = 81, - .result = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .rlen = 65, - }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .klen = 16, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .input = "\x4c\xa9\xac\x71\xed\x10\xa6\x24" - "\xb7\xa7\xdf\x8b\xf5\xc2\x41\xcb" - "\x05\xc9\xd6\x97\xb6\x10\x7f\x17" - 
"\xc2\xc0\x93\xcf\xe0\x94\xfd\x99" - "\xf2\x62\x25\x28\x01\x23\x6f\x8b" - "\x04\x52\xbc\xb0\x3e\x66\x52\x90" - "\x9f", - .ilen = 49, - .result = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .rlen = 33, - }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .klen = 16, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .alen = 16, - .input = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5" - "\x96\xe6\x97\xe4\x10\xeb\x40\x95" - "\xc5\x9a\xdf\x31\xd5\xa5\xa6\xec" - "\x05\xa8\x31\x50\x11\x19\x44", - .ilen = 31, - .result = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .rlen = 16, - }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .klen = 16, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .input = "\x30\x95\x7d\xea\xdc\x62\xc0\x88" - "\xa1\xe3\x8d\x8c\xac\x04\x10\xa7" - "\xfa\xfa\x07\xbd\xa0\xf0\x36\xeb" - "\x21\x93\x2e\x31\x84\x83", - .ilen = 30, - .result = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .rlen = 16, - }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .klen = 16, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .input = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16" - "\x63\x0d\x43\xd5\x49\xca\xa8\x85" - "\x49\xc0\xae\x13\xbc\x26\x1d\x4b", - .ilen = 24, - .result = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .rlen = 16, - }, -}; - -/* - * AEGIS-256 test vectors - generated via reference implementation from - * SUPERCOP (https://bench.cr.yp.to/supercop.html): - * - * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/aegis256/) - */ -static const struct aead_testvec aegis256_enc_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81" - "\xca\xb0\x82\x21\x41\xa8\xe0\x06" - "\x30\x0b\x37\xf6\xb6\x17\xe7\xb5", - .klen = 32, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03" - "\x95\x61\x05\x42\x82\x50\xc0\x0c" - "\x60\x16\x6f\xec\x6d\x2f\xcf\x6b", - .assoc = "", - .alen = 0, - .input = "", - .ilen = 0, - .result = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa" - "\xfa\x4b\xd8\xa2\x41\x9b\xc1\xb2", - .rlen = 16, - }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87" - "\xf4\x72\x8e\xa5\x46\x48\x62\x20" - "\xf1\x38\x16\xce\x90\x76\x87\x8c", - .klen = 32, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09" - "\xbf\x23\x11\xc6\x87\xf0\x42\x26" - "\x22\x44\x4e\xc4\x47\x8e\x6e\x41", - .assoc = "", - .alen = 0, - .input = "\x79", - .ilen = 1, - .result = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16" - "\x9e\x89\xd9\x06\xa6\xa8\x14\x29" - "\x8b", - .rlen = 17, - }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e" - "\x1f\x35\x9a\x29\x4b\xe8\xe4\x39" - "\xb3\x66\xf5\xa6\x6a\xd5\x26\x62", - .klen = 32, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f" - "\xe9\xe5\x1d\x4a\x8c\x90\xc4\x40" - "\xe3\x71\x2d\x9c\x21\xed\x0e\x18", - 
.assoc = "", - .alen = 0, - .input = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .ilen = 15, - .result = "\x09\x94\x1f\xa6\x13\xc3\x74\x75" - "\x17\xad\x8a\x0e\xd8\x66\x9a\x28" - "\xd7\x30\x66\x09\x2a\xdc\xfa\x2a" - "\x9f\x3b\xd7\xdd\x66\xd1\x2b", - .rlen = 31, - }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94" - "\x49\xf7\xa5\xad\x50\x88\x66\x53" - "\x74\x94\xd4\x7f\x44\x34\xc5\x39", - .klen = 32, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15" - "\x14\xa8\x28\xce\x92\x30\x46\x59" - "\xa4\x9f\x0b\x75\xfb\x4c\xad\xee", - .assoc = "", - .alen = 0, - .input = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .ilen = 16, - .result = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f" - "\x54\x63\x4e\x7f\xc9\x8e\xfa\x70" - "\x7b\xe5\x8d\x78\xbc\xe9\xb6\xa1" - "\x29\x17\xc8\x3b\x52\xa4\x98\x72", - .rlen = 32, - }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a" - "\x74\xb9\xb1\x32\x55\x28\xe8\x6d" - "\x35\xc1\xb3\x57\x1f\x93\x64\x0f", - .klen = 32, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" - "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c" - "\x3e\x6a\x34\x53\x97\xd0\xc8\x73" - "\x66\xcd\xea\x4d\xd5\xab\x4c\xc5", - .assoc = "", - .alen = 0, - .input = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .ilen = 17, - .result = "\x71\x6b\x37\x0b\x02\x61\x28\x12" - "\x83\xab\x66\x90\x84\xc7\xd1\xc5" - "\xb2\x7a\xb4\x7b\xb4\xfe\x02\xb2" - "\xc0\x00\x39\x13\xb5\x51\x68\x44" - "\xad", - .rlen = 33, - }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0" - "\x9e\x7c\xbc\xb6\x5b\xc8\x6a\x86" - "\xf7\xef\x91\x30\xf9\xf2\x04\xe6", - .klen = 32, - .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22" - "\x69\x2c\x3f\xd7\x9c\x70\x4a\x8d" - "\x27\xfa\xc9\x26\xaf\x0a\xeb\x9c", - .assoc = "", - .alen = 0, - .input = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .ilen = 31, - .result = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f" - "\x06\x3b\x52\x18\x49\x75\x1b\xf0" - "\x53\x09\x72\x7b\x45\x79\xe0\xbe" - "\x89\x85\x23\x15\xb8\x79\x07\x4c" - "\x53\x7a\x15\x37\x0a\xee\xb7\xfb" - "\xc4\x1f\x12\x27\xcf\x77\x90", - .rlen = 47, - }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6" - "\xc8\x3e\xc8\x3a\x60\x68\xec\xa0" - "\xb8\x1c\x70\x08\xd3\x51\xa3\xbd", - .klen = 32, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28" - "\x93\xef\x4b\x5b\xa1\x10\xcc\xa6" - "\xe8\x28\xa8\xfe\x89\x69\x8b\x72", - .assoc = "", - .alen = 0, - .input = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .ilen = 32, - .result = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4" - "\x33\xfe\xeb\xa8\xb7\x9b\xb2\xd7" - "\xeb\x0f\x05\x2b\xba\xb3\xca\xef" - "\xf6\xd1\xb6\xc0\xb9\x9b\x85\xc5" - "\xbf\x7a\x3e\xcc\x31\x76\x09\x80" - "\x32\x5d\xbb\xe8\x38\x0e\x77\xd3", - .rlen = 48, - }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad" - "\xf3\x00\xd4\xbf\x65\x08\x6e\xb9" - "\x7a\x4a\x4f\xe0\xad\xb0\x42\x93", - .klen = 32, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e" - "\xbd\xb1\x57\xe0\xa6\xb0\x4e\xc0" - "\xaa\x55\x87\xd6\x63\xc8\x2a\x49", - .assoc = "\xd5", - .alen = 1, - .input = "", - .ilen = 0, - .result = 
"\x96\x43\x30\xca\x6c\x4f\xd7\x12" - "\xba\xd9\xb3\x18\x86\xdf\xc3\x52", - .rlen = 16, - }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3" - "\x1d\xc3\xdf\x43\x6a\xa8\xf0\xd3" - "\x3b\x77\x2e\xb9\x87\x0f\xe1\x6a", - .klen = 32, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34" - "\xe8\x73\x62\x64\xab\x50\xd0\xda" - "\x6b\x83\x66\xaf\x3e\x27\xc9\x1f", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .input = "", - .ilen = 0, - .result = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83" - "\x11\x9f\xb0\x74\xee\xc7\x03\xdd", - .rlen = 16, - }, { - .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9" - "\x47\x85\xeb\xc7\x6f\x48\x72\xed" - "\xfc\xa5\x0d\x91\x61\x6e\x81\x40", - .klen = 32, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b" - "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3" - "\x2d\xb0\x45\x87\x18\x86\x68\xf6", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", - .alen = 16, - .input = "", - .ilen = 0, - .result = "\x16\x44\x73\x33\x5d\xf2\xb9\x04" - "\x6b\x79\x98\xef\xdb\xd5\xc5\xf1", - .rlen = 16, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf" - "\x72\x47\xf6\x4b\x74\xe8\xf4\x06" - "\xbe\xd3\xec\x6a\x3b\xcd\x20\x17", - .klen = 32, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" - "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d" - "\xee\xde\x23\x60\xf2\xe5\x08\xcc", - .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .input = "", - .ilen = 0, - .result = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45" - "\x98\x54\x8c\xed\x3d\x17\xf0\xdd", - .rlen = 16, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6" - "\x9c\x0a\x02\xd0\x79\x88\x76\x20" - "\x7f\x00\xca\x42\x15\x2c\xbf\xed", - .klen = 32, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47" - "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .input = "", - .ilen = 0, - .result = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0" - "\xa4\x96\x2f\x0d\x53\xc2\xf8\xfc", - .rlen = 16, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc" - "\xc6\xcc\x0e\x54\x7f\x28\xf8\x3a" - "\x40\x2e\xa9\x1a\xf0\x8b\x5e\xc4", - .klen = 32, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" - "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .input = "", - .ilen = 0, - .result = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1" - "\xd2\x64\x3e\x66\x6a\xb2\x03\xc0", - .rlen = 16, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2" - "\xf1\x8e\x19\xd8\x84\xc8\x7a\x53" - "\x02\x5b\x88\xf3\xca\xea\xfe\x9b", - .klen = 32, - .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54" - "\xbb\x3f\x9c\xf9\xc5\x70\x5a\x5a" - "\x32\x67\xc0\xe9\x80\x02\xe5\x50", - .assoc = "\x40", - .alen = 1, - .input = "\x4f", - .ilen = 1, - .result = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b" - "\x7a\x3f\x81\xf7\xfc\x1b\x79\x83" - "\xc7", - .rlen = 17, - }, { - .key = 
"\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8" - "\x1b\x50\x25\x5d\x89\x68\xfc\x6d" - "\xc3\x89\x67\xcb\xa4\x49\x9d\x71", - .klen = 32, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a" - "\xe6\x01\xa8\x7e\xca\x10\xdc\x73" - "\xf4\x94\x9f\xc1\x5a\x61\x85\x27", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .input = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .ilen = 15, - .result = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba" - "\x7e\x98\x83\x02\x34\x23\xf7\x94" - "\xde\x35\xe6\x1d\x14\x18\xe5\x38" - "\x14\x80\x6a\xa7\x1b\xae\x1d", - .rlen = 31, - }, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf" - "\x46\x13\x31\xe1\x8e\x08\x7e\x87" - "\x85\xb6\x46\xa3\x7e\xa8\x3c\x48", - .klen = 32, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60" - "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d" - "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .alen = 16, - .input = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .ilen = 16, - .result = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01" - "\xbe\x28\x98\x10\x6f\xe9\x61\x32" - "\x96\xbb\xb1\x2e\x8f\x0c\x44\xb9" - "\x46\x2d\x55\xe3\x42\x67\xf2\xaf", - .rlen = 32, - }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5" - "\x70\xd5\x3c\x65\x93\xa8\x00\xa0" - "\x46\xe4\x25\x7c\x58\x08\xdb\x1e", - .klen = 32, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" - "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7" - "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .input = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .ilen = 17, - .result = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8" - "\x45\x65\x1b\x72\x9b\xaa\xa3\x1e" - "\x87\x9d\x26\xdf\xff\x81\x11\xd2" - "\x47\x41\xb9\x24\xc1\x8a\xa3\x8b" - "\x55", - .rlen = 33, - }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb" - "\x9a\x97\x48\xe9\x98\x48\x82\xba" - "\x07\x11\x04\x54\x32\x67\x7b\xf5", - .klen = 32, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" - "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen = 31, - .input = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .ilen = 31, - .result = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5" - "\x69\x7f\x7a\xdd\x8c\x46\xda\xba" - "\x0a\x5c\x0e\x7f\xac\xee\x02\xd2" - "\xe5\x4b\x0a\xba\xb8\xa4\x7b\x66" - "\xde\xae\xdb\xc2\xc0\x0b\xf7\x2b" - "\xdf\xb8\xea\xd8\xa9\x38\xed", - .rlen = 47, - }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1" - "\xc5\x5a\x53\x6e\x9d\xe8\x04\xd4" - "\xc9\x3f\xe2\x2d\x0c\xc6\x1a\xcb", - .klen = 32, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73" - "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .input = 
"\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .ilen = 32, - .result = "\xd3\x68\x14\x70\x3c\x01\x43\x86" - "\x02\xab\xbe\x75\xaa\xe7\xf5\x53" - "\x5c\x05\xbd\x9b\x19\xbb\x2a\x61" - "\x8f\x69\x05\x75\x8e\xca\x60\x0c" - "\x5b\xa2\x48\x61\x32\x74\x11\x2b" - "\xf6\xcf\x06\x78\x6f\x78\x1a\x4a", - .rlen = 48, - }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7" - "\xef\x1c\x5f\xf2\xa3\x88\x86\xed" - "\x8a\x6d\xc1\x05\xe7\x25\xb9\xa2", - .klen = 32, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" - "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .input = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .ilen = 65, - .result = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2" - "\x15\x3a\x6c\x72\x83\x9b\xb1\x75" - "\xea\xf2\xfc\xff\xc6\xf1\x13\xa4" - "\x1a\x93\x33\x79\x97\x82\x81\xc0" - "\x96\xc2\x00\xab\x39\xae\xa1\x62" - "\x53\xa3\x86\xc9\x07\x8c\xaf\x22" - "\x47\x31\x29\xca\x4a\x95\xf5\xd5" - "\x20\x63\x5a\x54\x80\x2c\x4a\x63" - "\xfb\x18\x73\x31\x4f\x08\x21\x5d" - "\x20\xe9\xc3\x7e\xea\x25\x77\x3a" - "\x65", - .rlen = 81, - }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe" - "\x19\xde\x6b\x76\xa8\x28\x08\x07" - "\x4b\x9a\xa0\xdd\xc1\x84\x58\x79", - .klen = 32, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f" - "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .input = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .ilen = 33, - .result = "\x33\xc1\xda\xfa\x15\x21\x07\x8e" - "\x93\x68\xea\x64\x7b\x3d\x4b\x6b" - "\x71\x5e\x5e\x6b\x92\xaa\x65\xc2" - "\x7a\x2a\xc1\xa9\x0a\xa1\x24\x81" - "\x26\x3a\x5a\x09\xe8\xce\x73\x72" - "\xde\x7b\x58\x9e\x85\xb9\xa4\x28" - "\xda", - .rlen = 49, - }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04" - "\x44\xa1\x76\xfb\xad\xc8\x8a\x21" - "\x0d\xc8\x7f\xb6\x9b\xe3\xf8\x4f", - .klen = 32, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85" - "\x0e\x51\xf9\x1c\xee\x70\x6a\x27" - "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .alen = 16, - .input = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .ilen = 16, - .result = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02" - "\x0f\xdf\xc9\x6e\x37\x1e\x57\x99" - "\x07\x2a\x1a\xac\xd1\xda\xfd\x3b" - "\xc7\xff\xbd\xbc\x85\x09\x0b", - .rlen = 31, - }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a" - 
"\x6e\x63\x82\x7f\xb2\x68\x0c\x3a" - "\xce\xf5\x5e\x8e\x75\x42\x97\x26", + .key = "\xab\xd0\xe9\x33\x07\x26\xe5\x83" + "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46" + "\xf9\x8f\xad\xe3\x02\x13\x83\x77" + "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b", .klen = 32, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c" - "\x39\x14\x05\xa0\xf3\x10\xec\x41" - "\xff\x01\x95\x84\x2b\x59\x7f\xdb", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .input = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .ilen = 16, - .result = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e" - "\x6c\xd9\x84\x63\x70\x97\x61\x37" - "\x08\x2f\x16\x90\x9e\x62\x30\x0d" - "\x62\xd5\xc8\xf0\x46\x1a", - .rlen = 30, + .iv = "\x03\x24\xa7\x8b\x07\xcb\xcc\x0e" + "\xe6\x33\xbf\xf5\x00\x00\x00\x00", + .assoc = "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f" + "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d" + "\xab\x90\x65\x8d\x8e\xca\x4d\x4f" + "\x16\x0c\x40\x90\x4b\xc7\x36\x73", + .alen = 32, + .ptext = "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92" + "\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d" + "\x6c\xde\xbc\xf1\x90\xea\x6a\xb2" + "\x35\x86\x36\xaf\x5c\xfe\x4b\x3a", + .plen = 32, + .ctext = "\x83\x6f\x40\x87\x72\xcf\xc1\x13" + "\xef\xbb\x80\x21\x04\x6c\x58\x09" + "\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7" + "\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf" + "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d" + "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8", + .clen = 48, }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10" - "\x98\x25\x8d\x03\xb7\x08\x8e\x54" - "\x90\x23\x3d\x67\x4f\xa1\x36\xfc", - .klen = 32, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92" - "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a" - "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .input = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .ilen = 16, - .result = "\xce\xf3\x17\x87\x49\xc2\x00\x46" - "\xc6\x12\x5c\x8f\x81\x38\xaa\x55" - "\xf8\x67\x75\xf1\x75\xe3\x2a\x24", - .rlen = 24, - }, -}; - -static const struct aead_testvec aegis256_dec_tv_template[] = { - { - .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81" - "\xca\xb0\x82\x21\x41\xa8\xe0\x06" - "\x30\x0b\x37\xf6\xb6\x17\xe7\xb5", - .klen = 32, - .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" - "\x40\x6d\x59\x48\xfc\x92\x61\x03" - "\x95\x61\x05\x42\x82\x50\xc0\x0c" - "\x60\x16\x6f\xec\x6d\x2f\xcf\x6b", - .assoc = "", + /* This is taken from FIPS CAVS. 
*/ + .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" + "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", + .klen = 16, + .iv = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab" + "\xd8\xa6\xb2\xd8\x00\x00\x00\x00", .alen = 0, - .input = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa" - "\xfa\x4b\xd8\xa2\x41\x9b\xc1\xb2", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "\x00", + .plen = 0, + .ctext = "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b", + .clen = 8, + .novrfy = 1, }, { - .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87" - "\xf4\x72\x8e\xa5\x46\x48\x62\x20" - "\xf1\x38\x16\xce\x90\x76\x87\x8c", - .klen = 32, - .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" - "\xc1\x47\x0b\xda\xf6\xb6\x23\x09" - "\xbf\x23\x11\xc6\x87\xf0\x42\x26" - "\x22\x44\x4e\xc4\x47\x8e\x6e\x41", - .assoc = "", + .key = "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1" + "\xff\x80\x2e\x48\x7d\x82\xf8\xb9", + .klen = 16, + .iv = "\x03\xaf\x94\x87\x78\x35\x82\x81" + "\x7f\x88\x94\x68\x00\x00\x00\x00", .alen = 0, - .input = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16" - "\x9e\x89\xd9\x06\xa6\xa8\x14\x29" - "\x8b", - .ilen = 17, - .result = "\x79", - .rlen = 1, + .ptext = "\x00", + .plen = 0, + .ctext = "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3", + .clen = 8, }, { - .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e" - "\x1f\x35\x9a\x29\x4b\xe8\xe4\x39" - "\xb3\x66\xf5\xa6\x6a\xd5\x26\x62", - .klen = 32, - .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" - "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f" - "\xe9\xe5\x1d\x4a\x8c\x90\xc4\x40" - "\xe3\x71\x2d\x9c\x21\xed\x0e\x18", - .assoc = "", - .alen = 0, - .input = "\x09\x94\x1f\xa6\x13\xc3\x74\x75" - "\x17\xad\x8a\x0e\xd8\x66\x9a\x28" - "\xd7\x30\x66\x09\x2a\xdc\xfa\x2a" - "\x9f\x3b\xd7\xdd\x66\xd1\x2b", - .ilen = 31, - .result = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47", - .rlen = 15, + .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" + "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8", + .klen = 16, + .iv = "\x03\xc6\xfb\x7d\x80\x0d\x13\xab" + "\xd8\xa6\xb2\xd8\x00\x00\x00\x00", + .assoc = "\xf3\x94\x87\x78\x35\x82\x81\x7f" + "\x88\x94\x68\xb1\x78\x6b\x2b\xd6" + "\x04\x1f\x4e\xed\x78\xd5\x33\x66" + "\xd8\x94\x99\x91\x81\x54\x62\x57", + .alen = 32, + .ptext = "\x50\x82\x3e\x07\xe2\x1e\xb6\xfb" + "\x33\xe4\x73\xce\xd2\xfb\x95\x79" + "\xe8\xb4\xb5\x77\x11\x10\x62\x6f" + "\x6a\x82\xd1\x13\xec\xf5\xd0\x48", + .plen = 32, + .ctext = "\xf0\x7c\x29\x02\xae\x1c\x2f\x55" + "\xd0\xd1\x3d\x1a\xa3\x6d\xe4\x0a" + "\x86\xb0\x87\x6b\x62\x33\x8c\x34" + "\xce\xab\x57\xcc\x79\x0b\xe0\x6f" + "\x5c\x3e\x48\x1f\x6c\x46\xf7\x51" + "\x8b\x84\x83\x2a\xc1\x05\xb8\xc5", + .clen = 48, + .novrfy = 1, }, { - .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94" - "\x49\xf7\xa5\xad\x50\x88\x66\x53" - "\x74\x94\xd4\x7f\x44\x34\xc5\x39", - .klen = 32, - .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" - "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15" - "\x14\xa8\x28\xce\x92\x30\x46\x59" - "\xa4\x9f\x0b\x75\xfb\x4c\xad\xee", - .assoc = "", - .alen = 0, - .input = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f" - "\x54\x63\x4e\x7f\xc9\x8e\xfa\x70" - "\x7b\xe5\x8d\x78\xbc\xe9\xb6\xa1" - "\x29\x17\xc8\x3b\x52\xa4\x98\x72", - .ilen = 32, - .result = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" - "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .rlen = 16, + .key = "\x61\x0e\x8c\xae\xe3\x23\xb6\x38" + "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8", + .klen = 16, + .iv = "\x03\x05\xe0\xc9\x0f\xed\x34\xea" + "\x97\xd4\x3b\xdf\x00\x00\x00\x00", + .assoc = "\x49\x5c\x50\x1f\x1d\x94\xcc\x81" + "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1" + "\xd8\x5c\x42\x68\xe0\x6c\xda\x89" + 
"\x05\xac\x56\xac\x1b\x2a\xd3\x86", + .alen = 32, + .ptext = "\x75\x05\xbe\xc2\xd9\x1e\xde\x60" + "\x47\x3d\x8c\x7d\xbd\xb5\xd9\xb7" + "\xf2\xae\x61\x05\x8f\x82\x24\x3f" + "\x9c\x67\x91\xe1\x38\x4f\xe4\x0c", + .plen = 32, + .ctext = "\x39\xbe\x7d\x15\x62\x77\xf3\x3c" + "\xad\x83\x52\x6d\x71\x03\x25\x1c" + "\xed\x81\x3a\x9a\x16\x7d\x19\x80" + "\x72\x04\x72\xd0\xf6\xff\x05\x0f" + "\xb7\x14\x30\x00\x32\x9e\xa0\xa6" + "\x9e\x5a\x18\xa1\xb8\xfe\xdb\xd3", + .clen = 48, }, { - .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a" - "\x74\xb9\xb1\x32\x55\x28\xe8\x6d" - "\x35\xc1\xb3\x57\x1f\x93\x64\x0f", - .klen = 32, - .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" - "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c" - "\x3e\x6a\x34\x53\x97\xd0\xc8\x73" - "\x66\xcd\xea\x4d\xd5\xab\x4c\xc5", - .assoc = "", - .alen = 0, - .input = "\x71\x6b\x37\x0b\x02\x61\x28\x12" - "\x83\xab\x66\x90\x84\xc7\xd1\xc5" - "\xb2\x7a\xb4\x7b\xb4\xfe\x02\xb2" - "\xc0\x00\x39\x13\xb5\x51\x68\x44" - "\xad", - .ilen = 33, - .result = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" - "\xd3", - .rlen = 17, + .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" + "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" + "\xa4\x48\x93\x39\x26\x71\x4a\xc6", + .klen = 24, + .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" + "\x57\xba\xfd\x9e\x00\x00\x00\x00", + .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" + "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" + "\xa4\xf0\x13\x05\xd1\x77\x99\x67" + "\x11\xc4\xc6\xdb\x00\x56\x36\x61", + .alen = 32, + .ptext = "\x00", + .plen = 0, + .ctext = "\x71\x99\xfa\xf4\x44\x12\x68\x9b", + .clen = 8, + }, { + .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" + "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" + "\x29\xa0\xba\x9e\x48\x78\xd1\xba", + .klen = 24, + .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" + "\x57\xba\xfd\x9e\x00\x00\x00\x00", + .assoc = "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1" + "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64" + "\xa4\xf0\x13\x05\xd1\x77\x99\x67" + "\x11\xc4\xc6\xdb\x00\x56\x36\x61", + .alen = 32, + .ptext = "\x85\x34\x66\x42\xc8\x92\x0f\x36" + "\x58\xe0\x6b\x91\x3c\x98\x5c\xbb" + "\x0a\x85\xcc\x02\xad\x7a\x96\xe9" + "\x65\x43\xa4\xc3\x0f\xdc\x55\x81", + .plen = 32, + .ctext = "\xfb\xe5\x5d\x34\xbe\xe5\xe8\xe7" + "\x5a\xef\x2f\xbf\x1f\x7f\xd4\xb2" + "\x66\xca\x61\x1e\x96\x7a\x61\xb3" + "\x1c\x16\x45\x52\xba\x04\x9c\x9f" + "\xb1\xd2\x40\xbc\x52\x7c\x6f\xb1", + .clen = 40, + }, { + .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" + "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" + "\x29\xa0\xba\x9e\x48\x78\xd1\xba", + .klen = 24, + .iv = "\x03\xd1\xfc\x57\x9c\xfe\xb8\x9c" + "\xad\x71\xaa\x1f\x00\x00\x00\x00", + .assoc = "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6" + "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3" + "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7" + "\x3c\xa1\x52\x13\x03\x8a\x23\x3a", + .alen = 32, + .ptext = "\x02\x87\x4d\x28\x80\x6e\xb2\xed" + "\x99\x2a\xa8\xca\x04\x25\x45\x90" + "\x1d\xdd\x5a\xd9\xe4\xdb\x9c\x9c" + "\x49\xe9\x01\xfe\xa7\x80\x6d\x6b", + .plen = 32, + .ctext = "\x3f\x66\xb0\x9d\xe5\x4b\x38\x00" + "\xc6\x0e\x6e\xe5\xd6\x98\xa6\x37" + "\x8c\x26\x33\xc6\xb2\xa2\x17\xfa" + "\x64\x19\xc0\x30\xd7\xfc\x14\x6b" + "\xe3\x33\xc2\x04\xb0\x37\xbe\x3f" + "\xa9\xb4\x2d\x68\x03\xa3\x44\xef", + .clen = 48, + .novrfy = 1, }, { - .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0" - "\x9e\x7c\xbc\xb6\x5b\xc8\x6a\x86" - "\xf7\xef\x91\x30\xf9\xf2\x04\xe6", + .key = "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01" + "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c" + "\x20\x2c\xad\x30\xc2\x2b\x41\xfb" + "\x0e\x85\xbc\x33\xad\x0f\x2b\xff", .klen = 32, - .iv = 
"\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" - "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22" - "\x69\x2c\x3f\xd7\x9c\x70\x4a\x8d" - "\x27\xfa\xc9\x26\xaf\x0a\xeb\x9c", - .assoc = "", + .iv = "\x03\xee\x49\x83\xe9\xa9\xff\xe9" + "\x57\xba\xfd\x9e\x00\x00\x00\x00", .alen = 0, - .input = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f" - "\x06\x3b\x52\x18\x49\x75\x1b\xf0" - "\x53\x09\x72\x7b\x45\x79\xe0\xbe" - "\x89\x85\x23\x15\xb8\x79\x07\x4c" - "\x53\x7a\x15\x37\x0a\xee\xb7\xfb" - "\xc4\x1f\x12\x27\xcf\x77\x90", - .ilen = 47, - .result = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" - "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" - "\x88\x11\x39\x12\x1c\x3a\xbb", - .rlen = 31, + .ptext = "\x00", + .plen = 0, + .ctext = "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2", + .clen = 8, }, { - .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6" - "\xc8\x3e\xc8\x3a\x60\x68\xec\xa0" - "\xb8\x1c\x70\x08\xd3\x51\xa3\xbd", + .key = "\x39\xbb\xa7\xbe\x59\x97\x9e\x73" + "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3" + "\xa4\x48\x93\x39\x26\x71\x4a\xc6" + "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb", .klen = 32, - .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" - "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28" - "\x93\xef\x4b\x5b\xa1\x10\xcc\xa6" - "\xe8\x28\xa8\xfe\x89\x69\x8b\x72", - .assoc = "", + .iv = "\x03\x85\x34\x66\x42\xc8\x92\x0f" + "\x36\x58\xe0\x6b\x00\x00\x00\x00", .alen = 0, - .input = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4" - "\x33\xfe\xeb\xa8\xb7\x9b\xb2\xd7" - "\xeb\x0f\x05\x2b\xba\xb3\xca\xef" - "\xf6\xd1\xb6\xc0\xb9\x9b\x85\xc5" - "\xbf\x7a\x3e\xcc\x31\x76\x09\x80" - "\x32\x5d\xbb\xe8\x38\x0e\x77\xd3", - .ilen = 48, - .result = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" - "\x28\x50\x51\x9d\x24\x60\x8d\xb3" - "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", - .rlen = 32, + .ptext = "\xdc\x56\xf2\x71\xb0\xb1\xa0\x6c" + "\xf0\x97\x3a\xfb\x6d\xe7\x32\x99" + "\x3e\xaf\x70\x5e\xb2\x4d\xea\x39" + "\x89\xd4\x75\x7a\x63\xb1\xda\x93", + .plen = 32, + .ctext = "\x48\x01\x5e\x02\x24\x04\x66\x47" + "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd" + "\xa5\xa9\x87\x8d\x84\xee\x2e\x77" + "\xbb\x86\xb9\xf5\x5c\x6c\xff\xf6" + "\x72\xc3\x8e\xf7\x70\xb1\xb2\x07" + "\xbc\xa8\xa3\xbd\x83\x7c\x1d\x2a", + .clen = 48, + .novrfy = 1, }, { - .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad" - "\xf3\x00\xd4\xbf\x65\x08\x6e\xb9" - "\x7a\x4a\x4f\xe0\xad\xb0\x42\x93", + .key = "\x58\x5d\xa0\x96\x65\x1a\x04\xd7" + "\x96\xe5\xc5\x68\xaa\x95\x35\xe0" + "\x29\xa0\xba\x9e\x48\x78\xd1\xba" + "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b", .klen = 32, - .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" - "\xc6\x64\x37\x42\xd2\x90\xb3\x2e" - "\xbd\xb1\x57\xe0\xa6\xb0\x4e\xc0" - "\xaa\x55\x87\xd6\x63\xc8\x2a\x49", - .assoc = "\xd5", - .alen = 1, - .input = "\x96\x43\x30\xca\x6c\x4f\xd7\x12" - "\xba\xd9\xb3\x18\x86\xdf\xc3\x52", - .ilen = 16, - .result = "", - .rlen = 0, + .iv = "\x03\xcf\x76\x3f\xd9\x95\x75\x8f" + "\x44\x89\x40\x7b\x00\x00\x00\x00", + .assoc = "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88" + "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b" + "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b" + "\xfb\x36\x03\x11\xb4\xd9\xf2\xfe", + .alen = 32, + .ptext = "\xc2\x54\xc8\xde\x78\x87\x77\x40" + "\x49\x71\xe4\xb7\xe7\xcb\x76\x61" + "\x0a\x41\xb9\xe9\xc0\x76\x54\xab" + "\x04\x49\x3b\x19\x93\x57\x25\x5d", + .plen = 32, + .ctext = "\x48\x58\xd6\xf3\xad\x63\x58\xbf" + "\xae\xc7\x5e\xae\x83\x8f\x7b\xe4" + "\x78\x5c\x4c\x67\x71\x89\x94\xbf" + "\x47\xf1\x63\x7e\x1c\x59\xbd\xc5" + "\x7f\x44\x0a\x0c\x01\x18\x07\x92" + "\xe1\xd3\x51\xce\x32\x6d\x0c\x5b", + .clen = 48, + }, +}; + +/* + * rfc4309 refers to 
section 8 of rfc3610 for test vectors, but they all + * use a 13-byte nonce, we only support an 11-byte nonce. Worse, + * they use AD lengths which are not valid ESP header lengths. + * + * These vectors are copied/generated from the ones for rfc4106 with + * the key truncated by one byte.. + */ +static const struct aead_testvec aes_ccm_rfc4309_tv_template[] = { + { /* Generated using Crypto++ */ + .key = zeroed_string, + .klen = 19, + .iv = zeroed_string, + .ptext = zeroed_string, + .plen = 16, + .assoc = zeroed_string, + .alen = 16, + .ctext = "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F" + "\x12\x50\xE8\xDE\x81\x3C\x63\x08" + "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5" + "\x27\x50\x01\xAC\x03\x33\x39\xFB", + .clen = 32, + },{ + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00", + .klen = 19, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", + .ptext = zeroed_string, + .plen = 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .alen = 16, + .ctext = "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F" + "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17" + "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA" + "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0", + .clen = 32, + }, { - .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3" - "\x1d\xc3\xdf\x43\x6a\xa8\xf0\xd3" - "\x3b\x77\x2e\xb9\x87\x0f\xe1\x6a", - .klen = 32, - .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34" - "\xe8\x73\x62\x64\xab\x50\xd0\xda" - "\x6b\x83\x66\xaf\x3e\x27\xc9\x1f", - .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" - "\x68\x75\x16\xf8\xcb\x7e\xa7", - .alen = 15, - .input = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83" - "\x11\x9f\xb0\x74\xee\xc7\x03\xdd", - .ilen = 16, - .result = "", - .rlen = 0, + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00", + .klen = 19, + .iv = zeroed_string, + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .plen = 16, + .assoc = zeroed_string, + .alen = 16, + .ctext = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" + "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" + "\xA1\xE2\xC2\x42\x2B\x81\x70\x40" + "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C", + .clen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00", + .klen = 19, + .iv = zeroed_string, + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .plen = 16, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x00\x00\x00\x00\x00\x00\x00\x00", + .alen = 16, + .ctext = "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6" + "\x61\xF4\xF5\x41\x03\x4A\xE3\x86" + "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9" + "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E", + .clen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00", + .klen = 19, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .plen = 16, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .alen = 16, + .ctext = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" + "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" + "\x43\x8E\x76\x57\x3B\xB4\x05\xE8" + "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED", + .clen = 32, + }, { + .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c" + "\x6d\x6a\x8f\x94\x67\x30\x83\x08" + "\x00\x00\x00", + .klen = 19, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x01", + .ptext = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + 
"\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x01\x01\x01\x01\x01\x01\x01\x01", + .plen = 64, + .assoc = "\x01\x01\x01\x01\x01\x01\x01\x01" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .alen = 16, + .ctext = "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E" + "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16" + "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4" + "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02" + "\x9B\xAB\x39\x18\xEB\x94\x34\x36" + "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49" + "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E" + "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB" + "\x02\x73\xDD\xE7\x30\x4A\x30\x54" + "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F", + .clen = 80, + }, { + .key = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" + "\x00\x00\x00", + .klen = 19, + .iv = "\x00\x00\x45\x67\x89\xab\xcd\xef", + .ptext = "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff" + "\xff\xff\xff\xff\xff\xff\xff\xff", + .plen = 192, + .assoc = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa" + "\xaa\xaa\xaa\xaa\x00\x00\x45\x67" + "\x89\xab\xcd\xef", + .alen = 20, + .ctext = "\x64\x17\xDC\x24\x9D\x92\xBA\x5E" + "\x7C\x64\x6D\x33\x46\x77\xAC\xB1" + "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95" + "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C" + "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB" + "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01" + "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04" + "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B" + "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09" + "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79" + "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D" + "\x22\xEB\xD1\x09\x80\x3F\x5A\x70" + "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B" + "\x36\x12\x00\x89\xAA\x5D\x55\xDA" + "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6" + "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F" + "\x2B\x50\x44\x52\xC2\x10\x7D\x38" + "\x82\x64\x83\x0C\xAE\x49\xD0\xE5" + "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43" + "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC" + "\x19\x6D\x60\x66\x61\xF9\x9A\x3F" + "\x75\xFC\x38\x53\x5B\xB5\xCD\x52" + "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98" + "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E" + "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC" + "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A", + .clen = 208, + }, { /* From draft-mcgrew-gcm-test-01 */ + .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" + "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" + "\x2E\x44\x3B", + .klen = 19, + .iv = "\x49\x56\xED\x7E\x3B\x24\x4C\xFE", + .ptext = "\x45\x00\x00\x48\x69\x9A\x00\x00" + "\x80\x11\x4D\xB7\xC0\xA8\x01\x02" + "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56" + "\x38\xD3\x01\x00\x00\x01\x00\x00" + "\x00\x00\x00\x00\x04\x5F\x73\x69" + "\x70\x04\x5F\x75\x64\x70\x03\x73" + "\x69\x70\x09\x63\x79\x62\x65\x72" + "\x63\x69\x74\x79\x02\x64\x6B\x00" + "\x00\x21\x00\x01\x01\x02\x02\x01", + .plen = 72, + .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" + "\x00\x00\x00\x00\x49\x56\xED\x7E" + "\x3B\x24\x4C\xFE", + .alen = 20, + .ctext = 
"\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB" + "\x83\x60\xF5\xBA\x3A\x56\x79\xE6" + "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E" + "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF" + "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0" + "\x39\xF8\x53\x6D\x31\x22\x2B\xBF" + "\x98\x81\xFC\x34\xEE\x85\x36\xCD" + "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35" + "\x18\x85\x54\xB2\xBC\xDD\x3F\x43" + "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC" + "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF", + .clen = 88, }, { - .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9" - "\x47\x85\xeb\xc7\x6f\x48\x72\xed" - "\xfc\xa5\x0d\x91\x61\x6e\x81\x40", - .klen = 32, - .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" - "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b" - "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3" - "\x2d\xb0\x45\x87\x18\x86\x68\xf6", - .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" - "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", + .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" + "\x6D\x6A\x8F\x94\x67\x30\x83\x08" + "\xCA\xFE\xBA", + .klen = 19, + .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", + .ptext = "\x45\x00\x00\x3E\x69\x8F\x00\x00" + "\x80\x11\x4D\xCC\xC0\xA8\x01\x02" + "\xC0\xA8\x01\x01\x0A\x98\x00\x35" + "\x00\x2A\x23\x43\xB2\xD0\x01\x00" + "\x00\x01\x00\x00\x00\x00\x00\x00" + "\x03\x73\x69\x70\x09\x63\x79\x62" + "\x65\x72\x63\x69\x74\x79\x02\x64" + "\x6B\x00\x00\x01\x00\x01\x00\x01", + .plen = 64, + .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" + "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", .alen = 16, - .input = "\x16\x44\x73\x33\x5d\xf2\xb9\x04" - "\x6b\x79\x98\xef\xdb\xd5\xc5\xf1", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf" - "\x72\x47\xf6\x4b\x74\xe8\xf4\x06" - "\xbe\xd3\xec\x6a\x3b\xcd\x20\x17", - .klen = 32, - .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" - "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d" - "\xee\xde\x23\x60\xf2\xe5\x08\xcc", - .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" - "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" - "\x07", - .alen = 17, - .input = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45" - "\x98\x54\x8c\xed\x3d\x17\xf0\xdd", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6" - "\x9c\x0a\x02\xd0\x79\x88\x76\x20" - "\x7f\x00\xca\x42\x15\x2c\xbf\xed", - .klen = 32, - .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47" - "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3", - .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" - "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" - "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" - "\xe0\x17\x3a\x2e\x83\x5c\x8f", - .alen = 31, - .input = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0" - "\xa4\x96\x2f\x0d\x53\xc2\xf8\xfc", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc" - "\xc6\xcc\x0e\x54\x7f\x28\xf8\x3a" - "\x40\x2e\xa9\x1a\xf0\x8b\x5e\xc4", - .klen = 32, - .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" - "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", - .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" - "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" - "\x5c\x2d\x14\x96\x01\x78\xb9\x47" - "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", - .alen = 32, - .input = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1" - "\xd2\x64\x3e\x66\x6a\xb2\x03\xc0", - .ilen = 16, - .result = "", - .rlen = 0, - }, { - .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2" - "\xf1\x8e\x19\xd8\x84\xc8\x7a\x53" - "\x02\x5b\x88\xf3\xca\xea\xfe\x9b", - .klen = 32, - .iv = 
"\x31\x6f\x0b\xe6\x59\x85\xe6\x77" - "\xcc\x81\x63\xab\xae\x6b\x43\x54" - "\xbb\x3f\x9c\xf9\xc5\x70\x5a\x5a" - "\x32\x67\xc0\xe9\x80\x02\xe5\x50", - .assoc = "\x40", - .alen = 1, - .input = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b" - "\x7a\x3f\x81\xf7\xfc\x1b\x79\x83" - "\xc7", - .ilen = 17, - .result = "\x4f", - .rlen = 1, + .ctext = "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8" + "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16" + "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF" + "\xF5\x71\xB6\x37\x84\xA7\xB1\x99" + "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D" + "\xEF\x25\x88\x1F\x1D\x77\x11\xFF" + "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B" + "\x00\x62\x1F\x68\x4E\x7C\xA0\x97" + "\x10\x72\x7E\x53\x13\x3B\x68\xE4" + "\x30\x99\x91\x79\x09\xEA\xFF\x6A", + .clen = 80, }, { - .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8" - "\x1b\x50\x25\x5d\x89\x68\xfc\x6d" - "\xc3\x89\x67\xcb\xa4\x49\x9d\x71", - .klen = 32, - .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a" - "\xe6\x01\xa8\x7e\xca\x10\xdc\x73" - "\xf4\x94\x9f\xc1\x5a\x61\x85\x27", - .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37", - .alen = 15, - .input = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba" - "\x7e\x98\x83\x02\x34\x23\xf7\x94" - "\xde\x35\xe6\x1d\x14\x18\xe5\x38" - "\x14\x80\x6a\xa7\x1b\xae\x1d", - .ilen = 31, - .result = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67", - .rlen = 15, + .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\x11\x22\x33", + .klen = 35, + .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", + .ptext = "\x45\x00\x00\x30\x69\xA6\x40\x00" + "\x80\x06\x26\x90\xC0\xA8\x01\x02" + "\x93\x89\x15\x5E\x0A\x9E\x00\x8B" + "\x2D\xC5\x7E\xE0\x00\x00\x00\x00" + "\x70\x02\x40\x00\x20\xBF\x00\x00" + "\x02\x04\x05\xB4\x01\x01\x04\x02" + "\x01\x02\x02\x01", + .plen = 52, + .assoc = "\x4A\x2C\xBF\xE3\x00\x00\x00\x02" + "\x01\x02\x03\x04\x05\x06\x07\x08", + .alen = 16, + .ctext = "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F" + "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4" + "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD" + "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55" + "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10" + "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B" + "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92" + "\x23\xA6\x10\xB0\x26\xD6\xD9\x26" + "\x5A\x48\x6A\x3E", + .clen = 68, }, { - .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf" - "\x46\x13\x31\xe1\x8e\x08\x7e\x87" - "\x85\xb6\x46\xa3\x7e\xa8\x3c\x48", - .klen = 32, - .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" - "\xce\x36\xc7\xce\xa2\xb4\xc9\x60" - "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d" - "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd", - .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00", + .klen = 19, + .iv = "\x00\x00\x00\x00\x00\x00\x00\x00", + .ptext = "\x45\x00\x00\x3C\x99\xC5\x00\x00" + "\x80\x01\xCB\x7A\x40\x67\x93\x18" + "\x01\x01\x01\x01\x08\x00\x07\x5C" + "\x02\x00\x44\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6A\x6B\x6C" + "\x6D\x6E\x6F\x70\x71\x72\x73\x74" + "\x75\x76\x77\x61\x62\x63\x64\x65" + "\x66\x67\x68\x69\x01\x02\x02\x01", + .plen = 64, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x01" + "\x00\x00\x00\x00\x00\x00\x00\x00", .alen = 16, - .input = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01" - "\xbe\x28\x98\x10\x6f\xe9\x61\x32" - "\x96\xbb\xb1\x2e\x8f\x0c\x44\xb9" - "\x46\x2d\x55\xe3\x42\x67\xf2\xaf", - .ilen = 32, - .result = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" - 
"\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .rlen = 16, + .ctext = "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F" + "\x92\x51\x23\xA4\xC1\x5B\xF0\x10" + "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC" + "\x89\xC8\xF8\x42\x62\x95\xB7\xCB" + "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7" + "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C" + "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9" + "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE" + "\x36\x25\xC1\x10\x12\x1C\xCA\x82" + "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A", + .clen = 80, }, { - .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5" - "\x70\xd5\x3c\x65\x93\xa8\x00\xa0" - "\x46\xe4\x25\x7c\x58\x08\xdb\x1e", - .klen = 32, - .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" - "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7" - "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4", - .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05", - .alen = 17, - .input = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8" - "\x45\x65\x1b\x72\x9b\xaa\xa3\x1e" - "\x87\x9d\x26\xdf\xff\x81\x11\xd2" - "\x47\x41\xb9\x24\xc1\x8a\xa3\x8b" - "\x55", - .ilen = 33, - .result = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" - "\xd0", - .rlen = 17, + .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" + "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" + "\x57\x69\x0E", + .klen = 19, + .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", + .ptext = "\x45\x00\x00\x3C\x99\xC3\x00\x00" + "\x80\x01\xCB\x7C\x40\x67\x93\x18" + "\x01\x01\x01\x01\x08\x00\x08\x5C" + "\x02\x00\x43\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6A\x6B\x6C" + "\x6D\x6E\x6F\x70\x71\x72\x73\x74" + "\x75\x76\x77\x61\x62\x63\x64\x65" + "\x66\x67\x68\x69\x01\x02\x02\x01", + .plen = 64, + .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" + "\x10\x10\x10\x10\x4E\x28\x00\x00" + "\xA2\xFC\xA1\xA3", + .alen = 20, + .ctext = "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6" + "\x10\x60\x40\x62\x6B\x4F\x97\x8E" + "\x0B\xB2\x22\x97\xCB\x21\xE0\x90" + "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B" + "\x79\x01\x58\x50\x01\x06\xE1\xE0" + "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" + "\x30\xB8\xE5\xDF\xD7\x12\x56\x75" + "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD" + "\x97\x57\xCA\xC1\x20\xD0\x86\xB9" + "\x66\x9D\xB4\x2B\x96\x22\xAC\x67", + .clen = 80, }, { - .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb" - "\x9a\x97\x48\xe9\x98\x48\x82\xba" - "\x07\x11\x04\x54\x32\x67\x7b\xf5", - .klen = 32, - .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" - "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa", - .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a", - .alen = 31, - .input = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5" - "\x69\x7f\x7a\xdd\x8c\x46\xda\xba" - "\x0a\x5c\x0e\x7f\xac\xee\x02\xd2" - "\xe5\x4b\x0a\xba\xb8\xa4\x7b\x66" - "\xde\xae\xdb\xc2\xc0\x0b\xf7\x2b" - "\xdf\xb8\xea\xd8\xa9\x38\xed", - .ilen = 47, - .result = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70" - "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" - "\x98\x34\xab\x37\x56\xae\x32", - .rlen = 31, + .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" + "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" + "\x57\x69\x0E", + .klen = 19, + .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", + .ptext = "\x45\x00\x00\x1C\x42\xA2\x00\x00" + "\x80\x01\x44\x1F\x40\x67\x93\xB6" + "\xE0\x00\x00\x02\x0A\x00\xF5\xFF" + "\x01\x02\x02\x01", + .plen = 28, + .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" + "\x10\x10\x10\x10\x4E\x28\x00\x00" + "\xA2\xFC\xA1\xA3", + .alen = 20, + .ctext = "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6" + 
"\x10\x60\xCF\x01\x6B\x4F\x97\x20" + "\xEA\xB3\x23\x94\xC9\x21\x1D\x33" + "\xA1\xE5\x90\x40\x05\x37\x45\x70" + "\xB5\xD6\x09\x0A\x23\x73\x33\xF9" + "\x08\xB4\x22\xE4", + .clen = 44, }, { - .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1" - "\xc5\x5a\x53\x6e\x9d\xe8\x04\xd4" - "\xc9\x3f\xe2\x2d\x0c\xc6\x1a\xcb", - .klen = 32, - .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73" - "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", - .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .alen = 32, - .input = "\xd3\x68\x14\x70\x3c\x01\x43\x86" - "\x02\xab\xbe\x75\xaa\xe7\xf5\x53" - "\x5c\x05\xbd\x9b\x19\xbb\x2a\x61" - "\x8f\x69\x05\x75\x8e\xca\x60\x0c" - "\x5b\xa2\x48\x61\x32\x74\x11\x2b" - "\xf6\xcf\x06\x78\x6f\x78\x1a\x4a", - .ilen = 48, - .result = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76" - "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" - "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", - .rlen = 32, + .key = "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" + "\x6D\x6A\x8F\x94\x67\x30\x83\x08" + "\xFE\xFF\xE9\x92\x86\x65\x73\x1C" + "\xCA\xFE\xBA", + .klen = 27, + .iv = "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", + .ptext = "\x45\x00\x00\x28\xA4\xAD\x40\x00" + "\x40\x06\x78\x80\x0A\x01\x03\x8F" + "\x0A\x01\x06\x12\x80\x23\x06\xB8" + "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E" + "\x50\x10\x16\xD0\x75\x68\x00\x01", + .plen = 40, + .assoc = "\x00\x00\xA5\xF8\x00\x00\x00\x0A" + "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88", + .alen = 16, + .ctext = "\x05\x22\x15\xD1\x52\x56\x85\x04" + "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA" + "\xEA\x16\x37\x50\xF3\xDF\x84\x3B" + "\x2F\x32\x18\x57\x34\x2A\x8C\x23" + "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB" + "\x34\xA5\x9F\x6C\x48\x30\x1E\x22" + "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B", + .clen = 56, }, { - .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7" - "\xef\x1c\x5f\xf2\xa3\x88\x86\xed" - "\x8a\x6d\xc1\x05\xe7\x25\xb9\xa2", - .klen = 32, - .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" - "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58", - .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d", - .alen = 33, - .input = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2" - "\x15\x3a\x6c\x72\x83\x9b\xb1\x75" - "\xea\xf2\xfc\xff\xc6\xf1\x13\xa4" - "\x1a\x93\x33\x79\x97\x82\x81\xc0" - "\x96\xc2\x00\xab\x39\xae\xa1\x62" - "\x53\xa3\x86\xc9\x07\x8c\xaf\x22" - "\x47\x31\x29\xca\x4a\x95\xf5\xd5" - "\x20\x63\x5a\x54\x80\x2c\x4a\x63" - "\xfb\x18\x73\x31\x4f\x08\x21\x5d" - "\x20\xe9\xc3\x7e\xea\x25\x77\x3a" - "\x65", - .ilen = 81, - .result = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" - "\x4f\x2e\xe8\x55\x66\x80\x27\x00" - "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" - "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" - "\x0a\x34\x97\xff\x47\x37\xb0\x2a" - "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" - "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" - "\xbd", - .rlen = 65, + .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\xDE\xCA\xF8", + .klen = 19, + .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", + .ptext = "\x45\x00\x00\x49\x33\xBA\x00\x00" + "\x7F\x11\x91\x06\xC3\xFB\x1D\x10" + "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" + "\x00\x35\xDD\x7B\x80\x03\x02\xD5" + "\x00\x00\x4E\x20\x00\x1E\x8C\x18" + "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47" + 
"\x6B\x91\xB9\x24\xB2\x80\x38\x9D" + "\x92\xC9\x63\xBA\xC0\x46\xEC\x95" + "\x9B\x62\x66\xC0\x47\x22\xB1\x49" + "\x23\x01\x01\x01", + .plen = 76, + .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" + "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" + "\xCE\xFA\xCE\x74", + .alen = 20, + .ctext = "\x92\xD0\x53\x79\x33\x38\xD5\xF3" + "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90" + "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76" + "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00" + "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA" + "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7" + "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6" + "\x63\xC9\xDB\x05\x69\x51\x12\xAD" + "\x3E\x00\x32\x73\x86\xF2\xEE\xF5" + "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D" + "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7" + "\x12\x25\x0B\xF9", + .clen = 92, }, { - .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe" - "\x19\xde\x6b\x76\xa8\x28\x08\x07" - "\x4b\x9a\xa0\xdd\xc1\x84\x58\x79", - .klen = 32, - .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f" - "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e", - .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .alen = 65, - .input = "\x33\xc1\xda\xfa\x15\x21\x07\x8e" - "\x93\x68\xea\x64\x7b\x3d\x4b\x6b" - "\x71\x5e\x5e\x6b\x92\xaa\x65\xc2" - "\x7a\x2a\xc1\xa9\x0a\xa1\x24\x81" - "\x26\x3a\x5a\x09\xe8\xce\x73\x72" - "\xde\x7b\x58\x9e\x85\xb9\xa4\x28" - "\xda", - .ilen = 49, - .result = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" - "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" - "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" - "\x2f", - .rlen = 33, + .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\x73\x61\x6C", + .klen = 35, + .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", + .ptext = "\x45\x08\x00\x28\x73\x2C\x00\x00" + "\x40\x06\xE9\xF9\x0A\x01\x06\x12" + "\x0A\x01\x03\x8F\x06\xB8\x80\x23" + "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02" + "\x50\x10\x1F\x64\x6D\x54\x00\x01", + .plen = 40, + .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" + "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" + "\x69\x76\x65\x63", + .alen = 20, + .ctext = "\xCC\x74\xB7\xD3\xB0\x38\x50\x42" + "\x2C\x64\x87\x46\x1E\x34\x10\x05" + "\x29\x6B\xBB\x36\xE9\x69\xAD\x92" + "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D" + "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA" + "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7" + "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D", + .clen = 56, }, { - .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04" - "\x44\xa1\x76\xfb\xad\xc8\x8a\x21" - "\x0d\xc8\x7f\xb6\x9b\xe3\xf8\x4f", - .klen = 32, - .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85" - "\x0e\x51\xf9\x1c\xee\x70\x6a\x27" - "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05", - .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07", + .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" + "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" + "\x57\x69\x0E", + .klen = 19, + .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", + .ptext = "\x45\x00\x00\x49\x33\x3E\x00\x00" + "\x7F\x11\x91\x82\xC3\xFB\x1D\x10" + "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE" + "\x00\x35\xCB\x45\x80\x03\x02\x5B" + "\x00\x00\x01\xE0\x00\x1E\x8C\x18" + "\xD6\x57\x59\xD5\x22\x84\xA0\x35" + "\x2C\x71\x47\x5C\x88\x80\x39\x1C" + "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32" + 
"\x5A\xE2\x70\xC0\x38\x99\x49\x39" + "\x15\x01\x01\x01", + .plen = 76, + .assoc = "\x42\xF6\x7E\x3F\x10\x10\x10\x10" + "\x10\x10\x10\x10\x4E\x28\x00\x00" + "\xA2\xFC\xA1\xA3", + .alen = 20, + .ctext = "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6" + "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86" + "\xC8\x02\xF0\xB0\x03\x09\xD9\x02" + "\xA0\xD2\x59\x04\xD1\x85\x2A\x24" + "\x1C\x67\x3E\xD8\x68\x72\x06\x94" + "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B" + "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C" + "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE" + "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5" + "\x23\xF4\x84\x40\x74\x14\x8A\x6B" + "\xDB\xD7\x67\xED\xA4\x93\xF3\x47" + "\xCC\xF7\x46\x6F", + .clen = 92, + }, { + .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\x73\x61\x6C", + .klen = 35, + .iv = "\x61\x6E\x64\x01\x69\x76\x65\x63", + .ptext = "\x63\x69\x73\x63\x6F\x01\x72\x75" + "\x6C\x65\x73\x01\x74\x68\x65\x01" + "\x6E\x65\x74\x77\x65\x01\x64\x65" + "\x66\x69\x6E\x65\x01\x74\x68\x65" + "\x74\x65\x63\x68\x6E\x6F\x6C\x6F" + "\x67\x69\x65\x73\x01\x74\x68\x61" + "\x74\x77\x69\x6C\x6C\x01\x64\x65" + "\x66\x69\x6E\x65\x74\x6F\x6D\x6F" + "\x72\x72\x6F\x77\x01\x02\x02\x01", + .plen = 72, + .assoc = "\x17\x40\x5E\x67\x15\x6F\x31\x26" + "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01" + "\x69\x76\x65\x63", + .alen = 20, + .ctext = "\xEA\x15\xC4\x98\xAC\x15\x22\x37" + "\x00\x07\x1D\xBE\x60\x5D\x73\x16" + "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4" + "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A" + "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4" + "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5" + "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6" + "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C" + "\x73\xF4\xE7\x12\x84\x4C\x37\x0A" + "\x4A\x8F\x06\x37\x48\xF9\xF9\x05" + "\x55\x13\x40\xC3\xD5\x55\x3A\x3D", + .clen = 88, + }, { + .key = "\x7D\x77\x3D\x00\xC1\x44\xC5\x25" + "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47" + "\xD9\x66\x42", + .klen = 19, + .iv = "\x43\x45\x7E\x91\x82\x44\x3B\xC6", + .ptext = "\x01\x02\x02\x01", + .plen = 4, + .assoc = "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF" + "\x43\x45\x7E\x91\x82\x44\x3B\xC6", .alen = 16, - .input = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02" - "\x0f\xdf\xc9\x6e\x37\x1e\x57\x99" - "\x07\x2a\x1a\xac\xd1\xda\xfd\x3b" - "\xc7\xff\xbd\xbc\x85\x09\x0b", - .ilen = 31, - .result = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" - "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .rlen = 16, + .ctext = "\x4C\x72\x63\x30\x2F\xE6\x56\xDD" + "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90" + "\xF7\x61\x24\x62", + .clen = 20, }, { - .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a" - "\x6e\x63\x82\x7f\xb2\x68\x0c\x3a" - "\xce\xf5\x5e\x8e\x75\x42\x97\x26", + .key = "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23" + "\x34\x45\x56\x67\x78\x89\x9A\xAB" + "\xDE\xCA\xF8", + .klen = 19, + .iv = "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74", + .ptext = "\x74\x6F\x01\x62\x65\x01\x6F\x72" + "\x01\x6E\x6F\x74\x01\x74\x6F\x01" + "\x62\x65\x00\x01", + .plen = 20, + .assoc = "\x00\x00\x01\x00\x00\x00\x00\x00" + "\x00\x00\x00\x01\xCA\xFE\xDE\xBA" + "\xCE\xFA\xCE\x74", + .alen = 20, + .ctext = "\xA3\xBF\x52\x52\x65\x83\xBA\x81" + "\x03\x9B\x84\xFC\x44\x8C\xBB\x81" + "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0" + "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC" + "\x17\x17\x65\xAD", + .clen = 36, + }, { + .key = "\x6C\x65\x67\x61\x6C\x69\x7A\x65" + "\x6D\x61\x72\x69\x6A\x75\x61\x6E" + "\x61\x61\x6E\x64\x64\x6F\x69\x74" + "\x62\x65\x66\x6F\x72\x65\x69\x61" + "\x74\x75\x72", + .klen = 35, + .iv = "\x33\x30\x21\x69\x67\x65\x74\x6D", + .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00" + "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" + 
"\xC0\xA8\x00\x01\x08\x00\xC6\xCD" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6A\x6B\x6C" + "\x6D\x6E\x6F\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01", + .plen = 52, + .assoc = "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF" + "\xFF\xFF\xFF\xFF\x33\x30\x21\x69" + "\x67\x65\x74\x6D", + .alen = 20, + .ctext = "\x96\xFD\x86\xF8\xD1\x98\xFF\x10" + "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A" + "\x48\x59\x80\x18\x1A\x18\x1A\x04" + "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75" + "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF" + "\x16\xC3\x35\xA8\xE7\xCE\x84\x04" + "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42" + "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B" + "\x39\xDB\xC8\xDC", + .clen = 68, + }, { + .key = "\x3D\xE0\x98\x74\xB3\x88\xE6\x49" + "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F" + "\x57\x69\x0E", + .klen = 19, + .iv = "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3", + .ptext = "\x45\x00\x00\x30\xDA\x3A\x00\x00" + "\x80\x01\xDF\x3B\xC0\xA8\x00\x05" + "\xC0\xA8\x00\x01\x08\x00\xC6\xCD" + "\x02\x00\x07\x00\x61\x62\x63\x64" + "\x65\x66\x67\x68\x69\x6A\x6B\x6C" + "\x6D\x6E\x6F\x70\x71\x72\x73\x74" + "\x01\x02\x02\x01", + .plen = 52, + .assoc = "\x3F\x7E\xF6\x42\x10\x10\x10\x10" + "\x10\x10\x10\x10\x4E\x28\x00\x00" + "\xA2\xFC\xA1\xA3", + .alen = 20, + .ctext = "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6" + "\x10\x60\x54\x25\xEB\x80\x04\x93" + "\xCA\x1B\x23\x97\xCB\x21\x2E\x01" + "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B" + "\x79\x01\x58\x50\x01\x06\xE1\xE0" + "\x2C\x83\x79\xD3\xDE\x46\x97\x1A" + "\x44\xCC\x90\xBF\x00\x94\x94\x92" + "\x20\x17\x0C\x1B\x55\xDE\x7E\x68" + "\xF4\x95\x5D\x4F", + .clen = 68, + }, { + .key = "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA" + "\x90\x6A\xC7\x3C\x36\x13\xA6\x34" + "\x22\x43\x3C", + .klen = 19, + .iv = "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD", + .ptext = "\x08\x00\xC6\xCD\x02\x00\x07\x00" + "\x61\x62\x63\x64\x65\x66\x67\x68" + "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70" + "\x71\x72\x73\x74\x01\x02\x02\x01", + .plen = 32, + .assoc = "\x00\x00\x43\x21\x87\x65\x43\x21" + "\x00\x00\x00\x07\x48\x55\xEC\x7D" + "\x3A\x23\x4B\xFD", + .alen = 20, + .ctext = "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02" + "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F" + "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF" + "\x58\x7D\xCF\x29\xA3\x41\x68\x6B" + "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F" + "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6", + .clen = 48, + } +}; + +/* + * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5. 
+ */ +static const struct aead_testvec rfc7539_tv_template[] = { + { + .key = "\x80\x81\x82\x83\x84\x85\x86\x87" + "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" + "\x90\x91\x92\x93\x94\x95\x96\x97" + "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f", .klen = 32, - .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" - "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c" - "\x39\x14\x05\xa0\xf3\x10\xec\x41" - "\xff\x01\x95\x84\x2b\x59\x7f\xdb", - .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .alen = 16, - .input = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e" - "\x6c\xd9\x84\x63\x70\x97\x61\x37" - "\x08\x2f\x16\x90\x9e\x62\x30\x0d" - "\x62\xd5\xc8\xf0\x46\x1a", - .ilen = 30, - .result = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" - "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .rlen = 16, + .iv = "\x07\x00\x00\x00\x40\x41\x42\x43" + "\x44\x45\x46\x47", + .assoc = "\x50\x51\x52\x53\xc0\xc1\xc2\xc3" + "\xc4\xc5\xc6\xc7", + .alen = 12, + .ptext = "\x4c\x61\x64\x69\x65\x73\x20\x61" + "\x6e\x64\x20\x47\x65\x6e\x74\x6c" + "\x65\x6d\x65\x6e\x20\x6f\x66\x20" + "\x74\x68\x65\x20\x63\x6c\x61\x73" + "\x73\x20\x6f\x66\x20\x27\x39\x39" + "\x3a\x20\x49\x66\x20\x49\x20\x63" + "\x6f\x75\x6c\x64\x20\x6f\x66\x66" + "\x65\x72\x20\x79\x6f\x75\x20\x6f" + "\x6e\x6c\x79\x20\x6f\x6e\x65\x20" + "\x74\x69\x70\x20\x66\x6f\x72\x20" + "\x74\x68\x65\x20\x66\x75\x74\x75" + "\x72\x65\x2c\x20\x73\x75\x6e\x73" + "\x63\x72\x65\x65\x6e\x20\x77\x6f" + "\x75\x6c\x64\x20\x62\x65\x20\x69" + "\x74\x2e", + .plen = 114, + .ctext = "\xd3\x1a\x8d\x34\x64\x8e\x60\xdb" + "\x7b\x86\xaf\xbc\x53\xef\x7e\xc2" + "\xa4\xad\xed\x51\x29\x6e\x08\xfe" + "\xa9\xe2\xb5\xa7\x36\xee\x62\xd6" + "\x3d\xbe\xa4\x5e\x8c\xa9\x67\x12" + "\x82\xfa\xfb\x69\xda\x92\x72\x8b" + "\x1a\x71\xde\x0a\x9e\x06\x0b\x29" + "\x05\xd6\xa5\xb6\x7e\xcd\x3b\x36" + "\x92\xdd\xbd\x7f\x2d\x77\x8b\x8c" + "\x98\x03\xae\xe3\x28\x09\x1b\x58" + "\xfa\xb3\x24\xe4\xfa\xd6\x75\x94" + "\x55\x85\x80\x8b\x48\x31\xd7\xbc" + "\x3f\xf4\xde\xf0\x8e\x4b\x7a\x9d" + "\xe5\x76\xd2\x65\x86\xce\xc6\x4b" + "\x61\x16\x1a\xe1\x0b\x59\x4f\x09" + "\xe2\x6a\x7e\x90\x2e\xcb\xd0\x60" + "\x06\x91", + .clen = 130, }, { - .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10" - "\x98\x25\x8d\x03\xb7\x08\x8e\x54" - "\x90\x23\x3d\x67\x4f\xa1\x36\xfc", + .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" + "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" + "\x47\x39\x17\xc1\x40\x2b\x80\x09" + "\x9d\xca\x5c\xbc\x20\x70\x75\xc0", .klen = 32, - .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" - "\xd5\x07\x58\x59\x72\xd7\xde\x92" - "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a" - "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2", - .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .alen = 16, - .input = "\xce\xf3\x17\x87\x49\xc2\x00\x46" - "\xc6\x12\x5c\x8f\x81\x38\xaa\x55" - "\xf8\x67\x75\xf1\x75\xe3\x2a\x24", - .ilen = 24, - .result = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .rlen = 16, + .iv = "\x00\x00\x00\x00\x01\x02\x03\x04" + "\x05\x06\x07\x08", + .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" + "\x00\x00\x4e\x91", + .alen = 12, + .ptext = "\x49\x6e\x74\x65\x72\x6e\x65\x74" + "\x2d\x44\x72\x61\x66\x74\x73\x20" + "\x61\x72\x65\x20\x64\x72\x61\x66" + "\x74\x20\x64\x6f\x63\x75\x6d\x65" + "\x6e\x74\x73\x20\x76\x61\x6c\x69" + "\x64\x20\x66\x6f\x72\x20\x61\x20" + "\x6d\x61\x78\x69\x6d\x75\x6d\x20" + "\x6f\x66\x20\x73\x69\x78\x20\x6d" + "\x6f\x6e\x74\x68\x73\x20\x61\x6e" + "\x64\x20\x6d\x61\x79\x20\x62\x65" + "\x20\x75\x70\x64\x61\x74\x65\x64" + "\x2c\x20\x72\x65\x70\x6c\x61\x63" + "\x65\x64\x2c\x20\x6f\x72\x20\x6f" + 
"\x62\x73\x6f\x6c\x65\x74\x65\x64" + "\x20\x62\x79\x20\x6f\x74\x68\x65" + "\x72\x20\x64\x6f\x63\x75\x6d\x65" + "\x6e\x74\x73\x20\x61\x74\x20\x61" + "\x6e\x79\x20\x74\x69\x6d\x65\x2e" + "\x20\x49\x74\x20\x69\x73\x20\x69" + "\x6e\x61\x70\x70\x72\x6f\x70\x72" + "\x69\x61\x74\x65\x20\x74\x6f\x20" + "\x75\x73\x65\x20\x49\x6e\x74\x65" + "\x72\x6e\x65\x74\x2d\x44\x72\x61" + "\x66\x74\x73\x20\x61\x73\x20\x72" + "\x65\x66\x65\x72\x65\x6e\x63\x65" + "\x20\x6d\x61\x74\x65\x72\x69\x61" + "\x6c\x20\x6f\x72\x20\x74\x6f\x20" + "\x63\x69\x74\x65\x20\x74\x68\x65" + "\x6d\x20\x6f\x74\x68\x65\x72\x20" + "\x74\x68\x61\x6e\x20\x61\x73\x20" + "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" + "\x20\x69\x6e\x20\x70\x72\x6f\x67" + "\x72\x65\x73\x73\x2e\x2f\xe2\x80" + "\x9d", + .plen = 265, + .ctext = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" + "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" + "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" + "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" + "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" + "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" + "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" + "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" + "\x33\x2f\x83\x0e\x71\x0b\x97\xce" + "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" + "\x14\xad\x17\x6e\x00\x8d\x33\xbd" + "\x60\xf9\x82\xb1\xff\x37\xc8\x55" + "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" + "\xc1\x86\x32\x4e\x2b\x35\x06\x38" + "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" + "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" + "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" + "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" + "\x90\x40\xc5\xa4\x04\x33\x22\x5e" + "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" + "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" + "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" + "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" + "\x1b\x42\x22\x73\xf5\x48\x27\x1a" + "\x0b\xb2\x31\x60\x53\xfa\x76\x99" + "\x19\x55\xeb\xd6\x31\x59\x43\x4e" + "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" + "\x73\xa6\x72\x76\x27\x09\x7a\x10" + "\x49\xe6\x17\xd9\x1d\x36\x10\x94" + "\xfa\x68\xf0\xff\x77\x98\x71\x30" + "\x30\x5b\xea\xba\x2e\xda\x04\xdf" + "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" + "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" + "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" + "\x22\x39\x23\x36\xfe\xa1\x85\x1f" + "\x38", + .clen = 281, }, }; /* - * MORUS-640 test vectors - generated via reference implementation from + * draft-irtf-cfrg-chacha20-poly1305 + */ +static const struct aead_testvec rfc7539esp_tv_template[] = { + { + .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" + "\xf3\x33\x88\x86\x04\xf6\xb5\xf0" + "\x47\x39\x17\xc1\x40\x2b\x80\x09" + "\x9d\xca\x5c\xbc\x20\x70\x75\xc0" + "\x00\x00\x00\x00", + .klen = 36, + .iv = "\x01\x02\x03\x04\x05\x06\x07\x08", + .assoc = "\xf3\x33\x88\x86\x00\x00\x00\x00" + "\x00\x00\x4e\x91\x01\x02\x03\x04" + "\x05\x06\x07\x08", + .alen = 20, + .ptext = "\x49\x6e\x74\x65\x72\x6e\x65\x74" + "\x2d\x44\x72\x61\x66\x74\x73\x20" + "\x61\x72\x65\x20\x64\x72\x61\x66" + "\x74\x20\x64\x6f\x63\x75\x6d\x65" + "\x6e\x74\x73\x20\x76\x61\x6c\x69" + "\x64\x20\x66\x6f\x72\x20\x61\x20" + "\x6d\x61\x78\x69\x6d\x75\x6d\x20" + "\x6f\x66\x20\x73\x69\x78\x20\x6d" + "\x6f\x6e\x74\x68\x73\x20\x61\x6e" + "\x64\x20\x6d\x61\x79\x20\x62\x65" + "\x20\x75\x70\x64\x61\x74\x65\x64" + "\x2c\x20\x72\x65\x70\x6c\x61\x63" + "\x65\x64\x2c\x20\x6f\x72\x20\x6f" + "\x62\x73\x6f\x6c\x65\x74\x65\x64" + "\x20\x62\x79\x20\x6f\x74\x68\x65" + "\x72\x20\x64\x6f\x63\x75\x6d\x65" + "\x6e\x74\x73\x20\x61\x74\x20\x61" + "\x6e\x79\x20\x74\x69\x6d\x65\x2e" + "\x20\x49\x74\x20\x69\x73\x20\x69" + "\x6e\x61\x70\x70\x72\x6f\x70\x72" + "\x69\x61\x74\x65\x20\x74\x6f\x20" + "\x75\x73\x65\x20\x49\x6e\x74\x65" + "\x72\x6e\x65\x74\x2d\x44\x72\x61" + "\x66\x74\x73\x20\x61\x73\x20\x72" + 
"\x65\x66\x65\x72\x65\x6e\x63\x65" + "\x20\x6d\x61\x74\x65\x72\x69\x61" + "\x6c\x20\x6f\x72\x20\x74\x6f\x20" + "\x63\x69\x74\x65\x20\x74\x68\x65" + "\x6d\x20\x6f\x74\x68\x65\x72\x20" + "\x74\x68\x61\x6e\x20\x61\x73\x20" + "\x2f\xe2\x80\x9c\x77\x6f\x72\x6b" + "\x20\x69\x6e\x20\x70\x72\x6f\x67" + "\x72\x65\x73\x73\x2e\x2f\xe2\x80" + "\x9d", + .plen = 265, + .ctext = "\x64\xa0\x86\x15\x75\x86\x1a\xf4" + "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd" + "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89" + "\xf1\x08\x67\x0a\xc7\x6c\x8c\xb2" + "\x4c\x6c\xfc\x18\x75\x5d\x43\xee" + "\xa0\x9e\xe9\x4e\x38\x2d\x26\xb0" + "\xbd\xb7\xb7\x3c\x32\x1b\x01\x00" + "\xd4\xf0\x3b\x7f\x35\x58\x94\xcf" + "\x33\x2f\x83\x0e\x71\x0b\x97\xce" + "\x98\xc8\xa8\x4a\xbd\x0b\x94\x81" + "\x14\xad\x17\x6e\x00\x8d\x33\xbd" + "\x60\xf9\x82\xb1\xff\x37\xc8\x55" + "\x97\x97\xa0\x6e\xf4\xf0\xef\x61" + "\xc1\x86\x32\x4e\x2b\x35\x06\x38" + "\x36\x06\x90\x7b\x6a\x7c\x02\xb0" + "\xf9\xf6\x15\x7b\x53\xc8\x67\xe4" + "\xb9\x16\x6c\x76\x7b\x80\x4d\x46" + "\xa5\x9b\x52\x16\xcd\xe7\xa4\xe9" + "\x90\x40\xc5\xa4\x04\x33\x22\x5e" + "\xe2\x82\xa1\xb0\xa0\x6c\x52\x3e" + "\xaf\x45\x34\xd7\xf8\x3f\xa1\x15" + "\x5b\x00\x47\x71\x8c\xbc\x54\x6a" + "\x0d\x07\x2b\x04\xb3\x56\x4e\xea" + "\x1b\x42\x22\x73\xf5\x48\x27\x1a" + "\x0b\xb2\x31\x60\x53\xfa\x76\x99" + "\x19\x55\xeb\xd6\x31\x59\x43\x4e" + "\xce\xbb\x4e\x46\x6d\xae\x5a\x10" + "\x73\xa6\x72\x76\x27\x09\x7a\x10" + "\x49\xe6\x17\xd9\x1d\x36\x10\x94" + "\xfa\x68\xf0\xff\x77\x98\x71\x30" + "\x30\x5b\xea\xba\x2e\xda\x04\xdf" + "\x99\x7b\x71\x4d\x6c\x6f\x2c\x29" + "\xa6\xad\x5c\xb4\x02\x2b\x02\x70" + "\x9b\xee\xad\x9d\x67\x89\x0c\xbb" + "\x22\x39\x23\x36\xfe\xa1\x85\x1f" + "\x38", + .clen = 281, + }, +}; + +/* + * AEGIS-128 test vectors - generated via reference implementation from * SUPERCOP (https://bench.cr.yp.to/supercop.html): * * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/morus640128v2/) + * (see crypto_aead/aegis128/) */ -static const struct aead_testvec morus640_enc_tv_template[] = { +static const struct aead_testvec aegis128_tv_template[] = { { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" + .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" "\x20\x36\x2c\x24\xfe\xc9\x30\x81", + .klen = 16, + .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" + "\x40\x6d\x59\x48\xfc\x92\x61\x03", .assoc = "", .alen = 0, - .input = "", - .ilen = 0, - .result = "\x89\x62\x7d\xf3\x07\x9d\x52\x05" - "\x53\xc3\x04\x60\x93\xb4\x37\x9a", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x07\xa5\x11\xf2\x9d\x40\xb8\x6d" + "\xda\xb8\x12\x34\x4c\x53\xd9\x72", + .clen = 16, }, { - .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b" - "\x80\xda\xb2\x91\xf9\x24\xc2\x06", - .klen = 16, - .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" + .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", + .klen = 16, + .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" + "\xc1\x47\x0b\xda\xf6\xb6\x23\x09", .assoc = "", .alen = 0, - .input = "\x69", - .ilen = 1, - .result = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78" - "\xb6\x10\x9a\x59\x5f\x61\x37\x70" - "\x09", - .rlen = 17, + .ptext = "\x79", + .plen = 1, + .ctext = "\x9e\x78\x52\xae\xcb\x9e\xe4\xd3" + "\x9a\xd7\x5d\xd7\xaa\x9a\xe9\x5a" + "\xcc", + .clen = 17, }, { - .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37" - "\x01\xb4\x64\x22\xf3\x48\x85\x0c", - .klen = 16, - .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" + .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" "\x22\xea\x90\x47\xf2\x11\xb5\x8e", + .klen = 16, + .iv = 
"\x97\xdb\x90\x0e\xa8\x35\xa5\x45" + "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f", .assoc = "", .alen = 0, - .input = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" - "\x62\x58\xe9\x8f\xef\xa4\x17", - .ilen = 15, - .result = "\x76\xdd\xb9\x05\x3d\xce\x61\x38" - "\xf3\xef\xf7\xe5\xd7\xfd\x70\xa5" - "\xcf\x9d\x64\xb8\x0a\x9f\xfd\x8b" - "\xd4\x6e\xfe\xd9\xc8\x63\x4b", - .rlen = 31, + .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" + "\x82\x8e\x16\xb4\xed\x6d\x47", + .plen = 15, + .ctext = "\xc3\x80\x83\x04\x5f\xaa\x61\xc7" + "\xca\xdd\x6f\xac\x85\x08\xb5\x35" + "\x2b\xc2\x3e\x0b\x1b\x39\x37\x2b" + "\x7a\x21\x16\xb3\xe6\x67\x66", + .clen = 31, }, { - .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47\x12", - .klen = 16, - .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" + .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" "\xa2\xc5\x42\xd8\xec\x36\x78\x94", + .klen = 16, + .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" + "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15", .assoc = "", .alen = 0, - .input = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" - "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97", - .ilen = 16, - .result = "\xdc\x72\xe8\x14\xfb\x63\xad\x72" - "\x1f\x57\x9a\x1f\x88\x81\xdb\xd6" - "\xc1\x91\x9d\xb9\x25\xc4\x99\x4c" - "\x97\xcd\x8a\x0c\x9d\x68\x00\x1c", - .rlen = 32, - }, { - .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" + .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .klen = 16, - .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" + .plen = 16, + .ctext = "\x23\x25\x30\xe5\x6a\xb6\x36\x7d" + "\x38\xfd\x3a\xd2\xc2\x58\xa9\x11" + "\x1e\xa8\x30\x9c\x16\xa4\xdb\x65" + "\x51\x10\x16\x27\x70\x9b\x64\x29", + .clen = 32, + }, { + .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", + .klen = 16, + .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" + "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c", .assoc = "", .alen = 0, - .input = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" - "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" - "\x09", - .ilen = 17, - .result = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82" - "\x0a\xb8\x55\xee\xeb\x73\x4d\x7f" - "\x54\x11\x3a\x8a\x31\xa3\xb5\xf2" - "\xcd\x49\xdb\xf3\xee\x26\xbd\xa2" - "\x0d", - .rlen = 33, + .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" + "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" + "\xd3", + .plen = 17, + .ctext = "\x2a\x8d\x56\x91\xc6\xf3\x56\xa5" + "\x1f\xf0\x89\x2e\x13\xad\xe6\xf6" + "\x46\x80\xb1\x0e\x18\x30\x40\x97" + "\x03\xdf\x64\x3c\xbe\x93\x9e\xc9" + "\x3b", + .clen = 33, }, { - .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f", - .klen = 16, - .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12" + .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", + .klen = 16, + .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" + "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22", .assoc = "", .alen = 0, - .input = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" - "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" - "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" - "\x57\x05\x01\x1c\x66\x22\xd3", - .ilen = 31, - .result = "\x59\xd1\x0f\x6b\xee\x27\x84\x92" - "\xb7\xa9\xb5\xdd\x02\xa4\x12\xa5" - "\x50\x32\xb4\x9a\x2e\x35\x83\x55" - "\x36\x12\x12\xed\xa3\x31\xc5\x30" - "\xa7\xe2\x4a\x6d\x05\x59\x43\x91" - "\x75\xfa\x6c\x17\xc6\x73\xca", - .rlen = 47, + .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" + "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" + "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" + "\x88\x11\x39\x12\x1c\x3a\xbb", + .plen = 31, + .ctext = "\x4e\xf6\xfa\x13\xde\x43\x63\x4c" + "\xe2\x04\x3e\xe4\x85\x14\xb6\x3f" + "\xb1\x8f\x4c\xdb\x41\xa2\x14\x99" + "\xf5\x53\x0f\x73\x86\x7e\x97\xa1" + "\x4b\x56\x5b\x94\xce\xcd\x74\xcd" + "\x75\xc4\x53\x01\x89\x45\x59", + .clen = 
47, }, { - .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25", - .klen = 16, - .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" + .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", + .klen = 16, + .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" + "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28", .assoc = "", .alen = 0, - .input = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" - "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" - "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" - "\x19\x33\xe0\xf4\x40\x81\x72\x28", - .ilen = 32, - .result = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1" - "\xcf\x50\xb2\x4c\x32\xe1\xa6\x69" - "\xc0\xfb\x44\x1f\xa0\x9a\xeb\x39" - "\x1b\xde\x68\x38\xcc\x27\x52\xc5" - "\xf6\x3e\x74\xea\x66\x5b\x5f\x0c" - "\x65\x9e\x58\xe6\x52\xa2\xfe\x59", - .rlen = 48, + .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" + "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" + "\x28\x50\x51\x9d\x24\x60\x8d\xb3" + "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", + .plen = 32, + .ctext = "\xa4\x9a\xb7\xfd\xa0\xd4\xd6\x47" + "\x95\xf4\x58\x38\x14\x83\x27\x01" + "\x4c\xed\x32\x2c\xf7\xd6\x31\xf7" + "\x38\x1b\x2c\xc9\xb6\x31\xce\xaa" + "\xa5\x3c\x1a\x18\x5c\xce\xb9\xdf" + "\x51\x52\x77\xf2\x5e\x85\x80\x41", + .clen = 48, }, { - .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b", - .klen = 16, - .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" + .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .assoc = "\xc5", + .klen = 16, + .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" + "\xc6\x64\x37\x42\xd2\x90\xb3\x2e", + .assoc = "\xd5", .alen = 1, - .input = "", - .ilen = 0, - .result = "\x56\xe7\x24\x52\xdd\x95\x60\x5b" - "\x09\x48\x39\x69\x9c\xb3\x62\x46", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\xfb\xd4\x83\x71\x9e\x63\xad\x60" + "\xb9\xf9\xeb\x34\x52\x49\xcf\xb7", + .clen = 16, }, { - .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde" - "\x07\xd1\x90\x8b\xcf\x23\x15\x31", - .klen = 16, - .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" + .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76", + .klen = 16, + .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" + "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34", + .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" + "\x68\x75\x16\xf8\xcb\x7e\xa7", .alen = 15, - .input = "", - .ilen = 0, - .result = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01" - "\x13\xe5\x73\x46\x46\xf2\x5c\xe1", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x0c\xaf\x2e\x96\xf6\x97\x08\x71" + "\x7d\x3a\x84\xc4\x44\x57\x77\x7e", + .clen = 16, }, { - .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa" - "\x88\xab\x42\x1c\xc9\x47\xd7\x38", - .klen = 16, - .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" + .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08" + .klen = 16, + .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", + .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" + "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", .alen = 16, - .input = "", - .ilen = 0, - .result = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac" - "\xa9\x21\x45\x0b\x16\x52\xf7\xe1", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\xc7\x87\x09\x3b\xc7\x19\x74\x22" + "\x22\xa5\x67\x10\xb2\x36\xb3\x45", + .clen = 16, }, { - .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16" - "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e", - .klen = 16, - .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" + .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - 
"\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" - "\x3c", + .klen = 16, + .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" + "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41", + .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" + "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" + "\x07", .alen = 17, - .input = "", - .ilen = 0, - .result = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9" - "\xbb\xa8\x62\xad\x0a\xf5\x48\x60", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x02\xc6\x3b\x46\x65\xb2\xef\x91" + "\x31\xf0\x45\x48\x8a\x2a\xed\xe4", + .clen = 16, }, { - .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31" - "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44", - .klen = 16, - .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" + .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47" - "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7", + .klen = 16, + .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" + "\xca\xcd\xff\x88\xba\x22\xbe\x47", + .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" + "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" + "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" + "\xe0\x17\x3a\x2e\x83\x5c\x8f", .alen = 31, - .input = "", - .ilen = 0, - .result = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d" - "\x4b\xef\x75\x16\x0a\x99\xae\x6b", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x20\x85\xa8\xd0\x91\x48\x85\xf3" + "\x5a\x16\xc0\x57\x68\x47\xdd\xcb", + .clen = 16, }, { - .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d" - "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a", - .klen = 16, - .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" + .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" - "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", + .klen = 16, + .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" + "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d", + .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" + "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" + "\x5c\x2d\x14\x96\x01\x78\xb9\x47" + "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", .alen = 32, - .input = "", - .ilen = 0, - .result = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f" - "\xce\xfb\xaf\xd6\xc2\xe6\xee\xc0", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x6a\xf8\x8d\x9c\x42\x75\x35\x79" + "\xc1\x96\xbd\x31\x6e\x69\x1b\x50", + .clen = 16, }, { - .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69" - "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50", - .klen = 16, - .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" + .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .assoc = "\x31", + .klen = 16, + .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" + "\xcc\x81\x63\xab\xae\x6b\x43\x54", + .assoc = "\x40", .alen = 1, - .input = "\x40", - .ilen = 1, - .result = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38" - "\x01\xfe\x84\x14\x85\xf8\xd1\xe3" - "\x22", - .rlen = 17, + .ptext = "\x4f", + .plen = 1, + .ctext = "\x01\x24\xb1\xba\xf6\xd3\xdf\x83" + "\x70\x45\xe3\x2a\x9d\x5c\x63\x98" + "\x39", + .clen = 17, }, { - .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85" - "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57", - .klen = 16, - .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" + .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06", - .alen = 15, - .input = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" + .klen = 16, + .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" + "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a", + .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" "\x6d\x92\x42\x61\xa7\x58\x37", - .ilen = 15, - .result = "\x77\x32\x61\xeb\xb4\x33\x29\x92" - 
"\x29\x95\xc5\x8e\x85\x76\xab\xfc" - "\x07\x95\xa7\x44\x74\xf7\x22\xff" - "\xd8\xd8\x36\x3d\x8a\x7f\x9e", - .rlen = 31, + .alen = 15, + .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" + "\x8d\xc8\x6e\x85\xa5\x21\x67", + .plen = 15, + .ctext = "\x18\x78\xc2\x6e\xe1\xf7\xe6\x8a" + "\xca\x0e\x62\x00\xa8\x21\xb5\x21" + "\x3d\x36\xdb\xf7\xcc\x31\x94\x9c" + "\x98\xbd\x71\x7a\xef\xa4\xfa", + .clen = 31, }, { - .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d", - .klen = 16, - .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" + .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" + .klen = 16, + .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .alen = 16, - .input = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" + .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .ilen = 16, - .result = "\xd8\xfd\x44\x45\xf6\x42\x12\x38" - "\xf2\x0b\xea\x4f\x9e\x11\x61\x07" - "\x48\x67\x98\x18\x9b\xd0\x0c\x59" - "\x67\xa4\x11\xb3\x2b\xd6\xc1\x70", - .rlen = 32, - }, { - .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" + .alen = 16, + .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .klen = 16, - .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44" + .plen = 16, + .ctext = "\xea\xd1\x81\x75\xb4\x13\x1d\x86" + "\xd4\x17\x26\xe5\xd6\x89\x39\x04" + "\xa9\x6c\xca\xac\x40\x73\xb2\x4c" + "\x9c\xb9\x0e\x79\x4c\x40\x65\xc6", + .clen = 32, + }, { + .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" - "\x3b", - .alen = 17, - .input = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" + .klen = 16, + .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" + "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66", + .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" "\x05", - .ilen = 17, - .result = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6" - "\x71\x3a\x00\x9f\x41\x88\xb0\xb2" - "\x71\x83\x85\x5f\xc8\x79\x0a\x99" - "\x99\xdc\x89\x1c\x88\xd2\x3e\xf9" - "\x83", - .rlen = 33, + .alen = 17, + .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8" + "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" + "\xd0", + .plen = 17, + .ctext = "\xf4\xb2\x84\xd1\x81\xfa\x98\x1c" + "\x38\x2d\x69\x90\x1c\x71\x38\x98" + "\x9f\xe1\x19\x3b\x63\x91\xaf\x6e" + "\x4b\x07\x2c\xac\x53\xc5\xd5\xfe" + "\x93", + .clen = 33, }, { - .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69", - .klen = 16, - .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" + .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" - "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62", - .alen = 31, - .input = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" + .klen = 16, + .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" + "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d", + .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" "\xf0\x20\x58\x15\x95\xc6\x7f\xee" "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" "\x68\x28\x73\x40\x9f\x96\x4a", - .ilen = 31, - .result = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06" - "\x5c\x7b\xef\x64\x87\x00\xd1\x37" - "\xa7\x08\xbc\x7f\x8f\x41\x54\xd0" - "\x3e\xf1\xc3\xa2\x96\x84\xdd\x2a" - "\x2d\x21\x30\xf9\x02\xdb\x06\x0c" - "\xf1\x5a\x66\x69\xe0\xca\x83", - .rlen = 47, + .alen = 31, + .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" + "\x10\x57\x85\x39\x93\x8f\xaf\x70" + "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" + "\x98\x34\xab\x37\x56\xae\x32", + .plen = 31, + .ctext = 
"\xa0\xe7\x0a\x60\xe7\xb8\x8a\xdb" + "\x94\xd3\x93\xf2\x41\x86\x16\xdd" + "\x4c\xe8\xe7\xe0\x62\x48\x89\x40" + "\xc0\x49\x9b\x63\x32\xec\x8b\xdb" + "\xdc\xa6\xea\x2c\xc2\x7f\xf5\x04" + "\xcb\xe5\x47\xbb\xa7\xd1\x9d", + .clen = 47, }, { - .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70", - .klen = 16, - .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" + .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73" - "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", - .alen = 32, - .input = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" + .klen = 16, + .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" + "\x50\xc4\xde\x82\x90\x21\x11\x73", + .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .ilen = 32, - .result = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2" - "\x70\x57\x37\xc5\xc2\x4f\x8d\x14" - "\xc6\xbf\x8b\xec\xf5\x62\x67\xf2" - "\x2f\xa1\xe6\xd6\xa7\xb1\x8c\x54" - "\xe5\x6b\x49\xf9\x6e\x90\xc3\xaa" - "\x7a\x00\x2e\x4d\x7f\x31\x2e\x81", - .rlen = 48, + .alen = 32, + .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" + "\x91\x31\x37\xcb\x8d\xb3\x72\x76" + "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" + "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", + .plen = 32, + .ctext = "\x62\xdc\x2d\x68\x2d\x71\xbb\x33" + "\x13\xdf\xc0\x46\xf6\x61\x94\xa7" + "\x60\xd3\xd4\xca\xd9\xbe\x82\xf3" + "\xf1\x5b\xa0\xfa\x15\xba\xda\xea" + "\x87\x68\x47\x08\x5d\xdd\x83\xb0" + "\x60\xf4\x93\x20\xdf\x34\x8f\xea", + .clen = 48, }, { - .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76", - .klen = 16, - .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" + .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" - "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58" - "\x1a", - .alen = 33, - .input = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" + .klen = 16, + .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" + "\xd1\x9e\x90\x13\x8a\x45\xd3\x79", + .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" "\x84\x7d\x65\x34\x25\xd8\x47\xfa" "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" - "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" - "\x75\x73\x20\x30\x59\x54\xb2\xf0" - "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" - "\x8a", - .ilen = 65, - .result = "\xc7\xca\x26\x61\x57\xee\xa2\xb9" - "\xb1\x37\xde\x95\x06\x90\x11\x08" - "\x4d\x30\x9f\x24\xc0\x56\xb7\xe1" - "\x0b\x9f\xd2\x57\xe9\xd2\xb1\x76" - "\x56\x9a\xb4\x58\xc5\x08\xfc\xb5" - "\xf2\x31\x9b\xc9\xcd\xb3\x64\xdb" - "\x6f\x50\xbf\xf4\x73\x9d\xfb\x6b" - "\xef\x35\x25\x48\xed\xcf\x29\xa8" - "\xac\xc3\xb9\xcb\x61\x8f\x73\x92" - "\x2c\x7a\x6f\xda\xf9\x09\x6f\xe1" - "\xc4", - .rlen = 81, + "\x9d", + .alen = 33, + .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" + "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" + "\x4f\x2e\xe8\x55\x66\x80\x27\x00" + "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" + "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" + "\x0a\x34\x97\xff\x47\x37\xb0\x2a" + "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" + "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" + "\xbd", + .plen = 65, + .ctext = "\x84\xc5\x21\xab\xe1\xeb\xbb\x6d" + "\xaa\x2a\xaf\xeb\x3b\x3b\x69\xe7" + "\x2c\x47\xef\x9d\xb7\x53\x36\xb7" + "\xb6\xf5\xe5\xa8\xc9\x9e\x02\xd7" + "\x83\x88\xc2\xbd\x2f\xf9\x10\xc0" + "\xf5\xa1\x6e\xd3\x97\x64\x82\xa3" + "\xfb\xda\x2c\xb1\x94\xa1\x58\x32" + "\xe8\xd4\x39\xfc\x9e\x26\xf9\xf1" + 
"\x61\xe6\xae\x07\xf2\xe0\xa7\x44" + "\x96\x28\x3b\xee\x6b\xc6\x16\x31" + "\x3f", + .clen = 81, }, { - .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c", - .klen = 16, - .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" + .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f" - "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e" - "\x28\xce\x57\x34\xcd\x6e\x84\x4c" - "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1" - "\x96\x41\x0d\x69\xe8\x54\x0a\xc8" - "\x15\x4e\x91\x92\x89\x4b\xb7\x9b" - "\x21", - .alen = 65, - .input = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" + .klen = 16, + .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" + "\x52\x79\x42\xa5\x84\x6a\x96\x7f", + .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac", - .ilen = 33, - .result = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b" - "\x2c\x0f\xb4\x7e\x7b\x64\x3e\x40" - "\xf3\x78\x63\x34\x89\x79\x39\x6b" - "\x61\x64\x4a\x9a\xfa\x70\xa4\xd3" - "\x54\x0b\xea\x05\xa6\x95\x64\xed" - "\x3d\x69\xa2\x0c\x27\x56\x2f\x34" - "\x66", - .rlen = 49, + "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" + "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" + "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" + "\x09\x4f\x77\x62\x88\x2d\xf2\x68" + "\x54", + .alen = 65, + .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" + "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" + "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" + "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" + "\x2f", + .plen = 33, + .ctext = "\x8f\x23\x47\xfb\xf2\xac\x23\x83" + "\x77\x09\xac\x74\xef\xd2\x56\xae" + "\x20\x7b\x7b\xca\x45\x8e\xc8\xc2" + "\x50\xbd\xc7\x44\x1c\x54\x98\xd8" + "\x1f\xd0\x9a\x79\xaa\xf9\xe1\xb3" + "\xb4\x98\x5a\x9b\xe4\x4d\xbf\x4e" + "\x39", + .clen = 49, }, { - .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82", - .klen = 16, - .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" + .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" + .klen = 16, + .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .alen = 16, - .input = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" + .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .ilen = 16, - .result = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f" - "\x33\x98\x87\xde\x08\xb6\xb6\xae" - "\x3e\xa4\xf8\x19\xf1\x92\x60\x39" - "\xb9\x6b\x3f\xdf\xc8\xcb\x30", - .rlen = 31, - }, { - .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" + .alen = 16, + .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .klen = 16, - .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" + .plen = 16, + .ctext = "\x42\xc3\x58\xfb\x29\xe2\x4a\x56" + "\xf1\xf5\xe1\x51\x55\x4b\x0a\x45" + "\x46\xb5\x8d\xac\xb6\x34\xd8\x8b" + "\xde\x20\x59\x77\xc1\x74\x90", + .clen = 31, + }, { + .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71" + .klen = 16, + .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .alen = 16, - .input = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" + .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .ilen = 16, - .result = "\x74\x7d\x70\x07\xe9\xba\x01\xee" - "\x6c\xc6\x6f\x50\x25\x33\xbe\x50" - "\x17\xb8\x17\x62\xed\x80\xa2\xf5" - "\x03\xde\x85\x71\x5d\x34", - .rlen = 30, - }, { - .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" + .alen = 16, + .ptext = 
"\x70\x4c\x2f\x50\x72\x1c\x29\x7f" "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .klen = 16, - .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" + .plen = 16, + .ctext = "\xb2\xfb\xf6\x97\x69\x7a\xe9\xec" + "\xe2\x94\xa1\x8b\xa0\x2b\x60\x72" + "\x1d\x04\xdd\x6a\xef\x46\x8f\x68" + "\xe9\xe0\x17\x45\x70\x12", + .clen = 30, + }, { + .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" + .klen = 16, + .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .alen = 16, - .input = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" + .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .ilen = 16, - .result = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38" - "\x29\xfd\x6c\x7c\x49\xe5\x1d\xaf" - "\xba\xea\xd4\xfa\x3f\x11\x33\x98", - .rlen = 24, - }, { - .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .klen = 16, - .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22" - "\x36\xab\xde\xc6\x6d\x32\x70\x17", - .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9" - "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98", .alen = 16, - .input = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" - "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a", - .ilen = 16, - .result = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86" - "\xe1\xb7\xa5\xc3\x32\x88\x3c\x8c" - "\x6e", - .rlen = 17, + .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b" + "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", + .plen = 16, + .ctext = "\x47\xda\x54\x42\x51\x72\xc4\x8b" + "\xf5\x57\x0f\x2f\x49\x0e\x11\x3b" + "\x78\x93\xec\xfc\xf4\xff\xe1\x2d", + .clen = 24, }, }; -static const struct aead_testvec morus640_dec_tv_template[] = { +/* + * AEGIS-128L test vectors - generated via reference implementation from + * SUPERCOP (https://bench.cr.yp.to/supercop.html): + * + * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz + * (see crypto_aead/aegis128l/) + */ +static const struct aead_testvec aegis128l_tv_template[] = { { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" + .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" "\x20\x36\x2c\x24\xfe\xc9\x30\x81", + .klen = 16, + .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" + "\x40\x6d\x59\x48\xfc\x92\x61\x03", .assoc = "", .alen = 0, - .input = "\x89\x62\x7d\xf3\x07\x9d\x52\x05" - "\x53\xc3\x04\x60\x93\xb4\x37\x9a", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "", + .plen = 0, + .ctext = "\x30\x4f\xf3\xe9\xb1\xfa\x81\xa6" + "\x20\x72\x78\xdd\x93\xc8\x57\xef", + .clen = 16, }, { - .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b" - "\x80\xda\xb2\x91\xf9\x24\xc2\x06", - .klen = 16, - .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" + .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", + .klen = 16, + .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" + "\xc1\x47\x0b\xda\xf6\xb6\x23\x09", .assoc = "", .alen = 0, - .input = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78" - "\xb6\x10\x9a\x59\x5f\x61\x37\x70" - "\x09", - .ilen = 17, - .result = "\x69", - .rlen = 1, + .ptext = "\x79", + .plen = 1, + .ctext = "\xa9\x24\xa0\xb6\x2d\xdd\x29\xdb" + "\x40\xb3\x71\xc5\x22\x58\x31\x77" + "\x6d", + .clen = 17, }, { - .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37" - "\x01\xb4\x64\x22\xf3\x48\x85\x0c", - .klen = 16, - .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" + .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" "\x22\xea\x90\x47\xf2\x11\xb5\x8e", + .klen = 16, + .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" + "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f", .assoc = "", .alen = 0, - .input = "\x76\xdd\xb9\x05\x3d\xce\x61\x38" - "\xf3\xef\xf7\xe5\xd7\xfd\x70\xa5" - 
"\xcf\x9d\x64\xb8\x0a\x9f\xfd\x8b" - "\xd4\x6e\xfe\xd9\xc8\x63\x4b", - .ilen = 31, - .result = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" - "\x62\x58\xe9\x8f\xef\xa4\x17", - .rlen = 15, + .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" + "\x82\x8e\x16\xb4\xed\x6d\x47", + .plen = 15, + .ctext = "\xbb\x0a\x53\xc4\xaa\x7e\xa4\x03" + "\x2b\xee\x62\x99\x7b\x98\x13\x1f" + "\xe0\x76\x4c\x2e\x53\x99\x4f\xbe" + "\xe1\xa8\x04\x7f\xe1\x71\xbe", + .clen = 31, }, { - .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47\x12", - .klen = 16, - .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" + .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" "\xa2\xc5\x42\xd8\xec\x36\x78\x94", + .klen = 16, + .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" + "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15", .assoc = "", .alen = 0, - .input = "\xdc\x72\xe8\x14\xfb\x63\xad\x72" - "\x1f\x57\x9a\x1f\x88\x81\xdb\xd6" - "\xc1\x91\x9d\xb9\x25\xc4\x99\x4c" - "\x97\xcd\x8a\x0c\x9d\x68\x00\x1c", - .ilen = 32, - .result = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" - "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97", - .rlen = 16, - }, { - .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" + .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .klen = 16, - .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" + .plen = 16, + .ctext = "\x66\xdf\x6e\x71\xc0\x6e\xa4\x4c" + "\x9d\xb7\x8c\x9a\xdb\x1f\xd2\x2e" + "\x23\xb6\xa4\xfb\xd3\x86\xdd\xbb" + "\xde\x54\x9b\xf5\x92\x8b\x93\xc5", + .clen = 32, + }, { + .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", + .klen = 16, + .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" + "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c", .assoc = "", .alen = 0, - .input = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82" - "\x0a\xb8\x55\xee\xeb\x73\x4d\x7f" - "\x54\x11\x3a\x8a\x31\xa3\xb5\xf2" - "\xcd\x49\xdb\xf3\xee\x26\xbd\xa2" - "\x0d", - .ilen = 33, - .result = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" - "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" - "\x09", - .rlen = 17, + .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" + "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" + "\xd3", + .plen = 17, + .ctext = "\x4f\xc3\x69\xb6\xd3\xa4\x64\x8b" + "\x71\xc3\x8a\x91\x22\x4f\x1b\xd2" + "\x33\x6d\x86\xbc\xf8\x2f\x06\xf9" + "\x82\x64\xc7\x72\x00\x30\xfc\xf0" + "\xf8", + .clen = 33, }, { - .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f", - .klen = 16, - .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12" + .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", + .klen = 16, + .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" + "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22", .assoc = "", .alen = 0, - .input = "\x59\xd1\x0f\x6b\xee\x27\x84\x92" - "\xb7\xa9\xb5\xdd\x02\xa4\x12\xa5" - "\x50\x32\xb4\x9a\x2e\x35\x83\x55" - "\x36\x12\x12\xed\xa3\x31\xc5\x30" - "\xa7\xe2\x4a\x6d\x05\x59\x43\x91" - "\x75\xfa\x6c\x17\xc6\x73\xca", - .ilen = 47, - .result = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" - "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" - "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" - "\x57\x05\x01\x1c\x66\x22\xd3", - .rlen = 31, + .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" + "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" + "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" + "\x88\x11\x39\x12\x1c\x3a\xbb", + .plen = 31, + .ctext = "\xe3\x93\x15\xae\x5f\x9d\x3c\xb5" + "\xd6\x9d\xee\xee\xcf\xaa\xaf\xe1" + "\x45\x10\x96\xe0\xbf\x55\x0f\x4c" + "\x1a\xfd\xf4\xda\x4e\x10\xde\xc9" + "\x0e\x6f\xc7\x3c\x49\x94\x41\xfc" + "\x59\x28\x88\x3c\x79\x10\x6b", + .clen = 47, }, { - .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25", - .klen = 16, - .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" + .key = 
"\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", + .klen = 16, + .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" + "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28", .assoc = "", .alen = 0, - .input = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1" - "\xcf\x50\xb2\x4c\x32\xe1\xa6\x69" - "\xc0\xfb\x44\x1f\xa0\x9a\xeb\x39" - "\x1b\xde\x68\x38\xcc\x27\x52\xc5" - "\xf6\x3e\x74\xea\x66\x5b\x5f\x0c" - "\x65\x9e\x58\xe6\x52\xa2\xfe\x59", - .ilen = 48, - .result = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" - "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" - "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" - "\x19\x33\xe0\xf4\x40\x81\x72\x28", - .rlen = 32, + .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" + "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" + "\x28\x50\x51\x9d\x24\x60\x8d\xb3" + "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", + .plen = 32, + .ctext = "\x1c\x8e\x22\x34\xfd\xab\xe6\x0d" + "\x1c\x9f\x06\x54\x8b\x0b\xb4\x40" + "\xde\x11\x59\x3e\xfd\x74\xf6\x42" + "\x97\x17\xf7\x24\xb6\x7e\xc4\xc6" + "\x06\xa3\x94\xda\x3d\x7f\x55\x0a" + "\x92\x07\x2f\xa6\xf3\x6b\x2c\xfc", + .clen = 48, }, { - .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b", - .klen = 16, - .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" + .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .assoc = "\xc5", + .klen = 16, + .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" + "\xc6\x64\x37\x42\xd2\x90\xb3\x2e", + .assoc = "\xd5", .alen = 1, - .input = "\x56\xe7\x24\x52\xdd\x95\x60\x5b" - "\x09\x48\x39\x69\x9c\xb3\x62\x46", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "", + .plen = 0, + .ctext = "\xa0\x2a\xb4\x9a\x91\x00\x15\xb8" + "\x0f\x9a\x15\x60\x0e\x9b\x13\x8f", + .clen = 16, }, { - .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde" - "\x07\xd1\x90\x8b\xcf\x23\x15\x31", - .klen = 16, - .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" + .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" - "\x47\x3e\xe9\xd4\xcc\xb5\x76", + .klen = 16, + .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" + "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34", + .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" + "\x68\x75\x16\xf8\xcb\x7e\xa7", .alen = 15, - .input = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01" - "\x13\xe5\x73\x46\x46\xf2\x5c\xe1", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "", + .plen = 0, + .ctext = "\x4c\x26\xad\x9c\x14\xfd\x9c\x8c" + "\x84\xfb\x26\xfb\xd5\xca\x62\x39", + .clen = 16, }, { - .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa" - "\x88\xab\x42\x1c\xc9\x47\xd7\x38", - .klen = 16, - .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" + .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08" + .klen = 16, + .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", + .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" + "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", .alen = 16, - .input = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac" - "\xa9\x21\x45\x0b\x16\x52\xf7\xe1", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "", + .plen = 0, + .ctext = "\x45\x85\x0e\x0f\xf4\xae\x96\xa1" + "\x99\x4d\x6d\xb4\x67\x32\xb0\x3a", + .clen = 16, }, { - .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16" - "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e", - .klen = 16, - .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" + .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24" - "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" - "\x3c", + .klen = 16, + .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" + "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41", + .assoc = 
"\x8a\xca\xec\x70\x6d\xb1\x96\xab" + "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" + "\x07", .alen = 17, - .input = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9" - "\xbb\xa8\x62\xad\x0a\xf5\x48\x60", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "", + .plen = 0, + .ctext = "\x33\xb1\x42\x97\x8e\x16\x7b\x63" + "\x06\xba\x5b\xcb\xae\x6d\x8b\x56", + .clen = 16, }, { - .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31" - "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44", - .klen = 16, - .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" + .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" - "\xca\xcd\xff\x88\xba\x22\xbe\x47" - "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7", + .klen = 16, + .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" + "\xca\xcd\xff\x88\xba\x22\xbe\x47", + .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" + "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" + "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" + "\xe0\x17\x3a\x2e\x83\x5c\x8f", .alen = 31, - .input = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d" - "\x4b\xef\x75\x16\x0a\x99\xae\x6b", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "", + .plen = 0, + .ctext = "\xda\x44\x08\x8c\x2a\xa5\x07\x35" + "\x0b\x54\x4e\x6d\xe3\xfd\xc4\x5f", + .clen = 16, }, { - .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d" - "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a", - .klen = 16, - .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" + .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" - "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" - "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", + .klen = 16, + .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" + "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d", + .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" + "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" + "\x5c\x2d\x14\x96\x01\x78\xb9\x47" + "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", .alen = 32, - .input = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f" - "\xce\xfb\xaf\xd6\xc2\xe6\xee\xc0", - .ilen = 16, - .result = "", - .rlen = 0, + .ptext = "", + .plen = 0, + .ctext = "\x1b\xb1\xf1\xa8\x9e\xc2\xb2\x88" + "\x40\x7f\x7b\x19\x7a\x52\x8c\xf0", + .clen = 16, }, { - .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69" - "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50", - .klen = 16, - .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" + .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .assoc = "\x31", + .klen = 16, + .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" + "\xcc\x81\x63\xab\xae\x6b\x43\x54", + .assoc = "\x40", .alen = 1, - .input = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38" - "\x01\xfe\x84\x14\x85\xf8\xd1\xe3" - "\x22", - .ilen = 17, - .result = "\x40", - .rlen = 1, + .ptext = "\x4f", + .plen = 1, + .ctext = "\x6e\xc8\xfb\x15\x9d\x98\x49\xc9" + "\xa0\x98\x09\x85\xbe\x56\x8e\x79" + "\xf4", + .clen = 17, }, { - .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85" - "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57", - .klen = 16, - .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" + .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93" - "\x4d\x5b\x15\x3c\xa8\x8f\x06", - .alen = 15, - .input = "\x77\x32\x61\xeb\xb4\x33\x29\x92" - "\x29\x95\xc5\x8e\x85\x76\xab\xfc" - "\x07\x95\xa7\x44\x74\xf7\x22\xff" - "\xd8\xd8\x36\x3d\x8a\x7f\x9e", - .ilen = 31, - .result = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" + .klen = 16, + .iv = "\x6d\x94\x44\x86\x69\x00\x8f\x93" + "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a", + .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" "\x6d\x92\x42\x61\xa7\x58\x37", - .rlen = 15, + .alen = 15, + .ptext = 
"\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" + "\x8d\xc8\x6e\x85\xa5\x21\x67", + .plen = 15, + .ctext = "\x99\x2e\x84\x50\x64\x5c\xab\x29" + "\x20\xba\xb9\x2f\x62\x3a\xce\x2a" + "\x75\x25\x3b\xe3\x40\xe0\x1d\xfc" + "\x20\x63\x0b\x49\x7e\x97\x08", + .clen = 31, }, { - .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d", - .klen = 16, - .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" + .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" + .klen = 16, + .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", - .alen = 16, - .input = "\xd8\xfd\x44\x45\xf6\x42\x12\x38" - "\xf2\x0b\xea\x4f\x9e\x11\x61\x07" - "\x48\x67\x98\x18\x9b\xd0\x0c\x59" - "\x67\xa4\x11\xb3\x2b\xd6\xc1\x70", - .ilen = 32, - .result = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" + .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", - .rlen = 16, - }, { - .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" + .alen = 16, + .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .klen = 16, - .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44" + .plen = 16, + .ctext = "\xd9\x8e\xfd\x50\x8f\x02\x9f\xee" + "\x78\x08\x12\xec\x09\xaf\x53\x14" + "\x90\x3e\x3d\x76\xad\x71\x21\x08" + "\x77\xe5\x4b\x15\xc2\xe6\xbc\xdb", + .clen = 32, + }, { + .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" - "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" - "\x3b", - .alen = 17, - .input = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6" - "\x71\x3a\x00\x9f\x41\x88\xb0\xb2" - "\x71\x83\x85\x5f\xc8\x79\x0a\x99" - "\x99\xdc\x89\x1c\x88\xd2\x3e\xf9" - "\x83", - .ilen = 33, - .result = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" + .klen = 16, + .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" + "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66", + .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" "\x05", - .rlen = 17, + .alen = 17, + .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8" + "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" + "\xd0", + .plen = 17, + .ctext = "\xf3\xe7\x95\x86\xcf\x34\x95\x96" + "\x17\xfe\x1b\xae\x1b\x31\xf2\x1a" + "\xbd\xbc\xc9\x4e\x11\x29\x09\x5c" + "\x05\xd3\xb4\x2e\x4a\x74\x59\x49" + "\x7d", + .clen = 33, }, { - .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69", - .klen = 16, - .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" + .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" - "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" - "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62", - .alen = 31, - .input = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06" - "\x5c\x7b\xef\x64\x87\x00\xd1\x37" - "\xa7\x08\xbc\x7f\x8f\x41\x54\xd0" - "\x3e\xf1\xc3\xa2\x96\x84\xdd\x2a" - "\x2d\x21\x30\xf9\x02\xdb\x06\x0c" - "\xf1\x5a\x66\x69\xe0\xca\x83", - .ilen = 47, - .result = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" + .klen = 16, + .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" + "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d", + .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" "\xf0\x20\x58\x15\x95\xc6\x7f\xee" "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" "\x68\x28\x73\x40\x9f\x96\x4a", - .rlen = 31, + .alen = 31, + .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" + "\x10\x57\x85\x39\x93\x8f\xaf\x70" + "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" + "\x98\x34\xab\x37\x56\xae\x32", + .plen = 31, + .ctext = "\x06\x96\xb2\xbf\x63\xf4\x1e\x24" + "\x0d\x19\x15\x61\x65\x3b\x06\x26" + "\x71\xe8\x7e\x16\xdb\x96\x01\x01" + "\x52\xcd\x49\x5b\x07\x33\x4e\xe7" + 
"\xaa\x91\xf5\xd5\xc6\xfe\x41\xb5" + "\xed\x90\xce\xb9\xcd\xcc\xa1", + .clen = 47, }, { - .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70", - .klen = 16, - .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" + .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" - "\x50\xc4\xde\x82\x90\x21\x11\x73" - "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", - .alen = 32, - .input = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2" - "\x70\x57\x37\xc5\xc2\x4f\x8d\x14" - "\xc6\xbf\x8b\xec\xf5\x62\x67\xf2" - "\x2f\xa1\xe6\xd6\xa7\xb1\x8c\x54" - "\xe5\x6b\x49\xf9\x6e\x90\xc3\xaa" - "\x7a\x00\x2e\x4d\x7f\x31\x2e\x81", - .ilen = 48, - .result = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" + .klen = 16, + .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" + "\x50\xc4\xde\x82\x90\x21\x11\x73", + .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" "\x29\x56\x52\x19\x79\xf5\xe9\x37", - .rlen = 32, + .alen = 32, + .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" + "\x91\x31\x37\xcb\x8d\xb3\x72\x76" + "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" + "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", + .plen = 32, + .ctext = "\xf9\xd7\xee\x17\xfd\x24\xcd\xf1" + "\xbc\x0f\x35\x97\x97\x0c\x4b\x18" + "\xce\x58\xc8\x3b\xd4\x85\x93\x79" + "\xcc\x9c\xea\xc1\x73\x13\x0b\x4c" + "\xcc\x6f\x28\xf8\xa4\x4e\xb8\x56" + "\x64\x4e\x47\xce\xb2\xb4\x92\xb4", + .clen = 48, }, { - .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76", - .klen = 16, - .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" + .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" - "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" - "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58" - "\x1a", - .alen = 33, - .input = "\xc7\xca\x26\x61\x57\xee\xa2\xb9" - "\xb1\x37\xde\x95\x06\x90\x11\x08" - "\x4d\x30\x9f\x24\xc0\x56\xb7\xe1" - "\x0b\x9f\xd2\x57\xe9\xd2\xb1\x76" - "\x56\x9a\xb4\x58\xc5\x08\xfc\xb5" - "\xf2\x31\x9b\xc9\xcd\xb3\x64\xdb" - "\x6f\x50\xbf\xf4\x73\x9d\xfb\x6b" - "\xef\x35\x25\x48\xed\xcf\x29\xa8" - "\xac\xc3\xb9\xcb\x61\x8f\x73\x92" - "\x2c\x7a\x6f\xda\xf9\x09\x6f\xe1" - "\xc4", - .ilen = 81, - .result = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" + .klen = 16, + .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" + "\xd1\x9e\x90\x13\x8a\x45\xd3\x79", + .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" "\x84\x7d\x65\x34\x25\xd8\x47\xfa" "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" - "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" - "\x75\x73\x20\x30\x59\x54\xb2\xf0" - "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" - "\x8a", - .rlen = 65, - }, { - .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c", - .klen = 16, - .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - "\x32\x42\x15\x80\x85\xa1\x65\xfe", - .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" - "\x52\x79\x42\xa5\x84\x6a\x96\x7f" - "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e" - "\x28\xce\x57\x34\xcd\x6e\x84\x4c" - "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1" - "\x96\x41\x0d\x69\xe8\x54\x0a\xc8" - "\x15\x4e\x91\x92\x89\x4b\xb7\x9b" - "\x21", - .alen = 65, - .input = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b" - "\x2c\x0f\xb4\x7e\x7b\x64\x3e\x40" - "\xf3\x78\x63\x34\x89\x79\x39\x6b" - "\x61\x64\x4a\x9a\xfa\x70\xa4\xd3" - "\x54\x0b\xea\x05\xa6\x95\x64\xed" - "\x3d\x69\xa2\x0c\x27\x56\x2f\x34" - "\x66", - .ilen = 49, - .result = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" + 
"\x9d", + .alen = 33, + .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" + "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" + "\x4f\x2e\xe8\x55\x66\x80\x27\x00" + "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" + "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" + "\x0a\x34\x97\xff\x47\x37\xb0\x2a" + "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" + "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" + "\xbd", + .plen = 65, + .ctext = "\x58\xfa\x3a\x3d\xd9\x88\x63\xe8" + "\xc5\x78\x50\x8b\x4a\xc9\xdf\x7f" + "\x4b\xfa\xc8\x2e\x67\x43\xf3\x63" + "\x42\x8e\x99\x5a\x9c\x0b\x84\x77" + "\xbc\x46\x76\x48\x82\xc7\x57\x96" + "\xe1\x65\xd1\xed\x1d\xdd\x80\x24" + "\xa6\x4d\xa9\xf1\x53\x8b\x5e\x0e" + "\x26\xb9\xcc\x37\xe5\x43\xe1\x5a" + "\x8a\xd6\x8c\x5a\xe4\x95\xd1\x8d" + "\xf7\x33\x64\xc1\xd3\xf2\xfc\x35" + "\x01", + .clen = 81, + }, { + .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" + "\x32\x42\x15\x80\x85\xa1\x65\xfe", + .klen = 16, + .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" + "\x52\x79\x42\xa5\x84\x6a\x96\x7f", + .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac", - .rlen = 33, + "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" + "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" + "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" + "\x09\x4f\x77\x62\x88\x2d\xf2\x68" + "\x54", + .alen = 65, + .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" + "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" + "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" + "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" + "\x2f", + .plen = 33, + .ctext = "\x4c\xa9\xac\x71\xed\x10\xa6\x24" + "\xb7\xa7\xdf\x8b\xf5\xc2\x41\xcb" + "\x05\xc9\xd6\x97\xb6\x10\x7f\x17" + "\xc2\xc0\x93\xcf\xe0\x94\xfd\x99" + "\xf2\x62\x25\x28\x01\x23\x6f\x8b" + "\x04\x52\xbc\xb0\x3e\x66\x52\x90" + "\x9f", + .clen = 49, }, { - .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82", - .klen = 16, - .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" + .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" + .klen = 16, + .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", - .alen = 16, - .input = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f" - "\x33\x98\x87\xde\x08\xb6\xb6\xae" - "\x3e\xa4\xf8\x19\xf1\x92\x60\x39" - "\xb9\x6b\x3f\xdf\xc8\xcb\x30", - .ilen = 31, - .result = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" + .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" "\xf3\x89\x20\x5b\x7c\x57\x89\x07", - .rlen = 16, - }, { - .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" + .alen = 16, + .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .klen = 16, - .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" + .plen = 16, + .ctext = "\x6d\xed\x04\x7a\x2f\x0c\x30\xa5" + "\x96\xe6\x97\xe4\x10\xeb\x40\x95" + "\xc5\x9a\xdf\x31\xd5\xa5\xa6\xec" + "\x05\xa8\x31\x50\x11\x19\x44", + .clen = 31, + }, { + .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71" + .klen = 16, + .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", - .alen = 16, - .input = "\x74\x7d\x70\x07\xe9\xba\x01\xee" - "\x6c\xc6\x6f\x50\x25\x33\xbe\x50" - "\x17\xb8\x17\x62\xed\x80\xa2\xf5" - "\x03\xde\x85\x71\x5d\x34", - .ilen = 30, - .result = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" + .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", - .rlen = 16, - }, { - .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" + .alen = 16, + .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .klen = 16, - .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" + .plen 
= 16, + .ctext = "\x30\x95\x7d\xea\xdc\x62\xc0\x88" + "\xa1\xe3\x8d\x8c\xac\x04\x10\xa7" + "\xfa\xfa\x07\xbd\xa0\xf0\x36\xeb" + "\x21\x93\x2e\x31\x84\x83", + .clen = 30, + }, { + .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" + .klen = 16, + .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" "\xd5\x07\x58\x59\x72\xd7\xde\x92", - .alen = 16, - .input = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38" - "\x29\xfd\x6c\x7c\x49\xe5\x1d\xaf" - "\xba\xea\xd4\xfa\x3f\x11\x33\x98", - .ilen = 24, - .result = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" + .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", - .rlen = 16, - }, { - .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b" - "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", - .klen = 16, - .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22" - "\x36\xab\xde\xc6\x6d\x32\x70\x17", - .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9" - "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98", .alen = 16, - .input = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86" - "\xe1\xb7\xa5\xc3\x32\x88\x3c\x8c" - "\x6e", - .ilen = 17, - .result = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" - "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a", - .rlen = 16, + .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b" + "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", + .plen = 16, + .ctext = "\x93\xcd\xee\xd4\xcb\x9d\x8d\x16" + "\x63\x0d\x43\xd5\x49\xca\xa8\x85" + "\x49\xc0\xae\x13\xbc\x26\x1d\x4b", + .clen = 24, }, }; /* - * MORUS-1280 test vectors - generated via reference implementation from + * AEGIS-256 test vectors - generated via reference implementation from * SUPERCOP (https://bench.cr.yp.to/supercop.html): * * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz - * (see crypto_aead/morus1280128v2/ and crypto_aead/morus1280256v2/ ) + * (see crypto_aead/aegis256/) */ -static const struct aead_testvec morus1280_enc_tv_template[] = { +static const struct aead_testvec aegis256_tv_template[] = { { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" - "\x20\x36\x2c\x24\xfe\xc9\x30\x81", + .key = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" + "\x20\x36\x2c\x24\xfe\xc9\x30\x81" + "\xca\xb0\x82\x21\x41\xa8\xe0\x06" + "\x30\x0b\x37\xf6\xb6\x17\xe7\xb5", + .klen = 32, + .iv = "\x1e\x92\x1c\xcf\x88\x3d\x54\x0d" + "\x40\x6d\x59\x48\xfc\x92\x61\x03" + "\x95\x61\x05\x42\x82\x50\xc0\x0c" + "\x60\x16\x6f\xec\x6d\x2f\xcf\x6b", .assoc = "", .alen = 0, - .input = "", - .ilen = 0, - .result = "\x91\x85\x0f\xf5\x52\x9e\xce\xce" - "\x65\x99\xc7\xbf\xd3\x76\xe8\x98", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\xd5\x65\x3a\xa9\x03\x51\xd7\xaa" + "\xfa\x4b\xd8\xa2\x41\x9b\xc1\xb2", + .clen = 16, }, { - .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b" - "\x80\xda\xb2\x91\xf9\x24\xc2\x06", - .klen = 16, - .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" - "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", + .key = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" + "\xa1\x10\xde\xb5\xf8\xed\xf3\x87" + "\xf4\x72\x8e\xa5\x46\x48\x62\x20" + "\xf1\x38\x16\xce\x90\x76\x87\x8c", + .klen = 32, + .iv = "\x5a\xb7\x56\x6e\x98\xb9\xfd\x29" + "\xc1\x47\x0b\xda\xf6\xb6\x23\x09" + "\xbf\x23\x11\xc6\x87\xf0\x42\x26" + "\x22\x44\x4e\xc4\x47\x8e\x6e\x41", .assoc = "", .alen = 0, - .input = "\x69", - .ilen = 1, - .result = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13" - "\x96\xda\x76\x34\x33\x4e\xd5\x39" - "\x73", - .rlen = 17, + .ptext = "\x79", + .plen = 1, + .ctext = "\x84\xa2\x8f\xad\xdb\x8d\x2c\x16" + "\x9e\x89\xd9\x06\xa6\xa8\x14\x29" + "\x8b", + .clen = 17, }, { - .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37" - 
"\x01\xb4\x64\x22\xf3\x48\x85\x0c", - .klen = 16, - .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" - "\x22\xea\x90\x47\xf2\x11\xb5\x8e", + .key = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" + "\x22\xea\x90\x47\xf2\x11\xb5\x8e" + "\x1f\x35\x9a\x29\x4b\xe8\xe4\x39" + "\xb3\x66\xf5\xa6\x6a\xd5\x26\x62", + .klen = 32, + .iv = "\x97\xdb\x90\x0e\xa8\x35\xa5\x45" + "\x42\x21\xbd\x6b\xf0\xda\xe6\x0f" + "\xe9\xe5\x1d\x4a\x8c\x90\xc4\x40" + "\xe3\x71\x2d\x9c\x21\xed\x0e\x18", .assoc = "", .alen = 0, - .input = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" - "\x62\x58\xe9\x8f\xef\xa4\x17\x91" - "\xb4\x96\x9f\x6b\xce\x38\xa5\x46" - "\x13\x7d\x64\x93\xd7\x05\xf5", - .ilen = 31, - .result = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22" - "\x75\x0b\x24\xa6\x0e\xc3\xde\x52" - "\x97\x0b\x64\xd4\xce\x90\x52\xf7" - "\xef\xdb\x6a\x38\xd2\xa8\xa1\x0d" - "\xe0\x61\x33\x24\xc6\x4d\x51\xbc" - "\xa4\x21\x74\xcf\x19\x16\x59", - .rlen = 47, + .ptext = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" + "\x82\x8e\x16\xb4\xed\x6d\x47", + .plen = 15, + .ctext = "\x09\x94\x1f\xa6\x13\xc3\x74\x75" + "\x17\xad\x8a\x0e\xd8\x66\x9a\x28" + "\xd7\x30\x66\x09\x2a\xdc\xfa\x2a" + "\x9f\x3b\xd7\xdd\x66\xd1\x2b", + .clen = 31, }, { - .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" - "\x82\x8e\x16\xb4\xed\x6d\x47\x12", - .klen = 16, - .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" - "\xa2\xc5\x42\xd8\xec\x36\x78\x94", + .key = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" + "\xa2\xc5\x42\xd8\xec\x36\x78\x94" + "\x49\xf7\xa5\xad\x50\x88\x66\x53" + "\x74\x94\xd4\x7f\x44\x34\xc5\x39", + .klen = 32, + .iv = "\xd3\x00\xc9\xad\xb8\xb0\x4e\x61" + "\xc3\xfb\x6f\xfd\xea\xff\xa9\x15" + "\x14\xa8\x28\xce\x92\x30\x46\x59" + "\xa4\x9f\x0b\x75\xfb\x4c\xad\xee", .assoc = "", .alen = 0, - .input = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" - "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97" - "\xde\x58\xab\xf0\xd3\xd8\x27\x60" - "\xd5\xaa\x43\x6b\xb1\x64\x95\xa4", - .ilen = 32, - .result = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f" - "\xde\x9f\x77\xb2\xda\x92\x61\x5c" - "\x09\x0b\x2d\x9a\x26\xaa\x1c\x06" - "\xab\x74\xb7\x2b\x95\x5f\x9f\xa1" - "\x9a\xff\x50\xa0\xa2\xff\xc5\xad" - "\x21\x8e\x84\x5c\x12\x61\xb2\xae", - .rlen = 48, - }, { - .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" + .ptext = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" "\x03\x68\xc8\x45\xe7\x91\x0a\x18", - .klen = 16, - .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" - "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", + .plen = 16, + .ctext = "\x8a\x46\xa2\x22\x8c\x03\xab\x6f" + "\x54\x63\x4e\x7f\xc9\x8e\xfa\x70" + "\x7b\xe5\x8d\x78\xbc\xe9\xb6\xa1" + "\x29\x17\xc8\x3b\x52\xa4\x98\x72", + .clen = 32, + }, { + .key = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" + "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a" + "\x74\xb9\xb1\x32\x55\x28\xe8\x6d" + "\x35\xc1\xb3\x57\x1f\x93\x64\x0f", + .klen = 32, + .iv = "\x10\x25\x03\x4c\xc8\x2c\xf7\x7d" + "\x44\xd5\x21\x8e\xe4\x23\x6b\x1c" + "\x3e\x6a\x34\x53\x97\xd0\xc8\x73" + "\x66\xcd\xea\x4d\xd5\xab\x4c\xc5", .assoc = "", .alen = 0, - .input = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" - "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" - "\x09\x1a\xb7\x74\xd8\x78\xa9\x79" - "\x96\xd8\x22\x43\x8c\xc3\x34\x7b" - "\xc4", - .ilen = 33, - .result = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17" - "\xc1\x1d\x2a\xdd\x88\x67\xda\x1f" - "\x6d\xe8\x37\x28\x5a\xc1\x5e\x9f" - "\xa6\xec\xc6\x92\x05\x4b\xc0\xa3" - "\x63\xef\x88\xa4\x9b\x0a\x5c\xed" - "\x2b\x6a\xac\x63\x52\xaa\x10\x94" - "\xd0", - .rlen = 49, + .ptext = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" + "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f" + "\xd3", + .plen = 17, + .ctext = "\x71\x6b\x37\x0b\x02\x61\x28\x12" + "\x83\xab\x66\x90\x84\xc7\xd1\xc5" + "\xb2\x7a\xb4\x7b\xb4\xfe\x02\xb2" + 
"\xc0\x00\x39\x13\xb5\x51\x68\x44" + "\xad", + .clen = 33, }, { - .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" - "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f", - .klen = 16, - .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12" - "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", + .key = "\x3d\x80\xae\x84\x94\x09\xf6\x12" + "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0" + "\x9e\x7c\xbc\xb6\x5b\xc8\x6a\x86" + "\xf7\xef\x91\x30\xf9\xf2\x04\xe6", + .klen = 32, + .iv = "\x4c\x49\x3d\xec\xd8\xa8\xa0\x98" + "\xc5\xb0\xd3\x1f\xde\x48\x2e\x22" + "\x69\x2c\x3f\xd7\x9c\x70\x4a\x8d" + "\x27\xfa\xc9\x26\xaf\x0a\xeb\x9c", .assoc = "", .alen = 0, - .input = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" - "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" - "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" - "\x57\x05\x01\x1c\x66\x22\xd3\x51" - "\xd3\xdf\x18\xc9\x30\x66\xed\xb1" - "\x96\x58\xd5\x8c\x64\x8c\x7c\xf5" - "\x01\xd0\x74\x5f\x9b\xaa\xf6\xd1" - "\xe6\x16\xa2\xac\xde\x47\x40", - .ilen = 63, - .result = "\x7d\x61\x1a\x35\x20\xcc\x07\x88" - "\x03\x98\x87\xcf\xc0\x6e\x4d\x19" - "\xe3\xd4\x0b\xfb\x29\x8f\x49\x1a" - "\x3a\x06\x77\xce\x71\x2c\xcd\xdd" - "\xed\xf6\xc9\xbe\xa6\x3b\xb8\xfc" - "\x6c\xbe\x77\xed\x74\x0e\x20\x85" - "\xd0\x65\xde\x24\x6f\xe3\x25\xc5" - "\xdf\x5b\x0f\xbd\x8a\x88\x78\xc9" - "\xe5\x81\x37\xde\x84\x7a\xf6\x84" - "\x99\x7a\x72\x9c\x54\x31\xa1", - .rlen = 79, - }, { - .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" - "\x05\x1d\x2c\x68\xdb\xda\x8f\x25", - .klen = 16, - .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" - "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", + .ptext = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" + "\x05\x1d\x2c\x68\xdb\xda\x8f\x25" + "\xfe\x8d\x45\x19\x1e\xc0\x0b\x99" + "\x88\x11\x39\x12\x1c\x3a\xbb", + .plen = 31, + .ctext = "\xaf\xa4\x34\x0d\x59\xe6\x1c\x2f" + "\x06\x3b\x52\x18\x49\x75\x1b\xf0" + "\x53\x09\x72\x7b\x45\x79\xe0\xbe" + "\x89\x85\x23\x15\xb8\x79\x07\x4c" + "\x53\x7a\x15\x37\x0a\xee\xb7\xfb" + "\xc4\x1f\x12\x27\xcf\x77\x90", + .clen = 47, + }, { + .key = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" + "\x25\x53\x58\x8c\xda\xa3\xc0\xa6" + "\xc8\x3e\xc8\x3a\x60\x68\xec\xa0" + "\xb8\x1c\x70\x08\xd3\x51\xa3\xbd", + .klen = 32, + .iv = "\x89\x6e\x77\x8b\xe8\x23\x49\xb4" + "\x45\x8a\x85\xb1\xd8\x6c\xf1\x28" + "\x93\xef\x4b\x5b\xa1\x10\xcc\xa6" + "\xe8\x28\xa8\xfe\x89\x69\x8b\x72", .assoc = "", .alen = 0, - .input = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" - "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" - "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" - "\x19\x33\xe0\xf4\x40\x81\x72\x28" - "\xe1\x8b\x1c\xf8\x91\x78\xff\xaf" - "\xb0\x68\x69\xf2\x27\x35\x91\x84" - "\x2e\x37\x5b\x00\x04\xff\x16\x9c" - "\xb5\x19\x39\xeb\xd9\xcd\x29\x9a", - .ilen = 64, - .result = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c" - "\xa5\x07\x12\xa7\x12\x39\x60\x66" - "\x30\x81\x4a\x03\x78\x28\x45\x52" - "\xd2\x2b\x24\xfd\x8b\xa5\xb7\x66" - "\x6f\x45\xd7\x3b\x67\x6f\x51\xb9" - "\xc0\x3d\x6c\xca\x1e\xae\xff\xb6" - "\x79\xa9\xe4\x82\x5d\x4c\x2d\xdf" - "\xeb\x71\x40\xc9\x2c\x40\x45\x6d" - "\x73\x77\x01\xf3\x4f\xf3\x9d\x2a" - "\x5d\x57\xa8\xa1\x18\xa2\xad\xcb", - .rlen = 80, + .ptext = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" + "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b" + "\x28\x50\x51\x9d\x24\x60\x8d\xb3" + "\x49\x3e\x17\xea\xf6\x99\x5a\xdd", + .plen = 32, + .ctext = "\xe2\xc9\x0b\x33\x31\x02\xb3\xb4" + "\x33\xfe\xeb\xa8\xb7\x9b\xb2\xd7" + "\xeb\x0f\x05\x2b\xba\xb3\xca\xef" + "\xf6\xd1\xb6\xc0\xb9\x9b\x85\xc5" + "\xbf\x7a\x3e\xcc\x31\x76\x09\x80" + "\x32\x5d\xbb\xe8\x38\x0e\x77\xd3", + .clen = 48, }, { - .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" - "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b", - .klen = 16, - .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" - 
"\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", - .assoc = "\xc5", + .key = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" + "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad" + "\xf3\x00\xd4\xbf\x65\x08\x6e\xb9" + "\x7a\x4a\x4f\xe0\xad\xb0\x42\x93", + .klen = 32, + .iv = "\xc5\x93\xb0\x2a\xf8\x9f\xf1\xd0" + "\xc6\x64\x37\x42\xd2\x90\xb3\x2e" + "\xbd\xb1\x57\xe0\xa6\xb0\x4e\xc0" + "\xaa\x55\x87\xd6\x63\xc8\x2a\x49", + .assoc = "\xd5", .alen = 1, - .input = "", - .ilen = 0, - .result = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e" - "\x89\x3b\x9d\x0f\x83\x1c\x08\xc3", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x96\x43\x30\xca\x6c\x4f\xd7\x12" + "\xba\xd9\xb3\x18\x86\xdf\xc3\x52", + .clen = 16, }, { - .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde" - "\x07\xd1\x90\x8b\xcf\x23\x15\x31", - .klen = 16, - .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" - "\x27\x08\xbd\xaf\xce\xec\x45\xb3", - .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" + .key = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" + "\x27\x08\xbd\xaf\xce\xec\x45\xb3" + "\x1d\xc3\xdf\x43\x6a\xa8\xf0\xd3" + "\x3b\x77\x2e\xb9\x87\x0f\xe1\x6a", + .klen = 32, + .iv = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" "\x47\x3e\xe9\xd4\xcc\xb5\x76\x34" "\xe8\x73\x62\x64\xab\x50\xd0\xda" - "\x6b\x83\x66\xaf\x3e\x27\xc9", - .alen = 31, - .input = "", - .ilen = 0, - .result = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38" - "\x03\x12\xf9\xcc\x9e\x46\x42\x92", - .rlen = 16, + "\x6b\x83\x66\xaf\x3e\x27\xc9\x1f", + .assoc = "\x11\x81\x78\x32\x4d\xb9\x44\x73" + "\x68\x75\x16\xf8\xcb\x7e\xa7", + .alen = 15, + .ptext = "", + .plen = 0, + .ctext = "\x2f\xab\x45\xe2\xa7\x46\xc5\x83" + "\x11\x9f\xb0\x74\xee\xc7\x03\xdd", + .clen = 16, }, { - .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa" - "\x88\xab\x42\x1c\xc9\x47\xd7\x38", - .klen = 16, - .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" - "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", - .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08" + .key = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" + "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9" + "\x47\x85\xeb\xc7\x6f\x48\x72\xed" + "\xfc\xa5\x0d\x91\x61\x6e\x81\x40", + .klen = 32, + .iv = "\x3f\xdc\x24\x69\x19\x96\x43\x08" "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b" "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3" "\x2d\xb0\x45\x87\x18\x86\x68\xf6", - .alen = 32, - .input = "", - .ilen = 0, - .result = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2" - "\x6d\x65\xe0\x67\x9c\x1d\xa0\xf0", - .rlen = 16, + .assoc = "\x4e\xa5\xb2\xd1\x5d\x35\xed\x8f" + "\xe8\x4f\xc8\x89\xc5\xa2\x69\xbc", + .alen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x16\x44\x73\x33\x5d\xf2\xb9\x04" + "\x6b\x79\x98\xef\xdb\xd5\xc5\xf1", + .clen = 16, }, { - .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16" - "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e", - .klen = 16, - .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" - "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", - .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24" + .key = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" + "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf" + "\x72\x47\xf6\x4b\x74\xe8\xf4\x06" + "\xbe\xd3\xec\x6a\x3b\xcd\x20\x17", + .klen = 32, + .iv = "\x7b\x01\x5d\x08\x29\x12\xec\x24" "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" "\x3c\xf8\x79\x6c\xb6\x90\xd4\x0d" - "\xee\xde\x23\x60\xf2\xe5\x08\xcc" - "\x97", - .alen = 33, - .input = "", - .ilen = 0, - .result = "\x28\x64\x78\x51\x55\xd8\x56\x4a" - "\x58\x3e\xf7\xbe\xee\x21\xfe\x94", - .rlen = 16, + "\xee\xde\x23\x60\xf2\xe5\x08\xcc", + .assoc = "\x8a\xca\xec\x70\x6d\xb1\x96\xab" + "\x69\x29\x7a\x1b\xbf\xc7\x2c\xc2" + "\x07", + .alen = 17, + .ptext = "", + .plen = 0, + .ctext = "\xa4\x9b\xb8\x47\xc0\xed\x7a\x45" + "\x98\x54\x8c\xed\x3d\x17\xf0\xdd", + .clen = 16, }, { - .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31" - 
"\x8a\x60\xa6\x3f\xbd\x90\x5d\x44", - .klen = 16, - .iv = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" - "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", - .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" + .key = "\xa8\x5c\x09\x40\xf5\xef\xea\xb8" + "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6" + "\x9c\x0a\x02\xd0\x79\x88\x76\x20" + "\x7f\x00\xca\x42\x15\x2c\xbf\xed", + .klen = 32, + .iv = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" "\xca\xcd\xff\x88\xba\x22\xbe\x47" "\x67\xba\x85\xf1\xbb\x30\x56\x26" - "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3" - "\xa6\xbf\x31\x93\x60\xcd\xda\x63" - "\x2c\xb1\xaa\x19\xc8\x19\xf8\xeb" - "\x03\xa1\xe8\xbe\x37\x54\xec\xa2" - "\xcd\x2c\x45\x58\xbd\x8e\x80", - .alen = 63, - .input = "", - .ilen = 0, - .result = "\xb3\xa6\x00\x4e\x09\x20\xac\x21" - "\x77\x72\x69\x76\x2d\x36\xe5\xc8", - .rlen = 16, + "\xaf\x0b\x02\x38\xcc\x44\xa7\xa3", + .assoc = "\xc7\xef\x26\x10\x7d\x2c\x3f\xc6" + "\xea\x03\x2c\xac\xb9\xeb\xef\xc9" + "\x31\x6b\x08\x12\xfc\xd8\x37\x2d" + "\xe0\x17\x3a\x2e\x83\x5c\x8f", + .alen = 31, + .ptext = "", + .plen = 0, + .ctext = "\x20\x24\xe2\x33\x5c\x60\xc9\xf0" + "\xa4\x96\x2f\x0d\x53\xc2\xf8\xfc", + .clen = 16, }, { - .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d" - "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a", - .klen = 16, - .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" - "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", - .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" + .key = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" + "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc" + "\xc6\xcc\x0e\x54\x7f\x28\xf8\x3a" + "\x40\x2e\xa9\x1a\xf0\x8b\x5e\xc4", + .klen = 32, + .iv = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" - "\x71\x39\xe1\x10\xa6\xa3\x46\x7a" - "\xb4\x6b\x35\xc2\xc1\xdf\xed\x60" - "\x46\xc1\x3e\x7f\x8c\xc2\x0e\x7a" - "\x30\x08\xd0\x5f\xa0\xaa\x0c\x6d" - "\x9c\x2f\xdb\x97\xb8\x15\x69\x01", - .alen = 64, - .input = "", - .ilen = 0, - .result = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd" - "\xe4\xb9\x4a\xaa\x9a\x21\xaa\x14", - .rlen = 16, + "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", + .assoc = "\x03\x14\x5f\xaf\x8d\xa8\xe7\xe2" + "\x6b\xde\xde\x3e\xb3\x10\xb1\xcf" + "\x5c\x2d\x14\x96\x01\x78\xb9\x47" + "\xa1\x44\x19\x06\x5d\xbb\x2e\x2f", + .alen = 32, + .ptext = "", + .plen = 0, + .ctext = "\x6f\x4a\xb9\xe0\xff\x51\xa3\xf1" + "\xd2\x64\x3e\x66\x6a\xb2\x03\xc0", + .clen = 16, }, { - .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69" - "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50", - .klen = 16, - .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" - "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", - .assoc = "\x31", + .key = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" + "\xac\x4b\x37\x86\xb0\xa2\x13\xd2" + "\xf1\x8e\x19\xd8\x84\xc8\x7a\x53" + "\x02\x5b\x88\xf3\xca\xea\xfe\x9b", + .klen = 32, + .iv = "\x31\x6f\x0b\xe6\x59\x85\xe6\x77" + "\xcc\x81\x63\xab\xae\x6b\x43\x54" + "\xbb\x3f\x9c\xf9\xc5\x70\x5a\x5a" + "\x32\x67\xc0\xe9\x80\x02\xe5\x50", + .assoc = "\x40", .alen = 1, - .input = "\x40", - .ilen = 1, - .result = "\x1d\x47\x17\x34\x86\xf5\x54\x1a" - "\x6d\x28\xb8\x5d\x6c\xcf\xa0\xb9" - "\xbf", - .rlen = 17, + .ptext = "\x4f", + .plen = 1, + .ctext = "\x2c\xfb\xad\x7e\xbe\xa0\x9a\x5b" + "\x7a\x3f\x81\xf7\xfc\x1b\x79\x83" + "\xc7", + .clen = 17, }, { - .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85" - "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57", - .klen = 16, - .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" - "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", - .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93" + .key = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" + "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8" + "\x1b\x50\x25\x5d\x89\x68\xfc\x6d" + "\xc3\x89\x67\xcb\xa4\x49\x9d\x71", + .klen = 32, + .iv = 
"\x6d\x94\x44\x86\x69\x00\x8f\x93" "\x4d\x5b\x15\x3c\xa8\x8f\x06\x5a" "\xe6\x01\xa8\x7e\xca\x10\xdc\x73" - "\xf4\x94\x9f\xc1\x5a\x61\x85", - .alen = 31, - .input = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37\xdb" - "\xb0\xb2\x2b\x9f\x0b\xb8\xbd\x7a" - "\x24\xa0\xd6\xb7\x11\x79\x6c", - .ilen = 31, - .result = "\x78\x90\x52\xae\x0f\xf7\x2e\xef" - "\x63\x09\x08\x58\xb5\x56\xbd\x72" - "\x6e\x42\xcf\x27\x04\x7c\xdb\x92" - "\x18\xe9\xa4\x33\x90\xba\x62\xb5" - "\x70\xd3\x88\x9b\x4f\x05\xa7\x51" - "\x85\x87\x17\x09\x42\xed\x4e", - .rlen = 47, + "\xf4\x94\x9f\xc1\x5a\x61\x85\x27", + .assoc = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" + "\x6d\x92\x42\x61\xa7\x58\x37", + .alen = 15, + .ptext = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" + "\x8d\xc8\x6e\x85\xa5\x21\x67", + .plen = 15, + .ctext = "\x1f\x7f\xca\x3c\x2b\xe7\x27\xba" + "\x7e\x98\x83\x02\x34\x23\xf7\x94" + "\xde\x35\xe6\x1d\x14\x18\xe5\x38" + "\x14\x80\x6a\xa7\x1b\xae\x1d", + .clen = 31, }, { - .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" - "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d", - .klen = 16, - .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" - "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", - .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" + .key = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" + "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf" + "\x46\x13\x31\xe1\x8e\x08\x7e\x87" + "\x85\xb6\x46\xa3\x7e\xa8\x3c\x48", + .klen = 32, + .iv = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" "\xce\x36\xc7\xce\xa2\xb4\xc9\x60" "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d" "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd", - .alen = 32, - .input = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2" - "\xdb\x74\x36\x23\x11\x58\x3f\x93" - "\xe5\xcd\xb5\x90\xeb\xd8\x0c\xb3", - .ilen = 32, - .result = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41" - "\x2e\x71\xc8\x3b\x92\x43\x58\xaf" - "\x5a\xfb\xad\x8f\xd9\xd5\x8a\x5e" - "\xdb\xf3\xcd\x3a\x2b\xe1\x2c\x1a" - "\xb0\xed\xe3\x0c\x6e\xf9\xf2\xd6" - "\x90\xe6\xb1\x0e\xa5\x8a\xac\xb7", - .rlen = 48, - }, { - .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" + .assoc = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" + "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", + .alen = 16, + .ptext = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", - .klen = 16, - .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44" - "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", - .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" + .plen = 16, + .ctext = "\x05\x86\x9e\xd7\x2b\xa3\x97\x01" + "\xbe\x28\x98\x10\x6f\xe9\x61\x32" + "\x96\xbb\xb1\x2e\x8f\x0c\x44\xb9" + "\x46\x2d\x55\xe3\x42\x67\xf2\xaf", + .clen = 32, + }, { + .key = "\xd7\x14\x29\x5d\x45\x59\x36\x44" + "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5" + "\x70\xd5\x3c\x65\x93\xa8\x00\xa0" + "\x46\xe4\x25\x7c\x58\x08\xdb\x1e", + .klen = 32, + .iv = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" "\x3b\x86\xbf\x86\xd4\x50\xe0\xa7" - "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4" - "\xee", - .alen = 33, - .input = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" + "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4", + .assoc = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05\x36\x42\xa7\x16\xf8\xc1\xad" - "\xa7\xfb\x94\x68\xc5\x37\xab\x8a" - "\x72", - .ilen = 33, - .result = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc" - "\xfd\x2e\x4b\x46\x84\xff\x78\x4e" - "\x50\xda\x5c\xb9\x61\x1d\xf5\xb9" - "\xfe\xbb\x7f\xae\x8c\xc1\x24\xbd" - "\x8c\x6f\x1f\x9b\xce\xc6\xc1\x37" - "\x08\x06\x5a\xe5\x96\x10\x95\xc2" - "\x5e", - .rlen = 49, + "\x05", + .alen = 17, + .ptext = "\x05\x70\xd5\x94\x12\x36\x35\xd8" + "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69" + "\xd0", + .plen = 17, + .ctext = "\x9c\xe0\x06\x7b\x86\xcf\x2e\xd8" + 
"\x45\x65\x1b\x72\x9b\xaa\xa3\x1e" + "\x87\x9d\x26\xdf\xff\x81\x11\xd2" + "\x47\x41\xb9\x24\xc1\x8a\xa3\x8b" + "\x55", + .clen = 33, }, { - .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8" - "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69", - .klen = 16, - .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" - "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", - .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" + .key = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" + "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb" + "\x9a\x97\x48\xe9\x98\x48\x82\xba" + "\x07\x11\x04\x54\x32\x67\x7b\xf5", + .klen = 32, + .iv = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" - "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa" - "\xfd\xc9\x4a\xa9\xa9\x39\x4b\x54" - "\xc8\x0e\x24\x7f\x5e\x10\x7a\x45" - "\x10\x0b\x56\x85\xad\x54\xaa\x66" - "\xa8\x43\xcd\xd4\x9b\xb7\xfa", - .alen = 63, - .input = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" + "\x38\x1d\x3b\x4a\xe9\x7e\x62\xaa", + .assoc = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" "\xf0\x20\x58\x15\x95\xc6\x7f\xee" "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a\x60" - "\x80\xf4\x4b\xf4\xc1\x3d\xd0\x93" - "\xcf\x12\xc9\x59\x8f\x7a\x7f\xa8" - "\x1b\xa5\x50\xed\x87\xa9\x72\x59" - "\x9c\x44\xb2\xa4\x99\x98\x34", - .ilen = 63, - .result = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22" - "\x49\x2d\x07\x92\xfc\x3d\x6d\x5f" - "\xef\x36\x19\xae\x91\xfa\xd6\x63" - "\x46\xea\x8a\x39\x14\x21\xa6\x37" - "\x18\xfc\x97\x3e\x16\xa5\x4d\x39" - "\x45\x2e\x69\xcc\x9c\x5f\xdf\x6d" - "\x5e\xa2\xbf\xac\x83\x32\x72\x52" - "\x58\x58\x23\x40\xfd\xa5\xc2\xe6" - "\xe9\x5a\x50\x98\x00\x58\xc9\x86" - "\x4f\x20\x37\xdb\x7b\x22\xa3", - .rlen = 79, + "\x68\x28\x73\x40\x9f\x96\x4a", + .alen = 31, + .ptext = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" + "\x10\x57\x85\x39\x93\x8f\xaf\x70" + "\xfa\xa9\xd0\x4d\x5c\x40\x23\xcd" + "\x98\x34\xab\x37\x56\xae\x32", + .plen = 31, + .ctext = "\xa0\xc8\xde\x83\x0d\xc3\x4e\xd5" + "\x69\x7f\x7a\xdd\x8c\x46\xda\xba" + "\x0a\x5c\x0e\x7f\xac\xee\x02\xd2" + "\xe5\x4b\x0a\xba\xb8\xa4\x7b\x66" + "\xde\xae\xdb\xc2\xc0\x0b\xf7\x2b" + "\xdf\xb8\xea\xd8\xa9\x38\xed", + .clen = 47, }, { - .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" - "\x10\x57\x85\x39\x93\x8f\xaf\x70", - .klen = 16, - .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" - "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", - .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" + .key = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" + "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1" + "\xc5\x5a\x53\x6e\x9d\xe8\x04\xd4" + "\xc9\x3f\xe2\x2d\x0c\xc6\x1a\xcb", + .klen = 32, + .iv = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" "\x50\xc4\xde\x82\x90\x21\x11\x73" "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" - "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81" - "\x0b\x76\x4f\xd7\x0a\x4b\x5e\x51" - "\xe3\x1d\xb9\xe5\x21\xb9\x8f\xd4" - "\x3d\x72\x3e\x26\x16\xa9\xca\x32" - "\x77\x47\x63\x14\x95\x3d\xe4\x34", - .alen = 64, - .input = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" + "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", + .assoc = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37" - "\x8f\xa1\x50\x23\x22\x4f\xe3\x91" - "\xe9\x21\x5e\xbf\x52\x23\x95\x37" - "\x48\x0c\x38\x8f\xf0\xff\x92\x24" - "\x6b\x47\x49\xe3\x94\x1f\x1e\x01", - .ilen = 64, - .result = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb" - "\x23\xec\x35\xe3\xae\xc9\xfb\x0b" - "\x90\x14\x46\xeb\xa8\x8d\xb0\x9b" - "\x39\xda\x8b\x48\xec\xb2\x00\x4e" - "\x80\x6f\x46\x4f\x9b\x1e\xbb\x35" - "\xea\x5a\xbc\xa2\x36\xa5\x89\x45" - "\xc2\xd6\xd7\x15\x0b\xf6\x6c\x56" - "\xec\x99\x7d\x61\xb3\x15\x93\xed" - "\x83\x1e\xd9\x48\x84\x0b\x37\xfe" - 
"\x95\x74\x44\xd5\x54\xa6\x27\x06", - .rlen = 80, + "\x29\x56\x52\x19\x79\xf5\xe9\x37", + .alen = 32, + .ptext = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" + "\x91\x31\x37\xcb\x8d\xb3\x72\x76" + "\x24\x6b\xdc\xd1\x61\xe0\xa5\xe7" + "\x5a\x61\x8a\x0f\x30\x0d\xd1\xec", + .plen = 32, + .ctext = "\xd3\x68\x14\x70\x3c\x01\x43\x86" + "\x02\xab\xbe\x75\xaa\xe7\xf5\x53" + "\x5c\x05\xbd\x9b\x19\xbb\x2a\x61" + "\x8f\x69\x05\x75\x8e\xca\x60\x0c" + "\x5b\xa2\x48\x61\x32\x74\x11\x2b" + "\xf6\xcf\x06\x78\x6f\x78\x1a\x4a", + .clen = 48, }, { - .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" - "\x91\x31\x37\xcb\x8d\xb3\x72\x76", - .klen = 16, - .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" - "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", - .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" + .key = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" + "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7" + "\xef\x1c\x5f\xf2\xa3\x88\x86\xed" + "\x8a\x6d\xc1\x05\xe7\x25\xb9\xa2", + .klen = 32, + .iv = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" - "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58" - "\x1a\x22\x53\x05\x6b\x5c\x71\x4f" - "\xfd\x2d\x4d\x4c\xe5\x62\xa5\x63" - "\x6a\xda\x26\xc8\x7f\xff\xea\xfd" - "\x46\x4a\xfa\x53\x8f\xc4\xcd\x68" - "\x58", - .alen = 65, - .input = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" + "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58", + .assoc = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" "\x84\x7d\x65\x34\x25\xd8\x47\xfa" "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" - "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" - "\x75\x73\x20\x30\x59\x54\xb2\xf0" - "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" - "\x8a\xdf\x27\xa0\xe4\x60\x99\xae" - "\x8e\x43\xd9\x39\x7b\x10\x40\x67" - "\x5c\x7e\xc9\x70\x63\x34\xca\x59" - "\xfe\x86\xbc\xb7\x9c\x39\xf3\x6d" - "\x6a\x41\x64\x6f\x16\x7f\x65\x7e" - "\x89\x84\x68\xeb\xb0\x51\xbe\x55" - "\x33\x16\x59\x6c\x3b\xef\x88\xad" - "\x2f\xab\xbc\x25\x76\x87\x41\x2f" - "\x36", - .ilen = 129, - .result = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9" - "\xd1\xcd\xdc\x16\xdd\x2c\xc1\xfb" - "\x52\xb5\xb3\xab\x50\x99\x3f\xa0" - "\x38\xa4\x74\xa5\x04\x15\x63\x05" - "\x8f\x54\x81\x06\x5a\x6b\xa4\x63" - "\x6d\xa7\x21\xcb\xff\x42\x30\x8e" - "\x3b\xd1\xca\x3f\x4b\x1a\xb8\xc3" - "\x42\x01\xe6\xbc\x75\x15\x87\xee" - "\xc9\x8e\x65\x01\xd9\xd8\xb5\x9f" - "\x48\x86\xa6\x5f\x2c\xc7\xb5\xb0" - "\xed\x5d\x14\x7c\x3f\x40\xb1\x0b" - "\x72\xef\x94\x8d\x7a\x85\x56\xe5" - "\x56\x08\x15\x56\xba\xaf\xbd\xf0" - "\x20\xef\xa0\xf6\xa9\xad\xa2\xc9" - "\x1c\x3b\x28\x51\x7e\x77\xb2\x18" - "\x4f\x61\x64\x37\x22\x36\x6d\x78" - "\xed\xed\x35\xe8\x83\xa5\xec\x25" - "\x6b\xff\x5f\x1a\x09\x96\x3d\xdc" - "\x20", - .rlen = 145, + "\x9d", + .alen = 33, + .ptext = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" + "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c" + "\x4f\x2e\xe8\x55\x66\x80\x27\x00" + "\x1b\x8f\x68\xe7\x0a\x6c\x71\xc3" + "\x21\x78\x55\x9d\x9c\x65\x7b\xcd" + "\x0a\x34\x97\xff\x47\x37\xb0\x2a" + "\x80\x0d\x19\x98\x33\xa9\x7a\xe3" + "\x2e\x4c\xc6\xf3\x8c\x88\x42\x01" + "\xbd", + .plen = 65, + .ctext = "\x07\x0a\x35\xb0\x82\x03\x5a\xd2" + "\x15\x3a\x6c\x72\x83\x9b\xb1\x75" + "\xea\xf2\xfc\xff\xc6\xf1\x13\xa4" + "\x1a\x93\x33\x79\x97\x82\x81\xc0" + "\x96\xc2\x00\xab\x39\xae\xa1\x62" + "\x53\xa3\x86\xc9\x07\x8c\xaf\x22" + "\x47\x31\x29\xca\x4a\x95\xf5\xd5" + "\x20\x63\x5a\x54\x80\x2c\x4a\x63" + "\xfb\x18\x73\x31\x4f\x08\x21\x5d" + "\x20\xe9\xc3\x7e\xea\x25\x77\x3a" + "\x65", + .clen = 81, }, { - .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" - "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c", - .klen = 16, - .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" - 
"\x32\x42\x15\x80\x85\xa1\x65\xfe", - .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" + .key = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" + "\x32\x42\x15\x80\x85\xa1\x65\xfe" + "\x19\xde\x6b\x76\xa8\x28\x08\x07" + "\x4b\x9a\xa0\xdd\xc1\x84\x58\x79", + .klen = 32, + .iv = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" "\x52\x79\x42\xa5\x84\x6a\x96\x7f" "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" - "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e" - "\x28\xce\x57\x34\xcd\x6e\x84\x4c" - "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1" - "\x96\x41\x0d\x69\xe8\x54\x0a\xc8" - "\x15\x4e\x91\x92\x89\x4b\xb7\x9b" - "\x21\xf7\x42\x89\xac\x12\x2a\x54" - "\x69\xee\x18\xc7\x8d\xed\xe8\xfd" - "\xbb\x04\x28\xe6\x8a\x3c\x98\xc1" - "\x04\x2d\xa9\xa1\x24\x83\xff\xe9" - "\x55\x7a\xf0\xd1\xf6\x63\x05\xe1" - "\xd9\x1e\x75\x72\xc1\x9f\xae\x32" - "\xe1\x6b\xcd\x9e\x61\x19\x23\x86" - "\xd9\xd2\xaf\x8e\xd5\xd3\xa8\xa9" - "\x51", - .alen = 129, - .input = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" + "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e", + .assoc = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" @@ -24896,756 +20186,554 @@ static const struct aead_testvec morus1280_enc_tv_template[] = { "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" "\x09\x4f\x77\x62\x88\x2d\xf2\x68" "\x54", - .ilen = 65, - .result = "\x36\x78\xb9\x22\xde\x62\x35\x55" - "\x1a\x7a\xf5\x45\xbc\xd7\x15\x82" - "\x01\xe9\x5a\x07\xea\x46\xaf\x91" - "\xcb\x73\xa5\xee\xe1\xb4\xbf\xc2" - "\xdb\xd2\x9d\x59\xde\xfc\x83\x00" - "\xf5\x46\xac\x97\xd5\x57\xa9\xb9" - "\x1f\x8c\xe8\xca\x68\x8b\x91\x0c" - "\x01\xbe\x0a\xaf\x7c\xf6\x67\xa4" - "\xbf\xbc\x88\x3f\x5d\xd1\xf9\x19" - "\x0f\x9d\xb2\xaf\xb9\x6e\x17\xdf" - "\xa2", - .rlen = 81, - }, { - .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" - "\x93\xe6\x9b\xee\x81\xfc\xf7\x82", - .klen = 16, - .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" - "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", - .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" - "\xd3\x53\xf4\x36\x7e\x8e\x59\x85" - "\x0e\x51\xf9\x1c\xee\x70\x6a\x27" - "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05", - .alen = 32, - .input = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07" - "\xd9\x02\x7c\x3d\x2f\x18\x4b\x2d" - "\x6e\xde\xee\xa2\x08\x12\xc7\xba", - .ilen = 32, - .result = "\x08\x1b\x95\x0e\x41\x95\x02\x4b" - "\x9c\xbb\xa8\xd0\x7c\xd3\x44\x6e" - "\x89\x14\x33\x70\x0a\xbc\xea\x39" - "\x88\xaa\x2b\xd5\x73\x11\x55\xf5" - "\x33\x33\x9c\xd7\x42\x34\x49\x8e" - "\x2f\x03\x30\x05\x47\xaf\x34", - .rlen = 47, + .alen = 65, + .ptext = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" + "\x93\xe6\x9b\xee\x81\xfc\xf7\x82" + "\x79\xf0\xf3\xd9\x6c\x20\xa9\x1a" + "\xdc\xbc\x47\xc0\xe4\xcb\x10\x99" + "\x2f", + .plen = 33, + .ctext = "\x33\xc1\xda\xfa\x15\x21\x07\x8e" + "\x93\x68\xea\x64\x7b\x3d\x4b\x6b" + "\x71\x5e\x5e\x6b\x92\xaa\x65\xc2" + "\x7a\x2a\xc1\xa9\x0a\xa1\x24\x81" + "\x26\x3a\x5a\x09\xe8\xce\x73\x72" + "\xde\x7b\x58\x9e\x85\xb9\xa4\x28" + "\xda", + .clen = 49, }, { - .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" + .key = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" + "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04" + "\x44\xa1\x76\xfb\xad\xc8\x8a\x21" + "\x0d\xc8\x7f\xb6\x9b\xe3\xf8\x4f", + .klen = 32, + .iv = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" + "\xd3\x53\xf4\x36\x7e\x8e\x59\x85" + "\x0e\x51\xf9\x1c\xee\x70\x6a\x27" + "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05", + .assoc = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" + "\xf3\x89\x20\x5b\x7c\x57\x89\x07", + .alen = 16, + .ptext = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", - .klen = 16, - .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" - 
"\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", - .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71" + .plen = 16, + .ctext = "\x3e\xf8\x86\x3d\x39\xf8\x96\x02" + "\x0f\xdf\xc9\x6e\x37\x1e\x57\x99" + "\x07\x2a\x1a\xac\xd1\xda\xfd\x3b" + "\xc7\xff\xbd\xbc\x85\x09\x0b", + .clen = 31, + }, { + .key = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" + "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a" + "\x6e\x63\x82\x7f\xb2\x68\x0c\x3a" + "\xce\xf5\x5e\x8e\x75\x42\x97\x26", + .klen = 32, + .iv = "\x51\xb9\x12\x80\xea\xde\xd5\x71" "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c" "\x39\x14\x05\xa0\xf3\x10\xec\x41" "\xff\x01\x95\x84\x2b\x59\x7f\xdb", - .alen = 32, - .input = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d" - "\x03\xc4\x88\xc1\x35\xb8\xcd\x47" - "\x2f\x0c\xcd\x7a\xe2\x71\x66\x91", - .ilen = 32, - .result = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68" - "\x0c\x60\xb9\x27\xdf\xaa\x41\xc6" - "\x25\xd8\xf7\x1f\x10\x15\x48\x61" - "\x4c\x95\x00\xdf\x51\x9b\x7f\xe6" - "\x24\x40\x9e\xbe\x3b\xeb\x1b\x98" - "\xb9\x9c\xe5\xef\xf2\x05", - .rlen = 46, - }, { - .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" + .assoc = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" + "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", + .alen = 16, + .ptext = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" "\x95\x9a\xff\x10\x75\x45\x7d\x8f", - .klen = 16, - .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" - "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", - .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" + .plen = 16, + .ctext = "\x2f\xc4\xd8\x0d\xa6\x07\xef\x2e" + "\x6c\xd9\x84\x63\x70\x97\x61\x37" + "\x08\x2f\x16\x90\x9e\x62\x30\x0d" + "\x62\xd5\xc8\xf0\x46\x1a", + .clen = 30, + }, { + .key = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" + "\xb5\xd1\x2b\x35\x73\x0e\xad\x10" + "\x98\x25\x8d\x03\xb7\x08\x8e\x54" + "\x90\x23\x3d\x67\x4f\xa1\x36\xfc", + .klen = 32, + .iv = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" "\xd5\x07\x58\x59\x72\xd7\xde\x92" "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a" "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2", - .alen = 32, - .input = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13" - "\x2e\x86\x93\x45\x3a\x58\x4f\x61" - "\xf0\x3a\xac\x53\xbc\xd0\x06\x68", - .ilen = 32, - .result = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d" - "\xb5\xec\x9b\x4e\x12\x23\xa3\xcf" - "\x1a\x5a\x70\x15\x5a\x10\x40\x51" - "\xca\x47\x4c\x9d\xc9\x97\xf4\x77" - "\xdb\xc8\x10\x2d\xdc\x65\x20\x3f", - .rlen = 40, - }, { - .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b" + .assoc = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" + "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", + .alen = 16, + .ptext = "\xac\x70\x69\xef\x82\x97\xd2\x9b" "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", + .plen = 16, + .ctext = "\xce\xf3\x17\x87\x49\xc2\x00\x46" + "\xc6\x12\x5c\x8f\x81\x38\xaa\x55" + "\xf8\x67\x75\xf1\x75\xe3\x2a\x24", + .clen = 24, + }, +}; + +/* + * MORUS-640 test vectors - generated via reference implementation from + * SUPERCOP (https://bench.cr.yp.to/supercop.html): + * + * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz + * (see crypto_aead/morus640128v2/) + */ +static const struct aead_testvec morus640_tv_template[] = { + { + .key = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x00", .klen = 16, - .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22" - "\x36\xab\xde\xc6\x6d\x32\x70\x17", - .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9" - "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98" - "\x8d\x98\x1c\xa8\xfe\x50\xf0\x74" - "\x81\x5c\x53\x35\xe0\x17\xbd\x88", - .alen = 32, - .input = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" - "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a" - "\x58\x49\x9f\xc9\x3f\xf8\xd1\x7a" - "\xb2\x67\x8b\x2b\x96\x2f\xa5\x3e", - .ilen = 32, - .result = "\xf1\x62\x44\xc7\x5f\x19\xca\x43" - 
"\x47\x2c\xaf\x68\x82\xbd\x51\xef" - "\x3d\x65\xd8\x45\x2d\x06\x07\x78" - "\x08\x2e\xb3\x23\xcd\x81\x12\x55" - "\x1a", - .rlen = 33, - }, { - .key = "\xe9\x95\xa2\x8f\x93\x13\x7b\xb7" - "\x96\x4e\x63\x33\x69\x8d\x02\x9b" - "\x23\xf9\x22\xeb\x80\xa0\xb1\x81" - "\xe2\x73\xc3\x21\x4d\x47\x8d\xf4", - .klen = 32, - .iv = "\xf8\x5e\x31\xf7\xd7\xb2\x25\x3e" - "\xb7\x85\x90\x58\x67\x57\x33\x1d", - .assoc = "", - .alen = 0, - .input = "", - .ilen = 0, - .result = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf" - "\xb9\xd2\x41\xf6\x80\xa1\x52\x70", - .rlen = 16, - }, { - .key = "\x25\xba\xdc\x2e\xa3\x8f\x24\xd3" - "\x17\x29\x15\xc5\x63\xb2\xc5\xa1" - "\x4d\xbc\x2d\x6f\x85\x40\x33\x9a" - "\xa3\xa0\xa1\xfa\x27\xa6\x2c\xca", - .klen = 32, - .iv = "\x34\x83\x6a\x96\xe7\x2d\xce\x5a" - "\x38\x5f\x42\xe9\x61\x7b\xf5\x23", + .iv = "\x0f\xc9\x8e\x67\x44\x9e\xaa\x86" + "\x20\x36\x2c\x24\xfe\xc9\x30\x81", .assoc = "", .alen = 0, - .input = "\x53", - .ilen = 1, - .result = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7" - "\x01\xf4\x08\xe3\x0d\xf7\xf0\x78" - "\x53", - .rlen = 17, + .ptext = "", + .plen = 0, + .ctext = "\x89\x62\x7d\xf3\x07\x9d\x52\x05" + "\x53\xc3\x04\x60\x93\xb4\x37\x9a", + .clen = 16, }, { - .key = "\x62\xdf\x16\xcd\xb3\x0a\xcc\xef" - "\x98\x03\xc7\x56\x5d\xd6\x87\xa8" - "\x77\x7e\x39\xf3\x8a\xe0\xb5\xb4" - "\x65\xce\x80\xd2\x01\x05\xcb\xa1", - .klen = 32, - .iv = "\x71\xa8\xa4\x35\xf7\xa9\x76\x75" - "\xb8\x39\xf4\x7a\x5b\x9f\xb8\x29", + .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b" + "\x80\xda\xb2\x91\xf9\x24\xc2\x06", + .klen = 16, + .iv = "\x4b\xed\xc8\x07\x54\x1a\x52\xa2" + "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", .assoc = "", .alen = 0, - .input = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83" - "\xf9\xa6\x4d\xc3\x58\x31\x19\x2c" - "\xd7\x90\xc2\x56\x4e\xd8\x57\xc7" - "\xf6\xf0\x27\xb4\x25\x4c\x83", - .ilen = 31, - .result = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07" - "\xff\x8e\x74\xf8\xa1\xa6\xd5\x37" - "\xa5\x64\x31\x5c\xca\x73\x9b\x43" - "\xe6\x70\x63\x46\x95\xcb\xf7\xb5" - "\x20\x8c\x75\x7a\x2a\x17\x2f\xa9" - "\xb8\x4d\x11\x42\xd1\xf8\xf1", - .rlen = 47, + .ptext = "\x69", + .plen = 1, + .ctext = "\xa8\x8d\xe4\x90\xb5\x50\x8f\x78" + "\xb6\x10\x9a\x59\x5f\x61\x37\x70" + "\x09", + .clen = 17, }, { - .key = "\x9e\x03\x4f\x6d\xc3\x86\x75\x0a" - "\x19\xdd\x79\xe8\x57\xfb\x4a\xae" - "\xa2\x40\x45\x77\x90\x80\x37\xce" - "\x26\xfb\x5f\xaa\xdb\x64\x6b\x77", - .klen = 32, - .iv = "\xae\xcc\xde\xd5\x07\x25\x1f\x91" - "\x39\x14\xa6\x0c\x55\xc4\x7b\x30", + .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37" + "\x01\xb4\x64\x22\xf3\x48\x85\x0c", + .klen = 16, + .iv = "\x88\x12\x01\xa6\x64\x96\xfb\xbe" + "\x22\xea\x90\x47\xf2\x11\xb5\x8e", .assoc = "", .alen = 0, - .input = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f" - "\x7a\x81\xff\x55\x52\x56\xdc\x33" - "\x01\x52\xcd\xdb\x53\x78\xd9\xe1" - "\xb7\x1d\x06\x8d\xff\xab\x22\x98", - .ilen = 32, - .result = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37" - "\x0a\xee\xc4\x30\x19\xd7\x3a\x6f" - "\xf8\x2b\x67\xf5\x3b\x84\x87\x2a" - "\xfb\x07\x7a\x82\xb5\xe4\x85\x26" - "\x1e\xa8\xe5\x04\x54\xce\xe5\x5f" - "\xb5\x3f\xc1\xd5\x7f\xbd\xd2\xa6", - .rlen = 48, + .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" + "\x62\x58\xe9\x8f\xef\xa4\x17", + .plen = 15, + .ctext = "\x76\xdd\xb9\x05\x3d\xce\x61\x38" + "\xf3\xef\xf7\xe5\xd7\xfd\x70\xa5" + "\xcf\x9d\x64\xb8\x0a\x9f\xfd\x8b" + "\xd4\x6e\xfe\xd9\xc8\x63\x4b", + .clen = 31, }, { - .key = "\xdb\x28\x89\x0c\xd3\x01\x1e\x26" - "\x9a\xb7\x2b\x79\x51\x1f\x0d\xb4" - "\xcc\x03\x50\xfc\x95\x20\xb9\xe7" - "\xe8\x29\x3e\x83\xb5\xc3\x0a\x4e", - .klen = 32, - .iv = "\xea\xf1\x18\x74\x17\xa0\xc8\xad" - 
"\xba\xee\x58\x9d\x4f\xe8\x3d\x36", + .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" + "\x82\x8e\x16\xb4\xed\x6d\x47\x12", + .klen = 16, + .iv = "\xc4\x37\x3b\x45\x74\x11\xa4\xda" + "\xa2\xc5\x42\xd8\xec\x36\x78\x94", .assoc = "", .alen = 0, - .input = "\x08\x84\x34\x44\x9f\xde\x1c\xbb" - "\xfb\x5b\xb1\xe6\x4c\x7a\x9f\x39" - "\x2c\x14\xd9\x5f\x59\x18\x5b\xfb" - "\x79\x4b\xe5\x65\xd9\x0a\xc1\x6f" - "\x2e", - .ilen = 33, - .result = "\xc2\xf4\x40\x55\xf9\x59\xff\x73" - "\x08\xf5\x98\x92\x0c\x7b\x35\x9a" - "\xa8\xf4\x42\x7e\x6f\x93\xca\x22" - "\x23\x06\x1e\xf8\x89\x22\xf4\x46" - "\x7c\x7c\x67\x75\xab\xe5\x75\xaa" - "\x15\xd7\x83\x19\xfd\x31\x59\x5b" - "\x32", - .rlen = 49, + .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" + "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97", + .plen = 16, + .ctext = "\xdc\x72\xe8\x14\xfb\x63\xad\x72" + "\x1f\x57\x9a\x1f\x88\x81\xdb\xd6" + "\xc1\x91\x9d\xb9\x25\xc4\x99\x4c" + "\x97\xcd\x8a\x0c\x9d\x68\x00\x1c", + .clen = 32, }, { - .key = "\x17\x4d\xc3\xab\xe3\x7d\xc7\x42" - "\x1b\x91\xdd\x0a\x4b\x43\xcf\xba" - "\xf6\xc5\x5c\x80\x9a\xc0\x3b\x01" - "\xa9\x56\x1d\x5b\x8f\x22\xa9\x25", - .klen = 32, - .iv = "\x27\x16\x51\x13\x27\x1c\x71\xc9" - "\x3b\xc8\x0a\x2f\x49\x0c\x00\x3c", + .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" + "\x03\x68\xc8\x45\xe7\x91\x0a\x18", + .klen = 16, + .iv = "\x01\x5c\x75\xe5\x84\x8d\x4d\xf6" + "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", .assoc = "", .alen = 0, - .input = "\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7" - "\x7c\x35\x63\x77\x46\x9f\x61\x3f" - "\x56\xd7\xe4\xe3\x5e\xb8\xdc\x14" - "\x3a\x79\xc4\x3e\xb3\x69\x61\x46" - "\x3c\xb6\x83\x4e\xb4\x26\xc7\x73" - "\x22\xda\x52\x8b\x7d\x11\x98\xea" - "\x62\xe1\x14\x1e\xdc\xfe\x0f\xad" - "\x20\x76\x5a\xdc\x4e\x71\x13", - .ilen = 63, - .result = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb" - "\xa0\x8c\xd3\x3e\x7f\x8d\xe8\x28" - "\x2a\xdc\xfa\x01\x84\x87\x9a\x70" - "\x81\x75\x37\x0a\xd2\x75\xa9\xb6" - "\x21\x72\xee\x7e\x65\x95\xe5\xcc" - "\x01\xb7\x39\xa6\x51\x15\xca\xff" - "\x61\xdc\x97\x38\xcc\xf4\xca\xc7" - "\x83\x9b\x05\x11\x72\x60\xf0\xb4" - "\x7e\x06\xab\x0a\xc0\xbb\x59\x23" - "\xaa\x2d\xfc\x4e\x35\x05\x59", - .rlen = 79, + .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" + "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" + "\x09", + .plen = 17, + .ctext = "\x6b\x4f\x3b\x90\x9a\xa2\xb3\x82" + "\x0a\xb8\x55\xee\xeb\x73\x4d\x7f" + "\x54\x11\x3a\x8a\x31\xa3\xb5\xf2" + "\xcd\x49\xdb\xf3\xee\x26\xbd\xa2" + "\x0d", + .clen = 33, }, { - .key = "\x54\x71\xfd\x4b\xf3\xf9\x6f\x5e" - "\x9c\x6c\x8f\x9c\x45\x68\x92\xc1" - "\x21\x87\x67\x04\x9f\x60\xbd\x1b" - "\x6a\x84\xfc\x34\x6a\x81\x48\xfb", - .klen = 32, - .iv = "\x63\x3b\x8b\xb3\x37\x98\x1a\xe5" - "\xbc\xa2\xbc\xc0\x43\x31\xc2\x42", + .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" + "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f", + .klen = 16, + .iv = "\x3d\x80\xae\x84\x94\x09\xf6\x12" + "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", .assoc = "", .alen = 0, - .input = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3" - "\xfd\x0f\x15\x09\x40\xc3\x24\x45" - "\x81\x99\xf0\x67\x63\x58\x5e\x2e" - "\xfb\xa6\xa3\x16\x8d\xc8\x00\x1c" - "\x4b\x62\x87\x7c\x15\x38\xda\x70" - "\x3d\xea\xe7\xf2\x40\xba\xae\x79" - "\x8f\x48\xfc\xbf\x45\x53\x2e\x78" - "\xef\x79\xf0\x1b\x49\xf7\xfd\x9c", - .ilen = 64, - .result = "\x11\x7c\x7d\xef\xce\x29\x95\xec" - "\x7e\x9f\x42\xa6\x26\x07\xa1\x75" - "\x2f\x4e\x09\x9a\xf6\x6b\xc2\xfa" - "\x0d\xd0\x17\xdc\x25\x1e\x9b\xdc" - "\x5f\x8c\x1c\x60\x15\x4f\x9b\x20" - "\x7b\xff\xcd\x82\x60\x84\xf4\xa5" - "\x20\x9a\x05\x19\x5b\x02\x0a\x72" - "\x43\x11\x26\x58\xcf\xc5\x41\xcf" - "\x13\xcc\xde\x32\x92\xfa\x86\xf2" - "\xaf\x16\xe8\x8f\xca\xb6\xfd\x54", 
- .rlen = 80, + .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" + "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" + "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" + "\x57\x05\x01\x1c\x66\x22\xd3", + .plen = 31, + .ctext = "\x59\xd1\x0f\x6b\xee\x27\x84\x92" + "\xb7\xa9\xb5\xdd\x02\xa4\x12\xa5" + "\x50\x32\xb4\x9a\x2e\x35\x83\x55" + "\x36\x12\x12\xed\xa3\x31\xc5\x30" + "\xa7\xe2\x4a\x6d\x05\x59\x43\x91" + "\x75\xfa\x6c\x17\xc6\x73\xca", + .clen = 47, }, { - .key = "\x90\x96\x36\xea\x03\x74\x18\x7a" - "\x1d\x46\x42\x2d\x3f\x8c\x54\xc7" - "\x4b\x4a\x73\x89\xa4\x00\x3f\x34" - "\x2c\xb1\xdb\x0c\x44\xe0\xe8\xd2", - .klen = 32, - .iv = "\xa0\x5f\xc5\x52\x47\x13\xc2\x01" - "\x3d\x7c\x6e\x52\x3d\x55\x85\x48", - .assoc = "\xaf", + .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" + "\x05\x1d\x2c\x68\xdb\xda\x8f\x25", + .klen = 16, + .iv = "\x7a\xa5\xe8\x23\xa4\x84\x9e\x2d" + "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", + .assoc = "", + .alen = 0, + .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" + "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" + "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" + "\x19\x33\xe0\xf4\x40\x81\x72\x28", + .plen = 32, + .ctext = "\xdb\x49\x68\x0f\x91\x5b\x21\xb1" + "\xcf\x50\xb2\x4c\x32\xe1\xa6\x69" + "\xc0\xfb\x44\x1f\xa0\x9a\xeb\x39" + "\x1b\xde\x68\x38\xcc\x27\x52\xc5" + "\xf6\x3e\x74\xea\x66\x5b\x5f\x0c" + "\x65\x9e\x58\xe6\x52\xa2\xfe\x59", + .clen = 48, + }, { + .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" + "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b", + .klen = 16, + .iv = "\xb6\xca\x22\xc3\xb4\x00\x47\x49" + "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", + .assoc = "\xc5", .alen = 1, - .input = "", - .ilen = 0, - .result = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe" - "\x69\xdf\xc4\xc4\x02\x46\x3a\xf0", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\x56\xe7\x24\x52\xdd\x95\x60\x5b" + "\x09\x48\x39\x69\x9c\xb3\x62\x46", + .clen = 16, }, { - .key = "\xcd\xbb\x70\x89\x13\xf0\xc1\x95" - "\x9e\x20\xf4\xbf\x39\xb1\x17\xcd" - "\x76\x0c\x7f\x0d\xa9\xa0\xc1\x4e" - "\xed\xdf\xb9\xe4\x1e\x3f\x87\xa8", - .klen = 32, - .iv = "\xdc\x84\xfe\xf1\x58\x8f\x6b\x1c" - "\xbe\x57\x20\xe3\x37\x7a\x48\x4f", - .assoc = "\xeb\x4d\x8d\x59\x9c\x2e\x15\xa3" - "\xde\x8d\x4d\x07\x36\x43\x78\xd0" - "\x0b\x6d\x84\x4f\x2c\xf0\x82\x5b" - "\x4e\xf6\x29\xd1\x8b\x6f\x56", + .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde" + "\x07\xd1\x90\x8b\xcf\x23\x15\x31", + .klen = 16, + .iv = "\xf3\xee\x5c\x62\xc4\x7c\xf0\x65" + "\x27\x08\xbd\xaf\xce\xec\x45\xb3", + .assoc = "\x02\xb8\xea\xca\x09\x1b\x9a\xec" + "\x47\x3e\xe9\xd4\xcc\xb5\x76", + .alen = 15, + .ptext = "", + .plen = 0, + .ctext = "\xdd\xfa\x6c\x1f\x5d\x86\x87\x01" + "\x13\xe5\x73\x46\x46\xf2\x5c\xe1", + .clen = 16, + }, { + .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa" + "\x88\xab\x42\x1c\xc9\x47\xd7\x38", + .klen = 16, + .iv = "\x2f\x13\x95\x01\xd5\xf7\x99\x81" + "\xa8\xe2\x6f\x41\xc8\x10\x08\xb9", + .assoc = "\x3f\xdc\x24\x69\x19\x96\x43\x08" + "\xc8\x18\x9b\x65\xc6\xd9\x39\x3b", + .alen = 16, + .ptext = "", + .plen = 0, + .ctext = "\xa6\x1b\xb9\xd7\x5e\x3c\xcf\xac" + "\xa9\x21\x45\x0b\x16\x52\xf7\xe1", + .clen = 16, + }, { + .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16" + "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e", + .klen = 16, + .iv = "\x6c\x38\xcf\xa1\xe5\x73\x41\x9d" + "\x29\xbc\x21\xd2\xc2\x35\xcb\xbf", + .assoc = "\x7b\x01\x5d\x08\x29\x12\xec\x24" + "\x49\xf3\x4d\xf7\xc0\xfe\xfb\x41" + "\x3c", + .alen = 17, + .ptext = "", + .plen = 0, + .ctext = "\x15\xff\xde\x3b\x34\xfc\xf6\xf9" + "\xbb\xa8\x62\xad\x0a\xf5\x48\x60", + .clen = 16, + }, { + .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31" + "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44", + .klen = 16, + .iv = 
"\xa8\x5c\x09\x40\xf5\xef\xea\xb8" + "\xaa\x96\xd3\x64\xbc\x59\x8d\xc6", + .assoc = "\xb8\x26\x97\xa8\x39\x8e\x94\x3f" + "\xca\xcd\xff\x88\xba\x22\xbe\x47" + "\x67\xba\x85\xf1\xbb\x30\x56\x26" + "\xaf\x0b\x02\x38\xcc\x44\xa7", .alen = 31, - .input = "", - .ilen = 0, - .result = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d" - "\x2e\x9a\xd6\x61\x43\xc0\x74\x69", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\xd2\x9d\xf8\x3b\xd7\x84\xe9\x2d" + "\x4b\xef\x75\x16\x0a\x99\xae\x6b", + .clen = 16, }, { - .key = "\x0a\xe0\xaa\x29\x24\x6c\x6a\xb1" - "\x1f\xfa\xa6\x50\x33\xd5\xda\xd3" - "\xa0\xce\x8a\x91\xae\x40\x43\x68" - "\xae\x0d\x98\xbd\xf8\x9e\x26\x7f", - .klen = 32, - .iv = "\x19\xa9\x38\x91\x68\x0b\x14\x38" - "\x3f\x31\xd2\x74\x31\x9e\x0a\x55", - .assoc = "\x28\x72\xc7\xf8\xac\xaa\xbe\xbf" - "\x5f\x67\xff\x99\x30\x67\x3b\xd6" - "\x35\x2f\x90\xd3\x31\x90\x04\x74" - "\x0f\x23\x08\xa9\x65\xce\xf6\xea", + .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d" + "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a", + .klen = 16, + .iv = "\xe5\x81\x42\xdf\x05\x6a\x93\xd4" + "\x2b\x70\x85\xf5\xb6\x7d\x50\xcc", + .assoc = "\xf4\x4a\xd1\x47\x49\x09\x3d\x5b" + "\x4b\xa7\xb1\x19\xb4\x46\x81\x4d" + "\x91\x7c\x91\x75\xc0\xd0\xd8\x40" + "\x71\x39\xe1\x10\xa6\xa3\x46\x7a", .alen = 32, - .input = "", - .ilen = 0, - .result = "\xb9\x57\x13\x3e\x82\x31\x61\x65" - "\x0d\x7f\x6c\x96\x93\x5c\x50\xe2", - .rlen = 16, + .ptext = "", + .plen = 0, + .ctext = "\xe4\x8d\xa7\xa7\x45\xc1\x31\x4f" + "\xce\xfb\xaf\xd6\xc2\xe6\xee\xc0", + .clen = 16, }, { - .key = "\x46\x04\xe3\xc8\x34\xe7\x12\xcd" - "\xa0\xd4\x58\xe2\x2d\xf9\x9c\xda" - "\xca\x91\x96\x15\xb4\xe0\xc5\x81" - "\x70\x3a\x77\x95\xd2\xfd\xc5\x55", - .klen = 32, - .iv = "\x55\xcd\x72\x30\x78\x86\xbd\x54" - "\xc0\x0b\x84\x06\x2b\xc2\xcd\x5b", - .assoc = "\x64\x97\x00\x98\xbc\x25\x67\xdb" - "\xe0\x41\xb1\x2a\x2a\x8c\xfe\xdd" - "\x5f\xf2\x9c\x58\x36\x30\x86\x8e" - "\xd1\x51\xe6\x81\x3f\x2d\x95\xc1" - "\x01", - .alen = 33, - .input = "", - .ilen = 0, - .result = "\x81\x96\x34\xde\xbb\x36\xdd\x3e" - "\x4e\x5e\xcb\x44\x21\xb8\x3f\xf1", - .rlen = 16, + .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69" + "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50", + .klen = 16, + .iv = "\x22\xa6\x7c\x7f\x15\xe6\x3c\xf0" + "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", + .assoc = "\x31", + .alen = 1, + .ptext = "\x40", + .plen = 1, + .ctext = "\xe2\x67\x38\x4f\xb9\xad\x7d\x38" + "\x01\xfe\x84\x14\x85\xf8\xd1\xe3" + "\x22", + .clen = 17, }, { - .key = "\x83\x29\x1d\x67\x44\x63\xbb\xe9" - "\x20\xaf\x0a\x73\x27\x1e\x5f\xe0" - "\xf5\x53\xa1\x9a\xb9\x80\x47\x9b" - "\x31\x68\x56\x6e\xac\x5c\x65\x2c", - .klen = 32, - .iv = "\x92\xf2\xac\xcf\x88\x02\x65\x70" - "\x41\xe5\x36\x97\x25\xe7\x90\x61", - .assoc = "\xa1\xbb\x3a\x37\xcc\xa1\x10\xf7" - "\x61\x1c\x63\xbc\x24\xb0\xc0\xe3" - "\x8a\xb4\xa7\xdc\x3b\xd0\x08\xa8" - "\x92\x7f\xc5\x5a\x19\x8c\x34\x97" - "\x0f\x95\x9b\x18\xe4\x8d\xb4\x24" - "\xb9\x33\x28\x18\xe1\x9d\x14\xe0" - "\x64\xb2\x89\x7d\x78\xa8\x05\x7e" - "\x07\x8c\xfc\x88\x2d\xb8\x53", - .alen = 63, - .input = "", - .ilen = 0, - .result = "\x2e\x99\xb6\x79\x57\x56\x80\x36" - "\x8e\xc4\x1c\x12\x7d\x71\x36\x0c", - .rlen = 16, + .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85" + "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57", + .klen = 16, + .iv = "\x5e\xcb\xb6\x1e\x25\x62\xe4\x0c" + "\x2d\x25\xe9\x18\xaa\xc6\xd5\xd8", + .assoc = "\x6d\x94\x44\x86\x69\x00\x8f\x93" + "\x4d\x5b\x15\x3c\xa8\x8f\x06", + .alen = 15, + .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" + "\x6d\x92\x42\x61\xa7\x58\x37", + .plen = 15, + .ctext = "\x77\x32\x61\xeb\xb4\x33\x29\x92" + 
"\x29\x95\xc5\x8e\x85\x76\xab\xfc" + "\x07\x95\xa7\x44\x74\xf7\x22\xff" + "\xd8\xd8\x36\x3d\x8a\x7f\x9e", + .clen = 31, }, { - .key = "\xbf\x4e\x57\x07\x54\xdf\x64\x05" - "\xa1\x89\xbc\x04\x21\x42\x22\xe6" - "\x1f\x15\xad\x1e\xbe\x20\xc9\xb4" - "\xf3\x95\x35\x46\x86\xbb\x04\x03", - .klen = 32, - .iv = "\xce\x17\xe5\x6f\x98\x7e\x0e\x8c" - "\xc2\xbf\xe8\x29\x1f\x0b\x52\x68", - .assoc = "\xdd\xe0\x74\xd6\xdc\x1d\xb8\x13" - "\xe2\xf6\x15\x4d\x1e\xd4\x83\xe9" - "\xb4\x76\xb3\x60\x40\x70\x8a\xc1" - "\x53\xac\xa4\x32\xf3\xeb\xd3\x6e" - "\x1e\x42\xa0\x46\x45\x9f\xc7\x22" - "\xd3\x43\xbc\x7e\xa5\x47\x2a\x6f" - "\x91\x19\x70\x1e\xe1\xfe\x25\x49" - "\xd6\x8f\x93\xc7\x28\x3f\x3d\x03", - .alen = 64, - .input = "", - .ilen = 0, - .result = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce" - "\x3b\x89\x40\x36\xba\x6d\x0e\xa2", - .rlen = 16, + .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" + "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d", + .klen = 16, + .iv = "\x9b\xef\xf0\xbd\x35\xdd\x8d\x28" + "\xad\xff\x9b\xa9\xa4\xeb\x98\xdf", + .assoc = "\xaa\xb8\x7e\x25\x79\x7c\x37\xaf" + "\xce\x36\xc7\xce\xa2\xb4\xc9\x60", + .alen = 16, + .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" + "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2", + .plen = 16, + .ctext = "\xd8\xfd\x44\x45\xf6\x42\x12\x38" + "\xf2\x0b\xea\x4f\x9e\x11\x61\x07" + "\x48\x67\x98\x18\x9b\xd0\x0c\x59" + "\x67\xa4\x11\xb3\x2b\xd6\xc1\x70", + .clen = 32, }, { - .key = "\xfc\x72\x90\xa6\x64\x5a\x0d\x21" - "\x22\x63\x6e\x96\x1b\x67\xe4\xec" - "\x49\xd7\xb9\xa2\xc3\xc0\x4b\xce" - "\xb4\xc3\x14\x1e\x61\x1a\xa3\xd9", - .klen = 32, - .iv = "\x0b\x3c\x1f\x0e\xa8\xf9\xb7\xa7" - "\x42\x9a\x9a\xba\x19\x30\x15\x6e", - .assoc = "\x1a", - .alen = 1, - .input = "\x29", - .ilen = 1, - .result = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6" - "\x17\x75\x81\x16\xdf\x26\xff\x67" - "\x92", - .rlen = 17, + .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" + "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", + .klen = 16, + .iv = "\xd7\x14\x29\x5d\x45\x59\x36\x44" + "\x2e\xd9\x4d\x3b\x9e\x0f\x5b\xe5", + .assoc = "\xe6\xdd\xb8\xc4\x89\xf8\xe0\xca" + "\x4f\x10\x7a\x5f\x9c\xd8\x8b\x66" + "\x3b", + .alen = 17, + .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" + "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" + "\x05", + .plen = 17, + .ctext = "\xb1\xab\x53\x4e\xc7\x40\x16\xb6" + "\x71\x3a\x00\x9f\x41\x88\xb0\xb2" + "\x71\x83\x85\x5f\xc8\x79\x0a\x99" + "\x99\xdc\x89\x1c\x88\xd2\x3e\xf9" + "\x83", + .clen = 33, }, { - .key = "\x38\x97\xca\x45\x74\xd6\xb6\x3c" - "\xa3\x3d\x20\x27\x15\x8b\xa7\xf2" - "\x74\x9a\xc4\x27\xc8\x60\xcd\xe8" - "\x75\xf0\xf2\xf7\x3b\x79\x42\xb0", - .klen = 32, - .iv = "\x47\x60\x59\xad\xb8\x75\x60\xc3" - "\xc3\x74\x4c\x4c\x13\x54\xd8\x74", - .assoc = "\x56\x29\xe7\x15\xfc\x14\x0a\x4a" - "\xe4\xaa\x79\x70\x12\x1d\x08\xf6" - "\x09\xfb\xca\x69\x4b\xb0\x8e\xf5" - "\xd6\x07\x62\xe3\xa8\xa9\x12", + .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8" + "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69", + .klen = 16, + .iv = "\x14\x39\x63\xfc\x56\xd5\xdf\x5f" + "\xaf\xb3\xff\xcc\x98\x33\x1d\xeb", + .assoc = "\x23\x02\xf1\x64\x9a\x73\x89\xe6" + "\xd0\xea\x2c\xf1\x96\xfc\x4e\x6d" + "\x65\x48\xcb\x0a\xda\xf0\x62\xc0" + "\x38\x1d\x3b\x4a\xe9\x7e\x62", .alen = 31, - .input = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1" - "\x04\xe1\xa6\x94\x10\xe6\x39\x77" - "\xd3\xac\x4d\x8a\x8c\x58\x6e\xfb" - "\x06\x13\x9a\xd9\x5e\xc0\xfa", - .ilen = 31, - .result = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd" - "\x3c\xd1\x2a\xd4\x15\x86\x9d\xda" - "\xea\x6c\x6f\xa1\x33\xb0\x7a\x01" - "\x57\xe7\xf3\x7b\x73\xe7\x54\x10" - "\xc6\x91\xe2\xc6\xa0\x69\xe7\xe6" - "\x76\xc3\xf5\x3a\x76\xfd\x4a", - .rlen = 47, + .ptext = 
"\x32\xcb\x80\xcc\xde\x12\x33\x6d" + "\xf0\x20\x58\x15\x95\xc6\x7f\xee" + "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" + "\x68\x28\x73\x40\x9f\x96\x4a", + .plen = 31, + .ctext = "\x29\xc4\xf0\x03\xc1\x86\xdf\x06" + "\x5c\x7b\xef\x64\x87\x00\xd1\x37" + "\xa7\x08\xbc\x7f\x8f\x41\x54\xd0" + "\x3e\xf1\xc3\xa2\x96\x84\xdd\x2a" + "\x2d\x21\x30\xf9\x02\xdb\x06\x0c" + "\xf1\x5a\x66\x69\xe0\xca\x83", + .clen = 47, }, { - .key = "\x75\xbc\x04\xe5\x84\x52\x5e\x58" - "\x24\x17\xd2\xb9\x0e\xaf\x6a\xf9" - "\x9e\x5c\xd0\xab\xcd\x00\x4f\x01" - "\x37\x1e\xd1\xcf\x15\xd8\xe2\x86", - .klen = 32, - .iv = "\x84\x85\x92\x4d\xc8\xf1\x08\xdf" - "\x44\x4e\xff\xdd\x0d\x78\x9a\x7a", - .assoc = "\x93\x4e\x21\xb4\x0c\x90\xb3\x66" - "\x65\x84\x2b\x01\x0b\x42\xcb\xfc" - "\x33\xbd\xd6\xed\x50\x50\x10\x0e" - "\x97\x35\x41\xbb\x82\x08\xb1\xf2", + .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" + "\x10\x57\x85\x39\x93\x8f\xaf\x70", + .klen = 16, + .iv = "\x50\x5d\x9d\x9b\x66\x50\x88\x7b" + "\x30\x8e\xb1\x5e\x92\x58\xe0\xf1", + .assoc = "\x5f\x27\x2b\x03\xaa\xef\x32\x02" + "\x50\xc4\xde\x82\x90\x21\x11\x73" + "\x8f\x0a\xd6\x8f\xdf\x90\xe4\xda" + "\xf9\x4a\x1a\x23\xc3\xdd\x02\x81", .alen = 32, - .input = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed" - "\x85\xbb\x58\x26\x0a\x0b\xfc\x7d" - "\xfe\x6e\x59\x0e\x91\xf8\xf0\x15" - "\xc8\x40\x78\xb1\x38\x1f\x99\xa7", - .ilen = 32, - .result = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a" - "\x71\xce\xe4\xaa\x45\x70\xe6\x84" - "\x62\x48\x08\x64\x86\x6a\xdf\xec" - "\xb4\xa0\xfb\x34\x03\x0c\x19\xf4" - "\x2b\x7b\x36\x73\xec\x54\xa9\x1e" - "\x30\x85\xdb\xe4\xac\xe9\x2c\xca", - .rlen = 48, + .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" + "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" + "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" + "\x29\x56\x52\x19\x79\xf5\xe9\x37", + .plen = 32, + .ctext = "\xe2\x2e\x44\xdf\xd3\x60\x6d\xb2" + "\x70\x57\x37\xc5\xc2\x4f\x8d\x14" + "\xc6\xbf\x8b\xec\xf5\x62\x67\xf2" + "\x2f\xa1\xe6\xd6\xa7\xb1\x8c\x54" + "\xe5\x6b\x49\xf9\x6e\x90\xc3\xaa" + "\x7a\x00\x2e\x4d\x7f\x31\x2e\x81", + .clen = 48, }, { - .key = "\xb1\xe1\x3e\x84\x94\xcd\x07\x74" - "\xa5\xf2\x84\x4a\x08\xd4\x2c\xff" - "\xc8\x1e\xdb\x2f\xd2\xa0\xd1\x1b" - "\xf8\x4c\xb0\xa8\xef\x37\x81\x5d", - .klen = 32, - .iv = "\xc0\xaa\xcc\xec\xd8\x6c\xb1\xfb" - "\xc5\x28\xb1\x6e\x07\x9d\x5d\x81", - .assoc = "\xd0\x73\x5a\x54\x1d\x0b\x5b\x82" - "\xe5\x5f\xdd\x93\x05\x66\x8e\x02" - "\x5e\x80\xe1\x71\x55\xf0\x92\x28" - "\x59\x62\x20\x94\x5c\x67\x50\xc8" - "\x58", + .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" + "\x91\x31\x37\xcb\x8d\xb3\x72\x76", + .klen = 16, + .iv = "\x8d\x82\xd6\x3b\x76\xcc\x30\x97" + "\xb1\x68\x63\xef\x8c\x7c\xa3\xf7", + .assoc = "\x9c\x4b\x65\xa2\xba\x6b\xdb\x1e" + "\xd1\x9e\x90\x13\x8a\x45\xd3\x79" + "\xba\xcd\xe2\x13\xe4\x30\x66\xf4" + "\xba\x78\xf9\xfb\x9d\x3c\xa1\x58" + "\x1a", .alen = 33, - .input = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09" - "\x06\x95\x0a\xb7\x04\x2f\xbe\x84" - "\x28\x30\x64\x92\x96\x98\x72\x2e" - "\x89\x6e\x57\x8a\x13\x7e\x38\x7e" - "\xdb", - .ilen = 33, - .result = "\x85\xe0\xf8\x0f\x8e\x49\xe3\x60" - "\xcb\x4a\x54\x94\xcf\xf5\x7e\x34" - "\xe9\xf8\x80\x65\x53\xd0\x72\x70" - "\x4f\x7d\x9d\xd1\x15\x6f\xb9\x2c" - "\xfa\xe8\xdd\xac\x2e\xe1\x3f\x67" - "\x63\x0f\x1a\x59\xb7\x89\xdb\xf4" - "\xc3", - .rlen = 49, - }, { - .key = "\xee\x05\x77\x23\xa5\x49\xb0\x90" - "\x26\xcc\x36\xdc\x02\xf8\xef\x05" - "\xf3\xe1\xe7\xb3\xd8\x40\x53\x35" - "\xb9\x79\x8f\x80\xc9\x96\x20\x33", - .klen = 32, - .iv = "\xfd\xce\x06\x8b\xe9\xe8\x5a\x17" - "\x46\x02\x63\x00\x01\xc1\x20\x87", - .assoc = "\x0c\x98\x94\xf3\x2d\x87\x04\x9e" - "\x66\x39\x8f\x24\xff\x8a\x50\x08" 
- "\x88\x42\xed\xf6\x5a\x90\x14\x42" - "\x1a\x90\xfe\x6c\x36\xc6\xf0\x9f" - "\x66\xa0\xb5\x2d\x2c\xf8\x25\x15" - "\x55\x90\xa2\x7e\x77\x94\x96\x3a" - "\x71\x1c\xf7\x44\xee\xa8\xc3\x42" - "\xe2\xa3\x84\x04\x0b\xe1\xce", - .alen = 63, - .input = "\x1b\x61\x23\x5b\x71\x26\xae\x25" - "\x87\x6f\xbc\x49\xfe\x53\x81\x8a" - "\x53\xf2\x70\x17\x9b\x38\xf4\x48" - "\x4b\x9b\x36\x62\xed\xdd\xd8\x54" - "\xea\xcb\xb6\x79\x45\xfc\xaa\x54" - "\x5c\x94\x47\x58\xa7\xff\x9c\x9e" - "\x7c\xb6\xf1\xac\xc8\xfd\x8b\x35" - "\xd5\xa4\x6a\xd4\x09\xc2\x08", - .ilen = 63, - .result = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a" - "\xda\x1f\xd3\xff\xbb\xb2\xb0\xf8" - "\xef\xe9\xeb\x9e\x7c\x80\xf4\x2b" - "\x59\xc0\x79\xbc\x17\xa0\x15\x01" - "\xf5\x72\xfb\x5a\xe7\xaf\x07\xe3" - "\x1b\x49\x21\x34\x23\x63\x55\x5e" - "\xee\x4f\x34\x17\xfa\xfe\xa5\x0c" - "\xed\x0b\x23\xea\x9b\xda\x57\x2f" - "\xf6\xa9\xae\x0d\x4e\x40\x96\x45" - "\x7f\xfa\xf0\xbf\xc4\x98\x78", - .rlen = 79, - }, { - .key = "\x2a\x2a\xb1\xc3\xb5\xc5\x59\xac" - "\xa7\xa6\xe8\x6d\xfc\x1d\xb2\x0b" - "\x1d\xa3\xf3\x38\xdd\xe0\xd5\x4e" - "\x7b\xa7\x6e\x58\xa3\xf5\xbf\x0a", - .klen = 32, - .iv = "\x39\xf3\x3f\x2b\xf9\x64\x03\x33" - "\xc7\xdd\x15\x91\xfb\xe6\xe2\x8d", - .assoc = "\x49\xbc\xce\x92\x3d\x02\xad\xba" - "\xe7\x13\x41\xb6\xf9\xaf\x13\x0f" - "\xb2\x04\xf8\x7a\x5f\x30\x96\x5b" - "\xdc\xbd\xdd\x44\x10\x25\x8f\x75" - "\x75\x4d\xb9\x5b\x8e\x0a\x38\x13" - "\x6f\x9f\x36\xe4\x3a\x3e\xac\xc9" - "\x9d\x83\xde\xe5\x57\xfd\xe3\x0e" - "\xb1\xa7\x1b\x44\x05\x67\xb7\x37", - .alen = 64, - .input = "\x58\x85\x5c\xfa\x81\xa1\x57\x40" - "\x08\x4a\x6e\xda\xf8\x78\x44\x90" - "\x7d\xb5\x7b\x9b\xa1\xd8\x76\x62" - "\x0c\xc9\x15\x3b\xc7\x3c\x77\x2b" - "\xf8\x78\xba\xa7\xa6\x0e\xbd\x52" - "\x76\xa3\xdc\xbe\x6b\xa8\xb1\x2d" - "\xa9\x1d\xd8\x4e\x31\x53\xab\x00" - "\xa5\xa7\x01\x13\x04\x49\xf2\x04", - .ilen = 64, - .result = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1" - "\x58\x06\x1a\x9b\x8c\x67\xdf\xeb" - "\x35\x35\x60\x9d\x06\x40\x65\xc1" - "\x93\xe8\xb3\x82\x50\x29\xdd\xb5" - "\x2b\xcb\xde\x18\x78\x6b\x42\xbe" - "\x6d\x24\xd0\xb2\x7d\xd7\x08\x8f" - "\x4a\x18\x98\xad\x8c\xf2\x97\xb4" - "\xf4\x77\xe4\xbf\x41\x3b\xc4\x06" - "\xce\x9e\x34\x81\xf0\x89\x11\x13" - "\x02\x65\xa1\x7c\xdf\x07\x33\x06", - .rlen = 80, - }, { - .key = "\x67\x4f\xeb\x62\xc5\x40\x01\xc7" - "\x28\x80\x9a\xfe\xf6\x41\x74\x12" - "\x48\x65\xfe\xbc\xe2\x80\x57\x68" - "\x3c\xd4\x4d\x31\x7d\x54\x5f\xe1", - .klen = 32, - .iv = "\x76\x18\x79\xca\x09\xdf\xac\x4e" - "\x48\xb7\xc7\x23\xf5\x0a\xa5\x93", - .assoc = "\x85\xe1\x08\x32\x4d\x7e\x56\xd5" - "\x68\xed\xf3\x47\xf3\xd3\xd6\x15" - "\xdd\xc7\x04\xfe\x64\xd0\x18\x75" - "\x9d\xeb\xbc\x1d\xea\x84\x2e\x4c" - "\x83\xf9\xbe\x8a\xef\x1c\x4b\x10" - "\x89\xaf\xcb\x4b\xfe\xe7\xc1\x58" - "\xca\xea\xc6\x87\xc0\x53\x03\xd9" - "\x80\xaa\xb2\x83\xff\xee\xa1\x6a" - "\x04", - .alen = 65, - .input = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c" - "\x88\x24\x20\x6b\xf2\x9c\x06\x96" - "\xa7\x77\x87\x1f\xa6\x78\xf8\x7b" - "\xcd\xf6\xf4\x13\xa1\x9b\x16\x02" - "\x07\x24\xbf\xd5\x08\x20\xd0\x4f" - "\x90\xb3\x70\x24\x2f\x51\xc7\xbb" - "\xd6\x84\xc0\xef\x9a\xa8\xca\xcc" - "\x74\xab\x97\x53\xfe\xd0\xdb\x37" - "\x37\x6a\x0e\x9f\x3f\xa3\x2a\xe3" - "\x1b\x34\x6d\x51\x72\x2b\x17\xe7" - "\x4d\xaa\x2c\x18\xda\xa3\x33\x89" - "\x2a\x9f\xf4\xd2\xed\x76\x3d\x3f" - "\x3c\x15\x9d\x8e\x4f\x3c\x27\xb0" - "\x42\x3f\x2f\x8a\xd4\xc2\x10\xb2" - "\x27\x7f\xe3\x34\x80\x02\x49\x4b" - "\x07\x68\x22\x2a\x88\x25\x53\xb2" - "\x2f", - .ilen = 129, - .result = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6" - "\x85\x43\x88\xd0\xd7\x78\x60\x19" - 
"\x3e\x1f\xb1\xa4\xd6\xc5\x96\xec" - "\xf7\x84\x85\xc7\x27\x0f\x74\x57" - "\x28\x9e\xdd\x90\x3c\x43\x12\xc5" - "\x51\x3d\x39\x8f\xa5\xf4\xe0\x0b" - "\x57\x04\xf1\x6d\xfe\x9b\x84\x27" - "\xe8\xeb\x4d\xda\x02\x0a\xc5\x49" - "\x1a\x55\x5e\x50\x56\x4d\x94\xda" - "\x20\xf8\x12\x54\x50\xb3\x11\xda" - "\xed\x44\x27\x67\xd5\xd1\x8b\x4b" - "\x38\x67\x56\x65\x59\xda\xe6\x97" - "\x81\xae\x2f\x92\x3b\xae\x22\x1c" - "\x91\x59\x38\x18\x00\xe8\xba\x92" - "\x04\x19\x56\xdf\xb0\x82\xeb\x6f" - "\x2e\xdb\x54\x3c\x4b\xbb\x60\x90" - "\x4c\x50\x10\x62\xba\x7a\xb1\x68" - "\x37\xd7\x87\x4e\xe4\x66\x09\x1f" - "\xa5", - .rlen = 145, + .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" + "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" + "\x84\x7d\x65\x34\x25\xd8\x47\xfa" + "\xeb\x83\x31\xf1\x54\x54\x89\x0d" + "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" + "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" + "\x75\x73\x20\x30\x59\x54\xb2\xf0" + "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" + "\x8a", + .plen = 65, + .ctext = "\xc7\xca\x26\x61\x57\xee\xa2\xb9" + "\xb1\x37\xde\x95\x06\x90\x11\x08" + "\x4d\x30\x9f\x24\xc0\x56\xb7\xe1" + "\x0b\x9f\xd2\x57\xe9\xd2\xb1\x76" + "\x56\x9a\xb4\x58\xc5\x08\xfc\xb5" + "\xf2\x31\x9b\xc9\xcd\xb3\x64\xdb" + "\x6f\x50\xbf\xf4\x73\x9d\xfb\x6b" + "\xef\x35\x25\x48\xed\xcf\x29\xa8" + "\xac\xc3\xb9\xcb\x61\x8f\x73\x92" + "\x2c\x7a\x6f\xda\xf9\x09\x6f\xe1" + "\xc4", + .clen = 81, }, { - .key = "\xa3\x73\x24\x01\xd5\xbc\xaa\xe3" - "\xa9\x5a\x4c\x90\xf0\x65\x37\x18" - "\x72\x28\x0a\x40\xe7\x20\xd9\x82" - "\xfe\x02\x2b\x09\x57\xb3\xfe\xb7", - .klen = 32, - .iv = "\xb3\x3d\xb3\x69\x19\x5b\x54\x6a" - "\xc9\x91\x79\xb4\xef\x2e\x68\x99", - .assoc = "\xc2\x06\x41\xd1\x5d\xfa\xff\xf1" - "\xe9\xc7\xa5\xd9\xed\xf8\x98\x1b" - "\x07\x89\x10\x82\x6a\x70\x9a\x8f" - "\x5e\x19\x9b\xf5\xc5\xe3\xcd\x22" - "\x92\xa5\xc2\xb8\x51\x2e\x5e\x0e" - "\xa4\xbe\x5f\xb1\xc1\x90\xd7\xe7" - "\xf7\x52\xae\x28\x29\xa8\x22\xa4" - "\x4f\xae\x48\xc2\xfa\x75\x8b\x9e" - "\xce\x83\x2a\x88\x07\x55\xbb\x89" - "\xf6\xdf\xac\xdf\x83\x08\xbf\x7d" - "\xac\x30\x8b\x8e\x02\xac\x00\xf1" - "\x30\x46\xe1\xbc\x75\xbf\x49\xbb" - "\x26\x4e\x29\xf0\x2f\x21\xc6\x13" - "\x92\xd9\x3d\x11\xe4\x10\x00\x8e" - "\xd4\xd4\x58\x65\xa6\x2b\xe3\x25" - "\xb1\x8f\x15\x93\xe7\x71\xb9\x2c" - "\x4b", - .alen = 129, - .input = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78" - "\x09\xfe\xd2\xfd\xec\xc1\xc9\x9d" - "\xd2\x39\x93\xa3\xab\x18\x7a\x95" - "\x8f\x24\xd3\xeb\x7b\xfa\xb5\xd8" - "\x15\xd1\xc3\x04\x69\x32\xe3\x4d" - "\xaa\xc2\x04\x8b\xf2\xfa\xdc\x4a" - "\x02\xeb\xa8\x90\x03\xfd\xea\x97" - "\x43\xaf\x2e\x92\xf8\x57\xc5\x6a" - "\x00", - .ilen = 65, - .result = "\x7d\xde\x53\x22\xe4\x23\x3b\x30" - "\x78\xde\x35\x90\x7a\xd9\x0b\x93" - "\xf6\x0e\x0b\xed\x40\xee\x10\x9c" - "\x96\x3a\xd3\x34\xb2\xd0\x67\xcf" - "\x63\x7f\x2d\x0c\xcf\x96\xec\x64" - "\x1a\x87\xcc\x7d\x2c\x5e\x81\x4b" - "\xd2\x8f\x4c\x7c\x00\xb1\xb4\xe0" - "\x87\x4d\xb1\xbc\xd8\x78\x2c\x17" - "\xf2\x3b\xd8\x28\x40\xe2\x76\xf6" - "\x20\x13\x83\x46\xaf\xff\xe3\x0f" - "\x72", - .rlen = 81, + .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" + "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c", + .klen = 16, + .iv = "\xc9\xa7\x10\xda\x86\x48\xd9\xb3" + "\x32\x42\x15\x80\x85\xa1\x65\xfe", + .assoc = "\xd8\x70\x9f\x42\xca\xe6\x83\x3a" + "\x52\x79\x42\xa5\x84\x6a\x96\x7f" + "\xe4\x8f\xed\x97\xe9\xd0\xe8\x0d" + "\x7c\xa6\xd8\xd4\x77\x9b\x40\x2e" + "\x28\xce\x57\x34\xcd\x6e\x84\x4c" + "\x17\x3c\xe1\xb2\xa8\x0b\xbb\xf1" + "\x96\x41\x0d\x69\xe8\x54\x0a\xc8" + "\x15\x4e\x91\x92\x89\x4b\xb7\x9b" + "\x21", + .alen = 65, + .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" + "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" + 
"\xaf\x40\x70\xb8\x2a\x78\xc9\x14" + "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" + "\xac", + .plen = 33, + .ctext = "\x57\xcd\x3d\x46\xc5\xf9\x68\x3b" + "\x2c\x0f\xb4\x7e\x7b\x64\x3e\x40" + "\xf3\x78\x63\x34\x89\x79\x39\x6b" + "\x61\x64\x4a\x9a\xfa\x70\xa4\xd3" + "\x54\x0b\xea\x05\xa6\x95\x64\xed" + "\x3d\x69\xa2\x0c\x27\x56\x2f\x34" + "\x66", + .clen = 49, }, { - .key = "\xe0\x98\x5e\xa1\xe5\x38\x53\xff" - "\x2a\x35\xfe\x21\xea\x8a\xfa\x1e" - "\x9c\xea\x15\xc5\xec\xc0\x5b\x9b" - "\xbf\x2f\x0a\xe1\x32\x12\x9d\x8e", - .klen = 32, - .iv = "\xef\x61\xed\x08\x29\xd7\xfd\x86" - "\x4a\x6b\x2b\x46\xe9\x53\x2a\xa0", - .assoc = "\xfe\x2a\x7b\x70\x6d\x75\xa7\x0d" - "\x6a\xa2\x57\x6a\xe7\x1c\x5b\x21" - "\x31\x4b\x1b\x07\x6f\x10\x1c\xa8" - "\x20\x46\x7a\xce\x9f\x42\x6d\xf9", - .alen = 32, - .input = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94" - "\x8a\xd8\x84\x8e\xe6\xe5\x8c\xa3" - "\xfc\xfc\x9e\x28\xb0\xb8\xfc\xaf" - "\x50\x52\xb1\xc4\x55\x59\x55\xaf", - .ilen = 32, - .result = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe" - "\x53\xc7\xaa\x9a\x60\x74\x9c\xc4" - "\xa2\xc2\xd0\x6d\xe1\x03\x63\xdc" - "\xbb\x51\x7e\x9c\x89\x73\xde\x4e" - "\x24\xf8\x52\x7c\x15\x41\x0e\xba" - "\x69\x0e\x36\x5f\x2f\x22\x8c", - .rlen = 47, + .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" + "\x93\xe6\x9b\xee\x81\xfc\xf7\x82", + .klen = 16, + .iv = "\x06\xcc\x4a\x79\x96\xc3\x82\xcf" + "\xb3\x1c\xc7\x12\x7f\xc5\x28\x04", + .assoc = "\x15\x95\xd8\xe1\xda\x62\x2c\x56" + "\xd3\x53\xf4\x36\x7e\x8e\x59\x85", + .alen = 16, + .ptext = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" + "\xf3\x89\x20\x5b\x7c\x57\x89\x07", + .plen = 16, + .ctext = "\xfc\x85\x06\x28\x8f\xe8\x23\x1f" + "\x33\x98\x87\xde\x08\xb6\xb6\xae" + "\x3e\xa4\xf8\x19\xf1\x92\x60\x39" + "\xb9\x6b\x3f\xdf\xc8\xcb\x30", + .clen = 31, }, { - .key = "\x1c\xbd\x98\x40\xf5\xb3\xfc\x1b" - "\xaa\x0f\xb0\xb3\xe4\xae\xbc\x24" - "\xc7\xac\x21\x49\xf1\x60\xdd\xb5" - "\x80\x5d\xe9\xba\x0c\x71\x3c\x64", - .klen = 32, - .iv = "\x2c\x86\x26\xa8\x39\x52\xa6\xa2" - "\xcb\x45\xdd\xd7\xe3\x77\xed\xa6", - .assoc = "\x3b\x4f\xb5\x10\x7d\xf1\x50\x29" - "\xeb\x7c\x0a\xfb\xe1\x40\x1e\x27" - "\x5c\x0d\x27\x8b\x74\xb0\x9e\xc2" - "\xe1\x74\x59\xa6\x79\xa1\x0c\xd0", - .alen = 32, - .input = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0" - "\x0b\xb2\x36\x20\xe0\x09\x4e\xa9" - "\x26\xbe\xaa\xac\xb5\x58\x7e\xc8" - "\x11\x7f\x90\x9c\x2f\xb8\xf4\x85", - .ilen = 32, - .result = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51" - "\xb8\xda\x92\x3c\xfd\xda\xac\x8e" - "\x8d\x88\xd7\x4d\x90\xe5\xeb\xa1" - "\xab\xd6\x7c\x76\xad\xea\x7d\x76" - "\x53\xee\xb0\xcd\xd0\x02\xbb\x70" - "\x5b\x6f\x7b\xe2\x8c\xe8", - .rlen = 46, + .key = "\x33\x27\xf5\xb1\x62\xa0\x80\x63" + "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", + .klen = 16, + .iv = "\x42\xf0\x84\x19\xa6\x3f\x2b\xea" + "\x34\xf6\x79\xa3\x79\xe9\xeb\x0a", + .assoc = "\x51\xb9\x12\x80\xea\xde\xd5\x71" + "\x54\x2d\xa6\xc8\x78\xb2\x1b\x8c", + .alen = 16, + .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" + "\x74\x63\xd2\xec\x76\x7c\x4c\x0d", + .plen = 16, + .ctext = "\x74\x7d\x70\x07\xe9\xba\x01\xee" + "\x6c\xc6\x6f\x50\x25\x33\xbe\x50" + "\x17\xb8\x17\x62\xed\x80\xa2\xf5" + "\x03\xde\x85\x71\x5d\x34", + .clen = 30, }, { - .key = "\x59\xe1\xd2\xdf\x05\x2f\xa4\x37" - "\x2b\xe9\x63\x44\xde\xd3\x7f\x2b" - "\xf1\x6f\x2d\xcd\xf6\x00\x5f\xcf" - "\x42\x8a\xc8\x92\xe6\xd0\xdc\x3b", - .klen = 32, - .iv = "\x68\xab\x60\x47\x49\xce\x4f\xbe" - "\x4c\x20\x8f\x68\xdd\x9c\xb0\xac", - .assoc = "\x77\x74\xee\xaf\x8d\x6d\xf9\x45" - "\x6c\x56\xbc\x8d\xdb\x65\xe0\x2e" - "\x86\xd0\x32\x0f\x79\x50\x20\xdb" - "\xa2\xa1\x37\x7e\x53\x00\xab\xa6", - .alen = 32, - .input = 
"\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc" - "\x8c\x8d\xe8\xb1\xda\x2e\x11\xaf" - "\x51\x80\xb5\x30\xba\xf8\x00\xe2" - "\xd3\xad\x6f\x75\x09\x18\x93\x5c", - .ilen = 32, - .result = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b" - "\xe7\x68\x81\x51\xbb\xfb\xdf\x60" - "\xbb\xac\xe8\xc1\xdc\x68\xae\x68" - "\x3a\xcd\x7a\x06\x49\xfe\x80\x11" - "\xe6\x61\x99\xe2\xdd\xbe\x2c\xbf", - .rlen = 40, + .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" + "\x95\x9a\xff\x10\x75\x45\x7d\x8f", + .klen = 16, + .iv = "\x7f\x15\xbd\xb8\xb6\xba\xd3\x06" + "\xb5\xd1\x2b\x35\x73\x0e\xad\x10", + .assoc = "\x8e\xde\x4c\x20\xfa\x59\x7e\x8d" + "\xd5\x07\x58\x59\x72\xd7\xde\x92", + .alen = 16, + .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" + "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13", + .plen = 16, + .ctext = "\xf4\xb3\x85\xf9\xac\xde\xb1\x38" + "\x29\xfd\x6c\x7c\x49\xe5\x1d\xaf" + "\xba\xea\xd4\xfa\x3f\x11\x33\x98", + .clen = 24, }, { - .key = "\x96\x06\x0b\x7f\x15\xab\x4d\x53" - "\xac\xc3\x15\xd6\xd8\xf7\x42\x31" - "\x1b\x31\x38\x51\xfc\xa0\xe1\xe8" - "\x03\xb8\xa7\x6b\xc0\x2f\x7b\x11", - .klen = 32, - .iv = "\xa5\xcf\x9a\xe6\x59\x4a\xf7\xd9" - "\xcd\xfa\x41\xfa\xd7\xc0\x72\xb2", - .assoc = "\xb4\x99\x28\x4e\x9d\xe8\xa2\x60" - "\xed\x30\x6e\x1e\xd5\x89\xa3\x34" - "\xb1\x92\x3e\x93\x7e\xf0\xa2\xf5" - "\x64\xcf\x16\x57\x2d\x5f\x4a\x7d", - .alen = 32, - .input = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7" - "\x0d\x67\x9a\x43\xd4\x52\xd4\xb5" - "\x7b\x43\xc1\xb5\xbf\x98\x82\xfc" - "\x94\xda\x4e\x4d\xe4\x77\x32\x32", - .ilen = 32, - .result = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e" - "\x5b\x4e\x0b\x15\x6e\x39\xfb\x0c" - "\x73\xc7\xd9\x6b\xbe\xce\x9b\x70" - "\xc7\x4f\x96\x16\x03\xfc\xea\xfb" - "\x56", - .rlen = 33, + .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b" + "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", + .klen = 16, + .iv = "\xbb\x3a\xf7\x57\xc6\x36\x7c\x22" + "\x36\xab\xde\xc6\x6d\x32\x70\x17", + .assoc = "\xcb\x03\x85\xbf\x0a\xd5\x26\xa9" + "\x56\xe1\x0a\xeb\x6c\xfb\xa1\x98", + .alen = 16, + .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" + "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a", + .plen = 16, + .ctext = "\xe6\x5c\x49\x4f\x78\xf3\x62\x86" + "\xe1\xb7\xa5\xc3\x32\x88\x3c\x8c" + "\x6e", + .clen = 17, }, }; -static const struct aead_testvec morus1280_dec_tv_template[] = { +/* + * MORUS-1280 test vectors - generated via reference implementation from + * SUPERCOP (https://bench.cr.yp.to/supercop.html): + * + * https://bench.cr.yp.to/supercop/supercop-20170228.tar.xz + * (see crypto_aead/morus1280128v2/ and crypto_aead/morus1280256v2/ ) + */ +static const struct aead_testvec morus1280_tv_template[] = { { .key = "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00", @@ -25654,11 +20742,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x20\x36\x2c\x24\xfe\xc9\x30\x81", .assoc = "", .alen = 0, - .input = "\x91\x85\x0f\xf5\x52\x9e\xce\xce" + .ptext = "", + .plen = 0, + .ctext = "\x91\x85\x0f\xf5\x52\x9e\xce\xce" "\x65\x99\xc7\xbf\xd3\x76\xe8\x98", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x3c\x24\x39\x9f\x10\x7b\xa8\x1b" "\x80\xda\xb2\x91\xf9\x24\xc2\x06", @@ -25667,12 +20755,12 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xa1\x10\xde\xb5\xf8\xed\xf3\x87", .assoc = "", .alen = 0, - .input = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13" + .ptext = "\x69", + .plen = 1, + .ctext = "\x88\xc3\x4c\xf0\x2f\x43\x76\x13" "\x96\xda\x76\x34\x33\x4e\xd5\x39" "\x73", - .ilen = 17, - .result = "\x69", - .rlen = 1, + .clen = 17, }, { .key = "\x79\x49\x73\x3e\x20\xf7\x51\x37" "\x01\xb4\x64\x22\xf3\x48\x85\x0c", @@ -25681,18 
+20769,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x22\xea\x90\x47\xf2\x11\xb5\x8e", .assoc = "", .alen = 0, - .input = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22" + .ptext = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" + "\x62\x58\xe9\x8f\xef\xa4\x17\x91" + "\xb4\x96\x9f\x6b\xce\x38\xa5\x46" + "\x13\x7d\x64\x93\xd7\x05\xf5", + .plen = 31, + .ctext = "\x3e\x5c\x3b\x58\x3b\x7d\x2a\x22" "\x75\x0b\x24\xa6\x0e\xc3\xde\x52" "\x97\x0b\x64\xd4\xce\x90\x52\xf7" "\xef\xdb\x6a\x38\xd2\xa8\xa1\x0d" "\xe0\x61\x33\x24\xc6\x4d\x51\xbc" "\xa4\x21\x74\xcf\x19\x16\x59", - .ilen = 47, - .result = "\xa6\xa4\x1e\x76\xec\xd4\x50\xcc" - "\x62\x58\xe9\x8f\xef\xa4\x17\x91" - "\xb4\x96\x9f\x6b\xce\x38\xa5\x46" - "\x13\x7d\x64\x93\xd7\x05\xf5", - .rlen = 31, + .clen = 47, }, { .key = "\xb5\x6e\xad\xdd\x30\x72\xfa\x53" "\x82\x8e\x16\xb4\xed\x6d\x47\x12", @@ -25701,18 +20789,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xa2\xc5\x42\xd8\xec\x36\x78\x94", .assoc = "", .alen = 0, - .input = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f" + .ptext = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" + "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97" + "\xde\x58\xab\xf0\xd3\xd8\x27\x60" + "\xd5\xaa\x43\x6b\xb1\x64\x95\xa4", + .plen = 32, + .ctext = "\x30\x82\x9c\x2b\x67\xcb\xf9\x1f" "\xde\x9f\x77\xb2\xda\x92\x61\x5c" "\x09\x0b\x2d\x9a\x26\xaa\x1c\x06" "\xab\x74\xb7\x2b\x95\x5f\x9f\xa1" "\x9a\xff\x50\xa0\xa2\xff\xc5\xad" "\x21\x8e\x84\x5c\x12\x61\xb2\xae", - .ilen = 48, - .result = "\xe2\xc9\x58\x15\xfc\x4f\xf8\xe8" - "\xe3\x32\x9b\x21\xe9\xc8\xd9\x97" - "\xde\x58\xab\xf0\xd3\xd8\x27\x60" - "\xd5\xaa\x43\x6b\xb1\x64\x95\xa4", - .rlen = 32, + .clen = 48, }, { .key = "\xf2\x92\xe6\x7d\x40\xee\xa3\x6f" "\x03\x68\xc8\x45\xe7\x91\x0a\x18", @@ -25721,20 +20809,20 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x23\x9f\xf4\x6a\xe6\x5a\x3b\x9a", .assoc = "", .alen = 0, - .input = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17" + .ptext = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" + "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" + "\x09\x1a\xb7\x74\xd8\x78\xa9\x79" + "\x96\xd8\x22\x43\x8c\xc3\x34\x7b" + "\xc4", + .plen = 33, + .ctext = "\x67\x5d\x8e\x45\xc8\x39\xf5\x17" "\xc1\x1d\x2a\xdd\x88\x67\xda\x1f" "\x6d\xe8\x37\x28\x5a\xc1\x5e\x9f" "\xa6\xec\xc6\x92\x05\x4b\xc0\xa3" "\x63\xef\x88\xa4\x9b\x0a\x5c\xed" "\x2b\x6a\xac\x63\x52\xaa\x10\x94" "\xd0", - .ilen = 49, - .result = "\x1f\xee\x92\xb4\x0c\xcb\xa1\x04" - "\x64\x0c\x4d\xb2\xe3\xec\x9c\x9d" - "\x09\x1a\xb7\x74\xd8\x78\xa9\x79" - "\x96\xd8\x22\x43\x8c\xc3\x34\x7b" - "\xc4", - .rlen = 33, + .clen = 49, }, { .key = "\x2e\xb7\x20\x1c\x50\x6a\x4b\x8b" "\x84\x42\x7a\xd7\xe1\xb5\xcd\x1f", @@ -25743,7 +20831,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xa4\x79\xa6\xfb\xe0\x7f\xfd\xa0", .assoc = "", .alen = 0, - .input = "\x7d\x61\x1a\x35\x20\xcc\x07\x88" + .ptext = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" + "\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" + "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" + "\x57\x05\x01\x1c\x66\x22\xd3\x51" + "\xd3\xdf\x18\xc9\x30\x66\xed\xb1" + "\x96\x58\xd5\x8c\x64\x8c\x7c\xf5" + "\x01\xd0\x74\x5f\x9b\xaa\xf6\xd1" + "\xe6\x16\xa2\xac\xde\x47\x40", + .plen = 63, + .ctext = "\x7d\x61\x1a\x35\x20\xcc\x07\x88" "\x03\x98\x87\xcf\xc0\x6e\x4d\x19" "\xe3\xd4\x0b\xfb\x29\x8f\x49\x1a" "\x3a\x06\x77\xce\x71\x2c\xcd\xdd" @@ -25753,16 +20850,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xdf\x5b\x0f\xbd\x8a\x88\x78\xc9" "\xe5\x81\x37\xde\x84\x7a\xf6\x84" "\x99\x7a\x72\x9c\x54\x31\xa1", - .ilen = 79, - .result = "\x5c\x13\xcb\x54\x1c\x47\x4a\x1f" - 
"\xe5\xe6\xff\x44\xdd\x11\x5f\xa3" - "\x33\xdd\xc2\xf8\xdd\x18\x2b\x93" - "\x57\x05\x01\x1c\x66\x22\xd3\x51" - "\xd3\xdf\x18\xc9\x30\x66\xed\xb1" - "\x96\x58\xd5\x8c\x64\x8c\x7c\xf5" - "\x01\xd0\x74\x5f\x9b\xaa\xf6\xd1" - "\xe6\x16\xa2\xac\xde\x47\x40", - .rlen = 63, + .clen = 79, }, { .key = "\x6b\xdc\x5a\xbb\x60\xe5\xf4\xa6" "\x05\x1d\x2c\x68\xdb\xda\x8f\x25", @@ -25771,7 +20859,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x25\x53\x58\x8c\xda\xa3\xc0\xa6", .assoc = "", .alen = 0, - .input = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c" + .ptext = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" + "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" + "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" + "\x19\x33\xe0\xf4\x40\x81\x72\x28" + "\xe1\x8b\x1c\xf8\x91\x78\xff\xaf" + "\xb0\x68\x69\xf2\x27\x35\x91\x84" + "\x2e\x37\x5b\x00\x04\xff\x16\x9c" + "\xb5\x19\x39\xeb\xd9\xcd\x29\x9a", + .plen = 64, + .ctext = "\x05\xc5\xb1\xf9\x1b\xb9\xab\x2c" "\xa5\x07\x12\xa7\x12\x39\x60\x66" "\x30\x81\x4a\x03\x78\x28\x45\x52" "\xd2\x2b\x24\xfd\x8b\xa5\xb7\x66" @@ -25781,16 +20878,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xeb\x71\x40\xc9\x2c\x40\x45\x6d" "\x73\x77\x01\xf3\x4f\xf3\x9d\x2a" "\x5d\x57\xa8\xa1\x18\xa2\xad\xcb", - .ilen = 80, - .result = "\x98\x37\x05\xf3\x2c\xc2\xf3\x3b" - "\x66\xc0\xb1\xd5\xd7\x35\x21\xaa" - "\x5d\x9f\xce\x7c\xe2\xb8\xad\xad" - "\x19\x33\xe0\xf4\x40\x81\x72\x28" - "\xe1\x8b\x1c\xf8\x91\x78\xff\xaf" - "\xb0\x68\x69\xf2\x27\x35\x91\x84" - "\x2e\x37\x5b\x00\x04\xff\x16\x9c" - "\xb5\x19\x39\xeb\xd9\xcd\x29\x9a", - .rlen = 64, + .clen = 80, }, { .key = "\xa7\x00\x93\x5b\x70\x61\x9d\xc2" "\x86\xf7\xde\xfa\xd5\xfe\x52\x2b", @@ -25799,11 +20887,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xa6\x2d\x0a\x1e\xd4\xc7\x83\xad", .assoc = "\xc5", .alen = 1, - .input = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e" + .ptext = "", + .plen = 0, + .ctext = "\x4d\xbf\x11\xac\x7f\x97\x0b\x2e" "\x89\x3b\x9d\x0f\x83\x1c\x08\xc3", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\xe4\x25\xcd\xfa\x80\xdd\x46\xde" "\x07\xd1\x90\x8b\xcf\x23\x15\x31", @@ -25815,11 +20903,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xe8\x73\x62\x64\xab\x50\xd0\xda" "\x6b\x83\x66\xaf\x3e\x27\xc9", .alen = 31, - .input = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38" + .ptext = "", + .plen = 0, + .ctext = "\x5b\xc0\x8d\x54\xe4\xec\xbe\x38" "\x03\x12\xf9\xcc\x9e\x46\x42\x92", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x20\x4a\x07\x99\x91\x58\xee\xfa" "\x88\xab\x42\x1c\xc9\x47\xd7\x38", @@ -25831,11 +20919,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x12\x35\x6e\xe8\xb0\xf0\x52\xf3" "\x2d\xb0\x45\x87\x18\x86\x68\xf6", .alen = 32, - .input = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2" + .ptext = "", + .plen = 0, + .ctext = "\x48\xc5\xc3\x4c\x40\x2e\x2f\xc2" "\x6d\x65\xe0\x67\x9c\x1d\xa0\xf0", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x5d\x6f\x41\x39\xa1\xd4\x97\x16" "\x09\x85\xf4\xae\xc3\x6b\x9a\x3e", @@ -25848,11 +20936,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xee\xde\x23\x60\xf2\xe5\x08\xcc" "\x97", .alen = 33, - .input = "\x28\x64\x78\x51\x55\xd8\x56\x4a" + .ptext = "", + .plen = 0, + .ctext = "\x28\x64\x78\x51\x55\xd8\x56\x4a" "\x58\x3e\xf7\xbe\xee\x21\xfe\x94", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x99\x93\x7a\xd8\xb1\x50\x40\x31" "\x8a\x60\xa6\x3f\xbd\x90\x5d\x44", @@ -25868,11 +20956,11 @@ static const struct aead_testvec 
morus1280_dec_tv_template[] = { "\x03\xa1\xe8\xbe\x37\x54\xec\xa2" "\xcd\x2c\x45\x58\xbd\x8e\x80", .alen = 63, - .input = "\xb3\xa6\x00\x4e\x09\x20\xac\x21" + .ptext = "", + .plen = 0, + .ctext = "\xb3\xa6\x00\x4e\x09\x20\xac\x21" "\x77\x72\x69\x76\x2d\x36\xe5\xc8", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\xd6\xb8\xb4\x77\xc1\xcb\xe9\x4d" "\x0a\x3a\x58\xd1\xb7\xb4\x1f\x4a", @@ -25888,11 +20976,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x30\x08\xd0\x5f\xa0\xaa\x0c\x6d" "\x9c\x2f\xdb\x97\xb8\x15\x69\x01", .alen = 64, - .input = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd" + .ptext = "", + .plen = 0, + .ctext = "\x65\x33\x7b\xa1\x63\xf4\x20\xdd" "\xe4\xb9\x4a\xaa\x9a\x21\xaa\x14", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x12\xdd\xee\x17\xd1\x47\x92\x69" "\x8b\x14\x0a\x62\xb1\xd9\xe2\x50", @@ -25901,12 +20989,12 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xac\x4b\x37\x86\xb0\xa2\x13\xd2", .assoc = "\x31", .alen = 1, - .input = "\x1d\x47\x17\x34\x86\xf5\x54\x1a" + .ptext = "\x40", + .plen = 1, + .ctext = "\x1d\x47\x17\x34\x86\xf5\x54\x1a" "\x6d\x28\xb8\x5d\x6c\xcf\xa0\xb9" "\xbf", - .ilen = 17, - .result = "\x40", - .rlen = 1, + .clen = 17, }, { .key = "\x4f\x01\x27\xb6\xe1\xc3\x3a\x85" "\x0c\xee\xbc\xf4\xab\xfd\xa5\x57", @@ -25918,18 +21006,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xe6\x01\xa8\x7e\xca\x10\xdc\x73" "\xf4\x94\x9f\xc1\x5a\x61\x85", .alen = 31, - .input = "\x78\x90\x52\xae\x0f\xf7\x2e\xef" + .ptext = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" + "\x6d\x92\x42\x61\xa7\x58\x37\xdb" + "\xb0\xb2\x2b\x9f\x0b\xb8\xbd\x7a" + "\x24\xa0\xd6\xb7\x11\x79\x6c", + .plen = 31, + .ctext = "\x78\x90\x52\xae\x0f\xf7\x2e\xef" "\x63\x09\x08\x58\xb5\x56\xbd\x72" "\x6e\x42\xcf\x27\x04\x7c\xdb\x92" "\x18\xe9\xa4\x33\x90\xba\x62\xb5" "\x70\xd3\x88\x9b\x4f\x05\xa7\x51" "\x85\x87\x17\x09\x42\xed\x4e", - .ilen = 47, - .result = "\x7c\x5d\xd3\xee\xad\x9f\x39\x1a" - "\x6d\x92\x42\x61\xa7\x58\x37\xdb" - "\xb0\xb2\x2b\x9f\x0b\xb8\xbd\x7a" - "\x24\xa0\xd6\xb7\x11\x79\x6c", - .rlen = 31, + .clen = 47, }, { .key = "\x8b\x26\x61\x55\xf1\x3e\xe3\xa1" "\x8d\xc8\x6e\x85\xa5\x21\x67\x5d", @@ -25941,18 +21029,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x10\xc3\xb3\x02\xcf\xb0\x5e\x8d" "\xb5\xc2\x7e\x9a\x35\xc0\x24\xfd", .alen = 32, - .input = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41" + .ptext = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" + "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2" + "\xdb\x74\x36\x23\x11\x58\x3f\x93" + "\xe5\xcd\xb5\x90\xeb\xd8\x0c\xb3", + .plen = 32, + .ctext = "\x1d\x2c\x57\xe0\x50\x38\x3d\x41" "\x2e\x71\xc8\x3b\x92\x43\x58\xaf" "\x5a\xfb\xad\x8f\xd9\xd5\x8a\x5e" "\xdb\xf3\xcd\x3a\x2b\xe1\x2c\x1a" "\xb0\xed\xe3\x0c\x6e\xf9\xf2\xd6" "\x90\xe6\xb1\x0e\xa5\x8a\xac\xb7", - .ilen = 48, - .result = "\xb9\x82\x0c\x8d\xbd\x1b\xe2\x36" - "\xee\x6c\xf4\xf2\xa1\x7d\xf9\xe2" - "\xdb\x74\x36\x23\x11\x58\x3f\x93" - "\xe5\xcd\xb5\x90\xeb\xd8\x0c\xb3", - .rlen = 32, + .clen = 48, }, { .key = "\xc8\x4b\x9b\xf5\x01\xba\x8c\xbd" "\x0e\xa3\x21\x16\x9f\x46\x2a\x63", @@ -25965,20 +21053,20 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x76\xef\x5c\x72\x0f\x1f\xc3\xd4" "\xee", .alen = 33, - .input = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc" + .ptext = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" + "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" + "\x05\x36\x42\xa7\x16\xf8\xc1\xad" + "\xa7\xfb\x94\x68\xc5\x37\xab\x8a" + "\x72", + .plen = 33, + .ctext = "\x59\x10\x84\x1c\x83\x4c\x8b\xfc" 
"\xfd\x2e\x4b\x46\x84\xff\x78\x4e" "\x50\xda\x5c\xb9\x61\x1d\xf5\xb9" "\xfe\xbb\x7f\xae\x8c\xc1\x24\xbd" "\x8c\x6f\x1f\x9b\xce\xc6\xc1\x37" "\x08\x06\x5a\xe5\x96\x10\x95\xc2" "\x5e", - .ilen = 49, - .result = "\xf5\xa6\x46\x2c\xce\x97\x8a\x51" - "\x6f\x46\xa6\x83\x9b\xa1\xbc\xe8" - "\x05\x36\x42\xa7\x16\xf8\xc1\xad" - "\xa7\xfb\x94\x68\xc5\x37\xab\x8a" - "\x72", - .rlen = 33, + .clen = 49, }, { .key = "\x05\x70\xd5\x94\x12\x36\x35\xd8" "\x8f\x7d\xd3\xa8\x99\x6a\xed\x69", @@ -25994,7 +21082,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x10\x0b\x56\x85\xad\x54\xaa\x66" "\xa8\x43\xcd\xd4\x9b\xb7\xfa", .alen = 63, - .input = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22" + .ptext = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" + "\xf0\x20\x58\x15\x95\xc6\x7f\xee" + "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" + "\x68\x28\x73\x40\x9f\x96\x4a\x60" + "\x80\xf4\x4b\xf4\xc1\x3d\xd0\x93" + "\xcf\x12\xc9\x59\x8f\x7a\x7f\xa8" + "\x1b\xa5\x50\xed\x87\xa9\x72\x59" + "\x9c\x44\xb2\xa4\x99\x98\x34", + .plen = 63, + .ctext = "\x9a\x12\xbc\xdf\x72\xa8\x56\x22" "\x49\x2d\x07\x92\xfc\x3d\x6d\x5f" "\xef\x36\x19\xae\x91\xfa\xd6\x63" "\x46\xea\x8a\x39\x14\x21\xa6\x37" @@ -26004,16 +21101,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x58\x58\x23\x40\xfd\xa5\xc2\xe6" "\xe9\x5a\x50\x98\x00\x58\xc9\x86" "\x4f\x20\x37\xdb\x7b\x22\xa3", - .ilen = 79, - .result = "\x32\xcb\x80\xcc\xde\x12\x33\x6d" - "\xf0\x20\x58\x15\x95\xc6\x7f\xee" - "\x2f\xf9\x4e\x2c\x1b\x98\x43\xc7" - "\x68\x28\x73\x40\x9f\x96\x4a\x60" - "\x80\xf4\x4b\xf4\xc1\x3d\xd0\x93" - "\xcf\x12\xc9\x59\x8f\x7a\x7f\xa8" - "\x1b\xa5\x50\xed\x87\xa9\x72\x59" - "\x9c\x44\xb2\xa4\x99\x98\x34", - .rlen = 63, + .clen = 79, }, { .key = "\x41\x94\x0e\x33\x22\xb1\xdd\xf4" "\x10\x57\x85\x39\x93\x8f\xaf\x70", @@ -26029,7 +21117,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x3d\x72\x3e\x26\x16\xa9\xca\x32" "\x77\x47\x63\x14\x95\x3d\xe4\x34", .alen = 64, - .input = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb" + .ptext = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" + "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" + "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" + "\x29\x56\x52\x19\x79\xf5\xe9\x37" + "\x8f\xa1\x50\x23\x22\x4f\xe3\x91" + "\xe9\x21\x5e\xbf\x52\x23\x95\x37" + "\x48\x0c\x38\x8f\xf0\xff\x92\x24" + "\x6b\x47\x49\xe3\x94\x1f\x1e\x01", + .plen = 64, + .ctext = "\xe6\xeb\x92\x5a\x5b\xf0\x2d\xbb" "\x23\xec\x35\xe3\xae\xc9\xfb\x0b" "\x90\x14\x46\xeb\xa8\x8d\xb0\x9b" "\x39\xda\x8b\x48\xec\xb2\x00\x4e" @@ -26039,16 +21136,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xec\x99\x7d\x61\xb3\x15\x93\xed" "\x83\x1e\xd9\x48\x84\x0b\x37\xfe" "\x95\x74\x44\xd5\x54\xa6\x27\x06", - .ilen = 80, - .result = "\x6e\xf0\xba\x6b\xee\x8e\xdc\x89" - "\x71\xfb\x0a\xa6\x8f\xea\x41\xf4" - "\x5a\xbb\x59\xb0\x20\x38\xc5\xe0" - "\x29\x56\x52\x19\x79\xf5\xe9\x37" - "\x8f\xa1\x50\x23\x22\x4f\xe3\x91" - "\xe9\x21\x5e\xbf\x52\x23\x95\x37" - "\x48\x0c\x38\x8f\xf0\xff\x92\x24" - "\x6b\x47\x49\xe3\x94\x1f\x1e\x01", - .rlen = 64, + .clen = 80, }, { .key = "\x7e\xb9\x48\xd3\x32\x2d\x86\x10" "\x91\x31\x37\xcb\x8d\xb3\x72\x76", @@ -26065,7 +21153,25 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x46\x4a\xfa\x53\x8f\xc4\xcd\x68" "\x58", .alen = 65, - .input = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9" + .ptext = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" + "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" + "\x84\x7d\x65\x34\x25\xd8\x47\xfa" + "\xeb\x83\x31\xf1\x54\x54\x89\x0d" + "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" + "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" + "\x75\x73\x20\x30\x59\x54\xb2\xf0" 
+ "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" + "\x8a\xdf\x27\xa0\xe4\x60\x99\xae" + "\x8e\x43\xd9\x39\x7b\x10\x40\x67" + "\x5c\x7e\xc9\x70\x63\x34\xca\x59" + "\xfe\x86\xbc\xb7\x9c\x39\xf3\x6d" + "\x6a\x41\x64\x6f\x16\x7f\x65\x7e" + "\x89\x84\x68\xeb\xb0\x51\xbe\x55" + "\x33\x16\x59\x6c\x3b\xef\x88\xad" + "\x2f\xab\xbc\x25\x76\x87\x41\x2f" + "\x36", + .plen = 129, + .ctext = "\x89\x24\x27\x86\xdc\xd7\x6b\xd9" "\xd1\xcd\xdc\x16\xdd\x2c\xc1\xfb" "\x52\xb5\xb3\xab\x50\x99\x3f\xa0" "\x38\xa4\x74\xa5\x04\x15\x63\x05" @@ -26084,25 +21190,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xed\xed\x35\xe8\x83\xa5\xec\x25" "\x6b\xff\x5f\x1a\x09\x96\x3d\xdc" "\x20", - .ilen = 145, - .result = "\xab\x14\xf3\x0a\xfe\x0a\x85\xa5" - "\xf2\xd5\xbc\x38\x89\x0e\x04\xfb" - "\x84\x7d\x65\x34\x25\xd8\x47\xfa" - "\xeb\x83\x31\xf1\x54\x54\x89\x0d" - "\x9d\x4d\x54\x51\x84\x61\xf6\x8e" - "\x03\x31\xf2\x25\x16\xcc\xaa\xc6" - "\x75\x73\x20\x30\x59\x54\xb2\xf0" - "\x3a\x4b\xe0\x23\x8e\xa6\x08\x35" - "\x8a\xdf\x27\xa0\xe4\x60\x99\xae" - "\x8e\x43\xd9\x39\x7b\x10\x40\x67" - "\x5c\x7e\xc9\x70\x63\x34\xca\x59" - "\xfe\x86\xbc\xb7\x9c\x39\xf3\x6d" - "\x6a\x41\x64\x6f\x16\x7f\x65\x7e" - "\x89\x84\x68\xeb\xb0\x51\xbe\x55" - "\x33\x16\x59\x6c\x3b\xef\x88\xad" - "\x2f\xab\xbc\x25\x76\x87\x41\x2f" - "\x36", - .rlen = 129, + .clen = 145, }, { .key = "\xba\xde\x82\x72\x42\xa9\x2f\x2c" "\x12\x0b\xe9\x5c\x87\xd7\x35\x7c", @@ -26127,7 +21215,17 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xd9\xd2\xaf\x8e\xd5\xd3\xa8\xa9" "\x51", .alen = 129, - .input = "\x36\x78\xb9\x22\xde\x62\x35\x55" + .ptext = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" + "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" + "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" + "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" + "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" + "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" + "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" + "\x09\x4f\x77\x62\x88\x2d\xf2\x68" + "\x54", + .plen = 65, + .ctext = "\x36\x78\xb9\x22\xde\x62\x35\x55" "\x1a\x7a\xf5\x45\xbc\xd7\x15\x82" "\x01\xe9\x5a\x07\xea\x46\xaf\x91" "\xcb\x73\xa5\xee\xe1\xb4\xbf\xc2" @@ -26138,17 +21236,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xbf\xbc\x88\x3f\x5d\xd1\xf9\x19" "\x0f\x9d\xb2\xaf\xb9\x6e\x17\xdf" "\xa2", - .ilen = 81, - .result = "\xe8\x39\x2d\xaa\x0e\x85\x2d\xc1" - "\x72\xaf\x6e\xc9\x82\x33\xc7\x01" - "\xaf\x40\x70\xb8\x2a\x78\xc9\x14" - "\xac\xb1\x10\xca\x2e\xb3\x28\xe4" - "\xac\xfa\x58\x7f\xe5\x73\x09\x8c" - "\x1d\x40\x87\x8c\xd9\x75\xc0\x55" - "\xa2\xda\x07\xd1\xc2\xa9\xd1\xbb" - "\x09\x4f\x77\x62\x88\x2d\xf2\x68" - "\x54", - .rlen = 65, + .clen = 81, }, { .key = "\xf7\x02\xbb\x11\x52\x24\xd8\x48" "\x93\xe6\x9b\xee\x81\xfc\xf7\x82", @@ -26160,18 +21248,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x0e\x51\xf9\x1c\xee\x70\x6a\x27" "\x3d\xd3\xb7\xac\x51\xfa\xdf\x05", .alen = 32, - .input = "\x08\x1b\x95\x0e\x41\x95\x02\x4b" + .ptext = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" + "\xf3\x89\x20\x5b\x7c\x57\x89\x07" + "\xd9\x02\x7c\x3d\x2f\x18\x4b\x2d" + "\x6e\xde\xee\xa2\x08\x12\xc7\xba", + .plen = 32, + .ctext = "\x08\x1b\x95\x0e\x41\x95\x02\x4b" "\x9c\xbb\xa8\xd0\x7c\xd3\x44\x6e" "\x89\x14\x33\x70\x0a\xbc\xea\x39" "\x88\xaa\x2b\xd5\x73\x11\x55\xf5" "\x33\x33\x9c\xd7\x42\x34\x49\x8e" "\x2f\x03\x30\x05\x47\xaf\x34", - .ilen = 47, - .result = "\x24\x5e\x67\x49\x1e\x01\xd6\xdd" - "\xf3\x89\x20\x5b\x7c\x57\x89\x07" - "\xd9\x02\x7c\x3d\x2f\x18\x4b\x2d" - "\x6e\xde\xee\xa2\x08\x12\xc7\xba", - .rlen = 32, + .clen = 47, }, { .key = 
"\x33\x27\xf5\xb1\x62\xa0\x80\x63" "\x14\xc0\x4d\x7f\x7b\x20\xba\x89", @@ -26183,18 +21271,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x39\x14\x05\xa0\xf3\x10\xec\x41" "\xff\x01\x95\x84\x2b\x59\x7f\xdb", .alen = 32, - .input = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68" + .ptext = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" + "\x74\x63\xd2\xec\x76\x7c\x4c\x0d" + "\x03\xc4\x88\xc1\x35\xb8\xcd\x47" + "\x2f\x0c\xcd\x7a\xe2\x71\x66\x91", + .plen = 32, + .ctext = "\x97\xca\xf4\xe0\x8d\x89\xbf\x68" "\x0c\x60\xb9\x27\xdf\xaa\x41\xc6" "\x25\xd8\xf7\x1f\x10\x15\x48\x61" "\x4c\x95\x00\xdf\x51\x9b\x7f\xe6" "\x24\x40\x9e\xbe\x3b\xeb\x1b\x98" "\xb9\x9c\xe5\xef\xf2\x05", - .ilen = 46, - .result = "\x61\x83\xa0\xe8\x2e\x7d\x7f\xf8" - "\x74\x63\xd2\xec\x76\x7c\x4c\x0d" - "\x03\xc4\x88\xc1\x35\xb8\xcd\x47" - "\x2f\x0c\xcd\x7a\xe2\x71\x66\x91", - .rlen = 32, + .clen = 46, }, { .key = "\x70\x4c\x2f\x50\x72\x1c\x29\x7f" "\x95\x9a\xff\x10\x75\x45\x7d\x8f", @@ -26206,17 +21294,17 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x63\xd6\x10\x24\xf8\xb0\x6e\x5a" "\xc0\x2e\x74\x5d\x06\xb8\x1e\xb2", .alen = 32, - .input = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d" + .ptext = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" + "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13" + "\x2e\x86\x93\x45\x3a\x58\x4f\x61" + "\xf0\x3a\xac\x53\xbc\xd0\x06\x68", + .plen = 32, + .ctext = "\x63\x4c\x2a\x8e\xb4\x6b\x63\x0d" "\xb5\xec\x9b\x4e\x12\x23\xa3\xcf" "\x1a\x5a\x70\x15\x5a\x10\x40\x51" "\xca\x47\x4c\x9d\xc9\x97\xf4\x77" "\xdb\xc8\x10\x2d\xdc\x65\x20\x3f", - .ilen = 40, - .result = "\x9d\xa7\xda\x88\x3e\xf8\x28\x14" - "\xf5\x3e\x85\x7d\x70\xa0\x0f\x13" - "\x2e\x86\x93\x45\x3a\x58\x4f\x61" - "\xf0\x3a\xac\x53\xbc\xd0\x06\x68", - .rlen = 32, + .clen = 40, }, { .key = "\xac\x70\x69\xef\x82\x97\xd2\x9b" "\x15\x74\xb1\xa2\x6f\x69\x3f\x95", @@ -26228,17 +21316,17 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x8d\x98\x1c\xa8\xfe\x50\xf0\x74" "\x81\x5c\x53\x35\xe0\x17\xbd\x88", .alen = 32, - .input = "\xf1\x62\x44\xc7\x5f\x19\xca\x43" + .ptext = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" + "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a" + "\x58\x49\x9f\xc9\x3f\xf8\xd1\x7a" + "\xb2\x67\x8b\x2b\x96\x2f\xa5\x3e", + .plen = 32, + .ctext = "\xf1\x62\x44\xc7\x5f\x19\xca\x43" "\x47\x2c\xaf\x68\x82\xbd\x51\xef" "\x3d\x65\xd8\x45\x2d\x06\x07\x78" "\x08\x2e\xb3\x23\xcd\x81\x12\x55" "\x1a", - .ilen = 33, - .result = "\xda\xcc\x14\x27\x4e\x74\xd1\x30" - "\x76\x18\x37\x0f\x6a\xc4\xd1\x1a" - "\x58\x49\x9f\xc9\x3f\xf8\xd1\x7a" - "\xb2\x67\x8b\x2b\x96\x2f\xa5\x3e", - .rlen = 32, + .clen = 33, }, { .key = "\xe9\x95\xa2\x8f\x93\x13\x7b\xb7" "\x96\x4e\x63\x33\x69\x8d\x02\x9b" @@ -26249,11 +21337,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xb7\x85\x90\x58\x67\x57\x33\x1d", .assoc = "", .alen = 0, - .input = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf" + .ptext = "", + .plen = 0, + .ctext = "\xdf\x2f\x83\xc0\x45\x4a\x2c\xcf" "\xb9\xd2\x41\xf6\x80\xa1\x52\x70", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x25\xba\xdc\x2e\xa3\x8f\x24\xd3" "\x17\x29\x15\xc5\x63\xb2\xc5\xa1" @@ -26264,12 +21352,12 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x38\x5f\x42\xe9\x61\x7b\xf5\x23", .assoc = "", .alen = 0, - .input = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7" + .ptext = "\x53", + .plen = 1, + .ctext = "\x01\xd8\x55\x3c\xc0\x5a\x4b\xc7" "\x01\xf4\x08\xe3\x0d\xf7\xf0\x78" "\x53", - .ilen = 17, - .result = "\x53", - .rlen = 1, + .clen = 17, }, { .key = "\x62\xdf\x16\xcd\xb3\x0a\xcc\xef" 
"\x98\x03\xc7\x56\x5d\xd6\x87\xa8" @@ -26280,18 +21368,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xb8\x39\xf4\x7a\x5b\x9f\xb8\x29", .assoc = "", .alen = 0, - .input = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07" + .ptext = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83" + "\xf9\xa6\x4d\xc3\x58\x31\x19\x2c" + "\xd7\x90\xc2\x56\x4e\xd8\x57\xc7" + "\xf6\xf0\x27\xb4\x25\x4c\x83", + .plen = 31, + .ctext = "\xc2\x4b\x41\x0f\x2d\xb9\x62\x07" "\xff\x8e\x74\xf8\xa1\xa6\xd5\x37" "\xa5\x64\x31\x5c\xca\x73\x9b\x43" "\xe6\x70\x63\x46\x95\xcb\xf7\xb5" "\x20\x8c\x75\x7a\x2a\x17\x2f\xa9" "\xb8\x4d\x11\x42\xd1\xf8\xf1", - .ilen = 47, - .result = "\x8f\x3a\xc1\x05\x7f\xe7\xcb\x83" - "\xf9\xa6\x4d\xc3\x58\x31\x19\x2c" - "\xd7\x90\xc2\x56\x4e\xd8\x57\xc7" - "\xf6\xf0\x27\xb4\x25\x4c\x83", - .rlen = 31, + .clen = 47, }, { .key = "\x9e\x03\x4f\x6d\xc3\x86\x75\x0a" "\x19\xdd\x79\xe8\x57\xfb\x4a\xae" @@ -26302,18 +21390,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x39\x14\xa6\x0c\x55\xc4\x7b\x30", .assoc = "", .alen = 0, - .input = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37" + .ptext = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f" + "\x7a\x81\xff\x55\x52\x56\xdc\x33" + "\x01\x52\xcd\xdb\x53\x78\xd9\xe1" + "\xb7\x1d\x06\x8d\xff\xab\x22\x98", + .plen = 32, + .ctext = "\xbb\x01\x7c\xd1\x2c\x33\x7b\x37" "\x0a\xee\xc4\x30\x19\xd7\x3a\x6f" "\xf8\x2b\x67\xf5\x3b\x84\x87\x2a" "\xfb\x07\x7a\x82\xb5\xe4\x85\x26" "\x1e\xa8\xe5\x04\x54\xce\xe5\x5f" "\xb5\x3f\xc1\xd5\x7f\xbd\xd2\xa6", - .ilen = 48, - .result = "\xcc\x5f\xfb\xa4\x8f\x63\x74\x9f" - "\x7a\x81\xff\x55\x52\x56\xdc\x33" - "\x01\x52\xcd\xdb\x53\x78\xd9\xe1" - "\xb7\x1d\x06\x8d\xff\xab\x22\x98", - .rlen = 32, + .clen = 48, }, { .key = "\xdb\x28\x89\x0c\xd3\x01\x1e\x26" "\x9a\xb7\x2b\x79\x51\x1f\x0d\xb4" @@ -26324,20 +21412,20 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xba\xee\x58\x9d\x4f\xe8\x3d\x36", .assoc = "", .alen = 0, - .input = "\xc2\xf4\x40\x55\xf9\x59\xff\x73" + .ptext = "\x08\x84\x34\x44\x9f\xde\x1c\xbb" + "\xfb\x5b\xb1\xe6\x4c\x7a\x9f\x39" + "\x2c\x14\xd9\x5f\x59\x18\x5b\xfb" + "\x79\x4b\xe5\x65\xd9\x0a\xc1\x6f" + "\x2e", + .plen = 33, + .ctext = "\xc2\xf4\x40\x55\xf9\x59\xff\x73" "\x08\xf5\x98\x92\x0c\x7b\x35\x9a" "\xa8\xf4\x42\x7e\x6f\x93\xca\x22" "\x23\x06\x1e\xf8\x89\x22\xf4\x46" "\x7c\x7c\x67\x75\xab\xe5\x75\xaa" "\x15\xd7\x83\x19\xfd\x31\x59\x5b" "\x32", - .ilen = 49, - .result = "\x08\x84\x34\x44\x9f\xde\x1c\xbb" - "\xfb\x5b\xb1\xe6\x4c\x7a\x9f\x39" - "\x2c\x14\xd9\x5f\x59\x18\x5b\xfb" - "\x79\x4b\xe5\x65\xd9\x0a\xc1\x6f" - "\x2e", - .rlen = 33, + .clen = 49, }, { .key = "\x17\x4d\xc3\xab\xe3\x7d\xc7\x42" "\x1b\x91\xdd\x0a\x4b\x43\xcf\xba" @@ -26348,7 +21436,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x3b\xc8\x0a\x2f\x49\x0c\x00\x3c", .assoc = "", .alen = 0, - .input = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb" + .ptext = "\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7" + "\x7c\x35\x63\x77\x46\x9f\x61\x3f" + "\x56\xd7\xe4\xe3\x5e\xb8\xdc\x14" + "\x3a\x79\xc4\x3e\xb3\x69\x61\x46" + "\x3c\xb6\x83\x4e\xb4\x26\xc7\x73" + "\x22\xda\x52\x8b\x7d\x11\x98\xea" + "\x62\xe1\x14\x1e\xdc\xfe\x0f\xad" + "\x20\x76\x5a\xdc\x4e\x71\x13", + .plen = 63, + .ctext = "\xc9\x82\x3b\x4b\x87\x84\xa5\xdb" "\xa0\x8c\xd3\x3e\x7f\x8d\xe8\x28" "\x2a\xdc\xfa\x01\x84\x87\x9a\x70" "\x81\x75\x37\x0a\xd2\x75\xa9\xb6" @@ -26358,16 +21455,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x83\x9b\x05\x11\x72\x60\xf0\xb4" "\x7e\x06\xab\x0a\xc0\xbb\x59\x23" "\xaa\x2d\xfc\x4e\x35\x05\x59", - .ilen = 79, - .result = 
"\x45\xa8\x6e\xe3\xaf\x5a\xc5\xd7" - "\x7c\x35\x63\x77\x46\x9f\x61\x3f" - "\x56\xd7\xe4\xe3\x5e\xb8\xdc\x14" - "\x3a\x79\xc4\x3e\xb3\x69\x61\x46" - "\x3c\xb6\x83\x4e\xb4\x26\xc7\x73" - "\x22\xda\x52\x8b\x7d\x11\x98\xea" - "\x62\xe1\x14\x1e\xdc\xfe\x0f\xad" - "\x20\x76\x5a\xdc\x4e\x71\x13", - .rlen = 63, + .clen = 79, }, { .key = "\x54\x71\xfd\x4b\xf3\xf9\x6f\x5e" "\x9c\x6c\x8f\x9c\x45\x68\x92\xc1" @@ -26378,7 +21466,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xbc\xa2\xbc\xc0\x43\x31\xc2\x42", .assoc = "", .alen = 0, - .input = "\x11\x7c\x7d\xef\xce\x29\x95\xec" + .ptext = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3" + "\xfd\x0f\x15\x09\x40\xc3\x24\x45" + "\x81\x99\xf0\x67\x63\x58\x5e\x2e" + "\xfb\xa6\xa3\x16\x8d\xc8\x00\x1c" + "\x4b\x62\x87\x7c\x15\x38\xda\x70" + "\x3d\xea\xe7\xf2\x40\xba\xae\x79" + "\x8f\x48\xfc\xbf\x45\x53\x2e\x78" + "\xef\x79\xf0\x1b\x49\xf7\xfd\x9c", + .plen = 64, + .ctext = "\x11\x7c\x7d\xef\xce\x29\x95\xec" "\x7e\x9f\x42\xa6\x26\x07\xa1\x75" "\x2f\x4e\x09\x9a\xf6\x6b\xc2\xfa" "\x0d\xd0\x17\xdc\x25\x1e\x9b\xdc" @@ -26388,16 +21485,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x43\x11\x26\x58\xcf\xc5\x41\xcf" "\x13\xcc\xde\x32\x92\xfa\x86\xf2" "\xaf\x16\xe8\x8f\xca\xb6\xfd\x54", - .ilen = 80, - .result = "\x81\xcd\xa8\x82\xbf\xd6\x6e\xf3" - "\xfd\x0f\x15\x09\x40\xc3\x24\x45" - "\x81\x99\xf0\x67\x63\x58\x5e\x2e" - "\xfb\xa6\xa3\x16\x8d\xc8\x00\x1c" - "\x4b\x62\x87\x7c\x15\x38\xda\x70" - "\x3d\xea\xe7\xf2\x40\xba\xae\x79" - "\x8f\x48\xfc\xbf\x45\x53\x2e\x78" - "\xef\x79\xf0\x1b\x49\xf7\xfd\x9c", - .rlen = 64, + .clen = 80, }, { .key = "\x90\x96\x36\xea\x03\x74\x18\x7a" "\x1d\x46\x42\x2d\x3f\x8c\x54\xc7" @@ -26408,11 +21496,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x3d\x7c\x6e\x52\x3d\x55\x85\x48", .assoc = "\xaf", .alen = 1, - .input = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe" + .ptext = "", + .plen = 0, + .ctext = "\x9b\xc5\x3b\x20\x0a\x88\x56\xbe" "\x69\xdf\xc4\xc4\x02\x46\x3a\xf0", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\xcd\xbb\x70\x89\x13\xf0\xc1\x95" "\x9e\x20\xf4\xbf\x39\xb1\x17\xcd" @@ -26426,11 +21514,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x0b\x6d\x84\x4f\x2c\xf0\x82\x5b" "\x4e\xf6\x29\xd1\x8b\x6f\x56", .alen = 31, - .input = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d" + .ptext = "", + .plen = 0, + .ctext = "\xe0\x6d\xa1\x07\x98\x2f\x40\x2d" "\x2e\x9a\xd6\x61\x43\xc0\x74\x69", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x0a\xe0\xaa\x29\x24\x6c\x6a\xb1" "\x1f\xfa\xa6\x50\x33\xd5\xda\xd3" @@ -26444,11 +21532,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x35\x2f\x90\xd3\x31\x90\x04\x74" "\x0f\x23\x08\xa9\x65\xce\xf6\xea", .alen = 32, - .input = "\xb9\x57\x13\x3e\x82\x31\x61\x65" + .ptext = "", + .plen = 0, + .ctext = "\xb9\x57\x13\x3e\x82\x31\x61\x65" "\x0d\x7f\x6c\x96\x93\x5c\x50\xe2", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x46\x04\xe3\xc8\x34\xe7\x12\xcd" "\xa0\xd4\x58\xe2\x2d\xf9\x9c\xda" @@ -26463,11 +21551,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xd1\x51\xe6\x81\x3f\x2d\x95\xc1" "\x01", .alen = 33, - .input = "\x81\x96\x34\xde\xbb\x36\xdd\x3e" + .ptext = "", + .plen = 0, + .ctext = "\x81\x96\x34\xde\xbb\x36\xdd\x3e" "\x4e\x5e\xcb\x44\x21\xb8\x3f\xf1", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\x83\x29\x1d\x67\x44\x63\xbb\xe9" "\x20\xaf\x0a\x73\x27\x1e\x5f\xe0" @@ -26485,11 +21573,11 @@ static 
const struct aead_testvec morus1280_dec_tv_template[] = { "\x64\xb2\x89\x7d\x78\xa8\x05\x7e" "\x07\x8c\xfc\x88\x2d\xb8\x53", .alen = 63, - .input = "\x2e\x99\xb6\x79\x57\x56\x80\x36" + .ptext = "", + .plen = 0, + .ctext = "\x2e\x99\xb6\x79\x57\x56\x80\x36" "\x8e\xc4\x1c\x12\x7d\x71\x36\x0c", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\xbf\x4e\x57\x07\x54\xdf\x64\x05" "\xa1\x89\xbc\x04\x21\x42\x22\xe6" @@ -26507,11 +21595,11 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x91\x19\x70\x1e\xe1\xfe\x25\x49" "\xd6\x8f\x93\xc7\x28\x3f\x3d\x03", .alen = 64, - .input = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce" + .ptext = "", + .plen = 0, + .ctext = "\x7b\x25\x3d\x47\xd4\xa7\x08\xce" "\x3b\x89\x40\x36\xba\x6d\x0e\xa2", - .ilen = 16, - .result = "", - .rlen = 0, + .clen = 16, }, { .key = "\xfc\x72\x90\xa6\x64\x5a\x0d\x21" "\x22\x63\x6e\x96\x1b\x67\xe4\xec" @@ -26522,12 +21610,12 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x42\x9a\x9a\xba\x19\x30\x15\x6e", .assoc = "\x1a", .alen = 1, - .input = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6" + .ptext = "\x29", + .plen = 1, + .ctext = "\xe6\x09\x6f\x95\x9a\x18\xc8\xf6" "\x17\x75\x81\x16\xdf\x26\xff\x67" "\x92", - .ilen = 17, - .result = "\x29", - .rlen = 1, + .clen = 17, }, { .key = "\x38\x97\xca\x45\x74\xd6\xb6\x3c" "\xa3\x3d\x20\x27\x15\x8b\xa7\xf2" @@ -26541,18 +21629,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x09\xfb\xca\x69\x4b\xb0\x8e\xf5" "\xd6\x07\x62\xe3\xa8\xa9\x12", .alen = 31, - .input = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd" + .ptext = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1" + "\x04\xe1\xa6\x94\x10\xe6\x39\x77" + "\xd3\xac\x4d\x8a\x8c\x58\x6e\xfb" + "\x06\x13\x9a\xd9\x5e\xc0\xfa", + .plen = 31, + .ctext = "\x82\xc0\x56\xf0\xd7\xc4\xc9\xfd" "\x3c\xd1\x2a\xd4\x15\x86\x9d\xda" "\xea\x6c\x6f\xa1\x33\xb0\x7a\x01" "\x57\xe7\xf3\x7b\x73\xe7\x54\x10" "\xc6\x91\xe2\xc6\xa0\x69\xe7\xe6" "\x76\xc3\xf5\x3a\x76\xfd\x4a", - .ilen = 47, - .result = "\x66\xf3\x75\x7d\x40\xb3\xb4\xd1" - "\x04\xe1\xa6\x94\x10\xe6\x39\x77" - "\xd3\xac\x4d\x8a\x8c\x58\x6e\xfb" - "\x06\x13\x9a\xd9\x5e\xc0\xfa", - .rlen = 31, + .clen = 47, }, { .key = "\x75\xbc\x04\xe5\x84\x52\x5e\x58" "\x24\x17\xd2\xb9\x0e\xaf\x6a\xf9" @@ -26566,18 +21654,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x33\xbd\xd6\xed\x50\x50\x10\x0e" "\x97\x35\x41\xbb\x82\x08\xb1\xf2", .alen = 32, - .input = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a" + .ptext = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed" + "\x85\xbb\x58\x26\x0a\x0b\xfc\x7d" + "\xfe\x6e\x59\x0e\x91\xf8\xf0\x15" + "\xc8\x40\x78\xb1\x38\x1f\x99\xa7", + .plen = 32, + .ctext = "\x01\x47\x8e\x6c\xf6\x64\x89\x3a" "\x71\xce\xe4\xaa\x45\x70\xe6\x84" "\x62\x48\x08\x64\x86\x6a\xdf\xec" "\xb4\xa0\xfb\x34\x03\x0c\x19\xf4" "\x2b\x7b\x36\x73\xec\x54\xa9\x1e" "\x30\x85\xdb\xe4\xac\xe9\x2c\xca", - .ilen = 48, - .result = "\xa2\x17\xaf\x1c\x50\x2e\x5d\xed" - "\x85\xbb\x58\x26\x0a\x0b\xfc\x7d" - "\xfe\x6e\x59\x0e\x91\xf8\xf0\x15" - "\xc8\x40\x78\xb1\x38\x1f\x99\xa7", - .rlen = 32, + .clen = 48, }, { .key = "\xb1\xe1\x3e\x84\x94\xcd\x07\x74" "\xa5\xf2\x84\x4a\x08\xd4\x2c\xff" @@ -26592,20 +21680,20 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x59\x62\x20\x94\x5c\x67\x50\xc8" "\x58", .alen = 33, - .input = "\x85\xe0\xf8\x0f\x8e\x49\xe3\x60" + .ptext = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09" + "\x06\x95\x0a\xb7\x04\x2f\xbe\x84" + "\x28\x30\x64\x92\x96\x98\x72\x2e" + "\x89\x6e\x57\x8a\x13\x7e\x38\x7e" + "\xdb", + .plen = 33, + .ctext = 
"\x85\xe0\xf8\x0f\x8e\x49\xe3\x60" "\xcb\x4a\x54\x94\xcf\xf5\x7e\x34" "\xe9\xf8\x80\x65\x53\xd0\x72\x70" "\x4f\x7d\x9d\xd1\x15\x6f\xb9\x2c" "\xfa\xe8\xdd\xac\x2e\xe1\x3f\x67" "\x63\x0f\x1a\x59\xb7\x89\xdb\xf4" "\xc3", - .ilen = 49, - .result = "\xdf\x3c\xe9\xbc\x61\xaa\x06\x09" - "\x06\x95\x0a\xb7\x04\x2f\xbe\x84" - "\x28\x30\x64\x92\x96\x98\x72\x2e" - "\x89\x6e\x57\x8a\x13\x7e\x38\x7e" - "\xdb", - .rlen = 33, + .clen = 49, }, { .key = "\xee\x05\x77\x23\xa5\x49\xb0\x90" "\x26\xcc\x36\xdc\x02\xf8\xef\x05" @@ -26623,7 +21711,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x71\x1c\xf7\x44\xee\xa8\xc3\x42" "\xe2\xa3\x84\x04\x0b\xe1\xce", .alen = 63, - .input = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a" + .ptext = "\x1b\x61\x23\x5b\x71\x26\xae\x25" + "\x87\x6f\xbc\x49\xfe\x53\x81\x8a" + "\x53\xf2\x70\x17\x9b\x38\xf4\x48" + "\x4b\x9b\x36\x62\xed\xdd\xd8\x54" + "\xea\xcb\xb6\x79\x45\xfc\xaa\x54" + "\x5c\x94\x47\x58\xa7\xff\x9c\x9e" + "\x7c\xb6\xf1\xac\xc8\xfd\x8b\x35" + "\xd5\xa4\x6a\xd4\x09\xc2\x08", + .plen = 63, + .ctext = "\x00\xe5\x5b\x87\x5c\x20\x22\x8a" "\xda\x1f\xd3\xff\xbb\xb2\xb0\xf8" "\xef\xe9\xeb\x9e\x7c\x80\xf4\x2b" "\x59\xc0\x79\xbc\x17\xa0\x15\x01" @@ -26633,16 +21730,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xed\x0b\x23\xea\x9b\xda\x57\x2f" "\xf6\xa9\xae\x0d\x4e\x40\x96\x45" "\x7f\xfa\xf0\xbf\xc4\x98\x78", - .ilen = 79, - .result = "\x1b\x61\x23\x5b\x71\x26\xae\x25" - "\x87\x6f\xbc\x49\xfe\x53\x81\x8a" - "\x53\xf2\x70\x17\x9b\x38\xf4\x48" - "\x4b\x9b\x36\x62\xed\xdd\xd8\x54" - "\xea\xcb\xb6\x79\x45\xfc\xaa\x54" - "\x5c\x94\x47\x58\xa7\xff\x9c\x9e" - "\x7c\xb6\xf1\xac\xc8\xfd\x8b\x35" - "\xd5\xa4\x6a\xd4\x09\xc2\x08", - .rlen = 63, + .clen = 79, }, { .key = "\x2a\x2a\xb1\xc3\xb5\xc5\x59\xac" "\xa7\xa6\xe8\x6d\xfc\x1d\xb2\x0b" @@ -26660,7 +21748,16 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x9d\x83\xde\xe5\x57\xfd\xe3\x0e" "\xb1\xa7\x1b\x44\x05\x67\xb7\x37", .alen = 64, - .input = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1" + .ptext = "\x58\x85\x5c\xfa\x81\xa1\x57\x40" + "\x08\x4a\x6e\xda\xf8\x78\x44\x90" + "\x7d\xb5\x7b\x9b\xa1\xd8\x76\x62" + "\x0c\xc9\x15\x3b\xc7\x3c\x77\x2b" + "\xf8\x78\xba\xa7\xa6\x0e\xbd\x52" + "\x76\xa3\xdc\xbe\x6b\xa8\xb1\x2d" + "\xa9\x1d\xd8\x4e\x31\x53\xab\x00" + "\xa5\xa7\x01\x13\x04\x49\xf2\x04", + .plen = 64, + .ctext = "\x28\xdd\xb9\x4a\x12\xc7\x0a\xe1" "\x58\x06\x1a\x9b\x8c\x67\xdf\xeb" "\x35\x35\x60\x9d\x06\x40\x65\xc1" "\x93\xe8\xb3\x82\x50\x29\xdd\xb5" @@ -26670,16 +21767,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xf4\x77\xe4\xbf\x41\x3b\xc4\x06" "\xce\x9e\x34\x81\xf0\x89\x11\x13" "\x02\x65\xa1\x7c\xdf\x07\x33\x06", - .ilen = 80, - .result = "\x58\x85\x5c\xfa\x81\xa1\x57\x40" - "\x08\x4a\x6e\xda\xf8\x78\x44\x90" - "\x7d\xb5\x7b\x9b\xa1\xd8\x76\x62" - "\x0c\xc9\x15\x3b\xc7\x3c\x77\x2b" - "\xf8\x78\xba\xa7\xa6\x0e\xbd\x52" - "\x76\xa3\xdc\xbe\x6b\xa8\xb1\x2d" - "\xa9\x1d\xd8\x4e\x31\x53\xab\x00" - "\xa5\xa7\x01\x13\x04\x49\xf2\x04", - .rlen = 64, + .clen = 80, }, { .key = "\x67\x4f\xeb\x62\xc5\x40\x01\xc7" "\x28\x80\x9a\xfe\xf6\x41\x74\x12" @@ -26698,7 +21786,25 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x80\xaa\xb2\x83\xff\xee\xa1\x6a" "\x04", .alen = 65, - .input = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6" + .ptext = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c" + "\x88\x24\x20\x6b\xf2\x9c\x06\x96" + "\xa7\x77\x87\x1f\xa6\x78\xf8\x7b" + "\xcd\xf6\xf4\x13\xa1\x9b\x16\x02" + "\x07\x24\xbf\xd5\x08\x20\xd0\x4f" + "\x90\xb3\x70\x24\x2f\x51\xc7\xbb" + 
"\xd6\x84\xc0\xef\x9a\xa8\xca\xcc" + "\x74\xab\x97\x53\xfe\xd0\xdb\x37" + "\x37\x6a\x0e\x9f\x3f\xa3\x2a\xe3" + "\x1b\x34\x6d\x51\x72\x2b\x17\xe7" + "\x4d\xaa\x2c\x18\xda\xa3\x33\x89" + "\x2a\x9f\xf4\xd2\xed\x76\x3d\x3f" + "\x3c\x15\x9d\x8e\x4f\x3c\x27\xb0" + "\x42\x3f\x2f\x8a\xd4\xc2\x10\xb2" + "\x27\x7f\xe3\x34\x80\x02\x49\x4b" + "\x07\x68\x22\x2a\x88\x25\x53\xb2" + "\x2f", + .plen = 129, + .ctext = "\x85\x39\x69\x35\xfb\xf9\xb0\xa6" "\x85\x43\x88\xd0\xd7\x78\x60\x19" "\x3e\x1f\xb1\xa4\xd6\xc5\x96\xec" "\xf7\x84\x85\xc7\x27\x0f\x74\x57" @@ -26717,25 +21823,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x4c\x50\x10\x62\xba\x7a\xb1\x68" "\x37\xd7\x87\x4e\xe4\x66\x09\x1f" "\xa5", - .ilen = 145, - .result = "\x94\xaa\x96\x9a\x91\x1d\x00\x5c" - "\x88\x24\x20\x6b\xf2\x9c\x06\x96" - "\xa7\x77\x87\x1f\xa6\x78\xf8\x7b" - "\xcd\xf6\xf4\x13\xa1\x9b\x16\x02" - "\x07\x24\xbf\xd5\x08\x20\xd0\x4f" - "\x90\xb3\x70\x24\x2f\x51\xc7\xbb" - "\xd6\x84\xc0\xef\x9a\xa8\xca\xcc" - "\x74\xab\x97\x53\xfe\xd0\xdb\x37" - "\x37\x6a\x0e\x9f\x3f\xa3\x2a\xe3" - "\x1b\x34\x6d\x51\x72\x2b\x17\xe7" - "\x4d\xaa\x2c\x18\xda\xa3\x33\x89" - "\x2a\x9f\xf4\xd2\xed\x76\x3d\x3f" - "\x3c\x15\x9d\x8e\x4f\x3c\x27\xb0" - "\x42\x3f\x2f\x8a\xd4\xc2\x10\xb2" - "\x27\x7f\xe3\x34\x80\x02\x49\x4b" - "\x07\x68\x22\x2a\x88\x25\x53\xb2" - "\x2f", - .rlen = 129, + .clen = 145, }, { .key = "\xa3\x73\x24\x01\xd5\xbc\xaa\xe3" "\xa9\x5a\x4c\x90\xf0\x65\x37\x18" @@ -26762,7 +21850,17 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xb1\x8f\x15\x93\xe7\x71\xb9\x2c" "\x4b", .alen = 129, - .input = "\x7d\xde\x53\x22\xe4\x23\x3b\x30" + .ptext = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78" + "\x09\xfe\xd2\xfd\xec\xc1\xc9\x9d" + "\xd2\x39\x93\xa3\xab\x18\x7a\x95" + "\x8f\x24\xd3\xeb\x7b\xfa\xb5\xd8" + "\x15\xd1\xc3\x04\x69\x32\xe3\x4d" + "\xaa\xc2\x04\x8b\xf2\xfa\xdc\x4a" + "\x02\xeb\xa8\x90\x03\xfd\xea\x97" + "\x43\xaf\x2e\x92\xf8\x57\xc5\x6a" + "\x00", + .plen = 65, + .ctext = "\x7d\xde\x53\x22\xe4\x23\x3b\x30" "\x78\xde\x35\x90\x7a\xd9\x0b\x93" "\xf6\x0e\x0b\xed\x40\xee\x10\x9c" "\x96\x3a\xd3\x34\xb2\xd0\x67\xcf" @@ -26773,17 +21871,7 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xf2\x3b\xd8\x28\x40\xe2\x76\xf6" "\x20\x13\x83\x46\xaf\xff\xe3\x0f" "\x72", - .ilen = 81, - .result = "\xd1\xcf\xd0\x39\xa1\x99\xa9\x78" - "\x09\xfe\xd2\xfd\xec\xc1\xc9\x9d" - "\xd2\x39\x93\xa3\xab\x18\x7a\x95" - "\x8f\x24\xd3\xeb\x7b\xfa\xb5\xd8" - "\x15\xd1\xc3\x04\x69\x32\xe3\x4d" - "\xaa\xc2\x04\x8b\xf2\xfa\xdc\x4a" - "\x02\xeb\xa8\x90\x03\xfd\xea\x97" - "\x43\xaf\x2e\x92\xf8\x57\xc5\x6a" - "\x00", - .rlen = 65, + .clen = 81, }, { .key = "\xe0\x98\x5e\xa1\xe5\x38\x53\xff" "\x2a\x35\xfe\x21\xea\x8a\xfa\x1e" @@ -26797,18 +21885,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x31\x4b\x1b\x07\x6f\x10\x1c\xa8" "\x20\x46\x7a\xce\x9f\x42\x6d\xf9", .alen = 32, - .input = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe" + .ptext = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94" + "\x8a\xd8\x84\x8e\xe6\xe5\x8c\xa3" + "\xfc\xfc\x9e\x28\xb0\xb8\xfc\xaf" + "\x50\x52\xb1\xc4\x55\x59\x55\xaf", + .plen = 32, + .ctext = "\x5a\xcd\x8c\x57\xf2\x6a\xb6\xbe" "\x53\xc7\xaa\x9a\x60\x74\x9c\xc4" "\xa2\xc2\xd0\x6d\xe1\x03\x63\xdc" "\xbb\x51\x7e\x9c\x89\x73\xde\x4e" "\x24\xf8\x52\x7c\x15\x41\x0e\xba" "\x69\x0e\x36\x5f\x2f\x22\x8c", - .ilen = 47, - .result = "\x0d\xf4\x09\xd8\xb1\x14\x51\x94" - "\x8a\xd8\x84\x8e\xe6\xe5\x8c\xa3" - "\xfc\xfc\x9e\x28\xb0\xb8\xfc\xaf" - "\x50\x52\xb1\xc4\x55\x59\x55\xaf", - .rlen = 32, + .clen = 47, }, { .key = 
"\x1c\xbd\x98\x40\xf5\xb3\xfc\x1b" "\xaa\x0f\xb0\xb3\xe4\xae\xbc\x24" @@ -26822,18 +21910,18 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x5c\x0d\x27\x8b\x74\xb0\x9e\xc2" "\xe1\x74\x59\xa6\x79\xa1\x0c\xd0", .alen = 32, - .input = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51" + .ptext = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0" + "\x0b\xb2\x36\x20\xe0\x09\x4e\xa9" + "\x26\xbe\xaa\xac\xb5\x58\x7e\xc8" + "\x11\x7f\x90\x9c\x2f\xb8\xf4\x85", + .plen = 32, + .ctext = "\x47\xd6\xce\x78\xd6\xbf\x4a\x51" "\xb8\xda\x92\x3c\xfd\xda\xac\x8e" "\x8d\x88\xd7\x4d\x90\xe5\xeb\xa1" "\xab\xd6\x7c\x76\xad\xea\x7d\x76" "\x53\xee\xb0\xcd\xd0\x02\xbb\x70" "\x5b\x6f\x7b\xe2\x8c\xe8", - .ilen = 46, - .result = "\x4a\x18\x43\x77\xc1\x90\xfa\xb0" - "\x0b\xb2\x36\x20\xe0\x09\x4e\xa9" - "\x26\xbe\xaa\xac\xb5\x58\x7e\xc8" - "\x11\x7f\x90\x9c\x2f\xb8\xf4\x85", - .rlen = 32, + .clen = 46, }, { .key = "\x59\xe1\xd2\xdf\x05\x2f\xa4\x37" "\x2b\xe9\x63\x44\xde\xd3\x7f\x2b" @@ -26847,17 +21935,17 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\x86\xd0\x32\x0f\x79\x50\x20\xdb" "\xa2\xa1\x37\x7e\x53\x00\xab\xa6", .alen = 32, - .input = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b" + .ptext = "\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc" + "\x8c\x8d\xe8\xb1\xda\x2e\x11\xaf" + "\x51\x80\xb5\x30\xba\xf8\x00\xe2" + "\xd3\xad\x6f\x75\x09\x18\x93\x5c", + .plen = 32, + .ctext = "\x9f\xa9\x2b\xa4\x8f\x00\x05\x2b" "\xe7\x68\x81\x51\xbb\xfb\xdf\x60" "\xbb\xac\xe8\xc1\xdc\x68\xae\x68" "\x3a\xcd\x7a\x06\x49\xfe\x80\x11" "\xe6\x61\x99\xe2\xdd\xbe\x2c\xbf", - .ilen = 40, - .result = "\x86\x3d\x7d\x17\xd1\x0c\xa3\xcc" - "\x8c\x8d\xe8\xb1\xda\x2e\x11\xaf" - "\x51\x80\xb5\x30\xba\xf8\x00\xe2" - "\xd3\xad\x6f\x75\x09\x18\x93\x5c", - .rlen = 32, + .clen = 40, }, { .key = "\x96\x06\x0b\x7f\x15\xab\x4d\x53" "\xac\xc3\x15\xd6\xd8\xf7\x42\x31" @@ -26871,17 +21959,17 @@ static const struct aead_testvec morus1280_dec_tv_template[] = { "\xb1\x92\x3e\x93\x7e\xf0\xa2\xf5" "\x64\xcf\x16\x57\x2d\x5f\x4a\x7d", .alen = 32, - .input = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e" + .ptext = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7" + "\x0d\x67\x9a\x43\xd4\x52\xd4\xb5" + "\x7b\x43\xc1\xb5\xbf\x98\x82\xfc" + "\x94\xda\x4e\x4d\xe4\x77\x32\x32", + .plen = 32, + .ctext = "\xe2\x34\xfa\x25\xfd\xfb\x89\x5e" "\x5b\x4e\x0b\x15\x6e\x39\xfb\x0c" "\x73\xc7\xd9\x6b\xbe\xce\x9b\x70" "\xc7\x4f\x96\x16\x03\xfc\xea\xfb" "\x56", - .ilen = 33, - .result = "\xc3\x62\xb7\xb6\xe2\x87\x4c\xe7" - "\x0d\x67\x9a\x43\xd4\x52\xd4\xb5" - "\x7b\x43\xc1\xb5\xbf\x98\x82\xfc" - "\x94\xda\x4e\x4d\xe4\x77\x32\x32", - .rlen = 32, + .clen = 33, }, }; @@ -26903,7 +21991,7 @@ static const struct cipher_testvec aes_kw_tv_template[] = { .ctext = "\xf6\x85\x94\x81\x6f\x64\xca\xa3" "\xf5\x6f\xab\xea\x25\x48\xf5\xfb", .len = 16, - .iv = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d", + .iv_out = "\x03\x1f\x6b\xd7\xe6\x1e\x64\x3d", .generates_iv = true, }, { .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b" @@ -26916,7 +22004,7 @@ static const struct cipher_testvec aes_kw_tv_template[] = { .ctext = "\xd3\x3d\x3d\x97\x7b\xf0\xa9\x15" "\x59\xf9\x9c\x8a\xcd\x29\x3d\x43", .len = 16, - .iv = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1", + .iv_out = "\x42\x3c\x96\x0d\x8a\x2a\xc4\xc1", .generates_iv = true, }, }; @@ -27995,9 +23083,6 @@ static const struct cipher_testvec cast5_tv_template[] = { "\x4F\xFE\x24\x9C\x9A\x02\xE5\x57" "\xF5\xBC\x25\xD6\x02\x56\x57\x1C", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -28007,6 +23092,7 @@ static const struct cipher_testvec cast5_cbc_tv_template[] = { 
"\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A", .klen = 16, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\x1D\x18\x66\x44\x5B\x8F\x14\xEB", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -28132,9 +23218,6 @@ static const struct cipher_testvec cast5_cbc_tv_template[] = { "\x15\x5F\xDB\xE9\xB1\x83\xD2\xE6" "\x1D\x18\x66\x44\x5B\x8F\x14\xEB", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -28144,6 +23227,7 @@ static const struct cipher_testvec cast5_ctr_tv_template[] = { "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A", .klen = 16, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x62", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A", @@ -28156,6 +23240,7 @@ static const struct cipher_testvec cast5_ctr_tv_template[] = { "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A", .klen = 16, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x9D", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -28281,9 +23366,6 @@ static const struct cipher_testvec cast5_ctr_tv_template[] = { "\x8C\x98\xDB\xDE\xFC\x72\x94\xAA" "\xC0\x0D\x96\xAA\x23\xF8\xFE\x13", .len = 496, - .also_non_np = 1, - .np = 3, - .tap = { 496 - 20, 4, 16 }, }, }; @@ -28544,6 +23626,8 @@ static const struct cipher_testvec anubis_cbc_tv_template[] = { .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe", .klen = 16, + .iv_out = "\x86\xd8\xb5\x6f\x98\x5e\x8a\x66" + "\x4f\x1f\x78\xa1\xbb\x37\xf1\xbe", .ptext = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe" @@ -28560,6 +23644,8 @@ static const struct cipher_testvec anubis_cbc_tv_template[] = { "\x35\x35\x35\x35\x35\x35\x35\x35" "\x35\x35\x35\x35\x35\x35\x35\x35", .klen = 40, + .iv_out = "\xa2\xbc\x06\x98\xc6\x4b\xda\x75" + "\x2e\xaa\xbe\x58\xce\x01\x5b\xc7", .ptext = "\x35\x35\x35\x35\x35\x35\x35\x35" "\x35\x35\x35\x35\x35\x35\x35\x35" "\x35\x35\x35\x35\x35\x35\x35\x35" @@ -28656,20 +23742,6 @@ static const struct cipher_testvec fcrypt_pcbc_tv_template[] = { "\x19\x89\x09\x1c\x2a\x8e\x8c\x94" "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f", .len = 48, - }, { /* split-page version */ - .key = "\xfe\xdc\xba\x98\x76\x54\x32\x10", - .klen = 8, - .iv = "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87", - .ptext = "The quick brown fox jumps over the lazy dogs.\0\0", - .ctext = "\xca\x90\xf5\x9d\xcb\xd4\xd2\x3c" - "\x01\x88\x7f\x3e\x31\x6e\x62\x9d" - "\xd8\xe0\x57\xa3\x06\x3a\x42\x58" - "\x2a\x28\xfe\x72\x52\x2f\xdd\xe0" - "\x19\x89\x09\x1c\x2a\x8e\x8c\x94" - "\xfc\xc7\x68\xe4\x88\xaa\xde\x0f", - .len = 48, - .np = 2, - .tap = { 20, 28 }, } }; @@ -28966,9 +24038,6 @@ static const struct cipher_testvec camellia_tv_template[] = { "\xF8\xB2\xAA\x7A\xD6\xFF\xFA\x55" "\x33\x1A\xBB\xD3\xA2\x7E\x97\x66", .len = 1008, - .also_non_np = 1, - .np = 3, - .tap = { 1008 - 20, 4, 16 }, }, }; @@ -28979,6 +24048,8 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = { .klen = 16, .iv = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30" "\xb4\x22\xda\x80\x2c\x9f\xac\x41", + .iv_out = "\xea\x32\x12\x76\x3b\x50\x10\xe7" + "\x18\xf6\xfd\x5d\xf6\x8f\x13\x51", .ptext = "Single block msg", .ctext = "\xea\x32\x12\x76\x3b\x50\x10\xe7" "\x18\xf6\xfd\x5d\xf6\x8f\x13\x51", @@ -28989,6 +24060,8 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = { .klen = 16, .iv = "\x56\x2e\x17\x99\x6d\x09\x3d\x28" 
"\xdd\xb3\xba\x69\x5a\x2e\x6f\x58", + .iv_out = "\x19\xb4\x3e\x57\x1c\x02\x5e\xa0" + "\x15\x78\xe0\x5e\xf2\xcb\x87\x16", .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" @@ -29006,6 +24079,8 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\x55\x01\xD4\x58\xB2\xF2\x85\x49" + "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -29259,9 +24334,6 @@ static const struct cipher_testvec camellia_cbc_tv_template[] = { "\x55\x01\xD4\x58\xB2\xF2\x85\x49" "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C", .len = 1008, - .also_non_np = 1, - .np = 3, - .tap = { 1008 - 20, 4, 16 }, }, }; @@ -29274,6 +24346,8 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\x83", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -29407,6 +24481,8 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = { .klen = 32, .iv = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" "\xC4\x29\x8E\xF3\x35\x9A\xFF\x64", + .iv_out = "\xE2\x24\x89\xEE\x53\xB8\x1D\x5F" + "\xC4\x29\x8E\xF3\x35\x9A\xFF\xA4", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -29662,9 +24738,6 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = { "\xE7\x2C\x49\x08\x8B\x72\xFA\x5C" "\xF1\x6B\xD9", .len = 1011, - .also_non_np = 1, - .np = 2, - .tap = { 1011 - 16, 16 }, }, { /* Generated with Crypto++ */ .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9" "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A" @@ -29673,6 +24746,8 @@ static const struct cipher_testvec camellia_ctr_tv_template[] = { .klen = 32, .iv = "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF" "\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFD", + .iv_out = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x3C", .ptext = "\x56\xED\x84\x1B\x8F\x26\xBD\x31" "\xC8\x5F\xF6\x6A\x01\x98\x0C\xA3" "\x3A\xD1\x45\xDC\x73\x0A\x7E\x15" @@ -30167,9 +25242,6 @@ static const struct cipher_testvec camellia_lrw_tv_template[] = { "\xb2\x1a\xd8\x4c\xbd\x1d\x10\xe9" "\x5a\xa8\x92\x7f\xba\xe6\x0c\x95", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -30504,9 +25576,6 @@ static const struct cipher_testvec camellia_xts_tv_template[] = { "\xb7\x16\xd8\x12\x5c\xcd\x7d\x4e" "\xd5\xc6\x99\xcc\x4e\x6c\x94\x95", .len = 512, - .also_non_np = 1, - .np = 3, - .tap = { 512 - 20, 4, 16 }, }, }; @@ -31710,8 +26779,6 @@ static const struct cipher_testvec salsa20_stream_tv_template[] = { "\x87\x13\xc6\x5b\x59\x8d\xf2\xc8" "\xaf\xdf\x11\x95", .len = 4100, - .np = 2, - .tap = { 4064, 36 }, }, }; @@ -31844,9 +26911,6 @@ static const struct cipher_testvec chacha20_tv_template[] = { "\x5b\x86\x2f\x37\x30\xe3\x7c\xfd" "\xc4\xfd\x80\x6c\x22\xf2\x21", .len = 375, - .also_non_np = 1, - .np = 3, - .tap = { 375 - 20, 4, 16 }, }, { /* RFC7539 A.2. 
Test Vector #3 */ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" @@ -32220,9 +27284,6 @@ static const struct cipher_testvec chacha20_tv_template[] = { "\xa1\xed\xad\xd5\x76\xfa\x24\x8f" "\x98", .len = 1281, - .also_non_np = 1, - .np = 3, - .tap = { 1200, 1, 80 }, }, }; @@ -32415,9 +27476,6 @@ static const struct cipher_testvec xchacha20_tv_template[] = { "\xab\xff\x1f\x12\xc3\xee\xe5\x65" "\x12\x8d\x7b\x61\xe5\x1f\x98", .len = 375, - .also_non_np = 1, - .np = 3, - .tap = { 375 - 20, 4, 16 }, }, { /* Derived from a ChaCha20 test vector, via the process above */ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" @@ -32795,9 +27853,6 @@ static const struct cipher_testvec xchacha20_tv_template[] = { "\xba\xd0\x34\xc9\x2d\x91\xc5\x17" "\x11", .len = 1281, - .also_non_np = 1, - .np = 3, - .tap = { 1200, 1, 80 }, }, { /* test vector from https://tools.ietf.org/html/draft-arciszewski-xchacha-02#appendix-A.3.2 */ .key = "\x80\x81\x82\x83\x84\x85\x86\x87" "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" @@ -33080,9 +28135,6 @@ static const struct cipher_testvec xchacha12_tv_template[] = { "\xda\x4e\xc9\xab\x9b\x8a\x7b", .len = 375, - .also_non_np = 1, - .np = 3, - .tap = { 375 - 20, 4, 16 }, }, { .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a" @@ -33460,9 +28512,6 @@ static const struct cipher_testvec xchacha12_tv_template[] = { "\xf0\xfc\x5e\x1c\xf1\xf5\xf9\xf3" "\x5b", .len = 1281, - .also_non_np = 1, - .np = 3, - .tap = { 1200, 1, 80 }, }, { .key = "\x80\x81\x82\x83\x84\x85\x86\x87" "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" @@ -33570,9 +28619,6 @@ static const struct cipher_testvec adiantum_xchacha12_aes_tv_template[] = { .ctext = "\x6d\x32\x86\x18\x67\x86\x0f\x3f" "\x96\x7c\x9d\x28\x0d\x53\xec\x9f", .len = 16, - .also_non_np = 1, - .np = 2, - .tap = { 14, 2 }, }, { .key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99" "\x5f\x1a\x5a\x44\x1d\x92\x0f\x27" @@ -33635,9 +28681,6 @@ static const struct cipher_testvec adiantum_xchacha12_aes_tv_template[] = { "\x74\xa6\xaa\xa3\xac\xdc\xc2\xf5" "\x8d\xde\x34\x86\x78\x60\x75\x8d", .len = 128, - .also_non_np = 1, - .np = 4, - .tap = { 104, 16, 4, 4 }, }, { .key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a" "\x25\x74\x29\x0d\x51\x8a\x0e\x13" @@ -33777,9 +28820,1436 @@ static const struct cipher_testvec adiantum_xchacha12_aes_tv_template[] = { "\x21\xb0\x21\x52\xba\xa7\x37\xaa" "\xcc\xbf\x95\xa8\xf4\xd0\x91\xf6", .len = 512, - .also_non_np = 1, - .np = 2, - .tap = { 144, 368 }, + }, { + .key = "\xeb\xe5\x11\x3a\x72\xeb\x10\xbe" + "\x70\xcf\xe3\xea\xc2\x74\xa4\x48" + "\x29\x0f\x8f\x3f\xcf\x4c\x28\x2a" + "\x4e\x1e\x3c\xc3\x27\x9f\x16\x13", + .klen = 32, + .iv = "\x84\x3e\xa2\x7c\x06\x72\xb2\xad" + "\x88\x76\x65\xb4\x1a\x29\x27\x12" + "\x45\xb6\x8d\x0e\x4b\x87\x04\xfc" + "\xb5\xcd\x1c\x4d\xe8\x06\xf1\xcb", + .ptext = "\x8e\xb6\x07\x9b\x7c\xe4\xa4\xa2" + "\x41\x6c\x24\x1d\xc0\x77\x4e\xd9" + "\x4a\xa4\x2c\xb6\xe4\x55\x02\x7f" + "\xc4\xec\xab\xc2\x5c\x63\x40\x92" + "\x38\x24\x62\xdb\x65\x82\x10\x7f" + "\x21\xa5\x39\x3a\x3f\x38\x7e\xad" + "\x6c\x7b\xc9\x3f\x89\x8f\xa8\x08" + "\xbd\x31\x57\x3c\x7a\x45\x67\x30" + "\xa9\x27\x58\x34\xbe\xe3\xa4\xc3" + "\xff\xc2\x9f\x43\xf0\x04\xba\x1e" + "\xb6\xf3\xc4\xce\x09\x7a\x2e\x42" + "\x7d\xad\x97\xc9\x77\x9a\x3a\x78" + "\x6c\xaf\x7c\x2a\x46\xb4\x41\x86" + "\x1a\x20\xf2\x5b\x1a\x60\xc9\xc4" + "\x47\x5d\x10\xa4\xd2\x15\x6a\x19" + "\x4f\xd5\x51\x37\xd5\x06\x70\x1a" + "\x3e\x78\xf0\x2e\xaa\xb5\x2a\xbd" + "\x83\x09\x7c\xcb\x29\xac\xd7\x9c" + "\xbf\x80\xfd\x9d\xd4\xcf\x64\xca" + "\xf8\xc9\xf1\x77\x2e\xbb\x39\x26" + "\xac\xd9\xbe\xce\x24\x7f\xbb\xa2" + "\x82\xba\xeb\x5f\x65\xc5\xf1\x56" + 
"\x8a\x52\x02\x4d\x45\x23\x6d\xeb" + "\xb0\x60\x7b\xd8\x6e\xb2\x98\xd2" + "\xaf\x76\xf2\x33\x9b\xf3\xbb\x95" + "\xc0\x50\xaa\xc7\x47\xf6\xb3\xf3" + "\x77\x16\xcb\x14\x95\xbf\x1d\x32" + "\x45\x0c\x75\x52\x2c\xe8\xd7\x31" + "\xc0\x87\xb0\x97\x30\x30\xc5\x5e" + "\x50\x70\x6e\xb0\x4b\x4e\x38\x19" + "\x46\xca\x38\x6a\xca\x7d\xfe\x05" + "\xc8\x80\x7c\x14\x6c\x24\xb5\x42" + "\x28\x04\x4c\xff\x98\x20\x08\x10" + "\x90\x31\x03\x78\xd8\xa1\xe6\xf9" + "\x52\xc2\xfc\x3e\xa7\x68\xce\xeb" + "\x59\x5d\xeb\xd8\x64\x4e\xf8\x8b" + "\x24\x62\xcf\x17\x36\x84\xc0\x72" + "\x60\x4f\x3e\x47\xda\x72\x3b\x0e" + "\xce\x0b\xa9\x9c\x51\xdc\xa5\xb9" + "\x71\x73\x08\x4e\x22\x31\xfd\x88" + "\x29\xfc\x8d\x17\x3a\x7a\xe5\xb9" + "\x0b\x9c\x6d\xdb\xce\xdb\xde\x81" + "\x73\x5a\x16\x9d\x3c\x72\x88\x51" + "\x10\x16\xf3\x11\x6e\x32\x5f\x4c" + "\x87\xce\x88\x2c\xd2\xaf\xf5\xb7" + "\xd8\x22\xed\xc9\xae\x68\x7f\xc5" + "\x30\x62\xbe\xc9\xe0\x27\xa1\xb5" + "\x57\x74\x36\x60\xb8\x6b\x8c\xec" + "\x14\xad\xed\x69\xc9\xd8\xa5\x5b" + "\x38\x07\x5b\xf3\x3e\x74\x48\x90" + "\x61\x17\x23\xdd\x44\xbc\x9d\x12" + "\x0a\x3a\x63\xb2\xab\x86\xb8\x67" + "\x85\xd6\xb2\x5d\xde\x4a\xc1\x73" + "\x2a\x7c\x53\x8e\xd6\x7d\x0e\xe4" + "\x3b\xab\xc5\x3d\x32\x79\x18\xb7" + "\xd6\x50\x4d\xf0\x8a\x37\xbb\xd3" + "\x8d\xd8\x08\xd7\x7d\xaa\x24\x52" + "\xf7\x90\xe3\xaa\xd6\x49\x7a\x47" + "\xec\x37\xad\x74\x8b\xc1\xb7\xfe" + "\x4f\x70\x14\x62\x22\x8c\x63\xc2" + "\x1c\x4e\x38\xc3\x63\xb7\xbf\x53" + "\xbd\x1f\xac\xa6\x94\xc5\x81\xfa" + "\xe0\xeb\x81\xe9\xd9\x1d\x32\x3c" + "\x85\x12\xca\x61\x65\xd1\x66\xd8" + "\xe2\x0e\xc3\xa3\xff\x0d\xd3\xee" + "\xdf\xcc\x3e\x01\xf5\x9b\x45\x5c" + "\x33\xb5\xb0\x8d\x36\x1a\xdf\xf8" + "\xa3\x81\xbe\xdb\x3d\x4b\xf6\xc6" + "\xdf\x7f\xb0\x89\xbd\x39\x32\x50" + "\xbb\xb2\xe3\x5c\xbb\x4b\x18\x98" + "\x08\x66\x51\xe7\x4d\xfb\xfc\x4e" + "\x22\x42\x6f\x61\xdb\x7f\x27\x88" + "\x29\x3f\x02\xa9\xc6\x83\x30\xcc" + "\x8b\xd5\x64\x7b\x7c\x76\x16\xbe" + "\xb6\x8b\x26\xb8\x83\x16\xf2\x6b" + "\xd1\xdc\x20\x6b\x42\x5a\xef\x7a" + "\xa9\x60\xb8\x1a\xd3\x0d\x4e\xcb" + "\x75\x6b\xc5\x80\x43\x38\x7f\xad" + "\x9c\x56\xd9\xc4\xf1\x01\x74\xf0" + "\x16\x53\x8d\x69\xbe\xf2\x5d\x92" + "\x34\x38\xc8\x84\xf9\x1a\xfc\x26" + "\x16\xcb\xae\x7d\x38\x21\x67\x74" + "\x4c\x40\xaa\x6b\x97\xe0\xb0\x2f" + "\xf5\x3e\xf6\xe2\x24\xc8\x22\xa4" + "\xa8\x88\x27\x86\x44\x75\x5b\x29" + "\x34\x08\x4b\xa1\xfe\x0c\x26\xe5" + "\xac\x26\xf6\x21\x0c\xfb\xde\x14" + "\xfe\xd7\xbe\xee\x48\x93\xd6\x99" + "\x56\x9c\xcf\x22\xad\xa2\x53\x41" + "\xfd\x58\xa1\x68\xdc\xc4\xef\x20" + "\xa1\xee\xcf\x2b\x43\xb6\x57\xd8" + "\xfe\x01\x80\x25\xdf\xd2\x35\x44" + "\x0d\x15\x15\xc3\xfc\x49\xbf\xd0" + "\xbf\x2f\x95\x81\x09\xa6\xb6\xd7" + "\x21\x03\xfe\x52\xb7\xa8\x32\x4d" + "\x75\x1e\x46\x44\xbc\x2b\x61\x04" + "\x1b\x1c\xeb\x39\x86\x8f\xe9\x49" + "\xce\x78\xa5\x5e\x67\xc5\xe9\xef" + "\x43\xf8\xf1\x35\x22\x43\x61\xc1" + "\x27\xb5\x09\xb2\xb8\xe1\x5e\x26" + "\xcc\xf3\x6f\xb2\xb7\x55\x30\x98" + "\x87\xfc\xe7\xa8\xc8\x94\x86\xa1" + "\xd9\xa0\x3c\x74\x16\xb3\x25\x98" + "\xba\xc6\x84\x4a\x27\xa6\x58\xfe" + "\xe1\x68\x04\x30\xc8\xdb\x44\x52" + "\x4e\xb2\xa4\x6f\xf7\x63\xf2\xd6" + "\x63\x36\x17\x04\xf8\x06\xdb\xeb" + "\x99\x17\xa5\x1b\x61\x90\xa3\x9f" + "\x05\xae\x3e\xe4\xdb\xc8\x1c\x8e" + "\x77\x27\x88\xdf\xd3\x22\x5a\xc5" + "\x9c\xd6\x22\xf8\xc4\xd8\x92\x9d" + "\x16\xcc\x54\x25\x3b\x6f\xdb\xc0" + "\x78\xd8\xe3\xb3\x03\x69\xd7\x5d" + "\xf8\x08\x04\x63\x61\x9d\x76\xf9" + "\xad\x1d\xc4\x30\x9f\x75\x89\x6b" + "\xfb\x62\xba\xae\xcb\x1b\x6c\xe5" + "\x7e\xea\x58\x6b\xae\xce\x9b\x48" + "\x4b\x80\xd4\x5e\x71\x53\xa7\x24" + 
"\x73\xca\xf5\x3e\xbb\x5e\xd3\x1c" + "\x33\xe3\xec\x5b\xa0\x32\x9d\x25" + "\x0e\x0c\x28\x29\x39\x51\xc5\x70" + "\xec\x60\x8f\x77\xfc\x06\x7a\x33" + "\x19\xd5\x7a\x6e\x94\xea\xa3\xeb" + "\x13\xa4\x2e\x09\xd8\x81\x65\x83" + "\x03\x63\x8b\xb5\xc9\x89\x98\x73" + "\x69\x53\x8e\xab\xf1\xd2\x2f\x67" + "\xbd\xa6\x16\x6e\xd0\x8b\xc1\x25" + "\x93\xd2\x50\x7c\x1f\xe1\x11\xd0" + "\x58\x0d\x2f\x72\xe7\x5e\xdb\xa2" + "\x55\x9a\xe0\x09\x21\xac\x61\x85" + "\x4b\x20\x95\x73\x63\x26\xe3\x83" + "\x4b\x5b\x40\x03\x14\xb0\x44\x16" + "\xbd\xe0\x0e\xb7\x66\x56\xd7\x30" + "\xb3\xfd\x8a\xd3\xda\x6a\xa7\x3d" + "\x98\x09\x11\xb7\x00\x06\x24\x5a" + "\xf7\x42\x94\xa6\x0e\xb1\x6d\x48" + "\x74\xb1\xa7\xe6\x92\x0a\x15\x9a" + "\xf5\xfa\x55\x1a\x6c\xdd\x71\x08" + "\xd0\xf7\x8d\x0e\x7c\x67\x4d\xc6" + "\xe6\xde\x78\x88\x88\x3c\x5e\x23" + "\x46\xd2\x25\xa4\xfb\xa3\x26\x3f" + "\x2b\xfd\x9c\x20\xda\x72\xe1\x81" + "\x8f\xe6\xae\x08\x1d\x67\x15\xde" + "\x86\x69\x1d\xc6\x1e\x6d\xb7\x5c" + "\xdd\x43\x72\x5a\x7d\xa7\xd8\xd7" + "\x1e\x66\xc5\x90\xf6\x51\x76\x91" + "\xb3\xe3\x39\x81\x75\x08\xfa\xc5" + "\x06\x70\x69\x1b\x2c\x20\x74\xe0" + "\x53\xb0\x0c\x9d\xda\xa9\x5b\xdd" + "\x1c\x38\x6c\x9e\x3b\xc4\x7a\x82" + "\x93\x9e\xbb\x75\xfb\x19\x4a\x55" + "\x65\x7a\x3c\xda\xcb\x66\x5c\x13" + "\x17\x97\xe8\xbd\xae\x24\xd9\x76" + "\xfb\x8c\x73\xde\xbd\xb4\x1b\xe0" + "\xb9\x2c\xe8\xe0\x1d\x3f\xa8\x2c" + "\x1e\x81\x5b\x77\xe7\xdf\x6d\x06" + "\x7c\x9a\xf0\x2b\x5d\xfc\x86\xd5" + "\xb1\xad\xbc\xa8\x73\x48\x61\x67" + "\xd6\xba\xc8\xe8\xe2\xb8\xee\x40" + "\x36\x22\x3e\x61\xf6\xc8\x16\xe4" + "\x0e\x88\xad\x71\x53\x58\xe1\x6c" + "\x8f\x4f\x89\x4b\x3e\x9c\x7f\xe9" + "\xad\xc2\x28\xc2\x3a\x29\xf3\xec" + "\xa9\x28\x39\xba\xc2\x86\xe1\x06" + "\xf3\x8b\xe3\x95\x0c\x87\xb8\x1b" + "\x72\x35\x8e\x8f\x6d\x18\xc8\x1c" + "\xa5\x5d\x57\x9d\x73\x8a\xbb\x9e" + "\x21\x05\x12\xd7\xe0\x21\x1c\x16" + "\x3a\x95\x85\xbc\xb0\x71\x0b\x36" + "\x6c\x44\x8d\xef\x3b\xec\x3f\x8e" + "\x24\xa9\xe3\xa7\x63\x23\xca\x09" + "\x62\x96\x79\x0c\x81\x05\x41\xf2" + "\x07\x20\x26\xe5\x8e\x10\x54\x03" + "\x05\x7b\xfe\x0c\xcc\x8c\x50\xe5" + "\xca\x33\x4d\x48\x7a\x03\xd5\x64" + "\x49\x09\xf2\x5c\x5d\xfe\x2b\x30" + "\xbf\x29\x14\x29\x8b\x9b\x7c\x96" + "\x47\x07\x86\x4d\x4e\x4d\xf1\x47" + "\xd1\x10\x2a\xa8\xd3\x15\x8c\xf2" + "\x2f\xf4\x3a\xdf\xd0\xa7\xcb\x5a" + "\xad\x99\x39\x4a\xdf\x60\xbe\xf9" + "\x91\x4e\xf5\x94\xef\xc5\x56\x32" + "\x33\x86\x78\xa3\xd6\x4c\x29\x7c" + "\xe8\xac\x06\xb5\xf5\x01\x5c\x9f" + "\x02\xc8\xe8\xbf\x5c\x1a\x7f\x4d" + "\x28\xa5\xb9\xda\xa9\x5e\xe7\x4b" + "\xf4\x3d\xe9\x1d\x28\xaa\x1a\x8a" + "\x76\xc8\x6c\x19\x61\x3c\x9e\x29" + "\xcd\xbe\xff\xe0\x1c\xb8\x67\xb5" + "\xa4\x46\xf8\xb9\x8a\xa2\xf6\x7c" + "\xef\x23\x73\x0c\xe9\x72\x0a\x0d" + "\x9b\x40\xd8\xfb\x0c\x9c\xab\xa8", + .ctext = "\xcb\x78\x87\x9c\xc7\x13\xc1\x30" + "\xdd\x2c\x7d\xb2\x97\xab\x06\x69" + "\x47\x87\x8a\x12\x2b\x5d\x86\xd7" + "\x2e\xe6\x7a\x0d\x58\x5d\xe7\x01" + "\x78\x0e\xff\xc7\xc5\xd2\x94\xd6" + "\xdd\x6b\x38\x1f\xa4\xe3\x3d\xe7" + "\xc5\x8a\xb5\xbe\x65\x11\x2b\xe1" + "\x2b\x8e\x84\xe8\xe0\x00\x7f\xdd" + "\x15\x15\xab\xbd\x22\x94\xf7\xce" + "\x99\x6f\xfd\x0e\x9b\x16\xeb\xeb" + "\x24\xc7\xbb\xc6\xe1\x6c\x57\xba" + "\x84\xab\x16\xf2\x57\xd6\x42\x9d" + "\x56\x92\x5b\x44\x18\xd4\xa2\x1b" + "\x1e\xa9\xdc\x7a\x16\x88\xc4\x4f" + "\x6d\x77\x9a\x2e\x82\xa9\xc3\xee" + "\xa4\xca\x05\x1b\x0e\xdc\x48\x96" + "\xd0\x50\x21\x1f\x46\xc7\xc7\x70" + "\x53\xcd\x1e\x4e\x5f\x2d\x4b\xb2" + "\x86\xe5\x3a\xe6\x1d\xec\x7b\x9d" + "\x8f\xd6\x41\xc6\xbb\x00\x4f\xe6" + "\x02\x47\x07\x73\x50\x6b\xcf\xb2" + 
"\x9e\x1c\x01\xc9\x09\xcc\xc3\x52" + "\x27\xe6\x63\xe0\x5b\x55\x60\x4d" + "\x72\xd0\xda\x4b\xec\xcb\x72\x5d" + "\x37\x4a\xf5\xb8\xd9\xe2\x08\x10" + "\xf3\xb9\xdc\x07\xc0\x02\x10\x14" + "\x9f\xe6\x8f\xc4\xc4\xe1\x39\x7b" + "\x47\xea\xae\x7c\xdd\x27\xa8\x4c" + "\x6b\x0f\x4c\xf8\xff\x16\x4e\xcb" + "\xec\x88\x33\x0d\x15\x10\x82\x66" + "\xa7\x3d\x2c\xb6\xbc\x2e\xe4\xce" + "\x4c\x2f\x4b\x46\x0f\x67\x78\xa5" + "\xff\x6a\x7d\x0d\x5e\x6d\xab\xfb" + "\x59\x99\xd8\x1f\x30\xd4\x33\xe8" + "\x7d\x11\xae\xe3\xba\xd0\x3f\xa7" + "\xa5\x5e\x43\xda\xf3\x0f\x3a\x5f" + "\xba\xb0\x47\xb2\x08\x60\xf4\xed" + "\x35\x23\x0c\xe9\x4f\x81\xc4\xc5" + "\xa8\x35\xdc\x99\x52\x33\x19\xd4" + "\x00\x01\x8d\x5a\x10\x82\x39\x78" + "\xfc\x72\x24\x63\x4a\x38\xc5\x6f" + "\xfe\xec\x2f\x26\x0c\x3c\x1c\xf6" + "\x4d\x99\x7a\x77\x59\xfe\x10\xa5" + "\xa1\x35\xbf\x2f\x15\xfa\x4e\x52" + "\xe6\xd5\x1c\x88\x90\x75\xd5\xcc" + "\xdb\x2a\xb1\xf0\x70\x54\x89\xc7" + "\xeb\x1d\x6e\x61\x45\xa3\x50\x48" + "\xcd\xdb\x32\xba\x7f\x6b\xaf\xef" + "\x50\xcb\x0d\x36\xf7\x29\x3a\x10" + "\x02\x73\xca\x8f\x3f\x5d\x82\x17" + "\x91\x9a\xd8\x15\x15\xe3\xe1\x41" + "\x43\xef\x85\xa6\xb0\xc7\x3b\x0f" + "\xf0\xa5\xaa\x66\x77\x70\x5e\x70" + "\xce\x17\x84\x68\x45\x39\x2c\x25" + "\xc6\xc1\x5f\x7e\xe8\xfa\xe4\x3a" + "\x47\x51\x7b\x9d\x54\x84\x98\x04" + "\x5f\xf7\x5f\x3c\x34\xe7\xa3\x1d" + "\xea\xb7\x6d\x05\xab\x28\xe4\x2c" + "\xb1\x7f\x08\xa8\x5d\x07\xbf\xfe" + "\x39\x72\x44\x87\x51\xc5\x73\xe4" + "\x9a\x5f\xdd\x46\xbc\x4e\xb1\x39" + "\xe4\x78\xb8\xbf\xdc\x5b\x88\x9b" + "\xc1\x3f\xd9\xd0\xb3\x5a\xdf\xaa" + "\x53\x6a\x91\x6d\x2a\x09\xf0\x0b" + "\x5e\xe8\xb2\xa0\xb4\x73\x07\x1d" + "\xc8\x33\x84\xe6\xda\xe6\xad\xd6" + "\xad\x91\x01\x4e\x14\x42\x34\x2c" + "\xe5\xf9\x99\x21\x56\x1f\x6c\x2b" + "\x4c\xe3\xd5\x9e\x04\xdc\x9a\x16" + "\xd1\x54\xe9\xc2\xf7\xc0\xd5\x06" + "\x2f\xa1\x38\x2a\x55\x88\x23\xf8" + "\xb0\xdb\x87\x32\xc9\x4e\xb0\x0c" + "\xc5\x05\x78\x58\xa1\x2e\x75\x75" + "\x68\xdc\xea\xdd\x0c\x33\x16\x5e" + "\xe7\xdc\xfd\x42\x74\xbe\xae\x60" + "\x3c\x37\x4b\x27\xf5\x2c\x5f\x55" + "\x4a\x0b\x64\xfd\xa2\x01\x65\x9c" + "\x27\x9f\x5e\x87\xd5\x95\x88\x66" + "\x09\x84\x42\xab\x00\xe2\x58\xc3" + "\x97\x45\xf1\x93\xe2\x34\x37\x3d" + "\xfe\x93\x8c\x17\xb9\x79\x65\x06" + "\xf7\x58\xe5\x1b\x3b\x4e\xda\x36" + "\x17\xe3\x56\xec\x26\x0f\x2e\xfa" + "\xd1\xb9\x2b\x3e\x7f\x1d\xe3\x4b" + "\x67\xdf\x43\x53\x10\xba\xa3\xfb" + "\x5d\x5a\xd8\xc4\xab\x19\x7e\x12" + "\xaa\x83\xf1\xc0\xa1\xe0\xbf\x72" + "\x5f\xe8\x68\x39\xef\x1a\xbe\xee" + "\x6f\x47\x79\x19\xed\xf2\xa1\x4a" + "\xe5\xfc\xb5\x58\xae\x63\x82\xcb" + "\x16\x0b\x94\xbb\x3e\x02\x49\xc4" + "\x3c\x33\xf1\xec\x1b\x11\x71\x9b" + "\x5b\x80\xf1\x6f\x88\x1c\x05\x36" + "\xa8\xd8\xee\x44\xb5\x18\xc3\x14" + "\x62\xba\x98\xb9\xc0\x2a\x70\x93" + "\xb3\xd8\x11\x69\x95\x1d\x43\x7b" + "\x39\xc1\x91\x05\xc4\xe3\x1e\xc2" + "\x1e\x5d\xe7\xde\xbe\xfd\xae\x99" + "\x4b\x8f\x83\x1e\xf4\x9b\xb0\x2b" + "\x66\x6e\x62\x24\x8d\xe0\x1b\x22" + "\x59\xeb\xbd\x2a\x6b\x2e\x37\x17" + "\x9e\x1f\x66\xcb\x66\xb4\xfb\x2c" + "\x36\x22\x5d\x73\x56\xc1\xb0\x27" + "\xe0\xf0\x1b\xe4\x47\x8b\xc6\xdc" + "\x7c\x0c\x3d\x29\xcb\x33\x10\xfe" + "\xc3\xc3\x1e\xff\x4c\x9b\x27\x86" + "\xe2\xb0\xaf\xb7\x89\xce\x61\x69" + "\xe7\x00\x3e\x92\xea\x5f\x9e\xc1" + "\xfa\x6b\x20\xe2\x41\x23\x82\xeb" + "\x07\x76\x4c\x4c\x2a\x96\x33\xbe" + "\x89\xa9\xa8\xb9\x9a\x7d\x27\x18" + "\x48\x23\x70\x46\xf3\x87\xa7\x91" + "\x58\xb8\x74\xba\xed\xc6\xb2\xa1" + "\x4d\xb6\x43\x9a\xe1\xa2\x41\xa5" + "\x35\xd3\x90\x8a\xc7\x4d\xb7\x88" + "\x0b\xe3\x74\x9f\x84\xfc\xd9\x73" + "\xf2\x86\x0c\xad\xeb\x5d\x70\xac" + 
"\x65\x07\x14\x8e\x57\xf6\xdc\xb4" + "\xc2\x02\x7c\xd6\x89\xe2\x8a\x3e" + "\x8e\x08\x3c\x12\x37\xaf\xe1\xa8" + "\x04\x11\x5c\xae\x5a\x2b\x60\xa0" + "\x03\x3c\x7a\xa2\x38\x92\xbe\xce" + "\x09\xa2\x5e\x0f\xc2\xb2\xb5\x06" + "\xc2\x97\x97\x9b\x09\x2f\x04\xfe" + "\x2c\xe7\xa3\xc4\x42\xe9\xa3\x40" + "\xa5\x52\x07\x2c\x3b\x89\x1a\xa5" + "\x28\xb1\x93\x05\x98\x0c\x2f\x3d" + "\xc6\xf5\x83\xac\x24\x1d\x28\x9f" + "\x32\x66\x4d\x70\xb7\xe0\xab\xb8" + "\x75\xc5\xf3\xd2\x7b\x26\x3e\xec" + "\x64\xe6\xf7\x70\xe7\xf8\x10\x8e" + "\x67\xd2\xb3\x87\x69\x40\x06\x9a" + "\x2f\x6a\x1a\xfd\x62\x0c\xee\x31" + "\x2e\xbe\x58\x97\x77\xd1\x09\x08" + "\x1f\x8d\x42\x29\x34\xd5\xd8\xb5" + "\x1f\xd7\x21\x18\xe3\xe7\x2e\x4a" + "\x42\xfc\xdb\x19\xe9\xee\xb9\x22" + "\xad\x5c\x07\xe9\xc8\x07\xe5\xe9" + "\x95\xa2\x0d\x30\x46\xe2\x65\x51" + "\x01\xa5\x74\x85\xe2\x52\x6e\x07" + "\xc9\xf5\x33\x09\xde\x78\x62\xa9" + "\x30\x2a\xd3\x86\xe5\x46\x2e\x60" + "\xff\x74\xb0\x5f\xec\x76\xb7\xd1" + "\x5e\x4d\x61\x97\x3c\x9c\x99\xc3" + "\x41\x65\x21\x47\xf9\xb1\x06\xec" + "\x18\xf8\x3f\xc7\x38\xfa\x7b\x14" + "\x62\x79\x6a\x0b\x0c\xf5\x2c\xb7" + "\xab\xcf\x63\x49\x6d\x1f\x46\xa8" + "\xbc\x7d\x42\x53\x75\x6b\xca\x38" + "\xac\x8b\xe7\xa1\xa1\x92\x19\x6b" + "\x0d\x75\x80\x5b\x7d\x35\x86\x70" + "\x12\x6b\xe5\x3e\xe5\x85\xa0\xa4" + "\xd6\x77\x5e\x4d\x24\x57\x84\xa9" + "\xe5\xa4\xbf\x25\xfb\x36\x65\x3b" + "\x81\x39\x61\xec\x5e\x4a\x7e\x10" + "\x58\x19\x13\x5c\x0f\x79\xec\xcf" + "\xbb\x5f\x69\x21\xc3\xa7\x5a\xff" + "\x3b\xc7\x85\x9b\x47\xbc\x3e\xad" + "\xbf\x54\x60\xb6\x5b\x3f\xfc\x50" + "\x68\x83\x76\x24\xb0\xc3\x3f\x93" + "\x0d\xce\x36\x0a\x58\x9d\xcc\xe9" + "\x52\xbb\xd0\x0b\x65\xe5\x0f\x62" + "\x82\x16\xaa\xd2\xba\x5a\x4c\xd0" + "\x67\xb5\x4e\x84\x1c\x02\x6e\xa3" + "\xaa\x22\x54\x96\xc8\xd9\x9c\x58" + "\x15\x63\xf4\x98\x1a\xa1\xd9\x11" + "\x64\x25\x56\xb5\x03\x8e\x29\x85" + "\x75\x88\xd1\xd2\xe4\xe6\x27\x48" + "\x13\x9c\x2b\xaa\xfb\xd3\x6e\x2c" + "\xe6\xd4\xe4\x8b\xd9\xf7\x01\x16" + "\x46\xf9\x5c\x88\x7a\x93\x9e\x2d" + "\xa6\xeb\x01\x2a\x72\xe4\x7f\xb4" + "\x78\x0c\x50\x18\xd3\x8e\x65\xa7" + "\x1b\xf9\x28\x5d\x89\x70\x96\x2f" + "\xa1\xc2\x9b\x34\xfc\x7c\x27\x63" + "\x93\xe6\xe3\xa4\x9d\x17\x97\x7e" + "\x13\x79\x9c\x4b\x2c\x23\x91\x2c" + "\x4f\xb1\x1d\x4b\xb4\x61\x6e\xe8" + "\x32\x35\xc3\x41\x7a\x50\x60\xc8" + "\x3e\xd8\x3f\x38\xfc\xc2\xa2\xe0" + "\x3a\x21\x25\x8f\xc2\x22\xed\x04" + "\x31\xb8\x72\x69\xaf\x6c\x6d\xab" + "\x25\x16\x95\x87\x92\xc7\x46\x3f" + "\x47\x05\x6c\xad\xa0\xa6\x1d\xf0" + "\x66\x2e\x01\x1a\xc3\xbe\xe4\xf6" + "\x51\xec\xa3\x95\x81\xe1\xcc\xab" + "\xc1\x71\x65\x0a\xe6\x53\xfb\xb8" + "\x53\x69\xad\x8b\xab\x8b\xa7\xcd" + "\x8f\x15\x01\x25\xb1\x1f\x9c\x3b" + "\x9b\x47\xad\x38\x38\x89\x6b\x1c" + "\x8a\x33\xdd\x8a\x06\x23\x06\x0b" + "\x7f\x70\xbe\x7e\xa1\x80\xbc\x7a", + .len = 1536, + }, { + .key = "\x60\xd5\x36\xb0\x8e\x5d\x0e\x5f" + "\x70\x47\x8c\xea\x87\x30\x1d\x58" + "\x2a\xb2\xe8\xc6\xcb\x60\xe7\x6f" + "\x56\x95\x83\x98\x38\x80\x84\x8a", + .klen = 32, + .iv = "\x43\xfe\x63\x3c\xdc\x9e\x0c\xa6" + "\xee\x9c\x0b\x97\x65\xc2\x56\x1d" + "\x5d\xd0\xbf\xa3\x9f\x1e\xfb\x78" + "\xbf\x51\x1b\x18\x73\x27\x27\x8c", + .ptext = "\x0b\x77\xd8\xa3\x8c\xa6\xb2\x2d" + "\x3e\xdd\xcc\x7c\x4a\x3e\x61\xc4" + "\x9a\x7f\x73\xb0\xb3\x29\x32\x61" + "\x13\x25\x62\xcc\x59\x4c\xf4\xdb" + "\xd7\xf5\xf4\xac\x75\x51\xb2\x83" + "\x64\x9d\x1c\x8b\xd1\x8b\x0c\x06" + "\xf1\x9f\xba\x9d\xae\x62\xd4\xd8" + "\x96\xbe\x3c\x4c\x32\xe4\x82\x44" + "\x47\x5a\xec\xb8\x8a\x5b\xd5\x35" + "\x57\x1e\x5c\x80\x6f\x77\xa9\xb9" + "\xf2\x4f\x71\x1e\x48\x51\x86\x43" + 
"\x0d\xd5\x5b\x52\x30\x40\xcd\xbb" + "\x2c\x25\xc1\x47\x8b\xb7\x13\xc2" + "\x3a\x11\x40\xfc\xed\x45\xa4\xf0" + "\xd6\xfd\x32\x99\x13\x71\x47\x2e" + "\x4c\xb0\x81\xac\x95\x31\xd6\x23" + "\xa4\x2f\xa9\xe8\x5a\x62\xdc\x96" + "\xcf\x49\xa7\x17\x77\x76\x8a\x8c" + "\x04\x22\xaf\xaf\x6d\xd9\x16\xba" + "\x35\x21\x66\x78\x3d\xb6\x65\x83" + "\xc6\xc1\x67\x8c\x32\xd6\xc0\xc7" + "\xf5\x8a\xfc\x47\xd5\x87\x09\x2f" + "\x51\x9d\x57\x6c\x29\x0b\x1c\x32" + "\x47\x6e\x47\xb5\xf3\x81\xc8\x82" + "\xca\x5d\xe3\x61\x38\xa0\xdc\xcc" + "\x35\x73\xfd\xb3\x92\x5c\x72\xd2" + "\x2d\xad\xf6\xcd\x20\x36\xff\x49" + "\x48\x80\x21\xd3\x2f\x5f\xe9\xd8" + "\x91\x20\x6b\xb1\x38\x52\x1e\xbc" + "\x88\x48\xa1\xde\xc0\xa5\x46\xce" + "\x9f\x32\x29\xbc\x2b\x51\x0b\xae" + "\x7a\x44\x4e\xed\xeb\x95\x63\x99" + "\x96\x87\xc9\x34\x02\x26\xde\x20" + "\xe4\xcb\x59\x0c\xb5\x55\xbd\x55" + "\x3f\xa9\x15\x25\xa7\x5f\xab\x10" + "\xbe\x9a\x59\x6c\xd5\x27\xf3\xf0" + "\x73\x4a\xb3\xe4\x08\x11\x00\xeb" + "\xf1\xae\xc8\x0d\xef\xcd\xb5\xfc" + "\x0d\x7e\x03\x67\xad\x0d\xec\xf1" + "\x9a\xfd\x31\x60\x3e\xa2\xfa\x1c" + "\x93\x79\x31\x31\xd6\x66\x7a\xbd" + "\x85\xfd\x22\x08\x00\xae\x72\x10" + "\xd6\xb0\xf4\xb8\x4a\x72\x5b\x9c" + "\xbf\x84\xdd\xeb\x13\x05\x28\xb7" + "\x61\x60\xfd\x7f\xf0\xbe\x4d\x18" + "\x7d\xc9\xba\xb0\x01\x59\x74\x18" + "\xe4\xf6\xa6\x74\x5d\x3f\xdc\xa0" + "\x9e\x57\x93\xbf\x16\x6c\xf6\xbd" + "\x93\x45\x38\x95\xb9\x69\xe9\x62" + "\x21\x73\xbd\x81\x73\xac\x15\x74" + "\x9e\x68\x28\x91\x38\xb7\xd4\x47" + "\xc7\xab\xc9\x14\xad\x52\xe0\x4c" + "\x17\x1c\x42\xc1\xb4\x9f\xac\xcc" + "\xc8\x12\xea\xa9\x9e\x30\x21\x14" + "\xa8\x74\xb4\x74\xec\x8d\x40\x06" + "\x82\xb7\x92\xd7\x42\x5b\xf2\xf9" + "\x6a\x1e\x75\x6e\x44\x55\xc2\x8d" + "\x73\x5b\xb8\x8c\x3c\xef\x97\xde" + "\x24\x43\xb3\x0e\xba\xad\x63\x63" + "\x16\x0a\x77\x03\x48\xcf\x02\x8d" + "\x76\x83\xa3\xba\x73\xbe\x80\x3f" + "\x8f\x6e\x76\x24\xc1\xff\x2d\xb4" + "\x20\x06\x9b\x67\xea\x29\xb5\xe0" + "\x57\xda\x30\x9d\x38\xa2\x7d\x1e" + "\x8f\xb9\xa8\x17\x64\xea\xbe\x04" + "\x84\xd1\xce\x2b\xfd\x84\xf9\x26" + "\x1f\x26\x06\x5c\x77\x6d\xc5\x9d" + "\xe6\x37\x76\x60\x7d\x3e\xf9\x02" + "\xba\xa6\xf3\x7f\xd3\x95\xb4\x0e" + "\x52\x1c\x6a\x00\x8f\x3a\x0b\xce" + "\x30\x98\xb2\x63\x2f\xff\x2d\x3b" + "\x3a\x06\x65\xaf\xf4\x2c\xef\xbb" + "\x88\xff\x2d\x4c\xa9\xf4\xff\x69" + "\x9d\x46\xae\x67\x00\x3b\x40\x94" + "\xe9\x7a\xf7\x0b\xb7\x3c\xa2\x2f" + "\xc3\xde\x5e\x29\x01\xde\xca\xfa" + "\xc6\xda\xd7\x19\xc7\xde\x4a\x16" + "\x93\x6a\xb3\x9b\x47\xe9\xd2\xfc" + "\xa1\xc3\x95\x9c\x0b\xa0\x2b\xd4" + "\xd3\x1e\xd7\x21\x96\xf9\x1e\xf4" + "\x59\xf4\xdf\x00\xf3\x37\x72\x7e" + "\xd8\xfd\x49\xd4\xcd\x61\x7b\x22" + "\x99\x56\x94\xff\x96\xcd\x9b\xb2" + "\x76\xca\x9f\x56\xae\x04\x2e\x75" + "\x89\x4e\x1b\x60\x52\xeb\x84\xf4" + "\xd1\x33\xd2\x6c\x09\xb1\x1c\x43" + "\x08\x67\x02\x01\xe3\x64\x82\xee" + "\x36\xcd\xd0\x70\xf1\x93\xd5\x63" + "\xef\x48\xc5\x56\xdb\x0a\x35\xfe" + "\x85\x48\xb6\x97\x97\x02\x43\x1f" + "\x7d\xc9\xa8\x2e\x71\x90\x04\x83" + "\xe7\x46\xbd\x94\x52\xe3\xc5\xd1" + "\xce\x6a\x2d\x6b\x86\x9a\xf5\x31" + "\xcd\x07\x9c\xa2\xcd\x49\xf5\xec" + "\x01\x3e\xdf\xd5\xdc\x15\x12\x9b" + "\x0c\x99\x19\x7b\x2e\x83\xfb\xd8" + "\x89\x3a\x1c\x1e\xb4\xdb\xeb\x23" + "\xd9\x42\xae\x47\xfc\xda\x37\xe0" + "\xd2\xb7\x47\xd9\xe8\xb5\xf6\x20" + "\x42\x8a\x9d\xaf\xb9\x46\x80\xfd" + "\xd4\x74\x6f\x38\x64\xf3\x8b\xed" + "\x81\x94\x56\xe7\xf1\x1a\x64\x17" + "\xd4\x27\x59\x09\xdf\x9b\x74\x05" + "\x79\x6e\x13\x29\x2b\x9e\x1b\x86" + "\x73\x9f\x40\xbe\x6e\xff\x92\x4e" + "\xbf\xaa\xf4\xd0\x88\x8b\x6f\x73" + "\x9d\x8b\xbf\xe5\x8a\x85\x45\x67" + 
"\xd3\x13\x72\xc6\x2a\x63\x3d\xb1" + "\x35\x7c\xb4\x38\xbb\x31\xe3\x77" + "\x37\xad\x75\xa9\x6f\x84\x4e\x4f" + "\xeb\x5b\x5d\x39\x6d\xed\x0a\xad" + "\x6c\x1b\x8e\x1f\x57\xfa\xc7\x7c" + "\xbf\xcf\xf2\xd1\x72\x3b\x70\x78" + "\xee\x8e\xf3\x4f\xfd\x61\x30\x9f" + "\x56\x05\x1d\x7d\x94\x9b\x5f\x8c" + "\xa1\x0f\xeb\xc3\xa9\x9e\xb8\xa0" + "\xc6\x4e\x1e\xb1\xbc\x0a\x87\xa8" + "\x52\xa9\x1e\x3d\x58\x8e\xc6\x95" + "\x85\x58\xa3\xc3\x3a\x43\x32\x50" + "\x6c\xb3\x61\xe1\x0c\x7d\x02\x63" + "\x5f\x8b\xdf\xef\x13\xf8\x66\xea" + "\x89\x00\x1f\xbd\x5b\x4c\xd5\x67" + "\x8f\x89\x84\x33\x2d\xd3\x70\x94" + "\xde\x7b\xd4\xb0\xeb\x07\x96\x98" + "\xc5\xc0\xbf\xc8\xcf\xdc\xc6\x5c" + "\xd3\x7d\x78\x30\x0e\x14\xa0\x86" + "\xd7\x8a\xb7\x53\xa3\xec\x71\xbf" + "\x85\xf2\xea\xbd\x77\xa6\xd1\xfd" + "\x5a\x53\x0c\xc3\xff\xf5\x1d\x46" + "\x37\xb7\x2d\x88\x5c\xeb\x7a\x0c" + "\x0d\x39\xc6\x40\x08\x90\x1f\x58" + "\x36\x12\x35\x28\x64\x12\xe7\xbb" + "\x50\xac\x45\x15\x7b\x16\x23\x5e" + "\xd4\x11\x2a\x8e\x17\x47\xe1\xd0" + "\x69\xc6\xd2\x5c\x2c\x76\xe6\xbb" + "\xf7\xe7\x34\x61\x8e\x07\x36\xc8" + "\xce\xcf\x3b\xeb\x0a\x55\xbd\x4e" + "\x59\x95\xc9\x32\x5b\x79\x7a\x86" + "\x03\x74\x4b\x10\x87\xb3\x60\xf6" + "\x21\xa4\xa6\xa8\x9a\xc9\x3a\x6f" + "\xd8\x13\xc9\x18\xd4\x38\x2b\xc2" + "\xa5\x7e\x6a\x09\x0f\x06\xdf\x53" + "\x9a\x44\xd9\x69\x2d\x39\x61\xb7" + "\x1c\x36\x7f\x9e\xc6\x44\x9f\x42" + "\x18\x0b\x99\xe6\x27\xa3\x1e\xa6" + "\xd0\xb9\x9a\x2b\x6f\x60\x75\xbd" + "\x52\x4a\x91\xd4\x7b\x8f\x95\x9f" + "\xdd\x74\xed\x8b\x20\x00\xdd\x08" + "\x6e\x5b\x61\x7b\x06\x6a\x19\x84" + "\x1c\xf9\x86\x65\xcd\x1c\x73\x3f" + "\x28\x5c\x8a\x93\x1a\xf3\xa3\x6c" + "\x6c\xa9\x7c\xea\x3c\xd4\x15\x45" + "\x7f\xbc\xe3\xbb\x42\xf0\x2e\x10" + "\xcd\x0c\x8b\x44\x1a\x82\x83\x0c" + "\x58\xb1\x24\x28\xa0\x11\x2f\x63" + "\xa5\x82\xc5\x9f\x86\x42\xf4\x4d" + "\x89\xdb\x76\x4a\xc3\x7f\xc4\xb8" + "\xdd\x0d\x14\xde\xd2\x62\x02\xcb" + "\x70\xb7\xee\xf4\x6a\x09\x12\x5e" + "\xd1\x26\x1a\x2c\x20\x71\x31\xef" + "\x7d\x65\x57\x65\x98\xff\x8b\x02" + "\x9a\xb5\xa4\xa1\xaf\x03\xc4\x50" + "\x33\xcf\x1b\x25\xfa\x7a\x79\xcc" + "\x55\xe3\x21\x63\x0c\x6d\xeb\x5b" + "\x1c\xad\x61\x0b\xbd\xb0\x48\xdb" + "\xb3\xc8\xa0\x87\x7f\x8b\xac\xfd" + "\xd2\x68\x9e\xb4\x11\x3c\x6f\xb1" + "\xfe\x25\x7d\x84\x5a\xae\xc9\x31" + "\xc3\xe5\x6a\x6f\xbc\xab\x41\xd9" + "\xde\xce\xf9\xfa\xd5\x7c\x47\xd2" + "\x66\x30\xc9\x97\xf2\x67\xdf\x59" + "\xef\x4e\x11\xbc\x4e\x70\xe3\x46" + "\x53\xbe\x16\x6d\x33\xfb\x57\x98" + "\x4e\x34\x79\x3b\xc7\x3b\xaf\x94" + "\xc1\x87\x4e\x47\x11\x1b\x22\x41" + "\x99\x12\x61\xe0\xe0\x8c\xa9\xbd" + "\x79\xb6\x06\x4d\x90\x3b\x0d\x30" + "\x1a\x00\xaa\x0e\xed\x7c\x16\x2f" + "\x0d\x1a\xfb\xf8\xad\x51\x4c\xab" + "\x98\x4c\x80\xb6\x92\x03\xcb\xa9" + "\x99\x9d\x16\xab\x43\x8c\x3f\x52" + "\x96\x53\x63\x7e\xbb\xd2\x76\xb7" + "\x6b\x77\xab\x52\x80\x33\xe3\xdf" + "\x4b\x3c\x23\x1a\x33\xe1\x43\x40" + "\x39\x1a\xe8\xbd\x3c\x6a\x77\x42" + "\x88\x9f\xc6\xaa\x65\x28\xf2\x1e" + "\xb0\x7c\x8e\x10\x41\x31\xe9\xd5" + "\x9d\xfd\x28\x7f\xfb\x61\xd3\x39" + "\x5f\x7e\xb4\xfb\x9c\x7d\x98\xb7" + "\x37\x2f\x18\xd9\x3b\x83\xaf\x4e" + "\xbb\xd5\x49\x69\x46\x93\x3a\x21" + "\x46\x1d\xad\x84\xb5\xe7\x8c\xff" + "\xbf\x81\x7e\x22\xf6\x88\x8c\x82" + "\xf5\xde\xfe\x18\xc9\xfb\x58\x07" + "\xe4\x68\xff\x9c\xf4\xe0\x24\x20" + "\x90\x92\x01\x49\xc2\x38\xe1\x7c" + "\xac\x61\x0b\x96\x36\xa4\x77\xe9" + "\x29\xd4\x97\xae\x15\x13\x7c\x6c" + "\x2d\xf1\xc5\x83\x97\x02\xa8\x2e" + "\x0b\x0f\xaf\xb5\x42\x18\x8a\x8c" + "\xb8\x28\x85\x28\x1b\x2a\x12\xa5" + "\x4b\x0a\xaf\xd2\x72\x37\x66\x23" + "\x28\xe6\x71\xa0\x77\x85\x7c\xff" + 
"\xf3\x8d\x2f\x0c\x33\x30\xcd\x7f" + "\x61\x64\x23\xb2\xe9\x79\x05\xb8" + "\x61\x47\xb1\x2b\xda\xf7\x9a\x24" + "\x94\xf6\xcf\x07\x78\xa2\x80\xaa" + "\x6e\xe9\x58\x97\x19\x0c\x58\x73" + "\xaf\xee\x2d\x6e\x26\x67\x18\x8a" + "\xc6\x6d\xf6\xbc\x65\xa9\xcb\xe7" + "\x53\xf1\x61\x97\x63\x52\x38\x86" + "\x0e\xdd\x33\xa5\x30\xe9\x9f\x32" + "\x43\x64\xbc\x2d\xdc\x28\x43\xd8" + "\x6c\xcd\x00\x2c\x87\x9a\x33\x79" + "\xbd\x63\x6d\x4d\xf9\x8a\x91\x83" + "\x9a\xdb\xf7\x9a\x11\xe1\xd1\x93" + "\x4a\x54\x0d\x51\x38\x30\x84\x0b" + "\xc5\x29\x8d\x92\x18\x6c\x28\xfe" + "\x1b\x07\x57\xec\x94\x74\x0b\x2c" + "\x21\x01\xf6\x23\xf9\xb0\xa0\xaf" + "\xb1\x3e\x2e\xa8\x0d\xbc\x2a\x68" + "\x59\xde\x0b\x2d\xde\x74\x42\xa1" + "\xb4\xce\xaf\xd8\x42\xeb\x59\xbd" + "\x61\xcc\x27\x28\xc6\xf2\xde\x3e" + "\x68\x64\x13\xd3\xc3\xc0\x31\xe0" + "\x5d\xf9\xb4\xa1\x09\x20\x46\x8b" + "\x48\xb9\x27\x62\x00\x12\xc5\x03" + "\x28\xfd\x55\x27\x1c\x31\xfc\xdb" + "\xc1\xcb\x7e\x67\x91\x2e\x50\x0c" + "\x61\xf8\x9f\x31\x26\x5a\x3d\x2e" + "\xa0\xc7\xef\x2a\xb6\x24\x48\xc9" + "\xbb\x63\x99\xf4\x7c\x4e\xc5\x94" + "\x99\xd5\xff\x34\x93\x8f\x31\x45" + "\xae\x5e\x7b\xfd\xf4\x81\x84\x65" + "\x5b\x41\x70\x0b\xe5\xaa\xec\x95" + "\x6b\x3d\xe3\xdc\x12\x78\xf8\x28" + "\x26\xec\x3a\x64\xc4\xab\x74\x97" + "\x3d\xcf\x21\x7d\xcf\x59\xd3\x15" + "\x47\x94\xe4\xd9\x48\x4c\x02\x49" + "\x68\x50\x22\x16\x96\x2f\xc4\x23" + "\x80\x47\x27\xd1\xee\x10\x3b\xa7" + "\x19\xae\xe1\x40\x5f\x3a\xde\x5d" + "\x97\x1c\x59\xce\xe1\xe7\x32\xa7" + "\x20\x89\xef\x44\x22\x38\x3c\x14" + "\x99\x3f\x1b\xd6\x37\xfe\x93\xbf" + "\x34\x13\x86\xd7\x9b\xe5\x2a\x37" + "\x72\x16\xa4\xdf\x7f\xe4\xa4\x66" + "\x9d\xf2\x0b\x29\xa1\xe2\x9d\x36" + "\xe1\x9d\x56\x95\x73\xe1\x91\x58" + "\x0f\x64\xf8\x90\xbb\x0c\x48\x0f" + "\xf5\x52\xae\xd9\xeb\x95\xb7\xdd" + "\xae\x0b\x20\x55\x87\x3d\xf0\x69" + "\x3c\x0a\x54\x61\xea\x00\xbd\xba" + "\x5f\x7e\x25\x8c\x3e\x61\xee\xb2" + "\x1a\xc8\x0e\x0b\xa5\x18\x49\xf2" + "\x6e\x1d\x3f\x83\xc3\xf1\x1a\xcb" + "\x9f\xc9\x82\x4e\x7b\x26\xfd\x68" + "\x28\x25\x8d\x22\x17\xab\xf8\x4e" + "\x1a\xa9\x81\x48\xb0\x9f\x52\x75" + "\xe4\xef\xdd\xbd\x5b\xbe\xab\x3c" + "\x43\x76\x23\x62\xce\xb8\xc2\x5b" + "\xc6\x31\xe6\x81\xb4\x42\xb2\xfd" + "\xf3\x74\xdd\x02\x3c\xa0\xd7\x97" + "\xb0\xe7\xe9\xe0\xce\xef\xe9\x1c" + "\x09\xa2\x6d\xd3\xc4\x60\xd6\xd6" + "\x9e\x54\x31\x45\x76\xc9\x14\xd4" + "\x95\x17\xe9\xbe\x69\x92\x71\xcb" + "\xde\x7c\xf1\xbd\x2b\xef\x8d\xaf" + "\x51\xe8\x28\xec\x48\x7f\xf8\xfa" + "\x9f\x9f\x5e\x52\x61\xc3\xfc\x9a" + "\x7e\xeb\xe3\x30\xb6\xfe\xc4\x4a" + "\x87\x1a\xff\x54\x64\xc7\xaa\xa2" + "\xfa\xb7\xb2\xe7\x25\xce\x95\xb4" + "\x15\x93\xbd\x24\xb6\xbc\xe4\x62" + "\x93\x7f\x44\x40\x72\xcb\xfb\xb2" + "\xbf\xe8\x03\xa5\x87\x12\x27\xfd" + "\xc6\x21\x8a\x8f\xc2\x48\x48\xb9" + "\x6b\xb6\xf0\xf0\x0e\x0a\x0e\xa4" + "\x40\xa9\xd8\x23\x24\xd0\x7f\xe2" + "\xf9\xed\x76\xf0\x91\xa5\x83\x3c" + "\x55\xe1\x92\xb8\xb6\x32\x9e\x63" + "\x60\x81\x75\x29\x9e\xce\x2a\x70" + "\x28\x0c\x87\xe5\x46\x73\x76\x66" + "\xbc\x4b\x6c\x37\xc7\xd0\x1a\xa0" + "\x9d\xcf\x04\xd3\x8c\x42\xae\x9d" + "\x35\x5a\xf1\x40\x4c\x4e\x81\xaa" + "\xfe\xd5\x83\x4f\x29\x19\xf3\x6c" + "\x9e\xd0\x53\xe5\x05\x8f\x14\xfb" + "\x68\xec\x0a\x3a\x85\xcd\x3e\xb4" + "\x4a\xc2\x5b\x92\x2e\x0b\x58\x64" + "\xde\xca\x64\x86\x53\xdb\x7f\x4e" + "\x54\xc6\x5e\xaa\xe5\x82\x3b\x98" + "\x5b\x01\xa7\x1f\x7b\x3d\xcc\x19" + "\xf1\x11\x02\x64\x09\x25\x7c\x26" + "\xee\xad\x50\x68\x31\x26\x16\x0f" + "\xb6\x7b\x6f\xa2\x17\x1a\xba\xbe" + "\xc3\x60\xdc\xd2\x44\xe0\xb4\xc4" + "\xfe\xff\x69\xdb\x60\xa6\xaf\x39" + "\x0a\xbd\x6e\x41\xd1\x9f\x87\x71" + 
"\xcc\x43\xa8\x47\x10\xbc\x2b\x7d" + "\x40\x12\x43\x31\xb8\x12\xe0\x95" + "\x6f\x9d\xf8\x75\x51\x3d\x61\xbe" + "\xa0\xd1\x0b\x8d\x50\xc7\xb8\xe7" + "\xab\x03\xda\x41\xab\xc5\x4e\x33" + "\x5a\x63\x94\x90\x22\x72\x54\x26" + "\x93\x65\x99\x45\x55\xd3\x55\x56" + "\xc5\x39\xe4\xb4\xb1\xea\xd8\xf9" + "\xb5\x31\xf7\xeb\x80\x1a\x9e\x8d" + "\xd2\x40\x01\xea\x33\xb9\xf2\x7a" + "\x43\x41\x72\x0c\xbf\x20\xab\xf7" + "\xfa\x65\xec\x3e\x35\x57\x1e\xef" + "\x2a\x81\xfa\x10\xb2\xdb\x8e\xfa" + "\x7f\xe7\xaf\x73\xfc\xbb\x57\xa2" + "\xaf\x6f\x41\x11\x30\xd8\xaf\x94" + "\x53\x8d\x4c\x23\xa5\x20\x63\xcf" + "\x0d\x00\xe0\x94\x5e\x92\xaa\xb5" + "\xe0\x4e\x96\x3c\xf4\x26\x2f\xf0" + "\x3f\xd7\xed\x75\x2c\x63\xdf\xc8" + "\xfb\x20\xb5\xae\x44\x83\xc0\xab" + "\x05\xf9\xbb\xa7\x62\x7d\x21\x5b" + "\x04\x80\x93\x84\x5f\x1d\x9e\xcd" + "\xa2\x07\x7e\x22\x2f\x55\x94\x23" + "\x74\x35\xa3\x0f\x03\xbe\x07\x62" + "\xe9\x16\x69\x7e\xae\x38\x0e\x9b" + "\xad\x6e\x83\x90\x21\x10\xb8\x07" + "\xdc\xc1\x44\x20\xa5\x88\x00\xdc" + "\xe1\x82\x16\xf1\x0c\xdc\xed\x8c" + "\x32\xb5\x49\xab\x11\x41\xd5\xd2" + "\x35\x2c\x70\x73\xce\xeb\xe3\xd6" + "\xe4\x7d\x2c\xe8\x8c\xec\x8a\x92" + "\x50\x87\x51\xbd\x2d\x9d\xf2\xf0" + "\x3c\x7d\xb1\x87\xf5\x01\xb0\xed" + "\x02\x5a\x20\x4d\x43\x08\x71\x49" + "\x77\x72\x9b\xe6\xef\x30\xc9\xa2" + "\x66\x66\xb8\x68\x9d\xdf\xc6\x16" + "\xa5\x78\xee\x3c\x47\xa6\x7a\x31" + "\x07\x6d\xce\x7b\x86\xf8\xb2\x31" + "\xa8\xa4\x77\x3c\x63\x36\xe8\xd3" + "\x7d\x40\x56\xd8\x48\x56\x9e\x3e" + "\x56\xf6\x3d\xd2\x12\x6e\x35\x29" + "\xd4\x7a\xdb\xff\x97\x4c\xeb\x3c" + "\x28\x2a\xeb\xe9\x43\x40\x61\x06" + "\xb8\xa8\x6d\x18\xc8\xbc\xc7\x23" + "\x53\x2b\x8b\xcc\xce\x88\xdf\xf8" + "\xff\xf8\x94\xe4\x5c\xee\xcf\x39" + "\xe0\xf6\x1a\xae\xf2\xd5\x41\x6a" + "\x09\x5a\x50\x66\xc4\xf4\x66\xdc" + "\x6a\x69\xee\xc8\x47\xe6\x87\x52" + "\x9e\x28\xe4\x39\x02\x0d\xc4\x7e" + "\x18\xe6\xc6\x09\x07\x03\x30\xb9" + "\xd1\xb0\x48\xe6\x80\xe8\x8c\xe6" + "\xc7\x2c\x33\xca\x64\xe5\xc0\x6e" + "\xac\x14\x4b\xe1\xf6\xeb\xce\xe4" + "\xc1\x8c\xea\x5b\x8d\x3c\x86\x91" + "\xd1\xd7\x16\x9c\x09\x9c\x6a\x51" + "\xe5\xcd\xe3\xb0\x33\x1f\x03\xcd" + "\xe5\xd8\x40\x9b\xdc\x29\xbe\xfa" + "\x24\xcc\xf1\x55\x68\x3a\x89\x0d" + "\x08\x48\xfd\x9b\x47\x41\x10\xae" + "\x53\x3a\x83\x87\xd4\x89\xe7\x38" + "\x47\xee\xd7\xbe\xe2\x58\x37\xd2" + "\xfc\x21\x1d\x20\xa5\x2d\x69\x0c" + "\x36\x5b\x2f\xcd\xa1\xa6\xe4\xa1" + "\x00\x4d\xf7\xc8\x2d\xc7\x16\x6c" + "\x6d\xad\x32\x8c\x8f\x74\xf9\xfa" + "\x78\x1c\x9a\x0f\x6e\x93\x9c\x20" + "\x43\xb9\xe4\xda\xc4\xc7\x90\x47" + "\x86\x68\xb7\x6f\x82\x59\x4a\x30" + "\xf1\xfd\x31\x0f\xa1\xea\x9b\x6b" + "\x18\x5c\x39\xb0\xc7\x80\x64\xff" + "\x6d\x5b\xb4\x8b\xba\x90\xea\x4e" + "\x9a\x04\xd2\x68\x18\x50\xb5\x91" + "\x45\x4f\x58\x5a\xe5\xc6\x7c\xab" + "\x61\x3e\x3d\xec\x18\x87\xfc\xea" + "\x26\x35\x4c\x99\x8a\x3f\x00\x7b" + "\xf5\x89\x62\xda\xdd\xf1\x43\xef" + "\x2c\x1d\x92\xfa\x9a\xd0\x37\x03" + "\x69\x9c\xd8\x1f\x41\x44\xb7\x73" + "\x54\x14\x91\x12\x41\x41\x54\xa2" + "\x91\x55\xb6\xf7\x23\x41\xc9\xc2" + "\x5b\x53\xf2\x61\x63\x0d\xa9\x87" + "\x1a\xbb\x11\x1f\x3c\xbb\xa8\x1f" + "\xe2\x66\x56\x88\x06\x3c\xd2\x0f" + "\x3b\xc4\xd6\x8c\xbe\x54\x9f\xa8" + "\x9c\x89\xfb\x88\x05\xef\xcd\xe7" + "\xc1\xc4\x21\x36\x22\x8d\x9a\x5d" + "\x1b\x1e\x4a\xc0\x89\xdd\x76\x16" + "\x5a\xce\xcd\x1e\x6a\x1f\xa0\x2b" + "\x83\xf6\x5e\x28\x8e\x65\xb5\x86" + "\x72\x8f\xc5\xf2\x54\x81\x10\x8d" + "\x63\x7b\x42\x7d\x06\x08\x16\xb3" + "\xb0\x60\x65\x41\x49\xdb\x0d\xc1" + "\xe2\xef\x72\x72\x06\xe7\x60\x5c" + "\x95\x1c\x7d\x52\xec\x82\xee\xd3" + "\x5b\xab\x61\xa4\x1f\x61\x64\x0c" + 
"\x28\x32\x21\x7a\x81\xe7\x81\xf3" + "\xdb\xc0\x18\xd9\xae\x0b\x3c\x9a" + "\x58\xec\x70\x4f\x40\x25\x2b\xba" + "\x96\x59\xac\x34\x45\x29\xc6\x57" + "\xc1\xc3\x93\x60\x77\x92\xbb\x83" + "\x8a\xa7\x72\x45\x2a\xc9\x35\xe7" + "\x66\xd6\xa9\xe9\x43\x87\x20\x11" + "\x6a\x2f\x87\xac\xe0\x93\x82\xe5" + "\x6c\x57\xa9\x4c\x9e\x56\x57\x33" + "\x1c\xd8\x7e\x25\x27\x41\x89\x97" + "\xea\xa5\x56\x02\x5b\x93\x13\x46" + "\xdc\x53\x3d\x95\xef\xaf\x9f\xf0" + "\x0a\x8a\xfe\x0c\xbf\xf0\x25\x5f" + "\xb4\x9f\x1b\x72\x9c\x37\xba\x46" + "\x4e\xcc\xcc\x02\x5c\xec\x3f\x98" + "\xff\x56\x1a\xc2\x7a\x65\x8f\xf6" + "\xd2\x81\x37\x7a\x0a\xfc\x79\xb9" + "\xcb\x8c\xc8\x1a\xd0\xba\x5d\x55" + "\xbc\x6d\x2e\xb2\x2f\x75\x29\x3f" + "\x1a\x4b\xa8\xd7\xe8\xf6\xf4\x2a" + "\xa5\xa1\x68\xec\xf3\xd5\xdd\x0f" + "\xad\x57\xae\x98\x83\xd5\x92\x4e" + "\x76\x86\x8e\x5e\x4b\x87\x7b\xf7" + "\x2d\x79\x3f\x12\x6a\x24\x58\xc8" + "\xab\x9a\x65\x75\x82\x6f\xa5\x39" + "\x72\xb0\xdf\x93\xb5\xa2\xf3\xdd" + "\x1f\x32\xfa\xdb\xfe\x1b\xbf\x0a" + "\xd9\x95\xdd\x02\xf1\x23\x54\xb1" + "\xa5\xbb\x24\x04\x5c\x2a\x97\x92" + "\xe6\xe0\x10\x61\xe3\x46\xc7\x0c" + "\xcb\xbc\x51\x9a\x35\x16\xd9\x42" + "\x62\xb3\x5e\xa4\x3c\x84\xa0\x7f" + "\xb8\x7f\x70\xd1\x8b\x03\xdf\x27" + "\x32\x06\x3f\x12\x23\x19\x22\x82" + "\x2d\x37\xa5\x00\x31\x9b\xa9\x21" + "\x8e\x34\x8c\x8e\x4f\xe8\xd4\x63" + "\x6c\xb2\xa9\x6e\xf6\x7c\x96\xf1" + "\x0e\x64\xab\x14\x3d\x8f\x74\xb3" + "\x35\x79\x84\x78\x06\x68\x97\x30" + "\xe0\x22\x55\xd6\xc5\x5b\x38\xb2" + "\x75\x24\x0c\x52\xb6\x57\xcc\x0a" + "\xbd\x3c\xd0\x73\x47\xd1\x25\xd6" + "\x1c\xfd\x27\x05\x3f\x70\xe1\xa7" + "\x69\x3b\xee\xc9\x9f\xfd\x2a\x7e" + "\xab\x58\xe6\x0b\x35\x5e\x52\xf9" + "\xff\xac\x5b\x82\x88\xa7\x65\xbc" + "\x61\x29\xdc\xa1\x94\x42\xd1\xd3" + "\xa0\xd8\xba\x3b\x49\xc8\xa7\xce" + "\x01\x6c\xb7\x3f\xe3\x98\x4d\xd1" + "\x9f\x46\x0d\xb3\xf2\x43\x33\x49" + "\xb7\x27\xbd\xba\xcc\x3f\x09\x56" + "\xfa\x64\x18\xb8\x17\x28\xde\x0d" + "\x29\xfa\x1f\xad\x60\x3b\x90\xa7" + "\x05\x9f\x4c\xc4\xdc\x05\x3b\x17" + "\x58\xea\x99\xfd\x6b\x8a\x93\x77" + "\xa5\x44\xbd\x8d\x29\x44\x29\x89" + "\x52\x1d\x89\x8b\x44\x8f\xb9\x68" + "\xeb\x93\xfd\x92\xd9\x14\x35\x9c" + "\x28\x3a\x9f\x1d\xd8\xe0\x2a\x76" + "\x51\xc1\xf0\xa9\x1d\xb4\xf8\xb9" + "\xfc\x14\x78\x5a\xa2\xb1\xdb\x94" + "\xcb\x18\xb9\x34\xbd\x0c\x65\x1d" + "\x64\xde\xd0\x3a\xe4\x68\x0e\xbc" + "\x13\xa7\x47\x89\x62\xa3\x03\x19" + "\x64\xa1\x02\x27\x3a\x8d\x43\xfa" + "\x68\xff\xda\x8b\x40\xe9\x19\x8b" + "\x56\xbe\x1c\x9b\xe6\xf6\x3f\x60" + "\xdb\x7a\xd5\xab\x82\xd8\xd9\x99" + "\xe3\x5b\x0c\x0c\x69\x18\x5c\xed" + "\x03\xf9\xc1\x61\xc4\x7b\xd4\x90" + "\x43\xc3\x39\xec\xac\xcb\x1f\x4b" + "\x23\xf8\xa9\x98\x2f\xf6\x48\x90" + "\x6c\x2b\x94\xad\x14\xdd\xcc\xa2" + "\x3d\xc7\x86\x0f\x7f\x1c\x0b\x93" + "\x4b\x74\x1f\x80\x75\xb4\x91\xdf" + "\xa8\x26\xf9\x06\x2b\x3a\x2c\xfd" + "\x3c\x31\x40\x1e\x5b\xa6\x86\x01" + "\xc4\xa2\x80\x4f\xf5\xa2\xf4\xff" + "\xf6\x07\x8c\x92\xf7\x74\xbd\x42" + "\xb0\x3f\x6b\x05\xca\x40\xeb\x04" + "\x20\xa9\x37\x78\x32\x03\x60\xcc" + "\xf3\xec\xb2\x2d\xb5\x80\x7c\xe4" + "\x37\x53\x25\xd1\xe8\x91\x6a\xe5" + "\xdf\xdd\xb0\xab\x69\xc7\xa1\xb2" + "\xfc\xb3\xd1\x9e\xda\xa8\x0d\x68" + "\xfe\x7d\xdc\x56\x33\x65\x99\xd2" + "\xec\xa5\xa0\xa1\x26\xc9\xec\xbd" + "\x22\x20\x5e\x0d\xcb\x93\x64\x7a" + "\x56\x75\xed\xe5\x45\xa2\xbd\x16" + "\x59\xf7\x43\xd9\x5b\x2c\xdd\xb6" + "\x1d\xa8\x05\x89\x2f\x65\x2e\x66" + "\xfe\xad\x93\xeb\x85\x8f\xe8\x4c" + "\x00\x44\x71\x03\x0e\x26\xaf\xfd" + "\xfa\x56\x0f\xdc\x9c\xf3\x2e\xab" + "\x88\x26\x61\xc6\x13\xfe\xba\xc1" + "\xd8\x8a\x38\xc3\xb6\x4e\x6d\x80" + 
"\x4c\x65\x93\x2f\xf5\x54\xff\x63" + "\xbe\xdf\x9a\xe3\x4f\xca\xc9\x71" + "\x12\xab\x95\x66\xec\x09\x64\xea" + "\xdc\x9f\x01\x61\x24\x88\xd1\xa7" + "\xd0\x69\x26\xf0\x80\xb0\xec\x86" + "\xc2\x58\x2f\x6a\xc5\xfd\xfc\x2a" + "\xf6\x3e\x23\x77\x3b\x7e\xc5\xc5" + "\xe7\xf9\x4d\xcc\x68\x53\x11\xc8" + "\x5b\x44\xbd\x48\x0f\xb3\x35\x1a" + "\x93\x4a\x80\x16\xa3\x0d\x50\x85" + "\xa6\xc4\xd4\x74\x4d\x87\x59\x51" + "\xd7\xf7\x7d\xee\xd0\x9b\xd1\x83" + "\x25\x2b\xc6\x39\x27\x6a\xb3\x41" + "\x5f\xd2\x24\xd4\xd6\xfa\x8c\x3e" + "\xb2\xf9\x11\x71\x7a\x9e\x5e\x7b" + "\x5b\x9a\x47\x80\xca\x1c\xbe\x04" + "\x5d\x34\xc4\xa2\x2d\x41\xfe\x73" + "\x53\x15\x9f\xdb\xe7\x7d\x82\x19" + "\x21\x1b\x67\x2a\x74\x7a\x21\x4a" + "\xc4\x96\x6f\x00\x92\x69\xf1\x99" + "\x50\xf1\x4a\x16\x11\xf1\x16\x51", + .ctext = "\x57\xd1\xcf\x26\xe5\x07\x7a\x3f" + "\xa5\x5e\xd4\xa8\x12\xe9\x4e\x36" + "\x9c\x28\x65\xe0\xbd\xef\xf1\x49" + "\x04\xd4\xd4\x01\x4d\xf5\xfc\x2a" + "\x32\xd8\x19\x21\xcd\x58\x2a\x1a" + "\x43\x78\xa4\x57\x69\xa0\x52\xeb" + "\xcd\xa5\x9c\x4d\x03\x28\xef\x8b" + "\x54\xc6\x6c\x31\xab\x3e\xaf\x6d" + "\x0a\x87\x83\x3d\xb7\xea\x6b\x3d" + "\x11\x58\x7d\x5f\xaf\xc9\xfc\x50" + "\x58\x9a\x84\xa1\xcf\x76\xdc\x77" + "\x83\x9a\x28\x74\x69\xc9\x0c\xc2" + "\x7b\x1e\x4e\xe4\x25\x41\x23\x0d" + "\x4e\x0e\x2d\x7a\x87\xaa\x0f\x7c" + "\x98\xad\xf0\x6f\xbf\xcb\xd5\x1a" + "\x3e\xcf\x0e\xc5\xde\xbd\x8d\xf1" + "\xaa\x19\x16\xb8\xc5\x25\x02\x33" + "\xbd\x5a\x85\xe2\xc0\x77\x71\xda" + "\x12\x4c\xdf\x7f\xce\xc0\x32\x95" + "\x1a\xde\xcb\x0a\x70\xd0\x9e\x89" + "\xc5\x97\x18\x04\xab\x8c\x38\x56" + "\x69\xe5\xf6\xa5\x76\x2c\x52\x7a" + "\x49\xd2\x9a\x95\xa6\xa8\x82\x42" + "\x20\x1f\x58\x57\x4e\x22\xdb\x92" + "\xec\xbd\x4a\x21\x66\x9b\x7a\xcb" + "\x73\xcd\x6d\x15\x07\xc9\x97\xb8" + "\x11\x35\xee\x29\xa4\x90\xfc\x46" + "\x0f\x39\x56\xc6\x4a\x3a\xcf\xcc" + "\xb1\xbf\x62\x1c\x16\xc5\x12\x6c" + "\x0e\x69\x89\xce\xcf\x11\x4e\xe5" + "\x7e\x4e\x7c\x8f\xb4\xc9\xe6\x54" + "\x42\x89\x28\x27\xe6\xec\x50\xb7" + "\x69\x91\x44\x3e\x46\xd4\x64\xf6" + "\x25\x4c\x4d\x2f\x60\xd9\x9a\xd3" + "\x1c\x70\xf4\xd8\x24\x1e\xdb\xcf" + "\xa8\xc0\x22\xe6\x82\x57\xf6\xf0" + "\xe1\x1e\x38\x66\xec\xdc\x20\xdb" + "\x6a\x57\x68\xb1\x43\x61\xe1\x12" + "\x18\x5f\x31\x57\x39\xcb\xea\x3c" + "\x6e\x5d\x9a\xe0\xa6\x70\x4d\xd8" + "\xf9\x47\x4e\xef\x31\xa5\x66\x9b" + "\xb7\xf1\xd9\x59\x85\xfc\xdb\x7e" + "\xa2\x7a\x70\x25\x0c\xfd\x18\x0d" + "\x00\x42\xc9\x48\x8a\xbd\x74\xc5" + "\x3e\xe1\x20\x5a\x5d\x2e\xe5\x32" + "\x1d\x1c\x08\x65\x80\x69\xae\x24" + "\x80\xde\xb6\xdf\x97\xaa\x42\x8d" + "\xce\x39\x07\xe6\x69\x94\x5a\x75" + "\x39\xda\x5e\x1a\xed\x4a\x4c\x23" + "\x66\x1f\xf3\xb1\x6e\x8f\x21\x94" + "\x45\xc4\x63\xbd\x06\x93\x5e\x30" + "\xe7\x8f\xcb\xe0\xbb\x2a\x27\xcf" + "\x57\xa9\xa6\x28\xaf\xae\xcb\xa5" + "\x7b\x36\x61\x77\x3a\x4f\xec\x51" + "\x71\xfd\x52\x9e\x32\x7b\x98\x09" + "\xae\x27\xbc\x93\x96\xab\xb6\x02" + "\xf7\x21\xd3\x42\x00\x7e\x7a\x92" + "\x17\xfe\x1b\x3d\xcf\xb6\xfe\x1e" + "\x40\xc3\x10\x25\xac\x22\x9e\xcc" + "\xc2\x02\x61\xf5\x0a\x4b\xc3\xec" + "\xb1\x44\x06\x05\xb8\xd6\xcb\xd5" + "\xf1\xf5\xb5\x65\xbc\x1a\x19\xa2" + "\x7d\x60\x87\x11\x06\x83\x25\xe3" + "\x5e\xf0\xeb\x15\x93\xb6\x8e\xab" + "\x49\x52\xe8\xdb\xde\xd1\x8e\xa2" + "\x3a\x64\x13\x30\xaa\x20\xaf\x81" + "\x8d\x3c\x24\x2a\x76\x6d\xca\x32" + "\x63\x51\x6b\x8e\x4b\xa7\xf6\xad" + "\xa5\x94\x16\x82\xa6\x97\x3b\xe5" + "\x41\xcd\x87\x33\xdc\xc1\x48\xca" + "\x4e\xa2\x82\xad\x8e\x1b\xae\xcb" + "\x12\x93\x27\xa3\x2b\xfa\xe6\x26" + "\x43\xbd\xb0\x00\x01\x22\x1d\xd3" + "\x28\x9d\x69\xe0\xd4\xf8\x5b\x01" + 
"\x40\x7d\x54\xe5\xe2\xbd\x78\x5a" + "\x0e\xab\x51\xfc\xd4\xde\xba\xbc" + "\xa4\x7a\x74\x6d\xf8\x36\xc2\x70" + "\x03\x27\x36\xa2\xc0\xde\xf2\xc7" + "\x55\xd4\x66\xee\x9a\x9e\xaa\x99" + "\x2b\xeb\xa2\x6f\x17\x80\x60\x64" + "\xed\x73\xdb\xc1\x70\xda\xde\x67" + "\xcd\x6e\xc9\xfa\x3f\xef\x49\xd9" + "\x18\x42\xf1\x87\x6e\x2c\xac\xe1" + "\x12\x26\x52\xbe\x3e\xf1\xcc\x85" + "\x9a\xd1\x9e\xc1\x02\xd3\xca\x2b" + "\x99\xe7\xe8\x95\x7f\x91\x4b\xc0" + "\xab\xd4\x5a\xf7\x88\x1c\x7e\xea" + "\xd3\x15\x38\x26\xb5\xa3\xf2\xfc" + "\xc4\x12\x70\x5a\x37\x83\x49\xac" + "\xf4\x5e\x4c\xc8\x64\x03\x98\xad" + "\xd2\xbb\x8d\x90\x01\x80\xa1\x2a" + "\x23\xd1\x8d\x26\x43\x7d\x2b\xd0" + "\x87\xe1\x8e\x6a\xb3\x73\x9d\xc2" + "\x66\x75\xee\x2b\x41\x1a\xa0\x3b" + "\x1b\xdd\xb9\x21\x69\x5c\xef\x52" + "\x21\x57\xd6\x53\x31\x67\x7e\xd1" + "\xd0\x67\x8b\xc0\x97\x2c\x0a\x09" + "\x1d\xd4\x35\xc5\xd4\x11\x68\xf8" + "\x5e\x75\xaf\x0c\xc3\x9d\xa7\x09" + "\x38\xf5\x77\xb9\x80\xa9\x6b\xbd" + "\x0c\x98\xb4\x8d\xf0\x35\x5a\x19" + "\x1d\xf8\xb3\x5b\x45\xad\x4e\x4e" + "\xd5\x59\xf5\xd7\x53\x63\x3e\x97" + "\x7f\x91\x50\x65\x61\x21\xa9\xb7" + "\x65\x12\xdc\x01\x56\x40\xe0\xb1" + "\xe1\x23\xba\x9d\xb9\xc4\x8b\x1f" + "\xa6\xfe\x24\x19\xe9\x42\x9f\x9b" + "\x02\x48\xaa\x60\x0b\xf5\x7f\x8f" + "\x35\x70\xed\x85\xb8\xc4\xdc\xb7" + "\x16\xb7\x03\xe0\x2e\xa0\x25\xab" + "\x02\x1f\x97\x8e\x5a\x48\xb6\xdb" + "\x25\x7a\x16\xf6\x4c\xec\xec\xa6" + "\xc1\x4e\xe3\x4e\xe3\x27\x78\xc8" + "\xb6\xd7\x01\x61\x98\x1b\x38\xaa" + "\x36\x93\xac\x6d\x05\x61\x4d\x5a" + "\xc9\xe5\x27\xa9\x22\xf2\x38\x5e" + "\x9e\xe5\xf7\x4a\x64\xd2\x14\x15" + "\x71\x7c\x65\x6e\x90\x31\xc7\x49" + "\x25\xec\x9f\xf1\xb2\xd6\xbc\x20" + "\x6a\x13\xd5\x70\x65\xfc\x8b\x66" + "\x2c\xf1\x57\xc2\xe7\xb8\x89\xf7" + "\x17\xb2\x45\x64\xe0\xb3\x8c\x0d" + "\x69\x57\xf9\x5c\xff\xc2\x3c\x18" + "\x1e\xfd\x4b\x5e\x0d\x20\x01\x1a" + "\xa3\xa3\xb3\x76\x98\x9c\x92\x41" + "\xb4\xcd\x9f\x8f\x88\xcb\xb1\xb5" + "\x25\x87\x45\x4c\x07\xa7\x15\x99" + "\x24\x85\x15\x9e\xfc\x28\x98\x2b" + "\xd0\x22\x0a\xcc\x62\x12\x86\x0a" + "\xa8\x0e\x7d\x15\x32\x98\xae\x2d" + "\x95\x25\x55\x33\x41\x5b\x8d\x75" + "\x46\x61\x01\xa4\xfb\xf8\x6e\xe5" + "\xec\x24\xfe\xd2\xd2\x46\xe2\x3a" + "\x77\xf3\xa1\x39\xd3\x39\x32\xd8" + "\x2a\x6b\x44\xd7\x70\x36\x23\x89" + "\x4f\x75\x85\x42\x70\xd4\x2d\x4f" + "\xea\xfc\xc9\xfe\xb4\x86\xd8\x73" + "\x1d\xeb\xf7\x54\x0a\x47\x7e\x2c" + "\x04\x7b\x47\xea\x52\x8f\x13\x1a" + "\xf0\x19\x65\xe2\x0a\x1c\xae\x89" + "\xe1\xc5\x87\x6e\x5d\x7f\xf8\x79" + "\x08\xbf\xd2\x7f\x2c\x95\x22\xba" + "\x32\x78\xa9\xf6\x03\x98\x18\xed" + "\x15\xbf\x49\xb0\x6c\xa1\x4b\xb0" + "\xf3\x17\xd5\x35\x5d\x19\x57\x5b" + "\xf1\x07\x1e\xaa\x4d\xef\xd0\xd6" + "\x72\x12\x6b\xd9\xbc\x10\x49\xc5" + "\x28\xd4\xec\xe9\x8a\xb1\x6d\x50" + "\x4b\xf3\x44\xb8\x49\x04\x62\xe9" + "\xa4\xd8\x5a\xe7\x90\x02\xb7\x1e" + "\x66\x89\xbc\x5a\x71\x4e\xbd\xf8" + "\x18\xfb\x34\x2f\x67\xa2\x65\x71" + "\x00\x63\x22\xef\x3a\xa5\x18\x0e" + "\x54\x76\xaa\x58\xae\x87\x23\x93" + "\xb0\x3c\xa2\xa4\x07\x77\x3e\xd7" + "\x1a\x9c\xfe\x32\xc3\x54\x04\x4e" + "\xd6\x98\x44\xda\x98\xf8\xd3\xc8" + "\x1c\x07\x4b\xcd\x97\x5d\x96\x95" + "\x9a\x1d\x4a\xfc\x19\xcb\x0b\xd0" + "\x6d\x43\x3a\x9a\x39\x1c\xa8\x90" + "\x9f\x53\x8b\xc4\x41\x75\xb5\xb9" + "\x91\x5f\x02\x0a\x57\x6c\x8f\xc3" + "\x1b\x0b\x3a\x8b\x58\x3b\xbe\x2e" + "\xdc\x4c\x23\x71\x2e\x14\x06\x21" + "\x0b\x3b\x58\xb8\x97\xd1\x00\x62" + "\x2e\x74\x3e\x6e\x21\x8a\xcf\x60" + "\xda\x0c\xf8\x7c\xfd\x07\x55\x7f" + "\xb9\x1d\xda\x34\xc7\x27\xbf\x2a" + "\xd9\xba\x41\x9b\x37\xa1\xc4\x5d" + "\x03\x01\xce\xbb\x58\xff\xee\x74" + 
"\x08\xbd\x0b\x80\xb1\xd5\xf8\xb5" + "\x92\xf9\xbb\xbe\x03\xb5\xec\xbe" + "\x17\xee\xd7\x4e\x87\x2b\x61\x1b" + "\x27\xc3\x51\x50\xa0\x02\x73\x00" + "\x1a\xea\x2a\x2b\xf8\xf6\xe6\x96" + "\x75\x00\x56\xcc\xcb\x7a\x24\x29" + "\xe8\xdb\x95\xbf\x4e\x8f\x0a\x78" + "\xb8\xeb\x5a\x90\x37\xd0\x21\x94" + "\x6a\x89\x6b\x41\x3a\x1b\xa7\x20" + "\x43\x37\xda\xad\x81\xdd\xb4\xfc" + "\xe9\x60\x82\x77\x44\x3f\x89\x23" + "\x35\x04\x8f\xa1\xe8\xc0\xb6\x9f" + "\x56\xa7\x86\x3d\x65\x9c\x57\xbb" + "\x27\xdb\xe1\xb2\x13\x07\x9c\xb1" + "\x60\x8b\x38\x6b\x7f\x24\x28\x14" + "\xfe\xbf\xc0\xda\x61\x6e\xc2\xc7" + "\x63\x36\xa8\x02\x54\x93\xb0\xba" + "\xbd\x4d\x29\x14\x5a\x8b\xbc\x78" + "\xb3\xa6\xc5\x15\x5d\x36\x4d\x38" + "\x20\x9c\x1e\x98\x2e\x16\x89\x33" + "\x66\xa2\x54\x57\xcc\xde\x12\xa6" + "\x3b\x44\xf1\xac\x36\x3b\x97\xc1" + "\x96\x94\xf2\x67\x57\x23\x9c\x29" + "\xcd\xb7\x24\x2a\x8c\x86\xee\xaa" + "\x0f\xee\xaf\xa0\xec\x40\x8c\x08" + "\x18\xa1\xb4\x2c\x09\x46\x11\x7e" + "\x97\x84\xb1\x03\xa5\x3e\x59\x05" + "\x07\xc5\xf0\xcc\xb6\x71\x72\x2a" + "\xa2\x02\x78\x60\x0b\xc4\x47\x93" + "\xab\xcd\x67\x2b\xf5\xc5\x67\xa0" + "\xc0\x3c\x6a\xd4\x7e\xc9\x93\x0c" + "\x02\xdc\x15\x87\x48\x16\x26\x18" + "\x4e\x0b\x16\x0e\xb3\x02\x3e\x4b" + "\xc2\xe4\x49\x08\x9f\xb9\x8b\x1a" + "\xca\x10\xe8\x6c\x58\xa9\x7e\xb8" + "\xbe\xff\x58\x0e\x8a\xfb\x35\x93" + "\xcc\x76\x7d\xd9\x44\x7c\x31\x96" + "\xc0\x29\x73\xd3\x91\x0a\xc0\x65" + "\x5c\xbe\xe7\x4e\xda\x31\x85\xf2" + "\x72\xee\x34\xbe\x41\x90\xd4\x07" + "\x50\x64\x56\x81\xe3\x27\xfb\xcc" + "\xb7\x5c\x36\xb4\x6e\xbd\x23\xf8" + "\xe8\x71\xce\xa8\x73\x77\x82\x74" + "\xab\x8d\x0e\xe5\x93\x68\xb1\xd2" + "\x51\xc2\x18\x58\xd5\x3f\x29\x6b" + "\x2e\xd0\x88\x7f\x4a\x9d\xa2\xb8" + "\xae\x96\x09\xbf\x47\xae\x7d\x12" + "\x70\x67\xf1\xdd\xda\xdf\x47\x57" + "\xc9\x2c\x0f\xcb\xf3\x57\xd4\xda" + "\x00\x2e\x13\x48\x8f\xc0\xaa\x46" + "\xe1\xc1\x57\x75\x1e\xce\x74\xc2" + "\x82\xef\x31\x85\x8e\x38\x56\xff" + "\xcb\xab\xe0\x78\x40\x51\xd3\xc5" + "\xc3\xb1\xee\x9b\xd7\x72\x7f\x13" + "\x83\x7f\x45\x49\x45\xa1\x05\x8e" + "\xdc\x83\x81\x3c\x24\x28\x87\x08" + "\xa0\x70\x73\x80\x42\xcf\x5c\x26" + "\x39\xa5\xc5\x90\x5c\x56\xda\x58" + "\x93\x45\x5d\x45\x64\x59\x16\x3f" + "\xf1\x20\xf7\xa8\x2a\xd4\x3d\xbd" + "\x17\xfb\x90\x01\xcf\x1e\x71\xab" + "\x22\xa2\x24\xb5\x80\xac\xa2\x9a" + "\x9c\x2d\x85\x69\xa7\x87\x33\x55" + "\x65\x72\xc0\x91\x2a\x3d\x05\x33" + "\x25\x0d\x29\x25\x9f\x45\x4e\xfa" + "\x5d\x90\x3f\x34\x08\x54\xdb\x7d" + "\x94\x20\xa2\x3b\x10\x01\xa4\x89" + "\x1e\x90\x4f\x36\x3f\xc2\x40\x07" + "\x3f\xab\x2e\x89\xce\x80\xe1\xf5" + "\xac\xaf\x17\x10\x18\x0f\x4d\xe3" + "\xfc\x82\x2b\xbe\xe2\x91\xfa\x5b" + "\x9a\x9b\x2a\xd7\x99\x8d\x8f\xdc" + "\x54\x99\xc4\xa3\x97\xfd\xd3\xdb" + "\xd1\x51\x7c\xce\x13\x5c\x3b\x74" + "\xda\x9a\xe3\xdc\xdc\x87\x84\x98" + "\x16\x6d\xb0\x3d\x65\x57\x0b\xb2" + "\xb8\x04\xd4\xea\x49\x72\xc3\x66" + "\xbc\xdc\x91\x05\x2b\xa6\x5e\xeb" + "\x55\x72\x3e\x34\xd4\x28\x4b\x9c" + "\x07\x51\xf7\x30\xf3\xca\x04\xc1" + "\xd3\x69\x50\x2c\x27\x27\xc4\xb9" + "\x56\xc7\xa2\xd2\x66\x29\xea\xe0" + "\x25\xb8\x49\xd1\x60\xc9\x5e\xb5" + "\xed\x87\xb8\x74\x98\x0d\x16\x86" + "\x2a\x02\x24\xde\xb9\xa9\x5e\xf0" + "\xdd\xf7\x55\xb0\x26\x7a\x93\xd4" + "\xe6\x7d\xd2\x43\xb2\x8f\x7e\x9a" + "\x5d\x81\xe6\x28\xe5\x96\x7d\xc8" + "\x33\xe0\x56\x57\xe2\xa0\xf2\x1d" + "\x61\x78\x60\xd5\x81\x70\xa4\x11" + "\x43\x36\xe9\xd1\x68\x27\x21\x3c" + "\xb2\xa2\xad\x5f\x04\xd4\x55\x00" + "\x25\x71\x91\xed\x3a\xc9\x7b\x57" + "\x7b\xd1\x8a\xfb\x0e\xf5\x7b\x08" + "\xa9\x26\x4f\x24\x5f\xdd\x79\xed" + "\x19\xc4\xe1\xd5\xa8\x66\x60\xfc" + 
"\x5d\x48\x11\xb0\xa3\xc3\xe6\xc0" + "\xc6\x16\x7d\x20\x3f\x7c\x25\x52" + "\xdf\x05\xdd\xb5\x0b\x92\xee\xc5" + "\xe6\xd2\x7c\x3e\x2e\xd5\xac\xda" + "\xdb\x48\x31\xac\x87\x13\x8c\xfa" + "\xac\x18\xbc\xd1\x7f\x2d\xc6\x19" + "\x8a\xfa\xa0\x97\x89\x26\x50\x46" + "\x9c\xca\xe1\x73\x97\x26\x0a\x50" + "\x95\xec\x79\x19\xf6\xbd\x9a\xa1" + "\xcf\xc9\xab\xf7\x85\x84\xb2\xf5" + "\x2c\x7c\x73\xaa\xe2\xc2\xfb\xcd" + "\x5f\x08\x46\x2f\x8e\xd9\xff\xfd" + "\x19\xf6\xf4\x5d\x2b\x4b\x54\xe2" + "\x27\xaa\xfd\x2c\x5f\x75\x7c\xf6" + "\x2c\x95\x77\xcc\x90\xa2\xda\x1e" + "\x85\x37\x18\x34\x1d\xcf\x1b\xf2" + "\x86\xda\x71\xfb\x72\xab\x87\x0f" + "\x1e\x10\xb3\xba\x51\xea\x29\xd3" + "\x8c\x87\xce\x4b\x66\xbf\x60\x6d" + "\x81\x7c\xb8\x9c\xcc\x2e\x35\x02" + "\x02\x32\x4a\x7a\x24\xc4\x9f\xce" + "\xf0\x8a\x85\x90\xf3\x24\x95\x02" + "\xec\x13\xc1\xa4\xdd\x44\x01\xef" + "\xf6\xaa\x30\x70\xbf\x4e\x1a\xb9" + "\xc0\xff\x3b\x57\x5d\x12\xfe\xc3" + "\x1d\x5c\x3f\x74\xf9\xd9\x64\x61" + "\x20\xb2\x76\x79\x38\xd2\x21\xfb" + "\xc9\x32\xe8\xcc\x8e\x5f\xd7\x01" + "\x9e\x25\x76\x4d\xa7\xc1\x33\x21" + "\xfa\xcf\x98\x40\xd2\x1d\x48\xbd" + "\xd0\xc0\x38\x90\x27\x9b\x89\x4a" + "\x10\x1e\xaf\xa0\x78\x7d\x87\x2b" + "\x72\x10\x02\xf0\x5d\x22\x8b\x22" + "\xd7\x56\x7c\xd7\x6d\xcd\x9b\xc6" + "\xbc\xb2\xa6\x36\xde\xac\x87\x14" + "\x92\x93\x47\xca\x7d\xf4\x0b\x88" + "\xea\xbf\x3f\x2f\xa9\x94\x24\x13" + "\xa1\x52\x29\xfd\x5d\xa9\x76\x85" + "\x21\x62\x39\xa3\xf0\xf7\xb5\xa3" + "\xe0\x6c\x1b\xcb\xdb\x41\x91\xc6" + "\x4f\xaa\x26\x8b\x15\xd5\x84\x3a" + "\xda\xd6\x05\xc8\x8c\x0f\xe9\x19" + "\x00\x81\x38\xfb\x8f\xdf\xb0\x63" + "\x75\xe0\xe8\x8f\xef\x4a\xe0\x83" + "\x34\xe9\x4e\x06\xd7\xbb\xcd\xed" + "\x70\x0c\x72\x80\x64\x94\x67\xad" + "\x4a\xda\x82\xcf\x60\xfc\x92\x43" + "\xe3\x2f\xd1\x1e\x81\x1d\xdc\x62" + "\xec\xb1\xb0\xad\x4f\x43\x1d\x38" + "\x4e\x0d\x90\x40\x29\x1b\x98\xf1" + "\xbc\x70\x4e\x5a\x08\xbe\x88\x3a" + "\x55\xfb\x8c\x33\x1f\x0a\x7d\x2d" + "\xdc\x75\x03\xd2\x3b\xe8\xb8\x32" + "\x13\xab\x04\xbc\xe2\x33\x44\xa6" + "\xff\x6e\xba\xbd\xdc\xe2\xbf\x54" + "\x99\x71\x76\x59\x3b\x7a\xbc\xde" + "\xa1\x6e\x73\x62\x96\x73\x56\x66" + "\xfb\x1a\x56\x91\x2a\x8b\x12\xb0" + "\x82\x9f\x9b\x0c\x42\xc7\x22\x2c" + "\xbc\x49\xc5\x3c\x3b\xbf\x52\x64" + "\xd6\xd4\x03\x52\xf3\xfd\x13\x98" + "\xcc\xd8\xaa\x3e\x1d\x1f\x04\x8a" + "\x03\x41\x19\x5b\x31\xf3\x48\x83" + "\x49\xa3\xdd\xc9\x7c\x01\x34\x64" + "\xe5\xf3\xdf\xc9\x7f\x17\xa2\xf5" + "\x9c\x21\x79\x93\x91\x93\xbf\x9b" + "\xa5\xa5\xda\x1d\x55\x32\x72\x78" + "\xa6\x45\x2d\x21\x97\x6b\xfe\xbc" + "\xd0\xe7\x8e\x97\x66\x85\x9e\x41" + "\xfa\x2c\x8a\xee\x0d\x5a\x18\xf2" + "\x15\x89\x8f\xfb\xbc\xd8\xa6\x0c" + "\x83\xcc\x20\x08\xce\x70\xe5\xe6" + "\xbb\x7d\x9f\x11\x5f\x1e\x16\x68" + "\x18\xad\xa9\x4b\x04\x97\x8c\x18" + "\xed\x2a\x70\x79\x39\xcf\x36\x72" + "\x1e\x3e\x6d\x3c\x19\xce\x13\x19" + "\xb5\x13\xe7\x02\xd8\x5c\xec\x0c" + "\x81\xc5\xe5\x86\x10\x83\x9e\x67" + "\x3b\x74\x29\x63\xda\x23\xbc\x43" + "\xe9\x73\xa6\x2d\x25\x77\x66\xd0" + "\x2e\x05\x38\xae\x2e\x0e\x7f\xaf" + "\x82\xed\xef\x28\x39\x4c\x4b\x6f" + "\xdb\xa1\xb5\x79\xd0\x5b\x50\x77" + "\x6d\x75\x9f\x3c\xcf\xde\x41\xb8" + "\xa9\x13\x11\x60\x19\x23\xc7\x35" + "\x48\xbc\x14\x08\xf9\x57\xfe\x15" + "\xfd\xb2\xbb\x8c\x44\x3b\xf1\x62" + "\xbc\x0e\x01\x45\x39\xc0\xbb\xce" + "\xf5\xb7\xe1\x16\x7b\xcc\x8d\x7f" + "\xd3\x15\x36\xef\x8e\x4b\xaa\xee" + "\x49\x0c\x6e\x9b\x8c\x0e\x9f\xe0" + "\xd5\x7b\xdd\xbc\xb3\x67\x53\x6d" + "\x8b\xbe\xa3\xcd\x1e\x37\x9d\xc3" + "\x61\x36\xf4\x77\xec\x2b\xc7\x8b" + "\xd7\xad\x8d\x23\xdd\xf7\x9d\xf1" + "\x61\x1c\xbf\x09\xa5\x5e\xb9\x14" + 
"\xa6\x3f\x1a\xd9\x12\xb4\xef\x56" + "\x20\xa0\x77\x3e\xab\xf1\xb9\x91" + "\x5a\x92\x85\x5c\x92\x15\xb2\x1f" + "\xaf\xb0\x92\x23\x2d\x27\x8b\x7e" + "\x12\xcc\x56\xaa\x62\x85\x15\xd7" + "\x41\x89\x62\xd6\xd9\xd0\x6d\xbd" + "\x21\xa8\x49\xb6\x35\x40\x2f\x8d" + "\x2e\xfa\x24\x1e\x30\x12\x9c\x05" + "\x59\xfa\xe1\xad\xc0\x53\x09\xda" + "\xc0\x2e\x9d\x24\x0e\x4b\x6e\xd7" + "\x68\x32\x6a\xa0\x3c\x23\xb6\x5a" + "\x90\xb1\x1f\x62\xc8\x37\x36\x88" + "\xa4\x4d\x91\x12\x8d\x51\x8d\x81" + "\x44\x21\xfe\xd3\x61\x8d\xea\x5b" + "\x87\x24\xa9\xe9\x87\xde\x75\x77" + "\xc6\xa0\xd3\xf6\x99\x8b\x32\x56" + "\x47\xc6\x60\x65\xb6\x4f\xd1\x59" + "\x08\xb2\xe0\x15\x3e\xcb\x2c\xd6" + "\x8d\xc6\xbf\xda\x63\xe2\x04\x88" + "\x30\x9f\x37\x38\x98\x1c\x3e\x7a" + "\xa8\x8f\x3e\x2c\xcf\x90\x15\x6e" + "\x5d\xe9\x76\xd5\xdf\xc6\x2f\xf6" + "\xf5\x4a\x86\xbd\x36\x2a\xda\xdf" + "\x2f\xd8\x6e\x15\x18\x6b\xe9\xdb" + "\x26\x54\x6e\x60\x3b\xb8\xf9\x91" + "\xc1\x1d\xc0\x4f\x26\x8b\xdf\x55" + "\x47\x2f\xce\xdd\x4e\x93\x58\x3f" + "\x70\xdc\xf9\x4e\x9b\x37\x5e\x4f" + "\x39\xb9\x30\xe6\xce\xdb\xaf\x46" + "\xca\xfa\x52\xc9\x75\x3e\xd6\x96" + "\xe8\x97\xf1\xb1\x64\x31\x71\x1e" + "\x9f\xb6\xff\x69\xd6\xcd\x85\x4e" + "\x20\xf5\xfc\x84\x3c\xaf\xcc\x8d" + "\x5b\x52\xb8\xa2\x1c\x38\x47\x82" + "\x96\xff\x06\x4c\xaf\x8a\xf4\x8f" + "\xf8\x15\x97\xf6\xc3\xbc\x8c\x9e" + "\xc2\x06\xd9\x64\xb8\x1b\x0d\xd1" + "\x53\x55\x83\x7d\xcb\x8b\x7d\x20" + "\xa7\x70\xcb\xaa\x25\xae\x5a\x4f" + "\xdc\x66\xad\xe4\x54\xff\x09\xef" + "\x25\xcb\xac\x59\x89\x1d\x06\xcf" + "\xc7\x74\xe0\x5d\xa6\xd0\x04\xb4" + "\x41\x75\x34\x80\x6c\x4c\xc9\xd0" + "\x51\x0c\x0f\x84\x26\x75\x69\x23" + "\x81\x67\xde\xbf\x6c\x57\x8a\xc4" + "\xba\x91\xba\x8c\x2c\x75\xeb\x55" + "\xe5\x1b\x13\xbc\xaa\xec\x31\xdb" + "\xcc\x00\x3b\xe6\x50\xd8\xc3\xcc" + "\x9c\xb8\x6e\xb4\x9b\x16\xee\x74" + "\x26\x51\xda\x39\xe6\x31\xa1\xb2" + "\xd7\x6f\xcb\xae\x7d\x9f\x38\x7d" + "\x86\x49\x2a\x16\x5c\xc0\x08\xea" + "\x6b\x55\x85\x47\xbb\x90\xba\x69" + "\x56\xa5\x44\x62\x5b\xe6\x3b\xcc" + "\xe7\x6d\x1e\xca\x4b\xf3\x86\xe0" + "\x09\x76\x51\x83\x0a\x46\x19\x61" + "\xf0\xce\xe1\x06\x7d\x06\xb4\xfe" + "\xd9\xd3\x64\x8e\x0f\xd9\x64\x9e" + "\x74\x44\x97\x5d\x92\x7b\xe3\xcf" + "\x51\x44\xe7\xf2\xe7\xc0\x0c\xc2" + "\xf1\xf7\xa6\x36\x52\x2f\x7c\x09" + "\xfe\x8c\x59\x77\x52\x6a\x7e\xb3" + "\x2b\xb9\x17\x78\xe4\xf2\x82\x62" + "\x7f\x68\x8e\x04\xb4\x8f\x60\xd2" + "\xc6\x22\x1e\x0f\x3a\x8e\x3c\xb2" + "\x60\xbc\xa9\xb3\xda\xbd\x50\xe4" + "\x33\x98\xdd\x6f\xe9\x3b\x77\x57" + "\xeb\x7c\x8f\xbc\xfc\x34\x34\xb9" + "\x40\x31\x67\xcf\xfe\x22\x20\xa5" + "\x97\xe8\x4c\xa2\xc3\x94\xc6\x28" + "\xa6\x24\xe5\xa6\xb5\xd8\x24\xef" + "\x16\xa1\xc9\xe5\x92\xe6\x8c\x45" + "\x24\x24\x51\x22\x1e\xad\xef\x2f" + "\xb6\xbe\xfc\x92\x20\xac\x45\xe6" + "\xc0\xb0\xc8\xfb\x21\x34\xd4\x05" + "\x54\xb3\x99\xa4\xfe\xa9\xd5\xb5" + "\x3b\x72\x83\xf6\xe2\xf9\x88\x0e" + "\x20\x80\x3e\x4e\x8f\xa1\x75\x69" + "\x43\x5a\x7c\x38\x62\x51\xb5\xb7" + "\x84\x95\x3f\x6d\x24\xcc\xfd\x4b" + "\x4a\xaa\x97\x83\x6d\x16\xa8\xc5" + "\x18\xd9\xb9\xfe\xe2\x3f\xe8\xbd" + "\x37\x44\xdf\x79\x3b\x34\x19\x1a" + "\x65\x5e\xc7\x61\x1f\x17\x5e\x84" + "\x20\x72\x32\x98\x8c\x9e\xac\x1f" + "\x6e\x32\xae\x86\x46\x4f\x0f\x64" + "\x3f\xce\x96\xe6\x02\x41\x53\x1f" + "\x35\x30\x57\x7f\xfe\xb7\x47\xb9" + "\x0c\x2f\x14\x34\x9b\x1c\x88\x17" + "\xb5\xe5\x94\x17\x3e\xdc\x4d\x49" + "\xe1\x5d\x75\x3e\xa6\x16\x42\xd4" + "\x59\xb5\x24\x7c\x4c\x54\x1c\xf9" + "\xd6\xed\x69\x22\x5f\x74\xc9\xa9" + "\x7c\xb8\x09\xa7\xf9\x2b\x0d\x5f" + "\x42\xff\x4e\x57\xde\x0c\x67\x45" + "\xa4\x6e\xa0\x7e\x28\x34\xc5\xfe" + 
"\x58\x7e\xda\xec\x9f\x0b\x31\x2a" + "\x1f\x1b\x98\xad\x14\xcf\x9f\x96" + "\xf8\x87\x0e\x14\x19\x81\x23\x53" + "\x5f\x38\x08\xd9\xc1\xcb\xb2\xc5" + "\x19\x72\x75\x01\xd4\xcf\xd9\x91" + "\xfc\x48\xcc\xa3\x3c\xe6\x4c\xc6" + "\x73\xde\x5e\x90\xce\x6c\x85\x43" + "\x0d\xdf\xe3\x8c\x02\x62\xef\xac" + "\xb8\x05\x80\x81\xf6\x22\x30\xad" + "\x30\xa8\xcb\x55\x1e\xe6\x05\x7f" + "\xc5\x58\x1a\x78\xb7\x2f\x8e\x3c" + "\x80\x09\xca\xa2\x9a\x72\xeb\x10" + "\x84\x54\xaa\x98\x35\x5e\xb1\xc2" + "\xb7\x73\x14\x69\xef\xf8\x28\x43" + "\x36\xd3\x10\x0a\xd6\x69\xf8\xc8" + "\xbb\xe9\xe9\xf9\x29\x52\xf8\x6f" + "\x12\x78\xf9\xc6\xb2\x12\xfd\x39" + "\xa9\xeb\xe2\x47\xb9\x22\xc5\x8f" + "\x4d\xb1\x17\x40\x02\x84\xed\x53" + "\xc5\xfa\xc1\xcd\x59\x56\x93\xaa" + "\x3f\x23\x3f\x02\xb7\xe9\x6e\xa0" + "\xbc\x96\xb8\xb2\xf8\x04\x19\x87" + "\xe9\x4f\x29\xbf\x3a\xcb\x6d\x48" + "\xc9\xe7\x1f\xb7\xa8\xf8\xd4\xb4" + "\x6d\x0f\xb4\xf6\x44\x11\x0f\xf7" + "\x3d\xd2\x36\x05\x67\xa1\x46\x81" + "\x90\xe9\x60\x64\xfa\x52\x87\x37" + "\x44\x01\xbd\x58\xe1\xda\xda\x1e" + "\xa7\x09\xf7\x43\x31\x2b\x4b\x55" + "\xbd\x0d\x53\x7f\x12\x6c\xf5\x07" + "\xfc\x61\xda\xd6\x0a\xbd\x89\x5f" + "\x2c\xf5\xa8\x1f\x0d\x60\xe4\x3c" + "\x5d\x94\x8a\x1f\x64\xce\xd5\x16" + "\x73\xbc\xbe\xb1\x85\x28\xcb\x0b" + "\x47\x5c\x1f\x66\x25\x89\x61\x6a" + "\xa7\xcd\xf8\x1b\x31\x88\x42\x71" + "\x58\x65\x53\xd5\xc0\xa3\x56\x2e" + "\xb6\x86\x9e\x13\x78\x34\x36\x85" + "\xbb\xce\x6e\x54\x33\xb9\x97\xc5" + "\x72\xb8\xe0\x13\x34\x04\xbf\x83" + "\xbf\x78\x1d\x7c\x23\x34\x90\xe0" + "\x57\xd4\x3f\xc6\x61\xe3\xca\x96" + "\x13\xdd\x9e\x20\x51\x18\x73\x37" + "\x69\x37\xfb\xe5\x60\x1f\xf2\xa1" + "\xef\xa2\x6e\x16\x32\x8e\xc3\xb6" + "\x21\x5e\xc2\x1c\xb6\xc6\x96\x72" + "\x4f\xa6\x85\x69\xa9\x5d\xb2\x2e" + "\xac\xfe\x6e\xc3\xe7\xb3\x51\x08" + "\x66\x2a\xac\x59\xb3\x73\x86\xae" + "\x6d\x85\x97\x37\x68\xef\xa7\x85" + "\xb7\xdd\xdd\xd9\x85\xc9\x57\x01" + "\x10\x2b\x9a\x1e\x44\x12\x87\xa5" + "\x60\x1f\x88\xae\xbf\x14\x2d\x05" + "\x4c\x60\x85\x8a\x45\xac\x0f\xc2", + .len = 4096, } }; @@ -33801,9 +30271,6 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = { .ctext = "\xf6\x78\x97\xd6\xaa\x94\x01\x27" "\x2e\x4d\x83\xe0\x6e\x64\x9a\xdf", .len = 16, - .also_non_np = 1, - .np = 3, - .tap = { 5, 2, 9 }, }, { .key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99" "\x5f\x1a\x5a\x44\x1d\x92\x0f\x27" @@ -33823,9 +30290,6 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = { "\x57\x72\xb5\xfd\xb5\x5d\xb8\x28" "\x0c\x04\x91\x14\x91\xe9\x37", .len = 31, - .also_non_np = 1, - .np = 2, - .tap = { 16, 15 }, }, { .key = "\xa5\x28\x24\x34\x1a\x3c\xd8\xf7" "\x05\x91\x8f\xee\x85\x1f\x35\x7f" @@ -33869,9 +30333,6 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = { "\x29\x62\x0d\xb2\xf6\x3c\x58\x57" "\xc1\xd5\x5a\xbb\xd6\xa6\x2a\xe5", .len = 128, - .also_non_np = 1, - .np = 4, - .tap = { 112, 7, 8, 1 }, }, { .key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a" "\x25\x74\x29\x0d\x51\x8a\x0e\x13" @@ -34011,6 +30472,1436 @@ static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = { "\xb8\x78\xd2\xa3\xc6\xf3\x79\x9c" "\xc7\x27\xe1\x6a\x29\xad\xa4\x03", .len = 512, + }, { + .key = "\xeb\xe5\x11\x3a\x72\xeb\x10\xbe" + "\x70\xcf\xe3\xea\xc2\x74\xa4\x48" + "\x29\x0f\x8f\x3f\xcf\x4c\x28\x2a" + "\x4e\x1e\x3c\xc3\x27\x9f\x16\x13", + .klen = 32, + .iv = "\x84\x3e\xa2\x7c\x06\x72\xb2\xad" + "\x88\x76\x65\xb4\x1a\x29\x27\x12" + "\x45\xb6\x8d\x0e\x4b\x87\x04\xfc" + "\xb5\xcd\x1c\x4d\xe8\x06\xf1\xcb", + .ptext = "\x8e\xb6\x07\x9b\x7c\xe4\xa4\xa2" + 
"\x41\x6c\x24\x1d\xc0\x77\x4e\xd9" + "\x4a\xa4\x2c\xb6\xe4\x55\x02\x7f" + "\xc4\xec\xab\xc2\x5c\x63\x40\x92" + "\x38\x24\x62\xdb\x65\x82\x10\x7f" + "\x21\xa5\x39\x3a\x3f\x38\x7e\xad" + "\x6c\x7b\xc9\x3f\x89\x8f\xa8\x08" + "\xbd\x31\x57\x3c\x7a\x45\x67\x30" + "\xa9\x27\x58\x34\xbe\xe3\xa4\xc3" + "\xff\xc2\x9f\x43\xf0\x04\xba\x1e" + "\xb6\xf3\xc4\xce\x09\x7a\x2e\x42" + "\x7d\xad\x97\xc9\x77\x9a\x3a\x78" + "\x6c\xaf\x7c\x2a\x46\xb4\x41\x86" + "\x1a\x20\xf2\x5b\x1a\x60\xc9\xc4" + "\x47\x5d\x10\xa4\xd2\x15\x6a\x19" + "\x4f\xd5\x51\x37\xd5\x06\x70\x1a" + "\x3e\x78\xf0\x2e\xaa\xb5\x2a\xbd" + "\x83\x09\x7c\xcb\x29\xac\xd7\x9c" + "\xbf\x80\xfd\x9d\xd4\xcf\x64\xca" + "\xf8\xc9\xf1\x77\x2e\xbb\x39\x26" + "\xac\xd9\xbe\xce\x24\x7f\xbb\xa2" + "\x82\xba\xeb\x5f\x65\xc5\xf1\x56" + "\x8a\x52\x02\x4d\x45\x23\x6d\xeb" + "\xb0\x60\x7b\xd8\x6e\xb2\x98\xd2" + "\xaf\x76\xf2\x33\x9b\xf3\xbb\x95" + "\xc0\x50\xaa\xc7\x47\xf6\xb3\xf3" + "\x77\x16\xcb\x14\x95\xbf\x1d\x32" + "\x45\x0c\x75\x52\x2c\xe8\xd7\x31" + "\xc0\x87\xb0\x97\x30\x30\xc5\x5e" + "\x50\x70\x6e\xb0\x4b\x4e\x38\x19" + "\x46\xca\x38\x6a\xca\x7d\xfe\x05" + "\xc8\x80\x7c\x14\x6c\x24\xb5\x42" + "\x28\x04\x4c\xff\x98\x20\x08\x10" + "\x90\x31\x03\x78\xd8\xa1\xe6\xf9" + "\x52\xc2\xfc\x3e\xa7\x68\xce\xeb" + "\x59\x5d\xeb\xd8\x64\x4e\xf8\x8b" + "\x24\x62\xcf\x17\x36\x84\xc0\x72" + "\x60\x4f\x3e\x47\xda\x72\x3b\x0e" + "\xce\x0b\xa9\x9c\x51\xdc\xa5\xb9" + "\x71\x73\x08\x4e\x22\x31\xfd\x88" + "\x29\xfc\x8d\x17\x3a\x7a\xe5\xb9" + "\x0b\x9c\x6d\xdb\xce\xdb\xde\x81" + "\x73\x5a\x16\x9d\x3c\x72\x88\x51" + "\x10\x16\xf3\x11\x6e\x32\x5f\x4c" + "\x87\xce\x88\x2c\xd2\xaf\xf5\xb7" + "\xd8\x22\xed\xc9\xae\x68\x7f\xc5" + "\x30\x62\xbe\xc9\xe0\x27\xa1\xb5" + "\x57\x74\x36\x60\xb8\x6b\x8c\xec" + "\x14\xad\xed\x69\xc9\xd8\xa5\x5b" + "\x38\x07\x5b\xf3\x3e\x74\x48\x90" + "\x61\x17\x23\xdd\x44\xbc\x9d\x12" + "\x0a\x3a\x63\xb2\xab\x86\xb8\x67" + "\x85\xd6\xb2\x5d\xde\x4a\xc1\x73" + "\x2a\x7c\x53\x8e\xd6\x7d\x0e\xe4" + "\x3b\xab\xc5\x3d\x32\x79\x18\xb7" + "\xd6\x50\x4d\xf0\x8a\x37\xbb\xd3" + "\x8d\xd8\x08\xd7\x7d\xaa\x24\x52" + "\xf7\x90\xe3\xaa\xd6\x49\x7a\x47" + "\xec\x37\xad\x74\x8b\xc1\xb7\xfe" + "\x4f\x70\x14\x62\x22\x8c\x63\xc2" + "\x1c\x4e\x38\xc3\x63\xb7\xbf\x53" + "\xbd\x1f\xac\xa6\x94\xc5\x81\xfa" + "\xe0\xeb\x81\xe9\xd9\x1d\x32\x3c" + "\x85\x12\xca\x61\x65\xd1\x66\xd8" + "\xe2\x0e\xc3\xa3\xff\x0d\xd3\xee" + "\xdf\xcc\x3e\x01\xf5\x9b\x45\x5c" + "\x33\xb5\xb0\x8d\x36\x1a\xdf\xf8" + "\xa3\x81\xbe\xdb\x3d\x4b\xf6\xc6" + "\xdf\x7f\xb0\x89\xbd\x39\x32\x50" + "\xbb\xb2\xe3\x5c\xbb\x4b\x18\x98" + "\x08\x66\x51\xe7\x4d\xfb\xfc\x4e" + "\x22\x42\x6f\x61\xdb\x7f\x27\x88" + "\x29\x3f\x02\xa9\xc6\x83\x30\xcc" + "\x8b\xd5\x64\x7b\x7c\x76\x16\xbe" + "\xb6\x8b\x26\xb8\x83\x16\xf2\x6b" + "\xd1\xdc\x20\x6b\x42\x5a\xef\x7a" + "\xa9\x60\xb8\x1a\xd3\x0d\x4e\xcb" + "\x75\x6b\xc5\x80\x43\x38\x7f\xad" + "\x9c\x56\xd9\xc4\xf1\x01\x74\xf0" + "\x16\x53\x8d\x69\xbe\xf2\x5d\x92" + "\x34\x38\xc8\x84\xf9\x1a\xfc\x26" + "\x16\xcb\xae\x7d\x38\x21\x67\x74" + "\x4c\x40\xaa\x6b\x97\xe0\xb0\x2f" + "\xf5\x3e\xf6\xe2\x24\xc8\x22\xa4" + "\xa8\x88\x27\x86\x44\x75\x5b\x29" + "\x34\x08\x4b\xa1\xfe\x0c\x26\xe5" + "\xac\x26\xf6\x21\x0c\xfb\xde\x14" + "\xfe\xd7\xbe\xee\x48\x93\xd6\x99" + "\x56\x9c\xcf\x22\xad\xa2\x53\x41" + "\xfd\x58\xa1\x68\xdc\xc4\xef\x20" + "\xa1\xee\xcf\x2b\x43\xb6\x57\xd8" + "\xfe\x01\x80\x25\xdf\xd2\x35\x44" + "\x0d\x15\x15\xc3\xfc\x49\xbf\xd0" + "\xbf\x2f\x95\x81\x09\xa6\xb6\xd7" + "\x21\x03\xfe\x52\xb7\xa8\x32\x4d" + "\x75\x1e\x46\x44\xbc\x2b\x61\x04" + "\x1b\x1c\xeb\x39\x86\x8f\xe9\x49" + 
"\xce\x78\xa5\x5e\x67\xc5\xe9\xef" + "\x43\xf8\xf1\x35\x22\x43\x61\xc1" + "\x27\xb5\x09\xb2\xb8\xe1\x5e\x26" + "\xcc\xf3\x6f\xb2\xb7\x55\x30\x98" + "\x87\xfc\xe7\xa8\xc8\x94\x86\xa1" + "\xd9\xa0\x3c\x74\x16\xb3\x25\x98" + "\xba\xc6\x84\x4a\x27\xa6\x58\xfe" + "\xe1\x68\x04\x30\xc8\xdb\x44\x52" + "\x4e\xb2\xa4\x6f\xf7\x63\xf2\xd6" + "\x63\x36\x17\x04\xf8\x06\xdb\xeb" + "\x99\x17\xa5\x1b\x61\x90\xa3\x9f" + "\x05\xae\x3e\xe4\xdb\xc8\x1c\x8e" + "\x77\x27\x88\xdf\xd3\x22\x5a\xc5" + "\x9c\xd6\x22\xf8\xc4\xd8\x92\x9d" + "\x16\xcc\x54\x25\x3b\x6f\xdb\xc0" + "\x78\xd8\xe3\xb3\x03\x69\xd7\x5d" + "\xf8\x08\x04\x63\x61\x9d\x76\xf9" + "\xad\x1d\xc4\x30\x9f\x75\x89\x6b" + "\xfb\x62\xba\xae\xcb\x1b\x6c\xe5" + "\x7e\xea\x58\x6b\xae\xce\x9b\x48" + "\x4b\x80\xd4\x5e\x71\x53\xa7\x24" + "\x73\xca\xf5\x3e\xbb\x5e\xd3\x1c" + "\x33\xe3\xec\x5b\xa0\x32\x9d\x25" + "\x0e\x0c\x28\x29\x39\x51\xc5\x70" + "\xec\x60\x8f\x77\xfc\x06\x7a\x33" + "\x19\xd5\x7a\x6e\x94\xea\xa3\xeb" + "\x13\xa4\x2e\x09\xd8\x81\x65\x83" + "\x03\x63\x8b\xb5\xc9\x89\x98\x73" + "\x69\x53\x8e\xab\xf1\xd2\x2f\x67" + "\xbd\xa6\x16\x6e\xd0\x8b\xc1\x25" + "\x93\xd2\x50\x7c\x1f\xe1\x11\xd0" + "\x58\x0d\x2f\x72\xe7\x5e\xdb\xa2" + "\x55\x9a\xe0\x09\x21\xac\x61\x85" + "\x4b\x20\x95\x73\x63\x26\xe3\x83" + "\x4b\x5b\x40\x03\x14\xb0\x44\x16" + "\xbd\xe0\x0e\xb7\x66\x56\xd7\x30" + "\xb3\xfd\x8a\xd3\xda\x6a\xa7\x3d" + "\x98\x09\x11\xb7\x00\x06\x24\x5a" + "\xf7\x42\x94\xa6\x0e\xb1\x6d\x48" + "\x74\xb1\xa7\xe6\x92\x0a\x15\x9a" + "\xf5\xfa\x55\x1a\x6c\xdd\x71\x08" + "\xd0\xf7\x8d\x0e\x7c\x67\x4d\xc6" + "\xe6\xde\x78\x88\x88\x3c\x5e\x23" + "\x46\xd2\x25\xa4\xfb\xa3\x26\x3f" + "\x2b\xfd\x9c\x20\xda\x72\xe1\x81" + "\x8f\xe6\xae\x08\x1d\x67\x15\xde" + "\x86\x69\x1d\xc6\x1e\x6d\xb7\x5c" + "\xdd\x43\x72\x5a\x7d\xa7\xd8\xd7" + "\x1e\x66\xc5\x90\xf6\x51\x76\x91" + "\xb3\xe3\x39\x81\x75\x08\xfa\xc5" + "\x06\x70\x69\x1b\x2c\x20\x74\xe0" + "\x53\xb0\x0c\x9d\xda\xa9\x5b\xdd" + "\x1c\x38\x6c\x9e\x3b\xc4\x7a\x82" + "\x93\x9e\xbb\x75\xfb\x19\x4a\x55" + "\x65\x7a\x3c\xda\xcb\x66\x5c\x13" + "\x17\x97\xe8\xbd\xae\x24\xd9\x76" + "\xfb\x8c\x73\xde\xbd\xb4\x1b\xe0" + "\xb9\x2c\xe8\xe0\x1d\x3f\xa8\x2c" + "\x1e\x81\x5b\x77\xe7\xdf\x6d\x06" + "\x7c\x9a\xf0\x2b\x5d\xfc\x86\xd5" + "\xb1\xad\xbc\xa8\x73\x48\x61\x67" + "\xd6\xba\xc8\xe8\xe2\xb8\xee\x40" + "\x36\x22\x3e\x61\xf6\xc8\x16\xe4" + "\x0e\x88\xad\x71\x53\x58\xe1\x6c" + "\x8f\x4f\x89\x4b\x3e\x9c\x7f\xe9" + "\xad\xc2\x28\xc2\x3a\x29\xf3\xec" + "\xa9\x28\x39\xba\xc2\x86\xe1\x06" + "\xf3\x8b\xe3\x95\x0c\x87\xb8\x1b" + "\x72\x35\x8e\x8f\x6d\x18\xc8\x1c" + "\xa5\x5d\x57\x9d\x73\x8a\xbb\x9e" + "\x21\x05\x12\xd7\xe0\x21\x1c\x16" + "\x3a\x95\x85\xbc\xb0\x71\x0b\x36" + "\x6c\x44\x8d\xef\x3b\xec\x3f\x8e" + "\x24\xa9\xe3\xa7\x63\x23\xca\x09" + "\x62\x96\x79\x0c\x81\x05\x41\xf2" + "\x07\x20\x26\xe5\x8e\x10\x54\x03" + "\x05\x7b\xfe\x0c\xcc\x8c\x50\xe5" + "\xca\x33\x4d\x48\x7a\x03\xd5\x64" + "\x49\x09\xf2\x5c\x5d\xfe\x2b\x30" + "\xbf\x29\x14\x29\x8b\x9b\x7c\x96" + "\x47\x07\x86\x4d\x4e\x4d\xf1\x47" + "\xd1\x10\x2a\xa8\xd3\x15\x8c\xf2" + "\x2f\xf4\x3a\xdf\xd0\xa7\xcb\x5a" + "\xad\x99\x39\x4a\xdf\x60\xbe\xf9" + "\x91\x4e\xf5\x94\xef\xc5\x56\x32" + "\x33\x86\x78\xa3\xd6\x4c\x29\x7c" + "\xe8\xac\x06\xb5\xf5\x01\x5c\x9f" + "\x02\xc8\xe8\xbf\x5c\x1a\x7f\x4d" + "\x28\xa5\xb9\xda\xa9\x5e\xe7\x4b" + "\xf4\x3d\xe9\x1d\x28\xaa\x1a\x8a" + "\x76\xc8\x6c\x19\x61\x3c\x9e\x29" + "\xcd\xbe\xff\xe0\x1c\xb8\x67\xb5" + "\xa4\x46\xf8\xb9\x8a\xa2\xf6\x7c" + "\xef\x23\x73\x0c\xe9\x72\x0a\x0d" + "\x9b\x40\xd8\xfb\x0c\x9c\xab\xa8", + .ctext = 
"\xfc\x02\x83\x13\x73\x06\x70\x3f" + "\x71\x28\x98\x61\xe5\x2c\x45\x49" + "\x18\xa2\x0e\x17\xc9\xdb\x4d\xf6" + "\xbe\x05\x02\x35\xc1\x18\x61\x28" + "\xff\x28\x0a\xd9\x00\xb8\xed\xec" + "\x14\x80\x88\x56\xcf\x98\x32\xcc" + "\xb0\xee\xb4\x5e\x2d\x61\x59\xcb" + "\x48\xc9\x25\xaa\x7e\x5f\xe5\x4f" + "\x95\x8f\x5d\x47\xe8\xc3\x09\xb4" + "\xce\xe7\x74\xcd\xc6\x09\x5c\xfc" + "\xc7\x79\xc9\x39\xe4\xe3\x9b\x59" + "\x67\x61\x10\xc9\xb7\x7a\xa8\x11" + "\x59\xf6\x7a\x67\x1c\x3a\x70\x76" + "\x2e\x0e\xbd\x10\x93\x01\x06\xea" + "\x51\xc6\x5c\xa7\xda\xd1\x7d\x06" + "\x8b\x1d\x5b\xb6\x87\xf0\x32\xbe" + "\xff\x55\xaa\x58\x5a\x28\xd1\x64" + "\x45\x3b\x0b\x5c\xee\xc4\x12\x2d" + "\x1f\xb7\xa5\x73\xf5\x20\xf5\xa8" + "\x10\x9d\xd8\x16\xd2\x05\x4d\x49" + "\x99\x4a\x71\x56\xec\xa3\xc7\x27" + "\xb0\x98\xcd\x59\x3c\x8a\xd1\x9e" + "\x33\xa5\x92\xf2\xb7\x87\x23\x5d" + "\x53\x9a\x8e\x7c\x63\x57\x5e\x9a" + "\x21\x54\x7a\x3c\x5a\xd5\x68\x69" + "\x35\x17\x51\x06\x19\x82\x9d\x44" + "\x9e\x8a\x75\xc5\x16\x55\xa4\x78" + "\x95\x63\xc3\xf0\x91\x73\x77\x44" + "\x0c\xff\xb9\xb3\xa7\x5f\xcf\x2a" + "\xa2\x54\x9c\xe3\x8b\x7e\x9d\x65" + "\xe5\x64\x8b\xbe\x06\x3a\x90\x31" + "\xdb\x42\x78\xe9\xe6\x8a\xae\xba" + "\x8f\xfb\xc9\x3d\xd9\xc2\x3e\x57" + "\xd5\x58\xfe\x70\x44\xe5\x2a\xd5" + "\x87\xcf\x9f\x6a\x02\xde\x48\xe9" + "\x13\xed\x8d\x2b\xf2\xa1\x56\x07" + "\x36\x2d\xcf\xc3\x5c\xd4\x4b\x20" + "\xb0\xdf\x1a\x70\xed\x0a\xe4\x2e" + "\x9a\xfc\x88\xa1\xc4\x2d\xd6\xb8" + "\xf1\x6e\x2c\x5c\xdc\x0e\xb0\x21" + "\x2d\x76\xb8\xc3\x05\x4c\xf5\xc5" + "\x9a\x14\xab\x08\xc2\x67\x59\x30" + "\x7a\xef\xd8\x4a\x89\x49\xd4\xf0" + "\x22\x39\xf2\x61\xaa\x70\x36\xcf" + "\x65\xee\x43\x83\x2e\x32\xe4\xc9" + "\xc2\xf1\xc7\x08\x28\x59\x10\x6f" + "\x7a\xeb\x8f\x78\x9e\xdf\x07\x0f" + "\xca\xc7\x02\x6a\x2e\x2a\xf0\x64" + "\xfa\x4c\x8c\x4c\xfc\x13\x23\x63" + "\x54\xeb\x1d\x41\xdf\x88\xd6\x66" + "\xae\x5e\x31\x74\x5d\x84\x65\xb8" + "\x61\x1c\x88\x1b\x8f\xb6\x14\x4e" + "\x73\x23\x27\x71\x85\x04\x07\x59" + "\x18\xa3\x2b\x69\x2a\x42\x81\xbf" + "\x40\xf4\x40\xdf\x04\xb8\x6c\x2e" + "\x21\x5b\x22\x25\x61\x01\x96\xce" + "\xfb\xbc\x75\x25\x2c\x03\x55\xea" + "\xb6\x56\x31\x03\xc8\x98\x77\xd6" + "\x30\x19\x9e\x45\x05\xfd\xca\xdf" + "\xae\x89\x30\xa3\xc1\x65\x41\x67" + "\x12\x8e\xa4\x61\xd0\x87\x04\x0a" + "\xe6\xf3\x43\x3a\x38\xce\x22\x36" + "\x41\xdc\xe1\x7d\xd2\xa6\xe2\x66" + "\x21\x8d\xc9\x59\x73\x52\x34\xd8" + "\x1f\xf1\x87\x00\x9b\x12\x74\xeb" + "\xbb\xa9\x34\x0c\x8e\x79\x74\x64" + "\xbf\x94\x97\xe4\x94\xda\xf0\x39" + "\x66\xa8\xd9\x82\xe3\x11\x3d\xe7" + "\xb3\x9a\x40\x7a\x6f\x71\xc7\x0f" + "\x7b\x6d\x59\x79\x18\x2f\x11\x60" + "\x1e\xe0\xae\x1b\x1b\xb4\xad\x4d" + "\x63\xd9\x3e\xa0\x8f\xe3\x66\x8c" + "\xfe\x5a\x73\x07\x95\x27\x1a\x07" + "\x6e\xd6\x14\x3f\xbe\xc5\x99\x94" + "\xcf\x40\xf4\x39\x1c\xf2\x99\x5b" + "\xb7\xfb\xb4\x4e\x5f\x21\x10\x04" + "\x24\x08\xd4\x0d\x10\x7a\x2f\x52" + "\x7d\x91\xc3\x38\xd3\x16\xf0\xfd" + "\x53\xba\xda\x88\xa5\xf6\xc7\xfd" + "\x63\x4a\x9f\x48\xb5\x31\xc2\xe1" + "\x7b\x3e\xac\x8d\xc9\x95\x02\x92" + "\xcc\xbd\x0e\x15\x2d\x97\x08\x82" + "\xa6\x99\xbc\x2c\x96\x91\xde\xa4" + "\x9c\xf5\x2c\xef\x12\x29\xb0\x72" + "\x5f\x60\x5d\x3d\xf3\x85\x59\x79" + "\xac\x06\x63\x74\xcc\x1a\x8d\x0e" + "\xa7\x5f\xd9\x3e\x84\xf7\xbb\xde" + "\x06\xd9\x4b\xab\xee\xb2\x03\xbe" + "\x68\x49\x72\x84\x8e\xf8\x45\x2b" + "\x59\x99\x17\xd3\xe9\x32\x79\xc3" + "\x83\x4c\x7a\x6c\x71\x53\x8c\x09" + "\x76\xfb\x3e\x80\x99\xbc\x2c\x7d" + "\x42\xe5\x70\x08\x80\xc7\xaf\x15" + "\x90\xda\x98\x98\x81\x04\x1c\x4d" + "\x78\xf1\xf3\xcc\x1b\x3a\x7b\xef" + "\xea\xe1\xee\x0e\xd2\x32\xb6\x63" + 
"\xbf\xb2\xb5\x86\x8d\x16\xd3\x23" + "\x04\x59\x51\xbb\x17\x03\xc0\x07" + "\x93\xbf\x72\x58\x30\xf2\x0a\xa2" + "\xbc\x60\x86\x3b\x68\x91\x67\x14" + "\x10\x76\xda\xa3\x98\x2d\xfc\x8a" + "\xb8\x95\xf7\xd2\x8b\x97\x8b\xfc" + "\xf2\x9e\x86\x20\xb6\xdf\x93\x41" + "\x06\x5e\x37\x3e\xe2\xb8\xd5\x06" + "\x59\xd2\x8d\x43\x91\x5a\xed\x94" + "\x54\xc2\x77\xbc\x0b\xb4\x29\x80" + "\x22\x19\xe7\x35\x1f\x29\x4f\xd8" + "\x02\x98\xee\x83\xca\x4c\x94\xa3" + "\xec\xde\x4b\xf5\xca\x57\x93\xa3" + "\x72\x69\xfe\x27\x7d\x39\x24\x9a" + "\x60\x19\x72\xbe\x24\xb2\x2d\x99" + "\x8c\xb7\x32\xf8\x74\x77\xfc\x8d" + "\xb2\xc1\x7a\x88\x28\x26\xea\xb7" + "\xad\xf0\x38\x49\x88\x78\x73\xcd" + "\x01\xef\xb9\x30\x1a\x33\xa3\x24" + "\x9b\x0b\xc5\x89\x64\x3f\xbe\x76" + "\xd5\xa5\x28\x74\xa2\xc6\xa0\xa0" + "\xdd\x13\x81\x64\x2f\xd1\xab\x15" + "\xab\x13\xb5\x68\x59\xa4\x9f\x0e" + "\x1e\x0a\xaf\xf7\x0b\x6e\x6b\x0b" + "\xf7\x95\x4c\xbc\x1d\x40\x6d\x9c" + "\x08\x42\xef\x07\x03\xb7\xa3\xea" + "\x2a\x5f\xec\x41\x3c\x72\x31\x9d" + "\xdc\x6b\x3a\x5e\x35\x3d\x12\x09" + "\x27\xe8\x63\xbe\xcf\xb3\xbc\x01" + "\x2d\x0c\x86\xb2\xab\x4a\x69\xe5" + "\xf8\x45\x97\x76\x0e\x31\xe5\xc6" + "\x4c\x4f\x94\xa5\x26\x19\x9f\x1b" + "\xe1\xf4\x79\x04\xb4\x93\x92\xdc" + "\xa5\x2a\x66\x25\x0d\xb2\x9e\xea" + "\xa8\xf6\x02\x77\x2d\xd1\x3f\x59" + "\x5c\x04\xe2\x36\x52\x5f\xa1\x27" + "\x0a\x07\x56\xb6\x2d\xd5\x90\x32" + "\x64\xee\x3f\x42\x8f\x61\xf8\xa0" + "\xc1\x8b\x1e\x0b\xa2\x73\xa9\xf3" + "\xc9\x0e\xb1\x96\x3a\x67\x5f\x1e" + "\xd1\x98\x57\xa2\xba\xb3\x23\x9d" + "\xa3\xc6\x3c\x7d\x5e\x3e\xb3\xe8" + "\x80\xae\x2d\xda\x85\x90\x69\x3c" + "\xf0\xe7\xdd\x9e\x20\x10\x52\xdb" + "\xc3\xa0\x15\x73\xee\xb1\xf1\x0f" + "\xf1\xf8\x3f\x40\xe5\x17\x80\x4e" + "\x91\x95\xc7\xec\xd1\x9c\xd9\x1a" + "\x8b\xac\xec\xc9\x0c\x07\xf4\xdc" + "\x77\x2d\xa2\xc4\xf8\x27\xb5\x41" + "\x2f\x85\xa6\x48\xad\x2a\x58\xc5" + "\xea\xfa\x1c\xdb\xfd\xb7\x70\x45" + "\xfc\xad\x11\xaf\x05\xed\xbf\xb6" + "\x3c\xe1\x57\xb8\x72\x4a\xa0\x6b" + "\x40\xd3\xda\xa9\xbc\xa5\x02\x95" + "\x8c\xf0\x4e\x67\xb2\x58\x66\xea" + "\x58\x0e\xc4\x88\xbc\x1d\x3b\x15" + "\x17\xc8\xf5\xd0\x69\x08\x0a\x01" + "\x80\x2e\x9e\x69\x4c\x37\x0b\xba" + "\xfb\x1a\xa9\xc3\x5f\xec\x93\x7c" + "\x4f\x72\x68\x1a\x05\xa1\x32\xe1" + "\x16\x57\x9e\xa6\xe0\x42\xfa\x76" + "\xc2\xf6\xd3\x9b\x37\x0d\xa3\x58" + "\x30\x27\xe7\xea\xb1\xc3\x43\xfb" + "\x67\x04\x70\x86\x0a\x71\x69\x34" + "\xca\xb1\xe3\x4a\x56\xc9\x29\xd1" + "\x12\x6a\xee\x89\xfd\x27\x83\xdf" + "\x32\x1a\xc2\xe9\x94\xcc\x44\x2e" + "\x0f\x3e\xc8\xc1\x70\x5b\xb0\xe8" + "\x6d\x47\xe3\x39\x75\xd5\x45\x8a" + "\x48\x4c\x64\x76\x6f\xae\x24\x6f" + "\xae\x77\x33\x5b\xf5\xca\x9c\x30" + "\x2c\x27\x15\x5e\x9c\x65\xad\x2a" + "\x88\xb1\x36\xf6\xcd\x5e\x73\x72" + "\x99\x5c\xe2\xe4\xb8\x3e\x12\xfb" + "\x55\x86\xfa\xab\x53\x12\xdc\x6a" + "\xe3\xfe\x6a\xeb\x9b\x5d\xeb\x72" + "\x9d\xf1\xbb\x80\x80\x76\x2d\x57" + "\x11\xde\xcf\xae\x46\xad\xdb\xcd" + "\x62\x66\x3d\x7b\x7f\xcb\xc4\x43" + "\x81\x0c\x7e\xb9\xb7\x47\x1a\x40" + "\xfd\x08\x51\xbe\x01\x1a\xd8\x31" + "\x43\x5e\x24\x91\xa2\x53\xa1\xc5" + "\x8a\xe4\xbc\x00\x8e\xf7\x0c\x30" + "\xdf\x03\x34\x2f\xce\xe4\x2e\xda" + "\x2b\x87\xfc\xf8\x9b\x50\xd5\xb0" + "\x5b\x08\xc6\x17\xa0\xae\x6b\x24" + "\xe2\x1d\xd0\x47\xbe\xc4\x8f\x62" + "\x1d\x12\x26\xc7\x78\xd4\xf2\xa3" + "\xea\x39\x8c\xcb\x54\x3e\x2b\xb9" + "\x9a\x8f\x97\xcf\x68\x53\x40\x02" + "\x56\xac\x52\xbb\x62\x3c\xc6\x3f" + "\x3a\x53\x3c\xe8\x21\x9a\x60\x65" + "\x10\x6e\x59\xc3\x4f\xc3\x07\xc8" + "\x61\x1c\xea\x62\x6e\xa2\x5a\x12" + "\xd6\x10\x91\xbe\x5e\x58\x73\xbe" + "\x77\xb8\xb7\x98\xc7\x7e\x78\x9a", + 
.len = 1536, + }, { + .key = "\x60\xd5\x36\xb0\x8e\x5d\x0e\x5f" + "\x70\x47\x8c\xea\x87\x30\x1d\x58" + "\x2a\xb2\xe8\xc6\xcb\x60\xe7\x6f" + "\x56\x95\x83\x98\x38\x80\x84\x8a", + .klen = 32, + .iv = "\x43\xfe\x63\x3c\xdc\x9e\x0c\xa6" + "\xee\x9c\x0b\x97\x65\xc2\x56\x1d" + "\x5d\xd0\xbf\xa3\x9f\x1e\xfb\x78" + "\xbf\x51\x1b\x18\x73\x27\x27\x8c", + .ptext = "\x0b\x77\xd8\xa3\x8c\xa6\xb2\x2d" + "\x3e\xdd\xcc\x7c\x4a\x3e\x61\xc4" + "\x9a\x7f\x73\xb0\xb3\x29\x32\x61" + "\x13\x25\x62\xcc\x59\x4c\xf4\xdb" + "\xd7\xf5\xf4\xac\x75\x51\xb2\x83" + "\x64\x9d\x1c\x8b\xd1\x8b\x0c\x06" + "\xf1\x9f\xba\x9d\xae\x62\xd4\xd8" + "\x96\xbe\x3c\x4c\x32\xe4\x82\x44" + "\x47\x5a\xec\xb8\x8a\x5b\xd5\x35" + "\x57\x1e\x5c\x80\x6f\x77\xa9\xb9" + "\xf2\x4f\x71\x1e\x48\x51\x86\x43" + "\x0d\xd5\x5b\x52\x30\x40\xcd\xbb" + "\x2c\x25\xc1\x47\x8b\xb7\x13\xc2" + "\x3a\x11\x40\xfc\xed\x45\xa4\xf0" + "\xd6\xfd\x32\x99\x13\x71\x47\x2e" + "\x4c\xb0\x81\xac\x95\x31\xd6\x23" + "\xa4\x2f\xa9\xe8\x5a\x62\xdc\x96" + "\xcf\x49\xa7\x17\x77\x76\x8a\x8c" + "\x04\x22\xaf\xaf\x6d\xd9\x16\xba" + "\x35\x21\x66\x78\x3d\xb6\x65\x83" + "\xc6\xc1\x67\x8c\x32\xd6\xc0\xc7" + "\xf5\x8a\xfc\x47\xd5\x87\x09\x2f" + "\x51\x9d\x57\x6c\x29\x0b\x1c\x32" + "\x47\x6e\x47\xb5\xf3\x81\xc8\x82" + "\xca\x5d\xe3\x61\x38\xa0\xdc\xcc" + "\x35\x73\xfd\xb3\x92\x5c\x72\xd2" + "\x2d\xad\xf6\xcd\x20\x36\xff\x49" + "\x48\x80\x21\xd3\x2f\x5f\xe9\xd8" + "\x91\x20\x6b\xb1\x38\x52\x1e\xbc" + "\x88\x48\xa1\xde\xc0\xa5\x46\xce" + "\x9f\x32\x29\xbc\x2b\x51\x0b\xae" + "\x7a\x44\x4e\xed\xeb\x95\x63\x99" + "\x96\x87\xc9\x34\x02\x26\xde\x20" + "\xe4\xcb\x59\x0c\xb5\x55\xbd\x55" + "\x3f\xa9\x15\x25\xa7\x5f\xab\x10" + "\xbe\x9a\x59\x6c\xd5\x27\xf3\xf0" + "\x73\x4a\xb3\xe4\x08\x11\x00\xeb" + "\xf1\xae\xc8\x0d\xef\xcd\xb5\xfc" + "\x0d\x7e\x03\x67\xad\x0d\xec\xf1" + "\x9a\xfd\x31\x60\x3e\xa2\xfa\x1c" + "\x93\x79\x31\x31\xd6\x66\x7a\xbd" + "\x85\xfd\x22\x08\x00\xae\x72\x10" + "\xd6\xb0\xf4\xb8\x4a\x72\x5b\x9c" + "\xbf\x84\xdd\xeb\x13\x05\x28\xb7" + "\x61\x60\xfd\x7f\xf0\xbe\x4d\x18" + "\x7d\xc9\xba\xb0\x01\x59\x74\x18" + "\xe4\xf6\xa6\x74\x5d\x3f\xdc\xa0" + "\x9e\x57\x93\xbf\x16\x6c\xf6\xbd" + "\x93\x45\x38\x95\xb9\x69\xe9\x62" + "\x21\x73\xbd\x81\x73\xac\x15\x74" + "\x9e\x68\x28\x91\x38\xb7\xd4\x47" + "\xc7\xab\xc9\x14\xad\x52\xe0\x4c" + "\x17\x1c\x42\xc1\xb4\x9f\xac\xcc" + "\xc8\x12\xea\xa9\x9e\x30\x21\x14" + "\xa8\x74\xb4\x74\xec\x8d\x40\x06" + "\x82\xb7\x92\xd7\x42\x5b\xf2\xf9" + "\x6a\x1e\x75\x6e\x44\x55\xc2\x8d" + "\x73\x5b\xb8\x8c\x3c\xef\x97\xde" + "\x24\x43\xb3\x0e\xba\xad\x63\x63" + "\x16\x0a\x77\x03\x48\xcf\x02\x8d" + "\x76\x83\xa3\xba\x73\xbe\x80\x3f" + "\x8f\x6e\x76\x24\xc1\xff\x2d\xb4" + "\x20\x06\x9b\x67\xea\x29\xb5\xe0" + "\x57\xda\x30\x9d\x38\xa2\x7d\x1e" + "\x8f\xb9\xa8\x17\x64\xea\xbe\x04" + "\x84\xd1\xce\x2b\xfd\x84\xf9\x26" + "\x1f\x26\x06\x5c\x77\x6d\xc5\x9d" + "\xe6\x37\x76\x60\x7d\x3e\xf9\x02" + "\xba\xa6\xf3\x7f\xd3\x95\xb4\x0e" + "\x52\x1c\x6a\x00\x8f\x3a\x0b\xce" + "\x30\x98\xb2\x63\x2f\xff\x2d\x3b" + "\x3a\x06\x65\xaf\xf4\x2c\xef\xbb" + "\x88\xff\x2d\x4c\xa9\xf4\xff\x69" + "\x9d\x46\xae\x67\x00\x3b\x40\x94" + "\xe9\x7a\xf7\x0b\xb7\x3c\xa2\x2f" + "\xc3\xde\x5e\x29\x01\xde\xca\xfa" + "\xc6\xda\xd7\x19\xc7\xde\x4a\x16" + "\x93\x6a\xb3\x9b\x47\xe9\xd2\xfc" + "\xa1\xc3\x95\x9c\x0b\xa0\x2b\xd4" + "\xd3\x1e\xd7\x21\x96\xf9\x1e\xf4" + "\x59\xf4\xdf\x00\xf3\x37\x72\x7e" + "\xd8\xfd\x49\xd4\xcd\x61\x7b\x22" + "\x99\x56\x94\xff\x96\xcd\x9b\xb2" + "\x76\xca\x9f\x56\xae\x04\x2e\x75" + "\x89\x4e\x1b\x60\x52\xeb\x84\xf4" + "\xd1\x33\xd2\x6c\x09\xb1\x1c\x43" + 
"\x08\x67\x02\x01\xe3\x64\x82\xee" + "\x36\xcd\xd0\x70\xf1\x93\xd5\x63" + "\xef\x48\xc5\x56\xdb\x0a\x35\xfe" + "\x85\x48\xb6\x97\x97\x02\x43\x1f" + "\x7d\xc9\xa8\x2e\x71\x90\x04\x83" + "\xe7\x46\xbd\x94\x52\xe3\xc5\xd1" + "\xce\x6a\x2d\x6b\x86\x9a\xf5\x31" + "\xcd\x07\x9c\xa2\xcd\x49\xf5\xec" + "\x01\x3e\xdf\xd5\xdc\x15\x12\x9b" + "\x0c\x99\x19\x7b\x2e\x83\xfb\xd8" + "\x89\x3a\x1c\x1e\xb4\xdb\xeb\x23" + "\xd9\x42\xae\x47\xfc\xda\x37\xe0" + "\xd2\xb7\x47\xd9\xe8\xb5\xf6\x20" + "\x42\x8a\x9d\xaf\xb9\x46\x80\xfd" + "\xd4\x74\x6f\x38\x64\xf3\x8b\xed" + "\x81\x94\x56\xe7\xf1\x1a\x64\x17" + "\xd4\x27\x59\x09\xdf\x9b\x74\x05" + "\x79\x6e\x13\x29\x2b\x9e\x1b\x86" + "\x73\x9f\x40\xbe\x6e\xff\x92\x4e" + "\xbf\xaa\xf4\xd0\x88\x8b\x6f\x73" + "\x9d\x8b\xbf\xe5\x8a\x85\x45\x67" + "\xd3\x13\x72\xc6\x2a\x63\x3d\xb1" + "\x35\x7c\xb4\x38\xbb\x31\xe3\x77" + "\x37\xad\x75\xa9\x6f\x84\x4e\x4f" + "\xeb\x5b\x5d\x39\x6d\xed\x0a\xad" + "\x6c\x1b\x8e\x1f\x57\xfa\xc7\x7c" + "\xbf\xcf\xf2\xd1\x72\x3b\x70\x78" + "\xee\x8e\xf3\x4f\xfd\x61\x30\x9f" + "\x56\x05\x1d\x7d\x94\x9b\x5f\x8c" + "\xa1\x0f\xeb\xc3\xa9\x9e\xb8\xa0" + "\xc6\x4e\x1e\xb1\xbc\x0a\x87\xa8" + "\x52\xa9\x1e\x3d\x58\x8e\xc6\x95" + "\x85\x58\xa3\xc3\x3a\x43\x32\x50" + "\x6c\xb3\x61\xe1\x0c\x7d\x02\x63" + "\x5f\x8b\xdf\xef\x13\xf8\x66\xea" + "\x89\x00\x1f\xbd\x5b\x4c\xd5\x67" + "\x8f\x89\x84\x33\x2d\xd3\x70\x94" + "\xde\x7b\xd4\xb0\xeb\x07\x96\x98" + "\xc5\xc0\xbf\xc8\xcf\xdc\xc6\x5c" + "\xd3\x7d\x78\x30\x0e\x14\xa0\x86" + "\xd7\x8a\xb7\x53\xa3\xec\x71\xbf" + "\x85\xf2\xea\xbd\x77\xa6\xd1\xfd" + "\x5a\x53\x0c\xc3\xff\xf5\x1d\x46" + "\x37\xb7\x2d\x88\x5c\xeb\x7a\x0c" + "\x0d\x39\xc6\x40\x08\x90\x1f\x58" + "\x36\x12\x35\x28\x64\x12\xe7\xbb" + "\x50\xac\x45\x15\x7b\x16\x23\x5e" + "\xd4\x11\x2a\x8e\x17\x47\xe1\xd0" + "\x69\xc6\xd2\x5c\x2c\x76\xe6\xbb" + "\xf7\xe7\x34\x61\x8e\x07\x36\xc8" + "\xce\xcf\x3b\xeb\x0a\x55\xbd\x4e" + "\x59\x95\xc9\x32\x5b\x79\x7a\x86" + "\x03\x74\x4b\x10\x87\xb3\x60\xf6" + "\x21\xa4\xa6\xa8\x9a\xc9\x3a\x6f" + "\xd8\x13\xc9\x18\xd4\x38\x2b\xc2" + "\xa5\x7e\x6a\x09\x0f\x06\xdf\x53" + "\x9a\x44\xd9\x69\x2d\x39\x61\xb7" + "\x1c\x36\x7f\x9e\xc6\x44\x9f\x42" + "\x18\x0b\x99\xe6\x27\xa3\x1e\xa6" + "\xd0\xb9\x9a\x2b\x6f\x60\x75\xbd" + "\x52\x4a\x91\xd4\x7b\x8f\x95\x9f" + "\xdd\x74\xed\x8b\x20\x00\xdd\x08" + "\x6e\x5b\x61\x7b\x06\x6a\x19\x84" + "\x1c\xf9\x86\x65\xcd\x1c\x73\x3f" + "\x28\x5c\x8a\x93\x1a\xf3\xa3\x6c" + "\x6c\xa9\x7c\xea\x3c\xd4\x15\x45" + "\x7f\xbc\xe3\xbb\x42\xf0\x2e\x10" + "\xcd\x0c\x8b\x44\x1a\x82\x83\x0c" + "\x58\xb1\x24\x28\xa0\x11\x2f\x63" + "\xa5\x82\xc5\x9f\x86\x42\xf4\x4d" + "\x89\xdb\x76\x4a\xc3\x7f\xc4\xb8" + "\xdd\x0d\x14\xde\xd2\x62\x02\xcb" + "\x70\xb7\xee\xf4\x6a\x09\x12\x5e" + "\xd1\x26\x1a\x2c\x20\x71\x31\xef" + "\x7d\x65\x57\x65\x98\xff\x8b\x02" + "\x9a\xb5\xa4\xa1\xaf\x03\xc4\x50" + "\x33\xcf\x1b\x25\xfa\x7a\x79\xcc" + "\x55\xe3\x21\x63\x0c\x6d\xeb\x5b" + "\x1c\xad\x61\x0b\xbd\xb0\x48\xdb" + "\xb3\xc8\xa0\x87\x7f\x8b\xac\xfd" + "\xd2\x68\x9e\xb4\x11\x3c\x6f\xb1" + "\xfe\x25\x7d\x84\x5a\xae\xc9\x31" + "\xc3\xe5\x6a\x6f\xbc\xab\x41\xd9" + "\xde\xce\xf9\xfa\xd5\x7c\x47\xd2" + "\x66\x30\xc9\x97\xf2\x67\xdf\x59" + "\xef\x4e\x11\xbc\x4e\x70\xe3\x46" + "\x53\xbe\x16\x6d\x33\xfb\x57\x98" + "\x4e\x34\x79\x3b\xc7\x3b\xaf\x94" + "\xc1\x87\x4e\x47\x11\x1b\x22\x41" + "\x99\x12\x61\xe0\xe0\x8c\xa9\xbd" + "\x79\xb6\x06\x4d\x90\x3b\x0d\x30" + "\x1a\x00\xaa\x0e\xed\x7c\x16\x2f" + "\x0d\x1a\xfb\xf8\xad\x51\x4c\xab" + "\x98\x4c\x80\xb6\x92\x03\xcb\xa9" + "\x99\x9d\x16\xab\x43\x8c\x3f\x52" + "\x96\x53\x63\x7e\xbb\xd2\x76\xb7" + 
"\x6b\x77\xab\x52\x80\x33\xe3\xdf" + "\x4b\x3c\x23\x1a\x33\xe1\x43\x40" + "\x39\x1a\xe8\xbd\x3c\x6a\x77\x42" + "\x88\x9f\xc6\xaa\x65\x28\xf2\x1e" + "\xb0\x7c\x8e\x10\x41\x31\xe9\xd5" + "\x9d\xfd\x28\x7f\xfb\x61\xd3\x39" + "\x5f\x7e\xb4\xfb\x9c\x7d\x98\xb7" + "\x37\x2f\x18\xd9\x3b\x83\xaf\x4e" + "\xbb\xd5\x49\x69\x46\x93\x3a\x21" + "\x46\x1d\xad\x84\xb5\xe7\x8c\xff" + "\xbf\x81\x7e\x22\xf6\x88\x8c\x82" + "\xf5\xde\xfe\x18\xc9\xfb\x58\x07" + "\xe4\x68\xff\x9c\xf4\xe0\x24\x20" + "\x90\x92\x01\x49\xc2\x38\xe1\x7c" + "\xac\x61\x0b\x96\x36\xa4\x77\xe9" + "\x29\xd4\x97\xae\x15\x13\x7c\x6c" + "\x2d\xf1\xc5\x83\x97\x02\xa8\x2e" + "\x0b\x0f\xaf\xb5\x42\x18\x8a\x8c" + "\xb8\x28\x85\x28\x1b\x2a\x12\xa5" + "\x4b\x0a\xaf\xd2\x72\x37\x66\x23" + "\x28\xe6\x71\xa0\x77\x85\x7c\xff" + "\xf3\x8d\x2f\x0c\x33\x30\xcd\x7f" + "\x61\x64\x23\xb2\xe9\x79\x05\xb8" + "\x61\x47\xb1\x2b\xda\xf7\x9a\x24" + "\x94\xf6\xcf\x07\x78\xa2\x80\xaa" + "\x6e\xe9\x58\x97\x19\x0c\x58\x73" + "\xaf\xee\x2d\x6e\x26\x67\x18\x8a" + "\xc6\x6d\xf6\xbc\x65\xa9\xcb\xe7" + "\x53\xf1\x61\x97\x63\x52\x38\x86" + "\x0e\xdd\x33\xa5\x30\xe9\x9f\x32" + "\x43\x64\xbc\x2d\xdc\x28\x43\xd8" + "\x6c\xcd\x00\x2c\x87\x9a\x33\x79" + "\xbd\x63\x6d\x4d\xf9\x8a\x91\x83" + "\x9a\xdb\xf7\x9a\x11\xe1\xd1\x93" + "\x4a\x54\x0d\x51\x38\x30\x84\x0b" + "\xc5\x29\x8d\x92\x18\x6c\x28\xfe" + "\x1b\x07\x57\xec\x94\x74\x0b\x2c" + "\x21\x01\xf6\x23\xf9\xb0\xa0\xaf" + "\xb1\x3e\x2e\xa8\x0d\xbc\x2a\x68" + "\x59\xde\x0b\x2d\xde\x74\x42\xa1" + "\xb4\xce\xaf\xd8\x42\xeb\x59\xbd" + "\x61\xcc\x27\x28\xc6\xf2\xde\x3e" + "\x68\x64\x13\xd3\xc3\xc0\x31\xe0" + "\x5d\xf9\xb4\xa1\x09\x20\x46\x8b" + "\x48\xb9\x27\x62\x00\x12\xc5\x03" + "\x28\xfd\x55\x27\x1c\x31\xfc\xdb" + "\xc1\xcb\x7e\x67\x91\x2e\x50\x0c" + "\x61\xf8\x9f\x31\x26\x5a\x3d\x2e" + "\xa0\xc7\xef\x2a\xb6\x24\x48\xc9" + "\xbb\x63\x99\xf4\x7c\x4e\xc5\x94" + "\x99\xd5\xff\x34\x93\x8f\x31\x45" + "\xae\x5e\x7b\xfd\xf4\x81\x84\x65" + "\x5b\x41\x70\x0b\xe5\xaa\xec\x95" + "\x6b\x3d\xe3\xdc\x12\x78\xf8\x28" + "\x26\xec\x3a\x64\xc4\xab\x74\x97" + "\x3d\xcf\x21\x7d\xcf\x59\xd3\x15" + "\x47\x94\xe4\xd9\x48\x4c\x02\x49" + "\x68\x50\x22\x16\x96\x2f\xc4\x23" + "\x80\x47\x27\xd1\xee\x10\x3b\xa7" + "\x19\xae\xe1\x40\x5f\x3a\xde\x5d" + "\x97\x1c\x59\xce\xe1\xe7\x32\xa7" + "\x20\x89\xef\x44\x22\x38\x3c\x14" + "\x99\x3f\x1b\xd6\x37\xfe\x93\xbf" + "\x34\x13\x86\xd7\x9b\xe5\x2a\x37" + "\x72\x16\xa4\xdf\x7f\xe4\xa4\x66" + "\x9d\xf2\x0b\x29\xa1\xe2\x9d\x36" + "\xe1\x9d\x56\x95\x73\xe1\x91\x58" + "\x0f\x64\xf8\x90\xbb\x0c\x48\x0f" + "\xf5\x52\xae\xd9\xeb\x95\xb7\xdd" + "\xae\x0b\x20\x55\x87\x3d\xf0\x69" + "\x3c\x0a\x54\x61\xea\x00\xbd\xba" + "\x5f\x7e\x25\x8c\x3e\x61\xee\xb2" + "\x1a\xc8\x0e\x0b\xa5\x18\x49\xf2" + "\x6e\x1d\x3f\x83\xc3\xf1\x1a\xcb" + "\x9f\xc9\x82\x4e\x7b\x26\xfd\x68" + "\x28\x25\x8d\x22\x17\xab\xf8\x4e" + "\x1a\xa9\x81\x48\xb0\x9f\x52\x75" + "\xe4\xef\xdd\xbd\x5b\xbe\xab\x3c" + "\x43\x76\x23\x62\xce\xb8\xc2\x5b" + "\xc6\x31\xe6\x81\xb4\x42\xb2\xfd" + "\xf3\x74\xdd\x02\x3c\xa0\xd7\x97" + "\xb0\xe7\xe9\xe0\xce\xef\xe9\x1c" + "\x09\xa2\x6d\xd3\xc4\x60\xd6\xd6" + "\x9e\x54\x31\x45\x76\xc9\x14\xd4" + "\x95\x17\xe9\xbe\x69\x92\x71\xcb" + "\xde\x7c\xf1\xbd\x2b\xef\x8d\xaf" + "\x51\xe8\x28\xec\x48\x7f\xf8\xfa" + "\x9f\x9f\x5e\x52\x61\xc3\xfc\x9a" + "\x7e\xeb\xe3\x30\xb6\xfe\xc4\x4a" + "\x87\x1a\xff\x54\x64\xc7\xaa\xa2" + "\xfa\xb7\xb2\xe7\x25\xce\x95\xb4" + "\x15\x93\xbd\x24\xb6\xbc\xe4\x62" + "\x93\x7f\x44\x40\x72\xcb\xfb\xb2" + "\xbf\xe8\x03\xa5\x87\x12\x27\xfd" + "\xc6\x21\x8a\x8f\xc2\x48\x48\xb9" + "\x6b\xb6\xf0\xf0\x0e\x0a\x0e\xa4" + 
"\x40\xa9\xd8\x23\x24\xd0\x7f\xe2" + "\xf9\xed\x76\xf0\x91\xa5\x83\x3c" + "\x55\xe1\x92\xb8\xb6\x32\x9e\x63" + "\x60\x81\x75\x29\x9e\xce\x2a\x70" + "\x28\x0c\x87\xe5\x46\x73\x76\x66" + "\xbc\x4b\x6c\x37\xc7\xd0\x1a\xa0" + "\x9d\xcf\x04\xd3\x8c\x42\xae\x9d" + "\x35\x5a\xf1\x40\x4c\x4e\x81\xaa" + "\xfe\xd5\x83\x4f\x29\x19\xf3\x6c" + "\x9e\xd0\x53\xe5\x05\x8f\x14\xfb" + "\x68\xec\x0a\x3a\x85\xcd\x3e\xb4" + "\x4a\xc2\x5b\x92\x2e\x0b\x58\x64" + "\xde\xca\x64\x86\x53\xdb\x7f\x4e" + "\x54\xc6\x5e\xaa\xe5\x82\x3b\x98" + "\x5b\x01\xa7\x1f\x7b\x3d\xcc\x19" + "\xf1\x11\x02\x64\x09\x25\x7c\x26" + "\xee\xad\x50\x68\x31\x26\x16\x0f" + "\xb6\x7b\x6f\xa2\x17\x1a\xba\xbe" + "\xc3\x60\xdc\xd2\x44\xe0\xb4\xc4" + "\xfe\xff\x69\xdb\x60\xa6\xaf\x39" + "\x0a\xbd\x6e\x41\xd1\x9f\x87\x71" + "\xcc\x43\xa8\x47\x10\xbc\x2b\x7d" + "\x40\x12\x43\x31\xb8\x12\xe0\x95" + "\x6f\x9d\xf8\x75\x51\x3d\x61\xbe" + "\xa0\xd1\x0b\x8d\x50\xc7\xb8\xe7" + "\xab\x03\xda\x41\xab\xc5\x4e\x33" + "\x5a\x63\x94\x90\x22\x72\x54\x26" + "\x93\x65\x99\x45\x55\xd3\x55\x56" + "\xc5\x39\xe4\xb4\xb1\xea\xd8\xf9" + "\xb5\x31\xf7\xeb\x80\x1a\x9e\x8d" + "\xd2\x40\x01\xea\x33\xb9\xf2\x7a" + "\x43\x41\x72\x0c\xbf\x20\xab\xf7" + "\xfa\x65\xec\x3e\x35\x57\x1e\xef" + "\x2a\x81\xfa\x10\xb2\xdb\x8e\xfa" + "\x7f\xe7\xaf\x73\xfc\xbb\x57\xa2" + "\xaf\x6f\x41\x11\x30\xd8\xaf\x94" + "\x53\x8d\x4c\x23\xa5\x20\x63\xcf" + "\x0d\x00\xe0\x94\x5e\x92\xaa\xb5" + "\xe0\x4e\x96\x3c\xf4\x26\x2f\xf0" + "\x3f\xd7\xed\x75\x2c\x63\xdf\xc8" + "\xfb\x20\xb5\xae\x44\x83\xc0\xab" + "\x05\xf9\xbb\xa7\x62\x7d\x21\x5b" + "\x04\x80\x93\x84\x5f\x1d\x9e\xcd" + "\xa2\x07\x7e\x22\x2f\x55\x94\x23" + "\x74\x35\xa3\x0f\x03\xbe\x07\x62" + "\xe9\x16\x69\x7e\xae\x38\x0e\x9b" + "\xad\x6e\x83\x90\x21\x10\xb8\x07" + "\xdc\xc1\x44\x20\xa5\x88\x00\xdc" + "\xe1\x82\x16\xf1\x0c\xdc\xed\x8c" + "\x32\xb5\x49\xab\x11\x41\xd5\xd2" + "\x35\x2c\x70\x73\xce\xeb\xe3\xd6" + "\xe4\x7d\x2c\xe8\x8c\xec\x8a\x92" + "\x50\x87\x51\xbd\x2d\x9d\xf2\xf0" + "\x3c\x7d\xb1\x87\xf5\x01\xb0\xed" + "\x02\x5a\x20\x4d\x43\x08\x71\x49" + "\x77\x72\x9b\xe6\xef\x30\xc9\xa2" + "\x66\x66\xb8\x68\x9d\xdf\xc6\x16" + "\xa5\x78\xee\x3c\x47\xa6\x7a\x31" + "\x07\x6d\xce\x7b\x86\xf8\xb2\x31" + "\xa8\xa4\x77\x3c\x63\x36\xe8\xd3" + "\x7d\x40\x56\xd8\x48\x56\x9e\x3e" + "\x56\xf6\x3d\xd2\x12\x6e\x35\x29" + "\xd4\x7a\xdb\xff\x97\x4c\xeb\x3c" + "\x28\x2a\xeb\xe9\x43\x40\x61\x06" + "\xb8\xa8\x6d\x18\xc8\xbc\xc7\x23" + "\x53\x2b\x8b\xcc\xce\x88\xdf\xf8" + "\xff\xf8\x94\xe4\x5c\xee\xcf\x39" + "\xe0\xf6\x1a\xae\xf2\xd5\x41\x6a" + "\x09\x5a\x50\x66\xc4\xf4\x66\xdc" + "\x6a\x69\xee\xc8\x47\xe6\x87\x52" + "\x9e\x28\xe4\x39\x02\x0d\xc4\x7e" + "\x18\xe6\xc6\x09\x07\x03\x30\xb9" + "\xd1\xb0\x48\xe6\x80\xe8\x8c\xe6" + "\xc7\x2c\x33\xca\x64\xe5\xc0\x6e" + "\xac\x14\x4b\xe1\xf6\xeb\xce\xe4" + "\xc1\x8c\xea\x5b\x8d\x3c\x86\x91" + "\xd1\xd7\x16\x9c\x09\x9c\x6a\x51" + "\xe5\xcd\xe3\xb0\x33\x1f\x03\xcd" + "\xe5\xd8\x40\x9b\xdc\x29\xbe\xfa" + "\x24\xcc\xf1\x55\x68\x3a\x89\x0d" + "\x08\x48\xfd\x9b\x47\x41\x10\xae" + "\x53\x3a\x83\x87\xd4\x89\xe7\x38" + "\x47\xee\xd7\xbe\xe2\x58\x37\xd2" + "\xfc\x21\x1d\x20\xa5\x2d\x69\x0c" + "\x36\x5b\x2f\xcd\xa1\xa6\xe4\xa1" + "\x00\x4d\xf7\xc8\x2d\xc7\x16\x6c" + "\x6d\xad\x32\x8c\x8f\x74\xf9\xfa" + "\x78\x1c\x9a\x0f\x6e\x93\x9c\x20" + "\x43\xb9\xe4\xda\xc4\xc7\x90\x47" + "\x86\x68\xb7\x6f\x82\x59\x4a\x30" + "\xf1\xfd\x31\x0f\xa1\xea\x9b\x6b" + "\x18\x5c\x39\xb0\xc7\x80\x64\xff" + "\x6d\x5b\xb4\x8b\xba\x90\xea\x4e" + "\x9a\x04\xd2\x68\x18\x50\xb5\x91" + "\x45\x4f\x58\x5a\xe5\xc6\x7c\xab" + "\x61\x3e\x3d\xec\x18\x87\xfc\xea" + 
"\x26\x35\x4c\x99\x8a\x3f\x00\x7b" + "\xf5\x89\x62\xda\xdd\xf1\x43\xef" + "\x2c\x1d\x92\xfa\x9a\xd0\x37\x03" + "\x69\x9c\xd8\x1f\x41\x44\xb7\x73" + "\x54\x14\x91\x12\x41\x41\x54\xa2" + "\x91\x55\xb6\xf7\x23\x41\xc9\xc2" + "\x5b\x53\xf2\x61\x63\x0d\xa9\x87" + "\x1a\xbb\x11\x1f\x3c\xbb\xa8\x1f" + "\xe2\x66\x56\x88\x06\x3c\xd2\x0f" + "\x3b\xc4\xd6\x8c\xbe\x54\x9f\xa8" + "\x9c\x89\xfb\x88\x05\xef\xcd\xe7" + "\xc1\xc4\x21\x36\x22\x8d\x9a\x5d" + "\x1b\x1e\x4a\xc0\x89\xdd\x76\x16" + "\x5a\xce\xcd\x1e\x6a\x1f\xa0\x2b" + "\x83\xf6\x5e\x28\x8e\x65\xb5\x86" + "\x72\x8f\xc5\xf2\x54\x81\x10\x8d" + "\x63\x7b\x42\x7d\x06\x08\x16\xb3" + "\xb0\x60\x65\x41\x49\xdb\x0d\xc1" + "\xe2\xef\x72\x72\x06\xe7\x60\x5c" + "\x95\x1c\x7d\x52\xec\x82\xee\xd3" + "\x5b\xab\x61\xa4\x1f\x61\x64\x0c" + "\x28\x32\x21\x7a\x81\xe7\x81\xf3" + "\xdb\xc0\x18\xd9\xae\x0b\x3c\x9a" + "\x58\xec\x70\x4f\x40\x25\x2b\xba" + "\x96\x59\xac\x34\x45\x29\xc6\x57" + "\xc1\xc3\x93\x60\x77\x92\xbb\x83" + "\x8a\xa7\x72\x45\x2a\xc9\x35\xe7" + "\x66\xd6\xa9\xe9\x43\x87\x20\x11" + "\x6a\x2f\x87\xac\xe0\x93\x82\xe5" + "\x6c\x57\xa9\x4c\x9e\x56\x57\x33" + "\x1c\xd8\x7e\x25\x27\x41\x89\x97" + "\xea\xa5\x56\x02\x5b\x93\x13\x46" + "\xdc\x53\x3d\x95\xef\xaf\x9f\xf0" + "\x0a\x8a\xfe\x0c\xbf\xf0\x25\x5f" + "\xb4\x9f\x1b\x72\x9c\x37\xba\x46" + "\x4e\xcc\xcc\x02\x5c\xec\x3f\x98" + "\xff\x56\x1a\xc2\x7a\x65\x8f\xf6" + "\xd2\x81\x37\x7a\x0a\xfc\x79\xb9" + "\xcb\x8c\xc8\x1a\xd0\xba\x5d\x55" + "\xbc\x6d\x2e\xb2\x2f\x75\x29\x3f" + "\x1a\x4b\xa8\xd7\xe8\xf6\xf4\x2a" + "\xa5\xa1\x68\xec\xf3\xd5\xdd\x0f" + "\xad\x57\xae\x98\x83\xd5\x92\x4e" + "\x76\x86\x8e\x5e\x4b\x87\x7b\xf7" + "\x2d\x79\x3f\x12\x6a\x24\x58\xc8" + "\xab\x9a\x65\x75\x82\x6f\xa5\x39" + "\x72\xb0\xdf\x93\xb5\xa2\xf3\xdd" + "\x1f\x32\xfa\xdb\xfe\x1b\xbf\x0a" + "\xd9\x95\xdd\x02\xf1\x23\x54\xb1" + "\xa5\xbb\x24\x04\x5c\x2a\x97\x92" + "\xe6\xe0\x10\x61\xe3\x46\xc7\x0c" + "\xcb\xbc\x51\x9a\x35\x16\xd9\x42" + "\x62\xb3\x5e\xa4\x3c\x84\xa0\x7f" + "\xb8\x7f\x70\xd1\x8b\x03\xdf\x27" + "\x32\x06\x3f\x12\x23\x19\x22\x82" + "\x2d\x37\xa5\x00\x31\x9b\xa9\x21" + "\x8e\x34\x8c\x8e\x4f\xe8\xd4\x63" + "\x6c\xb2\xa9\x6e\xf6\x7c\x96\xf1" + "\x0e\x64\xab\x14\x3d\x8f\x74\xb3" + "\x35\x79\x84\x78\x06\x68\x97\x30" + "\xe0\x22\x55\xd6\xc5\x5b\x38\xb2" + "\x75\x24\x0c\x52\xb6\x57\xcc\x0a" + "\xbd\x3c\xd0\x73\x47\xd1\x25\xd6" + "\x1c\xfd\x27\x05\x3f\x70\xe1\xa7" + "\x69\x3b\xee\xc9\x9f\xfd\x2a\x7e" + "\xab\x58\xe6\x0b\x35\x5e\x52\xf9" + "\xff\xac\x5b\x82\x88\xa7\x65\xbc" + "\x61\x29\xdc\xa1\x94\x42\xd1\xd3" + "\xa0\xd8\xba\x3b\x49\xc8\xa7\xce" + "\x01\x6c\xb7\x3f\xe3\x98\x4d\xd1" + "\x9f\x46\x0d\xb3\xf2\x43\x33\x49" + "\xb7\x27\xbd\xba\xcc\x3f\x09\x56" + "\xfa\x64\x18\xb8\x17\x28\xde\x0d" + "\x29\xfa\x1f\xad\x60\x3b\x90\xa7" + "\x05\x9f\x4c\xc4\xdc\x05\x3b\x17" + "\x58\xea\x99\xfd\x6b\x8a\x93\x77" + "\xa5\x44\xbd\x8d\x29\x44\x29\x89" + "\x52\x1d\x89\x8b\x44\x8f\xb9\x68" + "\xeb\x93\xfd\x92\xd9\x14\x35\x9c" + "\x28\x3a\x9f\x1d\xd8\xe0\x2a\x76" + "\x51\xc1\xf0\xa9\x1d\xb4\xf8\xb9" + "\xfc\x14\x78\x5a\xa2\xb1\xdb\x94" + "\xcb\x18\xb9\x34\xbd\x0c\x65\x1d" + "\x64\xde\xd0\x3a\xe4\x68\x0e\xbc" + "\x13\xa7\x47\x89\x62\xa3\x03\x19" + "\x64\xa1\x02\x27\x3a\x8d\x43\xfa" + "\x68\xff\xda\x8b\x40\xe9\x19\x8b" + "\x56\xbe\x1c\x9b\xe6\xf6\x3f\x60" + "\xdb\x7a\xd5\xab\x82\xd8\xd9\x99" + "\xe3\x5b\x0c\x0c\x69\x18\x5c\xed" + "\x03\xf9\xc1\x61\xc4\x7b\xd4\x90" + "\x43\xc3\x39\xec\xac\xcb\x1f\x4b" + "\x23\xf8\xa9\x98\x2f\xf6\x48\x90" + "\x6c\x2b\x94\xad\x14\xdd\xcc\xa2" + "\x3d\xc7\x86\x0f\x7f\x1c\x0b\x93" + "\x4b\x74\x1f\x80\x75\xb4\x91\xdf" + 
"\xa8\x26\xf9\x06\x2b\x3a\x2c\xfd" + "\x3c\x31\x40\x1e\x5b\xa6\x86\x01" + "\xc4\xa2\x80\x4f\xf5\xa2\xf4\xff" + "\xf6\x07\x8c\x92\xf7\x74\xbd\x42" + "\xb0\x3f\x6b\x05\xca\x40\xeb\x04" + "\x20\xa9\x37\x78\x32\x03\x60\xcc" + "\xf3\xec\xb2\x2d\xb5\x80\x7c\xe4" + "\x37\x53\x25\xd1\xe8\x91\x6a\xe5" + "\xdf\xdd\xb0\xab\x69\xc7\xa1\xb2" + "\xfc\xb3\xd1\x9e\xda\xa8\x0d\x68" + "\xfe\x7d\xdc\x56\x33\x65\x99\xd2" + "\xec\xa5\xa0\xa1\x26\xc9\xec\xbd" + "\x22\x20\x5e\x0d\xcb\x93\x64\x7a" + "\x56\x75\xed\xe5\x45\xa2\xbd\x16" + "\x59\xf7\x43\xd9\x5b\x2c\xdd\xb6" + "\x1d\xa8\x05\x89\x2f\x65\x2e\x66" + "\xfe\xad\x93\xeb\x85\x8f\xe8\x4c" + "\x00\x44\x71\x03\x0e\x26\xaf\xfd" + "\xfa\x56\x0f\xdc\x9c\xf3\x2e\xab" + "\x88\x26\x61\xc6\x13\xfe\xba\xc1" + "\xd8\x8a\x38\xc3\xb6\x4e\x6d\x80" + "\x4c\x65\x93\x2f\xf5\x54\xff\x63" + "\xbe\xdf\x9a\xe3\x4f\xca\xc9\x71" + "\x12\xab\x95\x66\xec\x09\x64\xea" + "\xdc\x9f\x01\x61\x24\x88\xd1\xa7" + "\xd0\x69\x26\xf0\x80\xb0\xec\x86" + "\xc2\x58\x2f\x6a\xc5\xfd\xfc\x2a" + "\xf6\x3e\x23\x77\x3b\x7e\xc5\xc5" + "\xe7\xf9\x4d\xcc\x68\x53\x11\xc8" + "\x5b\x44\xbd\x48\x0f\xb3\x35\x1a" + "\x93\x4a\x80\x16\xa3\x0d\x50\x85" + "\xa6\xc4\xd4\x74\x4d\x87\x59\x51" + "\xd7\xf7\x7d\xee\xd0\x9b\xd1\x83" + "\x25\x2b\xc6\x39\x27\x6a\xb3\x41" + "\x5f\xd2\x24\xd4\xd6\xfa\x8c\x3e" + "\xb2\xf9\x11\x71\x7a\x9e\x5e\x7b" + "\x5b\x9a\x47\x80\xca\x1c\xbe\x04" + "\x5d\x34\xc4\xa2\x2d\x41\xfe\x73" + "\x53\x15\x9f\xdb\xe7\x7d\x82\x19" + "\x21\x1b\x67\x2a\x74\x7a\x21\x4a" + "\xc4\x96\x6f\x00\x92\x69\xf1\x99" + "\x50\xf1\x4a\x16\x11\xf1\x16\x51", + .ctext = "\x2c\xf5\x4c\xc9\x99\x19\x83\x84" + "\x09\xbc\xe6\xad\xbe\xb6\x6b\x1b" + "\x75\x0b\x3d\x33\x10\xb4\x8b\xf7" + "\xa7\xc7\xba\x9f\x6e\xd7\xc7\xfd" + "\x58\xef\x24\xf4\xdc\x26\x3f\x35" + "\x02\x98\xf2\x8c\x96\xca\xfc\xca" + "\xca\xfa\x27\xe6\x23\x1f\xf0\xc7" + "\xe3\x46\xbf\xca\x7b\x4e\x24\xcd" + "\xd0\x13\x3f\x80\xd6\x5b\x0b\xdc" + "\xad\xc6\x49\x77\xd7\x58\xf5\xfd" + "\x58\xba\x72\x0d\x9e\x0b\x63\xc3" + "\x86\xac\x06\x97\x70\x42\xec\x3a" + "\x0d\x53\x27\x17\xbd\x3e\xcb\xe0" + "\xaa\x19\xb4\xfe\x5d\x1b\xcb\xd7" + "\x99\xc3\x19\x45\x6f\xdf\x64\x44" + "\x9f\xf8\x55\x1b\x72\x8d\x78\x51" + "\x3c\x83\x48\x8f\xaf\x05\x60\x7d" + "\x22\xce\x07\x53\xfd\x91\xcf\xfa" + "\x5f\x86\x66\x3e\x72\x67\x7f\xc1" + "\x49\x82\xc7\x1c\x91\x1e\x48\xcd" + "\x5e\xc6\x5f\xd9\xc9\x43\x88\x35" + "\x80\xba\x91\xe1\x54\x4b\x14\xbe" + "\xbd\x75\x48\xb8\xde\x22\x64\xb5" + "\x8c\xcb\x5e\x92\x99\x8f\x4a\xab" + "\x00\x6c\xb4\x2e\x03\x3b\x0e\xee" + "\x4d\x39\x05\xbc\x94\x80\xbb\xb2" + "\x36\x16\xa3\xd9\x8f\x61\xd7\x67" + "\xb5\x90\x46\x85\xe1\x4e\x71\x84" + "\xd0\x84\xc0\xc0\x8f\xad\xdb\xeb" + "\x44\xf4\x66\x35\x3f\x92\xa2\x05" + "\xa4\x9c\xb8\xdc\x77\x6c\x85\x34" + "\xd2\x6a\xea\x32\xb8\x08\xf6\x13" + "\x78\x1e\x29\xef\x12\x54\x16\x28" + "\x25\xf8\x32\x0e\x4f\x94\xe6\xb3" + "\x0b\x97\x79\x97\xb3\xb0\x37\x61" + "\xa4\x10\x6f\x15\x9c\x7d\x22\x41" + "\xe2\xd7\xa7\xa0\xfc\xc5\x62\x55" + "\xed\x68\x39\x7b\x09\xd2\x17\xaa" + "\xf2\xb8\xc9\x1d\xa2\x23\xfd\xaa" + "\x9c\x57\x16\x0d\xe3\x63\x3c\x2b" + "\x13\xdd\xa2\xf0\x8e\xd3\x02\x81" + "\x09\xba\x80\x02\xdb\x97\xfe\x0f" + "\x77\x8d\x18\xf1\xf4\x59\x27\x79" + "\xa3\x46\x88\xda\x51\x67\xd0\xe9" + "\x5d\x22\x98\xc1\xe4\xea\x08\xda" + "\xf7\xb9\x16\x71\x36\xbd\x43\x8a" + "\x4b\x6e\xf3\xaa\xb0\xba\x1a\xbc" + "\xaa\xca\xde\x5c\xc0\xa5\x11\x6d" + "\x8a\x8f\xcc\x04\xfc\x6c\x89\x75" + "\x4b\x2c\x29\x6f\x41\xc7\x6e\xda" + "\xea\xa6\xaf\xb0\xb1\x46\x9e\x30" + "\x5e\x11\x46\x07\x3b\xd6\xaa\x36" + "\xa4\x01\x84\x1d\xb9\x8e\x58\x9d" + 
"\xa9\xb6\x1c\x56\x5c\x5a\xde\xfa" + "\x66\x96\xe6\x29\x26\xd4\x68\xd0" + "\x1a\xcb\x98\xbb\xce\x19\xbb\x87" + "\x00\x6c\x59\x17\xe3\xd1\xe6\x5c" + "\xd0\x98\xe1\x91\xc4\x28\xaf\xbf" + "\xbb\xdf\x75\x4e\xd9\x9d\x99\x0f" + "\xc6\x0c\x03\x24\x3e\xb6\xd7\x3f" + "\xd5\x43\x4a\x47\x26\xab\xf6\x3f" + "\x7f\xf1\x15\x0c\xde\x68\xa0\x5f" + "\x63\xf9\xe2\x5e\x5d\x42\xf1\x36" + "\x38\x90\x06\x18\x84\xf2\xfa\x81" + "\x36\x33\x29\x18\xaa\x8c\x49\x0e" + "\xda\x27\x38\x9c\x12\x8b\x83\xfa" + "\x40\xd0\xb6\x0a\x72\x85\xf0\xc7" + "\xaa\x5f\x30\x1a\x6f\x45\xe4\x35" + "\x4c\xf3\x4c\xe4\x1c\xd7\x48\x77" + "\xdd\x3e\xe4\x73\x44\xb1\xb8\x1c" + "\x42\x40\x90\x61\xb1\x6d\x8b\x20" + "\x2d\x30\x63\x01\x26\x71\xbc\x5a" + "\x76\xce\xc1\xfb\x13\xf9\x4c\x6e" + "\x7a\x16\x8a\x53\xcb\x07\xaa\xa1" + "\xba\xd0\x68\x7a\x2d\x25\x48\x85" + "\xb7\x6b\x0a\x05\xf2\xdf\x0e\x46" + "\x4e\xc8\xcd\x59\x5b\x9a\x2e\x9e" + "\xdb\x4a\xf6\xfd\x7b\xa4\x5c\x4d" + "\x78\x8d\xe7\xb0\x84\x3f\xf0\xc1" + "\x47\x39\xbf\x1e\x8c\xc2\x11\x0d" + "\x90\xd1\x17\x42\xb3\x50\xeb\xaa" + "\xcd\xc0\x98\x36\x84\xd0\xfe\x75" + "\xf8\x8f\xdc\xa0\xa1\x53\xe5\x8c" + "\xf2\x0f\x4a\x31\x48\xae\x3d\xaf" + "\x19\x4b\x75\x2e\xc1\xe3\xcd\x4d" + "\x2c\xa4\x54\x7b\x4d\x5e\x93\xa2" + "\xe7\x1f\x34\x19\x9f\xb2\xbf\x22" + "\x65\x1a\x03\x48\x12\x66\x50\x3e" + "\x0e\x5d\x60\x29\x44\x69\x90\xee" + "\x9d\x8b\x55\x78\xdf\x63\x31\xc3" + "\x1b\x21\x7d\x06\x21\x86\x60\xb0" + "\x9d\xdb\x3d\xcc\xe2\x20\xf4\x88" + "\x20\x62\x2e\xe8\xa9\xea\x42\x41" + "\xb0\xab\x73\x61\x40\x39\xac\x11" + "\x55\x27\x51\x5f\x11\xef\xb1\x23" + "\xff\x81\x99\x86\x0c\x6f\x16\xaf" + "\xf6\x89\x86\xd8\xf6\x41\xc2\x80" + "\x21\xf4\xd5\x6d\xef\xa3\x0c\x4d" + "\x59\xfd\xdc\x93\x1a\x4f\xe6\x22" + "\x83\x40\x0c\x98\x67\xba\x7c\x93" + "\x0b\xa9\x89\xfc\x3e\xff\x84\x12" + "\x3e\x27\xa3\x8a\x48\x17\xd6\x08" + "\x85\x2f\xf1\xa8\x90\x90\x71\xbe" + "\x44\xd6\x34\xbf\x74\x52\x0a\x17" + "\x39\x64\x78\x1a\xbc\x81\xbe\xc8" + "\xea\x7f\x0b\x5a\x2c\x77\xff\xac" + "\xdd\x37\x35\x78\x09\x28\x29\x4a" + "\xd1\xd6\x6c\xc3\xd5\x70\xdd\xfc" + "\x21\xcd\xce\xeb\x51\x11\xf7\xbc" + "\x12\x43\x1e\x6c\xa1\xa3\x79\xe6" + "\x1d\x63\x52\xff\xf0\xbb\xcf\xec" + "\x56\x58\x63\xe2\x21\x0b\x2d\x5c" + "\x64\x09\xf3\xee\x05\x42\x34\x93" + "\x38\xa8\x60\xea\x1d\x95\x90\x65" + "\xad\x2f\xda\x1d\xdd\x21\x1a\xf1" + "\x94\xe0\x6a\x81\xa1\xd3\x63\x31" + "\x45\x73\xce\x54\x4e\xb1\x75\x26" + "\x59\x18\xc2\x31\x73\xe6\xf5\x7d" + "\x06\x5b\x65\x67\xe5\x69\x90\xdf" + "\x27\x6a\xbf\x81\x7d\x92\xbe\xd1" + "\x4e\x0b\xa8\x18\x94\x72\xe1\xd0" + "\xb6\x2a\x16\x08\x7a\x34\xb8\xf2" + "\xe1\xac\x08\x66\xe6\x78\x66\xfd" + "\x36\xbd\xee\xc6\x71\xa4\x09\x4e" + "\x3b\x09\xf2\x8e\x3a\x90\xba\xa0" + "\xc2\x1d\x9f\xad\x52\x0e\xc9\x10" + "\x99\x40\x90\xd5\x7d\x73\x56\xef" + "\x48\x1e\x56\x5c\x7d\x3c\xcb\x84" + "\x10\x0a\xcc\xda\xce\xad\xd8\xa8" + "\x79\xc7\x29\x95\x31\x3b\xd9\x9b" + "\xb6\x84\x3e\x03\x74\xc5\x76\xba" + "\x4b\xd9\x4f\x7c\xc4\x5f\x7f\x70" + "\xc5\xe3\x6e\xd0\x14\x32\xec\x60" + "\xb0\x69\x78\xb7\xef\xda\x5a\xe7" + "\x4e\x50\x97\xd4\x94\x58\x67\x57" + "\x4e\x7c\x75\xe0\xcf\x8d\xe1\x78" + "\x97\x52\xc8\x73\x81\xf9\xb6\x02" + "\x54\x72\x6d\xc0\x70\xff\xe2\xeb" + "\x6c\xe1\x30\x0a\x94\xd0\x55\xec" + "\xed\x61\x9c\x6d\xd9\xa0\x92\x62" + "\x4e\xfd\xd8\x79\x27\x02\x4e\x13" + "\xb2\x04\xba\x00\x9a\x77\xed\xc3" + "\x5b\xa4\x22\x02\xa9\xed\xaf\xac" + "\x4f\xe1\x74\x73\x51\x36\x78\x8b" + "\xdb\xf5\x32\xfd\x0d\xb9\xcb\x15" + "\x4c\xae\x43\x72\xeb\xbe\xc0\xf8" + "\x91\x67\xf1\x4f\x5a\xd4\xa4\x69" + "\x8f\x3e\x16\xd2\x09\x31\x72\x5a" + "\x5e\x0a\xc4\xbc\x44\xd4\xbb\x82" + 
"\x7a\xdf\x52\x25\x8c\x45\xdc\xe4" + "\xe0\x71\x84\xe4\xe0\x3d\x59\x30" + "\x5b\x94\x12\x33\x78\x85\x90\x84" + "\x52\x05\x33\xa7\xa7\x16\xe0\x4d" + "\x6a\xf7\xfa\x03\x98\x6c\x4f\xb0" + "\x06\x66\x06\xa1\xdd\x3c\xbe\xbb" + "\xb2\x62\xab\x64\xd3\xbf\x2c\x30" + "\x0e\xfc\xd9\x95\x32\x32\xf3\x3b" + "\x39\x7e\xda\x62\x62\x0f\xc3\xfe" + "\x55\x76\x09\xf5\x8a\x09\x91\x93" + "\x32\xea\xbc\x2b\x0b\xcf\x1d\x65" + "\x48\x33\xba\xeb\x0f\xd4\xf9\x3b" + "\x1e\x90\x74\x6d\x93\x52\x61\x81" + "\xa3\xf2\xb5\xea\x1d\x61\x86\x68" + "\x00\x40\xcc\x58\xdd\xf2\x64\x01" + "\xab\xfd\x94\xc0\xa3\x83\x83\x33" + "\xa4\xb0\xb8\xd3\x9d\x08\x3c\x7f" + "\x8e\xa8\xaf\x87\xa5\xe7\xcd\x36" + "\x92\x96\xdc\xa1\xf2\xea\xe6\xd1" + "\x1e\xe9\x65\xa4\xff\xda\x17\x96" + "\xad\x91\x4a\xc5\x26\xb4\x1d\x1c" + "\x2b\x50\x48\x26\xc8\x86\x3f\x05" + "\xb8\x87\x1b\x3f\xee\x2e\x55\x61" + "\x0d\xdc\xcf\x56\x0e\xe2\xcc\xda" + "\x87\xee\xc5\xcd\x0e\xf4\xa4\xaf" + "\x8a\x02\xee\x16\x0b\xc4\xdd\x6d" + "\x80\x3e\xf3\xfe\x95\xb4\xfe\x97" + "\x0d\xe2\xab\xbb\x27\x84\xee\x25" + "\x39\x74\xb0\xfb\xdc\x5a\x0f\x65" + "\x31\x2a\x89\x08\xa4\x8c\x9f\x25" + "\x5f\x93\x83\x39\xda\xb4\x22\x17" + "\xbd\xd2\x0d\xfc\xde\xf8\x00\x34" + "\xc2\x48\x55\x06\x4c\x8b\x79\xe5" + "\xba\x0c\x50\x4f\x98\xa3\x59\x3d" + "\xc4\xec\xd1\x85\xf3\x60\x41\x16" + "\x0a\xe2\xf4\x38\x33\x24\xc1\xe0" + "\x0d\x86\x1f\x5a\xd2\xba\x7c\x5f" + "\x97\x60\x54\xa3\x52\x31\x78\x57" + "\x7a\xc0\xc7\x1e\xd4\x11\x8f\xef" + "\x86\x0a\x60\x26\x4a\x8f\x06\xf7" + "\x1f\x47\x45\x6e\x87\x13\x15\xf3" + "\x91\x08\xbf\x2a\x6e\x71\x21\x8e" + "\x92\x90\xde\x01\x97\x81\x46\x87" + "\x8a\xfc\xab\x12\x0c\x60\x3e\x9d" + "\xbd\x40\x0a\x45\x3f\x5b\x83\x04" + "\xb5\x8f\x42\x78\x68\xfe\x3a\xd1" + "\x59\xf7\x12\xaa\x86\x86\x1c\x77" + "\xfc\xc6\x64\x47\x0f\x7e\xd3\xbc" + "\x95\x90\x23\xb3\x60\xdc\x0d\xf4" + "\x67\xe6\x32\xee\xad\xbf\x60\x07" + "\xbd\xdb\x6e\x3f\x55\x88\xdb\x93" + "\x62\x41\xd6\xeb\x34\xd6\xa3\x96" + "\xd2\xbc\x29\xaa\x75\x65\x41\x9f" + "\x70\x43\xbb\x6d\xd9\xa5\x95\x22" + "\x3e\xf9\x07\xa0\x7d\x75\xba\xb8" + "\xcd\x81\x3b\x94\x01\x19\xc3\x67" + "\x9d\xa4\x7f\xa0\x99\xcc\x4a\xc4" + "\xfa\x76\x3f\xab\x5c\xea\x26\xdf" + "\xa2\x4c\x5b\x11\x55\xa3\x6a\x70" + "\xcb\xbc\x93\x11\x48\x38\x73\x7a" + "\x40\xbf\xbc\x04\x05\xb0\x2d\x9b" + "\x9a\x23\x57\xa5\xf6\x63\xfa\xc7" + "\xd8\x4d\xc2\xc0\xf8\xbd\xfb\x7d" + "\xea\x20\xa2\xe0\x4d\xaa\x63\x1e" + "\x9a\xa2\xed\x54\xe6\x49\xaf\x52" + "\xaf\x7e\x94\x57\x19\x07\x06\x74" + "\x57\x5b\x62\x61\x99\x20\xe7\x95" + "\x14\x19\xcf\x42\x83\x6a\x94\xf5" + "\xab\xa7\xf2\x48\xf6\x0b\x40\x3d" + "\x93\x8d\x3d\x14\x5d\xf2\x45\x2c" + "\xac\x1c\x0b\x12\xc9\x56\x3f\x7c" + "\x17\xeb\x1d\xed\x7e\x5c\xaa\x37" + "\xe3\xb4\x56\xf9\x0e\xb9\x8e\xc8" + "\x16\x70\x3e\xff\x95\xb9\x89\x9c" + "\x19\x0d\x0d\x48\xbd\xb9\xe3\x73" + "\xdf\x4e\x67\x9d\x93\x6c\x0b\x75" + "\x8a\x2d\x89\x5c\x32\x9d\x75\x05" + "\xd9\x13\xbe\x14\x5f\xf0\xb7\xb4" + "\xd9\x2c\x02\x22\x41\xf2\x9c\x1f" + "\xc1\x8c\xf5\x6a\x8c\xd5\xa5\x6b" + "\x54\x47\xec\x3a\x76\x08\xf6\xf7" + "\xed\x7c\x7e\x3b\x55\xb8\xa9\x20" + "\xa6\xec\x2d\x8c\x03\x38\x9d\x74" + "\xe9\x36\xe7\x05\x40\xec\xf4\xa1" + "\xa7\x70\xa7\x6f\x1f\x93\xc2\x1d" + "\x2c\x4e\x5f\xe8\x04\x6d\x91\x67" + "\x23\xd9\x47\xb4\xf6\xbc\x35\x25" + "\x1b\xa8\xe1\x17\xa8\x21\x38\xd8" + "\x7a\x55\xd9\xc6\x6f\x0a\x1b\xcb" + "\xde\xf8\x1e\x20\x8c\xa1\x14\x49" + "\x49\x00\x00\x31\x0f\xa8\x24\x67" + "\x97\x7a\x1f\x04\xb9\x6b\x60\xd0" + "\x32\xc3\xf4\xf9\x4f\xb2\xfd\x7b" + "\xf9\xb3\x43\xd8\x23\xaa\x21\x37" + "\x9e\x91\xc5\xa4\xce\xd8\xe4\xf5" + "\x55\x3e\xc9\xe4\xc5\x51\xd3\x4d" + 
"\xc6\x83\xe9\x23\x8e\x3e\x21\xe0" + "\x40\x23\x4e\x2b\x2d\x89\xc4\x5d" + "\x58\xdc\x43\x03\x8e\x9a\xfb\xef" + "\x76\xac\x78\x57\xc3\xb8\xf7\x9f" + "\xf5\xb1\xc2\xa4\x0c\xee\x58\x52" + "\x45\xdf\x1a\xd9\x0e\xe0\x56\x1f" + "\x23\x79\x99\x5f\x34\xad\x9f\x41" + "\x67\x2a\xc7\x8b\xe7\x82\x6e\x67" + "\x58\xb5\xae\x18\xd7\x2f\x8f\x57" + "\x0e\xa4\x21\x3c\x84\x21\x05\x50" + "\x57\xb0\xd1\xb1\xc8\x9d\xd4\x44" + "\x25\x40\x6b\xd5\x6f\x18\x92\x89" + "\x6d\x5b\xe9\x5a\x3c\x74\xc0\x33" + "\x2c\x7a\xa7\x99\x71\x4e\x9d\x1b" + "\xe1\x1d\xcb\x62\x8b\x3c\x07\x07" + "\x67\xf6\xa6\x54\x10\x72\x3f\xea" + "\xe5\xcd\xe6\xf1\xeb\x3d\x43\x0b" + "\xfe\x4b\xc7\x1d\x3d\xd9\xa3\xe2" + "\x9b\x79\x47\xc7\xab\x28\xcc\x4d" + "\xa8\x77\x9c\xec\xef\x56\xf8\x92" + "\x07\x48\x1b\x21\x04\xa8\x24\xb0" + "\x82\x7d\xd1\x17\xa4\xaf\x5f\xfa" + "\x92\xbf\x6a\xb7\x7e\xc7\xb7\x75" + "\x40\x3c\x14\x09\x57\xae\xe0\x4e" + "\xf8\xc9\xda\x1e\x5d\x27\xc4\x8c" + "\x27\xe3\x4d\xe3\x55\x8c\xd2\xef" + "\x0c\xab\x67\x53\x96\xd3\x48\xfb" + "\x75\x4f\x74\x9e\xcb\x82\xa4\x96" + "\x91\x41\x48\xaa\x65\xdb\x34\x72" + "\xc9\xee\xa2\x77\x8b\x6e\x44\x12" + "\x4e\x51\x51\xc3\xf5\xef\x6a\x50" + "\x99\x26\x41\x1e\x66\xa4\x2b\xb9" + "\x21\x15\x38\xc2\x0b\x7f\x37\xb6" + "\x89\x8b\x27\x70\xae\xa1\x90\x28" + "\x04\xe7\xd5\x17\xcb\x60\x99\xb4" + "\xe2\xd7\x04\xd3\x11\x27\x86\xe4" + "\xd0\x0d\x36\x04\x68\xe0\xb4\x71" + "\xe8\x86\x4b\x9f\xa3\xd2\xda\x87" + "\xc2\x2c\xad\x66\xfa\x53\x18\xf8" + "\xec\x10\x74\xc5\xb6\x53\x09\x93" + "\x21\x09\xbd\x77\x2d\x2a\x12\x4c" + "\x86\xfe\x50\x8e\xd1\x16\xab\xb1" + "\xfd\xd7\x87\xde\xc3\x6f\x7c\x16" + "\xe2\x88\x3d\x41\xac\x36\x7e\xf8" + "\xc2\x3b\x46\xd5\x44\x3d\x9d\xe8" + "\xe9\x0c\xb7\xb3\xc6\xb9\xe5\xe7" + "\x27\x17\x78\x03\xd4\xda\xe4\x73" + "\x38\x34\xe7\x53\x29\xc4\xcb\x93" + "\xc9\xa1\x10\x8a\xb2\xfc\x0b\x07" + "\x47\xb8\xb1\x13\x49\x86\x24\x8b" + "\x10\xb1\xd9\x5f\xbb\xd8\x90\x37" + "\x06\x03\xe0\x76\xff\x19\x1a\x16" + "\xd8\x2d\xa7\x4a\xea\x22\x64\xbe" + "\xed\x1c\xc8\x33\xb4\xf4\xb1\x48" + "\x95\xb5\x2f\xaa\x05\xc7\x03\xa0" + "\xf1\xa4\xf3\x63\x4b\xbe\x79\xb9" + "\x4b\x67\x7e\x4e\x3e\x81\x8f\xef" + "\xe9\x55\x99\x30\xd0\x26\xec\x5d" + "\x89\xb6\x3f\x28\x38\x81\x7a\x00" + "\x89\x85\xb8\xff\x19\x0f\x8f\x5d" + "\x5c\x6d\x6a\x3d\x6c\xb9\xfb\x7c" + "\x0c\x4b\x7e\xbc\x0c\xc4\xad\xbb" + "\x0a\x8b\xc8\x48\xb7\xfa\x4d\x53" + "\x82\x10\xd6\x29\x58\x83\x50\x3c" + "\xd4\x5a\xfd\x14\xa3\xb5\x88\xfb" + "\x23\xee\xc9\xcc\xab\x92\x52\xb3" + "\x0b\x07\xf3\x1e\x9a\x2a\x2e\x35" + "\x32\x37\xa5\x86\xd0\xe5\x5f\xdd" + "\x3d\x67\x70\xb4\x9a\xc9\x93\xdc" + "\x31\x33\xe3\x3a\xc5\xcf\xd9\x44" + "\x2f\x3f\x87\xb2\x0c\x36\x55\x17" + "\xa9\xda\xb1\xca\x00\x09\x87\xe6" + "\x66\x34\xb3\x9f\x52\x37\x98\x10" + "\x2e\x5d\xa4\x14\x7f\x63\xa6\xcd" + "\x6c\x2d\x7c\x74\x4c\xae\x9c\x65" + "\xe0\x79\xc0\xd6\xc3\xfe\xa8\xf4" + "\x1a\x4f\xf5\xbc\xea\x7a\x92\x40" + "\x51\xa7\x05\x45\x40\xd8\x9c\x3c" + "\xde\x5f\x0b\x6e\x10\x5c\x1c\xdc" + "\xd2\x65\x60\xbb\x70\x68\x5c\xa9" + "\x59\x25\x0e\x4e\x93\xb8\x49\x89" + "\xf6\xae\xeb\x1f\x8b\x56\xc8\x56" + "\xb0\xb5\xc9\xee\xa5\x15\x07\x4d" + "\x8a\xcc\xad\x04\x4d\x99\x8c\x49" + "\x8d\x7c\xe0\xa5\x7d\x7f\x33\x61" + "\xf2\xfc\xe7\x88\x3f\x2b\x73\xab" + "\x2e\x38\x17\x48\xa9\x86\xdd\x81" + "\x21\x45\xbc\x98\x1d\xe5\xa5\xbc" + "\x0d\x0b\x18\x8e\x86\x1e\x76\x0a" + "\x30\x12\x21\xf0\x51\xed\xc1\xcd" + "\x9a\xf1\x7e\x7e\x64\xb2\xa3\xd6" + "\x37\xe7\xc6\xde\x97\xb9\x5d\x05" + "\xf5\x50\xe2\x0a\xaa\x68\x16\xa6" + "\x26\x9c\x7d\xff\x4c\x05\xce\x48" + "\xa7\xff\x10\x19\x5e\xef\x46\x54" + "\xec\xe4\x7b\xb6\x12\x23\xae\x93" + 
"\x4f\x79\xf8\x3c\x1c\x07\x15\x66" + "\x07\xc1\x52\xde\x7f\xda\x51\x7b" + "\xfe\x13\x67\xab\x8d\x56\xdc\xc1" + "\x70\x4b\x13\xd2\x30\x00\xc1\x97" + "\x22\xa7\x83\xf8\x18\xd9\x6d\x40" + "\x54\xe0\xc1\xdb\x3e\x83\x73\x12" + "\xe1\x48\x49\xb9\xd4\x20\x0c\x06" + "\x1c\x82\xb5\xbe\x5a\xae\x60\x5e" + "\xe2\x09\xba\x05\xbb\x9a\x80\x63" + "\xf2\xc4\x4b\x41\x39\x16\x76\x26" + "\xb1\x03\x06\x23\x65\x37\x33\x92" + "\xca\xf9\x72\xf5\xcd\x95\xc1\xc0" + "\x91\x5a\xfd\x28\xb9\x62\x59\x84" + "\x87\x9d\x82\xcb\xe0\x67\x7c\x26" + "\xb8\x00\x16\xd9\x5d\xb4\x74\xd4" + "\x75\x8c\x75\xf8\x87\x3b\xa8\x77" + "\xcd\x82\x3d\x7b\xb9\x63\x44\x0f" + "\x44\x83\x55\x5b\xc7\xdc\x18\x0b" + "\x8c\x36\xb3\x59\xeb\x58\x13\x38" + "\x4b\x8a\xb7\xa3\x9a\xa2\xf3\xeb" + "\xc6\x30\x84\x86\x0a\xcf\x8b\xfa" + "\x36\x66\x26\xbc\xd0\x96\xa3\xb4" + "\x8d\x6b\xf7\x5b\x75\x59\xbb\xd3" + "\x14\x78\x57\x2f\x27\xa8\x95\xcf" + "\xa2\xa5\x76\x28\xbd\xab\x8b\x59" + "\x04\x91\x8a\xc5\x3c\xc3\xa7\xcf" + "\xe0\xfb\xdd\x7a\xbb\x10\xde\x36" + "\x43\x1c\x59\xf7\x41\xb6\xa5\x80" + "\x72\x7b\xe3\x7a\xa3\x01\xc3\x8c" + "\x7e\xf3\xf2\x42\x1a\x0c\x7e\xf3" + "\xfc\x5b\x6e\x1f\x20\xf1\x32\x76" + "\x83\x71\x36\x3e\x7e\xa7\xf7\xdd" + "\x25\x2e\xe6\x04\xe2\x5b\x44\xb5" + "\x16\xfb\xdf\x9b\x46\x2a\xa8\x81" + "\x89\x15\x3e\xb5\xb0\x09\x40\x33" + "\x60\xc7\x37\x63\x14\x09\xc1\x6e" + "\x56\x52\xbe\xe4\x88\xe0\x75\xbc" + "\x49\x62\x8c\xf1\xdf\x62\xe6\xac" + "\xd5\x87\xf7\xc9\x92\x52\x36\x59" + "\x22\x6f\x31\x99\x76\xdb\x41\xb6" + "\x26\x91\x79\x7e\xd2\x78\xaf\x07" + "\x78\x4b\xed\x54\x30\xb2\xff\xbc" + "\x2c\x0a\x1a\xbe\xbf\xd5\x5a\x4d" + "\xd1\xbc\x30\xc2\xf4\xf1\xc1\x9e" + "\x9a\x96\x89\x00\x50\xfc\xf6\xaf" + "\xfa\x60\xbf\x1a\x32\x8f\x57\x36" + "\x2f\x02\xb7\x28\x50\xc3\xd3\xfd" + "\x6b\xc4\xe6\xbb\xc9\xec\xed\x86" + "\xdf\x27\x45\x2c\x0c\x6d\x65\x3b" + "\x6e\x63\x96\xc7\xd6\xb5\xb2\x05" + "\x8b\xe0\x02\x2a\xfa\x20\x0c\x82" + "\xa5\x45\x75\x12\x01\x40\xff\x3e" + "\xfd\xfc\xfb\xbc\x30\x49\xe8\x99" + "\x8d\x48\x8e\x49\x65\x2a\xe3\xa5" + "\x06\xe3\x22\x68\x3b\xd9\xa4\xcf" + "\x84\x6f\xfa\x2b\xb1\xd8\x8c\x30" + "\xd5\x5d\x0c\x63\x32\x59\x28\x6e" + "\x2a\x60\xa4\x57\x12\xf8\xc2\x95" + "\x0a\xf6\xc6\x48\x23\xce\x72\x40" + "\x0d\x75\xa0\xd4\x48\x03\xf5\xc4" + "\xcd\x26\xe7\x83\xcc\x0d\xcf\x7f" + "\x22\x5f\x91\xb3\x42\x02\x9a\x26" + "\x12\x26\x68\x12\x25\x0b\x08\x61" + "\xcb\x25\x86\x95\xfc\x57\x4d\xb6" + "\x36\x6c\xb4\xdc\xa9\x2d\x76\x7f" + "\x25\x06\xa2\x08\x69\x09\xd9\x09" + "\x3c\x40\xe1\xfd\x30\x8f\xc2\x13" + "\x92\xd4\xb5\x3b\x0c\xb2\x32\x4f" + "\x10\xc9\x1a\x41\xa6\xb2\x11\xf6" + "\x3b\x1b\x88\x56\xbf\x61\x3c\xb2" + "\xe6\xdb\x24\x9a\x55\x7e\x35\xf8" + "\x82\x5e\x52\xe3\xf2\xb3\x40\x1c" + "\xdd\xe3\x29\x37\xe0\x85\x08\x8b" + "\xb2\x8b\x09\x38\xac\xa9\x85\xe5" + "\x9e\x36\xb8\x95\x0b\x84\x9d\x10" + "\xcc\xae\xe2\x06\x56\x3c\x85\xce" + "\xc0\xdc\x36\x59\x17\xf9\x48\xf4" + "\x5b\x08\x8e\x86\x00\xa0\xf5\xdd" + "\x0c\xb6\x63\xfd\x5a\xe5\x1e\xa6" + "\x0a\xef\x76\xc2\xc7\x9b\x96\x2f" + "\x66\x2b\x7d\x50\xa6\x0c\x42\xc6" + "\xa5\x05\x05\x10\xeb\xd8\xda\x15" + "\x03\xbe\x2f\x24\x34\x8f\x84\xd8" + "\x58\xb8\xa3\xf2\x63\xc8\xc3\xf6" + "\xc2\xde\x27\x58\x69\xf9\x07\xca" + "\x12\x3e\xe2\xf4\xc8\x29\x60\x30" + "\x2f\x87\xf4\x50\xc2\x25\xcc\xfd" + "\xdc\x76\x4f\x56\x1c\xb2\xd9\x78" + "\x11\x6b\x6e\xb4\x67\xbf\x25\xc4" + "\xae\x7d\x50\x7f\xb2\x5c\x69\x26" + "\xed\x6b\xd2\x3b\x42\x64\xe3\x0c" + "\x15\xa6\xd1\xb6\x3e\x23\x76\x09" + "\x48\xd2\x08\x41\x76\xc9\x7d\x5f" + "\x50\x5d\x8e\xf9\x04\x96\xed\x3a" + "\xf8\x7c\x3b\x7d\x84\xba\xea\xe6" + "\x24\xd2\x0f\x7f\x5a\x0b\x6f\xd9" + 
"\x33\x14\x67\xfb\x9f\xe7\x44\x4e" + "\x3b\x4b\x06\xaa\xb4\x7a\x8b\x83" + "\x82\x74\xa6\x5e\x10\xea\xd6\x4b" + "\x56\x32\xd7\x79\x7c\x05\xf4\x64" + "\x9c\x64\x25\x9c\xc2\xda\x21\x9a" + "\xd8\xde\x37\x83\x3f\xd8\x83\xa2" + "\x1e\x3c\x1e\x41\x7e\xf2\x97\x84" + "\xe5\xa2\x02\x2b\x6e\xc5\xd7\x91" + "\x24\x66\xc1\xf0\x05\x1c\x0f\x3d" + "\xcf\x63\x94\x10\x2e\x0e\x89\xda" + "\x0d\xe9\x58\x2a\x48\x0c\xc8\x36" + "\xc4\x7b\xf0\xd3\xe2\x5b\xf1\xf6" + "\xad\x3d\xe7\x25\x6b\x83\x08\x5c" + "\xd9\x79\xde\x93\x37\x93\x92\x46" + "\xe7\xf4\x1c\x9e\x94\x91\x30\xd9" + "\xb6\x57\xf1\x04\xb5\x2f\xe3\xb9" + "\x0a\x78\xfe\xcb\xb5\x31\xc1\xc6" + "\x99\xb3\xaf\x73\xfb\x69\xcb\x49" + "\xd2\xec\xea\xd3\x0f\x45\x13\x23" + "\xc8\xae\x92\x29\xce\x71\xd0\xba" + "\xcf\xfd\xb2\x14\x61\xfd\xf6\x7b" + "\xdf\x05\xe5\xbb\x58\xf7\x41\x3b" + "\x6e\xd2\x14\x28\x7c\x15\xb7\x70" + "\xca\xc7\x7a\xd7\x4e\x4b\x35\x6e" + "\x9e\x09\x24\x33\xaf\xca\x41\x1f" + "\x0d\xe3\xf1\x7c\x35\xcb\xe2\x0a" + "\xb2\xeb\x94\x7a\xbc\x53\xd7\xe1" + "\x5e\xbc\xa1\x55\xef\x3c\x37\xef" + "\x6d\xfe\x3a\xcd\xcf\x48\x36\x26" + "\xdb\x3e\x44\xdd\xc8\x03\xa6\xa6" + "\x85\xb5\xfe\xf3\xec\x44\xb3\x22" + "\x9d\x21\x82\xc6\x0b\x1a\x7c\xc6" + "\xf7\xa9\x8e\x7e\x13\x1a\x85\x1f" + "\x93\x81\x38\x47\xc0\x83\x21\xa3" + "\xde\xec\xc0\x8f\x4c\x3b\x57\x2f" + "\x92\xbb\x66\xe3\x24\xeb\xae\x1e" + "\xb3\x18\x57\xf2\xf3\x4a\x50\x52" + "\xe9\x91\x08\x1f\x85\x44\xc1\x07" + "\xa1\xd3\x62\xe9\xe0\x82\x38\xfd" + "\x27\x3f\x7e\x10\x7d\xaf\xa1\x7a" + "\xf0\xaa\x79\xee\x6e\xa2\xc0\xbb" + "\x01\xda\xfb\xc4\x85\x26\x85\x31" + "\x15\xf4\x3c\xe0\x96\x79\x0e\xd7" + "\x50\x68\x37\x57\xb5\x31\xf7\x3c" + "\xbd\xaa\xcc\x2c\x8f\x57\x59\xa5" + "\xd4\x4b\xc6\x45\xc0\x32\x3d\x85" + "\x6d\xee\xf4\x6b\x63\xf9\x3a\xfb" + "\x2f\xdb\xb8\x42\x19\x8e\x88\x1f" + "\xfd\x7d\x0b\x69\x14\x8f\x36\xb2" + "\xd9\x27\x34\x53\x9c\x52\x00\x94" + "\xcc\x8b\x37\x82\xaf\x8e\xb3\xc0" + "\x8a\xcf\x44\xc6\x3a\x19\xbe\x1f" + "\x23\x33\x68\xc4\xb6\xbb\x13\x20" + "\xec\x6a\x87\x5b\xc2\x7c\xd3\x04" + "\x34\x97\x32\xd5\x11\x02\x06\x45" + "\x98\x0b\xaa\xab\xbe\xfb\xd0\x2c" + "\x0e\xf1\x8b\x7f\x1c\x70\x85\x67" + "\x60\x50\x66\x79\xbb\x45\x21\xc4" + "\xb5\xd3\xb9\x4f\xe5\x41\x49\x86" + "\x6b\x20\xef\xac\x16\x74\xe9\x23" + "\xa5\x2d\x5c\x2b\x85\xb2\x33\xe8" + "\x2a\xd1\x24\xd1\x5b\x9b\x7f\xfc" + "\x2f\x3b\xf7\x6a\x8b\xde\x55\x7e" + "\xda\x13\x1b\xd6\x90\x74\xb0\xbe" + "\x46\x0d\xcf\xc7\x78\x33\x31\xdc" + "\x6a\x6a\x50\x3e\x4c\xe2\xab\x48" + "\xbc\x4e\x7d\x62\xb9\xfc\xdd\x85" + "\x1c\x5d\x93\x15\x5e\x01\xd9\x2b" + "\x48\x71\x82\xd6\x44\xd6\x0e\x92" + "\x6e\x75\xc9\x3c\x1d\x31\x18\x6f" + "\x8b\xd7\x18\xf3\x09\x08\x45\xb1" + "\x3e\xa4\x25\xc6\x34\x48\xaf\x42" + "\x77\x33\x03\x65\x3e\x2f\xff\x8f" + "\xe9\xe1\xa0\xfe\xb2\xc3\x80\x77" + "\x20\x05\xe4\x9b\x47\x3b\xb2\xbd", + .len = 4096, } }; @@ -34582,8 +32473,6 @@ static const struct hash_testvec crc32_tv_template[] = { "\xe9\xea\xeb\xec\xed\xee\xef\xf0", .psize = 240, .digest = "\x6c\xc6\x56\xde", - .np = 2, - .tap = { 31, 209 } }, { .key = "\xff\xff\xff\xff", .ksize = 4, @@ -35023,8 +32912,6 @@ static const struct hash_testvec crc32c_tv_template[] = { "\xe9\xea\xeb\xec\xed\xee\xef\xf0", .psize = 240, .digest = "\x75\xd3\xc5\x24", - .np = 2, - .tap = { 31, 209 } }, { .key = "\xff\xff\xff\xff", .ksize = 4, diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 022d3dd76c3b..f8e1d9f9938f 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -25,8 +25,9 @@ #include #include #include -#include #include +#include +#include #define TGR192_DIGEST_SIZE 24 #define TGR160_DIGEST_SIZE 20 @@ -468,10 
+469,9 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data) u64 a, b, c, aa, bb, cc; u64 x[8]; int i; - const __le64 *ptr = (const __le64 *)data; for (i = 0; i < 8; i++) - x[i] = le64_to_cpu(ptr[i]); + x[i] = get_unaligned_le64(data + i * sizeof(__le64)); /* save */ a = aa = tctx->a; diff --git a/drivers/Kconfig b/drivers/Kconfig index 4f9f99057ff8..45f9decb9848 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -228,4 +228,6 @@ source "drivers/siox/Kconfig" source "drivers/slimbus/Kconfig" +source "drivers/interconnect/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index e1ce029d28fd..c61cde554340 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -56,7 +56,7 @@ obj-y += tty/ obj-y += char/ # iommu/ comes before gpu as gpu are using iommu controllers -obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ +obj-y += iommu/ # gpu/ comes after char for AGP vs DRM startup and after iommu obj-y += gpu/ @@ -186,3 +186,4 @@ obj-$(CONFIG_MULTIPLEXER) += mux/ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ obj-$(CONFIG_SIOX) += siox/ obj-$(CONFIG_GNSS) += gnss/ +obj-$(CONFIG_INTERCONNECT) += interconnect/ diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 90ff0a47c12e..4e015c77e48e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -357,6 +357,16 @@ config ACPI_TABLE_UPGRADE initrd, therefore it's safe to say Y. See Documentation/acpi/initrd_table_override.txt for details +config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD + bool "Override ACPI tables from built-in initrd" + depends on ACPI_TABLE_UPGRADE + depends on INITRAMFS_SOURCE!="" && INITRAMFS_COMPRESSION="" + help + This option provides functionality to override arbitrary ACPI tables + from built-in uncompressed initrd. + + See Documentation/acpi/initrd_table_override.txt for details + config ACPI_DEBUG bool "Debug Statements" help diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c index a2dcd62ea32f..4a434c23a196 100644 --- a/drivers/acpi/acpi_dbg.c +++ b/drivers/acpi/acpi_dbg.c @@ -750,48 +750,36 @@ static const struct acpi_debugger_ops acpi_aml_debugger = { int __init acpi_aml_init(void) { - int ret = 0; - - if (!acpi_debugfs_dir) { - ret = -ENOENT; - goto err_exit; - } + int ret; /* Initialize AML IO interface */ mutex_init(&acpi_aml_io.lock); init_waitqueue_head(&acpi_aml_io.wait); acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf; acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf; + acpi_aml_dentry = debugfs_create_file("acpidbg", S_IFREG | S_IRUGO | S_IWUSR, acpi_debugfs_dir, NULL, &acpi_aml_operations); - if (acpi_aml_dentry == NULL) { - ret = -ENODEV; - goto err_exit; - } - ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger); - if (ret) - goto err_fs; - acpi_aml_initialized = true; -err_fs: + ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger); if (ret) { debugfs_remove(acpi_aml_dentry); acpi_aml_dentry = NULL; + return ret; } -err_exit: - return ret; + + acpi_aml_initialized = true; + return 0; } void __exit acpi_aml_exit(void) { if (acpi_aml_initialized) { acpi_unregister_debugger(&acpi_aml_debugger); - if (acpi_aml_dentry) { - debugfs_remove(acpi_aml_dentry); - acpi_aml_dentry = NULL; - } + debugfs_remove(acpi_aml_dentry); + acpi_aml_dentry = NULL; acpi_aml_initialized = false; } } diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index f0b52266b3ac..d73afb562ad9 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -2124,21 +2124,29 @@ static int __init intel_opregion_present(void) return opregion; } +/* Check if the 
chassis-type indicates there is no builtin LCD panel */ static bool dmi_is_desktop(void) { const char *chassis_type; + unsigned long type; chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); if (!chassis_type) return false; - if (!strcmp(chassis_type, "3") || /* 3: Desktop */ - !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */ - !strcmp(chassis_type, "5") || /* 5: Pizza Box */ - !strcmp(chassis_type, "6") || /* 6: Mini Tower */ - !strcmp(chassis_type, "7") || /* 7: Tower */ - !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */ + if (kstrtoul(chassis_type, 10, &type) != 0) + return false; + + switch (type) { + case 0x03: /* Desktop */ + case 0x04: /* Low Profile Desktop */ + case 0x05: /* Pizza Box */ + case 0x06: /* Mini Tower */ + case 0x07: /* Tower */ + case 0x10: /* Lunch Box */ + case 0x11: /* Main Server Chassis */ return true; + } return false; } diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h index 5a9c2febc0fb..863ade9add6d 100644 --- a/drivers/acpi/acpica/acapps.h +++ b/drivers/acpi/acpica/acapps.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -17,7 +17,7 @@ /* Common info for tool signons */ #define ACPICA_NAME "Intel ACPI Component Architecture" -#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2018 Intel Corporation" +#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2019 Intel Corporation" #if ACPI_MACHINE_WIDTH == 64 #define ACPI_WIDTH " (64-bit version)" diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h index 8bc935977d8e..54f81eac7ec9 100644 --- a/drivers/acpi/acpica/accommon.h +++ b/drivers/acpi/acpica/accommon.h @@ -3,7 +3,7 @@ * * Name: accommon.h - Common include files for generation of ACPICA source * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h index 4ebe18826646..d5478cd4a857 100644 --- a/drivers/acpi/acpica/acconvert.h +++ b/drivers/acpi/acpica/acconvert.h @@ -3,7 +3,7 @@ * * Module Name: acapps - common include for ACPI applications/tools * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h index 57d9495e5933..32f2e38c7570 100644 --- a/drivers/acpi/acpica/acdebug.h +++ b/drivers/acpi/acpica/acdebug.h @@ -3,7 +3,7 @@ * * Name: acdebug.h - ACPI/AML debugger * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -16,7 +16,8 @@ #include "acdisasm.h" #endif -#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */ +#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */ +#define ACPI_DEBUG_LENGTH_FORMAT " (%.4X bits, %.3X bytes)" struct acpi_db_command_info { const char *name; /* Command Name */ diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h index e577f3a40e6a..82f81501566b 100644 --- a/drivers/acpi/acpica/acdispat.h +++ b/drivers/acpi/acpica/acdispat.h @@ -3,7 +3,7 @@ * * Name: acdispat.h - dispatcher (parser to interpreter interface) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h index b412aa909907..831660179662 100644 --- a/drivers/acpi/acpica/acevents.h +++ b/drivers/acpi/acpica/acevents.h @@ -3,7 +3,7 @@ * * Name: acevents.h - Event subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 87d6eb01beaf..d056a1845613 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -3,7 +3,7 @@ * * Name: acglobal.h - Declarations for global variables * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -164,6 +164,7 @@ ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_global_list); ACPI_GLOBAL(struct acpi_memory_list *, acpi_gbl_ns_node_list); ACPI_GLOBAL(u8, acpi_gbl_display_final_mem_stats); ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking); +ACPI_GLOBAL(u8, acpi_gbl_verbose_leak_dump); #endif /***************************************************************************** diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index ef99e2fc37f8..bcf8f7501db7 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -3,7 +3,7 @@ * * Name: achware.h -- hardware specific interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h index c5b2be0b6613..20706adbc148 100644 --- a/drivers/acpi/acpica/acinterp.h +++ b/drivers/acpi/acpica/acinterp.h @@ -3,7 +3,7 @@ * * Name: acinterp.h - Interpreter subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 99b0da899109..a2dfbf6b004e 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -3,7 +3,7 @@ * * Name: aclocal.h - Internal data types used across the ACPI subsystem * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -802,7 +802,7 @@ struct acpi_comment_addr_node { /* * File node - used for "Include" operator file stack and - * depdendency tree for the -ca option + * dependency tree for the -ca option */ struct acpi_file_node { void *file; diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index de52cd6e868a..283614e82a20 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -3,7 +3,7 @@ * * Name: acmacros.h - C macros for the entire subsystem. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -462,7 +462,7 @@ #define ACPI_IS_OCTAL_DIGIT(d) (((char)(d) >= '0') && ((char)(d) <= '7')) /* - * Macors used for the ASL-/ASL+ converter utility + * Macros used for the ASL-/ASL+ converter utility */ #ifdef ACPI_ASL_COMPILER diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index 9bd25f36c608..39812fc4386a 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -3,7 +3,7 @@ * * Name: acnamesp.h - Namespace subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h index ac992b6ebce9..b2ef703d7df8 100644 --- a/drivers/acpi/acpica/acobject.h +++ b/drivers/acpi/acpica/acobject.h @@ -3,7 +3,7 @@ * * Name: acobject.h - Definition of union acpi_operand_object (Internal object only) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -239,6 +239,7 @@ struct acpi_object_region_field { union acpi_operand_object *region_obj; /* Containing op_region object */ u8 *resource_buffer; /* resource_template for serial regions/fields */ u16 pin_number_index; /* Index relative to previous Connection/Template */ + u8 *internal_pcc_buffer; /* Internal buffer for fields associated with PCC */ }; struct acpi_object_bank_field { diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h index 818eba413614..9d78134428e3 100644 --- a/drivers/acpi/acpica/acopcode.h +++ b/drivers/acpi/acpica/acopcode.h @@ -3,7 +3,7 @@ * * Name: acopcode.h - AML opcode information for the AML parser and interpreter * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h index ab48196ae55e..6e32c97cba6c 100644 --- a/drivers/acpi/acpica/acparser.h +++ b/drivers/acpi/acpica/acparser.h @@ -3,7 +3,7 @@ * * Module Name: acparser.h - AML Parser subcomponent prototypes and defines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h index d31bb04facb6..387163b962a7 100644 --- a/drivers/acpi/acpica/acpredef.h +++ b/drivers/acpi/acpica/acpredef.h @@ -3,7 +3,7 @@ * * Name: acpredef - Information table for ACPI predefined methods and objects * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -631,6 +631,21 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = { {{"_MTL", METHOD_0ARGS, /* ACPI 6.0 */ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, + {{"_NBS", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NCH", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NIC", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NIG", METHOD_1ARGS(ACPI_TYPE_BUFFER), /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + + {{"_NIH", METHOD_0ARGS, /* ACPI 6.3 */ + METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, + {{"_NTT", METHOD_0ARGS, METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h index 59ae8b1a6e40..422cd8f2b92e 100644 --- a/drivers/acpi/acpica/acresrc.h +++ b/drivers/acpi/acpica/acresrc.h @@ -3,7 +3,7 @@ * * Name: acresrc.h - Resource Manager function prototypes * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h index 14be32961b4c..8a4e6b4aaf2c 100644 --- a/drivers/acpi/acpica/acstruct.h +++ b/drivers/acpi/acpica/acstruct.h @@ -3,7 +3,7 @@ * * Name: acstruct.h - Internal structs * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index 12fac33ce77e..dfbf1dbd4033 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -3,7 +3,7 @@ * * Name: actables.h - ACPI table management * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h index 3374d41582b5..9022537567e9 100644 --- a/drivers/acpi/acpica/acutils.h +++ b/drivers/acpi/acpica/acutils.h @@ -3,7 +3,7 @@ * * Name: acutils.h -- prototypes for the common (subsystem-wide) procedures * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h index 6c05355447c1..49e412edd7c6 100644 --- a/drivers/acpi/acpica/amlcode.h +++ b/drivers/acpi/acpica/amlcode.h @@ -5,7 +5,7 @@ * Declarations and definitions contained herein are derived * directly from the ACPI specification. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h index cdb590176e9d..7c3bd4ab60fc 100644 --- a/drivers/acpi/acpica/amlresrc.h +++ b/drivers/acpi/acpica/amlresrc.h @@ -3,7 +3,7 @@ * * Module Name: amlresrc.h - AML resource descriptors * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c index 9fcb8ec64681..30ab62b0fec8 100644 --- a/drivers/acpi/acpica/dbdisply.c +++ b/drivers/acpi/acpica/dbdisply.c @@ -237,7 +237,7 @@ void acpi_db_decode_and_display_object(char *target, char *output_type) default: - /* Is not a recognizeable object */ + /* Is not a recognizable object */ acpi_os_printf ("Not a known ACPI internal object, descriptor type %2.2X\n", @@ -647,7 +647,7 @@ void acpi_db_display_object_type(char *object_arg) * * DESCRIPTION: Display the result of an AML opcode * - * Note: Curently only displays the result object if we are single stepping. + * Note: Currently only displays the result object if we are single stepping. * However, this output may be useful in other contexts and could be enabled * to do so if needed. * diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c index 6abb6b834d97..bb43305cb215 100644 --- a/drivers/acpi/acpica/dbexec.c +++ b/drivers/acpi/acpica/dbexec.c @@ -160,12 +160,12 @@ acpi_db_execute_method(struct acpi_db_method_info *info, } ACPI_EXCEPTION((AE_INFO, status, - "while executing %s from debugger", + "while executing %s from AML Debugger", info->pathname)); if (status == AE_BUFFER_OVERFLOW) { ACPI_ERROR((AE_INFO, - "Possible overflow of internal debugger " + "Possible buffer overflow within AML Debugger " "buffer (size 0x%X needed 0x%X)", ACPI_DEBUG_BUFFER_SIZE, (u32)return_obj->length)); diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c index b0b9a26c7db5..7809bd94a18d 100644 --- a/drivers/acpi/acpica/dbhistry.c +++ b/drivers/acpi/acpica/dbhistry.c @@ -3,7 +3,7 @@ * * Module Name: dbhistry - debugger HISTORY command * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c index 992bd7b92540..004d34d9369b 100644 --- a/drivers/acpi/acpica/dbnames.c +++ b/drivers/acpi/acpica/dbnames.c @@ -904,7 +904,7 @@ acpi_db_bus_walk(acpi_handle obj_handle, * * RETURN: None * - * DESCRIPTION: Display info about system busses. + * DESCRIPTION: Display info about system buses. 
* ******************************************************************************/ diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c index a1c76bf21122..d220168dca01 100644 --- a/drivers/acpi/acpica/dbobject.c +++ b/drivers/acpi/acpica/dbobject.c @@ -243,7 +243,7 @@ acpi_db_display_internal_object(union acpi_operand_object *obj_desc, acpi_os_printf("[%s] ", acpi_ut_get_reference_name(obj_desc)); - /* Decode the refererence */ + /* Decode the reference */ switch (obj_desc->reference.class) { case ACPI_REFCLASS_LOCAL: diff --git a/drivers/acpi/acpica/dbtest.c b/drivers/acpi/acpica/dbtest.c index 8a5462439a97..6db44a5ac786 100644 --- a/drivers/acpi/acpica/dbtest.c +++ b/drivers/acpi/acpica/dbtest.c @@ -10,6 +10,7 @@ #include "acdebug.h" #include "acnamesp.h" #include "acpredef.h" +#include "acinterp.h" #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME("dbtest") @@ -32,6 +33,9 @@ acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length); static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node); +static acpi_status +acpi_db_test_field_unit_type(union acpi_operand_object *obj_desc); + static acpi_status acpi_db_read_from_object(struct acpi_namespace_node *node, acpi_object_type expected_type, @@ -74,7 +78,7 @@ static struct acpi_db_argument_info acpi_db_test_types[] = { static acpi_handle read_handle = NULL; static acpi_handle write_handle = NULL; -/* ASL Definitions of the debugger read/write control methods */ +/* ASL Definitions of the debugger read/write control methods. AML below. */ #if 0 definition_block("ssdt.aml", "SSDT", 2, "Intel", "DEBUG", 0x00000001) @@ -227,10 +231,8 @@ static void acpi_db_test_all_objects(void) * RETURN: Status * * DESCRIPTION: Test one namespace object. Supported types are Integer, - * String, Buffer, buffer_field, and field_unit. All other object - * types are simply ignored. - * - * Note: Support for Packages is not implemented. + * String, Buffer, Package, buffer_field, and field_unit. + * All other object types are simply ignored. * ******************************************************************************/ @@ -240,7 +242,6 @@ acpi_db_test_one_object(acpi_handle obj_handle, { struct acpi_namespace_node *node; union acpi_operand_object *obj_desc; - union acpi_operand_object *region_obj; acpi_object_type local_type; u32 bit_length = 0; u32 byte_length = 0; @@ -281,18 +282,21 @@ acpi_db_test_one_object(acpi_handle obj_handle, break; case ACPI_TYPE_FIELD_UNIT: - case ACPI_TYPE_BUFFER_FIELD: case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_LOCAL_INDEX_FIELD: case ACPI_TYPE_LOCAL_BANK_FIELD: + local_type = ACPI_TYPE_FIELD_UNIT; + break; + + case ACPI_TYPE_BUFFER_FIELD: + /* + * The returned object will be a Buffer if the field length + * is larger than the size of an Integer (32 or 64 bits + * depending on the DSDT version). + */ local_type = ACPI_TYPE_INTEGER; if (obj_desc) { - /* - * Returned object will be a Buffer if the field length - * is larger than the size of an Integer (32 or 64 bits - * depending on the DSDT version). - */ bit_length = obj_desc->common_field.bit_length; byte_length = ACPI_ROUND_BITS_UP_TO_BYTES(bit_length); if (bit_length > acpi_gbl_integer_bit_width) { @@ -303,7 +307,7 @@ acpi_db_test_one_object(acpi_handle obj_handle, default: - /* Ignore all other types */ + /* Ignore all non-data types - Methods, Devices, Scopes, etc. 
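The dbtest.c hunk above spells out the rule used when the debugger exercises a BufferField: the read yields an Integer unless the field is wider than the interpreter's integer width (32 or 64 bits, depending on the DSDT revision), in which case a Buffer comes back. A minimal sketch of that decision, with invented names:

#include <stdio.h>

/* Object type a BufferField read is expected to yield, per the rule
 * described in the dbtest.c comment above. */
static const char *buffer_field_result_type(unsigned int field_bits,
					     unsigned int integer_bits)
{
	return (field_bits > integer_bits) ? "Buffer" : "Integer";
}

int main(void)
{
	printf("40-bit field, 32-bit integers: %s\n",
	       buffer_field_result_type(40, 32));
	printf("40-bit field, 64-bit integers: %s\n",
	       buffer_field_result_type(40, 64));
	return 0;
}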
*/ return (AE_OK); } @@ -314,40 +318,10 @@ acpi_db_test_one_object(acpi_handle obj_handle, acpi_ut_get_type_name(node->type), node->name.ascii); if (!obj_desc) { - acpi_os_printf(" Ignoring, no attached object\n"); + acpi_os_printf(" No attached sub-object, ignoring\n"); return (AE_OK); } - /* - * Check for unsupported region types. Note: acpi_exec simulates - * access to system_memory, system_IO, PCI_Config, and EC. - */ - switch (node->type) { - case ACPI_TYPE_LOCAL_REGION_FIELD: - - region_obj = obj_desc->field.region_obj; - switch (region_obj->region.space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - case ACPI_ADR_SPACE_SYSTEM_IO: - case ACPI_ADR_SPACE_PCI_CONFIG: - - break; - - default: - - acpi_os_printf - (" %s space is not supported in this command [%4.4s]\n", - acpi_ut_get_region_name(region_obj->region. - space_id), - region_obj->region.node->name.ascii); - return (AE_OK); - } - break; - - default: - break; - } - /* At this point, we have resolved the object to one of the major types */ switch (local_type) { @@ -371,6 +345,11 @@ acpi_db_test_one_object(acpi_handle obj_handle, status = acpi_db_test_package_type(node); break; + case ACPI_TYPE_FIELD_UNIT: + + status = acpi_db_test_field_unit_type(obj_desc); + break; + default: acpi_os_printf(" Ignoring, type not implemented (%2.2X)", @@ -382,24 +361,8 @@ acpi_db_test_one_object(acpi_handle obj_handle, if (ACPI_FAILURE(status)) { status = AE_OK; - goto exit; - } - - switch (node->type) { - case ACPI_TYPE_LOCAL_REGION_FIELD: - - region_obj = obj_desc->field.region_obj; - acpi_os_printf(" (%s)", - acpi_ut_get_region_name(region_obj->region. - space_id)); - - break; - - default: - break; } -exit: acpi_os_printf("\n"); return (status); } @@ -444,7 +407,7 @@ acpi_db_test_integer_type(struct acpi_namespace_node *node, u32 bit_length) return (status); } - acpi_os_printf(" (%4.4X/%3.3X) %8.8X%8.8X", + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " %8.8X%8.8X", bit_length, ACPI_ROUND_BITS_UP_TO_BYTES(bit_length), ACPI_FORMAT_UINT64(temp1->integer.value)); @@ -558,8 +521,9 @@ acpi_db_test_buffer_type(struct acpi_namespace_node *node, u32 bit_length) /* Emit a few bytes of the buffer */ - acpi_os_printf(" (%4.4X/%3.3X)", bit_length, temp1->buffer.length); - for (i = 0; ((i < 4) && (i < byte_length)); i++) { + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT, bit_length, + temp1->buffer.length); + for (i = 0; ((i < 8) && (i < byte_length)); i++) { acpi_os_printf(" %2.2X", temp1->buffer.pointer[i]); } acpi_os_printf("... "); @@ -665,8 +629,9 @@ acpi_db_test_string_type(struct acpi_namespace_node *node, u32 byte_length) return (status); } - acpi_os_printf(" (%4.4X/%3.3X) \"%s\"", (temp1->string.length * 8), - temp1->string.length, temp1->string.pointer); + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " \"%s\"", + (temp1->string.length * 8), temp1->string.length, + temp1->string.pointer); /* Write a new value */ @@ -750,11 +715,78 @@ static acpi_status acpi_db_test_package_type(struct acpi_namespace_node *node) return (status); } - acpi_os_printf(" %8.8X Elements", temp1->package.count); + acpi_os_printf(" %.2X Elements", temp1->package.count); acpi_os_free(temp1); return (status); } +/******************************************************************************* + * + * FUNCTION: acpi_db_test_field_unit_type + * + * PARAMETERS: obj_desc - A field unit object + * + * RETURN: Status + * + * DESCRIPTION: Test read/write on a named field unit. 
+ * + ******************************************************************************/ + +static acpi_status +acpi_db_test_field_unit_type(union acpi_operand_object *obj_desc) +{ + union acpi_operand_object *region_obj; + u32 bit_length = 0; + u32 byte_length = 0; + acpi_status status = AE_OK; + union acpi_operand_object *ret_buffer_desc; + + /* Supported spaces are memory/io/pci_config */ + + region_obj = obj_desc->field.region_obj; + switch (region_obj->region.space_id) { + case ACPI_ADR_SPACE_SYSTEM_MEMORY: + case ACPI_ADR_SPACE_SYSTEM_IO: + case ACPI_ADR_SPACE_PCI_CONFIG: + + /* Need the interpreter to execute */ + + acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER); + acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); + + /* Exercise read-then-write */ + + status = + acpi_ex_read_data_from_field(NULL, obj_desc, + &ret_buffer_desc); + if (status == AE_OK) { + acpi_ex_write_data_to_field(ret_buffer_desc, obj_desc, + NULL); + acpi_ut_remove_reference(ret_buffer_desc); + } + + acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); + acpi_ut_release_mutex(ACPI_MTX_INTERPRETER); + + bit_length = obj_desc->common_field.bit_length; + byte_length = ACPI_ROUND_BITS_UP_TO_BYTES(bit_length); + + acpi_os_printf(ACPI_DEBUG_LENGTH_FORMAT " [%s]", bit_length, + byte_length, + acpi_ut_get_region_name(region_obj->region. + space_id)); + return (status); + + default: + + acpi_os_printf + (" %s address space is not supported in this command [%4.4s]", + acpi_ut_get_region_name(region_obj->region.space_id), + region_obj->region.node->name.ascii); + return (AE_OK); + } +} + /******************************************************************************* * * FUNCTION: acpi_db_read_from_object diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c index 6b15625e8099..85b34d02233e 100644 --- a/drivers/acpi/acpica/dsargs.c +++ b/drivers/acpi/acpica/dsargs.c @@ -4,7 +4,7 @@ * Module Name: dsargs - Support for execution of dynamic arguments for static * objects (regions, fields, buffer fields, etc.) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c index 0da96268deb5..4847f89c678c 100644 --- a/drivers/acpi/acpica/dscontrol.c +++ b/drivers/acpi/acpica/dscontrol.c @@ -4,7 +4,7 @@ * Module Name: dscontrol - Support for execution control opcodes - * if/else/while/return * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c index 9d33f0bb2885..0d3e1ced1f57 100644 --- a/drivers/acpi/acpica/dsdebug.c +++ b/drivers/acpi/acpica/dsdebug.c @@ -3,7 +3,7 @@ * * Module Name: dsdebug - Parser/Interpreter interface - debugging * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c index 30fe89545d6a..cf4e061bb0f0 100644 --- a/drivers/acpi/acpica/dsfield.c +++ b/drivers/acpi/acpica/dsfield.c @@ -3,7 +3,7 @@ * * Module Name: dsfield - Dispatcher field routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
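Both the new field-unit test above and the PCC field copies later in this series size their scratch buffers by rounding a field's bit length up to whole bytes (ACPI_ROUND_BITS_UP_TO_BYTES in the ACPICA headers). A simplified, runnable stand-in for that arithmetic:

#include <stdio.h>

/* Simplified stand-in for ACPI_ROUND_BITS_UP_TO_BYTES(): number of whole
 * bytes needed to hold the given number of bits. */
#define ROUND_BITS_UP_TO_BYTES(bits) (((bits) + 7u) / 8u)

int main(void)
{
	unsigned int samples[] = { 1, 8, 9, 33, 64, 65 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u bits -> %u bytes\n", samples[i],
		       ROUND_BITS_UP_TO_BYTES(samples[i]));
	return 0;
}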
* *****************************************************************************/ @@ -518,6 +518,13 @@ acpi_ds_create_field(union acpi_parse_object *op, info.region_node = region_node; status = acpi_ds_get_field_names(&info, walk_state, arg->common.next); + if (info.region_node->object->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM + && !(region_node->object->field.internal_pcc_buffer = + ACPI_ALLOCATE_ZEROED(info.region_node->object->region. + length))) { + return_ACPI_STATUS(AE_NO_MEMORY); + } return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c index e8de1b0ce2f5..a4a24ffe5fae 100644 --- a/drivers/acpi/acpica/dsinit.c +++ b/drivers/acpi/acpica/dsinit.c @@ -3,7 +3,7 @@ * * Module Name: dsinit - Object initialization namespace walk * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index c1a4d02fafd5..f59b4d944f7f 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -3,7 +3,7 @@ * * Module Name: dsmethod - Parser/Interpreter interface - control method parsing * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c index 6a9cc613adaa..179129a2deb1 100644 --- a/drivers/acpi/acpica/dsobject.c +++ b/drivers/acpi/acpica/dsobject.c @@ -3,7 +3,7 @@ * * Module Name: dsobject - Dispatcher object management routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c index 78f9de260d5f..10f32b62608e 100644 --- a/drivers/acpi/acpica/dsopcode.c +++ b/drivers/acpi/acpica/dsopcode.c @@ -3,7 +3,7 @@ * * Module Name: dsopcode - Dispatcher support for regions and fields * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
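The dsfield.c hunk above allocates a zero-filled internal_pcc_buffer covering the whole operation region whenever a field is created inside a PlatformCommChannel (PCC) region, so later field reads and writes can be staged in memory. A rough, self-contained sketch of that idea — the struct and function names below are inventions, not ACPICA's:

#include <stdio.h>
#include <stdlib.h>

struct op_region {
	int is_pcc;             /* space id == PlatformCommChannel? */
	size_t length;          /* region length in bytes */
	unsigned char *staging; /* plays the role of internal_pcc_buffer */
};

/* Attach a zero-filled staging buffer covering the whole region,
 * but only for PCC regions; other address spaces are left alone. */
static int attach_pcc_buffer(struct op_region *rgn)
{
	if (!rgn->is_pcc)
		return 0;
	rgn->staging = calloc(1, rgn->length);
	return rgn->staging ? 0 : -1;   /* -1 stands in for AE_NO_MEMORY */
}

int main(void)
{
	struct op_region rgn = { 1, 64, NULL };

	if (attach_pcc_buffer(&rgn) == 0 && rgn.staging)
		printf("attached %zu-byte PCC staging buffer\n", rgn.length);
	free(rgn.staging);
	return 0;
}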
* *****************************************************************************/ @@ -130,8 +130,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode, /* Must have a valid (>0) bit count */ if (bit_count == 0) { - ACPI_ERROR((AE_INFO, - "Attempt to CreateField of length zero")); + ACPI_BIOS_ERROR((AE_INFO, + "Attempt to CreateField of length zero")); status = AE_AML_OPERAND_VALUE; goto cleanup; } @@ -194,12 +194,13 @@ acpi_ds_init_buffer_field(u16 aml_opcode, /* Entire field must fit within the current length of the buffer */ if ((bit_offset + bit_count) > (8 * (u32)buffer_desc->buffer.length)) { - ACPI_ERROR((AE_INFO, - "Field [%4.4s] at bit offset/length %u/%u " - "exceeds size of target Buffer (%u bits)", - acpi_ut_get_node_name(result_desc), bit_offset, - bit_count, 8 * (u32)buffer_desc->buffer.length)); status = AE_AML_BUFFER_LIMIT; + ACPI_BIOS_EXCEPTION((AE_INFO, status, + "Field [%4.4s] at bit offset/length %u/%u " + "exceeds size of target Buffer (%u bits)", + acpi_ut_get_node_name(result_desc), + bit_offset, bit_count, + 8 * (u32)buffer_desc->buffer.length)); goto cleanup; } @@ -355,6 +356,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, union acpi_operand_object *operand_desc; struct acpi_namespace_node *node; union acpi_parse_object *next_op; + acpi_adr_space_type space_id; ACPI_FUNCTION_TRACE_PTR(ds_eval_region_operands, op); @@ -367,6 +369,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, /* next_op points to the op that holds the space_ID */ next_op = op->common.value.arg; + space_id = (acpi_adr_space_type)next_op->common.value.integer; /* next_op points to address op */ @@ -402,6 +405,15 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state, obj_desc->region.length = (u32) operand_desc->integer.value; acpi_ut_remove_reference(operand_desc); + /* A zero-length operation region is unusable. Just warn */ + + if (!obj_desc->region.length + && (space_id < ACPI_NUM_PREDEFINED_REGIONS)) { + ACPI_WARNING((AE_INFO, + "Operation Region [%4.4s] has zero length (SpaceId %X)", + node->name.ascii, space_id)); + } + /* * Get the address and save it * (at top of stack - 1) diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c index 584853385268..997faa10f615 100644 --- a/drivers/acpi/acpica/dspkginit.c +++ b/drivers/acpi/acpica/dspkginit.c @@ -3,7 +3,7 @@ * * Module Name: dspkginit - Completion of deferred package initialization * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 1504b93cc5f4..d75aae304595 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -4,7 +4,7 @@ * Module Name: dswexec - Dispatcher method execution callbacks; * dispatch to interpreter. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index e2ef09643d50..c88fd31208a5 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -3,7 +3,7 @@ * * Module Name: dswload - Dispatcher first pass namespace load callbacks * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
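The dsopcode.c hunk above reclassifies two conditions as firmware (ACPI_BIOS_*) diagnostics — a CreateField with a zero bit count, and a buffer field that runs past the end of its target buffer — and additionally warns on zero-length operation regions. A standalone sketch of the two field checks, with invented names and status codes:

#include <stdio.h>

enum field_status {
	FIELD_OK = 0,
	FIELD_ZERO_LENGTH,	/* "Attempt to CreateField of length zero" */
	FIELD_EXCEEDS_BUFFER	/* field runs past the end of the target buffer */
};

static enum field_status check_buffer_field(unsigned int bit_offset,
					    unsigned int bit_count,
					    unsigned int buffer_bytes)
{
	if (bit_count == 0)
		return FIELD_ZERO_LENGTH;
	if (bit_offset + bit_count > 8u * buffer_bytes)
		return FIELD_EXCEEDS_BUFFER;
	return FIELD_OK;
}

int main(void)
{
	printf("%d\n", check_buffer_field(0, 0, 16));    /* 1: zero length */
	printf("%d\n", check_buffer_field(120, 16, 16)); /* 2: past a 128-bit buffer */
	printf("%d\n", check_buffer_field(0, 64, 16));   /* 0: fits */
	return 0;
}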
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index 9a309f5c4de8..935a8e2623e4 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -3,7 +3,7 @@ * * Module Name: dswload2 - Dispatcher second pass namespace load callbacks * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -24,7 +24,7 @@ ACPI_MODULE_NAME("dswload2") * FUNCTION: acpi_ds_load2_begin_op * * PARAMETERS: walk_state - Current state of the parse tree walk - * out_op - Wher to return op if a new one is created + * out_op - Where to return op if a new one is created * * RETURN: Status * diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c index 7592176a8fa2..39acf7b286da 100644 --- a/drivers/acpi/acpica/dswscope.c +++ b/drivers/acpi/acpica/dswscope.c @@ -3,7 +3,7 @@ * * Module Name: dswscope - Scope stack manipulation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c index 4c1ec202d5ab..de79f835a373 100644 --- a/drivers/acpi/acpica/dswstate.c +++ b/drivers/acpi/acpica/dswstate.c @@ -3,7 +3,7 @@ * * Module Name: dswstate - Dispatcher parse tree walk management routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c index b3d07cc14d75..9e2f5a05c066 100644 --- a/drivers/acpi/acpica/evevent.c +++ b/drivers/acpi/acpica/evevent.c @@ -3,7 +3,7 @@ * * Module Name: evevent - Fixed Event handling and dispatch * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c index 1b8a662a14a9..5c77bee5d31f 100644 --- a/drivers/acpi/acpica/evglock.c +++ b/drivers/acpi/acpica/evglock.c @@ -3,7 +3,7 @@ * * Module Name: evglock - Global Lock support * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index e10fec99a182..62d3aa74277b 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -3,7 +3,7 @@ * * Module Name: evgpe - General Purpose Event handling and dispatch * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -801,7 +801,7 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, dispatch.handler-> context); - /* If requested, clear (if level-triggered) and reenable the GPE */ + /* If requested, clear (if level-triggered) and re-enable the GPE */ if (return_value & ACPI_REENABLE_GPE) { (void)acpi_ev_finish_gpe(gpe_event_info); diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c index b253063b09d3..328d1d6123ad 100644 --- a/drivers/acpi/acpica/evgpeblk.c +++ b/drivers/acpi/acpica/evgpeblk.c @@ -3,7 +3,7 @@ * * Module Name: evgpeblk - GPE block creation and initialization. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c index 1f686750bb1a..c92d2f6ebe01 100644 --- a/drivers/acpi/acpica/evgpeinit.c +++ b/drivers/acpi/acpica/evgpeinit.c @@ -3,7 +3,7 @@ * * Module Name: evgpeinit - System GPE initialization and update * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c index 0fb6c70f44ed..917892227e09 100644 --- a/drivers/acpi/acpica/evgpeutil.c +++ b/drivers/acpi/acpica/evgpeutil.c @@ -3,7 +3,7 @@ * * Module Name: evgpeutil - GPE utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c index 4ed1e67db6be..3ef4e27995f0 100644 --- a/drivers/acpi/acpica/evhandler.c +++ b/drivers/acpi/acpica/evhandler.c @@ -3,7 +3,7 @@ * * Module Name: evhandler - Support for Address Space handlers * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c index baadd635b5af..d45f7639f7ee 100644 --- a/drivers/acpi/acpica/evmisc.c +++ b/drivers/acpi/acpica/evmisc.c @@ -3,7 +3,7 @@ * * Module Name: evmisc - Miscellaneous event manager support functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c index 49decca4e08f..45dc797df05d 100644 --- a/drivers/acpi/acpica/evregion.c +++ b/drivers/acpi/acpica/evregion.c @@ -3,7 +3,7 @@ * * Module Name: evregion - Operation Region support * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -250,7 +250,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj, /* * For handlers other than the default (supplied) handlers, we must * exit the interpreter because the handler *might* block -- we don't - * know what it will do, so we can't hold the lock on the intepreter. + * know what it will do, so we can't hold the lock on the interpreter. 
*/ acpi_ex_exit_interpreter(); } diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c index 17df5dacd43c..0b47bbcd2a23 100644 --- a/drivers/acpi/acpica/evrgnini.c +++ b/drivers/acpi/acpica/evrgnini.c @@ -3,7 +3,7 @@ * * Module Name: evrgnini- ACPI address_space (op_region) init * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -516,25 +516,6 @@ acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj) handler_obj = obj_desc->common_notify.handler; break; - case ACPI_TYPE_METHOD: - /* - * If we are executing module level code, the original - * Node's object was replaced by this Method object and we - * saved the handler in the method object. - * - * Note: Only used for the legacy MLC support. Will - * be removed in the future. - * - * See acpi_ns_exec_module_code - */ - if (!acpi_gbl_execute_tables_as_methods && - obj_desc->method. - info_flags & ACPI_METHOD_MODULE_LEVEL) { - handler_obj = - obj_desc->method.dispatch.handler; - } - break; - default: /* Ignore other objects */ diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c index febc332b00ac..3df00eb6621b 100644 --- a/drivers/acpi/acpica/evxface.c +++ b/drivers/acpi/acpica/evxface.c @@ -3,7 +3,7 @@ * * Module Name: evxface - External interfaces for ACPI events * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 970e940bdb17..e528fe56b755 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -3,7 +3,7 @@ * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index b2d5f66cc1b0..30a083902f52 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -3,7 +3,7 @@ * * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -669,9 +669,9 @@ ACPI_EXPORT_SYMBOL(acpi_dispatch_gpe) * * RETURN: Status * - * DESCRIPTION: Clear and conditionally reenable a GPE. This completes the GPE + * DESCRIPTION: Clear and conditionally re-enable a GPE. This completes the GPE * processing. Intended for use by asynchronous host-installed - * GPE handlers. The GPE is only reenabled if the enable_for_run bit + * GPE handlers. The GPE is only re-enabled if the enable_for_run bit * is set in the GPE info. * ******************************************************************************/ diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 3b3a25d9f0e6..47265b073e6f 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -4,7 +4,7 @@ * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and * Address Spaces. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c index 5e75c510ca25..c7af07566b7b 100644 --- a/drivers/acpi/acpica/exconcat.c +++ b/drivers/acpi/acpica/exconcat.c @@ -3,7 +3,7 @@ * * Module Name: exconcat - Concatenate-type AML operators * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index 2373a7492151..587aeeeb5070 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c @@ -3,7 +3,7 @@ * * Module Name: exconfig - Namespace reconfiguration (Load/Unload opcodes) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c index 1a70b80cc406..ca2966bacb50 100644 --- a/drivers/acpi/acpica/exconvrt.c +++ b/drivers/acpi/acpica/exconvrt.c @@ -3,7 +3,7 @@ * * Module Name: exconvrt - Object conversion routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -520,7 +520,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc, for (i = 0; i < obj_desc->buffer.length; i++) { if (base == 16) { - /* Emit 0x prefix for explict/implicit hex conversion */ + /* Emit 0x prefix for explicit/implicit hex conversion */ *new_buf++ = '0'; *new_buf++ = 'x'; diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c index 3304c6b1e8a7..f376fc00064e 100644 --- a/drivers/acpi/acpica/excreate.c +++ b/drivers/acpi/acpica/excreate.c @@ -3,7 +3,7 @@ * * Module Name: excreate - Named object creation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c index ebbc244039ab..b1aeec8cac55 100644 --- a/drivers/acpi/acpica/exdebug.c +++ b/drivers/acpi/acpica/exdebug.c @@ -3,7 +3,7 @@ * * Module Name: exdebug - Support for stores to the AML Debug Object * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c index f71dfa1e90e1..6526b2deeaad 100644 --- a/drivers/acpi/acpica/exdump.c +++ b/drivers/acpi/acpica/exdump.c @@ -3,7 +3,7 @@ * * Module Name: exdump - Interpreter debug output routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c index e5798f15793a..d3d2dbfba680 100644 --- a/drivers/acpi/acpica/exfield.c +++ b/drivers/acpi/acpica/exfield.c @@ -3,7 +3,7 @@ * * Module Name: exfield - AML execution - field_unit read/write * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -41,6 +41,17 @@ const u8 acpi_protocol_lengths[] = { 0xFF /* F - ATTRIB_RAW_PROCESS_BYTES */ }; +#define PCC_MASTER_SUBSPACE 3 + +/* + * The following macros determine a given offset is a COMD field. + * According to the specification, generic subspaces (types 0-2) contains a + * 2-byte COMD field at offset 4 and master subspaces (type 3) contains a 4-byte + * COMD field starting at offset 12. + */ +#define GENERIC_SUBSPACE_COMMAND(a) (4 == a || a == 5) +#define MASTER_SUBSPACE_COMMAND(a) (12 <= a && a <= 15) + /******************************************************************************* * * FUNCTION: acpi_ex_get_protocol_buffer_length @@ -177,6 +188,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state, status = acpi_ex_read_gpio(obj_desc, buffer); goto exit; + } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM)) { + /* + * Reading from a PCC field unit does not require the handler because + * it only requires reading from the internal_pcc_buffer. + */ + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, + "PCC FieldRead bits %u\n", + obj_desc->field.bit_length)); + + memcpy(buffer, + obj_desc->field.region_obj->field.internal_pcc_buffer + + obj_desc->field.base_byte_offset, + (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field. + bit_length)); + + *ret_buffer_desc = buffer_desc; + return AE_OK; } ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, @@ -229,6 +259,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, { acpi_status status; u32 buffer_length; + u32 data_length; void *buffer; ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc); @@ -272,6 +303,44 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc, acpi_ex_write_serial_bus(source_desc, obj_desc, result_desc); return_ACPI_STATUS(status); + } else if ((obj_desc->common.type == ACPI_TYPE_LOCAL_REGION_FIELD) && + (obj_desc->field.region_obj->region.space_id == + ACPI_ADR_SPACE_PLATFORM_COMM)) { + /* + * According to the spec a write to the COMD field will invoke the + * region handler. Otherwise, write to the pcc_internal buffer. This + * implementation will use the offsets specified rather than the name + * of the field. This is considered safer because some firmware tools + * are known to obfiscate named objects. + */ + data_length = + (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field. + bit_length); + memcpy(obj_desc->field.region_obj->field.internal_pcc_buffer + + obj_desc->field.base_byte_offset, + source_desc->buffer.pointer, data_length); + + if ((obj_desc->field.region_obj->region.address == + PCC_MASTER_SUBSPACE + && MASTER_SUBSPACE_COMMAND(obj_desc->field. + base_byte_offset)) + || GENERIC_SUBSPACE_COMMAND(obj_desc->field. + base_byte_offset)) { + + /* Perform the write */ + + ACPI_DEBUG_PRINT((ACPI_DB_BFIELD, + "PCC COMD field has been written. Invoking PCC handler now.\n")); + + status = + acpi_ex_access_region(obj_desc, 0, + (u64 *)obj_desc->field. + region_obj->field. + internal_pcc_buffer, + ACPI_WRITE); + return_ACPI_STATUS(status); + } + return (AE_OK); } /* Get a pointer to the data to be written */ diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index 516994133128..95a0dcb4f7b9 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -3,7 +3,7 @@ * * Module Name: exfldio - Aml Field I/O * - * Copyright (C) 2000 - 2018, Intel Corp. 
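In the exfield.c hunk above, writes to a PCC field normally just update the staged internal_pcc_buffer; only writes that land on the command (COMD) field — bytes 4-5 of a generic subspace (types 0-2) or bytes 12-15 of a master subspace (type 3) — invoke the region handler. A simplified, self-contained version of that offset test (the subspace_type parameter and function name are invented; the offset constants mirror the macros in the hunk):

#include <stdbool.h>
#include <stdio.h>

#define GENERIC_SUBSPACE_COMMAND(a)  ((a) == 4 || (a) == 5)   /* 2-byte COMD at offset 4 */
#define MASTER_SUBSPACE_COMMAND(a)   ((a) >= 12 && (a) <= 15) /* 4-byte COMD at offset 12 */

/* Decide whether a write at byte_offset should invoke the PCC region
 * handler rather than only updating the staging buffer. */
static bool pcc_write_invokes_handler(unsigned int subspace_type,
				      unsigned int byte_offset)
{
	if (subspace_type == 3)                       /* master subspace */
		return MASTER_SUBSPACE_COMMAND(byte_offset);
	return GENERIC_SUBSPACE_COMMAND(byte_offset); /* generic subspaces, types 0-2 */
}

int main(void)
{
	printf("generic, offset 4:  %d\n", pcc_write_invokes_handler(0, 4));  /* 1 */
	printf("generic, offset 8:  %d\n", pcc_write_invokes_handler(0, 8));  /* 0 */
	printf("master,  offset 12: %d\n", pcc_write_invokes_handler(3, 12)); /* 1 */
	return 0;
}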
+ * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c index d91f15cdf3ae..60e854965af9 100644 --- a/drivers/acpi/acpica/exmisc.c +++ b/drivers/acpi/acpica/exmisc.c @@ -3,7 +3,7 @@ * * Module Name: exmisc - ACPI AML (p-code) execution - specific opcodes * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c index c06079774bad..775cd62af5b3 100644 --- a/drivers/acpi/acpica/exmutex.c +++ b/drivers/acpi/acpica/exmutex.c @@ -3,7 +3,7 @@ * * Module Name: exmutex - ASL Mutex Acquire/Release functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c index 7eed79dcda83..bd68d66e89f0 100644 --- a/drivers/acpi/acpica/exnames.c +++ b/drivers/acpi/acpica/exnames.c @@ -3,7 +3,7 @@ * * Module Name: exnames - interpreter/scanner name load/execute * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c index ba9fbae0cf91..06e35ea09823 100644 --- a/drivers/acpi/acpica/exoparg1.c +++ b/drivers/acpi/acpica/exoparg1.c @@ -3,7 +3,7 @@ * * Module Name: exoparg1 - AML execution - opcodes with 1 argument * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c index 3a477566ba1b..5e4a31a11df4 100644 --- a/drivers/acpi/acpica/exoparg2.c +++ b/drivers/acpi/acpica/exoparg2.c @@ -3,7 +3,7 @@ * * Module Name: exoparg2 - AML execution - opcodes with 2 arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -390,10 +390,10 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state) /* Failure means that the Index was beyond the end of the object */ if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "Index (0x%X%8.8X) is beyond end of object (length 0x%X)", - ACPI_FORMAT_UINT64(index), - (u32)length)); + ACPI_BIOS_EXCEPTION((AE_INFO, status, + "Index (0x%X%8.8X) is beyond end of object (length 0x%X)", + ACPI_FORMAT_UINT64(index), + (u32)length)); goto cleanup; } diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c index 764fa6f924ff..a4ebce417930 100644 --- a/drivers/acpi/acpica/exoparg3.c +++ b/drivers/acpi/acpica/exoparg3.c @@ -3,7 +3,7 @@ * * Module Name: exoparg3 - AML execution - opcodes with 3 arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c index 3941525f3d6b..31385a0b2dab 100644 --- a/drivers/acpi/acpica/exoparg6.c +++ b/drivers/acpi/acpica/exoparg6.c @@ -3,7 +3,7 @@ * * Module Name: exoparg6 - AML execution - opcodes with 6 arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c index 738f3c732363..728d752f7adc 100644 --- a/drivers/acpi/acpica/exprep.c +++ b/drivers/acpi/acpica/exprep.c @@ -3,7 +3,7 @@ * * Module Name: exprep - ACPI AML field prep utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c index 2c58f5e00b1a..c08521194b29 100644 --- a/drivers/acpi/acpica/exregion.c +++ b/drivers/acpi/acpica/exregion.c @@ -3,7 +3,7 @@ * * Module Name: exregion - ACPI default op_region (address space) handlers * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c index ea4b0fe674f1..b223d01e6bf8 100644 --- a/drivers/acpi/acpica/exresnte.c +++ b/drivers/acpi/acpica/exresnte.c @@ -3,7 +3,7 @@ * * Module Name: exresnte - AML Interpreter object resolution * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c index 5e42c7de46fa..36da5c0ef69c 100644 --- a/drivers/acpi/acpica/exresolv.c +++ b/drivers/acpi/acpica/exresolv.c @@ -3,7 +3,7 @@ * * Module Name: exresolv - AML Interpreter object resolution * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c index d94190bc5985..bdfe4d33b483 100644 --- a/drivers/acpi/acpica/exresop.c +++ b/drivers/acpi/acpica/exresop.c @@ -3,7 +3,7 @@ * * Module Name: exresop - AML Interpreter operand/object resolution * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c index ec61553c4483..c5aa4b0deb70 100644 --- a/drivers/acpi/acpica/exserial.c +++ b/drivers/acpi/acpica/exserial.c @@ -3,7 +3,7 @@ * * Module Name: exserial - field_unit support for serial address spaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -21,7 +21,7 @@ ACPI_MODULE_NAME("exserial") * FUNCTION: acpi_ex_read_gpio * * PARAMETERS: obj_desc - The named field to read - * buffer - Where the return data is returnd + * buffer - Where the return data is returned * * RETURN: Status * diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c index 75d5665b7b2f..7f3c3571c292 100644 --- a/drivers/acpi/acpica/exstore.c +++ b/drivers/acpi/acpica/exstore.c @@ -3,7 +3,7 @@ * * Module Name: exstore - AML Interpreter object store support * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c index 31cba19652ed..4e43c8277f07 100644 --- a/drivers/acpi/acpica/exstoren.c +++ b/drivers/acpi/acpica/exstoren.c @@ -4,7 +4,7 @@ * Module Name: exstoren - AML Interpreter object store support, * Store to Node (namespace object) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c index 4cd82ff509bc..dc9e2b1c1ad9 100644 --- a/drivers/acpi/acpica/exstorob.c +++ b/drivers/acpi/acpica/exstorob.c @@ -3,7 +3,7 @@ * * Module Name: exstorob - AML object store support, store to object * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c index ec8b5a22cad4..a538f7799b78 100644 --- a/drivers/acpi/acpica/exsystem.c +++ b/drivers/acpi/acpica/exsystem.c @@ -3,7 +3,7 @@ * * Module Name: exsystem - Interface to OS services * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c index 9bd3fa56b51a..db7f93ca539f 100644 --- a/drivers/acpi/acpica/extrace.c +++ b/drivers/acpi/acpica/extrace.c @@ -3,7 +3,7 @@ * * Module Name: extrace - Support for interpreter execution tracing * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index bd22e27adf9b..75380be1c2ef 100644 --- a/drivers/acpi/acpica/exutils.c +++ b/drivers/acpi/acpica/exutils.c @@ -3,7 +3,7 @@ * * Module Name: exutils - interpreter/scanner utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -160,7 +160,7 @@ u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc) * RETURN: None * * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field - * flags specifiy that it is to be obtained before field access. + * flags specify that it is to be obtained before field access. 
* ******************************************************************************/ diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c index 525e6ea5c114..926f7e080f22 100644 --- a/drivers/acpi/acpica/hwacpi.c +++ b/drivers/acpi/acpica/hwacpi.c @@ -3,7 +3,7 @@ * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c index e0ad3f11142e..dee3affaca49 100644 --- a/drivers/acpi/acpica/hwesleep.c +++ b/drivers/acpi/acpica/hwesleep.c @@ -4,7 +4,7 @@ * Name: hwesleep.c - ACPI Hardware Sleep/Wake Support functions for the * extended FADT-V5 sleep registers. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 2d2e2e41a685..565bd3f29f31 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -3,7 +3,7 @@ * * Module Name: hwgpe - Low level GPE enable/disable/clear functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c index d8b8fc2ff563..b62db8ec446f 100644 --- a/drivers/acpi/acpica/hwsleep.c +++ b/drivers/acpi/acpica/hwsleep.c @@ -4,7 +4,7 @@ * Name: hwsleep.c - ACPI Hardware Sleep/Wake Support functions for the * original/legacy sleep/PM registers. * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c index 5d5e27146fc2..2fb9f75d71c5 100644 --- a/drivers/acpi/acpica/hwtimer.c +++ b/drivers/acpi/acpica/hwtimer.c @@ -3,7 +3,7 @@ * * Name: hwtimer.c - ACPI Power Management Timer Interface * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index 24f9b61aa404..cd576153257c 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -3,7 +3,7 @@ * * Module Name: hwvalid - I/O request validation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 6e39a771a56e..c4fd97104024 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -3,7 +3,7 @@ * * Module Name: hwxface - Public ACPICA hardware interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 3f22f7dd4556..abbf9702aa7f 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -3,7 +3,7 @@ * * Name: hwxfsleep.c - ACPI Hardware Sleep/Wake External Interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. 
+ * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -23,33 +23,6 @@ acpi_hw_set_firmware_waking_vector(struct acpi_table_facs *facs, acpi_physical_address physical_address64); #endif -static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); - -/* - * Dispatch table used to efficiently branch to the various sleep - * functions. - */ -#define ACPI_SLEEP_FUNCTION_ID 0 -#define ACPI_WAKE_PREP_FUNCTION_ID 1 -#define ACPI_WAKE_FUNCTION_ID 2 - -/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */ - -static struct acpi_sleep_functions acpi_sleep_dispatch[] = { - {ACPI_STRUCT_INIT(legacy_function, - ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)), - ACPI_STRUCT_INIT(extended_function, - acpi_hw_extended_sleep)}, - {ACPI_STRUCT_INIT(legacy_function, - ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)), - ACPI_STRUCT_INIT(extended_function, - acpi_hw_extended_wake_prep)}, - {ACPI_STRUCT_INIT(legacy_function, - ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)), - ACPI_STRUCT_INIT(extended_function, - acpi_hw_extended_wake)} -}; - /* * These functions are removed for the ACPI_REDUCED_HARDWARE case: * acpi_set_firmware_waking_vector @@ -209,53 +182,6 @@ acpi_status acpi_enter_sleep_state_s4bios(void) ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) #endif /* !ACPI_REDUCED_HARDWARE */ -/******************************************************************************* - * - * FUNCTION: acpi_hw_sleep_dispatch - * - * PARAMETERS: sleep_state - Which sleep state to enter/exit - * function_id - Sleep, wake_prep, or Wake - * - * RETURN: Status from the invoked sleep handling function. - * - * DESCRIPTION: Dispatch a sleep/wake request to the appropriate handling - * function. - * - ******************************************************************************/ -static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) -{ - acpi_status status; - struct acpi_sleep_functions *sleep_functions = - &acpi_sleep_dispatch[function_id]; - -#if (!ACPI_REDUCED_HARDWARE) - /* - * If the Hardware Reduced flag is set (from the FADT), we must - * use the extended sleep registers (FADT). Note: As per the ACPI - * specification, these extended registers are to be used for HW-reduced - * platforms only. They are not general-purpose replacements for the - * legacy PM register sleep support. 
- */ - if (acpi_gbl_reduced_hardware) { - status = sleep_functions->extended_function(sleep_state); - } else { - /* Legacy sleep */ - - status = sleep_functions->legacy_function(sleep_state); - } - - return (status); - -#else - /* - * For the case where reduced-hardware-only code is being generated, - * we know that only the extended sleep registers are available - */ - status = sleep_functions->extended_function(sleep_state); - return (status); - -#endif /* !ACPI_REDUCED_HARDWARE */ -} /******************************************************************************* * @@ -362,7 +288,12 @@ acpi_status acpi_enter_sleep_state(u8 sleep_state) return_ACPI_STATUS(AE_AML_OPERAND_VALUE); } - status = acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); +#if !ACPI_REDUCED_HARDWARE + if (!acpi_gbl_reduced_hardware) + status = acpi_hw_legacy_sleep(sleep_state); + else +#endif + status = acpi_hw_extended_sleep(sleep_state); return_ACPI_STATUS(status); } @@ -388,8 +319,12 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); - status = - acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); +#if !ACPI_REDUCED_HARDWARE + if (!acpi_gbl_reduced_hardware) + status = acpi_hw_legacy_wake_prep(sleep_state); + else +#endif + status = acpi_hw_extended_wake_prep(sleep_state); return_ACPI_STATUS(status); } @@ -413,7 +348,12 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); - status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID); +#if !ACPI_REDUCED_HARDWARE + if (!acpi_gbl_reduced_hardware) + status = acpi_hw_legacy_wake(sleep_state); + else +#endif + status = acpi_hw_extended_wake(sleep_state); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c index b9ede797b654..0e97ed38973f 100644 --- a/drivers/acpi/acpica/nsarguments.c +++ b/drivers/acpi/acpica/nsarguments.c @@ -3,7 +3,7 @@ * * Module Name: nsarguments - Validation of args for ACPI predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c index f9527346b0f7..14cbf63f1991 100644 --- a/drivers/acpi/acpica/nsconvert.c +++ b/drivers/acpi/acpica/nsconvert.c @@ -4,7 +4,7 @@ * Module Name: nsconvert - Object conversions for objects returned by * predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 90ccffcd770b..15070bd0c28a 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c index 2b291c500fb0..73e5c83c8c9f 100644 --- a/drivers/acpi/acpica/nsdumpdv.c +++ b/drivers/acpi/acpica/nsdumpdv.c @@ -3,7 +3,7 @@ * * Module Name: nsdump - table dumping routines for debug * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
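The hwxfsleep.c hunk above drops the acpi_sleep_dispatch function table and instead selects between the legacy and extended (FADT-V5) register paths inline: reduced-hardware-only builds compile only the extended path, while full builds check acpi_gbl_reduced_hardware at run time. A minimal sketch of that shape, with placeholder names (REDUCED_HARDWARE_ONLY, do_legacy_sleep and do_extended_sleep are inventions, not ACPICA symbols):

#include <stdio.h>

#ifndef REDUCED_HARDWARE_ONLY
#define REDUCED_HARDWARE_ONLY 0  /* build-time analogue of ACPI_REDUCED_HARDWARE */
#endif

static int reduced_hardware;     /* run-time flag, as read from the FADT */

static int do_legacy_sleep(unsigned char state)
{
	printf("legacy PM-register sleep, S%u\n", (unsigned int)state);
	return 0;
}

static int do_extended_sleep(unsigned char state)
{
	printf("extended FADT-V5 sleep, S%u\n", (unsigned int)state);
	return 0;
}

static int enter_sleep_state(unsigned char state)
{
#if !REDUCED_HARDWARE_ONLY
	if (!reduced_hardware)
		return do_legacy_sleep(state);  /* full-hardware platform */
#endif
	return do_extended_sleep(state);        /* HW-reduced platform */
}

int main(void)
{
	return enter_sleep_state(3);
}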
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c index d77257d1c827..19fb8dda870f 100644 --- a/drivers/acpi/acpica/nsinit.c +++ b/drivers/acpi/acpica/nsinit.c @@ -3,7 +3,7 @@ * * Module Name: nsinit - namespace initialization * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c index 04bc73e82aed..35fff5c75da1 100644 --- a/drivers/acpi/acpica/nsload.c +++ b/drivers/acpi/acpica/nsload.c @@ -3,7 +3,7 @@ * * Module Name: nsload - namespace loading/expanding/contracting procedures * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -75,7 +75,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node) /* * On error, delete any namespace objects created by this table. * We cannot initialize these objects, so delete them. There are - * a couple of expecially bad cases: + * a couple of especially bad cases: * AE_ALREADY_EXISTS - namespace collision. * AE_NOT_FOUND - the target of a Scope operator does not * exist. This target of Scope must already exist in the diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index 488ff39d86f7..c0b4f7bedfab 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c @@ -3,7 +3,7 @@ * * Module Name: nsparse - namespace interface to AML parser * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -253,61 +253,19 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) ACPI_FUNCTION_TRACE(ns_parse_table); - if (acpi_gbl_execute_tables_as_methods) { - /* - * This case executes the AML table as one large control method. - * The point of this is to execute any module-level code in-place - * as the table is parsed. Some AML code depends on this behavior. - * - * It is a run-time option at this time, but will eventually become - * the default. - * - * Note: This causes the table to only have a single-pass parse. - * However, this is compatible with other ACPI implementations. - */ - ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE, - "%s: **** Start table execution pass\n", - ACPI_GET_FUNCTION_NAME)); - - status = acpi_ns_execute_table(table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } else { - /* - * AML Parse, pass 1 - * - * In this pass, we load most of the namespace. Control methods - * are not parsed until later. A parse tree is not created. - * Instead, each Parser Op subtree is deleted when it is finished. - * This saves a great deal of memory, and allows a small cache of - * parse objects to service the entire parse. The second pass of - * the parse then performs another complete parse of the AML. - */ - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n")); - - status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, - table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } + /* + * Executes the AML table as one large control method. + * The point of this is to execute any module-level code in-place + * as the table is parsed. Some AML code depends on this behavior. 
+ * + * Note: This causes the table to only have a single-pass parse. + * However, this is compatible with other ACPI implementations. + */ + ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE, + "%s: **** Start table execution pass\n", + ACPI_GET_FUNCTION_NAME)); - /* - * AML Parse, pass 2 - * - * In this pass, we resolve forward references and other things - * that could not be completed during the first pass. - * Another complete parse of the AML is performed, but the - * overhead of this is compensated for by the fact that the - * parse objects are all cached. - */ - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n")); - status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, - table_index, start_node); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } + status = acpi_ns_execute_table(table_index, start_node); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c index 29c68b15a64f..2f9d93122d0c 100644 --- a/drivers/acpi/acpica/nspredef.c +++ b/drivers/acpi/acpica/nspredef.c @@ -3,7 +3,7 @@ * * Module Name: nspredef - Validation of ACPI predefined methods and objects * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 51523473e7fe..9a80e3b23496 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c @@ -3,7 +3,7 @@ * * Module Name: nsprepkg - Validation of package objects for predefined names * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c index ff2ab8fbec38..0aacfa48e20d 100644 --- a/drivers/acpi/acpica/nsrepair.c +++ b/drivers/acpi/acpica/nsrepair.c @@ -3,7 +3,7 @@ * * Module Name: nsrepair - Repair for objects returned by predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index a3bd6280882c..d5804a6d1d65 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -4,7 +4,7 @@ * Module Name: nsrepair2 - Repair for objects returned by specific * predefined methods * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c index a2bf4b2caa6c..e5cef1edf49f 100644 --- a/drivers/acpi/acpica/nsutils.c +++ b/drivers/acpi/acpica/nsutils.c @@ -4,7 +4,7 @@ * Module Name: nsutils - Utilities for accessing ACPI namespace, accessing * parents and siblings and Scope manipulation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -350,7 +350,7 @@ acpi_ns_internalize_name(const char *external_name, char **converted_name) * * FUNCTION: acpi_ns_externalize_name * - * PARAMETERS: internal_name_length - Lenth of the internal name below + * PARAMETERS: internal_name_length - Length of the internal name below * internal_name - Internal representation of name * converted_name_length - Where the length is returned * converted_name - Where the resulting external name diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c index e9a061da9bb2..ceea6af79d12 100644 --- a/drivers/acpi/acpica/nswalk.c +++ b/drivers/acpi/acpica/nswalk.c @@ -3,7 +3,7 @@ * * Module Name: nswalk - Functions for walking the ACPI namespace * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c index b2915c9cceaf..de2d3135d6a9 100644 --- a/drivers/acpi/acpica/nsxfname.c +++ b/drivers/acpi/acpica/nsxfname.c @@ -4,7 +4,7 @@ * Module Name: nsxfname - Public interfaces to the ACPI subsystem * ACPI Namespace oriented interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c index 176d28d60125..9d9d442cd999 100644 --- a/drivers/acpi/acpica/psargs.c +++ b/drivers/acpi/acpica/psargs.c @@ -3,7 +3,7 @@ * * Module Name: psargs - Parse AML opcode arguments * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c index e00d1af6fa80..207805047bc4 100644 --- a/drivers/acpi/acpica/psloop.c +++ b/drivers/acpi/acpica/psloop.c @@ -3,7 +3,7 @@ * * Module Name: psloop - Main AML parse loop * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -32,10 +32,6 @@ static acpi_status acpi_ps_get_arguments(struct acpi_walk_state *walk_state, u8 * aml_op_start, union acpi_parse_object *op); -static void -acpi_ps_link_module_code(union acpi_parse_object *parent_op, - u8 *aml_start, u32 aml_length, acpi_owner_id owner_id); - /******************************************************************************* * * FUNCTION: acpi_ps_get_arguments @@ -56,7 +52,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, { acpi_status status = AE_OK; union acpi_parse_object *arg = NULL; - const struct acpi_opcode_info *op_info; ACPI_FUNCTION_TRACE_PTR(ps_get_arguments, walk_state); @@ -136,96 +131,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, walk_state->arg_count, walk_state->pass_number)); - /* - * This case handles the legacy option that groups all module-level - * code blocks together and defers execution until all of the tables - * are loaded. Execute all of these blocks at this time. - * Execute any module-level code that was detected during the table - * load phase. - * - * Note: this option is deprecated and will be eliminated in the - * future. Use of this option can cause problems with AML code that - * depends upon in-order immediate execution of module-level code. 
- */ - if (!acpi_gbl_execute_tables_as_methods && - (walk_state->pass_number <= ACPI_IMODE_LOAD_PASS2) && - ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) == 0)) { - /* - * We want to skip If/Else/While constructs during Pass1 because we - * want to actually conditionally execute the code during Pass2. - * - * Except for disassembly, where we always want to walk the - * If/Else/While packages - */ - switch (op->common.aml_opcode) { - case AML_IF_OP: - case AML_ELSE_OP: - case AML_WHILE_OP: - /* - * Currently supported module-level opcodes are: - * IF/ELSE/WHILE. These appear to be the most common, - * and easiest to support since they open an AML - * package. - */ - if (walk_state->pass_number == - ACPI_IMODE_LOAD_PASS1) { - acpi_ps_link_module_code(op->common. - parent, - aml_op_start, - (u32) - (walk_state-> - parser_state. - pkg_end - - aml_op_start), - walk_state-> - owner_id); - } - - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "Pass1: Skipping an If/Else/While body\n")); - - /* Skip body of if/else/while in pass 1 */ - - walk_state->parser_state.aml = - walk_state->parser_state.pkg_end; - walk_state->arg_count = 0; - break; - - default: - /* - * Check for an unsupported executable opcode at module - * level. We must be in PASS1, the parent must be a SCOPE, - * The opcode class must be EXECUTE, and the opcode must - * not be an argument to another opcode. - */ - if ((walk_state->pass_number == - ACPI_IMODE_LOAD_PASS1) - && (op->common.parent->common.aml_opcode == - AML_SCOPE_OP)) { - op_info = - acpi_ps_get_opcode_info(op->common. - aml_opcode); - if ((op_info->class == - AML_CLASS_EXECUTE) && (!arg)) { - ACPI_WARNING((AE_INFO, - "Unsupported module-level executable opcode " - "0x%.2X at table offset 0x%.4X", - op->common. - aml_opcode, - (u32) - (ACPI_PTR_DIFF - (aml_op_start, - walk_state-> - parser_state. - aml_start) + - sizeof(struct - acpi_table_header)))); - } - } - break; - } - } - /* Special processing for certain opcodes */ switch (op->common.aml_opcode) { @@ -300,104 +205,6 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state, return_ACPI_STATUS(AE_OK); } -/******************************************************************************* - * - * FUNCTION: acpi_ps_link_module_code - * - * PARAMETERS: parent_op - Parent parser op - * aml_start - Pointer to the AML - * aml_length - Length of executable AML - * owner_id - owner_id of module level code - * - * RETURN: None. - * - * DESCRIPTION: Wrap the module-level code with a method object and link the - * object to the global list. Note, the mutex field of the method - * object is used to link multiple module-level code objects. - * - * NOTE: In this legacy option, each block of detected executable AML - * code that is outside of any control method is wrapped with a temporary - * control method object and placed on a global list below. - * - * This function executes the module-level code for all tables only after - * all of the tables have been loaded. It is a legacy option and is - * not compatible with other ACPI implementations. See acpi_ns_load_table. - * - * This function will be removed when the legacy option is removed. 
- * - ******************************************************************************/ - -static void -acpi_ps_link_module_code(union acpi_parse_object *parent_op, - u8 *aml_start, u32 aml_length, acpi_owner_id owner_id) -{ - union acpi_operand_object *prev; - union acpi_operand_object *next; - union acpi_operand_object *method_obj; - struct acpi_namespace_node *parent_node; - - ACPI_FUNCTION_TRACE(ps_link_module_code); - - /* Get the tail of the list */ - - prev = next = acpi_gbl_module_code_list; - while (next) { - prev = next; - next = next->method.mutex; - } - - /* - * Insert the module level code into the list. Merge it if it is - * adjacent to the previous element. - */ - if (!prev || - ((prev->method.aml_start + prev->method.aml_length) != aml_start)) { - - /* Create, initialize, and link a new temporary method object */ - - method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); - if (!method_obj) { - return_VOID; - } - - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "Create/Link new code block: %p\n", - method_obj)); - - if (parent_op->common.node) { - parent_node = parent_op->common.node; - } else { - parent_node = acpi_gbl_root_node; - } - - method_obj->method.aml_start = aml_start; - method_obj->method.aml_length = aml_length; - method_obj->method.owner_id = owner_id; - method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL; - - /* - * Save the parent node in next_object. This is cheating, but we - * don't want to expand the method object. - */ - method_obj->method.next_object = - ACPI_CAST_PTR(union acpi_operand_object, parent_node); - - if (!prev) { - acpi_gbl_module_code_list = method_obj; - } else { - prev->method.mutex = method_obj; - } - } else { - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, - "Appending to existing code block: %p\n", - prev)); - - prev->method.aml_length += aml_length; - } - - return_VOID; -} - /******************************************************************************* * * FUNCTION: acpi_ps_parse_loop diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index e1fd819a2955..98e5c7400e54 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -3,7 +3,7 @@ * * Module Name: psobject - Support for parse objects * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c index 8d7dc98bad17..43775c5ce17c 100644 --- a/drivers/acpi/acpica/psopcode.c +++ b/drivers/acpi/acpica/psopcode.c @@ -3,7 +3,7 @@ * * Module Name: psopcode - Parser/Interpreter opcode information table * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c index f310954eea59..15e7563829f1 100644 --- a/drivers/acpi/acpica/psopinfo.c +++ b/drivers/acpi/acpica/psopinfo.c @@ -3,7 +3,7 @@ * * Module Name: psopinfo - AML opcode information functions and dispatch tables * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c index 65603473b6cb..9b386530ffbe 100644 --- a/drivers/acpi/acpica/psparse.c +++ b/drivers/acpi/acpica/psparse.c @@ -3,7 +3,7 @@ * * Module Name: psparse - Parser top level AML parse routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -523,12 +523,12 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) if (status == AE_ABORT_METHOD) { acpi_ns_print_node_pathname(walk_state-> method_node, - "Method aborted:"); + "Aborting method"); acpi_os_printf("\n"); } else { - ACPI_ERROR_METHOD - ("Method parse/execution failed", - walk_state->method_node, NULL, status); + ACPI_ERROR_METHOD("Aborting method", + walk_state->method_node, NULL, + status); } acpi_ex_enter_interpreter(); diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c index 00c67bc249aa..f153ca804740 100644 --- a/drivers/acpi/acpica/psscope.c +++ b/drivers/acpi/acpica/psscope.c @@ -3,7 +3,7 @@ * * Module Name: psscope - Parser scope stack management routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c index 64a8329a17f1..22d8a2becdd0 100644 --- a/drivers/acpi/acpica/pstree.c +++ b/drivers/acpi/acpica/pstree.c @@ -3,7 +3,7 @@ * * Module Name: pstree - Parser op tree manipulation/traversal/search * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c index ef8a5805a836..2512f584fa3c 100644 --- a/drivers/acpi/acpica/psutils.c +++ b/drivers/acpi/acpica/psutils.c @@ -3,7 +3,7 @@ * * Module Name: psutils - Parser miscellaneous utilities (Parser only) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c index bd6af8c87d48..cf91841297c2 100644 --- a/drivers/acpi/acpica/pswalk.c +++ b/drivers/acpi/acpica/pswalk.c @@ -3,7 +3,7 @@ * * Module Name: pswalk - Parser routines to walk parsed op tree(s) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c index 5743b22399a0..ee2ee2c858f2 100644 --- a/drivers/acpi/acpica/psxface.c +++ b/drivers/acpi/acpica/psxface.c @@ -3,7 +3,7 @@ * * Module Name: psxface - Parser external interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c index 77a3263169fa..cafa8134b4c6 100644 --- a/drivers/acpi/acpica/rsdumpinfo.c +++ b/drivers/acpi/acpica/rsdumpinfo.c @@ -32,7 +32,7 @@ struct acpi_rsdump_info acpi_rs_dump_irq[7] = { acpi_gbl_he_decode}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity", acpi_gbl_ll_decode}, - {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.sharable), "Sharing", + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(irq.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.interrupt_count), "Interrupt Count", NULL}, @@ -222,7 +222,7 @@ struct acpi_rsdump_info acpi_rs_dump_ext_irq[8] = { "Triggering", acpi_gbl_he_decode}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(extended_irq.polarity), "Polarity", acpi_gbl_ll_decode}, - {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.sharable), "Sharing", + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(extended_irq.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(extended_irq.resource_source), NULL, NULL}, @@ -255,7 +255,7 @@ struct acpi_rsdump_info acpi_rs_dump_gpio[16] = { "ProducerConsumer", acpi_gbl_consume_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig", acpi_gbl_ppc_decode}, - {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharing", + {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction), "IoRestriction", acpi_gbl_ior_decode}, @@ -285,7 +285,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_function[10] = { "RevisionId", NULL}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_function.pin_config), "PinConfig", acpi_gbl_ppc_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_function.sharable), "Sharing", + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_function.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_function.function_number), "FunctionNumber", NULL}, @@ -308,7 +308,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_config[11] = { NULL}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.sharable), "Sharing", + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_config.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_config.pin_config_type), "PinConfigType", NULL}, @@ -353,7 +353,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_group_function[9] = { {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.sharable), + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_function.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(pin_group_function.function_number), "FunctionNumber", NULL}, @@ -375,7 +375,7 @@ struct acpi_rsdump_info acpi_rs_dump_pin_group_config[10] = { "RevisionId", NULL}, {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, - {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.sharable), + {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(pin_group_config.shareable), "Sharing", acpi_gbl_shr_decode}, {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(pin_group_config.pin_config_type), "PinConfigType", NULL}, diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c index 134b67cd48ee..b0d970efa072 100644 --- 
a/drivers/acpi/acpica/rsirq.c +++ b/drivers/acpi/acpica/rsirq.c @@ -54,7 +54,7 @@ struct acpi_rsconvert_info acpi_rs_get_irq[9] = { AML_OFFSET(irq.flags), 3}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable), AML_OFFSET(irq.flags), 4}, @@ -92,7 +92,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[14] = { AML_OFFSET(irq.flags), 3}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.shareable), AML_OFFSET(irq.flags), 4}, @@ -139,7 +139,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[14] = { ACPI_ACTIVE_HIGH}, {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_VALUE, - ACPI_RS_OFFSET(data.irq.sharable), + ACPI_RS_OFFSET(data.irq.shareable), ACPI_EXCLUSIVE}, /* We can optimize to a 2-byte irq_no_flags() descriptor */ @@ -178,7 +178,7 @@ struct acpi_rsconvert_info acpi_rs_convert_ext_irq[10] = { AML_OFFSET(extended_irq.flags), 2}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.extended_irq.shareable), AML_OFFSET(extended_irq.flags), 3}, diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c index d073ebb51f90..1b937d88980f 100644 --- a/drivers/acpi/acpica/rsserial.c +++ b/drivers/acpi/acpica/rsserial.c @@ -39,7 +39,7 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = { AML_OFFSET(gpio.flags), 0}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.shareable), AML_OFFSET(gpio.int_flags), 3}, @@ -128,7 +128,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_function[13] = { AML_OFFSET(pin_function.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_function.shareable), AML_OFFSET(pin_function.flags), 0}, @@ -518,7 +518,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_config[14] = { AML_OFFSET(pin_config.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_config.shareable), AML_OFFSET(pin_config.flags), 0}, @@ -658,7 +658,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_group_function[13] = { AML_OFFSET(pin_group_function.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_function.shareable), AML_OFFSET(pin_group_function.flags), 0}, @@ -735,7 +735,7 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_group_config[14] = { AML_OFFSET(pin_group_config.revision_id), 1}, - {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.sharable), + {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.pin_group_config.shareable), AML_OFFSET(pin_group_config.flags), 0}, diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c index 862149c8a208..0cecd0039acf 100644 --- a/drivers/acpi/acpica/tbdata.c +++ b/drivers/acpi/acpica/tbdata.c @@ -3,7 +3,7 @@ * * Module Name: tbdata - Table manager data structure functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 99d325a51816..0041bfba9abc 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -3,7 +3,7 @@ * * Module Name: tbfadt - FADT table utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -556,7 +556,7 @@ static void acpi_tb_convert_fadt(void) * 64-bit X length field. * Note: If the legacy length field is > 0xFF bits, ignore * this check. (GPE registers can be larger than the - * 64-bit GAS structure can accomodate, 0xFF bits). + * 64-bit GAS structure can accommodate, 0xFF bits). */ if ((ACPI_MUL_8(length) <= ACPI_UINT8_MAX) && (address64->bit_width != diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c index f00694b1d000..951bd8e1c50a 100644 --- a/drivers/acpi/acpica/tbfind.c +++ b/drivers/acpi/acpica/tbfind.c @@ -3,7 +3,7 @@ * * Module Name: tbfind - find table * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 5f8e7b561c90..be6642bf6366 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -3,7 +3,7 @@ * * Module Name: tbinstal - ACPI table installation and removal * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c index e303418a895b..9b5df95d881b 100644 --- a/drivers/acpi/acpica/tbprint.c +++ b/drivers/acpi/acpica/tbprint.c @@ -3,7 +3,7 @@ * * Module Name: tbprint - Table output utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index b526096560b5..2469e01310e2 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -3,7 +3,7 @@ * * Module Name: tbutils - ACPI Table utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index e4d0dc8948cd..36592888f0e7 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -3,7 +3,7 @@ * * Module Name: tbxface - ACPI table-oriented external interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -108,7 +108,7 @@ acpi_initialize_tables(struct acpi_table_desc *initial_table_array, /* * Get the root table (RSDT or XSDT) and extract all entries to the local * Root Table Array. This array contains the information of the RSDT/XSDT - * in a common, more useable format. + * in a common, more usable format. */ status = acpi_tb_parse_root_table(rsdp_address); return_ACPI_STATUS(status); @@ -169,7 +169,7 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void) if (!acpi_gbl_enable_table_validation) { /* * Now it's safe to do full table validation. We can do deferred - * table initilization here once the flag is set. + * table initialization here once the flag is set. 
*/ acpi_gbl_enable_table_validation = TRUE; for (i = 0; i < acpi_gbl_root_table_list.current_table_count; diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c index 9011297552af..1a2592cc3245 100644 --- a/drivers/acpi/acpica/tbxfload.c +++ b/drivers/acpi/acpica/tbxfload.c @@ -3,7 +3,7 @@ * * Module Name: tbxfload - Table load/unload external interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -69,23 +69,18 @@ acpi_status ACPI_INIT_FUNCTION acpi_load_tables(void) "While loading namespace from ACPI tables")); } - if (acpi_gbl_execute_tables_as_methods) { - /* - * If the module-level code support is enabled, initialize the objects - * in the namespace that remain uninitialized. This runs the executable - * AML that may be part of the declaration of these name objects: - * operation_regions, buffer_fields, Buffers, and Packages. - * - * Note: The module-level code is optional at this time, but will - * become the default in the future. - */ - status = acpi_ns_initialize_objects(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } + /* + * Initialize the objects in the namespace that remain uninitialized. + * This runs the executable AML that may be part of the declaration of + * these name objects: + * operation_regions, buffer_fields, Buffers, and Packages. + * + */ + status = acpi_ns_initialize_objects(); + if (ACPI_SUCCESS(status)) { + acpi_gbl_namespace_initialized = TRUE; } - acpi_gbl_namespace_initialized = TRUE; return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c index 483d0ce5180a..e2859d09ca2e 100644 --- a/drivers/acpi/acpica/tbxfroot.c +++ b/drivers/acpi/acpica/tbxfroot.c @@ -3,7 +3,7 @@ * * Module Name: tbxfroot - Find the root ACPI table (RSDT) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c index dbabe680ff58..bb260376bd59 100644 --- a/drivers/acpi/acpica/utaddress.c +++ b/drivers/acpi/acpica/utaddress.c @@ -3,7 +3,7 @@ * * Module Name: utaddress - op_region address range check * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c index 8cbcd7d6bd5e..d64da4d9e8d0 100644 --- a/drivers/acpi/acpica/utalloc.c +++ b/drivers/acpi/acpica/utalloc.c @@ -3,7 +3,7 @@ * * Module Name: utalloc - local memory allocation routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c index 04ff61e284f5..79d7426fd7bf 100644 --- a/drivers/acpi/acpica/utascii.c +++ b/drivers/acpi/acpica/utascii.c @@ -3,7 +3,7 @@ * * Module Name: utascii - Utility ascii functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c index fffa6f5ae59e..61db9967ebe4 100644 --- a/drivers/acpi/acpica/utbuffer.c +++ b/drivers/acpi/acpica/utbuffer.c @@ -3,7 +3,7 @@ * * Module Name: utbuffer - Buffer dump routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c index 97d6ec174c28..8533fce7fa93 100644 --- a/drivers/acpi/acpica/utcache.c +++ b/drivers/acpi/acpica/utcache.c @@ -3,7 +3,7 @@ * * Module Name: utcache - local cache allocation routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c index a872ed7879ca..1fb8327f3c3b 100644 --- a/drivers/acpi/acpica/utcopy.c +++ b/drivers/acpi/acpica/utcopy.c @@ -3,7 +3,7 @@ * * Module Name: utcopy - Internal to external object translation utilities * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c index aabdc25effd9..01b1b36c8a8e 100644 --- a/drivers/acpi/acpica/utdebug.c +++ b/drivers/acpi/acpica/utdebug.c @@ -3,7 +3,7 @@ * * Module Name: utdebug - Debug print/trace routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index dad02b821e19..ad9f77eb554f 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -3,7 +3,7 @@ * * Module Name: utdecode - Utility decoding routines (value-to-string) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ @@ -284,7 +284,7 @@ const char *acpi_ut_get_node_name(void *object) static const char *acpi_gbl_desc_type_names[] = { /* 00 */ "Not a Descriptor", - /* 01 */ "Cached", + /* 01 */ "Cached Object", /* 02 */ "State-Generic", /* 03 */ "State-Update", /* 04 */ "State-Package", @@ -295,10 +295,10 @@ static const char *acpi_gbl_desc_type_names[] = { /* 09 */ "State-Result", /* 10 */ "State-Notify", /* 11 */ "State-Thread", - /* 12 */ "Walk", - /* 13 */ "Parser", - /* 14 */ "Operand", - /* 15 */ "Node" + /* 12 */ "Tree Walk State", + /* 13 */ "Parse Tree Op", + /* 14 */ "Operand Object", + /* 15 */ "Namespace Node" }; const char *acpi_ut_get_descriptor_name(void *object) @@ -430,8 +430,10 @@ static const char *acpi_gbl_generic_notify[ACPI_GENERIC_NOTIFY_MAX + 1] = { /* 0C */ "Reserved (was previously Shutdown Request)", /* Reserved in ACPI 6.0 */ /* 0D */ "System Resource Affinity Update", - /* 0E */ "Heterogeneous Memory Attributes Update" + /* 0E */ "Heterogeneous Memory Attributes Update", /* ACPI 6.2 */ + /* 0F */ "Error Disconnect Recover" + /* ACPI 6.3 */ }; static const char *acpi_gbl_device_notify[5] = { @@ -461,13 +463,13 @@ static const char *acpi_gbl_thermal_notify[5] = { const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type) { - /* 00 - 0D are "common to all object types" (from ACPI Spec) */ + /* 00 - 0F are "common to all object types" (from ACPI Spec) */ if (notify_value <= ACPI_GENERIC_NOTIFY_MAX) { return (acpi_gbl_generic_notify[notify_value]); } - /* 0E - 7F are reserved */ + /* 10 - 7F are reserved */ if (notify_value <= ACPI_MAX_SYS_NOTIFY) { return ("Reserved"); diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 8cc4392c61f3..eee263cb7beb 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -257,6 +257,10 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) acpi_ut_delete_object_desc(second_desc); } + if (object->field.internal_pcc_buffer) { + ACPI_FREE(object->field.internal_pcc_buffer); + } + break; case ACPI_TYPE_BUFFER_FIELD: diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c index e47430272692..075457341bad 100644 --- a/drivers/acpi/acpica/uterror.c +++ b/drivers/acpi/acpica/uterror.c @@ -183,19 +183,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name, case AE_ALREADY_EXISTS: acpi_os_printf(ACPI_MSG_BIOS_ERROR); - message = "Failure creating"; + message = "Failure creating named object"; break; case AE_NOT_FOUND: acpi_os_printf(ACPI_MSG_BIOS_ERROR); - message = "Could not resolve"; + message = "Could not resolve symbol"; break; default: acpi_os_printf(ACPI_MSG_ERROR); - message = "Failure resolving"; + message = "Failure resolving symbol"; break; } @@ -317,7 +317,8 @@ acpi_ut_method_error(const char *module_name, } acpi_ns_print_node_pathname(node, message); - acpi_os_printf(", %s", acpi_format_exception(method_status)); + acpi_os_printf(" due to previous error (%s)", + acpi_format_exception(method_status)); ACPI_MSG_SUFFIX; ACPI_MSG_REDIRECT_END; diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c index c56ae6e058d5..558a9f3b0678 100644 --- a/drivers/acpi/acpica/uteval.c +++ b/drivers/acpi/acpica/uteval.c @@ -3,7 +3,7 @@ * * Module Name: uteval - Object evaluation * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c index f8c5b49344df..b0622ec4bb85 100644 --- a/drivers/acpi/acpica/utglobal.c +++ b/drivers/acpi/acpica/utglobal.c @@ -3,7 +3,7 @@ * * Module Name: utglobal - Global variables for the ACPI subsystem * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c index 3d63a9e8da4f..b6da135d5f41 100644 --- a/drivers/acpi/acpica/uthex.c +++ b/drivers/acpi/acpica/uthex.c @@ -3,7 +3,7 @@ * * Module Name: uthex -- Hex/ASCII support functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c index 70e6bf1107a1..e805abdd95b8 100644 --- a/drivers/acpi/acpica/utids.c +++ b/drivers/acpi/acpica/utids.c @@ -3,7 +3,7 @@ * * Module Name: utids - support for device Ids - HID, UID, CID, SUB, CLS * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c index 0646ed62b351..bc124591320e 100644 --- a/drivers/acpi/acpica/utinit.c +++ b/drivers/acpi/acpica/utinit.c @@ -3,7 +3,7 @@ * * Module Name: utinit - Common ACPI subsystem initialization * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c index d61e01bd01a3..8b4ff11d617a 100644 --- a/drivers/acpi/acpica/utlock.c +++ b/drivers/acpi/acpica/utlock.c @@ -3,7 +3,7 @@ * * Module Name: utlock - Reader/Writer lock interfaces * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c index ae6d8cc18cec..eee97a902696 100644 --- a/drivers/acpi/acpica/utobject.c +++ b/drivers/acpi/acpica/utobject.c @@ -3,7 +3,7 @@ * * Module Name: utobject - ACPI object create/delete/size/cache routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 902a47463abf..688c61a90725 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -3,7 +3,7 @@ * * Module Name: utosi - Support for the _OSI predefined control method * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c index 65ca9807c2a8..a9f08f43c685 100644 --- a/drivers/acpi/acpica/utpredef.c +++ b/drivers/acpi/acpica/utpredef.c @@ -3,7 +3,7 @@ * * Module Name: utpredef - support functions for predefined names * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. 
* *****************************************************************************/ diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index a98c334c3bb7..5839f2fa7400 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -3,7 +3,7 @@ * * Module Name: utprint - Formatted printing routines * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c index 016a6621cc6f..8052f7ef5025 100644 --- a/drivers/acpi/acpica/uttrack.c +++ b/drivers/acpi/acpica/uttrack.c @@ -3,7 +3,7 @@ * * Module Name: uttrack - Memory allocation tracking routines (debug only) * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ @@ -588,6 +588,18 @@ void acpi_ut_dump_allocations(u32 component, const char *module) acpi_ut_get_descriptor_name (descriptor)); + /* Optional object hex dump */ + + if (acpi_gbl_verbose_leak_dump) { + acpi_os_printf("\n"); + acpi_ut_dump_buffer((u8 *) + descriptor, + element-> + size, + DB_BYTE_DISPLAY, + 0); + } + /* Validate the descriptor type using Type field and length */ descriptor_type = 0; /* Not a valid descriptor type */ diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c index 59ae118092a3..0a7cf8007643 100644 --- a/drivers/acpi/acpica/utuuid.c +++ b/drivers/acpi/acpica/utuuid.c @@ -3,7 +3,7 @@ * * Module Name: utuuid -- UUID support functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index d2d6cc065181..f497c4b30e65 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -3,7 +3,7 @@ * * Module Name: utxface - External interfaces, miscellaneous utility functions * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c index 6bb85d691fcb..a1ed7fced4db 100644 --- a/drivers/acpi/acpica/utxferror.c +++ b/drivers/acpi/acpica/utxferror.c @@ -185,6 +185,50 @@ acpi_bios_error(const char *module_name, ACPI_EXPORT_SYMBOL(acpi_bios_error) +/******************************************************************************* + * + * FUNCTION: acpi_bios_exception + * + * PARAMETERS: module_name - Caller's module name (for error output) + * line_number - Caller's line number (for error output) + * status - Status value to be decoded/formatted + * format - Printf format string + additional args + * + * RETURN: None + * + * DESCRIPTION: Print an "ACPI Firmware Error" message with module/line/version + * info as well as decoded acpi_status. + * + ******************************************************************************/ +void ACPI_INTERNAL_VAR_XFACE +acpi_bios_exception(const char *module_name, + u32 line_number, + acpi_status status, const char *format, ...) 
+{ + va_list arg_list; + + ACPI_MSG_REDIRECT_BEGIN; + + /* For AE_OK, just print the message */ + + if (ACPI_SUCCESS(status)) { + acpi_os_printf(ACPI_MSG_BIOS_ERROR); + + } else { + acpi_os_printf(ACPI_MSG_BIOS_ERROR "%s, ", + acpi_format_exception(status)); + } + + va_start(arg_list, format); + acpi_os_vprintf(format, arg_list); + ACPI_MSG_SUFFIX; + va_end(arg_list); + + ACPI_MSG_REDIRECT_END; +} + +ACPI_EXPORT_SYMBOL(acpi_bios_exception) + /******************************************************************************* * * FUNCTION: acpi_bios_warning diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c index e3c60f57c9f0..9f3b1e3a09de 100644 --- a/drivers/acpi/acpica/utxfinit.c +++ b/drivers/acpi/acpica/utxfinit.c @@ -3,7 +3,7 @@ * * Module Name: utxfinit - External interfaces for ACPICA initialization * - * Copyright (C) 2000 - 2018, Intel Corp. + * Copyright (C) 2000 - 2019, Intel Corp. * *****************************************************************************/ diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 52ae5438edeb..6b18f8bc7be3 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -41,19 +41,9 @@ config ACPI_APEI_PCIEAER Turn on this option to enable the corresponding support. config ACPI_APEI_SEA - bool "APEI Synchronous External Abort logging/recovering support" + bool depends on ARM64 && ACPI_APEI_GHES default y - help - This option should be enabled if the system supports - firmware first handling of SEA (Synchronous External Abort). - SEA happens with certain faults of data abort or instruction - abort synchronous exceptions on ARMv8 systems. If a system - supports firmware first handling of SEA, the platform analyzes - and handles hardware error notifications from SEA, and it may then - form a HW error record for the OS to parse and handle. This - option allows the OS to look for such hardware error record, and - take appropriate action. config ACPI_APEI_MEMORY_FAILURE bool "APEI memory error recovering support" diff --git a/drivers/acpi/apei/bert.c b/drivers/acpi/apei/bert.c index 12771fcf0417..0d948d0a41af 100644 --- a/drivers/acpi/apei/bert.c +++ b/drivers/acpi/apei/bert.c @@ -42,15 +42,7 @@ static void __init bert_print_all(struct acpi_bert_region *region, int remain = region_len; u32 estatus_len; - if (!estatus->block_status) - return; - - while (remain > sizeof(struct acpi_bert_region)) { - if (cper_estatus_check(estatus)) { - pr_err(FW_BUG "Invalid error record.\n"); - return; - } - + while (remain >= sizeof(struct acpi_bert_region)) { estatus_len = cper_estatus_len(estatus); if (remain < estatus_len) { pr_err(FW_BUG "Truncated status block (length: %u).\n", @@ -58,6 +50,15 @@ static void __init bert_print_all(struct acpi_bert_region *region, return; } + /* No more error records. */ + if (!estatus->block_status) + return; + + if (cper_estatus_check(estatus)) { + pr_err(FW_BUG "Invalid error record.\n"); + return; + } + pr_info_once("Error records from previous boot:\n"); cper_estatus_print(KERN_INFO HW_ERR, estatus); @@ -70,10 +71,6 @@ static void __init bert_print_all(struct acpi_bert_region *region, estatus->block_status = 0; estatus = (void *)estatus + estatus_len; - /* No more error records. 
*/ - if (!estatus->block_status) - return; - remain -= estatus_len; } } diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index fcccbfdbdd1a..2d4be94f8c00 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -644,8 +644,8 @@ static int error_type_set(void *data, u64 val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get, - error_type_set, "0x%llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set, + "0x%llx\n"); static int error_inject_set(void *data, u64 val) { @@ -656,8 +656,7 @@ static int error_inject_set(void *data, u64 val) error_param3, error_param4); } -DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, - error_inject_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(error_inject_fops, NULL, error_inject_set, "%llu\n"); static int einj_check_table(struct acpi_table_einj *einj_tab) { @@ -679,7 +678,6 @@ static int __init einj_init(void) { int rc; acpi_status status; - struct dentry *fentry; struct apei_exec_context ctx; if (acpi_disabled) { @@ -707,25 +705,13 @@ static int __init einj_init(void) rc = -ENOMEM; einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir()); - if (!einj_debug_dir) { - pr_err("Error creating debugfs node.\n"); - goto err_cleanup; - } - fentry = debugfs_create_file("available_error_type", S_IRUSR, - einj_debug_dir, NULL, - &available_error_type_fops); - if (!fentry) - goto err_cleanup; - - fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR, - einj_debug_dir, NULL, &error_type_fops); - if (!fentry) - goto err_cleanup; - fentry = debugfs_create_file("error_inject", S_IWUSR, - einj_debug_dir, NULL, &error_inject_fops); - if (!fentry) - goto err_cleanup; + debugfs_create_file("available_error_type", S_IRUSR, einj_debug_dir, + NULL, &available_error_type_fops); + debugfs_create_file_unsafe("error_type", 0600, einj_debug_dir, + NULL, &error_type_fops); + debugfs_create_file_unsafe("error_inject", 0200, einj_debug_dir, + NULL, &error_inject_fops); apei_resources_init(&einj_resources); einj_exec_ctx_init(&ctx); @@ -750,66 +736,37 @@ static int __init einj_init(void) rc = -ENOMEM; einj_param = einj_get_parameter_address(); if ((param_extension || acpi5) && einj_param) { - fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_flags); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param1); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param2); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param3); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR, - einj_debug_dir, &error_param4); - if (!fentry) - goto err_unmap; - - fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, - einj_debug_dir, ¬rigger); - if (!fentry) - goto err_unmap; + debugfs_create_x32("flags", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_flags); + debugfs_create_x64("param1", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param1); + debugfs_create_x64("param2", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param2); + debugfs_create_x64("param3", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param3); + debugfs_create_x64("param4", S_IRUSR | S_IWUSR, einj_debug_dir, + &error_param4); + debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR, + einj_debug_dir, ¬rigger); } if (vendor_dev[0]) { vendor_blob.data = vendor_dev; vendor_blob.size = 
strlen(vendor_dev); - fentry = debugfs_create_blob("vendor", S_IRUSR, - einj_debug_dir, &vendor_blob); - if (!fentry) - goto err_unmap; - fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, - einj_debug_dir, &vendor_flags); - if (!fentry) - goto err_unmap; + debugfs_create_blob("vendor", S_IRUSR, einj_debug_dir, + &vendor_blob); + debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR, + einj_debug_dir, &vendor_flags); } pr_info("Error INJection is initialized.\n"); return 0; -err_unmap: - if (einj_param) { - acpi_size size = (acpi5) ? - sizeof(struct set_error_type_with_address) : - sizeof(struct einj_parameter); - - acpi_os_unmap_iomem(einj_param, size); - pr_err("Error creating param extension debugfs nodes.\n"); - } - apei_exec_post_unmap_gars(&ctx); err_release: apei_resources_release(&einj_resources); err_fini: apei_resources_fini(&einj_resources); -err_cleanup: - pr_err("Error creating primary debugfs nodes.\n"); debugfs_remove_recursive(einj_debug_dir); return rc; diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 9953e50667ec..389d88e35ffb 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -938,17 +938,17 @@ static struct pstore_info erst_info = { }; #define CPER_CREATOR_PSTORE \ - UUID_LE(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ - 0x64, 0x90, 0xb8, 0x9d) + GUID_INIT(0x75a574e3, 0x5052, 0x4b29, 0x8a, 0x8e, 0xbe, 0x2c, \ + 0x64, 0x90, 0xb8, 0x9d) #define CPER_SECTION_TYPE_DMESG \ - UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \ - 0x94, 0x19, 0xeb, 0x12) + GUID_INIT(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \ + 0x94, 0x19, 0xeb, 0x12) #define CPER_SECTION_TYPE_DMESG_Z \ - UUID_LE(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d, \ - 0x34, 0xdd, 0xfa, 0xc6) + GUID_INIT(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d, \ + 0x34, 0xdd, 0xfa, 0xc6) #define CPER_SECTION_TYPE_MCE \ - UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ - 0x04, 0x4a, 0x38, 0xfc) + GUID_INIT(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \ + 0x04, 0x4a, 0x38, 0xfc) struct cper_pstore_record { struct cper_record_header hdr; @@ -1012,7 +1012,7 @@ skip: rc = -EIO; goto out; } - if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0) + if (!guid_equal(&rcd->hdr.creator_id, &CPER_CREATOR_PSTORE)) goto skip; record->buf = kmalloc(len, GFP_KERNEL); @@ -1024,15 +1024,12 @@ skip: record->id = record_id; record->compressed = false; record->ecc_notice_size = 0; - if (uuid_le_cmp(rcd->sec_hdr.section_type, - CPER_SECTION_TYPE_DMESG_Z) == 0) { + if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG_Z)) { record->type = PSTORE_TYPE_DMESG; record->compressed = true; - } else if (uuid_le_cmp(rcd->sec_hdr.section_type, - CPER_SECTION_TYPE_DMESG) == 0) + } else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_DMESG)) record->type = PSTORE_TYPE_DMESG; - else if (uuid_le_cmp(rcd->sec_hdr.section_type, - CPER_SECTION_TYPE_MCE) == 0) + else if (guid_equal(&rcd->sec_hdr.section_type, &CPER_SECTION_TYPE_MCE)) record->type = PSTORE_TYPE_MCE; else record->type = PSTORE_TYPE_MAX; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index f008ba7c9ced..0b5ae91fd0fb 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -25,6 +25,7 @@ * GNU General Public License for more details. 
*/ +#include #include #include #include @@ -33,7 +34,6 @@ #include #include #include -#include #include #include #include @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -85,6 +86,15 @@ ((struct acpi_hest_generic_status *) \ ((struct ghes_estatus_node *)(estatus_node) + 1)) +/* + * NMI-like notifications vary by architecture, before the compiler can prune + * unused static functions it needs a value for these enums. + */ +#ifndef CONFIG_ARM_SDE_INTERFACE +#define FIX_APEI_GHES_SDEI_NORMAL __end_of_fixed_addresses +#define FIX_APEI_GHES_SDEI_CRITICAL __end_of_fixed_addresses +#endif + static inline bool is_hest_type_generic_v2(struct ghes *ghes) { return ghes->generic->header.type == ACPI_HEST_TYPE_GENERIC_ERROR_V2; @@ -115,11 +125,10 @@ static DEFINE_MUTEX(ghes_list_mutex); * handler, but general ioremap can not be used in atomic context, so * the fixmap is used instead. * - * These 2 spinlocks are used to prevent the fixmap entries from being used + * This spinlock is used to prevent the fixmap entry from being used * simultaneously. */ -static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); -static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); +static DEFINE_SPINLOCK(ghes_notify_lock_irq); static struct gen_pool *ghes_estatus_pool; static unsigned long ghes_estatus_pool_size_request; @@ -129,82 +138,49 @@ static atomic_t ghes_estatus_cache_alloced; static int ghes_panic_timeout __read_mostly = 30; -static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) +static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx) { phys_addr_t paddr; pgprot_t prot; - paddr = pfn << PAGE_SHIFT; + paddr = PFN_PHYS(pfn); prot = arch_apei_get_mem_attribute(paddr); - __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot); + __set_fixmap(fixmap_idx, paddr, prot); - return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI); + return (void __iomem *) __fix_to_virt(fixmap_idx); } -static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) +static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx) { - phys_addr_t paddr; - pgprot_t prot; - - paddr = pfn << PAGE_SHIFT; - prot = arch_apei_get_mem_attribute(paddr); - __set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot); - - return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ); -} + int _idx = virt_to_fix((unsigned long)vaddr); -static void ghes_iounmap_nmi(void) -{ - clear_fixmap(FIX_APEI_GHES_NMI); + WARN_ON_ONCE(fixmap_idx != _idx); + clear_fixmap(fixmap_idx); } -static void ghes_iounmap_irq(void) +int ghes_estatus_pool_init(int num_ghes) { - clear_fixmap(FIX_APEI_GHES_IRQ); -} + unsigned long addr, len; -static int ghes_estatus_pool_init(void) -{ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1); if (!ghes_estatus_pool) return -ENOMEM; - return 0; -} -static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool, - struct gen_pool_chunk *chunk, - void *data) -{ - free_page(chunk->start_addr); -} - -static void ghes_estatus_pool_exit(void) -{ - gen_pool_for_each_chunk(ghes_estatus_pool, - ghes_estatus_pool_free_chunk_page, NULL); - gen_pool_destroy(ghes_estatus_pool); -} + len = GHES_ESTATUS_CACHE_AVG_SIZE * GHES_ESTATUS_CACHE_ALLOCED_MAX; + len += (num_ghes * GHES_ESOURCE_PREALLOC_MAX_SIZE); -static int ghes_estatus_pool_expand(unsigned long len) -{ - unsigned long i, pages, size, addr; - int ret; + ghes_estatus_pool_size_request = PAGE_ALIGN(len); + addr = (unsigned long)vmalloc(PAGE_ALIGN(len)); + if (!addr) + return -ENOMEM; - ghes_estatus_pool_size_request += PAGE_ALIGN(len); - size = 
gen_pool_size(ghes_estatus_pool); - if (size >= ghes_estatus_pool_size_request) - return 0; - pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE; - for (i = 0; i < pages; i++) { - addr = __get_free_page(GFP_KERNEL); - if (!addr) - return -ENOMEM; - ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1); - if (ret) - return ret; - } + /* + * New allocation must be visible in all pgd before it can be found by + * an NMI allocating from the pool. + */ + vmalloc_sync_all(); - return 0; + return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1); } static int map_gen_v2(struct ghes *ghes) @@ -217,6 +193,21 @@ static void unmap_gen_v2(struct ghes *ghes) apei_unmap_generic_address(&ghes->generic_v2->read_ack_register); } +static void ghes_ack_error(struct acpi_hest_generic_v2 *gv2) +{ + int rc; + u64 val = 0; + + rc = apei_read(&val, &gv2->read_ack_register); + if (rc) + return; + + val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset; + val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset; + + apei_write(val, &gv2->read_ack_register); +} + static struct ghes *ghes_new(struct acpi_hest_generic *generic) { struct ghes *ghes; @@ -289,23 +280,16 @@ static inline int ghes_severity(int severity) } static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, - int from_phys) + int from_phys, + enum fixed_addresses fixmap_idx) { void __iomem *vaddr; - unsigned long flags = 0; - int in_nmi = in_nmi(); u64 offset; u32 trunk; while (len > 0) { offset = paddr - (paddr & PAGE_MASK); - if (in_nmi) { - raw_spin_lock(&ghes_ioremap_lock_nmi); - vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT); - } else { - spin_lock_irqsave(&ghes_ioremap_lock_irq, flags); - vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT); - } + vaddr = ghes_map(PHYS_PFN(paddr), fixmap_idx); trunk = PAGE_SIZE - offset; trunk = min(trunk, len); if (from_phys) @@ -315,72 +299,114 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, len -= trunk; paddr += trunk; buffer += trunk; - if (in_nmi) { - ghes_iounmap_nmi(); - raw_spin_unlock(&ghes_ioremap_lock_nmi); - } else { - ghes_iounmap_irq(); - spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); - } + ghes_unmap(vaddr, fixmap_idx); + } +} + +/* Check the top-level record header has an appropriate size. */ +static int __ghes_check_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus) +{ + u32 len = cper_estatus_len(estatus); + + if (len < sizeof(*estatus)) { + pr_warn_ratelimited(FW_WARN GHES_PFX "Truncated error status block!\n"); + return -EIO; + } + + if (len > ghes->generic->error_block_length) { + pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid error status block length!\n"); + return -EIO; + } + + if (cper_estatus_check_header(estatus)) { + pr_warn_ratelimited(FW_WARN GHES_PFX "Invalid CPER header!\n"); + return -EIO; } + + return 0; } -static int ghes_read_estatus(struct ghes *ghes, int silent) +/* Read the CPER block, returning its address, and header in estatus. 
*/ +static int __ghes_peek_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 *buf_paddr, enum fixed_addresses fixmap_idx) { struct acpi_hest_generic *g = ghes->generic; - u64 buf_paddr; - u32 len; int rc; - rc = apei_read(&buf_paddr, &g->error_status_address); + rc = apei_read(buf_paddr, &g->error_status_address); if (rc) { - if (!silent && printk_ratelimit()) - pr_warning(FW_WARN GHES_PFX + *buf_paddr = 0; + pr_warn_ratelimited(FW_WARN GHES_PFX "Failed to read error status block address for hardware error source: %d.\n", g->header.source_id); return -EIO; } - if (!buf_paddr) + if (!*buf_paddr) return -ENOENT; - ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, - sizeof(*ghes->estatus), 1); - if (!ghes->estatus->block_status) + ghes_copy_tofrom_phys(estatus, *buf_paddr, sizeof(*estatus), 1, + fixmap_idx); + if (!estatus->block_status) { + *buf_paddr = 0; return -ENOENT; + } - ghes->buffer_paddr = buf_paddr; - ghes->flags |= GHES_TO_CLEAR; + return __ghes_check_estatus(ghes, estatus); +} - rc = -EIO; - len = cper_estatus_len(ghes->estatus); - if (len < sizeof(*ghes->estatus)) - goto err_read_block; - if (len > ghes->generic->error_block_length) - goto err_read_block; - if (cper_estatus_check_header(ghes->estatus)) - goto err_read_block; - ghes_copy_tofrom_phys(ghes->estatus + 1, - buf_paddr + sizeof(*ghes->estatus), - len - sizeof(*ghes->estatus), 1); - if (cper_estatus_check(ghes->estatus)) - goto err_read_block; - rc = 0; - -err_read_block: - if (rc && !silent && printk_ratelimit()) - pr_warning(FW_WARN GHES_PFX - "Failed to read error status block!\n"); - return rc; +static int __ghes_read_estatus(struct acpi_hest_generic_status *estatus, + u64 buf_paddr, enum fixed_addresses fixmap_idx, + size_t buf_len) +{ + ghes_copy_tofrom_phys(estatus, buf_paddr, buf_len, 1, fixmap_idx); + if (cper_estatus_check(estatus)) { + pr_warn_ratelimited(FW_WARN GHES_PFX + "Failed to read error status block!\n"); + return -EIO; + } + + return 0; } -static void ghes_clear_estatus(struct ghes *ghes) +static int ghes_read_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 *buf_paddr, enum fixed_addresses fixmap_idx) { - ghes->estatus->block_status = 0; - if (!(ghes->flags & GHES_TO_CLEAR)) + int rc; + + rc = __ghes_peek_estatus(ghes, estatus, buf_paddr, fixmap_idx); + if (rc) + return rc; + + rc = __ghes_check_estatus(ghes, estatus); + if (rc) + return rc; + + return __ghes_read_estatus(estatus, *buf_paddr, fixmap_idx, + cper_estatus_len(estatus)); +} + +static void ghes_clear_estatus(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 buf_paddr, enum fixed_addresses fixmap_idx) +{ + estatus->block_status = 0; + + if (!buf_paddr) return; - ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr, - sizeof(ghes->estatus->block_status), 0); - ghes->flags &= ~GHES_TO_CLEAR; + + ghes_copy_tofrom_phys(estatus, buf_paddr, + sizeof(estatus->block_status), 0, + fixmap_idx); + + /* + * GHESv2 type HEST entries introduce support for error acknowledgment, + * so only acknowledge the error if this support is present. 
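For reference, a minimal sketch of how the split helpers above compose; this is not part of the patch, and example_consume_one() is an illustrative name. ghes_proc() and the NMI-like paths later in the patch follow the same shape:

    static int example_consume_one(struct ghes *ghes,
                                   enum fixed_addresses fixmap_idx)
    {
            struct acpi_hest_generic_status *estatus = ghes->estatus;
            u64 buf_paddr;
            int rc;

            /* peek the header, validate its lengths, then copy the full record */
            rc = ghes_read_estatus(ghes, estatus, &buf_paddr, fixmap_idx);
            if (rc)
                    return rc;

            /* ... hand the CPER records to ghes_do_proc() or queue them ... */

            /* zero block_status and, for GHESv2 sources, write the Read Ack register */
            ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);

            return 0;
    }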
+ */ + if (is_hest_type_generic_v2(ghes)) + ghes_ack_error(ghes->generic_v2); } static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev) @@ -672,26 +698,13 @@ static void ghes_estatus_cache_add( rcu_read_unlock(); } -static int ghes_ack_error(struct acpi_hest_generic_v2 *gv2) +static void __ghes_panic(struct ghes *ghes, + struct acpi_hest_generic_status *estatus, + u64 buf_paddr, enum fixed_addresses fixmap_idx) { - int rc; - u64 val = 0; + __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus); - rc = apei_read(&val, &gv2->read_ack_register); - if (rc) - return rc; - - val &= gv2->read_ack_preserve << gv2->read_ack_register.bit_offset; - val |= gv2->read_ack_write << gv2->read_ack_register.bit_offset; - - return apei_write(val, &gv2->read_ack_register); -} - -static void __ghes_panic(struct ghes *ghes) -{ - __ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus); - - ghes_clear_estatus(ghes); + ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); /* reboot to log the error! */ if (!panic_timeout) @@ -701,34 +714,25 @@ static void __ghes_panic(struct ghes *ghes) static int ghes_proc(struct ghes *ghes) { + struct acpi_hest_generic_status *estatus = ghes->estatus; + u64 buf_paddr; int rc; - rc = ghes_read_estatus(ghes, 0); + rc = ghes_read_estatus(ghes, estatus, &buf_paddr, FIX_APEI_GHES_IRQ); if (rc) goto out; - if (ghes_severity(ghes->estatus->error_severity) >= GHES_SEV_PANIC) { - __ghes_panic(ghes); - } + if (ghes_severity(estatus->error_severity) >= GHES_SEV_PANIC) + __ghes_panic(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ); - if (!ghes_estatus_cached(ghes->estatus)) { - if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus)) - ghes_estatus_cache_add(ghes->generic, ghes->estatus); + if (!ghes_estatus_cached(estatus)) { + if (ghes_print_estatus(NULL, ghes->generic, estatus)) + ghes_estatus_cache_add(ghes->generic, estatus); } - ghes_do_proc(ghes, ghes->estatus); + ghes_do_proc(ghes, estatus); out: - ghes_clear_estatus(ghes); - - if (rc == -ENOENT) - return rc; - - /* - * GHESv2 type HEST entries introduce support for error acknowledgment, - * so only acknowledge the error if this support is present. 
- */ - if (is_hest_type_generic_v2(ghes)) - return ghes_ack_error(ghes->generic_v2); + ghes_clear_estatus(ghes, estatus, buf_paddr, FIX_APEI_GHES_IRQ); return rc; } @@ -751,8 +755,11 @@ static void ghes_add_timer(struct ghes *ghes) static void ghes_poll_func(struct timer_list *t) { struct ghes *ghes = from_timer(ghes, t, timer); + unsigned long flags; + spin_lock_irqsave(&ghes_notify_lock_irq, flags); ghes_proc(ghes); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); if (!(ghes->flags & GHES_EXITING)) ghes_add_timer(ghes); } @@ -760,9 +767,12 @@ static void ghes_poll_func(struct timer_list *t) static irqreturn_t ghes_irq_func(int irq, void *data) { struct ghes *ghes = data; + unsigned long flags; int rc; + spin_lock_irqsave(&ghes_notify_lock_irq, flags); rc = ghes_proc(ghes); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); if (rc) return IRQ_NONE; @@ -773,14 +783,17 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event, void *data) { struct ghes *ghes; + unsigned long flags; int ret = NOTIFY_DONE; + spin_lock_irqsave(&ghes_notify_lock_irq, flags); rcu_read_lock(); list_for_each_entry_rcu(ghes, &ghes_hed, list) { if (!ghes_proc(ghes)) ret = NOTIFY_OK; } rcu_read_unlock(); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); return ret; } @@ -789,66 +802,20 @@ static struct notifier_block ghes_notifier_hed = { .notifier_call = ghes_notify_hed, }; -#ifdef CONFIG_ACPI_APEI_SEA -static LIST_HEAD(ghes_sea); - -/* - * Return 0 only if one of the SEA error sources successfully reported an error - * record sent from the firmware. - */ -int ghes_notify_sea(void) -{ - struct ghes *ghes; - int ret = -ENOENT; - - rcu_read_lock(); - list_for_each_entry_rcu(ghes, &ghes_sea, list) { - if (!ghes_proc(ghes)) - ret = 0; - } - rcu_read_unlock(); - return ret; -} - -static void ghes_sea_add(struct ghes *ghes) -{ - mutex_lock(&ghes_list_mutex); - list_add_rcu(&ghes->list, &ghes_sea); - mutex_unlock(&ghes_list_mutex); -} - -static void ghes_sea_remove(struct ghes *ghes) -{ - mutex_lock(&ghes_list_mutex); - list_del_rcu(&ghes->list); - mutex_unlock(&ghes_list_mutex); - synchronize_rcu(); -} -#else /* CONFIG_ACPI_APEI_SEA */ -static inline void ghes_sea_add(struct ghes *ghes) { } -static inline void ghes_sea_remove(struct ghes *ghes) { } -#endif /* CONFIG_ACPI_APEI_SEA */ - -#ifdef CONFIG_HAVE_ACPI_APEI_NMI /* - * printk is not safe in NMI context. So in NMI handler, we allocate - * required memory from lock-less memory allocator - * (ghes_estatus_pool), save estatus into it, put them into lock-less - * list (ghes_estatus_llist), then delay printk into IRQ context via - * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record - * required pool size by all NMI error source. + * Handlers for CPER records may not be NMI safe. For example, + * memory_failure_queue() takes spinlocks and calls schedule_work_on(). + * In any NMI-like handler, memory from ghes_estatus_pool is used to save + * estatus, and added to the ghes_estatus_llist. irq_work_queue() causes + * ghes_proc_in_irq() to run in IRQ context where each estatus in + * ghes_estatus_llist is processed. + * + * Memory from the ghes_estatus_pool is also used with the ghes_estatus_cache + * to suppress frequent messages. */ static struct llist_head ghes_estatus_llist; static struct irq_work ghes_proc_irq_work; -/* - * NMI may be triggered on any CPU, so ghes_in_nmi is used for - * having only one concurrent reader. 
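The new comment above describes the NMI-to-IRQ hand-off. For reference, a self-contained sketch of that llist + irq_work pattern; the example_* names are illustrative and not the driver's:

    #include <linux/llist.h>
    #include <linux/irq_work.h>

    struct example_node {
            struct llist_node llnode;
            /* payload copied out of the firmware buffer */
    };

    static LLIST_HEAD(example_list);
    static struct irq_work example_work;

    /* NMI-like context: lock-less operations only. */
    static void example_queue(struct example_node *node)
    {
            llist_add(&node->llnode, &example_list);
            irq_work_queue(&example_work);
    }

    /* IRQ context: safe to take locks, printk, call memory_failure_queue(), ... */
    static void example_process(struct irq_work *work)
    {
            struct llist_node *n, *next;

            for (n = llist_del_all(&example_list); n; n = next) {
                    next = n->next;
                    /* process llist_entry(n, struct example_node, llnode), then free it */
            }
    }

    /* at init time: init_irq_work(&example_work, example_process); */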
- */ -static atomic_t ghes_in_nmi = ATOMIC_INIT(0); - -static LIST_HEAD(ghes_nmi); - static void ghes_proc_in_irq(struct irq_work *irq_work) { struct llist_node *llnode, *next; @@ -905,96 +872,154 @@ static void ghes_print_queued_estatus(void) } } -/* Save estatus for further processing in IRQ context */ -static void __process_error(struct ghes *ghes) +static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, + enum fixed_addresses fixmap_idx) { -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - u32 len, node_len; + struct acpi_hest_generic_status *estatus, tmp_header; struct ghes_estatus_node *estatus_node; - struct acpi_hest_generic_status *estatus; + u32 len, node_len; + u64 buf_paddr; + int sev, rc; - if (ghes_estatus_cached(ghes->estatus)) - return; + if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)) + return -EOPNOTSUPP; - len = cper_estatus_len(ghes->estatus); - node_len = GHES_ESTATUS_NODE_LEN(len); + rc = __ghes_peek_estatus(ghes, &tmp_header, &buf_paddr, fixmap_idx); + if (rc) { + ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); + return rc; + } + rc = __ghes_check_estatus(ghes, &tmp_header); + if (rc) { + ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); + return rc; + } + + len = cper_estatus_len(&tmp_header); + node_len = GHES_ESTATUS_NODE_LEN(len); estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len); if (!estatus_node) - return; + return -ENOMEM; estatus_node->ghes = ghes; estatus_node->generic = ghes->generic; estatus = GHES_ESTATUS_FROM_NODE(estatus_node); - memcpy(estatus, ghes->estatus, len); - llist_add(&estatus_node->llnode, &ghes_estatus_llist); -#endif -} -static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) -{ - struct ghes *ghes; - int sev, ret = NMI_DONE; + if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) { + ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); + rc = -ENOENT; + goto no_work; + } - if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) - return ret; + sev = ghes_severity(estatus->error_severity); + if (sev >= GHES_SEV_PANIC) { + ghes_print_queued_estatus(); + __ghes_panic(ghes, estatus, buf_paddr, fixmap_idx); + } - list_for_each_entry_rcu(ghes, &ghes_nmi, list) { - if (ghes_read_estatus(ghes, 1)) { - ghes_clear_estatus(ghes); - continue; - } else { - ret = NMI_HANDLED; - } + ghes_clear_estatus(ghes, &tmp_header, buf_paddr, fixmap_idx); - sev = ghes_severity(ghes->estatus->error_severity); - if (sev >= GHES_SEV_PANIC) { - oops_begin(); - ghes_print_queued_estatus(); - __ghes_panic(ghes); - } + /* This error has been reported before, don't process it again. 
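ghes_in_nmi_queue_one_entry() above allocates its node from ghes_estatus_pool, which ghes_estatus_pool_init() earlier in this patch now sizes once, up front, instead of growing page by page. A rough sketch of that init pattern, with an illustrative function name:

    #include <linux/genalloc.h>
    #include <linux/vmalloc.h>

    static struct gen_pool *example_pool_create(size_t len)
    {
            struct gen_pool *pool;
            void *buf;

            pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
            if (!pool)
                    return NULL;

            buf = vmalloc(PAGE_ALIGN(len));
            if (!buf)
                    goto free_pool;

            /* make the new pgd entries visible everywhere before an NMI allocates */
            vmalloc_sync_all();

            if (gen_pool_add(pool, (unsigned long)buf, PAGE_ALIGN(len), -1))
                    goto free_buf;

            return pool;

    free_buf:
            vfree(buf);
    free_pool:
            gen_pool_destroy(pool);
            return NULL;
    }

gen_pool_alloc()/gen_pool_free() on such a pre-filled pool are then usable from NMI-like context, which is what the queueing code above relies on.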
*/ + if (ghes_estatus_cached(estatus)) + goto no_work; - if (!(ghes->flags & GHES_TO_CLEAR)) - continue; + llist_add(&estatus_node->llnode, &ghes_estatus_llist); + + return rc; + +no_work: + gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, + node_len); + + return rc; +} - __process_error(ghes); - ghes_clear_estatus(ghes); +static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list, + enum fixed_addresses fixmap_idx) +{ + int ret = -ENOENT; + struct ghes *ghes; + + rcu_read_lock(); + list_for_each_entry_rcu(ghes, rcu_list, list) { + if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) + ret = 0; } + rcu_read_unlock(); -#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - if (ret == NMI_HANDLED) + if (IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) && !ret) irq_work_queue(&ghes_proc_irq_work); -#endif - atomic_dec(&ghes_in_nmi); + return ret; } -static unsigned long ghes_esource_prealloc_size( - const struct acpi_hest_generic *generic) +#ifdef CONFIG_ACPI_APEI_SEA +static LIST_HEAD(ghes_sea); + +/* + * Return 0 only if one of the SEA error sources successfully reported an error + * record sent from the firmware. + */ +int ghes_notify_sea(void) { - unsigned long block_length, prealloc_records, prealloc_size; + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sea); + int rv; - block_length = min_t(unsigned long, generic->error_block_length, - GHES_ESTATUS_MAX_SIZE); - prealloc_records = max_t(unsigned long, - generic->records_to_preallocate, 1); - prealloc_size = min_t(unsigned long, block_length * prealloc_records, - GHES_ESOURCE_PREALLOC_MAX_SIZE); + raw_spin_lock(&ghes_notify_lock_sea); + rv = ghes_in_nmi_spool_from_list(&ghes_sea, FIX_APEI_GHES_SEA); + raw_spin_unlock(&ghes_notify_lock_sea); - return prealloc_size; + return rv; } -static void ghes_estatus_pool_shrink(unsigned long len) +static void ghes_sea_add(struct ghes *ghes) { - ghes_estatus_pool_size_request -= PAGE_ALIGN(len); + mutex_lock(&ghes_list_mutex); + list_add_rcu(&ghes->list, &ghes_sea); + mutex_unlock(&ghes_list_mutex); } -static void ghes_nmi_add(struct ghes *ghes) +static void ghes_sea_remove(struct ghes *ghes) { - unsigned long len; + mutex_lock(&ghes_list_mutex); + list_del_rcu(&ghes->list); + mutex_unlock(&ghes_list_mutex); + synchronize_rcu(); +} +#else /* CONFIG_ACPI_APEI_SEA */ +static inline void ghes_sea_add(struct ghes *ghes) { } +static inline void ghes_sea_remove(struct ghes *ghes) { } +#endif /* CONFIG_ACPI_APEI_SEA */ + +#ifdef CONFIG_HAVE_ACPI_APEI_NMI +/* + * NMI may be triggered on any CPU, so ghes_in_nmi is used for + * having only one concurrent reader. 
+ */ +static atomic_t ghes_in_nmi = ATOMIC_INIT(0); + +static LIST_HEAD(ghes_nmi); + +static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_nmi); + int ret = NMI_DONE; + + if (!atomic_add_unless(&ghes_in_nmi, 1, 1)) + return ret; + + raw_spin_lock(&ghes_notify_lock_nmi); + if (!ghes_in_nmi_spool_from_list(&ghes_nmi, FIX_APEI_GHES_NMI)) + ret = NMI_HANDLED; + raw_spin_unlock(&ghes_notify_lock_nmi); - len = ghes_esource_prealloc_size(ghes->generic); - ghes_estatus_pool_expand(len); + atomic_dec(&ghes_in_nmi); + return ret; +} + +static void ghes_nmi_add(struct ghes *ghes) +{ mutex_lock(&ghes_list_mutex); if (list_empty(&ghes_nmi)) register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes"); @@ -1004,8 +1029,6 @@ static void ghes_nmi_add(struct ghes *ghes) static void ghes_nmi_remove(struct ghes *ghes) { - unsigned long len; - mutex_lock(&ghes_list_mutex); list_del_rcu(&ghes->list); if (list_empty(&ghes_nmi)) @@ -1016,24 +1039,79 @@ static void ghes_nmi_remove(struct ghes *ghes) * freed after NMI handler finishes. */ synchronize_rcu(); - len = ghes_esource_prealloc_size(ghes->generic); - ghes_estatus_pool_shrink(len); } +#else /* CONFIG_HAVE_ACPI_APEI_NMI */ +static inline void ghes_nmi_add(struct ghes *ghes) { } +static inline void ghes_nmi_remove(struct ghes *ghes) { } +#endif /* CONFIG_HAVE_ACPI_APEI_NMI */ static void ghes_nmi_init_cxt(void) { init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); } -#else /* CONFIG_HAVE_ACPI_APEI_NMI */ -static inline void ghes_nmi_add(struct ghes *ghes) { } -static inline void ghes_nmi_remove(struct ghes *ghes) { } -static inline void ghes_nmi_init_cxt(void) { } -#endif /* CONFIG_HAVE_ACPI_APEI_NMI */ + +static int __ghes_sdei_callback(struct ghes *ghes, + enum fixed_addresses fixmap_idx) +{ + if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) { + irq_work_queue(&ghes_proc_irq_work); + + return 0; + } + + return -ENOENT; +} + +static int ghes_sdei_normal_callback(u32 event_num, struct pt_regs *regs, + void *arg) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_normal); + struct ghes *ghes = arg; + int err; + + raw_spin_lock(&ghes_notify_lock_sdei_normal); + err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_NORMAL); + raw_spin_unlock(&ghes_notify_lock_sdei_normal); + + return err; +} + +static int ghes_sdei_critical_callback(u32 event_num, struct pt_regs *regs, + void *arg) +{ + static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sdei_critical); + struct ghes *ghes = arg; + int err; + + raw_spin_lock(&ghes_notify_lock_sdei_critical); + err = __ghes_sdei_callback(ghes, FIX_APEI_GHES_SDEI_CRITICAL); + raw_spin_unlock(&ghes_notify_lock_sdei_critical); + + return err; +} + +static int apei_sdei_register_ghes(struct ghes *ghes) +{ + if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) + return -EOPNOTSUPP; + + return sdei_register_ghes(ghes, ghes_sdei_normal_callback, + ghes_sdei_critical_callback); +} + +static int apei_sdei_unregister_ghes(struct ghes *ghes) +{ + if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) + return -EOPNOTSUPP; + + return sdei_unregister_ghes(ghes); +} static int ghes_probe(struct platform_device *ghes_dev) { struct acpi_hest_generic *generic; struct ghes *ghes = NULL; + unsigned long flags; int rc = -EINVAL; @@ -1064,6 +1142,13 @@ static int ghes_probe(struct platform_device *ghes_dev) goto err; } break; + case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: + if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE)) { + pr_warn(GHES_PFX "Generic hardware error source: %d notified via SDE Interface is not 
supported!\n", + generic->header.source_id); + goto err; + } + break; case ACPI_HEST_NOTIFY_LOCAL: pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", generic->header.source_id); @@ -1127,6 +1212,11 @@ static int ghes_probe(struct platform_device *ghes_dev) case ACPI_HEST_NOTIFY_NMI: ghes_nmi_add(ghes); break; + case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: + rc = apei_sdei_register_ghes(ghes); + if (rc) + goto err; + break; default: BUG(); } @@ -1136,7 +1226,9 @@ static int ghes_probe(struct platform_device *ghes_dev) ghes_edac_register(ghes, &ghes_dev->dev); /* Handle any pending errors right away */ + spin_lock_irqsave(&ghes_notify_lock_irq, flags); ghes_proc(ghes); + spin_unlock_irqrestore(&ghes_notify_lock_irq, flags); return 0; @@ -1150,6 +1242,7 @@ err: static int ghes_remove(struct platform_device *ghes_dev) { + int rc; struct ghes *ghes; struct acpi_hest_generic *generic; @@ -1182,6 +1275,11 @@ static int ghes_remove(struct platform_device *ghes_dev) case ACPI_HEST_NOTIFY_NMI: ghes_nmi_remove(ghes); break; + case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: + rc = apei_sdei_unregister_ghes(ghes); + if (rc) + return rc; + break; default: BUG(); break; @@ -1230,18 +1328,9 @@ static int __init ghes_init(void) ghes_nmi_init_cxt(); - rc = ghes_estatus_pool_init(); - if (rc) - goto err; - - rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE * - GHES_ESTATUS_CACHE_ALLOCED_MAX); - if (rc) - goto err_pool_exit; - rc = platform_driver_register(&ghes_platform_driver); if (rc) - goto err_pool_exit; + goto err; rc = apei_osc_setup(); if (rc == 0 && osc_sb_apei_support_acked) @@ -1254,8 +1343,6 @@ static int __init ghes_init(void) pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n"); return 0; -err_pool_exit: - ghes_estatus_pool_exit(); err: return rc; } diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index b1e9f81ebeea..8113ddb14d28 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "apei-internal.h" @@ -53,6 +54,7 @@ static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge), [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic), [ACPI_HEST_TYPE_GENERIC_ERROR_V2] = sizeof(struct acpi_hest_generic_v2), + [ACPI_HEST_TYPE_IA32_DEFERRED_CHECK] = -1, }; static int hest_esrc_len(struct acpi_hest_header *hest_hdr) @@ -75,6 +77,11 @@ static int hest_esrc_len(struct acpi_hest_header *hest_hdr) mc = (struct acpi_hest_ia_machine_check *)hest_hdr; len = sizeof(*mc) + mc->num_hardware_banks * sizeof(struct acpi_hest_ia_error_bank); + } else if (hest_type == ACPI_HEST_TYPE_IA32_DEFERRED_CHECK) { + struct acpi_hest_ia_deferred_check *mc; + mc = (struct acpi_hest_ia_deferred_check *)hest_hdr; + len = sizeof(*mc) + mc->num_hardware_banks * + sizeof(struct acpi_hest_ia_error_bank); } BUG_ON(len == -1); @@ -203,6 +210,11 @@ static int __init hest_ghes_dev_register(unsigned int ghes_count) rc = apei_hest_parse(hest_parse_ghes, &ghes_arr); if (rc) goto err; + + rc = ghes_estatus_pool_init(ghes_count); + if (rc) + goto err; + out: kfree(ghes_arr.ghes_devs); return rc; @@ -251,7 +263,9 @@ void __init acpi_hest_init(void) rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count); if (rc) goto err; - rc = hest_ghes_dev_register(ghes_count); + + if (ghes_count) + rc = hest_ghes_dev_register(ghes_count); if (rc) goto err; } diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 
5c093ce01bcd..6ecbbabf1233 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -799,10 +799,24 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, } EXPORT_SYMBOL_GPL(acpi_match_device); +static const void *acpi_of_device_get_match_data(const struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + const struct of_device_id *match = NULL; + + if (!acpi_of_match_device(adev, dev->driver->of_match_table, &match)) + return NULL; + + return match->data; +} + const void *acpi_device_get_match_data(const struct device *dev) { const struct acpi_device_id *match; + if (!dev->driver->acpi_match_table) + return acpi_of_device_get_match_data(dev); + match = acpi_match_device(dev->driver->acpi_match_table, dev); if (!match) return NULL; @@ -1029,6 +1043,9 @@ void __init acpi_early_init(void) acpi_permanent_mmap = true; + /* Initialize debug output. Linux does not use ACPICA defaults */ + acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; + #ifdef CONFIG_X86 /* * If the machine falls into the DMI check table, diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 217a782c3e55..1b207fca1420 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -1050,6 +1050,48 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) return ret_val; } +/** + * cppc_get_desired_perf - Get the value of desired performance register. + * @cpunum: CPU from which to get desired performance. + * @desired_perf: address of a variable to store the returned desired performance + * + * Return: 0 for success, -EIO otherwise. + */ +int cppc_get_desired_perf(int cpunum, u64 *desired_perf) +{ + struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); + int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum); + struct cpc_register_resource *desired_reg; + struct cppc_pcc_data *pcc_ss_data = NULL; + + desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; + + if (CPC_IN_PCC(desired_reg)) { + int ret = 0; + + if (pcc_ss_id < 0) + return -EIO; + + pcc_ss_data = pcc_data[pcc_ss_id]; + + down_write(&pcc_ss_data->pcc_lock); + + if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) + cpc_read(cpunum, desired_reg, desired_perf); + else + ret = -EIO; + + up_write(&pcc_ss_data->pcc_lock); + + return ret; + } + + cpc_read(cpunum, desired_reg, desired_perf); + + return 0; +} +EXPORT_SYMBOL_GPL(cppc_get_desired_perf); + /** * cppc_get_perf_caps - Get a CPUs performance capabilities. * @cpunum: CPU from which to get capabilities info. 
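cppc_get_desired_perf() above is exported for cpufreq drivers. A minimal usage sketch follows; the caller is hypothetical, only the helper comes from this patch:

    static void example_show_desired_perf(int cpu)
    {
            u64 desired_perf;

            if (cppc_get_desired_perf(cpu, &desired_perf))
                    pr_warn("cpu%d: CPPC desired perf not readable\n", cpu);
            else
                    pr_info("cpu%d: CPPC desired perf %llu\n", cpu,
                            (unsigned long long)desired_perf);
    }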
diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index 4451877f83b6..aa972dc5cb7e 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -79,14 +79,8 @@ static const struct file_operations cm_fops = { static int __init acpi_custom_method_init(void) { - if (acpi_debugfs_dir == NULL) - return -ENOENT; - cm_dentry = debugfs_create_file("custom_method", S_IWUSR, acpi_debugfs_dir, NULL, &cm_fops); - if (cm_dentry == NULL) - return -ENODEV; - return 0; } diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile index 06ea8809583d..e6032e47e83f 100644 --- a/drivers/acpi/dptf/Makefile +++ b/drivers/acpi/dptf/Makefile @@ -1,4 +1,2 @@ obj-$(CONFIG_ACPI) += int340x_thermal.o obj-$(CONFIG_DPTF_POWER) += dptf_power.o - -ccflags-y += -Idrivers/acpi diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 86364097e236..0aa7c2e62e95 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -12,7 +12,7 @@ #include #include -#include "internal.h" +#include "../internal.h" #define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 9d66a47d32fb..48d4815603e5 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -186,14 +186,17 @@ static void advance_transaction(struct acpi_ec *ec); static void acpi_ec_event_handler(struct work_struct *work); static void acpi_ec_event_processor(struct work_struct *work); -struct acpi_ec *boot_ec, *first_ec; +struct acpi_ec *first_ec; EXPORT_SYMBOL(first_ec); + +static struct acpi_ec *boot_ec; static bool boot_ec_is_ecdt = false; static struct workqueue_struct *ec_query_wq; static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */ static int EC_FLAGS_IGNORE_DSDT_GPE; /* Needs ECDT GPE as correction setting */ +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ /* -------------------------------------------------------------------------- * Logging/Debugging @@ -499,6 +502,26 @@ static inline void __acpi_ec_disable_event(struct acpi_ec *ec) ec_log_drv("event blocked"); } +/* + * Process _Q events that might have accumulated in the EC. + * Run with locked ec mutex. + */ +static void acpi_ec_clear(struct acpi_ec *ec) +{ + int i, status; + u8 value = 0; + + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + status = acpi_ec_query(ec, &value); + if (status || !value) + break; + } + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); +} + static void acpi_ec_enable_event(struct acpi_ec *ec) { unsigned long flags; @@ -507,6 +530,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) if (acpi_ec_started(ec)) __acpi_ec_enable_event(ec); spin_unlock_irqrestore(&ec->lock, flags); + + /* Drain additional events if hardware requires that */ + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); } #ifdef CONFIG_PM_SLEEP @@ -1539,49 +1566,6 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events) return ret; } -static int acpi_config_boot_ec(struct acpi_ec *ec, acpi_handle handle, - bool handle_events, bool is_ecdt) -{ - int ret; - - /* - * Changing the ACPI handle results in a re-configuration of the - * boot EC. And if it happens after the namespace initialization, - * it causes _REG evaluations. 
- */ - if (boot_ec && boot_ec->handle != handle) - ec_remove_handlers(boot_ec); - - /* Unset old boot EC */ - if (boot_ec != ec) - acpi_ec_free(boot_ec); - - /* - * ECDT device creation is split into acpi_ec_ecdt_probe() and - * acpi_ec_ecdt_start(). This function takes care of completing the - * ECDT parsing logic as the handle update should be performed - * between the installation/uninstallation of the handlers. - */ - if (ec->handle != handle) - ec->handle = handle; - - ret = acpi_ec_setup(ec, handle_events); - if (ret) - return ret; - - /* Set new boot EC */ - if (!boot_ec) { - boot_ec = ec; - boot_ec_is_ecdt = is_ecdt; - } - - acpi_handle_info(boot_ec->handle, - "Used as boot %s EC to handle transactions%s\n", - is_ecdt ? "ECDT" : "DSDT", - handle_events ? " and events" : ""); - return ret; -} - static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle) { struct acpi_table_ecdt *ecdt_ptr; @@ -1601,43 +1585,34 @@ static bool acpi_ec_ecdt_get_handle(acpi_handle *phandle) return true; } -static bool acpi_is_boot_ec(struct acpi_ec *ec) -{ - if (!boot_ec) - return false; - if (ec->command_addr == boot_ec->command_addr && - ec->data_addr == boot_ec->data_addr) - return true; - return false; -} - static int acpi_ec_add(struct acpi_device *device) { struct acpi_ec *ec = NULL; - int ret; - bool is_ecdt = false; + bool dep_update = true; acpi_status status; + int ret; strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_EC_CLASS); if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) { - is_ecdt = true; + boot_ec_is_ecdt = true; ec = boot_ec; + dep_update = false; } else { ec = acpi_ec_alloc(); if (!ec) return -ENOMEM; + status = ec_parse_device(device->handle, 0, ec, NULL); if (status != AE_CTRL_TERMINATE) { ret = -EINVAL; goto err_alloc; } - } - if (acpi_is_boot_ec(ec)) { - boot_ec_is_ecdt = is_ecdt; - if (!is_ecdt) { + if (boot_ec && ec->command_addr == boot_ec->command_addr && + ec->data_addr == boot_ec->data_addr) { + boot_ec_is_ecdt = false; /* * Trust PNP0C09 namespace location rather than * ECDT ID. But trust ECDT GPE rather than _GPE @@ -1649,12 +1624,17 @@ static int acpi_ec_add(struct acpi_device *device) acpi_ec_free(ec); ec = boot_ec; } - ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt); - } else - ret = acpi_ec_setup(ec, true); + } + + ret = acpi_ec_setup(ec, true); if (ret) goto err_query; + if (ec == boot_ec) + acpi_handle_info(boot_ec->handle, + "Boot %s EC used to handle transactions and events\n", + boot_ec_is_ecdt ? "ECDT" : "DSDT"); + device->driver_data = ec; ret = !!request_region(ec->data_addr, 1, "EC data"); @@ -1662,7 +1642,7 @@ static int acpi_ec_add(struct acpi_device *device) ret = !!request_region(ec->command_addr, 1, "EC cmd"); WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); - if (!is_ecdt) { + if (dep_update) { /* Reprobe devices depending on the EC */ acpi_walk_dep_device_list(ec->handle); } @@ -1730,10 +1710,10 @@ static const struct acpi_device_id ec_device_ids[] = { * namespace EC before the main ACPI device enumeration process. It is * retained for historical reason and will be deprecated in the future. */ -int __init acpi_ec_dsdt_probe(void) +void __init acpi_ec_dsdt_probe(void) { - acpi_status status; struct acpi_ec *ec; + acpi_status status; int ret; /* @@ -1743,21 +1723,22 @@ int __init acpi_ec_dsdt_probe(void) * picking up an invalid EC device. 
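The EC hunks that follow add a clear-on-resume quirk keyed off a DMI match. For reference, a minimal sketch of that DMI quirk-table pattern; the example_* names and flag are illustrative, the patch itself uses ec_clear_on_resume() and EC_FLAGS_CLEAR_ON_RESUME:

    static int example_flag;

    static int example_quirk(const struct dmi_system_id *id)
    {
            pr_debug("EC quirk applies: %s\n", id->ident);
            example_flag = 1;
            return 0;
    }

    static const struct dmi_system_id example_dmi_table[] __initconst = {
            {
                    .callback = example_quirk,
                    .ident = "Samsung hardware",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
                    },
            },
            { }
    };

    /* dmi_check_system(example_dmi_table) runs example_quirk() on a match. */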
*/ if (boot_ec) - return -ENODEV; + return; ec = acpi_ec_alloc(); if (!ec) - return -ENOMEM; + return; + /* * At this point, the namespace is initialized, so start to find * the namespace objects. */ - status = acpi_get_devices(ec_device_ids[0].id, - ec_parse_device, ec, NULL); + status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL); if (ACPI_FAILURE(status) || !ec->handle) { - ret = -ENODEV; - goto error; + acpi_ec_free(ec); + return; } + /* * When the DSDT EC is available, always re-configure boot EC to * have _REG evaluated. _REG can only be evaluated after the @@ -1765,11 +1746,16 @@ int __init acpi_ec_dsdt_probe(void) * At this point, the GPE is not fully initialized, so do not to * handle the events. */ - ret = acpi_config_boot_ec(ec, ec->handle, false, false); -error: - if (ret) + ret = acpi_ec_setup(ec, false); + if (ret) { acpi_ec_free(ec); - return ret; + return; + } + + boot_ec = ec; + + acpi_handle_info(ec->handle, + "Boot DSDT EC used to handle transactions\n"); } /* @@ -1820,6 +1806,31 @@ static int ec_flag_query_handshake(const struct dmi_system_id *id) } #endif +/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) + * + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + * + * Ideally, the EC should also be instructed NOT to accumulate events during + * sleep (which Windows seems to do somehow), but the interface to control this + * behaviour is not known at this time. + * + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, + * however it is very likely that other Samsung models are affected. + * + * On systems which don't accumulate _Q events during sleep, this extra check + * should be harmless. + */ +static int ec_clear_on_resume(const struct dmi_system_id *id) +{ + pr_debug("Detected system needing EC poll on resume.\n"); + EC_FLAGS_CLEAR_ON_RESUME = 1; + ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS; + return 0; +} + /* * Some ECDTs contain wrong register addresses. * MSI MS-171F @@ -1869,39 +1880,38 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = { ec_honor_ecdt_gpe, "ASUS X580VD", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "X580VD"),}, NULL}, + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, {}, }; -int __init acpi_ec_ecdt_probe(void) +void __init acpi_ec_ecdt_probe(void) { - int ret; - acpi_status status; struct acpi_table_ecdt *ecdt_ptr; struct acpi_ec *ec; + acpi_status status; + int ret; - ec = acpi_ec_alloc(); - if (!ec) - return -ENOMEM; - /* - * Generate a boot ec context - */ + /* Generate a boot ec context. 
*/ dmi_check_system(ec_dmi_table); status = acpi_get_table(ACPI_SIG_ECDT, 1, (struct acpi_table_header **)&ecdt_ptr); - if (ACPI_FAILURE(status)) { - ret = -ENODEV; - goto error; - } + if (ACPI_FAILURE(status)) + return; if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) { /* * Asus X50GL: * https://bugzilla.kernel.org/show_bug.cgi?id=11880 */ - ret = -ENODEV; - goto error; + return; } + ec = acpi_ec_alloc(); + if (!ec) + return; + if (EC_FLAGS_CORRECT_ECDT) { ec->command_addr = ecdt_ptr->data.address; ec->data_addr = ecdt_ptr->control.address; @@ -1910,16 +1920,22 @@ int __init acpi_ec_ecdt_probe(void) ec->data_addr = ecdt_ptr->data.address; } ec->gpe = ecdt_ptr->gpe; + ec->handle = ACPI_ROOT_OBJECT; /* * At this point, the namespace is not initialized, so do not find * the namespace objects, or handle the events. */ - ret = acpi_config_boot_ec(ec, ACPI_ROOT_OBJECT, false, true); -error: - if (ret) + ret = acpi_ec_setup(ec, false); + if (ret) { acpi_ec_free(ec); - return ret; + return; + } + + boot_ec = ec; + boot_ec_is_ecdt = true; + + pr_info("Boot ECDT EC used to handle transactions\n"); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c index dd70d6c2bca0..23faa66ea772 100644 --- a/drivers/acpi/ec_sys.c +++ b/drivers/acpi/ec_sys.c @@ -108,52 +108,32 @@ static const struct file_operations acpi_ec_io_ops = { .llseek = default_llseek, }; -static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) +static void acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) { struct dentry *dev_dir; char name[64]; umode_t mode = 0400; - if (ec_device_count == 0) { + if (ec_device_count == 0) acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL); - if (!acpi_ec_debugfs_dir) - return -ENOMEM; - } sprintf(name, "ec%u", ec_device_count); dev_dir = debugfs_create_dir(name, acpi_ec_debugfs_dir); - if (!dev_dir) { - if (ec_device_count != 0) - goto error; - return -ENOMEM; - } - if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe)) - goto error; - if (!debugfs_create_bool("use_global_lock", 0444, dev_dir, - &first_ec->global_lock)) - goto error; + debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe); + debugfs_create_bool("use_global_lock", 0444, dev_dir, + &first_ec->global_lock); if (write_support) mode = 0600; - if (!debugfs_create_file("io", mode, dev_dir, ec, &acpi_ec_io_ops)) - goto error; - - return 0; - -error: - debugfs_remove_recursive(acpi_ec_debugfs_dir); - return -ENOMEM; + debugfs_create_file("io", mode, dev_dir, ec, &acpi_ec_io_ops); } static int __init acpi_ec_sys_init(void) { - int err = 0; if (first_ec) - err = acpi_ec_add_debugfs(first_ec, 0); - else - err = -ENODEV; - return err; + acpi_ec_add_debugfs(first_ec, 0); + return 0; } static void __exit acpi_ec_sys_exit(void) diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 6a9e1fb8913a..6eaf06db7752 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -192,8 +192,8 @@ extern struct acpi_ec *first_ec; typedef int (*acpi_ec_query_func) (void *data); int acpi_ec_init(void); -int acpi_ec_ecdt_probe(void); -int acpi_ec_dsdt_probe(void); +void acpi_ec_ecdt_probe(void); +void acpi_ec_dsdt_probe(void); void acpi_ec_block_transactions(void); void acpi_ec_unblock_transactions(void); void acpi_ec_mark_gpe_for_wake(void); diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 7c352cba0528..c3b2222e2129 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -196,7 +196,7 @@ static acpi_status 
acpi_irq_parse_one_cb(struct acpi_resource *ares, fwnode = acpi_gsi_domain_id; acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index], irq->triggering, irq->polarity, - irq->sharable, ctx); + irq->shareable, ctx); return AE_CTRL_TERMINATE; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: eirq = &ares->data.extended_irq; @@ -209,7 +209,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares, fwnode = acpi_get_irq_source_fwhandle(&eirq->resource_source); acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index], eirq->triggering, eirq->polarity, - eirq->sharable, ctx); + eirq->shareable, ctx); return AE_CTRL_TERMINATE; } diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index e18ade5d74e9..df8979008dd4 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -55,6 +55,10 @@ static bool no_init_ars; module_param(no_init_ars, bool, 0644); MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time"); +static bool force_labels; +module_param(force_labels, bool, 0444); +MODULE_PARM_DESC(force_labels, "Opt-in to labels despite missing methods"); + LIST_HEAD(acpi_descs); DEFINE_MUTEX(acpi_desc_lock); @@ -415,7 +419,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, if (call_pkg) { int i; - if (nfit_mem->family != call_pkg->nd_family) + if (nfit_mem && nfit_mem->family != call_pkg->nd_family) return -ENOTTY; for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) @@ -424,6 +428,10 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, return call_pkg->nd_command; } + /* In the !call_pkg case, bus commands == bus functions */ + if (!nfit_mem) + return cmd; + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ if (nfit_mem->family == NVDIMM_FAMILY_INTEL) return cmd; @@ -454,17 +462,18 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (cmd_rc) *cmd_rc = -EINVAL; + if (cmd == ND_CMD_CALL) + call_pkg = buf; + func = cmd_to_func(nfit_mem, cmd, call_pkg); + if (func < 0) + return func; + if (nvdimm) { struct acpi_device *adev = nfit_mem->adev; if (!adev) return -ENOTTY; - if (cmd == ND_CMD_CALL) - call_pkg = buf; - func = cmd_to_func(nfit_mem, cmd, call_pkg); - if (func < 0) - return func; dimm_name = nvdimm_name(nvdimm); cmd_name = nvdimm_cmd_name(cmd); cmd_mask = nvdimm_cmd_mask(nvdimm); @@ -475,12 +484,9 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, } else { struct acpi_device *adev = to_acpi_dev(acpi_desc); - func = cmd; cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; - dsm_mask = cmd_mask; - if (cmd == ND_CMD_CALL) - dsm_mask = nd_desc->bus_dsm_mask; + dsm_mask = nd_desc->bus_dsm_mask; desc = nd_cmd_bus_desc(cmd); guid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; @@ -554,6 +560,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return -EINVAL; } + if (out_obj->type != ACPI_TYPE_BUFFER) { + dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", + dimm_name, cmd_name, out_obj->type); + rc = -EINVAL; + goto out; + } + if (call_pkg) { call_pkg->nd_fw_size = out_obj->buffer.length; memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, @@ -572,13 +585,6 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, return 0; } - if (out_obj->package.type != ACPI_TYPE_BUFFER) { - dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n", - dimm_name, cmd_name, out_obj->type); - rc = -EINVAL; - goto out; - } - dev_dbg(dev, "%s cmd: %s 
output length: %d\n", dimm_name, cmd_name, out_obj->buffer.length); print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, @@ -1317,19 +1323,30 @@ static ssize_t scrub_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus_descriptor *nd_desc; + struct acpi_nfit_desc *acpi_desc; ssize_t rc = -ENXIO; + bool busy; device_lock(dev); nd_desc = dev_get_drvdata(dev); - if (nd_desc) { - struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + if (!nd_desc) { + device_unlock(dev); + return rc; + } + acpi_desc = to_acpi_desc(nd_desc); - mutex_lock(&acpi_desc->init_mutex); - rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, - acpi_desc->scrub_busy - && !acpi_desc->cancel ? "+\n" : "\n"); - mutex_unlock(&acpi_desc->init_mutex); + mutex_lock(&acpi_desc->init_mutex); + busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags) + && !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags); + rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n"); + /* Allow an admin to poll the busy state at a higher rate */ + if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL, + &acpi_desc->scrub_flags)) { + acpi_desc->scrub_tmo = 1; + mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ); } + + mutex_unlock(&acpi_desc->init_mutex); device_unlock(dev); return rc; } @@ -1759,14 +1776,14 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) { + struct device *dev = &nfit_mem->adev->dev; struct nd_intel_smart smart = { 0 }; union acpi_object in_buf = { - .type = ACPI_TYPE_BUFFER, - .buffer.pointer = (char *) &smart, - .buffer.length = sizeof(smart), + .buffer.type = ACPI_TYPE_BUFFER, + .buffer.length = 0, }; union acpi_object in_obj = { - .type = ACPI_TYPE_PACKAGE, + .package.type = ACPI_TYPE_PACKAGE, .package.count = 1, .package.elements = &in_buf, }; @@ -1781,8 +1798,15 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) return; out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj); - if (!out_obj) + if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER + || out_obj->buffer.length < sizeof(smart)) { + dev_dbg(dev->parent, "%s: failed to retrieve initial health\n", + dev_name(dev)); + ACPI_FREE(out_obj); return; + } + memcpy(&smart, out_obj->buffer.pointer, sizeof(smart)); + ACPI_FREE(out_obj); if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) { if (smart.shutdown_state) @@ -1793,7 +1817,6 @@ __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem) set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags); nfit_mem->dirty_shutdown = smart.shutdown_count; } - ACPI_FREE(out_obj); } static void populate_shutdown_status(struct nfit_mem *nfit_mem) @@ -1861,9 +1884,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dev_set_drvdata(&adev_dimm->dev, nfit_mem); /* - * Until standardization materializes we need to consider 4 - * different command sets. Note, that checking for function0 (bit0) - * tells us if any commands are reachable through this GUID. + * There are 4 "legacy" NVDIMM command sets + * (NVDIMM_FAMILY_{INTEL,MSFT,HPE1,HPE2}) that were created before + * an EFI working group was established to constrain this + * proliferation. The nfit driver probes for the supported command + * set by GUID. Note, if you're a platform developer looking to add + * a new command set to this probe, consider using an existing set, + * or otherwise seek approval to publish the command set at + * http://www.uefi.org/RFIC_LIST. 
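The nfit_intel_shutdown_status() fix above validates the _DSM output object before copying from it. For reference, that defensive pattern in isolation; the function name is illustrative:

    static int example_read_dsm_buffer(acpi_handle handle, const guid_t *guid,
                                       u64 rev, u64 func, void *out, size_t len)
    {
            union acpi_object *obj;

            obj = acpi_evaluate_dsm(handle, guid, rev, func, NULL);
            if (!obj)
                    return -EIO;

            /* never trust the returned type or length */
            if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < len) {
                    ACPI_FREE(obj);
                    return -EINVAL;
            }

            memcpy(out, obj->buffer.pointer, len);
            ACPI_FREE(obj);

            return 0;
    }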
+ * + * Note, that checking for function0 (bit0) tells us if any commands + * are reachable through this GUID. */ for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) @@ -1886,6 +1917,8 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dsm_mask &= ~(1 << 8); } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { dsm_mask = 0xffffffff; + } else if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) { + dsm_mask = 0x1f; } else { dev_dbg(dev, "unknown dimm command family\n"); nfit_mem->family = -1; @@ -1915,18 +1948,32 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, | 1 << ND_CMD_SET_CONFIG_DATA; if (family == NVDIMM_FAMILY_INTEL && (dsm_mask & label_mask) == label_mask) - return 0; + /* skip _LS{I,R,W} enabling */; + else { + if (acpi_nvdimm_has_method(adev_dimm, "_LSI") + && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { + dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); + set_bit(NFIT_MEM_LSR, &nfit_mem->flags); + } - if (acpi_nvdimm_has_method(adev_dimm, "_LSI") - && acpi_nvdimm_has_method(adev_dimm, "_LSR")) { - dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev)); - set_bit(NFIT_MEM_LSR, &nfit_mem->flags); - } + if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) + && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { + dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); + set_bit(NFIT_MEM_LSW, &nfit_mem->flags); + } - if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags) - && acpi_nvdimm_has_method(adev_dimm, "_LSW")) { - dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev)); - set_bit(NFIT_MEM_LSW, &nfit_mem->flags); + /* + * Quirk read-only label configurations to preserve + * access to label-less namespaces by default. + */ + if (!test_bit(NFIT_MEM_LSW, &nfit_mem->flags) + && !force_labels) { + dev_dbg(dev, "%s: No _LSW, disable labels\n", + dev_name(&adev_dimm->dev)); + clear_bit(NFIT_MEM_LSR, &nfit_mem->flags); + } else + dev_dbg(dev, "%s: Force enable labels\n", + dev_name(&adev_dimm->dev)); } populate_shutdown_status(nfit_mem); @@ -2027,6 +2074,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK; } + /* Quirk to ignore LOCAL for labels on HYPERV DIMMs */ + if (nfit_mem->family == NVDIMM_FAMILY_HYPERV) + set_bit(NDD_NOBLK, &flags); + if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) { set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask); set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask); @@ -2050,7 +2101,7 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) continue; - dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n", + dev_err(acpi_desc->dev, "Error found in NVDIMM %s flags:%s%s%s%s%s\n", nvdimm_name(nvdimm), mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? 
" restore_fail":"", @@ -2641,7 +2692,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, if (rc < 0) return rc; - return cmd_rc; + if (cmd_rc < 0) + return cmd_rc; + set_bit(ARS_VALID, &acpi_desc->scrub_flags); + return 0; } static int ars_continue(struct acpi_nfit_desc *acpi_desc) @@ -2651,11 +2705,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc) struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; - memset(&ars_start, 0, sizeof(ars_start)); - ars_start.address = ars_status->restart_address; - ars_start.length = ars_status->restart_length; - ars_start.type = ars_status->type; - ars_start.flags = acpi_desc->ars_start_flags; + ars_start = (struct nd_cmd_ars_start) { + .address = ars_status->restart_address, + .length = ars_status->restart_length, + .type = ars_status->type, + }; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, sizeof(ars_start), &cmd_rc); if (rc < 0) @@ -2734,6 +2788,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc) */ if (ars_status->out_length < 44) return 0; + + /* + * Ignore potentially stale results that are only refreshed + * after a start-ARS event. + */ + if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) { + dev_dbg(acpi_desc->dev, "skip %d stale records\n", + ars_status->num_records); + return 0; + } + for (i = 0; i < ars_status->num_records; i++) { /* only process full records */ if (ars_status->out_length @@ -3004,14 +3069,16 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc, { int rc; - if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state)) + if (test_bit(ARS_FAILED, &nfit_spa->ars_state)) return acpi_nfit_register_region(acpi_desc, nfit_spa); set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state); - set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); + if (!no_init_ars) + set_bit(ARS_REQ_LONG, &nfit_spa->ars_state); switch (acpi_nfit_query_poison(acpi_desc)) { case 0: + case -ENOSPC: case -EAGAIN: rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT); /* shouldn't happen, try again later */ @@ -3036,7 +3103,6 @@ static int ars_register(struct acpi_nfit_desc *acpi_desc, break; case -EBUSY: case -ENOMEM: - case -ENOSPC: /* * BIOS was using ARS, wait for it to complete (or * resources to become available) and then perform our @@ -3071,7 +3137,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc, lockdep_assert_held(&acpi_desc->init_mutex); - if (acpi_desc->cancel) + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) return 0; if (query_rc == -EBUSY) { @@ -3145,7 +3211,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo) { lockdep_assert_held(&acpi_desc->init_mutex); - acpi_desc->scrub_busy = 1; + set_bit(ARS_BUSY, &acpi_desc->scrub_flags); /* note this should only be set from within the workqueue */ if (tmo) acpi_desc->scrub_tmo = tmo; @@ -3161,7 +3227,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc) { lockdep_assert_held(&acpi_desc->init_mutex); - acpi_desc->scrub_busy = 0; + clear_bit(ARS_BUSY, &acpi_desc->scrub_flags); acpi_desc->scrub_count++; if (acpi_desc->scrub_count_state) sysfs_notify_dirent(acpi_desc->scrub_count_state); @@ -3182,6 +3248,7 @@ static void acpi_nfit_scrub(struct work_struct *work) else notify_ars_done(acpi_desc); memset(acpi_desc->ars_status, 0, acpi_desc->max_ars); + clear_bit(ARS_POLL, &acpi_desc->scrub_flags); mutex_unlock(&acpi_desc->init_mutex); } @@ -3216,6 +3283,7 @@ static int acpi_nfit_register_regions(struct 
acpi_nfit_desc *acpi_desc) struct nfit_spa *nfit_spa; int rc; + set_bit(ARS_VALID, &acpi_desc->scrub_flags); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: @@ -3450,7 +3518,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa; mutex_lock(&acpi_desc->init_mutex); - if (acpi_desc->cancel) { + if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) { mutex_unlock(&acpi_desc->init_mutex); return 0; } @@ -3529,7 +3597,7 @@ void acpi_nfit_shutdown(void *data) mutex_unlock(&acpi_desc_lock); mutex_lock(&acpi_desc->init_mutex); - acpi_desc->cancel = 1; + set_bit(ARS_CANCEL, &acpi_desc->scrub_flags); cancel_delayed_work_sync(&acpi_desc->dwork); mutex_unlock(&acpi_desc->init_mutex); @@ -3729,6 +3797,7 @@ static __init int nfit_init(void) guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); + guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]); nfit_wq = create_singlethread_workqueue("nfit"); if (!nfit_wq) diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 33691aecfcee..2f8cf2a11e3b 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -34,11 +34,14 @@ /* https://msdn.microsoft.com/library/windows/hardware/mt604741 */ #define UUID_NFIT_DIMM_N_MSFT "1ee68b36-d4bd-4a1a-9a16-4f8e53d46e05" +/* http://www.uefi.org/RFIC_LIST (see "Virtual NVDIMM 0x1901") */ +#define UUID_NFIT_DIMM_N_HYPERV "5746c5f2-a9a2-4264-ad0e-e4ddc9e09e80" + #define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) -#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_MSFT +#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV #define NVDIMM_STANDARD_CMDMASK \ (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \ @@ -94,6 +97,7 @@ enum nfit_uuids { NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1, NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, + NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV, NFIT_SPA_VOLATILE, NFIT_SPA_PM, NFIT_SPA_DCR, @@ -210,6 +214,13 @@ struct nfit_mem { int family; }; +enum scrub_flags { + ARS_BUSY, + ARS_CANCEL, + ARS_VALID, + ARS_POLL, +}; + struct acpi_nfit_desc { struct nvdimm_bus_descriptor nd_desc; struct acpi_table_header acpi_header; @@ -223,7 +234,6 @@ struct acpi_nfit_desc { struct list_head idts; struct nvdimm_bus *nvdimm_bus; struct device *dev; - u8 ars_start_flags; struct nd_cmd_ars_status *ars_status; struct nfit_spa *scrub_spa; struct delayed_work dwork; @@ -232,8 +242,7 @@ struct acpi_nfit_desc { unsigned int max_ars; unsigned int scrub_count; unsigned int scrub_mode; - unsigned int scrub_busy:1; - unsigned int cancel:1; + unsigned long scrub_flags; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; unsigned long bus_nfit_cmd_force_en; diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index d5eec352a6e1..df70b1eaef58 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -317,10 +317,10 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) resource->res.data.irq.polarity = link->irq.polarity; if (link->irq.triggering == ACPI_EDGE_SENSITIVE) - resource->res.data.irq.sharable = + resource->res.data.irq.shareable = ACPI_EXCLUSIVE; else - resource->res.data.irq.sharable = 
ACPI_SHARED; + resource->res.data.irq.shareable = ACPI_SHARED; resource->res.data.irq.interrupt_count = 1; resource->res.data.irq.interrupts[0] = irq; break; @@ -335,10 +335,10 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq) resource->res.data.extended_irq.polarity = link->irq.polarity; if (link->irq.triggering == ACPI_EDGE_SENSITIVE) - resource->res.data.irq.sharable = + resource->res.data.irq.shareable = ACPI_EXCLUSIVE; else - resource->res.data.irq.sharable = ACPI_SHARED; + resource->res.data.irq.shareable = ACPI_SHARED; resource->res.data.extended_irq.interrupt_count = 1; resource->res.data.extended_irq.interrupts[0] = irq; /* ignore resource_source, it's optional */ diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c index ca18e0d23df9..c14cfaea92e2 100644 --- a/drivers/acpi/pmic/intel_pmic.c +++ b/drivers/acpi/pmic/intel_pmic.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include "intel_pmic.h" @@ -36,6 +37,8 @@ struct intel_pmic_opregion { struct intel_pmic_regs_handler_ctx ctx; }; +static struct intel_pmic_opregion *intel_pmic_opregion; + static int pmic_get_reg_bit(int address, struct pmic_table *table, int count, int *reg, int *bit) { @@ -304,6 +307,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, } opregion->data = d; + intel_pmic_opregion = opregion; return 0; out_remove_thermal_handler: @@ -319,3 +323,60 @@ out_error: return ret; } EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler); + +/** + * intel_soc_pmic_exec_mipi_pmic_seq_element - Execute PMIC MIPI sequence + * @i2c_address: I2C client address for the PMIC + * @reg_address: PMIC register address + * @value: New value for the register bits to change + * @mask: Mask indicating which register bits to change + * + * DSI LCD panels describe an initialization sequence in the i915 VBT (Video + * BIOS Tables) using so called MIPI sequences. One possible element in these + * sequences is a PMIC specific element of 15 bytes. + * + * This function executes these PMIC specific elements sending the embedded + * commands to the PMIC. + * + * Return 0 on success, < 0 on failure. 
+ */ +int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, + u32 value, u32 mask) +{ + struct intel_pmic_opregion_data *d; + int ret; + + if (!intel_pmic_opregion) { + pr_warn("%s: No PMIC registered\n", __func__); + return -ENXIO; + } + + d = intel_pmic_opregion->data; + + mutex_lock(&intel_pmic_opregion->lock); + + if (d->exec_mipi_pmic_seq_element) { + ret = d->exec_mipi_pmic_seq_element(intel_pmic_opregion->regmap, + i2c_address, reg_address, + value, mask); + } else if (d->pmic_i2c_address) { + if (i2c_address == d->pmic_i2c_address) { + ret = regmap_update_bits(intel_pmic_opregion->regmap, + reg_address, mask, value); + } else { + pr_err("%s: Unexpected i2c-addr: 0x%02x (reg-addr 0x%x value 0x%x mask 0x%x)\n", + __func__, i2c_address, reg_address, value, mask); + ret = -ENXIO; + } + } else { + pr_warn("%s: Not implemented\n", __func__); + pr_warn("%s: i2c-addr: 0x%x reg-addr 0x%x value 0x%x mask 0x%x\n", + __func__, i2c_address, reg_address, value, mask); + ret = -EOPNOTSUPP; + } + + mutex_unlock(&intel_pmic_opregion->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(intel_soc_pmic_exec_mipi_pmic_seq_element); diff --git a/drivers/acpi/pmic/intel_pmic.h b/drivers/acpi/pmic/intel_pmic.h index 095afc96952e..89379476a1df 100644 --- a/drivers/acpi/pmic/intel_pmic.h +++ b/drivers/acpi/pmic/intel_pmic.h @@ -15,10 +15,14 @@ struct intel_pmic_opregion_data { int (*update_aux)(struct regmap *r, int reg, int raw_temp); int (*get_policy)(struct regmap *r, int reg, int bit, u64 *value); int (*update_policy)(struct regmap *r, int reg, int bit, int enable); + int (*exec_mipi_pmic_seq_element)(struct regmap *r, u16 i2c_address, + u32 reg_address, u32 value, u32 mask); struct pmic_table *power_table; int power_table_count; struct pmic_table *thermal_table; int thermal_table_count; + /* For generic exec_mipi_pmic_seq_element handling */ + int pmic_i2c_address; }; int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, struct regmap *regmap, struct intel_pmic_opregion_data *d); diff --git a/drivers/acpi/pmic/intel_pmic_chtwc.c b/drivers/acpi/pmic/intel_pmic_chtwc.c index 078b0448f30a..7ffd5624b8e1 100644 --- a/drivers/acpi/pmic/intel_pmic_chtwc.c +++ b/drivers/acpi/pmic/intel_pmic_chtwc.c @@ -231,6 +231,24 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg, return regmap_update_bits(regmap, reg, bitmask, on ? 1 : 0); } +static int intel_cht_wc_exec_mipi_pmic_seq_element(struct regmap *regmap, + u16 i2c_client_address, + u32 reg_address, + u32 value, u32 mask) +{ + u32 address; + + if (i2c_client_address > 0xff || reg_address > 0xff) { + pr_warn("%s warning addresses too big client 0x%x reg 0x%x\n", + __func__, i2c_client_address, reg_address); + return -ERANGE; + } + + address = (i2c_client_address << 8) | reg_address; + + return regmap_update_bits(regmap, address, mask, value); +} + /* * The thermal table and ops are empty, we do not support the Thermal opregion * (DPTF) due to lacking documentation. 
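A minimal caller sketch for the new intel_soc_pmic_exec_mipi_pmic_seq_element() helper above; the values are made up, though 0x34 matches the xpower PMIC I2C address set later in this patch:

    static void example_run_pmic_element(void)
    {
            u16 i2c_address = 0x34;  /* PMIC I2C client address from the sequence element */
            u32 reg_address = 0x30;  /* PMIC register */
            u32 value = 0x01;        /* bits to set */
            u32 mask = 0x01;         /* bits to change */
            int ret;

            ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
                                                            reg_address,
                                                            value, mask);
            if (ret)
                    pr_err("PMIC MIPI sequence element failed: %d\n", ret);
    }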
@@ -238,6 +256,7 @@ static int intel_cht_wc_pmic_update_power(struct regmap *regmap, int reg, static struct intel_pmic_opregion_data intel_cht_wc_pmic_opregion_data = { .get_power = intel_cht_wc_pmic_get_power, .update_power = intel_cht_wc_pmic_update_power, + .exec_mipi_pmic_seq_element = intel_cht_wc_exec_mipi_pmic_seq_element, .power_table = power_table, .power_table_count = ARRAY_SIZE(power_table), }; diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index e7c0006e6602..a091d5a8392c 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -265,6 +265,7 @@ static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = { .power_table_count = ARRAY_SIZE(power_table), .thermal_table = thermal_table, .thermal_table_count = ARRAY_SIZE(thermal_table), + .pmic_i2c_address = 0x34, }; static acpi_status intel_xpower_pmic_gpio_handler(u32 function, diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index da031b1df6f5..ad31c50de3be 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -451,6 +451,11 @@ static struct acpi_pptt_processor *acpi_find_processor_package_id(struct acpi_ta return cpu; } +static void acpi_pptt_warn_missing(void) +{ + pr_warn_once("No PPTT table found, cpu and cache topology may be inaccurate\n"); +} + /** * topology_get_acpi_cpu_tag() - Find a unique topology value for a feature * @table: Pointer to the head of the PPTT table @@ -498,7 +503,7 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag) status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, cpu topology may be inaccurate\n"); + acpi_pptt_warn_missing(); return -ENOENT; } retval = topology_get_acpi_cpu_tag(table, cpu, level, flag); @@ -531,7 +536,7 @@ int acpi_find_last_cache_level(unsigned int cpu) acpi_cpu_id = get_acpi_id_for_cpu(cpu); status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, cache topology may be inaccurate\n"); + acpi_pptt_warn_missing(); } else { number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id); acpi_put_table(table); @@ -563,7 +568,7 @@ int cache_setup_acpi(unsigned int cpu) status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, cache topology may be inaccurate\n"); + acpi_pptt_warn_missing(); return -ENOENT; } @@ -617,7 +622,7 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level) status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); if (ACPI_FAILURE(status)) { - pr_warn_once("No PPTT table found, topology may be inaccurate\n"); + acpi_pptt_warn_missing(); return -ENOENT; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index b2131c4ea124..98d4ec5bf450 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -282,6 +282,13 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) pr->power.states[ACPI_STATE_C2].address, pr->power.states[ACPI_STATE_C3].address)); + snprintf(pr->power.states[ACPI_STATE_C2].desc, + ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x", + pr->power.states[ACPI_STATE_C2].address); + snprintf(pr->power.states[ACPI_STATE_C3].desc, + ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", + pr->power.states[ACPI_STATE_C3].address); + return 0; } diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 316a0fc785e3..d556f2144de8 100644 --- a/drivers/acpi/resource.c +++ 
b/drivers/acpi/resource.c @@ -476,7 +476,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, } acpi_dev_get_irqresource(res, irq->interrupts[index], irq->triggering, irq->polarity, - irq->sharable, true); + irq->shareable, true); break; case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: ext_irq = &ares->data.extended_irq; @@ -487,7 +487,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, if (is_gsi(ext_irq)) acpi_dev_get_irqresource(res, ext_irq->interrupts[index], ext_irq->triggering, ext_irq->polarity, - ext_irq->sharable, false); + ext_irq->shareable, false); else acpi_dev_irqresource_disabled(res, 0); break; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 5efd4219f112..446c959a8f08 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1545,6 +1545,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device) */ static const struct acpi_device_id i2c_multi_instantiate_ids[] = { {"BSG1160", }, + {"BSG2150", }, {"INT33FE", }, {"INT3515", }, {} diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 48eabb6c2d4f..8fccbe49612a 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -473,14 +473,22 @@ static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES); void __init acpi_table_upgrade(void) { - void *data = (void *)initrd_start; - size_t size = initrd_end - initrd_start; + void *data; + size_t size; int sig, no, table_nr = 0, total_offset = 0; long offset = 0; struct acpi_table_header *table; char cpio_path[32] = "kernel/firmware/acpi/"; struct cpio_data file; + if (IS_ENABLED(CONFIG_ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD)) { + data = __initramfs_start; + size = __initramfs_size; + } else { + data = (void *)initrd_start; + size = initrd_end - initrd_start; + } + if (data == NULL || size == 0) return; diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index 9a8e286dd86f..c6df14802741 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -56,6 +56,11 @@ static const struct always_present_id always_present_ids[] = { */ ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}), ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), + + /* Lenovo Yoga Book uses PWM2 for keyboard backlight control */ + ENTRY("80862289", "2", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"), + }), /* * The INT0002 device is necessary to clear wakeup interrupt sources * on Cherry Trail devices, without it we get nobody cared IRQ msgs. 
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig index 4c190f8d1f4c..6fdf2abe4598 100644 --- a/drivers/android/Kconfig +++ b/drivers/android/Kconfig @@ -10,7 +10,7 @@ if ANDROID config ANDROID_BINDER_IPC bool "Android Binder IPC Driver" - depends on MMU && !CPU_CACHE_VIVT + depends on MMU default n ---help--- Binder is used in Android for both communication between processes, diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cdfc87629efb..8685882da64c 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -329,6 +329,8 @@ struct binder_error { * (invariant after initialized) * @min_priority: minimum scheduling priority * (invariant after initialized) + * @txn_security_ctx: require sender's security context + * (invariant after initialized) * @async_todo: list of async work items * (protected by @proc->inner_lock) * @@ -365,6 +367,7 @@ struct binder_node { * invariant after initialization */ u8 accept_fds:1; + u8 txn_security_ctx:1; u8 min_priority; }; bool has_async_transaction; @@ -615,6 +618,7 @@ struct binder_transaction { long saved_priority; kuid_t sender_euid; struct list_head fd_fixups; + binder_uintptr_t security_ctx; /** * @lock: protects @from, @to_proc, and @to_thread * @@ -624,6 +628,26 @@ struct binder_transaction { spinlock_t lock; }; +/** + * struct binder_object - union of flat binder object types + * @hdr: generic object header + * @fbo: binder object (nodes and refs) + * @fdo: file descriptor object + * @bbo: binder buffer pointer + * @fdao: file descriptor array + * + * Used for type-independent object copies + */ +struct binder_object { + union { + struct binder_object_header hdr; + struct flat_binder_object fbo; + struct binder_fd_object fdo; + struct binder_buffer_object bbo; + struct binder_fd_array_object fdao; + }; +}; + /** * binder_proc_lock() - Acquire outer lock for given binder_proc * @proc: struct binder_proc to acquire @@ -1152,6 +1176,7 @@ static struct binder_node *binder_init_node_ilocked( node->work.type = BINDER_WORK_NODE; node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -2012,26 +2037,33 @@ static void binder_cleanup_transaction(struct binder_transaction *t, } /** - * binder_validate_object() - checks for a valid metadata object in a buffer. + * binder_get_object() - gets object and checks for valid metadata + * @proc: binder_proc owning the buffer * @buffer: binder_buffer that we're parsing. - * @offset: offset in the buffer at which to validate an object. + * @offset: offset in the @buffer at which to validate an object. + * @object: struct binder_object to read into * * Return: If there's a valid metadata object at @offset in @buffer, the - * size of that object. Otherwise, it returns zero. + * size of that object. Otherwise, it returns zero. The object + * is read into the struct binder_object pointed to by @object. 
*/ -static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) +static size_t binder_get_object(struct binder_proc *proc, + struct binder_buffer *buffer, + unsigned long offset, + struct binder_object *object) { - /* Check if we can read a header first */ + size_t read_size; struct binder_object_header *hdr; size_t object_size = 0; - if (buffer->data_size < sizeof(*hdr) || - offset > buffer->data_size - sizeof(*hdr) || - !IS_ALIGNED(offset, sizeof(u32))) + read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); + if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) return 0; + binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, + offset, read_size); - /* Ok, now see if we can read a complete object. */ - hdr = (struct binder_object_header *)(buffer->data + offset); + /* Ok, now see if we read a complete object. */ + hdr = &object->hdr; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: @@ -2060,10 +2092,13 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) /** * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. + * @proc: binder_proc owning the buffer * @b: binder_buffer containing the object + * @object: struct binder_object to read into * @index: index in offset array at which the binder_buffer_object is * located - * @start: points to the start of the offset array + * @start_offset: points to the start of the offset array + * @object_offsetp: offset of @object read from @b * @num_valid: the number of valid offsets in the offset array * * Return: If @index is within the valid range of the offset array @@ -2074,34 +2109,46 @@ static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset) * Note that the offset found in index @index itself is not * verified; this function assumes that @num_valid elements * from @start were previously verified to have valid offsets. + * If @object_offsetp is non-NULL, then the offset within + * @b is written to it. */ -static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, - binder_size_t index, - binder_size_t *start, - binder_size_t num_valid) +static struct binder_buffer_object *binder_validate_ptr( + struct binder_proc *proc, + struct binder_buffer *b, + struct binder_object *object, + binder_size_t index, + binder_size_t start_offset, + binder_size_t *object_offsetp, + binder_size_t num_valid) { - struct binder_buffer_object *buffer_obj; - binder_size_t *offp; + size_t object_size; + binder_size_t object_offset; + unsigned long buffer_offset; if (index >= num_valid) return NULL; - offp = start + index; - buffer_obj = (struct binder_buffer_object *)(b->data + *offp); - if (buffer_obj->hdr.type != BINDER_TYPE_PTR) + buffer_offset = start_offset + sizeof(binder_size_t) * index; + binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, + b, buffer_offset, sizeof(object_offset)); + object_size = binder_get_object(proc, b, object_offset, object); + if (!object_size || object->hdr.type != BINDER_TYPE_PTR) return NULL; + if (object_offsetp) + *object_offsetp = object_offset; - return buffer_obj; + return &object->bbo; } /** * binder_validate_fixup() - validates pointer/fd fixups happen in order. 
+ * @proc: binder_proc owning the buffer * @b: transaction buffer - * @objects_start start of objects buffer - * @buffer: binder_buffer_object in which to fix up - * @offset: start offset in @buffer to fix up - * @last_obj: last binder_buffer_object that we fixed up in - * @last_min_offset: minimum fixup offset in @last_obj + * @objects_start_offset: offset to start of objects buffer + * @buffer_obj_offset: offset to binder_buffer_object in which to fix up + * @fixup_offset: start offset in @buffer to fix up + * @last_obj_offset: offset to last binder_buffer_object that we fixed + * @last_min_offset: minimum fixup offset in object at @last_obj_offset * * Return: %true if a fixup in buffer @buffer at offset @offset is * allowed. @@ -2132,28 +2179,41 @@ static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b, * C (parent = A, offset = 16) * D (parent = B, offset = 0) // B is not A or any of A's parents */ -static bool binder_validate_fixup(struct binder_buffer *b, - binder_size_t *objects_start, - struct binder_buffer_object *buffer, +static bool binder_validate_fixup(struct binder_proc *proc, + struct binder_buffer *b, + binder_size_t objects_start_offset, + binder_size_t buffer_obj_offset, binder_size_t fixup_offset, - struct binder_buffer_object *last_obj, + binder_size_t last_obj_offset, binder_size_t last_min_offset) { - if (!last_obj) { + if (!last_obj_offset) { /* Nothing to fix up in */ return false; } - while (last_obj != buffer) { + while (last_obj_offset != buffer_obj_offset) { + unsigned long buffer_offset; + struct binder_object last_object; + struct binder_buffer_object *last_bbo; + size_t object_size = binder_get_object(proc, b, last_obj_offset, + &last_object); + if (object_size != sizeof(*last_bbo)) + return false; + + last_bbo = &last_object.bbo; /* * Safe to retrieve the parent of last_obj, since it * was already previously verified by the driver. 
*/ - if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) + if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) return false; - last_min_offset = last_obj->parent_offset + sizeof(uintptr_t); - last_obj = (struct binder_buffer_object *) - (b->data + *(objects_start + last_obj->parent)); + last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); + buffer_offset = objects_start_offset + + sizeof(binder_size_t) * last_bbo->parent, + binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset, + b, buffer_offset, + sizeof(last_obj_offset)); } return (fixup_offset >= last_min_offset); } @@ -2218,35 +2278,42 @@ static void binder_deferred_fd_close(int fd) static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, - binder_size_t *failed_at) + binder_size_t failed_at, + bool is_failure) { - binder_size_t *offp, *off_start, *off_end; int debug_id = buffer->debug_id; + binder_size_t off_start_offset, buffer_offset, off_end_offset; binder_debug(BINDER_DEBUG_TRANSACTION, - "%d buffer release %d, size %zd-%zd, failed at %pK\n", + "%d buffer release %d, size %zd-%zd, failed at %llx\n", proc->pid, buffer->debug_id, - buffer->data_size, buffer->offsets_size, failed_at); + buffer->data_size, buffer->offsets_size, + (unsigned long long)failed_at); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); - off_start = (binder_size_t *)(buffer->data + - ALIGN(buffer->data_size, sizeof(void *))); - if (failed_at) - off_end = failed_at; - else - off_end = (void *)off_start + buffer->offsets_size; - for (offp = off_start; offp < off_end; offp++) { + off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); + off_end_offset = is_failure ? failed_at : + off_start_offset + buffer->offsets_size; + for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; + buffer_offset += sizeof(binder_size_t)) { struct binder_object_header *hdr; - size_t object_size = binder_validate_object(buffer, *offp); - + size_t object_size; + struct binder_object object; + binder_size_t object_offset; + + binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, + buffer, buffer_offset, + sizeof(object_offset)); + object_size = binder_get_object(proc, buffer, + object_offset, &object); if (object_size == 0) { pr_err("transaction release %d bad object at offset %lld, size %zd\n", - debug_id, (u64)*offp, buffer->data_size); + debug_id, (u64)object_offset, buffer->data_size); continue; } - hdr = (struct binder_object_header *)(buffer->data + *offp); + hdr = &object.hdr; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { @@ -2309,10 +2376,11 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, case BINDER_TYPE_FDA: { struct binder_fd_array_object *fda; struct binder_buffer_object *parent; - uintptr_t parent_buffer; - u32 *fd_array; + struct binder_object ptr_object; + binder_size_t fda_offset; size_t fd_index; binder_size_t fd_buf_size; + binder_size_t num_valid; if (proc->tsk != current->group_leader) { /* @@ -2323,23 +2391,19 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, continue; } + num_valid = (buffer_offset - off_start_offset) / + sizeof(binder_size_t); fda = to_binder_fd_array_object(hdr); - parent = binder_validate_ptr(buffer, fda->parent, - off_start, - offp - off_start); + parent = binder_validate_ptr(proc, buffer, &ptr_object, + fda->parent, + off_start_offset, + NULL, + num_valid); if (!parent) { pr_err("transaction release %d bad parent offset\n", debug_id); 
continue; } - /* - * Since the parent was already fixed up, convert it - * back to kernel address space to access it - */ - parent_buffer = parent->buffer - - binder_alloc_get_user_buffer_offset( - &proc->alloc); - fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { pr_err("transaction release %d invalid number of fds (%lld)\n", @@ -2353,9 +2417,29 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, debug_id, (u64)fda->num_fds); continue; } - fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); - for (fd_index = 0; fd_index < fda->num_fds; fd_index++) - binder_deferred_fd_close(fd_array[fd_index]); + /* + * the source data for binder_buffer_object is visible + * to user-space and the @buffer element is the user + * pointer to the buffer_object containing the fd_array. + * Convert the address to an offset relative to + * the base of the transaction buffer. + */ + fda_offset = + (parent->buffer - (uintptr_t)buffer->user_data) + + fda->parent_offset; + for (fd_index = 0; fd_index < fda->num_fds; + fd_index++) { + u32 fd; + binder_size_t offset = fda_offset + + fd_index * sizeof(fd); + + binder_alloc_copy_from_buffer(&proc->alloc, + &fd, + buffer, + offset, + sizeof(fd)); + binder_deferred_fd_close(fd); + } } break; default: pr_err("transaction release %d bad object type %x\n", @@ -2491,7 +2575,7 @@ done: return ret; } -static int binder_translate_fd(u32 *fdp, +static int binder_translate_fd(u32 fd, binder_size_t fd_offset, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) @@ -2502,7 +2586,6 @@ static int binder_translate_fd(u32 *fdp, struct file *file; int ret = 0; bool target_allows_fd; - int fd = *fdp; if (in_reply_to) target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); @@ -2541,7 +2624,7 @@ static int binder_translate_fd(u32 *fdp, goto err_alloc; } fixup->file = file; - fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data; + fixup->offset = fd_offset; trace_binder_transaction_fd_send(t, fd, fixup->offset); list_add_tail(&fixup->fixup_entry, &t->fd_fixups); @@ -2562,8 +2645,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, struct binder_transaction *in_reply_to) { binder_size_t fdi, fd_buf_size; - uintptr_t parent_buffer; - u32 *fd_array; + binder_size_t fda_offset; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; @@ -2581,20 +2663,29 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, return -EINVAL; } /* - * Since the parent was already fixed up, convert it - * back to the kernel address space to access it + * the source data for binder_buffer_object is visible + * to user-space and the @buffer element is the user + * pointer to the buffer_object containing the fd_array. + * Convert the address to an offset relative to + * the base of the transaction buffer. 
*/ - parent_buffer = parent->buffer - - binder_alloc_get_user_buffer_offset(&target_proc->alloc); - fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset); - if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) { + fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + + fda->parent_offset; + if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); return -EINVAL; } for (fdi = 0; fdi < fda->num_fds; fdi++) { - int ret = binder_translate_fd(&fd_array[fdi], t, thread, - in_reply_to); + u32 fd; + int ret; + binder_size_t offset = fda_offset + fdi * sizeof(fd); + + binder_alloc_copy_from_buffer(&target_proc->alloc, + &fd, t->buffer, + offset, sizeof(fd)); + ret = binder_translate_fd(fd, offset, t, thread, + in_reply_to); if (ret < 0) return ret; } @@ -2604,30 +2695,34 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda, static int binder_fixup_parent(struct binder_transaction *t, struct binder_thread *thread, struct binder_buffer_object *bp, - binder_size_t *off_start, + binder_size_t off_start_offset, binder_size_t num_valid, - struct binder_buffer_object *last_fixup_obj, + binder_size_t last_fixup_obj_off, binder_size_t last_fixup_min_off) { struct binder_buffer_object *parent; - u8 *parent_buffer; struct binder_buffer *b = t->buffer; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; + struct binder_object object; + binder_size_t buffer_offset; + binder_size_t parent_offset; if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT)) return 0; - parent = binder_validate_ptr(b, bp->parent, off_start, num_valid); + parent = binder_validate_ptr(target_proc, b, &object, bp->parent, + off_start_offset, &parent_offset, + num_valid); if (!parent) { binder_user_error("%d:%d got transaction with invalid parent offset or type\n", proc->pid, thread->pid); return -EINVAL; } - if (!binder_validate_fixup(b, off_start, - parent, bp->parent_offset, - last_fixup_obj, + if (!binder_validate_fixup(target_proc, b, off_start_offset, + parent_offset, bp->parent_offset, + last_fixup_obj_off, last_fixup_min_off)) { binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", proc->pid, thread->pid); @@ -2641,10 +2736,10 @@ static int binder_fixup_parent(struct binder_transaction *t, proc->pid, thread->pid); return -EINVAL; } - parent_buffer = (u8 *)((uintptr_t)parent->buffer - - binder_alloc_get_user_buffer_offset( - &target_proc->alloc)); - *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer; + buffer_offset = bp->parent_offset + + (uintptr_t)parent->buffer - (uintptr_t)b->user_data; + binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset, + &bp->buffer, sizeof(bp->buffer)); return 0; } @@ -2763,9 +2858,10 @@ static void binder_transaction(struct binder_proc *proc, struct binder_transaction *t; struct binder_work *w; struct binder_work *tcomplete; - binder_size_t *offp, *off_end, *off_start; + binder_size_t buffer_offset = 0; + binder_size_t off_start_offset, off_end_offset; binder_size_t off_min; - u8 *sg_bufp, *sg_buf_end; + binder_size_t sg_buf_offset, sg_buf_end_offset; struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; @@ -2774,10 +2870,12 @@ static void binder_transaction(struct binder_proc *proc, uint32_t return_error = 0; uint32_t return_error_param = 0; uint32_t return_error_line = 0; - struct binder_buffer_object 
*last_fixup_obj = NULL; + binder_size_t last_fixup_obj_off = 0; binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; int t_debug_id = atomic_inc_return(&binder_last_id); + char *secctx = NULL; + u32 secctx_sz = 0; e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -3020,6 +3118,20 @@ static void binder_transaction(struct binder_proc *proc, t->flags = tr->flags; t->priority = task_nice(current); + if (target_node && target_node->txn_security_ctx) { + u32 secid; + + security_task_getsecid(proc->tsk, &secid); + ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); + if (ret) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_get_secctx_failed; + } + extra_buffers_size += ALIGN(secctx_sz, sizeof(u64)); + } + trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, @@ -3036,16 +3148,30 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = NULL; goto err_binder_alloc_buf_failed; } + if (secctx) { + size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + + ALIGN(tr->offsets_size, sizeof(void *)) + + ALIGN(extra_buffers_size, sizeof(void *)) - + ALIGN(secctx_sz, sizeof(u64)); + + t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, buf_offset, + secctx, secctx_sz); + security_release_secctx(secctx, secctx_sz); + secctx = NULL; + } t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; trace_binder_transaction_alloc_buf(t->buffer); - off_start = (binder_size_t *)(t->buffer->data + - ALIGN(tr->data_size, sizeof(void *))); - offp = off_start; - if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) - tr->data.ptr.buffer, tr->data_size)) { + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, 0, + (const void __user *) + (uintptr_t)tr->data.ptr.buffer, + tr->data_size)) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; @@ -3053,8 +3179,13 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_copy_data_failed; } - if (copy_from_user(offp, (const void __user *)(uintptr_t) - tr->data.ptr.offsets, tr->offsets_size)) { + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, + ALIGN(tr->data_size, sizeof(void *)), + (const void __user *) + (uintptr_t)tr->data.ptr.offsets, + tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; @@ -3079,17 +3210,30 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - off_end = (void *)off_start + tr->offsets_size; - sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *))); - sg_buf_end = sg_bufp + extra_buffers_size; + off_start_offset = ALIGN(tr->data_size, sizeof(void *)); + buffer_offset = off_start_offset; + off_end_offset = off_start_offset + tr->offsets_size; + sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); + sg_buf_end_offset = sg_buf_offset + extra_buffers_size; off_min = 0; - for (; offp < off_end; offp++) { + for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; + buffer_offset += sizeof(binder_size_t)) { struct binder_object_header *hdr; - size_t object_size = 
binder_validate_object(t->buffer, *offp); - - if (object_size == 0 || *offp < off_min) { + size_t object_size; + struct binder_object object; + binder_size_t object_offset; + + binder_alloc_copy_from_buffer(&target_proc->alloc, + &object_offset, + t->buffer, + buffer_offset, + sizeof(object_offset)); + object_size = binder_get_object(target_proc, t->buffer, + object_offset, &object); + if (object_size == 0 || object_offset < off_min) { binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", - proc->pid, thread->pid, (u64)*offp, + proc->pid, thread->pid, + (u64)object_offset, (u64)off_min, (u64)t->buffer->data_size); return_error = BR_FAILED_REPLY; @@ -3098,8 +3242,8 @@ static void binder_transaction(struct binder_proc *proc, goto err_bad_offset; } - hdr = (struct binder_object_header *)(t->buffer->data + *offp); - off_min = *offp + object_size; + hdr = &object.hdr; + off_min = object_offset + object_size; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { @@ -3113,6 +3257,9 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_translate_failed; } + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, object_offset, + fp, sizeof(*fp)); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { @@ -3126,12 +3273,17 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_translate_failed; } + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, object_offset, + fp, sizeof(*fp)); } break; case BINDER_TYPE_FD: { struct binder_fd_object *fp = to_binder_fd_object(hdr); - int ret = binder_translate_fd(&fp->fd, t, thread, - in_reply_to); + binder_size_t fd_offset = object_offset + + (uintptr_t)&fp->fd - (uintptr_t)fp; + int ret = binder_translate_fd(fp->fd, fd_offset, t, + thread, in_reply_to); if (ret < 0) { return_error = BR_FAILED_REPLY; @@ -3140,14 +3292,23 @@ static void binder_transaction(struct binder_proc *proc, goto err_translate_failed; } fp->pad_binder = 0; + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, object_offset, + fp, sizeof(*fp)); } break; case BINDER_TYPE_FDA: { + struct binder_object ptr_object; + binder_size_t parent_offset; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); + size_t num_valid = (buffer_offset - off_start_offset) / + sizeof(binder_size_t); struct binder_buffer_object *parent = - binder_validate_ptr(t->buffer, fda->parent, - off_start, - offp - off_start); + binder_validate_ptr(target_proc, t->buffer, + &ptr_object, fda->parent, + off_start_offset, + &parent_offset, + num_valid); if (!parent) { binder_user_error("%d:%d got transaction with invalid parent offset or type\n", proc->pid, thread->pid); @@ -3156,9 +3317,11 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_parent; } - if (!binder_validate_fixup(t->buffer, off_start, - parent, fda->parent_offset, - last_fixup_obj, + if (!binder_validate_fixup(target_proc, t->buffer, + off_start_offset, + parent_offset, + fda->parent_offset, + last_fixup_obj_off, last_fixup_min_off)) { binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", proc->pid, thread->pid); @@ -3175,14 +3338,15 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_translate_failed; } - last_fixup_obj = parent; + last_fixup_obj_off = parent_offset; last_fixup_min_off = fda->parent_offset + sizeof(u32) *
fda->num_fds; } break; case BINDER_TYPE_PTR: { struct binder_buffer_object *bp = to_binder_buffer_object(hdr); - size_t buf_left = sg_buf_end - sg_bufp; + size_t buf_left = sg_buf_end_offset - sg_buf_offset; + size_t num_valid; if (bp->length > buf_left) { binder_user_error("%d:%d got transaction with too large buffer\n", @@ -3192,9 +3356,13 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_bad_offset; } - if (copy_from_user(sg_bufp, - (const void __user *)(uintptr_t) - bp->buffer, bp->length)) { + if (binder_alloc_copy_user_to_buffer( + &target_proc->alloc, + t->buffer, + sg_buf_offset, + (const void __user *) + (uintptr_t)bp->buffer, + bp->length)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error_param = -EFAULT; @@ -3203,14 +3371,16 @@ static void binder_transaction(struct binder_proc *proc, goto err_copy_data_failed; } /* Fixup buffer pointer to target proc address space */ - bp->buffer = (uintptr_t)sg_bufp + - binder_alloc_get_user_buffer_offset( - &target_proc->alloc); - sg_bufp += ALIGN(bp->length, sizeof(u64)); - - ret = binder_fixup_parent(t, thread, bp, off_start, - offp - off_start, - last_fixup_obj, + bp->buffer = (uintptr_t) + t->buffer->user_data + sg_buf_offset; + sg_buf_offset += ALIGN(bp->length, sizeof(u64)); + + num_valid = (buffer_offset - off_start_offset) / + sizeof(binder_size_t); + ret = binder_fixup_parent(t, thread, bp, + off_start_offset, + num_valid, + last_fixup_obj_off, last_fixup_min_off); if (ret < 0) { return_error = BR_FAILED_REPLY; @@ -3218,7 +3388,10 @@ static void binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_translate_failed; } - last_fixup_obj = bp; + binder_alloc_copy_to_buffer(&target_proc->alloc, + t->buffer, object_offset, + bp, sizeof(*bp)); + last_fixup_obj_off = object_offset; last_fixup_min_off = 0; } break; default: @@ -3298,13 +3471,17 @@ err_bad_parent: err_copy_data_failed: binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); - binder_transaction_buffer_release(target_proc, t->buffer, offp); + binder_transaction_buffer_release(target_proc, t->buffer, + buffer_offset, true); if (target_node) binder_dec_node_tmpref(target_node); target_node = NULL; t->buffer->transaction = NULL; binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: + if (secctx) + security_release_secctx(secctx, secctx_sz); +err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: @@ -3396,7 +3573,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer) binder_node_inner_unlock(buf_node); } trace_binder_transaction_buffer_release(buffer); - binder_transaction_buffer_release(proc, buffer, NULL); + binder_transaction_buffer_release(proc, buffer, 0, false); binder_alloc_free_buf(&proc->alloc, buffer); } @@ -3915,6 +4092,7 @@ static int binder_wait_for_work(struct binder_thread *thread, /** * binder_apply_fd_fixups() - finish fd translation + * @proc: binder_proc associated @t->buffer * @t: binder transaction with list of fd fixups * * Now that we are in the context of the transaction target @@ -3926,14 +4104,14 @@ static int binder_wait_for_work(struct binder_thread *thread, * fput'ing files that have not been processed and ksys_close'ing * any fds that have already been allocated.
*/ -static int binder_apply_fd_fixups(struct binder_transaction *t) +static int binder_apply_fd_fixups(struct binder_proc *proc, + struct binder_transaction *t) { struct binder_txn_fd_fixup *fixup, *tmp; int ret = 0; list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { int fd = get_unused_fd_flags(O_CLOEXEC); - u32 *fdp; if (fd < 0) { binder_debug(BINDER_DEBUG_TRANSACTION, @@ -3948,33 +4126,20 @@ static int binder_apply_fd_fixups(struct binder_transaction *t) trace_binder_transaction_fd_recv(t, fd, fixup->offset); fd_install(fd, fixup->file); fixup->file = NULL; - fdp = (u32 *)(t->buffer->data + fixup->offset); - /* - * This store can cause problems for CPUs with a - * VIVT cache (eg ARMv5) since the cache cannot - * detect virtual aliases to the same physical cacheline. - * To support VIVT, this address and the user-space VA - * would both need to be flushed. Since this kernel - * VA is not constructed via page_to_virt(), we can't - * use flush_dcache_page() on it, so we'd have to use - * an internal function. If devices with VIVT ever - * need to run Android, we'll either need to go back - * to patching the translated fd from the sender side - * (using the non-standard kernel functions), or rework - * how the kernel uses the buffer to use page_to_virt() - * addresses instead of allocating in our own vm area. - * - * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT. - */ - *fdp = fd; + binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, + fixup->offset, &fd, + sizeof(u32)); } list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { if (fixup->file) { fput(fixup->file); } else if (ret) { - u32 *fdp = (u32 *)(t->buffer->data + fixup->offset); + u32 fd; - binder_deferred_fd_close(*fdp); + binder_alloc_copy_from_buffer(&proc->alloc, &fd, + t->buffer, fixup->offset, + sizeof(fd)); + binder_deferred_fd_close(fd); } list_del(&fixup->fixup_entry); kfree(fixup); @@ -4036,11 +4201,13 @@ retry: while (1) { uint32_t cmd; - struct binder_transaction_data tr; + struct binder_transaction_data_secctx tr; + struct binder_transaction_data *trd = &tr.transaction_data; struct binder_work *w = NULL; struct list_head *list = NULL; struct binder_transaction *t = NULL; struct binder_thread *t_from; + size_t trsize = sizeof(*trd); binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&thread->todo)) @@ -4240,8 +4407,8 @@ retry: if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; - tr.target.ptr = target_node->ptr; - tr.cookie = target_node->cookie; + trd->target.ptr = target_node->ptr; + trd->cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) @@ -4251,25 +4418,26 @@ retry: binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { - tr.target.ptr = 0; - tr.cookie = 0; + trd->target.ptr = 0; + trd->cookie = 0; cmd = BR_REPLY; } - tr.code = t->code; - tr.flags = t->flags; - tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); + trd->code = t->code; + trd->flags = t->flags; + trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); t_from = binder_get_txn_from(t); if (t_from) { struct task_struct *sender = t_from->proc->tsk; - tr.sender_pid = task_tgid_nr_ns(sender, - task_active_pid_ns(current)); + trd->sender_pid = + task_tgid_nr_ns(sender, + task_active_pid_ns(current)); } else { - tr.sender_pid = 0; + trd->sender_pid = 0; } - ret = binder_apply_fd_fixups(t); + ret = binder_apply_fd_fixups(proc, t); if (ret) { struct 
binder_buffer *buffer = t->buffer; bool oneway = !!(t->flags & TF_ONE_WAY); @@ -4297,15 +4465,18 @@ retry: } continue; } - tr.data_size = t->buffer->data_size; - tr.offsets_size = t->buffer->offsets_size; - tr.data.ptr.buffer = (binder_uintptr_t) - ((uintptr_t)t->buffer->data + - binder_alloc_get_user_buffer_offset(&proc->alloc)); - tr.data.ptr.offsets = tr.data.ptr.buffer + + trd->data_size = t->buffer->data_size; + trd->offsets_size = t->buffer->offsets_size; + trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; + trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); + tr.secctx = t->security_ctx; + if (t->security_ctx) { + cmd = BR_TRANSACTION_SEC_CTX; + trsize = sizeof(tr); + } if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4316,7 +4487,7 @@ retry: return -EFAULT; } ptr += sizeof(uint32_t); - if (copy_to_user(ptr, &tr, sizeof(tr))) { + if (copy_to_user(ptr, &tr, trsize)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4325,7 +4496,7 @@ retry: return -EFAULT; } - ptr += sizeof(tr); + ptr += trsize; trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); @@ -4333,16 +4504,18 @@ retry: "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : - "BR_REPLY", + (cmd == BR_TRANSACTION_SEC_CTX) ? + "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, - (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); + (u64)trd->data.ptr.buffer, + (u64)trd->data.ptr.offsets); if (t_from) binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; - if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; @@ -4690,7 +4863,8 @@ out: return ret; } -static int binder_ioctl_set_ctx_mgr(struct file *filp) +static int binder_ioctl_set_ctx_mgr(struct file *filp, + struct flat_binder_object *fbo) { int ret = 0; struct binder_proc *proc = filp->private_data; @@ -4719,7 +4893,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) } else { context->binder_context_mgr_uid = curr_euid; } - new_node = binder_new_node(proc, NULL); + new_node = binder_new_node(proc, fbo); if (!new_node) { ret = -ENOMEM; goto out; @@ -4842,8 +5016,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) binder_inner_proc_unlock(proc); break; } + case BINDER_SET_CONTEXT_MGR_EXT: { + struct flat_binder_object fbo; + + if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { + ret = -EINVAL; + goto err; + } + ret = binder_ioctl_set_ctx_mgr(filp, &fbo); + if (ret) + goto err; + break; + } case BINDER_SET_CONTEXT_MGR: - ret = binder_ioctl_set_ctx_mgr(filp); + ret = binder_ioctl_set_ctx_mgr(filp, NULL); if (ret) goto err; break; @@ -5319,7 +5505,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m, seq_printf(m, " node %d", buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %pK\n", buffer->data_size, buffer->offsets_size, - buffer->data); + buffer->user_data); } static void print_binder_work_ilocked(struct seq_file *m, @@ -5854,9 +6040,10 @@ static int __init init_binder_device(const char *name) static int __init binder_init(void) { int ret; - char *device_name, *device_names, *device_tmp; + char *device_name, *device_tmp; struct binder_device 
*device; struct hlist_node *tmp; + char *device_names = NULL; ret = binder_alloc_shrinker_init(); if (ret) @@ -5898,23 +6085,29 @@ static int __init binder_init(void) &transaction_log_fops); } - /* - * Copy the module_parameter string, because we don't want to - * tokenize it in-place. - */ - device_names = kstrdup(binder_devices_param, GFP_KERNEL); - if (!device_names) { - ret = -ENOMEM; - goto err_alloc_device_names_failed; - } + if (strcmp(binder_devices_param, "") != 0) { + /* + * Copy the module_parameter string, because we don't want to + * tokenize it in-place. + */ + device_names = kstrdup(binder_devices_param, GFP_KERNEL); + if (!device_names) { + ret = -ENOMEM; + goto err_alloc_device_names_failed; + } - device_tmp = device_names; - while ((device_name = strsep(&device_tmp, ","))) { - ret = init_binder_device(device_name); - if (ret) - goto err_init_binder_device_failed; + device_tmp = device_names; + while ((device_name = strsep(&device_tmp, ","))) { + ret = init_binder_device(device_name); + if (ret) + goto err_init_binder_device_failed; + } } + ret = init_binderfs(); + if (ret) + goto err_init_binder_device_failed; + return ret; err_init_binder_device_failed: diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 022cd80e80cc..6389467670a0 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include "binder_alloc.h" #include "binder_trace.h" @@ -67,9 +69,8 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, struct binder_buffer *buffer) { if (list_is_last(&buffer->entry, &alloc->buffers)) - return (u8 *)alloc->buffer + - alloc->buffer_size - (u8 *)buffer->data; - return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data; + return alloc->buffer + alloc->buffer_size - buffer->user_data; + return binder_buffer_next(buffer)->user_data - buffer->user_data; } static void binder_insert_free_buffer(struct binder_alloc *alloc, @@ -119,9 +120,9 @@ static void binder_insert_allocated_buffer_locked( buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(buffer->free); - if (new_buffer->data < buffer->data) + if (new_buffer->user_data < buffer->user_data) p = &parent->rb_left; - else if (new_buffer->data > buffer->data) + else if (new_buffer->user_data > buffer->user_data) p = &parent->rb_right; else BUG(); @@ -136,17 +137,17 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( { struct rb_node *n = alloc->allocated_buffers.rb_node; struct binder_buffer *buffer; - void *kern_ptr; + void __user *uptr; - kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset); + uptr = (void __user *)user_ptr; while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); - if (kern_ptr < buffer->data) + if (uptr < buffer->user_data) n = n->rb_left; - else if (kern_ptr > buffer->data) + else if (uptr > buffer->user_data) n = n->rb_right; else { /* @@ -186,9 +187,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void *start, void *end) + void __user *start, void __user *end) { - void *page_addr; + void __user *page_addr; unsigned long user_page_addr; struct binder_lru_page *page; struct vm_area_struct *vma = NULL; @@ -263,18 +264,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, page->alloc = alloc; INIT_LIST_HEAD(&page->lru); - ret = 
map_kernel_range_noflush((unsigned long)page_addr, - PAGE_SIZE, PAGE_KERNEL, - &page->page_ptr); - flush_cache_vmap((unsigned long)page_addr, - (unsigned long)page_addr + PAGE_SIZE); - if (ret != 1) { - pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n", - alloc->pid, page_addr); - goto err_map_kernel_failed; - } - user_page_addr = - (uintptr_t)page_addr + alloc->user_buffer_offset; + user_page_addr = (uintptr_t)page_addr; ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); if (ret) { pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", @@ -312,8 +302,6 @@ free_range: continue; err_vm_insert_page_failed: - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); -err_map_kernel_failed: __free_page(page->page_ptr); page->page_ptr = NULL; err_alloc_page_failed: @@ -368,8 +356,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_buffer *buffer; size_t buffer_size; struct rb_node *best_fit = NULL; - void *has_page_addr; - void *end_page_addr; + void __user *has_page_addr; + void __user *end_page_addr; size_t size, data_offsets_size; int ret; @@ -467,15 +455,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked( "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", alloc->pid, size, buffer, buffer_size); - has_page_addr = - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); + has_page_addr = (void __user *) + (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); WARN_ON(n && buffer_size != size); end_page_addr = - (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); + (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; - ret = binder_update_page_range(alloc, 1, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); + ret = binder_update_page_range(alloc, 1, (void __user *) + PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); if (ret) return ERR_PTR(ret); @@ -488,7 +476,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( __func__, alloc->pid); goto err_alloc_buf_struct_failed; } - new_buffer->data = (u8 *)buffer->data + size; + new_buffer->user_data = (u8 __user *)buffer->user_data + size; list_add(&new_buffer->entry, &buffer->entry); new_buffer->free = 1; binder_insert_free_buffer(alloc, new_buffer); @@ -514,8 +502,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked( return buffer; err_alloc_buf_struct_failed: - binder_update_page_range(alloc, 0, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), + binder_update_page_range(alloc, 0, (void __user *) + PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); return ERR_PTR(-ENOMEM); } @@ -550,14 +538,15 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, return buffer; } -static void *buffer_start_page(struct binder_buffer *buffer) +static void __user *buffer_start_page(struct binder_buffer *buffer) { - return (void *)((uintptr_t)buffer->data & PAGE_MASK); + return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); } -static void *prev_buffer_end_page(struct binder_buffer *buffer) +static void __user *prev_buffer_end_page(struct binder_buffer *buffer) { - return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK); + return (void __user *) + (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); } static void binder_delete_free_buffer(struct binder_alloc *alloc, @@ -572,7 +561,8 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, to_free = false; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, 
"%d: merge free, buffer %pK share page with %pK\n", - alloc->pid, buffer->data, prev->data); + alloc->pid, buffer->user_data, + prev->user_data); } if (!list_is_last(&buffer->entry, &alloc->buffers)) { @@ -582,23 +572,24 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK share page with %pK\n", alloc->pid, - buffer->data, - next->data); + buffer->user_data, + next->user_data); } } - if (PAGE_ALIGNED(buffer->data)) { + if (PAGE_ALIGNED(buffer->user_data)) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer start %pK is page aligned\n", - alloc->pid, buffer->data); + alloc->pid, buffer->user_data); to_free = false; } if (to_free) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK do not share page with %pK or %pK\n", - alloc->pid, buffer->data, - prev->data, next ? next->data : NULL); + alloc->pid, buffer->user_data, + prev->user_data, + next ? next->user_data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), buffer_start_page(buffer) + PAGE_SIZE); } @@ -624,8 +615,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, BUG_ON(buffer->free); BUG_ON(size > buffer_size); BUG_ON(buffer->transaction != NULL); - BUG_ON(buffer->data < alloc->buffer); - BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size); + BUG_ON(buffer->user_data < alloc->buffer); + BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); if (buffer->async_transaction) { alloc->free_async_space += size + sizeof(struct binder_buffer); @@ -636,8 +627,9 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, } binder_update_page_range(alloc, 0, - (void *)PAGE_ALIGN((uintptr_t)buffer->data), - (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); + (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), + (void __user *)(((uintptr_t) + buffer->user_data + buffer_size) & PAGE_MASK)); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -693,7 +685,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct vm_area_struct *vma) { int ret; - struct vm_struct *area; const char *failure_string; struct binder_buffer *buffer; @@ -704,28 +695,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, goto err_already_mapped; } - area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC); - if (area == NULL) { - ret = -ENOMEM; - failure_string = "get_vm_area"; - goto err_get_vm_area_failed; - } - alloc->buffer = area->addr; - alloc->user_buffer_offset = - vma->vm_start - (uintptr_t)alloc->buffer; + alloc->buffer = (void __user *)vma->vm_start; mutex_unlock(&binder_alloc_mmap_lock); -#ifdef CONFIG_CPU_CACHE_VIPT - if (cache_is_vipt_aliasing()) { - while (CACHE_COLOUR( - (vma->vm_start ^ (uint32_t)alloc->buffer))) { - pr_info("%s: %d %lx-%lx maps %pK bad alignment\n", - __func__, alloc->pid, vma->vm_start, - vma->vm_end, alloc->buffer); - vma->vm_start += PAGE_SIZE; - } - } -#endif alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE, sizeof(alloc->pages[0]), GFP_KERNEL); @@ -743,7 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, goto err_alloc_buf_struct_failed; } - buffer->data = alloc->buffer; + buffer->user_data = alloc->buffer; list_add(&buffer->entry, &alloc->buffers); buffer->free = 1; binder_insert_free_buffer(alloc, buffer); @@ -758,9 +730,7 @@ err_alloc_buf_struct_failed: alloc->pages = NULL; err_alloc_pages_failed: mutex_lock(&binder_alloc_mmap_lock); - 
vfree(alloc->buffer); alloc->buffer = NULL; -err_get_vm_area_failed: err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); binder_alloc_debug(BINDER_DEBUG_USER_ERROR, @@ -806,7 +776,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int i; for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - void *page_addr; + void __user *page_addr; bool on_lru; if (!alloc->pages[i].page_ptr) @@ -819,12 +789,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) "%s: %d: page %d at %pK %s\n", __func__, alloc->pid, i, page_addr, on_lru ? "on lru" : "active"); - unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); __free_page(alloc->pages[i].page_ptr); page_count++; } kfree(alloc->pages); - vfree(alloc->buffer); } mutex_unlock(&alloc->mutex); if (alloc->vma_vm_mm) @@ -839,7 +807,7 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", - prefix, buffer->debug_id, buffer->data, + prefix, buffer->debug_id, buffer->user_data, buffer->data_size, buffer->offsets_size, buffer->extra_buffers_size, buffer->transaction ? "active" : "delivered"); @@ -964,7 +932,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, if (!mmget_not_zero(alloc->vma_vm_mm)) goto err_mmget; mm = alloc->vma_vm_mm; - if (!down_write_trylock(&mm->mmap_sem)) + if (!down_read_trylock(&mm->mmap_sem)) goto err_down_write_mmap_sem_failed; } @@ -974,19 +942,16 @@ enum lru_status binder_alloc_free_page(struct list_head *item, if (vma) { trace_binder_unmap_user_start(alloc, index); - zap_page_range(vma, - page_addr + alloc->user_buffer_offset, - PAGE_SIZE); + zap_page_range(vma, page_addr, PAGE_SIZE); trace_binder_unmap_user_end(alloc, index); - up_write(&mm->mmap_sem); + up_read(&mm->mmap_sem); mmput(mm); } trace_binder_unmap_kernel_start(alloc, index); - unmap_kernel_range(page_addr, PAGE_SIZE); __free_page(page->page_ptr); page->page_ptr = NULL; @@ -1053,3 +1018,173 @@ int binder_alloc_shrinker_init(void) } return ret; } + +/** + * check_buffer() - verify that buffer/offset is safe to access + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @offset: offset into @buffer data + * @bytes: bytes to access from offset + * + * Check that the @offset/@bytes are within the size of the given + * @buffer and that the buffer is currently active and not freeable. + * Offsets must also be multiples of sizeof(u32). The kernel is + * allowed to touch the buffer in two cases: + * + * 1) when the buffer is being created: + * (buffer->free == 0 && buffer->allow_user_free == 0) + * 2) when the buffer is being torn down: + * (buffer->free == 0 && buffer->transaction == NULL). + * + * Return: true if the buffer is safe to access + */ +static inline bool check_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t offset, size_t bytes) +{ + size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); + + return buffer_size >= bytes && + offset <= buffer_size - bytes && + IS_ALIGNED(offset, sizeof(u32)) && + !buffer->free && + (!buffer->allow_user_free || !buffer->transaction); +} + +/** + * binder_alloc_get_page() - get kernel pointer for given buffer offset + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @buffer_offset: offset into @buffer data + * @pgoffp: address to copy final page offset to + * + * Lookup the struct page corresponding to the address + * at @buffer_offset into @buffer->user_data. 
If @pgoffp is not + * NULL, the byte-offset into the page is written there. + * + * The caller is responsible to ensure that the offset points + * to a valid address within the @buffer and that @buffer is + * not freeable by the user. Since it can't be freed, we are + * guaranteed that the corresponding elements of @alloc->pages[] + * cannot change. + * + * Return: struct page + */ +static struct page *binder_alloc_get_page(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + pgoff_t *pgoffp) +{ + binder_size_t buffer_space_offset = buffer_offset + + (buffer->user_data - alloc->buffer); + pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; + size_t index = buffer_space_offset >> PAGE_SHIFT; + struct binder_lru_page *lru_page; + + lru_page = &alloc->pages[index]; + *pgoffp = pgoff; + return lru_page->page_ptr; +} + +/** + * binder_alloc_copy_user_to_buffer() - copy src user to tgt user + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @buffer_offset: offset into @buffer data + * @from: userspace pointer to source buffer + * @bytes: bytes to copy + * + * Copy bytes from source userspace to target buffer. + * + * Return: bytes remaining to be copied + */ +unsigned long +binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + const void __user *from, + size_t bytes) +{ + if (!check_buffer(alloc, buffer, buffer_offset, bytes)) + return bytes; + + while (bytes) { + unsigned long size; + unsigned long ret; + struct page *page; + pgoff_t pgoff; + void *kptr; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + kptr = kmap(page) + pgoff; + ret = copy_from_user(kptr, from, size); + kunmap(page); + if (ret) + return bytes - size + ret; + bytes -= size; + from += size; + buffer_offset += size; + } + return 0; +} + +static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc, + bool to_buffer, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + void *ptr, + size_t bytes) +{ + /* All copies must be 32-bit aligned and 32-bit size */ + BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes)); + + while (bytes) { + unsigned long size; + struct page *page; + pgoff_t pgoff; + void *tmpptr; + void *base_ptr; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + base_ptr = kmap_atomic(page); + tmpptr = base_ptr + pgoff; + if (to_buffer) + memcpy(tmpptr, ptr, size); + else + memcpy(ptr, tmpptr, size); + /* + * kunmap_atomic() takes care of flushing the cache + * if this device has VIVT cache arch + */ + kunmap_atomic(base_ptr); + bytes -= size; + pgoff = 0; + ptr = ptr + size; + buffer_offset += size; + } +} + +void binder_alloc_copy_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + void *src, + size_t bytes) +{ + binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, + src, bytes); +} + +void binder_alloc_copy_from_buffer(struct binder_alloc *alloc, + void *dest, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + size_t bytes) +{ + binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, + dest, bytes); +} + diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index c0aadbbf7f19..b60d161b7a7a 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -22,6 +22,7 @@ #include 
#include #include +#include extern struct list_lru binder_alloc_lru; struct binder_transaction; @@ -39,7 +40,7 @@ struct binder_transaction; * @data_size: size of @transaction data * @offsets_size: size of array of offsets * @extra_buffers_size: size of space for other objects (like sg lists) - * @data: pointer to base of buffer space + * @user_data: user pointer to base of buffer space * * Bookkeeping structure for binder transaction buffers */ @@ -58,7 +59,7 @@ struct binder_buffer { size_t data_size; size_t offsets_size; size_t extra_buffers_size; - void *data; + void __user *user_data; }; /** @@ -81,7 +82,6 @@ struct binder_lru_page { * (invariant after init) * @vma_vm_mm: copy of vma->vm_mm (invarient after mmap) * @buffer: base of per-proc address space mapped via mmap - * @user_buffer_offset: offset between user and kernel VAs for buffer * @buffers: list of all buffers for this proc * @free_buffers: rb tree of buffers available for allocation * sorted by size @@ -102,8 +102,7 @@ struct binder_alloc { struct mutex mutex; struct vm_area_struct *vma; struct mm_struct *vma_vm_mm; - void *buffer; - ptrdiff_t user_buffer_offset; + void __user *buffer; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; @@ -162,26 +161,24 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc) return free_async_space; } -/** - * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs - * @alloc: binder_alloc for this proc - * - * Return: the offset between kernel and user-space addresses to use for - * virtual address conversion - */ -static inline ptrdiff_t -binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc) -{ - /* - * user_buffer_offset is constant if vma is set and - * undefined if vma is not set. It is possible to - * get here with !alloc->vma if the target process - * is dying while a transaction is being initiated. - * Returning the old value is ok in this case and - * the transaction will fail. 
- */ - return alloc->user_buffer_offset; -} +unsigned long +binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + const void __user *from, + size_t bytes); + +void binder_alloc_copy_to_buffer(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + void *src, + size_t bytes); + +void binder_alloc_copy_from_buffer(struct binder_alloc *alloc, + void *dest, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + size_t bytes); #endif /* _LINUX_BINDER_ALLOC_H */ diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index 8bd7bcef967d..b72708918b06 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -102,11 +102,12 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc, struct binder_buffer *buffer, size_t size) { - void *page_addr, *end; + void __user *page_addr; + void __user *end; int page_index; - end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); - page_addr = buffer->data; + end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); + page_addr = buffer->user_data; for (; page_addr < end; page_addr += PAGE_SIZE) { page_index = (page_addr - alloc->buffer) / PAGE_SIZE; if (!alloc->pages[page_index].page_ptr || diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 7fb97f503ef2..045b3e42d98b 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode) } #endif +#ifdef CONFIG_ANDROID_BINDERFS +extern int __init init_binderfs(void); +#else +static inline int __init init_binderfs(void) +{ + return 0; +} +#endif + #endif /* _LINUX_BINDER_INTERNAL_H */ diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 14de7ac57a34..83cc254d2335 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -293,7 +293,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, TRACE_EVENT(binder_update_page_range, TP_PROTO(struct binder_alloc *alloc, bool allocate, - void *start, void *end), + void __user *start, void __user *end), TP_ARGS(alloc, allocate, start, end), TP_STRUCT__entry( __field(int, proc) diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 6a2185eb66c5..e773f45d19d9 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb) struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; +#if defined(CONFIG_IPC_NS) + bool use_reserve = (info->ipc_ns == &init_ipc_ns); +#else + bool use_reserve = true; +#endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) @@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); - minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); + minor = ida_alloc_max(&binderfs_minors, + use_reserve ? 
BINDERFS_MAX_MINOR : + BINDERFS_MAX_MINOR_CAPPED, + GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { ret = minor; @@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = { .fs_flags = FS_USERNS_MOUNT, }; -static int __init init_binderfs(void) +int __init init_binderfs(void) { int ret; @@ -560,5 +568,3 @@ static int __init init_binderfs(void) return ret; } - -device_initcall(init_binderfs); diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 8218db17ebdb..a6beb2c5a692 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -963,6 +963,18 @@ config PATA_GAYLE If unsure, say N. +config PATA_BUDDHA + tristate "Buddha/Catweasel/X-Surf PATA support" + depends on ZORRO + help + This option enables support for the IDE interfaces + on the Buddha, Catweasel and X-Surf expansion boards + on the Zorro expansion bus. It supports up to two + interfaces on the Buddha, three on the Catweasel and + two on the X-Surf. + + If unsure, say N. + config PATA_ISAPNP tristate "ISA Plug and Play PATA support" depends on ISAPNP diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index d21cdd83f7ab..d8cc2e04a6c7 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -98,6 +98,7 @@ obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o obj-$(CONFIG_PATA_CMD640_PCI) += pata_cmd640.o obj-$(CONFIG_PATA_FALCON) += pata_falcon.o obj-$(CONFIG_PATA_GAYLE) += pata_gayle.o +obj-$(CONFIG_PATA_BUDDHA) += pata_buddha.o obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o obj-$(CONFIG_PATA_IXP4XX_CF) += pata_ixp4xx_cf.o obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index b5f57c69c487..692782dddc0f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2599,7 +2599,8 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht) int rc; if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) { - if (hpriv->irq_handler) + if (hpriv->irq_handler && + hpriv->irq_handler != ahci_single_level_irq_intr) dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n"); if (!hpriv->get_irq_vector) { diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b8c3f9e6af89..adf28788cab5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, + { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, }, /* devices that don't properly handle queued TRIM commands */ { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3d4887d0e84a..c10ee2391031 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -778,7 +778,7 @@ static int ata_ioc32(struct ata_port *ap) } int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, - int cmd, void __user *arg) + unsigned int cmd, void __user *arg) { unsigned long val; int rc = -EINVAL; @@ -829,7 +829,8 @@ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, } EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl); -int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) +int ata_scsi_ioctl(struct scsi_device *scsidev, unsigned int cmd, + void __user *arg) { return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), scsidev, cmd, arg); 
@@ -1318,8 +1319,6 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, scsi_change_queue_depth(sdev, depth); } - blk_queue_flush_queueable(q, false); - if (dev->flags & ATA_DFLAG_TRUSTED) sdev->security_supported = 1; @@ -2990,7 +2989,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) * This inconsistency confuses several controllers which * perform PIO using DMA such as Intel AHCIs and sil3124/32. * These controllers use actual number of transferred bytes to - * update DMA poitner and transfer of 4n+2 bytes make those + * update DMA pointer and transfer of 4n+2 bytes make those * controller push DMA pointer by 4n+4 bytes because SATA data * FISes are aligned to 4 bytes. This causes data corruption * and buffer overrun. diff --git a/drivers/ata/pata_buddha.c b/drivers/ata/pata_buddha.c new file mode 100644 index 000000000000..11a8044ff633 --- /dev/null +++ b/drivers/ata/pata_buddha.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Buddha, Catweasel and X-Surf PATA controller driver + * + * Copyright (c) 2018 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Based on buddha.c: + * + * Copyright (C) 1997, 2001 by Geert Uytterhoeven and others + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define DRV_NAME "pata_buddha" +#define DRV_VERSION "0.1.0" + +#define BUDDHA_BASE1 0x800 +#define BUDDHA_BASE2 0xa00 +#define BUDDHA_BASE3 0xc00 +#define XSURF_BASE1 0xb000 /* 2.5" interface */ +#define XSURF_BASE2 0xd000 /* 3.5" interface */ +#define BUDDHA_CONTROL 0x11a +#define BUDDHA_IRQ 0xf00 +#define XSURF_IRQ 0x7e +#define BUDDHA_IRQ_MR 0xfc0 /* master interrupt enable */ + +enum { + BOARD_BUDDHA = 0, + BOARD_CATWEASEL, + BOARD_XSURF +}; + +static unsigned int buddha_bases[3] __initdata = { + BUDDHA_BASE1, BUDDHA_BASE2, BUDDHA_BASE3 +}; + +static unsigned int xsurf_bases[2] __initdata = { + XSURF_BASE1, XSURF_BASE2 +}; + +static struct scsi_host_template pata_buddha_sht = { + ATA_PIO_SHT(DRV_NAME), +}; + +/* FIXME: is this needed? */ +static unsigned int pata_buddha_data_xfer(struct ata_queued_cmd *qc, + unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_device *dev = qc->dev; + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 1; + + /* Transfer multiple of 2 bytes */ + if (rw == READ) + raw_insw((u16 *)data_addr, (u16 *)buf, words); + else + raw_outsw((u16 *)data_addr, (u16 *)buf, words); + + /* Transfer trailing byte, if any. */ + if (unlikely(buflen & 0x01)) { + unsigned char pad[2] = { }; + + /* Point buf to the tail of buffer */ + buf += buflen - 1; + + if (rw == READ) { + raw_insw((u16 *)data_addr, (u16 *)pad, 1); + *buf = pad[0]; + } else { + pad[0] = *buf; + raw_outsw((u16 *)data_addr, (u16 *)pad, 1); + } + words++; + } + + return words << 1; +} + +/* + * Provide our own set_mode() as we don't want to change anything that has + * already been configured.. 
+ */ +static int pata_buddha_set_mode(struct ata_link *link, + struct ata_device **unused) +{ + struct ata_device *dev; + + ata_for_each_dev(dev, link, ENABLED) { + /* We don't really care */ + dev->pio_mode = dev->xfer_mode = XFER_PIO_0; + dev->xfer_shift = ATA_SHIFT_PIO; + dev->flags |= ATA_DFLAG_PIO; + ata_dev_info(dev, "configured for PIO\n"); + } + return 0; +} + +static bool pata_buddha_irq_check(struct ata_port *ap) +{ + u8 ch; + + ch = z_readb((unsigned long)ap->private_data); + + return !!(ch & 0x80); +} + +static void pata_xsurf_irq_clear(struct ata_port *ap) +{ + z_writeb(0, (unsigned long)ap->private_data); +} + +static struct ata_port_operations pata_buddha_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = pata_buddha_data_xfer, + .sff_irq_check = pata_buddha_irq_check, + .cable_detect = ata_cable_unknown, + .set_mode = pata_buddha_set_mode, +}; + +static struct ata_port_operations pata_xsurf_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = pata_buddha_data_xfer, + .sff_irq_check = pata_buddha_irq_check, + .sff_irq_clear = pata_xsurf_irq_clear, + .cable_detect = ata_cable_unknown, + .set_mode = pata_buddha_set_mode, +}; + +static int __init pata_buddha_init_one(void) +{ + struct zorro_dev *z = NULL; + + while ((z = zorro_find_device(ZORRO_WILDCARD, z))) { + static const char *board_name[] + = { "Buddha", "Catweasel", "X-Surf" }; + struct ata_host *host; + void __iomem *buddha_board; + unsigned long board; + unsigned int type, nr_ports = 2; + int i; + + if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { + type = BOARD_BUDDHA; + } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL) { + type = BOARD_CATWEASEL; + nr_ports++; + } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF) { + type = BOARD_XSURF; + } else + continue; + + dev_info(&z->dev, "%s IDE controller\n", board_name[type]); + + board = z->resource.start; + + if (type != BOARD_XSURF) { + if (!devm_request_mem_region(&z->dev, + board + BUDDHA_BASE1, + 0x800, DRV_NAME)) + continue; + } else { + if (!devm_request_mem_region(&z->dev, + board + XSURF_BASE1, + 0x1000, DRV_NAME)) + continue; + if (!devm_request_mem_region(&z->dev, + board + XSURF_BASE2, + 0x1000, DRV_NAME)) + continue; + } + + /* allocate host */ + host = ata_host_alloc(&z->dev, nr_ports); + if (!host) + continue; + + buddha_board = ZTWO_VADDR(board); + + /* enable the board IRQ on Buddha/Catweasel */ + if (type != BOARD_XSURF) + z_writeb(0, buddha_board + BUDDHA_IRQ_MR); + + for (i = 0; i < nr_ports; i++) { + struct ata_port *ap = host->ports[i]; + void __iomem *base, *irqport; + unsigned long ctl = 0; + + if (type != BOARD_XSURF) { + ap->ops = &pata_buddha_ops; + base = buddha_board + buddha_bases[i]; + ctl = BUDDHA_CONTROL; + irqport = buddha_board + BUDDHA_IRQ + i * 0x40; + } else { + ap->ops = &pata_xsurf_ops; + base = buddha_board + xsurf_bases[i]; + /* X-Surf has no CS1* (Control/AltStat) */ + irqport = buddha_board + XSURF_IRQ; + } + + ap->pio_mask = ATA_PIO4; + ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY; + + ap->ioaddr.data_addr = base; + ap->ioaddr.error_addr = base + 2 + 1 * 4; + ap->ioaddr.feature_addr = base + 2 + 1 * 4; + ap->ioaddr.nsect_addr = base + 2 + 2 * 4; + ap->ioaddr.lbal_addr = base + 2 + 3 * 4; + ap->ioaddr.lbam_addr = base + 2 + 4 * 4; + ap->ioaddr.lbah_addr = base + 2 + 5 * 4; + ap->ioaddr.device_addr = base + 2 + 6 * 4; + ap->ioaddr.status_addr = base + 2 + 7 * 4; + ap->ioaddr.command_addr = base + 2 + 7 * 4; + + if (ctl) { + ap->ioaddr.altstatus_addr = base + ctl; + 
ap->ioaddr.ctl_addr = base + ctl; + } + + ap->private_data = (void *)irqport; + + ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", board, + ctl ? board + buddha_bases[i] + ctl : 0); + } + + ata_host_activate(host, IRQ_AMIGA_PORTS, ata_sff_interrupt, + IRQF_SHARED, &pata_buddha_sht); + + } + + return 0; +} + +module_init(pata_buddha_init_one); + +MODULE_AUTHOR("Bartlomiej Zolnierkiewicz"); +MODULE_DESCRIPTION("low-level driver for Buddha/Catweasel/X-Surf PATA"); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index 9e7fc302430f..456ae7184f92 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -954,7 +954,7 @@ static void pata_macio_invariants(struct pata_macio_priv *priv) priv->kind = controller_k2_ata6; priv->timings = pata_macio_kauai_timings; } else if (of_device_is_compatible(priv->node, "keylargo-ata")) { - if (strcmp(priv->node->name, "ata-4") == 0) { + if (of_node_name_eq(priv->node, "ata-4")) { priv->kind = controller_kl_ata4; priv->timings = pata_macio_kl66_timings; } else { diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c index 01161c1aef4d..7a0b1759e5f0 100644 --- a/drivers/ata/pata_of_platform.c +++ b/drivers/ata/pata_of_platform.c @@ -32,6 +32,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev) unsigned int reg_shift = 0; int pio_mode = 0; int pio_mask; + bool use16bit; ret = of_address_to_resource(dn, 0, &io_res); if (ret) { @@ -60,11 +61,14 @@ static int pata_of_platform_probe(struct platform_device *ofdev) dev_info(&ofdev->dev, "pio-mode unspecified, assuming PIO0\n"); } + use16bit = of_property_read_bool(dn, "ata-generic,use16bit"); + pio_mask = 1 << pio_mode; pio_mask |= (1 << pio_mode) - 1; return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res, - reg_shift, pio_mask, &pata_platform_sht); + reg_shift, pio_mask, &pata_platform_sht, + use16bit); } static const struct of_device_id pata_of_platform_match[] = { diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index d6f8f5406442..5aba691f09af 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c @@ -47,13 +47,6 @@ static struct scsi_host_template pata_platform_sht = { ATA_PIO_SHT(DRV_NAME), }; -static struct ata_port_operations pata_platform_port_ops = { - .inherits = &ata_sff_port_ops, - .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_unknown, - .set_mode = pata_platform_set_mode, -}; - static void pata_platform_setup_port(struct ata_ioports *ioaddr, unsigned int shift) { @@ -79,6 +72,7 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr, * @ioport_shift: I/O port shift * @__pio_mask: PIO mask * @sht: scsi_host_template to use when registering + * @use16bit: Flag to indicate 16-bit IO instead of 32-bit * * Register a platform bus IDE interface. Such interfaces are PIO and we * assume do not support IRQ sharing. 
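The pio_mask arithmetic in the pata_of_platform_probe() hunk above builds a bitmask covering every PIO mode up to and including the requested one, which is the form libata expects in ap->pio_mask. A standalone check of that arithmetic (ordinary userspace C, not kernel code; the pio_mode value of 4 stands in for a hypothetical "pio-mode" device-tree property):

#include <stdio.h>

int main(void)
{
	int pio_mode = 4;			/* hypothetical DT "pio-mode" value */
	int pio_mask = 1 << pio_mode;		/* bit for the requested mode */

	pio_mask |= (1 << pio_mode) - 1;	/* plus all lower modes */
	printf("pio_mask = 0x%x\n", pio_mask);	/* prints 0x1f, i.e. PIO0..PIO4 */
	return 0;
}

The resulting 0x1f is the same mask that pata_buddha.c hard-codes as ATA_PIO4 a few hunks earlier.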
@@ -101,7 +95,7 @@ static void pata_platform_setup_port(struct ata_ioports *ioaddr, int __pata_platform_probe(struct device *dev, struct resource *io_res, struct resource *ctl_res, struct resource *irq_res, unsigned int ioport_shift, int __pio_mask, - struct scsi_host_template *sht) + struct scsi_host_template *sht, bool use16bit) { struct ata_host *host; struct ata_port *ap; @@ -120,7 +114,7 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res, */ if (irq_res && irq_res->start > 0) { irq = irq_res->start; - irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; + irq_flags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED; } /* @@ -131,7 +125,15 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res, return -ENOMEM; ap = host->ports[0]; - ap->ops = &pata_platform_port_ops; + ap->ops = devm_kzalloc(dev, sizeof(*ap->ops), GFP_KERNEL); + ap->ops->inherits = &ata_sff_port_ops; + ap->ops->cable_detect = ata_cable_unknown; + ap->ops->set_mode = pata_platform_set_mode; + if (use16bit) + ap->ops->sff_data_xfer = ata_sff_data_xfer; + else + ap->ops->sff_data_xfer = ata_sff_data_xfer32; + ap->pio_mask = __pio_mask; ap->flags |= ATA_FLAG_SLAVE_POSS; @@ -218,7 +220,7 @@ static int pata_platform_probe(struct platform_device *pdev) return __pata_platform_probe(&pdev->dev, io_res, ctl_res, irq_res, pp_info ? pp_info->ioport_shift : 0, - pio_mask, &pata_platform_sht); + pio_mask, &pata_platform_sht, false); } static struct platform_driver pata_platform_driver = { diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index f5bd44b8bd63..1dc3361cb5a5 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c @@ -609,17 +609,15 @@ static int __exit pata_s3c_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int pata_s3c_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct ata_host *host = platform_get_drvdata(pdev); + struct ata_host *host = dev_get_drvdata(dev); return ata_host_suspend(host, PMSG_SUSPEND); } static int pata_s3c_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct ata_host *host = platform_get_drvdata(pdev); - struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev); + struct ata_host *host = dev_get_drvdata(dev); + struct s3c_ide_platdata *pdata = dev_get_platdata(dev); struct s3c_ide_info *info = host->private_data; pata_s3c_hwinit(info, pdata); diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a43276c76fc6..21393ec3b9a4 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c @@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client) struct ht16k33_priv *priv = i2c_get_clientdata(client); struct ht16k33_fbdev *fbdev = &priv->fbdev; - cancel_delayed_work(&fbdev->work); + cancel_delayed_work_sync(&fbdev->work); unregister_framebuffer(fbdev->info); framebuffer_release(fbdev->info); free_page((unsigned long) fbdev->buffer); diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 3e63a900b330..059700ea3521 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -191,83 +191,6 @@ config DMA_FENCE_TRACE lockup related problems for dma-buffers shared across multiple devices. 
-config DMA_CMA - bool "DMA Contiguous Memory Allocator" - depends on HAVE_DMA_CONTIGUOUS && CMA - help - This enables the Contiguous Memory Allocator which allows drivers - to allocate big physically-contiguous blocks of memory for use with - hardware components that do not support I/O map nor scatter-gather. - - You can disable CMA by specifying "cma=0" on the kernel's command - line. - - For more information see . - If unsure, say "n". - -if DMA_CMA -comment "Default contiguous memory area size:" - -config CMA_SIZE_MBYTES - int "Size in Mega Bytes" - depends on !CMA_SIZE_SEL_PERCENTAGE - default 0 if X86 - default 16 - help - Defines the size (in MiB) of the default memory area for Contiguous - Memory Allocator. If the size of 0 is selected, CMA is disabled by - default, but it can be enabled by passing cma=size[MG] to the kernel. - - -config CMA_SIZE_PERCENTAGE - int "Percentage of total memory" - depends on !CMA_SIZE_SEL_MBYTES - default 0 if X86 - default 10 - help - Defines the size of the default memory area for Contiguous Memory - Allocator as a percentage of the total memory in the system. - If 0 percent is selected, CMA is disabled by default, but it can be - enabled by passing cma=size[MG] to the kernel. - -choice - prompt "Selected region size" - default CMA_SIZE_SEL_MBYTES - -config CMA_SIZE_SEL_MBYTES - bool "Use mega bytes value only" - -config CMA_SIZE_SEL_PERCENTAGE - bool "Use percentage value only" - -config CMA_SIZE_SEL_MIN - bool "Use lower value (minimum)" - -config CMA_SIZE_SEL_MAX - bool "Use higher value (maximum)" - -endchoice - -config CMA_ALIGNMENT - int "Maximum PAGE_SIZE order of alignment for contiguous buffers" - range 4 12 - default 8 - help - DMA mapping framework by default aligns all buffers to the smallest - PAGE_SIZE order which is greater than or equal to the requested buffer - size. This works well for buffers up to a few hundreds kilobytes, but - for larger buffers it just a memory waste. With this parameter you can - specify the maximum PAGE_SIZE order for contiguous buffers. Larger - buffers will be aligned only to this specified order. The order is - expressed as a power of two multiplied by the PAGE_SIZE. - - For example, if your system defaults to 4KiB pages, the order value - of 8 means that the buffers will be aligned up to 1MiB only. - - If unsure, leave the default value "8". - -endif - config GENERIC_ARCH_TOPOLOGY bool help diff --git a/drivers/base/base.h b/drivers/base/base.h index 7a419a7a6235..b405436ee28e 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -60,12 +60,17 @@ struct driver_private { * @knode_parent - node in sibling list * @knode_driver - node in driver list * @knode_bus - node in bus list + * @knode_class - node in class list * @deferred_probe - entry in deferred_probe_list which is used to retry the * binding of drivers which were unable to get all the resources needed by * the device; typically because it depends on another driver getting * probed first. + * @async_driver - pointer to device driver awaiting probe via async_probe * @device - pointer back to the struct device that this structure is * associated with. + * @dead - This device is currently either in the process of or has been + * removed from the system. Any asynchronous events scheduled for this + * device should exit without taking any action. * * Nothing outside of the driver core should ever touch these fields. 
*/ @@ -74,8 +79,11 @@ struct device_private { struct klist_node knode_parent; struct klist_node knode_driver; struct klist_node knode_bus; + struct klist_node knode_class; struct list_head deferred_probe; + struct device_driver *async_driver; struct device *device; + u8 dead:1; }; #define to_device_private_parent(obj) \ container_of(obj, struct device_private, knode_parent) @@ -83,6 +91,8 @@ struct device_private { container_of(obj, struct device_private, knode_driver) #define to_device_private_bus(obj) \ container_of(obj, struct device_private, knode_bus) +#define to_device_private_class(obj) \ + container_of(obj, struct device_private, knode_class) /* initialisation functions */ extern int devices_init(void); @@ -124,6 +134,8 @@ extern int driver_add_groups(struct device_driver *drv, const struct attribute_group **groups); extern void driver_remove_groups(struct device_driver *drv, const struct attribute_group **groups); +int device_driver_attach(struct device_driver *drv, struct device *dev); +void device_driver_detach(struct device *dev); extern char *make_class_name(const char *name, struct kobject *kobj); diff --git a/drivers/base/bus.c b/drivers/base/bus.c index e06a57936cc9..0a58e969f8b7 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -187,11 +187,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == drv) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + device_driver_detach(dev); err = count; } put_device(dev); @@ -214,13 +210,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, dev = bus_find_device_by_name(bus, NULL, buf); if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { - if (dev->parent && bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - err = driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && bus->need_parent_lock) - device_unlock(dev->parent); + err = device_driver_attach(drv, dev); if (err > 0) { /* success */ @@ -236,12 +226,12 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf, } static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store); -static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf) +static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf) { return sprintf(buf, "%d\n", bus->p->drivers_autoprobe); } -static ssize_t store_drivers_autoprobe(struct bus_type *bus, +static ssize_t drivers_autoprobe_store(struct bus_type *bus, const char *buf, size_t count) { if (buf[0] == '0') @@ -251,7 +241,7 @@ static ssize_t store_drivers_autoprobe(struct bus_type *bus, return count; } -static ssize_t store_drivers_probe(struct bus_type *bus, +static ssize_t drivers_probe_store(struct bus_type *bus, const char *buf, size_t count) { struct device *dev; @@ -586,9 +576,8 @@ static void remove_bind_files(struct device_driver *drv) driver_remove_file(drv, &driver_attr_unbind); } -static BUS_ATTR(drivers_probe, S_IWUSR, NULL, store_drivers_probe); -static BUS_ATTR(drivers_autoprobe, S_IWUSR | S_IRUGO, - show_drivers_autoprobe, store_drivers_autoprobe); +static BUS_ATTR_WO(drivers_probe); +static BUS_ATTR_RW(drivers_autoprobe); static int add_probe_files(struct bus_type *bus) { @@ -621,17 +610,6 @@ static ssize_t uevent_store(struct device_driver *drv, const char *buf, } static DRIVER_ATTR_WO(uevent); 
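The show_drivers_autoprobe/store_drivers_autoprobe functions are renamed to the name_show/name_store pattern above because that is the naming the BUS_ATTR_RW()/BUS_ATTR_WO() shorthands paste together internally. A minimal sketch of the convention, using a hypothetical "foo" bus attribute rather than anything from this patch:

static ssize_t foo_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", 1);		/* hypothetical value */
}

static ssize_t foo_store(struct bus_type *bus, const char *buf, size_t count)
{
	return count;				/* accept and ignore the write */
}

/* expands to: struct bus_attribute bus_attr_foo = __ATTR_RW(foo) */
static BUS_ATTR_RW(foo);

This naming requirement is also why bus_attr_uevent further down has to open-code __ATTR(): a uevent store function with a different signature already exists earlier in the file, as the patch's own comment explains.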
-static void driver_attach_async(void *_drv, async_cookie_t cookie) -{ - struct device_driver *drv = _drv; - int ret; - - ret = driver_attach(drv); - - pr_debug("bus: '%s': driver %s async attach completed: %d\n", - drv->bus->name, drv->name, ret); -} - /** * bus_add_driver - Add a driver to the bus. * @drv: driver. @@ -664,15 +642,9 @@ int bus_add_driver(struct device_driver *drv) klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers); if (drv->bus->p->drivers_autoprobe) { - if (driver_allows_async_probing(drv)) { - pr_debug("bus: '%s': probing driver %s asynchronously\n", - drv->bus->name, drv->name); - async_schedule(driver_attach_async, drv); - } else { - error = driver_attach(drv); - if (error) - goto out_unregister; - } + error = driver_attach(drv); + if (error) + goto out_unregister; } module_add_driver(drv->owner, drv); @@ -774,13 +746,8 @@ EXPORT_SYMBOL_GPL(bus_rescan_devices); */ int device_reprobe(struct device *dev) { - if (dev->driver) { - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_release_driver(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); - } + if (dev->driver) + device_driver_detach(dev); return bus_rescan_devices_helper(dev, NULL); } EXPORT_SYMBOL_GPL(device_reprobe); @@ -838,7 +805,14 @@ static ssize_t bus_uevent_store(struct bus_type *bus, rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count); return rc ? rc : count; } -static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); +/* + * "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO() + * here, but can not use it as earlier in the file we have + * DEVICE_ATTR_WO(uevent), which would cause a clash with the with the store + * function name. + */ +static struct bus_attribute bus_attr_uevent = __ATTR(uevent, S_IWUSR, NULL, + bus_uevent_store); /** * bus_register - register a driver-core subsystem diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf78fa6d470d..a7359535caf5 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].size_prop; - if (of_property_read_u32(np, propname, &this_leaf->size)) - this_leaf->size = 0; + of_property_read_u32(np, propname, &this_leaf->size); } /* not cache_line_size() because that's a macro in include/linux/cache.h */ @@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) ct_idx = get_cacheinfo_idx(this_leaf->type); propname = cache_type_info[ct_idx].nr_sets_prop; - if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) - this_leaf->number_of_sets = 0; + of_property_read_u32(np, propname, &this_leaf->number_of_sets); } static void cache_associativity(struct cacheinfo *this_leaf) diff --git a/drivers/base/class.c b/drivers/base/class.c index 54def4e02f00..d8a6a5864c2e 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -117,16 +117,22 @@ static void class_put(struct class *cls) kset_put(&cls->p->subsys); } +static struct device *klist_class_to_dev(struct klist_node *n) +{ + struct device_private *p = to_device_private_class(n); + return p->device; +} + static void klist_class_dev_get(struct klist_node *n) { - struct device *dev = container_of(n, struct device, knode_class); + struct device *dev = klist_class_to_dev(n); get_device(dev); } static void klist_class_dev_put(struct klist_node *n) { - struct device *dev 
= container_of(n, struct device, knode_class); + struct device *dev = klist_class_to_dev(n); put_device(dev); } @@ -277,7 +283,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, struct klist_node *start_knode = NULL; if (start) - start_knode = &start->knode_class; + start_knode = &start->p->knode_class; klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); iter->type = type; } @@ -304,7 +310,7 @@ struct device *class_dev_iter_next(struct class_dev_iter *iter) knode = klist_next(&iter->ki); if (!knode) return NULL; - dev = container_of(knode, struct device, knode_class); + dev = klist_class_to_dev(knode); if (!iter->type || iter->type == dev->type) return dev; } diff --git a/drivers/base/component.c b/drivers/base/component.c index ddcea8739c12..532a3a5d8f63 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -16,11 +16,38 @@ #include #include +/** + * DOC: overview + * + * The component helper allows drivers to collect a pile of sub-devices, + * including their bound drivers, into an aggregate driver. Various subsystems + * already provide functions to get hold of such components, e.g. + * of_clk_get_by_name(). The component helper can be used when such a + * subsystem-specific way to find a device is not available: The component + * helper fills the niche of aggregate drivers for specific hardware, where + * further standardization into a subsystem would not be practical. The common + * example is when a logical device (e.g. a DRM display driver) is spread around + * the SoC on various components (scanout engines, blending blocks, transcoders + * for various outputs and so on). + * + * The component helper also doesn't solve runtime dependencies, e.g. for system + * suspend and resume operations. See also :ref:`device links`. + * + * Components are registered using component_add() and unregistered with + * component_del(), usually from the driver's probe and disconnect functions. + * + * Aggregate drivers first assemble a component match list of what they need + * using component_match_add(). This is then registered as an aggregate driver + * using component_master_add_with_match(), and unregistered using + * component_master_del(). 
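To make the flow described in this overview concrete, the two halves might look roughly like the sketch below. This is a condensed illustration, not code from the patch; the my_crtc_*/my_drm_* names, the platform devices, the "crtc" phandle and the of_node-based compare callback are all hypothetical, and the aggregate's bind/unbind callbacks (my_drm_master_ops) are sketched separately after component_bind_all() further down.

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* component side: each sub-device registers itself from its own probe */
static int my_crtc_bind(struct device *dev, struct device *master, void *data)
{
	/* hook this component into the aggregate state passed in @data */
	return 0;
}

static void my_crtc_unbind(struct device *dev, struct device *master, void *data)
{
}

static const struct component_ops my_crtc_ops = {
	.bind	= my_crtc_bind,
	.unbind	= my_crtc_unbind,
};

static int my_crtc_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &my_crtc_ops);
}

/* aggregate side: describe the needed components, then register the master */
static int my_compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int my_drm_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *np;

	/* hypothetical DT binding: a "crtc" phandle naming the component */
	np = of_parse_phandle(pdev->dev.of_node, "crtc", 0);
	component_match_add(&pdev->dev, &match, my_compare_of, np);

	return component_master_add_with_match(&pdev->dev, &my_drm_master_ops,
					       match);
}

The removal paths would call component_del() from my_crtc's remove callback and component_master_del() from my_drm's. The of_node reference taken by of_parse_phandle() is the kind of thing the release callback of component_match_add_release(), documented in the next hunk, exists to drop; a variant is sketched after it.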
+ */ + struct component; struct component_match_array { void *data; int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); void (*release)(struct device *, void *); struct component *component; bool duplicate; @@ -48,6 +75,7 @@ struct component { bool bound; const struct component_ops *ops; + int subcomponent; struct device *dev; }; @@ -132,7 +160,7 @@ static struct master *__master_find(struct device *dev, } static struct component *find_component(struct master *master, - int (*compare)(struct device *, void *), void *compare_data) + struct component_match_array *mc) { struct component *c; @@ -140,7 +168,11 @@ static struct component *find_component(struct master *master, if (c->master && c->master != master) continue; - if (compare(c->dev, compare_data)) + if (mc->compare && mc->compare(c->dev, mc->data)) + return c; + + if (mc->compare_typed && + mc->compare_typed(c->dev, c->subcomponent, mc->data)) return c; } @@ -166,7 +198,7 @@ static int find_components(struct master *master) if (match->compare[i].component) continue; - c = find_component(master, mc->compare, mc->data); + c = find_component(master, mc); if (!c) { ret = -ENXIO; break; @@ -301,15 +333,12 @@ static int component_match_realloc(struct device *dev, return 0; } -/* - * Add a component to be matched, with a release function. - * - * The match array is first created or extended if necessary. - */ -void component_match_add_release(struct device *master, +static void __component_match_add(struct device *master, struct component_match **matchptr, void (*release)(struct device *, void *), - int (*compare)(struct device *, void *), void *compare_data) + int (*compare)(struct device *, void *), + int (*compare_typed)(struct device *, int, void *), + void *compare_data) { struct component_match *match = *matchptr; @@ -341,13 +370,69 @@ void component_match_add_release(struct device *master, } match->compare[match->num].compare = compare; + match->compare[match->num].compare_typed = compare_typed; match->compare[match->num].release = release; match->compare[match->num].data = compare_data; match->compare[match->num].component = NULL; match->num++; } + +/** + * component_match_add_release - add a component match entry with release callback + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @release: release function for @compare_data + * @compare: compare function to match against all components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions, where upon @release will be called to free any references held by + * @compare_data, e.g. when @compare_data is a &device_node that must be + * released with of_node_put(). + * + * See also component_match_add() and component_match_add_typed(). 
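Continuing the hypothetical sketch above: if the compare data is a device_node reference, the release callback is the natural place to drop it, which is the of_node_put() case this kernel-doc mentions. Again a sketch, not code from the patch:

static void my_release_of(struct device *dev, void *data)
{
	of_node_put(data);	/* drop the reference taken when building the match */
}

	/* in my_drm_probe(), instead of plain component_match_add(): */
	component_match_add_release(&pdev->dev, &match, my_release_of,
				    my_compare_of, np);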
+ */ +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), + int (*compare)(struct device *, void *), void *compare_data) +{ + __component_match_add(master, matchptr, release, compare, NULL, + compare_data); +} EXPORT_SYMBOL(component_match_add_release); +/** + * component_match_add_typed - add a component match entry for a typed component + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @compare_typed: compare function to match against all typed components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add_typed(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions. + * + * See also component_match_add_release() and component_match_add_typed(). + */ +void component_match_add_typed(struct device *master, + struct component_match **matchptr, + int (*compare_typed)(struct device *, int, void *), void *compare_data) +{ + __component_match_add(master, matchptr, NULL, NULL, compare_typed, + compare_data); +} +EXPORT_SYMBOL(component_match_add_typed); + static void free_master(struct master *master) { struct component_match *match = master->match; @@ -367,6 +452,18 @@ static void free_master(struct master *master) kfree(master); } +/** + * component_master_add_with_match - register an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * @match: component match list for the aggregate driver + * + * Registers a new aggregate driver consisting of the components added to @match + * by calling one of the component_match_add() functions. Once all components in + * @match are available, it will be assembled by calling + * &component_master_ops.bind from @ops. Must be unregistered by calling + * component_master_del(). + */ int component_master_add_with_match(struct device *dev, const struct component_master_ops *ops, struct component_match *match) @@ -403,6 +500,15 @@ int component_master_add_with_match(struct device *dev, } EXPORT_SYMBOL_GPL(component_master_add_with_match); +/** + * component_master_del - unregister an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * + * Unregisters an aggregate driver registered with + * component_master_add_with_match(). If necessary the aggregate driver is first + * disassembled by calling &component_master_ops.unbind from @ops. + */ void component_master_del(struct device *dev, const struct component_master_ops *ops) { @@ -430,6 +536,15 @@ static void component_unbind(struct component *component, devres_release_group(component->dev, component); } +/** + * component_unbind_all - unbind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Unbinds all components of the aggregate @dev by passing @data to their + * &component_ops.unbind functions. Should be called from + * &component_master_ops.unbind. 
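The bind_all/unbind_all pair is normally called from the aggregate driver's own &component_master_ops callbacks, bracketing the creation and teardown of the state that every component receives as @data. Continuing the hypothetical my_drm_* sketch (the private structure and its contents are made up, kzalloc() comes from <linux/slab.h>):

struct my_drm_private {
	void *drm;			/* hypothetical aggregate state */
};

static int my_drm_bind(struct device *master)
{
	struct my_drm_private *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_set_drvdata(master, priv);

	/* invokes my_crtc_bind() (and any other component) with priv as @data */
	ret = component_bind_all(master, priv);
	if (ret)
		kfree(priv);
	return ret;
}

static void my_drm_unbind(struct device *master)
{
	struct my_drm_private *priv = dev_get_drvdata(master);

	component_unbind_all(master, priv);
	kfree(priv);
}

static const struct component_master_ops my_drm_master_ops = {
	.bind	= my_drm_bind,
	.unbind	= my_drm_unbind,
};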
+ */ void component_unbind_all(struct device *master_dev, void *data) { struct master *master; @@ -503,6 +618,15 @@ static int component_bind(struct component *component, struct master *master, return ret; } +/** + * component_bind_all - bind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Binds all components of the aggregate @dev by passing @data to their + * &component_ops.bind functions. Should be called from + * &component_master_ops.bind. + */ int component_bind_all(struct device *master_dev, void *data) { struct master *master; @@ -537,7 +661,8 @@ int component_bind_all(struct device *master_dev, void *data) } EXPORT_SYMBOL_GPL(component_bind_all); -int component_add(struct device *dev, const struct component_ops *ops) +static int __component_add(struct device *dev, const struct component_ops *ops, + int subcomponent) { struct component *component; int ret; @@ -548,6 +673,7 @@ int component_add(struct device *dev, const struct component_ops *ops) component->ops = ops; component->dev = dev; + component->subcomponent = subcomponent; dev_dbg(dev, "adding component (ops %ps)\n", ops); @@ -566,8 +692,66 @@ int component_add(struct device *dev, const struct component_ops *ops) return ret < 0 ? ret : 0; } + +/** + * component_add_typed - register a component + * @dev: component device + * @ops: component callbacks + * @subcomponent: nonzero identifier for subcomponents + * + * Register a new component for @dev. Functions in @ops will be call when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * @subcomponent must be nonzero and is used to differentiate between multiple + * components registerd on the same device @dev. These components are match + * using component_match_add_typed(). + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add(). + */ +int component_add_typed(struct device *dev, const struct component_ops *ops, + int subcomponent) +{ + if (WARN_ON(subcomponent == 0)) + return -EINVAL; + + return __component_add(dev, ops, subcomponent); +} +EXPORT_SYMBOL_GPL(component_add_typed); + +/** + * component_add - register a component + * @dev: component device + * @ops: component callbacks + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add_typed() for a variant that allows multipled different + * components on the same device. + */ +int component_add(struct device *dev, const struct component_ops *ops) +{ + return __component_add(dev, ops, 0); +} EXPORT_SYMBOL_GPL(component_add); +/** + * component_del - unregister a component + * @dev: component device + * @ops: component callbacks + * + * Unregister a component added with component_add(). If the component is bound + * into an aggregate driver, this will force the entire aggregate driver, including + * all its components, to be unbound. 
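The typed variants added in this patch cover the case where a single struct device registers more than one logical component. A hedged sketch of the pattern the kernel-doc above describes (the enum, the my_*_ops structures and the DT lookup are hypothetical; 0 is rejected as a subcomponent identifier, so the enum starts at 1):

enum my_subcomponent {
	MY_COMP_SCANOUT = 1,		/* must be nonzero */
	MY_COMP_BLENDER,
};

static int my_display_probe(struct platform_device *pdev)
{
	int ret;

	ret = component_add_typed(&pdev->dev, &my_scanout_ops, MY_COMP_SCANOUT);
	if (ret)
		return ret;
	return component_add_typed(&pdev->dev, &my_blender_ops, MY_COMP_BLENDER);
}

/* aggregate side: match on both the device and the subcomponent identifier */
static int my_compare_scanout(struct device *dev, int subcomponent, void *data)
{
	return dev->of_node == data && subcomponent == MY_COMP_SCANOUT;
}

	/* in the aggregate's probe: */
	component_match_add_typed(&pdev->dev, &match, my_compare_scanout, np);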
+ */ void component_del(struct device *dev, const struct component_ops *ops) { struct component *c, *component = NULL; diff --git a/drivers/base/core.c b/drivers/base/core.c index 0073b09bb99f..4aeaa0c92bda 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -179,10 +179,31 @@ void device_pm_move_to_tail(struct device *dev) * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be * ignored. * - * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed - * automatically when the consumer device driver unbinds from it. - * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS - * set is invalid and will cause NULL to be returned. + * If DL_FLAG_STATELESS is set in @flags, the link is not going to be managed by + * the driver core and, in particular, the caller of this function is expected + * to drop the reference to the link acquired by it directly. + * + * If that flag is not set, however, the caller of this function is handing the + * management of the link over to the driver core entirely and its return value + * can only be used to check whether or not the link is present. In that case, + * the DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_AUTOREMOVE_SUPPLIER device link + * flags can be used to indicate to the driver core when the link can be safely + * deleted. Namely, setting one of them in @flags indicates to the driver core + * that the link is not going to be used (by the given caller of this function) + * after unbinding the consumer or supplier driver, respectively, from its + * device, so the link can be deleted at that point. If none of them is set, + * the link will be maintained until one of the devices pointed to by it (either + * the consumer or the supplier) is unregistered. + * + * Also, if DL_FLAG_STATELESS, DL_FLAG_AUTOREMOVE_CONSUMER and + * DL_FLAG_AUTOREMOVE_SUPPLIER are not set in @flags (that is, a persistent + * managed device link is being added), the DL_FLAG_AUTOPROBE_CONSUMER flag can + * be used to request the driver core to automaticall probe for a consmer + * driver after successfully binding a driver to the supplier device. + * + * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER + * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and + * will cause NULL to be returned upfront. 
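Most callers create managed links from the consumer's probe path and let the driver core tear them down according to the flags; stateless links are the exception and remain the caller's responsibility. A minimal hedged sketch of both styles (consumer_dev/supplier_dev are assumed to have been resolved elsewhere, and the error policy shown is only illustrative):

	struct device_link *link;

	/* managed: dropped automatically when the consumer driver unbinds */
	link = device_link_add(consumer_dev, supplier_dev,
			       DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;

	/* stateless: the caller owns it and must call device_link_del(link) */
	link = device_link_add(consumer_dev, supplier_dev, DL_FLAG_STATELESS);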
* * A side effect of the link creation is re-ordering of dpm_list and the * devices_kset list by moving the consumer device and all devices depending @@ -199,10 +220,22 @@ struct device_link *device_link_add(struct device *consumer, struct device_link *link; if (!consumer || !supplier || - ((flags & DL_FLAG_STATELESS) && - (flags & DL_FLAG_AUTOREMOVE_CONSUMER))) + (flags & DL_FLAG_STATELESS && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_AUTOPROBE_CONSUMER)) || + (flags & DL_FLAG_AUTOPROBE_CONSUMER && + flags & (DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER))) return NULL; + if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) { + if (pm_runtime_get_sync(supplier) < 0) { + pm_runtime_put_noidle(supplier); + return NULL; + } + } + device_links_write_lock(); device_pm_lock(); @@ -217,35 +250,71 @@ struct device_link *device_link_add(struct device *consumer, goto out; } - list_for_each_entry(link, &supplier->links.consumers, s_node) - if (link->consumer == consumer) { + /* + * DL_FLAG_AUTOREMOVE_SUPPLIER indicates that the link will be needed + * longer than for DL_FLAG_AUTOREMOVE_CONSUMER and setting them both + * together doesn't make sense, so prefer DL_FLAG_AUTOREMOVE_SUPPLIER. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) + flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + + list_for_each_entry(link, &supplier->links.consumers, s_node) { + if (link->consumer != consumer) + continue; + + /* + * Don't return a stateless link if the caller wants a stateful + * one and vice versa. + */ + if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) { + link = NULL; + goto out; + } + + if (flags & DL_FLAG_PM_RUNTIME) { + if (!(link->flags & DL_FLAG_PM_RUNTIME)) { + pm_runtime_new_link(consumer); + link->flags |= DL_FLAG_PM_RUNTIME; + } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + } + + if (flags & DL_FLAG_STATELESS) { kref_get(&link->kref); goto out; } + /* + * If the life time of the link following from the new flags is + * longer than indicated by the flags of the existing link, + * update the existing link to stay around longer. + */ + if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER) { + if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) { + link->flags &= ~DL_FLAG_AUTOREMOVE_CONSUMER; + link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER; + } + } else if (!(flags & DL_FLAG_AUTOREMOVE_CONSUMER)) { + link->flags &= ~(DL_FLAG_AUTOREMOVE_CONSUMER | + DL_FLAG_AUTOREMOVE_SUPPLIER); + } + goto out; + } + link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) goto out; + refcount_set(&link->rpm_active, 1); + if (flags & DL_FLAG_PM_RUNTIME) { - if (flags & DL_FLAG_RPM_ACTIVE) { - if (pm_runtime_get_sync(supplier) < 0) { - pm_runtime_put_noidle(supplier); - kfree(link); - link = NULL; - goto out; - } - link->rpm_active = true; - } + if (flags & DL_FLAG_RPM_ACTIVE) + refcount_inc(&link->rpm_active); + pm_runtime_new_link(consumer); - /* - * If the link is being added by the consumer driver at probe - * time, balance the decrementation of the supplier's runtime PM - * usage counter after consumer probe in driver_probe_device(). 
- */ - if (consumer->links.status == DL_DEV_PROBING) - pm_runtime_get_noresume(supplier); } + get_device(supplier); link->supplier = supplier; INIT_LIST_HEAD(&link->s_node); @@ -260,17 +329,26 @@ struct device_link *device_link_add(struct device *consumer, link->status = DL_STATE_NONE; } else { switch (supplier->links.status) { - case DL_DEV_DRIVER_BOUND: + case DL_DEV_PROBING: switch (consumer->links.status) { case DL_DEV_PROBING: /* - * Some callers expect the link creation during - * consumer driver probe to resume the supplier - * even without DL_FLAG_RPM_ACTIVE. + * A consumer driver can create a link to a + * supplier that has not completed its probing + * yet as long as it knows that the supplier is + * already functional (for example, it has just + * acquired some resources from the supplier). */ - if (flags & DL_FLAG_PM_RUNTIME) - pm_runtime_resume(supplier); - + link->status = DL_STATE_CONSUMER_PROBE; + break; + default: + link->status = DL_STATE_DORMANT; + break; + } + break; + case DL_DEV_DRIVER_BOUND: + switch (consumer->links.status) { + case DL_DEV_PROBING: link->status = DL_STATE_CONSUMER_PROBE; break; case DL_DEV_DRIVER_BOUND: @@ -290,6 +368,14 @@ struct device_link *device_link_add(struct device *consumer, } } + /* + * Some callers expect the link creation during consumer driver probe to + * resume the supplier even without DL_FLAG_RPM_ACTIVE. + */ + if (link->status == DL_STATE_CONSUMER_PROBE && + flags & DL_FLAG_PM_RUNTIME) + pm_runtime_resume(supplier); + /* * Move the consumer and all of the devices depending on it to the end * of dpm_list and the devices_kset list. @@ -302,17 +388,24 @@ struct device_link *device_link_add(struct device *consumer, list_add_tail_rcu(&link->s_node, &supplier->links.consumers); list_add_tail_rcu(&link->c_node, &consumer->links.suppliers); - dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); + dev_dbg(consumer, "Linked as a consumer to %s\n", dev_name(supplier)); out: device_pm_unlock(); device_links_write_unlock(); + + if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link) + pm_runtime_put(supplier); + return link; } EXPORT_SYMBOL_GPL(device_link_add); static void device_link_free(struct device_link *link) { + while (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + put_device(link->consumer); put_device(link->supplier); kfree(link); @@ -328,8 +421,8 @@ static void __device_link_del(struct kref *kref) { struct device_link *link = container_of(kref, struct device_link, kref); - dev_info(link->consumer, "Dropping the link to %s\n", - dev_name(link->supplier)); + dev_dbg(link->consumer, "Dropping the link to %s\n", + dev_name(link->supplier)); if (link->flags & DL_FLAG_PM_RUNTIME) pm_runtime_drop_link(link->consumer); @@ -355,8 +448,16 @@ static void __device_link_del(struct kref *kref) } #endif /* !CONFIG_SRCU */ +static void device_link_put_kref(struct device_link *link) +{ + if (link->flags & DL_FLAG_STATELESS) + kref_put(&link->kref, __device_link_del); + else + WARN(1, "Unable to drop a managed device link reference\n"); +} + /** - * device_link_del - Delete a link between two devices. + * device_link_del - Delete a stateless link between two devices. * @link: Device link to delete. 
* * The caller must ensure proper synchronization of this function with runtime @@ -368,14 +469,14 @@ void device_link_del(struct device_link *link) { device_links_write_lock(); device_pm_lock(); - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); device_pm_unlock(); device_links_write_unlock(); } EXPORT_SYMBOL_GPL(device_link_del); /** - * device_link_remove - remove a link between two devices. + * device_link_remove - Delete a stateless link between two devices. * @consumer: Consumer end of the link. * @supplier: Supplier end of the link. * @@ -394,7 +495,7 @@ void device_link_remove(void *consumer, struct device *supplier) list_for_each_entry(link, &supplier->links.consumers, s_node) { if (link->consumer == consumer) { - kref_put(&link->kref, __device_link_del); + device_link_put_kref(link); break; } } @@ -474,8 +575,21 @@ void device_links_driver_bound(struct device *dev) if (link->flags & DL_FLAG_STATELESS) continue; + /* + * Links created during consumer probe may be in the "consumer + * probe" state to start with if the supplier is still probing + * when they are created and they may become "active" if the + * consumer probe returns first. Skip them here. + */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + continue; + WARN_ON(link->status != DL_STATE_DORMANT); WRITE_ONCE(link->status, DL_STATE_AVAILABLE); + + if (link->flags & DL_FLAG_AUTOPROBE_CONSUMER) + driver_deferred_probe_add(link->consumer); } list_for_each_entry(link, &dev->links.suppliers, c_node) { @@ -512,18 +626,49 @@ static void __device_links_no_driver(struct device *dev) continue; if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER) - kref_put(&link->kref, __device_link_del); - else if (link->status != DL_STATE_SUPPLIER_UNBIND) + __device_link_del(&link->kref); + else if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) WRITE_ONCE(link->status, DL_STATE_AVAILABLE); } dev->links.status = DL_DEV_NO_DRIVER; } +/** + * device_links_no_driver - Update links after failing driver probe. + * @dev: Device whose driver has just failed to probe. + * + * Clean up leftover links to consumers for @dev and invoke + * %__device_links_no_driver() to update links to suppliers for it as + * appropriate. + * + * Links with the DL_FLAG_STATELESS flag set are ignored. + */ void device_links_no_driver(struct device *dev) { + struct device_link *link; + device_links_write_lock(); + + list_for_each_entry(link, &dev->links.consumers, s_node) { + if (link->flags & DL_FLAG_STATELESS) + continue; + + /* + * The probe has failed, so if the status of the link is + * "consumer probe" or "active", it must have been added by + * a probing consumer while this device was still probing. + * Change its state to "dormant", as it represents a valid + * relationship, but it is not functionally meaningful. 
+ */ + if (link->status == DL_STATE_CONSUMER_PROBE || + link->status == DL_STATE_ACTIVE) + WRITE_ONCE(link->status, DL_STATE_DORMANT); + } + __device_links_no_driver(dev); + device_links_write_unlock(); } @@ -539,11 +684,11 @@ void device_links_no_driver(struct device *dev) */ void device_links_driver_cleanup(struct device *dev) { - struct device_link *link; + struct device_link *link, *ln; device_links_write_lock(); - list_for_each_entry(link, &dev->links.consumers, s_node) { + list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) { if (link->flags & DL_FLAG_STATELESS) continue; @@ -557,7 +702,7 @@ void device_links_driver_cleanup(struct device *dev) */ if (link->status == DL_STATE_SUPPLIER_UNBIND && link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER) - kref_put(&link->kref, __device_link_del); + __device_link_del(&link->kref); WRITE_ONCE(link->status, DL_STATE_DORMANT); } @@ -1966,7 +2111,7 @@ int device_add(struct device *dev) if (dev->class) { mutex_lock(&dev->class->p->mutex); /* tie the class to the device */ - klist_add_tail(&dev->knode_class, + klist_add_tail(&dev->p->knode_class, &dev->class->p->klist_devices); /* notify any interfaces that the device is here */ @@ -2080,6 +2225,17 @@ void device_del(struct device *dev) struct kobject *glue_dir = NULL; struct class_interface *class_intf; + /* + * Hold the device lock and set the "dead" flag to guarantee that + * the update behavior is consistent with the other bitfields near + * it and that we cannot have an asynchronous probe routine trying + * to run while we are tearing out the bus/class/sysfs from + * underneath the device. + */ + device_lock(dev); + dev->p->dead = true; + device_unlock(dev); + /* Notify clients of device removal. This call must come * before dpm_sysfs_remove(). */ @@ -2105,7 +2261,7 @@ void device_del(struct device *dev) if (class_intf->remove_dev) class_intf->remove_dev(dev, class_intf); /* remove the device from the class list */ - klist_del(&dev->knode_class); + klist_del(&dev->p->knode_class); mutex_unlock(&dev->class->p->mutex); } device_remove_file(dev, &dev_attr_uevent); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index eb9443d5bae1..668139cfa664 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -409,6 +409,7 @@ static void device_create_release(struct device *dev) kfree(dev); } +__printf(4, 0) static struct device * __cpu_device_create(struct device *parent, void *drvdata, const struct attribute_group **groups, @@ -427,6 +428,7 @@ __cpu_device_create(struct device *parent, void *drvdata, dev->parent = parent; dev->groups = groups; dev->release = device_create_release; + device_set_pm_not_required(dev); dev_set_drvdata(dev, drvdata); retval = kobject_set_name_vargs(&dev->kobj, fmt, args); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 8ac10af17c00..a823f469e53f 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -57,6 +57,10 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0); static struct dentry *deferred_devices; static bool initcalls_done; +/* Save the async probe drivers' name from kernel cmdline */ +#define ASYNC_DRV_NAMES_MAX_LEN 256 +static char async_probe_drv_names[ASYNC_DRV_NAMES_MAX_LEN]; + /* * In some cases, like suspend to RAM or hibernation, It might be reasonable * to prohibit probing of devices as it could be unsafe. 
@@ -116,7 +120,7 @@ static void deferred_probe_work_func(struct work_struct *work) } static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func); -static void driver_deferred_probe_add(struct device *dev) +void driver_deferred_probe_add(struct device *dev) { mutex_lock(&deferred_probe_mutex); if (list_empty(&dev->p->deferred_probe)) { @@ -674,6 +678,23 @@ int driver_probe_device(struct device_driver *drv, struct device *dev) return ret; } +static inline bool cmdline_requested_async_probing(const char *drv_name) +{ + return parse_option_str(async_probe_drv_names, drv_name); +} + +/* The option format is "driver_async_probe=drv_name1,drv_name2,..." */ +static int __init save_async_options(char *buf) +{ + if (strlen(buf) >= ASYNC_DRV_NAMES_MAX_LEN) + printk(KERN_WARNING + "Too long list of driver names for 'driver_async_probe'!\n"); + + strlcpy(async_probe_drv_names, buf, ASYNC_DRV_NAMES_MAX_LEN); + return 0; +} +__setup("driver_async_probe=", save_async_options); + bool driver_allows_async_probing(struct device_driver *drv) { switch (drv->probe_type) { @@ -684,6 +705,9 @@ bool driver_allows_async_probing(struct device_driver *drv) return false; default: + if (cmdline_requested_async_probing(drv->name)) + return true; + if (module_requested_async_probing(drv->owner)) return true; @@ -731,15 +755,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data) bool async_allowed; int ret; - /* - * Check if device has already been claimed. This may - * happen with driver loading, device discovery/registration, - * and deferred probe processing happens all at once with - * multiple threads. - */ - if (dev->driver) - return -EBUSY; - ret = driver_match_device(drv, dev); if (ret == 0) { /* no match */ @@ -774,6 +789,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) device_lock(dev); + /* + * Check if device has already been removed or claimed. This may + * happen with driver loading, device discovery/registration, + * and deferred probe processing happens all at once with + * multiple threads. + */ + if (dev->p->dead || dev->driver) + goto out_unlock; + if (dev->parent) pm_runtime_get_sync(dev->parent); @@ -784,7 +808,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie) if (dev->parent) pm_runtime_put(dev->parent); - +out_unlock: device_unlock(dev); put_device(dev); @@ -829,7 +853,7 @@ static int __device_attach(struct device *dev, bool allow_async) */ dev_dbg(dev, "scheduling asynchronous probe\n"); get_device(dev); - async_schedule(__device_attach_async_helper, dev); + async_schedule_dev(__device_attach_async_helper, dev); } else { pm_request_idle(dev); } @@ -867,6 +891,88 @@ void device_initial_probe(struct device *dev) __device_attach(dev, true); } +/* + * __device_driver_lock - acquire locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. Needed if the bus requires parent lock + * + * This function will take the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a USB + * interface, @parent lock will be held as well. + */ +static void __device_driver_lock(struct device *dev, struct device *parent) +{ + if (parent && dev->bus->need_parent_lock) + device_lock(parent); + device_lock(dev); +} + +/* + * __device_driver_unlock - release locks needed to manipulate dev->drv + * @dev: Device we will update driver info for + * @parent: Parent device. 
Needed if the bus requires parent lock + * + * This function will release the required locks for manipulating dev->drv. + * Normally this will just be the @dev lock, but when called for a + * USB interface, @parent lock will be released as well. + */ +static void __device_driver_unlock(struct device *dev, struct device *parent) +{ + device_unlock(dev); + if (parent && dev->bus->need_parent_lock) + device_unlock(parent); +} + +/** + * device_driver_attach - attach a specific driver to a specific device + * @drv: Driver to attach + * @dev: Device to attach it to + * + * Manually attach driver to a device. Will acquire both @dev lock and + * @dev->parent lock if needed. + */ +int device_driver_attach(struct device_driver *drv, struct device *dev) +{ + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + return ret; +} + +static void __driver_attach_async_helper(void *_dev, async_cookie_t cookie) +{ + struct device *dev = _dev; + struct device_driver *drv; + int ret = 0; + + __device_driver_lock(dev, dev->parent); + + drv = dev->p->async_driver; + + /* + * If device has been removed or someone has already successfully + * bound a driver before us just skip the driver probe call. + */ + if (!dev->p->dead && !dev->driver) + ret = driver_probe_device(drv, dev); + + __device_driver_unlock(dev, dev->parent); + + dev_dbg(dev, "driver %s async attach completed: %d\n", drv->name, ret); + + put_device(dev); +} + static int __driver_attach(struct device *dev, void *data) { struct device_driver *drv = data; @@ -894,14 +1000,26 @@ static int __driver_attach(struct device *dev, void *data) return ret; } /* ret > 0 means positive match */ - if (dev->parent && dev->bus->need_parent_lock) - device_lock(dev->parent); - device_lock(dev); - if (!dev->driver) - driver_probe_device(drv, dev); - device_unlock(dev); - if (dev->parent && dev->bus->need_parent_lock) - device_unlock(dev->parent); + if (driver_allows_async_probing(drv)) { + /* + * Instead of probing the device synchronously we will + * probe it asynchronously to allow for more parallelism.
+ * + * We only take the device lock here in order to guarantee + * that the dev->driver and async_driver fields are protected + */ + dev_dbg(dev, "probing driver %s asynchronously\n", drv->name); + device_lock(dev); + if (!dev->driver) { + get_device(dev); + dev->p->async_driver = drv; + async_schedule_dev(__driver_attach_async_helper, dev); + } + device_unlock(dev); + return 0; + } + + device_driver_attach(drv, dev); return 0; } @@ -932,15 +1050,11 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv = dev->driver; if (drv) { while (device_links_busy(dev)) { - device_unlock(dev); - if (parent && dev->bus->need_parent_lock) - device_unlock(parent); + __device_driver_unlock(dev, parent); device_links_unbind_consumers(dev); - if (parent && dev->bus->need_parent_lock) - device_lock(parent); - device_lock(dev); + __device_driver_lock(dev, parent); /* * A concurrent invocation of the same function might * have released the driver successfully while this one @@ -968,9 +1082,9 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv->remove(dev); device_links_driver_cleanup(dev); - arch_teardown_dma_ops(dev); devres_release_all(dev); + arch_teardown_dma_ops(dev); dev->driver = NULL; dev_set_drvdata(dev, NULL); if (dev->pm_domain && dev->pm_domain->dismiss) @@ -993,16 +1107,12 @@ void device_release_driver_internal(struct device *dev, struct device_driver *drv, struct device *parent) { - if (parent && dev->bus->need_parent_lock) - device_lock(parent); + __device_driver_lock(dev, parent); - device_lock(dev); if (!drv || drv == dev->driver) __device_release_driver(dev, parent); - device_unlock(dev); - if (parent && dev->bus->need_parent_lock) - device_unlock(parent); + __device_driver_unlock(dev, parent); } /** @@ -1027,6 +1137,18 @@ void device_release_driver(struct device *dev) } EXPORT_SYMBOL_GPL(device_release_driver); +/** + * device_driver_detach - detach driver from a specific device + * @dev: device to detach driver from + * + * Detach driver from device. Will acquire both @dev lock and @dev->parent + * lock if needed. + */ +void device_driver_detach(struct device *dev) +{ + device_release_driver_internal(dev, NULL, dev->parent); +} + /** * driver_detach - detach driver from all devices it controls. * @drv: driver. diff --git a/drivers/base/devcon.c b/drivers/base/devcon.c index d427e806cd73..04db9ae235e4 100644 --- a/drivers/base/devcon.c +++ b/drivers/base/devcon.c @@ -7,10 +7,37 @@ */ #include +#include static DEFINE_MUTEX(devcon_lock); static LIST_HEAD(devcon_list); +typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep, + void *data); + +static void * +fwnode_graph_devcon_match(struct fwnode_handle *fwnode, const char *con_id, + void *data, devcon_match_fn_t match) +{ + struct device_connection con = { .id = con_id }; + struct fwnode_handle *ep; + void *ret; + + fwnode_graph_for_each_endpoint(fwnode, ep) { + con.fwnode = fwnode_graph_get_remote_port_parent(ep); + if (!fwnode_device_is_available(con.fwnode)) + continue; + + ret = match(&con, -1, data); + fwnode_handle_put(con.fwnode); + if (ret) { + fwnode_handle_put(ep); + return ret; + } + } + return NULL; +} + /** * device_connection_find_match - Find physical connection to a device * @dev: Device with the connection @@ -23,10 +50,9 @@ static LIST_HEAD(devcon_list); * caller is expecting to be returned. 
*/ void *device_connection_find_match(struct device *dev, const char *con_id, - void *data, - void *(*match)(struct device_connection *con, - int ep, void *data)) + void *data, devcon_match_fn_t match) { + struct fwnode_handle *fwnode = dev_fwnode(dev); const char *devname = dev_name(dev); struct device_connection *con; void *ret = NULL; @@ -35,6 +61,12 @@ void *device_connection_find_match(struct device *dev, const char *con_id, if (!match) return NULL; + if (fwnode) { + ret = fwnode_graph_devcon_match(fwnode, con_id, data, match); + if (ret) + return ret; + } + mutex_lock(&devcon_lock); list_for_each_entry(con, &devcon_list, list) { @@ -75,12 +107,36 @@ static struct bus_type *generic_match_buses[] = { NULL, }; +static int device_fwnode_match(struct device *dev, void *fwnode) +{ + return dev_fwnode(dev) == fwnode; +} + +static void *device_connection_fwnode_match(struct device_connection *con) +{ + struct bus_type *bus; + struct device *dev; + + for (bus = generic_match_buses[0]; bus; bus++) { + dev = bus_find_device(bus, NULL, (void *)con->fwnode, + device_fwnode_match); + if (dev && !strncmp(dev_name(dev), con->id, strlen(con->id))) + return dev; + + put_device(dev); + } + return NULL; +} + /* This tries to find the device from the most common bus types by name. */ static void *generic_match(struct device_connection *con, int ep, void *data) { struct bus_type *bus; struct device *dev; + if (con->fwnode) + return device_connection_fwnode_match(con); + for (bus = generic_match_buses[0]; bus; bus++) { dev = bus_find_device_by_name(bus, NULL, con->endpoint[ep]); if (dev) diff --git a/drivers/base/firmware_loader/Makefile b/drivers/base/firmware_loader/Makefile index a97eeb0be1d8..0b2dfa6259c9 100644 --- a/drivers/base/firmware_loader/Makefile +++ b/drivers/base/firmware_loader/Makefile @@ -1,7 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for the Linux firmware loader -obj-y := fallback_table.o +obj-$(CONFIG_FW_LOADER_USER_HELPER) += fallback_table.o obj-$(CONFIG_FW_LOADER) += firmware_class.o firmware_class-objs := main.o firmware_class-$(CONFIG_FW_LOADER_USER_HELPER) += fallback.o + +obj-y += builtin/ diff --git a/firmware/.gitignore b/drivers/base/firmware_loader/builtin/.gitignore similarity index 100% rename from firmware/.gitignore rename to drivers/base/firmware_loader/builtin/.gitignore diff --git a/firmware/Makefile b/drivers/base/firmware_loader/builtin/Makefile similarity index 100% rename from firmware/Makefile rename to drivers/base/firmware_loader/builtin/Makefile diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c index 7428659d8df9..776dd69cf5be 100644 --- a/drivers/base/firmware_loader/fallback_table.c +++ b/drivers/base/firmware_loader/fallback_table.c @@ -16,9 +16,6 @@ * firmware fallback configuration table */ -/* Module or buit-in */ -#ifdef CONFIG_FW_LOADER_USER_HELPER - static unsigned int zero; static unsigned int one = 1; @@ -51,5 +48,3 @@ struct ctl_table firmware_config_table[] = { { } }; EXPORT_SYMBOL_GPL(firmware_config_table); - -#endif diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c index 8e9213b36e31..7eaaf5ee5ba6 100644 --- a/drivers/base/firmware_loader/main.c +++ b/drivers/base/firmware_loader/main.c @@ -328,12 +328,12 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv) rc = kernel_read_file_from_path(path, &fw_priv->data, &size, msize, id); if (rc) { - if (rc == -ENOENT) - dev_dbg(device, "loading %s failed with error %d\n", - 
path, rc); - else + if (rc != -ENOENT) dev_warn(device, "loading %s failed with error %d\n", path, rc); + else + dev_dbg(device, "loading %s failed for no such file or directory.\n", + path); continue; } dev_dbg(device, "direct-loading %s\n", fw_priv->fw_name); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 1c958eb33ef4..dab0a5abc391 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -79,6 +79,26 @@ struct resource *platform_get_resource(struct platform_device *dev, } EXPORT_SYMBOL_GPL(platform_get_resource); +/** + * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform + * device + * + * @pdev: platform device to use both for memory resource lookup as well as + * resource management + * @index: resource index + */ +#ifdef CONFIG_HAS_IOMEM +void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, + unsigned int index) +{ + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, index); + return devm_ioremap_resource(&pdev->dev, res); +} +EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); +#endif /* CONFIG_HAS_IOMEM */ + /** * platform_get_irq - get an IRQ for a device * @dev: platform device @@ -127,7 +147,20 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); } - return r ? r->start : -ENXIO; + if (r) + return r->start; + + /* + * For the index 0 interrupt, allow falling back to GpioInt + * resources. While a device could have both Interrupt and GpioInt + * resources, making this fallback ambiguous, in many common cases + * the device will only expose one IRQ, and this fallback + * allows a common code path across either kind of resource. + */ + if (num == 0 && has_acpi_companion(&dev->dev)) + return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); + + return -ENXIO; #endif } EXPORT_SYMBOL_GPL(platform_get_irq); @@ -508,10 +541,12 @@ struct platform_device *platform_device_register_full( pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) - goto err_alloc; + return ERR_PTR(-ENOMEM); pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; + pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); + pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { /* @@ -553,8 +588,6 @@ struct platform_device *platform_device_register_full( err: ACPI_COMPANION_SET(&pdev->dev, NULL); kfree(pdev->dev.dma_mask); - -err_alloc: platform_device_put(pdev); return ERR_PTR(ret); } diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 5a42ae4078c2..365ad751ce0f 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) if (IS_ERR(ce->clk)) { ce->status = PCE_STATUS_ERROR; } else { - clk_prepare(ce->clk); - ce->status = PCE_STATUS_ACQUIRED; - dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n", - ce->clk, ce->con_id); + if (clk_prepare(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + dev_err(dev, "clk_prepare() failed\n"); + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, + "Clock %pC con_id %s managed by runtime PM.\n", + ce->clk, ce->con_id); + } } } diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index b413951c6abc..22aedb28aad7 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(dev_pm_domain_attach_by_id); * For a
detailed function description, see dev_pm_domain_attach_by_id(). */ struct device *dev_pm_domain_attach_by_name(struct device *dev, - char *name) + const char *name) { if (dev->pm_domain) return ERR_PTR(-EEXIST); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 500de1dee967..2c334c01fc43 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2483,7 +2483,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id); * power-domain-names DT property. For further description see * genpd_dev_pm_attach_by_id(). */ -struct device *genpd_dev_pm_attach_by_name(struct device *dev, char *name) +struct device *genpd_dev_pm_attach_by_name(struct device *dev, const char *name) { int index; @@ -2948,18 +2948,11 @@ static int __init genpd_debug_init(void) genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); - if (!genpd_debugfs_dir) - return -ENOMEM; - - d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - genpd_debugfs_dir, NULL, &summary_fops); - if (!d) - return -ENOMEM; + debugfs_create_file("pm_genpd_summary", S_IRUGO, genpd_debugfs_dir, + NULL, &summary_fops); list_for_each_entry(genpd, &gpd_list, gpd_list_node) { d = debugfs_create_dir(genpd->name, genpd_debugfs_dir); - if (!d) - return -ENOMEM; debugfs_create_file("current_state", 0444, d, genpd, &status_fops); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 0992e67e862b..5a8149829ab3 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -124,6 +124,10 @@ void device_pm_unlock(void) */ void device_pm_add(struct device *dev) { + /* Skip PM setup/initialization. */ + if (device_pm_not_required(dev)) + return; + pr_debug("PM: Adding info for %s:%s\n", dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); device_pm_check_callbacks(dev); @@ -142,6 +146,9 @@ void device_pm_add(struct device *dev) */ void device_pm_remove(struct device *dev) { + if (device_pm_not_required(dev)) + return; + pr_debug("PM: Removing info for %s:%s\n", dev->bus ? 
dev->bus->name : "No Bus", dev_name(dev)); complete_all(&dev->power.completion); @@ -727,7 +734,7 @@ void dpm_noirq_resume_devices(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume_noirq, dev); + async_schedule_dev(async_resume_noirq, dev); } } @@ -884,7 +891,7 @@ void dpm_resume_early(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume_early, dev); + async_schedule_dev(async_resume_early, dev); } } @@ -1048,7 +1055,7 @@ void dpm_resume(pm_message_t state) reinit_completion(&dev->power.completion); if (is_async(dev)) { get_device(dev); - async_schedule(async_resume, dev); + async_schedule_dev(async_resume, dev); } } @@ -1368,7 +1375,7 @@ static int device_suspend_noirq(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend_noirq, dev); + async_schedule_dev(async_suspend_noirq, dev); return 0; } return __device_suspend_noirq(dev, pm_transition, false); @@ -1571,7 +1578,7 @@ static int device_suspend_late(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend_late, dev); + async_schedule_dev(async_suspend_late, dev); return 0; } @@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_status_suspended(dev)) + if (pm_runtime_status_suspended(dev)) { + pm_dev_dbg(dev, state, "direct-complete "); goto Complete; + } pm_runtime_enable(dev); } @@ -1835,7 +1844,7 @@ static int device_suspend(struct device *dev) if (is_async(dev)) { get_device(dev); - async_schedule(async_suspend, dev); + async_schedule_dev(async_suspend, dev); return 0; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 457be03b744d..a80dbf08a99c 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags); */ void update_pm_runtime_accounting(struct device *dev) { - unsigned long now = jiffies; - unsigned long delta; + u64 now, last, delta; - delta = now - dev->power.accounting_timestamp; + if (dev->power.disable_depth > 0) + return; + + last = dev->power.accounting_timestamp; + now = ktime_get_mono_fast_ns(); dev->power.accounting_timestamp = now; - if (dev->power.disable_depth > 0) + /* + * Because ktime_get_mono_fast_ns() is not monotonic during + * timekeeping updates, ensure that 'now' is after the last saved + * timestamp. + */ + if (now < last) return; + delta = now - last; + if (dev->power.runtime_status == RPM_SUSPENDED) - dev->power.suspended_jiffies += delta; + dev->power.suspended_time += delta; else - dev->power.active_jiffies += delta; + dev->power.active_time += delta; } static void __update_runtime_status(struct device *dev, enum rpm_status status) @@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) dev->power.runtime_status = status; } +u64 pm_runtime_suspended_time(struct device *dev) +{ + u64 time; + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + update_pm_runtime_accounting(dev); + time = dev->power.suspended_time; + + spin_unlock_irqrestore(&dev->power.lock, flags); + + return time; +} +EXPORT_SYMBOL_GPL(pm_runtime_suspended_time); + /** * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. * @dev: Device to handle.
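A brief usage sketch (again not part of the patch): the newly exported pm_runtime_suspended_time() returns the accumulated runtime-suspend time in nanoseconds, so a caller can convert it to milliseconds for reporting. The function name and message below are hypothetical; the snippet assumes <linux/pm_runtime.h> and <linux/math64.h>:

	/* Sketch only: read the ns-granularity counter added by this patch. */
	static void foo_log_suspended_time(struct device *dev)
	{
		u64 ns = pm_runtime_suspended_time(dev);

		dev_info(dev, "runtime suspended for %llu ms\n",
			 div_u64(ns, NSEC_PER_MSEC));
	}
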
@@ -95,7 +121,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { - hrtimer_cancel(&dev->power.suspend_timer); + hrtimer_try_to_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } @@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev) u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; - u64 last_busy, expires = 0; - u64 now = ktime_to_ns(ktime_get()); + u64 expires; if (!dev->power.use_autosuspend) - goto out; + return 0; autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); if (autosuspend_delay < 0) - goto out; - - last_busy = READ_ONCE(dev->power.last_busy); + return 0; - expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC; - if (expires <= now) - expires = 0; /* Already expired. */ + expires = READ_ONCE(dev->power.last_busy); + expires += (u64)autosuspend_delay * NSEC_PER_MSEC; + if (expires > ktime_get_mono_fast_ns()) + return expires; /* Expires in the future */ - out: - return expires; + return 0; } EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); @@ -259,11 +282,8 @@ static int rpm_get_suppliers(struct device *dev) list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { int retval; - if (!(link->flags & DL_FLAG_PM_RUNTIME)) - continue; - - if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND || - link->rpm_active) + if (!(link->flags & DL_FLAG_PM_RUNTIME) || + READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) continue; retval = pm_runtime_get_sync(link->supplier); @@ -272,7 +292,7 @@ static int rpm_get_suppliers(struct device *dev) pm_runtime_put_noidle(link->supplier); return retval; } - link->rpm_active = true; + refcount_inc(&link->rpm_active); } return 0; } @@ -281,12 +301,13 @@ static void rpm_put_suppliers(struct device *dev) { struct device_link *link; - list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->rpm_active && - READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) { + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { + if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND) + continue; + + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put(link->supplier); - link->rpm_active = false; - } + } } /** @@ -909,7 +930,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) * If 'expires' is after the current time, we've been called * too early. */ - if (expires > 0 && expires < ktime_to_ns(ktime_get())) { + if (expires > 0 && expires < ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); @@ -928,7 +949,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; - ktime_t expires; + u64 expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); @@ -945,8 +966,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) /* Other scheduled or pending requests need to be canceled. 
*/ pm_runtime_cancel_pending(dev); - expires = ktime_add(ktime_get(), ms_to_ktime(delay)); - dev->power.timer_expires = ktime_to_ns(expires); + expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; + dev->power.timer_expires = expires; dev->power.timer_autosuspends = 0; hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); @@ -1091,24 +1112,57 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); * and the device parent's counter of unsuspended children is modified to * reflect the new status. If the new status is RPM_SUSPENDED, an idle * notification request for the parent is submitted. + * + * If @dev has any suppliers (as reflected by device links to them), and @status + * is RPM_ACTIVE, they will be activated upfront and if the activation of one + * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead + * of the @status value) and the suppliers will be deactivated on exit. The + * error returned by the failing supplier activation will be returned in that + * case. */ int __pm_runtime_set_status(struct device *dev, unsigned int status) { struct device *parent = dev->parent; - unsigned long flags; bool notify_parent = false; int error = 0; if (status != RPM_ACTIVE && status != RPM_SUSPENDED) return -EINVAL; - spin_lock_irqsave(&dev->power.lock, flags); + spin_lock_irq(&dev->power.lock); - if (!dev->power.runtime_error && !dev->power.disable_depth) { + /* + * Prevent PM-runtime from being enabled for the device or return an + * error if it is enabled already and working. + */ + if (dev->power.runtime_error || dev->power.disable_depth) + dev->power.disable_depth++; + else error = -EAGAIN; - goto out; + + spin_unlock_irq(&dev->power.lock); + + if (error) + return error; + + /* + * If the new status is RPM_ACTIVE, the suppliers can be activated + * upfront regardless of the current status, because next time + * rpm_put_suppliers() runs, the rpm_active refcounts of the links + * involved will be dropped down to one anyway. + */ + if (status == RPM_ACTIVE) { + int idx = device_links_read_lock(); + + error = rpm_get_suppliers(dev); + if (error) + status = RPM_SUSPENDED; + + device_links_read_unlock(idx); } + spin_lock_irq(&dev->power.lock); + if (dev->power.runtime_status == status || !parent) goto out_set; @@ -1136,19 +1190,33 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) spin_unlock(&parent->power.lock); - if (error) + if (error) { + status = RPM_SUSPENDED; goto out; + } } out_set: __update_runtime_status(dev, status); - dev->power.runtime_error = 0; + if (!error) + dev->power.runtime_error = 0; + out: - spin_unlock_irqrestore(&dev->power.lock, flags); + spin_unlock_irq(&dev->power.lock); if (notify_parent) pm_request_idle(parent); + if (status == RPM_SUSPENDED) { + int idx = device_links_read_lock(); + + rpm_put_suppliers(dev); + + device_links_read_unlock(idx); + } + + pm_runtime_enable(dev); + return error; } EXPORT_SYMBOL_GPL(__pm_runtime_set_status); @@ -1276,6 +1344,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) pm_runtime_put_noidle(dev); } + /* Update time accounting before disabling PM-runtime.
*/ + update_pm_runtime_accounting(dev); + if (!dev->power.disable_depth++) __pm_runtime_barrier(dev); @@ -1294,10 +1365,15 @@ void pm_runtime_enable(struct device *dev) spin_lock_irqsave(&dev->power.lock, flags); - if (dev->power.disable_depth > 0) + if (dev->power.disable_depth > 0) { dev->power.disable_depth--; - else + + /* About to enable runtime pm, set accounting_timestamp to now */ + if (!dev->power.disable_depth) + dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); + } else { dev_warn(dev, "Unbalanced %s!\n", __func__); + } WARN(!dev->power.disable_depth && dev->power.runtime_status == RPM_SUSPENDED && @@ -1494,7 +1570,6 @@ void pm_runtime_init(struct device *dev) dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; - dev->power.accounting_timestamp = jiffies; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; @@ -1539,7 +1614,7 @@ void pm_runtime_remove(struct device *dev) * * Check links from this device to any consumers and if any of them have active * runtime PM references to the device, drop the usage counter of the device - * (once per link). + * (as many times as needed). * * Links with the DL_FLAG_STATELESS flag set are ignored. * @@ -1561,10 +1636,8 @@ void pm_runtime_clean_up_links(struct device *dev) if (link->flags & DL_FLAG_STATELESS) continue; - if (link->rpm_active) { + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put_noidle(dev); - link->rpm_active = false; - } } device_links_read_unlock(idx); @@ -1582,8 +1655,11 @@ void pm_runtime_get_suppliers(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) + if (link->flags & DL_FLAG_PM_RUNTIME) { + link->supplier_preactivated = true; + refcount_inc(&link->rpm_active); pm_runtime_get_sync(link->supplier); + } device_links_read_unlock(idx); } @@ -1600,8 +1676,11 @@ void pm_runtime_put_suppliers(struct device *dev) idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_put(link->supplier); + if (link->supplier_preactivated) { + link->supplier_preactivated = false; + if (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); + } device_links_read_unlock(idx); } @@ -1615,8 +1694,6 @@ void pm_runtime_new_link(struct device *dev) void pm_runtime_drop_link(struct device *dev) { - rpm_put_suppliers(dev); - spin_lock_irq(&dev->power.lock); WARN_ON(dev->power.links_count == 0); dev->power.links_count--; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d713738ce796..c6bf76124184 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; + u64 tmp; spin_lock_irq(&dev->power.lock); update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies)); + tmp = dev->power.active_time; + do_div(tmp, NSEC_PER_MSEC); + ret = sprintf(buf, "%llu\n", tmp); spin_unlock_irq(&dev->power.lock); return ret; } @@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; + u64 tmp; spin_lock_irq(&dev->power.lock); update_pm_runtime_accounting(dev); - ret = sprintf(buf, "%i\n", - jiffies_to_msecs(dev->power.suspended_jiffies)); + tmp = dev->power.suspended_time; 
+ do_div(tmp, NSEC_PER_MSEC); + ret = sprintf(buf, "%llu\n", tmp); spin_unlock_irq(&dev->power.lock); return ret; } @@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev) { int rc; + /* No need to create PM sysfs if explicitly disabled. */ + if (device_pm_not_required(dev)) + return 0; + rc = sysfs_create_group(&dev->kobj, &pm_attr_group); if (rc) return rc; @@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev) void dpm_sysfs_remove(struct device *dev) { + if (device_pm_not_required(dev)) + return; sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); dev_pm_qos_constraints_destroy(dev); rpm_sysfs_remove(dev); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 5fa1898755a3..f1fee72ed970 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -783,7 +783,7 @@ void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard) EXPORT_SYMBOL_GPL(pm_wakeup_ws_event); /** - * pm_wakeup_event - Notify the PM core of a wakeup event. + * pm_wakeup_dev_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). * @hard: If set, abort suspends in progress and wake up from suspend-to-idle. diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 2e8f0144f9ab..9cbb4b0cd01b 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -33,7 +33,7 @@ struct regcache_rbtree_node { unsigned int blklen; /* the actual rbtree node holding this block */ struct rb_node node; -} __attribute__ ((packed)); +}; struct regcache_rbtree_ctx { struct rb_root root; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 330c1f7e9665..5059748afd4c 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -35,6 +35,7 @@ struct regmap_irq_chip_data { int wake_count; void *status_reg_buf; + unsigned int *main_status_buf; unsigned int *status_buf; unsigned int *mask_buf; unsigned int *mask_buf_def; @@ -329,6 +330,33 @@ static const struct irq_chip regmap_irq_chip = { .irq_set_wake = regmap_irq_set_wake, }; +static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, + unsigned int b) +{ + const struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + struct regmap_irq_sub_irq_map *subreg; + int i, ret = 0; + + if (!chip->sub_reg_offsets) { + /* Assume linear mapping */ + ret = regmap_read(map, chip->status_base + + (b * map->reg_stride * data->irq_reg_stride), + &data->status_buf[b]); + } else { + subreg = &chip->sub_reg_offsets[b]; + for (i = 0; i < subreg->num_regs; i++) { + unsigned int offset = subreg->offset[i]; + + ret = regmap_read(map, chip->status_base + offset, + &data->status_buf[offset]); + if (ret) + break; + } + } + return ret; +} + static irqreturn_t regmap_irq_thread(int irq, void *d) { struct regmap_irq_chip_data *data = d; @@ -352,11 +380,65 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } /* - * Read in the statuses, using a single bulk read if possible - * in order to reduce the I/O overheads. + * Read only registers with active IRQs if the chip has 'main status + * register'. Else read in the statuses, using a single bulk read if + * possible in order to reduce the I/O overheads. 
*/ - if (!map->use_single_read && map->reg_stride == 1 && - data->irq_reg_stride == 1) { + + if (chip->num_main_regs) { + unsigned int max_main_bits; + unsigned long size; + + size = chip->num_regs * sizeof(unsigned int); + + max_main_bits = (chip->num_main_status_bits) ? + chip->num_main_status_bits : chip->num_regs; + /* Clear the status buf as we don't read all status regs */ + memset(data->status_buf, 0, size); + + /* We could support bulk read for main status registers + * but I don't expect to see devices with really many main + * status registers so let's only support single reads for the + * sake of simplicity. and add bulk reads only if needed + */ + for (i = 0; i < chip->num_main_regs; i++) { + ret = regmap_read(map, chip->main_status + + (i * map->reg_stride + * data->irq_reg_stride), + &data->main_status_buf[i]); + if (ret) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + goto exit; + } + } + + /* Read sub registers with active IRQs */ + for (i = 0; i < chip->num_main_regs; i++) { + unsigned int b; + const unsigned long mreg = data->main_status_buf[i]; + + for_each_set_bit(b, &mreg, map->format.val_bytes * 8) { + if (i * map->format.val_bytes * 8 + b > + max_main_bits) + break; + ret = read_sub_irq_data(data, b); + + if (ret != 0) { + dev_err(map->dev, + "Failed to read IRQ status %d\n", + ret); + if (chip->runtime_pm) + pm_runtime_put(map->dev); + goto exit; + } + } + + } + } else if (!map->use_single_read && map->reg_stride == 1 && + data->irq_reg_stride == 1) { + u8 *buf8 = data->status_reg_buf; u16 *buf16 = data->status_reg_buf; u32 *buf32 = data->status_reg_buf; @@ -521,6 +603,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, if (!d) return -ENOMEM; + if (chip->num_main_regs) { + d->main_status_buf = kcalloc(chip->num_main_regs, + sizeof(unsigned int), + GFP_KERNEL); + + if (!d->main_status_buf) + goto err_alloc; + } + d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int), GFP_KERNEL); if (!d->status_buf) diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 89ad8dee6ad5..1fad9291f6aa 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -499,6 +499,28 @@ software_node_get_next_child(const struct fwnode_handle *fwnode, return &c->fwnode; } +static struct fwnode_handle * +software_node_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname) +{ + struct software_node *swnode = to_software_node(fwnode); + const struct property_entry *prop; + struct software_node *child; + + if (!swnode || list_empty(&swnode->children)) + return NULL; + + list_for_each_entry(child, &swnode->children, entry) { + prop = property_entry_get(child->properties, "name"); + if (!prop) + continue; + if (!strcmp(childname, prop->value.str)) { + kobject_get(&child->kobj); + return &child->fwnode; + } + } + return NULL; +} static const struct fwnode_operations software_node_ops = { .get = software_node_get, @@ -508,6 +530,7 @@ static const struct fwnode_operations software_node_ops = { .property_read_string_array = software_node_read_string_array, .get_parent = software_node_get_parent, .get_next_child_node = software_node_get_next_child, + .get_named_child_node = software_node_get_named_child_node, }; /* -------------------------------------------------------------------------- */ diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c index e7f145d662f0..f4b1d8e54daf 100644 --- a/drivers/base/test/test_async_driver_probe.c +++ 
b/drivers/base/test/test_async_driver_probe.c @@ -11,16 +11,47 @@ #include #include #include +#include +#include +#include #define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */ #define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2) +static atomic_t warnings, errors, timeout, async_completed; + static int test_probe(struct platform_device *pdev) { - dev_info(&pdev->dev, "sleeping for %d msecs in probe\n", - TEST_PROBE_DELAY); - msleep(TEST_PROBE_DELAY); - dev_info(&pdev->dev, "done sleeping\n"); + struct device *dev = &pdev->dev; + + /* + * Determine if we have hit the "timeout" limit for the test if we + * have then report it as an error, otherwise we wil sleep for the + * required amount of time and then report completion. + */ + if (atomic_read(&timeout)) { + dev_err(dev, "async probe took too long\n"); + atomic_inc(&errors); + } else { + dev_dbg(&pdev->dev, "sleeping for %d msecs in probe\n", + TEST_PROBE_DELAY); + msleep(TEST_PROBE_DELAY); + dev_dbg(&pdev->dev, "done sleeping\n"); + } + + /* + * Report NUMA mismatch if device node is set and we are not + * performing an async init on that node. + */ + if (dev->driver->probe_type == PROBE_PREFER_ASYNCHRONOUS) { + if (dev_to_node(dev) != numa_node_id()) { + dev_warn(dev, "NUMA node mismatch %d != %d\n", + dev_to_node(dev), numa_node_id()); + atomic_inc(&warnings); + } + + atomic_inc(&async_completed); + } return 0; } @@ -41,31 +72,64 @@ static struct platform_driver sync_driver = { .probe = test_probe, }; -static struct platform_device *async_dev_1, *async_dev_2; -static struct platform_device *sync_dev_1; +static struct platform_device *async_dev[NR_CPUS * 2]; +static struct platform_device *sync_dev[2]; + +static struct platform_device * +test_platform_device_register_node(char *name, int id, int nid) +{ + struct platform_device *pdev; + int ret; + + pdev = platform_device_alloc(name, id); + if (!pdev) + return NULL; + + if (nid != NUMA_NO_NODE) + set_dev_node(&pdev->dev, nid); + + ret = platform_device_add(pdev); + if (ret) { + platform_device_put(pdev); + return ERR_PTR(ret); + } + + return pdev; + +} static int __init test_async_probe_init(void) { - ktime_t calltime, delta; + struct platform_device **pdev = NULL; + int async_id = 0, sync_id = 0; unsigned long long duration; - int error; + ktime_t calltime, delta; + int err, nid, cpu; + + pr_info("registering first set of asynchronous devices...\n"); - pr_info("registering first asynchronous device...\n"); + for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &async_dev[async_id]; + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_devs; + } - async_dev_1 = platform_device_register_simple("test_async_driver", 1, - NULL, 0); - if (IS_ERR(async_dev_1)) { - error = PTR_ERR(async_dev_1); - pr_err("failed to create async_dev_1: %d\n", error); - return error; + async_id++; } pr_info("registering asynchronous driver...\n"); calltime = ktime_get(); - error = platform_driver_register(&async_driver); - if (error) { - pr_err("Failed to register async_driver: %d\n", error); - goto err_unregister_async_dev_1; + err = platform_driver_register(&async_driver); + if (err) { + pr_err("Failed to register async_driver: %d\n", err); + goto err_unregister_async_devs; } delta = ktime_sub(ktime_get(), calltime); @@ -73,86 +137,163 @@ static int __init test_async_probe_init(void) pr_info("registration took %lld msecs\n", duration); if 
(duration > TEST_PROBE_THRESHOLD) { pr_err("test failed: probe took too long\n"); - error = -ETIMEDOUT; + err = -ETIMEDOUT; goto err_unregister_async_driver; } - pr_info("registering second asynchronous device...\n"); + pr_info("registering second set of asynchronous devices...\n"); calltime = ktime_get(); - async_dev_2 = platform_device_register_simple("test_async_driver", 2, - NULL, 0); - if (IS_ERR(async_dev_2)) { - error = PTR_ERR(async_dev_2); - pr_err("failed to create async_dev_2: %d\n", error); - goto err_unregister_async_driver; + for_each_online_cpu(cpu) { + nid = cpu_to_node(cpu); + pdev = &sync_dev[sync_id]; + + *pdev = test_platform_device_register_node("test_async_driver", + async_id, + nid); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create async_dev: %d\n", err); + goto err_unregister_async_driver; + } + + async_id++; } delta = ktime_sub(ktime_get(), calltime); duration = (unsigned long long) ktime_to_ms(delta); - pr_info("registration took %lld msecs\n", duration); + dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); if (duration > TEST_PROBE_THRESHOLD) { - pr_err("test failed: probe took too long\n"); - error = -ETIMEDOUT; - goto err_unregister_async_dev_2; + dev_err(&(*pdev)->dev, + "test failed: probe took too long\n"); + err = -ETIMEDOUT; + goto err_unregister_async_driver; } - pr_info("registering synchronous driver...\n"); - error = platform_driver_register(&sync_driver); - if (error) { - pr_err("Failed to register async_driver: %d\n", error); - goto err_unregister_async_dev_2; + pr_info("registering first synchronous device...\n"); + nid = cpu_to_node(cpu); + pdev = &sync_dev[sync_id]; + + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_async_driver; } - pr_info("registering synchronous device...\n"); + sync_id++; + + pr_info("registering synchronous driver...\n"); calltime = ktime_get(); - sync_dev_1 = platform_device_register_simple("test_sync_driver", 1, - NULL, 0); - if (IS_ERR(sync_dev_1)) { - error = PTR_ERR(sync_dev_1); - pr_err("failed to create sync_dev_1: %d\n", error); - goto err_unregister_sync_driver; + err = platform_driver_register(&sync_driver); + if (err) { + pr_err("Failed to register async_driver: %d\n", err); + goto err_unregister_sync_devs; } delta = ktime_sub(ktime_get(), calltime); duration = (unsigned long long) ktime_to_ms(delta); pr_info("registration took %lld msecs\n", duration); if (duration < TEST_PROBE_THRESHOLD) { - pr_err("test failed: probe was too quick\n"); - error = -ETIMEDOUT; - goto err_unregister_sync_dev_1; + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; } - pr_info("completed successfully"); + pr_info("registering second synchronous device...\n"); + pdev = &sync_dev[sync_id]; + calltime = ktime_get(); - return 0; + *pdev = test_platform_device_register_node("test_sync_driver", + sync_id, + NUMA_NO_NODE); + if (IS_ERR(*pdev)) { + err = PTR_ERR(*pdev); + *pdev = NULL; + pr_err("failed to create sync_dev: %d\n", err); + goto err_unregister_sync_driver; + } -err_unregister_sync_dev_1: - platform_device_unregister(sync_dev_1); + sync_id++; -err_unregister_sync_driver: - platform_driver_unregister(&sync_driver); + delta = ktime_sub(ktime_get(), calltime); + duration = (unsigned long long) ktime_to_ms(delta); + 
dev_info(&(*pdev)->dev, + "registration took %lld msecs\n", duration); + if (duration < TEST_PROBE_THRESHOLD) { + dev_err(&(*pdev)->dev, + "test failed: probe was too quick\n"); + err = -ETIMEDOUT; + goto err_unregister_sync_driver; + } -err_unregister_async_dev_2: - platform_device_unregister(async_dev_2); + /* + * The async events should have completed while we were taking care + * of the synchronous events. We will now terminate any outstanding + * asynchronous probe calls remaining by forcing timeout and remove + * the driver before we return which should force the flush of the + * pending asynchronous probe calls. + * + * Otherwise if they completed without errors or warnings then + * report successful completion. + */ + if (atomic_read(&async_completed) != async_id) { + pr_err("async events still pending, forcing timeout\n"); + atomic_inc(&timeout); + err = -ETIMEDOUT; + } else if (!atomic_read(&errors) && !atomic_read(&warnings)) { + pr_info("completed successfully\n"); + return 0; + } +err_unregister_sync_driver: + platform_driver_unregister(&sync_driver); +err_unregister_sync_devs: + while (sync_id--) + platform_device_unregister(sync_dev[sync_id]); err_unregister_async_driver: platform_driver_unregister(&async_driver); +err_unregister_async_devs: + while (async_id--) + platform_device_unregister(async_dev[async_id]); + + /* + * If err is already set then count that as an additional error for + * the test. Otherwise we will report an invalid argument error and + * not count that as we should have reached here as a result of + * errors or warnings being reported by the probe routine. + */ + if (err) + atomic_inc(&errors); + else + err = -EINVAL; -err_unregister_async_dev_1: - platform_device_unregister(async_dev_1); + pr_err("Test failed with %d errors and %d warnings\n", + atomic_read(&errors), atomic_read(&warnings)); - return error; + return err; } module_init(test_async_probe_init); static void __exit test_async_probe_exit(void) { + int id = 2; + platform_driver_unregister(&async_driver); platform_driver_unregister(&sync_driver); - platform_device_unregister(async_dev_1); - platform_device_unregister(async_dev_2); - platform_device_unregister(sync_dev_1); + + while (id--) + platform_device_unregister(sync_dev[id]); + + id = NR_CPUS * 2; + while (id--) + platform_device_unregister(async_dev[id]); } module_exit(test_async_probe_exit); diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index a4aac370f21f..6eded32d1aac 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -10,13 +10,13 @@ #include #define bcma_err(bus, fmt, ...) \ - pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_err((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_warn(bus, fmt, ...) \ - pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_warn((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_info(bus, fmt, ...) \ - pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_info((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) #define bcma_debug(bus, fmt, ...) 
\ - pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__) + dev_dbg((bus)->dev, "bus%d: " fmt, (bus)->num, ##__VA_ARGS__) struct bcma_bus; @@ -33,7 +33,6 @@ int __init bcma_bus_early_register(struct bcma_bus *bus); int bcma_bus_suspend(struct bcma_bus *bus); int bcma_bus_resume(struct bcma_bus *bus); #endif -struct device *bcma_bus_get_host_dev(struct bcma_bus *bus); /* scan.c */ void bcma_detect_chip(struct bcma_bus *bus); diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 2c0ffb77d738..a5df3d111334 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -183,7 +183,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) chip->direction_input = bcma_gpio_direction_input; chip->direction_output = bcma_gpio_direction_output; chip->owner = THIS_MODULE; - chip->parent = bcma_bus_get_host_dev(bus); + chip->parent = bus->dev; #if IS_BUILTIN(CONFIG_OF) chip->of_node = cc->core->dev.of_node; #endif diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 63410ecfe640..f52239feb4cb 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -196,6 +196,8 @@ static int bcma_host_pci_probe(struct pci_dev *dev, goto err_pci_release_regions; } + bus->dev = &dev->dev; + /* Map MMIO */ err = -ENOMEM; bus->mmio = pci_iomap(dev, 0, ~0UL); diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c index 2dce34789329..c8073b509a2b 100644 --- a/drivers/bcma/host_soc.c +++ b/drivers/bcma/host_soc.c @@ -179,7 +179,6 @@ int __init bcma_host_soc_register(struct bcma_soc *soc) /* Host specific */ bus->hosttype = BCMA_HOSTTYPE_SOC; bus->ops = &bcma_host_soc_ops; - bus->host_pdev = NULL; /* Initialize struct, detect chip */ bcma_init_bus(bus); @@ -213,6 +212,8 @@ static int bcma_host_soc_probe(struct platform_device *pdev) if (!bus) return -ENOMEM; + bus->dev = dev; + /* Map MMIO */ bus->mmio = of_iomap(np, 0); if (!bus->mmio) @@ -221,7 +222,6 @@ static int bcma_host_soc_probe(struct platform_device *pdev) /* Host specific */ bus->hosttype = BCMA_HOSTTYPE_SOC; bus->ops = &bcma_host_soc_ops; - bus->host_pdev = pdev; /* Initialize struct, detect chip */ bcma_init_bus(bus); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index fc1f4acdd189..6535614a7dc1 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -223,8 +223,8 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num) mips_irq = bcma_core_mips_irq(core); return mips_irq <= 4 ? 
mips_irq + 2 : 0; } - if (bus->host_pdev) - return bcma_of_get_irq(&bus->host_pdev->dev, core, num); + if (bus->dev) + return bcma_of_get_irq(bus->dev, core, num); return 0; case BCMA_HOSTTYPE_SDIO: return 0; @@ -239,18 +239,18 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) core->dev.release = bcma_release_core_dev; core->dev.bus = &bcma_bus_type; dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index); - core->dev.parent = bcma_bus_get_host_dev(bus); - if (core->dev.parent) - bcma_of_fill_device(core->dev.parent, core); + core->dev.parent = bus->dev; + if (bus->dev) + bcma_of_fill_device(bus->dev, core); switch (bus->hosttype) { case BCMA_HOSTTYPE_PCI: - core->dma_dev = &bus->host_pci->dev; + core->dma_dev = bus->dev; core->irq = bus->host_pci->irq; break; case BCMA_HOSTTYPE_SOC: - if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) { - core->dma_dev = &bus->host_pdev->dev; + if (IS_ENABLED(CONFIG_OF) && bus->dev) { + core->dma_dev = bus->dev; } else { core->dev.dma_mask = &core->dev.coherent_dma_mask; core->dma_dev = &core->dev; @@ -261,28 +261,6 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core) } } -struct device *bcma_bus_get_host_dev(struct bcma_bus *bus) -{ - switch (bus->hosttype) { - case BCMA_HOSTTYPE_PCI: - if (bus->host_pci) - return &bus->host_pci->dev; - else - return NULL; - case BCMA_HOSTTYPE_SOC: - if (bus->host_pdev) - return &bus->host_pdev->dev; - else - return NULL; - case BCMA_HOSTTYPE_SDIO: - if (bus->host_sdio) - return &bus->host_sdio->dev; - else - return NULL; - } - return NULL; -} - void bcma_init_bus(struct bcma_bus *bus) { mutex_lock(&bcma_buses_mutex); @@ -402,7 +380,6 @@ int bcma_bus_register(struct bcma_bus *bus) { int err; struct bcma_device *core; - struct device *dev; /* Scan for devices (cores) */ err = bcma_bus_scan(bus); @@ -425,10 +402,8 @@ int bcma_bus_register(struct bcma_bus *bus) bcma_core_pci_early_init(&bus->drv_pci[0]); } - dev = bcma_bus_get_host_dev(bus); - if (dev) { - of_platform_default_populate(dev->of_node, NULL, dev); - } + if (bus->dev) + of_platform_default_populate(bus->dev->of_node, NULL, bus->dev); /* Cores providing flash access go before SPROM init */ list_for_each_entry(core, &bus->cores, list) { diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 6f2856c6d0f2..95f608d1a098 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2230,7 +2230,6 @@ static void floppy_end_request(struct request *req, blk_status_t error) static void request_done(int uptodate) { struct request *req = current_req; - struct request_queue *q; int block; char msg[sizeof("request done ") + sizeof(int) * 3]; @@ -2243,8 +2242,6 @@ static void request_done(int uptodate) return; } - q = req->q; - if (uptodate) { /* maintain values for invalidation on geometry * change */ @@ -4075,7 +4072,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { if (lock_fdc(drive)) - return -EINTR; + return 0; poll_drive(false, 0); process_fd_request(); } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index cf5538942834..1e6edd568214 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -511,21 +511,22 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, loff_t pos, bool rw) { struct iov_iter iter; + struct req_iterator rq_iter; struct bio_vec *bvec; struct request *rq = blk_mq_rq_from_pdu(cmd); struct bio *bio = rq->bio; struct file *file = lo->lo_backing_file; + struct bio_vec tmp; 
unsigned int offset; - int segments = 0; + int nr_bvec = 0; int ret; + rq_for_each_bvec(tmp, rq, rq_iter) + nr_bvec++; + if (rq->bio != rq->biotail) { - struct req_iterator iter; - struct bio_vec tmp; - __rq_for_each_bio(bio, rq) - segments += bio_segments(bio); - bvec = kmalloc_array(segments, sizeof(struct bio_vec), + bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), GFP_NOIO); if (!bvec) return -EIO; @@ -534,10 +535,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, /* * The bios of the request may be started from the middle of * the 'bvec' because of bio splitting, so we can't directly - * copy bio->bi_iov_vec to new bvec. The rq_for_each_segment + * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec * API will take care of all details for us. */ - rq_for_each_segment(tmp, rq, iter) { + rq_for_each_bvec(tmp, rq, rq_iter) { *bvec = tmp; bvec++; } @@ -551,11 +552,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, */ offset = bio->bi_iter.bi_bvec_done; bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); - segments = bio_segments(bio); } atomic_set(&cmd->ref, 2); - iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq)); + iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq)); iter.iov_offset = offset; cmd->iocb.ki_pos = pos; @@ -1089,16 +1089,12 @@ static int __loop_clr_fd(struct loop_device *lo, bool release) kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); } mapping_set_gfp_mask(filp->f_mapping, gfp); - lo->lo_state = Lo_unbound; /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); blk_mq_unfreeze_queue(lo->lo_queue); partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev; lo_number = lo->lo_number; - lo->lo_flags = 0; - if (!part_shift) - lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; loop_unprepare_queue(lo); out_unlock: mutex_unlock(&loop_ctl_mutex); @@ -1115,11 +1111,29 @@ out_unlock: err = __blkdev_reread_part(bdev); else err = blkdev_reread_part(bdev); - pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", - __func__, lo_number, err); + if (err) + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", + __func__, lo_number, err); /* Device is gone, no point in returning error */ err = 0; } + + /* + * lo->lo_state is set to Lo_unbound here after above partscan has + * finished. + * + * There cannot be anybody else entering __loop_clr_fd() as + * lo->lo_backing_file is already cleared and Lo_rundown state + * protects us from all the other places trying to change the 'lo' + * device. + */ + mutex_lock(&loop_ctl_mutex); + lo->lo_flags = 0; + if (!part_shift) + lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; + lo->lo_state = Lo_unbound; + mutex_unlock(&loop_ctl_mutex); + /* * Need not hold loop_ctl_mutex to fput backing file. 
* Calling fput holding loop_ctl_mutex triggers a circular @@ -1937,7 +1951,7 @@ static int loop_add(struct loop_device **l, int i) lo->tag_set.queue_depth = 128; lo->tag_set.numa_node = NUMA_NO_NODE; lo->tag_set.cmd_size = sizeof(struct loop_cmd); - lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; lo->tag_set.driver_data = lo; err = blk_mq_alloc_tag_set(&lo->tag_set); diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 88e8440e75c3..83302ecdc8db 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "mtip32xx.h" #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) @@ -1415,7 +1416,7 @@ static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba, WARN_ON(sizeof(struct mtip_trim) > ATA_SECT_SIZE); /* Allocate a DMA buffer for the trim structure */ - buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr, + buf = dma_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr, GFP_KERNEL); if (!buf) return BLK_STS_RESOURCE; @@ -1452,7 +1453,7 @@ static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba, MTIP_TRIM_TIMEOUT_MS) < 0) ret = BLK_STS_IOERR; - dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr); + dma_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr); return ret; } @@ -1655,7 +1656,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, if (!user_buffer) return -EFAULT; - buf = dmam_alloc_coherent(&port->dd->pdev->dev, + buf = dma_alloc_coherent(&port->dd->pdev->dev, ATA_SECT_SIZE * xfer_sz, &dma_addr, GFP_KERNEL); @@ -1733,7 +1734,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, } exit_drive_command: if (buf) - dmam_free_coherent(&port->dd->pdev->dev, + dma_free_coherent(&port->dd->pdev->dev, ATA_SECT_SIZE * xfer_sz, buf, dma_addr); return rv; } @@ -2837,11 +2838,11 @@ static void mtip_dma_free(struct driver_data *dd) struct mtip_port *port = dd->port; if (port->block1) - dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, + dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, port->block1, port->block1_dma); if (port->command_list) { - dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, + dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, port->command_list, port->command_list_dma); } } @@ -2860,7 +2861,7 @@ static int mtip_dma_alloc(struct driver_data *dd) /* Allocate dma memory for RX Fis, Identify, and Sector Bufffer */ port->block1 = - dmam_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, + dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, &port->block1_dma, GFP_KERNEL); if (!port->block1) return -ENOMEM; @@ -2868,10 +2869,10 @@ static int mtip_dma_alloc(struct driver_data *dd) /* Allocate dma memory for command list */ port->command_list = - dmam_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, + dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, &port->command_list_dma, GFP_KERNEL); if (!port->command_list) { - dmam_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, + dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ, port->block1, port->block1_dma); port->block1 = NULL; port->block1_dma = 0; @@ -3056,13 +3057,8 @@ static int mtip_hw_init(struct driver_data *dd) mtip_start_port(dd->port); /* Setup the ISR and enable interrupts. 
*/ - rv = devm_request_irq(&dd->pdev->dev, - dd->pdev->irq, - mtip_irq_handler, - IRQF_SHARED, - dev_driver_string(&dd->pdev->dev), - dd); - + rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED, + dev_driver_string(&dd->pdev->dev), dd); if (rv) { dev_err(&dd->pdev->dev, "Unable to allocate IRQ %d\n", dd->pdev->irq); @@ -3090,7 +3086,7 @@ out3: /* Release the IRQ. */ irq_set_affinity_hint(dd->pdev->irq, NULL); - devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); + free_irq(dd->pdev->irq, dd); out2: mtip_deinit_port(dd->port); @@ -3145,7 +3141,7 @@ static int mtip_hw_exit(struct driver_data *dd) /* Release the IRQ. */ irq_set_affinity_hint(dd->pdev->irq, NULL); - devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); + free_irq(dd->pdev->irq, dd); msleep(1000); /* Free dma regions */ @@ -3609,8 +3605,8 @@ static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq, if (!cmd->command) return; - dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, - cmd->command, cmd->command_dma); + dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command, + cmd->command_dma); } static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq, @@ -3619,7 +3615,7 @@ static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq, struct driver_data *dd = set->driver_data; struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); - cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, + cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, &cmd->command_dma, GFP_KERNEL); if (!cmd->command) return -ENOMEM; @@ -4018,9 +4014,9 @@ static int get_least_used_cpu_on_node(int node) /* Helper for selecting a node in round robin mode */ static inline int mtip_get_next_rr_node(void) { - static int next_node = -1; + static int next_node = NUMA_NO_NODE; - if (next_node == -1) { + if (next_node == NUMA_NO_NODE) { next_node = first_online_node; return next_node; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 7c9a949e876b..90ba9f4c03f3 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1571,7 +1571,7 @@ static int nbd_dev_add(int index) nbd->tag_set.numa_node = NUMA_NO_NODE; nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | - BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; + BLK_MQ_F_BLOCKING; nbd->tag_set.driver_data = nbd; err = blk_mq_alloc_tag_set(&nbd->tag_set); @@ -2118,8 +2118,7 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) } nla_nest_end(reply, dev_list); genlmsg_end(reply, reply_head); - genlmsg_reply(reply, info); - ret = 0; + ret = genlmsg_reply(reply, info); out: mutex_unlock(&nbd_index_mutex); return ret; diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c index 62c9654b9ce8..417a9f15c116 100644 --- a/drivers/block/null_blk_main.c +++ b/drivers/block/null_blk_main.c @@ -1104,7 +1104,7 @@ static int null_handle_bio(struct nullb_cmd *cmd) len = bvec.bv_len; err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, op_is_write(bio_op(bio)), sector, - bio_op(bio) & REQ_FUA); + bio->bi_opf & REQ_FUA); if (err) { spin_unlock_irq(&nullb->lock); return err; @@ -1678,7 +1678,6 @@ static int null_add_dev(struct nullb_device *dev) if (dev->cache_size > 0) { set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); blk_queue_write_cache(nullb->q, true, true); - blk_queue_flush_queueable(nullb->q, true); } if (dev->zoned) { diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1e92b61d0bd5..4ba967d65cf9 100644 --- a/drivers/block/rbd.c +++ 
b/drivers/block/rbd.c @@ -115,12 +115,14 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_FEATURE_LAYERING (1ULL<<0) #define RBD_FEATURE_STRIPINGV2 (1ULL<<1) #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2) +#define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5) #define RBD_FEATURE_DATA_POOL (1ULL<<7) #define RBD_FEATURE_OPERATIONS (1ULL<<8) #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \ RBD_FEATURE_STRIPINGV2 | \ RBD_FEATURE_EXCLUSIVE_LOCK | \ + RBD_FEATURE_DEEP_FLATTEN | \ RBD_FEATURE_DATA_POOL | \ RBD_FEATURE_OPERATIONS) @@ -214,28 +216,40 @@ enum obj_operation_type { OBJ_OP_READ = 1, OBJ_OP_WRITE, OBJ_OP_DISCARD, + OBJ_OP_ZEROOUT, }; /* * Writes go through the following state machine to deal with * layering: * - * need copyup - * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP - * | ^ | - * v \------------------------------/ - * done - * ^ - * | - * RBD_OBJ_WRITE_FLAT + * . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . . + * . | . + * . v . + * . RBD_OBJ_WRITE_READ_FROM_PARENT. . . . + * . | . . + * . v v (deep-copyup . + * (image . RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC . not needed) . + * flattened) v | . . + * . v . . + * . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . . (copyup . + * | not needed) v + * v . + * done . . . . . . . . . . . . . . . . . . + * ^ + * | + * RBD_OBJ_WRITE_FLAT * * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether - * there is a parent or not. + * assert_exists guard is needed or not (in some cases it's not needed + * even if there is a parent). */ enum rbd_obj_write_state { RBD_OBJ_WRITE_FLAT = 1, RBD_OBJ_WRITE_GUARD, - RBD_OBJ_WRITE_COPYUP, + RBD_OBJ_WRITE_READ_FROM_PARENT, + RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC, + RBD_OBJ_WRITE_COPYUP_OPS, }; struct rbd_obj_request { @@ -291,7 +305,6 @@ struct rbd_img_request { int result; /* first nonzero obj_request result */ struct list_head object_extents; /* obj_req.ex structs */ - u32 obj_request_count; u32 pending_count; struct kref kref; @@ -421,6 +434,10 @@ static DEFINE_IDA(rbd_dev_id_ida); static struct workqueue_struct *rbd_wq; +static struct ceph_snap_context rbd_empty_snapc = { + .nref = REFCOUNT_INIT(1), +}; + /* * single-major requires >= 0.75 version of userspace rbd utility. 
*/ @@ -428,14 +445,13 @@ static bool single_major = true; module_param(single_major, bool, 0444); MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)"); -static ssize_t rbd_add(struct bus_type *bus, const char *buf, - size_t count); -static ssize_t rbd_remove(struct bus_type *bus, const char *buf, - size_t count); -static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf, - size_t count); -static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, - size_t count); +static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count); +static ssize_t remove_store(struct bus_type *bus, const char *buf, + size_t count); +static ssize_t add_single_major_store(struct bus_type *bus, const char *buf, + size_t count); +static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf, + size_t count); static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); static int rbd_dev_id_to_minor(int dev_id) @@ -464,16 +480,16 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev) return is_lock_owner; } -static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf) +static ssize_t supported_features_show(struct bus_type *bus, char *buf) { return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED); } -static BUS_ATTR(add, 0200, NULL, rbd_add); -static BUS_ATTR(remove, 0200, NULL, rbd_remove); -static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major); -static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major); -static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL); +static BUS_ATTR_WO(add); +static BUS_ATTR_WO(remove); +static BUS_ATTR_WO(add_single_major); +static BUS_ATTR_WO(remove_single_major); +static BUS_ATTR_RO(supported_features); static struct attribute *rbd_bus_attrs[] = { &bus_attr_add.attr, @@ -733,6 +749,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) */ enum { Opt_queue_depth, + Opt_alloc_size, Opt_lock_timeout, Opt_last_int, /* int args above */ @@ -749,6 +766,7 @@ enum { static match_table_t rbd_opts_tokens = { {Opt_queue_depth, "queue_depth=%d"}, + {Opt_alloc_size, "alloc_size=%d"}, {Opt_lock_timeout, "lock_timeout=%d"}, /* int args above */ {Opt_pool_ns, "_pool_ns=%s"}, @@ -765,6 +783,7 @@ static match_table_t rbd_opts_tokens = { struct rbd_options { int queue_depth; + int alloc_size; unsigned long lock_timeout; bool read_only; bool lock_on_read; @@ -773,6 +792,7 @@ struct rbd_options { }; #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ +#define RBD_ALLOC_SIZE_DEFAULT (64 * 1024) #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */ #define RBD_READ_ONLY_DEFAULT false #define RBD_LOCK_ON_READ_DEFAULT false @@ -812,6 +832,17 @@ static int parse_rbd_opts_token(char *c, void *private) } pctx->opts->queue_depth = intval; break; + case Opt_alloc_size: + if (intval < 1) { + pr_err("alloc_size out of range\n"); + return -EINVAL; + } + if (!is_power_of_2(intval)) { + pr_err("alloc_size must be a power of 2\n"); + return -EINVAL; + } + pctx->opts->alloc_size = intval; + break; case Opt_lock_timeout: /* 0 is "wait forever" (i.e. 
infinite timeout) */ if (intval < 0 || intval > INT_MAX / 1000) { @@ -858,6 +889,8 @@ static char* obj_op_name(enum obj_operation_type op_type) return "write"; case OBJ_OP_DISCARD: return "discard"; + case OBJ_OP_ZEROOUT: + return "zeroout"; default: return "???"; } @@ -1345,7 +1378,6 @@ static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, /* Image request now owns object's original reference */ obj_request->img_request = img_request; - img_request->obj_request_count++; img_request->pending_count++; dout("%s: img %p obj %p\n", __func__, img_request, obj_request); } @@ -1355,8 +1387,6 @@ static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request, { dout("%s: img %p obj %p\n", __func__, img_request, obj_request); list_del(&obj_request->ex.oe_item); - rbd_assert(img_request->obj_request_count > 0); - img_request->obj_request_count--; rbd_assert(obj_request->img_request == img_request); rbd_obj_request_put(obj_request); } @@ -1410,6 +1440,19 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req) rbd_dev->layout.object_size; } +/* + * Must be called after rbd_obj_calc_img_extents(). + */ +static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req) +{ + if (!obj_req->num_img_extents || + (rbd_obj_is_entire(obj_req) && + !obj_req->img_request->snapc->num_snaps)) + return false; + + return true; +} + static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req) { return ceph_file_extents_bytes(obj_req->img_extents, @@ -1423,6 +1466,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req) return false; case OBJ_OP_WRITE: case OBJ_OP_DISCARD: + case OBJ_OP_ZEROOUT: return true; default: BUG(); @@ -1471,18 +1515,16 @@ static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request) } static struct ceph_osd_request * -rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops) +__rbd_osd_req_create(struct rbd_obj_request *obj_req, + struct ceph_snap_context *snapc, unsigned int num_ops) { - struct rbd_img_request *img_req = obj_req->img_request; - struct rbd_device *rbd_dev = img_req->rbd_dev; + struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct ceph_osd_request *req; const char *name_format = rbd_dev->image_format == 1 ? RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT; - req = ceph_osdc_alloc_request(osdc, - (rbd_img_is_write(img_req) ? 
img_req->snapc : NULL), - num_ops, false, GFP_NOIO); + req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO); if (!req) return NULL; @@ -1507,6 +1549,13 @@ err_req: return NULL; } +static struct ceph_osd_request * +rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops) +{ + return __rbd_osd_req_create(obj_req, obj_req->img_request->snapc, + num_ops); +} + static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req) { ceph_osdc_put_request(osd_req); @@ -1672,7 +1721,6 @@ static void rbd_img_request_destroy(struct kref *kref) for_each_obj_request_safe(img_request, obj_request, next_obj_request) rbd_img_obj_request_del(img_request, obj_request); - rbd_assert(img_request->obj_request_count == 0); if (img_request_layered_test(img_request)) { img_request_layered_clear(img_request); @@ -1755,7 +1803,7 @@ static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which) static int rbd_obj_setup_read(struct rbd_obj_request *obj_req) { - obj_req->osd_req = rbd_osd_req_create(obj_req, 1); + obj_req->osd_req = __rbd_osd_req_create(obj_req, NULL, 1); if (!obj_req->osd_req) return -ENOMEM; @@ -1791,6 +1839,11 @@ static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req, return 0; } +static int count_write_ops(struct rbd_obj_request *obj_req) +{ + return 2; /* setallochint + write/writefull */ +} + static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req, unsigned int which) { @@ -1817,6 +1870,7 @@ static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req, static int rbd_obj_setup_write(struct rbd_obj_request *obj_req) { unsigned int num_osd_ops, which = 0; + bool need_guard; int ret; /* reverse map the entire object onto the parent */ @@ -1824,47 +1878,112 @@ static int rbd_obj_setup_write(struct rbd_obj_request *obj_req) if (ret) return ret; - if (obj_req->num_img_extents) { - obj_req->write_state = RBD_OBJ_WRITE_GUARD; - num_osd_ops = 3; /* stat + setallochint + write/writefull */ - } else { - obj_req->write_state = RBD_OBJ_WRITE_FLAT; - num_osd_ops = 2; /* setallochint + write/writefull */ - } + need_guard = rbd_obj_copyup_enabled(obj_req); + num_osd_ops = need_guard + count_write_ops(obj_req); obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops); if (!obj_req->osd_req) return -ENOMEM; - if (obj_req->num_img_extents) { + if (need_guard) { ret = __rbd_obj_setup_stat(obj_req, which++); if (ret) return ret; + + obj_req->write_state = RBD_OBJ_WRITE_GUARD; + } else { + obj_req->write_state = RBD_OBJ_WRITE_FLAT; } __rbd_obj_setup_write(obj_req, which); return 0; } -static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req, +static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req) +{ + return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE : + CEPH_OSD_OP_ZERO; +} + +static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req) +{ + struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev; + u64 off = obj_req->ex.oe_off; + u64 next_off = obj_req->ex.oe_off + obj_req->ex.oe_len; + int ret; + + /* + * Align the range to alloc_size boundary and punt on discards + * that are too small to free up any space. + * + * alloc_size == object_size && is_tail() is a special case for + * filestore with filestore_punch_hole = false, needed to allow + * truncate (in addition to delete). 
+ */ + if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size || + !rbd_obj_is_tail(obj_req)) { + off = round_up(off, rbd_dev->opts->alloc_size); + next_off = round_down(next_off, rbd_dev->opts->alloc_size); + if (off >= next_off) + return 1; + } + + /* reverse map the entire object onto the parent */ + ret = rbd_obj_calc_img_extents(obj_req, true); + if (ret) + return ret; + + obj_req->osd_req = rbd_osd_req_create(obj_req, 1); + if (!obj_req->osd_req) + return -ENOMEM; + + if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) { + osd_req_op_init(obj_req->osd_req, 0, CEPH_OSD_OP_DELETE, 0); + } else { + dout("%s %p %llu~%llu -> %llu~%llu\n", __func__, + obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len, + off, next_off - off); + osd_req_op_extent_init(obj_req->osd_req, 0, + truncate_or_zero_opcode(obj_req), + off, next_off - off, 0, 0); + } + + obj_req->write_state = RBD_OBJ_WRITE_FLAT; + rbd_osd_req_format_write(obj_req); + return 0; +} + +static int count_zeroout_ops(struct rbd_obj_request *obj_req) +{ + int num_osd_ops; + + if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents && + !rbd_obj_copyup_enabled(obj_req)) + num_osd_ops = 2; /* create + truncate */ + else + num_osd_ops = 1; /* delete/truncate/zero */ + + return num_osd_ops; +} + +static void __rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req, unsigned int which) { u16 opcode; if (rbd_obj_is_entire(obj_req)) { if (obj_req->num_img_extents) { - osd_req_op_init(obj_req->osd_req, which++, - CEPH_OSD_OP_CREATE, 0); + if (!rbd_obj_copyup_enabled(obj_req)) + osd_req_op_init(obj_req->osd_req, which++, + CEPH_OSD_OP_CREATE, 0); opcode = CEPH_OSD_OP_TRUNCATE; } else { osd_req_op_init(obj_req->osd_req, which++, CEPH_OSD_OP_DELETE, 0); opcode = 0; } - } else if (rbd_obj_is_tail(obj_req)) { - opcode = CEPH_OSD_OP_TRUNCATE; } else { - opcode = CEPH_OSD_OP_ZERO; + opcode = truncate_or_zero_opcode(obj_req); } if (opcode) @@ -1876,9 +1995,10 @@ static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req, rbd_osd_req_format_write(obj_req); } -static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req) +static int rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req) { unsigned int num_osd_ops, which = 0; + bool need_guard; int ret; /* reverse map the entire object onto the parent */ @@ -1886,33 +2006,24 @@ static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req) if (ret) return ret; - if (rbd_obj_is_entire(obj_req)) { - obj_req->write_state = RBD_OBJ_WRITE_FLAT; - if (obj_req->num_img_extents) - num_osd_ops = 2; /* create + truncate */ - else - num_osd_ops = 1; /* delete */ - } else { - if (obj_req->num_img_extents) { - obj_req->write_state = RBD_OBJ_WRITE_GUARD; - num_osd_ops = 2; /* stat + truncate/zero */ - } else { - obj_req->write_state = RBD_OBJ_WRITE_FLAT; - num_osd_ops = 1; /* truncate/zero */ - } - } + need_guard = rbd_obj_copyup_enabled(obj_req); + num_osd_ops = need_guard + count_zeroout_ops(obj_req); obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops); if (!obj_req->osd_req) return -ENOMEM; - if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) { + if (need_guard) { ret = __rbd_obj_setup_stat(obj_req, which++); if (ret) return ret; + + obj_req->write_state = RBD_OBJ_WRITE_GUARD; + } else { + obj_req->write_state = RBD_OBJ_WRITE_FLAT; } - __rbd_obj_setup_discard(obj_req, which); + __rbd_obj_setup_zeroout(obj_req, which); return 0; } @@ -1923,10 +2034,10 @@ static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req) */ static int __rbd_img_fill_request(struct 
rbd_img_request *img_req) { - struct rbd_obj_request *obj_req; + struct rbd_obj_request *obj_req, *next_obj_req; int ret; - for_each_obj_request(img_req, obj_req) { + for_each_obj_request_safe(img_req, obj_req, next_obj_req) { switch (img_req->op_type) { case OBJ_OP_READ: ret = rbd_obj_setup_read(obj_req); @@ -1937,11 +2048,20 @@ static int __rbd_img_fill_request(struct rbd_img_request *img_req) case OBJ_OP_DISCARD: ret = rbd_obj_setup_discard(obj_req); break; + case OBJ_OP_ZEROOUT: + ret = rbd_obj_setup_zeroout(obj_req); + break; default: rbd_assert(0); } - if (ret) + if (ret < 0) return ret; + if (ret > 0) { + img_req->xferred += obj_req->ex.oe_len; + img_req->pending_count--; + rbd_img_obj_request_del(img_req, obj_req); + continue; + } ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO); if (ret) @@ -2357,21 +2477,19 @@ static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes) return true; } -static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) +#define MODS_ONLY U32_MAX + +static int rbd_obj_issue_copyup_empty_snapc(struct rbd_obj_request *obj_req, + u32 bytes) { - unsigned int num_osd_ops = obj_req->osd_req->r_num_ops; int ret; dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes); rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT); + rbd_assert(bytes > 0 && bytes != MODS_ONLY); rbd_osd_req_destroy(obj_req->osd_req); - /* - * Create a copyup request with the same number of OSD ops as - * the original request. The original request was stat + op(s), - * the new copyup request will be copyup + the same op(s). - */ - obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops); + obj_req->osd_req = __rbd_osd_req_create(obj_req, &rbd_empty_snapc, 1); if (!obj_req->osd_req) return -ENOMEM; @@ -2379,27 +2497,65 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) if (ret) return ret; - /* - * Only send non-zero copyup data to save some I/O and network - * bandwidth -- zero copyup data is equivalent to the object not - * existing. 
- */ - if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) { - dout("%s obj_req %p detected zeroes\n", __func__, obj_req); - bytes = 0; - } osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0, obj_req->copyup_bvecs, obj_req->copyup_bvec_count, bytes); + rbd_osd_req_format_write(obj_req); - switch (obj_req->img_request->op_type) { + ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO); + if (ret) + return ret; + + rbd_obj_request_submit(obj_req); + return 0; +} + +static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes) +{ + struct rbd_img_request *img_req = obj_req->img_request; + unsigned int num_osd_ops = (bytes != MODS_ONLY); + unsigned int which = 0; + int ret; + + dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes); + rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT || + obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_CALL); + rbd_osd_req_destroy(obj_req->osd_req); + + switch (img_req->op_type) { case OBJ_OP_WRITE: - __rbd_obj_setup_write(obj_req, 1); + num_osd_ops += count_write_ops(obj_req); break; - case OBJ_OP_DISCARD: - rbd_assert(!rbd_obj_is_entire(obj_req)); - __rbd_obj_setup_discard(obj_req, 1); + case OBJ_OP_ZEROOUT: + num_osd_ops += count_zeroout_ops(obj_req); + break; + default: + rbd_assert(0); + } + + obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops); + if (!obj_req->osd_req) + return -ENOMEM; + + if (bytes != MODS_ONLY) { + ret = osd_req_op_cls_init(obj_req->osd_req, which, "rbd", + "copyup"); + if (ret) + return ret; + + osd_req_op_cls_request_data_bvecs(obj_req->osd_req, which++, + obj_req->copyup_bvecs, + obj_req->copyup_bvec_count, + bytes); + } + + switch (img_req->op_type) { + case OBJ_OP_WRITE: + __rbd_obj_setup_write(obj_req, which); + break; + case OBJ_OP_ZEROOUT: + __rbd_obj_setup_zeroout(obj_req, which); break; default: rbd_assert(0); @@ -2413,6 +2569,33 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) return 0; } +static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes) +{ + /* + * Only send non-zero copyup data to save some I/O and network + * bandwidth -- zero copyup data is equivalent to the object not + * existing. + */ + if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) { + dout("%s obj_req %p detected zeroes\n", __func__, obj_req); + bytes = 0; + } + + if (obj_req->img_request->snapc->num_snaps && bytes > 0) { + /* + * Send a copyup request with an empty snapshot context to + * deep-copyup the object through all existing snapshots. + * A second request with the current snapshot context will be + * sent for the actual modification. + */ + obj_req->write_state = RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC; + return rbd_obj_issue_copyup_empty_snapc(obj_req, bytes); + } + + obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS; + return rbd_obj_issue_copyup_ops(obj_req, bytes); +} + static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap) { u32 i; @@ -2452,22 +2635,19 @@ static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req) if (!obj_req->num_img_extents) { /* * The overlap has become 0 (most likely because the - * image has been flattened). Use rbd_obj_issue_copyup() - * to re-submit the original write request -- the copyup - * operation itself will be a no-op, since someone must - * have populated the child object while we weren't - * looking. Move to WRITE_FLAT state as we'll be done - * with the operation once the null copyup completes. + * image has been flattened). 
Re-submit the original write + * request -- pass MODS_ONLY since the copyup isn't needed + * anymore. */ - obj_req->write_state = RBD_OBJ_WRITE_FLAT; - return rbd_obj_issue_copyup(obj_req, 0); + obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS; + return rbd_obj_issue_copyup_ops(obj_req, MODS_ONLY); } ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req)); if (ret) return ret; - obj_req->write_state = RBD_OBJ_WRITE_COPYUP; + obj_req->write_state = RBD_OBJ_WRITE_READ_FROM_PARENT; return rbd_obj_read_from_parent(obj_req); } @@ -2475,7 +2655,6 @@ static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req) { int ret; -again: switch (obj_req->write_state) { case RBD_OBJ_WRITE_GUARD: rbd_assert(!obj_req->xferred); @@ -2494,6 +2673,7 @@ again: } /* fall through */ case RBD_OBJ_WRITE_FLAT: + case RBD_OBJ_WRITE_COPYUP_OPS: if (!obj_req->result) /* * There is no such thing as a successful short @@ -2501,13 +2681,24 @@ again: */ obj_req->xferred = obj_req->ex.oe_len; return true; - case RBD_OBJ_WRITE_COPYUP: - obj_req->write_state = RBD_OBJ_WRITE_GUARD; + case RBD_OBJ_WRITE_READ_FROM_PARENT: if (obj_req->result) - goto again; + return true; rbd_assert(obj_req->xferred); ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred); + if (ret) { + obj_req->result = ret; + obj_req->xferred = 0; + return true; + } + return false; + case RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC: + if (obj_req->result) + return true; + + obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS; + ret = rbd_obj_issue_copyup_ops(obj_req, MODS_ONLY); if (ret) { obj_req->result = ret; return true; @@ -2529,6 +2720,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req) case OBJ_OP_WRITE: return rbd_obj_handle_write(obj_req); case OBJ_OP_DISCARD: + case OBJ_OP_ZEROOUT: if (rbd_obj_handle_write(obj_req)) { /* * Hide -ENOENT from delete/truncate/zero -- discarding @@ -3641,9 +3833,11 @@ static void rbd_queue_workfn(struct work_struct *work) switch (req_op(rq)) { case REQ_OP_DISCARD: - case REQ_OP_WRITE_ZEROES: op_type = OBJ_OP_DISCARD; break; + case REQ_OP_WRITE_ZEROES: + op_type = OBJ_OP_ZEROOUT; + break; case REQ_OP_WRITE: op_type = OBJ_OP_WRITE; break; @@ -3723,12 +3917,12 @@ static void rbd_queue_workfn(struct work_struct *work) img_request->rq = rq; snapc = NULL; /* img_request consumes a ref */ - if (op_type == OBJ_OP_DISCARD) + if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT) result = rbd_img_fill_nodata(img_request, offset, length); else result = rbd_img_fill_from_bio(img_request, offset, length, rq->bio); - if (result) + if (result || !img_request->pending_count) goto err_img_request; rbd_img_request_submit(img_request); @@ -3988,7 +4182,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) rbd_dev->tag_set.ops = &rbd_mq_ops; rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; rbd_dev->tag_set.numa_node = NUMA_NO_NODE; - rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; rbd_dev->tag_set.nr_hw_queues = 1; rbd_dev->tag_set.cmd_size = sizeof(struct work_struct); @@ -5389,6 +5583,7 @@ static int rbd_add_parse_args(const char *buf, pctx.opts->read_only = RBD_READ_ONLY_DEFAULT; pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; + pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT; pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT; pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT; pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT; @@ -5796,14 +5991,6 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) ret = 
rbd_dev_v2_parent_info(rbd_dev); if (ret) goto err_out_probe; - - /* - * Need to warn users if this image is the one being - * mapped and has a parent. - */ - if (!depth && rbd_dev->parent_spec) - rbd_warn(rbd_dev, - "WARNING: kernel layering is EXPERIMENTAL!"); } ret = rbd_dev_probe_parent(rbd_dev, depth); @@ -5886,6 +6073,12 @@ static ssize_t do_rbd_add(struct bus_type *bus, if (rbd_dev->spec->snap_id != CEPH_NOSNAP) rbd_dev->opts->read_only = true; + if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) { + rbd_warn(rbd_dev, "alloc_size adjusted to %u", + rbd_dev->layout.object_size); + rbd_dev->opts->alloc_size = rbd_dev->layout.object_size; + } + rc = rbd_dev_device_setup(rbd_dev); if (rc) goto err_out_image_probe; @@ -5934,9 +6127,7 @@ err_out_args: goto out; } -static ssize_t rbd_add(struct bus_type *bus, - const char *buf, - size_t count) +static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count) { if (single_major) return -EINVAL; @@ -5944,9 +6135,8 @@ static ssize_t rbd_add(struct bus_type *bus, return do_rbd_add(bus, buf, count); } -static ssize_t rbd_add_single_major(struct bus_type *bus, - const char *buf, - size_t count) +static ssize_t add_single_major_store(struct bus_type *bus, const char *buf, + size_t count) { return do_rbd_add(bus, buf, count); } @@ -6049,9 +6239,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus, return count; } -static ssize_t rbd_remove(struct bus_type *bus, - const char *buf, - size_t count) +static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count) { if (single_major) return -EINVAL; @@ -6059,9 +6247,8 @@ static ssize_t rbd_remove(struct bus_type *bus, return do_rbd_remove(bus, buf, count); } -static ssize_t rbd_remove_single_major(struct bus_type *bus, - const char *buf, - size_t count) +static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf, + size_t count) { return do_rbd_remove(bus, buf, count); } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index ab893a7571a2..7d3ad6c22ee5 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -2843,7 +2843,6 @@ static int skd_cons_disk(struct skd_device *skdev) skdev->sgs_per_request * sizeof(struct scatterlist); skdev->tag_set.numa_node = NUMA_NO_NODE; skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | - BLK_MQ_F_SG_MERGE | BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO); skdev->tag_set.driver_data = skdev; rc = blk_mq_alloc_tag_set(&skdev->tag_set); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index b16a887bbd02..4bc083b7c9b5 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -723,7 +723,7 @@ static int virtblk_probe(struct virtio_device *vdev) struct request_queue *q; int err, index; - u32 v, blk_size, sg_elems, opt_io_size; + u32 v, blk_size, max_size, sg_elems, opt_io_size; u16 min_io_size; u8 physical_block_exp, alignment_offset; @@ -826,14 +826,16 @@ static int virtblk_probe(struct virtio_device *vdev) /* No real sector limit. */ blk_queue_max_hw_sectors(q, -1U); + max_size = virtio_max_dma_size(vdev); + /* Host can optionally specify maximum segment size and number of * segments. 
*/ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX, struct virtio_blk_config, size_max, &v); if (!err) - blk_queue_max_segment_size(q, v); - else - blk_queue_max_segment_size(q, -1U); + max_size = min(max_size, v); + + blk_queue_max_segment_size(q, max_size); /* Host can optionally specify the block size of the device */ err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE, diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0ed4b200fa58..d43a5677ccbc 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -977,7 +977,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, } else info->tag_set.queue_depth = BLK_RING_SIZE(info); info->tag_set.numa_node = NUMA_NO_NODE; - info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; info->tag_set.cmd_size = sizeof(struct blkif_req); info->tag_set.driver_data = info; diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 4ed0a78fdc09..4d9a38890965 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -20,6 +20,7 @@ static const char * const backends[] = { "lzo", + "lzo-rle", #if IS_ENABLED(CONFIG_CRYPTO_LZ4) "lz4", #endif diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 845b0314ce3a..7b2e76e7f22f 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -336,7 +336,7 @@ config BT_MRVL The core driver to support Marvell Bluetooth devices. This driver is required if you want to support - Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8997. + Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8997. Say Y here to compile Marvell Bluetooth driver into the kernel or say M to compile it as module. @@ -350,7 +350,7 @@ config BT_MRVL_SDIO The driver for Marvell Bluetooth chipsets with SDIO interface. This driver is required if you want to use Marvell Bluetooth - devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8997 + devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997 chipsets are supported. 
Say Y here to compile support for Marvell BT-over-SDIO driver diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h index f0454541e5fd..fb7729779166 100644 --- a/drivers/bluetooth/btmrvl_drv.h +++ b/drivers/bluetooth/btmrvl_drv.h @@ -24,11 +24,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index fb3d03928460..047b75ce1deb 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -62,13 +62,14 @@ static const struct of_device_id btmrvl_sdio_of_match_table[] = { static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv) { struct btmrvl_sdio_card *card = priv; + struct device *dev = &card->func->dev; struct btmrvl_plt_wake_cfg *cfg = card->plt_wake_cfg; - pr_info("%s: wake by bt\n", __func__); + dev_info(dev, "wake by bt\n"); cfg->wake_by_bt = true; disable_irq_nosync(irq); - pm_wakeup_event(&card->func->dev, 0); + pm_wakeup_event(dev, 0); pm_system_wakeup(); return IRQ_HANDLED; @@ -87,7 +88,7 @@ static int btmrvl_sdio_probe_of(struct device *dev, if (!dev->of_node || !of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) { - pr_err("sdio platform data not available\n"); + dev_info(dev, "sdio device tree data not available\n"); return -1; } @@ -211,6 +212,29 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = { .fw_dump_end = 0xea, }; +static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = { + .cfg = 0x00, + .host_int_mask = 0x08, + .host_intstatus = 0x0c, + .card_status = 0x5c, + .sq_read_base_addr_a0 = 0xf8, + .sq_read_base_addr_a1 = 0xf9, + .card_revision = 0xc8, + .card_fw_status0 = 0xe8, + .card_fw_status1 = 0xe9, + .card_rx_len = 0xea, + .card_rx_unit = 0xeb, + .io_port_0 = 0xe4, + .io_port_1 = 0xe5, + .io_port_2 = 0xe6, + .int_read_to_clear = true, + .host_int_rsr = 0x04, + .card_misc_cfg = 0xD8, + .fw_dump_ctrl = 0xf0, + .fw_dump_start = 0xf1, + .fw_dump_end = 0xf8, +}; + static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = { .cfg = 0x00, .host_int_mask = 0x08, @@ -279,6 +303,15 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { .supports_fw_dump = true, }; +static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = { + .helper = NULL, + .firmware = "mrvl/sd8977_uapsta.bin", + .reg = &btmrvl_reg_8977, + .support_pscan_win_report = true, + .sd_blksz_fw_dl = 256, + .supports_fw_dump = true, +}; + static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { .helper = NULL, .firmware = "mrvl/sd8997_uapsta.bin", @@ -307,6 +340,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8897 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E), .driver_data = (unsigned long)&btmrvl_sdio_sd8897 }, + /* Marvell SD8977 Bluetooth device */ + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146), + .driver_data = (unsigned long)&btmrvl_sdio_sd8977 }, /* Marvell SD8997 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142), .driver_data = (unsigned long)&btmrvl_sdio_sd8997 }, @@ -1760,4 +1796,5 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin"); diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index 4593baff2bc9..b0b680dd69f4 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ 
-12,10 +12,15 @@ #include #include #include +#include +#include #include #include #include +#include +#include #include +#include #include #include @@ -24,20 +29,38 @@ #include "h4_recv.h" -#define VERSION "0.1" +#define VERSION "0.2" #define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin" +#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin" +#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin" #define MTK_STP_TLR_SIZE 2 #define BTMTKUART_TX_STATE_ACTIVE 1 #define BTMTKUART_TX_STATE_WAKEUP 2 #define BTMTKUART_TX_WAIT_VND_EVT 3 +#define BTMTKUART_REQUIRED_WAKEUP 4 + +#define BTMTKUART_FLAG_STANDALONE_HW BIT(0) enum { MTK_WMT_PATCH_DWNLD = 0x1, + MTK_WMT_TEST = 0x2, + MTK_WMT_WAKEUP = 0x3, + MTK_WMT_HIF = 0x4, MTK_WMT_FUNC_CTRL = 0x6, - MTK_WMT_RST = 0x7 + MTK_WMT_RST = 0x7, + MTK_WMT_SEMAPHORE = 0x17, +}; + +enum { + BTMTK_WMT_INVALID, + BTMTK_WMT_PATCH_UNDONE, + BTMTK_WMT_PATCH_DONE, + BTMTK_WMT_ON_UNDONE, + BTMTK_WMT_ON_DONE, + BTMTK_WMT_ON_PROGRESS, }; struct mtk_stp_hdr { @@ -46,6 +69,11 @@ struct mtk_stp_hdr { u8 cs; } __packed; +struct btmtkuart_data { + unsigned int flags; + const char *fwname; +}; + struct mtk_wmt_hdr { u8 dir; u8 op; @@ -58,41 +86,85 @@ struct mtk_hci_wmt_cmd { u8 data[256]; } __packed; +struct btmtk_hci_wmt_evt { + struct hci_event_hdr hhdr; + struct mtk_wmt_hdr whdr; +} __packed; + +struct btmtk_hci_wmt_evt_funcc { + struct btmtk_hci_wmt_evt hwhdr; + __be16 status; +} __packed; + +struct btmtk_tci_sleep { + u8 mode; + __le16 duration; + __le16 host_duration; + u8 host_wakeup_pin; + u8 time_compensation; +} __packed; + +struct btmtk_hci_wmt_params { + u8 op; + u8 flag; + u16 dlen; + const void *data; + u32 *status; +}; + struct btmtkuart_dev { struct hci_dev *hdev; struct serdev_device *serdev; struct clk *clk; + struct regulator *vcc; + struct gpio_desc *reset; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_runtime; + struct pinctrl_state *pins_boot; + speed_t desired_speed; + speed_t curr_speed; + struct work_struct tx_work; unsigned long tx_state; struct sk_buff_head txq; struct sk_buff *rx_skb; + struct sk_buff *evt_skb; u8 stp_pad[6]; u8 stp_cursor; u16 stp_dlen; + + const struct btmtkuart_data *data; }; -static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen, - const void *param) +#define btmtkuart_is_standalone(bdev) \ + ((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW) +#define btmtkuart_is_builtin_soc(bdev) \ + !((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW) + +static int mtk_hci_wmt_sync(struct hci_dev *hdev, + struct btmtk_hci_wmt_params *wmt_params) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; + u32 hlen, status = BTMTK_WMT_INVALID; + struct btmtk_hci_wmt_evt *wmt_evt; struct mtk_hci_wmt_cmd wc; struct mtk_wmt_hdr *hdr; - u32 hlen; int err; - hlen = sizeof(*hdr) + plen; + hlen = sizeof(*hdr) + wmt_params->dlen; if (hlen > 255) return -EINVAL; hdr = (struct mtk_wmt_hdr *)&wc; hdr->dir = 1; - hdr->op = op; - hdr->dlen = cpu_to_le16(plen + 1); - hdr->flag = flag; - memcpy(wc.data, param, plen); + hdr->op = wmt_params->op; + hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); + hdr->flag = wmt_params->flag; + memcpy(wc.data, wmt_params->data, wmt_params->dlen); set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); @@ -107,7 +179,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen, * Complete as with usual HCI command flow control. * * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT - * state to be cleared. 
The driver speicfic event receive routine + * state to be cleared. The driver specific event receive routine * will clear that state and with that indicate completion of the * WMT command. */ @@ -115,26 +187,63 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev, u8 op, u8 flag, u16 plen, TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); return err; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); + clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); return -ETIMEDOUT; } - return 0; + /* Parse and handle the return WMT event */ + wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data; + if (wmt_evt->whdr.op != hdr->op) { + bt_dev_err(hdev, "Wrong op received %d expected %d", + wmt_evt->whdr.op, hdr->op); + err = -EIO; + goto err_free_skb; + } + + switch (wmt_evt->whdr.op) { + case MTK_WMT_SEMAPHORE: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_UNDONE; + else + status = BTMTK_WMT_PATCH_DONE; + break; + case MTK_WMT_FUNC_CTRL: + wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; + if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) + status = BTMTK_WMT_ON_DONE; + else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) + status = BTMTK_WMT_ON_PROGRESS; + else + status = BTMTK_WMT_ON_UNDONE; + break; + } + + if (wmt_params->status) + *wmt_params->status = status; + +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; + + return err; } -static int mtk_setup_fw(struct hci_dev *hdev) +static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) { + struct btmtk_hci_wmt_params wmt_params; const struct firmware *fw; const u8 *fw_ptr; size_t fw_size; int err, dlen; u8 flag; - err = request_firmware(&fw, FIRMWARE_MT7622, &hdev->dev); + err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to load firmware file (%d)", err); return err; @@ -153,6 +262,9 @@ static int mtk_setup_fw(struct hci_dev *hdev) fw_ptr += 30; flag = 1; + wmt_params.op = MTK_WMT_PATCH_DWNLD; + wmt_params.status = NULL; + while (fw_size > 0) { dlen = min_t(int, 250, fw_size); @@ -162,18 +274,37 @@ static int mtk_setup_fw(struct hci_dev *hdev) else if (fw_size < fw->size - 30) flag = 2; - err = mtk_hci_wmt_sync(hdev, MTK_WMT_PATCH_DWNLD, flag, dlen, - fw_ptr); + wmt_params.flag = flag; + wmt_params.dlen = dlen; + wmt_params.data = fw_ptr; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", err); - break; + goto free_fw; } fw_size -= dlen; fw_ptr += dlen; } + wmt_params.op = MTK_WMT_RST; + wmt_params.flag = 4; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = NULL; + + /* Activate funciton the firmware providing to */ + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt rst (%d)", err); + goto free_fw; + } + + /* Wait a few moments for firmware activation done */ + usleep_range(10000, 12000); + free_fw: release_firmware(fw); return err; @@ -192,7 +323,20 @@ static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb) if (hdr->evt == 0xe4) hdr->evt = HCI_EV_VENDOR; + /* When someone waits for the WMT event, the skb is being cloned + * and being processed the events from there then. 
+ */ + if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) { + bdev->evt_skb = skb_clone(skb, GFP_KERNEL); + if (!bdev->evt_skb) { + err = -ENOMEM; + goto err_out; + } + } + err = hci_recv_frame(hdev, skb); + if (err < 0) + goto err_free_skb; if (hdr->evt == HCI_EV_VENDOR) { if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT, @@ -203,6 +347,13 @@ static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb) } } + return 0; + +err_free_skb: + kfree_skb(bdev->evt_skb); + bdev->evt_skb = NULL; + +err_out: return err; } @@ -404,6 +555,23 @@ static int btmtkuart_open(struct hci_dev *hdev) goto err_open; } + if (btmtkuart_is_standalone(bdev)) { + if (bdev->curr_speed != bdev->desired_speed) + err = serdev_device_set_baudrate(bdev->serdev, + 115200); + else + err = serdev_device_set_baudrate(bdev->serdev, + bdev->desired_speed); + + if (err < 0) { + bt_dev_err(hdev, "Unable to set baudrate UART device %s", + dev_name(&bdev->serdev->dev)); + goto err_serdev_close; + } + + serdev_device_set_flow_control(bdev->serdev, false); + } + bdev->stp_cursor = 2; bdev->stp_dlen = 0; @@ -427,6 +595,8 @@ err_put_rpm: pm_runtime_put_sync(dev); err_disable_rpm: pm_runtime_disable(dev); +err_serdev_close: + serdev_device_close(bdev->serdev); err_open: return err; } @@ -465,42 +635,222 @@ static int btmtkuart_flush(struct hci_dev *hdev) return 0; } +static int btmtkuart_func_query(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + int status, err; + u8 param = 0; + + /* Query whether the function is enabled */ + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 4; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query function status (%d)", err); + return err; + } + + return status; +} + +static int btmtkuart_change_baudrate(struct hci_dev *hdev) +{ + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + u32 baudrate; + u8 param; + int err; + + /* Indicate the device to enter the probe state the host is + * ready to change a new baudrate.
+ */ + baudrate = cpu_to_le32(bdev->desired_speed); + wmt_params.op = MTK_WMT_HIF; + wmt_params.flag = 1; + wmt_params.dlen = 4; + wmt_params.data = &baudrate; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to device baudrate (%d)", err); + return err; + } + + err = serdev_device_set_baudrate(bdev->serdev, + bdev->desired_speed); + if (err < 0) { + bt_dev_err(hdev, "Failed to set up host baudrate (%d)", + err); + return err; + } + + serdev_device_set_flow_control(bdev->serdev, false); + + /* Send a dummy byte 0xff to activate the new baudrate */ + param = 0xff; + err = serdev_device_write(bdev->serdev, &param, sizeof(param), + MAX_SCHEDULE_TIMEOUT); + if (err < 0 || err < sizeof(param)) + return err; + + serdev_device_wait_until_sent(bdev->serdev, 0); + + /* Wait some time for the device changing baudrate done */ + usleep_range(20000, 22000); + + /* Test the new baudrate */ + wmt_params.op = MTK_WMT_TEST; + wmt_params.flag = 7; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to test new baudrate (%d)", + err); + return err; + } + + bdev->curr_speed = bdev->desired_speed; + + return 0; +} + static int btmtkuart_setup(struct hci_dev *hdev) { + struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); + struct btmtk_hci_wmt_params wmt_params; + ktime_t calltime, delta, rettime; + struct btmtk_tci_sleep tci_sleep; + unsigned long long duration; + struct sk_buff *skb; + int err, status; u8 param = 0x1; - int err = 0; + + calltime = ktime_get(); + + /* Wakeup MCUSYS is required for certain devices before we start to + * do any setups. + */ + if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) { + wmt_params.op = MTK_WMT_WAKEUP; + wmt_params.flag = 3; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err); + return err; + } + + clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state); + } + + if (btmtkuart_is_standalone(bdev)) + btmtkuart_change_baudrate(hdev); + + /* Query whether the firmware is already download */ + wmt_params.op = MTK_WMT_SEMAPHORE; + wmt_params.flag = 1; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = &status; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query firmware status (%d)", err); + return err; + } + + if (status == BTMTK_WMT_PATCH_DONE) { + bt_dev_info(hdev, "Firmware already downloaded"); + goto ignore_setup_fw; + } /* Setup a firmware which the device definitely requires */ - err = mtk_setup_fw(hdev); + err = mtk_setup_firmware(hdev, bdev->data->fwname); if (err < 0) return err; - /* Activate function the firmware providing to */ - err = mtk_hci_wmt_sync(hdev, MTK_WMT_RST, 0x4, 0, 0); - if (err < 0) { - bt_dev_err(hdev, "Failed to send wmt rst (%d)", err); +ignore_setup_fw: + /* Query whether the device is already enabled */ + err = readx_poll_timeout(btmtkuart_func_query, hdev, status, + status < 0 || status != BTMTK_WMT_ON_PROGRESS, + 2000, 5000000); + /* -ETIMEDOUT happens */ + if (err < 0) return err; + + /* The other errors happen in btusb_mtk_func_query */ + if (status < 0) + return status; + + if (status == BTMTK_WMT_ON_DONE) { + bt_dev_info(hdev, "function already on"); + goto ignore_func_on; } /* Enable Bluetooth protocol */ - err = mtk_hci_wmt_sync(hdev,
MTK_WMT_FUNC_CTRL, 0x0, sizeof(param), - &param); + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } +ignore_func_on: + /* Apply the low power environment setup */ + tci_sleep.mode = 0x5; + tci_sleep.duration = cpu_to_le16(0x640); + tci_sleep.host_duration = cpu_to_le16(0x640); + tci_sleep.host_wakeup_pin = 0; + tci_sleep.time_compensation = 0; + + skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); + return err; + } + kfree_skb(skb); + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device setup in %llu usecs", duration); + return 0; } static int btmtkuart_shutdown(struct hci_dev *hdev) { + struct btmtk_hci_wmt_params wmt_params; u8 param = 0x0; int err; /* Disable the device */ - err = mtk_hci_wmt_sync(hdev, MTK_WMT_FUNC_CTRL, 0x0, sizeof(param), - &param); + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; @@ -543,24 +893,82 @@ static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } +static int btmtkuart_parse_dt(struct serdev_device *serdev) +{ + struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); + struct device_node *node = serdev->dev.of_node; + u32 speed = 921600; + int err; + + if (btmtkuart_is_standalone(bdev)) { + of_property_read_u32(node, "current-speed", &speed); + + bdev->desired_speed = speed; + + bdev->vcc = devm_regulator_get(&serdev->dev, "vcc"); + if (IS_ERR(bdev->vcc)) { + err = PTR_ERR(bdev->vcc); + return err; + } + + bdev->pinctrl = devm_pinctrl_get(&serdev->dev); + if (IS_ERR(bdev->pinctrl)) { + err = PTR_ERR(bdev->pinctrl); + return err; + } + + bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl, + "default"); + if (IS_ERR(bdev->pins_boot)) { + err = PTR_ERR(bdev->pins_boot); + return err; + } + + bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl, + "runtime"); + if (IS_ERR(bdev->pins_runtime)) { + err = PTR_ERR(bdev->pins_runtime); + return err; + } + + bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(bdev->reset)) { + err = PTR_ERR(bdev->reset); + return err; + } + } else if (btmtkuart_is_builtin_soc(bdev)) { + bdev->clk = devm_clk_get(&serdev->dev, "ref"); + if (IS_ERR(bdev->clk)) + return PTR_ERR(bdev->clk); + } + + return 0; +} + static int btmtkuart_probe(struct serdev_device *serdev) { struct btmtkuart_dev *bdev; struct hci_dev *hdev; + int err; bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL); if (!bdev) return -ENOMEM; - bdev->clk = devm_clk_get(&serdev->dev, "ref"); - if (IS_ERR(bdev->clk)) - return PTR_ERR(bdev->clk); + bdev->data = of_device_get_match_data(&serdev->dev); + if (!bdev->data) + return -ENODEV; bdev->serdev = serdev; serdev_device_set_drvdata(serdev, bdev); serdev_device_set_client_ops(serdev, &btmtkuart_client_ops); + err = btmtkuart_parse_dt(serdev); + if (err < 0) + return err; + INIT_WORK(&bdev->tx_work, btmtkuart_tx_work);
skb_queue_head_init(&bdev->txq); @@ -587,13 +995,54 @@ static int btmtkuart_probe(struct serdev_device *serdev) hdev->manufacturer = 70; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); - if (hci_register_dev(hdev) < 0) { + if (btmtkuart_is_standalone(bdev)) { + /* Switch to the specific pin state for the booting requires */ + pinctrl_select_state(bdev->pinctrl, bdev->pins_boot); + + /* Power on */ + err = regulator_enable(bdev->vcc); + if (err < 0) + return err; + + /* Reset if the reset-gpios is available otherwise the board + * -level design should be guaranteed. + */ + if (bdev->reset) { + gpiod_set_value_cansleep(bdev->reset, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(bdev->reset, 0); + } + + /* Wait some time until device got ready and switch to the pin + * mode the device requires for UART transfers. + */ + msleep(50); + pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime); + + /* A standalone device doesn't depends on power domain on SoC, + * so mark it as no callbacks. + */ + pm_runtime_no_callbacks(&serdev->dev); + + set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state); + } + + err = hci_register_dev(hdev); + if (err < 0) { dev_err(&serdev->dev, "Can't register HCI device\n"); hci_free_dev(hdev); - return -ENODEV; + goto err_regulator_disable; } return 0; + +err_regulator_disable: + if (btmtkuart_is_standalone(bdev)) { + pinctrl_select_state(bdev->pinctrl, bdev->pins_boot); + regulator_disable(bdev->vcc); + } + + return err; } static void btmtkuart_remove(struct serdev_device *serdev) @@ -601,13 +1050,34 @@ static void btmtkuart_remove(struct serdev_device *serdev) struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); struct hci_dev *hdev = bdev->hdev; + if (btmtkuart_is_standalone(bdev)) { + pinctrl_select_state(bdev->pinctrl, bdev->pins_boot); + regulator_disable(bdev->vcc); + } + hci_unregister_dev(hdev); hci_free_dev(hdev); } +static const struct btmtkuart_data mt7622_data = { + .fwname = FIRMWARE_MT7622, +}; + +static const struct btmtkuart_data mt7663_data = { + .flags = BTMTKUART_FLAG_STANDALONE_HW, + .fwname = FIRMWARE_MT7663, +}; + +static const struct btmtkuart_data mt7668_data = { + .flags = BTMTKUART_FLAG_STANDALONE_HW, + .fwname = FIRMWARE_MT7668, +}; + #ifdef CONFIG_OF static const struct of_device_id mtk_of_match_table[] = { - { .compatible = "mediatek,mt7622-bluetooth"}, + { .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data}, + { .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data}, + { .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data}, { } }; MODULE_DEVICE_TABLE(of, mtk_of_match_table); @@ -629,3 +1099,5 @@ MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_MT7622); +MODULE_FIRMWARE(FIRMWARE_MT7663); +MODULE_FIRMWARE(FIRMWARE_MT7668); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index ec9e03a6b778..612268574fc7 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -391,6 +391,25 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, } EXPORT_SYMBOL_GPL(qca_uart_setup); +int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + struct sk_buff *skb; + int err; + + skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr, + HCI_EV_VENDOR, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} 
+EXPORT_SYMBOL_GPL(qca_set_bdaddr); + MODULE_AUTHOR("Ben Young Tae Kim "); MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION); MODULE_VERSION(VERSION); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 0c01f375fe83..c72c56ea7480 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -20,6 +20,7 @@ #define EDL_PATCH_CMD_OPCODE (0xFC00) #define EDL_NVM_ACCESS_OPCODE (0xFC0B) +#define EDL_WRITE_BD_ADDR_OPCODE (0xFC14) #define EDL_PATCH_CMD_LEN (1) #define EDL_PATCH_VER_REQ_CMD (0x19) #define EDL_PATCH_TLV_REQ_CMD (0x1E) @@ -140,7 +141,7 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr); int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, enum qca_btsoc_type soc_type, u32 soc_ver); int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version); - +int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr); #else static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) @@ -159,4 +160,9 @@ static inline int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version) return -EOPNOTSUPP; } +static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + return -EOPNOTSUPP; +} + #endif diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c index 7df3eed1ef5e..e0d4c6f1d3ab 100644 --- a/drivers/bluetooth/btqcomsmd.c +++ b/drivers/bluetooth/btqcomsmd.c @@ -28,7 +28,6 @@ struct btqcomsmd { struct hci_dev *hdev; - bdaddr_t bdaddr; struct rpmsg_endpoint *acl_channel; struct rpmsg_endpoint *cmd_channel; }; @@ -116,32 +115,17 @@ static int btqcomsmd_close(struct hci_dev *hdev) static int btqcomsmd_setup(struct hci_dev *hdev) { - struct btqcomsmd *btq = hci_get_drvdata(hdev); struct sk_buff *skb; - int err; skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); - /* Devices do not have persistent storage for BD address. If no - * BD address has been retrieved during probe, mark the device - * as having an invalid BD address. - */ - if (!bacmp(&btq->bdaddr, BDADDR_ANY)) { - set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); - return 0; - } - - /* When setting a configured BD address fails, mark the device - * as having an invalid BD address. + /* Devices do not have persistent storage for BD address. Retrieve + * it from the firmware node property. */ - err = qca_set_bdaddr_rome(hdev, &btq->bdaddr); - if (err) { - set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); - return 0; - } + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); return 0; } @@ -169,15 +153,6 @@ static int btqcomsmd_probe(struct platform_device *pdev) if (IS_ERR(btq->cmd_channel)) return PTR_ERR(btq->cmd_channel); - /* The local-bd-address property is usually injected by the - * bootloader which has access to the allocated BD address. 
- */ - if (!of_property_read_u8_array(pdev->dev.of_node, "local-bd-address", - (u8 *)&btq->bdaddr, sizeof(bdaddr_t))) { - dev_info(&pdev->dev, "BD address %pMR retrieved from device-tree", - &btq->bdaddr); - } - hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 41405de27d66..c91bba00df4e 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -552,10 +552,9 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, hdev->bus); if (!btrtl_dev->ic_info) { - rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", + rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", lmp_subver, hci_rev, hci_ver); - ret = -EINVAL; - goto err_free; + return btrtl_dev; } if (btrtl_dev->ic_info->has_rom_version) { @@ -610,6 +609,11 @@ int btrtl_download_firmware(struct hci_dev *hdev, * standard btusb. Once that firmware is uploaded, the subver changes * to a different value. */ + if (!btrtl_dev->ic_info) { + rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n"); + return 0; + } + switch (btrtl_dev->ic_info->lmp_subver) { case RTL_ROM_LMP_8723A: case RTL_ROM_LMP_3499: diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 4761499db9ee..ded198328f21 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -439,6 +440,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { #define BTUSB_BOOTING 9 #define BTUSB_DIAG_RUNNING 10 #define BTUSB_OOB_WAKE_ENABLED 11 +#define BTUSB_HW_RESET_ACTIVE 12 struct btusb_data { struct hci_dev *hdev; @@ -476,6 +478,8 @@ struct btusb_data { struct usb_endpoint_descriptor *diag_tx_ep; struct usb_endpoint_descriptor *diag_rx_ep; + struct gpio_desc *reset_gpio; + __u8 cmdreq_type; __u8 cmdreq; @@ -489,8 +493,41 @@ struct btusb_data { int (*setup_on_usb)(struct hci_dev *hdev); int oob_wake_irq; /* irq for out-of-band wake-on-bt */ + unsigned cmd_timeout_cnt; }; + +static void btusb_intel_cmd_timeout(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct gpio_desc *reset_gpio = data->reset_gpio; + + if (++data->cmd_timeout_cnt < 5) + return; + + if (!reset_gpio) { + bt_dev_err(hdev, "No way to reset. Ignoring and continuing"); + return; + } + + /* + * Toggle the hard reset line if the platform provides one. The reset + * is going to yank the device off the USB and then replug. So doing + * once is enough. The cleanup is handled correctly on the way out + * (standard USB disconnect), and the new device is detected cleanly + * and bound to the driver again like it should be. + */ + if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + bt_dev_err(hdev, "last reset failed? Not resetting again"); + return; + } + + bt_dev_err(hdev, "Initiating HW reset via gpio"); + gpiod_set_value_cansleep(reset_gpio, 1); + msleep(100); + gpiod_set_value_cansleep(reset_gpio, 0); +} + static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; @@ -2397,6 +2434,24 @@ static int btusb_shutdown_intel(struct hci_dev *hdev) return 0; } +static int btusb_shutdown_intel_new(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Send HCI Reset to the controller to stop any BT activity which + * were triggered. 
This will help to save power and maintain the + * sync b/w Host and controller + */ + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "HCI reset during shutdown failed"); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return 0; +} + #ifdef CONFIG_PM /* Configure an out-of-band gpio as wake-up pin, if specified in device tree */ static int marvell_config_oob_wake(struct hci_dev *hdev) @@ -2862,6 +2917,8 @@ static irqreturn_t btusb_oob_wake_handler(int irq, void *priv) static const struct of_device_id btusb_match_table[] = { { .compatible = "usb1286,204e" }, + { .compatible = "usbcf3,e300" }, /* QCA6174A */ + { .compatible = "usb4ca,301a" }, /* QCA6174A (Lite-On) */ { } }; MODULE_DEVICE_TABLE(of, btusb_match_table); @@ -2915,6 +2972,7 @@ static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_endpoint_descriptor *ep_desc; + struct gpio_desc *reset_gpio; struct btusb_data *data; struct hci_dev *hdev; unsigned ifnum_base; @@ -3028,6 +3086,15 @@ static int btusb_probe(struct usb_interface *intf, SET_HCIDEV_DEV(hdev, &intf->dev); + reset_gpio = gpiod_get_optional(&data->udev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(reset_gpio)) { + err = PTR_ERR(reset_gpio); + goto out_free_dev; + } else if (reset_gpio) { + data->reset_gpio = reset_gpio; + } + hdev->open = btusb_open; hdev->close = btusb_close; hdev->flush = btusb_flush; @@ -3082,6 +3149,7 @@ static int btusb_probe(struct usb_interface *intf, hdev->shutdown = btusb_shutdown_intel; hdev->set_diag = btintel_set_diag_mfg; hdev->set_bdaddr = btintel_set_bdaddr; + hdev->cmd_timeout = btusb_intel_cmd_timeout; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); @@ -3091,9 +3159,11 @@ static int btusb_probe(struct usb_interface *intf, hdev->manufacturer = 2; hdev->send = btusb_send_frame_intel; hdev->setup = btusb_setup_intel_new; + hdev->shutdown = btusb_shutdown_intel_new; hdev->hw_error = btintel_hw_error; hdev->set_diag = btintel_set_diag; hdev->set_bdaddr = btintel_set_bdaddr; + hdev->cmd_timeout = btusb_intel_cmd_timeout; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); @@ -3226,6 +3296,8 @@ static int btusb_probe(struct usb_interface *intf, return 0; out_free_dev: + if (data->reset_gpio) + gpiod_put(data->reset_gpio); hci_free_dev(hdev); return err; } @@ -3269,6 +3341,9 @@ static void btusb_disconnect(struct usb_interface *intf) if (data->oob_wake_irq) device_init_wakeup(&data->udev->dev, false); + if (data->reset_gpio) + gpiod_put(data->reset_gpio); + hci_free_dev(hdev); } diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h index b432651f8236..87ccaceadba7 100644 --- a/drivers/bluetooth/h4_recv.h +++ b/drivers/bluetooth/h4_recv.h @@ -60,12 +60,13 @@ static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev, const struct h4_recv_pkt *pkts, int pkts_count) { + /* Check for error from previous call */ + if (IS_ERR(skb)) + skb = NULL; + while (count) { int i, len; - if (!count) - break; - if (!skb) { for (i = 0; i < pkts_count; i++) { if (buffer[0] != (&pkts[i])->type) diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index fb97a3bf069b..5d97d77627c1 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -174,6 +174,10 @@ struct sk_buff 
*h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, struct hci_uart *hu = hci_get_drvdata(hdev); u8 alignment = hu->alignment ? hu->alignment : 1; + /* Check for error from previous call */ + if (IS_ERR(skb)) + skb = NULL; + while (count) { int i, len; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index fbf7b4df23ab..9562e72c1ae5 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -207,11 +207,11 @@ void hci_uart_init_work(struct work_struct *work) err = hci_register_dev(hu->hdev); if (err < 0) { BT_ERR("Can't register HCI device"); + clear_bit(HCI_UART_PROTO_READY, &hu->flags); + hu->proto->close(hu); hdev = hu->hdev; hu->hdev = NULL; hci_free_dev(hdev); - clear_bit(HCI_UART_PROTO_READY, &hu->flags); - hu->proto->close(hu); return; } @@ -616,6 +616,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, static int hci_uart_register_dev(struct hci_uart *hu) { struct hci_dev *hdev; + int err; BT_DBG(""); @@ -659,11 +660,22 @@ static int hci_uart_register_dev(struct hci_uart *hu) else hdev->dev_type = HCI_PRIMARY; + /* Only call open() for the protocol after hdev is fully initialized as + * open() (or a timer/workqueue it starts) may attempt to reference it. + */ + err = hu->proto->open(hu); + if (err) { + hu->hdev = NULL; + hci_free_dev(hdev); + return err; + } + if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return 0; if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); + hu->proto->close(hu); hu->hdev = NULL; hci_free_dev(hdev); return -ENODEV; @@ -683,20 +695,14 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) if (!p) return -EPROTONOSUPPORT; - err = p->open(hu); - if (err) - return err; - hu->proto = p; - set_bit(HCI_UART_PROTO_READY, &hu->flags); err = hci_uart_register_dev(hu); if (err) { - clear_bit(HCI_UART_PROTO_READY, &hu->flags); - p->close(hu); return err; } + set_bit(HCI_UART_PROTO_READY, &hu->flags); return 0; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index f036c8f98ea3..237aea34b69f 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -59,7 +59,7 @@ #define IBS_WAKE_RETRANS_TIMEOUT_MS 100 #define IBS_TX_IDLE_TIMEOUT_MS 2000 -#define BAUDRATE_SETTLE_TIMEOUT_MS 300 +#define CMD_TRANS_TIMEOUT_MS 100 /* susclk rate */ #define SUSCLK_RATE_32KHZ 32768 @@ -770,16 +770,17 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + /* Don't go to sleep in middle of patch download or * Out-Of-Band(GPIOs control) sleep is selected. */ if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) { skb_queue_tail(&qca->txq, skb); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); return 0; } - spin_lock_irqsave(&qca->hci_ibs_lock, flags); - /* Act according to current state */ switch (qca->tx_ibs_state) { case HCI_IBS_TX_AWAKE: @@ -962,8 +963,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; - struct sk_buff *skb; struct qca_serdev *qcadev; + struct sk_buff *skb; u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; if (baudrate > QCA_BAUDRATE_3200000) @@ -977,13 +978,6 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) return -ENOMEM; } - /* Disabling hardware flow control is mandatory while - * sending change baudrate request to wcn3990 SoC. 
- */ - qcadev = serdev_device_get_drvdata(hu->serdev); - if (qcadev->btsoc_type == QCA_WCN3990) - hci_uart_set_flow_control(hu, true); - /* Assign commands to change baudrate and packet type. */ skb_put_data(skb, cmd, sizeof(cmd)); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; @@ -991,16 +985,21 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) skb_queue_tail(&qca->txq, skb); hci_uart_tx_wakeup(hu); - /* wait 300ms to change new baudrate on controller side - * controller will come back after they receive this HCI command - * then host can communicate with new baudrate to controller - */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); - set_current_state(TASK_RUNNING); + qcadev = serdev_device_get_drvdata(hu->serdev); + + /* Wait for the baudrate change request to be sent */ + + while (!skb_queue_empty(&qca->txq)) + usleep_range(100, 200); + serdev_device_wait_until_sent(hu->serdev, + msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); + + /* Give the controller time to process the request */ if (qcadev->btsoc_type == QCA_WCN3990) - hci_uart_set_flow_control(hu, false); + msleep(10); + else + msleep(300); return 0; } @@ -1013,11 +1012,11 @@ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) hci_uart_set_baudrate(hu, speed); } -static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd) +static int qca_send_power_pulse(struct hci_uart *hu, bool on) { - struct hci_uart *hu = hci_get_drvdata(hdev); - struct qca_data *qca = hu->priv; - struct sk_buff *skb; + int ret; + int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); + u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE; /* These power pulses are single byte command which are sent * at required baudrate to wcn3990. On wcn3990, we have an external @@ -1029,24 +1028,25 @@ static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd) * save power. Disabling hardware flow control is mandatory while * sending power pulses to SoC. */ - bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd); - - skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); - if (!skb) - return -ENOMEM; + bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd); + serdev_device_write_flush(hu->serdev); hci_uart_set_flow_control(hu, true); + ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd)); + if (ret < 0) { + bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd); + return ret; + } - skb_put_u8(skb, cmd); - hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; - - skb_queue_tail(&qca->txq, skb); - hci_uart_tx_wakeup(hu); - - /* Wait for 100 uS for SoC to settle down */ - usleep_range(100, 200); + serdev_device_wait_until_sent(hu->serdev, timeout); hci_uart_set_flow_control(hu, false); + /* Give to controller time to boot/shutdown */ + if (on) + msleep(100); + else + msleep(10); + return 0; } @@ -1091,7 +1091,8 @@ static int qca_check_speeds(struct hci_uart *hu) static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) { unsigned int speed, qca_baudrate; - int ret; + struct qca_serdev *qcadev; + int ret = 0; if (speed_type == QCA_INIT_SPEED) { speed = qca_get_speed(hu, QCA_INIT_SPEED); @@ -1102,21 +1103,31 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) if (!speed) return 0; + /* Disable flow control for wcn3990 to deassert RTS while + * changing the baudrate of chip and host. 
+ */ + qcadev = serdev_device_get_drvdata(hu->serdev); + if (qcadev->btsoc_type == QCA_WCN3990) + hci_uart_set_flow_control(hu, true); + qca_baudrate = qca_get_baudrate_value(speed); bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed); ret = qca_set_baudrate(hu->hdev, qca_baudrate); if (ret) - return ret; + goto error; host_set_baudrate(hu, speed); + +error: + if (qcadev->btsoc_type == QCA_WCN3990) + hci_uart_set_flow_control(hu, false); } - return 0; + return ret; } static int qca_wcn3990_init(struct hci_uart *hu) { - struct hci_dev *hdev = hu->hdev; struct qca_serdev *qcadev; int ret; @@ -1139,18 +1150,15 @@ static int qca_wcn3990_init(struct hci_uart *hu) /* Forcefully enable wcn3990 to enter in to boot mode. */ host_set_baudrate(hu, 2400); - ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE); + ret = qca_send_power_pulse(hu, false); if (ret) return ret; qca_set_speed(hu, QCA_INIT_SPEED); - ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE); + ret = qca_send_power_pulse(hu, true); if (ret) return ret; - /* Wait for 100 ms for SoC to boot */ - msleep(100); - /* Now the device is in ready state to communicate with host. * To sync host with device we need to reopen port. * Without this, we will have RTS and CTS synchronization @@ -1193,6 +1201,7 @@ static int qca_setup(struct hci_uart *hu) * setup for every hci up. */ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); hu->hdev->shutdown = qca_power_off; ret = qca_wcn3990_init(hu); if (ret) @@ -1241,7 +1250,10 @@ static int qca_setup(struct hci_uart *hu) } /* Setup bdaddr */ - hu->hdev->set_bdaddr = qca_set_bdaddr_rome; + if (qcadev->btsoc_type == QCA_WCN3990) + hu->hdev->set_bdaddr = qca_set_bdaddr; + else + hu->hdev->set_bdaddr = qca_set_bdaddr_rome; return ret; } @@ -1274,13 +1286,20 @@ static const struct qca_vreg_data qca_soc_data = { static void qca_power_shutdown(struct hci_uart *hu) { - struct serdev_device *serdev = hu->serdev; - unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE; + struct qca_data *qca = hu->priv; + unsigned long flags; + + /* From this point we go into power off state. But serial port is + * still open, stop queueing the IBS data and flush all the buffered + * data in skb's. 
+ */ + spin_lock_irqsave(&qca->hci_ibs_lock, flags); + clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); + qca_flush(hu); + spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); host_set_baudrate(hu, 2400); - hci_uart_set_flow_control(hu, true); - serdev_device_write_buf(serdev, &cmd, sizeof(cmd)); - hci_uart_set_flow_control(hu, false); + qca_send_power_pulse(hu, false); qca_power_setup(hu, false); } diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c index e906ecfe23dd..8ad77246f322 100644 --- a/drivers/bus/fsl-mc/fsl-mc-allocator.c +++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c @@ -295,6 +295,14 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, if (!mc_adev) goto error; + mc_adev->consumer_link = device_link_add(&mc_dev->dev, + &mc_adev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!mc_adev->consumer_link) { + error = -EINVAL; + goto error; + } + *new_mc_adev = mc_adev; return 0; error: @@ -321,6 +329,9 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev) return; fsl_mc_resource_free(resource); + + device_link_del(mc_adev->consumer_link); + mc_adev->consumer_link = NULL; } EXPORT_SYMBOL_GPL(fsl_mc_object_free); diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c index 7226cfc49b6f..3ae574a58cce 100644 --- a/drivers/bus/fsl-mc/mc-io.c +++ b/drivers/bus/fsl-mc/mc-io.c @@ -209,9 +209,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, if (error < 0) goto error_cleanup_resource; + dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev, + &dpmcp_dev->dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!dpmcp_dev->consumer_link) { + error = -EINVAL; + goto error_cleanup_mc_io; + } + *new_mc_io = mc_io; return 0; +error_cleanup_mc_io: + fsl_destroy_mc_io(mc_io); error_cleanup_resource: fsl_mc_resource_free(resource); return error; @@ -244,6 +254,9 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io) fsl_destroy_mc_io(mc_io); fsl_mc_resource_free(resource); + + device_link_del(dpmcp_dev->consumer_link); + dpmcp_dev->consumer_link = NULL; } EXPORT_SYMBOL_GPL(fsl_mc_portal_free); diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c index d5f85455fa62..19d7b6ff2f17 100644 --- a/drivers/bus/hisi_lpc.c +++ b/drivers/bus/hisi_lpc.c @@ -522,10 +522,9 @@ static int hisi_lpc_acpi_probe(struct device *hostdev) if (!found) { dev_warn(hostdev, - "could not find cell for child device (%s)\n", + "could not find cell for child device (%s), discarding\n", hid); - ret = -ENODEV; - goto fail; + continue; } pdev = platform_device_alloc(cell->name, PLATFORM_DEVID_AUTO); diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c index d84996a4528e..db74334ca5ef 100644 --- a/drivers/bus/imx-weim.c +++ b/drivers/bus/imx-weim.c @@ -46,6 +46,17 @@ static const struct imx_weim_devtype imx51_weim_devtype = { }; #define MAX_CS_REGS_COUNT 6 +#define MAX_CS_COUNT 6 +#define OF_REG_SIZE 3 + +struct cs_timing { + bool is_applied; + u32 regs[MAX_CS_REGS_COUNT]; +}; + +struct cs_timing_state { + struct cs_timing cs[MAX_CS_COUNT]; +}; static const struct of_device_id weim_id_table[] = { /* i.MX1/21 */ @@ -111,21 +122,19 @@ err: } /* Parse and set the timing for this device. 
*/ -static int __init weim_timing_setup(struct device_node *np, void __iomem *base, - const struct imx_weim_devtype *devtype) +static int __init weim_timing_setup(struct device *dev, + struct device_node *np, void __iomem *base, + const struct imx_weim_devtype *devtype, + struct cs_timing_state *ts) { u32 cs_idx, value[MAX_CS_REGS_COUNT]; int i, ret; + int reg_idx, num_regs; + struct cs_timing *cst; if (WARN_ON(devtype->cs_regs_count > MAX_CS_REGS_COUNT)) return -EINVAL; - - /* get the CS index from this child node's "reg" property. */ - ret = of_property_read_u32(np, "reg", &cs_idx); - if (ret) - return ret; - - if (cs_idx >= devtype->cs_count) + if (WARN_ON(devtype->cs_count > MAX_CS_COUNT)) return -EINVAL; ret = of_property_read_u32_array(np, "fsl,weim-cs-timing", @@ -133,9 +142,43 @@ static int __init weim_timing_setup(struct device_node *np, void __iomem *base, if (ret) return ret; - /* set the timing for WEIM */ - for (i = 0; i < devtype->cs_regs_count; i++) - writel(value[i], base + cs_idx * devtype->cs_stride + i * 4); + /* + * the child node's "reg" property may contain multiple address ranges, + * extract the chip select for each. + */ + num_regs = of_property_count_elems_of_size(np, "reg", OF_REG_SIZE); + if (num_regs < 0) + return num_regs; + if (!num_regs) + return -EINVAL; + for (reg_idx = 0; reg_idx < num_regs; reg_idx++) { + /* get the CS index from this child node's "reg" property. */ + ret = of_property_read_u32_index(np, "reg", + reg_idx * OF_REG_SIZE, &cs_idx); + if (ret) + break; + + if (cs_idx >= devtype->cs_count) + return -EINVAL; + + /* prevent re-configuring a CS that's already been configured */ + cst = &ts->cs[cs_idx]; + if (cst->is_applied && memcmp(value, cst->regs, + devtype->cs_regs_count * sizeof(u32))) { + dev_err(dev, "fsl,weim-cs-timing conflict on %pOF", np); + return -EINVAL; + } + + /* set the timing for WEIM */ + for (i = 0; i < devtype->cs_regs_count; i++) + writel(value[i], + base + cs_idx * devtype->cs_stride + i * 4); + if (!cst->is_applied) { + cst->is_applied = true; + memcpy(cst->regs, value, + devtype->cs_regs_count * sizeof(u32)); + } + } return 0; } @@ -148,6 +191,7 @@ static int __init weim_parse_dt(struct platform_device *pdev, const struct imx_weim_devtype *devtype = of_id->data; struct device_node *child; int ret, have_child = 0; + struct cs_timing_state ts = {}; if (devtype == &imx50_weim_devtype) { ret = imx_weim_gpr_setup(pdev); @@ -156,7 +200,7 @@ static int __init weim_parse_dt(struct platform_device *pdev, } for_each_available_child_of_node(pdev->dev.of_node, child) { - ret = weim_timing_setup(child, base, devtype); + ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts); if (ret) dev_warn(&pdev->dev, "%pOF set timing failed.\n", child); diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f94d33525771..d299ec79e4c3 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), + 0), /* Some timers on omap4 and later */ SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, - SYSC_QUIRK_LEGACY_IDLE), + 0), SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, - SYSC_QUIRK_LEGACY_IDLE), + 0), SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, SYSC_QUIRK_LEGACY_IDLE), /* Uarts on omap4 and later */ diff --git 
a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 614ecdbb4ab7..933268b8d6a5 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -265,6 +265,7 @@ /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */ /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */ +#include #include #include #include @@ -3692,9 +3693,9 @@ static struct ctl_table_header *cdrom_sysctl_header; static void cdrom_sysctl_register(void) { - static int initialized; + static atomic_t initialized = ATOMIC_INIT(0); - if (initialized == 1) + if (!atomic_add_unless(&initialized, 1, 1)) return; cdrom_sysctl_header = register_sysctl_table(cdrom_root_table); @@ -3705,8 +3706,6 @@ static void cdrom_sysctl_register(void) cdrom_sysctl_settings.debug = debug; cdrom_sysctl_settings.lock = lockdoor; cdrom_sysctl_settings.check = check_media_type; - - initialized = 1; } static void cdrom_sysctl_unregister(void) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 2e2ffe7010aa..72866a004f07 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -244,26 +244,23 @@ source "drivers/char/hw_random/Kconfig" config NVRAM tristate "/dev/nvram support" - depends on ATARI || X86 || GENERIC_NVRAM + depends on X86 || HAVE_ARCH_NVRAM_OPS + default M68K || PPC ---help--- If you say Y here and create a character special file /dev/nvram with major number 10 and minor number 144 using mknod ("man mknod"), - you get read and write access to the extra bytes of non-volatile - memory in the real time clock (RTC), which is contained in every PC - and most Ataris. The actual number of bytes varies, depending on the - nvram in the system, but is usually 114 (128-14 for the RTC). - - This memory is conventionally called "CMOS RAM" on PCs and "NVRAM" - on Ataris. /dev/nvram may be used to view settings there, or to - change them (with some utility). It could also be used to frequently + you get read and write access to the non-volatile memory. + + /dev/nvram may be used to view settings in NVRAM or to change them + (with some utility). It could also be used to frequently save a few bits of very important data that may not be lost over power-off and for which writing to disk is too insecure. Note however that most NVRAM space in a PC belongs to the BIOS and you should NEVER idly tamper with it. See Ralf Brown's interrupt list for a guide to the use of CMOS bytes by your BIOS. - On Atari machines, /dev/nvram is always configured and does not need - to be selected. + This memory is conventionally called "NVRAM" on PowerPC machines, + "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes. To compile this driver as a module, choose M here: the module will be called nvram. 
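The cdrom_sysctl_register() hunk earlier in this range replaces a racy "static int initialized" flag with an atomic counter: atomic_add_unless(&v, 1, 1) increments only if the counter is not already 1, so exactly one caller wins the race and performs the registration. A minimal sketch of that once-only idiom, outside any particular driver:

    #include <linux/atomic.h>

    static atomic_t initialized = ATOMIC_INIT(0);

    static void register_once(void)
    {
            /* Returns 0 if the counter was already 1, i.e. someone beat us */
            if (!atomic_add_unless(&initialized, 1, 1))
                    return;

            /* ... one-time registration work goes here ... */
    }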
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index b8d42b4e979b..fbea7dd12932 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -26,11 +26,7 @@ obj-$(CONFIG_RTC) += rtc.o obj-$(CONFIG_HPET) += hpet.o obj-$(CONFIG_EFI_RTC) += efirtc.o obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/ -ifeq ($(CONFIG_GENERIC_NVRAM),y) - obj-$(CONFIG_NVRAM) += generic_nvram.o -else - obj-$(CONFIG_NVRAM) += nvram.o -endif +obj-$(CONFIG_NVRAM) += nvram.o obj-$(CONFIG_TOSHIBA) += toshiba.o obj-$(CONFIG_DS1620) += ds1620.o obj-$(CONFIG_HW_RANDOM) += hw_random/ diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index 7f88490b5479..c53f0f9ef5b0 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c @@ -163,7 +163,6 @@ static int efficeon_free_gatt_table(struct agp_bridge_data *bridge) unsigned long page = efficeon_private.l1_table[index]; if (page) { efficeon_private.l1_table[index] = 0; - ClearPageReserved(virt_to_page((char *)page)); free_page(page); freed++; } @@ -219,7 +218,6 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge) efficeon_free_gatt_table(agp_bridge); return -ENOMEM; } - SetPageReserved(virt_to_page((char *)page)); for (offset = 0; offset < PAGE_SIZE; offset += clflush_chunk) clflush((char *)page+offset); diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index c0a5b1f3a986..4ccc39e00ced 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count, TicCard = st_loc.tic_des_from_pc; /* tic number to send */ IndexCard = NumCard - 1; - if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO) + if (IndexCard >= MAX_BOARD) + return -EINVAL; + IndexCard = array_index_nospec(IndexCard, MAX_BOARD); + + if (!apbs[IndexCard].RamIO) return -EINVAL; #ifdef DEBUG @@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) unsigned char IndexCard; void __iomem *pmem; int ret = 0; + static int warncount = 10; volatile unsigned char byte_reset_it; struct st_ram_io *adgl; void __user *argp = (void __user *)arg; @@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) mutex_lock(&ac_mutex); IndexCard = adgl->num_card-1; - if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { - static int warncount = 10; - if (warncount) { - printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1); - warncount--; - } - kfree(adgl); - mutex_unlock(&ac_mutex); - return -EINVAL; - } + if (cmd != 6 && IndexCard >= MAX_BOARD) + goto err; + IndexCard = array_index_nospec(IndexCard, MAX_BOARD); + + if (cmd != 6 && !apbs[IndexCard].RamIO) + goto err; switch (cmd) { @@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) kfree(adgl); mutex_unlock(&ac_mutex); return 0; + +err: + if (warncount) { + pr_warn("APPLICOM driver IOCTL, bad board number %d\n", + (int)IndexCard + 1); + warncount--; + } + kfree(adgl); + mutex_unlock(&ac_mutex); + return -EINVAL; + } diff --git a/drivers/char/efirtc.c b/drivers/char/efirtc.c index d9aab643997e..11781ebffbf7 100644 --- a/drivers/char/efirtc.c +++ b/drivers/char/efirtc.c @@ -254,27 +254,6 @@ static long efi_rtc_ioctl(struct file *file, unsigned int cmd, return -ENOTTY; } -/* - * We enforce only one user at a time here with the open/close. 
- * Also clear the previous interrupt data on an open, and clean - * up things on a close. - */ - -static int efi_rtc_open(struct inode *inode, struct file *file) -{ - /* - * nothing special to do here - * We do accept multiple open files at the same time as we - * synchronize on the per call operation. - */ - return 0; -} - -static int efi_rtc_close(struct inode *inode, struct file *file) -{ - return 0; -} - /* * The various file operations we support. */ @@ -282,8 +261,6 @@ static int efi_rtc_close(struct inode *inode, struct file *file) static const struct file_operations efi_rtc_fops = { .owner = THIS_MODULE, .unlocked_ioctl = efi_rtc_ioctl, - .open = efi_rtc_open, - .release = efi_rtc_close, .llseek = no_llseek, }; diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c deleted file mode 100644 index ff5394f47587..000000000000 --- a/drivers/char/generic_nvram.c +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Generic /dev/nvram driver for architectures providing some - * "generic" hooks, that is : - * - * nvram_read_byte, nvram_write_byte, nvram_sync, nvram_get_size - * - * Note that an additional hook is supported for PowerMac only - * for getting the nvram "partition" informations - * - */ - -#define NVRAM_VERSION "1.1" - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_PPC_PMAC -#include -#endif - -#define NVRAM_SIZE 8192 - -static DEFINE_MUTEX(nvram_mutex); -static ssize_t nvram_len; - -static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) -{ - return generic_file_llseek_size(file, offset, origin, - MAX_LFS_FILESIZE, nvram_len); -} - -static ssize_t read_nvram(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - unsigned int i; - char __user *p = buf; - - if (!access_ok(buf, count)) - return -EFAULT; - if (*ppos >= nvram_len) - return 0; - for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) - if (__put_user(nvram_read_byte(i), p)) - return -EFAULT; - *ppos = i; - return p - buf; -} - -static ssize_t write_nvram(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - unsigned int i; - const char __user *p = buf; - char c; - - if (!access_ok(buf, count)) - return -EFAULT; - if (*ppos >= nvram_len) - return 0; - for (i = *ppos; count > 0 && i < nvram_len; ++i, ++p, --count) { - if (__get_user(c, p)) - return -EFAULT; - nvram_write_byte(c, i); - } - *ppos = i; - return p - buf; -} - -static int nvram_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - switch(cmd) { -#ifdef CONFIG_PPC_PMAC - case OBSOLETE_PMAC_NVRAM_GET_OFFSET: - printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); - case IOC_NVRAM_GET_OFFSET: { - int part, offset; - - if (!machine_is(powermac)) - return -EINVAL; - if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0) - return -EFAULT; - if (part < pmac_nvram_OF || part > pmac_nvram_NR) - return -EINVAL; - offset = pmac_get_partition(part); - if (copy_to_user((void __user*)arg, &offset, sizeof(offset)) != 0) - return -EFAULT; - break; - } -#endif /* CONFIG_PPC_PMAC */ - case IOC_NVRAM_SYNC: - nvram_sync(); - break; - default: - return -EINVAL; - } - - return 0; -} - -static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - int ret; - - mutex_lock(&nvram_mutex); - ret = nvram_ioctl(file, cmd, arg); - mutex_unlock(&nvram_mutex); - - return ret; -} - -const struct file_operations nvram_fops = { - .owner = THIS_MODULE, - .llseek = 
nvram_llseek, - .read = read_nvram, - .write = write_nvram, - .unlocked_ioctl = nvram_unlocked_ioctl, -}; - -static struct miscdevice nvram_dev = { - NVRAM_MINOR, - "nvram", - &nvram_fops -}; - -int __init nvram_init(void) -{ - int ret = 0; - - printk(KERN_INFO "Generic non-volatile memory driver v%s\n", - NVRAM_VERSION); - ret = misc_register(&nvram_dev); - if (ret != 0) - goto out; - - nvram_len = nvram_get_size(); - if (nvram_len < 0) - nvram_len = NVRAM_SIZE; - -out: - return ret; -} - -void __exit nvram_cleanup(void) -{ - misc_deregister( &nvram_dev ); -} - -module_init(nvram_init); -module_exit(nvram_cleanup); -MODULE_LICENSE("GPL"); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 4a22b4b41aef..d0ad85900b79 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str) pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled"); return 1; } -__setup("hpet_mmap", hpet_mmap_enable); +__setup("hpet_mmap=", hpet_mmap_enable); static int hpet_mmap(struct file *file, struct vm_area_struct *vma) { @@ -842,7 +842,6 @@ int hpet_alloc(struct hpet_data *hdp) struct hpet_dev *devp; u32 i, ntimer; struct hpets *hpetp; - size_t siz; struct hpet __iomem *hpet; static struct hpets *last; unsigned long period; @@ -860,10 +859,8 @@ int hpet_alloc(struct hpet_data *hdp) return 0; } - siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) * - sizeof(struct hpet_dev)); - - hpetp = kzalloc(siz, GFP_KERNEL); + hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs - 1), + GFP_KERNEL); if (!hpetp) return -ENOMEM; diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index dac895dc01b9..25a7d8ffdb5d 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -424,6 +424,21 @@ config HW_RANDOM_EXYNOS will be called exynos-trng. If unsure, say Y. + +config HW_RANDOM_OPTEE + tristate "OP-TEE based Random Number Generator support" + depends on OPTEE + default HW_RANDOM + help + This driver provides support for OP-TEE based Random Number + Generator on ARM SoCs where hardware entropy sources are not + accessible to normal world (Linux). + + To compile this driver as a module, choose M here: the module + will be called optee-rng. + + If unsure, say Y. 
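The applicom.c changes above apply the standard Spectre-v1 hardening sequence: bounds-check the user-supplied index first, then clamp it with array_index_nospec() before it is used to index a kernel array, so a mispredicted branch cannot speculatively read out of bounds. A sketch of the same check factored into a helper; the driver does this inline, so the function below is hypothetical, but apbs[], RamIO and MAX_BOARD are the driver's own names:

    #include <linux/nospec.h>

    static void __iomem *board_ram(unsigned int index)
    {
            if (index >= MAX_BOARD)
                    return NULL;

            /* Clamp the value used for the array access under speculation */
            index = array_index_nospec(index, MAX_BOARD);
            return apbs[index].RamIO;
    }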
+ endif # HW_RANDOM config UML_RANDOM diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index e35ec3ce3a20..7c9ef4a7667f 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -38,3 +38,4 @@ obj-$(CONFIG_HW_RANDOM_CAVIUM) += cavium-rng.o cavium-rng-vf.o obj-$(CONFIG_HW_RANDOM_MTK) += mtk-rng.o obj-$(CONFIG_HW_RANDOM_S390) += s390-trng.o obj-$(CONFIG_HW_RANDOM_KEYSTONE) += ks-sa-rng.o +obj-$(CONFIG_HW_RANDOM_OPTEE) += optee-rng.o diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index 256b0b1d0f26..f759790c3cdb 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -168,14 +168,16 @@ static int bcm2835_rng_probe(struct platform_device *pdev) priv->rng.read = bcm2835_rng_read; priv->rng.cleanup = bcm2835_rng_cleanup; - rng_id = of_match_node(bcm2835_rng_of_match, np); - if (!rng_id) - return -EINVAL; - - /* Check for rng init function, execute it */ - of_data = rng_id->data; - if (of_data) - priv->mask_interrupts = of_data->mask_interrupts; + if (dev_of_node(dev)) { + rng_id = of_match_node(bcm2835_rng_of_match, np); + if (!rng_id) + return -EINVAL; + + /* Check for rng init function, execute it */ + of_data = rng_id->data; + if (of_data) + priv->mask_interrupts = of_data->mask_interrupts; + } /* register driver */ err = devm_hwrng_register(dev, &priv->rng); diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c new file mode 100644 index 000000000000..ddfbabaa5f8f --- /dev/null +++ b/drivers/char/hw_random/optee-rng.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018-2019 Linaro Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_NAME "optee-rng" + +#define TEE_ERROR_HEALTH_TEST_FAIL 0x00000001 + +/* + * TA_CMD_GET_ENTROPY - Get Entropy from RNG + * + * param[0] (inout memref) - Entropy buffer memory reference + * param[1] unused + * param[2] unused + * param[3] unused + * + * Result: + * TEE_SUCCESS - Invoke command success + * TEE_ERROR_BAD_PARAMETERS - Incorrect input param + * TEE_ERROR_NOT_SUPPORTED - Requested entropy size greater than size of pool + * TEE_ERROR_HEALTH_TEST_FAIL - Continuous health testing failed + */ +#define TA_CMD_GET_ENTROPY 0x0 + +/* + * TA_CMD_GET_RNG_INFO - Get RNG information + * + * param[0] (out value) - value.a: RNG data-rate in bytes per second + * value.b: Quality/Entropy per 1024 bit of data + * param[1] unused + * param[2] unused + * param[3] unused + * + * Result: + * TEE_SUCCESS - Invoke command success + * TEE_ERROR_BAD_PARAMETERS - Incorrect input param + */ +#define TA_CMD_GET_RNG_INFO 0x1 + +#define MAX_ENTROPY_REQ_SZ (4 * 1024) + +/** + * struct optee_rng_private - OP-TEE Random Number Generator private data + * @dev: OP-TEE based RNG device. + * @ctx: OP-TEE context handler. + * @session_id: RNG TA session identifier. + * @data_rate: RNG data rate. + * @entropy_shm_pool: Memory pool shared with RNG device. + * @optee_rng: OP-TEE RNG driver structure. 
+ */ +struct optee_rng_private { + struct device *dev; + struct tee_context *ctx; + u32 session_id; + u32 data_rate; + struct tee_shm *entropy_shm_pool; + struct hwrng optee_rng; +}; + +#define to_optee_rng_private(r) \ + container_of(r, struct optee_rng_private, optee_rng) + +static size_t get_optee_rng_data(struct optee_rng_private *pvt_data, + void *buf, size_t req_size) +{ + int ret = 0; + u8 *rng_data = NULL; + size_t rng_size = 0; + struct tee_ioctl_invoke_arg inv_arg; + struct tee_param param[4]; + + memset(&inv_arg, 0, sizeof(inv_arg)); + memset(¶m, 0, sizeof(param)); + + /* Invoke TA_CMD_GET_ENTROPY function of Trusted App */ + inv_arg.func = TA_CMD_GET_ENTROPY; + inv_arg.session = pvt_data->session_id; + inv_arg.num_params = 4; + + /* Fill invoke cmd params */ + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT; + param[0].u.memref.shm = pvt_data->entropy_shm_pool; + param[0].u.memref.size = req_size; + param[0].u.memref.shm_offs = 0; + + ret = tee_client_invoke_func(pvt_data->ctx, &inv_arg, param); + if ((ret < 0) || (inv_arg.ret != 0)) { + dev_err(pvt_data->dev, "TA_CMD_GET_ENTROPY invoke err: %x\n", + inv_arg.ret); + return 0; + } + + rng_data = tee_shm_get_va(pvt_data->entropy_shm_pool, 0); + if (IS_ERR(rng_data)) { + dev_err(pvt_data->dev, "tee_shm_get_va failed\n"); + return 0; + } + + rng_size = param[0].u.memref.size; + memcpy(buf, rng_data, rng_size); + + return rng_size; +} + +static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct optee_rng_private *pvt_data = to_optee_rng_private(rng); + size_t read = 0, rng_size = 0; + int timeout = 1; + u8 *data = buf; + + if (max > MAX_ENTROPY_REQ_SZ) + max = MAX_ENTROPY_REQ_SZ; + + while (read == 0) { + rng_size = get_optee_rng_data(pvt_data, data, (max - read)); + + data += rng_size; + read += rng_size; + + if (wait) { + if (timeout-- == 0) + return read; + msleep((1000 * (max - read)) / pvt_data->data_rate); + } else { + return read; + } + } + + return read; +} + +static int optee_rng_init(struct hwrng *rng) +{ + struct optee_rng_private *pvt_data = to_optee_rng_private(rng); + struct tee_shm *entropy_shm_pool = NULL; + + entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ, + TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); + if (IS_ERR(entropy_shm_pool)) { + dev_err(pvt_data->dev, "tee_shm_alloc failed\n"); + return PTR_ERR(entropy_shm_pool); + } + + pvt_data->entropy_shm_pool = entropy_shm_pool; + + return 0; +} + +static void optee_rng_cleanup(struct hwrng *rng) +{ + struct optee_rng_private *pvt_data = to_optee_rng_private(rng); + + tee_shm_free(pvt_data->entropy_shm_pool); +} + +static struct optee_rng_private pvt_data = { + .optee_rng = { + .name = DRIVER_NAME, + .init = optee_rng_init, + .cleanup = optee_rng_cleanup, + .read = optee_rng_read, + } +}; + +static int get_optee_rng_info(struct device *dev) +{ + int ret = 0; + struct tee_ioctl_invoke_arg inv_arg; + struct tee_param param[4]; + + memset(&inv_arg, 0, sizeof(inv_arg)); + memset(¶m, 0, sizeof(param)); + + /* Invoke TA_CMD_GET_RNG_INFO function of Trusted App */ + inv_arg.func = TA_CMD_GET_RNG_INFO; + inv_arg.session = pvt_data.session_id; + inv_arg.num_params = 4; + + /* Fill invoke cmd params */ + param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT; + + ret = tee_client_invoke_func(pvt_data.ctx, &inv_arg, param); + if ((ret < 0) || (inv_arg.ret != 0)) { + dev_err(dev, "TA_CMD_GET_RNG_INFO invoke err: %x\n", + inv_arg.ret); + return -EINVAL; + } + + pvt_data.data_rate = param[0].u.value.a; + pvt_data.optee_rng.quality = 
param[0].u.value.b; + + return 0; +} + +static int optee_ctx_match(struct tee_ioctl_version_data *ver, const void *data) +{ + if (ver->impl_id == TEE_IMPL_ID_OPTEE) + return 1; + else + return 0; +} + +static int optee_rng_probe(struct device *dev) +{ + struct tee_client_device *rng_device = to_tee_client_device(dev); + int ret = 0, err = -ENODEV; + struct tee_ioctl_open_session_arg sess_arg; + + memset(&sess_arg, 0, sizeof(sess_arg)); + + /* Open context with TEE driver */ + pvt_data.ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, + NULL); + if (IS_ERR(pvt_data.ctx)) + return -ENODEV; + + /* Open session with hwrng Trusted App */ + memcpy(sess_arg.uuid, rng_device->id.uuid.b, TEE_IOCTL_UUID_LEN); + sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; + sess_arg.num_params = 0; + + ret = tee_client_open_session(pvt_data.ctx, &sess_arg, NULL); + if ((ret < 0) || (sess_arg.ret != 0)) { + dev_err(dev, "tee_client_open_session failed, err: %x\n", + sess_arg.ret); + err = -EINVAL; + goto out_ctx; + } + pvt_data.session_id = sess_arg.session; + + err = get_optee_rng_info(dev); + if (err) + goto out_sess; + + err = hwrng_register(&pvt_data.optee_rng); + if (err) { + dev_err(dev, "hwrng registration failed (%d)\n", err); + goto out_sess; + } + + pvt_data.dev = dev; + + return 0; + +out_sess: + tee_client_close_session(pvt_data.ctx, pvt_data.session_id); +out_ctx: + tee_client_close_context(pvt_data.ctx); + + return err; +} + +static int optee_rng_remove(struct device *dev) +{ + hwrng_unregister(&pvt_data.optee_rng); + tee_client_close_session(pvt_data.ctx, pvt_data.session_id); + tee_client_close_context(pvt_data.ctx); + + return 0; +} + +static const struct tee_client_device_id optee_rng_id_table[] = { + {UUID_INIT(0xab7a617c, 0xb8e7, 0x4d8f, + 0x83, 0x01, 0xd0, 0x9b, 0x61, 0x03, 0x6b, 0x64)}, + {} +}; + +MODULE_DEVICE_TABLE(tee, optee_rng_id_table); + +static struct tee_client_driver optee_rng_driver = { + .id_table = optee_rng_id_table, + .driver = { + .name = DRIVER_NAME, + .bus = &tee_bus_type, + .probe = optee_rng_probe, + .remove = optee_rng_remove, + }, +}; + +static int __init optee_rng_mod_init(void) +{ + return driver_register(&optee_rng_driver.driver); +} + +static void __exit optee_rng_mod_exit(void) +{ + driver_unregister(&optee_rng_driver.driver); +} + +module_init(optee_rng_mod_init); +module_exit(optee_rng_mod_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sumit Garg "); +MODULE_DESCRIPTION("OP-TEE based random number generator driver"); diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index b89df66ea1ae..7abd604e938c 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait) if (!vi->busy) { vi->busy = true; - init_completion(&vi->have_data); + reinit_completion(&vi->have_data); register_buffer(vi, buf, size); } diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index c108441882cc..94719fc6ff9d 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -18,6 +18,10 @@ menuconfig IPMI_HANDLER If unsure, say N. config IPMI_DMI_DECODE + select IPMI_PLAT_DATA + bool + +config IPMI_PLAT_DATA bool if IPMI_HANDLER @@ -56,6 +60,7 @@ config IPMI_DEVICE_INTERFACE config IPMI_SI tristate 'IPMI System Interface handler' + select IPMI_PLAT_DATA help Provides a driver for System Interfaces (KCS, SMIC, BT). Currently, only KCS and SMIC are supported. 
If diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 7a3baf301a8f..3f06b2062475 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o obj-$(CONFIG_IPMI_SI) += ipmi_si.o obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o +obj-$(CONFIG_IPMI_PLAT_DATA) += ipmi_plat_data.o obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index effab11887ca..99c9f581a1f3 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -207,7 +207,7 @@ static int handle_recv(struct ipmi_file_private *priv, struct list_head *entry; struct ipmi_recv_msg *msg; unsigned long flags; - int rv = 0; + int rv = 0, rv2 = 0; /* We claim a mutex because we don't want two users getting something from the queue at a time. @@ -250,7 +250,7 @@ static int handle_recv(struct ipmi_file_private *priv, if (msg->msg.data_len > 0) { if (rsp->msg.data_len < msg->msg.data_len) { - rv = -EMSGSIZE; + rv2 = -EMSGSIZE; if (trunc) msg->msg.data_len = rsp->msg.data_len; else @@ -274,7 +274,7 @@ static int handle_recv(struct ipmi_file_private *priv, mutex_unlock(&priv->recv_mutex); ipmi_free_recv_msg(msg); - return 0; + return rv2; recv_putback_on_err: /* If we got an error, put the message back onto diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c index 249880457b17..ff0b199be472 100644 --- a/drivers/char/ipmi/ipmi_dmi.c +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -14,6 +14,7 @@ #include #include "ipmi_si_sm.h" #include "ipmi_dmi.h" +#include "ipmi_plat_data.h" #define IPMI_DMI_TYPE_KCS 0x01 #define IPMI_DMI_TYPE_SMIC 0x02 @@ -22,7 +23,7 @@ struct ipmi_dmi_info { enum si_type si_type; - u32 flags; + unsigned int space; /* addr space for si, intf# for ssif */ unsigned long addr; u8 slave_addr; struct ipmi_dmi_info *next; @@ -33,133 +34,60 @@ static struct ipmi_dmi_info *ipmi_dmi_infos; static int ipmi_dmi_nr __initdata; static void __init dmi_add_platform_ipmi(unsigned long base_addr, - u32 flags, + unsigned int space, u8 slave_addr, int irq, int offset, int type) { - struct platform_device *pdev; - struct resource r[4]; - unsigned int num_r = 1, size; - struct property_entry p[5]; - unsigned int pidx = 0; - char *name; - int rv; - enum si_type si_type; + const char *name; struct ipmi_dmi_info *info; + struct ipmi_plat_data p; - memset(p, 0, sizeof(p)); + memset(&p, 0, sizeof(p)); name = "dmi-ipmi-si"; switch (type) { case IPMI_DMI_TYPE_SSIF: name = "dmi-ipmi-ssif"; - offset = 1; - size = 1; - si_type = SI_TYPE_INVALID; + p.type = SI_TYPE_INVALID; break; case IPMI_DMI_TYPE_BT: - size = 3; - si_type = SI_BT; + p.type = SI_BT; break; case IPMI_DMI_TYPE_KCS: - size = 2; - si_type = SI_KCS; + p.type = SI_KCS; break; case IPMI_DMI_TYPE_SMIC: - size = 2; - si_type = SI_SMIC; + p.type = SI_SMIC; break; default: pr_err("Invalid IPMI type: %d\n", type); return; } - if (si_type != SI_TYPE_INVALID) - p[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", si_type); - - p[pidx++] = PROPERTY_ENTRY_U8("slave-addr", slave_addr); - p[pidx++] = PROPERTY_ENTRY_U8("addr-source", SI_SMBIOS); + memset(&p, 0, sizeof(p)); + p.addr = base_addr; + p.space = space; + p.regspacing = offset; + p.irq = irq; + p.slave_addr = slave_addr; + p.addr_source = SI_SMBIOS; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { 
pr_warn("Could not allocate dmi info\n"); } else { - info->si_type = si_type; - info->flags = flags; + info->si_type = p.type; + info->space = space; info->addr = base_addr; info->slave_addr = slave_addr; info->next = ipmi_dmi_infos; ipmi_dmi_infos = info; } - pdev = platform_device_alloc(name, ipmi_dmi_nr); - if (!pdev) { - pr_err("Error allocation IPMI platform device\n"); - return; - } - - if (type == IPMI_DMI_TYPE_SSIF) { - p[pidx++] = PROPERTY_ENTRY_U16("i2c-addr", base_addr); - goto add_properties; - } - - memset(r, 0, sizeof(r)); - - r[0].start = base_addr; - r[0].end = r[0].start + offset - 1; - r[0].name = "IPMI Address 1"; - r[0].flags = flags; - - if (size > 1) { - r[1].start = r[0].start + offset; - r[1].end = r[1].start + offset - 1; - r[1].name = "IPMI Address 2"; - r[1].flags = flags; - num_r++; - } - - if (size > 2) { - r[2].start = r[1].start + offset; - r[2].end = r[2].start + offset - 1; - r[2].name = "IPMI Address 3"; - r[2].flags = flags; - num_r++; - } - - if (irq) { - r[num_r].start = irq; - r[num_r].end = irq; - r[num_r].name = "IPMI IRQ"; - r[num_r].flags = IORESOURCE_IRQ; - num_r++; - } - - rv = platform_device_add_resources(pdev, r, num_r); - if (rv) { - dev_err(&pdev->dev, "Unable to add resources: %d\n", rv); - goto err; - } - -add_properties: - rv = platform_device_add_properties(pdev, p); - if (rv) { - dev_err(&pdev->dev, "Unable to add properties: %d\n", rv); - goto err; - } - - rv = platform_device_add(pdev); - if (rv) { - dev_err(&pdev->dev, "Unable to add device: %d\n", rv); - goto err; - } - - ipmi_dmi_nr++; - return; - -err: - platform_device_put(pdev); + if (ipmi_platform_add(name, ipmi_dmi_nr, &p)) + ipmi_dmi_nr++; } /* @@ -169,14 +97,14 @@ err: * This function allows an ACPI-specified IPMI device to look up the * slave address from the DMI table. */ -int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags, +int ipmi_dmi_get_slave_addr(enum si_type si_type, unsigned int space, unsigned long base_addr) { struct ipmi_dmi_info *info = ipmi_dmi_infos; while (info) { if (info->si_type == si_type && - info->flags == flags && + info->space == space && info->addr == base_addr) return info->slave_addr; info = info->next; @@ -197,13 +125,13 @@ EXPORT_SYMBOL(ipmi_dmi_get_slave_addr); static void __init dmi_decode_ipmi(const struct dmi_header *dm) { - const u8 *data = (const u8 *) dm; - u32 flags = IORESOURCE_IO; - unsigned long base_addr; - u8 len = dm->length; - u8 slave_addr; - int irq = 0, offset; - int type; + const u8 *data = (const u8 *) dm; + int space = IPMI_IO_ADDR_SPACE; + unsigned long base_addr; + u8 len = dm->length; + u8 slave_addr; + int irq = 0, offset = 0; + int type; if (len < DMI_IPMI_MIN_LENGTH) return; @@ -218,8 +146,7 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm) } if (len >= DMI_IPMI_VER2_LENGTH) { if (type == IPMI_DMI_TYPE_SSIF) { - offset = 0; - flags = 0; + space = 0; /* Match I2C interface 0. 
*/ base_addr = data[DMI_IPMI_ADDR] >> 1; if (base_addr == 0) { /* @@ -236,7 +163,7 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm) base_addr &= DMI_IPMI_IO_MASK; } else { /* Memory */ - flags = IORESOURCE_MEM; + space = IPMI_MEM_ADDR_SPACE; } /* @@ -280,7 +207,7 @@ static void __init dmi_decode_ipmi(const struct dmi_header *dm) offset = 1; } - dmi_add_platform_ipmi(base_addr, flags, slave_addr, irq, + dmi_add_platform_ipmi(base_addr, space, slave_addr, irq, offset, type); } diff --git a/drivers/char/ipmi/ipmi_dmi.h b/drivers/char/ipmi/ipmi_dmi.h index 8d2b094db8e6..2dbec0461d0c 100644 --- a/drivers/char/ipmi/ipmi_dmi.h +++ b/drivers/char/ipmi/ipmi_dmi.h @@ -4,6 +4,6 @@ */ #ifdef CONFIG_IPMI_DMI_DECODE -int ipmi_dmi_get_slave_addr(enum si_type si_type, u32 flags, +int ipmi_dmi_get_slave_addr(enum si_type si_type, unsigned int space, unsigned long base_addr); #endif diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index c518659b4d9f..e8ba67834746 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -529,9 +529,27 @@ struct ipmi_smi { unsigned int waiting_events_count; /* How many events in queue? */ char delivering_events; char event_msg_printed; + + /* How many users are waiting for events? */ atomic_t event_waiters; unsigned int ticks_to_req_ev; - int last_needs_timer; + + spinlock_t watch_lock; /* For dealing with watch stuff below. */ + + /* How many users are waiting for commands? */ + unsigned int command_waiters; + + /* How many users are waiting for watchdogs? */ + unsigned int watchdog_waiters; + + /* How many users are waiting for message responses? */ + unsigned int response_waiters; + + /* + * Tells what the lower layer has last been asked to watch for, + * messages and/or watchdogs. Protected by watch_lock. 
+ */ + unsigned int last_watch_mask; /* * The event receiver for my BMC, only really used at panic @@ -925,6 +943,64 @@ static void deliver_err_response(struct ipmi_smi *intf, deliver_local_response(intf, msg); } +static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags) +{ + unsigned long iflags; + + if (!intf->handlers->set_need_watch) + return; + + spin_lock_irqsave(&intf->watch_lock, iflags); + if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) + intf->response_waiters++; + + if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) + intf->watchdog_waiters++; + + if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) + intf->command_waiters++; + + if ((intf->last_watch_mask & flags) != flags) { + intf->last_watch_mask |= flags; + intf->handlers->set_need_watch(intf->send_info, + intf->last_watch_mask); + } + spin_unlock_irqrestore(&intf->watch_lock, iflags); +} + +static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags) +{ + unsigned long iflags; + + if (!intf->handlers->set_need_watch) + return; + + spin_lock_irqsave(&intf->watch_lock, iflags); + if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) + intf->response_waiters--; + + if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) + intf->watchdog_waiters--; + + if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) + intf->command_waiters--; + + flags = 0; + if (intf->response_waiters) + flags |= IPMI_WATCH_MASK_CHECK_MESSAGES; + if (intf->watchdog_waiters) + flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG; + if (intf->command_waiters) + flags |= IPMI_WATCH_MASK_CHECK_COMMANDS; + + if (intf->last_watch_mask != flags) { + intf->last_watch_mask = flags; + intf->handlers->set_need_watch(intf->send_info, + intf->last_watch_mask); + } + spin_unlock_irqrestore(&intf->watch_lock, iflags); +} + /* * Find the next sequence number not being used and add the given * message with the given timeout to the sequence table. This must be @@ -968,6 +1044,7 @@ static int intf_next_seq(struct ipmi_smi *intf, *seq = i; *seqid = intf->seq_table[i].seqid; intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; + smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); need_waiter(intf); } else { rv = -EAGAIN; @@ -1006,6 +1083,7 @@ static int intf_find_seq(struct ipmi_smi *intf, && (ipmi_addr_equal(addr, &msg->addr))) { *recv_msg = msg; intf->seq_table[seq].inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); rv = 0; } } @@ -1067,6 +1145,7 @@ static int intf_err_seq(struct ipmi_smi *intf, struct seq_table *ent = &intf->seq_table[seq]; ent->inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); msg = ent->recv_msg; rv = 0; } @@ -1078,7 +1157,6 @@ static int intf_err_seq(struct ipmi_smi *intf, return rv; } - int ipmi_create_user(unsigned int if_num, const struct ipmi_user_hndl *handler, void *handler_data, @@ -1139,11 +1217,9 @@ int ipmi_create_user(unsigned int if_num, spin_lock_irqsave(&intf->seq_lock, flags); list_add_rcu(&new_user->link, &intf->users); spin_unlock_irqrestore(&intf->seq_lock, flags); - if (handler->ipmi_watchdog_pretimeout) { + if (handler->ipmi_watchdog_pretimeout) /* User wants pretimeouts, so make sure to watch for them. 
*/ - if (atomic_inc_return(&intf->event_waiters) == 1) - need_waiter(intf); - } + smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); srcu_read_unlock(&ipmi_interfaces_srcu, index); *user = new_user; return 0; @@ -1214,7 +1290,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) user->handler->shutdown(user->handler_data); if (user->handler->ipmi_watchdog_pretimeout) - atomic_dec(&intf->event_waiters); + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); if (user->gets_events) atomic_dec(&intf->event_waiters); @@ -1227,6 +1303,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) if (intf->seq_table[i].inuse && (intf->seq_table[i].recv_msg->user == user)) { intf->seq_table[i].inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } @@ -1569,8 +1646,7 @@ int ipmi_register_for_cmd(struct ipmi_user *user, goto out_unlock; } - if (atomic_inc_return(&intf->event_waiters) == 1) - need_waiter(intf); + smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); @@ -1620,7 +1696,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user, synchronize_rcu(); release_ipmi_user(user, index); while (rcvrs) { - atomic_dec(&intf->event_waiters); + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); @@ -1737,22 +1813,19 @@ static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf, return smi_msg; } - static void smi_send(struct ipmi_smi *intf, const struct ipmi_smi_handlers *handlers, struct ipmi_smi_msg *smi_msg, int priority) { int run_to_completion = intf->run_to_completion; + unsigned long flags = 0; - if (run_to_completion) { - smi_msg = smi_add_send_msg(intf, smi_msg, priority); - } else { - unsigned long flags; - + if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); - smi_msg = smi_add_send_msg(intf, smi_msg, priority); + smi_msg = smi_add_send_msg(intf, smi_msg, priority); + + if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); - } if (smi_msg) handlers->sender(intf->send_info, smi_msg); @@ -2676,7 +2749,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr, if (!guid_set) return -ENOENT; - return snprintf(buf, 38, "%pUl\n", guid.b); + return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid); } static DEVICE_ATTR_RO(guid); @@ -3075,15 +3148,15 @@ static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) goto out; } - if (msg->msg.data_len < 17) { + if (msg->msg.data_len < UUID_SIZE + 1) { bmc->dyn_guid_set = 0; dev_warn(intf->si_dev, - "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n", - msg->msg.data_len); + "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", + msg->msg.data_len, UUID_SIZE + 1); goto out; } - memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16); + guid_copy(&bmc->fetch_guid, (guid_t *)(msg->msg.data + 1)); /* * Make sure the guid data is available before setting * dyn_guid_set. 
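
/*
 * [Editor's note: illustrative sketch, not part of the commit above.]
 * The new smi_add_watch()/smi_remove_watch() pair keeps one counter per
 * reason to watch (message responses, watchdog pretimeouts, incoming
 * commands) and pushes the combined IPMI_WATCH_MASK_* bitmask down via
 * handlers->set_need_watch() only when that mask actually changes.  A
 * minimal lower-layer handler could reduce the mask to "poll or don't
 * poll", which is essentially what ipmi_si does, while ipmi_ssif picks
 * a poll interval per bit.  Everything named "example_*" below is
 * invented for illustration; the mask bits and the callback signature
 * come from the patch.
 */
#include <linux/atomic.h>
#include <linux/ipmi_smi.h>

struct example_smi {
        atomic_t need_watch;    /* sampled by the driver's poll loop */
};

static void example_set_need_watch(void *send_info, unsigned int watch_mask)
{
        struct example_smi *smi = send_info;

        /*
         * Any set bit means some user is waiting for a response, a
         * watchdog pretimeout or a command, so the BMC must keep being
         * polled; an empty mask lets the driver go quiet.
         */
        atomic_set(&smi->need_watch, watch_mask != 0);
}
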
@@ -3350,6 +3423,7 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers, INIT_LIST_HEAD(&intf->xmit_msgs); INIT_LIST_HEAD(&intf->hp_xmit_msgs); spin_lock_init(&intf->events_lock); + spin_lock_init(&intf->watch_lock); atomic_set(&intf->event_waiters, 0); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; INIT_LIST_HEAD(&intf->waiting_events); @@ -4365,6 +4439,7 @@ static void smi_recv_tasklet(unsigned long val) intf->curr_msg = newmsg; } } + if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (newmsg) @@ -4492,7 +4567,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, struct list_head *timeouts, unsigned long timeout_period, int slot, unsigned long *flags, - unsigned int *waiting_msgs) + bool *need_timer) { struct ipmi_recv_msg *msg; @@ -4504,13 +4579,14 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, if (timeout_period < ent->timeout) { ent->timeout -= timeout_period; - (*waiting_msgs)++; + *need_timer = true; return; } if (ent->retries_left == 0) { /* The message has used all its retries. */ ent->inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); msg = ent->recv_msg; list_add_tail(&msg->link, timeouts); if (ent->broadcast) @@ -4523,7 +4599,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, struct ipmi_smi_msg *smi_msg; /* More retries, send again. */ - (*waiting_msgs)++; + *need_timer = true; /* * Start with the max timer, set to normal timer after @@ -4568,20 +4644,20 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, } } -static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf, - unsigned long timeout_period) +static bool ipmi_timeout_handler(struct ipmi_smi *intf, + unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; unsigned long flags; int i; - unsigned int waiting_msgs = 0; + bool need_timer = false; if (!intf->bmc_registered) { kref_get(&intf->refcount); if (!schedule_work(&intf->bmc_reg_work)) { kref_put(&intf->refcount, intf_free); - waiting_msgs++; + need_timer = true; } } @@ -4601,7 +4677,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf, for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) check_msg_timeout(intf, &intf->seq_table[i], &timeouts, timeout_period, i, - &flags, &waiting_msgs); + &flags, &need_timer); spin_unlock_irqrestore(&intf->seq_lock, flags); list_for_each_entry_safe(msg, msg2, &timeouts, link) @@ -4632,7 +4708,7 @@ static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf, tasklet_schedule(&intf->recv_tasklet); - return waiting_msgs; + return need_timer; } static void ipmi_request_event(struct ipmi_smi *intf) @@ -4652,37 +4728,28 @@ static atomic_t stop_operation; static void ipmi_timeout(struct timer_list *unused) { struct ipmi_smi *intf; - int nt = 0, index; + bool need_timer = false; + int index; if (atomic_read(&stop_operation)) return; index = srcu_read_lock(&ipmi_interfaces_srcu); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - int lnt = 0; - if (atomic_read(&intf->event_waiters)) { intf->ticks_to_req_ev--; if (intf->ticks_to_req_ev == 0) { ipmi_request_event(intf); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; } - lnt++; + need_timer = true; } - lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); - - lnt = !!lnt; - if (lnt != intf->last_needs_timer && - intf->handlers->set_need_watch) - intf->handlers->set_need_watch(intf->send_info, lnt); - intf->last_needs_timer = lnt; - - nt += lnt; + need_timer |= 
ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); } srcu_read_unlock(&ipmi_interfaces_srcu, index); - if (nt) + if (need_timer) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } diff --git a/drivers/char/ipmi/ipmi_plat_data.c b/drivers/char/ipmi/ipmi_plat_data.c new file mode 100644 index 000000000000..8f0ca2a848eb --- /dev/null +++ b/drivers/char/ipmi/ipmi_plat_data.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Add an IPMI platform device. + */ + +#include +#include "ipmi_plat_data.h" +#include "ipmi_si.h" + +struct platform_device *ipmi_platform_add(const char *name, unsigned int inst, + struct ipmi_plat_data *p) +{ + struct platform_device *pdev; + unsigned int num_r = 1, size, pidx = 0; + struct resource r[4]; + struct property_entry pr[6]; + u32 flags; + int rv; + + memset(pr, 0, sizeof(pr)); + memset(r, 0, sizeof(r)); + + if (p->type == SI_BT) + size = 3; + else if (p->type == SI_TYPE_INVALID) + size = 0; + else + size = 2; + + if (p->regsize == 0) + p->regsize = DEFAULT_REGSIZE; + if (p->regspacing == 0) + p->regspacing = p->regsize; + + pr[pidx++] = PROPERTY_ENTRY_U8("ipmi-type", p->type); + if (p->slave_addr) + pr[pidx++] = PROPERTY_ENTRY_U8("slave-addr", p->slave_addr); + pr[pidx++] = PROPERTY_ENTRY_U8("addr-source", p->addr_source); + if (p->regshift) + pr[pidx++] = PROPERTY_ENTRY_U8("reg-shift", p->regshift); + pr[pidx++] = PROPERTY_ENTRY_U8("reg-size", p->regsize); + /* Last entry must be left NULL to terminate it. */ + + pdev = platform_device_alloc(name, inst); + if (!pdev) { + pr_err("Error allocating IPMI platform device %s.%d\n", + name, inst); + return NULL; + } + + if (size == 0) + /* An invalid or SSIF interface, no resources. */ + goto add_properties; + + /* + * Register spacing is derived from the resources in + * the IPMI platform code. + */ + + if (p->space == IPMI_IO_ADDR_SPACE) + flags = IORESOURCE_IO; + else + flags = IORESOURCE_MEM; + + r[0].start = p->addr; + r[0].end = r[0].start + p->regsize - 1; + r[0].name = "IPMI Address 1"; + r[0].flags = flags; + + if (size > 1) { + r[1].start = r[0].start + p->regspacing; + r[1].end = r[1].start + p->regsize - 1; + r[1].name = "IPMI Address 2"; + r[1].flags = flags; + num_r++; + } + + if (size > 2) { + r[2].start = r[1].start + p->regspacing; + r[2].end = r[2].start + p->regsize - 1; + r[2].name = "IPMI Address 3"; + r[2].flags = flags; + num_r++; + } + + if (p->irq) { + r[num_r].start = p->irq; + r[num_r].end = p->irq; + r[num_r].name = "IPMI IRQ"; + r[num_r].flags = IORESOURCE_IRQ; + num_r++; + } + + rv = platform_device_add_resources(pdev, r, num_r); + if (rv) { + dev_err(&pdev->dev, + "Unable to add hard-code resources: %d\n", rv); + goto err; + } + add_properties: + rv = platform_device_add_properties(pdev, pr); + if (rv) { + dev_err(&pdev->dev, + "Unable to add hard-code properties: %d\n", rv); + goto err; + } + + rv = platform_device_add(pdev); + if (rv) { + dev_err(&pdev->dev, + "Unable to add hard-code device: %d\n", rv); + goto err; + } + return pdev; + +err: + platform_device_put(pdev); + return NULL; +} +EXPORT_SYMBOL(ipmi_platform_add); diff --git a/drivers/char/ipmi/ipmi_plat_data.h b/drivers/char/ipmi/ipmi_plat_data.h new file mode 100644 index 000000000000..567cfcec8ada --- /dev/null +++ b/drivers/char/ipmi/ipmi_plat_data.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +/* + * Generic code to add IPMI platform devices. 
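
/*
 * [Editor's note: illustrative sketch, not part of the commit above.]
 * ipmi_platform_add() turns a filled-in struct ipmi_plat_data into a
 * platform device carrying "ipmi-type"/"slave-addr"/"addr-source"/
 * "reg-shift"/"reg-size" properties plus up to three register resources
 * and an optional IRQ resource.  A caller registering a hypothetical
 * KCS interface at I/O port 0xca2 with IRQ 9 might look like the sketch
 * below; example_register_kcs(), the device name and the concrete
 * numbers are invented, the structure fields, constants and helper are
 * from the patch (the includes are indicative only).
 */
#include <linux/platform_device.h>
#include <linux/string.h>
#include "ipmi_plat_data.h"
#include "ipmi_si.h"            /* SI_KCS, IPMI_IO_ADDR_SPACE, DEFAULT_REGSIZE */

static struct platform_device *example_register_kcs(void)
{
        struct ipmi_plat_data p;

        memset(&p, 0, sizeof(p));
        p.type = SI_KCS;
        p.space = IPMI_IO_ADDR_SPACE;
        p.addr = 0xca2;
        p.regsize = 1;          /* 0 would fall back to DEFAULT_REGSIZE */
        p.regspacing = 1;       /* 0 would fall back to p.regsize */
        p.irq = 9;
        p.addr_source = SI_HARDCODED;

        /* Yields resources "IPMI Address 1"/"IPMI Address 2" and "IPMI IRQ". */
        return ipmi_platform_add("example-ipmi-si", 0, &p);
}
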
+ */ + +#include + +struct ipmi_plat_data { + unsigned int type; /* si_type for si, SI_INVALID for others */ + unsigned int space; /* addr_space for si, intf# for ssif. */ + unsigned long addr; + unsigned int regspacing; + unsigned int regsize; + unsigned int regshift; + unsigned int irq; + unsigned int slave_addr; + enum ipmi_addr_src addr_source; +}; + +struct platform_device *ipmi_platform_add(const char *name, unsigned int inst, + struct ipmi_plat_data *p); diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h index 52f6152d1fcb..357a229c9012 100644 --- a/drivers/char/ipmi/ipmi_si.h +++ b/drivers/char/ipmi/ipmi_si.h @@ -7,11 +7,9 @@ */ #include +#include #include "ipmi_si_sm.h" -#define IPMI_IO_ADDR_SPACE 0 -#define IPMI_MEM_ADDR_SPACE 1 - #define DEFAULT_REGSPACING 1 #define DEFAULT_REGSIZE 1 @@ -23,11 +21,15 @@ void ipmi_irq_start_cleanup(struct si_sm_io *io); int ipmi_std_irq_setup(struct si_sm_io *io); void ipmi_irq_finish_setup(struct si_sm_io *io); int ipmi_si_remove_by_dev(struct device *dev); -void ipmi_si_remove_by_data(int addr_space, enum si_type si_type, - unsigned long addr); -int ipmi_si_hardcode_find_bmc(void); +struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type, + unsigned long addr); +void ipmi_hardcode_init(void); +void ipmi_si_hardcode_exit(void); +void ipmi_si_hotmod_exit(void); +int ipmi_si_hardcode_match(int addr_space, unsigned long addr); void ipmi_si_platform_init(void); void ipmi_si_platform_shutdown(void); +void ipmi_remove_platform_device_by_name(char *name); extern struct platform_driver ipmi_platform_driver; diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c index 487642809c58..01946cad3d13 100644 --- a/drivers/char/ipmi/ipmi_si_hardcode.c +++ b/drivers/char/ipmi/ipmi_si_hardcode.c @@ -3,7 +3,9 @@ #define pr_fmt(fmt) "ipmi_hardcode: " fmt #include +#include #include "ipmi_si.h" +#include "ipmi_plat_data.h" /* * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, @@ -12,23 +14,22 @@ #define SI_MAX_PARMS 4 -static char *si_type[SI_MAX_PARMS]; #define MAX_SI_TYPE_STR 30 -static char si_type_str[MAX_SI_TYPE_STR]; +static char si_type_str[MAX_SI_TYPE_STR] __initdata; static unsigned long addrs[SI_MAX_PARMS]; static unsigned int num_addrs; static unsigned int ports[SI_MAX_PARMS]; static unsigned int num_ports; -static int irqs[SI_MAX_PARMS]; -static unsigned int num_irqs; -static int regspacings[SI_MAX_PARMS]; -static unsigned int num_regspacings; -static int regsizes[SI_MAX_PARMS]; -static unsigned int num_regsizes; -static int regshifts[SI_MAX_PARMS]; -static unsigned int num_regshifts; -static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */ -static unsigned int num_slave_addrs; +static int irqs[SI_MAX_PARMS] __initdata; +static unsigned int num_irqs __initdata; +static int regspacings[SI_MAX_PARMS] __initdata; +static unsigned int num_regspacings __initdata; +static int regsizes[SI_MAX_PARMS] __initdata; +static unsigned int num_regsizes __initdata; +static int regshifts[SI_MAX_PARMS] __initdata; +static unsigned int num_regshifts __initdata; +static int slave_addrs[SI_MAX_PARMS] __initdata; +static unsigned int num_slave_addrs __initdata; module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0); MODULE_PARM_DESC(type, "Defines the type of each interface, each" @@ -73,12 +74,49 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" " overridden by this parm. 
This is an array indexed" " by interface number."); -int ipmi_si_hardcode_find_bmc(void) +static void __init ipmi_hardcode_init_one(const char *si_type_str, + unsigned int i, + unsigned long addr, + enum ipmi_addr_space addr_space) { - int ret = -ENODEV; - int i; - struct si_sm_io io; + struct ipmi_plat_data p; + + memset(&p, 0, sizeof(p)); + + if (!si_type_str || !*si_type_str || strcmp(si_type_str, "kcs") == 0) { + p.type = SI_KCS; + } else if (strcmp(si_type_str, "smic") == 0) { + p.type = SI_SMIC; + } else if (strcmp(si_type_str, "bt") == 0) { + p.type = SI_BT; + } else if (strcmp(si_type_str, "invalid") == 0) { + /* + * Allow a firmware-specified interface to be + * disabled. + */ + p.type = SI_TYPE_INVALID; + } else { + pr_warn("Interface type specified for interface %d, was invalid: %s\n", + i, si_type_str); + return; + } + + p.regsize = regsizes[i]; + p.slave_addr = slave_addrs[i]; + p.addr_source = SI_HARDCODED; + p.regshift = regshifts[i]; + p.regsize = regsizes[i]; + p.addr = addr; + p.space = addr_space; + + ipmi_platform_add("hardcode-ipmi-si", i, &p); +} + +void __init ipmi_hardcode_init(void) +{ + unsigned int i; char *str; + char *si_type[SI_MAX_PARMS]; /* Parse out the si_type string into its components. */ str = si_type_str; @@ -95,54 +133,41 @@ int ipmi_si_hardcode_find_bmc(void) } } - memset(&io, 0, sizeof(io)); for (i = 0; i < SI_MAX_PARMS; i++) { - if (!ports[i] && !addrs[i]) - continue; - - io.addr_source = SI_HARDCODED; - pr_info("probing via hardcoded address\n"); - - if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { - io.si_type = SI_KCS; - } else if (strcmp(si_type[i], "smic") == 0) { - io.si_type = SI_SMIC; - } else if (strcmp(si_type[i], "bt") == 0) { - io.si_type = SI_BT; - } else { - pr_warn("Interface type specified for interface %d, was invalid: %s\n", - i, si_type[i]); - continue; - } + if (i < num_ports && ports[i]) + ipmi_hardcode_init_one(si_type[i], i, ports[i], + IPMI_IO_ADDR_SPACE); + if (i < num_addrs && addrs[i]) + ipmi_hardcode_init_one(si_type[i], i, addrs[i], + IPMI_MEM_ADDR_SPACE); + } +} - if (ports[i]) { - /* An I/O port */ - io.addr_data = ports[i]; - io.addr_type = IPMI_IO_ADDR_SPACE; - } else if (addrs[i]) { - /* A memory port */ - io.addr_data = addrs[i]; - io.addr_type = IPMI_MEM_ADDR_SPACE; - } else { - pr_warn("Interface type specified for interface %d, but port and address were not set or set to zero\n", - i); - continue; - } - io.addr = NULL; - io.regspacing = regspacings[i]; - if (!io.regspacing) - io.regspacing = DEFAULT_REGSPACING; - io.regsize = regsizes[i]; - if (!io.regsize) - io.regsize = DEFAULT_REGSIZE; - io.regshift = regshifts[i]; - io.irq = irqs[i]; - if (io.irq) - io.irq_setup = ipmi_std_irq_setup; - io.slave_addr = slave_addrs[i]; - - ret = ipmi_si_add_smi(&io); +void ipmi_si_hardcode_exit(void) +{ + ipmi_remove_platform_device_by_name("hardcode-ipmi-si"); +} + +/* + * Returns true of the given address exists as a hardcoded address, + * false if not. 
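
/*
 * [Editor's note: illustrative sketch, not part of the commit above.]
 * With this rework the hard-coded module parameters no longer fill a
 * struct si_sm_io directly; ipmi_hardcode_init() splits the type string
 * and registers one "hardcode-ipmi-si" platform device per configured
 * port/address, which the normal platform probe path then picks up.
 * Assuming the usual ipmi_si parameters, e.g. type=kcs,bt and
 * ports=0xca2,0xe4, the loop ends up doing the equivalent of the two
 * calls below; example_hardcode() is an invented wrapper, the helper
 * and constants are from the patch.
 */
static void __init example_hardcode(void)
{
        /* interface 0: KCS at I/O port 0xca2 */
        ipmi_hardcode_init_one("kcs", 0, 0xca2, IPMI_IO_ADDR_SPACE);

        /* interface 1: BT at I/O port 0xe4 */
        ipmi_hardcode_init_one("bt", 1, 0xe4, IPMI_IO_ADDR_SPACE);
}
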
+ */ +int ipmi_si_hardcode_match(int addr_space, unsigned long addr) +{ + unsigned int i; + + if (addr_space == IPMI_IO_ADDR_SPACE) { + for (i = 0; i < num_ports; i++) { + if (ports[i] == addr) + return 1; + } + } else { + for (i = 0; i < num_addrs; i++) { + if (addrs[i] == addr) + return 1; + } } - return ret; + + return 0; } diff --git a/drivers/char/ipmi/ipmi_si_hotmod.c b/drivers/char/ipmi/ipmi_si_hotmod.c index c0067fd0480d..03140f6cdf6f 100644 --- a/drivers/char/ipmi/ipmi_si_hotmod.c +++ b/drivers/char/ipmi/ipmi_si_hotmod.c @@ -10,7 +10,9 @@ #include #include +#include #include "ipmi_si.h" +#include "ipmi_plat_data.h" static int hotmod_handler(const char *val, const struct kernel_param *kp); @@ -54,8 +56,8 @@ static const struct hotmod_vals hotmod_as[] = { { NULL } }; -static int parse_str(const struct hotmod_vals *v, int *val, char *name, - char **curr) +static int parse_str(const struct hotmod_vals *v, unsigned int *val, char *name, + const char **curr) { char *s; int i; @@ -80,7 +82,7 @@ static int parse_str(const struct hotmod_vals *v, int *val, char *name, } static int check_hotmod_int_op(const char *curr, const char *option, - const char *name, int *val) + const char *name, unsigned int *val) { char *n; @@ -99,22 +101,94 @@ static int check_hotmod_int_op(const char *curr, const char *option, return 0; } +static int parse_hotmod_str(const char *curr, enum hotmod_op *op, + struct ipmi_plat_data *h) +{ + char *s, *o; + int rv; + unsigned int ival; + + rv = parse_str(hotmod_ops, &ival, "operation", &curr); + if (rv) + return rv; + *op = ival; + + rv = parse_str(hotmod_si, &ival, "interface type", &curr); + if (rv) + return rv; + h->type = ival; + + rv = parse_str(hotmod_as, &ival, "address space", &curr); + if (rv) + return rv; + h->space = ival; + + s = strchr(curr, ','); + if (s) { + *s = '\0'; + s++; + } + rv = kstrtoul(curr, 0, &h->addr); + if (rv) { + pr_warn("Invalid hotmod address '%s': %d\n", curr, rv); + return rv; + } + + while (s) { + curr = s; + s = strchr(curr, ','); + if (s) { + *s = '\0'; + s++; + } + o = strchr(curr, '='); + if (o) { + *o = '\0'; + o++; + } + rv = check_hotmod_int_op(curr, o, "rsp", &h->regspacing); + if (rv < 0) + return rv; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "rsi", &h->regsize); + if (rv < 0) + return rv; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "rsh", &h->regshift); + if (rv < 0) + return rv; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "irq", &h->irq); + if (rv < 0) + return rv; + else if (rv) + continue; + rv = check_hotmod_int_op(curr, o, "ipmb", &h->slave_addr); + if (rv < 0) + return rv; + else if (rv) + continue; + + pr_warn("Invalid hotmod option '%s'\n", curr); + return -EINVAL; + } + + h->addr_source = SI_HOTMOD; + return 0; +} + +static atomic_t hotmod_nr; + static int hotmod_handler(const char *val, const struct kernel_param *kp) { - char *str = kstrdup(val, GFP_KERNEL); + char *str = kstrdup(val, GFP_KERNEL), *curr, *next; int rv; - char *next, *curr, *s, *n, *o; - enum hotmod_op op; - enum si_type si_type; - int addr_space; - unsigned long addr; - int regspacing; - int regsize; - int regshift; - int irq; - int ipmb; + struct ipmi_plat_data h; + unsigned int len; int ival; - int len; if (!str) return -ENOMEM; @@ -128,11 +202,7 @@ static int hotmod_handler(const char *val, const struct kernel_param *kp) } for (curr = str; curr; curr = next) { - regspacing = 1; - regsize = 1; - regshift = 0; - irq = 0; - ipmb = 0; /* Choose the default if not specified */ + enum 
hotmod_op op; next = strchr(curr, ':'); if (next) { @@ -140,101 +210,28 @@ static int hotmod_handler(const char *val, const struct kernel_param *kp) next++; } - rv = parse_str(hotmod_ops, &ival, "operation", &curr); - if (rv) - break; - op = ival; - - rv = parse_str(hotmod_si, &ival, "interface type", &curr); + memset(&h, 0, sizeof(h)); + rv = parse_hotmod_str(curr, &op, &h); if (rv) - break; - si_type = ival; - - rv = parse_str(hotmod_as, &addr_space, "address space", &curr); - if (rv) - break; - - s = strchr(curr, ','); - if (s) { - *s = '\0'; - s++; - } - addr = simple_strtoul(curr, &n, 0); - if ((*n != '\0') || (*curr == '\0')) { - pr_warn("Invalid hotmod address '%s'\n", curr); - break; - } - - while (s) { - curr = s; - s = strchr(curr, ','); - if (s) { - *s = '\0'; - s++; - } - o = strchr(curr, '='); - if (o) { - *o = '\0'; - o++; - } - rv = check_hotmod_int_op(curr, o, "rsp", ®spacing); - if (rv < 0) - goto out; - else if (rv) - continue; - rv = check_hotmod_int_op(curr, o, "rsi", ®size); - if (rv < 0) - goto out; - else if (rv) - continue; - rv = check_hotmod_int_op(curr, o, "rsh", ®shift); - if (rv < 0) - goto out; - else if (rv) - continue; - rv = check_hotmod_int_op(curr, o, "irq", &irq); - if (rv < 0) - goto out; - else if (rv) - continue; - rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb); - if (rv < 0) - goto out; - else if (rv) - continue; - - rv = -EINVAL; - pr_warn("Invalid hotmod option '%s'\n", curr); goto out; - } if (op == HM_ADD) { - struct si_sm_io io; - - memset(&io, 0, sizeof(io)); - io.addr_source = SI_HOTMOD; - io.si_type = si_type; - io.addr_data = addr; - io.addr_type = addr_space; - - io.addr = NULL; - io.regspacing = regspacing; - if (!io.regspacing) - io.regspacing = DEFAULT_REGSPACING; - io.regsize = regsize; - if (!io.regsize) - io.regsize = DEFAULT_REGSIZE; - io.regshift = regshift; - io.irq = irq; - if (io.irq) - io.irq_setup = ipmi_std_irq_setup; - io.slave_addr = ipmb; - - rv = ipmi_si_add_smi(&io); - if (rv) - goto out; + ipmi_platform_add("hotmod-ipmi-si", + atomic_inc_return(&hotmod_nr), + &h); } else { - ipmi_si_remove_by_data(addr_space, si_type, addr); + struct device *dev; + + dev = ipmi_si_remove_by_data(h.space, h.type, h.addr); + if (dev && dev_is_platform(dev)) { + struct platform_device *pdev; + + pdev = to_platform_device(dev); + if (strcmp(pdev->name, "hotmod-ipmi-si") == 0) + platform_device_unregister(pdev); + } + if (dev) + put_device(dev); } } rv = len; @@ -242,3 +239,8 @@ out: kfree(str); return rv; } + +void ipmi_si_hotmod_exit(void) +{ + ipmi_remove_platform_device_by_name("hotmod-ipmi-si"); +} diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index dc8603d34320..b1732882b97e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -229,15 +229,9 @@ struct smi_info { /* From the get device id response... */ struct ipmi_device_id device_id; - /* Default driver model device. */ - struct platform_device *pdev; - /* Have we added the device group to the device? */ bool dev_group_added; - /* Have we added the platform device? */ - bool pdev_registered; - /* Counters and things for the proc filesystem. 
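
/*
 * [Editor's note: illustrative sketch, not part of the commit above.]
 * parse_hotmod_str() consumes one comma-separated hotmod entry of the
 * form "<add|remove>,<kcs|smic|bt>,<i/o|mem>,<address>[,<options>]"
 * (the token spellings follow the driver's documented hotmod syntax and
 * are an assumption here, since the hotmod_si/hotmod_as tables are not
 * visible in this hunk).  For an entry such as
 *
 *      add,kcs,i/o,0xca2,rsp=2,irq=7
 *
 * the resulting ipmi_plat_data is roughly the initializer below, which
 * hotmod_handler() then hands to ipmi_platform_add("hotmod-ipmi-si", ...);
 * a "remove" entry instead looks the interface up via
 * ipmi_si_remove_by_data() and unregisters the matching hotmod platform
 * device.
 */
static const struct ipmi_plat_data example_hotmod_result = {
        .type        = SI_KCS,                 /* "kcs"   */
        .space       = IPMI_IO_ADDR_SPACE,     /* "i/o"   */
        .addr        = 0xca2,
        .regspacing  = 2,                      /* rsp=2   */
        .irq         = 7,                      /* irq=7   */
        .addr_source = SI_HOTMOD,
};
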
*/ atomic_t stats[SI_NUM_STATS]; @@ -1060,10 +1054,13 @@ static void request_events(void *send_info) atomic_set(&smi_info->req_events, 1); } -static void set_need_watch(void *send_info, bool enable) +static void set_need_watch(void *send_info, unsigned int watch_mask) { struct smi_info *smi_info = send_info; unsigned long flags; + int enable; + + enable = !!watch_mask; atomic_set(&smi_info->need_watch, enable); spin_lock_irqsave(&smi_info->si_lock, flags); @@ -1642,7 +1639,7 @@ static ssize_t ipmi_params_show(struct device *dev, return snprintf(buf, 200, "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", si_to_str[smi_info->io.si_type], - addr_space_to_str[smi_info->io.addr_type], + addr_space_to_str[smi_info->io.addr_space], smi_info->io.addr_data, smi_info->io.regspacing, smi_info->io.regsize, @@ -1840,7 +1837,7 @@ static struct smi_info *find_dup_si(struct smi_info *info) struct smi_info *e; list_for_each_entry(e, &smi_infos, link) { - if (e->io.addr_type != info->io.addr_type) + if (e->io.addr_space != info->io.addr_space) continue; if (e->io.addr_data == info->io.addr_data) { /* @@ -1862,10 +1859,22 @@ int ipmi_si_add_smi(struct si_sm_io *io) int rv = 0; struct smi_info *new_smi, *dup; + /* + * If the user gave us a hard-coded device at the same + * address, they presumably want us to use it and not what is + * in the firmware. + */ + if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD && + ipmi_si_hardcode_match(io->addr_space, io->addr_data)) { + dev_info(io->dev, + "Hard-coded device at this address already exists"); + return -ENODEV; + } + if (!io->io_setup) { - if (io->addr_type == IPMI_IO_ADDR_SPACE) { + if (io->addr_space == IPMI_IO_ADDR_SPACE) { io->io_setup = ipmi_si_port_setup; - } else if (io->addr_type == IPMI_MEM_ADDR_SPACE) { + } else if (io->addr_space == IPMI_MEM_ADDR_SPACE) { io->io_setup = ipmi_si_mem_setup; } else { return -EINVAL; @@ -1927,7 +1936,7 @@ static int try_smi_init(struct smi_info *new_smi) pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n", ipmi_addr_src_to_str(new_smi->io.addr_source), si_to_str[new_smi->io.si_type], - addr_space_to_str[new_smi->io.addr_type], + addr_space_to_str[new_smi->io.addr_space], new_smi->io.addr_data, new_smi->io.slave_addr, new_smi->io.irq); @@ -1954,24 +1963,9 @@ static int try_smi_init(struct smi_info *new_smi) /* Do this early so it's available for logs. */ if (!new_smi->io.dev) { - init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d", - new_smi->si_num); - - /* - * If we don't already have a device from something - * else (like PCI), then register a new one. - */ - new_smi->pdev = platform_device_alloc("ipmi_si", - new_smi->si_num); - if (!new_smi->pdev) { - pr_err("Unable to allocate platform device\n"); - rv = -ENOMEM; - goto out_err; - } - new_smi->io.dev = &new_smi->pdev->dev; - new_smi->io.dev->driver = &ipmi_platform_driver.driver; - /* Nulled by device_add() */ - new_smi->io.dev->init_name = init_name; + pr_err("IPMI interface added with no device\n"); + rv = EIO; + goto out_err; } /* Allocate the state machine's data and initialize it. 
*/ @@ -2044,17 +2038,6 @@ static int try_smi_init(struct smi_info *new_smi) atomic_set(&new_smi->req_events, 1); } - if (new_smi->pdev && !new_smi->pdev_registered) { - rv = platform_device_add(new_smi->pdev); - if (rv) { - dev_err(new_smi->io.dev, - "Unable to register system interface device: %d\n", - rv); - goto out_err; - } - new_smi->pdev_registered = true; - } - dev_set_drvdata(new_smi->io.dev, new_smi); rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group); if (rv) { @@ -2085,11 +2068,16 @@ static int try_smi_init(struct smi_info *new_smi) WARN_ON(new_smi->io.dev->init_name != NULL); out_err: + if (rv && new_smi->io.io_cleanup) { + new_smi->io.io_cleanup(&new_smi->io); + new_smi->io.io_cleanup = NULL; + } + kfree(init_name); return rv; } -static int init_ipmi_si(void) +static int __init init_ipmi_si(void) { struct smi_info *e; enum ipmi_addr_src type = SI_INVALID; @@ -2097,11 +2085,9 @@ static int init_ipmi_si(void) if (initialized) return 0; - pr_info("IPMI System Interface driver\n"); + ipmi_hardcode_init(); - /* If the user gave us a device, they presumably want us to use it */ - if (!ipmi_si_hardcode_find_bmc()) - goto do_scan; + pr_info("IPMI System Interface driver\n"); ipmi_si_platform_init(); @@ -2113,7 +2099,6 @@ static int init_ipmi_si(void) with multiple BMCs we assume that there will be several instances of a given type so if we succeed in registering a type then also try to register everything else of the same type */ -do_scan: mutex_lock(&smi_infos_lock); list_for_each_entry(e, &smi_infos, link) { /* Try to register a device if it has an IRQ and we either @@ -2236,13 +2221,6 @@ static void cleanup_one_si(struct smi_info *smi_info) if (smi_info->intf) ipmi_unregister_smi(smi_info->intf); - if (smi_info->pdev) { - if (smi_info->pdev_registered) - platform_device_unregister(smi_info->pdev); - else - platform_device_put(smi_info->pdev); - } - kfree(smi_info); } @@ -2264,22 +2242,27 @@ int ipmi_si_remove_by_dev(struct device *dev) return rv; } -void ipmi_si_remove_by_data(int addr_space, enum si_type si_type, - unsigned long addr) +struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type, + unsigned long addr) { /* remove */ struct smi_info *e, *tmp_e; + struct device *dev = NULL; mutex_lock(&smi_infos_lock); list_for_each_entry_safe(e, tmp_e, &smi_infos, link) { - if (e->io.addr_type != addr_space) + if (e->io.addr_space != addr_space) continue; if (e->io.si_type != si_type) continue; - if (e->io.addr_data == addr) + if (e->io.addr_data == addr) { + dev = get_device(e->io.dev); cleanup_one_si(e); + } } mutex_unlock(&smi_infos_lock); + + return dev; } static void cleanup_ipmi_si(void) @@ -2299,6 +2282,9 @@ static void cleanup_ipmi_si(void) list_for_each_entry_safe(e, tmp_e, &smi_infos, link) cleanup_one_si(e); mutex_unlock(&smi_infos_lock); + + ipmi_si_hardcode_exit(); + ipmi_si_hotmod_exit(); } module_exit(cleanup_ipmi_si); diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c index fd0ec8d6bf0e..75583612ab10 100644 --- a/drivers/char/ipmi/ipmi_si_mem_io.c +++ b/drivers/char/ipmi/ipmi_si_mem_io.c @@ -81,8 +81,6 @@ int ipmi_si_mem_setup(struct si_sm_io *io) if (!addr) return -ENODEV; - io->io_cleanup = mem_cleanup; - /* * Figure out the actual readb/readw/readl/etc routine to use based * upon the register size. 
@@ -141,5 +139,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io) mem_region_cleanup(io, io->io_size); return -EIO; } + + io->io_cleanup = mem_cleanup; + return 0; } diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c index f3c99820f564..11c9160275df 100644 --- a/drivers/char/ipmi/ipmi_si_parisc.c +++ b/drivers/char/ipmi/ipmi_si_parisc.c @@ -15,7 +15,7 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev) io.si_type = SI_KCS; io.addr_source = SI_DEVICETREE; - io.addr_type = IPMI_MEM_ADDR_SPACE; + io.addr_space = IPMI_MEM_ADDR_SPACE; io.addr_data = dev->hpa.start; io.regsize = 1; io.regspacing = 1; diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c index ce00c0da5866..ce93fc7a1e36 100644 --- a/drivers/char/ipmi/ipmi_si_pci.c +++ b/drivers/char/ipmi/ipmi_si_pci.c @@ -107,10 +107,10 @@ static int ipmi_pci_probe(struct pci_dev *pdev, io.addr_source_data = pdev; if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { - io.addr_type = IPMI_IO_ADDR_SPACE; + io.addr_space = IPMI_IO_ADDR_SPACE; io.io_setup = ipmi_si_port_setup; } else { - io.addr_type = IPMI_MEM_ADDR_SPACE; + io.addr_space = IPMI_MEM_ADDR_SPACE; io.io_setup = ipmi_si_mem_setup; } io.addr_data = pci_resource_start(pdev, 0); diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c index 15cf819f884f..54c7ded2a1ff 100644 --- a/drivers/char/ipmi/ipmi_si_platform.c +++ b/drivers/char/ipmi/ipmi_si_platform.c @@ -107,11 +107,11 @@ ipmi_get_info_from_resources(struct platform_device *pdev, res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res) { - io->addr_type = IPMI_IO_ADDR_SPACE; + io->addr_space = IPMI_IO_ADDR_SPACE; } else { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) - io->addr_type = IPMI_MEM_ADDR_SPACE; + io->addr_space = IPMI_MEM_ADDR_SPACE; } if (!res) { dev_err(&pdev->dev, "no I/O or memory address\n"); @@ -121,15 +121,13 @@ ipmi_get_info_from_resources(struct platform_device *pdev, io->regspacing = DEFAULT_REGSPACING; res_second = platform_get_resource(pdev, - (io->addr_type == IPMI_IO_ADDR_SPACE) ? + (io->addr_space == IPMI_IO_ADDR_SPACE) ? IORESOURCE_IO : IORESOURCE_MEM, 1); if (res_second) { if (res_second->start > io->addr_data) io->regspacing = res_second->start - io->addr_data; } - io->regsize = DEFAULT_REGSIZE; - io->regshift = 0; return res; } @@ -137,7 +135,7 @@ ipmi_get_info_from_resources(struct platform_device *pdev, static int platform_ipmi_probe(struct platform_device *pdev) { struct si_sm_io io; - u8 type, slave_addr, addr_source; + u8 type, slave_addr, addr_source, regsize, regshift; int rv; rv = device_property_read_u8(&pdev->dev, "addr-source", &addr_source); @@ -149,7 +147,7 @@ static int platform_ipmi_probe(struct platform_device *pdev) if (addr_source == SI_SMBIOS) { if (!si_trydmi) return -ENODEV; - } else { + } else if (addr_source != SI_HARDCODED) { if (!si_tryplatform) return -ENODEV; } @@ -169,11 +167,23 @@ static int platform_ipmi_probe(struct platform_device *pdev) case SI_BT: io.si_type = type; break; + case SI_TYPE_INVALID: /* User disabled this in hardcode. 
*/ + return -ENODEV; default: dev_err(&pdev->dev, "ipmi-type property is invalid\n"); return -EINVAL; } + io.regsize = DEFAULT_REGSIZE; + rv = device_property_read_u8(&pdev->dev, "reg-size", ®size); + if (!rv) + io.regsize = regsize; + + io.regshift = 0; + rv = device_property_read_u8(&pdev->dev, "reg-shift", ®shift); + if (!rv) + io.regshift = regshift; + if (!ipmi_get_info_from_resources(pdev, &io)) return -EINVAL; @@ -193,8 +203,9 @@ static int platform_ipmi_probe(struct platform_device *pdev) io.dev = &pdev->dev; - pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n", - (io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem", + pr_info("ipmi_si: %s: %s %#lx regsize %d spacing %d irq %d\n", + ipmi_addr_src_to_str(addr_source), + (io.addr_space == IPMI_IO_ADDR_SPACE) ? "io" : "mem", io.addr_data, io.regsize, io.regspacing, io.irq); ipmi_si_add_smi(&io); @@ -266,9 +277,9 @@ static int of_ipmi_probe(struct platform_device *pdev) io.irq_setup = ipmi_std_irq_setup; if (resource.flags & IORESOURCE_IO) - io.addr_type = IPMI_IO_ADDR_SPACE; + io.addr_space = IPMI_IO_ADDR_SPACE; else - io.addr_type = IPMI_MEM_ADDR_SPACE; + io.addr_space = IPMI_MEM_ADDR_SPACE; io.addr_data = resource.start; @@ -296,15 +307,10 @@ static int of_ipmi_probe(struct platform_device *dev) static int find_slave_address(struct si_sm_io *io, int slave_addr) { #ifdef CONFIG_IPMI_DMI_DECODE - if (!slave_addr) { - u32 flags = IORESOURCE_IO; - - if (io->addr_type == IPMI_MEM_ADDR_SPACE) - flags = IORESOURCE_MEM; - - slave_addr = ipmi_dmi_get_slave_addr(io->si_type, flags, + if (!slave_addr) + slave_addr = ipmi_dmi_get_slave_addr(io->si_type, + io->addr_space, io->addr_data); - } #endif return slave_addr; @@ -358,6 +364,9 @@ static int acpi_ipmi_probe(struct platform_device *pdev) goto err_free; } + io.regsize = DEFAULT_REGSIZE; + io.regshift = 0; + res = ipmi_get_info_from_resources(pdev, &io); if (!res) { rv = -EINVAL; @@ -419,9 +428,31 @@ static int ipmi_remove(struct platform_device *pdev) return ipmi_si_remove_by_dev(&pdev->dev); } +static int pdev_match_name(struct device *dev, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + const char *name = data; + + return strcmp(pdev->name, name) == 0; +} + +void ipmi_remove_platform_device_by_name(char *name) +{ + struct device *dev; + + while ((dev = bus_find_device(&platform_bus_type, NULL, name, + pdev_match_name))) { + struct platform_device *pdev = to_platform_device(dev); + + platform_device_unregister(pdev); + } +} + static const struct platform_device_id si_plat_ids[] = { - { "dmi-ipmi-si", 0 }, - { } + { "dmi-ipmi-si", 0 }, + { "hardcode-ipmi-si", 0 }, + { "hotmod-ipmi-si", 0 }, + { } }; struct platform_driver ipmi_platform_driver = { diff --git a/drivers/char/ipmi/ipmi_si_port_io.c b/drivers/char/ipmi/ipmi_si_port_io.c index ef6dffcea9fa..03924c32b6e9 100644 --- a/drivers/char/ipmi/ipmi_si_port_io.c +++ b/drivers/char/ipmi/ipmi_si_port_io.c @@ -68,8 +68,6 @@ int ipmi_si_port_setup(struct si_sm_io *io) if (!addr) return -ENODEV; - io->io_cleanup = port_cleanup; - /* * Figure out the actual inb/inw/inl/etc routine to use based * upon the register size. 
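
/*
 * [Editor's note: illustrative sketch, not part of the commit above.]
 * The device properties written by ipmi_platform_add() are read back in
 * platform_ipmi_probe(), so the producer/consumer pairing is:
 *
 *      ipmi_plat_data field    device property
 *      p->type            ->   "ipmi-type"
 *      p->slave_addr      ->   "slave-addr"
 *      p->addr_source     ->   "addr-source"
 *      p->regshift        ->   "reg-shift"
 *      p->regsize         ->   "reg-size"
 *
 * On the consumer side a missing property simply keeps the default, as
 * in the new reg-size/reg-shift handling above.  The helper below
 * restates that pattern; example_read_u8_or() is an invented name.
 */
#include <linux/property.h>

static u8 example_read_u8_or(struct device *dev, const char *prop, u8 def)
{
        u8 val;

        if (device_property_read_u8(dev, prop, &val))
                return def;     /* property absent: keep the default */
        return val;
}
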
@@ -109,5 +107,8 @@ int ipmi_si_port_setup(struct si_sm_io *io) return -EIO; } } + + io->io_cleanup = port_cleanup; + return 0; } diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index aaddf047d923..499db820fadb 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h @@ -26,6 +26,10 @@ enum si_type { SI_TYPE_INVALID, SI_KCS, SI_SMIC, SI_BT }; +enum ipmi_addr_space { + IPMI_IO_ADDR_SPACE, IPMI_MEM_ADDR_SPACE +}; + /* * The structure for doing I/O in the state machine. The state * machine doesn't have the actual I/O routines, they are done through @@ -42,11 +46,11 @@ struct si_sm_io { * state machine shouldn't touch these. */ void __iomem *addr; - int regspacing; - int regsize; - int regshift; - int addr_type; - long addr_data; + unsigned int regspacing; + unsigned int regsize; + unsigned int regshift; + enum ipmi_addr_space addr_space; + unsigned long addr_data; enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ void (*addr_source_cleanup)(struct si_sm_io *io); void *addr_source_data; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index b7a1ae2afaea..8b5aec5430f1 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -28,6 +28,7 @@ */ #define pr_fmt(fmt) "ipmi_ssif: " fmt +#define dev_fmt(fmt) "ipmi_ssif: " fmt #if defined(MODVERSIONS) #include @@ -90,6 +91,12 @@ #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC) #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC) +/* + * Timeout for the watch, only used for get flag timer. + */ +#define SSIF_WATCH_MSG_TIMEOUT msecs_to_jiffies(10) +#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250) + enum ssif_intf_state { SSIF_NORMAL, SSIF_GETTING_FLAGS, @@ -270,6 +277,9 @@ struct ssif_info { struct timer_list retry_timer; int retries_left; + long watch_timeout; /* Timeout for flags check, 0 if off. */ + struct timer_list watch_timer; /* Flag fetch timer. */ + /* Info from SSIF cmd */ unsigned char max_xmit_msg_size; unsigned char max_recv_msg_size; @@ -319,7 +329,8 @@ static void deliver_recv_msg(struct ssif_info *ssif_info, { if (msg->rsp_size < 0) { return_hosed_msg(ssif_info, msg); - pr_err("%s: Malformed message: rsp_size = %d\n", + dev_err(&ssif_info->client->dev, + "%s: Malformed message: rsp_size = %d\n", __func__, msg->rsp_size); } else { ipmi_smi_msg_received(ssif_info->intf, msg); @@ -536,7 +547,8 @@ static void start_get(struct ssif_info *ssif_info) if (rv < 0) { /* request failed, just return the error. 
*/ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Error from i2c_non_blocking_op(5)\n"); + dev_dbg(&ssif_info->client->dev, + "Error from i2c_non_blocking_op(5)\n"); msg_done_handler(ssif_info, -EIO, NULL, 0); } @@ -560,6 +572,26 @@ static void retry_timeout(struct timer_list *t) start_get(ssif_info); } +static void watch_timeout(struct timer_list *t) +{ + struct ssif_info *ssif_info = from_timer(ssif_info, t, watch_timer); + unsigned long oflags, *flags; + + if (ssif_info->stopping) + return; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (ssif_info->watch_timeout) { + mod_timer(&ssif_info->watch_timer, + jiffies + ssif_info->watch_timeout); + if (SSIF_IDLE(ssif_info)) { + start_flag_fetch(ssif_info, flags); /* Releases lock */ + return; + } + ssif_info->req_flags = true; + } + ipmi_ssif_unlock_cond(ssif_info, flags); +} static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, unsigned int data) @@ -618,7 +650,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_inc_stat(ssif_info, receive_errors); if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Error in msg_done_handler: %d\n", result); + dev_dbg(&ssif_info->client->dev, + "%s: Error %d\n", __func__, result); len = 0; goto continue_op; } @@ -643,7 +676,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ssif_info->recv, I2C_SMBUS_BLOCK_DATA); if (rv < 0) { if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Error from i2c_non_blocking_op(1)\n"); + dev_dbg(&ssif_info->client->dev, + "Error from i2c_non_blocking_op(1)\n"); result = -EIO; } else @@ -656,7 +690,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, if (len == 0) { result = -EIO; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Middle message with no data\n"); + dev_dbg(&ssif_info->client->dev, + "Middle message with no data\n"); goto continue_op; } @@ -669,7 +704,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, /* All blocks but the last must have 31 data bytes. */ result = -EIO; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Received middle message <31\n"); + dev_dbg(&ssif_info->client->dev, + "Received middle message <31\n"); goto continue_op; } @@ -678,7 +714,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, /* Received message too big, abort the operation. */ result = -E2BIG; if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Received message too big\n"); + dev_dbg(&ssif_info->client->dev, + "Received message too big\n"); goto continue_op; } @@ -709,7 +746,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, I2C_SMBUS_BLOCK_DATA); if (rv < 0) { if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Error from ssif_i2c_send\n"); + dev_dbg(&ssif_info->client->dev, + "Error from ssif_i2c_send\n"); result = -EIO; } else @@ -726,7 +764,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, } if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) - pr_info("DONE 1: state = %d, result=%d\n", + dev_dbg(&ssif_info->client->dev, + "DONE 1: state = %d, result=%d\n", ssif_info->ssif_state, result); flags = ipmi_ssif_lock_cond(ssif_info, &oflags); @@ -760,8 +799,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, */ ssif_info->ssif_state = SSIF_NORMAL; ipmi_ssif_unlock_cond(ssif_info, flags); - pr_warn("Error getting flags: %d %d, %x\n", - result, len, (len >= 3) ? 
data[2] : 0); + dev_warn(&ssif_info->client->dev, + "Error getting flags: %d %d, %x\n", + result, len, (len >= 3) ? data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { /* @@ -769,8 +809,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, * response to a previous command. */ ipmi_ssif_unlock_cond(ssif_info, flags); - pr_warn("Invalid response getting flags: %x %x\n", - data[0], data[1]); + dev_warn(&ssif_info->client->dev, + "Invalid response getting flags: %x %x\n", + data[0], data[1]); } else { ssif_inc_stat(ssif_info, flag_fetches); ssif_info->msg_flags = data[3]; @@ -782,12 +823,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, /* We cleared the flags. */ if ((result < 0) || (len < 3) || (data[2] != 0)) { /* Error clearing flags */ - pr_warn("Error clearing flags: %d %d, %x\n", - result, len, (len >= 3) ? data[2] : 0); + dev_warn(&ssif_info->client->dev, + "Error clearing flags: %d %d, %x\n", + result, len, (len >= 3) ? data[2] : 0); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) { - pr_warn("Invalid response clearing flags: %x %x\n", - data[0], data[1]); + dev_warn(&ssif_info->client->dev, + "Invalid response clearing flags: %x %x\n", + data[0], data[1]); } ssif_info->ssif_state = SSIF_NORMAL; ipmi_ssif_unlock_cond(ssif_info, flags); @@ -803,8 +846,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, handle_flags(ssif_info, flags); } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) { - pr_warn("Invalid response getting events: %x %x\n", - msg->rsp[0], msg->rsp[1]); + dev_warn(&ssif_info->client->dev, + "Invalid response getting events: %x %x\n", + msg->rsp[0], msg->rsp[1]); msg->done(msg); /* Take off the event flag. */ ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; @@ -826,8 +870,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, handle_flags(ssif_info, flags); } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || msg->rsp[1] != IPMI_GET_MSG_CMD) { - pr_warn("Invalid response clearing flags: %x %x\n", - msg->rsp[0], msg->rsp[1]); + dev_warn(&ssif_info->client->dev, + "Invalid response clearing flags: %x %x\n", + msg->rsp[0], msg->rsp[1]); msg->done(msg); /* Take off the msg flag. */ @@ -853,7 +898,8 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, ipmi_ssif_unlock_cond(ssif_info, flags); if (ssif_info->ssif_debug & SSIF_DEBUG_STATE) - pr_info("DONE 2: state = %d.\n", ssif_info->ssif_state); + dev_dbg(&ssif_info->client->dev, + "DONE 2: state = %d.\n", ssif_info->ssif_state); } static void msg_written_handler(struct ssif_info *ssif_info, int result, @@ -873,7 +919,8 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, ssif_inc_stat(ssif_info, send_errors); if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("%s: Out of retries\n", __func__); + dev_dbg(&ssif_info->client->dev, + "%s: Out of retries\n", __func__); msg_done_handler(ssif_info, -EIO, NULL, 0); return; } @@ -885,7 +932,8 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, * handle it. 
*/ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Error in msg_written_handler: %d\n", result); + dev_dbg(&ssif_info->client->dev, + "%s: Error %d\n", __func__, result); msg_done_handler(ssif_info, result, NULL, 0); return; @@ -929,7 +977,8 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, ssif_inc_stat(ssif_info, send_errors); if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) - pr_info("Error from i2c_non_blocking_op(3)\n"); + dev_dbg(&ssif_info->client->dev, + "Error from i2c_non_blocking_op(3)\n"); msg_done_handler(ssif_info, -EIO, NULL, 0); } } else { @@ -985,7 +1034,8 @@ static int start_resend(struct ssif_info *ssif_info) rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE, command, ssif_info->data, I2C_SMBUS_BLOCK_DATA); if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG)) - pr_info("Error from i2c_non_blocking_op(4)\n"); + dev_dbg(&ssif_info->client->dev, + "Error from i2c_non_blocking_op(4)\n"); return rv; } @@ -1054,7 +1104,8 @@ static void sender(void *send_info, struct timespec64 t; ktime_get_real_ts64(&t); - pr_info("**Enqueue %02x %02x: %lld.%6.6ld\n", + dev_dbg(&ssif_info->client->dev, + "**Enqueue %02x %02x: %lld.%6.6ld\n", msg->data[0], msg->data[1], (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC); } @@ -1073,8 +1124,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data) } /* - * Instead of having our own timer to periodically check the message - * flags, we let the message handler drive us. + * Upper layer wants us to request events. */ static void request_events(void *send_info) { @@ -1085,18 +1135,33 @@ static void request_events(void *send_info) return; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); - /* - * Request flags first, not events, because the lower layer - * doesn't have a way to send an attention. But make sure - * event checking still happens. - */ ssif_info->req_events = true; - if (SSIF_IDLE(ssif_info)) - start_flag_fetch(ssif_info, flags); - else { - ssif_info->req_flags = true; - ipmi_ssif_unlock_cond(ssif_info, flags); + ipmi_ssif_unlock_cond(ssif_info, flags); +} + +/* + * Upper layer is changing the flag saying whether we need to request + * flags periodically or not. 
+ */ +static void ssif_set_need_watch(void *send_info, unsigned int watch_mask) +{ + struct ssif_info *ssif_info = (struct ssif_info *) send_info; + unsigned long oflags, *flags; + long timeout = 0; + + if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES) + timeout = SSIF_WATCH_MSG_TIMEOUT; + else if (watch_mask) + timeout = SSIF_WATCH_WATCHDOG_TIMEOUT; + + flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + if (timeout != ssif_info->watch_timeout) { + ssif_info->watch_timeout = timeout; + if (ssif_info->watch_timeout) + mod_timer(&ssif_info->watch_timer, + jiffies + ssif_info->watch_timeout); } + ipmi_ssif_unlock_cond(ssif_info, flags); } static int ssif_start_processing(void *send_info, @@ -1223,6 +1288,7 @@ static void shutdown_ssif(void *send_info) schedule_timeout(1); ssif_info->stopping = true; + del_timer_sync(&ssif_info->watch_timer); del_timer_sync(&ssif_info->retry_timer); if (ssif_info->thread) { complete(&ssif_info->wake_thread); @@ -1570,7 +1636,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) slave_addr = find_slave_address(client, slave_addr); - pr_info("Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n", + dev_info(&client->dev, + "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n", ipmi_addr_src_to_str(ssif_info->addr_source), client->addr, client->adapter->name, slave_addr); @@ -1585,7 +1652,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) if (!rv && (len >= 3) && (resp[2] == 0)) { if (len < 7) { if (ssif_dbg_probe) - pr_info("SSIF info too short: %d\n", len); + dev_dbg(&ssif_info->client->dev, + "SSIF info too short: %d\n", len); goto no_support; } @@ -1622,7 +1690,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) } else { no_support: /* Assume no multi-part or PEC support */ - pr_info("Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", + dev_info(&ssif_info->client->dev, + "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", rv, len, resp[2]); ssif_info->max_xmit_msg_size = 32; @@ -1639,16 +1708,18 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) msg[2] = WDT_PRE_TIMEOUT_INT; rv = do_cmd(client, 3, msg, &len, resp); if (rv || (len < 3) || (resp[2] != 0)) - pr_warn("Unable to clear message flags: %d %d %2.2x\n", - rv, len, resp[2]); + dev_warn(&ssif_info->client->dev, + "Unable to clear message flags: %d %d %2.2x\n", + rv, len, resp[2]); /* Attempt to enable the event buffer. 
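
/*
 * [Editor's note: illustrative sketch, not part of the commit above.]
 * ssif_set_need_watch() maps the watch mask onto a poll interval for
 * the new watch_timer: SSIF has no attention line, so outstanding
 * message responses need a tight poll while watchdog/command watching
 * can be much slower.  Restating the selection from the patch
 * (example_ssif_watch_interval() is an invented name):
 */
static long example_ssif_watch_interval(unsigned int watch_mask)
{
        if (watch_mask & IPMI_WATCH_MASK_CHECK_MESSAGES)
                return SSIF_WATCH_MSG_TIMEOUT;      /* ~10 ms in jiffies  */
        if (watch_mask)
                return SSIF_WATCH_WATCHDOG_TIMEOUT; /* ~250 ms in jiffies */
        return 0;                                   /* timer left idle    */
}
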
*/ msg[0] = IPMI_NETFN_APP_REQUEST << 2; msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; rv = do_cmd(client, 2, msg, &len, resp); if (rv || (len < 4) || (resp[2] != 0)) { - pr_warn("Error getting global enables: %d %d %2.2x\n", - rv, len, resp[2]); + dev_warn(&ssif_info->client->dev, + "Error getting global enables: %d %d %2.2x\n", + rv, len, resp[2]); rv = 0; /* Not fatal */ goto found; } @@ -1666,8 +1737,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF; rv = do_cmd(client, 3, msg, &len, resp); if (rv || (len < 2)) { - pr_warn("Error setting global enables: %d %d %2.2x\n", - rv, len, resp[2]); + dev_warn(&ssif_info->client->dev, + "Error setting global enables: %d %d %2.2x\n", + rv, len, resp[2]); rv = 0; /* Not fatal */ goto found; } @@ -1687,8 +1759,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR; rv = do_cmd(client, 3, msg, &len, resp); if (rv || (len < 2)) { - pr_warn("Error setting global enables: %d %d %2.2x\n", - rv, len, resp[2]); + dev_warn(&ssif_info->client->dev, + "Error setting global enables: %d %d %2.2x\n", + rv, len, resp[2]); rv = 0; /* Not fatal */ goto found; } @@ -1701,13 +1774,15 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) found: if (ssif_dbg_probe) { - pr_info("ssif_probe: i2c_probe found device at i2c address %x\n", - client->addr); + dev_dbg(&ssif_info->client->dev, + "%s: i2c_probe found device at i2c address %x\n", + __func__, client->addr); } spin_lock_init(&ssif_info->lock); ssif_info->ssif_state = SSIF_NORMAL; timer_setup(&ssif_info->retry_timer, retry_timeout, 0); + timer_setup(&ssif_info->watch_timer, watch_timeout, 0); for (i = 0; i < SSIF_NUM_STATS; i++) atomic_set(&ssif_info->stats[i], 0); @@ -1721,6 +1796,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) ssif_info->handlers.get_smi_info = get_smi_info; ssif_info->handlers.sender = sender; ssif_info->handlers.request_events = request_events; + ssif_info->handlers.set_need_watch = ssif_set_need_watch; { unsigned int thread_num; @@ -1754,8 +1830,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) ssif_info, &ssif_info->client->dev, slave_addr); - if (rv) { - pr_err("Unable to register device: error %d\n", rv); + if (rv) { + dev_err(&ssif_info->client->dev, + "Unable to register device: error %d\n", rv); goto out_remove_attr; } @@ -1764,7 +1841,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) if (addr_info) addr_info->client = NULL; - dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); + dev_err(&ssif_info->client->dev, + "Unable to start IPMI SSIF: %d\n", rv); kfree(ssif_info); } kfree(resp); diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c index e6124bd548df..ed4dc3b1843e 100644 --- a/drivers/char/ipmi/kcs_bmc.c +++ b/drivers/char/ipmi/kcs_bmc.c @@ -440,12 +440,13 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel) kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL); kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL); kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL); - if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer) - return NULL; kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR; kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u", DEVICE_NAME, 
channel); + if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer || + !kcs_bmc->miscdev.name) + return NULL; kcs_bmc->miscdev.fops = &kcs_bmc_fops; return kcs_bmc; diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 5c8d780637bd..3406852f67ff 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -729,7 +729,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd, ret = lp_set_timeout32(minor, (void __user *)arg); break; } - /* fallthrough for 64-bit */ + /* fall through - for 64-bit */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; @@ -757,7 +757,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd, ret = lp_set_timeout32(minor, (void __user *)arg); break; } - /* fallthrough for x32 mode */ + /* fall through - for x32 mode */ case LPSETTIMEOUT_NEW: ret = lp_set_timeout64(minor, (void __user *)arg); break; diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c index 8c9216a0f62e..0a31b60bee7b 100644 --- a/drivers/char/mbcs.c +++ b/drivers/char/mbcs.c @@ -50,6 +50,7 @@ static LIST_HEAD(soft_list); * file operations */ static const struct file_operations mbcs_ops = { + .owner = THIS_MODULE, .open = mbcs_open, .llseek = mbcs_sram_llseek, .read = mbcs_sram_read, diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 25264d65e716..eff1e3f1b3a2 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -21,13 +21,6 @@ * ioctl(NVRAM_SETCKS) (doesn't change contents, just makes checksum valid * again; use with care!) * - * This file also provides some functions for other parts of the kernel that - * want to access the NVRAM: nvram_{read,write,check_checksum,set_checksum}. - * Obviously this can be used only if this driver is always configured into - * the kernel and is not a module. Since the functions are used by some Atari - * drivers, this is the case on the Atari. - * - * * 1.1 Cesar Barros: SMP locking fixes * added changelog * 1.2 Erik Gilling: Cobalt Networks support @@ -39,64 +32,6 @@ #include #include - -#define PC 1 -#define ATARI 2 - -/* select machine configuration */ -#if defined(CONFIG_ATARI) -# define MACH ATARI -#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) /* and ?? */ -# define MACH PC -#else -# error Cannot build nvram driver for this machine configuration. -#endif - -#if MACH == PC - -/* RTC in a PC */ -#define CHECK_DRIVER_INIT() 1 - -/* On PCs, the checksum is built only over bytes 2..31 */ -#define PC_CKS_RANGE_START 2 -#define PC_CKS_RANGE_END 31 -#define PC_CKS_LOC 32 -#define NVRAM_BYTES (128-NVRAM_FIRST_BYTE) - -#define mach_check_checksum pc_check_checksum -#define mach_set_checksum pc_set_checksum -#define mach_proc_infos pc_proc_infos - -#endif - -#if MACH == ATARI - -/* Special parameters for RTC in Atari machines */ -#include -#include -#define RTC_PORT(x) (TT_RTC_BAS + 2*(x)) -#define CHECK_DRIVER_INIT() (MACH_IS_ATARI && ATARIHW_PRESENT(TT_CLK)) - -#define NVRAM_BYTES 50 - -/* On Ataris, the checksum is over all bytes except the checksum bytes - * themselves; these are at the very end */ -#define ATARI_CKS_RANGE_START 0 -#define ATARI_CKS_RANGE_END 47 -#define ATARI_CKS_LOC 48 - -#define mach_check_checksum atari_check_checksum -#define mach_set_checksum atari_set_checksum -#define mach_proc_infos atari_proc_infos - -#endif - -/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with - * rtc_lock held. Due to the index-port/data-port design of the RTC, we - * don't want two different things trying to get to it at once. (e.g. 
the - * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) - */ - #include #include #include @@ -106,28 +41,26 @@ #include #include #include +#include #include #include #include #include #include +#ifdef CONFIG_PPC +#include +#endif static DEFINE_MUTEX(nvram_mutex); static DEFINE_SPINLOCK(nvram_state_lock); static int nvram_open_cnt; /* #times opened */ static int nvram_open_mode; /* special open modes */ +static ssize_t nvram_size; #define NVRAM_WRITE 1 /* opened for writing (exclusive) */ #define NVRAM_EXCL 2 /* opened with O_EXCL */ -static int mach_check_checksum(void); -static void mach_set_checksum(void); - -#ifdef CONFIG_PROC_FS -static void mach_proc_infos(unsigned char *contents, struct seq_file *seq, - void *offset); -#endif - +#ifdef CONFIG_X86 /* * These functions are provided to be called internally or by other parts of * the kernel. It's up to the caller to ensure correct checksum before reading @@ -139,13 +72,20 @@ static void mach_proc_infos(unsigned char *contents, struct seq_file *seq, * know about the RTC cruft. */ -unsigned char __nvram_read_byte(int i) +#define NVRAM_BYTES (128 - NVRAM_FIRST_BYTE) + +/* Note that *all* calls to CMOS_READ and CMOS_WRITE must be done with + * rtc_lock held. Due to the index-port/data-port design of the RTC, we + * don't want two different things trying to get to it at once. (e.g. the + * periodic 11 min sync from kernel/time/ntp.c vs. this driver.) + */ + +static unsigned char __nvram_read_byte(int i) { return CMOS_READ(NVRAM_FIRST_BYTE + i); } -EXPORT_SYMBOL(__nvram_read_byte); -unsigned char nvram_read_byte(int i) +static unsigned char pc_nvram_read_byte(int i) { unsigned long flags; unsigned char c; @@ -155,16 +95,14 @@ unsigned char nvram_read_byte(int i) spin_unlock_irqrestore(&rtc_lock, flags); return c; } -EXPORT_SYMBOL(nvram_read_byte); /* This races nicely with trying to read with checksum checking (nvram_read) */ -void __nvram_write_byte(unsigned char c, int i) +static void __nvram_write_byte(unsigned char c, int i) { CMOS_WRITE(c, NVRAM_FIRST_BYTE + i); } -EXPORT_SYMBOL(__nvram_write_byte); -void nvram_write_byte(unsigned char c, int i) +static void pc_nvram_write_byte(unsigned char c, int i) { unsigned long flags; @@ -172,172 +110,266 @@ void nvram_write_byte(unsigned char c, int i) __nvram_write_byte(c, i); spin_unlock_irqrestore(&rtc_lock, flags); } -EXPORT_SYMBOL(nvram_write_byte); -int __nvram_check_checksum(void) +/* On PCs, the checksum is built only over bytes 2..31 */ +#define PC_CKS_RANGE_START 2 +#define PC_CKS_RANGE_END 31 +#define PC_CKS_LOC 32 + +static int __nvram_check_checksum(void) { - return mach_check_checksum(); + int i; + unsigned short sum = 0; + unsigned short expect; + + for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + expect = __nvram_read_byte(PC_CKS_LOC)<<8 | + __nvram_read_byte(PC_CKS_LOC+1); + return (sum & 0xffff) == expect; } -EXPORT_SYMBOL(__nvram_check_checksum); -int nvram_check_checksum(void) +static void __nvram_set_checksum(void) { - unsigned long flags; - int rv; + int i; + unsigned short sum = 0; - spin_lock_irqsave(&rtc_lock, flags); - rv = __nvram_check_checksum(); - spin_unlock_irqrestore(&rtc_lock, flags); - return rv; + for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) + sum += __nvram_read_byte(i); + __nvram_write_byte(sum >> 8, PC_CKS_LOC); + __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1); } -EXPORT_SYMBOL(nvram_check_checksum); -static void __nvram_set_checksum(void) +static long pc_nvram_set_checksum(void) { - 
mach_set_checksum(); + spin_lock_irq(&rtc_lock); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + return 0; } -#if 0 -void nvram_set_checksum(void) +static long pc_nvram_initialize(void) { - unsigned long flags; + ssize_t i; - spin_lock_irqsave(&rtc_lock, flags); + spin_lock_irq(&rtc_lock); + for (i = 0; i < NVRAM_BYTES; ++i) + __nvram_write_byte(0, i); __nvram_set_checksum(); - spin_unlock_irqrestore(&rtc_lock, flags); + spin_unlock_irq(&rtc_lock); + return 0; } -#endif /* 0 */ - -/* - * The are the file operation function for user access to /dev/nvram - */ -static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) +static ssize_t pc_nvram_get_size(void) { - return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, - NVRAM_BYTES); + return NVRAM_BYTES; } -static ssize_t nvram_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) +static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos) { - unsigned char contents[NVRAM_BYTES]; - unsigned i = *ppos; - unsigned char *tmp; + char *p = buf; + loff_t i; spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + *p = __nvram_read_byte(i); + spin_unlock_irq(&rtc_lock); - if (!__nvram_check_checksum()) - goto checksum_err; + *ppos = i; + return p - buf; +} - for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp) - *tmp = __nvram_read_byte(i); +static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos) +{ + char *p = buf; + loff_t i; + spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + __nvram_write_byte(*p, i); + __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); - if (copy_to_user(buf, contents, tmp - contents)) - return -EFAULT; - *ppos = i; + return p - buf; +} - return tmp - contents; +const struct nvram_ops arch_nvram_ops = { + .read = pc_nvram_read, + .write = pc_nvram_write, + .read_byte = pc_nvram_read_byte, + .write_byte = pc_nvram_write_byte, + .get_size = pc_nvram_get_size, + .set_checksum = pc_nvram_set_checksum, + .initialize = pc_nvram_initialize, +}; +EXPORT_SYMBOL(arch_nvram_ops); +#endif /* CONFIG_X86 */ -checksum_err: - spin_unlock_irq(&rtc_lock); - return -EIO; -} +/* + * The are the file operation function for user access to /dev/nvram + */ -static ssize_t nvram_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin) { - unsigned char contents[NVRAM_BYTES]; - unsigned i = *ppos; - unsigned char *tmp; + return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, + nvram_size); +} - if (i >= NVRAM_BYTES) - return 0; /* Past EOF */ +static ssize_t nvram_misc_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + char *tmp; + ssize_t ret; - if (count > NVRAM_BYTES - i) - count = NVRAM_BYTES - i; - if (count > NVRAM_BYTES) - return -EFAULT; /* Can't happen, but prove it to gcc */ - if (copy_from_user(contents, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; + if (*ppos >= nvram_size) + return 0; - spin_lock_irq(&rtc_lock); + count = min_t(size_t, count, nvram_size - *ppos); + count = min_t(size_t, count, PAGE_SIZE); - if (!__nvram_check_checksum()) - goto checksum_err; + tmp = kmalloc(count, GFP_KERNEL); + if (!tmp) + return -ENOMEM; - for (tmp = contents; 
count--; ++i, ++tmp) - __nvram_write_byte(*tmp, i); + ret = nvram_read(tmp, count, ppos); + if (ret <= 0) + goto out; - __nvram_set_checksum(); + if (copy_to_user(buf, tmp, ret)) { + *ppos -= ret; + ret = -EFAULT; + } - spin_unlock_irq(&rtc_lock); +out: + kfree(tmp); + return ret; +} - *ppos = i; +static ssize_t nvram_misc_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char *tmp; + ssize_t ret; - return tmp - contents; + if (!access_ok(buf, count)) + return -EFAULT; + if (*ppos >= nvram_size) + return 0; -checksum_err: - spin_unlock_irq(&rtc_lock); - return -EIO; + count = min_t(size_t, count, nvram_size - *ppos); + count = min_t(size_t, count, PAGE_SIZE); + + tmp = memdup_user(buf, count); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + + ret = nvram_write(tmp, count, ppos); + kfree(tmp); + return ret; } -static long nvram_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long nvram_misc_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { - int i; + long ret = -ENOTTY; switch (cmd) { - +#ifdef CONFIG_PPC + case OBSOLETE_PMAC_NVRAM_GET_OFFSET: + pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); + /* fall through */ + case IOC_NVRAM_GET_OFFSET: + ret = -EINVAL; +#ifdef CONFIG_PPC_PMAC + if (machine_is(powermac)) { + int part, offset; + + if (copy_from_user(&part, (void __user *)arg, + sizeof(part)) != 0) + return -EFAULT; + if (part < pmac_nvram_OF || part > pmac_nvram_NR) + return -EINVAL; + offset = pmac_get_partition(part); + if (offset < 0) + return -EINVAL; + if (copy_to_user((void __user *)arg, + &offset, sizeof(offset)) != 0) + return -EFAULT; + ret = 0; + } +#endif + break; +#ifdef CONFIG_PPC32 + case IOC_NVRAM_SYNC: + if (ppc_md.nvram_sync != NULL) { + mutex_lock(&nvram_mutex); + ppc_md.nvram_sync(); + mutex_unlock(&nvram_mutex); + } + ret = 0; + break; +#endif +#elif defined(CONFIG_X86) || defined(CONFIG_M68K) case NVRAM_INIT: /* initialize NVRAM contents and checksum */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; - mutex_lock(&nvram_mutex); - spin_lock_irq(&rtc_lock); - - for (i = 0; i < NVRAM_BYTES; ++i) - __nvram_write_byte(0, i); - __nvram_set_checksum(); - - spin_unlock_irq(&rtc_lock); - mutex_unlock(&nvram_mutex); - return 0; - + if (arch_nvram_ops.initialize != NULL) { + mutex_lock(&nvram_mutex); + ret = arch_nvram_ops.initialize(); + mutex_unlock(&nvram_mutex); + } + break; case NVRAM_SETCKS: /* just set checksum, contents unchanged (maybe useful after * checksum garbaged somehow...) */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; - mutex_lock(&nvram_mutex); - spin_lock_irq(&rtc_lock); - __nvram_set_checksum(); - spin_unlock_irq(&rtc_lock); - mutex_unlock(&nvram_mutex); - return 0; - - default: - return -ENOTTY; + if (arch_nvram_ops.set_checksum != NULL) { + mutex_lock(&nvram_mutex); + ret = arch_nvram_ops.set_checksum(); + mutex_unlock(&nvram_mutex); + } + break; +#endif /* CONFIG_X86 || CONFIG_M68K */ } + return ret; } -static int nvram_open(struct inode *inode, struct file *file) +static int nvram_misc_open(struct inode *inode, struct file *file) { spin_lock(&nvram_state_lock); + /* Prevent multiple readers/writers if desired. */ if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || - (nvram_open_mode & NVRAM_EXCL) || - ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) { + (nvram_open_mode & NVRAM_EXCL)) { spin_unlock(&nvram_state_lock); return -EBUSY; } +#if defined(CONFIG_X86) || defined(CONFIG_M68K) + /* Prevent multiple writers if the set_checksum ioctl is implemented. 
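The nvram_misc_read()/nvram_misc_write() rework above drops the fixed on-stack array in favour of a bounce buffer sized by the clamped request: limit count by the reported device size and by PAGE_SIZE, move the data through kernel memory via the arch hook, and rewind *ppos if copy_to_user() faults. A minimal sketch of that shape with the backend hooks stubbed out; all example_* names are illustrative, not taken from the driver.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical backend accessors; like nvram_read()/nvram_write() they
 * advance *ppos by the number of bytes actually transferred. */
static ssize_t example_backend_read(char *buf, size_t count, loff_t *ppos);
static ssize_t example_backend_write(char *buf, size_t count, loff_t *ppos);
static ssize_t example_size;	/* size reported by the backend */

static ssize_t example_misc_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	if (*ppos >= example_size)
		return 0;

	count = min_t(size_t, count, example_size - *ppos);
	count = min_t(size_t, count, PAGE_SIZE);

	tmp = kmalloc(count, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = example_backend_read(tmp, count, ppos);
	if (ret > 0 && copy_to_user(buf, tmp, ret)) {
		*ppos -= ret;	/* undo the position advance on fault */
		ret = -EFAULT;
	}
	kfree(tmp);
	return ret;
}

static ssize_t example_misc_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	if (*ppos >= example_size)
		return 0;

	count = min_t(size_t, count, example_size - *ppos);
	count = min_t(size_t, count, PAGE_SIZE);

	tmp = memdup_user(buf, count);	/* kmalloc() + copy_from_user() in one step */
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	ret = example_backend_write(tmp, count, ppos);
	kfree(tmp);
	return ret;
}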
*/ + if ((arch_nvram_ops.set_checksum != NULL) && + (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) { + spin_unlock(&nvram_state_lock); + return -EBUSY; + } +#endif + if (file->f_flags & O_EXCL) nvram_open_mode |= NVRAM_EXCL; if (file->f_mode & FMODE_WRITE) @@ -349,7 +381,7 @@ static int nvram_open(struct inode *inode, struct file *file) return 0; } -static int nvram_release(struct inode *inode, struct file *file) +static int nvram_misc_release(struct inode *inode, struct file *file) { spin_lock(&nvram_state_lock); @@ -366,123 +398,7 @@ static int nvram_release(struct inode *inode, struct file *file) return 0; } -#ifndef CONFIG_PROC_FS -static int nvram_add_proc_fs(void) -{ - return 0; -} - -#else - -static int nvram_proc_read(struct seq_file *seq, void *offset) -{ - unsigned char contents[NVRAM_BYTES]; - int i = 0; - - spin_lock_irq(&rtc_lock); - for (i = 0; i < NVRAM_BYTES; ++i) - contents[i] = __nvram_read_byte(i); - spin_unlock_irq(&rtc_lock); - - mach_proc_infos(contents, seq, offset); - - return 0; -} - -static int nvram_add_proc_fs(void) -{ - if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) - return -ENOMEM; - return 0; -} - -#endif /* CONFIG_PROC_FS */ - -static const struct file_operations nvram_fops = { - .owner = THIS_MODULE, - .llseek = nvram_llseek, - .read = nvram_read, - .write = nvram_write, - .unlocked_ioctl = nvram_ioctl, - .open = nvram_open, - .release = nvram_release, -}; - -static struct miscdevice nvram_dev = { - NVRAM_MINOR, - "nvram", - &nvram_fops -}; - -static int __init nvram_init(void) -{ - int ret; - - /* First test whether the driver should init at all */ - if (!CHECK_DRIVER_INIT()) - return -ENODEV; - - ret = misc_register(&nvram_dev); - if (ret) { - printk(KERN_ERR "nvram: can't misc_register on minor=%d\n", - NVRAM_MINOR); - goto out; - } - ret = nvram_add_proc_fs(); - if (ret) { - printk(KERN_ERR "nvram: can't create /proc/driver/nvram\n"); - goto outmisc; - } - ret = 0; - printk(KERN_INFO "Non-volatile memory driver v" NVRAM_VERSION "\n"); -out: - return ret; -outmisc: - misc_deregister(&nvram_dev); - goto out; -} - -static void __exit nvram_cleanup_module(void) -{ - remove_proc_entry("driver/nvram", NULL); - misc_deregister(&nvram_dev); -} - -module_init(nvram_init); -module_exit(nvram_cleanup_module); - -/* - * Machine specific functions - */ - -#if MACH == PC - -static int pc_check_checksum(void) -{ - int i; - unsigned short sum = 0; - unsigned short expect; - - for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) - sum += __nvram_read_byte(i); - expect = __nvram_read_byte(PC_CKS_LOC)<<8 | - __nvram_read_byte(PC_CKS_LOC+1); - return (sum & 0xffff) == expect; -} - -static void pc_set_checksum(void) -{ - int i; - unsigned short sum = 0; - - for (i = PC_CKS_RANGE_START; i <= PC_CKS_RANGE_END; ++i) - sum += __nvram_read_byte(i); - __nvram_write_byte(sum >> 8, PC_CKS_LOC); - __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1); -} - -#ifdef CONFIG_PROC_FS - +#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) static const char * const floppy_types[] = { "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", "3.5'' 2.88M", "3.5'' 2.88M" @@ -495,8 +411,8 @@ static const char * const gfx_types[] = { "monochrome", }; -static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq, - void *offset) +static void pc_nvram_proc_read(unsigned char *nvram, struct seq_file *seq, + void *offset) { int checksum; int type; @@ -557,143 +473,76 @@ static void pc_proc_infos(unsigned char *nvram, struct seq_file *seq, return; 
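From user space the reworked ioctl path is visible only through its return codes: NVRAM_INIT and NVRAM_SETCKS still require CAP_SYS_ADMIN, and they now report ENOTTY on platforms whose arch_nvram_ops supplies no initialize/set_checksum hook. A small, hedged usage example against /dev/nvram, with error handling kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvram.h>

int main(void)
{
	int fd = open("/dev/nvram", O_RDWR);

	if (fd < 0) {
		perror("open /dev/nvram");
		return 1;
	}

	/* Recompute the checksum without changing the contents; expect
	 * EACCES without CAP_SYS_ADMIN and ENOTTY where the architecture
	 * provides no set_checksum hook. */
	if (ioctl(fd, NVRAM_SETCKS) < 0)
		perror("NVRAM_SETCKS");

	close(fd);
	return 0;
}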
} -#endif -#endif /* MACH == PC */ - -#if MACH == ATARI - -static int atari_check_checksum(void) +static int nvram_proc_read(struct seq_file *seq, void *offset) { - int i; - unsigned char sum = 0; + unsigned char contents[NVRAM_BYTES]; + int i = 0; - for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) - sum += __nvram_read_byte(i); - return (__nvram_read_byte(ATARI_CKS_LOC) == (~sum & 0xff)) && - (__nvram_read_byte(ATARI_CKS_LOC + 1) == (sum & 0xff)); -} + spin_lock_irq(&rtc_lock); + for (i = 0; i < NVRAM_BYTES; ++i) + contents[i] = __nvram_read_byte(i); + spin_unlock_irq(&rtc_lock); -static void atari_set_checksum(void) -{ - int i; - unsigned char sum = 0; + pc_nvram_proc_read(contents, seq, offset); - for (i = ATARI_CKS_RANGE_START; i <= ATARI_CKS_RANGE_END; ++i) - sum += __nvram_read_byte(i); - __nvram_write_byte(~sum, ATARI_CKS_LOC); - __nvram_write_byte(sum, ATARI_CKS_LOC + 1); + return 0; } +#endif /* CONFIG_X86 && CONFIG_PROC_FS */ -#ifdef CONFIG_PROC_FS - -static struct { - unsigned char val; - const char *name; -} boot_prefs[] = { - { 0x80, "TOS" }, - { 0x40, "ASV" }, - { 0x20, "NetBSD (?)" }, - { 0x10, "Linux" }, - { 0x00, "unspecified" } -}; - -static const char * const languages[] = { - "English (US)", - "German", - "French", - "English (UK)", - "Spanish", - "Italian", - "6 (undefined)", - "Swiss (French)", - "Swiss (German)" -}; - -static const char * const dateformat[] = { - "MM%cDD%cYY", - "DD%cMM%cYY", - "YY%cMM%cDD", - "YY%cDD%cMM", - "4 (undefined)", - "5 (undefined)", - "6 (undefined)", - "7 (undefined)" +static const struct file_operations nvram_misc_fops = { + .owner = THIS_MODULE, + .llseek = nvram_misc_llseek, + .read = nvram_misc_read, + .write = nvram_misc_write, + .unlocked_ioctl = nvram_misc_ioctl, + .open = nvram_misc_open, + .release = nvram_misc_release, }; -static const char * const colors[] = { - "2", "4", "16", "256", "65536", "??", "??", "??" +static struct miscdevice nvram_misc = { + NVRAM_MINOR, + "nvram", + &nvram_misc_fops, }; -static void atari_proc_infos(unsigned char *nvram, struct seq_file *seq, - void *offset) +static int __init nvram_module_init(void) { - int checksum = nvram_check_checksum(); - int i; - unsigned vmode; + int ret; - seq_printf(seq, "Checksum status : %svalid\n", checksum ? "" : "not "); + nvram_size = nvram_get_size(); + if (nvram_size < 0) + return nvram_size; - seq_printf(seq, "Boot preference : "); - for (i = ARRAY_SIZE(boot_prefs) - 1; i >= 0; --i) { - if (nvram[1] == boot_prefs[i].val) { - seq_printf(seq, "%s\n", boot_prefs[i].name); - break; - } + ret = misc_register(&nvram_misc); + if (ret) { + pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR); + return ret; } - if (i < 0) - seq_printf(seq, "0x%02x (undefined)\n", nvram[1]); - - seq_printf(seq, "SCSI arbitration : %s\n", - (nvram[16] & 0x80) ? 
"on" : "off"); - seq_printf(seq, "SCSI host ID : "); - if (nvram[16] & 0x80) - seq_printf(seq, "%d\n", nvram[16] & 7); - else - seq_printf(seq, "n/a\n"); - - /* the following entries are defined only for the Falcon */ - if ((atari_mch_cookie >> 16) != ATARI_MCH_FALCON) - return; - seq_printf(seq, "OS language : "); - if (nvram[6] < ARRAY_SIZE(languages)) - seq_printf(seq, "%s\n", languages[nvram[6]]); - else - seq_printf(seq, "%u (undefined)\n", nvram[6]); - seq_printf(seq, "Keyboard language: "); - if (nvram[7] < ARRAY_SIZE(languages)) - seq_printf(seq, "%s\n", languages[nvram[7]]); - else - seq_printf(seq, "%u (undefined)\n", nvram[7]); - seq_printf(seq, "Date format : "); - seq_printf(seq, dateformat[nvram[8] & 7], - nvram[9] ? nvram[9] : '/', nvram[9] ? nvram[9] : '/'); - seq_printf(seq, ", %dh clock\n", nvram[8] & 16 ? 24 : 12); - seq_printf(seq, "Boot delay : "); - if (nvram[10] == 0) - seq_printf(seq, "default"); - else - seq_printf(seq, "%ds%s\n", nvram[10], - nvram[10] < 8 ? ", no memory test" : ""); - - vmode = (nvram[14] << 8) | nvram[15]; - seq_printf(seq, - "Video mode : %s colors, %d columns, %s %s monitor\n", - colors[vmode & 7], - vmode & 8 ? 80 : 40, - vmode & 16 ? "VGA" : "TV", vmode & 32 ? "PAL" : "NTSC"); - seq_printf(seq, " %soverscan, compat. mode %s%s\n", - vmode & 64 ? "" : "no ", - vmode & 128 ? "on" : "off", - vmode & 256 ? - (vmode & 16 ? ", line doubling" : ", half screen") : ""); +#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) + if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) { + pr_err("nvram: can't create /proc/driver/nvram\n"); + misc_deregister(&nvram_misc); + return -ENOMEM; + } +#endif - return; + pr_info("Non-volatile memory driver v" NVRAM_VERSION "\n"); + return 0; } + +static void __exit nvram_module_exit(void) +{ +#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) + remove_proc_entry("driver/nvram", NULL); #endif + misc_deregister(&nvram_misc); +} -#endif /* MACH == ATARI */ +module_init(nvram_module_init); +module_exit(nvram_module_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(NVRAM_MINOR); +MODULE_ALIAS("devname:nvram"); diff --git a/drivers/char/tpm/eventlog/tpm1.c b/drivers/char/tpm/eventlog/tpm1.c index 58c84784ba25..bfdff9271be0 100644 --- a/drivers/char/tpm/eventlog/tpm1.c +++ b/drivers/char/tpm/eventlog/tpm1.c @@ -74,7 +74,7 @@ static const char* tcpa_pc_event_id_strings[] = { /* returns pointer to start of pos. 
entry of tcg log */ static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos) { - loff_t i; + loff_t i = 0; struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *addr = log->bios_event_log; @@ -83,38 +83,29 @@ static void *tpm1_bios_measurements_start(struct seq_file *m, loff_t *pos) u32 converted_event_size; u32 converted_event_type; - /* read over *pos measurements */ - for (i = 0; i < *pos; i++) { + do { event = addr; + /* check if current entry is valid */ + if (addr + sizeof(struct tcpa_event) > limit) + return NULL; + converted_event_size = do_endian_conversion(event->event_size); converted_event_type = do_endian_conversion(event->event_type); - if ((addr + sizeof(struct tcpa_event)) < limit) { - if ((converted_event_type == 0) && - (converted_event_size == 0)) - return NULL; - addr += (sizeof(struct tcpa_event) + - converted_event_size); - } - } - - /* now check if current entry is valid */ - if ((addr + sizeof(struct tcpa_event)) >= limit) - return NULL; - - event = addr; + if (((converted_event_type == 0) && (converted_event_size == 0)) + || ((addr + sizeof(struct tcpa_event) + converted_event_size) + > limit)) + return NULL; - converted_event_size = do_endian_conversion(event->event_size); - converted_event_type = do_endian_conversion(event->event_type); + if (i++ == *pos) + break; - if (((converted_event_type == 0) && (converted_event_size == 0)) - || ((addr + sizeof(struct tcpa_event) + converted_event_size) - >= limit)) - return NULL; + addr += (sizeof(struct tcpa_event) + converted_event_size); + } while (1); return addr; } @@ -134,7 +125,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, v += sizeof(struct tcpa_event) + converted_event_size; /* now check if current entry is valid */ - if ((v + sizeof(struct tcpa_event)) >= limit) + if ((v + sizeof(struct tcpa_event)) > limit) return NULL; event = v; @@ -143,7 +134,7 @@ static void *tpm1_bios_measurements_next(struct seq_file *m, void *v, converted_event_type = do_endian_conversion(event->event_type); if (((converted_event_type == 0) && (converted_event_size == 0)) || - ((v + sizeof(struct tcpa_event) + converted_event_size) >= limit)) + ((v + sizeof(struct tcpa_event) + converted_event_size) > limit)) return NULL; (*pos)++; diff --git a/drivers/char/tpm/eventlog/tpm2.c b/drivers/char/tpm/eventlog/tpm2.c index 1b8fa9de2cac..d8b77133a83a 100644 --- a/drivers/char/tpm/eventlog/tpm2.c +++ b/drivers/char/tpm/eventlog/tpm2.c @@ -37,10 +37,10 @@ * * Returns size of the event. If it is an invalid event, returns 0. */ -static int calc_tpm2_event_size(struct tcg_pcr_event2 *event, +static int calc_tpm2_event_size(struct tcg_pcr_event2_head *event, struct tcg_pcr_event *event_header) { - struct tcg_efi_specid_event *efispecid; + struct tcg_efi_specid_event_head *efispecid; struct tcg_event_field *event_field; void *marker; void *marker_start; @@ -55,7 +55,7 @@ static int calc_tpm2_event_size(struct tcg_pcr_event2 *event, marker = marker + sizeof(event->pcr_idx) + sizeof(event->event_type) + sizeof(event->count); - efispecid = (struct tcg_efi_specid_event *)event_header->event; + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; /* Check if event is malformed. 
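The tpm1 event-log changes above collapse the skip-ahead and validity checks into one loop and relax the limit comparisons from >= to >, so an event whose last byte lands exactly on bios_event_log_end is still returned. A compact restatement of that bounds rule for a generic variable-length record walk; the struct and names below are illustrative, not from the driver.

#include <stddef.h>
#include <stdint.h>

struct example_record {
	uint32_t type;
	uint32_t payload_len;
	/* payload_len bytes of data follow the header */
};

/* Return a pointer to the record after 'cur', or NULL when the walk must
 * stop.  Note the use of '>' rather than '>=': a record that ends exactly
 * at 'limit' is still valid. */
static const void *example_next_record(const void *cur, const void *limit)
{
	const struct example_record *rec = cur;
	const void *next;

	if ((const char *)cur + sizeof(*rec) > (const char *)limit)
		return NULL;		/* header would run past the log */

	if (rec->type == 0 && rec->payload_len == 0)
		return NULL;		/* terminator entry */

	next = (const char *)cur + sizeof(*rec) + rec->payload_len;
	if (next > limit)
		return NULL;		/* payload would run past the log */

	return next;
}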
*/ if (event->count > efispecid->num_algs) @@ -95,7 +95,7 @@ static void *tpm2_bios_measurements_start(struct seq_file *m, loff_t *pos) void *addr = log->bios_event_log; void *limit = log->bios_event_log_end; struct tcg_pcr_event *event_header; - struct tcg_pcr_event2 *event; + struct tcg_pcr_event2_head *event; size_t size; int i; @@ -136,7 +136,7 @@ static void *tpm2_bios_measurements_next(struct seq_file *m, void *v, loff_t *pos) { struct tcg_pcr_event *event_header; - struct tcg_pcr_event2 *event; + struct tcg_pcr_event2_head *event; struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; void *limit = log->bios_event_log_end; @@ -180,7 +180,7 @@ static int tpm2_binary_bios_measurements_show(struct seq_file *m, void *v) struct tpm_chip *chip = m->private; struct tpm_bios_log *log = &chip->log; struct tcg_pcr_event *event_header = log->bios_event_log; - struct tcg_pcr_event2 *event = v; + struct tcg_pcr_event2_head *event = v; void *temp_ptr; size_t size; diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c index be5d1abd3e8e..8390c5b54c3b 100644 --- a/drivers/char/tpm/st33zp24/i2c.c +++ b/drivers/char/tpm/st33zp24/i2c.c @@ -33,7 +33,7 @@ struct st33zp24_i2c_phy { struct i2c_client *client; - u8 buf[TPM_BUFSIZE + 1]; + u8 buf[ST33ZP24_BUFSIZE + 1]; int io_lpcpd; }; diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c index d7909ab287a8..ff019a1e3c68 100644 --- a/drivers/char/tpm/st33zp24/spi.c +++ b/drivers/char/tpm/st33zp24/spi.c @@ -63,7 +63,7 @@ * some latency byte before the answer is available (max 15). * We have 2048 + 1024 + 15. */ -#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\ +#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\ MAX_SPI_LATENCY) diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 64dc560859f2..13dc614b7ebc 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -436,7 +436,7 @@ static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf, goto out_err; } - return len; + return 0; out_err: st33zp24_cancel(chip); release_locality(chip); diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h index 6f4a4198af6a..20da0a84988d 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.h +++ b/drivers/char/tpm/st33zp24/st33zp24.h @@ -18,8 +18,8 @@ #ifndef __LOCAL_ST33ZP24_H__ #define __LOCAL_ST33ZP24_H__ -#define TPM_WRITE_DIRECTION 0x80 -#define TPM_BUFSIZE 2048 +#define TPM_WRITE_DIRECTION 0x80 +#define ST33ZP24_BUFSIZE 2048 struct st33zp24_dev { struct tpm_chip *chip; diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 32db84683c40..8804c9e916fd 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -37,6 +37,103 @@ struct class *tpm_class; struct class *tpmrm_class; dev_t tpm_devt; +static int tpm_request_locality(struct tpm_chip *chip) +{ + int rc; + + if (!chip->ops->request_locality) + return 0; + + rc = chip->ops->request_locality(chip, 0); + if (rc < 0) + return rc; + + chip->locality = rc; + return 0; +} + +static void tpm_relinquish_locality(struct tpm_chip *chip) +{ + int rc; + + if (!chip->ops->relinquish_locality) + return; + + rc = chip->ops->relinquish_locality(chip, chip->locality); + if (rc) + dev_err(&chip->dev, "%s: : error %d\n", __func__, rc); + + chip->locality = -1; +} + +static int tpm_cmd_ready(struct tpm_chip *chip) +{ + if (!chip->ops->cmd_ready) + return 0; + + 
return chip->ops->cmd_ready(chip); +} + +static int tpm_go_idle(struct tpm_chip *chip) +{ + if (!chip->ops->go_idle) + return 0; + + return chip->ops->go_idle(chip); +} + +/** + * tpm_chip_start() - power on the TPM + * @chip: a TPM chip to use + * + * Return: + * * The response length - OK + * * -errno - A system error + */ +int tpm_chip_start(struct tpm_chip *chip) +{ + int ret; + + if (chip->ops->clk_enable) + chip->ops->clk_enable(chip, true); + + if (chip->locality == -1) { + ret = tpm_request_locality(chip); + if (ret) { + chip->ops->clk_enable(chip, false); + return ret; + } + } + + ret = tpm_cmd_ready(chip); + if (ret) { + tpm_relinquish_locality(chip); + if (chip->ops->clk_enable) + chip->ops->clk_enable(chip, false); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(tpm_chip_start); + +/** + * tpm_chip_stop() - power off the TPM + * @chip: a TPM chip to use + * + * Return: + * * The response length - OK + * * -errno - A system error + */ +void tpm_chip_stop(struct tpm_chip *chip) +{ + tpm_go_idle(chip); + tpm_relinquish_locality(chip); + if (chip->ops->clk_enable) + chip->ops->clk_enable(chip, false); +} +EXPORT_SYMBOL_GPL(tpm_chip_stop); + /** * tpm_try_get_ops() - Get a ref to the tpm_chip * @chip: Chip to ref @@ -56,10 +153,17 @@ int tpm_try_get_ops(struct tpm_chip *chip) down_read(&chip->ops_sem); if (!chip->ops) + goto out_ops; + + mutex_lock(&chip->tpm_mutex); + rc = tpm_chip_start(chip); + if (rc) goto out_lock; return 0; out_lock: + mutex_unlock(&chip->tpm_mutex); +out_ops: up_read(&chip->ops_sem); put_device(&chip->dev); return rc; @@ -75,6 +179,8 @@ EXPORT_SYMBOL_GPL(tpm_try_get_ops); */ void tpm_put_ops(struct tpm_chip *chip) { + tpm_chip_stop(chip); + mutex_unlock(&chip->tpm_mutex); up_read(&chip->ops_sem); put_device(&chip->dev); } @@ -160,6 +266,7 @@ static void tpm_dev_release(struct device *dev) kfree(chip->log.bios_event_log); kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); + kfree(chip->allocated_banks); kfree(chip); } @@ -189,7 +296,10 @@ static int tpm_class_shutdown(struct device *dev) if (chip->flags & TPM_CHIP_FLAG_TPM2) { down_write(&chip->ops_sem); - tpm2_shutdown(chip, TPM2_SU_CLEAR); + if (!tpm_chip_start(chip)) { + tpm2_shutdown(chip, TPM2_SU_CLEAR); + tpm_chip_stop(chip); + } chip->ops = NULL; up_write(&chip->ops_sem); } @@ -368,8 +478,12 @@ static void tpm_del_char_device(struct tpm_chip *chip) /* Make the driver uncallable. 
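With tpm_try_get_ops() now taking tpm_mutex and powering the chip up through tpm_chip_start(), and tpm_put_ops() undoing both, an exported helper no longer has to manage clocks, locality or cmd_ready itself. A sketch of the resulting caller pattern, mirroring how tpm_pcr_read()/tpm_pcr_extend() are structured later in this patch (the command in the middle is elided, the function name is illustrative):

#include "tpm.h"	/* drivers/char/tpm internal header */

static int example_tpm_operation(struct tpm_chip *chip)
{
	int rc;

	/* Resolves the default chip when NULL, takes tpm_mutex and runs
	 * tpm_chip_start() (clock enable, locality, cmd_ready). */
	chip = tpm_find_get_ops(chip);
	if (!chip)
		return -ENODEV;

	/* ... build a tpm_buf and issue it with tpm_transmit_cmd() ... */
	rc = 0;

	/* Runs tpm_chip_stop() (go_idle, relinquish locality, clock off),
	 * then drops tpm_mutex and the device reference. */
	tpm_put_ops(chip);
	return rc;
}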
*/ down_write(&chip->ops_sem); - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_CLEAR); + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + if (!tpm_chip_start(chip)) { + tpm2_shutdown(chip, TPM2_SU_CLEAR); + tpm_chip_stop(chip); + } + } chip->ops = NULL; up_write(&chip->ops_sem); } @@ -451,7 +565,11 @@ int tpm_chip_register(struct tpm_chip *chip) { int rc; + rc = tpm_chip_start(chip); + if (rc) + return rc; rc = tpm_auto_startup(chip); + tpm_chip_stop(chip); if (rc) return rc; diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 5eecad233ea1..8856cce5a23b 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -27,7 +27,38 @@ static struct workqueue_struct *tpm_dev_wq; static DEFINE_MUTEX(tpm_dev_wq_lock); -static void tpm_async_work(struct work_struct *work) +static ssize_t tpm_dev_transmit(struct tpm_chip *chip, struct tpm_space *space, + u8 *buf, size_t bufsiz) +{ + struct tpm_header *header = (void *)buf; + ssize_t ret, len; + + ret = tpm2_prepare_space(chip, space, buf, bufsiz); + /* If the command is not implemented by the TPM, synthesize a + * response with a TPM2_RC_COMMAND_CODE return for user-space. + */ + if (ret == -EOPNOTSUPP) { + header->length = cpu_to_be32(sizeof(*header)); + header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); + header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE | + TSS2_RESMGR_TPM_RC_LAYER); + ret = sizeof(*header); + } + if (ret) + goto out_rc; + + len = tpm_transmit(chip, buf, bufsiz); + if (len < 0) + ret = len; + + if (!ret) + ret = tpm2_commit_space(chip, space, buf, &len); + +out_rc: + return ret ? ret : len; +} + +static void tpm_dev_async_work(struct work_struct *work) { struct file_priv *priv = container_of(work, struct file_priv, async_work); @@ -35,9 +66,8 @@ static void tpm_async_work(struct work_struct *work) mutex_lock(&priv->buffer_mutex); priv->command_enqueued = false; - ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer, - sizeof(priv->data_buffer), 0); - + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, + sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); if (ret > 0) { priv->response_length = ret; @@ -80,7 +110,7 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip, mutex_init(&priv->buffer_mutex); timer_setup(&priv->user_read_timer, user_reader_timeout, 0); INIT_WORK(&priv->timeout_work, tpm_timeout_work); - INIT_WORK(&priv->async_work, tpm_async_work); + INIT_WORK(&priv->async_work, tpm_dev_async_work); init_waitqueue_head(&priv->async_wait); file->private_data = priv; } @@ -183,8 +213,8 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, return size; } - ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer, - sizeof(priv->data_buffer), 0); + ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer, + sizeof(priv->data_buffer)); tpm_put_ops(priv->chip); if (ret > 0) { diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index d9439f9abe78..83ece5639f86 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -62,137 +62,22 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal) } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); -static int tpm_validate_command(struct tpm_chip *chip, - struct tpm_space *space, - const u8 *cmd, - size_t len) -{ - const struct tpm_input_header *header = (const void *)cmd; - int i; - u32 cc; - u32 attrs; - unsigned int nr_handles; - - if (len < TPM_HEADER_SIZE) - 
return -EINVAL; - - if (!space) - return 0; - - if (chip->flags & TPM_CHIP_FLAG_TPM2 && chip->nr_commands) { - cc = be32_to_cpu(header->ordinal); - - i = tpm2_find_cc(chip, cc); - if (i < 0) { - dev_dbg(&chip->dev, "0x%04X is an invalid command\n", - cc); - return -EOPNOTSUPP; - } - - attrs = chip->cc_attrs_tbl[i]; - nr_handles = - 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0)); - if (len < TPM_HEADER_SIZE + 4 * nr_handles) - goto err_len; - } - - return 0; -err_len: - dev_dbg(&chip->dev, - "%s: insufficient command length %zu", __func__, len); - return -EINVAL; -} - -static int tpm_request_locality(struct tpm_chip *chip, unsigned int flags) +static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz) { - int rc; - - if (flags & TPM_TRANSMIT_NESTED) - return 0; - - if (!chip->ops->request_locality) - return 0; - - rc = chip->ops->request_locality(chip, 0); - if (rc < 0) - return rc; - - chip->locality = rc; - - return 0; -} - -static void tpm_relinquish_locality(struct tpm_chip *chip, unsigned int flags) -{ - int rc; - - if (flags & TPM_TRANSMIT_NESTED) - return; - - if (!chip->ops->relinquish_locality) - return; - - rc = chip->ops->relinquish_locality(chip, chip->locality); - if (rc) - dev_err(&chip->dev, "%s: : error %d\n", __func__, rc); - - chip->locality = -1; -} - -static int tpm_cmd_ready(struct tpm_chip *chip, unsigned int flags) -{ - if (flags & TPM_TRANSMIT_NESTED) - return 0; - - if (!chip->ops->cmd_ready) - return 0; - - return chip->ops->cmd_ready(chip); -} - -static int tpm_go_idle(struct tpm_chip *chip, unsigned int flags) -{ - if (flags & TPM_TRANSMIT_NESTED) - return 0; - - if (!chip->ops->go_idle) - return 0; - - return chip->ops->go_idle(chip); -} - -static ssize_t tpm_try_transmit(struct tpm_chip *chip, - struct tpm_space *space, - u8 *buf, size_t bufsiz, - unsigned int flags) -{ - struct tpm_output_header *header = (void *)buf; + struct tpm_header *header = buf; int rc; ssize_t len = 0; u32 count, ordinal; unsigned long stop; - bool need_locality; - rc = tpm_validate_command(chip, space, buf, bufsiz); - if (rc == -EINVAL) - return rc; - /* - * If the command is not implemented by the TPM, synthesize a - * response with a TPM2_RC_COMMAND_CODE return for user-space. - */ - if (rc == -EOPNOTSUPP) { - header->length = cpu_to_be32(sizeof(*header)); - header->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS); - header->return_code = cpu_to_be32(TPM2_RC_COMMAND_CODE | - TSS2_RESMGR_TPM_RC_LAYER); - return sizeof(*header); - } + if (bufsiz < TPM_HEADER_SIZE) + return -EINVAL; if (bufsiz > TPM_BUFSIZE) bufsiz = TPM_BUFSIZE; - count = be32_to_cpu(*((__be32 *) (buf + 2))); - ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); + count = be32_to_cpu(header->length); + ordinal = be32_to_cpu(header->ordinal); if (count == 0) return -ENODATA; if (count > bufsiz) { @@ -201,37 +86,21 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, return -E2BIG; } - if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED)) - mutex_lock(&chip->tpm_mutex); - - if (chip->ops->clk_enable != NULL) - chip->ops->clk_enable(chip, true); - - /* Store the decision as chip->locality will be changed. 
*/ - need_locality = chip->locality == -1; - - if (need_locality) { - rc = tpm_request_locality(chip, flags); - if (rc < 0) { - need_locality = false; - goto out_locality; - } - } - - rc = tpm_cmd_ready(chip, flags); - if (rc) - goto out_locality; - - rc = tpm2_prepare_space(chip, space, ordinal, buf); - if (rc) - goto out; - rc = chip->ops->send(chip, buf, count); if (rc < 0) { if (rc != -EPIPE) dev_err(&chip->dev, - "%s: tpm_send: error %d\n", __func__, rc); - goto out; + "%s: send(): error %d\n", __func__, rc); + return rc; + } + + /* A sanity check. send() should just return zero on success e.g. + * not the command length. + */ + if (rc > 0) { + dev_warn(&chip->dev, + "%s: send(): invalid value %d\n", __func__, rc); + rc = 0; } if (chip->flags & TPM_CHIP_FLAG_IRQ) @@ -246,8 +115,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, if (chip->ops->req_canceled(chip, status)) { dev_err(&chip->dev, "Operation Canceled\n"); - rc = -ECANCELED; - goto out; + return -ECANCELED; } tpm_msleep(TPM_TIMEOUT_POLL); @@ -256,77 +124,45 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, chip->ops->cancel(chip); dev_err(&chip->dev, "Operation Timed out\n"); - rc = -ETIME; - goto out; + return -ETIME; out_recv: len = chip->ops->recv(chip, buf, bufsiz); if (len < 0) { rc = len; - dev_err(&chip->dev, - "tpm_transmit: tpm_recv: error %d\n", rc); - goto out; - } else if (len < TPM_HEADER_SIZE) { + dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc); + } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length)) rc = -EFAULT; - goto out; - } - if (len != be32_to_cpu(header->length)) { - rc = -EFAULT; - goto out; - } - - rc = tpm2_commit_space(chip, space, ordinal, buf, &len); - if (rc) - dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc); - -out: - /* may fail but do not override previous error value in rc */ - tpm_go_idle(chip, flags); - -out_locality: - if (need_locality) - tpm_relinquish_locality(chip, flags); - - if (chip->ops->clk_enable != NULL) - chip->ops->clk_enable(chip, false); - - if (!(flags & TPM_TRANSMIT_UNLOCKED) && !(flags & TPM_TRANSMIT_NESTED)) - mutex_unlock(&chip->tpm_mutex); return rc ? rc : len; } /** * tpm_transmit - Internal kernel interface to transmit TPM commands. + * @chip: a TPM chip to use + * @buf: a TPM command buffer + * @bufsiz: length of the TPM command buffer * - * @chip: TPM chip to use - * @space: tpm space - * @buf: TPM command buffer - * @bufsiz: length of the TPM command buffer - * @flags: tpm transmit flags - bitmap - * - * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY - * returns from the TPM and retransmits the command after a delay up - * to a maximum wait of TPM2_DURATION_LONG. + * A wrapper around tpm_try_transmit() that handles TPM2_RC_RETRY returns from + * the TPM and retransmits the command after a delay up to a maximum wait of + * TPM2_DURATION_LONG. * - * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2 - * only + * Note that TPM 1.x never returns TPM2_RC_RETRY so the retry logic is TPM 2.0 + * only. * * Return: - * the length of the return when the operation is successful. - * A negative number for system errors (errno). 
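tpm_try_transmit() now reads the length and ordinal through the typed tpm_header instead of raw byte offsets (buf + 2, buf + 6), and it treats a positive return from the driver's send() as a bug to warn about, since send() is expected to return zero on success. A small sketch of that header parse and sanity check; example_validate_and_send() is a hypothetical wrapper, not a function from the patch.

#include "tpm.h"	/* struct tpm_header, TPM_HEADER_SIZE */

static int example_validate_and_send(struct tpm_chip *chip,
				     u8 *buf, size_t bufsiz)
{
	struct tpm_header *header = (struct tpm_header *)buf;
	u32 count, ordinal;
	int rc;

	if (bufsiz < TPM_HEADER_SIZE)
		return -EINVAL;

	count = be32_to_cpu(header->length);	/* bytes 2..5 of the command */
	ordinal = be32_to_cpu(header->ordinal);	/* bytes 6..9 of the command */
	if (count == 0)
		return -ENODATA;
	if (count > bufsiz)
		return -E2BIG;

	dev_dbg(&chip->dev, "sending ordinal 0x%08x, %u bytes\n",
		ordinal, count);

	rc = chip->ops->send(chip, buf, count);
	if (rc < 0)
		return rc;

	/* send() should return 0 on success, not the byte count. */
	if (rc > 0) {
		dev_warn(&chip->dev, "send() returned %d, expected 0\n", rc);
		rc = 0;
	}
	return rc;
}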
+ * * The response length - OK + * * -errno - A system error */ -ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, - u8 *buf, size_t bufsiz, unsigned int flags) +ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz) { - struct tpm_output_header *header = (struct tpm_output_header *)buf; + struct tpm_header *header = (struct tpm_header *)buf; /* space for header and handles */ u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)]; unsigned int delay_msec = TPM2_DURATION_SHORT; u32 rc = 0; ssize_t ret; - const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE, - bufsiz); + const size_t save_size = min(sizeof(save), bufsiz); /* the command code is where the return code will be */ u32 cc = be32_to_cpu(header->return_code); @@ -338,7 +174,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, memcpy(save, buf, save_size); for (;;) { - ret = tpm_try_transmit(chip, space, buf, bufsiz, flags); + ret = tpm_try_transmit(chip, buf, bufsiz); if (ret < 0) break; rc = be32_to_cpu(header->return_code); @@ -365,39 +201,33 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, } return ret; } + /** * tpm_transmit_cmd - send a tpm command to the device - * The function extracts tpm out header return code - * - * @chip: TPM chip to use - * @space: tpm space - * @buf: TPM command buffer - * @bufsiz: length of the buffer - * @min_rsp_body_length: minimum expected length of response body - * @flags: tpm transmit flags - bitmap - * @desc: command description used in the error message + * @chip: a TPM chip to use + * @buf: a TPM command buffer + * @min_rsp_body_length: minimum expected length of response body + * @desc: command description used in the error message * * Return: - * 0 when the operation is successful. - * A negative number for system errors (errno). - * A positive number for a TPM error. 
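The retry wrapper keeps a copy of the command header plus handle area so the buffer can be restored before each resend, and doubles the delay between attempts until the accumulated wait passes TPM2_DURATION_LONG. A simplified restatement of the tpm_transmit() loop shape, assuming visibility of the file-local tpm_try_transmit() helper (TPM 2.0 only, since TPM 1.x never returns TPM2_RC_RETRY):

#include <linux/string.h>
#include "tpm.h"

static ssize_t example_transmit_with_retry(struct tpm_chip *chip,
					   u8 *buf, size_t bufsiz)
{
	struct tpm_header *header = (struct tpm_header *)buf;
	u8 save[TPM_HEADER_SIZE + 3 * sizeof(u32)];	/* header + handles */
	size_t save_size = min(sizeof(save), bufsiz);
	unsigned int delay_msec = TPM2_DURATION_SHORT;
	ssize_t ret;

	memcpy(save, buf, save_size);

	for (;;) {
		ret = tpm_try_transmit(chip, buf, bufsiz);
		if (ret < 0)
			break;
		if (be32_to_cpu(header->return_code) != TPM2_RC_RETRY)
			break;

		if (delay_msec > TPM2_DURATION_LONG) {
			dev_err(&chip->dev, "TPM is in retry loop\n");
			break;
		}
		tpm_msleep(delay_msec);
		delay_msec *= 2;
		memcpy(buf, save, save_size);	/* restore the clobbered command */
	}
	return ret;
}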
+ * * 0 - OK + * * -errno - A system error + * * TPM_RC - A TPM error */ -ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space, - void *buf, size_t bufsiz, - size_t min_rsp_body_length, unsigned int flags, - const char *desc) +ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, + size_t min_rsp_body_length, const char *desc) { - const struct tpm_output_header *header = buf; + const struct tpm_header *header = (struct tpm_header *)buf->data; int err; ssize_t len; - len = tpm_transmit(chip, space, buf, bufsiz, flags); + len = tpm_transmit(chip, buf->data, PAGE_SIZE); if (len < 0) return len; err = be32_to_cpu(header->return_code); if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED - && desc) + && err != TPM2_RC_TESTING && desc) dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err, desc); if (err) @@ -451,11 +281,12 @@ EXPORT_SYMBOL_GPL(tpm_is_tpm2); * tpm_pcr_read - read a PCR value from SHA1 bank * @chip: a &struct tpm_chip instance, %NULL for the default chip * @pcr_idx: the PCR to be retrieved - * @res_buf: the value of the PCR + * @digest: the PCR bank and buffer current PCR value is written to * * Return: same as with tpm_transmit_cmd() */ -int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) +int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digest) { int rc; @@ -464,9 +295,9 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) return -ENODEV; if (chip->flags & TPM_CHIP_FLAG_TPM2) - rc = tpm2_pcr_read(chip, pcr_idx, res_buf); + rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL); else - rc = tpm1_pcr_read(chip, pcr_idx, res_buf); + rc = tpm1_pcr_read(chip, pcr_idx, digest->digest); tpm_put_ops(chip); return rc; @@ -477,41 +308,34 @@ EXPORT_SYMBOL_GPL(tpm_pcr_read); * tpm_pcr_extend - extend a PCR value in SHA1 bank. * @chip: a &struct tpm_chip instance, %NULL for the default chip * @pcr_idx: the PCR to be retrieved - * @hash: the hash value used to extend the PCR value + * @digests: array of tpm_digest structures used to extend PCRs * - * Note: with TPM 2.0 extends also those banks with a known digest size to the - * cryto subsystem in order to prevent malicious use of those PCR banks. In the - * future we should dynamically determine digest sizes. + * Note: callers must pass a digest for every allocated PCR bank, in the same + * order of the banks in chip->allocated_banks. 
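For in-kernel callers the PCR API now works in terms of struct tpm_digest: tpm_pcr_read() fills one digest for the requested bank, and tpm_pcr_extend() expects one entry per bank in chip->allocated_banks, in the same order. A hedged sketch of a caller extending PCR 10 in every allocated bank with the same caller-supplied value; it assumes allocated_banks/nr_allocated_banks are reachable through the public struct tpm_chip as arranged by this series, and a real user would compute a per-bank hash rather than reusing one buffer.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tpm.h>

/* 'data' is assumed to be at least as large as the largest bank digest. */
static int example_extend_all_banks(struct tpm_chip *chip, const u8 *data)
{
	struct tpm_digest *digests;
	unsigned int i;
	int rc;

	digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
			  GFP_KERNEL);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < chip->nr_allocated_banks; i++) {
		digests[i].alg_id = chip->allocated_banks[i].alg_id;
		memcpy(digests[i].digest, data,
		       chip->allocated_banks[i].digest_size);
	}

	rc = tpm_pcr_extend(chip, 10, digests);
	kfree(digests);
	return rc;
}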
* * Return: same as with tpm_transmit_cmd() */ -int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash) +int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digests) { int rc; - struct tpm2_digest digest_list[ARRAY_SIZE(chip->active_banks)]; - u32 count = 0; int i; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; - if (chip->flags & TPM_CHIP_FLAG_TPM2) { - memset(digest_list, 0, sizeof(digest_list)); - - for (i = 0; i < ARRAY_SIZE(chip->active_banks) && - chip->active_banks[i] != TPM2_ALG_ERROR; i++) { - digest_list[i].alg_id = chip->active_banks[i]; - memcpy(digest_list[i].digest, hash, TPM_DIGEST_SIZE); - count++; - } + for (i = 0; i < chip->nr_allocated_banks; i++) + if (digests[i].alg_id != chip->allocated_banks[i].alg_id) + return -EINVAL; - rc = tpm2_pcr_extend(chip, pcr_idx, count, digest_list); + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + rc = tpm2_pcr_extend(chip, pcr_idx, digests); tpm_put_ops(chip); return rc; } - rc = tpm1_pcr_extend(chip, pcr_idx, hash, + rc = tpm1_pcr_extend(chip, pcr_idx, digests[0].digest, "attempting extend a PCR value"); tpm_put_ops(chip); return rc; @@ -528,14 +352,21 @@ EXPORT_SYMBOL_GPL(tpm_pcr_extend); */ int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) { + struct tpm_buf buf; int rc; chip = tpm_find_get_ops(chip); if (!chip) return -ENODEV; - rc = tpm_transmit_cmd(chip, NULL, cmd, buflen, 0, 0, - "attempting to a send a command"); + rc = tpm_buf_init(&buf, 0, 0); + if (rc) + goto out; + + memcpy(buf.data, cmd, buflen); + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to a send a command"); + tpm_buf_destroy(&buf); +out: tpm_put_ops(chip); return rc; } @@ -571,10 +402,16 @@ int tpm_pm_suspend(struct device *dev) if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED) return 0; - if (chip->flags & TPM_CHIP_FLAG_TPM2) - tpm2_shutdown(chip, TPM2_SU_STATE); - else + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + mutex_lock(&chip->tpm_mutex); + if (!tpm_chip_start(chip)) { + tpm2_shutdown(chip, TPM2_SU_STATE); + tpm_chip_stop(chip); + } + mutex_unlock(&chip->tpm_mutex); + } else { rc = tpm1_pm_suspend(chip, tpm_suspend_pcr); + } return rc; } diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index b88e08ec2c59..533a260d744e 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, { struct tpm_buf tpm_buf; struct tpm_readpubek_out *out; - ssize_t rc; int i; char *str = buf; struct tpm_chip *chip = to_tpm_chip(dev); @@ -47,19 +46,17 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, memset(&anti_replay, 0, sizeof(anti_replay)); - rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK); - if (rc) - return rc; + if (tpm_try_get_ops(chip)) + return 0; + + if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK)) + goto out_ops; tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay)); - rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE, - READ_PUBEK_RESULT_MIN_BODY_SIZE, 0, - "attempting to read the PUBEK"); - if (rc) { - tpm_buf_destroy(&tpm_buf); - return 0; - } + if (tpm_transmit_cmd(chip, &tpm_buf, READ_PUBEK_RESULT_MIN_BODY_SIZE, + "attempting to read the PUBEK")) + goto out_buf; out = (struct tpm_readpubek_out *)&tpm_buf.data[10]; str += @@ -90,9 +87,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, str += sprintf(str, "\n"); } - rc = str - buf; +out_buf: 
tpm_buf_destroy(&tpm_buf); - return rc; +out_ops: + tpm_put_ops(chip); + return str - buf; } static DEVICE_ATTR_RO(pubek); @@ -101,27 +100,32 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr, { cap_t cap; u8 digest[TPM_DIGEST_SIZE]; - ssize_t rc; u32 i, j, num_pcrs; char *str = buf; struct tpm_chip *chip = to_tpm_chip(dev); - rc = tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap, - "attempting to determine the number of PCRS", - sizeof(cap.num_pcrs)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm1_getcap(chip, TPM_CAP_PROP_PCR, &cap, + "attempting to determine the number of PCRS", + sizeof(cap.num_pcrs))) { + tpm_put_ops(chip); + return 0; + } + num_pcrs = be32_to_cpu(cap.num_pcrs); for (i = 0; i < num_pcrs; i++) { - rc = tpm1_pcr_read(chip, i, digest); - if (rc) + if (tpm1_pcr_read(chip, i, digest)) { + str = buf; break; + } str += sprintf(str, "PCR-%02d: ", i); for (j = 0; j < TPM_DIGEST_SIZE; j++) str += sprintf(str, "%02X ", digest[j]); str += sprintf(str, "\n"); } + tpm_put_ops(chip); return str - buf; } static DEVICE_ATTR_RO(pcrs); @@ -129,16 +133,21 @@ static DEVICE_ATTR_RO(pcrs); static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent enabled state", - sizeof(cap.perm_flags)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent enabled state", + sizeof(cap.perm_flags))) + goto out_ops; + rc = sprintf(buf, "%d\n", !cap.perm_flags.disable); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(enabled); @@ -146,16 +155,21 @@ static DEVICE_ATTR_RO(enabled); static ssize_t active_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap, - "attempting to determine the permanent active state", - sizeof(cap.perm_flags)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm1_getcap(chip, TPM_CAP_FLAG_PERM, &cap, + "attempting to determine the permanent active state", + sizeof(cap.perm_flags))) + goto out_ops; + rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(active); @@ -163,16 +177,21 @@ static DEVICE_ATTR_RO(active); static ssize_t owned_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, - "attempting to determine the owner state", - sizeof(cap.owned)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm1_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap, + "attempting to determine the owner state", + sizeof(cap.owned))) + goto out_ops; + rc = sprintf(buf, "%d\n", cap.owned); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(owned); @@ -180,16 +199,21 @@ static DEVICE_ATTR_RO(owned); static ssize_t temp_deactivated_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct tpm_chip *chip = to_tpm_chip(dev); + ssize_t rc = 0; cap_t cap; - ssize_t rc; - rc = tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, - "attempting to determine the temporary state", - sizeof(cap.stclear_flags)); - if 
(rc) + if (tpm_try_get_ops(chip)) return 0; + if (tpm1_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap, + "attempting to determine the temporary state", + sizeof(cap.stclear_flags))) + goto out_ops; + rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated); +out_ops: + tpm_put_ops(chip); return rc; } static DEVICE_ATTR_RO(temp_deactivated); @@ -198,15 +222,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tpm_chip *chip = to_tpm_chip(dev); - cap_t cap; - ssize_t rc; + ssize_t rc = 0; char *str = buf; + cap_t cap; - rc = tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, - "attempting to determine the manufacturer", - sizeof(cap.manufacturer_id)); - if (rc) + if (tpm_try_get_ops(chip)) return 0; + + if (tpm1_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap, + "attempting to determine the manufacturer", + sizeof(cap.manufacturer_id))) + goto out_ops; + str += sprintf(str, "Manufacturer: 0x%x\n", be32_to_cpu(cap.manufacturer_id)); @@ -223,11 +250,10 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, cap.tpm_version_1_2.revMinor); } else { /* Otherwise just use TPM_STRUCT_VER */ - rc = tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap, - "attempting to determine the 1.1 version", - sizeof(cap.tpm_version)); - if (rc) - return 0; + if (tpm1_getcap(chip, TPM_CAP_VERSION_1_1, &cap, + "attempting to determine the 1.1 version", + sizeof(cap.tpm_version))) + goto out_ops; str += sprintf(str, "TCG version: %d.%d\nFirmware version: %d.%d\n", cap.tpm_version.Major, @@ -235,8 +261,10 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr, cap.tpm_version.revMajor, cap.tpm_version.revMinor); } - - return str - buf; + rc = str - buf; +out_ops: + tpm_put_ops(chip); + return rc; } static DEVICE_ATTR_RO(caps); @@ -244,10 +272,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpm_chip *chip = to_tpm_chip(dev); - if (chip == NULL) + + if (tpm_try_get_ops(chip)) return 0; chip->ops->cancel(chip); + tpm_put_ops(chip); return count; } static DEVICE_ATTR_WO(cancel); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index f27d1f38a93d..2cce072f25b5 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -25,30 +25,22 @@ #include #include -#include -#include #include #include #include #include #include -#include -#include #include #include -#include #ifdef CONFIG_X86 #include #endif -enum tpm_const { - TPM_MINOR = 224, /* officially assigned */ - TPM_BUFSIZE = 4096, - TPM_NUM_DEVICES = 65536, - TPM_RETRY = 50, /* 5 seconds */ - TPM_NUM_EVENT_LOG_FILES = 3, -}; +#define TPM_MINOR 224 /* officially assigned */ +#define TPM_BUFSIZE 4096 +#define TPM_NUM_DEVICES 65536 +#define TPM_RETRY 50 enum tpm_timeout { TPM_TIMEOUT = 5, /* msecs */ @@ -65,16 +57,6 @@ enum tpm_addr { TPM_ADDR = 0x4E, }; -/* Indexes the duration array */ -enum tpm_duration { - TPM_SHORT = 0, - TPM_MEDIUM = 1, - TPM_LONG = 2, - TPM_LONG_LONG = 3, - TPM_UNDEFINED, - TPM_NUM_DURATIONS = TPM_UNDEFINED, -}; - #define TPM_WARN_RETRY 0x800 #define TPM_WARN_DOING_SELFTEST 0x802 #define TPM_ERR_DEACTIVATED 0x6 @@ -122,17 +104,6 @@ enum tpm2_return_codes { TPM2_RC_RETRY = 0x0922, }; -enum tpm2_algorithms { - TPM2_ALG_ERROR = 0x0000, - TPM2_ALG_SHA1 = 0x0004, - TPM2_ALG_KEYEDHASH = 0x0008, - TPM2_ALG_SHA256 = 0x000B, - TPM2_ALG_SHA384 = 0x000C, - TPM2_ALG_SHA512 = 0x000D, - TPM2_ALG_NULL = 0x0010, - TPM2_ALG_SM3_256 = 0x0012, -}; - enum tpm2_command_codes { TPM2_CC_FIRST = 
0x011F, TPM2_CC_HIERARCHY_CONTROL = 0x0121, @@ -190,15 +161,6 @@ enum tpm2_cc_attrs { #define TPM_VID_WINBOND 0x1050 #define TPM_VID_STM 0x104A -#define TPM_PPI_VERSION_LEN 3 - -struct tpm_space { - u32 context_tbl[3]; - u8 *context_buf; - u32 session_tbl[3]; - u8 *session_buf; -}; - enum tpm_chip_flags { TPM_CHIP_FLAG_TPM2 = BIT(1), TPM_CHIP_FLAG_IRQ = BIT(2), @@ -207,82 +169,15 @@ enum tpm_chip_flags { TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5), }; -struct tpm_bios_log { - void *bios_event_log; - void *bios_event_log_end; -}; - -struct tpm_chip_seqops { - struct tpm_chip *chip; - const struct seq_operations *seqops; -}; - -struct tpm_chip { - struct device dev; - struct device devs; - struct cdev cdev; - struct cdev cdevs; - - /* A driver callback under ops cannot be run unless ops_sem is held - * (sometimes implicitly, eg for the sysfs code). ops becomes null - * when the driver is unregistered, see tpm_try_get_ops. - */ - struct rw_semaphore ops_sem; - const struct tpm_class_ops *ops; - - struct tpm_bios_log log; - struct tpm_chip_seqops bin_log_seqops; - struct tpm_chip_seqops ascii_log_seqops; - - unsigned int flags; - - int dev_num; /* /dev/tpm# */ - unsigned long is_open; /* only one allowed */ - - char hwrng_name[64]; - struct hwrng hwrng; - - struct mutex tpm_mutex; /* tpm is processing */ - - unsigned long timeout_a; /* jiffies */ - unsigned long timeout_b; /* jiffies */ - unsigned long timeout_c; /* jiffies */ - unsigned long timeout_d; /* jiffies */ - bool timeout_adjusted; - unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */ - bool duration_adjusted; - - struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES]; - - const struct attribute_group *groups[3]; - unsigned int groups_cnt; - - u16 active_banks[7]; -#ifdef CONFIG_ACPI - acpi_handle acpi_dev_handle; - char ppi_version[TPM_PPI_VERSION_LEN + 1]; -#endif /* CONFIG_ACPI */ - - struct tpm_space work_space; - u32 nr_commands; - u32 *cc_attrs_tbl; - - /* active locality */ - int locality; -}; - #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev) -struct tpm_input_header { - __be16 tag; - __be32 length; - __be32 ordinal; -} __packed; - -struct tpm_output_header { - __be16 tag; - __be32 length; - __be32 return_code; +struct tpm_header { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; } __packed; #define TPM_TAG_RQU_COMMAND 193 @@ -401,8 +296,8 @@ struct tpm_buf { static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal) { - struct tpm_input_header *head; - head = (struct tpm_input_header *)buf->data; + struct tpm_header *head = (struct tpm_header *)buf->data; + head->tag = cpu_to_be16(tag); head->length = cpu_to_be32(sizeof(*head)); head->ordinal = cpu_to_be32(ordinal); @@ -428,14 +323,14 @@ static inline void tpm_buf_destroy(struct tpm_buf *buf) static inline u32 tpm_buf_length(struct tpm_buf *buf) { - struct tpm_input_header *head = (struct tpm_input_header *) buf->data; + struct tpm_header *head = (struct tpm_header *)buf->data; return be32_to_cpu(head->length); } static inline u16 tpm_buf_tag(struct tpm_buf *buf) { - struct tpm_input_header *head = (struct tpm_input_header *) buf->data; + struct tpm_header *head = (struct tpm_header *)buf->data; return be16_to_cpu(head->tag); } @@ -444,7 +339,7 @@ static inline void tpm_buf_append(struct tpm_buf *buf, const unsigned char *new_data, unsigned int new_len) { - struct tpm_input_header *head = (struct tpm_input_header *) buf->data; + struct tpm_header *head = (struct tpm_header *)buf->data; u32 len = tpm_buf_length(buf); 
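Merging tpm_input_header and tpm_output_header into a single struct tpm_header works because the third field is the only difference: it carries the ordinal on the way out and the return code on the way back, at the same offset in the same buffer. A stand-alone restatement of that layout (the struct and function below are illustrative, not code from the patch):

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_tpm_header {
	__be16 tag;
	__be32 length;
	union {
		__be32 ordinal;		/* valid in a command */
		__be32 return_code;	/* valid in a response */
	};
} __packed;				/* 10 bytes, i.e. TPM_HEADER_SIZE */

static void example_show_both_views(u8 *buf)
{
	struct example_tpm_header *h = (struct example_tpm_header *)buf;

	/* Before transmission: the caller filled tag/length/ordinal. */
	pr_debug("cmd: tag=%u len=%u ordinal=0x%08x\n",
		 be16_to_cpu(h->tag), be32_to_cpu(h->length),
		 be32_to_cpu(h->ordinal));

	/* After the TPM replied into the same buffer, the same four bytes
	 * are read back as the response return code. */
	pr_debug("rsp: tag=%u len=%u rc=0x%08x\n",
		 be16_to_cpu(h->tag), be32_to_cpu(h->length),
		 be32_to_cpu(h->return_code));
}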
/* Return silently if overflow has already happened. */ @@ -487,25 +382,9 @@ extern const struct file_operations tpm_fops; extern const struct file_operations tpmrm_fops; extern struct idr dev_nums_idr; -/** - * enum tpm_transmit_flags - flags for tpm_transmit() - * - * @TPM_TRANSMIT_UNLOCKED: do not lock the chip - * @TPM_TRANSMIT_NESTED: discard setup steps (power management, - * locality) including locking (i.e. implicit - * UNLOCKED) - */ -enum tpm_transmit_flags { - TPM_TRANSMIT_UNLOCKED = BIT(0), - TPM_TRANSMIT_NESTED = BIT(1), -}; - -ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, - u8 *buf, size_t bufsiz, unsigned int flags); -ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space, - void *buf, size_t bufsiz, - size_t min_rsp_body_length, unsigned int flags, - const char *desc); +ssize_t tpm_transmit(struct tpm_chip *chip, u8 *buf, size_t bufsiz); +ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_buf *buf, + size_t min_rsp_body_length, const char *desc); int tpm_get_timeouts(struct tpm_chip *); int tpm_auto_startup(struct tpm_chip *chip); @@ -530,6 +409,8 @@ static inline void tpm_msleep(unsigned int delay_msec) delay_msec * 1000); }; +int tpm_chip_start(struct tpm_chip *chip); +void tpm_chip_stop(struct tpm_chip *chip); struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip); __must_check int tpm_try_get_ops(struct tpm_chip *chip); void tpm_put_ops(struct tpm_chip *chip); @@ -558,12 +439,12 @@ static inline u32 tpm2_rc_value(u32 rc) } int tpm2_get_timeouts(struct tpm_chip *chip); -int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); -int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count, - struct tpm2_digest *digests); +int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digest, u16 *digest_size_ptr); +int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digests); int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max); -void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, - unsigned int flags); +void tpm2_flush_context(struct tpm_chip *chip, u32 handle); int tpm2_seal_trusted(struct tpm_chip *chip, struct trusted_key_payload *payload, struct trusted_key_options *options); @@ -580,10 +461,11 @@ int tpm2_probe(struct tpm_chip *chip); int tpm2_find_cc(struct tpm_chip *chip, u32 cc); int tpm2_init_space(struct tpm_space *space); void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space); -int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, - u8 *cmd); -int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, - u32 cc, u8 *buf, size_t *bufsiz); +void tpm2_flush_space(struct tpm_chip *chip); +int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, + size_t cmdsiz); +int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf, + size_t *bufsiz); int tpm_bios_log_setup(struct tpm_chip *chip); void tpm_bios_log_teardown(struct tpm_chip *chip); diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c index 6f306338953b..85dcf2654d11 100644 --- a/drivers/char/tpm/tpm1-cmd.c +++ b/drivers/char/tpm/tpm1-cmd.c @@ -334,11 +334,8 @@ static int tpm1_startup(struct tpm_chip *chip) tpm_buf_append_u16(&buf, TPM_ST_CLEAR); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, - "attempting to start the TPM"); - + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM"); tpm_buf_destroy(&buf); - return rc; } @@ -380,8 +377,7 @@ int 
tpm1_get_timeouts(struct tpm_chip *chip) * of misreporting. */ if (chip->ops->update_timeouts) - chip->timeout_adjusted = - chip->ops->update_timeouts(chip, timeout_eff); + chip->ops->update_timeouts(chip, timeout_eff); if (!chip->timeout_adjusted) { /* Restore default if chip reported 0 */ @@ -462,9 +458,7 @@ int tpm1_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash, tpm_buf_append_u32(&buf, pcr_idx); tpm_buf_append(&buf, hash, TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - TPM_DIGEST_SIZE, 0, log_msg); - + rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE, log_msg); tpm_buf_destroy(&buf); return rc; } @@ -494,11 +488,9 @@ ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap, tpm_buf_append_u32(&buf, 4); tpm_buf_append_u32(&buf, subcap_id); } - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - min_cap_length, 0, desc); + rc = tpm_transmit_cmd(chip, &buf, min_cap_length, desc); if (!rc) *cap = *(cap_t *)&buf.data[TPM_HEADER_SIZE + 4]; - tpm_buf_destroy(&buf); return rc; } @@ -537,8 +529,7 @@ int tpm1_get_random(struct tpm_chip *chip, u8 *dest, size_t max) do { tpm_buf_append_u32(&buf, num_bytes); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - sizeof(out->rng_data_len), 0, + rc = tpm_transmit_cmd(chip, &buf, sizeof(out->rng_data_len), "attempting get random"); if (rc) goto out; @@ -583,8 +574,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) tpm_buf_append_u32(&buf, pcr_idx); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - TPM_DIGEST_SIZE, 0, + rc = tpm_transmit_cmd(chip, &buf, TPM_DIGEST_SIZE, "attempting to read a pcr value"); if (rc) goto out; @@ -618,11 +608,8 @@ static int tpm1_continue_selftest(struct tpm_chip *chip) if (rc) return rc; - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - 0, 0, "continue selftest"); - + rc = tpm_transmit_cmd(chip, &buf, 0, "continue selftest"); tpm_buf_destroy(&buf); - return rc; } @@ -709,6 +696,18 @@ int tpm1_auto_startup(struct tpm_chip *chip) goto out; } + chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks), + GFP_KERNEL); + if (!chip->allocated_banks) { + rc = -ENOMEM; + goto out; + } + + chip->allocated_banks[0].alg_id = TPM_ALG_SHA1; + chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1]; + chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1; + chip->nr_allocated_banks = 1; + return rc; out: if (rc > 0) @@ -747,9 +746,7 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr) return rc; /* now do the actual savestate */ for (try = 0; try < TPM_RETRY; try++) { - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - 0, 0, NULL); - + rc = tpm_transmit_cmd(chip, &buf, 0, NULL); /* * If the TPM indicates that it is too busy to respond to * this command then retry before giving up. 
It can take diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index a6bec13afa69..e74c5b7b64bf 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -33,11 +33,11 @@ struct tpm2_hash { }; static struct tpm2_hash tpm2_hash_map[] = { - {HASH_ALGO_SHA1, TPM2_ALG_SHA1}, - {HASH_ALGO_SHA256, TPM2_ALG_SHA256}, - {HASH_ALGO_SHA384, TPM2_ALG_SHA384}, - {HASH_ALGO_SHA512, TPM2_ALG_SHA512}, - {HASH_ALGO_SM3_256, TPM2_ALG_SM3_256}, + {HASH_ALGO_SHA1, TPM_ALG_SHA1}, + {HASH_ALGO_SHA256, TPM_ALG_SHA256}, + {HASH_ALGO_SHA384, TPM_ALG_SHA384}, + {HASH_ALGO_SHA512, TPM_ALG_SHA512}, + {HASH_ALGO_SM3_256, TPM_ALG_SM3_256}, }; int tpm2_get_timeouts(struct tpm_chip *chip) @@ -171,20 +171,36 @@ struct tpm2_pcr_read_out { * tpm2_pcr_read() - read a PCR value * @chip: TPM chip to use. * @pcr_idx: index of the PCR to read. - * @res_buf: buffer to store the resulting hash. + * @digest: PCR bank and buffer current PCR value is written to. + * @digest_size_ptr: pointer to variable that stores the digest size. * * Return: Same as with tpm_transmit_cmd. */ -int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) +int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digest, u16 *digest_size_ptr) { + int i; int rc; struct tpm_buf buf; struct tpm2_pcr_read_out *out; u8 pcr_select[TPM2_PCR_SELECT_MIN] = {0}; + u16 digest_size; + u16 expected_digest_size = 0; if (pcr_idx >= TPM2_PLATFORM_PCR) return -EINVAL; + if (!digest_size_ptr) { + for (i = 0; i < chip->nr_allocated_banks && + chip->allocated_banks[i].alg_id != digest->alg_id; i++) + ; + + if (i == chip->nr_allocated_banks) + return -EINVAL; + + expected_digest_size = chip->allocated_banks[i].digest_size; + } + rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_PCR_READ); if (rc) return rc; @@ -192,18 +208,28 @@ int tpm2_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) pcr_select[pcr_idx >> 3] = 1 << (pcr_idx & 0x7); tpm_buf_append_u32(&buf, 1); - tpm_buf_append_u16(&buf, TPM2_ALG_SHA1); + tpm_buf_append_u16(&buf, digest->alg_id); tpm_buf_append_u8(&buf, TPM2_PCR_SELECT_MIN); tpm_buf_append(&buf, (const unsigned char *)pcr_select, sizeof(pcr_select)); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, - res_buf ? "attempting to read a pcr value" : NULL); - if (rc == 0 && res_buf) { - out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE]; - memcpy(res_buf, out->digest, SHA1_DIGEST_SIZE); + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to read a pcr value"); + if (rc) + goto out; + + out = (struct tpm2_pcr_read_out *)&buf.data[TPM_HEADER_SIZE]; + digest_size = be16_to_cpu(out->digest_size); + if (digest_size > sizeof(digest->digest) || + (!digest_size_ptr && digest_size != expected_digest_size)) { + rc = -EINVAL; + goto out; } + if (digest_size_ptr) + *digest_size_ptr = digest_size; + + memcpy(digest->digest, out->digest, digest_size); +out: tpm_buf_destroy(&buf); return rc; } @@ -220,22 +246,17 @@ struct tpm2_null_auth_area { * * @chip: TPM chip to use. * @pcr_idx: index of the PCR. - * @count: number of digests passed. * @digests: list of pcr banks and corresponding digest values to extend. * * Return: Same as with tpm_transmit_cmd. 
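The reworked tpm2_pcr_read() above takes a struct tpm_digest whose alg_id selects the PCR bank, plus an optional digest size pointer; when that pointer is NULL the returned size is validated against the matching entry in chip->allocated_banks. A minimal caller sketch under those assumptions (the helper name is illustrative, error handling trimmed):

static int example_read_pcr0(struct tpm_chip *chip, struct tpm_digest *d)
{
	if (!chip->nr_allocated_banks)
		return -ENODEV;

	/* Read from the first bank the TPM actually allocated. */
	d->alg_id = chip->allocated_banks[0].alg_id;

	/* NULL size pointer: tpm2_pcr_read() checks the digest size
	 * against the allocated bank for this algorithm. */
	return tpm2_pcr_read(chip, 0, d, NULL);
}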
*/ -int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count, - struct tpm2_digest *digests) +int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, + struct tpm_digest *digests) { struct tpm_buf buf; struct tpm2_null_auth_area auth_area; int rc; int i; - int j; - - if (count > ARRAY_SIZE(chip->active_banks)) - return -EINVAL; rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_PCR_EXTEND); if (rc) @@ -251,21 +272,15 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, u32 count, tpm_buf_append_u32(&buf, sizeof(struct tpm2_null_auth_area)); tpm_buf_append(&buf, (const unsigned char *)&auth_area, sizeof(auth_area)); - tpm_buf_append_u32(&buf, count); - - for (i = 0; i < count; i++) { - for (j = 0; j < ARRAY_SIZE(tpm2_hash_map); j++) { - if (digests[i].alg_id != tpm2_hash_map[j].tpm_id) - continue; - tpm_buf_append_u16(&buf, digests[i].alg_id); - tpm_buf_append(&buf, (const unsigned char - *)&digests[i].digest, - hash_digest_size[tpm2_hash_map[j].crypto_id]); - } + tpm_buf_append_u32(&buf, chip->nr_allocated_banks); + + for (i = 0; i < chip->nr_allocated_banks; i++) { + tpm_buf_append_u16(&buf, digests[i].alg_id); + tpm_buf_append(&buf, (const unsigned char *)&digests[i].digest, + chip->allocated_banks[i].digest_size); } - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, - "attempting extend a PCR value"); + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value"); tpm_buf_destroy(&buf); @@ -309,10 +324,10 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max) do { tpm_buf_reset(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM); tpm_buf_append_u16(&buf, num_bytes); - err = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, + err = tpm_transmit_cmd(chip, &buf, offsetof(struct tpm2_get_random_out, buffer), - 0, "attempting get random"); + "attempting get random"); if (err) goto out; @@ -341,14 +356,11 @@ out: } /** - * tpm2_flush_context_cmd() - execute a TPM2_FlushContext command + * tpm2_flush_context() - execute a TPM2_FlushContext command * @chip: TPM chip to use * @handle: context handle - * @flags: tpm transmit flags - bitmap - * */ -void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, - unsigned int flags) +void tpm2_flush_context(struct tpm_chip *chip, u32 handle) { struct tpm_buf buf; int rc; @@ -362,9 +374,7 @@ void tpm2_flush_context_cmd(struct tpm_chip *chip, u32 handle, tpm_buf_append_u32(&buf, handle); - (void) tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, flags, - "flushing context"); - + tpm_transmit_cmd(chip, &buf, 0, "flushing context"); tpm_buf_destroy(&buf); } @@ -449,7 +459,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip, /* public */ tpm_buf_append_u16(&buf, 14 + options->policydigest_len); - tpm_buf_append_u16(&buf, TPM2_ALG_KEYEDHASH); + tpm_buf_append_u16(&buf, TPM_ALG_KEYEDHASH); tpm_buf_append_u16(&buf, hash); /* policy */ @@ -464,7 +474,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip, } /* public parameters */ - tpm_buf_append_u16(&buf, TPM2_ALG_NULL); + tpm_buf_append_u16(&buf, TPM_ALG_NULL); tpm_buf_append_u16(&buf, 0); /* outside info */ @@ -478,8 +488,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip, goto out; } - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 4, 0, - "sealing data"); + rc = tpm_transmit_cmd(chip, &buf, 4, "sealing data"); if (rc) goto out; @@ -516,7 +525,6 @@ out: * @payload: the key data in clear and encrypted form * @options: authentication values and other options * @blob_handle: returned blob handle - * @flags: tpm transmit flags * * Return: 0 on success. 
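With the count argument gone, tpm2_pcr_extend() above expects exactly chip->nr_allocated_banks digests, one per allocated bank and in the same order as chip->allocated_banks. A hedged sketch of preparing such an array (illustration only: a real caller would hash its data once per bank rather than copy raw bytes):

static int example_extend(struct tpm_chip *chip, u32 pcr_idx,
			  const u8 *bytes, size_t len)
{
	struct tpm_digest *digests;
	int i, rc;

	digests = kcalloc(chip->nr_allocated_banks, sizeof(*digests),
			  GFP_KERNEL);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < chip->nr_allocated_banks; i++) {
		digests[i].alg_id = chip->allocated_banks[i].alg_id;
		/* Illustration: truncate or zero-pad instead of hashing. */
		memcpy(digests[i].digest, bytes,
		       min_t(size_t, len,
			     chip->allocated_banks[i].digest_size));
	}

	rc = tpm2_pcr_extend(chip, pcr_idx, digests);
	kfree(digests);
	return rc;
}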
* -E2BIG on wrong payload size. @@ -526,7 +534,7 @@ out: static int tpm2_load_cmd(struct tpm_chip *chip, struct trusted_key_payload *payload, struct trusted_key_options *options, - u32 *blob_handle, unsigned int flags) + u32 *blob_handle) { struct tpm_buf buf; unsigned int private_len; @@ -561,8 +569,7 @@ static int tpm2_load_cmd(struct tpm_chip *chip, goto out; } - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 4, flags, - "loading blob"); + rc = tpm_transmit_cmd(chip, &buf, 4, "loading blob"); if (!rc) *blob_handle = be32_to_cpup( (__be32 *) &buf.data[TPM_HEADER_SIZE]); @@ -583,7 +590,6 @@ out: * @payload: the key data in clear and encrypted form * @options: authentication values and other options * @blob_handle: blob handle - * @flags: tpm_transmit_cmd flags * * Return: 0 on success * -EPERM on tpm error status @@ -592,7 +598,7 @@ out: static int tpm2_unseal_cmd(struct tpm_chip *chip, struct trusted_key_payload *payload, struct trusted_key_options *options, - u32 blob_handle, unsigned int flags) + u32 blob_handle) { struct tpm_buf buf; u16 data_len; @@ -612,8 +618,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, options->blobauth /* hmac */, TPM_DIGEST_SIZE); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 6, flags, - "unsealing"); + rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing"); if (rc > 0) rc = -EPERM; @@ -657,17 +662,12 @@ int tpm2_unseal_trusted(struct tpm_chip *chip, u32 blob_handle; int rc; - mutex_lock(&chip->tpm_mutex); - rc = tpm2_load_cmd(chip, payload, options, &blob_handle, - TPM_TRANSMIT_UNLOCKED); + rc = tpm2_load_cmd(chip, payload, options, &blob_handle); if (rc) - goto out; + return rc; - rc = tpm2_unseal_cmd(chip, payload, options, blob_handle, - TPM_TRANSMIT_UNLOCKED); - tpm2_flush_context_cmd(chip, blob_handle, TPM_TRANSMIT_UNLOCKED); -out: - mutex_unlock(&chip->tpm_mutex); + rc = tpm2_unseal_cmd(chip, payload, options, blob_handle); + tpm2_flush_context(chip, blob_handle); return rc; } @@ -703,7 +703,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value, tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES); tpm_buf_append_u32(&buf, property_id); tpm_buf_append_u32(&buf, 1); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL); + rc = tpm_transmit_cmd(chip, &buf, 0, NULL); if (!rc) { out = (struct tpm2_get_cap_out *) &buf.data[TPM_HEADER_SIZE]; @@ -733,8 +733,7 @@ void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type) if (rc) return; tpm_buf_append_u16(&buf, shutdown_type); - tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, - "stopping the TPM"); + tpm_transmit_cmd(chip, &buf, 0, "stopping the TPM"); tpm_buf_destroy(&buf); } @@ -763,7 +762,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip) return rc; tpm_buf_append_u8(&buf, full); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting the self test"); tpm_buf_destroy(&buf); @@ -790,7 +789,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip) */ int tpm2_probe(struct tpm_chip *chip) { - struct tpm_output_header *out; + struct tpm_header *out; struct tpm_buf buf; int rc; @@ -800,10 +799,10 @@ int tpm2_probe(struct tpm_chip *chip) tpm_buf_append_u32(&buf, TPM2_CAP_TPM_PROPERTIES); tpm_buf_append_u32(&buf, TPM_PT_TOTAL_COMMANDS); tpm_buf_append_u32(&buf, 1); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, NULL); + rc = tpm_transmit_cmd(chip, &buf, 0, NULL); /* We ignore TPM return codes on purpose. 
*/ if (rc >= 0) { - out = (struct tpm_output_header *)buf.data; + out = (struct tpm_header *)buf.data; if (be16_to_cpu(out->tag) == TPM2_ST_NO_SESSIONS) chip->flags |= TPM_CHIP_FLAG_TPM2; } @@ -812,6 +811,30 @@ int tpm2_probe(struct tpm_chip *chip) } EXPORT_SYMBOL_GPL(tpm2_probe); +static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index) +{ + struct tpm_bank_info *bank = chip->allocated_banks + bank_index; + struct tpm_digest digest = { .alg_id = bank->alg_id }; + int i; + + /* + * Avoid unnecessary PCR read operations to reduce overhead + * and obtain identifiers of the crypto subsystem. + */ + for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++) { + enum hash_algo crypto_algo = tpm2_hash_map[i].crypto_id; + + if (bank->alg_id != tpm2_hash_map[i].tpm_id) + continue; + + bank->digest_size = hash_digest_size[crypto_algo]; + bank->crypto_id = crypto_algo; + return 0; + } + + return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size); +} + struct tpm2_pcr_selection { __be16 hash_alg; u8 size_of_select; @@ -825,8 +848,10 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) void *marker; void *end; void *pcr_select_offset; - unsigned int count; u32 sizeof_pcr_selection; + u32 nr_possible_banks; + u32 nr_alloc_banks = 0; + u16 hash_alg; u32 rsp_len; int rc; int i = 0; @@ -839,16 +864,18 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) tpm_buf_append_u32(&buf, 0); tpm_buf_append_u32(&buf, 1); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 9, 0, - "get tpm pcr allocation"); + rc = tpm_transmit_cmd(chip, &buf, 9, "get tpm pcr allocation"); if (rc) goto out; - count = be32_to_cpup( + nr_possible_banks = be32_to_cpup( (__be32 *)&buf.data[TPM_HEADER_SIZE + 5]); - if (count > ARRAY_SIZE(chip->active_banks)) { - rc = -ENODEV; + chip->allocated_banks = kcalloc(nr_possible_banks, + sizeof(*chip->allocated_banks), + GFP_KERNEL); + if (!chip->allocated_banks) { + rc = -ENOMEM; goto out; } @@ -857,7 +884,7 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) rsp_len = be32_to_cpup((__be32 *)&buf.data[2]); end = &buf.data[rsp_len]; - for (i = 0; i < count; i++) { + for (i = 0; i < nr_possible_banks; i++) { pcr_select_offset = marker + offsetof(struct tpm2_pcr_selection, size_of_select); if (pcr_select_offset >= end) { @@ -866,17 +893,28 @@ static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip) } memcpy(&pcr_selection, marker, sizeof(pcr_selection)); - chip->active_banks[i] = be16_to_cpu(pcr_selection.hash_alg); + hash_alg = be16_to_cpu(pcr_selection.hash_alg); + + pcr_select_offset = memchr_inv(pcr_selection.pcr_select, 0, + pcr_selection.size_of_select); + if (pcr_select_offset) { + chip->allocated_banks[nr_alloc_banks].alg_id = hash_alg; + + rc = tpm2_init_bank_info(chip, nr_alloc_banks); + if (rc < 0) + break; + + nr_alloc_banks++; + } + sizeof_pcr_selection = sizeof(pcr_selection.hash_alg) + sizeof(pcr_selection.size_of_select) + pcr_selection.size_of_select; marker = marker + sizeof_pcr_selection; } + chip->nr_allocated_banks = nr_alloc_banks; out: - if (i < ARRAY_SIZE(chip->active_banks)) - chip->active_banks[i] = TPM2_ALG_ERROR; - tpm_buf_destroy(&buf); return rc; @@ -911,8 +949,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) tpm_buf_append_u32(&buf, TPM2_CC_FIRST); tpm_buf_append_u32(&buf, nr_commands); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, - 9 + 4 * nr_commands, 0, NULL); + rc = tpm_transmit_cmd(chip, &buf, 9 + 4 * nr_commands, NULL); if (rc) { tpm_buf_destroy(&buf); goto out; @@ -969,8 +1006,7 @@ static 
int tpm2_startup(struct tpm_chip *chip) return rc; tpm_buf_append_u16(&buf, TPM2_SU_CLEAR); - rc = tpm_transmit_cmd(chip, NULL, buf.data, PAGE_SIZE, 0, 0, - "attempting to start the TPM"); + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to start the TPM"); tpm_buf_destroy(&buf); return rc; diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index dcdfde3c253e..4a2773c3374f 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -38,8 +38,7 @@ static void tpm2_flush_sessions(struct tpm_chip *chip, struct tpm_space *space) for (i = 0; i < ARRAY_SIZE(space->session_tbl); i++) { if (space->session_tbl[i]) - tpm2_flush_context_cmd(chip, space->session_tbl[i], - TPM_TRANSMIT_NESTED); + tpm2_flush_context(chip, space->session_tbl[i]); } } @@ -61,7 +60,10 @@ int tpm2_init_space(struct tpm_space *space) void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space) { mutex_lock(&chip->tpm_mutex); - tpm2_flush_sessions(chip, space); + if (!tpm_chip_start(chip)) { + tpm2_flush_sessions(chip, space); + tpm_chip_stop(chip); + } mutex_unlock(&chip->tpm_mutex); kfree(space->context_buf); kfree(space->session_buf); @@ -83,8 +85,7 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf, body_size = sizeof(*ctx) + be16_to_cpu(ctx->blob_size); tpm_buf_append(&tbuf, &buf[*offset], body_size); - rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 4, - TPM_TRANSMIT_NESTED, NULL); + rc = tpm_transmit_cmd(chip, &tbuf, 4, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); @@ -132,8 +133,7 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, tpm_buf_append_u32(&tbuf, handle); - rc = tpm_transmit_cmd(chip, NULL, tbuf.data, PAGE_SIZE, 0, - TPM_TRANSMIT_NESTED, NULL); + rc = tpm_transmit_cmd(chip, &tbuf, 0, NULL); if (rc < 0) { dev_warn(&chip->dev, "%s: failed with a system error %d\n", __func__, rc); @@ -162,15 +162,14 @@ static int tpm2_save_context(struct tpm_chip *chip, u32 handle, u8 *buf, return 0; } -static void tpm2_flush_space(struct tpm_chip *chip) +void tpm2_flush_space(struct tpm_chip *chip) { struct tpm_space *space = &chip->work_space; int i; for (i = 0; i < ARRAY_SIZE(space->context_tbl); i++) if (space->context_tbl[i] && ~space->context_tbl[i]) - tpm2_flush_context_cmd(chip, space->context_tbl[i], - TPM_TRANSMIT_NESTED); + tpm2_flush_context(chip, space->context_tbl[i]); tpm2_flush_sessions(chip, space); } @@ -264,14 +263,54 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd) return 0; } -int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, - u8 *cmd) +static int tpm_find_and_validate_cc(struct tpm_chip *chip, + struct tpm_space *space, + const void *cmd, size_t len) +{ + const struct tpm_header *header = (const void *)cmd; + int i; + u32 cc; + u32 attrs; + unsigned int nr_handles; + + if (len < TPM_HEADER_SIZE || !chip->nr_commands) + return -EINVAL; + + cc = be32_to_cpu(header->ordinal); + + i = tpm2_find_cc(chip, cc); + if (i < 0) { + dev_dbg(&chip->dev, "0x%04X is an invalid command\n", + cc); + return -EOPNOTSUPP; + } + + attrs = chip->cc_attrs_tbl[i]; + nr_handles = + 4 * ((attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0)); + if (len < TPM_HEADER_SIZE + 4 * nr_handles) + goto err_len; + + return cc; +err_len: + dev_dbg(&chip->dev, "%s: insufficient command length %zu", __func__, + len); + return -EINVAL; +} + +int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd, + size_t cmdsiz) { int rc; + 
int cc; if (!space) return 0; + cc = tpm_find_and_validate_cc(chip, space, cmd, cmdsiz); + if (cc < 0) + return cc; + memcpy(&chip->work_space.context_tbl, &space->context_tbl, sizeof(space->context_tbl)); memcpy(&chip->work_space.session_tbl, &space->session_tbl, @@ -291,6 +330,7 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u32 cc, return rc; } + chip->last_cc = cc; return 0; } @@ -334,7 +374,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp, size_t len) { struct tpm_space *space = &chip->work_space; - struct tpm_output_header *header = (void *)rsp; + struct tpm_header *header = (struct tpm_header *)rsp; u32 phandle; u32 phandle_type; u32 vhandle; @@ -377,7 +417,7 @@ static int tpm2_map_response_header(struct tpm_chip *chip, u32 cc, u8 *rsp, return 0; out_no_slots: - tpm2_flush_context_cmd(chip, phandle, TPM_TRANSMIT_NESTED); + tpm2_flush_context(chip, phandle); dev_warn(&chip->dev, "%s: out of slots for 0x%08X\n", __func__, phandle); return -ENOMEM; @@ -394,7 +434,7 @@ static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp, size_t len) { struct tpm_space *space = &chip->work_space; - struct tpm_output_header *header = (void *)rsp; + struct tpm_header *header = (struct tpm_header *)rsp; struct tpm2_cap_handles *data; u32 phandle; u32 phandle_type; @@ -464,8 +504,7 @@ static int tpm2_save_space(struct tpm_chip *chip) } else if (rc) return rc; - tpm2_flush_context_cmd(chip, space->context_tbl[i], - TPM_TRANSMIT_NESTED); + tpm2_flush_context(chip, space->context_tbl[i]); space->context_tbl[i] = ~0; } @@ -490,30 +529,30 @@ static int tpm2_save_space(struct tpm_chip *chip) } int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, - u32 cc, u8 *buf, size_t *bufsiz) + void *buf, size_t *bufsiz) { - struct tpm_output_header *header = (void *)buf; + struct tpm_header *header = buf; int rc; if (!space) return 0; - rc = tpm2_map_response_header(chip, cc, buf, *bufsiz); + rc = tpm2_map_response_header(chip, chip->last_cc, buf, *bufsiz); if (rc) { tpm2_flush_space(chip); - return rc; + goto out; } - rc = tpm2_map_response_body(chip, cc, buf, *bufsiz); + rc = tpm2_map_response_body(chip, chip->last_cc, buf, *bufsiz); if (rc) { tpm2_flush_space(chip); - return rc; + goto out; } rc = tpm2_save_space(chip); if (rc) { tpm2_flush_space(chip); - return rc; + goto out; } *bufsiz = be32_to_cpu(header->length); @@ -526,4 +565,7 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, memcpy(space->session_buf, chip->work_space.session_buf, PAGE_SIZE); return 0; +out: + dev_err(&chip->dev, "%s: error %d\n", __func__, rc); + return rc; } diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c index 66a14526aaf4..a290b30a0c35 100644 --- a/drivers/char/tpm/tpm_atmel.c +++ b/drivers/char/tpm/tpm_atmel.c @@ -105,7 +105,7 @@ static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count) iowrite8(buf[i], priv->iobase); } - return count; + return 0; } static void tpm_atml_cancel(struct tpm_chip *chip) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 36952ef98f90..763fc7e6c005 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -287,19 +287,29 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) struct crb_priv *priv = dev_get_drvdata(&chip->dev); unsigned int expected; - /* sanity check */ - if (count < 6) + /* A sanity check that the upper layer wants to get at least the header + * as that is the minimum size for any TPM 
response. + */ + if (count < TPM_HEADER_SIZE) return -EIO; + /* If this bit is set, according to the spec, the TPM is in + * unrecoverable condition. + */ if (ioread32(&priv->regs_t->ctrl_sts) & CRB_CTRL_STS_ERROR) return -EIO; - memcpy_fromio(buf, priv->rsp, 6); - expected = be32_to_cpup((__be32 *) &buf[2]); - if (expected > count || expected < 6) + /* Read the first 8 bytes in order to get the length of the response. + * We read exactly a quad word in order to make sure that the remaining + * reads will be aligned. + */ + memcpy_fromio(buf, priv->rsp, 8); + + expected = be32_to_cpup((__be32 *)&buf[2]); + if (expected > count || expected < TPM_HEADER_SIZE) return -EIO; - memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6); + memcpy_fromio(&buf[8], &priv->rsp[8], expected - 8); return expected; } diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c index 95ce2e9ccdc6..8a7e80923091 100644 --- a/drivers/char/tpm/tpm_i2c_atmel.c +++ b/drivers/char/tpm/tpm_i2c_atmel.c @@ -46,7 +46,7 @@ struct priv_data { /* This is the amount we read on the first try. 25 was chosen to fit a * fair number of read responses in the buffer so a 2nd retry can be * avoided in small message cases. */ - u8 buffer[sizeof(struct tpm_output_header) + 25]; + u8 buffer[sizeof(struct tpm_header) + 25]; }; static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) @@ -65,15 +65,22 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len) dev_dbg(&chip->dev, "%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__, (int)min_t(size_t, 64, len), buf, len, status); - return status; + + if (status < 0) + return status; + + /* The upper layer does not support incomplete sends. */ + if (status != len) + return -E2BIG; + + return 0; } static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count) { struct priv_data *priv = dev_get_drvdata(&chip->dev); struct i2c_client *client = to_i2c_client(chip->dev.parent); - struct tpm_output_header *hdr = - (struct tpm_output_header *)priv->buffer; + struct tpm_header *hdr = (struct tpm_header *)priv->buffer; u32 expected_len; int rc; diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index 9086edc9066b..3b4e9672ff6c 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -26,8 +26,7 @@ #include #include "tpm.h" -/* max. buffer size supported by our TPM */ -#define TPM_BUFSIZE 1260 +#define TPM_I2C_INFINEON_BUFSIZE 1260 /* max. number of iterations after I2C NAK */ #define MAX_COUNT 3 @@ -63,11 +62,13 @@ enum i2c_chip_type { UNKNOWN, }; -/* Structure to store I2C TPM specific stuff */ struct tpm_inf_dev { struct i2c_client *client; int locality; - u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */ + /* In addition to the data itself, the buffer must fit the 7-bit I2C + * address and the direction bit. 
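The crb_recv() change above reads a full 8-byte chunk first and then takes the total response length from the big-endian field at byte offset 2 of the header, rejecting anything shorter than TPM_HEADER_SIZE or longer than the caller's buffer. The same validation, factored into a small sketch (hypothetical helper name):

static int example_response_length(const u8 *rsp, size_t count)
{
	u32 expected;

	if (count < TPM_HEADER_SIZE)
		return -EIO;

	/* Bytes 2..5 of the header carry the total response length. */
	expected = be32_to_cpup((const __be32 *)&rsp[2]);
	if (expected < TPM_HEADER_SIZE || expected > count)
		return -EIO;

	return expected;
}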
+ */ + u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1]; struct tpm_chip *chip; enum i2c_chip_type chip_type; unsigned int adapterlimit; @@ -219,7 +220,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len, .buf = tpm_dev.buf }; - if (len > TPM_BUFSIZE) + if (len > TPM_I2C_INFINEON_BUFSIZE) return -EINVAL; if (!tpm_dev.client->adapter->algo->master_xfer) @@ -527,8 +528,8 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) u8 retries = 0; u8 sts = TPM_STS_GO; - if (len > TPM_BUFSIZE) - return -E2BIG; /* command is too long for our tpm, sorry */ + if (len > TPM_I2C_INFINEON_BUFSIZE) + return -E2BIG; if (request_locality(chip, 0) < 0) return -EBUSY; @@ -587,7 +588,7 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len) /* go and do it */ iic_tpm_write(TPM_STS(tpm_dev.locality), &sts, 1); - return len; + return 0; out_err: tpm_tis_i2c_ready(chip); /* The TPM needs some time to clean up here, diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index 217f7f1cbde8..315a3b4548f7 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -35,14 +35,12 @@ #include "tpm.h" /* I2C interface offsets */ -#define TPM_STS 0x00 -#define TPM_BURST_COUNT 0x01 -#define TPM_DATA_FIFO_W 0x20 -#define TPM_DATA_FIFO_R 0x40 -#define TPM_VID_DID_RID 0x60 -/* TPM command header size */ -#define TPM_HEADER_SIZE 10 -#define TPM_RETRY 5 +#define TPM_STS 0x00 +#define TPM_BURST_COUNT 0x01 +#define TPM_DATA_FIFO_W 0x20 +#define TPM_DATA_FIFO_R 0x40 +#define TPM_VID_DID_RID 0x60 +#define TPM_I2C_RETRIES 5 /* * I2C bus device maximum buffer size w/o counting I2C address or command * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data @@ -292,7 +290,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) dev_err(dev, "%s() count < header size\n", __func__); return -EIO; } - for (retries = 0; retries < TPM_RETRY; retries++) { + for (retries = 0; retries < TPM_I2C_RETRIES; retries++) { if (retries > 0) { /* if this is not the first trial, set responseRetry */ i2c_nuvoton_write_status(client, @@ -467,7 +465,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len) } dev_dbg(dev, "%s() -> %zd\n", __func__, len); - return len; + return 0; } static bool i2c_nuvoton_req_canceled(struct tpm_chip *chip, u8 status) diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 07b5a487d0c8..757ca45b39b8 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -139,14 +139,14 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) } /** - * tpm_ibmvtpm_send - Send tpm request - * + * tpm_ibmvtpm_send() - Send a TPM command * @chip: tpm chip struct * @buf: buffer contains data to send * @count: size of buffer * * Return: - * Number of bytes sent or < 0 on error. 
+ * 0 on success, + * -errno on error */ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { @@ -192,7 +192,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) rc = 0; ibmvtpm->tpm_processing_cmd = false; } else - rc = count; + rc = 0; spin_unlock(&ibmvtpm->rtce_lock); return rc; diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c index d8f10047fbba..97f6d4fe0aee 100644 --- a/drivers/char/tpm/tpm_infineon.c +++ b/drivers/char/tpm/tpm_infineon.c @@ -354,7 +354,7 @@ static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count) for (i = 0; i < count; i++) { wait_and_send(chip, buf[i]); } - return count; + return 0; } static void tpm_inf_cancel(struct tpm_chip *chip) diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 5d6cce74cd3f..9bee3c5eb4bf 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -226,7 +226,7 @@ static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count) } outb(NSC_COMMAND_EOC, priv->base + NSC_COMMAND); - return count; + return 0; } static void tpm_nsc_cancel(struct tpm_chip *chip) diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index 86dd8521feef..75e7a856177c 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -20,7 +20,8 @@ #include #include "tpm.h" -#define TPM_PPI_REVISION_ID 1 +#define TPM_PPI_REVISION_ID_1 1 +#define TPM_PPI_REVISION_ID_2 2 #define TPM_PPI_FN_VERSION 1 #define TPM_PPI_FN_SUBREQ 2 #define TPM_PPI_FN_GETREQ 3 @@ -28,7 +29,7 @@ #define TPM_PPI_FN_GETRSP 5 #define TPM_PPI_FN_SUBREQ2 7 #define TPM_PPI_FN_GETOPR 8 -#define PPI_TPM_REQ_MAX 22 +#define PPI_TPM_REQ_MAX 101 /* PPI 1.3 for TPM 2 */ #define PPI_VS_REQ_START 128 #define PPI_VS_REQ_END 255 @@ -36,14 +37,18 @@ static const guid_t tpm_ppi_guid = GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4, 0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53); +static bool tpm_ppi_req_has_parameter(u64 req) +{ + return req == 23; +} + static inline union acpi_object * tpm_eval_dsm(acpi_handle ppi_handle, int func, acpi_object_type type, - union acpi_object *argv4) + union acpi_object *argv4, u64 rev) { BUG_ON(!ppi_handle); return acpi_evaluate_dsm_typed(ppi_handle, &tpm_ppi_guid, - TPM_PPI_REVISION_ID, - func, argv4, type); + rev, func, argv4, type); } static ssize_t tpm_show_ppi_version(struct device *dev, @@ -60,9 +65,14 @@ static ssize_t tpm_show_ppi_request(struct device *dev, ssize_t size = -EINVAL; union acpi_object *obj; struct tpm_chip *chip = to_tpm_chip(dev); + u64 rev = TPM_PPI_REVISION_ID_2; + u64 req; + + if (strcmp(chip->ppi_version, "1.2") < 0) + rev = TPM_PPI_REVISION_ID_1; obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETREQ, - ACPI_TYPE_PACKAGE, NULL); + ACPI_TYPE_PACKAGE, NULL, rev); if (!obj) return -ENXIO; @@ -72,7 +82,23 @@ static ssize_t tpm_show_ppi_request(struct device *dev, * error. The second is pending TPM operation requested by the OS, 0 * means none and >0 means operation value. 
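tpm_ibmvtpm_send() above, like the other ->send conversions in this series (atmel, the i2c variants, infineon, nsc, and later tis and xen), now returns 0 on success and a negative errno on failure instead of echoing the byte count back to the core. A minimal sketch of that contract (device access elided, names illustrative):

static int example_tpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		/* push buf[i] to the device here */
	}

	return 0;	/* success is 0 now, not 'count' */
}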
*/ - if (obj->package.count == 2 && + if (obj->package.count == 3 && + obj->package.elements[0].type == ACPI_TYPE_INTEGER && + obj->package.elements[1].type == ACPI_TYPE_INTEGER && + obj->package.elements[2].type == ACPI_TYPE_INTEGER) { + if (obj->package.elements[0].integer.value) + size = -EFAULT; + else { + req = obj->package.elements[1].integer.value; + if (tpm_ppi_req_has_parameter(req)) + size = scnprintf(buf, PAGE_SIZE, + "%llu %llu\n", req, + obj->package.elements[2].integer.value); + else + size = scnprintf(buf, PAGE_SIZE, + "%llu\n", req); + } + } else if (obj->package.count == 2 && obj->package.elements[0].type == ACPI_TYPE_INTEGER && obj->package.elements[1].type == ACPI_TYPE_INTEGER) { if (obj->package.elements[0].integer.value) @@ -94,9 +120,10 @@ static ssize_t tpm_store_ppi_request(struct device *dev, u32 req; u64 ret; int func = TPM_PPI_FN_SUBREQ; - union acpi_object *obj, tmp; - union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(1, &tmp); + union acpi_object *obj, tmp[2]; + union acpi_object argv4 = ACPI_INIT_DSM_ARGV4(2, tmp); struct tpm_chip *chip = to_tpm_chip(dev); + u64 rev = TPM_PPI_REVISION_ID_1; /* * the function to submit TPM operation request to pre-os environment @@ -104,7 +131,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev, * version 1.1 */ if (acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid, - TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_SUBREQ2)) + TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_SUBREQ2)) func = TPM_PPI_FN_SUBREQ2; /* @@ -113,20 +140,29 @@ static ssize_t tpm_store_ppi_request(struct device *dev, * string/package type. For PPI version 1.0 and 1.1, use buffer type * for compatibility, and use package type since 1.2 according to spec. */ - if (strcmp(chip->ppi_version, "1.2") < 0) { + if (strcmp(chip->ppi_version, "1.3") == 0) { + if (sscanf(buf, "%llu %llu", &tmp[0].integer.value, + &tmp[1].integer.value) != 2) + goto ppi12; + rev = TPM_PPI_REVISION_ID_2; + tmp[0].type = ACPI_TYPE_INTEGER; + tmp[1].type = ACPI_TYPE_INTEGER; + } else if (strcmp(chip->ppi_version, "1.2") < 0) { if (sscanf(buf, "%d", &req) != 1) return -EINVAL; argv4.type = ACPI_TYPE_BUFFER; argv4.buffer.length = sizeof(req); argv4.buffer.pointer = (u8 *)&req; } else { - tmp.type = ACPI_TYPE_INTEGER; - if (sscanf(buf, "%llu", &tmp.integer.value) != 1) +ppi12: + argv4.package.count = 1; + tmp[0].type = ACPI_TYPE_INTEGER; + if (sscanf(buf, "%llu", &tmp[0].integer.value) != 1) return -EINVAL; } obj = tpm_eval_dsm(chip->acpi_dev_handle, func, ACPI_TYPE_INTEGER, - &argv4); + &argv4, rev); if (!obj) { return -ENXIO; } else { @@ -170,7 +206,7 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev, if (strcmp(chip->ppi_version, "1.2") < 0) obj = &tmp; obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETACT, - ACPI_TYPE_INTEGER, obj); + ACPI_TYPE_INTEGER, obj, TPM_PPI_REVISION_ID_1); if (!obj) { return -ENXIO; } else { @@ -196,7 +232,7 @@ static ssize_t tpm_show_ppi_response(struct device *dev, struct tpm_chip *chip = to_tpm_chip(dev); obj = tpm_eval_dsm(chip->acpi_dev_handle, TPM_PPI_FN_GETRSP, - ACPI_TYPE_PACKAGE, NULL); + ACPI_TYPE_PACKAGE, NULL, TPM_PPI_REVISION_ID_1); if (!obj) return -ENXIO; @@ -264,7 +300,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start, "User not required", }; - if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID, + if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_GETOPR)) return -EPERM; @@ -272,7 +308,8 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, 
char *buf, u32 start, for (i = start; i <= end; i++) { tmp.integer.value = i; obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR, - ACPI_TYPE_INTEGER, &argv); + ACPI_TYPE_INTEGER, &argv, + TPM_PPI_REVISION_ID_1); if (!obj) { return -ENOMEM; } else { @@ -338,12 +375,13 @@ void tpm_add_ppi(struct tpm_chip *chip) return; if (!acpi_check_dsm(chip->acpi_dev_handle, &tpm_ppi_guid, - TPM_PPI_REVISION_ID, 1 << TPM_PPI_FN_VERSION)) + TPM_PPI_REVISION_ID_1, 1 << TPM_PPI_FN_VERSION)) return; /* Cache PPI version string. */ obj = acpi_evaluate_dsm_typed(chip->acpi_dev_handle, &tpm_ppi_guid, - TPM_PPI_REVISION_ID, TPM_PPI_FN_VERSION, + TPM_PPI_REVISION_ID_1, + TPM_PPI_FN_VERSION, NULL, ACPI_TYPE_STRING); if (obj) { strlcpy(chip->ppi_version, obj->string.pointer, diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index bf7e49cfa643..b9f64684c3fb 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -481,7 +481,7 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) goto out_err; } } - return len; + return 0; out_err: tpm_tis_ready(chip); return rc; @@ -521,35 +521,38 @@ static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = { (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } }, }; -static bool tpm_tis_update_timeouts(struct tpm_chip *chip, +static void tpm_tis_update_timeouts(struct tpm_chip *chip, unsigned long *timeout_cap) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int i, rc; u32 did_vid; + chip->timeout_adjusted = false; + if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, true); rc = tpm_tis_read32(priv, TPM_DID_VID(0), &did_vid); - if (rc < 0) + if (rc < 0) { + dev_warn(&chip->dev, "%s: failed to read did_vid: %d\n", + __func__, rc); goto out; + } for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) { if (vendor_timeout_overrides[i].did_vid != did_vid) continue; memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us, sizeof(vendor_timeout_overrides[i].timeout_us)); - rc = true; + chip->timeout_adjusted = true; } - rc = false; - out: if (chip->ops->clk_enable != NULL) chip->ops->clk_enable(chip, false); - return rc; + return; } /* @@ -913,7 +916,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, intmask &= ~TPM_GLOBAL_INT_ENABLE; tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask); + rc = tpm_chip_start(chip); + if (rc) + goto out_err; rc = tpm2_probe(chip); + tpm_chip_stop(chip); if (rc) goto out_err; diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c index 87a0ce47f201..d74f3de74ae6 100644 --- a/drivers/char/tpm/tpm_vtpm_proxy.c +++ b/drivers/char/tpm/tpm_vtpm_proxy.c @@ -303,9 +303,9 @@ out: static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, u8 *buf, size_t count) { - struct tpm_input_header *hdr = (struct tpm_input_header *)buf; + struct tpm_header *hdr = (struct tpm_header *)buf; - if (count < sizeof(struct tpm_input_header)) + if (count < sizeof(struct tpm_header)) return 0; if (chip->flags & TPM_CHIP_FLAG_TPM2) { @@ -335,7 +335,6 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip, static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); - int rc = 0; if (count > sizeof(proxy_dev->buffer)) { dev_err(&chip->dev, @@ -366,7 +365,7 @@ static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count) wake_up_interruptible(&proxy_dev->wq); - return rc; + 
return 0; } static void vtpm_proxy_tpm_op_cancel(struct tpm_chip *chip) @@ -402,7 +401,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality) { struct tpm_buf buf; int rc; - const struct tpm_output_header *header; + const struct tpm_header *header; struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev); if (chip->flags & TPM_CHIP_FLAG_TPM2) @@ -417,9 +416,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality) proxy_dev->state |= STATE_DRIVER_COMMAND; - rc = tpm_transmit_cmd(chip, NULL, buf.data, tpm_buf_length(&buf), 0, - TPM_TRANSMIT_NESTED, - "attempting to set locality"); + rc = tpm_transmit_cmd(chip, &buf, 0, "attempting to set locality"); proxy_dev->state &= ~STATE_DRIVER_COMMAND; @@ -428,7 +425,7 @@ static int vtpm_proxy_request_locality(struct tpm_chip *chip, int locality) goto out; } - header = (const struct tpm_output_header *)buf.data; + header = (const struct tpm_header *)buf.data; rc = be32_to_cpu(header->return_code); if (rc) locality = -1; diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c index b150f87f38f5..4e2d00cb0d81 100644 --- a/drivers/char/tpm/xen-tpmfront.c +++ b/drivers/char/tpm/xen-tpmfront.c @@ -163,7 +163,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) wmb(); notify_remote_via_evtchn(priv->evtchn); - ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal); + ordinal = be32_to_cpu(((struct tpm_header *)buf)->ordinal); duration = tpm_calc_ordinal_duration(chip, ordinal); if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration, @@ -173,7 +173,7 @@ static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) return -ETIME; } - return count; + return 0; } static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count) diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 2fe225a697df..3487e03d4bc6 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, return; at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, - nck(at91sam9x5_systemck), - nck(at91sam9x35_periphck), 0); + nck(at91sam9x5_systemck), 31, 0); if (!at91sam9x5_pmc) return; @@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 2; i++) { char name[6]; diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index d69ad96fe988..cd0ef7274fdb 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 3; i++) { char name[6]; @@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; parent_names[5] = "audiopll_pmcck"; for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index e358be7f6c8d..b645a9d59cdb 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -207,7 +207,7 @@ static void __init 
sama5d4_pmc_setup(struct device_node *np) parent_names[1] = "mainck"; parent_names[2] = "plladivck"; parent_names[3] = "utmick"; - parent_names[4] = "mck"; + parent_names[4] = "masterck"; for (i = 0; i < 3; i++) { char name[6]; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 6ccdbedb02f3..d2477a5058ac 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core, if (!parent) return -EINVAL; - for (i = 0; i < core->num_parents; i++) - if (clk_core_get_parent_by_index(core, i) == parent) + for (i = 0; i < core->num_parents; i++) { + if (core->parents[i] == parent) + return i; + + if (core->parents[i]) + continue; + + /* Fallback to comparing globally unique names */ + if (!strcmp(parent->name, core->parent_names[i])) { + core->parents[i] = parent; return i; + } + } return -EINVAL; } diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c index 0026c3969b1e..76b9eb15604e 100644 --- a/drivers/clk/imx/clk-frac-pll.c +++ b/drivers/clk/imx/clk-frac-pll.c @@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, { struct clk_frac_pll *pll = to_clk_frac_pll(hw); u32 val, divfi, divff; - u64 temp64 = parent_rate; + u64 temp64; int ret; parent_rate *= 8; rate *= 2; divfi = rate / parent_rate; - temp64 *= rate - divfi; + temp64 = parent_rate * divfi; + temp64 = rate - temp64; temp64 *= PLL_FRAC_DENOM; do_div(temp64, parent_rate); divff = temp64; diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 61fefc046ec5..d083b860f083 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -53,7 +53,6 @@ #define APMU_DISP1 0x110 #define APMU_CCIC0 0x50 #define APMU_CCIC1 0xf4 -#define APMU_SP 0x68 #define MPMU_UART_PLL 0x14 struct mmp2_clk_unit { @@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = { .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), }; -static DEFINE_SPINLOCK(sp_lock); - static struct mmp_param_mux_clk apmu_mux_clks[] = { {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, @@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, - {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock}, }; static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index c782e62dd98b..58fa5c247af1 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = { "core_bi_pll_test_se", }; -static const char * const gcc_parent_names_7[] = { - "bi_tcxo", +static const char * const gcc_parent_names_7_ao[] = { + "bi_tcxo_ao", "gpll0", "gpll0_out_even", "core_bi_pll_test_se", @@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = { "core_bi_pll_test_se", }; +static const char * const 
gcc_parent_names_8_ao[] = { + "bi_tcxo_ao", + "gpll0", + "core_bi_pll_test_se", +}; + static const struct parent_map gcc_parent_map_10[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, @@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_ahb_clk_src", - .parent_names = gcc_parent_names_7, + .parent_names = gcc_parent_names_7_ao, .num_parents = 4, .ops = &clk_rcg2_ops, }, @@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = { .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_cpuss_rbcpr_clk_src", - .parent_names = gcc_parent_names_8, + .parent_names = gcc_parent_names_8_ao, .num_parents = 3, .ops = &clk_rcg2_ops, }, diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 3b97f60540ad..609970c0b666 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1", static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", 0x060, BIT(10), 0); static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", - 0x060, BIT(12), 0); + 0x060, BIT(11), 0); static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", - 0x060, BIT(13), 0); + 0x060, BIT(12), 0); static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", 0x060, BIT(13), 0); static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 621b1cd996db..ac12f261f8ca 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c @@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, [RST_BUS_VE] = { 0x2c4, BIT(0) }, - [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, + [RST_BUS_TCON0] = { 0x2c4, BIT(4) }, [RST_BUS_CSI] = { 0x2c4, BIT(8) }, [RST_BUS_DE] = { 0x2c4, BIT(12) }, [RST_BUS_DBG] = { 0x2c4, BIT(31) }, diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig index 7ddacae5d0b1..1adcccfa7829 100644 --- a/drivers/clk/tegra/Kconfig +++ b/drivers/clk/tegra/Kconfig @@ -5,3 +5,8 @@ config TEGRA_CLK_EMC config CLK_TEGRA_BPMP def_bool y depends on TEGRA_BPMP + +config TEGRA_CLK_DFLL + depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC + select PM_OPP + def_bool y diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index 6507acc843c7..4812e45c2214 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -20,7 +20,7 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o -obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124-dfll-fcpu.o +obj-$(CONFIG_TEGRA_CLK_DFLL) += clk-tegra124-dfll-fcpu.o obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o obj-y += cvb.o obj-$(CONFIG_ARCH_TEGRA_210_SOC) += clk-tegra210.o diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c index 609e363dabf8..0400e5b1d627 100644 --- a/drivers/clk/tegra/clk-dfll.c +++ b/drivers/clk/tegra/clk-dfll.c @@ -1,7 +1,7 @@ /* * clk-dfll.c - Tegra DFLL clock source common code * - * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved. + * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved. 
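The clk-frac-pll.c hunk above corrects the fractional divider: the remainder of the doubled rate over the pre-scaled parent rate is what gets scaled by PLL_FRAC_DENOM, i.e. divff = (rate - divfi * parent_rate) * PLL_FRAC_DENOM / parent_rate. A standalone check of that arithmetic; the 2^24 denominator and the sample rates are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define PLL_FRAC_DENOM 0x1000000ULL	/* assumed 24-bit fractional denominator */

int main(void)
{
	/* Same pre-scaling as clk_pll_set_rate(): parent *= 8, rate *= 2. */
	uint64_t parent_rate = 25000000ULL * 8;	/* 25 MHz reference */
	uint64_t rate = 1050000000ULL * 2;	/* 1.05 GHz request */
	uint64_t divfi = rate / parent_rate;
	uint64_t divff = (rate - divfi * parent_rate) * PLL_FRAC_DENOM
			 / parent_rate;

	/* Expect divfi=10 and divff=8388608 (half of the denominator). */
	printf("divfi=%llu divff=%llu\n",
	       (unsigned long long)divfi, (unsigned long long)divff);
	return 0;
}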
* * Aleksandr Frid * Paul Walmsley @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -243,6 +244,12 @@ enum dfll_tune_range { DFLL_TUNE_LOW = 1, }; + +enum tegra_dfll_pmu_if { + TEGRA_DFLL_PMU_I2C = 0, + TEGRA_DFLL_PMU_PWM = 1, +}; + /** * struct dfll_rate_req - target DFLL rate request data * @rate: target frequency, after the postscaling @@ -300,10 +307,19 @@ struct tegra_dfll { u32 i2c_reg; u32 i2c_slave_addr; - /* i2c_lut array entries are regulator framework selectors */ - unsigned i2c_lut[MAX_DFLL_VOLTAGES]; - int i2c_lut_size; - u8 lut_min, lut_max, lut_safe; + /* lut array entries are regulator framework selectors or PWM values*/ + unsigned lut[MAX_DFLL_VOLTAGES]; + unsigned long lut_uv[MAX_DFLL_VOLTAGES]; + int lut_size; + u8 lut_bottom, lut_min, lut_max, lut_safe; + + /* PWM interface */ + enum tegra_dfll_pmu_if pmu_if; + unsigned long pwm_rate; + struct pinctrl *pwm_pin; + struct pinctrl_state *pwm_enable_state; + struct pinctrl_state *pwm_disable_state; + u32 reg_init_uV; }; #define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw) @@ -489,6 +505,34 @@ static void dfll_set_mode(struct tegra_dfll *td, dfll_wmb(td); } +/* + * DVCO rate control + */ + +static unsigned long get_dvco_rate_below(struct tegra_dfll *td, u8 out_min) +{ + struct dev_pm_opp *opp; + unsigned long rate, prev_rate; + unsigned long uv, min_uv; + + min_uv = td->lut_uv[out_min]; + for (rate = 0, prev_rate = 0; ; rate++) { + opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); + if (IS_ERR(opp)) + break; + + uv = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + + if (uv && uv > min_uv) + return prev_rate; + + prev_rate = rate; + } + + return prev_rate; +} + /* * DFLL-to-I2C controller interface */ @@ -518,6 +562,118 @@ static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable) return 0; } + +/* + * DFLL-to-PWM controller interface + */ + +/** + * dfll_pwm_set_output_enabled - enable/disable PWM voltage requests + * @td: DFLL instance + * @enable: whether to enable or disable the PWM voltage requests + * + * Set the master enable control for PWM control value updates. If disabled, + * then the PWM signal is not driven. Also configure the PWM output pad + * to the appropriate state. + */ +static int dfll_pwm_set_output_enabled(struct tegra_dfll *td, bool enable) +{ + int ret; + u32 val, div; + + if (enable) { + ret = pinctrl_select_state(td->pwm_pin, td->pwm_enable_state); + if (ret < 0) { + dev_err(td->dev, "setting enable state failed\n"); + return -EINVAL; + } + val = dfll_readl(td, DFLL_OUTPUT_CFG); + val &= ~DFLL_OUTPUT_CFG_PWM_DIV_MASK; + div = DIV_ROUND_UP(td->ref_rate, td->pwm_rate); + val |= (div << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT) + & DFLL_OUTPUT_CFG_PWM_DIV_MASK; + dfll_writel(td, val, DFLL_OUTPUT_CFG); + dfll_wmb(td); + + val |= DFLL_OUTPUT_CFG_PWM_ENABLE; + dfll_writel(td, val, DFLL_OUTPUT_CFG); + dfll_wmb(td); + } else { + ret = pinctrl_select_state(td->pwm_pin, td->pwm_disable_state); + if (ret < 0) + dev_warn(td->dev, "setting disable state failed\n"); + + val = dfll_readl(td, DFLL_OUTPUT_CFG); + val &= ~DFLL_OUTPUT_CFG_PWM_ENABLE; + dfll_writel(td, val, DFLL_OUTPUT_CFG); + dfll_wmb(td); + } + + return 0; +} + +/** + * dfll_set_force_output_value - set fixed value for force output + * @td: DFLL instance + * @out_val: value to force output + * + * Set the fixed value for force output, DFLL will output this value when + * force output is enabled. 
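Both the PWM interface above and the initial-voltage setup further below rely on round-up division: the PWM divider written to DFLL_OUTPUT_CFG is DIV_ROUND_UP(ref_rate, pwm_rate), and the forced output selector is DIV_ROUND_UP(vinit - vmin, vstep). A standalone check of both computations; all numeric values are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t ref_rate = 51000000;	/* DFLL reference clock, Hz */
	uint32_t pwm_rate = 12750000;	/* requested PWM rate, Hz */
	uint32_t init_uV = 1000000;	/* requested initial voltage */
	uint32_t min_uV = 800000;	/* lowest LUT voltage */
	uint32_t step_uV = 10000;	/* regulator alignment step */

	printf("pwm divider  = %u\n",
	       (unsigned)DIV_ROUND_UP(ref_rate, pwm_rate));
	printf("initial vsel = %u\n",
	       (unsigned)DIV_ROUND_UP(init_uV - min_uV, step_uV));
	return 0;
}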
+ */ +static u32 dfll_set_force_output_value(struct tegra_dfll *td, u8 out_val) +{ + u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE); + + val = (val & DFLL_OUTPUT_FORCE_ENABLE) | (out_val & OUT_MASK); + dfll_writel(td, val, DFLL_OUTPUT_FORCE); + dfll_wmb(td); + + return dfll_readl(td, DFLL_OUTPUT_FORCE); +} + +/** + * dfll_set_force_output_enabled - enable/disable force output + * @td: DFLL instance + * @enable: whether to enable or disable the force output + * + * Set the enable control for fouce output with fixed value. + */ +static void dfll_set_force_output_enabled(struct tegra_dfll *td, bool enable) +{ + u32 val = dfll_readl(td, DFLL_OUTPUT_FORCE); + + if (enable) + val |= DFLL_OUTPUT_FORCE_ENABLE; + else + val &= ~DFLL_OUTPUT_FORCE_ENABLE; + + dfll_writel(td, val, DFLL_OUTPUT_FORCE); + dfll_wmb(td); +} + +/** + * dfll_force_output - force output a fixed value + * @td: DFLL instance + * @out_sel: value to force output + * + * Set the fixed value for force output, DFLL will output this value. + */ +static int dfll_force_output(struct tegra_dfll *td, unsigned int out_sel) +{ + u32 val; + + if (out_sel > OUT_MASK) + return -EINVAL; + + val = dfll_set_force_output_value(td, out_sel); + if ((td->mode < DFLL_CLOSED_LOOP) && + !(val & DFLL_OUTPUT_FORCE_ENABLE)) { + dfll_set_force_output_enabled(td, true); + } + + return 0; +} + /** * dfll_load_lut - load the voltage lookup table * @td: struct tegra_dfll * @@ -539,7 +695,7 @@ static void dfll_load_i2c_lut(struct tegra_dfll *td) lut_index = i; val = regulator_list_hardware_vsel(td->vdd_reg, - td->i2c_lut[lut_index]); + td->lut[lut_index]); __raw_writel(val, td->lut_base + i * 4); } @@ -594,24 +750,41 @@ static void dfll_init_out_if(struct tegra_dfll *td) { u32 val; - td->lut_min = 0; - td->lut_max = td->i2c_lut_size - 1; - td->lut_safe = td->lut_min + 1; + td->lut_min = td->lut_bottom; + td->lut_max = td->lut_size - 1; + td->lut_safe = td->lut_min + (td->lut_min < td->lut_max ? 
1 : 0); + + /* clear DFLL_OUTPUT_CFG before setting new value */ + dfll_writel(td, 0, DFLL_OUTPUT_CFG); + dfll_wmb(td); - dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG); val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) | - (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) | - (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT); - dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG); - dfll_i2c_wmb(td); + (td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) | + (td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT); + dfll_writel(td, val, DFLL_OUTPUT_CFG); + dfll_wmb(td); dfll_writel(td, 0, DFLL_OUTPUT_FORCE); dfll_i2c_writel(td, 0, DFLL_INTR_EN); dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK, DFLL_INTR_STS); - dfll_load_i2c_lut(td); - dfll_init_i2c_if(td); + if (td->pmu_if == TEGRA_DFLL_PMU_PWM) { + u32 vinit = td->reg_init_uV; + int vstep = td->soc->alignment.step_uv; + unsigned long vmin = td->lut_uv[0]; + + /* set initial voltage */ + if ((vinit >= vmin) && vstep) { + unsigned int vsel; + + vsel = DIV_ROUND_UP((vinit - vmin), vstep); + dfll_force_output(td, vsel); + } + } else { + dfll_load_i2c_lut(td); + dfll_init_i2c_if(td); + } } /* @@ -631,17 +804,17 @@ static void dfll_init_out_if(struct tegra_dfll *td) static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate) { struct dev_pm_opp *opp; - int i, uv; + int i, align_step; opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); if (IS_ERR(opp)) return PTR_ERR(opp); - uv = dev_pm_opp_get_voltage(opp); + align_step = dev_pm_opp_get_voltage(opp) / td->soc->alignment.step_uv; dev_pm_opp_put(opp); - for (i = 0; i < td->i2c_lut_size; i++) { - if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv) + for (i = td->lut_bottom; i < td->lut_size; i++) { + if ((td->lut_uv[i] / td->soc->alignment.step_uv) >= align_step) return i; } @@ -863,9 +1036,14 @@ static int dfll_lock(struct tegra_dfll *td) return -EINVAL; } - dfll_i2c_set_output_enabled(td, true); + if (td->pmu_if == TEGRA_DFLL_PMU_PWM) + dfll_pwm_set_output_enabled(td, true); + else + dfll_i2c_set_output_enabled(td, true); + dfll_set_mode(td, DFLL_CLOSED_LOOP); dfll_set_frequency_request(td, req); + dfll_set_force_output_enabled(td, false); return 0; default: @@ -889,7 +1067,10 @@ static int dfll_unlock(struct tegra_dfll *td) case DFLL_CLOSED_LOOP: dfll_set_open_loop_config(td); dfll_set_mode(td, DFLL_OPEN_LOOP); - dfll_i2c_set_output_enabled(td, false); + if (td->pmu_if == TEGRA_DFLL_PMU_PWM) + dfll_pwm_set_output_enabled(td, false); + else + dfll_i2c_set_output_enabled(td, false); return 0; case DFLL_OPEN_LOOP: @@ -1171,15 +1352,17 @@ static int attr_registers_show(struct seq_file *s, void *data) seq_printf(s, "[0x%02x] = 0x%08x\n", offs, dfll_i2c_readl(td, offs)); - seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n"); - offs = DFLL_I2C_CLK_DIVISOR; - seq_printf(s, "[0x%02x] = 0x%08x\n", offs, - __raw_readl(td->i2c_controller_base + offs)); - - seq_puts(s, "\nLUT:\n"); - for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4) + if (td->pmu_if == TEGRA_DFLL_PMU_I2C) { + seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n"); + offs = DFLL_I2C_CLK_DIVISOR; seq_printf(s, "[0x%02x] = 0x%08x\n", offs, - __raw_readl(td->lut_base + offs)); + __raw_readl(td->i2c_controller_base + offs)); + + seq_puts(s, "\nLUT:\n"); + for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4) + seq_printf(s, "[0x%02x] = 0x%08x\n", offs, + __raw_readl(td->lut_base + offs)); + } return 0; } @@ -1349,15 +1532,21 @@ di_err1: */ static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV) { - int i, n_voltages, reg_uV; + int 
i, n_voltages, reg_uV,reg_volt_id, align_step; + + if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM)) + return -EINVAL; + align_step = uV / td->soc->alignment.step_uv; n_voltages = regulator_count_voltages(td->vdd_reg); for (i = 0; i < n_voltages; i++) { reg_uV = regulator_list_voltage(td->vdd_reg, i); if (reg_uV < 0) break; - if (uV == reg_uV) + reg_volt_id = reg_uV / td->soc->alignment.step_uv; + + if (align_step == reg_volt_id) return i; } @@ -1371,15 +1560,21 @@ static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV) * */ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV) { - int i, n_voltages, reg_uV; + int i, n_voltages, reg_uV, reg_volt_id, align_step; + if (WARN_ON(td->pmu_if == TEGRA_DFLL_PMU_PWM)) + return -EINVAL; + + align_step = uV / td->soc->alignment.step_uv; n_voltages = regulator_count_voltages(td->vdd_reg); for (i = 0; i < n_voltages; i++) { reg_uV = regulator_list_voltage(td->vdd_reg, i); if (reg_uV < 0) break; - if (uV <= reg_uV) + reg_volt_id = reg_uV / td->soc->alignment.step_uv; + + if (align_step <= reg_volt_id) return i; } @@ -1387,9 +1582,61 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV) return -EINVAL; } +/* + * dfll_build_pwm_lut - build the PWM regulator lookup table + * @td: DFLL instance + * @v_max: Vmax from OPP table + * + * Look-up table in h/w is ignored when PWM is used as DFLL interface to PMIC. + * In this case closed loop output is controlling duty cycle directly. The s/w + * look-up that maps PWM duty cycle to voltage is still built by this function. + */ +static int dfll_build_pwm_lut(struct tegra_dfll *td, unsigned long v_max) +{ + int i; + unsigned long rate, reg_volt; + u8 lut_bottom = MAX_DFLL_VOLTAGES; + int v_min = td->soc->cvb->min_millivolts * 1000; + + for (i = 0; i < MAX_DFLL_VOLTAGES; i++) { + reg_volt = td->lut_uv[i]; + + /* since opp voltage is exact mv */ + reg_volt = (reg_volt / 1000) * 1000; + if (reg_volt > v_max) + break; + + td->lut[i] = i; + if ((lut_bottom == MAX_DFLL_VOLTAGES) && (reg_volt >= v_min)) + lut_bottom = i; + } + + /* determine voltage boundaries */ + td->lut_size = i; + if ((lut_bottom == MAX_DFLL_VOLTAGES) || + (lut_bottom + 1 >= td->lut_size)) { + dev_err(td->dev, "no voltage above DFLL minimum %d mV\n", + td->soc->cvb->min_millivolts); + return -EINVAL; + } + td->lut_bottom = lut_bottom; + + /* determine rate boundaries */ + rate = get_dvco_rate_below(td, td->lut_bottom); + if (!rate) { + dev_err(td->dev, "no opp below DFLL minimum voltage %d mV\n", + td->soc->cvb->min_millivolts); + return -EINVAL; + } + td->dvco_rate_min = rate; + + return 0; +} + /** * dfll_build_i2c_lut - build the I2C voltage register lookup table * @td: DFLL instance + * @v_max: Vmax from OPP table * * The DFLL hardware has 33 bytes of look-up table RAM that must be filled with * PMIC voltage register values that span the entire DFLL operating range. @@ -1397,33 +1644,24 @@ static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV) * the soc-specific platform driver (td->soc->opp_dev) and the PMIC * register-to-voltage mapping queried from the regulator framework. * - * On success, fills in td->i2c_lut and returns 0, or -err on failure. + * On success, fills in td->lut and returns 0, or -err on failure. 
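+ * The chosen voltages are also recorded in td->lut_uv[], so later rate and
+ * voltage lookups (find_lut_index_for_rate(), get_dvco_rate_below()) work
+ * the same way for the I2C and PWM interfaces.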
*/ -static int dfll_build_i2c_lut(struct tegra_dfll *td) +static int dfll_build_i2c_lut(struct tegra_dfll *td, unsigned long v_max) { + unsigned long rate, v, v_opp; int ret = -EINVAL; - int j, v, v_max, v_opp; - int selector; - unsigned long rate; - struct dev_pm_opp *opp; - int lut; - - rate = ULONG_MAX; - opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate); - if (IS_ERR(opp)) { - dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n"); - goto out; - } - v_max = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); + int j, selector, lut; v = td->soc->cvb->min_millivolts * 1000; lut = find_vdd_map_entry_exact(td, v); if (lut < 0) goto out; - td->i2c_lut[0] = lut; + td->lut[0] = lut; + td->lut_bottom = 0; for (j = 1, rate = 0; ; rate++) { + struct dev_pm_opp *opp; + opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate); if (IS_ERR(opp)) break; @@ -1435,39 +1673,64 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td) dev_pm_opp_put(opp); for (;;) { - v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j)); + v += max(1UL, (v_max - v) / (MAX_DFLL_VOLTAGES - j)); if (v >= v_opp) break; selector = find_vdd_map_entry_min(td, v); if (selector < 0) goto out; - if (selector != td->i2c_lut[j - 1]) - td->i2c_lut[j++] = selector; + if (selector != td->lut[j - 1]) + td->lut[j++] = selector; } v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp; selector = find_vdd_map_entry_exact(td, v); if (selector < 0) goto out; - if (selector != td->i2c_lut[j - 1]) - td->i2c_lut[j++] = selector; + if (selector != td->lut[j - 1]) + td->lut[j++] = selector; if (v >= v_max) break; } - td->i2c_lut_size = j; + td->lut_size = j; if (!td->dvco_rate_min) dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n", td->soc->cvb->min_millivolts); - else + else { ret = 0; + for (j = 0; j < td->lut_size; j++) + td->lut_uv[j] = + regulator_list_voltage(td->vdd_reg, + td->lut[j]); + } out: return ret; } +static int dfll_build_lut(struct tegra_dfll *td) +{ + unsigned long rate, v_max; + struct dev_pm_opp *opp; + + rate = ULONG_MAX; + opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate); + if (IS_ERR(opp)) { + dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n"); + return -EINVAL; + } + v_max = dev_pm_opp_get_voltage(opp); + dev_pm_opp_put(opp); + + if (td->pmu_if == TEGRA_DFLL_PMU_PWM) + return dfll_build_pwm_lut(td, v_max); + else + return dfll_build_i2c_lut(td, v_max); +} + /** * read_dt_param - helper function for reading required parameters from the DT * @td: DFLL instance @@ -1526,11 +1789,56 @@ static int dfll_fetch_i2c_params(struct tegra_dfll *td) } td->i2c_reg = vsel_reg; - ret = dfll_build_i2c_lut(td); - if (ret) { - dev_err(td->dev, "couldn't build I2C LUT\n"); + return 0; +} + +static int dfll_fetch_pwm_params(struct tegra_dfll *td) +{ + int ret, i; + u32 pwm_period; + + if (!td->soc->alignment.step_uv || !td->soc->alignment.offset_uv) { + dev_err(td->dev, + "Missing step or alignment info for PWM regulator"); + return -EINVAL; + } + for (i = 0; i < MAX_DFLL_VOLTAGES; i++) + td->lut_uv[i] = td->soc->alignment.offset_uv + + i * td->soc->alignment.step_uv; + + ret = read_dt_param(td, "nvidia,pwm-tristate-microvolts", + &td->reg_init_uV); + if (!ret) { + dev_err(td->dev, "couldn't get initialized voltage\n"); + return ret; + } + + ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period); + if (!ret) { + dev_err(td->dev, "couldn't get PWM period\n"); return ret; } + td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1); + + td->pwm_pin = devm_pinctrl_get(td->dev); + if 
(IS_ERR(td->pwm_pin)) { + dev_err(td->dev, "DT: missing pinctrl device\n"); + return PTR_ERR(td->pwm_pin); + } + + td->pwm_enable_state = pinctrl_lookup_state(td->pwm_pin, + "dvfs_pwm_enable"); + if (IS_ERR(td->pwm_enable_state)) { + dev_err(td->dev, "DT: missing pwm enabled state\n"); + return PTR_ERR(td->pwm_enable_state); + } + + td->pwm_disable_state = pinctrl_lookup_state(td->pwm_pin, + "dvfs_pwm_disable"); + if (IS_ERR(td->pwm_disable_state)) { + dev_err(td->dev, "DT: missing pwm disabled state\n"); + return PTR_ERR(td->pwm_disable_state); + } return 0; } @@ -1597,16 +1905,6 @@ int tegra_dfll_register(struct platform_device *pdev, td->soc = soc; - td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu"); - if (IS_ERR(td->vdd_reg)) { - ret = PTR_ERR(td->vdd_reg); - if (ret != -EPROBE_DEFER) - dev_err(td->dev, "couldn't get vdd_cpu regulator: %d\n", - ret); - - return ret; - } - td->dvco_rst = devm_reset_control_get(td->dev, "dvco"); if (IS_ERR(td->dvco_rst)) { dev_err(td->dev, "couldn't get dvco reset\n"); @@ -1619,10 +1917,27 @@ int tegra_dfll_register(struct platform_device *pdev, return ret; } - ret = dfll_fetch_i2c_params(td); + if (of_property_read_bool(td->dev->of_node, "nvidia,pwm-to-pmic")) { + td->pmu_if = TEGRA_DFLL_PMU_PWM; + ret = dfll_fetch_pwm_params(td); + } else { + td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu"); + if (IS_ERR(td->vdd_reg)) { + dev_err(td->dev, "couldn't get vdd_cpu regulator\n"); + return PTR_ERR(td->vdd_reg); + } + td->pmu_if = TEGRA_DFLL_PMU_I2C; + ret = dfll_fetch_i2c_params(td); + } if (ret) return ret; + ret = dfll_build_lut(td); + if (ret) { + dev_err(td->dev, "couldn't build LUT\n"); + return ret; + } + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(td->dev, "no control register resource\n"); diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h index 83352c8078f2..85d0d95223f3 100644 --- a/drivers/clk/tegra/clk-dfll.h +++ b/drivers/clk/tegra/clk-dfll.h @@ -1,6 +1,6 @@ /* * clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver - * Copyright (C) 2013 NVIDIA Corporation. All rights reserved. + * Copyright (C) 2013-2019 NVIDIA Corporation. All rights reserved. * * Aleksandr Frid * Paul Walmsley @@ -22,11 +22,14 @@ #include #include +#include "cvb.h" + /** * struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver * @dev: struct device * that holds the OPP table for the DFLL * @max_freq: maximum frequency supported on this SoC * @cvb: CPU frequency table for this SoC + * @alignment: parameters of the regulator step and offset * @init_clock_trimmers: callback to initialize clock trimmers * @set_clock_trimmers_high: callback to tune clock trimmers for high voltage * @set_clock_trimmers_low: callback to tune clock trimmers for low voltage @@ -35,6 +38,7 @@ struct tegra_dfll_soc_data { struct device *dev; unsigned long max_freq; const struct cvb_table *cvb; + struct rail_alignment alignment; void (*init_clock_trimmers)(void); void (*set_clock_trimmers_high)(void); diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c index edc31bb56674..e8ec42bf8638 100644 --- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c +++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c @@ -1,7 +1,7 @@ /* * Tegra124 DFLL FCPU clock source driver * - * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved. + * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved. 
* * Aleksandr Frid * Paul Walmsley @@ -21,15 +21,24 @@ #include #include #include +#include #include +#include #include #include "clk.h" #include "clk-dfll.h" #include "cvb.h" +struct dfll_fcpu_data { + const unsigned long *cpu_max_freq_table; + unsigned int cpu_max_freq_table_size; + const struct cvb_table *cpu_cvb_tables; + unsigned int cpu_cvb_tables_size; +}; + /* Maximum CPU frequency, indexed by CPU speedo id */ -static const unsigned long cpu_max_freq_table[] = { +static const unsigned long tegra124_cpu_max_freq_table[] = { [0] = 2014500000UL, [1] = 2320500000UL, [2] = 2116500000UL, @@ -42,9 +51,6 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = { .process_id = -1, .min_millivolts = 900, .max_millivolts = 1260, - .alignment = { - .step_uv = 10000, /* 10mV */ - }, .speedo_scale = 100, .voltage_scale = 1000, .entries = { @@ -82,16 +88,493 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = { }, }; +static const unsigned long tegra210_cpu_max_freq_table[] = { + [0] = 1912500000UL, + [1] = 1912500000UL, + [2] = 2218500000UL, + [3] = 1785000000UL, + [4] = 1632000000UL, + [5] = 1912500000UL, + [6] = 2014500000UL, + [7] = 1734000000UL, + [8] = 1683000000UL, + [9] = 1555500000UL, + [10] = 1504500000UL, +}; + +#define CPU_CVB_TABLE \ + .speedo_scale = 100, \ + .voltage_scale = 1000, \ + .entries = { \ + { 204000000UL, { 1007452, -23865, 370 } }, \ + { 306000000UL, { 1052709, -24875, 370 } }, \ + { 408000000UL, { 1099069, -25895, 370 } }, \ + { 510000000UL, { 1146534, -26905, 370 } }, \ + { 612000000UL, { 1195102, -27915, 370 } }, \ + { 714000000UL, { 1244773, -28925, 370 } }, \ + { 816000000UL, { 1295549, -29935, 370 } }, \ + { 918000000UL, { 1347428, -30955, 370 } }, \ + { 1020000000UL, { 1400411, -31965, 370 } }, \ + { 1122000000UL, { 1454497, -32975, 370 } }, \ + { 1224000000UL, { 1509687, -33985, 370 } }, \ + { 1326000000UL, { 1565981, -35005, 370 } }, \ + { 1428000000UL, { 1623379, -36015, 370 } }, \ + { 1530000000UL, { 1681880, -37025, 370 } }, \ + { 1632000000UL, { 1741485, -38035, 370 } }, \ + { 1734000000UL, { 1802194, -39055, 370 } }, \ + { 1836000000UL, { 1864006, -40065, 370 } }, \ + { 1912500000UL, { 1910780, -40815, 370 } }, \ + { 2014500000UL, { 1227000, 0, 0 } }, \ + { 2218500000UL, { 1227000, 0, 0 } }, \ + { 0UL, { 0, 0, 0 } }, \ + } + +#define CPU_CVB_TABLE_XA \ + .speedo_scale = 100, \ + .voltage_scale = 1000, \ + .entries = { \ + { 204000000UL, { 1250024, -39785, 565 } }, \ + { 306000000UL, { 1297556, -41145, 565 } }, \ + { 408000000UL, { 1346718, -42505, 565 } }, \ + { 510000000UL, { 1397511, -43855, 565 } }, \ + { 612000000UL, { 1449933, -45215, 565 } }, \ + { 714000000UL, { 1503986, -46575, 565 } }, \ + { 816000000UL, { 1559669, -47935, 565 } }, \ + { 918000000UL, { 1616982, -49295, 565 } }, \ + { 1020000000UL, { 1675926, -50645, 565 } }, \ + { 1122000000UL, { 1736500, -52005, 565 } }, \ + { 1224000000UL, { 1798704, -53365, 565 } }, \ + { 1326000000UL, { 1862538, -54725, 565 } }, \ + { 1428000000UL, { 1928003, -56085, 565 } }, \ + { 1530000000UL, { 1995097, -57435, 565 } }, \ + { 1606500000UL, { 2046149, -58445, 565 } }, \ + { 1632000000UL, { 2063822, -58795, 565 } }, \ + { 0UL, { 0, 0, 0 } }, \ + } + +#define CPU_CVB_TABLE_EUCM1 \ + .speedo_scale = 100, \ + .voltage_scale = 1000, \ + .entries = { \ + { 204000000UL, { 734429, 0, 0 } }, \ + { 306000000UL, { 768191, 0, 0 } }, \ + { 408000000UL, { 801953, 0, 0 } }, \ + { 510000000UL, { 835715, 0, 0 } }, \ + { 612000000UL, { 869477, 0, 0 } }, \ + { 714000000UL, { 903239, 0, 0 } }, \ + { 816000000UL, { 
937001, 0, 0 } }, \ + { 918000000UL, { 970763, 0, 0 } }, \ + { 1020000000UL, { 1004525, 0, 0 } }, \ + { 1122000000UL, { 1038287, 0, 0 } }, \ + { 1224000000UL, { 1072049, 0, 0 } }, \ + { 1326000000UL, { 1105811, 0, 0 } }, \ + { 1428000000UL, { 1130000, 0, 0 } }, \ + { 1555500000UL, { 1130000, 0, 0 } }, \ + { 1632000000UL, { 1170000, 0, 0 } }, \ + { 1734000000UL, { 1227500, 0, 0 } }, \ + { 0UL, { 0, 0, 0 } }, \ + } + +#define CPU_CVB_TABLE_EUCM2 \ + .speedo_scale = 100, \ + .voltage_scale = 1000, \ + .entries = { \ + { 204000000UL, { 742283, 0, 0 } }, \ + { 306000000UL, { 776249, 0, 0 } }, \ + { 408000000UL, { 810215, 0, 0 } }, \ + { 510000000UL, { 844181, 0, 0 } }, \ + { 612000000UL, { 878147, 0, 0 } }, \ + { 714000000UL, { 912113, 0, 0 } }, \ + { 816000000UL, { 946079, 0, 0 } }, \ + { 918000000UL, { 980045, 0, 0 } }, \ + { 1020000000UL, { 1014011, 0, 0 } }, \ + { 1122000000UL, { 1047977, 0, 0 } }, \ + { 1224000000UL, { 1081943, 0, 0 } }, \ + { 1326000000UL, { 1090000, 0, 0 } }, \ + { 1479000000UL, { 1090000, 0, 0 } }, \ + { 1555500000UL, { 1162000, 0, 0 } }, \ + { 1683000000UL, { 1195000, 0, 0 } }, \ + { 0UL, { 0, 0, 0 } }, \ + } + +#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \ + .speedo_scale = 100, \ + .voltage_scale = 1000, \ + .entries = { \ + { 204000000UL, { 742283, 0, 0 } }, \ + { 306000000UL, { 776249, 0, 0 } }, \ + { 408000000UL, { 810215, 0, 0 } }, \ + { 510000000UL, { 844181, 0, 0 } }, \ + { 612000000UL, { 878147, 0, 0 } }, \ + { 714000000UL, { 912113, 0, 0 } }, \ + { 816000000UL, { 946079, 0, 0 } }, \ + { 918000000UL, { 980045, 0, 0 } }, \ + { 1020000000UL, { 1014011, 0, 0 } }, \ + { 1122000000UL, { 1047977, 0, 0 } }, \ + { 1224000000UL, { 1081943, 0, 0 } }, \ + { 1326000000UL, { 1090000, 0, 0 } }, \ + { 1479000000UL, { 1090000, 0, 0 } }, \ + { 1504500000UL, { 1120000, 0, 0 } }, \ + { 0UL, { 0, 0, 0 } }, \ + } + +#define CPU_CVB_TABLE_ODN \ + .speedo_scale = 100, \ + .voltage_scale = 1000, \ + .entries = { \ + { 204000000UL, { 721094, 0, 0 } }, \ + { 306000000UL, { 754040, 0, 0 } }, \ + { 408000000UL, { 786986, 0, 0 } }, \ + { 510000000UL, { 819932, 0, 0 } }, \ + { 612000000UL, { 852878, 0, 0 } }, \ + { 714000000UL, { 885824, 0, 0 } }, \ + { 816000000UL, { 918770, 0, 0 } }, \ + { 918000000UL, { 915716, 0, 0 } }, \ + { 1020000000UL, { 984662, 0, 0 } }, \ + { 1122000000UL, { 1017608, 0, 0 } }, \ + { 1224000000UL, { 1050554, 0, 0 } }, \ + { 1326000000UL, { 1083500, 0, 0 } }, \ + { 1428000000UL, { 1116446, 0, 0 } }, \ + { 1581000000UL, { 1130000, 0, 0 } }, \ + { 1683000000UL, { 1168000, 0, 0 } }, \ + { 1785000000UL, { 1227500, 0, 0 } }, \ + { 0UL, { 0, 0, 0 } }, \ + } + +static struct cvb_table tegra210_cpu_cvb_tables[] = { + { + .speedo_id = 10, + .process_id = 0, + .min_millivolts = 840, + .max_millivolts = 1120, + CPU_CVB_TABLE_EUCM2_JOINT_RAIL, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 10, + .process_id = 1, + .min_millivolts = 840, + .max_millivolts = 1120, + CPU_CVB_TABLE_EUCM2_JOINT_RAIL, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 9, + .process_id = 0, + .min_millivolts = 900, + .max_millivolts = 1162, + CPU_CVB_TABLE_EUCM2, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + } + }, + { + .speedo_id = 9, + .process_id = 1, + .min_millivolts = 900, + .max_millivolts = 1162, + CPU_CVB_TABLE_EUCM2, + 
.cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + } + }, + { + .speedo_id = 8, + .process_id = 0, + .min_millivolts = 900, + .max_millivolts = 1195, + CPU_CVB_TABLE_EUCM2, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + } + }, + { + .speedo_id = 8, + .process_id = 1, + .min_millivolts = 900, + .max_millivolts = 1195, + CPU_CVB_TABLE_EUCM2, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + } + }, + { + .speedo_id = 7, + .process_id = 0, + .min_millivolts = 841, + .max_millivolts = 1227, + CPU_CVB_TABLE_EUCM1, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 7, + .process_id = 1, + .min_millivolts = 841, + .max_millivolts = 1227, + CPU_CVB_TABLE_EUCM1, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 6, + .process_id = 0, + .min_millivolts = 870, + .max_millivolts = 1150, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune1 = 0x20091d9, + } + }, + { + .speedo_id = 6, + .process_id = 1, + .min_millivolts = 870, + .max_millivolts = 1150, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune1 = 0x25501d0, + } + }, + { + .speedo_id = 5, + .process_id = 0, + .min_millivolts = 818, + .max_millivolts = 1227, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 5, + .process_id = 1, + .min_millivolts = 818, + .max_millivolts = 1227, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x25501d0, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 4, + .process_id = -1, + .min_millivolts = 918, + .max_millivolts = 1113, + CPU_CVB_TABLE_XA, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune1 = 0x17711BD, + } + }, + { + .speedo_id = 3, + .process_id = 0, + .min_millivolts = 825, + .max_millivolts = 1227, + CPU_CVB_TABLE_ODN, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 3, + .process_id = 1, + .min_millivolts = 825, + .max_millivolts = 1227, + CPU_CVB_TABLE_ODN, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x25501d0, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 2, + .process_id = 0, + .min_millivolts = 870, + .max_millivolts = 1227, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune1 = 0x20091d9, + } + }, + { + .speedo_id = 2, + .process_id = 1, + .min_millivolts = 870, + .max_millivolts = 1227, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune1 = 0x25501d0, + } + }, + { + .speedo_id = 1, + .process_id = 0, + .min_millivolts = 837, + .max_millivolts = 1227, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 1, + .process_id = 1, + .min_millivolts = 837, + .max_millivolts = 1227, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x25501d0, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 0, + 
.process_id = 0, + .min_millivolts = 850, + .max_millivolts = 1170, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x20091d9, + .tune_high_min_millivolts = 864, + } + }, + { + .speedo_id = 0, + .process_id = 1, + .min_millivolts = 850, + .max_millivolts = 1170, + CPU_CVB_TABLE, + .cpu_dfll_data = { + .tune0_low = 0xffead0ff, + .tune0_high = 0xffead0ff, + .tune1 = 0x25501d0, + .tune_high_min_millivolts = 864, + } + }, +}; + +static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = { + .cpu_max_freq_table = tegra124_cpu_max_freq_table, + .cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table), + .cpu_cvb_tables = tegra124_cpu_cvb_tables, + .cpu_cvb_tables_size = ARRAY_SIZE(tegra124_cpu_cvb_tables) +}; + +static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = { + .cpu_max_freq_table = tegra210_cpu_max_freq_table, + .cpu_max_freq_table_size = ARRAY_SIZE(tegra210_cpu_max_freq_table), + .cpu_cvb_tables = tegra210_cpu_cvb_tables, + .cpu_cvb_tables_size = ARRAY_SIZE(tegra210_cpu_cvb_tables), +}; + +static const struct of_device_id tegra124_dfll_fcpu_of_match[] = { + { + .compatible = "nvidia,tegra124-dfll", + .data = &tegra124_dfll_fcpu_data, + }, + { + .compatible = "nvidia,tegra210-dfll", + .data = &tegra210_dfll_fcpu_data + }, + { }, +}; + +static void get_alignment_from_dt(struct device *dev, + struct rail_alignment *align) +{ + if (of_property_read_u32(dev->of_node, + "nvidia,pwm-voltage-step-microvolts", + &align->step_uv)) + align->step_uv = 0; + + if (of_property_read_u32(dev->of_node, + "nvidia,pwm-min-microvolts", + &align->offset_uv)) + align->offset_uv = 0; +} + +static int get_alignment_from_regulator(struct device *dev, + struct rail_alignment *align) +{ + struct regulator *reg = devm_regulator_get(dev, "vdd-cpu"); + + if (IS_ERR(reg)) + return PTR_ERR(reg); + + align->offset_uv = regulator_list_voltage(reg, 0); + align->step_uv = regulator_get_linear_step(reg); + + devm_regulator_put(reg); + + return 0; +} + static int tegra124_dfll_fcpu_probe(struct platform_device *pdev) { int process_id, speedo_id, speedo_value, err; struct tegra_dfll_soc_data *soc; + const struct dfll_fcpu_data *fcpu_data; + struct rail_alignment align; + + fcpu_data = of_device_get_match_data(&pdev->dev); + if (!fcpu_data) + return -ENODEV; process_id = tegra_sku_info.cpu_process_id; speedo_id = tegra_sku_info.cpu_speedo_id; speedo_value = tegra_sku_info.cpu_speedo_value; - if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) { + if (speedo_id >= fcpu_data->cpu_max_freq_table_size) { dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n", speedo_id); return -ENODEV; @@ -107,12 +590,22 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev) return -ENODEV; } - soc->max_freq = cpu_max_freq_table[speedo_id]; + if (of_property_read_bool(pdev->dev.of_node, "nvidia,pwm-to-pmic")) { + get_alignment_from_dt(&pdev->dev, &align); + } else { + err = get_alignment_from_regulator(&pdev->dev, &align); + if (err) + return err; + } + + soc->max_freq = fcpu_data->cpu_max_freq_table[speedo_id]; + + soc->cvb = tegra_cvb_add_opp_table(soc->dev, fcpu_data->cpu_cvb_tables, + fcpu_data->cpu_cvb_tables_size, + &align, process_id, speedo_id, + speedo_value, soc->max_freq); + soc->alignment = align; - soc->cvb = tegra_cvb_add_opp_table(soc->dev, tegra124_cpu_cvb_tables, - ARRAY_SIZE(tegra124_cpu_cvb_tables), - process_id, speedo_id, speedo_value, - soc->max_freq); if (IS_ERR(soc->cvb)) { dev_err(&pdev->dev, "couldn't add OPP table: %ld\n", 
PTR_ERR(soc->cvb)); @@ -144,11 +637,6 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id tegra124_dfll_fcpu_of_match[] = { - { .compatible = "nvidia,tegra124-dfll", }, - { }, -}; - static const struct dev_pm_ops tegra124_dfll_pm_ops = { SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend, tegra_dfll_runtime_resume, NULL) diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c index da9e8e7b5ce5..35eeb6adc68e 100644 --- a/drivers/clk/tegra/cvb.c +++ b/drivers/clk/tegra/cvb.c @@ -1,7 +1,7 @@ /* * Utility functions for parsing Tegra CVB voltage tables * - * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved. + * Copyright (C) 2012-2019 NVIDIA Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -62,9 +62,9 @@ static int round_voltage(int mv, const struct rail_alignment *align, int up) } static int build_opp_table(struct device *dev, const struct cvb_table *table, + struct rail_alignment *align, int speedo_value, unsigned long max_freq) { - const struct rail_alignment *align = &table->alignment; int i, ret, dfll_mv, min_mv, max_mv; min_mv = round_voltage(table->min_millivolts, align, UP); @@ -109,8 +109,9 @@ static int build_opp_table(struct device *dev, const struct cvb_table *table, */ const struct cvb_table * tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables, - size_t count, int process_id, int speedo_id, - int speedo_value, unsigned long max_freq) + size_t count, struct rail_alignment *align, + int process_id, int speedo_id, int speedo_value, + unsigned long max_freq) { size_t i; int ret; @@ -124,7 +125,8 @@ tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables, if (table->process_id != -1 && table->process_id != process_id) continue; - ret = build_opp_table(dev, table, speedo_value, max_freq); + ret = build_opp_table(dev, table, align, speedo_value, + max_freq); return ret ? 
ERR_PTR(ret) : table; } diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h index c1f077993b2a..91a1941c21ef 100644 --- a/drivers/clk/tegra/cvb.h +++ b/drivers/clk/tegra/cvb.h @@ -41,6 +41,7 @@ struct cvb_cpu_dfll_data { u32 tune0_low; u32 tune0_high; u32 tune1; + unsigned int tune_high_min_millivolts; }; struct cvb_table { @@ -49,7 +50,6 @@ struct cvb_table { int min_millivolts; int max_millivolts; - struct rail_alignment alignment; int speedo_scale; int voltage_scale; @@ -59,8 +59,9 @@ struct cvb_table { const struct cvb_table * tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *cvb_tables, - size_t count, int process_id, int speedo_id, - int speedo_value, unsigned long max_freq); + size_t count, struct rail_alignment *align, + int process_id, int speedo_id, int speedo_value, + unsigned long max_freq); void tegra_cvb_remove_opp_table(struct device *dev, const struct cvb_table *table, unsigned long max_freq); diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index d0cd58534781..5d7fb2eecce4 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -351,6 +351,9 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) struct clk_iomap *io; io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES); + if (!io) + panic("%s: Failed to allocate %zu bytes\n", __func__, + sizeof(*io)); io->mem = mem; diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 8d77090ad94a..0241450f3eb3 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, num_dividers = i; tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); - if (!tmp) + if (!tmp) { + *table = ERR_PTR(-ENOMEM); return -ENOMEM; + } valid_div = 0; *width = 0; @@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) { struct clk_omap_divider *div; struct clk_omap_reg *reg; + int ret; if (!setup) return NULL; @@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) div->flags |= CLK_DIVIDER_POWER_OF_TWO; div->table = _get_div_table_from_setup(setup, &div->width); + if (IS_ERR(div->table)) { + ret = PTR_ERR(div->table); + kfree(div); + return ERR_PTR(ret); + } + div->shift = setup->bit_shift; div->latch = -EINVAL; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index a9e26f6a81a1..171502a356aa 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -131,7 +131,8 @@ config SUN5I_HSTIMER config TEGRA_TIMER bool "Tegra timer driver" if COMPILE_TEST select CLKSRC_MMIO - depends on ARM + select TIMER_OF + depends on ARM || ARM64 help Enables support for the Tegra driver. @@ -360,6 +361,16 @@ config ARM64_ERRATUM_858921 The workaround will be dynamically enabled when an affected core is detected. +config SUN50I_ERRATUM_UNKNOWN1 + bool "Workaround for Allwinner A64 erratum UNKNOWN1" + default y + depends on ARM_ARCH_TIMER && ARM64 && ARCH_SUNXI + select ARM_ARCH_TIMER_OOL_WORKAROUND + help + This option enables a workaround for instability in the timer on + the Allwinner A64 SoC. The workaround will only be active if the + allwinner,erratum-unknown1 property is found in the timer node. + config ARM_GLOBAL_TIMER bool "Support for the ARM global timer" if COMPILE_TEST select TIMER_OF if OF @@ -634,4 +645,13 @@ config GX6605S_TIMER help This option enables support for gx6605s SOC's timer. 
+config MILBEAUT_TIMER + bool "Milbeaut timer driver" if COMPILE_TEST + depends on OF + depends on ARM + select TIMER_OF + select CLKSRC_MMIO + help + Enables the support for Milbeaut timer driver. + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index cdd210ff89ea..be6e0fbc7489 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o -obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o +obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += timer-cs5535.o obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o @@ -29,7 +29,7 @@ obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o obj-$(CONFIG_MXS_TIMER) += mxs_timer.o -obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o +obj-$(CONFIG_CLKSRC_PXA) += timer-pxa.o obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o obj-$(CONFIG_U300_TIMER) += timer-u300.o obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o @@ -55,6 +55,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o obj-$(CONFIG_OWL_TIMER) += timer-owl.o +obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o obj-$(CONFIG_RDA_TIMER) += timer-rda.o @@ -69,7 +70,7 @@ obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o -obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o +obj-$(CONFIG_CLKSRC_TANGO_XTAL) += timer-tango-xtal.o obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o obj-$(CONFIG_CLKSRC_IMX_TPM) += timer-imx-tpm.o obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 9a7d4dc00b6e..a8b20b65bd4b 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -326,6 +326,48 @@ static u64 notrace arm64_1188873_read_cntvct_el0(void) } #endif +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 +/* + * The low bits of the counter registers are indeterminate while bit 10 or + * greater is rolling over. Since the counter value can jump both backward + * (7ff -> 000 -> 800) and forward (7ff -> fff -> 800), ignore register values + * with all ones or all zeros in the low bits. Bound the loop by the maximum + * number of CPU cycles in 3 consecutive 24 MHz counter periods. 
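+ * (Three consecutive 24 MHz counter periods are 125 ns, i.e. roughly 150
+ * CPU cycles at the A64's ~1.2 GHz maximum clock, which is where the
+ * 150-iteration retry bound below comes from.)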
+ */ +#define __sun50i_a64_read_reg(reg) ({ \ + u64 _val; \ + int _retries = 150; \ + \ + do { \ + _val = read_sysreg(reg); \ + _retries--; \ + } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \ + \ + WARN_ON_ONCE(!_retries); \ + _val; \ +}) + +static u64 notrace sun50i_a64_read_cntpct_el0(void) +{ + return __sun50i_a64_read_reg(cntpct_el0); +} + +static u64 notrace sun50i_a64_read_cntvct_el0(void) +{ + return __sun50i_a64_read_reg(cntvct_el0); +} + +static u32 notrace sun50i_a64_read_cntp_tval_el0(void) +{ + return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0(); +} + +static u32 notrace sun50i_a64_read_cntv_tval_el0(void) +{ + return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0(); +} +#endif + #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); @@ -423,6 +465,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .read_cntvct_el0 = arm64_1188873_read_cntvct_el0, }, #endif +#ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1 + { + .match_type = ate_match_dt, + .id = "allwinner,erratum-unknown1", + .desc = "Allwinner erratum UNKNOWN1", + .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0, + .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0, + .read_cntpct_el0 = sun50i_a64_read_cntpct_el0, + .read_cntvct_el0 = sun50i_a64_read_cntvct_el0, + .set_next_event_phys = erratum_set_next_event_tval_phys, + .set_next_event_virt = erratum_set_next_event_tval_virt, + }, +#endif }; typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *, diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 7a244b681876..34bd250d46c6 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -10,14 +10,12 @@ * published by the Free Software Foundation. */ -#include #include #include #include #include #include #include -#include #include #include #include @@ -388,6 +386,13 @@ static void exynos4_mct_tick_start(unsigned long cycles, exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET); } +static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) +{ + /* Clear the MCT tick interrupt */ + if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) + exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); +} + static int exynos4_tick_set_next_event(unsigned long cycles, struct clock_event_device *evt) { @@ -404,6 +409,7 @@ static int set_state_shutdown(struct clock_event_device *evt) mevt = container_of(evt, struct mct_clock_event_device, evt); exynos4_mct_tick_stop(mevt); + exynos4_mct_tick_clear(mevt); return 0; } @@ -420,8 +426,11 @@ static int set_state_periodic(struct clock_event_device *evt) return 0; } -static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) +static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) { + struct mct_clock_event_device *mevt = dev_id; + struct clock_event_device *evt = &mevt->evt; + /* * This is for supporting oneshot mode. 
* Mct would generate interrupt periodically @@ -430,16 +439,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt) if (!clockevent_state_periodic(&mevt->evt)) exynos4_mct_tick_stop(mevt); - /* Clear the MCT tick interrupt */ - if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) - exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); -} - -static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) -{ - struct mct_clock_event_device *mevt = dev_id; - struct clock_event_device *evt = &mevt->evt; - exynos4_mct_tick_clear(mevt); evt->event_handler(evt); @@ -507,13 +506,12 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * int err, cpu; struct clk *mct_clk, *tick_clk; - tick_clk = np ? of_clk_get_by_name(np, "fin_pll") : - clk_get(NULL, "fin_pll"); + tick_clk = of_clk_get_by_name(np, "fin_pll"); if (IS_ERR(tick_clk)) panic("%s: unable to determine tick clock rate\n", __func__); clk_rate = clk_get_rate(tick_clk); - mct_clk = np ? of_clk_get_by_name(np, "mct") : clk_get(NULL, "mct"); + mct_clk = of_clk_get_by_name(np, "mct"); if (IS_ERR(mct_clk)) panic("%s: unable to retrieve mct clock instance\n", __func__); clk_prepare_enable(mct_clk); @@ -562,7 +560,19 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * return 0; out_irq: - free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); + if (mct_int_type == MCT_INT_PPI) { + free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick); + } else { + for_each_possible_cpu(cpu) { + struct mct_clock_event_device *pcpu_mevt = + per_cpu_ptr(&percpu_mct_tick, cpu); + + if (pcpu_mevt->evt.irq != -1) { + free_irq(pcpu_mevt->evt.irq, pcpu_mevt); + pcpu_mevt->evt.irq = -1; + } + } + } return err; } @@ -581,11 +591,7 @@ static int __init mct_init_dt(struct device_node *np, unsigned int int_type) * timer irqs are specified after the four global timer * irqs are specified. */ -#ifdef CONFIG_OF nr_irqs = of_irq_count(np); -#else - nr_irqs = 0; -#endif for (i = MCT_L0_IRQ; i < nr_irqs; i++) mct_irqs[i] = irq_of_parse_and_map(np, i); diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/timer-cs5535.c similarity index 100% rename from drivers/clocksource/cs5535-clockevt.c rename to drivers/clocksource/timer-cs5535.c diff --git a/drivers/clocksource/timer-milbeaut.c b/drivers/clocksource/timer-milbeaut.c new file mode 100644 index 000000000000..f2019a88e3ee --- /dev/null +++ b/drivers/clocksource/timer-milbeaut.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Socionext Inc. 
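+ *
+ * Milbeaut peripheral timer: channel 1 is set up as a free-running 32-bit
+ * down-counter used for the clocksource and sched_clock (input clock / 2),
+ * while channel 0 drives the per-SoC clock event device.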
+ */ + +#include +#include +#include +#include +#include +#include "timer-of.h" + +#define MLB_TMR_TMCSR_OFS 0x0 +#define MLB_TMR_TMR_OFS 0x4 +#define MLB_TMR_TMRLR1_OFS 0x8 +#define MLB_TMR_TMRLR2_OFS 0xc +#define MLB_TMR_REGSZPCH 0x10 + +#define MLB_TMR_TMCSR_OUTL BIT(5) +#define MLB_TMR_TMCSR_RELD BIT(4) +#define MLB_TMR_TMCSR_INTE BIT(3) +#define MLB_TMR_TMCSR_UF BIT(2) +#define MLB_TMR_TMCSR_CNTE BIT(1) +#define MLB_TMR_TMCSR_TRG BIT(0) + +#define MLB_TMR_TMCSR_CSL_DIV2 0 +#define MLB_TMR_DIV_CNT 2 + +#define MLB_TMR_SRC_CH (1) +#define MLB_TMR_EVT_CH (0) + +#define MLB_TMR_SRC_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_SRC_CH) +#define MLB_TMR_EVT_CH_OFS (MLB_TMR_REGSZPCH * MLB_TMR_EVT_CH) + +#define MLB_TMR_SRC_TMCSR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMCSR_OFS) +#define MLB_TMR_SRC_TMR_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMR_OFS) +#define MLB_TMR_SRC_TMRLR1_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR1_OFS) +#define MLB_TMR_SRC_TMRLR2_OFS (MLB_TMR_SRC_CH_OFS + MLB_TMR_TMRLR2_OFS) + +#define MLB_TMR_EVT_TMCSR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMCSR_OFS) +#define MLB_TMR_EVT_TMR_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMR_OFS) +#define MLB_TMR_EVT_TMRLR1_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR1_OFS) +#define MLB_TMR_EVT_TMRLR2_OFS (MLB_TMR_EVT_CH_OFS + MLB_TMR_TMRLR2_OFS) + +#define MLB_TIMER_RATING 500 + +static irqreturn_t mlb_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *clk = dev_id; + struct timer_of *to = to_timer_of(clk); + u32 val; + + val = readl_relaxed(timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS); + val &= ~MLB_TMR_TMCSR_UF; + writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS); + + clk->event_handler(clk); + + return IRQ_HANDLED; +} + +static int mlb_set_state_periodic(struct clock_event_device *clk) +{ + struct timer_of *to = to_timer_of(clk); + u32 val = MLB_TMR_TMCSR_CSL_DIV2; + + writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS); + + writel_relaxed(to->of_clk.period, timer_of_base(to) + + MLB_TMR_EVT_TMRLR1_OFS); + val |= MLB_TMR_TMCSR_RELD | MLB_TMR_TMCSR_CNTE | + MLB_TMR_TMCSR_TRG | MLB_TMR_TMCSR_INTE; + writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS); + return 0; +} + +static int mlb_set_state_oneshot(struct clock_event_device *clk) +{ + struct timer_of *to = to_timer_of(clk); + u32 val = MLB_TMR_TMCSR_CSL_DIV2; + + writel_relaxed(val, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS); + return 0; +} + +static int mlb_clkevt_next_event(unsigned long event, + struct clock_event_device *clk) +{ + struct timer_of *to = to_timer_of(clk); + + writel_relaxed(event, timer_of_base(to) + MLB_TMR_EVT_TMRLR1_OFS); + writel_relaxed(MLB_TMR_TMCSR_CSL_DIV2 | + MLB_TMR_TMCSR_CNTE | MLB_TMR_TMCSR_INTE | + MLB_TMR_TMCSR_TRG, timer_of_base(to) + + MLB_TMR_EVT_TMCSR_OFS); + return 0; +} + +static int mlb_config_clock_source(struct timer_of *to) +{ + writel_relaxed(0, timer_of_base(to) + MLB_TMR_SRC_TMCSR_OFS); + writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMR_OFS); + writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR1_OFS); + writel_relaxed(~0, timer_of_base(to) + MLB_TMR_SRC_TMRLR2_OFS); + writel_relaxed(BIT(4) | BIT(1) | BIT(0), timer_of_base(to) + + MLB_TMR_SRC_TMCSR_OFS); + return 0; +} + +static int mlb_config_clock_event(struct timer_of *to) +{ + writel_relaxed(0, timer_of_base(to) + MLB_TMR_EVT_TMCSR_OFS); + return 0; +} + +static struct timer_of to = { + .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK, + + .clkevt = { + .name = "mlb-clkevt", + .rating = MLB_TIMER_RATING, + .cpumask = cpu_possible_mask, + .features = 
CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_ONESHOT, + .set_state_oneshot = mlb_set_state_oneshot, + .set_state_periodic = mlb_set_state_periodic, + .set_next_event = mlb_clkevt_next_event, + }, + + .of_irq = { + .flags = IRQF_TIMER | IRQF_IRQPOLL, + .handler = mlb_timer_interrupt, + }, +}; + +static u64 notrace mlb_timer_sched_read(void) +{ + return ~readl_relaxed(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS); +} + +static int __init mlb_timer_init(struct device_node *node) +{ + int ret; + unsigned long rate; + + ret = timer_of_init(node, &to); + if (ret) + return ret; + + rate = timer_of_rate(&to) / MLB_TMR_DIV_CNT; + mlb_config_clock_source(&to); + clocksource_mmio_init(timer_of_base(&to) + MLB_TMR_SRC_TMR_OFS, + node->name, rate, MLB_TIMER_RATING, 32, + clocksource_mmio_readl_down); + sched_clock_register(mlb_timer_sched_read, 32, rate); + mlb_config_clock_event(&to); + clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 15, + 0xffffffff); + return 0; +} +TIMER_OF_DECLARE(mlb_peritimer, "socionext,milbeaut-timer", + mlb_timer_init); diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/timer-pxa.c similarity index 100% rename from drivers/clocksource/pxa_timer.c rename to drivers/clocksource/timer-pxa.c diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 431892200a08..e8163693e936 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -95,13 +95,30 @@ static int __init riscv_timer_init_dt(struct device_node *n) struct clocksource *cs; hartid = riscv_of_processor_hartid(n); + if (hartid < 0) { + pr_warn("Not valid hartid for node [%pOF] error = [%d]\n", + n, hartid); + return hartid; + } + cpuid = riscv_hartid_to_cpuid(hartid); + if (cpuid < 0) { + pr_warn("Invalid cpuid for hartid [%d]\n", hartid); + return cpuid; + } if (cpuid != smp_processor_id()) return 0; + pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n", + __func__, cpuid, hartid); cs = per_cpu_ptr(&riscv_clocksource, cpuid); - clocksource_register_hz(cs, riscv_timebase); + error = clocksource_register_hz(cs, riscv_timebase); + if (error) { + pr_err("RISCV timer register failed [%d] for cpu = [%d]\n", + error, cpuid); + return error; + } sched_clock_register(riscv_sched_clock, BITS_PER_LONG, riscv_timebase); @@ -110,8 +127,8 @@ static int __init riscv_timer_init_dt(struct device_node *n) "clockevents/riscv/timer:starting", riscv_timer_starting_cpu, riscv_timer_dying_cpu); if (error) - pr_err("RISCV timer register failed [%d] for cpu = [%d]\n", - error, cpuid); + pr_err("cpu hp setup state failed for RISCV timer [%d]\n", + error); return error; } diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 3b56ea3f52af..552c5254390c 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -202,6 +202,11 @@ static int __init sun5i_setup_clocksource(struct device_node *node, } rate = clk_get_rate(clk); + if (!rate) { + pr_err("Couldn't get parent clock rate\n"); + ret = -EINVAL; + goto err_disable_clk; + } cs->timer.base = base; cs->timer.clk = clk; @@ -275,6 +280,11 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem } rate = clk_get_rate(clk); + if (!rate) { + pr_err("Couldn't get parent clock rate\n"); + ret = -EINVAL; + goto err_disable_clk; + } ce->timer.base = base; ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/timer-tango-xtal.c similarity index 100% rename from 
drivers/clocksource/tango_xtal.c rename to drivers/clocksource/timer-tango-xtal.c diff --git a/drivers/clocksource/timer-tegra20.c b/drivers/clocksource/timer-tegra20.c index 4293943f4e2b..fdb3d795a409 100644 --- a/drivers/clocksource/timer-tegra20.c +++ b/drivers/clocksource/timer-tegra20.c @@ -15,21 +15,24 @@ * */ -#include +#include +#include +#include +#include +#include #include -#include #include -#include -#include -#include -#include -#include #include #include +#include #include -#include +#include + +#include "timer-of.h" +#ifdef CONFIG_ARM #include +#endif #define RTC_SECONDS 0x08 #define RTC_SHADOW_SECONDS 0x0c @@ -39,74 +42,161 @@ #define TIMERUS_USEC_CFG 0x14 #define TIMERUS_CNTR_FREEZE 0x4c -#define TIMER1_BASE 0x0 -#define TIMER2_BASE 0x8 -#define TIMER3_BASE 0x50 -#define TIMER4_BASE 0x58 - -#define TIMER_PTV 0x0 -#define TIMER_PCR 0x4 - +#define TIMER_PTV 0x0 +#define TIMER_PTV_EN BIT(31) +#define TIMER_PTV_PER BIT(30) +#define TIMER_PCR 0x4 +#define TIMER_PCR_INTR_CLR BIT(30) + +#ifdef CONFIG_ARM +#define TIMER_CPU0 0x50 /* TIMER3 */ +#else +#define TIMER_CPU0 0x90 /* TIMER10 */ +#define TIMER10_IRQ_IDX 10 +#define IRQ_IDX_FOR_CPU(cpu) (TIMER10_IRQ_IDX + cpu) +#endif +#define TIMER_BASE_FOR_CPU(cpu) (TIMER_CPU0 + (cpu) * 8) + +static u32 usec_config; static void __iomem *timer_reg_base; +#ifdef CONFIG_ARM static void __iomem *rtc_base; - static struct timespec64 persistent_ts; static u64 persistent_ms, last_persistent_ms; - static struct delay_timer tegra_delay_timer; - -#define timer_writel(value, reg) \ - writel_relaxed(value, timer_reg_base + (reg)) -#define timer_readl(reg) \ - readl_relaxed(timer_reg_base + (reg)) +#endif static int tegra_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt) { - u32 reg; + void __iomem *reg_base = timer_of_base(to_timer_of(evt)); - reg = 0x80000000 | ((cycles > 1) ? (cycles-1) : 0); - timer_writel(reg, TIMER3_BASE + TIMER_PTV); + writel(TIMER_PTV_EN | + ((cycles > 1) ? 
(cycles - 1) : 0), /* n+1 scheme */ + reg_base + TIMER_PTV); return 0; } -static inline void timer_shutdown(struct clock_event_device *evt) +static int tegra_timer_shutdown(struct clock_event_device *evt) { - timer_writel(0, TIMER3_BASE + TIMER_PTV); + void __iomem *reg_base = timer_of_base(to_timer_of(evt)); + + writel(0, reg_base + TIMER_PTV); + + return 0; } -static int tegra_timer_shutdown(struct clock_event_device *evt) +static int tegra_timer_set_periodic(struct clock_event_device *evt) { - timer_shutdown(evt); + void __iomem *reg_base = timer_of_base(to_timer_of(evt)); + + writel(TIMER_PTV_EN | TIMER_PTV_PER | + ((timer_of_rate(to_timer_of(evt)) / HZ) - 1), + reg_base + TIMER_PTV); + return 0; } -static int tegra_timer_set_periodic(struct clock_event_device *evt) +static irqreturn_t tegra_timer_isr(int irq, void *dev_id) +{ + struct clock_event_device *evt = (struct clock_event_device *)dev_id; + void __iomem *reg_base = timer_of_base(to_timer_of(evt)); + + writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static void tegra_timer_suspend(struct clock_event_device *evt) +{ + void __iomem *reg_base = timer_of_base(to_timer_of(evt)); + + writel(TIMER_PCR_INTR_CLR, reg_base + TIMER_PCR); +} + +static void tegra_timer_resume(struct clock_event_device *evt) +{ + writel(usec_config, timer_reg_base + TIMERUS_USEC_CFG); +} + +#ifdef CONFIG_ARM64 +static DEFINE_PER_CPU(struct timer_of, tegra_to) = { + .flags = TIMER_OF_CLOCK | TIMER_OF_BASE, + + .clkevt = { + .name = "tegra_timer", + .rating = 460, + .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, + .set_next_event = tegra_timer_set_next_event, + .set_state_shutdown = tegra_timer_shutdown, + .set_state_periodic = tegra_timer_set_periodic, + .set_state_oneshot = tegra_timer_shutdown, + .tick_resume = tegra_timer_shutdown, + .suspend = tegra_timer_suspend, + .resume = tegra_timer_resume, + }, +}; + +static int tegra_timer_setup(unsigned int cpu) { - u32 reg = 0xC0000000 | ((1000000 / HZ) - 1); + struct timer_of *to = per_cpu_ptr(&tegra_to, cpu); + + irq_force_affinity(to->clkevt.irq, cpumask_of(cpu)); + enable_irq(to->clkevt.irq); + + clockevents_config_and_register(&to->clkevt, timer_of_rate(to), + 1, /* min */ + 0x1fffffff); /* 29 bits */ - timer_shutdown(evt); - timer_writel(reg, TIMER3_BASE + TIMER_PTV); return 0; } -static struct clock_event_device tegra_clockevent = { - .name = "timer0", - .rating = 300, - .features = CLOCK_EVT_FEAT_ONESHOT | - CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_DYNIRQ, - .set_next_event = tegra_timer_set_next_event, - .set_state_shutdown = tegra_timer_shutdown, - .set_state_periodic = tegra_timer_set_periodic, - .set_state_oneshot = tegra_timer_shutdown, - .tick_resume = tegra_timer_shutdown, +static int tegra_timer_stop(unsigned int cpu) +{ + struct timer_of *to = per_cpu_ptr(&tegra_to, cpu); + + to->clkevt.set_state_shutdown(&to->clkevt); + disable_irq_nosync(to->clkevt.irq); + + return 0; +} +#else /* CONFIG_ARM */ +static struct timer_of tegra_to = { + .flags = TIMER_OF_CLOCK | TIMER_OF_BASE | TIMER_OF_IRQ, + + .clkevt = { + .name = "tegra_timer", + .rating = 300, + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_DYNIRQ, + .set_next_event = tegra_timer_set_next_event, + .set_state_shutdown = tegra_timer_shutdown, + .set_state_periodic = tegra_timer_set_periodic, + .set_state_oneshot = tegra_timer_shutdown, + .tick_resume = tegra_timer_shutdown, + .suspend = tegra_timer_suspend, + .resume = 
tegra_timer_resume, + .cpumask = cpu_possible_mask, + }, + + .of_irq = { + .index = 2, + .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH, + .handler = tegra_timer_isr, + }, }; static u64 notrace tegra_read_sched_clock(void) { - return timer_readl(TIMERUS_CNTR_1US); + return readl(timer_reg_base + TIMERUS_CNTR_1US); +} + +static unsigned long tegra_delay_timer_read_counter_long(void) +{ + return readl(timer_reg_base + TIMERUS_CNTR_1US); } /* @@ -143,100 +233,155 @@ static void tegra_read_persistent_clock64(struct timespec64 *ts) timespec64_add_ns(&persistent_ts, delta * NSEC_PER_MSEC); *ts = persistent_ts; } +#endif -static unsigned long tegra_delay_timer_read_counter_long(void) -{ - return readl(timer_reg_base + TIMERUS_CNTR_1US); -} - -static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id) -{ - struct clock_event_device *evt = (struct clock_event_device *)dev_id; - timer_writel(1<<30, TIMER3_BASE + TIMER_PCR); - evt->event_handler(evt); - return IRQ_HANDLED; -} - -static struct irqaction tegra_timer_irq = { - .name = "timer0", - .flags = IRQF_TIMER | IRQF_TRIGGER_HIGH, - .handler = tegra_timer_interrupt, - .dev_id = &tegra_clockevent, -}; - -static int __init tegra20_init_timer(struct device_node *np) +static int tegra_timer_common_init(struct device_node *np, struct timer_of *to) { - struct clk *clk; - unsigned long rate; - int ret; - - timer_reg_base = of_iomap(np, 0); - if (!timer_reg_base) { - pr_err("Can't map timer registers\n"); - return -ENXIO; - } + int ret = 0; - tegra_timer_irq.irq = irq_of_parse_and_map(np, 2); - if (tegra_timer_irq.irq <= 0) { - pr_err("Failed to map timer IRQ\n"); - return -EINVAL; - } + ret = timer_of_init(np, to); + if (ret < 0) + goto out; - clk = of_clk_get(np, 0); - if (IS_ERR(clk)) { - pr_warn("Unable to get timer clock. 
Assuming 12Mhz input clock.\n"); - rate = 12000000; - } else { - clk_prepare_enable(clk); - rate = clk_get_rate(clk); - } + timer_reg_base = timer_of_base(to); - switch (rate) { + /* + * Configure microsecond timers to have 1MHz clock + * Config register is 0xqqww, where qq is "dividend", ww is "divisor" + * Uses n+1 scheme + */ + switch (timer_of_rate(to)) { case 12000000: - timer_writel(0x000b, TIMERUS_USEC_CFG); + usec_config = 0x000b; /* (11+1)/(0+1) */ + break; + case 12800000: + usec_config = 0x043f; /* (63+1)/(4+1) */ break; case 13000000: - timer_writel(0x000c, TIMERUS_USEC_CFG); + usec_config = 0x000c; /* (12+1)/(0+1) */ + break; + case 16800000: + usec_config = 0x0453; /* (83+1)/(4+1) */ break; case 19200000: - timer_writel(0x045f, TIMERUS_USEC_CFG); + usec_config = 0x045f; /* (95+1)/(4+1) */ break; case 26000000: - timer_writel(0x0019, TIMERUS_USEC_CFG); + usec_config = 0x0019; /* (25+1)/(0+1) */ + break; + case 38400000: + usec_config = 0x04bf; /* (191+1)/(4+1) */ + break; + case 48000000: + usec_config = 0x002f; /* (47+1)/(0+1) */ break; default: - WARN(1, "Unknown clock rate"); + ret = -EINVAL; + goto out; + } + + writel(usec_config, timer_of_base(to) + TIMERUS_USEC_CFG); + +out: + return ret; +} + +#ifdef CONFIG_ARM64 +static int __init tegra_init_timer(struct device_node *np) +{ + int cpu, ret = 0; + struct timer_of *to; + + to = this_cpu_ptr(&tegra_to); + ret = tegra_timer_common_init(np, to); + if (ret < 0) + goto out; + + for_each_possible_cpu(cpu) { + struct timer_of *cpu_to; + + cpu_to = per_cpu_ptr(&tegra_to, cpu); + cpu_to->of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(cpu); + cpu_to->of_clk.rate = timer_of_rate(to); + cpu_to->clkevt.cpumask = cpumask_of(cpu); + cpu_to->clkevt.irq = + irq_of_parse_and_map(np, IRQ_IDX_FOR_CPU(cpu)); + if (!cpu_to->clkevt.irq) { + pr_err("%s: can't map IRQ for CPU%d\n", + __func__, cpu); + ret = -EINVAL; + goto out; + } + + irq_set_status_flags(cpu_to->clkevt.irq, IRQ_NOAUTOEN); + ret = request_irq(cpu_to->clkevt.irq, tegra_timer_isr, + IRQF_TIMER | IRQF_NOBALANCING, + cpu_to->clkevt.name, &cpu_to->clkevt); + if (ret) { + pr_err("%s: cannot setup irq %d for CPU%d\n", + __func__, cpu_to->clkevt.irq, cpu); + ret = -EINVAL; + goto out_irq; + } + } + + cpuhp_setup_state(CPUHP_AP_TEGRA_TIMER_STARTING, + "AP_TEGRA_TIMER_STARTING", tegra_timer_setup, + tegra_timer_stop); + + return ret; +out_irq: + for_each_possible_cpu(cpu) { + struct timer_of *cpu_to; + + cpu_to = per_cpu_ptr(&tegra_to, cpu); + if (cpu_to->clkevt.irq) { + free_irq(cpu_to->clkevt.irq, &cpu_to->clkevt); + irq_dispose_mapping(cpu_to->clkevt.irq); + } } +out: + timer_of_cleanup(to); + return ret; +} +#else /* CONFIG_ARM */ +static int __init tegra_init_timer(struct device_node *np) +{ + int ret = 0; + + ret = tegra_timer_common_init(np, &tegra_to); + if (ret < 0) + goto out; - sched_clock_register(tegra_read_sched_clock, 32, 1000000); + tegra_to.of_base.base = timer_reg_base + TIMER_BASE_FOR_CPU(0); + tegra_to.of_clk.rate = 1000000; /* microsecond timer */ + sched_clock_register(tegra_read_sched_clock, 32, + timer_of_rate(&tegra_to)); ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US, - "timer_us", 1000000, 300, 32, - clocksource_mmio_readl_up); + "timer_us", timer_of_rate(&tegra_to), + 300, 32, clocksource_mmio_readl_up); if (ret) { pr_err("Failed to register clocksource\n"); - return ret; + goto out; } tegra_delay_timer.read_current_timer = tegra_delay_timer_read_counter_long; - tegra_delay_timer.freq = 1000000; + tegra_delay_timer.freq = 
timer_of_rate(&tegra_to); register_current_timer_delay(&tegra_delay_timer); - ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq); - if (ret) { - pr_err("Failed to register timer IRQ: %d\n", ret); - return ret; - } + clockevents_config_and_register(&tegra_to.clkevt, + timer_of_rate(&tegra_to), + 0x1, + 0x1fffffff); - tegra_clockevent.cpumask = cpu_possible_mask; - tegra_clockevent.irq = tegra_timer_irq.irq; - clockevents_config_and_register(&tegra_clockevent, 1000000, - 0x1, 0x1fffffff); + return ret; +out: + timer_of_cleanup(&tegra_to); - return 0; + return ret; } -TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer); static int __init tegra20_init_rtc(struct device_node *np) { @@ -261,3 +406,6 @@ static int __init tegra20_init_rtc(struct device_node *np) return register_persistent_clock(tegra_read_persistent_clock64); } TIMER_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc); +#endif +TIMER_OF_DECLARE(tegra210_timer, "nvidia,tegra210-timer", tegra_init_timer); +TIMER_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra_init_timer); diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 595124074821..c364027638e1 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) if (IS_ERR(parent)) return -ENODEV; + /* Bail out if both clocks point to fck */ + if (clk_is_match(parent, timer->fclk)) + return 0; + ret = clk_set_parent(timer->fclk, parent); if (ret < 0) pr_err("%s: failed to set parent\n", __func__); @@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) timer->pdev = pdev; pm_runtime_enable(dev); - pm_runtime_irq_safe(dev); if (!timer->reserved) { ret = pm_runtime_get_sync(dev); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index ed5e42461094..ad48fd52cb53 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -250,6 +250,7 @@ void proc_coredump_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; + struct task_struct *parent; __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) @@ -262,8 +263,14 @@ void proc_coredump_connector(struct task_struct *task) ev->what = PROC_EVENT_COREDUMP; ev->event_data.coredump.process_pid = task->pid; ev->event_data.coredump.process_tgid = task->tgid; - ev->event_data.coredump.parent_pid = task->real_parent->pid; - ev->event_data.coredump.parent_tgid = task->real_parent->tgid; + + rcu_read_lock(); + if (pid_alive(task)) { + parent = rcu_dereference(task->real_parent); + ev->event_data.coredump.parent_pid = parent->pid; + ev->event_data.coredump.parent_tgid = parent->tgid; + } + rcu_read_unlock(); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ @@ -276,6 +283,7 @@ void proc_exit_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; + struct task_struct *parent; __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8); if (atomic_read(&proc_event_num_listeners) < 1) @@ -290,8 +298,14 @@ void proc_exit_connector(struct task_struct *task) ev->event_data.exit.process_tgid = task->tgid; ev->event_data.exit.exit_code = task->exit_code; ev->event_data.exit.exit_signal = task->exit_signal; - ev->event_data.exit.parent_pid = task->real_parent->pid; - ev->event_data.exit.parent_tgid = task->real_parent->tgid; + + rcu_read_lock(); + if (pid_alive(task)) { + parent = 
rcu_dereference(task->real_parent); + ev->event_data.exit.parent_pid = parent->pid; + ev->event_data.exit.parent_tgid = parent->tgid; + } + rcu_read_unlock(); memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 608af20a3494..b22e6bba71f1 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -207,8 +207,6 @@ comment "CPU frequency scaling drivers" config CPUFREQ_DT tristate "Generic DT based cpufreq driver" depends on HAVE_CLK && OF - # if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y: - depends on !CPU_THERMAL || THERMAL select CPUFREQ_DT_PLATDEV select PM_OPP help @@ -327,7 +325,6 @@ endif config QORIQ_CPUFREQ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" depends on OF && COMMON_CLK && (PPC_E500MC || ARM || ARM64) - depends on !CPU_THERMAL || THERMAL select CLK_QORIQ help This adds the CPUFreq driver support for Freescale QorIQ SoCs diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 688f10227793..179a1d302f48 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -25,12 +25,21 @@ config ARM_ARMADA_37XX_CPUFREQ This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. The Armada 37xx PMU supports 4 frequency and VDD levels. +config ARM_ARMADA_8K_CPUFREQ + tristate "Armada 8K CPUFreq driver" + depends on ARCH_MVEBU && CPUFREQ_DT + help + This enables the CPUFreq driver support for Marvell + Armada8k SOCs. + Armada8K device has the AP806 which supports scaling + to any full integer divider. + + If in doubt, say N. + # big LITTLE core layer and glue drivers config ARM_BIG_LITTLE_CPUFREQ tristate "Generic ARM big LITTLE CPUfreq driver" depends on ARM_CPU_TOPOLOGY && HAVE_CLK - # if CPU_THERMAL is on and THERMAL=m, ARM_BIT_LITTLE_CPUFREQ cannot be =y - depends on !CPU_THERMAL || THERMAL select PM_OPP help This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. @@ -38,7 +47,6 @@ config ARM_BIG_LITTLE_CPUFREQ config ARM_SCPI_CPUFREQ tristate "SCPI based CPUfreq driver" depends on ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI - depends on !CPU_THERMAL || THERMAL help This adds the CPUfreq driver support for ARM platforms using SCPI protocol for CPU power management. @@ -93,7 +101,6 @@ config ARM_KIRKWOOD_CPUFREQ config ARM_MEDIATEK_CPUFREQ tristate "CPU Frequency scaling support for MediaTek SoCs" depends on ARCH_MEDIATEK && REGULATOR - depends on !CPU_THERMAL || THERMAL select PM_OPP help This adds the CPUFreq driver support for MediaTek SoCs. @@ -233,7 +240,6 @@ config ARM_SA1110_CPUFREQ config ARM_SCMI_CPUFREQ tristate "SCMI based CPUfreq driver" depends on ARM_SCMI_PROTOCOL || COMPILE_TEST - depends on !CPU_THERMAL || THERMAL select PM_OPP help This adds the CPUfreq driver support for ARM platforms using SCMI @@ -272,8 +278,8 @@ config ARM_TEGRA20_CPUFREQ This adds the CPUFreq driver support for Tegra20 SOCs. config ARM_TEGRA124_CPUFREQ - tristate "Tegra124 CPUFreq support" - depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR + bool "Tegra124 CPUFreq support" + depends on ARCH_TEGRA && CPUFREQ_DT default y help This adds the CPUFreq driver support for Tegra124 SOCs. 
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 08c071be2491..689b26c6f949 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_X86_SFI_CPUFREQ) += sfi-cpufreq.o obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o +obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ) += armada-8k-cpufreq.o obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index d62fd374d5c7..c72258a44ba4 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void) { int ret; - if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) + if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) { + pr_debug("Boost capabilities not present in the processor\n"); return; + } acpi_cpufreq_driver.set_boost = set_boost; acpi_cpufreq_driver.boost_enabled = boost_state(0); diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index cf62a1f64dd7..7fe52fcddcf1 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -487,6 +487,8 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = arm_bL_ops->get_transition_latency(cpu_dev); + dev_pm_opp_of_register_em(policy->cpus); + if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c new file mode 100644 index 000000000000..b3f4bd647e9b --- /dev/null +++ b/drivers/cpufreq/armada-8k-cpufreq.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * CPUFreq support for Armada 8K + * + * Copyright (C) 2018 Marvell + * + * Omri Itach + * Gregory Clement + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Setup the opps list with the divider for the max frequency, that + * will be filled at runtime. + */ +static const int opps_div[] __initconst = {1, 2, 3, 4}; + +static struct platform_device *armada_8k_pdev; + +struct freq_table { + struct device *cpu_dev; + unsigned int freq[ARRAY_SIZE(opps_div)]; +}; + +/* If the CPUs share the same clock, then they are in the same cluster. */ +static void __init armada_8k_get_sharing_cpus(struct clk *cur_clk, + struct cpumask *cpumask) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct device *cpu_dev; + struct clk *clk; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_warn("Failed to get cpu%d device\n", cpu); + continue; + } + + clk = clk_get(cpu_dev, 0); + if (IS_ERR(clk)) { + pr_warn("Cannot get clock for CPU %d\n", cpu); + } else { + if (clk_is_match(clk, cur_clk)) + cpumask_set_cpu(cpu, cpumask); + + clk_put(clk); + } + } +} + +static int __init armada_8k_add_opp(struct clk *clk, struct device *cpu_dev, + struct freq_table *freq_tables, + int opps_index) +{ + unsigned int cur_frequency; + unsigned int freq; + int i, ret; + + /* Get nominal (current) CPU frequency. 
*/ + cur_frequency = clk_get_rate(clk); + if (!cur_frequency) { + dev_err(cpu_dev, "Failed to get clock rate for this CPU\n"); + return -EINVAL; + } + + freq_tables[opps_index].cpu_dev = cpu_dev; + + for (i = 0; i < ARRAY_SIZE(opps_div); i++) { + freq = cur_frequency / opps_div[i]; + + ret = dev_pm_opp_add(cpu_dev, freq, 0); + if (ret) + return ret; + + freq_tables[opps_index].freq[i] = freq; + } + + return 0; +} + +static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables) +{ + int opps_index, nb_cpus = num_possible_cpus(); + + for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) { + int i; + + /* If cpu_dev is NULL then we reached the end of the array */ + if (!freq_tables[opps_index].cpu_dev) + break; + + for (i = 0; i < ARRAY_SIZE(opps_div); i++) { + /* + * A 0Hz frequency is not valid, this meant + * that it was not yet initialized so there is + * no more opp to free + */ + if (freq_tables[opps_index].freq[i] == 0) + break; + + dev_pm_opp_remove(freq_tables[opps_index].cpu_dev, + freq_tables[opps_index].freq[i]); + } + } + + kfree(freq_tables); +} + +static int __init armada_8k_cpufreq_init(void) +{ + int ret = 0, opps_index = 0, cpu, nb_cpus; + struct freq_table *freq_tables; + struct device_node *node; + struct cpumask cpus; + + node = of_find_compatible_node(NULL, NULL, "marvell,ap806-cpu-clock"); + if (!node || !of_device_is_available(node)) { + of_node_put(node); + return -ENODEV; + } + + nb_cpus = num_possible_cpus(); + freq_tables = kcalloc(nb_cpus, sizeof(*freq_tables), GFP_KERNEL); + cpumask_copy(&cpus, cpu_possible_mask); + + /* + * For each CPU, this loop registers the operating points + * supported (which are the nominal CPU frequency and full integer + * divisions of it). + */ + for_each_cpu(cpu, &cpus) { + struct cpumask shared_cpus; + struct device *cpu_dev; + struct clk *clk; + + cpu_dev = get_cpu_device(cpu); + + if (!cpu_dev) { + pr_err("Cannot get CPU %d\n", cpu); + continue; + } + + clk = clk_get(cpu_dev, 0); + + if (IS_ERR(clk)) { + pr_err("Cannot get clock for CPU %d\n", cpu); + ret = PTR_ERR(clk); + goto remove_opp; + } + + ret = armada_8k_add_opp(clk, cpu_dev, freq_tables, opps_index); + if (ret) { + clk_put(clk); + goto remove_opp; + } + + opps_index++; + cpumask_clear(&shared_cpus); + armada_8k_get_sharing_cpus(clk, &shared_cpus); + dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus); + cpumask_andnot(&cpus, &cpus, &shared_cpus); + clk_put(clk); + } + + armada_8k_pdev = platform_device_register_simple("cpufreq-dt", -1, + NULL, 0); + ret = PTR_ERR_OR_ZERO(armada_8k_pdev); + if (ret) + goto remove_opp; + + platform_set_drvdata(armada_8k_pdev, freq_tables); + + return 0; + +remove_opp: + armada_8k_cpufreq_free_table(freq_tables); + return ret; +} +module_init(armada_8k_cpufreq_init); + +static void __exit armada_8k_cpufreq_exit(void) +{ + struct freq_table *freq_tables = platform_get_drvdata(armada_8k_pdev); + + platform_device_unregister(armada_8k_pdev); + armada_8k_cpufreq_free_table(freq_tables); +} +module_exit(armada_8k_cpufreq_exit); + +MODULE_AUTHOR("Gregory Clement "); +MODULE_DESCRIPTION("Armada 8K cpufreq driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index fd25c21cee72..2ae978d27e61 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -42,6 +42,66 @@ */ static struct cppc_cpudata **all_cpu_data; +struct cppc_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE +1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + 
+static bool apply_hisi_workaround; + +static struct cppc_workaround_oem_info wa_info[] = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP07 ", + .oem_revision = 0, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0, + } +}; + +static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu, + unsigned int perf); + +/* + * HISI platform does not support delivered performance counter and + * reference performance counter. It can calculate the performance using the + * platform specific mechanism. We reuse the desired performance register to + * store the real performance calculated by the platform. + */ +static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpunum) +{ + struct cppc_cpudata *cpudata = all_cpu_data[cpunum]; + u64 desired_perf; + int ret; + + ret = cppc_get_desired_perf(cpunum, &desired_perf); + if (ret < 0) + return -EIO; + + return cppc_cpufreq_perf_to_khz(cpudata, desired_perf); +} + +static void cppc_check_hisi_workaround(void) +{ + struct acpi_table_header *tbl; + acpi_status status = AE_OK; + int i; + + status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl); + if (ACPI_FAILURE(status) || !tbl) + return; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + wa_info[i].oem_revision == tbl->oem_revision) + apply_hisi_workaround = true; + } +} + /* Callback function used to retrieve the max frequency from DMI */ static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private) { @@ -334,6 +394,9 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum) struct cppc_cpudata *cpu = all_cpu_data[cpunum]; int ret; + if (apply_hisi_workaround) + return hisi_cppc_cpufreq_get_rate(cpunum); + ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0); if (ret) return ret; @@ -386,6 +449,8 @@ static int __init cppc_cpufreq_init(void) goto out; } + cppc_check_hisi_workaround(); + ret = cpufreq_register_driver(&cppc_cpufreq_driver); if (ret) goto out; diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index b1c5468dca16..47729a22c159 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -119,6 +119,7 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "mediatek,mt8176", }, { .compatible = "nvidia,tegra124", }, + { .compatible = "nvidia,tegra210", }, { .compatible = "qcom,apq8096", }, { .compatible = "qcom,msm8996", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index e58bfcb1169e..bde28878725b 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -30,7 +29,6 @@ struct private_data { struct opp_table *opp_table; struct device *cpu_dev; - struct thermal_cooling_device *cdev; const char *reg_name; bool have_static_opps; }; @@ -280,6 +278,8 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = transition_latency; policy->dvfs_possible_from_any_cpu = true; + dev_pm_opp_of_register_em(policy->cpus); + return 0; out_free_cpufreq_table: @@ -297,11 +297,25 @@ out_put_clk: return ret; } +static int cpufreq_online(struct cpufreq_policy *policy) +{ + /* We did light-weight tear down earlier, nothing to do here */ + return 0; +} + +static int cpufreq_offline(struct cpufreq_policy *policy) +{ + /* + * Preserve policy->driver_data and don't free resources 
on light-weight + * tear down. + */ + return 0; +} + static int cpufreq_exit(struct cpufreq_policy *policy) { struct private_data *priv = policy->driver_data; - cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); if (priv->have_static_opps) dev_pm_opp_of_cpumask_remove_table(policy->related_cpus); @@ -314,21 +328,16 @@ static int cpufreq_exit(struct cpufreq_policy *policy) return 0; } -static void cpufreq_ready(struct cpufreq_policy *policy) -{ - struct private_data *priv = policy->driver_data; - - priv->cdev = of_cpufreq_cooling_register(policy); -} - static struct cpufreq_driver dt_cpufreq_driver = { - .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, .target_index = set_target, .get = cpufreq_generic_get, .init = cpufreq_init, .exit = cpufreq_exit, - .ready = cpufreq_ready, + .online = cpufreq_online, + .offline = cpufreq_offline, .name = "cpufreq-dt", .attr = cpufreq_dt_attr, .suspend = cpufreq_generic_suspend, diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index e35a886e00bc..0e626b00053b 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -545,13 +546,13 @@ EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us); * SYSFS INTERFACE * *********************************************************************/ static ssize_t show_boost(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled); } -static ssize_t store_boost(struct kobject *kobj, struct attribute *attr, - const char *buf, size_t count) +static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) { int ret, enable; @@ -1200,28 +1201,39 @@ static int cpufreq_online(unsigned int cpu) return -ENOMEM; } - cpumask_copy(policy->cpus, cpumask_of(cpu)); + if (!new_policy && cpufreq_driver->online) { + ret = cpufreq_driver->online(policy); + if (ret) { + pr_debug("%s: %d: initialization failed\n", __func__, + __LINE__); + goto out_exit_policy; + } - /* call driver. From then on the cpufreq must be able - * to accept all calls to ->verify and ->setpolicy for this CPU - */ - ret = cpufreq_driver->init(policy); - if (ret) { - pr_debug("initialization failed\n"); - goto out_free_policy; - } + /* Recover policy->cpus using related_cpus */ + cpumask_copy(policy->cpus, policy->related_cpus); + } else { + cpumask_copy(policy->cpus, cpumask_of(cpu)); - ret = cpufreq_table_validate_and_sort(policy); - if (ret) - goto out_exit_policy; + /* + * Call driver. From then on the cpufreq must be able + * to accept all calls to ->verify and ->setpolicy for this CPU. + */ + ret = cpufreq_driver->init(policy); + if (ret) { + pr_debug("%s: %d: initialization failed\n", __func__, + __LINE__); + goto out_free_policy; + } - down_write(&policy->rwsem); + ret = cpufreq_table_validate_and_sort(policy); + if (ret) + goto out_exit_policy; - if (new_policy) { /* related_cpus should at least include policy->cpus. */ cpumask_copy(policy->related_cpus, policy->cpus); } + down_write(&policy->rwsem); /* * affected cpus must always be the one, which are online. We aren't * managing offline cpus here. 
@@ -1305,8 +1317,6 @@ static int cpufreq_online(unsigned int cpu) if (ret) { pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n", __func__, cpu, ret); - /* cpufreq_policy_free() will notify based on this */ - new_policy = false; goto out_destroy_policy; } @@ -1318,6 +1328,10 @@ static int cpufreq_online(unsigned int cpu) if (cpufreq_driver->ready) cpufreq_driver->ready(policy); + if (IS_ENABLED(CONFIG_CPU_THERMAL) && + cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) + policy->cdev = of_cpufreq_cooling_register(policy); + pr_debug("initialization complete\n"); return 0; @@ -1405,6 +1419,12 @@ static int cpufreq_offline(unsigned int cpu) goto unlock; } + if (IS_ENABLED(CONFIG_CPU_THERMAL) && + cpufreq_driver->flags & CPUFREQ_IS_COOLING_DEV) { + cpufreq_cooling_unregister(policy->cdev); + policy->cdev = NULL; + } + if (cpufreq_driver->stop_cpu) cpufreq_driver->stop_cpu(policy); @@ -1412,11 +1432,12 @@ static int cpufreq_offline(unsigned int cpu) cpufreq_exit_governor(policy); /* - * Perform the ->exit() even during light-weight tear-down, - * since this is a core component, and is essential for the - * subsequent light-weight ->init() to succeed. + * Perform the ->offline() during light-weight tear-down, as + * that allows fast recovery when the CPU comes back. */ - if (cpufreq_driver->exit) { + if (cpufreq_driver->offline) { + cpufreq_driver->offline(policy); + } else if (cpufreq_driver->exit) { cpufreq_driver->exit(policy); policy->freq_table = NULL; } @@ -1445,8 +1466,13 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) cpumask_clear_cpu(cpu, policy->real_cpus); remove_cpu_dev_symlink(policy, dev); - if (cpumask_empty(policy->real_cpus)) + if (cpumask_empty(policy->real_cpus)) { + /* We did light-weight exit earlier, do full tear down now */ + if (cpufreq_driver->offline) + cpufreq_driver->exit(policy); + cpufreq_policy_free(policy); + } } /** @@ -2192,12 +2218,25 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) } EXPORT_SYMBOL(cpufreq_get_policy); -/* - * policy : current policy. - * new_policy: policy to be set. +/** + * cpufreq_set_policy - Modify cpufreq policy parameters. + * @policy: Policy object to modify. + * @new_policy: New policy data. + * + * Pass @new_policy to the cpufreq driver's ->verify() callback, run the + * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to + * the driver's ->verify() callback again and run the notifiers for it again + * with the CPUFREQ_NOTIFY value. Next, copy the min and max parameters + * of @new_policy to @policy and either invoke the driver's ->setpolicy() + * callback (if present) or carry out a governor update for @policy. That is, + * run the current governor's ->limits() callback (if the governor field in + * @new_policy points to the same object as the one in @policy) or replace the + * governor for @policy with the new one stored in @new_policy. + * + * The cpuinfo part of @policy is not updated by this function. 
*/ static int cpufreq_set_policy(struct cpufreq_policy *policy, - struct cpufreq_policy *new_policy) + struct cpufreq_policy *new_policy) { struct cpufreq_governor *old_gov; int ret; @@ -2247,11 +2286,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, if (cpufreq_driver->setpolicy) { policy->policy = new_policy->policy; pr_debug("setting range\n"); - return cpufreq_driver->setpolicy(new_policy); + return cpufreq_driver->setpolicy(policy); } if (new_policy->governor == policy->governor) { - pr_debug("cpufreq: governor limits update\n"); + pr_debug("governor limits update\n"); cpufreq_governor_limits(policy); return 0; } @@ -2272,7 +2311,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, if (!ret) { ret = cpufreq_start_governor(policy); if (!ret) { - pr_debug("cpufreq: governor change\n"); + pr_debug("governor change\n"); sched_cpufreq_governor_change(policy, old_gov); return 0; } @@ -2293,11 +2332,14 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, } /** - * cpufreq_update_policy - re-evaluate an existing cpufreq policy - * @cpu: CPU which shall be re-evaluated + * cpufreq_update_policy - Re-evaluate an existing cpufreq policy. + * @cpu: CPU to re-evaluate the policy for. * - * Useful for policy notifiers which have different necessities - * at different times. + * Update the current frequency for the cpufreq policy of @cpu and use + * cpufreq_set_policy() to re-apply the min and max limits saved in the + * user_policy sub-structure of that policy, which triggers the evaluation + * of policy notifiers and the cpufreq driver's ->verify() callback for the + * policy in question, among other things. */ void cpufreq_update_policy(unsigned int cpu) { @@ -2312,23 +2354,18 @@ void cpufreq_update_policy(unsigned int cpu) if (policy_is_inactive(policy)) goto unlock; - pr_debug("updating policy for CPU %u\n", cpu); - memcpy(&new_policy, policy, sizeof(*policy)); - new_policy.min = policy->user_policy.min; - new_policy.max = policy->user_policy.max; - /* * BIOS might change freq behind our back * -> ask driver for current freq and notify governors about a change */ - if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { - if (cpufreq_suspended) - goto unlock; + if (cpufreq_driver->get && !cpufreq_driver->setpolicy && + (cpufreq_suspended || WARN_ON(!cpufreq_update_current_freq(policy)))) + goto unlock; - new_policy.cur = cpufreq_update_current_freq(policy); - if (WARN_ON(!new_policy.cur)) - goto unlock; - } + pr_debug("updating policy for CPU %u\n", cpu); + memcpy(&new_policy, policy, sizeof(*policy)); + new_policy.min = policy->user_policy.min; + new_policy.max = policy->user_policy.max; cpufreq_set_policy(policy, &new_policy); @@ -2479,7 +2516,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) driver_data->target) || (driver_data->setpolicy && (driver_data->target_index || driver_data->target)) || - (!!driver_data->get_intermediate != !!driver_data->target_intermediate)) + (!driver_data->get_intermediate != !driver_data->target_intermediate) || + (!driver_data->online != !driver_data->offline)) return -EINVAL; pr_debug("trying to register driver %s\n", driver_data->name); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 1572129844a5..e2db5581489a 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -31,26 +31,27 @@ static void cpufreq_stats_update(struct cpufreq_stats *stats) { unsigned long long cur_time = get_jiffies_64(); - spin_lock(&cpufreq_stats_lock); 
stats->time_in_state[stats->last_index] += cur_time - stats->last_time; stats->last_time = cur_time; - spin_unlock(&cpufreq_stats_lock); } static void cpufreq_stats_clear_table(struct cpufreq_stats *stats) { unsigned int count = stats->max_state; + spin_lock(&cpufreq_stats_lock); memset(stats->time_in_state, 0, count * sizeof(u64)); memset(stats->trans_table, 0, count * count * sizeof(int)); stats->last_time = get_jiffies_64(); stats->total_trans = 0; + spin_unlock(&cpufreq_stats_lock); } static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) { return sprintf(buf, "%d\n", policy->stats->total_trans); } +cpufreq_freq_attr_ro(total_trans); static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) { @@ -61,7 +62,10 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) if (policy->fast_switch_enabled) return 0; + spin_lock(&cpufreq_stats_lock); cpufreq_stats_update(stats); + spin_unlock(&cpufreq_stats_lock); + for (i = 0; i < stats->state_num; i++) { len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i], (unsigned long long) @@ -69,6 +73,7 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) } return len; } +cpufreq_freq_attr_ro(time_in_state); static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, size_t count) @@ -77,6 +82,7 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf, cpufreq_stats_clear_table(policy->stats); return count; } +cpufreq_freq_attr_wo(reset); static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) { @@ -126,10 +132,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) } cpufreq_freq_attr_ro(trans_table); -cpufreq_freq_attr_ro(total_trans); -cpufreq_freq_attr_ro(time_in_state); -cpufreq_freq_attr_wo(reset); - static struct attribute *default_attrs[] = { &total_trans.attr, &time_in_state.attr, @@ -240,9 +242,11 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy, if (old_index == -1 || new_index == -1 || old_index == new_index) return; + spin_lock(&cpufreq_stats_lock); cpufreq_stats_update(stats); stats->last_index = new_index; stats->trans_table[old_index * stats->max_state + new_index]++; stats->total_trans++; + spin_unlock(&cpufreq_stats_lock); } diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c index d54a27c99121..940fe85db97a 100644 --- a/drivers/cpufreq/davinci-cpufreq.c +++ b/drivers/cpufreq/davinci-cpufreq.c @@ -23,13 +23,10 @@ #include #include #include +#include #include #include -#include -#include -#include - struct davinci_cpufreq { struct device *dev; struct clk *armclk; diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c index 60bea302abbe..2d3ef208dd70 100644 --- a/drivers/cpufreq/e_powersaver.c +++ b/drivers/cpufreq/e_powersaver.c @@ -323,9 +323,8 @@ static int eps_cpu_init(struct cpufreq_policy *policy) states = 2; /* Allocate private data and frequency table for current cpu */ - centaur = kzalloc(sizeof(*centaur) - + (states + 1) * sizeof(struct cpufreq_frequency_table), - GFP_KERNEL); + centaur = kzalloc(struct_size(centaur, freq_table, states + 1), + GFP_KERNEL); if (!centaur) return -ENOMEM; eps_cpu[0] = centaur; diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 9fedf627e000..a4ff09f91c8f 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -52,7 +51,6 
@@ static struct clk_bulk_data clks[] = { }; static struct device *cpu_dev; -static struct thermal_cooling_device *cdev; static bool free_opp; static struct cpufreq_frequency_table *freq_table; static unsigned int max_freq; @@ -193,16 +191,6 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) return 0; } -static void imx6q_cpufreq_ready(struct cpufreq_policy *policy) -{ - cdev = of_cpufreq_cooling_register(policy); - - if (!cdev) - dev_err(cpu_dev, - "running cpufreq without cooling device: %ld\n", - PTR_ERR(cdev)); -} - static int imx6q_cpufreq_init(struct cpufreq_policy *policy) { int ret; @@ -210,26 +198,19 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy) policy->clk = clks[ARM].clk; ret = cpufreq_generic_init(policy, freq_table, transition_latency); policy->suspend_freq = max_freq; + dev_pm_opp_of_register_em(policy->cpus); return ret; } -static int imx6q_cpufreq_exit(struct cpufreq_policy *policy) -{ - cpufreq_cooling_unregister(cdev); - - return 0; -} - static struct cpufreq_driver imx6q_cpufreq_driver = { - .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, .target_index = imx6q_set_target, .get = cpufreq_generic_get, .init = imx6q_cpufreq_init, - .exit = imx6q_cpufreq_exit, .name = "imx6q-cpufreq", - .ready = imx6q_cpufreq_ready, .attr = cpufreq_generic_attr, .suspend = cpufreq_generic_suspend, }; diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index dd66decf2087..002f5169d4eb 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -50,6 +50,8 @@ #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) #define fp_toint(X) ((X) >> FRAC_BITS) +#define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3)) + #define EXT_BITS 6 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS) #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS) @@ -895,7 +897,7 @@ static void intel_pstate_update_policies(void) /************************** sysfs begin ************************/ #define show_one(file_name, object) \ static ssize_t show_##file_name \ - (struct kobject *kobj, struct attribute *attr, char *buf) \ + (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ { \ return sprintf(buf, "%u\n", global.object); \ } @@ -904,7 +906,7 @@ static ssize_t intel_pstate_show_status(char *buf); static int intel_pstate_update_status(const char *buf, size_t size); static ssize_t show_status(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { ssize_t ret; @@ -915,7 +917,7 @@ static ssize_t show_status(struct kobject *kobj, return ret; } -static ssize_t store_status(struct kobject *a, struct attribute *b, +static ssize_t store_status(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { char *p = memchr(buf, '\n', count); @@ -929,7 +931,7 @@ static ssize_t store_status(struct kobject *a, struct attribute *b, } static ssize_t show_turbo_pct(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpudata *cpu; int total, no_turbo, turbo_pct; @@ -955,7 +957,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj, } static ssize_t show_num_pstates(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { struct cpudata *cpu; int total; @@ -976,7 +978,7 @@ static ssize_t show_num_pstates(struct kobject *kobj, } static ssize_t show_no_turbo(struct kobject 
*kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { ssize_t ret; @@ -998,7 +1000,7 @@ static ssize_t show_no_turbo(struct kobject *kobj, return ret; } -static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, +static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1045,7 +1047,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, return count; } -static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, +static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1075,7 +1077,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, return count; } -static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, +static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1107,12 +1109,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, } static ssize_t show_hwp_dynamic_boost(struct kobject *kobj, - struct attribute *attr, char *buf) + struct kobj_attribute *attr, char *buf) { return sprintf(buf, "%u\n", hwp_boost); } -static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b, +static ssize_t store_hwp_dynamic_boost(struct kobject *a, + struct kobj_attribute *b, const char *buf, size_t count) { unsigned int input; @@ -1444,12 +1447,6 @@ static int knl_get_turbo_pstate(void) return ret; } -static int intel_pstate_get_base_pstate(struct cpudata *cpu) -{ - return global.no_turbo || global.turbo_disabled ? - cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; -} - static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) { trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu); @@ -1470,11 +1467,9 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) static void intel_pstate_max_within_limits(struct cpudata *cpu) { - int pstate; + int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); update_turbo_state(); - pstate = intel_pstate_get_base_pstate(cpu); - pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); intel_pstate_set_pstate(cpu, pstate); } @@ -1678,17 +1673,14 @@ static inline int32_t get_avg_pstate(struct cpudata *cpu) static inline int32_t get_target_pstate(struct cpudata *cpu) { struct sample *sample = &cpu->sample; - int32_t busy_frac, boost; + int32_t busy_frac; int target, avg_pstate; busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift, sample->tsc); - boost = cpu->iowait_boost; - cpu->iowait_boost >>= 1; - - if (busy_frac < boost) - busy_frac = boost; + if (busy_frac < cpu->iowait_boost) + busy_frac = cpu->iowait_boost; sample->busy_scaled = busy_frac * 100; @@ -1715,11 +1707,9 @@ static inline int32_t get_target_pstate(struct cpudata *cpu) static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate) { - int max_pstate = intel_pstate_get_base_pstate(cpu); - int min_pstate; + int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); + int max_pstate = max(min_pstate, cpu->max_perf_ratio); - min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio); - max_pstate = max(min_pstate, cpu->max_perf_ratio); return clamp_t(int, pstate, min_pstate, max_pstate); } @@ -1767,29 +1757,30 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time, if (smp_processor_id() != cpu->cpu) return; + delta_ns = time - 
cpu->last_update; if (flags & SCHED_CPUFREQ_IOWAIT) { - cpu->iowait_boost = int_tofp(1); - cpu->last_update = time; - /* - * The last time the busy was 100% so P-state was max anyway - * so avoid overhead of computation. - */ - if (fp_toint(cpu->sample.busy_scaled) == 100) - return; - - goto set_pstate; + /* Start over if the CPU may have been idle. */ + if (delta_ns > TICK_NSEC) { + cpu->iowait_boost = ONE_EIGHTH_FP; + } else if (cpu->iowait_boost) { + cpu->iowait_boost <<= 1; + if (cpu->iowait_boost > int_tofp(1)) + cpu->iowait_boost = int_tofp(1); + } else { + cpu->iowait_boost = ONE_EIGHTH_FP; + } } else if (cpu->iowait_boost) { /* Clear iowait_boost if the CPU may have been idle. */ - delta_ns = time - cpu->last_update; if (delta_ns > TICK_NSEC) cpu->iowait_boost = 0; + else + cpu->iowait_boost >>= 1; } cpu->last_update = time; delta_ns = time - cpu->sample.time; if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL) return; -set_pstate: if (intel_pstate_sample(cpu, time)) intel_pstate_adjust_pstate(cpu); } @@ -1976,7 +1967,8 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, if (hwp_active) { intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state); } else { - max_state = intel_pstate_get_base_pstate(cpu); + max_state = global.no_turbo || global.turbo_disabled ? + cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; turbo_max = cpu->pstate.turbo_pstate; } @@ -2475,6 +2467,7 @@ static bool __init intel_pstate_no_acpi_pss(void) kfree(pss); } + pr_debug("ACPI _PSS not found\n"); return true; } @@ -2485,9 +2478,14 @@ static bool __init intel_pstate_no_acpi_pcch(void) status = acpi_get_handle(NULL, "\\_SB", &handle); if (ACPI_FAILURE(status)) - return true; + goto not_found; + + if (acpi_has_method(handle, "PCCH")) + return false; - return !acpi_has_method(handle, "PCCH"); +not_found: + pr_debug("ACPI PCCH not found\n"); + return true; } static bool __init intel_pstate_has_acpi_ppc(void) @@ -2502,6 +2500,7 @@ static bool __init intel_pstate_has_acpi_ppc(void) if (acpi_has_method(pr->handle, "_PPC")) return true; } + pr_debug("ACPI _PPC not found\n"); return false; } @@ -2539,8 +2538,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void) id = x86_match_cpu(intel_pstate_cpu_oob_ids); if (id) { rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); - if ( misc_pwr & (1 << 8)) + if (misc_pwr & (1 << 8)) { + pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n"); return true; + } } idx = acpi_match_platform_list(plat_info); @@ -2606,22 +2607,28 @@ static int __init intel_pstate_init(void) } } else { id = x86_match_cpu(intel_pstate_cpu_ids); - if (!id) + if (!id) { + pr_info("CPU ID not supported\n"); return -ENODEV; + } copy_cpu_funcs((struct pstate_funcs *)id->driver_data); } - if (intel_pstate_msrs_not_valid()) + if (intel_pstate_msrs_not_valid()) { + pr_info("Invalid MSRs\n"); return -ENODEV; + } hwp_cpu_matched: /* * The Intel pstate driver will be ignored if the platform * firmware has its own power management modes. 
*/ - if (intel_pstate_platform_pwr_mgmt_exists()) + if (intel_pstate_platform_pwr_mgmt_exists()) { + pr_info("P-states controlled by the platform\n"); return -ENODEV; + } if (!hwp_active && hwp_only) return -ENOTSUPP; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 279bd9e9fa95..fb546e0d0356 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -851,7 +851,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) case TYPE_POWERSAVER: pr_cont("Powersaver supported\n"); break; - }; + } /* Doesn't hurt */ longhaul_setup_southbridge(); diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index eb8920d39818..48e9829274c6 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -14,7 +14,6 @@ #include #include -#include #include #include #include @@ -48,7 +47,6 @@ struct mtk_cpu_dvfs_info { struct regulator *sram_reg; struct clk *cpu_clk; struct clk *inter_clk; - struct thermal_cooling_device *cdev; struct list_head list_head; int intermediate_voltage; bool need_voltage_tracking; @@ -307,13 +305,6 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, #define DYNAMIC_POWER "dynamic-power-coefficient" -static void mtk_cpufreq_ready(struct cpufreq_policy *policy) -{ - struct mtk_cpu_dvfs_info *info = policy->driver_data; - - info->cdev = of_cpufreq_cooling_register(policy); -} - static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) { struct device *cpu_dev; @@ -465,6 +456,8 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) policy->driver_data = info; policy->clk = info->cpu_clk; + dev_pm_opp_of_register_em(policy->cpus); + return 0; } @@ -472,7 +465,6 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) { struct mtk_cpu_dvfs_info *info = policy->driver_data; - cpufreq_cooling_unregister(info->cdev); dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table); return 0; @@ -480,13 +472,13 @@ static int mtk_cpufreq_exit(struct cpufreq_policy *policy) static struct cpufreq_driver mtk_cpufreq_driver = { .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | - CPUFREQ_HAVE_GOVERNOR_PER_POLICY, + CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, .target_index = mtk_cpufreq_set_target, .get = cpufreq_generic_get, .init = mtk_cpufreq_init, .exit = mtk_cpufreq_exit, - .ready = mtk_cpufreq_ready, .name = "mtk-cpufreq", .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 71e81bbf031b..68052b74d28f 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c @@ -133,8 +133,10 @@ static int omap_cpu_init(struct cpufreq_policy *policy) /* FIXME: what's the actual transition time? 
*/ result = cpufreq_generic_init(policy, freq_table, 300 * 1000); - if (!result) + if (!result) { + dev_pm_opp_of_register_em(policy->cpus); return 0; + } freq_table_free(); fail: diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 099a849396f6..1e5e64643c3a 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -268,7 +268,7 @@ static int pcc_get_offset(int cpu) if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) { ret = -ENODEV; goto out_free; - }; + } offset = &(pccp->package.elements[0]); if (!offset || offset->type != ACPI_TYPE_INTEGER) { diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 7e7ad3879c4e..d2230812fa4b 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -244,6 +244,7 @@ static int init_powernv_pstates(void) u32 len_ids, len_freqs; u32 pstate_min, pstate_max, pstate_nominal; u32 pstate_turbo, pstate_ultra_turbo; + int rc = -ENODEV; power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); if (!power_mgt) { @@ -327,8 +328,11 @@ next: powernv_freqs[i].frequency = freq * 1000; /* kHz */ powernv_freqs[i].driver_data = id & 0xFF; - revmap_data = (struct pstate_idx_revmap_data *) - kmalloc(sizeof(*revmap_data), GFP_KERNEL); + revmap_data = kmalloc(sizeof(*revmap_data), GFP_KERNEL); + if (!revmap_data) { + rc = -ENOMEM; + goto out; + } revmap_data->pstate_id = id & 0xFF; revmap_data->cpufreq_table_idx = i; @@ -357,7 +361,7 @@ next: return 0; out: of_node_put(power_mgt); - return -ENODEV; + return rc; } /* Returns the CPU frequency corresponding to the pstate_id. */ diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index d83939a1b3d4..4b0b50403901 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -10,18 +10,21 @@ #include #include #include +#include #include #define LUT_MAX_ENTRIES 40U #define LUT_SRC GENMASK(31, 30) #define LUT_L_VAL GENMASK(7, 0) #define LUT_CORE_COUNT GENMASK(18, 16) +#define LUT_VOLT GENMASK(11, 0) #define LUT_ROW_SIZE 32 #define CLK_HW_DIV 2 /* Register offsets */ #define REG_ENABLE 0x0 -#define REG_LUT_TABLE 0x110 +#define REG_FREQ_LUT 0x110 +#define REG_VOLT_LUT 0x114 #define REG_PERF_STATE 0x920 static unsigned long cpu_hw_rate, xo_rate; @@ -70,11 +73,12 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, return policy->freq_table[index].frequency; } -static int qcom_cpufreq_hw_read_lut(struct device *dev, +static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, struct cpufreq_policy *policy, void __iomem *base) { u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq; + u32 volt; unsigned int max_cores = cpumask_weight(policy->cpus); struct cpufreq_frequency_table *table; @@ -83,23 +87,28 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev, return -ENOMEM; for (i = 0; i < LUT_MAX_ENTRIES; i++) { - data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE); + data = readl_relaxed(base + REG_FREQ_LUT + + i * LUT_ROW_SIZE); src = FIELD_GET(LUT_SRC, data); lval = FIELD_GET(LUT_L_VAL, data); core_count = FIELD_GET(LUT_CORE_COUNT, data); + data = readl_relaxed(base + REG_VOLT_LUT + + i * LUT_ROW_SIZE); + volt = FIELD_GET(LUT_VOLT, data) * 1000; + if (src) freq = xo_rate * lval / 1000; else freq = cpu_hw_rate / 1000; - /* Ignore boosts in the middle of the table */ - if (core_count != max_cores) { - table[i].frequency = CPUFREQ_ENTRY_INVALID; - } else { + if (freq != prev_freq && core_count == max_cores) { 
table[i].frequency = freq; - dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i, + dev_pm_opp_add(cpu_dev, freq * 1000, volt); + dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i, freq, core_count); + } else { + table[i].frequency = CPUFREQ_ENTRY_INVALID; } /* @@ -116,6 +125,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev, if (prev_cc != max_cores) { prev->frequency = prev_freq; prev->flags = CPUFREQ_BOOST_FREQ; + dev_pm_opp_add(cpu_dev, prev_freq * 1000, volt); } break; @@ -127,6 +137,7 @@ static int qcom_cpufreq_hw_read_lut(struct device *dev, table[i].frequency = CPUFREQ_TABLE_END; policy->freq_table = table; + dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); return 0; } @@ -159,10 +170,18 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) struct device *dev = &global_pdev->dev; struct of_phandle_args args; struct device_node *cpu_np; + struct device *cpu_dev; struct resource *res; void __iomem *base; int ret, index; + cpu_dev = get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + policy->cpu); + return -ENODEV; + } + cpu_np = of_cpu_device_node_get(policy->cpu); if (!cpu_np) return -EINVAL; @@ -199,12 +218,21 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) policy->driver_data = base + REG_PERF_STATE; - ret = qcom_cpufreq_hw_read_lut(dev, policy, base); + ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy, base); if (ret) { dev_err(dev, "Domain-%d failed to read LUT\n", index); goto error; } + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_err(cpu_dev, "Failed to add OPPs\n"); + ret = -ENODEV; + goto error; + } + + dev_pm_opp_of_register_em(policy->cpus); + policy->fast_switch_possible = true; return 0; @@ -215,8 +243,10 @@ error: static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy) { + struct device *cpu_dev = get_cpu_device(policy->cpu); void __iomem *base = policy->driver_data - REG_PERF_STATE; + dev_pm_opp_remove_all_dynamic(cpu_dev); kfree(policy->freq_table); devm_iounmap(&global_pdev->dev, base); @@ -231,7 +261,8 @@ static struct freq_attr *qcom_cpufreq_hw_attr[] = { static struct cpufreq_driver cpufreq_qcom_hw_driver = { .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK | - CPUFREQ_HAVE_GOVERNOR_PER_POLICY, + CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, .target_index = qcom_cpufreq_hw_target_index, .get = qcom_cpufreq_hw_get, @@ -296,7 +327,7 @@ static int __init qcom_cpufreq_hw_init(void) { return platform_driver_register(&qcom_cpufreq_hw_driver); } -subsys_initcall(qcom_cpufreq_hw_init); +device_initcall(qcom_cpufreq_hw_init); static void __exit qcom_cpufreq_hw_exit(void) { diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c index 2a3675c24032..dd64dcf89c74 100644 --- a/drivers/cpufreq/qcom-cpufreq-kryo.c +++ b/drivers/cpufreq/qcom-cpufreq-kryo.c @@ -42,7 +42,7 @@ enum _msm8996_version { NUM_OF_MSM8996_VERSIONS, }; -struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; +static struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) { @@ -75,7 +75,7 @@ static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void) static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) { - struct opp_table *opp_tables[NR_CPUS] = {0}; + struct opp_table **opp_tables; enum _msm8996_version msm8996_version; struct nvmem_cell *speedbin_nvmem; struct device_node *np; 
@@ -133,6 +133,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) } kfree(speedbin); + opp_tables = kcalloc(num_possible_cpus(), sizeof(*opp_tables), GFP_KERNEL); + if (!opp_tables) + return -ENOMEM; + for_each_possible_cpu(cpu) { cpu_dev = get_cpu_device(cpu); if (NULL == cpu_dev) { @@ -151,8 +155,10 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev) cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0); - if (!IS_ERR(cpufreq_dt_pdev)) + if (!IS_ERR(cpufreq_dt_pdev)) { + platform_set_drvdata(pdev, opp_tables); return 0; + } ret = PTR_ERR(cpufreq_dt_pdev); dev_err(cpu_dev, "Failed to register platform device\n"); @@ -163,13 +169,23 @@ free_opp: break; dev_pm_opp_put_supported_hw(opp_tables[cpu]); } + kfree(opp_tables); return ret; } static int qcom_cpufreq_kryo_remove(struct platform_device *pdev) { + struct opp_table **opp_tables = platform_get_drvdata(pdev); + unsigned int cpu; + platform_device_unregister(cpufreq_dt_pdev); + + for_each_possible_cpu(cpu) + dev_pm_opp_put_supported_hw(opp_tables[cpu]); + + kfree(opp_tables); + return 0; } diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 3d773f64b4df..4295e5476264 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -31,7 +30,6 @@ struct cpu_data { struct clk **pclk; struct cpufreq_frequency_table *table; - struct thermal_cooling_device *cdev; }; /* @@ -239,7 +237,6 @@ static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) { struct cpu_data *data = policy->driver_data; - cpufreq_cooling_unregister(data->cdev); kfree(data->pclk); kfree(data->table); kfree(data); @@ -258,23 +255,15 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy, return clk_set_parent(policy->clk, parent); } - -static void qoriq_cpufreq_ready(struct cpufreq_policy *policy) -{ - struct cpu_data *cpud = policy->driver_data; - - cpud->cdev = of_cpufreq_cooling_register(policy); -} - static struct cpufreq_driver qoriq_cpufreq_driver = { .name = "qoriq_cpufreq", - .flags = CPUFREQ_CONST_LOOPS, + .flags = CPUFREQ_CONST_LOOPS | + CPUFREQ_IS_COOLING_DEV, .init = qoriq_cpufreq_cpu_init, .exit = qoriq_cpufreq_cpu_exit, .verify = cpufreq_generic_frequency_table_verify, .target_index = qoriq_cpufreq_target, .get = cpufreq_generic_get, - .ready = qoriq_cpufreq_ready, .attr = cpufreq_generic_attr, }; diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index dbecd7667db2..5b4289460bc9 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -584,7 +584,7 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = { static int s5pv210_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; - int id; + int id, result = 0; /* * HACK: This is a temporary workaround to get access to clock @@ -594,18 +594,39 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) * this whole driver as soon as S5PV210 gets migrated to use * cpufreq-dt driver. 
*/ + arm_regulator = regulator_get(NULL, "vddarm"); + if (IS_ERR(arm_regulator)) { + if (PTR_ERR(arm_regulator) == -EPROBE_DEFER) + pr_debug("vddarm regulator not ready, defer\n"); + else + pr_err("failed to get regulator vddarm\n"); + return PTR_ERR(arm_regulator); + } + + int_regulator = regulator_get(NULL, "vddint"); + if (IS_ERR(int_regulator)) { + if (PTR_ERR(int_regulator) == -EPROBE_DEFER) + pr_debug("vddint regulator not ready, defer\n"); + else + pr_err("failed to get regulator vddint\n"); + result = PTR_ERR(int_regulator); + goto err_int_regulator; + } + np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock"); if (!np) { pr_err("%s: failed to find clock controller DT node\n", __func__); - return -ENODEV; + result = -ENODEV; + goto err_clock; } clk_base = of_iomap(np, 0); of_node_put(np); if (!clk_base) { pr_err("%s: failed to map clock registers\n", __func__); - return -EFAULT; + result = -EFAULT; + goto err_clock; } for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") { @@ -614,7 +635,8 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) pr_err("%s: failed to get alias of dmc node '%pOFn'\n", __func__, np); of_node_put(np); - return id; + result = id; + goto err_clk_base; } dmc_base[id] = of_iomap(np, 0); @@ -622,33 +644,40 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev) pr_err("%s: failed to map dmc%d registers\n", __func__, id); of_node_put(np); - return -EFAULT; + result = -EFAULT; + goto err_dmc; } } for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) { if (!dmc_base[id]) { pr_err("%s: failed to find dmc%d node\n", __func__, id); - return -ENODEV; + result = -ENODEV; + goto err_dmc; } } - arm_regulator = regulator_get(NULL, "vddarm"); - if (IS_ERR(arm_regulator)) { - pr_err("failed to get regulator vddarm\n"); - return PTR_ERR(arm_regulator); - } - - int_regulator = regulator_get(NULL, "vddint"); - if (IS_ERR(int_regulator)) { - pr_err("failed to get regulator vddint\n"); - regulator_put(arm_regulator); - return PTR_ERR(int_regulator); - } - register_reboot_notifier(&s5pv210_cpufreq_reboot_notifier); return cpufreq_register_driver(&s5pv210_driver); + +err_dmc: + for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) + if (dmc_base[id]) { + iounmap(dmc_base[id]); + dmc_base[id] = NULL; + } + +err_clk_base: + iounmap(clk_base); + +err_clock: + regulator_put(int_regulator); + +err_int_regulator: + regulator_put(arm_regulator); + + return result; } static struct platform_driver s5pv210_cpufreq_platdrv = { diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 242c3370544e..e6182c89df79 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -22,7 +22,6 @@ struct scmi_data { int domain_id; struct device *cpu_dev; - struct thermal_cooling_device *cdev; }; static const struct scmi_handle *handle; @@ -103,13 +102,42 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) return 0; } +static int __maybe_unused +scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu) +{ + struct device *cpu_dev = get_cpu_device(cpu); + unsigned long Hz; + int ret, domain; + + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", cpu); + return -ENODEV; + } + + domain = handle->perf_ops->device_domain_id(cpu_dev); + if (domain < 0) + return domain; + + /* Get the power cost of the performance domain. 
*/ + Hz = *KHz * 1000; + ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power); + if (ret) + return ret; + + /* The EM framework specifies the frequency in KHz. */ + *KHz = Hz / 1000; + + return 0; +} + static int scmi_cpufreq_init(struct cpufreq_policy *policy) { - int ret; + int ret, nr_opp; unsigned int latency; struct device *cpu_dev; struct scmi_data *priv; struct cpufreq_frequency_table *freq_table; + struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -136,8 +164,8 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) return ret; } - ret = dev_pm_opp_get_opp_count(cpu_dev); - if (ret <= 0) { + nr_opp = dev_pm_opp_get_opp_count(cpu_dev); + if (nr_opp <= 0) { dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); ret = -EPROBE_DEFER; goto out_free_opp; @@ -171,6 +199,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = latency; policy->fast_switch_possible = true; + + em_register_perf_domain(policy->cpus, nr_opp, &em_cb); + return 0; out_free_priv: @@ -185,25 +216,18 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) { struct scmi_data *priv = policy->driver_data; - cpufreq_cooling_unregister(priv->cdev); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); - kfree(priv); dev_pm_opp_remove_all_dynamic(priv->cpu_dev); + kfree(priv); return 0; } -static void scmi_cpufreq_ready(struct cpufreq_policy *policy) -{ - struct scmi_data *priv = policy->driver_data; - - priv->cdev = of_cpufreq_cooling_register(policy); -} - static struct cpufreq_driver scmi_cpufreq_driver = { .name = "scmi", .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | - CPUFREQ_NEED_INITIAL_FREQ_CHECK, + CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, .attr = cpufreq_generic_attr, .target_index = scmi_cpufreq_set_target, @@ -211,7 +235,6 @@ static struct cpufreq_driver scmi_cpufreq_driver = { .get = scmi_cpufreq_get_rate, .init = scmi_cpufreq_init, .exit = scmi_cpufreq_exit, - .ready = scmi_cpufreq_ready, }; static int scmi_cpufreq_probe(struct scmi_device *sdev) diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 99449738faa4..3f49427766b8 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -34,7 +33,6 @@ struct scpi_data { struct clk *clk; struct device *cpu_dev; - struct thermal_cooling_device *cdev; }; static struct scpi_ops *scpi_ops; @@ -170,6 +168,9 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = latency; policy->fast_switch_possible = false; + + dev_pm_opp_of_register_em(policy->cpus); + return 0; out_free_cpufreq_table: @@ -186,7 +187,6 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) { struct scpi_data *priv = policy->driver_data; - cpufreq_cooling_unregister(priv->cdev); clk_put(priv->clk); dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); kfree(priv); @@ -195,23 +195,16 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy) return 0; } -static void scpi_cpufreq_ready(struct cpufreq_policy *policy) -{ - struct scpi_data *priv = policy->driver_data; - - priv->cdev = of_cpufreq_cooling_register(policy); -} - static struct cpufreq_driver scpi_cpufreq_driver = { .name = "scpi-cpufreq", .flags = CPUFREQ_STICKY | 
CPUFREQ_HAVE_GOVERNOR_PER_POLICY | - CPUFREQ_NEED_INITIAL_FREQ_CHECK, + CPUFREQ_NEED_INITIAL_FREQ_CHECK | + CPUFREQ_IS_COOLING_DEV, .verify = cpufreq_generic_frequency_table_verify, .attr = cpufreq_generic_attr, .get = scpi_cpufreq_get_rate, .init = scpi_cpufreq_init, .exit = scpi_cpufreq_exit, - .ready = scpi_cpufreq_ready, .target_index = scpi_cpufreq_set_target, }; diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c index fbbcb88db061..5d8a09b82efb 100644 --- a/drivers/cpufreq/speedstep-ich.c +++ b/drivers/cpufreq/speedstep-ich.c @@ -243,8 +243,7 @@ static unsigned int speedstep_get(unsigned int cpu) unsigned int speed; /* You're supposed to ensure CPU is online. */ - if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) - BUG(); + BUG_ON(smp_call_function_single(cpu, get_freq_data, &speed, 1)); pr_debug("detected %u kHz as current frequency\n", speed); return speed; diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c index 43530254201a..5e748c8a5c9a 100644 --- a/drivers/cpufreq/tegra124-cpufreq.c +++ b/drivers/cpufreq/tegra124-cpufreq.c @@ -22,11 +22,9 @@ #include #include #include -#include #include struct tegra124_cpufreq_priv { - struct regulator *vdd_cpu_reg; struct clk *cpu_clk; struct clk *pllp_clk; struct clk *pllx_clk; @@ -60,14 +58,6 @@ out: return ret; } -static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv) -{ - clk_set_parent(priv->cpu_clk, priv->pllp_clk); - clk_disable_unprepare(priv->dfll_clk); - regulator_sync_voltage(priv->vdd_cpu_reg); - clk_set_parent(priv->cpu_clk, priv->pllx_clk); -} - static int tegra124_cpufreq_probe(struct platform_device *pdev) { struct tegra124_cpufreq_priv *priv; @@ -88,16 +78,10 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) if (!np) return -ENODEV; - priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu"); - if (IS_ERR(priv->vdd_cpu_reg)) { - ret = PTR_ERR(priv->vdd_cpu_reg); - goto out_put_np; - } - priv->cpu_clk = of_clk_get_by_name(np, "cpu_g"); if (IS_ERR(priv->cpu_clk)) { ret = PTR_ERR(priv->cpu_clk); - goto out_put_vdd_cpu_reg; + goto out_put_np; } priv->dfll_clk = of_clk_get_by_name(np, "dfll"); @@ -129,15 +113,15 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev) platform_device_register_full(&cpufreq_dt_devinfo); if (IS_ERR(priv->cpufreq_dt_pdev)) { ret = PTR_ERR(priv->cpufreq_dt_pdev); - goto out_switch_to_pllx; + goto out_put_pllp_clk; } platform_set_drvdata(pdev, priv); + of_node_put(np); + return 0; -out_switch_to_pllx: - tegra124_cpu_switch_to_pllx(priv); out_put_pllp_clk: clk_put(priv->pllp_clk); out_put_pllx_clk: @@ -146,34 +130,15 @@ out_put_dfll_clk: clk_put(priv->dfll_clk); out_put_cpu_clk: clk_put(priv->cpu_clk); -out_put_vdd_cpu_reg: - regulator_put(priv->vdd_cpu_reg); out_put_np: of_node_put(np); return ret; } -static int tegra124_cpufreq_remove(struct platform_device *pdev) -{ - struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev); - - platform_device_unregister(priv->cpufreq_dt_pdev); - tegra124_cpu_switch_to_pllx(priv); - - clk_put(priv->pllp_clk); - clk_put(priv->pllx_clk); - clk_put(priv->dfll_clk); - clk_put(priv->cpu_clk); - regulator_put(priv->vdd_cpu_reg); - - return 0; -} - static struct platform_driver tegra124_cpufreq_platdrv = { .driver.name = "cpufreq-tegra124", .probe = tegra124_cpufreq_probe, - .remove = tegra124_cpufreq_remove, }; static int __init tegra_cpufreq_init(void) @@ -181,7 +146,8 @@ static int __init tegra_cpufreq_init(void) int ret; struct platform_device 
*pdev; - if (!of_machine_is_compatible("nvidia,tegra124")) + if (!(of_machine_is_compatible("nvidia,tegra124") || + of_machine_is_compatible("nvidia,tegra210"))) return -ENODEV; /* diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index 7e48eb5bf0a7..8caccbbd7353 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig @@ -4,7 +4,7 @@ config CPU_IDLE bool "CPU idle PM support" default y if ACPI || PPC_PSERIES select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE) - select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) + select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE) && !CPU_IDLE_GOV_TEO help CPU idle is a generic framework for supporting software-controlled idle processor power management. It includes modular cross-platform @@ -23,6 +23,15 @@ config CPU_IDLE_GOV_LADDER config CPU_IDLE_GOV_MENU bool "Menu governor (for tickless system)" +config CPU_IDLE_GOV_TEO + bool "Timer events oriented (TEO) governor (for tickless systems)" + help + This governor implements a simplified idle state selection method + focused on timer events and does not do any interactivity boosting. + + Some workloads benefit from using it and it generally should be safe + to use. Say Y here if you are not happy with the alternatives. + config DT_IDLE_STATES bool diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index 53342b7f1010..add9569636b5 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -22,16 +22,12 @@ #include "dt_idle_states.h" static int init_state_node(struct cpuidle_state *idle_state, - const struct of_device_id *matches, + const struct of_device_id *match_id, struct device_node *state_node) { int err; - const struct of_device_id *match_id; const char *desc; - match_id = of_match_node(matches, state_node); - if (!match_id) - return -ENODEV; /* * CPUidle drivers are expected to initialize the const void *data * pointer of the passed in struct of_device_id array to the idle @@ -160,6 +156,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, { struct cpuidle_state *idle_state; struct device_node *state_node, *cpu_node; + const struct of_device_id *match_id; int i, err = 0; const cpumask_t *cpumask; unsigned int state_idx = start_idx; @@ -180,6 +177,12 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, if (!state_node) break; + match_id = of_match_node(matches, state_node); + if (!match_id) { + err = -ENODEV; + break; + } + if (!of_device_is_available(state_node)) { of_node_put(state_node); continue; @@ -198,7 +201,7 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, } idle_state = &drv->states[state_idx++]; - err = init_state_node(idle_state, matches, state_node); + err = init_state_node(idle_state, match_id, state_node); if (err) { pr_err("Parsing idle state node %pOF failed with err %d\n", state_node, err); diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile index 1b512722689f..4d8aff5248a8 100644 --- a/drivers/cpuidle/governors/Makefile +++ b/drivers/cpuidle/governors/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o +obj-$(CONFIG_CPU_IDLE_GOV_TEO) += teo.o diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c new file mode 100644 index 000000000000..7d05efdbd3c6 --- /dev/null +++ b/drivers/cpuidle/governors/teo.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Timer events oriented CPU idle governor + * + * Copyright (C) 2018 Intel Corporation + * Author: Rafael J. 
Wysocki + * + * The idea of this governor is based on the observation that on many systems + * timer events are two or more orders of magnitude more frequent than any + * other interrupts, so they are likely to be the most significant source of CPU + * wakeups from idle states. Moreover, information about what happened in the + * (relatively recent) past can be used to estimate whether or not the deepest + * idle state with target residency within the time to the closest timer is + * likely to be suitable for the upcoming idle time of the CPU and, if not, then + * which of the shallower idle states to choose. + * + * Of course, non-timer wakeup sources are more important in some use cases and + * they can be covered by taking a few most recent idle time intervals of the + * CPU into account. However, even in that case it is not necessary to consider + * idle duration values greater than the time till the closest timer, as the + * patterns that they may belong to produce average values close enough to + * the time till the closest timer (sleep length) anyway. + * + * Thus this governor estimates whether or not the upcoming idle time of the CPU + * is likely to be significantly shorter than the sleep length and selects an + * idle state for it in accordance with that, as follows: + * + * - Find an idle state on the basis of the sleep length and state statistics + * collected over time: + * + * o Find the deepest idle state whose target residency is less than or equal + * to the sleep length. + * + * o Select it if it matched both the sleep length and the observed idle + * duration in the past more often than it matched the sleep length alone + * (i.e. the observed idle duration was significantly shorter than the sleep + * length matched by it). + * + * o Otherwise, select the shallower state with the greatest matched "early" + * wakeups metric. + * + * - If the majority of the most recent idle duration values are below the + * target residency of the idle state selected so far, use those values to + * compute the new expected idle duration and find an idle state matching it + * (which has to be shallower than the one selected so far). + */ + +#include +#include +#include +#include +#include + +/* + * The PULSE value is added to metrics when they grow and the DECAY_SHIFT value + * is used for decreasing metrics on a regular basis. + */ +#define PULSE 1024 +#define DECAY_SHIFT 3 + +/* + * Number of the most recent idle duration values to take into consideration for + * the detection of wakeup patterns. + */ +#define INTERVALS 8 + +/** + * struct teo_idle_state - Idle state data used by the TEO cpuidle governor. + * @early_hits: "Early" CPU wakeups "matching" this state. + * @hits: "On time" CPU wakeups "matching" this state. + * @misses: CPU wakeups "missing" this state. + * + * A CPU wakeup is "matched" by a given idle state if the idle duration measured + * after the wakeup is between the target residency of that state and the target + * residency of the next one (or if this is the deepest available idle state, it + * "matches" a CPU wakeup when the measured idle duration is at least equal to + * its target residency). + * + * Also, from the TEO governor perspective, a CPU wakeup from idle is "early" if + * it occurs significantly earlier than the closest expected timer event (that + * is, early enough to match an idle state shallower than the one matching the + * time till the closest timer event). Otherwise, the wakeup is "on time", or + * it is a "hit". 
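The PULSE/DECAY_SHIFT mechanics above can be made concrete: each update first decays a metric by metric >> DECAY_SHIFT and then adds PULSE when the corresponding event occurs, so a counter that is bumped on every wakeup settles at PULSE << DECAY_SHIFT (8192 with these values), while one that stops being bumped decays geometrically. A standalone sketch, not part of the patch, that prints both behaviours:

#include <stdio.h>

#define PULSE		1024
#define DECAY_SHIFT	3

int main(void)
{
	unsigned int hits = 0;
	int i;

	/* A state that keeps matching: the metric settles at PULSE << DECAY_SHIFT. */
	for (i = 0; i < 60; i++) {
		hits -= hits >> DECAY_SHIFT;
		hits += PULSE;
	}
	printf("after 60 matching wakeups: %u (limit %u)\n", hits, PULSE << DECAY_SHIFT);

	/* A state that stops matching: the metric decays geometrically. */
	for (i = 0; i < 60; i++)
		hits -= hits >> DECAY_SHIFT;
	printf("after 60 further non-matching wakeups: %u\n", hits);

	return 0;
}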
+ * + * A "miss" occurs when the given state doesn't match the wakeup, but it matches + * the time till the closest timer event used for idle state selection. + */ +struct teo_idle_state { + unsigned int early_hits; + unsigned int hits; + unsigned int misses; +}; + +/** + * struct teo_cpu - CPU data used by the TEO cpuidle governor. + * @time_span_ns: Time between idle state selection and post-wakeup update. + * @sleep_length_ns: Time till the closest timer event (at the selection time). + * @states: Idle states data corresponding to this CPU. + * @last_state: Idle state entered by the CPU last time. + * @interval_idx: Index of the most recent saved idle interval. + * @intervals: Saved idle duration values. + */ +struct teo_cpu { + u64 time_span_ns; + u64 sleep_length_ns; + struct teo_idle_state states[CPUIDLE_STATE_MAX]; + int last_state; + int interval_idx; + unsigned int intervals[INTERVALS]; +}; + +static DEFINE_PER_CPU(struct teo_cpu, teo_cpus); + +/** + * teo_update - Update CPU data after wakeup. + * @drv: cpuidle driver containing state data. + * @dev: Target CPU. + */ +static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) +{ + struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); + unsigned int sleep_length_us = ktime_to_us(cpu_data->sleep_length_ns); + int i, idx_hit = -1, idx_timer = -1; + unsigned int measured_us; + + if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) { + /* + * One of the safety nets has triggered or this was a timer + * wakeup (or equivalent). + */ + measured_us = sleep_length_us; + } else { + unsigned int lat = drv->states[cpu_data->last_state].exit_latency; + + measured_us = ktime_to_us(cpu_data->time_span_ns); + /* + * The delay between the wakeup and the first instruction + * executed by the CPU is not likely to be worst-case every + * time, so take 1/2 of the exit latency as a very rough + * approximation of the average of it. + */ + if (measured_us >= lat) + measured_us -= lat / 2; + else + measured_us /= 2; + } + + /* + * Decay the "early hits" metric for all of the states and find the + * states matching the sleep length and the measured idle duration. + */ + for (i = 0; i < drv->state_count; i++) { + unsigned int early_hits = cpu_data->states[i].early_hits; + + cpu_data->states[i].early_hits -= early_hits >> DECAY_SHIFT; + + if (drv->states[i].target_residency <= sleep_length_us) { + idx_timer = i; + if (drv->states[i].target_residency <= measured_us) + idx_hit = i; + } + } + + /* + * Update the "hits" and "misses" data for the state matching the sleep + * length. If it matches the measured idle duration too, this is a hit, + * so increase the "hits" metric for it then. Otherwise, this is a + * miss, so increase the "misses" metric for it. In the latter case + * also increase the "early hits" metric for the state that actually + * matches the measured idle duration. 
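The classification described above reduces to two index searches: the deepest state whose target residency fits the sleep length (idx_timer) and the deepest one that fits the measured idle duration (idx_hit). Equal indices count as a hit; otherwise the timer-matched state records a miss and the duration-matched state, if any, records an early hit. A minimal userspace model with hypothetical residency values (not the driver's tables):

#include <stdio.h>

int main(void)
{
	/* Hypothetical target residencies in microseconds, shallow to deep. */
	const unsigned int residency[] = { 1, 100, 1000, 10000 };
	const int nr_states = 4;
	unsigned int sleep_length_us = 5000;	/* time till the closest timer     */
	unsigned int measured_us = 300;		/* idle duration actually observed */
	int i, idx_timer = -1, idx_hit = -1;

	for (i = 0; i < nr_states; i++) {
		if (residency[i] <= sleep_length_us) {
			idx_timer = i;
			if (residency[i] <= measured_us)
				idx_hit = i;
		}
	}

	if (idx_timer < 0)
		printf("no state fits the sleep length\n");
	else if (idx_timer == idx_hit)
		printf("hit for state %d\n", idx_timer);
	else if (idx_hit < 0)
		printf("miss for state %d, no shallower state matched\n", idx_timer);
	else
		printf("miss for state %d, early hit for state %d\n", idx_timer, idx_hit);

	return 0;
}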
+ */ + if (idx_timer >= 0) { + unsigned int hits = cpu_data->states[idx_timer].hits; + unsigned int misses = cpu_data->states[idx_timer].misses; + + hits -= hits >> DECAY_SHIFT; + misses -= misses >> DECAY_SHIFT; + + if (idx_timer > idx_hit) { + misses += PULSE; + if (idx_hit >= 0) + cpu_data->states[idx_hit].early_hits += PULSE; + } else { + hits += PULSE; + } + + cpu_data->states[idx_timer].misses = misses; + cpu_data->states[idx_timer].hits = hits; + } + + /* + * If the total time span between idle state selection and the "reflect" + * callback is greater than or equal to the sleep length determined at + * the idle state selection time, the wakeup is likely to be due to a + * timer event. + */ + if (cpu_data->time_span_ns >= cpu_data->sleep_length_ns) + measured_us = UINT_MAX; + + /* + * Save idle duration values corresponding to non-timer wakeups for + * pattern detection. + */ + cpu_data->intervals[cpu_data->interval_idx++] = measured_us; + if (cpu_data->interval_idx > INTERVALS) + cpu_data->interval_idx = 0; +} + +/** + * teo_find_shallower_state - Find shallower idle state matching given duration. + * @drv: cpuidle driver containing state data. + * @dev: Target CPU. + * @state_idx: Index of the capping idle state. + * @duration_us: Idle duration value to match. + */ +static int teo_find_shallower_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev, int state_idx, + unsigned int duration_us) +{ + int i; + + for (i = state_idx - 1; i >= 0; i--) { + if (drv->states[i].disabled || dev->states_usage[i].disable) + continue; + + state_idx = i; + if (drv->states[i].target_residency <= duration_us) + break; + } + return state_idx; +} + +/** + * teo_select - Selects the next idle state to enter. + * @drv: cpuidle driver containing state data. + * @dev: Target CPU. + * @stop_tick: Indication on whether or not to stop the scheduler tick. + */ +static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev, + bool *stop_tick) +{ + struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); + int latency_req = cpuidle_governor_latency_req(dev->cpu); + unsigned int duration_us, count; + int max_early_idx, idx, i; + ktime_t delta_tick; + + if (cpu_data->last_state >= 0) { + teo_update(drv, dev); + cpu_data->last_state = -1; + } + + cpu_data->time_span_ns = local_clock(); + + cpu_data->sleep_length_ns = tick_nohz_get_sleep_length(&delta_tick); + duration_us = ktime_to_us(cpu_data->sleep_length_ns); + + count = 0; + max_early_idx = -1; + idx = -1; + + for (i = 0; i < drv->state_count; i++) { + struct cpuidle_state *s = &drv->states[i]; + struct cpuidle_state_usage *su = &dev->states_usage[i]; + + if (s->disabled || su->disable) { + /* + * If the "early hits" metric of a disabled state is + * greater than the current maximum, it should be taken + * into account, because it would be a mistake to select + * a deeper state with lower "early hits" metric. The + * index cannot be changed to point to it, however, so + * just increase the max count alone and let the index + * still point to a shallower idle state. 
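The main loop in teo_select() walks the states from shallow to deep, keeping the deepest enabled state whose target residency still fits the expected sleep length and whose exit latency respects the PM QoS limit. A compact userspace model of that walk, with hypothetical residency and latency numbers and the disabled-state and tick-stopped details left out:

#include <stdio.h>

struct state {
	unsigned int residency_us;	/* target residency        */
	unsigned int exit_latency_us;	/* worst-case exit latency */
};

int main(void)
{
	/* Hypothetical idle states, shallow to deep. */
	const struct state states[] = {
		{ 1, 1 }, { 100, 50 }, { 1000, 400 }, { 10000, 3000 },
	};
	const int nr_states = 4;
	unsigned int duration_us = 5000;	/* expected sleep length          */
	unsigned int latency_req = 500;		/* PM QoS exit-latency constraint */
	int i, idx = -1;

	for (i = 0; i < nr_states; i++) {
		if (states[i].residency_us > duration_us)
			break;		/* a deeper state would not pay off */
		if (states[i].exit_latency_us > latency_req)
			break;		/* a deeper state would break the QoS limit */
		idx = i;
	}

	printf("selected state %d\n", idx);
	return 0;
}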
+ */ + if (max_early_idx >= 0 && + count < cpu_data->states[i].early_hits) + count = cpu_data->states[i].early_hits; + + continue; + } + + if (idx < 0) + idx = i; /* first enabled state */ + + if (s->target_residency > duration_us) + break; + + if (s->exit_latency > latency_req) { + /* + * If we break out of the loop for latency reasons, use + * the target residency of the selected state as the + * expected idle duration to avoid stopping the tick + * as long as that target residency is low enough. + */ + duration_us = drv->states[idx].target_residency; + goto refine; + } + + idx = i; + + if (count < cpu_data->states[i].early_hits && + !(tick_nohz_tick_stopped() && + drv->states[i].target_residency < TICK_USEC)) { + count = cpu_data->states[i].early_hits; + max_early_idx = i; + } + } + + /* + * If the "hits" metric of the idle state matching the sleep length is + * greater than its "misses" metric, that is the one to use. Otherwise, + * it is more likely that one of the shallower states will match the + * idle duration observed after wakeup, so take the one with the maximum + * "early hits" metric, but if that cannot be determined, just use the + * state selected so far. + */ + if (cpu_data->states[idx].hits <= cpu_data->states[idx].misses && + max_early_idx >= 0) { + idx = max_early_idx; + duration_us = drv->states[idx].target_residency; + } + +refine: + if (idx < 0) { + idx = 0; /* No states enabled. Must use 0. */ + } else if (idx > 0) { + u64 sum = 0; + + count = 0; + + /* + * Count and sum the most recent idle duration values less than + * the target residency of the state selected so far, find the + * max. + */ + for (i = 0; i < INTERVALS; i++) { + unsigned int val = cpu_data->intervals[i]; + + if (val >= drv->states[idx].target_residency) + continue; + + count++; + sum += val; + } + + /* + * Give up unless the majority of the most recent idle duration + * values are in the interesting range. + */ + if (count > INTERVALS / 2) { + unsigned int avg_us = div64_u64(sum, count); + + /* + * Avoid spending too much time in an idle state that + * would be too shallow. + */ + if (!(tick_nohz_tick_stopped() && avg_us < TICK_USEC)) { + idx = teo_find_shallower_state(drv, dev, idx, avg_us); + duration_us = avg_us; + } + } + } + + /* + * Don't stop the tick if the selected state is a polling one or if the + * expected idle duration is shorter than the tick period length. + */ + if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) || + duration_us < TICK_USEC) && !tick_nohz_tick_stopped()) { + unsigned int delta_tick_us = ktime_to_us(delta_tick); + + *stop_tick = false; + + /* + * The tick is not going to be stopped, so if the target + * residency of the state to be returned is not within the time + * till the closest timer including the tick, try to correct + * that. + */ + if (idx > 0 && drv->states[idx].target_residency > delta_tick_us) + idx = teo_find_shallower_state(drv, dev, idx, delta_tick_us); + } + + return idx; +} + +/** + * teo_reflect - Note that governor data for the CPU need to be updated. + * @dev: Target CPU. + * @state: Entered state. + */ +static void teo_reflect(struct cpuidle_device *dev, int state) +{ + struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); + + cpu_data->last_state = state; + /* + * If the wakeup was not "natural", but triggered by one of the safety + * nets, assume that the CPU might have been idle for the entire sleep + * length time. 
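The refinement step above keeps the last INTERVALS (8) measured idle durations in a ring buffer and only overrides the timer-based choice when a majority of them are shorter than the candidate state's target residency, in which case their average becomes the new expected idle duration. A standalone sketch of that majority-and-average test, using made-up sample values:

#include <stdio.h>

#define INTERVALS 8

int main(void)
{
	/* Hypothetical recent idle durations in microseconds. */
	unsigned int intervals[INTERVALS] = { 120, 90, 7000, 150, 80, 9000, 60, 110 };
	unsigned int target_residency_us = 1000;	/* candidate state's residency */
	unsigned long long sum = 0;
	unsigned int count = 0;
	int i;

	for (i = 0; i < INTERVALS; i++) {
		if (intervals[i] >= target_residency_us)
			continue;	/* only durations shorter than the candidate matter */
		count++;
		sum += intervals[i];
	}

	if (count > INTERVALS / 2)
		printf("majority woke early: expect ~%llu us, pick a shallower state\n",
		       sum / count);
	else
		printf("no clear pattern: keep the timer-based choice\n");

	return 0;
}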
+ */ + if (dev->poll_time_limit || + (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) { + dev->poll_time_limit = false; + cpu_data->time_span_ns = cpu_data->sleep_length_ns; + } else { + cpu_data->time_span_ns = local_clock() - cpu_data->time_span_ns; + } +} + +/** + * teo_enable_device - Initialize the governor's data for the target CPU. + * @drv: cpuidle driver (not used). + * @dev: Target CPU. + */ +static int teo_enable_device(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); + int i; + + memset(cpu_data, 0, sizeof(*cpu_data)); + + for (i = 0; i < INTERVALS; i++) + cpu_data->intervals[i] = UINT_MAX; + + return 0; +} + +static struct cpuidle_governor teo_governor = { + .name = "teo", + .rating = 19, + .enable = teo_enable_device, + .select = teo_select, + .reflect = teo_reflect, +}; + +static int __init teo_governor_init(void) +{ + return cpuidle_register_governor(&teo_governor); +} + +postcore_initcall(teo_governor_init); diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index b17d153e724f..23a1b27579a5 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c @@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, local_irq_enable(); if (!current_set_polling_and_test()) { unsigned int loop_count = 0; - u64 limit = TICK_USEC; + u64 limit = TICK_NSEC; int i; for (i = 1; i < drv->state_count; i++) { diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c index acf79889d903..06574a884715 100644 --- a/drivers/crypto/amcc/crypto4xx_core.c +++ b/drivers/crypto/amcc/crypto4xx_core.c @@ -40,9 +40,11 @@ #include #include #include +#include #include #include #include +#include #include #include "crypto4xx_reg_def.h" #include "crypto4xx_core.h" @@ -1035,6 +1037,10 @@ static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev, rc = crypto_register_ahash(&alg->alg.u.hash); break; + case CRYPTO_ALG_TYPE_RNG: + rc = crypto_register_rng(&alg->alg.u.rng); + break; + default: rc = crypto_register_skcipher(&alg->alg.u.cipher); break; @@ -1064,6 +1070,10 @@ static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev) crypto_unregister_aead(&alg->alg.u.aead); break; + case CRYPTO_ALG_TYPE_RNG: + crypto_unregister_rng(&alg->alg.u.rng); + break; + default: crypto_unregister_skcipher(&alg->alg.u.cipher); } @@ -1122,6 +1132,69 @@ static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data) PPC4XX_TMO_ERR_INT); } +static int ppc4xx_prng_data_read(struct crypto4xx_device *dev, + u8 *data, unsigned int max) +{ + unsigned int i, curr = 0; + u32 val[2]; + + do { + /* trigger PRN generation */ + writel(PPC4XX_PRNG_CTRL_AUTO_EN, + dev->ce_base + CRYPTO4XX_PRNG_CTRL); + + for (i = 0; i < 1024; i++) { + /* usually 19 iterations are enough */ + if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) & + CRYPTO4XX_PRNG_STAT_BUSY)) + continue; + + val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0); + val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1); + break; + } + if (i == 1024) + return -ETIMEDOUT; + + if ((max - curr) >= 8) { + memcpy(data, &val, 8); + data += 8; + curr += 8; + } else { + /* copy only remaining bytes */ + memcpy(data, &val, max - curr); + break; + } + } while (curr < max); + + return curr; +} + +static int crypto4xx_prng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dstn, unsigned int dlen) +{ + struct rng_alg *alg = crypto_rng_alg(tfm); + struct 
crypto4xx_alg *amcc_alg; + struct crypto4xx_device *dev; + int ret; + + amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng); + dev = amcc_alg->dev; + + mutex_lock(&dev->core_dev->rng_lock); + ret = ppc4xx_prng_data_read(dev, dstn, dlen); + mutex_unlock(&dev->core_dev->rng_lock); + return ret; +} + + +static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed, + unsigned int slen) +{ + return 0; +} + /** * Supported Crypto Algorithms */ @@ -1291,6 +1364,18 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = { .cra_module = THIS_MODULE, }, } }, + { .type = CRYPTO_ALG_TYPE_RNG, .u.rng = { + .base = { + .cra_name = "stdrng", + .cra_driver_name = "crypto4xx_rng", + .cra_priority = 300, + .cra_ctxsize = 0, + .cra_module = THIS_MODULE, + }, + .generate = crypto4xx_prng_generate, + .seed = crypto4xx_prng_seed, + .seedsize = 0, + } }, }; /** @@ -1360,6 +1445,7 @@ static int crypto4xx_probe(struct platform_device *ofdev) core_dev->dev->core_dev = core_dev; core_dev->dev->is_revb = is_revb; core_dev->device = dev; + mutex_init(&core_dev->rng_lock); spin_lock_init(&core_dev->lock); INIT_LIST_HEAD(&core_dev->dev->alg_list); ratelimit_default_init(&core_dev->dev->aead_ratelimit); @@ -1439,6 +1525,7 @@ static int crypto4xx_remove(struct platform_device *ofdev) tasklet_kill(&core_dev->tasklet); /* Un-register with Linux CryptoAPI */ crypto4xx_unregister_alg(core_dev->dev); + mutex_destroy(&core_dev->rng_lock); /* Free all allocated memory */ crypto4xx_stop_all(core_dev); diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h index e2ca56722f07..18df695ca6b1 100644 --- a/drivers/crypto/amcc/crypto4xx_core.h +++ b/drivers/crypto/amcc/crypto4xx_core.h @@ -23,8 +23,10 @@ #define __CRYPTO4XX_CORE_H__ #include +#include #include #include +#include #include #include "crypto4xx_reg_def.h" #include "crypto4xx_sa.h" @@ -119,6 +121,7 @@ struct crypto4xx_core_device { u32 irq; struct tasklet_struct tasklet; spinlock_t lock; + struct mutex rng_lock; }; struct crypto4xx_ctx { @@ -143,6 +146,7 @@ struct crypto4xx_alg_common { struct skcipher_alg cipher; struct ahash_alg hash; struct aead_alg aead; + struct rng_alg rng; } u; }; diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h index 472331787e04..80c67490bbf6 100644 --- a/drivers/crypto/amcc/crypto4xx_reg_def.h +++ b/drivers/crypto/amcc/crypto4xx_reg_def.h @@ -100,6 +100,7 @@ #define CRYPTO4XX_ENDIAN_CFG 0x000600d8 #define CRYPTO4XX_PRNG_STAT 0x00070000 +#define CRYPTO4XX_PRNG_STAT_BUSY 0x1 #define CRYPTO4XX_PRNG_CTRL 0x00070004 #define CRYPTO4XX_PRNG_SEED_L 0x00070008 #define CRYPTO4XX_PRNG_SEED_H 0x0007000c diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c index 5e63742b0d22..53ab1f140a26 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.c +++ b/drivers/crypto/amcc/crypto4xx_trng.c @@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev) /* Find the TRNG device node and map it */ trng = of_find_matching_node(NULL, ppc4xx_trng_match); - if (!trng || !of_device_is_available(trng)) + if (!trng || !of_device_is_available(trng)) { + of_node_put(trng); return; + } dev->trng_base = of_iomap(trng, 0); of_node_put(trng); diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h index 931d22531f51..7bbda51b7337 100644 --- a/drivers/crypto/amcc/crypto4xx_trng.h +++ b/drivers/crypto/amcc/crypto4xx_trng.h @@ -26,9 +26,9 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev); 
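ppc4xx_prng_data_read() above drains the two 32-bit PRNG result registers into the caller's buffer eight bytes at a time, with a shorter copy on the final pass. The buffer-filling part can be sketched in isolation (read_hw_words() is a hypothetical stand-in for the register reads, and the busy-wait on CRYPTO4XX_PRNG_STAT is omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for reading CRYPTO4XX_PRNG_RES_0/1 after a generation. */
static void read_hw_words(uint32_t val[2])
{
	static uint32_t counter;

	val[0] = ++counter;
	val[1] = ++counter;
}

static unsigned int prng_read(unsigned char *data, unsigned int max)
{
	unsigned int curr = 0;
	uint32_t val[2];

	do {
		read_hw_words(val);

		if (max - curr >= 8) {
			memcpy(data, val, 8);		/* full 8-byte chunk */
			data += 8;
			curr += 8;
		} else {
			memcpy(data, val, max - curr);	/* partial tail copy */
			curr = max;
			break;
		}
	} while (curr < max);

	return curr;
}

int main(void)
{
	unsigned char buf[13];

	printf("filled %u of %zu bytes\n", prng_read(buf, sizeof(buf)), sizeof(buf));
	return 0;
}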
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev); #else static inline void ppc4xx_trng_probe( - struct crypto4xx_device *dev __maybe_unused) { } + struct crypto4xx_core_device *dev __maybe_unused) { } static inline void ppc4xx_trng_remove( - struct crypto4xx_device *dev __maybe_unused) { } + struct crypto4xx_core_device *dev __maybe_unused) { } #endif #endif diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c index 438e1ffb2ec0..65bf1a299562 100644 --- a/drivers/crypto/atmel-tdes.c +++ b/drivers/crypto/atmel-tdes.c @@ -785,7 +785,7 @@ static int atmel_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key, } err = des_ekey(tmp, key); - if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (err == 0 && (ctfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { ctfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index f3442c2bdbdc..57e5dca3253f 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -135,8 +135,6 @@ #define regk_crypto_ext 0x00000001 #define regk_crypto_hmac_sha1 0x00000007 #define regk_crypto_hmac_sha256 0x00000009 -#define regk_crypto_hmac_sha384 0x0000000b -#define regk_crypto_hmac_sha512 0x0000000d #define regk_crypto_init 0x00000000 #define regk_crypto_key_128 0x00000000 #define regk_crypto_key_192 0x00000001 @@ -144,8 +142,6 @@ #define regk_crypto_null 0x00000000 #define regk_crypto_sha1 0x00000006 #define regk_crypto_sha256 0x00000008 -#define regk_crypto_sha384 0x0000000a -#define regk_crypto_sha512 0x0000000c /* DMA descriptor structures */ struct pdma_descr_ctrl { @@ -190,8 +186,6 @@ struct pdma_stat_descr { /* Hash modes (including HMAC variants) */ #define ARTPEC6_CRYPTO_HASH_SHA1 1 #define ARTPEC6_CRYPTO_HASH_SHA256 2 -#define ARTPEC6_CRYPTO_HASH_SHA384 3 -#define ARTPEC6_CRYPTO_HASH_SHA512 4 /* Crypto modes */ #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1 @@ -284,6 +278,7 @@ enum artpec6_crypto_hash_flags { struct artpec6_crypto_req_common { struct list_head list; + struct list_head complete_in_progress; struct artpec6_crypto_dma_descriptors *dma; struct crypto_async_request *req; void (*complete)(struct crypto_async_request *req); @@ -291,11 +286,11 @@ struct artpec6_crypto_req_common { }; struct artpec6_hash_request_context { - char partial_buffer[SHA512_BLOCK_SIZE]; - char partial_buffer_out[SHA512_BLOCK_SIZE]; - char key_buffer[SHA512_BLOCK_SIZE]; - char pad_buffer[SHA512_BLOCK_SIZE + 32]; - unsigned char digeststate[SHA512_DIGEST_SIZE]; + char partial_buffer[SHA256_BLOCK_SIZE]; + char partial_buffer_out[SHA256_BLOCK_SIZE]; + char key_buffer[SHA256_BLOCK_SIZE]; + char pad_buffer[SHA256_BLOCK_SIZE + 32]; + unsigned char digeststate[SHA256_DIGEST_SIZE]; size_t partial_bytes; u64 digcnt; u32 key_md; @@ -305,8 +300,8 @@ struct artpec6_hash_request_context { }; struct artpec6_hash_export_state { - char partial_buffer[SHA512_BLOCK_SIZE]; - unsigned char digeststate[SHA512_DIGEST_SIZE]; + char partial_buffer[SHA256_BLOCK_SIZE]; + unsigned char digeststate[SHA256_DIGEST_SIZE]; size_t partial_bytes; u64 digcnt; int oper; @@ -314,7 +309,7 @@ struct artpec6_hash_export_state { }; struct artpec6_hashalg_context { - char hmac_key[SHA512_BLOCK_SIZE]; + char hmac_key[SHA256_BLOCK_SIZE]; size_t hmac_key_length; struct crypto_shash *child_hash; }; @@ -670,8 +665,8 @@ artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common) * to be written. 
*/ return artpec6_crypto_dma_map_single(common, - dma->stat + dma->in_cnt - 1, - sizeof(dma->stat[0]), + dma->stat, + sizeof(dma->stat[0]) * dma->in_cnt, DMA_BIDIRECTIONAL, &dma->stat_dma_addr); } @@ -1315,8 +1310,7 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm); struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq); size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); - size_t contextsize = digestsize == SHA384_DIGEST_SIZE ? - SHA512_DIGEST_SIZE : digestsize; + size_t contextsize = digestsize; size_t blocksize = crypto_tfm_alg_blocksize( crypto_ahash_tfm(crypto_ahash_reqtfm(areq))); struct artpec6_crypto_req_common *common = &req_ctx->common; @@ -1456,7 +1450,6 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) /* Finalize */ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) { - bool needtrim = contextsize != digestsize; size_t hash_pad_len; u64 digest_bits; u32 oper; @@ -1502,19 +1495,10 @@ static int artpec6_crypto_prepare_hash(struct ahash_request *areq) /* Descriptor for the final result */ error = artpec6_crypto_setup_in_descr(common, areq->result, digestsize, - !needtrim); + true); if (error) return error; - if (needtrim) { - /* Discard the extra context bytes for SHA-384 */ - error = artpec6_crypto_setup_in_descr(common, - req_ctx->partial_buffer, - digestsize - contextsize, true); - if (error) - return error; - } - } else { /* This is not the final operation for this request */ if (!run_hw) return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START; @@ -1923,7 +1907,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) /* For the decryption, cryptlen includes the tag. */ input_length = areq->cryptlen; if (req_ctx->decrypt) - input_length -= AES_BLOCK_SIZE; + input_length -= crypto_aead_authsize(cipher); /* Prepare the context buffer */ req_ctx->hw_ctx.aad_length_bits = @@ -1988,7 +1972,7 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) size_t output_len = areq->cryptlen; if (req_ctx->decrypt) - output_len -= AES_BLOCK_SIZE; + output_len -= crypto_aead_authsize(cipher); artpec6_crypto_walk_init(&walk, areq->dst); @@ -2017,19 +2001,32 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) * the output ciphertext. For decryption it is put in a context * buffer for later compare against the input tag. */ - count = AES_BLOCK_SIZE; if (req_ctx->decrypt) { ret = artpec6_crypto_setup_in_descr(common, - req_ctx->decryption_tag, count, false); + req_ctx->decryption_tag, AES_BLOCK_SIZE, false); if (ret) return ret; } else { + /* For encryption the requested tag size may be smaller + * than the hardware's generated tag. 
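The hunk that follows makes the driver honour the requested authentication tag size rather than always handling a full 16-byte AES block: on encryption only authsize bytes reach the destination scatterlist and the rest of the hardware tag is parked in a pad buffer, and a later hunk compares only authsize bytes on decryption, switching from memcmp() to crypto_memneq() so the comparison time does not depend on where the tags first differ. A small userspace sketch of such a constant-time check over a truncated tag (illustrative only; in the kernel the helper is crypto_memneq()):

#include <stdio.h>
#include <stddef.h>

#define AES_BLOCK_SIZE 16

/* Constant-time "not equal": every byte is examined no matter where a mismatch is. */
static int tag_memneq(const void *a, const void *b, size_t len)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= pa[i] ^ pb[i];

	return diff != 0;
}

int main(void)
{
	unsigned char hw_tag[AES_BLOCK_SIZE] = "0123456789abcdef";	/* full hardware tag     */
	unsigned char sent_tag[12] = "0123456789ab";			/* truncated 12-byte tag */
	size_t authsize = sizeof(sent_tag);

	/* Only the first authsize bytes are compared; the rest of hw_tag is discarded. */
	printf("tag %s\n", tag_memneq(hw_tag, sent_tag, authsize) ? "mismatch" : "ok");
	return 0;
}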
+ */ + size_t authsize = crypto_aead_authsize(cipher); + ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, - count); + authsize); if (ret) return ret; + + if (authsize < AES_BLOCK_SIZE) { + count = AES_BLOCK_SIZE - authsize; + ret = artpec6_crypto_setup_in_descr(common, + ac->pad_buffer, + count, false); + if (ret) + return ret; + } } } @@ -2045,7 +2042,8 @@ static int artpec6_crypto_prepare_aead(struct aead_request *areq) return artpec6_crypto_dma_map_descs(common); } -static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) +static void artpec6_crypto_process_queue(struct artpec6_crypto *ac, + struct list_head *completions) { struct artpec6_crypto_req_common *req; @@ -2056,7 +2054,7 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) list_move_tail(&req->list, &ac->pending); artpec6_crypto_start_dma(req); - req->req->complete(req->req, -EINPROGRESS); + list_add_tail(&req->complete_in_progress, completions); } /* @@ -2086,6 +2084,11 @@ static void artpec6_crypto_task(unsigned long data) struct artpec6_crypto *ac = (struct artpec6_crypto *)data; struct artpec6_crypto_req_common *req; struct artpec6_crypto_req_common *n; + struct list_head complete_done; + struct list_head complete_in_progress; + + INIT_LIST_HEAD(&complete_done); + INIT_LIST_HEAD(&complete_in_progress); if (list_empty(&ac->pending)) { pr_debug("Spurious IRQ\n"); @@ -2097,9 +2100,12 @@ static void artpec6_crypto_task(unsigned long data) list_for_each_entry_safe(req, n, &ac->pending, list) { struct artpec6_crypto_dma_descriptors *dma = req->dma; u32 stat; + dma_addr_t stataddr; - dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr, - sizeof(dma->stat[0]), + stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1); + dma_sync_single_for_cpu(artpec6_crypto_dev, + stataddr, + 4, DMA_BIDIRECTIONAL); stat = req->dma->stat[req->dma->in_cnt-1]; @@ -2119,19 +2125,30 @@ static void artpec6_crypto_task(unsigned long data) pr_debug("Completing request %p\n", req); - list_del(&req->list); + list_move_tail(&req->list, &complete_done); + ac->pending_count--; + } + + artpec6_crypto_process_queue(ac, &complete_in_progress); + + spin_unlock_bh(&ac->queue_lock); + + /* Perform the completion callbacks without holding the queue lock + * to allow new request submissions from the callbacks. + */ + list_for_each_entry_safe(req, n, &complete_done, list) { artpec6_crypto_dma_unmap_all(req); artpec6_crypto_copy_bounce_buffers(req); - - ac->pending_count--; artpec6_crypto_common_destroy(req); + req->complete(req->req); } - artpec6_crypto_process_queue(ac); - - spin_unlock_bh(&ac->queue_lock); + list_for_each_entry_safe(req, n, &complete_in_progress, + complete_in_progress) { + req->req->complete(req->req, -EINPROGRESS); + } } static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) @@ -2170,27 +2187,29 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req) /* Verify GCM hashtag. 
*/ struct aead_request *areq = container_of(req, struct aead_request, base); + struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); if (req_ctx->decrypt) { u8 input_tag[AES_BLOCK_SIZE]; + unsigned int authsize = crypto_aead_authsize(aead); sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), input_tag, - AES_BLOCK_SIZE, + authsize, areq->assoclen + areq->cryptlen - - AES_BLOCK_SIZE); + authsize); - if (memcmp(req_ctx->decryption_tag, - input_tag, - AES_BLOCK_SIZE)) { + if (crypto_memneq(req_ctx->decryption_tag, + input_tag, + authsize)) { pr_debug("***EBADMSG:\n"); print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1, - input_tag, AES_BLOCK_SIZE, true); + input_tag, authsize, true); print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1, req_ctx->decryption_tag, - AES_BLOCK_SIZE, true); + authsize, true); result = -EBADMSG; } @@ -2266,13 +2285,6 @@ artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac) case ARTPEC6_CRYPTO_HASH_SHA256: oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256; break; - case ARTPEC6_CRYPTO_HASH_SHA384: - oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384; - break; - case ARTPEC6_CRYPTO_HASH_SHA512: - oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512; - break; - default: pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type); return -EINVAL; @@ -2368,53 +2380,11 @@ static int artpec6_crypto_sha256_digest(struct ahash_request *req) return artpec6_crypto_prepare_submit_hash(req); } -static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); -} - -static int __maybe_unused -artpec6_crypto_sha384_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - -static int artpec6_crypto_sha512_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); -} - -static int artpec6_crypto_sha512_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req) { return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); } -static int __maybe_unused -artpec6_crypto_hmac_sha384_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); -} - -static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req) -{ - return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); -} - static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); @@ -2425,27 +2395,6 @@ static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) return artpec6_crypto_prepare_submit_hash(req); } -static int __maybe_unused -artpec6_crypto_hmac_sha384_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | 
HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - -static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req) -{ - struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); - - artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1); - req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; - - return artpec6_crypto_prepare_submit_hash(req); -} - static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm, const char *base_hash_name) { @@ -2480,17 +2429,6 @@ static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm) return artpec6_crypto_ahash_init_common(tfm, "sha256"); } -static int __maybe_unused -artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm) -{ - return artpec6_crypto_ahash_init_common(tfm, "sha384"); -} - -static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm) -{ - return artpec6_crypto_ahash_init_common(tfm, "sha512"); -} - static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm) { struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); @@ -2761,103 +2699,6 @@ static struct ahash_alg hash_algos[] = { }, }; -static struct ahash_alg artpec7_hash_algos[] = { - /* SHA-384 */ - { - .init = artpec6_crypto_sha384_init, - .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_sha384_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "sha384", - .cra_driver_name = "artpec-sha384", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, - /* HMAC SHA-384 */ - { - .init = artpec6_crypto_hmac_sha384_init, - .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_hmac_sha384_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .setkey = artpec6_crypto_hash_set_key, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "hmac(sha384)", - .cra_driver_name = "artpec-hmac-sha384", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init_hmac_sha384, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, - /* SHA-512 */ - { - .init = artpec6_crypto_sha512_init, - .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_sha512_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "sha512", - .cra_driver_name = "artpec-sha512", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, - /* HMAC SHA-512 */ - { - .init = artpec6_crypto_hmac_sha512_init, 
- .update = artpec6_crypto_hash_update, - .final = artpec6_crypto_hash_final, - .digest = artpec6_crypto_hmac_sha512_digest, - .import = artpec6_crypto_hash_import, - .export = artpec6_crypto_hash_export, - .setkey = artpec6_crypto_hash_set_key, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = sizeof(struct artpec6_hash_export_state), - .halg.base = { - .cra_name = "hmac(sha512)", - .cra_driver_name = "artpec-hmac-sha512", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct artpec6_hashalg_context), - .cra_alignmask = 3, - .cra_module = THIS_MODULE, - .cra_init = artpec6_crypto_ahash_init_hmac_sha512, - .cra_exit = artpec6_crypto_ahash_exit, - } - }, -}; - /* Crypto */ static struct skcipher_alg crypto_algos[] = { /* AES - ECB */ @@ -2984,12 +2825,6 @@ static void artpec6_crypto_init_debugfs(void) { dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); - if (!dbgfs_root || IS_ERR(dbgfs_root)) { - dbgfs_root = NULL; - pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME); - return; - } - #ifdef CONFIG_FAULT_INJECTION fault_create_debugfs_attr("fail_status_read", dbgfs_root, &artpec6_crypto_fail_status_read); @@ -3001,9 +2836,6 @@ static void artpec6_crypto_init_debugfs(void) static void artpec6_crypto_free_debugfs(void) { - if (!dbgfs_root) - return; - debugfs_remove_recursive(dbgfs_root); dbgfs_root = NULL; } @@ -3104,19 +2936,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev) goto disable_hw; } - if (variant != ARTPEC6_CRYPTO) { - err = crypto_register_ahashes(artpec7_hash_algos, - ARRAY_SIZE(artpec7_hash_algos)); - if (err) { - dev_err(dev, "Failed to register ahashes\n"); - goto unregister_ahashes; - } - } - err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); if (err) { dev_err(dev, "Failed to register ciphers\n"); - goto unregister_a7_ahashes; + goto unregister_ahashes; } err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos)); @@ -3129,10 +2952,6 @@ static int artpec6_crypto_probe(struct platform_device *pdev) unregister_algs: crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); -unregister_a7_ahashes: - if (variant != ARTPEC6_CRYPTO) - crypto_unregister_ahashes(artpec7_hash_algos, - ARRAY_SIZE(artpec7_hash_algos)); unregister_ahashes: crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); disable_hw: @@ -3148,9 +2967,6 @@ static int artpec6_crypto_remove(struct platform_device *pdev) int irq = platform_get_irq(pdev, 0); crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); - if (ac->variant != ARTPEC6_CRYPTO) - crypto_unregister_ahashes(artpec7_hash_algos, - ARRAY_SIZE(artpec7_hash_algos)); crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos)); diff --git a/drivers/crypto/bcm/Makefile b/drivers/crypto/bcm/Makefile index 13cb80eb2665..7469e19afe85 100644 --- a/drivers/crypto/bcm/Makefile +++ b/drivers/crypto/bcm/Makefile @@ -11,5 +11,3 @@ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) := bcm_crypto_spu.o bcm_crypto_spu-objs := util.o spu.o spu2.o cipher.o - -ccflags-y += -I. 
-DBCMDRIVER diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 5567cbda2798..28f592f7e1b7 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -717,7 +717,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx) */ unsigned int new_data_len; - unsigned int chunk_start = 0; + unsigned int __maybe_unused chunk_start = 0; u32 db_size; /* Length of data field, incl gcm and hash padding */ int pad_len = 0; /* total pad len, including gcm, hash, stat padding */ u32 data_pad_len = 0; /* length of GCM/CCM padding */ @@ -1675,8 +1675,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg) struct spu_hw *spu = &iproc_priv.spu; struct brcm_message *mssg = msg; struct iproc_reqctx_s *rctx; - struct iproc_ctx_s *ctx; - struct crypto_async_request *areq; int err = 0; rctx = mssg->ctx; @@ -1686,8 +1684,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg) err = -EFAULT; goto cb_finish; } - areq = rctx->parent; - ctx = rctx->ctx; /* process the SPU status */ err = spu->spu_status_process(rctx->msg_buf.rx_stat); @@ -1822,7 +1818,7 @@ static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, if (keylen == DES_KEY_SIZE) { if (des_ekey(tmp, key) == 0) { if (crypto_ablkcipher_get_flags(cipher) & - CRYPTO_TFM_REQ_WEAK_KEY) { + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { u32 flags = CRYPTO_TFM_RES_WEAK_KEY; crypto_ablkcipher_set_flags(cipher, flags); @@ -2876,7 +2872,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, if (des_ekey(tmp, keys.enckey) == 0) { if (crypto_aead_get_flags(cipher) & - CRYPTO_TFM_REQ_WEAK_KEY) { + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { crypto_aead_set_flags(cipher, flags); return -EINVAL; } diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index 763c425c41ca..f6da49758954 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -34,9 +35,6 @@ /* Driver supports up to MAX_SPUS SPU blocks */ #define MAX_SPUS 16 -#define ARC4_MIN_KEY_SIZE 1 -#define ARC4_MAX_KEY_SIZE 256 -#define ARC4_BLOCK_SIZE 1 #define ARC4_STATE_SIZE 4 #define CCM_AES_IV_SIZE 16 diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c index a912c6ad3e85..d8cda5fb75ad 100644 --- a/drivers/crypto/bcm/util.c +++ b/drivers/crypto/bcm/util.c @@ -201,46 +201,6 @@ struct sdesc { char ctx[]; }; -/* do a synchronous decrypt operation */ -int do_decrypt(char *alg_name, - void *key_ptr, unsigned int key_len, - void *iv_ptr, void *src_ptr, void *dst_ptr, - unsigned int block_len) -{ - struct scatterlist sg_in[1], sg_out[1]; - struct crypto_blkcipher *tfm = - crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC); - struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 }; - int ret = 0; - void *iv; - int ivsize; - - flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len); - - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len); - - sg_init_table(sg_in, 1); - sg_set_buf(sg_in, src_ptr, block_len); - - sg_init_table(sg_out, 1); - sg_set_buf(sg_out, dst_ptr, block_len); - - iv = crypto_blkcipher_crt(tfm)->iv; - ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, iv_ptr, ivsize); - - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len); - crypto_free_blkcipher(tfm); - - if (ret < 0) - pr_err("aes_decrypt failed %d\n", ret); - - return ret; -} - /** * do_shash() - Do a synchronous hash operation in software * @name: The name of the hash 
algorithm diff --git a/drivers/crypto/bcm/util.h b/drivers/crypto/bcm/util.h index 712e029795f8..15c60356518a 100644 --- a/drivers/crypto/bcm/util.h +++ b/drivers/crypto/bcm/util.h @@ -95,12 +95,6 @@ u32 spu_msg_sg_add(struct scatterlist **to_sg, void add_to_ctr(u8 *ctr_pos, unsigned int increment); -/* do a synchronous decrypt operation */ -int do_decrypt(char *alg_name, - void *key_ptr, unsigned int key_len, - void *iv_ptr, void *src_ptr, void *dst_ptr, - unsigned int block_len); - /* produce a message digest from data of length n bytes */ int do_shash(unsigned char *name, unsigned char *result, const u8 *data1, unsigned int data1_len, diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index c4b1cade55c1..577c9844b322 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -91,6 +91,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API select CRYPTO_AEAD select CRYPTO_AUTHENC select CRYPTO_BLKCIPHER + select CRYPTO_DES help Selecting this will offload crypto for users of the scatterlist crypto API (such as the linux native IPSec diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 80ae69f906fb..579578498deb 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -3,7 +3,7 @@ * caam - Freescale FSL CAAM support for crypto API * * Copyright 2008-2011 Freescale Semiconductor, Inc. - * Copyright 2016-2018 NXP + * Copyright 2016-2019 NXP * * Based on talitos crypto API driver. * @@ -766,6 +766,27 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, return 0; } +static int des_skcipher_setkey(struct crypto_skcipher *skcipher, + const u8 *key, unsigned int keylen) +{ + u32 tmp[DES3_EDE_EXPKEY_WORDS]; + struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); + + if (keylen == DES3_EDE_KEY_SIZE && + __des3_ede_setkey(tmp, &tfm->crt_flags, key, DES3_EDE_KEY_SIZE)) { + return -EINVAL; + } + + if (!des_ekey(tmp, key) && (crypto_skcipher_get_flags(skcipher) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { + crypto_skcipher_set_flags(skcipher, + CRYPTO_TFM_RES_WEAK_KEY); + return -EINVAL; + } + + return skcipher_setkey(skcipher, key, keylen); +} + static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { @@ -802,6 +823,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, * aead_edesc - s/w-extended aead descriptor * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist + * @mapped_src_nents: number of segments in input h/w link table + * @mapped_dst_nents: number of segments in output h/w link table * @sec4_sg_bytes: length of dma mapped sec4_sg space * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table @@ -810,6 +833,8 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, struct aead_edesc { int src_nents; int dst_nents; + int mapped_src_nents; + int mapped_dst_nents; int sec4_sg_bytes; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; @@ -820,6 +845,8 @@ struct aead_edesc { * skcipher_edesc - s/w-extended skcipher descriptor * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist + * @mapped_src_nents: number of segments in input h/w link table + * @mapped_dst_nents: number of segments in output h/w link table * @iv_dma: dma address of iv for checking continuity and link table * @sec4_sg_bytes: length of dma mapped sec4_sg 
space * @sec4_sg_dma: bus physical mapped address of h/w link table @@ -830,6 +857,8 @@ struct aead_edesc { struct skcipher_edesc { int src_nents; int dst_nents; + int mapped_src_nents; + int mapped_dst_nents; dma_addr_t iv_dma; int sec4_sg_bytes; dma_addr_t sec4_sg_dma; @@ -846,7 +875,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } @@ -961,8 +991,9 @@ static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block. This is used e.g. by the CTS mode. */ - scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize, - ivsize, 0); + if (ivsize) + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - + ivsize, ivsize, 0); kfree(edesc); @@ -1023,11 +1054,12 @@ static void init_aead_job(struct aead_request *req, init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); if (all_contig) { - src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0; + src_dma = edesc->mapped_src_nents ? sg_dma_address(req->src) : + 0; in_options = 0; } else { src_dma = edesc->sec4_sg_dma; - sec4_sg_index += edesc->src_nents; + sec4_sg_index += edesc->mapped_src_nents; in_options = LDST_SGF; } @@ -1038,8 +1070,11 @@ static void init_aead_job(struct aead_request *req, out_options = in_options; if (unlikely(req->src != req->dst)) { - if (edesc->dst_nents == 1) { + if (!edesc->mapped_dst_nents) { + dst_dma = 0; + } else if (edesc->mapped_dst_nents == 1) { dst_dma = sg_dma_address(req->dst); + out_options = 0; } else { dst_dma = edesc->sec4_sg_dma + sec4_sg_index * @@ -1183,9 +1218,9 @@ static void init_skcipher_job(struct skcipher_request *req, int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc = edesc->hw_desc; u32 *sh_desc; - u32 out_options = 0; - dma_addr_t dst_dma, ptr; - int len; + u32 in_options = 0, out_options = 0; + dma_addr_t src_dma, dst_dma, ptr; + int len, sec4_sg_index = 0; #ifdef DEBUG print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ", @@ -1203,21 +1238,27 @@ static void init_skcipher_job(struct skcipher_request *req, len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); - append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize, - LDST_SGF); + if (ivsize || edesc->mapped_src_nents > 1) { + src_dma = edesc->sec4_sg_dma; + sec4_sg_index = edesc->mapped_src_nents + !!ivsize; + in_options = LDST_SGF; + } else { + src_dma = sg_dma_address(req->src); + } + + append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); if (likely(req->src == req->dst)) { - dst_dma = edesc->sec4_sg_dma + sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; + dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); + out_options = in_options; + } else if (edesc->mapped_dst_nents == 1) { + dst_dma = sg_dma_address(req->dst); } else { - if (edesc->dst_nents == 1) { - dst_dma = sg_dma_address(req->dst); - } else { - dst_dma = edesc->sec4_sg_dma + (edesc->src_nents + 1) * - sizeof(struct sec4_sg_entry); - out_options = LDST_SGF; - } + dst_dma = edesc->sec4_sg_dma + sec4_sg_index * + sizeof(struct sec4_sg_entry); + out_options = LDST_SGF; } + append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options); } @@ -1289,12 +1330,19 @@ static 
struct aead_edesc *aead_edesc_alloc(struct aead_request *req, mapped_src_nents = 0; } - mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(jrdev, "unable to map destination\n"); - dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); - return ERR_PTR(-ENOMEM); + /* Cover also the case of null (zero length) output data */ + if (dst_nents) { + mapped_dst_nents = dma_map_sg(jrdev, req->dst, + dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(jrdev, "unable to map destination\n"); + dma_unmap_sg(jrdev, req->src, src_nents, + DMA_TO_DEVICE); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; } } @@ -1313,6 +1361,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes; *all_contig_ptr = !(mapped_src_nents > 1); @@ -1586,7 +1636,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct skcipher_edesc *edesc; - dma_addr_t iv_dma; + dma_addr_t iv_dma = 0; u8 *iv; int ivsize = crypto_skcipher_ivsize(skcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; @@ -1621,7 +1671,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, dev_err(jrdev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } - mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { @@ -1631,7 +1680,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, } } - sec4_sg_ents = 1 + mapped_src_nents; + if (!ivsize && mapped_src_nents == 1) + sec4_sg_ents = 0; // no need for an input hw s/g table + else + sec4_sg_ents = mapped_src_nents + !!ivsize; dst_sg_idx = sec4_sg_ents; sec4_sg_ents += mapped_dst_nents > 1 ? 
mapped_dst_nents : 0; sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); @@ -1650,39 +1702,48 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; + edesc->mapped_src_nents = mapped_src_nents; + edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc + desc_bytes); /* Make sure IV is located in a DMAable area */ - iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; - memcpy(iv, req->iv, ivsize); + if (ivsize) { + iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes; + memcpy(iv, req->iv, ivsize); + + iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, iv_dma)) { + dev_err(jrdev, "unable to map IV\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, + dst_nents, 0, 0, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } - iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, iv_dma)) { - dev_err(jrdev, "unable to map IV\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, - 0, 0, 0); - kfree(edesc); - return ERR_PTR(-ENOMEM); + dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); } - - dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); - sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + 1, 0); + if (dst_sg_idx) + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg + + !!ivsize, 0); if (mapped_dst_nents > 1) { sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg + dst_sg_idx, 0); } - edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, - sec4_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { - dev_err(jrdev, "unable to map S/G table\n"); - caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, - iv_dma, ivsize, 0, 0); - kfree(edesc); - return ERR_PTR(-ENOMEM); + if (sec4_sg_bytes) { + edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, + sec4_sg_bytes, + DMA_TO_DEVICE); + if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { + dev_err(jrdev, "unable to map S/G table\n"); + caam_unmap(jrdev, req->src, req->dst, src_nents, + dst_nents, iv_dma, ivsize, 0, 0); + kfree(edesc); + return ERR_PTR(-ENOMEM); + } } edesc->iv_dma = iv_dma; @@ -1749,8 +1810,9 @@ static int skcipher_decrypt(struct skcipher_request *req) * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block. 
*/ - scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize, - ivsize, 0); + if (ivsize) + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - + ivsize, ivsize, 0); /* Create and submit job descriptor*/ init_skcipher_job(req, edesc, false); @@ -1796,7 +1858,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-3des-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, @@ -1812,7 +1874,7 @@ static struct caam_skcipher_alg driver_algs[] = { .cra_driver_name = "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, - .setkey = skcipher_setkey, + .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, @@ -1878,6 +1940,66 @@ static struct caam_skcipher_alg driver_algs[] = { }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, }, + { + .skcipher = { + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb-des-caam", + .cra_blocksize = DES_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb-aes-caam", + .cra_blocksize = AES_BLOCK_SIZE, + }, + .setkey = skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb-des3-caam", + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + }, + .setkey = des_skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, + }, + { + .skcipher = { + .base = { + .cra_name = "ecb(arc4)", + .cra_driver_name = "ecb-arc4-caam", + .cra_blocksize = ARC4_BLOCK_SIZE, + }, + .setkey = skcipher_setkey, + .encrypt = skcipher_encrypt, + .decrypt = skcipher_decrypt, + .min_keysize = ARC4_MIN_KEY_SIZE, + .max_keysize = ARC4_MAX_KEY_SIZE, + }, + .caam.class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB, + }, }; static struct caam_aead_alg driver_aeads[] = { @@ -3333,10 +3455,10 @@ static int __init caam_algapi_init(void) { struct device_node *dev_node; struct platform_device *pdev; - struct device *ctrldev; struct caam_drv_private *priv; int i = 0, err = 0; u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; + u32 arc4_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; @@ -3353,16 +3475,17 @@ static int __init caam_algapi_init(void) return -ENODEV; } - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); + priv = dev_get_drvdata(&pdev->dev); of_node_put(dev_node); /* * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. 
*/ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* @@ -3381,6 +3504,8 @@ static int __init caam_algapi_init(void) CHA_ID_LS_DES_SHIFT; aes_inst = cha_inst & CHA_ID_LS_AES_MASK; md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; + arc4_inst = (cha_inst & CHA_ID_LS_ARC4_MASK) >> + CHA_ID_LS_ARC4_SHIFT; ccha_inst = 0; ptha_inst = 0; } else { @@ -3397,6 +3522,7 @@ static int __init caam_algapi_init(void) md_inst = mdha & CHA_VER_NUM_MASK; ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK; ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK; + arc4_inst = rd_reg32(&priv->ctrl->vreg.afha) & CHA_VER_NUM_MASK; } /* If MD is present, limit digest size based on LP256 */ @@ -3417,6 +3543,10 @@ static int __init caam_algapi_init(void) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; + /* Skip ARC4 algorithms if not supported by device */ + if (!arc4_inst && alg_sel == OP_ALG_ALGSEL_ARC4) + continue; + /* * Check support for AES modes not available * on LP devices. @@ -3496,6 +3626,8 @@ static int __init caam_algapi_init(void) if (registered) pr_info("caam algorithms registered in /proc/crypto\n"); +out_put_dev: + put_device(&pdev->dev); return err; } diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c index 7db1640d3577..1e1a376edc2f 100644 --- a/drivers/crypto/caam/caamalg_desc.c +++ b/drivers/crypto/caam/caamalg_desc.c @@ -2,7 +2,7 @@ /* * Shared descriptors for aead, skcipher algorithms * - * Copyright 2016-2018 NXP + * Copyright 2016-2019 NXP */ #include "compat.h" @@ -1396,9 +1396,11 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, set_jump_tgt_here(desc, key_jump_cmd); - /* Load iv */ - append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); /* Load counter into CONTEXT1 reg */ if (is_rfc3686) @@ -1462,9 +1464,11 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, set_jump_tgt_here(desc, key_jump_cmd); - /* load IV */ - append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); + /* Load IV, if there is one */ + if (ivsize) + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | + LDST_CLASS_1_CCB | (ctx1_iv_off << + LDST_OFFSET_SHIFT)); /* Load counter into CONTEXT1 reg */ if (is_rfc3686) diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c index c0d55310aade..c61921d32489 100644 --- a/drivers/crypto/caam/caamalg_qi.c +++ b/drivers/crypto/caam/caamalg_qi.c @@ -782,7 +782,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, cpu = smp_processor_id(); drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); - if (likely(!IS_ERR_OR_NULL(drv_ctx))) + if (!IS_ERR_OR_NULL(drv_ctx)) drv_ctx->op_type = type; ctx->drv_ctx[type] = drv_ctx; @@ -802,7 +802,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } @@ -892,7 +893,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, struct caam_drv_ctx *drv_ctx; drv_ctx 
= get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) + if (IS_ERR_OR_NULL(drv_ctx)) return (struct aead_edesc *)drv_ctx; /* allocate space for base edesc and hw desc commands, link tables */ @@ -955,13 +956,19 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, mapped_src_nents = 0; } - mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(qidev, "unable to map destination\n"); - dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); + if (dst_nents) { + mapped_dst_nents = dma_map_sg(qidev, req->dst, + dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(qidev, "unable to map destination\n"); + dma_unmap_sg(qidev, req->src, src_nents, + DMA_TO_DEVICE); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; } } @@ -1184,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, struct caam_drv_ctx *drv_ctx; drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) + if (IS_ERR_OR_NULL(drv_ctx)) return (struct skcipher_edesc *)drv_ctx; src_nents = sg_nents_for_len(req->src, req->cryptlen); @@ -2485,12 +2492,15 @@ static int __init caam_qi_algapi_init(void) * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv || !priv->qi_present) - return -ENODEV; + if (!priv || !priv->qi_present) { + err = -ENODEV; + goto out_put_dev; + } if (caam_dpaa2) { dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_dev; } /* @@ -2603,6 +2613,8 @@ static int __init caam_qi_algapi_init(void) if (registered) dev_info(priv->qidev, "algorithms registered in /proc/crypto\n"); +out_put_dev: + put_device(ctrldev); return err; } diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index 425d5d974613..c2c1abc68f81 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -25,13 +25,6 @@ #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \ SHA512_DIGEST_SIZE * 2) -#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM) -bool caam_little_end; -EXPORT_SYMBOL(caam_little_end); -bool caam_imx; -EXPORT_SYMBOL(caam_imx); -#endif - /* * This is a a cache of buffers, from which the users of CAAM QI driver * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. 
@@ -151,7 +144,8 @@ static void caam_unmap(struct device *dev, struct scatterlist *src, if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); - dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); + if (dst_nents) + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } @@ -392,13 +386,18 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, mapped_src_nents = 0; } - mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(dev, "unable to map destination\n"); - dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); + if (dst_nents) { + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(dev, "unable to map destination\n"); + dma_unmap_sg(dev, req->src, src_nents, + DMA_TO_DEVICE); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + } else { + mapped_dst_nents = 0; } } else { src_nents = sg_nents_for_len(req->src, req->assoclen + @@ -4503,7 +4502,8 @@ static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) nctx->cb = dpaa2_caam_fqdan_cb; /* Register notification callbacks */ - err = dpaa2_io_service_register(NULL, nctx); + ppriv->dpio = dpaa2_io_service_select(cpu); + err = dpaa2_io_service_register(ppriv->dpio, nctx, dev); if (unlikely(err)) { dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu); nctx->cb = NULL; @@ -4536,7 +4536,7 @@ err: ppriv = per_cpu_ptr(priv->ppriv, cpu); if (!ppriv->nctx.cb) break; - dpaa2_io_service_deregister(NULL, &ppriv->nctx); + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev); } for_each_online_cpu(cpu) { @@ -4556,7 +4556,8 @@ static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) for_each_online_cpu(cpu) { ppriv = per_cpu_ptr(priv->ppriv, cpu); - dpaa2_io_service_deregister(NULL, &ppriv->nctx); + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, + priv->dev); dpaa2_io_store_destroy(ppriv->store); if (++i == priv->num_pairs) @@ -4654,7 +4655,7 @@ static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv) /* Retry while portal is busy */ do { - err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid, + err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid, ppriv->store); } while (err == -EBUSY); @@ -4722,7 +4723,7 @@ static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget) if (cleaned < budget) { napi_complete_done(napi, cleaned); - err = dpaa2_io_service_rearm(NULL, &ppriv->nctx); + err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx); if (unlikely(err)) dev_err(priv->dev, "Notification rearm failed: %d\n", err); @@ -4863,21 +4864,31 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) i = 0; for_each_online_cpu(cpu) { - dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i, - priv->rx_queue_attr[i].fqid, - priv->tx_queue_attr[i].fqid); + u8 j; + + j = i % priv->num_pairs; ppriv = per_cpu_ptr(priv->ppriv, cpu); - ppriv->req_fqid = priv->tx_queue_attr[i].fqid; - ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; - ppriv->prio = i; + ppriv->req_fqid = priv->tx_queue_attr[j].fqid; + + /* + * Allow all cores to enqueue, while only some of them + * will take part in dequeuing. 
+ */ + if (++i > priv->num_pairs) + continue; + + ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid; + ppriv->prio = j; + + dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j, + priv->rx_queue_attr[j].fqid, + priv->tx_queue_attr[j].fqid); ppriv->net_dev.dev = *dev; INIT_LIST_HEAD(&ppriv->net_dev.napi_list); netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, DPAA2_CAAM_NAPI_WEIGHT); - if (++i == priv->num_pairs) - break; } return 0; @@ -5229,7 +5240,8 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) { struct dpaa2_fd fd; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); - int err = 0, i, id; + struct dpaa2_caam_priv_per_cpu *ppriv; + int err = 0, i; if (IS_ERR(req)) return PTR_ERR(req); @@ -5259,23 +5271,18 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); dpaa2_fd_set_flc(&fd, req->flc_dma); - /* - * There is no guarantee that preemption is disabled here, - * thus take action. - */ - preempt_disable(); - id = smp_processor_id() % priv->dpseci_attr.num_tx_queues; + ppriv = this_cpu_ptr(priv->ppriv); for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { - err = dpaa2_io_service_enqueue_fq(NULL, - priv->tx_queue_attr[id].fqid, + err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, &fd); if (err != -EBUSY) break; + + cpu_relax(); } - preempt_enable(); if (unlikely(err)) { - dev_err(dev, "Error enqueuing frame: %d\n", err); + dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err); goto err_out; } diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index 9823bdefd029..20890780fb82 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -76,6 +76,7 @@ struct dpaa2_caam_priv { * @nctx: notification context of response FQ * @store: where dequeued frames are stored * @priv: backpointer to dpaa2_caam_priv + * @dpio: portal used for data path operations */ struct dpaa2_caam_priv_per_cpu { struct napi_struct napi; @@ -86,6 +87,7 @@ struct dpaa2_caam_priv_per_cpu { struct dpaa2_io_notification_ctx nctx; struct dpaa2_io_store *store; struct dpaa2_caam_priv *priv; + struct dpaa2_io *dpio; }; /* diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index bb1a2cdf1951..b1eadc6652b5 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -3,7 +3,7 @@ * caam - Freescale FSL CAAM support for ahash functions of crypto API * * Copyright 2011 Freescale Semiconductor, Inc. - * Copyright 2018 NXP + * Copyright 2018-2019 NXP * * Based on caamalg.c crypto API driver. 
* @@ -98,13 +98,14 @@ struct caam_hash_ctx { u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; + u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; dma_addr_t sh_desc_update_dma ____cacheline_aligned; dma_addr_t sh_desc_update_first_dma; dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_digest_dma; + dma_addr_t key_dma; enum dma_data_direction dir; struct device *jrdev; - u8 key[CAAM_MAX_HASH_KEY_SIZE]; int ctx_len; struct alginfo adata; }; @@ -113,6 +114,7 @@ struct caam_hash_ctx { struct caam_hash_state { dma_addr_t buf_dma; dma_addr_t ctx_dma; + int ctx_dma_len; u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; int buflen_0; u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; @@ -158,6 +160,11 @@ static inline int *alt_buflen(struct caam_hash_state *state) return state->current_buf ? &state->buflen_0 : &state->buflen_1; } +static inline bool is_cmac_aes(u32 algtype) +{ + return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == + (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); +} /* Common job descriptor seq in/out ptr routines */ /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ @@ -165,6 +172,7 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, struct caam_hash_state *state, int ctx_len) { + state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, state->ctx_dma)) { @@ -178,18 +186,6 @@ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, return 0; } -/* Map req->result, and append seq_out_ptr command that points to it */ -static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, - u8 *result, int digestsize) -{ - dma_addr_t dst_dma; - - dst_dma = dma_map_single(jrdev, result, digestsize, DMA_FROM_DEVICE); - append_seq_out_ptr(desc, dst_dma, digestsize, 0); - - return dst_dma; -} - /* Map current buffer in state (if length > 0) and put it in link table */ static inline int buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, @@ -218,6 +214,7 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev, struct caam_hash_state *state, int ctx_len, struct sec4_sg_entry *sec4_sg, u32 flag) { + state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); if (dma_mapping_error(jrdev, state->ctx_dma)) { dev_err(jrdev, "unable to map ctx\n"); @@ -292,14 +289,119 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash) return 0; } +static int axcbc_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + u32 *desc; + + /* key is loaded from memory for UPDATE and FINALIZE states */ + ctx->adata.key_dma = ctx->key_dma; + + /* shared descriptor for ahash_update */ + desc = ctx->sh_desc_update; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, + ctx->ctx_len, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* shared descriptor for ahash_{final,finup} */ + desc = ctx->sh_desc_fin; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, + digestsize, ctx->ctx_len, 0); + 
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* key is immediate data for INIT and INITFINAL states */ + ctx->adata.key_virt = ctx->key; + + /* shared descriptor for first invocation of ahash_update */ + desc = ctx->sh_desc_update_first; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len, ctx->key_dma); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + + /* shared descriptor for ahash_digest */ + desc = ctx->sh_desc_digest; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, + digestsize, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), + 1); + return 0; +} + +static int acmac_set_sh_desc(struct crypto_ahash *ahash) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + int digestsize = crypto_ahash_digestsize(ahash); + struct device *jrdev = ctx->jrdev; + u32 *desc; + + /* shared descriptor for ahash_update */ + desc = ctx->sh_desc_update; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, + ctx->ctx_len, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for ahash_{final,finup} */ + desc = ctx->sh_desc_fin; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, + digestsize, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for first invocation of ahash_update */ + desc = ctx->sh_desc_update_first; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, + ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + /* shared descriptor for ahash_digest */ + desc = ctx->sh_desc_digest; + cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, + digestsize, ctx->ctx_len, 0); + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, + desc_bytes(desc), ctx->dir); + print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, desc, + desc_bytes(desc), 1); + + return 0; +} + /* Digest hash size if it is too large */ -static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, - u32 *keylen, u8 *key_out, u32 digestsize) +static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, + u32 digestsize) { struct device *jrdev = ctx->jrdev; u32 *desc; struct split_key_result result; - dma_addr_t src_dma, dst_dma; + dma_addr_t key_dma; int ret; desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); @@ -310,18 +412,9 @@ static int hash_digest_key(struct 
caam_hash_ctx *ctx, const u8 *key_in, init_job_desc(desc, 0); - src_dma = dma_map_single(jrdev, (void *)key_in, *keylen, - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, src_dma)) { - dev_err(jrdev, "unable to map key input memory\n"); - kfree(desc); - return -ENOMEM; - } - dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize, - DMA_FROM_DEVICE); - if (dma_mapping_error(jrdev, dst_dma)) { - dev_err(jrdev, "unable to map key output memory\n"); - dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); + key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL); + if (dma_mapping_error(jrdev, key_dma)) { + dev_err(jrdev, "unable to map key memory\n"); kfree(desc); return -ENOMEM; } @@ -329,16 +422,16 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, /* Job descriptor to perform unkeyed hash on key_in */ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | OP_ALG_AS_INITFINAL); - append_seq_in_ptr(desc, src_dma, *keylen, 0); + append_seq_in_ptr(desc, key_dma, *keylen, 0); append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); - append_seq_out_ptr(desc, dst_dma, digestsize, 0); + append_seq_out_ptr(desc, key_dma, digestsize, 0); append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); #ifdef DEBUG print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1); + DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -354,12 +447,10 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, #ifdef DEBUG print_hex_dump(KERN_ERR, "digested key@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key_in, - digestsize, 1); + DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); #endif } - dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE); - dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE); + dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); *keylen = digestsize; @@ -383,13 +474,10 @@ static int ahash_setkey(struct crypto_ahash *ahash, #endif if (keylen > blocksize) { - hashed_key = kmalloc_array(digestsize, - sizeof(*hashed_key), - GFP_KERNEL | GFP_DMA); + hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); if (!hashed_key) return -ENOMEM; - ret = hash_digest_key(ctx, key, &keylen, hashed_key, - digestsize); + ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); if (ret) goto bad_free_key; key = hashed_key; @@ -424,9 +512,39 @@ static int ahash_setkey(struct crypto_ahash *ahash, return -EINVAL; } +static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct device *jrdev = ctx->jrdev; + + memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); + ctx->adata.keylen = keylen; + + print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); + + return axcbc_set_sh_desc(ahash); +} + +static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); + + /* key is immediate data for all cmac shared descriptors */ + ctx->adata.key_virt = key; + ctx->adata.keylen = keylen; + + print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); + + 
return acmac_set_sh_desc(ahash); +} + /* * ahash_edesc - s/w-extended ahash descriptor - * @dst_dma: physical mapped address of req->result * @sec4_sg_dma: physical mapped address of h/w link table * @src_nents: number of segments in input scatterlist * @sec4_sg_bytes: length of dma mapped sec4_sg space @@ -434,7 +552,6 @@ static int ahash_setkey(struct crypto_ahash *ahash, * @sec4_sg: h/w link table */ struct ahash_edesc { - dma_addr_t dst_dma; dma_addr_t sec4_sg_dma; int src_nents; int sec4_sg_bytes; @@ -450,8 +567,6 @@ static inline void ahash_unmap(struct device *dev, if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); - if (edesc->dst_dma) - dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE); if (edesc->sec4_sg_bytes) dma_unmap_single(dev, edesc->sec4_sg_dma, @@ -468,12 +583,10 @@ static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len, u32 flag) { - struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); - struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct caam_hash_state *state = ahash_request_ctx(req); if (state->ctx_dma) { - dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); + dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); state->ctx_dma = 0; } ahash_unmap(dev, edesc, req, dst_len); @@ -486,9 +599,9 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); #ifdef DEBUG struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -497,17 +610,14 @@ static void ahash_done(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + memcpy(req->result, state->caam_ctx, digestsize); kfree(edesc); #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - if (req->result) - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->result, - digestsize, 1); #endif req->base.complete(&req->base, err); @@ -555,9 +665,9 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); + struct caam_hash_state *state = ahash_request_ctx(req); #ifdef DEBUG struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); - struct caam_hash_state *state = ahash_request_ctx(req); dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -566,17 +676,14 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, if (err) caam_jr_strstatus(jrdev, err); - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); + memcpy(req->result, state->caam_ctx, digestsize); kfree(edesc); #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); - if (req->result) - print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, req->result, - digestsize, 1); #endif 
req->base.complete(&req->base, err); @@ -688,6 +795,7 @@ static int ahash_update_ctx(struct ahash_request *req) u8 *buf = current_buf(state); int *buflen = current_buflen(state); u8 *next_buf = alt_buf(state); + int blocksize = crypto_ahash_blocksize(ahash); int *next_buflen = alt_buflen(state), last_buflen; int in_len = *buflen + req->nbytes, to_hash; u32 *desc; @@ -696,9 +804,20 @@ static int ahash_update_ctx(struct ahash_request *req) int ret = 0; last_buflen = *next_buflen; - *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + *next_buflen = in_len & (blocksize - 1); to_hash = in_len - *next_buflen; + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - (*next_buflen)); @@ -801,7 +920,7 @@ static int ahash_update_ctx(struct ahash_request *req) #endif return ret; - unmap_ctx: +unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); kfree(edesc); return ret; @@ -837,7 +956,7 @@ static int ahash_final_ctx(struct ahash_request *req) edesc->sec4_sg_bytes = sec4_sg_bytes; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, - edesc->sec4_sg, DMA_TO_DEVICE); + edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; @@ -857,14 +976,7 @@ static int ahash_final_ctx(struct ahash_request *req) append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, LDST_SGF); - - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); - ret = -ENOMEM; - goto unmap_ctx; - } + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -877,7 +989,7 @@ static int ahash_final_ctx(struct ahash_request *req) return -EINPROGRESS; unmap_ctx: - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } @@ -931,7 +1043,7 @@ static int ahash_finup_ctx(struct ahash_request *req) edesc->src_nents = src_nents; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, - edesc->sec4_sg, DMA_TO_DEVICE); + edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; @@ -945,13 +1057,7 @@ static int ahash_finup_ctx(struct ahash_request *req) if (ret) goto unmap_ctx; - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); - ret = -ENOMEM; - goto unmap_ctx; - } + append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -964,7 +1070,7 @@ static int ahash_finup_ctx(struct ahash_request *req) return -EINPROGRESS; unmap_ctx: - ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } @@ -1023,10 +1129,8 @@ static int ahash_digest(struct ahash_request *req) desc = edesc->hw_desc; - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, 
digestsize); + if (ret) { ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return -ENOMEM; @@ -1041,7 +1145,7 @@ static int ahash_digest(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1083,12 +1187,9 @@ static int ahash_final_no_ctx(struct ahash_request *req) append_seq_in_ptr(desc, state->buf_dma, buflen, 0); } - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) goto unmap; - } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1099,7 +1200,7 @@ static int ahash_final_no_ctx(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1122,6 +1223,7 @@ static int ahash_update_no_ctx(struct ahash_request *req) GFP_KERNEL : GFP_ATOMIC; u8 *buf = current_buf(state); int *buflen = current_buflen(state); + int blocksize = crypto_ahash_blocksize(ahash); u8 *next_buf = alt_buf(state); int *next_buflen = alt_buflen(state); int in_len = *buflen + req->nbytes, to_hash; @@ -1130,9 +1232,20 @@ static int ahash_update_no_ctx(struct ahash_request *req) u32 *desc; int ret = 0; - *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); + *next_buflen = in_len & (blocksize - 1); to_hash = in_len - *next_buflen; + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - *next_buflen); @@ -1298,12 +1411,9 @@ static int ahash_finup_no_ctx(struct ahash_request *req) goto unmap; } - edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, - digestsize); - if (dma_mapping_error(jrdev, edesc->dst_dma)) { - dev_err(jrdev, "unable to map dst\n"); + ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); + if (ret) goto unmap; - } #ifdef DEBUG print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", @@ -1314,7 +1424,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req) if (!ret) { ret = -EINPROGRESS; } else { - ahash_unmap(jrdev, edesc, req, digestsize); + ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); kfree(edesc); } @@ -1338,15 +1448,26 @@ static int ahash_update_first(struct ahash_request *req) u8 *next_buf = alt_buf(state); int *next_buflen = alt_buflen(state); int to_hash; + int blocksize = crypto_ahash_blocksize(ahash); u32 *desc; int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = 0; - *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - - 1); + *next_buflen = req->nbytes & (blocksize - 1); to_hash = req->nbytes - *next_buflen; + /* + * For XCBC and CMAC, if to_hash is multiple of block size, + * keep last block in internal buffer + */ + if ((is_xcbc_aes(ctx->adata.algtype) || + is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && + (*next_buflen == 0)) { + *next_buflen = blocksize; + to_hash -= blocksize; + } + if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - *next_buflen); @@ 
-1446,6 +1567,7 @@ static int ahash_init(struct ahash_request *req) state->final = ahash_final_no_ctx; state->ctx_dma = 0; + state->ctx_dma_len = 0; state->current_buf = 0; state->buf_dma = 0; state->buflen_0 = 0; @@ -1654,6 +1776,44 @@ static struct caam_hash_template driver_hash[] = { }, }, .alg_type = OP_ALG_ALGSEL_MD5, + }, { + .hmac_name = "xcbc(aes)", + .hmac_driver_name = "xcbc-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = axcbc_setkey, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, + }, { + .hmac_name = "cmac(aes)", + .hmac_driver_name = "cmac-aes-caam", + .blocksize = AES_BLOCK_SIZE, + .template_ahash = { + .init = ahash_init, + .update = ahash_update, + .final = ahash_final, + .finup = ahash_finup, + .digest = ahash_digest, + .export = ahash_export, + .import = ahash_import, + .setkey = acmac_setkey, + .halg = { + .digestsize = AES_BLOCK_SIZE, + .statesize = sizeof(struct caam_export_state), + }, + }, + .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, }, }; @@ -1695,14 +1855,45 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) } priv = dev_get_drvdata(ctx->jrdev->parent); - ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + if (is_xcbc_aes(caam_hash->alg_type)) { + ctx->dir = DMA_TO_DEVICE; + ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; + ctx->ctx_len = 48; + + ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, + ARRAY_SIZE(ctx->key), + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) { + dev_err(ctx->jrdev, "unable to map key\n"); + caam_jr_free(ctx->jrdev); + return -ENOMEM; + } + } else if (is_cmac_aes(caam_hash->alg_type)) { + ctx->dir = DMA_TO_DEVICE; + ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; + ctx->ctx_len = 32; + } else { + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; + ctx->ctx_len = runninglen[(ctx->adata.algtype & + OP_ALG_ALGSEL_SUBMASK) >> + OP_ALG_ALGSEL_SHIFT]; + } dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, - offsetof(struct caam_hash_ctx, - sh_desc_update_dma), + offsetof(struct caam_hash_ctx, key), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->jrdev, dma_addr)) { dev_err(ctx->jrdev, "unable to map shared descriptors\n"); + + if (is_xcbc_aes(caam_hash->alg_type)) + dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, + ARRAY_SIZE(ctx->key), + DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + caam_jr_free(ctx->jrdev); return -ENOMEM; } @@ -1716,16 +1907,14 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm) ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, sh_desc_digest); - /* copy descriptor header template value */ - ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; - - ctx->ctx_len = runninglen[(ctx->adata.algtype & - OP_ALG_ALGSEL_SUBMASK) >> - OP_ALG_ALGSEL_SHIFT]; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct caam_hash_state)); - return ahash_set_sh_desc(ahash); + + /* + * For keyed hash algorithms shared descriptors + * will be created later in setkey() callback + */ + return alg->setkey ? 
0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) @@ -1733,9 +1922,12 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm) struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, - offsetof(struct caam_hash_ctx, - sh_desc_update_dma), + offsetof(struct caam_hash_ctx, key), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); + if (is_xcbc_aes(ctx->adata.algtype)) + dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma, + ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); } @@ -1801,7 +1993,6 @@ static int __init caam_algapi_hash_init(void) { struct device_node *dev_node; struct platform_device *pdev; - struct device *ctrldev; int i = 0, err = 0; struct caam_drv_private *priv; unsigned int md_limit = SHA512_DIGEST_SIZE; @@ -1820,16 +2011,17 @@ static int __init caam_algapi_hash_init(void) return -ENODEV; } - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); + priv = dev_get_drvdata(&pdev->dev); of_node_put(dev_node); /* * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* * Register crypto algorithms the device supports. First, identify @@ -1851,8 +2043,10 @@ static int __init caam_algapi_hash_init(void) * Skip registration of any hashing algorithms if MD block * is not present. */ - if (!md_inst) - return -ENODEV; + if (!md_inst) { + err = -ENODEV; + goto out_put_dev; + } /* Limit digest size based on LP256 */ if (md_vid == CHA_VER_VID_MD_LP256) @@ -1866,14 +2060,16 @@ static int __init caam_algapi_hash_init(void) struct caam_hash_template *alg = driver_hash + i; /* If MD size is not supported by device, skip registration */ - if (alg->template_ahash.halg.digestsize > md_limit) + if (is_mdha(alg->alg_type) && + alg->template_ahash.halg.digestsize > md_limit) continue; /* register hmac version */ t_alg = caam_hash_alloc(alg, true); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); - pr_warn("%s alg allocation failed\n", alg->driver_name); + pr_warn("%s alg allocation failed\n", + alg->hmac_driver_name); continue; } @@ -1886,6 +2082,9 @@ static int __init caam_algapi_hash_init(void) } else list_add_tail(&t_alg->entry, &hash_list); + if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) + continue; + /* register unkeyed version */ t_alg = caam_hash_alloc(alg, false); if (IS_ERR(t_alg)) { @@ -1904,6 +2103,8 @@ static int __init caam_algapi_hash_init(void) list_add_tail(&t_alg->entry, &hash_list); } +out_put_dev: + put_device(&pdev->dev); return err; } diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c index a12f7959a2c3..71d018343ee4 100644 --- a/drivers/crypto/caam/caamhash_desc.c +++ b/drivers/crypto/caam/caamhash_desc.c @@ -2,7 +2,7 @@ /* * Shared descriptors for ahash algorithms * - * Copyright 2017 NXP + * Copyright 2017-2019 NXP */ #include "compat.h" @@ -75,6 +75,72 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, } EXPORT_SYMBOL(cnstr_shdsc_ahash); +/** + * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based + * hash algorithms + * @desc: pointer to buffer used for descriptor construction + * @adata: pointer to authentication transform definitions. 
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} + * @digestsize: algorithm's digest size + * @ctx_len: size of Context Register + * @key_dma: I/O Virtual Address of the key + */ +void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, + int digestsize, int ctx_len, dma_addr_t key_dma) +{ + u32 *skip_key_load; + + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); + + /* Skip loading of key, context if already shared */ + skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); + + if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) { + append_key_as_imm(desc, adata->key_virt, adata->keylen, + adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); + } else { /* UPDATE, FINALIZE */ + if (is_xcbc_aes(adata->algtype)) + /* Load K1 */ + append_key(desc, adata->key_dma, adata->keylen, + CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC); + else /* CMAC */ + append_key_as_imm(desc, adata->key_virt, adata->keylen, + adata->keylen, CLASS_1 | + KEY_DEST_CLASS_REG); + /* Restore context */ + append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + } + + set_jump_tgt_here(desc, skip_key_load); + + /* Class 1 operation */ + append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT); + + /* + * Load from buf and/or src and write to req->result or state->context + * Calculate remaining bytes to read + */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); + + /* Read remaining bytes */ + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 | + FIFOLD_TYPE_MSG | FIFOLDST_VLF); + + /* + * Save context: + * - xcbc: partial hash, keys K2 and K3 + * - cmac: partial hash, constant L = E(K,0) + */ + append_seq_store(desc, digestsize, LDST_CLASS_1_CCB | + LDST_SRCDST_BYTE_CONTEXT); + if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) + /* Save K1 */ + append_fifo_store(desc, key_dma, adata->keylen, + LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); +} +EXPORT_SYMBOL(cnstr_shdsc_sk_hash); + MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("FSL CAAM ahash descriptors support"); MODULE_AUTHOR("NXP Semiconductors"); diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h index 631fc1ac312c..6947ee1f200c 100644 --- a/drivers/crypto/caam/caamhash_desc.h +++ b/drivers/crypto/caam/caamhash_desc.h @@ -15,7 +15,15 @@ #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ) #define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ) +static inline bool is_xcbc_aes(u32 algtype) +{ + return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == + (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC); +} + void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, int digestsize, int ctx_len, bool import_ctx, int era); +void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, + int digestsize, int ctx_len, dma_addr_t key_dma); #endif /* _CAAMHASH_DESC_H_ */ diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 77ab28a2811a..58285642306e 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -1042,8 +1042,10 @@ static int __init caam_pkc_init(void) * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* Determine public key hardware accelerator presence. 
*/ if (priv->era < 10) @@ -1053,8 +1055,10 @@ static int __init caam_pkc_init(void) pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK; /* Do not register algorithms if PKHA is not present. */ - if (!pk_inst) - return -ENODEV; + if (!pk_inst) { + err = -ENODEV; + goto out_put_dev; + } err = crypto_register_akcipher(&caam_rsa); if (err) @@ -1063,6 +1067,8 @@ static int __init caam_pkc_init(void) else dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); +out_put_dev: + put_device(ctrldev); return err; } diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c index a387c8d49a62..95eb5402c59f 100644 --- a/drivers/crypto/caam/caamrng.c +++ b/drivers/crypto/caam/caamrng.c @@ -308,7 +308,6 @@ static int __init caam_rng_init(void) struct device *dev; struct device_node *dev_node; struct platform_device *pdev; - struct device *ctrldev; struct caam_drv_private *priv; u32 rng_inst; int err; @@ -326,16 +325,17 @@ static int __init caam_rng_init(void) return -ENODEV; } - ctrldev = &pdev->dev; - priv = dev_get_drvdata(ctrldev); + priv = dev_get_drvdata(&pdev->dev); of_node_put(dev_node); /* * If priv is NULL, it's probably because the caam driver wasn't * properly initialized (e.g. RNG4 init failed). Thus, bail out here. */ - if (!priv) - return -ENODEV; + if (!priv) { + err = -ENODEV; + goto out_put_dev; + } /* Check for an instantiated RNG before registration */ if (priv->era < 10) @@ -344,13 +344,16 @@ static int __init caam_rng_init(void) else rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK; - if (!rng_inst) - return -ENODEV; + if (!rng_inst) { + err = -ENODEV; + goto out_put_dev; + } dev = caam_jr_alloc(); if (IS_ERR(dev)) { pr_err("Job Ring Device allocation for transform failed\n"); - return PTR_ERR(dev); + err = PTR_ERR(dev); + goto out_put_dev; } rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL); if (!rng_ctx) { @@ -361,6 +364,7 @@ static int __init caam_rng_init(void) if (err) goto free_rng_ctx; + put_device(&pdev->dev); dev_info(dev, "registering rng-caam\n"); return hwrng_register(&caam_rng); @@ -368,6 +372,8 @@ free_rng_ctx: kfree(rng_ctx); free_caam_alloc: caam_jr_free(dev); +out_put_dev: + put_device(&pdev->dev); return err; } diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index 87d9efe4c7aa..8639b2df0371 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 16bbc72f041a..858bdc9ab4a3 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -18,12 +18,8 @@ #include "desc_constr.h" #include "ctrl.h" -bool caam_little_end; -EXPORT_SYMBOL(caam_little_end); bool caam_dpaa2; EXPORT_SYMBOL(caam_dpaa2); -bool caam_imx; -EXPORT_SYMBOL(caam_imx); #ifdef CONFIG_CAAM_QI #include "qi.h" @@ -863,27 +859,18 @@ static int caam_probe(struct platform_device *pdev) /* Internal covering keys (useful in non-secure mode only) */ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); - ctrlpriv->ctl_kek = debugfs_create_blob("kek", - S_IRUSR | - S_IRGRP | S_IROTH, - ctrlpriv->ctl, - &ctrlpriv->ctl_kek_wrap); + debugfs_create_blob("kek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl, + &ctrlpriv->ctl_kek_wrap); ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0]; ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); - 
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek", - S_IRUSR | - S_IRGRP | S_IROTH, - ctrlpriv->ctl, - &ctrlpriv->ctl_tkek_wrap); + debugfs_create_blob("tkek", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl, + &ctrlpriv->ctl_tkek_wrap); ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0]; ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); - ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk", - S_IRUSR | - S_IRGRP | S_IROTH, - ctrlpriv->ctl, - &ctrlpriv->ctl_tdsk_wrap); + debugfs_create_blob("tdsk", S_IRUSR | S_IRGRP | S_IROTH, ctrlpriv->ctl, + &ctrlpriv->ctl_tdsk_wrap); #endif return 0; diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 7e8d690f2827..21a70fd32f5d 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type, #endif /* DEBUG */ EXPORT_SYMBOL(caam_dump_sg); +bool caam_little_end; +EXPORT_SYMBOL(caam_little_end); + +bool caam_imx; +EXPORT_SYMBOL(caam_imx); + static const struct { u8 value; const char *error_text; diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index babc78abd155..5869ad58d497 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -106,7 +106,6 @@ struct caam_drv_private { struct dentry *dfs_root; struct dentry *ctl; /* controller dir */ struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap; - struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk; #endif }; diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 312b5f042f31..8d0713fae6ac 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -48,7 +48,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, { u32 *desc; struct split_key_result result; - dma_addr_t dma_addr_in, dma_addr_out; + dma_addr_t dma_addr; int ret = -ENOMEM; adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); @@ -71,22 +71,17 @@ int gen_split_key(struct device *jrdev, u8 *key_out, return ret; } - dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, - DMA_TO_DEVICE); - if (dma_mapping_error(jrdev, dma_addr_in)) { - dev_err(jrdev, "unable to map key input memory\n"); - goto out_free; - } + memcpy(key_out, key_in, keylen); - dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad, - DMA_FROM_DEVICE); - if (dma_mapping_error(jrdev, dma_addr_out)) { - dev_err(jrdev, "unable to map key output memory\n"); - goto out_unmap_in; + dma_addr = dma_map_single(jrdev, key_out, adata->keylen_pad, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(jrdev, dma_addr)) { + dev_err(jrdev, "unable to map key memory\n"); + goto out_free; } init_job_desc(desc, 0); - append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); + append_key(desc, dma_addr, keylen, CLASS_2 | KEY_DEST_CLASS_REG); /* Sets MDHA up into an HMAC-INIT */ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) | @@ -104,12 +99,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, * FIFO_STORE with the explicit split-key content store * (0x26 output type) */ - append_fifo_store(desc, dma_addr_out, adata->keylen, + append_fifo_store(desc, dma_addr, adata->keylen, LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); #ifdef DEBUG - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); #endif @@ -129,10 +122,7 @@ int 
gen_split_key(struct device *jrdev, u8 *key_out, #endif } - dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad, - DMA_FROM_DEVICE); -out_unmap_in: - dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); + dma_unmap_single(jrdev, dma_addr, adata->keylen_pad, DMA_BIDIRECTIONAL); out_free: kfree(desc); return ret; diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index b84e6c8b1e13..7cb8b1755e57 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -318,7 +318,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) /* Create a new req FQ in parked state */ new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq, drv_ctx->context_a, 0); - if (unlikely(IS_ERR_OR_NULL(new_fq))) { + if (IS_ERR_OR_NULL(new_fq)) { dev_err(qidev, "FQ allocation for shdesc update failed\n"); return PTR_ERR(new_fq); } @@ -431,7 +431,7 @@ struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, /* Attach request FQ */ drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc, QMAN_INITFQ_FLAG_SCHED); - if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) { + if (IS_ERR_OR_NULL(drv_ctx->req_fq)) { dev_err(qidev, "create_caam_req_fq failed\n"); dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL); kfree(drv_ctx); diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c index 0196b992280f..848ec93d4333 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c @@ -55,31 +55,14 @@ void nitrox_debugfs_exit(struct nitrox_device *ndev) ndev->debugfs_dir = NULL; } -int nitrox_debugfs_init(struct nitrox_device *ndev) +void nitrox_debugfs_init(struct nitrox_device *ndev) { - struct dentry *dir, *f; + struct dentry *dir; dir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!dir) - return -ENOMEM; ndev->debugfs_dir = dir; - f = debugfs_create_file("firmware", 0400, dir, ndev, - &firmware_fops); - if (!f) - goto err; - f = debugfs_create_file("device", 0400, dir, ndev, - &device_fops); - if (!f) - goto err; - f = debugfs_create_file("stats", 0400, dir, ndev, - &stats_fops); - if (!f) - goto err; - - return 0; - -err: - nitrox_debugfs_exit(ndev); - return -ENODEV; + debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops); + debugfs_create_file("device", 0400, dir, ndev, &device_fops); + debugfs_create_file("stats", 0400, dir, ndev, &stats_fops); } diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h index a8d85ffa619c..f177b79bbab0 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h +++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h @@ -5,12 +5,11 @@ #include "nitrox_dev.h" #ifdef CONFIG_DEBUG_FS -int nitrox_debugfs_init(struct nitrox_device *ndev); +void nitrox_debugfs_init(struct nitrox_device *ndev); void nitrox_debugfs_exit(struct nitrox_device *ndev); #else -static inline int nitrox_debugfs_init(struct nitrox_device *ndev) +static inline void nitrox_debugfs_init(struct nitrox_device *ndev) { - return 0; } static inline void nitrox_debugfs_exit(struct nitrox_device *ndev) diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index 014e9863c20e..faa78f651238 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -404,9 +404,7 @@ static int nitrox_probe(struct pci_dev *pdev, if (err) goto pf_hw_fail; - err = nitrox_debugfs_init(ndev); - if (err) - goto pf_hw_fail; + 
nitrox_debugfs_init(ndev); /* clear the statistics */ atomic64_set(&ndev->stats.posted, 0); diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index fe070d75c842..4c97478d44bd 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c @@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq) struct nitrox_device *ndev = cmdq->ndev; struct nitrox_softreq *sr; int req_completed = 0, err = 0, budget; + completion_t callback; + void *cb_arg; /* check all pending requests */ budget = atomic_read(&cmdq->pending_count); @@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq) smp_mb__after_atomic(); /* remove from response list */ response_list_del(sr, cmdq); - /* ORH error code */ err = READ_ONCE(*sr->resp.orh) & 0xff; - - if (sr->callback) - sr->callback(sr->cb_arg, err); + callback = sr->callback; + cb_arg = sr->cb_arg; softreq_destroy(sr); + if (callback) + callback(cb_arg, err); req_completed++; } diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index be055b9547f6..a8447a3cf366 100644 --- a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -351,6 +351,7 @@ static struct pci_driver zip_driver = { static struct crypto_alg zip_comp_deflate = { .cra_name = "deflate", + .cra_driver_name = "deflate-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, @@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = { static struct crypto_alg zip_comp_lzs = { .cra_name = "lzs", + .cra_driver_name = "lzs-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, @@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = { .decompress = zip_scomp_decompress, .base = { .cra_name = "deflate", - .cra_driver_name = "deflate-scomp", + .cra_driver_name = "deflate-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } @@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = { .decompress = zip_scomp_decompress, .base = { .cra_name = "lzs", - .cra_driver_name = "lzs-scomp", + .cra_driver_name = "lzs-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } @@ -618,41 +620,23 @@ static const struct file_operations zip_regs_fops = { /* Root directory for thunderx_zip debugfs entry */ static struct dentry *zip_debugfs_root; -static int __init zip_debugfs_init(void) +static void __init zip_debugfs_init(void) { - struct dentry *zip_stats, *zip_clear, *zip_regs; - if (!debugfs_initialized()) - return -ENODEV; + return; zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL); - if (!zip_debugfs_root) - return -ENOMEM; /* Creating files for entries inside thunderx_zip directory */ - zip_stats = debugfs_create_file("zip_stats", 0444, - zip_debugfs_root, - NULL, &zip_stats_fops); - if (!zip_stats) - goto failed_to_create; - - zip_clear = debugfs_create_file("zip_clear", 0444, - zip_debugfs_root, - NULL, &zip_clear_fops); - if (!zip_clear) - goto failed_to_create; - - zip_regs = debugfs_create_file("zip_regs", 0444, - zip_debugfs_root, - NULL, &zip_regs_fops); - if (!zip_regs) - goto failed_to_create; + debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL, + &zip_stats_fops); - return 0; + debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL, + &zip_clear_fops); + + debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL, + 
&zip_regs_fops); -failed_to_create: - debugfs_remove_recursive(zip_debugfs_root); - return -ENOENT; } static void __exit zip_debugfs_exit(void) @@ -661,13 +645,8 @@ static void __exit zip_debugfs_exit(void) } #else -static int __init zip_debugfs_init(void) -{ - return 0; -} - +static void __init zip_debugfs_init(void) { } static void __exit zip_debugfs_exit(void) { } - #endif /* debugfs - end */ @@ -691,17 +670,10 @@ static int __init zip_init_module(void) } /* comp-decomp statistics are handled with debugfs interface */ - ret = zip_debugfs_init(); - if (ret < 0) { - zip_err("ZIP: debugfs initialization failed\n"); - goto err_crypto_unregister; - } + zip_debugfs_init(); return ret; -err_crypto_unregister: - zip_unregister_compression_device(); - err_pci_unregister: pci_unregister_driver(&zip_driver); return ret; diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c index 9108015e56cc..f6e252c1d6fb 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support * - * Copyright (C) 2013 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c index ae87b741f9d5..c2ff551d215b 100644 --- a/drivers/crypto/ccp/ccp-crypto-des3.c +++ b/drivers/crypto/ccp/ccp-crypto-des3.c @@ -57,7 +57,7 @@ static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key, if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c index 2ca64bb57d2e..10a61cd54fce 100644 --- a/drivers/crypto/ccp/ccp-crypto-sha.c +++ b/drivers/crypto/ccp/ccp-crypto-sha.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) SHA crypto API support * - * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky * Author: Gary R Hook diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c index 1a734bd2070a..4bd26af7098d 100644 --- a/drivers/crypto/ccp/ccp-debugfs.c +++ b/drivers/crypto/ccp/ccp-debugfs.c @@ -286,10 +286,7 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) { struct ccp_cmd_queue *cmd_q; char name[MAX_NAME_LEN + 1]; - struct dentry *debugfs_info; - struct dentry *debugfs_stats; struct dentry *debugfs_q_instance; - struct dentry *debugfs_q_stats; int i; if (!debugfs_initialized()) @@ -299,24 +296,14 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) if (!ccp_debugfs_dir) ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); mutex_unlock(&ccp_debugfs_lock); - if (!ccp_debugfs_dir) - return; ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir); - if (!ccp->debugfs_instance) - goto err; - debugfs_info = debugfs_create_file("info", 0400, - ccp->debugfs_instance, ccp, - &ccp_debugfs_info_ops); - if (!debugfs_info) - goto err; + debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp, + &ccp_debugfs_info_ops); - debugfs_stats = debugfs_create_file("stats", 0600, - ccp->debugfs_instance, ccp, - &ccp_debugfs_stats_ops); - if (!debugfs_stats) - goto err; + debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp, + &ccp_debugfs_stats_ops); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; @@ -325,21 +312,12 @@ void ccp5_debugfs_setup(struct ccp_device *ccp) debugfs_q_instance = debugfs_create_dir(name, ccp->debugfs_instance); - if (!debugfs_q_instance) - goto err; - - debugfs_q_stats = - debugfs_create_file("stats", 0600, - debugfs_q_instance, cmd_q, - &ccp_debugfs_queue_ops); - if (!debugfs_q_stats) - goto err; + + debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q, + &ccp_debugfs_queue_ops); } return; - -err: - debugfs_remove_recursive(ccp->debugfs_instance); } void ccp5_debugfs_destroy(void) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 0ea43cdeb05f..267a367bd076 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -1,7 +1,7 @@ /* * AMD Cryptographic Coprocessor (CCP) driver * - * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index b16be8a11d92..fadf859a14b8 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -1,7 +1,7 @@ /* * AMD Platform Security Processor (PSP) interface * - * Copyright (C) 2016-2017 Advanced Micro Devices, Inc. + * Copyright (C) 2016,2018 Advanced Micro Devices, Inc. 
* * Author: Brijesh Singh * @@ -437,6 +437,7 @@ static int sev_get_api_version(void) psp_master->api_major = status->api_major; psp_master->api_minor = status->api_minor; psp_master->build = status->build; + psp_master->sev_state = status->state; return 0; } @@ -857,15 +858,15 @@ static int sev_misc_init(struct psp_device *psp) return 0; } -static int sev_init(struct psp_device *psp) +static int psp_check_sev_support(struct psp_device *psp) { /* Check if device supports SEV feature */ if (!(ioread32(psp->io_regs + psp->vdata->feature_reg) & 1)) { - dev_dbg(psp->dev, "device does not support SEV\n"); - return 1; + dev_dbg(psp->dev, "psp does not support SEV\n"); + return -ENODEV; } - return sev_misc_init(psp); + return 0; } int psp_dev_init(struct sp_device *sp) @@ -890,6 +891,10 @@ int psp_dev_init(struct sp_device *sp) psp->io_regs = sp->io_map; + ret = psp_check_sev_support(psp); + if (ret) + goto e_disable; + /* Disable and clear interrupts until ready */ iowrite32(0, psp->io_regs + psp->vdata->inten_reg); iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); @@ -901,7 +906,7 @@ int psp_dev_init(struct sp_device *sp) goto e_err; } - ret = sev_init(psp); + ret = sev_misc_init(psp); if (ret) goto e_irq; @@ -922,6 +927,11 @@ e_err: dev_notice(dev, "psp initialization failed\n"); + return ret; + +e_disable: + sp->psp_data = NULL; + return ret; } @@ -964,6 +974,21 @@ void psp_pci_init(void) if (sev_get_api_version()) goto err; + /* + * If platform is not in UNINIT state then firmware upgrade and/or + * platform INIT command will fail. These command require UNINIT state. + * + * In a normal boot we should never run into case where the firmware + * is not in UNINIT state on boot. But in case of kexec boot, a reboot + * may not go through a typical shutdown sequence and may leave the + * firmware in INIT or WORKING state. + */ + + if (psp_master->sev_state != SEV_STATE_UNINIT) { + sev_platform_shutdown(NULL); + psp_master->sev_state = SEV_STATE_UNINIT; + } + if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) && sev_update_firmware(psp_master->dev) == 0) sev_get_api_version(); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 8b53a9674ecb..f5afeccf42a1 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -1,7 +1,7 @@ /* * AMD Platform Security Processor (PSP) interface driver * - * Copyright (C) 2017 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. * * Author: Brijesh Singh * diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c index e0459002eb71..b2879767fc98 100644 --- a/drivers/crypto/ccp/sp-dev.c +++ b/drivers/crypto/ccp/sp-dev.c @@ -1,7 +1,7 @@ /* * AMD Secure Processor driver * - * Copyright (C) 2017 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 14398cad1625..5b0790025db3 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -1,7 +1,7 @@ /* * AMD Secure Processor driver * - * Copyright (C) 2017 Advanced Micro Devices, Inc. + * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. 
* * Author: Tom Lendacky * Author: Gary R Hook diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 7da93e9bebed..41bce0a3f4bb 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -1,7 +1,7 @@ /* * AMD Secure Processor device driver * - * Copyright (C) 2013,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * Author: Gary R Hook @@ -226,8 +226,6 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto e_err; - dev_notice(dev, "enabled\n"); - return 0; e_err: @@ -246,8 +244,6 @@ static void sp_pci_remove(struct pci_dev *pdev) sp_destroy(sp); sp_free_irqs(sp); - - dev_notice(dev, "disabled\n"); } #ifdef CONFIG_PM diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c index b75dc7db2d4a..d24228efbaaa 100644 --- a/drivers/crypto/ccp/sp-platform.c +++ b/drivers/crypto/ccp/sp-platform.c @@ -1,7 +1,7 @@ /* * AMD Secure Processor device driver * - * Copyright (C) 2014,2016 Advanced Micro Devices, Inc. + * Copyright (C) 2014,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky * diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index dd948e1df9e5..0ee1c52da0a4 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -156,8 +156,11 @@ static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma, /* Verify there is no memory overflow*/ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1); - if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) + if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) { + dev_err(dev, "Too many mlli entries. current %d max %d\n", + new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES); return -ENOMEM; + } /*handle buffer longer than 64 kbytes */ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) { @@ -511,10 +514,8 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, /* Map the src SGL */ rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto cipher_exit; - } if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; @@ -528,12 +529,11 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } } else { /* Map the dst sg */ - if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, - &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents)) { - rc = -ENOMEM; + rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, + &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, &mapped_nents); + if (rc) goto cipher_exit; - } if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; @@ -614,10 +614,10 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) hw_iv_size, DMA_BIDIRECTIONAL); } - /*In case a pool was set, a table was - *allocated and should be released - */ - if (areq_ctx->mlli_params.curr_pool) { + /* Release pool */ + if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || + areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && + (areq_ctx->mlli_params.mlli_virt_addr)) { dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", &areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_virt_addr); @@ -1078,10 +1078,8 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, &areq_ctx->dst.nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto chain_data_exit; - } } dst_mapped_nents 
= cc_get_sgl_nents(dev, req->dst, size_for_map, @@ -1235,11 +1233,10 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) } areq_ctx->ccm_iv0_dma_addr = dma_addr; - if (cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, - &sg_data, req->assoclen)) { - rc = -ENOMEM; + rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, + &sg_data, req->assoclen); + if (rc) goto aead_map_failure; - } } if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { @@ -1299,10 +1296,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), &dummy, &mapped_nents); - if (rc) { - rc = -ENOMEM; + if (rc) goto aead_map_failure; - } if (areq_ctx->is_single_pass) { /* @@ -1386,6 +1381,7 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct buffer_array sg_data; struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; + int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; @@ -1405,18 +1401,18 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, /*TODO: copy data in case that buffer is enough for operation */ /* map the previous buffer */ if (*curr_buff_cnt) { - if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, - &sg_data)) { - return -ENOMEM; - } + rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, + &sg_data); + if (rc) + return rc; } if (src && nbytes > 0 && do_update) { - if (cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, - &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, - &dummy, &mapped_nents)) { + rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, + &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, + &dummy, &mapped_nents); + if (rc) goto unmap_curr_buff; - } if (src && mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { memcpy(areq_ctx->buff_sg, src, @@ -1435,7 +1431,8 @@ int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, 0, true, &areq_ctx->mlli_nents); - if (cc_generate_mlli(dev, &sg_data, mlli_params, flags)) + rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); + if (rc) goto fail_unmap_din; } /* change the buffer index for the unmap function */ @@ -1451,7 +1448,7 @@ unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); - return -ENOMEM; + return rc; } int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, @@ -1470,6 +1467,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, struct buffer_array sg_data; struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle; unsigned int swap_index = 0; + int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; @@ -1514,21 +1512,21 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, } if (*curr_buff_cnt) { - if (cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, - &sg_data)) { - return -ENOMEM; - } + rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, + &sg_data); + if (rc) + return rc; /* change the buffer index for next operation */ swap_index = 1; } if (update_data_len > *curr_buff_cnt) { - if (cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), - DMA_TO_DEVICE, &areq_ctx->in_nents, - LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, - &mapped_nents)) { + rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), + DMA_TO_DEVICE, &areq_ctx->in_nents, + LLI_MAX_NUM_OF_DATA_ENTRIES, 
&dummy, + &mapped_nents); + if (rc) goto unmap_curr_buff; - } if (mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { /* only one entry in the SG and no previous data */ @@ -1548,7 +1546,8 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, (update_data_len - *curr_buff_cnt), 0, true, &areq_ctx->mlli_nents); - if (cc_generate_mlli(dev, &sg_data, mlli_params, flags)) + rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); + if (rc) goto fail_unmap_din; } areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); @@ -1562,7 +1561,7 @@ unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); - return -ENOMEM; + return rc; } void cc_unmap_hash_request(struct device *dev, void *ctx, diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c index cc92b031fad1..d9c17078517b 100644 --- a/drivers/crypto/ccree/cc_cipher.c +++ b/drivers/crypto/ccree/cc_cipher.c @@ -80,6 +80,7 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size) default: break; } + break; case S_DIN_to_DES: if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE) return 0; @@ -352,7 +353,8 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, dev_dbg(dev, "weak 3DES key"); return -EINVAL; } else if (!des_ekey(tmp, key) && - (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) { + (crypto_tfm_get_flags(tfm) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; dev_dbg(dev, "weak DES key"); return -EINVAL; @@ -652,6 +654,8 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err) unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm); unsigned int len; + cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); + switch (ctx_p->cipher_mode) { case DRV_CIPHER_CBC: /* @@ -681,7 +685,6 @@ static void cc_cipher_complete(struct device *dev, void *cc_req, int err) break; } - cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); kzfree(req_ctx->iv); skcipher_request_complete(req, err); @@ -799,7 +802,8 @@ static int cc_cipher_decrypt(struct skcipher_request *req) memset(req_ctx, 0, sizeof(*req_ctx)); - if (ctx_p->cipher_mode == DRV_CIPHER_CBC) { + if ((ctx_p->cipher_mode == DRV_CIPHER_CBC) && + (req->cryptlen >= ivsize)) { /* Allocate and save the last IV sized bytes of the source, * which will be lost in case of in-place decryption. 
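The cc_cipher.c hunk above adds a req->cryptlen >= ivsize guard before backing up the last ciphertext block for CBC decryption. A minimal, self-contained sketch of that pattern follows; the helper name and its caller are illustrative, not the driver's actual code:

        #include <crypto/skcipher.h>
        #include <crypto/scatterwalk.h>
        #include <linux/slab.h>

        /*
         * For CBC decryption the IV of the next request is the last
         * ciphertext block, so it must be copied out of req->src before
         * an in-place operation overwrites it -- but only when at least
         * one full block is present.
         */
        static int backup_next_iv(struct skcipher_request *req, u8 **iv_copy,
                                  unsigned int ivsize, gfp_t flags)
        {
                *iv_copy = NULL;

                /* Nothing to preserve for requests shorter than one block */
                if (req->cryptlen < ivsize)
                        return 0;

                *iv_copy = kmalloc(ivsize, flags);
                if (!*iv_copy)
                        return -ENOMEM;

                /* Last ciphertext block becomes the IV for the next request */
                scatterwalk_map_and_copy(*iv_copy, req->src,
                                         req->cryptlen - ivsize, ivsize, 0);
                return 0;
        }

Without the length check, req->cryptlen - ivsize underflows for short requests and the copy would start at a bogus offset past the source scatterlist, which is what the added guard prevents.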
diff --git a/drivers/crypto/ccree/cc_debugfs.c b/drivers/crypto/ccree/cc_debugfs.c index 5ca184e42483..5fa05a7bcf36 100644 --- a/drivers/crypto/ccree/cc_debugfs.c +++ b/drivers/crypto/ccree/cc_debugfs.c @@ -39,11 +39,9 @@ static struct debugfs_reg32 debug_regs[] = { CC_DEBUG_REG(AXIM_MON_COMP), }; -int __init cc_debugfs_global_init(void) +void __init cc_debugfs_global_init(void) { cc_debugfs_dir = debugfs_create_dir("ccree", NULL); - - return !cc_debugfs_dir; } void __exit cc_debugfs_global_fini(void) @@ -56,7 +54,6 @@ int cc_debugfs_init(struct cc_drvdata *drvdata) struct device *dev = drvdata_to_dev(drvdata); struct cc_debugfs_ctx *ctx; struct debugfs_regset32 *regset; - struct dentry *file; debug_regs[0].offset = drvdata->sig_offset; debug_regs[1].offset = drvdata->ver_offset; @@ -74,22 +71,9 @@ int cc_debugfs_init(struct cc_drvdata *drvdata) regset->base = drvdata->cc_base; ctx->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir); - if (!ctx->dir) - return -ENFILE; - - file = debugfs_create_regset32("regs", 0400, ctx->dir, regset); - if (!file) { - debugfs_remove(ctx->dir); - return -ENFILE; - } - file = debugfs_create_bool("coherent", 0400, ctx->dir, - &drvdata->coherent); - - if (!file) { - debugfs_remove_recursive(ctx->dir); - return -ENFILE; - } + debugfs_create_regset32("regs", 0400, ctx->dir, regset); + debugfs_create_bool("coherent", 0400, ctx->dir, &drvdata->coherent); drvdata->debugfs = ctx; diff --git a/drivers/crypto/ccree/cc_debugfs.h b/drivers/crypto/ccree/cc_debugfs.h index 5b5320eca7d2..01cbd9a95659 100644 --- a/drivers/crypto/ccree/cc_debugfs.h +++ b/drivers/crypto/ccree/cc_debugfs.h @@ -5,7 +5,7 @@ #define __CC_DEBUGFS_H__ #ifdef CONFIG_DEBUG_FS -int cc_debugfs_global_init(void); +void cc_debugfs_global_init(void); void cc_debugfs_global_fini(void); int cc_debugfs_init(struct cc_drvdata *drvdata); @@ -13,11 +13,7 @@ void cc_debugfs_fini(struct cc_drvdata *drvdata); #else -static inline int cc_debugfs_global_init(void) -{ - return 0; -} - +static inline void cc_debugfs_global_init(void) {} static inline void cc_debugfs_global_fini(void) {} static inline int cc_debugfs_init(struct cc_drvdata *drvdata) diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 8ada308d72ee..3bcc6c76e090 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -103,10 +103,10 @@ static irqreturn_t cc_isr(int irq, void *dev_id) /* read the interrupt status */ irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); dev_dbg(dev, "Got IRR=0x%08X\n", irr); - if (irr == 0) { /* Probably shared interrupt line */ - dev_err(dev, "Got interrupt with empty IRR\n"); + + if (irr == 0) /* Probably shared interrupt line */ return IRQ_NONE; - } + imr = cc_ioread(drvdata, CC_REG(HOST_IMR)); /* clear interrupt - must be before processing events */ @@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev) rc = cc_ivgen_init(new_drvdata); if (rc) { dev_err(dev, "cc_ivgen_init failed\n"); - goto post_power_mgr_err; + goto post_buf_mgr_err; } /* Allocate crypto algs */ @@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev) goto post_hash_err; } + /* All set, we can allow autosuspend */ + cc_pm_go(new_drvdata); + /* If we got here and FIPS mode is enabled * it means all FIPS test passed, so let TEE * know we're good. 
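The init_cc_resources() change above defers enabling runtime PM until every subsystem has registered, via the new cc_pm_go() call (its cc_pm.c counterpart appears in the hunks that follow). A rough sketch of that two-stage pattern, with illustrative names and an assumed autosuspend delay rather than the driver's actual constant:

        #include <linux/device.h>
        #include <linux/pm_runtime.h>

        #define MY_AUTOSUSPEND_DELAY_MS 3000    /* assumption, not the driver value */

        /* Early in probe: mark the device active, but keep runtime PM disabled */
        static int my_pm_init(struct device *dev)
        {
                pm_runtime_set_autosuspend_delay(dev, MY_AUTOSUSPEND_DELAY_MS);
                pm_runtime_use_autosuspend(dev);
                return pm_runtime_set_active(dev);
        }

        /* Last step of probe: only now allow the device to autosuspend */
        static void my_pm_go(struct device *dev)
        {
                pm_runtime_enable(dev);
        }

Splitting pm_runtime_set_active() from pm_runtime_enable() this way keeps the device from autosuspending while probe is still registering cipher, AEAD and hash algorithms, which is the point of moving the enable to the end of init_cc_resources().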
@@ -417,8 +420,6 @@ post_cipher_err: cc_cipher_free(new_drvdata); post_ivgen_err: cc_ivgen_fini(new_drvdata); -post_power_mgr_err: - cc_pm_fini(new_drvdata); post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: @@ -538,13 +539,8 @@ static struct platform_driver ccree_driver = { static int __init ccree_init(void) { - int ret; - cc_hash_global_init(); - - ret = cc_debugfs_global_init(); - if (ret) - return ret; + cc_debugfs_global_init(); return platform_driver_register(&ccree_driver); } diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h index 5be7fd431b05..33dbf3e6d15d 100644 --- a/drivers/crypto/ccree/cc_driver.h +++ b/drivers/crypto/ccree/cc_driver.h @@ -111,13 +111,11 @@ struct cc_crypto_req { * @cc_base: virt address of the CC registers * @irq: device IRQ number * @irq_mask: Interrupt mask shadow (1 for masked interrupts) - * @fw_ver: SeP loaded firmware version */ struct cc_drvdata { void __iomem *cc_base; int irq; u32 irq_mask; - u32 fw_ver; struct completion hw_queue_avail; /* wait for HW queue availability */ struct platform_device *plat_dev; cc_sram_addr_t mlli_sram_addr; diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89f..6ff7e75ad90e 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c @@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev) int cc_pm_init(struct cc_drvdata *drvdata) { - int rc = 0; struct device *dev = drvdata_to_dev(drvdata); /* must be before the enabling to avoid resdundent suspending */ pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); /* activate the PM module */ - rc = pm_runtime_set_active(dev); - if (rc) - return rc; - /* enable the PM module*/ - pm_runtime_enable(dev); + return pm_runtime_set_active(dev); +} - return rc; +/* enable the PM module*/ +void cc_pm_go(struct cc_drvdata *drvdata) +{ + pm_runtime_enable(drvdata_to_dev(drvdata)); } void cc_pm_fini(struct cc_drvdata *drvdata) diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 020a5403c58b..907a6db4d6c0 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h @@ -16,6 +16,7 @@ extern const struct dev_pm_ops ccree_pm; int cc_pm_init(struct cc_drvdata *drvdata); +void cc_pm_go(struct cc_drvdata *drvdata); void cc_pm_fini(struct cc_drvdata *drvdata); int cc_pm_suspend(struct device *dev); int cc_pm_resume(struct device *dev); @@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) return 0; } +static inline void cc_pm_go(struct cc_drvdata *drvdata) {} + static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} static inline int cc_pm_suspend(struct device *dev) diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile index 639e5718dff4..b7bd980a27d8 100644 --- a/drivers/crypto/chelsio/Makefile +++ b/drivers/crypto/chelsio/Makefile @@ -1,4 +1,4 @@ -ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 +ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o chcr-objs := chcr_core.o chcr_algo.o diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index bcef76508dfa..8d8cf80b9294 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -1368,7 +1368,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req) static int chcr_device_init(struct chcr_context *ctx) { struct uld_ctx *u_ctx = NULL; - struct adapter *adap; unsigned int id; int 
txq_perchan, txq_idx, ntxq; int err = 0, rxq_perchan, rxq_idx; @@ -1382,7 +1381,6 @@ static int chcr_device_init(struct chcr_context *ctx) goto out; } ctx->dev = &u_ctx->dev; - adap = padap(ctx->dev); ntxq = u_ctx->lldi.ntxq; rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; txq_perchan = ntxq / u_ctx->lldi.nchan; @@ -2762,7 +2760,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize) return 0; } -static void generate_b0(struct aead_request *req, u8 *ivptr, +static int generate_b0(struct aead_request *req, u8 *ivptr, unsigned short op_type) { unsigned int l, lp, m; @@ -2787,6 +2785,8 @@ static void generate_b0(struct aead_request *req, u8 *ivptr, rc = set_msg_len(b0 + 16 - l, (op_type == CHCR_DECRYPT_OP) ? req->cryptlen - m : req->cryptlen, l); + + return rc; } static inline int crypto_ccm_check_iv(const u8 *iv) @@ -2821,7 +2821,7 @@ static int ccm_format_packet(struct aead_request *req, *((unsigned short *)(reqctx->scratch_pad + 16)) = htons(assoclen); - generate_b0(req, ivptr, op_type); + rc = generate_b0(req, ivptr, op_type); /* zero the ctr value */ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1); return rc; @@ -3676,9 +3676,9 @@ static int chcr_aead_op(struct aead_request *req, /* Form a WR from req */ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size); - if (IS_ERR(skb) || !skb) { + if (IS_ERR_OR_NULL(skb)) { chcr_dec_wrcount(cdev); - return PTR_ERR(skb); + return PTR_ERR_OR_ZERO(skb); } skb->dev = u_ctx->lldi.ports[0]; diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h index 1159dee964ed..ad874d548aa5 100644 --- a/drivers/crypto/chelsio/chcr_core.h +++ b/drivers/crypto/chelsio/chcr_core.h @@ -183,7 +183,7 @@ struct chcr_ipsec_aadiv { struct ipsec_sa_entry { int hmac_ctrl; u16 esn; - u16 imm; + u16 resv; unsigned int enckey_len; unsigned int kctx_len; unsigned int authsize; diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c index 2fb48cce4462..2f60049361ef 100644 --- a/drivers/crypto/chelsio/chcr_ipsec.c +++ b/drivers/crypto/chelsio/chcr_ipsec.c @@ -303,6 +303,9 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) return false; } + /* Inline single pdu */ + if (skb_shinfo(skb)->gso_size) + return false; return true; } @@ -333,7 +336,8 @@ static inline int is_eth_imm(const struct sk_buff *skb, } static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb, - struct ipsec_sa_entry *sa_entry) + struct ipsec_sa_entry *sa_entry, + bool *immediate) { unsigned int kctx_len; unsigned int flits; @@ -351,8 +355,10 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb, * TX Packet header plus the skb data in the Work Request. 
*/ - if (hdrlen) + if (hdrlen) { + *immediate = true; return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); + } flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); @@ -415,12 +421,12 @@ inline void *copy_esn_pktxt(struct sk_buff *skb, iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr); memcpy(aadiv->iv, iv, 8); - if (sa_entry->imm) { + if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) { sc_imm = (struct ulptx_idata *)(pos + (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), sizeof(__be64)) << 3)); - sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm); - sc_imm->len = cpu_to_be32(sa_entry->imm); + sc_imm->cmd_more = FILL_CMD_MORE(0); + sc_imm->len = cpu_to_be32(skb->len); } pos += len; return pos; @@ -528,15 +534,18 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, struct adapter *adap = pi->adapter; unsigned int ivsize = GCM_ESP_IV_SIZE; struct chcr_ipsec_wr *wr; + bool immediate = false; u16 immdatalen = 0; unsigned int flits; u32 ivinoffset; u32 aadstart; u32 aadstop; u32 ciphstart; + u16 sc_more = 0; u32 ivdrop = 0; u32 esnlen = 0; u32 wr_mid; + u16 ndesc; int qidx = skb_get_queue_mapping(skb); struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset]; unsigned int kctx_len = sa_entry->kctx_len; @@ -544,22 +553,24 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, atomic_inc(&adap->chcr_stats.ipsec_cnt); - flits = calc_tx_sec_flits(skb, sa_entry); + flits = calc_tx_sec_flits(skb, sa_entry, &immediate); + ndesc = DIV_ROUND_UP(flits, 2); if (sa_entry->esn) ivdrop = 1; - if (is_eth_imm(skb, sa_entry)) { + if (immediate) immdatalen = skb->len; - sa_entry->imm = immdatalen; - } - if (sa_entry->esn) + if (sa_entry->esn) { esnlen = sizeof(struct chcr_ipsec_aadiv); + if (!skb_is_nonlinear(skb)) + sc_more = 1; + } /* WR Header */ wr = (struct chcr_ipsec_wr *)pos; wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR)); - wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); + wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc); if (unlikely(credits < ETHTXQ_STOP_THRES)) { netif_tx_stop_queue(q->txq); @@ -571,10 +582,10 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb, /* ULPTX */ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid); - wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1); + wr->req.ulptx.len = htonl(ndesc - 1); /* Sub-command */ - wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen); + wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more); wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + sizeof(wr->req.key_ctx) + kctx_len + @@ -697,7 +708,7 @@ out_free: dev_kfree_skb_any(skb); cxgb4_reclaim_completed_tx(adap, &q->q, true); - flits = calc_tx_sec_flits(skb, sa_entry); + flits = calc_tx_sec_flits(skb, sa_entry, &immediate); ndesc = flits_to_desc(flits); credits = txq_avail(&q->q) - ndesc; @@ -710,9 +721,6 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } - if (is_eth_imm(skb, sa_entry)) - immediate = true; - if (!immediate && unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) { q->mapping_err++; diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/crypto/chelsio/chtls/Makefile index df1379570a8e..b958f1b8ec39 100644 --- a/drivers/crypto/chelsio/chtls/Makefile +++ b/drivers/crypto/chelsio/chtls/Makefile @@ -1,4 +1,5 @@ -ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 -Idrivers/crypto/chelsio/ +ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \ + -I $(srctree)/drivers/crypto/chelsio obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls.o chtls-objs := chtls_main.o chtls_cm.o chtls_io.o 
chtls_hw.o diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index 59b75299fcbc..4e22332496c5 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "chtls.h" #include "chtls_cm.h" @@ -615,7 +616,7 @@ int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk) pi = netdev_priv(ndev); adap = pi->adapter; - if (!(adap->flags & FULL_INIT_DONE)) + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) return -EBADF; if (listen_hash_find(cdev, sk) >= 0) /* already have it */ @@ -1015,6 +1016,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, { struct inet_sock *newinet; const struct iphdr *iph; + struct tls_context *ctx; struct net_device *ndev; struct chtls_sock *csk; struct dst_entry *dst; @@ -1063,6 +1065,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, oreq->ts_recent = PASS_OPEN_TID_G(ntohl(req->tos_stid)); sk_setup_caps(newsk, dst); + ctx = tls_get_ctx(lsk); + newsk->sk_destruct = ctx->sk_destruct; csk->sk = newsk; csk->passive_reap_next = oreq; csk->tx_chan = cxgb4_port_chan(ndev); diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c index 18f553fcc167..1285a1bceda7 100644 --- a/drivers/crypto/chelsio/chtls/chtls_io.c +++ b/drivers/crypto/chelsio/chtls/chtls_io.c @@ -922,14 +922,13 @@ static int csk_wait_memory(struct chtls_dev *cdev, struct sock *sk, long *timeo_p) { DEFINE_WAIT_FUNC(wait, woken_wake_function); - int sndbuf, err = 0; + int err = 0; long current_timeo; long vm_wait = 0; bool noblock; current_timeo = *timeo_p; noblock = (*timeo_p ? false : true); - sndbuf = cdev->max_host_sndbuf; if (csk_mem_free(cdev, sk)) { current_timeo = (prandom_u32() % (HZ / 5)) + 2; vm_wait = (prandom_u32() % (HZ / 5)) + 2; @@ -1401,23 +1400,18 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); - struct net_device *dev = csk->egress_dev; struct chtls_hws *hws = &csk->tlshws; struct tcp_sock *tp = tcp_sk(sk); - struct adapter *adap; unsigned long avail; int buffers_freed; int copied = 0; - int request; int target; long timeo; - adap = netdev2adap(dev); buffers_freed = 0; timeo = sock_rcvtimeo(sk, nonblock); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); - request = len; if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) chtls_cleanup_rbuf(sk, copied); @@ -1694,11 +1688,9 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, { struct tcp_sock *tp = tcp_sk(sk); struct chtls_sock *csk; - struct chtls_hws *hws; unsigned long avail; /* amount of available data in current skb */ int buffers_freed; int copied = 0; - int request; long timeo; int target; /* Read at least this many bytes */ @@ -1718,7 +1710,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, lock_sock(sk); csk = rcu_dereference_sk_user_data(sk); - hws = &csk->tlshws; if (is_tls_rx(csk)) return chtls_pt_recvmsg(sk, msg, len, nonblock, @@ -1726,7 +1717,6 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, timeo = sock_rcvtimeo(sk, nonblock); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); - request = len; if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND))) chtls_cleanup_rbuf(sk, copied); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index 563f8fe7686a..dd2daf2a54e0 100644 --- 
a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -30,7 +30,6 @@ */ static LIST_HEAD(cdev_list); static DEFINE_MUTEX(cdev_mutex); -static DEFINE_MUTEX(cdev_list_lock); static DEFINE_MUTEX(notify_mutex); static RAW_NOTIFIER_HEAD(listen_notify_list); diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c index a5a36fe7bf2c..dad212cabe63 100644 --- a/drivers/crypto/hifn_795x.c +++ b/drivers/crypto/hifn_795x.c @@ -1961,7 +1961,8 @@ static int hifn_setkey(struct crypto_ablkcipher *cipher, const u8 *key, u32 tmp[DES_EXPKEY_WORDS]; int ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index d531c14020dc..7ef30a98cb24 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -940,7 +940,7 @@ static int safexcel_des_setkey(struct crypto_skcipher *ctfm, const u8 *key, } ret = des_ekey(tmp, key); - if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 1b0d156bb9be..5c4659b04d70 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -847,7 +847,7 @@ static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key, goto out; if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { + if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { ret = -EINVAL; } else { *flags &= ~CRYPTO_TFM_RES_WEAK_KEY; @@ -1125,7 +1125,7 @@ static int aead_setup(struct crypto_aead *tfm, unsigned int authsize) goto out; if (*flags & CRYPTO_TFM_RES_WEAK_KEY) { - if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) { + if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { ret = -EINVAL; goto out; } else { diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c index 0ae84ec9e21c..fb279b3a1ca1 100644 --- a/drivers/crypto/marvell/cipher.c +++ b/drivers/crypto/marvell/cipher.c @@ -286,7 +286,7 @@ static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key, } ret = des_ekey(tmp, key); - if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -322,7 +322,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req, struct mv_cesa_skcipher_dma_iter iter; bool skip_ctx = false; int ret; - unsigned int ivsize; basereq->chain.first = NULL; basereq->chain.last = NULL; @@ -381,7 +380,6 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req, } while (mv_cesa_skcipher_req_iter_next_op(&iter)); /* Add output data for IV */ - ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req)); ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET, CESA_SA_DATA_SRAM_OFFSET, CESA_TDMA_SRC_IN_SRAM, flags); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 55f34cfc43ff..9450c41211b2 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -772,7 +772,7 @@ static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, } err = 
des_ekey(tmp, key); - if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c index 6369019219d4..1ba2633e90d6 100644 --- a/drivers/crypto/omap-des.c +++ b/drivers/crypto/omap-des.c @@ -662,7 +662,7 @@ static int omap_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, pr_debug("enter, keylen: %d\n", keylen); /* Do we need to test against weak key? */ - if (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY) { + if (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) { u32 tmp[DES_EXPKEY_WORDS]; int ret = des_ekey(tmp, key); diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index 17068b55fea5..1b3acdeffede 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -759,7 +759,8 @@ static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, } if (unlikely(!des_ekey(tmp, key)) && - (crypto_ablkcipher_get_flags(cipher) & CRYPTO_TFM_REQ_WEAK_KEY)) { + (crypto_ablkcipher_get_flags(cipher) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } diff --git a/drivers/crypto/qat/qat_c3xxx/Makefile b/drivers/crypto/qat/qat_c3xxx/Makefile index 8f5fd4838a96..822b5de58ec6 100644 --- a/drivers/crypto/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/qat/qat_c3xxx/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c index 763c2166ee0e..d937cc7248a5 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c @@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_c3xxxvf/Makefile b/drivers/crypto/qat/qat_c3xxxvf/Makefile index 16d178e2eaa2..8f56d27c7479 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/qat/qat_c3xxxvf/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c index 613c7d5644ce..1dc5ac859f7b 100644 --- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c @@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_c62x/Makefile b/drivers/crypto/qat/qat_c62x/Makefile index bd75ace59b76..6dcd404578fc 100644 --- a/drivers/crypto/qat/qat_c62x/Makefile +++ 
b/drivers/crypto/qat/qat_c62x/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c index 9cb832963357..2bc06c89d2fe 100644 --- a/drivers/crypto/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/qat/qat_c62x/adf_drv.c @@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_c62xvf/Makefile b/drivers/crypto/qat/qat_c62xvf/Makefile index ecd708c213b2..1e5d51de778f 100644 --- a/drivers/crypto/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/qat/qat_c62xvf/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c index 278452b8ef81..a68358b31292 100644 --- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c @@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c index d0879790561f..5c7fdb0fc53d 100644 --- a/drivers/crypto/qat/qat_common/adf_cfg.c +++ b/drivers/crypto/qat/qat_common/adf_cfg.c @@ -141,13 +141,6 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev) accel_dev->debugfs_dir, dev_cfg_data, &qat_dev_cfg_fops); - if (!dev_cfg_data->debug) { - dev_err(&GET_DEV(accel_dev), - "Failed to create qat cfg debugfs entry.\n"); - kfree(dev_cfg_data); - accel_dev->cfg = NULL; - return -EFAULT; - } return 0; } EXPORT_SYMBOL_GPL(adf_cfg_dev_add); diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 57d2622728a5..2136cbe4bf6c 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -486,12 +486,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) /* accel_dev->debugfs_dir should always be non-NULL here */ etr_data->debug = debugfs_create_dir("transport", accel_dev->debugfs_dir); - if (!etr_data->debug) { - dev_err(&GET_DEV(accel_dev), - "Unable to create transport debugfs entry\n"); - ret = -ENOENT; - goto err_bank_debug; - } for (i = 0; i < num_banks; i++) { ret = adf_init_bank(accel_dev, &etr_data->banks[i], i, @@ -504,7 +498,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) err_bank_all: debugfs_remove(etr_data->debug); -err_bank_debug: kfree(etr_data->banks); err_bank: kfree(etr_data); diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c index 52340b9bb387..e794e9d97b2c 100644 --- 
a/drivers/crypto/qat/qat_common/adf_transport_debug.c +++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c @@ -163,11 +163,6 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR, ring->bank->bank_debug_dir, ring, &adf_ring_debug_fops); - if (!ring_debug->debug) { - pr_err("QAT: Failed to create ring debug entry.\n"); - kfree(ring_debug); - return -EFAULT; - } ring->ring_debug = ring_debug; return 0; } @@ -271,19 +266,9 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank) snprintf(name, sizeof(name), "bank_%02d", bank->bank_number); bank->bank_debug_dir = debugfs_create_dir(name, parent); - if (!bank->bank_debug_dir) { - pr_err("QAT: Failed to create bank debug dir.\n"); - return -EFAULT; - } - bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR, bank->bank_debug_dir, bank, &adf_bank_debug_fops); - if (!bank->bank_debug_cfg) { - pr_err("QAT: Failed to create bank debug entry.\n"); - debugfs_remove(bank->bank_debug_dir); - return -EFAULT; - } return 0; } diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile index 180a00ed7f89..0fc06b1e1632 100644 --- a/drivers/crypto/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/qat/qat_dh895xcc/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c index 3a9708ef4ce2..b11bf8c0e683 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c @@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile index 5c3ccf8267eb..9ce906af6034 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile @@ -1,3 +1,3 @@ -ccflags-y := -I$(src)/../qat_common +ccflags-y := -I $(srctree)/$(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c index 3da0f951cb59..1b762eefc6c1 100644 --- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c @@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_FUNC(pdev->devfn)); accel_dev->debugfs_dir = debugfs_create_dir(name, NULL); - if (!accel_dev->debugfs_dir) { - dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name); - ret = -EINVAL; - goto out_err; - } /* Create device configuration table */ ret = adf_cfg_dev_add(accel_dev); diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c index 25c13e26d012..154b6baa124e 100644 --- a/drivers/crypto/qce/ablkcipher.c +++ b/drivers/crypto/qce/ablkcipher.c @@ -180,8 +180,8 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, u32 tmp[DES_EXPKEY_WORDS]; ret 
= des_ekey(tmp, key); - if (!ret && crypto_ablkcipher_get_flags(ablk) & - CRYPTO_TFM_REQ_WEAK_KEY) + if (!ret && (crypto_ablkcipher_get_flags(ablk) & + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) goto weakkey; } diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c index c9d622abd90c..0ce4a65b95f5 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.c +++ b/drivers/crypto/rockchip/rk3288_crypto.c @@ -119,7 +119,7 @@ static int rk_load_data(struct rk_crypto_info *dev, count = (dev->left_bytes > PAGE_SIZE) ? PAGE_SIZE : dev->left_bytes; - if (!sg_pcopy_to_buffer(dev->first, dev->nents, + if (!sg_pcopy_to_buffer(dev->first, dev->src_nents, dev->addr_vir, count, dev->total - dev->left_bytes)) { dev_err(dev->dev, "[%s:%d] pcopy err\n", diff --git a/drivers/crypto/rockchip/rk3288_crypto.h b/drivers/crypto/rockchip/rk3288_crypto.h index d5fb4013fb42..54ee5b3ed9db 100644 --- a/drivers/crypto/rockchip/rk3288_crypto.h +++ b/drivers/crypto/rockchip/rk3288_crypto.h @@ -207,7 +207,8 @@ struct rk_crypto_info { void *addr_vir; int aligned; int align_size; - size_t nents; + size_t src_nents; + size_t dst_nents; unsigned int total; unsigned int count; dma_addr_t addr_in; @@ -244,6 +245,7 @@ struct rk_cipher_ctx { struct rk_crypto_info *dev; unsigned int keylen; u32 mode; + u8 iv[AES_BLOCK_SIZE]; }; enum alg_type { diff --git a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c index 639c15c5364b..02dac6ae7e53 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ablkcipher.c @@ -60,7 +60,7 @@ static int rk_tdes_setkey(struct crypto_ablkcipher *cipher, if (keylen == DES_KEY_SIZE) { if (!des_ekey(tmp, key) && - (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; return -EINVAL; } @@ -242,6 +242,17 @@ static void crypto_dma_start(struct rk_crypto_info *dev) static int rk_set_data_start(struct rk_crypto_info *dev) { int err; + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + + dev->sg_src->offset + dev->sg_src->length - ivsize; + + /* store the iv that need to be updated in chain mode */ + if (ctx->mode & RK_CRYPTO_DEC) + memcpy(ctx->iv, src_last_blk, ivsize); err = dev->load_data(dev, dev->sg_src, dev->sg_dst); if (!err) @@ -260,8 +271,9 @@ static int rk_ablk_start(struct rk_crypto_info *dev) dev->total = req->nbytes; dev->sg_src = req->src; dev->first = req->src; - dev->nents = sg_nents(req->src); + dev->src_nents = sg_nents(req->src); dev->sg_dst = req->dst; + dev->dst_nents = sg_nents(req->dst); dev->aligned = 1; spin_lock_irqsave(&dev->lock, flags); @@ -285,6 +297,28 @@ static void rk_iv_copyback(struct rk_crypto_info *dev) memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize); } +static void rk_update_iv(struct rk_crypto_info *dev) +{ + struct ablkcipher_request *req = + ablkcipher_request_cast(dev->async_req); + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); + u32 ivsize = crypto_ablkcipher_ivsize(tfm); + u8 *new_iv = NULL; + + if (ctx->mode & RK_CRYPTO_DEC) { + new_iv = ctx->iv; + } else { + new_iv = page_address(sg_page(dev->sg_dst)) + + 
dev->sg_dst->offset + dev->sg_dst->length - ivsize; + } + + if (ivsize == DES_BLOCK_SIZE) + memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize); + else if (ivsize == AES_BLOCK_SIZE) + memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize); +} + /* return: * true some err was occurred * fault no err, continue @@ -297,7 +331,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev) dev->unload_data(dev); if (!dev->aligned) { - if (!sg_pcopy_from_buffer(req->dst, dev->nents, + if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents, dev->addr_vir, dev->count, dev->total - dev->left_bytes - dev->count)) { @@ -306,6 +340,7 @@ static int rk_ablk_rx(struct rk_crypto_info *dev) } } if (dev->left_bytes) { + rk_update_iv(dev); if (dev->aligned) { if (sg_is_last(dev->sg_src)) { dev_err(dev->dev, "[%s:%d] Lack of data\n", diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c index 821a506b9e17..c336ae75e361 100644 --- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c +++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c @@ -206,7 +206,7 @@ static int rk_ahash_start(struct rk_crypto_info *dev) dev->sg_dst = NULL; dev->sg_src = req->src; dev->first = req->src; - dev->nents = sg_nents(req->src); + dev->src_nents = sg_nents(req->src); rctx = ahash_request_ctx(req); rctx->mode = 0; diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c index 0064be0e3941..1afdcb81d8ed 100644 --- a/drivers/crypto/s5p-sss.c +++ b/drivers/crypto/s5p-sss.c @@ -232,6 +232,7 @@ * struct samsung_aes_variant - platform specific SSS driver data * @aes_offset: AES register offset from SSS module's base. * @hash_offset: HASH register offset from SSS module's base. + * @clk_names: names of clocks needed to run SSS IP * * Specifies platform specific configuration of SSS module. 
* Note: A structure for driver specific platform data is used for future @@ -240,6 +241,7 @@ struct samsung_aes_variant { unsigned int aes_offset; unsigned int hash_offset; + const char *clk_names[2]; }; struct s5p_aes_reqctx { @@ -296,6 +298,7 @@ struct s5p_aes_ctx { struct s5p_aes_dev { struct device *dev; struct clk *clk; + struct clk *pclk; void __iomem *ioaddr; void __iomem *aes_ioaddr; int irq_fc; @@ -384,11 +387,19 @@ struct s5p_hash_ctx { static const struct samsung_aes_variant s5p_aes_data = { .aes_offset = 0x4000, .hash_offset = 0x6000, + .clk_names = { "secss", }, }; static const struct samsung_aes_variant exynos_aes_data = { .aes_offset = 0x200, .hash_offset = 0x400, + .clk_names = { "secss", }, +}; + +static const struct samsung_aes_variant exynos5433_slim_aes_data = { + .aes_offset = 0x400, + .hash_offset = 0x800, + .clk_names = { "pclk", "aclk", }, }; static const struct of_device_id s5p_sss_dt_match[] = { @@ -400,6 +411,10 @@ static const struct of_device_id s5p_sss_dt_match[] = { .compatible = "samsung,exynos4210-secss", .data = &exynos_aes_data, }, + { + .compatible = "samsung,exynos5433-slim-sss", + .data = &exynos5433_slim_aes_data, + }, { }, }; MODULE_DEVICE_TABLE(of, s5p_sss_dt_match); @@ -463,6 +478,9 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg, static void s5p_sg_done(struct s5p_aes_dev *dev) { + struct ablkcipher_request *req = dev->req; + struct s5p_aes_reqctx *reqctx = ablkcipher_request_ctx(req); + if (dev->sg_dst_cpy) { dev_dbg(dev->dev, "Copying %d bytes of output data back to original place\n", @@ -472,6 +490,11 @@ static void s5p_sg_done(struct s5p_aes_dev *dev) } s5p_free_sg_cpy(dev, &dev->sg_src_cpy); s5p_free_sg_cpy(dev, &dev->sg_dst_cpy); + if (reqctx->mode & FLAGS_AES_CBC) + memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE); + + else if (reqctx->mode & FLAGS_AES_CTR) + memcpy_fromio(req->info, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE); } /* Calls the completion. Cannot be called with dev->lock hold. 
*/ @@ -1819,10 +1842,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev, void __iomem *keystart; if (iv) - memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10); + memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, + AES_BLOCK_SIZE); if (ctr) - memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10); + memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, + AES_BLOCK_SIZE); if (keylen == AES_KEYSIZE_256) keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0); @@ -2208,18 +2233,39 @@ static int s5p_aes_probe(struct platform_device *pdev) return PTR_ERR(pdata->ioaddr); } - pdata->clk = devm_clk_get(dev, "secss"); + pdata->clk = devm_clk_get(dev, variant->clk_names[0]); if (IS_ERR(pdata->clk)) { - dev_err(dev, "failed to find secss clock source\n"); + dev_err(dev, "failed to find secss clock %s\n", + variant->clk_names[0]); return -ENOENT; } err = clk_prepare_enable(pdata->clk); if (err < 0) { - dev_err(dev, "Enabling SSS clk failed, err %d\n", err); + dev_err(dev, "Enabling clock %s failed, err %d\n", + variant->clk_names[0], err); return err; } + if (variant->clk_names[1]) { + pdata->pclk = devm_clk_get(dev, variant->clk_names[1]); + if (IS_ERR(pdata->pclk)) { + dev_err(dev, "failed to find clock %s\n", + variant->clk_names[1]); + err = -ENOENT; + goto err_clk; + } + + err = clk_prepare_enable(pdata->pclk); + if (err < 0) { + dev_err(dev, "Enabling clock %s failed, err %d\n", + variant->clk_names[0], err); + goto err_clk; + } + } else { + pdata->pclk = NULL; + } + spin_lock_init(&pdata->lock); spin_lock_init(&pdata->hash_lock); @@ -2295,8 +2341,11 @@ err_algs: tasklet_kill(&pdata->tasklet); err_irq: - clk_disable_unprepare(pdata->clk); + if (pdata->pclk) + clk_disable_unprepare(pdata->pclk); +err_clk: + clk_disable_unprepare(pdata->clk); s5p_dev = NULL; return err; @@ -2323,6 +2372,9 @@ static int s5p_aes_remove(struct platform_device *pdev) pdata->use_hash = false; } + if (pdata->pclk) + clk_disable_unprepare(pdata->pclk); + clk_disable_unprepare(pdata->clk); s5p_dev = NULL; diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index 590d7352837e..4a6cc8a3045d 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c @@ -1564,7 +1564,7 @@ err_engine: static int stm32_hash_remove(struct platform_device *pdev) { - static struct stm32_hash_dev *hdev; + struct stm32_hash_dev *hdev; int ret; hdev = platform_get_drvdata(pdev); diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c index 5cf64746731a..54fd714d53ca 100644 --- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c @@ -517,7 +517,7 @@ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, flags = crypto_skcipher_get_flags(tfm); ret = des_ekey(tmp, key); - if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY); dev_dbg(ss->dev, "Weak key %u\n", keylen); return -EINVAL; diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index f8e2c5c3f4eb..de78b54bcfb1 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1535,7 +1535,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, } if (unlikely(crypto_ablkcipher_get_flags(cipher) & - CRYPTO_TFM_REQ_WEAK_KEY) && + CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) && !des_ekey(tmp, key)) { crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY); return 
-EINVAL; diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index a92a66b1ff46..3235611928f2 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -595,6 +595,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx, } cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) { + dev_dbg(ctx->device->dev, "[%s]: DMA submission failed\n", + __func__); + return cookie; + } + dma_async_issue_pending(channel); return 0; @@ -994,10 +1000,11 @@ static int des_ablkcipher_setkey(struct crypto_ablkcipher *cipher, } ret = des_ekey(tmp, key); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; - pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", - __func__); + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", + __func__); return -EINVAL; } @@ -1028,18 +1035,19 @@ static int des3_ablkcipher_setkey(struct crypto_ablkcipher *cipher, /* Checking key interdependency for weak key detection. */ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) || !((K[2] ^ K[4]) | (K[3] ^ K[5]))) && - (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; - pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_REQ_WEAK_KEY", - __func__); + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", + __func__); return -EINVAL; } for (i = 0; i < 3; i++) { ret = des_ekey(tmp, key + i*DES_KEY_SIZE); - if (unlikely(ret == 0) && (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + if (unlikely(ret == 0) && + (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) { *flags |= CRYPTO_TFM_RES_WEAK_KEY; - pr_debug(DEV_DBG_NAME " [%s]: " - "CRYPTO_TFM_REQ_WEAK_KEY", __func__); + pr_debug(DEV_DBG_NAME " [%s]: CRYPTO_TFM_RES_WEAK_KEY", + __func__); return -EINVAL; } } diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c index 2c573d1aaa64..0704833ece92 100644 --- a/drivers/crypto/virtio/virtio_crypto_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_algs.c @@ -406,7 +406,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, } else { req_data->header.session_id = cpu_to_le64(ctx->dec_sess_info.session_id); - req_data->header.opcode = + req_data->header.opcode = cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT); } req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 6e928f37d084..0cb8c30ea278 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -86,12 +86,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) { struct dax_device *dax_dev; bool dax_enabled = false; + pgoff_t pgoff, pgoff_end; struct request_queue *q; - pgoff_t pgoff; - int err, id; - pfn_t pfn; - long len; char buf[BDEVNAME_SIZE]; + void *kaddr, *end_kaddr; + pfn_t pfn, end_pfn; + sector_t last_page; + long len, len2; + int err, id; if (blocksize != PAGE_SIZE) { pr_debug("%s: error: unsupported blocksize for dax\n", @@ -113,6 +115,14 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) return false; } + last_page = PFN_DOWN(i_size_read(bdev->bd_inode) - 1) * 8; + err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end); + if (err) { + pr_debug("%s: error: unaligned partition for dax\n", + bdevname(bdev, buf)); + return false; + } + dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); if (!dax_dev) { pr_debug("%s: error: device 
does not support dax\n", @@ -121,14 +131,15 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) } id = dax_read_lock(); - len = dax_direct_access(dax_dev, pgoff, 1, NULL, &pfn); + len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); + len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn); dax_read_unlock(id); put_dax(dax_dev); - if (len < 1) { + if (len < 1 || len2 < 1) { pr_debug("%s: error: dax access failed (%ld)\n", - bdevname(bdev, buf), len); + bdevname(bdev, buf), len < 1 ? len : len2); return false; } @@ -143,13 +154,20 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize) */ WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API)); dax_enabled = true; - } else if (pfn_t_devmap(pfn)) { - struct dev_pagemap *pgmap; + } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) { + struct dev_pagemap *pgmap, *end_pgmap; pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL); - if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX) + end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL); + if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX + && pfn_t_to_page(pfn)->pgmap == pgmap + && pfn_t_to_page(end_pfn)->pgmap == pgmap + && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr)) + && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr))) dax_enabled = true; put_dev_pagemap(pgmap); + put_dev_pagemap(end_pgmap); + } if (!dax_enabled) { diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c index 92e78d16b476..c9aa15fb86a9 100644 --- a/drivers/dio/dio.c +++ b/drivers/dio/dio.c @@ -89,8 +89,8 @@ static struct dioname names[] = #undef DIONAME #undef DIOFBNAME -static const char *unknowndioname - = "unknown DIO board -- please email !"; +static const char unknowndioname[] + = "unknown DIO board, please email linux-m68k@lists.linux-m68k.org"; static const char *dio_getname(int id) { diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 02f7f9a89979..7c858020d14b 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -1093,17 +1093,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) return 0; } -static int dma_buf_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, dma_buf_debug_show, NULL); -} - -static const struct file_operations dma_buf_debug_fops = { - .open = dma_buf_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); static struct dentry *dma_buf_debugfs_dir; diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 136ec04d683f..3aa8733f832a 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -649,7 +649,7 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout); */ void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, - spinlock_t *lock, u64 context, unsigned seqno) + spinlock_t *lock, u64 context, u64 seqno) { BUG_ON(!lock); BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name); diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 53c1d6d36a64..32dcf7b4c935 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -172,7 +172,7 @@ static bool timeline_fence_enable_signaling(struct dma_fence *fence) static void timeline_fence_value_str(struct dma_fence *fence, char *str, int size) { - snprintf(str, size, "%d", fence->seqno); + snprintf(str, size, "%lld", fence->seqno); } static void timeline_fence_timeline_value_str(struct dma_fence *fence, diff --git a/drivers/dma-buf/sync_debug.c 
b/drivers/dma-buf/sync_debug.c index c4c8ecb24aa9..c0abf37df88b 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -147,7 +147,7 @@ static void sync_print_sync_file(struct seq_file *s, } } -static int sync_debugfs_show(struct seq_file *s, void *unused) +static int sync_info_debugfs_show(struct seq_file *s, void *unused) { struct list_head *pos; @@ -178,17 +178,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused) return 0; } -static int sync_info_debugfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, sync_debugfs_show, inode->i_private); -} - -static const struct file_operations sync_info_debugfs_fops = { - .open = sync_info_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(sync_info_debugfs); static __init int sync_debugfs_init(void) { @@ -218,7 +208,7 @@ void sync_dump(void) }; int i; - sync_debugfs_show(&s, NULL); + sync_info_debugfs_show(&s, NULL); for (i = 0; i < s.count; i += DUMP_CHUNK) { if ((s.count - i) > DUMP_CHUNK) { diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 35dd06479867..4f6305ca52c8 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -144,7 +144,7 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len) } else { struct dma_fence *fence = sync_file->fence; - snprintf(buf, len, "%s-%s%llu-%d", + snprintf(buf, len, "%s-%s%llu-%lld", fence->ops->get_driver_name(fence), fence->ops->get_timeline_name(fence), fence->context, @@ -258,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, i_b++; } else { - if (pt_a->seqno - pt_b->seqno <= INT_MAX) + if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno)) add_fence(fences, &i, pt_a); else add_fence(fences, &i, pt_b); diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 4e557684f792..fe69dccfa0c0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -203,6 +203,7 @@ struct at_xdmac_chan { u32 save_cim; u32 save_cnda; u32 save_cndc; + u32 irq_status; unsigned long status; struct tasklet_struct tasklet; struct dma_slave_config sconfig; @@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data) struct at_xdmac_desc *desc; u32 error_mask; - dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", - __func__, atchan->status); + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", + __func__, atchan->irq_status); error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS @@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data) if (at_xdmac_chan_is_cyclic(atchan)) { at_xdmac_handle_cyclic(atchan); - } else if ((atchan->status & AT_XDMAC_CIS_LIS) - || (atchan->status & error_mask)) { + } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) + || (atchan->irq_status & error_mask)) { struct dma_async_tx_descriptor *txd; - if (atchan->status & AT_XDMAC_CIS_RBEIS) + if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) dev_err(chan2dev(&atchan->chan), "read bus error!!!"); - if (atchan->status & AT_XDMAC_CIS_WBEIS) + if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) dev_err(chan2dev(&atchan->chan), "write bus error!!!"); - if (atchan->status & AT_XDMAC_CIS_ROIS) + if (atchan->irq_status & AT_XDMAC_CIS_ROIS) dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); spin_lock(&atchan->lock); @@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) atchan = &atxdmac->chan[i]; chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); chan_status = 
at_xdmac_chan_read(atchan, AT_XDMAC_CIS); - atchan->status = chan_status & chan_imr; + atchan->irq_status = chan_status & chan_imr; dev_vdbg(atxdmac->dma.dev, "%s: chan%d: imr=0x%x, status=0x%x\n", __func__, i, chan_imr, chan_status); @@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) at_xdmac_chan_read(atchan, AT_XDMAC_CDA), at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); - if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) + if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); tasklet_schedule(&atchan->tasklet); diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 1a44c8086d77..ae10f5614f95 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg( } } -static int bcm2835_dma_abort(void __iomem *chan_base) +static int bcm2835_dma_abort(struct bcm2835_chan *c) { - unsigned long cs; + void __iomem *chan_base = c->chan_base; long int timeout = 10000; - cs = readl(chan_base + BCM2835_DMA_CS); - if (!(cs & BCM2835_DMA_ACTIVE)) + /* + * A zero control block address means the channel is idle. + * (The ACTIVE flag in the CS register is not a reliable indicator.) + */ + if (!readl(chan_base + BCM2835_DMA_ADDR)) return 0; /* Write 0 to the active bit - Pause the DMA */ writel(0, chan_base + BCM2835_DMA_CS); /* Wait for any current AXI transfer to complete */ - while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { + while ((readl(chan_base + BCM2835_DMA_CS) & + BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) cpu_relax(); - cs = readl(chan_base + BCM2835_DMA_CS); - } - /* We'll un-pause when we set of our next DMA */ + /* Peripheral might be stuck and fail to signal AXI write responses */ if (!timeout) - return -ETIMEDOUT; - - if (!(cs & BCM2835_DMA_ACTIVE)) - return 0; - - /* Terminate the control block chain */ - writel(0, chan_base + BCM2835_DMA_NEXTCB); - - /* Abort the whole DMA */ - writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, - chan_base + BCM2835_DMA_CS); + dev_err(c->vc.chan.device->dev, + "failed to complete outstanding writes\n"); + writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); return 0; } @@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) spin_lock_irqsave(&c->vc.lock, flags); - /* Acknowledge interrupt */ - writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); + /* + * Clear the INT flag to receive further interrupts. Keep the channel + * active in case the descriptor is cyclic or in case the client has + * already terminated the descriptor and issued a new one. (May happen + * if this IRQ handler is threaded.) If the channel is finished, it + * will remain idle despite the ACTIVE flag being set. 
+ */ + writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, + c->chan_base + BCM2835_DMA_CS); d = c->desc; @@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) if (d->cyclic) { /* call the cyclic callback */ vchan_cyclic_callback(&d->vd); - - /* Keep the DMA engine running */ - writel(BCM2835_DMA_ACTIVE, - c->chan_base + BCM2835_DMA_CS); - } else { + } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { vchan_cookie_complete(&c->desc->vd); bcm2835_dma_start_desc(c); } @@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); unsigned long flags; - int timeout = 10000; LIST_HEAD(head); spin_lock_irqsave(&c->vc.lock, flags); @@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) list_del_init(&c->node); spin_unlock(&d->lock); - /* - * Stop DMA activity: we assume the callback will not be called - * after bcm_dma_abort() returns (even if it does, it will see - * c->desc is NULL and exit.) - */ + /* stop DMA activity */ if (c->desc) { vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; - bcm2835_dma_abort(c->chan_base); - - /* Wait for stopping */ - while (--timeout) { - if (!(readl(c->chan_base + BCM2835_DMA_CS) & - BCM2835_DMA_ACTIVE)) - break; - - cpu_relax(); - } - - if (!timeout) - dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); + bcm2835_dma_abort(c); } vchan_get_all_descriptors(&c->vc, &head); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index f1a441ab395d..3a11b1092e80 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -63,6 +63,7 @@ #include #include #include +#include static DEFINE_MUTEX(dma_list_mutex); static DEFINE_IDA(dma_ida); @@ -386,7 +387,8 @@ EXPORT_SYMBOL(dma_issue_pending_all); static bool dma_chan_is_local(struct dma_chan *chan, int cpu) { int node = dev_to_node(chan->device->dev); - return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node)); + return node == NUMA_NO_NODE || + cpumask_test_cpu(cpu, cpumask_of_node(node)); } /** diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 2eea4ef72915..6511928b4cdf 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -711,11 +711,9 @@ static int dmatest_func(void *data) srcs[i] = um->addr[i] + src_off; ret = dma_mapping_error(dev->dev, um->addr[i]); if (ret) { - dmaengine_unmap_put(um); result("src mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->to_cnt++; } @@ -730,11 +728,9 @@ static int dmatest_func(void *data) DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev->dev, dsts[i]); if (ret) { - dmaengine_unmap_put(um); result("dst mapping error", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } um->bidi_cnt++; } @@ -762,12 +758,10 @@ static int dmatest_func(void *data) } if (!tx) { - dmaengine_unmap_put(um); result("prep error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } done->done = false; @@ -776,12 +770,10 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - dmaengine_unmap_put(um); result("submit error", total_tests, src_off, dst_off, len, ret); msleep(100); - failed_tests++; - continue; + goto error_unmap_continue; } dma_async_issue_pending(chan); @@ -790,22 +782,20 @@ static int dmatest_func(void *data) status = 
dma_async_is_tx_complete(chan, cookie, NULL, NULL); - dmaengine_unmap_put(um); - if (!done->done) { result("test timed out", total_tests, src_off, dst_off, len, 0); - failed_tests++; - continue; + goto error_unmap_continue; } else if (status != DMA_COMPLETE) { result(status == DMA_ERROR ? "completion error status" : "completion busy status", total_tests, src_off, dst_off, len, ret); - failed_tests++; - continue; + goto error_unmap_continue; } + dmaengine_unmap_put(um); + if (params->noverify) { verbose_result("test passed", total_tests, src_off, dst_off, len, 0); @@ -846,6 +836,12 @@ static int dmatest_func(void *data) verbose_result("test passed", total_tests, src_off, dst_off, len, 0); } + + continue; + +error_unmap_continue: + dmaengine_unmap_put(um); + failed_tests++; } ktime = ktime_sub(ktime_get(), ktime); ktime = ktime_sub(ktime, comparetime); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index c2fff3f6c9ca..4a09af3cd546 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data) { struct imxdma_channel *imxdmac = (void *)data; struct imxdma_engine *imxdma = imxdmac->imxdma; - struct imxdma_desc *desc; + struct imxdma_desc *desc, *next_desc; unsigned long flags; spin_lock_irqsave(&imxdma->lock, flags); @@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data) list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); if (!list_empty(&imxdmac->ld_queue)) { - desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, - node); + next_desc = list_first_entry(&imxdmac->ld_queue, + struct imxdma_desc, node); list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); - if (imxdma_xfer_desc(desc) < 0) + if (imxdma_xfer_desc(next_desc) < 0) dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", __func__, imxdmac->channel); } diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index afd8f27bda96..538b6e0e17bb 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -972,7 +972,6 @@ static void pch_dma_remove(struct pci_dev *pdev) } /* PCI Device ID of DMA device */ -#define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810 #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815 #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index e286b5b99003..47eb4d13ed5f 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -241,6 +241,18 @@ config EDAC_SKX system has non-volatile DIMMs you should also manually select CONFIG_ACPI_NFIT. +config EDAC_I10NM + tristate "Intel 10nm server Integrated MC" + depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG && ACPI + depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_I10NM can't be y + select DMI + select ACPI_ADXL + help + Support for error detection and correction on the Intel + 10nm server Integrated Memory Controllers. If your + system has non-volatile DIMMs you should also manually + select CONFIG_ACPI_NFIT. + config EDAC_PND2 tristate "Intel Pondicherry2" depends on PCI && X86_64 && X86_MCE_INTEL @@ -379,9 +391,17 @@ config EDAC_ALTERA depends on EDAC=y && (ARCH_SOCFPGA || ARCH_STRATIX10) help Support for error detection and correction on the - Altera SOCs. This must be selected for SDRAM ECC. - Note that the preloader must initialize the SDRAM - before loading the kernel. + Altera SOCs. This is the global enable for the + various Altera peripherals. 
+ +config EDAC_ALTERA_SDRAM + bool "Altera SDRAM ECC" + depends on EDAC_ALTERA=y + help + Support for error detection and correction on the + Altera SDRAM Memory for Altera SoCs. Note that the + preloader must initialize the SDRAM before loading + the kernel. config EDAC_ALTERA_L2C bool "Altera L2 Cache ECC" @@ -475,4 +495,13 @@ config EDAC_QCOM For debugging issues having to do with stability and overall system health, you should probably say 'Y' here. +config EDAC_ASPEED + tristate "Aspeed AST 2500 SoC" + depends on MACH_ASPEED_G5 + help + Support for error detection and correction on the Aspeed AST 2500 SoC. + + First, ECC must be configured in the bootloader. Then, this driver + will expose error counters via the EDAC kernel framework. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 716096d08ea0..89ad4a84a0f6 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -30,7 +30,6 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o obj-$(CONFIG_EDAC_I7300) += i7300_edac.o obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o -obj-$(CONFIG_EDAC_SKX) += skx_edac.o obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o obj-$(CONFIG_EDAC_E752X) += e752x_edac.o @@ -58,6 +57,12 @@ obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac_mod.o layerscape_edac_mod-y := fsl_ddr_edac.o layerscape_edac.o obj-$(CONFIG_EDAC_LAYERSCAPE) += layerscape_edac_mod.o +skx_edac-y := skx_common.o skx_base.o +obj-$(CONFIG_EDAC_SKX) += skx_edac.o + +i10nm_edac-y := skx_common.o i10nm_base.o +obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o + obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o obj-$(CONFIG_EDAC_CELL) += cell_edac.o obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o @@ -78,3 +83,4 @@ obj-$(CONFIG_EDAC_SYNOPSYS) += synopsys_edac.o obj-$(CONFIG_EDAC_XGENE) += xgene_edac.o obj-$(CONFIG_EDAC_TI) += ti_edac.o obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o +obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index c89d82aa2776..1bcf9aea0cdf 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -29,6 +29,7 @@ #define EDAC_MOD_STR "altera_edac" #define EDAC_DEVICE "Altera" +#ifdef CONFIG_EDAC_ALTERA_SDRAM static const struct altr_sdram_prv_data c5_data = { .ecc_ctrl_offset = CV_CTLCFG_OFST, .ecc_ctl_en_mask = CV_CTLCFG_ECC_AUTO_EN, @@ -468,6 +469,39 @@ static int altr_sdram_remove(struct platform_device *pdev) return 0; } +/* + * If you want to suspend, need to disable EDAC by removing it + * from the device tree or defconfig. + */ +#ifdef CONFIG_PM +static int altr_sdram_prepare(struct device *dev) +{ + pr_err("Suspend not allowed when EDAC is enabled.\n"); + + return -EPERM; +} + +static const struct dev_pm_ops altr_sdram_pm_ops = { + .prepare = altr_sdram_prepare, +}; +#endif + +static struct platform_driver altr_sdram_edac_driver = { + .probe = altr_sdram_probe, + .remove = altr_sdram_remove, + .driver = { + .name = "altr_sdram_edac", +#ifdef CONFIG_PM + .pm = &altr_sdram_pm_ops, +#endif + .of_match_table = altr_sdram_ctrl_of_match, + }, +}; + +module_platform_driver(altr_sdram_edac_driver); + +#endif /* CONFIG_EDAC_ALTERA_SDRAM */ + /**************** Stratix 10 EDAC Memory Controller Functions ************/ /** @@ -530,37 +564,6 @@ static const struct regmap_config s10_sdram_regmap_cfg = { /************** ***********/ -/* - * If you want to suspend, need to disable EDAC by removing it - * from the device tree or defconfig. 
- */ -#ifdef CONFIG_PM -static int altr_sdram_prepare(struct device *dev) -{ - pr_err("Suspend not allowed when EDAC is enabled.\n"); - - return -EPERM; -} - -static const struct dev_pm_ops altr_sdram_pm_ops = { - .prepare = altr_sdram_prepare, -}; -#endif - -static struct platform_driver altr_sdram_edac_driver = { - .probe = altr_sdram_probe, - .remove = altr_sdram_remove, - .driver = { - .name = "altr_sdram_edac", -#ifdef CONFIG_PM - .pm = &altr_sdram_pm_ops, -#endif - .of_match_table = altr_sdram_ctrl_of_match, - }, -}; - -module_platform_driver(altr_sdram_edac_driver); - /************************* EDAC Parent Probe *************************/ static const struct of_device_id altr_edac_device_of_match[]; @@ -1046,14 +1049,17 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, return -ENODEV; } - if (of_address_to_resource(sysmgr_np, 0, &res)) + if (of_address_to_resource(sysmgr_np, 0, &res)) { + of_node_put(sysmgr_np); return -ENOMEM; + } /* Need physical address for SMCC call */ base = res.start; ecc_mgr_map = regmap_init(NULL, NULL, (void *)base, &s10_sdram_regmap_cfg); + of_node_put(sysmgr_np); } of_node_put(np_eccmgr); if (IS_ERR(ecc_mgr_map)) { @@ -2140,11 +2146,13 @@ static int altr_edac_a10_probe(struct platform_device *pdev) altr_edac_a10_device_add(edac, child); +#ifdef CONFIG_EDAC_ALTERA_SDRAM else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) || (of_device_is_compatible(child, "altr,sdram-edac-s10"))) of_platform_populate(pdev->dev.of_node, altr_sdram_ctrl_of_match, NULL, &pdev->dev); +#endif } return 0; diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c new file mode 100644 index 000000000000..11833c0a5d07 --- /dev/null +++ b/drivers/edac/aspeed_edac.c @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018, 2019 Cisco Systems + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "edac_module.h" + + +#define DRV_NAME "aspeed-edac" + + +#define ASPEED_MCR_PROT 0x00 /* protection key register */ +#define ASPEED_MCR_CONF 0x04 /* configuration register */ +#define ASPEED_MCR_INTR_CTRL 0x50 /* interrupt control/status register */ +#define ASPEED_MCR_ADDR_UNREC 0x58 /* address of first un-recoverable error */ +#define ASPEED_MCR_ADDR_REC 0x5c /* address of last recoverable error */ +#define ASPEED_MCR_LAST ASPEED_MCR_ADDR_REC + + +#define ASPEED_MCR_PROT_PASSWD 0xfc600309 +#define ASPEED_MCR_CONF_DRAM_TYPE BIT(4) +#define ASPEED_MCR_CONF_ECC BIT(7) +#define ASPEED_MCR_INTR_CTRL_CLEAR BIT(31) +#define ASPEED_MCR_INTR_CTRL_CNT_REC GENMASK(23, 16) +#define ASPEED_MCR_INTR_CTRL_CNT_UNREC GENMASK(15, 12) +#define ASPEED_MCR_INTR_CTRL_ENABLE (BIT(0) | BIT(1)) + + +static struct regmap *aspeed_regmap; + + +static int regmap_reg_write(void *context, unsigned int reg, unsigned int val) +{ + void __iomem *regs = (void __iomem *)context; + + /* enable write to MCR register set */ + writel(ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT); + + writel(val, regs + reg); + + /* disable write to MCR register set */ + writel(~ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT); + + return 0; +} + + +static int regmap_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + void __iomem *regs = (void __iomem *)context; + + *val = readl(regs + reg); + + return 0; +} + +static bool regmap_is_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ASPEED_MCR_PROT: + case ASPEED_MCR_INTR_CTRL: + case ASPEED_MCR_ADDR_UNREC: + case ASPEED_MCR_ADDR_REC: + return 
true; + default: + return false; + } +} + + +static const struct regmap_config aspeed_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = ASPEED_MCR_LAST, + .reg_write = regmap_reg_write, + .reg_read = regmap_reg_read, + .volatile_reg = regmap_is_volatile, + .fast_io = true, +}; + + +static void count_rec(struct mem_ctl_info *mci, u8 rec_cnt, u32 rec_addr) +{ + struct csrow_info *csrow = mci->csrows[0]; + u32 page, offset, syndrome; + + if (!rec_cnt) + return; + + /* report first few errors (if there are) */ + /* note: no addresses are recorded */ + if (rec_cnt > 1) { + /* page, offset and syndrome are not available */ + page = 0; + offset = 0; + syndrome = 0; + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, rec_cnt-1, + page, offset, syndrome, 0, 0, -1, + "address(es) not available", ""); + } + + /* report last error */ + /* note: rec_addr is the last recoverable error addr */ + page = rec_addr >> PAGE_SHIFT; + offset = rec_addr & ~PAGE_MASK; + /* syndrome is not available */ + syndrome = 0; + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + csrow->first_page + page, offset, syndrome, + 0, 0, -1, "", ""); +} + + +static void count_un_rec(struct mem_ctl_info *mci, u8 un_rec_cnt, + u32 un_rec_addr) +{ + struct csrow_info *csrow = mci->csrows[0]; + u32 page, offset, syndrome; + + if (!un_rec_cnt) + return; + + /* report 1. error */ + /* note: un_rec_addr is the first unrecoverable error addr */ + page = un_rec_addr >> PAGE_SHIFT; + offset = un_rec_addr & ~PAGE_MASK; + /* syndrome is not available */ + syndrome = 0; + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + csrow->first_page + page, offset, syndrome, + 0, 0, -1, "", ""); + + /* report further errors (if there are) */ + /* note: no addresses are recorded */ + if (un_rec_cnt > 1) { + /* page, offset and syndrome are not available */ + page = 0; + offset = 0; + syndrome = 0; + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, un_rec_cnt-1, + page, offset, syndrome, 0, 0, -1, + "address(es) not available", ""); + } +} + + +static irqreturn_t mcr_isr(int irq, void *arg) +{ + struct mem_ctl_info *mci = arg; + u32 rec_addr, un_rec_addr; + u32 reg50, reg5c, reg58; + u8 rec_cnt, un_rec_cnt; + + regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, ®50); + dev_dbg(mci->pdev, "received edac interrupt w/ mcr register 50: 0x%x\n", + reg50); + + /* collect data about recoverable and unrecoverable errors */ + rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_REC) >> 16; + un_rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_UNREC) >> 12; + + dev_dbg(mci->pdev, "%d recoverable interrupts and %d unrecoverable interrupts\n", + rec_cnt, un_rec_cnt); + + regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_UNREC, ®58); + un_rec_addr = reg58; + + regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_REC, ®5c); + rec_addr = reg5c; + + /* clear interrupt flags and error counters: */ + regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL, + ASPEED_MCR_INTR_CTRL_CLEAR, + ASPEED_MCR_INTR_CTRL_CLEAR); + + regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL, + ASPEED_MCR_INTR_CTRL_CLEAR, 0); + + /* process recoverable and unrecoverable errors */ + count_rec(mci, rec_cnt, rec_addr); + count_un_rec(mci, un_rec_cnt, un_rec_addr); + + if (!rec_cnt && !un_rec_cnt) + dev_dbg(mci->pdev, "received edac interrupt, but did not find any ECC counters\n"); + + regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, ®50); + dev_dbg(mci->pdev, "edac interrupt handled. 
mcr reg 50 is now: 0x%x\n", + reg50); + + return IRQ_HANDLED; +} + + +static int config_irq(void *ctx, struct platform_device *pdev) +{ + int irq; + int rc; + + /* register interrupt handler */ + irq = platform_get_irq(pdev, 0); + dev_dbg(&pdev->dev, "got irq %d\n", irq); + if (!irq) + return -ENODEV; + + rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH, + DRV_NAME, ctx); + if (rc) { + dev_err(&pdev->dev, "unable to request irq %d\n", irq); + return rc; + } + + /* enable interrupts */ + regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL, + ASPEED_MCR_INTR_CTRL_ENABLE, + ASPEED_MCR_INTR_CTRL_ENABLE); + + return 0; +} + + +static int init_csrows(struct mem_ctl_info *mci) +{ + struct csrow_info *csrow = mci->csrows[0]; + u32 nr_pages, dram_type; + struct dimm_info *dimm; + struct device_node *np; + struct resource r; + u32 reg04; + int rc; + + /* retrieve info about physical memory from device tree */ + np = of_find_node_by_path("/memory"); + if (!np) { + dev_err(mci->pdev, "dt: missing /memory node\n"); + return -ENODEV; + }; + + rc = of_address_to_resource(np, 0, &r); + + of_node_put(np); + + if (rc) { + dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n"); + return rc; + }; + + dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n", + r.start, resource_size(&r), PAGE_SHIFT); + + csrow->first_page = r.start >> PAGE_SHIFT; + nr_pages = resource_size(&r) >> PAGE_SHIFT; + csrow->last_page = csrow->first_page + nr_pages - 1; + + regmap_read(aspeed_regmap, ASPEED_MCR_CONF, ®04); + dram_type = (reg04 & ASPEED_MCR_CONF_DRAM_TYPE) ? MEM_DDR4 : MEM_DDR3; + + dimm = csrow->channels[0]->dimm; + dimm->mtype = dram_type; + dimm->edac_mode = EDAC_SECDED; + dimm->nr_pages = nr_pages / csrow->nr_channels; + + dev_dbg(mci->pdev, "initialized dimm with first_page=0x%lx and nr_pages=0x%x\n", + csrow->first_page, nr_pages); + + return 0; +} + + +static int aspeed_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct edac_mc_layer layers[2]; + struct mem_ctl_info *mci; + struct device_node *np; + struct resource *res; + void __iomem *regs; + u32 reg04; + int rc; + + /* setup regmap */ + np = dev->of_node; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOENT; + + regs = devm_ioremap_resource(dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + aspeed_regmap = devm_regmap_init(dev, NULL, (__force void *)regs, + &aspeed_regmap_config); + if (IS_ERR(aspeed_regmap)) + return PTR_ERR(aspeed_regmap); + + /* bail out if ECC mode is not configured */ + regmap_read(aspeed_regmap, ASPEED_MCR_CONF, ®04); + if (!(reg04 & ASPEED_MCR_CONF_ECC)) { + dev_err(&pdev->dev, "ECC mode is not configured in u-boot\n"); + return -EPERM; + } + + edac_op_state = EDAC_OPSTATE_INT; + + /* allocate & init EDAC MC data structure */ + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = 1; + layers[0].is_virt_csrow = true; + layers[1].type = EDAC_MC_LAYER_CHANNEL; + layers[1].size = 1; + layers[1].is_virt_csrow = false; + + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0); + if (!mci) + return -ENOMEM; + + mci->pdev = &pdev->dev; + mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4; + mci->edac_ctl_cap = EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->scrub_cap = SCRUB_FLAG_HW_SRC; + mci->scrub_mode = SCRUB_HW_SRC; + mci->mod_name = DRV_NAME; + mci->ctl_name = "MIC"; + mci->dev_name = dev_name(&pdev->dev); + + rc = init_csrows(mci); + if (rc) { + 
dev_err(&pdev->dev, "failed to init csrows\n"); + goto probe_exit02; + } + + platform_set_drvdata(pdev, mci); + + /* register with edac core */ + rc = edac_mc_add_mc(mci); + if (rc) { + dev_err(&pdev->dev, "failed to register with EDAC core\n"); + goto probe_exit02; + } + + /* register interrupt handler and enable interrupts */ + rc = config_irq(mci, pdev); + if (rc) { + dev_err(&pdev->dev, "failed setting up irq\n"); + goto probe_exit01; + } + + return 0; + +probe_exit01: + edac_mc_del_mc(&pdev->dev); +probe_exit02: + edac_mc_free(mci); + return rc; +} + + +static int aspeed_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci; + + /* disable interrupts */ + regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL, + ASPEED_MCR_INTR_CTRL_ENABLE, 0); + + /* free resources */ + mci = edac_mc_del_mc(&pdev->dev); + if (mci) + edac_mc_free(mci); + + return 0; +} + + +static const struct of_device_id aspeed_of_match[] = { + { .compatible = "aspeed,ast2500-sdram-edac" }, + {}, +}; + + +static struct platform_driver aspeed_driver = { + .driver = { + .name = DRV_NAME, + .of_match_table = aspeed_of_match + }, + .probe = aspeed_probe, + .remove = aspeed_remove +}; + + +static int __init aspeed_init(void) +{ + return platform_driver_register(&aspeed_driver); +} + + +static void __exit aspeed_exit(void) +{ + platform_driver_unregister(&aspeed_driver); +} + + +module_init(aspeed_init); +module_exit(aspeed_exit); + + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Stefan Schaeckeler "); +MODULE_DESCRIPTION("Aspeed AST2500 EDAC driver"); +MODULE_VERSION("1.0"); diff --git a/drivers/edac/debugfs.c b/drivers/edac/debugfs.c index 92dbb7e2320c..0a9277228c50 100644 --- a/drivers/edac/debugfs.c +++ b/drivers/edac/debugfs.c @@ -41,14 +41,9 @@ static const struct file_operations debug_fake_inject_fops = { .llseek = generic_file_llseek, }; -int __init edac_debugfs_init(void) +void __init edac_debugfs_init(void) { edac_debugfs = debugfs_create_dir("edac", NULL); - if (IS_ERR(edac_debugfs)) { - edac_debugfs = NULL; - return -ENOMEM; - } - return 0; } void edac_debugfs_exit(void) @@ -56,50 +51,31 @@ void edac_debugfs_exit(void) debugfs_remove_recursive(edac_debugfs); } -int edac_create_debugfs_nodes(struct mem_ctl_info *mci) +void edac_create_debugfs_nodes(struct mem_ctl_info *mci) { - struct dentry *d, *parent; + struct dentry *parent; char name[80]; int i; - if (!edac_debugfs) - return -ENODEV; - - d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs); - if (!d) - return -ENOMEM; - parent = d; + parent = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs); for (i = 0; i < mci->n_layers; i++) { sprintf(name, "fake_inject_%s", edac_layer_name[mci->layers[i].type]); - d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, - &mci->fake_inject_layer[i]); - if (!d) - goto nomem; + debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, + &mci->fake_inject_layer[i]); } - d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, - &mci->fake_inject_ue); - if (!d) - goto nomem; + debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, + &mci->fake_inject_ue); - d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent, - &mci->fake_inject_count); - if (!d) - goto nomem; + debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent, + &mci->fake_inject_count); - d = debugfs_create_file("fake_inject", S_IWUSR, parent, - &mci->dev, - &debug_fake_inject_fops); - if (!d) - goto nomem; + debugfs_create_file("fake_inject", S_IWUSR, parent, &mci->dev, + &debug_fake_inject_fops); 
mci->debugfs = parent; - return 0; -nomem: - edac_debugfs_remove_recursive(mci->debugfs); - return -ENOMEM; } /* Create a toplevel dir under EDAC's debugfs hierarchy */ diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index dec88dcea036..dd7d0b509aa3 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -69,9 +69,9 @@ extern void *edac_align_ptr(void **p, unsigned size, int n_elems); #define edac_debugfs_remove_recursive debugfs_remove_recursive #define edac_debugfs_remove debugfs_remove #ifdef CONFIG_EDAC_DEBUG -int edac_debugfs_init(void); +void edac_debugfs_init(void); void edac_debugfs_exit(void); -int edac_create_debugfs_nodes(struct mem_ctl_info *mci); +void edac_create_debugfs_nodes(struct mem_ctl_info *mci); struct dentry *edac_debugfs_create_dir(const char *dirname); struct dentry * edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent); @@ -83,9 +83,9 @@ edac_debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 struct dentry * edac_debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, u16 *value); #else -static inline int edac_debugfs_init(void) { return -ENODEV; } +static inline void edac_debugfs_init(void) { } static inline void edac_debugfs_exit(void) { } -static inline int edac_create_debugfs_nodes(struct mem_ctl_info *mci) { return 0; } +static inline void edac_create_debugfs_nodes(struct mem_ctl_info *mci) { } static inline struct dentry *edac_debugfs_create_dir(const char *dirname) { return NULL; } static inline struct dentry * edac_debugfs_create_dir_at(const char *dirname, struct dentry *parent) { return NULL; } diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c new file mode 100644 index 000000000000..c334fb7c63df --- /dev/null +++ b/drivers/edac/i10nm_base.c @@ -0,0 +1,275 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Intel(R) 10nm server memory controller. + * Copyright (c) 2019, Intel Corporation. + * + */ + +#include +#include +#include +#include +#include "edac_module.h" +#include "skx_common.h" + +#define I10NM_REVISION "v0.0.3" +#define EDAC_MOD_STR "i10nm_edac" + +/* Debug macros */ +#define i10nm_printk(level, fmt, arg...) 
\ + edac_printk(level, "i10nm", fmt, ##arg) + +#define I10NM_GET_SCK_BAR(d, reg) \ + pci_read_config_dword((d)->uracu, 0xd0, &(reg)) +#define I10NM_GET_IMC_BAR(d, i, reg) \ + pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg)) +#define I10NM_GET_DIMMMTR(m, i, j) \ + (*(u32 *)((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)) +#define I10NM_GET_MCDDRTCFG(m, i, j) \ + (*(u32 *)((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4)) + +#define I10NM_GET_SCK_MMIO_BASE(reg) (GET_BITFIELD(reg, 0, 28) << 23) +#define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12) +#define I10NM_GET_IMC_MMIO_SIZE(reg) ((GET_BITFIELD(reg, 13, 23) - \ + GET_BITFIELD(reg, 0, 10) + 1) << 12) + +static struct list_head *i10nm_edac_list; + +static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus, + unsigned int dev, unsigned int fun) +{ + struct pci_dev *pdev; + + pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun)); + if (!pdev) { + edac_dbg(2, "No device %02x:%02x.%x\n", + bus, dev, fun); + return NULL; + } + + if (unlikely(pci_enable_device(pdev) < 0)) { + edac_dbg(2, "Failed to enable device %02x:%02x.%x\n", + bus, dev, fun); + return NULL; + } + + pci_dev_get(pdev); + + return pdev; +} + +static int i10nm_get_all_munits(void) +{ + struct pci_dev *mdev; + void __iomem *mbase; + unsigned long size; + struct skx_dev *d; + int i, j = 0; + u32 reg, off; + u64 base; + + list_for_each_entry(d, i10nm_edac_list, list) { + d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1); + if (!d->util_all) + return -ENODEV; + + d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1); + if (!d->uracu) + return -ENODEV; + + if (I10NM_GET_SCK_BAR(d, reg)) { + i10nm_printk(KERN_ERR, "Failed to socket bar\n"); + return -ENODEV; + } + + base = I10NM_GET_SCK_MMIO_BASE(reg); + edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n", + j++, base, reg); + + for (i = 0; i < I10NM_NUM_IMC; i++) { + mdev = pci_get_dev_wrapper(d->seg, d->bus[0], + 12 + i, 0); + if (i == 0 && !mdev) { + i10nm_printk(KERN_ERR, "No IMC found\n"); + return -ENODEV; + } + if (!mdev) + continue; + + d->imc[i].mdev = mdev; + + if (I10NM_GET_IMC_BAR(d, i, reg)) { + i10nm_printk(KERN_ERR, "Failed to get mc bar\n"); + return -ENODEV; + } + + off = I10NM_GET_IMC_MMIO_OFFSET(reg); + size = I10NM_GET_IMC_MMIO_SIZE(reg); + edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n", + i, base + off, size, reg); + + mbase = ioremap(base + off, size); + if (!mbase) { + i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", + base + off); + return -ENODEV; + } + + d->imc[i].mbase = mbase; + } + } + + return 0; +} + +static const struct x86_cpu_id i10nm_cpuids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_X, 0, 0 }, + { } +}; +MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids); + +static bool i10nm_check_ecc(struct skx_imc *imc, int chan) +{ + u32 mcmtr; + + mcmtr = *(u32 *)(imc->mbase + 0x20ef8 + chan * 0x4000); + edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr); + + return !!GET_BITFIELD(mcmtr, 2, 2); +} + +static int i10nm_get_dimm_config(struct mem_ctl_info *mci) +{ + struct skx_pvt *pvt = mci->pvt_info; + struct skx_imc *imc = pvt->imc; + struct dimm_info *dimm; + u32 mtr, mcddrtcfg; + int i, j, ndimms; + + for (i = 0; i < I10NM_NUM_CHANNELS; i++) { + if (!imc->mbase) + continue; + + ndimms = 0; + for (j = 0; j < I10NM_NUM_DIMMS; j++) { + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, + mci->n_layers, i, j, 0); + mtr = I10NM_GET_DIMMMTR(imc, i, j); + mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j); + edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x 
(mc%d ch%d dimm%d)\n", + mtr, mcddrtcfg, imc->mc, i, j); + + if (IS_DIMM_PRESENT(mtr)) + ndimms += skx_get_dimm_info(mtr, 0, dimm, + imc, i, j); + else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) + ndimms += skx_get_nvdimm_info(dimm, imc, i, j, + EDAC_MOD_STR); + } + if (ndimms && !i10nm_check_ecc(imc, 0)) { + i10nm_printk(KERN_ERR, "ECC is disabled on imc %d\n", + imc->mc); + return -ENODEV; + } + } + + return 0; +} + +static struct notifier_block i10nm_mce_dec = { + .notifier_call = skx_mce_check_error, + .priority = MCE_PRIO_EDAC, +}; + +static int __init i10nm_init(void) +{ + u8 mc = 0, src_id = 0, node_id = 0; + const struct x86_cpu_id *id; + const char *owner; + struct skx_dev *d; + int rc, i, off[3] = {0xd0, 0xc8, 0xcc}; + u64 tolm, tohm; + + edac_dbg(2, "\n"); + + owner = edac_get_owner(); + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) + return -EBUSY; + + id = x86_match_cpu(i10nm_cpuids); + if (!id) + return -ENODEV; + + rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm); + if (rc) + return rc; + + rc = skx_get_all_bus_mappings(0x3452, 0xcc, I10NM, &i10nm_edac_list); + if (rc < 0) + goto fail; + if (rc == 0) { + i10nm_printk(KERN_ERR, "No memory controllers found\n"); + return -ENODEV; + } + + rc = i10nm_get_all_munits(); + if (rc < 0) + goto fail; + + list_for_each_entry(d, i10nm_edac_list, list) { + rc = skx_get_src_id(d, &src_id); + if (rc < 0) + goto fail; + + rc = skx_get_node_id(d, &node_id); + if (rc < 0) + goto fail; + + edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id); + for (i = 0; i < I10NM_NUM_IMC; i++) { + if (!d->imc[i].mdev) + continue; + + d->imc[i].mc = mc++; + d->imc[i].lmc = i; + d->imc[i].src_id = src_id; + d->imc[i].node_id = node_id; + + rc = skx_register_mci(&d->imc[i], d->imc[i].mdev, + "Intel_10nm Socket", EDAC_MOD_STR, + i10nm_get_dimm_config); + if (rc < 0) + goto fail; + } + } + + rc = skx_adxl_get(); + if (rc) + goto fail; + + opstate_init(); + mce_register_decode_chain(&i10nm_mce_dec); + setup_skx_debug("i10nm_test"); + + i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION); + + return 0; +fail: + skx_remove(); + return rc; +} + +static void __exit i10nm_exit(void) +{ + edac_dbg(2, "\n"); + teardown_skx_debug(); + mce_unregister_decode_chain(&i10nm_mce_dec); + skx_adxl_put(); + skx_remove(); +} + +module_init(i10nm_init); +module_exit(i10nm_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors"); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index c605089d899f..0a1814dad6cf 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -151,138 +151,223 @@ static const char * const mc6_mce_desc[] = { /* Scalable MCA error strings */ static const char * const smca_ls_mce_desc[] = { - "Load queue parity", - "Store queue parity", - "Miss address buffer payload parity", - "L1 TLB parity", - "Reserved", - "DC tag error type 6", - "DC tag error type 1", + "Load queue parity error", + "Store queue parity error", + "Miss address buffer payload parity error", + "Level 1 TLB parity error", + "DC Tag error type 5", + "DC Tag error type 6", + "DC Tag error type 1", "Internal error type 1", "Internal error type 2", - "Sys Read data error thread 0", - "Sys read data error thread 1", - "DC tag error type 2", - "DC data error type 1 (poison consumption)", - "DC data error type 2", - "DC data error type 3", - "DC tag error type 4", - "L2 TLB parity", + "System Read Data Error Thread 0", + "System Read Data Error Thread 1", + "DC Tag error type 2", + "DC Data error type 1 and poison 
consumption", + "DC Data error type 2", + "DC Data error type 3", + "DC Tag error type 4", + "Level 2 TLB parity error", "PDC parity error", - "DC tag error type 3", - "DC tag error type 5", - "L2 fill data error", + "DC Tag error type 3", + "DC Tag error type 5", + "L2 Fill Data error", }; static const char * const smca_if_mce_desc[] = { - "microtag probe port parity error", - "IC microtag or full tag multi-hit error", - "IC full tag parity", - "IC data array parity", - "Decoupling queue phys addr parity error", - "L0 ITLB parity error", - "L1 ITLB parity error", - "L2 ITLB parity error", - "BPQ snoop parity on Thread 0", - "BPQ snoop parity on Thread 1", - "L1 BTB multi-match error", - "L2 BTB multi-match error", - "L2 Cache Response Poison error", - "System Read Data error", + "Op Cache Microtag Probe Port Parity Error", + "IC Microtag or Full Tag Multi-hit Error", + "IC Full Tag Parity Error", + "IC Data Array Parity Error", + "Decoupling Queue PhysAddr Parity Error", + "L0 ITLB Parity Error", + "L1 ITLB Parity Error", + "L2 ITLB Parity Error", + "BPQ Thread 0 Snoop Parity Error", + "BPQ Thread 1 Snoop Parity Error", + "L1 BTB Multi-Match Error", + "L2 BTB Multi-Match Error", + "L2 Cache Response Poison Error", + "System Read Data Error", }; static const char * const smca_l2_mce_desc[] = { - "L2M tag multi-way-hit error", - "L2M tag ECC error", - "L2M data ECC error", - "HW assert", + "L2M Tag Multiple-Way-Hit error", + "L2M Tag or State Array ECC Error", + "L2M Data Array ECC Error", + "Hardware Assert Error", }; static const char * const smca_de_mce_desc[] = { - "uop cache tag parity error", - "uop cache data parity error", - "Insn buffer parity error", - "uop queue parity error", - "Insn dispatch queue parity error", - "Fetch address FIFO parity", - "Patch RAM data parity", - "Patch RAM sequencer parity", - "uop buffer parity" + "Micro-op cache tag parity error", + "Micro-op cache data parity error", + "Instruction buffer parity error", + "Micro-op queue parity error", + "Instruction dispatch queue parity error", + "Fetch address FIFO parity error", + "Patch RAM data parity error", + "Patch RAM sequencer parity error", + "Micro-op buffer parity error" }; static const char * const smca_ex_mce_desc[] = { - "Watchdog timeout error", - "Phy register file parity", - "Flag register file parity", - "Immediate displacement register file parity", - "Address generator payload parity", - "EX payload parity", - "Checkpoint queue parity", - "Retire dispatch queue parity", + "Watchdog Timeout error", + "Physical register file parity error", + "Flag register file parity error", + "Immediate displacement register file parity error", + "Address generator payload parity error", + "EX payload parity error", + "Checkpoint queue parity error", + "Retire dispatch queue parity error", "Retire status queue parity error", "Scheduling queue parity error", "Branch buffer queue parity error", + "Hardware Assertion error", }; static const char * const smca_fp_mce_desc[] = { - "Physical register file parity", - "Freelist parity error", - "Schedule queue parity", + "Physical register file (PRF) parity error", + "Freelist (FL) parity error", + "Schedule queue parity error", "NSQ parity error", - "Retire queue parity", - "Status register file parity", + "Retire queue (RQ) parity error", + "Status register file (SRF) parity error", "Hardware assertion", }; static const char * const smca_l3_mce_desc[] = { - "Shadow tag macro ECC error", - "Shadow tag macro multi-way-hit error", - "L3M tag ECC error", - "L3M tag 
multi-way-hit error", - "L3M data ECC error", - "XI parity, L3 fill done channel error", - "L3 victim queue parity", - "L3 HW assert", + "Shadow Tag Macro ECC Error", + "Shadow Tag Macro Multi-way-hit Error", + "L3M Tag ECC Error", + "L3M Tag Multi-way-hit Error", + "L3M Data ECC Error", + "SDP Parity Error or SystemReadDataError from XI", + "L3 Victim Queue Parity Error", + "L3 Hardware Assertion", }; static const char * const smca_cs_mce_desc[] = { - "Illegal request from transport layer", - "Address violation", - "Security violation", - "Illegal response from transport layer", - "Unexpected response", - "Parity error on incoming request or probe response data", - "Parity error on incoming read response data", - "Atomic request parity", - "ECC error on probe filter access", + "Illegal Request", + "Address Violation", + "Security Violation", + "Illegal Response", + "Unexpected Response", + "Request or Probe Parity Error", + "Read Response Parity Error", + "Atomic Request Parity Error", + "Probe Filter ECC Error", +}; + +static const char * const smca_cs2_mce_desc[] = { + "Illegal Request", + "Address Violation", + "Security Violation", + "Illegal Response", + "Unexpected Response", + "Request or Probe Parity Error", + "Read Response Parity Error", + "Atomic Request Parity Error", + "SDP read response had no match in the CS queue", + "Probe Filter Protocol Error", + "Probe Filter ECC Error", + "SDP read response had an unexpected RETRY error", + "Counter overflow error", + "Counter underflow error", }; static const char * const smca_pie_mce_desc[] = { - "HW assert", - "Internal PIE register security violation", - "Error on GMI link", - "Poison data written to internal PIE register", + "Hardware Assert", + "Register security violation", + "Link Error", + "Poison data consumption", + "A deferred error was detected in the DF" }; static const char * const smca_umc_mce_desc[] = { "DRAM ECC error", - "Data poison error on DRAM", + "Data poison error", "SDP parity error", "Advanced peripheral bus error", - "Command/address parity error", + "Address/Command parity error", "Write data CRC error", + "DCQ SRAM ECC error", + "AES SRAM ECC error", }; static const char * const smca_pb_mce_desc[] = { - "Parameter Block RAM ECC error", + "An ECC error in the Parameter Block RAM array", }; static const char * const smca_psp_mce_desc[] = { - "PSP RAM ECC or parity error", + "An ECC or parity error in a PSP RAM instance", +}; + +static const char * const smca_psp2_mce_desc[] = { + "High SRAM ECC or parity error", + "Low SRAM ECC or parity error", + "Instruction Cache Bank 0 ECC or parity error", + "Instruction Cache Bank 1 ECC or parity error", + "Instruction Tag Ram 0 parity error", + "Instruction Tag Ram 1 parity error", + "Data Cache Bank 0 ECC or parity error", + "Data Cache Bank 1 ECC or parity error", + "Data Cache Bank 2 ECC or parity error", + "Data Cache Bank 3 ECC or parity error", + "Data Tag Bank 0 parity error", + "Data Tag Bank 1 parity error", + "Data Tag Bank 2 parity error", + "Data Tag Bank 3 parity error", + "Dirty Data Ram parity error", + "TLB Bank 0 parity error", + "TLB Bank 1 parity error", + "System Hub Read Buffer ECC or parity error", }; static const char * const smca_smu_mce_desc[] = { - "SMU RAM ECC or parity error", + "An ECC or parity error in an SMU RAM instance", +}; + +static const char * const smca_smu2_mce_desc[] = { + "High SRAM ECC or parity error", + "Low SRAM ECC or parity error", + "Data Cache Bank A ECC or parity error", + "Data Cache Bank B ECC or parity error", + 
"Data Tag Cache Bank A ECC or parity error", + "Data Tag Cache Bank B ECC or parity error", + "Instruction Cache Bank A ECC or parity error", + "Instruction Cache Bank B ECC or parity error", + "Instruction Tag Cache Bank A ECC or parity error", + "Instruction Tag Cache Bank B ECC or parity error", + "System Hub Read Buffer ECC or parity error", +}; + +static const char * const smca_mp5_mce_desc[] = { + "High SRAM ECC or parity error", + "Low SRAM ECC or parity error", + "Data Cache Bank A ECC or parity error", + "Data Cache Bank B ECC or parity error", + "Data Tag Cache Bank A ECC or parity error", + "Data Tag Cache Bank B ECC or parity error", + "Instruction Cache Bank A ECC or parity error", + "Instruction Cache Bank B ECC or parity error", + "Instruction Tag Cache Bank A ECC or parity error", + "Instruction Tag Cache Bank B ECC or parity error", +}; + +static const char * const smca_nbio_mce_desc[] = { + "ECC or Parity error", + "PCIE error", + "SDP ErrEvent error", + "SDP Egress Poison Error", + "IOHC Internal Poison Error", +}; + +static const char * const smca_pcie_mce_desc[] = { + "CCIX PER Message logging", + "CCIX Read Response with Status: Non-Data Error", + "CCIX Write Response with Status: Non-Data Error", + "CCIX Read Response with Status: Data Error", + "CCIX Non-okay write response with data error", }; struct smca_mce_desc { @@ -299,11 +384,17 @@ static struct smca_mce_desc smca_mce_descs[] = { [SMCA_FP] = { smca_fp_mce_desc, ARRAY_SIZE(smca_fp_mce_desc) }, [SMCA_L3_CACHE] = { smca_l3_mce_desc, ARRAY_SIZE(smca_l3_mce_desc) }, [SMCA_CS] = { smca_cs_mce_desc, ARRAY_SIZE(smca_cs_mce_desc) }, + [SMCA_CS_V2] = { smca_cs2_mce_desc, ARRAY_SIZE(smca_cs2_mce_desc) }, [SMCA_PIE] = { smca_pie_mce_desc, ARRAY_SIZE(smca_pie_mce_desc) }, [SMCA_UMC] = { smca_umc_mce_desc, ARRAY_SIZE(smca_umc_mce_desc) }, [SMCA_PB] = { smca_pb_mce_desc, ARRAY_SIZE(smca_pb_mce_desc) }, [SMCA_PSP] = { smca_psp_mce_desc, ARRAY_SIZE(smca_psp_mce_desc) }, + [SMCA_PSP_V2] = { smca_psp2_mce_desc, ARRAY_SIZE(smca_psp2_mce_desc) }, [SMCA_SMU] = { smca_smu_mce_desc, ARRAY_SIZE(smca_smu_mce_desc) }, + [SMCA_SMU_V2] = { smca_smu2_mce_desc, ARRAY_SIZE(smca_smu2_mce_desc) }, + [SMCA_MP5] = { smca_mp5_mce_desc, ARRAY_SIZE(smca_mp5_mce_desc) }, + [SMCA_NBIO] = { smca_nbio_mce_desc, ARRAY_SIZE(smca_nbio_mce_desc) }, + [SMCA_PCIE] = { smca_pcie_mce_desc, ARRAY_SIZE(smca_pcie_mce_desc) }, }; static bool f12h_mc0_mce(u16 ec, u8 xec) @@ -874,13 +965,12 @@ static void decode_smca_error(struct mce *m) ip_name = smca_get_long_name(bank_type); - pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec); + pr_emerg(HW_ERR "%s Ext. Error Code: %d", ip_name, xec); /* Only print the decode of valid error codes */ if (xec < smca_mce_descs[bank_type].num_descs && (hwid->xec_bitmap & BIT_ULL(xec))) { - pr_emerg(HW_ERR "%s Error: ", ip_name); - pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]); + pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); } if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc) @@ -961,26 +1051,18 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) ((m->status & MCI_STATUS_UC) ? "UE" : (m->status & MCI_STATUS_DEFERRED) ? "-" : "CE"), ((m->status & MCI_STATUS_MISCV) ? "MiscV" : "-"), - ((m->status & MCI_STATUS_PCC) ? "PCC" : "-"), - ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-")); - - if (fam >= 0x15) { - pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-")); - - /* F15h, bank4, bit 43 is part of McaStatSubCache. 
*/ - if (fam != 0x15 || m->bank != 4) - pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-")); - } + ((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"), + ((m->status & MCI_STATUS_PCC) ? "PCC" : "-")); if (boot_cpu_has(X86_FEATURE_SMCA)) { u32 low, high; u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank); - pr_cont("|%s", ((m->status & MCI_STATUS_SYNDV) ? "SyndV" : "-")); - if (!rdmsr_safe(addr, &low, &high) && (low & MCI_CONFIG_MCAX)) pr_cont("|%s", ((m->status & MCI_STATUS_TCC) ? "TCC" : "-")); + + pr_cont("|%s", ((m->status & MCI_STATUS_SYNDV) ? "SyndV" : "-")); } /* do the two bits[14:13] together */ @@ -988,6 +1070,17 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) if (ecc) pr_cont("|%sECC", ((ecc == 2) ? "C" : "U")); + if (fam >= 0x15) { + pr_cont("|%s", (m->status & MCI_STATUS_DEFERRED ? "Deferred" : "-")); + + /* F15h, bank4, bit 43 is part of McaStatSubCache. */ + if (fam != 0x15 || m->bank != 4) + pr_cont("|%s", (m->status & MCI_STATUS_POISON ? "Poison" : "-")); + } + + if (fam >= 0x17) + pr_cont("|%s", (m->status & MCI_STATUS_SCRUB ? "Scrub" : "-")); + pr_cont("]: 0x%016llx\n", m->status); if (m->status & MCI_STATUS_ADDRV) diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c new file mode 100644 index 000000000000..adae4c848ca1 --- /dev/null +++ b/drivers/edac/skx_base.c @@ -0,0 +1,650 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * EDAC driver for Intel(R) Xeon(R) Skylake processors + * Copyright (c) 2016, Intel Corporation. + */ + +#include +#include +#include +#include +#include + +#include "edac_module.h" +#include "skx_common.h" + +#define EDAC_MOD_STR "skx_edac" + +/* + * Debug macros + */ +#define skx_printk(level, fmt, arg...) \ + edac_printk(level, "skx", fmt, ##arg) + +#define skx_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg) + +static struct list_head *skx_edac_list; + +static u64 skx_tolm, skx_tohm; +static int skx_num_sockets; +static unsigned int nvdimm_count; + +#define MASK26 0x3FFFFFF /* Mask for 2^26 */ +#define MASK29 0x1FFFFFFF /* Mask for 2^29 */ + +static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx) +{ + struct skx_dev *d; + + list_for_each_entry(d, skx_edac_list, list) { + if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number) + return d; + } + + return NULL; +} + +enum munittype { + CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD +}; + +struct munit { + u16 did; + u16 devfn[SKX_NUM_IMC]; + u8 busidx; + u8 per_socket; + enum munittype mtype; +}; + +/* + * List of PCI device ids that we need together with some device + * number and function numbers to tell which memory controller the + * device belongs to. 
+ */ +static const struct munit skx_all_munits[] = { + { 0x2054, { }, 1, 1, SAD_ALL }, + { 0x2055, { }, 1, 1, UTIL_ALL }, + { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 }, + { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 }, + { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 }, + { 0x208e, { }, 1, 0, SAD }, + { } +}; + +static int get_all_munits(const struct munit *m) +{ + struct pci_dev *pdev, *prev; + struct skx_dev *d; + u32 reg; + int i = 0, ndev = 0; + + prev = NULL; + for (;;) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev); + if (!pdev) + break; + ndev++; + if (m->per_socket == SKX_NUM_IMC) { + for (i = 0; i < SKX_NUM_IMC; i++) + if (m->devfn[i] == pdev->devfn) + break; + if (i == SKX_NUM_IMC) + goto fail; + } + d = get_skx_dev(pdev->bus, m->busidx); + if (!d) + goto fail; + + /* Be sure that the device is enabled */ + if (unlikely(pci_enable_device(pdev) < 0)) { + skx_printk(KERN_ERR, "Couldn't enable device %04x:%04x\n", + PCI_VENDOR_ID_INTEL, m->did); + goto fail; + } + + switch (m->mtype) { + case CHAN0: case CHAN1: case CHAN2: + pci_dev_get(pdev); + d->imc[i].chan[m->mtype].cdev = pdev; + break; + case SAD_ALL: + pci_dev_get(pdev); + d->sad_all = pdev; + break; + case UTIL_ALL: + pci_dev_get(pdev); + d->util_all = pdev; + break; + case SAD: + /* + * one of these devices per core, including cores + * that don't exist on this SKU. Ignore any that + * read a route table of zero, make sure all the + * non-zero values match. + */ + pci_read_config_dword(pdev, 0xB4, ®); + if (reg != 0) { + if (d->mcroute == 0) { + d->mcroute = reg; + } else if (d->mcroute != reg) { + skx_printk(KERN_ERR, "mcroute mismatch\n"); + goto fail; + } + } + ndev--; + break; + } + + prev = pdev; + } + + return ndev; +fail: + pci_dev_put(pdev); + return -ENODEV; +} + +static const struct x86_cpu_id skx_cpuids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 }, + { } +}; +MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); + +#define SKX_GET_MTMTR(dev, reg) \ + pci_read_config_dword((dev), 0x87c, &(reg)) + +static bool skx_check_ecc(struct pci_dev *pdev) +{ + u32 mtmtr; + + SKX_GET_MTMTR(pdev, mtmtr); + + return !!GET_BITFIELD(mtmtr, 2, 2); +} + +static int skx_get_dimm_config(struct mem_ctl_info *mci) +{ + struct skx_pvt *pvt = mci->pvt_info; + struct skx_imc *imc = pvt->imc; + u32 mtr, amap, mcddrtcfg; + struct dimm_info *dimm; + int i, j; + int ndimms; + + for (i = 0; i < SKX_NUM_CHANNELS; i++) { + ndimms = 0; + pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); + pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg); + for (j = 0; j < SKX_NUM_DIMMS; j++) { + dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, + mci->n_layers, i, j, 0); + pci_read_config_dword(imc->chan[i].cdev, + 0x80 + 4 * j, &mtr); + if (IS_DIMM_PRESENT(mtr)) { + ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j); + } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) { + ndimms += skx_get_nvdimm_info(dimm, imc, i, j, + EDAC_MOD_STR); + nvdimm_count++; + } + } + if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { + skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); + return -ENODEV; + } + } + + return 0; +} + +#define SKX_MAX_SAD 24 + +#define SKX_GET_SAD(d, i, reg) \ + pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &(reg)) +#define SKX_GET_ILV(d, i, reg) \ + pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &(reg)) + +#define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31) +#define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27) +#define SKX_SAD_LIMIT(sad) 
(((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26) +#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6) +#define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4) +#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2) +#define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0) + +#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0) +#define SKX_ILV_TARGET(tgt) ((tgt) & 7) + +static bool skx_sad_decode(struct decoded_addr *res) +{ + struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list); + u64 addr = res->addr; + int i, idx, tgt, lchan, shift; + u32 sad, ilv; + u64 limit, prev_limit; + int remote = 0; + + /* Simple sanity check for I/O space or out of range */ + if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) { + edac_dbg(0, "Address 0x%llx out of range\n", addr); + return false; + } + +restart: + prev_limit = 0; + for (i = 0; i < SKX_MAX_SAD; i++) { + SKX_GET_SAD(d, i, sad); + limit = SKX_SAD_LIMIT(sad); + if (SKX_SAD_ENABLE(sad)) { + if (addr >= prev_limit && addr <= limit) + goto sad_found; + } + prev_limit = limit + 1; + } + edac_dbg(0, "No SAD entry for 0x%llx\n", addr); + return false; + +sad_found: + SKX_GET_ILV(d, i, ilv); + + switch (SKX_SAD_INTERLEAVE(sad)) { + case 0: + idx = GET_BITFIELD(addr, 6, 8); + break; + case 1: + idx = GET_BITFIELD(addr, 8, 10); + break; + case 2: + idx = GET_BITFIELD(addr, 12, 14); + break; + case 3: + idx = GET_BITFIELD(addr, 30, 32); + break; + } + + tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3); + + /* If point to another node, find it and start over */ + if (SKX_ILV_REMOTE(tgt)) { + if (remote) { + edac_dbg(0, "Double remote!\n"); + return false; + } + remote = 1; + list_for_each_entry(d, skx_edac_list, list) { + if (d->imc[0].src_id == SKX_ILV_TARGET(tgt)) + goto restart; + } + edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt)); + return false; + } + + if (SKX_SAD_MOD3(sad) == 0) { + lchan = SKX_ILV_TARGET(tgt); + } else { + switch (SKX_SAD_MOD3MODE(sad)) { + case 0: + shift = 6; + break; + case 1: + shift = 8; + break; + case 2: + shift = 12; + break; + default: + edac_dbg(0, "illegal mod3mode\n"); + return false; + } + switch (SKX_SAD_MOD3ASMOD2(sad)) { + case 0: + lchan = (addr >> shift) % 3; + break; + case 1: + lchan = (addr >> shift) % 2; + break; + case 2: + lchan = (addr >> shift) % 2; + lchan = (lchan << 1) | !lchan; + break; + case 3: + lchan = ((addr >> shift) % 2) << 1; + break; + } + lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1); + } + + res->dev = d; + res->socket = d->imc[0].src_id; + res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2); + res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19); + + edac_dbg(2, "0x%llx: socket=%d imc=%d channel=%d\n", + res->addr, res->socket, res->imc, res->channel); + return true; +} + +#define SKX_MAX_TAD 8 + +#define SKX_GET_TADBASE(d, mc, i, reg) \ + pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &(reg)) +#define SKX_GET_TADWAYNESS(d, mc, i, reg) \ + pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &(reg)) +#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \ + pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &(reg)) + +#define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26) +#define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5) +#define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7) +#define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26) +#define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26) +#define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11)) 
+#define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1) + +/* which bit used for both socket and channel interleave */ +static int skx_granularity[] = { 6, 8, 12, 30 }; + +static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits) +{ + addr >>= shift; + addr /= ways; + addr <<= shift; + + return addr | (lowbits & ((1ull << shift) - 1)); +} + +static bool skx_tad_decode(struct decoded_addr *res) +{ + int i; + u32 base, wayness, chnilvoffset; + int skt_interleave_bit, chn_interleave_bit; + u64 channel_addr; + + for (i = 0; i < SKX_MAX_TAD; i++) { + SKX_GET_TADBASE(res->dev, res->imc, i, base); + SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness); + if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness)) + goto tad_found; + } + edac_dbg(0, "No TAD entry for 0x%llx\n", res->addr); + return false; + +tad_found: + res->sktways = SKX_TAD_SKTWAYS(wayness); + res->chanways = SKX_TAD_CHNWAYS(wayness); + skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)]; + chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)]; + + SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset); + channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset); + + if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) { + /* Must handle channel first, then socket */ + channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, + res->chanways, channel_addr); + channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, + res->sktways, channel_addr); + } else { + /* Handle socket then channel. Preserve low bits from original address */ + channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, + res->sktways, res->addr); + channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, + res->chanways, res->addr); + } + + res->chan_addr = channel_addr; + + edac_dbg(2, "0x%llx: chan_addr=0x%llx sktways=%d chanways=%d\n", + res->addr, res->chan_addr, res->sktways, res->chanways); + return true; +} + +#define SKX_MAX_RIR 4 + +#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \ + pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ + 0x108 + 4 * (i), &(reg)) +#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \ + pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ + 0x120 + 16 * (idx) + 4 * (i), &(reg)) + +#define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31) +#define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29) +#define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29)) +#define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19) +#define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26)) + +static bool skx_rir_decode(struct decoded_addr *res) +{ + int i, idx, chan_rank; + int shift; + u32 rirway, rirlv; + u64 rank_addr, prev_limit = 0, limit; + + if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg) + shift = 6; + else + shift = 13; + + for (i = 0; i < SKX_MAX_RIR; i++) { + SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway); + limit = SKX_RIR_LIMIT(rirway); + if (SKX_RIR_VALID(rirway)) { + if (prev_limit <= res->chan_addr && + res->chan_addr <= limit) + goto rir_found; + } + prev_limit = limit; + } + edac_dbg(0, "No RIR entry for 0x%llx\n", res->addr); + return false; + +rir_found: + rank_addr = res->chan_addr >> shift; + rank_addr /= SKX_RIR_WAYS(rirway); + rank_addr <<= shift; + rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0); + + res->rank_address = rank_addr; + idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway); + + SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, 
rirlv); + res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv); + chan_rank = SKX_RIR_CHAN_RANK(rirlv); + res->channel_rank = chan_rank; + res->dimm = chan_rank / 4; + res->rank = chan_rank % 4; + + edac_dbg(2, "0x%llx: dimm=%d rank=%d chan_rank=%d rank_addr=0x%llx\n", + res->addr, res->dimm, res->rank, + res->channel_rank, res->rank_address); + return true; +} + +static u8 skx_close_row[] = { + 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33 +}; + +static u8 skx_close_column[] = { + 3, 4, 5, 14, 19, 23, 24, 25, 26, 27 +}; + +static u8 skx_open_row[] = { + 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33 +}; + +static u8 skx_open_column[] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 +}; + +static u8 skx_open_fine_column[] = { + 3, 4, 5, 7, 8, 9, 10, 11, 12, 13 +}; + +static int skx_bits(u64 addr, int nbits, u8 *bits) +{ + int i, res = 0; + + for (i = 0; i < nbits; i++) + res |= ((addr >> bits[i]) & 1) << i; + return res; +} + +static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1) +{ + int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1); + + if (do_xor) + ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1); + + return ret; +} + +static bool skx_mad_decode(struct decoded_addr *r) +{ + struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm]; + int bg0 = dimm->fine_grain_bank ? 6 : 13; + + if (dimm->close_pg) { + r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row); + r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column); + r->column |= 0x400; /* C10 is autoprecharge, always set */ + r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28); + r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21); + } else { + r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row); + if (dimm->fine_grain_bank) + r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column); + else + r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column); + r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23); + r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21); + } + r->row &= (1u << dimm->rowbits) - 1; + + edac_dbg(2, "0x%llx: row=0x%x col=0x%x bank_addr=%d bank_group=%d\n", + r->addr, r->row, r->column, r->bank_address, + r->bank_group); + return true; +} + +static bool skx_decode(struct decoded_addr *res) +{ + return skx_sad_decode(res) && skx_tad_decode(res) && + skx_rir_decode(res) && skx_mad_decode(res); +} + +static struct notifier_block skx_mce_dec = { + .notifier_call = skx_mce_check_error, + .priority = MCE_PRIO_EDAC, +}; + +/* + * skx_init: + * make sure we are running on the correct cpu model + * search for all the devices we need + * check which DIMMs are present. 
+ */ +static int __init skx_init(void) +{ + const struct x86_cpu_id *id; + const struct munit *m; + const char *owner; + int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8}; + u8 mc = 0, src_id, node_id; + struct skx_dev *d; + + edac_dbg(2, "\n"); + + owner = edac_get_owner(); + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) + return -EBUSY; + + id = x86_match_cpu(skx_cpuids); + if (!id) + return -ENODEV; + + rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm); + if (rc) + return rc; + + rc = skx_get_all_bus_mappings(0x2016, 0xcc, SKX, &skx_edac_list); + if (rc < 0) + goto fail; + if (rc == 0) { + edac_dbg(2, "No memory controllers found\n"); + return -ENODEV; + } + skx_num_sockets = rc; + + for (m = skx_all_munits; m->did; m++) { + rc = get_all_munits(m); + if (rc < 0) + goto fail; + if (rc != m->per_socket * skx_num_sockets) { + edac_dbg(2, "Expected %d, got %d of 0x%x\n", + m->per_socket * skx_num_sockets, rc, m->did); + rc = -ENODEV; + goto fail; + } + } + + list_for_each_entry(d, skx_edac_list, list) { + rc = skx_get_src_id(d, &src_id); + if (rc < 0) + goto fail; + rc = skx_get_node_id(d, &node_id); + if (rc < 0) + goto fail; + edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id); + for (i = 0; i < SKX_NUM_IMC; i++) { + d->imc[i].mc = mc++; + d->imc[i].lmc = i; + d->imc[i].src_id = src_id; + d->imc[i].node_id = node_id; + rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev, + "Skylake Socket", EDAC_MOD_STR, + skx_get_dimm_config); + if (rc < 0) + goto fail; + } + } + + skx_set_decode(skx_decode); + + if (nvdimm_count && skx_adxl_get() == -ENODEV) + skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n"); + + /* Ensure that the OPSTATE is set correctly for POLL or NMI */ + opstate_init(); + + setup_skx_debug("skx_test"); + + mce_register_decode_chain(&skx_mce_dec); + + return 0; +fail: + skx_remove(); + return rc; +} + +static void __exit skx_exit(void) +{ + edac_dbg(2, "\n"); + mce_unregister_decode_chain(&skx_mce_dec); + teardown_skx_debug(); + if (nvdimm_count) + skx_adxl_put(); + skx_remove(); +} + +module_init(skx_init); +module_exit(skx_exit); + +module_param(edac_op_state, int, 0444); +MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Tony Luck"); +MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors"); diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c new file mode 100644 index 000000000000..0e96e7b5b0a7 --- /dev/null +++ b/drivers/edac/skx_common.c @@ -0,0 +1,691 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Common codes for both the skx_edac driver and Intel 10nm server EDAC driver. + * Originally split out from the skx_edac driver. + * + * Copyright (c) 2018, Intel Corporation. 
+ */ + +#include +#include +#include +#include +#include +#include "edac_module.h" +#include "skx_common.h" + +static const char * const component_names[] = { + [INDEX_SOCKET] = "ProcessorSocketId", + [INDEX_MEMCTRL] = "MemoryControllerId", + [INDEX_CHANNEL] = "ChannelId", + [INDEX_DIMM] = "DimmSlotId", +}; + +static int component_indices[ARRAY_SIZE(component_names)]; +static int adxl_component_count; +static const char * const *adxl_component_names; +static u64 *adxl_values; +static char *adxl_msg; + +static char skx_msg[MSG_SIZE]; +static skx_decode_f skx_decode; +static u64 skx_tolm, skx_tohm; +static LIST_HEAD(dev_edac_list); + +int __init skx_adxl_get(void) +{ + const char * const *names; + int i, j; + + names = adxl_get_component_names(); + if (!names) { + skx_printk(KERN_NOTICE, "No firmware support for address translation.\n"); + return -ENODEV; + } + + for (i = 0; i < INDEX_MAX; i++) { + for (j = 0; names[j]; j++) { + if (!strcmp(component_names[i], names[j])) { + component_indices[i] = j; + break; + } + } + + if (!names[j]) + goto err; + } + + adxl_component_names = names; + while (*names++) + adxl_component_count++; + + adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values), + GFP_KERNEL); + if (!adxl_values) { + adxl_component_count = 0; + return -ENOMEM; + } + + adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL); + if (!adxl_msg) { + adxl_component_count = 0; + kfree(adxl_values); + return -ENOMEM; + } + + return 0; +err: + skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ", + component_names[i]); + for (j = 0; names[j]; j++) + skx_printk(KERN_CONT, "%s ", names[j]); + skx_printk(KERN_CONT, "\n"); + + return -ENODEV; +} + +void __exit skx_adxl_put(void) +{ + kfree(adxl_values); + kfree(adxl_msg); +} + +static bool skx_adxl_decode(struct decoded_addr *res) +{ + int i, len = 0; + + if (res->addr >= skx_tohm || (res->addr >= skx_tolm && + res->addr < BIT_ULL(32))) { + edac_dbg(0, "Address 0x%llx out of range\n", res->addr); + return false; + } + + if (adxl_decode(res->addr, adxl_values)) { + edac_dbg(0, "Failed to decode 0x%llx\n", res->addr); + return false; + } + + res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]]; + res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]]; + res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]]; + res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]]; + + for (i = 0; i < adxl_component_count; i++) { + if (adxl_values[i] == ~0x0ull) + continue; + + len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx", + adxl_component_names[i], adxl_values[i]); + if (MSG_SIZE - len <= 0) + break; + } + + return true; +} + +void skx_set_decode(skx_decode_f decode) +{ + skx_decode = decode; +} + +int skx_get_src_id(struct skx_dev *d, u8 *id) +{ + u32 reg; + + if (pci_read_config_dword(d->util_all, 0xf0, ®)) { + skx_printk(KERN_ERR, "Failed to read src id\n"); + return -ENODEV; + } + + *id = GET_BITFIELD(reg, 12, 14); + return 0; +} + +int skx_get_node_id(struct skx_dev *d, u8 *id) +{ + u32 reg; + + if (pci_read_config_dword(d->util_all, 0xf4, ®)) { + skx_printk(KERN_ERR, "Failed to read node id\n"); + return -ENODEV; + } + + *id = GET_BITFIELD(reg, 0, 2); + return 0; +} + +static int get_width(u32 mtr) +{ + switch (GET_BITFIELD(mtr, 8, 9)) { + case 0: + return DEV_X4; + case 1: + return DEV_X8; + case 2: + return DEV_X16; + } + return DEV_UNKNOWN; +} + +/* + * We use the per-socket device @did to count how many sockets are present, + * and to detemine which PCI buses are associated with each socket. 
Allocate + * and build the full list of all the skx_dev structures that we need here. + */ +int skx_get_all_bus_mappings(unsigned int did, int off, enum type type, + struct list_head **list) +{ + struct pci_dev *pdev, *prev; + struct skx_dev *d; + u32 reg; + int ndev = 0; + + prev = NULL; + for (;;) { + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, prev); + if (!pdev) + break; + ndev++; + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + pci_dev_put(pdev); + return -ENOMEM; + } + + if (pci_read_config_dword(pdev, off, ®)) { + kfree(d); + pci_dev_put(pdev); + skx_printk(KERN_ERR, "Failed to read bus idx\n"); + return -ENODEV; + } + + d->bus[0] = GET_BITFIELD(reg, 0, 7); + d->bus[1] = GET_BITFIELD(reg, 8, 15); + if (type == SKX) { + d->seg = pci_domain_nr(pdev->bus); + d->bus[2] = GET_BITFIELD(reg, 16, 23); + d->bus[3] = GET_BITFIELD(reg, 24, 31); + } else { + d->seg = GET_BITFIELD(reg, 16, 23); + } + + edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n", + d->bus[0], d->bus[1], d->bus[2], d->bus[3]); + list_add_tail(&d->list, &dev_edac_list); + prev = pdev; + } + + if (list) + *list = &dev_edac_list; + return ndev; +} + +int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) +{ + struct pci_dev *pdev; + u32 reg; + + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL); + if (!pdev) { + skx_printk(KERN_ERR, "Can't get tolm/tohm\n"); + return -ENODEV; + } + + if (pci_read_config_dword(pdev, off[0], ®)) { + skx_printk(KERN_ERR, "Failed to read tolm\n"); + goto fail; + } + skx_tolm = reg; + + if (pci_read_config_dword(pdev, off[1], ®)) { + skx_printk(KERN_ERR, "Failed to read lower tohm\n"); + goto fail; + } + skx_tohm = reg; + + if (pci_read_config_dword(pdev, off[2], ®)) { + skx_printk(KERN_ERR, "Failed to read upper tohm\n"); + goto fail; + } + skx_tohm |= (u64)reg << 32; + + pci_dev_put(pdev); + *tolm = skx_tolm; + *tohm = skx_tohm; + edac_dbg(2, "tolm = 0x%llx tohm = 0x%llx\n", skx_tolm, skx_tohm); + return 0; +fail: + pci_dev_put(pdev); + return -ENODEV; +} + +static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, + int minval, int maxval, const char *name) +{ + u32 val = GET_BITFIELD(reg, lobit, hibit); + + if (val < minval || val > maxval) { + edac_dbg(2, "bad %s = %d (raw=0x%x)\n", name, val, reg); + return -EINVAL; + } + return val + add; +} + +#define numrank(reg) skx_get_dimm_attr(reg, 12, 13, 0, 0, 2, "ranks") +#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows") +#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols") + +int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, + struct skx_imc *imc, int chan, int dimmno) +{ + int banks = 16, ranks, rows, cols, npages; + u64 size; + + ranks = numrank(mtr); + rows = numrow(mtr); + cols = numcol(mtr); + + /* + * Compute size in 8-byte (2^3) words, then shift to MiB (2^20) + */ + size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3); + npages = MiB_TO_PAGES(size); + + edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: 0x%x, col: 0x%x\n", + imc->mc, chan, dimmno, size, npages, + banks, 1 << ranks, rows, cols); + + imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); + imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); + imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); + imc->chan[chan].dimms[dimmno].rowbits = rows; + imc->chan[chan].dimms[dimmno].colbits = cols; + + dimm->nr_pages = npages; + dimm->grain = 32; + dimm->dtype = get_width(mtr); + dimm->mtype = MEM_DDR4; + 
dimm->edac_mode = EDAC_SECDED; /* likely better than this */ + snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", + imc->src_id, imc->lmc, chan, dimmno); + + return 1; +} + +int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, + int chan, int dimmno, const char *mod_str) +{ + int smbios_handle; + u32 dev_handle; + u16 flags; + u64 size = 0; + + dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc, + imc->src_id, 0); + + smbios_handle = nfit_get_smbios_id(dev_handle, &flags); + if (smbios_handle == -EOPNOTSUPP) { + pr_warn_once("%s: Can't find size of NVDIMM. Try enabling CONFIG_ACPI_NFIT\n", mod_str); + goto unknown_size; + } + + if (smbios_handle < 0) { + skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=0x%x\n", dev_handle); + goto unknown_size; + } + + if (flags & ACPI_NFIT_MEM_MAP_FAILED) { + skx_printk(KERN_ERR, "NVDIMM ADR=0x%x is not mapped\n", dev_handle); + goto unknown_size; + } + + size = dmi_memdev_size(smbios_handle); + if (size == ~0ull) + skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=0x%x/SMBIOS=0x%x\n", + dev_handle, smbios_handle); + +unknown_size: + dimm->nr_pages = size >> PAGE_SHIFT; + dimm->grain = 32; + dimm->dtype = DEV_UNKNOWN; + dimm->mtype = MEM_NVDIMM; + dimm->edac_mode = EDAC_SECDED; /* likely better than this */ + + edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n", + imc->mc, chan, dimmno, size >> 20, dimm->nr_pages); + + snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", + imc->src_id, imc->lmc, chan, dimmno); + + return (size == 0 || size == ~0ull) ? 0 : 1; +} + +int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, + const char *ctl_name, const char *mod_str, + get_dimm_config_f get_dimm_config) +{ + struct mem_ctl_info *mci; + struct edac_mc_layer layers[2]; + struct skx_pvt *pvt; + int rc; + + /* Allocate a new MC control structure */ + layers[0].type = EDAC_MC_LAYER_CHANNEL; + layers[0].size = NUM_CHANNELS; + layers[0].is_virt_csrow = false; + layers[1].type = EDAC_MC_LAYER_SLOT; + layers[1].size = NUM_DIMMS; + layers[1].is_virt_csrow = true; + mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers, + sizeof(struct skx_pvt)); + + if (unlikely(!mci)) + return -ENOMEM; + + edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci); + + /* Associate skx_dev and mci for future usage */ + imc->mci = mci; + pvt = mci->pvt_info; + pvt->imc = imc; + + mci->ctl_name = kasprintf(GFP_KERNEL, "%s#%d IMC#%d", ctl_name, + imc->node_id, imc->lmc); + if (!mci->ctl_name) { + rc = -ENOMEM; + goto fail0; + } + + mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_NVDIMM; + mci->edac_ctl_cap = EDAC_FLAG_NONE; + mci->edac_cap = EDAC_FLAG_NONE; + mci->mod_name = mod_str; + mci->dev_name = pci_name(pdev); + mci->ctl_page_to_phys = NULL; + + rc = get_dimm_config(mci); + if (rc < 0) + goto fail; + + /* Record ptr to the generic device */ + mci->pdev = &pdev->dev; + + /* Add this new MC control structure to EDAC's list of MCs */ + if (unlikely(edac_mc_add_mc(mci))) { + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); + rc = -EINVAL; + goto fail; + } + + return 0; + +fail: + kfree(mci->ctl_name); +fail0: + edac_mc_free(mci); + imc->mci = NULL; + return rc; +} + +static void skx_unregister_mci(struct skx_imc *imc) +{ + struct mem_ctl_info *mci = imc->mci; + + if (!mci) + return; + + edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci); + + /* Remove MC sysfs nodes */ + edac_mc_del_mc(mci->pdev); + + edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); + kfree(mci->ctl_name); + 
edac_mc_free(mci); +} + +static struct mem_ctl_info *get_mci(int src_id, int lmc) +{ + struct skx_dev *d; + + if (lmc > NUM_IMC - 1) { + skx_printk(KERN_ERR, "Bad lmc %d\n", lmc); + return NULL; + } + + list_for_each_entry(d, &dev_edac_list, list) { + if (d->imc[0].src_id == src_id) + return d->imc[lmc].mci; + } + + skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc); + return NULL; +} + +static void skx_mce_output_error(struct mem_ctl_info *mci, + const struct mce *m, + struct decoded_addr *res) +{ + enum hw_event_mc_err_type tp_event; + char *type, *optype; + bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); + bool overflow = GET_BITFIELD(m->status, 62, 62); + bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); + bool recoverable; + u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); + u32 mscod = GET_BITFIELD(m->status, 16, 31); + u32 errcode = GET_BITFIELD(m->status, 0, 15); + u32 optypenum = GET_BITFIELD(m->status, 4, 6); + + recoverable = GET_BITFIELD(m->status, 56, 56); + + if (uncorrected_error) { + core_err_cnt = 1; + if (ripv) { + type = "FATAL"; + tp_event = HW_EVENT_ERR_FATAL; + } else { + type = "NON_FATAL"; + tp_event = HW_EVENT_ERR_UNCORRECTED; + } + } else { + type = "CORRECTED"; + tp_event = HW_EVENT_ERR_CORRECTED; + } + + /* + * According to Intel Architecture spec vol 3B, + * Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding" + * memory errors should fit one of these masks: + * 000f 0000 1mmm cccc (binary) + * 000f 0010 1mmm cccc (binary) [RAM used as cache] + * where: + * f = Correction Report Filtering Bit. If 1, subsequent errors + * won't be shown + * mmm = error type + * cccc = channel + * If the mask doesn't match, report an error to the parsing logic + */ + if (!((errcode & 0xef80) == 0x80 || (errcode & 0xef80) == 0x280)) { + optype = "Can't parse: it is not a mem"; + } else { + switch (optypenum) { + case 0: + optype = "generic undef request error"; + break; + case 1: + optype = "memory read error"; + break; + case 2: + optype = "memory write error"; + break; + case 3: + optype = "addr/cmd error"; + break; + case 4: + optype = "memory scrubbing error"; + break; + default: + optype = "reserved"; + break; + } + } + if (adxl_component_count) { + snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? " recoverable" : "", + mscod, errcode, adxl_msg); + } else { + snprintf(skx_msg, MSG_SIZE, + "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x", + overflow ? " OVERFLOW" : "", + (uncorrected_error && recoverable) ? 
" recoverable" : "", + mscod, errcode, + res->socket, res->imc, res->rank, + res->bank_group, res->bank_address, res->row, res->column); + } + + edac_dbg(0, "%s\n", skx_msg); + + /* Call the helper to output message */ + edac_mc_handle_error(tp_event, mci, core_err_cnt, + m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, + res->channel, res->dimm, -1, + optype, skx_msg); +} + +int skx_mce_check_error(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct mce *mce = (struct mce *)data; + struct decoded_addr res; + struct mem_ctl_info *mci; + char *type; + + if (edac_get_report_status() == EDAC_REPORTING_DISABLED) + return NOTIFY_DONE; + + /* ignore unless this is memory related with an address */ + if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) + return NOTIFY_DONE; + + memset(&res, 0, sizeof(res)); + res.addr = mce->addr; + + if (adxl_component_count) { + if (!skx_adxl_decode(&res)) + return NOTIFY_DONE; + + mci = get_mci(res.socket, res.imc); + } else { + if (!skx_decode || !skx_decode(&res)) + return NOTIFY_DONE; + + mci = res.dev->imc[res.imc].mci; + } + + if (!mci) + return NOTIFY_DONE; + + if (mce->mcgstatus & MCG_STATUS_MCIP) + type = "Exception"; + else + type = "Event"; + + skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); + + skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: 0x%llx " + "Bank %d: 0x%llx\n", mce->extcpu, type, + mce->mcgstatus, mce->bank, mce->status); + skx_mc_printk(mci, KERN_DEBUG, "TSC 0x%llx ", mce->tsc); + skx_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", mce->addr); + skx_mc_printk(mci, KERN_DEBUG, "MISC 0x%llx ", mce->misc); + + skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:0x%x TIME %llu SOCKET " + "%u APIC 0x%x\n", mce->cpuvendor, mce->cpuid, + mce->time, mce->socketid, mce->apicid); + + skx_mce_output_error(mci, mce, &res); + + return NOTIFY_DONE; +} + +void skx_remove(void) +{ + int i, j; + struct skx_dev *d, *tmp; + + edac_dbg(0, "\n"); + + list_for_each_entry_safe(d, tmp, &dev_edac_list, list) { + list_del(&d->list); + for (i = 0; i < NUM_IMC; i++) { + if (d->imc[i].mci) + skx_unregister_mci(&d->imc[i]); + + if (d->imc[i].mdev) + pci_dev_put(d->imc[i].mdev); + + if (d->imc[i].mbase) + iounmap(d->imc[i].mbase); + + for (j = 0; j < NUM_CHANNELS; j++) { + if (d->imc[i].chan[j].cdev) + pci_dev_put(d->imc[i].chan[j].cdev); + } + } + if (d->util_all) + pci_dev_put(d->util_all); + if (d->sad_all) + pci_dev_put(d->sad_all); + if (d->uracu) + pci_dev_put(d->uracu); + + kfree(d); + } +} + +#ifdef CONFIG_EDAC_DEBUG +/* + * Debug feature. + * Exercise the address decode logic by writing an address to + * /sys/kernel/debug/edac/dirname/addr. 
+ */ +static struct dentry *skx_test; + +static int debugfs_u64_set(void *data, u64 val) +{ + struct mce m; + + pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); + + memset(&m, 0, sizeof(m)); + /* ADDRV + MemRd + Unknown channel */ + m.status = MCI_STATUS_ADDRV + 0x90; + /* One corrected error */ + m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT); + m.addr = val; + skx_mce_check_error(NULL, 0, &m); + + return 0; +} +DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); + +void setup_skx_debug(const char *dirname) +{ + skx_test = edac_debugfs_create_dir(dirname); + if (!skx_test) + return; + + if (!edac_debugfs_create_file("addr", 0200, skx_test, + NULL, &fops_u64_wo)) { + debugfs_remove(skx_test); + skx_test = NULL; + } +} + +void teardown_skx_debug(void) +{ + debugfs_remove_recursive(skx_test); +} +#endif /*CONFIG_EDAC_DEBUG*/ diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h new file mode 100644 index 000000000000..d25374e34d4f --- /dev/null +++ b/drivers/edac/skx_common.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Common codes for both the skx_edac driver and Intel 10nm server EDAC driver. + * Originally split out from the skx_edac driver. + * + * Copyright (c) 2018, Intel Corporation. + */ + +#ifndef _SKX_COMM_EDAC_H +#define _SKX_COMM_EDAC_H + +#define MSG_SIZE 1024 + +/* + * Debug macros + */ +#define skx_printk(level, fmt, arg...) \ + edac_printk(level, "skx", fmt, ##arg) + +#define skx_mc_printk(mci, level, fmt, arg...) \ + edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg) + +/* + * Get a bit field at register value , from bit to bit + */ +#define GET_BITFIELD(v, lo, hi) \ + (((v) & GENMASK_ULL((hi), (lo))) >> (lo)) + +#define SKX_NUM_IMC 2 /* Memory controllers per socket */ +#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */ +#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */ + +#define I10NM_NUM_IMC 4 +#define I10NM_NUM_CHANNELS 2 +#define I10NM_NUM_DIMMS 2 + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC) +#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS) +#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS) + +#define IS_DIMM_PRESENT(r) GET_BITFIELD(r, 15, 15) +#define IS_NVDIMM_PRESENT(r, i) GET_BITFIELD(r, i, i) + +/* + * Each cpu socket contains some pci devices that provide global + * information, and also some that are local to each of the two + * memory controllers on the die. 
+ */ +struct skx_dev { + struct list_head list; + u8 bus[4]; + int seg; + struct pci_dev *sad_all; + struct pci_dev *util_all; + struct pci_dev *uracu; /* for i10nm CPU */ + u32 mcroute; + struct skx_imc { + struct mem_ctl_info *mci; + struct pci_dev *mdev; /* for i10nm CPU */ + void __iomem *mbase; /* for i10nm CPU */ + u8 mc; /* system wide mc# */ + u8 lmc; /* socket relative mc# */ + u8 src_id, node_id; + struct skx_channel { + struct pci_dev *cdev; + struct skx_dimm { + u8 close_pg; + u8 bank_xor_enable; + u8 fine_grain_bank; + u8 rowbits; + u8 colbits; + } dimms[NUM_DIMMS]; + } chan[NUM_CHANNELS]; + } imc[NUM_IMC]; +}; + +struct skx_pvt { + struct skx_imc *imc; +}; + +enum type { + SKX, + I10NM +}; + +enum { + INDEX_SOCKET, + INDEX_MEMCTRL, + INDEX_CHANNEL, + INDEX_DIMM, + INDEX_MAX +}; + +struct decoded_addr { + struct skx_dev *dev; + u64 addr; + int socket; + int imc; + int channel; + u64 chan_addr; + int sktways; + int chanways; + int dimm; + int rank; + int channel_rank; + u64 rank_address; + int row; + int column; + int bank_address; + int bank_group; +}; + +typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci); +typedef bool (*skx_decode_f)(struct decoded_addr *res); + +int __init skx_adxl_get(void); +void __exit skx_adxl_put(void); +void skx_set_decode(skx_decode_f decode); + +int skx_get_src_id(struct skx_dev *d, u8 *id); +int skx_get_node_id(struct skx_dev *d, u8 *id); + +int skx_get_all_bus_mappings(unsigned int did, int off, enum type, + struct list_head **list); + +int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm); + +int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, + struct skx_imc *imc, int chan, int dimmno); + +int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, + int chan, int dimmno, const char *mod_str); + +int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev, + const char *ctl_name, const char *mod_str, + get_dimm_config_f get_dimm_config); + +int skx_mce_check_error(struct notifier_block *nb, unsigned long val, + void *data); + +void skx_remove(void); + +#ifdef CONFIG_EDAC_DEBUG +void setup_skx_debug(const char *dirname); +void teardown_skx_debug(void); +#else +static inline void setup_skx_debug(const char *dirname) {} +static inline void teardown_skx_debug(void) {} +#endif /*CONFIG_EDAC_DEBUG*/ + +#endif /* _SKX_COMM_EDAC_H */ diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c deleted file mode 100644 index 93ef161bb5e1..000000000000 --- a/drivers/edac/skx_edac.c +++ /dev/null @@ -1,1358 +0,0 @@ -/* - * EDAC driver for Intel(R) Xeon(R) Skylake processors - * Copyright (c) 2016, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "edac_module.h" - -#define EDAC_MOD_STR "skx_edac" -#define MSG_SIZE 1024 - -/* - * Debug macros - */ -#define skx_printk(level, fmt, arg...) 
\ - edac_printk(level, "skx", fmt, ##arg) - -#define skx_mc_printk(mci, level, fmt, arg...) \ - edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg) - -/* - * Get a bit field at register value , from bit to bit - */ -#define GET_BITFIELD(v, lo, hi) \ - (((v) & GENMASK_ULL((hi), (lo))) >> (lo)) - -static LIST_HEAD(skx_edac_list); - -static u64 skx_tolm, skx_tohm; -static char *skx_msg; -static unsigned int nvdimm_count; - -enum { - INDEX_SOCKET, - INDEX_MEMCTRL, - INDEX_CHANNEL, - INDEX_DIMM, - INDEX_MAX -}; - -static const char * const component_names[] = { - [INDEX_SOCKET] = "ProcessorSocketId", - [INDEX_MEMCTRL] = "MemoryControllerId", - [INDEX_CHANNEL] = "ChannelId", - [INDEX_DIMM] = "DimmSlotId", -}; - -static int component_indices[ARRAY_SIZE(component_names)]; -static int adxl_component_count; -static const char * const *adxl_component_names; -static u64 *adxl_values; -static char *adxl_msg; - -#define NUM_IMC 2 /* memory controllers per socket */ -#define NUM_CHANNELS 3 /* channels per memory controller */ -#define NUM_DIMMS 2 /* Max DIMMS per channel */ - -#define MASK26 0x3FFFFFF /* Mask for 2^26 */ -#define MASK29 0x1FFFFFFF /* Mask for 2^29 */ - -/* - * Each cpu socket contains some pci devices that provide global - * information, and also some that are local to each of the two - * memory controllers on the die. - */ -struct skx_dev { - struct list_head list; - u8 bus[4]; - int seg; - struct pci_dev *sad_all; - struct pci_dev *util_all; - u32 mcroute; - struct skx_imc { - struct mem_ctl_info *mci; - u8 mc; /* system wide mc# */ - u8 lmc; /* socket relative mc# */ - u8 src_id, node_id; - struct skx_channel { - struct pci_dev *cdev; - struct skx_dimm { - u8 close_pg; - u8 bank_xor_enable; - u8 fine_grain_bank; - u8 rowbits; - u8 colbits; - } dimms[NUM_DIMMS]; - } chan[NUM_CHANNELS]; - } imc[NUM_IMC]; -}; -static int skx_num_sockets; - -struct skx_pvt { - struct skx_imc *imc; -}; - -struct decoded_addr { - struct skx_dev *dev; - u64 addr; - int socket; - int imc; - int channel; - u64 chan_addr; - int sktways; - int chanways; - int dimm; - int rank; - int channel_rank; - u64 rank_address; - int row; - int column; - int bank_address; - int bank_group; -}; - -static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx) -{ - struct skx_dev *d; - - list_for_each_entry(d, &skx_edac_list, list) { - if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number) - return d; - } - - return NULL; -} - -enum munittype { - CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD -}; - -struct munit { - u16 did; - u16 devfn[NUM_IMC]; - u8 busidx; - u8 per_socket; - enum munittype mtype; -}; - -/* - * List of PCI device ids that we need together with some device - * number and function numbers to tell which memory controller the - * device belongs to. - */ -static const struct munit skx_all_munits[] = { - { 0x2054, { }, 1, 1, SAD_ALL }, - { 0x2055, { }, 1, 1, UTIL_ALL }, - { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 }, - { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 }, - { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 }, - { 0x208e, { }, 1, 0, SAD }, - { } -}; - -/* - * We use the per-socket device 0x2016 to count how many sockets are present, - * and to detemine which PCI buses are associated with each socket. Allocate - * and build the full list of all the skx_dev structures that we need here. 
- */ -static int get_all_bus_mappings(void) -{ - struct pci_dev *pdev, *prev; - struct skx_dev *d; - u32 reg; - int ndev = 0; - - prev = NULL; - for (;;) { - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev); - if (!pdev) - break; - ndev++; - d = kzalloc(sizeof(*d), GFP_KERNEL); - if (!d) { - pci_dev_put(pdev); - return -ENOMEM; - } - d->seg = pci_domain_nr(pdev->bus); - pci_read_config_dword(pdev, 0xCC, ®); - d->bus[0] = GET_BITFIELD(reg, 0, 7); - d->bus[1] = GET_BITFIELD(reg, 8, 15); - d->bus[2] = GET_BITFIELD(reg, 16, 23); - d->bus[3] = GET_BITFIELD(reg, 24, 31); - edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n", - d->bus[0], d->bus[1], d->bus[2], d->bus[3]); - list_add_tail(&d->list, &skx_edac_list); - skx_num_sockets++; - prev = pdev; - } - - return ndev; -} - -static int get_all_munits(const struct munit *m) -{ - struct pci_dev *pdev, *prev; - struct skx_dev *d; - u32 reg; - int i = 0, ndev = 0; - - prev = NULL; - for (;;) { - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev); - if (!pdev) - break; - ndev++; - if (m->per_socket == NUM_IMC) { - for (i = 0; i < NUM_IMC; i++) - if (m->devfn[i] == pdev->devfn) - break; - if (i == NUM_IMC) - goto fail; - } - d = get_skx_dev(pdev->bus, m->busidx); - if (!d) - goto fail; - - /* Be sure that the device is enabled */ - if (unlikely(pci_enable_device(pdev) < 0)) { - skx_printk(KERN_ERR, "Couldn't enable device %04x:%04x\n", - PCI_VENDOR_ID_INTEL, m->did); - goto fail; - } - - switch (m->mtype) { - case CHAN0: case CHAN1: case CHAN2: - pci_dev_get(pdev); - d->imc[i].chan[m->mtype].cdev = pdev; - break; - case SAD_ALL: - pci_dev_get(pdev); - d->sad_all = pdev; - break; - case UTIL_ALL: - pci_dev_get(pdev); - d->util_all = pdev; - break; - case SAD: - /* - * one of these devices per core, including cores - * that don't exist on this SKU. Ignore any that - * read a route table of zero, make sure all the - * non-zero values match. 
- */ - pci_read_config_dword(pdev, 0xB4, ®); - if (reg != 0) { - if (d->mcroute == 0) - d->mcroute = reg; - else if (d->mcroute != reg) { - skx_printk(KERN_ERR, - "mcroute mismatch\n"); - goto fail; - } - } - ndev--; - break; - } - - prev = pdev; - } - - return ndev; -fail: - pci_dev_put(pdev); - return -ENODEV; -} - -static const struct x86_cpu_id skx_cpuids[] = { - { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 }, - { } -}; -MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); - -static u8 get_src_id(struct skx_dev *d) -{ - u32 reg; - - pci_read_config_dword(d->util_all, 0xF0, ®); - - return GET_BITFIELD(reg, 12, 14); -} - -static u8 skx_get_node_id(struct skx_dev *d) -{ - u32 reg; - - pci_read_config_dword(d->util_all, 0xF4, ®); - - return GET_BITFIELD(reg, 0, 2); -} - -static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, - int maxval, char *name) -{ - u32 val = GET_BITFIELD(reg, lobit, hibit); - - if (val < minval || val > maxval) { - edac_dbg(2, "bad %s = %d (raw=0x%x)\n", name, val, reg); - return -EINVAL; - } - return val + add; -} - -#define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15) -#define IS_NVDIMM_PRESENT(mcddrtcfg, i) GET_BITFIELD((mcddrtcfg), (i), (i)) - -#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks") -#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows") -#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols") - -static int get_width(u32 mtr) -{ - switch (GET_BITFIELD(mtr, 8, 9)) { - case 0: - return DEV_X4; - case 1: - return DEV_X8; - case 2: - return DEV_X16; - } - return DEV_UNKNOWN; -} - -static int skx_get_hi_lo(void) -{ - struct pci_dev *pdev; - u32 reg; - - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL); - if (!pdev) { - edac_dbg(0, "Can't get tolm/tohm\n"); - return -ENODEV; - } - - pci_read_config_dword(pdev, 0xD0, ®); - skx_tolm = reg; - pci_read_config_dword(pdev, 0xD4, ®); - skx_tohm = reg; - pci_read_config_dword(pdev, 0xD8, ®); - skx_tohm |= (u64)reg << 32; - - pci_dev_put(pdev); - edac_dbg(2, "tolm=0x%llx tohm=0x%llx\n", skx_tolm, skx_tohm); - - return 0; -} - -static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, - struct skx_imc *imc, int chan, int dimmno) -{ - int banks = 16, ranks, rows, cols, npages; - u64 size; - - ranks = numrank(mtr); - rows = numrow(mtr); - cols = numcol(mtr); - - /* - * Compute size in 8-byte (2^3) words, then shift to MiB (2^20) - */ - size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3); - npages = MiB_TO_PAGES(size); - - edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: 0x%#x, col: 0x%#x\n", - imc->mc, chan, dimmno, size, npages, - banks, 1 << ranks, rows, cols); - - imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); - imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); - imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); - imc->chan[chan].dimms[dimmno].rowbits = rows; - imc->chan[chan].dimms[dimmno].colbits = cols; - - dimm->nr_pages = npages; - dimm->grain = 32; - dimm->dtype = get_width(mtr); - dimm->mtype = MEM_DDR4; - dimm->edac_mode = EDAC_SECDED; /* likely better than this */ - snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", - imc->src_id, imc->lmc, chan, dimmno); - - return 1; -} - -static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, - int chan, int dimmno) -{ - int smbios_handle; - u32 dev_handle; - u16 flags; - u64 size = 0; - - nvdimm_count++; - - dev_handle = 
ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc, - imc->src_id, 0); - - smbios_handle = nfit_get_smbios_id(dev_handle, &flags); - if (smbios_handle == -EOPNOTSUPP) { - pr_warn_once(EDAC_MOD_STR ": Can't find size of NVDIMM. Try enabling CONFIG_ACPI_NFIT\n"); - goto unknown_size; - } - - if (smbios_handle < 0) { - skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=0x%x\n", dev_handle); - goto unknown_size; - } - - if (flags & ACPI_NFIT_MEM_MAP_FAILED) { - skx_printk(KERN_ERR, "NVDIMM ADR=0x%x is not mapped\n", dev_handle); - goto unknown_size; - } - - size = dmi_memdev_size(smbios_handle); - if (size == ~0ull) - skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=0x%x/SMBIOS=0x%x\n", - dev_handle, smbios_handle); - -unknown_size: - dimm->nr_pages = size >> PAGE_SHIFT; - dimm->grain = 32; - dimm->dtype = DEV_UNKNOWN; - dimm->mtype = MEM_NVDIMM; - dimm->edac_mode = EDAC_SECDED; /* likely better than this */ - - edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n", - imc->mc, chan, dimmno, size >> 20, dimm->nr_pages); - - snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u", - imc->src_id, imc->lmc, chan, dimmno); - - return (size == 0 || size == ~0ull) ? 0 : 1; -} - -#define SKX_GET_MTMTR(dev, reg) \ - pci_read_config_dword((dev), 0x87c, ®) - -static bool skx_check_ecc(struct pci_dev *pdev) -{ - u32 mtmtr; - - SKX_GET_MTMTR(pdev, mtmtr); - - return !!GET_BITFIELD(mtmtr, 2, 2); -} - -static int skx_get_dimm_config(struct mem_ctl_info *mci) -{ - struct skx_pvt *pvt = mci->pvt_info; - struct skx_imc *imc = pvt->imc; - u32 mtr, amap, mcddrtcfg; - struct dimm_info *dimm; - int i, j; - int ndimms; - - for (i = 0; i < NUM_CHANNELS; i++) { - ndimms = 0; - pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); - pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg); - for (j = 0; j < NUM_DIMMS; j++) { - dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, - mci->n_layers, i, j, 0); - pci_read_config_dword(imc->chan[i].cdev, - 0x80 + 4*j, &mtr); - if (IS_DIMM_PRESENT(mtr)) - ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j); - else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) - ndimms += get_nvdimm_info(dimm, imc, i, j); - } - if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { - skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); - return -ENODEV; - } - } - - return 0; -} - -static void skx_unregister_mci(struct skx_imc *imc) -{ - struct mem_ctl_info *mci = imc->mci; - - if (!mci) - return; - - edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci); - - /* Remove MC sysfs nodes */ - edac_mc_del_mc(mci->pdev); - - edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); - kfree(mci->ctl_name); - edac_mc_free(mci); -} - -static int skx_register_mci(struct skx_imc *imc) -{ - struct mem_ctl_info *mci; - struct edac_mc_layer layers[2]; - struct pci_dev *pdev = imc->chan[0].cdev; - struct skx_pvt *pvt; - int rc; - - /* allocate a new MC control structure */ - layers[0].type = EDAC_MC_LAYER_CHANNEL; - layers[0].size = NUM_CHANNELS; - layers[0].is_virt_csrow = false; - layers[1].type = EDAC_MC_LAYER_SLOT; - layers[1].size = NUM_DIMMS; - layers[1].is_virt_csrow = true; - mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers, - sizeof(struct skx_pvt)); - - if (unlikely(!mci)) - return -ENOMEM; - - edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci); - - /* Associate skx_dev and mci for future usage */ - imc->mci = mci; - pvt = mci->pvt_info; - pvt->imc = imc; - - mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", - imc->node_id, imc->lmc); - if (!mci->ctl_name) { 
- rc = -ENOMEM; - goto fail0; - } - - mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_NVDIMM; - mci->edac_ctl_cap = EDAC_FLAG_NONE; - mci->edac_cap = EDAC_FLAG_NONE; - mci->mod_name = EDAC_MOD_STR; - mci->dev_name = pci_name(imc->chan[0].cdev); - mci->ctl_page_to_phys = NULL; - - rc = skx_get_dimm_config(mci); - if (rc < 0) - goto fail; - - /* record ptr to the generic device */ - mci->pdev = &pdev->dev; - - /* add this new MC control structure to EDAC's list of MCs */ - if (unlikely(edac_mc_add_mc(mci))) { - edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); - rc = -EINVAL; - goto fail; - } - - return 0; - -fail: - kfree(mci->ctl_name); -fail0: - edac_mc_free(mci); - imc->mci = NULL; - return rc; -} - -#define SKX_MAX_SAD 24 - -#define SKX_GET_SAD(d, i, reg) \ - pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), ®) -#define SKX_GET_ILV(d, i, reg) \ - pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), ®) - -#define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31) -#define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27) -#define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26) -#define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6) -#define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4) -#define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2) -#define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0) - -#define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0) -#define SKX_ILV_TARGET(tgt) ((tgt) & 7) - -static bool skx_sad_decode(struct decoded_addr *res) -{ - struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list); - u64 addr = res->addr; - int i, idx, tgt, lchan, shift; - u32 sad, ilv; - u64 limit, prev_limit; - int remote = 0; - - /* Simple sanity check for I/O space or out of range */ - if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) { - edac_dbg(0, "Address 0x%llx out of range\n", addr); - return false; - } - -restart: - prev_limit = 0; - for (i = 0; i < SKX_MAX_SAD; i++) { - SKX_GET_SAD(d, i, sad); - limit = SKX_SAD_LIMIT(sad); - if (SKX_SAD_ENABLE(sad)) { - if (addr >= prev_limit && addr <= limit) - goto sad_found; - } - prev_limit = limit + 1; - } - edac_dbg(0, "No SAD entry for 0x%llx\n", addr); - return false; - -sad_found: - SKX_GET_ILV(d, i, ilv); - - switch (SKX_SAD_INTERLEAVE(sad)) { - case 0: - idx = GET_BITFIELD(addr, 6, 8); - break; - case 1: - idx = GET_BITFIELD(addr, 8, 10); - break; - case 2: - idx = GET_BITFIELD(addr, 12, 14); - break; - case 3: - idx = GET_BITFIELD(addr, 30, 32); - break; - } - - tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3); - - /* If point to another node, find it and start over */ - if (SKX_ILV_REMOTE(tgt)) { - if (remote) { - edac_dbg(0, "Double remote!\n"); - return false; - } - remote = 1; - list_for_each_entry(d, &skx_edac_list, list) { - if (d->imc[0].src_id == SKX_ILV_TARGET(tgt)) - goto restart; - } - edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt)); - return false; - } - - if (SKX_SAD_MOD3(sad) == 0) - lchan = SKX_ILV_TARGET(tgt); - else { - switch (SKX_SAD_MOD3MODE(sad)) { - case 0: - shift = 6; - break; - case 1: - shift = 8; - break; - case 2: - shift = 12; - break; - default: - edac_dbg(0, "illegal mod3mode\n"); - return false; - } - switch (SKX_SAD_MOD3ASMOD2(sad)) { - case 0: - lchan = (addr >> shift) % 3; - break; - case 1: - lchan = (addr >> shift) % 2; - break; - case 2: - lchan = (addr >> shift) % 2; - lchan = (lchan << 1) | !lchan; - break; - case 3: - lchan = ((addr >> shift) % 2) << 1; - break; - } - lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1); - } - - res->dev = d; - res->socket = 
d->imc[0].src_id; - res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2); - res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19); - - edac_dbg(2, "0x%llx: socket=%d imc=%d channel=%d\n", - res->addr, res->socket, res->imc, res->channel); - return true; -} - -#define SKX_MAX_TAD 8 - -#define SKX_GET_TADBASE(d, mc, i, reg) \ - pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), ®) -#define SKX_GET_TADWAYNESS(d, mc, i, reg) \ - pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), ®) -#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \ - pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), ®) - -#define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26) -#define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5) -#define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7) -#define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26) -#define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26) -#define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11)) -#define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1) - -/* which bit used for both socket and channel interleave */ -static int skx_granularity[] = { 6, 8, 12, 30 }; - -static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits) -{ - addr >>= shift; - addr /= ways; - addr <<= shift; - - return addr | (lowbits & ((1ull << shift) - 1)); -} - -static bool skx_tad_decode(struct decoded_addr *res) -{ - int i; - u32 base, wayness, chnilvoffset; - int skt_interleave_bit, chn_interleave_bit; - u64 channel_addr; - - for (i = 0; i < SKX_MAX_TAD; i++) { - SKX_GET_TADBASE(res->dev, res->imc, i, base); - SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness); - if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness)) - goto tad_found; - } - edac_dbg(0, "No TAD entry for 0x%llx\n", res->addr); - return false; - -tad_found: - res->sktways = SKX_TAD_SKTWAYS(wayness); - res->chanways = SKX_TAD_CHNWAYS(wayness); - skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)]; - chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)]; - - SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset); - channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset); - - if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) { - /* Must handle channel first, then socket */ - channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, - res->chanways, channel_addr); - channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, - res->sktways, channel_addr); - } else { - /* Handle socket then channel. 
Preserve low bits from original address */ - channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit, - res->sktways, res->addr); - channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit, - res->chanways, res->addr); - } - - res->chan_addr = channel_addr; - - edac_dbg(2, "0x%llx: chan_addr=0x%llx sktways=%d chanways=%d\n", - res->addr, res->chan_addr, res->sktways, res->chanways); - return true; -} - -#define SKX_MAX_RIR 4 - -#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \ - pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ - 0x108 + 4 * (i), ®) -#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \ - pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \ - 0x120 + 16 * idx + 4 * (i), ®) - -#define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31) -#define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29) -#define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29)) -#define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19) -#define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26)) - -static bool skx_rir_decode(struct decoded_addr *res) -{ - int i, idx, chan_rank; - int shift; - u32 rirway, rirlv; - u64 rank_addr, prev_limit = 0, limit; - - if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg) - shift = 6; - else - shift = 13; - - for (i = 0; i < SKX_MAX_RIR; i++) { - SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway); - limit = SKX_RIR_LIMIT(rirway); - if (SKX_RIR_VALID(rirway)) { - if (prev_limit <= res->chan_addr && - res->chan_addr <= limit) - goto rir_found; - } - prev_limit = limit; - } - edac_dbg(0, "No RIR entry for 0x%llx\n", res->addr); - return false; - -rir_found: - rank_addr = res->chan_addr >> shift; - rank_addr /= SKX_RIR_WAYS(rirway); - rank_addr <<= shift; - rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0); - - res->rank_address = rank_addr; - idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway); - - SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv); - res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv); - chan_rank = SKX_RIR_CHAN_RANK(rirlv); - res->channel_rank = chan_rank; - res->dimm = chan_rank / 4; - res->rank = chan_rank % 4; - - edac_dbg(2, "0x%llx: dimm=%d rank=%d chan_rank=%d rank_addr=0x%llx\n", - res->addr, res->dimm, res->rank, - res->channel_rank, res->rank_address); - return true; -} - -static u8 skx_close_row[] = { - 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33 -}; -static u8 skx_close_column[] = { - 3, 4, 5, 14, 19, 23, 24, 25, 26, 27 -}; -static u8 skx_open_row[] = { - 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33 -}; -static u8 skx_open_column[] = { - 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 -}; -static u8 skx_open_fine_column[] = { - 3, 4, 5, 7, 8, 9, 10, 11, 12, 13 -}; - -static int skx_bits(u64 addr, int nbits, u8 *bits) -{ - int i, res = 0; - - for (i = 0; i < nbits; i++) - res |= ((addr >> bits[i]) & 1) << i; - return res; -} - -static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1) -{ - int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1); - - if (do_xor) - ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1); - - return ret; -} - -static bool skx_mad_decode(struct decoded_addr *r) -{ - struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm]; - int bg0 = dimm->fine_grain_bank ? 
6 : 13; - - if (dimm->close_pg) { - r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row); - r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column); - r->column |= 0x400; /* C10 is autoprecharge, always set */ - r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28); - r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21); - } else { - r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row); - if (dimm->fine_grain_bank) - r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column); - else - r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column); - r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23); - r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21); - } - r->row &= (1u << dimm->rowbits) - 1; - - edac_dbg(2, "0x%llx: row=0x%x col=0x%x bank_addr=%d bank_group=%d\n", - r->addr, r->row, r->column, r->bank_address, - r->bank_group); - return true; -} - -static bool skx_decode(struct decoded_addr *res) -{ - - return skx_sad_decode(res) && skx_tad_decode(res) && - skx_rir_decode(res) && skx_mad_decode(res); -} - -static bool skx_adxl_decode(struct decoded_addr *res) - -{ - int i, len = 0; - - if (res->addr >= skx_tohm || (res->addr >= skx_tolm && - res->addr < BIT_ULL(32))) { - edac_dbg(0, "Address 0x%llx out of range\n", res->addr); - return false; - } - - if (adxl_decode(res->addr, adxl_values)) { - edac_dbg(0, "Failed to decode 0x%llx\n", res->addr); - return false; - } - - res->socket = (int)adxl_values[component_indices[INDEX_SOCKET]]; - res->imc = (int)adxl_values[component_indices[INDEX_MEMCTRL]]; - res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]]; - res->dimm = (int)adxl_values[component_indices[INDEX_DIMM]]; - - for (i = 0; i < adxl_component_count; i++) { - if (adxl_values[i] == ~0x0ull) - continue; - - len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx", - adxl_component_names[i], adxl_values[i]); - if (MSG_SIZE - len <= 0) - break; - } - - return true; -} - -static void skx_mce_output_error(struct mem_ctl_info *mci, - const struct mce *m, - struct decoded_addr *res) -{ - enum hw_event_mc_err_type tp_event; - char *type, *optype; - bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); - bool overflow = GET_BITFIELD(m->status, 62, 62); - bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); - bool recoverable; - u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); - u32 mscod = GET_BITFIELD(m->status, 16, 31); - u32 errcode = GET_BITFIELD(m->status, 0, 15); - u32 optypenum = GET_BITFIELD(m->status, 4, 6); - - recoverable = GET_BITFIELD(m->status, 56, 56); - - if (uncorrected_error) { - core_err_cnt = 1; - if (ripv) { - type = "FATAL"; - tp_event = HW_EVENT_ERR_FATAL; - } else { - type = "NON_FATAL"; - tp_event = HW_EVENT_ERR_UNCORRECTED; - } - } else { - type = "CORRECTED"; - tp_event = HW_EVENT_ERR_CORRECTED; - } - - /* - * According with Table 15-9 of the Intel Architecture spec vol 3A, - * memory errors should fit in this mask: - * 000f 0000 1mmm cccc (binary) - * where: - * f = Correction Report Filtering Bit. 
If 1, subsequent errors - * won't be shown - * mmm = error type - * cccc = channel - * If the mask doesn't match, report an error to the parsing logic - */ - if (!((errcode & 0xef80) == 0x80)) { - optype = "Can't parse: it is not a mem"; - } else { - switch (optypenum) { - case 0: - optype = "generic undef request error"; - break; - case 1: - optype = "memory read error"; - break; - case 2: - optype = "memory write error"; - break; - case 3: - optype = "addr/cmd error"; - break; - case 4: - optype = "memory scrubbing error"; - break; - default: - optype = "reserved"; - break; - } - } - if (adxl_component_count) { - snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s", - overflow ? " OVERFLOW" : "", - (uncorrected_error && recoverable) ? " recoverable" : "", - mscod, errcode, adxl_msg); - } else { - snprintf(skx_msg, MSG_SIZE, - "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x", - overflow ? " OVERFLOW" : "", - (uncorrected_error && recoverable) ? " recoverable" : "", - mscod, errcode, - res->socket, res->imc, res->rank, - res->bank_group, res->bank_address, res->row, res->column); - } - - edac_dbg(0, "%s\n", skx_msg); - - /* Call the helper to output message */ - edac_mc_handle_error(tp_event, mci, core_err_cnt, - m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, - res->channel, res->dimm, -1, - optype, skx_msg); -} - -static struct mem_ctl_info *get_mci(int src_id, int lmc) -{ - struct skx_dev *d; - - if (lmc > NUM_IMC - 1) { - skx_printk(KERN_ERR, "Bad lmc %d\n", lmc); - return NULL; - } - - list_for_each_entry(d, &skx_edac_list, list) { - if (d->imc[0].src_id == src_id) - return d->imc[lmc].mci; - } - - skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc); - - return NULL; -} - -static int skx_mce_check_error(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct mce *mce = (struct mce *)data; - struct decoded_addr res; - struct mem_ctl_info *mci; - char *type; - - if (edac_get_report_status() == EDAC_REPORTING_DISABLED) - return NOTIFY_DONE; - - /* ignore unless this is memory related with an address */ - if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV)) - return NOTIFY_DONE; - - memset(&res, 0, sizeof(res)); - res.addr = mce->addr; - - if (adxl_component_count) { - if (!skx_adxl_decode(&res)) - return NOTIFY_DONE; - - mci = get_mci(res.socket, res.imc); - } else { - if (!skx_decode(&res)) - return NOTIFY_DONE; - - mci = res.dev->imc[res.imc].mci; - } - - if (!mci) - return NOTIFY_DONE; - - if (mce->mcgstatus & MCG_STATUS_MCIP) - type = "Exception"; - else - type = "Event"; - - skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n"); - - skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: 0x%llx " - "Bank %d: %016Lx\n", mce->extcpu, type, - mce->mcgstatus, mce->bank, mce->status); - skx_mc_printk(mci, KERN_DEBUG, "TSC 0x%llx ", mce->tsc); - skx_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", mce->addr); - skx_mc_printk(mci, KERN_DEBUG, "MISC 0x%llx ", mce->misc); - - skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:0x%x TIME %llu SOCKET " - "%u APIC 0x%x\n", mce->cpuvendor, mce->cpuid, - mce->time, mce->socketid, mce->apicid); - - skx_mce_output_error(mci, mce, &res); - - return NOTIFY_DONE; -} - -static struct notifier_block skx_mce_dec = { - .notifier_call = skx_mce_check_error, - .priority = MCE_PRIO_EDAC, -}; - -#ifdef CONFIG_EDAC_DEBUG -/* - * Debug feature. - * Exercise the address decode logic by writing an address to - * /sys/kernel/debug/edac/skx_test/addr. 
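In practice the decode exercise described in this comment is driven entirely from userspace: writing a physical address to the debugfs node triggers the fake-MCE path implemented just below. A minimal sketch of such a test write follows; it assumes CONFIG_EDAC_DEBUG is enabled, debugfs is mounted at /sys/kernel/debug, and the address value itself is arbitrary (any address inside a populated DIMM range will do).

    #include <stdio.h>

    int main(void)
    {
        /* node created by setup_skx_debug(); writing it requires root */
        FILE *f = fopen("/sys/kernel/debug/edac/skx_test/addr", "w");

        if (!f) {
            perror("skx_test/addr");
            return 1;
        }
        /* hypothetical address to decode as a fake corrected error */
        fprintf(f, "0x2040000000\n");
        fclose(f);
        return 0;
    }

A plain shell redirect into the same file is equivalent; either way the value lands in debugfs_u64_set() below and is run through the full SAD/TAD/RIR/MAD decode as if a corrected memory error had been reported at that address.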
- */ -static struct dentry *skx_test; - -static int debugfs_u64_set(void *data, u64 val) -{ - struct mce m; - - pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val); - - memset(&m, 0, sizeof(m)); - /* ADDRV + MemRd + Unknown channel */ - m.status = MCI_STATUS_ADDRV + 0x90; - /* One corrected error */ - m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT); - m.addr = val; - skx_mce_check_error(NULL, 0, &m); - - return 0; -} -DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); - -static void setup_skx_debug(void) -{ - skx_test = edac_debugfs_create_dir("skx_test"); - if (!skx_test) - return; - - if (!edac_debugfs_create_file("addr", 0200, skx_test, - NULL, &fops_u64_wo)) { - debugfs_remove(skx_test); - skx_test = NULL; - } -} - -static void teardown_skx_debug(void) -{ - debugfs_remove_recursive(skx_test); -} -#else -static void setup_skx_debug(void) {} -static void teardown_skx_debug(void) {} -#endif /*CONFIG_EDAC_DEBUG*/ - -static void skx_remove(void) -{ - int i, j; - struct skx_dev *d, *tmp; - - edac_dbg(0, "\n"); - - list_for_each_entry_safe(d, tmp, &skx_edac_list, list) { - list_del(&d->list); - for (i = 0; i < NUM_IMC; i++) { - skx_unregister_mci(&d->imc[i]); - for (j = 0; j < NUM_CHANNELS; j++) - pci_dev_put(d->imc[i].chan[j].cdev); - } - pci_dev_put(d->util_all); - pci_dev_put(d->sad_all); - - kfree(d); - } -} - -static void __init skx_adxl_get(void) -{ - const char * const *names; - int i, j; - - names = adxl_get_component_names(); - if (!names) { - skx_printk(KERN_NOTICE, "No firmware support for address translation."); - skx_printk(KERN_CONT, " Only decoding DDR4 address!\n"); - return; - } - - for (i = 0; i < INDEX_MAX; i++) { - for (j = 0; names[j]; j++) { - if (!strcmp(component_names[i], names[j])) { - component_indices[i] = j; - break; - } - } - - if (!names[j]) - goto err; - } - - adxl_component_names = names; - while (*names++) - adxl_component_count++; - - adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values), - GFP_KERNEL); - if (!adxl_values) { - adxl_component_count = 0; - return; - } - - adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL); - if (!adxl_msg) { - adxl_component_count = 0; - kfree(adxl_values); - } - - return; -err: - skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ", - component_names[i]); - for (j = 0; names[j]; j++) - skx_printk(KERN_CONT, "%s ", names[j]); - skx_printk(KERN_CONT, "\n"); -} - -static void __exit skx_adxl_put(void) -{ - kfree(adxl_values); - kfree(adxl_msg); -} - -/* - * skx_init: - * make sure we are running on the correct cpu model - * search for all the devices we need - * check which DIMMs are present. 
- */ -static int __init skx_init(void) -{ - const struct x86_cpu_id *id; - const struct munit *m; - const char *owner; - int rc = 0, i; - u8 mc = 0, src_id, node_id; - struct skx_dev *d; - - edac_dbg(2, "\n"); - - owner = edac_get_owner(); - if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) - return -EBUSY; - - id = x86_match_cpu(skx_cpuids); - if (!id) - return -ENODEV; - - rc = skx_get_hi_lo(); - if (rc) - return rc; - - rc = get_all_bus_mappings(); - if (rc < 0) - goto fail; - if (rc == 0) { - edac_dbg(2, "No memory controllers found\n"); - return -ENODEV; - } - - for (m = skx_all_munits; m->did; m++) { - rc = get_all_munits(m); - if (rc < 0) - goto fail; - if (rc != m->per_socket * skx_num_sockets) { - edac_dbg(2, "Expected %d, got %d of 0x%x\n", - m->per_socket * skx_num_sockets, rc, m->did); - rc = -ENODEV; - goto fail; - } - } - - list_for_each_entry(d, &skx_edac_list, list) { - src_id = get_src_id(d); - node_id = skx_get_node_id(d); - edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id); - for (i = 0; i < NUM_IMC; i++) { - d->imc[i].mc = mc++; - d->imc[i].lmc = i; - d->imc[i].src_id = src_id; - d->imc[i].node_id = node_id; - rc = skx_register_mci(&d->imc[i]); - if (rc < 0) - goto fail; - } - } - - skx_msg = kzalloc(MSG_SIZE, GFP_KERNEL); - if (!skx_msg) { - rc = -ENOMEM; - goto fail; - } - - if (nvdimm_count) - skx_adxl_get(); - - /* Ensure that the OPSTATE is set correctly for POLL or NMI */ - opstate_init(); - - setup_skx_debug(); - - mce_register_decode_chain(&skx_mce_dec); - - return 0; -fail: - skx_remove(); - return rc; -} - -static void __exit skx_exit(void) -{ - edac_dbg(2, "\n"); - mce_unregister_decode_chain(&skx_mce_dec); - teardown_skx_debug(); - if (nvdimm_count) - skx_adxl_put(); - kfree(skx_msg); - skx_remove(); -} - -module_init(skx_init); -module_exit(skx_exit); - -module_param(edac_op_state, int, 0444); -MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Tony Luck"); -MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors"); diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index de15bf55895b..8e17149655f0 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig @@ -114,6 +114,14 @@ config EXTCON_PALMAS Say Y here to enable support for USB peripheral and USB host detection by palmas usb. +config EXTCON_PTN5150 + tristate "NXP PTN5150 CC LOGIC USB EXTCON support" + depends on I2C && GPIOLIB || COMPILE_TEST + select REGMAP_I2C + help + Say Y here to enable support for USB peripheral and USB host + detection by NXP PTN5150 CC (Configuration Channel) logic chip. 
+ config EXTCON_QCOM_SPMI_MISC tristate "Qualcomm USB extcon support" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile index 0888fdeded72..261ce4cfe209 100644 --- a/drivers/extcon/Makefile +++ b/drivers/extcon/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o +obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o obj-$(CONFIG_EXTCON_RT8973A) += extcon-rt8973a.o obj-$(CONFIG_EXTCON_SM5502) += extcon-sm5502.o diff --git a/drivers/extcon/extcon-ptn5150.c b/drivers/extcon/extcon-ptn5150.c new file mode 100644 index 000000000000..d1c997599390 --- /dev/null +++ b/drivers/extcon/extcon-ptn5150.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// extcon-ptn5150.c - PTN5150 CC logic extcon driver to support USB detection +// +// Based on extcon-sm5502.c driver +// Copyright (c) 2018-2019 by Vijai Kumar K +// Author: Vijai Kumar K + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PTN5150 registers */ +enum ptn5150_reg { + PTN5150_REG_DEVICE_ID = 0x01, + PTN5150_REG_CONTROL, + PTN5150_REG_INT_STATUS, + PTN5150_REG_CC_STATUS, + PTN5150_REG_CON_DET = 0x09, + PTN5150_REG_VCONN_STATUS, + PTN5150_REG_RESET, + PTN5150_REG_INT_MASK = 0x18, + PTN5150_REG_INT_REG_STATUS, + PTN5150_REG_END, +}; + +#define PTN5150_DFP_ATTACHED 0x1 +#define PTN5150_UFP_ATTACHED 0x2 + +/* Define PTN5150 MASK/SHIFT constant */ +#define PTN5150_REG_DEVICE_ID_VENDOR_SHIFT 0 +#define PTN5150_REG_DEVICE_ID_VENDOR_MASK \ + (0x3 << PTN5150_REG_DEVICE_ID_VENDOR_SHIFT) + +#define PTN5150_REG_DEVICE_ID_VERSION_SHIFT 3 +#define PTN5150_REG_DEVICE_ID_VERSION_MASK \ + (0x1f << PTN5150_REG_DEVICE_ID_VERSION_SHIFT) + +#define PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT 2 +#define PTN5150_REG_CC_PORT_ATTACHMENT_MASK \ + (0x7 << PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT) + +#define PTN5150_REG_CC_VBUS_DETECTION_SHIFT 7 +#define PTN5150_REG_CC_VBUS_DETECTION_MASK \ + (0x1 << PTN5150_REG_CC_VBUS_DETECTION_SHIFT) + +#define PTN5150_REG_INT_CABLE_ATTACH_SHIFT 0 +#define PTN5150_REG_INT_CABLE_ATTACH_MASK \ + (0x1 << PTN5150_REG_INT_CABLE_ATTACH_SHIFT) + +#define PTN5150_REG_INT_CABLE_DETACH_SHIFT 1 +#define PTN5150_REG_INT_CABLE_DETACH_MASK \ + (0x1 << PTN5150_REG_CC_CABLE_DETACH_SHIFT) + +struct ptn5150_info { + struct device *dev; + struct extcon_dev *edev; + struct i2c_client *i2c; + struct regmap *regmap; + struct gpio_desc *int_gpiod; + struct gpio_desc *vbus_gpiod; + int irq; + struct work_struct irq_work; + struct mutex mutex; +}; + +/* List of detectable cables */ +static const unsigned int ptn5150_extcon_cable[] = { + EXTCON_USB, + EXTCON_USB_HOST, + EXTCON_NONE, +}; + +static const struct regmap_config ptn5150_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = PTN5150_REG_END, +}; + +static void ptn5150_irq_work(struct work_struct *work) +{ + struct ptn5150_info *info = container_of(work, + struct ptn5150_info, irq_work); + int ret = 0; + unsigned int reg_data; + unsigned int int_status; + + if (!info->edev) + return; + + mutex_lock(&info->mutex); + + ret = regmap_read(info->regmap, PTN5150_REG_CC_STATUS, ®_data); + if (ret) { + dev_err(info->dev, "failed to read CC STATUS %d\n", ret); + mutex_unlock(&info->mutex); + return; + } + + /* Clear interrupt. 
Read would clear the register */ + ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, &int_status); + if (ret) { + dev_err(info->dev, "failed to read INT STATUS %d\n", ret); + mutex_unlock(&info->mutex); + return; + } + + if (int_status) { + unsigned int cable_attach; + + cable_attach = int_status & PTN5150_REG_INT_CABLE_ATTACH_MASK; + if (cable_attach) { + unsigned int port_status; + unsigned int vbus; + + port_status = ((reg_data & + PTN5150_REG_CC_PORT_ATTACHMENT_MASK) >> + PTN5150_REG_CC_PORT_ATTACHMENT_SHIFT); + + switch (port_status) { + case PTN5150_DFP_ATTACHED: + extcon_set_state_sync(info->edev, + EXTCON_USB_HOST, false); + gpiod_set_value(info->vbus_gpiod, 0); + extcon_set_state_sync(info->edev, EXTCON_USB, + true); + break; + case PTN5150_UFP_ATTACHED: + extcon_set_state_sync(info->edev, EXTCON_USB, + false); + vbus = ((reg_data & + PTN5150_REG_CC_VBUS_DETECTION_MASK) >> + PTN5150_REG_CC_VBUS_DETECTION_SHIFT); + if (vbus) + gpiod_set_value(info->vbus_gpiod, 0); + else + gpiod_set_value(info->vbus_gpiod, 1); + + extcon_set_state_sync(info->edev, + EXTCON_USB_HOST, true); + break; + default: + dev_err(info->dev, + "Unknown Port status : %x\n", + port_status); + break; + } + } else { + extcon_set_state_sync(info->edev, + EXTCON_USB_HOST, false); + extcon_set_state_sync(info->edev, + EXTCON_USB, false); + gpiod_set_value(info->vbus_gpiod, 0); + } + } + + /* Clear interrupt. Read would clear the register */ + ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS, + &int_status); + if (ret) { + dev_err(info->dev, + "failed to read INT REG STATUS %d\n", ret); + mutex_unlock(&info->mutex); + return; + } + + mutex_unlock(&info->mutex); +} + + +static irqreturn_t ptn5150_irq_handler(int irq, void *data) +{ + struct ptn5150_info *info = data; + + schedule_work(&info->irq_work); + + return IRQ_HANDLED; +} + +static int ptn5150_init_dev_type(struct ptn5150_info *info) +{ + unsigned int reg_data, vendor_id, version_id; + int ret; + + ret = regmap_read(info->regmap, PTN5150_REG_DEVICE_ID, ®_data); + if (ret) { + dev_err(info->dev, "failed to read DEVICE_ID %d\n", ret); + return -EINVAL; + } + + vendor_id = ((reg_data & PTN5150_REG_DEVICE_ID_VENDOR_MASK) >> + PTN5150_REG_DEVICE_ID_VENDOR_SHIFT); + version_id = ((reg_data & PTN5150_REG_DEVICE_ID_VERSION_MASK) >> + PTN5150_REG_DEVICE_ID_VERSION_SHIFT); + + dev_info(info->dev, "Device type: version: 0x%x, vendor: 0x%x\n", + version_id, vendor_id); + + /* Clear any existing interrupts */ + ret = regmap_read(info->regmap, PTN5150_REG_INT_STATUS, ®_data); + if (ret) { + dev_err(info->dev, + "failed to read PTN5150_REG_INT_STATUS %d\n", + ret); + return -EINVAL; + } + + ret = regmap_read(info->regmap, PTN5150_REG_INT_REG_STATUS, ®_data); + if (ret) { + dev_err(info->dev, + "failed to read PTN5150_REG_INT_REG_STATUS %d\n", ret); + return -EINVAL; + } + + return 0; +} + +static int ptn5150_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct device *dev = &i2c->dev; + struct device_node *np = i2c->dev.of_node; + struct ptn5150_info *info; + int ret; + + if (!np) + return -EINVAL; + + info = devm_kzalloc(&i2c->dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + i2c_set_clientdata(i2c, info); + + info->dev = &i2c->dev; + info->i2c = i2c; + info->int_gpiod = devm_gpiod_get(&i2c->dev, "int", GPIOD_IN); + if (IS_ERR(info->int_gpiod)) { + dev_err(dev, "failed to get INT GPIO\n"); + return PTR_ERR(info->int_gpiod); + } + info->vbus_gpiod = devm_gpiod_get(&i2c->dev, "vbus", GPIOD_IN); + if 
(IS_ERR(info->vbus_gpiod)) { + dev_err(dev, "failed to get VBUS GPIO\n"); + return PTR_ERR(info->vbus_gpiod); + } + ret = gpiod_direction_output(info->vbus_gpiod, 0); + if (ret) { + dev_err(dev, "failed to set VBUS GPIO direction\n"); + return -EINVAL; + } + + mutex_init(&info->mutex); + + INIT_WORK(&info->irq_work, ptn5150_irq_work); + + info->regmap = devm_regmap_init_i2c(i2c, &ptn5150_regmap_config); + if (IS_ERR(info->regmap)) { + ret = PTR_ERR(info->regmap); + dev_err(info->dev, "failed to allocate register map: %d\n", + ret); + return ret; + } + + if (info->int_gpiod) { + info->irq = gpiod_to_irq(info->int_gpiod); + if (info->irq < 0) { + dev_err(dev, "failed to get INTB IRQ\n"); + return info->irq; + } + + ret = devm_request_threaded_irq(dev, info->irq, NULL, + ptn5150_irq_handler, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + i2c->name, info); + if (ret < 0) { + dev_err(dev, "failed to request handler for INTB IRQ\n"); + return ret; + } + } + + /* Allocate extcon device */ + info->edev = devm_extcon_dev_allocate(info->dev, ptn5150_extcon_cable); + if (IS_ERR(info->edev)) { + dev_err(info->dev, "failed to allocate memory for extcon\n"); + return -ENOMEM; + } + + /* Register extcon device */ + ret = devm_extcon_dev_register(info->dev, info->edev); + if (ret) { + dev_err(info->dev, "failed to register extcon device\n"); + return ret; + } + + /* Initialize PTN5150 device and print vendor id and version id */ + ret = ptn5150_init_dev_type(info); + if (ret) + return -EINVAL; + + return 0; +} + +static const struct of_device_id ptn5150_dt_match[] = { + { .compatible = "nxp,ptn5150" }, + { }, +}; +MODULE_DEVICE_TABLE(of, ptn5150_dt_match); + +static const struct i2c_device_id ptn5150_i2c_id[] = { + { "ptn5150", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ptn5150_i2c_id); + +static struct i2c_driver ptn5150_i2c_driver = { + .driver = { + .name = "ptn5150", + .of_match_table = ptn5150_dt_match, + }, + .probe = ptn5150_i2c_probe, + .id_table = ptn5150_i2c_id, +}; + +static int __init ptn5150_i2c_init(void) +{ + return i2c_add_driver(&ptn5150_i2c_driver); +} +subsys_initcall(ptn5150_i2c_init); + +MODULE_DESCRIPTION("NXP PTN5150 CC logic Extcon driver"); +MODULE_AUTHOR("Vijai Kumar K "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index f754578414f0..cac16c4b0df3 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -218,7 +218,7 @@ config FW_CFG_SYSFS_CMDLINE config INTEL_STRATIX10_SERVICE tristate "Intel Stratix10 Service Layer" - depends on HAVE_ARM_SMCCC + depends on ARCH_STRATIX10 && HAVE_ARM_SMCCC default n help Intel Stratix10 service layer runs at privileged exception level, diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 472c88ae1c0f..92f843eaf1e0 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver) } EXPORT_SYMBOL_GPL(scmi_driver_unregister); +static void scmi_device_release(struct device *dev) +{ + kfree(to_scmi_dev(dev)); +} + struct scmi_device * scmi_device_create(struct device_node *np, struct device *parent, int protocol) { @@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) scmi_dev->dev.parent = parent; scmi_dev->dev.of_node = np; scmi_dev->dev.bus = &scmi_bus_type; + scmi_dev->dev.release = scmi_device_release; dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); retval = device_register(&scmi_dev->dev); @@ -156,9 +162,8 @@ 
free_mem: void scmi_device_destroy(struct scmi_device *scmi_dev) { scmi_handle_put(scmi_dev->handle); - device_unregister(&scmi_dev->dev); ida_simple_remove(&scmi_bus_id, scmi_dev->id); - kfree(scmi_dev); + device_unregister(&scmi_dev->dev); } void scmi_set_handle(struct scmi_device *scmi_dev) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index c64c7da73829..e6376f985ef7 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -2,6 +2,7 @@ // Copyright (C) 2017 Arm Ltd. #define pr_fmt(fmt) "sdei: " fmt +#include #include #include #include @@ -887,6 +888,73 @@ static void sdei_smccc_hvc(unsigned long function_id, arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res); } +int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, + sdei_event_callback *critical_cb) +{ + int err; + u64 result; + u32 event_num; + sdei_event_callback *cb; + + if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) + return -EOPNOTSUPP; + + event_num = ghes->generic->notify.vector; + if (event_num == 0) { + /* + * Event 0 is reserved by the specification for + * SDEI_EVENT_SIGNAL. + */ + return -EINVAL; + } + + err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY, + &result); + if (err) + return err; + + if (result == SDEI_EVENT_PRIORITY_CRITICAL) + cb = critical_cb; + else + cb = normal_cb; + + err = sdei_event_register(event_num, cb, ghes); + if (!err) + err = sdei_event_enable(event_num); + + return err; +} + +int sdei_unregister_ghes(struct ghes *ghes) +{ + int i; + int err; + u32 event_num = ghes->generic->notify.vector; + + might_sleep(); + + if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES)) + return -EOPNOTSUPP; + + /* + * The event may be running on another CPU. Disable it + * to stop new events, then try to unregister a few times. 
+ */ + err = sdei_event_disable(event_num); + if (err) + return err; + + for (i = 0; i < 3; i++) { + err = sdei_event_unregister(event_num); + if (err != -EINPROGRESS) + break; + + schedule(); + } + + return err; +} + static int sdei_get_conduit(struct platform_device *pdev) { const char *method; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 89110dfc7127..190be0b1d109 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -198,3 +198,9 @@ config EFI_DEV_PATH_PARSER bool depends on ACPI default n + +config EFI_EARLYCON + def_bool y + depends on SERIAL_EARLYCON && !ARM && !IA64 + select FONT_SUPPORT + select ARCH_USE_MEMREMAP_PROT diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index 5f9f5039de50..d2d0d2030620 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -30,5 +30,6 @@ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o +obj-$(CONFIG_EFI_EARLYCON) += earlycon.o obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o obj-$(CONFIG_UEFI_CPER_X86) += cper-x86.o diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index ac1654f74dc7..0e206c9e0d7a 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * apple-properties.c - EFI device properties on Macs * Copyright (C) 2016 Lukas Wunner * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . - * * Note, all properties are considered as u8 arrays. * To get a value of any of them the caller must use device_property_read_u8_array(). */ diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 1a6a77df8a5e..311cd349a862 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Extensible Firmware Interface * * Based on Extensible Firmware Interface Specification version 2.4 * * Copyright (C) 2013 - 2015 Linaro Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * */ #define pr_fmt(fmt) "efi: " fmt diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 23ea1ed409d1..0c1af675c338 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Extensible Firmware Interface * * Based on Extensible Firmware Interface Specification version 2.4 * * Copyright (C) 2013, 2014 Linaro Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * */ #include @@ -37,18 +33,19 @@ extern u64 efi_system_table; static struct ptdump_info efi_ptdump_info = { .mm = &efi_mm, .markers = (struct addr_marker[]){ - { 0, "UEFI runtime start" }, - { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } + { 0, "UEFI runtime start" }, + { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }, + { -1, NULL } }, .base_addr = 0, }; static int __init ptdump_init(void) { - if (!efi_enabled(EFI_RUNTIME_SERVICES)) - return 0; + if (efi_enabled(EFI_RUNTIME_SERVICES)) + ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables"); - return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables"); + return 0; } device_initcall(ptdump_init); diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index 96688986da56..b1395133389e 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -1,10 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * EFI capsule loader driver. * * Copyright 2015 Intel Corporation - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. */ #define pr_fmt(fmt) "efi: " fmt diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c index 4938c29b7c5d..598b7800d14e 100644 --- a/drivers/firmware/efi/capsule.c +++ b/drivers/firmware/efi/capsule.c @@ -1,10 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * EFI capsule support. * * Copyright 2013 Intel Corporation; author Matt Fleming - * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. */ #define pr_fmt(fmt) "efi: " fmt diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c index 502811344e81..36d3b8b9da47 100644 --- a/drivers/firmware/efi/cper-arm.c +++ b/drivers/firmware/efi/cper-arm.c @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * UEFI Common Platform Error Record (CPER) support * * Copyright (C) 2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index a7902fccdcfa..8fa977c7861f 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * UEFI Common Platform Error Record (CPER) support * @@ -9,19 +10,6 @@ * * For more information about CPER, please refer to Appendix N of UEFI * Specification version 2.4. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include @@ -546,19 +534,24 @@ EXPORT_SYMBOL_GPL(cper_estatus_check_header); int cper_estatus_check(const struct acpi_hest_generic_status *estatus) { struct acpi_hest_generic_data *gdata; - unsigned int data_len, gedata_len; + unsigned int data_len, record_size; int rc; rc = cper_estatus_check_header(estatus); if (rc) return rc; + data_len = estatus->data_length; apei_estatus_for_each_section(estatus, gdata) { - gedata_len = acpi_hest_get_error_length(gdata); - if (gedata_len > data_len - acpi_hest_get_size(gdata)) + if (sizeof(struct acpi_hest_generic_data) > data_len) return -EINVAL; - data_len -= acpi_hest_get_record_size(gdata); + + record_size = acpi_hest_get_record_size(gdata); + if (record_size > data_len) + return -EINVAL; + + data_len -= record_size; } if (data_len) return -EINVAL; diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c index 85d1834ee9b7..85ec99f97841 100644 --- a/drivers/firmware/efi/dev-path-parser.c +++ b/drivers/firmware/efi/dev-path-parser.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * dev-path-parser.c - EFI Device Path parser * Copyright (C) 2016 Lukas Wunner @@ -5,14 +6,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2) as * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . 
*/ #include diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c new file mode 100644 index 000000000000..c9a0efca17b0 --- /dev/null +++ b/drivers/firmware/efi/earlycon.c @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2013 Intel Corporation; author Matt Fleming + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +static const struct font_desc *font; +static u32 efi_x, efi_y; +static u64 fb_base; +static pgprot_t fb_prot; + +static __ref void *efi_earlycon_map(unsigned long start, unsigned long len) +{ + return early_memremap_prot(fb_base + start, len, pgprot_val(fb_prot)); +} + +static __ref void efi_earlycon_unmap(void *addr, unsigned long len) +{ + early_memunmap(addr, len); +} + +static void efi_earlycon_clear_scanline(unsigned int y) +{ + unsigned long *dst; + u16 len; + + len = screen_info.lfb_linelength; + dst = efi_earlycon_map(y*len, len); + if (!dst) + return; + + memset(dst, 0, len); + efi_earlycon_unmap(dst, len); +} + +static void efi_earlycon_scroll_up(void) +{ + unsigned long *dst, *src; + u16 len; + u32 i, height; + + len = screen_info.lfb_linelength; + height = screen_info.lfb_height; + + for (i = 0; i < height - font->height; i++) { + dst = efi_earlycon_map(i*len, len); + if (!dst) + return; + + src = efi_earlycon_map((i + font->height) * len, len); + if (!src) { + efi_earlycon_unmap(dst, len); + return; + } + + memmove(dst, src, len); + + efi_earlycon_unmap(src, len); + efi_earlycon_unmap(dst, len); + } +} + +static void efi_earlycon_write_char(u32 *dst, unsigned char c, unsigned int h) +{ + const u32 color_black = 0x00000000; + const u32 color_white = 0x00ffffff; + const u8 *src; + u8 s8; + int m; + + src = font->data + c * font->height; + s8 = *(src + h); + + for (m = 0; m < 8; m++) { + if ((s8 >> (7 - m)) & 1) + *dst = color_white; + else + *dst = color_black; + dst++; + } +} + +static void +efi_earlycon_write(struct console *con, const char *str, unsigned int num) +{ + struct screen_info *si; + unsigned int len; + const char *s; + void *dst; + + si = &screen_info; + len = si->lfb_linelength; + + while (num) { + unsigned int linemax; + unsigned int h, count = 0; + + for (s = str; *s && *s != '\n'; s++) { + if (count == num) + break; + count++; + } + + linemax = (si->lfb_width - efi_x) / font->width; + if (count > linemax) + count = linemax; + + for (h = 0; h < font->height; h++) { + unsigned int n, x; + + dst = efi_earlycon_map((efi_y + h) * len, len); + if (!dst) + return; + + s = str; + n = count; + x = efi_x; + + while (n-- > 0) { + efi_earlycon_write_char(dst + x*4, *s, h); + x += font->width; + s++; + } + + efi_earlycon_unmap(dst, len); + } + + num -= count; + efi_x += count * font->width; + str += count; + + if (num > 0 && *s == '\n') { + efi_x = 0; + efi_y += font->height; + str++; + num--; + } + + if (efi_x + font->width > si->lfb_width) { + efi_x = 0; + efi_y += font->height; + } + + if (efi_y + font->height > si->lfb_height) { + u32 i; + + efi_y -= font->height; + efi_earlycon_scroll_up(); + + for (i = 0; i < font->height; i++) + efi_earlycon_clear_scanline(efi_y + i); + } + } +} + +static int __init efi_earlycon_setup(struct earlycon_device *device, + const char *opt) +{ + struct screen_info *si; + u16 xres, yres; + u32 i; + + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) + return -ENODEV; + + fb_base = screen_info.lfb_base; + if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) + fb_base |= (u64)screen_info.ext_lfb_base << 32; + + if (opt && 
!strcmp(opt, "ram")) + fb_prot = PAGE_KERNEL; + else + fb_prot = pgprot_writecombine(PAGE_KERNEL); + + si = &screen_info; + xres = si->lfb_width; + yres = si->lfb_height; + + /* + * efi_earlycon_write_char() implicitly assumes a framebuffer with + * 32 bits per pixel. + */ + if (si->lfb_depth != 32) + return -ENODEV; + + font = get_default_font(xres, yres, -1, -1); + if (!font) + return -ENODEV; + + efi_y = rounddown(yres, font->height) - font->height; + for (i = 0; i < (yres - efi_y) / font->height; i++) + efi_earlycon_scroll_up(); + + device->con->write = efi_earlycon_write; + return 0; +} +EARLYCON_DECLARE(efifb, efi_earlycon_setup); diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c index b22ccfb0c991..a2384184a7de 100644 --- a/drivers/firmware/efi/efi-bgrt.c +++ b/drivers/firmware/efi/efi-bgrt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2012 Intel Corporation * Author: Josh Triplett @@ -5,10 +6,6 @@ * Based on the bgrt driver: * Copyright 2012 Red Hat, Inc * Author: Matthew Garrett - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c index 0f7d97917197..9ea13e8d12ec 100644 --- a/drivers/firmware/efi/efi-pstore.c +++ b/drivers/firmware/efi/efi-pstore.c @@ -1,3 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0+ + #include #include #include diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4c46ff6f2242..55b77c576c42 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, early_memunmap(tbl, sizeof(*tbl)); } - return 0; -} -int __init efi_apply_persistent_mem_reservations(void) -{ if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { unsigned long prsv = efi.mem_reserve; diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c index 503bbe2a9d49..61e099826cbb 100644 --- a/drivers/firmware/efi/efibc.c +++ b/drivers/firmware/efi/efibc.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * efibc: control EFI bootloaders which obey LoaderEntryOneShot var * Copyright (c) 2013-2016, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) "efibc: " fmt diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index 8061667a6765..7576450c8254 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Originally from efivars.c, * @@ -6,63 +7,6 @@ * * This code takes all variables accessible from EFI runtime and * exports them via sysfs - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - * Changelog: - * - * 17 May 2004 - Matt Domsch - * remove check for efi_enabled in exit - * add MODULE_VERSION - * - * 26 Apr 2004 - Matt Domsch - * minor bug fixes - * - * 21 Apr 2004 - Matt Tolentino - * fix locking per Peter Chubb's findings - * - * 25 Mar 2002 - Matt Domsch - * move uuid_unparse() to include/asm-ia64/efi.h:efi_guid_to_str() - * - * 12 Feb 2002 - Matt Domsch - * use list_for_each_safe when deleting vars. - * remove ifdef CONFIG_SMP around include - * v0.04 release to linux-ia64@linuxia64.org - * - * 20 April 2001 - Matt Domsch - * Moved vars from /proc/efi to /proc/efi/vars, and made - * efi.c own the /proc/efi directory. - * v0.03 release to linux-ia64@linuxia64.org - * - * 26 March 2001 - Matt Domsch - * At the request of Stephane, moved ownership of /proc/efi - * to efi.c, and now efivars lives under /proc/efi/vars. - * - * 12 March 2001 - Matt Domsch - * Feedback received from Stephane Eranian incorporated. - * efivar_write() checks copy_from_user() return value. - * efivar_read/write() returns proper errno. - * v0.02 release to linux-ia64@linuxia64.org - * - * 26 February 2001 - Matt Domsch - * v0.01 release to linux-ia64@linuxia64.org */ #include diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 5d06bd247d07..d6dd5f503fa2 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * esrt.c * diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c index 6c7d60c239b5..9501edc0fcfb 100644 --- a/drivers/firmware/efi/fake_mem.c +++ b/drivers/firmware/efi/fake_mem.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * fake_mem.c * @@ -8,21 +9,6 @@ * By specifying this parameter, you can add arbitrary attribute to * specific memory range by updating original (firmware provided) EFI * memmap. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, see . - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". 
*/ #include diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index d9845099635e..b0103e16fc1b 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -52,7 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \ lib-$(CONFIG_ARM) += arm32-stub.o lib-$(CONFIG_ARM64) += arm64-stub.o -CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) +CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) # # arm64 puts the stub in the kernel proper, which will unnecessarily retain all @@ -89,7 +89,7 @@ quiet_cmd_stubcopy = STUBCPY $@ cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \ then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \ then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \ - rm -f $@; /bin/false); \ + rm -f $@; /bin/false); \ else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \ else /bin/false; fi diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index eee42d5e25ee..04e6ecd72cd9 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; efi_status_t status; - if (IS_ENABLED(CONFIG_ARM)) - return; - status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), (void **)&rsv); if (status != EFI_SUCCESS) { @@ -370,6 +367,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, paddr = in->phys_addr; size = in->num_pages * EFI_PAGE_SIZE; + if (novamap()) { + in->virt_addr = in->phys_addr; + continue; + } + /* * Make the mapping compatible with 64k pages: this allows * a 4k page size kernel to kexec a 64k page size kernel and diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c index becbda445913..e8f7aefb6813 100644 --- a/drivers/firmware/efi/libstub/arm32-stub.c +++ b/drivers/firmware/efi/libstub/arm32-stub.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Linaro Ltd; - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * */ #include #include diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 1b4d465cc5d9..1550d244e996 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013, 2014 Linaro Ltd; * * This file implements the EFI boot stub for the arm64 kernel. * Adapted from ARM version by Mark Salter - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * */ /* diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index e94975f4655b..e4610e72b78f 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Helper functions used by the EFI stub on multiple * architectures. This should be #included by the EFI stub * implementation files. 
* * Copyright 2011 Intel Corporation; author Matt Fleming - * - * This file is part of the Linux kernel, and is made available - * under the terms of the GNU General Public License version 2. - * */ #include @@ -34,6 +31,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; static int __section(.data) __nokaslr; static int __section(.data) __quiet; +static int __section(.data) __novamap; int __pure nokaslr(void) { @@ -43,6 +41,10 @@ int __pure is_quiet(void) { return __quiet; } +int __pure novamap(void) +{ + return __novamap; +} #define EFI_MMAP_NR_SLACK_SLOTS 8 @@ -482,6 +484,11 @@ efi_status_t efi_parse_options(char const *cmdline) __chunk_size = -1UL; } + if (!strncmp(str, "novamap", 7)) { + str += strlen("novamap"); + __novamap = 1; + } + /* Group words together, delimited by "," */ while (*str && *str != ' ' && *str != ',') str++; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index 32799cf039ef..1b1dfcaa6fb9 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -27,6 +27,7 @@ extern int __pure nokaslr(void); extern int __pure is_quiet(void); +extern int __pure novamap(void); #define pr_efi(sys_table, msg) do { \ if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \ @@ -64,4 +65,15 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg); efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg); +/* Helper macros for the usual case of using simple C variables: */ +#ifndef fdt_setprop_inplace_var +#define fdt_setprop_inplace_var(fdt, node_offset, name, var) \ + fdt_setprop_inplace((fdt), (node_offset), (name), &(var), sizeof(var)) +#endif + +#ifndef fdt_setprop_var +#define fdt_setprop_var(fdt, node_offset, name, var) \ + fdt_setprop((fdt), (node_offset), (name), &(var), sizeof(var)) +#endif + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 0dc7b4987cc2..5440ba17a1c5 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * FDT related Helper functions used by the EFI stub on multiple * architectures. This should be #included by the EFI stub * implementation files. * * Copyright 2013 Linaro Limited; author Roy Franz - * - * This file is part of the Linux kernel, and is made available - * under the terms of the GNU General Public License version 2. 
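
The __novamap flag and novamap() accessor added above follow the same __section(.data) pattern as the existing nokaslr/quiet flags, and the new option is recognised in efi_parse_options() next to the chunk-size handling. Assuming that branch sits inside the existing efi= sub-option loop (the surrounding __chunk_size context suggests it does), the behaviour would be requested as:

    efi=novamap    # leave EFI runtime regions identity mapped, skip SetVirtualAddressMap()

Sub-options in that list are delimited by spaces or commas, per the parse loop shown above. With the flag set, efi_get_virtmap() in arm-stub.c records virt_addr == phys_addr for each runtime region, and allocate_new_fdt_and_exit_boot() in fdt.c (below) returns success before installing the virtual address map.
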
- * */ #include @@ -26,10 +23,8 @@ static void fdt_update_cell_size(efi_system_table_t *sys_table, void *fdt) offset = fdt_path_offset(fdt, "/"); /* Set the #address-cells and #size-cells values for an empty tree */ - fdt_setprop_u32(fdt, offset, "#address-cells", - EFI_DT_ADDR_CELLS_DEFAULT); - - fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT); + fdt_setprop_u32(fdt, offset, "#address-cells", EFI_DT_ADDR_CELLS_DEFAULT); + fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT); } static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, @@ -42,7 +37,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, u32 fdt_val32; u64 fdt_val64; - /* Do some checks on provided FDT, if it exists*/ + /* Do some checks on provided FDT, if it exists: */ if (orig_fdt) { if (fdt_check_header(orig_fdt)) { pr_efi_err(sys_table, "Device Tree header not valid!\n"); @@ -50,7 +45,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, } /* * We don't get the size of the FDT if we get if from a - * configuration table. + * configuration table: */ if (orig_fdt_size && fdt_totalsize(orig_fdt) > orig_fdt_size) { pr_efi_err(sys_table, "Truncated device tree! foo!\n"); @@ -64,8 +59,8 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, status = fdt_create_empty_tree(fdt, new_fdt_size); if (status == 0) { /* - * Any failure from the following function is non - * critical + * Any failure from the following function is + * non-critical: */ fdt_update_cell_size(sys_table, fdt); } @@ -86,12 +81,13 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, if (node < 0) { node = fdt_add_subnode(fdt, 0, "chosen"); if (node < 0) { - status = node; /* node is error code when negative */ + /* 'node' is an error code when negative: */ + status = node; goto fdt_set_fail; } } - if ((cmdline_ptr != NULL) && (strlen(cmdline_ptr) > 0)) { + if (cmdline_ptr != NULL && strlen(cmdline_ptr) > 0) { status = fdt_setprop(fdt, node, "bootargs", cmdline_ptr, strlen(cmdline_ptr) + 1); if (status) @@ -103,13 +99,12 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, u64 initrd_image_end; u64 initrd_image_start = cpu_to_fdt64(initrd_addr); - status = fdt_setprop(fdt, node, "linux,initrd-start", - &initrd_image_start, sizeof(u64)); + status = fdt_setprop_var(fdt, node, "linux,initrd-start", initrd_image_start); if (status) goto fdt_set_fail; + initrd_image_end = cpu_to_fdt64(initrd_addr + initrd_size); - status = fdt_setprop(fdt, node, "linux,initrd-end", - &initrd_image_end, sizeof(u64)); + status = fdt_setprop_var(fdt, node, "linux,initrd-end", initrd_image_end); if (status) goto fdt_set_fail; } @@ -117,30 +112,28 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, /* Add FDT entries for EFI runtime services in chosen node. 
*/ node = fdt_subnode_offset(fdt, 0, "chosen"); fdt_val64 = cpu_to_fdt64((u64)(unsigned long)sys_table); - status = fdt_setprop(fdt, node, "linux,uefi-system-table", - &fdt_val64, sizeof(fdt_val64)); + + status = fdt_setprop_var(fdt, node, "linux,uefi-system-table", fdt_val64); if (status) goto fdt_set_fail; fdt_val64 = U64_MAX; /* placeholder */ - status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", - &fdt_val64, sizeof(fdt_val64)); + + status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-start", fdt_val64); if (status) goto fdt_set_fail; fdt_val32 = U32_MAX; /* placeholder */ - status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", - &fdt_val32, sizeof(fdt_val32)); + + status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-size", fdt_val32); if (status) goto fdt_set_fail; - status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", - &fdt_val32, sizeof(fdt_val32)); + status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32); if (status) goto fdt_set_fail; - status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", - &fdt_val32, sizeof(fdt_val32)); + status = fdt_setprop_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32); if (status) goto fdt_set_fail; @@ -150,8 +143,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, efi_status = efi_get_random_bytes(sys_table, sizeof(fdt_val64), (u8 *)&fdt_val64); if (efi_status == EFI_SUCCESS) { - status = fdt_setprop(fdt, node, "kaslr-seed", - &fdt_val64, sizeof(fdt_val64)); + status = fdt_setprop_var(fdt, node, "kaslr-seed", fdt_val64); if (status) goto fdt_set_fail; } else if (efi_status != EFI_NOT_FOUND) { @@ -159,7 +151,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, } } - /* shrink the FDT back to its minimum size */ + /* Shrink the FDT back to its minimum size: */ fdt_pack(fdt); return EFI_SUCCESS; @@ -182,26 +174,26 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) return EFI_LOAD_ERROR; fdt_val64 = cpu_to_fdt64((unsigned long)*map->map); - err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start", - &fdt_val64, sizeof(fdt_val64)); + + err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-start", fdt_val64); if (err) return EFI_LOAD_ERROR; fdt_val32 = cpu_to_fdt32(*map->map_size); - err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size", - &fdt_val32, sizeof(fdt_val32)); + + err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-size", fdt_val32); if (err) return EFI_LOAD_ERROR; fdt_val32 = cpu_to_fdt32(*map->desc_size); - err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size", - &fdt_val32, sizeof(fdt_val32)); + + err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-size", fdt_val32); if (err) return EFI_LOAD_ERROR; fdt_val32 = cpu_to_fdt32(*map->desc_ver); - err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver", - &fdt_val32, sizeof(fdt_val32)); + + err = fdt_setprop_inplace_var(fdt, node, "linux,uefi-mmap-desc-ver", fdt_val32); if (err) return EFI_LOAD_ERROR; @@ -209,13 +201,13 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) } #ifndef EFI_FDT_ALIGN -#define EFI_FDT_ALIGN EFI_PAGE_SIZE +# define EFI_FDT_ALIGN EFI_PAGE_SIZE #endif struct exit_boot_struct { - efi_memory_desc_t *runtime_map; - int *runtime_entry_count; - void *new_fdt_addr; + efi_memory_desc_t *runtime_map; + int *runtime_entry_count; + void *new_fdt_addr; }; static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, @@ -235,7 +227,7 @@ static efi_status_t 
exit_boot_func(efi_system_table_t *sys_table_arg, } #ifndef MAX_FDT_SIZE -#define MAX_FDT_SIZE SZ_2M +# define MAX_FDT_SIZE SZ_2M #endif /* @@ -266,16 +258,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, unsigned long mmap_key; efi_memory_desc_t *memory_map, *runtime_map; efi_status_t status; - int runtime_entry_count = 0; + int runtime_entry_count; struct efi_boot_memmap map; struct exit_boot_struct priv; - map.map = &runtime_map; - map.map_size = &map_size; - map.desc_size = &desc_size; - map.desc_ver = &desc_ver; - map.key_ptr = &mmap_key; - map.buff_size = &buff_size; + map.map = &runtime_map; + map.map_size = &map_size; + map.desc_size = &desc_size; + map.desc_ver = &desc_ver; + map.key_ptr = &mmap_key; + map.buff_size = &buff_size; /* * Get a copy of the current memory map that we will use to prepare @@ -289,15 +281,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, return status; } - pr_efi(sys_table, - "Exiting boot services and installing virtual address map...\n"); + pr_efi(sys_table, "Exiting boot services and installing virtual address map...\n"); map.map = &memory_map; status = efi_high_alloc(sys_table, MAX_FDT_SIZE, EFI_FDT_ALIGN, new_fdt_addr, max_addr); if (status != EFI_SUCCESS) { - pr_efi_err(sys_table, - "Unable to allocate memory for new device tree.\n"); + pr_efi_err(sys_table, "Unable to allocate memory for new device tree.\n"); goto fail; } @@ -318,15 +308,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, goto fail_free_new_fdt; } - priv.runtime_map = runtime_map; - priv.runtime_entry_count = &runtime_entry_count; - priv.new_fdt_addr = (void *)*new_fdt_addr; - status = efi_exit_boot_services(sys_table, handle, &map, &priv, - exit_boot_func); + runtime_entry_count = 0; + priv.runtime_map = runtime_map; + priv.runtime_entry_count = &runtime_entry_count; + priv.new_fdt_addr = (void *)*new_fdt_addr; + + status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); if (status == EFI_SUCCESS) { efi_set_virtual_address_map_t *svam; + if (novamap()) + return EFI_SUCCESS; + /* Install the new virtual address map */ svam = sys_table->runtime->set_virtual_address_map; status = svam(runtime_entry_count * desc_size, desc_size, @@ -363,6 +357,7 @@ fail_free_new_fdt: fail: sys_table->boottime->free_pool(runtime_map); + return EFI_LOAD_ERROR; } diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c index 24c461dea7af..0101ca4c13b1 100644 --- a/drivers/firmware/efi/libstub/gop.c +++ b/drivers/firmware/efi/libstub/gop.c @@ -1,10 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* ----------------------------------------------------------------------- * * Copyright 2011 Intel Corporation; author Matt Fleming * - * This file is part of the Linux kernel, and is made available under - * the terms of the GNU General Public License version 2. - * * ----------------------------------------------------------------------- */ #include diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index e0e603a89aa9..b4b1d1dcb5fd 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2016 Linaro Ltd; - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * */ #include diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 72d9dfbebf08..edba5e7a3743 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Secure boot handling. * @@ -5,9 +6,6 @@ * Roy Franz - * - * This file is part of the Linux kernel, and is made available under the - * terms of the GNU General Public License version 2. */ #include #include diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c index a90b0b8fc69a..5bd04f75d8d6 100644 --- a/drivers/firmware/efi/libstub/tpm.c +++ b/drivers/firmware/efi/libstub/tpm.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * TPM handling. * @@ -5,9 +6,6 @@ * Copyright (C) 2017 Google, Inc. * Matthew Garrett * Thiebaud Weksteen - * - * This file is part of the Linux kernel, and is made available under the - * terms of the GNU General Public License version 2. */ #include #include diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index 8986757eafaf..58452fde92cc 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -1,9 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2016 Linaro Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #define pr_fmt(fmt) "efi: memattr: " fmt @@ -94,7 +91,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) { + if (md->virt_addr == 0 && md->phys_addr != 0) { /* no virtual mapping has been installed by the stub */ break; } diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 84a11d0a8023..ad9ddefc9dcb 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c @@ -1,8 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/efi/runtime-map.c * Copyright (C) 2013 Red Hat, Inc., Dave Young - * - * This file is released under the GPLv2. 
*/ #include diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 8903b9ccfc2b..6fa2df383f22 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c @@ -85,15 +85,28 @@ struct efi_runtime_work efi_rts_work; pr_err("Failed to queue work to efi_rts_wq.\n"); \ \ exit: \ - efi_rts_work.efi_rts_id = NONE; \ + efi_rts_work.efi_rts_id = EFI_NONE; \ efi_rts_work.status; \ }) +#ifndef arch_efi_save_flags +#define arch_efi_save_flags(state_flags) local_save_flags(state_flags) +#define arch_efi_restore_flags(state_flags) local_irq_restore(state_flags) +#endif + +unsigned long efi_call_virt_save_flags(void) +{ + unsigned long flags; + + arch_efi_save_flags(flags); + return flags; +} + void efi_call_virt_check_flags(unsigned long flags, const char *call) { unsigned long cur_flags, mismatch; - local_save_flags(cur_flags); + cur_flags = efi_call_virt_save_flags(); mismatch = flags ^ cur_flags; if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK)) @@ -102,7 +115,7 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE); pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n", flags, cur_flags, call); - local_irq_restore(flags); + arch_efi_restore_flags(flags); } /* @@ -146,6 +159,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) */ static DEFINE_SEMAPHORE(efi_runtime_lock); +/* + * Expose the EFI runtime lock to the UV platform + */ +#ifdef CONFIG_X86_UV +extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); +#endif + /* * Calls the appropriate efi_runtime_service() with the appropriate * arguments. @@ -168,50 +188,50 @@ static void efi_call_rts(struct work_struct *work) arg5 = efi_rts_work.arg5; switch (efi_rts_work.efi_rts_id) { - case GET_TIME: + case EFI_GET_TIME: status = efi_call_virt(get_time, (efi_time_t *)arg1, (efi_time_cap_t *)arg2); break; - case SET_TIME: + case EFI_SET_TIME: status = efi_call_virt(set_time, (efi_time_t *)arg1); break; - case GET_WAKEUP_TIME: + case EFI_GET_WAKEUP_TIME: status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1, (efi_bool_t *)arg2, (efi_time_t *)arg3); break; - case SET_WAKEUP_TIME: + case EFI_SET_WAKEUP_TIME: status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1, (efi_time_t *)arg2); break; - case GET_VARIABLE: + case EFI_GET_VARIABLE: status = efi_call_virt(get_variable, (efi_char16_t *)arg1, (efi_guid_t *)arg2, (u32 *)arg3, (unsigned long *)arg4, (void *)arg5); break; - case GET_NEXT_VARIABLE: + case EFI_GET_NEXT_VARIABLE: status = efi_call_virt(get_next_variable, (unsigned long *)arg1, (efi_char16_t *)arg2, (efi_guid_t *)arg3); break; - case SET_VARIABLE: + case EFI_SET_VARIABLE: status = efi_call_virt(set_variable, (efi_char16_t *)arg1, (efi_guid_t *)arg2, *(u32 *)arg3, *(unsigned long *)arg4, (void *)arg5); break; - case QUERY_VARIABLE_INFO: + case EFI_QUERY_VARIABLE_INFO: status = efi_call_virt(query_variable_info, *(u32 *)arg1, (u64 *)arg2, (u64 *)arg3, (u64 *)arg4); break; - case GET_NEXT_HIGH_MONO_COUNT: + case EFI_GET_NEXT_HIGH_MONO_COUNT: status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1); break; - case UPDATE_CAPSULE: + case EFI_UPDATE_CAPSULE: status = efi_call_virt(update_capsule, (efi_capsule_header_t **)arg1, *(unsigned long *)arg2, *(unsigned long *)arg3); break; - case QUERY_CAPSULE_CAPS: + case EFI_QUERY_CAPSULE_CAPS: status = efi_call_virt(query_capsule_caps, (efi_capsule_header_t **)arg1, 
*(unsigned long *)arg2, (u64 *)arg3, @@ -235,7 +255,7 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL); + status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL); up(&efi_runtime_lock); return status; } @@ -246,7 +266,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm) if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL); + status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL); up(&efi_runtime_lock); return status; } @@ -259,7 +279,7 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL, + status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL, NULL); up(&efi_runtime_lock); return status; @@ -271,7 +291,7 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL, + status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL, NULL); up(&efi_runtime_lock); return status; @@ -287,7 +307,7 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size, + status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size, data); up(&efi_runtime_lock); return status; @@ -301,7 +321,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor, + status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor, NULL, NULL); up(&efi_runtime_lock); return status; @@ -317,7 +337,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size, + status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size, data); up(&efi_runtime_lock); return status; @@ -352,7 +372,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space, + status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space, remaining_space, max_variable_size, NULL); up(&efi_runtime_lock); return status; @@ -384,7 +404,7 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL, + status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL, NULL, NULL); up(&efi_runtime_lock); return status; @@ -400,7 +420,7 @@ static void virt_efi_reset_system(int reset_type, "could not get exclusive access to the firmware\n"); return; } - efi_rts_work.efi_rts_id = RESET_SYSTEM; + efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM; __efi_call_virt(reset_system, reset_type, status, data_size, data); up(&efi_runtime_lock); } @@ -416,7 +436,7 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules, if 
(down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list, + status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list, NULL, NULL); up(&efi_runtime_lock); return status; @@ -434,7 +454,7 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules, if (down_interruptible(&efi_runtime_lock)) return EFI_ABORTED; - status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count, + status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count, max_size, reset_type, NULL); up(&efi_runtime_lock); return status; diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 51ecf7d6da48..877745c3aaf2 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * EFI Test Driver for Runtime Services * diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h index 5f4818bf112f..f2446aa1c2e3 100644 --- a/drivers/firmware/efi/test/efi_test.h +++ b/drivers/firmware/efi/test/efi_test.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * EFI Test driver Header * diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 0cbeb3d46b18..3a689b40ccc0 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Google, Inc. * Thiebaud Weksteen - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index fceaafd67ec6..436d1776bc7b 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -1,22 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Originally from efivars.c * * Copyright (C) 2001,2003,2004 Dell * Copyright (C) 2004 Intel Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c index 97f5424dbac9..4b56a587dacd 100644 --- a/drivers/firmware/imx/misc.c +++ b/drivers/firmware/imx/misc.c @@ -18,6 +18,14 @@ struct imx_sc_msg_req_misc_set_ctrl { u16 resource; } __packed; +struct imx_sc_msg_req_cpu_start { + struct imx_sc_rpc_msg hdr; + u32 address_hi; + u32 address_lo; + u16 resource; + u8 enable; +} __packed; + struct imx_sc_msg_req_misc_get_ctrl { struct imx_sc_rpc_msg hdr; u32 ctrl; @@ -97,3 +105,33 @@ int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource, return 0; } EXPORT_SYMBOL(imx_sc_misc_get_control); + +/* + * This function starts/stops a CPU identified by @resource + * + * @param[in] ipc IPC handle + * @param[in] resource resource the control is associated with + * @param[in] enable true for start, false for stop + * @param[in] phys_addr initial instruction address to be executed + * + * @return Returns 0 for success and < 0 for errors. + */ +int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource, + bool enable, u64 phys_addr) +{ + struct imx_sc_msg_req_cpu_start msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_PM; + hdr->func = IMX_SC_PM_FUNC_CPU_START; + hdr->size = 4; + + msg.address_hi = phys_addr >> 32; + msg.address_lo = phys_addr; + msg.resource = resource; + msg.enable = enable; + + return imx_scu_call_rpc(ipc, &msg, true); +} +EXPORT_SYMBOL(imx_sc_pm_cpu_start); diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c index 407245f2efd0..39a94c7177fc 100644 --- a/drivers/firmware/imx/scu-pd.c +++ b/drivers/firmware/imx/scu-pd.c @@ -322,6 +322,7 @@ static int imx_sc_pd_probe(struct platform_device *pdev) static const struct of_device_id imx_sc_pd_match[] = { { .compatible = "fsl,imx8qxp-scu-pd", &imx8qxp_scu_pd}, + { .compatible = "fsl,scu-pd", &imx8qxp_scu_pd}, { /* sentinel */ } }; diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 6bc8e6640d71..c51462f5aa1e 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type) case ISCSI_BOOT_TGT_NIC_ASSOC: case ISCSI_BOOT_TGT_CHAP_TYPE: rc = S_IRUGO; + break; case ISCSI_BOOT_TGT_NAME: if (tgt->tgt_name_len) rc = S_IRUGO; diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 72d9ea18270b..85c656d04bb0 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -104,7 +104,7 @@ unsigned long __init find_ibft_region(unsigned long *sizep) if (ibft_addr) { *sizep = PAGE_ALIGN(ibft_addr->header.length); - return (u64)isa_virt_to_bus(ibft_addr); + return (u64)virt_to_phys(ibft_addr); } *sizep = 0; diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index ec4fd253a4e9..d168c87c7d30 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -333,7 +333,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type) { struct firmware_map_entry *entry; - entry = memblock_alloc_nopanic(sizeof(struct firmware_map_entry), + entry = memblock_alloc(sizeof(struct firmware_map_entry), SMP_CACHE_BYTES); if (WARN_ON(!entry)) return -ENOMEM; diff --git a/drivers/firmware/raspberrypi.c 
b/drivers/firmware/raspberrypi.c index a13558154ac3..61be15d9df7d 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -238,6 +238,16 @@ static int rpi_firmware_probe(struct platform_device *pdev) return 0; } +static void rpi_firmware_shutdown(struct platform_device *pdev) +{ + struct rpi_firmware *fw = platform_get_drvdata(pdev); + + if (!fw) + return; + + rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0); +} + static int rpi_firmware_remove(struct platform_device *pdev) { struct rpi_firmware *fw = platform_get_drvdata(pdev); @@ -278,6 +288,7 @@ static struct platform_driver rpi_firmware_driver = { .of_match_table = rpi_firmware_of_match, }, .probe = rpi_firmware_probe, + .shutdown = rpi_firmware_shutdown, .remove = rpi_firmware_remove, }; module_platform_driver(rpi_firmware_driver); diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile index 1b826dcca719..676b01caff05 100644 --- a/drivers/firmware/tegra/Makefile +++ b/drivers/firmware/tegra/Makefile @@ -1,4 +1,7 @@ tegra-bpmp-y = bpmp.o +tegra-bpmp-$(CONFIG_ARCH_TEGRA_210_SOC) += bpmp-tegra210.o +tegra-bpmp-$(CONFIG_ARCH_TEGRA_186_SOC) += bpmp-tegra186.o +tegra-bpmp-$(CONFIG_ARCH_TEGRA_194_SOC) += bpmp-tegra186.o tegra-bpmp-$(CONFIG_DEBUG_FS) += bpmp-debugfs.o obj-$(CONFIG_TEGRA_BPMP) += tegra-bpmp.o obj-$(CONFIG_TEGRA_IVC) += ivc.o diff --git a/drivers/firmware/tegra/bpmp-private.h b/drivers/firmware/tegra/bpmp-private.h new file mode 100644 index 000000000000..54d560c48398 --- /dev/null +++ b/drivers/firmware/tegra/bpmp-private.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. + */ + +#ifndef __FIRMWARE_TEGRA_BPMP_PRIVATE_H +#define __FIRMWARE_TEGRA_BPMP_PRIVATE_H + +#include + +struct tegra_bpmp_ops { + int (*init)(struct tegra_bpmp *bpmp); + void (*deinit)(struct tegra_bpmp *bpmp); + bool (*is_response_ready)(struct tegra_bpmp_channel *channel); + bool (*is_request_ready)(struct tegra_bpmp_channel *channel); + int (*ack_response)(struct tegra_bpmp_channel *channel); + int (*ack_request)(struct tegra_bpmp_channel *channel); + bool (*is_response_channel_free)(struct tegra_bpmp_channel *channel); + bool (*is_request_channel_free)(struct tegra_bpmp_channel *channel); + int (*post_response)(struct tegra_bpmp_channel *channel); + int (*post_request)(struct tegra_bpmp_channel *channel); + int (*ring_doorbell)(struct tegra_bpmp *bpmp); + int (*resume)(struct tegra_bpmp *bpmp); +}; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) +extern const struct tegra_bpmp_ops tegra186_bpmp_ops; +#endif +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +extern const struct tegra_bpmp_ops tegra210_bpmp_ops; +#endif + +#endif diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c new file mode 100644 index 000000000000..ea308751635f --- /dev/null +++ b/drivers/firmware/tegra/bpmp-tegra186.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, NVIDIA CORPORATION. 
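
The new bpmp-private.h above turns the BPMP channel primitives into a per-SoC tegra_bpmp_ops table, with tegra186_bpmp_ops (IVC ring buffers plus an HSP mailbox doorbell, in the file that follows) and tegra210_bpmp_ops (atomics/semaphore registers plus a LIC interrupt) as the two backends; Tegra194 reuses the Tegra186 implementation per the Makefile change. A rough sketch of how the generic core is expected to dispatch through it, using names that appear later in this patch (the exact tegra_bpmp_soc layout is not shown here and is assumed):

    /* hypothetical per-SoC match data wiring in the .ops pointer */
    static const struct tegra_bpmp_soc tegra186_soc = {
            /* ... channel layout, timeouts ... */
            .ops = &tegra186_bpmp_ops,
    };

    /* bpmp.c no longer talks to IVC or the mailbox directly: */
    static int tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp)
    {
            return bpmp->soc->ops->ring_doorbell(bpmp);
    }

so adding a new SoC only requires another ops implementation, not changes to the message-passing core.
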
+ */ + +#include +#include +#include + +#include +#include +#include + +#include "bpmp-private.h" + +struct tegra186_bpmp { + struct tegra_bpmp *parent; + + struct { + struct gen_pool *pool; + dma_addr_t phys; + void *virt; + } tx, rx; + + struct { + struct mbox_client client; + struct mbox_chan *channel; + } mbox; +}; + +static inline struct tegra_bpmp * +mbox_client_to_bpmp(struct mbox_client *client) +{ + struct tegra186_bpmp *priv; + + priv = container_of(client, struct tegra186_bpmp, mbox.client); + + return priv->parent; +} + +static bool tegra186_bpmp_is_message_ready(struct tegra_bpmp_channel *channel) +{ + void *frame; + + frame = tegra_ivc_read_get_next_frame(channel->ivc); + if (IS_ERR(frame)) { + channel->ib = NULL; + return false; + } + + channel->ib = frame; + + return true; +} + +static bool tegra186_bpmp_is_channel_free(struct tegra_bpmp_channel *channel) +{ + void *frame; + + frame = tegra_ivc_write_get_next_frame(channel->ivc); + if (IS_ERR(frame)) { + channel->ob = NULL; + return false; + } + + channel->ob = frame; + + return true; +} + +static int tegra186_bpmp_ack_message(struct tegra_bpmp_channel *channel) +{ + return tegra_ivc_read_advance(channel->ivc); +} + +static int tegra186_bpmp_post_message(struct tegra_bpmp_channel *channel) +{ + return tegra_ivc_write_advance(channel->ivc); +} + +static int tegra186_bpmp_ring_doorbell(struct tegra_bpmp *bpmp) +{ + struct tegra186_bpmp *priv = bpmp->priv; + int err; + + err = mbox_send_message(priv->mbox.channel, NULL); + if (err < 0) + return err; + + mbox_client_txdone(priv->mbox.channel, 0); + + return 0; +} + +static void tegra186_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data) +{ + struct tegra_bpmp *bpmp = data; + struct tegra186_bpmp *priv = bpmp->priv; + + if (WARN_ON(priv->mbox.channel == NULL)) + return; + + tegra186_bpmp_ring_doorbell(bpmp); +} + +static int tegra186_bpmp_channel_init(struct tegra_bpmp_channel *channel, + struct tegra_bpmp *bpmp, + unsigned int index) +{ + struct tegra186_bpmp *priv = bpmp->priv; + size_t message_size, queue_size; + unsigned int offset; + int err; + + channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc), + GFP_KERNEL); + if (!channel->ivc) + return -ENOMEM; + + message_size = tegra_ivc_align(MSG_MIN_SZ); + queue_size = tegra_ivc_total_queue_size(message_size); + offset = queue_size * index; + + err = tegra_ivc_init(channel->ivc, NULL, + priv->rx.virt + offset, priv->rx.phys + offset, + priv->tx.virt + offset, priv->tx.phys + offset, + 1, message_size, tegra186_bpmp_ivc_notify, + bpmp); + if (err < 0) { + dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n", + index, err); + return err; + } + + init_completion(&channel->completion); + channel->bpmp = bpmp; + + return 0; +} + +static void tegra186_bpmp_channel_reset(struct tegra_bpmp_channel *channel) +{ + /* reset the channel state */ + tegra_ivc_reset(channel->ivc); + + /* sync the channel state with BPMP */ + while (tegra_ivc_notified(channel->ivc)) + ; +} + +static void tegra186_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel) +{ + tegra_ivc_cleanup(channel->ivc); +} + +static void mbox_handle_rx(struct mbox_client *client, void *data) +{ + struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client); + + tegra_bpmp_handle_rx(bpmp); +} + +static int tegra186_bpmp_init(struct tegra_bpmp *bpmp) +{ + struct tegra186_bpmp *priv; + unsigned int i; + int err; + + priv = devm_kzalloc(bpmp->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + bpmp->priv = priv; + priv->parent = bpmp; + + priv->tx.pool 
= of_gen_pool_get(bpmp->dev->of_node, "shmem", 0); + if (!priv->tx.pool) { + dev_err(bpmp->dev, "TX shmem pool not found\n"); + return -ENOMEM; + } + + priv->tx.virt = gen_pool_dma_alloc(priv->tx.pool, 4096, &priv->tx.phys); + if (!priv->tx.virt) { + dev_err(bpmp->dev, "failed to allocate from TX pool\n"); + return -ENOMEM; + } + + priv->rx.pool = of_gen_pool_get(bpmp->dev->of_node, "shmem", 1); + if (!priv->rx.pool) { + dev_err(bpmp->dev, "RX shmem pool not found\n"); + err = -ENOMEM; + goto free_tx; + } + + priv->rx.virt = gen_pool_dma_alloc(priv->rx.pool, 4096, &priv->rx.phys); + if (!priv->rx.virt) { + dev_err(bpmp->dev, "failed to allocate from RX pool\n"); + err = -ENOMEM; + goto free_tx; + } + + err = tegra186_bpmp_channel_init(bpmp->tx_channel, bpmp, + bpmp->soc->channels.cpu_tx.offset); + if (err < 0) + goto free_rx; + + err = tegra186_bpmp_channel_init(bpmp->rx_channel, bpmp, + bpmp->soc->channels.cpu_rx.offset); + if (err < 0) + goto cleanup_tx_channel; + + for (i = 0; i < bpmp->threaded.count; i++) { + unsigned int index = bpmp->soc->channels.thread.offset + i; + + err = tegra186_bpmp_channel_init(&bpmp->threaded_channels[i], + bpmp, index); + if (err < 0) + goto cleanup_channels; + } + + /* mbox registration */ + priv->mbox.client.dev = bpmp->dev; + priv->mbox.client.rx_callback = mbox_handle_rx; + priv->mbox.client.tx_block = false; + priv->mbox.client.knows_txdone = false; + + priv->mbox.channel = mbox_request_channel(&priv->mbox.client, 0); + if (IS_ERR(priv->mbox.channel)) { + err = PTR_ERR(priv->mbox.channel); + dev_err(bpmp->dev, "failed to get HSP mailbox: %d\n", err); + goto cleanup_channels; + } + + tegra186_bpmp_channel_reset(bpmp->tx_channel); + tegra186_bpmp_channel_reset(bpmp->rx_channel); + + for (i = 0; i < bpmp->threaded.count; i++) + tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]); + + return 0; + +cleanup_channels: + for (i = 0; i < bpmp->threaded.count; i++) { + if (!bpmp->threaded_channels[i].bpmp) + continue; + + tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]); + } + + tegra186_bpmp_channel_cleanup(bpmp->rx_channel); +cleanup_tx_channel: + tegra186_bpmp_channel_cleanup(bpmp->tx_channel); +free_rx: + gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096); +free_tx: + gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096); + + return err; +} + +static void tegra186_bpmp_deinit(struct tegra_bpmp *bpmp) +{ + struct tegra186_bpmp *priv = bpmp->priv; + unsigned int i; + + mbox_free_channel(priv->mbox.channel); + + for (i = 0; i < bpmp->threaded.count; i++) + tegra186_bpmp_channel_cleanup(&bpmp->threaded_channels[i]); + + tegra186_bpmp_channel_cleanup(bpmp->rx_channel); + tegra186_bpmp_channel_cleanup(bpmp->tx_channel); + + gen_pool_free(priv->rx.pool, (unsigned long)priv->rx.virt, 4096); + gen_pool_free(priv->tx.pool, (unsigned long)priv->tx.virt, 4096); +} + +static int tegra186_bpmp_resume(struct tegra_bpmp *bpmp) +{ + unsigned int i; + + /* reset message channels */ + tegra186_bpmp_channel_reset(bpmp->tx_channel); + tegra186_bpmp_channel_reset(bpmp->rx_channel); + + for (i = 0; i < bpmp->threaded.count; i++) + tegra186_bpmp_channel_reset(&bpmp->threaded_channels[i]); + + return 0; +} + +const struct tegra_bpmp_ops tegra186_bpmp_ops = { + .init = tegra186_bpmp_init, + .deinit = tegra186_bpmp_deinit, + .is_response_ready = tegra186_bpmp_is_message_ready, + .is_request_ready = tegra186_bpmp_is_message_ready, + .ack_response = tegra186_bpmp_ack_message, + .ack_request = tegra186_bpmp_ack_message, + .is_response_channel_free 
= tegra186_bpmp_is_channel_free, + .is_request_channel_free = tegra186_bpmp_is_channel_free, + .post_response = tegra186_bpmp_post_message, + .post_request = tegra186_bpmp_post_message, + .ring_doorbell = tegra186_bpmp_ring_doorbell, + .resume = tegra186_bpmp_resume, +}; diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c new file mode 100644 index 000000000000..ae15940a078e --- /dev/null +++ b/drivers/firmware/tegra/bpmp-tegra210.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, NVIDIA CORPORATION. + */ + +#include +#include +#include +#include +#include + +#include + +#include "bpmp-private.h" + +#define TRIGGER_OFFSET 0x000 +#define RESULT_OFFSET(id) (0xc00 + id * 4) +#define TRIGGER_ID_SHIFT 16 +#define TRIGGER_CMD_GET 4 + +#define STA_OFFSET 0 +#define SET_OFFSET 4 +#define CLR_OFFSET 8 + +#define CH_MASK(ch) (0x3 << ((ch) * 2)) +#define SL_SIGL(ch) (0x0 << ((ch) * 2)) +#define SL_QUED(ch) (0x1 << ((ch) * 2)) +#define MA_FREE(ch) (0x2 << ((ch) * 2)) +#define MA_ACKD(ch) (0x3 << ((ch) * 2)) + +struct tegra210_bpmp { + void __iomem *atomics; + void __iomem *arb_sema; + struct irq_data *tx_irq_data; +}; + +static u32 bpmp_channel_status(struct tegra_bpmp *bpmp, unsigned int index) +{ + struct tegra210_bpmp *priv = bpmp->priv; + + return __raw_readl(priv->arb_sema + STA_OFFSET) & CH_MASK(index); +} + +static bool tegra210_bpmp_is_response_ready(struct tegra_bpmp_channel *channel) +{ + unsigned int index = channel->index; + + return bpmp_channel_status(channel->bpmp, index) == MA_ACKD(index); +} + +static bool tegra210_bpmp_is_request_ready(struct tegra_bpmp_channel *channel) +{ + unsigned int index = channel->index; + + return bpmp_channel_status(channel->bpmp, index) == SL_SIGL(index); +} + +static bool +tegra210_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel) +{ + unsigned int index = channel->index; + + return bpmp_channel_status(channel->bpmp, index) == MA_FREE(index); +} + +static bool +tegra210_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel) +{ + unsigned int index = channel->index; + + return bpmp_channel_status(channel->bpmp, index) == SL_QUED(index); +} + +static int tegra210_bpmp_post_request(struct tegra_bpmp_channel *channel) +{ + struct tegra210_bpmp *priv = channel->bpmp->priv; + + __raw_writel(CH_MASK(channel->index), priv->arb_sema + CLR_OFFSET); + + return 0; +} + +static int tegra210_bpmp_post_response(struct tegra_bpmp_channel *channel) +{ + struct tegra210_bpmp *priv = channel->bpmp->priv; + + __raw_writel(MA_ACKD(channel->index), priv->arb_sema + SET_OFFSET); + + return 0; +} + +static int tegra210_bpmp_ack_response(struct tegra_bpmp_channel *channel) +{ + struct tegra210_bpmp *priv = channel->bpmp->priv; + + __raw_writel(MA_ACKD(channel->index) ^ MA_FREE(channel->index), + priv->arb_sema + CLR_OFFSET); + + return 0; +} + +static int tegra210_bpmp_ack_request(struct tegra_bpmp_channel *channel) +{ + struct tegra210_bpmp *priv = channel->bpmp->priv; + + __raw_writel(SL_QUED(channel->index), priv->arb_sema + SET_OFFSET); + + return 0; +} + +static int tegra210_bpmp_ring_doorbell(struct tegra_bpmp *bpmp) +{ + struct tegra210_bpmp *priv = bpmp->priv; + struct irq_data *irq_data = priv->tx_irq_data; + + /* + * Tegra Legacy Interrupt Controller (LIC) is used to notify BPMP of + * available messages + */ + if (irq_data->chip->irq_retrigger) + return irq_data->chip->irq_retrigger(irq_data); + + return -EINVAL; +} + +static irqreturn_t rx_irq(int irq, void *data) +{ + 
struct tegra_bpmp *bpmp = data; + + tegra_bpmp_handle_rx(bpmp); + + return IRQ_HANDLED; +} + +static int tegra210_bpmp_channel_init(struct tegra_bpmp_channel *channel, + struct tegra_bpmp *bpmp, + unsigned int index) +{ + struct tegra210_bpmp *priv = bpmp->priv; + u32 address; + void *p; + + /* Retrieve channel base address from BPMP */ + writel(index << TRIGGER_ID_SHIFT | TRIGGER_CMD_GET, + priv->atomics + TRIGGER_OFFSET); + address = readl(priv->atomics + RESULT_OFFSET(index)); + + p = devm_ioremap(bpmp->dev, address, 0x80); + if (!p) + return -ENOMEM; + + channel->ib = p; + channel->ob = p; + channel->index = index; + init_completion(&channel->completion); + channel->bpmp = bpmp; + + return 0; +} + +static int tegra210_bpmp_init(struct tegra_bpmp *bpmp) +{ + struct platform_device *pdev = to_platform_device(bpmp->dev); + struct tegra210_bpmp *priv; + struct resource *res; + unsigned int i; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + bpmp->priv = priv; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->atomics = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->atomics)) + return PTR_ERR(priv->atomics); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->arb_sema = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->arb_sema)) + return PTR_ERR(priv->arb_sema); + + err = tegra210_bpmp_channel_init(bpmp->tx_channel, bpmp, + bpmp->soc->channels.cpu_tx.offset); + if (err < 0) + return err; + + err = tegra210_bpmp_channel_init(bpmp->rx_channel, bpmp, + bpmp->soc->channels.cpu_rx.offset); + if (err < 0) + return err; + + for (i = 0; i < bpmp->threaded.count; i++) { + unsigned int index = bpmp->soc->channels.thread.offset + i; + + err = tegra210_bpmp_channel_init(&bpmp->threaded_channels[i], + bpmp, index); + if (err < 0) + return err; + } + + err = platform_get_irq_byname(pdev, "tx"); + if (err < 0) { + dev_err(&pdev->dev, "failed to get TX IRQ: %d\n", err); + return err; + } + + priv->tx_irq_data = irq_get_irq_data(err); + if (!priv->tx_irq_data) { + dev_err(&pdev->dev, "failed to get IRQ data for TX IRQ\n"); + return err; + } + + err = platform_get_irq_byname(pdev, "rx"); + if (err < 0) { + dev_err(&pdev->dev, "failed to get rx IRQ: %d\n", err); + return err; + } + + err = devm_request_irq(&pdev->dev, err, rx_irq, + IRQF_NO_SUSPEND, dev_name(&pdev->dev), bpmp); + if (err < 0) { + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); + return err; + } + + return 0; +} + +const struct tegra_bpmp_ops tegra210_bpmp_ops = { + .init = tegra210_bpmp_init, + .is_response_ready = tegra210_bpmp_is_response_ready, + .is_request_ready = tegra210_bpmp_is_request_ready, + .ack_response = tegra210_bpmp_ack_response, + .ack_request = tegra210_bpmp_ack_request, + .is_response_channel_free = tegra210_bpmp_is_response_channel_free, + .is_request_channel_free = tegra210_bpmp_is_request_channel_free, + .post_response = tegra210_bpmp_post_response, + .post_request = tegra210_bpmp_post_request, + .ring_doorbell = tegra210_bpmp_ring_doorbell, +}; diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c index 689478b92bce..dd775e8ba5a0 100644 --- a/drivers/firmware/tegra/bpmp.c +++ b/drivers/firmware/tegra/bpmp.c @@ -26,6 +26,8 @@ #include #include +#include "bpmp-private.h" + #define MSG_ACK BIT(0) #define MSG_RING BIT(1) #define TAG_SZ 32 @@ -36,6 +38,14 @@ mbox_client_to_bpmp(struct mbox_client *client) return container_of(client, struct tegra_bpmp, mbox.client); } +static inline 
const struct tegra_bpmp_ops * +channel_to_ops(struct tegra_bpmp_channel *channel) +{ + struct tegra_bpmp *bpmp = channel->bpmp; + + return bpmp->soc->ops; +} + struct tegra_bpmp *tegra_bpmp_get(struct device *dev) { struct platform_device *pdev; @@ -96,22 +106,21 @@ static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg) (msg->rx.size == 0 || msg->rx.data); } -static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel) +static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel *channel) { - void *frame; + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); - frame = tegra_ivc_read_get_next_frame(channel->ivc); - if (IS_ERR(frame)) { - channel->ib = NULL; - return false; - } + return ops->is_response_ready(channel); +} - channel->ib = frame; +static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel *channel) +{ + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); - return true; + return ops->is_request_ready(channel); } -static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel) +static int tegra_bpmp_wait_response(struct tegra_bpmp_channel *channel) { unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout; ktime_t end; @@ -119,29 +128,45 @@ static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel) end = ktime_add_us(ktime_get(), timeout); do { - if (tegra_bpmp_master_acked(channel)) + if (tegra_bpmp_is_response_ready(channel)) return 0; } while (ktime_before(ktime_get(), end)); return -ETIMEDOUT; } -static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel) +static int tegra_bpmp_ack_response(struct tegra_bpmp_channel *channel) { - void *frame; + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); - frame = tegra_ivc_write_get_next_frame(channel->ivc); - if (IS_ERR(frame)) { - channel->ob = NULL; - return false; - } + return ops->ack_response(channel); +} - channel->ob = frame; +static int tegra_bpmp_ack_request(struct tegra_bpmp_channel *channel) +{ + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); - return true; + return ops->ack_request(channel); } -static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel) +static bool +tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel *channel) +{ + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); + + return ops->is_request_channel_free(channel); +} + +static bool +tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel *channel) +{ + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); + + return ops->is_response_channel_free(channel); +} + +static int +tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel *channel) { unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout; ktime_t start, now; @@ -149,7 +174,7 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel) start = ns_to_ktime(local_clock()); do { - if (tegra_bpmp_master_free(channel)) + if (tegra_bpmp_is_request_channel_free(channel)) return 0; now = ns_to_ktime(local_clock()); @@ -158,6 +183,25 @@ static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel) return -ETIMEDOUT; } +static int tegra_bpmp_post_request(struct tegra_bpmp_channel *channel) +{ + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); + + return ops->post_request(channel); +} + +static int tegra_bpmp_post_response(struct tegra_bpmp_channel *channel) +{ + const struct tegra_bpmp_ops *ops = channel_to_ops(channel); + + return ops->post_response(channel); +} + +static int 
tegra_bpmp_ring_doorbell(struct tegra_bpmp *bpmp) +{ + return bpmp->soc->ops->ring_doorbell(bpmp); +} + static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, void *data, size_t size, int *ret) { @@ -166,7 +210,7 @@ static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, if (data && size > 0) memcpy(data, channel->ib->data, size); - err = tegra_ivc_read_advance(channel->ivc); + err = tegra_bpmp_ack_response(channel); if (err < 0) return err; @@ -210,7 +254,7 @@ static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, if (data && size > 0) memcpy(channel->ob->data, data, size); - return tegra_ivc_write_advance(channel->ivc); + return tegra_bpmp_post_request(channel); } static struct tegra_bpmp_channel * @@ -238,7 +282,7 @@ tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq, channel = &bpmp->threaded_channels[index]; - if (!tegra_bpmp_master_free(channel)) { + if (!tegra_bpmp_is_request_channel_free(channel)) { err = -EBUSY; goto unlock; } @@ -270,7 +314,7 @@ static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, { int err; - err = tegra_bpmp_wait_master_free(channel); + err = tegra_bpmp_wait_request_channel_free(channel); if (err < 0) return err; @@ -302,13 +346,11 @@ int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp, spin_unlock(&bpmp->atomic_tx_lock); - err = mbox_send_message(bpmp->mbox.channel, NULL); + err = tegra_bpmp_ring_doorbell(bpmp); if (err < 0) return err; - mbox_client_txdone(bpmp->mbox.channel, 0); - - err = tegra_bpmp_wait_ack(channel); + err = tegra_bpmp_wait_response(channel); if (err < 0) return err; @@ -335,12 +377,10 @@ int tegra_bpmp_transfer(struct tegra_bpmp *bpmp, if (IS_ERR(channel)) return PTR_ERR(channel); - err = mbox_send_message(bpmp->mbox.channel, NULL); + err = tegra_bpmp_ring_doorbell(bpmp); if (err < 0) return err; - mbox_client_txdone(bpmp->mbox.channel, 0); - timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout); err = wait_for_completion_timeout(&channel->completion, timeout); @@ -369,38 +409,34 @@ void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, int code, { unsigned long flags = channel->ib->flags; struct tegra_bpmp *bpmp = channel->bpmp; - struct tegra_bpmp_mb_data *frame; int err; if (WARN_ON(size > MSG_DATA_MIN_SZ)) return; - err = tegra_ivc_read_advance(channel->ivc); + err = tegra_bpmp_ack_request(channel); if (WARN_ON(err < 0)) return; if ((flags & MSG_ACK) == 0) return; - frame = tegra_ivc_write_get_next_frame(channel->ivc); - if (WARN_ON(IS_ERR(frame))) + if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel))) return; - frame->code = code; + channel->ob->code = code; if (data && size > 0) - memcpy(frame->data, data, size); + memcpy(channel->ob->data, data, size); - err = tegra_ivc_write_advance(channel->ivc); + err = tegra_bpmp_post_response(channel); if (WARN_ON(err < 0)) return; if (flags & MSG_RING) { - err = mbox_send_message(bpmp->mbox.channel, NULL); + err = tegra_bpmp_ring_doorbell(bpmp); if (WARN_ON(err < 0)) return; - - mbox_client_txdone(bpmp->mbox.channel, 0); } } EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return); @@ -627,9 +663,8 @@ static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel) complete(&channel->completion); } -static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data) +void tegra_bpmp_handle_rx(struct tegra_bpmp *bpmp) { - struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client); struct tegra_bpmp_channel *channel; unsigned int i, count; unsigned long *busy; @@ -638,7 
+673,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data) count = bpmp->soc->channels.thread.count; busy = bpmp->threaded.busy; - if (tegra_bpmp_master_acked(channel)) + if (tegra_bpmp_is_request_ready(channel)) tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel); spin_lock(&bpmp->lock); @@ -648,7 +683,7 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data) channel = &bpmp->threaded_channels[i]; - if (tegra_bpmp_master_acked(channel)) { + if (tegra_bpmp_is_response_ready(channel)) { tegra_bpmp_channel_signal(channel); clear_bit(i, busy); } @@ -657,74 +692,9 @@ static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data) spin_unlock(&bpmp->lock); } -static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data) -{ - struct tegra_bpmp *bpmp = data; - int err; - - if (WARN_ON(bpmp->mbox.channel == NULL)) - return; - - err = mbox_send_message(bpmp->mbox.channel, NULL); - if (err < 0) - return; - - mbox_client_txdone(bpmp->mbox.channel, 0); -} - -static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel, - struct tegra_bpmp *bpmp, - unsigned int index) -{ - size_t message_size, queue_size; - unsigned int offset; - int err; - - channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc), - GFP_KERNEL); - if (!channel->ivc) - return -ENOMEM; - - message_size = tegra_ivc_align(MSG_MIN_SZ); - queue_size = tegra_ivc_total_queue_size(message_size); - offset = queue_size * index; - - err = tegra_ivc_init(channel->ivc, NULL, - bpmp->rx.virt + offset, bpmp->rx.phys + offset, - bpmp->tx.virt + offset, bpmp->tx.phys + offset, - 1, message_size, tegra_bpmp_ivc_notify, - bpmp); - if (err < 0) { - dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n", - index, err); - return err; - } - - init_completion(&channel->completion); - channel->bpmp = bpmp; - - return 0; -} - -static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel) -{ - /* reset the channel state */ - tegra_ivc_reset(channel->ivc); - - /* sync the channel state with BPMP */ - while (tegra_ivc_notified(channel->ivc)) - ; -} - -static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel) -{ - tegra_ivc_cleanup(channel->ivc); -} - static int tegra_bpmp_probe(struct platform_device *pdev) { struct tegra_bpmp *bpmp; - unsigned int i; char tag[TAG_SZ]; size_t size; int err; @@ -736,32 +706,6 @@ static int tegra_bpmp_probe(struct platform_device *pdev) bpmp->soc = of_device_get_match_data(&pdev->dev); bpmp->dev = &pdev->dev; - bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0); - if (!bpmp->tx.pool) { - dev_err(&pdev->dev, "TX shmem pool not found\n"); - return -ENOMEM; - } - - bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys); - if (!bpmp->tx.virt) { - dev_err(&pdev->dev, "failed to allocate from TX pool\n"); - return -ENOMEM; - } - - bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1); - if (!bpmp->rx.pool) { - dev_err(&pdev->dev, "RX shmem pool not found\n"); - err = -ENOMEM; - goto free_tx; - } - - bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys); - if (!bpmp->rx.virt) { - dev_err(&pdev->dev, "failed to allocate from RX pool\n"); - err = -ENOMEM; - goto free_tx; - } - INIT_LIST_HEAD(&bpmp->mrqs); spin_lock_init(&bpmp->lock); @@ -771,81 +715,38 @@ static int tegra_bpmp_probe(struct platform_device *pdev) size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long); bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!bpmp->threaded.allocated) { - 
err = -ENOMEM; - goto free_rx; - } + if (!bpmp->threaded.allocated) + return -ENOMEM; bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!bpmp->threaded.busy) { - err = -ENOMEM; - goto free_rx; - } + if (!bpmp->threaded.busy) + return -ENOMEM; spin_lock_init(&bpmp->atomic_tx_lock); bpmp->tx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->tx_channel), GFP_KERNEL); - if (!bpmp->tx_channel) { - err = -ENOMEM; - goto free_rx; - } + if (!bpmp->tx_channel) + return -ENOMEM; bpmp->rx_channel = devm_kzalloc(&pdev->dev, sizeof(*bpmp->rx_channel), GFP_KERNEL); - if (!bpmp->rx_channel) { - err = -ENOMEM; - goto free_rx; - } + if (!bpmp->rx_channel) + return -ENOMEM; bpmp->threaded_channels = devm_kcalloc(&pdev->dev, bpmp->threaded.count, sizeof(*bpmp->threaded_channels), GFP_KERNEL); - if (!bpmp->threaded_channels) { - err = -ENOMEM; - goto free_rx; - } - - err = tegra_bpmp_channel_init(bpmp->tx_channel, bpmp, - bpmp->soc->channels.cpu_tx.offset); - if (err < 0) - goto free_rx; + if (!bpmp->threaded_channels) + return -ENOMEM; - err = tegra_bpmp_channel_init(bpmp->rx_channel, bpmp, - bpmp->soc->channels.cpu_rx.offset); + err = bpmp->soc->ops->init(bpmp); if (err < 0) - goto cleanup_tx_channel; - - for (i = 0; i < bpmp->threaded.count; i++) { - err = tegra_bpmp_channel_init( - &bpmp->threaded_channels[i], bpmp, - bpmp->soc->channels.thread.offset + i); - if (err < 0) - goto cleanup_threaded_channels; - } - - /* mbox registration */ - bpmp->mbox.client.dev = &pdev->dev; - bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx; - bpmp->mbox.client.tx_block = false; - bpmp->mbox.client.knows_txdone = false; - - bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0); - if (IS_ERR(bpmp->mbox.channel)) { - err = PTR_ERR(bpmp->mbox.channel); - dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err); - goto cleanup_threaded_channels; - } - - /* reset message channels */ - tegra_bpmp_channel_reset(bpmp->tx_channel); - tegra_bpmp_channel_reset(bpmp->rx_channel); - for (i = 0; i < bpmp->threaded.count; i++) - tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]); + return err; err = tegra_bpmp_request_mrq(bpmp, MRQ_PING, tegra_bpmp_mrq_handle_ping, bpmp); if (err < 0) - goto free_mbox; + goto deinit; err = tegra_bpmp_ping(bpmp); if (err < 0) { @@ -867,17 +768,23 @@ static int tegra_bpmp_probe(struct platform_device *pdev) if (err < 0) goto free_mrq; - err = tegra_bpmp_init_clocks(bpmp); - if (err < 0) - goto free_mrq; + if (of_find_property(pdev->dev.of_node, "#clock-cells", NULL)) { + err = tegra_bpmp_init_clocks(bpmp); + if (err < 0) + goto free_mrq; + } - err = tegra_bpmp_init_resets(bpmp); - if (err < 0) - goto free_mrq; + if (of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) { + err = tegra_bpmp_init_resets(bpmp); + if (err < 0) + goto free_mrq; + } - err = tegra_bpmp_init_powergates(bpmp); - if (err < 0) - goto free_mrq; + if (of_find_property(pdev->dev.of_node, "#power-domain-cells", NULL)) { + err = tegra_bpmp_init_powergates(bpmp); + if (err < 0) + goto free_mrq; + } err = tegra_bpmp_init_debugfs(bpmp); if (err < 0) @@ -887,41 +794,27 @@ static int tegra_bpmp_probe(struct platform_device *pdev) free_mrq: tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp); -free_mbox: - mbox_free_channel(bpmp->mbox.channel); -cleanup_threaded_channels: - for (i = 0; i < bpmp->threaded.count; i++) { - if (bpmp->threaded_channels[i].bpmp) - tegra_bpmp_channel_cleanup(&bpmp->threaded_channels[i]); - } +deinit: + if (bpmp->soc->ops->deinit) + bpmp->soc->ops->deinit(bpmp); - 
tegra_bpmp_channel_cleanup(bpmp->rx_channel); -cleanup_tx_channel: - tegra_bpmp_channel_cleanup(bpmp->tx_channel); -free_rx: - gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096); -free_tx: - gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096); return err; } static int __maybe_unused tegra_bpmp_resume(struct device *dev) { struct tegra_bpmp *bpmp = dev_get_drvdata(dev); - unsigned int i; - - /* reset message channels */ - tegra_bpmp_channel_reset(bpmp->tx_channel); - tegra_bpmp_channel_reset(bpmp->rx_channel); - - for (i = 0; i < bpmp->threaded.count; i++) - tegra_bpmp_channel_reset(&bpmp->threaded_channels[i]); - return 0; + if (bpmp->soc->ops->resume) + return bpmp->soc->ops->resume(bpmp); + else + return 0; } static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume); +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) static const struct tegra_bpmp_soc tegra186_soc = { .channels = { .cpu_tx = { @@ -938,11 +831,42 @@ static const struct tegra_bpmp_soc tegra186_soc = { .timeout = 0, }, }, + .ops = &tegra186_bpmp_ops, .num_resets = 193, }; +#endif + +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +static const struct tegra_bpmp_soc tegra210_soc = { + .channels = { + .cpu_tx = { + .offset = 0, + .count = 1, + .timeout = 60 * USEC_PER_SEC, + }, + .thread = { + .offset = 4, + .count = 1, + .timeout = 600 * USEC_PER_SEC, + }, + .cpu_rx = { + .offset = 8, + .count = 1, + .timeout = 0, + }, + }, + .ops = &tegra210_bpmp_ops, +}; +#endif static const struct of_device_id tegra_bpmp_match[] = { +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc }, +#endif +#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) + { .compatible = "nvidia,tegra210-bpmp", .data = &tegra210_soc }, +#endif { } }; diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 69ed1464175c..3fbbb61012c4 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -146,25 +146,8 @@ static int ti_sci_debug_show(struct seq_file *s, void *unused) return 0; } -/** - * ti_sci_debug_open() - debug file open - * @inode: inode pointer - * @file: file pointer - * - * Return: result of single_open - */ -static int ti_sci_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, ti_sci_debug_show, inode->i_private); -} - -/* log file operations */ -static const struct file_operations ti_sci_debug_fops = { - .open = ti_sci_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +/* Provide the log file operations interface*/ +DEFINE_SHOW_ATTRIBUTE(ti_sci_debug); /** * ti_sci_debugfs_create() - Create log debug file diff --git a/drivers/firmware/xilinx/Kconfig b/drivers/firmware/xilinx/Kconfig index 8f44b9cd295a..bd33bbf70daf 100644 --- a/drivers/firmware/xilinx/Kconfig +++ b/drivers/firmware/xilinx/Kconfig @@ -6,6 +6,7 @@ menu "Zynq MPSoC Firmware Drivers" config ZYNQMP_FIRMWARE bool "Enable Xilinx Zynq MPSoC firmware interface" + select MFD_CORE help Firmware interface driver is used by different drivers to communicate with the firmware for diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index 9a1c72a9280f..98f936125643 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -23,6 +24,12 @@ #include #include "zynqmp-debug.h" +static const struct mfd_cell 
firmware_devs[] = { + { + .name = "zynqmp_power_controller", + }, +}; + /** * zynqmp_pm_ret_code() - Convert PMU-FW error codes to Linux error codes * @ret_status: PMUFW return code @@ -186,6 +193,29 @@ static int zynqmp_pm_get_api_version(u32 *version) return ret; } +/** + * zynqmp_pm_get_chipid - Get silicon ID registers + * @idcode: IDCODE register + * @version: version register + * + * Return: Returns the status of the operation and the idcode and version + * registers in @idcode and @version. + */ +static int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + int ret; + + if (!idcode || !version) + return -EINVAL; + + ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload); + *idcode = ret_payload[1]; + *version = ret_payload[2]; + + return ret; +} + /** * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version * @version: Returned version value @@ -469,8 +499,129 @@ static int zynqmp_pm_ioctl(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, arg1, arg2, out); } +/** + * zynqmp_pm_reset_assert - Request setting of reset (1 - assert, 0 - release) + * @reset: Reset to be configured + * @assert_flag: Flag stating should reset be asserted (1) or + * released (0) + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, + const enum zynqmp_pm_reset_action assert_flag) +{ + return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag, + 0, 0, NULL); +} + +/** + * zynqmp_pm_reset_get_status - Get status of the reset + * @reset: Reset whose status should be returned + * @status: Returned status + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, + u32 *status) +{ + u32 ret_payload[PAYLOAD_ARG_CNT]; + int ret; + + if (!status) + return -EINVAL; + + ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0, + 0, 0, ret_payload); + *status = ret_payload[1]; + + return ret; +} + +/** + * zynqmp_pm_init_finalize() - PM call to inform firmware that the caller + * master has initialized its own power management + * + * This API function is to be used for notify the power management controller + * about the completed power management initialization. + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_init_finalize(void) +{ + return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL); +} + +/** + * zynqmp_pm_set_suspend_mode() - Set system suspend mode + * @mode: Mode to set for system suspend + * + * This API function is used to set mode of system suspend. + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_set_suspend_mode(u32 mode) +{ + return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL); +} + +/** + * zynqmp_pm_request_node() - Request a node with specific capabilities + * @node: Node ID of the slave + * @capabilities: Requested capabilities of the slave + * @qos: Quality of service (not supported) + * @ack: Flag to specify whether acknowledge is requested + * + * This function is used by master to request particular node from firmware. + * Every master must request node before using it. 
+ * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_request_node(const u32 node, const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack) +{ + return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities, + qos, ack, NULL); +} + +/** + * zynqmp_pm_release_node() - Release a node + * @node: Node ID of the slave + * + * This function is used by master to inform firmware that master + * has released node. Once released, master must not use that node + * without re-request. + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_release_node(const u32 node) +{ + return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL); +} + +/** + * zynqmp_pm_set_requirement() - PM call to set requirement for PM slaves + * @node: Node ID of the slave + * @capabilities: Requested capabilities of the slave + * @qos: Quality of service (not supported) + * @ack: Flag to specify whether acknowledge is requested + * + * This API function is to be used for slaves a PU already has requested + * to change its capabilities. + * + * Return: Returns status, either success or error+reason + */ +static int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack) +{ + return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities, + qos, ack, NULL); +} + static const struct zynqmp_eemi_ops eemi_ops = { .get_api_version = zynqmp_pm_get_api_version, + .get_chipid = zynqmp_pm_get_chipid, .query_data = zynqmp_pm_query_data, .clock_enable = zynqmp_pm_clock_enable, .clock_disable = zynqmp_pm_clock_disable, @@ -482,6 +633,13 @@ static const struct zynqmp_eemi_ops eemi_ops = { .clock_setparent = zynqmp_pm_clock_setparent, .clock_getparent = zynqmp_pm_clock_getparent, .ioctl = zynqmp_pm_ioctl, + .reset_assert = zynqmp_pm_reset_assert, + .reset_get_status = zynqmp_pm_reset_get_status, + .init_finalize = zynqmp_pm_init_finalize, + .set_suspend_mode = zynqmp_pm_set_suspend_mode, + .request_node = zynqmp_pm_request_node, + .release_node = zynqmp_pm_release_node, + .set_requirement = zynqmp_pm_set_requirement, }; /** @@ -538,11 +696,19 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) zynqmp_pm_api_debugfs_init(); + ret = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE, firmware_devs, + ARRAY_SIZE(firmware_devs), NULL, 0, NULL); + if (ret) { + dev_err(&pdev->dev, "failed to add MFD devices %d\n", ret); + return ret; + } + return of_platform_populate(dev->of_node, NULL, NULL, dev); } static int zynqmp_firmware_remove(struct platform_device *pdev) { + mfd_remove_devices(&pdev->dev); zynqmp_pm_api_debugfs_exit(); return 0; diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig index 0bb7b5cd6cdc..c20445b867ae 100644 --- a/drivers/fpga/Kconfig +++ b/drivers/fpga/Kconfig @@ -104,7 +104,7 @@ config SOCFPGA_FPGA_BRIDGE config ALTERA_FREEZE_BRIDGE tristate "Altera FPGA Freeze Bridge" - depends on ARCH_SOCFPGA && FPGA_BRIDGE + depends on FPGA_BRIDGE && HAS_IOMEM help Say Y to enable drivers for Altera FPGA Freeze bridges. 
A freeze bridge is a bridge that exists in the FPGA fabric to diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c index 8c18beec6b57..678d0115f840 100644 --- a/drivers/fpga/altera-ps-spi.c +++ b/drivers/fpga/altera-ps-spi.c @@ -205,7 +205,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr, struct fpga_image_info *info) { struct altera_ps_conf *conf = mgr->priv; - const char dummy[] = {0}; + static const char dummy[] = {0}; int ret; if (gpiod_get_value_cansleep(conf->status)) { diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index a1a09e04fab8..13851b3d1c56 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -508,14 +508,11 @@ static int __init s10_init(void) return -ENODEV; np = of_find_matching_node(fw_np, s10_of_match); - if (!np) { - of_node_put(fw_np); + if (!np) return -ENODEV; - } of_node_put(np); ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); - of_node_put(fw_np); if (ret) return ret; diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig index 6abc88514512..6d8c8027e1cd 100644 --- a/drivers/gnss/Kconfig +++ b/drivers/gnss/Kconfig @@ -15,6 +15,19 @@ if GNSS config GNSS_SERIAL tristate +config GNSS_MTK_SERIAL + tristate "Mediatek GNSS receiver support" + depends on SERIAL_DEV_BUS + select GNSS_SERIAL + help + Say Y here if you have a Mediatek-based GNSS receiver which uses a + serial interface. + + To compile this driver as a module, choose M here: the module will + be called gnss-mtk. + + If unsure, say N. + config GNSS_SIRF_SERIAL tristate "SiRFstar GNSS receiver support" depends on SERIAL_DEV_BUS diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile index 5cf0ebe0330a..451f11401ecc 100644 --- a/drivers/gnss/Makefile +++ b/drivers/gnss/Makefile @@ -9,6 +9,9 @@ gnss-y := core.o obj-$(CONFIG_GNSS_SERIAL) += gnss-serial.o gnss-serial-y := serial.o +obj-$(CONFIG_GNSS_MTK_SERIAL) += gnss-mtk.o +gnss-mtk-y := mtk.o + obj-$(CONFIG_GNSS_SIRF_SERIAL) += gnss-sirf.o gnss-sirf-y := sirf.o diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c index 4291a0dd22aa..320cfca80d5f 100644 --- a/drivers/gnss/core.c +++ b/drivers/gnss/core.c @@ -334,6 +334,7 @@ static const char * const gnss_type_names[GNSS_TYPE_COUNT] = { [GNSS_TYPE_NMEA] = "NMEA", [GNSS_TYPE_SIRF] = "SiRF", [GNSS_TYPE_UBX] = "UBX", + [GNSS_TYPE_MTK] = "MTK", }; static const char *gnss_type_name(struct gnss_device *gdev) diff --git a/drivers/gnss/mtk.c b/drivers/gnss/mtk.c new file mode 100644 index 000000000000..d1fc55560daf --- /dev/null +++ b/drivers/gnss/mtk.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Mediatek GNSS receiver driver + * + * Copyright (C) 2018 Johan Hovold + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "serial.h" + +struct mtk_data { + struct regulator *vbackup; + struct regulator *vcc; +}; + +static int mtk_set_active(struct gnss_serial *gserial) +{ + struct mtk_data *data = gnss_serial_get_drvdata(gserial); + int ret; + + ret = regulator_enable(data->vcc); + if (ret) + return ret; + + return 0; +} + +static int mtk_set_standby(struct gnss_serial *gserial) +{ + struct mtk_data *data = gnss_serial_get_drvdata(gserial); + int ret; + + ret = regulator_disable(data->vcc); + if (ret) + return ret; + + return 0; +} + +static int mtk_set_power(struct gnss_serial *gserial, + enum gnss_serial_pm_state state) +{ + switch (state) { + case GNSS_SERIAL_ACTIVE: + return mtk_set_active(gserial); + case GNSS_SERIAL_OFF: + case GNSS_SERIAL_STANDBY: + 
return mtk_set_standby(gserial); + } + + return -EINVAL; +} + +static const struct gnss_serial_ops mtk_gserial_ops = { + .set_power = mtk_set_power, +}; + +static int mtk_probe(struct serdev_device *serdev) +{ + struct gnss_serial *gserial; + struct mtk_data *data; + int ret; + + gserial = gnss_serial_allocate(serdev, sizeof(*data)); + if (IS_ERR(gserial)) { + ret = PTR_ERR(gserial); + return ret; + } + + gserial->ops = &mtk_gserial_ops; + + gserial->gdev->type = GNSS_TYPE_MTK; + + data = gnss_serial_get_drvdata(gserial); + + data->vcc = devm_regulator_get(&serdev->dev, "vcc"); + if (IS_ERR(data->vcc)) { + ret = PTR_ERR(data->vcc); + goto err_free_gserial; + } + + data->vbackup = devm_regulator_get_optional(&serdev->dev, "vbackup"); + if (IS_ERR(data->vbackup)) { + ret = PTR_ERR(data->vbackup); + if (ret == -ENODEV) + data->vbackup = NULL; + else + goto err_free_gserial; + } + + if (data->vbackup) { + ret = regulator_enable(data->vbackup); + if (ret) + goto err_free_gserial; + } + + ret = gnss_serial_register(gserial); + if (ret) + goto err_disable_vbackup; + + return 0; + +err_disable_vbackup: + if (data->vbackup) + regulator_disable(data->vbackup); +err_free_gserial: + gnss_serial_free(gserial); + + return ret; +} + +static void mtk_remove(struct serdev_device *serdev) +{ + struct gnss_serial *gserial = serdev_device_get_drvdata(serdev); + struct mtk_data *data = gnss_serial_get_drvdata(gserial); + + gnss_serial_deregister(gserial); + if (data->vbackup) + regulator_disable(data->vbackup); + gnss_serial_free(gserial); +}; + +#ifdef CONFIG_OF +static const struct of_device_id mtk_of_match[] = { + { .compatible = "globaltop,pa6h" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_of_match); +#endif + +static struct serdev_device_driver mtk_driver = { + .driver = { + .name = "gnss-mtk", + .of_match_table = of_match_ptr(mtk_of_match), + .pm = &gnss_serial_pm_ops, + }, + .probe = mtk_probe, + .remove = mtk_remove, +}; +module_serdev_device_driver(mtk_driver); + +MODULE_AUTHOR("Loys Ollivier "); +MODULE_DESCRIPTION("Mediatek GNSS receiver driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index 226f6e6fe01b..effed3a8d398 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c @@ -25,31 +25,83 @@ #define SIRF_ON_OFF_PULSE_TIME 100 #define SIRF_ACTIVATE_TIMEOUT 200 #define SIRF_HIBERNATE_TIMEOUT 200 +/* + * If no data arrives for this time, we assume that the chip is off. + * REVISIT: The report cycle is configurable and can be several minutes long, + * so this will only work reliably if the report cycle is set to a reasonably + * low value. Also power saving settings (like sending data only on movement) + * might make things work even worse. + * A workaround might be to parse shutdown or bootup messages.
+ */ +#define SIRF_REPORT_CYCLE 2000 struct sirf_data { struct gnss_device *gdev; struct serdev_device *serdev; speed_t speed; struct regulator *vcc; + struct regulator *lna; struct gpio_desc *on_off; struct gpio_desc *wakeup; int irq; bool active; + + struct mutex gdev_mutex; + bool open; + + struct mutex serdev_mutex; + int serdev_count; + wait_queue_head_t power_wait; }; +static int sirf_serdev_open(struct sirf_data *data) +{ + int ret = 0; + + mutex_lock(&data->serdev_mutex); + if (++data->serdev_count == 1) { + ret = serdev_device_open(data->serdev); + if (ret) { + data->serdev_count--; + goto out_unlock; + } + + serdev_device_set_baudrate(data->serdev, data->speed); + serdev_device_set_flow_control(data->serdev, false); + } + +out_unlock: + mutex_unlock(&data->serdev_mutex); + + return ret; +} + +static void sirf_serdev_close(struct sirf_data *data) +{ + mutex_lock(&data->serdev_mutex); + if (--data->serdev_count == 0) + serdev_device_close(data->serdev); + mutex_unlock(&data->serdev_mutex); +} + static int sirf_open(struct gnss_device *gdev) { struct sirf_data *data = gnss_get_drvdata(gdev); struct serdev_device *serdev = data->serdev; int ret; - ret = serdev_device_open(serdev); - if (ret) - return ret; + mutex_lock(&data->gdev_mutex); + data->open = true; + mutex_unlock(&data->gdev_mutex); - serdev_device_set_baudrate(serdev, data->speed); - serdev_device_set_flow_control(serdev, false); + ret = sirf_serdev_open(data); + if (ret) { + mutex_lock(&data->gdev_mutex); + data->open = false; + mutex_unlock(&data->gdev_mutex); + return ret; + } ret = pm_runtime_get_sync(&serdev->dev); if (ret < 0) { @@ -61,7 +113,11 @@ static int sirf_open(struct gnss_device *gdev) return 0; err_close: - serdev_device_close(serdev); + sirf_serdev_close(data); + + mutex_lock(&data->gdev_mutex); + data->open = false; + mutex_unlock(&data->gdev_mutex); return ret; } @@ -71,9 +127,13 @@ static void sirf_close(struct gnss_device *gdev) struct sirf_data *data = gnss_get_drvdata(gdev); struct serdev_device *serdev = data->serdev; - serdev_device_close(serdev); + sirf_serdev_close(data); pm_runtime_put(&serdev->dev); + + mutex_lock(&data->gdev_mutex); + data->open = false; + mutex_unlock(&data->gdev_mutex); } static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf, @@ -105,8 +165,19 @@ static int sirf_receive_buf(struct serdev_device *serdev, { struct sirf_data *data = serdev_device_get_drvdata(serdev); struct gnss_device *gdev = data->gdev; + int ret = 0; + + if (!data->wakeup && !data->active) { + data->active = true; + wake_up_interruptible(&data->power_wait); + } + + mutex_lock(&data->gdev_mutex); + if (data->open) + ret = gnss_insert_raw(gdev, buf, count); + mutex_unlock(&data->gdev_mutex); - return gnss_insert_raw(gdev, buf, count); + return ret; } static const struct serdev_device_ops sirf_serdev_ops = { @@ -125,17 +196,45 @@ static irqreturn_t sirf_wakeup_handler(int irq, void *dev_id) if (ret < 0) goto out; - data->active = !!ret; + data->active = ret; wake_up_interruptible(&data->power_wait); out: return IRQ_HANDLED; } +static int sirf_wait_for_power_state_nowakeup(struct sirf_data *data, + bool active, + unsigned long timeout) +{ + int ret; + + /* Wait for state change (including any shutdown messages). */ + msleep(timeout); + + /* Wait for data reception or timeout. 
*/ + data->active = false; + ret = wait_event_interruptible_timeout(data->power_wait, + data->active, msecs_to_jiffies(SIRF_REPORT_CYCLE)); + if (ret < 0) + return ret; + + if (ret > 0 && !active) + return -ETIMEDOUT; + + if (ret == 0 && active) + return -ETIMEDOUT; + + return 0; +} + static int sirf_wait_for_power_state(struct sirf_data *data, bool active, unsigned long timeout) { int ret; + if (!data->wakeup) + return sirf_wait_for_power_state_nowakeup(data, active, timeout); + ret = wait_event_interruptible_timeout(data->power_wait, data->active == active, msecs_to_jiffies(timeout)); if (ret < 0) @@ -168,21 +267,22 @@ static int sirf_set_active(struct sirf_data *data, bool active) else timeout = SIRF_HIBERNATE_TIMEOUT; + if (!data->wakeup) { + ret = sirf_serdev_open(data); + if (ret) + return ret; + } + do { sirf_pulse_on_off(data); ret = sirf_wait_for_power_state(data, active, timeout); - if (ret < 0) { - if (ret == -ETIMEDOUT) - continue; + } while (ret == -ETIMEDOUT && retries--); - return ret; - } + if (!data->wakeup) + sirf_serdev_close(data); - break; - } while (retries--); - - if (retries < 0) - return -ETIMEDOUT; + if (ret) + return ret; return 0; } @@ -190,21 +290,60 @@ static int sirf_set_active(struct sirf_data *data, bool active) static int sirf_runtime_suspend(struct device *dev) { struct sirf_data *data = dev_get_drvdata(dev); + int ret2; + int ret; - if (!data->on_off) - return regulator_disable(data->vcc); + if (data->on_off) + ret = sirf_set_active(data, false); + else + ret = regulator_disable(data->vcc); + + if (ret) + return ret; + + ret = regulator_disable(data->lna); + if (ret) + goto err_reenable; - return sirf_set_active(data, false); + return 0; + +err_reenable: + if (data->on_off) + ret2 = sirf_set_active(data, true); + else + ret2 = regulator_enable(data->vcc); + + if (ret2) + dev_err(dev, + "failed to reenable power on failed suspend: %d\n", + ret2); + + return ret; } static int sirf_runtime_resume(struct device *dev) { struct sirf_data *data = dev_get_drvdata(dev); + int ret; - if (!data->on_off) - return regulator_enable(data->vcc); + ret = regulator_enable(data->lna); + if (ret) + return ret; + + if (data->on_off) + ret = sirf_set_active(data, true); + else + ret = regulator_enable(data->vcc); + + if (ret) + goto err_disable_lna; + + return 0; + +err_disable_lna: + regulator_disable(data->lna); - return sirf_set_active(data, true); + return ret; } static int __maybe_unused sirf_suspend(struct device *dev) @@ -275,6 +414,8 @@ static int sirf_probe(struct serdev_device *serdev) data->serdev = serdev; data->gdev = gdev; + mutex_init(&data->gdev_mutex); + mutex_init(&data->serdev_mutex); init_waitqueue_head(&data->power_wait); serdev_device_set_drvdata(serdev, data); @@ -290,6 +431,12 @@ static int sirf_probe(struct serdev_device *serdev) goto err_put_device; } + data->lna = devm_regulator_get(dev, "lna"); + if (IS_ERR(data->lna)) { + ret = PTR_ERR(data->lna); + goto err_put_device; + } + data->on_off = devm_gpiod_get_optional(dev, "sirf,onoff", GPIOD_OUT_LOW); if (IS_ERR(data->on_off)) @@ -301,39 +448,53 @@ static int sirf_probe(struct serdev_device *serdev) if (IS_ERR(data->wakeup)) goto err_put_device; - /* - * Configurations where WAKEUP has been left not connected, - * are currently not supported. - */ - if (!data->wakeup) { - dev_err(dev, "no wakeup gpio specified\n"); - ret = -ENODEV; + ret = regulator_enable(data->vcc); + if (ret) goto err_put_device; - } + + /* Wait for chip to boot into hibernate mode. 
*/ + msleep(SIRF_BOOT_DELAY); + } if (data->wakeup) { - ret = gpiod_to_irq(data->wakeup); + ret = gpiod_get_value_cansleep(data->wakeup); if (ret < 0) - goto err_put_device; + goto err_disable_vcc; + data->active = ret; + ret = gpiod_to_irq(data->wakeup); + if (ret < 0) + goto err_disable_vcc; data->irq = ret; - ret = devm_request_threaded_irq(dev, data->irq, NULL, - sirf_wakeup_handler, + ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "wakeup", data); if (ret) - goto err_put_device; + goto err_disable_vcc; } if (data->on_off) { - ret = regulator_enable(data->vcc); - if (ret) - goto err_put_device; + if (!data->wakeup) { + data->active = false; - /* Wait for chip to boot into hibernate mode */ - msleep(SIRF_BOOT_DELAY); + ret = sirf_serdev_open(data); + if (ret) + goto err_disable_vcc; + + msleep(SIRF_REPORT_CYCLE); + sirf_serdev_close(data); + } + + /* Force hibernate mode if already active. */ + if (data->active) { + ret = sirf_set_active(data, false); + if (ret) { + dev_err(dev, "failed to set hibernate mode: %d\n", + ret); + goto err_free_irq; + } + } } if (IS_ENABLED(CONFIG_PM)) { @@ -342,7 +503,7 @@ static int sirf_probe(struct serdev_device *serdev) } else { ret = sirf_runtime_resume(dev); if (ret < 0) - goto err_disable_vcc; + goto err_free_irq; } ret = gnss_register_device(gdev); @@ -356,6 +517,9 @@ err_disable_rpm: pm_runtime_disable(dev); else sirf_runtime_suspend(dev); +err_free_irq: + if (data->wakeup) + free_irq(data->irq, data); err_disable_vcc: if (data->on_off) regulator_disable(data->vcc); @@ -376,6 +540,9 @@ static void sirf_remove(struct serdev_device *serdev) else sirf_runtime_suspend(&serdev->dev); + if (data->wakeup) + free_irq(data->irq, data); + if (data->on_off) regulator_disable(data->vcc); @@ -386,6 +553,7 @@ static void sirf_remove(struct serdev_device *serdev) static const struct of_device_id sirf_of_match[] = { { .compatible = "fastrax,uc430" }, { .compatible = "linx,r4" }, + { .compatible = "wi2wi,w2sg0004" }, { .compatible = "wi2wi,w2sg0008i" }, { .compatible = "wi2wi,w2sg0084i" }, {}, diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b5a2845347ec..3f50526a771f 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -258,6 +258,7 @@ config GPIO_HLWD tristate "Nintendo Wii (Hollywood) GPIO" depends on OF_GPIO select GPIO_GENERIC + select GPIOLIB_IRQCHIP help Select this to support the GPIO controller of the Nintendo Wii. @@ -654,6 +655,15 @@ config GPIO_LOONGSON1 help Say Y or M here to support GPIO on Loongson1 SoCs. +config GPIO_AMD_FCH + tristate "GPIO support for AMD Fusion Controller Hub (G-series SOCs)" + help + This option enables the driver for GPIO on AMD's Fusion Controller Hub, + as found on G-series SOCs (eg. GX-412TC). + + Note: This driver doesn't register itself automatically, as it + needs to be provided with platform-specific configuration. + (See eg. CONFIG_PCENGINES_APU2.) endmenu menu "Port-mapped I/O GPIO drivers" @@ -830,6 +840,13 @@ config GPIO_ADNP enough to represent all pins, but the driver will assume a register layout for 64 pins (8 registers). +config GPIO_GW_PLD + tristate "Gateworks PLD GPIO Expander" + depends on OF_GPIO + help + Say yes here to provide access to the Gateworks I2C PLD GPIO + Expander. This is used at least on the Cambria GW2358-4.
+ config GPIO_MAX7300 tristate "Maxim MAX7300 GPIO expander" select GPIO_MAX730X @@ -1190,6 +1207,13 @@ config GPIO_TPS68470 of the TPS68470 must be available before dependent drivers are loaded. +config GPIO_TQMX86 + tristate "TQ-Systems QTMX86 GPIO" + depends on MFD_TQMX86 || COMPILE_TEST + select GPIOLIB_IRQCHIP + help + This driver supports GPIO on the TQMX86 IO controller. + config GPIO_TWL4030 tristate "TWL4030, TWL5030, and TPS659x0 GPIOs" depends on TWL4030_CORE diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 37628f8dbf70..54d55274b93a 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o obj-$(CONFIG_GPIO_ALTERA) += gpio-altera.o obj-$(CONFIG_GPIO_ALTERA_A10SR) += gpio-altera-a10sr.o +obj-$(CONFIG_GPIO_AMD_FCH) += gpio-amd-fch.o obj-$(CONFIG_GPIO_AMD8111) += gpio-amd8111.o obj-$(CONFIG_GPIO_AMDPT) += gpio-amdpt.o obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizona.o @@ -55,6 +56,7 @@ obj-$(CONFIG_GPIO_FTGPIO010) += gpio-ftgpio010.o obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o +obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o obj-$(CONFIG_GPIO_ICH) += gpio-ich.o @@ -135,6 +137,7 @@ obj-$(CONFIG_GPIO_TPS6586X) += gpio-tps6586x.o obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o obj-$(CONFIG_GPIO_TPS68470) += gpio-tps68470.o +obj-$(CONFIG_GPIO_TQMX86) += gpio-tqmx86.o obj-$(CONFIG_GPIO_TS4800) += gpio-ts4800.o obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index cc33d8986ad3..c4a5b499f53e 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -33,16 +34,13 @@ struct adp5588_gpio { struct mutex lock; /* protect cached dir, dat_out */ /* protect serialized access to the interrupt controller bus */ struct mutex irq_lock; - unsigned gpio_start; - unsigned irq_base; uint8_t dat_out[3]; uint8_t dir[3]; - uint8_t int_lvl[3]; + uint8_t int_lvl_low[3]; + uint8_t int_lvl_high[3]; uint8_t int_en[3]; uint8_t irq_mask[3]; - uint8_t irq_stat[3]; uint8_t int_input_en[3]; - uint8_t int_lvl_cached[3]; }; static int adp5588_gpio_read(struct i2c_client *client, u8 reg) @@ -148,16 +146,11 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip, } #ifdef CONFIG_GPIO_ADP5588_IRQ -static int adp5588_gpio_to_irq(struct gpio_chip *chip, unsigned off) -{ - struct adp5588_gpio *dev = gpiochip_get_data(chip); - - return dev->irq_base + off; -} static void adp5588_irq_bus_lock(struct irq_data *d) { - struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5588_gpio *dev = gpiochip_get_data(gc); mutex_lock(&dev->irq_lock); } @@ -172,7 +165,8 @@ static void adp5588_irq_bus_lock(struct irq_data *d) static void adp5588_irq_bus_sync_unlock(struct irq_data *d) { - struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5588_gpio *dev = gpiochip_get_data(gc); int i; for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { @@ -185,15 +179,9 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d) mutex_unlock(&dev->lock); } - if (dev->int_lvl_cached[i] 
!= dev->int_lvl[i]) { - dev->int_lvl_cached[i] = dev->int_lvl[i]; - adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i, - dev->int_lvl[i]); - } - if (dev->int_en[i] ^ dev->irq_mask[i]) { dev->int_en[i] = dev->irq_mask[i]; - adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i, + adp5588_gpio_write(dev->client, GPI_EM1 + i, dev->int_en[i]); } } @@ -203,41 +191,38 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d) static void adp5588_irq_mask(struct irq_data *d) { - struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); - unsigned gpio = d->irq - dev->irq_base; + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5588_gpio *dev = gpiochip_get_data(gc); - dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio); + dev->irq_mask[ADP5588_BANK(d->hwirq)] &= ~ADP5588_BIT(d->hwirq); } static void adp5588_irq_unmask(struct irq_data *d) { - struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); - unsigned gpio = d->irq - dev->irq_base; + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5588_gpio *dev = gpiochip_get_data(gc); - dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio); + dev->irq_mask[ADP5588_BANK(d->hwirq)] |= ADP5588_BIT(d->hwirq); } static int adp5588_irq_set_type(struct irq_data *d, unsigned int type) { - struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d); - uint16_t gpio = d->irq - dev->irq_base; + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct adp5588_gpio *dev = gpiochip_get_data(gc); + uint16_t gpio = d->hwirq; unsigned bank, bit; - if ((type & IRQ_TYPE_EDGE_BOTH)) { - dev_err(&dev->client->dev, "irq %d: unsupported type %d\n", - d->irq, type); - return -EINVAL; - } - bank = ADP5588_BANK(gpio); bit = ADP5588_BIT(gpio); - if (type & IRQ_TYPE_LEVEL_HIGH) - dev->int_lvl[bank] |= bit; - else if (type & IRQ_TYPE_LEVEL_LOW) - dev->int_lvl[bank] &= ~bit; - else - return -EINVAL; + dev->int_lvl_low[bank] &= ~bit; + dev->int_lvl_high[bank] &= ~bit; + + if (type & IRQ_TYPE_EDGE_BOTH || type & IRQ_TYPE_LEVEL_HIGH) + dev->int_lvl_high[bank] |= bit; + + if (type & IRQ_TYPE_EDGE_BOTH || type & IRQ_TYPE_LEVEL_LOW) + dev->int_lvl_low[bank] |= bit; dev->int_input_en[bank] |= bit; @@ -253,40 +238,32 @@ static struct irq_chip adp5588_irq_chip = { .irq_set_type = adp5588_irq_set_type, }; -static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf) -{ - int ret = i2c_smbus_read_i2c_block_data(client, GPIO_INT_STAT1, 3, buf); - - if (ret < 0) - dev_err(&client->dev, "Read INT_STAT Error\n"); - - return ret; -} - static irqreturn_t adp5588_irq_handler(int irq, void *devid) { struct adp5588_gpio *dev = devid; - unsigned status, bank, bit, pending; - int ret; - status = adp5588_gpio_read(dev->client, INT_STAT); - - if (status & ADP5588_GPI_INT) { - ret = adp5588_gpio_read_intstat(dev->client, dev->irq_stat); - if (ret < 0) - memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat)); - - for (bank = 0, bit = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO); - bank++, bit = 0) { - pending = dev->irq_stat[bank] & dev->irq_mask[bank]; - - while (pending) { - if (pending & (1 << bit)) { - handle_nested_irq(dev->irq_base + - (bank << 3) + bit); - pending &= ~(1 << bit); - - } - bit++; + int status = adp5588_gpio_read(dev->client, INT_STAT); + + if (status & ADP5588_KE_INT) { + int ev_cnt = adp5588_gpio_read(dev->client, KEY_LCK_EC_STAT); + + if (ev_cnt > 0) { + int i; + + for (i = 0; i < (ev_cnt & ADP5588_KEC); i++) { + int key = adp5588_gpio_read(dev->client, + Key_EVENTA + i); + /* GPIN events begin at 97, + * bit 7 indicates logic level + */ 
+ int gpio = (key & 0x7f) - 97; + int lvl = key & (1 << 7); + int bank = ADP5588_BANK(gpio); + int bit = ADP5588_BIT(gpio); + + if ((lvl && dev->int_lvl_high[bank] & bit) || + (!lvl && dev->int_lvl_low[bank] & bit)) + handle_nested_irq(irq_find_mapping( + dev->gpio_chip.irq.domain, gpio)); } } } @@ -299,53 +276,42 @@ static irqreturn_t adp5588_irq_handler(int irq, void *devid) static int adp5588_irq_setup(struct adp5588_gpio *dev) { struct i2c_client *client = dev->client; + int ret; struct adp5588_gpio_platform_data *pdata = dev_get_platdata(&client->dev); - unsigned gpio; - int ret; + int irq_base = pdata ? pdata->irq_base : 0; adp5588_gpio_write(client, CFG, ADP5588_AUTO_INC); adp5588_gpio_write(client, INT_STAT, -1); /* status is W1C */ - adp5588_gpio_read_intstat(client, dev->irq_stat); /* read to clear */ - dev->irq_base = pdata->irq_base; mutex_init(&dev->irq_lock); - for (gpio = 0; gpio < dev->gpio_chip.ngpio; gpio++) { - int irq = gpio + dev->irq_base; - irq_set_chip_data(irq, dev); - irq_set_chip_and_handler(irq, &adp5588_irq_chip, - handle_level_irq); - irq_set_nested_thread(irq, 1); - irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE); - } - - ret = request_threaded_irq(client->irq, - NULL, - adp5588_irq_handler, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - dev_name(&client->dev), dev); + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, adp5588_irq_handler, IRQF_ONESHOT + | IRQF_TRIGGER_FALLING | IRQF_SHARED, + dev_name(&client->dev), dev); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", client->irq); - goto out; + return ret; } + ret = gpiochip_irqchip_add_nested(&dev->gpio_chip, + &adp5588_irq_chip, irq_base, + handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&client->dev, + "could not connect irqchip to gpiochip\n"); + return ret; + } + gpiochip_set_nested_irqchip(&dev->gpio_chip, + &adp5588_irq_chip, + client->irq); - dev->gpio_chip.to_irq = adp5588_gpio_to_irq; adp5588_gpio_write(client, CFG, - ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_GPI_INT); + ADP5588_AUTO_INC | ADP5588_INT_CFG | ADP5588_KE_IEN); return 0; - -out: - dev->irq_base = 0; - return ret; -} - -static void adp5588_irq_teardown(struct adp5588_gpio *dev) -{ - if (dev->irq_base) - free_irq(dev->client->irq, dev); } #else @@ -357,24 +323,16 @@ static int adp5588_irq_setup(struct adp5588_gpio *dev) return 0; } -static void adp5588_irq_teardown(struct adp5588_gpio *dev) -{ -} #endif /* CONFIG_GPIO_ADP5588_IRQ */ -static int adp5588_gpio_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int adp5588_gpio_probe(struct i2c_client *client) { struct adp5588_gpio_platform_data *pdata = dev_get_platdata(&client->dev); struct adp5588_gpio *dev; struct gpio_chip *gc; int ret, i, revid; - - if (!pdata) { - dev_err(&client->dev, "missing platform data\n"); - return -ENODEV; - } + unsigned int pullup_dis_mask = 0; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { @@ -394,18 +352,24 @@ static int adp5588_gpio_probe(struct i2c_client *client, gc->get = adp5588_gpio_get_value; gc->set = adp5588_gpio_set_value; gc->can_sleep = true; + gc->base = -1; + gc->parent = &client->dev; + + if (pdata) { + gc->base = pdata->gpio_start; + gc->names = pdata->names; + pullup_dis_mask = pdata->pullup_dis_mask; + } - gc->base = pdata->gpio_start; gc->ngpio = ADP5588_MAXGPIO; gc->label = client->name; gc->owner = THIS_MODULE; - gc->names = pdata->names; mutex_init(&dev->lock); ret = adp5588_gpio_read(dev->client, DEV_ID); if (ret < 0) - goto err; + return 
ret; revid = ret & ADP5588_DEVICE_ID_MASK; @@ -414,30 +378,27 @@ static int adp5588_gpio_probe(struct i2c_client *client, dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i); ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0); ret |= adp5588_gpio_write(client, GPIO_PULL1 + i, - (pdata->pullup_dis_mask >> (8 * i)) & 0xFF); + (pullup_dis_mask >> (8 * i)) & 0xFF); ret |= adp5588_gpio_write(client, GPIO_INT_EN1 + i, 0); if (ret) - goto err; + return ret; } - if (pdata->irq_base) { + if (client->irq) { if (WA_DELAYED_READOUT_REVID(revid)) { dev_warn(&client->dev, "GPIO int not supported\n"); } else { ret = adp5588_irq_setup(dev); if (ret) - goto err; + return ret; } } ret = devm_gpiochip_add_data(&client->dev, &dev->gpio_chip, dev); if (ret) - goto err_irq; + return ret; - dev_info(&client->dev, "IRQ Base: %d Rev.: %d\n", - pdata->irq_base, revid); - - if (pdata->setup) { + if (pdata && pdata->setup) { ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context); if (ret < 0) dev_warn(&client->dev, "setup failed, %d\n", ret); @@ -446,11 +407,6 @@ static int adp5588_gpio_probe(struct i2c_client *client, i2c_set_clientdata(client, dev); return 0; - -err_irq: - adp5588_irq_teardown(dev); -err: - return ret; } static int adp5588_gpio_remove(struct i2c_client *client) @@ -460,7 +416,7 @@ static int adp5588_gpio_remove(struct i2c_client *client) struct adp5588_gpio *dev = i2c_get_clientdata(client); int ret; - if (pdata->teardown) { + if (pdata && pdata->teardown) { ret = pdata->teardown(client, dev->gpio_chip.base, dev->gpio_chip.ngpio, pdata->context); @@ -470,7 +426,7 @@ static int adp5588_gpio_remove(struct i2c_client *client) } } - if (dev->irq_base) + if (dev->client->irq) free_irq(dev->client->irq, dev); return 0; @@ -480,14 +436,22 @@ static const struct i2c_device_id adp5588_gpio_id[] = { {DRV_NAME, 0}, {} }; - MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id); +#ifdef CONFIG_OF +static const struct of_device_id adp5588_gpio_of_id[] = { + { .compatible = "adi," DRV_NAME, }, + {}, +}; +MODULE_DEVICE_TABLE(of, adp5588_gpio_of_id); +#endif + static struct i2c_driver adp5588_gpio_driver = { .driver = { - .name = DRV_NAME, - }, - .probe = adp5588_gpio_probe, + .name = DRV_NAME, + .of_match_table = of_match_ptr(adp5588_gpio_of_id), + }, + .probe_new = adp5588_gpio_probe, .remove = adp5588_gpio_remove, .id_table = adp5588_gpio_id, }; diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 6b11f1314248..1cea4efccf7c 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c @@ -58,17 +58,20 @@ static void altr_a10sr_gpio_set(struct gpio_chip *chip, unsigned int offset, static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc, unsigned int nr) { - if (nr >= (ALTR_A10SR_IN_VALID_RANGE_LO - ALTR_A10SR_LED_VALID_SHIFT)) - return 0; - return -EINVAL; + if (nr < (ALTR_A10SR_IN_VALID_RANGE_LO - ALTR_A10SR_LED_VALID_SHIFT)) + return -EINVAL; + + return 0; } static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, unsigned int nr, int value) { - if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) - return 0; - return -EINVAL; + if (nr > (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) + return -EINVAL; + + altr_a10sr_gpio_set(gc, nr, value); + return 0; } static const struct gpio_chip altr_a10sr_gc = { diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 8c3ff6e2366f..748fdd4e9a53 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -32,9 +32,9 @@ * struct 
altera_gpio_chip * @mmchip : memory mapped chip structure. * @gpio_lock : synchronization lock so that new irq/set/get requests - will be blocked until the current one completes. +* will be blocked until the current one completes. * @interrupt_trigger : specifies the hardware configured IRQ trigger type - (rising, falling, both, high) +* (rising, falling, both, high) * @mapped_irq : kernel mapped irq number. */ struct altera_gpio_chip { diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c new file mode 100644 index 000000000000..38c3f4a3d4aa --- /dev/null +++ b/drivers/gpio/gpio-amd-fch.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * GPIO driver for the AMD G series FCH (eg. GX-412TC) + * + * Copyright (C) 2018 metux IT consult + * Author: Enrico Weigelt, metux IT consult + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define AMD_FCH_MMIO_BASE 0xFED80000 +#define AMD_FCH_GPIO_BANK0_BASE 0x1500 +#define AMD_FCH_GPIO_SIZE 0x0300 + +#define AMD_FCH_GPIO_FLAG_DIRECTION BIT(23) +#define AMD_FCH_GPIO_FLAG_WRITE BIT(22) +#define AMD_FCH_GPIO_FLAG_READ BIT(16) + +static struct resource amd_fch_gpio_iores = + DEFINE_RES_MEM_NAMED( + AMD_FCH_MMIO_BASE + AMD_FCH_GPIO_BANK0_BASE, + AMD_FCH_GPIO_SIZE, + "amd-fch-gpio-iomem"); + +struct amd_fch_gpio_priv { + struct platform_device *pdev; + struct gpio_chip gc; + void __iomem *base; + struct amd_fch_gpio_pdata *pdata; + spinlock_t lock; +}; + +static void __iomem *amd_fch_gpio_addr(struct amd_fch_gpio_priv *priv, + unsigned int gpio) +{ + return priv->base + priv->pdata->gpio_reg[gpio]*sizeof(u32); +} + +static int amd_fch_gpio_direction_input(struct gpio_chip *gc, + unsigned int offset) +{ + unsigned long flags; + struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc); + void __iomem *ptr = amd_fch_gpio_addr(priv, offset); + + spin_lock_irqsave(&priv->lock, flags); + writel_relaxed(readl_relaxed(ptr) & ~AMD_FCH_GPIO_FLAG_DIRECTION, ptr); + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int amd_fch_gpio_direction_output(struct gpio_chip *gc, + unsigned int gpio, int value) +{ + unsigned long flags; + struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc); + void __iomem *ptr = amd_fch_gpio_addr(priv, gpio); + u32 val; + + spin_lock_irqsave(&priv->lock, flags); + + val = readl_relaxed(ptr); + if (value) + val |= AMD_FCH_GPIO_FLAG_WRITE; + else + val &= ~AMD_FCH_GPIO_FLAG_WRITE; + + writel_relaxed(val | AMD_FCH_GPIO_FLAG_DIRECTION, ptr); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio) +{ + int ret; + unsigned long flags; + struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc); + void __iomem *ptr = amd_fch_gpio_addr(priv, gpio); + + spin_lock_irqsave(&priv->lock, flags); + ret = (readl_relaxed(ptr) & AMD_FCH_GPIO_FLAG_DIRECTION); + spin_unlock_irqrestore(&priv->lock, flags); + + return ret; +} + +static void amd_fch_gpio_set(struct gpio_chip *gc, + unsigned int gpio, int value) +{ + unsigned long flags; + struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc); + void __iomem *ptr = amd_fch_gpio_addr(priv, gpio); + u32 mask; + + spin_lock_irqsave(&priv->lock, flags); + + mask = readl_relaxed(ptr); + if (value) + mask |= AMD_FCH_GPIO_FLAG_WRITE; + else + mask &= ~AMD_FCH_GPIO_FLAG_WRITE; + writel_relaxed(mask, ptr); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int amd_fch_gpio_get(struct gpio_chip *gc, + unsigned int offset) +{ + 
unsigned long flags; + int ret; + struct amd_fch_gpio_priv *priv = gpiochip_get_data(gc); + void __iomem *ptr = amd_fch_gpio_addr(priv, offset); + + spin_lock_irqsave(&priv->lock, flags); + ret = (readl_relaxed(ptr) & AMD_FCH_GPIO_FLAG_READ); + spin_unlock_irqrestore(&priv->lock, flags); + + return ret; +} + +static int amd_fch_gpio_request(struct gpio_chip *chip, + unsigned int gpio_pin) +{ + return 0; +} + +static int amd_fch_gpio_probe(struct platform_device *pdev) +{ + struct amd_fch_gpio_priv *priv; + struct amd_fch_gpio_pdata *pdata; + + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "no platform_data\n"); + return -ENOENT; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pdata = pdata; + priv->pdev = pdev; + + priv->gc.owner = THIS_MODULE; + priv->gc.parent = &pdev->dev; + priv->gc.label = dev_name(&pdev->dev); + priv->gc.ngpio = priv->pdata->gpio_num; + priv->gc.names = priv->pdata->gpio_names; + priv->gc.base = -1; + priv->gc.request = amd_fch_gpio_request; + priv->gc.direction_input = amd_fch_gpio_direction_input; + priv->gc.direction_output = amd_fch_gpio_direction_output; + priv->gc.get_direction = amd_fch_gpio_get_direction; + priv->gc.get = amd_fch_gpio_get; + priv->gc.set = amd_fch_gpio_set; + + spin_lock_init(&priv->lock); + + priv->base = devm_ioremap_resource(&pdev->dev, &amd_fch_gpio_iores); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + platform_set_drvdata(pdev, priv); + + return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv); +} + +static struct platform_driver amd_fch_gpio_driver = { + .driver = { + .name = AMD_FCH_GPIO_DRIVER_NAME, + }, + .probe = amd_fch_gpio_probe, +}; + +module_platform_driver(amd_fch_gpio_driver); + +MODULE_AUTHOR("Enrico Weigelt, metux IT consult "); +MODULE_DESCRIPTION("AMD G-series FCH GPIO driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" AMD_FCH_GPIO_DRIVER_NAME); diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index 58531d8b8c6e..14d1f4c933b6 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c @@ -1,28 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * gpio-crystalcove.c - Intel Crystal Cove GPIO Driver + * Intel Crystal Cove GPIO Driver * * Copyright (C) 2012, 2014 Intel Corporation. All rights reserved. * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * * Author: Yang, Bin */ +#include +#include #include +#include #include #include -#include -#include -#include #include -#include +#include #define CRYSTALCOVE_GPIO_NUM 16 #define CRYSTALCOVE_VGPIO_NUM 95 @@ -279,8 +271,8 @@ static struct irq_chip crystalcove_irqchip = { static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) { struct crystalcove_gpio *cg = data; + unsigned long pending; unsigned int p0, p1; - int pending; int gpio; unsigned int virq; @@ -293,11 +285,9 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) pending = p0 | p1 << 8; - for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) { - if (pending & BIT(gpio)) { - virq = irq_find_mapping(cg->chip.irq.domain, gpio); - handle_nested_irq(virq); - } + for_each_set_bit(gpio, &pending, CRYSTALCOVE_GPIO_NUM) { + virq = irq_find_mapping(cg->chip.irq.domain, gpio); + handle_nested_irq(virq); } return IRQ_HANDLED; diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index bdb29e51b417..188b8e5c8e67 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -198,7 +198,6 @@ static int davinci_gpio_probe(struct platform_device *pdev) struct davinci_gpio_controller *chips; struct davinci_gpio_platform_data *pdata; struct device *dev = &pdev->dev; - struct resource *res; pdata = davinci_gpio_get_pdata(pdev); if (!pdata) { @@ -236,8 +235,7 @@ static int davinci_gpio_probe(struct platform_device *pdev) if (!chips) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - gpio_base = devm_ioremap_resource(dev, res); + gpio_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio_base)) return PTR_ERR(gpio_base); diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index e0d6a0a7bc69..f0223cee9774 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset) static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) { - return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + struct sprd_eic *sprd_eic = gpiochip_get_data(chip); + + switch (sprd_eic->type) { + case SPRD_EIC_DEBOUNCE: + return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); + case SPRD_EIC_ASYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA); + case SPRD_EIC_SYNC: + return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA); + default: + return -ENOTSUPP; + } } static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) @@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: + sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0); sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); irq_set_handler_locked(data, handle_edge_irq); break; @@ -420,6 +432,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) default: return -ENOTSUPP; } + break; default: dev_err(chip->parent, "Unsupported EIC type.\n"); return -ENOTSUPP; diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c index 13350c9d7f5e..0896c825b312 100644 --- a/drivers/gpio/gpio-f7188x.c +++ b/drivers/gpio/gpio-f7188x.c @@ -39,8 +39,10 @@ #define SIO_F71889_ID 0x0909 /* F71889 chipset ID */ #define SIO_F71889A_ID 0x1005 /* F71889A chipset ID */ #define SIO_F81866_ID 0x1010 /* F81866 chipset ID */ +#define SIO_F81804_ID 0x1502 /* F81804 chipset ID, same for f81966 */ -enum chips { f71869, 
f71869a, f71882fg, f71889a, f71889f, f81866 }; + +enum chips { f71869, f71869a, f71882fg, f71889a, f71889f, f81866, f81804 }; static const char * const f7188x_names[] = { "f71869", @@ -49,6 +51,7 @@ static const char * const f7188x_names[] = { "f71889a", "f71889f", "f81866", + "f81804", }; struct f7188x_sio { @@ -223,6 +226,18 @@ static struct f7188x_gpio_bank f81866_gpio_bank[] = { F7188X_GPIO_BANK(80, 8, 0x88), }; + +static struct f7188x_gpio_bank f81804_gpio_bank[] = { + F7188X_GPIO_BANK(0, 8, 0xF0), + F7188X_GPIO_BANK(10, 8, 0xE0), + F7188X_GPIO_BANK(20, 8, 0xD0), + F7188X_GPIO_BANK(50, 8, 0xA0), + F7188X_GPIO_BANK(60, 8, 0x90), + F7188X_GPIO_BANK(70, 8, 0x80), + F7188X_GPIO_BANK(90, 8, 0x98), +}; + + static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset) { int err; @@ -407,6 +422,10 @@ static int f7188x_gpio_probe(struct platform_device *pdev) data->nr_bank = ARRAY_SIZE(f81866_gpio_bank); data->bank = f81866_gpio_bank; break; + case f81804: + data->nr_bank = ARRAY_SIZE(f81804_gpio_bank); + data->bank = f81804_gpio_bank; + break; default: return -ENODEV; } @@ -469,6 +488,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio) case SIO_F81866_ID: sio->type = f81866; break; + case SIO_F81804_ID: + sio->type = f81804; + break; default: pr_info(DRVNAME ": Unsupported Fintek device 0x%04x\n", devid); goto err; diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c index 95f578804b0e..45fe125823a8 100644 --- a/drivers/gpio/gpio-ftgpio010.c +++ b/drivers/gpio/gpio-ftgpio010.c @@ -41,12 +41,14 @@ * struct ftgpio_gpio - Gemini GPIO state container * @dev: containing device for this instance * @gc: gpiochip for this instance + * @irq: irqchip for this instance * @base: remapped I/O-memory base * @clk: silicon clock */ struct ftgpio_gpio { struct device *dev; struct gpio_chip gc; + struct irq_chip irq; void __iomem *base; struct clk *clk; }; @@ -134,14 +136,6 @@ static int ftgpio_gpio_set_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip ftgpio_gpio_irqchip = { - .name = "FTGPIO010", - .irq_ack = ftgpio_gpio_ack_irq, - .irq_mask = ftgpio_gpio_mask_irq, - .irq_unmask = ftgpio_gpio_unmask_irq, - .irq_set_type = ftgpio_gpio_set_irq_type, -}; - static void ftgpio_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -297,14 +291,20 @@ static int ftgpio_gpio_probe(struct platform_device *pdev) /* Clear any use of debounce */ writel(0x0, g->base + GPIO_DEBOUNCE_EN); - ret = gpiochip_irqchip_add(&g->gc, &ftgpio_gpio_irqchip, + g->irq.name = "FTGPIO010"; + g->irq.irq_ack = ftgpio_gpio_ack_irq; + g->irq.irq_mask = ftgpio_gpio_mask_irq; + g->irq.irq_unmask = ftgpio_gpio_unmask_irq; + g->irq.irq_set_type = ftgpio_gpio_set_irq_type; + + ret = gpiochip_irqchip_add(&g->gc, &g->irq, 0, handle_bad_irq, IRQ_TYPE_NONE); if (ret) { dev_info(dev, "could not add irqchip\n"); goto dis_clk; } - gpiochip_set_chained_irqchip(&g->gc, &ftgpio_gpio_irqchip, + gpiochip_set_chained_irqchip(&g->gc, &g->irq, irq, ftgpio_gpio_irq_handler); platform_set_drvdata(pdev, g); diff --git a/drivers/gpio/gpio-gw-pld.c b/drivers/gpio/gpio-gw-pld.c new file mode 100644 index 000000000000..242112ff60ee --- /dev/null +++ b/drivers/gpio/gpio-gw-pld.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Gateworks I2C PLD GPIO expander +// +// Copyright (C) 2019 Linus Walleij +// +// Based on code and know-how from the OpenWrt driver: +// Copyright (C) 2009 Gateworks Corporation +// Authors: Chris 
Lang, Imre Kaloz + +#include +#include +#include +#include +#include +#include + +/** + * struct gw_pld - State container for Gateworks PLD + * @chip: GPIO chip instance + * @client: I2C client + * @out: shadow register for the output byte + */ +struct gw_pld { + struct gpio_chip chip; + struct i2c_client *client; + u8 out; +}; + +/* + * The Gateworks I2C PLD chip only exposes one read and one write register. + * Writing a "one" bit (to match the reset state) lets that pin be used as an + * input. It is an open-drain model. + */ +static int gw_pld_input8(struct gpio_chip *gc, unsigned offset) +{ + struct gw_pld *gw = gpiochip_get_data(gc); + + gw->out |= BIT(offset); + return i2c_smbus_write_byte(gw->client, gw->out); +} + +static int gw_pld_get8(struct gpio_chip *gc, unsigned offset) +{ + struct gw_pld *gw = gpiochip_get_data(gc); + s32 val; + + val = i2c_smbus_read_byte(gw->client); + + return (val < 0) ? 0 : !!(val & BIT(offset)); +} + +static int gw_pld_output8(struct gpio_chip *gc, unsigned offset, int value) +{ + struct gw_pld *gw = gpiochip_get_data(gc); + + if (value) + gw->out |= BIT(offset); + else + gw->out &= ~BIT(offset); + + return i2c_smbus_write_byte(gw->client, gw->out); +} + +static void gw_pld_set8(struct gpio_chip *gc, unsigned offset, int value) +{ + gw_pld_output8(gc, offset, value); +} + +static int gw_pld_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device_node *np = dev->of_node; + struct gw_pld *gw; + int ret; + + gw = devm_kzalloc(dev, sizeof(*gw), GFP_KERNEL); + if (!gw) + return -ENOMEM; + + gw->chip.base = -1; + gw->chip.can_sleep = true; + gw->chip.parent = dev; + gw->chip.of_node = np; + gw->chip.owner = THIS_MODULE; + gw->chip.label = dev_name(dev); + gw->chip.ngpio = 8; + gw->chip.direction_input = gw_pld_input8; + gw->chip.get = gw_pld_get8; + gw->chip.direction_output = gw_pld_output8; + gw->chip.set = gw_pld_set8; + gw->client = client; + + /* + * The Gateworks I2C PLD chip does not properly send the acknowledge + * bit at all times, but we can still use the standard i2c_smbus + * functions by simply ignoring this bit.
+ */ + client->flags |= I2C_M_IGNORE_NAK; + gw->out = 0xFF; + + i2c_set_clientdata(client, gw); + + ret = devm_gpiochip_add_data(dev, &gw->chip, gw); + if (ret) + return ret; + + dev_info(dev, "registered Gateworks PLD GPIO device\n"); + + return 0; +} + +static const struct i2c_device_id gw_pld_id[] = { + { "gw-pld", }, + { } +}; +MODULE_DEVICE_TABLE(i2c, gw_pld_id); + +static const struct of_device_id gw_pld_dt_ids[] = { + { .compatible = "gateworks,pld-gpio", }, + { }, +}; +MODULE_DEVICE_TABLE(of, gw_pld_dt_ids); + +static struct i2c_driver gw_pld_driver = { + .driver = { + .name = "gw_pld", + .of_match_table = gw_pld_dt_ids, + }, + .probe = gw_pld_probe, + .id_table = gw_pld_id, +}; +module_i2c_driver(gw_pld_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Linus Walleij "); diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c index a63136a68ba3..a7b17897356e 100644 --- a/drivers/gpio/gpio-hlwd.c +++ b/drivers/gpio/gpio-hlwd.c @@ -48,9 +48,163 @@ struct hlwd_gpio { struct gpio_chip gpioc; + struct irq_chip irqc; void __iomem *regs; + int irq; + u32 edge_emulation; + u32 rising_edge, falling_edge; }; +static void hlwd_gpio_irqhandler(struct irq_desc *desc) +{ + struct hlwd_gpio *hlwd = + gpiochip_get_data(irq_desc_get_handler_data(desc)); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long flags; + unsigned long pending; + int hwirq; + u32 emulated_pending; + + spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); + pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG); + pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK); + + /* Treat interrupts due to edge trigger emulation separately */ + emulated_pending = hlwd->edge_emulation & pending; + pending &= ~emulated_pending; + if (emulated_pending) { + u32 level, rising, falling; + + level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL); + rising = level & emulated_pending; + falling = ~level & emulated_pending; + + /* Invert the levels */ + iowrite32be(level ^ emulated_pending, + hlwd->regs + HW_GPIOB_INTLVL); + + /* Ack all emulated-edge interrupts */ + iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG); + + /* Signal interrupts only on the correct edge */ + rising &= hlwd->rising_edge; + falling &= hlwd->falling_edge; + + /* Mark emulated interrupts as pending */ + pending |= rising | falling; + } + spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); + + chained_irq_enter(chip, desc); + + for_each_set_bit(hwirq, &pending, 32) { + int irq = irq_find_mapping(hlwd->gpioc.irq.domain, hwirq); + + generic_handle_irq(irq); + } + + chained_irq_exit(chip, desc); +} + +static void hlwd_gpio_irq_ack(struct irq_data *data) +{ + struct hlwd_gpio *hlwd = + gpiochip_get_data(irq_data_get_irq_chip_data(data)); + + iowrite32be(BIT(data->hwirq), hlwd->regs + HW_GPIOB_INTFLAG); +} + +static void hlwd_gpio_irq_mask(struct irq_data *data) +{ + struct hlwd_gpio *hlwd = + gpiochip_get_data(irq_data_get_irq_chip_data(data)); + unsigned long flags; + u32 mask; + + spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); + mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK); + mask &= ~BIT(data->hwirq); + iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK); + spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); +} + +static void hlwd_gpio_irq_unmask(struct irq_data *data) +{ + struct hlwd_gpio *hlwd = + gpiochip_get_data(irq_data_get_irq_chip_data(data)); + unsigned long flags; + u32 mask; + + spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); + mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK); + mask |= BIT(data->hwirq); + 
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK); + spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); +} + +static void hlwd_gpio_irq_enable(struct irq_data *data) +{ + hlwd_gpio_irq_ack(data); + hlwd_gpio_irq_unmask(data); +} + +static void hlwd_gpio_irq_setup_emulation(struct hlwd_gpio *hlwd, int hwirq, + unsigned int flow_type) +{ + u32 level, state; + + /* Set the trigger level to the inactive level */ + level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL); + state = ioread32be(hlwd->regs + HW_GPIOB_IN) & BIT(hwirq); + level &= ~BIT(hwirq); + level |= state ^ BIT(hwirq); + iowrite32be(level, hlwd->regs + HW_GPIOB_INTLVL); + + hlwd->edge_emulation |= BIT(hwirq); + hlwd->rising_edge &= ~BIT(hwirq); + hlwd->falling_edge &= ~BIT(hwirq); + if (flow_type & IRQ_TYPE_EDGE_RISING) + hlwd->rising_edge |= BIT(hwirq); + if (flow_type & IRQ_TYPE_EDGE_FALLING) + hlwd->falling_edge |= BIT(hwirq); +} + +static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) +{ + struct hlwd_gpio *hlwd = + gpiochip_get_data(irq_data_get_irq_chip_data(data)); + unsigned long flags; + u32 level; + + spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags); + + hlwd->edge_emulation &= ~BIT(data->hwirq); + + switch (flow_type) { + case IRQ_TYPE_LEVEL_HIGH: + level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL); + level |= BIT(data->hwirq); + iowrite32be(level, hlwd->regs + HW_GPIOB_INTLVL); + break; + case IRQ_TYPE_LEVEL_LOW: + level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL); + level &= ~BIT(data->hwirq); + iowrite32be(level, hlwd->regs + HW_GPIOB_INTLVL); + break; + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_EDGE_BOTH: + hlwd_gpio_irq_setup_emulation(hlwd, data->hwirq, flow_type); + break; + default: + spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); + return -EINVAL; + } + + spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags); + return 0; +} + static int hlwd_gpio_probe(struct platform_device *pdev) { struct hlwd_gpio *hlwd; @@ -92,7 +246,43 @@ static int hlwd_gpio_probe(struct platform_device *pdev) ngpios = 32; hlwd->gpioc.ngpio = ngpios; - return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd); + res = devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd); + if (res) + return res; + + /* Mask and ack all interrupts */ + iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK); + iowrite32be(0xffffffff, hlwd->regs + HW_GPIOB_INTFLAG); + + /* + * If this GPIO controller is not marked as an interrupt controller in + * the DT, return. 
+ */ + if (!of_property_read_bool(pdev->dev.of_node, "interrupt-controller")) + return 0; + + hlwd->irq = platform_get_irq(pdev, 0); + if (hlwd->irq < 0) { + dev_info(&pdev->dev, "platform_get_irq returned %d\n", + hlwd->irq); + return hlwd->irq; + } + + hlwd->irqc.name = dev_name(&pdev->dev); + hlwd->irqc.irq_mask = hlwd_gpio_irq_mask; + hlwd->irqc.irq_unmask = hlwd_gpio_irq_unmask; + hlwd->irqc.irq_enable = hlwd_gpio_irq_enable; + hlwd->irqc.irq_set_type = hlwd_gpio_irq_set_type; + + res = gpiochip_irqchip_add(&hlwd->gpioc, &hlwd->irqc, 0, + handle_level_irq, IRQ_TYPE_NONE); + if (res) + return res; + + gpiochip_set_chained_irqchip(&hlwd->gpioc, &hlwd->irqc, + hlwd->irq, hlwd_gpio_irqhandler); + + return 0; } static const struct of_device_id hlwd_gpio_match[] = { diff --git a/drivers/gpio/gpio-madera.c b/drivers/gpio/gpio-madera.c index 7ba68d1a0932..c9dad0543672 100644 --- a/drivers/gpio/gpio-madera.c +++ b/drivers/gpio/gpio-madera.c @@ -107,7 +107,7 @@ static void madera_gpio_set(struct gpio_chip *chip, unsigned int offset, MADERA_GPIO1_CTRL_1 + reg_offset, ret); } -static struct gpio_chip madera_gpio_chip = { +static const struct gpio_chip madera_gpio_chip = { .label = "madera", .owner = THIS_MODULE, .request = gpiochip_generic_request, diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index 51c7d1b84c2e..0c076dce9e17 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c @@ -31,8 +31,6 @@ #define IOH_IRQ_BASE 0 -#define PCI_VENDOR_ID_ROHM 0x10DB - struct ioh_reg_comn { u32 ien; u32 istatus; diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 6a50f9f59c90..154d959e8993 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -47,6 +47,7 @@ enum { struct gpio_mockup_line_status { int dir; int value; + int pull; }; struct gpio_mockup_chip { @@ -54,12 +55,13 @@ struct gpio_mockup_chip { struct gpio_mockup_line_status *lines; struct irq_sim irqsim; struct dentry *dbg_dir; + struct mutex lock; }; struct gpio_mockup_dbgfs_private { struct gpio_mockup_chip *chip; struct gpio_desc *desc; - int offset; + unsigned int offset; }; static int gpio_mockup_ranges[GPIO_MOCKUP_MAX_RANGES]; @@ -82,29 +84,66 @@ static int gpio_mockup_range_ngpio(unsigned int index) return gpio_mockup_ranges[index * 2 + 1]; } +static int __gpio_mockup_get(struct gpio_mockup_chip *chip, + unsigned int offset) +{ + return chip->lines[offset].value; +} + static int gpio_mockup_get(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); + int val; - return chip->lines[offset].value; + mutex_lock(&chip->lock); + val = __gpio_mockup_get(chip, offset); + mutex_unlock(&chip->lock); + + return val; } -static void gpio_mockup_set(struct gpio_chip *gc, - unsigned int offset, int value) +static int gpio_mockup_get_multiple(struct gpio_chip *gc, + unsigned long *mask, unsigned long *bits) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); + unsigned int bit, val; + + mutex_lock(&chip->lock); + for_each_set_bit(bit, mask, gc->ngpio) { + val = __gpio_mockup_get(chip, bit); + __assign_bit(bit, bits, val); + } + mutex_unlock(&chip->lock); + + return 0; +} +static void __gpio_mockup_set(struct gpio_mockup_chip *chip, + unsigned int offset, int value) +{ chip->lines[offset].value = !!value; } +static void gpio_mockup_set(struct gpio_chip *gc, + unsigned int offset, int value) +{ + struct gpio_mockup_chip *chip = gpiochip_get_data(gc); + + mutex_lock(&chip->lock); + __gpio_mockup_set(chip, offset, value); + 
mutex_unlock(&chip->lock); +} + static void gpio_mockup_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { + struct gpio_mockup_chip *chip = gpiochip_get_data(gc); unsigned int bit; + mutex_lock(&chip->lock); for_each_set_bit(bit, mask, gc->ngpio) - gpio_mockup_set(gc, bit, test_bit(bit, bits)); - + __gpio_mockup_set(chip, bit, test_bit(bit, bits)); + mutex_unlock(&chip->lock); } static int gpio_mockup_dirout(struct gpio_chip *gc, @@ -112,8 +151,10 @@ static int gpio_mockup_dirout(struct gpio_chip *gc, { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); - gpio_mockup_set(gc, offset, value); + mutex_lock(&chip->lock); chip->lines[offset].dir = GPIO_MOCKUP_DIR_OUT; + __gpio_mockup_set(chip, offset, value); + mutex_unlock(&chip->lock); return 0; } @@ -122,7 +163,9 @@ static int gpio_mockup_dirin(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); + mutex_lock(&chip->lock); chip->lines[offset].dir = GPIO_MOCKUP_DIR_IN; + mutex_unlock(&chip->lock); return 0; } @@ -130,8 +173,13 @@ static int gpio_mockup_dirin(struct gpio_chip *gc, unsigned int offset) static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) { struct gpio_mockup_chip *chip = gpiochip_get_data(gc); + int direction; - return !chip->lines[offset].dir; + mutex_lock(&chip->lock); + direction = !chip->lines[offset].dir; + mutex_unlock(&chip->lock); + + return direction; } static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) @@ -141,15 +189,56 @@ static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) return irq_sim_irqnum(&chip->irqsim, offset); } -static ssize_t gpio_mockup_event_write(struct file *file, - const char __user *usr_buf, - size_t size, loff_t *ppos) +static void gpio_mockup_free(struct gpio_chip *gc, unsigned int offset) +{ + struct gpio_mockup_chip *chip = gpiochip_get_data(gc); + + __gpio_mockup_set(chip, offset, chip->lines[offset].pull); +} + +static ssize_t gpio_mockup_debugfs_read(struct file *file, + char __user *usr_buf, + size_t size, loff_t *ppos) { struct gpio_mockup_dbgfs_private *priv; struct gpio_mockup_chip *chip; struct seq_file *sfile; + struct gpio_chip *gc; + char buf[3]; + int val, rv; + + if (*ppos != 0) + return 0; + + sfile = file->private_data; + priv = sfile->private; + chip = priv->chip; + gc = &chip->gc; + + val = gpio_mockup_get(gc, priv->offset); + snprintf(buf, sizeof(buf), "%d\n", val); + + rv = copy_to_user(usr_buf, buf, sizeof(buf)); + if (rv) + return rv; + + return sizeof(buf) - 1; +} + +static ssize_t gpio_mockup_debugfs_write(struct file *file, + const char __user *usr_buf, + size_t size, loff_t *ppos) +{ + struct gpio_mockup_dbgfs_private *priv; + int rv, val, curr, irq, irq_type; + struct gpio_mockup_chip *chip; + struct seq_file *sfile; struct gpio_desc *desc; - int rv, val; + struct gpio_chip *gc; + struct irq_sim *sim; + + if (*ppos != 0) + return -EINVAL; rv = kstrtoint_from_user(usr_buf, size, 0, &val); if (rv) @@ -159,24 +248,70 @@ static ssize_t gpio_mockup_event_write(struct file *file, sfile = file->private_data; priv = sfile->private; - desc = priv->desc; chip = priv->chip; + gc = &chip->gc; + desc = &gc->gpiodev->descs[priv->offset]; + sim = &chip->irqsim; + + mutex_lock(&chip->lock); + + if (test_bit(FLAG_REQUESTED, &desc->flags) && + !test_bit(FLAG_IS_OUT, &desc->flags)) { + curr = __gpio_mockup_get(chip, priv->offset); + if (curr == val) + goto out; + + irq = irq_sim_irqnum(sim, priv->offset); + irq_type = 
irq_get_trigger_type(irq); + + if ((val == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) || + (val == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING))) + irq_sim_fire(sim, priv->offset); + } + + /* Change the value unless we're actively driving the line. */ + if (!test_bit(FLAG_REQUESTED, &desc->flags) || + !test_bit(FLAG_IS_OUT, &desc->flags)) + __gpio_mockup_set(chip, priv->offset, val); - gpiod_set_value_cansleep(desc, val); - irq_sim_fire(&chip->irqsim, priv->offset); +out: + chip->lines[priv->offset].pull = val; + mutex_unlock(&chip->lock); return size; } -static int gpio_mockup_event_open(struct inode *inode, struct file *file) +static int gpio_mockup_debugfs_open(struct inode *inode, struct file *file) { return single_open(file, NULL, inode->i_private); } -static const struct file_operations gpio_mockup_event_ops = { +/* + * Each mockup chip is represented by a directory named after the chip's device + * name under /sys/kernel/debug/gpio-mockup/. Each line is represented by + * a file using the line's offset as the name under the chip's directory. + * + * Reading from the line's file yields the current *value*, writing to the + * line's file changes the current *pull*. Default pull for mockup lines is + * down. + * + * Examples: + * - when a line pulled down is requested in output mode and driven high, its + * value will return to 0 once it's released + * - when the line is requested in output mode and driven high, writing 0 to + * the corresponding debugfs file will change the pull to down but the + * reported value will still be 1 until the line is released + * - line requested in input mode always reports the same value as its pull + * configuration + * - when the line is requested in input mode and monitored for events, writing + * the same value to the debugfs file will be a noop, while writing the + * opposite value will generate a dummy interrupt with an appropriate edge + */ +static const struct file_operations gpio_mockup_debugfs_ops = { .owner = THIS_MODULE, - .open = gpio_mockup_event_open, - .write = gpio_mockup_event_write, + .open = gpio_mockup_debugfs_open, + .read = gpio_mockup_debugfs_read, + .write = gpio_mockup_debugfs_write, .llseek = no_llseek, }; @@ -184,7 +319,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev, struct gpio_mockup_chip *chip) { struct gpio_mockup_dbgfs_private *priv; - struct dentry *evfile, *link; + struct dentry *evfile; struct gpio_chip *gc; const char *devname; char *name; @@ -197,10 +332,6 @@ static void gpio_mockup_debugfs_setup(struct device *dev, if (IS_ERR_OR_NULL(chip->dbg_dir)) goto err; - link = debugfs_create_symlink(gc->label, gpio_mockup_dbg_dir, devname); - if (IS_ERR_OR_NULL(link)) - goto err; - for (i = 0; i < gc->ngpio; i++) { name = devm_kasprintf(dev, GFP_KERNEL, "%d", i); if (!name) @@ -215,7 +346,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev, priv->desc = &gc->gpiodev->descs[i]; evfile = debugfs_create_file(name, 0200, chip->dbg_dir, priv, - &gpio_mockup_event_ops); + &gpio_mockup_debugfs_ops); if (IS_ERR_OR_NULL(evfile)) goto err; } @@ -223,7 +354,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev, return; err: - dev_err(dev, "error creating debugfs event files\n"); + dev_err(dev, "error creating debugfs files\n"); } static int gpio_mockup_name_lines(struct device *dev, @@ -283,6 +414,8 @@ static int gpio_mockup_probe(struct platform_device *pdev) return -ENOMEM; } + mutex_init(&chip->lock); + gc = &chip->gc; gc->base = base; gc->ngpio = ngpio; @@ -291,11 +424,13 @@ static int 
gpio_mockup_probe(struct platform_device *pdev) gc->parent = dev; gc->get = gpio_mockup_get; gc->set = gpio_mockup_set; + gc->get_multiple = gpio_mockup_get_multiple; gc->set_multiple = gpio_mockup_set_multiple; gc->direction_output = gpio_mockup_dirout; gc->direction_input = gpio_mockup_dirin; gc->get_direction = gpio_mockup_get_direction; gc->to_irq = gpio_mockup_to_irq; + gc->free = gpio_mockup_free; chip->lines = devm_kcalloc(dev, gc->ngpio, sizeof(*chip->lines), GFP_KERNEL); @@ -369,7 +504,7 @@ static int __init gpio_mockup_init(void) return -EINVAL; } - gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup-event", NULL); + gpio_mockup_dbg_dir = debugfs_create_dir("gpio-mockup", NULL); if (IS_ERR_OR_NULL(gpio_mockup_dbg_dir)) gpio_mockup_err("error creating debugfs directory\n"); diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 3b34dbecef99..7e3c96e4ab2c 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c @@ -1,32 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel Medfield MSIC GPIO driver> * Copyright (c) 2011, Intel Corporation. * * Author: Mathias Nyman * Based on intel_pmic_gpio.c - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- * */ -#include -#include -#include -#include #include -#include +#include +#include +#include #include +#include +#include /* the offset for the mapping of global gpio pin to irq */ #define MSIC_GPIO_IRQ_OFFSET 0x100 @@ -237,20 +224,17 @@ static void msic_gpio_irq_handler(struct irq_desc *desc) struct msic_gpio *mg = irq_data_get_irq_handler_data(data); struct irq_chip *chip = irq_data_get_irq_chip(data); struct intel_msic *msic = pdev_to_intel_msic(mg->pdev); + unsigned long pending; int i; int bitnr; u8 pin; - unsigned long pending = 0; for (i = 0; i < (mg->chip.ngpio / BITS_PER_BYTE); i++) { intel_msic_irq_read(msic, INTEL_MSIC_GPIO0LVIRQ + i, &pin); pending = pin; - if (pending) { - for_each_set_bit(bitnr, &pending, BITS_PER_BYTE) - generic_handle_irq(mg->irq_base + - (i * BITS_PER_BYTE) + bitnr); - } + for_each_set_bit(bitnr, &pending, BITS_PER_BYTE) + generic_handle_irq(mg->irq_base + i * BITS_PER_BYTE + bitnr); } chip->irq_eoi(data); } diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index 00e954f22bc9..74401e0adb29 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c @@ -30,6 +30,7 @@ #define GPIO_REG_EDGE 0xA0 struct mtk_gc { + struct irq_chip irq_chip; struct gpio_chip chip; spinlock_t lock; int bank; @@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type) return 0; } -static struct irq_chip mediatek_gpio_irq_chip = { - .irq_unmask = mediatek_gpio_irq_unmask, - .irq_mask = mediatek_gpio_irq_mask, - .irq_mask_ack = mediatek_gpio_irq_mask, - .irq_set_type = mediatek_gpio_irq_type, -}; - static int mediatek_gpio_xlate(struct gpio_chip *chip, const struct of_phandle_args *spec, u32 *flags) @@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } + rg->irq_chip.name = dev_name(dev); + rg->irq_chip.parent_device = dev; + rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; + rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; + rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; + rg->irq_chip.irq_set_type = mediatek_gpio_irq_type; + if (mtk->gpio_irq) { /* * Manually request the irq here instead of passing @@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev, return ret; } - ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, + ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip, 0, handle_simple_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "failed to add gpiochip_irqchip\n"); return ret; } - gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, + gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip, mtk->gpio_irq, NULL); } @@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev) mtk->gpio_irq = irq_of_parse_and_map(np, 0); mtk->dev = dev; platform_set_drvdata(pdev, mtk); - mediatek_gpio_irq_chip.name = dev_name(dev); for (i = 0; i < MTK_BANK_CNT; i++) { ret = mediatek_gpio_bank_probe(dev, np, i); diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 7d5c55494ccd..f97ed32b8beb 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -376,6 +376,16 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned int pin, return 0; } +static int mvebu_gpio_get_direction(struct gpio_chip *chip, unsigned int pin) +{ + struct mvebu_gpio_chip *mvchip = gpiochip_get_data(chip); + u32 u; + + regmap_read(mvchip->regs, GPIO_IO_CONF_OFF + mvchip->offset, &u); + + return !!(u & BIT(pin)); +} + static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned int pin) { struct mvebu_gpio_chip *mvchip = 
gpiochip_get_data(chip); @@ -1130,6 +1140,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) mvchip->chip.parent = &pdev->dev; mvchip->chip.request = gpiochip_generic_request; mvchip->chip.free = gpiochip_generic_free; + mvchip->chip.get_direction = mvebu_gpio_get_direction; mvchip->chip.direction_input = mvebu_gpio_direction_input; mvchip->chip.get = mvebu_gpio_get; mvchip->chip.direction_output = mvebu_gpio_direction_output; diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 2d1dfa1e0745..e86e61dda4b7 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -438,8 +438,11 @@ static int mxc_gpio_probe(struct platform_device *pdev) /* the controller clock is optional */ port->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(port->clk)) + if (IS_ERR(port->clk)) { + if (PTR_ERR(port->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; port->clk = NULL; + } err = clk_prepare_enable(port->clk); if (err) { diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index f4e9921fa966..7f33024b6d83 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -883,14 +883,16 @@ static void omap_gpio_unmask_irq(struct irq_data *d) if (trigger) omap_set_gpio_triggering(bank, offset, trigger); - /* For level-triggered GPIOs, the clearing must be done after - * the HW source is cleared, thus after the handler has run */ - if (bank->level_mask & BIT(offset)) { - omap_set_gpio_irqenable(bank, offset, 0); + omap_set_gpio_irqenable(bank, offset, 1); + + /* + * For level-triggered GPIOs, clearing must be done after the source + * is cleared, thus after the handler has run. OMAP4 needs this done + * after enabing the interrupt to clear the wakeup status. + */ + if (bank->level_mask & BIT(offset)) omap_clear_gpio_irqstatus(bank, offset); - } - omap_set_gpio_irqenable(bank, offset, 1); raw_spin_unlock_irqrestore(&bank->lock, flags); } diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 0dc96419efe3..7e76830b3368 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -65,7 +65,7 @@ #define PCA_INT 0x0100 #define PCA_PCAL 0x0200 -#define PCA_LATCH_INT (PCA_PCAL | PCA_INT) +#define PCA_LATCH_INT (PCA_PCAL | PCA_INT) #define PCA953X_TYPE 0x1000 #define PCA957X_TYPE 0x2000 #define PCA_TYPE_MASK 0xF000 @@ -88,8 +88,9 @@ static const struct i2c_device_id pca953x_id[] = { { "pca9575", 16 | PCA957X_TYPE | PCA_INT, }, { "pca9698", 40 | PCA953X_TYPE, }, - { "pcal6524", 24 | PCA953X_TYPE | PCA_INT | PCA_PCAL, }, - { "pcal9555a", 16 | PCA953X_TYPE | PCA_INT | PCA_PCAL, }, + { "pcal6416", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, + { "pcal6524", 24 | PCA953X_TYPE | PCA_LATCH_INT, }, + { "pcal9555a", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, { "max7310", 8 | PCA953X_TYPE, }, { "max7312", 16 | PCA953X_TYPE | PCA_INT, }, @@ -108,7 +109,7 @@ static const struct i2c_device_id pca953x_id[] = { MODULE_DEVICE_TABLE(i2c, pca953x_id); static const struct acpi_device_id pca953x_acpi_ids[] = { - { "INT3491", 16 | PCA953X_TYPE | PCA_INT | PCA_PCAL, }, + { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, }, { } }; MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids); @@ -150,6 +151,7 @@ struct pca953x_chip { u8 irq_stat[MAX_BANK]; u8 irq_trig_raise[MAX_BANK]; u8 irq_trig_fall[MAX_BANK]; + struct irq_chip irq_chip; #endif struct i2c_client *client; @@ -178,6 +180,8 @@ static int pca953x_bank_shift(struct pca953x_chip *chip) #define PCA957x_BANK_OUTPUT BIT(5) #define PCAL9xxx_BANK_IN_LATCH BIT(8 + 2) +#define PCAL9xxx_BANK_PULL_EN BIT(8 + 3) +#define 
PCAL9xxx_BANK_PULL_SEL BIT(8 + 4) #define PCAL9xxx_BANK_IRQ_MASK BIT(8 + 5) #define PCAL9xxx_BANK_IRQ_STAT BIT(8 + 6) @@ -199,6 +203,8 @@ static int pca953x_bank_shift(struct pca953x_chip *chip) * - Extended set, above 0x40, often chip specific. * - PCAL6524/PCAL9555A with custom PCAL IRQ handling: * Input latch register 0x40 + 2 * bank_size RW + * Pull-up/pull-down enable reg 0x40 + 3 * bank_size RW + * Pull-up/pull-down select reg 0x40 + 4 * bank_size RW * Interrupt mask register 0x40 + 5 * bank_size RW * Interrupt status register 0x40 + 6 * bank_size R * @@ -247,7 +253,8 @@ static bool pca953x_readable_register(struct device *dev, unsigned int reg) } if (chip->driver_data & PCA_PCAL) { - bank |= PCAL9xxx_BANK_IN_LATCH | PCAL9xxx_BANK_IRQ_MASK | + bank |= PCAL9xxx_BANK_IN_LATCH | PCAL9xxx_BANK_PULL_EN | + PCAL9xxx_BANK_PULL_SEL | PCAL9xxx_BANK_IRQ_MASK | PCAL9xxx_BANK_IRQ_STAT; } @@ -268,7 +275,8 @@ static bool pca953x_writeable_register(struct device *dev, unsigned int reg) } if (chip->driver_data & PCA_PCAL) - bank |= PCAL9xxx_BANK_IN_LATCH | PCAL9xxx_BANK_IRQ_MASK; + bank |= PCAL9xxx_BANK_IN_LATCH | PCAL9xxx_BANK_PULL_EN | + PCAL9xxx_BANK_PULL_SEL | PCAL9xxx_BANK_IRQ_MASK; return pca953x_check_register(chip, reg, bank); } @@ -473,6 +481,61 @@ exit: mutex_unlock(&chip->i2c_lock); } +static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, + unsigned int offset, + unsigned long config) +{ + u8 pull_en_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_EN, offset, + true, false); + u8 pull_sel_reg = pca953x_recalc_addr(chip, PCAL953X_PULL_SEL, offset, + true, false); + u8 bit = BIT(offset % BANK_SZ); + int ret; + + /* + * pull-up/pull-down configuration requires PCAL extended + * registers + */ + if (!(chip->driver_data & PCA_PCAL)) + return -ENOTSUPP; + + mutex_lock(&chip->i2c_lock); + + /* Disable pull-up/pull-down */ + ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0); + if (ret) + goto exit; + + /* Configure pull-up/pull-down */ + if (config == PIN_CONFIG_BIAS_PULL_UP) + ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit); + else if (config == PIN_CONFIG_BIAS_PULL_DOWN) + ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0); + if (ret) + goto exit; + + /* Enable pull-up/pull-down */ + ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit); + +exit: + mutex_unlock(&chip->i2c_lock); + return ret; +} + +static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) +{ + struct pca953x_chip *chip = gpiochip_get_data(gc); + + switch (config) { + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + return pca953x_gpio_set_pull_up_down(chip, offset, config); + default: + return -ENOTSUPP; + } +} + static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) { struct gpio_chip *gc; @@ -485,6 +548,7 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) gc->set = pca953x_gpio_set_value; gc->get_direction = pca953x_gpio_get_direction; gc->set_multiple = pca953x_gpio_set_multiple; + gc->set_config = pca953x_gpio_set_config; gc->can_sleep = true; gc->base = chip->gpio_start; @@ -512,6 +576,14 @@ static void pca953x_irq_unmask(struct irq_data *d) chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ); } +static int pca953x_irq_set_wake(struct irq_data *d, unsigned int on) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = gpiochip_get_data(gc); + + return irq_set_irq_wake(chip->client->irq, on); +} + static void 
pca953x_irq_bus_lock(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -587,23 +659,14 @@ static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) static void pca953x_irq_shutdown(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = gpiochip_get_data(gc); u8 mask = 1 << (d->hwirq % BANK_SZ); chip->irq_trig_raise[d->hwirq / BANK_SZ] &= ~mask; chip->irq_trig_fall[d->hwirq / BANK_SZ] &= ~mask; } -static struct irq_chip pca953x_irq_chip = { - .name = "pca953x", - .irq_mask = pca953x_irq_mask, - .irq_unmask = pca953x_irq_unmask, - .irq_bus_lock = pca953x_irq_bus_lock, - .irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock, - .irq_set_type = pca953x_irq_set_type, - .irq_shutdown = pca953x_irq_shutdown, -}; - static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending) { u8 cur_stat[MAX_BANK]; @@ -699,56 +762,65 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base) { struct i2c_client *client = chip->client; + struct irq_chip *irq_chip = &chip->irq_chip; int reg_direction[MAX_BANK]; int ret, i; - if (client->irq && irq_base != -1 - && (chip->driver_data & PCA_INT)) { - ret = pca953x_read_regs(chip, - chip->regs->input, chip->irq_stat); - if (ret) - return ret; + if (!client->irq) + return 0; - /* - * There is no way to know which GPIO line generated the - * interrupt. We have to rely on the previous read for - * this purpose. - */ - regmap_bulk_read(chip->regmap, chip->regs->direction, - reg_direction, NBANK(chip)); - for (i = 0; i < NBANK(chip); i++) - chip->irq_stat[i] &= reg_direction[i]; - mutex_init(&chip->irq_lock); - - ret = devm_request_threaded_irq(&client->dev, - client->irq, - NULL, - pca953x_irq_handler, - IRQF_TRIGGER_LOW | IRQF_ONESHOT | - IRQF_SHARED, - dev_name(&client->dev), chip); - if (ret) { - dev_err(&client->dev, "failed to request irq %d\n", - client->irq); - return ret; - } + if (irq_base == -1) + return 0; - ret = gpiochip_irqchip_add_nested(&chip->gpio_chip, - &pca953x_irq_chip, - irq_base, - handle_simple_irq, - IRQ_TYPE_NONE); - if (ret) { - dev_err(&client->dev, - "could not connect irqchip to gpiochip\n"); - return ret; - } + if (!(chip->driver_data & PCA_INT)) + return 0; - gpiochip_set_nested_irqchip(&chip->gpio_chip, - &pca953x_irq_chip, - client->irq); + ret = pca953x_read_regs(chip, chip->regs->input, chip->irq_stat); + if (ret) + return ret; + + /* + * There is no way to know which GPIO line generated the + * interrupt. We have to rely on the previous read for + * this purpose. 
+ */ + regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction, + NBANK(chip)); + for (i = 0; i < NBANK(chip); i++) + chip->irq_stat[i] &= reg_direction[i]; + mutex_init(&chip->irq_lock); + + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, pca953x_irq_handler, + IRQF_TRIGGER_LOW | IRQF_ONESHOT | + IRQF_SHARED, + dev_name(&client->dev), chip); + if (ret) { + dev_err(&client->dev, "failed to request irq %d\n", + client->irq); + return ret; } + irq_chip->name = dev_name(&chip->client->dev); + irq_chip->irq_mask = pca953x_irq_mask; + irq_chip->irq_unmask = pca953x_irq_unmask; + irq_chip->irq_set_wake = pca953x_irq_set_wake; + irq_chip->irq_bus_lock = pca953x_irq_bus_lock; + irq_chip->irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock; + irq_chip->irq_set_type = pca953x_irq_set_type; + irq_chip->irq_shutdown = pca953x_irq_shutdown; + + ret = gpiochip_irqchip_add_nested(&chip->gpio_chip, irq_chip, + irq_base, handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&client->dev, + "could not connect irqchip to gpiochip\n"); + return ret; + } + + gpiochip_set_nested_irqchip(&chip->gpio_chip, irq_chip, client->irq); + return 0; } diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index adf72dda25a2..c9b650f617fa 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -84,11 +84,11 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table); */ struct pcf857x { struct gpio_chip chip; + struct irq_chip irqchip; struct i2c_client *client; struct mutex lock; /* protect 'out' */ unsigned out; /* software latch */ unsigned status; /* current status */ - unsigned int irq_parent; unsigned irq_enabled; /* enabled irqs */ int (*write)(struct i2c_client *client, unsigned data); @@ -210,18 +210,7 @@ static int pcf857x_irq_set_wake(struct irq_data *data, unsigned int on) { struct pcf857x *gpio = irq_data_get_irq_chip_data(data); - int error = 0; - - if (gpio->irq_parent) { - error = irq_set_irq_wake(gpio->irq_parent, on); - if (error) { - dev_dbg(&gpio->client->dev, - "irq %u doesn't support irq_set_wake\n", - gpio->irq_parent); - gpio->irq_parent = 0; - } - } - return error; + return irq_set_irq_wake(gpio->client->irq, on); } static void pcf857x_irq_enable(struct irq_data *data) @@ -252,18 +241,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data) mutex_unlock(&gpio->lock); } -static struct irq_chip pcf857x_irq_chip = { - .name = "pcf857x", - .irq_enable = pcf857x_irq_enable, - .irq_disable = pcf857x_irq_disable, - .irq_ack = noop, - .irq_mask = noop, - .irq_unmask = noop, - .irq_set_wake = pcf857x_irq_set_wake, - .irq_bus_lock = pcf857x_irq_bus_lock, - .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, -}; - /*-------------------------------------------------------------------------*/ static int pcf857x_probe(struct i2c_client *client, @@ -376,8 +353,17 @@ static int pcf857x_probe(struct i2c_client *client, /* Enable irqchip if we have an interrupt */ if (client->irq) { + gpio->irqchip.name = "pcf857x", + gpio->irqchip.irq_enable = pcf857x_irq_enable, + gpio->irqchip.irq_disable = pcf857x_irq_disable, + gpio->irqchip.irq_ack = noop, + gpio->irqchip.irq_mask = noop, + gpio->irqchip.irq_unmask = noop, + gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake, + gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock, + gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, status = gpiochip_irqchip_add_nested(&gpio->chip, - &pcf857x_irq_chip, + &gpio->irqchip, 0, handle_level_irq, IRQ_TYPE_NONE); if (status) { @@ -392,9 +378,8 @@ static 
int pcf857x_probe(struct i2c_client *client, if (status) goto fail; - gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, + gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip, client->irq); - gpio->irq_parent = client->irq; } /* Let platform code set up the GPIOs and their users. diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index ee79e5f88b5a..1d99293096f2 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -437,7 +437,6 @@ static int __maybe_unused pch_gpio_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume); -#define PCI_VENDOR_ID_ROHM 0x10DB static const struct pci_device_id pch_gpio_pcidev_id[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) }, diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c index 29e044ff4b17..24228cf79afc 100644 --- a/drivers/gpio/gpio-pmic-eic-sprd.c +++ b/drivers/gpio/gpio-pmic-eic-sprd.c @@ -322,7 +322,6 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev) ret = devm_request_threaded_irq(&pdev->dev, pmic_eic->irq, NULL, sprd_pmic_eic_irq_handler, - IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_NO_SUSPEND, dev_name(&pdev->dev), pmic_eic); if (ret) { @@ -365,7 +364,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev) } static const struct of_device_id sprd_pmic_eic_of_match[] = { - { .compatible = "sprd,sc27xx-eic", }, + { .compatible = "sprd,sc2731-eic", }, { /* end of list */ } }; MODULE_DEVICE_TABLE(of, sprd_pmic_eic_of_match); diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index e9600b556f39..bcc6be4a5cb2 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void) { switch (gpio_type) { case PXA3XX_GPIO: + case MMP2_GPIO: return false; default: diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 068ce25ffd28..500a3596aaf4 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -40,6 +40,7 @@ struct gpio_rcar_priv { struct irq_chip irq_chip; unsigned int irq_parent; atomic_t wakeup_path; + bool has_outdtsel; bool has_both_edge_trigger; struct gpio_rcar_bank_info bank_info; }; @@ -55,6 +56,7 @@ struct gpio_rcar_priv { #define POSNEG 0x20 /* Positive/Negative Logic Select Register */ #define EDGLEVEL 0x24 /* Edge/level Select Register */ #define FILONOFF 0x28 /* Chattering Prevention On/Off Register */ +#define OUTDTSEL 0x40 /* Output Data Select Register */ #define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */ #define RCAR_MAX_GPIO_PER_BANK 32 @@ -235,6 +237,10 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, /* Select Input Mode or Output Mode in INOUTSEL */ gpio_rcar_modify_bit(p, INOUTSEL, gpio, output); + /* Select General Output Register to output data in OUTDTSEL */ + if (p->has_outdtsel && output) + gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false); + spin_unlock_irqrestore(&p->lock, flags); } @@ -336,14 +342,17 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset, } struct gpio_rcar_info { + bool has_outdtsel; bool has_both_edge_trigger; }; static const struct gpio_rcar_info gpio_rcar_info_gen1 = { + .has_outdtsel = false, .has_both_edge_trigger = false, }; static const struct gpio_rcar_info gpio_rcar_info_gen2 = { + .has_outdtsel = true, .has_both_edge_trigger = true, }; @@ -403,10 +412,11 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins) int ret; info = 
of_device_get_match_data(p->dev); + p->has_outdtsel = info->has_outdtsel; + p->has_both_edge_trigger = info->has_both_edge_trigger; ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK; - p->has_both_edge_trigger = info->has_both_edge_trigger; if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n", diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c index 03a000659fa1..7d718557092e 100644 --- a/drivers/gpio/gpio-sama5d2-piobu.c +++ b/drivers/gpio/gpio-sama5d2-piobu.c @@ -108,16 +108,6 @@ static int sama5d2_piobu_read_value(struct gpio_chip *chip, unsigned int pin, return val & mask; } -/** - * sama5d2_piobu_set_direction() - mark pin as input or output - */ -static int sama5d2_piobu_set_direction(struct gpio_chip *chip, - unsigned int direction, - unsigned int pin) -{ - return sama5d2_piobu_write_value(chip, pin, PIOBU_DIRECTION, direction); -} - /** * sama5d2_piobu_get_direction() - gpiochip get_direction */ @@ -138,7 +128,7 @@ static int sama5d2_piobu_get_direction(struct gpio_chip *chip, static int sama5d2_piobu_direction_input(struct gpio_chip *chip, unsigned int pin) { - return sama5d2_piobu_set_direction(chip, PIOBU_IN, pin); + return sama5d2_piobu_write_value(chip, pin, PIOBU_DIRECTION, PIOBU_IN); } /** @@ -147,7 +137,13 @@ static int sama5d2_piobu_direction_input(struct gpio_chip *chip, static int sama5d2_piobu_direction_output(struct gpio_chip *chip, unsigned int pin, int value) { - return sama5d2_piobu_set_direction(chip, PIOBU_OUT, pin); + unsigned int val = PIOBU_OUT; + + if (value) + val |= PIOBU_HIGH; + + return sama5d2_piobu_write_value(chip, pin, PIOBU_DIRECTION | PIOBU_SOD, + val); } /** diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 02f6db925fd5..1ececf2c3282 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -2,6 +2,7 @@ * arch/arm/mach-tegra/gpio.c * * Copyright (c) 2010 Google, Inc + * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. 
* * Author: * Erik Gilling @@ -141,14 +142,14 @@ static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio) static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset) { - return pinctrl_gpio_request(offset); + return pinctrl_gpio_request(chip->base + offset); } static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset) { struct tegra_gpio_info *tgi = gpiochip_get_data(chip); - pinctrl_gpio_free(offset); + pinctrl_gpio_free(chip->base + offset); tegra_gpio_disable(tgi, offset); } @@ -176,10 +177,18 @@ static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) { struct tegra_gpio_info *tgi = gpiochip_get_data(chip); + int ret; tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 0); tegra_gpio_enable(tgi, offset); - return 0; + + ret = pinctrl_gpio_direction_input(chip->base + offset); + if (ret < 0) + dev_err(tgi->dev, + "Failed to set pinctrl input direction of GPIO %d: %d", + chip->base + offset, ret); + + return ret; } static int tegra_gpio_direction_output(struct gpio_chip *chip, @@ -187,11 +196,19 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip, int value) { struct tegra_gpio_info *tgi = gpiochip_get_data(chip); + int ret; tegra_gpio_set(chip, offset, value); tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 1); tegra_gpio_enable(tgi, offset); - return 0; + + ret = pinctrl_gpio_direction_output(chip->base + offset); + if (ret < 0) + dev_err(tgi->dev, + "Failed to set pinctrl output direction of GPIO %d: %d", + chip->base + offset, ret); + + return ret; } static int tegra_gpio_get_direction(struct gpio_chip *chip, diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 66ec38bb7954..7d42e3d7572c 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -529,8 +529,8 @@ static int tegra186_gpio_remove(struct platform_device *pdev) return 0; } -#define TEGRA_MAIN_GPIO_PORT(port, base, count, controller) \ - [TEGRA_MAIN_GPIO_PORT_##port] = { \ +#define TEGRA186_MAIN_GPIO_PORT(port, base, count, controller) \ + [TEGRA186_MAIN_GPIO_PORT_##port] = { \ .name = #port, \ .offset = base, \ .pins = count, \ @@ -538,29 +538,29 @@ static int tegra186_gpio_remove(struct platform_device *pdev) } static const struct tegra_gpio_port tegra186_main_ports[] = { - TEGRA_MAIN_GPIO_PORT( A, 0x2000, 7, 2), - TEGRA_MAIN_GPIO_PORT( B, 0x3000, 7, 3), - TEGRA_MAIN_GPIO_PORT( C, 0x3200, 7, 3), - TEGRA_MAIN_GPIO_PORT( D, 0x3400, 6, 3), - TEGRA_MAIN_GPIO_PORT( E, 0x2200, 8, 2), - TEGRA_MAIN_GPIO_PORT( F, 0x2400, 6, 2), - TEGRA_MAIN_GPIO_PORT( G, 0x4200, 6, 4), - TEGRA_MAIN_GPIO_PORT( H, 0x1000, 7, 1), - TEGRA_MAIN_GPIO_PORT( I, 0x0800, 8, 0), - TEGRA_MAIN_GPIO_PORT( J, 0x5000, 8, 5), - TEGRA_MAIN_GPIO_PORT( K, 0x5200, 1, 5), - TEGRA_MAIN_GPIO_PORT( L, 0x1200, 8, 1), - TEGRA_MAIN_GPIO_PORT( M, 0x5600, 6, 5), - TEGRA_MAIN_GPIO_PORT( N, 0x0000, 7, 0), - TEGRA_MAIN_GPIO_PORT( O, 0x0200, 4, 0), - TEGRA_MAIN_GPIO_PORT( P, 0x4000, 7, 4), - TEGRA_MAIN_GPIO_PORT( Q, 0x0400, 6, 0), - TEGRA_MAIN_GPIO_PORT( R, 0x0a00, 6, 0), - TEGRA_MAIN_GPIO_PORT( T, 0x0600, 4, 0), - TEGRA_MAIN_GPIO_PORT( X, 0x1400, 8, 1), - TEGRA_MAIN_GPIO_PORT( Y, 0x1600, 7, 1), - TEGRA_MAIN_GPIO_PORT(BB, 0x2600, 2, 2), - TEGRA_MAIN_GPIO_PORT(CC, 0x5400, 4, 5), + TEGRA186_MAIN_GPIO_PORT( A, 0x2000, 7, 2), + TEGRA186_MAIN_GPIO_PORT( B, 0x3000, 7, 3), + TEGRA186_MAIN_GPIO_PORT( C, 0x3200, 7, 3), + TEGRA186_MAIN_GPIO_PORT( D, 0x3400, 6, 3), + TEGRA186_MAIN_GPIO_PORT( E, 0x2200, 8, 2), + 
TEGRA186_MAIN_GPIO_PORT( F, 0x2400, 6, 2), + TEGRA186_MAIN_GPIO_PORT( G, 0x4200, 6, 4), + TEGRA186_MAIN_GPIO_PORT( H, 0x1000, 7, 1), + TEGRA186_MAIN_GPIO_PORT( I, 0x0800, 8, 0), + TEGRA186_MAIN_GPIO_PORT( J, 0x5000, 8, 5), + TEGRA186_MAIN_GPIO_PORT( K, 0x5200, 1, 5), + TEGRA186_MAIN_GPIO_PORT( L, 0x1200, 8, 1), + TEGRA186_MAIN_GPIO_PORT( M, 0x5600, 6, 5), + TEGRA186_MAIN_GPIO_PORT( N, 0x0000, 7, 0), + TEGRA186_MAIN_GPIO_PORT( O, 0x0200, 4, 0), + TEGRA186_MAIN_GPIO_PORT( P, 0x4000, 7, 4), + TEGRA186_MAIN_GPIO_PORT( Q, 0x0400, 6, 0), + TEGRA186_MAIN_GPIO_PORT( R, 0x0a00, 6, 0), + TEGRA186_MAIN_GPIO_PORT( T, 0x0600, 4, 0), + TEGRA186_MAIN_GPIO_PORT( X, 0x1400, 8, 1), + TEGRA186_MAIN_GPIO_PORT( Y, 0x1600, 7, 1), + TEGRA186_MAIN_GPIO_PORT(BB, 0x2600, 2, 2), + TEGRA186_MAIN_GPIO_PORT(CC, 0x5400, 4, 5), }; static const struct tegra_gpio_soc tegra186_main_soc = { @@ -569,8 +569,8 @@ static const struct tegra_gpio_soc tegra186_main_soc = { .name = "tegra186-gpio", }; -#define TEGRA_AON_GPIO_PORT(port, base, count, controller) \ - [TEGRA_AON_GPIO_PORT_##port] = { \ +#define TEGRA186_AON_GPIO_PORT(port, base, count, controller) \ + [TEGRA186_AON_GPIO_PORT_##port] = { \ .name = #port, \ .offset = base, \ .pins = count, \ @@ -578,14 +578,14 @@ static const struct tegra_gpio_soc tegra186_main_soc = { } static const struct tegra_gpio_port tegra186_aon_ports[] = { - TEGRA_AON_GPIO_PORT( S, 0x0200, 5, 0), - TEGRA_AON_GPIO_PORT( U, 0x0400, 6, 0), - TEGRA_AON_GPIO_PORT( V, 0x0800, 8, 0), - TEGRA_AON_GPIO_PORT( W, 0x0a00, 8, 0), - TEGRA_AON_GPIO_PORT( Z, 0x0e00, 4, 0), - TEGRA_AON_GPIO_PORT(AA, 0x0c00, 8, 0), - TEGRA_AON_GPIO_PORT(EE, 0x0600, 3, 0), - TEGRA_AON_GPIO_PORT(FF, 0x0000, 5, 0), + TEGRA186_AON_GPIO_PORT( S, 0x0200, 5, 0), + TEGRA186_AON_GPIO_PORT( U, 0x0400, 6, 0), + TEGRA186_AON_GPIO_PORT( V, 0x0800, 8, 0), + TEGRA186_AON_GPIO_PORT( W, 0x0a00, 8, 0), + TEGRA186_AON_GPIO_PORT( Z, 0x0e00, 4, 0), + TEGRA186_AON_GPIO_PORT(AA, 0x0c00, 8, 0), + TEGRA186_AON_GPIO_PORT(EE, 0x0600, 3, 0), + TEGRA186_AON_GPIO_PORT(FF, 0x0000, 5, 0), }; static const struct tegra_gpio_soc tegra186_aon_soc = { diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c new file mode 100644 index 000000000000..d5880db7f9d4 --- /dev/null +++ b/drivers/gpio/gpio-tqmx86.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TQ-Systems TQMx86 PLD GPIO driver + * + * Based on vendor driver by: + * Vadim V.Vlasov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TQMX86_NGPIO 8 +#define TQMX86_NGPO 4 /* 0-3 - output */ +#define TQMX86_NGPI 4 /* 4-7 - input */ +#define TQMX86_DIR_INPUT_MASK 0xf0 /* 0-3 - output, 4-7 - input */ + +#define TQMX86_GPIODD 0 /* GPIO Data Direction Register */ +#define TQMX86_GPIOD 1 /* GPIO Data Register */ +#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */ +#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */ + +#define TQMX86_GPII_FALLING BIT(0) +#define TQMX86_GPII_RISING BIT(1) +#define TQMX86_GPII_MASK (BIT(0) | BIT(1)) +#define TQMX86_GPII_BITS 2 + +struct tqmx86_gpio_data { + struct gpio_chip chip; + struct irq_chip irq_chip; + void __iomem *io_base; + int irq; + raw_spinlock_t spinlock; + u8 irq_type[TQMX86_NGPI]; +}; + +static u8 tqmx86_gpio_read(struct tqmx86_gpio_data *gd, unsigned int reg) +{ + return ioread8(gd->io_base + reg); +} + +static void tqmx86_gpio_write(struct tqmx86_gpio_data *gd, u8 val, + unsigned int reg) +{ + iowrite8(val, gd->io_base + reg); +} + +static int 
tqmx86_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); + + return !!(tqmx86_gpio_read(gpio, TQMX86_GPIOD) & BIT(offset)); +} + +static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset, + int value) +{ + struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); + unsigned long flags; + u8 val; + + raw_spin_lock_irqsave(&gpio->spinlock, flags); + val = tqmx86_gpio_read(gpio, TQMX86_GPIOD); + if (value) + val |= BIT(offset); + else + val &= ~BIT(offset); + tqmx86_gpio_write(gpio, val, TQMX86_GPIOD); + raw_spin_unlock_irqrestore(&gpio->spinlock, flags); +} + +static int tqmx86_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + /* Direction cannot be changed. Validate is an input. */ + if (BIT(offset) & TQMX86_DIR_INPUT_MASK) + return 0; + else + return -EINVAL; +} + +static int tqmx86_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, + int value) +{ + /* Direction cannot be changed, validate is an output */ + if (BIT(offset) & TQMX86_DIR_INPUT_MASK) + return -EINVAL; + + tqmx86_gpio_set(chip, offset, value); + return 0; +} + +static int tqmx86_gpio_get_direction(struct gpio_chip *chip, + unsigned int offset) +{ + return !!(TQMX86_DIR_INPUT_MASK & BIT(offset)); +} + +static void tqmx86_gpio_irq_mask(struct irq_data *data) +{ + unsigned int offset = (data->hwirq - TQMX86_NGPO); + struct tqmx86_gpio_data *gpio = gpiochip_get_data( + irq_data_get_irq_chip_data(data)); + unsigned long flags; + u8 gpiic, mask; + + mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); + + raw_spin_lock_irqsave(&gpio->spinlock, flags); + gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); + gpiic &= ~mask; + tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + raw_spin_unlock_irqrestore(&gpio->spinlock, flags); +} + +static void tqmx86_gpio_irq_unmask(struct irq_data *data) +{ + unsigned int offset = (data->hwirq - TQMX86_NGPO); + struct tqmx86_gpio_data *gpio = gpiochip_get_data( + irq_data_get_irq_chip_data(data)); + unsigned long flags; + u8 gpiic, mask; + + mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS); + + raw_spin_lock_irqsave(&gpio->spinlock, flags); + gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); + gpiic &= ~mask; + gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS); + tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + raw_spin_unlock_irqrestore(&gpio->spinlock, flags); +} + +static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type) +{ + struct tqmx86_gpio_data *gpio = gpiochip_get_data( + irq_data_get_irq_chip_data(data)); + unsigned int offset = (data->hwirq - TQMX86_NGPO); + unsigned int edge_type = type & IRQF_TRIGGER_MASK; + unsigned long flags; + u8 new_type, gpiic; + + switch (edge_type) { + case IRQ_TYPE_EDGE_RISING: + new_type = TQMX86_GPII_RISING; + break; + case IRQ_TYPE_EDGE_FALLING: + new_type = TQMX86_GPII_FALLING; + break; + case IRQ_TYPE_EDGE_BOTH: + new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING; + break; + default: + return -EINVAL; /* not supported */ + } + + gpio->irq_type[offset] = new_type; + + raw_spin_lock_irqsave(&gpio->spinlock, flags); + gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC); + gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS)); + gpiic |= new_type << (offset * TQMX86_GPII_BITS); + tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC); + raw_spin_unlock_irqrestore(&gpio->spinlock, flags); + + return 0; +} + +static void tqmx86_gpio_irq_handler(struct irq_desc *desc) +{ + struct gpio_chip *chip = 
irq_desc_get_handler_data(desc); + struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip); + struct irq_chip *irq_chip = irq_desc_get_chip(desc); + unsigned long irq_bits; + int i = 0, child_irq; + u8 irq_status; + + chained_irq_enter(irq_chip, desc); + + irq_status = tqmx86_gpio_read(gpio, TQMX86_GPIIS); + tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS); + + irq_bits = irq_status; + for_each_set_bit(i, &irq_bits, TQMX86_NGPI) { + child_irq = irq_find_mapping(gpio->chip.irq.domain, + i + TQMX86_NGPO); + generic_handle_irq(child_irq); + } + + chained_irq_exit(irq_chip, desc); +} + +/* Minimal runtime PM is needed by the IRQ subsystem */ +static int __maybe_unused tqmx86_gpio_runtime_suspend(struct device *dev) +{ + return 0; +} + +static int __maybe_unused tqmx86_gpio_runtime_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops tqmx86_gpio_dev_pm_ops = { + SET_RUNTIME_PM_OPS(tqmx86_gpio_runtime_suspend, + tqmx86_gpio_runtime_resume, NULL) +}; + +static int tqmx86_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct tqmx86_gpio_data *gpio; + struct gpio_chip *chip; + void __iomem *io_base; + struct resource *res; + int ret, irq; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (!res) { + dev_err(&pdev->dev, "Cannot get I/O\n"); + return -ENODEV; + } + + io_base = devm_ioport_map(&pdev->dev, res->start, resource_size(res)); + if (!io_base) + return -ENOMEM; + + gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + raw_spin_lock_init(&gpio->spinlock); + gpio->io_base = io_base; + + tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD); + + platform_set_drvdata(pdev, gpio); + + chip = &gpio->chip; + chip->label = "gpio-tqmx86"; + chip->owner = THIS_MODULE; + chip->can_sleep = false; + chip->base = -1; + chip->direction_input = tqmx86_gpio_direction_input; + chip->direction_output = tqmx86_gpio_direction_output; + chip->get_direction = tqmx86_gpio_get_direction; + chip->get = tqmx86_gpio_get; + chip->set = tqmx86_gpio_set; + chip->ngpio = TQMX86_NGPIO; + chip->irq.need_valid_mask = true; + chip->parent = pdev->dev.parent; + + pm_runtime_enable(&pdev->dev); + + ret = devm_gpiochip_add_data(dev, chip, gpio); + if (ret) { + dev_err(dev, "Could not register GPIO chip\n"); + goto out_pm_dis; + } + + if (irq) { + struct irq_chip *irq_chip = &gpio->irq_chip; + u8 irq_status; + + irq_chip->name = chip->label; + irq_chip->parent_device = &pdev->dev; + irq_chip->irq_mask = tqmx86_gpio_irq_mask; + irq_chip->irq_unmask = tqmx86_gpio_irq_unmask; + irq_chip->irq_set_type = tqmx86_gpio_irq_set_type; + + /* Mask all interrupts */ + tqmx86_gpio_write(gpio, 0, TQMX86_GPIIC); + + /* Clear all pending interrupts */ + irq_status = tqmx86_gpio_read(gpio, TQMX86_GPIIS); + tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS); + + ret = gpiochip_irqchip_add(chip, irq_chip, + 0, handle_simple_irq, + IRQ_TYPE_EDGE_BOTH); + if (ret) { + dev_err(dev, "Could not add irq chip\n"); + goto out_pm_dis; + } + + gpiochip_set_chained_irqchip(chip, irq_chip, + irq, tqmx86_gpio_irq_handler); + } + + /* Only GPIOs 4-7 are valid for interrupts. 
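
As an illustrative aside (not part of the patch): the interrupt plumbing above stores one 2-bit trigger field per GPI line in the GPIIC register (bits 0-1 for GPI 0, bits 2-3 for GPI 1, and so on); the field index is hwirq minus TQMX86_NGPO because only lines 4-7 can raise interrupts. A minimal userspace sketch of that packing, reusing the register bit values from the hunk above:

#include <stdio.h>

#define TQMX86_GPII_FALLING	0x1
#define TQMX86_GPII_RISING	0x2
#define TQMX86_GPII_MASK	0x3
#define TQMX86_GPII_BITS	2

/* Replace the 2-bit trigger field for one GPI line in a GPIIC value. */
static unsigned char gpiic_set_type(unsigned char gpiic, unsigned int gpi,
				    unsigned char type)
{
	gpiic &= ~(TQMX86_GPII_MASK << (gpi * TQMX86_GPII_BITS));
	gpiic |= type << (gpi * TQMX86_GPII_BITS);
	return gpiic;
}

int main(void)
{
	unsigned char gpiic = 0;

	gpiic = gpiic_set_type(gpiic, 0, TQMX86_GPII_RISING);
	gpiic = gpiic_set_type(gpiic, 2, TQMX86_GPII_RISING | TQMX86_GPII_FALLING);
	printf("GPIIC = 0x%02x\n", gpiic);	/* prints 0x32 */
	return 0;
}
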
Clear the others */ + clear_bit(0, chip->irq.valid_mask); + clear_bit(1, chip->irq.valid_mask); + clear_bit(2, chip->irq.valid_mask); + clear_bit(3, chip->irq.valid_mask); + + dev_info(dev, "GPIO functionality initialized with %d pins\n", + chip->ngpio); + + return 0; + +out_pm_dis: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static struct platform_driver tqmx86_gpio_driver = { + .driver = { + .name = "tqmx86-gpio", + .pm = &tqmx86_gpio_dev_pm_ops, + }, + .probe = tqmx86_gpio_probe, +}; + +module_platform_driver(tqmx86_gpio_driver); + +MODULE_DESCRIPTION("TQMx86 PLD GPIO Driver"); +MODULE_AUTHOR("Andrew Lunn "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:tqmx86-gpio"); diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 1b79ebcfce3e..541fa6ac399d 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) struct vf610_gpio_port *port; struct resource *iores; struct gpio_chip *gc; + int i; int ret; port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); @@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev) if (ret < 0) return ret; + /* Mask all GPIO interrupts */ + for (i = 0; i < gc->ngpio; i++) + vf610_gpio_writel(0, port->base + PORT_PCR(i)); + /* Clear the interrupt status register for all GPIO's */ vf610_gpio_writel(~0, port->base + PORT_ISFR); diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c index dde7c6aecbb5..444fe9e7f04a 100644 --- a/drivers/gpio/gpio-wcove.c +++ b/drivers/gpio/gpio-wcove.c @@ -1,34 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Intel Whiskey Cove PMIC GPIO Driver * * This driver is written based on gpio-crystalcove.c * * Copyright (C) 2016 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include -#include -#include #include +#include #include +#include #include #include #include /* * Whiskey Cove PMIC has 13 physical GPIO pins divided into 3 banks: - * Bank 0: Pin 0 - 6 - * Bank 1: Pin 7 - 10 - * Bank 2: Pin 11 -12 + * Bank 0: Pin 0 - 6 + * Bank 1: Pin 7 - 10 + * Bank 2: Pin 11 - 12 * Each pin has one output control register and one input control register. 
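
Another illustrative aside: the bank split described above divides the 13 Whiskey Cove GPIOs into banks of 7, 4 and 2 pins. Only BANK0_NR_PINS appears in this hunk; the other two sizes below are inferred from the comment. A standalone sketch of the pin-number-to-bank mapping:

#include <stdio.h>

#define BANK0_NR_PINS	7	/* pins 0-6, from the hunk above */
#define BANK1_NR_PINS	4	/* pins 7-10, inferred from the comment */
#define BANK2_NR_PINS	2	/* pins 11-12, inferred from the comment */

static void wcove_pin_to_bank(unsigned int pin, unsigned int *bank,
			      unsigned int *offset)
{
	if (pin < BANK0_NR_PINS) {
		*bank = 0;
		*offset = pin;
	} else if (pin < BANK0_NR_PINS + BANK1_NR_PINS) {
		*bank = 1;
		*offset = pin - BANK0_NR_PINS;
	} else {
		*bank = 2;
		*offset = pin - BANK0_NR_PINS - BANK1_NR_PINS;
	}
}

int main(void)
{
	unsigned int bank, offset;

	wcove_pin_to_bank(11, &bank, &offset);
	printf("pin 11 -> bank %u, offset %u\n", bank, offset);	/* bank 2, offset 0 */
	return 0;
}
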
*/ #define BANK0_NR_PINS 7 @@ -75,8 +67,8 @@ #define CTLO_RVAL_50KDOWN (2 << 1) #define CTLO_RVAL_50KUP (3 << 1) -#define CTLO_INPUT_SET (CTLO_DRV_CMOS | CTLO_DRV_REN | CTLO_RVAL_2KUP) -#define CTLO_OUTPUT_SET (CTLO_DIR_OUT | CTLO_INPUT_SET) +#define CTLO_INPUT_SET (CTLO_DRV_CMOS | CTLO_DRV_REN | CTLO_RVAL_2KUP) +#define CTLO_OUTPUT_SET (CTLO_DIR_OUT | CTLO_INPUT_SET) enum ctrl_register { CTRL_IN, @@ -105,7 +97,7 @@ struct wcove_gpio { bool set_irq_mask; }; -static inline unsigned int to_reg(int gpio, enum ctrl_register reg_type) +static inline int to_reg(int gpio, enum ctrl_register reg_type) { unsigned int reg; @@ -203,8 +195,7 @@ static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio) return val & 0x1; } -static void wcove_gpio_set(struct gpio_chip *chip, - unsigned int gpio, int value) +static void wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct wcove_gpio *wg = gpiochip_get_data(chip); int reg = to_reg(gpio, CTRL_OUT); diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index b3b4edcdffe0..00ff7b1fa8a1 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -555,6 +555,26 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on) return 0; } +static int zynq_gpio_irq_reqres(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + int ret; + + ret = pm_runtime_get_sync(chip->parent); + if (ret < 0) + return ret; + + return gpiochip_reqres_irq(chip, d->hwirq); +} + +static void zynq_gpio_irq_relres(struct irq_data *d) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + gpiochip_relres_irq(chip, d->hwirq); + pm_runtime_put(chip->parent); +} + /* irq chip descriptor */ static struct irq_chip zynq_gpio_level_irqchip = { .name = DRIVER_NAME, @@ -564,6 +584,8 @@ static struct irq_chip zynq_gpio_level_irqchip = { .irq_unmask = zynq_gpio_irq_unmask, .irq_set_type = zynq_gpio_set_irq_type, .irq_set_wake = zynq_gpio_set_wake, + .irq_request_resources = zynq_gpio_irq_reqres, + .irq_release_resources = zynq_gpio_irq_relres, .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | IRQCHIP_MASK_ON_SUSPEND, }; @@ -576,6 +598,8 @@ static struct irq_chip zynq_gpio_edge_irqchip = { .irq_unmask = zynq_gpio_irq_unmask, .irq_set_type = zynq_gpio_set_irq_type, .irq_set_wake = zynq_gpio_set_wake, + .irq_request_resources = zynq_gpio_irq_reqres, + .irq_release_resources = zynq_gpio_irq_relres, .flags = IRQCHIP_MASK_ON_SUSPEND, }; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 259cf6ab969b..30d0baf7ddae 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -29,7 +29,7 @@ * @irq: Linux IRQ number for the event, for request_ / free_irq * @irqflags: flags to pass to request_irq when requesting the IRQ * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source - * @is_requested: True if request_irq has been done + * @irq_requested:True if request_irq has been done * @desc: gpio_desc for the GPIO pin for this event */ struct acpi_gpio_event { @@ -469,6 +469,9 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio) static int __acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update) { + const enum gpiod_flags mask = + GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT | + GPIOD_FLAGS_BIT_DIR_VAL; int ret = 0; /* @@ -489,7 +492,7 @@ __acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update) if (((*flags & GPIOD_FLAGS_BIT_DIR_SET) && (diff & GPIOD_FLAGS_BIT_DIR_OUT)) || 
((*flags & GPIOD_FLAGS_BIT_DIR_OUT) && (diff & GPIOD_FLAGS_BIT_DIR_VAL))) ret = -EINVAL; - *flags = update; + *flags = (*flags & ~mask) | (update & mask); } return ret; } @@ -530,17 +533,24 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data) if (ares->type != ACPI_RESOURCE_TYPE_GPIO) return 1; - if (lookup->n++ == lookup->index && !lookup->desc) { + if (!lookup->desc) { const struct acpi_resource_gpio *agpio = &ares->data.gpio; - int pin_index = lookup->pin_index; + bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; + int pin_index; + if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint) + lookup->index++; + + if (lookup->n++ != lookup->index) + return 1; + + pin_index = lookup->pin_index; if (pin_index >= agpio->pin_table_length) return 1; lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr, agpio->pin_table[pin_index]); - lookup->info.gpioint = - agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT; + lookup->info.gpioint = gpioint; /* * Polarity and triggering are only specified for GpioInt @@ -892,7 +902,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, * event but only if the access here is ACPI_READ. In that * case we "borrow" the event GPIO instead. */ - if (!found && agpio->sharable == ACPI_SHARED && + if (!found && agpio->shareable == ACPI_SHARED && function == ACPI_READ) { struct acpi_gpio_event *event; diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a6e1891217e2..8b9c3ab70f6e 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -86,7 +86,9 @@ static void of_gpio_flags_quirks(struct device_node *np, if (IS_ENABLED(CONFIG_REGULATOR) && (of_device_is_compatible(np, "regulator-fixed") || of_device_is_compatible(np, "reg-fixed-voltage") || - of_device_is_compatible(np, "regulator-gpio"))) { + (of_device_is_compatible(np, "regulator-gpio") && + !(strcmp(propname, "enable-gpio") && + strcmp(propname, "enable-gpios"))))) { /* * The regulator GPIO handles are specified such that the * presence or absence of "enable-active-high" solely controls @@ -125,7 +127,7 @@ static void of_gpio_flags_quirks(struct device_node *np, for_each_child_of_node(np, child) { ret = of_property_read_u32(child, "reg", &cs); - if (!ret) + if (ret) continue; if (cs == index) { /* @@ -345,6 +347,11 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, if (of_flags & OF_GPIO_TRANSITORY) *flags |= GPIO_TRANSITORY; + if (of_flags & OF_GPIO_PULL_UP) + *flags |= GPIO_PULL_UP; + if (of_flags & OF_GPIO_PULL_DOWN) + *flags |= GPIO_PULL_DOWN; + return desc; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 1651d7f0a303..144af0733581 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) /* Do not leak kernel stack to userspace */ memset(&ge, 0, sizeof(ge)); - ge.timestamp = le->timestamp; + /* + * We may be running from a nested threaded interrupt in which case + * we didn't get the timestamp from lineevent_irq_handler(). 
+ */ + if (!le->timestamp) + ge.timestamp = ktime_get_real_ns(); + else + ge.timestamp = le->timestamp; if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { @@ -1775,6 +1782,43 @@ static const struct irq_domain_ops gpiochip_domain_ops = { .xlate = irq_domain_xlate_twocell, }; +/** + * gpiochip_irq_domain_activate() - Lock a GPIO to be used as an IRQ + * @domain: The IRQ domain used by this IRQ chip + * @data: Outermost irq_data associated with the IRQ + * @reserve: If set, only reserve an interrupt vector instead of assigning one + * + * This function is a wrapper that calls gpiochip_lock_as_irq() and is to be + * used as the activate function for the &struct irq_domain_ops. The host_data + * for the IRQ domain must be the &struct gpio_chip. + */ +int gpiochip_irq_domain_activate(struct irq_domain *domain, + struct irq_data *data, bool reserve) +{ + struct gpio_chip *chip = domain->host_data; + + return gpiochip_lock_as_irq(chip, data->hwirq); +} +EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate); + +/** + * gpiochip_irq_domain_deactivate() - Unlock a GPIO used as an IRQ + * @domain: The IRQ domain used by this IRQ chip + * @data: Outermost irq_data associated with the IRQ + * + * This function is a wrapper that will call gpiochip_unlock_as_irq() and is to + * be used as the deactivate function for the &struct irq_domain_ops. The + * host_data for the IRQ domain must be the &struct gpio_chip. + */ +void gpiochip_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *data) +{ + struct gpio_chip *chip = domain->host_data; + + return gpiochip_unlock_as_irq(chip, data->hwirq); +} +EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate); + static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) { if (!gpiochip_irqchip_irq_valid(chip, offset)) @@ -2518,6 +2562,14 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc); * rely on gpio_request() having been called beforehand. */ +static int gpio_set_config(struct gpio_chip *gc, unsigned offset, + enum pin_config_param mode) +{ + unsigned long config = { PIN_CONF_PACKED(mode, 0) }; + + return gc->set_config ? gc->set_config(gc, offset, config) : -ENOTSUPP; +} + /** * gpiod_direction_input - set the GPIO direction to input * @desc: GPIO to set to input @@ -2565,20 +2617,19 @@ int gpiod_direction_input(struct gpio_desc *desc) if (status == 0) clear_bit(FLAG_IS_OUT, &desc->flags); + if (test_bit(FLAG_PULL_UP, &desc->flags)) + gpio_set_config(chip, gpio_chip_hwgpio(desc), + PIN_CONFIG_BIAS_PULL_UP); + else if (test_bit(FLAG_PULL_DOWN, &desc->flags)) + gpio_set_config(chip, gpio_chip_hwgpio(desc), + PIN_CONFIG_BIAS_PULL_DOWN); + trace_gpio_direction(desc_to_gpio(desc), 1, status); return status; } EXPORT_SYMBOL_GPL(gpiod_direction_input); -static int gpio_set_drive_single_ended(struct gpio_chip *gc, unsigned offset, - enum pin_config_param mode) -{ - unsigned long config = { PIN_CONF_PACKED(mode, 0) }; - - return gc->set_config ? 
gc->set_config(gc, offset, config) : -ENOTSUPP; -} - static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value) { struct gpio_chip *gc = desc->gdev->chip; @@ -2675,8 +2726,8 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) gc = desc->gdev->chip; if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { /* First see if we can enable open drain in hardware */ - ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc), - PIN_CONFIG_DRIVE_OPEN_DRAIN); + ret = gpio_set_config(gc, gpio_chip_hwgpio(desc), + PIN_CONFIG_DRIVE_OPEN_DRAIN); if (!ret) goto set_output_value; /* Emulate open drain by not actively driving the line high */ @@ -2684,16 +2735,16 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) return gpiod_direction_input(desc); } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { - ret = gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc), - PIN_CONFIG_DRIVE_OPEN_SOURCE); + ret = gpio_set_config(gc, gpio_chip_hwgpio(desc), + PIN_CONFIG_DRIVE_OPEN_SOURCE); if (!ret) goto set_output_value; /* Emulate open source by not actively driving the line low */ if (!value) return gpiod_direction_input(desc); } else { - gpio_set_drive_single_ended(gc, gpio_chip_hwgpio(desc), - PIN_CONFIG_DRIVE_PUSH_PULL); + gpio_set_config(gc, gpio_chip_hwgpio(desc), + PIN_CONFIG_DRIVE_PUSH_PULL); } set_output_value: @@ -2725,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) } config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); - return chip->set_config(chip, gpio_chip_hwgpio(desc), config); + return gpio_set_config(chip, gpio_chip_hwgpio(desc), config); } EXPORT_SYMBOL_GPL(gpiod_set_debounce); @@ -2762,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, !transitory); gpio = gpio_chip_hwgpio(desc); - rc = chip->set_config(chip, gpio, packed); + rc = gpio_set_config(chip, gpio, packed); if (rc == -ENOTSUPP) { dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", gpio); @@ -4050,6 +4101,17 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, if (lflags & GPIO_OPEN_SOURCE) set_bit(FLAG_OPEN_SOURCE, &desc->flags); + if ((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) { + gpiod_err(desc, + "both pull-up and pull-down enabled, invalid configuration\n"); + return -EINVAL; + } + + if (lflags & GPIO_PULL_UP) + set_bit(FLAG_PULL_UP, &desc->flags); + else if (lflags & GPIO_PULL_DOWN) + set_bit(FLAG_PULL_DOWN, &desc->flags); + status = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY)); if (status < 0) return status; diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index bc57f0dc5953..078ab17b96bf 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -219,6 +219,8 @@ struct gpio_desc { #define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */ #define FLAG_IS_HOGGED 11 /* GPIO is hogged */ #define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */ +#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */ +#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */ /* Connection label */ const char *label; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4385f00e1d05..bd943a71756c 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -170,10 +170,6 @@ config DRM_KMS_CMA_HELPER bool depends on DRM select DRM_GEM_CMA_HELPER - select DRM_KMS_FB_HELPER - select FB_SYS_FILLRECT - select FB_SYS_COPYAREA - select 
FB_SYS_IMAGEBLIT help Choose this if you need the KMS CMA helper functions diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index ce8d1d384319..1ac55c65eac0 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -51,7 +51,7 @@ obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/ obj-$(CONFIG_DRM) += drm.o obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o -obj-$(CONFIG_DRM_ARM) += arm/ +obj-y += arm/ obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_SCHED) += scheduler/ obj-$(CONFIG_DRM_TDFX) += tdfx/ @@ -81,7 +81,7 @@ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ obj-$(CONFIG_DRM_ARMADA) += armada/ obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/ -obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/ +obj-y += rcar-du/ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ obj-y += omapdrm/ obj-$(CONFIG_DRM_SUN4I) += sun4i/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index f76bcb9c45e4..466da5954a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -57,7 +57,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ - ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o + dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index bcef6ea4bcf9..8d0d7f3dd5fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -411,6 +411,8 @@ struct amdgpu_fpriv { struct amdgpu_ctx_mgr ctx_mgr; }; +int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); + int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, @@ -542,6 +544,11 @@ struct amdgpu_asic_funcs { bool (*need_full_reset)(struct amdgpu_device *adev); /* initialize doorbell layout for specific asic*/ void (*init_doorbell_index)(struct amdgpu_device *adev); + /* PCIe bandwidth usage */ + void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1); + /* do we need to reset the asic at init time (e.g., kexec) */ + bool (*need_reset_on_init)(struct amdgpu_device *adev); }; /* @@ -634,7 +641,7 @@ struct amdgpu_nbio_funcs { void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); u32 (*get_memsize)(struct amdgpu_device *adev); void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, - bool use_doorbell, int doorbell_index); + bool use_doorbell, int doorbell_index, int doorbell_size); void (*enable_doorbell_aperture)(struct amdgpu_device *adev, bool enable); void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, @@ -1042,6 +1049,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) +#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) +#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev)) /* 
Common functions */ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 2dfaf158ef07..fe1d7368c1e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -28,8 +28,6 @@ #include #include -const struct kgd2kfd_calls *kgd2kfd; - static const unsigned int compute_vmid_bitmap = 0xFF00; /* Total memory size in system memory and all GPU VRAM. Used to @@ -47,12 +45,9 @@ int amdgpu_amdkfd_init(void) amdgpu_amdkfd_total_mem_size *= si.mem_unit; #ifdef CONFIG_HSA_AMD - ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd); - if (ret) - kgd2kfd = NULL; + ret = kgd2kfd_init(); amdgpu_amdkfd_gpuvm_init_mem_limits(); #else - kgd2kfd = NULL; ret = -ENOENT; #endif @@ -61,17 +56,13 @@ int amdgpu_amdkfd_init(void) void amdgpu_amdkfd_fini(void) { - if (kgd2kfd) - kgd2kfd->exit(); + kgd2kfd_exit(); } void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { const struct kfd2kgd_calls *kfd2kgd; - if (!kgd2kfd) - return; - switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_KAVERI: @@ -98,8 +89,8 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) return; } - adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, + adev->pdev, kfd2kgd); if (adev->kfd.dev) amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; @@ -140,7 +131,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) { - int i, n; + int i; int last_valid_bit; if (adev->kfd.dev) { @@ -151,7 +142,9 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) .gpuvm_size = min(adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GMC_HOLE_START), - .drm_render_minor = adev->ddev->render->index + .drm_render_minor = adev->ddev->render->index, + .sdma_doorbell_idx = adev->doorbell_index.sdma_engine, + }; /* this is going to have a few of the MSBs set that we need to @@ -181,44 +174,29 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) &gpu_resources.doorbell_aperture_size, &gpu_resources.doorbell_start_offset); - if (adev->asic_type < CHIP_VEGA10) { - kgd2kfd->device_init(adev->kfd.dev, &gpu_resources); - return; - } - - n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8; - - for (i = 0; i < n; i += 2) { - /* On SOC15 the BIF is involved in routing - * doorbells using the low 12 bits of the - * address. Communicate the assignments to - * KFD. KFD uses two doorbell pages per - * process in case of 64-bit doorbells so we - * can use each doorbell assignment twice. - */ - gpu_resources.sdma_doorbell[0][i] = - adev->doorbell_index.sdma_engine0 + (i >> 1); - gpu_resources.sdma_doorbell[0][i+1] = - adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1); - gpu_resources.sdma_doorbell[1][i] = - adev->doorbell_index.sdma_engine1 + (i >> 1); - gpu_resources.sdma_doorbell[1][i+1] = - adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1); - } - /* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for - * SDMA, IH and VCN. So don't use them for the CP. + /* Since SOC15, BIF starts to statically use the + * lower 12 bits of doorbell addresses for routing + * based on settings in registers like + * SDMA0_DOORBELL_RANGE etc.. + * In order to route a doorbell to CP engine, the lower + * 12 bits of its address has to be outside the range + * set for SDMA, VCN, and IH blocks. 
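
Illustrative aside: the range handed to KFD above marks the doorbell indices that the BIF statically routes to SDMA, VCN and IH, so CP user-queue doorbells have to come from outside it. A minimal sketch of the containment test a consumer could apply; the first/last values below are placeholders, not the real doorbell assignments:

#include <stdbool.h>
#include <stdio.h>

static bool doorbell_is_non_cp(unsigned int index,
			       unsigned int first_non_cp,
			       unsigned int last_non_cp)
{
	return index >= first_non_cp && index <= last_non_cp;
}

int main(void)
{
	/* Example values only; the real ones come from amdgpu_doorbell_index. */
	unsigned int first = 0xE0, last = 0xFF;

	printf("0xF0 reserved for non-CP: %d\n", doorbell_is_non_cp(0xF0, first, last));
	printf("0x80 reserved for non-CP: %d\n", doorbell_is_non_cp(0x80, first, last));
	return 0;
}
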
*/ - gpu_resources.reserved_doorbell_mask = 0x1e0; - gpu_resources.reserved_doorbell_val = 0x0e0; + if (adev->asic_type >= CHIP_VEGA10) { + gpu_resources.non_cp_doorbells_start = + adev->doorbell_index.first_non_cp; + gpu_resources.non_cp_doorbells_end = + adev->doorbell_index.last_non_cp; + } - kgd2kfd->device_init(adev->kfd.dev, &gpu_resources); + kgd2kfd_device_init(adev->kfd.dev, &gpu_resources); } } void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev) { if (adev->kfd.dev) { - kgd2kfd->device_exit(adev->kfd.dev); + kgd2kfd_device_exit(adev->kfd.dev); adev->kfd.dev = NULL; } } @@ -227,13 +205,13 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry) { if (adev->kfd.dev) - kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry); + kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry); } void amdgpu_amdkfd_suspend(struct amdgpu_device *adev) { if (adev->kfd.dev) - kgd2kfd->suspend(adev->kfd.dev); + kgd2kfd_suspend(adev->kfd.dev); } int amdgpu_amdkfd_resume(struct amdgpu_device *adev) @@ -241,7 +219,7 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev) int r = 0; if (adev->kfd.dev) - r = kgd2kfd->resume(adev->kfd.dev); + r = kgd2kfd_resume(adev->kfd.dev); return r; } @@ -251,7 +229,7 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev) int r = 0; if (adev->kfd.dev) - r = kgd2kfd->pre_reset(adev->kfd.dev); + r = kgd2kfd_pre_reset(adev->kfd.dev); return r; } @@ -261,7 +239,7 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev) int r = 0; if (adev->kfd.dev) - r = kgd2kfd->post_reset(adev->kfd.dev); + r = kgd2kfd_post_reset(adev->kfd.dev); return r; } @@ -619,4 +597,47 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) { return NULL; } + +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, + const struct kfd2kgd_calls *f2g) +{ + return NULL; +} + +bool kgd2kfd_device_init(struct kfd_dev *kfd, + const struct kgd2kfd_shared_resources *gpu_resources) +{ + return false; +} + +void kgd2kfd_device_exit(struct kfd_dev *kfd) +{ +} + +void kgd2kfd_exit(void) +{ +} + +void kgd2kfd_suspend(struct kfd_dev *kfd) +{ +} + +int kgd2kfd_resume(struct kfd_dev *kfd) +{ + return 0; +} + +int kgd2kfd_pre_reset(struct kfd_dev *kfd) +{ + return 0; +} + +int kgd2kfd_post_reset(struct kfd_dev *kfd) +{ + return 0; +} + +void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) +{ +} #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 70429f7aa9a8..0b31a1859023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -33,7 +33,6 @@ #include "amdgpu_sync.h" #include "amdgpu_vm.h" -extern const struct kgd2kfd_calls *kgd2kfd; extern uint64_t amdgpu_amdkfd_total_mem_size; struct amdgpu_device; @@ -214,4 +213,22 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); +/* KGD2KFD callbacks */ +int kgd2kfd_init(void); +void kgd2kfd_exit(void); +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, + const struct kfd2kgd_calls *f2g); +bool kgd2kfd_device_init(struct kfd_dev *kfd, + const struct kgd2kfd_shared_resources *gpu_resources); +void kgd2kfd_device_exit(struct kfd_dev *kfd); +void kgd2kfd_suspend(struct kfd_dev *kfd); +int kgd2kfd_resume(struct kfd_dev *kfd); +int kgd2kfd_pre_reset(struct kfd_dev *kfd); +int kgd2kfd_post_reset(struct kfd_dev *kfd); +void kgd2kfd_interrupt(struct 
kfd_dev *kfd, const void *ih_ring_entry); +int kgd2kfd_quiesce_mm(struct mm_struct *mm); +int kgd2kfd_resume_mm(struct mm_struct *mm); +int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, + struct dma_fence *fence); + #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c index 574c1181ae9a..3107b9575929 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c @@ -122,7 +122,7 @@ static bool amdkfd_fence_enable_signaling(struct dma_fence *f) if (dma_fence_is_signaled(f)) return true; - if (!kgd2kfd->schedule_evict_and_restore_process(fence->mm, f)) + if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f)) return true; return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index be1ab43473c6..1921dec3df7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -204,38 +204,25 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) } -/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence(s) from BO's +/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's * reservation object. * * @bo: [IN] Remove eviction fence(s) from this BO - * @ef: [IN] If ef is specified, then this eviction fence is removed if it + * @ef: [IN] This eviction fence is removed if it * is present in the shared list. - * @ef_list: [OUT] Returns list of eviction fences. These fences are removed - * from BO's reservation object shared list. - * @ef_count: [OUT] Number of fences in ef_list. * - * NOTE: If called with ef_list, then amdgpu_amdkfd_add_eviction_fence must be - * called to restore the eviction fences and to avoid memory leak. This is - * useful for shared BOs. * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held. */ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, - struct amdgpu_amdkfd_fence *ef, - struct amdgpu_amdkfd_fence ***ef_list, - unsigned int *ef_count) + struct amdgpu_amdkfd_fence *ef) { struct reservation_object *resv = bo->tbo.resv; struct reservation_object_list *old, *new; unsigned int i, j, k; - if (!ef && !ef_list) + if (!ef) return -EINVAL; - if (ef_list) { - *ef_list = NULL; - *ef_count = 0; - } - old = reservation_object_get_list(resv); if (!old) return 0; @@ -254,8 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, f = rcu_dereference_protected(old->shared[i], reservation_object_held(resv)); - if ((ef && f->context == ef->base.context) || - (!ef && to_amdgpu_amdkfd_fence(f))) + if (f->context == ef->base.context) RCU_INIT_POINTER(new->shared[--j], f); else RCU_INIT_POINTER(new->shared[k++], f); @@ -263,21 +249,6 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, new->shared_max = old->shared_max; new->shared_count = k; - if (!ef) { - unsigned int count = old->shared_count - j; - - /* Alloc memory for count number of eviction fence pointers. 
- * Fill the ef_list array and ef_count - */ - *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL); - *ef_count = count; - - if (!*ef_list) { - kfree(new); - return -ENOMEM; - } - } - /* Install the new fence list, seqcount provides the barriers */ preempt_disable(); write_seqcount_begin(&resv->seq); @@ -291,46 +262,13 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, f = rcu_dereference_protected(new->shared[i], reservation_object_held(resv)); - if (!ef) - (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f); - else - dma_fence_put(f); + dma_fence_put(f); } kfree_rcu(old, rcu); return 0; } -/* amdgpu_amdkfd_add_eviction_fence - Adds eviction fence(s) back into BO's - * reservation object. - * - * @bo: [IN] Add eviction fences to this BO - * @ef_list: [IN] List of eviction fences to be added - * @ef_count: [IN] Number of fences in ef_list. - * - * NOTE: Must call amdgpu_amdkfd_remove_eviction_fence before calling this - * function. - */ -static void amdgpu_amdkfd_add_eviction_fence(struct amdgpu_bo *bo, - struct amdgpu_amdkfd_fence **ef_list, - unsigned int ef_count) -{ - int i; - - if (!ef_list || !ef_count) - return; - - for (i = 0; i < ef_count; i++) { - amdgpu_bo_fence(bo, &ef_list[i]->base, true); - /* Re-adding the fence takes an additional reference. Drop that - * reference. - */ - dma_fence_put(&ef_list[i]->base); - } - - kfree(ef_list); -} - static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, bool wait) { @@ -346,18 +284,8 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (ret) goto validate_fail; - if (wait) { - struct amdgpu_amdkfd_fence **ef_list; - unsigned int ef_count; - - ret = amdgpu_amdkfd_remove_eviction_fence(bo, NULL, &ef_list, - &ef_count); - if (ret) - goto validate_fail; - - ttm_bo_wait(&bo->tbo, false, false); - amdgpu_amdkfd_add_eviction_fence(bo, ef_list, ef_count); - } + if (wait) + amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); validate_fail: return ret; @@ -444,7 +372,6 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, { int ret; struct kfd_bo_va_list *bo_va_entry; - struct amdgpu_bo *pd = vm->root.base.bo; struct amdgpu_bo *bo = mem->bo; uint64_t va = mem->va; struct list_head *list_bo_va = &mem->bo_va_list; @@ -484,14 +411,8 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, *p_bo_va_entry = bo_va_entry; /* Allocate new page tables if needed and validate - * them. Clearing of new page tables and validate need to wait - * on move fences. We don't want that to trigger the eviction - * fence, so remove it temporarily. + * them. 
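
Illustrative aside: with the ef_list plumbing removed, amdgpu_amdkfd_remove_eviction_fence above boils down to dropping exactly those shared fences whose context matches the eviction fence. A toy standalone version of that filtering step, with plain integers standing in for dma_fence contexts:

#include <stdio.h>

/* Keep every entry except those belonging to ef_context; return the
 * new count, mirroring how shared_count shrinks in the hunk above. */
static unsigned int filter_out_context(unsigned long long *contexts,
				       unsigned int count,
				       unsigned long long ef_context)
{
	unsigned int i, kept = 0;

	for (i = 0; i < count; i++) {
		if (contexts[i] != ef_context)
			contexts[kept++] = contexts[i];
	}
	return kept;
}

int main(void)
{
	unsigned long long shared[] = { 10, 42, 10, 7 };
	unsigned int n = filter_out_context(shared, 4, 10);

	printf("%u fences kept\n", n);	/* 2 */
	return 0;
}
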
*/ - amdgpu_amdkfd_remove_eviction_fence(pd, - vm->process_info->eviction_fence, - NULL, NULL); - ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo)); if (ret) { pr_err("Failed to allocate pts, err=%d\n", ret); @@ -504,13 +425,9 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, goto err_alloc_pts; } - /* Add the eviction fence back */ - amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true); - return 0; err_alloc_pts: - amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true); amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va); list_del(&bo_va_entry->bo_list); err_vmadd: @@ -809,24 +726,11 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, { struct amdgpu_bo_va *bo_va = entry->bo_va; struct amdgpu_vm *vm = bo_va->base.vm; - struct amdgpu_bo *pd = vm->root.base.bo; - /* Remove eviction fence from PD (and thereby from PTs too as - * they share the resv. object). Otherwise during PT update - * job (see amdgpu_vm_bo_update_mapping), eviction fence would - * get added to job->sync object and job execution would - * trigger the eviction fence. - */ - amdgpu_amdkfd_remove_eviction_fence(pd, - vm->process_info->eviction_fence, - NULL, NULL); amdgpu_vm_bo_unmap(adev, bo_va, entry->va); amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update); - /* Add the eviction fence back */ - amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true); - amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false); return 0; @@ -1002,7 +906,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, pr_err("validate_pt_pd_bos() failed\n"); goto validate_pd_fail; } - ret = ttm_bo_wait(&vm->root.base.bo->tbo, false, false); + amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false); if (ret) goto wait_pd_fail; amdgpu_bo_fence(vm->root.base.bo, @@ -1389,8 +1293,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( * attached */ amdgpu_amdkfd_remove_eviction_fence(mem->bo, - process_info->eviction_fence, - NULL, NULL); + process_info->eviction_fence); pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue)); @@ -1617,8 +1520,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( if (mem->mapped_to_gpu_memory == 0 && !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count) amdgpu_amdkfd_remove_eviction_fence(mem->bo, - process_info->eviction_fence, - NULL, NULL); + process_info->eviction_fence); unreserve_out: unreserve_bo_and_vms(&ctx, false, false); @@ -1679,7 +1581,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, } amdgpu_amdkfd_remove_eviction_fence( - bo, mem->process_info->eviction_fence, NULL, NULL); + bo, mem->process_info->eviction_fence); list_del_init(&mem->validate_list.head); if (size) @@ -1790,7 +1692,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, evicted_bos = atomic_inc_return(&process_info->evicted_bos); if (evicted_bos == 1) { /* First eviction, stop the queues */ - r = kgd2kfd->quiesce_mm(mm); + r = kgd2kfd_quiesce_mm(mm); if (r) pr_err("Failed to quiesce KFD\n"); schedule_delayed_work(&process_info->restore_userptr_work, @@ -1945,16 +1847,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) amdgpu_sync_create(&sync); - /* Avoid triggering eviction fences when unmapping invalid - * userptr BOs (waits for all fences, doesn't use - * FENCE_OWNER_VM) - */ - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) - amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo, - 
process_info->eviction_fence, - NULL, NULL); - ret = process_validate_vms(process_info); if (ret) goto unreserve_out; @@ -2015,10 +1907,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) ret = process_update_pds(process_info, &sync); unreserve_out: - list_for_each_entry(peer_vm, &process_info->vm_list_head, - vm_list_node) - amdgpu_bo_fence(peer_vm->root.base.bo, - &process_info->eviction_fence->base, true); ttm_eu_backoff_reservation(&ticket, &resv_list); amdgpu_sync_wait(&sync, false); amdgpu_sync_free(&sync); @@ -2082,7 +1970,7 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) evicted_bos) goto unlock_out; evicted_bos = 0; - if (kgd2kfd->resume_mm(mm)) { + if (kgd2kfd_resume_mm(mm)) { pr_err("%s: Failed to resume KFD\n", __func__); /* No recovery from this failure. Probably the CP is * hanging. No point trying again. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 69ad6ec0a4f3..bf04c12bd324 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -25,8 +25,8 @@ */ #include #include -#include #include +#include #include #include "amdgpu.h" #include "atom.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1c49b8266d69..52a5e4fdc95b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -214,6 +214,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs case AMDGPU_CHUNK_ID_DEPENDENCIES: case AMDGPU_CHUNK_ID_SYNCOBJ_IN: case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: + case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: break; default: @@ -1090,6 +1091,15 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); + + if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { + struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); + struct dma_fence *old = fence; + + fence = dma_fence_get(&s_fence->scheduled); + dma_fence_put(old); + } + if (IS_ERR(fence)) { r = PTR_ERR(fence); amdgpu_ctx_put(ctx); @@ -1177,7 +1187,8 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, chunk = &p->chunks[i]; - if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) { + if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES || + chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { r = amdgpu_cs_process_fence_dep(p, chunk); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index d85184b5b35c..7b526593eb77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -124,6 +124,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; unsigned num_rings; + unsigned num_rqs = 0; switch (i) { case AMDGPU_HW_IP_GFX: @@ -166,12 +167,16 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, break; } - for (j = 0; j < num_rings; ++j) - rqs[j] = &rings[j]->sched.sched_rq[priority]; + for (j = 0; j < num_rings; ++j) { + if (!rings[j]->adev) + continue; + + rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority]; + } for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) r = drm_sched_entity_init(&ctx->entities[i][j].entity, - rqs, num_rings, &ctx->guilty); + rqs, num_rqs, &ctx->guilty); if (r) goto error_cleanup_entities; } diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index dd9a4fb9ce39..4ae3ff9a1d4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -158,9 +158,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, while (size) { uint32_t value; - if (*pos > adev->rmmio_size) - goto end; - if (read) { value = RREG32(*pos >> 2); r = put_user(value, (uint32_t *)buf); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7ff3a28fc903..4f8fb4ecde34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -30,8 +30,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -1645,7 +1645,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) { DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); - return r; + goto init_failed; } adev->ip_blocks[i].status.sw = true; @@ -1654,17 +1654,17 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); - return r; + goto init_failed; } r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); - return r; + goto init_failed; } r = amdgpu_device_wb_init(adev); if (r) { DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); - return r; + goto init_failed; } adev->ip_blocks[i].status.hw = true; @@ -1675,7 +1675,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) AMDGPU_CSA_SIZE); if (r) { DRM_ERROR("allocate CSA failed %d\n", r); - return r; + goto init_failed; } } } @@ -1683,30 +1683,32 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/ if (r) - return r; + goto init_failed; r = amdgpu_device_ip_hw_init_phase1(adev); if (r) - return r; + goto init_failed; r = amdgpu_device_fw_loading(adev); if (r) - return r; + goto init_failed; r = amdgpu_device_ip_hw_init_phase2(adev); if (r) - return r; + goto init_failed; if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); +init_failed: if (amdgpu_sriov_vf(adev)) { - amdgpu_virt_init_data_exchange(adev); + if (!r) + amdgpu_virt_init_data_exchange(adev); amdgpu_virt_release_full_gpu(adev, true); } - return 0; + return r; } /** @@ -2133,7 +2135,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) continue; r = block->version->funcs->hw_init(adev); - DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); + DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; } @@ -2167,7 +2169,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) continue; r = block->version->funcs->hw_init(adev); - DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); + DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; } @@ -2548,6 +2550,17 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* detect if we are with an SRIOV vbios */ amdgpu_device_detect_sriov_bios(adev); + /* check if we need to reset the asic + * E.g., driver was not cleanly unloaded previously, etc. 
+ */ + if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) { + r = amdgpu_asic_reset(adev); + if (r) { + dev_err(adev->dev, "asic reset on init failed\n"); + goto failed; + } + } + /* Post card if necessary */ if (amdgpu_device_need_post(adev)) { if (!adev->bios) { @@ -2612,6 +2625,8 @@ fence_driver_init: } dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); + if (amdgpu_virt_request_full_gpu(adev, false)) + amdgpu_virt_release_full_gpu(adev, false); goto failed; } @@ -2707,7 +2722,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ if (!amdgpu_device_has_dc_support(adev)) - drm_crtc_force_disable_all(adev->ddev); + drm_helper_force_disable_all(adev->ddev); else drm_atomic_helper_shutdown(adev->ddev); } @@ -3298,17 +3313,15 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, if (!ring || !ring->sched.thread) continue; - kthread_park(ring->sched.thread); - - if (job && job->base.sched != &ring->sched) - continue; - - drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL); + drm_sched_stop(&ring->sched); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } + if(job) + drm_sched_increase_karma(&job->base); + if (!amdgpu_sriov_vf(adev)) { @@ -3454,14 +3467,10 @@ static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev, if (!ring || !ring->sched.thread) continue; - /* only need recovery sched of the given job's ring - * or all rings (in the case @job is NULL) - * after above amdgpu_reset accomplished - */ - if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res) - drm_sched_job_recovery(&ring->sched); + if (!adev->asic_reset_res) + drm_sched_resubmit_jobs(&ring->sched); - kthread_unpark(ring->sched.thread); + drm_sched_start(&ring->sched, !adev->asic_reset_res); } if (!amdgpu_device_has_dc_support(adev)) { @@ -3521,9 +3530,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, * by different nodes. No point also since the one node already executing * reset will also reset all the other nodes in the hive. */ - hive = amdgpu_get_xgmi_hive(adev); + hive = amdgpu_get_xgmi_hive(adev, 0); if (hive && adev->gmc.xgmi.num_physical_nodes > 1 && - !mutex_trylock(&hive->hive_lock)) + !mutex_trylock(&hive->reset_lock)) return 0; /* Start with adev pre asic reset first for soft reset check.*/ @@ -3602,13 +3611,45 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. 
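
Illustrative aside: the recovery path above allows only one node per XGMI hive to run the reset; a caller that loses the mutex_trylock on hive->reset_lock returns immediately, since the winning node resets every node in the hive anyway. A tiny userspace analogue of that single-resetter policy, with a pthread mutex standing in for the hive lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hive_reset_lock = PTHREAD_MUTEX_INITIALIZER;

static bool try_hive_reset(int node)
{
	if (pthread_mutex_trylock(&hive_reset_lock) != 0) {
		/* Someone else is already resetting the whole hive. */
		printf("node %d: reset in progress elsewhere, skipping\n", node);
		return false;
	}
	printf("node %d: resetting all nodes in the hive\n", node);
	pthread_mutex_unlock(&hive_reset_lock);
	return true;
}

int main(void)
{
	try_hive_reset(0);
	return 0;
}
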
*/ } if (hive && adev->gmc.xgmi.num_physical_nodes > 1) - mutex_unlock(&hive->hive_lock); + mutex_unlock(&hive->reset_lock); if (r) dev_info(adev->dev, "GPU reset end with ret = %d\n", r); return r; } +static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev, + enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + struct pci_dev *pdev = adev->pdev; + enum pci_bus_speed cur_speed; + enum pcie_link_width cur_width; + + *speed = PCI_SPEED_UNKNOWN; + *width = PCIE_LNK_WIDTH_UNKNOWN; + + while (pdev) { + cur_speed = pcie_get_speed_cap(pdev); + cur_width = pcie_get_width_cap(pdev); + + if (cur_speed != PCI_SPEED_UNKNOWN) { + if (*speed == PCI_SPEED_UNKNOWN) + *speed = cur_speed; + else if (cur_speed < *speed) + *speed = cur_speed; + } + + if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) { + if (*width == PCIE_LNK_WIDTH_UNKNOWN) + *width = cur_width; + else if (cur_width < *width) + *width = cur_width; + } + pdev = pci_upstream_bridge(pdev); + } +} + /** * amdgpu_device_get_pcie_info - fence pcie info about the PCIE slot * @@ -3621,8 +3662,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) { struct pci_dev *pdev; - enum pci_bus_speed speed_cap; - enum pcie_link_width link_width; + enum pci_bus_speed speed_cap, platform_speed_cap; + enum pcie_link_width platform_link_width; if (amdgpu_pcie_gen_cap) adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; @@ -3639,6 +3680,12 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) return; } + if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask) + return; + + amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap, + &platform_link_width); + if (adev->pm.pcie_gen_mask == 0) { /* asic caps */ pdev = adev->pdev; @@ -3664,22 +3711,20 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1; } /* platform caps */ - pdev = adev->ddev->pdev->bus->self; - speed_cap = pcie_get_speed_cap(pdev); - if (speed_cap == PCI_SPEED_UNKNOWN) { + if (platform_speed_cap == PCI_SPEED_UNKNOWN) { adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); } else { - if (speed_cap == PCIE_SPEED_16_0GT) + if (platform_speed_cap == PCIE_SPEED_16_0GT) adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4); - else if (speed_cap == PCIE_SPEED_8_0GT) + else if (platform_speed_cap == PCIE_SPEED_8_0GT) adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3); - else if (speed_cap == PCIE_SPEED_5_0GT) + else if (platform_speed_cap == PCIE_SPEED_5_0GT) adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2); else @@ -3688,12 +3733,10 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev) } } if (adev->pm.pcie_mlw_mask == 0) { - pdev = adev->ddev->pdev->bus->self; - link_width = pcie_get_width_cap(pdev); - if (link_width == PCIE_LNK_WIDTH_UNKNOWN) { + if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) { adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK; } else { - switch (link_width) { + switch (platform_link_width) { case PCIE_LNK_X32: adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index be620b29f4aa..68959b923f89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -51,14 +51,7 @@ struct amdgpu_doorbell_index { uint32_t userqueue_start; uint32_t userqueue_end; uint32_t gfx_ring0; - uint32_t sdma_engine0; - uint32_t sdma_engine1; - uint32_t sdma_engine2; - uint32_t sdma_engine3; - uint32_t sdma_engine4; - uint32_t sdma_engine5; - uint32_t sdma_engine6; - uint32_t sdma_engine7; + uint32_t sdma_engine[8]; uint32_t ih; union { struct { @@ -78,7 +71,11 @@ struct amdgpu_doorbell_index { uint32_t vce_ring6_7; } uvd_vce; }; + uint32_t first_non_cp; + uint32_t last_non_cp; uint32_t max_assignment; + /* Per engine SDMA doorbell size in dword */ + uint32_t sdma_doorbell_range; }; typedef enum _AMDGPU_DOORBELL_ASSIGNMENT @@ -148,6 +145,10 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D, AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E, AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F, + + AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0, + AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7, + AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F, AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF } AMDGPU_VEGA20_DOORBELL_ASSIGNMENT; @@ -227,6 +228,9 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, + AMDGPU_DOORBELL64_FIRST_NON_CP = AMDGPU_DOORBELL64_sDMA_ENGINE0, + AMDGPU_DOORBELL64_LAST_NON_CP = AMDGPU_DOORBELL64_VCE_RING6_7, + AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, AMDGPU_DOORBELL64_INVALID = 0xFFFF } AMDGPU_DOORBELL64_ASSIGNMENT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 1c4595562f8f..344967df3137 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -184,61 +184,6 @@ u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) return vrefresh; } -void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, - u32 *p, u32 *u) -{ - u32 b_c = 0; - u32 i_c; - u32 tmp; - - i_c = (i * r_c) / 100; - tmp = i_c >> p_b; - - while (tmp) { - b_c++; - tmp >>= 1; - } - - *u = (b_c + 1) / 2; - *p = i_c / (1 << (2 * (*u))); -} - -int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) -{ - u32 k, a, ah, al; - u32 t1; - - if ((fl == 0) || (fh == 0) || (fl > fh)) - return -EINVAL; - - k = (100 * fh) / fl; - t1 = (t * (k - 100)); - a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); - a = (a + 5) / 10; - ah = ((a * t) + 5000) / 10000; - al = a - ah; - - *th = t - ah; - *tl = t + al; - - return 0; -} - -bool amdgpu_is_uvd_state(u32 class, u32 class2) -{ - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - return true; - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - return true; - return false; -} - bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) { switch (sensor) { @@ -949,39 +894,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, return AMDGPU_PCIE_GEN1; } -u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev, - u16 asic_lanes, - u16 default_lanes) -{ - switch (asic_lanes) { - case 0: - default: - return default_lanes; - case 1: - return 1; - case 2: - return 2; - case 4: - return 
4; - case 8: - return 8; - case 12: - return 12; - case 16: - return 16; - } -} - -u8 amdgpu_encode_pci_lane_width(u32 lanes) -{ - u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 }; - - if (lanes > 16) - return 0; - - return encoded_lanes[lanes]; -} - struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index f972cd156795..e871e022c129 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -364,6 +364,14 @@ enum amdgpu_pcie_gen { ((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\ (adev)->powerplay.pp_handle)) +#define amdgpu_dpm_get_ppfeature_status(adev, buf) \ + ((adev)->powerplay.pp_funcs->get_ppfeature_status(\ + (adev)->powerplay.pp_handle, (buf))) + +#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \ + ((adev)->powerplay.pp_funcs->set_ppfeature_status(\ + (adev)->powerplay.pp_handle, (ppfeatures))) + struct amdgpu_dpm { struct amdgpu_ps *ps; /* number of valid power states */ @@ -478,10 +486,6 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); -bool amdgpu_is_uvd_state(u32 class, u32 class2); -void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, - u32 *p, u32 *u); -int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th); bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor); @@ -497,11 +501,6 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, enum amdgpu_pcie_gen asic_gen, enum amdgpu_pcie_gen default_gen); -u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev, - u16 asic_lanes, - u16 default_lanes); -u8 amdgpu_encode_pci_lane_width(u32 lanes); - struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index c806f984bcc5..7419ea8a388b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -32,7 +32,7 @@ #include #include #include -#include +#include #include "amdgpu.h" #include "amdgpu_irq.h" @@ -71,9 +71,12 @@ * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk). * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE. * - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation. + * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES + * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID + * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE. 
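
Illustrative aside: the new chunk and flags listed above only exist from the bumped interface versions onward, so userspace would normally gate their use on the minor version the kernel reports. A hedged sketch using libdrm's drmGetVersion(), assumed available; the render node path is an example only and none of this is part of the patch:

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

static bool amdgpu_supports_sched_deps(int drm_fd)
{
	drmVersionPtr ver = drmGetVersion(drm_fd);
	bool ok;

	if (!ver)
		return false;

	/* AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES appeared in 3.28. */
	ok = ver->version_major > 3 ||
	     (ver->version_major == 3 && ver->version_minor >= 28);
	drmFreeVersion(ver);
	return ok;
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* example node */

	if (fd < 0)
		return 1;
	printf("scheduled dependencies supported: %d\n",
	       amdgpu_supports_sched_deps(fd));
	close(fd);
	return 0;
}
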
*/ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 27 +#define KMS_DRIVER_MINOR 30 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; @@ -1176,6 +1179,22 @@ static const struct file_operations amdgpu_driver_kms_fops = { #endif }; +int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) +{ + struct drm_file *file; + + if (!filp) + return -EINVAL; + + if (filp->f_op != &amdgpu_driver_kms_fops) { + return -EINVAL; + } + + file = filp->private_data; + *fpriv = file->driver_priv; + return 0; +} + static bool amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq, int *vpos, int *hpos, @@ -1189,7 +1208,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_ATOMIC | - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | + DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index ecbcefe49a98..f89f5734d985 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -37,6 +37,8 @@ struct amdgpu_gds { struct amdgpu_gds_asic_info mem; struct amdgpu_gds_asic_info gws; struct amdgpu_gds_asic_info oa; + uint32_t gds_compute_max_wave_id; + /* At present, GDS, GWS and OA resources for gfx (graphics) * is always pre-allocated and available for graphics operation. * Such resource is shared between all gfx clients. diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index f4f00217546e..d21dd2f369da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -54,10 +54,6 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, memset(&bp, 0, sizeof(bp)); *obj = NULL; - /* At least align on page size */ - if (alignment < PAGE_SIZE) { - alignment = PAGE_SIZE; - } bp.size = size; bp.byte_align = alignment; @@ -244,9 +240,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, return -EINVAL; } flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - /* GDS allocations must be DW aligned */ - if (args->in.domains & AMDGPU_GEM_DOMAIN_GDS) - size = ALIGN(size, 4); } if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index c48207b377bc..0b8ef2d27d6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -202,12 +202,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, amdgpu_asic_flush_hdp(adev, ring); } + if (need_ctx_switch) + status |= AMDGPU_HAVE_CTX_SWITCH; + skip_preamble = ring->current_ctx == fence_ctx; if (job && ring->funcs->emit_cntxcntl) { - if (need_ctx_switch) - status |= AMDGPU_HAVE_CTX_SWITCH; status |= job->preamble_status; - amdgpu_ring_emit_cntxcntl(ring, status); } @@ -221,8 +221,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */ continue; - amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch); - need_ctx_switch = false; + amdgpu_ring_emit_ib(ring, job, ib, status); + status &= ~AMDGPU_HAVE_CTX_SWITCH; } if (ring->funcs->emit_tmz) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 
8af67f649660..1c50be3ab8a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -52,6 +52,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, ih->use_bus_addr = use_bus_addr; if (use_bus_addr) { + dma_addr_t dma_addr; + if (ih->ring) return 0; @@ -59,21 +61,26 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, * add them to the end of the ring allocation. */ ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8, - &ih->rb_dma_addr, GFP_KERNEL); + &dma_addr, GFP_KERNEL); if (ih->ring == NULL) return -ENOMEM; memset((void *)ih->ring, 0, ih->ring_size + 8); - ih->wptr_offs = (ih->ring_size / 4) + 0; - ih->rptr_offs = (ih->ring_size / 4) + 1; + ih->gpu_addr = dma_addr; + ih->wptr_addr = dma_addr + ih->ring_size; + ih->wptr_cpu = &ih->ring[ih->ring_size / 4]; + ih->rptr_addr = dma_addr + ih->ring_size + 4; + ih->rptr_cpu = &ih->ring[(ih->ring_size / 4) + 1]; } else { - r = amdgpu_device_wb_get(adev, &ih->wptr_offs); + unsigned wptr_offs, rptr_offs; + + r = amdgpu_device_wb_get(adev, &wptr_offs); if (r) return r; - r = amdgpu_device_wb_get(adev, &ih->rptr_offs); + r = amdgpu_device_wb_get(adev, &rptr_offs); if (r) { - amdgpu_device_wb_free(adev, ih->wptr_offs); + amdgpu_device_wb_free(adev, wptr_offs); return r; } @@ -82,10 +89,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, &ih->ring_obj, &ih->gpu_addr, (void **)&ih->ring); if (r) { - amdgpu_device_wb_free(adev, ih->rptr_offs); - amdgpu_device_wb_free(adev, ih->wptr_offs); + amdgpu_device_wb_free(adev, rptr_offs); + amdgpu_device_wb_free(adev, wptr_offs); return r; } + + ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4; + ih->wptr_cpu = &adev->wb.wb[wptr_offs]; + ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4; + ih->rptr_cpu = &adev->wb.wb[rptr_offs]; } return 0; } @@ -109,13 +121,13 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) * add them to the end of the ring allocation. */ dma_free_coherent(adev->dev, ih->ring_size + 8, - (void *)ih->ring, ih->rb_dma_addr); + (void *)ih->ring, ih->gpu_addr); ih->ring = NULL; } else { amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr, (void **)&ih->ring); - amdgpu_device_wb_free(adev, ih->wptr_offs); - amdgpu_device_wb_free(adev, ih->rptr_offs); + amdgpu_device_wb_free(adev, (ih->wptr_addr - ih->gpu_addr) / 4); + amdgpu_device_wb_free(adev, (ih->rptr_addr - ih->gpu_addr) / 4); } } @@ -128,16 +140,14 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) * Interrupt hander (VI), walk the IH ring. * Returns irq process return code. */ -int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, - void (*callback)(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih)) +int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { u32 wptr; if (!ih->enabled || adev->shutdown) return IRQ_NONE; - wptr = amdgpu_ih_get_wptr(adev); + wptr = amdgpu_ih_get_wptr(adev, ih); restart_ih: /* is somebody else already processing irqs? 
*/ @@ -150,15 +160,15 @@ restart_ih: rmb(); while (ih->rptr != wptr) { - callback(adev, ih); + amdgpu_irq_dispatch(adev, ih); ih->rptr &= ih->ptr_mask; } - amdgpu_ih_set_rptr(adev); + amdgpu_ih_set_rptr(adev, ih); atomic_set(&ih->lock, 0); /* make sure wptr hasn't changed while processing */ - wptr = amdgpu_ih_get_wptr(adev); + wptr = amdgpu_ih_get_wptr(adev, ih); if (wptr != ih->rptr) goto restart_ih; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h index f877bb78d10a..113a1ba13d4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h @@ -31,40 +31,44 @@ struct amdgpu_iv_entry; * R6xx+ IH ring */ struct amdgpu_ih_ring { - struct amdgpu_bo *ring_obj; - volatile uint32_t *ring; - unsigned rptr; unsigned ring_size; - uint64_t gpu_addr; uint32_t ptr_mask; - atomic_t lock; - bool enabled; - unsigned wptr_offs; - unsigned rptr_offs; u32 doorbell_index; bool use_doorbell; bool use_bus_addr; - dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */ + + struct amdgpu_bo *ring_obj; + volatile uint32_t *ring; + uint64_t gpu_addr; + + uint64_t wptr_addr; + volatile uint32_t *wptr_cpu; + + uint64_t rptr_addr; + volatile uint32_t *rptr_cpu; + + bool enabled; + unsigned rptr; + atomic_t lock; }; /* provided by the ih block */ struct amdgpu_ih_funcs { /* ring read/write ptr handling, called from interrupt context */ - u32 (*get_wptr)(struct amdgpu_device *adev); - void (*decode_iv)(struct amdgpu_device *adev, + u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); + void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, struct amdgpu_iv_entry *entry); - void (*set_rptr)(struct amdgpu_device *adev); + void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); }; -#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) -#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) -#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) +#define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih)) +#define amdgpu_ih_decode_iv(adev, iv) \ + (adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv)) +#define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih)) int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, unsigned ring_size, bool use_bus_addr); void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); -int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, - void (*callback)(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih)); +int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index b7968f426862..af4c3b1af322 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -130,27 +130,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) spin_unlock_irqrestore(&adev->irq.lock, irqflags); } -/** - * amdgpu_irq_callback - callback from the IH ring - * - * @adev: amdgpu device pointer - * @ih: amdgpu ih ring - * - * Callback from IH ring processing to handle the entry at the current position - * and advance the read pointer. 
- */ -static void amdgpu_irq_callback(struct amdgpu_device *adev, - struct amdgpu_ih_ring *ih) -{ - u32 ring_index = ih->rptr >> 2; - struct amdgpu_iv_entry entry; - - entry.iv_entry = (const uint32_t *)&ih->ring[ring_index]; - amdgpu_ih_decode_iv(adev, &entry); - - amdgpu_irq_dispatch(adev, &entry); -} - /** * amdgpu_irq_handler - IRQ handler * @@ -168,12 +147,42 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg) struct amdgpu_device *adev = dev->dev_private; irqreturn_t ret; - ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback); + ret = amdgpu_ih_process(adev, &adev->irq.ih); if (ret == IRQ_HANDLED) pm_runtime_mark_last_busy(dev->dev); return ret; } +/** + * amdgpu_irq_handle_ih1 - kick of processing for IH1 + * + * @work: work structure in struct amdgpu_irq + * + * Kick of processing IH ring 1. + */ +static void amdgpu_irq_handle_ih1(struct work_struct *work) +{ + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, + irq.ih1_work); + + amdgpu_ih_process(adev, &adev->irq.ih1); +} + +/** + * amdgpu_irq_handle_ih2 - kick of processing for IH2 + * + * @work: work structure in struct amdgpu_irq + * + * Kick of processing IH ring 2. + */ +static void amdgpu_irq_handle_ih2(struct work_struct *work) +{ + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, + irq.ih2_work); + + amdgpu_ih_process(adev, &adev->irq.ih2); +} + /** * amdgpu_msi_ok - check whether MSI functionality is enabled * @@ -238,6 +247,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev) amdgpu_hotplug_work_func); } + INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1); + INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); + adev->irq.installed = true; r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); if (r) { @@ -359,15 +371,22 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, * Dispatches IRQ to IP blocks. */ void amdgpu_irq_dispatch(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry) + struct amdgpu_ih_ring *ih) { - unsigned client_id = entry->client_id; - unsigned src_id = entry->src_id; + u32 ring_index = ih->rptr >> 2; + struct amdgpu_iv_entry entry; + unsigned client_id, src_id; struct amdgpu_irq_src *src; bool handled = false; int r; - trace_amdgpu_iv(entry); + entry.iv_entry = (const uint32_t *)&ih->ring[ring_index]; + amdgpu_ih_decode_iv(adev, &entry); + + trace_amdgpu_iv(ih - &adev->irq.ih, &entry); + + client_id = entry.client_id; + src_id = entry.src_id; if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); @@ -383,7 +402,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, client_id, src_id); } else if ((src = adev->irq.client[client_id].sources[src_id])) { - r = src->funcs->process(adev, src, entry); + r = src->funcs->process(adev, src, &entry); if (r < 0) DRM_ERROR("error processing interrupt (%d)\n", r); else if (r) @@ -395,7 +414,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, /* Send it to amdkfd as well if it isn't already handled */ if (!handled) - amdgpu_amdkfd_interrupt(adev, entry->iv_entry); + amdgpu_amdkfd_interrupt(adev, entry.iv_entry); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index f6ce171cb8aa..c718e94a55c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -87,9 +87,11 @@ struct amdgpu_irq { /* status, etc. 
*/ bool msi_enabled; /* msi enabled */ - /* interrupt ring */ - struct amdgpu_ih_ring ih; - const struct amdgpu_ih_funcs *ih_funcs; + /* interrupt rings */ + struct amdgpu_ih_ring ih, ih1, ih2; + const struct amdgpu_ih_funcs *ih_funcs; + struct work_struct ih1_work, ih2_work; + struct amdgpu_irq_src self_irq; /* gen irq stuff */ struct irq_domain *domain; /* GPU irq controller domain */ @@ -106,7 +108,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned client_id, unsigned src_id, struct amdgpu_irq_src *source); void amdgpu_irq_dispatch(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry); + struct amdgpu_ih_ring *ih); int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type); int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bc62bf41b7e9..e860412043bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -207,11 +207,12 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) if (!r) { acpi_status = amdgpu_acpi_init(adev); if (acpi_status) - dev_dbg(&dev->pdev->dev, + dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n"); } if (amdgpu_device_is_px(dev)) { + dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index aadd0fa42e43..889e443eeee7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -405,6 +406,7 @@ struct amdgpu_crtc { struct amdgpu_flip_work *pflip_works; enum amdgpu_flip_status pflip_status; int deferred_flip_completion; + u64 last_flip_vblank; /* pll sharing */ struct amdgpu_atom_ss ss; bool ss_enabled; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 728e15e5d68a..ec9e45004bff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -426,12 +426,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, size_t acc_size; int r; - page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT; - if (bp->domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA)) + /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */ + if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { + /* GWS and OA don't need any alignment. */ + page_align = bp->byte_align; size <<= PAGE_SHIFT; - else + } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) { + /* Both size and alignment must be a multiple of 4. */ + page_align = ALIGN(bp->byte_align, 4); + size = ALIGN(size, 4) << PAGE_SHIFT; + } else { + /* Memory should be aligned at least to a page size. 
*/ + page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT; size = ALIGN(size, PAGE_SIZE); + } if (!amdgpu_bo_validate_size(adev, size, bp->domain)) return -ENOMEM; @@ -1276,6 +1284,30 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, reservation_object_add_excl_fence(resv, fence); } +/** + * amdgpu_sync_wait_resv - Wait for BO reservation fences + * + * @bo: buffer object + * @owner: fence owner + * @intr: Whether the wait is interruptible + * + * Returns: + * 0 on success, errno otherwise. + */ +int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_sync sync; + int r; + + amdgpu_sync_create(&sync); + amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false); + r = amdgpu_sync_wait(&sync, intr); + amdgpu_sync_free(&sync); + + return r; +} + /** * amdgpu_bo_gpu_offset - return GPU offset of bo * @bo: amdgpu object for which we query the offset diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 9291c2f837e9..220a6a7b1bc1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -266,6 +266,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared); +int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); int amdgpu_bo_validate(struct amdgpu_bo *bo); int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 6896dec97fc7..a7adb7b6bd98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -626,11 +626,71 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, } /** - * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie + * DOC: ppfeatures + * + * The amdgpu driver provides a sysfs API for adjusting what powerplay + * features to be enabled. The file ppfeatures is used for this. And + * this is only available for Vega10 and later dGPUs. + * + * Reading back the file will show you the followings: + * - Current ppfeature masks + * - List of the all supported powerplay features with their naming, + * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). + * + * To manually enable or disable a specific feature, just set or clear + * the corresponding bit from original ppfeature masks and input the + * new ppfeature masks. 
+ */ +static ssize_t amdgpu_set_ppfeature_status(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint64_t featuremask; + int ret; + + ret = kstrtou64(buf, 0, &featuremask); + if (ret) + return -EINVAL; + + pr_debug("featuremask = 0x%llx\n", featuremask); + + if (adev->powerplay.pp_funcs->set_ppfeature_status) { + ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); + if (ret) + return -EINVAL; + } + + return count; +} + +static ssize_t amdgpu_get_ppfeature_status(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + if (adev->powerplay.pp_funcs->get_ppfeature_status) + return amdgpu_dpm_get_ppfeature_status(adev, buf); + + return snprintf(buf, PAGE_SIZE, "\n"); +} + +/** + * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk + * pp_dpm_pcie * * The amdgpu driver provides a sysfs API for adjusting what power levels * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, - * and pp_dpm_pcie are used for this. + * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for + * this. + * + * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for + * Vega10 and later ASICs. + * pp_dpm_fclk interface is only available for Vega20 and later ASICs. * * Reading back the files will show you the available power levels within * the power state and the clock information for those levels. @@ -640,6 +700,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, * Secondly,Enter a new value for each level by inputing a string that * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6. 
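To make the "echo 4 5 6 > pp_dpm_sclk" example above concrete as a program rather than a shell one-liner, here is a minimal user-space sketch. It is illustrative only and not part of the patch; the card index in the sysfs path and the chosen level mask are assumptions.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical device path; adjust the card index for your system. */
	const char *path = "/sys/class/drm/card0/device/pp_dpm_sclk";
	char line[256];
	FILE *f;

	/* Read back the available sclk levels and their clock values. */
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* Equivalent of "echo 4 5 6 > pp_dpm_sclk": enable levels 4, 5 and 6. */
	f = fopen(path, "w");
	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	fputs("4 5 6\n", f);
	fclose(f);

	return EXIT_SUCCESS;
}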
+ * + * NOTE: change to the dcefclk max dpm level is not supported now */ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, @@ -750,6 +812,114 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, return count; } +static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + if (adev->powerplay.pp_funcs->print_clock_levels) + return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); + else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + uint32_t mask = 0; + + ret = amdgpu_read_mask(buf, count, &mask); + if (ret) + return ret; + + if (adev->powerplay.pp_funcs->force_clock_level) + ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); + + if (ret) + return -EINVAL; + + return count; +} + +static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + if (adev->powerplay.pp_funcs->print_clock_levels) + return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); + else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + uint32_t mask = 0; + + ret = amdgpu_read_mask(buf, count, &mask); + if (ret) + return ret; + + if (adev->powerplay.pp_funcs->force_clock_level) + ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); + + if (ret) + return -EINVAL; + + return count; +} + +static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + if (adev->powerplay.pp_funcs->print_clock_levels) + return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); + else + return snprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + uint32_t mask = 0; + + ret = amdgpu_read_mask(buf, count, &mask); + if (ret) + return ret; + + if (adev->powerplay.pp_funcs->force_clock_level) + ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); + + if (ret) + return -EINVAL; + + return count; +} + static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, struct device_attribute *attr, char *buf) @@ -990,6 +1160,31 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", value); } +/** + * DOC: pcie_bw + * + * The amdgpu driver provides a sysfs API for estimating how much data + * has been received and sent by the GPU in the last second through PCIe. + * The file pcie_bw is used for this. + * The Perf counters count the number of received and sent messages and return + * those values, as well as the maximum payload size of a PCIe packet (mps). 
+ * Note that it is not possible to easily and quickly obtain the size of each + * packet transmitted, so we output the max payload size (mps) to allow for + * quick estimation of the PCIe bandwidth usage + */ +static ssize_t amdgpu_get_pcie_bw(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint64_t count0, count1; + + amdgpu_asic_get_pcie_usage(adev, &count0, &count1); + return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", + count0, count1, pcie_get_mps(adev->pdev)); +} + static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, amdgpu_get_dpm_forced_performance_level, @@ -1008,6 +1203,15 @@ static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, amdgpu_get_pp_dpm_mclk, amdgpu_set_pp_dpm_mclk); +static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_socclk, + amdgpu_set_pp_dpm_socclk); +static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_fclk, + amdgpu_set_pp_dpm_fclk); +static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR, + amdgpu_get_pp_dpm_dcefclk, + amdgpu_set_pp_dpm_dcefclk); static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, amdgpu_get_pp_dpm_pcie, amdgpu_set_pp_dpm_pcie); @@ -1025,6 +1229,10 @@ static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR, amdgpu_set_pp_od_clk_voltage); static DEVICE_ATTR(gpu_busy_percent, S_IRUGO, amdgpu_get_busy_percent, NULL); +static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL); +static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR, + amdgpu_get_ppfeature_status, + amdgpu_set_ppfeature_status); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct device_attribute *attr, @@ -1516,6 +1724,75 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return count; } +static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct amdgpu_device *adev = dev_get_drvdata(dev); + struct drm_device *ddev = adev->ddev; + uint32_t sclk; + int r, size = sizeof(sclk); + + /* Can't get voltage when the card is off */ + if ((adev->flags & AMD_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + + /* sanity check PP is enabled */ + if (!(adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->read_sensor)) + return -EINVAL; + + /* get the sclk */ + r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, + (void *)&sclk, &size); + if (r) + return r; + + return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); +} + +static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "sclk\n"); +} + +static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct amdgpu_device *adev = dev_get_drvdata(dev); + struct drm_device *ddev = adev->ddev; + uint32_t mclk; + int r, size = sizeof(mclk); + + /* Can't get voltage when the card is off */ + if ((adev->flags & AMD_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + + /* sanity check PP is enabled */ + if (!(adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->read_sensor)) + return -EINVAL; + + /* get the sclk */ + r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, + (void *)&mclk, &size); + if (r) + return r; + + return 
snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); +} + +static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "mclk\n"); +} /** * DOC: hwmon @@ -1532,6 +1809,10 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, * * - GPU fan * + * - GPU gfx/compute engine clock + * + * - GPU memory clock (dGPU only) + * * hwmon interfaces for GPU temperature: * * - temp1_input: the on die GPU temperature in millidegrees Celsius @@ -1576,6 +1857,12 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, * * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable * + * hwmon interfaces for GPU clocks: + * + * - freq1_input: the gfx/compute clock in hertz + * + * - freq2_input: the memory clock in hertz + * * You can use hwmon tools like sensors to view this information on your system. * */ @@ -1600,6 +1887,10 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); +static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); +static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); +static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); +static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, @@ -1622,6 +1913,10 @@ static struct attribute *hwmon_attributes[] = { &sensor_dev_attr_power1_cap_max.dev_attr.attr, &sensor_dev_attr_power1_cap_min.dev_attr.attr, &sensor_dev_attr_power1_cap.dev_attr.attr, + &sensor_dev_attr_freq1_input.dev_attr.attr, + &sensor_dev_attr_freq1_label.dev_attr.attr, + &sensor_dev_attr_freq2_input.dev_attr.attr, + &sensor_dev_attr_freq2_label.dev_attr.attr, NULL }; @@ -1686,7 +1981,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, effective_mode &= ~S_IWUSR; if ((adev->flags & AMD_IS_APU) && - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || + (attr == &sensor_dev_attr_power1_average.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) return 0; @@ -1712,6 +2008,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_in1_label.dev_attr.attr)) return 0; + /* no mclk on APUs */ + if ((adev->flags & AMD_IS_APU) && + (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || + attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) + return 0; + return effective_mode; } @@ -2070,6 +2372,25 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_dpm_mclk\n"); return ret; } + if (adev->asic_type >= CHIP_VEGA10) { + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_socclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_dcefclk\n"); + return ret; + } + } + if (adev->asic_type >= CHIP_VEGA20) { + ret = device_create_file(adev->dev, 
&dev_attr_pp_dpm_fclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_fclk\n"); + return ret; + } + } ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); if (ret) { DRM_ERROR("failed to create device file pp_dpm_pcie\n"); @@ -2108,12 +2429,31 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) "gpu_busy_level\n"); return ret; } + /* PCIe Perf counters won't work on APU nodes */ + if (!(adev->flags & AMD_IS_APU)) { + ret = device_create_file(adev->dev, &dev_attr_pcie_bw); + if (ret) { + DRM_ERROR("failed to create device file pcie_bw\n"); + return ret; + } + } ret = amdgpu_debugfs_pm_init(adev); if (ret) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); return ret; } + if ((adev->asic_type >= CHIP_VEGA10) && + !(adev->flags & AMD_IS_APU)) { + ret = device_create_file(adev->dev, + &dev_attr_ppfeatures); + if (ret) { + DRM_ERROR("failed to create device file " + "ppfeatures\n"); + return ret; + } + } + adev->pm.sysfs_initialized = true; return 0; @@ -2138,7 +2478,13 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); + if (adev->asic_type >= CHIP_VEGA10) { + device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk); + } device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); + if (adev->asic_type >= CHIP_VEGA20) + device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk); device_remove_file(adev->dev, &dev_attr_pp_sclk_od); device_remove_file(adev->dev, &dev_attr_pp_mclk_od); device_remove_file(adev->dev, @@ -2147,6 +2493,11 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pp_od_clk_voltage); device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); + if (!(adev->flags & AMD_IS_APU)) + device_remove_file(adev->dev, &dev_attr_pcie_bw); + if ((adev->asic_type >= CHIP_VEGA10) && + !(adev->flags & AMD_IS_APU)) + device_remove_file(adev->dev, &dev_attr_ppfeatures); } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 71913a18d142..a38e0fb4a6fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -38,6 +38,7 @@ #include "amdgpu_gem.h" #include #include +#include /** * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table @@ -187,6 +188,48 @@ error: return ERR_PTR(ret); } +static int +__reservation_object_make_exclusive(struct reservation_object *obj) +{ + struct dma_fence **fences; + unsigned int count; + int r; + + if (!reservation_object_get_list(obj)) /* no shared fences to convert */ + return 0; + + r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); + if (r) + return r; + + if (count == 0) { + /* Now that was unexpected. 
*/ + } else if (count == 1) { + reservation_object_add_excl_fence(obj, fences[0]); + dma_fence_put(fences[0]); + kfree(fences); + } else { + struct dma_fence_array *array; + + array = dma_fence_array_create(count, fences, + dma_fence_context_alloc(1), 0, + false); + if (!array) + goto err_fences_put; + + reservation_object_add_excl_fence(obj, &array->base); + dma_fence_put(&array->base); + } + + return 0; + +err_fences_put: + while (count--) + dma_fence_put(fences[count]); + kfree(fences); + return -ENOMEM; +} + /** * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation * @dma_buf: Shared DMA buffer @@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, if (attach->dev->driver != adev->dev->driver) { /* - * Wait for all shared fences to complete before we switch to future - * use of exclusive fence on this prime shared bo. + * We only create shared fences for internal use, but importers + * of the dmabuf rely on exclusive fences for implicitly + * tracking write hazards. As any of the current fences may + * correspond to a write, we need to convert all existing + * fences on the reservation object into a single exclusive + * fence. */ - r = reservation_object_wait_timeout_rcu(bo->tbo.resv, - true, false, - MAX_SCHEDULE_TIMEOUT); - if (unlikely(r < 0)) { - DRM_DEBUG_PRIME("Fence wait failed: %li\n", r); + r = __reservation_object_make_exclusive(bo->tbo.resv); + if (r) goto error_unreserve; - } } /* pin buffer into GTT */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8fab0d637ee5..3091488cd8cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -67,9 +67,6 @@ static int psp_sw_init(void *handle) psp->adev = adev; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) - return 0; - ret = psp_init_microcode(psp); if (ret) { DRM_ERROR("Failed to load psp firmware!\n"); @@ -83,15 +80,14 @@ static int psp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) - return 0; - release_firmware(adev->psp.sos_fw); adev->psp.sos_fw = NULL; release_firmware(adev->psp.asd_fw); adev->psp.asd_fw = NULL; - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; + if (adev->psp.ta_fw) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + } return 0; } @@ -140,13 +136,24 @@ psp_cmd_submit_buf(struct psp_context *psp, while (*((unsigned int *)psp->fence_buf) != index) msleep(1); - /* the status field must be 0 after FW is loaded */ - if (ucode && psp->cmd_buf_mem->resp.status) { - DRM_ERROR("failed loading with status (%d) and ucode id (%d)\n", - psp->cmd_buf_mem->resp.status, ucode->ucode_id); - return -EINVAL; + /* In some cases, psp response status is not 0 even there is no + * problem while the command is submitted. Some version of PSP FW + * doesn't write 0 to that field. + * So here we would like to only print a warning instead of an error + * during psp initialization to avoid breaking hw_init and it doesn't + * return -EINVAL. 
+ */ + if (psp->cmd_buf_mem->resp.status) { + if (ucode) + DRM_WARN("failed to load ucode id (%d) ", + ucode->ucode_id); + DRM_WARN("psp command failed and response status is (%d)\n", + psp->cmd_buf_mem->resp.status); } + /* get xGMI session id from response buffer */ + cmd->resp.session_id = psp->cmd_buf_mem->resp.session_id; + if (ucode) { ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; @@ -435,6 +442,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) struct ta_xgmi_shared_memory *xgmi_cmd; int ret; + if (!psp->adev->psp.ta_fw) + return -ENOENT; + if (!psp->xgmi_context.initialized) { ret = psp_xgmi_init_shared_buf(psp); if (ret) @@ -495,6 +505,98 @@ static int psp_hw_start(struct psp_context *psp) return 0; } +static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, + enum psp_gfx_fw_type *type) +{ + switch (ucode->ucode_id) { + case AMDGPU_UCODE_ID_SDMA0: + *type = GFX_FW_TYPE_SDMA0; + break; + case AMDGPU_UCODE_ID_SDMA1: + *type = GFX_FW_TYPE_SDMA1; + break; + case AMDGPU_UCODE_ID_CP_CE: + *type = GFX_FW_TYPE_CP_CE; + break; + case AMDGPU_UCODE_ID_CP_PFP: + *type = GFX_FW_TYPE_CP_PFP; + break; + case AMDGPU_UCODE_ID_CP_ME: + *type = GFX_FW_TYPE_CP_ME; + break; + case AMDGPU_UCODE_ID_CP_MEC1: + *type = GFX_FW_TYPE_CP_MEC; + break; + case AMDGPU_UCODE_ID_CP_MEC1_JT: + *type = GFX_FW_TYPE_CP_MEC_ME1; + break; + case AMDGPU_UCODE_ID_CP_MEC2: + *type = GFX_FW_TYPE_CP_MEC; + break; + case AMDGPU_UCODE_ID_CP_MEC2_JT: + *type = GFX_FW_TYPE_CP_MEC_ME2; + break; + case AMDGPU_UCODE_ID_RLC_G: + *type = GFX_FW_TYPE_RLC_G; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: + *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: + *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: + *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; + break; + case AMDGPU_UCODE_ID_SMC: + *type = GFX_FW_TYPE_SMU; + break; + case AMDGPU_UCODE_ID_UVD: + *type = GFX_FW_TYPE_UVD; + break; + case AMDGPU_UCODE_ID_UVD1: + *type = GFX_FW_TYPE_UVD1; + break; + case AMDGPU_UCODE_ID_VCE: + *type = GFX_FW_TYPE_VCE; + break; + case AMDGPU_UCODE_ID_VCN: + *type = GFX_FW_TYPE_VCN; + break; + case AMDGPU_UCODE_ID_DMCU_ERAM: + *type = GFX_FW_TYPE_DMCU_ERAM; + break; + case AMDGPU_UCODE_ID_DMCU_INTV: + *type = GFX_FW_TYPE_DMCU_ISR; + break; + case AMDGPU_UCODE_ID_MAXIMUM: + default: + return -EINVAL; + } + + return 0; +} + +static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode, + struct psp_gfx_cmd_resp *cmd) +{ + int ret; + uint64_t fw_mem_mc_addr = ucode->mc_addr; + + memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); + + cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); + cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; + + ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); + if (ret) + DRM_ERROR("Unknown firmware type\n"); + + return ret; +} + static int psp_np_fw_load(struct psp_context *psp) { int i, ret; @@ -516,7 +618,7 @@ static int psp_np_fw_load(struct psp_context *psp) /*skip ucode loading in SRIOV VF */ continue; - ret = psp_prep_cmd_buf(ucode, psp->cmd); + ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd); if (ret) return ret; @@ -541,7 +643,7 @@ static int psp_load_fw(struct amdgpu_device *adev) struct psp_context *psp = &adev->psp; if (amdgpu_sriov_vf(adev) && 
adev->in_gpu_reset) { - psp_ring_destroy(psp, PSP_RING_TYPE__KM); + psp_ring_stop(psp, PSP_RING_TYPE__KM); /* should not destroy ring, only stop */ goto skip_memalloc; } @@ -618,10 +720,6 @@ static int psp_hw_init(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) - return 0; - mutex_lock(&adev->firmware.mutex); /* * This sequence is just used on hw_init only once, no need on @@ -651,9 +749,6 @@ static int psp_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) - return 0; - if (adev->gmc.xgmi.num_physical_nodes > 1 && psp->xgmi_context.initialized == 1) psp_xgmi_terminate(psp); @@ -682,9 +777,6 @@ static int psp_suspend(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) - return 0; - if (adev->gmc.xgmi.num_physical_nodes > 1 && psp->xgmi_context.initialized == 1) { ret = psp_xgmi_terminate(psp); @@ -709,9 +801,6 @@ static int psp_resume(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) - return 0; - DRM_INFO("PSP is resuming...\n"); mutex_lock(&adev->firmware.mutex); @@ -747,11 +836,6 @@ static bool psp_check_fw_loading_status(struct amdgpu_device *adev, { struct amdgpu_firmware_info *ucode = NULL; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - DRM_INFO("firmware is not loaded by PSP\n"); - return true; - } - if (!adev->firmware.fw_size) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 3ee573b4016e..2ef98cc755d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -65,8 +65,6 @@ struct psp_funcs int (*init_microcode)(struct psp_context *psp); int (*bootloader_load_sysdrv)(struct psp_context *psp); int (*bootloader_load_sos)(struct psp_context *psp); - int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode, - struct psp_gfx_cmd_resp *cmd); int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type); int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); @@ -176,7 +174,6 @@ struct psp_xgmi_topology_info { struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; }; -#define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type)) #define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type)) #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type)) #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index d87e828a084b..d7fae2676269 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -131,7 +131,7 @@ struct amdgpu_ring_funcs { void (*emit_ib)(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch); + uint32_t flags); void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned flags); void (*emit_pipeline_sync)(struct amdgpu_ring *ring); @@ -229,7 +229,7 @@ struct amdgpu_ring { #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) 
(r)->funcs->set_wptr((r)) -#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c))) +#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags))) #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 12f2bf97611f..bfaf5c6323be 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -388,7 +388,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, soffset, eoffset, eoffset - soffset); if (i->fence) - seq_printf(m, " protected by 0x%08x on context %llu", + seq_printf(m, " protected by 0x%016llx on context %llu", i->fence->seqno, i->fence->context); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 1cafe8d83a4d..0767a93e4d91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -54,16 +54,20 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, enum drm_sched_priority priority) { struct file *filp = fget(fd); - struct drm_file *file; struct amdgpu_fpriv *fpriv; struct amdgpu_ctx *ctx; uint32_t id; + int r; if (!filp) return -EINVAL; - file = filp->private_data; - fpriv = file->driver_priv; + r = amdgpu_file_to_fpriv(filp, &fpriv); + if (r) { + fput(filp); + return r; + } + idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id) amdgpu_ctx_priority_override(ctx, priority); @@ -72,6 +76,39 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, return 0; } +static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev, + int fd, + unsigned ctx_id, + enum drm_sched_priority priority) +{ + struct file *filp = fget(fd); + struct amdgpu_fpriv *fpriv; + struct amdgpu_ctx *ctx; + int r; + + if (!filp) + return -EINVAL; + + r = amdgpu_file_to_fpriv(filp, &fpriv); + if (r) { + fput(filp); + return r; + } + + ctx = amdgpu_ctx_get(fpriv, ctx_id); + + if (!ctx) { + fput(filp); + return -EINVAL; + } + + amdgpu_ctx_priority_override(ctx, priority); + amdgpu_ctx_put(ctx); + fput(filp); + + return 0; +} + int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -81,7 +118,7 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, int r; priority = amdgpu_to_sched_priority(args->in.priority); - if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID) + if (priority == DRM_SCHED_PRIORITY_INVALID) return -EINVAL; switch (args->in.op) { @@ -90,6 +127,12 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, args->in.fd, priority); break; + case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE: + r = amdgpu_sched_context_priority_override(adev, + args->in.fd, + args->in.ctx_id, + priority); + break; default: DRM_ERROR("Invalid sched op specified: %d\n", args->in.op); r = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 626abca770a0..d3ca2424b5fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -76,9 +76,10 @@ TRACE_EVENT(amdgpu_mm_wreg, ); TRACE_EVENT(amdgpu_iv, - TP_PROTO(struct amdgpu_iv_entry *iv), - TP_ARGS(iv), 
+ TP_PROTO(unsigned ih, struct amdgpu_iv_entry *iv), + TP_ARGS(ih, iv), TP_STRUCT__entry( + __field(unsigned, ih) __field(unsigned, client_id) __field(unsigned, src_id) __field(unsigned, ring_id) @@ -90,6 +91,7 @@ TRACE_EVENT(amdgpu_iv, __array(unsigned, src_data, 4) ), TP_fast_assign( + __entry->ih = ih; __entry->client_id = iv->client_id; __entry->src_id = iv->src_id; __entry->ring_id = iv->ring_id; @@ -103,8 +105,9 @@ TRACE_EVENT(amdgpu_iv, __entry->src_data[2] = iv->src_data[2]; __entry->src_data[3] = iv->src_data[3]; ), - TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x", - __entry->client_id, __entry->src_id, + TP_printk("ih:%u client_id:%u src_id:%u ring:%u vmid:%u " + "timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x", + __entry->ih, __entry->client_id, __entry->src_id, __entry->ring_id, __entry->vmid, __entry->timestamp, __entry->pasid, __entry->src_data[0], __entry->src_data[1], diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c91ec3101d00..73e71e61dc99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1546,7 +1546,8 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, - .access_memory = &amdgpu_ttm_access_memory + .access_memory = &amdgpu_ttm_access_memory, + .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify }; /* @@ -1755,7 +1756,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS, + 4, AMDGPU_GEM_DOMAIN_GDS, &adev->gds.gds_gfx_bo, NULL, NULL); if (r) return r; @@ -1768,7 +1769,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS, + 1, AMDGPU_GEM_DOMAIN_GWS, &adev->gds.gws_gfx_bo, NULL, NULL); if (r) return r; @@ -1781,7 +1782,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA, + 1, AMDGPU_GEM_DOMAIN_OA, &adev->gds.oa_gfx_bo, NULL, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 98a1b2ce2b9d..c021b114c8a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -1035,7 +1035,7 @@ out: void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { amdgpu_ring_write(ring, VCE_CMD_IB); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 50293652af14..30ea54dd9117 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -66,7 +66,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_ib *ib, bool ctx_switch); + struct amdgpu_ib *ib, uint32_t flags); void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int 
amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d2ea5ce2cefb..ead851413c0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -107,14 +107,6 @@ struct amdgpu_pte_update_params { * DMA addresses to use for mapping, used during VM update by CPU */ dma_addr_t *pages_addr; - - /** - * @kptr: - * - * Kernel pointer of PD/PT BO that needs to be updated, - * used during VM update by CPU - */ - void *kptr; }; /** @@ -623,6 +615,28 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, list_add(&entry->tv.head, validated); } +void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) +{ + struct amdgpu_bo *abo; + struct amdgpu_vm_bo_base *bo_base; + + if (!amdgpu_bo_is_amdgpu_bo(bo)) + return; + + if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) + return; + + abo = ttm_to_amdgpu_bo(bo); + if (!abo->parent) + return; + for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { + struct amdgpu_vm *vm = bo_base->vm; + + if (abo->tbo.resv == vm->root.base.bo->tbo.resv) + vm->bulk_moveable = false; + } + +} /** * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU * @@ -638,12 +652,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct ttm_bo_global *glob = adev->mman.bdev.glob; struct amdgpu_vm_bo_base *bo_base; +#if 0 if (vm->bulk_moveable) { spin_lock(&glob->lru_lock); ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); spin_unlock(&glob->lru_lock); return; } +#endif memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); @@ -684,8 +700,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_bo_base *bo_base, *tmp; int r = 0; - vm->bulk_moveable &= list_empty(&vm->evicted); - list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; @@ -799,15 +813,22 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, addr += ats_entries * 8; } - if (entries) + if (entries) { + uint64_t value = 0; + + /* Workaround for fault priority problem on GMC9 */ + if (level == AMDGPU_VM_PTB && adev->asic_type >= CHIP_VEGA10) + value = AMDGPU_PTE_EXECUTABLE; + amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0, - entries, 0, 0); + entries, 0, value); + } amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > 64); r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv, - AMDGPU_FENCE_OWNER_UNDEFINED, false); + AMDGPU_FENCE_OWNER_KFD, false); if (r) goto error_free; @@ -1311,31 +1332,6 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, } } - -/** - * amdgpu_vm_wait_pd - Wait for PT BOs to be free. - * - * @adev: amdgpu_device pointer - * @vm: related vm - * @owner: fence owner - * - * Returns: - * 0 on success, errno otherwise. 
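The kernel-doc above belongs to amdgpu_vm_wait_pd(), which the hunks below delete; its job is taken over by the amdgpu_bo_sync_wait() helper added to amdgpu_object.c earlier in this diff. A minimal sketch of the replacement call pattern follows; the enclosing function is purely illustrative, while the helper and the owner constant are taken from the patch.

static int example_wait_root_pd(struct amdgpu_vm *vm)
{
	int r;

	/* Page tables share the root PD's reservation object, so waiting
	 * on the root BO is enough before a CPU page-table update. */
	r = amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_VM, true);
	if (unlikely(r))
		return r;

	return 0;
}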
- */ -static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, - void *owner) -{ - struct amdgpu_sync sync; - int r; - - amdgpu_sync_create(&sync); - amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false); - r = amdgpu_sync_wait(&sync, true); - amdgpu_sync_free(&sync); - - return r; -} - /** * amdgpu_vm_update_func - helper to call update function * @@ -1430,7 +1426,8 @@ restart: params.adev = adev; if (vm->use_cpu_for_update) { - r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); + r = amdgpu_bo_sync_wait(vm->root.base.bo, + AMDGPU_FENCE_OWNER_VM, true); if (unlikely(r)) return r; @@ -1503,20 +1500,27 @@ error: } /** - * amdgpu_vm_update_huge - figure out parameters for PTE updates + * amdgpu_vm_update_flags - figure out flags for PTE updates * * Make sure to set the right flags for the PTEs at the desired level. */ -static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params, - struct amdgpu_bo *bo, unsigned level, - uint64_t pe, uint64_t addr, - unsigned count, uint32_t incr, - uint64_t flags) +static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params, + struct amdgpu_bo *bo, unsigned level, + uint64_t pe, uint64_t addr, + unsigned count, uint32_t incr, + uint64_t flags) { if (level != AMDGPU_VM_PTB) { flags |= AMDGPU_PDE_PTE; amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags); + + } else if (params->adev->asic_type >= CHIP_VEGA10 && + !(flags & AMDGPU_PTE_VALID) && + !(flags & AMDGPU_PTE_PRT)) { + + /* Workaround for fault priority problem on GMC9 */ + flags |= AMDGPU_PTE_EXECUTABLE; } amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags); @@ -1673,9 +1677,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t upd_end = min(entry_end, frag_end); unsigned nptes = (upd_end - frag_start) >> shift; - amdgpu_vm_update_huge(params, pt, cursor.level, - pe_start, dst, nptes, incr, - flags | AMDGPU_PTE_FRAG(frag)); + amdgpu_vm_update_flags(params, pt, cursor.level, + pe_start, dst, nptes, incr, + flags | AMDGPU_PTE_FRAG(frag)); pe_start += nptes * 8; dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift; @@ -1744,22 +1748,29 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, params.adev = adev; params.vm = vm; - /* sync to everything on unmapping */ + /* sync to everything except eviction fences on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) - owner = AMDGPU_FENCE_OWNER_UNDEFINED; + owner = AMDGPU_FENCE_OWNER_KFD; if (vm->use_cpu_for_update) { /* params.src is used as flag to indicate system Memory */ if (pages_addr) params.src = ~0; - /* Wait for PT BOs to be free. PTs share the same resv. object + /* Wait for PT BOs to be idle. PTs share the same resv. object * as the root PD BO */ - r = amdgpu_vm_wait_pd(adev, vm, owner); + r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true); if (unlikely(r)) return r; + /* Wait for any BO move to be completed */ + if (exclusive) { + r = dma_fence_wait(exclusive, true); + if (unlikely(r)) + return r; + } + params.func = amdgpu_vm_cpu_set_ptes; params.pages_addr = pages_addr; return amdgpu_vm_update_ptes(¶ms, start, last + 1, @@ -1773,13 +1784,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, /* * reserve space for two commands every (1 << BLOCK_SIZE) * entries or 2k dwords (whatever is smaller) - * - * The second command is for the shadow pagetables. */ + ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1); + + /* The second command is for the shadow pagetables. 
*/ if (vm->root.base.bo->shadow) - ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2; - else - ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1); + ncmds *= 2; /* padding, etc. */ ndw = 64; @@ -1798,10 +1808,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, ndw += ncmds * 10; /* extra commands for begin/end fragments */ + ncmds = 2 * adev->vm_manager.fragment_size; if (vm->root.base.bo->shadow) - ndw += 2 * 10 * adev->vm_manager.fragment_size * 2; - else - ndw += 2 * 10 * adev->vm_manager.fragment_size; + ncmds *= 2; + + ndw += 10 * ncmds; params.func = amdgpu_vm_do_set_ptes; } @@ -3003,7 +3014,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, } DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? "CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); vm->last_update = NULL; @@ -3133,7 +3144,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns vm->pte_support_ats = pte_support_ats; DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? "CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); if (vm->pasid) { @@ -3363,14 +3374,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, struct amdgpu_task_info *task_info) { struct amdgpu_vm *vm; + unsigned long flags; - spin_lock(&adev->vm_manager.pasid_lock); + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); if (vm) *task_info = vm->task_info; - spin_unlock(&adev->vm_manager.pasid_lock); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index e8dcfd59fc93..81ff8177f092 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -363,4 +363,6 @@ int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key) void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key); +void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 8a8bc60cb6b4..407dd16cc35c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -40,26 +40,40 @@ void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive) return &hive->device_list; } -struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock) { int i; struct amdgpu_hive_info *tmp; if (!adev->gmc.xgmi.hive_id) return NULL; + + mutex_lock(&xgmi_mutex); + for (i = 0 ; i < hive_count; ++i) { tmp = &xgmi_hives[i]; - if (tmp->hive_id == adev->gmc.xgmi.hive_id) + if (tmp->hive_id == adev->gmc.xgmi.hive_id) { + if (lock) + mutex_lock(&tmp->hive_lock); + mutex_unlock(&xgmi_mutex); return tmp; + } } - if (i >= AMDGPU_MAX_XGMI_HIVE) + if (i >= AMDGPU_MAX_XGMI_HIVE) { + mutex_unlock(&xgmi_mutex); return NULL; + } /* initialize new hive if not exist */ tmp = &xgmi_hives[hive_count++]; 
tmp->hive_id = adev->gmc.xgmi.hive_id; INIT_LIST_HEAD(&tmp->device_list); mutex_init(&tmp->hive_lock); + mutex_init(&tmp->reset_lock); + if (lock) + mutex_lock(&tmp->hive_lock); + + mutex_unlock(&xgmi_mutex); return tmp; } @@ -77,10 +91,6 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev "XGMI: Set topology failure on device %llx, hive %llx, ret %d", adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id, ret); - else - dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n", - adev->gmc.xgmi.physical_node_id, - adev->gmc.xgmi.hive_id); return ret; } @@ -111,10 +121,14 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) return ret; } - mutex_lock(&xgmi_mutex); - hive = amdgpu_get_xgmi_hive(adev); - if (!hive) + hive = amdgpu_get_xgmi_hive(adev, 1); + if (!hive) { + ret = -EINVAL; + dev_err(adev->dev, + "XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n", + adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id); goto exit; + } hive_topology = &hive->topology_info; @@ -142,8 +156,11 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) break; } + dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n", + adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id); + + mutex_unlock(&hive->hive_lock); exit: - mutex_unlock(&xgmi_mutex); return ret; } @@ -154,15 +171,14 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) if (!adev->gmc.xgmi.supported) return; - mutex_lock(&xgmi_mutex); - - hive = amdgpu_get_xgmi_hive(adev); + hive = amdgpu_get_xgmi_hive(adev, 1); if (!hive) - goto exit; + return; - if (!(hive->number_devices--)) + if (!(hive->number_devices--)) { mutex_destroy(&hive->hive_lock); - -exit: - mutex_unlock(&xgmi_mutex); + mutex_destroy(&hive->reset_lock); + } else { + mutex_unlock(&hive->hive_lock); + } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 6151eb9c8ad3..14bc60664159 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -29,10 +29,11 @@ struct amdgpu_hive_info { struct list_head device_list; struct psp_xgmi_topology_info topology_info; int number_devices; - struct mutex hive_lock; + struct mutex hive_lock, + reset_lock; }; -struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev); +struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); int amdgpu_xgmi_add_device(struct amdgpu_device *adev); void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index e9934de1b9cf..dd30f4e61a8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -27,6 +27,8 @@ #include #include +#include + #define ATOM_DEBUG #include "atom.h" diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c deleted file mode 100644 index 86e14c754dd4..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ /dev/null @@ -1,6844 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_ucode.h" -#include "cikd.h" -#include "amdgpu_dpm.h" -#include "ci_dpm.h" -#include "gfx_v7_0.h" -#include "atom.h" -#include "amd_pcie.h" -#include - -#include "smu/smu_7_0_1_d.h" -#include "smu/smu_7_0_1_sh_mask.h" - -#include "dce/dce_8_0_d.h" -#include "dce/dce_8_0_sh_mask.h" - -#include "bif/bif_4_1_d.h" -#include "bif/bif_4_1_sh_mask.h" - -#include "gca/gfx_7_2_d.h" -#include "gca/gfx_7_2_sh_mask.h" - -#include "gmc/gmc_7_1_d.h" -#include "gmc/gmc_7_1_sh_mask.h" - -MODULE_FIRMWARE("amdgpu/bonaire_smc.bin"); -MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/hawaii_smc.bin"); -MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin"); - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define SMC_RAM_END 0x40000 - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -static const struct amd_pm_funcs ci_dpm_funcs; - -static const struct ci_pt_defaults defaults_hawaii_xt = -{ - 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, - { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, - { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } -}; - -static const struct ci_pt_defaults defaults_hawaii_pro = -{ - 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, - { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 }, - { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 } -}; - -static const struct ci_pt_defaults defaults_bonaire_xt = -{ - 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, - { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 }, - { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } -}; - -#if 0 -static const struct ci_pt_defaults defaults_bonaire_pro = -{ - 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, - { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, - { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } -}; -#endif - -static const struct ci_pt_defaults defaults_saturn_xt = -{ - 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000, - { 0x8C, 
0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D }, - { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } -}; - -#if 0 -static const struct ci_pt_defaults defaults_saturn_pro = -{ - 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, - { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, - { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } -}; -#endif - -static const struct ci_pt_config_reg didt_config_ci[] = -{ - { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, 
- { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND }, - { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND }, - { 0xFFFFFFFF } -}; - -static u8 ci_get_memory_module_index(struct amdgpu_device *adev) -{ - return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff); -} - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev, - u32 arb_freq_src, u32 arb_freq_dest) -{ - u32 mc_arb_dram_timing; - u32 mc_arb_dram_timing2; - u32 burst_time; - u32 mc_cg_config; - - switch (arb_freq_src) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2); - burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >> - MC_ARB_BURST_TIME__STATE0__SHIFT; - break; - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1); - burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >> - MC_ARB_BURST_TIME__STATE1__SHIFT; - break; - default: - return -EINVAL; - } - - switch (arb_freq_dest) { - case MC_CG_ARB_FREQ_F0: - WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing); - WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT), - ~MC_ARB_BURST_TIME__STATE0_MASK); - break; - case MC_CG_ARB_FREQ_F1: - WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT), - ~MC_ARB_BURST_TIME__STATE1_MASK); - break; - default: - return -EINVAL; - } - - mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F; - WREG32(mmMC_CG_CONFIG, mc_cg_config); - WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT, - ~MC_ARB_CG__CG_ARB_REQ_MASK); - - return 0; -} - -static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock) -{ - u8 mc_para_index; - - if (memory_clock < 10000) - mc_para_index = 0; - else if (memory_clock >= 80000) - mc_para_index = 
0x0f; - else - mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1); - return mc_para_index; -} - -static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) -{ - u8 mc_para_index; - - if (strobe_mode) { - if (memory_clock < 12500) - mc_para_index = 0x00; - else if (memory_clock > 47500) - mc_para_index = 0x0f; - else - mc_para_index = (u8)((memory_clock - 10000) / 2500); - } else { - if (memory_clock < 65000) - mc_para_index = 0x00; - else if (memory_clock > 135000) - mc_para_index = 0x0f; - else - mc_para_index = (u8)((memory_clock - 60000) / 5000); - } - return mc_para_index; -} - -static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev, - u32 max_voltage_steps, - struct atom_voltage_table *voltage_table) -{ - unsigned int i, diff; - - if (voltage_table->count <= max_voltage_steps) - return; - - diff = voltage_table->count - max_voltage_steps; - - for (i = 0; i < max_voltage_steps; i++) - voltage_table->entries[i] = voltage_table->entries[i + diff]; - - voltage_table->count = max_voltage_steps; -} - -static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev, - struct atom_voltage_table_entry *voltage_table, - u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd); -static int ci_set_power_limit(struct amdgpu_device *adev, u32 n); -static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev, - u32 target_tdp); -static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate); -static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev); - -static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter); -static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev); -static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev); - -static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps) -{ - struct ci_ps *ps = rps->ps_priv; - - return ps; -} - -static void ci_initialize_powertune_defaults(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - switch (adev->pdev->device) { - case 0x6649: - case 0x6650: - case 0x6651: - case 0x6658: - case 0x665C: - case 0x665D: - default: - pi->powertune_defaults = &defaults_bonaire_xt; - break; - case 0x6640: - case 0x6641: - case 0x6646: - case 0x6647: - pi->powertune_defaults = &defaults_saturn_xt; - break; - case 0x67B8: - case 0x67B0: - pi->powertune_defaults = &defaults_hawaii_xt; - break; - case 0x67BA: - case 0x67B1: - pi->powertune_defaults = &defaults_hawaii_pro; - break; - case 0x67A0: - case 0x67A1: - case 0x67A2: - case 0x67A8: - case 0x67A9: - case 0x67AA: - case 0x67B9: - case 0x67BE: - pi->powertune_defaults = &defaults_bonaire_xt; - break; - } - - pi->dte_tj_offset = 0; - - pi->caps_power_containment = true; - pi->caps_cac = false; - pi->caps_sq_ramping = false; - pi->caps_db_ramping = false; - pi->caps_td_ramping = false; - pi->caps_tcp_ramping = false; - - if (pi->caps_power_containment) { - pi->caps_cac = true; - if (adev->asic_type == CHIP_HAWAII) - pi->enable_bapm_feature = false; - else - pi->enable_bapm_feature = true; - pi->enable_tdc_limit_feature = true; - pi->enable_pkg_pwr_tracking_feature = true; - } -} - -static u8 ci_convert_to_vid(u16 vddc) -{ - return (6200 - (vddc * VOLTAGE_SCALE)) / 25; -} - -static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u8 *hi_vid = 
pi->smc_powertune_table.BapmVddCVidHiSidd; - u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; - u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2; - u32 i; - - if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL) - return -EINVAL; - if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8) - return -EINVAL; - if (adev->pm.dpm.dyn_state.cac_leakage_table.count != - adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count) - return -EINVAL; - - for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) { - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { - lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1); - hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2); - hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3); - } else { - lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc); - hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage); - } - } - return 0; -} - -static int ci_populate_vddc_vid(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u8 *vid = pi->smc_powertune_table.VddCVid; - u32 i; - - if (pi->vddc_voltage_table.count > 8) - return -EINVAL; - - for (i = 0; i < pi->vddc_voltage_table.count; i++) - vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value); - - return 0; -} - -static int ci_populate_svi_load_line(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; - - pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en; - pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc; - pi->smc_powertune_table.SviLoadLineTrimVddC = 3; - pi->smc_powertune_table.SviLoadLineOffsetVddC = 0; - - return 0; -} - -static int ci_populate_tdc_limit(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; - u16 tdc_limit; - - tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256; - pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit); - pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc = - pt_defaults->tdc_vddc_throttle_release_limit_perc; - pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt; - - return 0; -} - -static int ci_populate_dw8(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; - int ret; - - ret = amdgpu_ci_read_smc_sram_dword(adev, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, PmFuseTable) + - offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), - (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl, - pi->sram_end); - if (ret) - return -EINVAL; - else - pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl; - - return 0; -} - -static int ci_populate_fuzzy_fan(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) || - (adev->pm.dpm.fan.fan_output_sensitivity == 0)) - adev->pm.dpm.fan.fan_output_sensitivity = - adev->pm.dpm.fan.default_fan_output_sensitivity; - - pi->smc_powertune_table.FuzzyFan_PwmSetDelta = - cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity); - - return 0; -} - -static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev) -{ - 
struct ci_power_info *pi = ci_get_pi(adev); - u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd; - u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd; - int i, min, max; - - min = max = hi_vid[0]; - for (i = 0; i < 8; i++) { - if (0 != hi_vid[i]) { - if (min > hi_vid[i]) - min = hi_vid[i]; - if (max < hi_vid[i]) - max = hi_vid[i]; - } - - if (0 != lo_vid[i]) { - if (min > lo_vid[i]) - min = lo_vid[i]; - if (max < lo_vid[i]) - max = lo_vid[i]; - } - } - - if ((min == 0) || (max == 0)) - return -EINVAL; - pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max; - pi->smc_powertune_table.GnbLPMLMinVid = (u8)min; - - return 0; -} - -static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd; - u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd; - struct amdgpu_cac_tdp_table *cac_tdp_table = - adev->pm.dpm.dyn_state.cac_tdp_table; - - hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256; - lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256; - - pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd); - pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd); - - return 0; -} - -static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults; - SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table; - struct amdgpu_cac_tdp_table *cac_tdp_table = - adev->pm.dpm.dyn_state.cac_tdp_table; - struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table; - int i, j, k; - const u16 *def1; - const u16 *def2; - - dpm_table->DefaultTdp = cac_tdp_table->tdp * 256; - dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256; - - dpm_table->DTETjOffset = (u8)pi->dte_tj_offset; - dpm_table->GpuTjMax = - (u8)(pi->thermal_temp_setting.temperature_high / 1000); - dpm_table->GpuTjHyst = 8; - - dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base; - - if (ppm) { - dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000); - dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256); - } else { - dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0); - dpm_table->PPM_TemperatureLimit = cpu_to_be16(0); - } - - dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient); - def1 = pt_defaults->bapmti_r; - def2 = pt_defaults->bapmti_rc; - - for (i = 0; i < SMU7_DTE_ITERATIONS; i++) { - for (j = 0; j < SMU7_DTE_SOURCES; j++) { - for (k = 0; k < SMU7_DTE_SINKS; k++) { - dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1); - dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2); - def1++; - def2++; - } - } - } - - return 0; -} - -static int ci_populate_pm_base(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 pm_fuse_table_offset; - int ret; - - if (pi->caps_power_containment) { - ret = amdgpu_ci_read_smc_sram_dword(adev, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, PmFuseTable), - &pm_fuse_table_offset, pi->sram_end); - if (ret) - return ret; - ret = ci_populate_bapm_vddc_vid_sidd(adev); - if (ret) - return ret; - ret = ci_populate_vddc_vid(adev); - if (ret) - return ret; - ret = ci_populate_svi_load_line(adev); - if (ret) - return ret; - ret = ci_populate_tdc_limit(adev); - if (ret) - return ret; - ret = ci_populate_dw8(adev); - if (ret) - return ret; - ret = ci_populate_fuzzy_fan(adev); - if (ret) - 
return ret; - ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev); - if (ret) - return ret; - ret = ci_populate_bapm_vddc_base_leakage_sidd(adev); - if (ret) - return ret; - ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset, - (u8 *)&pi->smc_powertune_table, - sizeof(SMU7_Discrete_PmFuses), pi->sram_end); - if (ret) - return ret; - } - - return 0; -} - -static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 data; - - if (pi->caps_sq_ramping) { - data = RREG32_DIDT(ixDIDT_SQ_CTRL0); - if (enable) - data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_SQ_CTRL0, data); - } - - if (pi->caps_db_ramping) { - data = RREG32_DIDT(ixDIDT_DB_CTRL0); - if (enable) - data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_DB_CTRL0, data); - } - - if (pi->caps_td_ramping) { - data = RREG32_DIDT(ixDIDT_TD_CTRL0); - if (enable) - data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_TD_CTRL0, data); - } - - if (pi->caps_tcp_ramping) { - data = RREG32_DIDT(ixDIDT_TCP_CTRL0); - if (enable) - data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_TCP_CTRL0, data); - } -} - -static int ci_program_pt_config_registers(struct amdgpu_device *adev, - const struct ci_pt_config_reg *cac_config_regs) -{ - const struct ci_pt_config_reg *config_regs = cac_config_regs; - u32 data; - u32 cache = 0; - - if (config_regs == NULL) - return -EINVAL; - - while (config_regs->offset != 0xFFFFFFFF) { - if (config_regs->type == CISLANDS_CONFIGREG_CACHE) { - cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); - } else { - switch (config_regs->type) { - case CISLANDS_CONFIGREG_SMC_IND: - data = RREG32_SMC(config_regs->offset); - break; - case CISLANDS_CONFIGREG_DIDT_IND: - data = RREG32_DIDT(config_regs->offset); - break; - default: - data = RREG32(config_regs->offset); - break; - } - - data &= ~config_regs->mask; - data |= ((config_regs->value << config_regs->shift) & config_regs->mask); - data |= cache; - - switch (config_regs->type) { - case CISLANDS_CONFIGREG_SMC_IND: - WREG32_SMC(config_regs->offset, data); - break; - case CISLANDS_CONFIGREG_DIDT_IND: - WREG32_DIDT(config_regs->offset, data); - break; - default: - WREG32(config_regs->offset, data); - break; - } - cache = 0; - } - config_regs++; - } - return 0; -} - -static int ci_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret; - - if (pi->caps_sq_ramping || pi->caps_db_ramping || - pi->caps_td_ramping || pi->caps_tcp_ramping) { - amdgpu_gfx_rlc_enter_safe_mode(adev); - - if (enable) { - ret = ci_program_pt_config_registers(adev, didt_config_ci); - if (ret) { - amdgpu_gfx_rlc_exit_safe_mode(adev); - return ret; - } - } - - ci_do_enable_didt(adev, enable); - - amdgpu_gfx_rlc_exit_safe_mode(adev); - } - - return 0; -} - -static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result; - int ret = 0; - - if (enable) { - pi->power_containment_features = 0; - if (pi->caps_power_containment) { - if (pi->enable_bapm_feature) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE); - if (smc_result != PPSMC_Result_OK) - ret = -EINVAL; - else - 
pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM; - } - - if (pi->enable_tdc_limit_feature) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable); - if (smc_result != PPSMC_Result_OK) - ret = -EINVAL; - else - pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit; - } - - if (pi->enable_pkg_pwr_tracking_feature) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable); - if (smc_result != PPSMC_Result_OK) { - ret = -EINVAL; - } else { - struct amdgpu_cac_tdp_table *cac_tdp_table = - adev->pm.dpm.dyn_state.cac_tdp_table; - u32 default_pwr_limit = - (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); - - pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit; - - ci_set_power_limit(adev, default_pwr_limit); - } - } - } - } else { - if (pi->caps_power_containment && pi->power_containment_features) { - if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit) - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable); - - if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM) - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE); - - if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable); - pi->power_containment_features = 0; - } - } - - return ret; -} - -static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result; - int ret = 0; - - if (pi->caps_cac) { - if (enable) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac); - if (smc_result != PPSMC_Result_OK) { - ret = -EINVAL; - pi->cac_enabled = false; - } else { - pi->cac_enabled = true; - } - } else if (pi->cac_enabled) { - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac); - pi->cac_enabled = false; - } - } - - return ret; -} - -static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev, - bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result = PPSMC_Result_OK; - - if (pi->thermal_sclk_dpm_enabled) { - if (enable) - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM); - else - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM); - } - - if (smc_result == PPSMC_Result_OK) - return 0; - else - return -EINVAL; -} - -static int ci_power_control_set_level(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_cac_tdp_table *cac_tdp_table = - adev->pm.dpm.dyn_state.cac_tdp_table; - s32 adjust_percent; - s32 target_tdp; - int ret = 0; - bool adjust_polarity = false; /* ??? */ - - if (pi->caps_power_containment) { - adjust_percent = adjust_polarity ? 
- adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment); - target_tdp = ((100 + adjust_percent) * - (s32)cac_tdp_table->configurable_tdp) / 100; - - ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp); - } - - return ret; -} - -static void ci_dpm_powergate_uvd(void *handle, bool gate) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - - pi->uvd_power_gated = gate; - - if (gate) { - /* stop the UVD block */ - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - ci_update_uvd_dpm(adev, gate); - } else { - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - ci_update_uvd_dpm(adev, gate); - } -} - -static bool ci_dpm_vblank_too_short(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); - u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; - - /* disable mclk switching if the refresh is >120Hz, even if the - * blanking period would allow it - */ - if (amdgpu_dpm_get_vrefresh(adev) > 120) - return true; - - if (vblank_time < switch_limit) - return true; - else - return false; - -} - -static void ci_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct ci_ps *ps = ci_get_ps(rps); - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_clock_and_voltage_limits *max_limits; - bool disable_mclk_switching; - u32 sclk, mclk; - int i; - - if (rps->vce_active) { - rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; - rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; - } else { - rps->evclk = 0; - rps->ecclk = 0; - } - - if ((adev->pm.dpm.new_active_crtc_count > 1) || - ci_dpm_vblank_too_short(adev)) - disable_mclk_switching = true; - else - disable_mclk_switching = false; - - if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) - pi->battery_state = true; - else - pi->battery_state = false; - - if (adev->pm.ac_power) - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - else - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; - - if (adev->pm.ac_power == false) { - for (i = 0; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].mclk > max_limits->mclk) - ps->performance_levels[i].mclk = max_limits->mclk; - if (ps->performance_levels[i].sclk > max_limits->sclk) - ps->performance_levels[i].sclk = max_limits->sclk; - } - } - - /* XXX validate the min clocks required for display */ - - if (disable_mclk_switching) { - mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; - sclk = ps->performance_levels[0].sclk; - } else { - mclk = ps->performance_levels[0].mclk; - sclk = ps->performance_levels[0].sclk; - } - - if (adev->pm.pm_display_cfg.min_core_set_clock > sclk) - sclk = adev->pm.pm_display_cfg.min_core_set_clock; - - if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk) - mclk = adev->pm.pm_display_cfg.min_mem_set_clock; - - if (rps->vce_active) { - if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) - sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; - if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk) - mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk; - } - - ps->performance_levels[0].sclk = sclk; - ps->performance_levels[0].mclk = mclk; - - if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk) - 
ps->performance_levels[1].sclk = ps->performance_levels[0].sclk; - - if (disable_mclk_switching) { - if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk) - ps->performance_levels[0].mclk = ps->performance_levels[1].mclk; - } else { - if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk) - ps->performance_levels[1].mclk = ps->performance_levels[0].mclk; - } -} - -static int ci_thermal_set_temperature_range(struct amdgpu_device *adev, - int min_temp, int max_temp) -{ - int low_temp = 0 * 1000; - int high_temp = 255 * 1000; - u32 tmp; - - if (low_temp < min_temp) - low_temp = min_temp; - if (high_temp > max_temp) - high_temp = max_temp; - if (high_temp < low_temp) { - DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); - return -EINVAL; - } - - tmp = RREG32_SMC(ixCG_THERMAL_INT); - tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK); - tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) | - ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT; - WREG32_SMC(ixCG_THERMAL_INT, tmp); - -#if 0 - /* XXX: need to figure out how to handle this properly */ - tmp = RREG32_SMC(ixCG_THERMAL_CTRL); - tmp &= DIG_THERM_DPM_MASK; - tmp |= DIG_THERM_DPM(high_temp / 1000); - WREG32_SMC(ixCG_THERMAL_CTRL, tmp); -#endif - - adev->pm.dpm.thermal.min_temp = low_temp; - adev->pm.dpm.thermal.max_temp = high_temp; - return 0; -} - -static int ci_thermal_enable_alert(struct amdgpu_device *adev, - bool enable) -{ - u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - PPSMC_Result result; - - if (enable) { - thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK | - CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); - WREG32_SMC(ixCG_THERMAL_INT, thermal_int); - result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable); - if (result != PPSMC_Result_OK) { - DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); - return -EINVAL; - } - } else { - thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK | - CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT, thermal_int); - result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable); - if (result != PPSMC_Result_OK) { - DRM_DEBUG_KMS("Could not disable thermal interrupts.\n"); - return -EINVAL; - } - } - - return 0; -} - -static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp; - - if (pi->fan_ctrl_is_in_default_mode) { - tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) - >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; - pi->fan_ctrl_default_mode = tmp; - tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) - >> CG_FDO_CTRL2__TMIN__SHIFT; - pi->t_min = tmp; - pi->fan_ctrl_is_in_default_mode = false; - } - - tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; - tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT; - WREG32_SMC(ixCG_FDO_CTRL2, tmp); - - tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; - tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; - WREG32_SMC(ixCG_FDO_CTRL2, tmp); -} - -static int ci_thermal_setup_fan_table(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE }; - u32 duty100; - u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; - u16 fdo_min, slope1, slope2; - u32 reference_clock, tmp; - int ret; - u64 tmp64; - - if (!pi->fan_table_start) { - adev->pm.dpm.fan.ucode_fan_control = false; - return 0; - } - - duty100 = 
(RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) - >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; - - if (duty100 == 0) { - adev->pm.dpm.fan.ucode_fan_control = false; - return 0; - } - - tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100; - do_div(tmp64, 10000); - fdo_min = (u16)tmp64; - - t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min; - t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med; - - pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min; - pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med; - - slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); - fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); - fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); - - fan_table.Slope1 = cpu_to_be16(slope1); - fan_table.Slope2 = cpu_to_be16(slope2); - - fan_table.FdoMin = cpu_to_be16(fdo_min); - - fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst); - - fan_table.HystUp = cpu_to_be16(1); - - fan_table.HystSlope = cpu_to_be16(1); - - fan_table.TempRespLim = cpu_to_be16(5); - - reference_clock = amdgpu_asic_get_xclk(adev); - - fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * - reference_clock) / 1600); - - fan_table.FdoMax = cpu_to_be16((u16)duty100); - - tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) - >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT; - fan_table.TempSrc = (uint8_t)tmp; - - ret = amdgpu_ci_copy_bytes_to_smc(adev, - pi->fan_table_start, - (u8 *)(&fan_table), - sizeof(fan_table), - pi->sram_end); - - if (ret) { - DRM_ERROR("Failed to load fan table to the SMC."); - adev->pm.dpm.fan.ucode_fan_control = false; - } - - return 0; -} - -static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result ret; - - if (pi->caps_od_fuzzy_fan_control_support) { - ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_StartFanControl, - FAN_CONTROL_FUZZY); - if (ret != PPSMC_Result_OK) - return -EINVAL; - ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SetFanPwmMax, - adev->pm.dpm.fan.default_max_fan_pwm); - if (ret != PPSMC_Result_OK) - return -EINVAL; - } else { - ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_StartFanControl, - FAN_CONTROL_TABLE); - if (ret != PPSMC_Result_OK) - return -EINVAL; - } - - pi->fan_is_controlled_by_smc = true; - return 0; -} - - -static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) -{ - PPSMC_Result ret; - struct ci_power_info *pi = ci_get_pi(adev); - - ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl); - if (ret == PPSMC_Result_OK) { - pi->fan_is_controlled_by_smc = false; - return 0; - } else { - return -EINVAL; - } -} - -static int ci_dpm_get_fan_speed_percent(void *handle, - u32 *speed) -{ - u32 duty, duty100; - u64 tmp64; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.no_fan) - return -ENOENT; - - duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) - >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; - duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) - >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT; - - if (duty100 == 0) - return -EINVAL; - - tmp64 = (u64)duty * 100; - do_div(tmp64, duty100); - *speed = (u32)tmp64; - - if (*speed > 100) - *speed = 100; - - return 
0; -} - -static int ci_dpm_set_fan_speed_percent(void *handle, - u32 speed) -{ - u32 tmp; - u32 duty, duty100; - u64 tmp64; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - - if (adev->pm.no_fan) - return -ENOENT; - - if (pi->fan_is_controlled_by_smc) - return -EINVAL; - - if (speed > 100) - return -EINVAL; - - duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) - >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; - - if (duty100 == 0) - return -EINVAL; - - tmp64 = (u64)speed * duty100; - do_div(tmp64, 100); - duty = (u32)tmp64; - - tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK; - tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT; - WREG32_SMC(ixCG_FDO_CTRL0, tmp); - - return 0; -} - -static void ci_dpm_set_fan_control_mode(void *handle, u32 mode) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - switch (mode) { - case AMD_FAN_CTRL_NONE: - if (adev->pm.dpm.fan.ucode_fan_control) - ci_fan_ctrl_stop_smc_fan_control(adev); - ci_dpm_set_fan_speed_percent(adev, 100); - break; - case AMD_FAN_CTRL_MANUAL: - if (adev->pm.dpm.fan.ucode_fan_control) - ci_fan_ctrl_stop_smc_fan_control(adev); - break; - case AMD_FAN_CTRL_AUTO: - if (adev->pm.dpm.fan.ucode_fan_control) - ci_thermal_start_smc_fan_control(adev); - break; - default: - break; - } -} - -static u32 ci_dpm_get_fan_control_mode(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - - if (pi->fan_is_controlled_by_smc) - return AMD_FAN_CTRL_AUTO; - else - return AMD_FAN_CTRL_MANUAL; -} - -#if 0 -static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, - u32 *speed) -{ - u32 tach_period; - u32 xclk = amdgpu_asic_get_xclk(adev); - - if (adev->pm.no_fan) - return -ENOENT; - - if (adev->pm.fan_pulses_per_revolution == 0) - return -ENOENT; - - tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) - >> CG_TACH_STATUS__TACH_PERIOD__SHIFT; - if (tach_period == 0) - return -ENOENT; - - *speed = 60 * xclk * 10000 / tach_period; - - return 0; -} - -static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, - u32 speed) -{ - u32 tach_period, tmp; - u32 xclk = amdgpu_asic_get_xclk(adev); - - if (adev->pm.no_fan) - return -ENOENT; - - if (adev->pm.fan_pulses_per_revolution == 0) - return -ENOENT; - - if ((speed < adev->pm.fan_min_rpm) || - (speed > adev->pm.fan_max_rpm)) - return -EINVAL; - - if (adev->pm.dpm.fan.ucode_fan_control) - ci_fan_ctrl_stop_smc_fan_control(adev); - - tach_period = 60 * xclk * 10000 / (8 * speed); - tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK; - tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT; - WREG32_SMC(CG_TACH_CTRL, tmp); - - ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); - - return 0; -} -#endif - -static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp; - - if (!pi->fan_ctrl_is_in_default_mode) { - tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK; - tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; - WREG32_SMC(ixCG_FDO_CTRL2, tmp); - - tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK; - tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT; - WREG32_SMC(ixCG_FDO_CTRL2, tmp); - pi->fan_ctrl_is_in_default_mode = true; - } -} - -static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev) -{ - if 
(adev->pm.dpm.fan.ucode_fan_control) { - ci_fan_ctrl_start_smc_fan_control(adev); - ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC); - } -} - -static void ci_thermal_initialize(struct amdgpu_device *adev) -{ - u32 tmp; - - if (adev->pm.fan_pulses_per_revolution) { - tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK; - tmp |= (adev->pm.fan_pulses_per_revolution - 1) - << CG_TACH_CTRL__EDGE_PER_REV__SHIFT; - WREG32_SMC(ixCG_TACH_CTRL, tmp); - } - - tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK; - tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT; - WREG32_SMC(ixCG_FDO_CTRL2, tmp); -} - -static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev) -{ - int ret; - - ci_thermal_initialize(adev); - ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = ci_thermal_enable_alert(adev, true); - if (ret) - return ret; - if (adev->pm.dpm.fan.ucode_fan_control) { - ret = ci_thermal_setup_fan_table(adev); - if (ret) - return ret; - ci_thermal_start_smc_fan_control(adev); - } - - return 0; -} - -static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev) -{ - if (!adev->pm.no_fan) - ci_fan_ctrl_set_default_mode(adev); -} - -static int ci_read_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 *value) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - return amdgpu_ci_read_smc_sram_dword(adev, - pi->soft_regs_start + reg_offset, - value, pi->sram_end); -} - -static int ci_write_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 value) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - return amdgpu_ci_write_smc_sram_dword(adev, - pi->soft_regs_start + reg_offset, - value, pi->sram_end); -} - -static void ci_init_fps_limits(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - SMU7_Discrete_DpmTable *table = &pi->smc_state_table; - - if (pi->caps_fps) { - u16 tmp; - - tmp = 45; - table->FpsHighT = cpu_to_be16(tmp); - - tmp = 30; - table->FpsLowT = cpu_to_be16(tmp); - } -} - -static int ci_update_sclk_t(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret = 0; - u32 low_sclk_interrupt_t = 0; - - if (pi->caps_sclk_throttle_low_notification) { - low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); - - ret = amdgpu_ci_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT), - (u8 *)&low_sclk_interrupt_t, - sizeof(u32), pi->sram_end); - - } - - return ret; -} - -static void ci_get_leakage_voltages(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u16 leakage_id, virtual_voltage_id; - u16 vddc, vddci; - int i; - - pi->vddc_leakage.count = 0; - pi->vddci_leakage.count = 0; - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { - for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { - virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0) - continue; - if (vddc != 0 && vddc != virtual_voltage_id) { - pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; - pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; - pi->vddc_leakage.count++; - } - } - } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) { - for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) { - virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; - if 
(amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci, - virtual_voltage_id, - leakage_id) == 0) { - if (vddc != 0 && vddc != virtual_voltage_id) { - pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc; - pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id; - pi->vddc_leakage.count++; - } - if (vddci != 0 && vddci != virtual_voltage_id) { - pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci; - pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id; - pi->vddci_leakage.count++; - } - } - } - } -} - -static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) -{ - struct ci_power_info *pi = ci_get_pi(adev); - bool want_thermal_protection; - enum amdgpu_dpm_event_src dpm_event_src; - u32 tmp; - - switch (sources) { - case 0: - default: - want_thermal_protection = false; - break; - case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): - want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; - break; - case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL): - want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL; - break; - case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | - (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)): - want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; - break; - } - - if (want_thermal_protection) { -#if 0 - /* XXX: need to figure out how to handle this properly */ - tmp = RREG32_SMC(ixCG_THERMAL_CTRL); - tmp &= DPM_EVENT_SRC_MASK; - tmp |= DPM_EVENT_SRC(dpm_event_src); - WREG32_SMC(ixCG_THERMAL_CTRL, tmp); -#endif - - tmp = RREG32_SMC(ixGENERAL_PWRMGT); - if (pi->thermal_protection) - tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; - else - tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - } else { - tmp = RREG32_SMC(ixGENERAL_PWRMGT); - tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - } -} - -static void ci_enable_auto_throttle_source(struct amdgpu_device *adev, - enum amdgpu_dpm_auto_throttle_src source, - bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (enable) { - if (!(pi->active_auto_throttle_sources & (1 << source))) { - pi->active_auto_throttle_sources |= 1 << source; - ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); - } - } else { - if (pi->active_auto_throttle_sources & (1 << source)) { - pi->active_auto_throttle_sources &= ~(1 << source); - ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); - } - } -} - -static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev) -{ - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt); -} - -static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result; - - if (!pi->need_update_smu7_dpm_table) - return 0; - - if ((!pi->sclk_dpm_key_disabled) && - (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - if ((!pi->mclk_dpm_key_disabled) && - (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - 
pi->need_update_smu7_dpm_table = 0; - return 0; -} - -static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result; - - if (enable) { - if (!pi->sclk_dpm_key_disabled) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - if (!pi->mclk_dpm_key_disabled) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - - WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK, - ~MC_SEQ_CNTL_3__CAC_EN_MASK); - - WREG32_SMC(ixLCAC_MC0_CNTL, 0x05); - WREG32_SMC(ixLCAC_MC1_CNTL, 0x05); - WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005); - - udelay(10); - - WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005); - WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005); - WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005); - } - } else { - if (!pi->sclk_dpm_key_disabled) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - if (!pi->mclk_dpm_key_disabled) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - } - - return 0; -} - -static int ci_start_dpm(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result; - int ret; - u32 tmp; - - tmp = RREG32_SMC(ixGENERAL_PWRMGT); - tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - - tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; - WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); - - ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000); - - WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK); - - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - - ret = ci_enable_sclk_mclk_dpm(adev, true); - if (ret) - return ret; - - if (!pi->pcie_dpm_key_disabled) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - return 0; -} - -static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result; - - if (!pi->need_update_smu7_dpm_table) - return 0; - - if ((!pi->sclk_dpm_key_disabled) && - (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - if ((!pi->mclk_dpm_key_disabled) && - (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - return 0; -} - -static int ci_stop_dpm(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result smc_result; - int ret; - u32 tmp; - - tmp = RREG32_SMC(ixGENERAL_PWRMGT); - tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - - tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; - WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); - - if (!pi->pcie_dpm_key_disabled) { - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - 
- ret = ci_enable_sclk_mclk_dpm(adev, false); - if (ret) - return ret; - - smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - - return 0; -} - -static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - - if (enable) - tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK; - else - tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK; - WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); -} - -#if 0 -static int ci_notify_hw_of_power_source(struct amdgpu_device *adev, - bool ac_power) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_cac_tdp_table *cac_tdp_table = - adev->pm.dpm.dyn_state.cac_tdp_table; - u32 power_limit; - - if (ac_power) - power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256); - else - power_limit = (u32)(cac_tdp_table->battery_power_limit * 256); - - ci_set_power_limit(adev, power_limit); - - if (pi->caps_automatic_dc_transition) { - if (ac_power) - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC); - else - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp); - } - - return 0; -} -#endif - -static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter) -{ - WREG32(mmSMC_MSG_ARG_0, parameter); - return amdgpu_ci_send_msg_to_smc(adev, msg); -} - -static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 *parameter) -{ - PPSMC_Result smc_result; - - smc_result = amdgpu_ci_send_msg_to_smc(adev, msg); - - if ((smc_result == PPSMC_Result_OK) && parameter) - *parameter = RREG32(mmSMC_MSG_ARG_0); - - return smc_result; -} - -static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (!pi->sclk_dpm_key_disabled) { - PPSMC_Result smc_result = - amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - return 0; -} - -static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (!pi->mclk_dpm_key_disabled) { - PPSMC_Result smc_result = - amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - return 0; -} - -static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (!pi->pcie_dpm_key_disabled) { - PPSMC_Result smc_result = - amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - return 0; -} - -static int ci_set_power_limit(struct amdgpu_device *adev, u32 n) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) { - PPSMC_Result smc_result = - amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - - return 0; -} - -static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev, - u32 target_tdp) -{ - PPSMC_Result smc_result = - amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - return 0; -} - -#if 0 -static int ci_set_boot_state(struct amdgpu_device *adev) -{ - return 
ci_enable_sclk_mclk_dpm(adev, false); -} -#endif - -static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev) -{ - u32 sclk_freq; - PPSMC_Result smc_result = - amdgpu_ci_send_msg_to_smc_return_parameter(adev, - PPSMC_MSG_API_GetSclkFrequency, - &sclk_freq); - if (smc_result != PPSMC_Result_OK) - sclk_freq = 0; - - return sclk_freq; -} - -static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev) -{ - u32 mclk_freq; - PPSMC_Result smc_result = - amdgpu_ci_send_msg_to_smc_return_parameter(adev, - PPSMC_MSG_API_GetMclkFrequency, - &mclk_freq); - if (smc_result != PPSMC_Result_OK) - mclk_freq = 0; - - return mclk_freq; -} - -static void ci_dpm_start_smc(struct amdgpu_device *adev) -{ - int i; - - amdgpu_ci_program_jump_on_start(adev); - amdgpu_ci_start_smc_clock(adev); - amdgpu_ci_start_smc(adev); - for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) - break; - } -} - -static void ci_dpm_stop_smc(struct amdgpu_device *adev) -{ - amdgpu_ci_reset_smc(adev); - amdgpu_ci_stop_smc_clock(adev); -} - -static int ci_process_firmware_header(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_ci_read_smc_sram_dword(adev, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, DpmTable), - &tmp, pi->sram_end); - if (ret) - return ret; - - pi->dpm_table_start = tmp; - - ret = amdgpu_ci_read_smc_sram_dword(adev, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, SoftRegisters), - &tmp, pi->sram_end); - if (ret) - return ret; - - pi->soft_regs_start = tmp; - - ret = amdgpu_ci_read_smc_sram_dword(adev, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, mcRegisterTable), - &tmp, pi->sram_end); - if (ret) - return ret; - - pi->mc_reg_table_start = tmp; - - ret = amdgpu_ci_read_smc_sram_dword(adev, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, FanTable), - &tmp, pi->sram_end); - if (ret) - return ret; - - pi->fan_table_start = tmp; - - ret = amdgpu_ci_read_smc_sram_dword(adev, - SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, mcArbDramTimingTable), - &tmp, pi->sram_end); - if (ret) - return ret; - - pi->arb_table_start = tmp; - - return 0; -} - -static void ci_read_clock_registers(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - pi->clock_registers.cg_spll_func_cntl = - RREG32_SMC(ixCG_SPLL_FUNC_CNTL); - pi->clock_registers.cg_spll_func_cntl_2 = - RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2); - pi->clock_registers.cg_spll_func_cntl_3 = - RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3); - pi->clock_registers.cg_spll_func_cntl_4 = - RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4); - pi->clock_registers.cg_spll_spread_spectrum = - RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM); - pi->clock_registers.cg_spll_spread_spectrum_2 = - RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2); - pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL); - pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL); - pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL); - pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL); - pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL); - pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1); - pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2); - pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1); - pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2); -} - -static void ci_init_sclk_t(struct amdgpu_device *adev) -{ - struct 
ci_power_info *pi = ci_get_pi(adev); - - pi->low_sclk_interrupt_t = 0; -} - -static void ci_enable_thermal_protection(struct amdgpu_device *adev, - bool enable) -{ - u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); - - if (enable) - tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; - else - tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); -} - -static void ci_enable_acpi_power_management(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); - - tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK; - - WREG32_SMC(ixGENERAL_PWRMGT, tmp); -} - -#if 0 -static int ci_enter_ulp_state(struct amdgpu_device *adev) -{ - - WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); - - udelay(25000); - - return 0; -} - -static int ci_exit_ulp_state(struct amdgpu_device *adev) -{ - int i; - - WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); - - udelay(7000); - - for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmSMC_RESP_0) == 1) - break; - udelay(1000); - } - - return 0; -} -#endif - -static int ci_notify_smc_display_change(struct amdgpu_device *adev, - bool has_display) -{ - PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; - - return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL; -} - -static int ci_enable_ds_master_switch(struct amdgpu_device *adev, - bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (enable) { - if (pi->caps_sclk_ds) { - if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK) - return -EINVAL; - } else { - if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) - return -EINVAL; - } - } else { - if (pi->caps_sclk_ds) { - if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK) - return -EINVAL; - } - } - - return 0; -} - -static void ci_program_display_gap(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL); - u32 pre_vbi_time_in_us; - u32 frame_time_in_us; - u32 ref_clock = adev->clock.spll.reference_freq; - u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev); - u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); - - tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK; - if (adev->pm.dpm.new_active_crtc_count > 0) - tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT); - else - tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT); - WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp); - - if (refresh_rate == 0) - refresh_rate = 60; - if (vblank_time == 0xffffffff) - vblank_time = 500; - frame_time_in_us = 1000000 / refresh_rate; - pre_vbi_time_in_us = - frame_time_in_us - 200 - vblank_time; - tmp = pre_vbi_time_in_us * (ref_clock / 100); - - WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp); - ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64); - ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); - - - ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1)); - -} - -static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp; - - if (enable) { - if (pi->caps_sclk_ss_support) { - tmp = RREG32_SMC(ixGENERAL_PWRMGT); - tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - } - } else { - tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM); - tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK; - 
WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp); - - tmp = RREG32_SMC(ixGENERAL_PWRMGT); - tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - } -} - -static void ci_program_sstp(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER, - ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) | - (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT))); -} - -static void ci_enable_display_gap(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL); - - tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK | - CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK); - tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) | - (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT)); - - WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp); -} - -static void ci_program_vc(struct amdgpu_device *adev) -{ - u32 tmp; - - tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); - WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); - - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7); -} - -static void ci_clear_vc(struct amdgpu_device *adev) -{ - u32 tmp; - - tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); - WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp); - - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0); - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0); -} - -static int ci_upload_firmware(struct amdgpu_device *adev) -{ - int i, ret; - - if (amdgpu_ci_is_smc_running(adev)) { - DRM_INFO("smc is running, no need to load smc firmware\n"); - return 0; - } - - for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK) - break; - } - WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1); - - amdgpu_ci_stop_smc_clock(adev); - amdgpu_ci_reset_smc(adev); - - ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END); - - return ret; - -} - -static int ci_get_svi2_voltage_table(struct amdgpu_device *adev, - struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table, - struct atom_voltage_table *voltage_table) -{ - u32 i; - - if (voltage_dependency_table == NULL) - return -EINVAL; - - voltage_table->mask_low = 0; - voltage_table->phase_delay = 0; - - voltage_table->count = voltage_dependency_table->count; - for (i = 0; i < voltage_table->count; i++) { - voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; - voltage_table->entries[i].smio_low = 0; - } - - return 0; -} - -static int ci_construct_voltage_tables(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret; - - if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, - 
VOLTAGE_OBJ_GPIO_LUT, - &pi->vddc_voltage_table); - if (ret) - return ret; - } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { - ret = ci_get_svi2_voltage_table(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &pi->vddc_voltage_table); - if (ret) - return ret; - } - - if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC) - ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC, - &pi->vddc_voltage_table); - - if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI, - VOLTAGE_OBJ_GPIO_LUT, - &pi->vddci_voltage_table); - if (ret) - return ret; - } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { - ret = ci_get_svi2_voltage_table(adev, - &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &pi->vddci_voltage_table); - if (ret) - return ret; - } - - if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI) - ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI, - &pi->vddci_voltage_table); - - if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, - VOLTAGE_OBJ_GPIO_LUT, - &pi->mvdd_voltage_table); - if (ret) - return ret; - } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { - ret = ci_get_svi2_voltage_table(adev, - &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, - &pi->mvdd_voltage_table); - if (ret) - return ret; - } - - if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD) - ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD, - &pi->mvdd_voltage_table); - - return 0; -} - -static void ci_populate_smc_voltage_table(struct amdgpu_device *adev, - struct atom_voltage_table_entry *voltage_table, - SMU7_Discrete_VoltageLevel *smc_voltage_table) -{ - int ret; - - ret = ci_get_std_voltage_value_sidd(adev, voltage_table, - &smc_voltage_table->StdVoltageHiSidd, - &smc_voltage_table->StdVoltageLoSidd); - - if (ret) { - smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE; - smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE; - } - - smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE); - smc_voltage_table->StdVoltageHiSidd = - cpu_to_be16(smc_voltage_table->StdVoltageHiSidd); - smc_voltage_table->StdVoltageLoSidd = - cpu_to_be16(smc_voltage_table->StdVoltageLoSidd); -} - -static int ci_populate_smc_vddc_table(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - struct ci_power_info *pi = ci_get_pi(adev); - unsigned int count; - - table->VddcLevelCount = pi->vddc_voltage_table.count; - for (count = 0; count < table->VddcLevelCount; count++) { - ci_populate_smc_voltage_table(adev, - &pi->vddc_voltage_table.entries[count], - &table->VddcLevel[count]); - - if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) - table->VddcLevel[count].Smio |= - pi->vddc_voltage_table.entries[count].smio_low; - else - table->VddcLevel[count].Smio = 0; - } - table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount); - - return 0; -} - -static int ci_populate_smc_vddci_table(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - unsigned int count; - struct ci_power_info *pi = ci_get_pi(adev); - - table->VddciLevelCount = pi->vddci_voltage_table.count; - for (count = 0; count < table->VddciLevelCount; count++) { - ci_populate_smc_voltage_table(adev, - &pi->vddci_voltage_table.entries[count], - &table->VddciLevel[count]); - - if (pi->vddci_control == 
CISLANDS_VOLTAGE_CONTROL_BY_GPIO) - table->VddciLevel[count].Smio |= - pi->vddci_voltage_table.entries[count].smio_low; - else - table->VddciLevel[count].Smio = 0; - } - table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount); - - return 0; -} - -static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - struct ci_power_info *pi = ci_get_pi(adev); - unsigned int count; - - table->MvddLevelCount = pi->mvdd_voltage_table.count; - for (count = 0; count < table->MvddLevelCount; count++) { - ci_populate_smc_voltage_table(adev, - &pi->mvdd_voltage_table.entries[count], - &table->MvddLevel[count]); - - if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) - table->MvddLevel[count].Smio |= - pi->mvdd_voltage_table.entries[count].smio_low; - else - table->MvddLevel[count].Smio = 0; - } - table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount); - - return 0; -} - -static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - int ret; - - ret = ci_populate_smc_vddc_table(adev, table); - if (ret) - return ret; - - ret = ci_populate_smc_vddci_table(adev, table); - if (ret) - return ret; - - ret = ci_populate_smc_mvdd_table(adev, table); - if (ret) - return ret; - - return 0; -} - -static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, - SMU7_Discrete_VoltageLevel *voltage) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 i = 0; - - if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) { - for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) { - if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) { - voltage->Voltage = pi->mvdd_voltage_table.entries[i].value; - break; - } - } - - if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count) - return -EINVAL; - } - - return -EINVAL; -} - -static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev, - struct atom_voltage_table_entry *voltage_table, - u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd) -{ - u16 v_index, idx; - bool voltage_found = false; - *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE; - *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE; - - if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) - return -EINVAL; - - if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { - for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { - if (voltage_table->value == - adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { - voltage_found = true; - if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - idx = v_index; - else - idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1; - *std_voltage_lo_sidd = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; - *std_voltage_hi_sidd = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; - break; - } - } - - if (!voltage_found) { - for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { - if (voltage_table->value <= - adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { - voltage_found = true; - if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - idx = v_index; - else - idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1; - *std_voltage_lo_sidd = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE; - *std_voltage_hi_sidd = - 
adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE; - break; - } - } - } - } - - return 0; -} - -static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev, - const struct amdgpu_phase_shedding_limits_table *limits, - u32 sclk, - u32 *phase_shedding) -{ - unsigned int i; - - *phase_shedding = 1; - - for (i = 0; i < limits->count; i++) { - if (sclk < limits->entries[i].sclk) { - *phase_shedding = i; - break; - } - } -} - -static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev, - const struct amdgpu_phase_shedding_limits_table *limits, - u32 mclk, - u32 *phase_shedding) -{ - unsigned int i; - - *phase_shedding = 1; - - for (i = 0; i < limits->count; i++) { - if (mclk < limits->entries[i].mclk) { - *phase_shedding = i; - break; - } - } -} - -static int ci_init_arb_table_index(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start, - &tmp, pi->sram_end); - if (ret) - return ret; - - tmp &= 0x00FFFFFF; - tmp |= MC_CG_ARB_FREQ_F1 << 24; - - return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start, - tmp, pi->sram_end); -} - -static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev, - struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table, - u32 clock, u32 *voltage) -{ - u32 i = 0; - - if (allowed_clock_voltage_table->count == 0) - return -EINVAL; - - for (i = 0; i < allowed_clock_voltage_table->count; i++) { - if (allowed_clock_voltage_table->entries[i].clk >= clock) { - *voltage = allowed_clock_voltage_table->entries[i].v; - return 0; - } - } - - *voltage = allowed_clock_voltage_table->entries[i-1].v; - - return 0; -} - -static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr) -{ - u32 i; - u32 tmp; - u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK); - - if (sclk < min) - return 0; - - for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { - tmp = sclk >> i; - if (tmp >= min || i == 0) - break; - } - - return (u8)i; -} - -static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev) -{ - return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -static int ci_reset_to_default(struct amdgpu_device *adev) -{ - return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev) -{ - u32 tmp; - - tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8; - - if (tmp == MC_CG_ARB_FREQ_F0) - return 0; - - return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0); -} - -static void ci_register_patching_mc_arb(struct amdgpu_device *adev, - const u32 engine_clock, - const u32 memory_clock, - u32 *dram_timimg2) -{ - bool patch; - u32 tmp, tmp2; - - tmp = RREG32(mmMC_SEQ_MISC0); - patch = ((tmp & 0x0000f00) == 0x300) ? 
true : false; - - if (patch && - ((adev->pdev->device == 0x67B0) || - (adev->pdev->device == 0x67B1))) { - if ((memory_clock > 100000) && (memory_clock <= 125000)) { - tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff; - *dram_timimg2 &= ~0x00ff0000; - *dram_timimg2 |= tmp2 << 16; - } else if ((memory_clock > 125000) && (memory_clock <= 137500)) { - tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff; - *dram_timimg2 &= ~0x00ff0000; - *dram_timimg2 |= tmp2 << 16; - } - } -} - -static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev, - u32 sclk, - u32 mclk, - SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs) -{ - u32 dram_timing; - u32 dram_timing2; - u32 burst_time; - - amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk); - - dram_timing = RREG32(mmMC_ARB_DRAM_TIMING); - dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2); - burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK; - - ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2); - - arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); - arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); - arb_regs->McArbBurstTime = (u8)burst_time; - - return 0; -} - -static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - SMU7_Discrete_MCArbDramTimingTable arb_regs; - u32 i, j; - int ret = 0; - - memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable)); - - for (i = 0; i < pi->dpm_table.sclk_table.count; i++) { - for (j = 0; j < pi->dpm_table.mclk_table.count; j++) { - ret = ci_populate_memory_timing_parameters(adev, - pi->dpm_table.sclk_table.dpm_levels[i].value, - pi->dpm_table.mclk_table.dpm_levels[j].value, - &arb_regs.entries[i][j]); - if (ret) - break; - } - } - - if (ret == 0) - ret = amdgpu_ci_copy_bytes_to_smc(adev, - pi->arb_table_start, - (u8 *)&arb_regs, - sizeof(SMU7_Discrete_MCArbDramTimingTable), - pi->sram_end); - - return ret; -} - -static int ci_program_memory_timing_parameters(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (pi->need_update_smu7_dpm_table == 0) - return 0; - - return ci_do_program_memory_timing_parameters(adev); -} - -static void ci_populate_smc_initial_state(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_boot_state) -{ - struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state); - struct ci_power_info *pi = ci_get_pi(adev); - u32 level = 0; - - for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) { - if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >= - boot_state->performance_levels[0].sclk) { - pi->smc_state_table.GraphicsBootLevel = level; - break; - } - } - - for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) { - if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >= - boot_state->performance_levels[0].mclk) { - pi->smc_state_table.MemoryBootLevel = level; - break; - } - } -} - -static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table) -{ - u32 i; - u32 mask_value = 0; - - for (i = dpm_table->count; i > 0; i--) { - mask_value = mask_value << 1; - if (dpm_table->dpm_levels[i-1].enabled) - mask_value |= 0x1; - else - mask_value &= 0xFFFFFFFE; - } - - return mask_value; -} - -static void ci_populate_smc_link_level(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_dpm_table *dpm_table = &pi->dpm_table; - u32 i; - - for (i = 0; i < 
dpm_table->pcie_speed_table.count; i++) { - table->LinkLevel[i].PcieGenSpeed = - (u8)dpm_table->pcie_speed_table.dpm_levels[i].value; - table->LinkLevel[i].PcieLaneCount = - amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1); - table->LinkLevel[i].EnabledForActivity = 1; - table->LinkLevel[i].DownT = cpu_to_be32(5); - table->LinkLevel[i].UpT = cpu_to_be32(30); - } - - pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count; - pi->dpm_level_enable_mask.pcie_dpm_enable_mask = - ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); -} - -static int ci_populate_smc_uvd_level(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - u32 count; - struct atom_clock_dividers dividers; - int ret = -EINVAL; - - table->UvdLevelCount = - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count; - - for (count = 0; count < table->UvdLevelCount; count++) { - table->UvdLevel[count].VclkFrequency = - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk; - table->UvdLevel[count].DclkFrequency = - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk; - table->UvdLevel[count].MinVddc = - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; - table->UvdLevel[count].MinVddcPhases = 1; - - ret = amdgpu_atombios_get_clock_dividers(adev, - COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - table->UvdLevel[count].VclkFrequency, false, &dividers); - if (ret) - return ret; - - table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider; - - ret = amdgpu_atombios_get_clock_dividers(adev, - COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - table->UvdLevel[count].DclkFrequency, false, &dividers); - if (ret) - return ret; - - table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider; - - table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency); - table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency); - table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc); - } - - return ret; -} - -static int ci_populate_smc_vce_level(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - u32 count; - struct atom_clock_dividers dividers; - int ret = -EINVAL; - - table->VceLevelCount = - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count; - - for (count = 0; count < table->VceLevelCount; count++) { - table->VceLevel[count].Frequency = - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk; - table->VceLevel[count].MinVoltage = - (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; - table->VceLevel[count].MinPhases = 1; - - ret = amdgpu_atombios_get_clock_dividers(adev, - COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - table->VceLevel[count].Frequency, false, &dividers); - if (ret) - return ret; - - table->VceLevel[count].Divider = (u8)dividers.post_divider; - - table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency); - table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage); - } - - return ret; - -} - -static int ci_populate_smc_acp_level(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - u32 count; - struct atom_clock_dividers dividers; - int ret = -EINVAL; - - table->AcpLevelCount = (u8) - (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count); - - for (count = 0; count < table->AcpLevelCount; count++) { - table->AcpLevel[count].Frequency = -
adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk; - table->AcpLevel[count].MinVoltage = - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v; - table->AcpLevel[count].MinPhases = 1; - - ret = amdgpu_atombios_get_clock_dividers(adev, - COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - table->AcpLevel[count].Frequency, false, &dividers); - if (ret) - return ret; - - table->AcpLevel[count].Divider = (u8)dividers.post_divider; - - table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency); - table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage); - } - - return ret; -} - -static int ci_populate_smc_samu_level(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - u32 count; - struct atom_clock_dividers dividers; - int ret = -EINVAL; - - table->SamuLevelCount = - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count; - - for (count = 0; count < table->SamuLevelCount; count++) { - table->SamuLevel[count].Frequency = - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk; - table->SamuLevel[count].MinVoltage = - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE; - table->SamuLevel[count].MinPhases = 1; - - ret = amdgpu_atombios_get_clock_dividers(adev, - COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, - table->SamuLevel[count].Frequency, false, &dividers); - if (ret) - return ret; - - table->SamuLevel[count].Divider = (u8)dividers.post_divider; - - table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency); - table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage); - } - - return ret; -} - -static int ci_calculate_mclk_params(struct amdgpu_device *adev, - u32 memory_clock, - SMU7_Discrete_MemoryLevel *mclk, - bool strobe_mode, - bool dll_state_on) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 dll_cntl = pi->clock_registers.dll_cntl; - u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; - u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl; - u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl; - u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl; - u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1; - u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2; - u32 mpll_ss1 = pi->clock_registers.mpll_ss1; - u32 mpll_ss2 = pi->clock_registers.mpll_ss2; - struct atom_mpll_param mpll_param; - int ret; - - ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param); - if (ret) - return ret; - - mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK; - mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT); - - mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK | - MPLL_FUNC_CNTL_1__VCO_MODE_MASK); - mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT | - (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) | - (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT); - - mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK; - mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { - mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK | - MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK); - mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) | - (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); - } - - if
(pi->caps_mclk_ss_support) { - struct amdgpu_atom_ss ss; - u32 freq_nom; - u32 tmp; - u32 reference_clock = adev->clock.mpll.reference_freq; - - if (mpll_param.qdr == 1) - freq_nom = memory_clock * 4 * (1 << mpll_param.post_div); - else - freq_nom = memory_clock * 2 * (1 << mpll_param.post_div); - - tmp = (freq_nom / reference_clock); - tmp = tmp * tmp; - if (amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_MEMORY_SS, freq_nom)) { - u32 clks = reference_clock * 5 / ss.rate; - u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); - - mpll_ss1 &= ~MPLL_SS1__CLKV_MASK; - mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT); - - mpll_ss2 &= ~MPLL_SS2__CLKS_MASK; - mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT); - } - } - - mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK; - mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT); - - if (dll_state_on) - mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | - MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK; - else - mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | - MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK); - - mclk->MclkFrequency = memory_clock; - mclk->MpllFuncCntl = mpll_func_cntl; - mclk->MpllFuncCntl_1 = mpll_func_cntl_1; - mclk->MpllFuncCntl_2 = mpll_func_cntl_2; - mclk->MpllAdFuncCntl = mpll_ad_func_cntl; - mclk->MpllDqFuncCntl = mpll_dq_func_cntl; - mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl; - mclk->DllCntl = dll_cntl; - mclk->MpllSs1 = mpll_ss1; - mclk->MpllSs2 = mpll_ss2; - - return 0; -} - -static int ci_populate_single_memory_level(struct amdgpu_device *adev, - u32 memory_clock, - SMU7_Discrete_MemoryLevel *memory_level) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret; - bool dll_state_on; - - if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) { - ret = ci_get_dependency_volt_by_clk(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - memory_clock, &memory_level->MinVddc); - if (ret) - return ret; - } - - if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) { - ret = ci_get_dependency_volt_by_clk(adev, - &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - memory_clock, &memory_level->MinVddci); - if (ret) - return ret; - } - - if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) { - ret = ci_get_dependency_volt_by_clk(adev, - &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, - memory_clock, &memory_level->MinMvdd); - if (ret) - return ret; - } - - memory_level->MinVddcPhases = 1; - - if (pi->vddc_phase_shed_control) - ci_populate_phase_value_based_on_mclk(adev, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table, - memory_clock, - &memory_level->MinVddcPhases); - - memory_level->EnabledForActivity = 1; - memory_level->EnabledForThrottle = 1; - memory_level->UpH = 0; - memory_level->DownH = 100; - memory_level->VoltageDownH = 0; - memory_level->ActivityLevel = (u16)pi->mclk_activity_target; - - memory_level->StutterEnable = false; - memory_level->StrobeEnable = false; - memory_level->EdcReadEnable = false; - memory_level->EdcWriteEnable = false; - memory_level->RttEnable = false; - - memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - - if (pi->mclk_stutter_mode_threshold && - (memory_clock <= pi->mclk_stutter_mode_threshold) && - (!pi->uvd_enabled) && - (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && - (adev->pm.dpm.new_active_crtc_count <= 2)) - memory_level->StutterEnable = true; - - if (pi->mclk_strobe_mode_threshold && - (memory_clock <= pi->mclk_strobe_mode_threshold)) - 
memory_level->StrobeEnable = 1; - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { - memory_level->StrobeRatio = - ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable); - if (pi->mclk_edc_enable_threshold && - (memory_clock > pi->mclk_edc_enable_threshold)) - memory_level->EdcReadEnable = true; - - if (pi->mclk_edc_wr_enable_threshold && - (memory_clock > pi->mclk_edc_wr_enable_threshold)) - memory_level->EdcWriteEnable = true; - - if (memory_level->StrobeEnable) { - if (ci_get_mclk_frequency_ratio(memory_clock, true) >= - ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf)) - dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false; - else - dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false; - } else { - dll_state_on = pi->dll_default_on; - } - } else { - memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock); - dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false; - } - - ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on); - if (ret) - return ret; - - memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); - memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); - memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); - memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); - - memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); - memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); - memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl); - memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1); - memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2); - memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl); - memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl); - memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl); - memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl); - memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1); - memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2); - - return 0; -} - -static int ci_populate_smc_acpi_level(struct amdgpu_device *adev, - SMU7_Discrete_DpmTable *table) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct atom_clock_dividers dividers; - SMU7_Discrete_VoltageLevel voltage_level; - u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl; - u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2; - u32 dll_cntl = pi->clock_registers.dll_cntl; - u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl; - int ret; - - table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; - - if (pi->acpi_vddc) - table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE); - else - table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE); - - table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 
0 : 1; - - table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq; - - ret = amdgpu_atombios_get_clock_dividers(adev, - COMPUTE_GPUCLK_INPUT_FLAG_SCLK, - table->ACPILevel.SclkFrequency, false, &dividers); - if (ret) - return ret; - - table->ACPILevel.SclkDid = (u8)dividers.post_divider; - table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - table->ACPILevel.DeepSleepDivId = 0; - - spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK; - spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK; - - spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK; - spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT); - - table->ACPILevel.CgSpllFuncCntl = spll_func_cntl; - table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2; - table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3; - table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4; - table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum; - table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2; - table->ACPILevel.CcPwrDynRm = 0; - table->ACPILevel.CcPwrDynRm1 = 0; - - table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags); - table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases); - table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency); - table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl); - table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2); - table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3); - table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4); - table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum); - table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2); - table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm); - table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1); - - table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc; - table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases; - - if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) { - if (pi->acpi_vddci) - table->MemoryACPILevel.MinVddci = - cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE); - else - table->MemoryACPILevel.MinVddci = - cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE); - } - - if (ci_populate_mvdd_value(adev, 0, &voltage_level)) - table->MemoryACPILevel.MinMvdd = 0; - else - table->MemoryACPILevel.MinMvdd = - cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE); - - mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK | - MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK; - mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK | - MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK); - - dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK); - - table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl); - table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl); - table->MemoryACPILevel.MpllAdFuncCntl = - cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl); - table->MemoryACPILevel.MpllDqFuncCntl = - cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl); - table->MemoryACPILevel.MpllFuncCntl = - cpu_to_be32(pi->clock_registers.mpll_func_cntl); - table->MemoryACPILevel.MpllFuncCntl_1 = - cpu_to_be32(pi->clock_registers.mpll_func_cntl_1); - table->MemoryACPILevel.MpllFuncCntl_2 = - cpu_to_be32(pi->clock_registers.mpll_func_cntl_2); -
table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1); - table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2); - - table->MemoryACPILevel.EnabledForThrottle = 0; - table->MemoryACPILevel.EnabledForActivity = 0; - table->MemoryACPILevel.UpH = 0; - table->MemoryACPILevel.DownH = 100; - table->MemoryACPILevel.VoltageDownH = 0; - table->MemoryACPILevel.ActivityLevel = - cpu_to_be16((u16)pi->mclk_activity_target); - - table->MemoryACPILevel.StutterEnable = false; - table->MemoryACPILevel.StrobeEnable = false; - table->MemoryACPILevel.EdcReadEnable = false; - table->MemoryACPILevel.EdcWriteEnable = false; - table->MemoryACPILevel.RttEnable = false; - - return 0; -} - - -static int ci_enable_ulv(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ulv_parm *ulv = &pi->ulv; - - if (ulv->supported) { - if (enable) - return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? - 0 : -EINVAL; - else - return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? - 0 : -EINVAL; - } - - return 0; -} - -static int ci_populate_ulv_level(struct amdgpu_device *adev, - SMU7_Discrete_Ulv *state) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u16 ulv_voltage = adev->pm.dpm.backbias_response_time; - - state->CcPwrDynRm = 0; - state->CcPwrDynRm1 = 0; - - if (ulv_voltage == 0) { - pi->ulv.supported = false; - return 0; - } - - if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) { - if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) - state->VddcOffset = 0; - else - state->VddcOffset = - adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage; - } else { - if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v) - state->VddcOffsetVid = 0; - else - state->VddcOffsetVid = (u8) - ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) * - VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - } - state->VddcPhase = pi->vddc_phase_shed_control ? 
0 : 1; - - state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm); - state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1); - state->VddcOffset = cpu_to_be16(state->VddcOffset); - - return 0; -} - -static int ci_calculate_sclk_params(struct amdgpu_device *adev, - u32 engine_clock, - SMU7_Discrete_GraphicsLevel *sclk) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct atom_clock_dividers dividers; - u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3; - u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2; - u32 reference_clock = adev->clock.spll.reference_freq; - u32 reference_divider; - u32 fbdiv; - int ret; - - ret = amdgpu_atombios_get_clock_dividers(adev, - COMPUTE_GPUCLK_INPUT_FLAG_SCLK, - engine_clock, false, &dividers); - if (ret) - return ret; - - reference_divider = 1 + dividers.ref_div; - fbdiv = dividers.fb_div & 0x3FFFFFF; - - spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK; - spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT); - spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK; - - if (pi->caps_sclk_ss_support) { - struct amdgpu_atom_ss ss; - u32 vco_freq = engine_clock * dividers.post_div; - - if (amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_ENGINE_SS, vco_freq)) { - u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); - u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); - - cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK); - cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT); - cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT); - - cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK; - cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT); - } - } - - sclk->SclkFrequency = engine_clock; - sclk->CgSpllFuncCntl3 = spll_func_cntl_3; - sclk->CgSpllFuncCntl4 = spll_func_cntl_4; - sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum; - sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2; - sclk->SclkDid = (u8)dividers.post_divider; - - return 0; -} - -static int ci_populate_single_graphic_level(struct amdgpu_device *adev, - u32 engine_clock, - u16 sclk_activity_level_t, - SMU7_Discrete_GraphicsLevel *graphic_level) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret; - - ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level); - if (ret) - return ret; - - ret = ci_get_dependency_volt_by_clk(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - engine_clock, &graphic_level->MinVddc); - if (ret) - return ret; - - graphic_level->SclkFrequency = engine_clock; - - graphic_level->Flags = 0; - graphic_level->MinVddcPhases = 1; - - if (pi->vddc_phase_shed_control) - ci_populate_phase_value_based_on_sclk(adev, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table, - engine_clock, - &graphic_level->MinVddcPhases); - - graphic_level->ActivityLevel = sclk_activity_level_t; - - graphic_level->CcPwrDynRm = 0; - graphic_level->CcPwrDynRm1 = 0; - graphic_level->EnabledForThrottle = 1; - graphic_level->UpH = 0; - graphic_level->DownH = 0; - graphic_level->VoltageDownH = 0; - graphic_level->PowerThrottle = 0; - - if (pi->caps_sclk_ds) - graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock, - CISLAND_MINIMUM_ENGINE_CLOCK); - - graphic_level->DisplayWatermark = -
PPSMC_DISPLAY_WATERMARK_LOW; - - graphic_level->Flags = cpu_to_be32(graphic_level->Flags); - graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); - graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); - graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); - graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); - graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3); - graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4); - graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum); - graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2); - graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm); - graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1); - - return 0; -} - -static int ci_populate_all_graphic_levels(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_dpm_table *dpm_table = &pi->dpm_table; - u32 level_array_address = pi->dpm_table_start + - offsetof(SMU7_Discrete_DpmTable, GraphicsLevel); - u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) * - SMU7_MAX_LEVELS_GRAPHICS; - SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel; - u32 i, ret; - - memset(levels, 0, level_array_size); - - for (i = 0; i < dpm_table->sclk_table.count; i++) { - ret = ci_populate_single_graphic_level(adev, - dpm_table->sclk_table.dpm_levels[i].value, - (u16)pi->activity_target[i], - &pi->smc_state_table.GraphicsLevel[i]); - if (ret) - return ret; - if (i > 1) - pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0; - if (i == (dpm_table->sclk_table.count - 1)) - pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - } - pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1; - - pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; - pi->dpm_level_enable_mask.sclk_dpm_enable_mask = - ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); - - ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address, - (u8 *)levels, level_array_size, - pi->sram_end); - if (ret) - return ret; - - return 0; -} - -static int ci_populate_ulv_state(struct amdgpu_device *adev, - SMU7_Discrete_Ulv *ulv_level) -{ - return ci_populate_ulv_level(adev, ulv_level); -} - -static int ci_populate_all_memory_levels(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_dpm_table *dpm_table = &pi->dpm_table; - u32 level_array_address = pi->dpm_table_start + - offsetof(SMU7_Discrete_DpmTable, MemoryLevel); - u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * - SMU7_MAX_LEVELS_MEMORY; - SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel; - u32 i, ret; - - memset(levels, 0, level_array_size); - - for (i = 0; i < dpm_table->mclk_table.count; i++) { - if (dpm_table->mclk_table.dpm_levels[i].value == 0) - return -EINVAL; - ret = ci_populate_single_memory_level(adev, - dpm_table->mclk_table.dpm_levels[i].value, - &pi->smc_state_table.MemoryLevel[i]); - if (ret) - return ret; - } - - if ((dpm_table->mclk_table.count >= 2) && - ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) { - pi->smc_state_table.MemoryLevel[1].MinVddc = - pi->smc_state_table.MemoryLevel[0].MinVddc; - pi->smc_state_table.MemoryLevel[1].MinVddcPhases = - pi->smc_state_table.MemoryLevel[0].MinVddcPhases; - } - - 
pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); - - pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; - pi->dpm_level_enable_mask.mclk_dpm_enable_mask = - ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); - - pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark = - PPSMC_DISPLAY_WATERMARK_HIGH; - - ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address, - (u8 *)levels, level_array_size, - pi->sram_end); - if (ret) - return ret; - - return 0; -} - -static void ci_reset_single_dpm_table(struct amdgpu_device *adev, - struct ci_single_dpm_table* dpm_table, - u32 count) -{ - u32 i; - - dpm_table->count = count; - for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) - dpm_table->dpm_levels[i].enabled = false; -} - -static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table, - u32 index, u32 pcie_gen, u32 pcie_lanes) -{ - dpm_table->dpm_levels[index].value = pcie_gen; - dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = true; -} - -static int ci_setup_default_pcie_tables(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) - return -EINVAL; - - if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) { - pi->pcie_gen_powersaving = pi->pcie_gen_performance; - pi->pcie_lane_powersaving = pi->pcie_lane_performance; - } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) { - pi->pcie_gen_performance = pi->pcie_gen_powersaving; - pi->pcie_lane_performance = pi->pcie_lane_powersaving; - } - - ci_reset_single_dpm_table(adev, - &pi->dpm_table.pcie_speed_table, - SMU7_MAX_LEVELS_LINK); - - if (adev->asic_type == CHIP_BONAIRE) - ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, - pi->pcie_gen_powersaving.min, - pi->pcie_lane_powersaving.max); - else - ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, - pi->pcie_gen_powersaving.min, - pi->pcie_lane_powersaving.min); - ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, - pi->pcie_gen_performance.min, - pi->pcie_lane_performance.min); - ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2, - pi->pcie_gen_powersaving.min, - pi->pcie_lane_powersaving.max); - ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3, - pi->pcie_gen_performance.min, - pi->pcie_lane_performance.max); - ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4, - pi->pcie_gen_powersaving.max, - pi->pcie_lane_powersaving.max); - ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5, - pi->pcie_gen_performance.max, - pi->pcie_lane_performance.max); - - pi->dpm_table.pcie_speed_table.count = 6; - - return 0; -} - -static int ci_setup_default_dpm_tables(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk; - struct amdgpu_cac_leakage_table *std_voltage_table = - &adev->pm.dpm.dyn_state.cac_leakage_table; - u32 i; - - if (allowed_sclk_vddc_table == NULL) - return -EINVAL; - if (allowed_sclk_vddc_table->count < 1) - return -EINVAL; - if (allowed_mclk_table == NULL) - return -EINVAL; - if (allowed_mclk_table->count < 1) - return -EINVAL; - - memset(&pi->dpm_table, 0, sizeof(struct 
ci_dpm_table)); - - ci_reset_single_dpm_table(adev, - &pi->dpm_table.sclk_table, - SMU7_MAX_LEVELS_GRAPHICS); - ci_reset_single_dpm_table(adev, - &pi->dpm_table.mclk_table, - SMU7_MAX_LEVELS_MEMORY); - ci_reset_single_dpm_table(adev, - &pi->dpm_table.vddc_table, - SMU7_MAX_LEVELS_VDDC); - ci_reset_single_dpm_table(adev, - &pi->dpm_table.vddci_table, - SMU7_MAX_LEVELS_VDDCI); - ci_reset_single_dpm_table(adev, - &pi->dpm_table.mvdd_table, - SMU7_MAX_LEVELS_MVDD); - - pi->dpm_table.sclk_table.count = 0; - for (i = 0; i < allowed_sclk_vddc_table->count; i++) { - if ((i == 0) || - (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value != - allowed_sclk_vddc_table->entries[i].clk)) { - pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = - allowed_sclk_vddc_table->entries[i].clk; - pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = - (i == 0) ? true : false; - pi->dpm_table.sclk_table.count++; - } - } - - pi->dpm_table.mclk_table.count = 0; - for (i = 0; i < allowed_mclk_table->count; i++) { - if ((i == 0) || - (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != - allowed_mclk_table->entries[i].clk)) { - pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = - allowed_mclk_table->entries[i].clk; - pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = - (i == 0) ? true : false; - pi->dpm_table.mclk_table.count++; - } - } - - for (i = 0; i < allowed_sclk_vddc_table->count; i++) { - pi->dpm_table.vddc_table.dpm_levels[i].value = - allowed_sclk_vddc_table->entries[i].v; - pi->dpm_table.vddc_table.dpm_levels[i].param1 = - std_voltage_table->entries[i].leakage; - pi->dpm_table.vddc_table.dpm_levels[i].enabled = true; - } - pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count; - - allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk; - if (allowed_mclk_table) { - for (i = 0; i < allowed_mclk_table->count; i++) { - pi->dpm_table.vddci_table.dpm_levels[i].value = - allowed_mclk_table->entries[i].v; - pi->dpm_table.vddci_table.dpm_levels[i].enabled = true; - } - pi->dpm_table.vddci_table.count = allowed_mclk_table->count; - } - - allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk; - if (allowed_mclk_table) { - for (i = 0; i < allowed_mclk_table->count; i++) { - pi->dpm_table.mvdd_table.dpm_levels[i].value = - allowed_mclk_table->entries[i].v; - pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true; - } - pi->dpm_table.mvdd_table.count = allowed_mclk_table->count; - } - - ci_setup_default_pcie_tables(adev); - - /* save a copy of the default DPM table */ - memcpy(&(pi->golden_dpm_table), &(pi->dpm_table), - sizeof(struct ci_dpm_table)); - - return 0; -} - -static int ci_find_boot_level(struct ci_single_dpm_table *table, - u32 value, u32 *boot_level) -{ - u32 i; - int ret = -EINVAL; - - for(i = 0; i < table->count; i++) { - if (value == table->dpm_levels[i].value) { - *boot_level = i; - ret = 0; - } - } - - return ret; -} - -static int ci_init_smc_table(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ulv_parm *ulv = &pi->ulv; - struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps; - SMU7_Discrete_DpmTable *table = &pi->smc_state_table; - int ret; - - ret = ci_setup_default_dpm_tables(adev); - if (ret) - return ret; - - if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) - ci_populate_smc_voltage_tables(adev, table); - - ci_init_fps_limits(adev); - - if 
(adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) - table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) - table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - - if (ulv->supported) { - ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv); - if (ret) - return ret; - WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter); - } - - ret = ci_populate_all_graphic_levels(adev); - if (ret) - return ret; - - ret = ci_populate_all_memory_levels(adev); - if (ret) - return ret; - - ci_populate_smc_link_level(adev, table); - - ret = ci_populate_smc_acpi_level(adev, table); - if (ret) - return ret; - - ret = ci_populate_smc_vce_level(adev, table); - if (ret) - return ret; - - ret = ci_populate_smc_acp_level(adev, table); - if (ret) - return ret; - - ret = ci_populate_smc_samu_level(adev, table); - if (ret) - return ret; - - ret = ci_do_program_memory_timing_parameters(adev); - if (ret) - return ret; - - ret = ci_populate_smc_uvd_level(adev, table); - if (ret) - return ret; - - table->UvdBootLevel = 0; - table->VceBootLevel = 0; - table->AcpBootLevel = 0; - table->SamuBootLevel = 0; - table->GraphicsBootLevel = 0; - table->MemoryBootLevel = 0; - - ret = ci_find_boot_level(&pi->dpm_table.sclk_table, - pi->vbios_boot_state.sclk_bootup_value, - (u32 *)&pi->smc_state_table.GraphicsBootLevel); - - ret = ci_find_boot_level(&pi->dpm_table.mclk_table, - pi->vbios_boot_state.mclk_bootup_value, - (u32 *)&pi->smc_state_table.MemoryBootLevel); - - table->BootVddc = pi->vbios_boot_state.vddc_bootup_value; - table->BootVddci = pi->vbios_boot_state.vddci_bootup_value; - table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value; - - ci_populate_smc_initial_state(adev, amdgpu_boot_state); - - ret = ci_populate_bapm_parameters_in_dpm_table(adev); - if (ret) - return ret; - - table->UVDInterval = 1; - table->VCEInterval = 1; - table->ACPInterval = 1; - table->SAMUInterval = 1; - table->GraphicsVoltageChangeEnable = 1; - table->GraphicsThermThrottleEnable = 1; - table->GraphicsInterval = 1; - table->VoltageInterval = 1; - table->ThermalInterval = 1; - table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high * - CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); - table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low * - CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000); - table->MemoryVoltageChangeEnable = 1; - table->MemoryInterval = 1; - table->VoltageResponseTime = 0; - table->VddcVddciDelta = 4000; - table->PhaseResponseTime = 0; - table->MemoryThermThrottleEnable = 1; - table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1; - table->PCIeGenInterval = 1; - if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) - table->SVI2Enable = 1; - else - table->SVI2Enable = 0; - - table->ThermGpio = 17; - table->SclkStepSize = 0x4000; - - table->SystemFlags = cpu_to_be32(table->SystemFlags); - table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid); - table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase); - table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid); - table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid); - table->SclkStepSize = cpu_to_be32(table->SclkStepSize); - table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh); - table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow); - table->VddcVddciDelta = 
cpu_to_be16(table->VddcVddciDelta); - table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime); - table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime); - table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE); - table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE); - table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE); - - ret = amdgpu_ci_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Discrete_DpmTable, SystemFlags), - (u8 *)&table->SystemFlags, - sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController), - pi->sram_end); - if (ret) - return ret; - - return 0; -} - -static void ci_trim_single_dpm_states(struct amdgpu_device *adev, - struct ci_single_dpm_table *dpm_table, - u32 low_limit, u32 high_limit) -{ - u32 i; - - for (i = 0; i < dpm_table->count; i++) { - if ((dpm_table->dpm_levels[i].value < low_limit) || - (dpm_table->dpm_levels[i].value > high_limit)) - dpm_table->dpm_levels[i].enabled = false; - else - dpm_table->dpm_levels[i].enabled = true; - } -} - -static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev, - u32 speed_low, u32 lanes_low, - u32 speed_high, u32 lanes_high) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; - u32 i, j; - - for (i = 0; i < pcie_table->count; i++) { - if ((pcie_table->dpm_levels[i].value < speed_low) || - (pcie_table->dpm_levels[i].param1 < lanes_low) || - (pcie_table->dpm_levels[i].value > speed_high) || - (pcie_table->dpm_levels[i].param1 > lanes_high)) - pcie_table->dpm_levels[i].enabled = false; - else - pcie_table->dpm_levels[i].enabled = true; - } - - for (i = 0; i < pcie_table->count; i++) { - if (pcie_table->dpm_levels[i].enabled) { - for (j = i + 1; j < pcie_table->count; j++) { - if (pcie_table->dpm_levels[j].enabled) { - if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) && - (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1)) - pcie_table->dpm_levels[j].enabled = false; - } - } - } - } -} - -static int ci_trim_dpm_states(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ci_ps *state = ci_get_ps(amdgpu_state); - struct ci_power_info *pi = ci_get_pi(adev); - u32 high_limit_count; - - if (state->performance_level_count < 1) - return -EINVAL; - - if (state->performance_level_count == 1) - high_limit_count = 0; - else - high_limit_count = 1; - - ci_trim_single_dpm_states(adev, - &pi->dpm_table.sclk_table, - state->performance_levels[0].sclk, - state->performance_levels[high_limit_count].sclk); - - ci_trim_single_dpm_states(adev, - &pi->dpm_table.mclk_table, - state->performance_levels[0].mclk, - state->performance_levels[high_limit_count].mclk); - - ci_trim_pcie_dpm_states(adev, - state->performance_levels[0].pcie_gen, - state->performance_levels[0].pcie_lane, - state->performance_levels[high_limit_count].pcie_gen, - state->performance_levels[high_limit_count].pcie_lane); - - return 0; -} - -static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev) -{ - struct amdgpu_clock_voltage_dependency_table *disp_voltage_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk; - struct amdgpu_clock_voltage_dependency_table *vddc_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 requested_voltage = 0; - u32 i; - - if (disp_voltage_table == NULL) - return -EINVAL; - if (!disp_voltage_table->count) - return -EINVAL; - - for (i = 0; i < disp_voltage_table->count; i++) { - if 
(adev->clock.current_dispclk == disp_voltage_table->entries[i].clk) - requested_voltage = disp_voltage_table->entries[i].v; - } - - for (i = 0; i < vddc_table->count; i++) { - if (requested_voltage <= vddc_table->entries[i].v) { - requested_voltage = vddc_table->entries[i].v; - return (amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_VddC_Request, - requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ? - 0 : -EINVAL; - } - } - - return -EINVAL; -} - -static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - PPSMC_Result result; - - ci_apply_disp_minimum_voltage_request(adev); - - if (!pi->sclk_dpm_key_disabled) { - if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { - result = amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - pi->dpm_level_enable_mask.sclk_dpm_enable_mask); - if (result != PPSMC_Result_OK) - return -EINVAL; - } - } - - if (!pi->mclk_dpm_key_disabled) { - if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { - result = amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - pi->dpm_level_enable_mask.mclk_dpm_enable_mask); - if (result != PPSMC_Result_OK) - return -EINVAL; - } - } - -#if 0 - if (!pi->pcie_dpm_key_disabled) { - if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { - result = amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_PCIeDPM_SetEnabledMask, - pi->dpm_level_enable_mask.pcie_dpm_enable_mask); - if (result != PPSMC_Result_OK) - return -EINVAL; - } - } -#endif - - return 0; -} - -static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ps *state = ci_get_ps(amdgpu_state); - struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; - u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; - struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; - u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; - u32 i; - - pi->need_update_smu7_dpm_table = 0; - - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (i >= sclk_table->count) { - pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - } else { - /* XXX check display min clock requirements */ - if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) - pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - for (i = 0; i < mclk_table->count; i++) { - if (mclk == mclk_table->dpm_levels[i].value) - break; - } - - if (i >= mclk_table->count) - pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - - if (adev->pm.dpm.current_active_crtc_count != - adev->pm.dpm.new_active_crtc_count) - pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; -} - -static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ps *state = ci_get_ps(amdgpu_state); - u32 sclk = state->performance_levels[state->performance_level_count-1].sclk; - u32 mclk = state->performance_levels[state->performance_level_count-1].mclk; - struct ci_dpm_table *dpm_table = &pi->dpm_table; - int ret; - - if (!pi->need_update_smu7_dpm_table) - return 0; - - if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) - dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk; - - if 
(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) - dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk; - - if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) { - ret = ci_populate_all_graphic_levels(adev); - if (ret) - return ret; - } - - if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) { - ret = ci_populate_all_memory_levels(adev); - if (ret) - return ret; - } - - return 0; -} - -static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct amdgpu_clock_and_voltage_limits *max_limits; - int i; - - if (adev->pm.ac_power) - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - else - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; - - if (enable) { - pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0; - - for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) { - if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { - pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i; - - if (!pi->caps_uvd_dpm) - break; - } - } - - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_UVDDPM_SetEnabledMask, - pi->dpm_level_enable_mask.uvd_dpm_enable_mask); - - if (pi->last_mclk_dpm_enable_mask & 0x1) { - pi->uvd_enabled = true; - pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - pi->dpm_level_enable_mask.mclk_dpm_enable_mask); - } - } else { - if (pi->uvd_enabled) { - pi->uvd_enabled = false; - pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - pi->dpm_level_enable_mask.mclk_dpm_enable_mask); - } - } - - return (amdgpu_ci_send_msg_to_smc(adev, enable ? - PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct amdgpu_clock_and_voltage_limits *max_limits; - int i; - - if (adev->pm.ac_power) - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - else - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; - - if (enable) { - pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0; - for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) { - if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { - pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i; - - if (!pi->caps_vce_dpm) - break; - } - } - - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_VCEDPM_SetEnabledMask, - pi->dpm_level_enable_mask.vce_dpm_enable_mask); - } - - return (amdgpu_ci_send_msg_to_smc(adev, enable ? - PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ? 
- 0 : -EINVAL; -} - -#if 0 -static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct amdgpu_clock_and_voltage_limits *max_limits; - int i; - - if (adev->pm.ac_power) - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - else - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; - - if (enable) { - pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0; - for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) { - if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { - pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i; - - if (!pi->caps_samu_dpm) - break; - } - } - - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SAMUDPM_SetEnabledMask, - pi->dpm_level_enable_mask.samu_dpm_enable_mask); - } - return (amdgpu_ci_send_msg_to_smc(adev, enable ? - PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable) -{ - struct ci_power_info *pi = ci_get_pi(adev); - const struct amdgpu_clock_and_voltage_limits *max_limits; - int i; - - if (adev->pm.ac_power) - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - else - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; - - if (enable) { - pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0; - for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) { - if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) { - pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i; - - if (!pi->caps_acp_dpm) - break; - } - } - - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ACPDPM_SetEnabledMask, - pi->dpm_level_enable_mask.acp_dpm_enable_mask); - } - - return (amdgpu_ci_send_msg_to_smc(adev, enable ? - PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} -#endif - -static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp; - int ret = 0; - - if (!gate) { - /* turn the clocks on when decoding */ - if (pi->caps_uvd_dpm || - (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0)) - pi->smc_state_table.UvdBootLevel = 0; - else - pi->smc_state_table.UvdBootLevel = - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; - - tmp = RREG32_SMC(ixDPM_TABLE_475); - tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK; - tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT); - WREG32_SMC(ixDPM_TABLE_475, tmp); - ret = ci_enable_uvd_dpm(adev, true); - } else { - ret = ci_enable_uvd_dpm(adev, false); - if (ret) - return ret; - } - - return ret; -} - -static u8 ci_get_vce_boot_level(struct amdgpu_device *adev) -{ - u8 i; - u32 min_evclk = 30000; /* ??? 
*/ - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].evclk >= min_evclk) - return i; - } - - return table->count - 1; -} - -static int ci_update_vce_dpm(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret = 0; - u32 tmp; - - if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) { - if (amdgpu_new_state->evclk) { - pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev); - tmp = RREG32_SMC(ixDPM_TABLE_475); - tmp &= ~DPM_TABLE_475__VceBootLevel_MASK; - tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT); - WREG32_SMC(ixDPM_TABLE_475, tmp); - - ret = ci_enable_vce_dpm(adev, true); - } else { - ret = ci_enable_vce_dpm(adev, false); - if (ret) - return ret; - } - } - return ret; -} - -#if 0 -static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate) -{ - return ci_enable_samu_dpm(adev, gate); -} - -static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp; - - if (!gate) { - pi->smc_state_table.AcpBootLevel = 0; - - tmp = RREG32_SMC(ixDPM_TABLE_475); - tmp &= ~AcpBootLevel_MASK; - tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel); - WREG32_SMC(ixDPM_TABLE_475, tmp); - } - - return ci_enable_acp_dpm(adev, !gate); -} -#endif - -static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret; - - ret = ci_trim_dpm_states(adev, amdgpu_state); - if (ret) - return ret; - - pi->dpm_level_enable_mask.sclk_dpm_enable_mask = - ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table); - pi->dpm_level_enable_mask.mclk_dpm_enable_mask = - ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table); - pi->last_mclk_dpm_enable_mask = - pi->dpm_level_enable_mask.mclk_dpm_enable_mask; - if (pi->uvd_enabled) { - if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1) - pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE; - } - pi->dpm_level_enable_mask.pcie_dpm_enable_mask = - ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table); - - return 0; -} - -static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev, - u32 level_mask) -{ - u32 level = 0; - - while ((level_mask & (1 << level)) == 0) - level++; - - return level; -} - - -static int ci_dpm_force_performance_level(void *handle, - enum amd_dpm_forced_level level) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - u32 tmp, levels, i; - int ret; - - if (level == AMD_DPM_FORCED_LEVEL_HIGH) { - if ((!pi->pcie_dpm_key_disabled) && - pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { - levels = 0; - tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; - while (tmp >>= 1) - levels++; - if (levels) { - ret = ci_dpm_force_state_pcie(adev, level); - if (ret) - return ret; - for (i = 0; i < adev->usec_timeout; i++) { - tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) & - TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT; - if (tmp == levels) - break; - udelay(1); - } - } - } - if ((!pi->sclk_dpm_key_disabled) && - pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { - levels = 0; - tmp = 
pi->dpm_level_enable_mask.sclk_dpm_enable_mask; - while (tmp >>= 1) - levels++; - if (levels) { - ret = ci_dpm_force_state_sclk(adev, levels); - if (ret) - return ret; - for (i = 0; i < adev->usec_timeout; i++) { - tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - if (tmp == levels) - break; - udelay(1); - } - } - } - if ((!pi->mclk_dpm_key_disabled) && - pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { - levels = 0; - tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; - while (tmp >>= 1) - levels++; - if (levels) { - ret = ci_dpm_force_state_mclk(adev, levels); - if (ret) - return ret; - for (i = 0; i < adev->usec_timeout; i++) { - tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT; - if (tmp == levels) - break; - udelay(1); - } - } - } - } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { - if ((!pi->sclk_dpm_key_disabled) && - pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { - levels = ci_get_lowest_enabled_level(adev, - pi->dpm_level_enable_mask.sclk_dpm_enable_mask); - ret = ci_dpm_force_state_sclk(adev, levels); - if (ret) - return ret; - for (i = 0; i < adev->usec_timeout; i++) { - tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - if (tmp == levels) - break; - udelay(1); - } - } - if ((!pi->mclk_dpm_key_disabled) && - pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { - levels = ci_get_lowest_enabled_level(adev, - pi->dpm_level_enable_mask.mclk_dpm_enable_mask); - ret = ci_dpm_force_state_mclk(adev, levels); - if (ret) - return ret; - for (i = 0; i < adev->usec_timeout; i++) { - tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT; - if (tmp == levels) - break; - udelay(1); - } - } - if ((!pi->pcie_dpm_key_disabled) && - pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { - levels = ci_get_lowest_enabled_level(adev, - pi->dpm_level_enable_mask.pcie_dpm_enable_mask); - ret = ci_dpm_force_state_pcie(adev, levels); - if (ret) - return ret; - for (i = 0; i < adev->usec_timeout; i++) { - tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) & - TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT; - if (tmp == levels) - break; - udelay(1); - } - } - } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { - if (!pi->pcie_dpm_key_disabled) { - PPSMC_Result smc_result; - - smc_result = amdgpu_ci_send_msg_to_smc(adev, - PPSMC_MSG_PCIeDPM_UnForceLevel); - if (smc_result != PPSMC_Result_OK) - return -EINVAL; - } - ret = ci_upload_dpm_level_enable_mask(adev); - if (ret) - return ret; - } - - adev->pm.dpm.forced_level = level; - - return 0; -} - -static int ci_set_mc_special_registers(struct amdgpu_device *adev, - struct ci_mc_reg_table *table) -{ - u8 i, j, k; - u32 temp_reg; - - for (i = 0, j = table->last; i < table->last; i++) { - if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - switch(table->mc_reg_address[i].s1) { - case mmMC_SEQ_MISC1: - temp_reg = RREG32(mmMC_PMG_CMD_EMRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP; - for (k = 0; k < 
table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); - } - j++; - - if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - temp_reg = RREG32(mmMC_PMG_CMD_MRS); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) - table->mc_reg_table_entry[k].mc_data[j] |= 0x100; - } - j++; - - if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) { - if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD; - table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; - } - j++; - } - break; - case mmMC_SEQ_RESERVE_M: - temp_reg = RREG32(mmMC_PMG_CMD_MRS1); - table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1; - table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - } - j++; - break; - default: - break; - } - - } - - table->last = j; - - return 0; -} - -static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) -{ - bool result = true; - - switch(in_reg) { - case mmMC_SEQ_RAS_TIMING: - *out_reg = mmMC_SEQ_RAS_TIMING_LP; - break; - case mmMC_SEQ_DLL_STBY: - *out_reg = mmMC_SEQ_DLL_STBY_LP; - break; - case mmMC_SEQ_G5PDX_CMD0: - *out_reg = mmMC_SEQ_G5PDX_CMD0_LP; - break; - case mmMC_SEQ_G5PDX_CMD1: - *out_reg = mmMC_SEQ_G5PDX_CMD1_LP; - break; - case mmMC_SEQ_G5PDX_CTRL: - *out_reg = mmMC_SEQ_G5PDX_CTRL_LP; - break; - case mmMC_SEQ_CAS_TIMING: - *out_reg = mmMC_SEQ_CAS_TIMING_LP; - break; - case mmMC_SEQ_MISC_TIMING: - *out_reg = mmMC_SEQ_MISC_TIMING_LP; - break; - case mmMC_SEQ_MISC_TIMING2: - *out_reg = mmMC_SEQ_MISC_TIMING2_LP; - break; - case mmMC_SEQ_PMG_DVS_CMD: - *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP; - break; - case mmMC_SEQ_PMG_DVS_CTL: - *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP; - break; - case mmMC_SEQ_RD_CTL_D0: - *out_reg = mmMC_SEQ_RD_CTL_D0_LP; - break; - case mmMC_SEQ_RD_CTL_D1: - *out_reg = mmMC_SEQ_RD_CTL_D1_LP; - break; - case mmMC_SEQ_WR_CTL_D0: - *out_reg = mmMC_SEQ_WR_CTL_D0_LP; - break; - case mmMC_SEQ_WR_CTL_D1: - *out_reg = mmMC_SEQ_WR_CTL_D1_LP; - break; - case mmMC_PMG_CMD_EMRS: - *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP; - break; - case mmMC_PMG_CMD_MRS: - *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP; - break; - case mmMC_PMG_CMD_MRS1: - *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP; - break; - case mmMC_SEQ_PMG_TIMING: - *out_reg = mmMC_SEQ_PMG_TIMING_LP; - break; - case mmMC_PMG_CMD_MRS2: - *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP; - break; - case mmMC_SEQ_WR_CTL_2: - *out_reg = mmMC_SEQ_WR_CTL_2_LP; - break; - default: - result = false; - break; - } - - return result; -} - -static void ci_set_valid_flag(struct ci_mc_reg_table *table) -{ - u8 i, j; - - for (i = 0; i < table->last; i++) { - for (j = 1; j < table->num_entries; j++) { - if (table->mc_reg_table_entry[j-1].mc_data[i] != - table->mc_reg_table_entry[j].mc_data[i]) { - table->valid_flag |= 1 << i; - break; - } - } - } -} - -static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table) -{ 
- u32 i; - u16 address; - - for (i = 0; i < table->last; i++) { - table->mc_reg_address[i].s0 = - ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? - address : table->mc_reg_address[i].s1; - } -} - -static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table, - struct ci_mc_reg_table *ci_table) -{ - u8 i, j; - - if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - if (table->num_entries > MAX_AC_TIMING_ENTRIES) - return -EINVAL; - - for (i = 0; i < table->last; i++) - ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; - - ci_table->last = table->last; - - for (i = 0; i < table->num_entries; i++) { - ci_table->mc_reg_table_entry[i].mclk_max = - table->mc_reg_table_entry[i].mclk_max; - for (j = 0; j < table->last; j++) - ci_table->mc_reg_table_entry[i].mc_data[j] = - table->mc_reg_table_entry[i].mc_data[j]; - } - ci_table->num_entries = table->num_entries; - - return 0; -} - -static int ci_register_patching_mc_seq(struct amdgpu_device *adev, - struct ci_mc_reg_table *table) -{ - u8 i, k; - u32 tmp; - bool patch; - - tmp = RREG32(mmMC_SEQ_MISC0); - patch = ((tmp & 0x0000f00) == 0x300) ? true : false; - - if (patch && - ((adev->pdev->device == 0x67B0) || - (adev->pdev->device == 0x67B1))) { - for (i = 0; i < table->last; i++) { - if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - switch (table->mc_reg_address[i].s1) { - case mmMC_SEQ_MISC1: - for (k = 0; k < table->num_entries; k++) { - if ((table->mc_reg_table_entry[k].mclk_max == 125000) || - (table->mc_reg_table_entry[k].mclk_max == 137500)) - table->mc_reg_table_entry[k].mc_data[i] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) | - 0x00000007; - } - break; - case mmMC_SEQ_WR_CTL_D0: - for (k = 0; k < table->num_entries; k++) { - if ((table->mc_reg_table_entry[k].mclk_max == 125000) || - (table->mc_reg_table_entry[k].mclk_max == 137500)) - table->mc_reg_table_entry[k].mc_data[i] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | - 0x0000D0DD; - } - break; - case mmMC_SEQ_WR_CTL_D1: - for (k = 0; k < table->num_entries; k++) { - if ((table->mc_reg_table_entry[k].mclk_max == 125000) || - (table->mc_reg_table_entry[k].mclk_max == 137500)) - table->mc_reg_table_entry[k].mc_data[i] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) | - 0x0000D0DD; - } - break; - case mmMC_SEQ_WR_CTL_2: - for (k = 0; k < table->num_entries; k++) { - if ((table->mc_reg_table_entry[k].mclk_max == 125000) || - (table->mc_reg_table_entry[k].mclk_max == 137500)) - table->mc_reg_table_entry[k].mc_data[i] = 0; - } - break; - case mmMC_SEQ_CAS_TIMING: - for (k = 0; k < table->num_entries; k++) { - if (table->mc_reg_table_entry[k].mclk_max == 125000) - table->mc_reg_table_entry[k].mc_data[i] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | - 0x000C0140; - else if (table->mc_reg_table_entry[k].mclk_max == 137500) - table->mc_reg_table_entry[k].mc_data[i] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) | - 0x000C0150; - } - break; - case mmMC_SEQ_MISC_TIMING: - for (k = 0; k < table->num_entries; k++) { - if (table->mc_reg_table_entry[k].mclk_max == 125000) - table->mc_reg_table_entry[k].mc_data[i] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | - 0x00000030; - else if (table->mc_reg_table_entry[k].mclk_max == 137500) - table->mc_reg_table_entry[k].mc_data[i] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) | - 0x00000035; - } - break; - default: - break; - } - } - - WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3); - tmp 
= RREG32(mmMC_SEQ_IO_DEBUG_DATA); - tmp = (tmp & 0xFFF8FFFF) | (1 << 16); - WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3); - WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp); - } - - return 0; -} - -static int ci_initialize_mc_reg_table(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct atom_mc_reg_table *table; - struct ci_mc_reg_table *ci_table = &pi->mc_reg_table; - u8 module_index = ci_get_memory_module_index(adev); - int ret; - - table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); - if (!table) - return -ENOMEM; - - WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING)); - WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING)); - WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY)); - WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0)); - WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1)); - WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL)); - WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD)); - WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL)); - WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING)); - WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2)); - WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS)); - WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS)); - WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1)); - WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0)); - WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1)); - WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0)); - WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1)); - WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING)); - WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2)); - WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2)); - - ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); - if (ret) - goto init_mc_done; - - ret = ci_copy_vbios_mc_reg_table(table, ci_table); - if (ret) - goto init_mc_done; - - ci_set_s0_mc_reg_index(ci_table); - - ret = ci_register_patching_mc_seq(adev, ci_table); - if (ret) - goto init_mc_done; - - ret = ci_set_mc_special_registers(adev, ci_table); - if (ret) - goto init_mc_done; - - ci_set_valid_flag(ci_table); - -init_mc_done: - kfree(table); - - return ret; -} - -static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev, - SMU7_Discrete_MCRegisters *mc_reg_table) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 i, j; - - for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) { - if (pi->mc_reg_table.valid_flag & (1 << j)) { - if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0); - mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1); - i++; - } - } - - mc_reg_table->last = (u8)i; - - return 0; -} - -static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry, - SMU7_Discrete_MCRegisterSet *data, - u32 num_entries, u32 valid_flag) -{ - u32 i, j; - - for (i = 0, j = 0; j < num_entries; j++) { - if (valid_flag & (1 << j)) { - data->value[i] = cpu_to_be32(entry->mc_data[j]); - i++; - } - } -} - -static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev, - const u32 memory_clock, - SMU7_Discrete_MCRegisterSet *mc_reg_table_data) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 i = 0; - - for(i = 0; i < pi->mc_reg_table.num_entries; i++) { - if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) - break; - } - - if ((i == 
pi->mc_reg_table.num_entries) && (i > 0)) - --i; - - ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i], - mc_reg_table_data, pi->mc_reg_table.last, - pi->mc_reg_table.valid_flag); -} - -static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, - SMU7_Discrete_MCRegisters *mc_reg_table) -{ - struct ci_power_info *pi = ci_get_pi(adev); - u32 i; - - for (i = 0; i < pi->dpm_table.mclk_table.count; i++) - ci_convert_mc_reg_table_entry_to_smc(adev, - pi->dpm_table.mclk_table.dpm_levels[i].value, - &mc_reg_table->data[i]); -} - -static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - int ret; - - memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); - - ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table); - if (ret) - return ret; - ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table); - - return amdgpu_ci_copy_bytes_to_smc(adev, - pi->mc_reg_table_start, - (u8 *)&pi->smc_mc_reg_table, - sizeof(SMU7_Discrete_MCRegisters), - pi->sram_end); -} - -static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - - if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) - return 0; - - memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters)); - - ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table); - - return amdgpu_ci_copy_bytes_to_smc(adev, - pi->mc_reg_table_start + - offsetof(SMU7_Discrete_MCRegisters, data[0]), - (u8 *)&pi->smc_mc_reg_table.data[0], - sizeof(SMU7_Discrete_MCRegisterSet) * - pi->dpm_table.mclk_table.count, - pi->sram_end); -} - -static void ci_enable_voltage_control(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); - - tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); -} - -static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ci_ps *state = ci_get_ps(amdgpu_state); - int i; - u16 pcie_speed, max_speed = 0; - - for (i = 0; i < state->performance_level_count; i++) { - pcie_speed = state->performance_levels[i].pcie_gen; - if (max_speed < pcie_speed) - max_speed = pcie_speed; - } - - return max_speed; -} - -static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev) -{ - u32 speed_cntl = 0; - - speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) & - PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK; - speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - - return (u16)speed_cntl; -} - -static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev) -{ - u32 link_width = 0; - - link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) & - PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK; - link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; - - switch (link_width) { - case 1: - return 1; - case 2: - return 2; - case 3: - return 4; - case 4: - return 8; - case 0: - case 6: - default: - return 16; - } -} - -static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - struct ci_power_info *pi = ci_get_pi(adev); - enum amdgpu_pcie_gen target_link_speed = - ci_get_maximum_link_speed(adev, amdgpu_new_state); - enum amdgpu_pcie_gen current_link_speed; - - if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID) - current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state); - else - current_link_speed = 
pi->force_pcie_gen; - - pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; - pi->pspp_notify_required = false; - if (target_link_speed > current_link_speed) { - switch (target_link_speed) { -#ifdef CONFIG_ACPI - case AMDGPU_PCIE_GEN3: - if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) - break; - pi->force_pcie_gen = AMDGPU_PCIE_GEN2; - if (current_link_speed == AMDGPU_PCIE_GEN2) - break; - case AMDGPU_PCIE_GEN2: - if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) - break; -#endif - default: - pi->force_pcie_gen = ci_get_current_pcie_speed(adev); - break; - } - } else { - if (target_link_speed < current_link_speed) - pi->pspp_notify_required = true; - } -} - -static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - struct ci_power_info *pi = ci_get_pi(adev); - enum amdgpu_pcie_gen target_link_speed = - ci_get_maximum_link_speed(adev, amdgpu_new_state); - u8 request; - - if (pi->pspp_notify_required) { - if (target_link_speed == AMDGPU_PCIE_GEN3) - request = PCIE_PERF_REQ_PECI_GEN3; - else if (target_link_speed == AMDGPU_PCIE_GEN2) - request = PCIE_PERF_REQ_PECI_GEN2; - else - request = PCIE_PERF_REQ_PECI_GEN1; - - if ((request == PCIE_PERF_REQ_PECI_GEN1) && - (ci_get_current_pcie_speed(adev) > 0)) - return; - -#ifdef CONFIG_ACPI - amdgpu_acpi_pcie_performance_request(adev, request, false); -#endif - } -} - -static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk; - struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table = - &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk; - - if (allowed_sclk_vddc_table == NULL) - return -EINVAL; - if (allowed_sclk_vddc_table->count < 1) - return -EINVAL; - if (allowed_mclk_vddc_table == NULL) - return -EINVAL; - if (allowed_mclk_vddc_table->count < 1) - return -EINVAL; - if (allowed_mclk_vddci_table == NULL) - return -EINVAL; - if (allowed_mclk_vddci_table->count < 1) - return -EINVAL; - - pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v; - pi->max_vddc_in_pp_table = - allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; - - pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v; - pi->max_vddci_in_pp_table = - allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; - - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = - allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = - allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = - allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = - allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; - - return 0; -} - -static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage; - u32 leakage_index; - - for (leakage_index = 0; leakage_index < leakage_table->count; 
leakage_index++) { - if (leakage_table->leakage_id[leakage_index] == *vddc) { - *vddc = leakage_table->actual_voltage[leakage_index]; - break; - } - } -} - -static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage; - u32 leakage_index; - - for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) { - if (leakage_table->leakage_id[leakage_index] == *vddci) { - *vddci = leakage_table->actual_voltage[leakage_index]; - break; - } - } -} - -static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev, - struct amdgpu_clock_voltage_dependency_table *table) -{ - u32 i; - - if (table) { - for (i = 0; i < table->count; i++) - ci_patch_with_vddc_leakage(adev, &table->entries[i].v); - } -} - -static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev, - struct amdgpu_clock_voltage_dependency_table *table) -{ - u32 i; - - if (table) { - for (i = 0; i < table->count; i++) - ci_patch_with_vddci_leakage(adev, &table->entries[i].v); - } -} - -static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev, - struct amdgpu_vce_clock_voltage_dependency_table *table) -{ - u32 i; - - if (table) { - for (i = 0; i < table->count; i++) - ci_patch_with_vddc_leakage(adev, &table->entries[i].v); - } -} - -static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev, - struct amdgpu_uvd_clock_voltage_dependency_table *table) -{ - u32 i; - - if (table) { - for (i = 0; i < table->count; i++) - ci_patch_with_vddc_leakage(adev, &table->entries[i].v); - } -} - -static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev, - struct amdgpu_phase_shedding_limits_table *table) -{ - u32 i; - - if (table) { - for (i = 0; i < table->count; i++) - ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage); - } -} - -static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev, - struct amdgpu_clock_and_voltage_limits *table) -{ - if (table) { - ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc); - ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci); - } -} - -static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev, - struct amdgpu_cac_leakage_table *table) -{ - u32 i; - - if (table) { - for (i = 0; i < table->count; i++) - ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc); - } -} - -static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev) -{ - - ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk); - ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk); - ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk); - ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev, - &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); - ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table); - ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table); - ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table); - 
ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table); - ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table); - ci_patch_clock_voltage_limits_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); - ci_patch_clock_voltage_limits_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc); - ci_patch_cac_leakage_table_with_vddc_leakage(adev, - &adev->pm.dpm.dyn_state.cac_leakage_table); - -} - -static void ci_update_current_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct ci_ps *new_ps = ci_get_ps(rps); - struct ci_power_info *pi = ci_get_pi(adev); - - pi->current_rps = *rps; - pi->current_ps = *new_ps; - pi->current_rps.ps_priv = &pi->current_ps; - adev->pm.dpm.current_ps = &pi->current_rps; -} - -static void ci_update_requested_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct ci_ps *new_ps = ci_get_ps(rps); - struct ci_power_info *pi = ci_get_pi(adev); - - pi->requested_rps = *rps; - pi->requested_ps = *new_ps; - pi->requested_rps.ps_priv = &pi->requested_ps; - adev->pm.dpm.requested_ps = &pi->requested_rps; -} - -static int ci_dpm_pre_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; - struct amdgpu_ps *new_ps = &requested_ps; - - ci_update_requested_ps(adev, new_ps); - - ci_apply_state_adjust_rules(adev, &pi->requested_rps); - - return 0; -} - -static void ci_dpm_post_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_ps *new_ps = &pi->requested_rps; - - ci_update_current_ps(adev, new_ps); -} - - -static void ci_dpm_setup_asic(struct amdgpu_device *adev) -{ - ci_read_clock_registers(adev); - ci_enable_acpi_power_management(adev); - ci_init_sclk_t(adev); -} - -static int ci_dpm_enable(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; - int ret; - - if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) { - ci_enable_voltage_control(adev); - ret = ci_construct_voltage_tables(adev); - if (ret) { - DRM_ERROR("ci_construct_voltage_tables failed\n"); - return ret; - } - } - if (pi->caps_dynamic_ac_timing) { - ret = ci_initialize_mc_reg_table(adev); - if (ret) - pi->caps_dynamic_ac_timing = false; - } - if (pi->dynamic_ss) - ci_enable_spread_spectrum(adev, true); - if (pi->thermal_protection) - ci_enable_thermal_protection(adev, true); - ci_program_sstp(adev); - ci_enable_display_gap(adev); - ci_program_vc(adev); - ret = ci_upload_firmware(adev); - if (ret) { - DRM_ERROR("ci_upload_firmware failed\n"); - return ret; - } - ret = ci_process_firmware_header(adev); - if (ret) { - DRM_ERROR("ci_process_firmware_header failed\n"); - return ret; - } - ret = ci_initial_switch_from_arb_f0_to_f1(adev); - if (ret) { - DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n"); - return ret; - } - ret = ci_init_smc_table(adev); - if (ret) { - DRM_ERROR("ci_init_smc_table failed\n"); - return ret; - } - ret = ci_init_arb_table_index(adev); - if (ret) { - DRM_ERROR("ci_init_arb_table_index failed\n"); - return ret; - } - if (pi->caps_dynamic_ac_timing) { - ret = ci_populate_initial_mc_reg_table(adev); - if (ret) { - DRM_ERROR("ci_populate_initial_mc_reg_table 
failed\n"); - return ret; - } - } - ret = ci_populate_pm_base(adev); - if (ret) { - DRM_ERROR("ci_populate_pm_base failed\n"); - return ret; - } - ci_dpm_start_smc(adev); - ci_enable_vr_hot_gpio_interrupt(adev); - ret = ci_notify_smc_display_change(adev, false); - if (ret) { - DRM_ERROR("ci_notify_smc_display_change failed\n"); - return ret; - } - ci_enable_sclk_control(adev, true); - ret = ci_enable_ulv(adev, true); - if (ret) { - DRM_ERROR("ci_enable_ulv failed\n"); - return ret; - } - ret = ci_enable_ds_master_switch(adev, true); - if (ret) { - DRM_ERROR("ci_enable_ds_master_switch failed\n"); - return ret; - } - ret = ci_start_dpm(adev); - if (ret) { - DRM_ERROR("ci_start_dpm failed\n"); - return ret; - } - ret = ci_enable_didt(adev, true); - if (ret) { - DRM_ERROR("ci_enable_didt failed\n"); - return ret; - } - ret = ci_enable_smc_cac(adev, true); - if (ret) { - DRM_ERROR("ci_enable_smc_cac failed\n"); - return ret; - } - ret = ci_enable_power_containment(adev, true); - if (ret) { - DRM_ERROR("ci_enable_power_containment failed\n"); - return ret; - } - - ret = ci_power_control_set_level(adev); - if (ret) { - DRM_ERROR("ci_power_control_set_level failed\n"); - return ret; - } - - ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); - - ret = ci_enable_thermal_based_sclk_dpm(adev, true); - if (ret) { - DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n"); - return ret; - } - - ci_thermal_start_thermal_controller(adev); - - ci_update_current_ps(adev, boot_ps); - - return 0; -} - -static void ci_dpm_disable(struct amdgpu_device *adev) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; - - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); - - ci_dpm_powergate_uvd(adev, true); - - if (!amdgpu_ci_is_smc_running(adev)) - return; - - ci_thermal_stop_thermal_controller(adev); - - if (pi->thermal_protection) - ci_enable_thermal_protection(adev, false); - ci_enable_power_containment(adev, false); - ci_enable_smc_cac(adev, false); - ci_enable_didt(adev, false); - ci_enable_spread_spectrum(adev, false); - ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false); - ci_stop_dpm(adev); - ci_enable_ds_master_switch(adev, false); - ci_enable_ulv(adev, false); - ci_clear_vc(adev); - ci_reset_to_default(adev); - ci_dpm_stop_smc(adev); - ci_force_switch_to_arb_f0(adev); - ci_enable_thermal_based_sclk_dpm(adev, false); - - ci_update_current_ps(adev, boot_ps); -} - -static int ci_dpm_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_ps *new_ps = &pi->requested_rps; - struct amdgpu_ps *old_ps = &pi->current_rps; - int ret; - - ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps); - if (pi->pcie_performance_request) - ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps); - ret = ci_freeze_sclk_mclk_dpm(adev); - if (ret) { - DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n"); - return ret; - } - ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps); - if (ret) { - DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n"); - return ret; - } - ret = ci_generate_dpm_level_enable_mask(adev, new_ps); - if (ret) { - DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n"); - return ret; - } - - ret = ci_update_vce_dpm(adev, new_ps, old_ps); - if (ret) { - 
DRM_ERROR("ci_update_vce_dpm failed\n"); - return ret; - } - - ret = ci_update_sclk_t(adev); - if (ret) { - DRM_ERROR("ci_update_sclk_t failed\n"); - return ret; - } - if (pi->caps_dynamic_ac_timing) { - ret = ci_update_and_upload_mc_reg_table(adev); - if (ret) { - DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n"); - return ret; - } - } - ret = ci_program_memory_timing_parameters(adev); - if (ret) { - DRM_ERROR("ci_program_memory_timing_parameters failed\n"); - return ret; - } - ret = ci_unfreeze_sclk_mclk_dpm(adev); - if (ret) { - DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n"); - return ret; - } - ret = ci_upload_dpm_level_enable_mask(adev); - if (ret) { - DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n"); - return ret; - } - if (pi->pcie_performance_request) - ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps); - - return 0; -} - -#if 0 -static void ci_dpm_reset_asic(struct amdgpu_device *adev) -{ - ci_set_boot_state(adev); -} -#endif - -static void ci_dpm_display_configuration_changed(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ci_program_display_gap(adev); -} - -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; -}; - -union pplib_clock_info { - struct _ATOM_PPLIB_R600_CLOCK_INFO r600; - struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; - struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; - struct _ATOM_PPLIB_SI_CLOCK_INFO si; - struct _ATOM_PPLIB_CI_CLOCK_INFO ci; -}; - -union pplib_power_state { - struct _ATOM_PPLIB_STATE v1; - struct _ATOM_PPLIB_STATE_V2 v2; -}; - -static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, - u8 table_rev) -{ - rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); - rps->class = le16_to_cpu(non_clock_info->usClassification); - rps->class2 = le16_to_cpu(non_clock_info->usClassification2); - - if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { - rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); - rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else { - rps->vclk = 0; - rps->dclk = 0; - } - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) - adev->pm.dpm.boot_ps = rps; - if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - adev->pm.dpm.uvd_ps = rps; -} - -static void ci_parse_pplib_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, int index, - union pplib_clock_info *clock_info) -{ - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ps *ps = ci_get_ps(rps); - struct ci_pl *pl = &ps->performance_levels[index]; - - ps->performance_level_count = index + 1; - - pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); - pl->sclk |= clock_info->ci.ucEngineClockHigh << 16; - pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); - pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16; - - pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, - pi->sys_pcie_mask, - pi->vbios_boot_state.pcie_gen_bootup_value, - clock_info->ci.ucPCIEGen); - pl->pcie_lane = amdgpu_get_pcie_lane_support(adev, - pi->vbios_boot_state.pcie_lane_bootup_value, - le16_to_cpu(clock_info->ci.usPCIELane)); - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { - pi->acpi_pcie_gen = pl->pcie_gen; - } - - if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) { - 
pi->ulv.supported = true; - pi->ulv.pl = *pl; - pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT; - } - - /* patch up boot state */ - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { - pl->mclk = pi->vbios_boot_state.mclk_bootup_value; - pl->sclk = pi->vbios_boot_state.sclk_bootup_value; - pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value; - pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value; - } - - switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { - case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: - pi->use_pcie_powersaving_levels = true; - if (pi->pcie_gen_powersaving.max < pl->pcie_gen) - pi->pcie_gen_powersaving.max = pl->pcie_gen; - if (pi->pcie_gen_powersaving.min > pl->pcie_gen) - pi->pcie_gen_powersaving.min = pl->pcie_gen; - if (pi->pcie_lane_powersaving.max < pl->pcie_lane) - pi->pcie_lane_powersaving.max = pl->pcie_lane; - if (pi->pcie_lane_powersaving.min > pl->pcie_lane) - pi->pcie_lane_powersaving.min = pl->pcie_lane; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: - pi->use_pcie_performance_levels = true; - if (pi->pcie_gen_performance.max < pl->pcie_gen) - pi->pcie_gen_performance.max = pl->pcie_gen; - if (pi->pcie_gen_performance.min > pl->pcie_gen) - pi->pcie_gen_performance.min = pl->pcie_gen; - if (pi->pcie_lane_performance.max < pl->pcie_lane) - pi->pcie_lane_performance.max = pl->pcie_lane; - if (pi->pcie_lane_performance.min > pl->pcie_lane) - pi->pcie_lane_performance.min = pl->pcie_lane; - break; - default: - break; - } -} - -static int ci_parse_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - union pplib_power_state *power_state; - int i, j, k, non_clock_array_index, clock_array_index; - union pplib_clock_info *clock_info; - struct _StateArray *state_array; - struct _ClockInfoArray *clock_info_array; - struct _NonClockInfoArray *non_clock_info_array; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - u8 *power_state_offset; - struct ci_ps *ps; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - amdgpu_add_thermal_controller(adev); - - state_array = (struct _StateArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct _ClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct _NonClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - - adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, - sizeof(struct amdgpu_ps), - GFP_KERNEL); - if (!adev->pm.dpm.ps) - return -ENOMEM; - power_state_offset = (u8 *)state_array->states; - for (i = 0; i < state_array->ucNumEntries; i++) { - u8 *idx; - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); - return -ENOMEM; - } - adev->pm.dpm.ps[i].ps_priv = ps; - ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], - 
non_clock_info, - non_clock_info_array->ucEntrySize); - k = 0; - idx = (u8 *)&power_state->v2.clockInfoIndex[0]; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = idx[j]; - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS) - break; - clock_info = (union pplib_clock_info *) - ((u8 *)&clock_info_array->clockInfo[0] + - (clock_array_index * clock_info_array->ucEntrySize)); - ci_parse_pplib_clock_info(adev, - &adev->pm.dpm.ps[i], k, - clock_info); - k++; - } - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; - } - adev->pm.dpm.num_ps = state_array->ucNumEntries; - - /* fill in the vce power states */ - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { - u32 sclk, mclk; - clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; - sclk = le16_to_cpu(clock_info->ci.usEngineClockLow); - sclk |= clock_info->ci.ucEngineClockHigh << 16; - mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow); - mclk |= clock_info->ci.ucMemoryClockHigh << 16; - adev->pm.dpm.vce_states[i].sclk = sclk; - adev->pm.dpm.vce_states[i].mclk = mclk; - } - - return 0; -} - -static int ci_get_vbios_boot_values(struct amdgpu_device *adev, - struct ci_vbios_boot_state *boot_state) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - ATOM_FIRMWARE_INFO_V2_2 *firmware_info; - u8 frev, crev; - u16 data_offset; - - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { - firmware_info = - (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios + - data_offset); - boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage); - boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage); - boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage); - boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev); - boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev); - boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock); - boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock); - - return 0; - } - return -EINVAL; -} - -static void ci_dpm_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - kfree(adev->pm.dpm.ps[i].ps_priv); - } - kfree(adev->pm.dpm.ps); - kfree(adev->pm.dpm.priv); - kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); - amdgpu_free_extended_power_table(adev); -} - -/** - * ci_dpm_init_microcode - load ucode images from disk - * - * @adev: amdgpu_device pointer - * - * Use the firmware interface to load the ucode images into - * the driver (not loaded into hw). - * Returns 0 on success, error on failure. 
- */ -static int ci_dpm_init_microcode(struct amdgpu_device *adev) -{ - const char *chip_name; - char fw_name[30]; - int err; - - DRM_DEBUG("\n"); - - switch (adev->asic_type) { - case CHIP_BONAIRE: - if ((adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->device == 0x665f)) - chip_name = "bonaire_k"; - else - chip_name = "bonaire"; - break; - case CHIP_HAWAII: - if (adev->pdev->revision == 0x80) - chip_name = "hawaii_k"; - else - chip_name = "hawaii"; - break; - case CHIP_KAVERI: - case CHIP_KABINI: - case CHIP_MULLINS: - default: BUG(); - } - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; -} - -static int ci_dpm_init(struct amdgpu_device *adev) -{ - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - SMU7_Discrete_DpmTable *dpm_table; - struct amdgpu_gpio_rec gpio; - u16 data_offset, size; - u8 frev, crev; - struct ci_power_info *pi; - int ret; - - pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); - if (pi == NULL) - return -ENOMEM; - adev->pm.dpm.priv = pi; - - pi->sys_pcie_mask = - adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK; - - pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; - - pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; - pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3; - pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1; - pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3; - - pi->pcie_lane_performance.max = 0; - pi->pcie_lane_performance.min = 16; - pi->pcie_lane_powersaving.max = 0; - pi->pcie_lane_powersaving.min = 16; - - ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state); - if (ret) { - ci_dpm_fini(adev); - return ret; - } - - ret = amdgpu_get_platform_caps(adev); - if (ret) { - ci_dpm_fini(adev); - return ret; - } - - ret = amdgpu_parse_extended_power_table(adev); - if (ret) { - ci_dpm_fini(adev); - return ret; - } - - ret = ci_parse_power_table(adev); - if (ret) { - ci_dpm_fini(adev); - return ret; - } - - pi->dll_default_on = false; - pi->sram_end = SMC_RAM_END; - - pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT; - pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT; - pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT; - pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT; - pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT; - pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT; - pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT; - pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT; - - pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT; - - pi->sclk_dpm_key_disabled = 0; - pi->mclk_dpm_key_disabled = 0; - pi->pcie_dpm_key_disabled = 0; - pi->thermal_sclk_dpm_enabled = 0; - - if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) - pi->caps_sclk_ds = true; - else - pi->caps_sclk_ds = false; - - pi->mclk_strobe_mode_threshold = 40000; - pi->mclk_stutter_mode_threshold = 40000; - pi->mclk_edc_enable_threshold = 40000; - pi->mclk_edc_wr_enable_threshold = 40000; - - ci_initialize_powertune_defaults(adev); - - pi->caps_fps = false; - - pi->caps_sclk_throttle_low_notification = false; - - pi->caps_uvd_dpm = true; - pi->caps_vce_dpm = true; - - ci_get_leakage_voltages(adev); - ci_patch_dependency_tables_with_leakage(adev); - 
ci_set_private_data_variables_based_on_pptable(adev); - - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = - kcalloc(4, - sizeof(struct amdgpu_clock_voltage_dependency_entry), - GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { - ci_dpm_fini(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; - - adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; - adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; - adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; - - adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; - adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; - adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; - adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; - - if (adev->asic_type == CHIP_HAWAII) { - pi->thermal_temp_setting.temperature_low = 94500; - pi->thermal_temp_setting.temperature_high = 95000; - pi->thermal_temp_setting.temperature_shutdown = 104000; - } else { - pi->thermal_temp_setting.temperature_low = 99500; - pi->thermal_temp_setting.temperature_high = 100000; - pi->thermal_temp_setting.temperature_shutdown = 104000; - } - - pi->uvd_enabled = false; - - dpm_table = &pi->smc_state_table; - - gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID); - if (gpio.valid) { - dpm_table->VRHotGpio = gpio.shift; - adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; - } else { - dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN; - adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT; - } - - gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID); - if (gpio.valid) { - dpm_table->AcDcGpio = gpio.shift; - adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC; - } else { - dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN; - adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC; - } - - gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID); - if (gpio.valid) { - u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL); - - switch (gpio.shift) { - case 0: - tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK; - tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT; - break; - case 1: - tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK; - tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT; - break; - case 2: - tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK; - break; - case 3: - tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK; - break; - case 4: - tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK; - break; - default: - DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift); - break; - } - WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp); - } - - pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; - pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; - pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; - if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) - pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; - else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) - pi->voltage_control 
= CISLANDS_VOLTAGE_CONTROL_BY_SVID2; - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) { - if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) - pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; - else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) - pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; - else - adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL; - } - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) { - if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) - pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO; - else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) - pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; - else - adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL; - } - - pi->vddc_phase_shed_control = true; - -#if defined(CONFIG_ACPI) - pi->pcie_performance_request = - amdgpu_acpi_is_pcie_performance_request_supported(adev); -#else - pi->pcie_performance_request = false; -#endif - - if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->caps_sclk_ss_support = true; - pi->caps_mclk_ss_support = true; - pi->dynamic_ss = true; - } else { - pi->caps_sclk_ss_support = false; - pi->caps_mclk_ss_support = false; - pi->dynamic_ss = true; - } - - if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) - pi->thermal_protection = true; - else - pi->thermal_protection = false; - - pi->caps_dynamic_ac_timing = true; - - pi->uvd_power_gated = true; - - /* make sure dc limits are valid */ - if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || - (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - - pi->fan_ctrl_is_in_default_mode = true; - - return 0; -} - -static void -ci_dpm_debugfs_print_current_performance_level(void *handle, - struct seq_file *m) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct amdgpu_ps *rps = &pi->current_rps; - u32 sclk = ci_get_average_sclk_freq(adev); - u32 mclk = ci_get_average_mclk_freq(adev); - u32 activity_percent = 50; - int ret; - - ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA), - &activity_percent); - - if (ret == 0) { - activity_percent += 0x80; - activity_percent >>= 8; - activity_percent = activity_percent > 100 ? 100 : activity_percent; - } - - seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); - seq_printf(m, "vce %sabled\n", rps->vce_active ? 
"en" : "dis"); - seq_printf(m, "power level avg sclk: %u mclk: %u\n", - sclk, mclk); - seq_printf(m, "GPU load: %u %%\n", activity_percent); -} - -static void ci_dpm_print_power_state(void *handle, void *current_ps) -{ - struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps; - struct ci_ps *ps = ci_get_ps(rps); - struct ci_pl *pl; - int i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - for (i = 0; i < ps->performance_level_count; i++) { - pl = &ps->performance_levels[i]; - printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n", - i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane); - } - amdgpu_dpm_print_ps_status(adev, rps); -} - -static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1, - const struct ci_pl *ci_cpl2) -{ - return ((ci_cpl1->mclk == ci_cpl2->mclk) && - (ci_cpl1->sclk == ci_cpl2->sclk) && - (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) && - (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane)); -} - -static int ci_check_state_equal(void *handle, - void *current_ps, - void *request_ps, - bool *equal) -{ - struct ci_ps *ci_cps; - struct ci_ps *ci_rps; - int i; - struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; - struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) - return -EINVAL; - - ci_cps = ci_get_ps((struct amdgpu_ps *)cps); - ci_rps = ci_get_ps((struct amdgpu_ps *)rps); - - if (ci_cps == NULL) { - *equal = false; - return 0; - } - - if (ci_cps->performance_level_count != ci_rps->performance_level_count) { - - *equal = false; - return 0; - } - - for (i = 0; i < ci_cps->performance_level_count; i++) { - if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]), - &(ci_rps->performance_levels[i]))) { - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); - *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); - - return 0; -} - -static u32 ci_dpm_get_sclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); - - if (low) - return requested_state->performance_levels[0].sclk; - else - return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; -} - -static u32 ci_dpm_get_mclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps); - - if (low) - return requested_state->performance_levels[0].mclk; - else - return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; -} - -/* get temperature in millidegrees */ -static int ci_dpm_get_temp(void *handle) -{ - u32 temp; - int actual_temp = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> - CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; - - if (temp & 0x200) - actual_temp = 255; - else - actual_temp = temp & 0x1ff; - - actual_temp = actual_temp * 1000; - - return actual_temp; 
-} - -static int ci_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = ci_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, - CISLANDS_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = ci_thermal_enable_alert(adev, true); - if (ret) - return ret; - return ret; -} - -static int ci_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->powerplay.pp_funcs = &ci_dpm_funcs; - adev->powerplay.pp_handle = adev; - ci_dpm_set_irq_funcs(adev); - - return 0; -} - -static int ci_dpm_late_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - /* init the sysfs and debugfs files late */ - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - return ret; - - ret = ci_set_temperature_range(adev); - if (ret) - return ret; - - return 0; -} - -static int ci_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, - &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, - &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - /* default to balanced state */ - adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; - adev->pm.default_sclk = adev->clock.default_sclk; - adev->pm.default_mclk = adev->clock.default_mclk; - adev->pm.current_sclk = adev->clock.default_sclk; - adev->pm.current_mclk = adev->clock.default_mclk; - adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - - ret = ci_dpm_init_microcode(adev); - if (ret) - return ret; - - if (amdgpu_dpm == 0) - return 0; - - INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); - mutex_lock(&adev->pm.mutex); - ret = ci_dpm_init(adev); - if (ret) - goto dpm_failed; - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - if (amdgpu_dpm == 1) - amdgpu_pm_print_power_states(adev); - mutex_unlock(&adev->pm.mutex); - DRM_INFO("amdgpu: dpm initialized\n"); - - return 0; - -dpm_failed: - ci_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - DRM_ERROR("amdgpu: dpm initialization failed\n"); - return ret; -} - -static int ci_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - flush_work(&adev->pm.dpm.thermal.work); - - mutex_lock(&adev->pm.mutex); - ci_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - - return 0; -} - -static int ci_dpm_hw_init(void *handle) -{ - int ret; - - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!amdgpu_dpm) { - ret = ci_upload_firmware(adev); - if (ret) { - DRM_ERROR("ci_upload_firmware failed\n"); - return ret; - } - ci_dpm_start_smc(adev); - return 0; - } - - mutex_lock(&adev->pm.mutex); - ci_dpm_setup_asic(adev); - ret = ci_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - - return ret; -} - -static int ci_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - ci_dpm_disable(adev); - mutex_unlock(&adev->pm.mutex); - } else { - ci_dpm_stop_smc(adev); - } - - return 0; -} - -static int 
ci_dpm_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); - adev->pm.dpm.last_user_state = adev->pm.dpm.user_state; - adev->pm.dpm.last_state = adev->pm.dpm.state; - adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT; - mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); - - } - - return 0; -} - -static int ci_dpm_resume(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - /* asic init will reset to the boot state */ - mutex_lock(&adev->pm.mutex); - ci_dpm_setup_asic(adev); - ret = ci_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - adev->pm.dpm.user_state = adev->pm.dpm.last_user_state; - adev->pm.dpm.state = adev->pm.dpm.last_state; - mutex_unlock(&adev->pm.mutex); - if (adev->pm.dpm_enabled) - amdgpu_pm_compute_clocks(adev); - } - return 0; -} - -static bool ci_dpm_is_idle(void *handle) -{ - /* XXX */ - return true; -} - -static int ci_dpm_wait_for_idle(void *handle) -{ - /* XXX */ - return 0; -} - -static int ci_dpm_soft_reset(void *handle) -{ - return 0; -} - -static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state state) -{ - u32 cg_thermal_int; - - switch (type) { - case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); - break; - default: - break; - } - break; - - case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); - break; - default: - break; - } - break; - - default: - break; - } - return 0; -} - -static int ci_dpm_process_interrupt(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - bool queue_thermal = false; - - if (entry == NULL) - return -EINVAL; - - switch (entry->src_id) { - case 230: /* thermal low to high */ - DRM_DEBUG("IH: thermal low to high\n"); - adev->pm.dpm.thermal.high_to_low = false; - queue_thermal = true; - break; - case 231: /* thermal high to low */ - DRM_DEBUG("IH: thermal high to low\n"); - adev->pm.dpm.thermal.high_to_low = true; - queue_thermal = true; - break; - default: - break; - } - - if (queue_thermal) - schedule_work(&adev->pm.dpm.thermal.work); - - return 0; -} - -static int ci_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int ci_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 
0; -} - -static int ci_dpm_print_clock_levels(void *handle, - enum pp_clock_type type, char *buf) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; - struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; - struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; - - int i, now, size = 0; - uint32_t clock, pcie_speed; - - switch (type) { - case PP_SCLK: - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency); - clock = RREG32(mmSMC_MSG_ARG_0); - - for (i = 0; i < sclk_table->count; i++) { - if (clock > sclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < sclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, sclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_MCLK: - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency); - clock = RREG32(mmSMC_MSG_ARG_0); - - for (i = 0; i < mclk_table->count; i++) { - if (clock > mclk_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < mclk_table->count; i++) - size += sprintf(buf + size, "%d: %uMhz %s\n", - i, mclk_table->dpm_levels[i].value / 100, - (i == now) ? "*" : ""); - break; - case PP_PCIE: - pcie_speed = ci_get_current_pcie_speed(adev); - for (i = 0; i < pcie_table->count; i++) { - if (pcie_speed != pcie_table->dpm_levels[i].value) - continue; - break; - } - now = i; - - for (i = 0; i < pcie_table->count; i++) - size += sprintf(buf + size, "%d: %s %s\n", i, - (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" : - (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : - (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", - (i == now) ? 
"*" : ""); - break; - default: - break; - } - - return size; -} - -static int ci_dpm_force_clock_level(void *handle, - enum pp_clock_type type, uint32_t mask) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - - if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL) - return -EINVAL; - - if (mask == 0) - return -EINVAL; - - switch (type) { - case PP_SCLK: - if (!pi->sclk_dpm_key_disabled) - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); - break; - - case PP_MCLK: - if (!pi->mclk_dpm_key_disabled) - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_MCLKDPM_SetEnabledMask, - pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); - break; - - case PP_PCIE: - { - uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask; - - if (!pi->pcie_dpm_key_disabled) { - if (fls(tmp) != ffs(tmp)) - amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel); - else - amdgpu_ci_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_PCIeDPM_ForceLevel, - fls(tmp) - 1); - } - break; - } - default: - break; - } - - return 0; -} - -static int ci_dpm_get_sclk_od(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table); - struct ci_single_dpm_table *golden_sclk_table = - &(pi->golden_dpm_table.sclk_table); - int value; - - value = (sclk_table->dpm_levels[sclk_table->count - 1].value - - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * - 100 / - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return value; -} - -static int ci_dpm_set_sclk_od(void *handle, uint32_t value) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); - struct ci_single_dpm_table *golden_sclk_table = - &(pi->golden_dpm_table.sclk_table); - - if (value > 20) - value = 20; - - ps->performance_levels[ps->performance_level_count - 1].sclk = - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * - value / 100 + - golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; - - return 0; -} - -static int ci_dpm_get_mclk_od(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table); - struct ci_single_dpm_table *golden_mclk_table = - &(pi->golden_dpm_table.mclk_table); - int value; - - value = (mclk_table->dpm_levels[mclk_table->count - 1].value - - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * - 100 / - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return value; -} - -static int ci_dpm_set_mclk_od(void *handle, uint32_t value) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ci_power_info *pi = ci_get_pi(adev); - struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); - struct ci_single_dpm_table *golden_mclk_table = - &(pi->golden_dpm_table.mclk_table); - - if (value > 20) - value = 20; - - ps->performance_levels[ps->performance_level_count - 1].mclk = - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * - value / 100 + - golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; - - return 0; -} - -static int 
ci_dpm_read_sensor(void *handle, int idx, - void *value, int *size) -{ - u32 activity_percent = 50; - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - /* size must be at least 4 bytes for all sensors */ - if (*size < 4) - return -EINVAL; - - switch (idx) { - case AMDGPU_PP_SENSOR_GFX_SCLK: - *((uint32_t *)value) = ci_get_average_sclk_freq(adev); - *size = 4; - return 0; - case AMDGPU_PP_SENSOR_GFX_MCLK: - *((uint32_t *)value) = ci_get_average_mclk_freq(adev); - *size = 4; - return 0; - case AMDGPU_PP_SENSOR_GPU_TEMP: - *((uint32_t *)value) = ci_dpm_get_temp(adev); - *size = 4; - return 0; - case AMDGPU_PP_SENSOR_GPU_LOAD: - ret = ci_read_smc_soft_register(adev, - offsetof(SMU7_SoftRegisters, - AverageGraphicsA), - &activity_percent); - if (ret == 0) { - activity_percent += 0x80; - activity_percent >>= 8; - activity_percent = - activity_percent > 100 ? 100 : activity_percent; - } - *((uint32_t *)value) = activity_percent; - *size = 4; - return 0; - default: - return -EINVAL; - } -} - -static int ci_set_powergating_by_smu(void *handle, - uint32_t block_type, bool gate) -{ - switch (block_type) { - case AMD_IP_BLOCK_TYPE_UVD: - ci_dpm_powergate_uvd(handle, gate); - break; - default: - break; - } - return 0; -} - -static const struct amd_ip_funcs ci_dpm_ip_funcs = { - .name = "ci_dpm", - .early_init = ci_dpm_early_init, - .late_init = ci_dpm_late_init, - .sw_init = ci_dpm_sw_init, - .sw_fini = ci_dpm_sw_fini, - .hw_init = ci_dpm_hw_init, - .hw_fini = ci_dpm_hw_fini, - .suspend = ci_dpm_suspend, - .resume = ci_dpm_resume, - .is_idle = ci_dpm_is_idle, - .wait_for_idle = ci_dpm_wait_for_idle, - .soft_reset = ci_dpm_soft_reset, - .set_clockgating_state = ci_dpm_set_clockgating_state, - .set_powergating_state = ci_dpm_set_powergating_state, -}; - -const struct amdgpu_ip_block_version ci_smu_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 7, - .minor = 0, - .rev = 0, - .funcs = &ci_dpm_ip_funcs, -}; - -static const struct amd_pm_funcs ci_dpm_funcs = { - .pre_set_power_state = &ci_dpm_pre_set_power_state, - .set_power_state = &ci_dpm_set_power_state, - .post_set_power_state = &ci_dpm_post_set_power_state, - .display_configuration_changed = &ci_dpm_display_configuration_changed, - .get_sclk = &ci_dpm_get_sclk, - .get_mclk = &ci_dpm_get_mclk, - .print_power_state = &ci_dpm_print_power_state, - .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level, - .force_performance_level = &ci_dpm_force_performance_level, - .vblank_too_short = &ci_dpm_vblank_too_short, - .set_powergating_by_smu = &ci_set_powergating_by_smu, - .set_fan_control_mode = &ci_dpm_set_fan_control_mode, - .get_fan_control_mode = &ci_dpm_get_fan_control_mode, - .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, - .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, - .print_clock_levels = ci_dpm_print_clock_levels, - .force_clock_level = ci_dpm_force_clock_level, - .get_sclk_od = ci_dpm_get_sclk_od, - .set_sclk_od = ci_dpm_set_sclk_od, - .get_mclk_od = ci_dpm_get_mclk_od, - .set_mclk_od = ci_dpm_set_mclk_od, - .check_state_equal = ci_check_state_equal, - .get_vce_clock_state = amdgpu_get_vce_clock_state, - .read_sensor = ci_dpm_read_sensor, -}; - -static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = { - .set = ci_dpm_set_interrupt_state, - .process = ci_dpm_process_interrupt, -}; - -static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev) -{ - adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; - adev->pm.dpm.thermal.irq.funcs = 
&ci_dpm_irq_funcs; -} diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h deleted file mode 100644 index 91be2996ae7c..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h +++ /dev/null @@ -1,349 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __CI_DPM_H__ -#define __CI_DPM_H__ - -#include "amdgpu_atombios.h" -#include "ppsmc.h" - -#define SMU__NUM_SCLK_DPM_STATE 8 -#define SMU__NUM_MCLK_DPM_LEVELS 6 -#define SMU__NUM_LCLK_DPM_LEVELS 8 -#define SMU__NUM_PCIE_DPM_LEVELS 8 -#include "smu7_discrete.h" - -#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2 - -#define CISLANDS_UNUSED_GPIO_PIN 0x7F - -struct ci_pl { - u32 mclk; - u32 sclk; - enum amdgpu_pcie_gen pcie_gen; - u16 pcie_lane; -}; - -struct ci_ps { - u16 performance_level_count; - bool dc_compatible; - u32 sclk_t; - struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS]; -}; - -struct ci_dpm_level { - bool enabled; - u32 value; - u32 param1; -}; - -#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define MAX_REGULAR_DPM_NUMBER 8 -#define CISLAND_MINIMUM_ENGINE_CLOCK 800 - -struct ci_single_dpm_table { - u32 count; - struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; -}; - -struct ci_dpm_table { - struct ci_single_dpm_table sclk_table; - struct ci_single_dpm_table mclk_table; - struct ci_single_dpm_table pcie_speed_table; - struct ci_single_dpm_table vddc_table; - struct ci_single_dpm_table vddci_table; - struct ci_single_dpm_table mvdd_table; -}; - -struct ci_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; - -struct ci_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE]; -}; - -struct ci_ulv_parm -{ - bool supported; - u32 cg_ulv_parameter; - u32 volt_change_delay; - struct ci_pl pl; -}; - -#define CISLANDS_MAX_LEAKAGE_COUNT 8 - -struct ci_leakage_voltage { - u16 count; - u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT]; - u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT]; -}; - -struct ci_dpm_level_enable_mask { - u32 uvd_dpm_enable_mask; - u32 vce_dpm_enable_mask; - u32 acp_dpm_enable_mask; - u32 samu_dpm_enable_mask; - u32 sclk_dpm_enable_mask; - u32 mclk_dpm_enable_mask; - u32 pcie_dpm_enable_mask; -}; - -struct ci_vbios_boot_state -{ - u16 mvdd_bootup_value; - u16 vddc_bootup_value; - u16 
vddci_bootup_value; - u32 sclk_bootup_value; - u32 mclk_bootup_value; - u16 pcie_gen_bootup_value; - u16 pcie_lane_bootup_value; -}; - -struct ci_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 dll_cntl; - u32 mclk_pwrmgt_cntl; - u32 mpll_ad_func_cntl; - u32 mpll_dq_func_cntl; - u32 mpll_func_cntl; - u32 mpll_func_cntl_1; - u32 mpll_func_cntl_2; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct ci_thermal_temperature_setting { - s32 temperature_low; - s32 temperature_high; - s32 temperature_shutdown; -}; - -struct ci_pcie_perf_range { - u16 max; - u16 min; -}; - -enum ci_pt_config_reg_type { - CISLANDS_CONFIGREG_MMR = 0, - CISLANDS_CONFIGREG_SMC_IND, - CISLANDS_CONFIGREG_DIDT_IND, - CISLANDS_CONFIGREG_CACHE, - CISLANDS_CONFIGREG_MAX -}; - -#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001 -#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 -#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 - -struct ci_pt_config_reg { - u32 offset; - u32 mask; - u32 shift; - u32 value; - enum ci_pt_config_reg_type type; -}; - -struct ci_pt_defaults { - u8 svi_load_line_en; - u8 svi_load_line_vddc; - u8 tdc_vddc_throttle_release_limit_perc; - u8 tdc_mawt; - u8 tdc_waterfall_ctl; - u8 dte_ambient_temp_base; - u32 display_cac; - u32 bapm_temp_gradient; - u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; - u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS]; -}; - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 - -struct ci_power_info { - struct ci_dpm_table dpm_table; - struct ci_dpm_table golden_dpm_table; - u32 voltage_control; - u32 mvdd_control; - u32 vddci_control; - u32 active_auto_throttle_sources; - struct ci_clock_registers clock_registers; - u16 acpi_vddc; - u16 acpi_vddci; - enum amdgpu_pcie_gen force_pcie_gen; - enum amdgpu_pcie_gen acpi_pcie_gen; - struct ci_leakage_voltage vddc_leakage; - struct ci_leakage_voltage vddci_leakage; - u16 max_vddc_in_pp_table; - u16 min_vddc_in_pp_table; - u16 max_vddci_in_pp_table; - u16 min_vddci_in_pp_table; - u32 mclk_strobe_mode_threshold; - u32 mclk_stutter_mode_threshold; - u32 mclk_edc_enable_threshold; - u32 mclk_edc_wr_enable_threshold; - struct ci_vbios_boot_state vbios_boot_state; - /* smc offsets */ - u32 sram_end; - u32 dpm_table_start; - u32 soft_regs_start; - u32 mc_reg_table_start; - u32 fan_table_start; - u32 arb_table_start; - /* smc tables */ - SMU7_Discrete_DpmTable smc_state_table; - SMU7_Discrete_MCRegisters smc_mc_reg_table; - SMU7_Discrete_PmFuses smc_powertune_table; - /* other stuff */ - struct ci_mc_reg_table mc_reg_table; - struct atom_voltage_table vddc_voltage_table; - struct atom_voltage_table vddci_voltage_table; - struct atom_voltage_table mvdd_voltage_table; - struct ci_ulv_parm ulv; - u32 power_containment_features; - const struct ci_pt_defaults *powertune_defaults; - u32 dte_tj_offset; - bool vddc_phase_shed_control; - struct ci_thermal_temperature_setting thermal_temp_setting; - struct ci_dpm_level_enable_mask dpm_level_enable_mask; - u32 need_update_smu7_dpm_table; - u32 sclk_dpm_key_disabled; - u32 mclk_dpm_key_disabled; - u32 pcie_dpm_key_disabled; - u32 thermal_sclk_dpm_enabled; - struct ci_pcie_perf_range pcie_gen_performance; - struct ci_pcie_perf_range pcie_lane_performance; - struct ci_pcie_perf_range 
pcie_gen_powersaving; - struct ci_pcie_perf_range pcie_lane_powersaving; - u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS]; - u32 mclk_activity_target; - u32 low_sclk_interrupt_t; - u32 last_mclk_dpm_enable_mask; - u32 sys_pcie_mask; - /* caps */ - bool caps_power_containment; - bool caps_cac; - bool caps_sq_ramping; - bool caps_db_ramping; - bool caps_td_ramping; - bool caps_tcp_ramping; - bool caps_fps; - bool caps_sclk_ds; - bool caps_sclk_ss_support; - bool caps_mclk_ss_support; - bool caps_uvd_dpm; - bool caps_vce_dpm; - bool caps_samu_dpm; - bool caps_acp_dpm; - bool caps_automatic_dc_transition; - bool caps_sclk_throttle_low_notification; - bool caps_dynamic_ac_timing; - bool caps_od_fuzzy_fan_control_support; - /* flags */ - bool thermal_protection; - bool pcie_performance_request; - bool dynamic_ss; - bool dll_default_on; - bool cac_enabled; - bool uvd_enabled; - bool battery_state; - bool pspp_notify_required; - bool enable_bapm_feature; - bool enable_tdc_limit_feature; - bool enable_pkg_pwr_tracking_feature; - bool use_pcie_performance_levels; - bool use_pcie_powersaving_levels; - bool uvd_power_gated; - /* driver states */ - struct amdgpu_ps current_rps; - struct ci_ps current_ps; - struct amdgpu_ps requested_rps; - struct ci_ps requested_ps; - /* fan control */ - bool fan_ctrl_is_in_default_mode; - bool fan_is_controlled_by_smc; - u32 t_min; - u32 fan_ctrl_default_mode; -}; - -#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0 -#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1 -#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2 - -#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256 - -#define CISLANDS_VRC_DFLT0 0x3FFFC000 -#define CISLANDS_VRC_DFLT1 0x000400 -#define CISLANDS_VRC_DFLT2 0xC00080 -#define CISLANDS_VRC_DFLT3 0xC00200 -#define CISLANDS_VRC_DFLT4 0xC01680 -#define CISLANDS_VRC_DFLT5 0xC00033 -#define CISLANDS_VRC_DFLT6 0xC00033 -#define CISLANDS_VRC_DFLT7 0x3FFFC000 - -#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035 -#define CISLAND_TARGETACTIVITY_DFLT 30 -#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10 - -#define PCIE_PERF_REQ_REMOVE_REGISTRY 0 -#define PCIE_PERF_REQ_FORCE_LOWPOWER 1 -#define PCIE_PERF_REQ_PECI_GEN1 2 -#define PCIE_PERF_REQ_PECI_GEN2 3 -#define PCIE_PERF_REQ_PECI_GEN3 4 - -#define CISLANDS_SSTU_DFLT 0 -#define CISLANDS_SST_DFLT 0x00C8 - -/* XXX are these ok? 
*/ -#define CISLANDS_TEMP_RANGE_MIN (90 * 1000) -#define CISLANDS_TEMP_RANGE_MAX (120 * 1000) - -int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit); -void amdgpu_ci_start_smc(struct amdgpu_device *adev); -void amdgpu_ci_reset_smc(struct amdgpu_device *adev); -int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev); -void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev); -void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev); -bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev); -PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg); -PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev); -int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit); -int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev, - u32 smc_address, u32 *value, u32 limit); -int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev, - u32 smc_address, u32 value, u32 limit); - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/ci_smc.c b/drivers/gpu/drm/amd/amdgpu/ci_smc.c deleted file mode 100644 index b8ba51e045b5..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/ci_smc.c +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Alex Deucher - */ - -#include -#include -#include "amdgpu.h" -#include "cikd.h" -#include "ppsmc.h" -#include "amdgpu_ucode.h" -#include "ci_dpm.h" - -#include "smu/smu_7_0_1_d.h" -#include "smu/smu_7_0_1_sh_mask.h" - -static int ci_set_smc_sram_address(struct amdgpu_device *adev, - u32 smc_address, u32 limit) -{ - if (smc_address & 3) - return -EINVAL; - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); - - return 0; -} - -int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit) -{ - unsigned long flags; - u32 data, original_data; - u32 addr; - u32 extra_shift; - int ret = 0; - - if (smc_start_address & 3) - return -EINVAL; - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - - ret = ci_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - /* RMW for the final bytes */ - if (byte_count > 0) { - data = 0; - - ret = ci_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - original_data = RREG32(mmSMC_IND_DATA_0); - - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - - data |= (original_data & ~((~0UL) << extra_shift)); - - ret = ci_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - WREG32(mmSMC_IND_DATA_0, data); - } - -done: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} - -void amdgpu_ci_start_smc(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - - tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK; - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp); -} - -void amdgpu_ci_reset_smc(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); - - tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK; - WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp); -} - -int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev) -{ - static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 }; - - return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); -} - -void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - - tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK; - - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp); -} - -void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - - tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK; - - WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp); -} - -bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev) -{ - u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - u32 pc_c = RREG32_SMC(ixSMC_PC_C); - - if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c)) - return true; - - return false; -} - -PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) -{ - u32 tmp; - int i; - - if (!amdgpu_ci_is_smc_running(adev)) - return PPSMC_Result_Failed; - - WREG32(mmSMC_MESSAGE_0, msg); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(mmSMC_RESP_0); - if (tmp != 0) - break; - udelay(1); - } - tmp = 
RREG32(mmSMC_RESP_0); - - return (PPSMC_Result)tmp; -} - -PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - u32 tmp; - int i; - - if (!amdgpu_ci_is_smc_running(adev)) - return PPSMC_Result_OK; - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); - if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0) - break; - udelay(1); - } - - return PPSMC_Result_OK; -} - -int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit) -{ - const struct smc_firmware_header_v1_0 *hdr; - unsigned long flags; - u32 ucode_start_address; - u32 ucode_size; - const u8 *src; - u32 data; - - if (!adev->pm.fw) - return -EINVAL; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - src = (const u8 *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - - if (ucode_size & 3) - return -EINVAL; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(mmSMC_IND_INDEX_0, ucode_start_address); - WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK, - ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); - while (ucode_size >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - ucode_size -= 4; - } - WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev, - u32 smc_address, u32 *value, u32 limit) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - ret = ci_set_smc_sram_address(adev, smc_address, limit); - if (ret == 0) - *value = RREG32(mmSMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} - -int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev, - u32 smc_address, u32 value, u32 limit) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - ret = ci_set_smc_sram_address(adev, smc_address, limit); - if (ret == 0) - WREG32(mmSMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 71c50d8900e3..07c1f239e9c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1741,6 +1741,69 @@ static bool cik_need_full_reset(struct amdgpu_device *adev) return true; } +static void cik_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1) +{ + uint32_t perfctr = 0; + uint64_t cnt0_of, cnt1_of; + int tmp; + + /* This reports 0 on APUs, so return to avoid writing/reading registers + * that may or may not be different from their GPU counterparts + */ + if (adev->flags & AMD_IS_APU) + return; + + /* Set the 2 events that we wish to watch, defined above */ + /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */ + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); + + /* Write to enable desired perf counters */ + WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr); + /* Zero out and enable the perf counters + * Write 
0x5: + * Bit 0 = Start all counters(1) + * Bit 2 = Global counter reset enable(1) + */ + WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005); + + msleep(1000); + + /* Load the shadow and disable the perf counters + * Write 0x2: + * Bit 0 = Stop counters(0) + * Bit 1 = Load the shadow counters(1) + */ + WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002); + + /* Read register values to get any >32bit overflow */ + tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK); + cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER); + cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER); + + /* Get the values and add the overflow */ + *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32); + *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); +} + +static bool cik_need_reset_on_init(struct amdgpu_device *adev) +{ + u32 clock_cntl, pc; + + if (adev->flags & AMD_IS_APU) + return false; + + /* check if the SMC is already running */ + clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + pc = RREG32_SMC(ixSMC_PC_C); + if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) && + (0x20100 <= pc)) + return true; + + return false; +} + static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, @@ -1756,6 +1819,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .invalidate_hdp = &cik_invalidate_hdp, .need_full_reset = &cik_need_full_reset, .init_doorbell_index = &legacy_doorbell_index_init, + .get_pcie_usage = &cik_get_pcie_usage, + .need_reset_on_init = &cik_need_reset_on_init, }; static int cik_common_early_init(void *handle) @@ -2005,10 +2070,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block); amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); - if (amdgpu_dpm == -1) - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - else - amdgpu_device_ip_block_add(adev, &ci_smu_ip_block); + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) @@ -2026,10 +2088,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block); amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); - if (amdgpu_dpm == -1) - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); - else - amdgpu_device_ip_block_add(adev, &ci_smu_ip_block); + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h index 2a086610f74d..2fcc4b60153c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h @@ -24,7 +24,6 @@ #ifndef __CIK_DPM_H__ #define __CIK_DPM_H__ -extern const struct amdgpu_ip_block_version ci_smu_ip_block; extern const struct amdgpu_ip_block_version kv_smu_ip_block; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 8a8b4967a101..721c757156e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -103,9 +103,9 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev) */ static int cik_ih_irq_init(struct amdgpu_device *adev) { + struct amdgpu_ih_ring *ih = &adev->irq.ih; int 
rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; - u64 wptr_off; /* disable irqs */ cik_ih_disable_interrupts(adev); @@ -131,9 +131,8 @@ static int cik_ih_irq_init(struct amdgpu_device *adev) ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK; /* set the writeback address whether it's enabled or not */ - wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); - WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); + WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF); WREG32(mmIH_RB_CNTL, ih_rb_cntl); @@ -183,11 +182,12 @@ static void cik_ih_irq_disable(struct amdgpu_device *adev) * Used by cik_irq_process(). * Returns the value of the wptr. */ -static u32 cik_ih_get_wptr(struct amdgpu_device *adev) +static u32 cik_ih_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { u32 wptr, tmp; - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + wptr = le32_to_cpu(*ih->wptr_cpu); if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; @@ -196,13 +196,13 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev) * this should allow us to catchup. */ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; tmp = RREG32(mmIH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(mmIH_RB_CNTL, tmp); } - return (wptr & adev->irq.ih.ptr_mask); + return (wptr & ih->ptr_mask); } /* CIK IV Ring @@ -237,16 +237,17 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev) * position and also advance the position. */ static void cik_ih_decode_iv(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, struct amdgpu_iv_entry *entry) { /* wptr/rptr are in bytes! */ - u32 ring_index = adev->irq.ih.rptr >> 2; + u32 ring_index = ih->rptr >> 2; uint32_t dw[4]; - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; @@ -256,7 +257,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev, entry->pasid = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! */ - adev->irq.ih.rptr += 16; + ih->rptr += 16; } /** @@ -266,9 +267,10 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev, * * Set the IH ring buffer rptr. 
*/ -static void cik_ih_set_rptr(struct amdgpu_device *adev) +static void cik_ih_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); + WREG32(mmIH_RB_RPTR, ih->rptr); } static int cik_ih_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 45795191de1f..189599b694e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -220,7 +220,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 extra_bits = vmid & 0xf; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 9d3ea298e116..61024b9c7a4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -103,9 +103,9 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev) */ static int cz_ih_irq_init(struct amdgpu_device *adev) { - int rb_bufsz; + struct amdgpu_ih_ring *ih = &adev->irq.ih; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; - u64 wptr_off; + int rb_bufsz; /* disable irqs */ cz_ih_disable_interrupts(adev); @@ -133,9 +133,8 @@ static int cz_ih_irq_init(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); /* set the writeback address whether it's enabled or not */ - wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); - WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); + WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF); WREG32(mmIH_RB_CNTL, ih_rb_cntl); @@ -185,11 +184,12 @@ static void cz_ih_irq_disable(struct amdgpu_device *adev) * Used by cz_irq_process(VI). * Returns the value of the wptr. */ -static u32 cz_ih_get_wptr(struct amdgpu_device *adev) +static u32 cz_ih_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { u32 wptr, tmp; - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + wptr = le32_to_cpu(*ih->wptr_cpu); if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); @@ -198,13 +198,13 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev) * this should allow us to catchup. */ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; tmp = RREG32(mmIH_RB_CNTL); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); } - return (wptr & adev->irq.ih.ptr_mask); + return (wptr & ih->ptr_mask); } /** @@ -216,16 +216,17 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev) * position and also advance the position. */ static void cz_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry) + struct amdgpu_ih_ring *ih, + struct amdgpu_iv_entry *entry) { /* wptr/rptr are in bytes! 
*/ - u32 ring_index = adev->irq.ih.rptr >> 2; + u32 ring_index = ih->rptr >> 2; uint32_t dw[4]; - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; @@ -235,7 +236,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev, entry->pasid = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! */ - adev->irq.ih.rptr += 16; + ih->rptr += 16; } /** @@ -245,9 +246,10 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev, * * Set the IH ring buffer rptr. */ -static void cz_ih_set_rptr(struct amdgpu_device *adev) +static void cz_ih_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); + WREG32(mmIH_RB_RPTR, ih->rptr); } static int cz_ih_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4cfecdce29a3..1f0426d2fc2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1682,7 +1682,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, dce_v10_0_audio_write_sad_regs(encoder); dce_v10_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7c868916d90f..2280b971d758 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1724,7 +1724,7 @@ static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, dce_v11_0_audio_write_sad_regs(encoder); dce_v11_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 17eaaba36017..bea32f076b91 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1423,6 +1423,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; + struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); struct hdmi_avi_infoframe frame; u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; uint8_t *payload = buffer + 3; @@ -1430,7 +1431,7 @@ static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder, ssize_t err; u32 tmp; - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; @@ -2979,7 +2980,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device 
*adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - unsigned long flags; + unsigned long flags; unsigned crtc_id; struct amdgpu_crtc *amdgpu_crtc; struct amdgpu_flip_work *works; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 8c0576978d36..13da915991dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1616,7 +1616,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder, dce_v8_0_audio_write_sad_regs(encoder); dce_v8_0_audio_write_latency_fields(encoder, mode); - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode, false); + err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); if (err < 0) { DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); return; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 1dc3013ea1d5..305276c7e4bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1842,13 +1842,13 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (ctx_switch) { + if (flags & AMDGPU_HAVE_CTX_SWITCH) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 3a9fb6018c16..a59e0fdf5a97 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2228,13 +2228,13 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (ctx_switch) { + if (flags & AMDGPU_HAVE_CTX_SWITCH) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } @@ -2259,11 +2259,27 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); + /* Currently, there is a high possibility to get wave ID mismatch + * between ME and GDS, leading to a hw deadlock, because ME generates + * different wave IDs than the GDS expects. This situation happens + * randomly when at least 5 compute pipes use GDS ordered append. + * The wave IDs generated by ME are also wrong after suspend/resume. + * Those are probably bugs somewhere else in the kernel driver. + * + * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and + * GDS to 0 for this ring (me/pipe). 
+ */ + if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START); + amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); + } + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN @@ -5000,7 +5016,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { 7 + /* gfx_v7_0_ring_emit_pipeline_sync */ CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */ 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */ - .emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */ + .emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */ .emit_ib = gfx_v7_0_ring_emit_ib_compute, .emit_fence = gfx_v7_0_ring_emit_fence_compute, .emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync, @@ -5057,6 +5073,7 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev) adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE); adev->gds.gws.total_size = 64; adev->gds.oa.total_size = 16; + adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID); if (adev->gds.mem.total_size == 64 * 1024) { adev->gds.mem.gfx_partition_size = 4096; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 57cb3a51bda7..b8e50a34bdb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6047,7 +6047,7 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring) static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; @@ -6079,11 +6079,27 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); + /* Currently, there is a high possibility to get wave ID mismatch + * between ME and GDS, leading to a hw deadlock, because ME generates + * different wave IDs than the GDS expects. This situation happens + * randomly when at least 5 compute pipes use GDS ordered append. + * The wave IDs generated by ME are also wrong after suspend/resume. + * Those are probably bugs somewhere else in the kernel driver. + * + * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and + * GDS to 0 for this ring (me/pipe). 
+ */ + if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START); + amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); + } + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN @@ -6890,7 +6906,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ - .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ + .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */ .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync, @@ -6920,7 +6936,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ 17 + /* gfx_v8_0_ring_emit_vm_flush */ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ - .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ + .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */ .emit_fence = gfx_v8_0_ring_emit_fence_kiq, .test_ring = gfx_v8_0_ring_test_ring, .insert_nop = amdgpu_ring_insert_nop, @@ -6996,6 +7012,7 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE); adev->gds.gws.total_size = 64; adev->gds.oa.total_size = 16; + adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID); if (adev->gds.mem.total_size == 64 * 1024) { adev->gds.mem.gfx_partition_size = 4096; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index fbca0494f871..5533f6e4f4a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3972,7 +3972,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; @@ -4005,11 +4005,27 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); + /* Currently, there is a high possibility to get wave ID mismatch + * between ME and GDS, leading to a hw deadlock, because ME generates + * different wave IDs than the GDS expects. This situation happens + * randomly when at least 5 compute pipes use GDS ordered append. + * The wave IDs generated by ME are also wrong after suspend/resume. + * Those are probably bugs somewhere else in the kernel driver. + * + * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and + * GDS to 0 for this ring (me/pipe). 
+ */ + if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID); + amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id); + } + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ amdgpu_ring_write(ring, @@ -4729,7 +4745,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v9_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ - .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ + .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ .emit_ib = gfx_v9_0_ring_emit_ib_compute, .emit_fence = gfx_v9_0_ring_emit_fence, .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, @@ -4764,7 +4780,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v9_0_ring_emit_vm_flush */ 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ - .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ + .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ .emit_fence = gfx_v9_0_ring_emit_fence_kiq, .test_ring = gfx_v9_0_ring_test_ring, .insert_nop = amdgpu_ring_insert_nop, @@ -4846,6 +4862,26 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) break; } + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_VEGA20: + adev->gds.gds_compute_max_wave_id = 0x7ff; + break; + case CHIP_VEGA12: + adev->gds.gds_compute_max_wave_id = 0x27f; + break; + case CHIP_RAVEN: + if (adev->rev_id >= 0x8) + adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */ + else + adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */ + break; + default: + /* this really depends on the chip */ + adev->gds.gds_compute_max_wave_id = 0x7ff; + break; + } + adev->gds.gws.total_size = 64; adev->gds.oa.total_size = 16; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1ad7e6b8ed1d..34440672f938 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1471,8 +1471,9 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, gmc_v8_0_set_fault_enable_default(adev, false); if (printk_ratelimit()) { - struct amdgpu_task_info task_info = { 0 }; + struct amdgpu_task_info task_info; + memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); dev_err(adev->dev, "GPU fault detected: %d 0x%08x for process %s pid %d thread %s pid %d\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index bacdaef77b6c..600259b4e291 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -305,6 +305,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; + bool retry_fault = !!(entry->src_data[1] & 0x80); uint32_t status = 0; u64 addr; @@ -320,13 +321,16 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, } if (printk_ratelimit()) { - struct amdgpu_task_info task_info = { 0 }; + struct amdgpu_task_info task_info; + memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); dev_err(adev->dev, - "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d 
thread %s pid %d)\n", + "[%s] %s page fault (src_id:%u ring:%u vmid:%u " + "pasid:%u, for process %s pid %d thread %s pid %d)\n", entry->vmid_src ? "mmhub" : "gfxhub", + retry_fault ? "retry" : "no-retry", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, task_info.task_name, task_info.pid); @@ -961,7 +965,11 @@ static int gmc_v9_0_sw_init(void *handle) * vm size is 256TB (48bit), maximum size of Vega10, * block size 512 (9bit) */ - amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); + /* sriov restrict max_pfn below AMDGPU_GMC_HOLE */ + if (amdgpu_sriov_vf(adev)) + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47); + else + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index a3984d10b604..b1626e1d2f5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -103,9 +103,9 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev) */ static int iceland_ih_irq_init(struct amdgpu_device *adev) { + struct amdgpu_ih_ring *ih = &adev->irq.ih; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; - u64 wptr_off; /* disable irqs */ iceland_ih_disable_interrupts(adev); @@ -133,9 +133,8 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); /* set the writeback address whether it's enabled or not */ - wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); - WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); + WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF); WREG32(mmIH_RB_CNTL, ih_rb_cntl); @@ -185,11 +184,12 @@ static void iceland_ih_irq_disable(struct amdgpu_device *adev) * Used by cz_irq_process(VI). * Returns the value of the wptr. */ -static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) +static u32 iceland_ih_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { u32 wptr, tmp; - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + wptr = le32_to_cpu(*ih->wptr_cpu); if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); @@ -198,13 +198,13 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) * this should allow us to catchup. */ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; tmp = RREG32(mmIH_RB_CNTL); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); } - return (wptr & adev->irq.ih.ptr_mask); + return (wptr & ih->ptr_mask); } /** @@ -216,16 +216,17 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) * position and also advance the position. */ static void iceland_ih_decode_iv(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih, struct amdgpu_iv_entry *entry) { /* wptr/rptr are in bytes! 
*/ - u32 ring_index = adev->irq.ih.rptr >> 2; + u32 ring_index = ih->rptr >> 2; uint32_t dw[4]; - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; @@ -235,7 +236,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev, entry->pasid = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! */ - adev->irq.ih.rptr += 16; + ih->rptr += 16; } /** @@ -245,9 +246,10 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev, * * Set the IH ring buffer rptr. */ -static void iceland_ih_set_rptr(struct amdgpu_device *adev) +static void iceland_ih_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); + WREG32(mmIH_RB_RPTR, ih->rptr); } static int iceland_ih_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index b11a1c17a7f2..73851ebb3833 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -266,7 +266,8 @@ flr_done: } /* Trigger recovery for world switch failure if no TDR */ - if (amdgpu_device_should_recover_gpu(adev)) + if (amdgpu_device_should_recover_gpu(adev) + && amdgpu_lockup_timeout == MAX_SCHEDULE_TIMEOUT) amdgpu_device_gpu_recover(adev, NULL); } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index accdedd63c98..cc967dbfd631 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -27,13 +27,9 @@ #include "nbio/nbio_6_1_default.h" #include "nbio/nbio_6_1_offset.h" #include "nbio/nbio_6_1_sh_mask.h" +#include "nbio/nbio_6_1_smn.h" #include "vega10_enum.h" -#define smnCPM_CONTROL 0x11180460 -#define smnPCIE_CNTL2 0x11180070 -#define smnPCIE_CONFIG_CNTL 0x11180044 -#define smnPCIE_CI_CNTL 0x11180080 - static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); @@ -72,7 +68,7 @@ static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev) } static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance, - bool use_doorbell, int doorbell_index) + bool use_doorbell, int doorbell_index, int doorbell_size) { u32 reg = instance == 0 ? 
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); @@ -81,7 +77,7 @@ static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instan if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); - doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size); } else doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index df34dc79d444..1cdb98ad2db3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -27,13 +27,11 @@ #include "nbio/nbio_7_0_default.h" #include "nbio/nbio_7_0_offset.h" #include "nbio/nbio_7_0_sh_mask.h" +#include "nbio/nbio_7_0_smn.h" #include "vega10_enum.h" #define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c -#define smnCPM_CONTROL 0x11180460 -#define smnPCIE_CNTL2 0x11180070 - static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); @@ -69,7 +67,7 @@ static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev) } static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance, - bool use_doorbell, int doorbell_index) + bool use_doorbell, int doorbell_index, int doorbell_size) { u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); @@ -78,7 +76,7 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); - doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size); } else doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 4cd31a276dcd..c69d51598cfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -26,16 +26,13 @@ #include "nbio/nbio_7_4_offset.h" #include "nbio/nbio_7_4_sh_mask.h" +#include "nbio/nbio_7_4_0_smn.h" #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c -#define smnCPM_CONTROL 0x11180460 -#define smnPCIE_CNTL2 0x11180070 -#define smnPCIE_CI_CNTL 0x11180080 - static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev) { - u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -68,7 +65,7 @@ static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev) } static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance, - bool use_doorbell, int doorbell_index) + bool use_doorbell, int doorbell_index, int doorbell_size) { u32 reg = instance == 0 ? 
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); @@ -77,7 +74,7 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); - doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size); } else doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); @@ -93,7 +90,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev, static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, bool enable) { + u32 tmp = 0; + + if (enable) { + tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) | + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) | + REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0); + + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW, + lower_32_bits(adev->doorbell.base)); + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH, + upper_32_bits(adev->doorbell.base)); + } + WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp); } static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 0de00fbe9233..f3a7d207af07 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -191,7 +191,7 @@ enum psp_gfx_fw_type GFX_FW_TYPE_MMSCH = 19, GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20, GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21, - GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22, + GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL = 22, GFX_FW_TYPE_UVD1 = 23, GFX_FW_TYPE_MAX = 24 }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index d78b4306a36f..77c2bc344dfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -38,75 +38,6 @@ MODULE_FIRMWARE("amdgpu/raven_asd.bin"); MODULE_FIRMWARE("amdgpu/picasso_asd.bin"); MODULE_FIRMWARE("amdgpu/raven2_asd.bin"); -static int -psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) -{ - switch(ucode->ucode_id) { - case AMDGPU_UCODE_ID_SDMA0: - *type = GFX_FW_TYPE_SDMA0; - break; - case AMDGPU_UCODE_ID_SDMA1: - *type = GFX_FW_TYPE_SDMA1; - break; - case AMDGPU_UCODE_ID_CP_CE: - *type = GFX_FW_TYPE_CP_CE; - break; - case AMDGPU_UCODE_ID_CP_PFP: - *type = GFX_FW_TYPE_CP_PFP; - break; - case AMDGPU_UCODE_ID_CP_ME: - *type = GFX_FW_TYPE_CP_ME; - break; - case AMDGPU_UCODE_ID_CP_MEC1: - *type = GFX_FW_TYPE_CP_MEC; - break; - case AMDGPU_UCODE_ID_CP_MEC1_JT: - *type = GFX_FW_TYPE_CP_MEC_ME1; - break; - case AMDGPU_UCODE_ID_CP_MEC2: - *type = GFX_FW_TYPE_CP_MEC; - break; - case AMDGPU_UCODE_ID_CP_MEC2_JT: - *type = GFX_FW_TYPE_CP_MEC_ME2; - break; - case AMDGPU_UCODE_ID_RLC_G: - *type = GFX_FW_TYPE_RLC_G; - break; - case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: - *type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL; - break; - case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: - *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; - break; - case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: - *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; - break; - case AMDGPU_UCODE_ID_SMC: - *type = GFX_FW_TYPE_SMU; - break; - case 
AMDGPU_UCODE_ID_UVD: - *type = GFX_FW_TYPE_UVD; - break; - case AMDGPU_UCODE_ID_VCE: - *type = GFX_FW_TYPE_VCE; - break; - case AMDGPU_UCODE_ID_VCN: - *type = GFX_FW_TYPE_VCN; - break; - case AMDGPU_UCODE_ID_DMCU_ERAM: - *type = GFX_FW_TYPE_DMCU_ERAM; - break; - case AMDGPU_UCODE_ID_DMCU_INTV: - *type = GFX_FW_TYPE_DMCU_ISR; - break; - case AMDGPU_UCODE_ID_MAXIMUM: - default: - return -EINVAL; - } - - return 0; -} - static int psp_v10_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -158,26 +89,6 @@ out: return err; } -static int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, - struct psp_gfx_cmd_resp *cmd) -{ - int ret; - uint64_t fw_mem_mc_addr = ucode->mc_addr; - - memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); - - cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); - cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; - - ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); - if (ret) - DRM_ERROR("Unknown firmware type\n"); - - return ret; -} - static int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -454,7 +365,6 @@ static int psp_v10_0_mode1_reset(struct psp_context *psp) static const struct psp_funcs psp_v10_0_funcs = { .init_microcode = psp_v10_0_init_microcode, - .prep_cmd_buf = psp_v10_0_prep_cmd_buf, .ring_init = psp_v10_0_ring_init, .ring_create = psp_v10_0_ring_create, .ring_stop = psp_v10_0_ring_stop, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0c6e7f9b143f..860b70d80d3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -40,60 +40,6 @@ MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 -static int -psp_v11_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) -{ - switch (ucode->ucode_id) { - case AMDGPU_UCODE_ID_SDMA0: - *type = GFX_FW_TYPE_SDMA0; - break; - case AMDGPU_UCODE_ID_SDMA1: - *type = GFX_FW_TYPE_SDMA1; - break; - case AMDGPU_UCODE_ID_CP_CE: - *type = GFX_FW_TYPE_CP_CE; - break; - case AMDGPU_UCODE_ID_CP_PFP: - *type = GFX_FW_TYPE_CP_PFP; - break; - case AMDGPU_UCODE_ID_CP_ME: - *type = GFX_FW_TYPE_CP_ME; - break; - case AMDGPU_UCODE_ID_CP_MEC1: - *type = GFX_FW_TYPE_CP_MEC; - break; - case AMDGPU_UCODE_ID_CP_MEC1_JT: - *type = GFX_FW_TYPE_CP_MEC_ME1; - break; - case AMDGPU_UCODE_ID_CP_MEC2: - *type = GFX_FW_TYPE_CP_MEC; - break; - case AMDGPU_UCODE_ID_CP_MEC2_JT: - *type = GFX_FW_TYPE_CP_MEC_ME2; - break; - case AMDGPU_UCODE_ID_RLC_G: - *type = GFX_FW_TYPE_RLC_G; - break; - case AMDGPU_UCODE_ID_SMC: - *type = GFX_FW_TYPE_SMU; - break; - case AMDGPU_UCODE_ID_UVD: - *type = GFX_FW_TYPE_UVD; - break; - case AMDGPU_UCODE_ID_VCE: - *type = GFX_FW_TYPE_VCE; - break; - case AMDGPU_UCODE_ID_UVD1: - *type = GFX_FW_TYPE_UVD1; - break; - case AMDGPU_UCODE_ID_MAXIMUM: - default: - return -EINVAL; - } - - return 0; -} - static int psp_v11_0_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -152,18 +98,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); - if (err) - goto out2; - - err = amdgpu_ucode_validate(adev->psp.ta_fw); - if (err) - goto out2; - - ta_hdr = (const struct 
ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; - adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); - adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); - adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + - le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; + adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); + adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); + adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + } return 0; @@ -267,26 +217,6 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) return ret; } -static int psp_v11_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, - struct psp_gfx_cmd_resp *cmd) -{ - int ret; - uint64_t fw_mem_mc_addr = ucode->mc_addr; - - memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); - - cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); - cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; - - ret = psp_v11_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); - if (ret) - DRM_ERROR("Unknown firmware type\n"); - - return ret; -} - static int psp_v11_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -753,7 +683,6 @@ static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv, .bootloader_load_sos = psp_v11_0_bootloader_load_sos, - .prep_cmd_buf = psp_v11_0_prep_cmd_buf, .ring_init = psp_v11_0_ring_init, .ring_create = psp_v11_0_ring_create, .ring_stop = psp_v11_0_ring_stop, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 79694ff16969..c63de945c021 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -47,57 +47,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554}; -static int -psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) -{ - switch(ucode->ucode_id) { - case AMDGPU_UCODE_ID_SDMA0: - *type = GFX_FW_TYPE_SDMA0; - break; - case AMDGPU_UCODE_ID_SDMA1: - *type = GFX_FW_TYPE_SDMA1; - break; - case AMDGPU_UCODE_ID_CP_CE: - *type = GFX_FW_TYPE_CP_CE; - break; - case AMDGPU_UCODE_ID_CP_PFP: - *type = GFX_FW_TYPE_CP_PFP; - break; - case AMDGPU_UCODE_ID_CP_ME: - *type = GFX_FW_TYPE_CP_ME; - break; - case AMDGPU_UCODE_ID_CP_MEC1: - *type = GFX_FW_TYPE_CP_MEC; - break; - case AMDGPU_UCODE_ID_CP_MEC1_JT: - *type = GFX_FW_TYPE_CP_MEC_ME1; - break; - case AMDGPU_UCODE_ID_CP_MEC2: - *type = GFX_FW_TYPE_CP_MEC; - break; - case AMDGPU_UCODE_ID_CP_MEC2_JT: - *type = GFX_FW_TYPE_CP_MEC_ME2; - break; - case AMDGPU_UCODE_ID_RLC_G: - *type = GFX_FW_TYPE_RLC_G; - break; - case AMDGPU_UCODE_ID_SMC: - *type = GFX_FW_TYPE_SMU; - break; - case AMDGPU_UCODE_ID_UVD: - *type = GFX_FW_TYPE_UVD; - break; - case AMDGPU_UCODE_ID_VCE: - *type = GFX_FW_TYPE_VCE; - break; - case AMDGPU_UCODE_ID_MAXIMUM: - default: - return -EINVAL; - } - - return 0; -} - static int 
psp_v3_1_init_microcode(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -277,26 +226,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) return ret; } -static int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, - struct psp_gfx_cmd_resp *cmd) -{ - int ret; - uint64_t fw_mem_mc_addr = ucode->mc_addr; - - memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); - - cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW; - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr); - cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr); - cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size; - - ret = psp_v3_1_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type); - if (ret) - DRM_ERROR("Unknown firmware type\n"); - - return ret; -} - static int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -615,7 +544,6 @@ static const struct psp_funcs psp_v3_1_funcs = { .init_microcode = psp_v3_1_init_microcode, .bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv, .bootloader_load_sos = psp_v3_1_bootloader_load_sos, - .prep_cmd_buf = psp_v3_1_prep_cmd_buf, .ring_init = psp_v3_1_ring_init, .ring_create = psp_v3_1_ring_create, .ring_stop = psp_v3_1_ring_stop, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 9f3cb2aec7c2..cca3552b36ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -247,7 +247,7 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 1bccc5fe2d9d..0ce8331baeb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -421,7 +421,7 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1145,8 +1145,7 @@ static int sdma_v3_0_sw_init(void *handle) ring->ring_obj = NULL; if (!amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; - ring->doorbell_index = (i == 0) ? 
- adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1; + ring->doorbell_index = adev->doorbell_index.sdma_engine[i]; } else { ring->use_pollmem = true; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 6811a5d05b27..c816e55d43a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = { static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = { - SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), @@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { - SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), @@ -500,7 +500,7 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -834,8 +834,6 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) OFFSET, ring->doorbell_index); WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell); WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset); - adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, - ring->doorbell_index); sdma_v4_0_ring_set_wptr(ring); @@ -1522,9 +1520,7 @@ static int sdma_v4_0_sw_init(void *handle) ring->use_doorbell?"true":"false"); /* doorbell size is 2 dwords, get DWORD offset */ - ring->doorbell_index = (i == 0) ? - (adev->doorbell_index.sdma_engine0 << 1) - : (adev->doorbell_index.sdma_engine1 << 1); + ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, @@ -1543,9 +1539,7 @@ static int sdma_v4_0_sw_init(void *handle) /* paging queue use same doorbell index/routing as gfx queue * with 0x400 (4096 dwords) offset on second doorbell page */ - ring->doorbell_index = (i == 0) ? 
- (adev->doorbell_index.sdma_engine0 << 1) - : (adev->doorbell_index.sdma_engine1 << 1); + ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; ring->doorbell_index += 0x400; sprintf(ring->name, "page%d", i); diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index f8408f88cd37..9d8df68893b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -47,6 +47,7 @@ #include "dce/dce_6_0_d.h" #include "uvd/uvd_4_0_d.h" #include "bif/bif_3_0_d.h" +#include "bif/bif_3_0_sh_mask.h" static const u32 tahiti_golden_registers[] = { @@ -1258,6 +1259,11 @@ static bool si_need_full_reset(struct amdgpu_device *adev) return true; } +static bool si_need_reset_on_init(struct amdgpu_device *adev) +{ + return false; +} + static int si_get_pcie_lanes(struct amdgpu_device *adev) { u32 link_width_cntl; @@ -1323,6 +1329,52 @@ static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes) WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); } +static void si_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1) +{ + uint32_t perfctr = 0; + uint64_t cnt0_of, cnt1_of; + int tmp; + + /* This reports 0 on APUs, so return to avoid writing/reading registers + * that may or may not be different from their GPU counterparts + */ + if (adev->flags & AMD_IS_APU) + return; + + /* Set the 2 events that we wish to watch, defined above */ + /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */ + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); + + /* Write to enable desired perf counters */ + WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr); + /* Zero out and enable the perf counters + * Write 0x5: + * Bit 0 = Start all counters(1) + * Bit 2 = Global counter reset enable(1) + */ + WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005); + + msleep(1000); + + /* Load the shadow and disable the perf counters + * Write 0x2: + * Bit 0 = Stop counters(0) + * Bit 1 = Load the shadow counters(1) + */ + WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002); + + /* Read register values to get any >32bit overflow */ + tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK); + cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER); + cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER); + + /* Get the values and add the overflow */ + *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32); + *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); +} + static const struct amdgpu_asic_funcs si_asic_funcs = { .read_disabled_bios = &si_read_disabled_bios, @@ -1339,6 +1391,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs = .flush_hdp = &si_flush_hdp, .invalidate_hdp = &si_invalidate_hdp, .need_full_reset = &si_need_full_reset, + .get_pcie_usage = &si_get_pcie_usage, + .need_reset_on_init = &si_need_reset_on_init, }; static uint32_t si_get_rev_id(struct amdgpu_device *adev) @@ -1382,7 +1436,7 @@ static int si_common_early_init(void *handle) AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG; - adev->pg_flags = 0; + adev->pg_flags = 0; adev->external_rev_id = (adev->rev_id == 0) ? 1 : (adev->rev_id == 1) ? 
5 : 6; break; diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index b6e473134e19..f15f196684ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -63,7 +63,7 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index da58040fdbdc..41e01a7f57a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -6216,10 +6216,12 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2; if (current_link_speed == AMDGPU_PCIE_GEN2) break; + /* fall through */ case AMDGPU_PCIE_GEN2: if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; #endif + /* fall through */ default: si_pi->force_pcie_gen = si_get_current_pcie_speed(adev); break; diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 2938fb9f17cc..8c50c9cab455 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -57,9 +57,9 @@ static void si_ih_disable_interrupts(struct amdgpu_device *adev) static int si_ih_irq_init(struct amdgpu_device *adev) { + struct amdgpu_ih_ring *ih = &adev->irq.ih; int rb_bufsz; u32 interrupt_cntl, ih_cntl, ih_rb_cntl; - u64 wptr_off; si_ih_disable_interrupts(adev); WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8); @@ -76,9 +76,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev) (rb_bufsz << 1) | IH_WPTR_WRITEBACK_ENABLE; - wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); - WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32(IH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); + WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF); WREG32(IH_RB_CNTL, ih_rb_cntl); WREG32(IH_RB_RPTR, 0); WREG32(IH_RB_WPTR, 0); @@ -100,34 +99,36 @@ static void si_ih_irq_disable(struct amdgpu_device *adev) mdelay(1); } -static u32 si_ih_get_wptr(struct amdgpu_device *adev) +static u32 si_ih_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { u32 wptr, tmp; - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + wptr = le32_to_cpu(*ih->wptr_cpu); if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) { wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK; dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; tmp = RREG32(IH_RB_CNTL); tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK; WREG32(IH_RB_CNTL, tmp); } - return (wptr & adev->irq.ih.ptr_mask); + return (wptr & ih->ptr_mask); } static void si_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry) + struct amdgpu_ih_ring *ih, + struct amdgpu_iv_entry *entry) { - u32 ring_index = adev->irq.ih.rptr >> 2; + u32 ring_index = ih->rptr >> 2; uint32_t dw[4]; - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index 
+ 2]); - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; @@ -135,12 +136,13 @@ static void si_ih_decode_iv(struct amdgpu_device *adev, entry->ring_id = dw[2] & 0xff; entry->vmid = (dw[2] >> 8) & 0xff; - adev->irq.ih.rptr += 16; + ih->rptr += 16; } -static void si_ih_set_rptr(struct amdgpu_device *adev) +static void si_ih_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - WREG32(IH_RB_RPTR, adev->irq.ih.rptr); + WREG32(IH_RB_RPTR, ih->rptr); } static int si_ih_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8849b74078d6..99ebcf29dcb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -43,6 +43,10 @@ #include "hdp/hdp_4_0_sh_mask.h" #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" +#include "nbio/nbio_7_0_default.h" +#include "nbio/nbio_7_0_sh_mask.h" +#include "nbio/nbio_7_0_smn.h" +#include "mp/mp_9_0_offset.h" #include "soc15.h" #include "soc15_common.h" @@ -385,14 +389,13 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, } - -static int soc15_asic_reset(struct amdgpu_device *adev) +static int soc15_asic_mode1_reset(struct amdgpu_device *adev) { u32 i; amdgpu_atombios_scratch_regs_engine_hung(adev, true); - dev_info(adev->dev, "GPU reset\n"); + dev_info(adev->dev, "GPU mode1 reset\n"); /* disable BM */ pci_clear_master(adev->pdev); @@ -417,6 +420,63 @@ static int soc15_asic_reset(struct amdgpu_device *adev) return 0; } +static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { + *cap = false; + return -ENOENT; + } + + return pp_funcs->get_asic_baco_capability(pp_handle, cap); +} + +static int soc15_asic_baco_reset(struct amdgpu_device *adev) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + + dev_info(adev->dev, "GPU BACO reset\n"); + + return 0; +} + +static int soc15_asic_reset(struct amdgpu_device *adev) +{ + int ret; + bool baco_reset; + + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_VEGA20: + soc15_asic_get_baco_capability(adev, &baco_reset); + break; + default: + baco_reset = false; + break; + } + + if (baco_reset) + ret = soc15_asic_baco_reset(adev); + else + ret = soc15_asic_mode1_reset(adev); + + return ret; +} + /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, u32 cntl_reg, u32 status_reg) { @@ -535,10 +595,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - if (adev->asic_type == CHIP_VEGA20) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - else - 
amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) { + if (adev->asic_type == CHIP_VEGA20) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + else + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); + } amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); if (!amdgpu_sriov_vf(adev)) @@ -560,7 +622,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); @@ -601,6 +664,68 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev) /* change this when we implement soft reset */ return true; } +static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1) +{ + uint32_t perfctr = 0; + uint64_t cnt0_of, cnt1_of; + int tmp; + + /* This reports 0 on APUs, so return to avoid writing/reading registers + * that may or may not be different from their GPU counterparts + */ + if (adev->flags & AMD_IS_APU) + return; + + /* Set the 2 events that we wish to watch, defined above */ + /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */ + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); + + /* Write to enable desired perf counters */ + WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr); + /* Zero out and enable the perf counters + * Write 0x5: + * Bit 0 = Start all counters(1) + * Bit 2 = Global counter reset enable(1) + */ + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005); + + msleep(1000); + + /* Load the shadow and disable the perf counters + * Write 0x2: + * Bit 0 = Stop counters(0) + * Bit 1 = Load the shadow counters(1) + */ + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002); + + /* Read register values to get any >32bit overflow */ + tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK); + cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER); + cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER); + + /* Get the values and add the overflow */ + *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32); + *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); +} + +static bool soc15_need_reset_on_init(struct amdgpu_device *adev) +{ + u32 sol_reg; + + if (adev->flags & AMD_IS_APU) + return false; + + /* Check sOS sign of life register to confirm sys driver and sOS + * are already been loaded. 
+ */ + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + if (sol_reg) + return true; + + return false; +} static const struct amdgpu_asic_funcs soc15_asic_funcs = { @@ -617,6 +742,8 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .invalidate_hdp = &soc15_invalidate_hdp, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega10_doorbell_index_init, + .get_pcie_usage = &soc15_get_pcie_usage, + .need_reset_on_init = &soc15_need_reset_on_init, }; static const struct amdgpu_asic_funcs vega20_asic_funcs = @@ -634,6 +761,8 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .invalidate_hdp = &soc15_invalidate_hdp, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega20_doorbell_index_init, + .get_pcie_usage = &soc15_get_pcie_usage, + .need_reset_on_init = &soc15_need_reset_on_init, }; static int soc15_common_early_init(void *handle) @@ -729,11 +858,13 @@ static int soc15_common_early_init(void *handle) case CHIP_RAVEN: adev->asic_funcs = &soc15_asic_funcs; if (adev->rev_id >= 0x8) - adev->external_rev_id = adev->rev_id + 0x81; + adev->external_rev_id = adev->rev_id + 0x79; else if (adev->pdev->device == 0x15d8) adev->external_rev_id = adev->rev_id + 0x41; + else if (adev->rev_id == 1) + adev->external_rev_id = adev->rev_id + 0x20; else - adev->external_rev_id = 0x1; + adev->external_rev_id = adev->rev_id + 0x01; if (adev->rev_id >= 0x8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | @@ -840,6 +971,22 @@ static int soc15_common_sw_fini(void *handle) return 0; } +static void soc15_doorbell_range_init(struct amdgpu_device *adev) +{ + int i; + struct amdgpu_ring *ring; + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; + adev->nbio_funcs->sdma_doorbell_range(adev, i, + ring->use_doorbell, ring->doorbell_index, + adev->doorbell_index.sdma_doorbell_range); + } + + adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, + adev->irq.ih.doorbell_index); +} + static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -852,6 +999,12 @@ static int soc15_common_hw_init(void *handle) adev->nbio_funcs->init_registers(adev); /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); + /* HW doorbell routing policy: doorbell writing not + * in SDMA/IH/MM/ACV range will be routed to CP. So + * we need to init SDMA/IH/MM/ACV doorbell range prior + * to CP ip block init and ring test. + */ + soc15_doorbell_range_init(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 15da06ddeb75..a20b711a6756 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -99,9 +99,9 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev) */ static int tonga_ih_irq_init(struct amdgpu_device *adev) { - int rb_bufsz; u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr; - u64 wptr_off; + struct amdgpu_ih_ring *ih = &adev->irq.ih; + int rb_bufsz; /* disable irqs */ tonga_ih_disable_interrupts(adev); @@ -118,10 +118,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev) WREG32(mmINTERRUPT_CNTL, interrupt_cntl); /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer*/ - if (adev->irq.ih.use_bus_addr) - WREG32(mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8); - else - WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); + WREG32(mmIH_RB_BASE, ih->gpu_addr >> 8); rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); @@ -136,12 +133,8 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev) WREG32(mmIH_RB_CNTL, ih_rb_cntl); /* set the writeback address whether it's enabled or not */ - if (adev->irq.ih.use_bus_addr) - wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4); - else - wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); - WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); + WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); + WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF); /* set rptr, wptr to 0 */ WREG32(mmIH_RB_RPTR, 0); @@ -193,14 +186,12 @@ static void tonga_ih_irq_disable(struct amdgpu_device *adev) * Used by cz_irq_process(VI). * Returns the value of the wptr. */ -static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) +static u32 tonga_ih_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { u32 wptr, tmp; - if (adev->irq.ih.use_bus_addr) - wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]); - else - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); + wptr = le32_to_cpu(*ih->wptr_cpu); if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); @@ -209,13 +200,13 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) * this should allow us to catchup. */ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); - adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; + wptr, ih->rptr, (wptr + 16) & ih->ptr_mask); + ih->rptr = (wptr + 16) & ih->ptr_mask; tmp = RREG32(mmIH_RB_CNTL); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); WREG32(mmIH_RB_CNTL, tmp); } - return (wptr & adev->irq.ih.ptr_mask); + return (wptr & ih->ptr_mask); } /** @@ -227,16 +218,17 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) * position and also advance the position. */ static void tonga_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry) + struct amdgpu_ih_ring *ih, + struct amdgpu_iv_entry *entry) { /* wptr/rptr are in bytes! */ - u32 ring_index = adev->irq.ih.rptr >> 2; + u32 ring_index = ih->rptr >> 2; uint32_t dw[4]; - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; @@ -246,7 +238,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev, entry->pasid = (dw[2] >> 16) & 0xffff; /* wptr/rptr are in bytes! */ - adev->irq.ih.rptr += 16; + ih->rptr += 16; } /** @@ -256,17 +248,15 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev, * * Set the IH ring buffer rptr. 
*/ -static void tonga_ih_set_rptr(struct amdgpu_device *adev) +static void tonga_ih_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - if (adev->irq.ih.use_doorbell) { + if (ih->use_doorbell) { /* XXX check if swapping is necessary on BE */ - if (adev->irq.ih.use_bus_addr) - adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; - else - adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; - WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr); + *ih->rptr_cpu = ih->rptr; + WDOORBELL32(ih->doorbell_index, ih->rptr); } else { - WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); + WREG32(mmIH_RB_RPTR, ih->rptr); } } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index d69c8f6daaf8..c4fb58667fd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -511,7 +511,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); amdgpu_ring_write(ring, ib->gpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index ee8cd06ddc38..52bd8a654734 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -526,7 +526,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index d4f4a66f8324..c9edddf9f88a 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -977,7 +977,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1003,7 +1003,7 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index aef924026a28..dc461df48da0 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1272,7 +1272,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { struct amdgpu_device *adev = ring->adev; unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1303,7 +1303,7 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 2668effadd27..6ec65cf11112 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ 
-834,7 +834,7 @@ out: static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 9fb34b7d8e03..aadc3e66ebd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -947,7 +947,7 @@ static int vce_v4_0_set_powergating_state(void *handle, #endif static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, - struct amdgpu_ib *ib, bool ctx_switch) + struct amdgpu_ib *ib, uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 89bb2fef90eb..3dbc51f9d3b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1371,7 +1371,7 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { struct amdgpu_device *adev = ring->adev; unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1531,7 +1531,7 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring) static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1736,7 +1736,7 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, - bool ctx_switch) + uint32_t flags) { struct amdgpu_device *adev = ring->adev; unsigned vmid = AMDGPU_JOB_GET_VMID(job); diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 2c250b01a903..6d1f804277f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -50,6 +50,22 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); adev->irq.ih.enabled = true; + + if (adev->irq.ih1.ring_size) { + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, + RB_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + adev->irq.ih1.enabled = true; + } + + if (adev->irq.ih2.ring_size) { + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, + RB_ENABLE, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + adev->irq.ih2.enabled = true; + } } /** @@ -71,6 +87,53 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0); adev->irq.ih.enabled = false; adev->irq.ih.rptr = 0; + + if (adev->irq.ih1.ring_size) { + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, + RB_ENABLE, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); + adev->irq.ih1.enabled = false; + adev->irq.ih1.rptr = 0; + } + + if (adev->irq.ih2.ring_size) { + ih_rb_cntl = 
RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, + RB_ENABLE, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); + adev->irq.ih2.enabled = false; + adev->irq.ih2.rptr = 0; + } +} + +static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl) +{ + int rb_bufsz = order_base_2(ih->ring_size / 4); + + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + MC_SPACE, ih->use_bus_addr ? 1 : 4); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_CLEAR, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_OVERFLOW_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); + /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register + * value is written to memory + */ + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, + WPTR_WRITEBACK_ENABLE, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0); + + return ih_rb_cntl; } /** @@ -86,50 +149,32 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) */ static int vega10_ih_irq_init(struct amdgpu_device *adev) { + struct amdgpu_ih_ring *ih; int ret = 0; - int rb_bufsz; u32 ih_rb_cntl, ih_doorbell_rtpr; u32 tmp; - u64 wptr_off; /* disable irqs */ vega10_ih_disable_interrupts(adev); adev->nbio_funcs->ih_control(adev); - ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); + ih = &adev->irq.ih; /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ - if (adev->irq.ih.use_bus_addr) { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1); - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (adev->irq.ih.gpu_addr >> 40) & 0xff); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4); - } - rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); - /* Ring Buffer write pointer writeback. 
If enabled, IH_RB_WPTR register value is written to memory */ - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0); - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0); - - if (adev->irq.msi_enabled) - ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff); + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); + ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); + ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, + !!adev->irq.msi_enabled); WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); /* set the writeback address whether it's enabled or not */ - if (adev->irq.ih.use_bus_addr) - wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4); - else - wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, + lower_32_bits(ih->wptr_addr)); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, + upper_32_bits(ih->wptr_addr) & 0xFFFF); /* set rptr, wptr to 0 */ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); @@ -137,17 +182,48 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR); if (adev->irq.ih.use_doorbell) { - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, - OFFSET, adev->irq.ih.doorbell_index); - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, OFFSET, + adev->irq.ih.doorbell_index); + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, ENABLE, 1); } else { - ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, + ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, + IH_DOORBELL_RPTR, ENABLE, 0); } WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); - adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, - adev->irq.ih.doorbell_index); + + ih = &adev->irq.ih1; + if (ih->ring_size) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1, + (ih->gpu_addr >> 40) & 0xff); + + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0); + } + + ih = &adev->irq.ih2; + if (ih->ring_size) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2, + (ih->gpu_addr >> 40) & 0xff); + + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); + ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + + /* set rptr, wptr to 0 */ + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0); + WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0); + } tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, @@ -191,32 +267,58 @@ static void vega10_ih_irq_disable(struct amdgpu_device *adev) * ring buffer 
overflow and deal with it. * Returns the value of the wptr. */ -static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) +static u32 vega10_ih_get_wptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - u32 wptr, tmp; + u32 wptr, reg, tmp; - if (adev->irq.ih.use_bus_addr) - wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]); + wptr = le32_to_cpu(*ih->wptr_cpu); + + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + /* Double check that the overflow wasn't already cleared. */ + + if (ih == &adev->irq.ih) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR); + else if (ih == &adev->irq.ih1) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1); + else if (ih == &adev->irq.ih2) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2); else - wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); - - if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { - wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); - - /* When a ring buffer overflow happen start parsing interrupt - * from the last not overwritten vector (wptr + 32). Hopefully - * this should allow us to catchup. - */ - tmp = (wptr + 32) & adev->irq.ih.ptr_mask; - dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", - wptr, adev->irq.ih.rptr, tmp); - adev->irq.ih.rptr = tmp; - - tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL)); - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); - WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp); - } - return (wptr & adev->irq.ih.ptr_mask); + BUG(); + + wptr = RREG32_NO_KIQ(reg); + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + + wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); + + /* When a ring buffer overflow happen start parsing interrupt + * from the last not overwritten vector (wptr + 32). Hopefully + * this should allow us to catchup. + */ + tmp = (wptr + 32) & ih->ptr_mask; + dev_warn(adev->dev, "IH ring buffer overflow " + "(0x%08X, 0x%08X, 0x%08X)\n", + wptr, ih->rptr, tmp); + ih->rptr = tmp; + + if (ih == &adev->irq.ih) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL); + else if (ih == &adev->irq.ih1) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1); + else if (ih == &adev->irq.ih2) + reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2); + else + BUG(); + + tmp = RREG32_NO_KIQ(reg); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + WREG32_NO_KIQ(reg, tmp); + +out: + return (wptr & ih->ptr_mask); } /** @@ -228,20 +330,21 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev) * position and also advance the position. */ static void vega10_ih_decode_iv(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry) + struct amdgpu_ih_ring *ih, + struct amdgpu_iv_entry *entry) { /* wptr/rptr are in bytes! 
*/ - u32 ring_index = adev->irq.ih.rptr >> 2; + u32 ring_index = ih->rptr >> 2; uint32_t dw[8]; - dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); - dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); - dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); - dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); - dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]); - dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]); - dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]); - dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]); + dw[0] = le32_to_cpu(ih->ring[ring_index + 0]); + dw[1] = le32_to_cpu(ih->ring[ring_index + 1]); + dw[2] = le32_to_cpu(ih->ring[ring_index + 2]); + dw[3] = le32_to_cpu(ih->ring[ring_index + 3]); + dw[4] = le32_to_cpu(ih->ring[ring_index + 4]); + dw[5] = le32_to_cpu(ih->ring[ring_index + 5]); + dw[6] = le32_to_cpu(ih->ring[ring_index + 6]); + dw[7] = le32_to_cpu(ih->ring[ring_index + 7]); entry->client_id = dw[0] & 0xff; entry->src_id = (dw[0] >> 8) & 0xff; @@ -257,9 +360,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev, entry->src_data[2] = dw[6]; entry->src_data[3] = dw[7]; - /* wptr/rptr are in bytes! */ - adev->irq.ih.rptr += 32; + ih->rptr += 32; } /** @@ -269,37 +371,95 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev, * * Set the IH ring buffer rptr. */ -static void vega10_ih_set_rptr(struct amdgpu_device *adev) +static void vega10_ih_set_rptr(struct amdgpu_device *adev, + struct amdgpu_ih_ring *ih) { - if (adev->irq.ih.use_doorbell) { + if (ih->use_doorbell) { /* XXX check if swapping is necessary on BE */ - if (adev->irq.ih.use_bus_addr) - adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; - else - adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; - WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr); - } else { - WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, adev->irq.ih.rptr); + *ih->rptr_cpu = ih->rptr; + WDOORBELL32(ih->doorbell_index, ih->rptr); + } else if (ih == &adev->irq.ih) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr); + } else if (ih == &adev->irq.ih1) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr); + } else if (ih == &adev->irq.ih2) { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr); } } +/** + * vega10_ih_self_irq - dispatch work for ring 1 and 2 + * + * @adev: amdgpu_device pointer + * @source: irq source + * @entry: IV with WPTR update + * + * Update the WPTR from the IV and schedule work to handle the entries. 
+ */ +static int vega10_ih_self_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t wptr = cpu_to_le32(entry->src_data[0]); + + switch (entry->ring_id) { + case 1: + *adev->irq.ih1.wptr_cpu = wptr; + schedule_work(&adev->irq.ih1_work); + break; + case 2: + *adev->irq.ih2.wptr_cpu = wptr; + schedule_work(&adev->irq.ih2_work); + break; + default: break; + } + return 0; +} + +static const struct amdgpu_irq_src_funcs vega10_ih_self_irq_funcs = { + .process = vega10_ih_self_irq, +}; + +static void vega10_ih_set_self_irq_funcs(struct amdgpu_device *adev) +{ + adev->irq.self_irq.num_types = 0; + adev->irq.self_irq.funcs = &vega10_ih_self_irq_funcs; +} + static int vega10_ih_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; vega10_ih_set_interrupt_funcs(adev); + vega10_ih_set_self_irq_funcs(adev); return 0; } static int vega10_ih_sw_init(void *handle) { - int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0, + &adev->irq.self_irq); + if (r) + return r; r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true); if (r) return r; + if (adev->asic_type == CHIP_VEGA10) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true); + if (r) + return r; + + r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true); + if (r) + return r; + } + + /* TODO add doorbell for IH1 & IH2 as well */ adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; @@ -313,6 +473,8 @@ static int vega10_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih2); + amdgpu_ih_ring_fini(adev, &adev->irq.ih1); amdgpu_ih_ring_fini(adev, &adev->irq.ih); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c index 422674bb3cdf..a8e92638a2e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c @@ -70,8 +70,8 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START; adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END; adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0; - adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0; - adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1; + adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL64_sDMA_ENGINE0; + adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL64_sDMA_ENGINE1; adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH; adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1; adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3; @@ -81,7 +81,12 @@ void vega10_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3; adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5; adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7; + + adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL64_FIRST_NON_CP; + adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL64_LAST_NON_CP; + /* In unit of dword doorbell */ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1; + adev->doorbell_index.sdma_doorbell_range = 4; } diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c 
b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index edce413fda9a..0db84386252a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -68,14 +68,14 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START; adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END; adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0; - adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0; - adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1; - adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2; - adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3; - adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4; - adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5; - adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6; - adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7; + adev->doorbell_index.sdma_engine[0] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0; + adev->doorbell_index.sdma_engine[1] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1; + adev->doorbell_index.sdma_engine[2] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2; + adev->doorbell_index.sdma_engine[3] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3; + adev->doorbell_index.sdma_engine[4] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4; + adev->doorbell_index.sdma_engine[5] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5; + adev->doorbell_index.sdma_engine[6] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6; + adev->doorbell_index.sdma_engine[7] = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7; adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH; adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1; adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3; @@ -85,6 +85,11 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3; adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5; adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7; + + adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP; + adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP; + adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1; + adev->doorbell_index.sdma_doorbell_range = 20; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 77e367459101..5e5b42a0744a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -941,6 +941,69 @@ static bool vi_need_full_reset(struct amdgpu_device *adev) } } +static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1) +{ + uint32_t perfctr = 0; + uint64_t cnt0_of, cnt1_of; + int tmp; + + /* This reports 0 on APUs, so return to avoid writing/reading registers + * that may or may not be different from their GPU counterparts + */ + if (adev->flags & AMD_IS_APU) + return; + + /* Set the 2 events that we wish to watch, defined above */ + /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */ + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); + + /* Write to enable desired perf counters */ + WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr); + /* Zero 
out and enable the perf counters + * Write 0x5: + * Bit 0 = Start all counters(1) + * Bit 2 = Global counter reset enable(1) + */ + WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005); + + msleep(1000); + + /* Load the shadow and disable the perf counters + * Write 0x2: + * Bit 0 = Stop counters(0) + * Bit 1 = Load the shadow counters(1) + */ + WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002); + + /* Read register values to get any >32bit overflow */ + tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK); + cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER); + cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER); + + /* Get the values and add the overflow */ + *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32); + *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); +} + +static bool vi_need_reset_on_init(struct amdgpu_device *adev) +{ + u32 clock_cntl, pc; + + if (adev->flags & AMD_IS_APU) + return false; + + /* check if the SMC is already running */ + clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); + pc = RREG32_SMC(ixSMC_PC_C); + if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) && + (0x20100 <= pc)) + return true; + + return false; +} + static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, @@ -956,6 +1019,8 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .invalidate_hdp = &vi_invalidate_hdp, .need_full_reset = &vi_need_full_reset, .init_doorbell_index = &legacy_doorbell_index_init, + .get_pcie_usage = &vi_get_pcie_usage, + .need_reset_on_init = &vi_need_reset_on_init, }; #define CZ_REV_BRISTOL(rev) \ @@ -1726,8 +1791,8 @@ void legacy_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6; adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7; adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0; - adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0; - adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1; + adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0; + adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1; adev->doorbell_index.ih = AMDGPU_DOORBELL_IH; adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5d85ff341385..2e7c44955f43 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, return 0; } -#if CONFIG_X86_64 +#ifdef CONFIG_X86_64 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, uint32_t *num_entries, struct crat_subtype_iolink *sub_type_hdr) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 8372556b52eb..c6c9530e704e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -134,12 +134,18 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) */ q->doorbell_id = q->properties.queue_id; } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - /* For SDMA queues on SOC15, use static doorbell - * assignments based on the engine and queue. + /* For SDMA queues on SOC15 with 8-byte doorbell, use static + * doorbell assignments based on the engine and queue id. 
+ * The doobell index distance between RLC (2*i) and (2*i+1) + * for a SDMA engine is 512. */ - q->doorbell_id = dev->shared_resources.sdma_doorbell - [q->properties.sdma_engine_id] - [q->properties.sdma_queue_id]; + uint32_t *idx_offset = + dev->shared_resources.sdma_doorbell_idx; + + q->doorbell_id = idx_offset[q->properties.sdma_engine_id] + + (q->properties.sdma_queue_id & 1) + * KFD_QUEUE_DOORBELL_MIRROR_OFFSET + + (q->properties.sdma_queue_id >> 1); } else { /* For CP queues on SOC15 reserve a free doorbell ID */ unsigned int found; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 8018163414ff..932007eb9168 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -23,22 +23,7 @@ #include #include #include "kfd_priv.h" - -static const struct kgd2kfd_calls kgd2kfd = { - .exit = kgd2kfd_exit, - .probe = kgd2kfd_probe, - .device_init = kgd2kfd_device_init, - .device_exit = kgd2kfd_device_exit, - .interrupt = kgd2kfd_interrupt, - .suspend = kgd2kfd_suspend, - .resume = kgd2kfd_resume, - .quiesce_mm = kgd2kfd_quiesce_mm, - .resume_mm = kgd2kfd_resume_mm, - .schedule_evict_and_restore_process = - kgd2kfd_schedule_evict_and_restore_process, - .pre_reset = kgd2kfd_pre_reset, - .post_reset = kgd2kfd_post_reset, -}; +#include "amdgpu_amdkfd.h" static int kfd_init(void) { @@ -91,20 +76,10 @@ static void kfd_exit(void) kfd_chardev_exit(); } -int kgd2kfd_init(unsigned int interface_version, - const struct kgd2kfd_calls **g2f) +int kgd2kfd_init() { - int err; - - err = kfd_init(); - if (err) - return err; - - *g2f = &kgd2kfd; - - return 0; + return kfd_init(); } -EXPORT_SYMBOL(kgd2kfd_init); void kgd2kfd_exit(void) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 0689d4ccbbc0..0eeee3c6d6dc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -97,17 +97,29 @@ #define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2) #define KFD_CWSR_TMA_OFFSET PAGE_SIZE +#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ + (KFD_MAX_NUM_OF_PROCESSES * \ + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) + +#define KFD_KERNEL_QUEUE_SIZE 2048 + +/* + * 512 = 0x200 + * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the + * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA. + * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC + * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in + * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE. + */ +#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512 + + /* * Kernel module parameter to specify maximum number of supported queues per * device */ extern int max_num_of_queues_per_device; -#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ - (KFD_MAX_NUM_OF_PROCESSES * \ - KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) - -#define KFD_KERNEL_QUEUE_SIZE 2048 /* Kernel module parameter to specify the scheduling policy */ extern int sched_policy; @@ -266,14 +278,6 @@ struct kfd_dev { bool pci_atomic_requested; }; -/* KGD2KFD callbacks */ -void kgd2kfd_exit(void); -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, - struct pci_dev *pdev, const struct kfd2kgd_calls *f2g); -bool kgd2kfd_device_init(struct kfd_dev *kfd, - const struct kgd2kfd_shared_resources *gpu_resources); -void kgd2kfd_device_exit(struct kfd_dev *kfd); - enum kfd_mempool { KFD_MEMPOOL_SYSTEM_CACHEABLE = 1, KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2, @@ -541,11 +545,6 @@ struct qcm_process_device { /* Approx. 
time before evicting the process again */ #define PROCESS_ACTIVE_TIME_MS 10 -int kgd2kfd_quiesce_mm(struct mm_struct *mm); -int kgd2kfd_resume_mm(struct mm_struct *mm); -int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, - struct dma_fence *fence); - /* 8 byte handle containing GPU ID in the most significant 4 bytes and * idr_handle in the least significant 4 bytes */ @@ -800,20 +799,11 @@ int kfd_numa_node_to_apic_id(int numa_node_id); /* Interrupts */ int kfd_interrupt_init(struct kfd_dev *dev); void kfd_interrupt_exit(struct kfd_dev *dev); -void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag); -/* Power Management */ -void kgd2kfd_suspend(struct kfd_dev *kfd); -int kgd2kfd_resume(struct kfd_dev *kfd); - -/* GPU reset */ -int kgd2kfd_pre_reset(struct kfd_dev *kfd); -int kgd2kfd_post_reset(struct kfd_dev *kfd); - /* amdkfd Apertures */ int kfd_init_apertures(struct kfd_process *process); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 80b36e860a0a..4bdae78bab8e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -607,13 +607,17 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, if (!qpd->doorbell_bitmap) return -ENOMEM; - /* Mask out any reserved doorbells */ - for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++) - if ((dev->shared_resources.reserved_doorbell_mask & i) == - dev->shared_resources.reserved_doorbell_val) { + /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */ + for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { + if (i >= dev->shared_resources.non_cp_doorbells_start + && i <= dev->shared_resources.non_cp_doorbells_end) { set_bit(i, qpd->doorbell_bitmap); - pr_debug("reserved doorbell 0x%03x\n", i); + set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + qpd->doorbell_bitmap); + pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i, + i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); } + } return 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f4fa40c387d3..2f26581b93ff 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -303,12 +303,11 @@ static void dm_pflip_high_irq(void *interrupt_params) return; } + /* Update to correct count(s) if racing with vblank irq */ + amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); /* wake up userspace */ if (amdgpu_crtc->event) { - /* Update to correct count(s) if racing with vblank irq */ - drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); - drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); /* page flip completed. 
clean up */ @@ -786,12 +785,13 @@ static int dm_suspend(void *handle) struct amdgpu_display_manager *dm = &adev->dm; int ret = 0; + WARN_ON(adev->dm.cached_state); + adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); + s3_handle_mst(adev->ddev, true); amdgpu_dm_irq_suspend(adev); - WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); @@ -1705,7 +1705,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) dc_resource_state_copy_construct_current(adev->dm.dc, state->context); - drm_atomic_private_obj_init(&adev->dm.atomic_obj, + drm_atomic_private_obj_init(adev->ddev, + &adev->dm.atomic_obj, &state->base, &dm_atomic_state_funcs); @@ -2296,6 +2297,71 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, return r; } +static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags) +{ + uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B); + + return offset ? (address + offset * 256) : 0; +} + +static bool fill_plane_dcc_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + struct dc_plane_state *plane_state, + uint64_t info) +{ + struct dc *dc = adev->dm.dc; + struct dc_dcc_surface_param input; + struct dc_surface_dcc_cap output; + uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B); + uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0; + uint64_t dcc_address; + + memset(&input, 0, sizeof(input)); + memset(&output, 0, sizeof(output)); + + if (!offset) + return false; + + if (!dc->cap_funcs.get_dcc_compression_cap) + return false; + + input.format = plane_state->format; + input.surface_size.width = + plane_state->plane_size.grph.surface_size.width; + input.surface_size.height = + plane_state->plane_size.grph.surface_size.height; + input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle; + + if (plane_state->rotation == ROTATION_ANGLE_0 || + plane_state->rotation == ROTATION_ANGLE_180) + input.scan = SCAN_DIRECTION_HORIZONTAL; + else if (plane_state->rotation == ROTATION_ANGLE_90 || + plane_state->rotation == ROTATION_ANGLE_270) + input.scan = SCAN_DIRECTION_VERTICAL; + + if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) + return false; + + if (!output.capable) + return false; + + if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0) + return false; + + plane_state->dcc.enable = 1; + plane_state->dcc.grph.meta_pitch = + AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1; + plane_state->dcc.grph.independent_64b_blks = i64b; + + dcc_address = get_dcc_address(afb->address, info); + plane_state->address.grph.meta_addr.low_part = + lower_32_bits(dcc_address); + plane_state->address.grph.meta_addr.high_part = + upper_32_bits(dcc_address); + + return true; +} + static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, struct dc_plane_state *plane_state, const struct amdgpu_framebuffer *amdgpu_fb) @@ -2348,6 +2414,10 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, return -EINVAL; } + memset(&plane_state->address, 0, sizeof(plane_state->address)); + memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info)); + memset(&plane_state->dcc, 0, sizeof(plane_state->dcc)); + if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS; plane_state->plane_size.grph.surface_size.x = 0; @@ -2379,8 +2449,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, 
plane_state->color_space = COLOR_SPACE_YCBCR709; } - memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info)); - /* Fill GFX8 params */ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { unsigned int bankw, bankh, mtaspect, tile_split, num_banks; @@ -2429,6 +2497,9 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, plane_state->tiling_info.gfx9.swizzle = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); plane_state->tiling_info.gfx9.shaderEnable = 1; + + fill_plane_dcc_attributes(adev, amdgpu_fb, plane_state, + tiling_flags); } plane_state->visible = true; @@ -2592,7 +2663,7 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) * according to HDMI spec, we use YCbCr709 and YCbCr601 * respectively */ - if (dc_crtc_timing->pix_clk_khz > 27030) { + if (dc_crtc_timing->pix_clk_100hz > 270300) { if (dc_crtc_timing->flags.Y_ONLY) color_space = COLOR_SPACE_YCBCR709_LIMITED; @@ -2635,7 +2706,7 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_ if (timing_out->display_color_depth <= COLOR_DEPTH_888) return; do { - normalized_clk = timing_out->pix_clk_khz; + normalized_clk = timing_out->pix_clk_100hz / 10; /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) normalized_clk /= 2; @@ -2678,10 +2749,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, timing_out->v_border_bottom = 0; /* TODO: un-hardcode */ if (drm_mode_is_420_only(info, mode_in) - && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) - && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; else timing_out->pixel_encoding = PIXEL_ENCODING_RGB; @@ -2716,14 +2787,14 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; - timing_out->pix_clk_khz = mode_in->crtc_clock; + timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; timing_out->aspect_ratio = get_aspect_ratio(mode_in); stream->output_color_space = get_output_color_space(timing_out); stream->out_transfer_func->type = TF_TYPE_PREDEFINED; stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) adjust_colour_depth_from_display_info(timing_out, info); } @@ -2844,7 +2915,7 @@ static void set_master_stream(struct dc_stream_state *stream_set[], if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) { int refresh_rate = 0; - refresh_rate = (stream_set[j]->timing.pix_clk_khz*1000)/ + refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/ (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total); if (refresh_rate > highest_rfr) { highest_rfr = refresh_rate; @@ -2901,11 +2972,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, drm_connector = &aconnector->base; if (!aconnector->dc_sink) { - if (!aconnector->mst_port) { - sink = create_fake_sink(aconnector); - if (!sink) - return stream; - } + sink = create_fake_sink(aconnector); + if (!sink) + return stream; } else { sink = aconnector->dc_sink; } @@ -2917,6 +2986,8 @@ 
create_stream_for_sink(struct amdgpu_dm_connector *aconnector, goto finish; } + stream->dm_stream_context = aconnector; + list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { @@ -2968,10 +3039,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, drm_connector, sink); - update_stream_signal(stream); - - if (dm_state && dm_state->freesync_capable) - stream->ignore_msa_timing_param = true; + update_stream_signal(stream, sink); finish: if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) @@ -3544,6 +3612,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, struct amdgpu_bo *rbo; uint64_t chroma_addr = 0; struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; + uint64_t tiling_flags, dcc_address; unsigned int awidth; uint32_t domain; int r; @@ -3584,6 +3653,9 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, DRM_ERROR("%p bind failed\n", rbo); return r; } + + amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); + amdgpu_bo_unreserve(rbo); afb->address = amdgpu_bo_gpu_offset(rbo); @@ -3597,6 +3669,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { plane_state->address.grph.addr.low_part = lower_32_bits(afb->address); plane_state->address.grph.addr.high_part = upper_32_bits(afb->address); + + dcc_address = + get_dcc_address(afb->address, tiling_flags); + plane_state->address.grph.meta_addr.low_part = + lower_32_bits(dcc_address); + plane_state->address.grph.meta_addr.high_part = + upper_32_bits(dcc_address); } else { awidth = ALIGN(new_state->fb->width, 64); plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; @@ -3711,7 +3790,6 @@ static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { * check will succeed, and let DC implement proper check */ static const uint32_t rgb_formats[] = { - DRM_FORMAT_RGB888, DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGBA8888, @@ -4082,7 +4160,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, } if (connector_type == DRM_MODE_CONNECTOR_HDMIA || - connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) { drm_connector_attach_vrr_capable_property( &aconnector->base); } @@ -4468,20 +4547,6 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc) acrtc->crtc_id); } -struct dc_stream_status *dc_state_get_stream_status( - struct dc_state *state, - struct dc_stream_state *stream) -{ - uint8_t i; - - for (i = 0; i < state->stream_count; i++) { - if (stream == state->streams[i]) - return &state->stream_status[i]; - } - - return NULL; -} - static void update_freesync_state_on_stream( struct amdgpu_display_manager *dm, struct dm_crtc_state *new_crtc_state, @@ -4535,12 +4600,12 @@ static void update_freesync_state_on_stream( TRANSFER_FUNC_UNKNOWN, &vrr_infopacket); - new_crtc_state->freesync_timing_changed = + new_crtc_state->freesync_timing_changed |= (memcmp(&new_crtc_state->vrr_params.adjust, &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0); - new_crtc_state->freesync_vrr_info_changed = + new_crtc_state->freesync_vrr_info_changed |= (memcmp(&new_crtc_state->vrr_infopacket, &vrr_infopacket, sizeof(vrr_infopacket)) != 0); @@ -4556,254 +4621,6 @@ static void update_freesync_state_on_stream( new_crtc_state->base.crtc->base.id, 
(int)new_crtc_state->base.vrr_enabled, (int)vrr_params.state); - - if (new_crtc_state->freesync_timing_changed) - DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n", - new_crtc_state->base.crtc->base.id, - vrr_params.adjust.v_total_min, - vrr_params.adjust.v_total_max); -} - -/* - * Executes flip - * - * Waits on all BO's fences and for proper vblank count - */ -static void amdgpu_dm_do_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - uint32_t target, - struct dc_state *state) -{ - unsigned long flags; - uint64_t timestamp_ns; - uint32_t target_vblank; - int r, vpos, hpos; - struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); - struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); - struct amdgpu_device *adev = crtc->dev->dev_private; - bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; - struct dc_flip_addrs addr = { {0} }; - /* TODO eliminate or rename surface_update */ - struct dc_surface_update surface_updates[1] = { {0} }; - struct dc_stream_update stream_update = {0}; - struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); - struct dc_stream_status *stream_status; - struct dc_plane_state *surface; - - - /* Prepare wait for target vblank early - before the fence-waits */ - target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + - amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id); - - /* - * TODO This might fail and hence better not used, wait - * explicitly on fences instead - * and in general should be called for - * blocking commit to as per framework helpers - */ - r = amdgpu_bo_reserve(abo, true); - if (unlikely(r != 0)) { - DRM_ERROR("failed to reserve buffer before flip\n"); - WARN_ON(1); - } - - /* Wait for all fences on this FB */ - WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false, - MAX_SCHEDULE_TIMEOUT) < 0); - - amdgpu_bo_unreserve(abo); - - /* - * Wait until we're out of the vertical blank period before the one - * targeted by the flip - */ - while ((acrtc->enabled && - (amdgpu_display_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, - 0, &vpos, &hpos, NULL, - NULL, &crtc->hwmode) - & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == - (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && - (int)(target_vblank - - amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) { - usleep_range(1000, 1100); - } - - /* Flip */ - spin_lock_irqsave(&crtc->dev->event_lock, flags); - - WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE); - WARN_ON(!acrtc_state->stream); - - addr.address.grph.addr.low_part = lower_32_bits(afb->address); - addr.address.grph.addr.high_part = upper_32_bits(afb->address); - addr.flip_immediate = async_flip; - - timestamp_ns = ktime_get_ns(); - addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000); - - - if (acrtc->base.state->event) - prepare_flip_isr(acrtc); - - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - - stream_status = dc_stream_get_status(acrtc_state->stream); - if (!stream_status) { - DRM_ERROR("No stream status for CRTC: id=%d\n", - acrtc->crtc_id); - return; - } - - surface = stream_status->plane_states[0]; - surface_updates->surface = surface; - - if (!surface) { - DRM_ERROR("No surface for CRTC: id=%d\n", - acrtc->crtc_id); - return; - } - surface_updates->flip_addr = &addr; - - if (acrtc_state->stream) { - update_freesync_state_on_stream( - &adev->dm, - acrtc_state, - acrtc_state->stream, - surface, - addr.flip_timestamp_in_us); - - if 
(acrtc_state->freesync_timing_changed) - stream_update.adjust = - &acrtc_state->stream->adjust; - - if (acrtc_state->freesync_vrr_info_changed) - stream_update.vrr_infopacket = - &acrtc_state->stream->vrr_infopacket; - } - - /* Update surface timing information. */ - surface->time.time_elapsed_in_us[surface->time.index] = - addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us; - surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us; - surface->time.index++; - if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) - surface->time.index = 0; - - mutex_lock(&adev->dm.dc_lock); - - dc_commit_updates_for_stream(adev->dm.dc, - surface_updates, - 1, - acrtc_state->stream, - &stream_update, - &surface_updates->surface, - state); - mutex_unlock(&adev->dm.dc_lock); - - DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n", - __func__, - addr.address.grph.addr.high_part, - addr.address.grph.addr.low_part); -} - -/* - * TODO this whole function needs to go - * - * dc_surface_update is needlessly complex. See if we can just replace this - * with a dc_plane_state and follow the atomic model a bit more closely here. - */ -static bool commit_planes_to_stream( - struct amdgpu_display_manager *dm, - struct dc *dc, - struct dc_plane_state **plane_states, - uint8_t new_plane_count, - struct dm_crtc_state *dm_new_crtc_state, - struct dm_crtc_state *dm_old_crtc_state, - struct dc_state *state) -{ - /* no need to dynamically allocate this. it's pretty small */ - struct dc_surface_update updates[MAX_SURFACES]; - struct dc_flip_addrs *flip_addr; - struct dc_plane_info *plane_info; - struct dc_scaling_info *scaling_info; - int i; - struct dc_stream_state *dc_stream = dm_new_crtc_state->stream; - struct dc_stream_update *stream_update = - kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL); - unsigned int abm_level; - - if (!stream_update) { - BREAK_TO_DEBUGGER(); - return false; - } - - flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs), - GFP_KERNEL); - plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info), - GFP_KERNEL); - scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info), - GFP_KERNEL); - - if (!flip_addr || !plane_info || !scaling_info) { - kfree(flip_addr); - kfree(plane_info); - kfree(scaling_info); - kfree(stream_update); - return false; - } - - memset(updates, 0, sizeof(updates)); - - stream_update->src = dc_stream->src; - stream_update->dst = dc_stream->dst; - stream_update->out_transfer_func = dc_stream->out_transfer_func; - - if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) { - abm_level = dm_new_crtc_state->abm_level; - stream_update->abm_level = &abm_level; - } - - for (i = 0; i < new_plane_count; i++) { - updates[i].surface = plane_states[i]; - updates[i].gamma = - (struct dc_gamma *)plane_states[i]->gamma_correction; - updates[i].in_transfer_func = plane_states[i]->in_transfer_func; - flip_addr[i].address = plane_states[i]->address; - flip_addr[i].flip_immediate = plane_states[i]->flip_immediate; - plane_info[i].color_space = plane_states[i]->color_space; - plane_info[i].format = plane_states[i]->format; - plane_info[i].plane_size = plane_states[i]->plane_size; - plane_info[i].rotation = plane_states[i]->rotation; - plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror; - plane_info[i].stereo_format = plane_states[i]->stereo_format; - plane_info[i].tiling_info = plane_states[i]->tiling_info; - plane_info[i].visible = plane_states[i]->visible; - plane_info[i].per_pixel_alpha = 
plane_states[i]->per_pixel_alpha; - plane_info[i].dcc = plane_states[i]->dcc; - scaling_info[i].scaling_quality = plane_states[i]->scaling_quality; - scaling_info[i].src_rect = plane_states[i]->src_rect; - scaling_info[i].dst_rect = plane_states[i]->dst_rect; - scaling_info[i].clip_rect = plane_states[i]->clip_rect; - - updates[i].flip_addr = &flip_addr[i]; - updates[i].plane_info = &plane_info[i]; - updates[i].scaling_info = &scaling_info[i]; - } - - mutex_lock(&dm->dc_lock); - dc_commit_updates_for_stream( - dc, - updates, - new_plane_count, - dc_stream, stream_update, plane_states, state); - mutex_unlock(&dm->dc_lock); - - kfree(flip_addr); - kfree(plane_info); - kfree(scaling_info); - kfree(stream_update); - return true; } static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, @@ -4813,32 +4630,58 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_crtc *pcrtc, bool *wait_for_vblank) { - uint32_t i; + uint32_t i, r; + uint64_t timestamp_ns; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; - struct dc_stream_state *dc_stream_attach; - struct dc_plane_state *plane_states_constructed[MAX_SURFACES]; struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); struct drm_crtc_state *new_pcrtc_state = drm_atomic_get_new_crtc_state(state, pcrtc); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); - int planes_count = 0; + int flip_count = 0, planes_count = 0, vpos, hpos; unsigned long flags; + struct amdgpu_bo *abo; + uint64_t tiling_flags, dcc_address; + uint32_t target, target_vblank; + uint64_t last_flip_vblank; + bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE; + + struct { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; + struct dc_stream_update stream_update; + } *flip; + + struct { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_plane_info plane_infos[MAX_SURFACES]; + struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_stream_update stream_update; + } *full; + + flip = kzalloc(sizeof(*flip), GFP_KERNEL); + full = kzalloc(sizeof(*full), GFP_KERNEL); + + if (!flip || !full) { + dm_error("Failed to allocate update bundles\n"); + goto cleanup; + } /* update planes when needed */ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state; struct drm_framebuffer *fb = new_plane_state->fb; + struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); bool pflip_needed; + struct dc_plane_state *dc_plane; struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); - if (plane->type == DRM_PLANE_TYPE_CURSOR) { - handle_cursor_update(plane, old_plane_state); + /* Cursor plane is handled after stream updates */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; - } if (!fb || !crtc || pcrtc != crtc) continue; @@ -4847,73 +4690,228 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, if (!new_crtc_state->active) continue; - pflip_needed = !state->allow_modeset; + pflip_needed = old_plane_state->fb && + old_plane_state->fb != new_plane_state->fb; - spin_lock_irqsave(&crtc->dev->event_lock, flags); - if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) { - DRM_ERROR("%s: acrtc %d, already busy\n", - __func__, - 
acrtc_attach->crtc_id); - /* In commit tail framework this cannot happen */ - WARN_ON(1); + dc_plane = dm_new_plane_state->dc_state; + + if (pflip_needed) { + /* + * Assume even ONE crtc with immediate flip means + * entire can't wait for VBLANK + * TODO Check if it's correct + */ + if (new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) + *wait_for_vblank = false; + + /* + * TODO This might fail and hence better not used, wait + * explicitly on fences instead + * and in general should be called for + * blocking commit to as per framework helpers + */ + abo = gem_to_amdgpu_bo(fb->obj[0]); + r = amdgpu_bo_reserve(abo, true); + if (unlikely(r != 0)) + DRM_ERROR("failed to reserve buffer before flip\n"); + + /* + * Wait for all fences on this FB. Do limited wait to avoid + * deadlock during GPU reset when this fence will not signal + * but we hold reservation lock for the BO. + */ + r = reservation_object_wait_timeout_rcu(abo->tbo.resv, + true, false, + msecs_to_jiffies(5000)); + if (unlikely(r == 0)) + DRM_ERROR("Waiting for fences timed out."); + + + + amdgpu_bo_get_tiling_flags(abo, &tiling_flags); + + amdgpu_bo_unreserve(abo); + + flip->flip_addrs[flip_count].address.grph.addr.low_part = lower_32_bits(afb->address); + flip->flip_addrs[flip_count].address.grph.addr.high_part = upper_32_bits(afb->address); + + dcc_address = get_dcc_address(afb->address, tiling_flags); + flip->flip_addrs[flip_count].address.grph.meta_addr.low_part = lower_32_bits(dcc_address); + flip->flip_addrs[flip_count].address.grph.meta_addr.high_part = upper_32_bits(dcc_address); + + flip->flip_addrs[flip_count].flip_immediate = + (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; + + timestamp_ns = ktime_get_ns(); + flip->flip_addrs[flip_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); + flip->surface_updates[flip_count].flip_addr = &flip->flip_addrs[flip_count]; + flip->surface_updates[flip_count].surface = dc_plane; + + if (!flip->surface_updates[flip_count].surface) { + DRM_ERROR("No surface for CRTC: id=%d\n", + acrtc_attach->crtc_id); + continue; + } + + if (plane == pcrtc->primary) + update_freesync_state_on_stream( + dm, + acrtc_state, + acrtc_state->stream, + dc_plane, + flip->flip_addrs[flip_count].flip_timestamp_in_us); + + DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n", + __func__, + flip->flip_addrs[flip_count].address.grph.addr.high_part, + flip->flip_addrs[flip_count].address.grph.addr.low_part); + + flip_count += 1; } - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) { - WARN_ON(!dm_new_plane_state->dc_state); + full->surface_updates[planes_count].surface = dc_plane; + if (new_pcrtc_state->color_mgmt_changed) { + full->surface_updates[planes_count].gamma = dc_plane->gamma_correction; + full->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + } - plane_states_constructed[planes_count] = dm_new_plane_state->dc_state; - dc_stream_attach = acrtc_state->stream; - planes_count++; + full->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality; + full->scaling_infos[planes_count].src_rect = dc_plane->src_rect; + full->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect; + full->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect; + full->surface_updates[planes_count].scaling_info = &full->scaling_infos[planes_count]; - } else if (new_crtc_state->planes_changed) { - /* Assume even ONE crtc with immediate flip means - * entire can't wait for VBLANK 
- * TODO Check if it's correct + + full->plane_infos[planes_count].color_space = dc_plane->color_space; + full->plane_infos[planes_count].format = dc_plane->format; + full->plane_infos[planes_count].plane_size = dc_plane->plane_size; + full->plane_infos[planes_count].rotation = dc_plane->rotation; + full->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror; + full->plane_infos[planes_count].stereo_format = dc_plane->stereo_format; + full->plane_infos[planes_count].tiling_info = dc_plane->tiling_info; + full->plane_infos[planes_count].visible = dc_plane->visible; + full->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha; + full->plane_infos[planes_count].dcc = dc_plane->dcc; + full->surface_updates[planes_count].plane_info = &full->plane_infos[planes_count]; + + planes_count += 1; + + } + + /* + * TODO: For proper atomic behaviour, we should be calling into DC once with + * all the changes. However, DC refuses to do pageflips and non-pageflip + * changes in the same call. Change DC to respect atomic behaviour, + * hopefully eliminating dc_*_update structs in their entirety. + */ + if (flip_count) { + if (!vrr_active) { + /* Use old throttling in non-vrr fixed refresh rate mode + * to keep flip scheduling based on target vblank counts + * working in a backwards compatible way, e.g., for + * clients using the GLX_OML_sync_control extension or + * DRI3/Present extension with defined target_msc. */ - *wait_for_vblank = - new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? - false : true; - - /* TODO: Needs rework for multiplane flip */ - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - drm_crtc_vblank_get(crtc); - - amdgpu_dm_do_flip( - crtc, - fb, - (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, - dc_state); + last_flip_vblank = drm_crtc_vblank_count(pcrtc); + } + else { + /* For variable refresh rate mode only: + * Get vblank of last completed flip to avoid > 1 vrr + * flips per video frame by use of throttling, but allow + * flip programming anywhere in the possibly large + * variable vrr vblank interval for fine-grained flip + * timing control and more opportunity to avoid stutter + * on late submission of flips. 
+ */ + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + last_flip_vblank = acrtc_attach->last_flip_vblank; + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - } + target = (uint32_t)last_flip_vblank + *wait_for_vblank; - if (planes_count) { - unsigned long flags; + /* Prepare wait for target vblank early - before the fence-waits */ + target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) + + amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id); - if (new_pcrtc_state->event) { + /* + * Wait until we're out of the vertical blank period before the one + * targeted by the flip + */ + while ((acrtc_attach->enabled && + (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, + 0, &vpos, &hpos, NULL, + NULL, &pcrtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(target_vblank - + amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) { + usleep_range(1000, 1100); + } + if (acrtc_attach->base.state->event) { drm_crtc_vblank_get(pcrtc); spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + + WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); prepare_flip_isr(acrtc_attach); + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - dc_stream_attach->abm_level = acrtc_state->abm_level; + if (acrtc_state->stream) { - if (false == commit_planes_to_stream(dm, - dm->dc, - plane_states_constructed, - planes_count, - acrtc_state, - dm_old_crtc_state, - dc_state)) - dm_error("%s: Failed to attach plane!\n", __func__); - } else { - /*TODO BUG Here should go disable planes on CRTC. */ + if (acrtc_state->freesync_timing_changed) + flip->stream_update.adjust = + &acrtc_state->stream->adjust; + + if (acrtc_state->freesync_vrr_info_changed) + flip->stream_update.vrr_infopacket = + &acrtc_state->stream->vrr_infopacket; + } + + mutex_lock(&dm->dc_lock); + dc_commit_updates_for_stream(dm->dc, + flip->surface_updates, + flip_count, + acrtc_state->stream, + &flip->stream_update, + dc_state); + mutex_unlock(&dm->dc_lock); + } + + if (planes_count) { + if (new_pcrtc_state->mode_changed) { + full->stream_update.src = acrtc_state->stream->src; + full->stream_update.dst = acrtc_state->stream->dst; + } + + if (new_pcrtc_state->color_mgmt_changed) + full->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func; + + acrtc_state->stream->abm_level = acrtc_state->abm_level; + if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) + full->stream_update.abm_level = &acrtc_state->abm_level; + + mutex_lock(&dm->dc_lock); + dc_commit_updates_for_stream(dm->dc, + full->surface_updates, + planes_count, + acrtc_state->stream, + &full->stream_update, + dc_state); + mutex_unlock(&dm->dc_lock); } + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) + if (plane->type == DRM_PLANE_TYPE_CURSOR) + handle_cursor_update(plane, old_plane_state); + +cleanup: + kfree(flip); + kfree(full); } /* @@ -4927,7 +4925,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, struct dc_stream_state *stream_state) { - stream_state->mode_changed = crtc_state->mode_changed; + stream_state->mode_changed = + crtc_state->mode_changed || crtc_state->active_changed; } static int amdgpu_dm_atomic_commit(struct drm_device *dev, @@ -4948,10 +4947,25 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, */ for_each_oldnew_crtc_in_state(state, crtc, 
old_crtc_state, new_crtc_state, i) { struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream) + if (drm_atomic_crtc_needs_modeset(new_crtc_state) + && dm_old_crtc_state->stream) { + /* + * If the stream is removed and CRC capture was + * enabled on the CRTC the extra vblank reference + * needs to be dropped since CRC capture will be + * disabled. + */ + if (!dm_new_crtc_state->stream + && dm_new_crtc_state->crc_enabled) { + drm_crtc_vblank_put(crtc); + dm_new_crtc_state->crc_enabled = false; + } + manage_dm_interrupts(adev, acrtc, false); + } } /* * Add check here for SoC's that support hardware cursor plane, to @@ -5089,8 +5103,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_stream_get_status(dm_new_crtc_state->stream); if (!status) - status = dc_state_get_stream_status(dc_state, - dm_new_crtc_state->stream); + status = dc_stream_get_status_from_state(dc_state, + dm_new_crtc_state->stream); if (!status) DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); @@ -5099,13 +5113,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } } - /* Handle scaling, underscan, and abm changes*/ + /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct dc_surface_update dummy_updates[MAX_SURFACES]; + struct dc_stream_update stream_update; struct dc_stream_status *status = NULL; + memset(&dummy_updates, 0, sizeof(dummy_updates)); + memset(&stream_update, 0, sizeof(stream_update)); + if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); @@ -5115,37 +5134,48 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - /* Skip anything that is not scaling or underscan changes */ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) && (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level)) continue; - update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, - dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); + if (is_scaling_state_different(dm_new_con_state, dm_old_con_state)) { + update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, + dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); - if (!dm_new_crtc_state->stream) - continue; + stream_update.src = dm_new_crtc_state->stream->src; + stream_update.dst = dm_new_crtc_state->stream->dst; + } + + if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) { + dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level; + + stream_update.abm_level = &dm_new_crtc_state->abm_level; + } status = dc_stream_get_status(dm_new_crtc_state->stream); WARN_ON(!status); WARN_ON(!status->plane_count); - dm_new_crtc_state->stream->abm_level = 
dm_new_crtc_state->abm_level; + /* + * TODO: DC refuses to perform stream updates without a dc_surface_update. + * Here we create an empty update on each plane. + * To fix this, DC should permit updating only stream properties. + */ + for (j = 0; j < status->plane_count; j++) + dummy_updates[j].surface = status->plane_states[0]; - /*TODO How it works with MPO ?*/ - if (!commit_planes_to_stream( - dm, - dm->dc, - status->plane_states, - status->plane_count, - dm_new_crtc_state, - to_dm_crtc_state(old_crtc_state), - dc_state)) - dm_error("%s: Failed to update stream scaling!\n", __func__); + + mutex_lock(&dm->dc_lock); + dc_commit_updates_for_stream(dm->dc, + dummy_updates, + status->plane_count, + dm_new_crtc_state->stream, + &stream_update, + dc_state); + mutex_unlock(&dm->dc_lock); } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, @@ -5170,6 +5200,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) continue; manage_dm_interrupts(adev, acrtc, true); + +#ifdef CONFIG_DEBUG_FS + /* The stream has changed so CRC capture needs to re-enabled. */ + if (dm_new_crtc_state->crc_enabled) + amdgpu_dm_crtc_set_crc_source(crtc, "auto"); +#endif } /* update planes when needed per crtc*/ @@ -5196,18 +5232,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } spin_unlock_irqrestore(&adev->ddev->event_lock, flags); + /* Signal HW programming completion */ + drm_atomic_helper_commit_hw_done(state); if (wait_for_vblank) drm_atomic_helper_wait_for_flip_done(dev, state); - /* - * FIXME: - * Delay hw_done() until flip_done() is signaled. This is to block - * another commit from freeing the CRTC state while we're still - * waiting on flip_done. - */ - drm_atomic_helper_commit_hw_done(state); - drm_atomic_helper_cleanup_planes(dev, state); /* @@ -5371,10 +5401,13 @@ static void get_freesync_config_for_crtc( struct mod_freesync_config config = {0}; struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(new_con_state->base.connector); + struct drm_display_mode *mode = &new_crtc_state->base.mode; - new_crtc_state->vrr_supported = new_con_state->freesync_capable; + new_crtc_state->vrr_supported = new_con_state->freesync_capable && + aconnector->min_vfreq <= drm_mode_vrefresh(mode); - if (new_con_state->freesync_capable) { + if (new_crtc_state->vrr_supported) { + new_crtc_state->stream->ignore_msa_timing_param = true; config.state = new_crtc_state->base.vrr_enabled ? 
VRR_STATE_ACTIVE_VARIABLE : VRR_STATE_INACTIVE; @@ -5400,15 +5433,15 @@ static void reset_freesync_config_for_crtc( sizeof(new_crtc_state->vrr_infopacket)); } -static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, - struct drm_atomic_state *state, - bool enable, - bool *lock_and_validation_needed) +static int dm_update_crtc_state(struct amdgpu_display_manager *dm, + struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state, + bool enable, + bool *lock_and_validation_needed) { struct dm_atomic_state *dm_state = NULL; - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; - int i; struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; struct dc_stream_state *new_stream; int ret = 0; @@ -5417,200 +5450,203 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm, * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set * update changed items */ - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - struct amdgpu_crtc *acrtc = NULL; - struct amdgpu_dm_connector *aconnector = NULL; - struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; - struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; - struct drm_plane_state *new_plane_state = NULL; - - new_stream = NULL; + struct amdgpu_crtc *acrtc = NULL; + struct amdgpu_dm_connector *aconnector = NULL; + struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; + struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; + struct drm_plane_state *new_plane_state = NULL; - dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - acrtc = to_amdgpu_crtc(crtc); + new_stream = NULL; - new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + acrtc = to_amdgpu_crtc(crtc); - if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { - ret = -EINVAL; - goto fail; - } + new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); - aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { + ret = -EINVAL; + goto fail; + } - /* TODO This hack should go away */ - if (aconnector && enable) { - /* Make sure fake sink is created in plug-in scenario */ - drm_new_conn_state = drm_atomic_get_new_connector_state(state, - &aconnector->base); - drm_old_conn_state = drm_atomic_get_old_connector_state(state, - &aconnector->base); + aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); - if (IS_ERR(drm_new_conn_state)) { - ret = PTR_ERR_OR_ZERO(drm_new_conn_state); - break; - } + /* TODO This hack should go away */ + if (aconnector && enable) { + /* Make sure fake sink is created in plug-in scenario */ + drm_new_conn_state = drm_atomic_get_new_connector_state(state, + &aconnector->base); + drm_old_conn_state = drm_atomic_get_old_connector_state(state, + &aconnector->base); - dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); - dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); + if (IS_ERR(drm_new_conn_state)) { + ret = PTR_ERR_OR_ZERO(drm_new_conn_state); + goto fail; + } - new_stream = 
create_stream_for_sink(aconnector, - &new_crtc_state->mode, - dm_new_conn_state, - dm_old_crtc_state->stream); + dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); + dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); - /* - * we can have no stream on ACTION_SET if a display - * was disconnected during S3, in this case it is not an - * error, the OS will be updated after detection, and - * will do the right thing on next atomic commit - */ + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + goto skip_modeset; - if (!new_stream) { - DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", - __func__, acrtc->base.base.id); - break; - } + new_stream = create_stream_for_sink(aconnector, + &new_crtc_state->mode, + dm_new_conn_state, + dm_old_crtc_state->stream); - dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; + /* + * we can have no stream on ACTION_SET if a display + * was disconnected during S3, in this case it is not an + * error, the OS will be updated after detection, and + * will do the right thing on next atomic commit + */ - if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && - dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { - new_crtc_state->mode_changed = false; - DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", - new_crtc_state->mode_changed); - } + if (!new_stream) { + DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", + __func__, acrtc->base.base.id); + ret = -ENOMEM; + goto fail; } - if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) - goto next_crtc; + dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; - DRM_DEBUG_DRIVER( - "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " - "planes_changed:%d, mode_changed:%d,active_changed:%d," - "connectors_changed:%d\n", - acrtc->crtc_id, - new_crtc_state->enable, - new_crtc_state->active, - new_crtc_state->planes_changed, - new_crtc_state->mode_changed, - new_crtc_state->active_changed, - new_crtc_state->connectors_changed); + if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && + dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { + new_crtc_state->mode_changed = false; + DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", + new_crtc_state->mode_changed); + } + } - /* Remove stream for any changed/disabled CRTC */ - if (!enable) { + /* mode_changed flag may get updated above, need to check again */ + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + goto skip_modeset; - if (!dm_old_crtc_state->stream) - goto next_crtc; + DRM_DEBUG_DRIVER( + "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " + "planes_changed:%d, mode_changed:%d,active_changed:%d," + "connectors_changed:%d\n", + acrtc->crtc_id, + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->planes_changed, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); - ret = dm_atomic_get_state(state, &dm_state); - if (ret) - goto fail; + /* Remove stream for any changed/disabled CRTC */ + if (!enable) { - DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", - crtc->base.id); + if (!dm_old_crtc_state->stream) + goto skip_modeset; - /* i.e. 
reset mode */ - if (dc_remove_stream_from_ctx( - dm->dc, - dm_state->context, - dm_old_crtc_state->stream) != DC_OK) { - ret = -EINVAL; - goto fail; - } + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto fail; - dc_stream_release(dm_old_crtc_state->stream); - dm_new_crtc_state->stream = NULL; + DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", + crtc->base.id); + + /* i.e. reset mode */ + if (dc_remove_stream_from_ctx( + dm->dc, + dm_state->context, + dm_old_crtc_state->stream) != DC_OK) { + ret = -EINVAL; + goto fail; + } - reset_freesync_config_for_crtc(dm_new_crtc_state); + dc_stream_release(dm_old_crtc_state->stream); + dm_new_crtc_state->stream = NULL; - *lock_and_validation_needed = true; + reset_freesync_config_for_crtc(dm_new_crtc_state); - } else {/* Add stream for any updated/enabled CRTC */ - /* - * Quick fix to prevent NULL pointer on new_stream when - * added MST connectors not found in existing crtc_state in the chained mode - * TODO: need to dig out the root cause of that - */ - if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) - goto next_crtc; + *lock_and_validation_needed = true; - if (modereset_required(new_crtc_state)) - goto next_crtc; + } else {/* Add stream for any updated/enabled CRTC */ + /* + * Quick fix to prevent NULL pointer on new_stream when + * added MST connectors not found in existing crtc_state in the chained mode + * TODO: need to dig out the root cause of that + */ + if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) + goto skip_modeset; - if (modeset_required(new_crtc_state, new_stream, - dm_old_crtc_state->stream)) { + if (modereset_required(new_crtc_state)) + goto skip_modeset; - WARN_ON(dm_new_crtc_state->stream); + if (modeset_required(new_crtc_state, new_stream, + dm_old_crtc_state->stream)) { - ret = dm_atomic_get_state(state, &dm_state); - if (ret) - goto fail; + WARN_ON(dm_new_crtc_state->stream); - dm_new_crtc_state->stream = new_stream; + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto fail; - dc_stream_retain(new_stream); + dm_new_crtc_state->stream = new_stream; - DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n", - crtc->base.id); + dc_stream_retain(new_stream); - if (dc_add_stream_to_ctx( - dm->dc, - dm_state->context, - dm_new_crtc_state->stream) != DC_OK) { - ret = -EINVAL; - goto fail; - } + DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n", + crtc->base.id); - *lock_and_validation_needed = true; + if (dc_add_stream_to_ctx( + dm->dc, + dm_state->context, + dm_new_crtc_state->stream) != DC_OK) { + ret = -EINVAL; + goto fail; } - } -next_crtc: - /* Release extra reference */ - if (new_stream) - dc_stream_release(new_stream); + *lock_and_validation_needed = true; + } + } - /* - * We want to do dc stream updates that do not require a - * full modeset below. - */ - if (!(enable && aconnector && new_crtc_state->enable && - new_crtc_state->active)) - continue; - /* - * Given above conditions, the dc state cannot be NULL because: - * 1. We're in the process of enabling CRTCs (just been added - * to the dc context, or already is on the context) - * 2. Has a valid connector attached, and - * 3. Is currently active and enabled. - * => The dc stream state currently exists. 
- */ - BUG_ON(dm_new_crtc_state->stream == NULL); +skip_modeset: + /* Release extra reference */ + if (new_stream) + dc_stream_release(new_stream); - /* Scaling or underscan settings */ - if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state)) - update_stream_scaling_settings( - &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); + /* + * We want to do dc stream updates that do not require a + * full modeset below. + */ + if (!(enable && aconnector && new_crtc_state->enable && + new_crtc_state->active)) + return 0; + /* + * Given above conditions, the dc state cannot be NULL because: + * 1. We're in the process of enabling CRTCs (just been added + * to the dc context, or already is on the context) + * 2. Has a valid connector attached, and + * 3. Is currently active and enabled. + * => The dc stream state currently exists. + */ + BUG_ON(dm_new_crtc_state->stream == NULL); - /* - * Color management settings. We also update color properties - * when a modeset is needed, to ensure it gets reprogrammed. - */ - if (dm_new_crtc_state->base.color_mgmt_changed || - drm_atomic_crtc_needs_modeset(new_crtc_state)) { - ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state); - if (ret) - goto fail; - amdgpu_dm_set_ctm(dm_new_crtc_state); - } + /* Scaling or underscan settings */ + if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state)) + update_stream_scaling_settings( + &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream); - /* Update Freesync settings. */ - get_freesync_config_for_crtc(dm_new_crtc_state, - dm_new_conn_state); + /* + * Color management settings. We also update color properties + * when a modeset is needed, to ensure it gets reprogrammed. + */ + if (dm_new_crtc_state->base.color_mgmt_changed || + drm_atomic_crtc_needs_modeset(new_crtc_state)) { + ret = amdgpu_dm_set_regamma_lut(dm_new_crtc_state); + if (ret) + goto fail; + amdgpu_dm_set_ctm(dm_new_crtc_state); } + /* Update Freesync settings. 
*/ + get_freesync_config_for_crtc(dm_new_crtc_state, + dm_new_conn_state); + return ret; fail: @@ -5619,145 +5655,141 @@ fail: return ret; } -static int dm_update_planes_state(struct dc *dc, - struct drm_atomic_state *state, - bool enable, - bool *lock_and_validation_needed) +static int dm_update_plane_state(struct dc *dc, + struct drm_atomic_state *state, + struct drm_plane *plane, + struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state, + bool enable, + bool *lock_and_validation_needed) { struct dm_atomic_state *dm_state = NULL; struct drm_crtc *new_plane_crtc, *old_plane_crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; - struct drm_plane *plane; - struct drm_plane_state *old_plane_state, *new_plane_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; - int i ; /* TODO return page_flip_needed() function */ bool pflip_needed = !state->allow_modeset; int ret = 0; - /* Add new planes, in reverse order as DC expectation */ - for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { - new_plane_crtc = new_plane_state->crtc; - old_plane_crtc = old_plane_state->crtc; - dm_new_plane_state = to_dm_plane_state(new_plane_state); - dm_old_plane_state = to_dm_plane_state(old_plane_state); + new_plane_crtc = new_plane_state->crtc; + old_plane_crtc = old_plane_state->crtc; + dm_new_plane_state = to_dm_plane_state(new_plane_state); + dm_old_plane_state = to_dm_plane_state(old_plane_state); - /*TODO Implement atomic check for cursor plane */ - if (plane->type == DRM_PLANE_TYPE_CURSOR) - continue; + /*TODO Implement atomic check for cursor plane */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) + return 0; - /* Remove any changed/removed planes */ - if (!enable) { - if (pflip_needed && - plane->type != DRM_PLANE_TYPE_OVERLAY) - continue; + /* Remove any changed/removed planes */ + if (!enable) { + if (pflip_needed && + plane->type != DRM_PLANE_TYPE_OVERLAY) + return 0; - if (!old_plane_crtc) - continue; + if (!old_plane_crtc) + return 0; - old_crtc_state = drm_atomic_get_old_crtc_state( - state, old_plane_crtc); - dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + old_crtc_state = drm_atomic_get_old_crtc_state( + state, old_plane_crtc); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - if (!dm_old_crtc_state->stream) - continue; + if (!dm_old_crtc_state->stream) + return 0; - DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", - plane->base.id, old_plane_crtc->base.id); + DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", + plane->base.id, old_plane_crtc->base.id); - ret = dm_atomic_get_state(state, &dm_state); - if (ret) - return ret; + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + return ret; - if (!dc_remove_plane_from_context( - dc, - dm_old_crtc_state->stream, - dm_old_plane_state->dc_state, - dm_state->context)) { + if (!dc_remove_plane_from_context( + dc, + dm_old_crtc_state->stream, + dm_old_plane_state->dc_state, + dm_state->context)) { - ret = EINVAL; - return ret; - } + ret = EINVAL; + return ret; + } - dc_plane_state_release(dm_old_plane_state->dc_state); - dm_new_plane_state->dc_state = NULL; + dc_plane_state_release(dm_old_plane_state->dc_state); + dm_new_plane_state->dc_state = NULL; - *lock_and_validation_needed = true; + *lock_and_validation_needed = true; - } else { /* Add new planes */ - struct dc_plane_state *dc_new_plane_state; + } else { /* Add new planes */ + struct dc_plane_state 
*dc_new_plane_state; - if (drm_atomic_plane_disabling(plane->state, new_plane_state)) - continue; + if (drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; - if (!new_plane_crtc) - continue; + if (!new_plane_crtc) + return 0; - new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - if (!dm_new_crtc_state->stream) - continue; + if (!dm_new_crtc_state->stream) + return 0; - if (pflip_needed && - plane->type != DRM_PLANE_TYPE_OVERLAY) - continue; + if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY) + return 0; - WARN_ON(dm_new_plane_state->dc_state); + WARN_ON(dm_new_plane_state->dc_state); - dc_new_plane_state = dc_create_plane_state(dc); - if (!dc_new_plane_state) - return -ENOMEM; + dc_new_plane_state = dc_create_plane_state(dc); + if (!dc_new_plane_state) + return -ENOMEM; - DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", - plane->base.id, new_plane_crtc->base.id); + DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", + plane->base.id, new_plane_crtc->base.id); - ret = fill_plane_attributes( - new_plane_crtc->dev->dev_private, - dc_new_plane_state, - new_plane_state, - new_crtc_state); - if (ret) { - dc_plane_state_release(dc_new_plane_state); - return ret; - } + ret = fill_plane_attributes( + new_plane_crtc->dev->dev_private, + dc_new_plane_state, + new_plane_state, + new_crtc_state); + if (ret) { + dc_plane_state_release(dc_new_plane_state); + return ret; + } - ret = dm_atomic_get_state(state, &dm_state); - if (ret) { - dc_plane_state_release(dc_new_plane_state); - return ret; - } + ret = dm_atomic_get_state(state, &dm_state); + if (ret) { + dc_plane_state_release(dc_new_plane_state); + return ret; + } - /* - * Any atomic check errors that occur after this will - * not need a release. The plane state will be attached - * to the stream, and therefore part of the atomic - * state. It'll be released when the atomic state is - * cleaned. - */ - if (!dc_add_plane_to_context( - dc, - dm_new_crtc_state->stream, - dc_new_plane_state, - dm_state->context)) { - - dc_plane_state_release(dc_new_plane_state); - return -EINVAL; - } + /* + * Any atomic check errors that occur after this will + * not need a release. The plane state will be attached + * to the stream, and therefore part of the atomic + * state. It'll be released when the atomic state is + * cleaned. + */ + if (!dc_add_plane_to_context( + dc, + dm_new_crtc_state->stream, + dc_new_plane_state, + dm_state->context)) { - dm_new_plane_state->dc_state = dc_new_plane_state; + dc_plane_state_release(dc_new_plane_state); + return -EINVAL; + } - /* Tell DC to do a full surface update every time there - * is a plane change. Inefficient, but works for now. - */ - dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; + dm_new_plane_state->dc_state = dc_new_plane_state; - *lock_and_validation_needed = true; - } + /* Tell DC to do a full surface update every time there + * is a plane change. Inefficient, but works for now. 
+ */ + dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; + + *lock_and_validation_needed = true; } @@ -5781,11 +5813,13 @@ dm_determine_update_type_for_commit(struct dc *dc, struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; struct dc_stream_status *status = NULL; - struct dc_surface_update *updates = kzalloc(MAX_SURFACES * sizeof(struct dc_surface_update), GFP_KERNEL); - struct dc_plane_state *surface = kzalloc(MAX_SURFACES * sizeof(struct dc_plane_state), GFP_KERNEL); - struct dc_stream_update stream_update; + struct dc_surface_update *updates; + struct dc_plane_state *surface; enum surface_update_type update_type = UPDATE_TYPE_FAST; + updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); + surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL); + if (!updates || !surface) { DRM_ERROR("Plane or surface update failed to allocate"); /* Set type to FULL to avoid crashing in DC*/ @@ -5794,79 +5828,89 @@ dm_determine_update_type_for_commit(struct dc *dc, } for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct dc_stream_update stream_update = { 0 }; + new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); num_plane = 0; - if (new_dm_crtc_state->stream) { - - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { - new_plane_crtc = new_plane_state->crtc; - old_plane_crtc = old_plane_state->crtc; - new_dm_plane_state = to_dm_plane_state(new_plane_state); - old_dm_plane_state = to_dm_plane_state(old_plane_state); - - if (plane->type == DRM_PLANE_TYPE_CURSOR) - continue; - - if (!state->allow_modeset) - continue; - - if (crtc == new_plane_crtc) { - updates[num_plane].surface = &surface[num_plane]; - - if (new_crtc_state->mode_changed) { - updates[num_plane].surface->src_rect = - new_dm_plane_state->dc_state->src_rect; - updates[num_plane].surface->dst_rect = - new_dm_plane_state->dc_state->dst_rect; - updates[num_plane].surface->rotation = - new_dm_plane_state->dc_state->rotation; - updates[num_plane].surface->in_transfer_func = - new_dm_plane_state->dc_state->in_transfer_func; - stream_update.dst = new_dm_crtc_state->stream->dst; - stream_update.src = new_dm_crtc_state->stream->src; - } - - if (new_crtc_state->color_mgmt_changed) { - updates[num_plane].gamma = - new_dm_plane_state->dc_state->gamma_correction; - updates[num_plane].in_transfer_func = - new_dm_plane_state->dc_state->in_transfer_func; - stream_update.gamut_remap = - &new_dm_crtc_state->stream->gamut_remap_matrix; - stream_update.out_transfer_func = - new_dm_crtc_state->stream->out_transfer_func; - } - - num_plane++; - } - } + if (new_dm_crtc_state->stream != old_dm_crtc_state->stream) { + update_type = UPDATE_TYPE_FULL; + goto cleanup; + } - if (num_plane > 0) { - ret = dm_atomic_get_state(state, &dm_state); - if (ret) - goto cleanup; + if (!new_dm_crtc_state->stream) + continue; - old_dm_state = dm_atomic_get_old_state(state); - if (!old_dm_state) { - ret = -EINVAL; - goto cleanup; - } + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { + new_plane_crtc = new_plane_state->crtc; + old_plane_crtc = old_plane_state->crtc; + new_dm_plane_state = to_dm_plane_state(new_plane_state); + old_dm_plane_state = to_dm_plane_state(old_plane_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; - status = dc_state_get_stream_status(old_dm_state->context, - new_dm_crtc_state->stream); + if (new_dm_plane_state->dc_state != 
old_dm_plane_state->dc_state) { + update_type = UPDATE_TYPE_FULL; + goto cleanup; + } - update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, - &stream_update, status); + if (!state->allow_modeset) + continue; - if (update_type > UPDATE_TYPE_MED) { - update_type = UPDATE_TYPE_FULL; - goto cleanup; - } + if (crtc != new_plane_crtc) + continue; + + updates[num_plane].surface = &surface[num_plane]; + + if (new_crtc_state->mode_changed) { + updates[num_plane].surface->src_rect = + new_dm_plane_state->dc_state->src_rect; + updates[num_plane].surface->dst_rect = + new_dm_plane_state->dc_state->dst_rect; + updates[num_plane].surface->rotation = + new_dm_plane_state->dc_state->rotation; + updates[num_plane].surface->in_transfer_func = + new_dm_plane_state->dc_state->in_transfer_func; + stream_update.dst = new_dm_crtc_state->stream->dst; + stream_update.src = new_dm_crtc_state->stream->src; } - } else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) { + if (new_crtc_state->color_mgmt_changed) { + updates[num_plane].gamma = + new_dm_plane_state->dc_state->gamma_correction; + updates[num_plane].in_transfer_func = + new_dm_plane_state->dc_state->in_transfer_func; + stream_update.gamut_remap = + &new_dm_crtc_state->stream->gamut_remap_matrix; + stream_update.out_transfer_func = + new_dm_crtc_state->stream->out_transfer_func; + } + + num_plane++; + } + + if (num_plane == 0) + continue; + + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto cleanup; + + old_dm_state = dm_atomic_get_old_state(state); + if (!old_dm_state) { + ret = -EINVAL; + goto cleanup; + } + + status = dc_stream_get_status_from_state(old_dm_state->context, + new_dm_crtc_state->stream); + + update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, + &stream_update, status); + + if (update_type > UPDATE_TYPE_MED) { update_type = UPDATE_TYPE_FULL; goto cleanup; } @@ -5915,6 +5959,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_connector_state *old_con_state, *new_con_state; struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; enum surface_update_type update_type = UPDATE_TYPE_FAST; enum surface_update_type overall_update_type = UPDATE_TYPE_FAST; @@ -5948,28 +5994,84 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; } + /* + * Add all primary and overlay planes on the CRTC to the state + * whenever a plane is enabled to maintain correct z-ordering + * and to enable fast surface updates. 
+ */ + drm_for_each_crtc(crtc, dev) { + bool modified = false; + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + if (new_plane_state->crtc == crtc || + old_plane_state->crtc == crtc) { + modified = true; + break; + } + } + + if (!modified) + continue; + + drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + new_plane_state = + drm_atomic_get_plane_state(state, plane); + + if (IS_ERR(new_plane_state)) { + ret = PTR_ERR(new_plane_state); + goto fail; + } + } + } + /* Remove exiting planes if they are modified */ - ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed); - if (ret) { - goto fail; + for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + ret = dm_update_plane_state(dc, state, plane, + old_plane_state, + new_plane_state, + false, + &lock_and_validation_needed); + if (ret) + goto fail; } /* Disable all crtcs which require disable */ - ret = dm_update_crtcs_state(&adev->dm, state, false, &lock_and_validation_needed); - if (ret) { - goto fail; + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + ret = dm_update_crtc_state(&adev->dm, state, crtc, + old_crtc_state, + new_crtc_state, + false, + &lock_and_validation_needed); + if (ret) + goto fail; } /* Enable all crtcs which require enable */ - ret = dm_update_crtcs_state(&adev->dm, state, true, &lock_and_validation_needed); - if (ret) { - goto fail; + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + ret = dm_update_crtc_state(&adev->dm, state, crtc, + old_crtc_state, + new_crtc_state, + true, + &lock_and_validation_needed); + if (ret) + goto fail; } /* Add new/modified planes */ - ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed); - if (ret) { - goto fail; + for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + ret = dm_update_plane_state(dc, state, plane, + old_plane_state, + new_plane_state, + true, + &lock_and_validation_needed); + if (ret) + goto fail; } /* Run this here since we want to validate the streams we created */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index f088ac585978..a10e3a50d9ef 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -64,8 +64,10 @@ amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) { + struct amdgpu_device *adev = crtc->dev->dev_private; struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state); struct dc_stream_state *stream_state = crtc_state->stream; + bool enable; enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); @@ -80,29 +82,33 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) return -EINVAL; } - /* When enabling CRC, we should also disable dithering. 
*/ - if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { - if (dc_stream_configure_crc(stream_state->ctx->dc, - stream_state, - true, true)) { - crtc_state->crc_enabled = true; - dc_stream_set_dither_option(stream_state, - DITHER_OPTION_TRUN8); - } - else - return -EINVAL; - } else { - if (dc_stream_configure_crc(stream_state->ctx->dc, - stream_state, - false, false)) { - crtc_state->crc_enabled = false; - dc_stream_set_dither_option(stream_state, - DITHER_OPTION_DEFAULT); - } - else - return -EINVAL; + enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO); + + mutex_lock(&adev->dm.dc_lock); + if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, + enable, enable)) { + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; } + /* When enabling CRC, we should also disable dithering. */ + dc_stream_set_dither_option(stream_state, + enable ? DITHER_OPTION_TRUN8 + : DITHER_OPTION_DEFAULT); + + mutex_unlock(&adev->dm.dc_lock); + + /* + * Reading the CRC requires the vblank interrupt handler to be + * enabled. Keep a reference until CRC capture stops. + */ + if (!crtc_state->crc_enabled && enable) + drm_crtc_vblank_get(crtc); + else if (crtc_state->crc_enabled && !enable) + drm_crtc_vblank_put(crtc); + + crtc_state->crc_enabled = enable; + /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9a7ac58eb18e..4a55cde027cf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us return bytes_from_user; } +/* + * Returns the min and max vrr vfreq through the connector's debugfs file. + * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range + */ +static int vrr_range_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq); + seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(vrr_range); + static const struct file_operations dp_link_settings_debugfs_fops = { .owner = THIS_MODULE, .read = dp_link_settings_read, @@ -697,7 +716,8 @@ static const struct { } dp_debugfs_entries[] = { {"link_settings", &dp_link_settings_debugfs_fops}, {"phy_settings", &dp_phy_settings_debugfs_fop}, - {"test_pattern", &dp_phy_test_pattern_fops} + {"test_pattern", &dp_phy_test_pattern_fops}, + {"vrr_range", &vrr_range_fops} }; int connector_debugfs_init(struct amdgpu_dm_connector *connector) @@ -783,6 +803,45 @@ static ssize_t dtn_log_write( return size; } +/* + * Backlight at this moment. Read only. + * As written to display, taking ABM and backlight lut into account. + * Ranges from 0x0 to 0x10000 (= 100% PWM) + */ +static int current_backlight_read(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + struct dc *dc = adev->dm.dc; + unsigned int backlight = dc_get_current_backlight_pwm(dc); + + seq_printf(m, "0x%x\n", backlight); + return 0; +} + +/* + * Backlight value that is being approached. Read only. 
+ * As written to display, taking ABM and backlight lut into account. + * Ranges from 0x0 to 0x10000 (= 100% PWM) + */ +static int target_backlight_read(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + struct dc *dc = adev->dm.dc; + unsigned int backlight = dc_get_target_backlight_pwm(dc); + + seq_printf(m, "0x%x\n", backlight); + return 0; +} + +static const struct drm_info_list amdgpu_dm_debugfs_list[] = { + {"amdgpu_current_backlight_pwm", &current_backlight_read}, + {"amdgpu_target_backlight_pwm", &target_backlight_read}, +}; + int dtn_debugfs_init(struct amdgpu_device *adev) { static const struct file_operations dtn_log_fops = { @@ -793,9 +852,15 @@ int dtn_debugfs_init(struct amdgpu_device *adev) }; struct drm_minor *minor = adev->ddev->primary; - struct dentry *root = minor->debugfs_root; + struct dentry *ent, *root = minor->debugfs_root; + int ret; + + ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list, + ARRAY_SIZE(amdgpu_dm_debugfs_list)); + if (ret) + return ret; - struct dentry *ent = debugfs_create_file( + ent = debugfs_create_file( "amdgpu_dm_dtn_log", 0644, root, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 39997d977efb..b39766bd2840 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -29,7 +29,7 @@ #include #include -#include +#include #include #include @@ -192,7 +192,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( int bpp = 0; int pbn = 0; - aconnector = stream->sink->priv; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) return false; @@ -205,7 +205,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( mst_port = aconnector->port; if (enable) { - clock = stream->timing.pix_clk_khz; + clock = stream->timing.pix_clk_100hz / 10; switch (stream->timing.display_color_depth) { @@ -263,6 +263,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( return true; } +/* + * poll pending down reply before clear payload allocation table + */ +void dm_helpers_dp_mst_poll_pending_down_reply( + struct dc_context *ctx, + const struct dc_link *link) +{} /* * Clear payload allocation table before enable MST DP link. 
@@ -284,7 +291,7 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct drm_dp_mst_topology_mgr *mst_mgr; int ret; - aconnector = stream->sink->priv; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) return false; @@ -312,7 +319,7 @@ bool dm_helpers_dp_mst_send_payload_allocation( struct drm_dp_mst_port *mst_port; int ret; - aconnector = stream->sink->priv; + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) return false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1b0d209d8367..f51d52eb52e6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -35,6 +35,8 @@ #include "dc_link_ddc.h" +#include "i2caux_interface.h" + /* #define TRACE_DPCD */ #ifdef TRACE_DPCD @@ -81,80 +83,24 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { ssize_t result = 0; - enum i2caux_transaction_action action; - enum aux_transaction_type type; + struct aux_payload payload; if (WARN_ON(msg->size > 16)) return -E2BIG; - switch (msg->request & ~DP_AUX_I2C_MOT) { - case DP_AUX_NATIVE_READ: - type = AUX_TRANSACTION_TYPE_DP; - action = I2CAUX_TRANSACTION_ACTION_DP_READ; - - result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, - msg->address, - &msg->reply, - msg->buffer, - msg->size, - type, - action); - break; - case DP_AUX_NATIVE_WRITE: - type = AUX_TRANSACTION_TYPE_DP; - action = I2CAUX_TRANSACTION_ACTION_DP_WRITE; - - dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, - msg->address, - &msg->reply, - msg->buffer, - msg->size, - type, - action); - result = msg->size; - break; - case DP_AUX_I2C_READ: - type = AUX_TRANSACTION_TYPE_I2C; - if (msg->request & DP_AUX_I2C_MOT) - action = I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT; - else - action = I2CAUX_TRANSACTION_ACTION_I2C_READ; - - result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, - msg->address, - &msg->reply, - msg->buffer, - msg->size, - type, - action); - break; - case DP_AUX_I2C_WRITE: - type = AUX_TRANSACTION_TYPE_I2C; - if (msg->request & DP_AUX_I2C_MOT) - action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT; - else - action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE; - - dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, - msg->address, - &msg->reply, - msg->buffer, - msg->size, - type, - action); - result = msg->size; - break; - default: - return -EINVAL; - } + payload.address = msg->address; + payload.data = msg->buffer; + payload.length = msg->size; + payload.reply = &msg->reply; + payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0; + payload.write = (msg->request & DP_AUX_I2C_READ) == 0; + payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0; + payload.defer_delay = 0; -#ifdef TRACE_DPCD - log_dpcd(msg->request, - msg->address, - msg->buffer, - msg->size, - r == DDC_RESULT_SUCESSFULL); -#endif + result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, &payload); + + if (payload.write) + result = msg->size; if (result < 0) /* DC doesn't know about kernel error codes */ result = -EIO; @@ -191,6 +137,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); drm_connector_cleanup(connector); + drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port); kfree(amdgpu_dm_connector); } @@ -227,6 +174,11 @@ static int 
dm_dp_mst_get_modes(struct drm_connector *connector) aconnector->edid = edid; } + if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) { + dc_sink_release(aconnector->dc_sink); + aconnector->dc_sink = NULL; + } + if (!aconnector->dc_sink) { struct dc_sink *dc_sink; struct dc_sink_init_data init_params = { @@ -363,7 +315,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, amdgpu_dm_connector_funcs_reset(connector); DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); + aconnector, connector->base.id, aconnector->mst_port); + + drm_dp_mst_get_port_malloc(port); DRM_DEBUG_KMS(":%d\n", connector->base.id); @@ -379,12 +333,12 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", - aconnector, connector->base.id, aconnector->mst_port); + aconnector, connector->base.id, aconnector->mst_port); - aconnector->port = NULL; if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); - dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink); + dc_link_remove_remote_sink(aconnector->dc_link, + aconnector->dc_sink); dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; } @@ -395,14 +349,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, drm_connector_put(connector); } -static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) -{ - struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); - struct drm_device *dev = master->base.dev; - - drm_kms_helper_hotplug_event(dev); -} - static void dm_dp_mst_register_connector(struct drm_connector *connector) { struct drm_device *dev = connector->dev; @@ -419,7 +365,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { .add_connector = dm_dp_add_mst_connector, .destroy_connector = dm_dp_destroy_mst_connector, - .hotplug = dm_dp_mst_hotplug, .register_connector = dm_dp_mst_register_connector }; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 9d2d6986b983..a114954d6a5b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -25,7 +25,7 @@ #include #include -#include +#include #include #include "dm_services.h" #include "amdgpu.h" @@ -559,6 +559,58 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp) pp_funcs->notify_smu_enable_pwe(pp_handle); } +void pp_rv_set_active_display_count(struct pp_smu *pp, int count) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->set_active_display_count) + return; + + pp_funcs->set_active_display_count(pp_handle, count); +} + +void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk) + return; + + 
pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock); +} + +void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq) + return; + + pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock); +} + +void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq) + return; + + pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz); +} + void dm_pp_get_funcs_rv( struct dc_context *ctx, struct pp_smu_funcs_rv *funcs) @@ -567,4 +619,9 @@ void dm_pp_get_funcs_rv( funcs->set_display_requirement = pp_rv_set_display_requirement; funcs->set_wm_ranges = pp_rv_set_wm_ranges; funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable; + funcs->set_display_count = pp_rv_set_active_display_count; + funcs->set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk; + funcs->set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq; + funcs->set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq; } + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 516795342dd2..d915e8c8769b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include #include "dm_services.h" #include "amdgpu.h" diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index aed538a4d1ba..b8ddb4acccdb 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -23,7 +23,7 @@ # Makefile for Display Core (dc) component. 
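
The pp_rv_* helpers added above all share one shape: look up the powerplay handle and function table from the DC context, then return quietly when the table or the specific hook is absent, so older powerplay implementations simply ignore the request. A generic sketch of that guard-then-dispatch pattern (types and names here are placeholders, not the driver's):

	/* No-op dispatch when a powerplay hook is not implemented. */
	struct pp_callbacks {
		void (*set_value)(void *handle, int value);
	};

	static void pp_call_if_present(const struct pp_callbacks *cb,
				       void *handle, int value)
	{
		if (!cb || !cb->set_value)
			return;		/* hook missing: silently skip */

		cb->set_value(handle, value);
	}
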
# -DC_LIBS = basics bios calcs dce gpio i2caux irq virtual +DC_LIBS = basics bios calcs dce gpio irq virtual ifdef CONFIG_DRM_AMD_DC_DCN1_0 DC_LIBS += dcn10 dml @@ -41,7 +41,8 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI include $(AMD_DC) DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ -dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o +dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ +dc_vm_helper.o AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE)) diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index c2ab026aee91..a4c97d32e751 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -835,18 +835,6 @@ static enum bp_result bios_parser_enable_crtc( return bp->cmd_tbl.enable_crtc(bp, id, enable); } -static enum bp_result bios_parser_crtc_source_select( - struct dc_bios *dcb, - struct bp_crtc_source_select *bp_params) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - - if (!bp->cmd_tbl.select_crtc_source) - return BP_RESULT_FAILURE; - - return bp->cmd_tbl.select_crtc_source(bp, bp_params); -} - static enum bp_result bios_parser_enable_disp_power_gating( struct dc_bios *dcb, enum controller_id controller_id, @@ -2842,8 +2830,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */ - .crtc_source_select = bios_parser_crtc_source_select, /* still use. should probably retire and program directly */ - .program_display_engine_pll = bios_parser_program_display_engine_pll, .enable_disp_power_gating = bios_parser_enable_disp_power_gating, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index c513ab6f3843..fd5266a58297 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -265,6 +265,7 @@ static struct atom_display_object_path_v2 *get_bios_object( && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } + /* fall through */ case OBJECT_TYPE_CONNECTOR: case OBJECT_TYPE_GENERIC: /* Both Generic and Connector Object ID @@ -277,6 +278,7 @@ static struct atom_display_object_path_v2 *get_bios_object( && id.enum_id == obj_id.enum_id) return &bp->object_info_tbl.v1_4->display_path[i]; } + /* fall through */ default: return NULL; } @@ -1083,18 +1085,6 @@ static enum bp_result bios_parser_enable_crtc( return bp->cmd_tbl.enable_crtc(bp, id, enable); } -static enum bp_result bios_parser_crtc_source_select( - struct dc_bios *dcb, - struct bp_crtc_source_select *bp_params) -{ - struct bios_parser *bp = BP_FROM_DCB(dcb); - - if (!bp->cmd_tbl.select_crtc_source) - return BP_RESULT_FAILURE; - - return bp->cmd_tbl.select_crtc_source(bp, bp_params); -} - static enum bp_result bios_parser_enable_disp_power_gating( struct dc_bios *dcb, enum controller_id controller_id, @@ -1899,8 +1889,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .is_accelerated_mode = bios_parser_is_accelerated_mode, - .is_active_display = bios_is_active_display, - .set_scratch_critical_state = bios_parser_set_scratch_critical_state, @@ -1917,8 +1905,6 @@ static const struct dc_vbios_funcs vbios_funcs = { .program_crtc_timing = bios_parser_program_crtc_timing, - .crtc_source_select = 
bios_parser_crtc_source_select, - .enable_disp_power_gating = bios_parser_enable_disp_power_gating, .bios_parser_destroy = firmware_parser_destroy, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index fdda8aa8e303..fce46ab54c54 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c @@ -83,101 +83,7 @@ uint32_t bios_get_vga_enabled_displays( { uint32_t active_disp = 1; - if (bios->regs->BIOS_SCRATCH_3) /*follow up with other asic, todo*/ - active_disp = REG_READ(BIOS_SCRATCH_3) & 0XFFFF; + active_disp = REG_READ(BIOS_SCRATCH_3) & 0XFFFF; return active_disp; } -bool bios_is_active_display( - struct dc_bios *bios, - enum signal_type signal, - const struct connector_device_tag_info *device_tag) -{ - uint32_t active = 0; - uint32_t connected = 0; - uint32_t bios_scratch_0 = 0; - uint32_t bios_scratch_3 = 0; - - switch (signal) { - case SIGNAL_TYPE_DVI_SINGLE_LINK: - case SIGNAL_TYPE_DVI_DUAL_LINK: - case SIGNAL_TYPE_HDMI_TYPE_A: - case SIGNAL_TYPE_DISPLAY_PORT: - case SIGNAL_TYPE_DISPLAY_PORT_MST: - { - if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) { - switch (device_tag->dev_id.enum_id) { - case 1: - { - active = ATOM_S3_DFP1_ACTIVE; - connected = 0x0008; //ATOM_DISPLAY_DFP1_CONNECT - } - break; - - case 2: - { - active = ATOM_S3_DFP2_ACTIVE; - connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT - } - break; - - case 3: - { - active = ATOM_S3_DFP3_ACTIVE; - connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT - } - break; - - case 4: - { - active = ATOM_S3_DFP4_ACTIVE; - connected = 0x0400; //ATOM_DISPLAY_DFP4_CONNECT - } - break; - - case 5: - { - active = ATOM_S3_DFP5_ACTIVE; - connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT - } - break; - - case 6: - { - active = ATOM_S3_DFP6_ACTIVE; - connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT - } - break; - - default: - break; - } - } - } - break; - - case SIGNAL_TYPE_LVDS: - case SIGNAL_TYPE_EDP: - { - active = ATOM_S3_LCD1_ACTIVE; - connected = 0x0002; //ATOM_DISPLAY_LCD1_CONNECT - } - break; - - default: - break; - } - - - if (bios->regs->BIOS_SCRATCH_0) /*follow up with other asic, todo*/ - bios_scratch_0 = REG_READ(BIOS_SCRATCH_0); - if (bios->regs->BIOS_SCRATCH_3) /*follow up with other asic, todo*/ - bios_scratch_3 = REG_READ(BIOS_SCRATCH_3); - - bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK; - if ((active & bios_scratch_3) && (connected & bios_scratch_0)) - return true; - - return false; -} - diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index f33cac2147e3..75a29e68fb27 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h @@ -35,10 +35,6 @@ bool bios_is_accelerated_mode(struct dc_bios *bios); void bios_set_scratch_acc_mode_change(struct dc_bios *bios); void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); -bool bios_is_active_display( - struct dc_bios *bios, - enum signal_type signal, - const struct connector_device_tag_info *device_tag); #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 2bd7cd97e00d..5815983caaf8 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ 
b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -55,7 +55,6 @@ static void init_adjust_display_pll(struct bios_parser *bp); static void init_dac_encoder_control(struct bios_parser *bp); static void init_dac_output_control(struct bios_parser *bp); static void init_set_crtc_timing(struct bios_parser *bp); -static void init_select_crtc_source(struct bios_parser *bp); static void init_enable_crtc(struct bios_parser *bp); static void init_enable_crtc_mem_req(struct bios_parser *bp); static void init_external_encoder_control(struct bios_parser *bp); @@ -73,7 +72,6 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp) init_dac_encoder_control(bp); init_dac_output_control(bp); init_set_crtc_timing(bp); - init_select_crtc_source(bp); init_enable_crtc(bp); init_enable_crtc_mem_req(bp); init_program_clock(bp); @@ -964,9 +962,9 @@ static enum bp_result set_pixel_clock_v3( allocation.sPCLKInput.ucPostDiv = (uint8_t)bp_params->pixel_clock_post_divider; - /* We need to convert from KHz units into 10KHz units */ + /* We need to convert from 100Hz units into 10KHz units */ allocation.sPCLKInput.usPixelClock = - cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10)); + cpu_to_le16((uint16_t)(bp_params->target_pixel_clock_100hz / 100)); params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput; params->ucTransmitterId = @@ -1042,9 +1040,9 @@ static enum bp_result set_pixel_clock_v5( (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( bp_params->signal_type, false); - /* We need to convert from KHz units into 10KHz units */ + /* We need to convert from 100Hz units into 10KHz units */ clk.sPCLKInput.usPixelClock = - cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10)); + cpu_to_le16((uint16_t)(bp_params->target_pixel_clock_100hz / 100)); if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) clk.sPCLKInput.ucMiscInfo |= @@ -1118,9 +1116,9 @@ static enum bp_result set_pixel_clock_v6( (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom( bp_params->signal_type, false); - /* We need to convert from KHz units into 10KHz units */ + /* We need to convert from 100 Hz units into 10KHz units */ clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock = - cpu_to_le32(bp_params->target_pixel_clock / 10); + cpu_to_le32(bp_params->target_pixel_clock_100hz / 100); if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) { clk.sPCLKInput.ucMiscInfo |= @@ -1182,8 +1180,7 @@ static enum bp_result set_pixel_clock_v7( clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id)); clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false); - /* We need to convert from KHz units into 10KHz units */ - clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10); + clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock_100hz); clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth); @@ -1896,120 +1893,6 @@ static enum bp_result set_crtc_using_dtd_timing_v3( return result; } -/******************************************************************************* - ******************************************************************************** - ** - ** SELECT CRTC SOURCE - ** - ******************************************************************************** - *******************************************************************************/ - -static enum bp_result select_crtc_source_v2( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params); -static enum bp_result 
select_crtc_source_v3( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params); - -static void init_select_crtc_source(struct bios_parser *bp) -{ - switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) { - case 2: - bp->cmd_tbl.select_crtc_source = select_crtc_source_v2; - break; - case 3: - bp->cmd_tbl.select_crtc_source = select_crtc_source_v3; - break; - default: - dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n", - BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)); - bp->cmd_tbl.select_crtc_source = NULL; - break; - } -} - -static enum bp_result select_crtc_source_v2( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params) -{ - enum bp_result result = BP_RESULT_FAILURE; - SELECT_CRTC_SOURCE_PARAMETERS_V2 params; - uint8_t atom_controller_id; - uint32_t atom_engine_id; - enum signal_type s = bp_params->signal; - - memset(¶ms, 0, sizeof(params)); - - /* set controller id */ - if (bp->cmd_helper->controller_id_to_atom( - bp_params->controller_id, &atom_controller_id)) - params.ucCRTC = atom_controller_id; - else - return BP_RESULT_FAILURE; - - /* set encoder id */ - if (bp->cmd_helper->engine_bp_to_atom( - bp_params->engine_id, &atom_engine_id)) - params.ucEncoderID = (uint8_t)atom_engine_id; - else - return BP_RESULT_FAILURE; - - if (SIGNAL_TYPE_EDP == s || - (SIGNAL_TYPE_DISPLAY_PORT == s && - SIGNAL_TYPE_LVDS == bp_params->sink_signal)) - s = SIGNAL_TYPE_LVDS; - - params.ucEncodeMode = - (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom( - s, bp_params->enable_dp_audio); - - if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) - result = BP_RESULT_OK; - - return result; -} - -static enum bp_result select_crtc_source_v3( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params) -{ - bool result = BP_RESULT_FAILURE; - SELECT_CRTC_SOURCE_PARAMETERS_V3 params; - uint8_t atom_controller_id; - uint32_t atom_engine_id; - enum signal_type s = bp_params->signal; - - memset(¶ms, 0, sizeof(params)); - - if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, - &atom_controller_id)) - params.ucCRTC = atom_controller_id; - else - return result; - - if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id, - &atom_engine_id)) - params.ucEncoderID = (uint8_t)atom_engine_id; - else - return result; - - if (SIGNAL_TYPE_EDP == s || - (SIGNAL_TYPE_DISPLAY_PORT == s && - SIGNAL_TYPE_LVDS == bp_params->sink_signal)) - s = SIGNAL_TYPE_LVDS; - - params.ucEncodeMode = - bp->cmd_helper->encoder_mode_bp_to_atom( - s, bp_params->enable_dp_audio); - /* Needed for VBIOS Random Spatial Dithering feature */ - params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth); - - if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params)) - result = BP_RESULT_OK; - - return result; -} - /******************************************************************************* ******************************************************************************** ** @@ -2164,7 +2047,7 @@ static enum bp_result program_clock_v5( /* We need to convert from KHz units into 10KHz units */ params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id; params.sPCLKInput.usPixelClock = - cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10)); + cpu_to_le16((uint16_t) (bp_params->target_pixel_clock_100hz / 100)); params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID; if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) @@ -2196,7 +2079,7 @@ static enum bp_result program_clock_v6( /* We need to convert from KHz units into 10KHz units */ params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id; 
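
The updated conversion comments in the command-table hunks come down to a factor of 100: the BIOS parser now receives target_pixel_clock_100hz (100 Hz units) while these legacy ATOM parameters still take 10 kHz units, so the divisor changes from 10 to 100, and the set_pixel_clock_v7 paths pass the value through unchanged because their ATOM parameter is already expressed in 100 Hz units. A worked example with an illustrative helper name:

	/* 100 Hz units -> ATOM 10 kHz units: 148.5 MHz is 1485000 in
	 * 100 Hz units; 1485000 / 100 = 14850 (units of 10 kHz).
	 */
	static inline unsigned int pix_clk_100hz_to_10khz(unsigned int pix_clk_100hz)
	{
		return pix_clk_100hz / 100;
	}
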
params.sPCLKInput.ulDispEngClkFreq = - cpu_to_le32(bp_params->target_pixel_clock / 10); + cpu_to_le32(bp_params->target_pixel_clock_100hz / 100); if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h index 94f3d43a7471..ad533775e724 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h @@ -71,9 +71,6 @@ struct cmd_tbl { enum bp_result (*set_crtc_timing)( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); - enum bp_result (*select_crtc_source)( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params); enum bp_result (*enable_crtc)( struct bios_parser *bp, enum controller_id controller_id, diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2b5dc499a35e..bb2e8105e6ab 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -301,17 +301,17 @@ static enum bp_result set_pixel_clock_v7( cmd_helper->encoder_mode_bp_to_atom( bp_params->signal_type, false); - /* We need to convert from KHz units into 10KHz units */ - clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock * - 10); + clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock_100hz); clk.deep_color_ratio = (uint8_t) bp->cmd_helper-> transmitter_color_depth_to_atom( bp_params->color_depth); - DC_LOG_BIOS("%s:program display clock = %d"\ - "colorDepth = %d\n", __func__,\ - bp_params->target_pixel_clock, bp_params->color_depth); + + DC_LOG_BIOS("%s:program display clock = %d, tg = %d, pll = %d, "\ + "colorDepth = %d\n", __func__, + bp_params->target_pixel_clock_100hz, (int)controller_id, + pll_id, bp_params->color_depth); if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL; @@ -460,75 +460,6 @@ static enum bp_result set_crtc_using_dtd_timing_v3( return result; } -/****************************************************************************** - ****************************************************************************** - ** - ** SELECT CRTC SOURCE - ** - ****************************************************************************** - *****************************************************************************/ - - -static enum bp_result select_crtc_source_v3( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params); - -static void init_select_crtc_source(struct bios_parser *bp) -{ - switch (BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source)) { - case 3: - bp->cmd_tbl.select_crtc_source = select_crtc_source_v3; - break; - default: - dm_output_to_console("Don't select_crtc_source enable_crtc for v%d\n", - BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source)); - bp->cmd_tbl.select_crtc_source = NULL; - break; - } -} - - -static enum bp_result select_crtc_source_v3( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params) -{ - bool result = BP_RESULT_FAILURE; - struct select_crtc_source_parameters_v2_3 params; - uint8_t atom_controller_id; - uint32_t atom_engine_id; - enum signal_type s = bp_params->signal; - - memset(¶ms, 0, sizeof(params)); - - if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, - &atom_controller_id)) - params.crtc_id = atom_controller_id; - else - return result; - - if 
(bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id, - &atom_engine_id)) - params.encoder_id = (uint8_t)atom_engine_id; - else - return result; - - if (s == SIGNAL_TYPE_EDP || - (s == SIGNAL_TYPE_DISPLAY_PORT && bp_params->sink_signal == - SIGNAL_TYPE_LVDS)) - s = SIGNAL_TYPE_LVDS; - - params.encode_mode = - bp->cmd_helper->encoder_mode_bp_to_atom( - s, bp_params->enable_dp_audio); - /* Needed for VBIOS Random Spatial Dithering feature */ - params.dst_bpc = (uint8_t)(bp_params->display_output_bit_depth); - - if (EXEC_BIOS_CMD_TABLE(selectcrtc_source, params)) - result = BP_RESULT_OK; - - return result; -} - /****************************************************************************** ****************************************************************************** ** @@ -808,7 +739,6 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp) init_set_crtc_timing(bp); - init_select_crtc_source(bp); init_enable_crtc(bp); init_external_encoder_control(bp); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index ec1c0c9f3f1d..7a2af24dfe60 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -71,9 +71,6 @@ struct cmd_tbl { enum bp_result (*set_crtc_timing)( struct bios_parser *bp, struct bp_hw_crtc_timing_parameters *bp_params); - enum bp_result (*select_crtc_source)( - struct bios_parser *bp, - struct bp_crtc_source_select *bp_params); enum bp_result (*enable_crtc)( struct bios_parser *bp, enum controller_id controller_id, diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 9ebe30ba4dab..f3aa7b53d2aa 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -2792,7 +2792,7 @@ static void populate_initial_data( data->lpt_en[num_displays + 4] = false; data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); - data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000); + data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_100hz, 10000); data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); @@ -2881,7 +2881,7 @@ static void populate_initial_data( /* Pipes without underlay after */ for (i = 0; i < pipe_count; i++) { - unsigned int pixel_clock_khz; + unsigned int pixel_clock_100hz; if (!pipe[i].stream || pipe[i].bottom_pipe) continue; @@ -2890,10 +2890,10 @@ static void populate_initial_data( data->lpt_en[num_displays + 4] = false; data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); - pixel_clock_khz = pipe[i].stream->timing.pix_clk_khz; + pixel_clock_100hz = pipe[i].stream->timing.pix_clk_100hz; if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pixel_clock_khz *= 2; - data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_khz, 1000); + pixel_clock_100hz *= 2; + data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_100hz, 10000); if (pipe[i].plane_state) { 
data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c index d0fc54f8fb1c..1ef0074302c5 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c @@ -63,7 +63,7 @@ void scaler_settings_calculation(struct dcn_bw_internal_vars *v) if (v->interlace_output[k] == 1.0) { v->v_ratio[k] = 2.0 * v->v_ratio[k]; } - if ((v->underscan_output[k] == 1.0)) { + if (v->underscan_output[k] == 1.0) { v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor; v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor; } @@ -797,9 +797,40 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v) else { v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0; } - v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]); - v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4; - v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]); + + do { + v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]); + v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4; + v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]); + + if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3( + v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], + v->extra_latency, + v->htotal[k] / v->pixel_clock[k] / 4.0); + } else { + v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; + } + + if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3(( + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bandwidth[k], + v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, + v->extra_latency); + } else { + v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max2( + v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, + v->extra_latency - v->time_for_meta_pte_with_immediate_flip); + } + + v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / 
v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->maximum_vstartup = v->maximum_vstartup - 1; + + if (v->lines_for_meta_pte_without_immediate_flip[k] < 8.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0) + break; + + } while(1); } v->bw_available_for_immediate_flip = v->return_bw_per_state[i]; for (k = 0; k <= v->number_of_active_planes - 1; k++) { @@ -814,24 +845,18 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v) for (k = 0; k <= v->number_of_active_planes - 1; k++) { if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); - v->time_for_meta_pte_without_immediate_flip =dcn_bw_max3(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); } else { v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; - v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; } if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency); - v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max3((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency); } else { v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip); - v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency - v->time_for_meta_pte_without_immediate_flip); } v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - 
v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k]; v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k]; if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) { diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 43e4a2be0fa6..12d1842079ae 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -290,41 +290,34 @@ static void pipe_ctx_to_e2e_pipe_params ( switch (pipe->plane_state->tiling_info.gfx9.swizzle) { /* for 4/8/16 high tiles */ case DC_SW_LINEAR: - input->src.is_display_sw = 1; input->src.macro_tile_size = dm_4k_tile; break; case DC_SW_4KB_S: case DC_SW_4KB_S_X: - input->src.is_display_sw = 0; input->src.macro_tile_size = dm_4k_tile; break; case DC_SW_64KB_S: case DC_SW_64KB_S_X: case DC_SW_64KB_S_T: - input->src.is_display_sw = 0; input->src.macro_tile_size = dm_64k_tile; break; case DC_SW_VAR_S: case DC_SW_VAR_S_X: - input->src.is_display_sw = 0; input->src.macro_tile_size = dm_256k_tile; break; /* For 64bpp 2 high tiles */ case DC_SW_4KB_D: case DC_SW_4KB_D_X: - input->src.is_display_sw = 1; input->src.macro_tile_size = dm_4k_tile; break; case DC_SW_64KB_D: case DC_SW_64KB_D_X: case DC_SW_64KB_D_T: - input->src.is_display_sw = 1; input->src.macro_tile_size = dm_64k_tile; break; case DC_SW_VAR_D: case DC_SW_VAR_D_X: - input->src.is_display_sw = 1; input->src.macro_tile_size = dm_256k_tile; break; @@ -423,7 +416,7 @@ static void pipe_ctx_to_e2e_pipe_params ( - pipe->stream->timing.v_addressable - pipe->stream->timing.v_border_bottom - pipe->stream->timing.v_border_top; - input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_khz/1000.0; + input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_100hz/10000.0; input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start; input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; @@ -670,9 +663,9 @@ static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v) } static void hack_force_pipe_split(struct dcn_bw_internal_vars *v, - unsigned int pixel_rate_khz) + unsigned int pixel_rate_100hz) { - float pixel_rate_mhz = pixel_rate_khz / 1000; + float pixel_rate_mhz = pixel_rate_100hz / 10000; /* * force enabling pipe split by lower dpp clock for DPM0 to just @@ -695,7 +688,7 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, if (context->stream_count == 1 && dbg->force_single_disp_pipe_split) - hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_khz); + hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz); } bool dcn_validate_bandwidth( @@ -852,7 +845,7 @@ bool dcn_validate_bandwidth( v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total - v->vactive[input_idx] - pipe->stream->timing.v_front_porch; - v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz/1000.0; + v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_100hz/10000.0; if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) v->pixel_clock[input_idx] *= 2; if (!pipe->plane_state) { @@ -961,7 +954,7 @@ bool dcn_validate_bandwidth( v->dcc_rate[input_idx] = 1; /*TODO: Worst case? 
does this change?*/ v->output_format[input_idx] = pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444; - v->output[input_idx] = pipe->stream->sink->sink_signal == + v->output[input_idx] = pipe->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp; v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc; if (v->output[input_idx] == dcn_bw_hdmi) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5fd52094d459..c68fbd55db3c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -384,7 +384,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option) { struct bit_depth_reduction_params params; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; struct pipe_ctx *pipes = NULL; int i; @@ -451,7 +451,7 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) pipes, stream->output_color_space, stream->csc_color_matrix.matrix, - pipes->plane_res.hubp->opp_id); + pipes->plane_res.hubp ? pipes->plane_res.hubp->opp_id : 0); ret = true; } } @@ -526,9 +526,8 @@ void dc_link_set_preferred_link_settings(struct dc *dc, for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->sink - && pipe->stream->sink->link) { - if (pipe->stream->sink->link == link) + if (pipe->stream && pipe->stream->link) { + if (pipe->stream->link == link) break; } } @@ -586,9 +585,6 @@ static void destruct(struct dc *dc) if (dc->ctx->gpio_service) dal_gpio_service_destroy(&dc->ctx->gpio_service); - if (dc->ctx->i2caux) - dal_i2caux_destroy(&dc->ctx->i2caux); - if (dc->ctx->created_bios) dal_bios_parser_destroy(&dc->ctx->dc_bios); @@ -625,7 +621,6 @@ static bool construct(struct dc *dc, #endif enum dce_version dc_version = DCE_VERSION_UNKNOWN; - dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL); if (!dc_dceip) { dm_error("%s: failed to create dceip\n", __func__); @@ -670,6 +665,7 @@ static bool construct(struct dc *dc, dc_ctx->dc = dc; dc_ctx->asic_id = init_params->asic_id; dc_ctx->dc_sink_id_count = 0; + dc_ctx->dc_stream_id_count = 0; dc->ctx = dc_ctx; dc->current_state = dc_create_state(); @@ -709,14 +705,6 @@ static bool construct(struct dc *dc, dc_ctx->created_bios = true; } - /* Create I2C AUX */ - dc_ctx->i2caux = dal_i2caux_create(dc_ctx); - - if (!dc_ctx->i2caux) { - ASSERT_CRITICAL(false); - goto fail; - } - dc_ctx->perf_trace = dc_perf_trace_create(); if (!dc_ctx->perf_trace) { ASSERT_CRITICAL(false); @@ -840,6 +828,11 @@ alloc_fail: return NULL; } +void dc_init_callbacks(struct dc *dc, + const struct dc_callback_init *init_params) +{ +} + void dc_destroy(struct dc **dc) { destruct(*dc); @@ -875,8 +868,9 @@ static void program_timing_sync( struct dc *dc, struct dc_state *ctx) { - int i, j; + int i, j, k; int group_index = 0; + int num_group = 0; int pipe_count = dc->res_pool->pipe_count; struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL }; @@ -913,11 +907,11 @@ static void program_timing_sync( } } - /* set first unblanked pipe as master */ + /* set first pipe with plane as master */ for (j = 0; j < group_size; j++) { struct pipe_ctx *temp; - if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) { + if (pipe_set[j]->plane_state) { if (j == 0) break; @@ -928,9 +922,21 @@ static void program_timing_sync( } } - /* remove any other unblanked pipes 
as they have already been synced */ + + for (k = 0; k < group_size; k++) { + struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream); + + status->timing_sync_info.group_id = num_group; + status->timing_sync_info.group_size = group_size; + if (k == 0) + status->timing_sync_info.master = true; + else + status->timing_sync_info.master = false; + + } + /* remove any other pipes with plane as they have already been synced */ for (j = j + 1; j < group_size; j++) { - if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) { + if (pipe_set[j]->plane_state) { group_size--; pipe_set[j] = pipe_set[group_size]; j--; @@ -942,6 +948,7 @@ static void program_timing_sync( dc, group_index, group_size, pipe_set); group_index++; } + num_group++; } } @@ -962,6 +969,52 @@ static bool context_changed( return false; } +bool dc_validate_seamless_boot_timing(struct dc *dc, + const struct dc_sink *sink, + struct dc_crtc_timing *crtc_timing) +{ + struct timing_generator *tg; + struct dc_link *link = sink->link; + unsigned int inst; + + /* Check for enabled DIG to identify enabled display */ + if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) + return false; + + /* Check for which front end is used by this encoder. + * Note the inst is 1 indexed, where 0 is undefined. + * Note that DIG_FE can source from different OTG but our + * current implementation always map 1-to-1, so this code makes + * the same assumption and doesn't check OTG source. + */ + inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1; + + /* Instance should be within the range of the pool */ + if (inst >= dc->res_pool->pipe_count) + return false; + + tg = dc->res_pool->timing_generators[inst]; + + if (!tg->funcs->is_matching_timing) + return false; + + if (!tg->funcs->is_matching_timing(tg, crtc_timing)) + return false; + + if (dc_is_dp_signal(link->connector_signal)) { + unsigned int pix_clk_100hz; + + dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz( + dc->res_pool->dp_clock_source, + inst, &pix_clk_100hz); + + if (crtc_timing->pix_clk_100hz != pix_clk_100hz) + return false; + } + + return true; +} + bool dc_enable_stereo( struct dc *dc, struct dc_state *context, @@ -1040,7 +1093,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* Program all planes within new context*/ for (i = 0; i < context->stream_count; i++) { - const struct dc_sink *sink = context->streams[i]->sink; + const struct dc_link *link = context->streams[i]->link; + struct dc_stream_status *status; + + if (context->streams[i]->apply_seamless_boot_optimization) + context->streams[i]->apply_seamless_boot_optimization = false; if (!context->streams[i]->mode_changed) continue; @@ -1065,12 +1122,15 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } } - CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}", + status = dc_stream_get_status_from_state(context, context->streams[i]); + context->streams[i]->out.otg_offset = status->primary_otg_inst; + + CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}", context->streams[i]->timing.h_addressable, context->streams[i]->timing.v_addressable, context->streams[i]->timing.h_total, context->streams[i]->timing.v_total, - context->streams[i]->timing.pix_clk_khz); + context->streams[i]->timing.pix_clk_100hz / 10); } dc_enable_stereo(dc, context, dc_streams, context->stream_count); @@ -1078,6 +1138,9 @@ static enum dc_status 
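
dc_validate_seamless_boot_timing(), added above, only answers whether the timing a display is already driving (DIG enabled, matching OTG timing, and for DP a matching pixel clock) is compatible with the timing being requested; how a display manager acts on that answer is outside this hunk. A purely hypothetical usage sketch, with the wrapper name invented for illustration:

	/* Hypothetical caller: adopt the firmware-lit timing and skip the
	 * initial full programming pass when the new helper says it matches.
	 */
	static void maybe_request_seamless_boot(struct dc *dc,
						struct dc_stream_state *stream)
	{
		if (stream->sink &&
		    dc_validate_seamless_boot_timing(dc, stream->sink,
						     &stream->timing))
			stream->apply_seamless_boot_optimization = true;
	}
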
dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* pplib is notified if disp_num changed */ dc->hwss.optimize_bandwidth(dc, context); + for (i = 0; i < context->stream_count; i++) + context->streams[i]->mode_changed = false; + dc_release_state(dc->current_state); dc->current_state = context; @@ -1114,6 +1177,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) int i; struct dc_state *context = dc->current_state; + if (dc->optimized_required == false) + return true; + post_surface_trace(dc); for (i = 0; i < dc->res_pool->pipe_count; i++) @@ -1215,6 +1281,12 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa */ update_flags->bits.bpp_change = 1; + if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch + || u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch + || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch) + update_flags->bits.plane_size_change = 1; + + if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, sizeof(union dc_tiling_info)) != 0) { update_flags->bits.swizzle_change = 1; @@ -1236,7 +1308,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa || update_flags->bits.output_tf_change) return UPDATE_TYPE_FULL; - return UPDATE_TYPE_MED; + return update_flags->raw ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST; } static enum surface_update_type get_scaling_info_update_type( @@ -1436,6 +1508,101 @@ static struct dc_stream_status *stream_get_status( static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; +static void copy_surface_update_to_plane( + struct dc_plane_state *surface, + struct dc_surface_update *srf_update) +{ + if (srf_update->flip_addr) { + surface->address = srf_update->flip_addr->address; + surface->flip_immediate = + srf_update->flip_addr->flip_immediate; + surface->time.time_elapsed_in_us[surface->time.index] = + srf_update->flip_addr->flip_timestamp_in_us - + surface->time.prev_update_time_in_us; + surface->time.prev_update_time_in_us = + srf_update->flip_addr->flip_timestamp_in_us; + surface->time.index++; + if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX) + surface->time.index = 0; + } + + if (srf_update->scaling_info) { + surface->scaling_quality = + srf_update->scaling_info->scaling_quality; + surface->dst_rect = + srf_update->scaling_info->dst_rect; + surface->src_rect = + srf_update->scaling_info->src_rect; + surface->clip_rect = + srf_update->scaling_info->clip_rect; + } + + if (srf_update->plane_info) { + surface->color_space = + srf_update->plane_info->color_space; + surface->format = + srf_update->plane_info->format; + surface->plane_size = + srf_update->plane_info->plane_size; + surface->rotation = + srf_update->plane_info->rotation; + surface->horizontal_mirror = + srf_update->plane_info->horizontal_mirror; + surface->stereo_format = + srf_update->plane_info->stereo_format; + surface->tiling_info = + srf_update->plane_info->tiling_info; + surface->visible = + srf_update->plane_info->visible; + surface->per_pixel_alpha = + srf_update->plane_info->per_pixel_alpha; + surface->global_alpha = + srf_update->plane_info->global_alpha; + surface->global_alpha_value = + srf_update->plane_info->global_alpha_value; + surface->dcc = + srf_update->plane_info->dcc; + surface->sdr_white_level = + srf_update->plane_info->sdr_white_level; + } + + if (srf_update->gamma && + (surface->gamma_correction != + srf_update->gamma)) { + 
memcpy(&surface->gamma_correction->entries, + &srf_update->gamma->entries, + sizeof(struct dc_gamma_entries)); + surface->gamma_correction->is_identity = + srf_update->gamma->is_identity; + surface->gamma_correction->num_entries = + srf_update->gamma->num_entries; + surface->gamma_correction->type = + srf_update->gamma->type; + } + + if (srf_update->in_transfer_func && + (surface->in_transfer_func != + srf_update->in_transfer_func)) { + surface->in_transfer_func->sdr_ref_white_level = + srf_update->in_transfer_func->sdr_ref_white_level; + surface->in_transfer_func->tf = + srf_update->in_transfer_func->tf; + surface->in_transfer_func->type = + srf_update->in_transfer_func->type; + memcpy(&surface->in_transfer_func->tf_pts, + &srf_update->in_transfer_func->tf_pts, + sizeof(struct dc_transfer_func_distributed_points)); + } + + if (srf_update->input_csc_color_matrix) + surface->input_csc_color_matrix = + *srf_update->input_csc_color_matrix; + + if (srf_update->coeff_reduction_factor) + surface->coeff_reduction_factor = + *srf_update->coeff_reduction_factor; +} + static void commit_planes_do_stream_update(struct dc *dc, struct dc_stream_state *stream, struct dc_stream_update *stream_update, @@ -1459,11 +1626,13 @@ static void commit_planes_do_stream_update(struct dc *dc, stream_update->adjust->v_total_min, stream_update->adjust->v_total_max); - if (stream_update->periodic_fn_vsync_delta && - pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) - pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, - pipe_ctx->stream->periodic_fn_vsync_delta); + if (stream_update->periodic_interrupt0 && + dc->hwss.setup_periodic_interrupt) + dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0); + + if (stream_update->periodic_interrupt1 && + dc->hwss.setup_periodic_interrupt) + dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE1); if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) || stream_update->vrr_infopacket || @@ -1605,7 +1774,6 @@ void dc_commit_updates_for_stream(struct dc *dc, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, - struct dc_plane_state **plane_states, struct dc_state *state) { const struct dc_stream_status *stream_status; @@ -1640,14 +1808,7 @@ void dc_commit_updates_for_stream(struct dc *dc, for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; - /* TODO: On flip we don't build the state, so it still has the - * old address. 
Which is why we are updating the address here - */ - if (srf_updates[i].flip_addr) { - surface->address = srf_updates[i].flip_addr->address; - surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate; - - } + copy_surface_update_to_plane(surface, &srf_updates[i]); if (update_type >= UPDATE_TYPE_MED) { for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -1764,6 +1925,26 @@ void dc_resume(struct dc *dc) core_link_resume(dc->links[i]); } +unsigned int dc_get_current_backlight_pwm(struct dc *dc) +{ + struct abm *abm = dc->res_pool->abm; + + if (abm) + return abm->funcs->get_current_backlight(abm); + + return 0; +} + +unsigned int dc_get_target_backlight_pwm(struct dc *dc) +{ + struct abm *abm = dc->res_pool->abm; + + if (abm) + return abm->funcs->get_target_backlight(abm); + + return 0; +} + bool dc_is_dmcu_initialized(struct dc *dc) { struct dmcu *dmcu = dc->res_pool->dmcu; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index b0265dbebd4c..7f5a947ad31d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -43,10 +43,6 @@ #include "dpcd_defs.h" #include "dmcu.h" -#include "dce/dce_11_0_d.h" -#include "dce/dce_11_0_enum.h" -#include "dce/dce_11_0_sh_mask.h" - #define DC_LOGGER_INIT(logger) @@ -80,6 +76,12 @@ static void destruct(struct dc_link *link) { int i; + if (link->hpd_gpio != NULL) { + dal_gpio_close(link->hpd_gpio); + dal_gpio_destroy_irq(&link->hpd_gpio); + link->hpd_gpio = NULL; + } + if (link->ddc) dal_ddc_service_destroy(&link->ddc); @@ -789,7 +791,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return false; } - sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; + sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; sink->converter_disable_audio = converter_disable_audio; link->local_sink = sink; @@ -935,18 +937,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) bool dc_link_get_hpd_state(struct dc_link *dc_link) { - struct gpio *hpd_pin; uint32_t state; - hpd_pin = get_hpd_gpio(dc_link->ctx->dc_bios, - dc_link->link_id, dc_link->ctx->gpio_service); - if (hpd_pin == NULL) - ASSERT(false); - - dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); - dal_gpio_get_value(hpd_pin, &state); - dal_gpio_close(hpd_pin); - dal_gpio_destroy_irq(&hpd_pin); + dal_gpio_lock_pin(dc_link->hpd_gpio); + dal_gpio_get_value(dc_link->hpd_gpio, &state); + dal_gpio_unlock_pin(dc_link->hpd_gpio); return state; } @@ -1102,7 +1097,6 @@ static bool construct( const struct link_init_data *init_params) { uint8_t i; - struct gpio *hpd_gpio = NULL; struct ddc_service_init_data ddc_service_init_data = { { 0 } }; struct dc_context *dc_ctx = init_params->ctx; struct encoder_init_data enc_init_data = { 0 }; @@ -1132,10 +1126,12 @@ static bool construct( if (link->dc->res_pool->funcs->link_init) link->dc->res_pool->funcs->link_init(link); - hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); - - if (hpd_gpio != NULL) - link->irq_source_hpd = dal_irq_get_source(hpd_gpio); + link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); + if (link->hpd_gpio != NULL) { + dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); + dal_gpio_unlock_pin(link->hpd_gpio); + link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); + } switch (link->link_id.id) { case CONNECTOR_ID_HDMI_TYPE_A: @@ -1153,18 +1149,18 @@ static bool construct( case CONNECTOR_ID_DISPLAY_PORT: 
link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; - if (hpd_gpio != NULL) + if (link->hpd_gpio != NULL) link->irq_source_hpd_rx = - dal_irq_get_rx_source(hpd_gpio); + dal_irq_get_rx_source(link->hpd_gpio); break; case CONNECTOR_ID_EDP: link->connector_signal = SIGNAL_TYPE_EDP; - if (hpd_gpio != NULL) { + if (link->hpd_gpio != NULL) { link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; link->irq_source_hpd_rx = - dal_irq_get_rx_source(hpd_gpio); + dal_irq_get_rx_source(link->hpd_gpio); } break; case CONNECTOR_ID_LVDS: @@ -1175,10 +1171,7 @@ static bool construct( goto create_fail; } - if (hpd_gpio != NULL) { - dal_gpio_destroy_irq(&hpd_gpio); - hpd_gpio = NULL; - } + /* TODO: #DAL3 Implement id to str function.*/ LINK_INFO("Connector[%d] description:" @@ -1281,8 +1274,9 @@ link_enc_create_fail: ddc_create_fail: create_fail: - if (hpd_gpio != NULL) { - dal_gpio_destroy_irq(&hpd_gpio); + if (link->hpd_gpio != NULL) { + dal_gpio_destroy_irq(&link->hpd_gpio); + link->hpd_gpio = NULL; } return false; @@ -1372,7 +1366,7 @@ static void dpcd_configure_panel_mode( static void enable_stream_features(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; union down_spread_ctrl old_downspread; union down_spread_ctrl new_downspread; @@ -1397,7 +1391,7 @@ static enum dc_status enable_link_dp( struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; bool skip_video_pattern; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; struct dc_link_settings link_settings = {0}; enum dp_panel_mode panel_mode; @@ -1414,8 +1408,8 @@ static enum dc_status enable_link_dp( pipe_ctx->clock_source->id, &link_settings); - if (stream->sink->edid_caps.panel_patch.dppowerup_delay > 0) { - int delay_dp_power_up_in_ms = stream->sink->edid_caps.panel_patch.dppowerup_delay; + if (stream->sink_patches.dppowerup_delay > 0) { + int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; msleep(delay_dp_power_up_in_ms); } @@ -1448,7 +1442,7 @@ static enum dc_status enable_link_edp( { enum dc_status status; struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; /*in case it is not on*/ link->dc->hwss.edp_power_control(link, true); link->dc->hwss.edp_wait_for_hpd_ready(link, true); @@ -1463,7 +1457,7 @@ static enum dc_status enable_link_dp_mst( struct dc_state *state, struct pipe_ctx *pipe_ctx) { - struct dc_link *link = pipe_ctx->stream->sink->link; + struct dc_link *link = pipe_ctx->stream->link; /* sink signal type after MST branch is MST. Multiple MST sinks * share one link. Link DP PHY is enable or training only once. 
@@ -1471,6 +1465,11 @@ static enum dc_status enable_link_dp_mst( if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) return DC_OK; + /* to make sure the pending down rep can be processed + * before clear payload table + */ + dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link); + /* clear payload table */ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link); @@ -1597,7 +1596,7 @@ static bool i2c_write(struct pipe_ctx *pipe_ctx, cmd.payloads = &payload; if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx, - pipe_ctx->stream->sink->link, &cmd)) + pipe_ctx->stream->link, &cmd)) return true; return false; @@ -1651,7 +1650,7 @@ static void write_i2c_retimer_setting( else { i2c_success = dal_ddc_service_query_ddc_data( - pipe_ctx->stream->sink->link->ddc, + pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) /* Write failure */ @@ -1704,7 +1703,7 @@ static void write_i2c_retimer_setting( else { i2c_success = dal_ddc_service_query_ddc_data( - pipe_ctx->stream->sink->link->ddc, + pipe_ctx->stream->link->ddc, slave_address, &offset, 1, &value, 1); if (!i2c_success) /* Write failure */ @@ -1929,7 +1928,7 @@ static void write_i2c_redriver_setting( static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; enum dc_color_depth display_color_depth; enum engine_id eng_id; struct ext_hdmi_settings settings = {0}; @@ -1938,12 +1937,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) && (stream->timing.v_addressable == 480); if (stream->phy_pix_clk == 0) - stream->phy_pix_clk = stream->timing.pix_clk_khz; + stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; if (stream->phy_pix_clk > 340000) is_over_340mhz = true; if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { - unsigned short masked_chip_caps = pipe_ctx->stream->sink->link->chip_caps & + unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps & EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { /* DP159, Retimer settings */ @@ -1964,11 +1963,11 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) dal_ddc_service_write_scdc_data( - stream->sink->link->ddc, + stream->link->ddc, stream->phy_pix_clk, stream->timing.flags.LTE_340MCSC_SCRAMBLE); - memset(&stream->sink->link->cur_link_settings, 0, + memset(&stream->link->cur_link_settings, 0, sizeof(struct dc_link_settings)); display_color_depth = stream->timing.display_color_depth; @@ -1989,12 +1988,12 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) static void enable_link_lvds(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; if (stream->phy_pix_clk == 0) - stream->phy_pix_clk = stream->timing.pix_clk_khz; + stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; - memset(&stream->sink->link->cur_link_settings, 0, + memset(&stream->link->cur_link_settings, 0, sizeof(struct dc_link_settings)); link->link_enc->funcs->enable_lvds_output( @@ -2067,7 +2066,7 @@ static bool dp_active_dongle_validate_timing( const struct dc_crtc_timing *timing, const struct dpcd_caps *dpcd_caps) { - unsigned int required_pix_clk = timing->pix_clk_khz; + unsigned int required_pix_clk_100hz = timing->pix_clk_100hz; const struct dc_dongle_caps *dongle_caps = 
&dpcd_caps->dongle_caps; switch (dpcd_caps->dongle_type) { @@ -2107,9 +2106,9 @@ static bool dp_active_dongle_validate_timing( /* Check Color Depth and Pixel Clock */ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) - required_pix_clk /= 2; + required_pix_clk_100hz /= 2; else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) - required_pix_clk = required_pix_clk * 2 / 3; + required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3; switch (timing->display_color_depth) { case COLOR_DEPTH_666: @@ -2119,12 +2118,12 @@ static bool dp_active_dongle_validate_timing( case COLOR_DEPTH_101010: if (dongle_caps->dp_hdmi_max_bpc < 10) return false; - required_pix_clk = required_pix_clk * 10 / 8; + required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8; break; case COLOR_DEPTH_121212: if (dongle_caps->dp_hdmi_max_bpc < 12) return false; - required_pix_clk = required_pix_clk * 12 / 8; + required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8; break; case COLOR_DEPTH_141414: @@ -2134,7 +2133,7 @@ static bool dp_active_dongle_validate_timing( return false; } - if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk) + if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10)) return false; return true; @@ -2145,7 +2144,7 @@ enum dc_status dc_link_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) { - uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk; + uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10; struct dpcd_caps *dpcd_caps = &link->dpcd_caps; /* A hack to avoid failing any modes for EDID override feature on @@ -2155,7 +2154,7 @@ enum dc_status dc_link_validate_mode_timing( return DC_OK; /* Passive Dongle */ - if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk) + if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk) return DC_EXCEED_DONGLE_CAP; /* Active Dongle*/ @@ -2214,7 +2213,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, for (i = 0; i < MAX_PIPES; i++) { if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { if (core_dc->current_state->res_ctx. 
- pipe_ctx[i].stream->sink->link + pipe_ctx[i].stream->link == link) /* DMCU -1 for all controller id values, * therefore +1 here @@ -2274,7 +2273,7 @@ void core_link_resume(struct dc_link *link) static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) { struct dc_link_settings *link_settings = - &stream->sink->link->cur_link_settings; + &stream->link->cur_link_settings; uint32_t link_rate_in_mbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ; struct fixed31_32 mbps = dc_fixpt_from_int( @@ -2305,7 +2304,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) uint32_t denominator; bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth); - kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk * bpc * 3; + kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3; /* * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 @@ -2381,7 +2380,7 @@ static void update_mst_stream_alloc_table( static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; struct link_encoder *link_encoder = link->link_enc; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; @@ -2461,7 +2460,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; struct link_encoder *link_encoder = link->link_enc; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; @@ -2546,8 +2545,8 @@ void core_link_enable_stream( DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) { - stream->sink->link->link_enc->funcs->setup( - stream->sink->link->link_enc, + stream->link->link_enc->funcs->setup( + stream->link->link_enc, pipe_ctx->stream->signal); pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync( pipe_ctx->stream_res.stream_enc, @@ -2581,13 +2580,23 @@ void core_link_enable_stream( &stream->timing); if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + bool apply_edp_fast_boot_optimization = + pipe_ctx->stream->apply_edp_fast_boot_optimization; + + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + resource_build_info_frame(pipe_ctx); core_dc->hwss.update_info_frame(pipe_ctx); + /* Do not touch link on seamless boot optimization. */ + if (pipe_ctx->stream->apply_seamless_boot_optimization) { + pipe_ctx->stream->dpms_off = false; + return; + } + /* eDP lit up by bios already, no need to enable again. 
*/ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && - pipe_ctx->stream->apply_edp_fast_boot_optimization) { - pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + apply_edp_fast_boot_optimization) { pipe_ctx->stream->dpms_off = false; return; } @@ -2599,7 +2608,7 @@ void core_link_enable_stream( if (status != DC_OK) { DC_LOG_WARNING("enabling link %u failed: %d\n", - pipe_ctx->stream->sink->link->link_index, + pipe_ctx->stream->link->link_index, status); /* Abort stream enable *unless* the failure was due to @@ -2614,6 +2623,8 @@ void core_link_enable_stream( } } + stream->link->link_status.link_active = true; + core_dc->hwss.enable_audio_stream(pipe_ctx); /* turn off otg test pattern if enable */ @@ -2628,7 +2639,7 @@ void core_link_enable_stream( allocate_mst_payload(pipe_ctx); core_dc->hwss.unblank_stream(pipe_ctx, - &pipe_ctx->stream->sink->link->cur_link_settings); + &pipe_ctx->stream->link->cur_link_settings); if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); @@ -2647,7 +2658,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) core_dc->hwss.disable_stream(pipe_ctx, option); - disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal); + disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); + + pipe_ctx->stream->link->link_status.link_active = false; } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 506a97e16956..b7ee63cd8dc7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -33,7 +33,7 @@ #include "include/vector.h" #include "core_types.h" #include "dc_link_ddc.h" -#include "aux_engine.h" +#include "dce/dce_aux.h" #define AUX_POWER_UP_WA_DELAY 500 #define I2C_OVER_AUX_DEFER_WA_DELAY 70 @@ -42,7 +42,6 @@ #define CV_SMART_DONGLE_ADDRESS 0x20 /* DVI-HDMI dongle slave address for retrieving dongle signature*/ #define DVI_HDMI_DONGLE_ADDRESS 0x68 -static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G"; struct dvi_hdmi_dongle_signature_data { int8_t vendor[3];/* "AMD" */ uint8_t version[2]; @@ -165,43 +164,6 @@ static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p) } -static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count) -{ - struct aux_payloads *payloads; - - payloads = kzalloc(sizeof(struct aux_payloads), GFP_KERNEL); - - if (!payloads) - return NULL; - - if (dal_vector_construct( - &payloads->payloads, ctx, count, sizeof(struct aux_payload))) - return payloads; - - kfree(payloads); - return NULL; -} - -static struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p) -{ - return (struct aux_payload *)p->payloads.container; -} - -static uint32_t dal_ddc_aux_payloads_get_count(struct aux_payloads *p) -{ - return p->payloads.count; -} - -static void dal_ddc_aux_payloads_destroy(struct aux_payloads **p) -{ - if (!p || !*p) - return; - - dal_vector_destruct(&(*p)->payloads); - kfree(*p); - *p = NULL; -} - #define DDC_MIN(a, b) (((a) < (b)) ? 
(a) : (b)) void dal_ddc_i2c_payloads_add( @@ -225,27 +187,6 @@ void dal_ddc_i2c_payloads_add( } -void dal_ddc_aux_payloads_add( - struct aux_payloads *payloads, - uint32_t address, - uint32_t len, - uint8_t *data, - bool write) -{ - uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE; - uint32_t pos; - - for (pos = 0; pos < len; pos += payload_size) { - struct aux_payload payload = { - .i2c_over_aux = true, - .write = write, - .address = address, - .length = DDC_MIN(payload_size, len - pos), - .data = data + pos }; - dal_vector_append(&payloads->payloads, &payload); - } -} - static void construct( struct ddc_service *ddc_service, struct ddc_service_init_data *init_data) @@ -574,32 +515,34 @@ bool dal_ddc_service_query_ddc_data( /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { - - struct aux_payloads *payloads = - dal_ddc_aux_payloads_create(ddc->ctx, payloads_num); - - struct aux_command command = { - .payloads = dal_ddc_aux_payloads_get(payloads), - .number_of_payloads = 0, + struct aux_payload write_payload = { + .i2c_over_aux = true, + .write = true, + .mot = true, + .address = address, + .length = write_size, + .data = write_buf, + .reply = NULL, .defer_delay = get_defer_delay(ddc), - .max_defer_write_retry = 0 }; + }; - dal_ddc_aux_payloads_add( - payloads, address, write_size, write_buf, true); - - dal_ddc_aux_payloads_add( - payloads, address, read_size, read_buf, false); - - command.number_of_payloads = - dal_ddc_aux_payloads_get_count(payloads); + struct aux_payload read_payload = { + .i2c_over_aux = true, + .write = false, + .mot = false, + .address = address, + .length = read_size, + .data = read_buf, + .reply = NULL, + .defer_delay = get_defer_delay(ddc), + }; - ret = dal_i2caux_submit_aux_command( - ddc->ctx->i2caux, - ddc->ddc_pin, - &command); + ret = dc_link_aux_transfer_with_retries(ddc, &write_payload); - dal_ddc_aux_payloads_destroy(&payloads); + if (!ret) + return false; + ret = dc_link_aux_transfer_with_retries(ddc, &read_payload); } else { struct i2c_payloads *payloads = dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); @@ -631,56 +574,15 @@ bool dal_ddc_service_query_ddc_data( } int dc_link_aux_transfer(struct ddc_service *ddc, - unsigned int address, - uint8_t *reply, - void *buffer, - unsigned int size, - enum aux_transaction_type type, - enum i2caux_transaction_action action) + struct aux_payload *payload) { - struct ddc *ddc_pin = ddc->ddc_pin; - struct aux_engine *aux_engine; - enum aux_channel_operation_result operation_result; - struct aux_request_transaction_data aux_req; - struct aux_reply_transaction_data aux_rep; - uint8_t returned_bytes = 0; - int res = -1; - uint32_t status; - - memset(&aux_req, 0, sizeof(aux_req)); - memset(&aux_rep, 0, sizeof(aux_rep)); - - aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; - aux_engine->funcs->acquire(aux_engine, ddc_pin); - - aux_req.type = type; - aux_req.action = action; - - aux_req.address = address; - aux_req.delay = 0; - aux_req.length = size; - aux_req.data = buffer; - - aux_engine->funcs->submit_channel_request(aux_engine, &aux_req); - operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes); - - switch (operation_result) { - case AUX_CHANNEL_OPERATION_SUCCEEDED: - res = aux_engine->funcs->read_channel_reply(aux_engine, size, - buffer, reply, - &status); - break; - case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: - res = 0; - break; - case 
AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: - case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: - case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - res = -1; - break; - } - aux_engine->funcs->release_engine(aux_engine); - return res; + return dce_aux_transfer(ddc, payload); +} + +bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, + struct aux_payload *payload) +{ + return dce_aux_transfer_with_retries(ddc, payload); } /*test only function*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 0caacb60b02f..09d301216076 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -49,6 +49,8 @@ static void wait_for_training_aux_rd_interval( { union training_aux_rd_interval training_rd_interval; + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + /* overwrite the delay if rev > 1.1*/ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { /* DP 1.2 or later - retrieve delay through @@ -117,6 +119,13 @@ static void dpcd_set_link_settings( core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, &downspread.raw, sizeof(downspread)); + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + (link->dpcd_caps.link_rate_set >= 1 && + link->dpcd_caps.link_rate_set <= 8)) { + core_link_write_dpcd(link, DP_LINK_RATE_SET, + &link->dpcd_caps.link_rate_set, 1); + } + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n", __func__, DP_LINK_BW_SET, @@ -1542,7 +1551,7 @@ static uint32_t bandwidth_in_kbps_from_timing( ASSERT(bits_per_channel != 0); - kbps = timing->pix_clk_khz; + kbps = timing->pix_clk_100hz / 10; kbps *= bits_per_channel; if (timing->flags.Y_ONLY != 1) { @@ -1584,7 +1593,7 @@ bool dp_validate_mode_timing( const struct dc_link_settings *link_setting; /*always DP fail safe mode*/ - if (timing->pix_clk_khz == (uint32_t) 25175 && + if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 && timing->h_addressable == (uint32_t) 640 && timing->v_addressable == (uint32_t) 480) return true; @@ -1634,7 +1643,7 @@ void decide_link_settings(struct dc_stream_state *stream, req_bw = bandwidth_in_kbps_from_timing(&stream->timing); - link = stream->sink->link; + link = stream->link; /* if preferred is specified through AMDDP, use it, if it's enough * to drive the mode @@ -1656,7 +1665,7 @@ void decide_link_settings(struct dc_stream_state *stream, } /* EDP use the link cap setting */ - if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) { + if (link->connector_signal == SIGNAL_TYPE_EDP) { *link_setting = link->verified_link_cap; return; } @@ -2002,11 +2011,7 @@ static void handle_automated_test(struct dc_link *link) dp_test_send_phy_test_pattern(link); test_response.bits.ACK = 1; } - if (!test_request.raw) - /* no requests, revert all test signals - * TODO: revert all test signals - */ - test_response.bits.ACK = 1; + /* send request acknowledgment */ if (test_response.bits.ACK) core_link_write_dpcd( @@ -2493,13 +2498,72 @@ bool detect_dp_sink_caps(struct dc_link *link) /* TODO save sink caps in link->sink */ } +enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) +{ + enum dc_link_rate link_rate; + // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. 
+ switch (link_rate_in_khz) { + case 1620000: + link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane + break; + case 2160000: + link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane + break; + case 2430000: + link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane + break; + case 2700000: + link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane + break; + case 3240000: + link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2) - 3.24 Gbps/Lane + break; + case 4320000: + link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane + break; + case 5400000: + link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2) - 5.40 Gbps/Lane + break; + case 8100000: + link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3) - 8.10 Gbps/Lane + break; + default: + link_rate = LINK_RATE_UNKNOWN; + break; + } + return link_rate; +} + void detect_edp_sink_caps(struct dc_link *link) { - retrieve_link_cap(link); + uint8_t supported_link_rates[16] = {0}; + uint32_t entry; + uint32_t link_rate_in_khz; + enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; - if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN) - link->reported_link_cap.link_rate = LINK_RATE_HIGH2; + retrieve_link_cap(link); + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { + // Read DPCD 00010h - 0001Fh 16 bytes at one shot + core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + link->dpcd_caps.link_rate_set = 0; + for (entry = 0; entry < 16; entry += 2) { + // DPCD register reports per-lane link rate = 16-bit link rate capability + // value X 200 kHz. Need multipler to find link rate in kHz. + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + + if (link_rate_in_khz != 0) { + link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); + if (link->reported_link_cap.link_rate < link_rate) { + link->reported_link_cap.link_rate = link_rate; + link->dpcd_caps.link_rate_set = entry; + } + } + } + } link->verified_link_cap = link->reported_link_cap; } @@ -2621,7 +2685,7 @@ bool dc_link_dp_set_test_pattern( memset(&training_pattern, 0, sizeof(training_pattern)); for (i = 0; i < MAX_PIPES; i++) { - if (pipes[i].stream->sink->link == link) { + if (pipes[i].stream->link == link) { pipe_ctx = &pipes[i]; break; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 0065ec7d5330..f7f7515f65f4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -70,13 +70,12 @@ void dp_enable_link_phy( */ for (i = 0; i < MAX_PIPES; i++) { if (pipes[i].stream != NULL && - pipes[i].stream->sink != NULL && - pipes[i].stream->sink->link == link) { + pipes[i].stream->link == link) { if (pipes[i].clock_source != NULL && pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) { pipes[i].clock_source = dp_cs; - pipes[i].stream_res.pix_clk_params.requested_pix_clk = - pipes[i].stream->timing.pix_clk_khz; + pipes[i].stream_res.pix_clk_params.requested_pix_clk_100hz = + pipes[i].stream->timing.pix_clk_100hz; pipes[i].clock_source->funcs->program_pix_clk( pipes[i].clock_source, &pipes[i].stream_res.pix_clk_params, @@ -120,6 +119,10 @@ bool edp_receiver_ready_T9(struct dc_link *link) break; udelay(100); //MAx T9 } while (++tries < 50); + + if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0) + udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000); + return result; } bool edp_receiver_ready_T7(struct dc_link 
*link) @@ -279,10 +282,8 @@ void dp_retrain_link_dp_test(struct dc_link *link, for (i = 0; i < MAX_PIPES; i++) { if (pipes[i].stream != NULL && !pipes[i].top_pipe && - pipes[i].stream->sink != NULL && - pipes[i].stream->sink->link != NULL && - pipes[i].stream_res.stream_enc != NULL && - pipes[i].stream->sink->link == link) { + pipes[i].stream->link != NULL && + pipes[i].stream_res.stream_enc != NULL) { udelay(100); pipes[i].stream_res.stream_enc->funcs->dp_blank( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 76137df74a53..349ab8017776 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -355,8 +355,8 @@ bool resource_are_streams_timing_synchronizable( != stream2->timing.v_addressable) return false; - if (stream1->timing.pix_clk_khz - != stream2->timing.pix_clk_khz) + if (stream1->timing.pix_clk_100hz + != stream2->timing.pix_clk_100hz) return false; if (stream1->clamping.c_depth != stream2->clamping.c_depth) @@ -1559,7 +1559,7 @@ static struct stream_encoder *find_first_free_match_stream_enc_for_link( { int i; int j = -1; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; for (i = 0; i < pool->stream_enc_count; i++) { if (!res_ctx->is_stream_enc_acquired[i] && @@ -1748,7 +1748,7 @@ static struct dc_stream_state *find_pll_sharable_stream( if (resource_are_streams_timing_synchronizable( stream_needs_pll, stream_has_pll) && !dc_is_dp_signal(stream_has_pll->signal) - && stream_has_pll->sink->link->connector_signal + && stream_has_pll->link->connector_signal != SIGNAL_TYPE_VIRTUAL) return stream_has_pll; @@ -1759,7 +1759,7 @@ static struct dc_stream_state *find_pll_sharable_stream( static int get_norm_pix_clk(const struct dc_crtc_timing *timing) { - uint32_t pix_clk = timing->pix_clk_khz; + uint32_t pix_clk = timing->pix_clk_100hz; uint32_t normalized_pix_clk = pix_clk; if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) @@ -1791,15 +1791,60 @@ static void calculate_phy_pix_clks(struct dc_stream_state *stream) /* update actual pixel clock on all streams */ if (dc_is_hdmi_signal(stream->signal)) stream->phy_pix_clk = get_norm_pix_clk( - &stream->timing); + &stream->timing) / 10; else stream->phy_pix_clk = - stream->timing.pix_clk_khz; + stream->timing.pix_clk_100hz / 10; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) stream->phy_pix_clk *= 2; } +static int acquire_resource_from_hw_enabled_state( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct dc_stream_state *stream) +{ + struct dc_link *link = stream->link; + unsigned int inst; + + /* Check for enabled DIG to identify enabled display */ + if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) + return -1; + + /* Check for which front end is used by this encoder. + * Note the inst is 1 indexed, where 0 is undefined. + * Note that DIG_FE can source from different OTG but our + * current implementation always map 1-to-1, so this code makes + * the same assumption and doesn't check OTG source. 
+ */ + inst = link->link_enc->funcs->get_dig_frontend(link->link_enc) - 1; + + /* Instance should be within the range of the pool */ + if (inst >= pool->pipe_count) + return -1; + + if (!res_ctx->pipe_ctx[inst].stream) { + struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[inst]; + + pipe_ctx->stream_res.tg = pool->timing_generators[inst]; + pipe_ctx->plane_res.mi = pool->mis[inst]; + pipe_ctx->plane_res.hubp = pool->hubps[inst]; + pipe_ctx->plane_res.ipp = pool->ipps[inst]; + pipe_ctx->plane_res.xfm = pool->transforms[inst]; + pipe_ctx->plane_res.dpp = pool->dpps[inst]; + pipe_ctx->stream_res.opp = pool->opps[inst]; + if (pool->dpps[inst]) + pipe_ctx->plane_res.mpcc_inst = pool->dpps[inst]->inst; + pipe_ctx->pipe_idx = inst; + + pipe_ctx->stream = stream; + return inst; + } + + return -1; +} + enum dc_status resource_map_pool_resources( const struct dc *dc, struct dc_state *context, @@ -1824,8 +1869,15 @@ enum dc_status resource_map_pool_resources( calculate_phy_pix_clks(stream); - /* acquire new resources */ - pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); + if (stream->apply_seamless_boot_optimization) + pipe_idx = acquire_resource_from_hw_enabled_state( + &context->res_ctx, + pool, + stream); + + if (pipe_idx < 0) + /* acquire new resources */ + pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream); #ifdef CONFIG_DRM_AMD_DC_DCN1_0 if (pipe_idx < 0) @@ -1842,7 +1894,7 @@ enum dc_status resource_map_pool_resources( &context->res_ctx, pool, stream); if (!pipe_ctx->stream_res.stream_enc) - return DC_NO_STREAM_ENG_RESOURCE; + return DC_NO_STREAM_ENC_RESOURCE; update_stream_engine_usage( &context->res_ctx, pool, @@ -1850,7 +1902,7 @@ enum dc_status resource_map_pool_resources( true); /* TODO: Add check if ASIC support and EDID audio */ - if (!stream->sink->converter_disable_audio && + if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && stream->audio_info.mode_count) { pipe_ctx->stream_res.audio = find_first_free_audio( @@ -2112,7 +2164,7 @@ static void set_avi_info_frame( itc = true; itc_value = 1; - support = stream->sink->edid_caps.content_support; + support = stream->content_support; if (itc) { if (!support.bits.valid_content_type) { @@ -2151,8 +2203,8 @@ static void set_avi_info_frame( /* TODO : We should handle YCC quantization */ /* but we do not have matrix calculation */ - if (stream->sink->edid_caps.qs_bit == 1 && - stream->sink->edid_caps.qy_bit == 1) { + if (stream->qs_bit == 1 && + stream->qy_bit == 1) { if (color_space == COLOR_SPACE_SRGB || color_space == COLOR_SPACE_2020_RGB_FULLRANGE) { hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; @@ -2596,7 +2648,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream) { struct dc *core_dc = dc; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; struct timing_generator *tg = core_dc->res_pool->timing_generators[0]; enum dc_status res = DC_OK; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 66e5c4623a49..996298c35f42 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -35,20 +35,17 @@ /******************************************************************************* * Private functions ******************************************************************************/ -void 
update_stream_signal(struct dc_stream_state *stream) +void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink) { - - struct dc_sink *dc_sink = stream->sink; - - if (dc_sink->sink_signal == SIGNAL_TYPE_NONE) - stream->signal = stream->sink->link->connector_signal; + if (sink->sink_signal == SIGNAL_TYPE_NONE) + stream->signal = stream->link->connector_signal; else - stream->signal = dc_sink->sink_signal; + stream->signal = sink->sink_signal; if (dc_is_dvi_signal(stream->signal)) { if (stream->ctx->dc->caps.dual_link_dvi && - stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK && - stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) + (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK && + sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK; else stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK; @@ -61,10 +58,15 @@ static void construct(struct dc_stream_state *stream, uint32_t i = 0; stream->sink = dc_sink_data; - stream->ctx = stream->sink->ctx; - dc_sink_retain(dc_sink_data); + stream->ctx = dc_sink_data->ctx; + stream->link = dc_sink_data->link; + stream->sink_patches = dc_sink_data->edid_caps.panel_patch; + stream->converter_disable_audio = dc_sink_data->converter_disable_audio; + stream->qs_bit = dc_sink_data->edid_caps.qs_bit; + stream->qy_bit = dc_sink_data->edid_caps.qy_bit; + /* Copy audio modes */ /* TODO - Remove this translation */ for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) @@ -100,11 +102,14 @@ static void construct(struct dc_stream_state *stream, /* EDID CAP translation for HDMI 2.0 */ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble; - update_stream_signal(stream); + update_stream_signal(stream, dc_sink_data); stream->out_transfer_func = dc_create_transfer_func(); stream->out_transfer_func->type = TF_TYPE_BYPASS; stream->out_transfer_func->ctx = stream->ctx; + + stream->stream_id = stream->ctx->dc_stream_id_count; + stream->ctx->dc_stream_id_count++; } static void destruct(struct dc_stream_state *stream) @@ -155,20 +160,42 @@ struct dc_stream_state *dc_create_stream_for_sink( return stream; } -struct dc_stream_status *dc_stream_get_status( +/** + * dc_stream_get_status_from_state - Get stream status from given dc state + * @state: DC state to find the stream status in + * @stream: The stream to get the stream status for + * + * The given stream is expected to exist in the given dc state. Otherwise, NULL + * will be returned. + */ +struct dc_stream_status *dc_stream_get_status_from_state( + struct dc_state *state, struct dc_stream_state *stream) { uint8_t i; - struct dc *dc = stream->ctx->dc; - for (i = 0; i < dc->current_state->stream_count; i++) { - if (stream == dc->current_state->streams[i]) - return &dc->current_state->stream_status[i]; + for (i = 0; i < state->stream_count; i++) { + if (stream == state->streams[i]) + return &state->stream_status[i]; } return NULL; } +/** + * dc_stream_get_status() - Get current stream status of the given stream state + * @stream: The stream to get the stream status for. + * + * The given stream is expected to exist in dc->current_state. Otherwise, NULL + * will be returned. 
+ */ +struct dc_stream_status *dc_stream_get_status( + struct dc_stream_state *stream) +{ + struct dc *dc = stream->ctx->dc; + return dc_stream_get_status_from_state(dc->current_state, stream); +} + /** * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address */ @@ -334,16 +361,12 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) stream->output_color_space); DC_LOG_DC( "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n", - stream->timing.pix_clk_khz, + stream->timing.pix_clk_100hz / 10, stream->timing.h_total, stream->timing.v_total, stream->timing.pixel_encoding, stream->timing.display_color_depth); - DC_LOG_DC( - "\tsink name: %s, serial: %d\n", - stream->sink->edid_caps.display_name, - stream->sink->edid_caps.serial_number); DC_LOG_DC( "\tlink: %d\n", - stream->sink->link->link_index); + stream->link->link_index); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index c60c9b4c3075..ee6bd50f60b8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -40,11 +40,14 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->ctx = ctx; plane_state->gamma_correction = dc_create_gamma(); - plane_state->gamma_correction->is_identity = true; + if (plane_state->gamma_correction != NULL) + plane_state->gamma_correction->is_identity = true; plane_state->in_transfer_func = dc_create_transfer_func(); - plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - plane_state->in_transfer_func->ctx = ctx; + if (plane_state->in_transfer_func != NULL) { + plane_state->in_transfer_func->type = TF_TYPE_BYPASS; + plane_state->in_transfer_func->ctx = ctx; + } } static void destruct(struct dc_plane_state *plane_state) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c new file mode 100644 index 000000000000..6ce87b682a32 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c @@ -0,0 +1,123 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "vm_helper.h" + +static void mark_vmid_used(struct vm_helper *vm_helper, unsigned int pos, uint8_t hubp_idx) +{ + struct vmid_usage vmids = vm_helper->hubp_vmid_usage[hubp_idx]; + + vmids.vmid_usage[0] = vmids.vmid_usage[1]; + vmids.vmid_usage[1] = 1 << pos; +} + +static void add_ptb_to_table(struct vm_helper *vm_helper, unsigned int vmid, uint64_t ptb) +{ + vm_helper->ptb_assigned_to_vmid[vmid] = ptb; + vm_helper->num_vmids_available--; +} + +static void clear_entry_from_vmid_table(struct vm_helper *vm_helper, unsigned int vmid) +{ + vm_helper->ptb_assigned_to_vmid[vmid] = 0; + vm_helper->num_vmids_available++; +} + +static void evict_vmids(struct vm_helper *vm_helper) +{ + int i; + uint16_t ord = 0; + + for (i = 0; i < vm_helper->num_vmid; i++) + ord |= vm_helper->hubp_vmid_usage[i].vmid_usage[0] | vm_helper->hubp_vmid_usage[i].vmid_usage[1]; + + // At this point any positions with value 0 are unused vmids, evict them + for (i = 1; i < vm_helper->num_vmid; i++) { + if (ord & (1u << i)) + clear_entry_from_vmid_table(vm_helper, i); + } +} + +// Return value of -1 indicates vmid table unitialized or ptb dne in the table +static int get_existing_vmid_for_ptb(struct vm_helper *vm_helper, uint64_t ptb) +{ + int i; + + for (i = 0; i < vm_helper->num_vmid; i++) { + if (vm_helper->ptb_assigned_to_vmid[i] == ptb) + return i; + } + + return -1; +} + +// Expected to be called only when there's an available vmid +static int get_next_available_vmid(struct vm_helper *vm_helper) +{ + int i; + + for (i = 1; i < vm_helper->num_vmid; i++) { + if (vm_helper->ptb_assigned_to_vmid[i] == 0) + return i; + } + + return -1; +} + +uint8_t get_vmid_for_ptb(struct vm_helper *vm_helper, int64_t ptb, uint8_t hubp_idx) +{ + unsigned int vmid = 0; + int vmid_exists = -1; + + // Physical address gets vmid 0 + if (ptb == 0) + return 0; + + vmid_exists = get_existing_vmid_for_ptb(vm_helper, ptb); + + if (vmid_exists != -1) { + mark_vmid_used(vm_helper, vmid_exists, hubp_idx); + vmid = vmid_exists; + } else { + if (vm_helper->num_vmids_available == 0) + evict_vmids(vm_helper); + + vmid = get_next_available_vmid(vm_helper); + mark_vmid_used(vm_helper, vmid, hubp_idx); + add_ptb_to_table(vm_helper, vmid, ptb); + } + + return vmid; +} + +void init_vm_helper(struct vm_helper *vm_helper, unsigned int num_vmid, unsigned int num_hubp) +{ + vm_helper->num_vmid = num_vmid; + vm_helper->num_hubp = num_hubp; + vm_helper->num_vmids_available = num_vmid - 1; + + memset(vm_helper->hubp_vmid_usage, 0, sizeof(vm_helper->hubp_vmid_usage[0]) * MAX_HUBP); + memset(vm_helper->ptb_assigned_to_vmid, 0, sizeof(vm_helper->ptb_assigned_to_vmid[0]) * MAX_VMID); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4b5bbb13ce7f..1a7fd6aa77eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.08" +#define DC_VER "3.2.17" #define MAX_SURFACES 3 #define MAX_STREAMS 6 @@ -255,6 +255,8 @@ struct dc_debug_options { bool scl_reset_length10; bool hdmi20_disable; bool skip_detection_link_training; + unsigned int force_odm_combine; //bit vector based on otg inst + unsigned int force_fclk_khz; }; struct dc_debug_data { @@ -263,7 +265,6 @@ struct dc_debug_data { uint32_t auxErrorCount; }; - struct dc_state; struct resource_pool; struct dce_hwseq; @@ -339,8 +340,13 @@ struct dc_init_data { uint32_t log_mask; }; -struct dc *dc_create(const struct 
dc_init_data *init_params); +struct dc_callback_init { + uint8_t reserved; +}; +struct dc *dc_create(const struct dc_init_data *init_params); +void dc_init_callbacks(struct dc *dc, + const struct dc_callback_init *init_params); void dc_destroy(struct dc **dc); /******************************************************************************* @@ -440,6 +446,7 @@ union surface_update_flags { uint32_t coeff_reduction_change:1; uint32_t output_tf_change:1; uint32_t pixel_format_change:1; + uint32_t plane_size_change:1; /* Full updates */ uint32_t new_plane:1; @@ -587,6 +594,10 @@ struct dc_validation_set { uint8_t plane_count; }; +bool dc_validate_seamless_boot_timing(struct dc *dc, + const struct dc_sink *sink, + struct dc_crtc_timing *crtc_timing); + enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state); void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); @@ -652,6 +663,7 @@ struct dpcd_caps { int8_t branch_dev_name[6]; int8_t branch_hw_revision; int8_t branch_fw_revision[2]; + uint8_t link_rate_set; bool allow_invalid_MSA_timing_param; bool panel_mode_edp; @@ -742,6 +754,9 @@ void dc_set_power_state( struct dc *dc, enum dc_acpi_cm_power_state power_state); void dc_resume(struct dc *dc); +unsigned int dc_get_current_backlight_pwm(struct dc *dc); +unsigned int dc_get_target_backlight_pwm(struct dc *dc); + bool dc_is_dmcu_initialized(struct dc *dc); #endif /* DC_INTERFACE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index a8b3cedf9431..78c3b300ec45 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -86,10 +86,6 @@ struct dc_vbios_funcs { bool (*is_accelerated_mode)( struct dc_bios *bios); - bool (*is_active_display)( - struct dc_bios *bios, - enum signal_type signal, - const struct connector_device_tag_info *device_tag); void (*set_scratch_critical_state)( struct dc_bios *bios, bool state); @@ -125,10 +121,6 @@ struct dc_vbios_funcs { enum bp_result (*program_crtc_timing)( struct dc_bios *bios, struct bp_hw_crtc_timing_parameters *bp_params); - - enum bp_result (*crtc_source_select)( - struct dc_bios *bios, - struct bp_crtc_source_select *bp_params); enum bp_result (*program_display_engine_pll)( struct dc_bios *bios, struct bp_pixel_clock_parameters *bp_params); @@ -145,7 +137,6 @@ struct dc_vbios_funcs { }; struct bios_registers { - uint32_t BIOS_SCRATCH_0; uint32_t BIOS_SCRATCH_3; uint32_t BIOS_SCRATCH_6; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index da93ab43f2d8..d4eab33c453b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -46,11 +46,14 @@ enum dc_lane_count { */ enum dc_link_rate { LINK_RATE_UNKNOWN = 0, - LINK_RATE_LOW = 0x06, - LINK_RATE_HIGH = 0x0A, - LINK_RATE_RBR2 = 0x0C, - LINK_RATE_HIGH2 = 0x14, - LINK_RATE_HIGH3 = 0x1E + LINK_RATE_LOW = 0x06, // Rate_1 (RBR) - 1.62 Gbps/Lane + LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane + LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane + LINK_RATE_HIGH = 0x0A, // Rate_4 (HBR) - 2.70 Gbps/Lane + LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane + LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane + LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane + LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane }; enum dc_link_spread { diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c 
b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 4842d2378bbf..597d38393379 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -29,31 +29,59 @@ #include "dm_services.h" #include +struct dc_reg_value_masks { + uint32_t value; + uint32_t mask; +}; + +struct dc_reg_sequence { + uint32_t addr; + struct dc_reg_value_masks value_masks; +}; + +static inline void set_reg_field_value_masks( + struct dc_reg_value_masks *field_value_mask, + uint32_t value, + uint32_t mask, + uint8_t shift) +{ + ASSERT(mask != 0); + + field_value_mask->value = (field_value_mask->value & ~mask) | (mask & (value << shift)); + field_value_mask->mask = field_value_mask->mask | mask; +} + uint32_t generic_reg_update_ex(const struct dc_context *ctx, uint32_t addr, uint32_t reg_val, int n, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...) { + struct dc_reg_value_masks field_value_mask = {0}; uint32_t shift, mask, field_value; int i = 1; va_list ap; va_start(ap, field_value1); - reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1); + /* gather all bits value/mask getting updated in this register */ + set_reg_field_value_masks(&field_value_mask, + field_value1, mask1, shift1); while (i < n) { shift = va_arg(ap, uint32_t); mask = va_arg(ap, uint32_t); field_value = va_arg(ap, uint32_t); - reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift); + set_reg_field_value_masks(&field_value_mask, + field_value, mask, shift); i++; } - - dm_write_reg(ctx, addr, reg_val); va_end(ap); + + /* mmio write directly */ + reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value; + dm_write_reg(ctx, addr, reg_val); return reg_val; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index e72fce4eca65..da55d623647a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -97,6 +97,8 @@ struct dc_plane_address { union large_integer chroma_dcc_const_color; } video_progressive; }; + + union large_integer page_table_base; }; struct dc_size { @@ -730,7 +732,7 @@ struct dc_crtc_timing { uint32_t v_front_porch; uint32_t v_sync_width; - uint32_t pix_clk_khz; + uint32_t pix_clk_100hz; uint32_t vic; uint32_t hdmi_vic; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index b2243e0dad1f..8fc223defed4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -30,6 +30,7 @@ #include "grph_object_defs.h" struct dc_link_status { + bool link_active; struct dpcd_caps *dpcd_caps; }; @@ -110,6 +111,7 @@ struct dc_link { union ddi_channel_mapping ddi_channel_mapping; struct connector_device_tag_info device_tag; struct dpcd_caps dpcd_caps; + uint32_t dongle_max_pix_clk; unsigned short chip_caps; unsigned int dpcd_sink_count; enum edp_revision edp_revision; @@ -124,6 +126,7 @@ struct dc_link { struct dc_link_status link_status; struct link_trace link_trace; + struct gpio *hpd_gpio; }; const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index d70c9e1cda3d..5657cb3a2ad3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -32,17 +32,18 @@ /******************************************************************************* * Stream Interfaces 
******************************************************************************/ +struct timing_sync_info { + int group_id; + int group_size; + bool master; +}; struct dc_stream_status { int primary_otg_inst; int stream_enc_inst; int plane_count; + struct timing_sync_info timing_sync_info; struct dc_plane_state *plane_states[MAX_SURFACE_NUM]; - - /* - * link this stream passes through - */ - struct dc_link *link; }; // TODO: References to this needs to be removed.. @@ -50,8 +51,30 @@ struct freesync_context { bool dummy; }; +enum vertical_interrupt_ref_point { + START_V_UPDATE = 0, + START_V_SYNC, + INVALID_POINT + + //For now, only v_update interrupt is used. + //START_V_BLANK, + //START_V_ACTIVE +}; + +struct periodic_interrupt_config { + enum vertical_interrupt_ref_point ref_point; + int lines_offset; +}; + + struct dc_stream_state { + // sink is deprecated, new code should not reference + // this pointer struct dc_sink *sink; + + struct dc_link *link; + struct dc_panel_patch sink_patches; + union display_content_support content_support; struct dc_crtc_timing timing; struct dc_crtc_timing_adjust adjust; struct dc_info_packet vrr_infopacket; @@ -80,8 +103,9 @@ struct dc_stream_state { enum view_3d_format view_format; bool ignore_msa_timing_param; - - unsigned long long periodic_fn_vsync_delta; + bool converter_disable_audio; + uint8_t qs_bit; + uint8_t qy_bit; /* TODO: custom INFO packets */ /* TODO: ABM info (DMCU) */ @@ -92,6 +116,9 @@ struct dc_stream_state { /* DMCU info */ unsigned int abm_level; + struct periodic_interrupt_config periodic_interrupt0; + struct periodic_interrupt_config periodic_interrupt1; + /* from core_stream struct */ struct dc_context *ctx; @@ -102,7 +129,8 @@ struct dc_stream_state { int phy_pix_clk; enum signal_type signal; bool dpms_off; - bool apply_edp_fast_boot_optimization; + + void *dm_stream_context; struct dc_cursor_attributes cursor_attributes; struct dc_cursor_position cursor_position; @@ -116,6 +144,21 @@ struct dc_stream_state { /* Computed state bits */ bool mode_changed : 1; + /* Output from DC when stream state is committed or altered + * DC may only access these values during: + * dc_commit_state, dc_commit_state_no_check, dc_commit_streams + * values may not change outside of those calls + */ + struct { + // For interrupt management, some hardware instance + // offsets need to be exposed to DM + uint8_t otg_offset; + } out; + + bool apply_edp_fast_boot_optimization; + bool apply_seamless_boot_optimization; + + uint32_t stream_id; }; struct dc_stream_update { @@ -125,7 +168,9 @@ struct dc_stream_update { struct dc_info_packet *hdr_static_metadata; unsigned int *abm_level; - unsigned long long *periodic_fn_vsync_delta; + struct periodic_interrupt_config *periodic_interrupt0; + struct periodic_interrupt_config *periodic_interrupt1; + struct dc_crtc_timing_adjust *adjust; struct dc_info_packet *vrr_infopacket; struct dc_info_packet *vsc_infopacket; @@ -162,7 +207,6 @@ void dc_commit_updates_for_stream(struct dc *dc, int surface_count, struct dc_stream_state *stream, struct dc_stream_update *stream_update, - struct dc_plane_state **plane_states, struct dc_state *state); /* * Log the current stream state. 
@@ -255,11 +299,14 @@ enum surface_update_type dc_check_update_surfaces_for_stream( */ struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink); -void update_stream_signal(struct dc_stream_state *stream); +void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink); void dc_stream_retain(struct dc_stream_state *dc_stream); void dc_stream_release(struct dc_stream_state *dc_stream); +struct dc_stream_status *dc_stream_get_status_from_state( + struct dc_state *state, + struct dc_stream_state *stream); struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 0b20ae23f169..da2009a108cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -97,8 +97,8 @@ struct dc_context { struct dc_bios *dc_bios; bool created_bios; struct gpio_service *gpio_service; - struct i2caux *i2caux; uint32_t dc_sink_id_count; + uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; }; @@ -201,6 +201,7 @@ union display_content_support { struct dc_panel_patch { unsigned int dppowerup_delay; unsigned int extra_t12_ms; + unsigned int extra_delay_backlight_off; }; struct dc_edid_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 2a342eae80fd..da96229db53a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -53,6 +53,27 @@ #define MCP_DISABLE_ABM_IMMEDIATELY 255 +static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id) +{ + struct dce_abm *abm_dce = TO_DCE_ABM(abm); + uint32_t rampingBoundary = 0xFFFF; + + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + 1, 80000); + + /* set ramping boundary */ + REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); + + /* setDMCUParam_Pipe */ + REG_UPDATE_2(MASTER_COMM_CMD_REG, + MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET, + MASTER_COMM_CMD_REG_BYTE1, controller_id); + + /* notifyDMCUMsg */ + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + + return true; +} static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce) { @@ -175,7 +196,6 @@ static void dmcu_set_backlight_level( uint32_t controller_id) { unsigned int backlight_8_bit = 0; - uint32_t rampingBoundary = 0xFFFF; uint32_t s2; if (backlight_pwm_u16_16 & 0x10000) @@ -185,16 +205,7 @@ static void dmcu_set_backlight_level( // Take MSB of fractional part since backlight is not max backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF; - /* set ramping boundary */ - REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); - - /* setDMCUParam_Pipe */ - REG_UPDATE_2(MASTER_COMM_CMD_REG, - MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET, - MASTER_COMM_CMD_REG_BYTE1, controller_id); - - /* notifyDMCUMsg */ - REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + dce_abm_set_pipe(&abm_dce->base, controller_id); /* waitDMCUReadyForCmd */ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, @@ -309,16 +320,7 @@ static bool dce_abm_immediate_disable(struct abm *abm) { struct dce_abm *abm_dce = TO_DCE_ABM(abm); - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, - 1, 80000); - - /* setDMCUParam_ABMLevel */ - REG_UPDATE_2(MASTER_COMM_CMD_REG, - MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET, - MASTER_COMM_CMD_REG_BYTE2, MCP_DISABLE_ABM_IMMEDIATELY); - - /* notifyDMCUMsg */ - REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + 
dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY); abm->stored_backlight_registers.BL_PWM_CNTL = REG_READ(BL_PWM_CNTL); @@ -419,6 +421,7 @@ static const struct abm_funcs dce_funcs = { .abm_init = dce_abm_init, .set_abm_level = dce_abm_set_level, .init_backlight = dce_abm_init_backlight, + .set_pipe = dce_abm_set_pipe, .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm, .get_current_backlight = dce_abm_get_current_backlight, .get_target_backlight = dce_abm_get_target_backlight, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index aaeb7faac0c4..4febf4ef7240 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -24,6 +24,7 @@ */ #include "dm_services.h" +#include "core_types.h" #include "dce_aux.h" #include "dce/dce_11_0_sh_mask.h" @@ -41,17 +42,17 @@ container_of((ptr), struct aux_engine_dce110, base) #define FROM_ENGINE(ptr) \ - FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base)) + FROM_AUX_ENGINE(container_of((ptr), struct dce_aux, base)) #define FROM_AUX_ENGINE_ENGINE(ptr) \ - container_of((ptr), struct aux_engine, base) + container_of((ptr), struct dce_aux, base) enum { AUX_INVALID_REPLY_RETRY_COUNTER = 1, AUX_TIMED_OUT_RETRY_COUNTER = 2, AUX_DEFER_RETRY_COUNTER = 6 }; static void release_engine( - struct aux_engine *engine) + struct dce_aux *engine) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); @@ -66,7 +67,7 @@ static void release_engine( #define DMCU_CAN_ACCESS_AUX 2 static bool is_engine_available( - struct aux_engine *engine) + struct dce_aux *engine) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); @@ -79,7 +80,7 @@ static bool is_engine_available( return (field != DMCU_CAN_ACCESS_AUX); } static bool acquire_engine( - struct aux_engine *engine) + struct dce_aux *engine) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); @@ -155,7 +156,7 @@ static bool acquire_engine( (0xFF & (address)) static void submit_channel_request( - struct aux_engine *engine, + struct dce_aux *engine, struct aux_request_transaction_data *request) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); @@ -247,7 +248,7 @@ static void submit_channel_request( REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); } -static int read_channel_reply(struct aux_engine *engine, uint32_t size, +static int read_channel_reply(struct dce_aux *engine, uint32_t size, uint8_t *buffer, uint8_t *reply_result, uint32_t *sw_status) { @@ -273,7 +274,8 @@ static int read_channel_reply(struct aux_engine *engine, uint32_t size, REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32); reply_result_32 = reply_result_32 >> 4; - *reply_result = (uint8_t)reply_result_32; + if (reply_result != NULL) + *reply_result = (uint8_t)reply_result_32; if (reply_result_32 == 0) { /* ACK */ uint32_t i = 0; @@ -299,61 +301,8 @@ static int read_channel_reply(struct aux_engine *engine, uint32_t size, return 0; } -static void process_channel_reply( - struct aux_engine *engine, - struct aux_reply_transaction_data *reply) -{ - int bytes_replied; - uint8_t reply_result; - uint32_t sw_status; - - bytes_replied = read_channel_reply(engine, reply->length, reply->data, - &reply_result, &sw_status); - - /* in case HPD is LOW, exit AUX transaction */ - if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { - reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON; - return; - } - - if (bytes_replied < 0) { - /* Need to handle an error case... 
- * Hopefully, upper layer function won't call this function if - * the number of bytes in the reply was 0, because there was - * surely an error that was asserted that should have been - * handled for hot plug case, this could happens - */ - if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { - reply->status = AUX_TRANSACTION_REPLY_INVALID; - ASSERT_CRITICAL(false); - return; - } - } else { - - switch (reply_result) { - case 0: /* ACK */ - reply->status = AUX_TRANSACTION_REPLY_AUX_ACK; - break; - case 1: /* NACK */ - reply->status = AUX_TRANSACTION_REPLY_AUX_NACK; - break; - case 2: /* DEFER */ - reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER; - break; - case 4: /* AUX ACK / I2C NACK */ - reply->status = AUX_TRANSACTION_REPLY_I2C_NACK; - break; - case 8: /* AUX ACK / I2C DEFER */ - reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER; - break; - default: - reply->status = AUX_TRANSACTION_REPLY_INVALID; - } - } -} - static enum aux_channel_operation_result get_channel_status( - struct aux_engine *engine, + struct dce_aux *engine, uint8_t *returned_bytes) { struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); @@ -414,469 +363,22 @@ static enum aux_channel_operation_result get_channel_status( return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT; } } -static void process_read_reply( - struct aux_engine *engine, - struct read_command_context *ctx) -{ - engine->funcs->process_channel_reply(engine, &ctx->reply); - - switch (ctx->reply.status) { - case AUX_TRANSACTION_REPLY_AUX_ACK: - ctx->defer_retry_aux = 0; - if (ctx->returned_byte > ctx->current_read_length) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else if (ctx->returned_byte < ctx->current_read_length) { - ctx->current_read_length -= ctx->returned_byte; - - ctx->offset += ctx->returned_byte; - - ++ctx->invalid_reply_retry_aux_on_ack; - - if (ctx->invalid_reply_retry_aux_on_ack > - AUX_INVALID_REPLY_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } - } else { - ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED; - ctx->transaction_complete = true; - ctx->operation_succeeded = true; - } - break; - case AUX_TRANSACTION_REPLY_AUX_NACK: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK; - ctx->operation_succeeded = false; - break; - case AUX_TRANSACTION_REPLY_AUX_DEFER: - ++ctx->defer_retry_aux; - - if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_I2C_DEFER: - ctx->defer_retry_aux = 0; - - ++ctx->defer_retry_i2c; - - if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} -static void process_read_request( - struct aux_engine *engine, - struct read_command_context *ctx) -{ - enum aux_channel_operation_result operation_result; - engine->funcs->submit_channel_request(engine, &ctx->request); - - operation_result = engine->funcs->get_channel_status( - engine, &ctx->returned_byte); - - switch (operation_result) { - case AUX_CHANNEL_OPERATION_SUCCEEDED: - if (ctx->returned_byte > ctx->current_read_length) { - 
ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else { - ctx->timed_out_retry_aux = 0; - ctx->invalid_reply_retry_aux = 0; - - ctx->reply.length = ctx->returned_byte; - ctx->reply.data = ctx->buffer; - - process_read_reply(engine, ctx); - } - break; - case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: - ++ctx->invalid_reply_retry_aux; - - if (ctx->invalid_reply_retry_aux > - AUX_INVALID_REPLY_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else - udelay(400); - break; - case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - ++ctx->timed_out_retry_aux; - - if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } else { - /* DP 1.2a, table 2-58: - * "S3: AUX Request CMD PENDING: - * retry 3 times, with 400usec wait on each" - * The HW timeout is set to 550usec, - * so we should not wait here - */ - } - break; - case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} -static bool read_command( - struct aux_engine *engine, - struct i2caux_transaction_request *request, - bool middle_of_transaction) -{ - struct read_command_context ctx; - - ctx.buffer = request->payload.data; - ctx.current_read_length = request->payload.length; - ctx.offset = 0; - ctx.timed_out_retry_aux = 0; - ctx.invalid_reply_retry_aux = 0; - ctx.defer_retry_aux = 0; - ctx.defer_retry_i2c = 0; - ctx.invalid_reply_retry_aux_on_ack = 0; - ctx.transaction_complete = false; - ctx.operation_succeeded = true; - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - ctx.request.type = AUX_TRANSACTION_TYPE_DP; - ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ; - ctx.request.address = request->payload.address; - } else if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) { - ctx.request.type = AUX_TRANSACTION_TYPE_I2C; - ctx.request.action = middle_of_transaction ? 
- I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_READ; - ctx.request.address = request->payload.address >> 1; - } else { - /* in DAL2, there was no return in such case */ - BREAK_TO_DEBUGGER(); - return false; - } - - ctx.request.delay = 0; - - do { - memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length); - - ctx.request.data = ctx.buffer + ctx.offset; - ctx.request.length = ctx.current_read_length; - - process_read_request(engine, &ctx); - - request->status = ctx.status; - - if (ctx.operation_succeeded && !ctx.transaction_complete) - if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C) - msleep(engine->delay); - } while (ctx.operation_succeeded && !ctx.transaction_complete); - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d", - request->payload.address, - request->payload.data[0], - ctx.operation_succeeded); - } - - return ctx.operation_succeeded; -} - -static void process_write_reply( - struct aux_engine *engine, - struct write_command_context *ctx) -{ - engine->funcs->process_channel_reply(engine, &ctx->reply); - - switch (ctx->reply.status) { - case AUX_TRANSACTION_REPLY_AUX_ACK: - ctx->operation_succeeded = true; - - if (ctx->returned_byte) { - ctx->request.action = ctx->mot ? - I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST; - - ctx->current_write_length = 0; - - ++ctx->ack_m_retry; - - if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } else - udelay(300); - } else { - ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED; - ctx->defer_retry_aux = 0; - ctx->ack_m_retry = 0; - ctx->transaction_complete = true; - } - break; - case AUX_TRANSACTION_REPLY_AUX_NACK: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK; - ctx->operation_succeeded = false; - break; - case AUX_TRANSACTION_REPLY_AUX_DEFER: - ++ctx->defer_retry_aux; - - if (ctx->defer_retry_aux > ctx->max_defer_retry) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_I2C_DEFER: - ctx->defer_retry_aux = 0; - ctx->current_write_length = 0; - - ctx->request.action = ctx->mot ? 
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST; - - ++ctx->defer_retry_i2c; - - if (ctx->defer_retry_i2c > ctx->max_defer_retry) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} -static void process_write_request( - struct aux_engine *engine, - struct write_command_context *ctx) -{ - enum aux_channel_operation_result operation_result; - - engine->funcs->submit_channel_request(engine, &ctx->request); - - operation_result = engine->funcs->get_channel_status( - engine, &ctx->returned_byte); - - switch (operation_result) { - case AUX_CHANNEL_OPERATION_SUCCEEDED: - ctx->timed_out_retry_aux = 0; - ctx->invalid_reply_retry_aux = 0; - - ctx->reply.length = ctx->returned_byte; - ctx->reply.data = ctx->reply_data; - - process_write_reply(engine, ctx); - break; - case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: - ++ctx->invalid_reply_retry_aux; - - if (ctx->invalid_reply_retry_aux > - AUX_INVALID_REPLY_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else - udelay(400); - break; - case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - ++ctx->timed_out_retry_aux; - - if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } else { - /* DP 1.2a, table 2-58: - * "S3: AUX Request CMD PENDING: - * retry 3 times, with 400usec wait on each" - * The HW timeout is set to 550usec, - * so we should not wait here - */ - } - break; - case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} -static bool write_command( - struct aux_engine *engine, - struct i2caux_transaction_request *request, - bool middle_of_transaction) -{ - struct write_command_context ctx; - - ctx.mot = middle_of_transaction; - ctx.buffer = request->payload.data; - ctx.current_write_length = request->payload.length; - ctx.timed_out_retry_aux = 0; - ctx.invalid_reply_retry_aux = 0; - ctx.defer_retry_aux = 0; - ctx.defer_retry_i2c = 0; - ctx.ack_m_retry = 0; - ctx.transaction_complete = false; - ctx.operation_succeeded = true; - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - ctx.request.type = AUX_TRANSACTION_TYPE_DP; - ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE; - ctx.request.address = request->payload.address; - } else if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) { - ctx.request.type = AUX_TRANSACTION_TYPE_I2C; - ctx.request.action = middle_of_transaction ? - I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_WRITE; - ctx.request.address = request->payload.address >> 1; - } else { - /* in DAL2, there was no return in such case */ - BREAK_TO_DEBUGGER(); - return false; - } - - ctx.request.delay = 0; - - ctx.max_defer_retry = - (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ? 
- engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER; - - do { - ctx.request.data = ctx.buffer; - ctx.request.length = ctx.current_write_length; - - process_write_request(engine, &ctx); - - request->status = ctx.status; - - if (ctx.operation_succeeded && !ctx.transaction_complete) - if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C) - msleep(engine->delay); - } while (ctx.operation_succeeded && !ctx.transaction_complete); - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d", - request->payload.address, - request->payload.data[0], - ctx.operation_succeeded); - } - - return ctx.operation_succeeded; -} -static bool end_of_transaction_command( - struct aux_engine *engine, - struct i2caux_transaction_request *request) -{ - struct i2caux_transaction_request dummy_request; - uint8_t dummy_data; - - /* [tcheng] We only need to send the stop (read with MOT = 0) - * for I2C-over-Aux, not native AUX - */ - - if (request->payload.address_space != - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) - return false; - - dummy_request.operation = request->operation; - dummy_request.payload.address_space = request->payload.address_space; - dummy_request.payload.address = request->payload.address; - - /* - * Add a dummy byte due to some receiver quirk - * where one byte is sent along with MOT = 0. - * Ideally this should be 0. - */ - - dummy_request.payload.length = 0; - dummy_request.payload.data = &dummy_data; - - if (request->operation == I2CAUX_TRANSACTION_READ) - return read_command(engine, &dummy_request, false); - else - return write_command(engine, &dummy_request, false); - - /* according Syed, it does not need now DoDummyMOT */ -} -static bool submit_request( - struct aux_engine *engine, - struct i2caux_transaction_request *request, - bool middle_of_transaction) -{ - - bool result; - bool mot_used = true; - - switch (request->operation) { - case I2CAUX_TRANSACTION_READ: - result = read_command(engine, request, mot_used); - break; - case I2CAUX_TRANSACTION_WRITE: - result = write_command(engine, request, mot_used); - break; - default: - result = false; - } - - /* [tcheng] - * need to send stop for the last transaction to free up the AUX - * if the above command fails, this would be the last transaction - */ - - if (!middle_of_transaction || !result) - end_of_transaction_command(engine, request); - - /* mask AUX interrupt */ - - return result; -} enum i2caux_engine_type get_engine_type( - const struct aux_engine *engine) + const struct dce_aux *engine) { return I2CAUX_ENGINE_TYPE_AUX; } static bool acquire( - struct aux_engine *engine, + struct dce_aux *engine, struct ddc *ddc) { enum gpio_result result; - if (engine->funcs->is_engine_available) { - /*check whether SW could use the engine*/ - if (!engine->funcs->is_engine_available(engine)) - return false; - } + if (!is_engine_available(engine)) + return false; result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, GPIO_DDC_CONFIG_TYPE_MODE_AUX); @@ -884,7 +386,7 @@ static bool acquire( if (result != GPIO_RESULT_OK) return false; - if (!engine->funcs->acquire_engine(engine)) { + if (!acquire_engine(engine)) { dal_ddc_close(ddc); return false; } @@ -894,21 +396,7 @@ static bool acquire( return true; } -static const struct aux_engine_funcs aux_engine_funcs = { - .acquire_engine = acquire_engine, - .submit_channel_request = submit_channel_request, - .process_channel_reply = process_channel_reply, - .read_channel_reply = read_channel_reply, - .get_channel_status = 
get_channel_status, - .is_engine_available = is_engine_available, - .release_engine = release_engine, - .destroy_engine = dce110_engine_destroy, - .submit_request = submit_request, - .get_engine_type = get_engine_type, - .acquire = acquire, -}; - -void dce110_engine_destroy(struct aux_engine **engine) +void dce110_engine_destroy(struct dce_aux **engine) { struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine); @@ -917,7 +405,7 @@ void dce110_engine_destroy(struct aux_engine **engine) *engine = NULL; } -struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, +struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, @@ -927,7 +415,6 @@ struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_eng aux_engine110->base.ctx = ctx; aux_engine110->base.delay = 0; aux_engine110->base.max_defer_write_retry = 0; - aux_engine110->base.funcs = &aux_engine_funcs; aux_engine110->base.inst = inst; aux_engine110->timeout_period = timeout_period; aux_engine110->regs = regs; @@ -935,3 +422,101 @@ struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_eng return &aux_engine110->base; } +static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload) +{ + if (payload->i2c_over_aux) { + if (payload->write) { + if (payload->mot) + return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT; + return I2CAUX_TRANSACTION_ACTION_I2C_WRITE; + } + if (payload->mot) + return I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT; + return I2CAUX_TRANSACTION_ACTION_I2C_READ; + } + if (payload->write) + return I2CAUX_TRANSACTION_ACTION_DP_WRITE; + return I2CAUX_TRANSACTION_ACTION_DP_READ; +} + +int dce_aux_transfer(struct ddc_service *ddc, + struct aux_payload *payload) +{ + struct ddc *ddc_pin = ddc->ddc_pin; + struct dce_aux *aux_engine; + enum aux_channel_operation_result operation_result; + struct aux_request_transaction_data aux_req; + struct aux_reply_transaction_data aux_rep; + uint8_t returned_bytes = 0; + int res = -1; + uint32_t status; + + memset(&aux_req, 0, sizeof(aux_req)); + memset(&aux_rep, 0, sizeof(aux_rep)); + + aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + acquire(aux_engine, ddc_pin); + + if (payload->i2c_over_aux) + aux_req.type = AUX_TRANSACTION_TYPE_I2C; + else + aux_req.type = AUX_TRANSACTION_TYPE_DP; + + aux_req.action = i2caux_action_from_payload(payload); + + aux_req.address = payload->address; + aux_req.delay = payload->defer_delay * 10; + aux_req.length = payload->length; + aux_req.data = payload->data; + + submit_channel_request(aux_engine, &aux_req); + operation_result = get_channel_status(aux_engine, &returned_bytes); + + switch (operation_result) { + case AUX_CHANNEL_OPERATION_SUCCEEDED: + res = read_channel_reply(aux_engine, payload->length, + payload->data, payload->reply, + &status); + break; + case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: + res = 0; + break; + case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: + case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: + case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: + res = -1; + break; + } + release_engine(aux_engine); + return res; +} + +#define AUX_RETRY_MAX 7 + +bool dce_aux_transfer_with_retries(struct ddc_service *ddc, + struct aux_payload *payload) +{ + int i, ret = 0; + uint8_t reply; + bool payload_reply = true; + + if (!payload->reply) { + payload_reply = false; + payload->reply = &reply; + } + + for (i = 0; i < AUX_RETRY_MAX; i++) { + ret = 
dce_aux_transfer(ddc, payload); + + if (ret >= 0) { + if (*payload->reply == 0) { + if (!payload_reply) + payload->reply = NULL; + return true; + } + } + + udelay(1000); + } + return false; +} diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index f7caab85dc80..d27f22c05e4b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -25,7 +25,9 @@ #ifndef __DAL_AUX_ENGINE_DCE110_H__ #define __DAL_AUX_ENGINE_DCE110_H__ -#include "aux_engine.h" + +#include "i2caux_interface.h" +#include "inc/hw/aux_engine.h" #define AUX_COMMON_REG_LIST(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ @@ -75,8 +77,20 @@ enum { /* This is the timeout as defined in DP 1.2a, */ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 }; + +struct dce_aux { + uint32_t inst; + struct ddc *ddc; + struct dc_context *ctx; + /* following values are expressed in milliseconds */ + uint32_t delay; + uint32_t max_defer_write_retry; + + bool acquire_reset; +}; + struct aux_engine_dce110 { - struct aux_engine base; + struct dce_aux base; const struct dce110_aux_registers *regs; struct { uint32_t aux_control; @@ -96,16 +110,22 @@ struct aux_engine_dce110_init_data { const struct dce110_aux_registers *regs; }; -struct aux_engine *dce110_aux_engine_construct( +struct dce_aux *dce110_aux_engine_construct( struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, const struct dce110_aux_registers *regs); -void dce110_engine_destroy(struct aux_engine **engine); +void dce110_engine_destroy(struct dce_aux **engine); bool dce110_aux_engine_acquire( - struct aux_engine *aux_engine, + struct dce_aux *aux_engine, struct ddc *ddc); + +int dce_aux_transfer(struct ddc_service *ddc, + struct aux_payload *cmd); + +bool dce_aux_transfer_with_retries(struct ddc_service *ddc, + struct aux_payload *cmd); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index afd287f08bc9..6e142c2db986 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c @@ -194,8 +194,8 @@ static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context) if (pipe_ctx->top_pipe) continue; - if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) - max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; + if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 > max_pix_clk) + max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10; /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS * logic for HBR3 still needs Nominal (0.8V) on VDDC rail @@ -257,7 +257,7 @@ static int dce_set_clock( clk_mgr_dce->dentist_vco_freq_khz / 64); /* Prepare to program display clock*/ - pxl_clk_params.target_pixel_clock = requested_clk_khz; + pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; if (clk_mgr_dce->dfs_bypass_active) @@ -450,6 +450,42 @@ void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce) } } +/** + * dce121_clock_patch_xgmi_ss_info() - Save XGMI spread spectrum info + * @clk_mgr: clock manager base structure + * + * Reads from VBIOS the XGMI spread spectrum info and saves it within + * the dce clock manager. This operation will overwrite the existing dprefclk + * SS values if the vBIOS query succeeds. Otherwise, it does nothing. It also + * sets the ->xgmi_enabled flag. 
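For illustration, a minimal caller of the new retry helper introduced above. The helper, struct aux_payload and its fields (i2c_over_aux, write, mot, address, length, data, reply, defer_delay), and AUX_RETRY_MAX are all taken from the hunks above; the wrapper function, its name and the address passed in are purely hypothetical, and the sketch assumes dce_aux.h / i2caux_interface.h are included:

static bool example_read_one_dpcd_byte(struct ddc_service *ddc,
					uint32_t address, uint8_t *value)
{
	/* Describe a single-byte native-AUX read (not I2C-over-AUX). */
	struct aux_payload payload = {
		.i2c_over_aux = false,
		.write = false,		/* read transaction */
		.mot = false,		/* no middle-of-transaction chaining */
		.address = address,
		.length = 1,
		.data = value,
		.reply = NULL,		/* let the helper supply a reply byte */
		.defer_delay = 0,
	};

	/*
	 * dce_aux_transfer_with_retries() retries up to AUX_RETRY_MAX (7)
	 * times with a 1 ms delay between attempts and returns true only
	 * once the sink answers with AUX ACK (reply byte 0).
	 */
	return dce_aux_transfer_with_retries(ddc, &payload);
}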
+ */ +void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr) +{ + struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); + enum bp_result result; + struct spread_spectrum_info info = { { 0 } }; + struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; + + clk_mgr_dce->xgmi_enabled = false; + + result = bp->funcs->get_spread_spectrum_info(bp, AS_SIGNAL_TYPE_XGMI, + 0, &info); + if (result == BP_RESULT_OK && info.spread_spectrum_percentage != 0) { + clk_mgr_dce->xgmi_enabled = true; + clk_mgr_dce->ss_on_dprefclk = true; + clk_mgr_dce->dprefclk_ss_divider = + info.spread_percentage_divider; + + if (info.type.CENTER_MODE == 0) { + /* Currently for DP Reference clock we + * need only SS percentage for + * downspread */ + clk_mgr_dce->dprefclk_ss_percentage = + info.spread_spectrum_percentage; + } + } +} + void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg) @@ -483,18 +519,18 @@ void dce110_fill_display_configs( cfg->src_height = stream->src.height; cfg->src_width = stream->src.width; cfg->ddi_channel_mapping = - stream->sink->link->ddi_channel_mapping.raw; + stream->link->ddi_channel_mapping.raw; cfg->transmitter = - stream->sink->link->link_enc->transmitter; + stream->link->link_enc->transmitter; cfg->link_settings.lane_count = - stream->sink->link->cur_link_settings.lane_count; + stream->link->cur_link_settings.lane_count; cfg->link_settings.link_rate = - stream->sink->link->cur_link_settings.link_rate; + stream->link->cur_link_settings.link_rate; cfg->link_settings.link_spread = - stream->sink->link->cur_link_settings.link_spread; + stream->link->cur_link_settings.link_spread; cfg->sym_clock = stream->phy_pix_clk; /* Round v_refresh*/ - cfg->v_refresh = stream->timing.pix_clk_khz * 1000; + cfg->v_refresh = stream->timing.pix_clk_100hz * 100; cfg->v_refresh /= stream->timing.h_total; cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) / stream->timing.v_total; @@ -518,7 +554,7 @@ static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) - stream->timing.v_addressable); vertical_blank_time = vertical_blank_in_pixels - * 1000 / stream->timing.pix_clk_khz; + * 10000 / stream->timing.pix_clk_100hz; if (min_vertical_blank_time > vertical_blank_time) min_vertical_blank_time = vertical_blank_time; @@ -591,7 +627,15 @@ static void dce11_pplib_apply_display_requirements( dc, context->bw.dce.sclk_khz); - pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; + /* + * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. + * This is not required for less than 5 displays, + * thus don't request decfclk in dc to avoid impact + * on power saving. + * + */ + pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? 
+ pp_display_cfg->min_engine_clock_khz : 0; pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dce.sclk_deep_sleep_khz; @@ -612,7 +656,7 @@ static void dce11_pplib_apply_display_requirements( pp_display_cfg->crtc_index = pp_display_cfg->disp_configs[0].pipe_idx; - pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz; + pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; } if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -625,11 +669,11 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; - int unpatched_disp_clk = context->bw.dce.dispclk_khz; + int patched_disp_clk = context->bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) - context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; + patched_disp_clk = patched_disp_clk * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ @@ -639,13 +683,11 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr, clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } - if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { - context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); - clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { + patched_disp_clk = dce_set_clock(clk_mgr, patched_disp_clk); + clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); - - context->bw.dce.dispclk_khz = unpatched_disp_clk; } static void dce11_update_clocks(struct clk_mgr *clk_mgr, @@ -654,6 +696,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; + int patched_disp_clk = context->bw.dce.dispclk_khz; + + /*TODO: W/A for dal3 linux, investigate why this works */ + if (!clk_mgr_dce->dfs_bypass_active) + patched_disp_clk = patched_disp_clk * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ @@ -663,9 +710,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } - if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { - context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); - clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { + context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); + clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); } @@ -676,11 +723,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr, { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_power_level_change_request level_change_req; - int unpatched_disp_clk = context->bw.dce.dispclk_khz; + int patched_disp_clk = context->bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) - 
context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; + patched_disp_clk = patched_disp_clk * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); /* get max clock state from PPLIB */ @@ -690,13 +737,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr, clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; } - if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { - context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); - clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { + patched_disp_clk = dce112_set_clock(clk_mgr, patched_disp_clk); + clk_mgr->clks.dispclk_khz = patched_disp_clk; } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); - - context->bw.dce.dispclk_khz = unpatched_disp_clk; } static void dce12_update_clocks(struct clk_mgr *clk_mgr, @@ -706,17 +751,23 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr, struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; int max_pix_clk = get_max_pixel_clock_for_all_paths(context); - int unpatched_disp_clk = context->bw.dce.dispclk_khz; + int patched_disp_clk = context->bw.dce.dispclk_khz; /*TODO: W/A for dal3 linux, investigate why this works */ if (!clk_mgr_dce->dfs_bypass_active) - context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; + patched_disp_clk = patched_disp_clk * 115 / 100; - if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { + if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; - clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz; - context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); - clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; + /* + * When xGMI is enabled, the display clk needs to be adjusted + * with the WAFL link's SS percentage. 
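The WAFL-link correction called just below, clk_mgr_adjust_dp_ref_freq_for_ss(), is defined outside this hunk. A simplified integer sketch of the kind of adjustment it performs, assuming (based on the dprefclk_ss_percentage/dprefclk_ss_divider documentation added in dce_clk_mgr.h below) that percentage divided by divider yields percent and that only half of a down-spread is compensated on average; the function name, the 64-bit userspace types and the numbers are illustrative only:

static int example_adjust_for_downspread(int clk_khz,
					 int ss_percentage, int ss_divider)
{
	/* e.g. ss_percentage = 50, ss_divider = 100  ->  0.5% down-spread */
	if (ss_divider == 0)
		return clk_khz;

	/* Shift the requested clock down by half of the spread: 0.25% here. */
	return clk_khz - (int)(((int64_t)clk_khz * ss_percentage) /
			       ((int64_t)ss_divider * 200));
}

With a 600000 kHz display clock request this returns 598500 kHz, i.e. a 1500 kHz (0.25%) reduction.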
+ */ + if (clk_mgr_dce->xgmi_enabled) + patched_disp_clk = clk_mgr_adjust_dp_ref_freq_for_ss( + clk_mgr_dce, patched_disp_clk); + clock_voltage_req.clocks_in_khz = patched_disp_clk; + clk_mgr->clks.dispclk_khz = dce112_set_clock(clk_mgr, patched_disp_clk); dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); } @@ -729,8 +780,6 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr, dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); } dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); - - context->bw.dce.dispclk_khz = unpatched_disp_clk; } static const struct clk_mgr_funcs dce120_funcs = { @@ -882,6 +931,27 @@ struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx) return &clk_mgr_dce->base; } +struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx) +{ + struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), + GFP_KERNEL); + + if (clk_mgr_dce == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + memcpy(clk_mgr_dce->max_clks_by_state, dce120_max_clks_by_state, + sizeof(dce120_max_clks_by_state)); + + dce_clk_mgr_construct(clk_mgr_dce, ctx, NULL, NULL, NULL); + + clk_mgr_dce->dprefclk_khz = 625000; + clk_mgr_dce->base.funcs = &dce120_funcs; + + return &clk_mgr_dce->base; +} + void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr) { struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h index 3bceb31d910d..c8f8c442142a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h @@ -94,11 +94,37 @@ struct dce_clk_mgr { * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */ int dfs_bypass_disp_clk; - /* Flag for Enabled SS on DPREFCLK */ + /** + * @ss_on_dprefclk: + * + * True if spread spectrum is enabled on the DP ref clock. + */ bool ss_on_dprefclk; - /* DPREFCLK SS percentage (if down-spread enabled) */ + + /** + * @xgmi_enabled: + * + * True if xGMI is enabled. On VG20, both audio and display clocks need + * to be adjusted with the WAFL link's SS info if xGMI is enabled. + */ + bool xgmi_enabled; + + /** + * @dprefclk_ss_percentage: + * + * DPREFCLK SS percentage (if down-spread enabled). + * + * Note that if XGMI is enabled, the SS info (percentage and divider) + * from the WAFL link is used instead. This is decided during + * dce_clk_mgr initialization. + */ int dprefclk_ss_percentage; - /* DPREFCLK SS percentage Divider (100 or 1000) */ + + /** + * @dprefclk_ss_divider: + * + * DPREFCLK SS percentage Divider (100 or 1000). 
+ */ int dprefclk_ss_divider; int dprefclk_khz; @@ -163,6 +189,9 @@ struct clk_mgr *dce112_clk_mgr_create( struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx); +struct clk_mgr *dce121_clk_mgr_create(struct dc_context *ctx); +void dce121_clock_patch_xgmi_ss_info(struct clk_mgr *clk_mgr); + void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); int dentist_get_divider_from_did(int did); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 723ce80ed89c..71d5777de961 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -108,28 +108,28 @@ static const struct spread_spectrum_data *get_ss_data_entry( } /** -* Function: calculate_fb_and_fractional_fb_divider -* -* * DESCRIPTION: Calculates feedback and fractional feedback dividers values -* -*PARAMETERS: -* targetPixelClock Desired frequency in 10 KHz -* ref_divider Reference divider (already known) -* postDivider Post Divider (already known) -* feedback_divider_param Pointer where to store -* calculated feedback divider value -* fract_feedback_divider_param Pointer where to store -* calculated fract feedback divider value -* -*RETURNS: -* It fills the locations pointed by feedback_divider_param -* and fract_feedback_divider_param -* It returns - true if feedback divider not 0 -* - false should never happen) -*/ + * Function: calculate_fb_and_fractional_fb_divider + * + * * DESCRIPTION: Calculates feedback and fractional feedback dividers values + * + *PARAMETERS: + * targetPixelClock Desired frequency in 100 Hz + * ref_divider Reference divider (already known) + * postDivider Post Divider (already known) + * feedback_divider_param Pointer where to store + * calculated feedback divider value + * fract_feedback_divider_param Pointer where to store + * calculated fract feedback divider value + * + *RETURNS: + * It fills the locations pointed by feedback_divider_param + * and fract_feedback_divider_param + * It returns - true if feedback divider not 0 + * - false should never happen) + */ static bool calculate_fb_and_fractional_fb_divider( struct calc_pll_clock_source *calc_pll_cs, - uint32_t target_pix_clk_khz, + uint32_t target_pix_clk_100hz, uint32_t ref_divider, uint32_t post_divider, uint32_t *feedback_divider_param, @@ -138,11 +138,11 @@ static bool calculate_fb_and_fractional_fb_divider( uint64_t feedback_divider; feedback_divider = - (uint64_t)target_pix_clk_khz * ref_divider * post_divider; + (uint64_t)target_pix_clk_100hz * ref_divider * post_divider; feedback_divider *= 10; /* additional factor, since we divide by 10 afterwards */ feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor); - feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz); + feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz * 10ull); /*Round to the number of precision * The following code replace the old code (ullfeedbackDivider + 5)/10 @@ -195,36 +195,36 @@ static bool calc_fb_divider_checking_tolerance( { uint32_t feedback_divider; uint32_t fract_feedback_divider; - uint32_t actual_calculated_clock_khz; + uint32_t actual_calculated_clock_100hz; uint32_t abs_err; - uint64_t actual_calc_clk_khz; + uint64_t actual_calc_clk_100hz; calculate_fb_and_fractional_fb_divider( calc_pll_cs, - pll_settings->adjusted_pix_clk, + pll_settings->adjusted_pix_clk_100hz, ref_divider, post_divider, &feedback_divider, &fract_feedback_divider); /*Actual calculated value*/ - 
actual_calc_clk_khz = (uint64_t)feedback_divider * + actual_calc_clk_100hz = (uint64_t)feedback_divider * calc_pll_cs->fract_fb_divider_factor + fract_feedback_divider; - actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz; - actual_calc_clk_khz = - div_u64(actual_calc_clk_khz, + actual_calc_clk_100hz *= calc_pll_cs->ref_freq_khz * 10; + actual_calc_clk_100hz = + div_u64(actual_calc_clk_100hz, ref_divider * post_divider * calc_pll_cs->fract_fb_divider_factor); - actual_calculated_clock_khz = (uint32_t)(actual_calc_clk_khz); + actual_calculated_clock_100hz = (uint32_t)(actual_calc_clk_100hz); - abs_err = (actual_calculated_clock_khz > - pll_settings->adjusted_pix_clk) - ? actual_calculated_clock_khz - - pll_settings->adjusted_pix_clk - : pll_settings->adjusted_pix_clk - - actual_calculated_clock_khz; + abs_err = (actual_calculated_clock_100hz > + pll_settings->adjusted_pix_clk_100hz) + ? actual_calculated_clock_100hz - + pll_settings->adjusted_pix_clk_100hz + : pll_settings->adjusted_pix_clk_100hz - + actual_calculated_clock_100hz; if (abs_err <= tolerance) { /*found good values*/ @@ -233,10 +233,10 @@ static bool calc_fb_divider_checking_tolerance( pll_settings->feedback_divider = feedback_divider; pll_settings->fract_feedback_divider = fract_feedback_divider; pll_settings->pix_clk_post_divider = post_divider; - pll_settings->calculated_pix_clk = - actual_calculated_clock_khz; + pll_settings->calculated_pix_clk_100hz = + actual_calculated_clock_100hz; pll_settings->vco_freq = - actual_calculated_clock_khz * post_divider; + actual_calculated_clock_100hz * post_divider / 10; return true; } return false; @@ -257,8 +257,8 @@ static bool calc_pll_dividers_in_range( /* This is err_tolerance / 10000 = 0.0025 - acceptable error of 0.25% * This is errorTolerance / 10000 = 0.0001 - acceptable error of 0.01%*/ - tolerance = (pll_settings->adjusted_pix_clk * err_tolerance) / - 10000; + tolerance = (pll_settings->adjusted_pix_clk_100hz * err_tolerance) / + 100000; if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE) tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE; @@ -294,7 +294,7 @@ static uint32_t calculate_pixel_clock_pll_dividers( uint32_t min_ref_divider; uint32_t max_ref_divider; - if (pll_settings->adjusted_pix_clk == 0) { + if (pll_settings->adjusted_pix_clk_100hz == 0) { DC_LOG_ERROR( "%s Bad requested pixel clock", __func__); return MAX_PLL_CALC_ERROR; @@ -306,21 +306,21 @@ static uint32_t calculate_pixel_clock_pll_dividers( max_post_divider = pll_settings->pix_clk_post_divider; } else { min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider; - if (min_post_divider * pll_settings->adjusted_pix_clk < - calc_pll_cs->min_vco_khz) { - min_post_divider = calc_pll_cs->min_vco_khz / - pll_settings->adjusted_pix_clk; + if (min_post_divider * pll_settings->adjusted_pix_clk_100hz < + calc_pll_cs->min_vco_khz * 10) { + min_post_divider = calc_pll_cs->min_vco_khz * 10 / + pll_settings->adjusted_pix_clk_100hz; if ((min_post_divider * - pll_settings->adjusted_pix_clk) < - calc_pll_cs->min_vco_khz) + pll_settings->adjusted_pix_clk_100hz) < + calc_pll_cs->min_vco_khz * 10) min_post_divider++; } max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider; - if (max_post_divider * pll_settings->adjusted_pix_clk - > calc_pll_cs->max_vco_khz) - max_post_divider = calc_pll_cs->max_vco_khz / - pll_settings->adjusted_pix_clk; + if (max_post_divider * pll_settings->adjusted_pix_clk_100hz + > calc_pll_cs->max_vco_khz * 10) + max_post_divider = calc_pll_cs->max_vco_khz * 10 / + pll_settings->adjusted_pix_clk_100hz; } /* 
2) Find Reference divider ranges @@ -392,47 +392,47 @@ static bool pll_adjust_pix_clk( struct pixel_clk_params *pix_clk_params, struct pll_settings *pll_settings) { - uint32_t actual_pix_clk_khz = 0; - uint32_t requested_clk_khz = 0; + uint32_t actual_pix_clk_100hz = 0; + uint32_t requested_clk_100hz = 0; struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = { 0 }; enum bp_result bp_result; switch (pix_clk_params->signal_type) { case SIGNAL_TYPE_HDMI_TYPE_A: { - requested_clk_khz = pix_clk_params->requested_pix_clk; + requested_clk_100hz = pix_clk_params->requested_pix_clk_100hz; if (pix_clk_params->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (pix_clk_params->color_depth) { case COLOR_DEPTH_101010: - requested_clk_khz = (requested_clk_khz * 5) >> 2; + requested_clk_100hz = (requested_clk_100hz * 5) >> 2; break; /* x1.25*/ case COLOR_DEPTH_121212: - requested_clk_khz = (requested_clk_khz * 6) >> 2; + requested_clk_100hz = (requested_clk_100hz * 6) >> 2; break; /* x1.5*/ case COLOR_DEPTH_161616: - requested_clk_khz = requested_clk_khz * 2; + requested_clk_100hz = requested_clk_100hz * 2; break; /* x2.0*/ default: break; } } - actual_pix_clk_khz = requested_clk_khz; + actual_pix_clk_100hz = requested_clk_100hz; } break; case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: case SIGNAL_TYPE_EDP: - requested_clk_khz = pix_clk_params->requested_sym_clk; - actual_pix_clk_khz = pix_clk_params->requested_pix_clk; + requested_clk_100hz = pix_clk_params->requested_sym_clk * 10; + actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; break; default: - requested_clk_khz = pix_clk_params->requested_pix_clk; - actual_pix_clk_khz = pix_clk_params->requested_pix_clk; + requested_clk_100hz = pix_clk_params->requested_pix_clk_100hz; + actual_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; break; } - bp_adjust_pixel_clock_params.pixel_clock = requested_clk_khz; + bp_adjust_pixel_clock_params.pixel_clock = requested_clk_100hz / 10; bp_adjust_pixel_clock_params. encoder_object_id = pix_clk_params->encoder_object_id; bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type; @@ -441,9 +441,9 @@ static bool pll_adjust_pix_clk( bp_result = clk_src->bios->funcs->adjust_pixel_clock( clk_src->bios, &bp_adjust_pixel_clock_params); if (bp_result == BP_RESULT_OK) { - pll_settings->actual_pix_clk = actual_pix_clk_khz; - pll_settings->adjusted_pix_clk = - bp_adjust_pixel_clock_params.adjusted_pixel_clock; + pll_settings->actual_pix_clk_100hz = actual_pix_clk_100hz; + pll_settings->adjusted_pix_clk_100hz = + bp_adjust_pixel_clock_params.adjusted_pixel_clock * 10; pll_settings->reference_divider = bp_adjust_pixel_clock_params.reference_divider; pll_settings->pix_clk_post_divider = @@ -490,7 +490,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper ( const struct spread_spectrum_data *ss_data = get_ss_data_entry( clk_src, pix_clk_params->signal_type, - pll_settings->adjusted_pix_clk); + pll_settings->adjusted_pix_clk_100hz / 10); if (NULL != ss_data) pll_settings->ss_percentage = ss_data->percentage; @@ -502,13 +502,13 @@ static uint32_t dce110_get_pix_clk_dividers_helper ( * to continue. 
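To make the divider arithmetic above concrete: calculate_fb_and_fractional_fb_divider() rearranges pix_clk = (ref_freq / ref_divider) * feedback_divider / post_divider into feedback_divider = pix_clk * ref_divider * post_divider / ref_freq, now carrying the pixel clock in 100 Hz units and therefore dividing by ref_freq_khz * 10. A standalone sketch with purely illustrative numbers (the 100 MHz reference, the dividers and the fractional scale factor are assumptions, not values from this patch):

#include <stdint.h>

static void example_feedback_divider(void)
{
	uint64_t ref_freq_khz = 100000;			/* assumed 100 MHz reference */
	uint64_t ref_divider = 2, post_divider = 4;	/* assumed, already chosen */
	uint64_t target_pix_clk_100hz = 1485000;	/* 148.5 MHz */
	uint64_t fract_factor = 1000000;		/* fractional scale, illustrative */

	/* Same rearrangement as calculate_fb_and_fractional_fb_divider(). */
	uint64_t fb_scaled = target_pix_clk_100hz * ref_divider * post_divider *
			     fract_factor / (ref_freq_khz * 10);

	uint64_t feedback_divider = fb_scaled / fract_factor;	/* 11 */
	uint64_t fract_feedback   = fb_scaled % fract_factor;	/* 880000, i.e. 0.88 */

	(void)feedback_divider;
	(void)fract_feedback;
}

The resulting VCO frequency, feedback divider (including its fraction) times ref_freq / ref_divider, is 594000 kHz = 148500 kHz * post_divider, which is the quantity calc_fb_divider_checking_tolerance() checks against the requested clock.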
*/ DC_LOG_ERROR( "%s: Failed to adjust pixel clock!!", __func__); - pll_settings->actual_pix_clk = - pix_clk_params->requested_pix_clk; - pll_settings->adjusted_pix_clk = - pix_clk_params->requested_pix_clk; + pll_settings->actual_pix_clk_100hz = + pix_clk_params->requested_pix_clk_100hz; + pll_settings->adjusted_pix_clk_100hz = + pix_clk_params->requested_pix_clk_100hz; if (dc_is_dp_signal(pix_clk_params->signal_type)) - pll_settings->adjusted_pix_clk = 100000; + pll_settings->adjusted_pix_clk_100hz = 1000000; } /* Calculate Dividers */ @@ -533,28 +533,28 @@ static void dce112_get_pix_clk_dividers_helper ( struct pll_settings *pll_settings, struct pixel_clk_params *pix_clk_params) { - uint32_t actualPixelClockInKHz; + uint32_t actual_pixel_clock_100hz; - actualPixelClockInKHz = pix_clk_params->requested_pix_clk; + actual_pixel_clock_100hz = pix_clk_params->requested_pix_clk_100hz; /* Calculate Dividers */ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) { switch (pix_clk_params->color_depth) { case COLOR_DEPTH_101010: - actualPixelClockInKHz = (actualPixelClockInKHz * 5) >> 2; + actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2; break; case COLOR_DEPTH_121212: - actualPixelClockInKHz = (actualPixelClockInKHz * 6) >> 2; + actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2; break; case COLOR_DEPTH_161616: - actualPixelClockInKHz = actualPixelClockInKHz * 2; + actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2; break; default: break; } } - pll_settings->actual_pix_clk = actualPixelClockInKHz; - pll_settings->adjusted_pix_clk = actualPixelClockInKHz; - pll_settings->calculated_pix_clk = pix_clk_params->requested_pix_clk; + pll_settings->actual_pix_clk_100hz = actual_pixel_clock_100hz; + pll_settings->adjusted_pix_clk_100hz = actual_pixel_clock_100hz; + pll_settings->calculated_pix_clk_100hz = pix_clk_params->requested_pix_clk_100hz; } static uint32_t dce110_get_pix_clk_dividers( @@ -567,7 +567,7 @@ static uint32_t dce110_get_pix_clk_dividers( DC_LOGGER_INIT(); if (pix_clk_params == NULL || pll_settings == NULL - || pix_clk_params->requested_pix_clk == 0) { + || pix_clk_params->requested_pix_clk_100hz == 0) { DC_LOG_ERROR( "%s: Invalid parameters!!\n", __func__); return pll_calc_error; @@ -577,10 +577,10 @@ static uint32_t dce110_get_pix_clk_dividers( if (cs->id == CLOCK_SOURCE_ID_DP_DTO || cs->id == CLOCK_SOURCE_ID_EXTERNAL) { - pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz; - pll_settings->calculated_pix_clk = clk_src->ext_clk_khz; - pll_settings->actual_pix_clk = - pix_clk_params->requested_pix_clk; + pll_settings->adjusted_pix_clk_100hz = clk_src->ext_clk_khz * 10; + pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10; + pll_settings->actual_pix_clk_100hz = + pix_clk_params->requested_pix_clk_100hz; return 0; } @@ -599,7 +599,7 @@ static uint32_t dce112_get_pix_clk_dividers( DC_LOGGER_INIT(); if (pix_clk_params == NULL || pll_settings == NULL - || pix_clk_params->requested_pix_clk == 0) { + || pix_clk_params->requested_pix_clk_100hz == 0) { DC_LOG_ERROR( "%s: Invalid parameters!!\n", __func__); return -1; @@ -609,10 +609,10 @@ static uint32_t dce112_get_pix_clk_dividers( if (cs->id == CLOCK_SOURCE_ID_DP_DTO || cs->id == CLOCK_SOURCE_ID_EXTERNAL) { - pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz; - pll_settings->calculated_pix_clk = clk_src->ext_clk_khz; - pll_settings->actual_pix_clk = - pix_clk_params->requested_pix_clk; + pll_settings->adjusted_pix_clk_100hz = clk_src->ext_clk_khz * 10; + 
pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10; + pll_settings->actual_pix_clk_100hz = + pix_clk_params->requested_pix_clk_100hz; return -1; } @@ -714,7 +714,7 @@ static bool enable_spread_spectrum( ss_data = get_ss_data_entry( clk_src, signal, - pll_settings->calculated_pix_clk); + pll_settings->calculated_pix_clk_100hz / 10); /* Pixel clock PLL has been programmed to generate desired pixel clock, * now enable SS on pixel clock */ @@ -853,7 +853,7 @@ static bool dce110_program_pix_clk( /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ bp_pc_params.controller_id = pix_clk_params->controller_id; bp_pc_params.pll_id = clock_source->id; - bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk; + bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz; bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; bp_pc_params.signal_type = pix_clk_params->signal_type; @@ -903,12 +903,12 @@ static bool dce112_program_pix_clk( #if defined(CONFIG_DRM_AMD_DC_DCN1_0) if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) { unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; - unsigned dp_dto_ref_kHz = 700000; - unsigned clock_kHz = pll_settings->actual_pix_clk; + unsigned dp_dto_ref_100hz = 7000000; + unsigned clock_100hz = pll_settings->actual_pix_clk_100hz; /* Set DTO values: phase = target clock, modulo = reference clock */ - REG_WRITE(PHASE[inst], clock_kHz); - REG_WRITE(MODULO[inst], dp_dto_ref_kHz); + REG_WRITE(PHASE[inst], clock_100hz); + REG_WRITE(MODULO[inst], dp_dto_ref_100hz); /* Enable DTO */ REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1); @@ -927,7 +927,7 @@ static bool dce112_program_pix_clk( /*ATOMBIOS expects pixel rate adjusted by deep color ratio)*/ bp_pc_params.controller_id = pix_clk_params->controller_id; bp_pc_params.pll_id = clock_source->id; - bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk; + bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz; bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id; bp_pc_params.signal_type = pix_clk_params->signal_type; @@ -977,6 +977,28 @@ static bool dce110_clock_source_power_down( return bp_result == BP_RESULT_OK; } +static bool get_pixel_clk_frequency_100hz( + struct clock_source *clock_source, + unsigned int inst, + unsigned int *pixel_clk_khz) +{ + struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); + unsigned int clock_hz = 0; + + if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) { + clock_hz = REG_READ(PHASE[inst]); + + /* NOTE: There is agreement with VBIOS here that MODULO is + * programmed equal to DPREFCLK, in which case PHASE will be + * equivalent to pixel clock. 
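The comment above relies on the usual DTO relationship; stated explicitly (as an assumption about the hardware, not something shown in this hunk), the DTO output is refclk * PHASE / MODULO, so programming MODULO equal to DPREFCLK makes the output equal to PHASE, which is why reading PHASE back suffices here. A small sketch, using div_u64() from linux/math64.h:

static unsigned int example_dto_output_100hz(unsigned int dprefclk_100hz,
					     unsigned int phase,
					     unsigned int modulo)
{
	/* Assumed relationship: output = refclk * PHASE / MODULO. */
	return (unsigned int)div_u64((uint64_t)dprefclk_100hz * phase, modulo);
}

For example, with the FPGA path in dce112_program_pix_clk() above (PHASE = target clock in 100 Hz units, MODULO = 7000000 for a 700 MHz reference), a PHASE of 1485000 (148.5 MHz) yields exactly 1485000 back.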
+ */ + *pixel_clk_khz = clock_hz / 100; + return true; + } + + return false; +} + /*****************************************/ /* Constructor */ /*****************************************/ @@ -984,12 +1006,14 @@ static bool dce110_clock_source_power_down( static const struct clock_source_funcs dce112_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dce112_program_pix_clk, - .get_pix_clk_dividers = dce112_get_pix_clk_dividers + .get_pix_clk_dividers = dce112_get_pix_clk_dividers, + .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; static const struct clock_source_funcs dce110_clk_src_funcs = { .cs_power_down = dce110_clock_source_power_down, .program_pix_clk = dce110_program_pix_clk, - .get_pix_clk_dividers = dce110_get_pix_clk_dividers + .get_pix_clk_dividers = dce110_get_pix_clk_dividers, + .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index dea40b322191..c2926cf19dee 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -51,7 +51,6 @@ #define PSR_SET_WAITLOOP 0x31 #define MCP_INIT_DMCU 0x88 #define MCP_INIT_IRAM 0x89 -#define MCP_DMCU_VERSION 0x90 #define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L static bool dce_dmcu_init(struct dmcu *dmcu) @@ -317,38 +316,11 @@ static void dce_get_psr_wait_loop( } #if defined(CONFIG_DRM_AMD_DC_DCN1_0) -static void dcn10_get_dmcu_state(struct dmcu *dmcu) -{ - struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); - uint32_t dmcu_state_offset = 0xf6; - - /* Enable write access to IRAM */ - REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, - IRAM_HOST_ACCESS_EN, 1, - IRAM_RD_ADDR_AUTO_INC, 1); - - REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10); - - /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */ - REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_state_offset); - - /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/ - dmcu->dmcu_state = REG_READ(DMCU_IRAM_RD_DATA); - - /* Disable write access to IRAM to allow dynamic sleep state */ - REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, - IRAM_HOST_ACCESS_EN, 0, - IRAM_RD_ADDR_AUTO_INC, 0); -} - static void dcn10_get_dmcu_version(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); uint32_t dmcu_version_offset = 0xf1; - /* Clear scratch */ - REG_WRITE(DC_DMCU_SCRATCH, 0); - /* Enable write access to IRAM */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1, @@ -359,85 +331,74 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu) /* Write address to IRAM_RD_ADDR and read from DATA register */ REG_WRITE(DMCU_IRAM_RD_CTRL, dmcu_version_offset); dmcu->dmcu_version.interface_version = REG_READ(DMCU_IRAM_RD_DATA); - dmcu->dmcu_version.year = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) | + dmcu->dmcu_version.abm_version = REG_READ(DMCU_IRAM_RD_DATA); + dmcu->dmcu_version.psr_version = REG_READ(DMCU_IRAM_RD_DATA); + dmcu->dmcu_version.build_version = ((REG_READ(DMCU_IRAM_RD_DATA) << 8) | REG_READ(DMCU_IRAM_RD_DATA)); - dmcu->dmcu_version.month = REG_READ(DMCU_IRAM_RD_DATA); - dmcu->dmcu_version.date = REG_READ(DMCU_IRAM_RD_DATA); /* Disable write access to IRAM to allow dynamic sleep state */ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0, IRAM_RD_ADDR_AUTO_INC, 0); - - /* Send MCP command message to DMCU to get version reply from FW. 
- * We expect this version should match the one in IRAM, otherwise - * something is wrong with DMCU and we should fail and disable UC. - */ - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); - - /* Set command to get DMCU version from microcontroller */ - REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, - MCP_DMCU_VERSION); - - /* Notify microcontroller of new command */ - REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); - - /* Ensure command has been executed before continuing */ - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); - - /* Somehow version does not match, so fail and return version 0 */ - if (dmcu->dmcu_version.interface_version != REG_READ(DC_DMCU_SCRATCH)) - dmcu->dmcu_version.interface_version = 0; } static bool dcn10_dmcu_init(struct dmcu *dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); + bool status = false; - /* DMCU FW should populate the scratch register if running */ - if (REG_READ(DC_DMCU_SCRATCH) == 0) - return false; - - /* Check state is uninitialized */ - dcn10_get_dmcu_state(dmcu); - - /* If microcontroller is already initialized, do nothing */ - if (dmcu->dmcu_state == DMCU_RUNNING) - return true; - - /* Retrieve and cache the DMCU firmware version. */ - dcn10_get_dmcu_version(dmcu); - - /* Check interface version to confirm firmware is loaded and running */ - if (dmcu->dmcu_version.interface_version == 0) - return false; + /* Definition of DC_DMCU_SCRATCH + * 0 : firmare not loaded + * 1 : PSP load DMCU FW but not initialized + * 2 : Firmware already initialized + */ + dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH); - /* Wait until microcontroller is ready to process interrupt */ - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); + switch (dmcu->dmcu_state) { + case DMCU_UNLOADED: + status = false; + break; + case DMCU_LOADED_UNINITIALIZED: + /* Wait until microcontroller is ready to process interrupt */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); - /* Set initialized ramping boundary value */ - REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF); + /* Set initialized ramping boundary value */ + REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF); - /* Set command to initialize microcontroller */ - REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, + /* Set command to initialize microcontroller */ + REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_INIT_DMCU); - /* Notify microcontroller of new command */ - REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + /* Notify microcontroller of new command */ + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); - /* Ensure command has been executed before continuing */ - REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); + /* Ensure command has been executed before continuing */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800); - // Check state is initialized - dcn10_get_dmcu_state(dmcu); + // Check state is initialized + dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH); - // If microcontroller is not in running state, fail - if (dmcu->dmcu_state != DMCU_RUNNING) - return false; + // If microcontroller is not in running state, fail + if (dmcu->dmcu_state == DMCU_RUNNING) { + /* Retrieve and cache the DMCU firmware version. 
*/ + dcn10_get_dmcu_version(dmcu); + status = true; + } else + status = false; - return true; + break; + case DMCU_RUNNING: + status = true; + break; + default: + status = false; + break; + } + + return status; } + static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, unsigned int start_offset, const char *src, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index c83a7f05f14c..956bdf14503f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -133,6 +133,10 @@ SR(DCHUB_AGP_TOP), \ BL_REG_LIST() +#define HWSEQ_VG20_REG_LIST() \ + HWSEQ_DCE120_REG_LIST(),\ + MMHUB_SR(MC_VM_XGMI_LFB_CNTL) + #define HWSEQ_DCE112_REG_LIST() \ HWSEQ_DCE10_REG_LIST(), \ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \ @@ -298,6 +302,7 @@ struct dce_hwseq_registers { uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB; uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR; uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR; + uint32_t MC_VM_XGMI_LFB_CNTL; uint32_t AZALIA_AUDIO_DTO; uint32_t AZALIA_CONTROLLER_CLOCK_GATING; }; @@ -382,6 +387,11 @@ struct dce_hwseq_registers { HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) +#define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\ + HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\ + HWS_SF(, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION, mask_sh),\ + HWS_SF(, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION, mask_sh) + #define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \ @@ -470,6 +480,8 @@ struct dce_hwseq_registers { type PHYSICAL_PAGE_NUMBER_MSB;\ type PHYSICAL_PAGE_NUMBER_LSB;\ type LOGICAL_ADDR; \ + type PF_LFB_REGION;\ + type PF_MAX_REGION;\ type ENABLE_L1_TLB;\ type SYSTEM_ACCESS_MODE;\ type LVTMA_BLON;\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 3e18ea84b1f9..314c04a915d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -599,12 +599,12 @@ bool dce110_link_encoder_validate_dvi_output( if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK || connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) && signal != SIGNAL_TYPE_HDMI_TYPE_A && - crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK) + crtc_timing->pix_clk_100hz > (TMDS_MAX_PIXEL_CLOCK * 10)) return false; - if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK) + if (crtc_timing->pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10)) return false; - if (crtc_timing->pix_clk_khz > max_pixel_clock) + if (crtc_timing->pix_clk_100hz > (max_pixel_clock * 10)) return false; /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */ @@ -788,7 +788,7 @@ bool dce110_link_encoder_validate_output_with_stream( case SIGNAL_TYPE_DVI_DUAL_LINK: is_valid = dce110_link_encoder_validate_dvi_output( enc110, - stream->sink->link->connector_signal, + stream->link->connector_signal, stream->signal, &stream->timing); break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 85686d917636..a24a2bda8656 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -479,7 +479,7 @@ static void program_grph_pixel_format( case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: sign = 1; floating = 1; - /* no break */ + /* fall through */ case 
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: grph_depth = 3; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index cce0d18f91da..1fa2d4fd7a35 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -288,9 +288,18 @@ static void dce110_stream_encoder_dp_set_stream_attribute( #endif struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - + struct dc_crtc_timing hw_crtc_timing = *crtc_timing; + if (hw_crtc_timing.flags.INTERLACE) { + /*the input timing is in VESA spec format with Interlace flag =1*/ + hw_crtc_timing.v_total /= 2; + hw_crtc_timing.v_border_top /= 2; + hw_crtc_timing.v_addressable /= 2; + hw_crtc_timing.v_border_bottom /= 2; + hw_crtc_timing.v_front_porch /= 2; + hw_crtc_timing.v_sync_width /= 2; + } /* set pixel encoding */ - switch (crtc_timing->pixel_encoding) { + switch (hw_crtc_timing.pixel_encoding) { case PIXEL_ENCODING_YCBCR422: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, DP_PIXEL_ENCODING_TYPE_YCBCR422); @@ -299,8 +308,8 @@ static void dce110_stream_encoder_dp_set_stream_attribute( REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, DP_PIXEL_ENCODING_TYPE_YCBCR444); - if (crtc_timing->flags.Y_ONLY) - if (crtc_timing->display_color_depth != COLOR_DEPTH_666) + if (hw_crtc_timing.flags.Y_ONLY) + if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) /* HW testing only, no use case yet. * Color depth of Y-only could be * 8, 10, 12, 16 bits */ @@ -335,7 +344,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( /* set color depth */ - switch (crtc_timing->display_color_depth) { + switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, 0); @@ -363,7 +372,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( #if defined(CONFIG_DRM_AMD_DC_DCN1_0) - switch (crtc_timing->display_color_depth) { + switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: colorimetry_bpc = 0; break; @@ -401,9 +410,9 @@ static void dce110_stream_encoder_dp_set_stream_attribute( misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_ycbcr = 0; /*bt601*/ - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ - else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_YCBCR709: @@ -411,9 +420,9 @@ static void dce110_stream_encoder_dp_set_stream_attribute( misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_ycbcr = 1; /*bt709*/ - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ - else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_2020_RGB_LIMITEDRANGE: @@ -453,27 +462,27 @@ static void dce110_stream_encoder_dp_set_stream_attribute( */ if (REG(DP_MSA_TIMING_PARAM1)) REG_SET_2(DP_MSA_TIMING_PARAM1, 0, - DP_MSA_HTOTAL, crtc_timing->h_total, - DP_MSA_VTOTAL, crtc_timing->v_total); + 
DP_MSA_HTOTAL, hw_crtc_timing.h_total, + DP_MSA_VTOTAL, hw_crtc_timing.v_total); #endif /* calcuate from vesa timing parameters * h_active_start related to leading edge of sync */ - h_blank = crtc_timing->h_total - crtc_timing->h_border_left - - crtc_timing->h_addressable - crtc_timing->h_border_right; + h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - + hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; - h_back_porch = h_blank - crtc_timing->h_front_porch - - crtc_timing->h_sync_width; + h_back_porch = h_blank - hw_crtc_timing.h_front_porch - + hw_crtc_timing.h_sync_width; /* start at begining of left border */ - h_active_start = crtc_timing->h_sync_width + h_back_porch; + h_active_start = hw_crtc_timing.h_sync_width + h_back_porch; - v_active_start = crtc_timing->v_total - crtc_timing->v_border_top - - crtc_timing->v_addressable - crtc_timing->v_border_bottom - - crtc_timing->v_front_porch; + v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - + hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - + hw_crtc_timing.v_front_porch; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) @@ -486,21 +495,21 @@ static void dce110_stream_encoder_dp_set_stream_attribute( if (REG(DP_MSA_TIMING_PARAM3)) REG_SET_4(DP_MSA_TIMING_PARAM3, 0, DP_MSA_HSYNCWIDTH, - crtc_timing->h_sync_width, + hw_crtc_timing.h_sync_width, DP_MSA_HSYNCPOLARITY, - !crtc_timing->flags.HSYNC_POSITIVE_POLARITY, + !hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY, DP_MSA_VSYNCWIDTH, - crtc_timing->v_sync_width, + hw_crtc_timing.v_sync_width, DP_MSA_VSYNCPOLARITY, - !crtc_timing->flags.VSYNC_POSITIVE_POLARITY); + !hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY); /* HWDITH include border or overscan */ if (REG(DP_MSA_TIMING_PARAM4)) REG_SET_2(DP_MSA_TIMING_PARAM4, 0, - DP_MSA_HWIDTH, crtc_timing->h_border_left + - crtc_timing->h_addressable + crtc_timing->h_border_right, - DP_MSA_VHEIGHT, crtc_timing->v_border_top + - crtc_timing->v_addressable + crtc_timing->v_border_bottom); + DP_MSA_HWIDTH, hw_crtc_timing.h_border_left + + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right, + DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top + + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom); #endif } #endif @@ -662,7 +671,7 @@ static void dce110_stream_encoder_dvi_set_stream_attribute( cntl.signal = is_dual_link ? SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; cntl.enable_dp_audio = false; - cntl.pixel_clock = crtc_timing->pix_clk_khz; + cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; cntl.lanes_number = (is_dual_link) ? 
LANE_COUNT_EIGHT : LANE_COUNT_FOUR; if (enc110->base.bp->funcs->encoder_control( @@ -686,7 +695,7 @@ static void dce110_stream_encoder_lvds_set_stream_attribute( cntl.engine_id = enc110->base.id; cntl.signal = SIGNAL_TYPE_LVDS; cntl.enable_dp_audio = false; - cntl.pixel_clock = crtc_timing->pix_clk_khz; + cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; cntl.lanes_number = LANE_COUNT_FOUR; if (enc110->base.bp->funcs->encoder_control( @@ -1575,6 +1584,14 @@ static void setup_stereo_sync( REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable); } +static void dig_connect_to_otg( + struct stream_encoder *enc, + int tg_inst) +{ + struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); + + REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); +} static const struct stream_encoder_funcs dce110_str_enc_funcs = { .dp_set_stream_attribute = @@ -1609,7 +1626,7 @@ static const struct stream_encoder_funcs dce110_str_enc_funcs = { .hdmi_audio_disable = dce110_se_hdmi_audio_disable, .setup_stereo_sync = setup_stereo_sync, .set_avmute = dce110_stream_encoder_set_avmute, - + .dig_connect_to_otg = dig_connect_to_otg, }; void dce110_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h index 6c28229c76eb..f9cdf2b5242c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h @@ -199,7 +199,8 @@ SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\ SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\ SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\ - SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh) + SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\ + SE_SF(DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh) #define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\ SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) @@ -284,7 +285,8 @@ SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh) + SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh) #define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\ SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) @@ -494,6 +496,7 @@ struct dce_stream_encoder_shift { uint8_t HDMI_DB_DISABLE; uint8_t DP_VID_N_MUL; uint8_t DP_VID_M_DOUBLE_VALUE_EN; + uint8_t DIG_SOURCE_SELECT; }; struct dce_stream_encoder_mask { @@ -624,6 +627,7 @@ struct dce_stream_encoder_mask { uint32_t HDMI_DB_DISABLE; uint32_t DP_VID_N_MUL; uint32_t DP_VID_M_DOUBLE_VALUE_EN; + uint32_t DIG_SOURCE_SELECT; }; struct dce110_stream_enc_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index acd418515346..a6b80fdaa666 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h @@ -37,6 +37,10 @@ void dce100_prepare_bandwidth( struct dc *dc, struct dc_state *context); +void dce100_optimize_bandwidth( + struct dc *dc, + struct dc_state *context); + bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, struct dc_bios *dcb, enum pipe_gating_control power_gating); diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 6ae51a5dfc04..23044e6723e8 
100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -76,6 +76,7 @@ #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif @@ -365,6 +366,7 @@ static const struct dce_abm_mask abm_mask = { #define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03 static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -587,7 +589,7 @@ struct output_pixel_processor *dce100_opp_create( return &opp->base; } -struct aux_engine *dce100_aux_engine_create( +struct dce_aux *dce100_aux_engine_create( struct dc_context *ctx, uint32_t inst) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c index 52d50e24a995..7b23239d33fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c @@ -62,8 +62,6 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = { } }; -static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600; - static uint32_t align_to_chunks_number_per_line(uint32_t pixels) { return 256 * ((pixels + 255) / 256); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 8f09b8625c5d..5e4db3712eef 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -614,55 +614,6 @@ dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static enum dc_status bios_parser_crtc_source_select( - struct pipe_ctx *pipe_ctx) -{ - struct dc_bios *dcb; - /* call VBIOS table to set CRTC source for the HW - * encoder block - * note: video bios clears all FMT setting here. 
*/ - struct bp_crtc_source_select crtc_source_select = {0}; - const struct dc_sink *sink = pipe_ctx->stream->sink; - - crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id; - crtc_source_select.controller_id = pipe_ctx->stream_res.tg->inst + 1; - /*TODO: Need to un-hardcode color depth, dp_audio and account for - * the case where signal and sink signal is different (translator - * encoder)*/ - crtc_source_select.signal = pipe_ctx->stream->signal; - crtc_source_select.enable_dp_audio = false; - crtc_source_select.sink_signal = pipe_ctx->stream->signal; - - switch (pipe_ctx->stream->timing.display_color_depth) { - case COLOR_DEPTH_666: - crtc_source_select.display_output_bit_depth = PANEL_6BIT_COLOR; - break; - case COLOR_DEPTH_888: - crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR; - break; - case COLOR_DEPTH_101010: - crtc_source_select.display_output_bit_depth = PANEL_10BIT_COLOR; - break; - case COLOR_DEPTH_121212: - crtc_source_select.display_output_bit_depth = PANEL_12BIT_COLOR; - break; - default: - BREAK_TO_DEBUGGER(); - crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR; - break; - } - - dcb = sink->ctx->dc_bios; - - if (BP_RESULT_OK != dcb->funcs->crtc_source_select( - dcb, - &crtc_source_select)) { - return DC_ERROR_UNEXPECTED; - } - - return DC_OK; -} - void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) { bool is_hdmi; @@ -692,10 +643,10 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx) void dce110_enable_stream(struct pipe_ctx *pipe_ctx) { enum dc_lane_count lane_count = - pipe_ctx->stream->sink->link->cur_link_settings.lane_count; + pipe_ctx->stream->link->cur_link_settings.lane_count; struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; - struct dc_link *link = pipe_ctx->stream->sink->link; + struct dc_link *link = pipe_ctx->stream->link; uint32_t active_total_with_borders; @@ -1053,7 +1004,7 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option) { struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; struct dc *dc = pipe_ctx->stream->ctx->dc; if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) @@ -1078,11 +1029,10 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, { struct encoder_unblank_param params = { { 0 } }; struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; /* only 3 items below are used by unblank */ - params.pixel_clk_khz = - pipe_ctx->stream->timing.pix_clk_khz; + params.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; params.link_settings.link_rate = link_settings->link_rate; if (dc_is_dp_signal(pipe_ctx->stream->signal)) @@ -1092,10 +1042,11 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, link->dc->hwss.edp_backlight_control(link, true); } } + void dce110_blank_stream(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; - struct dc_link *link = stream->sink->link; + struct dc_link *link = stream->link; if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { link->dc->hwss.edp_backlight_control(link, false); @@ -1168,27 +1119,27 @@ static void build_audio_output( stream->timing.flags.INTERLACE; audio_output->crtc_info.refresh_rate = - (stream->timing.pix_clk_khz*1000)/ + (stream->timing.pix_clk_100hz*10000)/ (stream->timing.h_total*stream->timing.v_total); 
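Illustrative sketch, not part of the patch: the hunks in this file swap the kilohertz pixel-clock field pix_clk_khz for pix_clk_100hz, which carries units of 100 Hz, so callers that still need kHz divide by 10. A minimal helper showing the relationship, using only the two field names visible in the diff:

static inline uint32_t pix_clk_100hz_to_khz(uint32_t pix_clk_100hz)
{
	/* 1 kHz == 10 * 100 Hz, hence the "/ 10" conversions throughout this patch */
	return pix_clk_100hz / 10;
}

For example, a 148.5 MHz pixel clock is stored as 1485000 in 100 Hz units and converts back to 148500 kHz.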
audio_output->crtc_info.color_depth = stream->timing.display_color_depth; audio_output->crtc_info.requested_pixel_clock = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; + pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10; audio_output->crtc_info.calculated_pixel_clock = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; + pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10; /*for HDMI, audio ACR is with deep color ratio factor*/ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && audio_output->crtc_info.requested_pixel_clock == - stream->timing.pix_clk_khz) { + (stream->timing.pix_clk_100hz / 10)) { if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) { audio_output->crtc_info.requested_pixel_clock = audio_output->crtc_info.requested_pixel_clock/2; audio_output->crtc_info.calculated_pixel_clock = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk/2; + pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz/20; } } @@ -1299,8 +1250,6 @@ static enum dc_status dce110_enable_stream_timing( struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx. pipe_ctx[pipe_ctx->pipe_idx]; struct tg_color black_color = {0}; - struct drr_params params = {0}; - unsigned int event_triggers = 0; if (!pipe_ctx_old->stream) { @@ -1329,20 +1278,6 @@ static enum dc_status dce110_enable_stream_timing( pipe_ctx->stream_res.tg, &stream->timing, true); - - params.vertical_total_min = stream->adjust.v_total_min; - params.vertical_total_max = stream->adjust.v_total_max; - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); - - // DRR should set trigger event to monitor surface update event - if (stream->adjust.v_total_min != 0 && - stream->adjust.v_total_max != 0) - event_triggers = 0x80; - if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) - pipe_ctx->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx->stream_res.tg, event_triggers); } if (!pipe_ctx_old->stream) { @@ -1362,6 +1297,12 @@ static enum dc_status apply_single_controller_ctx_to_hw( struct dc *dc) { struct dc_stream_state *stream = pipe_ctx->stream; + struct drr_params params = {0}; + unsigned int event_triggers = 0; + + if (dc->hwss.disable_stream_gating) { + dc->hwss.disable_stream_gating(dc, pipe_ctx); + } if (pipe_ctx->stream_res.audio != NULL) { struct audio_output audio_output; @@ -1388,14 +1329,30 @@ static enum dc_status apply_single_controller_ctx_to_hw( } /* */ - dc->hwss.enable_stream_timing(pipe_ctx, context, dc); + /* Do not touch stream timing on seamless boot optimization. 
*/ + if (!pipe_ctx->stream->apply_seamless_boot_optimization) + dc->hwss.enable_stream_timing(pipe_ctx, context, dc); + + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); + + params.vertical_total_min = stream->adjust.v_total_min; + params.vertical_total_max = stream->adjust.v_total_max; + if (pipe_ctx->stream_res.tg->funcs->set_drr) + pipe_ctx->stream_res.tg->funcs->set_drr( + pipe_ctx->stream_res.tg, ¶ms); + + // DRR should set trigger event to monitor surface update event + if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) + event_triggers = 0x80; + if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) + pipe_ctx->stream_res.tg->funcs->set_static_screen_control( + pipe_ctx->stream_res.tg, event_triggers); - /* TODO: move to stream encoder */ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) - if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) { - BREAK_TO_DEBUGGER(); - return DC_ERROR_UNEXPECTED; - } + pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg( + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.tg->inst); pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( pipe_ctx->stream_res.opp, @@ -1413,7 +1370,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; - pipe_ctx->stream->sink->link->psr_enabled = false; + pipe_ctx->stream->link->psr_enabled = false; return DC_OK; } @@ -1523,7 +1480,7 @@ static struct dc_link *get_link_for_edp(struct dc *dc) return NULL; } -static struct dc_link *get_link_for_edp_not_in_use( +static struct dc_link *get_link_for_edp_to_turn_off( struct dc *dc, struct dc_state *context) { @@ -1532,8 +1489,12 @@ static struct dc_link *get_link_for_edp_not_in_use( /* check if eDP panel is suppose to be set mode, if yes, no need to disable */ for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->signal == SIGNAL_TYPE_EDP) - return NULL; + if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { + if (context->streams[i]->dpms_off == true) + return context->streams[i]->sink->link; + else + return NULL; + } } /* check if there is an eDP panel not in use */ @@ -1560,9 +1521,16 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) int i; struct dc_link *edp_link_to_turnoff = NULL; struct dc_link *edp_link = get_link_for_edp(dc); - struct dc_bios *bios = dc->ctx->dc_bios; bool can_edp_fast_boot_optimize = false; bool apply_edp_fast_boot_optimization = false; + bool can_apply_seamless_boot = false; + + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->apply_seamless_boot_optimization) { + can_apply_seamless_boot = true; + break; + } + } if (edp_link) { /* this seems to cause blank screens on DCE8 */ @@ -1576,7 +1544,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) } if (can_edp_fast_boot_optimize) - edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context); + edp_link_to_turnoff = get_link_for_edp_to_turn_off(dc, context); /* if OS doesn't light up eDP and eDP link is available, we want to disable * If resume from S4/S5, should optimization. @@ -1587,25 +1555,11 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { context->streams[i]->apply_edp_fast_boot_optimization = true; apply_edp_fast_boot_optimization = true; - - /* When after S4 and S5, vbios may post edp and previous dpms_off - * doesn't make sense. 
- * Update dpms_off state to align hw and sw state via check - * vBios scratch register. - */ - if (bios->funcs->is_active_display) { - const struct connector_device_tag_info *device_tag = &(edp_link->device_tag); - - if (bios->funcs->is_active_display(bios, - context->streams[i]->signal, - device_tag)) - context->streams[i]->dpms_off = false; - } } } } - if (!apply_edp_fast_boot_optimization) { + if (!apply_edp_fast_boot_optimization && !can_apply_seamless_boot) { if (edp_link_to_turnoff) { /*turn off backlight before DP_blank and encoder powered down*/ dc->hwss.edp_backlight_control(edp_link_to_turnoff, false); @@ -1629,8 +1583,8 @@ static uint32_t compute_pstate_blackout_duration( pstate_blackout_duration_ns = 1000 * blackout_duration.value >> 24; total_dest_line_time_ns = 1000000UL * - stream->timing.h_total / - stream->timing.pix_clk_khz + + (stream->timing.h_total * 10) / + stream->timing.pix_clk_100hz + pstate_blackout_duration_ns; return total_dest_line_time_ns; @@ -1818,18 +1772,15 @@ static bool should_enable_fbc(struct dc *dc, if (i == dc->res_pool->pipe_count) return false; - if (!pipe_ctx->stream->sink) - return false; - - if (!pipe_ctx->stream->sink->link) + if (!pipe_ctx->stream->link) return false; /* Only supports eDP */ - if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) + if (pipe_ctx->stream->link->connector_signal != SIGNAL_TYPE_EDP) return false; /* PSR should not be enabled */ - if (pipe_ctx->stream->sink->link->psr_enabled) + if (pipe_ctx->stream->link->psr_enabled) return false; /* Nothing to compress */ @@ -2334,6 +2285,11 @@ static void dce110_enable_per_frame_crtc_position_reset( } +static void init_pipes(struct dc *dc, struct dc_state *context) +{ + // Do nothing +} + static void init_hw(struct dc *dc) { int i; @@ -2578,7 +2534,7 @@ static void dce110_apply_ctx_for_surface( pipe_ctx->plane_res.mi, pipe_ctx->stream->timing.h_total, pipe_ctx->stream->timing.v_total, - pipe_ctx->stream->timing.pix_clk_khz, + pipe_ctx->stream->timing.pix_clk_100hz / 10, context->stream_count); dce110_program_front_end_for_pipe(dc, pipe_ctx); @@ -2600,7 +2556,7 @@ static void dce110_apply_ctx_for_surface( } if (dc->fbc_compressor) - enable_fbc(dc, dc->current_state); + enable_fbc(dc, context); } static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx) @@ -2627,13 +2583,35 @@ static void dce110_wait_for_mpcc_disconnect( /* do nothing*/ } +static void program_output_csc(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id) +{ + int i; + struct out_csc_color_matrix tbl_entry; + + if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { + enum dc_color_space color_space = pipe_ctx->stream->output_color_space; + + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i]; + + tbl_entry.color_space = color_space; + + pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment( + pipe_ctx->plane_res.xfm, &tbl_entry); + } +} + void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) { struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; struct mem_input *mi = pipe_ctx->plane_res.mi; struct dc_cursor_mi_param param = { - .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz, + .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10, .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, .viewport = pipe_ctx->plane_res.scl_data.viewport, .h_scale_ratio = 
pipe_ctx->plane_res.scl_data.ratios.horz, @@ -2677,7 +2655,9 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) static const struct hw_sequencer_funcs dce110_funcs = { .program_gamut_remap = program_gamut_remap, + .program_output_csc = program_output_csc, .init_hw = init_hw, + .init_pipes = init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, .update_plane_addr = update_plane_addr, @@ -2706,6 +2686,8 @@ static const struct hw_sequencer_funcs dce110_funcs = { .set_static_screen_control = set_static_screen_control, .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, .enable_stream_timing = dce110_enable_stream_timing, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, .setup_stereo = NULL, .set_avmute = dce110_set_avmute, .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e33d11785b1f..7549adaa1542 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -84,6 +84,7 @@ #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif @@ -369,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -606,7 +608,7 @@ static struct output_pixel_processor *dce110_opp_create( return &opp->base; } -struct aux_engine *dce110_aux_engine_create( +struct dce_aux *dce110_aux_engine_create( struct dc_context *ctx, uint32_t inst) { @@ -779,8 +781,8 @@ static void get_pixel_clock_parameters( * the pixel clock normalization for hdmi up to here instead of doing it * in pll_adjust_pix_clk */ - pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz; - pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id; + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ @@ -797,10 +799,10 @@ static void get_pixel_clock_parameters( pixel_clk_params->color_depth = COLOR_DEPTH_888; } if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { - pixel_clk_params->requested_pix_clk = pixel_clk_params->requested_pix_clk / 2; + pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; } if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pixel_clk_params->requested_pix_clk *= 2; + pixel_clk_params->requested_pix_clk_100hz *= 2; } @@ -874,7 +876,7 @@ static bool dce110_validate_bandwidth( __func__, context->streams[0]->timing.h_addressable, context->streams[0]->timing.v_addressable, - context->streams[0]->timing.pix_clk_khz); + context->streams[0]->timing.pix_clk_100hz / 10); if (memcmp(&dc->current_state->bw.dce, &context->bw.dce, sizeof(context->bw.dce))) { @@ -1055,7 +1057,7 @@ static struct pipe_ctx *dce110_acquire_underlay( pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi, stream->timing.h_total, stream->timing.v_total, - stream->timing.pix_clk_khz, + stream->timing.pix_clk_100hz / 10, context->stream_count); color_space_to_black_color(dc, diff --git 
a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 969d4e72dc94..ea3065d63372 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -76,6 +76,7 @@ #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif @@ -376,6 +377,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -607,7 +609,7 @@ struct output_pixel_processor *dce112_opp_create( return &opp->base; } -struct aux_engine *dce112_aux_engine_create( +struct dce_aux *dce112_aux_engine_create( struct dc_context *ctx, uint32_t inst) { @@ -763,7 +765,7 @@ static struct clock_source *find_matching_pll( const struct resource_pool *pool, const struct dc_stream_state *const stream) { - switch (stream->sink->link->link_enc->transmitter) { + switch (stream->link->link_enc->transmitter) { case TRANSMITTER_UNIPHY_A: return pool->clock_sources[DCE112_CLK_SRC_PLL0]; case TRANSMITTER_UNIPHY_B: diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index eb0f5f9a973b..1ca30928025e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -244,6 +244,21 @@ static void dce120_update_dchub( dh_data->dchub_info_valid = false; } +/** + * dce121_xgmi_enabled() - Check if xGMI is enabled + * @hws: DCE hardware sequencer object + * + * Return true if xGMI is enabled. False otherwise. + */ +bool dce121_xgmi_enabled(struct dce_hwseq *hws) +{ + uint32_t pf_max_region; + + REG_GET(MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION, &pf_max_region); + /* PF_MAX_REGION == 0 means xgmi is disabled */ + return !!pf_max_region; +} + void dce120_hw_sequencer_construct(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h index 77a6b86d7606..c51afbd0b012 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h @@ -30,6 +30,7 @@ struct dc; +bool dce121_xgmi_enabled(struct dce_hwseq *hws); void dce120_hw_sequencer_construct(struct dc *dc); #endif /* __DC_HWSS_DCE112_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index f12696674eb0..312a0aebf91f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -62,6 +62,8 @@ #include "soc15_hw_ip.h" #include "vega10_ip_offset.h" #include "nbio/nbio_6_1_offset.h" +#include "mmhub/mmhub_9_4_0_offset.h" +#include "mmhub/mmhub_9_4_0_sh_mask.h" #include "reg_helper.h" #include "dce100/dce100_resource.h" @@ -139,6 +141,17 @@ static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = { .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ mm ## block ## id ## _ ## reg_name +/* MMHUB */ +#define MMHUB_BASE_INNER(seg) \ + MMHUB_BASE__INST0_SEG ## seg + +#define MMHUB_BASE(seg) \ + MMHUB_BASE_INNER(seg) + +#define MMHUB_SR(reg_name)\ + .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \ + mm ## reg_name + 
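An aside on the new MMHUB_SR() helper, offered as an illustration rather than as part of the diff: the two-level MMHUB_BASE()/MMHUB_BASE_INNER() indirection lets the register's _BASE_IDX macro expand to a plain number before token pasting, mirroring the existing NBIO_BASE() pattern used for bios_regs below. Assuming mmMC_VM_XGMI_LFB_CNTL_BASE_IDX (the xGMI register read by dce121_xgmi_enabled() above) resolves to segment 0, an initializer written as

	MMHUB_SR(MC_VM_XGMI_LFB_CNTL)

expands roughly to

	.MC_VM_XGMI_LFB_CNTL = MMHUB_BASE__INST0_SEG0 + mmMC_VM_XGMI_LFB_CNTL

that is, the MMHUB segment base plus the per-register offset, both taken from the mmhub_9_4_0_offset.h header included earlier in this file.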
/* macros to expend register list macro defined in HW object header file * end *********************/ @@ -378,7 +391,7 @@ struct output_pixel_processor *dce120_opp_create( ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask); return &opp->base; } -struct aux_engine *dce120_aux_engine_create( +struct dce_aux *dce120_aux_engine_create( struct dc_context *ctx, uint32_t inst) { @@ -429,6 +442,7 @@ struct dce_i2c_hw *dce120_i2c_hw_create( return dce_i2c_hw; } static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX), .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX) }; @@ -681,6 +695,19 @@ static const struct dce_hwseq_mask hwseq_mask = { HWSEQ_DCE12_MASK_SH_LIST(_MASK) }; +/* HWSEQ regs for VG20 */ +static const struct dce_hwseq_registers dce121_hwseq_reg = { + HWSEQ_VG20_REG_LIST() +}; + +static const struct dce_hwseq_shift dce121_hwseq_shift = { + HWSEQ_VG20_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce_hwseq_mask dce121_hwseq_mask = { + HWSEQ_VG20_MASK_SH_LIST(_MASK) +}; + static struct dce_hwseq *dce120_hwseq_create( struct dc_context *ctx) { @@ -695,6 +722,20 @@ static struct dce_hwseq *dce120_hwseq_create( return hws; } +static struct dce_hwseq *dce121_hwseq_create( + struct dc_context *ctx) +{ + struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); + + if (hws) { + hws->ctx = ctx; + hws->regs = &dce121_hwseq_reg; + hws->shifts = &dce121_hwseq_shift; + hws->masks = &dce121_hwseq_mask; + } + return hws; +} + static const struct resource_create_funcs res_create_funcs = { .read_dce_straps = read_dce_straps, .create_audio = create_audio, @@ -702,6 +743,14 @@ static const struct resource_create_funcs res_create_funcs = { .create_hwseq = dce120_hwseq_create, }; +static const struct resource_create_funcs dce121_res_create_funcs = { + .read_dce_straps = read_dce_straps, + .create_audio = create_audio, + .create_stream_encoder = dce120_stream_encoder_create, + .create_hwseq = dce121_hwseq_create, +}; + + #define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) } static const struct dce_mem_input_registers mi_regs[] = { mi_inst_regs(0), @@ -911,7 +960,8 @@ static bool construct( int j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data irq_init_data; - bool harvest_enabled = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); + static const struct resource_create_funcs *res_funcs; + bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); uint32_t pipe_fuses; ctx->dc_bios->regs = &bios_regs; @@ -975,7 +1025,11 @@ static bool construct( } } - pool->base.clk_mgr = dce120_clk_mgr_create(ctx); + if (is_vg20) + pool->base.clk_mgr = dce121_clk_mgr_create(ctx); + else + pool->base.clk_mgr = dce120_clk_mgr_create(ctx); + if (pool->base.clk_mgr == NULL) { dm_error("DC: failed to create display clock!\n"); BREAK_TO_DEBUGGER(); @@ -1008,14 +1062,14 @@ static bool construct( if (!pool->base.irqs) goto irqs_create_fail; - /* retrieve valid pipe fuses */ - if (harvest_enabled) + /* VG20: Pipe harvesting enabled, retrieve valid pipe fuses */ + if (is_vg20) pipe_fuses = read_pipe_fuses(ctx); /* index to valid pipe resource */ j = 0; for (i = 0; i < pool->base.pipe_count; i++) { - if (harvest_enabled) { + if (is_vg20) { if ((pipe_fuses & (1 << i)) != 0) { dm_error("DC: skip invalid pipe %d!\n", i); continue; @@ -1093,10 +1147,24 @@ static bool construct( pool->base.pipe_count = j; pool->base.timing_generator_count = j; - if (!resource_construct(num_virtual_links, dc, &pool->base, - 
&res_create_funcs)) + if (is_vg20) + res_funcs = &dce121_res_create_funcs; + else + res_funcs = &res_create_funcs; + + if (!resource_construct(num_virtual_links, dc, &pool->base, res_funcs)) goto res_create_fail; + /* + * This is a bit of a hack. The xGMI enabled info is used to determine + * if audio and display clocks need to be adjusted with the WAFL link's + * SS info. This is a responsiblity of the clk_mgr. But since MMHUB is + * under hwseq, and the relevant register is in MMHUB, we have to do it + * here. + */ + if (is_vg20 && dce121_xgmi_enabled(dc->hwseq)) + dce121_clock_patch_xgmi_ss_info(pool->base.clk_mgr); + /* Create hardware sequencer */ if (!dce120_hw_sequencer_create(dc)) goto controller_create_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index a60a90e68d91..c4543178ba20 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c @@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc) dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; dc->hwss.pipe_control_lock = dce_pipe_control_lock; dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; - dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; + dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index cdd1d6b7b9f2..c109ace96be9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -77,6 +77,7 @@ #ifndef mmBIOS_SCRATCH_2 #define mmBIOS_SCRATCH_2 0x05CB + #define mmBIOS_SCRATCH_3 0x05CC #define mmBIOS_SCRATCH_6 0x05CF #endif @@ -358,6 +359,7 @@ static const struct dce110_clk_src_mask cs_mask = { }; static const struct bios_registers bios_regs = { + .BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3, .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 }; @@ -467,7 +469,7 @@ static struct output_pixel_processor *dce80_opp_create( return &opp->base; } -struct aux_engine *dce80_aux_engine_create( +struct dce_aux *dce80_aux_engine_create( struct dc_context *ctx, uint32_t inst) { @@ -790,9 +792,22 @@ bool dce80_validate_bandwidth( struct dc *dc, struct dc_state *context) { - /* TODO implement when needed but for now hardcode max value*/ - context->bw.dce.dispclk_khz = 681000; - context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + int i; + bool at_least_one_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].stream) + at_least_one_pipe = true; + } + + if (at_least_one_pipe) { + /* TODO implement when needed but for now hardcode max value*/ + context->bw.dce.dispclk_khz = 681000; + context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; + } else { + context->bw.dce.dispclk_khz = 0; + context->bw.dce.yclk_khz = 0; + } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c index 3ba4712a35ab..8b5ce557ee71 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c @@ -84,17 +84,17 @@ static const struct dce110_timing_generator_offsets reg_offsets[] = { #define DCP_REG(reg) (reg + tg110->offsets.dcp) #define DMIF_REG(reg) (reg + tg110->offsets.dmif) -static void program_pix_dur(struct timing_generator *tg, uint32_t 
pix_clk_khz) +static void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_100hz) { uint64_t pix_dur; uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 + DCE110TG_FROM_TG(tg)->offsets.dmif; uint32_t value = dm_read_reg(tg->ctx, addr); - if (pix_clk_khz == 0) + if (pix_clk_100hz == 0) return; - pix_dur = 1000000000 / pix_clk_khz; + pix_dur = div_u64(10000000000ull, pix_clk_100hz); set_reg_field_value( value, @@ -110,7 +110,7 @@ static void program_timing(struct timing_generator *tg, bool use_vbios) { if (!use_vbios) - program_pix_dur(tg, timing->pix_clk_khz); + program_pix_dur(tg, timing->pix_clk_100hz); dce110_tg_program_timing(tg, timing, use_vbios); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c index 54abedbf1b43..afe8c42211cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c @@ -161,69 +161,17 @@ static int get_active_display_cnt( return display_count; } -static void notify_deep_sleep_dcfclk_to_smu( - struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz) -{ - int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz - /* - * if function pointer not set up, this message is - * sent as part of pplib_apply_display_requirements. - * So just return. - */ - if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk) - return; - - min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up - pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz); -} - -static void notify_hard_min_dcfclk_to_smu( - struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz) -{ - int min_dcf_clk_mhz; //minimum required DCF clock in mhz - - /* - * if function pointer not set up, this message is - * sent as part of pplib_apply_display_requirements. - * So just return. - */ - if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq) - return; - - min_dcf_clk_mhz = min_dcf_clk_khz / 1000; - - pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz); -} - -static void notify_hard_min_fclk_to_smu( - struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz) -{ - int min_f_clk_mhz; //minimum required F clock in mhz - - /* - * if function pointer not set up, this message is - * sent as part of pplib_apply_display_requirements. - * So just return. 
- */ - if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq) - return; - - min_f_clk_mhz = min_f_clk_khz / 1000; - - pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz); -} - static void dcn1_update_clocks(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { struct dc *dc = clk_mgr->ctx->dc; + struct dc_debug_options *debug = &dc->debug; struct dc_clocks *new_clocks = &context->bw.dcn.clk; struct pp_smu_display_requirement_rv *smu_req_cur = &dc->res_pool->pp_smu_req; struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; - uint32_t requested_dcf_clock_in_khz = 0; bool send_request_to_increase = false; bool send_request_to_lower = false; int display_count; @@ -243,9 +191,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, */ if (pp_smu->set_display_count) pp_smu->set_display_count(&pp_smu->pp_smu, display_count); - else - smu_req.display_count = display_count; + smu_req.display_count = display_count; } if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz @@ -261,12 +208,13 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, } // F Clock + if (debug->force_fclk_khz != 0) + new_clocks->fclk_khz = debug->force_fclk_khz; + if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; - notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz); - send_request_to_lower = true; } @@ -281,7 +229,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, if (should_set_clock(safe_to_lower, new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; - smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000; + smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000; send_request_to_lower = true; } @@ -291,15 +239,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, */ if (send_request_to_increase) { /*use dcfclk to request voltage*/ - requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); - - notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz); - - if (pp_smu->set_display_requirement) - pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); - - notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); - dcn1_pplib_apply_display_requirements(dc, context); + if (pp_smu->set_hard_min_fclk_by_freq && + pp_smu->set_hard_min_dcfclk_by_freq && + pp_smu->set_min_deep_sleep_dcfclk) { + + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); + } else { + if (pp_smu->set_display_requirement) + pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); + dcn1_pplib_apply_display_requirements(dc, context); + } } /* dcn1 dppclk is tied to dispclk */ @@ -314,18 +265,20 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr, if (!send_request_to_increase && send_request_to_lower) { /*use dcfclk to request voltage*/ - requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); - - notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz); - - if (pp_smu->set_display_requirement) - pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); - - 
notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); - dcn1_pplib_apply_display_requirements(dc, context); + if (pp_smu->set_hard_min_fclk_by_freq && + pp_smu->set_hard_min_dcfclk_by_freq && + pp_smu->set_min_deep_sleep_dcfclk) { + + pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz); + pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz); + pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz); + } else { + if (pp_smu->set_display_requirement) + pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); + dcn1_pplib_apply_display_requirements(dc, context); + } } - *smu_req_cur = smu_req; } static const struct clk_mgr_funcs dcn1_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 116977eb24e2..41f0f4c912e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -51,10 +51,6 @@ #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) -struct dcn10_input_csc_matrix { - enum dc_color_space color_space; - uint16_t regval[12]; -}; enum dcn10_coef_filter_type_sel { SCL_COEF_LUMA_VERT_FILTER = 0, @@ -99,7 +95,7 @@ enum gamut_remap_select { GAMUT_REMAP_COMB_COEFF }; -static const struct dcn10_input_csc_matrix dcn10_input_csc_matrix[] = { +static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = { {COLOR_SPACE_SRGB, {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} }, {COLOR_SPACE_SRGB_LIMITED, @@ -454,7 +450,7 @@ void dpp1_program_input_csc( { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); int i; - int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix); + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); const uint16_t *regval = NULL; uint32_t cur_select = 0; enum dcn10_input_csc_select select; @@ -467,8 +463,8 @@ void dpp1_program_input_csc( if (tbl_entry == NULL) { for (i = 0; i < arr_size; i++) - if (dcn10_input_csc_matrix[i].color_space == color_space) { - regval = dcn10_input_csc_matrix[i].regval; + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; break; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 4a863a5dab41..c7642e748297 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -597,11 +597,13 @@ static void dpp1_dscl_set_manual_ratio_init( SCL_V_INIT_FRAC, init_frac, SCL_V_INIT_INT, init_int); - init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5; - init_int = dc_fixpt_floor(data->inits.v_bot); - REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0, - SCL_V_INIT_FRAC_BOT, init_frac, - SCL_V_INIT_INT_BOT, init_int); + if (REG(SCL_VERT_FILTER_INIT_BOT)) { + init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5; + init_int = dc_fixpt_floor(data->inits.v_bot); + REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0, + SCL_V_INIT_FRAC_BOT, init_frac, + SCL_V_INIT_INT_BOT, init_int); + } init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5; init_int = dc_fixpt_floor(data->inits.v_c); @@ -609,11 +611,13 @@ static void dpp1_dscl_set_manual_ratio_init( SCL_V_INIT_FRAC_C, init_frac, SCL_V_INIT_INT_C, init_int); - init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5; - init_int = dc_fixpt_floor(data->inits.v_c_bot); - REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0, - SCL_V_INIT_FRAC_BOT_C, init_frac, - 
SCL_V_INIT_INT_BOT_C, init_int); + if (REG(SCL_VERT_FILTER_INIT_BOT_C)) { + init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5; + init_int = dc_fixpt_floor(data->inits.v_c_bot); + REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0, + SCL_V_INIT_FRAC_BOT_C, init_frac, + SCL_V_INIT_INT_BOT_C, init_int); + } } @@ -688,15 +692,17 @@ void dpp1_dscl_set_scaler_manual_scale( return; /* Black offsets */ - if (ycbcr) - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); - else + if (REG(SCL_BLACK_OFFSET)) { + if (ycbcr) + REG_SET_2(SCL_BLACK_OFFSET, 0, + SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, + SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); + else - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); + REG_SET_2(SCL_BLACK_OFFSET, 0, + SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, + SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); + } /* Manually calculate scale ratio and init values */ dpp1_dscl_set_manual_ratio_init(dpp, scl_data); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index c7d1e678ebf5..e161ad836812 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -29,19 +29,20 @@ #include "reg_helper.h" #define CTX \ - hubbub->ctx + hubbub1->base.ctx #define DC_LOGGER \ - hubbub->ctx->logger + hubbub1->base.ctx->logger #define REG(reg)\ - hubbub->regs->reg + hubbub1->regs->reg #undef FN #define FN(reg_name, field_name) \ - hubbub->shifts->field_name, hubbub->masks->field_name + hubbub1->shifts->field_name, hubbub1->masks->field_name void hubbub1_wm_read_state(struct hubbub *hubbub, struct dcn_hubbub_wm *wm) { + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); struct dcn_hubbub_wm_set *s; memset(wm, 0, sizeof(struct dcn_hubbub_wm)); @@ -87,14 +88,23 @@ void hubbub1_wm_read_state(struct hubbub *hubbub, s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); } -void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub) +void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow) { - REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0); + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + /* + * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter + * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter + */ + + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0, + DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow); } bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub) { + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); uint32_t enable = 0; REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL, @@ -107,6 +117,8 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub) bool hubbub1_verify_allow_pstate_change_high( struct hubbub *hubbub) { + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + /* pstate latency is ~20us so if we wait over 40us and pstate allow * still not asserted, we are probably stuck and going to hang * @@ -193,7 +205,7 @@ bool hubbub1_verify_allow_pstate_change_high( * 31: SOC pstate change request */ - REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate); + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate); for (i = 0; i < pstate_wait_timeout_us; i++) { debug_data = 
REG_READ(DCHUBBUB_TEST_DEBUG_DATA); @@ -244,6 +256,8 @@ static uint32_t convert_and_clamp( void hubbub1_wm_change_req_wa(struct hubbub *hubbub) { + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1); } @@ -254,7 +268,7 @@ void hubbub1_program_watermarks( unsigned int refclk_mhz, bool safe_to_lower) { - uint32_t force_en = hubbub->ctx->dc->debug.disable_stutter ? 1 : 0; + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); /* * Need to clamp to max of the register values (i.e. no wrap) * for dcn1, all wm registers are 21-bit wide @@ -264,8 +278,8 @@ void hubbub1_program_watermarks( /* Repeat for water mark set A, B, C and D. */ /* clock state A */ - if (safe_to_lower || watermarks->a.urgent_ns > hubbub->watermarks.a.urgent_ns) { - hubbub->watermarks.a.urgent_ns = watermarks->a.urgent_ns; + if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) { + hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, refclk_mhz, 0x1fffff); REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); @@ -275,20 +289,22 @@ void hubbub1_program_watermarks( watermarks->a.urgent_ns, prog_wm_value); } - if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub->watermarks.a.pte_meta_urgent_ns) { - hubbub->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.pte_meta_urgent_ns, prog_wm_value); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) { + if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) { + hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.pte_meta_urgent_ns, prog_wm_value); + } } if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) { if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, @@ -300,8 +316,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns - > hubbub->watermarks.a.cstate_pstate.cstate_exit_ns) { - hubbub->watermarks.a.cstate_pstate.cstate_exit_ns = + > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns = watermarks->a.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.cstate_exit_ns, @@ -314,8 +330,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns - > hubbub->watermarks.a.cstate_pstate.pstate_change_ns) { - 
hubbub->watermarks.a.cstate_pstate.pstate_change_ns = + > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.a.cstate_pstate.pstate_change_ns = watermarks->a.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->a.cstate_pstate.pstate_change_ns, @@ -327,8 +343,8 @@ void hubbub1_program_watermarks( } /* clock state B */ - if (safe_to_lower || watermarks->b.urgent_ns > hubbub->watermarks.b.urgent_ns) { - hubbub->watermarks.b.urgent_ns = watermarks->b.urgent_ns; + if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { + hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, refclk_mhz, 0x1fffff); REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); @@ -338,20 +354,22 @@ void hubbub1_program_watermarks( watermarks->b.urgent_ns, prog_wm_value); } - if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub->watermarks.b.pte_meta_urgent_ns) { - hubbub->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.pte_meta_urgent_ns, prog_wm_value); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) { + if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) { + hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.pte_meta_urgent_ns, prog_wm_value); + } } if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) { if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, @@ -363,8 +381,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns - > hubbub->watermarks.b.cstate_pstate.cstate_exit_ns) { - hubbub->watermarks.b.cstate_pstate.cstate_exit_ns = + > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns = watermarks->b.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.cstate_exit_ns, @@ -377,8 +395,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns - > hubbub->watermarks.b.cstate_pstate.pstate_change_ns) { - hubbub->watermarks.b.cstate_pstate.pstate_change_ns = + > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.b.cstate_pstate.pstate_change_ns = watermarks->b.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->b.cstate_pstate.pstate_change_ns, @@ -390,8 +408,8 @@ void hubbub1_program_watermarks( } /* clock state C */ - if (safe_to_lower 
|| watermarks->c.urgent_ns > hubbub->watermarks.c.urgent_ns) { - hubbub->watermarks.c.urgent_ns = watermarks->c.urgent_ns; + if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { + hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, refclk_mhz, 0x1fffff); REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); @@ -401,20 +419,22 @@ void hubbub1_program_watermarks( watermarks->c.urgent_ns, prog_wm_value); } - if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub->watermarks.c.pte_meta_urgent_ns) { - hubbub->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.pte_meta_urgent_ns, prog_wm_value); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) { + if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) { + hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.pte_meta_urgent_ns, prog_wm_value); + } } if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) { if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, @@ -426,8 +446,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns - > hubbub->watermarks.c.cstate_pstate.cstate_exit_ns) { - hubbub->watermarks.c.cstate_pstate.cstate_exit_ns = + > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns = watermarks->c.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.cstate_exit_ns, @@ -440,8 +460,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns - > hubbub->watermarks.c.cstate_pstate.pstate_change_ns) { - hubbub->watermarks.c.cstate_pstate.pstate_change_ns = + > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.c.cstate_pstate.pstate_change_ns = watermarks->c.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->c.cstate_pstate.pstate_change_ns, @@ -453,8 +473,8 @@ void hubbub1_program_watermarks( } /* clock state D */ - if (safe_to_lower || watermarks->d.urgent_ns > hubbub->watermarks.d.urgent_ns) { - hubbub->watermarks.d.urgent_ns = watermarks->d.urgent_ns; + if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { + hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, refclk_mhz, 0x1fffff); 
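Every watermark hunk in this file funnels the nanosecond value through convert_and_clamp(wm_ns, refclk_mhz, 0x1fffff) before the REG_WRITE. The helper's body lies outside this diff; judging from its call sites and the earlier comment that dcn1 watermark registers are 21 bits wide, it plausibly converts nanoseconds into refclk cycles and saturates at the register maximum. A hedged sketch, not the verified implementation:

static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz,
		uint32_t clamp_value)
{
	/* refclk_mhz is cycles per microsecond, so cycles = ns * MHz / 1000 */
	uint32_t ret_val = wm_ns * refclk_mhz / 1000;

	if (ret_val > clamp_value)	/* e.g. 0x1fffff for the 21-bit dcn1 fields */
		ret_val = clamp_value;

	return ret_val;
}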
REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); @@ -464,20 +484,22 @@ void hubbub1_program_watermarks( watermarks->d.urgent_ns, prog_wm_value); } - if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub->watermarks.d.pte_meta_urgent_ns) { - hubbub->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.pte_meta_urgent_ns, prog_wm_value); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) { + if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) { + hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.pte_meta_urgent_ns, prog_wm_value); + } } if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) { if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, @@ -489,8 +511,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns - > hubbub->watermarks.d.cstate_pstate.cstate_exit_ns) { - hubbub->watermarks.d.cstate_pstate.cstate_exit_ns = + > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns = watermarks->d.cstate_pstate.cstate_exit_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.cstate_exit_ns, @@ -503,8 +525,8 @@ void hubbub1_program_watermarks( } if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns - > hubbub->watermarks.d.cstate_pstate.pstate_change_ns) { - hubbub->watermarks.d.cstate_pstate.pstate_change_ns = + > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.d.cstate_pstate.pstate_change_ns = watermarks->d.cstate_pstate.pstate_change_ns; prog_wm_value = convert_and_clamp( watermarks->d.cstate_pstate.pstate_change_ns, @@ -520,9 +542,7 @@ void hubbub1_program_watermarks( REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68); - REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0, - DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en); + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); #if 0 REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, @@ -535,6 +555,8 @@ void hubbub1_update_dchub( struct hubbub *hubbub, struct dchub_init_data *dh_data) { + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) { ASSERT(false); /*should not come here*/ @@ -594,6 +616,8 @@ void hubbub1_update_dchub( void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub) { + struct dcn10_hubbub *hubbub1 = 
TO_DCN10_HUBBUB(hubbub); + uint32_t watermark_change_req; REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, @@ -610,6 +634,8 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub) void hubbub1_soft_reset(struct hubbub *hubbub, bool reset) { + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + uint32_t reset_en = reset ? 1 : 0; REG_UPDATE(DCHUBBUB_SOFT_RESET, @@ -752,7 +778,9 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { - struct dc *dc = hubbub->ctx->dc; + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + struct dc *dc = hubbub1->base.ctx->dc; + /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ enum dcc_control dcc_control; unsigned int bpe; @@ -764,10 +792,10 @@ static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub, if (dc->debug.disable_dcc == DCC_DISABLE) return false; - if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe)) + if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe)) return false; - if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, + if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe, &segment_order_horz, &segment_order_vert)) return false; @@ -837,6 +865,7 @@ static const struct hubbub_funcs hubbub1_funcs = { .dcc_support_swizzle = hubbub1_dcc_support_swizzle, .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap, + .wm_read_state = hubbub1_wm_read_state, }; void hubbub1_construct(struct hubbub *hubbub, @@ -845,18 +874,20 @@ void hubbub1_construct(struct hubbub *hubbub, const struct dcn_hubbub_shift *hubbub_shift, const struct dcn_hubbub_mask *hubbub_mask) { - hubbub->ctx = ctx; + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + hubbub1->base.ctx = ctx; - hubbub->funcs = &hubbub1_funcs; + hubbub1->base.funcs = &hubbub1_funcs; - hubbub->regs = hubbub_regs; - hubbub->shifts = hubbub_shift; - hubbub->masks = hubbub_mask; + hubbub1->regs = hubbub_regs; + hubbub1->shifts = hubbub_shift; + hubbub1->masks = hubbub_mask; - hubbub->debug_test_index_pstate = 0x7; + hubbub1->debug_test_index_pstate = 0x7; #if defined(CONFIG_DRM_AMD_DC_DCN1_01) if (ctx->dce_version == DCN_VERSION_1_01) - hubbub->debug_test_index_pstate = 0xB; + hubbub1->debug_test_index_pstate = 0xB; #endif } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index d0f03d152913..9cd4a5194154 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -29,6 +29,9 @@ #include "core_types.h" #include "dchubbub.h" +#define TO_DCN10_HUBBUB(hubbub)\ + container_of(hubbub, struct dcn10_hubbub, base) + #define HUBHUB_REG_LIST_DCN()\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ @@ -107,6 +110,12 @@ struct dcn_hubbub_registers { uint32_t DCHUBBUB_SDPIF_AGP_TOP; uint32_t DCHUBBUB_CRC_CTRL; uint32_t DCHUBBUB_SOFT_RESET; + uint32_t DCN_VM_FB_LOCATION_BASE; + uint32_t DCN_VM_FB_LOCATION_TOP; + uint32_t DCN_VM_FB_OFFSET; + uint32_t DCN_VM_AGP_BOT; + uint32_t DCN_VM_AGP_TOP; + uint32_t DCN_VM_AGP_BASE; }; /* set field name */ @@ -152,7 +161,13 @@ struct dcn_hubbub_registers { type SDPIF_FB_OFFSET;\ type SDPIF_AGP_BASE;\ type SDPIF_AGP_BOT;\ - type SDPIF_AGP_TOP + type SDPIF_AGP_TOP;\ + type FB_BASE;\ + type FB_TOP;\ + type FB_OFFSET;\ + type AGP_BOT;\ + type AGP_TOP;\ 
+ type AGP_BASE struct dcn_hubbub_shift { @@ -165,22 +180,8 @@ struct dcn_hubbub_mask { struct dc; -struct dcn_hubbub_wm_set { - uint32_t wm_set; - uint32_t data_urgent; - uint32_t pte_meta_urgent; - uint32_t sr_enter; - uint32_t sr_exit; - uint32_t dram_clk_chanage; -}; - -struct dcn_hubbub_wm { - struct dcn_hubbub_wm_set sets[4]; -}; - -struct hubbub { - const struct hubbub_funcs *funcs; - struct dc_context *ctx; +struct dcn10_hubbub { + struct hubbub base; const struct dcn_hubbub_registers *regs; const struct dcn_hubbub_shift *shifts; const struct dcn_hubbub_mask *masks; @@ -203,7 +204,7 @@ void hubbub1_program_watermarks( unsigned int refclk_mhz, bool safe_to_lower); -void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub); +void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow); bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubub); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index d1acd7165bc8..683829466a44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -115,7 +115,7 @@ static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank) REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, blank_en); } -static void hubp1_vready_workaround(struct hubp *hubp, +void hubp1_vready_workaround(struct hubp *hubp, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) { uint32_t value = 0; @@ -317,7 +317,8 @@ void hubp1_program_pixel_format( bool hubp1_program_surface_flip_and_addr( struct hubp *hubp, const struct dc_plane_address *address, - bool flip_immediate) + bool flip_immediate, + uint8_t vmid) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); @@ -1149,9 +1150,28 @@ void hubp1_cursor_set_position( REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en); - REG_SET_2(CURSOR_POSITION, 0, - CURSOR_X_POSITION, pos->x, + //account for cases where we see negative offset relative to overlay plane + if (src_x_offset < 0 && src_y_offset < 0) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, 0, + CURSOR_Y_POSITION, 0); + x_hotspot -= src_x_offset; + y_hotspot -= src_y_offset; + } else if (src_x_offset < 0) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, 0, CURSOR_Y_POSITION, pos->y); + x_hotspot -= src_x_offset; + } else if (src_y_offset < 0) { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, pos->x, + CURSOR_Y_POSITION, 0); + y_hotspot -= src_y_offset; + } else { + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, pos->x, + CURSOR_Y_POSITION, pos->y); + } REG_SET_2(CURSOR_HOT_SPOT, 0, CURSOR_HOT_SPOT_X, x_hotspot, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 62d4232e7796..a6d6dfe00617 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -707,11 +707,6 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable, bool independent_64b_blks); -bool hubp1_program_surface_flip_and_addr( - struct hubp *hubp, - const struct dc_plane_address *address, - bool flip_immediate); - bool hubp1_is_flip_pending(struct hubp *hubp); void hubp1_cursor_set_attributes( @@ -745,5 +740,7 @@ void hubp1_clear_underflow(struct hubp *hubp); enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); +void hubp1_vready_workaround(struct hubp *hubp, + struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 58a12ddf12f3..d1a8f1c302a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -40,7 +40,6 @@ #include "ipp.h" #include "mpc.h" #include "reg_helper.h" -#include "custom_float.h" #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "dcn10_cm_common.h" @@ -92,10 +91,11 @@ static void log_mpc_crc(struct dc *dc, void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) { struct dc_context *dc_ctx = dc->ctx; - struct dcn_hubbub_wm wm = {0}; + struct dcn_hubbub_wm wm; int i; - hubbub1_wm_read_state(dc->res_pool->hubbub, &wm); + memset(&wm, 0, sizeof(struct dcn_hubbub_wm)); + dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm); DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent" " sr_enter sr_exit dram_clk_change\n"); @@ -636,8 +636,6 @@ static enum dc_status dcn10_enable_stream_timing( struct dc_stream_state *stream = pipe_ctx->stream; enum dc_color_space color_space; struct tg_color black_color = {0}; - struct drr_params params = {0}; - unsigned int event_triggers = 0; /* by upper caller loop, pipe0 is parent pipe and be called first. * back end is set up by for pipe0. Other children pipe share back end @@ -705,19 +703,6 @@ static enum dc_status dcn10_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - params.vertical_total_min = stream->adjust.v_total_min; - params.vertical_total_max = stream->adjust.v_total_max; - if (pipe_ctx->stream_res.tg->funcs->set_drr) - pipe_ctx->stream_res.tg->funcs->set_drr( - pipe_ctx->stream_res.tg, ¶ms); - - // DRR should set trigger event to monitor surface update event - if (stream->adjust.v_total_min != 0 && stream->adjust.v_total_max != 0) - event_triggers = 0x80; - if (pipe_ctx->stream_res.tg->funcs->set_static_screen_control) - pipe_ctx->stream_res.tg->funcs->set_static_screen_control( - pipe_ctx->stream_res.tg, event_triggers); - /* TODO program crtc source select for non-virtual signal*/ /* TODO program FMT */ /* TODO setup link_enc */ @@ -971,92 +956,62 @@ static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->pipe_idx); } -static void dcn10_init_hw(struct dc *dc) +static void dcn10_init_pipes(struct dc *dc, struct dc_state *context) { int i; - struct abm *abm = dc->res_pool->abm; - struct dmcu *dmcu = dc->res_pool->dmcu; - struct dce_hwseq *hws = dc->hwseq; - struct dc_bios *dcb = dc->ctx->dc_bios; - struct dc_state *context = dc->current_state; - - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - REG_WRITE(REFCLK_CNTL, 0); - REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - REG_WRITE(DIO_MEM_PWR_CTRL, 0); - - if (!dc->debug.disable_clock_gate) { - /* enable all DCN clock gating */ - REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); - - REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); - - REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); - } - - enable_power_gating_plane(dc->hwseq, true); - } else { - - if (!dcb->funcs->is_accelerated_mode(dcb)) { - bool allow_self_fresh_force_enable = - hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub); - - bios_golden_init(dc); - - /* WA for making DF sleep when idle after resume from S0i3. - * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by - * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 - * before calling command table and it changed to 1 after, - * it should be set back to 0. 
- */ - if (allow_self_fresh_force_enable == false && - hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub)) - hubbub1_disable_allow_self_refresh(dc->res_pool->hubbub); - - disable_vga(dc->hwseq); - } + bool can_apply_seamless_boot = false; - for (i = 0; i < dc->link_count; i++) { - /* Power up AND update implementation according to the - * required signal (which may be different from the - * default signal on connector). - */ - struct dc_link *link = dc->links[i]; - - if (link->link_enc->connector.id == CONNECTOR_ID_EDP) - dc->hwss.edp_power_control(link, true); - - link->link_enc->funcs->hw_init(link->link_enc); + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->apply_seamless_boot_optimization) { + can_apply_seamless_boot = true; + break; } } for (i = 0; i < dc->res_pool->pipe_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + /* There is assumption that pipe_ctx is not mapping irregularly + * to non-preferred front end. If pipe_ctx->stream is not NULL, + * we will use the pipe, so don't disable + */ + if (pipe_ctx->stream != NULL) + continue; if (tg->funcs->is_tg_enabled(tg)) tg->funcs->lock(tg); - } - - /* Blank controller using driver code instead of - * command table. - */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct timing_generator *tg = dc->res_pool->timing_generators[i]; + /* Blank controller using driver code instead of + * command table. + */ if (tg->funcs->is_tg_enabled(tg)) { tg->funcs->set_blank(tg, true); hwss_wait_for_blank_complete(tg); } } - /* Reset all MPCC muxes */ - dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc); + /* Cannot reset the MPC mux if seamless boot */ + if (!can_apply_seamless_boot) + dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc); - for (i = 0; i < dc->res_pool->timing_generator_count; i++) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; struct hubp *hubp = dc->res_pool->hubps[i]; struct dpp *dpp = dc->res_pool->dpps[i]; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + // W/A for issue with dc_post_update_surfaces_to_stream + hubp->power_gated = true; + + /* There is assumption that pipe_ctx is not mapping irregularly + * to non-preferred front end. If pipe_ctx->stream is not NULL, + * we will use the pipe, so don't disable + */ + if (pipe_ctx->stream != NULL) + continue; + + dpp->funcs->dpp_reset(dpp); pipe_ctx->stream_res.tg = tg; pipe_ctx->pipe_idx = i; @@ -1074,18 +1029,9 @@ static void dcn10_init_hw(struct dc *dc) pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; hwss1_plane_atomic_disconnect(dc, pipe_ctx); - } - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct timing_generator *tg = dc->res_pool->timing_generators[i]; if (tg->funcs->is_tg_enabled(tg)) tg->funcs->unlock(tg); - } - - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct timing_generator *tg = dc->res_pool->timing_generators[i]; - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; dcn10_disable_plane(dc, pipe_ctx); @@ -1094,10 +1040,73 @@ static void dcn10_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } +} - /* end of FPGA. 
Below if real ASIC */ - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) +static void dcn10_init_hw(struct dc *dc) +{ + int i; + struct abm *abm = dc->res_pool->abm; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct dce_hwseq *hws = dc->hwseq; + struct dc_bios *dcb = dc->ctx->dc_bios; + + if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + REG_WRITE(REFCLK_CNTL, 0); + REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); + REG_WRITE(DIO_MEM_PWR_CTRL, 0); + + if (!dc->debug.disable_clock_gate) { + /* enable all DCN clock gating */ + REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0); + + REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0); + + REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0); + } + + enable_power_gating_plane(dc->hwseq, true); + + /* end of FPGA. Below if real ASIC */ return; + } + + if (!dcb->funcs->is_accelerated_mode(dcb)) { + bool allow_self_fresh_force_enable = + hububu1_is_allow_self_refresh_enabled( + dc->res_pool->hubbub); + + bios_golden_init(dc); + + /* WA for making DF sleep when idle after resume from S0i3. + * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by + * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 + * before calling command table and it changed to 1 after, + * it should be set back to 0. + */ + if (allow_self_fresh_force_enable == false && + hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub)) + hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, true); + + disable_vga(dc->hwseq); + } + + for (i = 0; i < dc->link_count; i++) { + /* Power up AND update implementation according to the + * required signal (which may be different from the + * default signal on connector). + */ + struct dc_link *link = dc->links[i]; + + if (link->link_enc->connector.id == CONNECTOR_ID_EDP) + dc->hwss.edp_power_control(link, true); + + link->link_enc->funcs->hw_init(link->link_enc); + + /* Check for enabled DIG to identify enabled display */ + if (link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) + link->link_status.link_active = true; + } for (i = 0; i < dc->res_pool->audio_count; i++) { struct audio *audio = dc->res_pool->audios[i]; @@ -1128,6 +1137,9 @@ static void dcn10_init_hw(struct dc *dc) enable_power_gating_plane(dc->hwseq, true); memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks)); + + if (dc->hwss.init_pipes) + dc->hwss.init_pipes(dc, dc->current_state); } static void reset_hw_ctx_wrap( @@ -1153,11 +1165,13 @@ static void reset_hw_ctx_wrap( struct clock_source *old_clk = pipe_ctx_old->clock_source; reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); + if (dc->hwss.enable_stream_gating) { + dc->hwss.enable_stream_gating(dc, pipe_ctx); + } if (old_clk) old_clk->funcs->cs_power_down(old_clk); } } - } static bool patch_address_for_sbs_tb_stereo( @@ -1202,7 +1216,8 @@ static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_c pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr( pipe_ctx->plane_res.hubp, &plane_state->address, - plane_state->flip_immediate); + plane_state->flip_immediate, + 0); plane_state->status.requested_address = plane_state->address; @@ -2048,7 +2063,7 @@ void update_dchubp_dpp( dc->res_pool->dccg->funcs->update_dpp_dto( dc->res_pool->dccg, dpp->inst, - pipe_ctx->plane_res.bw.calc.dppclk_khz); + pipe_ctx->plane_res.bw.dppclk_khz); else dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? 
dc->res_pool->clk_mgr->clks.dispclk_khz / 2 : @@ -2125,7 +2140,8 @@ void update_dchubp_dpp( plane_state->update_flags.bits.swizzle_change || plane_state->update_flags.bits.dcc_change || plane_state->update_flags.bits.bpp_change || - plane_state->update_flags.bits.scaling_change) { + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { hubp->funcs->hubp_program_surface_config( hubp, plane_state->format, @@ -2176,8 +2192,10 @@ static void dcn10_blank_pixel_data( if (!blank) { if (stream_res->tg->funcs->set_blank) stream_res->tg->funcs->set_blank(stream_res->tg, blank); - if (stream_res->abm) + if (stream_res->abm) { + stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1); stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level); + } } else if (blank) { if (stream_res->abm) stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm); @@ -2252,13 +2270,11 @@ static void program_all_pipe_in_tree( } - if (pipe_ctx->plane_state != NULL) { + if (pipe_ctx->plane_state != NULL) dcn10_program_pipe(dc, pipe_ctx, context); - } - if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) { + if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx) program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); - } } struct pipe_ctx *find_top_pipe_for_stream( @@ -2334,9 +2350,10 @@ static void dcn10_apply_ctx_for_surface( } } - if (!pipe_ctx->plane_state && - old_pipe_ctx->plane_state && - old_pipe_ctx->stream_res.tg == tg) { + if ((!pipe_ctx->plane_state || + pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && + old_pipe_ctx->plane_state && + old_pipe_ctx->stream_res.tg == tg) { dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); removed_pipe[i] = true; @@ -2383,6 +2400,22 @@ static void dcn10_apply_ctx_for_surface( hubbub1_wm_change_req_wa(dc->res_pool->hubbub); } +static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context) +{ + uint8_t i; + + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->timing.timing_3d_format + == TIMING_3D_FORMAT_HW_FRAME_PACKING) { + /* + * Disable stutter + */ + hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false); + break; + } + } +} + static void dcn10_prepare_bandwidth( struct dc *dc, struct dc_state *context) @@ -2404,6 +2437,7 @@ static void dcn10_prepare_bandwidth( &context->bw.dcn.watermarks, dc->res_pool->ref_clock_inKhz / 1000, true); + dcn10_stereo_hw_frame_pack_wa(dc, context); if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) dcn_bw_notify_pplib_of_wm_ranges(dc); @@ -2433,6 +2467,7 @@ static void dcn10_optimize_bandwidth( &context->bw.dcn.watermarks, dc->res_pool->ref_clock_inKhz / 1000, true); + dcn10_stereo_hw_frame_pack_wa(dc, context); if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) dcn_bw_notify_pplib_of_wm_ranges(dc); @@ -2518,7 +2553,7 @@ static void dcn10_config_stereo_parameters( timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA || timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) { enum display_dongle_type dongle = \ - stream->sink->link->ddc->dongle_type; + stream->link->ddc->dongle_type; if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER || dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER || dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER) @@ -2649,7 +2684,7 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_cursor_mi_param param = { - .pixel_clk_khz 
= pipe_ctx->stream->timing.pix_clk_khz, + .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10, .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz, .viewport = pipe_ctx->plane_res.scl_data.viewport, .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, @@ -2658,8 +2693,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .mirror = pipe_ctx->plane_state->horizontal_mirror }; - pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; - pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; + pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x; + pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y; if (pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) @@ -2706,9 +2741,151 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.dpp, &opt_attr); } +/** +* apply_front_porch_workaround TODO FPGA still need? +* +* This is a workaround for a bug that has existed since R5xx and has not been +* fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive. +*/ +static void apply_front_porch_workaround( + struct dc_crtc_timing *timing) +{ + if (timing->flags.INTERLACE == 1) { + if (timing->v_front_porch < 2) + timing->v_front_porch = 2; + } else { + if (timing->v_front_porch < 1) + timing->v_front_porch = 1; + } +} + +int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx) +{ + struct timing_generator *optc = pipe_ctx->stream_res.tg; + const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; + struct dc_crtc_timing patched_crtc_timing; + int vesa_sync_start; + int asic_blank_end; + int interlace_factor; + int vertical_line_start; + + patched_crtc_timing = *dc_crtc_timing; + apply_front_porch_workaround(&patched_crtc_timing); + + interlace_factor = patched_crtc_timing.flags.INTERLACE ? 
2 : 1; + + vesa_sync_start = patched_crtc_timing.v_addressable + + patched_crtc_timing.v_border_bottom + + patched_crtc_timing.v_front_porch; + + asic_blank_end = (patched_crtc_timing.v_total - + vesa_sync_start - + patched_crtc_timing.v_border_top) + * interlace_factor; + + vertical_line_start = asic_blank_end - + optc->dlg_otg_param.vstartup_start + 1; + + return vertical_line_start; +} + +static void calc_vupdate_position( + struct pipe_ctx *pipe_ctx, + uint32_t *start_line, + uint32_t *end_line) +{ + const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing; + int vline_int_offset_from_vupdate = + pipe_ctx->stream->periodic_interrupt0.lines_offset; + int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx); + int start_position; + + if (vline_int_offset_from_vupdate > 0) + vline_int_offset_from_vupdate--; + else if (vline_int_offset_from_vupdate < 0) + vline_int_offset_from_vupdate++; + + start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync; + + if (start_position >= 0) + *start_line = start_position; + else + *start_line = dc_crtc_timing->v_total + start_position - 1; + + *end_line = *start_line + 2; + + if (*end_line >= dc_crtc_timing->v_total) + *end_line = 2; +} + +static void cal_vline_position( + struct pipe_ctx *pipe_ctx, + enum vline_select vline, + uint32_t *start_line, + uint32_t *end_line) +{ + enum vertical_interrupt_ref_point ref_point = INVALID_POINT; + + if (vline == VLINE0) + ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point; + else if (vline == VLINE1) + ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point; + + switch (ref_point) { + case START_V_UPDATE: + calc_vupdate_position( + pipe_ctx, + start_line, + end_line); + break; + case START_V_SYNC: + // Suppose to do nothing because vsync is 0; + break; + default: + ASSERT(0); + break; + } +} + +static void dcn10_setup_periodic_interrupt( + struct pipe_ctx *pipe_ctx, + enum vline_select vline) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + + if (vline == VLINE0) { + uint32_t start_line = 0; + uint32_t end_line = 0; + + cal_vline_position(pipe_ctx, vline, &start_line, &end_line); + + tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line); + + } else if (vline == VLINE1) { + pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1( + tg, + pipe_ctx->stream->periodic_interrupt1.lines_offset); + } +} + +static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx) +{ + struct timing_generator *tg = pipe_ctx->stream_res.tg; + int start_line = get_vupdate_offset_from_vsync(pipe_ctx); + + if (start_line < 0) { + ASSERT(0); + start_line = 0; + } + + if (tg->funcs->setup_vertical_interrupt2) + tg->funcs->setup_vertical_interrupt2(tg, start_line); +} + static const struct hw_sequencer_funcs dcn10_funcs = { .program_gamut_remap = program_gamut_remap, .init_hw = dcn10_init_hw, + .init_pipes = dcn10_init_pipes, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, .update_plane_addr = dcn10_update_plane_addr, @@ -2752,7 +2929,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, .set_cursor_position = dcn10_set_cursor_position, .set_cursor_attribute = dcn10_set_cursor_attribute, - .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level + .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level, + .disable_stream_gating = NULL, + .enable_stream_gating = NULL, + .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, + 
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index f8eea10e4c64..6d66084df55f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -81,4 +81,6 @@ struct pipe_ctx *find_top_pipe_for_stream( struct dc_state *context, const struct dc_stream_state *stream); +int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index cd469014baa3..98f41d250978 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -40,7 +40,6 @@ #include "ipp.h" #include "mpc.h" #include "reg_helper.h" -#include "custom_float.h" #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "dcn10_cm_common.h" @@ -72,7 +71,7 @@ static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned int bufSize) { struct dc_context *dc_ctx = dc->ctx; - struct dcn_hubbub_wm wm = {0}; + struct dcn_hubbub_wm wm; int i; unsigned int chars_printed = 0; @@ -81,7 +80,8 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000; static const unsigned int frac = 1000; - hubbub1_wm_read_state(dc->res_pool->hubbub, &wm); + memset(&wm, 0, sizeof(struct dcn_hubbub_wm)); + dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm); chars_printed = snprintf_count(pBuf, remaining_buffer, "wm_set_index,data_urgent,pte_meta_urgent,sr_enter,sr_exit,dram_clk_chanage\n"); remaining_buffer -= chars_printed; @@ -419,20 +419,22 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int unsigned int remaining_buffer = bufSize; chars_printed = snprintf_count(pBuf, remaining_buffer, "instance,v_bs,v_be,v_ss,v_se,vpol,vmax,vmin,vmax_sel,vmin_sel," - "h_bs,h_be,h_ss,h_se,hpol,htot,vtot,underflow\n"); + "h_bs,h_be,h_ss,h_se,hpol,htot,vtot,underflow,pixelclk[khz]\n"); remaining_buffer -= chars_printed; pBuf += chars_printed; for (i = 0; i < pool->timing_generator_count; i++) { struct timing_generator *tg = pool->timing_generators[i]; struct dcn_otg_state s = {0}; + int pix_clk = 0; optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); + pix_clk = dc->current_state->res_ctx.pipe_ctx[i].stream_res.pix_clk_params.requested_pix_clk_100hz / 10; //only print if OTG master is enabled if (s.otg_enabled & 1) { chars_printed = snprintf_count(pBuf, remaining_buffer, "%x,%d,%d,%d,%d,%d,%d,%d,%d,%d," - "%d,%d,%d,%d,%d,%d,%d,%d" + "%d,%d,%d,%d,%d,%d,%d,%d,%d" "\n", tg->inst, s.v_blank_start, @@ -451,7 +453,8 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int s.h_sync_a_pol, s.h_total, s.v_total, - s.underflow_occurred_status); + s.underflow_occurred_status, + pix_clk); remaining_buffer -= chars_printed; pBuf += chars_printed; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 477ab9222216..a9db372688ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -85,6 
+85,7 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = { .enable_hpd = dcn10_link_encoder_enable_hpd, .disable_hpd = dcn10_link_encoder_disable_hpd, .is_dig_enabled = dcn10_is_dig_enabled, + .get_dig_frontend = dcn10_get_dig_frontend, .destroy = dcn10_link_encoder_destroy }; @@ -440,7 +441,7 @@ static uint8_t get_frontend_source( } } -void configure_encoder( +void enc1_configure_encoder( struct dcn10_link_encoder *enc10, const struct dc_link_settings *link_settings) { @@ -495,6 +496,15 @@ bool dcn10_is_dig_enabled(struct link_encoder *enc) return value; } +unsigned int dcn10_get_dig_frontend(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value); + return value; +} + static void link_encoder_disable(struct dcn10_link_encoder *enc10) { /* reset training pattern */ @@ -543,12 +553,12 @@ bool dcn10_link_encoder_validate_dvi_output( if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK || connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) && signal != SIGNAL_TYPE_HDMI_TYPE_A && - crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK) + crtc_timing->pix_clk_100hz > (TMDS_MAX_PIXEL_CLOCK * 10)) return false; - if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK) + if (crtc_timing->pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10)) return false; - if (crtc_timing->pix_clk_khz > max_pixel_clock) + if (crtc_timing->pix_clk_100hz > (max_pixel_clock * 10)) return false; /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */ @@ -571,7 +581,7 @@ bool dcn10_link_encoder_validate_dvi_output( static bool dcn10_link_encoder_validate_hdmi_output( const struct dcn10_link_encoder *enc10, const struct dc_crtc_timing *crtc_timing, - int adjusted_pix_clk_khz) + int adjusted_pix_clk_100hz) { enum dc_color_depth max_deep_color = enc10->base.features.max_hdmi_deep_color; @@ -581,11 +591,11 @@ static bool dcn10_link_encoder_validate_hdmi_output( if (crtc_timing->display_color_depth < COLOR_DEPTH_888) return false; - if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK) + if (adjusted_pix_clk_100hz < (TMDS_MIN_PIXEL_CLOCK * 10)) return false; - if ((adjusted_pix_clk_khz == 0) || - (adjusted_pix_clk_khz > enc10->base.features.max_hdmi_pixel_clock)) + if ((adjusted_pix_clk_100hz == 0) || + (adjusted_pix_clk_100hz > (enc10->base.features.max_hdmi_pixel_clock * 10))) return false; /* DCE11 HW does not support 420 */ @@ -594,7 +604,7 @@ static bool dcn10_link_encoder_validate_hdmi_output( return false; if (!enc10->base.features.flags.bits.HDMI_6GB_EN && - adjusted_pix_clk_khz >= 300000) + adjusted_pix_clk_100hz >= 3000000) return false; if (enc10->base.ctx->dc->debug.hdmi20_disable && crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) @@ -738,7 +748,7 @@ bool dcn10_link_encoder_validate_output_with_stream( case SIGNAL_TYPE_DVI_DUAL_LINK: is_valid = dcn10_link_encoder_validate_dvi_output( enc10, - stream->sink->link->connector_signal, + stream->link->connector_signal, stream->signal, &stream->timing); break; @@ -746,7 +756,7 @@ bool dcn10_link_encoder_validate_output_with_stream( is_valid = dcn10_link_encoder_validate_hdmi_output( enc10, &stream->timing, - stream->phy_pix_clk); + stream->phy_pix_clk * 10); break; case SIGNAL_TYPE_DISPLAY_PORT: case SIGNAL_TYPE_DISPLAY_PORT_MST: @@ -910,7 +920,7 @@ void dcn10_link_encoder_enable_dp_output( * but it's not passed to asic_control. * We need to set number of lanes manually. 
*/ - configure_encoder(enc10, link_settings); + enc1_configure_encoder(enc10, link_settings); cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = enc->preferred_engine; @@ -949,7 +959,7 @@ void dcn10_link_encoder_enable_dp_mst_output( * but it's not passed to asic_control. * We need to set number of lanes manually. */ - configure_encoder(enc10, link_settings); + enc1_configure_encoder(enc10, link_settings); cntl.action = TRANSMITTER_CONTROL_ENABLE; cntl.engine_id = ENGINE_ID_UNKNOWN; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 49ead12b2532..b74b80a247ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -271,7 +271,7 @@ void dcn10_link_encoder_setup( struct link_encoder *enc, enum signal_type signal); -void configure_encoder( +void enc1_configure_encoder( struct dcn10_link_encoder *enc10, const struct dc_link_settings *link_settings); @@ -336,6 +336,8 @@ void dcn10_psr_program_secondary_packet(struct link_encoder *enc, bool dcn10_is_dig_enabled(struct link_encoder *enc); +unsigned int dcn10_get_dig_frontend(struct link_encoder *enc); + void dcn10_aux_initialize(struct dcn10_link_encoder *enc10); #endif /* __DC_LINK_ENCODER__DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 7c138615f17d..0345d51e9d6f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -92,75 +92,36 @@ static void optc1_disable_stereo(struct timing_generator *optc) OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); } -static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) +void optc1_setup_vertical_interrupt0( + struct timing_generator *optc, + uint32_t start_line, + uint32_t end_line) { - struct dc_crtc_timing patched_crtc_timing; - int vesa_sync_start; - int asic_blank_end; - int vertical_line_start; - - patched_crtc_timing = *dc_crtc_timing; - optc1_apply_front_porch_workaround(optc, &patched_crtc_timing); - - vesa_sync_start = patched_crtc_timing.h_addressable + - patched_crtc_timing.h_border_right + - patched_crtc_timing.h_front_porch; - - asic_blank_end = patched_crtc_timing.h_total - - vesa_sync_start - - patched_crtc_timing.h_border_left; - - vesa_sync_start = patched_crtc_timing.v_addressable + - patched_crtc_timing.v_border_bottom + - patched_crtc_timing.v_front_porch; - - asic_blank_end = (patched_crtc_timing.v_total - - vesa_sync_start - - patched_crtc_timing.v_border_top); - - vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; - if (vertical_line_start < 0) { - ASSERT(0); - vertical_line_start = 0; - } + struct optc *optc1 = DCN10TG_FROM_TG(optc); - return vertical_line_start; + REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0, + OTG_VERTICAL_INTERRUPT0_LINE_START, start_line, + OTG_VERTICAL_INTERRUPT0_LINE_END, end_line); } -void optc1_program_vline_interrupt( +void optc1_setup_vertical_interrupt1( struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, - unsigned long long vsync_delta) + uint32_t start_line) { - struct optc *optc1 = DCN10TG_FROM_TG(optc); - unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000); - unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_khz + 99), 100); - uint32_t req_delta_lines = (uint32_t) div64_u64( - 
(req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1), - dc_crtc_timing->h_total); - - uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing); - uint32_t start_line = 0; - uint32_t endLine = 0; - - if (req_delta_lines != 0) - req_delta_lines--; - - if (req_delta_lines > vsync_line) - start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) + 2; - else - start_line = vsync_line - req_delta_lines; - - endLine = start_line + 2; + REG_SET(OTG_VERTICAL_INTERRUPT1_POSITION, 0, + OTG_VERTICAL_INTERRUPT1_LINE_START, start_line); +} - if (endLine >= dc_crtc_timing->v_total) - endLine = 2; +void optc1_setup_vertical_interrupt2( + struct timing_generator *optc, + uint32_t start_line) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); - REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0, - OTG_VERTICAL_INTERRUPT0_LINE_START, start_line, - OTG_VERTICAL_INTERRUPT0_LINE_END, endLine); + REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, + OTG_VERTICAL_INTERRUPT2_LINE_START, start_line); } /** @@ -265,22 +226,14 @@ void optc1_program_timing( patched_crtc_timing.v_addressable + patched_crtc_timing.v_border_bottom); - REG_UPDATE_2(OTG_V_BLANK_START_END, - OTG_V_BLANK_START, asic_blank_start, - OTG_V_BLANK_END, asic_blank_end); - - /* Use OTG_VERTICAL_INTERRUPT2 replace VUPDATE interrupt, - * program the reg for interrupt postition. - */ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; v_fp2 = 0; if (vertical_line_start < 0) v_fp2 = -vertical_line_start; - if (vertical_line_start < 0) - vertical_line_start = 0; - REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, - OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start); + REG_UPDATE_2(OTG_V_BLANK_START_END, + OTG_V_BLANK_START, asic_blank_start, + OTG_V_BLANK_END, asic_blank_end); /* v_sync polarity */ v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 
@@ -299,16 +252,17 @@ void optc1_program_timing( } /* Interlace */ - if (patched_crtc_timing.flags.INTERLACE == 1) { - REG_UPDATE(OTG_INTERLACE_CONTROL, - OTG_INTERLACE_ENABLE, 1); - v_init = v_init / 2; - if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end) - v_fp2 = v_fp2 / 2; - } else - REG_UPDATE(OTG_INTERLACE_CONTROL, - OTG_INTERLACE_ENABLE, 0); - + if (REG(OTG_INTERLACE_CONTROL)) { + if (patched_crtc_timing.flags.INTERLACE == 1) { + REG_UPDATE(OTG_INTERLACE_CONTROL, + OTG_INTERLACE_ENABLE, 1); + v_init = v_init / 2; + if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end) + v_fp2 = v_fp2 / 2; + } else + REG_UPDATE(OTG_INTERLACE_CONTROL, + OTG_INTERLACE_ENABLE, 0); + } /* VTG enable set to 0 first VInit */ REG_UPDATE(CONTROL, @@ -338,7 +292,7 @@ void optc1_program_timing( h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing); REG_UPDATE(OTG_H_TIMING_CNTL, - OTG_H_TIMING_DIV_BY2, h_div_2); + OTG_H_TIMING_DIV_BY2, h_div_2 || optc1->comb_opp_id != 0xf); } @@ -1184,6 +1138,64 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc) return ret; } +bool optc1_is_matching_timing(struct timing_generator *tg, + const struct dc_crtc_timing *otg_timing) +{ + struct dc_crtc_timing hw_crtc_timing = {0}; + struct dcn_otg_state s = {0}; + + if (tg == NULL || otg_timing == NULL) + return false; + + optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); + + hw_crtc_timing.h_total = s.h_total + 1; + hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); + hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start; + hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start; + + hw_crtc_timing.v_total = s.v_total + 1; + hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); + hw_crtc_timing.v_front_porch = s.v_total + 1 - s.v_blank_start; + hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start; + + if (otg_timing->h_total != hw_crtc_timing.h_total) + return false; + + if (otg_timing->h_border_left != hw_crtc_timing.h_border_left) + return false; + + if (otg_timing->h_addressable != hw_crtc_timing.h_addressable) + return false; + + if (otg_timing->h_border_right != hw_crtc_timing.h_border_right) + return false; + + if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch) + return false; + + if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width) + return false; + + if (otg_timing->v_total != hw_crtc_timing.v_total) + return false; + + if (otg_timing->v_border_top != hw_crtc_timing.v_border_top) + return false; + + if (otg_timing->v_addressable != hw_crtc_timing.v_addressable) + return false; + + if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) + return false; + + if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width) + return false; + + return true; +} + + void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s) { @@ -1370,7 +1382,9 @@ bool optc1_get_crc(struct timing_generator *optc, static const struct timing_generator_funcs dcn10_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, - .program_vline_interrupt = optc1_program_vline_interrupt, + .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0, + .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1, + .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2, .program_global_sync = optc1_program_global_sync, .enable_crtc = optc1_enable_crtc, .disable_crtc = optc1_disable_crtc, @@ -1380,6 +1394,7 @@ static const struct 
timing_generator_funcs dcn10_tg_funcs = { .get_frame_count = optc1_get_vblank_counter, .get_scanoutpos = optc1_get_crtc_scanoutpos, .get_otg_active_size = optc1_get_otg_active_size, + .is_matching_timing = optc1_is_matching_timing, .set_early_control = optc1_set_early_control, /* used by enable_timing_synchronization. Not need for FPGA */ .wait_for_state = optc1_wait_for_state, @@ -1419,10 +1434,13 @@ void dcn10_timing_generator_init(struct optc *optc1) optc1->min_v_blank_interlace = 5; optc1->min_h_sync_width = 8; optc1->min_v_sync_width = 1; + optc1->comb_opp_id = 0xf; } bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { - return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; + bool two_pix = timing->pixel_encoding == PIXEL_ENCODING_YCBCR420; + + return two_pix; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 8bacf0b6e27e..4eb9a898c237 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -67,6 +67,8 @@ SRI(OTG_CLOCK_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT1_POSITION, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ @@ -135,6 +137,8 @@ struct dcn_optc_registers { uint32_t OTG_CLOCK_CONTROL; uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL; uint32_t OTG_VERTICAL_INTERRUPT0_POSITION; + uint32_t OTG_VERTICAL_INTERRUPT1_CONTROL; + uint32_t OTG_VERTICAL_INTERRUPT1_POSITION; uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; uint32_t OPTC_INPUT_CLOCK_CONTROL; @@ -227,6 +231,8 @@ struct dcn_optc_registers { SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL, OTG_VERTICAL_INTERRUPT1_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT1_POSITION, OTG_VERTICAL_INTERRUPT1_LINE_START, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ @@ -361,6 +367,8 @@ struct dcn_optc_registers { type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\ type OTG_VERTICAL_INTERRUPT0_LINE_START;\ type OTG_VERTICAL_INTERRUPT0_LINE_END;\ + type OTG_VERTICAL_INTERRUPT1_INT_ENABLE;\ + type OTG_VERTICAL_INTERRUPT1_LINE_START;\ type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\ type OTG_VERTICAL_INTERRUPT2_LINE_START;\ type OPTC_INPUT_CLK_EN;\ @@ -427,7 +435,7 @@ struct optc { const struct dcn_optc_shift *tg_shift; const struct dcn_optc_mask *tg_mask; - enum controller_id controller_id; + int comb_opp_id; uint32_t max_h_total; uint32_t max_v_total; @@ -475,9 +483,16 @@ void optc1_program_timing( const struct dc_crtc_timing *dc_crtc_timing, bool use_vbios); -void optc1_program_vline_interrupt(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, - unsigned long long vsync_delta); +void optc1_setup_vertical_interrupt0( + struct timing_generator *optc, + uint32_t start_line, + uint32_t end_line); +void 
optc1_setup_vertical_interrupt1( + struct timing_generator *optc, + uint32_t start_line); +void optc1_setup_vertical_interrupt2( + struct timing_generator *optc, + uint32_t start_line); void optc1_program_global_sync( struct timing_generator *optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 5d4772dec0ba..09d74070a49b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -70,7 +70,7 @@ const struct _vcs_dpi_ip_params_st dcn1_0_ip = { .rob_buffer_size_kbytes = 64, .det_buffer_size_kbytes = 164, - .dpte_buffer_size_in_pte_reqs = 42, + .dpte_buffer_size_in_pte_reqs_luma = 42, .dpp_output_buffer_pixels = 2560, .opp_output_buffer_lines = 1, .pixel_chunk_size_kbytes = 8, @@ -436,7 +436,6 @@ static const struct dcn_optc_mask tg_mask = { }; static const struct bios_registers bios_regs = { - NBIO_SR(BIOS_SCRATCH_0), NBIO_SR(BIOS_SCRATCH_3), NBIO_SR(BIOS_SCRATCH_6) }; @@ -609,7 +608,7 @@ static struct output_pixel_processor *dcn10_opp_create( return &opp->base; } -struct aux_engine *dcn10_aux_engine_create( +struct dce_aux *dcn10_aux_engine_create( struct dc_context *ctx, uint32_t inst) { @@ -678,18 +677,18 @@ static struct mpc *dcn10_mpc_create(struct dc_context *ctx) static struct hubbub *dcn10_hubbub_create(struct dc_context *ctx) { - struct hubbub *hubbub = kzalloc(sizeof(struct hubbub), + struct dcn10_hubbub *dcn10_hubbub = kzalloc(sizeof(struct dcn10_hubbub), GFP_KERNEL); - if (!hubbub) + if (!dcn10_hubbub) return NULL; - hubbub1_construct(hubbub, ctx, + hubbub1_construct(&dcn10_hubbub->base, ctx, &hubbub_reg, &hubbub_shift, &hubbub_mask); - return hubbub; + return &dcn10_hubbub->base; } static struct timing_generator *dcn10_timing_generator_create( @@ -911,7 +910,7 @@ static void destruct(struct dcn10_resource_pool *pool) for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) - pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]); + dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; @@ -974,8 +973,8 @@ static void get_pixel_clock_parameters( struct pixel_clk_params *pixel_clk_params) { const struct dc_stream_state *stream = pipe_ctx->stream; - pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz; - pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id; + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + pixel_clk_params->encoder_object_id = stream->link->link_enc->id; pixel_clk_params->signal_type = pipe_ctx->stream->signal; pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; /* TODO: un-hardcode*/ @@ -991,9 +990,9 @@ static void get_pixel_clock_parameters( pixel_clk_params->color_depth = COLOR_DEPTH_888; if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) - pixel_clk_params->requested_pix_clk /= 2; + pixel_clk_params->requested_pix_clk_100hz /= 2; if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pixel_clk_params->requested_pix_clk *= 2; + pixel_clk_params->requested_pix_clk_100hz *= 2; } @@ -1131,6 +1130,56 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st return DC_OK; } +static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *context) +{ + int i, j; + bool video_down_scaled = false; + bool video_large = false; + bool desktop_large = 
false; + bool dcc_disabled = false; + + for (i = 0; i < context->stream_count; i++) { + if (context->stream_status[i].plane_count == 0) + continue; + + if (context->stream_status[i].plane_count > 2) + return false; + + for (j = 0; j < context->stream_status[i].plane_count; j++) { + struct dc_plane_state *plane = + context->stream_status[i].plane_states[j]; + + + if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + + if (plane->src_rect.width > plane->dst_rect.width || + plane->src_rect.height > plane->dst_rect.height) + video_down_scaled = true; + + if (plane->src_rect.width >= 3840) + video_large = true; + + } else { + if (plane->src_rect.width >= 3840) + desktop_large = true; + if (!plane->dcc.enable) + dcc_disabled = true; + } + } + } + + /* + * Workaround: On DCN10 there is UMC issue that causes underflow when + * playing 4k video on 4k desktop with video downscaled and single channel + * memory + */ + if (video_large && desktop_large && video_down_scaled && dcc_disabled && + dc->dcn_soc->number_of_channels == 1) + return DC_FAIL_SURFACE_VALIDATE; + + return DC_OK; +} + static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) { enum dc_status result = DC_OK; @@ -1159,6 +1208,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .validate_bandwidth = dcn_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, + .validate_global = dcn10_validate_global, .add_stream_to_ctx = dcn10_add_stream_to_ctx, .get_default_swizzle_mode = dcn10_get_default_swizzle_mode }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index b8b5525a389a..b08254121251 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -261,17 +261,29 @@ void enc1_stream_encoder_dp_set_stream_attribute( uint8_t dp_component_depth = 0; struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + struct dc_crtc_timing hw_crtc_timing = *crtc_timing; + + if (hw_crtc_timing.flags.INTERLACE) { + /*the input timing is in VESA spec format with Interlace flag =1*/ + hw_crtc_timing.v_total /= 2; + hw_crtc_timing.v_border_top /= 2; + hw_crtc_timing.v_addressable /= 2; + hw_crtc_timing.v_border_bottom /= 2; + hw_crtc_timing.v_front_porch /= 2; + hw_crtc_timing.v_sync_width /= 2; + } + /* set pixel encoding */ - switch (crtc_timing->pixel_encoding) { + switch (hw_crtc_timing.pixel_encoding) { case PIXEL_ENCODING_YCBCR422: dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR422; break; case PIXEL_ENCODING_YCBCR444: dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR444; - if (crtc_timing->flags.Y_ONLY) - if (crtc_timing->display_color_depth != COLOR_DEPTH_666) + if (hw_crtc_timing.flags.Y_ONLY) + if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) /* HW testing only, no use case yet. * Color depth of Y-only could be * 8, 10, 12, 16 bits @@ -299,7 +311,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7, * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care"). 
*/ - if ((crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || + if ((hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) || (output_color_space == COLOR_SPACE_2020_YCBCR) || (output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) || (output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) @@ -308,7 +320,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( misc1 = misc1 & ~0x40; /* set color depth */ - switch (crtc_timing->display_color_depth) { + switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: dp_component_depth = DP_COMPONENT_PIXEL_DEPTH_6BPC; break; @@ -336,7 +348,7 @@ void enc1_stream_encoder_dp_set_stream_attribute( /* set dynamic range and YCbCr range */ - switch (crtc_timing->display_color_depth) { + switch (hw_crtc_timing.display_color_depth) { case COLOR_DEPTH_666: colorimetry_bpc = 0; break; @@ -372,9 +384,9 @@ void enc1_stream_encoder_dp_set_stream_attribute( misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_ycbcr = 0; /*bt601*/ - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ - else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_YCBCR709: @@ -382,9 +394,9 @@ void enc1_stream_encoder_dp_set_stream_attribute( misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ misc1 = misc1 & ~0x80; /* bit7 = 0*/ dynamic_range_ycbcr = 1; /*bt709*/ - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ - else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) + else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444) misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ break; case COLOR_SPACE_2020_RGB_LIMITEDRANGE: @@ -414,26 +426,26 @@ void enc1_stream_encoder_dp_set_stream_attribute( * dc_crtc_timing is vesa dmt struct. 
data from edid */ REG_SET_2(DP_MSA_TIMING_PARAM1, 0, - DP_MSA_HTOTAL, crtc_timing->h_total, - DP_MSA_VTOTAL, crtc_timing->v_total); + DP_MSA_HTOTAL, hw_crtc_timing.h_total, + DP_MSA_VTOTAL, hw_crtc_timing.v_total); /* calculate from vesa timing parameters * h_active_start related to leading edge of sync */ - h_blank = crtc_timing->h_total - crtc_timing->h_border_left - - crtc_timing->h_addressable - crtc_timing->h_border_right; + h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left - + hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right; - h_back_porch = h_blank - crtc_timing->h_front_porch - - crtc_timing->h_sync_width; + h_back_porch = h_blank - hw_crtc_timing.h_front_porch - + hw_crtc_timing.h_sync_width; /* start at beginning of left border */ - h_active_start = crtc_timing->h_sync_width + h_back_porch; + h_active_start = hw_crtc_timing.h_sync_width + h_back_porch; - v_active_start = crtc_timing->v_total - crtc_timing->v_border_top - - crtc_timing->v_addressable - crtc_timing->v_border_bottom - - crtc_timing->v_front_porch; + v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top - + hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom - + hw_crtc_timing.v_front_porch; /* start at beginning of left border */ @@ -443,20 +455,20 @@ void enc1_stream_encoder_dp_set_stream_attribute( REG_SET_4(DP_MSA_TIMING_PARAM3, 0, DP_MSA_HSYNCWIDTH, - crtc_timing->h_sync_width, + hw_crtc_timing.h_sync_width, DP_MSA_HSYNCPOLARITY, - !crtc_timing->flags.HSYNC_POSITIVE_POLARITY, + !hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY, DP_MSA_VSYNCWIDTH, - crtc_timing->v_sync_width, + hw_crtc_timing.v_sync_width, DP_MSA_VSYNCPOLARITY, - !crtc_timing->flags.VSYNC_POSITIVE_POLARITY); + !hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY); /* HWDITH include border or overscan */ REG_SET_2(DP_MSA_TIMING_PARAM4, 0, - DP_MSA_HWIDTH, crtc_timing->h_border_left + - crtc_timing->h_addressable + crtc_timing->h_border_right, - DP_MSA_VHEIGHT, crtc_timing->v_border_top + - crtc_timing->v_addressable + crtc_timing->v_border_bottom); + DP_MSA_HWIDTH, hw_crtc_timing.h_border_left + + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right, + DP_MSA_VHEIGHT, hw_crtc_timing.v_border_top + + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom); } static void enc1_stream_encoder_set_stream_attribute_helper( @@ -594,7 +606,7 @@ void enc1_stream_encoder_dvi_set_stream_attribute( cntl.signal = is_dual_link ? SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; cntl.enable_dp_audio = false; - cntl.pixel_clock = crtc_timing->pix_clk_khz; + cntl.pixel_clock = crtc_timing->pix_clk_100hz / 10; cntl.lanes_number = (is_dual_link) ? 
LANE_COUNT_EIGHT : LANE_COUNT_FOUR; if (enc1->base.bp->funcs->encoder_control( @@ -1413,6 +1425,14 @@ void enc1_setup_stereo_sync( REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable); } +void enc1_dig_connect_to_otg( + struct stream_encoder *enc, + int tg_inst) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DIG_FE_CNTL, DIG_SOURCE_SELECT, tg_inst); +} static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dp_set_stream_attribute = @@ -1445,6 +1465,7 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .hdmi_audio_disable = enc1_se_hdmi_audio_disable, .setup_stereo_sync = enc1_setup_stereo_sync, .set_avmute = enc1_stream_encoder_set_avmute, + .dig_connect_to_otg = enc1_dig_connect_to_otg, }; void dcn10_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 67f3e4dd95c1..b7c800e10a32 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -274,7 +274,8 @@ struct dcn10_stream_enc_registers { SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\ - SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh) + SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh) #define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\ SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) @@ -426,7 +427,8 @@ struct dcn10_stream_enc_registers { type DP_MSA_VHEIGHT;\ type HDMI_DB_DISABLE;\ type DP_VID_N_MUL;\ - type DP_VID_M_DOUBLE_VALUE_EN + type DP_VID_M_DOUBLE_VALUE_EN;\ + type DIG_SOURCE_SELECT struct dcn10_stream_encoder_shift { SE_REG_FIELD_LIST_DCN1_0(uint8_t); @@ -523,4 +525,8 @@ void enc1_se_hdmi_audio_setup( void enc1_se_hdmi_audio_disable( struct stream_encoder *enc); +void enc1_dig_connect_to_otg( + struct stream_encoder *enc, + int tg_inst); + #endif /* __DC_STREAM_ENCODER_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 5d4527d03045..e81b24374bcb 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -57,6 +57,13 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( struct dp_mst_stream_allocation_table *proposed_table, bool enable); +/* + * poll pending down reply before clear payload allocation table + */ +void dm_helpers_dp_mst_poll_pending_down_reply( + struct dc_context *ctx, + const struct dc_link *link); + /* * Clear payload allocation table before enable MST DP link. */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index 0029a39efb1c..14bed5b1fa97 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -38,7 +38,8 @@ enum pp_smu_ver { * of interface sharing between families of ASIcs. 
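[Editor's sketch] The DP_MSA_TIMING_PARAM programming above derives the horizontal and vertical active-start offsets from VESA-style timing fields (total, borders, addressable, front porch, sync width). A standalone worked example of that arithmetic, with assumed CTA-861 1080p60-style numbers used purely for illustration:

#include <stdio.h>

struct vesa_timing {
        unsigned int h_total, h_border_left, h_addressable, h_border_right;
        unsigned int h_front_porch, h_sync_width;
        unsigned int v_total, v_border_top, v_addressable, v_border_bottom;
        unsigned int v_front_porch;
};

int main(void)
{
        /* Assumed 1920x1080@60 numbers, for illustration only. */
        struct vesa_timing t = {
                .h_total = 2200, .h_addressable = 1920,
                .h_front_porch = 88, .h_sync_width = 44,
                .v_total = 1125, .v_addressable = 1080,
                .v_front_porch = 4,
        };

        /* Active start is measured from the leading edge of sync. */
        unsigned int h_blank = t.h_total - t.h_border_left -
                               t.h_addressable - t.h_border_right;
        unsigned int h_back_porch = h_blank - t.h_front_porch - t.h_sync_width;
        unsigned int h_active_start = t.h_sync_width + h_back_porch;

        unsigned int v_active_start = t.v_total - t.v_border_top -
                                      t.v_addressable - t.v_border_bottom -
                                      t.v_front_porch;

        /* Prints h_active_start=192 v_active_start=41 for these numbers. */
        printf("h_active_start=%u v_active_start=%u\n",
               h_active_start, v_active_start);
        return 0;
}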
*/ PP_SMU_UNSUPPORTED, - PP_SMU_VER_RV + PP_SMU_VER_RV, + PP_SMU_VER_MAX }; struct pp_smu { diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index 1af8c777b3ac..77200711abbe 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h @@ -82,9 +82,17 @@ enum dm_pp_clock_type { #define DC_DECODE_PP_CLOCK_TYPE(clk_type) \ (clk_type) == DM_PP_CLOCK_TYPE_DISPLAY_CLK ? "Display" : \ (clk_type) == DM_PP_CLOCK_TYPE_ENGINE_CLK ? "Engine" : \ - (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : "Invalid" - -#define DM_PP_MAX_CLOCK_LEVELS 8 + (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : \ + (clk_type) == DM_PP_CLOCK_TYPE_DCFCLK ? "DCF" : \ + (clk_type) == DM_PP_CLOCK_TYPE_DCEFCLK ? "DCEF" : \ + (clk_type) == DM_PP_CLOCK_TYPE_SOCCLK ? "SoC" : \ + (clk_type) == DM_PP_CLOCK_TYPE_PIXELCLK ? "Pixel" : \ + (clk_type) == DM_PP_CLOCK_TYPE_DISPLAYPHYCLK ? "Display PHY" : \ + (clk_type) == DM_PP_CLOCK_TYPE_DPPCLK ? "DPP" : \ + (clk_type) == DM_PP_CLOCK_TYPE_FCLK ? "F" : \ + "Invalid" + +#define DM_PP_MAX_CLOCK_LEVELS 16 struct dm_pp_clock_levels { uint32_t num_levels; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index bea4e61b94c7..c59e582c1f40 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -121,4 +121,30 @@ enum self_refresh_affinity { dm_neither_self_refresh_nor_mclk_switch }; +enum dm_validation_status { + DML_VALIDATION_OK, + DML_FAIL_SCALE_RATIO_TAP, + DML_FAIL_SOURCE_PIXEL_FORMAT, + DML_FAIL_VIEWPORT_SIZE, + DML_FAIL_TOTAL_V_ACTIVE_BW, + DML_FAIL_DIO_SUPPORT, + DML_FAIL_NOT_ENOUGH_DSC, + DML_FAIL_DSC_CLK_REQUIRED, + DML_FAIL_URGENT_LATENCY, + DML_FAIL_REORDERING_BUFFER, + DML_FAIL_DISPCLK_DPPCLK, + DML_FAIL_TOTAL_AVAILABLE_PIPES, + DML_FAIL_NUM_OTG, + DML_FAIL_WRITEBACK_MODE, + DML_FAIL_WRITEBACK_LATENCY, + DML_FAIL_WRITEBACK_SCALE_RATIO_TAP, + DML_FAIL_CURSOR_SUPPORT, + DML_FAIL_PITCH_SUPPORT, + DML_FAIL_PTE_BUFFER_SIZE, + DML_FAIL_HOST_VM_IMMEDIATE_FLIP, + DML_FAIL_DSC_INPUT_BPC, + DML_FAIL_PREFETCH_SUPPORT, + DML_FAIL_V_RATIO_PREFETCH, +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index dddeb0d4db8f..d303b789adfe 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -62,3 +62,31 @@ void dml_init_instance(struct display_mode_lib *lib, enum dml_project project) } } +const char *dml_get_status_message(enum dm_validation_status status) +{ + switch (status) { + case DML_VALIDATION_OK: return "Validation OK"; + case DML_FAIL_SCALE_RATIO_TAP: return "Scale ratio/tap"; + case DML_FAIL_SOURCE_PIXEL_FORMAT: return "Source pixel format"; + case DML_FAIL_VIEWPORT_SIZE: return "Viewport size"; + case DML_FAIL_TOTAL_V_ACTIVE_BW: return "Total vertical active bandwidth"; + case DML_FAIL_DIO_SUPPORT: return "DIO support"; + case DML_FAIL_NOT_ENOUGH_DSC: return "Not enough DSC Units"; + case DML_FAIL_DSC_CLK_REQUIRED: return "DSC clock required"; + case DML_FAIL_URGENT_LATENCY: return "Urgent latency"; + case DML_FAIL_REORDERING_BUFFER: return "Re-ordering buffer"; + case DML_FAIL_DISPCLK_DPPCLK: return "Dispclk and Dppclk"; + case DML_FAIL_TOTAL_AVAILABLE_PIPES: return "Total available pipes"; + case DML_FAIL_NUM_OTG: return "Number of OTG"; + case 
DML_FAIL_WRITEBACK_MODE: return "Writeback mode"; + case DML_FAIL_WRITEBACK_LATENCY: return "Writeback latency"; + case DML_FAIL_WRITEBACK_SCALE_RATIO_TAP: return "Writeback scale ratio/tap"; + case DML_FAIL_CURSOR_SUPPORT: return "Cursor support"; + case DML_FAIL_PITCH_SUPPORT: return "Pitch support"; + case DML_FAIL_PTE_BUFFER_SIZE: return "PTE buffer size"; + case DML_FAIL_DSC_INPUT_BPC: return "DSC input bpc"; + case DML_FAIL_PREFETCH_SUPPORT: return "Prefetch support"; + case DML_FAIL_V_RATIO_PREFETCH: return "Vertical ratio prefetch"; + default: return "Unknown Status"; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 635206248889..a730e0209c05 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -43,4 +43,6 @@ struct display_mode_lib { void dml_init_instance(struct display_mode_lib *lib, enum dml_project project); +const char *dml_get_status_message(enum dm_validation_status status); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 5dd04520ceca..391183e3428f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -30,22 +30,15 @@ typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st; typedef struct _vcs_dpi_ip_params_st ip_params_st; typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st; typedef struct _vcs_dpi_display_output_params_st display_output_params_st; -typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st; typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st; typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st; typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st; typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st; typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st; typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st; -typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st; -typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st; -typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st; -typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st; -typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st; typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st; typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st; typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st; -typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st; typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st; typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st; typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st; @@ -55,8 +48,6 @@ typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st; typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st; typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st; typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st; -typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st; -typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st; 
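[Editor's sketch] dml_get_status_message() above turns the new dm_validation_status codes into short strings, with unknown values falling back to "Unknown Status". A hedged sketch of the kind of diagnostic logging this enables; the two-value enum, the helper body and printf() here are stand-ins so the example compiles on its own, not the driver's real declarations or call site:

#include <stdio.h>

enum dm_validation_status { DML_VALIDATION_OK, DML_FAIL_VIEWPORT_SIZE };

static const char *dml_get_status_message(enum dm_validation_status s)
{
        switch (s) {
        case DML_VALIDATION_OK:     return "Validation OK";
        case DML_FAIL_VIEWPORT_SIZE: return "Viewport size";
        default:                    return "Unknown Status";
        }
}

int main(void)
{
        enum dm_validation_status status = DML_FAIL_VIEWPORT_SIZE;

        /* Report why mode validation failed in human-readable form. */
        if (status != DML_VALIDATION_OK)
                printf("mode validation failed: %s\n",
                       dml_get_status_message(status));
        return 0;
}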
typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st; struct _vcs_dpi_voltage_scaling_st { @@ -111,8 +102,6 @@ struct _vcs_dpi_soc_bounding_box_st { double xfc_bus_transport_time_us; double xfc_xbuf_latency_tolerance_us; int use_urgent_burst_bw; - double max_hscl_ratio; - double max_vscl_ratio; unsigned int num_states; struct _vcs_dpi_voltage_scaling_st clock_limits[8]; }; @@ -129,7 +118,8 @@ struct _vcs_dpi_ip_params_st { unsigned int odm_capable; unsigned int rob_buffer_size_kbytes; unsigned int det_buffer_size_kbytes; - unsigned int dpte_buffer_size_in_pte_reqs; + unsigned int dpte_buffer_size_in_pte_reqs_luma; + unsigned int dpte_buffer_size_in_pte_reqs_chroma; unsigned int pde_proc_buffer_size_64k_reqs; unsigned int dpp_output_buffer_pixels; unsigned int opp_output_buffer_lines; @@ -192,7 +182,6 @@ struct _vcs_dpi_display_xfc_params_st { struct _vcs_dpi_display_pipe_source_params_st { int source_format; unsigned char dcc; - unsigned int dcc_override; unsigned int dcc_rate; unsigned char dcc_use_global; unsigned char vm; @@ -205,7 +194,6 @@ struct _vcs_dpi_display_pipe_source_params_st { int source_scan; int sw_mode; int macro_tile_size; - unsigned char is_display_sw; unsigned int viewport_width; unsigned int viewport_height; unsigned int viewport_y_y; @@ -252,16 +240,10 @@ struct _vcs_dpi_display_output_params_st { int output_bpc; int output_type; int output_format; - int output_standard; int dsc_slices; struct writeback_st wb; }; -struct _vcs_dpi_display_bandwidth_st { - double total_bw_consumed_gbps; - double guaranteed_urgent_return_bw_gbps; -}; - struct _vcs_dpi_scaler_ratio_depth_st { double hscl_ratio; double vscl_ratio; @@ -300,11 +282,9 @@ struct _vcs_dpi_display_pipe_dest_params_st { unsigned int vupdate_width; unsigned int vready_offset; unsigned char interlaced; - unsigned char underscan; double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; - unsigned char odm_split_cnt; unsigned char odm_combine; unsigned char use_maximum_vstartup; }; @@ -331,65 +311,6 @@ struct _vcs_dpi_display_e2e_pipe_params_st { display_clocks_and_cfg_st clks_cfg; }; -struct _vcs_dpi_dchub_buffer_sizing_st { - unsigned int swath_width_y; - unsigned int swath_height_y; - unsigned int swath_height_c; - unsigned int detail_buffer_size_y; -}; - -struct _vcs_dpi_watermarks_perf_st { - double stutter_eff_in_active_region_percent; - double urgent_latency_supported_us; - double non_urgent_latency_supported_us; - double dram_clock_change_margin_us; - double dram_access_eff_percent; -}; - -struct _vcs_dpi_cstate_pstate_watermarks_st { - double cstate_exit_us; - double cstate_enter_plus_exit_us; - double pstate_change_us; -}; - -struct _vcs_dpi_wm_calc_pipe_params_st { - unsigned int num_dpp; - int voltage; - int output_type; - double dcfclk_mhz; - double socclk_mhz; - double dppclk_mhz; - double pixclk_mhz; - unsigned char interlace_en; - unsigned char pte_enable; - unsigned char dcc_enable; - double dcc_rate; - double bytes_per_pixel_c; - double bytes_per_pixel_y; - unsigned int swath_width_y; - unsigned int swath_height_y; - unsigned int swath_height_c; - unsigned int det_buffer_size_y; - double h_ratio; - double v_ratio; - unsigned int h_taps; - unsigned int h_total; - unsigned int v_total; - unsigned int v_active; - unsigned int e2e_index; - double display_pipe_line_delivery_time; - double read_bw; - unsigned int lines_in_det_y; - unsigned int lines_in_det_y_rounded_down_to_swath; - double full_det_buffering_time; - double dcfclk_deepsleep_mhz_per_plane; 
-}; - -struct _vcs_dpi_vratio_pre_st { - double vratio_pre_l; - double vratio_pre_c; -}; - struct _vcs_dpi_display_data_rq_misc_params_st { unsigned int full_swath_bytes; unsigned int stored_swath_bytes; @@ -423,16 +344,9 @@ struct _vcs_dpi_display_data_rq_dlg_params_st { unsigned int meta_bytes_per_row_ub; }; -struct _vcs_dpi_display_cur_rq_dlg_params_st { - unsigned char enable; - unsigned int swath_height; - unsigned int req_per_line; -}; - struct _vcs_dpi_display_rq_dlg_params_st { display_data_rq_dlg_params_st rq_l; display_data_rq_dlg_params_st rq_c; - display_cur_rq_dlg_params_st rq_cur0; }; struct _vcs_dpi_display_rq_sizing_params_st { @@ -498,6 +412,10 @@ struct _vcs_dpi_display_dlg_regs_st { unsigned int xfc_reg_remote_surface_flip_latency; unsigned int xfc_reg_prefetch_margin; unsigned int dst_y_delta_drq_limit; + unsigned int refcyc_per_vm_group_vblank; + unsigned int refcyc_per_vm_group_flip; + unsigned int refcyc_per_vm_req_vblank; + unsigned int refcyc_per_vm_req_flip; }; struct _vcs_dpi_display_ttu_regs_st { @@ -556,19 +474,6 @@ struct _vcs_dpi_display_dlg_sys_params_st { unsigned int total_flip_bytes; }; -struct _vcs_dpi_display_dlg_prefetch_param_st { - double prefetch_bw; - unsigned int flip_bytes; -}; - -struct _vcs_dpi_display_pipe_clock_st { - double dcfclk_mhz; - double dispclk_mhz; - double socclk_mhz; - double dscclk_mhz[6]; - double dppclk_mhz[6]; -}; - struct _vcs_dpi_display_arb_params_st { int max_req_outstanding; int min_req_outstanding; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index c2037daa8e66..ad8571f5a142 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -459,7 +459,7 @@ static void dml1_rq_dlg_get_row_heights( /* dpte */ /* ------ */ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes); - dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs; + dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma; log2_vmpg_height = 0; log2_vmpg_width = 0; @@ -776,7 +776,7 @@ static void get_surf_rq_param( /* dpte */ /* ------ */ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes); - dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs; + dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma; log2_vmpg_height = 0; log2_vmpg_width = 0; @@ -881,7 +881,7 @@ static void get_surf_rq_param( /* the dpte_group_bytes is reduced for the specific case of vertical * access of a tile surface that has dpte request of 8x1 ptes. 
*/ - if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) /*reduced, in this case, will have page fault within a group */ + if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) /*reduced, in this case, will have page fault within a group */ rq_sizing_param->dpte_group_bytes = 512; else /*full size */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c index 1d1efd72b291..cf76ea2d9f5a 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c @@ -101,6 +101,18 @@ enum gpio_mode dal_gpio_get_mode( return gpio->mode; } +enum gpio_result dal_gpio_lock_pin( + struct gpio *gpio) +{ + return dal_gpio_service_lock(gpio->service, gpio->id, gpio->en); +} + +enum gpio_result dal_gpio_unlock_pin( + struct gpio *gpio) +{ + return dal_gpio_service_unlock(gpio->service, gpio->id, gpio->en); +} + enum gpio_result dal_gpio_change_mode( struct gpio *gpio, enum gpio_mode mode) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index dada04296025..3c63a3c04dbb 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -192,6 +192,34 @@ static void set_pin_free( service->busyness[id][en] = false; } +enum gpio_result dal_gpio_service_lock( + struct gpio_service *service, + enum gpio_id id, + uint32_t en) +{ + if (!service->busyness[id]) { + ASSERT_CRITICAL(false); + return GPIO_RESULT_OPEN_FAILED; + } + + set_pin_busy(service, id, en); + return GPIO_RESULT_OK; +} + +enum gpio_result dal_gpio_service_unlock( + struct gpio_service *service, + enum gpio_id id, + uint32_t en) +{ + if (!service->busyness[id]) { + ASSERT_CRITICAL(false); + return GPIO_RESULT_OPEN_FAILED; + } + + set_pin_free(service, id, en); + return GPIO_RESULT_OK; +} + enum gpio_result dal_gpio_service_open( struct gpio_service *service, enum gpio_id id, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h index 1d501a43d13b..0c678af75331 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h @@ -52,4 +52,14 @@ void dal_gpio_service_close( struct gpio_service *service, struct hw_gpio_pin **ptr); +enum gpio_result dal_gpio_service_lock( + struct gpio_service *service, + enum gpio_id id, + uint32_t en); + +enum gpio_result dal_gpio_service_unlock( + struct gpio_service *service, + enum gpio_id id, + uint32_t en); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile deleted file mode 100644 index 352885cb4d07..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile +++ /dev/null @@ -1,99 +0,0 @@ -# -# Copyright 2017 Advanced Micro Devices, Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. 
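[Editor's sketch] The dml1_display_rq_dlg_calc.c one-liner above swaps bitwise '&' for logical '&&' in the dpte_group_bytes condition. For plain 0/1 flags the two forms agree, but '&' neither short-circuits nor tolerates truth values other than exactly 1, so '&&' states the intent; whether or not the old form ever misbehaved in practice here, a minimal standalone illustration of the difference:

#include <stdio.h>

int main(void)
{
        /* surf_vert is "true" but happens to be stored as 2 rather than 1. */
        int surf_linear = 0, log2_dpte_req_height_ptes = 0, surf_vert = 2;

        int bitwise = !surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert;
        int logical = !surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert;

        /* (1 & 1) & 2 == 0, so the bitwise form misses this case;
         * the logical form evaluates to 1 as intended. */
        printf("bitwise=%d logical=%d\n", bitwise, logical);
        return 0;
}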
-# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# -# Makefile for the 'i2c' sub-component of DAL. -# It provides the control and status of HW i2c engine of the adapter. - -I2CAUX = aux_engine.o engine_base.o i2caux.o i2c_engine.o \ - i2c_generic_hw_engine.o i2c_hw_engine.o i2c_sw_engine.o - -AMD_DAL_I2CAUX = $(addprefix $(AMDDALPATH)/dc/i2caux/,$(I2CAUX)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX) - -############################################################################### -# DCE 8x family -############################################################################### -I2CAUX_DCE80 = i2caux_dce80.o i2c_hw_engine_dce80.o \ - i2c_sw_engine_dce80.o - -AMD_DAL_I2CAUX_DCE80 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce80/,$(I2CAUX_DCE80)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE80) - -############################################################################### -# DCE 100 family -############################################################################### -I2CAUX_DCE100 = i2caux_dce100.o - -AMD_DAL_I2CAUX_DCE100 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce100/,$(I2CAUX_DCE100)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE100) - -############################################################################### -# DCE 110 family -############################################################################### -I2CAUX_DCE110 = i2caux_dce110.o i2c_sw_engine_dce110.o i2c_hw_engine_dce110.o \ - aux_engine_dce110.o - -AMD_DAL_I2CAUX_DCE110 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce110/,$(I2CAUX_DCE110)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE110) - -############################################################################### -# DCE 112 family -############################################################################### -I2CAUX_DCE112 = i2caux_dce112.o - -AMD_DAL_I2CAUX_DCE112 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce112/,$(I2CAUX_DCE112)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112) - -############################################################################### -# DCN 1.0 family -############################################################################### -ifdef CONFIG_DRM_AMD_DC_DCN1_0 -I2CAUX_DCN1 = i2caux_dcn10.o - -AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCN1) -endif - -############################################################################### -# DCE 120 family -############################################################################### -I2CAUX_DCE120 = i2caux_dce120.o - -AMD_DAL_I2CAUX_DCE120 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce120/,$(I2CAUX_DCE120)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE120) - -############################################################################### -# Diagnostics on FPGA -############################################################################### -I2CAUX_DIAG = i2caux_diag.o - -AMD_DAL_I2CAUX_DIAG = $(addprefix $(AMDDALPATH)/dc/i2caux/diagnostics/,$(I2CAUX_DIAG)) - -AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DIAG) - diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c 
b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c deleted file mode 100644 index 8cbf38b2470d..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c +++ /dev/null @@ -1,606 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dm_event_log.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "engine.h" - -/* - * Header of this unit - */ - -#include "aux_engine.h" - -/* - * Post-requisites: headers required by this unit - */ - -#include "include/link_service_types.h" - -/* - * This unit - */ - -enum { - AUX_INVALID_REPLY_RETRY_COUNTER = 1, - AUX_TIMED_OUT_RETRY_COUNTER = 2, - AUX_DEFER_RETRY_COUNTER = 6 -}; - -#define FROM_ENGINE(ptr) \ - container_of((ptr), struct aux_engine, base) -#define DC_LOGGER \ - engine->base.ctx->logger - -enum i2caux_engine_type dal_aux_engine_get_engine_type( - const struct engine *engine) -{ - return I2CAUX_ENGINE_TYPE_AUX; -} - -bool dal_aux_engine_acquire( - struct engine *engine, - struct ddc *ddc) -{ - struct aux_engine *aux_engine = FROM_ENGINE(engine); - - enum gpio_result result; - if (aux_engine->funcs->is_engine_available) { - /*check whether SW could use the engine*/ - if (!aux_engine->funcs->is_engine_available(aux_engine)) { - return false; - } - } - - result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, - GPIO_DDC_CONFIG_TYPE_MODE_AUX); - - if (result != GPIO_RESULT_OK) - return false; - - if (!aux_engine->funcs->acquire_engine(aux_engine)) { - dal_ddc_close(ddc); - return false; - } - - engine->ddc = ddc; - - return true; -} - -struct read_command_context { - uint8_t *buffer; - uint32_t current_read_length; - uint32_t offset; - enum i2caux_transaction_status status; - - struct aux_request_transaction_data request; - struct aux_reply_transaction_data reply; - - uint8_t returned_byte; - - uint32_t timed_out_retry_aux; - uint32_t invalid_reply_retry_aux; - uint32_t defer_retry_aux; - uint32_t defer_retry_i2c; - uint32_t invalid_reply_retry_aux_on_ack; - - bool transaction_complete; - bool operation_succeeded; -}; - -static void process_read_reply( - struct aux_engine *engine, - struct read_command_context *ctx) -{ - engine->funcs->process_channel_reply(engine, &ctx->reply); - - switch (ctx->reply.status) { - case AUX_TRANSACTION_REPLY_AUX_ACK: - ctx->defer_retry_aux = 0; - if (ctx->returned_byte > 
ctx->current_read_length) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else if (ctx->returned_byte < ctx->current_read_length) { - ctx->current_read_length -= ctx->returned_byte; - - ctx->offset += ctx->returned_byte; - - ++ctx->invalid_reply_retry_aux_on_ack; - - if (ctx->invalid_reply_retry_aux_on_ack > - AUX_INVALID_REPLY_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } - } else { - ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED; - ctx->transaction_complete = true; - ctx->operation_succeeded = true; - } - break; - case AUX_TRANSACTION_REPLY_AUX_NACK: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK; - ctx->operation_succeeded = false; - break; - case AUX_TRANSACTION_REPLY_AUX_DEFER: - ++ctx->defer_retry_aux; - - if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_I2C_DEFER: - ctx->defer_retry_aux = 0; - - ++ctx->defer_retry_i2c; - - if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} - -static void process_read_request( - struct aux_engine *engine, - struct read_command_context *ctx) -{ - enum aux_channel_operation_result operation_result; - - engine->funcs->submit_channel_request(engine, &ctx->request); - - operation_result = engine->funcs->get_channel_status( - engine, &ctx->returned_byte); - - switch (operation_result) { - case AUX_CHANNEL_OPERATION_SUCCEEDED: - if (ctx->returned_byte > ctx->current_read_length) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else { - ctx->timed_out_retry_aux = 0; - ctx->invalid_reply_retry_aux = 0; - - ctx->reply.length = ctx->returned_byte; - ctx->reply.data = ctx->buffer; - - process_read_reply(engine, ctx); - } - break; - case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: - ++ctx->invalid_reply_retry_aux; - - if (ctx->invalid_reply_retry_aux > - AUX_INVALID_REPLY_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else - udelay(400); - break; - case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - ++ctx->timed_out_retry_aux; - - if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } else { - /* DP 1.2a, table 2-58: - * "S3: AUX Request CMD PENDING: - * retry 3 times, with 400usec wait on each" - * The HW timeout is set to 550usec, - * so we should not wait here */ - } - break; - case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} - -static bool read_command( - struct aux_engine *engine, - struct i2caux_transaction_request *request, - bool middle_of_transaction) -{ - struct read_command_context ctx; - - ctx.buffer = request->payload.data; - ctx.current_read_length = request->payload.length; 
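[Editor's sketch] The removed read path above retries failures with per-cause counters: invalid replies are retried after a 400 usec wait, AUX defers up to a fixed cap, and NACKs fail immediately. A compact sketch of that bounded-retry shape; the enum, the caps and the callback parameters are illustrative only, and the partial-read/ACK bookkeeping of the original is omitted:

#include <stdbool.h>

enum aux_reply { AUX_ACK, AUX_NACK, AUX_DEFER, AUX_INVALID };

#define MAX_INVALID_REPLY_RETRIES 1
#define MAX_DEFER_RETRIES         6

static bool aux_read_with_retries(enum aux_reply (*do_transfer)(void),
                                  void (*wait_400us)(void))
{
        int invalid_retries = 0, defer_retries = 0;

        for (;;) {
                switch (do_transfer()) {
                case AUX_ACK:
                        return true;    /* done (partial reads not modeled) */
                case AUX_NACK:
                        return false;   /* sink refused: no retry */
                case AUX_DEFER:
                        if (++defer_retries > MAX_DEFER_RETRIES)
                                return false;
                        break;
                case AUX_INVALID:
                        if (++invalid_retries > MAX_INVALID_REPLY_RETRIES)
                                return false;
                        wait_400us();   /* the removed code waited 400us here */
                        break;
                }
        }
}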
- ctx.offset = 0; - ctx.timed_out_retry_aux = 0; - ctx.invalid_reply_retry_aux = 0; - ctx.defer_retry_aux = 0; - ctx.defer_retry_i2c = 0; - ctx.invalid_reply_retry_aux_on_ack = 0; - ctx.transaction_complete = false; - ctx.operation_succeeded = true; - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - ctx.request.type = AUX_TRANSACTION_TYPE_DP; - ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ; - ctx.request.address = request->payload.address; - } else if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) { - ctx.request.type = AUX_TRANSACTION_TYPE_I2C; - ctx.request.action = middle_of_transaction ? - I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_READ; - ctx.request.address = request->payload.address >> 1; - } else { - /* in DAL2, there was no return in such case */ - BREAK_TO_DEBUGGER(); - return false; - } - - ctx.request.delay = 0; - - do { - memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length); - - ctx.request.data = ctx.buffer + ctx.offset; - ctx.request.length = ctx.current_read_length; - - process_read_request(engine, &ctx); - - request->status = ctx.status; - - if (ctx.operation_succeeded && !ctx.transaction_complete) - if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C) - msleep(engine->delay); - } while (ctx.operation_succeeded && !ctx.transaction_complete); - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d", - request->payload.address, - request->payload.data[0], - ctx.operation_succeeded); - } - - return ctx.operation_succeeded; -} - -struct write_command_context { - bool mot; - - uint8_t *buffer; - uint32_t current_write_length; - enum i2caux_transaction_status status; - - struct aux_request_transaction_data request; - struct aux_reply_transaction_data reply; - - uint8_t returned_byte; - - uint32_t timed_out_retry_aux; - uint32_t invalid_reply_retry_aux; - uint32_t defer_retry_aux; - uint32_t defer_retry_i2c; - uint32_t max_defer_retry; - uint32_t ack_m_retry; - - uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE]; - - bool transaction_complete; - bool operation_succeeded; -}; - -static void process_write_reply( - struct aux_engine *engine, - struct write_command_context *ctx) -{ - engine->funcs->process_channel_reply(engine, &ctx->reply); - - switch (ctx->reply.status) { - case AUX_TRANSACTION_REPLY_AUX_ACK: - ctx->operation_succeeded = true; - - if (ctx->returned_byte) { - ctx->request.action = ctx->mot ? - I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST; - - ctx->current_write_length = 0; - - ++ctx->ack_m_retry; - - if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } else - udelay(300); - } else { - ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED; - ctx->defer_retry_aux = 0; - ctx->ack_m_retry = 0; - ctx->transaction_complete = true; - } - break; - case AUX_TRANSACTION_REPLY_AUX_NACK: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK; - ctx->operation_succeeded = false; - break; - case AUX_TRANSACTION_REPLY_AUX_DEFER: - ++ctx->defer_retry_aux; - - if (ctx->defer_retry_aux > ctx->max_defer_retry) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_I2C_DEFER: - ctx->defer_retry_aux = 0; - ctx->current_write_length = 0; - - ctx->request.action = ctx->mot ? 
- I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST; - - ++ctx->defer_retry_i2c; - - if (ctx->defer_retry_i2c > ctx->max_defer_retry) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } - break; - case AUX_TRANSACTION_REPLY_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} - -static void process_write_request( - struct aux_engine *engine, - struct write_command_context *ctx) -{ - enum aux_channel_operation_result operation_result; - - engine->funcs->submit_channel_request(engine, &ctx->request); - - operation_result = engine->funcs->get_channel_status( - engine, &ctx->returned_byte); - - switch (operation_result) { - case AUX_CHANNEL_OPERATION_SUCCEEDED: - ctx->timed_out_retry_aux = 0; - ctx->invalid_reply_retry_aux = 0; - - ctx->reply.length = ctx->returned_byte; - ctx->reply.data = ctx->reply_data; - - process_write_reply(engine, ctx); - break; - case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: - ++ctx->invalid_reply_retry_aux; - - if (ctx->invalid_reply_retry_aux > - AUX_INVALID_REPLY_RETRY_COUNTER) { - ctx->status = - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR; - ctx->operation_succeeded = false; - } else - udelay(400); - break; - case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: - ++ctx->timed_out_retry_aux; - - if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) { - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - ctx->operation_succeeded = false; - } else { - /* DP 1.2a, table 2-58: - * "S3: AUX Request CMD PENDING: - * retry 3 times, with 400usec wait on each" - * The HW timeout is set to 550usec, - * so we should not wait here */ - } - break; - case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: - ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON; - ctx->operation_succeeded = false; - break; - default: - ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN; - ctx->operation_succeeded = false; - } -} - -static bool write_command( - struct aux_engine *engine, - struct i2caux_transaction_request *request, - bool middle_of_transaction) -{ - struct write_command_context ctx; - - ctx.mot = middle_of_transaction; - ctx.buffer = request->payload.data; - ctx.current_write_length = request->payload.length; - ctx.timed_out_retry_aux = 0; - ctx.invalid_reply_retry_aux = 0; - ctx.defer_retry_aux = 0; - ctx.defer_retry_i2c = 0; - ctx.ack_m_retry = 0; - ctx.transaction_complete = false; - ctx.operation_succeeded = true; - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - ctx.request.type = AUX_TRANSACTION_TYPE_DP; - ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE; - ctx.request.address = request->payload.address; - } else if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) { - ctx.request.type = AUX_TRANSACTION_TYPE_I2C; - ctx.request.action = middle_of_transaction ? - I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_WRITE; - ctx.request.address = request->payload.address >> 1; - } else { - /* in DAL2, there was no return in such case */ - BREAK_TO_DEBUGGER(); - return false; - } - - ctx.request.delay = 0; - - ctx.max_defer_retry = - (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ? 
- engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER; - - do { - ctx.request.data = ctx.buffer; - ctx.request.length = ctx.current_write_length; - - process_write_request(engine, &ctx); - - request->status = ctx.status; - - if (ctx.operation_succeeded && !ctx.transaction_complete) - if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C) - msleep(engine->delay); - } while (ctx.operation_succeeded && !ctx.transaction_complete); - - if (request->payload.address_space == - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) { - DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d", - request->payload.address, - request->payload.data[0], - ctx.operation_succeeded); - } - - return ctx.operation_succeeded; -} - -static bool end_of_transaction_command( - struct aux_engine *engine, - struct i2caux_transaction_request *request) -{ - struct i2caux_transaction_request dummy_request; - uint8_t dummy_data; - - /* [tcheng] We only need to send the stop (read with MOT = 0) - * for I2C-over-Aux, not native AUX */ - - if (request->payload.address_space != - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) - return false; - - dummy_request.operation = request->operation; - dummy_request.payload.address_space = request->payload.address_space; - dummy_request.payload.address = request->payload.address; - - /* - * Add a dummy byte due to some receiver quirk - * where one byte is sent along with MOT = 0. - * Ideally this should be 0. - */ - - dummy_request.payload.length = 0; - dummy_request.payload.data = &dummy_data; - - if (request->operation == I2CAUX_TRANSACTION_READ) - return read_command(engine, &dummy_request, false); - else - return write_command(engine, &dummy_request, false); - - /* according Syed, it does not need now DoDummyMOT */ -} - -bool dal_aux_engine_submit_request( - struct engine *engine, - struct i2caux_transaction_request *request, - bool middle_of_transaction) -{ - struct aux_engine *aux_engine = FROM_ENGINE(engine); - - bool result; - bool mot_used = true; - - switch (request->operation) { - case I2CAUX_TRANSACTION_READ: - result = read_command(aux_engine, request, mot_used); - break; - case I2CAUX_TRANSACTION_WRITE: - result = write_command(aux_engine, request, mot_used); - break; - default: - result = false; - } - - /* [tcheng] - * need to send stop for the last transaction to free up the AUX - * if the above command fails, this would be the last transaction */ - - if (!middle_of_transaction || !result) - end_of_transaction_command(aux_engine, request); - - /* mask AUX interrupt */ - - return result; -} - -void dal_aux_engine_construct( - struct aux_engine *engine, - struct dc_context *ctx) -{ - dal_i2caux_construct_engine(&engine->base, ctx); - engine->delay = 0; - engine->max_defer_write_retry = 0; -} - -void dal_aux_engine_destruct( - struct aux_engine *engine) -{ - dal_i2caux_destruct_engine(&engine->base); -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h deleted file mode 100644 index c33a2898d967..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_AUX_ENGINE_H__ -#define __DAL_AUX_ENGINE_H__ - -#include "dc_ddc_types.h" - -struct aux_engine; - -struct aux_engine_funcs { - void (*destroy)( - struct aux_engine **ptr); - bool (*acquire_engine)( - struct aux_engine *engine); - void (*configure)( - struct aux_engine *engine, - union aux_config cfg); - void (*submit_channel_request)( - struct aux_engine *engine, - struct aux_request_transaction_data *request); - void (*process_channel_reply)( - struct aux_engine *engine, - struct aux_reply_transaction_data *reply); - int (*read_channel_reply)( - struct aux_engine *engine, - uint32_t size, - uint8_t *buffer, - uint8_t *reply_result, - uint32_t *sw_status); - enum aux_channel_operation_result (*get_channel_status)( - struct aux_engine *engine, - uint8_t *returned_bytes); - bool (*is_engine_available) ( - struct aux_engine *engine); -}; - -struct aux_engine { - struct engine base; - const struct aux_engine_funcs *funcs; - /* following values are expressed in milliseconds */ - uint32_t delay; - uint32_t max_defer_write_retry; - - bool acquire_reset; -}; - -void dal_aux_engine_construct( - struct aux_engine *engine, - struct dc_context *ctx); - -void dal_aux_engine_destruct( - struct aux_engine *engine); -bool dal_aux_engine_submit_request( - struct engine *ptr, - struct i2caux_transaction_request *request, - bool middle_of_transaction); -bool dal_aux_engine_acquire( - struct engine *ptr, - struct ddc *ddc); -enum i2caux_engine_type dal_aux_engine_get_engine_type( - const struct engine *engine); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c deleted file mode 100644 index 8b704ab0471c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "include/i2caux_interface.h" -#include "../i2caux.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" -#include "../i2c_hw_engine.h" - -#include "../dce110/aux_engine_dce110.h" -#include "../dce110/i2c_hw_engine_dce110.h" -#include "../dce110/i2caux_dce110.h" - -#include "dce/dce_10_0_d.h" -#include "dce/dce_10_0_sh_mask.h" - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -#define aux_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -#define hw_engine_regs(id)\ -{\ - I2C_HW_ENGINE_COMMON_REG_LIST(id) \ -} - -static const struct dce110_aux_registers dce100_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5), -}; - -static const struct dce110_i2c_hw_engine_registers dce100_hw_engine_regs[] = { - hw_engine_regs(1), - hw_engine_regs(2), - hw_engine_regs(3), - hw_engine_regs(4), - hw_engine_regs(5), - hw_engine_regs(6) -}; - -static const struct dce110_i2c_hw_engine_shift i2c_shift = { - I2C_COMMON_MASK_SH_LIST_DCE100(__SHIFT) -}; - -static const struct dce110_i2c_hw_engine_mask i2c_mask = { - I2C_COMMON_MASK_SH_LIST_DCE100(_MASK) -}; - -struct i2caux *dal_i2caux_dce100_create( - struct dc_context *ctx) -{ - struct i2caux_dce110 *i2caux_dce110 = - kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL); - - if (!i2caux_dce110) { - ASSERT_CRITICAL(false); - return NULL; - } - - dal_i2caux_dce110_construct(i2caux_dce110, - ctx, - ARRAY_SIZE(dce100_aux_regs), - dce100_aux_regs, - dce100_hw_engine_regs, - &i2c_shift, - &i2c_mask); - return &i2caux_dce110->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h deleted file mode 100644 index 2b508d3e0ef4..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_AUX_DCE100_H__ -#define __DAL_I2C_AUX_DCE100_H__ - -struct i2caux *dal_i2caux_dce100_create( - struct dc_context *ctx); - -#endif /* __DAL_I2C_AUX_DCE100_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c deleted file mode 100644 index 59c3ed43d609..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c +++ /dev/null @@ -1,505 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
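[Editor's sketch] The dce100_aux_regs[] table in the removed i2caux_dce100.c above is built from macro-generated designated initializers, one entry per AUX engine instance. A standalone sketch of the same idiom; the register names, offsets and stride here are made up, and the real AUX_COMMON_REG_LIST macros token-paste per-instance register names rather than adding a stride:

#include <stdio.h>

struct aux_registers {
        unsigned int aux_control;
        unsigned int aux_sw_data;
};

/* Hypothetical offsets; each engine instance is strided apart. */
#define mmAUX0_AUX_CONTROL  0x1880
#define mmAUX0_AUX_SW_DATA  0x1884
#define AUX_INSTANCE_STRIDE 0x10

#define aux_regs(id)                                                    \
        [id] = {                                                        \
                .aux_control = mmAUX0_AUX_CONTROL + (id) * AUX_INSTANCE_STRIDE, \
                .aux_sw_data = mmAUX0_AUX_SW_DATA + (id) * AUX_INSTANCE_STRIDE, \
        }

static const struct aux_registers aux_regs_table[] = {
        aux_regs(0), aux_regs(1), aux_regs(2),
        aux_regs(3), aux_regs(4), aux_regs(5),
};

int main(void)
{
        printf("AUX3 control at 0x%x\n", aux_regs_table[3].aux_control);
        return 0;
}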
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dm_event_log.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "../engine.h" -#include "../aux_engine.h" - -/* - * Header of this unit - */ - -#include "aux_engine_dce110.h" - -/* - * Post-requisites: headers required by this unit - */ -#include "dce/dce_11_0_sh_mask.h" - -#define CTX \ - aux110->base.base.ctx -#define REG(reg_name)\ - (aux110->regs->reg_name) -#include "reg_helper.h" - -/* - * This unit - */ - -/* - * @brief - * Cast 'struct aux_engine *' - * to 'struct aux_engine_dce110 *' - */ -#define FROM_AUX_ENGINE(ptr) \ - container_of((ptr), struct aux_engine_dce110, base) - -/* - * @brief - * Cast 'struct engine *' - * to 'struct aux_engine_dce110 *' - */ -#define FROM_ENGINE(ptr) \ - FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base)) - -static void release_engine( - struct engine *engine) -{ - struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine); - - REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1); -} - -static void destruct( - struct aux_engine_dce110 *engine); - -static void destroy( - struct aux_engine **aux_engine) -{ - struct aux_engine_dce110 *engine = FROM_AUX_ENGINE(*aux_engine); - - destruct(engine); - - kfree(engine); - - *aux_engine = NULL; -} - -#define SW_CAN_ACCESS_AUX 1 -#define DMCU_CAN_ACCESS_AUX 2 - -static bool is_engine_available( - struct aux_engine *engine) -{ - struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); - - uint32_t value = REG_READ(AUX_ARB_CONTROL); - uint32_t field = get_reg_field_value( - value, - AUX_ARB_CONTROL, - AUX_REG_RW_CNTL_STATUS); - - return (field != DMCU_CAN_ACCESS_AUX); -} -static bool acquire_engine( - struct aux_engine *engine) -{ - struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); - - uint32_t value = REG_READ(AUX_ARB_CONTROL); - uint32_t field = get_reg_field_value( - value, - AUX_ARB_CONTROL, - AUX_REG_RW_CNTL_STATUS); - if (field == DMCU_CAN_ACCESS_AUX) - return false; - /* enable AUX before request SW to access AUX */ - value = REG_READ(AUX_CONTROL); - field = get_reg_field_value(value, - AUX_CONTROL, - AUX_EN); - - if (field == 0) { - set_reg_field_value( - value, - 1, - AUX_CONTROL, - AUX_EN); - - if (REG(AUX_RESET_MASK)) { - /*DP_AUX block as part of the enable sequence*/ - set_reg_field_value( - value, - 1, - AUX_CONTROL, - AUX_RESET); - } - - REG_WRITE(AUX_CONTROL, value); - - if (REG(AUX_RESET_MASK)) { - /*poll HW to make sure reset it done*/ - - REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1, - 1, 11); - - set_reg_field_value( - value, - 0, - AUX_CONTROL, - AUX_RESET); - - REG_WRITE(AUX_CONTROL, value); - - REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0, - 1, 11); - } - } /*if (field)*/ - - /* request SW to access AUX */ - REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1); - - value = REG_READ(AUX_ARB_CONTROL); - field = get_reg_field_value( - value, - AUX_ARB_CONTROL, - AUX_REG_RW_CNTL_STATUS); - - return (field == SW_CAN_ACCESS_AUX); -} - -#define COMPOSE_AUX_SW_DATA_16_20(command, address) \ - ((command) | ((0xF0000 & (address)) >> 16)) - -#define COMPOSE_AUX_SW_DATA_8_15(address) \ - ((0xFF00 & (address)) >> 8) - -#define COMPOSE_AUX_SW_DATA_0_7(address) \ - (0xFF & (address)) - -static void submit_channel_request( - struct aux_engine *engine, - struct aux_request_transaction_data *request) -{ - struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); - uint32_t value; - uint32_t length; - - bool is_write = - ((request->type == 
AUX_TRANSACTION_TYPE_DP) && - (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) || - ((request->type == AUX_TRANSACTION_TYPE_I2C) && - ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || - (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT))); - if (REG(AUXN_IMPCAL)) { - /* clear_aux_error */ - REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, - 1, - 0); - - REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, - 1, - 0); - - /* force_default_calibrate */ - REG_UPDATE_1BY1_2(AUXN_IMPCAL, - AUXN_IMPCAL_ENABLE, 1, - AUXN_IMPCAL_OVERRIDE_ENABLE, 0); - - /* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */ - - REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, - 1, - 0); - } - /* set the delay and the number of bytes to write */ - - /* The length include - * the 4 bit header and the 20 bit address - * (that is 3 byte). - * If the requested length is non zero this means - * an addition byte specifying the length is required. */ - - length = request->length ? 4 : 3; - if (is_write) - length += request->length; - - REG_UPDATE_2(AUX_SW_CONTROL, - AUX_SW_START_DELAY, request->delay, - AUX_SW_WR_BYTES, length); - - /* program action and address and payload data (if 'is_write') */ - value = REG_UPDATE_4(AUX_SW_DATA, - AUX_SW_INDEX, 0, - AUX_SW_DATA_RW, 0, - AUX_SW_AUTOINCREMENT_DISABLE, 1, - AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address)); - - value = REG_SET_2(AUX_SW_DATA, value, - AUX_SW_AUTOINCREMENT_DISABLE, 0, - AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address)); - - value = REG_SET(AUX_SW_DATA, value, - AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address)); - - if (request->length) { - value = REG_SET(AUX_SW_DATA, value, - AUX_SW_DATA, request->length - 1); - } - - if (is_write) { - /* Load the HW buffer with the Data to be sent. - * This is relevant for write operation. - * For read, the data recived data will be - * processed in process_channel_reply(). 
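[Editor's sketch] The removed submit_channel_request() above sizes an AUX request as a 3-byte command/address header, plus one length byte when a payload length is given, plus the payload itself for writes only. A tiny standalone sketch of that arithmetic (aux_request_bytes is an illustrative name, not a driver symbol):

#include <stdbool.h>
#include <stdio.h>

static unsigned int aux_request_bytes(unsigned int payload_len, bool is_write)
{
        /* 4-bit command + 20-bit address = 3 bytes; add a length byte
         * only when a payload length is specified. */
        unsigned int bytes = payload_len ? 4 : 3;

        if (is_write)
                bytes += payload_len;   /* data is clocked out only for writes */
        return bytes;
}

int main(void)
{
        printf("%u\n", aux_request_bytes(16, false)); /* 16-byte read:  4 */
        printf("%u\n", aux_request_bytes(16, true));  /* 16-byte write: 20 */
        printf("%u\n", aux_request_bytes(0, false));  /* address-only:  3 */
        return 0;
}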
*/ - uint32_t i = 0; - - while (i < request->length) { - value = REG_SET(AUX_SW_DATA, value, - AUX_SW_DATA, request->data[i]); - - ++i; - } - } - - REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); - REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); - REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); - EVENT_LOG_AUX_REQ(engine->base.ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE, - request->action, request->address, request->length, request->data); -} - -static int read_channel_reply(struct aux_engine *engine, uint32_t size, - uint8_t *buffer, uint8_t *reply_result, - uint32_t *sw_status) -{ - struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); - uint32_t bytes_replied; - uint32_t reply_result_32; - - *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, - &bytes_replied); - - /* In case HPD is LOW, exit AUX transaction */ - if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) - return -1; - - /* Need at least the status byte */ - if (!bytes_replied) - return -1; - - REG_UPDATE_1BY1_3(AUX_SW_DATA, - AUX_SW_INDEX, 0, - AUX_SW_AUTOINCREMENT_DISABLE, 1, - AUX_SW_DATA_RW, 1); - - REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32); - reply_result_32 = reply_result_32 >> 4; - *reply_result = (uint8_t)reply_result_32; - - if (reply_result_32 == 0) { /* ACK */ - uint32_t i = 0; - - /* First byte was already used to get the command status */ - --bytes_replied; - - /* Do not overflow buffer */ - if (bytes_replied > size) - return -1; - - while (i < bytes_replied) { - uint32_t aux_sw_data_val; - - REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val); - buffer[i] = aux_sw_data_val; - ++i; - } - - return i; - } - - return 0; -} - -static void process_channel_reply( - struct aux_engine *engine, - struct aux_reply_transaction_data *reply) -{ - int bytes_replied; - uint8_t reply_result; - uint32_t sw_status; - - bytes_replied = read_channel_reply(engine, reply->length, reply->data, - &reply_result, &sw_status); - EVENT_LOG_AUX_REP(engine->base.ddc->pin_data->en, - EVENT_LOG_AUX_ORIGIN_NATIVE, reply_result, - bytes_replied, reply->data); - - /* in case HPD is LOW, exit AUX transaction */ - if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { - reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON; - return; - } - - if (bytes_replied < 0) { - /* Need to handle an error case... 
- * Hopefully, upper layer function won't call this function if - * the number of bytes in the reply was 0, because there was - * surely an error that was asserted that should have been - * handled for hot plug case, this could happens - */ - if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { - reply->status = AUX_TRANSACTION_REPLY_INVALID; - ASSERT_CRITICAL(false); - return; - } - } else { - - switch (reply_result) { - case 0: /* ACK */ - reply->status = AUX_TRANSACTION_REPLY_AUX_ACK; - break; - case 1: /* NACK */ - reply->status = AUX_TRANSACTION_REPLY_AUX_NACK; - break; - case 2: /* DEFER */ - reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER; - break; - case 4: /* AUX ACK / I2C NACK */ - reply->status = AUX_TRANSACTION_REPLY_I2C_NACK; - break; - case 8: /* AUX ACK / I2C DEFER */ - reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER; - break; - default: - reply->status = AUX_TRANSACTION_REPLY_INVALID; - } - } -} - -static enum aux_channel_operation_result get_channel_status( - struct aux_engine *engine, - uint8_t *returned_bytes) -{ - struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine); - - uint32_t value; - - if (returned_bytes == NULL) { - /*caller pass NULL pointer*/ - ASSERT_CRITICAL(false); - return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN; - } - *returned_bytes = 0; - - /* poll to make sure that SW_DONE is asserted */ - value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1, - 10, aux110->timeout_period/10); - - /* in case HPD is LOW, exit AUX transaction */ - if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) - return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON; - - /* Note that the following bits are set in 'status.bits' - * during CTS 4.2.1.2 (FW 3.3.1): - * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP, - * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H. - * - * AUX_SW_RX_MIN_COUNT_VIOL is an internal, - * HW debugging bit and should be ignored. 
*/ - if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) { - if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) || - (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK)) - return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT; - - else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) || - (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) || - (value & - AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) || - (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK)) - return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY; - - *returned_bytes = get_reg_field_value(value, - AUX_SW_STATUS, - AUX_SW_REPLY_BYTE_COUNT); - - if (*returned_bytes == 0) - return - AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY; - else { - *returned_bytes -= 1; - return AUX_CHANNEL_OPERATION_SUCCEEDED; - } - } else { - /*time_elapsed >= aux_engine->timeout_period - * AUX_SW_STATUS__AUX_SW_HPD_DISCON = at this point - */ - ASSERT_CRITICAL(false); - return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT; - } -} - -static const struct aux_engine_funcs aux_engine_funcs = { - .destroy = destroy, - .acquire_engine = acquire_engine, - .submit_channel_request = submit_channel_request, - .process_channel_reply = process_channel_reply, - .read_channel_reply = read_channel_reply, - .get_channel_status = get_channel_status, - .is_engine_available = is_engine_available, -}; - -static const struct engine_funcs engine_funcs = { - .release_engine = release_engine, - .submit_request = dal_aux_engine_submit_request, - .get_engine_type = dal_aux_engine_get_engine_type, - .acquire = dal_aux_engine_acquire, -}; - -static void construct( - struct aux_engine_dce110 *engine, - const struct aux_engine_dce110_init_data *aux_init_data) -{ - dal_aux_engine_construct(&engine->base, aux_init_data->ctx); - engine->base.base.funcs = &engine_funcs; - engine->base.funcs = &aux_engine_funcs; - - engine->timeout_period = aux_init_data->timeout_period; - engine->regs = aux_init_data->regs; -} - -static void destruct( - struct aux_engine_dce110 *engine) -{ - dal_aux_engine_destruct(&engine->base); -} - -struct aux_engine *dal_aux_engine_dce110_create( - const struct aux_engine_dce110_init_data *aux_init_data) -{ - struct aux_engine_dce110 *engine; - - if (!aux_init_data) { - ASSERT_CRITICAL(false); - return NULL; - } - - engine = kzalloc(sizeof(*engine), GFP_KERNEL); - - if (!engine) { - ASSERT_CRITICAL(false); - return NULL; - } - - construct(engine, aux_init_data); - return &engine->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h deleted file mode 100644 index 85ee82162590..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_AUX_ENGINE_DCE110_H__ -#define __DAL_AUX_ENGINE_DCE110_H__ - -#include "../aux_engine.h" - -#define AUX_COMMON_REG_LIST(id)\ - SRI(AUX_CONTROL, DP_AUX, id), \ - SRI(AUX_ARB_CONTROL, DP_AUX, id), \ - SRI(AUX_SW_DATA, DP_AUX, id), \ - SRI(AUX_SW_CONTROL, DP_AUX, id), \ - SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ - SRI(AUX_SW_STATUS, DP_AUX, id), \ - SR(AUXN_IMPCAL), \ - SR(AUXP_IMPCAL) - -struct dce110_aux_registers { - uint32_t AUX_CONTROL; - uint32_t AUX_ARB_CONTROL; - uint32_t AUX_SW_DATA; - uint32_t AUX_SW_CONTROL; - uint32_t AUX_INTERRUPT_CONTROL; - uint32_t AUX_SW_STATUS; - uint32_t AUXN_IMPCAL; - uint32_t AUXP_IMPCAL; - - uint32_t AUX_RESET_MASK; -}; - -struct aux_engine_dce110 { - struct aux_engine base; - const struct dce110_aux_registers *regs; - struct { - uint32_t aux_control; - uint32_t aux_arb_control; - uint32_t aux_sw_data; - uint32_t aux_sw_control; - uint32_t aux_interrupt_control; - uint32_t aux_sw_status; - } addr; - uint32_t timeout_period; -}; - -struct aux_engine_dce110_init_data { - uint32_t engine_id; - uint32_t timeout_period; - struct dc_context *ctx; - const struct dce110_aux_registers *regs; -}; - -struct aux_engine *dal_aux_engine_dce110_create( - const struct aux_engine_dce110_init_data *aux_init_data); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c deleted file mode 100644 index 9cbe1a7a6bcb..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c +++ /dev/null @@ -1,574 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "include/logger_interface.h" -/* - * Pre-requisites: headers required by header of this unit - */ - -#include "include/i2caux_interface.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_hw_engine.h" -#include "../i2c_generic_hw_engine.h" -/* - * Header of this unit - */ - -#include "i2c_hw_engine_dce110.h" - -/* - * Post-requisites: headers required by this unit - */ -#include "reg_helper.h" - -/* - * This unit - */ -#define DC_LOGGER \ - hw_engine->base.base.base.ctx->logger - -enum dc_i2c_status { - DC_I2C_STATUS__DC_I2C_STATUS_IDLE, - DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW, - DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW -}; - -enum dc_i2c_arbitration { - DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL, - DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH -}; - - - -/* - * @brief - * Cast pointer to 'struct i2c_hw_engine *' - * to pointer 'struct i2c_hw_engine_dce110 *' - */ -#define FROM_I2C_HW_ENGINE(ptr) \ - container_of((ptr), struct i2c_hw_engine_dce110, base) -/* - * @brief - * Cast pointer to 'struct i2c_engine *' - * to pointer to 'struct i2c_hw_engine_dce110 *' - */ -#define FROM_I2C_ENGINE(ptr) \ - FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base)) - -/* - * @brief - * Cast pointer to 'struct engine *' - * to 'pointer to struct i2c_hw_engine_dce110 *' - */ -#define FROM_ENGINE(ptr) \ - FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base)) - -#define CTX \ - hw_engine->base.base.base.ctx - -#define REG(reg_name)\ - (hw_engine->regs->reg_name) - -#undef FN -#define FN(reg_name, field_name) \ - hw_engine->i2c_shift->field_name, hw_engine->i2c_mask->field_name - -#include "reg_helper.h" - -static void disable_i2c_hw_engine( - struct i2c_hw_engine_dce110 *hw_engine) -{ - REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 0); -} - -static void release_engine( - struct engine *engine) -{ - struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine); - - struct i2c_engine *base = NULL; - bool safe_to_reset; - - base = &hw_engine->base.base; - - /* Restore original HW engine speed */ - - base->funcs->set_speed(base, hw_engine->base.original_speed); - - /* Release I2C */ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1); - - /* Reset HW engine */ - { - uint32_t i2c_sw_status = 0; - REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); - /* if used by SW, safe to reset */ - safe_to_reset = (i2c_sw_status == 1); - } - - if (safe_to_reset) - REG_UPDATE_2( - DC_I2C_CONTROL, - DC_I2C_SOFT_RESET, 1, - DC_I2C_SW_STATUS_RESET, 1); - else - REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1); - - /* HW I2c engine - clock gating feature */ - if (!hw_engine->engine_keep_power_up_count) - disable_i2c_hw_engine(hw_engine); -} - -static bool setup_engine( - struct i2c_engine *i2c_engine) -{ - struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine); - uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; - uint32_t reset_length = 0; - - if (hw_engine->base.base.setup_limit != 0) - i2c_setup_limit = hw_engine->base.base.setup_limit; - - /* Program pin select */ - REG_UPDATE_6( - DC_I2C_CONTROL, - DC_I2C_GO, 0, - DC_I2C_SOFT_RESET, 0, - DC_I2C_SEND_RESET, 0, - DC_I2C_SW_STATUS_RESET, 1, - DC_I2C_TRANSACTION_COUNT, 0, - DC_I2C_DDC_SELECT, hw_engine->engine_id); - - /* Program time limit */ - if (hw_engine->base.base.send_reset_length == 0) { - /*pre-dcn*/ - REG_UPDATE_N( - SETUP, 2, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), i2c_setup_limit, - 
FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1); - } else { - reset_length = hw_engine->base.base.send_reset_length; - } - /* Program HW priority - * set to High - interrupt software I2C at any time - * Enable restart of SW I2C that was interrupted by HW - * disable queuing of software while I2C is in use by HW */ - REG_UPDATE_2( - DC_I2C_ARBITRATION, - DC_I2C_NO_QUEUED_SW_GO, 0, - DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL); - - return true; -} - -static uint32_t get_speed( - const struct i2c_engine *i2c_engine) -{ - const struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine); - uint32_t pre_scale = 0; - - REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); - - /* [anaumov] it seems following is unnecessary */ - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ - return pre_scale ? - hw_engine->reference_frequency / pre_scale : - hw_engine->base.default_speed; -} - -static void set_speed( - struct i2c_engine *i2c_engine, - uint32_t speed) -{ - struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine); - - if (speed) { - if (hw_engine->i2c_mask->DC_I2C_DDC1_START_STOP_TIMING_CNTL) - REG_UPDATE_N( - SPEED, 3, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1); - else - REG_UPDATE_N( - SPEED, 2, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed, - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); - } -} - -static inline void reset_hw_engine(struct engine *engine) -{ - struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine); - - REG_UPDATE_2( - DC_I2C_CONTROL, - DC_I2C_SW_STATUS_RESET, 1, - DC_I2C_SW_STATUS_RESET, 1); -} - -static bool is_hw_busy(struct engine *engine) -{ - struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine); - uint32_t i2c_sw_status = 0; - - REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); - if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE) - return false; - - reset_hw_engine(engine); - - REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); - return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE; -} - - -#define STOP_TRANS_PREDICAT \ - ((hw_engine->transaction_count == 3) || \ - (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || \ - (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) - -#define SET_I2C_TRANSACTION(id) \ - do { \ - REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \ - FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)), \ - FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \ - if (STOP_TRANS_PREDICAT) \ - last_transaction = true; \ - } while (false) - - -static bool process_transaction( - struct i2c_hw_engine_dce110 *hw_engine, - struct i2c_request_transaction_data *request) -{ - uint32_t length = request->length; - uint8_t *buffer = request->data; - uint32_t value = 0; - - bool last_transaction = false; - - struct dc_context *ctx = NULL; - - ctx = hw_engine->base.base.base.ctx; - - - - switch (hw_engine->transaction_count) { - case 0: - SET_I2C_TRANSACTION(0); - break; - case 1: - SET_I2C_TRANSACTION(1); - break; - case 2: - SET_I2C_TRANSACTION(2); - break; - case 3: - SET_I2C_TRANSACTION(3); - break; - default: - /* TODO Warning ? 
*/ - break; - } - - - /* Write the I2C address and I2C data - * into the hardware circular buffer, one byte per entry. - * As an example, the 7-bit I2C slave address for CRT monitor - * for reading DDC/EDID information is 0b1010001. - * For an I2C send operation, the LSB must be programmed to 0; - * for I2C receive operation, the LSB must be programmed to 1. */ - if (hw_engine->transaction_count == 0) { - value = REG_SET_4(DC_I2C_DATA, 0, - DC_I2C_DATA_RW, false, - DC_I2C_DATA, request->address, - DC_I2C_INDEX, 0, - DC_I2C_INDEX_WRITE, 1); - hw_engine->buffer_used_write = 0; - } else - value = REG_SET_2(DC_I2C_DATA, 0, - DC_I2C_DATA_RW, false, - DC_I2C_DATA, request->address); - - hw_engine->buffer_used_write++; - - if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) { - while (length) { - REG_SET_2(DC_I2C_DATA, value, - DC_I2C_INDEX_WRITE, 0, - DC_I2C_DATA, *buffer++); - hw_engine->buffer_used_write++; - --length; - } - } - - ++hw_engine->transaction_count; - hw_engine->buffer_used_bytes += length + 1; - - return last_transaction; -} - -static void execute_transaction( - struct i2c_hw_engine_dce110 *hw_engine) -{ - REG_UPDATE_N(SETUP, 5, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0, - FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0); - - - REG_UPDATE_5(DC_I2C_CONTROL, - DC_I2C_SOFT_RESET, 0, - DC_I2C_SW_STATUS_RESET, 0, - DC_I2C_SEND_RESET, 0, - DC_I2C_GO, 0, - DC_I2C_TRANSACTION_COUNT, hw_engine->transaction_count - 1); - - /* start I2C transfer */ - REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1); - - /* all transactions were executed and HW buffer became empty - * (even though it actually happens when status becomes DONE) */ - hw_engine->transaction_count = 0; - hw_engine->buffer_used_bytes = 0; -} - -static void submit_channel_request( - struct i2c_engine *engine, - struct i2c_request_transaction_data *request) -{ - request->status = I2C_CHANNEL_OPERATION_SUCCEEDED; - - if (!process_transaction(FROM_I2C_ENGINE(engine), request)) - return; - - if (is_hw_busy(&engine->base)) { - request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY; - return; - } - - execute_transaction(FROM_I2C_ENGINE(engine)); -} - -static void process_channel_reply( - struct i2c_engine *engine, - struct i2c_reply_transaction_data *reply) -{ - uint32_t length = reply->length; - uint8_t *buffer = reply->data; - - struct i2c_hw_engine_dce110 *hw_engine = - FROM_I2C_ENGINE(engine); - - - REG_SET_3(DC_I2C_DATA, 0, - DC_I2C_INDEX, hw_engine->buffer_used_write, - DC_I2C_DATA_RW, 1, - DC_I2C_INDEX_WRITE, 1); - - while (length) { - /* after reading the status, - * if the I2C operation executed successfully - * (i.e. 
DC_I2C_STATUS_DONE = 1) then the I2C controller - * should read data bytes from I2C circular data buffer */ - - uint32_t i2c_data; - - REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data); - *buffer++ = i2c_data; - - --length; - } -} - -static enum i2c_channel_operation_result get_channel_status( - struct i2c_engine *i2c_engine, - uint8_t *returned_bytes) -{ - uint32_t i2c_sw_status = 0; - struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine); - uint32_t value = - REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); - - if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) - return I2C_CHANNEL_OPERATION_ENGINE_BUSY; - else if (value & hw_engine->i2c_mask->DC_I2C_SW_STOPPED_ON_NACK) - return I2C_CHANNEL_OPERATION_NO_RESPONSE; - else if (value & hw_engine->i2c_mask->DC_I2C_SW_TIMEOUT) - return I2C_CHANNEL_OPERATION_TIMEOUT; - else if (value & hw_engine->i2c_mask->DC_I2C_SW_ABORTED) - return I2C_CHANNEL_OPERATION_FAILED; - else if (value & hw_engine->i2c_mask->DC_I2C_SW_DONE) - return I2C_CHANNEL_OPERATION_SUCCEEDED; - - /* - * this is the case when HW used for communication, I2C_SW_STATUS - * could be zero - */ - return I2C_CHANNEL_OPERATION_SUCCEEDED; -} - -static uint32_t get_hw_buffer_available_size( - const struct i2c_hw_engine *engine) -{ - return I2C_HW_BUFFER_SIZE - - FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes; -} - -static uint32_t get_transaction_timeout( - const struct i2c_hw_engine *engine, - uint32_t length) -{ - uint32_t speed = engine->base.funcs->get_speed(&engine->base); - - uint32_t period_timeout; - uint32_t num_of_clock_stretches; - - if (!speed) - return 0; - - period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed; - - num_of_clock_stretches = 1 + (length << 3) + 1; - num_of_clock_stretches += - (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) + - (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1); - - return period_timeout * num_of_clock_stretches; -} - -static void destroy( - struct i2c_engine **i2c_engine) -{ - struct i2c_hw_engine_dce110 *engine_dce110 = - FROM_I2C_ENGINE(*i2c_engine); - - dal_i2c_hw_engine_destruct(&engine_dce110->base); - - kfree(engine_dce110); - - *i2c_engine = NULL; -} - -static const struct i2c_engine_funcs i2c_engine_funcs = { - .destroy = destroy, - .get_speed = get_speed, - .set_speed = set_speed, - .setup_engine = setup_engine, - .submit_channel_request = submit_channel_request, - .process_channel_reply = process_channel_reply, - .get_channel_status = get_channel_status, - .acquire_engine = dal_i2c_hw_engine_acquire_engine, -}; - -static const struct engine_funcs engine_funcs = { - .release_engine = release_engine, - .get_engine_type = dal_i2c_hw_engine_get_engine_type, - .acquire = dal_i2c_engine_acquire, - .submit_request = dal_i2c_hw_engine_submit_request, -}; - -static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = { - .get_hw_buffer_available_size = get_hw_buffer_available_size, - .get_transaction_timeout = get_transaction_timeout, - .wait_on_operation_result = dal_i2c_hw_engine_wait_on_operation_result, -}; - -static void construct( - struct i2c_hw_engine_dce110 *hw_engine, - const struct i2c_hw_engine_dce110_create_arg *arg) -{ - uint32_t xtal_ref_div = 0; - - dal_i2c_hw_engine_construct(&hw_engine->base, arg->ctx); - - hw_engine->base.base.base.funcs = &engine_funcs; - hw_engine->base.base.funcs = &i2c_engine_funcs; - hw_engine->base.funcs = &i2c_hw_engine_funcs; - hw_engine->base.default_speed = arg->default_speed; - - hw_engine->regs = arg->regs; - hw_engine->i2c_shift = 
arg->i2c_shift; - hw_engine->i2c_mask = arg->i2c_mask; - - hw_engine->engine_id = arg->engine_id; - - hw_engine->buffer_used_bytes = 0; - hw_engine->transaction_count = 0; - hw_engine->engine_keep_power_up_count = 1; - - - REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); - - if (xtal_ref_div == 0) { - DC_LOG_WARNING("Invalid base timer divider [%s]\n", - __func__); - xtal_ref_div = 2; - } - - /*Calculating Reference Clock by divding original frequency by - * XTAL_REF_DIV. - * At upper level, uint32_t reference_frequency = - * dal_i2caux_get_reference_clock(as) >> 1 - * which already divided by 2. So we need x2 to get original - * reference clock from ppll_info - */ - hw_engine->reference_frequency = - (arg->reference_frequency * 2) / xtal_ref_div; -} - -struct i2c_engine *dal_i2c_hw_engine_dce110_create( - const struct i2c_hw_engine_dce110_create_arg *arg) -{ - struct i2c_hw_engine_dce110 *engine_dce10; - - if (!arg) { - ASSERT_CRITICAL(false); - return NULL; - } - if (!arg->reference_frequency) { - ASSERT_CRITICAL(false); - return NULL; - } - - engine_dce10 = kzalloc(sizeof(struct i2c_hw_engine_dce110), - GFP_KERNEL); - - if (!engine_dce10) { - ASSERT_CRITICAL(false); - return NULL; - } - - construct(engine_dce10, arg); - return &engine_dce10->base.base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h deleted file mode 100644 index fea2946906ed..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_HW_ENGINE_DCE110_H__ -#define __DAL_I2C_HW_ENGINE_DCE110_H__ - -#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\ - SRI(SETUP, DC_I2C_DDC, id),\ - SRI(SPEED, DC_I2C_DDC, id),\ - SR(DC_I2C_ARBITRATION),\ - SR(DC_I2C_CONTROL),\ - SR(DC_I2C_SW_STATUS),\ - SR(DC_I2C_TRANSACTION0),\ - SR(DC_I2C_TRANSACTION1),\ - SR(DC_I2C_TRANSACTION2),\ - SR(DC_I2C_TRANSACTION3),\ - SR(DC_I2C_DATA),\ - SR(MICROSECOND_TIME_BASE_DIV) - -#define I2C_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\ - I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\ - I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\ - I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\ - I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\ - I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\ - I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\ - I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\ - I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\ - I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\ - I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\ - I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\ - I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\ - I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\ - I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\ - I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_DONE, mask_sh),\ - I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\ - I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\ - I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\ - I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\ - I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\ - I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\ - I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\ - I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\ - I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\ - I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\ - I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh) - -#define I2C_COMMON_MASK_SH_LIST_DCE100(mask_sh)\ - I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) - -#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\ - I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\ - I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh) - -struct dce110_i2c_hw_engine_shift { - uint8_t DC_I2C_DDC1_ENABLE; - uint8_t DC_I2C_DDC1_TIME_LIMIT; - uint8_t DC_I2C_DDC1_DATA_DRIVE_EN; - uint8_t DC_I2C_DDC1_CLK_DRIVE_EN; - uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL; - uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY; - uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY; - uint8_t DC_I2C_SW_DONE_USING_I2C_REG; - uint8_t DC_I2C_NO_QUEUED_SW_GO; - uint8_t DC_I2C_SW_PRIORITY; - uint8_t DC_I2C_SOFT_RESET; - uint8_t DC_I2C_SW_STATUS_RESET; - uint8_t DC_I2C_GO; - uint8_t DC_I2C_SEND_RESET; - uint8_t DC_I2C_TRANSACTION_COUNT; - uint8_t DC_I2C_DDC_SELECT; - uint8_t DC_I2C_DDC1_PRESCALE; - uint8_t DC_I2C_DDC1_THRESHOLD; - uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL; - uint8_t 
DC_I2C_SW_STOPPED_ON_NACK; - uint8_t DC_I2C_SW_TIMEOUT; - uint8_t DC_I2C_SW_ABORTED; - uint8_t DC_I2C_SW_DONE; - uint8_t DC_I2C_SW_STATUS; - uint8_t DC_I2C_STOP_ON_NACK0; - uint8_t DC_I2C_START0; - uint8_t DC_I2C_RW0; - uint8_t DC_I2C_STOP0; - uint8_t DC_I2C_COUNT0; - uint8_t DC_I2C_DATA_RW; - uint8_t DC_I2C_DATA; - uint8_t DC_I2C_INDEX; - uint8_t DC_I2C_INDEX_WRITE; - uint8_t XTAL_REF_DIV; -}; - -struct dce110_i2c_hw_engine_mask { - uint32_t DC_I2C_DDC1_ENABLE; - uint32_t DC_I2C_DDC1_TIME_LIMIT; - uint32_t DC_I2C_DDC1_DATA_DRIVE_EN; - uint32_t DC_I2C_DDC1_CLK_DRIVE_EN; - uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL; - uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY; - uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY; - uint32_t DC_I2C_SW_DONE_USING_I2C_REG; - uint32_t DC_I2C_NO_QUEUED_SW_GO; - uint32_t DC_I2C_SW_PRIORITY; - uint32_t DC_I2C_SOFT_RESET; - uint32_t DC_I2C_SW_STATUS_RESET; - uint32_t DC_I2C_GO; - uint32_t DC_I2C_SEND_RESET; - uint32_t DC_I2C_TRANSACTION_COUNT; - uint32_t DC_I2C_DDC_SELECT; - uint32_t DC_I2C_DDC1_PRESCALE; - uint32_t DC_I2C_DDC1_THRESHOLD; - uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL; - uint32_t DC_I2C_SW_STOPPED_ON_NACK; - uint32_t DC_I2C_SW_TIMEOUT; - uint32_t DC_I2C_SW_ABORTED; - uint32_t DC_I2C_SW_DONE; - uint32_t DC_I2C_SW_STATUS; - uint32_t DC_I2C_STOP_ON_NACK0; - uint32_t DC_I2C_START0; - uint32_t DC_I2C_RW0; - uint32_t DC_I2C_STOP0; - uint32_t DC_I2C_COUNT0; - uint32_t DC_I2C_DATA_RW; - uint32_t DC_I2C_DATA; - uint32_t DC_I2C_INDEX; - uint32_t DC_I2C_INDEX_WRITE; - uint32_t XTAL_REF_DIV; -}; - -struct dce110_i2c_hw_engine_registers { - uint32_t SETUP; - uint32_t SPEED; - uint32_t DC_I2C_ARBITRATION; - uint32_t DC_I2C_CONTROL; - uint32_t DC_I2C_SW_STATUS; - uint32_t DC_I2C_TRANSACTION0; - uint32_t DC_I2C_TRANSACTION1; - uint32_t DC_I2C_TRANSACTION2; - uint32_t DC_I2C_TRANSACTION3; - uint32_t DC_I2C_DATA; - uint32_t MICROSECOND_TIME_BASE_DIV; -}; - -struct i2c_hw_engine_dce110 { - struct i2c_hw_engine base; - const struct dce110_i2c_hw_engine_registers *regs; - const struct dce110_i2c_hw_engine_shift *i2c_shift; - const struct dce110_i2c_hw_engine_mask *i2c_mask; - struct { - uint32_t DC_I2C_DDCX_SETUP; - uint32_t DC_I2C_DDCX_SPEED; - } addr; - uint32_t engine_id; - /* expressed in kilohertz */ - uint32_t reference_frequency; - /* number of bytes currently used in HW buffer */ - uint32_t buffer_used_bytes; - /* number of bytes used for write transaction in HW buffer - * - this will be used as the index to read from*/ - uint32_t buffer_used_write; - /* number of pending transactions (before GO) */ - uint32_t transaction_count; - uint32_t engine_keep_power_up_count; - uint32_t i2_setup_time_limit; -}; - -struct i2c_hw_engine_dce110_create_arg { - uint32_t engine_id; - uint32_t reference_frequency; - uint32_t default_speed; - struct dc_context *ctx; - const struct dce110_i2c_hw_engine_registers *regs; - const struct dce110_i2c_hw_engine_shift *i2c_shift; - const struct dce110_i2c_hw_engine_mask *i2c_mask; -}; - -struct i2c_engine *dal_i2c_hw_engine_dce110_create( - const struct i2c_hw_engine_dce110_create_arg *arg); - -enum { - I2C_SETUP_TIME_LIMIT_DCE = 255, - I2C_SETUP_TIME_LIMIT_DCN = 3, - I2C_HW_BUFFER_SIZE = 538, - I2C_SEND_RESET_LENGTH_9 = 9, - I2C_SEND_RESET_LENGTH_10 = 10, -}; -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c deleted file mode 100644 index 3aa7f791e523..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c +++ 
/dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" - -/* - * Header of this unit - */ - -#include "i2c_sw_engine_dce110.h" - -/* - * Post-requisites: headers required by this unit - */ - -/* - * This unit - */ - -/* - * @brief - * Cast 'struct i2c_sw_engine *' - * to 'struct i2c_sw_engine_dce110 *' - */ -#define FROM_I2C_SW_ENGINE(ptr) \ - container_of((ptr), struct i2c_sw_engine_dce110, base) -/* - * @brief - * Cast 'struct i2c_engine *' - * to 'struct i2c_sw_engine_dce80 *' - */ -#define FROM_I2C_ENGINE(ptr) \ - FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base)) - -/* - * @brief - * Cast 'struct engine *' - * to 'struct i2c_sw_engine_dce80 *' - */ -#define FROM_ENGINE(ptr) \ - FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base)) - -static void release_engine( - struct engine *engine) -{ -} - -static void destruct( - struct i2c_sw_engine_dce110 *engine) -{ - dal_i2c_sw_engine_destruct(&engine->base); -} - -static void destroy( - struct i2c_engine **engine) -{ - struct i2c_sw_engine_dce110 *sw_engine = FROM_I2C_ENGINE(*engine); - - destruct(sw_engine); - - kfree(sw_engine); - - *engine = NULL; -} - -static bool acquire_engine( - struct i2c_engine *engine, - struct ddc *ddc_handle) -{ - return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle); -} - -static const struct i2c_engine_funcs i2c_engine_funcs = { - .acquire_engine = acquire_engine, - .destroy = destroy, - .get_speed = dal_i2c_sw_engine_get_speed, - .set_speed = dal_i2c_sw_engine_set_speed, - .setup_engine = dal_i2c_engine_setup_i2c_engine, - .submit_channel_request = dal_i2c_sw_engine_submit_channel_request, - .process_channel_reply = dal_i2c_engine_process_channel_reply, - .get_channel_status = dal_i2c_sw_engine_get_channel_status, -}; - -static const struct engine_funcs engine_funcs = { - .release_engine = release_engine, - .get_engine_type = dal_i2c_sw_engine_get_engine_type, - .acquire = dal_i2c_engine_acquire, - .submit_request = dal_i2c_sw_engine_submit_request, -}; - -static void construct( - struct i2c_sw_engine_dce110 *engine_dce110, - const struct i2c_sw_engine_dce110_create_arg *arg_dce110) -{ - struct i2c_sw_engine_create_arg 
arg_base; - - arg_base.ctx = arg_dce110->ctx; - arg_base.default_speed = arg_dce110->default_speed; - - dal_i2c_sw_engine_construct(&engine_dce110->base, &arg_base); - - /*struct engine struct engine_funcs*/ - engine_dce110->base.base.base.funcs = &engine_funcs; - /*struct i2c_engine struct i2c_engine_funcs*/ - engine_dce110->base.base.funcs = &i2c_engine_funcs; - engine_dce110->base.default_speed = arg_dce110->default_speed; - engine_dce110->engine_id = arg_dce110->engine_id; -} - -struct i2c_engine *dal_i2c_sw_engine_dce110_create( - const struct i2c_sw_engine_dce110_create_arg *arg) -{ - struct i2c_sw_engine_dce110 *engine_dce110; - - if (!arg) { - ASSERT_CRITICAL(false); - return NULL; - } - - engine_dce110 = kzalloc(sizeof(struct i2c_sw_engine_dce110), - GFP_KERNEL); - - if (!engine_dce110) { - ASSERT_CRITICAL(false); - return NULL; - } - - construct(engine_dce110, arg); - return &engine_dce110->base.base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c deleted file mode 100644 index 1d748ac1d6d6..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "../i2caux.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" -#include "../i2c_hw_engine.h" - -/* - * Header of this unit - */ -#include "i2caux_dce110.h" - -#include "i2c_sw_engine_dce110.h" -#include "i2c_hw_engine_dce110.h" -#include "aux_engine_dce110.h" -#include "../../dc.h" -#include "dc_types.h" - - -/* - * Post-requisites: headers required by this unit - */ - -/* - * This unit - */ -/*cast pointer to struct i2caux TO pointer to struct i2caux_dce110*/ -#define FROM_I2C_AUX(ptr) \ - container_of((ptr), struct i2caux_dce110, base) - -static void destruct( - struct i2caux_dce110 *i2caux_dce110) -{ - dal_i2caux_destruct(&i2caux_dce110->base); -} - -static void destroy( - struct i2caux **i2c_engine) -{ - struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(*i2c_engine); - - destruct(i2caux_dce110); - - kfree(i2caux_dce110); - - *i2c_engine = NULL; -} - -static struct i2c_engine *acquire_i2c_hw_engine( - struct i2caux *i2caux, - struct ddc *ddc) -{ - struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux); - - struct i2c_engine *engine = NULL; - /* generic hw engine is not used for EDID read - * It may be needed for external i2c device, like thermal chip, - * TODO will be implemented when needed. - * check dce80 bool non_generic for generic hw engine; - */ - - if (!ddc) - return NULL; - - if (ddc->hw_info.hw_supported) { - enum gpio_ddc_line line = dal_ddc_get_line(ddc); - - if (line < GPIO_DDC_LINE_COUNT) - engine = i2caux->i2c_hw_engines[line]; - } - - if (!engine) - return NULL; - - if (!i2caux_dce110->i2c_hw_buffer_in_use && - engine->base.funcs->acquire(&engine->base, ddc)) { - i2caux_dce110->i2c_hw_buffer_in_use = true; - return engine; - } - - return NULL; -} - -static void release_engine( - struct i2caux *i2caux, - struct engine *engine) -{ - struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux); - - if (engine->funcs->get_engine_type(engine) == - I2CAUX_ENGINE_TYPE_I2C_DDC_HW) - i2caux_dce110->i2c_hw_buffer_in_use = false; - - dal_i2caux_release_engine(i2caux, engine); -} - -static const enum gpio_ddc_line hw_ddc_lines[] = { - GPIO_DDC_LINE_DDC1, - GPIO_DDC_LINE_DDC2, - GPIO_DDC_LINE_DDC3, - GPIO_DDC_LINE_DDC4, - GPIO_DDC_LINE_DDC5, - GPIO_DDC_LINE_DDC6, -}; - -static const enum gpio_ddc_line hw_aux_lines[] = { - GPIO_DDC_LINE_DDC1, - GPIO_DDC_LINE_DDC2, - GPIO_DDC_LINE_DDC3, - GPIO_DDC_LINE_DDC4, - GPIO_DDC_LINE_DDC5, - GPIO_DDC_LINE_DDC6, -}; - -/* function table */ -static const struct i2caux_funcs i2caux_funcs = { - .destroy = destroy, - .acquire_i2c_hw_engine = acquire_i2c_hw_engine, - .release_engine = release_engine, - .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine, - .acquire_aux_engine = dal_i2caux_acquire_aux_engine, -}; - -#include "dce/dce_11_0_d.h" -#include "dce/dce_11_0_sh_mask.h" - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -#define aux_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \ -} - -#define hw_engine_regs(id)\ -{\ - I2C_HW_ENGINE_COMMON_REG_LIST(id) \ -} - -static const struct dce110_aux_registers dce110_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - 
-static const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[] = { - hw_engine_regs(1), - hw_engine_regs(2), - hw_engine_regs(3), - hw_engine_regs(4), - hw_engine_regs(5), - hw_engine_regs(6) -}; - -static const struct dce110_i2c_hw_engine_shift i2c_shift = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce110_i2c_hw_engine_mask i2c_mask = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -void dal_i2caux_dce110_construct( - struct i2caux_dce110 *i2caux_dce110, - struct dc_context *ctx, - unsigned int num_i2caux_inst, - const struct dce110_aux_registers aux_regs[], - const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[], - const struct dce110_i2c_hw_engine_shift *i2c_shift, - const struct dce110_i2c_hw_engine_mask *i2c_mask) -{ - uint32_t i = 0; - uint32_t reference_frequency = 0; - bool use_i2c_sw_engine = false; - struct i2caux *base = NULL; - /*TODO: For CZ bring up, if dal_i2caux_get_reference_clock - * does not return 48KHz, we need hard coded for 48Khz. - * Some BIOS setting incorrect cause this - * For production, we always get value from BIOS*/ - reference_frequency = - dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1; - - base = &i2caux_dce110->base; - - dal_i2caux_construct(base, ctx); - - i2caux_dce110->base.funcs = &i2caux_funcs; - i2caux_dce110->i2c_hw_buffer_in_use = false; - /* Create I2C engines (DDC lines per connector) - * different I2C/AUX usage cases, DDC, Generic GPIO, AUX. - */ - do { - enum gpio_ddc_line line_id = hw_ddc_lines[i]; - - struct i2c_hw_engine_dce110_create_arg hw_arg_dce110; - - if (use_i2c_sw_engine) { - struct i2c_sw_engine_dce110_create_arg sw_arg; - - sw_arg.engine_id = i; - sw_arg.default_speed = base->default_i2c_sw_speed; - sw_arg.ctx = ctx; - base->i2c_sw_engines[line_id] = - dal_i2c_sw_engine_dce110_create(&sw_arg); - } - - hw_arg_dce110.engine_id = i; - hw_arg_dce110.reference_frequency = reference_frequency; - hw_arg_dce110.default_speed = base->default_i2c_hw_speed; - hw_arg_dce110.ctx = ctx; - hw_arg_dce110.regs = &i2c_hw_engine_regs[i]; - hw_arg_dce110.i2c_shift = i2c_shift; - hw_arg_dce110.i2c_mask = i2c_mask; - - base->i2c_hw_engines[line_id] = - dal_i2c_hw_engine_dce110_create(&hw_arg_dce110); - if (base->i2c_hw_engines[line_id] != NULL) { - switch (ctx->dce_version) { - case DCN_VERSION_1_0: - base->i2c_hw_engines[line_id]->setup_limit = - I2C_SETUP_TIME_LIMIT_DCN; - base->i2c_hw_engines[line_id]->send_reset_length = 0; - break; - default: - base->i2c_hw_engines[line_id]->setup_limit = - I2C_SETUP_TIME_LIMIT_DCE; - base->i2c_hw_engines[line_id]->send_reset_length = 0; - break; - } - } - ++i; - } while (i < num_i2caux_inst); - - /* Create AUX engines for all lines which has assisted HW AUX - * 'i' (loop counter) used as DDC/AUX engine_id */ - - i = 0; - - do { - enum gpio_ddc_line line_id = hw_aux_lines[i]; - - struct aux_engine_dce110_init_data aux_init_data; - - aux_init_data.engine_id = i; - aux_init_data.timeout_period = base->aux_timeout_period; - aux_init_data.ctx = ctx; - aux_init_data.regs = &aux_regs[i]; - - base->aux_engines[line_id] = - dal_aux_engine_dce110_create(&aux_init_data); - - ++i; - } while (i < num_i2caux_inst); - - /*TODO Generic I2C SW and HW*/ -} - -/* - * dal_i2caux_dce110_create - * - * @brief - * public interface to allocate memory for DCE11 I2CAUX - * - * @param - * struct adapter_service *as - [in] - * struct dc_context *ctx - [in] - * - * @return - * pointer to the base struct of DCE11 I2CAUX - */ -struct i2caux *dal_i2caux_dce110_create( - struct dc_context *ctx) 
-{ - struct i2caux_dce110 *i2caux_dce110 = - kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL); - - if (!i2caux_dce110) { - ASSERT_CRITICAL(false); - return NULL; - } - - dal_i2caux_dce110_construct(i2caux_dce110, - ctx, - ARRAY_SIZE(dce110_aux_regs), - dce110_aux_regs, - i2c_hw_engine_regs, - &i2c_shift, - &i2c_mask); - return &i2caux_dce110->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h deleted file mode 100644 index d3d8cc58666a..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_AUX_DCE110_H__ -#define __DAL_I2C_AUX_DCE110_H__ - -#include "../i2caux.h" - -struct i2caux_dce110 { - struct i2caux base; - /* indicate the I2C HW circular buffer is in use */ - bool i2c_hw_buffer_in_use; -}; - -struct dce110_aux_registers; -struct dce110_i2c_hw_engine_registers; -struct dce110_i2c_hw_engine_shift; -struct dce110_i2c_hw_engine_mask; - -struct i2caux *dal_i2caux_dce110_create( - struct dc_context *ctx); - -void dal_i2caux_dce110_construct( - struct i2caux_dce110 *i2caux_dce110, - struct dc_context *ctx, - unsigned int num_i2caux_inst, - const struct dce110_aux_registers *aux_regs, - const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs, - const struct dce110_i2c_hw_engine_shift *i2c_shift, - const struct dce110_i2c_hw_engine_mask *i2c_mask); - -#endif /* __DAL_I2C_AUX_DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c deleted file mode 100644 index a9db04738724..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "include/i2caux_interface.h" -#include "../i2caux.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" -#include "../i2c_hw_engine.h" - -#include "../dce110/i2caux_dce110.h" -#include "i2caux_dce112.h" - -#include "../dce110/aux_engine_dce110.h" - -#include "../dce110/i2c_hw_engine_dce110.h" - -#include "dce/dce_11_2_d.h" -#include "dce/dce_11_2_sh_mask.h" - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -#define aux_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \ -} - -#define hw_engine_regs(id)\ -{\ - I2C_HW_ENGINE_COMMON_REG_LIST(id) \ -} - -static const struct dce110_aux_registers dce112_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5), -}; - -static const struct dce110_i2c_hw_engine_registers dce112_hw_engine_regs[] = { - hw_engine_regs(1), - hw_engine_regs(2), - hw_engine_regs(3), - hw_engine_regs(4), - hw_engine_regs(5), - hw_engine_regs(6) -}; - -static const struct dce110_i2c_hw_engine_shift i2c_shift = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce110_i2c_hw_engine_mask i2c_mask = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -static void construct( - struct i2caux_dce110 *i2caux_dce110, - struct dc_context *ctx) -{ - dal_i2caux_dce110_construct(i2caux_dce110, - ctx, - ARRAY_SIZE(dce112_aux_regs), - dce112_aux_regs, - dce112_hw_engine_regs, - &i2c_shift, - &i2c_mask); -} - -/* - * dal_i2caux_dce110_create - * - * @brief - * public interface to allocate memory for DCE11 I2CAUX - * - * @param - * struct adapter_service *as - [in] - * struct dc_context *ctx - [in] - * - * @return - * pointer to the base struct of DCE11 I2CAUX - */ -struct i2caux *dal_i2caux_dce112_create( - struct dc_context *ctx) -{ - struct i2caux_dce110 *i2caux_dce110 = - kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL); - - if (!i2caux_dce110) { - ASSERT_CRITICAL(false); - return NULL; - } - - construct(i2caux_dce110, ctx); - return &i2caux_dce110->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c deleted file mode 100644 index 
6a4f344c1db4..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2012-16 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "include/i2caux_interface.h" -#include "../i2caux.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" -#include "../i2c_hw_engine.h" - -#include "../dce110/i2c_hw_engine_dce110.h" -#include "../dce110/aux_engine_dce110.h" -#include "../dce110/i2caux_dce110.h" - -#include "dce/dce_12_0_offset.h" -#include "dce/dce_12_0_sh_mask.h" -#include "soc15_hw_ip.h" -#include "vega10_ip_offset.h" - -/* begin ********************* - * macros to expend register list macro defined in HW object header file */ - -#define BASE_INNER(seg) \ - DCE_BASE__INST0_SEG ## seg - -/* compile time expand base address. 
*/ -#define BASE(seg) \ - BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name -/* macros to expend register list macro defined in HW object header file - * end *********************/ - -#define aux_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \ -} - -static const struct dce110_aux_registers dce120_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5), -}; - -#define hw_engine_regs(id)\ -{\ - I2C_HW_ENGINE_COMMON_REG_LIST(id) \ -} - -static const struct dce110_i2c_hw_engine_registers dce120_hw_engine_regs[] = { - hw_engine_regs(1), - hw_engine_regs(2), - hw_engine_regs(3), - hw_engine_regs(4), - hw_engine_regs(5), - hw_engine_regs(6) -}; - -static const struct dce110_i2c_hw_engine_shift i2c_shift = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce110_i2c_hw_engine_mask i2c_mask = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -struct i2caux *dal_i2caux_dce120_create( - struct dc_context *ctx) -{ - struct i2caux_dce110 *i2caux_dce110 = - kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL); - - if (!i2caux_dce110) { - ASSERT_CRITICAL(false); - return NULL; - } - - dal_i2caux_dce110_construct(i2caux_dce110, - ctx, - ARRAY_SIZE(dce120_aux_regs), - dce120_aux_regs, - dce120_hw_engine_regs, - &i2c_shift, - &i2c_mask); - return &i2caux_dce110->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c deleted file mode 100644 index fd0832dd2c75..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c +++ /dev/null @@ -1,875 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_hw_engine.h" -#include "../i2c_generic_hw_engine.h" -/* - * Header of this unit - */ - -#include "i2c_hw_engine_dce80.h" - -/* - * Post-requisites: headers required by this unit - */ - -#include "dce/dce_8_0_d.h" -#include "dce/dce_8_0_sh_mask.h" -/* - * This unit - */ - -enum dc_i2c_status { - DC_I2C_STATUS__DC_I2C_STATUS_IDLE, - DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW, - DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW -}; - -enum dc_i2c_arbitration { - DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL, - DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH -}; - -enum { - /* No timeout in HW - * (timeout implemented in SW by querying status) */ - I2C_SETUP_TIME_LIMIT = 255, - I2C_HW_BUFFER_SIZE = 144 -}; - -/* - * @brief - * Cast 'struct i2c_hw_engine *' - * to 'struct i2c_hw_engine_dce80 *' - */ -#define FROM_I2C_HW_ENGINE(ptr) \ - container_of((ptr), struct i2c_hw_engine_dce80, base) - -/* - * @brief - * Cast pointer to 'struct i2c_engine *' - * to pointer to 'struct i2c_hw_engine_dce80 *' - */ -#define FROM_I2C_ENGINE(ptr) \ - FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base)) - -/* - * @brief - * Cast pointer to 'struct engine *' - * to 'pointer to struct i2c_hw_engine_dce80 *' - */ -#define FROM_ENGINE(ptr) \ - FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base)) - -static void disable_i2c_hw_engine( - struct i2c_hw_engine_dce80 *engine) -{ - const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP; - uint32_t value = 0; - - struct dc_context *ctx = NULL; - - ctx = engine->base.base.base.ctx; - - value = dm_read_reg(ctx, addr); - - set_reg_field_value( - value, - 0, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_ENABLE); - - dm_write_reg(ctx, addr, value); -} - -static void release_engine( - struct engine *engine) -{ - struct i2c_hw_engine_dce80 *hw_engine = FROM_ENGINE(engine); - - struct i2c_engine *base = NULL; - bool safe_to_reset; - uint32_t value = 0; - - base = &hw_engine->base.base; - - /* Restore original HW engine speed */ - - base->funcs->set_speed(base, hw_engine->base.original_speed); - - /* Release I2C */ - { - value = dm_read_reg(engine->ctx, mmDC_I2C_ARBITRATION); - - set_reg_field_value( - value, - 1, - DC_I2C_ARBITRATION, - DC_I2C_SW_DONE_USING_I2C_REG); - - dm_write_reg(engine->ctx, mmDC_I2C_ARBITRATION, value); - } - - /* Reset HW engine */ - { - uint32_t i2c_sw_status = 0; - - value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS); - - i2c_sw_status = get_reg_field_value( - value, - DC_I2C_SW_STATUS, - DC_I2C_SW_STATUS); - /* if used by SW, safe to reset */ - safe_to_reset = (i2c_sw_status == 1); - } - { - value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL); - - if (safe_to_reset) - set_reg_field_value( - value, - 1, - DC_I2C_CONTROL, - DC_I2C_SOFT_RESET); - - set_reg_field_value( - value, - 1, - DC_I2C_CONTROL, - DC_I2C_SW_STATUS_RESET); - - dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value); - } - - /* HW I2c engine - clock gating feature */ - if (!hw_engine->engine_keep_power_up_count) - disable_i2c_hw_engine(hw_engine); -} - -static void destruct( - struct i2c_hw_engine_dce80 *engine) -{ - dal_i2c_hw_engine_destruct(&engine->base); -} - -static void destroy( - struct i2c_engine **i2c_engine) -{ - struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(*i2c_engine); - - destruct(engine); - - kfree(engine); - - *i2c_engine = NULL; -} - -static 
bool setup_engine( - struct i2c_engine *i2c_engine) -{ - uint32_t value = 0; - struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine); - - /* Program pin select */ - { - const uint32_t addr = mmDC_I2C_CONTROL; - - value = dm_read_reg(i2c_engine->base.ctx, addr); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_GO); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_SOFT_RESET); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_SEND_RESET); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_SW_STATUS_RESET); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_TRANSACTION_COUNT); - - set_reg_field_value( - value, - engine->engine_id, - DC_I2C_CONTROL, - DC_I2C_DDC_SELECT); - - dm_write_reg(i2c_engine->base.ctx, addr, value); - } - - /* Program time limit */ - { - const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP; - - value = dm_read_reg(i2c_engine->base.ctx, addr); - - set_reg_field_value( - value, - I2C_SETUP_TIME_LIMIT, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_TIME_LIMIT); - - set_reg_field_value( - value, - 1, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_ENABLE); - - dm_write_reg(i2c_engine->base.ctx, addr, value); - } - - /* Program HW priority - * set to High - interrupt software I2C at any time - * Enable restart of SW I2C that was interrupted by HW - * disable queuing of software while I2C is in use by HW */ - { - value = dm_read_reg(i2c_engine->base.ctx, - mmDC_I2C_ARBITRATION); - - set_reg_field_value( - value, - 0, - DC_I2C_ARBITRATION, - DC_I2C_NO_QUEUED_SW_GO); - - set_reg_field_value( - value, - DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL, - DC_I2C_ARBITRATION, - DC_I2C_SW_PRIORITY); - - dm_write_reg(i2c_engine->base.ctx, - mmDC_I2C_ARBITRATION, value); - } - - return true; -} - -static uint32_t get_speed( - const struct i2c_engine *i2c_engine) -{ - const struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine); - - const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED; - - uint32_t pre_scale = 0; - - uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr); - - pre_scale = get_reg_field_value( - value, - DC_I2C_DDC1_SPEED, - DC_I2C_DDC1_PRESCALE); - - /* [anaumov] it seems following is unnecessary */ - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ - - return pre_scale ? 
- engine->reference_frequency / pre_scale : - engine->base.default_speed; -} - -static void set_speed( - struct i2c_engine *i2c_engine, - uint32_t speed) -{ - struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine); - - if (speed) { - const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED; - - uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr); - - set_reg_field_value( - value, - engine->reference_frequency / speed, - DC_I2C_DDC1_SPEED, - DC_I2C_DDC1_PRESCALE); - - set_reg_field_value( - value, - 2, - DC_I2C_DDC1_SPEED, - DC_I2C_DDC1_THRESHOLD); - - dm_write_reg(i2c_engine->base.ctx, addr, value); - } -} - -static inline void reset_hw_engine(struct engine *engine) -{ - uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL); - - set_reg_field_value( - value, - 1, - DC_I2C_CONTROL, - DC_I2C_SOFT_RESET); - - set_reg_field_value( - value, - 1, - DC_I2C_CONTROL, - DC_I2C_SW_STATUS_RESET); - - dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value); -} - -static bool is_hw_busy(struct engine *engine) -{ - uint32_t i2c_sw_status = 0; - - uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS); - - i2c_sw_status = get_reg_field_value( - value, - DC_I2C_SW_STATUS, - DC_I2C_SW_STATUS); - - if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE) - return false; - - reset_hw_engine(engine); - - value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS); - - i2c_sw_status = get_reg_field_value( - value, - DC_I2C_SW_STATUS, - DC_I2C_SW_STATUS); - - return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE; -} - -/* - * @brief - * DC_GPIO_DDC MM register offsets - */ -static const uint32_t transaction_addr[] = { - mmDC_I2C_TRANSACTION0, - mmDC_I2C_TRANSACTION1, - mmDC_I2C_TRANSACTION2, - mmDC_I2C_TRANSACTION3 -}; - -static bool process_transaction( - struct i2c_hw_engine_dce80 *engine, - struct i2c_request_transaction_data *request) -{ - uint32_t length = request->length; - uint8_t *buffer = request->data; - - bool last_transaction = false; - uint32_t value = 0; - - struct dc_context *ctx = NULL; - - ctx = engine->base.base.base.ctx; - - { - const uint32_t addr = - transaction_addr[engine->transaction_count]; - - value = dm_read_reg(ctx, addr); - - set_reg_field_value( - value, - 1, - DC_I2C_TRANSACTION0, - DC_I2C_STOP_ON_NACK0); - - set_reg_field_value( - value, - 1, - DC_I2C_TRANSACTION0, - DC_I2C_START0); - - if ((engine->transaction_count == 3) || - (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || - (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) { - - set_reg_field_value( - value, - 1, - DC_I2C_TRANSACTION0, - DC_I2C_STOP0); - - last_transaction = true; - } else - set_reg_field_value( - value, - 0, - DC_I2C_TRANSACTION0, - DC_I2C_STOP0); - - set_reg_field_value( - value, - (0 != (request->action & - I2CAUX_TRANSACTION_ACTION_I2C_READ)), - DC_I2C_TRANSACTION0, - DC_I2C_RW0); - - set_reg_field_value( - value, - length, - DC_I2C_TRANSACTION0, - DC_I2C_COUNT0); - - dm_write_reg(ctx, addr, value); - } - - /* Write the I2C address and I2C data - * into the hardware circular buffer, one byte per entry. - * As an example, the 7-bit I2C slave address for CRT monitor - * for reading DDC/EDID information is 0b1010001. - * For an I2C send operation, the LSB must be programmed to 0; - * for I2C receive operation, the LSB must be programmed to 1. 
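/*
 * Illustrative sketch (editor's addition, not part of the deleted dce80
 * file): per the comment above, the address byte pushed into DC_I2C_DATA
 * already carries the R/W flag in its least significant bit, i.e. it is the
 * 7-bit slave address shifted left by one.  For the DDC/EDID slave at 7-bit
 * address 0x50 this gives 0xA0 for writes and 0xA1 for reads.
 */
#if 0	/* stand-alone demonstration only */
static inline uint8_t example_i2c_address_byte(uint8_t addr_7bit, bool read)
{
	/* write -> LSB 0, read -> LSB 1 */
	return (uint8_t)((addr_7bit << 1) | (read ? 1 : 0));
}
/* example_i2c_address_byte(0x50, false) == 0xA0  (EDID offset/write phase) */
/* example_i2c_address_byte(0x50, true)  == 0xA1  (EDID read phase)         */
#endif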
*/ - - { - value = 0; - - set_reg_field_value( - value, - false, - DC_I2C_DATA, - DC_I2C_DATA_RW); - - set_reg_field_value( - value, - request->address, - DC_I2C_DATA, - DC_I2C_DATA); - - if (engine->transaction_count == 0) { - set_reg_field_value( - value, - 0, - DC_I2C_DATA, - DC_I2C_INDEX); - - /*enable index write*/ - set_reg_field_value( - value, - 1, - DC_I2C_DATA, - DC_I2C_INDEX_WRITE); - } - - dm_write_reg(ctx, mmDC_I2C_DATA, value); - - if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) { - - set_reg_field_value( - value, - 0, - DC_I2C_DATA, - DC_I2C_INDEX_WRITE); - - while (length) { - - set_reg_field_value( - value, - *buffer++, - DC_I2C_DATA, - DC_I2C_DATA); - - dm_write_reg(ctx, mmDC_I2C_DATA, value); - --length; - } - } - } - - ++engine->transaction_count; - engine->buffer_used_bytes += length + 1; - - return last_transaction; -} - -static void execute_transaction( - struct i2c_hw_engine_dce80 *engine) -{ - uint32_t value = 0; - struct dc_context *ctx = NULL; - - ctx = engine->base.base.base.ctx; - - { - const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP; - - value = dm_read_reg(ctx, addr); - - set_reg_field_value( - value, - 0, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_DATA_DRIVE_EN); - - set_reg_field_value( - value, - 0, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_CLK_DRIVE_EN); - - set_reg_field_value( - value, - 0, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_DATA_DRIVE_SEL); - - set_reg_field_value( - value, - 0, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_INTRA_TRANSACTION_DELAY); - - set_reg_field_value( - value, - 0, - DC_I2C_DDC1_SETUP, - DC_I2C_DDC1_INTRA_BYTE_DELAY); - - dm_write_reg(ctx, addr, value); - } - - { - const uint32_t addr = mmDC_I2C_CONTROL; - - value = dm_read_reg(ctx, addr); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_SOFT_RESET); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_SW_STATUS_RESET); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_SEND_RESET); - - set_reg_field_value( - value, - 0, - DC_I2C_CONTROL, - DC_I2C_GO); - - set_reg_field_value( - value, - engine->transaction_count - 1, - DC_I2C_CONTROL, - DC_I2C_TRANSACTION_COUNT); - - dm_write_reg(ctx, addr, value); - } - - /* start I2C transfer */ - { - const uint32_t addr = mmDC_I2C_CONTROL; - - value = dm_read_reg(ctx, addr); - - set_reg_field_value( - value, - 1, - DC_I2C_CONTROL, - DC_I2C_GO); - - dm_write_reg(ctx, addr, value); - } - - /* all transactions were executed and HW buffer became empty - * (even though it actually happens when status becomes DONE) */ - engine->transaction_count = 0; - engine->buffer_used_bytes = 0; -} - -static void submit_channel_request( - struct i2c_engine *engine, - struct i2c_request_transaction_data *request) -{ - request->status = I2C_CHANNEL_OPERATION_SUCCEEDED; - - if (!process_transaction(FROM_I2C_ENGINE(engine), request)) - return; - - if (is_hw_busy(&engine->base)) { - request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY; - return; - } - - execute_transaction(FROM_I2C_ENGINE(engine)); -} - -static void process_channel_reply( - struct i2c_engine *engine, - struct i2c_reply_transaction_data *reply) -{ - uint32_t length = reply->length; - uint8_t *buffer = reply->data; - - uint32_t value = 0; - - /*set index*/ - set_reg_field_value( - value, - length - 1, - DC_I2C_DATA, - DC_I2C_INDEX); - - set_reg_field_value( - value, - 1, - DC_I2C_DATA, - DC_I2C_DATA_RW); - - set_reg_field_value( - value, - 1, - DC_I2C_DATA, - DC_I2C_INDEX_WRITE); - - dm_write_reg(engine->base.ctx, mmDC_I2C_DATA, value); - - while (length) { - /* 
after reading the status, - * if the I2C operation executed successfully - * (i.e. DC_I2C_STATUS_DONE = 1) then the I2C controller - * should read data bytes from I2C circular data buffer */ - - value = dm_read_reg(engine->base.ctx, mmDC_I2C_DATA); - - *buffer++ = get_reg_field_value( - value, - DC_I2C_DATA, - DC_I2C_DATA); - - --length; - } -} - -static enum i2c_channel_operation_result get_channel_status( - struct i2c_engine *engine, - uint8_t *returned_bytes) -{ - uint32_t i2c_sw_status = 0; - uint32_t value = dm_read_reg(engine->base.ctx, mmDC_I2C_SW_STATUS); - - i2c_sw_status = get_reg_field_value( - value, - DC_I2C_SW_STATUS, - DC_I2C_SW_STATUS); - - if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) - return I2C_CHANNEL_OPERATION_ENGINE_BUSY; - else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK) - return I2C_CHANNEL_OPERATION_NO_RESPONSE; - else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK) - return I2C_CHANNEL_OPERATION_TIMEOUT; - else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK) - return I2C_CHANNEL_OPERATION_FAILED; - else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK) - return I2C_CHANNEL_OPERATION_SUCCEEDED; - - /* - * this is the case when HW used for communication, I2C_SW_STATUS - * could be zero - */ - return I2C_CHANNEL_OPERATION_SUCCEEDED; -} - -static uint32_t get_hw_buffer_available_size( - const struct i2c_hw_engine *engine) -{ - return I2C_HW_BUFFER_SIZE - - FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes; -} - -static uint32_t get_transaction_timeout( - const struct i2c_hw_engine *engine, - uint32_t length) -{ - uint32_t speed = engine->base.funcs->get_speed(&engine->base); - - uint32_t period_timeout; - uint32_t num_of_clock_stretches; - - if (!speed) - return 0; - - period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed; - - num_of_clock_stretches = 1 + (length << 3) + 1; - num_of_clock_stretches += - (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) + - (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1); - - return period_timeout * num_of_clock_stretches; -} - -/* - * @brief - * DC_I2C_DDC1_SETUP MM register offsets - * - * @note - * The indices of this offset array are DDC engine IDs - */ -static const int32_t ddc_setup_offset[] = { - - mmDC_I2C_DDC1_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 1 */ - mmDC_I2C_DDC2_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 2 */ - mmDC_I2C_DDC3_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 3 */ - mmDC_I2C_DDC4_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 4 */ - mmDC_I2C_DDC5_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 5 */ - mmDC_I2C_DDC6_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 6 */ - mmDC_I2C_DDCVGA_SETUP - mmDC_I2C_DDC1_SETUP /* DDC Engine 7 */ -}; - -/* - * @brief - * DC_I2C_DDC1_SPEED MM register offsets - * - * @note - * The indices of this offset array are DDC engine IDs - */ -static const int32_t ddc_speed_offset[] = { - mmDC_I2C_DDC1_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 1 */ - mmDC_I2C_DDC2_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 2 */ - mmDC_I2C_DDC3_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 3 */ - mmDC_I2C_DDC4_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 4 */ - mmDC_I2C_DDC5_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 5 */ - mmDC_I2C_DDC6_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 6 */ - mmDC_I2C_DDCVGA_SPEED - mmDC_I2C_DDC1_SPEED /* DDC Engine 7 */ -}; - -static const struct i2c_engine_funcs i2c_engine_funcs = { - .destroy = destroy, - .get_speed = get_speed, - .set_speed = set_speed, - .setup_engine = setup_engine, - .submit_channel_request = 
submit_channel_request, - .process_channel_reply = process_channel_reply, - .get_channel_status = get_channel_status, - .acquire_engine = dal_i2c_hw_engine_acquire_engine, -}; - -static const struct engine_funcs engine_funcs = { - .release_engine = release_engine, - .get_engine_type = dal_i2c_hw_engine_get_engine_type, - .acquire = dal_i2c_engine_acquire, - .submit_request = dal_i2c_hw_engine_submit_request, -}; - -static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = { - .get_hw_buffer_available_size = - get_hw_buffer_available_size, - .get_transaction_timeout = - get_transaction_timeout, - .wait_on_operation_result = - dal_i2c_hw_engine_wait_on_operation_result, -}; - -static void construct( - struct i2c_hw_engine_dce80 *engine, - const struct i2c_hw_engine_dce80_create_arg *arg) -{ - dal_i2c_hw_engine_construct(&engine->base, arg->ctx); - - engine->base.base.base.funcs = &engine_funcs; - engine->base.base.funcs = &i2c_engine_funcs; - engine->base.funcs = &i2c_hw_engine_funcs; - engine->base.default_speed = arg->default_speed; - engine->addr.DC_I2C_DDCX_SETUP = - mmDC_I2C_DDC1_SETUP + ddc_setup_offset[arg->engine_id]; - engine->addr.DC_I2C_DDCX_SPEED = - mmDC_I2C_DDC1_SPEED + ddc_speed_offset[arg->engine_id]; - - engine->engine_id = arg->engine_id; - engine->reference_frequency = arg->reference_frequency; - engine->buffer_used_bytes = 0; - engine->transaction_count = 0; - engine->engine_keep_power_up_count = 1; -} - -struct i2c_engine *dal_i2c_hw_engine_dce80_create( - const struct i2c_hw_engine_dce80_create_arg *arg) -{ - struct i2c_hw_engine_dce80 *engine; - - if (!arg) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - if ((arg->engine_id >= sizeof(ddc_setup_offset) / sizeof(int32_t)) || - (arg->engine_id >= sizeof(ddc_speed_offset) / sizeof(int32_t)) || - !arg->reference_frequency) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - engine = kzalloc(sizeof(struct i2c_hw_engine_dce80), GFP_KERNEL); - - if (!engine) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - construct(engine, arg); - return &engine->base.base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h deleted file mode 100644 index 5c6116fb5479..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
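/*
 * Illustrative note (editor's addition, not part of the deleted dce80
 * file): construct() above resolves each engine's register pair through the
 * ddc_setup_offset[]/ddc_speed_offset[] tables, e.g. for arg->engine_id == 1
 * (DDC engine 2):
 *
 *   addr.DC_I2C_DDCX_SETUP = mmDC_I2C_DDC1_SETUP
 *                          + (mmDC_I2C_DDC2_SETUP - mmDC_I2C_DDC1_SETUP)
 *                          = mmDC_I2C_DDC2_SETUP
 *
 * so every instance programs its own DC_I2C_DDCx_SETUP/SPEED registers while
 * the access code stays engine-agnostic.
 */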
- * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_HW_ENGINE_DCE80_H__ -#define __DAL_I2C_HW_ENGINE_DCE80_H__ - -struct i2c_hw_engine_dce80 { - struct i2c_hw_engine base; - struct { - uint32_t DC_I2C_DDCX_SETUP; - uint32_t DC_I2C_DDCX_SPEED; - } addr; - uint32_t engine_id; - /* expressed in kilohertz */ - uint32_t reference_frequency; - /* number of bytes currently used in HW buffer */ - uint32_t buffer_used_bytes; - /* number of pending transactions (before GO) */ - uint32_t transaction_count; - uint32_t engine_keep_power_up_count; -}; - -struct i2c_hw_engine_dce80_create_arg { - uint32_t engine_id; - uint32_t reference_frequency; - uint32_t default_speed; - struct dc_context *ctx; -}; - -struct i2c_engine *dal_i2c_hw_engine_dce80_create( - const struct i2c_hw_engine_dce80_create_arg *arg); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c deleted file mode 100644 index 4853ee26096a..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
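/*
 * Illustrative sketch (editor's addition, not part of the deleted dce80
 * files): reference_frequency is kept in kilohertz, and the get_speed()/
 * set_speed() helpers deleted above map it to the DC_I2C_DDC1_PRESCALE
 * divider as speed = reference_frequency / prescale.  The 13.5 MHz
 * reference (the XTALIN/2 note in i2caux_dce80.c) and the 50 kHz bus speed
 * below are assumed values used only to make the arithmetic concrete.
 */
#if 0	/* stand-alone demonstration only */
static inline uint32_t example_prescale(uint32_t ref_khz, uint32_t speed_khz)
{
	return ref_khz / speed_khz;			/* 13500 / 50  == 270 */
}

static inline uint32_t example_speed_khz(uint32_t ref_khz, uint32_t prescale)
{
	return prescale ? ref_khz / prescale : 0;	/* 13500 / 270 == 50 */
}
#endif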
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" - -/* - * Header of this unit - */ - -#include "i2c_sw_engine_dce80.h" - -/* - * Post-requisites: headers required by this unit - */ - -#include "dce/dce_8_0_d.h" -#include "dce/dce_8_0_sh_mask.h" - -/* - * This unit - */ - -static const uint32_t ddc_hw_status_addr[] = { - mmDC_I2C_DDC1_HW_STATUS, - mmDC_I2C_DDC2_HW_STATUS, - mmDC_I2C_DDC3_HW_STATUS, - mmDC_I2C_DDC4_HW_STATUS, - mmDC_I2C_DDC5_HW_STATUS, - mmDC_I2C_DDC6_HW_STATUS, - mmDC_I2C_DDCVGA_HW_STATUS -}; - -/* - * @brief - * Cast 'struct i2c_sw_engine *' - * to 'struct i2c_sw_engine_dce80 *' - */ -#define FROM_I2C_SW_ENGINE(ptr) \ - container_of((ptr), struct i2c_sw_engine_dce80, base) - -/* - * @brief - * Cast 'struct i2c_engine *' - * to 'struct i2c_sw_engine_dce80 *' - */ -#define FROM_I2C_ENGINE(ptr) \ - FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base)) - -/* - * @brief - * Cast 'struct engine *' - * to 'struct i2c_sw_engine_dce80 *' - */ -#define FROM_ENGINE(ptr) \ - FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base)) - -static void release_engine( - struct engine *engine) -{ - -} - -static void destruct( - struct i2c_sw_engine_dce80 *engine) -{ - dal_i2c_sw_engine_destruct(&engine->base); -} - -static void destroy( - struct i2c_engine **engine) -{ - struct i2c_sw_engine_dce80 *sw_engine = FROM_I2C_ENGINE(*engine); - - destruct(sw_engine); - - kfree(sw_engine); - - *engine = NULL; -} - -static bool acquire_engine( - struct i2c_engine *engine, - struct ddc *ddc_handle) -{ - return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle); -} - -static const struct i2c_engine_funcs i2c_engine_funcs = { - .acquire_engine = acquire_engine, - .destroy = destroy, - .get_speed = dal_i2c_sw_engine_get_speed, - .set_speed = dal_i2c_sw_engine_set_speed, - .setup_engine = dal_i2c_engine_setup_i2c_engine, - .submit_channel_request = dal_i2c_sw_engine_submit_channel_request, - .process_channel_reply = dal_i2c_engine_process_channel_reply, - .get_channel_status = dal_i2c_sw_engine_get_channel_status, -}; - -static const struct engine_funcs engine_funcs = { - .release_engine = release_engine, - .get_engine_type = dal_i2c_sw_engine_get_engine_type, - .acquire = dal_i2c_engine_acquire, - .submit_request = dal_i2c_sw_engine_submit_request, -}; - -static void construct( - struct i2c_sw_engine_dce80 *engine, - const struct i2c_sw_engine_dce80_create_arg *arg) -{ - struct i2c_sw_engine_create_arg arg_base; - - arg_base.ctx = arg->ctx; - arg_base.default_speed = arg->default_speed; - - dal_i2c_sw_engine_construct(&engine->base, &arg_base); - - engine->base.base.base.funcs = &engine_funcs; - engine->base.base.funcs = &i2c_engine_funcs; - engine->base.default_speed = arg->default_speed; - engine->engine_id = arg->engine_id; -} - -struct i2c_engine *dal_i2c_sw_engine_dce80_create( - const struct i2c_sw_engine_dce80_create_arg *arg) -{ - struct i2c_sw_engine_dce80 *engine; - - if (!arg) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - engine = kzalloc(sizeof(struct i2c_sw_engine_dce80), GFP_KERNEL); - - if (!engine) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - construct(engine, arg); - return &engine->base.base; -} - diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c deleted file mode 100644 
index ed48596dd2a5..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "../i2caux.h" - -/* - * Header of this unit - */ - -#include "i2caux_dce80.h" - -/* - * Post-requisites: headers required by this unit - */ - -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" -#include "i2c_sw_engine_dce80.h" -#include "../i2c_hw_engine.h" -#include "i2c_hw_engine_dce80.h" -#include "../i2c_generic_hw_engine.h" -#include "../aux_engine.h" - - -#include "../dce110/aux_engine_dce110.h" -#include "../dce110/i2caux_dce110.h" - -#include "dce/dce_8_0_d.h" -#include "dce/dce_8_0_sh_mask.h" - - -/* set register offset */ -#define SR(reg_name)\ - .reg_name = mm ## reg_name - -/* set register offset with instance */ -#define SRI(reg_name, block, id)\ - .reg_name = mm ## block ## id ## _ ## reg_name - -#define aux_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = 0 \ -} - -static const struct dce110_aux_registers dce80_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) -}; - -/* - * This unit - */ - -#define FROM_I2C_AUX(ptr) \ - container_of((ptr), struct i2caux_dce80, base) - -static void destruct( - struct i2caux_dce80 *i2caux_dce80) -{ - dal_i2caux_destruct(&i2caux_dce80->base); -} - -static void destroy( - struct i2caux **i2c_engine) -{ - struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(*i2c_engine); - - destruct(i2caux_dce80); - - kfree(i2caux_dce80); - - *i2c_engine = NULL; -} - -static struct i2c_engine *acquire_i2c_hw_engine( - struct i2caux *i2caux, - struct ddc *ddc) -{ - struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(i2caux); - - struct i2c_engine *engine = NULL; - bool non_generic; - - if (!ddc) - return NULL; - - if (ddc->hw_info.hw_supported) { - enum gpio_ddc_line line = dal_ddc_get_line(ddc); - - if (line < GPIO_DDC_LINE_COUNT) { - non_generic = true; - engine = i2caux->i2c_hw_engines[line]; - } - } - - if (!engine) { - non_generic = false; - engine = i2caux->i2c_generic_hw_engine; - } - - if (!engine) - return NULL; - - if (non_generic) { - if (!i2caux_dce80->i2c_hw_buffer_in_use && - engine->base.funcs->acquire(&engine->base, 
ddc)) { - i2caux_dce80->i2c_hw_buffer_in_use = true; - return engine; - } - } else { - if (engine->base.funcs->acquire(&engine->base, ddc)) - return engine; - } - - return NULL; -} - -static void release_engine( - struct i2caux *i2caux, - struct engine *engine) -{ - if (engine->funcs->get_engine_type(engine) == - I2CAUX_ENGINE_TYPE_I2C_DDC_HW) - FROM_I2C_AUX(i2caux)->i2c_hw_buffer_in_use = false; - - dal_i2caux_release_engine(i2caux, engine); -} - -static const enum gpio_ddc_line hw_ddc_lines[] = { - GPIO_DDC_LINE_DDC1, - GPIO_DDC_LINE_DDC2, - GPIO_DDC_LINE_DDC3, - GPIO_DDC_LINE_DDC4, - GPIO_DDC_LINE_DDC5, - GPIO_DDC_LINE_DDC6, - GPIO_DDC_LINE_DDC_VGA -}; - -static const enum gpio_ddc_line hw_aux_lines[] = { - GPIO_DDC_LINE_DDC1, - GPIO_DDC_LINE_DDC2, - GPIO_DDC_LINE_DDC3, - GPIO_DDC_LINE_DDC4, - GPIO_DDC_LINE_DDC5, - GPIO_DDC_LINE_DDC6 -}; - -static const struct i2caux_funcs i2caux_funcs = { - .destroy = destroy, - .acquire_i2c_hw_engine = acquire_i2c_hw_engine, - .release_engine = release_engine, - .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine, - .acquire_aux_engine = dal_i2caux_acquire_aux_engine, -}; - -static void construct( - struct i2caux_dce80 *i2caux_dce80, - struct dc_context *ctx) -{ - /* Entire family have I2C engine reference clock frequency - * changed from XTALIN (27) to XTALIN/2 (13.5) */ - - struct i2caux *base = &i2caux_dce80->base; - - uint32_t reference_frequency = - dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1; - - /*bool use_i2c_sw_engine = dal_adapter_service_is_feature_supported(as, - FEATURE_RESTORE_USAGE_I2C_SW_ENGINE);*/ - - /* Use SWI2C for dce8 currently, sicne we have bug with hwi2c */ - bool use_i2c_sw_engine = true; - - uint32_t i; - - dal_i2caux_construct(base, ctx); - - i2caux_dce80->base.funcs = &i2caux_funcs; - i2caux_dce80->i2c_hw_buffer_in_use = false; - - /* Create I2C HW engines (HW + SW pairs) - * for all lines which has assisted HW DDC - * 'i' (loop counter) used as DDC/AUX engine_id */ - - i = 0; - - do { - enum gpio_ddc_line line_id = hw_ddc_lines[i]; - - struct i2c_hw_engine_dce80_create_arg hw_arg; - - if (use_i2c_sw_engine) { - struct i2c_sw_engine_dce80_create_arg sw_arg; - - sw_arg.engine_id = i; - sw_arg.default_speed = base->default_i2c_sw_speed; - sw_arg.ctx = ctx; - base->i2c_sw_engines[line_id] = - dal_i2c_sw_engine_dce80_create(&sw_arg); - } - - hw_arg.engine_id = i; - hw_arg.reference_frequency = reference_frequency; - hw_arg.default_speed = base->default_i2c_hw_speed; - hw_arg.ctx = ctx; - - base->i2c_hw_engines[line_id] = - dal_i2c_hw_engine_dce80_create(&hw_arg); - - ++i; - } while (i < ARRAY_SIZE(hw_ddc_lines)); - - /* Create AUX engines for all lines which has assisted HW AUX - * 'i' (loop counter) used as DDC/AUX engine_id */ - - i = 0; - - do { - enum gpio_ddc_line line_id = hw_aux_lines[i]; - - struct aux_engine_dce110_init_data arg; - - arg.engine_id = i; - arg.timeout_period = base->aux_timeout_period; - arg.ctx = ctx; - arg.regs = &dce80_aux_regs[i]; - - base->aux_engines[line_id] = - dal_aux_engine_dce110_create(&arg); - - ++i; - } while (i < ARRAY_SIZE(hw_aux_lines)); - - /* TODO Generic I2C SW and HW */ -} - -struct i2caux *dal_i2caux_dce80_create( - struct dc_context *ctx) -{ - struct i2caux_dce80 *i2caux_dce80 = - kzalloc(sizeof(struct i2caux_dce80), GFP_KERNEL); - - if (!i2caux_dce80) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - construct(i2caux_dce80, ctx); - return &i2caux_dce80->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c 
b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c deleted file mode 100644 index a59c1f50c1e8..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "include/i2caux_interface.h" -#include "../i2caux.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" -#include "../i2c_hw_engine.h" - -#include "../dce110/aux_engine_dce110.h" -#include "../dce110/i2c_hw_engine_dce110.h" -#include "../dce110/i2caux_dce110.h" - -#include "dcn/dcn_1_0_offset.h" -#include "dcn/dcn_1_0_sh_mask.h" -#include "soc15_hw_ip.h" -#include "vega10_ip_offset.h" - -/* begin ********************* - * macros to expend register list macro defined in HW object header file */ - -#define BASE_INNER(seg) \ - DCE_BASE__INST0_SEG ## seg - -/* compile time expand base address. 
*/ -#define BASE(seg) \ - BASE_INNER(seg) - -#define SR(reg_name)\ - .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \ - mm ## reg_name - -#define SRI(reg_name, block, id)\ - .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \ - mm ## block ## id ## _ ## reg_name -/* macros to expend register list macro defined in HW object header file - * end *********************/ - -#define aux_regs(id)\ -[id] = {\ - AUX_COMMON_REG_LIST(id), \ - .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \ -} - -#define hw_engine_regs(id)\ -{\ - I2C_HW_ENGINE_COMMON_REG_LIST(id) \ -} - -static const struct dce110_aux_registers dcn10_aux_regs[] = { - aux_regs(0), - aux_regs(1), - aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5), -}; - -static const struct dce110_i2c_hw_engine_registers dcn10_hw_engine_regs[] = { - hw_engine_regs(1), - hw_engine_regs(2), - hw_engine_regs(3), - hw_engine_regs(4), - hw_engine_regs(5), - hw_engine_regs(6) -}; - -static const struct dce110_i2c_hw_engine_shift i2c_shift = { - I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT) -}; - -static const struct dce110_i2c_hw_engine_mask i2c_mask = { - I2C_COMMON_MASK_SH_LIST_DCE110(_MASK) -}; - -struct i2caux *dal_i2caux_dcn10_create( - struct dc_context *ctx) -{ - struct i2caux_dce110 *i2caux_dce110 = - kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL); - - if (!i2caux_dce110) { - ASSERT_CRITICAL(false); - return NULL; - } - - dal_i2caux_dce110_construct(i2caux_dce110, - ctx, - ARRAY_SIZE(dcn10_aux_regs), - dcn10_aux_regs, - dcn10_hw_engine_regs, - &i2c_shift, - &i2c_mask); - return &i2caux_dce110->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h deleted file mode 100644 index aeb4a86463d4..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_AUX_DCN10_H__ -#define __DAL_I2C_AUX_DCN10_H__ - -struct i2caux *dal_i2caux_dcn10_create( - struct dc_context *ctx); - -#endif /* __DAL_I2C_AUX_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c deleted file mode 100644 index e6408f644086..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2012-16 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "../i2caux.h" -#include "../engine.h" -#include "../i2c_engine.h" -#include "../i2c_sw_engine.h" -#include "../i2c_hw_engine.h" - -/* - * Header of this unit - */ -#include "i2caux_diag.h" - -/* - * Post-requisites: headers required by this unit - */ - -/* - * This unit - */ - -static void destruct( - struct i2caux *i2caux) -{ - dal_i2caux_destruct(i2caux); -} - -static void destroy( - struct i2caux **i2c_engine) -{ - destruct(*i2c_engine); - - kfree(*i2c_engine); - - *i2c_engine = NULL; -} - -/* function table */ -static const struct i2caux_funcs i2caux_funcs = { - .destroy = destroy, - .acquire_i2c_hw_engine = NULL, - .release_engine = NULL, - .acquire_i2c_sw_engine = NULL, - .acquire_aux_engine = NULL, -}; - -static void construct( - struct i2caux *i2caux, - struct dc_context *ctx) -{ - dal_i2caux_construct(i2caux, ctx); - i2caux->funcs = &i2caux_funcs; -} - -struct i2caux *dal_i2caux_diag_fpga_create( - struct dc_context *ctx) -{ - struct i2caux *i2caux = kzalloc(sizeof(struct i2caux), - GFP_KERNEL); - - if (!i2caux) { - ASSERT_CRITICAL(false); - return NULL; - } - - construct(i2caux, ctx); - return i2caux; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h deleted file mode 100644 index a83eeb748283..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2012-16 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_AUX_DIAG_FPGA_H__ -#define __DAL_I2C_AUX_DIAG_FPGA_H__ - -struct i2caux *dal_i2caux_diag_fpga_create( - struct dc_context *ctx); - -#endif /* __DAL_I2C_AUX_DIAG_FPGA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h deleted file mode 100644 index b16fb1ff687d..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DAL_ENGINE_H__ -#define __DAL_ENGINE_H__ - -#include "dc_ddc_types.h" - -enum i2caux_transaction_operation { - I2CAUX_TRANSACTION_READ, - I2CAUX_TRANSACTION_WRITE -}; - -enum i2caux_transaction_address_space { - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1, - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD -}; - -struct i2caux_transaction_payload { - enum i2caux_transaction_address_space address_space; - uint32_t address; - uint32_t length; - uint8_t *data; -}; - -enum i2caux_transaction_status { - I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L), - I2CAUX_TRANSACTION_STATUS_SUCCEEDED, - I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY, - I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT, - I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR, - I2CAUX_TRANSACTION_STATUS_FAILED_NACK, - I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE, - I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION, - I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION, - I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW, - I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON -}; - -struct i2caux_transaction_request { - enum i2caux_transaction_operation operation; - struct i2caux_transaction_payload payload; - enum i2caux_transaction_status status; -}; - -enum i2caux_engine_type { - I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L), - I2CAUX_ENGINE_TYPE_AUX, - I2CAUX_ENGINE_TYPE_I2C_DDC_HW, - I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW, - I2CAUX_ENGINE_TYPE_I2C_SW -}; - -enum i2c_default_speed { - I2CAUX_DEFAULT_I2C_HW_SPEED = 50, - I2CAUX_DEFAULT_I2C_SW_SPEED = 50 -}; - -struct engine; - -struct engine_funcs { - enum i2caux_engine_type (*get_engine_type)( - const struct engine *engine); - bool (*acquire)( - struct engine *engine, - struct ddc *ddc); - bool (*submit_request)( - struct engine *engine, - struct i2caux_transaction_request *request, - bool middle_of_transaction); - void (*release_engine)( - struct engine *engine); -}; - -struct engine { - const struct engine_funcs *funcs; - uint32_t inst; - struct ddc *ddc; - struct dc_context *ctx; -}; - -void dal_i2caux_construct_engine( - struct engine *engine, - struct dc_context *ctx); - -void dal_i2caux_destruct_engine( - struct engine *engine); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c deleted file mode 100644 index 70e20bd47ce4..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
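/*
 * Illustrative sketch (editor's addition, not part of the deleted files):
 * how a caller might fill struct i2caux_transaction_request from engine.h
 * above for a two-byte I2C read through an already-acquired engine.  The
 * 0xA1 address and the helper name are hypothetical.
 */
#if 0	/* stand-alone demonstration only */
static bool example_read_two_bytes(struct engine *engine, uint8_t *buf)
{
	struct i2caux_transaction_request request = {
		.operation = I2CAUX_TRANSACTION_READ,
		.payload = {
			.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C,
			.address = 0xA1,	/* example 8-bit read address */
			.length = 2,
			.data = buf,
		},
	};

	/* 'false': this is the last (or only) request of the transaction */
	return engine->funcs->submit_request(engine, &request, false);
}
#endif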
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "engine.h" - -/* - * Header of this unit - */ - -#include "i2c_engine.h" - -/* - * Post-requisites: headers required by this unit - */ - -/* - * This unit - */ - -#define FROM_ENGINE(ptr) \ - container_of((ptr), struct i2c_engine, base) - -bool dal_i2c_engine_acquire( - struct engine *engine, - struct ddc *ddc_handle) -{ - struct i2c_engine *i2c_engine = FROM_ENGINE(engine); - - uint32_t counter = 0; - bool result; - - do { - result = i2c_engine->funcs->acquire_engine( - i2c_engine, ddc_handle); - - if (result) - break; - - /* i2c_engine is busy by VBios, lets wait and retry */ - - udelay(10); - - ++counter; - } while (counter < 2); - - if (result) { - if (!i2c_engine->funcs->setup_engine(i2c_engine)) { - engine->funcs->release_engine(engine); - result = false; - } - } - - return result; -} - -bool dal_i2c_engine_setup_i2c_engine( - struct i2c_engine *engine) -{ - /* Derivative classes do not have to override this */ - - return true; -} - -void dal_i2c_engine_submit_channel_request( - struct i2c_engine *engine, - struct i2c_request_transaction_data *request) -{ - -} - -void dal_i2c_engine_process_channel_reply( - struct i2c_engine *engine, - struct i2c_reply_transaction_data *reply) -{ - -} - -void dal_i2c_engine_construct( - struct i2c_engine *engine, - struct dc_context *ctx) -{ - dal_i2caux_construct_engine(&engine->base, ctx); - engine->timeout_delay = 0; -} - -void dal_i2c_engine_destruct( - struct i2c_engine *engine) -{ - dal_i2caux_destruct_engine(&engine->base); -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h deleted file mode 100644 index ded6ea34b714..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
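/*
 * Illustrative sketch (editor's addition, not part of the deleted files):
 * dal_i2c_engine_acquire() above already hides the VBIOS busy retry
 * (udelay plus a second attempt) and the setup_engine() call, so a caller
 * only brackets its transfer with acquire and release.  example_transfer()
 * is a hypothetical helper.
 */
#if 0	/* stand-alone demonstration only */
static bool example_transfer(struct engine *engine, struct ddc *ddc_handle,
			     struct i2caux_transaction_request *request)
{
	bool ok;

	if (!dal_i2c_engine_acquire(engine, ddc_handle))
		return false;	/* still busy after the short retry window */

	ok = engine->funcs->submit_request(engine, request, false);

	engine->funcs->release_engine(engine);
	return ok;
}
#endif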
- * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_ENGINE_H__ -#define __DAL_I2C_ENGINE_H__ - -enum i2c_channel_operation_result { - I2C_CHANNEL_OPERATION_SUCCEEDED, - I2C_CHANNEL_OPERATION_FAILED, - I2C_CHANNEL_OPERATION_NOT_GRANTED, - I2C_CHANNEL_OPERATION_IS_BUSY, - I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED, - I2C_CHANNEL_OPERATION_CHANNEL_IN_USE, - I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED, - I2C_CHANNEL_OPERATION_ENGINE_BUSY, - I2C_CHANNEL_OPERATION_TIMEOUT, - I2C_CHANNEL_OPERATION_NO_RESPONSE, - I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS, - I2C_CHANNEL_OPERATION_WRONG_PARAMETER, - I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES, - I2C_CHANNEL_OPERATION_NOT_STARTED -}; - -struct i2c_request_transaction_data { - enum i2caux_transaction_action action; - enum i2c_channel_operation_result status; - uint8_t address; - uint32_t length; - uint8_t *data; -}; - -struct i2c_reply_transaction_data { - uint32_t length; - uint8_t *data; -}; - -struct i2c_engine; - -struct i2c_engine_funcs { - void (*destroy)( - struct i2c_engine **ptr); - uint32_t (*get_speed)( - const struct i2c_engine *engine); - void (*set_speed)( - struct i2c_engine *engine, - uint32_t speed); - bool (*acquire_engine)( - struct i2c_engine *engine, - struct ddc *ddc); - bool (*setup_engine)( - struct i2c_engine *engine); - void (*submit_channel_request)( - struct i2c_engine *engine, - struct i2c_request_transaction_data *request); - void (*process_channel_reply)( - struct i2c_engine *engine, - struct i2c_reply_transaction_data *reply); - enum i2c_channel_operation_result (*get_channel_status)( - struct i2c_engine *engine, - uint8_t *returned_bytes); -}; - -struct i2c_engine { - struct engine base; - const struct i2c_engine_funcs *funcs; - uint32_t timeout_delay; - uint32_t setup_limit; - uint32_t send_reset_length; -}; - -void dal_i2c_engine_construct( - struct i2c_engine *engine, - struct dc_context *ctx); - -void dal_i2c_engine_destruct( - struct i2c_engine *engine); - -bool dal_i2c_engine_setup_i2c_engine( - struct i2c_engine *engine); - -void dal_i2c_engine_submit_channel_request( - struct i2c_engine *engine, - struct i2c_request_transaction_data *request); - -void dal_i2c_engine_process_channel_reply( - struct i2c_engine *engine, - struct i2c_reply_transaction_data *reply); - -bool dal_i2c_engine_acquire( - struct engine *ptr, - struct ddc *ddc_handle); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c deleted file mode 100644 index 5a4295e0fae5..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "engine.h" -#include "i2c_engine.h" -#include "i2c_hw_engine.h" - -/* - * Header of this unit - */ - -#include "i2c_generic_hw_engine.h" - -/* - * Post-requisites: headers required by this unit - */ - -/* - * This unit - */ - -/* - * @brief - * Cast 'struct i2c_hw_engine *' - * to 'struct i2c_generic_hw_engine *' - */ -#define FROM_I2C_HW_ENGINE(ptr) \ - container_of((ptr), struct i2c_generic_hw_engine, base) - -/* - * @brief - * Cast 'struct i2c_engine *' - * to 'struct i2c_generic_hw_engine *' - */ -#define FROM_I2C_ENGINE(ptr) \ - FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base)) - -/* - * @brief - * Cast 'struct engine *' - * to 'struct i2c_generic_hw_engine *' - */ -#define FROM_ENGINE(ptr) \ - FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base)) - -enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type( - const struct engine *engine) -{ - return I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW; -} - -/* - * @brief - * Single transaction handling. - * Since transaction may be bigger than HW buffer size, - * it divides transaction to sub-transactions - * and uses batch transaction feature of the engine. - */ -bool dal_i2c_generic_hw_engine_submit_request( - struct engine *engine, - struct i2caux_transaction_request *i2caux_request, - bool middle_of_transaction) -{ - struct i2c_generic_hw_engine *hw_engine = FROM_ENGINE(engine); - - struct i2c_hw_engine *base = &hw_engine->base; - - uint32_t max_payload_size = - base->funcs->get_hw_buffer_available_size(base); - - bool initial_stop_bit = !middle_of_transaction; - - struct i2c_generic_transaction_attributes attributes; - - enum i2c_channel_operation_result operation_result = - I2C_CHANNEL_OPERATION_FAILED; - - bool result = false; - - /* setup transaction initial properties */ - - uint8_t address = i2caux_request->payload.address; - uint8_t *current_payload = i2caux_request->payload.data; - uint32_t remaining_payload_size = i2caux_request->payload.length; - - bool first_iteration = true; - - if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) - attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_READ; - else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) - attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE; - else { - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION; - return false; - } - - /* Do batch transaction. - * Divide read/write data into payloads which fit HW buffer size. - * 1. Single transaction: - * start_bit = 1, stop_bit depends on session state, ack_on_read = 0; - * 2. Start of batch transaction: - * start_bit = 1, stop_bit = 0, ack_on_read = 1; - * 3. Middle of batch transaction: - * start_bit = 0, stop_bit = 0, ack_on_read = 1; - * 4. End of batch transaction: - * start_bit = 0, stop_bit depends on session state, ack_on_read = 0. - * Session stop bit is set if 'middle_of_transaction' = 0. */ - - while (remaining_payload_size) { - uint32_t current_transaction_size; - uint32_t current_payload_size; - - bool last_iteration; - bool stop_bit; - - /* Calculate current transaction size and payload size. 
- * Transaction size = total number of bytes in transaction, - * including slave's address; - * Payload size = number of data bytes in transaction. */ - - if (first_iteration) { - /* In the first sub-transaction we send slave's address - * thus we need to reserve one byte for it */ - current_transaction_size = - (remaining_payload_size > max_payload_size - 1) ? - max_payload_size : - remaining_payload_size + 1; - - current_payload_size = current_transaction_size - 1; - } else { - /* Second and further sub-transactions will have - * entire buffer reserved for data */ - current_transaction_size = - (remaining_payload_size > max_payload_size) ? - max_payload_size : - remaining_payload_size; - - current_payload_size = current_transaction_size; - } - - last_iteration = - (remaining_payload_size == current_payload_size); - - stop_bit = last_iteration ? initial_stop_bit : false; - - /* write slave device address */ - - if (first_iteration) - hw_engine->funcs->write_address(hw_engine, address); - - /* write current portion of data, if requested */ - - if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) - hw_engine->funcs->write_data( - hw_engine, - current_payload, - current_payload_size); - - /* execute transaction */ - - attributes.start_bit = first_iteration; - attributes.stop_bit = stop_bit; - attributes.last_read = last_iteration; - attributes.transaction_size = current_transaction_size; - - hw_engine->funcs->execute_transaction(hw_engine, &attributes); - - /* wait until transaction is processed; if it fails - quit */ - - operation_result = base->funcs->wait_on_operation_result( - base, - base->funcs->get_transaction_timeout( - base, current_transaction_size), - I2C_CHANNEL_OPERATION_ENGINE_BUSY); - - if (operation_result != I2C_CHANNEL_OPERATION_SUCCEEDED) - break; - - /* read current portion of data, if requested */ - - /* the read offset should be 1 for first sub-transaction, - * and 0 for any next one */ - - if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) - hw_engine->funcs->read_data(hw_engine, current_payload, - current_payload_size, first_iteration ? 
1 : 0); - - /* update loop variables */ - - first_iteration = false; - current_payload += current_payload_size; - remaining_payload_size -= current_payload_size; - } - - /* update transaction status */ - - switch (operation_result) { - case I2C_CHANNEL_OPERATION_SUCCEEDED: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_SUCCEEDED; - result = true; - break; - case I2C_CHANNEL_OPERATION_NO_RESPONSE: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_NACK; - break; - case I2C_CHANNEL_OPERATION_TIMEOUT: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - break; - case I2C_CHANNEL_OPERATION_FAILED: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE; - break; - default: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION; - } - - return result; -} - -/* - * @brief - * Returns number of microseconds to wait until timeout to be considered - */ -uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout( - const struct i2c_hw_engine *engine, - uint32_t length) -{ - const struct i2c_engine *base = &engine->base; - - uint32_t speed = base->funcs->get_speed(base); - - if (!speed) - return 0; - - /* total timeout = period_timeout * (start + data bits count + stop) */ - - return ((1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed) * - (1 + (length << 3) + 1); -} - -void dal_i2c_generic_hw_engine_construct( - struct i2c_generic_hw_engine *engine, - struct dc_context *ctx) -{ - dal_i2c_hw_engine_construct(&engine->base, ctx); -} - -void dal_i2c_generic_hw_engine_destruct( - struct i2c_generic_hw_engine *engine) -{ - dal_i2c_hw_engine_destruct(&engine->base); -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h deleted file mode 100644 index 1da0397b04a2..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
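Looking back at dal_i2c_generic_hw_engine_submit_request() above, the sub-transaction splitting it performs (the four start/stop/ack cases listed in its comment) can be reproduced in isolation. The sketch below is illustrative only: the buffer size and payload length are made-up example values, not anything queried from hardware.

/* Stand-alone sketch of the batch splitting: first chunk reserves one byte
 * for the slave address, later chunks use the whole buffer, and the stop bit
 * is only sent on the last chunk of a non-MOT session. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t max = 64;           /* hypothetical HW buffer size, bytes */
        uint32_t remaining = 150;    /* hypothetical payload length */
        bool first = true;
        bool session_stop = true;    /* i.e. !middle_of_transaction */

        while (remaining) {
                uint32_t txn = first ?
                        (remaining > max - 1 ? max : remaining + 1) :
                        (remaining > max ? max : remaining);
                uint32_t payload = first ? txn - 1 : txn;
                bool last = (remaining == payload);

                printf("start=%d stop=%d bytes=%u\n",
                       first, last ? session_stop : false,
                       (unsigned)payload);

                first = false;
                remaining -= payload;
        }
        return 0;   /* prints 63, 64, 23 byte chunks for this example */
}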
- * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_GENERIC_HW_ENGINE_H__ -#define __DAL_I2C_GENERIC_HW_ENGINE_H__ - -struct i2c_generic_transaction_attributes { - enum i2caux_transaction_action action; - uint32_t transaction_size; - bool start_bit; - bool stop_bit; - bool last_read; -}; - -struct i2c_generic_hw_engine; - -struct i2c_generic_hw_engine_funcs { - void (*write_address)( - struct i2c_generic_hw_engine *engine, - uint8_t address); - void (*write_data)( - struct i2c_generic_hw_engine *engine, - const uint8_t *buffer, - uint32_t length); - void (*read_data)( - struct i2c_generic_hw_engine *engine, - uint8_t *buffer, - uint32_t length, - uint32_t offset); - void (*execute_transaction)( - struct i2c_generic_hw_engine *engine, - struct i2c_generic_transaction_attributes *attributes); -}; - -struct i2c_generic_hw_engine { - struct i2c_hw_engine base; - const struct i2c_generic_hw_engine_funcs *funcs; -}; - -void dal_i2c_generic_hw_engine_construct( - struct i2c_generic_hw_engine *engine, - struct dc_context *ctx); - -void dal_i2c_generic_hw_engine_destruct( - struct i2c_generic_hw_engine *engine); -enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type( - const struct engine *engine); -bool dal_i2c_generic_hw_engine_submit_request( - struct engine *ptr, - struct i2caux_transaction_request *i2caux_request, - bool middle_of_transaction); -uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout( - const struct i2c_hw_engine *engine, - uint32_t length); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c deleted file mode 100644 index 141898533e8e..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dm_event_log.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "engine.h" -#include "i2c_engine.h" - -/* - * Header of this unit - */ - -#include "i2c_hw_engine.h" - -/* - * Post-requisites: headers required by this unit - */ - -/* - * This unit - */ - -/* - * @brief - * Cast 'struct i2c_engine *' - * to 'struct i2c_hw_engine *' - */ -#define FROM_I2C_ENGINE(ptr) \ - container_of((ptr), struct i2c_hw_engine, base) - -/* - * @brief - * Cast 'struct engine *' - * to 'struct i2c_hw_engine *' - */ -#define FROM_ENGINE(ptr) \ - FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base)) - -enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type( - const struct engine *engine) -{ - return I2CAUX_ENGINE_TYPE_I2C_DDC_HW; -} - -bool dal_i2c_hw_engine_submit_request( - struct engine *engine, - struct i2caux_transaction_request *i2caux_request, - bool middle_of_transaction) -{ - struct i2c_hw_engine *hw_engine = FROM_ENGINE(engine); - - struct i2c_request_transaction_data request; - - uint32_t transaction_timeout; - - enum i2c_channel_operation_result operation_result; - - bool result = false; - - /* We need following: - * transaction length will not exceed - * the number of free bytes in HW buffer (minus one for address)*/ - - if (i2caux_request->payload.length >= - hw_engine->funcs->get_hw_buffer_available_size(hw_engine)) { - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW; - return false; - } - - if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) - request.action = middle_of_transaction ? - I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_READ; - else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) - request.action = middle_of_transaction ? 
- I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_WRITE; - else { - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION; - /* [anaumov] in DAL2, there was no "return false" */ - return false; - } - - request.address = (uint8_t)i2caux_request->payload.address; - request.length = i2caux_request->payload.length; - request.data = i2caux_request->payload.data; - - /* obtain timeout value before submitting request */ - - transaction_timeout = hw_engine->funcs->get_transaction_timeout( - hw_engine, i2caux_request->payload.length + 1); - - hw_engine->base.funcs->submit_channel_request( - &hw_engine->base, &request); - /* EVENT_LOG_AUX_REQ(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */ - /* request.action, request.address, request.length, request.data); */ - - if ((request.status == I2C_CHANNEL_OPERATION_FAILED) || - (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) { - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY; - return false; - } - - /* wait until transaction proceed */ - - operation_result = hw_engine->funcs->wait_on_operation_result( - hw_engine, - transaction_timeout, - I2C_CHANNEL_OPERATION_ENGINE_BUSY); - - /* update transaction status */ - - switch (operation_result) { - case I2C_CHANNEL_OPERATION_SUCCEEDED: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_SUCCEEDED; - result = true; - break; - case I2C_CHANNEL_OPERATION_NO_RESPONSE: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_NACK; - break; - case I2C_CHANNEL_OPERATION_TIMEOUT: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - break; - case I2C_CHANNEL_OPERATION_FAILED: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE; - break; - default: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION; - } - - if (result && (i2caux_request->operation == I2CAUX_TRANSACTION_READ)) { - struct i2c_reply_transaction_data reply; - - reply.data = i2caux_request->payload.data; - reply.length = i2caux_request->payload.length; - - hw_engine->base.funcs-> - process_channel_reply(&hw_engine->base, &reply); - /* EVENT_LOG_AUX_REP(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_I2C, */ - /* AUX_TRANSACTION_REPLY_I2C_ACK, reply.length, reply.data); */ - } - - - - return result; -} - -bool dal_i2c_hw_engine_acquire_engine( - struct i2c_engine *engine, - struct ddc *ddc) -{ - enum gpio_result result; - uint32_t current_speed; - - result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE, - GPIO_DDC_CONFIG_TYPE_MODE_I2C); - - if (result != GPIO_RESULT_OK) - return false; - - engine->base.ddc = ddc; - - current_speed = engine->funcs->get_speed(engine); - - if (current_speed) - FROM_I2C_ENGINE(engine)->original_speed = current_speed; - - return true; -} -/* - * @brief - * Queries in a loop for current engine status - * until retrieved status matches 'expected_result', or timeout occurs. - * Timeout given in microseconds - * and the status query frequency is also one per microsecond. 
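The timeout handed to this wait loop comes from get_transaction_timeout(); the generic HW engine removed earlier in this patch derives it from the bus speed and the transaction length (one start bit, eight data bits per byte, one stop/ack bit, scaled by TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32). A stand-alone sketch of that arithmetic, with made-up example values:

/* Mirrors dal_i2c_generic_hw_engine_get_transaction_timeout(): result is in
 * microseconds and is what the polling loop below counts down, one status
 * query per microsecond. Example inputs are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define TRANSACTION_TIMEOUT_IN_I2C_CLOCKS 32

static uint32_t transaction_timeout_us(uint32_t speed_khz, uint32_t length)
{
        if (!speed_khz)
                return 0;

        /* 32 clock periods (in us at speed_khz) times
         * (start bit + 8 data bits per byte + stop/ack bit) */
        return ((1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed_khz) *
                (1 + (length << 3) + 1);
}

int main(void)
{
        /* e.g. a 4-byte transaction on a 100 kHz bus: 320 * 34 = 10880 us */
        printf("%u us\n", (unsigned)transaction_timeout_us(100, 4));
        return 0;
}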
- */ -enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result( - struct i2c_hw_engine *engine, - uint32_t timeout, - enum i2c_channel_operation_result expected_result) -{ - enum i2c_channel_operation_result result; - uint32_t i = 0; - - if (!timeout) - return I2C_CHANNEL_OPERATION_SUCCEEDED; - - do { - result = engine->base.funcs->get_channel_status( - &engine->base, NULL); - - if (result != expected_result) - break; - - udelay(1); - - ++i; - } while (i < timeout); - - return result; -} - -void dal_i2c_hw_engine_construct( - struct i2c_hw_engine *engine, - struct dc_context *ctx) -{ - dal_i2c_engine_construct(&engine->base, ctx); - engine->original_speed = I2CAUX_DEFAULT_I2C_HW_SPEED; - engine->default_speed = I2CAUX_DEFAULT_I2C_HW_SPEED; -} - -void dal_i2c_hw_engine_destruct( - struct i2c_hw_engine *engine) -{ - dal_i2c_engine_destruct(&engine->base); -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h deleted file mode 100644 index 8936a994804a..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_HW_ENGINE_H__ -#define __DAL_I2C_HW_ENGINE_H__ - -enum { - TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32 -}; - -struct i2c_hw_engine; - -struct i2c_hw_engine_funcs { - uint32_t (*get_hw_buffer_available_size)( - const struct i2c_hw_engine *engine); - enum i2c_channel_operation_result (*wait_on_operation_result)( - struct i2c_hw_engine *engine, - uint32_t timeout, - enum i2c_channel_operation_result expected_result); - uint32_t (*get_transaction_timeout)( - const struct i2c_hw_engine *engine, - uint32_t length); -}; - -struct i2c_hw_engine { - struct i2c_engine base; - const struct i2c_hw_engine_funcs *funcs; - - /* Values below are in kilohertz */ - uint32_t original_speed; - uint32_t default_speed; -}; - -void dal_i2c_hw_engine_construct( - struct i2c_hw_engine *engine, - struct dc_context *ctx); - -void dal_i2c_hw_engine_destruct( - struct i2c_hw_engine *engine); - -enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result( - struct i2c_hw_engine *engine, - uint32_t timeout, - enum i2c_channel_operation_result expected_result); - -bool dal_i2c_hw_engine_acquire_engine( - struct i2c_engine *engine, - struct ddc *ddc); - -bool dal_i2c_hw_engine_submit_request( - struct engine *ptr, - struct i2caux_transaction_request *i2caux_request, - bool middle_of_transaction); - -enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type( - const struct engine *engine); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c deleted file mode 100644 index 8e19bb629394..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c +++ /dev/null @@ -1,601 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "engine.h" -#include "i2c_engine.h" - -/* - * Header of this unit - */ - -#include "i2c_sw_engine.h" - -/* - * Post-requisites: headers required by this unit - */ - -/* - * This unit - */ - -#define SCL false -#define SDA true - -static inline bool read_bit_from_ddc( - struct ddc *ddc, - bool data_nor_clock) -{ - uint32_t value = 0; - - if (data_nor_clock) - dal_gpio_get_value(ddc->pin_data, &value); - else - dal_gpio_get_value(ddc->pin_clock, &value); - - return (value != 0); -} - -static inline void write_bit_to_ddc( - struct ddc *ddc, - bool data_nor_clock, - bool bit) -{ - uint32_t value = bit ? 1 : 0; - - if (data_nor_clock) - dal_gpio_set_value(ddc->pin_data, value); - else - dal_gpio_set_value(ddc->pin_clock, value); -} - -static bool wait_for_scl_high( - struct dc_context *ctx, - struct ddc *ddc, - uint16_t clock_delay_div_4) -{ - uint32_t scl_retry = 0; - uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4; - - udelay(clock_delay_div_4); - - /* 3 milliseconds delay - * to wake up some displays from "low power" state. - */ - - do { - if (read_bit_from_ddc(ddc, SCL)) - return true; - - udelay(clock_delay_div_4); - - ++scl_retry; - } while (scl_retry <= scl_retry_max); - - return false; -} - -static bool start_sync( - struct dc_context *ctx, - struct ddc *ddc_handle, - uint16_t clock_delay_div_4) -{ - uint32_t retry = 0; - - /* The I2C communications start signal is: - * the SDA going low from high, while the SCL is high. */ - - write_bit_to_ddc(ddc_handle, SCL, true); - - udelay(clock_delay_div_4); - - do { - write_bit_to_ddc(ddc_handle, SDA, true); - - if (!read_bit_from_ddc(ddc_handle, SDA)) { - ++retry; - continue; - } - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SCL, true); - - if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4)) - break; - - write_bit_to_ddc(ddc_handle, SDA, false); - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SCL, false); - - udelay(clock_delay_div_4); - - return true; - } while (retry <= I2C_SW_RETRIES); - - return false; -} - -static bool stop_sync( - struct dc_context *ctx, - struct ddc *ddc_handle, - uint16_t clock_delay_div_4) -{ - uint32_t retry = 0; - - /* The I2C communications stop signal is: - * the SDA going high from low, while the SCL is high. 
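The START and STOP conditions described in these comments are plain I2C signalling: START is SDA falling while SCL is high, STOP is SDA rising while SCL is high. A minimal stand-alone sketch follows; set_scl()/set_sda() are hypothetical stand-ins for write_bit_to_ddc() on the clock and data pins, and the real driver additionally polls for SCL to actually reach high (wait_for_scl_high) and inserts clock_delay_div_4 delays between edges, which the sketch omits.

#include <stdio.h>

static void set_scl(int level) { printf("SCL=%d\n", level); }
static void set_sda(int level) { printf("SDA=%d\n", level); }

static void i2c_start(void)
{
        set_sda(1);
        set_scl(1);
        set_sda(0);     /* SDA high -> low while SCL high: START */
        set_scl(0);
}

static void i2c_stop(void)
{
        set_scl(0);
        set_sda(0);
        set_scl(1);
        set_sda(1);     /* SDA low -> high while SCL high: STOP */
}

int main(void)
{
        i2c_start();
        i2c_stop();
        return 0;
}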
*/ - - write_bit_to_ddc(ddc_handle, SCL, false); - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SDA, false); - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SCL, true); - - if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4)) - return false; - - write_bit_to_ddc(ddc_handle, SDA, true); - - do { - udelay(clock_delay_div_4); - - if (read_bit_from_ddc(ddc_handle, SDA)) - return true; - - ++retry; - } while (retry <= 2); - - return false; -} - -static bool write_byte( - struct dc_context *ctx, - struct ddc *ddc_handle, - uint16_t clock_delay_div_4, - uint8_t byte) -{ - int32_t shift = 7; - bool ack; - - /* bits are transmitted serially, starting from MSB */ - - do { - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1); - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SCL, true); - - if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4)) - return false; - - write_bit_to_ddc(ddc_handle, SCL, false); - - --shift; - } while (shift >= 0); - - /* The display sends ACK by preventing the SDA from going high - * after the SCL pulse we use to send our last data bit. - * If the SDA goes high after that bit, it's a NACK */ - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SDA, true); - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SCL, true); - - if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4)) - return false; - - /* read ACK bit */ - - ack = !read_bit_from_ddc(ddc_handle, SDA); - - udelay(clock_delay_div_4 << 1); - - write_bit_to_ddc(ddc_handle, SCL, false); - - udelay(clock_delay_div_4 << 1); - - return ack; -} - -static bool read_byte( - struct dc_context *ctx, - struct ddc *ddc_handle, - uint16_t clock_delay_div_4, - uint8_t *byte, - bool more) -{ - int32_t shift = 7; - - uint8_t data = 0; - - /* The data bits are read from MSB to LSB; - * bit is read while SCL is high */ - - do { - write_bit_to_ddc(ddc_handle, SCL, true); - - if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4)) - return false; - - if (read_bit_from_ddc(ddc_handle, SDA)) - data |= (1 << shift); - - write_bit_to_ddc(ddc_handle, SCL, false); - - udelay(clock_delay_div_4 << 1); - - --shift; - } while (shift >= 0); - - /* read only whole byte */ - - *byte = data; - - udelay(clock_delay_div_4); - - /* send the acknowledge bit: - * SDA low means ACK, SDA high means NACK */ - - write_bit_to_ddc(ddc_handle, SDA, !more); - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SCL, true); - - if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4)) - return false; - - write_bit_to_ddc(ddc_handle, SCL, false); - - udelay(clock_delay_div_4); - - write_bit_to_ddc(ddc_handle, SDA, true); - - udelay(clock_delay_div_4); - - return true; -} - -static bool i2c_write( - struct dc_context *ctx, - struct ddc *ddc_handle, - uint16_t clock_delay_div_4, - uint8_t address, - uint32_t length, - const uint8_t *data) -{ - uint32_t i = 0; - - if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address)) - return false; - - while (i < length) { - if (!write_byte(ctx, ddc_handle, clock_delay_div_4, data[i])) - return false; - ++i; - } - - return true; -} - -static bool i2c_read( - struct dc_context *ctx, - struct ddc *ddc_handle, - uint16_t clock_delay_div_4, - uint8_t address, - uint32_t length, - uint8_t *data) -{ - uint32_t i = 0; - - if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address)) - return false; - - while (i < length) { - if (!read_byte(ctx, ddc_handle, clock_delay_div_4, data + i, - i < length 
- 1)) - return false; - ++i; - } - - return true; -} - -/* - * @brief - * Cast 'struct i2c_engine *' - * to 'struct i2c_sw_engine *' - */ -#define FROM_I2C_ENGINE(ptr) \ - container_of((ptr), struct i2c_sw_engine, base) - -/* - * @brief - * Cast 'struct engine *' - * to 'struct i2c_sw_engine *' - */ -#define FROM_ENGINE(ptr) \ - FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base)) - -enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type( - const struct engine *engine) -{ - return I2CAUX_ENGINE_TYPE_I2C_SW; -} - -bool dal_i2c_sw_engine_submit_request( - struct engine *engine, - struct i2caux_transaction_request *i2caux_request, - bool middle_of_transaction) -{ - struct i2c_sw_engine *sw_engine = FROM_ENGINE(engine); - - struct i2c_engine *base = &sw_engine->base; - - struct i2c_request_transaction_data request; - bool operation_succeeded = false; - - if (i2caux_request->operation == I2CAUX_TRANSACTION_READ) - request.action = middle_of_transaction ? - I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_READ; - else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE) - request.action = middle_of_transaction ? - I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT : - I2CAUX_TRANSACTION_ACTION_I2C_WRITE; - else { - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION; - /* in DAL2, there was no "return false" */ - return false; - } - - request.address = (uint8_t)i2caux_request->payload.address; - request.length = i2caux_request->payload.length; - request.data = i2caux_request->payload.data; - - base->funcs->submit_channel_request(base, &request); - - if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) || - (request.status == I2C_CHANNEL_OPERATION_FAILED)) - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY; - else { - enum i2c_channel_operation_result operation_result; - - do { - operation_result = - base->funcs->get_channel_status(base, NULL); - - switch (operation_result) { - case I2C_CHANNEL_OPERATION_SUCCEEDED: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_SUCCEEDED; - operation_succeeded = true; - break; - case I2C_CHANNEL_OPERATION_NO_RESPONSE: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_NACK; - break; - case I2C_CHANNEL_OPERATION_TIMEOUT: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT; - break; - case I2C_CHANNEL_OPERATION_FAILED: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE; - break; - default: - i2caux_request->status = - I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION; - break; - } - } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY); - } - - return operation_succeeded; -} - -uint32_t dal_i2c_sw_engine_get_speed( - const struct i2c_engine *engine) -{ - return FROM_I2C_ENGINE(engine)->speed; -} - -void dal_i2c_sw_engine_set_speed( - struct i2c_engine *engine, - uint32_t speed) -{ - struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine); - - ASSERT(speed); - - sw_engine->speed = speed ? 
speed : I2CAUX_DEFAULT_I2C_SW_SPEED; - - sw_engine->clock_delay = 1000 / sw_engine->speed; - - if (sw_engine->clock_delay < 12) - sw_engine->clock_delay = 12; -} - -bool dal_i2caux_i2c_sw_engine_acquire_engine( - struct i2c_engine *engine, - struct ddc *ddc) -{ - enum gpio_result result; - - result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT, - GPIO_DDC_CONFIG_TYPE_MODE_I2C); - - if (result != GPIO_RESULT_OK) - return false; - - engine->base.ddc = ddc; - - return true; -} - -void dal_i2c_sw_engine_submit_channel_request( - struct i2c_engine *engine, - struct i2c_request_transaction_data *req) -{ - struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine); - - struct ddc *ddc = engine->base.ddc; - uint16_t clock_delay_div_4 = sw_engine->clock_delay >> 2; - - /* send sync (start / repeated start) */ - - bool result = start_sync(engine->base.ctx, ddc, clock_delay_div_4); - - /* process payload */ - - if (result) { - switch (req->action) { - case I2CAUX_TRANSACTION_ACTION_I2C_WRITE: - case I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT: - result = i2c_write(engine->base.ctx, ddc, clock_delay_div_4, - req->address, req->length, req->data); - break; - case I2CAUX_TRANSACTION_ACTION_I2C_READ: - case I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT: - result = i2c_read(engine->base.ctx, ddc, clock_delay_div_4, - req->address, req->length, req->data); - break; - default: - result = false; - break; - } - } - - /* send stop if not 'mot' or operation failed */ - - if (!result || - (req->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || - (req->action == I2CAUX_TRANSACTION_ACTION_I2C_READ)) - if (!stop_sync(engine->base.ctx, ddc, clock_delay_div_4)) - result = false; - - req->status = result ? - I2C_CHANNEL_OPERATION_SUCCEEDED : - I2C_CHANNEL_OPERATION_FAILED; -} - -enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status( - struct i2c_engine *engine, - uint8_t *returned_bytes) -{ - /* No arbitration with VBIOS is performed since DCE 6.0 */ - return I2C_CHANNEL_OPERATION_SUCCEEDED; -} - -void dal_i2c_sw_engine_destruct( - struct i2c_sw_engine *engine) -{ - dal_i2c_engine_destruct(&engine->base); -} - -static void destroy( - struct i2c_engine **ptr) -{ - dal_i2c_sw_engine_destruct(FROM_I2C_ENGINE(*ptr)); - - kfree(*ptr); - *ptr = NULL; -} - -static const struct i2c_engine_funcs i2c_engine_funcs = { - .acquire_engine = dal_i2caux_i2c_sw_engine_acquire_engine, - .destroy = destroy, - .get_speed = dal_i2c_sw_engine_get_speed, - .set_speed = dal_i2c_sw_engine_set_speed, - .setup_engine = dal_i2c_engine_setup_i2c_engine, - .submit_channel_request = dal_i2c_sw_engine_submit_channel_request, - .process_channel_reply = dal_i2c_engine_process_channel_reply, - .get_channel_status = dal_i2c_sw_engine_get_channel_status, -}; - -static void release_engine( - struct engine *engine) -{ - -} - -static const struct engine_funcs engine_funcs = { - .release_engine = release_engine, - .get_engine_type = dal_i2c_sw_engine_get_engine_type, - .acquire = dal_i2c_engine_acquire, - .submit_request = dal_i2c_sw_engine_submit_request, -}; - -void dal_i2c_sw_engine_construct( - struct i2c_sw_engine *engine, - const struct i2c_sw_engine_create_arg *arg) -{ - dal_i2c_engine_construct(&engine->base, arg->ctx); - dal_i2c_sw_engine_set_speed(&engine->base, arg->default_speed); - engine->base.funcs = &i2c_engine_funcs; - engine->base.base.funcs = &engine_funcs; -} - -struct i2c_engine *dal_i2c_sw_engine_create( - const struct i2c_sw_engine_create_arg *arg) -{ - struct i2c_sw_engine *engine; - - if (!arg) { - BREAK_TO_DEBUGGER(); - return 
NULL; - } - - engine = kzalloc(sizeof(struct i2c_sw_engine), GFP_KERNEL); - - if (!engine) { - BREAK_TO_DEBUGGER(); - return NULL; - } - - dal_i2c_sw_engine_construct(engine, arg); - return &engine->base; -} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h deleted file mode 100644 index 546f15b0d3f1..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_SW_ENGINE_H__ -#define __DAL_I2C_SW_ENGINE_H__ - -enum { - I2C_SW_RETRIES = 10, - I2C_SW_SCL_READ_RETRIES = 128, - /* following value is in microseconds */ - I2C_SW_TIMEOUT_DELAY = 3000 -}; - -struct i2c_sw_engine; - -struct i2c_sw_engine { - struct i2c_engine base; - uint32_t clock_delay; - /* Values below are in KHz */ - uint32_t speed; - uint32_t default_speed; -}; - -struct i2c_sw_engine_create_arg { - uint32_t default_speed; - struct dc_context *ctx; -}; - -void dal_i2c_sw_engine_construct( - struct i2c_sw_engine *engine, - const struct i2c_sw_engine_create_arg *arg); - -bool dal_i2caux_i2c_sw_engine_acquire_engine( - struct i2c_engine *engine, - struct ddc *ddc_handle); - -void dal_i2c_sw_engine_destruct( - struct i2c_sw_engine *engine); - -struct i2c_engine *dal_i2c_sw_engine_create( - const struct i2c_sw_engine_create_arg *arg); -enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type( - const struct engine *engine); -bool dal_i2c_sw_engine_submit_request( - struct engine *ptr, - struct i2caux_transaction_request *i2caux_request, - bool middle_of_transaction); -uint32_t dal_i2c_sw_engine_get_speed( - const struct i2c_engine *engine); -void dal_i2c_sw_engine_set_speed( - struct i2c_engine *ptr, - uint32_t speed); -void dal_i2c_sw_engine_submit_channel_request( - struct i2c_engine *ptr, - struct i2c_request_transaction_data *req); -enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status( - struct i2c_engine *engine, - uint8_t *returned_bytes); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c deleted file mode 100644 index 1ad6e49102ff..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c +++ /dev/null @@ -1,491 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" -#include "dc_bios_types.h" - -/* - * Header of this unit - */ - -#include "i2caux.h" - -/* - * Post-requisites: headers required by this unit - */ - -#include "engine.h" -#include "i2c_engine.h" -#include "aux_engine.h" - -/* - * This unit - */ - -#include "dce80/i2caux_dce80.h" - -#include "dce100/i2caux_dce100.h" - -#include "dce110/i2caux_dce110.h" - -#include "dce112/i2caux_dce112.h" - -#include "dce120/i2caux_dce120.h" - -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) -#include "dcn10/i2caux_dcn10.h" -#endif - -#include "diagnostics/i2caux_diag.h" - -/* - * @brief - * Plain API, available publicly - */ - -struct i2caux *dal_i2caux_create( - struct dc_context *ctx) -{ - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { - return dal_i2caux_diag_fpga_create(ctx); - } - - switch (ctx->dce_version) { - case DCE_VERSION_8_0: - case DCE_VERSION_8_1: - case DCE_VERSION_8_3: - return dal_i2caux_dce80_create(ctx); - case DCE_VERSION_11_2: - case DCE_VERSION_11_22: - return dal_i2caux_dce112_create(ctx); - case DCE_VERSION_11_0: - return dal_i2caux_dce110_create(ctx); - case DCE_VERSION_10_0: - return dal_i2caux_dce100_create(ctx); - case DCE_VERSION_12_0: - case DCE_VERSION_12_1: - return dal_i2caux_dce120_create(ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) - case DCN_VERSION_1_0: - return dal_i2caux_dcn10_create(ctx); -#endif - -#if defined(CONFIG_DRM_AMD_DC_DCN1_01) - case DCN_VERSION_1_01: - return dal_i2caux_dcn10_create(ctx); -#endif - default: - BREAK_TO_DEBUGGER(); - return NULL; - } -} - -bool dal_i2caux_submit_i2c_command( - struct i2caux *i2caux, - struct ddc *ddc, - struct i2c_command *cmd) -{ - struct i2c_engine *engine; - uint8_t index_of_payload = 0; - bool result; - - if (!ddc) { - BREAK_TO_DEBUGGER(); - return false; - } - - if (!cmd) { - BREAK_TO_DEBUGGER(); - return false; - } - - /* - * default will be SW, however there is a feature flag in adapter - * service that determines whether SW i2c_engine will be available or - * not, if sw i2c is not available we will fallback to hw. 
This feature - * flag is set to not creating sw i2c engine for every dce except dce80 - * currently - */ - switch (cmd->engine) { - case I2C_COMMAND_ENGINE_DEFAULT: - case I2C_COMMAND_ENGINE_SW: - /* try to acquire SW engine first, - * acquire HW engine if SW engine not available */ - engine = i2caux->funcs->acquire_i2c_sw_engine(i2caux, ddc); - - if (!engine) - engine = i2caux->funcs->acquire_i2c_hw_engine( - i2caux, ddc); - break; - case I2C_COMMAND_ENGINE_HW: - default: - /* try to acquire HW engine first, - * acquire SW engine if HW engine not available */ - engine = i2caux->funcs->acquire_i2c_hw_engine(i2caux, ddc); - - if (!engine) - engine = i2caux->funcs->acquire_i2c_sw_engine( - i2caux, ddc); - } - - if (!engine) - return false; - - engine->funcs->set_speed(engine, cmd->speed); - - result = true; - - while (index_of_payload < cmd->number_of_payloads) { - bool mot = (index_of_payload != cmd->number_of_payloads - 1); - - struct i2c_payload *payload = cmd->payloads + index_of_payload; - - struct i2caux_transaction_request request = { 0 }; - - request.operation = payload->write ? - I2CAUX_TRANSACTION_WRITE : - I2CAUX_TRANSACTION_READ; - - request.payload.address_space = - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C; - request.payload.address = (payload->address << 1) | - !payload->write; - request.payload.length = payload->length; - request.payload.data = payload->data; - - if (!engine->base.funcs->submit_request( - &engine->base, &request, mot)) { - result = false; - break; - } - - ++index_of_payload; - } - - i2caux->funcs->release_engine(i2caux, &engine->base); - - return result; -} - -bool dal_i2caux_submit_aux_command( - struct i2caux *i2caux, - struct ddc *ddc, - struct aux_command *cmd) -{ - struct aux_engine *engine; - uint8_t index_of_payload = 0; - bool result; - bool mot; - - if (!ddc) { - BREAK_TO_DEBUGGER(); - return false; - } - - if (!cmd) { - BREAK_TO_DEBUGGER(); - return false; - } - - engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc); - - if (!engine) - return false; - - engine->delay = cmd->defer_delay; - engine->max_defer_write_retry = cmd->max_defer_write_retry; - - result = true; - - while (index_of_payload < cmd->number_of_payloads) { - struct aux_payload *payload = cmd->payloads + index_of_payload; - struct i2caux_transaction_request request = { 0 }; - - if (cmd->mot == I2C_MOT_UNDEF) - mot = (index_of_payload != cmd->number_of_payloads - 1); - else - mot = (cmd->mot == I2C_MOT_TRUE); - - request.operation = payload->write ? 
- I2CAUX_TRANSACTION_WRITE : - I2CAUX_TRANSACTION_READ; - - if (payload->i2c_over_aux) { - request.payload.address_space = - I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C; - - request.payload.address = (payload->address << 1) | - !payload->write; - } else { - request.payload.address_space = - I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD; - - request.payload.address = payload->address; - } - - request.payload.length = payload->length; - request.payload.data = payload->data; - - if (!engine->base.funcs->submit_request( - &engine->base, &request, mot)) { - result = false; - break; - } - - ++index_of_payload; - } - - i2caux->funcs->release_engine(i2caux, &engine->base); - - return result; -} - -static bool get_hw_supported_ddc_line( - struct ddc *ddc, - enum gpio_ddc_line *line) -{ - enum gpio_ddc_line line_found; - - *line = GPIO_DDC_LINE_UNKNOWN; - - if (!ddc) { - BREAK_TO_DEBUGGER(); - return false; - } - - if (!ddc->hw_info.hw_supported) - return false; - - line_found = dal_ddc_get_line(ddc); - - if (line_found >= GPIO_DDC_LINE_COUNT) - return false; - - *line = line_found; - - return true; -} - -void dal_i2caux_configure_aux( - struct i2caux *i2caux, - struct ddc *ddc, - union aux_config cfg) -{ - struct aux_engine *engine = - i2caux->funcs->acquire_aux_engine(i2caux, ddc); - - if (!engine) - return; - - engine->funcs->configure(engine, cfg); - - i2caux->funcs->release_engine(i2caux, &engine->base); -} - -void dal_i2caux_destroy( - struct i2caux **i2caux) -{ - if (!i2caux || !*i2caux) { - BREAK_TO_DEBUGGER(); - return; - } - - (*i2caux)->funcs->destroy(i2caux); - - *i2caux = NULL; -} - -/* - * @brief - * An utility function used by 'struct i2caux' and its descendants - */ - -uint32_t dal_i2caux_get_reference_clock( - struct dc_bios *bios) -{ - struct dc_firmware_info info = { { 0 } }; - - if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK) - return 0; - - return info.pll_info.crystal_frequency; -} - -/* - * @brief - * i2caux - */ - -enum { - /* following are expressed in KHz */ - DEFAULT_I2C_SW_SPEED = 50, - DEFAULT_I2C_HW_SPEED = 50, - - DEFAULT_I2C_SW_SPEED_100KHZ = 100, - DEFAULT_I2C_HW_SPEED_100KHZ = 100, - - /* This is the timeout as defined in DP 1.2a, - * 2.3.4 "Detailed uPacket TX AUX CH State Description". */ - AUX_TIMEOUT_PERIOD = 400, - - /* Ideally, the SW timeout should be just above 550usec - * which is programmed in HW. - * But the SW timeout of 600usec is not reliable, - * because on some systems, delay_in_microseconds() - * returns faster than it should. - * EPR #379763: by trial-and-error on different systems, - * 700usec is the minimum reliable SW timeout for polling - * the AUX_SW_STATUS.AUX_SW_DONE bit. - * This timeout expires *only* when there is - * AUX Error or AUX Timeout conditions - not during normal operation. - * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set - * at most within ~240usec. That means, - * increasing this timeout will not affect normal operation, - * and we'll timeout after - * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec. - * This timeout is especially important for - * resume from S3 and CTS. 
*/ - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4 -}; - -struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine( - struct i2caux *i2caux, - struct ddc *ddc) -{ - enum gpio_ddc_line line; - struct i2c_engine *engine = NULL; - - if (get_hw_supported_ddc_line(ddc, &line)) - engine = i2caux->i2c_sw_engines[line]; - - if (!engine) - engine = i2caux->i2c_generic_sw_engine; - - if (!engine) - return NULL; - - if (!engine->base.funcs->acquire(&engine->base, ddc)) - return NULL; - - return engine; -} - -struct aux_engine *dal_i2caux_acquire_aux_engine( - struct i2caux *i2caux, - struct ddc *ddc) -{ - enum gpio_ddc_line line; - struct aux_engine *engine; - - if (!get_hw_supported_ddc_line(ddc, &line)) - return NULL; - - engine = i2caux->aux_engines[line]; - - if (!engine) - return NULL; - - if (!engine->base.funcs->acquire(&engine->base, ddc)) - return NULL; - - return engine; -} - -void dal_i2caux_release_engine( - struct i2caux *i2caux, - struct engine *engine) -{ - engine->funcs->release_engine(engine); - - dal_ddc_close(engine->ddc); - - engine->ddc = NULL; -} - -void dal_i2caux_construct( - struct i2caux *i2caux, - struct dc_context *ctx) -{ - uint32_t i = 0; - - i2caux->ctx = ctx; - do { - i2caux->i2c_sw_engines[i] = NULL; - i2caux->i2c_hw_engines[i] = NULL; - i2caux->aux_engines[i] = NULL; - - ++i; - } while (i < GPIO_DDC_LINE_COUNT); - - i2caux->i2c_generic_sw_engine = NULL; - i2caux->i2c_generic_hw_engine = NULL; - - i2caux->aux_timeout_period = - SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD; - - if (ctx->dce_version >= DCE_VERSION_11_2) { - i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED_100KHZ; - i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED_100KHZ; - } else { - i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED; - i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED; - } -} - -void dal_i2caux_destruct( - struct i2caux *i2caux) -{ - uint32_t i = 0; - - if (i2caux->i2c_generic_hw_engine) - i2caux->i2c_generic_hw_engine->funcs->destroy( - &i2caux->i2c_generic_hw_engine); - - if (i2caux->i2c_generic_sw_engine) - i2caux->i2c_generic_sw_engine->funcs->destroy( - &i2caux->i2c_generic_sw_engine); - - do { - if (i2caux->aux_engines[i]) - i2caux->aux_engines[i]->funcs->destroy( - &i2caux->aux_engines[i]); - - if (i2caux->i2c_hw_engines[i]) - i2caux->i2c_hw_engines[i]->funcs->destroy( - &i2caux->i2c_hw_engines[i]); - - if (i2caux->i2c_sw_engines[i]) - i2caux->i2c_sw_engines[i]->funcs->destroy( - &i2caux->i2c_sw_engines[i]); - - ++i; - } while (i < GPIO_DDC_LINE_COUNT); -} - diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h deleted file mode 100644 index 64f51bb06915..000000000000 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_I2C_AUX_H__ -#define __DAL_I2C_AUX_H__ - -uint32_t dal_i2caux_get_reference_clock( - struct dc_bios *bios); - -struct i2caux; - -struct engine; - -struct i2caux_funcs { - void (*destroy)(struct i2caux **ptr); - struct i2c_engine * (*acquire_i2c_sw_engine)( - struct i2caux *i2caux, - struct ddc *ddc); - struct i2c_engine * (*acquire_i2c_hw_engine)( - struct i2caux *i2caux, - struct ddc *ddc); - struct aux_engine * (*acquire_aux_engine)( - struct i2caux *i2caux, - struct ddc *ddc); - void (*release_engine)( - struct i2caux *i2caux, - struct engine *engine); -}; - -struct i2c_engine; -struct aux_engine; - -struct i2caux { - struct dc_context *ctx; - const struct i2caux_funcs *funcs; - /* On ASIC we have certain amount of lines with HW DDC engine - * (4, 6, or maybe more in the future). - * For every such line, we create separate HW DDC engine - * (since we have these engines in HW) and separate SW DDC engine - * (to allow concurrent use of few lines). - * In similar way we have AUX engines. */ - - /* I2C SW engines, per DDC line. - * Only lines with HW DDC support will be initialized */ - struct i2c_engine *i2c_sw_engines[GPIO_DDC_LINE_COUNT]; - - /* I2C HW engines, per DDC line. - * Only lines with HW DDC support will be initialized */ - struct i2c_engine *i2c_hw_engines[GPIO_DDC_LINE_COUNT]; - - /* AUX engines, per DDC line. - * Only lines with HW AUX support will be initialized */ - struct aux_engine *aux_engines[GPIO_DDC_LINE_COUNT]; - - /* For all other lines, we can use - * single instance of generic I2C HW engine - * (since in HW, there is single instance of it) - * or single instance of generic I2C SW engine. - * AUX is not supported for other lines. */ - - /* General-purpose I2C SW engine. - * Can be assigned dynamically to any line per transaction */ - struct i2c_engine *i2c_generic_sw_engine; - - /* General-purpose I2C generic HW engine. 
- * Can be assigned dynamically to almost any line per transaction */ - struct i2c_engine *i2c_generic_hw_engine; - - /* [anaumov] in DAL2, there is a Mutex */ - - uint32_t aux_timeout_period; - - /* expressed in KHz */ - uint32_t default_i2c_sw_speed; - uint32_t default_i2c_hw_speed; -}; - -void dal_i2caux_construct( - struct i2caux *i2caux, - struct dc_context *ctx); - -void dal_i2caux_release_engine( - struct i2caux *i2caux, - struct engine *engine); - -void dal_i2caux_destruct( - struct i2caux *i2caux); - -void dal_i2caux_destroy( - struct i2caux **ptr); - -struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine( - struct i2caux *i2caux, - struct ddc *ddc); - -struct aux_engine *dal_i2caux_acquire_aux_engine( - struct i2caux *i2caux, - struct ddc *ddc); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h index 47ef90495376..fe6301cb8681 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h +++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h @@ -78,7 +78,7 @@ struct csdp_ref_clk_ds_params { }; struct pixel_clk_params { - uint32_t requested_pix_clk; /* in KHz */ + uint32_t requested_pix_clk_100hz; /*> Requested Pixel Clock * (based on Video Timing standard used for requested mode)*/ uint32_t requested_sym_clk; /* in KHz */ @@ -104,9 +104,9 @@ struct pixel_clk_params { * with actually calculated Clock and reference Crystal frequency */ struct pll_settings { - uint32_t actual_pix_clk; - uint32_t adjusted_pix_clk; - uint32_t calculated_pix_clk; + uint32_t actual_pix_clk_100hz; + uint32_t adjusted_pix_clk_100hz; + uint32_t calculated_pix_clk_100hz; uint32_t vco_freq; uint32_t reference_freq; uint32_t reference_divider; @@ -166,6 +166,10 @@ struct clock_source_funcs { struct clock_source *, struct pixel_clk_params *, struct pll_settings *); + bool (*get_pixel_clk_frequency_100hz)( + struct clock_source *clock_source, + unsigned int inst, + unsigned int *pixel_clk_khz); }; struct clock_source { diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index 94fc31080fda..2e61a22ef4b2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -30,7 +30,7 @@ enum dc_status { DC_OK = 1, DC_NO_CONTROLLER_RESOURCE = 2, - DC_NO_STREAM_ENG_RESOURCE = 3, + DC_NO_STREAM_ENC_RESOURCE = 3, DC_NO_CLOCK_SOURCE_RESOURCE = 4, DC_FAIL_CONTROLLER_VALIDATE = 5, DC_FAIL_ENC_VALIDATE = 6, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b168a5e9dd9d..986ed1728644 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -146,7 +146,7 @@ struct resource_pool { struct mpc *mpc; struct pp_smu_funcs_rv *pp_smu; struct pp_smu_display_requirement_rv pp_smu_req; - struct aux_engine *engines[MAX_PIPES]; + struct dce_aux *engines[MAX_PIPES]; struct dce_i2c_hw *hw_i2cs[MAX_PIPES]; struct dce_i2c_sw *sw_i2cs[MAX_PIPES]; bool i2c_hw_buffer_in_use; @@ -180,13 +180,8 @@ struct resource_pool { const struct resource_caps *res_cap; }; -struct dcn_fe_clocks { - int dppclk_khz; -}; - struct dcn_fe_bandwidth { - struct dcn_fe_clocks calc; - struct dcn_fe_clocks cur; + int dppclk_khz; }; struct stream_resource { diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 538b83303b86..16fd4dc6c4dd 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -64,13 +64,6 @@ void dal_ddc_i2c_payloads_add( uint8_t *data, bool write); -void dal_ddc_aux_payloads_add( - struct aux_payloads *payloads, - uint32_t address, - uint32_t len, - uint8_t *data, - bool write); - struct ddc_service_init_data { struct graphics_object_id id; struct dc_context *ctx; @@ -103,12 +96,10 @@ bool dal_ddc_service_query_ddc_data( uint32_t read_size); int dc_link_aux_transfer(struct ddc_service *ddc, - unsigned int address, - uint8_t *reply, - void *buffer, - unsigned int size, - enum aux_transaction_type type, - enum i2caux_transaction_action action); + struct aux_payload *payload); + +bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, + struct aux_payload *payload); void dal_ddc_service_write_scdc_data( struct ddc_service *ddc_service, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index abc961c0906e..86dc39a02408 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h @@ -46,6 +46,7 @@ struct abm_funcs { void (*abm_init)(struct abm *abm); bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); bool (*set_abm_immediate_disable)(struct abm *abm); + bool (*set_pipe)(struct abm *abm, unsigned int controller_id); bool (*init_backlight)(struct abm *abm); /* backlight_pwm_u16_16 is unsigned 32 bit, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 02f757dd70d4..9d2d8e51306c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -39,6 +39,18 @@ enum segment_order { segment_order__non_contiguous, }; +struct dcn_hubbub_wm_set { + uint32_t wm_set; + uint32_t data_urgent; + uint32_t pte_meta_urgent; + uint32_t sr_enter; + uint32_t sr_exit; + uint32_t dram_clk_chanage; +}; + +struct dcn_hubbub_wm { + struct dcn_hubbub_wm_set sets[4]; +}; struct hubbub_funcs { void (*update_dchub)( @@ -58,7 +70,14 @@ struct hubbub_funcs { bool (*dcc_support_pixel_format)( enum surface_pixel_format format, unsigned int *bytes_per_element); + + void (*wm_read_state)(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm); }; +struct hubbub { + const struct hubbub_funcs *funcs; + struct dc_context *ctx; +}; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h index cb85eaa9857f..cbaa43853611 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h @@ -27,16 +27,22 @@ #include "dm_services_types.h" +/* If HW itself ever powered down it will be 0. + * fwDmcuInit will write to 1. + * Driver will only call MCP init if current state is 1, + * and the MCP command will transition this to 2. 
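The handshake this new comment describes is: state 0 after the DMCU has been powered down, 1 once firmware has been loaded, 2 once the driver's MCP init command has completed, and the driver only issues MCP init from state 1. A stand-alone sketch of that guard, using the dmcu_state values introduced just below; dmcu_read_state() and dmcu_send_mcp_init() are hypothetical stand-ins for the real register and MCP accessors.

#include <stdbool.h>
#include <stdio.h>

enum dmcu_state {
        DMCU_UNLOADED = 0,              /* HW was powered down */
        DMCU_LOADED_UNINITIALIZED = 1,  /* firmware loaded, MCP init pending */
        DMCU_RUNNING = 2,               /* MCP init completed */
};

static enum dmcu_state fake_state = DMCU_LOADED_UNINITIALIZED;

static enum dmcu_state dmcu_read_state(void) { return fake_state; }
static void dmcu_send_mcp_init(void) { fake_state = DMCU_RUNNING; }

static bool dmcu_init(void)
{
        /* only issue MCP init when firmware is loaded but not yet running */
        if (dmcu_read_state() != DMCU_LOADED_UNINITIALIZED)
                return dmcu_read_state() == DMCU_RUNNING;

        dmcu_send_mcp_init();
        return dmcu_read_state() == DMCU_RUNNING;
}

int main(void)
{
        printf("dmcu running: %d\n", dmcu_init());
        return 0;
}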
+ */ enum dmcu_state { - DMCU_NOT_INITIALIZED = 0, - DMCU_RUNNING = 1 + DMCU_UNLOADED = 0, + DMCU_LOADED_UNINITIALIZED = 1, + DMCU_RUNNING = 2, }; struct dmcu_version { - unsigned int date; - unsigned int month; - unsigned int year; unsigned int interface_version; + unsigned int abm_version; + unsigned int psr_version; + unsigned int build_version; }; struct dmcu { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index e894e649ce5a..fb7967b39edb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -39,6 +39,11 @@ struct dpp { }; +struct dpp_input_csc_matrix { + enum dc_color_space color_space; + uint16_t regval[12]; +}; + struct dpp_grph_csc_adjustment { struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE]; enum graphics_gamut_adjust_type gamut_adjust_type; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 04c6989aac58..1cd07e94ee63 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -78,7 +78,8 @@ struct hubp_funcs { bool (*hubp_program_surface_flip_and_addr)( struct hubp *hubp, const struct dc_plane_address *address, - bool flip_immediate); + bool flip_immediate, + uint8_t vmid); void (*hubp_program_pte_vm)( struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index c20fdcaac53b..c9d3e37e9531 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -153,6 +153,7 @@ struct link_encoder_funcs { void (*enable_hpd)(struct link_encoder *enc); void (*disable_hpd)(struct link_encoder *enc); bool (*is_dig_enabled)(struct link_encoder *enc); + unsigned int (*get_dig_frontend)(struct link_encoder *enc); void (*destroy)(struct link_encoder **enc); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 06df02ddff6a..da89c2edb07c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -31,7 +31,7 @@ #include "dml/display_mode_structs.h" struct dchub_init_data; -struct cstate_pstate_watermarks_st1 { +struct cstate_pstate_watermarks_st { uint32_t cstate_exit_ns; uint32_t cstate_enter_plus_exit_ns; uint32_t pstate_change_ns; @@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st1 { struct dcn_watermarks { uint32_t pte_meta_urgent_ns; uint32_t urgent_ns; - struct cstate_pstate_watermarks_st1 cstate_pstate; + struct cstate_pstate_watermarks_st cstate_pstate; }; struct dcn_watermark_set { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 53a9b64df11a..4051493557bc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -161,6 +161,10 @@ struct stream_encoder_funcs { void (*set_avmute)( struct stream_encoder *enc, bool enable); + void (*dig_connect_to_otg)( + struct stream_encoder *enc, + int tg_inst); + }; #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index af700c7dac50..c25f7df7b5e3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h 
@@ -134,15 +134,24 @@ struct dc_crtc_timing; struct drr_params; + struct timing_generator_funcs { bool (*validate_timing)(struct timing_generator *tg, const struct dc_crtc_timing *timing); void (*program_timing)(struct timing_generator *tg, const struct dc_crtc_timing *timing, bool use_vbios); - void (*program_vline_interrupt)(struct timing_generator *optc, - const struct dc_crtc_timing *dc_crtc_timing, - unsigned long long vsync_delta); + void (*setup_vertical_interrupt0)( + struct timing_generator *optc, + uint32_t start_line, + uint32_t end_line); + void (*setup_vertical_interrupt1)( + struct timing_generator *optc, + uint32_t start_line); + void (*setup_vertical_interrupt2)( + struct timing_generator *optc, + uint32_t start_line); + bool (*enable_crtc)(struct timing_generator *tg); bool (*disable_crtc)(struct timing_generator *tg); bool (*is_counter_moving)(struct timing_generator *tg); @@ -159,6 +168,8 @@ struct timing_generator_funcs { bool (*get_otg_active_size)(struct timing_generator *optc, uint32_t *otg_active_width, uint32_t *otg_active_height); + bool (*is_matching_timing)(struct timing_generator *tg, + const struct dc_crtc_timing *otg_timing); void (*set_early_control)(struct timing_generator *tg, uint32_t early_cntl); void (*wait_for_state)(struct timing_generator *tg, diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c b/drivers/gpu/drm/amd/display/dc/inc/hw/vmid.h similarity index 63% rename from drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c rename to drivers/gpu/drm/amd/display/dc/inc/hw/vmid.h index 5d155d36d353..037beb0a2a27 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vmid.h @@ -1,5 +1,5 @@ /* - * Copyright 2012-15 Advanced Micro Devices, Inc. + * Copyright 2018 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,30 +23,27 @@ * */ -#include "dm_services.h" +#ifndef DAL_DC_INC_HW_VMID_H_ +#define DAL_DC_INC_HW_VMID_H_ -/* - * Pre-requisites: headers required by header of this unit - */ -#include "include/i2caux_interface.h" - -/* - * Header of this unit - */ - -#include "engine.h" +#include "core_types.h" +#include "dchubbub.h" -void dal_i2caux_construct_engine( - struct engine *engine, - struct dc_context *ctx) -{ - engine->ddc = NULL; - engine->ctx = ctx; -} +struct dcn_vmid_registers { + uint32_t CNTL; + uint32_t PAGE_TABLE_BASE_ADDR_HI32; + uint32_t PAGE_TABLE_BASE_ADDR_LO32; + uint32_t PAGE_TABLE_START_ADDR_HI32; + uint32_t PAGE_TABLE_START_ADDR_LO32; + uint32_t PAGE_TABLE_END_ADDR_HI32; + uint32_t PAGE_TABLE_END_ADDR_LO32; +}; -void dal_i2caux_destruct_engine( - struct engine *engine) -{ - /* nothing to do */ -} +struct dcn_vmid_page_table_config { + uint64_t page_table_start_addr; + uint64_t page_table_end_addr; + enum dcn_hubbub_page_table_depth depth; + enum dcn_hubbub_page_table_block_size block_size; +}; +#endif /* DAL_DC_INC_HW_VMID_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d6a85f48b6d1..7676f25216b1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -38,6 +38,11 @@ enum pipe_gating_control { PIPE_GATING_CONTROL_INIT }; +enum vline_select { + VLINE0, + VLINE1 +}; + struct dce_hwseq_wa { bool blnd_crtc_trigger; bool DEGVIDCN10_253; @@ -68,8 +73,14 @@ struct stream_resource; struct hw_sequencer_funcs { + void (*disable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + + void (*enable_stream_gating)(struct dc *dc, struct pipe_ctx *pipe_ctx); + void (*init_hw)(struct dc *dc); + void (*init_pipes)(struct dc *dc, struct dc_state *context); + enum dc_status (*apply_ctx_to_hw)( struct dc *dc, struct dc_state *context); @@ -218,6 +229,9 @@ struct hw_sequencer_funcs { void (*set_cursor_attribute)(struct pipe_ctx *pipe); void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe); + void (*setup_periodic_interrupt)(struct pipe_ctx *pipe_ctx, enum vline_select vline); + void (*setup_vupdate_interrupt)(struct pipe_ctx *pipe_ctx); + }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/inc/vm_helper.h similarity index 64% rename from drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h rename to drivers/gpu/drm/amd/display/dc/inc/vm_helper.h index c48c61f540a8..193407f76a80 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h +++ b/drivers/gpu/drm/amd/display/dc/inc/vm_helper.h @@ -1,5 +1,5 @@ /* - * Copyright 2012-15 Advanced Micro Devices, Inc. + * Copyright 2018 Advanced Micro Devices, Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -23,21 +23,34 @@ * */ -#ifndef __DAL_I2C_SW_ENGINE_DCE110_H__ -#define __DAL_I2C_SW_ENGINE_DCE110_H__ +#ifndef DC_INC_VM_HELPER_H_ +#define DC_INC_VM_HELPER_H_ -struct i2c_sw_engine_dce110 { - struct i2c_sw_engine base; - uint32_t engine_id; +#include "dc_types.h" + +#define MAX_VMID 16 +#define MAX_HUBP 6 + +struct vmid_usage { + uint16_t vmid_usage[2]; }; -struct i2c_sw_engine_dce110_create_arg { - uint32_t engine_id; - uint32_t default_speed; - struct dc_context *ctx; +struct vm_helper { + unsigned int num_vmid; + unsigned int num_hubp; + unsigned int num_vmids_available; + uint64_t ptb_assigned_to_vmid[MAX_VMID]; + struct vmid_usage hubp_vmid_usage[MAX_HUBP]; }; -struct i2c_engine *dal_i2c_sw_engine_dce110_create( - const struct i2c_sw_engine_dce110_create_arg *arg); +uint8_t get_vmid_for_ptb( + struct vm_helper *vm_helper, + int64_t ptb, + uint8_t pipe_idx); + +void init_vm_helper( + struct vm_helper *vm_helper, + unsigned int num_vmid, + unsigned int num_hubp); -#endif +#endif /* DC_INC_VM_HELPER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index 0b5f3a278c22..d0ccd81ad5b4 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -144,6 +144,14 @@ enum dc_irq_source { DC_IRQ_SOURCE_DC5_VLINE0, DC_IRQ_SOURCE_DC6_VLINE0, + DC_IRQ_SOURCE_DC1_VLINE1, + DC_IRQ_SOURCE_DC2_VLINE1, + DC_IRQ_SOURCE_DC3_VLINE1, + DC_IRQ_SOURCE_DC4_VLINE1, + DC_IRQ_SOURCE_DC5_VLINE1, + DC_IRQ_SOURCE_DC6_VLINE1, + + DAL_IRQ_SOURCES_NUMBER }; diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index 7fd78a696800..01bf01a34a08 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -211,8 +211,8 @@ struct bp_pixel_clock_parameters { /* signal_type -> Encoder Mode - needed by VBIOS Exec table */ enum signal_type signal_type; /* Adjusted Pixel Clock (after VBIOS exec table) - * that becomes Target Pixel Clock (KHz) */ - uint32_t target_pixel_clock; + * that becomes Target Pixel Clock (100 Hz units) */ + uint32_t target_pixel_clock_100hz; /* Calculated Reference divider of Display PLL */ uint32_t reference_divider; /* Calculated Feedback divider of Display PLL */ diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 4f501ddcfb8d..34d6fdcb32e2 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -131,6 +131,7 @@ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ #define RAVEN_A0 0x01 #define RAVEN_B0 0x21 +#define PICASSO_A0 0x41 #if defined(CONFIG_DRM_AMD_DC_DCN1_01) /* DCN1_01 */ #define RAVEN2_A0 0x81 @@ -165,4 +166,6 @@ #define FAMILY_UNKNOWN 0xFF + + #endif /* __DAL_ASIC_ID_H__ */ diff --git a/drivers/gpu/drm/amd/display/include/gpio_interface.h b/drivers/gpu/drm/amd/display/include/gpio_interface.h index e4fd31024b92..7de64195dc33 100644 --- a/drivers/gpu/drm/amd/display/include/gpio_interface.h +++ b/drivers/gpu/drm/amd/display/include/gpio_interface.h @@ -59,6 +59,14 @@ enum gpio_result dal_gpio_change_mode( struct gpio *gpio, enum gpio_mode mode); +/* Lock Pin */ +enum gpio_result dal_gpio_lock_pin( + struct gpio *gpio); + +/* Unlock Pin */ +enum 
gpio_result dal_gpio_unlock_pin( + struct gpio *gpio); + /* Get the GPIO id */ enum gpio_id dal_gpio_get_id( const struct gpio *gpio); diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h index 13a3c82d118f..bb012cb1a9f5 100644 --- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h +++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h @@ -40,9 +40,19 @@ struct aux_payload { /* set following flag to write data, * reset it to read data */ bool write; + bool mot; uint32_t address; uint8_t length; uint8_t *data; + /* + * used to return the reply type of the transaction + * ignored if NULL + */ + uint8_t *reply; + /* expressed in milliseconds + * zero means "use default value" + */ + uint32_t defer_delay; }; struct aux_command { @@ -66,27 +76,4 @@ union aux_config { uint32_t raw; }; -struct i2caux; - -struct i2caux *dal_i2caux_create( - struct dc_context *ctx); - -bool dal_i2caux_submit_i2c_command( - struct i2caux *i2caux, - struct ddc *ddc, - struct i2c_command *cmd); - -bool dal_i2caux_submit_aux_command( - struct i2caux *i2caux, - struct ddc *ddc, - struct aux_command *cmd); - -void dal_i2caux_configure_aux( - struct i2caux *i2caux, - struct ddc *ddc, - union aux_config cfg); - -void dal_i2caux_destroy( - struct i2caux **ptr); - #endif diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 479b77c2e89e..0fbc8fbc3541 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -823,7 +823,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, bool is_clipped = false; struct fixed31_32 sdr_white_level; - if (fs_params == NULL || fs_params->max_content == 0 || + if (fs_params->max_content == 0 || fs_params->max_display == 0) return false; @@ -1508,7 +1508,7 @@ static bool map_regamma_hw_to_x_user( struct hw_x_point *coords = coords_x; const struct pwl_float_data_ex *regamma = rgb_regamma; - if (mapUserRamp) { + if (ramp && mapUserRamp) { copy_rgb_regamma_to_coordinates_x(coords, hw_points_num, rgb_regamma); @@ -1545,7 +1545,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, struct pwl_float_data *rgb_user = NULL; struct pwl_float_data_ex *rgb_regamma = NULL; - struct gamma_pixel *axix_x = NULL; + struct gamma_pixel *axis_x = NULL; struct pixel_gamma_point *coeff = NULL; enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; bool ret = false; @@ -1555,47 +1555,54 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, /* we can use hardcoded curve for plain SRGB TF */ if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && - output_tf->tf == TRANSFER_FUNCTION_SRGB && - (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))) - return true; + output_tf->tf == TRANSFER_FUNCTION_SRGB) { + if (ramp == NULL) + return true; + if (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256)) + return true; + } output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, + if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) { + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*rgb_user), GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; + if (!rgb_user) + goto rgb_user_alloc_fail; + + axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), + GFP_KERNEL); + if (!axis_x) + goto 
axis_x_alloc_fail; + + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); + + build_evenly_distributed_points( + axis_x, + ramp->num_entries, + dividers); + + if (ramp->type == GAMMA_RGB_256 && mapUserRamp) + scale_gamma(rgb_user, ramp, dividers); + else if (ramp->type == GAMMA_RGB_FLOAT_1024) + scale_gamma_dx(rgb_user, ramp, dividers); + } + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*rgb_regamma), GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; - axix_x = kvcalloc(ramp->num_entries + 3, sizeof(*axix_x), - GFP_KERNEL); - if (!axix_x) - goto axix_x_alloc_fail; + coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - tf = output_tf->tf; - - build_evenly_distributed_points( - axix_x, - ramp->num_entries, - dividers); - - if (ramp->type == GAMMA_RGB_256 && mapUserRamp) - scale_gamma(rgb_user, ramp, dividers); - else if (ramp->type == GAMMA_RGB_FLOAT_1024) - scale_gamma_dx(rgb_user, ramp, dividers); - if (tf == TRANSFER_FUNCTION_PQ) { tf_pts->end_exponent = 7; tf_pts->x_point_at_y1_red = 125; @@ -1623,22 +1630,22 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false); } map_regamma_hw_to_x_user(ramp, coeff, rgb_user, - coordinates_x, axix_x, rgb_regamma, + coordinates_x, axis_x, rgb_regamma, MAX_HW_POINTS, tf_pts, - (mapUserRamp || ramp->type != GAMMA_RGB_256) && - ramp->type != GAMMA_CS_TFM_1D); + (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && + (ramp && ramp->type != GAMMA_CS_TFM_1D)); - if (ramp->type == GAMMA_CS_TFM_1D) + if (ramp && ramp->type == GAMMA_CS_TFM_1D) apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); ret = true; kvfree(coeff); coeff_alloc_fail: - kvfree(axix_x); -axix_x_alloc_fail: kvfree(rgb_regamma); rgb_regamma_alloc_fail: + kvfree(axis_x); +axis_x_alloc_fail: kvfree(rgb_user); rgb_user_alloc_fail: return ret; @@ -1758,69 +1765,85 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, { struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts; struct dividers dividers; - struct pwl_float_data *rgb_user = NULL; struct pwl_float_data_ex *curve = NULL; struct gamma_pixel *axis_x = NULL; struct pixel_gamma_point *coeff = NULL; enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; + uint32_t i; bool ret = false; if (input_tf->type == TF_TYPE_BYPASS) return false; - /* we can use hardcoded curve for plain SRGB TF */ + /* we can use hardcoded curve for plain SRGB TF + * If linear, it's bypass if on user ramp + */ if (input_tf->type == TF_TYPE_PREDEFINED && - input_tf->tf == TRANSFER_FUNCTION_SRGB && - (!mapUserRamp && - (ramp->type == GAMMA_RGB_256 || ramp->num_entries == 0))) + (input_tf->tf == TRANSFER_FUNCTION_SRGB || + input_tf->tf == TRANSFER_FUNCTION_LINEAR) && + !mapUserRamp) return true; input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, - sizeof(*rgb_user), - GFP_KERNEL); - if (!rgb_user) - goto rgb_user_alloc_fail; + if (mapUserRamp && ramp && ramp->type == GAMMA_RGB_256) { + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, + sizeof(*rgb_user), + GFP_KERNEL); + if (!rgb_user) + goto rgb_user_alloc_fail; + + axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, 
sizeof(*axis_x), + GFP_KERNEL); + if (!axis_x) + goto axis_x_alloc_fail; + + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); + + build_evenly_distributed_points( + axis_x, + ramp->num_entries, + dividers); + + scale_gamma(rgb_user, ramp, dividers); + } + curve = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*curve), - GFP_KERNEL); + GFP_KERNEL); if (!curve) goto curve_alloc_fail; - axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x), - GFP_KERNEL); - if (!axis_x) - goto axis_x_alloc_fail; + coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), - GFP_KERNEL); + GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; - dividers.divider1 = dc_fixpt_from_fraction(3, 2); - dividers.divider2 = dc_fixpt_from_int(2); - dividers.divider3 = dc_fixpt_from_fraction(5, 2); - tf = input_tf->tf; - build_evenly_distributed_points( - axis_x, - ramp->num_entries, - dividers); - - if (ramp->type == GAMMA_RGB_256 && mapUserRamp) - scale_gamma(rgb_user, ramp, dividers); - else if (ramp->type == GAMMA_RGB_FLOAT_1024) - scale_gamma_dx(rgb_user, ramp, dividers); - if (tf == TRANSFER_FUNCTION_PQ) build_de_pq(curve, MAX_HW_POINTS, coordinates_x); - else + else if (tf == TRANSFER_FUNCTION_SRGB || + tf == TRANSFER_FUNCTION_BT709) build_degamma(curve, MAX_HW_POINTS, coordinates_x, - tf == TRANSFER_FUNCTION_SRGB ? true:false); + tf == TRANSFER_FUNCTION_SRGB ? true : false); + else if (tf == TRANSFER_FUNCTION_LINEAR) { + // just copy coordinates_x into curve + i = 0; + while (i != MAX_HW_POINTS + 1) { + curve[i].r = coordinates_x[i].x; + curve[i].g = curve[i].r; + curve[i].b = curve[i].r; + i++; + } + } else + goto invalid_tf_fail; tf_pts->end_exponent = 0; tf_pts->x_point_at_y1_red = 1; @@ -1830,23 +1853,21 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, map_regamma_hw_to_x_user(ramp, coeff, rgb_user, coordinates_x, axis_x, curve, MAX_HW_POINTS, tf_pts, - mapUserRamp && ramp->type != GAMMA_CUSTOM); - if (ramp->type == GAMMA_CUSTOM) - apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); + mapUserRamp && ramp && ramp->type == GAMMA_RGB_256); ret = true; +invalid_tf_fail: kvfree(coeff); coeff_alloc_fail: - kvfree(axis_x); -axis_x_alloc_fail: kvfree(curve); curve_alloc_fail: + kvfree(axis_x); +axis_x_alloc_fail: kvfree(rgb_user); rgb_user_alloc_fail: return ret; - } diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 1544ed3f1747..94a84bc57c7a 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -108,8 +108,8 @@ static unsigned int calc_duration_in_us_from_v_total( { unsigned int duration_in_us = (unsigned int)(div64_u64(((unsigned long long)(v_total) - * 1000) * stream->timing.h_total, - stream->timing.pix_clk_khz)); + * 10000) * stream->timing.h_total, + stream->timing.pix_clk_100hz)); return duration_in_us; } @@ -126,7 +126,7 @@ static unsigned int calc_v_total_from_refresh( refresh_in_uhz))); v_total = div64_u64(div64_u64(((unsigned long long)( - frame_duration_in_ns) * stream->timing.pix_clk_khz), + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000000); /* v_total cannot be less than nominal */ @@ -152,7 +152,7 @@ static unsigned int calc_v_total_from_duration( duration_in_us = vrr->max_duration_in_us; v_total = div64_u64(div64_u64(((unsigned long long)( - duration_in_us) * 
stream->timing.pix_clk_khz), + duration_in_us) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000); /* v_total cannot be less than nominal */ @@ -227,7 +227,7 @@ static void update_v_total_for_static_ramp( } v_total = div64_u64(div64_u64(((unsigned long long)( - current_duration_in_us) * stream->timing.pix_clk_khz), + current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000); in_out_vrr->adjust.v_total_min = v_total; @@ -461,6 +461,26 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync, return false; } +static void build_vrr_infopacket_header_vtem(enum signal_type signal, + struct dc_info_packet *infopacket) +{ + // HEADER + + // HB0, HB1, HB2 indicates PacketType VTEMPacket + infopacket->hb0 = 0x7F; + infopacket->hb1 = 0xC0; + infopacket->hb2 = 0x00; + /* HB3 Bit Fields + * Reserved :1 = 0 + * Sync :1 = 0 + * VFR :1 = 1 + * Ds_Type :2 = 0 + * End :1 = 0 + * New :1 = 0 + */ + infopacket->hb3 = 0x20; +} + static void build_vrr_infopacket_header_v1(enum signal_type signal, struct dc_info_packet *infopacket, unsigned int *payload_size) @@ -559,6 +579,54 @@ static void build_vrr_infopacket_header_v2(enum signal_type signal, } } +static void build_vrr_vtem_infopacket_data(const struct dc_stream_state *stream, + const struct mod_vrr_params *vrr, + struct dc_info_packet *infopacket) +{ + /* dc_info_packet to VtemPacket Translation of Bit-fields, + * SB[6] + * unsigned char VRR_EN :1 + * unsigned char M_CONST :1 + * unsigned char Reserved2 :2 + * unsigned char FVA_Factor_M1 :4 + * SB[7] + * unsigned char Base_Vfront :8 + * SB[8] + * unsigned char Base_Refresh_Rate_98 :2 + * unsigned char RB :1 + * unsigned char Reserved3 :5 + * SB[9] + * unsigned char Base_RefreshRate_07 :8 + */ + unsigned int fieldRateInHz; + + if (vrr->state == VRR_STATE_ACTIVE_VARIABLE || + vrr->state == VRR_STATE_ACTIVE_FIXED){ + infopacket->sb[6] |= 0x80; //VRR_EN Bit = 1 + } else { + infopacket->sb[6] &= 0x7F; //VRR_EN Bit = 0 + } + + if (!stream->timing.vic) { + infopacket->sb[7] = stream->timing.v_front_porch; + + /* TODO: In dal2, we check mode flags for a reduced blanking timing. + * Need a way to relay that information to this function. 
+ * if("ReducedBlanking") + * { + * infopacket->sb[8] |= 0x20; //Set 3rd bit to 1 + * } + */ + fieldRateInHz = (stream->timing.pix_clk_100hz * 100)/ + (stream->timing.h_total * stream->timing.v_total); + + infopacket->sb[8] |= ((fieldRateInHz & 0x300) >> 2); + infopacket->sb[9] |= fieldRateInHz & 0xFF; + + } + infopacket->valid = true; +} + static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr, struct dc_info_packet *infopacket) { @@ -672,6 +740,19 @@ static void build_vrr_infopacket_v2(enum signal_type signal, infopacket->valid = true; } +static void build_vrr_infopacket_vtem(const struct dc_stream_state *stream, + const struct mod_vrr_params *vrr, + struct dc_info_packet *infopacket) +{ + //VTEM info packet for HdmiVrr + + //VTEM Packet is structured differently + build_vrr_infopacket_header_vtem(stream->signal, infopacket); + build_vrr_vtem_infopacket_data(stream, vrr, infopacket); + + infopacket->valid = true; +} + void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, const struct mod_vrr_params *vrr, @@ -679,18 +760,21 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const enum color_transfer_func *app_tf, struct dc_info_packet *infopacket) { - /* SPD info packet for FreeSync */ - - /* Check if Freesync is supported. Return if false. If true, + /* SPD info packet for FreeSync + * VTEM info packet for HdmiVRR + * Check if Freesync is supported. Return if false. If true, * set the corresponding bit in the info packet */ - if (!vrr->supported || !vrr->send_vsif) + if (!vrr->supported || (!vrr->send_info_frame && packet_type != PACKET_TYPE_VTEM)) return; switch (packet_type) { case PACKET_TYPE_FS2: build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket); break; + case PACKET_TYPE_VTEM: + build_vrr_infopacket_vtem(stream, vrr, infopacket); + break; case PACKET_TYPE_VRR: case PACKET_TYPE_FS1: default: @@ -739,7 +823,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, return; in_out_vrr->state = in_config->state; - in_out_vrr->send_vsif = in_config->vsif_supported; + in_out_vrr->send_info_frame = in_config->vsif_supported; if (in_config->state == VRR_STATE_UNSUPPORTED) { in_out_vrr->state = VRR_STATE_UNSUPPORTED; @@ -972,7 +1056,7 @@ unsigned long long mod_freesync_calc_nominal_field_rate( unsigned long long nominal_field_rate_in_uhz = 0; /* Calculate nominal field rate for stream */ - nominal_field_rate_in_uhz = stream->timing.pix_clk_khz; + nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, stream->timing.h_total); diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index 949a8b62aa98..4222e403b151 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -104,7 +104,7 @@ struct mod_vrr_params_fixed_refresh { struct mod_vrr_params { bool supported; - bool send_vsif; + bool send_info_frame; enum mod_vrr_state state; uint32_t min_refresh_in_uhz; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h index 1bd02c0ac30c..b711e7e6c204 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h @@ -41,7 +41,8 @@ enum color_transfer_func { enum vrr_packet_type 
{ PACKET_TYPE_VRR, PACKET_TYPE_FS1, - PACKET_TYPE_FS2 + PACKET_TYPE_FS2, + PACKET_TYPE_VTEM }; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index c11a443dcbc8..038b88221c5f 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -41,6 +41,17 @@ static const unsigned char min_reduction_table[13] = { static const unsigned char max_reduction_table[13] = { 0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32}; +/* ABM 2.2 Min Reduction effectively disabled (100% for all configs)*/ +static const unsigned char min_reduction_table_v_2_2[13] = { +0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + +/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive + * 0 1 2 3 4 5 6 7 8 9 10 11 12 + * 96.1 89.8 74.9 69.4 64.7 52.2 48.6 39.6 30.2 25.1 19.6 12.5 12.5 % + */ +static const unsigned char max_reduction_table_v_2_2[13] = { +0xf5, 0xe5, 0xbf, 0xb1, 0xa5, 0x85, 0x7c, 0x65, 0x4d, 0x40, 0x32, 0x20, 0x20}; + /* Predefined ABM configuration sets. We may have different configuration sets * in order to satisfy different power/quality requirements. */ @@ -56,6 +67,13 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le #define NUM_AGGR_LEVEL 4 #define NUM_POWER_FN_SEGS 8 #define NUM_BL_CURVE_SEGS 16 +#define IRAM_SIZE 256 + +#define IRAM_RESERVE_AREA_START_V2 0xF0 // reserve 0xF0~0xF6 are write by DMCU only +#define IRAM_RESERVE_AREA_END_V2 0xF6 // reserve 0xF0~0xF6 are write by DMCU only + +#define IRAM_RESERVE_AREA_START_V2_2 0xF0 // reserve 0xF0~0xFF are write by DMCU only +#define IRAM_RESERVE_AREA_END_V2_2 0xFF // reserve 0xF0~0xFF are write by DMCU only #pragma pack(push, 1) /* NOTE: iRAM is 256B in size */ @@ -86,11 +104,10 @@ struct iram_table_v_2 { /* For reading PSR State directly from IRAM */ uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_interface_version; /* 0xf1 */ - uint8_t dmcu_date_version_year_b0; /* 0xf2 */ - uint8_t dmcu_date_version_year_b1; /* 0xf3 */ - uint8_t dmcu_date_version_month; /* 0xf4 */ - uint8_t dmcu_date_version_day; /* 0xf5 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ uint8_t dmcu_state; /* 0xf6 */ uint16_t blRampReduction; /* 0xf7 */ @@ -101,20 +118,58 @@ struct iram_table_v_2 { uint8_t dummy8; /* 0xfe */ uint8_t dummy9; /* 0xff */ }; -#pragma pack(pop) -static uint16_t backlight_8_to_16(unsigned int backlight_8bit) -{ - return (uint16_t)(backlight_8bit * 0x101); -} +struct iram_table_v_2_2 { + /* flags */ + uint16_t flags; /* 0x00 U16 */ + + /* parameters for ABM2.2 algorithm */ + uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */ + uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */ + uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */ + uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */ + uint8_t hybridFactor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ + uint8_t contrastFactor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ + uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ + uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ + uint8_t pad[29]; /* 0x63 U0.8 */ + + /* parameters for crgb conversion */ + uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ + uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 
0x90 U1.15 */ + uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ + + /* parameters for custom curve */ + /* thresholds for brightness --> backlight */ + uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ + /* offsets for brightness --> backlight */ + uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ + + /* For reading PSR State directly from IRAM */ + uint8_t psr_state; /* 0xf0 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_state; /* 0xf6 */ + + uint8_t dummy1; /* 0xf7 */ + uint8_t dummy2; /* 0xf8 */ + uint8_t dummy3; /* 0xf9 */ + uint8_t dummy4; /* 0xfa */ + uint8_t dummy5; /* 0xfb */ + uint8_t dummy6; /* 0xfc */ + uint8_t dummy7; /* 0xfd */ + uint8_t dummy8; /* 0xfe */ + uint8_t dummy9; /* 0xff */ +}; +#pragma pack(pop) static void fill_backlight_transform_table(struct dmcu_iram_parameters params, struct iram_table_v_2 *table) { unsigned int i; unsigned int num_entries = NUM_BL_CURVE_SEGS; - unsigned int query_input_8bit; - unsigned int query_output_8bit; unsigned int lut_index; table->backlight_thresholds[0] = 0; @@ -132,24 +187,368 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params, * format U4.10. */ for (i = 1; i+1 < num_entries; i++) { - query_input_8bit = DIV_ROUNDUP((i * 256), num_entries); + lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1); + ASSERT(lut_index < params.backlight_lut_array_size); + + table->backlight_thresholds[i] = + cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)); + table->backlight_offsets[i] = + cpu_to_be16(params.backlight_lut_array[lut_index]); + } +} +static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params, + struct iram_table_v_2_2 *table) +{ + unsigned int i; + unsigned int num_entries = NUM_BL_CURVE_SEGS; + unsigned int lut_index; + + table->backlight_thresholds[0] = 0; + table->backlight_offsets[0] = params.backlight_lut_array[0]; + table->backlight_thresholds[num_entries-1] = 0xFFFF; + table->backlight_offsets[num_entries-1] = + params.backlight_lut_array[params.backlight_lut_array_size - 1]; + + /* Setup all brightness levels between 0% and 100% exclusive + * Fills brightness-to-backlight transform table. Backlight custom curve + * describes transform from brightness to backlight. It will be defined + * as set of thresholds and set of offsets, together, implying + * extrapolation of custom curve into 16 uniformly spanned linear + * segments. Each threshold/offset represented by 16 bit entry in + * format U4.10. 
+ */ + for (i = 1; i+1 < num_entries; i++) { lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1); ASSERT(lut_index < params.backlight_lut_array_size); - query_output_8bit = params.backlight_lut_array[lut_index] >> 8; table->backlight_thresholds[i] = - backlight_8_to_16(query_input_8bit); + cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)); table->backlight_offsets[i] = - backlight_8_to_16(query_output_8bit); + cpu_to_be16(params.backlight_lut_array[lut_index]); } } +void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters params) +{ + unsigned int set = params.set; + + ram_table->flags = 0x0; + ram_table->deviation_gain = 0xb3; + + ram_table->blRampReduction = + cpu_to_be16(params.backlight_ramping_reduction); + ram_table->blRampStart = + cpu_to_be16(params.backlight_ramping_start); + + ram_table->min_reduction[0][0] = min_reduction_table[abm_config[set][0]]; + ram_table->min_reduction[1][0] = min_reduction_table[abm_config[set][0]]; + ram_table->min_reduction[2][0] = min_reduction_table[abm_config[set][0]]; + ram_table->min_reduction[3][0] = min_reduction_table[abm_config[set][0]]; + ram_table->min_reduction[4][0] = min_reduction_table[abm_config[set][0]]; + ram_table->max_reduction[0][0] = max_reduction_table[abm_config[set][0]]; + ram_table->max_reduction[1][0] = max_reduction_table[abm_config[set][0]]; + ram_table->max_reduction[2][0] = max_reduction_table[abm_config[set][0]]; + ram_table->max_reduction[3][0] = max_reduction_table[abm_config[set][0]]; + ram_table->max_reduction[4][0] = max_reduction_table[abm_config[set][0]]; + + ram_table->min_reduction[0][1] = min_reduction_table[abm_config[set][1]]; + ram_table->min_reduction[1][1] = min_reduction_table[abm_config[set][1]]; + ram_table->min_reduction[2][1] = min_reduction_table[abm_config[set][1]]; + ram_table->min_reduction[3][1] = min_reduction_table[abm_config[set][1]]; + ram_table->min_reduction[4][1] = min_reduction_table[abm_config[set][1]]; + ram_table->max_reduction[0][1] = max_reduction_table[abm_config[set][1]]; + ram_table->max_reduction[1][1] = max_reduction_table[abm_config[set][1]]; + ram_table->max_reduction[2][1] = max_reduction_table[abm_config[set][1]]; + ram_table->max_reduction[3][1] = max_reduction_table[abm_config[set][1]]; + ram_table->max_reduction[4][1] = max_reduction_table[abm_config[set][1]]; + + ram_table->min_reduction[0][2] = min_reduction_table[abm_config[set][2]]; + ram_table->min_reduction[1][2] = min_reduction_table[abm_config[set][2]]; + ram_table->min_reduction[2][2] = min_reduction_table[abm_config[set][2]]; + ram_table->min_reduction[3][2] = min_reduction_table[abm_config[set][2]]; + ram_table->min_reduction[4][2] = min_reduction_table[abm_config[set][2]]; + ram_table->max_reduction[0][2] = max_reduction_table[abm_config[set][2]]; + ram_table->max_reduction[1][2] = max_reduction_table[abm_config[set][2]]; + ram_table->max_reduction[2][2] = max_reduction_table[abm_config[set][2]]; + ram_table->max_reduction[3][2] = max_reduction_table[abm_config[set][2]]; + ram_table->max_reduction[4][2] = max_reduction_table[abm_config[set][2]]; + + ram_table->min_reduction[0][3] = min_reduction_table[abm_config[set][3]]; + ram_table->min_reduction[1][3] = min_reduction_table[abm_config[set][3]]; + ram_table->min_reduction[2][3] = min_reduction_table[abm_config[set][3]]; + ram_table->min_reduction[3][3] = min_reduction_table[abm_config[set][3]]; + ram_table->min_reduction[4][3] = min_reduction_table[abm_config[set][3]]; + ram_table->max_reduction[0][3] = 
max_reduction_table[abm_config[set][3]]; + ram_table->max_reduction[1][3] = max_reduction_table[abm_config[set][3]]; + ram_table->max_reduction[2][3] = max_reduction_table[abm_config[set][3]]; + ram_table->max_reduction[3][3] = max_reduction_table[abm_config[set][3]]; + ram_table->max_reduction[4][3] = max_reduction_table[abm_config[set][3]]; + + ram_table->bright_pos_gain[0][0] = 0x20; + ram_table->bright_pos_gain[0][1] = 0x20; + ram_table->bright_pos_gain[0][2] = 0x20; + ram_table->bright_pos_gain[0][3] = 0x20; + ram_table->bright_pos_gain[1][0] = 0x20; + ram_table->bright_pos_gain[1][1] = 0x20; + ram_table->bright_pos_gain[1][2] = 0x20; + ram_table->bright_pos_gain[1][3] = 0x20; + ram_table->bright_pos_gain[2][0] = 0x20; + ram_table->bright_pos_gain[2][1] = 0x20; + ram_table->bright_pos_gain[2][2] = 0x20; + ram_table->bright_pos_gain[2][3] = 0x20; + ram_table->bright_pos_gain[3][0] = 0x20; + ram_table->bright_pos_gain[3][1] = 0x20; + ram_table->bright_pos_gain[3][2] = 0x20; + ram_table->bright_pos_gain[3][3] = 0x20; + ram_table->bright_pos_gain[4][0] = 0x20; + ram_table->bright_pos_gain[4][1] = 0x20; + ram_table->bright_pos_gain[4][2] = 0x20; + ram_table->bright_pos_gain[4][3] = 0x20; + ram_table->bright_neg_gain[0][1] = 0x00; + ram_table->bright_neg_gain[0][2] = 0x00; + ram_table->bright_neg_gain[0][3] = 0x00; + ram_table->bright_neg_gain[1][0] = 0x00; + ram_table->bright_neg_gain[1][1] = 0x00; + ram_table->bright_neg_gain[1][2] = 0x00; + ram_table->bright_neg_gain[1][3] = 0x00; + ram_table->bright_neg_gain[2][0] = 0x00; + ram_table->bright_neg_gain[2][1] = 0x00; + ram_table->bright_neg_gain[2][2] = 0x00; + ram_table->bright_neg_gain[2][3] = 0x00; + ram_table->bright_neg_gain[3][0] = 0x00; + ram_table->bright_neg_gain[3][1] = 0x00; + ram_table->bright_neg_gain[3][2] = 0x00; + ram_table->bright_neg_gain[3][3] = 0x00; + ram_table->bright_neg_gain[4][0] = 0x00; + ram_table->bright_neg_gain[4][1] = 0x00; + ram_table->bright_neg_gain[4][2] = 0x00; + ram_table->bright_neg_gain[4][3] = 0x00; + ram_table->dark_pos_gain[0][0] = 0x00; + ram_table->dark_pos_gain[0][1] = 0x00; + ram_table->dark_pos_gain[0][2] = 0x00; + ram_table->dark_pos_gain[0][3] = 0x00; + ram_table->dark_pos_gain[1][0] = 0x00; + ram_table->dark_pos_gain[1][1] = 0x00; + ram_table->dark_pos_gain[1][2] = 0x00; + ram_table->dark_pos_gain[1][3] = 0x00; + ram_table->dark_pos_gain[2][0] = 0x00; + ram_table->dark_pos_gain[2][1] = 0x00; + ram_table->dark_pos_gain[2][2] = 0x00; + ram_table->dark_pos_gain[2][3] = 0x00; + ram_table->dark_pos_gain[3][0] = 0x00; + ram_table->dark_pos_gain[3][1] = 0x00; + ram_table->dark_pos_gain[3][2] = 0x00; + ram_table->dark_pos_gain[3][3] = 0x00; + ram_table->dark_pos_gain[4][0] = 0x00; + ram_table->dark_pos_gain[4][1] = 0x00; + ram_table->dark_pos_gain[4][2] = 0x00; + ram_table->dark_pos_gain[4][3] = 0x00; + ram_table->dark_neg_gain[0][0] = 0x00; + ram_table->dark_neg_gain[0][1] = 0x00; + ram_table->dark_neg_gain[0][2] = 0x00; + ram_table->dark_neg_gain[0][3] = 0x00; + ram_table->dark_neg_gain[1][0] = 0x00; + ram_table->dark_neg_gain[1][1] = 0x00; + ram_table->dark_neg_gain[1][2] = 0x00; + ram_table->dark_neg_gain[1][3] = 0x00; + ram_table->dark_neg_gain[2][0] = 0x00; + ram_table->dark_neg_gain[2][1] = 0x00; + ram_table->dark_neg_gain[2][2] = 0x00; + ram_table->dark_neg_gain[2][3] = 0x00; + ram_table->dark_neg_gain[3][0] = 0x00; + ram_table->dark_neg_gain[3][1] = 0x00; + ram_table->dark_neg_gain[3][2] = 0x00; + ram_table->dark_neg_gain[3][3] = 0x00; + ram_table->dark_neg_gain[4][0] = 0x00; + 
ram_table->dark_neg_gain[4][1] = 0x00; + ram_table->dark_neg_gain[4][2] = 0x00; + ram_table->dark_neg_gain[4][3] = 0x00; + + ram_table->iir_curve[0] = 0x65; + ram_table->iir_curve[1] = 0x65; + ram_table->iir_curve[2] = 0x65; + ram_table->iir_curve[3] = 0x65; + ram_table->iir_curve[4] = 0x65; + + //Gamma 2.4 + ram_table->crgb_thresh[0] = cpu_to_be16(0x13b6); + ram_table->crgb_thresh[1] = cpu_to_be16(0x1648); + ram_table->crgb_thresh[2] = cpu_to_be16(0x18e3); + ram_table->crgb_thresh[3] = cpu_to_be16(0x1b41); + ram_table->crgb_thresh[4] = cpu_to_be16(0x1d46); + ram_table->crgb_thresh[5] = cpu_to_be16(0x1f21); + ram_table->crgb_thresh[6] = cpu_to_be16(0x2167); + ram_table->crgb_thresh[7] = cpu_to_be16(0x2384); + ram_table->crgb_offset[0] = cpu_to_be16(0x2999); + ram_table->crgb_offset[1] = cpu_to_be16(0x3999); + ram_table->crgb_offset[2] = cpu_to_be16(0x4666); + ram_table->crgb_offset[3] = cpu_to_be16(0x5999); + ram_table->crgb_offset[4] = cpu_to_be16(0x6333); + ram_table->crgb_offset[5] = cpu_to_be16(0x7800); + ram_table->crgb_offset[6] = cpu_to_be16(0x8c00); + ram_table->crgb_offset[7] = cpu_to_be16(0xa000); + ram_table->crgb_slope[0] = cpu_to_be16(0x3147); + ram_table->crgb_slope[1] = cpu_to_be16(0x2978); + ram_table->crgb_slope[2] = cpu_to_be16(0x23a2); + ram_table->crgb_slope[3] = cpu_to_be16(0x1f55); + ram_table->crgb_slope[4] = cpu_to_be16(0x1c63); + ram_table->crgb_slope[5] = cpu_to_be16(0x1a0f); + ram_table->crgb_slope[6] = cpu_to_be16(0x178d); + ram_table->crgb_slope[7] = cpu_to_be16(0x15ab); + + fill_backlight_transform_table( + params, ram_table); +} + +void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params) +{ + unsigned int set = params.set; + + ram_table->flags = 0x0; + + ram_table->deviation_gain[0] = 0xb3; + ram_table->deviation_gain[1] = 0xb3; + ram_table->deviation_gain[2] = 0xb3; + ram_table->deviation_gain[3] = 0xb3; + + ram_table->min_reduction[0][0] = min_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->min_reduction[1][0] = min_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->min_reduction[2][0] = min_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->min_reduction[3][0] = min_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->min_reduction[4][0] = min_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->max_reduction[0][0] = max_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->max_reduction[1][0] = max_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->max_reduction[2][0] = max_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->max_reduction[3][0] = max_reduction_table_v_2_2[abm_config[set][0]]; + ram_table->max_reduction[4][0] = max_reduction_table_v_2_2[abm_config[set][0]]; + + ram_table->min_reduction[0][1] = min_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->min_reduction[1][1] = min_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->min_reduction[2][1] = min_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->min_reduction[3][1] = min_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->min_reduction[4][1] = min_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->max_reduction[0][1] = max_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->max_reduction[1][1] = max_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->max_reduction[2][1] = max_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->max_reduction[3][1] = max_reduction_table_v_2_2[abm_config[set][1]]; + ram_table->max_reduction[4][1] = max_reduction_table_v_2_2[abm_config[set][1]]; + + 
ram_table->min_reduction[0][2] = min_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->min_reduction[1][2] = min_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->min_reduction[2][2] = min_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->min_reduction[3][2] = min_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->min_reduction[4][2] = min_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->max_reduction[0][2] = max_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->max_reduction[1][2] = max_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->max_reduction[2][2] = max_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->max_reduction[3][2] = max_reduction_table_v_2_2[abm_config[set][2]]; + ram_table->max_reduction[4][2] = max_reduction_table_v_2_2[abm_config[set][2]]; + + ram_table->min_reduction[0][3] = min_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->min_reduction[1][3] = min_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->min_reduction[2][3] = min_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->min_reduction[3][3] = min_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->min_reduction[4][3] = min_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->max_reduction[0][3] = max_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->max_reduction[1][3] = max_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->max_reduction[2][3] = max_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->max_reduction[3][3] = max_reduction_table_v_2_2[abm_config[set][3]]; + ram_table->max_reduction[4][3] = max_reduction_table_v_2_2[abm_config[set][3]]; + + ram_table->bright_pos_gain[0][0] = 0x20; + ram_table->bright_pos_gain[0][1] = 0x20; + ram_table->bright_pos_gain[0][2] = 0x20; + ram_table->bright_pos_gain[0][3] = 0x20; + ram_table->bright_pos_gain[1][0] = 0x20; + ram_table->bright_pos_gain[1][1] = 0x20; + ram_table->bright_pos_gain[1][2] = 0x20; + ram_table->bright_pos_gain[1][3] = 0x20; + ram_table->bright_pos_gain[2][0] = 0x20; + ram_table->bright_pos_gain[2][1] = 0x20; + ram_table->bright_pos_gain[2][2] = 0x20; + ram_table->bright_pos_gain[2][3] = 0x20; + ram_table->bright_pos_gain[3][0] = 0x20; + ram_table->bright_pos_gain[3][1] = 0x20; + ram_table->bright_pos_gain[3][2] = 0x20; + ram_table->bright_pos_gain[3][3] = 0x20; + ram_table->bright_pos_gain[4][0] = 0x20; + ram_table->bright_pos_gain[4][1] = 0x20; + ram_table->bright_pos_gain[4][2] = 0x20; + ram_table->bright_pos_gain[4][3] = 0x20; + + ram_table->dark_pos_gain[0][0] = 0x00; + ram_table->dark_pos_gain[0][1] = 0x00; + ram_table->dark_pos_gain[0][2] = 0x00; + ram_table->dark_pos_gain[0][3] = 0x00; + ram_table->dark_pos_gain[1][0] = 0x00; + ram_table->dark_pos_gain[1][1] = 0x00; + ram_table->dark_pos_gain[1][2] = 0x00; + ram_table->dark_pos_gain[1][3] = 0x00; + ram_table->dark_pos_gain[2][0] = 0x00; + ram_table->dark_pos_gain[2][1] = 0x00; + ram_table->dark_pos_gain[2][2] = 0x00; + ram_table->dark_pos_gain[2][3] = 0x00; + ram_table->dark_pos_gain[3][0] = 0x00; + ram_table->dark_pos_gain[3][1] = 0x00; + ram_table->dark_pos_gain[3][2] = 0x00; + ram_table->dark_pos_gain[3][3] = 0x00; + ram_table->dark_pos_gain[4][0] = 0x00; + ram_table->dark_pos_gain[4][1] = 0x00; + ram_table->dark_pos_gain[4][2] = 0x00; + ram_table->dark_pos_gain[4][3] = 0x00; + + ram_table->hybridFactor[0] = 0xff; + ram_table->hybridFactor[1] = 0xff; + ram_table->hybridFactor[2] = 0xff; + ram_table->hybridFactor[3] = 0xc0; + + ram_table->contrastFactor[0] = 0x99; + ram_table->contrastFactor[1] = 0x99; 
+ ram_table->contrastFactor[2] = 0x99; + ram_table->contrastFactor[3] = 0x80; + + ram_table->iir_curve[0] = 0x65; + ram_table->iir_curve[1] = 0x65; + ram_table->iir_curve[2] = 0x65; + ram_table->iir_curve[3] = 0x65; + ram_table->iir_curve[4] = 0x65; + + //Gamma 2.2 + ram_table->crgb_thresh[0] = cpu_to_be16(0x127c); + ram_table->crgb_thresh[1] = cpu_to_be16(0x151b); + ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5); + ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56); + ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83); + ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72); + ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0); + ram_table->crgb_thresh[7] = cpu_to_be16(0x232b); + ram_table->crgb_offset[0] = cpu_to_be16(0x2999); + ram_table->crgb_offset[1] = cpu_to_be16(0x3999); + ram_table->crgb_offset[2] = cpu_to_be16(0x4666); + ram_table->crgb_offset[3] = cpu_to_be16(0x5999); + ram_table->crgb_offset[4] = cpu_to_be16(0x6333); + ram_table->crgb_offset[5] = cpu_to_be16(0x7800); + ram_table->crgb_offset[6] = cpu_to_be16(0x8c00); + ram_table->crgb_offset[7] = cpu_to_be16(0xa000); + ram_table->crgb_slope[0] = cpu_to_be16(0x3609); + ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa); + ram_table->crgb_slope[2] = cpu_to_be16(0x27ea); + ram_table->crgb_slope[3] = cpu_to_be16(0x235d); + ram_table->crgb_slope[4] = cpu_to_be16(0x2042); + ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3); + ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a); + ram_table->crgb_slope[7] = cpu_to_be16(0x1910); + + fill_backlight_transform_table_v_2_2( + params, ram_table); +} + bool dmcu_load_iram(struct dmcu *dmcu, struct dmcu_iram_parameters params) { - struct iram_table_v_2 ram_table; - unsigned int set = params.set; + unsigned char ram_table[IRAM_SIZE]; + bool result = false; if (dmcu == NULL) return false; @@ -159,170 +558,23 @@ bool dmcu_load_iram(struct dmcu *dmcu, memset(&ram_table, 0, sizeof(ram_table)); - ram_table.flags = 0x0; - ram_table.deviation_gain = 0xb3; + if (dmcu->dmcu_version.abm_version == 0x22) { + fill_iram_v_2_2((struct iram_table_v_2_2 *)ram_table, params); - ram_table.blRampReduction = - cpu_to_be16(params.backlight_ramping_reduction); - ram_table.blRampStart = - cpu_to_be16(params.backlight_ramping_start); + result = dmcu->funcs->load_iram( + dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2); + } else { + fill_iram_v_2((struct iram_table_v_2 *)ram_table, params); - ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]]; - ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]]; - ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]]; - ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]]; - ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]]; - ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]]; - ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]]; - ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]]; - ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]]; - ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]]; - - ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]]; - ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]]; - ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]]; - ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]]; - ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]]; - ram_table.max_reduction[0][1] = 
max_reduction_table[abm_config[set][1]]; - ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]]; - ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]]; - ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]]; - ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]]; - - ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]]; - ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]]; - ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]]; - ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]]; - ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]]; - ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]]; - ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]]; - ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]]; - ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]]; - ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]]; - - ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]]; - ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]]; - ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]]; - ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]]; - ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]]; - ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]]; - ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]]; - ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]]; - ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]]; - ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]]; - - ram_table.bright_pos_gain[0][0] = 0x20; - ram_table.bright_pos_gain[0][1] = 0x20; - ram_table.bright_pos_gain[0][2] = 0x20; - ram_table.bright_pos_gain[0][3] = 0x20; - ram_table.bright_pos_gain[1][0] = 0x20; - ram_table.bright_pos_gain[1][1] = 0x20; - ram_table.bright_pos_gain[1][2] = 0x20; - ram_table.bright_pos_gain[1][3] = 0x20; - ram_table.bright_pos_gain[2][0] = 0x20; - ram_table.bright_pos_gain[2][1] = 0x20; - ram_table.bright_pos_gain[2][2] = 0x20; - ram_table.bright_pos_gain[2][3] = 0x20; - ram_table.bright_pos_gain[3][0] = 0x20; - ram_table.bright_pos_gain[3][1] = 0x20; - ram_table.bright_pos_gain[3][2] = 0x20; - ram_table.bright_pos_gain[3][3] = 0x20; - ram_table.bright_pos_gain[4][0] = 0x20; - ram_table.bright_pos_gain[4][1] = 0x20; - ram_table.bright_pos_gain[4][2] = 0x20; - ram_table.bright_pos_gain[4][3] = 0x20; - ram_table.bright_neg_gain[0][1] = 0x00; - ram_table.bright_neg_gain[0][2] = 0x00; - ram_table.bright_neg_gain[0][3] = 0x00; - ram_table.bright_neg_gain[1][0] = 0x00; - ram_table.bright_neg_gain[1][1] = 0x00; - ram_table.bright_neg_gain[1][2] = 0x00; - ram_table.bright_neg_gain[1][3] = 0x00; - ram_table.bright_neg_gain[2][0] = 0x00; - ram_table.bright_neg_gain[2][1] = 0x00; - ram_table.bright_neg_gain[2][2] = 0x00; - ram_table.bright_neg_gain[2][3] = 0x00; - ram_table.bright_neg_gain[3][0] = 0x00; - ram_table.bright_neg_gain[3][1] = 0x00; - ram_table.bright_neg_gain[3][2] = 0x00; - ram_table.bright_neg_gain[3][3] = 0x00; - ram_table.bright_neg_gain[4][0] = 0x00; - ram_table.bright_neg_gain[4][1] = 0x00; - ram_table.bright_neg_gain[4][2] = 0x00; - ram_table.bright_neg_gain[4][3] = 0x00; - ram_table.dark_pos_gain[0][0] = 0x00; - 
ram_table.dark_pos_gain[0][1] = 0x00; - ram_table.dark_pos_gain[0][2] = 0x00; - ram_table.dark_pos_gain[0][3] = 0x00; - ram_table.dark_pos_gain[1][0] = 0x00; - ram_table.dark_pos_gain[1][1] = 0x00; - ram_table.dark_pos_gain[1][2] = 0x00; - ram_table.dark_pos_gain[1][3] = 0x00; - ram_table.dark_pos_gain[2][0] = 0x00; - ram_table.dark_pos_gain[2][1] = 0x00; - ram_table.dark_pos_gain[2][2] = 0x00; - ram_table.dark_pos_gain[2][3] = 0x00; - ram_table.dark_pos_gain[3][0] = 0x00; - ram_table.dark_pos_gain[3][1] = 0x00; - ram_table.dark_pos_gain[3][2] = 0x00; - ram_table.dark_pos_gain[3][3] = 0x00; - ram_table.dark_pos_gain[4][0] = 0x00; - ram_table.dark_pos_gain[4][1] = 0x00; - ram_table.dark_pos_gain[4][2] = 0x00; - ram_table.dark_pos_gain[4][3] = 0x00; - ram_table.dark_neg_gain[0][0] = 0x00; - ram_table.dark_neg_gain[0][1] = 0x00; - ram_table.dark_neg_gain[0][2] = 0x00; - ram_table.dark_neg_gain[0][3] = 0x00; - ram_table.dark_neg_gain[1][0] = 0x00; - ram_table.dark_neg_gain[1][1] = 0x00; - ram_table.dark_neg_gain[1][2] = 0x00; - ram_table.dark_neg_gain[1][3] = 0x00; - ram_table.dark_neg_gain[2][0] = 0x00; - ram_table.dark_neg_gain[2][1] = 0x00; - ram_table.dark_neg_gain[2][2] = 0x00; - ram_table.dark_neg_gain[2][3] = 0x00; - ram_table.dark_neg_gain[3][0] = 0x00; - ram_table.dark_neg_gain[3][1] = 0x00; - ram_table.dark_neg_gain[3][2] = 0x00; - ram_table.dark_neg_gain[3][3] = 0x00; - ram_table.dark_neg_gain[4][0] = 0x00; - ram_table.dark_neg_gain[4][1] = 0x00; - ram_table.dark_neg_gain[4][2] = 0x00; - ram_table.dark_neg_gain[4][3] = 0x00; - ram_table.iir_curve[0] = 0x65; - ram_table.iir_curve[1] = 0x65; - ram_table.iir_curve[2] = 0x65; - ram_table.iir_curve[3] = 0x65; - ram_table.iir_curve[4] = 0x65; - ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6); - ram_table.crgb_thresh[1] = cpu_to_be16(0x1648); - ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3); - ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41); - ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46); - ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21); - ram_table.crgb_thresh[6] = cpu_to_be16(0x2167); - ram_table.crgb_thresh[7] = cpu_to_be16(0x2384); - ram_table.crgb_offset[0] = cpu_to_be16(0x2999); - ram_table.crgb_offset[1] = cpu_to_be16(0x3999); - ram_table.crgb_offset[2] = cpu_to_be16(0x4666); - ram_table.crgb_offset[3] = cpu_to_be16(0x5999); - ram_table.crgb_offset[4] = cpu_to_be16(0x6333); - ram_table.crgb_offset[5] = cpu_to_be16(0x7800); - ram_table.crgb_offset[6] = cpu_to_be16(0x8c00); - ram_table.crgb_offset[7] = cpu_to_be16(0xa000); - ram_table.crgb_slope[0] = cpu_to_be16(0x3147); - ram_table.crgb_slope[1] = cpu_to_be16(0x2978); - ram_table.crgb_slope[2] = cpu_to_be16(0x23a2); - ram_table.crgb_slope[3] = cpu_to_be16(0x1f55); - ram_table.crgb_slope[4] = cpu_to_be16(0x1c63); - ram_table.crgb_slope[5] = cpu_to_be16(0x1a0f); - ram_table.crgb_slope[6] = cpu_to_be16(0x178d); - ram_table.crgb_slope[7] = cpu_to_be16(0x15ab); + result = dmcu->funcs->load_iram( + dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2); - fill_backlight_transform_table( - params, &ram_table); + if (result) + result = dmcu->funcs->load_iram( + dmcu, IRAM_RESERVE_AREA_END_V2 + 1, + (char *)(&ram_table) + IRAM_RESERVE_AREA_END_V2 + 1, + sizeof(ram_table) - IRAM_RESERVE_AREA_END_V2 - 1); + } - return dmcu->funcs->load_iram( - dmcu, 0, (char *)(&ram_table), sizeof(ram_table)); + return result; } diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h index 13d4de645190..d8e0dd192fdd 100644 --- 
a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h @@ -2247,6 +2247,8 @@ // addressBlock: nbio_nbif_rcc_strap_BIFDEC1[13440..14975] // base address: 0x3480 +#define mmRCC_BIF_STRAP0 0x0000 +#define mmRCC_BIF_STRAP0_BASE_IDX 2 #define mmRCC_DEV0_EPF0_STRAP0 0x000f #define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h index a02b67943372..29af5167cd00 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h @@ -16838,6 +16838,10 @@ // addressBlock: nbio_nbif_rcc_strap_BIFDEC1[13440..14975] +//RCC_BIF_STRAP0 +#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7 +#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L + //RCC_DEV0_EPF0_STRAP0 #define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0 #define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h new file mode 100644 index 000000000000..8c75669eb500 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _nbio_6_1_SMN_HEADER +#define _nbio_6_1_SMN_HEADER + + +#define smnCPM_CONTROL 0x11180460 +#define smnPCIE_CNTL2 0x11180070 +#define smnPCIE_CONFIG_CNTL 0x11180044 +#define smnPCIE_CI_CNTL 0x11180080 + + +#define smnPCIE_PERF_COUNT_CNTL 0x11180200 +#define smnPCIE_PERF_CNTL_TXCLK 0x11180204 +#define smnPCIE_PERF_COUNT0_TXCLK 0x11180208 +#define smnPCIE_PERF_COUNT1_TXCLK 0x1118020c +#define smnPCIE_PERF_CNTL_MST_R_CLK 0x11180210 +#define smnPCIE_PERF_COUNT0_MST_R_CLK 0x11180214 +#define smnPCIE_PERF_COUNT1_MST_R_CLK 0x11180218 +#define smnPCIE_PERF_CNTL_MST_C_CLK 0x1118021c +#define smnPCIE_PERF_COUNT0_MST_C_CLK 0x11180220 +#define smnPCIE_PERF_COUNT1_MST_C_CLK 0x11180224 +#define smnPCIE_PERF_CNTL_SLV_R_CLK 0x11180228 +#define smnPCIE_PERF_COUNT0_SLV_R_CLK 0x1118022c +#define smnPCIE_PERF_COUNT1_SLV_R_CLK 0x11180230 +#define smnPCIE_PERF_CNTL_SLV_S_C_CLK 0x11180234 +#define smnPCIE_PERF_COUNT0_SLV_S_C_CLK 0x11180238 +#define smnPCIE_PERF_COUNT1_SLV_S_C_CLK 0x1118023c +#define smnPCIE_PERF_CNTL_SLV_NS_C_CLK 0x11180240 +#define smnPCIE_PERF_COUNT0_SLV_NS_C_CLK 0x11180244 +#define smnPCIE_PERF_COUNT1_SLV_NS_C_CLK 0x11180248 +#define smnPCIE_PERF_CNTL_EVENT0_PORT_SEL 0x1118024c +#define smnPCIE_PERF_CNTL_EVENT1_PORT_SEL 0x11180250 +#define smnPCIE_PERF_CNTL_TXCLK2 0x11180254 +#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258 +#define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c + +#endif // _nbio_6_1_SMN_HEADER + diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h new file mode 100644 index 000000000000..5563f0715896 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _nbio_7_0_SMN_HEADER +#define _nbio_7_0_SMN_HEADER + + +#define smnCPM_CONTROL 0x11180460 +#define smnPCIE_CNTL2 0x11180070 + +#define smnPCIE_PERF_COUNT_CNTL 0x11180200 +#define smnPCIE_PERF_CNTL_TXCLK 0x11180204 +#define smnPCIE_PERF_COUNT0_TXCLK 0x11180208 +#define smnPCIE_PERF_COUNT1_TXCLK 0x1118020c +#define smnPCIE_PERF_CNTL_MST_R_CLK 0x11180210 +#define smnPCIE_PERF_COUNT0_MST_R_CLK 0x11180214 +#define smnPCIE_PERF_COUNT1_MST_R_CLK 0x11180218 +#define smnPCIE_PERF_CNTL_MST_C_CLK 0x1118021c +#define smnPCIE_PERF_COUNT0_MST_C_CLK 0x11180220 +#define smnPCIE_PERF_COUNT1_MST_C_CLK 0x11180224 +#define smnPCIE_PERF_CNTL_SLV_R_CLK 0x11180228 +#define smnPCIE_PERF_COUNT0_SLV_R_CLK 0x1118022c +#define smnPCIE_PERF_COUNT1_SLV_R_CLK 0x11180230 +#define smnPCIE_PERF_CNTL_SLV_S_C_CLK 0x11180234 +#define smnPCIE_PERF_COUNT0_SLV_S_C_CLK 0x11180238 +#define smnPCIE_PERF_COUNT1_SLV_S_C_CLK 0x1118023c +#define smnPCIE_PERF_CNTL_SLV_NS_C_CLK 0x11180240 +#define smnPCIE_PERF_COUNT0_SLV_NS_C_CLK 0x11180244 +#define smnPCIE_PERF_COUNT1_SLV_NS_C_CLK 0x11180248 +#define smnPCIE_PERF_CNTL_EVENT0_PORT_SEL 0x1118024c +#define smnPCIE_PERF_CNTL_EVENT1_PORT_SEL 0x11180250 +#define smnPCIE_PERF_CNTL_TXCLK2 0x11180254 +#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258 +#define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c + +#endif // _nbio_7_0_SMN_HEADER diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h new file mode 100644 index 000000000000..c1457d880c4d --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _nbio_7_4_0_SMN_HEADER +#define _nbio_7_4_0_SMN_HEADER + + +#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c +#define smnCPM_CONTROL 0x11180460 +#define smnPCIE_CNTL2 0x11180070 +#define smnPCIE_CI_CNTL 0x11180080 + +#define smnPCIE_PERF_COUNT_CNTL 0x11180200 +#define smnPCIE_PERF_CNTL_TXCLK1 0x11180204 +#define smnPCIE_PERF_COUNT0_TXCLK1 0x11180208 +#define smnPCIE_PERF_COUNT1_TXCLK1 0x1118020c +#define smnPCIE_PERF_CNTL_TXCLK2 0x11180210 +#define smnPCIE_PERF_COUNT0_TXCLK2 0x11180214 +#define smnPCIE_PERF_COUNT1_TXCLK2 0x11180218 +#define smnPCIE_PERF_CNTL_TXCLK3 0x1118021c +#define smnPCIE_PERF_COUNT0_TXCLK3 0x11180220 +#define smnPCIE_PERF_COUNT1_TXCLK3 0x11180224 +#define smnPCIE_PERF_CNTL_TXCLK4 0x11180228 +#define smnPCIE_PERF_COUNT0_TXCLK4 0x1118022c +#define smnPCIE_PERF_COUNT1_TXCLK4 0x11180230 +#define smnPCIE_PERF_CNTL_SCLK1 0x11180234 +#define smnPCIE_PERF_COUNT0_SCLK1 0x11180238 +#define smnPCIE_PERF_COUNT1_SCLK1 0x1118023c +#define smnPCIE_PERF_CNTL_SCLK2 0x11180240 +#define smnPCIE_PERF_COUNT0_SCLK2 0x11180244 +#define smnPCIE_PERF_COUNT1_SCLK2 0x11180248 +#define smnPCIE_PERF_CNTL_EVENT_LC_PORT_SEL 0x1118024c +#define smnPCIE_PERF_CNTL_EVENT_CI_PORT_SEL 0x11180250 + +#endif // _nbio_7_4_0_SMN_HEADER diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h index e932213f87f0..994e796a28d7 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h @@ -2567,6 +2567,8 @@ // addressBlock: nbio_nbif0_rcc_strap_BIFDEC1 // base address: 0x0 +#define mmRCC_BIF_STRAP0 0x0000 +#define mmRCC_BIF_STRAP0_BASE_IDX 2 #define mmRCC_DEV0_EPF0_STRAP0 0x0011 #define mmRCC_DEV0_EPF0_STRAP0_BASE_IDX 2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h index d3704b438f2d..d467b939c971 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h @@ -19690,6 +19690,9 @@ // addressBlock: nbio_nbif0_rcc_strap_BIFDEC1 +//RCC_BIF_STRAP0 +#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE__SHIFT 0x7 +#define RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK 0x00000080L //RCC_DEV0_EPF0_STRAP0 #define RCC_DEV0_EPF0_STRAP0__STRAP_DEVICE_ID_DEV0_F0__SHIFT 0x0 #define RCC_DEV0_EPF0_STRAP0__STRAP_MAJOR_REV_ID_DEV0_F0__SHIFT 0x10 diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h index a9eb57a53e59..a485526f3a51 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h @@ -46,4 +46,7 @@ #define mmTHM_TCON_THERM_TRIP 0x0002 #define mmTHM_TCON_THERM_TRIP_BASE_IDX 0 +#define mmTHM_BACO_CNTL 0x0081 +#define mmTHM_BACO_CNTL_BASE_IDX 0 + #endif diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 7931502fa54f..8ba21747b40a 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h @@ -4106,7 +4106,7 @@ typedef struct _ATOM_LCD_MODE_CONTROL_CAP typedef struct _ATOM_FAKE_EDID_PATCH_RECORD { UCHAR ucRecordType; - UCHAR ucFakeEDIDLength; // = 128 means EDID lenght is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128 + UCHAR ucFakeEDIDLength; // = 128 means EDID length is 128 bytes, otherwise the EDID length = ucFakeEDIDLength*128 UCHAR ucFakeEDIDString[1]; 
// This actually has ucFakeEdidLength elements. } ATOM_FAKE_EDID_PATCH_RECORD; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 8154d67388cc..5f3c10ebff08 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -34,7 +34,6 @@ struct pci_dev; -#define KFD_INTERFACE_VERSION 2 #define KGD_MAX_QUEUES 128 struct kfd_dev; @@ -138,20 +137,17 @@ struct kgd2kfd_shared_resources { /* Bit n == 1 means Queue n is available for KFD */ DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES); - /* Doorbell assignments (SOC15 and later chips only). Only + /* SDMA doorbell assignments (SOC15 and later chips only). Only * specific doorbells are routed to each SDMA engine. Others * are routed to IH and VCN. They are not usable by the CP. - * - * Any doorbell number D that satisfies the following condition - * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val - * - * KFD currently uses 1024 (= 0x3ff) doorbells per process. If - * doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved, that means - * mask would be set to 0x1e0 and val set to 0x0e0. */ - unsigned int sdma_doorbell[2][8]; - unsigned int reserved_doorbell_mask; - unsigned int reserved_doorbell_val; + uint32_t *sdma_doorbell_idx; + + /* From SOC15 onward, the doorbell index range not usable for CP + * queues. + */ + uint32_t non_cp_doorbells_start; + uint32_t non_cp_doorbells_end; /* Base address of doorbell aperture. */ phys_addr_t doorbell_physical_address; @@ -330,56 +326,4 @@ struct kfd2kgd_calls { }; -/** - * struct kgd2kfd_calls - * - * @exit: Notifies amdkfd that kgd module is unloaded - * - * @probe: Notifies amdkfd about a probe done on a device in the kgd driver. - * - * @device_init: Initialize the newly probed device (if it is a device that - * amdkfd supports) - * - * @device_exit: Notifies amdkfd about a removal of a kgd device - * - * @suspend: Notifies amdkfd about a suspend action done to a kgd device - * - * @resume: Notifies amdkfd about a resume action done to a kgd device - * - * @quiesce_mm: Quiesce all user queue access to specified MM address space - * - * @resume_mm: Resume user queue access to specified MM address space - * - * @schedule_evict_and_restore_process: Schedules work queue that will prepare - * for safe eviction of KFD BOs that belong to the specified process. - * - * @pre_reset: Notifies amdkfd that amdgpu about to reset the gpu - * - * @post_reset: Notify amdkfd that amgpu successfully reseted the gpu - * - * This structure contains function callback pointers so the kgd driver - * will notify to the amdkfd about certain status changes. 
- * - */ -struct kgd2kfd_calls { - void (*exit)(void); - struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev, - const struct kfd2kgd_calls *f2g); - bool (*device_init)(struct kfd_dev *kfd, - const struct kgd2kfd_shared_resources *gpu_resources); - void (*device_exit)(struct kfd_dev *kfd); - void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry); - void (*suspend)(struct kfd_dev *kfd); - int (*resume)(struct kfd_dev *kfd); - int (*quiesce_mm)(struct mm_struct *mm); - int (*resume_mm)(struct mm_struct *mm); - int (*schedule_evict_and_restore_process)(struct mm_struct *mm, - struct dma_fence *fence); - int (*pre_reset)(struct kfd_dev *kfd); - int (*post_reset)(struct kfd_dev *kfd); -}; - -int kgd2kfd_init(unsigned interface_version, - const struct kgd2kfd_calls **g2f); - #endif /* KGD_KFD_INTERFACE_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 789c4f288485..2b579ba9b685 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -92,6 +92,9 @@ enum pp_clock_type { PP_SCLK, PP_MCLK, PP_PCIE, + PP_SOCCLK, + PP_FCLK, + PP_DCEFCLK, OD_SCLK, OD_MCLK, OD_VDDC_CURVE, @@ -281,6 +284,11 @@ struct amd_pm_funcs { int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock); int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock); int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock); + int (*get_asic_baco_capability)(void *handle, bool *cap); + int (*get_asic_baco_state)(void *handle, int *state); + int (*set_asic_baco_state)(void *handle, int state); + int (*get_ppfeature_status)(void *handle, char *buf); + int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks); }; #endif diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 9bc27f468d5b..3f73f7cd18b9 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -1404,6 +1404,97 @@ static int pp_set_active_display_count(void *handle, uint32_t count) return ret; } +static int pp_get_asic_baco_capability(void *handle, bool *cap) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_get_asic_baco_state(void *handle, int *state) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_set_asic_baco_state(void *handle, int state) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_get_ppfeature_status(void *handle, char *buf) +{ + struct pp_hwmgr *hwmgr = handle; + int ret = 0; + + if (!hwmgr || !hwmgr->pm_en || !buf) + return -EINVAL; + + if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) { + pr_info_ratelimited("%s was not implemented.\n", __func__); + return 
-EINVAL; + } + + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf); + mutex_unlock(&hwmgr->smu_lock); + + return ret; +} + +static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks) +{ + struct pp_hwmgr *hwmgr = handle; + int ret = 0; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) { + pr_info_ratelimited("%s was not implemented.\n", __func__); + return -EINVAL; + } + + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks); + mutex_unlock(&hwmgr->smu_lock); + + return ret; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1454,4 +1545,9 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk, .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq, .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq, + .get_asic_baco_capability = pp_get_asic_baco_capability, + .get_asic_baco_state = pp_get_asic_baco_state, + .set_asic_baco_state = pp_set_asic_baco_state, + .get_ppfeature_status = pp_get_ppfeature_status, + .set_ppfeature_status = pp_set_ppfeature_status, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index ade8973b6f4d..0b3c6d1d52e4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -35,7 +35,7 @@ HARDWARE_MGR = hwmgr.o processpptables.o \ vega12_thermal.o \ pp_overdriver.o smu_helper.o \ vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \ - vega20_thermal.o + vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c new file mode 100644 index 000000000000..9c57c1f67749 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c @@ -0,0 +1,101 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "common_baco.h" + + +static bool baco_wait_register(struct pp_hwmgr *hwmgr, u32 reg, u32 mask, u32 value) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + u32 timeout = 5000, data; + + do { + msleep(1); + data = RREG32(reg); + timeout--; + } while (value != (data & mask) && (timeout != 0)); + + if (timeout == 0) + return false; + + return true; +} + +static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 mask, + u32 shift, u32 value, u32 timeout) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + u32 data; + bool ret = true; + + switch (command) { + case CMD_WRITE: + WREG32(reg, value << shift); + break; + case CMD_READMODIFYWRITE: + data = RREG32(reg); + data = (data & (~mask)) | (value << shift); + WREG32(reg, data); + break; + case CMD_WAITFOR: + ret = baco_wait_register(hwmgr, reg, mask, value); + break; + case CMD_DELAY_MS: + if (timeout) + /* Delay in milli Seconds */ + msleep(timeout); + break; + case CMD_DELAY_US: + if (timeout) + /* Delay in micro Seconds */ + udelay(timeout); + break; + + default: + dev_warn(adev->dev, "Invalid BACO command.\n"); + ret = false; + } + + return ret; +} + +bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, + const struct soc15_baco_cmd_entry *entry, + const u32 array_size) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + u32 i, reg = 0; + + for (i = 0; i < array_size; i++) { + if ((entry[i].cmd == CMD_WRITE) || + (entry[i].cmd == CMD_READMODIFYWRITE) || + (entry[i].cmd == CMD_WAITFOR)) + reg = adev->reg_offset[entry[i].hwip][entry[i].inst][entry[i].seg] + + entry[i].reg_offset; + if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask, + entry[i].shift, entry[i].val, entry[i].timeout)) + return false; + } + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h similarity index 65% rename from drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h rename to drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h index 26355c088746..95296c916f4e 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h @@ -1,5 +1,5 @@ /* - * Copyright 2012-15 Advanced Micro Devices, Inc. + * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -19,25 +19,32 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* - * Authors: AMD - * */ +#ifndef __COMMON_BOCO_H__ +#define __COMMON_BOCO_H__ +#include "hwmgr.h" -#ifndef __DAL_I2C_SW_ENGINE_DCE80_H__ -#define __DAL_I2C_SW_ENGINE_DCE80_H__ -struct i2c_sw_engine_dce80 { - struct i2c_sw_engine base; - uint32_t engine_id; +enum baco_cmd_type { + CMD_WRITE = 0, + CMD_READMODIFYWRITE, + CMD_WAITFOR, + CMD_DELAY_MS, + CMD_DELAY_US, }; -struct i2c_sw_engine_dce80_create_arg { - uint32_t engine_id; - uint32_t default_speed; - struct dc_context *ctx; +struct soc15_baco_cmd_entry { + enum baco_cmd_type cmd; + uint32_t hwip; + uint32_t inst; + uint32_t seg; + uint32_t reg_offset; + uint32_t mask; + uint32_t shift; + uint32_t timeout; + uint32_t val; }; - -struct i2c_engine *dal_i2c_sw_engine_dce80_create( - const struct i2c_sw_engine_dce80_create_arg *arg); - +extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, + const struct soc15_baco_cmd_entry *entry, + const u32 array_size); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 1f92a9f4c9e3..c1c51c115e57 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -154,15 +154,6 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr) return 0; } -int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr) -{ - PHM_FUNC_CHECK(hwmgr); - - if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating) - return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr); - - return 0; -} int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 310b102a9292..6cd6497c6fc2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -273,7 +273,7 @@ int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) phm_stop_thermal_controller(hwmgr); psm_set_boot_states(hwmgr); - psm_adjust_power_state_dynamic(hwmgr, false, NULL); + psm_adjust_power_state_dynamic(hwmgr, true, NULL); phm_disable_dynamic_state_management(hwmgr); phm_disable_clock_power_gatings(hwmgr); @@ -295,7 +295,7 @@ int hwmgr_suspend(struct pp_hwmgr *hwmgr) ret = psm_set_boot_states(hwmgr); if (ret) return ret; - ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL); if (ret) return ret; ret = phm_power_down_asic(hwmgr); @@ -325,7 +325,7 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr) if (ret) return ret; - ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL); return ret; } @@ -379,12 +379,12 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps); if (ret) return ret; - ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps); + ret = psm_adjust_power_state_dynamic(hwmgr, true, requested_ps); break; } case AMD_PP_TASK_COMPLETE_INIT: case AMD_PP_TASK_READJUST_POWER_STATE: - ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL); + ret = psm_adjust_power_state_dynamic(hwmgr, true, NULL); break; default: break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 56437866d120..ce177d7f04cb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -256,16 +256,14 @@ static void power_state_management(struct pp_hwmgr *hwmgr, } } -int 
psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, +int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_settings, struct pp_power_state *new_ps) { uint32_t index; long workload; - if (skip) - return 0; - - phm_display_configuration_changed(hwmgr); + if (!skip_display_settings) + phm_display_configuration_changed(hwmgr); if (hwmgr->ps) power_state_management(hwmgr, new_ps); @@ -276,9 +274,11 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, */ phm_apply_clock_adjust_rules(hwmgr); - phm_notify_smc_display_config_after_ps_adjustment(hwmgr); + if (!skip_display_settings) + phm_notify_smc_display_config_after_ps_adjustment(hwmgr); - if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) + if ((hwmgr->request_dpm_level != hwmgr->dpm_level) && + !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) hwmgr->dpm_level = hwmgr->request_dpm_level; if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h index fa1b6825036a..b62d55f1f289 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h @@ -34,7 +34,7 @@ int psm_set_user_performance_state(struct pp_hwmgr *hwmgr, enum PP_StateUILabel label_id, struct pp_power_state **state); int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, - bool skip, + bool skip_display_settings, struct pp_power_state *new_ps); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f95c5f50eb0f..0ad8fe4a6277 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -139,12 +139,10 @@ static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr, static int smu10_init_dynamic_state_adjustment_rule_settings( struct pp_hwmgr *hwmgr) { - uint32_t table_size = - sizeof(struct phm_clock_voltage_dependency_table) + - (7 * sizeof(struct phm_clock_voltage_dependency_record)); + struct phm_clock_voltage_dependency_table *table_clk_vlt; - struct phm_clock_voltage_dependency_table *table_clk_vlt = - kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7), + GFP_KERNEL); if (NULL == table_clk_vlt) { pr_err("Can not allocate memory!\n"); @@ -1033,6 +1031,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, break; case amd_pp_dpp_clock: pclk_vol_table = pinfo->vdd_dep_on_dppclk; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c8f5c00dd1e7..48187acac59e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3681,10 +3681,12 @@ static int smu7_request_link_speed_change_before_state_change( data->force_pcie_gen = PP_PCIEGen2; if (current_link_speed == PP_PCIEGen2) break; + /* fall through */ case PP_PCIEGen2: if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) break; #endif + /* fall through */ default: data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index d138ddae563d..58f5589aaf12 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -1211,7 
+1211,7 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.TDPAdjustment : (-1 * hwmgr->platform_descriptor.TDPAdjustment); - if (hwmgr->chip_id > CHIP_TONGA) + if (hwmgr->chip_id > CHIP_TONGA) target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100; else target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index 553a203ac47c..019d6a206492 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -272,12 +272,10 @@ static int smu8_init_dynamic_state_adjustment_rule_settings( struct pp_hwmgr *hwmgr, ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table) { - uint32_t table_size = - sizeof(struct phm_clock_voltage_dependency_table) + - (7 * sizeof(struct phm_clock_voltage_dependency_record)); + struct phm_clock_voltage_dependency_table *table_clk_vlt; - struct phm_clock_voltage_dependency_table *table_clk_vlt = - kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 7), + GFP_KERNEL); if (NULL == table_clk_vlt) { pr_err("Can not allocate memory!\n"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c new file mode 100644 index 000000000000..7337be5602e4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c @@ -0,0 +1,158 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "soc15.h" +#include "soc15_hw_ip.h" +#include "vega10_ip_offset.h" +#include "soc15_common.h" +#include "vega10_inc.h" +#include "vega10_ppsmc.h" +#include "vega10_baco.h" + + + +static const struct soc15_baco_cmd_entry pre_baco_tbl[] = +{ + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_DOORBELL_CNTL), BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 1}, + {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIF_FB_EN), 0, 0, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1} +}; + +static const struct soc15_baco_cmd_entry enter_baco_tbl[] = +{ + {CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT,0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT,0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 1}, + {CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 5, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 0}, + {CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100} +}; + +static const struct soc15_baco_cmd_entry exit_baco_tbl[] = +{ + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0}, + {CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10,0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0,0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, 
mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT,0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_EXIT_MASK, THM_BACO_CNTL__BACO_EXIT__SHIFT, 0, 1}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0}, + {CMD_WAITFOR, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_EXIT_MASK, 0, 0xffffffff, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(THM, 0, mmTHM_BACO_CNTL), THM_BACO_CNTL__BACO_SB_AXI_FENCE_MASK, THM_BACO_CNTL__BACO_SB_AXI_FENCE__SHIFT, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK ,BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0}, + {CMD_READMODIFYWRITE, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_EN_MASK , BACO_CNTL__BACO_EN__SHIFT, 0,0}, + {CMD_WAITFOR, SOC15_REG_ENTRY(NBIF, 0, mmBACO_CNTL), BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0} + }; + +static const struct soc15_baco_cmd_entry clean_baco_tbl[] = +{ + {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0}, + {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, +}; + +int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg, data; + + *cap = false; + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) + return 0; + + WREG32(0x12074, 0xFFF0003B); + data = RREG32(0x12075); + + if (data == 0x1) { + reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); + + if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) + *cap = true; + } + + return 0; +} + +int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL); + + if (reg & BACO_CNTL__BACO_MODE_MASK) + /* gfx has already entered BACO state */ + *state = BACO_STATE_IN; + else + *state = BACO_STATE_OUT; + return 0; +} + +int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + vega10_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* aisc already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + if (soc15_baco_program_registers(hwmgr, pre_baco_tbl, + ARRAY_SIZE(pre_baco_tbl))) { + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco)) + return -EINVAL; + + if (soc15_baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + } + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if 
(soc15_baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (soc15_baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h similarity index 74% rename from drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h rename to drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h index 21908629e973..f7a3ffa744b3 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h @@ -1,5 +1,5 @@ /* - * Copyright 2012-15 Advanced Micro Devices, Inc. + * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -19,20 +19,14 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * - * Authors: AMD - * */ +#ifndef __VEGA10_BACO_H__ +#define __VEGA10_BACO_H__ +#include "hwmgr.h" +#include "common_baco.h" -#ifndef __DAL_I2C_AUX_DCE80_H__ -#define __DAL_I2C_AUX_DCE80_H__ - -struct i2caux_dce80 { - struct i2caux base; - /* indicate the I2C HW circular buffer is in use */ - bool i2c_hw_buffer_in_use; -}; - -struct i2caux *dal_i2caux_dce80_create( - struct dc_context *ctx); +extern int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); +extern int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 91e3bbe6d61d..5479125ff4f6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -48,6 +48,7 @@ #include "ppinterrupt.h" #include "pp_overdriver.h" #include "pp_thermal.h" +#include "vega10_baco.h" #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" @@ -71,6 +72,21 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +typedef enum { + CLK_SMNCLK = 0, + CLK_SOCCLK, + CLK_MP0CLK, + CLK_MP1CLK, + CLK_LCLK, + CLK_DCEFCLK, + CLK_VCLK, + CLK_DCLK, + CLK_ECLK, + CLK_UCLK, + CLK_GFXCLK, + CLK_COUNT, +} CLOCK_ID_e; + static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); struct vega10_power_state *cast_phw_vega10_power_state( @@ -3485,6 +3501,17 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) } } + if (!data->registry_data.socclk_dpm_key_disabled) { + if (data->smc_state_table.soc_boot_level != + data->dpm_table.soc_table.dpm_state.soft_min_level) { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMinSocclkByIndex, + data->smc_state_table.soc_boot_level); + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->smc_state_table.soc_boot_level; + } + } + return 0; } @@ -3516,6 +3543,17 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) } } + if (!data->registry_data.socclk_dpm_key_disabled) { + if (data->smc_state_table.soc_max_level != + data->dpm_table.soc_table.dpm_state.soft_max_level) { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMaxSocclkByIndex, + data->smc_state_table.soc_max_level); 
+ data->dpm_table.soc_table.dpm_state.soft_max_level = + data->smc_state_table.soc_max_level; + } + } + return 0; } @@ -3541,6 +3579,10 @@ static int vega10_generate_dpm_level_enable_mask( vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); data->smc_state_table.mem_max_level = vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); + data->smc_state_table.soc_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.soc_table)); + data->smc_state_table.soc_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.soc_table)); PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), "Attempt to upload DPM Bootup Levels Failed!", @@ -3555,6 +3597,9 @@ static int vega10_generate_dpm_level_enable_mask( for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++) data->dpm_table.mem_table.dpm_levels[i].enabled = true; + for (i = data->smc_state_table.soc_boot_level; i < data->smc_state_table.soc_max_level; i++) + data->dpm_table.soc_table.dpm_levels[i].enabled = true; + return 0; } @@ -4028,6 +4073,24 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, break; + case PP_SOCCLK: + data->smc_state_table.soc_boot_level = mask ? (ffs(mask) - 1) : 0; + data->smc_state_table.soc_max_level = mask ? (fls(mask) - 1) : 0; + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to lowest!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -EINVAL); + + break; + + case PP_DCEFCLK: + pr_info("Setting DCEFCLK min/max dpm level is not supported!\n"); + break; + case PP_PCIE: default: break; @@ -4267,12 +4330,113 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, return result; } +static int vega10_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) +{ + static const char *ppfeature_name[] = { + "DPM_PREFETCHER", + "GFXCLK_DPM", + "UCLK_DPM", + "SOCCLK_DPM", + "UVD_DPM", + "VCE_DPM", + "ULV", + "MP0CLK_DPM", + "LINK_DPM", + "DCEFCLK_DPM", + "AVFS", + "GFXCLK_DS", + "SOCCLK_DS", + "LCLK_DS", + "PPT", + "TDC", + "THERMAL", + "GFX_PER_CU_CG", + "RM", + "DCEFCLK_DS", + "ACDC", + "VR0HOT", + "VR1HOT", + "FW_CTF", + "LED_DISPLAY", + "FAN_CONTROL", + "FAST_PPT", + "DIDT", + "ACG", + "PCC_LIMIT"}; + static const char *output_title[] = { + "FEATURES", + "BITMASK", + "ENABLEMENT"}; + uint64_t features_enabled; + int i; + int ret = 0; + int size = 0; + + ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); + PP_ASSERT_WITH_CODE(!ret, + "[EnableAllSmuFeatures] Failed to get enabled smc features!", + return ret); + + size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled); + size += sprintf(buf + size, "%-19s %-22s %s\n", + output_title[0], + output_title[1], + output_title[2]); + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + size += sprintf(buf + size, "%-19s 0x%016llx %6s\n", + ppfeature_name[i], + 1ULL << i, + (features_enabled & (1ULL << i)) ? 
"Y" : "N"); + } + + return size; +} + +static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) +{ + uint64_t features_enabled; + uint64_t features_to_enable; + uint64_t features_to_disable; + int ret = 0; + + if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) + return -EINVAL; + + ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled); + if (ret) + return ret; + + features_to_disable = + (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_to_enable = + (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + + pr_debug("features_to_disable 0x%llx\n", features_to_disable); + pr_debug("features_to_enable 0x%llx\n", features_to_enable); + + if (features_to_disable) { + ret = vega10_enable_smc_features(hwmgr, false, features_to_disable); + if (ret) + return ret; + } + + if (features_to_enable) { + ret = vega10_enable_smc_features(hwmgr, true, features_to_enable); + if (ret) + return ret; + } + + return 0; +} + static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { struct vega10_hwmgr *data = hwmgr->backend; struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); + struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table); + struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table); struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; @@ -4303,6 +4467,32 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; + case PP_SOCCLK: + if (data->registry_data.socclk_dpm_key_disabled) + break; + + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex); + now = smum_get_argument(hwmgr); + + for (i = 0; i < soc_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, soc_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_DCEFCLK: + if (data->registry_data.dcefclk_dpm_key_disabled) + break; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK); + now = smum_get_argument(hwmgr); + + for (i = 0; i < dcef_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, dcef_table->dpm_levels[i].value / 100, + (dcef_table->dpm_levels[i].value / 100 == now) ? 
+ "*" : ""); + break; case PP_PCIE: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex); now = smum_get_argument(hwmgr); @@ -4980,6 +5170,12 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .set_power_limit = vega10_set_power_limit, .odn_edit_dpm_table = vega10_odn_edit_dpm_table, .get_performance_level = vega10_get_performance_level, + .get_asic_baco_capability = vega10_baco_get_capability, + .get_asic_baco_state = vega10_baco_get_state, + .set_asic_baco_state = vega10_baco_set_state, + .enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost, + .get_ppfeature_status = vega10_get_ppfeature_status, + .set_ppfeature_status = vega10_set_ppfeature_status, }; int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 89870556de1b..f752b4ad0c8a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -199,6 +199,7 @@ struct vega10_smc_state_table { uint32_t vce_boot_level; uint32_t gfx_max_level; uint32_t mem_max_level; + uint32_t soc_max_level; uint8_t vr_hot_gpio; uint8_t ac_dc_gpio; uint8_t therm_out_gpio; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h index b3e63003a789..c934e9612c1b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h @@ -282,6 +282,30 @@ typedef struct _ATOM_Vega10_Fan_Table_V2 { UCHAR ucFanMaxRPM; } ATOM_Vega10_Fan_Table_V2; +typedef struct _ATOM_Vega10_Fan_Table_V3 { + UCHAR ucRevId; + USHORT usFanOutputSensitivity; + USHORT usFanAcousticLimitRpm; + USHORT usThrottlingRPM; + USHORT usTargetTemperature; + USHORT usMinimumPWMLimit; + USHORT usTargetGfxClk; + USHORT usFanGainEdge; + USHORT usFanGainHotspot; + USHORT usFanGainLiquid; + USHORT usFanGainVrVddc; + USHORT usFanGainVrMvdd; + USHORT usFanGainPlx; + USHORT usFanGainHbm; + UCHAR ucEnableZeroRPM; + USHORT usFanStopTemperature; + USHORT usFanStartTemperature; + UCHAR ucFanParameters; + UCHAR ucFanMinRPM; + UCHAR ucFanMaxRPM; + USHORT usMGpuThrottlingRPM; +} ATOM_Vega10_Fan_Table_V3; + typedef struct _ATOM_Vega10_Thermal_Controller { UCHAR ucRevId; UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 99d596dc0e89..b6767d74dc85 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -123,6 +123,7 @@ static int init_thermal_controller( const Vega10_PPTable_Generic_SubTable_Header *header; const ATOM_Vega10_Fan_Table *fan_table_v1; const ATOM_Vega10_Fan_Table_V2 *fan_table_v2; + const ATOM_Vega10_Fan_Table_V3 *fan_table_v3; thermal_controller = (ATOM_Vega10_Thermal_Controller *) (((unsigned long)powerplay_table) + @@ -207,7 +208,7 @@ static int init_thermal_controller( le16_to_cpu(fan_table_v1->usFanStopTemperature); hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = le16_to_cpu(fan_table_v1->usFanStartTemperature); - } else if (header->ucRevId > 10) { + } else if (header->ucRevId == 0xb) { fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header; hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = @@ -251,7 +252,54 @@ static int init_thermal_controller( le16_to_cpu(fan_table_v2->usFanStopTemperature); 
hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = le16_to_cpu(fan_table_v2->usFanStartTemperature); + } else if (header->ucRevId > 0xb) { + fan_table_v3 = (ATOM_Vega10_Fan_Table_V3 *)header; + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + fan_table_v3->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v3->ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v3->ucFanMaxRPM * 100UL; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + le16_to_cpu(fan_table_v3->usFanOutputSensitivity); + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + fan_table_v3->ucFanMaxRPM * 100UL; + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = + le16_to_cpu(fan_table_v3->usThrottlingRPM); + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = + le16_to_cpu(fan_table_v3->usFanAcousticLimitRpm); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = + le16_to_cpu(fan_table_v3->usTargetTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = + le16_to_cpu(fan_table_v3->usMinimumPWMLimit); + hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = + le16_to_cpu(fan_table_v3->usTargetGfxClk); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = + le16_to_cpu(fan_table_v3->usFanGainEdge); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = + le16_to_cpu(fan_table_v3->usFanGainHotspot); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = + le16_to_cpu(fan_table_v3->usFanGainLiquid); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = + le16_to_cpu(fan_table_v3->usFanGainVrVddc); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = + le16_to_cpu(fan_table_v3->usFanGainVrMvdd); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = + le16_to_cpu(fan_table_v3->usFanGainPlx); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = + le16_to_cpu(fan_table_v3->usFanGainHbm); + + hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = + fan_table_v3->ucEnableZeroRPM; + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = + le16_to_cpu(fan_table_v3->usFanStopTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = + le16_to_cpu(fan_table_v3->usFanStartTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usMGpuThrottlingRPMLimit = + le16_to_cpu(fan_table_v3->usMGpuThrottlingRPM); } + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 3f807d6c95ce..ba8763daa380 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -556,6 +556,43 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) return ret; } +int vega10_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + PPTable_t *table = &(data->smc_state_table.pp_table); + int ret; + + if (!data->smu_features[GNLD_FAN_CONTROL].supported) + return 0; + + if (!hwmgr->thermal_controller.advanceFanControlParameters. 
+ usMGpuThrottlingRPMLimit) + return 0; + + table->FanThrottlingRpm = hwmgr->thermal_controller. + advanceFanControlParameters.usMGpuThrottlingRPMLimit; + + ret = smum_smc_table_manager(hwmgr, + (uint8_t *)(&(data->smc_state_table.pp_table)), + PPTABLE, false); + if (ret) { + pr_info("Failed to update fan control table in pptable!"); + return ret; + } + + ret = vega10_disable_fan_control_feature(hwmgr); + if (ret) { + pr_info("Attempt to disable SMC fan control feature failed!"); + return ret; + } + + ret = vega10_enable_fan_control_feature(hwmgr); + if (ret) + pr_info("Attempt to enable SMC fan control feature failed!"); + + return ret; +} + /** * Start the fan control on the SMC. * @param hwmgr the address of the powerplay hardware manager. diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h index 21e7c4dfa2ca..4a0ede7c1f07 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h @@ -73,6 +73,7 @@ extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr); extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); extern int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); +extern int vega10_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 0c8212902275..6c8e78611c03 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -1093,6 +1093,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) return ret); } + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) { + min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetHardMinByFreq, + (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))), + "Failed to set hard min dcefclk!", + return ret); + } + return ret; } @@ -1818,7 +1828,7 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - uint32_t soft_min_level, soft_max_level; + uint32_t soft_min_level, soft_max_level, hard_min_level; int ret = 0; switch (type) { @@ -1863,6 +1873,56 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr, break; + case PP_SOCCLK: + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + if (soft_max_level >= data->dpm_table.soc_table.count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, + data->dpm_table.soc_table.count - 1); + return -EINVAL; + } + + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->dpm_table.soc_table.dpm_levels[soft_min_level].value; + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_max_level].value; + + ret = vega12_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to lowest!", + return ret); + + ret = vega12_upload_dpm_max_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); + + break; + + case PP_DCEFCLK: + hard_min_level = mask ? 
(ffs(mask) - 1) : 0; + + if (hard_min_level >= data->dpm_table.dcef_table.count) { + pr_err("Clock level specified %d is over max allowed %d\n", + hard_min_level, + data->dpm_table.dcef_table.count - 1); + return -EINVAL; + } + + data->dpm_table.dcef_table.dpm_state.hard_min_level = + data->dpm_table.dcef_table.dpm_levels[hard_min_level].value; + + ret = vega12_upload_dpm_min_level(hwmgr); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to lowest!", + return ret); + + //TODO: Setting DCEFCLK max dpm level is not supported + + break; + case PP_PCIE: break; @@ -1873,6 +1933,104 @@ static int vega12_force_clock_level(struct pp_hwmgr *hwmgr, return 0; } +static int vega12_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) +{ + static const char *ppfeature_name[] = { + "DPM_PREFETCHER", + "GFXCLK_DPM", + "UCLK_DPM", + "SOCCLK_DPM", + "UVD_DPM", + "VCE_DPM", + "ULV", + "MP0CLK_DPM", + "LINK_DPM", + "DCEFCLK_DPM", + "GFXCLK_DS", + "SOCCLK_DS", + "LCLK_DS", + "PPT", + "TDC", + "THERMAL", + "GFX_PER_CU_CG", + "RM", + "DCEFCLK_DS", + "ACDC", + "VR0HOT", + "VR1HOT", + "FW_CTF", + "LED_DISPLAY", + "FAN_CONTROL", + "DIDT", + "GFXOFF", + "CG", + "ACG"}; + static const char *output_title[] = { + "FEATURES", + "BITMASK", + "ENABLEMENT"}; + uint64_t features_enabled; + int i; + int ret = 0; + int size = 0; + + ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); + PP_ASSERT_WITH_CODE(!ret, + "[EnableAllSmuFeatures] Failed to get enabled smc features!", + return ret); + + size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled); + size += sprintf(buf + size, "%-19s %-22s %s\n", + output_title[0], + output_title[1], + output_title[2]); + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + size += sprintf(buf + size, "%-19s 0x%016llx %6s\n", + ppfeature_name[i], + 1ULL << i, + (features_enabled & (1ULL << i)) ? "Y" : "N"); + } + + return size; +} + +static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) +{ + uint64_t features_enabled; + uint64_t features_to_enable; + uint64_t features_to_disable; + int ret = 0; + + if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) + return -EINVAL; + + ret = vega12_get_enabled_smc_features(hwmgr, &features_enabled); + if (ret) + return ret; + + features_to_disable = + (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_to_enable = + (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + + pr_debug("features_to_disable 0x%llx\n", features_to_disable); + pr_debug("features_to_enable 0x%llx\n", features_to_enable); + + if (features_to_disable) { + ret = vega12_enable_smc_features(hwmgr, false, features_to_disable); + if (ret) + return ret; + } + + if (features_to_enable) { + ret = vega12_enable_smc_features(hwmgr, true, features_to_enable); + if (ret) + return ret; + } + + return 0; +} + static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { @@ -1912,6 +2070,42 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? 
"*" : ""); break; + case PP_SOCCLK: + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0, + "Attempt to get Current SOCCLK Frequency Failed!", + return -EINVAL); + now = smum_get_argument(hwmgr); + + PP_ASSERT_WITH_CODE( + vega12_get_socclocks(hwmgr, &clocks) == 0, + "Attempt to get soc clk levels Failed!", + return -1); + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); + break; + + case PP_DCEFCLK: + PP_ASSERT_WITH_CODE( + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0, + "Attempt to get Current DCEFCLK Frequency Failed!", + return -EINVAL); + now = smum_get_argument(hwmgr); + + PP_ASSERT_WITH_CODE( + vega12_get_dcefclocks(hwmgr, &clocks) == 0, + "Attempt to get dcef clk levels Failed!", + return -1); + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); + break; + case PP_PCIE: break; @@ -2432,6 +2626,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .start_thermal_controller = vega12_start_thermal_controller, .powergate_gfx = vega12_gfx_off_control, .get_performance_level = vega12_get_performance_level, + .get_ppfeature_status = vega12_get_ppfeature_status, + .set_ppfeature_status = vega12_set_ppfeature_status, }; int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c new file mode 100644 index 000000000000..5e8602a79b1c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c @@ -0,0 +1,103 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "soc15.h" +#include "soc15_hw_ip.h" +#include "soc15_common.h" +#include "vega20_inc.h" +#include "vega20_ppsmc.h" +#include "vega20_baco.h" + + + +static const struct soc15_baco_cmd_entry clean_baco_tbl[] = +{ + {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_6), 0, 0, 0, 0}, + {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, +}; + +int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + *cap = false; + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) + return 0; + + if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) { + reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); + + if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) + *cap = true; + } + + return 0; +} + +int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL); + + if (reg & BACO_CNTL__BACO_MODE_MASK) + /* gfx has already entered BACO state */ + *state = BACO_STATE_IN; + else + *state = BACO_STATE_OUT; + return 0; +} + +int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + enum BACO_STATE cur_state; + uint32_t data; + + vega20_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* aisc already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + + + if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0)) + return -EINVAL; + + } else if (state == BACO_STATE_OUT) { + if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco)) + return -EINVAL; + if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return -EINVAL; + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h similarity index 73% rename from drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h rename to drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h index 8d35453c25b6..51c7f8392925 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h @@ -1,5 +1,5 @@ /* - * Copyright 2012-15 Advanced Micro Devices, Inc. + * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -19,14 +19,14 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
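/*
 * Illustrative sketch only, not taken from the patch above: the new
 * vega20_baco_get_state()/vega20_baco_set_state() helpers give callers a
 * small state machine for BACO (bus active, chip off).  A hypothetical
 * caller that cycles the GPU through BACO might look like this, assuming a
 * valid pp_hwmgr; the helper names are the ones added above, the wrapper
 * itself is made up for illustration.
 */
static int example_baco_cycle(struct pp_hwmgr *hwmgr)
{
	enum BACO_STATE state;
	int ret;

	ret = vega20_baco_get_state(hwmgr, &state);
	if (ret)
		return ret;

	if (state == BACO_STATE_OUT) {
		/* enter BACO: asserts THM_BACO_CNTL, then sends EnterBaco */
		ret = vega20_baco_set_state(hwmgr, BACO_STATE_IN);
		if (ret)
			return ret;
	}

	/* leave BACO again and scrub the BIOS scratch registers */
	return vega20_baco_set_state(hwmgr, BACO_STATE_OUT);
}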
* - * Authors: AMD - * */ +#ifndef __VEGA20_BACO_H__ +#define __VEGA20_BACO_H__ +#include "hwmgr.h" +#include "common_baco.h" -#ifndef __DAL_I2C_AUX_DCE112_H__ -#define __DAL_I2C_AUX_DCE112_H__ - -struct i2caux *dal_i2caux_dce112_create( - struct dc_context *ctx); +extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); +extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); -#endif /* __DAL_I2C_AUX_DCE112_H__ */ +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 82935a3bd950..aad79affb081 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -47,6 +47,7 @@ #include "pp_overdriver.h" #include "pp_thermal.h" #include "soc15_common.h" +#include "vega20_baco.h" #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" #include "nbio/nbio_7_4_sh_mask.h" @@ -770,6 +771,54 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) return 0; } +/* + * Override PCIe link speed and link width for DPM Level 1. PPTable entries + * reflect the ASIC capabilities and not the system capabilities. For e.g. + * Vega20 board in a PCI Gen3 system. In this case, when SMU's tries to switch + * to DPM1, it fails as system doesn't support Gen4. + */ +static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; + int ret; + + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + /* Bit 31:16: LCLK DPM level. 
0 is DPM0, and 1 is DPM1 + * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 + * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 + */ + smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_OverridePcieParameters, smu_pcie_arg); + PP_ASSERT_WITH_CODE(!ret, + "[OverridePcieParameters] Attempt to override pcie params failed!", + return ret); + + return 0; +} + static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = @@ -803,6 +852,11 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr) return 0; } +static int vega20_run_btc(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc); +} + static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr) { return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc); @@ -1564,6 +1618,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to initialize SMC table!", return result); + result = vega20_run_btc(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to run btc!", + return result); + result = vega20_run_btc_afll(hwmgr); PP_ASSERT_WITH_CODE(!result, "[EnableDPMTasks] Failed to run btc afll!", @@ -1574,6 +1633,11 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to enable all smu features!", return result); + result = vega20_override_pcie_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "[EnableDPMTasks] Failed to override pcie parameters!", + return result); + result = vega20_notify_smc_display_change(hwmgr); PP_ASSERT_WITH_CODE(!result, "[EnableDPMTasks] Failed to notify smc display change!", @@ -1735,6 +1799,28 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_ return ret); } + if (data->smu_features[GNLD_DPM_FCLK].enabled && + (feature_mask & FEATURE_DPM_FCLK_MASK)) { + min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_FCLK << 16) | (min_freq & 0xffff))), + "Failed to set soft min fclk!", + return ret); + } + + if (data->smu_features[GNLD_DPM_DCEFCLK].enabled && + (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) { + min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetHardMinByFreq, + (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))), + "Failed to set hard min dcefclk!", + return ret); + } + return ret; } @@ -1807,6 +1893,17 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_ return ret); } + if (data->smu_features[GNLD_DPM_FCLK].enabled && + (feature_mask & FEATURE_DPM_FCLK_MASK)) { + max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level; + + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter( + hwmgr, PPSMC_MSG_SetSoftMaxByFreq, + (PPCLK_FCLK << 16) | (max_freq & 0xffff))), + "Failed to set soft max fclk!", + return ret); + } + return ret; } @@ -1914,16 +2011,36 @@ static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) return (mem_clk * 100); } +static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table) +{ + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + int ret = 0; + + if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) { + ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table, + 
TABLE_SMU_METRICS, true); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); + return ret; + } + memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t)); + data->metrics_time = jiffies; + } else + memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t)); + + return ret; +} + static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query) { int ret = 0; SmuMetrics_t metrics_table; - ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true); - PP_ASSERT_WITH_CODE(!ret, - "Failed to export SMU METRICS table!", - return ret); + ret = vega20_get_metrics_table(hwmgr, &metrics_table); + if (ret) + return ret; *query = metrics_table.CurrSocketPower << 8; @@ -1954,10 +2071,9 @@ static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, int ret = 0; SmuMetrics_t metrics_table; - ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true); - PP_ASSERT_WITH_CODE(!ret, - "Failed to export SMU METRICS table!", - return ret); + ret = vega20_get_metrics_table(hwmgr, &metrics_table); + if (ret) + return ret; *activity_percent = metrics_table.AverageGfxActivity; @@ -1969,16 +2085,18 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); struct amdgpu_device *adev = hwmgr->adev; + SmuMetrics_t metrics_table; uint32_t val_vid; int ret = 0; switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - ret = vega20_get_current_clk_freq(hwmgr, - PPCLK_GFXCLK, - (uint32_t *)value); - if (!ret) - *size = 4; + ret = vega20_get_metrics_table(hwmgr, &metrics_table); + if (ret) + return ret; + + *((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100; + *size = 4; break; case AMDGPU_PP_SENSOR_GFX_MCLK: ret = vega20_get_current_clk_freq(hwmgr, @@ -2136,6 +2254,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) data->dpm_table.mem_table.dpm_state.soft_max_level = data->dpm_table.mem_table.dpm_levels[soft_level].value; + soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); + + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_level].value; + ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", @@ -2168,6 +2292,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) data->dpm_table.mem_table.dpm_state.soft_max_level = data->dpm_table.mem_table.dpm_levels[soft_level].value; + soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); + + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_level].value; + ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload boot level to highest!", @@ -2184,8 +2314,32 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) { + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint32_t soft_min_level, soft_max_level; int ret = 0; + soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; + 
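/*
 * Illustrative sketch only, not taken from the patch above: the new
 * vega20_get_metrics_table() refreshes its cached SMU metrics copy at most
 * every HZ/2 (~500 ms), so back-to-back sensor reads (power, activity, GFX
 * clock) cost a single SMU table export.  The real cache lives in struct
 * vega20_hwmgr (metrics_time/metrics_table); the struct and function names
 * below are hypothetical and only show the general shape of the pattern.
 */
struct example_metrics_cache {
	unsigned long stamp;	/* jiffies of last refresh, 0 = never fetched */
	SmuMetrics_t data;
};

static int example_cached_metrics_read(struct pp_hwmgr *hwmgr,
					struct example_metrics_cache *c,
					SmuMetrics_t *out)
{
	int ret = 0;

	if (!c->stamp || time_after(jiffies, c->stamp + HZ / 2)) {
		/* cache is cold or stale: export the table from the SMU */
		ret = smum_smc_table_manager(hwmgr, (uint8_t *)&c->data,
					     TABLE_SMU_METRICS, true);
		if (ret)
			return ret;
		c->stamp = jiffies;
	}

	*out = c->data;
	return ret;
}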
data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; + + soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->dpm_table.mem_table.dpm_levels[soft_min_level].value; + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->dpm_table.mem_table.dpm_levels[soft_max_level].value; + + soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); + soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->dpm_table.soc_table.dpm_levels[soft_min_level].value; + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_max_level].value; + ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); PP_ASSERT_WITH_CODE(!ret, "Failed to upload DPM Bootup Levels!", @@ -2236,7 +2390,7 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - uint32_t soft_min_level, soft_max_level; + uint32_t soft_min_level, soft_max_level, hard_min_level; int ret = 0; switch (type) { @@ -2295,6 +2449,84 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr, break; + case PP_SOCCLK: + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + if (soft_max_level >= data->dpm_table.soc_table.count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, + data->dpm_table.soc_table.count - 1); + return -EINVAL; + } + + data->dpm_table.soc_table.dpm_state.soft_min_level = + data->dpm_table.soc_table.dpm_levels[soft_min_level].value; + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_max_level].value; + + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to lowest!", + return ret); + + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); + + break; + + case PP_FCLK: + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + if (soft_max_level >= data->dpm_table.fclk_table.count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, + data->dpm_table.fclk_table.count - 1); + return -EINVAL; + } + + data->dpm_table.fclk_table.dpm_state.soft_min_level = + data->dpm_table.fclk_table.dpm_levels[soft_min_level].value; + data->dpm_table.fclk_table.dpm_state.soft_max_level = + data->dpm_table.fclk_table.dpm_levels[soft_max_level].value; + + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to lowest!", + return ret); + + ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); + + break; + + case PP_DCEFCLK: + hard_min_level = mask ? 
(ffs(mask) - 1) : 0; + + if (hard_min_level >= data->dpm_table.dcef_table.count) { + pr_err("Clock level specified %d is over max allowed %d\n", + hard_min_level, + data->dpm_table.dcef_table.count - 1); + return -EINVAL; + } + + data->dpm_table.dcef_table.dpm_state.hard_min_level = + data->dpm_table.dcef_table.dpm_levels[hard_min_level].value; + + ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to lowest!", + return ret); + + //TODO: Setting DCEFCLK max dpm level is not supported + + break; + case PP_PCIE: soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0; @@ -2345,6 +2577,7 @@ static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, return ret; vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask); vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask); + vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask); break; case AMD_DPM_FORCED_LEVEL_MANUAL: @@ -2775,6 +3008,108 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return 0; } +static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) +{ + static const char *ppfeature_name[] = { + "DPM_PREFETCHER", + "GFXCLK_DPM", + "UCLK_DPM", + "SOCCLK_DPM", + "UVD_DPM", + "VCE_DPM", + "ULV", + "MP0CLK_DPM", + "LINK_DPM", + "DCEFCLK_DPM", + "GFXCLK_DS", + "SOCCLK_DS", + "LCLK_DS", + "PPT", + "TDC", + "THERMAL", + "GFX_PER_CU_CG", + "RM", + "DCEFCLK_DS", + "ACDC", + "VR0HOT", + "VR1HOT", + "FW_CTF", + "LED_DISPLAY", + "FAN_CONTROL", + "GFX_EDC", + "GFXOFF", + "CG", + "FCLK_DPM", + "FCLK_DS", + "MP1CLK_DS", + "MP0CLK_DS", + "XGMI"}; + static const char *output_title[] = { + "FEATURES", + "BITMASK", + "ENABLEMENT"}; + uint64_t features_enabled; + int i; + int ret = 0; + int size = 0; + + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + PP_ASSERT_WITH_CODE(!ret, + "[EnableAllSmuFeatures] Failed to get enabled smc features!", + return ret); + + size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled); + size += sprintf(buf + size, "%-19s %-22s %s\n", + output_title[0], + output_title[1], + output_title[2]); + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + size += sprintf(buf + size, "%-19s 0x%016llx %6s\n", + ppfeature_name[i], + 1ULL << i, + (features_enabled & (1ULL << i)) ? 
"Y" : "N"); + } + + return size; +} + +static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) +{ + uint64_t features_enabled; + uint64_t features_to_enable; + uint64_t features_to_disable; + int ret = 0; + + if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) + return -EINVAL; + + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + if (ret) + return ret; + + features_to_disable = + (features_enabled ^ new_ppfeature_masks) & features_enabled; + features_to_enable = + (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; + + pr_debug("features_to_disable 0x%llx\n", features_to_disable); + pr_debug("features_to_enable 0x%llx\n", features_to_enable); + + if (features_to_disable) { + ret = vega20_enable_smc_features(hwmgr, false, features_to_disable); + if (ret) + return ret; + } + + if (features_to_enable) { + ret = vega20_enable_smc_features(hwmgr, true, features_to_enable); + if (ret) + return ret; + } + + return 0; +} + static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { @@ -2789,6 +3124,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable; struct amdgpu_device *adev = hwmgr->adev; struct pp_clock_levels_with_latency clocks; + struct vega20_single_dpm_table *fclk_dpm_table = + &(data->dpm_table.fclk_table); int i, now, size = 0; int ret = 0; uint32_t gen_speed, lane_width; @@ -2828,6 +3165,52 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; + case PP_SOCCLK: + ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get current socclk freq Failed!", + return ret); + + ret = vega20_get_socclocks(hwmgr, &clocks); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get soc clk levels Failed!", + return ret); + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); + break; + + case PP_FCLK: + ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get current fclk freq Failed!", + return ret); + + for (i = 0; i < fclk_dpm_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, fclk_dpm_table->dpm_levels[i].value, + fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : ""); + break; + + case PP_DCEFCLK: + ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get current dcefclk freq Failed!", + return ret); + + ret = vega20_get_dcefclocks(hwmgr, &clocks); + PP_ASSERT_WITH_CODE(!ret, + "Attempt to get dcefclk levels Failed!", + return ret); + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz == now * 10) ? 
"*" : ""); + break; + case PP_PCIE: gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) @@ -3073,7 +3456,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && !hwmgr->display_config->multi_monitor_in_sync) || vblank_too_short; - latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; + latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency; /* gfxclk */ dpm_table = &(data->dpm_table.gfx_table); @@ -3571,6 +3954,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .force_clock_level = vega20_force_clock_level, .print_clock_levels = vega20_print_clock_levels, .read_sensor = vega20_read_sensor, + .get_ppfeature_status = vega20_get_ppfeature_status, + .set_ppfeature_status = vega20_set_ppfeature_status, /* powergate related */ .powergate_uvd = vega20_power_gate_uvd, .powergate_vce = vega20_power_gate_vce, @@ -3591,6 +3976,10 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { /* smu memory related */ .notify_cac_buffer_info = vega20_notify_cac_buffer_info, .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, + /* BACO related */ + .get_asic_baco_capability = vega20_baco_get_capability, + .get_asic_baco_state = vega20_baco_get_state, + .set_asic_baco_state = vega20_baco_set_state, }; int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 25faaa5c5b10..37f5f5e657da 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -520,6 +520,9 @@ struct vega20_hwmgr { /* ---- Gfxoff ---- */ bool gfxoff_allowed; uint32_t counter_gfxoff; + + unsigned long metrics_time; + SmuMetrics_t metrics_table; }; #define VEGA20_DPM2_NEAR_TDP_DEC 10 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h index 6738bad53602..613cb1989b3d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_inc.h @@ -31,5 +31,6 @@ #include "asic_reg/mp/mp_9_0_sh_mask.h" #include "asic_reg/nbio/nbio_7_4_offset.h" +#include "asic_reg/nbio/nbio_7_4_sh_mask.h" #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index f4dab979a3a1..6e0be6027705 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -397,7 +397,6 @@ struct phm_odn_clock_levels { }; extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); -extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr); extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 8cb831b6a016..bac3d85e3b82 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -47,6 +47,11 @@ enum DISPLAY_GAP { }; typedef enum DISPLAY_GAP DISPLAY_GAP; +enum BACO_STATE { + BACO_STATE_OUT = 0, + BACO_STATE_IN, +}; + struct vi_dpm_level { bool enabled; uint32_t value; @@ -251,7 +256,6 @@ struct pp_hwmgr_func { uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); int (*power_state_set)(struct pp_hwmgr *hwmgr, 
const void *state); - int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr); int (*display_config_changed)(struct pp_hwmgr *hwmgr); @@ -334,6 +338,11 @@ struct pp_hwmgr_func { int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr); int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap); + int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); + int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); + int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks); }; struct pp_table_func { @@ -678,6 +687,7 @@ struct pp_advance_fan_control_parameters { uint32_t ulTargetGfxClk; uint16_t usZeroRPMStartTemperature; uint16_t usZeroRPMStopTemperature; + uint16_t usMGpuThrottlingRPMLimit; }; struct pp_thermal_controller_info { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index a6edd5df33b0..4240aeec9000 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -29,6 +29,10 @@ #include #include "smumgr.h" +MODULE_FIRMWARE("amdgpu/bonaire_smc.bin"); +MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_smc.bin"); +MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin"); MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 62f51f70606d..73e508e00e30 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -15,10 +15,12 @@ */ #include -#include +#include #include #include +#include #include +#include #include #include diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index 206a76abf771..c9f78397d345 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -15,13 +15,19 @@ */ #include -#include +#include +#include +#include +#include #include #include #include #include -#include +#include +#include +#include #include +#include #include "arcpgu.h" #include "arcpgu_regs.h" diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c index 68629e614990..5ea053cf805c 100644 --- a/drivers/gpu/drm/arc/arcpgu_sim.c +++ b/drivers/gpu/drm/arc/arcpgu_sim.c @@ -14,8 +14,9 @@ * */ -#include #include +#include +#include #include "arcpgu.h" @@ -51,7 +52,6 @@ arcpgu_drm_connector_helper_funcs = { }; static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { - .dpms = drm_helper_connector_dpms, .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = arcpgu_drm_connector_destroy, diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig index 9a18e1bd57b4..a204103b3efb 100644 --- a/drivers/gpu/drm/arm/Kconfig +++ b/drivers/gpu/drm/arm/Kconfig @@ -1,13 +1,10 @@ -config DRM_ARM - bool - help - Choose this option to select drivers for ARM's devices +# SPDX-License-Identifier: GPL-2.0 +menu "ARM devices" config DRM_HDLCD tristate "ARM HDLCD" depends on DRM && OF && (ARM || ARM64) depends on COMMON_CLK - select DRM_ARM select 
DRM_KMS_HELPER select DRM_KMS_CMA_HELPER help @@ -29,7 +26,6 @@ config DRM_MALI_DISPLAY tristate "ARM Mali Display Processor" depends on DRM && OF && (ARM || ARM64) depends on COMMON_CLK - select DRM_ARM select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER select DRM_GEM_CMA_HELPER @@ -40,3 +36,7 @@ config DRM_MALI_DISPLAY of the hardware. If compiled as a module it will be called mali-dp. + +source "drivers/gpu/drm/arm/display/Kconfig" + +endmenu diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile index 3bf31d1a4722..120bef801fcf 100644 --- a/drivers/gpu/drm/arm/Makefile +++ b/drivers/gpu/drm/arm/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_DRM_HDLCD) += hdlcd.o mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o mali-dp-y += malidp_mw.o obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o +obj-$(CONFIG_DRM_KOMEDA) += display/ diff --git a/drivers/gpu/drm/arm/display/Kbuild b/drivers/gpu/drm/arm/display/Kbuild new file mode 100644 index 000000000000..382f1ca831e4 --- /dev/null +++ b/drivers/gpu/drm/arm/display/Kbuild @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_DRM_KOMEDA) += komeda/ diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig new file mode 100644 index 000000000000..cec0639e3aa1 --- /dev/null +++ b/drivers/gpu/drm/arm/display/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +config DRM_KOMEDA + tristate "ARM Komeda display driver" + depends on DRM && OF + depends on COMMON_CLK + select DRM_KMS_HELPER + select DRM_KMS_CMA_HELPER + select DRM_GEM_CMA_HELPER + select VIDEOMODE_HELPERS + help + Choose this option if you want to compile the ARM Komeda display + Processor driver. It supports the D71 variants of the hardware. + + If compiled as a module it will be called komeda. diff --git a/drivers/gpu/drm/arm/display/include/malidp_io.h b/drivers/gpu/drm/arm/display/include/malidp_io.h new file mode 100644 index 000000000000..4fb3caf864ce --- /dev/null +++ b/drivers/gpu/drm/arm/display/include/malidp_io.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#ifndef _MALIDP_IO_H_ +#define _MALIDP_IO_H_ + +#include + +static inline u32 +malidp_read32(u32 __iomem *base, u32 offset) +{ + return readl((base + (offset >> 2))); +} + +static inline void +malidp_write32(u32 __iomem *base, u32 offset, u32 v) +{ + writel(v, (base + (offset >> 2))); +} + +static inline void +malidp_write32_mask(u32 __iomem *base, u32 offset, u32 m, u32 v) +{ + u32 tmp = malidp_read32(base, offset); + + tmp &= (~m); + malidp_write32(base, offset, v | tmp); +} + +static inline void +malidp_write_group(u32 __iomem *base, u32 offset, int num, const u32 *values) +{ + int i; + + for (i = 0; i < num; i++) + malidp_write32(base, offset + i * 4, values[i]); +} + +#endif /*_MALIDP_IO_H_*/ diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h new file mode 100644 index 000000000000..b35fc5db866b --- /dev/null +++ b/drivers/gpu/drm/arm/display/include/malidp_product.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
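/*
 * Illustrative sketch only, not taken from the patch above: the malidp_io.h
 * accessors added above take byte offsets against a u32 __iomem base (hence
 * the "offset >> 2"), and malidp_write32_mask() is a read-modify-write that
 * clears the bits in 'm' before OR-ing in 'v'.  The register and field
 * names below are made up purely for illustration.
 */
#define EXAMPLE_REG		0x0010	/* byte offset, hypothetical register */
#define EXAMPLE_EN_MASK		0x3	/* hypothetical two-bit enable field */
#define EXAMPLE_EN_FAST		0x2

static void example_set_mode(u32 __iomem *reg_base)
{
	/* only the two EN bits change; the rest of the register is preserved */
	malidp_write32_mask(reg_base, EXAMPLE_REG,
			    EXAMPLE_EN_MASK, EXAMPLE_EN_FAST);
}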
+ * Author: James.Qian.Wang + * + */ +#ifndef _MALIDP_PRODUCT_H_ +#define _MALIDP_PRODUCT_H_ + +/* Product identification */ +#define MALIDP_CORE_ID(__product, __major, __minor, __status) \ + ((((__product) & 0xFFFF) << 16) | (((__major) & 0xF) << 12) | \ + (((__minor) & 0xF) << 8) | ((__status) & 0xFF)) + +#define MALIDP_CORE_ID_PRODUCT_ID(__core_id) ((__u32)(__core_id) >> 16) +#define MALIDP_CORE_ID_MAJOR(__core_id) (((__u32)(__core_id) >> 12) & 0xF) +#define MALIDP_CORE_ID_MINOR(__core_id) (((__u32)(__core_id) >> 8) & 0xF) +#define MALIDP_CORE_ID_STATUS(__core_id) (((__u32)(__core_id)) & 0xFF) + +/* Mali-display product IDs */ +#define MALIDP_D71_PRODUCT_ID 0x0071 + +#endif /* _MALIDP_PRODUCT_H_ */ diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h new file mode 100644 index 000000000000..63cc47cefcf8 --- /dev/null +++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#ifndef _MALIDP_UTILS_ +#define _MALIDP_UTILS_ + +#define has_bit(nr, mask) (BIT(nr) & (mask)) +#define has_bits(bits, mask) (((bits) & (mask)) == (bits)) + +#define dp_for_each_set_bit(bit, mask) \ + for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8) + +#endif /* _MALIDP_UTILS_ */ diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile new file mode 100644 index 000000000000..1b875e5dc0f6 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 + +ccflags-y := \ + -I$(src)/../include \ + -I$(src) + +komeda-y := \ + komeda_drv.o \ + komeda_dev.o \ + komeda_format_caps.o \ + komeda_pipeline.o \ + komeda_framebuffer.o \ + komeda_kms.o \ + komeda_crtc.o \ + komeda_plane.o \ + komeda_private_obj.o + +komeda-y += \ + d71/d71_dev.o + +obj-$(CONFIG_DRM_KOMEDA) += komeda.o diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c new file mode 100644 index 000000000000..edbf9daa1545 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
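/*
 * Illustrative sketch only, not taken from the patch above: MALIDP_CORE_ID
 * packs product, major, minor and status into one 32-bit word (16/4/4/8
 * bits), and the MALIDP_CORE_ID_* accessors undo that.  A standalone,
 * userspace illustration with a made-up D71 "r1p2" value:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* MALIDP_CORE_ID(0x0071, 1, 2, 0) expanded by hand */
	uint32_t core_id = (0x0071u << 16) | (1u << 12) | (2u << 8) | 0x00u;

	printf("product 0x%04x, r%up%u, status 0x%02x\n",
	       core_id >> 16,		/* MALIDP_CORE_ID_PRODUCT_ID */
	       (core_id >> 12) & 0xF,	/* MALIDP_CORE_ID_MAJOR */
	       (core_id >> 8) & 0xF,	/* MALIDP_CORE_ID_MINOR */
	       core_id & 0xFF);		/* MALIDP_CORE_ID_STATUS */
	return 0;	/* prints: product 0x0071, r1p2, status 0x00 */
}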
+ * Author: James.Qian.Wang + * + */ +#include "malidp_io.h" +#include "komeda_dev.h" + +static int d71_enum_resources(struct komeda_dev *mdev) +{ + /* TODO add enum resources */ + return -1; +} + +#define __HW_ID(__group, __format) \ + ((((__group) & 0x7) << 3) | ((__format) & 0x7)) + +#define RICH KOMEDA_FMT_RICH_LAYER +#define SIMPLE KOMEDA_FMT_SIMPLE_LAYER +#define RICH_SIMPLE (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_SIMPLE_LAYER) +#define RICH_WB (KOMEDA_FMT_RICH_LAYER | KOMEDA_FMT_WB_LAYER) +#define RICH_SIMPLE_WB (RICH_SIMPLE | KOMEDA_FMT_WB_LAYER) + +#define Rot_0 DRM_MODE_ROTATE_0 +#define Flip_H_V (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y | Rot_0) +#define Rot_ALL_H_V (DRM_MODE_ROTATE_MASK | Flip_H_V) + +#define LYT_NM BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16) +#define LYT_WB BIT(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) +#define LYT_NM_WB (LYT_NM | LYT_WB) + +#define AFB_TH AFBC(_TILED | _SPARSE) +#define AFB_TH_SC_YTR AFBC(_TILED | _SC | _SPARSE | _YTR) +#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT) + +static struct komeda_format_caps d71_format_caps_table[] = { + /* HW_ID | fourcc | tile_sz | layer_types | rots | afbc_layouts | afbc_features */ + /* ABGR_2101010*/ + {__HW_ID(0, 0), DRM_FORMAT_ARGB2101010, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */ + {__HW_ID(0, 2), DRM_FORMAT_RGBA1010102, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(0, 3), DRM_FORMAT_BGRA1010102, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + /* ABGR_8888*/ + {__HW_ID(1, 0), DRM_FORMAT_ARGB8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(1, 1), DRM_FORMAT_ABGR8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(1, 1), DRM_FORMAT_ABGR8888, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */ + {__HW_ID(1, 2), DRM_FORMAT_RGBA8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(1, 3), DRM_FORMAT_BGRA8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + /* XBGB_8888 */ + {__HW_ID(2, 0), DRM_FORMAT_XRGB8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(2, 1), DRM_FORMAT_XBGR8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(2, 2), DRM_FORMAT_RGBX8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + {__HW_ID(2, 3), DRM_FORMAT_BGRX8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0}, + /* BGR_888 */ /* none-afbc RGB888 doesn't support rotation and flip */ + {__HW_ID(3, 0), DRM_FORMAT_RGB888, 1, RICH_SIMPLE_WB, Rot_0, 0, 0}, + {__HW_ID(3, 1), DRM_FORMAT_BGR888, 1, RICH_SIMPLE_WB, Rot_0, 0, 0}, + {__HW_ID(3, 1), DRM_FORMAT_BGR888, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */ + /* BGR 16bpp */ + {__HW_ID(4, 0), DRM_FORMAT_RGBA5551, 1, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 1), DRM_FORMAT_ABGR1555, 1, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 1), DRM_FORMAT_ABGR1555, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */ + {__HW_ID(4, 2), DRM_FORMAT_RGB565, 1, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 3), DRM_FORMAT_BGR565, 1, RICH_SIMPLE, Flip_H_V, 0, 0}, + {__HW_ID(4, 3), DRM_FORMAT_BGR565, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */ + {__HW_ID(4, 4), DRM_FORMAT_R8, 1, SIMPLE, Rot_0, 0, 0}, + /* YUV 444/422/420 8bit */ + {__HW_ID(5, 0), 0 /*XYUV8888*/, 1, 0, 0, 0, 0}, + /* XYUV unsupported*/ + {__HW_ID(5, 1), DRM_FORMAT_YUYV, 1, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */ + {__HW_ID(5, 2), DRM_FORMAT_YUYV, 1, RICH, Flip_H_V, 0, 0}, + {__HW_ID(5, 3), DRM_FORMAT_UYVY, 1, RICH, 
Flip_H_V, 0, 0}, + {__HW_ID(5, 4), 0, /*X0L0 */ 2, 0, 0, 0}, /* Y0L0 unsupported */ + {__HW_ID(5, 6), DRM_FORMAT_NV12, 1, RICH, Flip_H_V, 0, 0}, + {__HW_ID(5, 6), 0/*DRM_FORMAT_YUV420_8BIT*/, 1, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */ + {__HW_ID(5, 7), DRM_FORMAT_YUV420, 1, RICH, Flip_H_V, 0, 0}, + /* YUV 10bit*/ + {__HW_ID(6, 0), 0,/*XVYU2101010*/ 1, 0, 0, 0, 0},/* VYV30 unsupported */ + {__HW_ID(6, 6), 0/*DRM_FORMAT_X0L2*/, 2, RICH, Flip_H_V, 0, 0}, + {__HW_ID(6, 7), 0/*DRM_FORMAT_P010*/, 1, RICH, Flip_H_V, 0, 0}, + {__HW_ID(6, 7), 0/*DRM_FORMAT_YUV420_10BIT*/, 1, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, +}; + +static void d71_init_fmt_tbl(struct komeda_dev *mdev) +{ + struct komeda_format_caps_table *table = &mdev->fmt_tbl; + + table->format_caps = d71_format_caps_table; + table->n_formats = ARRAY_SIZE(d71_format_caps_table); +} + +static struct komeda_dev_funcs d71_chip_funcs = { + .init_format_table = d71_init_fmt_tbl, + .enum_resources = d71_enum_resources, + .cleanup = NULL, +}; + +#define GLB_ARCH_ID 0x000 +#define GLB_CORE_ID 0x004 +#define GLB_CORE_INFO 0x008 + +struct komeda_dev_funcs * +d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip) +{ + chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID); + chip->core_id = malidp_read32(reg_base, GLB_CORE_ID); + chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO); + + return &d71_chip_funcs; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c new file mode 100644 index 000000000000..3ca5718aa0c2 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "komeda_dev.h" +#include "komeda_kms.h" + +struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = { +}; + +static const struct drm_crtc_funcs komeda_crtc_funcs = { +}; + +int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, + struct komeda_dev *mdev) +{ + struct komeda_crtc *crtc; + struct komeda_pipeline *master; + char str[16]; + int i; + + kms->n_crtcs = 0; + + for (i = 0; i < mdev->n_pipelines; i++) { + crtc = &kms->crtcs[kms->n_crtcs]; + master = mdev->pipelines[i]; + + crtc->master = master; + crtc->slave = NULL; + + if (crtc->slave) + sprintf(str, "pipe-%d", crtc->slave->id); + else + sprintf(str, "None"); + + DRM_INFO("crtc%d: master(pipe-%d) slave(%s) output: %s.\n", + kms->n_crtcs, master->id, str, + master->of_output_dev ? 
+ master->of_output_dev->full_name : "None"); + + kms->n_crtcs++; + } + + return 0; +} + +static struct drm_plane * +get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc) +{ + struct komeda_plane *kplane; + struct drm_plane *plane; + + drm_for_each_plane(plane, &kms->base) { + if (plane->type != DRM_PLANE_TYPE_PRIMARY) + continue; + + kplane = to_kplane(plane); + /* only master can be primary */ + if (kplane->layer->base.pipeline == crtc->master) + return plane; + } + + return NULL; +} + +static int komeda_crtc_add(struct komeda_kms_dev *kms, + struct komeda_crtc *kcrtc) +{ + struct drm_crtc *crtc = &kcrtc->base; + int err; + + err = drm_crtc_init_with_planes(&kms->base, crtc, + get_crtc_primary(kms, kcrtc), NULL, + &komeda_crtc_funcs, NULL); + if (err) + return err; + + drm_crtc_helper_add(crtc, &komeda_crtc_helper_funcs); + drm_crtc_vblank_reset(crtc); + + crtc->port = kcrtc->master->of_output_port; + + return 0; +} + +int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev) +{ + int i, err; + + for (i = 0; i < kms->n_crtcs; i++) { + err = komeda_crtc_add(kms, &kms->crtcs[i]); + if (err) + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c new file mode 100644 index 000000000000..70e9bb7fa30c --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#include +#include +#include +#include + +#include + +#include "komeda_dev.h" + +static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np) +{ + struct komeda_pipeline *pipe; + struct clk *clk; + u32 pipe_id; + int ret = 0; + + ret = of_property_read_u32(np, "reg", &pipe_id); + if (ret != 0 || pipe_id >= mdev->n_pipelines) + return -EINVAL; + + pipe = mdev->pipelines[pipe_id]; + + clk = of_clk_get_by_name(np, "aclk"); + if (IS_ERR(clk)) { + DRM_ERROR("get aclk for pipeline %d failed!\n", pipe_id); + return PTR_ERR(clk); + } + pipe->aclk = clk; + + clk = of_clk_get_by_name(np, "pxclk"); + if (IS_ERR(clk)) { + DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe_id); + return PTR_ERR(clk); + } + pipe->pxlclk = clk; + + /* enum ports */ + pipe->of_output_dev = + of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 0); + pipe->of_output_port = + of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT); + + pipe->of_node = np; + + return 0; +} + +static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev) +{ + struct device_node *child, *np = dev->of_node; + struct clk *clk; + int ret; + + clk = devm_clk_get(dev, "mclk"); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + mdev->mclk = clk; + + for_each_available_child_of_node(np, child) { + if (of_node_cmp(child->name, "pipeline") == 0) { + ret = komeda_parse_pipe_dt(mdev, child); + if (ret) { + DRM_ERROR("parse pipeline dt error!\n"); + of_node_put(child); + break; + } + } + } + + return ret; +} + +struct komeda_dev *komeda_dev_create(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + const struct komeda_product_data *product; + struct komeda_dev *mdev; + struct resource *io_res; + int err = 0; + + product = of_device_get_match_data(dev); + if (!product) + return ERR_PTR(-ENODEV); + + io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!io_res) { + DRM_ERROR("No registers defined.\n"); + return ERR_PTR(-ENODEV); + } + + mdev = 
devm_kzalloc(dev, sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(-ENOMEM); + + mdev->dev = dev; + mdev->reg_base = devm_ioremap_resource(dev, io_res); + if (IS_ERR(mdev->reg_base)) { + DRM_ERROR("Map register space failed.\n"); + err = PTR_ERR(mdev->reg_base); + mdev->reg_base = NULL; + goto err_cleanup; + } + + mdev->pclk = devm_clk_get(dev, "pclk"); + if (IS_ERR(mdev->pclk)) { + DRM_ERROR("Get APB clk failed.\n"); + err = PTR_ERR(mdev->pclk); + mdev->pclk = NULL; + goto err_cleanup; + } + + /* Enable APB clock to access the registers */ + clk_prepare_enable(mdev->pclk); + + mdev->funcs = product->identify(mdev->reg_base, &mdev->chip); + if (!komeda_product_match(mdev, product->product_id)) { + DRM_ERROR("DT configured %x mismatch with real HW %x.\n", + product->product_id, + MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id)); + err = -ENODEV; + goto err_cleanup; + } + + DRM_INFO("Found ARM Mali-D%x version r%dp%d\n", + MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id), + MALIDP_CORE_ID_MAJOR(mdev->chip.core_id), + MALIDP_CORE_ID_MINOR(mdev->chip.core_id)); + + mdev->funcs->init_format_table(mdev); + + err = mdev->funcs->enum_resources(mdev); + if (err) { + DRM_ERROR("enumerate display resource failed.\n"); + goto err_cleanup; + } + + err = komeda_parse_dt(dev, mdev); + if (err) { + DRM_ERROR("parse device tree failed.\n"); + goto err_cleanup; + } + + return mdev; + +err_cleanup: + komeda_dev_destroy(mdev); + return ERR_PTR(err); +} + +void komeda_dev_destroy(struct komeda_dev *mdev) +{ + struct device *dev = mdev->dev; + struct komeda_dev_funcs *funcs = mdev->funcs; + int i; + + for (i = 0; i < mdev->n_pipelines; i++) { + komeda_pipeline_destroy(mdev, mdev->pipelines[i]); + mdev->pipelines[i] = NULL; + } + + mdev->n_pipelines = 0; + + if (funcs && funcs->cleanup) + funcs->cleanup(mdev); + + if (mdev->reg_base) { + devm_iounmap(dev, mdev->reg_base); + mdev->reg_base = NULL; + } + + if (mdev->mclk) { + devm_clk_put(dev, mdev->mclk); + mdev->mclk = NULL; + } + + if (mdev->pclk) { + clk_disable_unprepare(mdev->pclk); + devm_clk_put(dev, mdev->pclk); + mdev->pclk = NULL; + } + + devm_kfree(dev, mdev); +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h new file mode 100644 index 000000000000..0f77dead6a23 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang + * + */ +#ifndef _KOMEDA_DEV_H_ +#define _KOMEDA_DEV_H_ + +#include +#include +#include "komeda_pipeline.h" +#include "malidp_product.h" +#include "komeda_format_caps.h" + +/* malidp device id */ +enum { + MALI_D71 = 0, +}; + +/* pipeline DT ports */ +enum { + KOMEDA_OF_PORT_OUTPUT = 0, + KOMEDA_OF_PORT_COPROC = 1, +}; + +struct komeda_chip_info { + u32 arch_id; + u32 core_id; + u32 core_info; + u32 bus_width; +}; + +struct komeda_product_data { + u32 product_id; + struct komeda_dev_funcs *(*identify)(u32 __iomem *reg, + struct komeda_chip_info *info); +}; + +struct komeda_dev; + +/** + * struct komeda_dev_funcs + * + * Supplied by chip level and returned by the chip entry function xxx_identify, + */ +struct komeda_dev_funcs { + /** + * @init_format_table: + * + * initialize &komeda_dev->format_table, this function should be called + * before the &enum_resource + */ + void (*init_format_table)(struct komeda_dev *mdev); + /** + * @enum_resources: + * + * for CHIP to report or add pipeline and component resources to CORE + */ + int (*enum_resources)(struct komeda_dev *mdev); + /** @cleanup: call to chip to cleanup komeda_dev->chip data */ + void (*cleanup)(struct komeda_dev *mdev); +}; + +/** + * struct komeda_dev + * + * Pipeline and component are used to describe how to handle the pixel data. + * komeda_device is for describing the whole view of the device, and the + * control-abilites of device. + */ +struct komeda_dev { + struct device *dev; + u32 __iomem *reg_base; + + struct komeda_chip_info chip; + /** @fmt_tbl: initialized by &komeda_dev_funcs->init_format_table */ + struct komeda_format_caps_table fmt_tbl; + /** @pclk: APB clock for register access */ + struct clk *pclk; + /** @mck: HW main engine clk */ + struct clk *mclk; + + int n_pipelines; + struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES]; + + /** @funcs: chip funcs to access to HW */ + struct komeda_dev_funcs *funcs; + /** + * @chip_data: + * + * chip data will be added by &komeda_dev_funcs.enum_resources() and + * destroyed by &komeda_dev_funcs.cleanup() + */ + void *chip_data; +}; + +static inline bool +komeda_product_match(struct komeda_dev *mdev, u32 target) +{ + return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target; +} + +struct komeda_dev_funcs * +d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip); + +struct komeda_dev *komeda_dev_create(struct device *dev); +void komeda_dev_destroy(struct komeda_dev *mdev); + +#endif /*_KOMEDA_DEV_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c new file mode 100644 index 000000000000..2bdd189b041d --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
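/*
 * Illustrative sketch only, not taken from the patch above: komeda_dev_funcs
 * is the hook a chip backend returns from its identify() entry point, and
 * d71_identify() earlier in this patch is the only real implementation.  A
 * hypothetical second backend would follow the same shape; every name below
 * is invented for illustration.
 */
static int example_enum_resources(struct komeda_dev *mdev)
{
	/* report this chip's pipelines and components to the core here */
	return 0;
}

static void example_init_fmt_tbl(struct komeda_dev *mdev)
{
	/* point mdev->fmt_tbl at this chip's format caps table here */
}

static struct komeda_dev_funcs example_chip_funcs = {
	.init_format_table	= example_init_fmt_tbl,
	.enum_resources		= example_enum_resources,
	.cleanup		= NULL,
};

struct komeda_dev_funcs *
example_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
	/* fill chip->arch_id/core_id/core_info from registers, then: */
	return &example_chip_funcs;
}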
+ * Author: James.Qian.Wang + * + */ +#include +#include +#include +#include +#include +#include "komeda_dev.h" +#include "komeda_kms.h" + +struct komeda_drv { + struct komeda_dev *mdev; + struct komeda_kms_dev *kms; +}; + +static void komeda_unbind(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + + if (!mdrv) + return; + + komeda_kms_detach(mdrv->kms); + komeda_dev_destroy(mdrv->mdev); + + dev_set_drvdata(dev, NULL); + devm_kfree(dev, mdrv); +} + +static int komeda_bind(struct device *dev) +{ + struct komeda_drv *mdrv; + int err; + + mdrv = devm_kzalloc(dev, sizeof(*mdrv), GFP_KERNEL); + if (!mdrv) + return -ENOMEM; + + mdrv->mdev = komeda_dev_create(dev); + if (IS_ERR(mdrv->mdev)) { + err = PTR_ERR(mdrv->mdev); + goto free_mdrv; + } + + mdrv->kms = komeda_kms_attach(mdrv->mdev); + if (IS_ERR(mdrv->kms)) { + err = PTR_ERR(mdrv->kms); + goto destroy_mdev; + } + + dev_set_drvdata(dev, mdrv); + + return 0; + +destroy_mdev: + komeda_dev_destroy(mdrv->mdev); + +free_mdrv: + devm_kfree(dev, mdrv); + return err; +} + +static const struct component_master_ops komeda_master_ops = { + .bind = komeda_bind, + .unbind = komeda_unbind, +}; + +static int compare_of(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +static void komeda_add_slave(struct device *master, + struct component_match **match, + struct device_node *np, int port) +{ + struct device_node *remote; + + remote = of_graph_get_remote_node(np, port, 0); + if (remote) { + drm_of_component_match_add(master, match, compare_of, remote); + of_node_put(remote); + } +} + +static int komeda_platform_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct component_match *match = NULL; + struct device_node *child; + + if (!dev->of_node) + return -ENODEV; + + for_each_available_child_of_node(dev->of_node, child) { + if (of_node_cmp(child->name, "pipeline") != 0) + continue; + + /* add connector */ + komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT); + } + + return component_master_add_with_match(dev, &komeda_master_ops, match); +} + +static int komeda_platform_remove(struct platform_device *pdev) +{ + component_master_del(&pdev->dev, &komeda_master_ops); + return 0; +} + +static const struct komeda_product_data komeda_products[] = { + [MALI_D71] = { + .product_id = MALIDP_D71_PRODUCT_ID, + .identify = d71_identify, + }, +}; + +const struct of_device_id komeda_of_match[] = { + { .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], }, + {}, +}; + +MODULE_DEVICE_TABLE(of, komeda_of_match); + +static struct platform_driver komeda_platform_driver = { + .probe = komeda_platform_probe, + .remove = komeda_platform_remove, + .driver = { + .name = "komeda", + .of_match_table = komeda_of_match, + .pm = NULL, + }, +}; + +module_platform_driver(komeda_platform_driver); + +MODULE_AUTHOR("James.Qian.Wang "); +MODULE_DESCRIPTION("Komeda KMS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c new file mode 100644 index 000000000000..1e17bd6107a4 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang + * + */ + +#include +#include "komeda_format_caps.h" +#include "malidp_utils.h" + +const struct komeda_format_caps * +komeda_get_format_caps(struct komeda_format_caps_table *table, + u32 fourcc, u64 modifier) +{ + const struct komeda_format_caps *caps; + u64 afbc_features = modifier & ~(AFBC_FORMAT_MOD_BLOCK_SIZE_MASK); + u32 afbc_layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK; + int id; + + for (id = 0; id < table->n_formats; id++) { + caps = &table->format_caps[id]; + + if (fourcc != caps->fourcc) + continue; + + if ((modifier == 0ULL) && (caps->supported_afbc_layouts == 0)) + return caps; + + if (has_bits(afbc_features, caps->supported_afbc_features) && + has_bit(afbc_layout, caps->supported_afbc_layouts)) + return caps; + } + + return NULL; +} + +u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, + u32 layer_type, u32 *n_fmts) +{ + const struct komeda_format_caps *cap; + u32 *fmts; + int i, j, n = 0; + + fmts = kcalloc(table->n_formats, sizeof(u32), GFP_KERNEL); + if (!fmts) + return NULL; + + for (i = 0; i < table->n_formats; i++) { + cap = &table->format_caps[i]; + if (!(layer_type & cap->supported_layer_types) || + (cap->fourcc == 0)) + continue; + + /* one fourcc may has two caps items in table (afbc/none-afbc), + * so check the existing list to avoid adding a duplicated one. + */ + for (j = n - 1; j >= 0; j--) + if (fmts[j] == cap->fourcc) + break; + + if (j < 0) + fmts[n++] = cap->fourcc; + } + + if (n_fmts) + *n_fmts = n; + + return fmts; +} + +void komeda_put_fourcc_list(u32 *fourcc_list) +{ + kfree(fourcc_list); +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h new file mode 100644 index 000000000000..60f39e77b098 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ + +#ifndef _KOMEDA_FORMAT_CAPS_H_ +#define _KOMEDA_FORMAT_CAPS_H_ + +#include +#include +#include + +#define AFBC(x) DRM_FORMAT_MOD_ARM_AFBC(x) + +/* afbc layerout */ +#define AFBC_16x16(x) AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | (x)) +#define AFBC_32x8(x) AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 | (x)) + +/* afbc features */ +#define _YTR AFBC_FORMAT_MOD_YTR +#define _SPLIT AFBC_FORMAT_MOD_SPLIT +#define _SPARSE AFBC_FORMAT_MOD_SPARSE +#define _CBR AFBC_FORMAT_MOD_CBR +#define _TILED AFBC_FORMAT_MOD_TILED +#define _SC AFBC_FORMAT_MOD_SC + +/* layer_type */ +#define KOMEDA_FMT_RICH_LAYER BIT(0) +#define KOMEDA_FMT_SIMPLE_LAYER BIT(1) +#define KOMEDA_FMT_WB_LAYER BIT(2) + +#define AFBC_TH_LAYOUT_ALIGNMENT 8 +#define AFBC_HEADER_SIZE 16 +#define AFBC_SUPERBLK_ALIGNMENT 128 +#define AFBC_SUPERBLK_PIXELS 256 +#define AFBC_BODY_START_ALIGNMENT 1024 +#define AFBC_TH_BODY_START_ALIGNMENT 4096 + +/** + * struct komeda_format_caps + * + * komeda_format_caps is for describing ARM display specific features and + * limitations for a specific format, and format_caps will be linked into + * &komeda_framebuffer like a extension of &drm_format_info. + * + * NOTE: one fourcc may has two different format_caps items for fourcc and + * fourcc+modifier + * + * @hw_id: hw format id, hw specific value. + * @fourcc: drm fourcc format. 
+ * @tile_size: format tiled size, used by ARM format X0L0/X0L2 + * @supported_layer_types: indicate which layer supports this format + * @supported_rots: allowed rotations for this format + * @supported_afbc_layouts: supported afbc layerout + * @supported_afbc_features: supported afbc features + */ +struct komeda_format_caps { + u32 hw_id; + u32 fourcc; + u32 tile_size; + u32 supported_layer_types; + u32 supported_rots; + u32 supported_afbc_layouts; + u64 supported_afbc_features; +}; + +/** + * struct komeda_format_caps_table - format_caps mananger + * + * @n_formats: the size of format_caps list. + * @format_caps: format_caps list. + */ +struct komeda_format_caps_table { + u32 n_formats; + const struct komeda_format_caps *format_caps; +}; + +const struct komeda_format_caps * +komeda_get_format_caps(struct komeda_format_caps_table *table, + u32 fourcc, u64 modifier); + +u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table, + u32 layer_type, u32 *n_fmts); + +void komeda_put_fourcc_list(u32 *fourcc_list); + +#endif diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c new file mode 100644 index 000000000000..9cc9935024f7 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#include +#include +#include +#include +#include + +#include "komeda_framebuffer.h" +#include "komeda_dev.h" + +static void komeda_fb_destroy(struct drm_framebuffer *fb) +{ + struct komeda_fb *kfb = to_kfb(fb); + u32 i; + + for (i = 0; i < fb->format->num_planes; i++) + drm_gem_object_put_unlocked(fb->obj[i]); + + drm_framebuffer_cleanup(fb); + kfree(kfb); +} + +static int komeda_fb_create_handle(struct drm_framebuffer *fb, + struct drm_file *file, u32 *handle) +{ + return drm_gem_handle_create(file, fb->obj[0], handle); +} + +static const struct drm_framebuffer_funcs komeda_fb_funcs = { + .destroy = komeda_fb_destroy, + .create_handle = komeda_fb_create_handle, +}; + +static int +komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb, + struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_framebuffer *fb = &kfb->base; + struct drm_gem_object *obj; + u32 min_size = 0; + u32 i; + + for (i = 0; i < fb->format->num_planes; i++) { + obj = drm_gem_object_lookup(file, mode_cmd->handles[i]); + if (!obj) { + DRM_DEBUG_KMS("Failed to lookup GEM object\n"); + fb->obj[i] = NULL; + + return -ENOENT; + } + + kfb->aligned_w = fb->width / (i ? fb->format->hsub : 1); + kfb->aligned_h = fb->height / (i ? 
fb->format->vsub : 1); + + if (fb->pitches[i] % mdev->chip.bus_width) { + DRM_DEBUG_KMS("Pitch[%d]: 0x%x doesn't align to 0x%x\n", + i, fb->pitches[i], mdev->chip.bus_width); + drm_gem_object_put_unlocked(obj); + fb->obj[i] = NULL; + + return -EINVAL; + } + + min_size = ((kfb->aligned_h / kfb->format_caps->tile_size - 1) + * fb->pitches[i]) + + (kfb->aligned_w * fb->format->cpp[i] + * kfb->format_caps->tile_size) + + fb->offsets[i]; + + if (obj->size < min_size) { + DRM_DEBUG_KMS("Fail to check none afbc fb size.\n"); + drm_gem_object_put_unlocked(obj); + fb->obj[i] = NULL; + + return -EINVAL; + } + + fb->obj[i] = obj; + } + + if (fb->format->num_planes == 3) { + if (fb->pitches[1] != fb->pitches[2]) { + DRM_DEBUG_KMS("The pitch[1] and [2] are not same\n"); + return -EINVAL; + } + } + + return 0; +} + +struct drm_framebuffer * +komeda_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct komeda_dev *mdev = dev->dev_private; + struct komeda_fb *kfb; + int ret = 0, i; + + kfb = kzalloc(sizeof(*kfb), GFP_KERNEL); + if (!kfb) + return ERR_PTR(-ENOMEM); + + kfb->format_caps = komeda_get_format_caps(&mdev->fmt_tbl, + mode_cmd->pixel_format, + mode_cmd->modifier[0]); + if (!kfb->format_caps) { + DRM_DEBUG_KMS("FMT %x is not supported.\n", + mode_cmd->pixel_format); + kfree(kfb); + return ERR_PTR(-EINVAL); + } + + drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd); + + ret = komeda_fb_none_afbc_size_check(mdev, kfb, file, mode_cmd); + if (ret < 0) + goto err_cleanup; + + ret = drm_framebuffer_init(dev, &kfb->base, &komeda_fb_funcs); + if (ret < 0) { + DRM_DEBUG_KMS("failed to initialize fb\n"); + + goto err_cleanup; + } + + return &kfb->base; + +err_cleanup: + for (i = 0; i < kfb->base.format->num_planes; i++) + drm_gem_object_put_unlocked(kfb->base.obj[i]); + + kfree(kfb); + return ERR_PTR(ret); +} + +dma_addr_t +komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane) +{ + struct drm_framebuffer *fb = &kfb->base; + const struct drm_gem_cma_object *obj; + u32 plane_x, plane_y, cpp, pitch, offset; + + if (plane >= fb->format->num_planes) { + DRM_DEBUG_KMS("Out of max plane num.\n"); + return -EINVAL; + } + + obj = drm_fb_cma_get_gem_obj(fb, plane); + + offset = fb->offsets[plane]; + if (!fb->modifier) { + plane_x = x / (plane ? fb->format->hsub : 1); + plane_y = y / (plane ? fb->format->vsub : 1); + cpp = fb->format->cpp[plane]; + pitch = fb->pitches[plane]; + offset += plane_x * cpp * kfb->format_caps->tile_size + + (plane_y * pitch) / kfb->format_caps->tile_size; + } + + return obj->paddr + offset; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h new file mode 100644 index 000000000000..0de2e4a2afd2 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang + * + */ +#ifndef _KOMEDA_FRAMEBUFFER_H_ +#define _KOMEDA_FRAMEBUFFER_H_ + +#include +#include "komeda_format_caps.h" + +/** struct komeda_fb - entend drm_framebuffer with komeda attribute */ +struct komeda_fb { + /** @base: &drm_framebuffer */ + struct drm_framebuffer base; + /* @format_caps: &komeda_format_caps */ + const struct komeda_format_caps *format_caps; + /** @aligned_w: aligned frame buffer width */ + u32 aligned_w; + /** @aligned_h: aligned frame buffer height */ + u32 aligned_h; +}; + +#define to_kfb(dfb) container_of(dfb, struct komeda_fb, base) + +struct drm_framebuffer * +komeda_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_mode_fb_cmd2 *mode_cmd); +dma_addr_t +komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane); +bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type); + +#endif diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c new file mode 100644 index 000000000000..47a58ab20434 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "komeda_dev.h" +#include "komeda_framebuffer.h" +#include "komeda_kms.h" + +DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops); + +static int komeda_gem_cma_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + u32 alignment = 16; /* TODO get alignment from dev */ + + args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), + alignment); + + return drm_gem_cma_dumb_create_internal(file, dev, args); +} + +static struct drm_driver komeda_kms_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC | + DRIVER_PRIME, + .lastclose = drm_fb_helper_lastclose, + .gem_free_object_unlocked = drm_gem_cma_free_object, + .gem_vm_ops = &drm_gem_cma_vm_ops, + .dumb_create = komeda_gem_cma_dumb_create, + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, + .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table, + .gem_prime_vmap = drm_gem_cma_prime_vmap, + .gem_prime_vunmap = drm_gem_cma_prime_vunmap, + .gem_prime_mmap = drm_gem_cma_prime_mmap, + .fops = &komeda_cma_fops, + .name = "komeda", + .desc = "Arm Komeda Display Processor driver", + .date = "20181101", + .major = 0, + .minor = 1, +}; + +static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) +{ + struct drm_device *dev = old_state->dev; + + drm_atomic_helper_commit_modeset_disables(dev, old_state); + + drm_atomic_helper_commit_planes(dev, old_state, 0); + + drm_atomic_helper_commit_modeset_enables(dev, old_state); + + drm_atomic_helper_wait_for_flip_done(dev, old_state); + + drm_atomic_helper_commit_hw_done(old_state); + + drm_atomic_helper_cleanup_planes(dev, old_state); +} + +static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = { + .atomic_commit_tail = komeda_kms_commit_tail, +}; + +static const struct drm_mode_config_funcs komeda_mode_config_funcs = { + .fb_create = komeda_fb_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, 
+}; + +static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms, + struct komeda_dev *mdev) +{ + struct drm_mode_config *config = &kms->base.mode_config; + + drm_mode_config_init(&kms->base); + + komeda_kms_setup_crtcs(kms, mdev); + + /* Get value from dev */ + config->min_width = 0; + config->min_height = 0; + config->max_width = 4096; + config->max_height = 4096; + config->allow_fb_modifiers = false; + + config->funcs = &komeda_mode_config_funcs; + config->helper_private = &komeda_mode_config_helpers; +} + +struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) +{ + struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL); + struct drm_device *drm; + int err; + + if (!kms) + return ERR_PTR(-ENOMEM); + + drm = &kms->base; + err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev); + if (err) + goto free_kms; + + drm->dev_private = mdev; + + komeda_kms_mode_config_init(kms, mdev); + + err = komeda_kms_add_private_objs(kms, mdev); + if (err) + goto cleanup_mode_config; + + err = komeda_kms_add_planes(kms, mdev); + if (err) + goto cleanup_mode_config; + + err = drm_vblank_init(drm, kms->n_crtcs); + if (err) + goto cleanup_mode_config; + + err = komeda_kms_add_crtcs(kms, mdev); + if (err) + goto cleanup_mode_config; + + err = component_bind_all(mdev->dev, kms); + if (err) + goto cleanup_mode_config; + + drm_mode_config_reset(drm); + + err = drm_dev_register(drm, 0); + if (err) + goto cleanup_mode_config; + + return kms; + +cleanup_mode_config: + drm_mode_config_cleanup(drm); +free_kms: + kfree(kms); + return ERR_PTR(err); +} + +void komeda_kms_detach(struct komeda_kms_dev *kms) +{ + struct drm_device *drm = &kms->base; + struct komeda_dev *mdev = drm->dev_private; + + drm_dev_unregister(drm); + component_unbind_all(mdev->dev, drm); + komeda_kms_cleanup_private_objs(mdev); + drm_mode_config_cleanup(drm); + drm->dev_private = NULL; + drm_dev_put(drm); +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h new file mode 100644 index 000000000000..874e9c9f0749 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#ifndef _KOMEDA_KMS_H_ +#define _KOMEDA_KMS_H_ + +#include +#include +#include +#include +#include + +/** struct komeda_plane - komeda instance of drm_plane */ +struct komeda_plane { + /** @base: &drm_plane */ + struct drm_plane base; + /** + * @layer: + * + * represents available layer input pipelines for this plane. + * + * NOTE: + * the layer is not for a specific Layer, but indicate a group of + * Layers with same capabilities. 
+ */ + struct komeda_layer *layer; +}; + +/** + * struct komeda_plane_state + * + * The plane_state can be split into two data flow (left/right) and handled + * by two layers &komeda_plane.layer and &komeda_plane.layer.right + */ +struct komeda_plane_state { + /** @base: &drm_plane_state */ + struct drm_plane_state base; + + /* private properties */ +}; + +/** + * struct komeda_wb_connector + */ +struct komeda_wb_connector { + /** @base: &drm_writeback_connector */ + struct drm_writeback_connector base; + + /** @wb_layer: represents associated writeback pipeline of komeda */ + struct komeda_layer *wb_layer; +}; + +/** + * struct komeda_crtc + */ +struct komeda_crtc { + /** @base: &drm_crtc */ + struct drm_crtc base; + /** @master: only master has display output */ + struct komeda_pipeline *master; + /** + * @slave: optional + * + * Doesn't have its own display output, the handled data flow will + * merge into the master. + */ + struct komeda_pipeline *slave; +}; + +/** struct komeda_crtc_state */ +struct komeda_crtc_state { + /** @base: &drm_crtc_state */ + struct drm_crtc_state base; + + /* private properties */ + + /* computed state which are used by validate/check */ + u32 affected_pipes; + u32 active_pipes; +}; + +/** struct komeda_kms_dev - for gather KMS related things */ +struct komeda_kms_dev { + /** @base: &drm_device */ + struct drm_device base; + + /** @n_crtcs: valid numbers of crtcs in &komeda_kms_dev.crtcs */ + int n_crtcs; + /** @crtcs: crtcs list */ + struct komeda_crtc crtcs[KOMEDA_MAX_PIPELINES]; +}; + +#define to_kplane(p) container_of(p, struct komeda_plane, base) +#define to_kplane_st(p) container_of(p, struct komeda_plane_state, base) +#define to_kconn(p) container_of(p, struct komeda_wb_connector, base) +#define to_kcrtc(p) container_of(p, struct komeda_crtc, base) +#define to_kcrtc_st(p) container_of(p, struct komeda_crtc_state, base) +#define to_kdev(p) container_of(p, struct komeda_kms_dev, base) + +int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev); + +int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev); +int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev); +int komeda_kms_add_private_objs(struct komeda_kms_dev *kms, + struct komeda_dev *mdev); +void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev); + +struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev); +void komeda_kms_detach(struct komeda_kms_dev *kms); + +#endif /*_KOMEDA_KMS_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c new file mode 100644 index 000000000000..f1908e9ef128 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang + * + */ +#include + +#include "komeda_dev.h" +#include "komeda_pipeline.h" + +/** komeda_pipeline_add - Add a pipeline to &komeda_dev */ +struct komeda_pipeline * +komeda_pipeline_add(struct komeda_dev *mdev, size_t size, + struct komeda_pipeline_funcs *funcs) +{ + struct komeda_pipeline *pipe; + + if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) { + DRM_ERROR("Exceed max support %d pipelines.\n", + KOMEDA_MAX_PIPELINES); + return NULL; + } + + if (size < sizeof(*pipe)) { + DRM_ERROR("Request pipeline size too small.\n"); + return NULL; + } + + pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL); + if (!pipe) + return NULL; + + pipe->mdev = mdev; + pipe->id = mdev->n_pipelines; + pipe->funcs = funcs; + + mdev->pipelines[mdev->n_pipelines] = pipe; + mdev->n_pipelines++; + + return pipe; +} + +void komeda_pipeline_destroy(struct komeda_dev *mdev, + struct komeda_pipeline *pipe) +{ + struct komeda_component *c; + int i; + + dp_for_each_set_bit(i, pipe->avail_comps) { + c = komeda_pipeline_get_component(pipe, i); + komeda_component_destroy(mdev, c); + } + + clk_put(pipe->pxlclk); + clk_put(pipe->aclk); + + of_node_put(pipe->of_output_dev); + of_node_put(pipe->of_output_port); + of_node_put(pipe->of_node); + + devm_kfree(mdev->dev, pipe); +} + +struct komeda_component ** +komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id) +{ + struct komeda_dev *mdev = pipe->mdev; + struct komeda_pipeline *temp = NULL; + struct komeda_component **pos = NULL; + + switch (id) { + case KOMEDA_COMPONENT_LAYER0: + case KOMEDA_COMPONENT_LAYER1: + case KOMEDA_COMPONENT_LAYER2: + case KOMEDA_COMPONENT_LAYER3: + pos = to_cpos(pipe->layers[id - KOMEDA_COMPONENT_LAYER0]); + break; + case KOMEDA_COMPONENT_WB_LAYER: + pos = to_cpos(pipe->wb_layer); + break; + case KOMEDA_COMPONENT_COMPIZ0: + case KOMEDA_COMPONENT_COMPIZ1: + temp = mdev->pipelines[id - KOMEDA_COMPONENT_COMPIZ0]; + if (!temp) { + DRM_ERROR("compiz-%d doesn't exist.\n", id); + return NULL; + } + pos = to_cpos(temp->compiz); + break; + case KOMEDA_COMPONENT_SCALER0: + case KOMEDA_COMPONENT_SCALER1: + pos = to_cpos(pipe->scalers[id - KOMEDA_COMPONENT_SCALER0]); + break; + case KOMEDA_COMPONENT_IPS0: + case KOMEDA_COMPONENT_IPS1: + temp = mdev->pipelines[id - KOMEDA_COMPONENT_IPS0]; + if (!temp) { + DRM_ERROR("ips-%d doesn't exist.\n", id); + return NULL; + } + pos = to_cpos(temp->improc); + break; + case KOMEDA_COMPONENT_TIMING_CTRLR: + pos = to_cpos(pipe->ctrlr); + break; + default: + pos = NULL; + DRM_ERROR("Unknown pipeline resource ID: %d.\n", id); + break; + } + + return pos; +} + +struct komeda_component * +komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id) +{ + struct komeda_component **pos = NULL; + struct komeda_component *c = NULL; + + pos = komeda_pipeline_get_component_pos(pipe, id); + if (pos) + c = *pos; + + return c; +} + +/** komeda_component_add - Add a component to &komeda_pipeline */ +struct komeda_component * +komeda_component_add(struct komeda_pipeline *pipe, + size_t comp_sz, u32 id, u32 hw_id, + struct komeda_component_funcs *funcs, + u8 max_active_inputs, u32 supported_inputs, + u8 max_active_outputs, u32 __iomem *reg, + const char *name_fmt, ...) 
+{ + struct komeda_component **pos; + struct komeda_component *c; + int idx, *num = NULL; + + if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) { + WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n", + max_active_inputs); + return NULL; + } + + pos = komeda_pipeline_get_component_pos(pipe, id); + if (!pos || (*pos)) + return NULL; + + if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) { + idx = id - KOMEDA_COMPONENT_LAYER0; + num = &pipe->n_layers; + if (idx != pipe->n_layers) { + DRM_ERROR("please add Layer by id sequence.\n"); + return NULL; + } + } else if (has_bit(id, KOMEDA_PIPELINE_SCALERS)) { + idx = id - KOMEDA_COMPONENT_SCALER0; + num = &pipe->n_scalers; + if (idx != pipe->n_scalers) { + DRM_ERROR("please add Scaler by id sequence.\n"); + return NULL; + } + } + + c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL); + if (!c) + return NULL; + + c->id = id; + c->hw_id = hw_id; + c->reg = reg; + c->pipeline = pipe; + c->max_active_inputs = max_active_inputs; + c->max_active_outputs = max_active_outputs; + c->supported_inputs = supported_inputs; + c->funcs = funcs; + + if (name_fmt) { + va_list args; + + va_start(args, name_fmt); + vsnprintf(c->name, sizeof(c->name), name_fmt, args); + va_end(args); + } + + if (num) + *num = *num + 1; + + pipe->avail_comps |= BIT(c->id); + *pos = c; + + return c; +} + +void komeda_component_destroy(struct komeda_dev *mdev, + struct komeda_component *c) +{ + devm_kfree(mdev->dev, c); +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h new file mode 100644 index 000000000000..8c950bc8ae96 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h @@ -0,0 +1,359 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. + * Author: James.Qian.Wang + * + */ +#ifndef _KOMEDA_PIPELINE_H_ +#define _KOMEDA_PIPELINE_H_ + +#include +#include +#include +#include "malidp_utils.h" + +#define KOMEDA_MAX_PIPELINES 2 +#define KOMEDA_PIPELINE_MAX_LAYERS 4 +#define KOMEDA_PIPELINE_MAX_SCALERS 2 +#define KOMEDA_COMPONENT_N_INPUTS 5 + +/* pipeline component IDs */ +enum { + KOMEDA_COMPONENT_LAYER0 = 0, + KOMEDA_COMPONENT_LAYER1 = 1, + KOMEDA_COMPONENT_LAYER2 = 2, + KOMEDA_COMPONENT_LAYER3 = 3, + KOMEDA_COMPONENT_WB_LAYER = 7, /* write back layer */ + KOMEDA_COMPONENT_SCALER0 = 8, + KOMEDA_COMPONENT_SCALER1 = 9, + KOMEDA_COMPONENT_SPLITTER = 12, + KOMEDA_COMPONENT_MERGER = 14, + KOMEDA_COMPONENT_COMPIZ0 = 16, /* compositor */ + KOMEDA_COMPONENT_COMPIZ1 = 17, + KOMEDA_COMPONENT_IPS0 = 20, /* post image processor */ + KOMEDA_COMPONENT_IPS1 = 21, + KOMEDA_COMPONENT_TIMING_CTRLR = 22, /* timing controller */ +}; + +#define KOMEDA_PIPELINE_LAYERS (BIT(KOMEDA_COMPONENT_LAYER0) |\ + BIT(KOMEDA_COMPONENT_LAYER1) |\ + BIT(KOMEDA_COMPONENT_LAYER2) |\ + BIT(KOMEDA_COMPONENT_LAYER3)) + +#define KOMEDA_PIPELINE_SCALERS (BIT(KOMEDA_COMPONENT_SCALER0) |\ + BIT(KOMEDA_COMPONENT_SCALER1)) + +#define KOMEDA_PIPELINE_COMPIZS (BIT(KOMEDA_COMPONENT_COMPIZ0) |\ + BIT(KOMEDA_COMPONENT_COMPIZ1)) + +#define KOMEDA_PIPELINE_IMPROCS (BIT(KOMEDA_COMPONENT_IPS0) |\ + BIT(KOMEDA_COMPONENT_IPS1)) +struct komeda_component; +struct komeda_component_state; + +/** komeda_component_funcs - component control functions */ +struct komeda_component_funcs { + /** @validate: optional, + * component may has special requirements or limitations, this function + * supply HW the ability to do the further HW specific check. 
+ */ + int (*validate)(struct komeda_component *c, + struct komeda_component_state *state); + /** @update: update is a active update */ + void (*update)(struct komeda_component *c, + struct komeda_component_state *state); + /** @disable: disable component */ + void (*disable)(struct komeda_component *c); + /** @dump_register: Optional, dump registers to seq_file */ + void (*dump_register)(struct komeda_component *c, struct seq_file *seq); +}; + +/** + * struct komeda_component + * + * struct komeda_component describe the data flow capabilities for how to link a + * component into the display pipeline. + * all specified components are subclass of this structure. + */ +struct komeda_component { + /** @obj: treat component as private obj */ + struct drm_private_obj obj; + /** @pipeline: the komeda pipeline this component belongs to */ + struct komeda_pipeline *pipeline; + /** @name: component name */ + char name[32]; + /** + * @reg: + * component register base, + * which is initialized by chip and used by chip only + */ + u32 __iomem *reg; + /** @id: component id */ + u32 id; + /** @hw_ic: component hw id, + * which is initialized by chip and used by chip only + */ + u32 hw_id; + + /** + * @max_active_inputs: + * @max_active_outpus: + * + * maximum number of inputs/outputs that can be active in the same time + * Note: + * the number isn't the bit number of @supported_inputs or + * @supported_outputs, but may be less than it, since component may not + * support enabling all @supported_inputs/outputs at the same time. + */ + u8 max_active_inputs; + u8 max_active_outputs; + /** + * @supported_inputs: + * @supported_outputs: + * + * bitmask of BIT(component->id) for the supported inputs/outputs + * describes the possibilities of how a component is linked into a + * pipeline. + */ + u32 supported_inputs; + u32 supported_outputs; + + /** + * @funcs: chip functions to access HW + */ + struct komeda_component_funcs *funcs; +}; + +/** + * struct komeda_component_output + * + * a component has multiple outputs, if want to know where the data + * comes from, only know the component is not enough, we still need to know + * its output port + */ +struct komeda_component_output { + /** @component: indicate which component the data comes from */ + struct komeda_component *component; + /** @output_port: + * the output port of the &komeda_component_output.component + */ + u8 output_port; +}; + +/** + * struct komeda_component_state + * + * component_state is the data flow configuration of the component, and it's + * the superclass of all specific component_state like @komeda_layer_state, + * @komeda_scaler_state + */ +struct komeda_component_state { + /** @obj: tracking component_state by drm_atomic_state */ + struct drm_private_state obj; + struct komeda_component *component; + /** + * @binding_user: + * currently bound user, the user can be crtc/plane/wb_conn, which is + * valid decided by @component and @inputs + * + * - Layer: its user always is plane. + * - compiz/improc/timing_ctrlr: the user is crtc. + * - wb_layer: wb_conn; + * - scaler: plane when input is layer, wb_conn if input is compiz. 
+ */ + union { + struct drm_crtc *crtc; + struct drm_plane *plane; + struct drm_connector *wb_conn; + void *binding_user; + }; + /** + * @active_inputs: + * + * active_inputs is bitmask of @inputs index + * + * - active_inputs = changed_active_inputs + unchanged_active_inputs + * - affected_inputs = old->active_inputs + new->active_inputs; + * - disabling_inputs = affected_inputs ^ active_inputs; + * - changed_inputs = disabling_inputs + changed_active_inputs; + * + * NOTE: + * changed_inputs doesn't include all active_input but only + * @changed_active_inputs, and this bitmask can be used in chip + * level for dirty update. + */ + u16 active_inputs; + u16 changed_active_inputs; + u16 affected_inputs; + /** + * @inputs: + * + * the specific inputs[i] only valid on BIT(i) has been set in + * @active_inputs, if not the inputs[i] is undefined. + */ + struct komeda_component_output inputs[KOMEDA_COMPONENT_N_INPUTS]; +}; + +static inline u16 component_disabling_inputs(struct komeda_component_state *st) +{ + return st->affected_inputs ^ st->active_inputs; +} + +static inline u16 component_changed_inputs(struct komeda_component_state *st) +{ + return component_disabling_inputs(st) | st->changed_active_inputs; +} + +#define to_comp(__c) (((__c) == NULL) ? NULL : &((__c)->base)) +#define to_cpos(__c) ((struct komeda_component **)&(__c)) + +/* these structures are going to be filled in in uture patches */ +struct komeda_layer { + struct komeda_component base; + /* layer specific features and caps */ + int layer_type; /* RICH, SIMPLE or WB */ +}; + +struct komeda_layer_state { + struct komeda_component_state base; + /* layer specific configuration state */ +}; + +struct komeda_compiz { + struct komeda_component base; + /* compiz specific features and caps */ +}; + +struct komeda_compiz_state { + struct komeda_component_state base; + /* compiz specific configuration state */ +}; + +struct komeda_scaler { + struct komeda_component base; + /* scaler features and caps */ +}; + +struct komeda_scaler_state { + struct komeda_component_state base; +}; + +struct komeda_improc { + struct komeda_component base; +}; + +struct komeda_improc_state { + struct komeda_component_state base; +}; + +/* display timing controller */ +struct komeda_timing_ctrlr { + struct komeda_component base; +}; + +struct komeda_timing_ctrlr_state { + struct komeda_component_state base; +}; + +/** struct komeda_pipeline_funcs */ +struct komeda_pipeline_funcs { + /* dump_register: Optional, dump registers to seq_file */ + void (*dump_register)(struct komeda_pipeline *pipe, + struct seq_file *sf); +}; + +/** + * struct komeda_pipeline + * + * Represent a complete display pipeline and hold all functional components. 
+ */ +struct komeda_pipeline { + /** @obj: link pipeline as private obj of drm_atomic_state */ + struct drm_private_obj obj; + /** @mdev: the parent komeda_dev */ + struct komeda_dev *mdev; + /** @pxlclk: pixel clock */ + struct clk *pxlclk; + /** @aclk: AXI clock */ + struct clk *aclk; + /** @id: pipeline id */ + int id; + /** @avail_comps: available components mask of pipeline */ + u32 avail_comps; + int n_layers; + struct komeda_layer *layers[KOMEDA_PIPELINE_MAX_LAYERS]; + int n_scalers; + struct komeda_scaler *scalers[KOMEDA_PIPELINE_MAX_SCALERS]; + struct komeda_compiz *compiz; + struct komeda_layer *wb_layer; + struct komeda_improc *improc; + struct komeda_timing_ctrlr *ctrlr; + struct komeda_pipeline_funcs *funcs; /* private pipeline functions */ + + /** @of_node: pipeline dt node */ + struct device_node *of_node; + /** @of_output_port: pipeline output port */ + struct device_node *of_output_port; + /** @of_output_dev: output connector device node */ + struct device_node *of_output_dev; +}; + +/** + * struct komeda_pipeline_state + * + * NOTE: + * Unlike the pipeline, pipeline_state doesn’t gather any component_state + * into it. It because all component will be managed by drm_atomic_state. + */ +struct komeda_pipeline_state { + /** @obj: tracking pipeline_state by drm_atomic_state */ + struct drm_private_state obj; + struct komeda_pipeline *pipe; + /** @crtc: currently bound crtc */ + struct drm_crtc *crtc; + /** + * @active_comps: + * + * bitmask - BIT(component->id) of active components + */ + u32 active_comps; +}; + +#define to_layer(c) container_of(c, struct komeda_layer, base) +#define to_compiz(c) container_of(c, struct komeda_compiz, base) +#define to_scaler(c) container_of(c, struct komeda_scaler, base) +#define to_improc(c) container_of(c, struct komeda_improc, base) +#define to_ctrlr(c) container_of(c, struct komeda_timing_ctrlr, base) + +#define to_layer_st(c) container_of(c, struct komeda_layer_state, base) +#define to_compiz_st(c) container_of(c, struct komeda_compiz_state, base) +#define to_scaler_st(c) container_of(c, struct komeda_scaler_state, base) +#define to_improc_st(c) container_of(c, struct komeda_improc_state, base) +#define to_ctrlr_st(c) container_of(c, struct komeda_timing_ctrlr_state, base) + +#define priv_to_comp_st(o) container_of(o, struct komeda_component_state, obj) +#define priv_to_pipe_st(o) container_of(o, struct komeda_pipeline_state, obj) + +/* pipeline APIs */ +struct komeda_pipeline * +komeda_pipeline_add(struct komeda_dev *mdev, size_t size, + struct komeda_pipeline_funcs *funcs); +void komeda_pipeline_destroy(struct komeda_dev *mdev, + struct komeda_pipeline *pipe); + +struct komeda_component * +komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id); + +/* component APIs */ +struct komeda_component * +komeda_component_add(struct komeda_pipeline *pipe, + size_t comp_sz, u32 id, u32 hw_id, + struct komeda_component_funcs *funcs, + u8 max_active_inputs, u32 supported_inputs, + u8 max_active_outputs, u32 __iomem *reg, + const char *name_fmt, ...); + +void komeda_component_destroy(struct komeda_dev *mdev, + struct komeda_component *c); + +#endif /* _KOMEDA_PIPELINE_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c new file mode 100644 index 000000000000..0a4953a9a909 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang + * + */ +#include +#include +#include +#include "komeda_dev.h" +#include "komeda_kms.h" + +static const struct drm_plane_helper_funcs komeda_plane_helper_funcs = { +}; + +static void komeda_plane_destroy(struct drm_plane *plane) +{ + drm_plane_cleanup(plane); + + kfree(to_kplane(plane)); +} + +static const struct drm_plane_funcs komeda_plane_funcs = { +}; + +/* for komeda, which is pipeline can be share between crtcs */ +static u32 get_possible_crtcs(struct komeda_kms_dev *kms, + struct komeda_pipeline *pipe) +{ + struct komeda_crtc *crtc; + u32 possible_crtcs = 0; + int i; + + for (i = 0; i < kms->n_crtcs; i++) { + crtc = &kms->crtcs[i]; + + if ((pipe == crtc->master) || (pipe == crtc->slave)) + possible_crtcs |= BIT(i); + } + + return possible_crtcs; +} + +/* use Layer0 as primary */ +static u32 get_plane_type(struct komeda_kms_dev *kms, + struct komeda_component *c) +{ + bool is_primary = (c->id == KOMEDA_COMPONENT_LAYER0); + + return is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; +} + +static int komeda_plane_add(struct komeda_kms_dev *kms, + struct komeda_layer *layer) +{ + struct komeda_dev *mdev = kms->base.dev_private; + struct komeda_component *c = &layer->base; + struct komeda_plane *kplane; + struct drm_plane *plane; + u32 *formats, n_formats = 0; + int err; + + kplane = kzalloc(sizeof(*kplane), GFP_KERNEL); + if (!kplane) + return -ENOMEM; + + plane = &kplane->base; + kplane->layer = layer; + + formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl, + layer->layer_type, &n_formats); + + err = drm_universal_plane_init(&kms->base, plane, + get_possible_crtcs(kms, c->pipeline), + &komeda_plane_funcs, + formats, n_formats, NULL, + get_plane_type(kms, c), + "%s", c->name); + + komeda_put_fourcc_list(formats); + + if (err) + goto cleanup; + + drm_plane_helper_add(plane, &komeda_plane_helper_funcs); + + return 0; +cleanup: + komeda_plane_destroy(plane); + return err; +} + +int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev) +{ + struct komeda_pipeline *pipe; + int i, j, err; + + for (i = 0; i < mdev->n_pipelines; i++) { + pipe = mdev->pipelines[i]; + + for (j = 0; j < pipe->n_layers; j++) { + err = komeda_plane_add(kms, pipe->layers[j]); + if (err) + return err; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c new file mode 100644 index 000000000000..f1c9e3fefa86 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2018 ARM Limited. All rights reserved. 
+ * Author: James.Qian.Wang + * + */ +#include "komeda_dev.h" +#include "komeda_kms.h" + +static struct drm_private_state * +komeda_pipeline_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct komeda_pipeline_state *st; + + st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL); + if (!st) + return NULL; + + st->active_comps = 0; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &st->obj); + + return &st->obj; +} + +static void +komeda_pipeline_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + kfree(priv_to_pipe_st(state)); +} + +static const struct drm_private_state_funcs komeda_pipeline_obj_funcs = { + .atomic_duplicate_state = komeda_pipeline_atomic_duplicate_state, + .atomic_destroy_state = komeda_pipeline_atomic_destroy_state, +}; + +static int komeda_pipeline_obj_add(struct komeda_kms_dev *kms, + struct komeda_pipeline *pipe) +{ + struct komeda_pipeline_state *st; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->pipe = pipe; + drm_atomic_private_obj_init(&kms->base, &pipe->obj, &st->obj, + &komeda_pipeline_obj_funcs); + + return 0; +} + +int komeda_kms_add_private_objs(struct komeda_kms_dev *kms, + struct komeda_dev *mdev) +{ + struct komeda_pipeline *pipe; + int i, err; + + for (i = 0; i < mdev->n_pipelines; i++) { + pipe = mdev->pipelines[i]; + + err = komeda_pipeline_obj_add(kms, pipe); + if (err) + return err; + + /* Add component */ + } + + return 0; +} + +void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev) +{ + struct komeda_pipeline *pipe; + struct komeda_component *c; + int i, id; + + for (i = 0; i < mdev->n_pipelines; i++) { + pipe = mdev->pipelines[i]; + dp_for_each_set_bit(id, pipe->avail_comps) { + c = komeda_pipeline_get_component(pipe, id); + + drm_atomic_private_obj_fini(&c->obj); + } + drm_atomic_private_obj_fini(&pipe->obj); + } +} diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index e4d67b70244d..0b2b62f8fa3c 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -13,12 +13,12 @@ #include #include #include -#include -#include #include +#include #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index dfad8d06d108..8fc0b884c428 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -22,13 +22,13 @@ #include #include #include -#include -#include #include +#include #include #include #include #include +#include #include "hdlcd_drv.h" #include "hdlcd_regs.h" @@ -229,7 +229,7 @@ static int hdlcd_debugfs_init(struct drm_minor *minor) DEFINE_DRM_GEM_CMA_FOPS(fops); static struct drm_driver hdlcd_driver = { - .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC, .irq_handler = hdlcd_irq, diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index e1b72782848c..56aad288666e 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include #include #include
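Note on the komeda interfaces added above: the core only consumes a chip backend through komeda_dev_funcs, komeda_pipeline_add() and komeda_component_add(). The following is a minimal, hypothetical backend sketch written only against those declarations -- it is not the D71 implementation from this series, and every example_* identifier is invented for illustration.

/* Hypothetical chip backend sketch; a real backend also supplies
 * komeda_component_funcs and reads its IDs from hardware registers.
 */
#include "komeda_dev.h"
#include "komeda_pipeline.h"

static void example_init_format_table(struct komeda_dev *mdev)
{
	/* a real chip fills mdev->fmt_tbl.format_caps / n_formats here */
}

static int example_enum_resources(struct komeda_dev *mdev)
{
	struct komeda_pipeline *pipe;

	/* report one pipeline to the core (no pipeline funcs in this sketch) */
	pipe = komeda_pipeline_add(mdev, sizeof(*pipe), NULL);
	if (!pipe)
		return -ENOMEM;

	/* add a single layer; a real backend passes its komeda_component_funcs */
	if (!komeda_component_add(pipe, sizeof(struct komeda_layer),
				  KOMEDA_COMPONENT_LAYER0, 0 /* hw_id */,
				  NULL /* funcs */,
				  0 /* max_active_inputs */,
				  0 /* supported_inputs */,
				  1 /* max_active_outputs */,
				  mdev->reg_base, "LAYER0"))
		return -ENOMEM;

	return 0;
}

static void example_cleanup(struct komeda_dev *mdev)
{
	/* free whatever enum_resources stored in mdev->chip_data */
}

static struct komeda_dev_funcs example_funcs = {
	.init_format_table = example_init_format_table,
	.enum_resources = example_enum_resources,
	.cleanup = example_cleanup,
};

/* chip entry point in the style of d71_identify() */
struct komeda_dev_funcs *
example_identify(u32 __iomem *reg, struct komeda_chip_info *chip)
{
	chip->arch_id = 0;	/* read from registers on real hardware */
	chip->core_id = 0;
	chip->core_info = 0;
	chip->bus_width = 16;	/* assumed value; used for pitch alignment checks */

	return &example_funcs;
}

Per the komeda_dev_funcs kerneldoc above, init_format_table is expected to run before enum_resources, and the identify callback is what komeda_product_data.identify points at for a given compatible string.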
+/*
+ * Format: dm-mod.create=<name>,<uuid>,<minor>,<flags>,<table>[,<table>+][;<name>,<uuid>,<minor>,<flags>,<table>[,<table>
+]+] + * Table format: + * + * See Documentation/device-mapper/dm-init.txt for dm-mod.create="..." format + * details. + */ + +struct dm_device { + struct dm_ioctl dmi; + struct dm_target_spec *table[DM_MAX_TARGETS]; + char *target_args_array[DM_MAX_TARGETS]; + struct list_head list; +}; + +const char *dm_allowed_targets[] __initconst = { + "crypt", + "delay", + "linear", + "snapshot-origin", + "striped", + "verity", +}; + +static int __init dm_verify_target_type(const char *target) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(dm_allowed_targets); i++) { + if (!strcmp(dm_allowed_targets[i], target)) + return 0; + } + return -EINVAL; +} + +static void __init dm_setup_cleanup(struct list_head *devices) +{ + struct dm_device *dev, *tmp; + unsigned int i; + + list_for_each_entry_safe(dev, tmp, devices, list) { + list_del(&dev->list); + for (i = 0; i < dev->dmi.target_count; i++) { + kfree(dev->table[i]); + kfree(dev->target_args_array[i]); + } + kfree(dev); + } +} + +/** + * str_field_delimit - delimit a string based on a separator char. + * @str: the pointer to the string to delimit. + * @separator: char that delimits the field + * + * Find a @separator and replace it by '\0'. + * Remove leading and trailing spaces. + * Return the remainder string after the @separator. + */ +static char __init *str_field_delimit(char **str, char separator) +{ + char *s; + + /* TODO: add support for escaped characters */ + *str = skip_spaces(*str); + s = strchr(*str, separator); + /* Delimit the field and remove trailing spaces */ + if (s) + *s = '\0'; + *str = strim(*str); + return s ? ++s : NULL; +} + +/** + * dm_parse_table_entry - parse a table entry + * @dev: device to store the parsed information. + * @str: the pointer to a string with the format: + * [, ...] + * + * Return the remainder string after the table entry, i.e, after the comma which + * delimits the entry or NULL if reached the end of the string. + */ +static char __init *dm_parse_table_entry(struct dm_device *dev, char *str) +{ + const unsigned int n = dev->dmi.target_count - 1; + struct dm_target_spec *sp; + unsigned int i; + /* fields: */ + char *field[4]; + char *next; + + field[0] = str; + /* Delimit first 3 fields that are separated by space */ + for (i = 0; i < ARRAY_SIZE(field) - 1; i++) { + field[i + 1] = str_field_delimit(&field[i], ' '); + if (!field[i + 1]) + return ERR_PTR(-EINVAL); + } + /* Delimit last field that can be terminated by comma */ + next = str_field_delimit(&field[i], ','); + + sp = kzalloc(sizeof(*sp), GFP_KERNEL); + if (!sp) + return ERR_PTR(-ENOMEM); + dev->table[n] = sp; + + /* start_sector */ + if (kstrtoull(field[0], 0, &sp->sector_start)) + return ERR_PTR(-EINVAL); + /* num_sector */ + if (kstrtoull(field[1], 0, &sp->length)) + return ERR_PTR(-EINVAL); + /* target_type */ + strscpy(sp->target_type, field[2], sizeof(sp->target_type)); + if (dm_verify_target_type(sp->target_type)) { + DMERR("invalid type \"%s\"", sp->target_type); + return ERR_PTR(-EINVAL); + } + /* target_args */ + dev->target_args_array[n] = kstrndup(field[3], GFP_KERNEL, + DM_MAX_STR_SIZE); + if (!dev->target_args_array[n]) + return ERR_PTR(-ENOMEM); + + return next; +} + +/** + * dm_parse_table - parse "dm-mod.create=" table field + * @dev: device to store the parsed information. + * @str: the pointer to a string with the format: + *
+ *	<table>[,<table>
+] + */ +static int __init dm_parse_table(struct dm_device *dev, char *str) +{ + char *table_entry = str; + + while (table_entry) { + DMDEBUG("parsing table \"%s\"", str); + if (++dev->dmi.target_count >= DM_MAX_TARGETS) { + DMERR("too many targets %u > %d", + dev->dmi.target_count, DM_MAX_TARGETS); + return -EINVAL; + } + table_entry = dm_parse_table_entry(dev, table_entry); + if (IS_ERR(table_entry)) { + DMERR("couldn't parse table"); + return PTR_ERR(table_entry); + } + } + + return 0; +} + +/** + * dm_parse_device_entry - parse a device entry + * @dev: device to store the parsed information. + * @str: the pointer to a string with the format: + * name,uuid,minor,flags,table[; ...] + * + * Return the remainder string after the table entry, i.e, after the semi-colon + * which delimits the entry or NULL if reached the end of the string. + */ +static char __init *dm_parse_device_entry(struct dm_device *dev, char *str) +{ + /* There are 5 fields: name,uuid,minor,flags,table; */ + char *field[5]; + unsigned int i; + char *next; + + field[0] = str; + /* Delimit first 4 fields that are separated by comma */ + for (i = 0; i < ARRAY_SIZE(field) - 1; i++) { + field[i+1] = str_field_delimit(&field[i], ','); + if (!field[i+1]) + return ERR_PTR(-EINVAL); + } + /* Delimit last field that can be delimited by semi-colon */ + next = str_field_delimit(&field[i], ';'); + + /* name */ + strscpy(dev->dmi.name, field[0], sizeof(dev->dmi.name)); + /* uuid */ + strscpy(dev->dmi.uuid, field[1], sizeof(dev->dmi.uuid)); + /* minor */ + if (strlen(field[2])) { + if (kstrtoull(field[2], 0, &dev->dmi.dev)) + return ERR_PTR(-EINVAL); + dev->dmi.flags |= DM_PERSISTENT_DEV_FLAG; + } + /* flags */ + if (!strcmp(field[3], "ro")) + dev->dmi.flags |= DM_READONLY_FLAG; + else if (strcmp(field[3], "rw")) + return ERR_PTR(-EINVAL); + /* table */ + if (dm_parse_table(dev, field[4])) + return ERR_PTR(-EINVAL); + + return next; +} + +/** + * dm_parse_devices - parse "dm-mod.create=" argument + * @devices: list of struct dm_device to store the parsed information. + * @str: the pointer to a string with the format: + * [;+] + */ +static int __init dm_parse_devices(struct list_head *devices, char *str) +{ + unsigned long ndev = 0; + struct dm_device *dev; + char *device = str; + + DMDEBUG("parsing \"%s\"", str); + while (device) { + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + list_add_tail(&dev->list, devices); + + if (++ndev >= DM_MAX_DEVICES) { + DMERR("too many targets %u > %d", + dev->dmi.target_count, DM_MAX_TARGETS); + return -EINVAL; + } + + device = dm_parse_device_entry(dev, device); + if (IS_ERR(device)) { + DMERR("couldn't parse device"); + return PTR_ERR(device); + } + } + + return 0; +} + +/** + * dm_init_init - parse "dm-mod.create=" argument and configure drivers + */ +static int __init dm_init_init(void) +{ + struct dm_device *dev; + LIST_HEAD(devices); + char *str; + int r; + + if (!create) + return 0; + + if (strlen(create) >= DM_MAX_STR_SIZE) { + DMERR("Argument is too big. 
Limit is %d\n", DM_MAX_STR_SIZE); + return -EINVAL; + } + str = kstrndup(create, GFP_KERNEL, DM_MAX_STR_SIZE); + if (!str) + return -ENOMEM; + + r = dm_parse_devices(&devices, str); + if (r) + goto out; + + DMINFO("waiting for all devices to be available before creating mapped devices\n"); + wait_for_device_probe(); + + list_for_each_entry(dev, &devices, list) { + if (dm_early_create(&dev->dmi, dev->table, + dev->target_args_array)) + break; + } +out: + kfree(str); + dm_setup_cleanup(&devices); + return r; +} + +late_initcall(dm_init_init); + +module_param(create, charp, 0); +MODULE_PARM_DESC(create, "Create a mapped device in early boot"); diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 457200ca6287..d57d997a52c8 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1122,7 +1122,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se return r; data = dm_bufio_read(ic->bufio, *metadata_block, &b); - if (unlikely(IS_ERR(data))) + if (IS_ERR(data)) return PTR_ERR(data); to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); @@ -1368,8 +1368,8 @@ again: checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE); if (unlikely(r)) { if (r > 0) { - DMERR("Checksum failed at sector 0x%llx", - (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); + DMERR_LIMIT("Checksum failed at sector 0x%llx", + (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size))); r = -EILSEQ; atomic64_inc(&ic->number_of_mismatches); } @@ -1561,8 +1561,8 @@ retry_kmap: integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack); if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) { - DMERR("Checksum failed when reading from journal, at sector 0x%llx", - (unsigned long long)logical_sector); + DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx", + (unsigned long long)logical_sector); } } #endif diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index f666778ad237..c740153b4e52 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -2018,3 +2018,106 @@ out: return r; } + + +/** + * dm_early_create - create a mapped device in early boot. + * + * @dmi: Contains main information of the device mapping to be created. + * @spec_array: array of pointers to struct dm_target_spec. Describes the + * mapping table of the device. + * @target_params_array: array of strings with the parameters to a specific + * target. + * + * Instead of having the struct dm_target_spec and the parameters for every + * target embedded at the end of struct dm_ioctl (as performed in a normal + * ioctl), pass them as arguments, so the caller doesn't need to serialize them. + * The size of the spec_array and target_params_array is given by + * @dmi->target_count. + * This function is supposed to be called in early boot, so locking mechanisms + * to protect against concurrent loads are not required. 
+ */ +int __init dm_early_create(struct dm_ioctl *dmi, + struct dm_target_spec **spec_array, + char **target_params_array) +{ + int r, m = DM_ANY_MINOR; + struct dm_table *t, *old_map; + struct mapped_device *md; + unsigned int i; + + if (!dmi->target_count) + return -EINVAL; + + r = check_name(dmi->name); + if (r) + return r; + + if (dmi->flags & DM_PERSISTENT_DEV_FLAG) + m = MINOR(huge_decode_dev(dmi->dev)); + + /* alloc dm device */ + r = dm_create(m, &md); + if (r) + return r; + + /* hash insert */ + r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md); + if (r) + goto err_destroy_dm; + + /* alloc table */ + r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md); + if (r) + goto err_destroy_dm; + + /* add targets */ + for (i = 0; i < dmi->target_count; i++) { + r = dm_table_add_target(t, spec_array[i]->target_type, + (sector_t) spec_array[i]->sector_start, + (sector_t) spec_array[i]->length, + target_params_array[i]); + if (r) { + DMWARN("error adding target to table"); + goto err_destroy_table; + } + } + + /* finish table */ + r = dm_table_complete(t); + if (r) + goto err_destroy_table; + + md->type = dm_table_get_type(t); + /* setup md->queue to reflect md's type (may block) */ + r = dm_setup_md_queue(md, t); + if (r) { + DMWARN("unable to set up device queue for new table."); + goto err_destroy_table; + } + + /* Set new map */ + dm_suspend(md, 0); + old_map = dm_swap_table(md, t); + if (IS_ERR(old_map)) { + r = PTR_ERR(old_map); + goto err_destroy_table; + } + set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG)); + + /* resume device */ + r = dm_resume(md); + if (r) + goto err_destroy_table; + + DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name); + dm_put(md); + return 0; + +err_destroy_table: + dm_table_destroy(t); +err_destroy_dm: + dm_put(md); + dm_destroy(md); + return r; +} diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index adcfe8ae10aa..9fdef6897316 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2986,11 +2986,6 @@ static void configure_discard_support(struct raid_set *rs) } } - /* - * RAID1 and RAID10 personalities require bio splitting, - * RAID0/4/5/6 don't and process large discard bios properly. - */ - ti->split_discard_bios = !!(rs_is_raid1(rs) || rs_is_raid10(rs)); ti->num_discard_bios = 1; } @@ -3747,6 +3742,15 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, chunk_size); blk_limits_io_opt(limits, chunk_size * mddev_data_stripes(rs)); + + /* + * RAID1 and RAID10 personalities require bio splitting, + * RAID0/4/5/6 don't and process large discard bios properly. + */ + if (rs_is_raid1(rs) || rs_is_raid10(rs)) { + limits->discard_granularity = chunk_size; + limits->max_discard_sectors = chunk_size; + } } static void raid_postsuspend(struct dm_target *ti) diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 4eb5f8c56535..09773636602d 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -12,6 +12,22 @@ #define DM_MSG_PREFIX "core-rq" +/* + * One of these is allocated per request. 
+ */ +struct dm_rq_target_io { + struct mapped_device *md; + struct dm_target *ti; + struct request *orig, *clone; + struct kthread_work work; + blk_status_t error; + union map_info info; + struct dm_stats_aux stats_aux; + unsigned long duration_jiffies; + unsigned n_sectors; + unsigned completed; +}; + #define DM_MQ_NR_HW_QUEUES 1 #define DM_MQ_QUEUE_DEPTH 2048 static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES; @@ -131,7 +147,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) static void rq_completed(struct mapped_device *md) { /* nudge anyone waiting on suspend queue */ - if (unlikely(waitqueue_active(&md->wait))) + if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); /* @@ -527,7 +543,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) md->tag_set->ops = &dm_mq_ops; md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); md->tag_set->numa_node = md->numa_node_id; - md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE; md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); md->tag_set->driver_data = md; diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h index b39245545229..1eea0da641db 100644 --- a/drivers/md/dm-rq.h +++ b/drivers/md/dm-rq.h @@ -16,22 +16,6 @@ struct mapped_device; -/* - * One of these is allocated per request. - */ -struct dm_rq_target_io { - struct mapped_device *md; - struct dm_target *ti; - struct request *orig, *clone; - struct kthread_work work; - blk_status_t error; - union map_info info; - struct dm_stats_aux stats_aux; - unsigned long duration_jiffies; - unsigned n_sectors; - unsigned completed; -}; - /* * For request-based dm - the bio clones we allocate are embedded in these * structs. diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 36805b12661e..a168963b757d 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -2338,13 +2338,6 @@ static int origin_map(struct dm_target *ti, struct bio *bio) return do_origin(o->dev, bio); } -static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) -{ - DMWARN("device does not support dax."); - return -EIO; -} - /* * Set the target "max_io_len" field to the minimum of all the snapshots' * chunk sizes. 
@@ -2404,7 +2397,6 @@ static struct target_type origin_target = { .postsuspend = origin_postsuspend, .status = origin_status, .iterate_devices = origin_iterate_devices, - .direct_access = origin_dax_direct_access, }; static struct target_type snapshot_target = { diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index fae35caf3672..8a0f057b8122 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -61,8 +61,7 @@ static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_pat { struct switch_ctx *sctx; - sctx = kzalloc(sizeof(struct switch_ctx) + nr_paths * sizeof(struct switch_path), - GFP_KERNEL); + sctx = kzalloc(struct_size(sctx, path_list, nr_paths), GFP_KERNEL); if (!sctx) return NULL; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 4b1be754cc41..ba9481f1bf3c 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1698,14 +1698,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, return q && !blk_queue_add_random(q); } -static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) -{ - struct request_queue *q = bdev_get_queue(dev->bdev); - - return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); -} - static bool dm_table_all_devices_attribute(struct dm_table *t, iterate_devices_callout_fn func) { @@ -1902,11 +1894,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (!dm_table_supports_write_zeroes(t)) q->limits.max_write_zeroes_sectors = 0; - if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) - blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q); - else - blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q); - dm_table_verify_integrity(t); /* diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ca8af21bf644..fcd887703f95 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -257,6 +257,7 @@ struct pool { spinlock_t lock; struct bio_list deferred_flush_bios; + struct bio_list deferred_flush_completions; struct list_head prepared_mappings; struct list_head prepared_discards; struct list_head prepared_discards_pt2; @@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) mempool_free(m, &m->tc->pool->mapping_pool); } +static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) +{ + struct pool *pool = tc->pool; + unsigned long flags; + + /* + * If the bio has the REQ_FUA flag set we must commit the metadata + * before signaling its completion. + */ + if (!bio_triggers_commit(tc, bio)) { + bio_endio(bio); + return; + } + + /* + * Complete bio with an error if earlier I/O caused changes to the + * metadata that can't be committed, e.g, due to I/O errors on the + * metadata device. + */ + if (dm_thin_aborted_changes(tc->td)) { + bio_io_error(bio); + return; + } + + /* + * Batch together any bios that trigger commits and then issue a + * single commit for them in process_deferred_bios(). 
+ */ + spin_lock_irqsave(&pool->lock, flags); + bio_list_add(&pool->deferred_flush_completions, bio); + spin_unlock_irqrestore(&pool->lock, flags); +} + static void process_prepared_mapping(struct dm_thin_new_mapping *m) { struct thin_c *tc = m->tc; @@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) */ if (bio) { inc_remap_and_issue_cell(tc, m->cell, m->data_block); - bio_endio(bio); + complete_overwrite_bio(tc, bio); } else { inc_all_io_entry(tc->pool, m->cell->holder); remap_and_issue(tc, m->cell->holder, m->data_block); @@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool) { unsigned long flags; struct bio *bio; - struct bio_list bios; + struct bio_list bios, bio_completions; struct thin_c *tc; tc = get_first_thin(pool); @@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool) } /* - * If there are any deferred flush bios, we must commit - * the metadata before issuing them. + * If there are any deferred flush bios, we must commit the metadata + * before issuing them or signaling their completion. */ bio_list_init(&bios); + bio_list_init(&bio_completions); + spin_lock_irqsave(&pool->lock, flags); bio_list_merge(&bios, &pool->deferred_flush_bios); bio_list_init(&pool->deferred_flush_bios); + + bio_list_merge(&bio_completions, &pool->deferred_flush_completions); + bio_list_init(&pool->deferred_flush_completions); spin_unlock_irqrestore(&pool->lock, flags); - if (bio_list_empty(&bios) && + if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) return; if (commit(pool)) { + bio_list_merge(&bios, &bio_completions); + while ((bio = bio_list_pop(&bios))) bio_io_error(bio); return; } pool->last_commit_jiffies = jiffies; + while ((bio = bio_list_pop(&bio_completions))) + bio_endio(bio); + while ((bio = bio_list_pop(&bios))) generic_make_request(bio); } @@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); spin_lock_init(&pool->lock); bio_list_init(&pool->deferred_flush_bios); + bio_list_init(&pool->deferred_flush_completions); INIT_LIST_HEAD(&pool->prepared_mappings); INIT_LIST_HEAD(&pool->prepared_discards); INIT_LIST_HEAD(&pool->prepared_discards_pt2); @@ -3238,6 +3283,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) as.argc = argc; as.argv = argv; + /* make sure metadata and data are different devices */ + if (!strcmp(argv[0], argv[1])) { + ti->error = "Error setting metadata or data device"; + r = -EINVAL; + goto out_unlock; + } + /* * Set default pool features. 
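The dm-thin changes around this point add a second deferred list (deferred_flush_completions) so that REQ_FUA overwrite completions are batched and covered by a single metadata commit in the worker, rather than one commit per bio. A minimal sketch of the splice-then-commit pattern; 'demo_pool' and the commit callback are illustrative stand-ins for dm-thin's pool and commit():

#include <linux/bio.h>
#include <linux/spinlock.h>

struct demo_pool {
	spinlock_t lock;
	struct bio_list deferred_completions;
};

/* producer: called from the I/O completion path */
static void demo_defer_completion(struct demo_pool *pool, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_completions, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* consumer: worker thread, one metadata commit covers the whole batch */
static void demo_process_deferred(struct demo_pool *pool,
				  int (*commit)(struct demo_pool *))
{
	struct bio_list completions;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&completions);

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&completions, &pool->deferred_completions);
	bio_list_init(&pool->deferred_completions);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&completions))
		return;

	if (commit(pool)) {
		/* commit failed: fail every bio that depended on it */
		while ((bio = bio_list_pop(&completions)))
			bio_io_error(bio);
		return;
	}

	while ((bio = bio_list_pop(&completions)))
		bio_endio(bio);
}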
*/ @@ -4122,6 +4174,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) tc->sort_bio_list = RB_ROOT; if (argc == 3) { + if (!strcmp(argv[0], argv[2])) { + ti->error = "Error setting origin device"; + r = -EINVAL; + goto bad_origin_dev; + } + r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev); if (r) { ti->error = "Error opening origin device"; @@ -4182,7 +4240,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) if (tc->pool->pf.discard_enabled) { ti->discards_supported = true; ti->num_discard_bios = 1; - ti->split_discard_bios = false; } mutex_unlock(&dm_thin_pool_table.mutex); diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 0ce04e5b4afb..b634fa23f4c4 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -73,7 +73,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index, *offset = (unsigned)(position - (block << v->data_dev_block_bits)); res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf); - if (unlikely(IS_ERR(res))) { + if (IS_ERR(res)) { DMERR("%s: FEC %llu: parity read failed (block %llu): %ld", v->data_dev->name, (unsigned long long)rsb, (unsigned long long)(v->fec->start + block), @@ -163,7 +163,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, dm_bufio_release(buf); par = fec_read_parity(v, rsb, block_offset, &offset, &buf); - if (unlikely(IS_ERR(par))) + if (IS_ERR(par)) return PTR_ERR(par); } } @@ -253,7 +253,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io, } bbuf = dm_bufio_read(bufio, block, &buf); - if (unlikely(IS_ERR(bbuf))) { + if (IS_ERR(bbuf)) { DMWARN_LIMIT("%s: FEC %llu: read failed (%llu): %ld", v->data_dev->name, (unsigned long long)rsb, diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 2b8cee35e4d5..f7822875589e 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -1859,7 +1859,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } - wc->writeback_wq = alloc_workqueue("writecache-writeabck", WQ_MEM_RECLAIM, 1); + wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1); if (!wc->writeback_wq) { r = -ENOMEM; ti->error = "Could not allocate writeback workqueue"; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 6af5babe6837..8865c1709e16 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -727,7 +727,6 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->per_io_data_size = sizeof(struct dmz_bioctx); ti->flush_supported = true; ti->discards_supported = true; - ti->split_discard_bios = true; /* The exposed capacity is the number of chunks that can be mapped */ ti->len = (sector_t)dmz_nr_chunks(dmz->metadata) << dev->zone_nr_sectors_shift; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2b53c3841b53..68d24056d0b1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -158,9 +158,6 @@ struct table_device { struct dm_dev dm_dev; }; -static struct kmem_cache *_rq_tio_cache; -static struct kmem_cache *_rq_cache; - /* * Bio-based DM's mempools' reserved IOs set by the user. 
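The dm-verity-fec hunks above (and the dm-block-manager hunks later) drop unlikely() wrappers around IS_ERR() because the branch hint is already built into IS_ERR() itself, so the wrapper is redundant noise. A paraphrased sketch of how <linux/err.h> defines this in kernels of this vintage; names are prefixed DEMO_ to make clear this is a re-statement, not the authoritative header:

#include <linux/compiler.h>
#include <linux/types.h>

#define DEMO_MAX_ERRNO	4095

/* the unlikely() lives inside the value check itself */
#define DEMO_IS_ERR_VALUE(x) \
	unlikely((unsigned long)(void *)(x) >= (unsigned long)-DEMO_MAX_ERRNO)

static inline bool demo_is_err(const void *ptr)
{
	return DEMO_IS_ERR_VALUE((unsigned long)ptr);
}

So a plain "if (IS_ERR(p))" already compiles to a cold branch; wrapping it in another unlikely() adds nothing.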
*/ @@ -222,20 +219,11 @@ static unsigned dm_get_numa_node(void) static int __init local_init(void) { - int r = -ENOMEM; - - _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0); - if (!_rq_tio_cache) - return r; - - _rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request), - __alignof__(struct request), 0, NULL); - if (!_rq_cache) - goto out_free_rq_tio_cache; + int r; r = dm_uevent_init(); if (r) - goto out_free_rq_cache; + return r; deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1); if (!deferred_remove_workqueue) { @@ -257,10 +245,6 @@ out_free_workqueue: destroy_workqueue(deferred_remove_workqueue); out_uevent_exit: dm_uevent_exit(); -out_free_rq_cache: - kmem_cache_destroy(_rq_cache); -out_free_rq_tio_cache: - kmem_cache_destroy(_rq_tio_cache); return r; } @@ -270,8 +254,6 @@ static void local_exit(void) flush_scheduled_work(); destroy_workqueue(deferred_remove_workqueue); - kmem_cache_destroy(_rq_cache); - kmem_cache_destroy(_rq_tio_cache); unregister_blkdev(_major, _name); dm_uevent_exit(); @@ -699,7 +681,7 @@ static void end_io_acct(struct dm_io *io) true, duration, &io->stats_aux); /* nudge anyone waiting on suspend queue */ - if (unlikely(waitqueue_active(&md->wait))) + if (unlikely(wq_has_sleeper(&md->wait))) wake_up(&md->wait); } @@ -1336,7 +1318,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, return r; } - bio_trim(clone, sector - clone->bi_iter.bi_sector, len); + bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); + clone->bi_iter.bi_size = to_bytes(len); + + if (bio_integrity(bio)) + bio_integrity_trim(clone); return 0; } @@ -1474,17 +1460,10 @@ static unsigned get_num_write_zeroes_bios(struct dm_target *ti) return ti->num_write_zeroes_bios; } -typedef bool (*is_split_required_fn)(struct dm_target *ti); - -static bool is_split_required_for_discard(struct dm_target *ti) -{ - return ti->split_discard_bios; -} - static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti, - unsigned num_bios, bool is_split_required) + unsigned num_bios) { - unsigned len; + unsigned len = ci->sector_count; /* * Even though the device advertised support for this type of @@ -1495,11 +1474,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target * if (!num_bios) return -EOPNOTSUPP; - if (!is_split_required) - len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); - else - len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti)); - __send_duplicate_bios(ci, ti, num_bios, &len); ci->sector += len; @@ -1510,23 +1484,38 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target * static int __send_discard(struct clone_info *ci, struct dm_target *ti) { - return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti), - is_split_required_for_discard(ti)); + return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti)); } static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti) { - return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti), false); + return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti)); } static int __send_write_same(struct clone_info *ci, struct dm_target *ti) { - return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti), false); + return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti)); } static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti) { - return __send_changing_extent_only(ci, ti, 
get_num_write_zeroes_bios(ti), false); + return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti)); +} + +static bool is_abnormal_io(struct bio *bio) +{ + bool r = false; + + switch (bio_op(bio)) { + case REQ_OP_DISCARD: + case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_SAME: + case REQ_OP_WRITE_ZEROES: + r = true; + break; + } + + return r; } static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti, @@ -1561,7 +1550,7 @@ static int __split_and_process_non_flush(struct clone_info *ci) if (!dm_target_is_valid(ti)) return -EIO; - if (unlikely(__process_abnormal_io(ci, ti, &r))) + if (__process_abnormal_io(ci, ti, &r)) return r; len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); @@ -1597,13 +1586,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, blk_qc_t ret = BLK_QC_T_NONE; int error = 0; - if (unlikely(!map)) { - bio_io_error(bio); - return ret; - } - - blk_queue_split(md->queue, &bio); - init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { @@ -1671,18 +1653,13 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, * Optimized variant of __split_and_process_bio that leverages the * fact that targets that use it do _not_ have a need to split bios. */ -static blk_qc_t __process_bio(struct mapped_device *md, - struct dm_table *map, struct bio *bio) +static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, + struct bio *bio, struct dm_target *ti) { struct clone_info ci; blk_qc_t ret = BLK_QC_T_NONE; int error = 0; - if (unlikely(!map)) { - bio_io_error(bio); - return ret; - } - init_clone_info(&ci, md, map, bio); if (bio->bi_opf & REQ_PREFLUSH) { @@ -1700,21 +1677,11 @@ static blk_qc_t __process_bio(struct mapped_device *md, error = __send_empty_flush(&ci); /* dec_pending submits any data associated with flush */ } else { - struct dm_target *ti = md->immutable_target; struct dm_target_io *tio; - /* - * Defend against IO still getting in during teardown - * - as was seen for a time with nvme-fcloop - */ - if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) { - error = -EIO; - goto out; - } - ci.bio = bio; ci.sector_count = bio_sectors(bio); - if (unlikely(__process_abnormal_io(&ci, ti, &error))) + if (__process_abnormal_io(&ci, ti, &error)) goto out; tio = alloc_tio(&ci, ti, 0, GFP_NOIO); @@ -1726,11 +1693,55 @@ out: return ret; } +static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) +{ + unsigned len, sector_count; + + sector_count = bio_sectors(*bio); + len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); + + if (sector_count > len) { + struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); + + bio_chain(split, *bio); + trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); + generic_make_request(*bio); + *bio = split; + } +} + static blk_qc_t dm_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { + blk_qc_t ret = BLK_QC_T_NONE; + struct dm_target *ti = md->immutable_target; + + if (unlikely(!map)) { + bio_io_error(bio); + return ret; + } + + if (!ti) { + ti = dm_table_find_target(map, bio->bi_iter.bi_sector); + if (unlikely(!ti || !dm_target_is_valid(ti))) { + bio_io_error(bio); + return ret; + } + } + + /* + * If in ->make_request_fn we need to use blk_queue_split(), otherwise + * queue_limits for abnormal requests (e.g. discard, writesame, etc) + * won't be imposed. 
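The new dm_queue_split() above uses the standard split-and-chain idiom: carve the permitted front portion off the bio, chain the remainder to it so the original completes only when both halves do, resubmit the remainder, and keep processing the front. A minimal sketch of that idiom under the assumption of a single per-target length limit ('split_len' is a stand-in for whatever limit applies):

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *demo_split_front(struct request_queue *q,
				    struct bio *bio, unsigned int split_len)
{
	struct bio *split;

	if (bio_sectors(bio) <= split_len)
		return bio;		/* small enough, nothing to do */

	/* carve the first split_len sectors into a new bio */
	split = bio_split(bio, split_len, GFP_NOIO, &q->bio_split);

	/*
	 * The original bio now describes the tail; its completion is
	 * deferred until the split front part has completed as well.
	 */
	bio_chain(split, bio);

	/* requeue the tail through the normal submission path */
	generic_make_request(bio);

	/* caller continues with the front part only */
	return split;
}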
+ */ + if (current->bio_list) { + blk_queue_split(md->queue, &bio); + if (!is_abnormal_io(bio)) + dm_queue_split(md, ti, &bio); + } + if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) - return __process_bio(md, map, bio); + return __process_bio(md, map, bio, ti); else return __split_and_process_bio(md, map, bio); } diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index d45c697c0ebe..5998d78aa189 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -96,8 +96,7 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) int i, cnt; bool discard_supported = false; - conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info), - GFP_KERNEL); + conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL); if (!conf) return NULL; diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 492a3f8ac119..3972232b8037 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -462,7 +462,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b, int r; p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); - if (unlikely(IS_ERR(p))) + if (IS_ERR(p)) return PTR_ERR(p); aux = dm_bufio_get_aux_data(to_buffer(*result)); @@ -498,7 +498,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm, return -EPERM; p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); - if (unlikely(IS_ERR(p))) + if (IS_ERR(p)) return PTR_ERR(p); aux = dm_bufio_get_aux_data(to_buffer(*result)); @@ -531,7 +531,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm, int r; p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result); - if (unlikely(IS_ERR(p))) + if (IS_ERR(p)) return PTR_ERR(p); if (unlikely(!p)) return -EWOULDBLOCK; @@ -567,7 +567,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, return -EPERM; p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); - if (unlikely(IS_ERR(p))) + if (IS_ERR(p)) return PTR_ERR(p); memset(p, 0, dm_bm_block_size(bm)); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1d54109071cc..fdf451aac369 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1603,11 +1603,9 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) return; } set_bit(Blocked, &rdev->flags); - if (test_and_clear_bit(In_sync, &rdev->flags)) { + if (test_and_clear_bit(In_sync, &rdev->flags)) mddev->degraded++; - set_bit(Faulty, &rdev->flags); - } else - set_bit(Faulty, &rdev->flags); + set_bit(Faulty, &rdev->flags); spin_unlock_irqrestore(&conf->device_lock, flags); /* * if recovery is running, make sure it aborts. @@ -1863,6 +1861,20 @@ static void end_sync_read(struct bio *bio) reschedule_retry(r1_bio); } +static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) +{ + sector_t sync_blocks = 0; + sector_t s = r1_bio->sector; + long sectors_to_go = r1_bio->sectors; + + /* make sure these bits don't get cleared. */ + do { + md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); + s += sync_blocks; + sectors_to_go -= sync_blocks; + } while (sectors_to_go > 0); +} + static void end_sync_write(struct bio *bio) { int uptodate = !bio->bi_status; @@ -1874,15 +1886,7 @@ static void end_sync_write(struct bio *bio) struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; if (!uptodate) { - sector_t sync_blocks = 0; - sector_t s = r1_bio->sector; - long sectors_to_go = r1_bio->sectors; - /* make sure these bits doesn't get cleared. 
*/ - do { - md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); - s += sync_blocks; - sectors_to_go -= sync_blocks; - } while (sectors_to_go > 0); + abort_sync_write(mddev, r1_bio); set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, &rdev->flags)) set_bit(MD_RECOVERY_NEEDED, & @@ -2114,13 +2118,14 @@ static void process_checks(struct r1bio *r1_bio) struct page **spages = get_resync_pages(sbio)->pages; struct bio_vec *bi; int page_len[RESYNC_PAGES] = { 0 }; + struct bvec_iter_all iter_all; if (sbio->bi_end_io != end_sync_read) continue; /* Now we can 'fixup' the error value */ sbio->bi_status = 0; - bio_for_each_segment_all(bi, sbio, j) + bio_for_each_segment_all(bi, sbio, j, iter_all) page_len[j] = bi->bv_len; if (!status) { @@ -2172,8 +2177,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) (i == r1_bio->read_disk || !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) continue; - if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) + if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { + abort_sync_write(mddev, r1_bio); continue; + } bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index ec3a5ef7fee0..cbbe6b6535be 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1935,12 +1935,14 @@ out: } static struct stripe_head * -r5c_recovery_alloc_stripe(struct r5conf *conf, - sector_t stripe_sect) +r5c_recovery_alloc_stripe( + struct r5conf *conf, + sector_t stripe_sect, + int noblock) { struct stripe_head *sh; - sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); + sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); if (!sh) return NULL; /* no more stripe available */ @@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, stripe_sect); if (!sh) { - sh = r5c_recovery_alloc_stripe(conf, stripe_sect); + sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1); /* * cannot get stripe from raid5_get_active_stripe * try replay some stripes @@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log, r5c_recovery_replay_stripes( cached_stripe_list, ctx); sh = r5c_recovery_alloc_stripe( - conf, stripe_sect); + conf, stripe_sect, 1); } if (!sh) { + int new_size = conf->min_nr_stripes * 2; pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n", mdname(mddev), - conf->min_nr_stripes * 2); - raid5_set_cache_size(mddev, - conf->min_nr_stripes * 2); - sh = r5c_recovery_alloc_stripe(conf, - stripe_sect); + new_size); + ret = raid5_set_cache_size(mddev, new_size); + if (conf->min_nr_stripes <= new_size / 2) { + pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n", + mdname(mddev), + ret, + new_size, + conf->min_nr_stripes, + conf->max_nr_stripes); + return -ENOMEM; + } + sh = r5c_recovery_alloc_stripe( + conf, stripe_sect, 0); } if (!sh) { pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. 
Recovery failed.\n", - mdname(mddev)); + mdname(mddev)); return -ENOMEM; } list_add_tail(&sh->lru, cached_stripe_list); diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 3a7c36326589..0b096ddc9c1e 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include "md.h" @@ -165,7 +164,7 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu, struct dma_async_tx_descriptor *tx) { int disks = sh->disks; - struct page **srcs = flex_array_get(percpu->scribble, 0); + struct page **srcs = percpu->scribble; int count = 0, pd_idx = sh->pd_idx, i; struct async_submit_ctl submit; @@ -196,8 +195,7 @@ ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu, } init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx, - NULL, sh, flex_array_get(percpu->scribble, 0) - + sizeof(struct page *) * (sh->disks + 2)); + NULL, sh, (void *) (srcs + sh->disks + 2)); if (count == 1) tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE, diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4990f0319f6c..77ffd09be486 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -54,7 +54,6 @@ #include #include #include -#include #include #include @@ -1394,22 +1393,16 @@ static void ops_complete_compute(void *stripe_head_ref) } /* return a pointer to the address conversion region of the scribble buffer */ -static addr_conv_t *to_addr_conv(struct stripe_head *sh, - struct raid5_percpu *percpu, int i) +static struct page **to_addr_page(struct raid5_percpu *percpu, int i) { - void *addr; - - addr = flex_array_get(percpu->scribble, i); - return addr + sizeof(struct page *) * (sh->disks + 2); + return percpu->scribble + i * percpu->scribble_obj_size; } /* return a pointer to the address conversion region of the scribble buffer */ -static struct page **to_addr_page(struct raid5_percpu *percpu, int i) +static addr_conv_t *to_addr_conv(struct stripe_head *sh, + struct raid5_percpu *percpu, int i) { - void *addr; - - addr = flex_array_get(percpu->scribble, i); - return addr; + return (void *) (to_addr_page(percpu, i) + sh->disks + 2); } static struct dma_async_tx_descriptor * @@ -2238,21 +2231,23 @@ static int grow_stripes(struct r5conf *conf, int num) * calculate over all devices (not just the data blocks), using zeros in place * of the P and Q blocks. 
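The raid5-ppl hunk above already shows the new scribble layout: a single flat per-object buffer holding (disks + 2) page pointers followed by the address-conversion area, addressed with plain pointer arithmetic instead of flex_array_get(). A minimal sketch of that layout and its accessors; 'demo_percpu', the addr_conv_t typedef and demo_* names are illustrative stand-ins for the raid5 structures that follow in this series:

#include <linux/mm.h>
#include <linux/slab.h>

typedef void *addr_conv_t;	/* stand-in for async_tx's addr_conv_t */

struct demo_percpu {
	void   *scribble;
	size_t  scribble_obj_size;
};

static int demo_scribble_alloc(struct demo_percpu *p, int num, int cnt)
{
	size_t obj_size = sizeof(struct page *) * (num + 2) +
			  sizeof(addr_conv_t) * (num + 2);
	void *buf;

	/* kvmalloc_array() falls back to vmalloc() for large cnt * obj_size */
	buf = kvmalloc_array(cnt, obj_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	kvfree(p->scribble);
	p->scribble = buf;
	p->scribble_obj_size = obj_size;
	return 0;
}

/* object i: page-pointer array at the front ... */
static struct page **demo_to_addr_page(struct demo_percpu *p, int i)
{
	return p->scribble + i * p->scribble_obj_size;
}

/* ... address-conversion area right behind the (num + 2) page pointers */
static addr_conv_t *demo_to_addr_conv(struct demo_percpu *p, int i, int num)
{
	return (void *)(demo_to_addr_page(p, i) + num + 2);
}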
*/ -static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) +static int scribble_alloc(struct raid5_percpu *percpu, + int num, int cnt, gfp_t flags) { - struct flex_array *ret; - size_t len; + size_t obj_size = + sizeof(struct page *) * (num+2) + + sizeof(addr_conv_t) * (num+2); + void *scribble; - len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); - ret = flex_array_alloc(len, cnt, flags); - if (!ret) - return NULL; - /* always prealloc all elements, so no locking is required */ - if (flex_array_prealloc(ret, 0, cnt, flags)) { - flex_array_free(ret); - return NULL; - } - return ret; + scribble = kvmalloc_array(cnt, obj_size, flags); + if (!scribble) + return -ENOMEM; + + kvfree(percpu->scribble); + + percpu->scribble = scribble; + percpu->scribble_obj_size = obj_size; + return 0; } static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) @@ -2270,23 +2265,18 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) return 0; mddev_suspend(conf->mddev); get_online_cpus(); + for_each_present_cpu(cpu) { struct raid5_percpu *percpu; - struct flex_array *scribble; percpu = per_cpu_ptr(conf->percpu, cpu); - scribble = scribble_alloc(new_disks, - new_sectors / STRIPE_SECTORS, - GFP_NOIO); - - if (scribble) { - flex_array_free(percpu->scribble); - percpu->scribble = scribble; - } else { - err = -ENOMEM; + err = scribble_alloc(percpu, new_disks, + new_sectors / STRIPE_SECTORS, + GFP_NOIO); + if (err) break; - } } + put_online_cpus(); mddev_resume(conf->mddev); if (!err) { @@ -6369,6 +6359,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page) int raid5_set_cache_size(struct mddev *mddev, int size) { + int result = 0; struct r5conf *conf = mddev->private; if (size <= 16 || size > 32768) @@ -6385,11 +6376,14 @@ raid5_set_cache_size(struct mddev *mddev, int size) mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) - if (!grow_one_stripe(conf, GFP_KERNEL)) + if (!grow_one_stripe(conf, GFP_KERNEL)) { + conf->min_nr_stripes = conf->max_nr_stripes; + result = -ENOMEM; break; + } mutex_unlock(&conf->cache_size_mutex); - return 0; + return result; } EXPORT_SYMBOL(raid5_set_cache_size); @@ -6738,25 +6732,26 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) { safe_put_page(percpu->spare_page); - if (percpu->scribble) - flex_array_free(percpu->scribble); percpu->spare_page = NULL; + kvfree(percpu->scribble); percpu->scribble = NULL; } static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) { - if (conf->level == 6 && !percpu->spare_page) + if (conf->level == 6 && !percpu->spare_page) { percpu->spare_page = alloc_page(GFP_KERNEL); - if (!percpu->scribble) - percpu->scribble = scribble_alloc(max(conf->raid_disks, - conf->previous_raid_disks), - max(conf->chunk_sectors, - conf->prev_chunk_sectors) - / STRIPE_SECTORS, - GFP_KERNEL); - - if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { + if (!percpu->spare_page) + return -ENOMEM; + } + + if (scribble_alloc(percpu, + max(conf->raid_disks, + conf->previous_raid_disks), + max(conf->chunk_sectors, + conf->prev_chunk_sectors) + / STRIPE_SECTORS, + GFP_KERNEL)) { free_scratch_buffer(conf, percpu); return -ENOMEM; } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 8474c224127b..cf991f13403e 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -638,10 +638,11 @@ struct r5conf { /* per cpu variables 
*/ struct raid5_percpu { struct page *spare_page; /* Used when checking P/Q in raid6 */ - struct flex_array *scribble; /* space for constructing buffer - * lists and performing address - * conversions - */ + void *scribble; /* space for constructing buffer + * lists and performing address + * conversions + */ + int scribble_obj_size; } __percpu *percpu; int scribble_disks; int scribble_sectors; diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c index 391b6fd483e1..156a0d76ab2a 100644 --- a/drivers/media/cec/cec-api.c +++ b/drivers/media/cec/cec-api.c @@ -38,6 +38,7 @@ static __poll_t cec_poll(struct file *filp, struct cec_adapter *adap = fh->adap; __poll_t res = 0; + poll_wait(filp, &fh->wait, poll); if (!cec_is_registered(adap)) return EPOLLERR | EPOLLHUP; mutex_lock(&adap->lock); @@ -48,7 +49,6 @@ static __poll_t cec_poll(struct file *filp, res |= EPOLLIN | EPOLLRDNORM; if (fh->total_queued_events) res |= EPOLLPRI; - poll_wait(filp, &fh->wait, poll); mutex_unlock(&adap->lock); return res; } diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c index c790ae264464..be4355a4c126 100644 --- a/drivers/media/common/saa7146/saa7146_fops.c +++ b/drivers/media/common/saa7146/saa7146_fops.c @@ -105,7 +105,7 @@ void saa7146_buffer_finish(struct saa7146_dev *dev, } q->curr->vb.state = state; - v4l2_get_timestamp(&q->curr->vb.ts); + q->curr->vb.ts = ktime_get_ns(); wake_up(&q->curr->vb.done); q->curr = NULL; diff --git a/drivers/media/common/saa7146/saa7146_i2c.c b/drivers/media/common/saa7146/saa7146_i2c.c index 3feddc52c446..df9ebe2a168c 100644 --- a/drivers/media/common/saa7146/saa7146_i2c.c +++ b/drivers/media/common/saa7146/saa7146_i2c.c @@ -54,10 +54,7 @@ static int saa7146_i2c_msg_prepare(const struct i2c_msg *m, int num, __le32 *op) /* loop through all messages */ for(i = 0; i < num; i++) { - /* insert the address of the i2c-slave. - note: we get 7 bit i2c-addresses, - so we have to perform a translation */ - addr = (m[i].addr*2) + ( (0 != (m[i].flags & I2C_M_RD)) ? 
1 : 0); + addr = i2c_8bit_addr_from_msg(&m[i]); h1 = op_count/3; h2 = op_count%3; op[h1] |= cpu_to_le32( (u8)addr << ((3-h2)*8)); op[h1] |= cpu_to_le32(SAA7146_I2C_START << ((3-h2)*2)); diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c index f90aa8109663..a0f0b5eef0bd 100644 --- a/drivers/media/common/saa7146/saa7146_video.c +++ b/drivers/media/common/saa7146/saa7146_video.c @@ -796,7 +796,7 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *__fh, struct v4l2_f return -EFAULT; } - /* vv->ov.fh is used to indicate that we have valid overlay informations, too */ + /* vv->ov.fh is used to indicate that we have valid overlay information, too */ vv->ov.fh = fh; /* check if our current overlay is active */ diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c index af6b2268db61..e238c9bc17d3 100644 --- a/drivers/media/common/siano/sms-cards.c +++ b/drivers/media/common/siano/sms-cards.c @@ -311,7 +311,7 @@ int sms_board_led_feedback(struct smscore_device_t *coredev, int led) int board_id = smscore_get_board_id(coredev); struct sms_board *board = sms_get_board(board_id); - /* dont touch GPIO if LEDs are already set */ + /* don't touch GPIO if LEDs are already set */ if (smscore_led_state(coredev, -1) == led) return 0; diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h index eb58853008c9..476fa7a8b152 100644 --- a/drivers/media/common/siano/smscoreapi.h +++ b/drivers/media/common/siano/smscoreapi.h @@ -750,7 +750,7 @@ struct sms_stats { u32 num_of_corrected_mpe_tlbs;/* Number of MPE tables which were corrected by MPE RS decoding */ /* Common params */ - u32 ber_error_count; /* Number of errornous SYNC bits. */ + u32 ber_error_count; /* Number of erroneous SYNC bits. */ u32 ber_bit_count; /* Total number of SYNC bits. 
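The saa7146_i2c hunk a little earlier replaces the open-coded 7-bit-to-8-bit address translation with the shared i2c_8bit_addr_from_msg() helper from <linux/i2c.h>. A small sketch showing the equivalence; demo_addr_byte() is only illustrative:

#include <linux/i2c.h>
#include <linux/kernel.h>

static u8 demo_addr_byte(const struct i2c_msg *msg)
{
	/* 7-bit address shifted left, LSB = 1 for reads, 0 for writes */
	u8 open_coded = (msg->addr << 1) | ((msg->flags & I2C_M_RD) ? 1 : 0);

	/* same value via the shared helper */
	u8 helper = i2c_8bit_addr_from_msg(msg);

	WARN_ON(open_coded != helper);
	return helper;
}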
*/ /* Interface information */ diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c index d9a590ae7545..07e0629af8ed 100644 --- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c +++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c @@ -246,6 +246,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_YUV555: case V4L2_PIX_FMT_YUV565: case V4L2_PIX_FMT_YUV32: + case V4L2_PIX_FMT_AYUV32: + case V4L2_PIX_FMT_XYUV32: + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: tpg->color_enc = TGP_COLOR_ENC_YCBCR; break; case V4L2_PIX_FMT_YUV420M: @@ -372,6 +376,10 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc) case V4L2_PIX_FMT_ARGB32: case V4L2_PIX_FMT_ABGR32: case V4L2_PIX_FMT_YUV32: + case V4L2_PIX_FMT_AYUV32: + case V4L2_PIX_FMT_XYUV32: + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: case V4L2_PIX_FMT_HSV32: tpg->twopixelsize[0] = 2 * 4; break; @@ -1267,10 +1275,12 @@ static void gen_twopix(struct tpg_data *tpg, case V4L2_PIX_FMT_RGB32: case V4L2_PIX_FMT_XRGB32: case V4L2_PIX_FMT_HSV32: + case V4L2_PIX_FMT_XYUV32: alpha = 0; /* fall through */ case V4L2_PIX_FMT_YUV32: case V4L2_PIX_FMT_ARGB32: + case V4L2_PIX_FMT_AYUV32: buf[0][offset] = alpha; buf[0][offset + 1] = r_y_h; buf[0][offset + 2] = g_u_s; @@ -1278,9 +1288,11 @@ static void gen_twopix(struct tpg_data *tpg, break; case V4L2_PIX_FMT_BGR32: case V4L2_PIX_FMT_XBGR32: + case V4L2_PIX_FMT_VUYX32: alpha = 0; /* fall through */ case V4L2_PIX_FMT_ABGR32: + case V4L2_PIX_FMT_VUYA32: buf[0][offset] = b_v; buf[0][offset + 1] = g_u_s; buf[0][offset + 2] = r_y_h; diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index 70e8c3366f9c..15b6b9c0a2e4 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -499,9 +499,9 @@ static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers) pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n", vb->cnt_buf_init, vb->cnt_buf_cleanup, vb->cnt_buf_prepare, vb->cnt_buf_finish); - pr_info(" buf_queue: %u buf_done: %u buf_request_complete: %u\n", - vb->cnt_buf_queue, vb->cnt_buf_done, - vb->cnt_buf_request_complete); + pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n", + vb->cnt_buf_out_validate, vb->cnt_buf_queue, + vb->cnt_buf_done, vb->cnt_buf_request_complete); pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n", vb->cnt_mem_alloc, vb->cnt_mem_put, vb->cnt_mem_prepare, vb->cnt_mem_finish, @@ -934,7 +934,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) /* sync buffers */ for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, finish, vb->planes[plane].mem_priv); - vb->synced = false; + vb->synced = 0; } spin_lock_irqsave(&q->done_lock, flags); @@ -1041,6 +1041,7 @@ static int __prepare_userptr(struct vb2_buffer *vb) if (vb->planes[plane].mem_priv) { if (!reacquired) { reacquired = true; + vb->copied_timestamp = 0; call_void_vb_qop(vb, buf_cleanup, vb); } call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv); @@ -1165,6 +1166,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) if (!reacquired) { reacquired = true; + vb->copied_timestamp = 0; call_void_vb_qop(vb, buf_cleanup, vb); } @@ -1196,6 +1198,9 @@ static int __prepare_dmabuf(struct vb2_buffer *vb) * userspace knows sooner rather than later if the dma-buf map fails. 
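The videobuf2-core change that follows (the dbuf_mapped check in __prepare_dmabuf(), together with the removal of the unmap loop in __vb2_dqbuf() further down) keeps dma-buf mappings cached for the buffer's lifetime instead of remapping on every queue/dequeue cycle. A minimal sketch of that map-once pattern under the assumption of a per-plane mapped flag; 'demo_plane' and the map callback are illustrative stand-ins:

#include <linux/errno.h>

struct demo_plane {
	void *mem_priv;
	unsigned int dbuf_mapped:1;
};

static int demo_map_plane(struct demo_plane *p, int (*map)(void *mem_priv))
{
	int ret;

	if (p->dbuf_mapped)
		return 0;	/* still mapped from a previous cycle */

	ret = map(p->mem_priv);
	if (ret)
		return ret;

	/* cleared only when the buffer itself is freed, not at dequeue */
	p->dbuf_mapped = 1;
	return 0;
}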
*/ for (plane = 0; plane < vb->num_planes; ++plane) { + if (vb->planes[plane].dbuf_mapped) + continue; + ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); if (ret) { dprintk(1, "failed to map dmabuf for plane %d\n", @@ -1274,6 +1279,14 @@ static int __buf_prepare(struct vb2_buffer *vb) return 0; WARN_ON(vb->synced); + if (q->is_output) { + ret = call_vb_qop(vb, buf_out_validate, vb); + if (ret) { + dprintk(1, "buffer validation failed\n"); + return ret; + } + } + vb->state = VB2_BUF_STATE_PREPARING; switch (q->memory) { @@ -1302,8 +1315,8 @@ static int __buf_prepare(struct vb2_buffer *vb) for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, prepare, vb->planes[plane].mem_priv); - vb->synced = true; - vb->prepared = true; + vb->synced = 1; + vb->prepared = 1; vb->state = orig_state; return 0; @@ -1520,6 +1533,14 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, return -EINVAL; } + if (q->is_output && !vb->prepared) { + ret = call_vb_qop(vb, buf_out_validate, vb); + if (ret) { + dprintk(1, "buffer validation failed\n"); + return ret; + } + } + media_request_object_init(&vb->req_obj); /* Make sure the request is in a safe state for updating. */ @@ -1750,7 +1771,6 @@ EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); static void __vb2_dqbuf(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; - unsigned int i; /* nothing to do if the buffer is already dequeued */ if (vb->state == VB2_BUF_STATE_DEQUEUED) @@ -1758,14 +1778,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb) vb->state = VB2_BUF_STATE_DEQUEUED; - /* unmap DMABUF buffer */ - if (q->memory == VB2_MEMORY_DMABUF) - for (i = 0; i < vb->num_planes; ++i) { - if (!vb->planes[i].dbuf_mapped) - continue; - call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv); - vb->planes[i].dbuf_mapped = 0; - } call_void_bufop(q, init_buffer, vb); } @@ -1792,7 +1804,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, } call_void_vb_qop(vb, buf_finish, vb); - vb->prepared = false; + vb->prepared = 0; if (pindex) *pindex = vb->index; @@ -1916,12 +1928,12 @@ static void __vb2_queue_cancel(struct vb2_queue *q) for (plane = 0; plane < vb->num_planes; ++plane) call_void_memop(vb, finish, vb->planes[plane].mem_priv); - vb->synced = false; + vb->synced = 0; } if (vb->prepared) { call_void_vb_qop(vb, buf_finish, vb); - vb->prepared = false; + vb->prepared = 0; } __vb2_dqbuf(vb); @@ -1932,6 +1944,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q) if (vb->request) media_request_put(vb->request); vb->request = NULL; + vb->copied_timestamp = 0; } } @@ -2278,6 +2291,8 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM))) return 0; + poll_wait(file, &q->done_wq, wait); + /* * Start file I/O emulator only if streaming API has not been used yet. 
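The vb2_core_poll() change above (and the matching cec, dmxdev and dvb_ca_en50221 changes below) all move poll_wait() to the top of the handler: the file must be registered on the wait queue before the readiness condition is tested, otherwise a wakeup arriving between the test and the never-registered wait entry is lost and poll() can block although the device is ready. A minimal sketch of the required ordering; 'demo_dev' is an illustrative stand-in:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct demo_dev {
	wait_queue_head_t wait;
	bool have_data;
};

static __poll_t demo_poll(struct file *file, poll_table *wait)
{
	struct demo_dev *dev = file->private_data;
	__poll_t mask = 0;

	/*
	 * Always add ourselves to the wait queue before checking state.
	 * If the check came first, data arriving in between would wake
	 * nobody that is registered, and user space could stall.
	 */
	poll_wait(file, &dev->wait, wait);

	if (dev->have_data)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}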
*/ @@ -2329,8 +2344,6 @@ __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, */ if (q->last_buffer_dequeued) return EPOLLIN | EPOLLRDNORM; - - poll_wait(file, &q->done_wq, wait); } /* diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index aff0ab7bf83d..82389aead6ed 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -439,42 +439,14 @@ static void vb2_dc_put_userptr(void *buf_priv) set_page_dirty_lock(pages[i]); sg_free_table(sgt); kfree(sgt); + } else { + dma_unmap_resource(buf->dev, buf->dma_addr, buf->size, + buf->dma_dir, 0); } vb2_destroy_framevec(buf->vec); kfree(buf); } -/* - * For some kind of reserved memory there might be no struct page available, - * so all that can be done to support such 'pages' is to try to convert - * pfn to dma address or at the last resort just assume that - * dma address == physical address (like it has been assumed in earlier version - * of videobuf2-dma-contig - */ - -#ifdef __arch_pfn_to_dma -static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) -{ - return (dma_addr_t)__arch_pfn_to_dma(dev, pfn); -} -#elif defined(__pfn_to_bus) -static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) -{ - return (dma_addr_t)__pfn_to_bus(pfn); -} -#elif defined(__pfn_to_phys) -static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) -{ - return (dma_addr_t)__pfn_to_phys(pfn); -} -#else -static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn) -{ - /* really, we cannot do anything better at this point */ - return (dma_addr_t)(pfn) << PAGE_SHIFT; -} -#endif - static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, unsigned long size, enum dma_data_direction dma_dir) { @@ -528,7 +500,12 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr, for (i = 1; i < n_pages; i++) if (nums[i-1] + 1 != nums[i]) goto fail_pfnvec; - buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]); + buf->dma_addr = dma_map_resource(buf->dev, + __pfn_to_phys(nums[0]), size, buf->dma_dir, 0); + if (dma_mapping_error(buf->dev, buf->dma_addr)) { + ret = -ENOMEM; + goto fail_pfnvec; + } goto out; } diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 015e737095cd..270c3162fdcb 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -3,7 +3,7 @@ * * Copyright (C) 2010 Samsung Electronics * - * Author: Andrzej Pietrasiewicz + * Author: Andrzej Pietrasiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -67,7 +67,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf, int i; order = get_order(size); - /* Dont over allocate*/ + /* Don't over allocate*/ if ((PAGE_SIZE << order) > size) order--; diff --git a/drivers/media/common/videobuf2/videobuf2-memops.c b/drivers/media/common/videobuf2/videobuf2-memops.c index 89e51989332b..c4a85be48ac2 100644 --- a/drivers/media/common/videobuf2/videobuf2-memops.c +++ b/drivers/media/common/videobuf2/videobuf2-memops.c @@ -121,7 +121,7 @@ static void vb2_common_vm_close(struct vm_area_struct *vma) } /* - * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmaped + * vb2_common_vm_ops - common vm_ops used for 
tracking refcount of mmapped * video buffers */ const struct vm_operations_struct vb2_common_vm_ops = { diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 3a0ca2f9854f..d09dee20e421 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -143,7 +143,7 @@ static void __copy_timestamp(struct vb2_buffer *vb, const void *pb) * and the timecode field and flag if needed. */ if (q->copy_timestamp) - vb->timestamp = timeval_to_ns(&b->timestamp); + vb->timestamp = v4l2_timeval_to_ns(&b->timestamp); vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE; if (b->flags & V4L2_BUF_FLAG_TIMECODE) vbuf->timecode = b->timecode; @@ -409,6 +409,15 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md */ if (WARN_ON(!q->ops->buf_request_complete)) return -EINVAL; + /* + * Make sure this op is implemented by the driver for the output queue. + * It's easy to forget this callback, but is it important to correctly + * validate the 'field' value at QBUF time. + */ + if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT || + q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) && + !q->ops->buf_out_validate)) + return -EINVAL; if (vb->state != VB2_BUF_STATE_DEQUEUED) { dprintk(1, "%s: buffer is not in dequeued state\n", opname); @@ -567,7 +576,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes) struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); unsigned int plane; - if (!vb->vb2_queue->is_output || !vb->vb2_queue->copy_timestamp) + if (!vb->vb2_queue->copy_timestamp) vb->timestamp = 0; for (plane = 0; plane < vb->num_planes; ++plane) { @@ -589,6 +598,19 @@ static const struct vb2_buf_ops v4l2_buf_ops = { .copy_timestamp = __copy_timestamp, }; +int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp, + unsigned int start_idx) +{ + unsigned int i; + + for (i = start_idx; i < q->num_buffers; i++) + if (q->bufs[i]->copied_timestamp && + q->bufs[i]->timestamp == timestamp) + return i; + return -1; +} +EXPORT_SYMBOL_GPL(vb2_find_timestamp); + /* * vb2_querybuf() - query video buffer information * @q: videobuf queue @@ -846,16 +868,14 @@ EXPORT_SYMBOL_GPL(vb2_queue_release); __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) { struct video_device *vfd = video_devdata(file); - __poll_t req_events = poll_requested_events(wait); __poll_t res = 0; if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { struct v4l2_fh *fh = file->private_data; + poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) res = EPOLLPRI; - else if (req_events & EPOLLPRI) - poll_wait(file, &fh->wait, wait); } return res | vb2_core_poll(q, file, wait); diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 1544e8cef564..f14a872d1268 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -1195,13 +1195,13 @@ static __poll_t dvb_demux_poll(struct file *file, poll_table *wait) struct dmxdev_filter *dmxdevfilter = file->private_data; __poll_t mask = 0; + poll_wait(file, &dmxdevfilter->buffer.queue, wait); + if ((!dmxdevfilter) || dmxdevfilter->dev->exit) return EPOLLERR; if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait); - poll_wait(file, &dmxdevfilter->buffer.queue, wait); - if (dmxdevfilter->state != DMXDEV_STATE_GO && dmxdevfilter->state != DMXDEV_STATE_DONE && dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT) @@ -1346,13 +1346,13 
@@ static __poll_t dvb_dvr_poll(struct file *file, poll_table *wait) dprintk("%s\n", __func__); + poll_wait(file, &dmxdev->dvr_buffer.queue, wait); + if (dmxdev->exit) return EPOLLERR; if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait); - poll_wait(file, &dmxdev->dvr_buffer.queue, wait); - if (((file->f_flags & O_ACCMODE) == O_RDONLY) || dmxdev->may_do_mmap) { if (dmxdev->dvr_buffer.error) diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index 4d371cea0d5d..ebf1e3b03819 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -1797,6 +1797,8 @@ static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait) dprintk("%s\n", __func__); + poll_wait(file, &ca->wait_queue, wait); + if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) mask |= EPOLLIN; @@ -1804,9 +1806,6 @@ static __poll_t dvb_ca_en50221_io_poll(struct file *file, poll_table *wait) if (mask) return mask; - /* wait for something to happen */ - poll_wait(file, &ca->wait_queue, wait); - if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1) mask |= EPOLLIN; diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 27a1d4a98d73..fbdb4ecc7c50 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -1596,7 +1596,7 @@ static bool is_dvbv3_delsys(u32 delsys) * * Provides emulation for delivery systems that are compatible with the old * DVBv3 call. Among its usages, it provices support for ISDB-T, and allows - * using a DVB-S2 only frontend just like it were a DVB-S, if the frontent + * using a DVB-S2 only frontend just like it were a DVB-S, if the frontend * parameters are compatible with DVB-S spec. */ static int emulate_delivery_system(struct dvb_frontend *fe, u32 delsys) diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index b7171bf094fb..4a5834a1c3b7 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -898,7 +898,7 @@ EXPORT_SYMBOL(dvb_unregister_adapter); /* if the miracle happens and "generic_usercopy()" is included into the kernel, then this can vanish. please don't make the mistake and - define this as video_usercopy(). this will introduce a dependecy + define this as video_usercopy(). this will introduce a dependency to the v4l "videodev.o" module, which is unnecessary for some cards (ie. the budget dvb-cards don't need the v4l module...) */ int dvb_usercopy(struct file *file, diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index c98093ed3dd7..8acf0b91b437 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -2947,7 +2947,7 @@ static int cxd2841er_sleep_tc_to_active_t(struct cxd2841er_priv *priv, ((priv->flags & CXD2841ER_ASCOT) ? 
0x01 : 0x00), 0x01); /* Set SLV-T Bank : 0x18 */ cxd2841er_write_reg(priv, I2C_SLVT, 0x00, 0x18); - /* Pre-RS BER moniter setting */ + /* Pre-RS BER monitor setting */ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x36, 0x40, 0x07); /* FEC Auto Recovery setting */ cxd2841er_set_reg_bits(priv, I2C_SLVT, 0x30, 0x01, 0x01); diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c index 4813a88eb9f7..18c41cfef8d6 100644 --- a/drivers/media/dvb-frontends/dib0090.c +++ b/drivers/media/dvb-frontends/dib0090.c @@ -2459,7 +2459,7 @@ static int dib0090_tune(struct dvb_frontend *fe) state->current_standard = state->fe->dtv_property_cache.delivery_system; ret = 20; - state->calibrate = CAPTRIM_CAL; /* captrim serach now */ + state->calibrate = CAPTRIM_CAL; /* captrim search now */ } else if (*tune_state == CT_TUNER_STEP_0) { /* Warning : because of captrim cal, if you change this step, change it also in _cal.c file because it is the step following captrim cal state machine */ diff --git a/drivers/media/dvb-frontends/dib7000m.c b/drivers/media/dvb-frontends/dib7000m.c index b79358d09de6..389db9077ad5 100644 --- a/drivers/media/dvb-frontends/dib7000m.c +++ b/drivers/media/dvb-frontends/dib7000m.c @@ -369,7 +369,7 @@ static int dib7000m_sad_calib(struct dib7000m_state *state) { /* internal */ -// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writting in set_bandwidth +// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // sampling clock of the SAD is writing in set_bandwidth dib7000m_write_word(state, 929, (0 << 1) | (0 << 0)); dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096 @@ -928,7 +928,7 @@ static void dib7000m_set_channel(struct dib7000m_state *state, struct dtv_fronte } state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO - /* deactive the possibility of diversity reception if extended interleave - not for 7000MC */ + /* deactivate the possibility of diversity reception if extended interleave - not for 7000MC */ /* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */ if (1 == 1 || state->revision > 0x4000) state->div_force_off = 0; diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c index 2818e8def1b3..f8040f6def62 100644 --- a/drivers/media/dvb-frontends/dib7000p.c +++ b/drivers/media/dvb-frontends/dib7000p.c @@ -94,7 +94,7 @@ enum dib7000p_power_mode { DIB7000P_POWER_INTERFACE_ONLY, }; -/* dib7090 specific fonctions */ +/* dib7090 specific functions */ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode); static int dib7090_set_diversity_in(struct dvb_frontend *fe, int onoff); static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode); @@ -319,7 +319,7 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad dib7000p_write_word(state, 1925, reg | (1 << 4) | (1 << 2)); /* en_slowAdc = 1 & reset_sladc = 1 */ - reg = dib7000p_read_word(state, 1925); /* read acces to make it works... strange ... */ + reg = dib7000p_read_word(state, 1925); /* read access to make it works... strange ... 
*/ msleep(200); dib7000p_write_word(state, 1925, reg & ~(1 << 4)); /* en_slowAdc = 1 & reset_sladc = 0 */ @@ -1101,7 +1101,7 @@ static void dib7000p_set_channel(struct dib7000p_state *state, else state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; - /* deactive the possibility of diversity reception if extended interleaver */ + /* deactivate the possibility of diversity reception if extended interleaver */ state->div_force_off = !1 && ch->transmission_mode != TRANSMISSION_MODE_8K; dib7000p_set_diversity_in(&state->demod, state->div_state); @@ -2378,7 +2378,7 @@ static int dib7090_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[] } } - if (apb_address != 0) /* R/W acces via APB */ + if (apb_address != 0) /* R/W access via APB */ return dib7090p_rw_on_apb(i2c_adap, msg, num, apb_address); else /* R/W access via SERPAR */ return w7090p_tuner_rw_serpar(i2c_adap, msg, num); diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 3c3f8cb14845..85c429cce23e 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -564,7 +564,7 @@ static int dib8000_set_adc_state(struct dib8000_state *state, enum dibx000_adc_s dib8000_write_word(state, 1925, reg | (1<<4) | (1<<2)); - /* read acces to make it works... strange ... */ + /* read access to make it works... strange ... */ reg = dib8000_read_word(state, 1925); msleep(20); /* en_slowAdc = 1 & reset_sladc = 0 */ @@ -1091,7 +1091,7 @@ static int dib8000_reset(struct dvb_frontend *fe) if ((state->revision != 0x8090) && (dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0)) - dprintk("OUTPUT_MODE could not be resetted.\n"); + dprintk("OUTPUT_MODE could not be reset.\n"); state->current_agc = NULL; @@ -1867,7 +1867,7 @@ static int dib8096p_tuner_xfer(struct i2c_adapter *i2c_adap, } } - if (apb_address != 0) /* R/W acces via APB */ + if (apb_address != 0) /* R/W access via APB */ return dib8096p_rw_on_apb(i2c_adap, msg, num, apb_address); else /* R/W access via SERPAR */ return dib8096p_tuner_rw_serpar(i2c_adap, msg, num); @@ -3082,7 +3082,7 @@ static int dib8000_tune(struct dvb_frontend *fe) state->autosearch_state = AS_DONE; *tune_state = CT_DEMOD_STOP; /* else we are done here */ break; - case 2: /* Succes */ + case 2: /* Success */ state->status = FE_STATUS_FFT_SUCCESS; /* signal to the upper layer, that there was a channel found and the parameters can be read */ *tune_state = CT_DEMOD_STEP_3; if (state->autosearch_state == AS_SEARCHING_GUARD) @@ -3193,10 +3193,10 @@ static int dib8000_tune(struct dvb_frontend *fe) case CT_DEMOD_STEP_6: /* (36) if there is an input (diversity) */ if ((state->fe[1] != NULL) && (state->output_mode != OUTMODE_DIVERSITY)) { - /* if there is a diversity fe in input and this fe is has not already failled : wait here until this this fe has succedeed or failled */ + /* if there is a diversity fe in input and this fe is has not already failed : wait here until this this fe has succedeed or failed */ if (dib8000_get_status(state->fe[1]) <= FE_STATUS_STD_SUCCESS) /* Something is locked on the input fe */ *tune_state = CT_DEMOD_STEP_8; /* go for mpeg */ - else if (dib8000_get_status(state->fe[1]) >= FE_STATUS_TUNE_TIME_TOO_SHORT) { /* fe in input failled also, break the current one */ + else if (dib8000_get_status(state->fe[1]) >= FE_STATUS_TUNE_TIME_TOO_SHORT) { /* fe in input failed also, break the current one */ *tune_state = CT_DEMOD_STOP; /* else we are done here ; step 8 will close the loops and exit */ 
dib8000_viterbi_state(state, 1); /* start viterbi chandec */ dib8000_set_isdbt_loop_params(state, LOOP_TUNE_2); diff --git a/drivers/media/dvb-frontends/dib9000.c b/drivers/media/dvb-frontends/dib9000.c index 0183fb1346ef..1875da07c150 100644 --- a/drivers/media/dvb-frontends/dib9000.c +++ b/drivers/media/dvb-frontends/dib9000.c @@ -1020,7 +1020,7 @@ static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address if (address >= 1024 || !state->platform.risc.fw_is_running) return -EINVAL; - /* dprintk( "APB access thru rd fw %d %x\n", address, attribute); */ + /* dprintk( "APB access through rd fw %d %x\n", address, attribute); */ mb[0] = (u16) address; mb[1] = len / 2; @@ -1050,7 +1050,7 @@ static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 addres if (len > 18) return -EINVAL; - /* dprintk( "APB access thru wr fw %d %x\n", address, attribute); */ + /* dprintk( "APB access through wr fw %d %x\n", address, attribute); */ mb[0] = (u16)address; for (i = 0; i + 1 < len; i += 2) diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h index 23ae72468025..739dc5590fa4 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_dap_fasi.h @@ -67,7 +67,7 @@ * (2 bytes). The DAP can operate in 3 modes: * (1) only short * (2) only long -* (3) both long and short but short preferred and long only when necesarry +* (3) both long and short but short preferred and long only when necessary * * These modes must be selected compile time via compile switches. * Compile switch settings for the different modes: @@ -112,14 +112,14 @@ * + single master mode means no use of repeated starts * + multi master mode means use of repeated starts * Default is single master. -* Default can be overriden by setting the compile switch DRXDAP_SINGLE_MASTER. +* Default can be overridden by setting the compile switch DRXDAP_SINGLE_MASTER. * * Slave: * Single/multi master selected via the flags in the FASI protocol. * + single master means remember memory address between i2c packets * + multimaster means flush memory address between i2c packets * Default is single master, DAP FASI changes multi-master setting silently -* into single master setting. This cannot be overrriden. +* into single master setting. This cannot be overridden. * */ /* set default */ @@ -139,7 +139,7 @@ * In single master mode, data can be written by sending the register address * first, then two or four bytes of data in the next packet. * Because the device address plus a register address equals five bytes, -* the mimimum chunk size must be five. +* the minimum chunk size must be five. * If ten-bit I2C device addresses are used, the minimum chunk size must be six, * because the I2C device address will then occupy two bytes when writing. * diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h index 1ec20eecc433..15f7e58c5a30 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h +++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h @@ -94,7 +94,7 @@ int drxbsp_i2c_term(void); * \param r_count The number of bytes to read * \param r_data The array to read the data from * \return int Return status. -* \retval 0 Succes. +* \retval 0 Success. * \retval -EIO Failure. * \retval -EINVAL Parameter 'wcount' is not zero but parameter * 'wdata' contains NULL. 
@@ -986,7 +986,7 @@ struct drx_filter_info { * \struct struct drx_channel * \brief The set of parameters describing a single channel. * * Used by DRX_CTRL_SET_CHANNEL and DRX_CTRL_GET_CHANNEL. -* Only certain fields need to be used for a specfic standard. +* Only certain fields need to be used for a specific standard. * */ struct drx_channel { @@ -1606,7 +1606,7 @@ struct drx_version_list { DRX_AUD_I2S_MATRIX_B_MONO, /*< B sound only, stereo or mono */ DRX_AUD_I2S_MATRIX_STEREO, - /*< A+B sound, transparant */ + /*< A+B sound, transparent */ DRX_AUD_I2S_MATRIX_MONO /*< A+B mixed to mono sum, (L+R)/2 */}; /* @@ -1870,7 +1870,7 @@ struct drx_reg_dump { /*< current power management mode */ /* Tuner */ - u8 tuner_port_nr; /*< nr of I2C port to wich tuner is */ + u8 tuner_port_nr; /*< nr of I2C port to which tuner is */ s32 tuner_min_freq_rf; /*< minimum RF input frequency, in kHz */ s32 tuner_max_freq_rf; diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c index 551b7d65fa66..a6876fa48753 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.c +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c @@ -380,10 +380,10 @@ DEFINES */ /*****************************************************************************/ -/* Audio block 0x103 is write only. To avoid shadowing in driver accessing */ -/* RAM adresses directly. This must be READ ONLY to avoid problems. */ -/* Writing to the interface adresses is more than only writing the RAM */ -/* locations */ +/* Audio block 0x103 is write only. To avoid shadowing in driver accessing */ +/* RAM addresses directly. This must be READ ONLY to avoid problems. */ +/* Writing to the interface addresses are more than only writing the RAM */ +/* locations */ /*****************************************************************************/ /* * \brief RAM location of MODUS registers @@ -656,8 +656,8 @@ static struct drxj_data drxj_data_g = { false, /* flag: true=bypass */ ATV_TOP_VID_PEAK__PRE, /* shadow of ATV_TOP_VID_PEAK__A */ ATV_TOP_NOISE_TH__PRE, /* shadow of ATV_TOP_NOISE_TH__A */ - true, /* flag CVBS ouput enable */ - false, /* flag SIF ouput enable */ + true, /* flag CVBS output enable */ + false, /* flag SIF output enable */ DRXJ_SIF_ATTENUATION_0DB, /* current SIF att setting */ { /* qam_rf_agc_cfg */ DRX_STANDARD_ITU_B, /* standard */ @@ -832,7 +832,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = { false, /* If true mirror frequency spectrum */ { /* MPEG output configuration */ - true, /* If true, enable MPEG ouput */ + true, /* If true, enable MPEG output */ false, /* If true, insert RS byte */ false, /* If true, parallel out otherwise serial */ false, /* If true, invert DATA signals */ @@ -848,7 +848,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = { DRX_MPEG_STR_WIDTH_1 /* MPEG Start width in clock cycles */ }, /* Initilisations below can be omitted, they require no user input and - are initialy 0, NULL or false. The compiler will initialize them to these + are initially 0, NULL or false. The compiler will initialize them to these values when omitted. 
*/ false, /* is_opened */ @@ -869,7 +869,7 @@ static struct drx_common_attr drxj_default_comm_attr_g = { DRX_POWER_UP, /* Tuner */ - 1, /* nr of I2C port to wich tuner is */ + 1, /* nr of I2C port to which tuner is */ 0L, /* minimum RF input frequency, in kHz */ 0L, /* maximum RF input frequency, in kHz */ false, /* Rf Agc Polarity */ @@ -1656,7 +1656,7 @@ static int drxdap_fasi_write_block(struct i2c_device_addr *dev_addr, sequense will be visible: (1) write address {i2c addr, 4 bytes chip address} (2) write data {i2c addr, 4 bytes data } (3) write address (4) write data etc... - Address must be rewriten because HI is reset after data transport and + Address must be rewritten because HI is reset after data transport and expects an address. */ todo = (block_size < datasize ? block_size : datasize); @@ -1820,7 +1820,7 @@ static int drxdap_fasi_write_reg32(struct i2c_device_addr *dev_addr, * \param wdata Data to write * \param rdata Buffer for data to read * \return int -* \retval 0 Succes +* \retval 0 Success * \retval -EIO Timeout, I2C error, illegal bank * * 16 bits register read modify write access using short addressing format only. @@ -1897,7 +1897,7 @@ static int drxj_dap_read_modify_write_reg16(struct i2c_device_addr *dev_addr, * \param addr * \param data * \return int -* \retval 0 Succes +* \retval 0 Success * \retval -EIO Timeout, I2C error, illegal bank * * 16 bits register read access via audio token ring interface. @@ -2004,7 +2004,7 @@ static int drxj_dap_read_reg16(struct i2c_device_addr *dev_addr, * \param addr * \param data * \return int -* \retval 0 Succes +* \retval 0 Success * \retval -EIO Timeout, I2C error, illegal bank * * 16 bits register write access via audio token ring interface. @@ -2094,7 +2094,7 @@ static int drxj_dap_write_reg16(struct i2c_device_addr *dev_addr, * \param datasize size of data buffer in bytes * \param data pointer to data buffer * \return int -* \retval 0 Succes +* \retval 0 Success * \retval -EIO Timeout, I2C error, illegal bank * */ @@ -2338,7 +2338,7 @@ hi_command(struct i2c_device_addr *dev_addr, const struct drxj_hi_cmd *cmd, u16 if ((cmd->cmd) == SIO_HI_RA_RAM_CMD_RESET) msleep(1); - /* Detect power down to ommit reading result */ + /* Detect power down to omit reading result */ powerdown_cmd = (bool) ((cmd->cmd == SIO_HI_RA_RAM_CMD_CONFIG) && (((cmd-> param5) & SIO_HI_RA_RAM_PAR_5_CFG_SLEEP__M) @@ -2754,7 +2754,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o common_attr = (struct drx_common_attr *) demod->my_common_attr; if (cfg_data->enable_mpeg_output == true) { - /* quick and dirty patch to set MPEG incase current std is not + /* quick and dirty patch to set MPEG in case current std is not producing MPEG */ switch (ext_attr->standard) { case DRX_STANDARD_8VSB: @@ -2894,7 +2894,7 @@ ctrl_set_cfg_mpeg_output(struct drx_demod_instance *demod, struct drx_cfg_mpeg_o break; default: break; - } /* swtich (standard) */ + } /* switch (standard) */ /* Check insertion of the Reed-Solomon parity bytes */ rc = drxj_dap_read_reg16(dev_addr, FEC_OC_MODE__A, &fec_oc_reg_mode, 0); @@ -4127,7 +4127,7 @@ rw_error: * \param datasize size of data buffer in bytes * \param data pointer to data buffer * \return int -* \retval 0 Succes +* \retval 0 Success * \retval -EIO Timeout, I2C error, illegal bank * */ @@ -8989,7 +8989,7 @@ qam64auto(struct drx_demod_instance *demod, ((jiffies_to_msecs(jiffies) - start_time) < (DRXJ_QAM_MAX_WAITTIME + timeout_ofs)) ); - /* Returning control to apllication ... 
*/ + /* Returning control to application ... */ return 0; rw_error: @@ -9309,7 +9309,7 @@ get_qamrs_err_count(struct i2c_device_addr *dev_addr, return -EINVAL; /* all reported errors are received in the */ - /* most recently finished measurment period */ + /* most recently finished measurement period */ /* no of pre RS bit errors */ rc = drxj_dap_read_reg16(dev_addr, FEC_RS_NR_BIT_ERRORS__A, &nr_bit_errors, 0); if (rc != 0) { @@ -9689,7 +9689,7 @@ rw_error: (3) SIF AGC (used to amplify the output signal in case input to low) The SIF AGC is now coupled to the RF/IF AGCs. - The SIF AGC is needed for both SIF ouput and the internal SIF signal to + The SIF AGC is needed for both SIF output and the internal SIF signal to the AUD block. RF and IF AGCs DACs are part of AFE, Video and SIF AGC DACs are part of @@ -9702,11 +9702,11 @@ rw_error: later on because of the schedule) Several HW/SCU "settings" can be used for ATV. The standard selection - will reset most of these settings. To avoid that the end user apllication + will reset most of these settings. To avoid that the end user application has to perform these settings each time the ATV or FM standards is selected the driver will shadow these settings. This enables the end user to perform the settings only once after a drx_open(). The driver must - write the shadow settings to HW/SCU incase: + write the shadow settings to HW/SCU in case: ( setstandard FM/ATV) || ( settings have changed && FM/ATV standard is active) The shadow settings will be stored in the device specific data container. @@ -9908,7 +9908,7 @@ rw_error: #define IMPULSE_COSINE_ALPHA_0_5 { 2, 0, -2, -2, 2, 5, 2, -10, -20, -14, 20, 74, 125, 145} /*sqrt raised-cosine filter with alpha=0.5 */ #define IMPULSE_COSINE_ALPHA_RO_0_5 { 0, 0, 1, 2, 3, 0, -7, -15, -16, 0, 34, 77, 114, 128} /*full raised-cosine filter with alpha=0.5 (receiver only) */ -/* Coefficients for the nyquist fitler (total: 27 taps) */ +/* Coefficients for the nyquist filter (total: 27 taps) */ #define NYQFILTERLEN 27 static int ctrl_set_oob(struct drx_demod_instance *demod, struct drxoob *oob_param) diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.h b/drivers/media/dvb-frontends/drx39xyj/drxj.h index d3ee1c23bb2f..d62412f71c88 100644 --- a/drivers/media/dvb-frontends/drx39xyj/drxj.h +++ b/drivers/media/dvb-frontends/drx39xyj/drxj.h @@ -49,7 +49,7 @@ INCLUDES #if ((DRXDAP_SINGLE_MASTER == 0) && (DRXDAPFASI_LONG_ADDR_ALLOWED == 0)) #error "Multi master mode and short addressing only is an illegal combination" *; /* Generate a fatal compiler error to make sure it stops here, - this is necesarry because not all compilers stop after a #error. */ + this is necessary because not all compilers stop after a #error. */ #endif /*------------------------------------------------------------------------- @@ -203,7 +203,7 @@ struct drxj_agc_status { * /struct drxjrs_errors * Available failure information in DRXJ_FEC_RS. 
* -* Container for errors that are received in the most recently finished measurment period +* Container for errors that are received in the most recently finished measurement period * */ struct drxjrs_errors { @@ -405,7 +405,7 @@ struct drxj_cfg_atv_output { * */ struct drxj_data { - /* device capabilties (determined during drx_open()) */ + /* device capabilities (determined during drx_open()) */ bool has_lna; /*< true if LNA (aka PGA) present */ bool has_oob; /*< true if OOB supported */ bool has_ntsc; /*< true if NTSC supported */ @@ -455,7 +455,7 @@ struct drxj_cfg_atv_output { /* IQM fs frequecy shift and inversion */ u32 iqm_fs_rate_ofs; /*< frequency shifter setting after setchannel */ - bool pos_image; /*< Ture: positive image */ + bool pos_image; /*< True: positive image */ /* IQM RC frequecy shift */ u32 iqm_rc_rate_ofs; /*< frequency shifter setting after setchannel */ @@ -468,8 +468,8 @@ struct drxj_cfg_atv_output { bool phase_correction_bypass;/*< flag: true=bypass */ s16 atv_top_vid_peak; /*< shadow of ATV_TOP_VID_PEAK__A */ u16 atv_top_noise_th; /*< shadow of ATV_TOP_NOISE_TH__A */ - bool enable_cvbs_output; /*< flag CVBS ouput enable */ - bool enable_sif_output; /*< flag SIF ouput enable */ + bool enable_cvbs_output; /*< flag CVBS output enable */ + bool enable_sif_output; /*< flag SIF output enable */ enum drxjsif_attenuation sif_attenuation; /*< current SIF att setting */ /* Agc configuration for QAM and VSB */ diff --git a/drivers/media/dvb-frontends/drxd_firm.c b/drivers/media/dvb-frontends/drxd_firm.c index 4e1d8905e06a..412871d6636b 100644 --- a/drivers/media/dvb-frontends/drxd_firm.c +++ b/drivers/media/dvb-frontends/drxd_firm.c @@ -890,7 +890,7 @@ u8 DRXD_StartDiversityEnd[] = { /* End demod, combining RF in and diversity in, MPEG TS out */ WR16(B_FE_CF_REG_IMP_VAL__A, 0x0), /* disable impulse noise cruncher */ WR16(B_FE_AD_REG_INVEXT__A, 0x0), /* clock inversion (for sohard board) */ - WR16(B_CP_REG_BR_STR_DEL__A, 10), /* apperently no mb delay matching is best */ + WR16(B_CP_REG_BR_STR_DEL__A, 10), /* apparently no mb delay matching is best */ WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_DIV_ON | /* org = 0x81 combining enabled */ B_EQ_REG_RC_SEL_CAR_MEAS_A_CC | diff --git a/drivers/media/dvb-frontends/drxd_hard.c b/drivers/media/dvb-frontends/drxd_hard.c index 684d428efb0d..0a5b15bee1d7 100644 --- a/drivers/media/dvb-frontends/drxd_hard.c +++ b/drivers/media/dvb-frontends/drxd_hard.c @@ -1144,6 +1144,8 @@ static int EnableAndResetMB(struct drxd_state *state) static int InitCC(struct drxd_state *state) { + int status = 0; + if (state->osc_clock_freq == 0 || state->osc_clock_freq > 20000 || (state->osc_clock_freq % 4000) != 0) { @@ -1151,14 +1153,17 @@ static int InitCC(struct drxd_state *state) return -1; } - Write16(state, CC_REG_OSC_MODE__A, CC_REG_OSC_MODE_M20, 0); - Write16(state, CC_REG_PLL_MODE__A, CC_REG_PLL_MODE_BYPASS_PLL | - CC_REG_PLL_MODE_PUMP_CUR_12, 0); - Write16(state, CC_REG_REF_DIVIDE__A, state->osc_clock_freq / 4000, 0); - Write16(state, CC_REG_PWD_MODE__A, CC_REG_PWD_MODE_DOWN_PLL, 0); - Write16(state, CC_REG_UPDATE__A, CC_REG_UPDATE_KEY, 0); + status |= Write16(state, CC_REG_OSC_MODE__A, CC_REG_OSC_MODE_M20, 0); + status |= Write16(state, CC_REG_PLL_MODE__A, + CC_REG_PLL_MODE_BYPASS_PLL | + CC_REG_PLL_MODE_PUMP_CUR_12, 0); + status |= Write16(state, CC_REG_REF_DIVIDE__A, + state->osc_clock_freq / 4000, 0); + status |= Write16(state, CC_REG_PWD_MODE__A, CC_REG_PWD_MODE_DOWN_PLL, + 0); + status |= Write16(state, CC_REG_UPDATE__A, 
CC_REG_UPDATE_KEY, 0); - return 0; + return status; } static int ResetECOD(struct drxd_state *state) @@ -1312,7 +1317,10 @@ static int SC_SendCommand(struct drxd_state *state, u16 cmd) int status = 0, ret; u16 errCode; - Write16(state, SC_RA_RAM_CMD__A, cmd, 0); + status = Write16(state, SC_RA_RAM_CMD__A, cmd, 0); + if (status < 0) + return status; + SC_WaitForReady(state); ret = Read16(state, SC_RA_RAM_CMD_ADDR__A, &errCode, 0); @@ -1339,9 +1347,9 @@ static int SC_ProcStartCommand(struct drxd_state *state, break; } SC_WaitForReady(state); - Write16(state, SC_RA_RAM_CMD_ADDR__A, subCmd, 0); - Write16(state, SC_RA_RAM_PARAM1__A, param1, 0); - Write16(state, SC_RA_RAM_PARAM0__A, param0, 0); + status |= Write16(state, SC_RA_RAM_CMD_ADDR__A, subCmd, 0); + status |= Write16(state, SC_RA_RAM_PARAM1__A, param1, 0); + status |= Write16(state, SC_RA_RAM_PARAM0__A, param0, 0); SC_SendCommand(state, SC_RA_RAM_CMD_PROC_START); } while (0); diff --git a/drivers/media/dvb-frontends/drxk.h b/drivers/media/dvb-frontends/drxk.h index 76466f7ec3a0..ee06e89187e4 100644 --- a/drivers/media/dvb-frontends/drxk.h +++ b/drivers/media/dvb-frontends/drxk.h @@ -24,7 +24,7 @@ * @microcode_name: Name of the firmware file with the microcode * @qam_demod_parameter_count: The number of parameters used for the command * to set the demodulator parameters. All - * firmwares are using the 2-parameter commmand. + * firmwares are using the 2-parameter command. * An exception is the ``drxk_a3.mc`` firmware, * which uses the 4-parameter command. * A value of 0 (default) or lower indicates that diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 8ea1e45be710..86652a4ef9ce 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -723,7 +723,7 @@ static int init_state(struct drxk_state *state) state->m_drxk_state = DRXK_UNINITIALIZED; /* MPEG output configuration */ - state->m_enable_mpeg_output = true; /* If TRUE; enable MPEG ouput */ + state->m_enable_mpeg_output = true; /* If TRUE; enable MPEG output */ state->m_insert_rs_byte = false; /* If TRUE; insert RS byte */ state->m_invert_data = false; /* If TRUE; invert DATA signals */ state->m_invert_err = false; /* If TRUE; invert ERR signal */ @@ -3870,7 +3870,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, goto error; } #else - /* Set Priorty high */ + /* Set Priority high */ transmission_params |= OFDM_SC_RA_RAM_OP_PARAM_PRIO_HI; status = write16(state, OFDM_EC_SB_PRIOR__A, OFDM_EC_SB_PRIOR_HI); if (status < 0) @@ -3901,7 +3901,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz, } /* - * SAW filter selection: normaly not necesarry, but if wanted + * SAW filter selection: normally not necessary, but if wanted * the application can select a SAW filter via the driver by * using UIOs */ @@ -5423,7 +5423,7 @@ static int qam_demodulator_command(struct drxk_state *state, set_param_parameters[3] |= (QAM_MIRROR_AUTO_ON); /* Env parameters */ - /* check for LOCKRANGE Extented */ + /* check for LOCKRANGE Extended */ /* set_param_parameters[3] |= QAM_LOCKRANGE_NORMAL; */ status = scu_command(state, diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c index 46a55146cb07..2b422d3ac5fa 100644 --- a/drivers/media/dvb-frontends/ds3000.c +++ b/drivers/media/dvb-frontends/ds3000.c @@ -914,7 +914,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe) /* ds3000 global reset */ ds3000_writereg(state, 0x07, 0x80); 
ds3000_writereg(state, 0x07, 0x00); - /* ds3000 build-in uC reset */ + /* ds3000 built-in uC reset */ ds3000_writereg(state, 0xb2, 0x01); /* ds3000 software reset */ ds3000_writereg(state, 0x00, 0x01); @@ -1023,7 +1023,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe) /* ds3000 out of software reset */ ds3000_writereg(state, 0x00, 0x00); - /* start ds3000 build-in uC */ + /* start ds3000 built-in uC */ ds3000_writereg(state, 0xb2, 0x00); if (fe->ops.tuner_ops.get_frequency) { diff --git a/drivers/media/dvb-frontends/isl6421.c b/drivers/media/dvb-frontends/isl6421.c index ae8ec59b665c..7de11d5062c2 100644 --- a/drivers/media/dvb-frontends/isl6421.c +++ b/drivers/media/dvb-frontends/isl6421.c @@ -98,7 +98,7 @@ static int isl6421_set_voltage(struct dvb_frontend *fe, if (ret != 2) return -EIO; - /* Store off status now incase future commands fail */ + /* Store off status now in case future commands fail */ isl6421->is_off = is_off; /* On overflow, the device will try again after 900 ms (typically) */ diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c index cee9c83e48de..99c6289ae585 100644 --- a/drivers/media/dvb-frontends/lgdt3306a.c +++ b/drivers/media/dvb-frontends/lgdt3306a.c @@ -1685,7 +1685,10 @@ static int lgdt3306a_read_signal_strength(struct dvb_frontend *fe, case QAM_256: case QAM_AUTO: /* need to know actual modulation to set proper SNR baseline */ - lgdt3306a_read_reg(state, 0x00a6, &val); + ret = lgdt3306a_read_reg(state, 0x00a6, &val); + if (lg_chkerr(ret)) + goto fail; + if(val & 0x04) ref_snr = 2800; /* QAM-256 28dB */ else diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c index 96807e134886..8abb1a510a81 100644 --- a/drivers/media/dvb-frontends/lgdt330x.c +++ b/drivers/media/dvb-frontends/lgdt330x.c @@ -783,7 +783,7 @@ static int lgdt3303_read_status(struct dvb_frontend *fe, if ((buf[0] & 0x02) == 0x00) *status |= FE_HAS_SYNC; - if ((buf[0] & 0xfd) == 0x01) + if ((buf[0] & 0x01) == 0x01) *status |= FE_HAS_VITERBI | FE_HAS_LOCK; break; default: diff --git a/drivers/media/dvb-frontends/m88rs2000.c b/drivers/media/dvb-frontends/m88rs2000.c index d5bc85501f9e..13888732951c 100644 --- a/drivers/media/dvb-frontends/m88rs2000.c +++ b/drivers/media/dvb-frontends/m88rs2000.c @@ -701,7 +701,7 @@ static int m88rs2000_set_frontend(struct dvb_frontend *fe) if (status & FE_HAS_LOCK) { state->fec_inner = m88rs2000_get_fec(state); - /* Uknown suspect SNR level */ + /* Unknown suspect SNR level */ reg = m88rs2000_readreg(state, 0x65); } diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c index 03e74a729168..bfbb879469f2 100644 --- a/drivers/media/dvb-frontends/mt312.c +++ b/drivers/media/dvb-frontends/mt312.c @@ -645,7 +645,9 @@ static int mt312_set_frontend(struct dvb_frontend *fe) if (ret < 0) return ret; - mt312_reset(state, 0); + ret = mt312_reset(state, 0); + if (ret < 0) + return ret; return 0; } diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c index 0961e686ff68..0ef72d6c6f8b 100644 --- a/drivers/media/dvb-frontends/nxt200x.c +++ b/drivers/media/dvb-frontends/nxt200x.c @@ -153,7 +153,7 @@ static int nxt200x_writereg_multibyte (struct nxt200x_state* state, u8 reg, u8* u8 attr, len2, buf; dprintk("%s\n", __func__); - /* set mutli register register */ + /* set multi register register */ nxt200x_writebytes(state, 0x35, ®, 1); /* send the actual data */ @@ -214,7 +214,7 @@ static int nxt200x_readreg_multibyte (struct 
nxt200x_state* state, u8 reg, u8* d u8 buf, len2, attr; dprintk("%s\n", __func__); - /* set mutli register register */ + /* set multi register register */ nxt200x_writebytes(state, 0x35, ®, 1); switch (state->demod_chip) { diff --git a/drivers/media/dvb-frontends/or51211.c b/drivers/media/dvb-frontends/or51211.c index a39bbd8ff1f0..7343da11a1d8 100644 --- a/drivers/media/dvb-frontends/or51211.c +++ b/drivers/media/dvb-frontends/or51211.c @@ -59,7 +59,7 @@ struct or51211_state { /* Demodulator private data */ u8 initialized:1; - u32 snr; /* Result of last SNR claculation */ + u32 snr; /* Result of last SNR calculation */ /* Tuner private data */ u32 current_frequency; diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c index d6673f4fb47b..57fb05bb7e96 100644 --- a/drivers/media/dvb-frontends/rtl2832_sdr.c +++ b/drivers/media/dvb-frontends/rtl2832_sdr.c @@ -471,7 +471,7 @@ static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb) { struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue); - /* Don't allow queing new buffers after device disconnection */ + /* Don't allow queueing new buffers after device disconnection */ if (!dev->udev) return -ENODEV; diff --git a/drivers/media/dvb-frontends/s5h1409.c b/drivers/media/dvb-frontends/s5h1409.c index ceeb0c3551ce..a2907d035fe2 100644 --- a/drivers/media/dvb-frontends/s5h1409.c +++ b/drivers/media/dvb-frontends/s5h1409.c @@ -490,7 +490,7 @@ static void s5h1409_set_qam_amhum_mode(struct dvb_frontend *fe) if (state->qam_state == QAM_STATE_QAM_OPTIMIZED_L3) { /* We've already reached the maximum optimization level, so - dont bother banging on the status registers */ + don't bother banging on the status registers */ return; } diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c index 8d31cf3f4f07..270a3c559e08 100644 --- a/drivers/media/dvb-frontends/sp8870.c +++ b/drivers/media/dvb-frontends/sp8870.c @@ -293,7 +293,9 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe) sp8870_writereg(state, 0xc05, reg0xc05); // read status reg in order to clear pending irqs - sp8870_readreg(state, 0x200); + err = sp8870_readreg(state, 0x200); + if (err) + return err; // system controller start sp8870_microcontroller_start(state); diff --git a/drivers/media/dvb-frontends/stb0899_algo.c b/drivers/media/dvb-frontends/stb0899_algo.c index bd2defde7a77..b5debb61bca5 100644 --- a/drivers/media/dvb-frontends/stb0899_algo.c +++ b/drivers/media/dvb-frontends/stb0899_algo.c @@ -835,8 +835,8 @@ static u32 stb0899_dvbs2_calc_dev(struct stb0899_state *state) dec_ratio = (internal->master_clk * 2) / (5 * internal->srate); dec_ratio = (dec_ratio == 0) ? 
1 : dec_ratio; - master_clk = internal->master_clk / 1000; /* for integer Caculation*/ - srate = internal->srate / 1000; /* for integer Caculation*/ + master_clk = internal->master_clk / 1000; /* for integer Calculation*/ + srate = internal->srate / 1000; /* for integer Calculation*/ correction = (512 * master_clk) / (2 * dec_ratio * srate); return correction; @@ -864,7 +864,7 @@ static void stb0899_dvbs2_set_srate(struct stb0899_state *state) win_sel = dec_rate - 4; decim = (1 << dec_rate); - /* (FSamp/Fsymbol *100) for integer Caculation */ + /* (FSamp/Fsymbol *100) for integer Calculation */ f_sym = internal->master_clk / ((decim * internal->srate) / 1000); if (f_sym <= 2250) /* don't band limit signal going into btr block*/ diff --git a/drivers/media/dvb-frontends/stv0367_defs.h b/drivers/media/dvb-frontends/stv0367_defs.h index 277d2971ed3f..4afe8248a667 100644 --- a/drivers/media/dvb-frontends/stv0367_defs.h +++ b/drivers/media/dvb-frontends/stv0367_defs.h @@ -1096,7 +1096,7 @@ static const struct st_register def0367dd_ofdm[] = { }; static const struct st_register def0367dd_qam[] = { - {R367CAB_CTRL_1, 0x06}, /* Orginal 0x04 */ + {R367CAB_CTRL_1, 0x06}, /* Original 0x04 */ {R367CAB_CTRL_2, 0x03}, {R367CAB_IT_STATUS1, 0x2b}, {R367CAB_IT_STATUS2, 0x08}, diff --git a/drivers/media/dvb-frontends/stv0900_core.c b/drivers/media/dvb-frontends/stv0900_core.c index 254618a06140..fa1a0fb577ad 100644 --- a/drivers/media/dvb-frontends/stv0900_core.c +++ b/drivers/media/dvb-frontends/stv0900_core.c @@ -744,12 +744,12 @@ static int stv0900_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks) if (stv0900_get_standard(fe, demod) == STV0900_DVBS2_STANDARD) { /* DVB-S2 delineator errors count */ - /* retreiving number for errnous headers */ + /* retrieving number for errnous headers */ err_val1 = stv0900_read_reg(intp, BBFCRCKO1); err_val0 = stv0900_read_reg(intp, BBFCRCKO0); header_err_val = (err_val1 << 8) | err_val0; - /* retreiving number for errnous packets */ + /* retrieving number for errnous packets */ err_val1 = stv0900_read_reg(intp, UPCRCKO1); err_val0 = stv0900_read_reg(intp, UPCRCKO0); *ucblocks = (err_val1 << 8) | err_val0; diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c index fc2440d8af36..68d7c7b41071 100644 --- a/drivers/media/dvb-frontends/stv0910.c +++ b/drivers/media/dvb-frontends/stv0910.c @@ -1238,7 +1238,7 @@ static int gate_ctrl(struct dvb_frontend *fe, int enable) * mutex_lock note: Concurrent I2C gate bus accesses must be * prevented (STV0910 = dual demod on a single IC with a single I2C * gate/bus, and two tuners attached), similar to most (if not all) - * other I2C host interfaces/busses. + * other I2C host interfaces/buses. 
* * enable=1 (open I2C gate) will grab the lock * enable=0 (close I2C gate) releases the lock @@ -1500,7 +1500,7 @@ static int read_status(struct dvb_frontend *fe, enum fe_status *status) RSTV0910_P2_FBERCPT4 + state->regoff, 0x00); /* * Reset the packet Error counter2 (and Set it to - * infinit error count mode) + * infinite error count mode) */ write_reg(state, RSTV0910_P2_ERRCTRL2 + state->regoff, 0xc1); diff --git a/drivers/media/dvb-frontends/stv6110.c b/drivers/media/dvb-frontends/stv6110.c index 7db9a5bceccc..e54708eb4fb0 100644 --- a/drivers/media/dvb-frontends/stv6110.c +++ b/drivers/media/dvb-frontends/stv6110.c @@ -202,7 +202,7 @@ static int stv6110_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth) i++; } - /* RCCLKOFF = 1 calibration done, desactivate the calibration Clock */ + /* RCCLKOFF = 1 calibration done, deactivate the calibration Clock */ priv->regs[RSTV6110_CTRL3] |= (1 << 6); stv6110_write_regs(fe, &priv->regs[RSTV6110_CTRL3], RSTV6110_CTRL3, 1); return 0; diff --git a/drivers/media/dvb-frontends/tda1004x.h b/drivers/media/dvb-frontends/tda1004x.h index efd7659dace9..26f504a830e3 100644 --- a/drivers/media/dvb-frontends/tda1004x.h +++ b/drivers/media/dvb-frontends/tda1004x.h @@ -33,7 +33,7 @@ enum tda10046_xtal { enum tda10046_agc { TDA10046_AGC_DEFAULT, /* original configuration */ - TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negtive */ + TDA10046_AGC_IFO_AUTO_NEG, /* IF AGC only, automatic, negative */ TDA10046_AGC_IFO_AUTO_POS, /* IF AGC only, automatic, positive */ TDA10046_AGC_TDA827X, /* IF AGC only, special setup for tda827x */ }; diff --git a/drivers/media/dvb-frontends/tda10086.c b/drivers/media/dvb-frontends/tda10086.c index 8323e4e53d66..85dddfce8ef4 100644 --- a/drivers/media/dvb-frontends/tda10086.c +++ b/drivers/media/dvb-frontends/tda10086.c @@ -437,7 +437,7 @@ static int tda10086_set_frontend(struct dvb_frontend *fe) fe->ops.i2c_gate_ctrl(fe, 0); } - /* calcluate the frequency offset (in *Hz* not kHz) */ + /* calculate the frequency offset (in *Hz* not kHz) */ freqoff = fe_params->frequency - freq; freqoff = ((1<<16) * freqoff) / (SACLK/1000); tda10086_write_byte(state, 0x3d, 0x80 | ((freqoff >> 8) & 0x7f)); diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c index eeb2318c102f..e064e2b22d9d 100644 --- a/drivers/media/dvb-frontends/tda18271c2dd.c +++ b/drivers/media/dvb-frontends/tda18271c2dd.c @@ -105,7 +105,7 @@ struct tda_state { s32 m_RF_B2[7]; u32 m_RF3[7]; - u8 m_TMValue_RFCal; /* Calibration temperatur */ + u8 m_TMValue_RFCal; /* Calibration temperature */ bool m_bFMInput; /* true to use Pin 8 for FM Radio */ @@ -400,7 +400,7 @@ static int CalibrateRF(struct tda_state *state, break; /* Switching off LT (as datasheet says) causes calibration on C1 to fail */ - /* (Readout of Cprog is allways 255) */ + /* (Readout of Cprog is always 255) */ if (state->m_Regs[ID] != 0x83) /* C1: ID == 83, C2: ID == 84 */ state->m_Regs[EP3] |= 0x40; /* SM_LT = 1 */ @@ -644,7 +644,7 @@ static int PowerScan(struct tda_state *state, if (status < 0) break; CID_Gain = Regs[EB10] & 0x3F; - state->m_Regs[ID] = Regs[ID]; /* Chip version, (needed for C1 workarround in CalibrateRF) */ + state->m_Regs[ID] = Regs[ID]; /* Chip version, (needed for C1 workaround in CalibrateRF) */ *pRF_Out = RF_in; diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index 4c936e129500..6d32f8dcf83b 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig @@ -820,6 +820,25 @@ config VIDEO_OV7740 This is a 
Video4Linux2 sensor driver for the OmniVision OV7740 VGA camera sensor. +config VIDEO_OV8856 + tristate "OmniVision OV8856 sensor support" + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on MEDIA_CAMERA_SUPPORT + select V4L2_FWNODE + help + This is a Video4Linux2 sensor driver for the OmniVision + OV8856 camera sensor. + + To compile this driver as a module, choose M here: the + module will be called ov8856. + +config VIDEO_OV9640 + tristate "OmniVision OV9640 sensor support" + depends on I2C && VIDEO_V4L2 + help + This is a Video4Linux2 sensor driver for the OmniVision + OV9640 camera sensor. + config VIDEO_OV9650 tristate "OmniVision OV9650/OV9652 sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API @@ -848,6 +867,14 @@ config VIDEO_VS6624 To compile this driver as a module, choose M here: the module will be called vs6624. +config VIDEO_MT9M001 + tristate "mt9m001 support" + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API + depends on MEDIA_CAMERA_SUPPORT + help + This driver supports MT9M001 cameras from Micron, monochrome + and colour models. + config VIDEO_MT9M032 tristate "MT9M032 camera sensor support" depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API @@ -1100,18 +1127,11 @@ config VIDEO_I2C Enable the I2C transport video support which supports the following: * Panasonic AMG88xx Grid-Eye Sensors + * Melexis MLX90640 Thermal Cameras To compile this driver as a module, choose M here: the module will be called video-i2c endmenu -menu "Sensors used on soc_camera driver" - -if SOC_CAMERA - source "drivers/media/i2c/soc_camera/Kconfig" -endif - -endmenu - endif diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile index 65fae7732de0..a64fca82e0c4 100644 --- a/drivers/media/i2c/Makefile +++ b/drivers/media/i2c/Makefile @@ -6,7 +6,6 @@ obj-$(CONFIG_VIDEO_SMIAPP) += smiapp/ obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/ obj-$(CONFIG_VIDEO_CX25840) += cx25840/ obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/ -obj-y += soc_camera/ obj-$(CONFIG_VIDEO_APTINA_PLL) += aptina-pll.o obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o @@ -78,8 +77,11 @@ obj-$(CONFIG_VIDEO_OV7640) += ov7640.o obj-$(CONFIG_VIDEO_OV7670) += ov7670.o obj-$(CONFIG_VIDEO_OV772X) += ov772x.o obj-$(CONFIG_VIDEO_OV7740) += ov7740.o +obj-$(CONFIG_VIDEO_OV8856) += ov8856.o +obj-$(CONFIG_VIDEO_OV9640) += ov9640.o obj-$(CONFIG_VIDEO_OV9650) += ov9650.o obj-$(CONFIG_VIDEO_OV13858) += ov13858.o +obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o diff --git a/drivers/media/i2c/adv7175.c b/drivers/media/i2c/adv7175.c index e31e8d909bb9..419b98117133 100644 --- a/drivers/media/i2c/adv7175.c +++ b/drivers/media/i2c/adv7175.c @@ -219,7 +219,7 @@ static int adv7175_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std) * SECAM->PAL (typically it does not work * due to genlock: when decoder is in SECAM * and encoder in in PAL the subcarrier can - * not be syncronized with horizontal + * not be synchronized with horizontal * quency) */ adv7175_write_block(sd, init_pal, sizeof(init_pal)); if (encoder->input == 0) diff --git a/drivers/media/i2c/adv748x/adv748x-afe.c b/drivers/media/i2c/adv748x/adv748x-afe.c index 71714634efb0..dbbb1e4d6363 100644 --- a/drivers/media/i2c/adv748x/adv748x-afe.c +++ b/drivers/media/i2c/adv748x/adv748x-afe.c @@ -282,7 +282,7 @@ static int adv748x_afe_s_stream(struct v4l2_subdev *sd, int enable) goto unlock; } - ret = adv748x_tx_power(&state->txb, enable); + ret = 
adv748x_tx_power(afe->tx, enable); if (ret) goto unlock; diff --git a/drivers/media/i2c/adv748x/adv748x-core.c b/drivers/media/i2c/adv748x/adv748x-core.c index 6854d898fdd1..f57cd77a32fa 100644 --- a/drivers/media/i2c/adv748x/adv748x-core.c +++ b/drivers/media/i2c/adv748x/adv748x-core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "adv748x.h" @@ -124,6 +125,16 @@ int adv748x_write(struct adv748x_state *state, u8 page, u8 reg, u8 value) return regmap_write(state->regmap[page], reg, value); } +static int adv748x_write_check(struct adv748x_state *state, u8 page, u8 reg, + u8 value, int *error) +{ + if (*error) + return *error; + + *error = adv748x_write(state, page, reg, value); + return *error; +} + /* adv748x_write_block(): Write raw data with a maximum of I2C_SMBUS_BLOCK_MAX * size to one or more registers. * @@ -207,20 +218,13 @@ static int adv748x_write_regs(struct adv748x_state *state, { int ret; - while (regs->page != ADV748X_PAGE_EOR) { - if (regs->page == ADV748X_PAGE_WAIT) { - msleep(regs->value); - } else { - ret = adv748x_write(state, regs->page, regs->reg, - regs->value); - if (ret < 0) { - adv_err(state, - "Error regs page: 0x%02x reg: 0x%02x\n", - regs->page, regs->reg); - return ret; - } + for (; regs->page != ADV748X_PAGE_EOR; regs++) { + ret = adv748x_write(state, regs->page, regs->reg, regs->value); + if (ret < 0) { + adv_err(state, "Error regs page: 0x%02x reg: 0x%02x\n", + regs->page, regs->reg); + return ret; } - regs++; } return 0; @@ -230,68 +234,77 @@ static int adv748x_write_regs(struct adv748x_state *state, * TXA and TXB */ -static const struct adv748x_reg_value adv748x_power_up_txa_4lane[] = { +static int adv748x_power_up_tx(struct adv748x_csi2 *tx) +{ + struct adv748x_state *state = tx->state; + u8 page = is_txa(tx) ? 
ADV748X_PAGE_TXA : ADV748X_PAGE_TXB; + int ret = 0; - {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */ - {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */ + /* Enable n-lane MIPI */ + adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret); - {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ - {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ - {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */ + /* Set Auto DPHY Timing */ + adv748x_write_check(state, page, 0x00, 0xa0 | tx->num_lanes, &ret); - {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ -}; + /* ADI Required Write */ + if (tx->src == &state->hdmi.sd) { + adv748x_write_check(state, page, 0xdb, 0x10, &ret); + adv748x_write_check(state, page, 0xd6, 0x07, &ret); + } else { + adv748x_write_check(state, page, 0xd2, 0x40, &ret); + } -static const struct adv748x_reg_value adv748x_power_down_txa_4lane[] = { + adv748x_write_check(state, page, 0xc4, 0x0a, &ret); + adv748x_write_check(state, page, 0x71, 0x33, &ret); + adv748x_write_check(state, page, 0x72, 0x11, &ret); - {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0x1e, 0x00}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */ - {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ - {ADV748X_PAGE_TXA, 0xc1, 0x3b}, /* ADI Required Write */ + /* i2c_dphy_pwdn - 1'b0 */ + adv748x_write_check(state, page, 0xf0, 0x00, &ret); - {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ -}; + /* ADI Required Writes*/ + adv748x_write_check(state, page, 0x31, 0x82, &ret); + adv748x_write_check(state, page, 0x1e, 0x40, &ret); -static const struct adv748x_reg_value adv748x_power_up_txb_1lane[] = { + /* i2c_mipi_pll_en - 1'b1 */ + adv748x_write_check(state, page, 0xda, 0x01, &ret); + usleep_range(2000, 2500); - {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */ - {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */ + /* Power-up CSI-TX */ + adv748x_write_check(state, page, 0x00, 0x20 | tx->num_lanes, &ret); + usleep_range(1000, 1500); - {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ - {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ - {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */ + /* ADI Required Writes */ + adv748x_write_check(state, page, 0xc1, 0x2b, &ret); + usleep_range(1000, 1500); + adv748x_write_check(state, page, 0x31, 0x80, &ret); - {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ -}; + return ret; +} -static const struct adv748x_reg_value adv748x_power_down_txb_1lane[] = { +static int adv748x_power_down_tx(struct adv748x_csi2 *tx) +{ + struct adv748x_state *state = tx->state; + u8 page = is_txa(tx) ? 
ADV748X_PAGE_TXA : ADV748X_PAGE_TXB; + int ret = 0; - {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0x1e, 0x00}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */ - {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ - {ADV748X_PAGE_TXB, 0xc1, 0x3b}, /* ADI Required Write */ + /* ADI Required Writes */ + adv748x_write_check(state, page, 0x31, 0x82, &ret); + adv748x_write_check(state, page, 0x1e, 0x00, &ret); - {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ -}; + /* Enable n-lane MIPI */ + adv748x_write_check(state, page, 0x00, 0x80 | tx->num_lanes, &ret); + + /* i2c_mipi_pll_en - 1'b1 */ + adv748x_write_check(state, page, 0xda, 0x01, &ret); + + /* ADI Required Write */ + adv748x_write_check(state, page, 0xc1, 0x3b, &ret); + + return ret; +} int adv748x_tx_power(struct adv748x_csi2 *tx, bool on) { - struct adv748x_state *state = tx->state; - const struct adv748x_reg_value *reglist; int val; if (!is_tx_enabled(tx)) @@ -309,19 +322,57 @@ int adv748x_tx_power(struct adv748x_csi2 *tx, bool on) WARN_ONCE((on && val & ADV748X_CSI_FS_AS_LS_UNKNOWN), "Enabling with unknown bit set"); - if (on) - reglist = is_txa(tx) ? adv748x_power_up_txa_4lane : - adv748x_power_up_txb_1lane; - else - reglist = is_txa(tx) ? adv748x_power_down_txa_4lane : - adv748x_power_down_txb_1lane; - - return adv748x_write_regs(state, reglist); + return on ? adv748x_power_up_tx(tx) : adv748x_power_down_tx(tx); } /* ----------------------------------------------------------------------------- * Media Operations */ +static int adv748x_link_setup(struct media_entity *entity, + const struct media_pad *local, + const struct media_pad *remote, u32 flags) +{ + struct v4l2_subdev *rsd = media_entity_to_v4l2_subdev(remote->entity); + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct adv748x_state *state = v4l2_get_subdevdata(sd); + struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); + bool enable = flags & MEDIA_LNK_FL_ENABLED; + u8 io10_mask = ADV748X_IO_10_CSI1_EN | + ADV748X_IO_10_CSI4_EN | + ADV748X_IO_10_CSI4_IN_SEL_AFE; + u8 io10 = 0; + + /* Refuse to enable multiple links to the same TX at the same time. */ + if (enable && tx->src) + return -EINVAL; + + /* Set or clear the source (HDMI or AFE) and the current TX. */ + if (rsd == &state->afe.sd) + state->afe.tx = enable ? tx : NULL; + else + state->hdmi.tx = enable ? tx : NULL; + + tx->src = enable ? 
rsd : NULL; + + if (state->afe.tx) { + /* AFE Requires TXA enabled, even when output to TXB */ + io10 |= ADV748X_IO_10_CSI4_EN; + if (is_txa(tx)) + io10 |= ADV748X_IO_10_CSI4_IN_SEL_AFE; + else + io10 |= ADV748X_IO_10_CSI1_EN; + } + + if (state->hdmi.tx) + io10 |= ADV748X_IO_10_CSI4_EN; + + return io_clrset(state, ADV748X_IO_10, io10_mask, io10); +} + +static const struct media_entity_operations adv748x_tx_media_ops = { + .link_setup = adv748x_link_setup, + .link_validate = v4l2_subdev_link_validate, +}; static const struct media_entity_operations adv748x_media_ops = { .link_validate = v4l2_subdev_link_validate, @@ -331,18 +382,8 @@ static const struct media_entity_operations adv748x_media_ops = { * HW setup */ -static const struct adv748x_reg_value adv748x_sw_reset[] = { - - {ADV748X_PAGE_IO, 0xff, 0xff}, /* SW reset */ - {ADV748X_PAGE_WAIT, 0x00, 0x05},/* delay 5 */ - {ADV748X_PAGE_IO, 0x01, 0x76}, /* ADI Required Write */ - {ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */ - {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ -}; - -/* Supported Formats For Script Below */ -/* - 01-29 HDMI to MIPI TxA CSI 4-Lane - RGB888: */ -static const struct adv748x_reg_value adv748x_init_txa_4lane[] = { +/* Initialize CP Core with RGB888 format. */ +static const struct adv748x_reg_value adv748x_init_hdmi[] = { /* Disable chip powerdown & Enable HDMI Rx block */ {ADV748X_PAGE_IO, 0x00, 0x40}, @@ -383,32 +424,11 @@ static const struct adv748x_reg_value adv748x_init_txa_4lane[] = { {ADV748X_PAGE_IO, 0x0c, 0xe0}, /* Enable LLC_DLL & Double LLC Timing */ {ADV748X_PAGE_IO, 0x0e, 0xdd}, /* LLC/PIX/SPI PINS TRISTATED AUD */ - {ADV748X_PAGE_TXA, 0x00, 0x84}, /* Enable 4-lane MIPI */ - {ADV748X_PAGE_TXA, 0x00, 0xa4}, /* Set Auto DPHY Timing */ - {ADV748X_PAGE_TXA, 0xdb, 0x10}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0xd6, 0x07}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0xc4, 0x0a}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0x71, 0x33}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0x72, 0x11}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */ - - {ADV748X_PAGE_TXA, 0x31, 0x82}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0x1e, 0x40}, /* ADI Required Write */ - {ADV748X_PAGE_TXA, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ - {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ - {ADV748X_PAGE_TXA, 0x00, 0x24 },/* Power-up CSI-TX */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXA, 0xc1, 0x2b}, /* ADI Required Write */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXA, 0x31, 0x80}, /* ADI Required Write */ - {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ }; -/* 02-01 Analog CVBS to MIPI TX-B CSI 1-Lane - */ -/* Autodetect CVBS Single Ended In Ain 1 - MIPI Out */ -static const struct adv748x_reg_value adv748x_init_txb_1lane[] = { - +/* Initialize AFE core with YUV8 format. 
*/ +static const struct adv748x_reg_value adv748x_init_afe[] = { {ADV748X_PAGE_IO, 0x00, 0x30}, /* Disable chip powerdown Rx */ {ADV748X_PAGE_IO, 0xf2, 0x01}, /* Enable I2C Read Auto-Increment */ @@ -435,33 +455,36 @@ static const struct adv748x_reg_value adv748x_init_txb_1lane[] = { {ADV748X_PAGE_SDP, 0x31, 0x12}, /* ADI Required Write */ {ADV748X_PAGE_SDP, 0xe6, 0x4f}, /* V bit end pos manually in NTSC */ - {ADV748X_PAGE_TXB, 0x00, 0x81}, /* Enable 1-lane MIPI */ - {ADV748X_PAGE_TXB, 0x00, 0xa1}, /* Set Auto DPHY Timing */ - {ADV748X_PAGE_TXB, 0xd2, 0x40}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0xc4, 0x0a}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0x71, 0x33}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0x72, 0x11}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0xf0, 0x00}, /* i2c_dphy_pwdn - 1'b0 */ - {ADV748X_PAGE_TXB, 0x31, 0x82}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0x1e, 0x40}, /* ADI Required Write */ - {ADV748X_PAGE_TXB, 0xda, 0x01}, /* i2c_mipi_pll_en - 1'b1 */ - - {ADV748X_PAGE_WAIT, 0x00, 0x02},/* delay 2 */ - {ADV748X_PAGE_TXB, 0x00, 0x21 },/* Power-up CSI-TX */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXB, 0xc1, 0x2b}, /* ADI Required Write */ - {ADV748X_PAGE_WAIT, 0x00, 0x01},/* delay 1 */ - {ADV748X_PAGE_TXB, 0x31, 0x80}, /* ADI Required Write */ - {ADV748X_PAGE_EOR, 0xff, 0xff} /* End of register table */ }; +static int adv748x_sw_reset(struct adv748x_state *state) +{ + int ret; + + ret = io_write(state, ADV748X_IO_REG_FF, ADV748X_IO_REG_FF_MAIN_RESET); + if (ret) + return ret; + + usleep_range(5000, 6000); + + /* Disable CEC Wakeup from power-down mode */ + ret = io_clrset(state, ADV748X_IO_REG_01, ADV748X_IO_REG_01_PWRDN_MASK, + ADV748X_IO_REG_01_PWRDNB); + if (ret) + return ret; + + /* Enable I2C Read Auto-Increment for consecutive reads */ + return io_write(state, ADV748X_IO_REG_F2, + ADV748X_IO_REG_F2_READ_AUTO_INC); +} + static int adv748x_reset(struct adv748x_state *state) { int ret; u8 regval = 0; - ret = adv748x_write_regs(state, adv748x_sw_reset); + ret = adv748x_sw_reset(state); if (ret < 0) return ret; @@ -469,18 +492,19 @@ static int adv748x_reset(struct adv748x_state *state) if (ret < 0) return ret; - /* Init and power down TXA */ - ret = adv748x_write_regs(state, adv748x_init_txa_4lane); + /* Initialize CP and AFE cores. */ + ret = adv748x_write_regs(state, adv748x_init_hdmi); if (ret) return ret; - adv748x_tx_power(&state->txa, 0); - - /* Init and power down TXB */ - ret = adv748x_write_regs(state, adv748x_init_txb_1lane); + ret = adv748x_write_regs(state, adv748x_init_afe); if (ret) return ret; + /* Reset TXA and TXB */ + adv748x_tx_power(&state->txa, 1); + adv748x_tx_power(&state->txa, 0); + adv748x_tx_power(&state->txb, 1); adv748x_tx_power(&state->txb, 0); /* Disable chip powerdown & Enable HDMI Rx block */ @@ -542,7 +566,51 @@ void adv748x_subdev_init(struct v4l2_subdev *sd, struct adv748x_state *state, state->client->addr, ident); sd->entity.function = function; - sd->entity.ops = &adv748x_media_ops; + sd->entity.ops = is_tx(adv748x_sd_to_csi2(sd)) ? 
+ &adv748x_tx_media_ops : &adv748x_media_ops; +} + +static int adv748x_parse_csi2_lanes(struct adv748x_state *state, + unsigned int port, + struct device_node *ep) +{ + struct v4l2_fwnode_endpoint vep; + unsigned int num_lanes; + int ret; + + if (port != ADV748X_PORT_TXA && port != ADV748X_PORT_TXB) + return 0; + + vep.bus_type = V4L2_MBUS_CSI2_DPHY; + ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &vep); + if (ret) + return ret; + + num_lanes = vep.bus.mipi_csi2.num_data_lanes; + + if (vep.base.port == ADV748X_PORT_TXA) { + if (num_lanes != 1 && num_lanes != 2 && num_lanes != 4) { + adv_err(state, "TXA: Invalid number (%u) of lanes\n", + num_lanes); + return -EINVAL; + } + + state->txa.num_lanes = num_lanes; + adv_dbg(state, "TXA: using %u lanes\n", state->txa.num_lanes); + } + + if (vep.base.port == ADV748X_PORT_TXB) { + if (num_lanes != 1) { + adv_err(state, "TXB: Invalid number (%u) of lanes\n", + num_lanes); + return -EINVAL; + } + + state->txb.num_lanes = num_lanes; + adv_dbg(state, "TXB: using %u lanes\n", state->txb.num_lanes); + } + + return 0; } static int adv748x_parse_dt(struct adv748x_state *state) @@ -551,6 +619,7 @@ static int adv748x_parse_dt(struct adv748x_state *state) struct of_endpoint ep; bool out_found = false; bool in_found = false; + int ret; for_each_endpoint_of_node(state->dev->of_node, ep_np) { of_graph_parse_endpoint(ep_np, &ep); @@ -581,6 +650,11 @@ static int adv748x_parse_dt(struct adv748x_state *state) in_found = true; else out_found = true; + + /* Store number of CSI-2 lanes used for TXA and TXB. */ + ret = adv748x_parse_csi2_lanes(state, ep.port, ep_np); + if (ret) + return ret; } return in_found && out_found ? 0 : -ENODEV; @@ -604,7 +678,7 @@ static int adv748x_probe(struct i2c_client *client, if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; - state = kzalloc(sizeof(struct adv748x_state), GFP_KERNEL); + state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; @@ -702,7 +776,6 @@ err_cleanup_dt: adv748x_dt_cleanup(state); err_free_mutex: mutex_destroy(&state->mutex); - kfree(state); return ret; } @@ -721,8 +794,6 @@ static int adv748x_remove(struct i2c_client *client) adv748x_dt_cleanup(state); mutex_destroy(&state->mutex); - kfree(state); - return 0; } diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c index 6ce21542ed48..2091cda50935 100644 --- a/drivers/media/i2c/adv748x/adv748x-csi2.c +++ b/drivers/media/i2c/adv748x/adv748x-csi2.c @@ -27,6 +27,7 @@ static int adv748x_csi2_set_virtual_channel(struct adv748x_csi2 *tx, * @v4l2_dev: Video registration device * @src: Source subdevice to establish link * @src_pad: Pad number of source to link to this @tx + * @enable: Link enabled flag * * Ensure that the subdevice is registered against the v4l2_device, and link the * source pad to the sink pad of the CSI2 bus entity. @@ -34,26 +35,27 @@ static int adv748x_csi2_set_virtual_channel(struct adv748x_csi2 *tx, static int adv748x_csi2_register_link(struct adv748x_csi2 *tx, struct v4l2_device *v4l2_dev, struct v4l2_subdev *src, - unsigned int src_pad) + unsigned int src_pad, + bool enable) { - int enabled = MEDIA_LNK_FL_ENABLED; int ret; - /* - * Dynamic linking of the AFE is not supported. - * Register the links as immutable. 
- */ - enabled |= MEDIA_LNK_FL_IMMUTABLE; - if (!src->v4l2_dev) { ret = v4l2_device_register_subdev(v4l2_dev, src); if (ret) return ret; } - return media_create_pad_link(&src->entity, src_pad, - &tx->sd.entity, ADV748X_CSI2_SINK, - enabled); + ret = media_create_pad_link(&src->entity, src_pad, + &tx->sd.entity, ADV748X_CSI2_SINK, + enable ? MEDIA_LNK_FL_ENABLED : 0); + if (ret) + return ret; + + if (enable) + tx->src = src; + + return 0; } /* ----------------------------------------------------------------------------- @@ -68,24 +70,42 @@ static int adv748x_csi2_registered(struct v4l2_subdev *sd) { struct adv748x_csi2 *tx = adv748x_sd_to_csi2(sd); struct adv748x_state *state = tx->state; + int ret; adv_dbg(state, "Registered %s (%s)", is_txa(tx) ? "TXA":"TXB", sd->name); /* - * The adv748x hardware allows the AFE to route through the TXA, however - * this is not currently supported in this driver. + * Link TXA to AFE and HDMI, and TXB to AFE only as TXB cannot output + * HDMI. * - * Link HDMI->TXA, and AFE->TXB directly. + * The HDMI->TXA link is enabled by default, as is the AFE->TXB one. */ - if (is_txa(tx) && is_hdmi_enabled(state)) - return adv748x_csi2_register_link(tx, sd->v4l2_dev, - &state->hdmi.sd, - ADV748X_HDMI_SOURCE); - if (!is_txa(tx) && is_afe_enabled(state)) - return adv748x_csi2_register_link(tx, sd->v4l2_dev, - &state->afe.sd, - ADV748X_AFE_SOURCE); + if (is_afe_enabled(state)) { + ret = adv748x_csi2_register_link(tx, sd->v4l2_dev, + &state->afe.sd, + ADV748X_AFE_SOURCE, + is_txb(tx)); + if (ret) + return ret; + + /* TXB can output AFE signals only. */ + if (is_txb(tx)) + state->afe.tx = tx; + } + + /* Register link to HDMI for TXA only. */ + if (is_txb(tx) || !is_hdmi_enabled(state)) + return 0; + + ret = adv748x_csi2_register_link(tx, sd->v4l2_dev, &state->hdmi.sd, + ADV748X_HDMI_SOURCE, true); + if (ret) + return ret; + + /* The default HDMI output is TXA. 
*/ + state->hdmi.tx = tx; + return 0; } diff --git a/drivers/media/i2c/adv748x/adv748x-hdmi.c b/drivers/media/i2c/adv748x/adv748x-hdmi.c index 35d027941482..c557f8fdf11a 100644 --- a/drivers/media/i2c/adv748x/adv748x-hdmi.c +++ b/drivers/media/i2c/adv748x/adv748x-hdmi.c @@ -358,7 +358,7 @@ static int adv748x_hdmi_s_stream(struct v4l2_subdev *sd, int enable) mutex_lock(&state->mutex); - ret = adv748x_tx_power(&state->txa, enable); + ret = adv748x_tx_power(hdmi->tx, enable); if (ret) goto done; diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h index 39c2fdc3b416..5042f9e94aee 100644 --- a/drivers/media/i2c/adv748x/adv748x.h +++ b/drivers/media/i2c/adv748x/adv748x.h @@ -39,7 +39,6 @@ enum adv748x_page { ADV748X_PAGE_MAX, /* Fake pages for register sequences */ - ADV748X_PAGE_WAIT, /* Wait x msec */ ADV748X_PAGE_EOR, /* End Mark */ }; @@ -79,17 +78,23 @@ struct adv748x_csi2 { struct v4l2_mbus_framefmt format; unsigned int page; unsigned int port; + unsigned int num_lanes; struct media_pad pads[ADV748X_CSI2_NR_PADS]; struct v4l2_ctrl_handler ctrl_hdl; struct v4l2_ctrl *pixel_rate; + struct v4l2_subdev *src; struct v4l2_subdev sd; }; #define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier) #define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd) + #define is_tx_enabled(_tx) ((_tx)->state->endpoints[(_tx)->port] != NULL) #define is_txa(_tx) ((_tx) == &(_tx)->state->txa) +#define is_txb(_tx) ((_tx) == &(_tx)->state->txb) +#define is_tx(_tx) (is_txa(_tx) || is_txb(_tx)) + #define is_afe_enabled(_state) \ ((_state)->endpoints[ADV748X_PORT_AIN0] != NULL || \ (_state)->endpoints[ADV748X_PORT_AIN1] != NULL || \ @@ -116,6 +121,8 @@ struct adv748x_hdmi { struct v4l2_dv_timings timings; struct v4l2_fract aspect_ratio; + struct adv748x_csi2 *tx; + struct { u8 edid[512]; u32 present; @@ -146,6 +153,8 @@ struct adv748x_afe { struct v4l2_subdev sd; struct v4l2_mbus_framefmt format; + struct adv748x_csi2 *tx; + bool streaming; v4l2_std_id curr_norm; unsigned int input; @@ -201,6 +210,11 @@ struct adv748x_state { #define ADV748X_IO_PD 0x00 /* power down controls */ #define ADV748X_IO_PD_RX_EN BIT(6) +#define ADV748X_IO_REG_01 0x01 /* pwrdn{2}b, prog_xtal_freq */ +#define ADV748X_IO_REG_01_PWRDN_MASK (BIT(7) | BIT(6)) +#define ADV748X_IO_REG_01_PWRDN2B BIT(7) /* CEC Wakeup Support */ +#define ADV748X_IO_REG_01_PWRDNB BIT(6) /* CEC Wakeup Support */ + #define ADV748X_IO_REG_04 0x04 #define ADV748X_IO_REG_04_FORCE_FR BIT(0) /* Force CP free-run */ @@ -214,12 +228,24 @@ struct adv748x_state { #define ADV748X_IO_10_CSI4_EN BIT(7) #define ADV748X_IO_10_CSI1_EN BIT(6) #define ADV748X_IO_10_PIX_OUT_EN BIT(5) +#define ADV748X_IO_10_CSI4_IN_SEL_AFE BIT(3) #define ADV748X_IO_CHIP_REV_ID_1 0xdf #define ADV748X_IO_CHIP_REV_ID_2 0xe0 +#define ADV748X_IO_REG_F2 0xf2 +#define ADV748X_IO_REG_F2_READ_AUTO_INC BIT(0) + +/* For PAGE slave address offsets */ #define ADV748X_IO_SLAVE_ADDR_BASE 0xf2 +/* + * The ADV748x_Recommended_Settings_PrA_2014-08-20.pdf details both 0x80 and + * 0xff as examples for performing a software reset. 
+ */ +#define ADV748X_IO_REG_FF 0xff +#define ADV748X_IO_REG_FF_MAIN_RESET 0xff + /* HDMI RX Map */ #define ADV748X_HDMI_LW1 0x07 /* line width_1 */ #define ADV748X_HDMI_LW1_VERT_FILTER BIT(7) diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 989259488e3d..11ab2df02dc7 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -3102,11 +3102,11 @@ static int adv7842_ddr_ram_test(struct v4l2_subdev *sd) io_write(sd, 0x00, 0x01); /* Program SDP 4x1 */ io_write(sd, 0x01, 0x00); /* Program SDP mode */ - afe_write(sd, 0x80, 0x92); /* SDP Recommeneded Write */ - afe_write(sd, 0x9B, 0x01); /* SDP Recommeneded Write ADV7844ES1 */ - afe_write(sd, 0x9C, 0x60); /* SDP Recommeneded Write ADV7844ES1 */ - afe_write(sd, 0x9E, 0x02); /* SDP Recommeneded Write ADV7844ES1 */ - afe_write(sd, 0xA0, 0x0B); /* SDP Recommeneded Write ADV7844ES1 */ + afe_write(sd, 0x80, 0x92); /* SDP Recommended Write */ + afe_write(sd, 0x9B, 0x01); /* SDP Recommended Write ADV7844ES1 */ + afe_write(sd, 0x9C, 0x60); /* SDP Recommended Write ADV7844ES1 */ + afe_write(sd, 0x9E, 0x02); /* SDP Recommended Write ADV7844ES1 */ + afe_write(sd, 0xA0, 0x0B); /* SDP Recommended Write ADV7844ES1 */ afe_write(sd, 0xC3, 0x02); /* Memory BIST Initialisation */ io_write(sd, 0x0C, 0x40); /* Power up ADV7844 */ io_write(sd, 0x15, 0xBA); /* Enable outputs */ diff --git a/drivers/media/i2c/bt819.c b/drivers/media/i2c/bt819.c index 472e37637c8d..e6d3fe7790bc 100644 --- a/drivers/media/i2c/bt819.c +++ b/drivers/media/i2c/bt819.c @@ -164,12 +164,12 @@ static int bt819_init(struct v4l2_subdev *sd) 0x0e, 0xb4, /* 0x0e Chroma Gain (V) msb */ 0x0f, 0x00, /* 0x0f Hue control */ 0x12, 0x04, /* 0x12 Output Format */ - 0x13, 0x20, /* 0x13 Vertial Scaling msb 0x00 + 0x13, 0x20, /* 0x13 Vertical Scaling msb 0x00 chroma comb OFF, line drop scaling, interlace scaling BUG? Why does turning the chroma comb on fuck up color? Bug in the bt819 stepping on my board? */ - 0x14, 0x00, /* 0x14 Vertial Scaling lsb */ + 0x14, 0x00, /* 0x14 Vertical Scaling lsb */ 0x16, 0x07, /* 0x16 Video Timing Polarity ACTIVE=active low FIELD: high=odd, diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c index b168bf3635b6..8b0b8b5aa531 100644 --- a/drivers/media/i2c/cx25840/cx25840-core.c +++ b/drivers/media/i2c/cx25840/cx25840-core.c @@ -5216,8 +5216,9 @@ static int cx25840_probe(struct i2c_client *client, * those extra inputs. So, let's add it only when needed. 
*/ state->pads[CX25840_PAD_INPUT].flags = MEDIA_PAD_FL_SINK; + state->pads[CX25840_PAD_INPUT].sig_type = PAD_SIGNAL_ANALOG; state->pads[CX25840_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE; - state->pads[CX25840_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE; + state->pads[CX25840_PAD_VID_OUT].sig_type = PAD_SIGNAL_DV; sd->entity.function = MEDIA_ENT_F_ATV_DECODER; ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(state->pads), diff --git a/drivers/media/i2c/cx25840/cx25840-core.h b/drivers/media/i2c/cx25840/cx25840-core.h index c323b1af1f83..e3ff1d7ec770 100644 --- a/drivers/media/i2c/cx25840/cx25840-core.h +++ b/drivers/media/i2c/cx25840/cx25840-core.h @@ -40,7 +40,6 @@ enum cx25840_model { enum cx25840_media_pads { CX25840_PAD_INPUT, CX25840_PAD_VID_OUT, - CX25840_PAD_VBI_OUT, CX25840_NUM_PADS }; @@ -67,7 +66,7 @@ enum cx25840_media_pads { * @is_initialized: whether we have already loaded firmware into the chip * and initialized it * @vbi_regs_offset: offset of vbi regs - * @fw_wait: wait queue to wake an initalization function up when + * @fw_wait: wait queue to wake an initialization function up when * firmware loading (on a separate workqueue) finishes * @fw_work: a work that actually loads the firmware on a separate * workqueue diff --git a/drivers/media/i2c/cx25840/cx25840-ir.c b/drivers/media/i2c/cx25840/cx25840-ir.c index 69cdc09981af..a266118cd7ca 100644 --- a/drivers/media/i2c/cx25840/cx25840-ir.c +++ b/drivers/media/i2c/cx25840/cx25840-ir.c @@ -549,7 +549,7 @@ int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled) ror = stats & STATS_ROR; /* Rx FIFO Over Run */ tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */ - rse = irqen & IRQEN_RSE; /* Rx FIFO Service Reuqest IRQ Enable */ + rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */ rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */ roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */ @@ -638,7 +638,7 @@ int cx25840_ir_irq_handler(struct v4l2_subdev *sd, u32 status, bool *handled) events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED; } if (v) { - /* Clear STATS_ROR & STATS_RTO as needed by reseting hardware */ + /* Clear STATS_ROR & STATS_RTO as needed by resetting hardware */ cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl & ~v); cx25840_write4(c, CX25840_IR_CNTRL_REG, cntrl); *handled = true; diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c index 26d83693a681..3f0b082f863f 100644 --- a/drivers/media/i2c/dw9714.c +++ b/drivers/media/i2c/dw9714.c @@ -267,7 +267,7 @@ static struct i2c_driver dw9714_i2c_driver = { module_i2c_driver(dw9714_i2c_driver); MODULE_AUTHOR("Tianshu Qiu "); -MODULE_AUTHOR("Jian Xu Zheng "); +MODULE_AUTHOR("Jian Xu Zheng"); MODULE_AUTHOR("Yuning Pu "); MODULE_AUTHOR("Jouni Ukkonen "); MODULE_AUTHOR("Tommi Franttila "); diff --git a/drivers/media/i2c/et8ek8/et8ek8_mode.c b/drivers/media/i2c/et8ek8/et8ek8_mode.c index a79882a83885..f503303cb8bc 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_mode.c +++ b/drivers/media/i2c/et8ek8/et8ek8_mode.c @@ -79,7 +79,7 @@ static struct et8ek8_reglist mode1_poweron_mode2_16vga_2592x1968_12_07fps = { { ET8EK8_REG_8BIT, 0x1258, 0x00 }, /* From parallel out to serial out */ { ET8EK8_REG_8BIT, 0x125D, 0x88 }, - /* From w/ embeded data to w/o embeded data */ + /* From w/ embedded data to w/o embedded data */ { ET8EK8_REG_8BIT, 0x125E, 0xC0 }, /* CCP2 out is from STOP to ACTIVE */ { ET8EK8_REG_8BIT, 0x1263, 0x98 }, diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c index 
ec3d1b855f62..9857e151db46 100644 --- a/drivers/media/i2c/imx214.c +++ b/drivers/media/i2c/imx214.c @@ -377,7 +377,7 @@ static const struct reg_8 mode_table_common[] = { /* Moire reduction */ {0x6957, 0x01}, - /* image enhancment */ + /* image enhancement */ {0x6987, 0x17}, {0x698A, 0x03}, {0x698B, 0x03}, diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c index 5fac7fd32634..f3ff1af209f9 100644 --- a/drivers/media/i2c/imx274.c +++ b/drivers/media/i2c/imx274.c @@ -207,8 +207,8 @@ static const char * const tp_qmenu[] = { "Vertical Stripe (555h / 000h)", "Vertical Stripe (000h / FFFh)", "Vertical Stripe (FFFh / 000h)", - "Horizontal Color Bars", "Vertical Color Bars", + "Horizontal Color Bars", }; /* @@ -405,12 +405,12 @@ static const struct reg_8 imx274_start_2[] = { */ static const struct reg_8 imx274_start_3[] = { {0x30F4, 0x00}, - {0x3018, 0xA2}, /* XHS VHS OUTUPT */ + {0x3018, 0xA2}, /* XHS VHS OUTPUT */ {IMX274_TABLE_END, 0x00} }; /* - * imx274 register configuration for stoping stream + * imx274 register configuration for stopping stream */ static const struct reg_8 imx274_stop[] = { {IMX274_STANDBY_REG, 0x01}, @@ -617,24 +617,6 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[]) return 0; } -static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val) -{ - unsigned int uint_val; - int err; - - err = regmap_read(priv->regmap, addr, &uint_val); - if (err) - dev_err(&priv->client->dev, - "%s : i2c read failed, addr = %x\n", __func__, addr); - else - dev_dbg(&priv->client->dev, - "%s : addr 0x%x, val=0x%x\n", __func__, - addr, uint_val); - - *val = uint_val; - return err; -} - static inline int imx274_write_reg(struct stimx274 *priv, u16 addr, u8 val) { int err; diff --git a/drivers/media/i2c/lm3560.c b/drivers/media/i2c/lm3560.c index f122f03bd6b7..70c3294c21d3 100644 --- a/drivers/media/i2c/lm3560.c +++ b/drivers/media/i2c/lm3560.c @@ -55,7 +55,7 @@ enum led_enable { * @regmap: reg. map for i2c * @lock: muxtex for serial access. * @led_mode: V4L2 LED mode - * @ctrls_led: V4L2 contols + * @ctrls_led: V4L2 controls * @subdev_led: V4L2 subdev */ struct lm3560_flash { diff --git a/drivers/media/i2c/lm3646.c b/drivers/media/i2c/lm3646.c index 12ef2653987b..73fbe3c37fc9 100644 --- a/drivers/media/i2c/lm3646.c +++ b/drivers/media/i2c/lm3646.c @@ -62,7 +62,7 @@ enum led_mode { * @regmap: reg. map for i2c * @lock: muxtex for serial access. * @led_mode: V4L2 LED mode - * @ctrls_led: V4L2 contols + * @ctrls_led: V4L2 controls * @subdev_led: V4L2 subdev * @mode_reg : mode register value */ diff --git a/drivers/media/i2c/m5mols/m5mols.h b/drivers/media/i2c/m5mols/m5mols.h index 90a6c520f115..aef5b4f8904e 100644 --- a/drivers/media/i2c/m5mols/m5mols.h +++ b/drivers/media/i2c/m5mols/m5mols.h @@ -253,7 +253,7 @@ struct m5mols_info { * * The I2C read operation of the M-5MOLS requires 2 messages. The first * message sends the information about the command, command category, and total - * message size. The second message is used to retrieve the data specifed in + * message size. 
The second message is used to retrieve the data specified in * the first message * * 1st message 2nd message diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index b8b2bf4cbfb2..454a336be336 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -291,7 +291,7 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val) * @reg: the I2C_REG() address of an 8-bit status register to check * @value: expected status register value * @mask: bit mask for the read status register value - * @timeout: timeout in miliseconds, or -1 for default timeout + * @timeout: timeout in milliseconds, or -1 for default timeout * * The @reg register value is ORed with @mask before comparing with @value. * diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c index c63be01059b2..522fb1d561e7 100644 --- a/drivers/media/i2c/msp3400-driver.c +++ b/drivers/media/i2c/msp3400-driver.c @@ -11,7 +11,7 @@ * * FM-Mono * should work. The stereo modes are backward compatible to FM-mono, - * therefore FM-Mono should be allways available. + * therefore FM-Mono should be always available. * * FM-Stereo (B/G, used in germany) * should work, with autodetect diff --git a/drivers/media/i2c/soc_camera/soc_mt9m001.c b/drivers/media/i2c/mt9m001.c similarity index 66% rename from drivers/media/i2c/soc_camera/soc_mt9m001.c rename to drivers/media/i2c/mt9m001.c index a1a85ff838c5..4b23fde937b3 100644 --- a/drivers/media/i2c/soc_camera/soc_mt9m001.c +++ b/drivers/media/i2c/mt9m001.c @@ -1,29 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for MT9M001 CMOS Image Sensor from Micron * * Copyright (C) 2008, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ -#include -#include +#include +#include +#include #include #include #include +#include +#include +#include -#include -#include -#include -#include #include +#include +#include +#include /* * mt9m001 i2c address 0x5d - * The platform has to define struct i2c_board_info objects and link to them - * from struct soc_camera_host_desc */ /* mt9m001 selected register addresses */ @@ -50,6 +48,8 @@ #define MT9M001_MIN_HEIGHT 32 #define MT9M001_COLUMN_SKIP 20 #define MT9M001_ROW_SKIP 12 +#define MT9M001_DEFAULT_HBLANK 9 +#define MT9M001_DEFAULT_VBLANK 25 /* MT9M001 has only one fixed colorspace per pixelcode */ struct mt9m001_datafmt { @@ -93,13 +93,18 @@ struct mt9m001 { struct v4l2_ctrl *autoexposure; struct v4l2_ctrl *exposure; }; + bool streaming; + struct mutex mutex; struct v4l2_rect rect; /* Sensor window */ - struct v4l2_clk *clk; + struct clk *clk; + struct gpio_desc *standby_gpio; + struct gpio_desc *reset_gpio; const struct mt9m001_datafmt *fmt; const struct mt9m001_datafmt *fmts; int num_fmts; unsigned int total_h; unsigned short y_skip_top; /* Lines to skip at the top */ + struct media_pad pad; }; static struct mt9m001 *to_mt9m001(const struct i2c_client *client) @@ -140,35 +145,111 @@ static int reg_clear(struct i2c_client *client, const u8 reg, return reg_write(client, reg, ret & ~data); } +struct mt9m001_reg { + u8 reg; + u16 data; +}; + +static int multi_reg_write(struct i2c_client *client, + const struct mt9m001_reg *regs, int num) +{ + int i; + + for (i = 0; i < num; i++) { + int ret = reg_write(client, regs[i].reg, regs[i].data); + + if (ret) + return ret; + } + + return 0; +} + static int mt9m001_init(struct i2c_client *client) { - int ret; + const struct mt9m001_reg init_regs[] = { + /* + * Issue a soft reset. This returns all registers to their + * default values. + */ + { MT9M001_RESET, 1 }, + { MT9M001_RESET, 0 }, + /* Disable chip, synchronous option update */ + { MT9M001_OUTPUT_CONTROL, 0 } + }; dev_dbg(&client->dev, "%s\n", __func__); - /* - * We don't know, whether platform provides reset, issue a soft reset - * too. This returns all registers to their default values. - */ - ret = reg_write(client, MT9M001_RESET, 1); - if (!ret) - ret = reg_write(client, MT9M001_RESET, 0); + return multi_reg_write(client, init_regs, ARRAY_SIZE(init_regs)); +} - /* Disable chip, synchronous option update */ - if (!ret) - ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 0); +static int mt9m001_apply_selection(struct v4l2_subdev *sd) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct mt9m001 *mt9m001 = to_mt9m001(client); + const struct mt9m001_reg regs[] = { + /* Blanking and start values - default... */ + { MT9M001_HORIZONTAL_BLANKING, MT9M001_DEFAULT_HBLANK }, + { MT9M001_VERTICAL_BLANKING, MT9M001_DEFAULT_VBLANK }, + /* + * The caller provides a supported format, as verified per + * call to .set_fmt(FORMAT_TRY). 
+ */ + { MT9M001_COLUMN_START, mt9m001->rect.left }, + { MT9M001_ROW_START, mt9m001->rect.top }, + { MT9M001_WINDOW_WIDTH, mt9m001->rect.width - 1 }, + { MT9M001_WINDOW_HEIGHT, + mt9m001->rect.height + mt9m001->y_skip_top - 1 }, + }; - return ret; + return multi_reg_write(client, regs, ARRAY_SIZE(regs)); } static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = v4l2_get_subdevdata(sd); + struct mt9m001 *mt9m001 = to_mt9m001(client); + int ret = 0; + + mutex_lock(&mt9m001->mutex); + + if (mt9m001->streaming == enable) + goto done; + + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) + goto put_unlock; + + ret = mt9m001_apply_selection(sd); + if (ret) + goto put_unlock; + + ret = __v4l2_ctrl_handler_setup(&mt9m001->hdl); + if (ret) + goto put_unlock; + + /* Switch to master "normal" mode */ + ret = reg_write(client, MT9M001_OUTPUT_CONTROL, 2); + if (ret < 0) + goto put_unlock; + } else { + /* Switch to master stop sensor readout */ + reg_write(client, MT9M001_OUTPUT_CONTROL, 0); + pm_runtime_put(&client->dev); + } + + mt9m001->streaming = enable; +done: + mutex_unlock(&mt9m001->mutex); - /* Switch to master "normal" mode or stop sensor readout */ - if (reg_write(client, MT9M001_OUTPUT_CONTROL, enable ? 2 : 0) < 0) - return -EIO; return 0; + +put_unlock: + pm_runtime_put(&client->dev); + mutex_unlock(&mt9m001->mutex); + + return ret; } static int mt9m001_set_selection(struct v4l2_subdev *sd, @@ -178,8 +259,6 @@ static int mt9m001_set_selection(struct v4l2_subdev *sd, struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m001 *mt9m001 = to_mt9m001(client); struct v4l2_rect rect = sel->r; - const u16 hblank = 9, vblank = 25; - int ret; if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || sel->target != V4L2_SEL_TGT_CROP) @@ -196,39 +275,22 @@ static int mt9m001_set_selection(struct v4l2_subdev *sd, rect.width = ALIGN(rect.width, 2); rect.left = ALIGN(rect.left, 2); - soc_camera_limit_side(&rect.left, &rect.width, - MT9M001_COLUMN_SKIP, MT9M001_MIN_WIDTH, MT9M001_MAX_WIDTH); + rect.width = clamp_t(u32, rect.width, MT9M001_MIN_WIDTH, + MT9M001_MAX_WIDTH); + rect.left = clamp_t(u32, rect.left, MT9M001_COLUMN_SKIP, + MT9M001_COLUMN_SKIP + MT9M001_MAX_WIDTH - rect.width); - soc_camera_limit_side(&rect.top, &rect.height, - MT9M001_ROW_SKIP, MT9M001_MIN_HEIGHT, MT9M001_MAX_HEIGHT); + rect.height = clamp_t(u32, rect.height, MT9M001_MIN_HEIGHT, + MT9M001_MAX_HEIGHT); + rect.top = clamp_t(u32, rect.top, MT9M001_ROW_SKIP, + MT9M001_ROW_SKIP + MT9M001_MAX_HEIGHT - rect.height); - mt9m001->total_h = rect.height + mt9m001->y_skip_top + vblank; + mt9m001->total_h = rect.height + mt9m001->y_skip_top + + MT9M001_DEFAULT_VBLANK; - /* Blanking and start values - default... */ - ret = reg_write(client, MT9M001_HORIZONTAL_BLANKING, hblank); - if (!ret) - ret = reg_write(client, MT9M001_VERTICAL_BLANKING, vblank); + mt9m001->rect = rect; - /* - * The caller provides a supported format, as verified per - * call to .set_fmt(FORMAT_TRY). 
- */ - if (!ret) - ret = reg_write(client, MT9M001_COLUMN_START, rect.left); - if (!ret) - ret = reg_write(client, MT9M001_ROW_START, rect.top); - if (!ret) - ret = reg_write(client, MT9M001_WINDOW_WIDTH, rect.width - 1); - if (!ret) - ret = reg_write(client, MT9M001_WINDOW_HEIGHT, - rect.height + mt9m001->y_skip_top - 1); - if (!ret && v4l2_ctrl_g_ctrl(mt9m001->autoexposure) == V4L2_EXPOSURE_AUTO) - ret = reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h); - - if (!ret) - mt9m001->rect = rect; - - return ret; + return 0; } static int mt9m001_get_selection(struct v4l2_subdev *sd, @@ -267,11 +329,20 @@ static int mt9m001_get_fmt(struct v4l2_subdev *sd, if (format->pad) return -EINVAL; + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { + mf = v4l2_subdev_get_try_format(sd, cfg, 0); + format->format = *mf; + return 0; + } + mf->width = mt9m001->rect.width; mf->height = mt9m001->rect.height; mf->code = mt9m001->fmt->code; mf->colorspace = mt9m001->fmt->colorspace; mf->field = V4L2_FIELD_NONE; + mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mf->quantization = V4L2_QUANTIZATION_DEFAULT; + mf->xfer_func = V4L2_XFER_FUNC_DEFAULT; return 0; } @@ -332,6 +403,10 @@ static int mt9m001_set_fmt(struct v4l2_subdev *sd, } mf->colorspace = fmt->colorspace; + mf->field = V4L2_FIELD_NONE; + mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mf->quantization = V4L2_QUANTIZATION_DEFAULT; + mf->xfer_func = V4L2_XFER_FUNC_DEFAULT; if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) return mt9m001_s_fmt(sd, fmt, mf); @@ -372,13 +447,40 @@ static int mt9m001_s_register(struct v4l2_subdev *sd, } #endif -static int mt9m001_s_power(struct v4l2_subdev *sd, int on) +static int mt9m001_power_on(struct device *dev) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); + struct i2c_client *client = to_i2c_client(dev); struct mt9m001 *mt9m001 = to_mt9m001(client); + int ret; + + ret = clk_prepare_enable(mt9m001->clk); + if (ret) + return ret; + + if (mt9m001->standby_gpio) { + gpiod_set_value_cansleep(mt9m001->standby_gpio, 0); + usleep_range(1000, 2000); + } + + if (mt9m001->reset_gpio) { + gpiod_set_value_cansleep(mt9m001->reset_gpio, 1); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(mt9m001->reset_gpio, 0); + usleep_range(1000, 2000); + } - return soc_camera_set_power(&client->dev, ssdd, mt9m001->clk, on); + return 0; +} + +static int mt9m001_power_off(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct mt9m001 *mt9m001 = to_mt9m001(client); + + gpiod_set_value_cansleep(mt9m001->standby_gpio, 1); + clk_disable_unprepare(mt9m001->clk); + + return 0; } static int mt9m001_g_volatile_ctrl(struct v4l2_ctrl *ctrl) @@ -406,16 +508,18 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl) struct i2c_client *client = v4l2_get_subdevdata(sd); struct v4l2_ctrl *exp = mt9m001->exposure; int data; + int ret; + + if (!pm_runtime_get_if_in_use(&client->dev)) + return 0; switch (ctrl->id) { case V4L2_CID_VFLIP: if (ctrl->val) - data = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000); + ret = reg_set(client, MT9M001_READ_OPTIONS2, 0x8000); else - data = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000); - if (data < 0) - return -EIO; - return 0; + ret = reg_clear(client, MT9M001_READ_OPTIONS2, 0x8000); + break; case V4L2_CID_GAIN: /* See Datasheet Table 7, Gain settings. 
*/ @@ -425,9 +529,7 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl) data = ((ctrl->val - (s32)ctrl->minimum) * 8 + range / 2) / range; dev_dbg(&client->dev, "Setting gain %d\n", data); - data = reg_write(client, MT9M001_GLOBAL_GAIN, data); - if (data < 0) - return -EIO; + ret = reg_write(client, MT9M001_GLOBAL_GAIN, data); } else { /* Pack it into 1.125..15 variable step, register values 9..67 */ /* We assume qctrl->maximum - qctrl->default_value - 1 > 0 */ @@ -444,11 +546,9 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl) dev_dbg(&client->dev, "Setting gain from %d to %d\n", reg_read(client, MT9M001_GLOBAL_GAIN), data); - data = reg_write(client, MT9M001_GLOBAL_GAIN, data); - if (data < 0) - return -EIO; + ret = reg_write(client, MT9M001_GLOBAL_GAIN, data); } - return 0; + break; case V4L2_CID_EXPOSURE_AUTO: if (ctrl->val == V4L2_EXPOSURE_MANUAL) { @@ -459,37 +559,34 @@ static int mt9m001_s_ctrl(struct v4l2_ctrl *ctrl) dev_dbg(&client->dev, "Setting shutter width from %d to %lu\n", reg_read(client, MT9M001_SHUTTER_WIDTH), shutter); - if (reg_write(client, MT9M001_SHUTTER_WIDTH, shutter) < 0) - return -EIO; + ret = reg_write(client, MT9M001_SHUTTER_WIDTH, shutter); } else { - const u16 vblank = 25; - mt9m001->total_h = mt9m001->rect.height + - mt9m001->y_skip_top + vblank; - if (reg_write(client, MT9M001_SHUTTER_WIDTH, mt9m001->total_h) < 0) - return -EIO; + mt9m001->y_skip_top + MT9M001_DEFAULT_VBLANK; + ret = reg_write(client, MT9M001_SHUTTER_WIDTH, + mt9m001->total_h); } - return 0; + break; + default: + ret = -EINVAL; + break; } - return -EINVAL; + + pm_runtime_put(&client->dev); + + return ret; } /* * Interface active, can use i2c. If it fails, it can indeed mean, that * this wasn't our capture interface, so, we wait for the right one */ -static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd, - struct i2c_client *client) +static int mt9m001_video_probe(struct i2c_client *client) { struct mt9m001 *mt9m001 = to_mt9m001(client); s32 data; - unsigned long flags; int ret; - ret = mt9m001_s_power(&mt9m001->subdev, 1); - if (ret < 0) - return ret; - /* Enable the chip */ data = reg_write(client, MT9M001_CHIP_ENABLE, 1); dev_dbg(&client->dev, "write: %d\n", data); @@ -502,9 +599,11 @@ static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd, case 0x8411: case 0x8421: mt9m001->fmts = mt9m001_colour_fmts; + mt9m001->num_fmts = ARRAY_SIZE(mt9m001_colour_fmts); break; case 0x8431: mt9m001->fmts = mt9m001_monochrome_fmts; + mt9m001->num_fmts = ARRAY_SIZE(mt9m001_monochrome_fmts); break; default: dev_err(&client->dev, @@ -513,26 +612,6 @@ static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd, goto done; } - mt9m001->num_fmts = 0; - - /* - * This is a 10bit sensor, so by default we only allow 10bit. - * The platform may support different bus widths due to - * different routing of the data lines. 
- */ - if (ssdd->query_bus_param) - flags = ssdd->query_bus_param(ssdd); - else - flags = SOCAM_DATAWIDTH_10; - - if (flags & SOCAM_DATAWIDTH_10) - mt9m001->num_fmts++; - else - mt9m001->fmts++; - - if (flags & SOCAM_DATAWIDTH_8) - mt9m001->num_fmts++; - mt9m001->fmt = &mt9m001->fmts[0]; dev_info(&client->dev, "Detected a MT9M001 chip ID %x (%s)\n", data, @@ -548,16 +627,9 @@ static int mt9m001_video_probe(struct soc_camera_subdev_desc *ssdd, ret = v4l2_ctrl_handler_setup(&mt9m001->hdl); done: - mt9m001_s_power(&mt9m001->subdev, 0); return ret; } -static void mt9m001_video_remove(struct soc_camera_subdev_desc *ssdd) -{ - if (ssdd->free_bus) - ssdd->free_bus(ssdd); -} - static int mt9m001_g_skip_top_lines(struct v4l2_subdev *sd, u32 *lines) { struct i2c_client *client = v4l2_get_subdevdata(sd); @@ -574,13 +646,35 @@ static const struct v4l2_ctrl_ops mt9m001_ctrl_ops = { }; static const struct v4l2_subdev_core_ops mt9m001_subdev_core_ops = { + .log_status = v4l2_ctrl_subdev_log_status, + .subscribe_event = v4l2_ctrl_subdev_subscribe_event, + .unsubscribe_event = v4l2_event_subdev_unsubscribe, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = mt9m001_g_register, .s_register = mt9m001_s_register, #endif - .s_power = mt9m001_s_power, }; +static int mt9m001_init_cfg(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct mt9m001 *mt9m001 = to_mt9m001(client); + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, cfg, 0); + + try_fmt->width = MT9M001_MAX_WIDTH; + try_fmt->height = MT9M001_MAX_HEIGHT; + try_fmt->code = mt9m001->fmts[0].code; + try_fmt->colorspace = mt9m001->fmts[0].colorspace; + try_fmt->field = V4L2_FIELD_NONE; + try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT; + try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT; + + return 0; +} + static int mt9m001_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) @@ -598,41 +692,18 @@ static int mt9m001_enum_mbus_code(struct v4l2_subdev *sd, static int mt9m001_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - /* MT9M001 has all capture_format parameters fixed */ cfg->flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH | V4L2_MBUS_MASTER; cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); return 0; } -static int mt9m001_s_mbus_config(struct v4l2_subdev *sd, - const struct v4l2_mbus_config *cfg) -{ - const struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct mt9m001 *mt9m001 = to_mt9m001(client); - unsigned int bps = soc_mbus_get_fmtdesc(mt9m001->fmt->code)->bits_per_sample; - - if (ssdd->set_bus_param) - return ssdd->set_bus_param(ssdd, 1 << (bps - 1)); - - /* - * Without board specific bus width settings we only support the - * sensors native bus width - */ - return bps == 10 ? 
0 : -EINVAL; -} - static const struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = { .s_stream = mt9m001_s_stream, .g_mbus_config = mt9m001_g_mbus_config, - .s_mbus_config = mt9m001_s_mbus_config, }; static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = { @@ -640,6 +711,7 @@ static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = { }; static const struct v4l2_subdev_pad_ops mt9m001_subdev_pad_ops = { + .init_cfg = mt9m001_init_cfg, .enum_mbus_code = mt9m001_enum_mbus_code, .get_selection = mt9m001_get_selection, .set_selection = mt9m001_set_selection, @@ -659,25 +731,35 @@ static int mt9m001_probe(struct i2c_client *client, { struct mt9m001 *mt9m001; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); int ret; - if (!ssdd) { - dev_err(&client->dev, "MT9M001 driver needs platform data\n"); - return -EINVAL; - } - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_warn(&adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); return -EIO; } - mt9m001 = devm_kzalloc(&client->dev, sizeof(struct mt9m001), GFP_KERNEL); + mt9m001 = devm_kzalloc(&client->dev, sizeof(*mt9m001), GFP_KERNEL); if (!mt9m001) return -ENOMEM; + mt9m001->clk = devm_clk_get(&client->dev, NULL); + if (IS_ERR(mt9m001->clk)) + return PTR_ERR(mt9m001->clk); + + mt9m001->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby", + GPIOD_OUT_LOW); + if (IS_ERR(mt9m001->standby_gpio)) + return PTR_ERR(mt9m001->standby_gpio); + + mt9m001->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(mt9m001->reset_gpio)) + return PTR_ERR(mt9m001->reset_gpio); + v4l2_i2c_subdev_init(&mt9m001->subdev, client, &mt9m001_subdev_ops); + mt9m001->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | + V4L2_SUBDEV_FL_HAS_EVENTS; v4l2_ctrl_handler_init(&mt9m001->hdl, 4); v4l2_ctrl_new_std(&mt9m001->hdl, &mt9m001_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); @@ -699,6 +781,9 @@ static int mt9m001_probe(struct i2c_client *client, v4l2_ctrl_auto_cluster(2, &mt9m001->autoexposure, V4L2_EXPOSURE_MANUAL, true); + mutex_init(&mt9m001->mutex); + mt9m001->hdl.lock = &mt9m001->mutex; + /* Second stage probe - when a capture adapter is there */ mt9m001->y_skip_top = 0; mt9m001->rect.left = MT9M001_COLUMN_SKIP; @@ -706,18 +791,41 @@ static int mt9m001_probe(struct i2c_client *client, mt9m001->rect.width = MT9M001_MAX_WIDTH; mt9m001->rect.height = MT9M001_MAX_HEIGHT; - mt9m001->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(mt9m001->clk)) { - ret = PTR_ERR(mt9m001->clk); - goto eclkget; - } + ret = mt9m001_power_on(&client->dev); + if (ret) + goto error_hdl_free; - ret = mt9m001_video_probe(ssdd, client); - if (ret) { - v4l2_clk_put(mt9m001->clk); -eclkget: - v4l2_ctrl_handler_free(&mt9m001->hdl); - } + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + + ret = mt9m001_video_probe(client); + if (ret) + goto error_power_off; + + mt9m001->pad.flags = MEDIA_PAD_FL_SOURCE; + mt9m001->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR; + ret = media_entity_pads_init(&mt9m001->subdev.entity, 1, &mt9m001->pad); + if (ret) + goto error_power_off; + + ret = v4l2_async_register_subdev(&mt9m001->subdev); + if (ret) + goto error_entity_cleanup; + + pm_runtime_idle(&client->dev); + + return 0; + +error_entity_cleanup: + media_entity_cleanup(&mt9m001->subdev.entity); +error_power_off: + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + 
mt9m001_power_off(&client->dev); + +error_hdl_free: + v4l2_ctrl_handler_free(&mt9m001->hdl); + mutex_destroy(&mt9m001->mutex); return ret; } @@ -725,12 +833,19 @@ eclkget: static int mt9m001_remove(struct i2c_client *client) { struct mt9m001 *mt9m001 = to_mt9m001(client); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - v4l2_clk_put(mt9m001->clk); - v4l2_device_unregister_subdev(&mt9m001->subdev); + pm_runtime_get_sync(&client->dev); + + v4l2_async_unregister_subdev(&mt9m001->subdev); + media_entity_cleanup(&mt9m001->subdev.entity); + + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); + pm_runtime_put_noidle(&client->dev); + mt9m001_power_off(&client->dev); + v4l2_ctrl_handler_free(&mt9m001->hdl); - mt9m001_video_remove(ssdd); + mutex_destroy(&mt9m001->mutex); return 0; } @@ -741,9 +856,21 @@ static const struct i2c_device_id mt9m001_id[] = { }; MODULE_DEVICE_TABLE(i2c, mt9m001_id); +static const struct dev_pm_ops mt9m001_pm_ops = { + SET_RUNTIME_PM_OPS(mt9m001_power_off, mt9m001_power_on, NULL) +}; + +static const struct of_device_id mt9m001_of_match[] = { + { .compatible = "onnn,mt9m001", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, mt9m001_of_match); + static struct i2c_driver mt9m001_i2c_driver = { .driver = { .name = "mt9m001", + .pm = &mt9m001_pm_ops, + .of_match_table = mt9m001_of_match, }, .probe = mt9m001_probe, .remove = mt9m001_remove, @@ -754,4 +881,4 @@ module_i2c_driver(mt9m001_i2c_driver); MODULE_DESCRIPTION("Micron MT9M001 Camera driver"); MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c index d639b9bcf64a..5168bb5880c4 100644 --- a/drivers/media/i2c/mt9m111.c +++ b/drivers/media/i2c/mt9m111.c @@ -528,11 +528,24 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd, if (format->pad) return -EINVAL; + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { +#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API + mf = v4l2_subdev_get_try_format(sd, cfg, format->pad); + format->format = *mf; + return 0; +#else + return -ENOTTY; +#endif + } + mf->width = mt9m111->width; mf->height = mt9m111->height; mf->code = mt9m111->fmt->code; mf->colorspace = mt9m111->fmt->colorspace; mf->field = V4L2_FIELD_NONE; + mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mf->quantization = V4L2_QUANTIZATION_DEFAULT; + mf->xfer_func = V4L2_XFER_FUNC_DEFAULT; return 0; } @@ -660,6 +673,10 @@ static int mt9m111_set_fmt(struct v4l2_subdev *sd, mf->code = fmt->code; mf->colorspace = fmt->colorspace; + mf->field = V4L2_FIELD_NONE; + mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mf->quantization = V4L2_QUANTIZATION_DEFAULT; + mf->xfer_func = V4L2_XFER_FUNC_DEFAULT; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { cfg->try_fmt = *mf; @@ -1089,6 +1106,25 @@ static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable) return 0; } +static int mt9m111_init_cfg(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg) +{ +#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API + struct v4l2_mbus_framefmt *format = + v4l2_subdev_get_try_format(sd, cfg, 0); + + format->width = MT9M111_MAX_WIDTH; + format->height = MT9M111_MAX_HEIGHT; + format->code = mt9m111_colour_fmts[0].code; + format->colorspace = mt9m111_colour_fmts[0].colorspace; + format->field = V4L2_FIELD_NONE; + format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + format->quantization = V4L2_QUANTIZATION_DEFAULT; + format->xfer_func = V4L2_XFER_FUNC_DEFAULT; +#endif + return 0; +} + static int mt9m111_g_mbus_config(struct v4l2_subdev *sd, 
struct v4l2_mbus_config *cfg) { @@ -1114,6 +1150,7 @@ static const struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = { }; static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = { + .init_cfg = mt9m111_init_cfg, .enum_mbus_code = mt9m111_enum_mbus_code, .get_selection = mt9m111_get_selection, .set_selection = mt9m111_set_selection, @@ -1273,6 +1310,8 @@ static int mt9m111_probe(struct i2c_client *client, mt9m111->rect.top = MT9M111_MIN_DARK_ROWS; mt9m111->rect.width = MT9M111_MAX_WIDTH; mt9m111->rect.height = MT9M111_MAX_HEIGHT; + mt9m111->width = mt9m111->rect.width; + mt9m111->height = mt9m111->rect.height; mt9m111->fmt = &mt9m111_colour_fmts[0]; mt9m111->lastpage = -1; mutex_init(&mt9m111->power_lock); diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c index ef353a244e33..ae3c336eadf5 100644 --- a/drivers/media/i2c/mt9t112.c +++ b/drivers/media/i2c/mt9t112.c @@ -541,7 +541,7 @@ static int mt9t112_init_setting(const struct i2c_client *client) mt9t112_mcu_write(ret, client, VAR(18, 109), 0x0AF0); /* - * Flicker Dectection registers. + * Flicker Detection registers. * This section should be replaced whenever new timing file is * generated. All the following registers need to be replaced. * Following registers are generated from Register Wizard but user can diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c index 5d2d6735cc78..83031cfc7914 100644 --- a/drivers/media/i2c/ov2640.c +++ b/drivers/media/i2c/ov2640.c @@ -842,9 +842,6 @@ static int ov2640_set_params(struct i2c_client *client, u8 val; int ret; - if (!win) - return -EINVAL; - switch (code) { case MEDIA_BUS_FMT_RGB565_2X8_BE: dev_dbg(&client->dev, "%s: Selected cfmt RGB565 BE", __func__); @@ -929,9 +926,14 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd, if (format->pad) return -EINVAL; - if (!priv->win) { - priv->win = ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT); - priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8; + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { +#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API + mf = v4l2_subdev_get_try_format(sd, cfg, 0); + format->format = *mf; + return 0; +#else + return -ENOTTY; +#endif } mf->width = priv->win->width; @@ -939,6 +941,9 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd, mf->code = priv->cfmt_code; mf->colorspace = V4L2_COLORSPACE_SRGB; mf->field = V4L2_FIELD_NONE; + mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mf->quantization = V4L2_QUANTIZATION_DEFAULT; + mf->xfer_func = V4L2_XFER_FUNC_DEFAULT; return 0; } @@ -965,6 +970,9 @@ static int ov2640_set_fmt(struct v4l2_subdev *sd, mf->field = V4L2_FIELD_NONE; mf->colorspace = V4L2_COLORSPACE_SRGB; + mf->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + mf->quantization = V4L2_QUANTIZATION_DEFAULT; + mf->xfer_func = V4L2_XFER_FUNC_DEFAULT; switch (mf->code) { case MEDIA_BUS_FMT_RGB565_2X8_BE: @@ -999,6 +1007,27 @@ out: return ret; } +static int ov2640_init_cfg(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg) +{ +#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API + struct v4l2_mbus_framefmt *try_fmt = + v4l2_subdev_get_try_format(sd, cfg, 0); + const struct ov2640_win_size *win = + ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT); + + try_fmt->width = win->width; + try_fmt->height = win->height; + try_fmt->code = MEDIA_BUS_FMT_UYVY8_2X8; + try_fmt->colorspace = V4L2_COLORSPACE_SRGB; + try_fmt->field = V4L2_FIELD_NONE; + try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT; + try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT; +#endif + return 0; +} + static int 
ov2640_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) @@ -1108,6 +1137,7 @@ static const struct v4l2_subdev_core_ops ov2640_subdev_core_ops = { }; static const struct v4l2_subdev_pad_ops ov2640_subdev_pad_ops = { + .init_cfg = ov2640_init_cfg, .enum_mbus_code = ov2640_enum_mbus_code, .get_selection = ov2640_get_selection, .get_fmt = ov2640_get_fmt, @@ -1193,6 +1223,9 @@ static int ov2640_probe(struct i2c_client *client, if (ret) goto err_clk; + priv->win = ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT); + priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8; + v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops); priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index bef3f3aae0ed..82d4ce93312c 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -83,6 +83,9 @@ #define OV5640_REG_SIGMADELTA_CTRL0C 0x3c0c #define OV5640_REG_FRAME_CTRL01 0x4202 #define OV5640_REG_FORMAT_CONTROL00 0x4300 +#define OV5640_REG_VFIFO_HSIZE 0x4602 +#define OV5640_REG_VFIFO_VSIZE 0x4604 +#define OV5640_REG_JPG_MODE_SELECT 0x4713 #define OV5640_REG_POLARITY_CTRL00 0x4740 #define OV5640_REG_MIPI_CTRL00 0x4800 #define OV5640_REG_DEBUG_MODE 0x4814 @@ -115,6 +118,15 @@ enum ov5640_frame_rate { OV5640_NUM_FRAMERATES, }; +enum ov5640_format_mux { + OV5640_FMT_MUX_YUV422 = 0, + OV5640_FMT_MUX_RGB, + OV5640_FMT_MUX_DITHER, + OV5640_FMT_MUX_RAW_DPC, + OV5640_FMT_MUX_SNR_RAW, + OV5640_FMT_MUX_RAW_CIP, +}; + struct ov5640_pixfmt { u32 code; u32 colorspace; @@ -126,6 +138,10 @@ static const struct ov5640_pixfmt ov5640_formats[] = { { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_SRGB, }, { MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB, }, { MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB, }, + { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_COLORSPACE_SRGB, }, + { MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_COLORSPACE_SRGB, }, + { MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_COLORSPACE_SRGB, }, + { MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_COLORSPACE_SRGB, }, }; /* @@ -288,7 +304,7 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = { {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0}, {0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0}, {0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0}, - {0x501f, 0x00, 0, 0}, {0x4713, 0x03, 0, 0}, {0x4407, 0x04, 0, 0}, + {0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0}, {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0}, @@ -357,7 +373,7 @@ static const struct reg_value ov5640_setting_VGA_640_480[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, }; @@ -376,7 +392,7 @@ static const struct reg_value ov5640_setting_XGA_1024_768[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, 
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, }; @@ -395,7 +411,7 @@ static const struct reg_value ov5640_setting_QVGA_320_240[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, }; @@ -414,7 +430,7 @@ static const struct reg_value ov5640_setting_QCIF_176_144[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, }; @@ -433,7 +449,7 @@ static const struct reg_value ov5640_setting_NTSC_720_480[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, }; @@ -452,7 +468,7 @@ static const struct reg_value ov5640_setting_PAL_720_576[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, }; @@ -471,7 +487,7 @@ static const struct reg_value ov5640_setting_720P_1280_720[] = { {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0}, {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0}, }; @@ -491,7 +507,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0}, @@ -503,7 +519,7 @@ static const struct reg_value ov5640_setting_1080P_1920_1080[] = { {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0}, - {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 
0x04, 0, 0}, + {0x3a15, 0x60, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0}, {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0}, }; @@ -522,7 +538,7 @@ static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = { {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0}, {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0}, - {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0}, + {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0}, {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 70}, }; @@ -705,7 +721,7 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg, /* * After trying the various combinations, reading various - * documentations spreaded around the net, and from the various + * documentations spread around the net, and from the various * feedback, the clock tree is probably as follows: * * +--------------+ @@ -1030,12 +1046,42 @@ static int ov5640_set_dvp_pclk(struct ov5640_dev *sensor, unsigned long rate) (ilog2(pclk_div) << 4)); } +/* set JPEG framing sizes */ +static int ov5640_set_jpeg_timings(struct ov5640_dev *sensor, + const struct ov5640_mode_info *mode) +{ + int ret; + + /* + * compression mode 3 timing + * + * Data is transmitted with programmable width (VFIFO_HSIZE). + * No padding done. Last line may have less data. Varying + * number of lines per frame, depending on amount of data. + */ + ret = ov5640_mod_reg(sensor, OV5640_REG_JPG_MODE_SELECT, 0x7, 0x3); + if (ret < 0) + return ret; + + ret = ov5640_write_reg16(sensor, OV5640_REG_VFIFO_HSIZE, mode->hact); + if (ret < 0) + return ret; + + return ov5640_write_reg16(sensor, OV5640_REG_VFIFO_VSIZE, mode->vact); +} + /* download ov5640 settings to sensor through i2c */ static int ov5640_set_timings(struct ov5640_dev *sensor, const struct ov5640_mode_info *mode) { int ret; + if (sensor->fmt.code == MEDIA_BUS_FMT_JPEG_1X8) { + ret = ov5640_set_jpeg_timings(sensor, mode); + if (ret < 0) + return ret; + } + ret = ov5640_write_reg16(sensor, OV5640_REG_TIMING_DVPHO, mode->hact); if (ret < 0) return ret; @@ -1893,7 +1939,7 @@ static void ov5640_reset(struct ov5640_dev *sensor) usleep_range(1000, 2000); gpiod_set_value_cansleep(sensor->reset_gpio, 0); - usleep_range(5000, 10000); + usleep_range(20000, 25000); } static int ov5640_set_power_on(struct ov5640_dev *sensor) @@ -2059,7 +2105,7 @@ static int ov5640_try_frame_interval(struct ov5640_dev *sensor, u32 width, u32 height) { const struct ov5640_mode_info *mode; - enum ov5640_frame_rate rate = OV5640_30_FPS; + enum ov5640_frame_rate rate = OV5640_15_FPS; int minfps, maxfps, best_fps, fps; int i; @@ -2200,46 +2246,67 @@ static int ov5640_set_framefmt(struct ov5640_dev *sensor, struct v4l2_mbus_framefmt *format) { int ret = 0; - bool is_rgb = false; bool is_jpeg = false; - u8 val; + u8 fmt, mux; switch (format->code) { case MEDIA_BUS_FMT_UYVY8_2X8: /* YUV422, UYVY */ - val = 0x3f; + fmt = 0x3f; + mux = OV5640_FMT_MUX_YUV422; break; case MEDIA_BUS_FMT_YUYV8_2X8: /* YUV422, YUYV */ - val = 0x30; + fmt = 0x30; + mux = OV5640_FMT_MUX_YUV422; break; case MEDIA_BUS_FMT_RGB565_2X8_LE: /* RGB565 {g[2:0],b[4:0]},{r[4:0],g[5:3]} */ - val = 0x6F; - is_rgb = true; + fmt = 0x6F; + mux = OV5640_FMT_MUX_RGB; break; case MEDIA_BUS_FMT_RGB565_2X8_BE: /* RGB565 {r[4:0],g[5:3]},{g[2:0],b[4:0]} */ - val = 0x61; - is_rgb = true; + fmt = 0x61; + mux = OV5640_FMT_MUX_RGB; break; case MEDIA_BUS_FMT_JPEG_1X8: 
/* YUV422, YUYV */ - val = 0x30; + fmt = 0x30; + mux = OV5640_FMT_MUX_YUV422; is_jpeg = true; break; + case MEDIA_BUS_FMT_SBGGR8_1X8: + /* Raw, BGBG... / GRGR... */ + fmt = 0x00; + mux = OV5640_FMT_MUX_RAW_DPC; + break; + case MEDIA_BUS_FMT_SGBRG8_1X8: + /* Raw bayer, GBGB... / RGRG... */ + fmt = 0x01; + mux = OV5640_FMT_MUX_RAW_DPC; + break; + case MEDIA_BUS_FMT_SGRBG8_1X8: + /* Raw bayer, GRGR... / BGBG... */ + fmt = 0x02; + mux = OV5640_FMT_MUX_RAW_DPC; + break; + case MEDIA_BUS_FMT_SRGGB8_1X8: + /* Raw bayer, RGRG... / GBGB... */ + fmt = 0x03; + mux = OV5640_FMT_MUX_RAW_DPC; + break; default: return -EINVAL; } /* FORMAT CONTROL00: YUV and RGB formatting */ - ret = ov5640_write_reg(sensor, OV5640_REG_FORMAT_CONTROL00, val); + ret = ov5640_write_reg(sensor, OV5640_REG_FORMAT_CONTROL00, fmt); if (ret) return ret; /* FORMAT MUX CONTROL: ISP YUV or RGB */ - ret = ov5640_write_reg(sensor, OV5640_REG_ISP_FORMAT_MUX_CTRL, - is_rgb ? 0x01 : 0x00); + ret = ov5640_write_reg(sensor, OV5640_REG_ISP_FORMAT_MUX_CTRL, mux); if (ret) return ret; @@ -2407,10 +2474,41 @@ static int ov5640_set_ctrl_gain(struct ov5640_dev *sensor, bool auto_gain) return ret; } +static const char * const test_pattern_menu[] = { + "Disabled", + "Color bars", + "Color bars w/ rolling bar", + "Color squares", + "Color squares w/ rolling bar", +}; + +#define OV5640_TEST_ENABLE BIT(7) +#define OV5640_TEST_ROLLING BIT(6) /* rolling horizontal bar */ +#define OV5640_TEST_TRANSPARENT BIT(5) +#define OV5640_TEST_SQUARE_BW BIT(4) /* black & white squares */ +#define OV5640_TEST_BAR_STANDARD (0 << 2) +#define OV5640_TEST_BAR_VERT_CHANGE_1 (1 << 2) +#define OV5640_TEST_BAR_HOR_CHANGE (2 << 2) +#define OV5640_TEST_BAR_VERT_CHANGE_2 (3 << 2) +#define OV5640_TEST_BAR (0 << 0) +#define OV5640_TEST_RANDOM (1 << 0) +#define OV5640_TEST_SQUARE (2 << 0) +#define OV5640_TEST_BLACK (3 << 0) + +static const u8 test_pattern_val[] = { + 0, + OV5640_TEST_ENABLE | OV5640_TEST_BAR_VERT_CHANGE_1 | + OV5640_TEST_BAR, + OV5640_TEST_ENABLE | OV5640_TEST_ROLLING | + OV5640_TEST_BAR_VERT_CHANGE_1 | OV5640_TEST_BAR, + OV5640_TEST_ENABLE | OV5640_TEST_SQUARE, + OV5640_TEST_ENABLE | OV5640_TEST_ROLLING | OV5640_TEST_SQUARE, +}; + static int ov5640_set_ctrl_test_pattern(struct ov5640_dev *sensor, int value) { - return ov5640_mod_reg(sensor, OV5640_REG_PRE_ISP_TEST_SET1, - 0xa4, value ? 
0xa4 : 0); + return ov5640_write_reg(sensor, OV5640_REG_PRE_ISP_TEST_SET1, + test_pattern_val[value]); } static int ov5640_set_ctrl_light_freq(struct ov5640_dev *sensor, int value) @@ -2551,11 +2649,6 @@ static const struct v4l2_ctrl_ops ov5640_ctrl_ops = { .s_ctrl = ov5640_s_ctrl, }; -static const char * const test_pattern_menu[] = { - "Disabled", - "Color bars", -}; - static int ov5640_init_controls(struct ov5640_dev *sensor) { const struct v4l2_ctrl_ops *ops = &ov5640_ctrl_ops; diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c index 5d1b218bb7f0..c33fd584cb44 100644 --- a/drivers/media/i2c/ov6650.c +++ b/drivers/media/i2c/ov6650.c @@ -15,7 +15,7 @@ * Copyright (C) 2008 Magnus Damm * Copyright (C) 2008, Guennadi Liakhovetski * - * Hardware specific bits initialy based on former work by Matt Callow + * Hardware specific bits initially based on former work by Matt Callow * drivers/media/video/omap/sensor_ov6650.c * Copyright (C) 2006 Matt Callow * @@ -759,7 +759,7 @@ static int ov6650_s_frame_interval(struct v4l2_subdev *sd, /* * Keep result to be used as tpf limit - * for subseqent clock divider calculations + * for subsequent clock divider calculations */ priv->tpf.numerator = div; priv->tpf.denominator = FRAME_RATE_MAX; diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index a70a6ff7b36e..a7d26b294eb5 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -160,10 +160,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)"); #define REG_GFIX 0x69 /* Fix gain control */ #define REG_DBLV 0x6b /* PLL control an debugging */ -#define DBLV_BYPASS 0x00 /* Bypass PLL */ -#define DBLV_X4 0x01 /* clock x4 */ -#define DBLV_X6 0x10 /* clock x6 */ -#define DBLV_X8 0x11 /* clock x8 */ +#define DBLV_BYPASS 0x0a /* Bypass PLL */ +#define DBLV_X4 0x4a /* clock x4 */ +#define DBLV_X6 0x8a /* clock x6 */ +#define DBLV_X8 0xca /* clock x8 */ #define REG_SCALING_XSC 0x70 /* Test pattern and horizontal scale factor */ #define TEST_PATTTERN_0 0x80 @@ -241,7 +241,9 @@ struct ov7670_info { }; struct v4l2_mbus_framefmt format; struct ov7670_format_struct *fmt; /* Current format */ + struct ov7670_win_size *wsize; struct clk *clk; + int on; struct gpio_desc *resetb_gpio; struct gpio_desc *pwdn_gpio; unsigned int mbus_config; /* Media bus configuration flags */ @@ -810,13 +812,25 @@ static void ov7675_get_framerate(struct v4l2_subdev *sd, (4 * clkrc); } +static int ov7675_apply_framerate(struct v4l2_subdev *sd) +{ + struct ov7670_info *info = to_state(sd); + int ret; + + ret = ov7670_write(sd, REG_CLKRC, info->clkrc); + if (ret < 0) + return ret; + + return ov7670_write(sd, REG_DBLV, + info->pll_bypass ? DBLV_BYPASS : DBLV_X4); +} + static int ov7675_set_framerate(struct v4l2_subdev *sd, struct v4l2_fract *tpf) { struct ov7670_info *info = to_state(sd); u32 clkrc; int pll_factor; - int ret; /* * The formula is fps = 5/4*pixclk for YUV/RGB and @@ -825,19 +839,10 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd, * pixclk = clock_speed / (clkrc + 1) * PLLfactor * */ - if (info->pll_bypass) { - pll_factor = 1; - ret = ov7670_write(sd, REG_DBLV, DBLV_BYPASS); - } else { - pll_factor = PLL_FACTOR; - ret = ov7670_write(sd, REG_DBLV, DBLV_X4); - } - if (ret < 0) - return ret; - if (tpf->numerator == 0 || tpf->denominator == 0) { clkrc = 0; } else { + pll_factor = info->pll_bypass ? 
1 : PLL_FACTOR; clkrc = (5 * pll_factor * info->clock_speed * tpf->numerator) / (4 * tpf->denominator); if (info->fmt->mbus_code == MEDIA_BUS_FMT_SBGGR8_1X8) @@ -859,11 +864,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd, /* Recalculate frame rate */ ov7675_get_framerate(sd, tpf); - ret = ov7670_write(sd, REG_CLKRC, info->clkrc); - if (ret < 0) - return ret; - - return ov7670_write(sd, REG_DBLV, DBLV_X4); + return ov7675_apply_framerate(sd); } static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd, @@ -1004,48 +1005,20 @@ static int ov7670_try_fmt_internal(struct v4l2_subdev *sd, return 0; } -/* - * Set a format. - */ -static int ov7670_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) +static int ov7670_apply_fmt(struct v4l2_subdev *sd) { - struct ov7670_format_struct *ovfmt; - struct ov7670_win_size *wsize; struct ov7670_info *info = to_state(sd); -#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API - struct v4l2_mbus_framefmt *mbus_fmt; -#endif + struct ov7670_win_size *wsize = info->wsize; unsigned char com7, com10 = 0; int ret; - if (format->pad) - return -EINVAL; - - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL); - if (ret) - return ret; -#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API - mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad); - *mbus_fmt = format->format; - return 0; -#else - return -ENOTTY; -#endif - } - - ret = ov7670_try_fmt_internal(sd, &format->format, &ovfmt, &wsize); - if (ret) - return ret; /* * COM7 is a pain in the ass, it doesn't like to be read then * quickly written afterward. But we have everything we need * to set it absolutely here, as long as the format-specific * register sets list it first. */ - com7 = ovfmt->regs[0].value; + com7 = info->fmt->regs[0].value; com7 |= wsize->com7_bit; ret = ov7670_write(sd, REG_COM7, com7); if (ret) @@ -1067,7 +1040,7 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd, /* * Now write the rest of the array. Also store start/stops */ - ret = ov7670_write_array(sd, ovfmt->regs + 1); + ret = ov7670_write_array(sd, info->fmt->regs + 1); if (ret) return ret; @@ -1082,8 +1055,6 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd, return ret; } - info->fmt = ovfmt; - /* * If we're running RGB565, we must rewrite clkrc after setting * the other parameters or the image looks poor. If we're *not* @@ -1101,6 +1072,46 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd, return 0; } +/* + * Set a format. 
+ */ +static int ov7670_set_fmt(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *format) +{ + struct ov7670_info *info = to_state(sd); +#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API + struct v4l2_mbus_framefmt *mbus_fmt; +#endif + int ret; + + if (format->pad) + return -EINVAL; + + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { + ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL); + if (ret) + return ret; +#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API + mbus_fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad); + *mbus_fmt = format->format; + return 0; +#else + return -ENOTTY; +#endif + } + + ret = ov7670_try_fmt_internal(sd, &format->format, &info->fmt, &info->wsize); + if (ret) + return ret; + + ret = ov7670_apply_fmt(sd); + if (ret) + return ret; + + return 0; +} + static int ov7670_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) @@ -1607,17 +1618,57 @@ static int ov7670_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_regis } #endif -static int ov7670_s_power(struct v4l2_subdev *sd, int on) +static void ov7670_power_on(struct v4l2_subdev *sd) { struct ov7670_info *info = to_state(sd); + if (info->on) + return; + + clk_prepare_enable(info->clk); + if (info->pwdn_gpio) - gpiod_set_value(info->pwdn_gpio, !on); - if (on && info->resetb_gpio) { + gpiod_set_value(info->pwdn_gpio, 0); + if (info->resetb_gpio) { gpiod_set_value(info->resetb_gpio, 1); usleep_range(500, 1000); gpiod_set_value(info->resetb_gpio, 0); + } + if (info->pwdn_gpio || info->resetb_gpio || info->clk) usleep_range(3000, 5000); + + info->on = true; +} + +static void ov7670_power_off(struct v4l2_subdev *sd) +{ + struct ov7670_info *info = to_state(sd); + + if (!info->on) + return; + + clk_disable_unprepare(info->clk); + + if (info->pwdn_gpio) + gpiod_set_value(info->pwdn_gpio, 1); + + info->on = false; +} + +static int ov7670_s_power(struct v4l2_subdev *sd, int on) +{ + struct ov7670_info *info = to_state(sd); + + if (info->on == on) + return 0; + + if (on) { + ov7670_power_on (sd); + ov7670_apply_fmt(sd); + ov7675_apply_framerate(sd); + v4l2_ctrl_handler_setup(&info->hdl); + } else { + ov7670_power_off (sd); } return 0; @@ -1652,6 +1703,7 @@ static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) static const struct v4l2_subdev_core_ops ov7670_core_ops = { .reset = ov7670_reset, .init = ov7670_init, + .s_power = ov7670_s_power, .log_status = v4l2_ctrl_subdev_log_status, .subscribe_event = v4l2_ctrl_subdev_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, @@ -1801,11 +1853,7 @@ static int ov7670_probe(struct i2c_client *client, if (config->clock_speed) info->clock_speed = config->clock_speed; - /* - * It should be allowed for ov7670 too when it is migrated to - * the new frame rate formula. 
- */ - if (config->pll_bypass && id->driver_data != MODEL_OV7670) + if (config->pll_bypass) info->pll_bypass = true; if (config->pclk_hb_disable) @@ -1820,24 +1868,21 @@ static int ov7670_probe(struct i2c_client *client, else return ret; } - if (info->clk) { - ret = clk_prepare_enable(info->clk); - if (ret) - return ret; + ret = ov7670_init_gpio(client, info); + if (ret) + return ret; + + ov7670_power_on(sd); + + if (info->clk) { info->clock_speed = clk_get_rate(info->clk) / 1000000; if (info->clock_speed < 10 || info->clock_speed > 48) { ret = -EINVAL; - goto clk_disable; + goto power_off; } } - ret = ov7670_init_gpio(client, info); - if (ret) - goto clk_disable; - - ov7670_s_power(sd, 1); - /* Make sure it's an ov7670 */ ret = ov7670_detect(sd); if (ret) { @@ -1851,6 +1896,7 @@ static int ov7670_probe(struct i2c_client *client, info->devtype = &ov7670_devdata[id->driver_data]; info->fmt = &ov7670_formats[0]; + info->wsize = &info->devtype->win_sizes[0]; ov7670_get_default_format(sd, &info->format); @@ -1916,6 +1962,7 @@ static int ov7670_probe(struct i2c_client *client, if (ret < 0) goto entity_cleanup; + ov7670_power_off(sd); return 0; entity_cleanup: @@ -1923,13 +1970,10 @@ entity_cleanup: hdl_free: v4l2_ctrl_handler_free(&info->hdl); power_off: - ov7670_s_power(sd, 0); -clk_disable: - clk_disable_unprepare(info->clk); + ov7670_power_off(sd); return ret; } - static int ov7670_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); @@ -1937,9 +1981,8 @@ static int ov7670_remove(struct i2c_client *client) v4l2_async_unregister_subdev(sd); v4l2_ctrl_handler_free(&info->hdl); - clk_disable_unprepare(info->clk); media_entity_cleanup(&info->sd.entity); - ov7670_s_power(sd, 0); + ov7670_power_off(sd); return 0; } diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c index 177688afd9a6..dfece91ce96b 100644 --- a/drivers/media/i2c/ov7740.c +++ b/drivers/media/i2c/ov7740.c @@ -20,7 +20,7 @@ #define REG_BGAIN 0x01 /* blue gain */ #define REG_RGAIN 0x02 /* red gain */ #define REG_GGAIN 0x03 /* green gain */ -#define REG_REG04 0x04 /* analog setting, dont change*/ +#define REG_REG04 0x04 /* analog setting, don't change*/ #define REG_BAVG 0x05 /* b channel average */ #define REG_GAVG 0x06 /* g channel average */ #define REG_RAVG 0x07 /* r channel average */ @@ -1101,6 +1101,9 @@ static int ov7740_probe(struct i2c_client *client, if (ret) return ret; + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + ret = ov7740_detect(ov7740); if (ret) goto error_detect; @@ -1123,8 +1126,6 @@ static int ov7740_probe(struct i2c_client *client, if (ret) goto error_async_register; - pm_runtime_set_active(&client->dev); - pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; @@ -1134,6 +1135,8 @@ error_async_register: error_init_controls: ov7740_free_controls(ov7740); error_detect: + pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); ov7740_set_power(ov7740, 0); media_entity_cleanup(&ov7740->subdev.entity); diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c new file mode 100644 index 000000000000..dbf1095b9440 --- /dev/null +++ b/drivers/media/i2c/ov8856.c @@ -0,0 +1,1268 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2019 Intel Corporation. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OV8856_REG_VALUE_08BIT 1 +#define OV8856_REG_VALUE_16BIT 2 +#define OV8856_REG_VALUE_24BIT 3 + +#define OV8856_LINK_FREQ_360MHZ 360000000ULL +#define OV8856_LINK_FREQ_180MHZ 180000000ULL +#define OV8856_SCLK 144000000ULL +#define OV8856_MCLK 19200000 +#define OV8856_DATA_LANES 4 +#define OV8856_RGB_DEPTH 10 + +#define OV8856_REG_CHIP_ID 0x300a +#define OV8856_CHIP_ID 0x00885a + +#define OV8856_REG_MODE_SELECT 0x0100 +#define OV8856_MODE_STANDBY 0x00 +#define OV8856_MODE_STREAMING 0x01 + +/* vertical-timings from sensor */ +#define OV8856_REG_VTS 0x380e +#define OV8856_VTS_MAX 0x7fff + +/* horizontal-timings from sensor */ +#define OV8856_REG_HTS 0x380c + +/* Exposure controls from sensor */ +#define OV8856_REG_EXPOSURE 0x3500 +#define OV8856_EXPOSURE_MIN 6 +#define OV8856_EXPOSURE_MAX_MARGIN 6 +#define OV8856_EXPOSURE_STEP 1 + +/* Analog gain controls from sensor */ +#define OV8856_REG_ANALOG_GAIN 0x3508 +#define OV8856_ANAL_GAIN_MIN 128 +#define OV8856_ANAL_GAIN_MAX 2047 +#define OV8856_ANAL_GAIN_STEP 1 + +/* Digital gain controls from sensor */ +#define OV8856_REG_MWB_R_GAIN 0x5019 +#define OV8856_REG_MWB_G_GAIN 0x501b +#define OV8856_REG_MWB_B_GAIN 0x501d +#define OV8856_DGTL_GAIN_MIN 0 +#define OV8856_DGTL_GAIN_MAX 4095 +#define OV8856_DGTL_GAIN_STEP 1 +#define OV8856_DGTL_GAIN_DEFAULT 1024 + +/* Test Pattern Control */ +#define OV8856_REG_TEST_PATTERN 0x5e00 +#define OV8856_TEST_PATTERN_ENABLE BIT(7) +#define OV8856_TEST_PATTERN_BAR_SHIFT 2 + +#define to_ov8856(_sd) container_of(_sd, struct ov8856, sd) + +enum { + OV8856_LINK_FREQ_720MBPS, + OV8856_LINK_FREQ_360MBPS, +}; + +struct ov8856_reg { + u16 address; + u8 val; +}; + +struct ov8856_reg_list { + u32 num_of_regs; + const struct ov8856_reg *regs; +}; + +struct ov8856_link_freq_config { + const struct ov8856_reg_list reg_list; +}; + +struct ov8856_mode { + /* Frame width in pixels */ + u32 width; + + /* Frame height in pixels */ + u32 height; + + /* Horizontal timining size */ + u32 hts; + + /* Default vertical timining size */ + u32 vts_def; + + /* Min vertical timining size */ + u32 vts_min; + + /* Link frequency needed for this resolution */ + u32 link_freq_index; + + /* Sensor register settings for this resolution */ + const struct ov8856_reg_list reg_list; +}; + +static const struct ov8856_reg mipi_data_rate_720mbps[] = { + {0x0103, 0x01}, + {0x0100, 0x00}, + {0x0302, 0x4b}, + {0x0303, 0x01}, + {0x030b, 0x02}, + {0x030d, 0x4b}, + {0x031e, 0x0c}, +}; + +static const struct ov8856_reg mipi_data_rate_360mbps[] = { + {0x0103, 0x01}, + {0x0100, 0x00}, + {0x0302, 0x4b}, + {0x0303, 0x03}, + {0x030b, 0x02}, + {0x030d, 0x4b}, + {0x031e, 0x0c}, +}; + +static const struct ov8856_reg mode_3280x2464_regs[] = { + {0x3000, 0x20}, + {0x3003, 0x08}, + {0x300e, 0x20}, + {0x3010, 0x00}, + {0x3015, 0x84}, + {0x3018, 0x72}, + {0x3021, 0x23}, + {0x3033, 0x24}, + {0x3500, 0x00}, + {0x3501, 0x9a}, + {0x3502, 0x20}, + {0x3503, 0x08}, + {0x3505, 0x83}, + {0x3508, 0x01}, + {0x3509, 0x80}, + {0x350c, 0x00}, + {0x350d, 0x80}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3600, 0x72}, + {0x3601, 0x40}, + {0x3602, 0x30}, + {0x3610, 0xc5}, + {0x3611, 0x58}, + {0x3612, 0x5c}, + {0x3613, 0xca}, + {0x3614, 0x20}, + {0x3628, 0xff}, + {0x3629, 0xff}, + {0x362a, 0xff}, + {0x3633, 0x10}, + {0x3634, 0x10}, + {0x3635, 0x10}, + {0x3636, 0x10}, + {0x3663, 0x08}, + {0x3669, 0x34}, + {0x366e, 0x10}, + {0x3706, 0x86}, + 
{0x370b, 0x7e}, + {0x3714, 0x23}, + {0x3730, 0x12}, + {0x3733, 0x10}, + {0x3764, 0x00}, + {0x3765, 0x00}, + {0x3769, 0x62}, + {0x376a, 0x2a}, + {0x376b, 0x30}, + {0x3780, 0x00}, + {0x3781, 0x24}, + {0x3782, 0x00}, + {0x3783, 0x23}, + {0x3798, 0x2f}, + {0x37a1, 0x60}, + {0x37a8, 0x6a}, + {0x37ab, 0x3f}, + {0x37c2, 0x04}, + {0x37c3, 0xf1}, + {0x37c9, 0x80}, + {0x37cb, 0x16}, + {0x37cc, 0x16}, + {0x37cd, 0x16}, + {0x37ce, 0x16}, + {0x3800, 0x00}, + {0x3801, 0x00}, + {0x3802, 0x00}, + {0x3803, 0x07}, + {0x3804, 0x0c}, + {0x3805, 0xdf}, + {0x3806, 0x09}, + {0x3807, 0xa6}, + {0x3808, 0x0c}, + {0x3809, 0xd0}, + {0x380a, 0x09}, + {0x380b, 0xa0}, + {0x380c, 0x07}, + {0x380d, 0x88}, + {0x380e, 0x09}, + {0x380f, 0xb8}, + {0x3810, 0x00}, + {0x3811, 0x00}, + {0x3812, 0x00}, + {0x3813, 0x00}, + {0x3814, 0x01}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x10}, + {0x3820, 0x80}, + {0x3821, 0x46}, + {0x382a, 0x01}, + {0x382b, 0x01}, + {0x3830, 0x06}, + {0x3836, 0x02}, + {0x3862, 0x04}, + {0x3863, 0x08}, + {0x3cc0, 0x33}, + {0x3d85, 0x17}, + {0x3d8c, 0x73}, + {0x3d8d, 0xde}, + {0x4001, 0xe0}, + {0x4003, 0x40}, + {0x4008, 0x00}, + {0x4009, 0x0b}, + {0x400a, 0x00}, + {0x400b, 0x84}, + {0x400f, 0x80}, + {0x4010, 0xf0}, + {0x4011, 0xff}, + {0x4012, 0x02}, + {0x4013, 0x01}, + {0x4014, 0x01}, + {0x4015, 0x01}, + {0x4042, 0x00}, + {0x4043, 0x80}, + {0x4044, 0x00}, + {0x4045, 0x80}, + {0x4046, 0x00}, + {0x4047, 0x80}, + {0x4048, 0x00}, + {0x4049, 0x80}, + {0x4041, 0x03}, + {0x404c, 0x20}, + {0x404d, 0x00}, + {0x404e, 0x20}, + {0x4203, 0x80}, + {0x4307, 0x30}, + {0x4317, 0x00}, + {0x4503, 0x08}, + {0x4601, 0x80}, + {0x4800, 0x44}, + {0x4816, 0x53}, + {0x481b, 0x58}, + {0x481f, 0x27}, + {0x4837, 0x16}, + {0x483c, 0x0f}, + {0x484b, 0x05}, + {0x5000, 0x57}, + {0x5001, 0x0a}, + {0x5004, 0x04}, + {0x502e, 0x03}, + {0x5030, 0x41}, + {0x5780, 0x14}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x04}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3}, + {0x5795, 0x02}, + {0x5796, 0x20}, + {0x5797, 0x20}, + {0x5798, 0xd5}, + {0x5799, 0xd5}, + {0x579a, 0x00}, + {0x579b, 0x50}, + {0x579c, 0x00}, + {0x579d, 0x2c}, + {0x579e, 0x0c}, + {0x579f, 0x40}, + {0x57a0, 0x09}, + {0x57a1, 0x40}, + {0x59f8, 0x3d}, + {0x5a08, 0x02}, + {0x5b00, 0x02}, + {0x5b01, 0x10}, + {0x5b02, 0x03}, + {0x5b03, 0xcf}, + {0x5b05, 0x6c}, + {0x5e00, 0x00} +}; + +static const struct ov8856_reg mode_1640x1232_regs[] = { + {0x3000, 0x20}, + {0x3003, 0x08}, + {0x300e, 0x20}, + {0x3010, 0x00}, + {0x3015, 0x84}, + {0x3018, 0x72}, + {0x3021, 0x23}, + {0x3033, 0x24}, + {0x3500, 0x00}, + {0x3501, 0x4c}, + {0x3502, 0xe0}, + {0x3503, 0x08}, + {0x3505, 0x83}, + {0x3508, 0x01}, + {0x3509, 0x80}, + {0x350c, 0x00}, + {0x350d, 0x80}, + {0x350e, 0x04}, + {0x350f, 0x00}, + {0x3510, 0x00}, + {0x3511, 0x02}, + {0x3512, 0x00}, + {0x3600, 0x72}, + {0x3601, 0x40}, + {0x3602, 0x30}, + {0x3610, 0xc5}, + {0x3611, 0x58}, + {0x3612, 0x5c}, + {0x3613, 0xca}, + {0x3614, 0x20}, + {0x3628, 0xff}, + {0x3629, 0xff}, + {0x362a, 0xff}, + {0x3633, 0x10}, + {0x3634, 0x10}, + {0x3635, 0x10}, + {0x3636, 0x10}, + {0x3663, 0x08}, + {0x3669, 0x34}, + {0x366e, 0x08}, + {0x3706, 0x86}, + {0x370b, 0x7e}, + {0x3714, 0x27}, + {0x3730, 0x12}, + {0x3733, 0x10}, + {0x3764, 0x00}, + {0x3765, 0x00}, + {0x3769, 0x62}, + 
{0x376a, 0x2a}, + {0x376b, 0x30}, + {0x3780, 0x00}, + {0x3781, 0x24}, + {0x3782, 0x00}, + {0x3783, 0x23}, + {0x3798, 0x2f}, + {0x37a1, 0x60}, + {0x37a8, 0x6a}, + {0x37ab, 0x3f}, + {0x37c2, 0x14}, + {0x37c3, 0xf1}, + {0x37c9, 0x80}, + {0x37cb, 0x16}, + {0x37cc, 0x16}, + {0x37cd, 0x16}, + {0x37ce, 0x16}, + {0x3800, 0x00}, + {0x3801, 0x00}, + {0x3802, 0x00}, + {0x3803, 0x07}, + {0x3804, 0x0c}, + {0x3805, 0xdf}, + {0x3806, 0x09}, + {0x3807, 0xa6}, + {0x3808, 0x06}, + {0x3809, 0x68}, + {0x380a, 0x04}, + {0x380b, 0xd0}, + {0x380c, 0x0e}, + {0x380d, 0xec}, + {0x380e, 0x04}, + {0x380f, 0xe8}, + {0x3810, 0x00}, + {0x3811, 0x00}, + {0x3812, 0x00}, + {0x3813, 0x00}, + {0x3814, 0x03}, + {0x3815, 0x01}, + {0x3816, 0x00}, + {0x3817, 0x00}, + {0x3818, 0x00}, + {0x3819, 0x10}, + {0x3820, 0x90}, + {0x3821, 0x67}, + {0x382a, 0x03}, + {0x382b, 0x01}, + {0x3830, 0x06}, + {0x3836, 0x02}, + {0x3862, 0x04}, + {0x3863, 0x08}, + {0x3cc0, 0x33}, + {0x3d85, 0x17}, + {0x3d8c, 0x73}, + {0x3d8d, 0xde}, + {0x4001, 0xe0}, + {0x4003, 0x40}, + {0x4008, 0x00}, + {0x4009, 0x05}, + {0x400a, 0x00}, + {0x400b, 0x84}, + {0x400f, 0x80}, + {0x4010, 0xf0}, + {0x4011, 0xff}, + {0x4012, 0x02}, + {0x4013, 0x01}, + {0x4014, 0x01}, + {0x4015, 0x01}, + {0x4042, 0x00}, + {0x4043, 0x80}, + {0x4044, 0x00}, + {0x4045, 0x80}, + {0x4046, 0x00}, + {0x4047, 0x80}, + {0x4048, 0x00}, + {0x4049, 0x80}, + {0x4041, 0x03}, + {0x404c, 0x20}, + {0x404d, 0x00}, + {0x404e, 0x20}, + {0x4203, 0x80}, + {0x4307, 0x30}, + {0x4317, 0x00}, + {0x4503, 0x08}, + {0x4601, 0x80}, + {0x4800, 0x44}, + {0x4816, 0x53}, + {0x481b, 0x58}, + {0x481f, 0x27}, + {0x4837, 0x16}, + {0x483c, 0x0f}, + {0x484b, 0x05}, + {0x5000, 0x57}, + {0x5001, 0x0a}, + {0x5004, 0x04}, + {0x502e, 0x03}, + {0x5030, 0x41}, + {0x5780, 0x14}, + {0x5781, 0x0f}, + {0x5782, 0x44}, + {0x5783, 0x02}, + {0x5784, 0x01}, + {0x5785, 0x01}, + {0x5786, 0x00}, + {0x5787, 0x04}, + {0x5788, 0x02}, + {0x5789, 0x0f}, + {0x578a, 0xfd}, + {0x578b, 0xf5}, + {0x578c, 0xf5}, + {0x578d, 0x03}, + {0x578e, 0x08}, + {0x578f, 0x0c}, + {0x5790, 0x08}, + {0x5791, 0x04}, + {0x5792, 0x00}, + {0x5793, 0x52}, + {0x5794, 0xa3}, + {0x5795, 0x00}, + {0x5796, 0x10}, + {0x5797, 0x10}, + {0x5798, 0x73}, + {0x5799, 0x73}, + {0x579a, 0x00}, + {0x579b, 0x28}, + {0x579c, 0x00}, + {0x579d, 0x16}, + {0x579e, 0x06}, + {0x579f, 0x20}, + {0x57a0, 0x04}, + {0x57a1, 0xa0}, + {0x59f8, 0x3d}, + {0x5a08, 0x02}, + {0x5b00, 0x02}, + {0x5b01, 0x10}, + {0x5b02, 0x03}, + {0x5b03, 0xcf}, + {0x5b05, 0x6c}, + {0x5e00, 0x00} +}; + +static const char * const ov8856_test_pattern_menu[] = { + "Disabled", + "Standard Color Bar", + "Top-Bottom Darker Color Bar", + "Right-Left Darker Color Bar", + "Bottom-Top Darker Color Bar" +}; + +static const s64 link_freq_menu_items[] = { + OV8856_LINK_FREQ_360MHZ, + OV8856_LINK_FREQ_180MHZ +}; + +static const struct ov8856_link_freq_config link_freq_configs[] = { + [OV8856_LINK_FREQ_720MBPS] = { + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps), + .regs = mipi_data_rate_720mbps, + } + }, + [OV8856_LINK_FREQ_360MBPS] = { + .reg_list = { + .num_of_regs = ARRAY_SIZE(mipi_data_rate_360mbps), + .regs = mipi_data_rate_360mbps, + } + } +}; + +static const struct ov8856_mode supported_modes[] = { + { + .width = 3280, + .height = 2464, + .hts = 1928, + .vts_def = 2488, + .vts_min = 2488, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs), + .regs = mode_3280x2464_regs, + }, + .link_freq_index = OV8856_LINK_FREQ_720MBPS, + }, + { + .width = 1640, + .height = 1232, + .hts = 3820, + .vts_def = 1256, + 
.vts_min = 1256, + .reg_list = { + .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs), + .regs = mode_1640x1232_regs, + }, + .link_freq_index = OV8856_LINK_FREQ_360MBPS, + } +}; + +struct ov8856 { + struct v4l2_subdev sd; + struct media_pad pad; + struct v4l2_ctrl_handler ctrl_handler; + + /* V4L2 Controls */ + struct v4l2_ctrl *link_freq; + struct v4l2_ctrl *pixel_rate; + struct v4l2_ctrl *vblank; + struct v4l2_ctrl *hblank; + struct v4l2_ctrl *exposure; + + /* Current mode */ + const struct ov8856_mode *cur_mode; + + /* To serialize asynchronus callbacks */ + struct mutex mutex; + + /* Streaming on/off */ + bool streaming; +}; + +static u64 to_pixel_rate(u32 f_index) +{ + u64 pixel_rate = link_freq_menu_items[f_index] * 2 * OV8856_DATA_LANES; + + do_div(pixel_rate, OV8856_RGB_DEPTH); + + return pixel_rate; +} + +static u64 to_pixels_per_line(u32 hts, u32 f_index) +{ + u64 ppl = hts * to_pixel_rate(f_index); + + do_div(ppl, OV8856_SCLK); + + return ppl; +} + +static int ov8856_read_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 *val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + struct i2c_msg msgs[2]; + u8 addr_buf[2]; + u8 data_buf[4] = {0}; + int ret; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, addr_buf); + msgs[0].addr = client->addr; + msgs[0].flags = 0; + msgs[0].len = sizeof(addr_buf); + msgs[0].buf = addr_buf; + msgs[1].addr = client->addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = &data_buf[4 - len]; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return -EIO; + + *val = get_unaligned_be32(data_buf); + + return 0; +} + +static int ov8856_write_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 val) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + u8 buf[6]; + + if (len > 4) + return -EINVAL; + + put_unaligned_be16(reg, buf); + put_unaligned_be32(val << 8 * (4 - len), buf + 2); + if (i2c_master_send(client, buf, len + 2) != len + 2) + return -EIO; + + return 0; +} + +static int ov8856_write_reg_list(struct ov8856 *ov8856, + const struct ov8856_reg_list *r_list) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + unsigned int i; + int ret; + + for (i = 0; i < r_list->num_of_regs; i++) { + ret = ov8856_write_reg(ov8856, r_list->regs[i].address, 1, + r_list->regs[i].val); + if (ret) { + dev_err_ratelimited(&client->dev, + "failed to write reg 0x%4.4x. 
error = %d", + r_list->regs[i].address, ret); + return ret; + } + } + + return 0; +} + +static int ov8856_update_digital_gain(struct ov8856 *ov8856, u32 d_gain) +{ + int ret; + + ret = ov8856_write_reg(ov8856, OV8856_REG_MWB_R_GAIN, + OV8856_REG_VALUE_16BIT, d_gain); + if (ret) + return ret; + + ret = ov8856_write_reg(ov8856, OV8856_REG_MWB_G_GAIN, + OV8856_REG_VALUE_16BIT, d_gain); + if (ret) + return ret; + + return ov8856_write_reg(ov8856, OV8856_REG_MWB_B_GAIN, + OV8856_REG_VALUE_16BIT, d_gain); +} + +static int ov8856_test_pattern(struct ov8856 *ov8856, u32 pattern) +{ + if (pattern) + pattern = (pattern - 1) << OV8856_TEST_PATTERN_BAR_SHIFT | + OV8856_TEST_PATTERN_ENABLE; + + return ov8856_write_reg(ov8856, OV8856_REG_TEST_PATTERN, + OV8856_REG_VALUE_08BIT, pattern); +} + +static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl) +{ + struct ov8856 *ov8856 = container_of(ctrl->handler, + struct ov8856, ctrl_handler); + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + s64 exposure_max; + int ret = 0; + + /* Propagate change of current control to all related controls */ + if (ctrl->id == V4L2_CID_VBLANK) { + /* Update max exposure while meeting expected vblanking */ + exposure_max = ov8856->cur_mode->height + ctrl->val - + OV8856_EXPOSURE_MAX_MARGIN; + __v4l2_ctrl_modify_range(ov8856->exposure, + ov8856->exposure->minimum, + exposure_max, ov8856->exposure->step, + exposure_max); + } + + /* V4L2 controls values will be applied only when power is already up */ + if (!pm_runtime_get_if_in_use(&client->dev)) + return 0; + + switch (ctrl->id) { + case V4L2_CID_ANALOGUE_GAIN: + ret = ov8856_write_reg(ov8856, OV8856_REG_ANALOG_GAIN, + OV8856_REG_VALUE_16BIT, ctrl->val); + break; + + case V4L2_CID_DIGITAL_GAIN: + ret = ov8856_update_digital_gain(ov8856, ctrl->val); + break; + + case V4L2_CID_EXPOSURE: + /* 4 least significant bits of expsoure are fractional part */ + ret = ov8856_write_reg(ov8856, OV8856_REG_EXPOSURE, + OV8856_REG_VALUE_24BIT, ctrl->val << 4); + break; + + case V4L2_CID_VBLANK: + ret = ov8856_write_reg(ov8856, OV8856_REG_VTS, + OV8856_REG_VALUE_16BIT, + ov8856->cur_mode->height + ctrl->val); + break; + + case V4L2_CID_TEST_PATTERN: + ret = ov8856_test_pattern(ov8856, ctrl->val); + break; + + default: + ret = -EINVAL; + break; + } + + pm_runtime_put(&client->dev); + + return ret; +} + +static const struct v4l2_ctrl_ops ov8856_ctrl_ops = { + .s_ctrl = ov8856_set_ctrl, +}; + +static int ov8856_init_controls(struct ov8856 *ov8856) +{ + struct v4l2_ctrl_handler *ctrl_hdlr; + s64 exposure_max, h_blank; + int ret; + + ctrl_hdlr = &ov8856->ctrl_handler; + ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8); + if (ret) + return ret; + + ctrl_hdlr->lock = &ov8856->mutex; + ov8856->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_LINK_FREQ, + ARRAY_SIZE(link_freq_menu_items) - 1, + 0, link_freq_menu_items); + if (ov8856->link_freq) + ov8856->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + ov8856->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_PIXEL_RATE, 0, + to_pixel_rate(OV8856_LINK_FREQ_720MBPS), + 1, + to_pixel_rate(OV8856_LINK_FREQ_720MBPS)); + ov8856->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_VBLANK, + ov8856->cur_mode->vts_min - ov8856->cur_mode->height, + OV8856_VTS_MAX - ov8856->cur_mode->height, 1, + ov8856->cur_mode->vts_def - ov8856->cur_mode->height); + h_blank = to_pixels_per_line(ov8856->cur_mode->hts, + ov8856->cur_mode->link_freq_index) - ov8856->cur_mode->width; + ov8856->hblank = 
v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_HBLANK, h_blank, h_blank, 1, + h_blank); + if (ov8856->hblank) + ov8856->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY; + + v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_ANALOGUE_GAIN, + OV8856_ANAL_GAIN_MIN, OV8856_ANAL_GAIN_MAX, + OV8856_ANAL_GAIN_STEP, OV8856_ANAL_GAIN_MIN); + v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, V4L2_CID_DIGITAL_GAIN, + OV8856_DGTL_GAIN_MIN, OV8856_DGTL_GAIN_MAX, + OV8856_DGTL_GAIN_STEP, OV8856_DGTL_GAIN_DEFAULT); + exposure_max = ov8856->cur_mode->vts_def - OV8856_EXPOSURE_MAX_MARGIN; + ov8856->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_EXPOSURE, + OV8856_EXPOSURE_MIN, exposure_max, + OV8856_EXPOSURE_STEP, + exposure_max); + v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov8856_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(ov8856_test_pattern_menu) - 1, + 0, 0, ov8856_test_pattern_menu); + if (ctrl_hdlr->error) + return ctrl_hdlr->error; + + ov8856->sd.ctrl_handler = ctrl_hdlr; + + return 0; +} + +static void ov8856_update_pad_format(const struct ov8856_mode *mode, + struct v4l2_mbus_framefmt *fmt) +{ + fmt->width = mode->width; + fmt->height = mode->height; + fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; + fmt->field = V4L2_FIELD_NONE; +} + +static int ov8856_start_streaming(struct ov8856 *ov8856) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + const struct ov8856_reg_list *reg_list; + int link_freq_index, ret; + + link_freq_index = ov8856->cur_mode->link_freq_index; + reg_list = &link_freq_configs[link_freq_index].reg_list; + ret = ov8856_write_reg_list(ov8856, reg_list); + if (ret) { + dev_err(&client->dev, "failed to set plls"); + return ret; + } + + reg_list = &ov8856->cur_mode->reg_list; + ret = ov8856_write_reg_list(ov8856, reg_list); + if (ret) { + dev_err(&client->dev, "failed to set mode"); + return ret; + } + + ret = __v4l2_ctrl_handler_setup(ov8856->sd.ctrl_handler); + if (ret) + return ret; + + ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT, + OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING); + if (ret) { + dev_err(&client->dev, "failed to set stream"); + return ret; + } + + return 0; +} + +static void ov8856_stop_streaming(struct ov8856 *ov8856) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + + if (ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT, + OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY)) + dev_err(&client->dev, "failed to set stream"); +} + +static int ov8856_set_stream(struct v4l2_subdev *sd, int enable) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + struct i2c_client *client = v4l2_get_subdevdata(sd); + int ret = 0; + + if (ov8856->streaming == enable) + return 0; + + mutex_lock(&ov8856->mutex); + if (enable) { + ret = pm_runtime_get_sync(&client->dev); + if (ret < 0) { + pm_runtime_put_noidle(&client->dev); + mutex_unlock(&ov8856->mutex); + return ret; + } + + ret = ov8856_start_streaming(ov8856); + if (ret) { + enable = 0; + ov8856_stop_streaming(ov8856); + pm_runtime_put(&client->dev); + } + } else { + ov8856_stop_streaming(ov8856); + pm_runtime_put(&client->dev); + } + + ov8856->streaming = enable; + mutex_unlock(&ov8856->mutex); + + return ret; +} + +static int __maybe_unused ov8856_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov8856 *ov8856 = to_ov8856(sd); + + mutex_lock(&ov8856->mutex); + if (ov8856->streaming) + ov8856_stop_streaming(ov8856); + + mutex_unlock(&ov8856->mutex); + + return 0; +} + +static int 
__maybe_unused ov8856_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov8856 *ov8856 = to_ov8856(sd); + int ret; + + mutex_lock(&ov8856->mutex); + if (ov8856->streaming) { + ret = ov8856_start_streaming(ov8856); + if (ret) { + ov8856->streaming = false; + ov8856_stop_streaming(ov8856); + mutex_unlock(&ov8856->mutex); + return ret; + } + } + + mutex_unlock(&ov8856->mutex); + + return 0; +} + +static int ov8856_set_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + const struct ov8856_mode *mode; + s32 vblank_def, h_blank; + + mode = v4l2_find_nearest_size(supported_modes, + ARRAY_SIZE(supported_modes), width, + height, fmt->format.width, + fmt->format.height); + + mutex_lock(&ov8856->mutex); + ov8856_update_pad_format(mode, &fmt->format); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { + *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format; + } else { + ov8856->cur_mode = mode; + __v4l2_ctrl_s_ctrl(ov8856->link_freq, mode->link_freq_index); + __v4l2_ctrl_s_ctrl_int64(ov8856->pixel_rate, + to_pixel_rate(mode->link_freq_index)); + + /* Update limits and set FPS to default */ + vblank_def = mode->vts_def - mode->height; + __v4l2_ctrl_modify_range(ov8856->vblank, + mode->vts_min - mode->height, + OV8856_VTS_MAX - mode->height, 1, + vblank_def); + __v4l2_ctrl_s_ctrl(ov8856->vblank, vblank_def); + h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) - + mode->width; + __v4l2_ctrl_modify_range(ov8856->hblank, h_blank, h_blank, 1, + h_blank); + } + + mutex_unlock(&ov8856->mutex); + + return 0; +} + +static int ov8856_get_format(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_format *fmt) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + + mutex_lock(&ov8856->mutex); + if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) + fmt->format = *v4l2_subdev_get_try_format(&ov8856->sd, cfg, + fmt->pad); + else + ov8856_update_pad_format(ov8856->cur_mode, &fmt->format); + + mutex_unlock(&ov8856->mutex); + + return 0; +} + +static int ov8856_enum_mbus_code(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_mbus_code_enum *code) +{ + /* Only one bayer order GRBG is supported */ + if (code->index > 0) + return -EINVAL; + + code->code = MEDIA_BUS_FMT_SGRBG10_1X10; + + return 0; +} + +static int ov8856_enum_frame_size(struct v4l2_subdev *sd, + struct v4l2_subdev_pad_config *cfg, + struct v4l2_subdev_frame_size_enum *fse) +{ + if (fse->index >= ARRAY_SIZE(supported_modes)) + return -EINVAL; + + if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) + return -EINVAL; + + fse->min_width = supported_modes[fse->index].width; + fse->max_width = fse->min_width; + fse->min_height = supported_modes[fse->index].height; + fse->max_height = fse->min_height; + + return 0; +} + +static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct ov8856 *ov8856 = to_ov8856(sd); + + mutex_lock(&ov8856->mutex); + ov8856_update_pad_format(&supported_modes[0], + v4l2_subdev_get_try_format(sd, fh->pad, 0)); + mutex_unlock(&ov8856->mutex); + + return 0; +} + +static const struct v4l2_subdev_video_ops ov8856_video_ops = { + .s_stream = ov8856_set_stream, +}; + +static const struct v4l2_subdev_pad_ops ov8856_pad_ops = { + .set_fmt = ov8856_set_format, + .get_fmt = ov8856_get_format, + .enum_mbus_code = ov8856_enum_mbus_code, + .enum_frame_size = ov8856_enum_frame_size, 
+}; + +static const struct v4l2_subdev_ops ov8856_subdev_ops = { + .video = &ov8856_video_ops, + .pad = &ov8856_pad_ops, +}; + +static const struct media_entity_operations ov8856_subdev_entity_ops = { + .link_validate = v4l2_subdev_link_validate, +}; + +static const struct v4l2_subdev_internal_ops ov8856_internal_ops = { + .open = ov8856_open, +}; + +static int ov8856_identify_module(struct ov8856 *ov8856) +{ + struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd); + int ret; + u32 val; + + ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID, + OV8856_REG_VALUE_24BIT, &val); + if (ret) + return ret; + + if (val != OV8856_CHIP_ID) { + dev_err(&client->dev, "chip id mismatch: %x!=%x", + OV8856_CHIP_ID, val); + return -ENXIO; + } + + return 0; +} + +static int ov8856_check_hwcfg(struct device *dev) +{ + struct fwnode_handle *ep; + struct fwnode_handle *fwnode = dev_fwnode(dev); + struct v4l2_fwnode_endpoint bus_cfg = { + .bus_type = V4L2_MBUS_CSI2_DPHY + }; + u32 mclk; + int ret; + unsigned int i, j; + + if (!fwnode) + return -ENXIO; + + fwnode_property_read_u32(fwnode, "clock-frequency", &mclk); + if (mclk != OV8856_MCLK) { + dev_err(dev, "external clock %d is not supported", mclk); + return -EINVAL; + } + + ep = fwnode_graph_get_next_endpoint(fwnode, NULL); + if (!ep) + return -ENXIO; + + ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg); + fwnode_handle_put(ep); + if (ret) + return ret; + + if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV8856_DATA_LANES) { + dev_err(dev, "number of CSI2 data lanes %d is not supported", + bus_cfg.bus.mipi_csi2.num_data_lanes); + ret = -EINVAL; + goto check_hwcfg_error; + } + + if (!bus_cfg.nr_of_link_frequencies) { + dev_err(dev, "no link frequencies defined"); + ret = -EINVAL; + goto check_hwcfg_error; + } + + for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) { + for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) { + if (link_freq_menu_items[i] == + bus_cfg.link_frequencies[j]) + break; + } + + if (j == bus_cfg.nr_of_link_frequencies) { + dev_err(dev, "no link frequency %lld supported", + link_freq_menu_items[i]); + ret = -EINVAL; + goto check_hwcfg_error; + } + } + +check_hwcfg_error: + v4l2_fwnode_endpoint_free(&bus_cfg); + + return ret; +} + +static int ov8856_remove(struct i2c_client *client) +{ + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct ov8856 *ov8856 = to_ov8856(sd); + + v4l2_async_unregister_subdev(sd); + media_entity_cleanup(&sd->entity); + v4l2_ctrl_handler_free(sd->ctrl_handler); + pm_runtime_disable(&client->dev); + mutex_destroy(&ov8856->mutex); + + return 0; +} + +static int ov8856_probe(struct i2c_client *client) +{ + struct ov8856 *ov8856; + int ret; + + ret = ov8856_check_hwcfg(&client->dev); + if (ret) { + dev_err(&client->dev, "failed to check HW configuration: %d", + ret); + return ret; + } + + ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL); + if (!ov8856) + return -ENOMEM; + + v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops); + ret = ov8856_identify_module(ov8856); + if (ret) { + dev_err(&client->dev, "failed to find sensor: %d", ret); + return ret; + } + + mutex_init(&ov8856->mutex); + ov8856->cur_mode = &supported_modes[0]; + ret = ov8856_init_controls(ov8856); + if (ret) { + dev_err(&client->dev, "failed to init controls: %d", ret); + goto probe_error_v4l2_ctrl_handler_free; + } + + ov8856->sd.internal_ops = &ov8856_internal_ops; + ov8856->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + ov8856->sd.entity.ops = &ov8856_subdev_entity_ops; + ov8856->sd.entity.function = 
MEDIA_ENT_F_CAM_SENSOR; + ov8856->pad.flags = MEDIA_PAD_FL_SOURCE; + ret = media_entity_pads_init(&ov8856->sd.entity, 1, &ov8856->pad); + if (ret) { + dev_err(&client->dev, "failed to init entity pads: %d", ret); + goto probe_error_v4l2_ctrl_handler_free; + } + + ret = v4l2_async_register_subdev_sensor_common(&ov8856->sd); + if (ret < 0) { + dev_err(&client->dev, "failed to register V4L2 subdev: %d", + ret); + goto probe_error_media_entity_cleanup; + } + + /* + * Device is already turned on by i2c-core with ACPI domain PM. + * Enable runtime PM and turn off the device. + */ + pm_runtime_set_active(&client->dev); + pm_runtime_enable(&client->dev); + pm_runtime_idle(&client->dev); + + return 0; + +probe_error_media_entity_cleanup: + media_entity_cleanup(&ov8856->sd.entity); + +probe_error_v4l2_ctrl_handler_free: + v4l2_ctrl_handler_free(ov8856->sd.ctrl_handler); + mutex_destroy(&ov8856->mutex); + + return ret; +} + +static const struct dev_pm_ops ov8856_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(ov8856_suspend, ov8856_resume) +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ov8856_acpi_ids[] = { + {"OVTI8856"}, + {} +}; + +MODULE_DEVICE_TABLE(acpi, ov8856_acpi_ids); +#endif + +static struct i2c_driver ov8856_i2c_driver = { + .driver = { + .name = "ov8856", + .pm = &ov8856_pm_ops, + .acpi_match_table = ACPI_PTR(ov8856_acpi_ids), + }, + .probe_new = ov8856_probe, + .remove = ov8856_remove, +}; + +module_i2c_driver(ov8856_i2c_driver); + +MODULE_AUTHOR("Ben Kao "); +MODULE_DESCRIPTION("OmniVision OV8856 sensor driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/soc_camera/soc_ov9640.c b/drivers/media/i2c/ov9640.c similarity index 90% rename from drivers/media/i2c/soc_camera/soc_ov9640.c rename to drivers/media/i2c/ov9640.c index eb91b8240083..d6831f28378b 100644 --- a/drivers/media/i2c/soc_camera/soc_ov9640.c +++ b/drivers/media/i2c/ov9640.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * OmniVision OV96xx Camera Driver * @@ -9,14 +10,11 @@ * Kuninori Morimoto * * Based on ov7670 and soc_camera_platform driver, + * transition from soc_camera to pxa_camera based on mt9m111 * * Copyright 2006-7 Jonathan Corbet * Copyright (C) 2008 Magnus Damm * Copyright (C) 2008, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include @@ -27,10 +25,14 @@ #include #include -#include +#include #include #include #include +#include +#include + +#include #include "ov9640.h" @@ -159,7 +161,7 @@ static const struct ov9640_reg ov9640_regs_rgb[] = { { OV9640_MTXS, 0x65 }, }; -static u32 ov9640_codes[] = { +static const u32 ov9640_codes[] = { MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, MEDIA_BUS_FMT_RGB565_2X8_LE, @@ -269,21 +271,23 @@ static int ov9640_s_stream(struct v4l2_subdev *sd, int enable) /* Set status of additional camera capabilities */ static int ov9640_s_ctrl(struct v4l2_ctrl *ctrl) { - struct ov9640_priv *priv = container_of(ctrl->handler, struct ov9640_priv, hdl); + struct ov9640_priv *priv = container_of(ctrl->handler, + struct ov9640_priv, hdl); struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev); switch (ctrl->id) { case V4L2_CID_VFLIP: if (ctrl->val) return ov9640_reg_rmw(client, OV9640_MVFP, - OV9640_MVFP_V, 0); + OV9640_MVFP_V, 0); return ov9640_reg_rmw(client, OV9640_MVFP, 0, OV9640_MVFP_V); case V4L2_CID_HFLIP: if (ctrl->val) return ov9640_reg_rmw(client, OV9640_MVFP, - OV9640_MVFP_H, 0); + OV9640_MVFP_H, 0); return ov9640_reg_rmw(client, OV9640_MVFP, 0, OV9640_MVFP_H); } + return -EINVAL; } @@ -323,20 +327,33 @@ static int ov9640_set_register(struct v4l2_subdev *sd, static int ov9640_s_power(struct v4l2_subdev *sd, int on) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); struct ov9640_priv *priv = to_ov9640_sensor(sd); + int ret = 0; + + if (on) { + gpiod_set_value(priv->gpio_power, 1); + usleep_range(1000, 2000); + ret = v4l2_clk_enable(priv->clk); + usleep_range(1000, 2000); + gpiod_set_value(priv->gpio_reset, 0); + } else { + gpiod_set_value(priv->gpio_reset, 1); + usleep_range(1000, 2000); + v4l2_clk_disable(priv->clk); + usleep_range(1000, 2000); + gpiod_set_value(priv->gpio_power, 0); + } - return soc_camera_set_power(&client->dev, ssdd, priv->clk, on); + return ret; } /* select nearest higher resolution for capture */ static void ov9640_res_roundup(u32 *width, u32 *height) { - int i; + unsigned int i; enum { QQCIF, QQVGA, QCIF, QVGA, CIF, VGA, SXGA }; - static const int res_x[] = { 88, 160, 176, 320, 352, 640, 1280 }; - static const int res_y[] = { 72, 120, 144, 240, 288, 480, 960 }; + static const u32 res_x[] = { 88, 160, 176, 320, 352, 640, 1280 }; + static const u32 res_y[] = { 72, 120, 144, 240, 288, 480, 960 }; for (i = 0; i < ARRAY_SIZE(res_x); i++) { if (res_x[i] >= *width && res_y[i] >= *height) { @@ -379,8 +396,9 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width, u32 code, struct ov9640_reg_alt *alts) { const struct ov9640_reg *ov9640_regs, *matrix_regs; - int ov9640_regs_len, matrix_regs_len; - int i, ret; + unsigned int ov9640_regs_len, matrix_regs_len; + unsigned int i; + int ret; u8 val; /* select register configuration for given resolution */ @@ -454,7 +472,7 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width, /* write color matrix configuration into the module */ for (i = 0; i < matrix_regs_len; i++) { ret = ov9640_reg_write(client, matrix_regs[i].reg, - matrix_regs[i].val); + matrix_regs[i].val); if (ret) return ret; } @@ -465,17 +483,18 @@ static int ov9640_write_regs(struct i2c_client *client, u32 width, /* program default register values */ static int ov9640_prog_dflt(struct i2c_client *client) { - int i, ret; + unsigned int i; + int ret; for (i = 0; i < ARRAY_SIZE(ov9640_regs_dflt); i++) { ret = ov9640_reg_write(client, 
ov9640_regs_dflt[i].reg, - ov9640_regs_dflt[i].val); + ov9640_regs_dflt[i].val); if (ret) return ret; } /* wait for the changes to actually happen, 140ms are not enough yet */ - mdelay(150); + msleep(150); return 0; } @@ -529,6 +548,7 @@ static int ov9640_set_fmt(struct v4l2_subdev *sd, return ov9640_s_fmt(sd, mf); cfg->try_fmt = *mf; + return 0; } @@ -540,6 +560,7 @@ static int ov9640_enum_mbus_code(struct v4l2_subdev *sd, return -EINVAL; code->code = ov9640_codes[code->index]; + return 0; } @@ -630,14 +651,10 @@ static const struct v4l2_subdev_core_ops ov9640_core_ops = { static int ov9640_g_mbus_config(struct v4l2_subdev *sd, struct v4l2_mbus_config *cfg) { - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER | V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH; cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); return 0; } @@ -666,41 +683,62 @@ static int ov9640_probe(struct i2c_client *client, const struct i2c_device_id *did) { struct ov9640_priv *priv; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); int ret; - if (!ssdd) { - dev_err(&client->dev, "Missing platform_data for driver\n"); - return -EINVAL; - } - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + priv->gpio_power = devm_gpiod_get(&client->dev, "Camera power", + GPIOD_OUT_LOW); + if (IS_ERR_OR_NULL(priv->gpio_power)) { + ret = PTR_ERR(priv->gpio_power); + return ret; + } + + priv->gpio_reset = devm_gpiod_get(&client->dev, "Camera reset", + GPIOD_OUT_HIGH); + if (IS_ERR_OR_NULL(priv->gpio_reset)) { + ret = PTR_ERR(priv->gpio_reset); + return ret; + } + v4l2_i2c_subdev_init(&priv->subdev, client, &ov9640_subdev_ops); v4l2_ctrl_handler_init(&priv->hdl, 2); v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops, - V4L2_CID_VFLIP, 0, 1, 1, 0); + V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&priv->hdl, &ov9640_ctrl_ops, - V4L2_CID_HFLIP, 0, 1, 1, 0); + V4L2_CID_HFLIP, 0, 1, 1, 0); + + if (priv->hdl.error) { + ret = priv->hdl.error; + goto ectrlinit; + } + priv->subdev.ctrl_handler = &priv->hdl; - if (priv->hdl.error) - return priv->hdl.error; priv->clk = v4l2_clk_get(&client->dev, "mclk"); if (IS_ERR(priv->clk)) { ret = PTR_ERR(priv->clk); - goto eclkget; + goto ectrlinit; } ret = ov9640_video_probe(client); - if (ret) { - v4l2_clk_put(priv->clk); -eclkget: - v4l2_ctrl_handler_free(&priv->hdl); - } + if (ret) + goto eprobe; + + priv->subdev.dev = &client->dev; + ret = v4l2_async_register_subdev(&priv->subdev); + if (ret) + goto eprobe; + + return 0; + +eprobe: + v4l2_clk_put(priv->clk); +ectrlinit: + v4l2_ctrl_handler_free(&priv->hdl); return ret; } @@ -711,8 +749,9 @@ static int ov9640_remove(struct i2c_client *client) struct ov9640_priv *priv = to_ov9640_sensor(sd); v4l2_clk_put(priv->clk); - v4l2_device_unregister_subdev(&priv->subdev); + v4l2_async_unregister_subdev(&priv->subdev); v4l2_ctrl_handler_free(&priv->hdl); + return 0; } diff --git a/drivers/media/i2c/soc_camera/ov9640.h b/drivers/media/i2c/ov9640.h similarity index 96% rename from drivers/media/i2c/soc_camera/ov9640.h rename to drivers/media/i2c/ov9640.h index 65d13ff17536..a8ed6992c1a8 100644 --- a/drivers/media/i2c/soc_camera/ov9640.h +++ b/drivers/media/i2c/ov9640.h @@ -1,11 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * OmniVision OV96xx Camera Header File * * Copyright (C) 2009 Marek Vasut 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __DRIVERS_MEDIA_VIDEO_OV9640_H__ @@ -200,6 +197,8 @@ struct ov9640_priv { struct v4l2_subdev subdev; struct v4l2_ctrl_handler hdl; struct v4l2_clk *clk; + struct gpio_desc *gpio_power; + struct gpio_desc *gpio_reset; int model; int revision; diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c index f0587c0c0a72..eefd57ec2a73 100644 --- a/drivers/media/i2c/ov9650.c +++ b/drivers/media/i2c/ov9650.c @@ -45,8 +45,8 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)"); * OV9650/OV9652 register definitions */ #define REG_GAIN 0x00 /* Gain control, AGC[7:0] */ -#define REG_BLUE 0x01 /* AWB - Blue chanel gain */ -#define REG_RED 0x02 /* AWB - Red chanel gain */ +#define REG_BLUE 0x01 /* AWB - Blue channel gain */ +#define REG_RED 0x02 /* AWB - Red channel gain */ #define REG_VREF 0x03 /* [7:6] - AGC[9:8], [5:3]/[2:0] */ #define VREF_GAIN_MASK 0xc0 /* - VREF end/start low 3 bits */ #define REG_COM1 0x04 diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index c461847ddae8..b52fe250f75f 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1431,7 +1431,7 @@ err: for (++i; i < S5C73M3_MAX_SUPPLIES; i++) { int r = regulator_enable(state->supplies[i].consumer); if (r < 0) - v4l2_err(&state->oif_sd, "Failed to reenable %s: %d\n", + v4l2_err(&state->oif_sd, "Failed to re-enable %s: %d\n", state->supplies[i].supply, r); } diff --git a/drivers/media/i2c/s5k4ecgx.c b/drivers/media/i2c/s5k4ecgx.c index 79aa2740edc4..79c1894c2c83 100644 --- a/drivers/media/i2c/s5k4ecgx.c +++ b/drivers/media/i2c/s5k4ecgx.c @@ -263,8 +263,6 @@ static int s5k4ecgx_read(struct i2c_client *client, u32 addr, u16 *val) ret = s5k4ecgx_i2c_write(client, REG_CMDRD_ADDRL, low); if (!ret) ret = s5k4ecgx_i2c_read(client, REG_CMDBUF0_ADDR, val); - if (!ret) - dev_err(&client->dev, "Failed to execute read command\n"); return ret; } diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c index ab26f549d716..f8630c4c2ef0 100644 --- a/drivers/media/i2c/s5k6aa.c +++ b/drivers/media/i2c/s5k6aa.c @@ -729,7 +729,7 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout, * @s5k6aa: pointer to &struct s5k6aa describing the device * @preset: s5kaa preset to be applied * - * Configure output resolution and color fromat, pixel clock + * Configure output resolution and color format, pixel clock * frequency range, device frame rate type and frame period range. */ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa, diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c index 6bc278aa31fc..88dc6baac639 100644 --- a/drivers/media/i2c/saa7115.c +++ b/drivers/media/i2c/saa7115.c @@ -1766,7 +1766,7 @@ static int saa711x_detect_chip(struct i2c_client *client, * exists. However, tests on a device labeled as: * "GM7113C 1145" returned "10" on all 16 chip * version (reg 0x00) reads. So, we need to also - * accept at least verion 0. For now, let's just + * accept at least version 0. For now, let's just * assume that a device that returns "0000" for * the lower nibble is a gm7113c. 
*/ diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c index 668c39cc29e8..86b8b65ea683 100644 --- a/drivers/media/i2c/saa717x.c +++ b/drivers/media/i2c/saa717x.c @@ -844,7 +844,7 @@ static void set_h_prescale(struct v4l2_subdev *sd, if (i == count) return; - /* horizonal prescaling */ + /* horizontal prescaling */ saa717x_write(sd, 0x60 + task_shift, vals[i].xpsc); /* accumulation length */ saa717x_write(sd, 0x61 + task_shift, vals[i].xacl); diff --git a/drivers/media/i2c/soc_camera/Makefile b/drivers/media/i2c/soc_camera/Makefile deleted file mode 100644 index 09ae483b96ef..000000000000 --- a/drivers/media/i2c/soc_camera/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_SOC_CAMERA_MT9M001) += soc_mt9m001.o -obj-$(CONFIG_SOC_CAMERA_MT9T112) += soc_mt9t112.o -obj-$(CONFIG_SOC_CAMERA_MT9V022) += soc_mt9v022.o -obj-$(CONFIG_SOC_CAMERA_OV5642) += soc_ov5642.o -obj-$(CONFIG_SOC_CAMERA_OV772X) += soc_ov772x.o -obj-$(CONFIG_SOC_CAMERA_OV9640) += soc_ov9640.o -obj-$(CONFIG_SOC_CAMERA_OV9740) += soc_ov9740.o -obj-$(CONFIG_SOC_CAMERA_RJ54N1) += soc_rj54n1cb0c.o -obj-$(CONFIG_SOC_CAMERA_TW9910) += soc_tw9910.o diff --git a/drivers/media/i2c/soc_camera/soc_mt9t112.c b/drivers/media/i2c/soc_camera/soc_mt9t112.c deleted file mode 100644 index ea1ff270bc2d..000000000000 --- a/drivers/media/i2c/soc_camera/soc_mt9t112.c +++ /dev/null @@ -1,1157 +0,0 @@ -/* - * mt9t112 Camera Driver - * - * Copyright (C) 2009 Renesas Solutions Corp. - * Kuninori Morimoto - * - * Based on ov772x driver, mt9m111 driver, - * - * Copyright (C) 2008 Kuninori Morimoto - * Copyright (C) 2008, Robert Jarzmik - * Copyright 2006-7 Jonathan Corbet - * Copyright (C) 2008 Magnus Damm - * Copyright (C) 2008, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* you can check PLL/clock info */ -/* #define EXT_CLOCK 24000000 */ - -/************************************************************************ - macro -************************************************************************/ -/* - * frame size - */ -#define MAX_WIDTH 2048 -#define MAX_HEIGHT 1536 - -/* - * macro of read/write - */ -#define ECHECKER(ret, x) \ - do { \ - (ret) = (x); \ - if ((ret) < 0) \ - return (ret); \ - } while (0) - -#define mt9t112_reg_write(ret, client, a, b) \ - ECHECKER(ret, __mt9t112_reg_write(client, a, b)) -#define mt9t112_mcu_write(ret, client, a, b) \ - ECHECKER(ret, __mt9t112_mcu_write(client, a, b)) - -#define mt9t112_reg_mask_set(ret, client, a, b, c) \ - ECHECKER(ret, __mt9t112_reg_mask_set(client, a, b, c)) -#define mt9t112_mcu_mask_set(ret, client, a, b, c) \ - ECHECKER(ret, __mt9t112_mcu_mask_set(client, a, b, c)) - -#define mt9t112_reg_read(ret, client, a) \ - ECHECKER(ret, __mt9t112_reg_read(client, a)) - -/* - * Logical address - */ -#define _VAR(id, offset, base) (base | (id & 0x1f) << 10 | (offset & 0x3ff)) -#define VAR(id, offset) _VAR(id, offset, 0x0000) -#define VAR8(id, offset) _VAR(id, offset, 0x8000) - -/************************************************************************ - struct -************************************************************************/ -struct mt9t112_format { - u32 code; - enum v4l2_colorspace colorspace; - u16 fmt; - u16 order; -}; - -struct mt9t112_priv { - struct v4l2_subdev subdev; - struct mt9t112_platform_data *info; - struct i2c_client *client; - struct v4l2_rect frame; - struct v4l2_clk *clk; - const struct mt9t112_format *format; - int num_formats; - u32 flags; -/* for flags */ -#define INIT_DONE (1 << 0) -#define PCLK_RISING (1 << 1) -}; - -/************************************************************************ - supported format -************************************************************************/ - -static const struct mt9t112_format mt9t112_cfmts[] = { - { - .code = MEDIA_BUS_FMT_UYVY8_2X8, - .colorspace = V4L2_COLORSPACE_SRGB, - .fmt = 1, - .order = 0, - }, { - .code = MEDIA_BUS_FMT_VYUY8_2X8, - .colorspace = V4L2_COLORSPACE_SRGB, - .fmt = 1, - .order = 1, - }, { - .code = MEDIA_BUS_FMT_YUYV8_2X8, - .colorspace = V4L2_COLORSPACE_SRGB, - .fmt = 1, - .order = 2, - }, { - .code = MEDIA_BUS_FMT_YVYU8_2X8, - .colorspace = V4L2_COLORSPACE_SRGB, - .fmt = 1, - .order = 3, - }, { - .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, - .colorspace = V4L2_COLORSPACE_SRGB, - .fmt = 8, - .order = 2, - }, { - .code = MEDIA_BUS_FMT_RGB565_2X8_LE, - .colorspace = V4L2_COLORSPACE_SRGB, - .fmt = 4, - .order = 2, - }, -}; - -/************************************************************************ - general function -************************************************************************/ -static struct mt9t112_priv *to_mt9t112(const struct i2c_client *client) -{ - return container_of(i2c_get_clientdata(client), - struct mt9t112_priv, - subdev); -} - -static int __mt9t112_reg_read(const struct i2c_client *client, u16 command) -{ - struct i2c_msg msg[2]; - u8 buf[2]; - int ret; - - command = swab16(command); - - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = 2; - msg[0].buf = (u8 *)&command; - - msg[1].addr = client->addr; - msg[1].flags = I2C_M_RD; - msg[1].len = 2; - msg[1].buf = buf; - - /* - * if return value of this function is < 0, - * it mean error. 
- * else, under 16bit is valid data. - */ - ret = i2c_transfer(client->adapter, msg, 2); - if (ret < 0) - return ret; - - memcpy(&ret, buf, 2); - return swab16(ret); -} - -static int __mt9t112_reg_write(const struct i2c_client *client, - u16 command, u16 data) -{ - struct i2c_msg msg; - u8 buf[4]; - int ret; - - command = swab16(command); - data = swab16(data); - - memcpy(buf + 0, &command, 2); - memcpy(buf + 2, &data, 2); - - msg.addr = client->addr; - msg.flags = 0; - msg.len = 4; - msg.buf = buf; - - /* - * i2c_transfer return message length, - * but this function should return 0 if correct case - */ - ret = i2c_transfer(client->adapter, &msg, 1); - if (ret >= 0) - ret = 0; - - return ret; -} - -static int __mt9t112_reg_mask_set(const struct i2c_client *client, - u16 command, - u16 mask, - u16 set) -{ - int val = __mt9t112_reg_read(client, command); - if (val < 0) - return val; - - val &= ~mask; - val |= set & mask; - - return __mt9t112_reg_write(client, command, val); -} - -/* mcu access */ -static int __mt9t112_mcu_read(const struct i2c_client *client, u16 command) -{ - int ret; - - ret = __mt9t112_reg_write(client, 0x098E, command); - if (ret < 0) - return ret; - - return __mt9t112_reg_read(client, 0x0990); -} - -static int __mt9t112_mcu_write(const struct i2c_client *client, - u16 command, u16 data) -{ - int ret; - - ret = __mt9t112_reg_write(client, 0x098E, command); - if (ret < 0) - return ret; - - return __mt9t112_reg_write(client, 0x0990, data); -} - -static int __mt9t112_mcu_mask_set(const struct i2c_client *client, - u16 command, - u16 mask, - u16 set) -{ - int val = __mt9t112_mcu_read(client, command); - if (val < 0) - return val; - - val &= ~mask; - val |= set & mask; - - return __mt9t112_mcu_write(client, command, val); -} - -static int mt9t112_reset(const struct i2c_client *client) -{ - int ret; - - mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0001); - msleep(1); - mt9t112_reg_mask_set(ret, client, 0x001a, 0x0001, 0x0000); - - return ret; -} - -#ifndef EXT_CLOCK -#define CLOCK_INFO(a, b) -#else -#define CLOCK_INFO(a, b) mt9t112_clock_info(a, b) -static int mt9t112_clock_info(const struct i2c_client *client, u32 ext) -{ - int m, n, p1, p2, p3, p4, p5, p6, p7; - u32 vco, clk; - char *enable; - - ext /= 1000; /* kbyte order */ - - mt9t112_reg_read(n, client, 0x0012); - p1 = n & 0x000f; - n = n >> 4; - p2 = n & 0x000f; - n = n >> 4; - p3 = n & 0x000f; - - mt9t112_reg_read(n, client, 0x002a); - p4 = n & 0x000f; - n = n >> 4; - p5 = n & 0x000f; - n = n >> 4; - p6 = n & 0x000f; - - mt9t112_reg_read(n, client, 0x002c); - p7 = n & 0x000f; - - mt9t112_reg_read(n, client, 0x0010); - m = n & 0x00ff; - n = (n >> 8) & 0x003f; - - enable = ((6000 > ext) || (54000 < ext)) ? "X" : ""; - dev_dbg(&client->dev, "EXTCLK : %10u K %s\n", ext, enable); - - vco = 2 * m * ext / (n+1); - enable = ((384000 > vco) || (768000 < vco)) ? "X" : ""; - dev_dbg(&client->dev, "VCO : %10u K %s\n", vco, enable); - - clk = vco / (p1+1) / (p2+1); - enable = (96000 < clk) ? "X" : ""; - dev_dbg(&client->dev, "PIXCLK : %10u K %s\n", clk, enable); - - clk = vco / (p3+1); - enable = (768000 < clk) ? "X" : ""; - dev_dbg(&client->dev, "MIPICLK : %10u K %s\n", clk, enable); - - clk = vco / (p6+1); - enable = (96000 < clk) ? "X" : ""; - dev_dbg(&client->dev, "MCU CLK : %10u K %s\n", clk, enable); - - clk = vco / (p5+1); - enable = (54000 < clk) ? "X" : ""; - dev_dbg(&client->dev, "SOC CLK : %10u K %s\n", clk, enable); - - clk = vco / (p4+1); - enable = (70000 < clk) ? 
"X" : ""; - dev_dbg(&client->dev, "Sensor CLK : %10u K %s\n", clk, enable); - - clk = vco / (p7+1); - dev_dbg(&client->dev, "External sensor : %10u K\n", clk); - - clk = ext / (n+1); - enable = ((2000 > clk) || (24000 < clk)) ? "X" : ""; - dev_dbg(&client->dev, "PFD : %10u K %s\n", clk, enable); - - return 0; -} -#endif - -static void mt9t112_frame_check(u32 *width, u32 *height, u32 *left, u32 *top) -{ - soc_camera_limit_side(left, width, 0, 0, MAX_WIDTH); - soc_camera_limit_side(top, height, 0, 0, MAX_HEIGHT); -} - -static int mt9t112_set_a_frame_size(const struct i2c_client *client, - u16 width, - u16 height) -{ - int ret; - u16 wstart = (MAX_WIDTH - width) / 2; - u16 hstart = (MAX_HEIGHT - height) / 2; - - /* (Context A) Image Width/Height */ - mt9t112_mcu_write(ret, client, VAR(26, 0), width); - mt9t112_mcu_write(ret, client, VAR(26, 2), height); - - /* (Context A) Output Width/Height */ - mt9t112_mcu_write(ret, client, VAR(18, 43), 8 + width); - mt9t112_mcu_write(ret, client, VAR(18, 45), 8 + height); - - /* (Context A) Start Row/Column */ - mt9t112_mcu_write(ret, client, VAR(18, 2), 4 + hstart); - mt9t112_mcu_write(ret, client, VAR(18, 4), 4 + wstart); - - /* (Context A) End Row/Column */ - mt9t112_mcu_write(ret, client, VAR(18, 6), 11 + height + hstart); - mt9t112_mcu_write(ret, client, VAR(18, 8), 11 + width + wstart); - - mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06); - - return ret; -} - -static int mt9t112_set_pll_dividers(const struct i2c_client *client, - u8 m, u8 n, - u8 p1, u8 p2, u8 p3, - u8 p4, u8 p5, u8 p6, - u8 p7) -{ - int ret; - u16 val; - - /* N/M */ - val = (n << 8) | - (m << 0); - mt9t112_reg_mask_set(ret, client, 0x0010, 0x3fff, val); - - /* P1/P2/P3 */ - val = ((p3 & 0x0F) << 8) | - ((p2 & 0x0F) << 4) | - ((p1 & 0x0F) << 0); - mt9t112_reg_mask_set(ret, client, 0x0012, 0x0fff, val); - - /* P4/P5/P6 */ - val = (0x7 << 12) | - ((p6 & 0x0F) << 8) | - ((p5 & 0x0F) << 4) | - ((p4 & 0x0F) << 0); - mt9t112_reg_mask_set(ret, client, 0x002A, 0x7fff, val); - - /* P7 */ - val = (0x1 << 12) | - ((p7 & 0x0F) << 0); - mt9t112_reg_mask_set(ret, client, 0x002C, 0x100f, val); - - return ret; -} - -static int mt9t112_init_pll(const struct i2c_client *client) -{ - struct mt9t112_priv *priv = to_mt9t112(client); - int data, i, ret; - - mt9t112_reg_mask_set(ret, client, 0x0014, 0x003, 0x0001); - - /* PLL control: BYPASS PLL = 8517 */ - mt9t112_reg_write(ret, client, 0x0014, 0x2145); - - /* Replace these registers when new timing parameters are generated */ - mt9t112_set_pll_dividers(client, - priv->info->divider.m, - priv->info->divider.n, - priv->info->divider.p1, - priv->info->divider.p2, - priv->info->divider.p3, - priv->info->divider.p4, - priv->info->divider.p5, - priv->info->divider.p6, - priv->info->divider.p7); - - /* - * TEST_BYPASS on - * PLL_ENABLE on - * SEL_LOCK_DET on - * TEST_BYPASS off - */ - mt9t112_reg_write(ret, client, 0x0014, 0x2525); - mt9t112_reg_write(ret, client, 0x0014, 0x2527); - mt9t112_reg_write(ret, client, 0x0014, 0x3427); - mt9t112_reg_write(ret, client, 0x0014, 0x3027); - - mdelay(10); - - /* - * PLL_BYPASS off - * Reference clock count - * I2C Master Clock Divider - */ - mt9t112_reg_write(ret, client, 0x0014, 0x3046); - mt9t112_reg_write(ret, client, 0x0016, 0x0400); /* JPEG initialization workaround */ - mt9t112_reg_write(ret, client, 0x0022, 0x0190); - mt9t112_reg_write(ret, client, 0x3B84, 0x0212); - - /* External sensor clock is PLL bypass */ - mt9t112_reg_write(ret, client, 0x002E, 0x0500); - - mt9t112_reg_mask_set(ret, client, 0x0018, 0x0002, 
0x0002); - mt9t112_reg_mask_set(ret, client, 0x3B82, 0x0004, 0x0004); - - /* MCU disabled */ - mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0x0004); - - /* out of standby */ - mt9t112_reg_mask_set(ret, client, 0x0018, 0x0001, 0); - - mdelay(50); - - /* - * Standby Workaround - * Disable Secondary I2C Pads - */ - mt9t112_reg_write(ret, client, 0x0614, 0x0001); - mdelay(1); - mt9t112_reg_write(ret, client, 0x0614, 0x0001); - mdelay(1); - mt9t112_reg_write(ret, client, 0x0614, 0x0001); - mdelay(1); - mt9t112_reg_write(ret, client, 0x0614, 0x0001); - mdelay(1); - mt9t112_reg_write(ret, client, 0x0614, 0x0001); - mdelay(1); - mt9t112_reg_write(ret, client, 0x0614, 0x0001); - mdelay(1); - - /* poll to verify out of standby. Must Poll this bit */ - for (i = 0; i < 100; i++) { - mt9t112_reg_read(data, client, 0x0018); - if (!(0x4000 & data)) - break; - - mdelay(10); - } - - return ret; -} - -static int mt9t112_init_setting(const struct i2c_client *client) -{ - - int ret; - - /* Adaptive Output Clock (A) */ - mt9t112_mcu_mask_set(ret, client, VAR(26, 160), 0x0040, 0x0000); - - /* Read Mode (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 12), 0x0024); - - /* Fine Correction (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 15), 0x00CC); - - /* Fine IT Min (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 17), 0x01f1); - - /* Fine IT Max Margin (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 19), 0x00fF); - - /* Base Frame Lines (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 29), 0x032D); - - /* Min Line Length (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 31), 0x073a); - - /* Line Length (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 37), 0x07d0); - - /* Adaptive Output Clock (B) */ - mt9t112_mcu_mask_set(ret, client, VAR(27, 160), 0x0040, 0x0000); - - /* Row Start (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 74), 0x004); - - /* Column Start (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 76), 0x004); - - /* Row End (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 78), 0x60B); - - /* Column End (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 80), 0x80B); - - /* Fine Correction (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 87), 0x008C); - - /* Fine IT Min (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 89), 0x01F1); - - /* Fine IT Max Margin (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 91), 0x00FF); - - /* Base Frame Lines (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 101), 0x0668); - - /* Min Line Length (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 103), 0x0AF0); - - /* Line Length (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 109), 0x0AF0); - - /* - * Flicker Dectection registers - * This section should be replaced whenever new Timing file is generated - * All the following registers need to be replaced - * Following registers are generated from Register Wizard but user can - * modify them. 
For detail see auto flicker detection tuning - */ - - /* FD_FDPERIOD_SELECT */ - mt9t112_mcu_write(ret, client, VAR8(8, 5), 0x01); - - /* PRI_B_CONFIG_FD_ALGO_RUN */ - mt9t112_mcu_write(ret, client, VAR(27, 17), 0x0003); - - /* PRI_A_CONFIG_FD_ALGO_RUN */ - mt9t112_mcu_write(ret, client, VAR(26, 17), 0x0003); - - /* - * AFD range detection tuning registers - */ - - /* search_f1_50 */ - mt9t112_mcu_write(ret, client, VAR8(18, 165), 0x25); - - /* search_f2_50 */ - mt9t112_mcu_write(ret, client, VAR8(18, 166), 0x28); - - /* search_f1_60 */ - mt9t112_mcu_write(ret, client, VAR8(18, 167), 0x2C); - - /* search_f2_60 */ - mt9t112_mcu_write(ret, client, VAR8(18, 168), 0x2F); - - /* period_50Hz (A) */ - mt9t112_mcu_write(ret, client, VAR8(18, 68), 0xBA); - - /* secret register by aptina */ - /* period_50Hz (A MSB) */ - mt9t112_mcu_write(ret, client, VAR8(18, 303), 0x00); - - /* period_60Hz (A) */ - mt9t112_mcu_write(ret, client, VAR8(18, 69), 0x9B); - - /* secret register by aptina */ - /* period_60Hz (A MSB) */ - mt9t112_mcu_write(ret, client, VAR8(18, 301), 0x00); - - /* period_50Hz (B) */ - mt9t112_mcu_write(ret, client, VAR8(18, 140), 0x82); - - /* secret register by aptina */ - /* period_50Hz (B) MSB */ - mt9t112_mcu_write(ret, client, VAR8(18, 304), 0x00); - - /* period_60Hz (B) */ - mt9t112_mcu_write(ret, client, VAR8(18, 141), 0x6D); - - /* secret register by aptina */ - /* period_60Hz (B) MSB */ - mt9t112_mcu_write(ret, client, VAR8(18, 302), 0x00); - - /* FD Mode */ - mt9t112_mcu_write(ret, client, VAR8(8, 2), 0x10); - - /* Stat_min */ - mt9t112_mcu_write(ret, client, VAR8(8, 9), 0x02); - - /* Stat_max */ - mt9t112_mcu_write(ret, client, VAR8(8, 10), 0x03); - - /* Min_amplitude */ - mt9t112_mcu_write(ret, client, VAR8(8, 12), 0x0A); - - /* RX FIFO Watermark (A) */ - mt9t112_mcu_write(ret, client, VAR(18, 70), 0x0014); - - /* RX FIFO Watermark (B) */ - mt9t112_mcu_write(ret, client, VAR(18, 142), 0x0014); - - /* MCLK: 16MHz - * PCLK: 73MHz - * CorePixCLK: 36.5 MHz - */ - mt9t112_mcu_write(ret, client, VAR8(18, 0x0044), 133); - mt9t112_mcu_write(ret, client, VAR8(18, 0x0045), 110); - mt9t112_mcu_write(ret, client, VAR8(18, 0x008c), 130); - mt9t112_mcu_write(ret, client, VAR8(18, 0x008d), 108); - - mt9t112_mcu_write(ret, client, VAR8(18, 0x00A5), 27); - mt9t112_mcu_write(ret, client, VAR8(18, 0x00a6), 30); - mt9t112_mcu_write(ret, client, VAR8(18, 0x00a7), 32); - mt9t112_mcu_write(ret, client, VAR8(18, 0x00a8), 35); - - return ret; -} - -static int mt9t112_auto_focus_setting(const struct i2c_client *client) -{ - int ret; - - mt9t112_mcu_write(ret, client, VAR(12, 13), 0x000F); - mt9t112_mcu_write(ret, client, VAR(12, 23), 0x0F0F); - mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06); - - mt9t112_reg_write(ret, client, 0x0614, 0x0000); - - mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05); - mt9t112_mcu_write(ret, client, VAR8(12, 2), 0x02); - mt9t112_mcu_write(ret, client, VAR(12, 3), 0x0002); - mt9t112_mcu_write(ret, client, VAR(17, 3), 0x8001); - mt9t112_mcu_write(ret, client, VAR(17, 11), 0x0025); - mt9t112_mcu_write(ret, client, VAR(17, 13), 0x0193); - mt9t112_mcu_write(ret, client, VAR8(17, 33), 0x18); - mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x05); - - return ret; -} - -static int mt9t112_auto_focus_trigger(const struct i2c_client *client) -{ - int ret; - - mt9t112_mcu_write(ret, client, VAR8(12, 25), 0x01); - - return ret; -} - -static int mt9t112_init_camera(const struct i2c_client *client) -{ - int ret; - - ECHECKER(ret, mt9t112_reset(client)); - - ECHECKER(ret, 
mt9t112_init_pll(client)); - - ECHECKER(ret, mt9t112_init_setting(client)); - - ECHECKER(ret, mt9t112_auto_focus_setting(client)); - - mt9t112_reg_mask_set(ret, client, 0x0018, 0x0004, 0); - - /* Analog setting B */ - mt9t112_reg_write(ret, client, 0x3084, 0x2409); - mt9t112_reg_write(ret, client, 0x3092, 0x0A49); - mt9t112_reg_write(ret, client, 0x3094, 0x4949); - mt9t112_reg_write(ret, client, 0x3096, 0x4950); - - /* - * Disable adaptive clock - * PRI_A_CONFIG_JPEG_OB_TX_CONTROL_VAR - * PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR - */ - mt9t112_mcu_write(ret, client, VAR(26, 160), 0x0A2E); - mt9t112_mcu_write(ret, client, VAR(27, 160), 0x0A2E); - - /* Configure STatus in Status_before_length Format and enable header */ - /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */ - mt9t112_mcu_write(ret, client, VAR(27, 144), 0x0CB4); - - /* Enable JPEG in context B */ - /* PRI_B_CONFIG_JPEG_OB_TX_CONTROL_VAR */ - mt9t112_mcu_write(ret, client, VAR8(27, 142), 0x01); - - /* Disable Dac_TXLO */ - mt9t112_reg_write(ret, client, 0x316C, 0x350F); - - /* Set max slew rates */ - mt9t112_reg_write(ret, client, 0x1E, 0x777); - - return ret; -} - -/************************************************************************ - v4l2_subdev_core_ops -************************************************************************/ - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int mt9t112_g_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - reg->size = 2; - mt9t112_reg_read(ret, client, reg->reg); - - reg->val = (__u64)ret; - - return 0; -} - -static int mt9t112_s_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - mt9t112_reg_write(ret, client, reg->reg, reg->val); - - return ret; -} -#endif - -static int mt9t112_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct mt9t112_priv *priv = to_mt9t112(client); - - return soc_camera_set_power(&client->dev, ssdd, priv->clk, on); -} - -static const struct v4l2_subdev_core_ops mt9t112_subdev_core_ops = { -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = mt9t112_g_register, - .s_register = mt9t112_s_register, -#endif - .s_power = mt9t112_s_power, -}; - - -/************************************************************************ - v4l2_subdev_video_ops -************************************************************************/ -static int mt9t112_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t112_priv *priv = to_mt9t112(client); - int ret = 0; - - if (!enable) { - /* FIXME - * - * If user selected large output size, - * and used it long time, - * mt9t112 camera will be very warm. - * - * But current driver can not stop mt9t112 camera. - * So, set small size here to solve this problem. - */ - mt9t112_set_a_frame_size(client, VGA_WIDTH, VGA_HEIGHT); - return ret; - } - - if (!(priv->flags & INIT_DONE)) { - u16 param = PCLK_RISING & priv->flags ? 
0x0001 : 0x0000; - - ECHECKER(ret, mt9t112_init_camera(client)); - - /* Invert PCLK (Data sampled on falling edge of pixclk) */ - mt9t112_reg_write(ret, client, 0x3C20, param); - - mdelay(5); - - priv->flags |= INIT_DONE; - } - - mt9t112_mcu_write(ret, client, VAR(26, 7), priv->format->fmt); - mt9t112_mcu_write(ret, client, VAR(26, 9), priv->format->order); - mt9t112_mcu_write(ret, client, VAR8(1, 0), 0x06); - - mt9t112_set_a_frame_size(client, - priv->frame.width, - priv->frame.height); - - ECHECKER(ret, mt9t112_auto_focus_trigger(client)); - - dev_dbg(&client->dev, "format : %d\n", priv->format->code); - dev_dbg(&client->dev, "size : %d x %d\n", - priv->frame.width, - priv->frame.height); - - CLOCK_INFO(client, EXT_CLOCK); - - return ret; -} - -static int mt9t112_set_params(struct mt9t112_priv *priv, - const struct v4l2_rect *rect, - u32 code) -{ - int i; - - /* - * get color format - */ - for (i = 0; i < priv->num_formats; i++) - if (mt9t112_cfmts[i].code == code) - break; - - if (i == priv->num_formats) - return -EINVAL; - - priv->frame = *rect; - - /* - * frame size check - */ - mt9t112_frame_check(&priv->frame.width, &priv->frame.height, - &priv->frame.left, &priv->frame.top); - - priv->format = mt9t112_cfmts + i; - - return 0; -} - -static int mt9t112_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t112_priv *priv = to_mt9t112(client); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = MAX_WIDTH; - sel->r.height = MAX_HEIGHT; - return 0; - case V4L2_SEL_TGT_CROP: - sel->r = priv->frame; - return 0; - default: - return -EINVAL; - } -} - -static int mt9t112_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t112_priv *priv = to_mt9t112(client); - const struct v4l2_rect *rect = &sel->r; - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || - sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - return mt9t112_set_params(priv, rect, priv->format->code); -} - -static int mt9t112_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t112_priv *priv = to_mt9t112(client); - - if (format->pad) - return -EINVAL; - - mf->width = priv->frame.width; - mf->height = priv->frame.height; - mf->colorspace = priv->format->colorspace; - mf->code = priv->format->code; - mf->field = V4L2_FIELD_NONE; - - return 0; -} - -static int mt9t112_s_fmt(struct v4l2_subdev *sd, - struct v4l2_mbus_framefmt *mf) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t112_priv *priv = to_mt9t112(client); - struct v4l2_rect rect = { - .width = mf->width, - .height = mf->height, - .left = priv->frame.left, - .top = priv->frame.top, - }; - int ret; - - ret = mt9t112_set_params(priv, &rect, mf->code); - - if (!ret) - mf->colorspace = priv->format->colorspace; - - return ret; -} - -static int mt9t112_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t112_priv *priv = 
to_mt9t112(client); - unsigned int top, left; - int i; - - if (format->pad) - return -EINVAL; - - for (i = 0; i < priv->num_formats; i++) - if (mt9t112_cfmts[i].code == mf->code) - break; - - if (i == priv->num_formats) { - mf->code = MEDIA_BUS_FMT_UYVY8_2X8; - mf->colorspace = V4L2_COLORSPACE_JPEG; - } else { - mf->colorspace = mt9t112_cfmts[i].colorspace; - } - - mt9t112_frame_check(&mf->width, &mf->height, &left, &top); - - mf->field = V4L2_FIELD_NONE; - - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - return mt9t112_s_fmt(sd, mf); - cfg->try_fmt = *mf; - return 0; -} - -static int mt9t112_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct mt9t112_priv *priv = to_mt9t112(client); - - if (code->pad || code->index >= priv->num_formats) - return -EINVAL; - - code->code = mt9t112_cfmts[code->index].code; - - return 0; -} - -static int mt9t112_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_VSYNC_ACTIVE_HIGH | - V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_DATA_ACTIVE_HIGH | - V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING; - cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); - - return 0; -} - -static int mt9t112_s_mbus_config(struct v4l2_subdev *sd, - const struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct mt9t112_priv *priv = to_mt9t112(client); - - if (soc_camera_apply_board_flags(ssdd, cfg) & V4L2_MBUS_PCLK_SAMPLE_RISING) - priv->flags |= PCLK_RISING; - - return 0; -} - -static const struct v4l2_subdev_video_ops mt9t112_subdev_video_ops = { - .s_stream = mt9t112_s_stream, - .g_mbus_config = mt9t112_g_mbus_config, - .s_mbus_config = mt9t112_s_mbus_config, -}; - -static const struct v4l2_subdev_pad_ops mt9t112_subdev_pad_ops = { - .enum_mbus_code = mt9t112_enum_mbus_code, - .get_selection = mt9t112_get_selection, - .set_selection = mt9t112_set_selection, - .get_fmt = mt9t112_get_fmt, - .set_fmt = mt9t112_set_fmt, -}; - -/************************************************************************ - i2c driver -************************************************************************/ -static const struct v4l2_subdev_ops mt9t112_subdev_ops = { - .core = &mt9t112_subdev_core_ops, - .video = &mt9t112_subdev_video_ops, - .pad = &mt9t112_subdev_pad_ops, -}; - -static int mt9t112_camera_probe(struct i2c_client *client) -{ - struct mt9t112_priv *priv = to_mt9t112(client); - const char *devname; - int chipid; - int ret; - - ret = mt9t112_s_power(&priv->subdev, 1); - if (ret < 0) - return ret; - - /* - * check and show chip ID - */ - mt9t112_reg_read(chipid, client, 0x0000); - - switch (chipid) { - case 0x2680: - devname = "mt9t111"; - priv->num_formats = 1; - break; - case 0x2682: - devname = "mt9t112"; - priv->num_formats = ARRAY_SIZE(mt9t112_cfmts); - break; - default: - dev_err(&client->dev, "Product ID error %04x\n", chipid); - ret = -ENODEV; - goto done; - } - - dev_info(&client->dev, "%s chip ID %04x\n", devname, chipid); - -done: - mt9t112_s_power(&priv->subdev, 0); - return ret; -} - -static int mt9t112_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct mt9t112_priv *priv; 
- struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct v4l2_rect rect = { - .width = VGA_WIDTH, - .height = VGA_HEIGHT, - .left = (MAX_WIDTH - VGA_WIDTH) / 2, - .top = (MAX_HEIGHT - VGA_HEIGHT) / 2, - }; - int ret; - - if (!ssdd || !ssdd->drv_priv) { - dev_err(&client->dev, "mt9t112: missing platform data!\n"); - return -EINVAL; - } - - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->info = ssdd->drv_priv; - - v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops); - - priv->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - ret = mt9t112_camera_probe(client); - - /* Cannot fail: using the default supported pixel code */ - if (!ret) - mt9t112_set_params(priv, &rect, MEDIA_BUS_FMT_UYVY8_2X8); - else - v4l2_clk_put(priv->clk); - - return ret; -} - -static int mt9t112_remove(struct i2c_client *client) -{ - struct mt9t112_priv *priv = to_mt9t112(client); - - v4l2_clk_put(priv->clk); - return 0; -} - -static const struct i2c_device_id mt9t112_id[] = { - { "mt9t112", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, mt9t112_id); - -static struct i2c_driver mt9t112_i2c_driver = { - .driver = { - .name = "mt9t112", - }, - .probe = mt9t112_probe, - .remove = mt9t112_remove, - .id_table = mt9t112_id, -}; - -module_i2c_driver(mt9t112_i2c_driver); - -MODULE_DESCRIPTION("SoC Camera driver for mt9t112"); -MODULE_AUTHOR("Kuninori Morimoto"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/soc_camera/soc_ov772x.c b/drivers/media/i2c/soc_camera/soc_ov772x.c deleted file mode 100644 index fafd372527b2..000000000000 --- a/drivers/media/i2c/soc_camera/soc_ov772x.c +++ /dev/null @@ -1,1123 +0,0 @@ -/* - * ov772x Camera Driver - * - * Copyright (C) 2008 Renesas Solutions Corp. - * Kuninori Morimoto - * - * Based on ov7670 and soc_camera_platform driver, - * - * Copyright 2006-7 Jonathan Corbet - * Copyright (C) 2008 Magnus Damm - * Copyright (C) 2008, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -/* - * register offset - */ -#define GAIN 0x00 /* AGC - Gain control gain setting */ -#define BLUE 0x01 /* AWB - Blue channel gain setting */ -#define RED 0x02 /* AWB - Red channel gain setting */ -#define GREEN 0x03 /* AWB - Green channel gain setting */ -#define COM1 0x04 /* Common control 1 */ -#define BAVG 0x05 /* U/B Average Level */ -#define GAVG 0x06 /* Y/Gb Average Level */ -#define RAVG 0x07 /* V/R Average Level */ -#define AECH 0x08 /* Exposure Value - AEC MSBs */ -#define COM2 0x09 /* Common control 2 */ -#define PID 0x0A /* Product ID Number MSB */ -#define VER 0x0B /* Product ID Number LSB */ -#define COM3 0x0C /* Common control 3 */ -#define COM4 0x0D /* Common control 4 */ -#define COM5 0x0E /* Common control 5 */ -#define COM6 0x0F /* Common control 6 */ -#define AEC 0x10 /* Exposure Value */ -#define CLKRC 0x11 /* Internal clock */ -#define COM7 0x12 /* Common control 7 */ -#define COM8 0x13 /* Common control 8 */ -#define COM9 0x14 /* Common control 9 */ -#define COM10 0x15 /* Common control 10 */ -#define REG16 0x16 /* Register 16 */ -#define HSTART 0x17 /* Horizontal sensor size */ -#define HSIZE 0x18 /* Horizontal frame (HREF column) end high 8-bit */ -#define VSTART 0x19 /* Vertical frame (row) start high 8-bit */ -#define VSIZE 0x1A /* Vertical sensor size */ -#define PSHFT 0x1B /* Data format - pixel delay select */ -#define MIDH 0x1C /* Manufacturer ID byte - high */ -#define MIDL 0x1D /* Manufacturer ID byte - low */ -#define LAEC 0x1F /* Fine AEC value */ -#define COM11 0x20 /* Common control 11 */ -#define BDBASE 0x22 /* Banding filter Minimum AEC value */ -#define DBSTEP 0x23 /* Banding filter Maximum Setp */ -#define AEW 0x24 /* AGC/AEC - Stable operating region (upper limit) */ -#define AEB 0x25 /* AGC/AEC - Stable operating region (lower limit) */ -#define VPT 0x26 /* AGC/AEC Fast mode operating region */ -#define REG28 0x28 /* Register 28 */ -#define HOUTSIZE 0x29 /* Horizontal data output size MSBs */ -#define EXHCH 0x2A /* Dummy pixel insert MSB */ -#define EXHCL 0x2B /* Dummy pixel insert LSB */ -#define VOUTSIZE 0x2C /* Vertical data output size MSBs */ -#define ADVFL 0x2D /* LSB of insert dummy lines in Vertical direction */ -#define ADVFH 0x2E /* MSG of insert dummy lines in Vertical direction */ -#define YAVE 0x2F /* Y/G Channel Average value */ -#define LUMHTH 0x30 /* Histogram AEC/AGC Luminance high level threshold */ -#define LUMLTH 0x31 /* Histogram AEC/AGC Luminance low level threshold */ -#define HREF 0x32 /* Image start and size control */ -#define DM_LNL 0x33 /* Dummy line low 8 bits */ -#define DM_LNH 0x34 /* Dummy line high 8 bits */ -#define ADOFF_B 0x35 /* AD offset compensation value for B channel */ -#define ADOFF_R 0x36 /* AD offset compensation value for R channel */ -#define ADOFF_GB 0x37 /* AD offset compensation value for Gb channel */ -#define ADOFF_GR 0x38 /* AD offset compensation value for Gr channel */ -#define OFF_B 0x39 /* Analog process B channel offset value */ -#define OFF_R 0x3A /* Analog process R channel offset value */ -#define OFF_GB 0x3B /* Analog process Gb channel offset value */ -#define OFF_GR 0x3C /* Analog process Gr channel offset value */ -#define COM12 0x3D /* Common control 12 */ -#define COM13 0x3E /* Common control 13 */ -#define COM14 0x3F /* Common control 14 */ -#define COM15 0x40 /* Common control 15*/ -#define COM16 0x41 /* Common control 16 */ 
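(The register offsets above are reached through plain SMBus byte transfers, which the ov772x_read()/ov772x_write() wrappers further down in this file pass straight to i2c_smbus_read_byte_data()/i2c_smbus_write_byte_data(). As a minimal sketch of that access pattern — the helper name ov772x_read_product_id() is invented here for illustration and is not part of the removed driver — identifying the sensor from the PID (0x0A) and VER (0x0B) registers looks roughly like this:

	/* Illustrative sketch only: combine PID/VER into the 16-bit product ID,
	 * the same value the probe code below matches against OV7720/OV7725. */
	static int ov772x_read_product_id(struct i2c_client *client)
	{
		int pid = i2c_smbus_read_byte_data(client, PID);	/* product ID MSB */
		int ver = i2c_smbus_read_byte_data(client, VER);	/* product ID LSB */

		if (pid < 0 || ver < 0)
			return pid < 0 ? pid : ver;	/* propagate the I2C error */

		return (pid << 8) | ver;	/* 0x7720 = OV7720, 0x7721 = OV7725 */
	}

The removed driver performs the same two reads in ov772x_video_probe() and compares the combined value against the OV7720/OV7725 IDs defined later in this file.)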
-#define TGT_B 0x42 /* BLC blue channel target value */ -#define TGT_R 0x43 /* BLC red channel target value */ -#define TGT_GB 0x44 /* BLC Gb channel target value */ -#define TGT_GR 0x45 /* BLC Gr channel target value */ -/* for ov7720 */ -#define LCC0 0x46 /* Lens correction control 0 */ -#define LCC1 0x47 /* Lens correction option 1 - X coordinate */ -#define LCC2 0x48 /* Lens correction option 2 - Y coordinate */ -#define LCC3 0x49 /* Lens correction option 3 */ -#define LCC4 0x4A /* Lens correction option 4 - radius of the circular */ -#define LCC5 0x4B /* Lens correction option 5 */ -#define LCC6 0x4C /* Lens correction option 6 */ -/* for ov7725 */ -#define LC_CTR 0x46 /* Lens correction control */ -#define LC_XC 0x47 /* X coordinate of lens correction center relative */ -#define LC_YC 0x48 /* Y coordinate of lens correction center relative */ -#define LC_COEF 0x49 /* Lens correction coefficient */ -#define LC_RADI 0x4A /* Lens correction radius */ -#define LC_COEFB 0x4B /* Lens B channel compensation coefficient */ -#define LC_COEFR 0x4C /* Lens R channel compensation coefficient */ - -#define FIXGAIN 0x4D /* Analog fix gain amplifer */ -#define AREF0 0x4E /* Sensor reference control */ -#define AREF1 0x4F /* Sensor reference current control */ -#define AREF2 0x50 /* Analog reference control */ -#define AREF3 0x51 /* ADC reference control */ -#define AREF4 0x52 /* ADC reference control */ -#define AREF5 0x53 /* ADC reference control */ -#define AREF6 0x54 /* Analog reference control */ -#define AREF7 0x55 /* Analog reference control */ -#define UFIX 0x60 /* U channel fixed value output */ -#define VFIX 0x61 /* V channel fixed value output */ -#define AWBB_BLK 0x62 /* AWB option for advanced AWB */ -#define AWB_CTRL0 0x63 /* AWB control byte 0 */ -#define DSP_CTRL1 0x64 /* DSP control byte 1 */ -#define DSP_CTRL2 0x65 /* DSP control byte 2 */ -#define DSP_CTRL3 0x66 /* DSP control byte 3 */ -#define DSP_CTRL4 0x67 /* DSP control byte 4 */ -#define AWB_BIAS 0x68 /* AWB BLC level clip */ -#define AWB_CTRL1 0x69 /* AWB control 1 */ -#define AWB_CTRL2 0x6A /* AWB control 2 */ -#define AWB_CTRL3 0x6B /* AWB control 3 */ -#define AWB_CTRL4 0x6C /* AWB control 4 */ -#define AWB_CTRL5 0x6D /* AWB control 5 */ -#define AWB_CTRL6 0x6E /* AWB control 6 */ -#define AWB_CTRL7 0x6F /* AWB control 7 */ -#define AWB_CTRL8 0x70 /* AWB control 8 */ -#define AWB_CTRL9 0x71 /* AWB control 9 */ -#define AWB_CTRL10 0x72 /* AWB control 10 */ -#define AWB_CTRL11 0x73 /* AWB control 11 */ -#define AWB_CTRL12 0x74 /* AWB control 12 */ -#define AWB_CTRL13 0x75 /* AWB control 13 */ -#define AWB_CTRL14 0x76 /* AWB control 14 */ -#define AWB_CTRL15 0x77 /* AWB control 15 */ -#define AWB_CTRL16 0x78 /* AWB control 16 */ -#define AWB_CTRL17 0x79 /* AWB control 17 */ -#define AWB_CTRL18 0x7A /* AWB control 18 */ -#define AWB_CTRL19 0x7B /* AWB control 19 */ -#define AWB_CTRL20 0x7C /* AWB control 20 */ -#define AWB_CTRL21 0x7D /* AWB control 21 */ -#define GAM1 0x7E /* Gamma Curve 1st segment input end point */ -#define GAM2 0x7F /* Gamma Curve 2nd segment input end point */ -#define GAM3 0x80 /* Gamma Curve 3rd segment input end point */ -#define GAM4 0x81 /* Gamma Curve 4th segment input end point */ -#define GAM5 0x82 /* Gamma Curve 5th segment input end point */ -#define GAM6 0x83 /* Gamma Curve 6th segment input end point */ -#define GAM7 0x84 /* Gamma Curve 7th segment input end point */ -#define GAM8 0x85 /* Gamma Curve 8th segment input end point */ -#define GAM9 0x86 /* Gamma Curve 9th segment input end 
point */ -#define GAM10 0x87 /* Gamma Curve 10th segment input end point */ -#define GAM11 0x88 /* Gamma Curve 11th segment input end point */ -#define GAM12 0x89 /* Gamma Curve 12th segment input end point */ -#define GAM13 0x8A /* Gamma Curve 13th segment input end point */ -#define GAM14 0x8B /* Gamma Curve 14th segment input end point */ -#define GAM15 0x8C /* Gamma Curve 15th segment input end point */ -#define SLOP 0x8D /* Gamma curve highest segment slope */ -#define DNSTH 0x8E /* De-noise threshold */ -#define EDGE_STRNGT 0x8F /* Edge strength control when manual mode */ -#define EDGE_TRSHLD 0x90 /* Edge threshold control when manual mode */ -#define DNSOFF 0x91 /* Auto De-noise threshold control */ -#define EDGE_UPPER 0x92 /* Edge strength upper limit when Auto mode */ -#define EDGE_LOWER 0x93 /* Edge strength lower limit when Auto mode */ -#define MTX1 0x94 /* Matrix coefficient 1 */ -#define MTX2 0x95 /* Matrix coefficient 2 */ -#define MTX3 0x96 /* Matrix coefficient 3 */ -#define MTX4 0x97 /* Matrix coefficient 4 */ -#define MTX5 0x98 /* Matrix coefficient 5 */ -#define MTX6 0x99 /* Matrix coefficient 6 */ -#define MTX_CTRL 0x9A /* Matrix control */ -#define BRIGHT 0x9B /* Brightness control */ -#define CNTRST 0x9C /* Contrast contrast */ -#define CNTRST_CTRL 0x9D /* Contrast contrast center */ -#define UVAD_J0 0x9E /* Auto UV adjust contrast 0 */ -#define UVAD_J1 0x9F /* Auto UV adjust contrast 1 */ -#define SCAL0 0xA0 /* Scaling control 0 */ -#define SCAL1 0xA1 /* Scaling control 1 */ -#define SCAL2 0xA2 /* Scaling control 2 */ -#define FIFODLYM 0xA3 /* FIFO manual mode delay control */ -#define FIFODLYA 0xA4 /* FIFO auto mode delay control */ -#define SDE 0xA6 /* Special digital effect control */ -#define USAT 0xA7 /* U component saturation control */ -#define VSAT 0xA8 /* V component saturation control */ -/* for ov7720 */ -#define HUE0 0xA9 /* Hue control 0 */ -#define HUE1 0xAA /* Hue control 1 */ -/* for ov7725 */ -#define HUECOS 0xA9 /* Cosine value */ -#define HUESIN 0xAA /* Sine value */ - -#define SIGN 0xAB /* Sign bit for Hue and contrast */ -#define DSPAUTO 0xAC /* DSP auto function ON/OFF control */ - -/* - * register detail - */ - -/* COM2 */ -#define SOFT_SLEEP_MODE 0x10 /* Soft sleep mode */ - /* Output drive capability */ -#define OCAP_1x 0x00 /* 1x */ -#define OCAP_2x 0x01 /* 2x */ -#define OCAP_3x 0x02 /* 3x */ -#define OCAP_4x 0x03 /* 4x */ - -/* COM3 */ -#define SWAP_MASK (SWAP_RGB | SWAP_YUV | SWAP_ML) -#define IMG_MASK (VFLIP_IMG | HFLIP_IMG) - -#define VFLIP_IMG 0x80 /* Vertical flip image ON/OFF selection */ -#define HFLIP_IMG 0x40 /* Horizontal mirror image ON/OFF selection */ -#define SWAP_RGB 0x20 /* Swap B/R output sequence in RGB mode */ -#define SWAP_YUV 0x10 /* Swap Y/UV output sequence in YUV mode */ -#define SWAP_ML 0x08 /* Swap output MSB/LSB */ - /* Tri-state option for output clock */ -#define NOTRI_CLOCK 0x04 /* 0: Tri-state at this period */ - /* 1: No tri-state at this period */ - /* Tri-state option for output data */ -#define NOTRI_DATA 0x02 /* 0: Tri-state at this period */ - /* 1: No tri-state at this period */ -#define SCOLOR_TEST 0x01 /* Sensor color bar test pattern */ - -/* COM4 */ - /* PLL frequency control */ -#define PLL_BYPASS 0x00 /* 00: Bypass PLL */ -#define PLL_4x 0x40 /* 01: PLL 4x */ -#define PLL_6x 0x80 /* 10: PLL 6x */ -#define PLL_8x 0xc0 /* 11: PLL 8x */ - /* AEC evaluate window */ -#define AEC_FULL 0x00 /* 00: Full window */ -#define AEC_1p2 0x10 /* 01: 1/2 window */ -#define AEC_1p4 0x20 /* 10: 1/4 window */ 
-#define AEC_2p3 0x30 /* 11: Low 2/3 window */ - -/* COM5 */ -#define AFR_ON_OFF 0x80 /* Auto frame rate control ON/OFF selection */ -#define AFR_SPPED 0x40 /* Auto frame rate control speed selection */ - /* Auto frame rate max rate control */ -#define AFR_NO_RATE 0x00 /* No reduction of frame rate */ -#define AFR_1p2 0x10 /* Max reduction to 1/2 frame rate */ -#define AFR_1p4 0x20 /* Max reduction to 1/4 frame rate */ -#define AFR_1p8 0x30 /* Max reduction to 1/8 frame rate */ - /* Auto frame rate active point control */ -#define AF_2x 0x00 /* Add frame when AGC reaches 2x gain */ -#define AF_4x 0x04 /* Add frame when AGC reaches 4x gain */ -#define AF_8x 0x08 /* Add frame when AGC reaches 8x gain */ -#define AF_16x 0x0c /* Add frame when AGC reaches 16x gain */ - /* AEC max step control */ -#define AEC_NO_LIMIT 0x01 /* 0 : AEC incease step has limit */ - /* 1 : No limit to AEC increase step */ - -/* COM7 */ - /* SCCB Register Reset */ -#define SCCB_RESET 0x80 /* 0 : No change */ - /* 1 : Resets all registers to default */ - /* Resolution selection */ -#define SLCT_MASK 0x40 /* Mask of VGA or QVGA */ -#define SLCT_VGA 0x00 /* 0 : VGA */ -#define SLCT_QVGA 0x40 /* 1 : QVGA */ -#define ITU656_ON_OFF 0x20 /* ITU656 protocol ON/OFF selection */ -#define SENSOR_RAW 0x10 /* Sensor RAW */ - /* RGB output format control */ -#define FMT_MASK 0x0c /* Mask of color format */ -#define FMT_GBR422 0x00 /* 00 : GBR 4:2:2 */ -#define FMT_RGB565 0x04 /* 01 : RGB 565 */ -#define FMT_RGB555 0x08 /* 10 : RGB 555 */ -#define FMT_RGB444 0x0c /* 11 : RGB 444 */ - /* Output format control */ -#define OFMT_MASK 0x03 /* Mask of output format */ -#define OFMT_YUV 0x00 /* 00 : YUV */ -#define OFMT_P_BRAW 0x01 /* 01 : Processed Bayer RAW */ -#define OFMT_RGB 0x02 /* 10 : RGB */ -#define OFMT_BRAW 0x03 /* 11 : Bayer RAW */ - -/* COM8 */ -#define FAST_ALGO 0x80 /* Enable fast AGC/AEC algorithm */ - /* AEC Setp size limit */ -#define UNLMT_STEP 0x40 /* 0 : Step size is limited */ - /* 1 : Unlimited step size */ -#define BNDF_ON_OFF 0x20 /* Banding filter ON/OFF */ -#define AEC_BND 0x10 /* Enable AEC below banding value */ -#define AEC_ON_OFF 0x08 /* Fine AEC ON/OFF control */ -#define AGC_ON 0x04 /* AGC Enable */ -#define AWB_ON 0x02 /* AWB Enable */ -#define AEC_ON 0x01 /* AEC Enable */ - -/* COM9 */ -#define BASE_AECAGC 0x80 /* Histogram or average based AEC/AGC */ - /* Automatic gain ceiling - maximum AGC value */ -#define GAIN_2x 0x00 /* 000 : 2x */ -#define GAIN_4x 0x10 /* 001 : 4x */ -#define GAIN_8x 0x20 /* 010 : 8x */ -#define GAIN_16x 0x30 /* 011 : 16x */ -#define GAIN_32x 0x40 /* 100 : 32x */ -#define GAIN_64x 0x50 /* 101 : 64x */ -#define GAIN_128x 0x60 /* 110 : 128x */ -#define DROP_VSYNC 0x04 /* Drop VSYNC output of corrupt frame */ -#define DROP_HREF 0x02 /* Drop HREF output of corrupt frame */ - -/* COM11 */ -#define SGLF_ON_OFF 0x02 /* Single frame ON/OFF selection */ -#define SGLF_TRIG 0x01 /* Single frame transfer trigger */ - -/* HREF */ -#define HREF_VSTART_SHIFT 6 /* VSTART LSB */ -#define HREF_HSTART_SHIFT 4 /* HSTART 2 LSBs */ -#define HREF_VSIZE_SHIFT 2 /* VSIZE LSB */ -#define HREF_HSIZE_SHIFT 0 /* HSIZE 2 LSBs */ - -/* EXHCH */ -#define EXHCH_VSIZE_SHIFT 2 /* VOUTSIZE LSB */ -#define EXHCH_HSIZE_SHIFT 0 /* HOUTSIZE 2 LSBs */ - -/* DSP_CTRL1 */ -#define FIFO_ON 0x80 /* FIFO enable/disable selection */ -#define UV_ON_OFF 0x40 /* UV adjust function ON/OFF selection */ -#define YUV444_2_422 0x20 /* YUV444 to 422 UV channel option selection */ -#define CLR_MTRX_ON_OFF 0x10 /* Color matrix ON/OFF 
selection */ -#define INTPLT_ON_OFF 0x08 /* Interpolation ON/OFF selection */ -#define GMM_ON_OFF 0x04 /* Gamma function ON/OFF selection */ -#define AUTO_BLK_ON_OFF 0x02 /* Black defect auto correction ON/OFF */ -#define AUTO_WHT_ON_OFF 0x01 /* White define auto correction ON/OFF */ - -/* DSP_CTRL3 */ -#define UV_MASK 0x80 /* UV output sequence option */ -#define UV_ON 0x80 /* ON */ -#define UV_OFF 0x00 /* OFF */ -#define CBAR_MASK 0x20 /* DSP Color bar mask */ -#define CBAR_ON 0x20 /* ON */ -#define CBAR_OFF 0x00 /* OFF */ - -/* DSP_CTRL4 */ -#define DSP_OFMT_YUV 0x00 -#define DSP_OFMT_RGB 0x00 -#define DSP_OFMT_RAW8 0x02 -#define DSP_OFMT_RAW10 0x03 - -/* DSPAUTO (DSP Auto Function ON/OFF Control) */ -#define AWB_ACTRL 0x80 /* AWB auto threshold control */ -#define DENOISE_ACTRL 0x40 /* De-noise auto threshold control */ -#define EDGE_ACTRL 0x20 /* Edge enhancement auto strength control */ -#define UV_ACTRL 0x10 /* UV adjust auto slope control */ -#define SCAL0_ACTRL 0x08 /* Auto scaling factor control */ -#define SCAL1_2_ACTRL 0x04 /* Auto scaling factor control */ - -#define OV772X_MAX_WIDTH VGA_WIDTH -#define OV772X_MAX_HEIGHT VGA_HEIGHT - -/* - * ID - */ -#define OV7720 0x7720 -#define OV7725 0x7721 -#define VERSION(pid, ver) ((pid<<8)|(ver&0xFF)) - -/* - * struct - */ - -struct ov772x_color_format { - u32 code; - enum v4l2_colorspace colorspace; - u8 dsp3; - u8 dsp4; - u8 com3; - u8 com7; -}; - -struct ov772x_win_size { - char *name; - unsigned char com7_bit; - struct v4l2_rect rect; -}; - -struct ov772x_priv { - struct v4l2_subdev subdev; - struct v4l2_ctrl_handler hdl; - struct v4l2_clk *clk; - struct ov772x_camera_info *info; - const struct ov772x_color_format *cfmt; - const struct ov772x_win_size *win; - unsigned short flag_vflip:1; - unsigned short flag_hflip:1; - /* band_filter = COM8[5] ? 256 - BDBASE : 0 */ - unsigned short band_filter; -}; - -/* - * supported color format list - */ -static const struct ov772x_color_format ov772x_cfmts[] = { - { - .code = MEDIA_BUS_FMT_YUYV8_2X8, - .colorspace = V4L2_COLORSPACE_JPEG, - .dsp3 = 0x0, - .dsp4 = DSP_OFMT_YUV, - .com3 = SWAP_YUV, - .com7 = OFMT_YUV, - }, - { - .code = MEDIA_BUS_FMT_YVYU8_2X8, - .colorspace = V4L2_COLORSPACE_JPEG, - .dsp3 = UV_ON, - .dsp4 = DSP_OFMT_YUV, - .com3 = SWAP_YUV, - .com7 = OFMT_YUV, - }, - { - .code = MEDIA_BUS_FMT_UYVY8_2X8, - .colorspace = V4L2_COLORSPACE_JPEG, - .dsp3 = 0x0, - .dsp4 = DSP_OFMT_YUV, - .com3 = 0x0, - .com7 = OFMT_YUV, - }, - { - .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE, - .colorspace = V4L2_COLORSPACE_SRGB, - .dsp3 = 0x0, - .dsp4 = DSP_OFMT_YUV, - .com3 = SWAP_RGB, - .com7 = FMT_RGB555 | OFMT_RGB, - }, - { - .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE, - .colorspace = V4L2_COLORSPACE_SRGB, - .dsp3 = 0x0, - .dsp4 = DSP_OFMT_YUV, - .com3 = 0x0, - .com7 = FMT_RGB555 | OFMT_RGB, - }, - { - .code = MEDIA_BUS_FMT_RGB565_2X8_LE, - .colorspace = V4L2_COLORSPACE_SRGB, - .dsp3 = 0x0, - .dsp4 = DSP_OFMT_YUV, - .com3 = SWAP_RGB, - .com7 = FMT_RGB565 | OFMT_RGB, - }, - { - .code = MEDIA_BUS_FMT_RGB565_2X8_BE, - .colorspace = V4L2_COLORSPACE_SRGB, - .dsp3 = 0x0, - .dsp4 = DSP_OFMT_YUV, - .com3 = 0x0, - .com7 = FMT_RGB565 | OFMT_RGB, - }, - { - /* Setting DSP4 to DSP_OFMT_RAW8 still gives 10-bit output, - * regardless of the COM7 value. We can thus only support 10-bit - * Bayer until someone figures it out. 
- */ - .code = MEDIA_BUS_FMT_SBGGR10_1X10, - .colorspace = V4L2_COLORSPACE_SRGB, - .dsp3 = 0x0, - .dsp4 = DSP_OFMT_RAW10, - .com3 = 0x0, - .com7 = SENSOR_RAW | OFMT_BRAW, - }, -}; - - -/* - * window size list - */ - -static const struct ov772x_win_size ov772x_win_sizes[] = { - { - .name = "VGA", - .com7_bit = SLCT_VGA, - .rect = { - .left = 140, - .top = 14, - .width = VGA_WIDTH, - .height = VGA_HEIGHT, - }, - }, { - .name = "QVGA", - .com7_bit = SLCT_QVGA, - .rect = { - .left = 252, - .top = 6, - .width = QVGA_WIDTH, - .height = QVGA_HEIGHT, - }, - }, -}; - -/* - * general function - */ - -static struct ov772x_priv *to_ov772x(struct v4l2_subdev *sd) -{ - return container_of(sd, struct ov772x_priv, subdev); -} - -static inline int ov772x_read(struct i2c_client *client, u8 addr) -{ - return i2c_smbus_read_byte_data(client, addr); -} - -static inline int ov772x_write(struct i2c_client *client, u8 addr, u8 value) -{ - return i2c_smbus_write_byte_data(client, addr, value); -} - -static int ov772x_mask_set(struct i2c_client *client, u8 command, u8 mask, - u8 set) -{ - s32 val = ov772x_read(client, command); - if (val < 0) - return val; - - val &= ~mask; - val |= set & mask; - - return ov772x_write(client, command, val); -} - -static int ov772x_reset(struct i2c_client *client) -{ - int ret; - - ret = ov772x_write(client, COM7, SCCB_RESET); - if (ret < 0) - return ret; - - msleep(1); - - return ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE); -} - -/* - * soc_camera_ops function - */ - -static int ov772x_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct ov772x_priv *priv = to_ov772x(sd); - - if (!enable) { - ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, SOFT_SLEEP_MODE); - return 0; - } - - ov772x_mask_set(client, COM2, SOFT_SLEEP_MODE, 0); - - dev_dbg(&client->dev, "format %d, win %s\n", - priv->cfmt->code, priv->win->name); - - return 0; -} - -static int ov772x_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct ov772x_priv *priv = container_of(ctrl->handler, - struct ov772x_priv, hdl); - struct v4l2_subdev *sd = &priv->subdev; - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret = 0; - u8 val; - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - val = ctrl->val ? VFLIP_IMG : 0x00; - priv->flag_vflip = ctrl->val; - if (priv->info->flags & OV772X_FLAG_VFLIP) - val ^= VFLIP_IMG; - return ov772x_mask_set(client, COM3, VFLIP_IMG, val); - case V4L2_CID_HFLIP: - val = ctrl->val ? 
HFLIP_IMG : 0x00; - priv->flag_hflip = ctrl->val; - if (priv->info->flags & OV772X_FLAG_HFLIP) - val ^= HFLIP_IMG; - return ov772x_mask_set(client, COM3, HFLIP_IMG, val); - case V4L2_CID_BAND_STOP_FILTER: - if (!ctrl->val) { - /* Switch the filter off, it is on now */ - ret = ov772x_mask_set(client, BDBASE, 0xff, 0xff); - if (!ret) - ret = ov772x_mask_set(client, COM8, - BNDF_ON_OFF, 0); - } else { - /* Switch the filter on, set AEC low limit */ - val = 256 - ctrl->val; - ret = ov772x_mask_set(client, COM8, - BNDF_ON_OFF, BNDF_ON_OFF); - if (!ret) - ret = ov772x_mask_set(client, BDBASE, - 0xff, val); - } - if (!ret) - priv->band_filter = ctrl->val; - return ret; - } - - return -EINVAL; -} - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int ov772x_g_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - reg->size = 1; - if (reg->reg > 0xff) - return -EINVAL; - - ret = ov772x_read(client, reg->reg); - if (ret < 0) - return ret; - - reg->val = (__u64)ret; - - return 0; -} - -static int ov772x_s_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg > 0xff || - reg->val > 0xff) - return -EINVAL; - - return ov772x_write(client, reg->reg, reg->val); -} -#endif - -static int ov772x_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct ov772x_priv *priv = to_ov772x(sd); - - return soc_camera_set_power(&client->dev, ssdd, priv->clk, on); -} - -static const struct ov772x_win_size *ov772x_select_win(u32 width, u32 height) -{ - const struct ov772x_win_size *win = &ov772x_win_sizes[0]; - u32 best_diff = UINT_MAX; - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(ov772x_win_sizes); ++i) { - u32 diff = abs(width - ov772x_win_sizes[i].rect.width) - + abs(height - ov772x_win_sizes[i].rect.height); - if (diff < best_diff) { - best_diff = diff; - win = &ov772x_win_sizes[i]; - } - } - - return win; -} - -static void ov772x_select_params(const struct v4l2_mbus_framefmt *mf, - const struct ov772x_color_format **cfmt, - const struct ov772x_win_size **win) -{ - unsigned int i; - - /* Select a format. */ - *cfmt = &ov772x_cfmts[0]; - - for (i = 0; i < ARRAY_SIZE(ov772x_cfmts); i++) { - if (mf->code == ov772x_cfmts[i].code) { - *cfmt = &ov772x_cfmts[i]; - break; - } - } - - /* Select a window size. */ - *win = ov772x_select_win(mf->width, mf->height); -} - -static int ov772x_set_params(struct ov772x_priv *priv, - const struct ov772x_color_format *cfmt, - const struct ov772x_win_size *win) -{ - struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev); - int ret; - u8 val; - - /* - * reset hardware - */ - ov772x_reset(client); - - /* - * Edge Ctrl - */ - if (priv->info->edgectrl.strength & OV772X_MANUAL_EDGE_CTRL) { - - /* - * Manual Edge Control Mode - * - * Edge auto strength bit is set by default. - * Remove it when manual mode. 
- */ - - ret = ov772x_mask_set(client, DSPAUTO, EDGE_ACTRL, 0x00); - if (ret < 0) - goto ov772x_set_fmt_error; - - ret = ov772x_mask_set(client, - EDGE_TRSHLD, OV772X_EDGE_THRESHOLD_MASK, - priv->info->edgectrl.threshold); - if (ret < 0) - goto ov772x_set_fmt_error; - - ret = ov772x_mask_set(client, - EDGE_STRNGT, OV772X_EDGE_STRENGTH_MASK, - priv->info->edgectrl.strength); - if (ret < 0) - goto ov772x_set_fmt_error; - - } else if (priv->info->edgectrl.upper > priv->info->edgectrl.lower) { - /* - * Auto Edge Control Mode - * - * set upper and lower limit - */ - ret = ov772x_mask_set(client, - EDGE_UPPER, OV772X_EDGE_UPPER_MASK, - priv->info->edgectrl.upper); - if (ret < 0) - goto ov772x_set_fmt_error; - - ret = ov772x_mask_set(client, - EDGE_LOWER, OV772X_EDGE_LOWER_MASK, - priv->info->edgectrl.lower); - if (ret < 0) - goto ov772x_set_fmt_error; - } - - /* Format and window size */ - ret = ov772x_write(client, HSTART, win->rect.left >> 2); - if (ret < 0) - goto ov772x_set_fmt_error; - ret = ov772x_write(client, HSIZE, win->rect.width >> 2); - if (ret < 0) - goto ov772x_set_fmt_error; - ret = ov772x_write(client, VSTART, win->rect.top >> 1); - if (ret < 0) - goto ov772x_set_fmt_error; - ret = ov772x_write(client, VSIZE, win->rect.height >> 1); - if (ret < 0) - goto ov772x_set_fmt_error; - ret = ov772x_write(client, HOUTSIZE, win->rect.width >> 2); - if (ret < 0) - goto ov772x_set_fmt_error; - ret = ov772x_write(client, VOUTSIZE, win->rect.height >> 1); - if (ret < 0) - goto ov772x_set_fmt_error; - ret = ov772x_write(client, HREF, - ((win->rect.top & 1) << HREF_VSTART_SHIFT) | - ((win->rect.left & 3) << HREF_HSTART_SHIFT) | - ((win->rect.height & 1) << HREF_VSIZE_SHIFT) | - ((win->rect.width & 3) << HREF_HSIZE_SHIFT)); - if (ret < 0) - goto ov772x_set_fmt_error; - ret = ov772x_write(client, EXHCH, - ((win->rect.height & 1) << EXHCH_VSIZE_SHIFT) | - ((win->rect.width & 3) << EXHCH_HSIZE_SHIFT)); - if (ret < 0) - goto ov772x_set_fmt_error; - - /* - * set DSP_CTRL3 - */ - val = cfmt->dsp3; - if (val) { - ret = ov772x_mask_set(client, - DSP_CTRL3, UV_MASK, val); - if (ret < 0) - goto ov772x_set_fmt_error; - } - - /* DSP_CTRL4: AEC reference point and DSP output format. */ - if (cfmt->dsp4) { - ret = ov772x_write(client, DSP_CTRL4, cfmt->dsp4); - if (ret < 0) - goto ov772x_set_fmt_error; - } - - /* - * set COM3 - */ - val = cfmt->com3; - if (priv->info->flags & OV772X_FLAG_VFLIP) - val |= VFLIP_IMG; - if (priv->info->flags & OV772X_FLAG_HFLIP) - val |= HFLIP_IMG; - if (priv->flag_vflip) - val ^= VFLIP_IMG; - if (priv->flag_hflip) - val ^= HFLIP_IMG; - - ret = ov772x_mask_set(client, - COM3, SWAP_MASK | IMG_MASK, val); - if (ret < 0) - goto ov772x_set_fmt_error; - - /* COM7: Sensor resolution and output format control. 
*/ - ret = ov772x_write(client, COM7, win->com7_bit | cfmt->com7); - if (ret < 0) - goto ov772x_set_fmt_error; - - /* - * set COM8 - */ - if (priv->band_filter) { - ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF); - if (!ret) - ret = ov772x_mask_set(client, BDBASE, - 0xff, 256 - priv->band_filter); - if (ret < 0) - goto ov772x_set_fmt_error; - } - - return ret; - -ov772x_set_fmt_error: - - ov772x_reset(client); - - return ret; -} - -static int ov772x_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - sel->r.left = 0; - sel->r.top = 0; - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.width = OV772X_MAX_WIDTH; - sel->r.height = OV772X_MAX_HEIGHT; - return 0; - case V4L2_SEL_TGT_CROP: - sel->r.width = VGA_WIDTH; - sel->r.height = VGA_HEIGHT; - return 0; - default: - return -EINVAL; - } -} - -static int ov772x_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct ov772x_priv *priv = to_ov772x(sd); - - if (format->pad) - return -EINVAL; - - mf->width = priv->win->rect.width; - mf->height = priv->win->rect.height; - mf->code = priv->cfmt->code; - mf->colorspace = priv->cfmt->colorspace; - mf->field = V4L2_FIELD_NONE; - - return 0; -} - -static int ov772x_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct ov772x_priv *priv = to_ov772x(sd); - struct v4l2_mbus_framefmt *mf = &format->format; - const struct ov772x_color_format *cfmt; - const struct ov772x_win_size *win; - int ret; - - if (format->pad) - return -EINVAL; - - ov772x_select_params(mf, &cfmt, &win); - - mf->code = cfmt->code; - mf->width = win->rect.width; - mf->height = win->rect.height; - mf->field = V4L2_FIELD_NONE; - mf->colorspace = cfmt->colorspace; - - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *mf; - return 0; - } - - ret = ov772x_set_params(priv, cfmt, win); - if (ret < 0) - return ret; - - priv->win = win; - priv->cfmt = cfmt; - return 0; -} - -static int ov772x_video_probe(struct ov772x_priv *priv) -{ - struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev); - u8 pid, ver; - const char *devname; - int ret; - - ret = ov772x_s_power(&priv->subdev, 1); - if (ret < 0) - return ret; - - /* - * check and show product ID and manufacturer ID - */ - pid = ov772x_read(client, PID); - ver = ov772x_read(client, VER); - - switch (VERSION(pid, ver)) { - case OV7720: - devname = "ov7720"; - break; - case OV7725: - devname = "ov7725"; - break; - default: - dev_err(&client->dev, - "Product ID error %x:%x\n", pid, ver); - ret = -ENODEV; - goto done; - } - - dev_info(&client->dev, - "%s Product ID %0x:%0x Manufacturer ID %x:%x\n", - devname, - pid, - ver, - ov772x_read(client, MIDH), - ov772x_read(client, MIDL)); - ret = v4l2_ctrl_handler_setup(&priv->hdl); - -done: - ov772x_s_power(&priv->subdev, 0); - return ret; -} - -static const struct v4l2_ctrl_ops ov772x_ctrl_ops = { - .s_ctrl = ov772x_s_ctrl, -}; - -static const struct v4l2_subdev_core_ops ov772x_subdev_core_ops = { -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = ov772x_g_register, - .s_register = ov772x_s_register, -#endif - .s_power = ov772x_s_power, -}; - -static int ov772x_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || 
code->index >= ARRAY_SIZE(ov772x_cfmts)) - return -EINVAL; - - code->code = ov772x_cfmts[code->index].code; - return 0; -} - -static int ov772x_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER | - V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_HIGH | - V4L2_MBUS_DATA_ACTIVE_HIGH; - cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); - - return 0; -} - -static const struct v4l2_subdev_video_ops ov772x_subdev_video_ops = { - .s_stream = ov772x_s_stream, - .g_mbus_config = ov772x_g_mbus_config, -}; - -static const struct v4l2_subdev_pad_ops ov772x_subdev_pad_ops = { - .enum_mbus_code = ov772x_enum_mbus_code, - .get_selection = ov772x_get_selection, - .get_fmt = ov772x_get_fmt, - .set_fmt = ov772x_set_fmt, -}; - -static const struct v4l2_subdev_ops ov772x_subdev_ops = { - .core = &ov772x_subdev_core_ops, - .video = &ov772x_subdev_video_ops, - .pad = &ov772x_subdev_pad_ops, -}; - -/* - * i2c_driver function - */ - -static int ov772x_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct ov772x_priv *priv; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); - int ret; - - if (!ssdd || !ssdd->drv_priv) { - dev_err(&client->dev, "OV772X: missing platform data!\n"); - return -EINVAL; - } - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_PROTOCOL_MANGLING)) { - dev_err(&adapter->dev, - "I2C-Adapter doesn't support SMBUS_BYTE_DATA or PROTOCOL_MANGLING\n"); - return -EIO; - } - client->flags |= I2C_CLIENT_SCCB; - - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->info = ssdd->drv_priv; - - v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops); - v4l2_ctrl_handler_init(&priv->hdl, 3); - v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops, - V4L2_CID_VFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops, - V4L2_CID_HFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&priv->hdl, &ov772x_ctrl_ops, - V4L2_CID_BAND_STOP_FILTER, 0, 256, 1, 0); - priv->subdev.ctrl_handler = &priv->hdl; - if (priv->hdl.error) - return priv->hdl.error; - - priv->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(priv->clk)) { - ret = PTR_ERR(priv->clk); - goto eclkget; - } - - ret = ov772x_video_probe(priv); - if (ret < 0) { - v4l2_clk_put(priv->clk); -eclkget: - v4l2_ctrl_handler_free(&priv->hdl); - } else { - priv->cfmt = &ov772x_cfmts[0]; - priv->win = &ov772x_win_sizes[0]; - } - - return ret; -} - -static int ov772x_remove(struct i2c_client *client) -{ - struct ov772x_priv *priv = to_ov772x(i2c_get_clientdata(client)); - - v4l2_clk_put(priv->clk); - v4l2_device_unregister_subdev(&priv->subdev); - v4l2_ctrl_handler_free(&priv->hdl); - return 0; -} - -static const struct i2c_device_id ov772x_id[] = { - { "ov772x", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, ov772x_id); - -static struct i2c_driver ov772x_i2c_driver = { - .driver = { - .name = "ov772x", - }, - .probe = ov772x_probe, - .remove = ov772x_remove, - .id_table = ov772x_id, -}; - -module_i2c_driver(ov772x_i2c_driver); - -MODULE_DESCRIPTION("SoC Camera driver for ov772x"); -MODULE_AUTHOR("Kuninori Morimoto"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/soc_camera/soc_rj54n1cb0c.c 
b/drivers/media/i2c/soc_camera/soc_rj54n1cb0c.c deleted file mode 100644 index f0cb49a6167b..000000000000 --- a/drivers/media/i2c/soc_camera/soc_rj54n1cb0c.c +++ /dev/null @@ -1,1415 +0,0 @@ -/* - * Driver for RJ54N1CB0C CMOS Image Sensor from Sharp - * - * Copyright (C) 2009, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#define RJ54N1_DEV_CODE 0x0400 -#define RJ54N1_DEV_CODE2 0x0401 -#define RJ54N1_OUT_SEL 0x0403 -#define RJ54N1_XY_OUTPUT_SIZE_S_H 0x0404 -#define RJ54N1_X_OUTPUT_SIZE_S_L 0x0405 -#define RJ54N1_Y_OUTPUT_SIZE_S_L 0x0406 -#define RJ54N1_XY_OUTPUT_SIZE_P_H 0x0407 -#define RJ54N1_X_OUTPUT_SIZE_P_L 0x0408 -#define RJ54N1_Y_OUTPUT_SIZE_P_L 0x0409 -#define RJ54N1_LINE_LENGTH_PCK_S_H 0x040a -#define RJ54N1_LINE_LENGTH_PCK_S_L 0x040b -#define RJ54N1_LINE_LENGTH_PCK_P_H 0x040c -#define RJ54N1_LINE_LENGTH_PCK_P_L 0x040d -#define RJ54N1_RESIZE_N 0x040e -#define RJ54N1_RESIZE_N_STEP 0x040f -#define RJ54N1_RESIZE_STEP 0x0410 -#define RJ54N1_RESIZE_HOLD_H 0x0411 -#define RJ54N1_RESIZE_HOLD_L 0x0412 -#define RJ54N1_H_OBEN_OFS 0x0413 -#define RJ54N1_V_OBEN_OFS 0x0414 -#define RJ54N1_RESIZE_CONTROL 0x0415 -#define RJ54N1_STILL_CONTROL 0x0417 -#define RJ54N1_INC_USE_SEL_H 0x0425 -#define RJ54N1_INC_USE_SEL_L 0x0426 -#define RJ54N1_MIRROR_STILL_MODE 0x0427 -#define RJ54N1_INIT_START 0x0428 -#define RJ54N1_SCALE_1_2_LEV 0x0429 -#define RJ54N1_SCALE_4_LEV 0x042a -#define RJ54N1_Y_GAIN 0x04d8 -#define RJ54N1_APT_GAIN_UP 0x04fa -#define RJ54N1_RA_SEL_UL 0x0530 -#define RJ54N1_BYTE_SWAP 0x0531 -#define RJ54N1_OUT_SIGPO 0x053b -#define RJ54N1_WB_SEL_WEIGHT_I 0x054e -#define RJ54N1_BIT8_WB 0x0569 -#define RJ54N1_HCAPS_WB 0x056a -#define RJ54N1_VCAPS_WB 0x056b -#define RJ54N1_HCAPE_WB 0x056c -#define RJ54N1_VCAPE_WB 0x056d -#define RJ54N1_EXPOSURE_CONTROL 0x058c -#define RJ54N1_FRAME_LENGTH_S_H 0x0595 -#define RJ54N1_FRAME_LENGTH_S_L 0x0596 -#define RJ54N1_FRAME_LENGTH_P_H 0x0597 -#define RJ54N1_FRAME_LENGTH_P_L 0x0598 -#define RJ54N1_PEAK_H 0x05b7 -#define RJ54N1_PEAK_50 0x05b8 -#define RJ54N1_PEAK_60 0x05b9 -#define RJ54N1_PEAK_DIFF 0x05ba -#define RJ54N1_IOC 0x05ef -#define RJ54N1_TG_BYPASS 0x0700 -#define RJ54N1_PLL_L 0x0701 -#define RJ54N1_PLL_N 0x0702 -#define RJ54N1_PLL_EN 0x0704 -#define RJ54N1_RATIO_TG 0x0706 -#define RJ54N1_RATIO_T 0x0707 -#define RJ54N1_RATIO_R 0x0708 -#define RJ54N1_RAMP_TGCLK_EN 0x0709 -#define RJ54N1_OCLK_DSP 0x0710 -#define RJ54N1_RATIO_OP 0x0711 -#define RJ54N1_RATIO_O 0x0712 -#define RJ54N1_OCLK_SEL_EN 0x0713 -#define RJ54N1_CLK_RST 0x0717 -#define RJ54N1_RESET_STANDBY 0x0718 -#define RJ54N1_FWFLG 0x07fe - -#define E_EXCLK (1 << 7) -#define SOFT_STDBY (1 << 4) -#define SEN_RSTX (1 << 2) -#define TG_RSTX (1 << 1) -#define DSP_RSTX (1 << 0) - -#define RESIZE_HOLD_SEL (1 << 2) -#define RESIZE_GO (1 << 1) - -/* - * When cropping, the camera automatically centers the cropped region, there - * doesn't seem to be a way to specify an explicit location of the rectangle. 
- */ -#define RJ54N1_COLUMN_SKIP 0 -#define RJ54N1_ROW_SKIP 0 -#define RJ54N1_MAX_WIDTH 1600 -#define RJ54N1_MAX_HEIGHT 1200 - -#define PLL_L 2 -#define PLL_N 0x31 - -/* I2C addresses: 0x50, 0x51, 0x60, 0x61 */ - -/* RJ54N1CB0C has only one fixed colorspace per pixelcode */ -struct rj54n1_datafmt { - u32 code; - enum v4l2_colorspace colorspace; -}; - -/* Find a data format by a pixel code in an array */ -static const struct rj54n1_datafmt *rj54n1_find_datafmt( - u32 code, const struct rj54n1_datafmt *fmt, - int n) -{ - int i; - for (i = 0; i < n; i++) - if (fmt[i].code == code) - return fmt + i; - - return NULL; -} - -static const struct rj54n1_datafmt rj54n1_colour_fmts[] = { - {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}, - {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG}, - {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, - {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB}, - {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, - {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB}, - {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB}, - {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB}, - {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, -}; - -struct rj54n1_clock_div { - u8 ratio_tg; /* can be 0 or an odd number */ - u8 ratio_t; - u8 ratio_r; - u8 ratio_op; - u8 ratio_o; -}; - -struct rj54n1 { - struct v4l2_subdev subdev; - struct v4l2_ctrl_handler hdl; - struct v4l2_clk *clk; - struct rj54n1_clock_div clk_div; - const struct rj54n1_datafmt *fmt; - struct v4l2_rect rect; /* Sensor window */ - unsigned int tgclk_mhz; - bool auto_wb; - unsigned short width; /* Output window */ - unsigned short height; - unsigned short resize; /* Sensor * 1024 / resize = Output */ - unsigned short scale; - u8 bank; -}; - -struct rj54n1_reg_val { - u16 reg; - u8 val; -}; - -static const struct rj54n1_reg_val bank_4[] = { - {0x417, 0}, - {0x42c, 0}, - {0x42d, 0xf0}, - {0x42e, 0}, - {0x42f, 0x50}, - {0x430, 0xf5}, - {0x431, 0x16}, - {0x432, 0x20}, - {0x433, 0}, - {0x434, 0xc8}, - {0x43c, 8}, - {0x43e, 0x90}, - {0x445, 0x83}, - {0x4ba, 0x58}, - {0x4bb, 4}, - {0x4bc, 0x20}, - {0x4db, 4}, - {0x4fe, 2}, -}; - -static const struct rj54n1_reg_val bank_5[] = { - {0x514, 0}, - {0x516, 0}, - {0x518, 0}, - {0x51a, 0}, - {0x51d, 0xff}, - {0x56f, 0x28}, - {0x575, 0x40}, - {0x5bc, 0x48}, - {0x5c1, 6}, - {0x5e5, 0x11}, - {0x5e6, 0x43}, - {0x5e7, 0x33}, - {0x5e8, 0x21}, - {0x5e9, 0x30}, - {0x5ea, 0x0}, - {0x5eb, 0xa5}, - {0x5ec, 0xff}, - {0x5fe, 2}, -}; - -static const struct rj54n1_reg_val bank_7[] = { - {0x70a, 0}, - {0x714, 0xff}, - {0x715, 0xff}, - {0x716, 0x1f}, - {0x7FE, 2}, -}; - -static const struct rj54n1_reg_val bank_8[] = { - {0x800, 0x00}, - {0x801, 0x01}, - {0x802, 0x61}, - {0x805, 0x00}, - {0x806, 0x00}, - {0x807, 0x00}, - {0x808, 0x00}, - {0x809, 0x01}, - {0x80A, 0x61}, - {0x80B, 0x00}, - {0x80C, 0x01}, - {0x80D, 0x00}, - {0x80E, 0x00}, - {0x80F, 0x00}, - {0x810, 0x00}, - {0x811, 0x01}, - {0x812, 0x61}, - {0x813, 0x00}, - {0x814, 0x11}, - {0x815, 0x00}, - {0x816, 0x41}, - {0x817, 0x00}, - {0x818, 0x51}, - {0x819, 0x01}, - {0x81A, 0x1F}, - {0x81B, 0x00}, - {0x81C, 0x01}, - {0x81D, 0x00}, - {0x81E, 0x11}, - {0x81F, 0x00}, - {0x820, 0x41}, - {0x821, 0x00}, - {0x822, 0x51}, - {0x823, 0x00}, - {0x824, 0x00}, - {0x825, 0x00}, - {0x826, 0x47}, - {0x827, 0x01}, - {0x828, 0x4F}, - {0x829, 0x00}, - {0x82A, 0x00}, - {0x82B, 0x00}, - {0x82C, 0x30}, - {0x82D, 0x00}, - {0x82E, 0x40}, - {0x82F, 0x00}, - {0x830, 0xB3}, - {0x831, 0x00}, - {0x832, 0xE3}, - {0x833, 0x00}, - {0x834, 
0x00}, - {0x835, 0x00}, - {0x836, 0x00}, - {0x837, 0x00}, - {0x838, 0x00}, - {0x839, 0x01}, - {0x83A, 0x61}, - {0x83B, 0x00}, - {0x83C, 0x01}, - {0x83D, 0x00}, - {0x83E, 0x00}, - {0x83F, 0x00}, - {0x840, 0x00}, - {0x841, 0x01}, - {0x842, 0x61}, - {0x843, 0x00}, - {0x844, 0x1D}, - {0x845, 0x00}, - {0x846, 0x00}, - {0x847, 0x00}, - {0x848, 0x00}, - {0x849, 0x01}, - {0x84A, 0x1F}, - {0x84B, 0x00}, - {0x84C, 0x05}, - {0x84D, 0x00}, - {0x84E, 0x19}, - {0x84F, 0x01}, - {0x850, 0x21}, - {0x851, 0x01}, - {0x852, 0x5D}, - {0x853, 0x00}, - {0x854, 0x00}, - {0x855, 0x00}, - {0x856, 0x19}, - {0x857, 0x01}, - {0x858, 0x21}, - {0x859, 0x00}, - {0x85A, 0x00}, - {0x85B, 0x00}, - {0x85C, 0x00}, - {0x85D, 0x00}, - {0x85E, 0x00}, - {0x85F, 0x00}, - {0x860, 0xB3}, - {0x861, 0x00}, - {0x862, 0xE3}, - {0x863, 0x00}, - {0x864, 0x00}, - {0x865, 0x00}, - {0x866, 0x00}, - {0x867, 0x00}, - {0x868, 0x00}, - {0x869, 0xE2}, - {0x86A, 0x00}, - {0x86B, 0x01}, - {0x86C, 0x06}, - {0x86D, 0x00}, - {0x86E, 0x00}, - {0x86F, 0x00}, - {0x870, 0x60}, - {0x871, 0x8C}, - {0x872, 0x10}, - {0x873, 0x00}, - {0x874, 0xE0}, - {0x875, 0x00}, - {0x876, 0x27}, - {0x877, 0x01}, - {0x878, 0x00}, - {0x879, 0x00}, - {0x87A, 0x00}, - {0x87B, 0x03}, - {0x87C, 0x00}, - {0x87D, 0x00}, - {0x87E, 0x00}, - {0x87F, 0x00}, - {0x880, 0x00}, - {0x881, 0x00}, - {0x882, 0x00}, - {0x883, 0x00}, - {0x884, 0x00}, - {0x885, 0x00}, - {0x886, 0xF8}, - {0x887, 0x00}, - {0x888, 0x03}, - {0x889, 0x00}, - {0x88A, 0x64}, - {0x88B, 0x00}, - {0x88C, 0x03}, - {0x88D, 0x00}, - {0x88E, 0xB1}, - {0x88F, 0x00}, - {0x890, 0x03}, - {0x891, 0x01}, - {0x892, 0x1D}, - {0x893, 0x00}, - {0x894, 0x03}, - {0x895, 0x01}, - {0x896, 0x4B}, - {0x897, 0x00}, - {0x898, 0xE5}, - {0x899, 0x00}, - {0x89A, 0x01}, - {0x89B, 0x00}, - {0x89C, 0x01}, - {0x89D, 0x04}, - {0x89E, 0xC8}, - {0x89F, 0x00}, - {0x8A0, 0x01}, - {0x8A1, 0x01}, - {0x8A2, 0x61}, - {0x8A3, 0x00}, - {0x8A4, 0x01}, - {0x8A5, 0x00}, - {0x8A6, 0x00}, - {0x8A7, 0x00}, - {0x8A8, 0x00}, - {0x8A9, 0x00}, - {0x8AA, 0x7F}, - {0x8AB, 0x03}, - {0x8AC, 0x00}, - {0x8AD, 0x00}, - {0x8AE, 0x00}, - {0x8AF, 0x00}, - {0x8B0, 0x00}, - {0x8B1, 0x00}, - {0x8B6, 0x00}, - {0x8B7, 0x01}, - {0x8B8, 0x00}, - {0x8B9, 0x00}, - {0x8BA, 0x02}, - {0x8BB, 0x00}, - {0x8BC, 0xFF}, - {0x8BD, 0x00}, - {0x8FE, 2}, -}; - -static const struct rj54n1_reg_val bank_10[] = { - {0x10bf, 0x69} -}; - -/* Clock dividers - these are default register values, divider = register + 1 */ -static const struct rj54n1_clock_div clk_div = { - .ratio_tg = 3 /* default: 5 */, - .ratio_t = 4 /* default: 1 */, - .ratio_r = 4 /* default: 0 */, - .ratio_op = 1 /* default: 5 */, - .ratio_o = 9 /* default: 0 */, -}; - -static struct rj54n1 *to_rj54n1(const struct i2c_client *client) -{ - return container_of(i2c_get_clientdata(client), struct rj54n1, subdev); -} - -static int reg_read(struct i2c_client *client, const u16 reg) -{ - struct rj54n1 *rj54n1 = to_rj54n1(client); - int ret; - - /* set bank */ - if (rj54n1->bank != reg >> 8) { - dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8); - ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8); - if (ret < 0) - return ret; - rj54n1->bank = reg >> 8; - } - return i2c_smbus_read_byte_data(client, reg & 0xff); -} - -static int reg_write(struct i2c_client *client, const u16 reg, - const u8 data) -{ - struct rj54n1 *rj54n1 = to_rj54n1(client); - int ret; - - /* set bank */ - if (rj54n1->bank != reg >> 8) { - dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8); - ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8); - if (ret < 0) 
- return ret; - rj54n1->bank = reg >> 8; - } - dev_dbg(&client->dev, "[0x%x] = 0x%x\n", reg & 0xff, data); - return i2c_smbus_write_byte_data(client, reg & 0xff, data); -} - -static int reg_set(struct i2c_client *client, const u16 reg, - const u8 data, const u8 mask) -{ - int ret; - - ret = reg_read(client, reg); - if (ret < 0) - return ret; - return reg_write(client, reg, (ret & ~mask) | (data & mask)); -} - -static int reg_write_multiple(struct i2c_client *client, - const struct rj54n1_reg_val *rv, const int n) -{ - int i, ret; - - for (i = 0; i < n; i++) { - ret = reg_write(client, rv->reg, rv->val); - if (ret < 0) - return ret; - rv++; - } - - return 0; -} - -static int rj54n1_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || code->index >= ARRAY_SIZE(rj54n1_colour_fmts)) - return -EINVAL; - - code->code = rj54n1_colour_fmts[code->index].code; - return 0; -} - -static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - /* Switch between preview and still shot modes */ - return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80); -} - -static int rj54n1_set_rect(struct i2c_client *client, - u16 reg_x, u16 reg_y, u16 reg_xy, - u32 width, u32 height) -{ - int ret; - - ret = reg_write(client, reg_xy, - ((width >> 4) & 0x70) | - ((height >> 8) & 7)); - - if (!ret) - ret = reg_write(client, reg_x, width & 0xff); - if (!ret) - ret = reg_write(client, reg_y, height & 0xff); - - return ret; -} - -/* - * Some commands, specifically certain initialisation sequences, require - * a commit operation. - */ -static int rj54n1_commit(struct i2c_client *client) -{ - int ret = reg_write(client, RJ54N1_INIT_START, 1); - msleep(10); - if (!ret) - ret = reg_write(client, RJ54N1_INIT_START, 0); - return ret; -} - -static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h, - s32 *out_w, s32 *out_h); - -static int rj54n1_set_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct rj54n1 *rj54n1 = to_rj54n1(client); - const struct v4l2_rect *rect = &sel->r; - int dummy = 0, output_w, output_h, - input_w = rect->width, input_h = rect->height; - int ret; - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || - sel->target != V4L2_SEL_TGT_CROP) - return -EINVAL; - - /* arbitrary minimum width and height, edges unimportant */ - soc_camera_limit_side(&dummy, &input_w, - RJ54N1_COLUMN_SKIP, 8, RJ54N1_MAX_WIDTH); - - soc_camera_limit_side(&dummy, &input_h, - RJ54N1_ROW_SKIP, 8, RJ54N1_MAX_HEIGHT); - - output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize; - output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize; - - dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n", - input_w, input_h, rj54n1->resize, output_w, output_h); - - ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h); - if (ret < 0) - return ret; - - rj54n1->width = output_w; - rj54n1->height = output_h; - rj54n1->resize = ret; - rj54n1->rect.width = input_w; - rj54n1->rect.height = input_h; - - return 0; -} - -static int rj54n1_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct rj54n1 *rj54n1 = to_rj54n1(client); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - 
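	/* (descriptive note, not in the original file) CROP_BOUNDS reports the
	 * full readable array, RJ54N1_MAX_WIDTH x RJ54N1_MAX_HEIGHT starting at
	 * the skip offsets; CROP returns the sensor window currently held in
	 * rj54n1->rect. */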
switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - sel->r.left = RJ54N1_COLUMN_SKIP; - sel->r.top = RJ54N1_ROW_SKIP; - sel->r.width = RJ54N1_MAX_WIDTH; - sel->r.height = RJ54N1_MAX_HEIGHT; - return 0; - case V4L2_SEL_TGT_CROP: - sel->r = rj54n1->rect; - return 0; - default: - return -EINVAL; - } -} - -static int rj54n1_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct rj54n1 *rj54n1 = to_rj54n1(client); - - if (format->pad) - return -EINVAL; - - mf->code = rj54n1->fmt->code; - mf->colorspace = rj54n1->fmt->colorspace; - mf->field = V4L2_FIELD_NONE; - mf->width = rj54n1->width; - mf->height = rj54n1->height; - - return 0; -} - -/* - * The actual geometry configuration routine. It scales the input window into - * the output one, updates the window sizes and returns an error or the resize - * coefficient on success. Note: we only use the "Fixed Scaling" on this camera. - */ -static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h, - s32 *out_w, s32 *out_h) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct rj54n1 *rj54n1 = to_rj54n1(client); - unsigned int skip, resize, input_w = *in_w, input_h = *in_h, - output_w = *out_w, output_h = *out_h; - u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom; - unsigned int peak, peak_50, peak_60; - int ret; - - /* - * We have a problem with crops, where the window is larger than 512x384 - * and output window is larger than a half of the input one. In this - * case we have to either reduce the input window to equal or below - * 512x384 or the output window to equal or below 1/2 of the input. - */ - if (output_w > max(512U, input_w / 2)) { - if (2 * output_w > RJ54N1_MAX_WIDTH) { - input_w = RJ54N1_MAX_WIDTH; - output_w = RJ54N1_MAX_WIDTH / 2; - } else { - input_w = output_w * 2; - } - - dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n", - input_w, output_w); - } - - if (output_h > max(384U, input_h / 2)) { - if (2 * output_h > RJ54N1_MAX_HEIGHT) { - input_h = RJ54N1_MAX_HEIGHT; - output_h = RJ54N1_MAX_HEIGHT / 2; - } else { - input_h = output_h * 2; - } - - dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n", - input_h, output_h); - } - - /* Idea: use the read mode for snapshots, handle separate geometries */ - ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L, - RJ54N1_Y_OUTPUT_SIZE_S_L, - RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h); - if (!ret) - ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_P_L, - RJ54N1_Y_OUTPUT_SIZE_P_L, - RJ54N1_XY_OUTPUT_SIZE_P_H, output_w, output_h); - - if (ret < 0) - return ret; - - if (output_w > input_w && output_h > input_h) { - input_w = output_w; - input_h = output_h; - - resize = 1024; - } else { - unsigned int resize_x, resize_y; - resize_x = (input_w * 1024 + output_w / 2) / output_w; - resize_y = (input_h * 1024 + output_h / 2) / output_h; - - /* We want max(resize_x, resize_y), check if it still fits */ - if (resize_x > resize_y && - (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT) - resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) / - output_h; - else if (resize_y > resize_x && - (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH) - resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) / - output_w; - else - resize = max(resize_x, resize_y); - - /* Prohibited value ranges */ - switch (resize) { - case 2040 ... 
2047: - resize = 2039; - break; - case 4080 ... 4095: - resize = 4079; - break; - case 8160 ... 8191: - resize = 8159; - break; - case 16320 ... 16384: - resize = 16319; - } - } - - /* Set scaling */ - ret = reg_write(client, RJ54N1_RESIZE_HOLD_L, resize & 0xff); - if (!ret) - ret = reg_write(client, RJ54N1_RESIZE_HOLD_H, resize >> 8); - - if (ret < 0) - return ret; - - /* - * Configure a skipping bitmask. The sensor will select a skipping value - * among set bits automatically. This is very unclear in the datasheet - * too. I was told, in this register one enables all skipping values, - * that are required for a specific resize, and the camera selects - * automatically, which ones to use. But it is unclear how to identify, - * which cropping values are needed. Secondly, why don't we just set all - * bits and let the camera choose? Would it increase processing time and - * reduce the framerate? Using 0xfffc for INC_USE_SEL doesn't seem to - * improve the image quality or stability for larger frames (see comment - * above), but I didn't check the framerate. - */ - skip = min(resize / 1024, 15U); - - inc_sel = 1 << skip; - - if (inc_sel <= 2) - inc_sel = 0xc; - else if (resize & 1023 && skip < 15) - inc_sel |= 1 << (skip + 1); - - ret = reg_write(client, RJ54N1_INC_USE_SEL_L, inc_sel & 0xfc); - if (!ret) - ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8); - - if (!rj54n1->auto_wb) { - /* Auto white balance window */ - wb_left = output_w / 16; - wb_right = (3 * output_w / 4 - 3) / 4; - wb_top = output_h / 16; - wb_bottom = (3 * output_h / 4 - 3) / 4; - wb_bit8 = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) | - ((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1); - - if (!ret) - ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8); - if (!ret) - ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left); - if (!ret) - ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top); - if (!ret) - ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right); - if (!ret) - ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom); - } - - /* Antiflicker */ - peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz / - 10000; - peak_50 = peak / 6; - peak_60 = peak / 5; - - if (!ret) - ret = reg_write(client, RJ54N1_PEAK_H, - ((peak_50 >> 4) & 0xf0) | (peak_60 >> 8)); - if (!ret) - ret = reg_write(client, RJ54N1_PEAK_50, peak_50); - if (!ret) - ret = reg_write(client, RJ54N1_PEAK_60, peak_60); - if (!ret) - ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150); - - /* Start resizing */ - if (!ret) - ret = reg_write(client, RJ54N1_RESIZE_CONTROL, - RESIZE_HOLD_SEL | RESIZE_GO | 1); - - if (ret < 0) - return ret; - - /* Constant taken from manufacturer's example */ - msleep(230); - - ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1); - if (ret < 0) - return ret; - - *in_w = (output_w * resize + 512) / 1024; - *in_h = (output_h * resize + 512) / 1024; - *out_w = output_w; - *out_h = output_h; - - dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n", - *in_w, *in_h, resize, output_w, output_h, skip); - - return resize; -} - -static int rj54n1_set_clock(struct i2c_client *client) -{ - struct rj54n1 *rj54n1 = to_rj54n1(client); - int ret; - - /* Enable external clock */ - ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY); - /* Leave stand-by. 
Note: use this when implementing suspend / resume */ - if (!ret) - ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK); - - if (!ret) - ret = reg_write(client, RJ54N1_PLL_L, PLL_L); - if (!ret) - ret = reg_write(client, RJ54N1_PLL_N, PLL_N); - - /* TGCLK dividers */ - if (!ret) - ret = reg_write(client, RJ54N1_RATIO_TG, - rj54n1->clk_div.ratio_tg); - if (!ret) - ret = reg_write(client, RJ54N1_RATIO_T, - rj54n1->clk_div.ratio_t); - if (!ret) - ret = reg_write(client, RJ54N1_RATIO_R, - rj54n1->clk_div.ratio_r); - - /* Enable TGCLK & RAMP */ - if (!ret) - ret = reg_write(client, RJ54N1_RAMP_TGCLK_EN, 3); - - /* Disable clock output */ - if (!ret) - ret = reg_write(client, RJ54N1_OCLK_DSP, 0); - - /* Set divisors */ - if (!ret) - ret = reg_write(client, RJ54N1_RATIO_OP, - rj54n1->clk_div.ratio_op); - if (!ret) - ret = reg_write(client, RJ54N1_RATIO_O, - rj54n1->clk_div.ratio_o); - - /* Enable OCLK */ - if (!ret) - ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1); - - /* Use PLL for Timing Generator, write 2 to reserved bits */ - if (!ret) - ret = reg_write(client, RJ54N1_TG_BYPASS, 2); - - /* Take sensor out of reset */ - if (!ret) - ret = reg_write(client, RJ54N1_RESET_STANDBY, - E_EXCLK | SEN_RSTX); - /* Enable PLL */ - if (!ret) - ret = reg_write(client, RJ54N1_PLL_EN, 1); - - /* Wait for PLL to stabilise */ - msleep(10); - - /* Enable clock to frequency divider */ - if (!ret) - ret = reg_write(client, RJ54N1_CLK_RST, 1); - - if (!ret) - ret = reg_read(client, RJ54N1_CLK_RST); - if (ret != 1) { - dev_err(&client->dev, - "Resetting RJ54N1CB0C clock failed: %d!\n", ret); - return -EIO; - } - - /* Start the PLL */ - ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1); - - /* Enable OCLK */ - if (!ret) - ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1); - - return ret; -} - -static int rj54n1_reg_init(struct i2c_client *client) -{ - struct rj54n1 *rj54n1 = to_rj54n1(client); - int ret = rj54n1_set_clock(client); - - if (!ret) - ret = reg_write_multiple(client, bank_7, ARRAY_SIZE(bank_7)); - if (!ret) - ret = reg_write_multiple(client, bank_10, ARRAY_SIZE(bank_10)); - - /* Set binning divisors */ - if (!ret) - ret = reg_write(client, RJ54N1_SCALE_1_2_LEV, 3 | (7 << 4)); - if (!ret) - ret = reg_write(client, RJ54N1_SCALE_4_LEV, 0xf); - - /* Switch to fixed resize mode */ - if (!ret) - ret = reg_write(client, RJ54N1_RESIZE_CONTROL, - RESIZE_HOLD_SEL | 1); - - /* Set gain */ - if (!ret) - ret = reg_write(client, RJ54N1_Y_GAIN, 0x84); - - /* - * Mirror the image back: default is upside down and left-to-right... - * Set manual preview / still shot switching - */ - if (!ret) - ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27); - - if (!ret) - ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4)); - - /* Auto exposure area */ - if (!ret) - ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80); - /* Check current auto WB config */ - if (!ret) - ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I); - if (ret >= 0) { - rj54n1->auto_wb = ret & 0x80; - ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5)); - } - if (!ret) - ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8)); - - if (!ret) - ret = reg_write(client, RJ54N1_RESET_STANDBY, - E_EXCLK | DSP_RSTX | SEN_RSTX); - - /* Commit init */ - if (!ret) - ret = rj54n1_commit(client); - - /* Take DSP, TG, sensor out of reset */ - if (!ret) - ret = reg_write(client, RJ54N1_RESET_STANDBY, - E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX); - - /* Start register update? 
Same register as 0x?FE in many bank_* sets */ - if (!ret) - ret = reg_write(client, RJ54N1_FWFLG, 2); - - /* Constant taken from manufacturer's example */ - msleep(700); - - return ret; -} - -static int rj54n1_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct rj54n1 *rj54n1 = to_rj54n1(client); - const struct rj54n1_datafmt *fmt; - int output_w, output_h, max_w, max_h, - input_w = rj54n1->rect.width, input_h = rj54n1->rect.height; - int align = mf->code == MEDIA_BUS_FMT_SBGGR10_1X10 || - mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE || - mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE || - mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE || - mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE; - int ret; - - if (format->pad) - return -EINVAL; - - dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n", - __func__, mf->code, mf->width, mf->height); - - fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts, - ARRAY_SIZE(rj54n1_colour_fmts)); - if (!fmt) { - fmt = rj54n1->fmt; - mf->code = fmt->code; - } - - mf->field = V4L2_FIELD_NONE; - mf->colorspace = fmt->colorspace; - - v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align, - &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0); - - if (format->which == V4L2_SUBDEV_FORMAT_TRY) { - cfg->try_fmt = *mf; - return 0; - } - - /* - * Verify if the sensor has just been powered on. TODO: replace this - * with proper PM, when a suitable API is available. - */ - ret = reg_read(client, RJ54N1_RESET_STANDBY); - if (ret < 0) - return ret; - - if (!(ret & E_EXCLK)) { - ret = rj54n1_reg_init(client); - if (ret < 0) - return ret; - } - - /* RA_SEL_UL is only relevant for raw modes, ignored otherwise. 
*/ - switch (mf->code) { - case MEDIA_BUS_FMT_YUYV8_2X8: - ret = reg_write(client, RJ54N1_OUT_SEL, 0); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); - break; - case MEDIA_BUS_FMT_YVYU8_2X8: - ret = reg_write(client, RJ54N1_OUT_SEL, 0); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); - break; - case MEDIA_BUS_FMT_RGB565_2X8_LE: - ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); - break; - case MEDIA_BUS_FMT_RGB565_2X8_BE: - ret = reg_write(client, RJ54N1_OUT_SEL, 0x11); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); - break; - case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE: - ret = reg_write(client, RJ54N1_OUT_SEL, 4); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); - if (!ret) - ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); - break; - case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE: - ret = reg_write(client, RJ54N1_OUT_SEL, 4); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8); - if (!ret) - ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); - break; - case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE: - ret = reg_write(client, RJ54N1_OUT_SEL, 4); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); - if (!ret) - ret = reg_write(client, RJ54N1_RA_SEL_UL, 0); - break; - case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE: - ret = reg_write(client, RJ54N1_OUT_SEL, 4); - if (!ret) - ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8); - if (!ret) - ret = reg_write(client, RJ54N1_RA_SEL_UL, 8); - break; - case MEDIA_BUS_FMT_SBGGR10_1X10: - ret = reg_write(client, RJ54N1_OUT_SEL, 5); - break; - default: - ret = -EINVAL; - } - - /* Special case: a raw mode with 10 bits of data per clock tick */ - if (!ret) - ret = reg_set(client, RJ54N1_OCLK_SEL_EN, - (mf->code == MEDIA_BUS_FMT_SBGGR10_1X10) << 1, 2); - - if (ret < 0) - return ret; - - /* Supported scales 1:1 >= scale > 1:16 */ - max_w = mf->width * (16 * 1024 - 1) / 1024; - if (input_w > max_w) - input_w = max_w; - max_h = mf->height * (16 * 1024 - 1) / 1024; - if (input_h > max_h) - input_h = max_h; - - output_w = mf->width; - output_h = mf->height; - - ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h); - if (ret < 0) - return ret; - - fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts, - ARRAY_SIZE(rj54n1_colour_fmts)); - - rj54n1->fmt = fmt; - rj54n1->resize = ret; - rj54n1->rect.width = input_w; - rj54n1->rect.height = input_h; - rj54n1->width = output_w; - rj54n1->height = output_h; - - mf->width = output_w; - mf->height = output_h; - mf->field = V4L2_FIELD_NONE; - mf->colorspace = fmt->colorspace; - - return 0; -} - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int rj54n1_g_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg < 0x400 || reg->reg > 0x1fff) - /* Registers > 0x0800 are only available from Sharp support */ - return -EINVAL; - - reg->size = 1; - reg->val = reg_read(client, reg->reg); - - if (reg->val > 0xff) - return -EIO; - - return 0; -} - -static int rj54n1_s_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg < 0x400 || reg->reg > 0x1fff) - /* Registers >= 0x0800 are only available from Sharp support */ - return -EINVAL; - - if (reg_write(client, reg->reg, reg->val) < 0) - return -EIO; - - return 0; -} -#endif - -static int rj54n1_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct 
soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct rj54n1 *rj54n1 = to_rj54n1(client); - - return soc_camera_set_power(&client->dev, ssdd, rj54n1->clk, on); -} - -static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct rj54n1 *rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl); - struct v4l2_subdev *sd = &rj54n1->subdev; - struct i2c_client *client = v4l2_get_subdevdata(sd); - int data; - - switch (ctrl->id) { - case V4L2_CID_VFLIP: - if (ctrl->val) - data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1); - else - data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1); - if (data < 0) - return -EIO; - return 0; - case V4L2_CID_HFLIP: - if (ctrl->val) - data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2); - else - data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2); - if (data < 0) - return -EIO; - return 0; - case V4L2_CID_GAIN: - if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0) - return -EIO; - return 0; - case V4L2_CID_AUTO_WHITE_BALANCE: - /* Auto WB area - whole image */ - if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7, - 0x80) < 0) - return -EIO; - rj54n1->auto_wb = ctrl->val; - return 0; - } - - return -EINVAL; -} - -static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = { - .s_ctrl = rj54n1_s_ctrl, -}; - -static const struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = { -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = rj54n1_g_register, - .s_register = rj54n1_s_register, -#endif - .s_power = rj54n1_s_power, -}; - -static int rj54n1_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - cfg->flags = - V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_PCLK_SAMPLE_FALLING | - V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH | - V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH; - cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); - - return 0; -} - -static int rj54n1_s_mbus_config(struct v4l2_subdev *sd, - const struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - /* Figures 2.5-1 to 2.5-3 - default falling pixclk edge */ - if (soc_camera_apply_board_flags(ssdd, cfg) & - V4L2_MBUS_PCLK_SAMPLE_RISING) - return reg_write(client, RJ54N1_OUT_SIGPO, 1 << 4); - else - return reg_write(client, RJ54N1_OUT_SIGPO, 0); -} - -static const struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = { - .s_stream = rj54n1_s_stream, - .g_mbus_config = rj54n1_g_mbus_config, - .s_mbus_config = rj54n1_s_mbus_config, -}; - -static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = { - .enum_mbus_code = rj54n1_enum_mbus_code, - .get_selection = rj54n1_get_selection, - .set_selection = rj54n1_set_selection, - .get_fmt = rj54n1_get_fmt, - .set_fmt = rj54n1_set_fmt, -}; - -static const struct v4l2_subdev_ops rj54n1_subdev_ops = { - .core = &rj54n1_subdev_core_ops, - .video = &rj54n1_subdev_video_ops, - .pad = &rj54n1_subdev_pad_ops, -}; - -/* - * Interface active, can use i2c. 
If it fails, it can indeed mean, that - * this wasn't our capture interface, so, we wait for the right one - */ -static int rj54n1_video_probe(struct i2c_client *client, - struct rj54n1_pdata *priv) -{ - struct rj54n1 *rj54n1 = to_rj54n1(client); - int data1, data2; - int ret; - - ret = rj54n1_s_power(&rj54n1->subdev, 1); - if (ret < 0) - return ret; - - /* Read out the chip version register */ - data1 = reg_read(client, RJ54N1_DEV_CODE); - data2 = reg_read(client, RJ54N1_DEV_CODE2); - - if (data1 != 0x51 || data2 != 0x10) { - ret = -ENODEV; - dev_info(&client->dev, "No RJ54N1CB0C found, read 0x%x:0x%x\n", - data1, data2); - goto done; - } - - /* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */ - ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7); - if (ret < 0) - goto done; - - dev_info(&client->dev, "Detected a RJ54N1CB0C chip ID 0x%x:0x%x\n", - data1, data2); - - ret = v4l2_ctrl_handler_setup(&rj54n1->hdl); - -done: - rj54n1_s_power(&rj54n1->subdev, 0); - return ret; -} - -static int rj54n1_probe(struct i2c_client *client, - const struct i2c_device_id *did) -{ - struct rj54n1 *rj54n1; - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); - struct rj54n1_pdata *rj54n1_priv; - int ret; - - if (!ssdd || !ssdd->drv_priv) { - dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n"); - return -EINVAL; - } - - rj54n1_priv = ssdd->drv_priv; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_warn(&adapter->dev, - "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n"); - return -EIO; - } - - rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL); - if (!rj54n1) - return -ENOMEM; - - v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops); - v4l2_ctrl_handler_init(&rj54n1->hdl, 4); - v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, - V4L2_CID_VFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, - V4L2_CID_HFLIP, 0, 1, 1, 0); - v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, - V4L2_CID_GAIN, 0, 127, 1, 66); - v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops, - V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1); - rj54n1->subdev.ctrl_handler = &rj54n1->hdl; - if (rj54n1->hdl.error) - return rj54n1->hdl.error; - - rj54n1->clk_div = clk_div; - rj54n1->rect.left = RJ54N1_COLUMN_SKIP; - rj54n1->rect.top = RJ54N1_ROW_SKIP; - rj54n1->rect.width = RJ54N1_MAX_WIDTH; - rj54n1->rect.height = RJ54N1_MAX_HEIGHT; - rj54n1->width = RJ54N1_MAX_WIDTH; - rj54n1->height = RJ54N1_MAX_HEIGHT; - rj54n1->fmt = &rj54n1_colour_fmts[0]; - rj54n1->resize = 1024; - rj54n1->tgclk_mhz = (rj54n1_priv->mclk_freq / PLL_L * PLL_N) / - (clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1); - - rj54n1->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(rj54n1->clk)) { - ret = PTR_ERR(rj54n1->clk); - goto eclkget; - } - - ret = rj54n1_video_probe(client, rj54n1_priv); - if (ret < 0) { - v4l2_clk_put(rj54n1->clk); -eclkget: - v4l2_ctrl_handler_free(&rj54n1->hdl); - } - - return ret; -} - -static int rj54n1_remove(struct i2c_client *client) -{ - struct rj54n1 *rj54n1 = to_rj54n1(client); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - v4l2_clk_put(rj54n1->clk); - v4l2_device_unregister_subdev(&rj54n1->subdev); - if (ssdd->free_bus) - ssdd->free_bus(ssdd); - v4l2_ctrl_handler_free(&rj54n1->hdl); - - return 0; -} - -static const struct i2c_device_id rj54n1_id[] = { - { "rj54n1cb0c", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, rj54n1_id); - 
-static struct i2c_driver rj54n1_i2c_driver = { - .driver = { - .name = "rj54n1cb0c", - }, - .probe = rj54n1_probe, - .remove = rj54n1_remove, - .id_table = rj54n1_id, -}; - -module_i2c_driver(rj54n1_i2c_driver); - -MODULE_DESCRIPTION("Sharp RJ54N1CB0C Camera driver"); -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/soc_camera/soc_tw9910.c b/drivers/media/i2c/soc_camera/soc_tw9910.c deleted file mode 100644 index bdb5e0a431e9..000000000000 --- a/drivers/media/i2c/soc_camera/soc_tw9910.c +++ /dev/null @@ -1,999 +0,0 @@ -/* - * tw9910 Video Driver - * - * Copyright (C) 2008 Renesas Solutions Corp. - * Kuninori Morimoto - * - * Based on ov772x driver, - * - * Copyright (C) 2008 Kuninori Morimoto - * Copyright 2006-7 Jonathan Corbet - * Copyright (C) 2008 Magnus Damm - * Copyright (C) 2008, Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define GET_ID(val) ((val & 0xF8) >> 3) -#define GET_REV(val) (val & 0x07) - -/* - * register offset - */ -#define ID 0x00 /* Product ID Code Register */ -#define STATUS1 0x01 /* Chip Status Register I */ -#define INFORM 0x02 /* Input Format */ -#define OPFORM 0x03 /* Output Format Control Register */ -#define DLYCTR 0x04 /* Hysteresis and HSYNC Delay Control */ -#define OUTCTR1 0x05 /* Output Control I */ -#define ACNTL1 0x06 /* Analog Control Register 1 */ -#define CROP_HI 0x07 /* Cropping Register, High */ -#define VDELAY_LO 0x08 /* Vertical Delay Register, Low */ -#define VACTIVE_LO 0x09 /* Vertical Active Register, Low */ -#define HDELAY_LO 0x0A /* Horizontal Delay Register, Low */ -#define HACTIVE_LO 0x0B /* Horizontal Active Register, Low */ -#define CNTRL1 0x0C /* Control Register I */ -#define VSCALE_LO 0x0D /* Vertical Scaling Register, Low */ -#define SCALE_HI 0x0E /* Scaling Register, High */ -#define HSCALE_LO 0x0F /* Horizontal Scaling Register, Low */ -#define BRIGHT 0x10 /* BRIGHTNESS Control Register */ -#define CONTRAST 0x11 /* CONTRAST Control Register */ -#define SHARPNESS 0x12 /* SHARPNESS Control Register I */ -#define SAT_U 0x13 /* Chroma (U) Gain Register */ -#define SAT_V 0x14 /* Chroma (V) Gain Register */ -#define HUE 0x15 /* Hue Control Register */ -#define CORING1 0x17 -#define CORING2 0x18 /* Coring and IF compensation */ -#define VBICNTL 0x19 /* VBI Control Register */ -#define ACNTL2 0x1A /* Analog Control 2 */ -#define OUTCTR2 0x1B /* Output Control 2 */ -#define SDT 0x1C /* Standard Selection */ -#define SDTR 0x1D /* Standard Recognition */ -#define TEST 0x1F /* Test Control Register */ -#define CLMPG 0x20 /* Clamping Gain */ -#define IAGC 0x21 /* Individual AGC Gain */ -#define AGCGAIN 0x22 /* AGC Gain */ -#define PEAKWT 0x23 /* White Peak Threshold */ -#define CLMPL 0x24 /* Clamp level */ -#define SYNCT 0x25 /* Sync Amplitude */ -#define MISSCNT 0x26 /* Sync Miss Count Register */ -#define PCLAMP 0x27 /* Clamp Position Register */ -#define VCNTL1 0x28 /* Vertical Control I */ -#define VCNTL2 0x29 /* Vertical Control II */ -#define CKILL 0x2A /* Color Killer Level Control */ -#define COMB 0x2B /* Comb Filter Control */ -#define LDLY 0x2C /* Luma Delay and H Filter Control */ -#define MISC1 0x2D /* Miscellaneous Control I */ -#define LOOP 0x2E /* LOOP Control Register */ -#define 
MISC2 0x2F /* Miscellaneous Control II */ -#define MVSN 0x30 /* Macrovision Detection */ -#define STATUS2 0x31 /* Chip STATUS II */ -#define HFREF 0x32 /* H monitor */ -#define CLMD 0x33 /* CLAMP MODE */ -#define IDCNTL 0x34 /* ID Detection Control */ -#define CLCNTL1 0x35 /* Clamp Control I */ -#define ANAPLLCTL 0x4C -#define VBIMIN 0x4D -#define HSLOWCTL 0x4E -#define WSS3 0x4F -#define FILLDATA 0x50 -#define SDID 0x51 -#define DID 0x52 -#define WSS1 0x53 -#define WSS2 0x54 -#define VVBI 0x55 -#define LCTL6 0x56 -#define LCTL7 0x57 -#define LCTL8 0x58 -#define LCTL9 0x59 -#define LCTL10 0x5A -#define LCTL11 0x5B -#define LCTL12 0x5C -#define LCTL13 0x5D -#define LCTL14 0x5E -#define LCTL15 0x5F -#define LCTL16 0x60 -#define LCTL17 0x61 -#define LCTL18 0x62 -#define LCTL19 0x63 -#define LCTL20 0x64 -#define LCTL21 0x65 -#define LCTL22 0x66 -#define LCTL23 0x67 -#define LCTL24 0x68 -#define LCTL25 0x69 -#define LCTL26 0x6A -#define HSBEGIN 0x6B -#define HSEND 0x6C -#define OVSDLY 0x6D -#define OVSEND 0x6E -#define VBIDELAY 0x6F - -/* - * register detail - */ - -/* INFORM */ -#define FC27_ON 0x40 /* 1 : Input crystal clock frequency is 27MHz */ -#define FC27_FF 0x00 /* 0 : Square pixel mode. */ - /* Must use 24.54MHz for 60Hz field rate */ - /* source or 29.5MHz for 50Hz field rate */ -#define IFSEL_S 0x10 /* 01 : S-video decoding */ -#define IFSEL_C 0x00 /* 00 : Composite video decoding */ - /* Y input video selection */ -#define YSEL_M0 0x00 /* 00 : Mux0 selected */ -#define YSEL_M1 0x04 /* 01 : Mux1 selected */ -#define YSEL_M2 0x08 /* 10 : Mux2 selected */ -#define YSEL_M3 0x10 /* 11 : Mux3 selected */ - -/* OPFORM */ -#define MODE 0x80 /* 0 : CCIR601 compatible YCrCb 4:2:2 format */ - /* 1 : ITU-R-656 compatible data sequence format */ -#define LEN 0x40 /* 0 : 8-bit YCrCb 4:2:2 output format */ - /* 1 : 16-bit YCrCb 4:2:2 output format.*/ -#define LLCMODE 0x20 /* 1 : LLC output mode. */ - /* 0 : free-run output mode */ -#define AINC 0x10 /* Serial interface auto-indexing control */ - /* 0 : auto-increment */ - /* 1 : non-auto */ -#define VSCTL 0x08 /* 1 : Vertical out ctrl by DVALID */ - /* 0 : Vertical out ctrl by HACTIVE and DVALID */ -#define OEN_TRI_SEL_MASK 0x07 -#define OEN_TRI_SEL_ALL_ON 0x00 /* Enable output for Rev0/Rev1 */ -#define OEN_TRI_SEL_ALL_OFF_r0 0x06 /* All tri-stated for Rev0 */ -#define OEN_TRI_SEL_ALL_OFF_r1 0x07 /* All tri-stated for Rev1 */ - -/* OUTCTR1 */ -#define VSP_LO 0x00 /* 0 : VS pin output polarity is active low */ -#define VSP_HI 0x80 /* 1 : VS pin output polarity is active high. */ - /* VS pin output control */ -#define VSSL_VSYNC 0x00 /* 0 : VSYNC */ -#define VSSL_VACT 0x10 /* 1 : VACT */ -#define VSSL_FIELD 0x20 /* 2 : FIELD */ -#define VSSL_VVALID 0x30 /* 3 : VVALID */ -#define VSSL_ZERO 0x70 /* 7 : 0 */ -#define HSP_LOW 0x00 /* 0 : HS pin output polarity is active low */ -#define HSP_HI 0x08 /* 1 : HS pin output polarity is active high.*/ - /* HS pin output control */ -#define HSSL_HACT 0x00 /* 0 : HACT */ -#define HSSL_HSYNC 0x01 /* 1 : HSYNC */ -#define HSSL_DVALID 0x02 /* 2 : DVALID */ -#define HSSL_HLOCK 0x03 /* 3 : HLOCK */ -#define HSSL_ASYNCW 0x04 /* 4 : ASYNCW */ -#define HSSL_ZERO 0x07 /* 7 : 0 */ - -/* ACNTL1 */ -#define SRESET 0x80 /* resets the device to its default state - * but all register content remain unchanged. - * This bit is self-resetting. 
- */ -#define ACNTL1_PDN_MASK 0x0e -#define CLK_PDN 0x08 /* system clock power down */ -#define Y_PDN 0x04 /* Luma ADC power down */ -#define C_PDN 0x02 /* Chroma ADC power down */ - -/* ACNTL2 */ -#define ACNTL2_PDN_MASK 0x40 -#define PLL_PDN 0x40 /* PLL power down */ - -/* VBICNTL */ - -/* RTSEL : control the real time signal output from the MPOUT pin */ -#define RTSEL_MASK 0x07 -#define RTSEL_VLOSS 0x00 /* 0000 = Video loss */ -#define RTSEL_HLOCK 0x01 /* 0001 = H-lock */ -#define RTSEL_SLOCK 0x02 /* 0010 = S-lock */ -#define RTSEL_VLOCK 0x03 /* 0011 = V-lock */ -#define RTSEL_MONO 0x04 /* 0100 = MONO */ -#define RTSEL_DET50 0x05 /* 0101 = DET50 */ -#define RTSEL_FIELD 0x06 /* 0110 = FIELD */ -#define RTSEL_RTCO 0x07 /* 0111 = RTCO ( Real Time Control ) */ - -/* HSYNC start and end are constant for now */ -#define HSYNC_START 0x0260 -#define HSYNC_END 0x0300 - -/* - * structure - */ - -struct regval_list { - unsigned char reg_num; - unsigned char value; -}; - -struct tw9910_scale_ctrl { - char *name; - unsigned short width; - unsigned short height; - u16 hscale; - u16 vscale; -}; - -struct tw9910_priv { - struct v4l2_subdev subdev; - struct v4l2_clk *clk; - struct tw9910_video_info *info; - const struct tw9910_scale_ctrl *scale; - v4l2_std_id norm; - u32 revision; -}; - -static const struct tw9910_scale_ctrl tw9910_ntsc_scales[] = { - { - .name = "NTSC SQ", - .width = 640, - .height = 480, - .hscale = 0x0100, - .vscale = 0x0100, - }, - { - .name = "NTSC CCIR601", - .width = 720, - .height = 480, - .hscale = 0x0100, - .vscale = 0x0100, - }, - { - .name = "NTSC SQ (CIF)", - .width = 320, - .height = 240, - .hscale = 0x0200, - .vscale = 0x0200, - }, - { - .name = "NTSC CCIR601 (CIF)", - .width = 360, - .height = 240, - .hscale = 0x0200, - .vscale = 0x0200, - }, - { - .name = "NTSC SQ (QCIF)", - .width = 160, - .height = 120, - .hscale = 0x0400, - .vscale = 0x0400, - }, - { - .name = "NTSC CCIR601 (QCIF)", - .width = 180, - .height = 120, - .hscale = 0x0400, - .vscale = 0x0400, - }, -}; - -static const struct tw9910_scale_ctrl tw9910_pal_scales[] = { - { - .name = "PAL SQ", - .width = 768, - .height = 576, - .hscale = 0x0100, - .vscale = 0x0100, - }, - { - .name = "PAL CCIR601", - .width = 720, - .height = 576, - .hscale = 0x0100, - .vscale = 0x0100, - }, - { - .name = "PAL SQ (CIF)", - .width = 384, - .height = 288, - .hscale = 0x0200, - .vscale = 0x0200, - }, - { - .name = "PAL CCIR601 (CIF)", - .width = 360, - .height = 288, - .hscale = 0x0200, - .vscale = 0x0200, - }, - { - .name = "PAL SQ (QCIF)", - .width = 192, - .height = 144, - .hscale = 0x0400, - .vscale = 0x0400, - }, - { - .name = "PAL CCIR601 (QCIF)", - .width = 180, - .height = 144, - .hscale = 0x0400, - .vscale = 0x0400, - }, -}; - -/* - * general function - */ -static struct tw9910_priv *to_tw9910(const struct i2c_client *client) -{ - return container_of(i2c_get_clientdata(client), struct tw9910_priv, - subdev); -} - -static int tw9910_mask_set(struct i2c_client *client, u8 command, - u8 mask, u8 set) -{ - s32 val = i2c_smbus_read_byte_data(client, command); - if (val < 0) - return val; - - val &= ~mask; - val |= set & mask; - - return i2c_smbus_write_byte_data(client, command, val); -} - -static int tw9910_set_scale(struct i2c_client *client, - const struct tw9910_scale_ctrl *scale) -{ - int ret; - - ret = i2c_smbus_write_byte_data(client, SCALE_HI, - (scale->vscale & 0x0F00) >> 4 | - (scale->hscale & 0x0F00) >> 8); - if (ret < 0) - return ret; - - ret = i2c_smbus_write_byte_data(client, HSCALE_LO, - scale->hscale & 
0x00FF); - if (ret < 0) - return ret; - - ret = i2c_smbus_write_byte_data(client, VSCALE_LO, - scale->vscale & 0x00FF); - - return ret; -} - -static int tw9910_set_hsync(struct i2c_client *client) -{ - struct tw9910_priv *priv = to_tw9910(client); - int ret; - - /* bit 10 - 3 */ - ret = i2c_smbus_write_byte_data(client, HSBEGIN, - (HSYNC_START & 0x07F8) >> 3); - if (ret < 0) - return ret; - - /* bit 10 - 3 */ - ret = i2c_smbus_write_byte_data(client, HSEND, - (HSYNC_END & 0x07F8) >> 3); - if (ret < 0) - return ret; - - /* So far only revisions 0 and 1 have been seen */ - /* bit 2 - 0 */ - if (1 == priv->revision) - ret = tw9910_mask_set(client, HSLOWCTL, 0x77, - (HSYNC_START & 0x0007) << 4 | - (HSYNC_END & 0x0007)); - - return ret; -} - -static void tw9910_reset(struct i2c_client *client) -{ - tw9910_mask_set(client, ACNTL1, SRESET, SRESET); - msleep(1); -} - -static int tw9910_power(struct i2c_client *client, int enable) -{ - int ret; - u8 acntl1; - u8 acntl2; - - if (enable) { - acntl1 = 0; - acntl2 = 0; - } else { - acntl1 = CLK_PDN | Y_PDN | C_PDN; - acntl2 = PLL_PDN; - } - - ret = tw9910_mask_set(client, ACNTL1, ACNTL1_PDN_MASK, acntl1); - if (ret < 0) - return ret; - - return tw9910_mask_set(client, ACNTL2, ACNTL2_PDN_MASK, acntl2); -} - -static const struct tw9910_scale_ctrl *tw9910_select_norm(v4l2_std_id norm, - u32 width, u32 height) -{ - const struct tw9910_scale_ctrl *scale; - const struct tw9910_scale_ctrl *ret = NULL; - __u32 diff = 0xffffffff, tmp; - int size, i; - - if (norm & V4L2_STD_NTSC) { - scale = tw9910_ntsc_scales; - size = ARRAY_SIZE(tw9910_ntsc_scales); - } else if (norm & V4L2_STD_PAL) { - scale = tw9910_pal_scales; - size = ARRAY_SIZE(tw9910_pal_scales); - } else { - return NULL; - } - - for (i = 0; i < size; i++) { - tmp = abs(width - scale[i].width) + - abs(height - scale[i].height); - if (tmp < diff) { - diff = tmp; - ret = scale + i; - } - } - - return ret; -} - -/* - * subdevice operations - */ -static int tw9910_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct tw9910_priv *priv = to_tw9910(client); - u8 val; - int ret; - - if (!enable) { - switch (priv->revision) { - case 0: - val = OEN_TRI_SEL_ALL_OFF_r0; - break; - case 1: - val = OEN_TRI_SEL_ALL_OFF_r1; - break; - default: - dev_err(&client->dev, "un-supported revision\n"); - return -EINVAL; - } - } else { - val = OEN_TRI_SEL_ALL_ON; - - if (!priv->scale) { - dev_err(&client->dev, "norm select error\n"); - return -EPERM; - } - - dev_dbg(&client->dev, "%s %dx%d\n", - priv->scale->name, - priv->scale->width, - priv->scale->height); - } - - ret = tw9910_mask_set(client, OPFORM, OEN_TRI_SEL_MASK, val); - if (ret < 0) - return ret; - - return tw9910_power(client, enable); -} - -static int tw9910_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct tw9910_priv *priv = to_tw9910(client); - - *norm = priv->norm; - - return 0; -} - -static int tw9910_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct tw9910_priv *priv = to_tw9910(client); - const unsigned hact = 720; - const unsigned hdelay = 15; - unsigned vact; - unsigned vdelay; - int ret; - - if (!(norm & (V4L2_STD_NTSC | V4L2_STD_PAL))) - return -EINVAL; - - priv->norm = norm; - if (norm & V4L2_STD_525_60) { - vact = 240; - vdelay = 18; - ret = tw9910_mask_set(client, VVBI, 0x10, 0x10); - } else { - vact = 288; - vdelay = 24; - ret = tw9910_mask_set(client, VVBI, 0x10, 
0x00); - } - if (!ret) - ret = i2c_smbus_write_byte_data(client, CROP_HI, - ((vdelay >> 2) & 0xc0) | - ((vact >> 4) & 0x30) | - ((hdelay >> 6) & 0x0c) | - ((hact >> 8) & 0x03)); - if (!ret) - ret = i2c_smbus_write_byte_data(client, VDELAY_LO, - vdelay & 0xff); - if (!ret) - ret = i2c_smbus_write_byte_data(client, VACTIVE_LO, - vact & 0xff); - - return ret; -} - -#ifdef CONFIG_VIDEO_ADV_DEBUG -static int tw9910_g_register(struct v4l2_subdev *sd, - struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - int ret; - - if (reg->reg > 0xff) - return -EINVAL; - - reg->size = 1; - ret = i2c_smbus_read_byte_data(client, reg->reg); - if (ret < 0) - return ret; - - /* - * ret = int - * reg->val = __u64 - */ - reg->val = (__u64)ret; - - return 0; -} - -static int tw9910_s_register(struct v4l2_subdev *sd, - const struct v4l2_dbg_register *reg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - - if (reg->reg > 0xff || - reg->val > 0xff) - return -EINVAL; - - return i2c_smbus_write_byte_data(client, reg->reg, reg->val); -} -#endif - -static int tw9910_s_power(struct v4l2_subdev *sd, int on) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - struct tw9910_priv *priv = to_tw9910(client); - - return soc_camera_set_power(&client->dev, ssdd, priv->clk, on); -} - -static int tw9910_set_frame(struct v4l2_subdev *sd, u32 *width, u32 *height) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct tw9910_priv *priv = to_tw9910(client); - int ret = -EINVAL; - u8 val; - - /* - * select suitable norm - */ - priv->scale = tw9910_select_norm(priv->norm, *width, *height); - if (!priv->scale) - goto tw9910_set_fmt_error; - - /* - * reset hardware - */ - tw9910_reset(client); - - /* - * set bus width - */ - val = 0x00; - if (SOCAM_DATAWIDTH_16 == priv->info->buswidth) - val = LEN; - - ret = tw9910_mask_set(client, OPFORM, LEN, val); - if (ret < 0) - goto tw9910_set_fmt_error; - - /* - * select MPOUT behavior - */ - switch (priv->info->mpout) { - case TW9910_MPO_VLOSS: - val = RTSEL_VLOSS; break; - case TW9910_MPO_HLOCK: - val = RTSEL_HLOCK; break; - case TW9910_MPO_SLOCK: - val = RTSEL_SLOCK; break; - case TW9910_MPO_VLOCK: - val = RTSEL_VLOCK; break; - case TW9910_MPO_MONO: - val = RTSEL_MONO; break; - case TW9910_MPO_DET50: - val = RTSEL_DET50; break; - case TW9910_MPO_FIELD: - val = RTSEL_FIELD; break; - case TW9910_MPO_RTCO: - val = RTSEL_RTCO; break; - default: - val = 0; - } - - ret = tw9910_mask_set(client, VBICNTL, RTSEL_MASK, val); - if (ret < 0) - goto tw9910_set_fmt_error; - - /* - * set scale - */ - ret = tw9910_set_scale(client, priv->scale); - if (ret < 0) - goto tw9910_set_fmt_error; - - /* - * set hsync - */ - ret = tw9910_set_hsync(client); - if (ret < 0) - goto tw9910_set_fmt_error; - - *width = priv->scale->width; - *height = priv->scale->height; - - return ret; - -tw9910_set_fmt_error: - - tw9910_reset(client); - priv->scale = NULL; - - return ret; -} - -static int tw9910_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct tw9910_priv *priv = to_tw9910(client); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - /* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */ - if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS) - return -EINVAL; - - sel->r.left = 0; - sel->r.top = 0; - if (priv->norm & V4L2_STD_NTSC) { - sel->r.width = 
640; - sel->r.height = 480; - } else { - sel->r.width = 768; - sel->r.height = 576; - } - return 0; -} - -static int tw9910_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct tw9910_priv *priv = to_tw9910(client); - - if (format->pad) - return -EINVAL; - - if (!priv->scale) { - priv->scale = tw9910_select_norm(priv->norm, 640, 480); - if (!priv->scale) - return -EINVAL; - } - - mf->width = priv->scale->width; - mf->height = priv->scale->height; - mf->code = MEDIA_BUS_FMT_UYVY8_2X8; - mf->colorspace = V4L2_COLORSPACE_SMPTE170M; - mf->field = V4L2_FIELD_INTERLACED_BT; - - return 0; -} - -static int tw9910_s_fmt(struct v4l2_subdev *sd, - struct v4l2_mbus_framefmt *mf) -{ - u32 width = mf->width, height = mf->height; - int ret; - - WARN_ON(mf->field != V4L2_FIELD_ANY && - mf->field != V4L2_FIELD_INTERLACED_BT); - - /* - * check color format - */ - if (mf->code != MEDIA_BUS_FMT_UYVY8_2X8) - return -EINVAL; - - mf->colorspace = V4L2_COLORSPACE_SMPTE170M; - - ret = tw9910_set_frame(sd, &width, &height); - if (!ret) { - mf->width = width; - mf->height = height; - } - return ret; -} - -static int tw9910_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *mf = &format->format; - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct tw9910_priv *priv = to_tw9910(client); - const struct tw9910_scale_ctrl *scale; - - if (format->pad) - return -EINVAL; - - if (V4L2_FIELD_ANY == mf->field) { - mf->field = V4L2_FIELD_INTERLACED_BT; - } else if (V4L2_FIELD_INTERLACED_BT != mf->field) { - dev_err(&client->dev, "Field type %d invalid.\n", mf->field); - return -EINVAL; - } - - mf->code = MEDIA_BUS_FMT_UYVY8_2X8; - mf->colorspace = V4L2_COLORSPACE_SMPTE170M; - - /* - * select suitable norm - */ - scale = tw9910_select_norm(priv->norm, mf->width, mf->height); - if (!scale) - return -EINVAL; - - mf->width = scale->width; - mf->height = scale->height; - - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) - return tw9910_s_fmt(sd, mf); - cfg->try_fmt = *mf; - return 0; -} - -static int tw9910_video_probe(struct i2c_client *client) -{ - struct tw9910_priv *priv = to_tw9910(client); - s32 id; - int ret; - - /* - * tw9910 only use 8 or 16 bit bus width - */ - if (SOCAM_DATAWIDTH_16 != priv->info->buswidth && - SOCAM_DATAWIDTH_8 != priv->info->buswidth) { - dev_err(&client->dev, "bus width error\n"); - return -ENODEV; - } - - ret = tw9910_s_power(&priv->subdev, 1); - if (ret < 0) - return ret; - - /* - * check and show Product ID - * So far only revisions 0 and 1 have been seen - */ - id = i2c_smbus_read_byte_data(client, ID); - priv->revision = GET_REV(id); - id = GET_ID(id); - - if (0x0B != id || - 0x01 < priv->revision) { - dev_err(&client->dev, - "Product ID error %x:%x\n", - id, priv->revision); - ret = -ENODEV; - goto done; - } - - dev_info(&client->dev, - "tw9910 Product ID %0x:%0x\n", id, priv->revision); - - priv->norm = V4L2_STD_NTSC; - priv->scale = &tw9910_ntsc_scales[0]; - -done: - tw9910_s_power(&priv->subdev, 0); - return ret; -} - -static const struct v4l2_subdev_core_ops tw9910_subdev_core_ops = { -#ifdef CONFIG_VIDEO_ADV_DEBUG - .g_register = tw9910_g_register, - .s_register = tw9910_s_register, -#endif - .s_power = tw9910_s_power, -}; - -static int tw9910_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - 
struct v4l2_subdev_mbus_code_enum *code) -{ - if (code->pad || code->index) - return -EINVAL; - - code->code = MEDIA_BUS_FMT_UYVY8_2X8; - return 0; -} - -static int tw9910_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - - cfg->flags = V4L2_MBUS_PCLK_SAMPLE_RISING | V4L2_MBUS_MASTER | - V4L2_MBUS_VSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_LOW | - V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_HSYNC_ACTIVE_LOW | - V4L2_MBUS_DATA_ACTIVE_HIGH; - cfg->type = V4L2_MBUS_PARALLEL; - cfg->flags = soc_camera_apply_board_flags(ssdd, cfg); - - return 0; -} - -static int tw9910_s_mbus_config(struct v4l2_subdev *sd, - const struct v4l2_mbus_config *cfg) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - u8 val = VSSL_VVALID | HSSL_DVALID; - unsigned long flags = soc_camera_apply_board_flags(ssdd, cfg); - - /* - * set OUTCTR1 - * - * We use VVALID and DVALID signals to control VSYNC and HSYNC - * outputs, in this mode their polarity is inverted. - */ - if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) - val |= HSP_HI; - - if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) - val |= VSP_HI; - - return i2c_smbus_write_byte_data(client, OUTCTR1, val); -} - -static int tw9910_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm) -{ - *norm = V4L2_STD_NTSC | V4L2_STD_PAL; - return 0; -} - -static const struct v4l2_subdev_video_ops tw9910_subdev_video_ops = { - .s_std = tw9910_s_std, - .g_std = tw9910_g_std, - .s_stream = tw9910_s_stream, - .g_mbus_config = tw9910_g_mbus_config, - .s_mbus_config = tw9910_s_mbus_config, - .g_tvnorms = tw9910_g_tvnorms, -}; - -static const struct v4l2_subdev_pad_ops tw9910_subdev_pad_ops = { - .enum_mbus_code = tw9910_enum_mbus_code, - .get_selection = tw9910_get_selection, - .get_fmt = tw9910_get_fmt, - .set_fmt = tw9910_set_fmt, -}; - -static const struct v4l2_subdev_ops tw9910_subdev_ops = { - .core = &tw9910_subdev_core_ops, - .video = &tw9910_subdev_video_ops, - .pad = &tw9910_subdev_pad_ops, -}; - -/* - * i2c_driver function - */ - -static int tw9910_probe(struct i2c_client *client, - const struct i2c_device_id *did) - -{ - struct tw9910_priv *priv; - struct tw9910_video_info *info; - struct i2c_adapter *adapter = - to_i2c_adapter(client->dev.parent); - struct soc_camera_subdev_desc *ssdd = soc_camera_i2c_to_desc(client); - int ret; - - if (!ssdd || !ssdd->drv_priv) { - dev_err(&client->dev, "TW9910: missing platform data!\n"); - return -EINVAL; - } - - info = ssdd->drv_priv; - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_err(&client->dev, - "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE_DATA\n"); - return -EIO; - } - - priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->info = info; - - v4l2_i2c_subdev_init(&priv->subdev, client, &tw9910_subdev_ops); - - priv->clk = v4l2_clk_get(&client->dev, "mclk"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); - - ret = tw9910_video_probe(client); - if (ret < 0) - v4l2_clk_put(priv->clk); - - return ret; -} - -static int tw9910_remove(struct i2c_client *client) -{ - struct tw9910_priv *priv = to_tw9910(client); - v4l2_clk_put(priv->clk); - return 0; -} - -static const struct i2c_device_id tw9910_id[] = { - { "tw9910", 0 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, tw9910_id); - -static struct i2c_driver tw9910_i2c_driver = { - .driver = { - .name = 
"tw9910", - }, - .probe = tw9910_probe, - .remove = tw9910_remove, - .id_table = tw9910_id, -}; - -module_i2c_driver(tw9910_i2c_driver); - -MODULE_DESCRIPTION("SoC Camera driver for tw9910"); -MODULE_AUTHOR("Kuninori Morimoto"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c index e8613e364403..a62ede096636 100644 --- a/drivers/media/i2c/tda1997x.c +++ b/drivers/media/i2c/tda1997x.c @@ -1884,6 +1884,10 @@ static int tda1997x_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid) for (i = 0; i < 128; i++) io_write(sd, REG_EDID_IN_BYTE128 + i, edid->edid[i+128]); + /* store state */ + memcpy(state->edid.edid, edid->edid, 256); + state->edid.blocks = edid->blocks; + tda1997x_enable_edid(sd); return 0; diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h index f55dfc423a86..ecf87534613b 100644 --- a/drivers/media/i2c/tda1997x_regs.h +++ b/drivers/media/i2c/tda1997x_regs.h @@ -596,7 +596,7 @@ #define RESET_AUDIO BIT(0) /* Reset Audio FIFO control */ /* HDCP_BCAPS bits */ -#define HDCP_HDMI BIT(7) /* HDCP suports HDMI (vs DVI only) */ +#define HDCP_HDMI BIT(7) /* HDCP supports HDMI (vs DVI only) */ #define HDCP_REPEATER BIT(6) /* HDCP supports repeater function */ #define HDCP_READY BIT(5) /* set by repeater function */ #define HDCP_FAST BIT(4) /* Up to 400kHz */ diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c index 0dd6ff3e6201..6ba53f3a6dd2 100644 --- a/drivers/media/i2c/tda9840.c +++ b/drivers/media/i2c/tda9840.c @@ -7,7 +7,7 @@ The tda9840 is a stereo/dual sound processor with digital identification. It can be found at address 0x84 on the i2c-bus. - For detailed informations download the specifications directly + For detailed information download the specifications directly from SGS Thomson at http://www.st.com This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c index 084bd75bb32c..965c6ccc4fee 100644 --- a/drivers/media/i2c/tea6415c.c +++ b/drivers/media/i2c/tea6415c.c @@ -9,7 +9,7 @@ It is cascadable, i.e. it can be found at the addresses 0x86 and 0x06 on the i2c-bus. - For detailed informations download the specifications directly + For detailed information download the specifications directly from SGS Thomson at http://www.st.com This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c index b7f4e58f3624..2701a4c9734d 100644 --- a/drivers/media/i2c/tea6420.c +++ b/drivers/media/i2c/tea6420.c @@ -9,7 +9,7 @@ It is cascadable, i.e. it can be found at the addresses 0x98 and 0x9a on the i2c-bus. - For detailed informations download the specifications directly + For detailed information download the specifications directly from SGS Thomson at http://www.st.com This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/i2c/tvaudio.c b/drivers/media/i2c/tvaudio.c index af2da977a685..e6796e94dadf 100644 --- a/drivers/media/i2c/tvaudio.c +++ b/drivers/media/i2c/tvaudio.c @@ -538,7 +538,7 @@ static int tda9840_checkit(struct CHIPSTATE *chip) #define TDA9855_INT 0 /* Selects inputs LOR and LOL. 
(internal) */ /* Unique to TDA9850: */ -/* lower 4 bits contol SAP noise threshold, over which SAP turns off +/* lower 4 bits control SAP noise threshold, over which SAP turns off * set to values of 0x00 through 0x0f for SAP1 through SAP16 */ @@ -546,7 +546,7 @@ static int tda9840_checkit(struct CHIPSTATE *chip) /* Common to TDA9855 and TDA9850: */ #define TDA985x_SAP 3<<6 /* Selects SAP output, mute if not received */ #define TDA985x_MONOSAP 2<<6 /* Selects Mono on left, SAP on right */ -#define TDA985x_STEREO 1<<6 /* Selects Stereo ouput, mono if not received */ +#define TDA985x_STEREO 1<<6 /* Selects Stereo output, mono if not received */ #define TDA985x_MONO 0 /* Forces Mono output */ #define TDA985x_LMU 1<<3 /* Mute (LOR/LOL for 9855, OUTL/OUTR for 9850) */ diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c index 1cc83cb934e2..3ada3bb27402 100644 --- a/drivers/media/i2c/tvp514x.c +++ b/drivers/media/i2c/tvp514x.c @@ -67,7 +67,7 @@ enum tvp514x_std { }; /** - * struct tvp514x_std_info - Structure to store standard informations + * struct tvp514x_std_info - Structure to store standard information * @width: Line width in pixels * @height:Number of active lines * @video_std: Value to write in REG_VIDEO_STD register diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c index a54548cc4285..4d7cd736b930 100644 --- a/drivers/media/i2c/tw9910.c +++ b/drivers/media/i2c/tw9910.c @@ -584,6 +584,14 @@ static int tw9910_s_register(struct v4l2_subdev *sd, } #endif +static void tw9910_set_gpio_value(struct gpio_desc *desc, int value) +{ + if (desc) { + gpiod_set_value(desc, value); + usleep_range(500, 1000); + } +} + static int tw9910_power_on(struct tw9910_priv *priv) { struct i2c_client *client = v4l2_get_subdevdata(&priv->subdev); @@ -595,10 +603,7 @@ static int tw9910_power_on(struct tw9910_priv *priv) return ret; } - if (priv->pdn_gpio) { - gpiod_set_value(priv->pdn_gpio, 0); - usleep_range(500, 1000); - } + tw9910_set_gpio_value(priv->pdn_gpio, 0); /* * FIXME: The reset signal is connected to a shared GPIO on some @@ -610,14 +615,14 @@ static int tw9910_power_on(struct tw9910_priv *priv) GPIOD_OUT_LOW); if (IS_ERR(priv->rstb_gpio)) { dev_info(&client->dev, "Unable to get GPIO \"rstb\""); + clk_disable_unprepare(priv->clk); + tw9910_set_gpio_value(priv->pdn_gpio, 1); return PTR_ERR(priv->rstb_gpio); } if (priv->rstb_gpio) { - gpiod_set_value(priv->rstb_gpio, 1); - usleep_range(500, 1000); - gpiod_set_value(priv->rstb_gpio, 0); - usleep_range(500, 1000); + tw9910_set_gpio_value(priv->rstb_gpio, 1); + tw9910_set_gpio_value(priv->rstb_gpio, 0); gpiod_put(priv->rstb_gpio); } @@ -628,11 +633,7 @@ static int tw9910_power_on(struct tw9910_priv *priv) static int tw9910_power_off(struct tw9910_priv *priv) { clk_disable_unprepare(priv->clk); - - if (priv->pdn_gpio) { - gpiod_set_value(priv->pdn_gpio, 1); - usleep_range(500, 1000); - } + tw9910_set_gpio_value(priv->pdn_gpio, 1); return 0; } @@ -1000,7 +1001,7 @@ static int tw9910_remove(struct i2c_client *client) if (priv->pdn_gpio) gpiod_put(priv->pdn_gpio); clk_put(priv->clk); - v4l2_device_unregister_subdev(&priv->subdev); + v4l2_async_unregister_subdev(&priv->subdev); return 0; } diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c index 01dcf179f203..abd3152df7d0 100644 --- a/drivers/media/i2c/video-i2c.c +++ b/drivers/media/i2c/video-i2c.c @@ -6,6 +6,7 @@ * * Supported: * - Panasonic AMG88xx Grid-Eye Sensors + * - Melexis MLX90640 Thermal Cameras */ #include @@ -18,6 +19,7 @@ #include #include 
#include +#include #include #include #include @@ -66,12 +68,26 @@ static const struct v4l2_frmsize_discrete amg88xx_size = { .height = 8, }; +static const struct v4l2_fmtdesc mlx90640_format = { + .pixelformat = V4L2_PIX_FMT_Y16_BE, +}; + +static const struct v4l2_frmsize_discrete mlx90640_size = { + .width = 32, + .height = 26, /* 24 lines of pixel data + 2 lines of processing data */ +}; + static const struct regmap_config amg88xx_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0xff }; +static const struct regmap_config mlx90640_regmap_config = { + .reg_bits = 16, + .val_bits = 16, +}; + struct video_i2c_chip { /* video dimensions */ const struct v4l2_fmtdesc *format; @@ -88,6 +104,7 @@ struct video_i2c_chip { unsigned int bpp; const struct regmap_config *regmap_config; + struct nvmem_config *nvmem_config; /* setup function */ int (*setup)(struct video_i2c_data *data); @@ -102,6 +119,22 @@ struct video_i2c_chip { int (*hwmon_init)(struct video_i2c_data *data); }; +static int mlx90640_nvram_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct video_i2c_data *data = priv; + + return regmap_bulk_read(data->regmap, 0x2400 + offset, val, bytes); +} + +static struct nvmem_config mlx90640_nvram_config = { + .name = "mlx90640_nvram", + .word_size = 2, + .stride = 1, + .size = 1664, + .reg_read = mlx90640_nvram_read, +}; + /* Power control register */ #define AMG88XX_REG_PCTL 0x00 #define AMG88XX_PCTL_NORMAL 0x00 @@ -122,12 +155,23 @@ struct video_i2c_chip { /* Temperature register */ #define AMG88XX_REG_T01L 0x80 +/* Control register */ +#define MLX90640_REG_CTL1 0x800d +#define MLX90640_REG_CTL1_MASK 0x0380 +#define MLX90640_REG_CTL1_MASK_SHIFT 7 + static int amg88xx_xfer(struct video_i2c_data *data, char *buf) { return regmap_bulk_read(data->regmap, AMG88XX_REG_T01L, buf, data->chip->buffer_size); } +static int mlx90640_xfer(struct video_i2c_data *data, char *buf) +{ + return regmap_bulk_read(data->regmap, 0x400, buf, + data->chip->buffer_size); +} + static int amg88xx_setup(struct video_i2c_data *data) { unsigned int mask = AMG88XX_FPSC_1FPS; @@ -141,6 +185,27 @@ static int amg88xx_setup(struct video_i2c_data *data) return regmap_update_bits(data->regmap, AMG88XX_REG_FPSC, mask, val); } +static int mlx90640_setup(struct video_i2c_data *data) +{ + unsigned int n, idx; + + for (n = 0; n < data->chip->num_frame_intervals - 1; n++) { + if (data->frame_interval.numerator + != data->chip->frame_intervals[n].numerator) + continue; + + if (data->frame_interval.denominator + == data->chip->frame_intervals[n].denominator) + break; + } + + idx = data->chip->num_frame_intervals - n - 1; + + return regmap_update_bits(data->regmap, MLX90640_REG_CTL1, + MLX90640_REG_CTL1_MASK, + idx << MLX90640_REG_CTL1_MASK_SHIFT); +} + static int amg88xx_set_power_on(struct video_i2c_data *data) { int ret; @@ -274,13 +339,27 @@ static int amg88xx_hwmon_init(struct video_i2c_data *data) #define amg88xx_hwmon_init NULL #endif -#define AMG88XX 0 +enum { + AMG88XX, + MLX90640, +}; static const struct v4l2_fract amg88xx_frame_intervals[] = { { 1, 10 }, { 1, 1 }, }; +static const struct v4l2_fract mlx90640_frame_intervals[] = { + { 1, 64 }, + { 1, 32 }, + { 1, 16 }, + { 1, 8 }, + { 1, 4 }, + { 1, 2 }, + { 1, 1 }, + { 2, 1 }, +}; + static const struct video_i2c_chip video_i2c_chip[] = { [AMG88XX] = { .size = &amg88xx_size, @@ -295,6 +374,18 @@ static const struct video_i2c_chip video_i2c_chip[] = { .set_power = amg88xx_set_power, .hwmon_init = amg88xx_hwmon_init, }, + [MLX90640] = { + 
.size = &mlx90640_size, + .format = &mlx90640_format, + .frame_intervals = mlx90640_frame_intervals, + .num_frame_intervals = ARRAY_SIZE(mlx90640_frame_intervals), + .buffer_size = 1664, + .bpp = 16, + .regmap_config = &mlx90640_regmap_config, + .nvmem_config = &mlx90640_nvram_config, + .setup = mlx90640_setup, + .xfer = mlx90640_xfer, + }, }; static const struct v4l2_file_operations video_i2c_fops = { @@ -756,6 +847,21 @@ static int video_i2c_probe(struct i2c_client *client, } } + if (data->chip->nvmem_config) { + struct nvmem_config *config = data->chip->nvmem_config; + struct nvmem_device *device; + + config->priv = data; + config->dev = &client->dev; + + device = devm_nvmem_register(&client->dev, config); + + if (IS_ERR(device)) { + dev_warn(&client->dev, + "failed to register nvmem device\n"); + } + } + ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1); if (ret < 0) goto error_pm_disable; @@ -835,12 +941,14 @@ static const struct dev_pm_ops video_i2c_pm_ops = { static const struct i2c_device_id video_i2c_id_table[] = { { "amg88xx", AMG88XX }, + { "mlx90640", MLX90640 }, {} }; MODULE_DEVICE_TABLE(i2c, video_i2c_id_table); static const struct of_device_id video_i2c_of_match[] = { { .compatible = "panasonic,amg88xx", .data = &video_i2c_chip[AMG88XX] }, + { .compatible = "melexis,mlx90640", .data = &video_i2c_chip[MLX90640] }, {} }; MODULE_DEVICE_TABLE(of, video_i2c_of_match); diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c index c71a34ae6383..eec2e2b2f6ec 100644 --- a/drivers/media/media-request.c +++ b/drivers/media/media-request.c @@ -100,6 +100,7 @@ static __poll_t media_request_poll(struct file *filp, if (!(poll_requested_events(wait) & EPOLLPRI)) return 0; + poll_wait(filp, &req->poll_wait, wait); spin_lock_irqsave(&req->lock, flags); if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { ret = EPOLLPRI; @@ -110,8 +111,6 @@ static __poll_t media_request_poll(struct file *filp, goto unlock; } - poll_wait(filp, &req->poll_wait, wait); - unlock: spin_unlock_irqrestore(&req->lock, flags); return ret; diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.c b/drivers/media/pci/bt8xx/bttv-audio-hook.c index 346fc7f58839..8febe7358a8f 100644 --- a/drivers/media/pci/bt8xx/bttv-audio-hook.c +++ b/drivers/media/pci/bt8xx/bttv-audio-hook.c @@ -1,5 +1,5 @@ /* - * Handlers for board audio hooks, splitted from bttv-cards + * Handlers for board audio hooks, split from bttv-cards * * Copyright (c) 2006 Mauro Carvalho Chehab * This code is placed under the terms of the GNU General Public License diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.h b/drivers/media/pci/bt8xx/bttv-audio-hook.h index be16a537a03a..c61b9ac4f4e3 100644 --- a/drivers/media/pci/bt8xx/bttv-audio-hook.h +++ b/drivers/media/pci/bt8xx/bttv-audio-hook.h @@ -1,5 +1,5 @@ /* - * Handlers for board audio hooks, splitted from bttv-cards + * Handlers for board audio hooks, split from bttv-cards * * Copyright (c) 2006 Mauro Carvalho Chehab * This code is placed under the terms of the GNU General Public License diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c index 2616243b2c49..b1c6f3e9c3d0 100644 --- a/drivers/media/pci/bt8xx/bttv-cards.c +++ b/drivers/media/pci/bt8xx/bttv-cards.c @@ -2,7 +2,7 @@ bttv-cards.c - this file has configuration informations - card-specific stuff + this file has configuration information - card-specific stuff like the big tvcards array for the most part Copyright (C) 1996,97,98 Ralph Metzler (rjkm@thp.uni-koeln.de) @@ -1391,7 +1391,7 
@@ struct tvcard bttv_tvcards[] = { .gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff }, .gpiomute = 0x947fff, /* tvtuner, radio, external,internal, mute, stereo - * tuner, Composit, SVid, Composit-on-Svid-adapter */ + * tuner, Composite, SVid, Composite-on-Svid-adapter */ .muxsel = MUXSEL(2, 3, 0, 1), .tuner_type = TUNER_MT2032, .tuner_addr = ADDR_UNSET, @@ -1411,7 +1411,7 @@ struct tvcard bttv_tvcards[] = { .gpiomux = {0x947fff, 0x987fff,0x947fff,0x947fff }, .gpiomute = 0x947fff, /* tvtuner, radio, external,internal, mute, stereo - * tuner, Composit, SVid, Composit-on-Svid-adapter */ + * tuner, Composite, SVid, Composite-on-Svid-adapter */ .muxsel = MUXSEL(2, 3, 0, 1), .tuner_type = TUNER_MT2032, .tuner_addr = ADDR_UNSET, @@ -4180,7 +4180,7 @@ static void init_PXC200(struct bttv *btv) bttv_I2CWrite(btv,0x5E,0,0x80,1); /* Initialise 12C508 PIC */ - /* The I2CWrite and I2CRead commmands are actually to the + /* The I2CWrite and I2CRead commands are actually to the * same chips - but the R/W bit is included in the address * argument so the numbers are different */ @@ -4289,7 +4289,7 @@ init_RTV24 (struct bttv *btv) /* ----------------------------------------------------------------------- */ /* * The PCI-8604PW contains a CPLD, probably an ispMACH 4A, that filters - * the PCI REQ signals comming from the four BT878 chips. After power + * the PCI REQ signals coming from the four BT878 chips. After power * up, the CPLD does not forward requests to the bus, which prevents * the BT878 from fetching RISC instructions from memory. While the * CPLD is connected to most of the GPIOs of PCI device 0xD, only @@ -4405,7 +4405,7 @@ static void rv605_muxsel(struct bttv *btv, unsigned int input) gpio_bits(0x07f, muxgpio[input]); - /* reset all conections */ + /* reset all connections */ gpio_bits(0x200,0x200); mdelay(1); gpio_bits(0x200,0x000); diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index d09785fd37a8..b7150648081d 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c @@ -2435,7 +2435,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv, f->fmt.pix.field = field; - /* update our state informations */ + /* update our state information */ fh->fmt = fmt; fh->cap.field = f->fmt.pix.field; fh->cap.last = V4L2_FIELD_NONE; @@ -3064,7 +3064,7 @@ static int bttv_open(struct file *file) V4L2 apps we now reset the cropping parameters as seen through this fh, which is to say VIDIOC_G_SELECTION and scaling limit checks will use btv->crop[0], the default cropping parameters for the - current video standard, and VIDIOC_S_FMT will not implicitely + current video standard, and VIDIOC_S_FMT will not implicitly change the cropping parameters until VIDIOC_S_SELECTION has been called. 
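The bttv_open comment above describes the ordering between cropping and format negotiation; a minimal userspace sketch of that sequence (not part of this patch), with device path and geometry purely illustrative:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/*
 * Set an explicit crop rectangle with VIDIOC_S_SELECTION first, so a
 * later VIDIOC_S_FMT scales within that rectangle instead of the
 * per-standard defaults mentioned in the comment above.
 */
static int set_crop_then_fmt(const char *path)
{
	struct v4l2_selection sel;
	struct v4l2_format fmt;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&sel, 0, sizeof(sel));
	sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	sel.target = V4L2_SEL_TGT_CROP;
	sel.r.width = 704;	/* illustrative active area */
	sel.r.height = 576;
	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) < 0)
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 352;
	fmt.fmt.pix.height = 288;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
	return ioctl(fd, VIDIOC_S_FMT, &fmt);
}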
*/ fh->do_crop = !reset_crop; /* module parameter */ @@ -3600,9 +3600,7 @@ static void bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup, struct bttv_buffer_set *curr, unsigned int state) { - struct timeval ts; - - v4l2_get_timestamp(&ts); + u64 ts = ktime_get_ns(); if (wakeup->top == wakeup->bottom) { if (NULL != wakeup->top && curr->top != wakeup->top) { @@ -3643,7 +3641,7 @@ bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup, if (NULL == wakeup) return; - v4l2_get_timestamp(&wakeup->vb.ts); + wakeup->vb.ts = ktime_get_ns(); wakeup->vb.field_count = btv->field_count; wakeup->vb.state = state; wake_up(&wakeup->vb.done); @@ -3713,7 +3711,7 @@ bttv_irq_wakeup_top(struct bttv *btv) btv->curr.top = NULL; bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0); - v4l2_get_timestamp(&wakeup->vb.ts); + wakeup->vb.ts = ktime_get_ns(); wakeup->vb.field_count = btv->field_count; wakeup->vb.state = VIDEOBUF_DONE; wake_up(&wakeup->vb.done); diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c index 74aff6877d9c..6b3c73674a3c 100644 --- a/drivers/media/pci/bt8xx/bttv-risc.c +++ b/drivers/media/pci/bt8xx/bttv-risc.c @@ -93,7 +93,7 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc, *(rp++)=cpu_to_le32(sg_dma_address(sg)+offset); offset+=bpl; } else { - /* scanline needs to be splitted */ + /* scanline needs to be split */ todo = bpl; *(rp++)=cpu_to_le32(BT848_RISC_WRITE|BT848_RISC_SOL| (sg_dma_len(sg)-offset)); diff --git a/drivers/media/pci/bt8xx/bttv.h b/drivers/media/pci/bt8xx/bttv.h index a27384adadd2..d24b9ef9f59f 100644 --- a/drivers/media/pci/bt8xx/bttv.h +++ b/drivers/media/pci/bt8xx/bttv.h @@ -288,7 +288,7 @@ extern void bttv_init_card1(struct bttv *btv); extern void bttv_init_card2(struct bttv *btv); extern void bttv_init_tuner(struct bttv *btv); -/* card-specific funtions */ +/* card-specific functions */ extern void tea5757_set_freq(struct bttv *btv, unsigned short freq); extern u32 bttv_tda9880_setnorm(struct bttv *btv, u32 gpiobits); diff --git a/drivers/media/pci/bt8xx/dst.c b/drivers/media/pci/bt8xx/dst.c index b98de2a22f78..c94318c71a0c 100644 --- a/drivers/media/pci/bt8xx/dst.c +++ b/drivers/media/pci/bt8xx/dst.c @@ -1295,15 +1295,15 @@ static int dst_get_signal(struct dst_state *state) static int dst_tone_power_cmd(struct dst_state *state) { - u8 paket[8] = { 0x00, 0x09, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 }; + u8 packet[8] = { 0x00, 0x09, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 }; if (state->dst_type != DST_TYPE_IS_SAT) return -EOPNOTSUPP; - paket[4] = state->tx_tuna[4]; - paket[2] = state->tx_tuna[2]; - paket[3] = state->tx_tuna[3]; - paket[7] = dst_check_sum (paket, 7); - return dst_command(state, paket, 8); + packet[4] = state->tx_tuna[4]; + packet[2] = state->tx_tuna[2]; + packet[3] = state->tx_tuna[3]; + packet[7] = dst_check_sum (packet, 7); + return dst_command(state, packet, 8); } static int dst_get_tuna(struct dst_state *state) @@ -1429,18 +1429,18 @@ error: static int dst_set_diseqc(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd) { struct dst_state *state = fe->demodulator_priv; - u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec }; + u8 packet[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec }; if (state->dst_type != DST_TYPE_IS_SAT) return -EOPNOTSUPP; if (cmd->msg_len > 0 && cmd->msg_len < 5) - memcpy(&paket[3], cmd->msg, cmd->msg_len); + memcpy(&packet[3], cmd->msg, cmd->msg_len); else if (cmd->msg_len == 5 && state->dst_hw_cap & DST_TYPE_HAS_DISEQC5) - memcpy(&paket[2], 
cmd->msg, cmd->msg_len); + memcpy(&packet[2], cmd->msg, cmd->msg_len); else return -EINVAL; - paket[7] = dst_check_sum(&paket[0], 7); - return dst_command(state, paket, 8); + packet[7] = dst_check_sum(&packet[0], 7); + return dst_command(state, packet, 8); } static int dst_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage) diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c index c088de551081..f9fa3a7c3b8f 100644 --- a/drivers/media/pci/cobalt/cobalt-v4l2.c +++ b/drivers/media/pci/cobalt/cobalt-v4l2.c @@ -375,7 +375,7 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s) } spin_unlock_irqrestore(&s->irqlock, flags); - /* Wait 100 milisecond for DMA to finish, abort on timeout. */ + /* Wait 100 millisecond for DMA to finish, abort on timeout. */ if (!wait_event_timeout(s->q.done_wq, is_dma_done(s), msecs_to_jiffies(timeout_msec))) { omni_sg_dma_abort_channel(s); diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h index 02d0fb703a41..c563cc2358ba 100644 --- a/drivers/media/pci/cx18/cx18-cards.h +++ b/drivers/media/pci/cx18/cx18-cards.h @@ -83,7 +83,7 @@ struct cx18_gpio_i2c_slave_reset { u32 active_hi_mask; /* GPIO outputs that reset i2c chips when high */ int msecs_asserted; /* time period reset must remain asserted */ int msecs_recovery; /* time after deassert for chips to be ready */ - u32 ir_reset_mask; /* GPIO to reset the Zilog Z8F0811 IR contoller */ + u32 ir_reset_mask; /* GPIO to reset the Zilog Z8F0811 IR controller */ }; struct cx18_gpio_audio_input { /* select tuner/line in input */ diff --git a/drivers/media/pci/cx18/cx18-dvb.c b/drivers/media/pci/cx18/cx18-dvb.c index a3a7f7065349..61452c50a9c3 100644 --- a/drivers/media/pci/cx18/cx18-dvb.c +++ b/drivers/media/pci/cx18/cx18-dvb.c @@ -120,8 +120,8 @@ static struct zl10353_config leadtek_dvr3100h_demod = { /* * Due to * - * 1. an absence of information on how to prgram the MT352 - * 2. the Linux mt352 module pushing MT352 initialzation off onto us here + * 1. an absence of information on how to program the MT352 + * 2. the Linux mt352 module pushing MT352 initialization off onto us here * * We have to use an init sequence that *you* must extract from the Windows * driver (yuanrap.sys) and which we load as a firmware. @@ -458,7 +458,7 @@ void cx18_dvb_unregister(struct cx18_stream *stream) dvb_unregister_adapter(dvb_adapter); } -/* All the DVB attach calls go here, this function get's modified +/* All the DVB attach calls go here, this function gets modified * for each new card. cx18_dvb_start_feed() will also need changes. 
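The cx18-dvb comment above mentions loading a demod init sequence as a firmware blob; a minimal sketch of that pattern, assuming the blob is a flat list of register/value byte pairs and using a hypothetical write_demod_reg() helper:

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/types.h>

/* Hypothetical register-write helper; the real driver goes through I2C. */
int write_demod_reg(struct device *dev, u8 reg, u8 val);

static int load_demod_init_blob(struct device *dev, const char *name)
{
	const struct firmware *fw;
	size_t i;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	/* Assume the blob is a sequence of <register, value> byte pairs. */
	for (i = 0; i + 1 < fw->size; i += 2) {
		ret = write_demod_reg(dev, fw->data[i], fw->data[i + 1]);
		if (ret)
			break;
	}

	release_firmware(fw);
	return ret;
}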
*/ static int dvb_register(struct cx18_stream *stream) diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c index a3f44e30f821..f778965a2eb8 100644 --- a/drivers/media/pci/cx18/cx18-fileops.c +++ b/drivers/media/pci/cx18/cx18-fileops.c @@ -403,7 +403,7 @@ static size_t cx18_copy_mdl_to_user(struct cx18_stream *s, tot_written += rc; if (stop || /* Forced stopping point for VBI insertion */ - tot_written >= ucount || /* Reader request statisfied */ + tot_written >= ucount || /* Reader request satisfied */ mdl->curr_buf->readpos < mdl->curr_buf->bytesused || mdl->readpos >= mdl->bytesused) /* MDL buffers drained */ break; diff --git a/drivers/media/pci/cx18/cx18-io.h b/drivers/media/pci/cx18/cx18-io.h index a3c96fb5d28d..da7871fdb56d 100644 --- a/drivers/media/pci/cx18/cx18-io.h +++ b/drivers/media/pci/cx18/cx18-io.h @@ -23,7 +23,7 @@ /* * Readback and retry of MMIO access for reliability: * The concept was suggested by Steve Toth . - * The implmentation is the fault of Andy Walls . + * The implementation is the fault of Andy Walls . * * *write* functions are implied to retry the mmio unless suffixed with _noretry * *read* functions never retry the mmio (it never helps to do so) diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c index f66dd63e1994..0ffd2196a980 100644 --- a/drivers/media/pci/cx18/cx18-mailbox.c +++ b/drivers/media/pci/cx18/cx18-mailbox.c @@ -197,7 +197,7 @@ static void cx18_mdl_send_to_videobuf(struct cx18_stream *s, } if (dispatch) { - v4l2_get_timestamp(&vb_buf->vb.ts); + vb_buf->vb.ts = ktime_get_ns(); list_del(&vb_buf->vb.queue); vb_buf->vb.state = VIDEOBUF_DONE; wake_up(&vb_buf->vb.done); diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c index 81f1e27436fd..48cb96f953a0 100644 --- a/drivers/media/pci/cx18/cx18-vbi.c +++ b/drivers/media/pci/cx18/cx18-vbi.c @@ -24,7 +24,7 @@ /* * Raster Reference/Protection (RP) bytes, used in Start/End Active * Video codes emitted from the digitzer in VIP 1.x mode, that flag the start - * of VBI sample or VBI ancillary data regions in the digitial ratser line. + * of VBI sample or VBI ancillary data regions in the digital ratser line. * * Task FieldEven VerticalBlank HorizontalBlank 0 0 0 0 */ diff --git a/drivers/media/pci/cx18/cx23418.h b/drivers/media/pci/cx18/cx23418.h index 15205b662952..e9f6e50ca5b4 100644 --- a/drivers/media/pci/cx18/cx23418.h +++ b/drivers/media/pci/cx18/cx23418.h @@ -439,7 +439,7 @@ /* Error in I2C data xfer (but I2C device is present) */ #define CXERR_I2CDEV_XFERERR 0x000011 -/* Chanel changing component not ready */ +/* Channel changing component not ready */ #define CXERR_CHANNELNOTREADY 0x000012 /* PPU (Presensation/Decoder) mail box is corrupted */ diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c index a00b77d80ed9..4863bd4ea5d0 100644 --- a/drivers/media/pci/cx23885/cx23885-417.c +++ b/drivers/media/pci/cx23885/cx23885-417.c @@ -1561,7 +1561,7 @@ int cx23885_417_register(struct cx23885_dev *dev) pr_info("%s: registered device %s [mpeg]\n", dev->name, video_device_node_name(dev->v4l_device)); - /* ST: Configure the encoder paramaters, but don't begin + /* ST: Configure the encoder parameters, but don't begin * encoding, this resolves an issue where the first time the * encoder is started video can be choppy. 
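The cx18-io.h comment above explains that MMIO writes are retried with a readback while reads never are; a small sketch of that write-with-readback-retry idea, with the retry bound purely illustrative:

#include <linux/io.h>
#include <linux/types.h>

/* Re-issue the MMIO write until the readback matches, up to a small bound. */
static void mmio_write_retry(void __iomem *addr, u32 val)
{
	int i;

	for (i = 0; i < 3; i++) {
		writel(val, addr);
		if (readl(addr) == val)
			break;
	}
}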
*/ diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c index ee9d329c4038..c9c1385208f9 100644 --- a/drivers/media/pci/cx23885/cx23885-alsa.c +++ b/drivers/media/pci/cx23885/cx23885-alsa.c @@ -59,7 +59,7 @@ module_param(audio_debug, int, 0644); MODULE_PARM_DESC(audio_debug, "enable debug messages [analog audio]"); /**************************************************************************** - Board specific funtions + Board specific functions ****************************************************************************/ /* Constants taken from cx88-reg.h */ diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index fd5c52b21436..cec3cbca8d32 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c @@ -1996,9 +1996,9 @@ static inline int encoder_on_portc(struct cx23885_dev *dev) * and report errors if we think we're tampering with a GPIo that might * be assigned to the encoder (and used for the host bus). * - * GPIO 2 thru 0 - On the cx23885 bridge - * GPIO 18 thru 3 - On the cx23417 host bus interface - * GPIO 23 thru 19 - On the cx25840 a/v core + * GPIO 2 through 0 - On the cx23885 bridge + * GPIO 18 through 3 - On the cx23417 host bus interface + * GPIO 23 through 19 - On the cx25840 a/v core */ void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask) { diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h index cf965efabe66..4d34799431dc 100644 --- a/drivers/media/pci/cx23885/cx23885.h +++ b/drivers/media/pci/cx23885/cx23885.h @@ -246,7 +246,7 @@ struct cx23885_i2c { struct i2c_client i2c_client; u32 i2c_rc; - /* 885 registers used for raw addess */ + /* 885 registers used for raw address */ u32 i2c_period; u32 reg_ctrl; u32 reg_stat; diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c index 1d775c90df51..a4d66141c4bb 100644 --- a/drivers/media/pci/cx23885/cx23888-ir.c +++ b/drivers/media/pci/cx23885/cx23888-ir.c @@ -548,7 +548,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status, ror = stats & STATS_ROR; /* Rx FIFO Over Run */ tse = irqen & IRQEN_TSE; /* Tx FIFO Service Request IRQ Enable */ - rse = irqen & IRQEN_RSE; /* Rx FIFO Service Reuqest IRQ Enable */ + rse = irqen & IRQEN_RSE; /* Rx FIFO Service Request IRQ Enable */ rte = irqen & IRQEN_RTE; /* Rx Pulse Width Timer Time Out IRQ Enable */ roe = irqen & IRQEN_ROE; /* Rx FIFO Over Run IRQ Enable */ @@ -638,7 +638,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status, events |= V4L2_SUBDEV_IR_RX_END_OF_RX_DETECTED; } if (v) { - /* Clear STATS_ROR & STATS_RTO as needed by reseting hardware */ + /* Clear STATS_ROR & STATS_RTO as needed by resetting hardware */ cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl & ~v); cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl); *handled = true; diff --git a/drivers/media/pci/cx25821/cx25821-alsa.c b/drivers/media/pci/cx25821/cx25821-alsa.c index 0a6c90e92557..926e12a1554a 100644 --- a/drivers/media/pci/cx25821/cx25821-alsa.c +++ b/drivers/media/pci/cx25821/cx25821-alsa.c @@ -120,7 +120,7 @@ module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); /**************************************************************************** - Module specific funtions + Module specific functions ****************************************************************************/ /* Constants taken from cx88-reg.h */ #define AUD_INT_DN_RISCI1 (1 << 0) 
diff --git a/drivers/media/pci/cx25821/cx25821-sram.h b/drivers/media/pci/cx25821/cx25821-sram.h index b94e0d4df664..a907662dbb1c 100644 --- a/drivers/media/pci/cx25821/cx25821-sram.h +++ b/drivers/media/pci/cx25821/cx25821-sram.h @@ -24,7 +24,7 @@ #define AUDIO_CMDS_SIZE 80 /* AUDIO CMDS size in bytes */ #define MBIF_CMDS_SIZE 80 /* MBIF CMDS size in bytes */ -/* #define RX_SRAM_POOL_START_SIZE = 0; // Start of useable RX SRAM for buffers */ +/* #define RX_SRAM_POOL_START_SIZE = 0; // Start of usable RX SRAM for buffers */ #define VID_IQ_SIZE 64 /* VID instruction queue size in bytes */ #define MBIF_IQ_SIZE 64 #define AUDIO_IQ_SIZE 64 /* AUD instruction queue size in bytes */ diff --git a/drivers/media/pci/cx25821/cx25821.h b/drivers/media/pci/cx25821/cx25821.h index 25eba4ac4499..b422275ff4f1 100644 --- a/drivers/media/pci/cx25821/cx25821.h +++ b/drivers/media/pci/cx25821/cx25821.h @@ -156,7 +156,7 @@ struct cx25821_i2c { struct i2c_client i2c_client; u32 i2c_rc; - /* cx25821 registers used for raw addess */ + /* cx25821 registers used for raw address */ u32 i2c_period; u32 reg_ctrl; u32 reg_stat; diff --git a/drivers/media/pci/dm1105/dm1105.c b/drivers/media/pci/dm1105/dm1105.c index a84c8270ea13..60138ca3372b 100644 --- a/drivers/media/pci/dm1105/dm1105.c +++ b/drivers/media/pci/dm1105/dm1105.c @@ -517,7 +517,7 @@ static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap, msgs[i].buf[byte] = rc; } } else if ((msgs[i].buf[0] == 0xf7) && (msgs[i].addr == 0x55)) { - /* prepaired for cx24116 firmware */ + /* prepared for cx24116 firmware */ /* Write in small blocks */ len = msgs[i].len - 1; k = 1; diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index cdb79ae2d8dc..f8020ebe9f05 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -264,7 +264,7 @@ static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev) */ /* - * shift for keeping value range suitable for 32-bit integer arithmetics + * shift for keeping value range suitable for 32-bit integer arithmetic */ #define LIMIT_SHIFT 8 @@ -846,7 +846,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb) unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE); unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page); struct sg_table *sg; - struct sg_page_iter sg_iter; + struct sg_dma_page_iter sg_iter; int i, j; if (lops <= 0 || lops > CIO2_MAX_LOPS) { @@ -873,7 +873,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb) b->offset = sg->sgl->offset; i = j = 0; - for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) { + for_each_sg_dma_page (sg->sgl, &sg_iter, sg->nents, 0) { if (!pages--) break; b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT; @@ -1810,7 +1810,8 @@ static int cio2_pci_probe(struct pci_dev *pci_dev, /* Register notifier for subdevices we care */ r = cio2_notifier_init(cio2); - if (r) + /* Proceed without sensors connected to allow the device to suspend. 
*/ + if (r && r != -ENODEV) goto fail_cio2_queue_exit; r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq, @@ -2047,7 +2048,7 @@ module_pci_driver(cio2_pci_driver); MODULE_AUTHOR("Tuukka Toivonen "); MODULE_AUTHOR("Tianshu Qiu "); -MODULE_AUTHOR("Jian Xu Zheng "); +MODULE_AUTHOR("Jian Xu Zheng"); MODULE_AUTHOR("Yuning Pu "); MODULE_AUTHOR("Yong Zhi "); MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/pci/ivtv/Kconfig b/drivers/media/pci/ivtv/Kconfig index c72cbbd2d40c..06ca4e23f9fb 100644 --- a/drivers/media/pci/ivtv/Kconfig +++ b/drivers/media/pci/ivtv/Kconfig @@ -70,8 +70,25 @@ config VIDEO_FB_IVTV This is used in the Hauppauge PVR-350 card. There is a driver homepage at . - In order to use this module, you will need to boot with PAT disabled - on x86 systems, using the nopat kernel parameter. - To compile this driver as a module, choose M here: the module will be called ivtvfb. + +config VIDEO_FB_IVTV_FORCE_PAT + bool "force cx23415 framebuffer init with x86 PAT enabled" + depends on VIDEO_FB_IVTV && X86_PAT + default n + ---help--- + With PAT enabled, the cx23415 framebuffer driver does not + utilize write-combined caching on the framebuffer memory. + For this reason, the driver will by default disable itself + when initializied on a kernel with PAT enabled (i.e. not + using the nopat kernel parameter). + + The driver is not easily upgradable to the PAT-aware + ioremap_wc() API since the firmware hides the address + ranges that should be marked write-combined from the driver. + + With this setting enabled, the framebuffer will initialize on + PAT-enabled systems but the framebuffer memory will be uncached. + + If unsure, say N. diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c index 1380474519f2..c3c2c79585f5 100644 --- a/drivers/media/pci/ivtv/ivtv-yuv.c +++ b/drivers/media/pci/ivtv/ivtv-yuv.c @@ -110,7 +110,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma, /* * Inherit the -EFAULT from rc's * initialization, but allow it to be - * overriden by uv_pages above if it was an + * overridden by uv_pages above if it was an * actual errno. */ } else { diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 8ec2525d8ef5..cfd21040d0e3 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -55,6 +55,7 @@ /* card parameters */ static int ivtvfb_card_id = -1; static int ivtvfb_debug = 0; +static bool ivtvfb_force_pat = IS_ENABLED(CONFIG_VIDEO_FB_IVTV_FORCE_PAT); static bool osd_laced; static int osd_depth; static int osd_upper; @@ -64,6 +65,7 @@ static int osd_xres; module_param(ivtvfb_card_id, int, 0444); module_param_named(debug,ivtvfb_debug, int, 0644); +module_param_named(force_pat, ivtvfb_force_pat, bool, 0644); module_param(osd_laced, bool, 0444); module_param(osd_depth, int, 0444); module_param(osd_upper, int, 0444); @@ -79,6 +81,9 @@ MODULE_PARM_DESC(debug, "Debug level (bitmask). Default: errors only\n" "\t\t\t(debug = 3 gives full debugging)"); +MODULE_PARM_DESC(force_pat, + "Force initialization on x86 PAT-enabled systems (bool).\n"); + /* Why upper, left, xres, yres, depth, laced ? To match terminology used by fbset. Why start at 1 for left & upper coordinate ? Because X doesn't allow 0 */ @@ -1167,8 +1172,15 @@ static int ivtvfb_init_card(struct ivtv *itv) #ifdef CONFIG_X86_64 if (pat_enabled()) { - pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n"); - return -ENODEV; + if (ivtvfb_force_pat) { + pr_info("PAT is enabled. 
Write-combined framebuffer caching will be disabled.\n"); + pr_info("To enable caching, boot with nopat kernel parameter\n"); + } else { + pr_warn("ivtvfb needs PAT disabled for write-combined framebuffer caching.\n"); + pr_warn("Boot with nopat kernel parameter to use caching, or use the\n"); + pr_warn("force_pat module parameter to run with caching disabled\n"); + return -ENODEV; + } } #endif diff --git a/drivers/media/pci/meye/meye.c b/drivers/media/pci/meye/meye.c index bd870e60c32b..896d2d856795 100644 --- a/drivers/media/pci/meye/meye.c +++ b/drivers/media/pci/meye/meye.c @@ -805,7 +805,7 @@ again: mchip_hsize() * mchip_vsize() * 2); meye.grab_buffer[reqnr].size = mchip_hsize() * mchip_vsize() * 2; meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; - v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp); + meye.grab_buffer[reqnr].ts = ktime_get_ns(); meye.grab_buffer[reqnr].sequence = sequence++; kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr, sizeof(int), &meye.doneq_lock); @@ -826,7 +826,7 @@ again: size); meye.grab_buffer[reqnr].size = size; meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; - v4l2_get_timestamp(&meye.grab_buffer[reqnr].timestamp); + meye.grab_buffer[reqnr].ts = ktime_get_ns(); meye.grab_buffer[reqnr].sequence = sequence++; kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr, sizeof(int), &meye.doneq_lock); @@ -1283,7 +1283,7 @@ static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) buf->flags |= V4L2_BUF_FLAG_DONE; buf->field = V4L2_FIELD_NONE; - buf->timestamp = meye.grab_buffer[index].timestamp; + buf->timestamp = ns_to_timeval(meye.grab_buffer[index].ts); buf->sequence = meye.grab_buffer[index].sequence; buf->memory = V4L2_MEMORY_MMAP; buf->m.offset = index * gbufsize; @@ -1349,7 +1349,7 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) buf->bytesused = meye.grab_buffer[reqnr].size; buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; buf->field = V4L2_FIELD_NONE; - buf->timestamp = meye.grab_buffer[reqnr].timestamp; + buf->timestamp = ns_to_timeval(meye.grab_buffer[reqnr].ts); buf->sequence = meye.grab_buffer[reqnr].sequence; buf->memory = V4L2_MEMORY_MMAP; buf->m.offset = reqnr * gbufsize; diff --git a/drivers/media/pci/meye/meye.h b/drivers/media/pci/meye/meye.h index c4a8a5fe040c..aff6631535be 100644 --- a/drivers/media/pci/meye/meye.h +++ b/drivers/media/pci/meye/meye.h @@ -277,11 +277,11 @@ struct meye_grab_buffer { int state; /* state of buffer */ unsigned long size; /* size of jpg frame */ - struct timeval timestamp; /* timestamp */ + u64 ts; /* timestamp */ unsigned long sequence; /* sequence number */ }; -/* size of kfifos containings buffer indices */ +/* size of kfifos containing buffer indices */ #define MEYE_QUEUE_SIZE MEYE_MAX_BUFNBRS /* Motion Eye device structure */ diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c index 25f16833a475..27953b3610a3 100644 --- a/drivers/media/pci/ngene/ngene-core.c +++ b/drivers/media/pci/ngene/ngene-core.c @@ -1014,7 +1014,7 @@ static int FillTSIdleBuffer(struct SRingBufferDescriptor *pIdleBuffer, /* Point to first buffer entry */ struct SBufferHeader *Cur = pRingBuffer->Head; int i; - /* Loop thru all buffer and set Buffer 2 pointers to TSIdlebuffer */ + /* Loop through all buffer and set Buffer 2 pointers to TSIdlebuffer */ for (i = 0; i < n; i++) { Cur->Buffer2 = pIdleBuffer->Head->Buffer1; Cur->scList2 = pIdleBuffer->Head->scList1; diff --git a/drivers/media/pci/pt1/pt1.c 
b/drivers/media/pci/pt1/pt1.c index f4b8030e2369..393f4c596819 100644 --- a/drivers/media/pci/pt1/pt1.c +++ b/drivers/media/pci/pt1/pt1.c @@ -200,16 +200,10 @@ static const u8 va1j5jf8007t_25mhz_configs[][2] = { static int config_demod(struct i2c_client *cl, enum pt1_fe_clk clk) { int ret; - u8 buf[2] = {0x01, 0x80}; bool is_sat; const u8 (*cfg_data)[2]; int i, len; - ret = i2c_master_send(cl, buf, 2); - if (ret < 0) - return ret; - usleep_range(30000, 50000); - is_sat = !strncmp(cl->name, TC90522_I2C_DEV_SAT, strlen(TC90522_I2C_DEV_SAT)); if (is_sat) { @@ -260,6 +254,46 @@ static int config_demod(struct i2c_client *cl, enum pt1_fe_clk clk) return 0; } +/* + * Init registers for (each pair of) terrestrial/satellite block in demod. + * Note that resetting terr. block also resets its peer sat. block as well. + * This function must be called before configuring any demod block + * (before pt1_wakeup(), fe->ops.init()). + */ +static int pt1_demod_block_init(struct pt1 *pt1) +{ + struct i2c_client *cl; + u8 buf[2] = {0x01, 0x80}; + int ret; + int i; + + /* reset all terr. & sat. pairs first */ + for (i = 0; i < PT1_NR_ADAPS; i++) { + cl = pt1->adaps[i]->demod_i2c_client; + if (strncmp(cl->name, TC90522_I2C_DEV_TER, + strlen(TC90522_I2C_DEV_TER))) + continue; + + ret = i2c_master_send(cl, buf, 2); + if (ret < 0) + return ret; + usleep_range(30000, 50000); + } + + for (i = 0; i < PT1_NR_ADAPS; i++) { + cl = pt1->adaps[i]->demod_i2c_client; + if (strncmp(cl->name, TC90522_I2C_DEV_SAT, + strlen(TC90522_I2C_DEV_SAT))) + continue; + + ret = i2c_master_send(cl, buf, 2); + if (ret < 0) + return ret; + usleep_range(30000, 50000); + } + return 0; +} + static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data) { writel(data, pt1->regs + reg * 4); @@ -987,6 +1021,10 @@ static int pt1_init_frontends(struct pt1 *pt1) goto tuner_release; } + ret = pt1_demod_block_init(pt1); + if (ret < 0) + goto fe_unregister; + return 0; tuner_release: @@ -1245,6 +1283,10 @@ static int pt1_resume(struct device *dev) pt1_update_power(pt1); usleep_range(1000, 2000); + ret = pt1_demod_block_init(pt1); + if (ret < 0) + goto resume_err; + for (i = 0; i < PT1_NR_ADAPS; i++) dvb_frontend_reinitialise(pt1->adaps[i]->fe); diff --git a/drivers/media/pci/pt3/pt3.h b/drivers/media/pci/pt3/pt3.h index 495891a4ee17..a53124438f51 100644 --- a/drivers/media/pci/pt3/pt3.h +++ b/drivers/media/pci/pt3/pt3.h @@ -76,7 +76,7 @@ struct xfer_desc { u32 addr_l; /* bus address of target data buffer */ u32 addr_h; u32 size; - u32 next_l; /* bus adddress of the next xfer_desc */ + u32 next_l; /* bus address of the next xfer_desc */ u32 next_h; }; diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c index 40ce033cb884..94d6484a3afe 100644 --- a/drivers/media/pci/saa7134/saa7134-cards.c +++ b/drivers/media/pci/saa7134/saa7134-cards.c @@ -6423,7 +6423,7 @@ struct pci_device_id saa7134_pci_tbl[] = { .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7133, .subvendor = 0x5168, - .subdevice = 0x3502, /* whats the difference to 0x3306 ?*/ + .subdevice = 0x3502, /* what's the difference to 0x3306 ?*/ .driver_data = SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS, },{ .vendor = PCI_VENDOR_ID_PHILIPS, diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c index 44440c6208df..e94324b1de68 100644 --- a/drivers/media/pci/saa7146/mxb.c +++ b/drivers/media/pci/saa7146/mxb.c @@ -399,7 +399,7 @@ static int mxb_init_done(struct saa7146_dev* dev) /* check if the saa7740 (aka 'sound arena 
module') is present on the mxb. if so, we must initialize it. due to lack of - informations about the saa7740, the values were reverse + information about the saa7740, the values were reverse engineered. */ msg.addr = 0x1b; msg.flags = 0; @@ -495,7 +495,7 @@ static int vidioc_s_input(struct file *file, void *fh, unsigned int input) input_port_selection[input].hps_sync); /* prepare switching of tea6415c and saa7111a; - have a look at the 'background'-file for further informations */ + have a look at the 'background'-file for further information */ switch (input) { case TUNER: i = SAA7115_COMPOSITE0; diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c index e318ccf81277..d6c996f39cf2 100644 --- a/drivers/media/pci/saa7164/saa7164-api.c +++ b/drivers/media/pci/saa7164/saa7164-api.c @@ -749,7 +749,7 @@ int saa7164_api_initialize_dif(struct saa7164_port *port) if (port->type == SAA7164_MPEG_ENCODER) { /* Pick any analog standard to init the diff. * we'll come back during encoder_init' - * and set the correct standard if requried. + * and set the correct standard if required. */ std = V4L2_STD_NTSC; } else diff --git a/drivers/media/pci/saa7164/saa7164-cards.c b/drivers/media/pci/saa7164/saa7164-cards.c index 3af16062e79d..9a6fe7cd4d59 100644 --- a/drivers/media/pci/saa7164/saa7164-cards.c +++ b/drivers/media/pci/saa7164/saa7164-cards.c @@ -685,7 +685,7 @@ struct saa7164_subid saa7164_subids[] = { .subvendor = 0x0070, .subdevice = 0xf111, .card = SAA7164_BOARD_HAUPPAUGE_HVR2255, - /* Prototype card left here for documenation purposes. + /* Prototype card left here for documentation purposes. .card = SAA7164_BOARD_HAUPPAUGE_HVR2255proto, */ }, { @@ -866,7 +866,7 @@ void saa7164_card_setup(struct saa7164_dev *dev) * access to I2C. Instead we have to communicate through the device f/w for * register access to 'processing units'. Each unit has a unique * id, regardless of how the physical implementation occurs across - * the three physical i2c busses. The being said if we want leverge of + * the three physical i2c buses. The being said if we want leverge of * the existing kernel drivers for tuners and demods we have to 'speak i2c', * to this bridge implements 3 virtual i2c buses. This is a helper function * for those. diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index f33349e16359..05f25c9bb308 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c @@ -157,7 +157,7 @@ static void saa7164_ts_verifier(struct saa7164_buffer *buf) } - /* Only report errors if we've been through this function atleast + /* Only report errors if we've been through this function at least * once already and the cached cc values are primed. First time through * always generates errors. 
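The saa7164 TS-verifier comment above refers to cached continuity-counter values that must be primed before errors are reported; a minimal sketch of such a check (the counter is the low nibble of TS header byte 3 and increments modulo 16), tracking a single cached value rather than one per PID to keep the example short:

#include <linux/types.h>

static bool ts_cc_ok(const u8 *pkt, u8 *last_cc, bool *primed)
{
	u8 cc = pkt[3] & 0x0f;
	bool ok = true;

	/* First packet only primes the cache, as the comment describes. */
	if (*primed && cc != ((*last_cc + 1) & 0x0f))
		ok = false;

	*last_cc = cc;
	*primed = true;
	return ok;
}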
*/ @@ -1020,7 +1020,7 @@ static int saa7164_dev_setup(struct saa7164_dev *dev) dev->bmmio = (u8 __iomem *)dev->lmmio; dev->bmmio2 = (u8 __iomem *)dev->lmmio2; - /* Inerrupt and ack register locations offset of bmmio */ + /* Interrupt and ack register locations offset of bmmio */ dev->int_status = 0x183000 + 0xf80; dev->int_ack = 0x183000 + 0xf90; diff --git a/drivers/media/pci/saa7164/saa7164-dvb.c b/drivers/media/pci/saa7164/saa7164-dvb.c index dfb118d7d1ec..3e73cb3c7e88 100644 --- a/drivers/media/pci/saa7164/saa7164-dvb.c +++ b/drivers/media/pci/saa7164/saa7164-dvb.c @@ -529,7 +529,7 @@ int saa7164_dvb_unregister(struct saa7164_port *port) return 0; } -/* All the DVB attach calls go here, this function get's modified +/* All the DVB attach calls go here, this function gets modified * for each new card. */ int saa7164_dvb_register(struct saa7164_port *port) diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c index a50461861133..ed27b3ce5e8e 100644 --- a/drivers/media/pci/saa7164/saa7164-fw.c +++ b/drivers/media/pci/saa7164/saa7164-fw.c @@ -409,7 +409,7 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev) (version & 0x0000001f), (version & 0xffff0000) >> 16); - /* Load the firmwware from the disk if required */ + /* Load the firmware from the disk if required */ if (version == 0) { printk(KERN_INFO "%s() Waiting for firmware upload (%s)\n", diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c index c5595af6b976..d292cdfb3671 100644 --- a/drivers/media/pci/smipcie/smipcie-ir.c +++ b/drivers/media/pci/smipcie/smipcie-ir.c @@ -16,6 +16,9 @@ #include "smipcie.h" +#define SMI_SAMPLE_PERIOD 83 +#define SMI_SAMPLE_IDLEMIN (10000 / SMI_SAMPLE_PERIOD) + static void smi_ir_enableInterrupt(struct smi_rc *ir) { struct smi_dev *dev = ir->dev; @@ -42,114 +45,64 @@ static void smi_ir_stop(struct smi_rc *ir) struct smi_dev *dev = ir->dev; smi_ir_disableInterrupt(ir); - smi_clear(IR_Init_Reg, 0x80); + smi_clear(IR_Init_Reg, rbIRen); } -#define BITS_PER_COMMAND 14 -#define GROUPS_PER_BIT 2 -#define IR_RC5_MIN_BIT 36 -#define IR_RC5_MAX_BIT 52 -static u32 smi_decode_rc5(u8 *pData, u8 size) +static void smi_raw_process(struct rc_dev *rc_dev, const u8 *buffer, + const u8 length) { - u8 index, current_bit, bit_count; - u8 group_array[BITS_PER_COMMAND * GROUPS_PER_BIT + 4]; - u8 group_index = 0; - u32 command = 0xFFFFFFFF; - - group_array[group_index++] = 1; - - for (index = 0; index < size; index++) { - - current_bit = (pData[index] & 0x80) ? 
1 : 0; - bit_count = pData[index] & 0x7f; - - if ((current_bit == 1) && (bit_count >= 2*IR_RC5_MAX_BIT + 1)) { - goto process_code; - } else if ((bit_count >= IR_RC5_MIN_BIT) && - (bit_count <= IR_RC5_MAX_BIT)) { - group_array[group_index++] = current_bit; - } else if ((bit_count > IR_RC5_MAX_BIT) && - (bit_count <= 2*IR_RC5_MAX_BIT)) { - group_array[group_index++] = current_bit; - group_array[group_index++] = current_bit; - } else { - goto invalid_timing; - } - if (group_index >= BITS_PER_COMMAND*GROUPS_PER_BIT) - goto process_code; - - if ((group_index == BITS_PER_COMMAND*GROUPS_PER_BIT - 1) - && (group_array[group_index-1] == 0)) { - group_array[group_index++] = 1; - goto process_code; - } - } - -process_code: - if (group_index == (BITS_PER_COMMAND*GROUPS_PER_BIT-1)) - group_array[group_index++] = 1; - - if (group_index == BITS_PER_COMMAND*GROUPS_PER_BIT) { - command = 0; - for (index = 0; index < (BITS_PER_COMMAND*GROUPS_PER_BIT); - index = index + 2) { - if ((group_array[index] == 1) && - (group_array[index+1] == 0)) { - command |= (1 << (BITS_PER_COMMAND - - (index/2) - 1)); - } else if ((group_array[index] == 0) && - (group_array[index+1] == 1)) { - /* */ - } else { - command = 0xFFFFFFFF; - goto invalid_timing; - } + struct ir_raw_event rawir = {}; + int cnt; + + for (cnt = 0; cnt < length; cnt++) { + if (buffer[cnt] & 0x7f) { + rawir.pulse = (buffer[cnt] & 0x80) == 0; + rawir.duration = ((buffer[cnt] & 0x7f) + + (rawir.pulse ? 0 : -1)) * + rc_dev->rx_resolution; + ir_raw_event_store_with_filter(rc_dev, &rawir); } } - -invalid_timing: - return command; } -static void smi_ir_decode(struct work_struct *work) +static void smi_ir_decode(struct smi_rc *ir) { - struct smi_rc *ir = container_of(work, struct smi_rc, work); struct smi_dev *dev = ir->dev; struct rc_dev *rc_dev = ir->rc_dev; - u32 dwIRControl, dwIRData, dwIRCode, scancode; - u8 index, ucIRCount, readLoop, rc5_command, rc5_system, toggle; + u32 dwIRControl, dwIRData; + u8 index, ucIRCount, readLoop; dwIRControl = smi_read(IR_Init_Reg); + if (dwIRControl & rbIRVld) { ucIRCount = (u8) smi_read(IR_Data_Cnt); - if (ucIRCount < 4) - goto end_ir_decode; - readLoop = ucIRCount/4; if (ucIRCount % 4) readLoop += 1; for (index = 0; index < readLoop; index++) { - dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index*4)); + dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4)); ir->irData[index*4 + 0] = (u8)(dwIRData); ir->irData[index*4 + 1] = (u8)(dwIRData >> 8); ir->irData[index*4 + 2] = (u8)(dwIRData >> 16); ir->irData[index*4 + 3] = (u8)(dwIRData >> 24); } - dwIRCode = smi_decode_rc5(ir->irData, ucIRCount); - - if (dwIRCode != 0xFFFFFFFF) { - rc5_command = dwIRCode & 0x3F; - rc5_system = (dwIRCode & 0x7C0) >> 6; - toggle = (dwIRCode & 0x800) ? 
1 : 0; - scancode = rc5_system << 8 | rc5_command; - rc_keydown(rc_dev, RC_PROTO_RC5, scancode, toggle); - } + smi_raw_process(rc_dev, ir->irData, ucIRCount); + smi_set(IR_Init_Reg, rbIRVld); } -end_ir_decode: - smi_set(IR_Init_Reg, 0x04); - smi_ir_enableInterrupt(ir); + + if (dwIRControl & rbIRhighidle) { + struct ir_raw_event rawir = {}; + + rawir.pulse = 0; + rawir.duration = US_TO_NS(SMI_SAMPLE_PERIOD * + SMI_SAMPLE_IDLEMIN); + ir_raw_event_store_with_filter(rc_dev, &rawir); + smi_set(IR_Init_Reg, rbIRhighidle); + } + + ir_raw_event_handle(rc_dev); } /* ir functions call by main driver.*/ @@ -160,7 +113,8 @@ int smi_ir_irq(struct smi_rc *ir, u32 int_status) if (int_status & IR_X_INT) { smi_ir_disableInterrupt(ir); smi_ir_clearInterrupt(ir); - schedule_work(&ir->work); + smi_ir_decode(ir); + smi_ir_enableInterrupt(ir); handled = 1; } return handled; @@ -170,9 +124,11 @@ void smi_ir_start(struct smi_rc *ir) { struct smi_dev *dev = ir->dev; - smi_write(IR_Idle_Cnt_Low, 0x00140070); + smi_write(IR_Idle_Cnt_Low, + (((SMI_SAMPLE_PERIOD - 1) & 0xFFFF) << 16) | + (SMI_SAMPLE_IDLEMIN & 0xFFFF)); msleep(20); - smi_set(IR_Init_Reg, 0x90); + smi_set(IR_Init_Reg, rbIRen | rbIRhighidle); smi_ir_enableInterrupt(ir); } @@ -183,7 +139,7 @@ int smi_ir_init(struct smi_dev *dev) struct rc_dev *rc_dev; struct smi_rc *ir = &dev->ir; - rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE); + rc_dev = rc_allocate_device(RC_DRIVER_IR_RAW); if (!rc_dev) return -ENOMEM; @@ -193,6 +149,7 @@ int smi_ir_init(struct smi_dev *dev) snprintf(ir->input_phys, sizeof(ir->input_phys), "pci-%s/ir0", pci_name(dev->pci_dev)); + rc_dev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rc_dev->driver_name = "SMI_PCIe"; rc_dev->input_phys = ir->input_phys; rc_dev->device_name = ir->device_name; @@ -203,11 +160,12 @@ int smi_ir_init(struct smi_dev *dev) rc_dev->dev.parent = &dev->pci_dev->dev; rc_dev->map_name = dev->info->rc_map; + rc_dev->timeout = MS_TO_NS(100); + rc_dev->rx_resolution = US_TO_NS(SMI_SAMPLE_PERIOD); ir->rc_dev = rc_dev; ir->dev = dev; - INIT_WORK(&ir->work, smi_ir_decode); smi_ir_disableInterrupt(ir); ret = rc_register_device(rc_dev); diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h index a6c5b1bd7edb..e52229a87b84 100644 --- a/drivers/media/pci/smipcie/smipcie.h +++ b/drivers/media/pci/smipcie/smipcie.h @@ -241,7 +241,6 @@ struct smi_rc { struct rc_dev *rc_dev; char input_phys[64]; char device_name[64]; - struct work_struct work; u8 irData[256]; int users; diff --git a/drivers/media/pci/solo6x10/solo6x10-disp.c b/drivers/media/pci/solo6x10/solo6x10-disp.c index 11c98f0625e4..f61007022471 100644 --- a/drivers/media/pci/solo6x10/solo6x10-disp.c +++ b/drivers/media/pci/solo6x10/solo6x10-disp.c @@ -71,7 +71,7 @@ static void solo_vin_config(struct solo_dev *solo_dev) solo_reg_write(solo_dev, SOLO_VI_CH_FORMAT, SOLO_VI_FD_SEL_MASK(0) | SOLO_VI_PROG_MASK(0)); - /* On 6110, initialize mozaic darkness stength */ + /* On 6110, initialize mozaic darkness strength */ if (solo_dev->type == SOLO_DEV_6010) solo_reg_write(solo_dev, SOLO_VI_FMT_CFG, 0); else @@ -230,7 +230,7 @@ int solo_set_motion_block(struct solo_dev *solo_dev, u8 ch, } /* First 8k is motion flag (512 bytes * 16). Following that is an 8k+8k - * threshold and working table for each channel. Atleast that's what the + * threshold and working table for each channel. At least that's what the * spec says. However, this code (taken from rdk) has some mystery 8k * block right after the flag area, before the first thresh table. 
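The solo6x10 comment above describes the motion-detection memory layout (8k of flags, an unexplained 8k block, then 8k+8k threshold and working tables per channel); a small sketch of the offset arithmetic it implies, with names illustrative and the extra block taken on faith from the comment:

#include <linux/types.h>

#define MOT_FLAG_SIZE		(512 * 16)	/* 8k of motion flag bytes */
#define MOT_MYSTERY_SIZE	(8 * 1024)	/* unexplained block per the comment */
#define MOT_THRESH_SIZE		(8 * 1024)
#define MOT_WORK_SIZE		(8 * 1024)

/* Byte offset of the threshold table for a given channel. */
static u32 mot_thresh_offset(unsigned int ch)
{
	return MOT_FLAG_SIZE + MOT_MYSTERY_SIZE +
	       ch * (MOT_THRESH_SIZE + MOT_WORK_SIZE);
}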
*/ static void solo_motion_config(struct solo_dev *solo_dev) diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c index 2cc05a9d57ac..a16242a9206f 100644 --- a/drivers/media/pci/solo6x10/solo6x10-g723.c +++ b/drivers/media/pci/solo6x10/solo6x10-g723.c @@ -360,13 +360,11 @@ static int solo_snd_pcm_init(struct solo_dev *solo_dev) ss; ss = ss->next, i++) sprintf(ss->name, "Camera #%d Audio", i); - ret = snd_pcm_lib_preallocate_pages_for_all(pcm, + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), G723_PERIOD_BYTES * PERIODS, G723_PERIOD_BYTES * PERIODS); - if (ret < 0) - return ret; solo_dev->snd_pcm = pcm; diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c index 411177ec4d72..2452d8f59cb0 100644 --- a/drivers/media/pci/sta2x11/sta2x11_vip.c +++ b/drivers/media/pci/sta2x11/sta2x11_vip.c @@ -110,7 +110,7 @@ static inline struct vip_buffer *to_vip_buffer(struct vb2_v4l2_buffer *vb2) * @std: video standard (e.g. PAL/NTSC) * @input: input line for video signal ( 0 or 1 ) * @disabled: Device is in power down state - * @slock: for excluse acces of registers + * @slock: for excluse access of registers * @vb_vidq: queue maintained by videobuf2 layer * @buffer_list: list of buffer in use * @sequence: sequence number of acquired buffer diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c index 409defc75c05..9345287ad963 100644 --- a/drivers/media/pci/ttpci/av7110.c +++ b/drivers/media/pci/ttpci/av7110.c @@ -2313,7 +2313,7 @@ static int frontend_init(struct av7110 *av7110) * (n in defined in the RPS_THRESH1 counter threshold) * I think HS is raised high on the beginning of the n-th line * and remains high until this n-th line that triggered - * it is completely received. When the receiption of n-th line + * it is completely received. When the reception of n-th line * ends, HS is lowered. * * To transmit data over DMA, 7146 needs changing state at @@ -2347,7 +2347,7 @@ static int frontend_init(struct av7110 *av7110) * hardware debug note: a working budget card (including budget patch) * with vpeirq() interrupt setup in mode "0x90" (every 64K) will * generate 3 interrupts per 25-Hz DMA frame of 2*188*512 bytes - * and that means 3*25=75 Hz of interrupt freqency, as seen by + * and that means 3*25=75 Hz of interrupt frequency, as seen by * watch cat /proc/interrupts * * If this frequency is 3x lower (and data received in the DMA @@ -2356,7 +2356,7 @@ static int frontend_init(struct av7110 *av7110) * this means VSYNC line is not connected in the hardware. * (check soldering pcb and pins) * The same behaviour of missing VSYNC can be duplicated on budget - * cards, by seting DD1_INIT trigger mode 7 in 3rd nibble. + * cards, by setting DD1_INIT trigger mode 7 in 3rd nibble. */ static int av7110_attach(struct saa7146_dev* dev, struct saa7146_pci_extension_data *pci_ext) diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c index d3f727045ae8..4f74b14c3b4f 100644 --- a/drivers/media/pci/tw68/tw68-video.c +++ b/drivers/media/pci/tw68/tw68-video.c @@ -446,7 +446,7 @@ static void tw68_buf_queue(struct vb2_buffer *vb) /* * buffer_prepare * - * Set the ancilliary information into the buffer structure. This + * Set the ancillary information into the buffer structure. This * includes generating the necessary risc program if it hasn't already * been done for the current buffer format. 
* The structure fh contains the details of the format requested by the diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c index a28329698e20..fb0e7573b5ae 100644 --- a/drivers/media/pci/tw686x/tw686x-audio.c +++ b/drivers/media/pci/tw686x/tw686x-audio.c @@ -301,11 +301,12 @@ static int tw686x_snd_pcm_init(struct tw686x_dev *dev) ss; ss = ss->next, i++) snprintf(ss->name, sizeof(ss->name), "vch%u audio", i); - return snd_pcm_lib_preallocate_pages_for_all(pcm, + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(dev->pci_dev), TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX, TW686X_AUDIO_PAGE_MAX * AUDIO_DMA_SIZE_MAX); + return 0; } static void tw686x_audio_dma_free(struct tw686x_dev *dev, diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index a505e9f5a1e2..4acbed189644 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -141,7 +141,6 @@ config VIDEO_RENESAS_CEU ---help--- This is a v4l2 driver for the Renesas CEU Interface -source "drivers/media/platform/soc_camera/Kconfig" source "drivers/media/platform/exynos4-is/Kconfig" source "drivers/media/platform/am437x/Kconfig" source "drivers/media/platform/xilinx/Kconfig" @@ -650,7 +649,7 @@ config VIDEO_SECO_CEC config VIDEO_SECO_RC bool "SECO Boards IR RC5 support" depends on VIDEO_SECO_CEC - select RC_CORE + depends on RC_CORE help If you say yes here you will get support for the SECO Boards Consumer-IR in seco-cec driver. @@ -669,7 +668,7 @@ menuconfig SDR_PLATFORM_DRIVERS if SDR_PLATFORM_DRIVERS config VIDEO_RCAR_DRIF - tristate "Renesas Digitial Radio Interface (DRIF)" + tristate "Renesas Digital Radio Interface (DRIF)" depends on VIDEO_V4L2 depends on ARCH_RENESAS || COMPILE_TEST select VIDEOBUF2_VMALLOC diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index e6deb2597738..7cbbd925124c 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -62,8 +62,6 @@ obj-y += davinci/ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o -obj-$(CONFIG_SOC_CAMERA) += soc_camera/ - obj-$(CONFIG_VIDEO_RCAR_DRIF) += rcar_drif.o obj-$(CONFIG_VIDEO_RENESAS_CEU) += renesas-ceu.o obj-$(CONFIG_VIDEO_RENESAS_FCP) += rcar-fcp.o diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c index dfec813f50a9..692e08ef38c0 100644 --- a/drivers/media/platform/aspeed-video.c +++ b/drivers/media/platform/aspeed-video.c @@ -1661,6 +1661,7 @@ static int aspeed_video_probe(struct platform_device *pdev) video->frame_rate = 30; video->dev = &pdev->dev; + spin_lock_init(&video->lock); mutex_init(&video->video_lock); init_waitqueue_head(&video->wait); INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work); diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c index fdb255e4a956..08b8d5583080 100644 --- a/drivers/media/platform/atmel/atmel-isi.c +++ b/drivers/media/platform/atmel/atmel-isi.c @@ -110,7 +110,7 @@ struct atmel_isi { bool enable_preview_path; struct completion complete; - /* ISI peripherial clock */ + /* ISI peripheral clock */ struct clk *pclk; unsigned int irq; @@ -1078,7 +1078,7 @@ static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier, dev_dbg(isi->dev, "Removing %s\n", video_device_node_name(isi->vdev)); - /* Checks internaly if vdev have been init or not */ + /* Checks internally if vdev have been init or not */ video_unregister_device(isi->vdev); } diff --git 
a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c index 8e0194993a52..b4f396c2e72c 100644 --- a/drivers/media/platform/coda/coda-bit.c +++ b/drivers/media/platform/coda/coda-bit.c @@ -1010,7 +1010,11 @@ static int coda_start_encoding(struct coda_ctx *ctx) CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) | ((ctx->params.h264_slice_beta_offset_div2 & CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) << - CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET); + CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET) | + (ctx->params.h264_constrained_intra_pred_flag << + CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET) | + (ctx->params.h264_chroma_qp_index_offset & + CODA_264PARAM_CHROMAQPOFFSET_MASK); coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA); break; case V4L2_PIX_FMT_JPEG: @@ -2020,7 +2024,6 @@ static void coda_finish_decode(struct coda_ctx *ctx) struct coda_q_data *q_data_dst; struct vb2_v4l2_buffer *dst_buf; struct coda_buffer_meta *meta; - unsigned long payload; int width, height; int decoded_idx; int display_idx; @@ -2226,21 +2229,8 @@ static void coda_finish_decode(struct coda_ctx *ctx) trace_coda_dec_rot_done(ctx, dst_buf, meta); - switch (q_data_dst->fourcc) { - case V4L2_PIX_FMT_YUYV: - payload = width * height * 2; - break; - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_YVU420: - case V4L2_PIX_FMT_NV12: - default: - payload = width * height * 3 / 2; - break; - case V4L2_PIX_FMT_YUV422P: - payload = width * height * 2; - break; - } - vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload); + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, + q_data_dst->sizeimage); if (ctx->frame_errors[ctx->display_idx] || err_vdoa) coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR); diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 7518f01c48f7..fa0b22fb7991 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -728,7 +728,7 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f, ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; break; case V4L2_PIX_FMT_NV12: - if (!disable_tiling) { + if (!disable_tiling && ctx->dev->devtype->product == CODA_960) { ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP; break; } @@ -1839,6 +1839,12 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: ctx->params.h264_disable_deblocking_filter_idc = ctrl->val; break; + case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: + ctx->params.h264_constrained_intra_pred_flag = ctrl->val; + break; + case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: + ctx->params.h264_chroma_qp_index_offset = ctrl->val; + break; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: /* TODO: switch between baseline and constrained baseline */ if (ctx->inst_type == CODA_INST_ENCODER) @@ -1925,6 +1931,11 @@ static void coda_encode_ctrls(struct coda_ctx *ctx) V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY, 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED); + v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION, 0, 1, 1, + 0); + v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, + V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0); v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0, diff --git a/drivers/media/platform/coda/coda-jpeg.c b/drivers/media/platform/coda/coda-jpeg.c index 
9f899a6cefed..39a2351c1e48 100644 --- a/drivers/media/platform/coda/coda-jpeg.c +++ b/drivers/media/platform/coda/coda-jpeg.c @@ -103,7 +103,7 @@ static const unsigned char chroma_ac_value[162 + 2] = { /* * Quantization tables for luminance and chrominance components in - * zig-zag scan order from the Freescale i.MX VPU libaries + * zig-zag scan order from the Freescale i.MX VPU libraries */ static unsigned char luma_q[64] = { diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h index 31cea72f5b2a..31c80bda2c0b 100644 --- a/drivers/media/platform/coda/coda.h +++ b/drivers/media/platform/coda/coda.h @@ -118,6 +118,8 @@ struct coda_params { u8 h264_disable_deblocking_filter_idc; s8 h264_slice_alpha_c0_offset_div2; s8 h264_slice_beta_offset_div2; + bool h264_constrained_intra_pred_flag; + s8 h264_chroma_qp_index_offset; u8 h264_profile_idc; u8 h264_level_idc; u8 mpeg4_intra_qp; diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c index 340f8218f54d..47cecd10eb9f 100644 --- a/drivers/media/platform/davinci/isif.c +++ b/drivers/media/platform/davinci/isif.c @@ -328,7 +328,7 @@ static void isif_config_bclamp(struct isif_black_clamp *bc) if (bc->en) { val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT; - /* Enable BC and horizontal clamp caculation paramaters */ + /* Enable BC and horizontal clamp calculation parameters */ val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT); regw(val, CLAMPCFG); @@ -358,7 +358,7 @@ static void isif_config_bclamp(struct isif_black_clamp *bc) regw(bc->horz.win_start_v_calc, CLHWIN2); } - /* vertical clamp caculation paramaters */ + /* vertical clamp calculation parameters */ /* Reset clamp value sel for previous line */ val |= diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c index 4766a7a23d16..8339163a5231 100644 --- a/drivers/media/platform/davinci/vpbe.c +++ b/drivers/media/platform/davinci/vpbe.c @@ -242,7 +242,7 @@ static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index) goto unlock; /* - * It is assumed that venc or extenal encoder will set a default + * It is assumed that venc or external encoder will set a default * mode in the sub device. For external encoder or LCD pannel output, * we also need to set up the lcd port for the required mode. So setup * the lcd port for the default mode that is configured in the board diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index 9996bab98fe3..26dadbba930f 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -518,7 +518,7 @@ static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev) static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev) { - v4l2_get_timestamp(&vpfe_dev->cur_frm->ts); + vpfe_dev->cur_frm->ts = ktime_get_ns(); vpfe_dev->cur_frm->state = VIDEOBUF_DONE; vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage; wake_up_interruptible(&vpfe_dev->cur_frm->done); diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c index 16352e2263d2..df66461f5d4f 100644 --- a/drivers/media/platform/davinci/vpif.c +++ b/drivers/media/platform/davinci/vpif.c @@ -1,7 +1,7 @@ /* * vpif - Video Port Interface driver * VPIF is a receiver and transmitter for video data. It has two channels(0, 1) - * that receiveing video byte stream and two channels(2, 3) for video output. 
+ * that receiving video byte stream and two channels(2, 3) for video output. * The hardware supports SDTV, HDTV formats, raw data capture. * Currently, the driver supports NTSC and PAL standards. * diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 3517487d9760..f4b4f2a1dfc0 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -138,7 +138,7 @@ static int vpif_buffer_queue_setup(struct vb2_queue *vq, * vpif_buffer_queue : Callback function to add buffer to DMA queue * @vb: ptr to vb2_buffer * - * This callback fucntion queues the buffer to DMA engine + * This callback function queues the buffer to DMA engine */ static void vpif_buffer_queue(struct vb2_buffer *vb) { @@ -635,7 +635,7 @@ static int vpif_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; /* - * to supress v4l-compliance warnings silently correct + * to suppress v4l-compliance warnings silently correct * the pixelformat */ if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) diff --git a/drivers/media/platform/exynos4-is/fimc-is-command.h b/drivers/media/platform/exynos4-is/fimc-is-command.h index 0d1f52e394b1..b06b56b890d5 100644 --- a/drivers/media/platform/exynos4-is/fimc-is-command.h +++ b/drivers/media/platform/exynos4-is/fimc-is-command.h @@ -18,7 +18,7 @@ #define FIMC_IS_COMMAND_VER 110 /* FIMC-IS command set version 1.10 */ -/* Enumeration of commands beetween the FIMC-IS and the host processor. */ +/* Enumeration of commands between the FIMC-IS and the host processor. */ /* HOST to FIMC-IS */ #define HIC_PREVIEW_STILL 0x0001 diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.h b/drivers/media/platform/exynos4-is/fimc-is-param.h index 8e31f7642776..22923a3d405e 100644 --- a/drivers/media/platform/exynos4-is/fimc-is-param.h +++ b/drivers/media/platform/exynos4-is/fimc-is-param.h @@ -298,7 +298,7 @@ enum isp_af_mode { #define ISP_FLASH_COMMAND_AUTO 2 #define ISP_FLASH_COMMAND_TORCH 3 /* 3 sec */ -/* Flash red-eye commads */ +/* Flash red-eye commands */ #define ISP_FLASH_REDEYE_DISABLE 0 #define ISP_FLASH_REDEYE_ENABLE 1 diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index f5fc54de19da..02da0b06e56a 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -738,7 +738,7 @@ int fimc_is_hw_initialize(struct fimc_is *is) return 0; } -static int fimc_is_log_show(struct seq_file *s, void *data) +static int fimc_is_show(struct seq_file *s, void *data) { struct fimc_is *is = s->private; const u8 *buf = is->memory.vaddr + FIMC_IS_DEBUG_REGION_OFFSET; @@ -752,17 +752,7 @@ static int fimc_is_log_show(struct seq_file *s, void *data) return 0; } -static int fimc_is_debugfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, fimc_is_log_show, inode->i_private); -} - -static const struct file_operations fimc_is_debugfs_fops = { - .open = fimc_is_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(fimc_is); static void fimc_is_debugfs_remove(struct fimc_is *is) { @@ -777,7 +767,7 @@ static int fimc_is_debugfs_create(struct fimc_is *is) is->debugfs_entry = debugfs_create_dir("fimc_is", NULL); dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry, - is, &fimc_is_debugfs_fops); + is, &fimc_is_fops); if (!dentry) fimc_is_debugfs_remove(is); diff --git 
a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index de6bd28f7e31..bb35a2017f21 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -606,9 +606,7 @@ int fimc_isp_video_device_register(struct fimc_isp *isp, vdev = &iv->ve.vdev; memset(vdev, 0, sizeof(*vdev)); - snprintf(vdev->name, sizeof(vdev->name), "fimc-is-isp.%s", - type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? - "capture" : "output"); + strscpy(vdev->name, "fimc-is-isp.capture", sizeof(vdev->name)); vdev->queue = q; vdev->fops = &isp_video_fops; vdev->ioctl_ops = &isp_video_ioctl_ops; diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h index 9f527670395a..a7c9490bbcb4 100644 --- a/drivers/media/platform/exynos4-is/media-dev.h +++ b/drivers/media/platform/exynos4-is/media-dev.h @@ -79,7 +79,7 @@ struct fimc_camclk_info { /** * struct fimc_sensor_info - image data source subdev information - * @pdata: sensor's atrributes passed as media device's platform data + * @pdata: sensor's attributes passed as media device's platform data * @asd: asynchronous subdev registration data structure * @subdev: image sensor v4l2 subdev * @host: fimc device the sensor is currently linked to diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c index 35cb0162085b..234e047e3e8f 100644 --- a/drivers/media/platform/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/exynos4-is/mipi-csis.c @@ -183,7 +183,7 @@ struct csis_drvdata { * @index: the hardware instance index * @pdev: CSIS platform device * @phy: pointer to the CSIS generic PHY - * @regs: mmaped I/O registers memory + * @regs: mmapped I/O registers memory * @supplies: CSIS regulator supplies * @clock: CSIS clocks * @irq: requested s5p-mipi-csis irq number @@ -745,7 +745,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev, goto err; } - /* Get MIPI CSI-2 bus configration from the endpoint node. */ + /* Get MIPI CSI-2 bus configuration from the endpoint node. 
*/ of_property_read_u32(node, "samsung,csis-hs-settle", &state->hs_settle); state->wclk_ext = of_property_read_bool(node, diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c index ca6d0317ab42..cffebcaacb90 100644 --- a/drivers/media/platform/fsl-viu.c +++ b/drivers/media/platform/fsl-viu.c @@ -1090,7 +1090,7 @@ static void viu_capture_intr(struct viu_dev *dev, u32 status) if (waitqueue_active(&buf->vb.done)) { list_del(&buf->vb.queue); - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; wake_up(&buf->vb.done); diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c index c1c255408d16..0bcfc5aa8f3d 100644 --- a/drivers/media/platform/imx-pxp.c +++ b/drivers/media/platform/imx-pxp.c @@ -90,7 +90,11 @@ static struct pxp_fmt formats[] = { .depth = 16, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { - .fourcc = V4L2_PIX_FMT_YUV32, + .fourcc = V4L2_PIX_FMT_VUYA32, + .depth = 32, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_VUYX32, .depth = 32, .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, }, { @@ -236,7 +240,7 @@ static u32 pxp_v4l2_pix_fmt_to_ps_format(u32 v4l2_pix_fmt) case V4L2_PIX_FMT_RGB555: return BV_PXP_PS_CTRL_FORMAT__RGB555; case V4L2_PIX_FMT_RGB444: return BV_PXP_PS_CTRL_FORMAT__RGB444; case V4L2_PIX_FMT_RGB565: return BV_PXP_PS_CTRL_FORMAT__RGB565; - case V4L2_PIX_FMT_YUV32: return BV_PXP_PS_CTRL_FORMAT__YUV1P444; + case V4L2_PIX_FMT_VUYX32: return BV_PXP_PS_CTRL_FORMAT__YUV1P444; case V4L2_PIX_FMT_UYVY: return BV_PXP_PS_CTRL_FORMAT__UYVY1P422; case V4L2_PIX_FMT_YUYV: return BM_PXP_PS_CTRL_WB_SWAP | BV_PXP_PS_CTRL_FORMAT__UYVY1P422; @@ -265,7 +269,8 @@ static u32 pxp_v4l2_pix_fmt_to_out_format(u32 v4l2_pix_fmt) case V4L2_PIX_FMT_RGB555: return BV_PXP_OUT_CTRL_FORMAT__RGB555; case V4L2_PIX_FMT_RGB444: return BV_PXP_OUT_CTRL_FORMAT__RGB444; case V4L2_PIX_FMT_RGB565: return BV_PXP_OUT_CTRL_FORMAT__RGB565; - case V4L2_PIX_FMT_YUV32: return BV_PXP_OUT_CTRL_FORMAT__YUV1P444; + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: return BV_PXP_OUT_CTRL_FORMAT__YUV1P444; case V4L2_PIX_FMT_UYVY: return BV_PXP_OUT_CTRL_FORMAT__UYVY1P422; case V4L2_PIX_FMT_VYUY: return BV_PXP_OUT_CTRL_FORMAT__VYUY1P422; case V4L2_PIX_FMT_GREY: return BV_PXP_OUT_CTRL_FORMAT__Y8; @@ -281,7 +286,8 @@ static u32 pxp_v4l2_pix_fmt_to_out_format(u32 v4l2_pix_fmt) static bool pxp_v4l2_pix_fmt_is_yuv(u32 v4l2_pix_fmt) { switch (v4l2_pix_fmt) { - case V4L2_PIX_FMT_YUV32: + case V4L2_PIX_FMT_VUYA32: + case V4L2_PIX_FMT_VUYX32: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_VYUY: @@ -680,7 +686,7 @@ static void pxp_setup_csc(struct pxp_ctx *ctx) csc2_coef = csc2_coef_rec709_full; else csc2_coef = csc2_coef_rec709_lim; - } else if (ycbcr_enc == V4L2_YCBCR_ENC_709) { + } else if (ycbcr_enc == V4L2_YCBCR_ENC_BT2020) { if (quantization == V4L2_QUANTIZATION_FULL_RANGE) csc2_coef = csc2_coef_bt2020_full; else diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c index 70a2833db0d1..af76eb637773 100644 --- a/drivers/media/platform/marvell-ccic/mmp-driver.c +++ b/drivers/media/platform/marvell-ccic/mmp-driver.c @@ -240,8 +240,8 @@ static void mmpcam_calc_dphy(struct mcam_camera *mcam) * bit 8 ~ bit 15: HS_SETTLE * Time interval during which the HS * receiver shall ignore any Data Lane - * HS transistions. - * The vaule has been calibrated on + * HS transitions. + * The value has been calibrated on * different boards. 
It seems to work well. * * More detail please refer diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c index 2a5d5002c27e..f761e4d8bf2a 100644 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c @@ -702,7 +702,7 @@ end: v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb)); } -static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, +static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, enum v4l2_buf_type type) { if (V4L2_TYPE_IS_OUTPUT(type)) @@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx, static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) { struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; int ret = 0; ret = pm_runtime_get_sync(ctx->jpeg->dev); @@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) return 0; err: while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED); + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED); return ret; } static void mtk_jpeg_stop_streaming(struct vb2_queue *q) { struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; /* * STREAMOFF is an acknowledgment for source change event. @@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) struct mtk_jpeg_src_buf *src_buf; vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - src_buf = mtk_jpeg_vb2_to_srcbuf(vb); + src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf); mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param); ctx->state = MTK_JPEG_RUNNING; } else if (V4L2_TYPE_IS_OUTPUT(q->type)) { @@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q) } while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR); pm_runtime_put_sync(ctx->jpeg->dev); } @@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv) { struct mtk_jpeg_ctx *ctx = priv; struct mtk_jpeg_dev *jpeg = ctx->jpeg; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; unsigned long flags; struct mtk_jpeg_src_buf *jpeg_src_buf; @@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv) src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf); + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf); if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) { - for (i = 0; i < dst_buf->num_planes; i++) - vb2_set_plane_payload(dst_buf, i, 0); + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0); buf_state = VB2_BUF_STATE_DONE; goto dec_end; } @@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv) return; } - mtk_jpeg_set_dec_src(ctx, src_buf, &bs); - if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb)) + mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs); + if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb)) goto dec_end; spin_lock_irqsave(&jpeg->hw_lock, flags); @@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv) dec_end: v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - 
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state); + v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); } @@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) { struct mtk_jpeg_dev *jpeg = priv; struct mtk_jpeg_ctx *ctx; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct mtk_jpeg_src_buf *jpeg_src_buf; enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR; u32 dec_irq_ret; @@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf); + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf); if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW) mtk_jpeg_dec_reset(jpeg->dec_reg_base); @@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv) goto dec_end; } - for (i = 0; i < dst_buf->num_planes; i++) - vb2_set_plane_payload(dst_buf, i, + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++) + vb2_set_plane_payload(&dst_buf->vb2_buf, i, jpeg_src_buf->dec_param.comp_size[i]); buf_state = VB2_BUF_STATE_DONE; dec_end: - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state); + v4l2_m2m_buf_done(src_buf, buf_state); + v4l2_m2m_buf_done(dst_buf, buf_state); v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx); return IRQ_HANDLED; } diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h index ad1cff306efd..e5abb1abb3a3 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.h +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.h @@ -41,7 +41,7 @@ #define MTK_MDP_CTX_ERROR BIT(5) /** - * struct mtk_mdp_pix_align - alignement of image + * struct mtk_mdp_pix_align - alignment of image * @org_w: source alignment of width * @org_h: source alignment of height * @target_w: dst alignment of width @@ -122,8 +122,8 @@ struct mtk_mdp_frame { /** * struct mtk_mdp_variant - image processor variant information * @pix_max: maximum limit of image size - * @pix_min: minimun limit of image size - * @pix_align: alignement of image + * @pix_min: minimum limit of image size + * @pix_align: alignment of image * @h_scale_up_max: maximum scale-up in horizontal * @v_scale_up_max: maximum scale-up in vertical * @h_scale_down_max: maximum scale-down in horizontal diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c index 51a13466261e..7d15c06e9db9 100644 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c @@ -473,20 +473,17 @@ static void mtk_mdp_prepare_addr(struct mtk_mdp_ctx *ctx, static void mtk_mdp_m2m_get_bufs(struct mtk_mdp_ctx *ctx) { struct mtk_mdp_frame *s_frame, *d_frame; - struct vb2_buffer *src_vb, *dst_vb; struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf; s_frame = &ctx->s_frame; d_frame = &ctx->d_frame; - src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); - mtk_mdp_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr); + src_vbuf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); + mtk_mdp_prepare_addr(ctx, &src_vbuf->vb2_buf, s_frame, &s_frame->addr); - dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); - mtk_mdp_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr); + dst_vbuf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); + 
mtk_mdp_prepare_addr(ctx, &dst_vbuf->vb2_buf, d_frame, &d_frame->addr); - src_vbuf = to_vb2_v4l2_buffer(src_vb); - dst_vbuf = to_vb2_v4l2_buffer(dst_vb); dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp; } @@ -494,17 +491,14 @@ static void mtk_mdp_process_done(void *priv, int vb_state) { struct mtk_mdp_dev *mdp = priv; struct mtk_mdp_ctx *ctx; - struct vb2_buffer *src_vb, *dst_vb; - struct vb2_v4l2_buffer *src_vbuf = NULL, *dst_vbuf = NULL; + struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf; ctx = v4l2_m2m_get_curr_priv(mdp->m2m_dev); if (!ctx) return; - src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); - src_vbuf = to_vb2_v4l2_buffer(src_vb); - dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); - dst_vbuf = to_vb2_v4l2_buffer(dst_vb); + src_vbuf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); + dst_vbuf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); dst_vbuf->vb2_buf.timestamp = src_vbuf->vb2_buf.timestamp; dst_vbuf->timecode = src_vbuf->timecode; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c index ba619647bc10..d022c65bb34c 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c @@ -325,13 +325,12 @@ static void mtk_vdec_worker(struct work_struct *work) struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx, decode_work); struct mtk_vcodec_dev *dev = ctx->dev; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct mtk_vcodec_mem buf; struct vdec_fb *pfb; bool res_chg = false; int ret; struct mtk_video_dec_buf *dst_buf_info, *src_buf_info; - struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); if (src_buf == NULL) { @@ -347,26 +346,23 @@ static void mtk_vdec_worker(struct work_struct *work) return; } - src_vb2_v4l2 = container_of(src_buf, struct vb2_v4l2_buffer, vb2_buf); - src_buf_info = container_of(src_vb2_v4l2, struct mtk_video_dec_buf, vb); - - dst_vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf); - dst_buf_info = container_of(dst_vb2_v4l2, struct mtk_video_dec_buf, vb); + src_buf_info = container_of(src_buf, struct mtk_video_dec_buf, vb); + dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf, vb); pfb = &dst_buf_info->frame_buffer; - pfb->base_y.va = vb2_plane_vaddr(dst_buf, 0); - pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); pfb->base_y.size = ctx->picinfo.y_bs_sz + ctx->picinfo.y_len_sz; - pfb->base_c.va = vb2_plane_vaddr(dst_buf, 1); - pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 1); + pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1); + pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1); pfb->base_c.size = ctx->picinfo.c_bs_sz + ctx->picinfo.c_len_sz; pfb->status = 0; mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id); mtk_v4l2_debug(3, "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx", - dst_buf->index, pfb, + dst_buf->vb2_buf.index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr, &pfb->base_c.dma_addr, pfb->base_y.size); @@ -384,19 +380,19 @@ static void mtk_vdec_worker(struct work_struct *work) clean_display_buffer(ctx); vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 0, 0); vb2_set_plane_payload(&dst_buf_info->vb.vb2_buf, 1, 0); - dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_LAST; + dst_buf->flags |= V4L2_BUF_FLAG_LAST; 
v4l2_m2m_buf_done(&dst_buf_info->vb, VB2_BUF_STATE_DONE); clean_free_buffer(ctx); v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); return; } - buf.va = vb2_plane_vaddr(src_buf, 0); - buf.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); + buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0); + buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); buf.size = (size_t)src_buf->planes[0].bytesused; if (!buf.va) { v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx); mtk_v4l2_err("[%d] id=%d src_addr is NULL!!", - ctx->id, src_buf->index); + ctx->id, src_buf->vb2_buf.index); return; } mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p", @@ -416,10 +412,10 @@ static void mtk_vdec_worker(struct work_struct *work) mtk_v4l2_err( " <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>", ctx->id, - src_buf->index, + src_buf->vb2_buf.index, buf.size, src_buf_info->vb.vb2_buf.timestamp, - dst_buf->index, + dst_buf->vb2_buf.index, ret, res_chg); src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); if (ret == -EIO) { @@ -1103,7 +1099,7 @@ static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb) static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) { - struct vb2_buffer *src_buf; + struct vb2_v4l2_buffer *src_buf; struct mtk_vcodec_mem src_mem; bool res_chg = false; int ret = 0; @@ -1149,8 +1145,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) mtk_v4l2_err("No src buffer"); return; } - vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); - buf = container_of(vb2_v4l2, struct mtk_video_dec_buf, vb); + buf = container_of(src_buf, struct mtk_video_dec_buf, vb); if (buf->lastframe) { /* This shouldn't happen. Just in case. */ mtk_v4l2_err("Invalid flush buffer."); @@ -1158,8 +1153,8 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) return; } - src_mem.va = vb2_plane_vaddr(src_buf, 0); - src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); + src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0); + src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); src_mem.size = (size_t)src_buf->planes[0].bytesused; mtk_v4l2_debug(2, "[%d] buf id=%d va=%p dma=%pad size=%zx", @@ -1170,7 +1165,7 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg); if (ret || !res_chg) { /* - * fb == NULL menas to parse SPS/PPS header or + * fb == NULL means to parse SPS/PPS header or * resolution info in src_mem. Decode can fail * if there is no SPS header or picture info * in bs @@ -1181,11 +1176,9 @@ static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb) mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.", ctx->id); ctx->state = MTK_STATE_ABORT; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); } else { - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); } mtk_v4l2_debug(ret ? 
0 : 1, "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d", @@ -1281,7 +1274,7 @@ static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count) static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) { - struct vb2_buffer *src_buf = NULL, *dst_buf = NULL; + struct vb2_v4l2_buffer *src_buf = NULL, *dst_buf = NULL; struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d", @@ -1289,12 +1282,10 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) { - struct vb2_v4l2_buffer *vb2_v4l2 = - to_vb2_v4l2_buffer(src_buf); struct mtk_video_dec_buf *buf_info = container_of( - vb2_v4l2, struct mtk_video_dec_buf, vb); + src_buf, struct mtk_video_dec_buf, vb); if (!buf_info->lastframe) - v4l2_m2m_buf_done(vb2_v4l2, + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); } return; @@ -1323,10 +1314,9 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) ctx->state = MTK_STATE_FLUSH; while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) { - vb2_set_plane_payload(dst_buf, 0, 0); - vb2_set_plane_payload(dst_buf, 1, 0); - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); } } diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c index 79ca03ac449c..7884465afcd2 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c @@ -27,11 +27,14 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev) struct device_node *node; struct platform_device *pdev; struct mtk_vcodec_pm *pm; - int ret = 0; + struct mtk_vcodec_clk *dec_clk; + struct mtk_vcodec_clk_info *clk_info; + int i = 0, ret = 0; pdev = mtkdev->plat_dev; pm = &mtkdev->pm; pm->mtkdev = mtkdev; + dec_clk = &pm->vdec_clk; node = of_parse_phandle(pdev->dev.of_node, "mediatek,larb", 0); if (!node) { mtk_v4l2_err("of_parse_phandle mediatek,larb fail!"); @@ -47,52 +50,34 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev) pdev = mtkdev->plat_dev; pm->dev = &pdev->dev; - pm->vcodecpll = devm_clk_get(&pdev->dev, "vcodecpll"); - if (IS_ERR(pm->vcodecpll)) { - mtk_v4l2_err("devm_clk_get vcodecpll fail"); - ret = PTR_ERR(pm->vcodecpll); + dec_clk->clk_num = + of_property_count_strings(pdev->dev.of_node, "clock-names"); + if (dec_clk->clk_num > 0) { + dec_clk->clk_info = devm_kcalloc(&pdev->dev, + dec_clk->clk_num, sizeof(*clk_info), + GFP_KERNEL); + if (!dec_clk->clk_info) + return -ENOMEM; + } else { + mtk_v4l2_err("Failed to get vdec clock count"); + return -EINVAL; } - pm->univpll_d2 = devm_clk_get(&pdev->dev, "univpll_d2"); - if (IS_ERR(pm->univpll_d2)) { - mtk_v4l2_err("devm_clk_get univpll_d2 fail"); - ret = PTR_ERR(pm->univpll_d2); - } - - pm->clk_cci400_sel = devm_clk_get(&pdev->dev, "clk_cci400_sel"); - if (IS_ERR(pm->clk_cci400_sel)) { - mtk_v4l2_err("devm_clk_get clk_cci400_sel fail"); - ret = PTR_ERR(pm->clk_cci400_sel); - } - - pm->vdec_sel = devm_clk_get(&pdev->dev, "vdec_sel"); - if (IS_ERR(pm->vdec_sel)) { - mtk_v4l2_err("devm_clk_get vdec_sel fail"); - ret = PTR_ERR(pm->vdec_sel); - } - - pm->vdecpll = devm_clk_get(&pdev->dev, "vdecpll"); - if (IS_ERR(pm->vdecpll)) { - mtk_v4l2_err("devm_clk_get vdecpll fail"); - ret = 
PTR_ERR(pm->vdecpll); - } - - pm->vencpll = devm_clk_get(&pdev->dev, "vencpll"); - if (IS_ERR(pm->vencpll)) { - mtk_v4l2_err("devm_clk_get vencpll fail"); - ret = PTR_ERR(pm->vencpll); - } - - pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel"); - if (IS_ERR(pm->venc_lt_sel)) { - mtk_v4l2_err("devm_clk_get venc_lt_sel fail"); - ret = PTR_ERR(pm->venc_lt_sel); - } - - pm->vdec_bus_clk_src = devm_clk_get(&pdev->dev, "vdec_bus_clk_src"); - if (IS_ERR(pm->vdec_bus_clk_src)) { - mtk_v4l2_err("devm_clk_get vdec_bus_clk_src"); - ret = PTR_ERR(pm->vdec_bus_clk_src); + for (i = 0; i < dec_clk->clk_num; i++) { + clk_info = &dec_clk->clk_info[i]; + ret = of_property_read_string_index(pdev->dev.of_node, + "clock-names", i, &clk_info->clk_name); + if (ret) { + mtk_v4l2_err("Failed to get clock name id = %d", i); + return ret; + } + clk_info->vcodec_clk = devm_clk_get(&pdev->dev, + clk_info->clk_name); + if (IS_ERR(clk_info->vcodec_clk)) { + mtk_v4l2_err("devm_clk_get (%d)%s fail", i, + clk_info->clk_name); + return PTR_ERR(clk_info->vcodec_clk); + } } pm_runtime_enable(&pdev->dev); @@ -125,78 +110,36 @@ void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm) void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm) { - int ret; - - ret = clk_set_rate(pm->vcodecpll, 1482 * 1000000); - if (ret) - mtk_v4l2_err("clk_set_rate vcodecpll fail %d", ret); - - ret = clk_set_rate(pm->vencpll, 800 * 1000000); - if (ret) - mtk_v4l2_err("clk_set_rate vencpll fail %d", ret); - - ret = clk_prepare_enable(pm->vcodecpll); - if (ret) - mtk_v4l2_err("clk_prepare_enable vcodecpll fail %d", ret); - - ret = clk_prepare_enable(pm->vencpll); - if (ret) - mtk_v4l2_err("clk_prepare_enable vencpll fail %d", ret); - - ret = clk_prepare_enable(pm->vdec_bus_clk_src); - if (ret) - mtk_v4l2_err("clk_prepare_enable vdec_bus_clk_src fail %d", - ret); - - ret = clk_prepare_enable(pm->venc_lt_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable venc_lt_sel fail %d", ret); - - ret = clk_set_parent(pm->venc_lt_sel, pm->vdec_bus_clk_src); - if (ret) - mtk_v4l2_err("clk_set_parent venc_lt_sel vdec_bus_clk_src fail %d", - ret); - - ret = clk_prepare_enable(pm->univpll_d2); - if (ret) - mtk_v4l2_err("clk_prepare_enable univpll_d2 fail %d", ret); - - ret = clk_prepare_enable(pm->clk_cci400_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable clk_cci400_sel fail %d", ret); - - ret = clk_set_parent(pm->clk_cci400_sel, pm->univpll_d2); - if (ret) - mtk_v4l2_err("clk_set_parent clk_cci400_sel univpll_d2 fail %d", - ret); - - ret = clk_prepare_enable(pm->vdecpll); - if (ret) - mtk_v4l2_err("clk_prepare_enable vdecpll fail %d", ret); - - ret = clk_prepare_enable(pm->vdec_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable vdec_sel fail %d", ret); - - ret = clk_set_parent(pm->vdec_sel, pm->vdecpll); - if (ret) - mtk_v4l2_err("clk_set_parent vdec_sel vdecpll fail %d", ret); + struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk; + int ret, i = 0; + + for (i = 0; i < dec_clk->clk_num; i++) { + ret = clk_prepare_enable(dec_clk->clk_info[i].vcodec_clk); + if (ret) { + mtk_v4l2_err("clk_prepare_enable %d %s fail %d", i, + dec_clk->clk_info[i].clk_name, ret); + goto error; + } + } ret = mtk_smi_larb_get(pm->larbvdec); - if (ret) + if (ret) { mtk_v4l2_err("mtk_smi_larb_get larbvdec fail %d", ret); + goto error; + } + return; +error: + for (i -= 1; i >= 0; i--) + clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk); } void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm) { + struct mtk_vcodec_clk *dec_clk = &pm->vdec_clk; + int i = 0; + 
mtk_smi_larb_put(pm->larbvdec); - clk_disable_unprepare(pm->vdec_sel); - clk_disable_unprepare(pm->vdecpll); - clk_disable_unprepare(pm->univpll_d2); - clk_disable_unprepare(pm->clk_cci400_sel); - clk_disable_unprepare(pm->venc_lt_sel); - clk_disable_unprepare(pm->vdec_bus_clk_src); - clk_disable_unprepare(pm->vencpll); - clk_disable_unprepare(pm->vcodecpll); + for (i = dec_clk->clk_num - 1; i >= 0; i--) + clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk); } diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h index 3cffb381ac8e..e7e2a108def9 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h @@ -151,9 +151,9 @@ struct mtk_q_data { * @intra_period: I frame period * @gop_size: group of picture size, it's used as the intra frame period * @framerate_num: frame rate numerator. ex: framerate_num=30 and - * framerate_denom=1 menas FPS is 30 + * framerate_denom=1 means FPS is 30 * @framerate_denom: frame rate denominator. ex: framerate_num=30 and - * framerate_denom=1 menas FPS is 30 + * framerate_denom=1 means FPS is 30 * @h264_max_qp: Max value for H.264 quantization parameter * @h264_profile: V4L2 defined H.264 profile * @h264_level: V4L2 defined H.264 level @@ -175,23 +175,30 @@ struct mtk_enc_params { unsigned int force_intra; }; +/** + * struct mtk_vcodec_clk_info - Structure used to store clock name + */ +struct mtk_vcodec_clk_info { + const char *clk_name; + struct clk *vcodec_clk; +}; + +/** + * struct mtk_vcodec_clk - Structure used to store vcodec clock information + */ +struct mtk_vcodec_clk { + struct mtk_vcodec_clk_info *clk_info; + int clk_num; +}; + /** * struct mtk_vcodec_pm - Power management data structure */ struct mtk_vcodec_pm { - struct clk *vdec_bus_clk_src; - struct clk *vencpll; - - struct clk *vcodecpll; - struct clk *univpll_d2; - struct clk *clk_cci400_sel; - struct clk *vdecpll; - struct clk *vdec_sel; - struct clk *vencpll_d2; - struct clk *venc_sel; - struct clk *univpll1_d2; - struct clk *venc_lt_sel; + struct mtk_vcodec_clk vdec_clk; struct device *larbvdec; + + struct mtk_vcodec_clk venc_clk; struct device *larbvenc; struct device *larbvenclt; struct device *dev; diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c index d1f12257bf66..c6b48b5925fb 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c @@ -393,7 +393,7 @@ static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx, param->input_yuv_fmt = VENC_YUV_FORMAT_NV21; break; default: - mtk_v4l2_err("Unsupport fourcc =%d", q_data_src->fmt->fourcc); + mtk_v4l2_err("Unsupported fourcc =%d", q_data_src->fmt->fourcc); break; } param->h264_profile = enc_params->h264_profile; @@ -887,7 +887,7 @@ err_set_param: static void vb2ops_venc_stop_streaming(struct vb2_queue *q) { struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; int ret; mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type); @@ -895,13 +895,11 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q) if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) { dst_buf->planes[0].bytesused = 0; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); } } else { while ((src_buf = 
v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); } if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && @@ -937,8 +935,7 @@ static int mtk_venc_encode_header(void *priv) { struct mtk_vcodec_ctx *ctx = priv; int ret; - struct vb2_buffer *src_buf, *dst_buf; - struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct mtk_vcodec_mem bs_buf; struct venc_done_result enc_result; @@ -948,14 +945,14 @@ static int mtk_venc_encode_header(void *priv) return -EINVAL; } - bs_buf.va = vb2_plane_vaddr(dst_buf, 0); - bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); bs_buf.size = (size_t)dst_buf->planes[0].length; mtk_v4l2_debug(1, "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu", ctx->id, - dst_buf->index, bs_buf.va, + dst_buf->vb2_buf.index, bs_buf.va, (u64)bs_buf.dma_addr, bs_buf.size); @@ -964,26 +961,23 @@ static int mtk_venc_encode_header(void *priv) NULL, &bs_buf, &enc_result); if (ret) { - dst_buf->planes[0].bytesused = 0; + dst_buf->vb2_buf.planes[0].bytesused = 0; ctx->state = MTK_STATE_ABORT; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); mtk_v4l2_err("venc_if_encode failed=%d", ret); return -EINVAL; } src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); if (src_buf) { - src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); - dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf); - dst_buf->timestamp = src_buf->timestamp; - dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; + dst_buf->timecode = src_buf->timecode; } else { mtk_v4l2_err("No timestamp for the header buffer."); } ctx->state = MTK_STATE_HEADER; dst_buf->planes[0].bytesused = enc_result.bs_size; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); return 0; } @@ -991,9 +985,7 @@ static int mtk_venc_encode_header(void *priv) static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx) { struct venc_enc_param enc_prm; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); - struct vb2_v4l2_buffer *vb2_v4l2 = - container_of(vb, struct vb2_v4l2_buffer, vb2_buf); + struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx); struct mtk_video_enc_buf *mtk_buf = container_of(vb2_v4l2, struct mtk_video_enc_buf, vb); @@ -1067,12 +1059,11 @@ static void mtk_venc_worker(struct work_struct *work) { struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx, encode_work); - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct venc_frm_buf frm_buf; struct mtk_vcodec_mem bs_buf; struct venc_done_result enc_result; int ret, i; - struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2; /* check dst_buf, dst_buf may be removed in device_run * to stored encdoe header so we need check dst_buf and @@ -1086,15 +1077,15 @@ static void mtk_venc_worker(struct work_struct *work) src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); memset(&frm_buf, 0, sizeof(frm_buf)); - for (i = 0; i < src_buf->num_planes ; i++) { + for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) { frm_buf.fb_addr[i].dma_addr = - vb2_dma_contig_plane_dma_addr(src_buf, i); + vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, i); frm_buf.fb_addr[i].size = - 
(size_t)src_buf->planes[i].length; + (size_t)src_buf->vb2_buf.planes[i].length; } - bs_buf.va = vb2_plane_vaddr(dst_buf, 0); - bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); - bs_buf.size = (size_t)dst_buf->planes[0].length; + bs_buf.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); + bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); + bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length; mtk_v4l2_debug(2, "Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu", @@ -1108,28 +1099,21 @@ static void mtk_venc_worker(struct work_struct *work) ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME, &frm_buf, &bs_buf, &enc_result); - src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf); - dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf); - - dst_buf->timestamp = src_buf->timestamp; - dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode; + dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; + dst_buf->timecode = src_buf->timecode; if (enc_result.is_key_frm) - dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME; + dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME; if (ret) { - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); dst_buf->planes[0].bytesused = 0; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_ERROR); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); mtk_v4l2_err("venc_if_encode failed=%d", ret); } else { - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), - VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); dst_buf->planes[0].bytesused = enc_result.bs_size; - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), - VB2_BUF_STATE_DONE); + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); mtk_v4l2_debug(2, "venc_if_encode bs size=%d", enc_result.bs_size); } @@ -1137,7 +1121,7 @@ static void mtk_venc_worker(struct work_struct *work) v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx); mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>", - src_buf->index, dst_buf->index, ret, + src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret, enc_result.bs_size); } diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c index 7c025045ea90..39375b8ea27c 100644 --- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c +++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c @@ -27,9 +27,11 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) { struct device_node *node; struct platform_device *pdev; - struct device *dev; struct mtk_vcodec_pm *pm; - int ret = 0; + struct mtk_vcodec_clk *enc_clk; + struct mtk_vcodec_clk_info *clk_info; + int ret = 0, i = 0; + struct device *dev; pdev = mtkdev->plat_dev; pm = &mtkdev->pm; @@ -37,6 +39,7 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) pm->mtkdev = mtkdev; pm->dev = &pdev->dev; dev = &pdev->dev; + enc_clk = &pm->venc_clk; node = of_parse_phandle(dev->of_node, "mediatek,larb", 0); if (!node) { @@ -68,28 +71,34 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev) pdev = mtkdev->plat_dev; pm->dev = &pdev->dev; - pm->vencpll_d2 = devm_clk_get(&pdev->dev, "venc_sel_src"); - if (IS_ERR(pm->vencpll_d2)) { - mtk_v4l2_err("devm_clk_get vencpll_d2 fail"); - ret = PTR_ERR(pm->vencpll_d2); - } - - pm->venc_sel = devm_clk_get(&pdev->dev, "venc_sel"); - if (IS_ERR(pm->venc_sel)) { - mtk_v4l2_err("devm_clk_get venc_sel fail"); - ret = PTR_ERR(pm->venc_sel); + enc_clk->clk_num = of_property_count_strings(pdev->dev.of_node, + 
"clock-names"); + if (enc_clk->clk_num > 0) { + enc_clk->clk_info = devm_kcalloc(&pdev->dev, + enc_clk->clk_num, sizeof(*clk_info), + GFP_KERNEL); + if (!enc_clk->clk_info) + return -ENOMEM; + } else { + mtk_v4l2_err("Failed to get venc clock count"); + return -EINVAL; } - pm->univpll1_d2 = devm_clk_get(&pdev->dev, "venc_lt_sel_src"); - if (IS_ERR(pm->univpll1_d2)) { - mtk_v4l2_err("devm_clk_get univpll1_d2 fail"); - ret = PTR_ERR(pm->univpll1_d2); - } - - pm->venc_lt_sel = devm_clk_get(&pdev->dev, "venc_lt_sel"); - if (IS_ERR(pm->venc_lt_sel)) { - mtk_v4l2_err("devm_clk_get venc_lt_sel fail"); - ret = PTR_ERR(pm->venc_lt_sel); + for (i = 0; i < enc_clk->clk_num; i++) { + clk_info = &enc_clk->clk_info[i]; + ret = of_property_read_string_index(pdev->dev.of_node, + "clock-names", i, &clk_info->clk_name); + if (ret) { + mtk_v4l2_err("venc failed to get clk name %d", i); + return ret; + } + clk_info->vcodec_clk = devm_clk_get(&pdev->dev, + clk_info->clk_name); + if (IS_ERR(clk_info->vcodec_clk)) { + mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i, + clk_info->clk_name); + return PTR_ERR(clk_info->vcodec_clk); + } } return ret; @@ -102,38 +111,45 @@ void mtk_vcodec_release_enc_pm(struct mtk_vcodec_dev *mtkdev) void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm) { - int ret; - - ret = clk_prepare_enable(pm->venc_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable fail %d", ret); - - ret = clk_set_parent(pm->venc_sel, pm->vencpll_d2); - if (ret) - mtk_v4l2_err("clk_set_parent fail %d", ret); - - ret = clk_prepare_enable(pm->venc_lt_sel); - if (ret) - mtk_v4l2_err("clk_prepare_enable fail %d", ret); - - ret = clk_set_parent(pm->venc_lt_sel, pm->univpll1_d2); - if (ret) - mtk_v4l2_err("clk_set_parent fail %d", ret); + struct mtk_vcodec_clk *enc_clk = &pm->venc_clk; + int ret, i = 0; + + for (i = 0; i < enc_clk->clk_num; i++) { + ret = clk_prepare_enable(enc_clk->clk_info[i].vcodec_clk); + if (ret) { + mtk_v4l2_err("venc clk_prepare_enable %d %s fail %d", i, + enc_clk->clk_info[i].clk_name, ret); + goto clkerr; + } + } ret = mtk_smi_larb_get(pm->larbvenc); - if (ret) + if (ret) { mtk_v4l2_err("mtk_smi_larb_get larb3 fail %d", ret); - + goto larbvencerr; + } ret = mtk_smi_larb_get(pm->larbvenclt); - if (ret) + if (ret) { mtk_v4l2_err("mtk_smi_larb_get larb4 fail %d", ret); + goto larbvenclterr; + } + return; +larbvenclterr: + mtk_smi_larb_put(pm->larbvenc); +larbvencerr: +clkerr: + for (i -= 1; i >= 0; i--) + clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk); } void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm) { + struct mtk_vcodec_clk *enc_clk = &pm->venc_clk; + int i = 0; + mtk_smi_larb_put(pm->larbvenc); mtk_smi_larb_put(pm->larbvenclt); - clk_disable_unprepare(pm->venc_lt_sel); - clk_disable_unprepare(pm->venc_sel); + for (i = enc_clk->clk_num - 1; i >= 0; i--) + clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk); } diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c index aa3ce41898bc..02c960c63ac0 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c @@ -55,7 +55,7 @@ struct h264_fb { /** * struct h264_ring_fb_list - ring frame buffer list - * @fb_list : frame buffer arrary + * @fb_list : frame buffer array * @read_idx : read index * @write_idx : write index * @count : buffer count in list @@ -72,7 +72,7 @@ struct h264_ring_fb_list { /** * struct vdec_h264_dec_info - decode information * @dpb_sz : decoding picture buffer size - * 
@resolution_changed : resoltion change happen + * @resolution_changed : resolution change happen * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer * @reserved : for 8 bytes alignment * @bs_dma : Input bit-stream buffer dma address diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c index 3e84a761db3a..bac3723038de 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c @@ -120,7 +120,7 @@ struct vdec_vp8_hw_reg_base { /** * struct vdec_vp8_vpu_inst - VPU instance for VP8 decode * @wq_hd : Wait queue to wait VPU message ack - * @signaled : 1 - Host has received ack message from VPU, 0 - not recevie + * @signaled : 1 - Host has received ack message from VPU, 0 - not receive * @failure : VPU execution result status 0 - success, others - fail * @inst_addr : VPU decoder instance address */ diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h index ded1154481cd..9a21591f3818 100644 --- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h +++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h @@ -80,7 +80,7 @@ void vdec_if_deinit(struct mtk_vcodec_ctx *ctx); * vdec_if_decode() - trigger decode * @ctx : [in] v4l2 context * @bs : [in] input bitstream - * @fb : [in] frame buffer to store decoded frame, when null menas parse + * @fb : [in] frame buffer to store decoded frame, when null means parse * header only * @res_chg : [out] resolution change happens if current bs have different * picture width/height diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h index cd37bb2a610f..79d8eac7f5e2 100644 --- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h +++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h @@ -62,7 +62,7 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len); /** * vpu_dec_end - end decoding, basically the function will be invoked once * when HW decoding done interrupt received successfully. The - * decoder in VPU will continute to do referene frame management + * decoder in VPU will continue to do reference frame management * and check if there is a new decoded frame available to display. 
* * @vpu : instance for vdec_vpu_inst diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c index 27b078cf98e3..f60f499c596b 100644 --- a/drivers/media/platform/mx2_emmaprp.c +++ b/drivers/media/platform/mx2_emmaprp.c @@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv) { struct emmaprp_ctx *ctx = priv; struct emmaprp_q_data *s_q_data, *d_q_data; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; struct emmaprp_dev *pcdev = ctx->dev; unsigned int s_width, s_height; unsigned int d_width, d_height; @@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv) d_height = d_q_data->height; d_size = d_width * d_height; - p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0); - p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); + p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); if (!p_in || !p_out) { v4l2_err(&pcdev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c index f447ae3bb465..37f0d7146dfa 100644 --- a/drivers/media/platform/omap/omap_vout.c +++ b/drivers/media/platform/omap/omap_vout.c @@ -513,7 +513,7 @@ static int omapvid_apply_changes(struct omap_vout_device *vout) } static int omapvid_handle_interlace_display(struct omap_vout_device *vout, - unsigned int irqstatus, struct timeval timevalue) + unsigned int irqstatus, u64 ts) { u32 fid; @@ -537,7 +537,7 @@ static int omapvid_handle_interlace_display(struct omap_vout_device *vout, if (vout->cur_frm == vout->next_frm) goto err; - vout->cur_frm->ts = timevalue; + vout->cur_frm->ts = ts; vout->cur_frm->state = VIDEOBUF_DONE; wake_up_interruptible(&vout->cur_frm->done); vout->cur_frm = vout->next_frm; @@ -557,7 +557,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) int ret, fid, mgr_id; u32 addr, irq; struct omap_overlay *ovl; - struct timeval timevalue; + u64 ts; struct omapvideo_info *ovid; struct omap_dss_device *cur_display; struct omap_vout_device *vout = (struct omap_vout_device *)arg; @@ -577,7 +577,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) return; spin_lock(&vout->vbq_lock); - v4l2_get_timestamp(&timevalue); + ts = ktime_get_ns(); switch (cur_display->type) { case OMAP_DISPLAY_TYPE_DSI: @@ -595,7 +595,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) break; case OMAP_DISPLAY_TYPE_VENC: fid = omapvid_handle_interlace_display(vout, irqstatus, - timevalue); + ts); if (!fid) goto vout_isr_err; break; @@ -608,7 +608,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus) } if (!vout->first_int && (vout->cur_frm != vout->next_frm)) { - vout->cur_frm->ts = timevalue; + vout->cur_frm->ts = ts; vout->cur_frm->state = VIDEOBUF_DONE; wake_up_interruptible(&vout->cur_frm->done); vout->cur_frm = vout->next_frm; @@ -1129,7 +1129,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh, } timing = &dssdev->panel.timings; - /* We dont support RGB24-packed mode if vrfb rotation + /* We don't support RGB24-packed mode if vrfb rotation * is enabled*/ if ((is_rotation_enabled(vout)) && f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) { @@ -1147,7 +1147,7 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *fh, vout->fbuf.fmt.width = timing->x_res; } - /* change to samller size is OK */ + /* change to smaller size is OK */ bpp = omap_vout_try_format(&f->fmt.pix); f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * 
bpp; diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h index 56b630b1c8b4..c740393c8509 100644 --- a/drivers/media/platform/omap/omap_voutdef.h +++ b/drivers/media/platform/omap/omap_voutdef.h @@ -37,7 +37,7 @@ #define VID_MAX_WIDTH 1280 /* Largest width */ #define VID_MAX_HEIGHT 720 /* Largest height */ -/* Mimimum requirement is 2x2 for DSS */ +/* Minimum requirement is 2x2 for DSS */ #define VID_MIN_WIDTH 2 #define VID_MIN_HEIGHT 2 @@ -135,7 +135,7 @@ struct omap_vout_device { enum omap_color_mode dss_mode; /* we don't allow to request new buffer when old buffers are - * still mmaped + * still mmapped */ int mmap_count; diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 13f2828d880d..bd57174d81a7 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c @@ -1517,7 +1517,7 @@ void omap3isp_print_status(struct isp_device *isp) * * To solve this problem power management support is split into prepare/complete * and suspend/resume operations. The pipelines are stopped in prepare() and the - * ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in + * ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in * resume(), and the the pipelines are restarted in complete(). * * TODO: PM dependencies between the ISP and sensors are not modelled explicitly diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c index 14a1c24037c4..261ad1175f98 100644 --- a/drivers/media/platform/omap3isp/ispccdc.c +++ b/drivers/media/platform/omap3isp/ispccdc.c @@ -1594,7 +1594,7 @@ static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc) return 0; /* We're in continuous mode, and memory writes were disabled due to a - * buffer underrun. Reenable them now that we have a buffer. The buffer + * buffer underrun. Re-enable them now that we have a buffer. The buffer * address has been set in ccdc_video_queue. */ if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) { @@ -1712,7 +1712,7 @@ static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc) * data to memory the CCDC and LSC are stopped immediately but * without change the CCDC stopping state machine. The CCDC * stopping state machine should be used only when user request - * for stopping is received (SINGLESHOT is an exeption). + * for stopping is received (SINGLESHOT is an exception). */ switch (ccdc->state) { case ISP_PIPELINE_STREAM_SINGLESHOT: diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c index 9c180f607bcb..da66ea65be5d 100644 --- a/drivers/media/platform/omap3isp/ispcsi2.c +++ b/drivers/media/platform/omap3isp/ispcsi2.c @@ -710,7 +710,7 @@ static void csi2_isr_ctx(struct isp_csi2_device *csi2, /* Skip interrupts until we reach the frame skip count. The CSI2 will be * automatically disabled, as the frame skip count has been programmed - * in the CSI2_CTx_CTRL1::COUNT field, so reenable it. + * in the CSI2_CTx_CTRL1::COUNT field, so re-enable it. 
* * It would have been nice to rely on the FRAME_NUMBER interrupt instead * but it turned out that the interrupt is only generated when the CSI2 diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c index 5f930560eb30..4fe228752a43 100644 --- a/drivers/media/platform/pxa_camera.c +++ b/drivers/media/platform/pxa_camera.c @@ -1631,7 +1631,7 @@ static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev) pcdev->channels = 1; - /* Make choises, based on platform preferences */ + /* Make choices, based on platform preferences */ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) { if (pcdev->platform_flags & PXA_CAMERA_HSP) @@ -2394,15 +2394,17 @@ static int pxa_camera_probe(struct platform_device *pdev) pcdev->res = res; pcdev->pdata = pdev->dev.platform_data; - if (pdev->dev.of_node && !pcdev->pdata) { - err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd); - } else { + if (pcdev->pdata) { pcdev->platform_flags = pcdev->pdata->flags; pcdev->mclk = pcdev->pdata->mclk_10khz * 10000; pcdev->asd.match_type = V4L2_ASYNC_MATCH_I2C; pcdev->asd.match.i2c.adapter_id = pcdev->pdata->sensor_i2c_adapter_id; pcdev->asd.match.i2c.address = pcdev->pdata->sensor_i2c_address; + } else if (pdev->dev.of_node) { + err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev, &pcdev->asd); + } else { + return -ENODEV; } if (err < 0) return err; diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index cb411eb85ee4..739366744e0f 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -455,7 +455,7 @@ static const struct venus_resources msm8996_res = { .reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset), .clks = {"core", "iface", "bus", "mbus" }, .clks_num = 4, - .max_load = 2563200, + .max_load = 3110400, /* 4096x2160@90 */ .hfi_version = HFI_VERSION_3XX, .vmem_id = VIDC_RESOURCE_NONE, .vmem_size = 0, @@ -465,10 +465,12 @@ static const struct venus_resources msm8996_res = { }; static const struct freq_tbl sdm845_freq_table[] = { - { 1944000, 380000000 }, /* 4k UHD @ 60 */ - { 972000, 320000000 }, /* 4k UHD @ 30 */ - { 489600, 200000000 }, /* 1080p @ 60 */ - { 244800, 100000000 }, /* 1080p @ 30 */ + { 3110400, 533000000 }, /* 4096x2160@90 */ + { 2073600, 444000000 }, /* 4096x2160@60 */ + { 1944000, 404000000 }, /* 3840x2160@60 */ + { 972000, 330000000 }, /* 3840x2160@30 */ + { 489600, 200000000 }, /* 1920x1080@60 */ + { 244800, 100000000 }, /* 1920x1080@30 */ }; static const struct venus_resources sdm845_res = { diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 6382cea29185..7a3feb5cee00 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -134,6 +134,7 @@ struct venus_core { struct video_firmware { struct device *dev; struct iommu_domain *iommu_domain; + size_t mapped_mem_size; } fw; struct mutex lock; struct list_head instances; @@ -218,7 +219,7 @@ struct venus_buffer { #define to_venus_buffer(ptr) container_of(ptr, struct venus_buffer, vb) /** - * struct venus_inst - holds per instance paramerters + * struct venus_inst - holds per instance parameters * * @list: used for attach an instance to the core * @lock: instance lock diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c index c29acfd70c1b..6cfa8021721e 100644 --- a/drivers/media/platform/qcom/venus/firmware.c +++ 
b/drivers/media/platform/qcom/venus/firmware.c @@ -35,14 +35,15 @@ static void venus_reset_cpu(struct venus_core *core) { + u32 fw_size = core->fw.mapped_mem_size; void __iomem *base = core->base; writel(0, base + WRAPPER_FW_START_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_FW_END_ADDR); + writel(fw_size, base + WRAPPER_FW_END_ADDR); writel(0, base + WRAPPER_CPA_START_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_CPA_END_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_START_ADDR); - writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_END_ADDR); + writel(fw_size, base + WRAPPER_CPA_END_ADDR); + writel(fw_size, base + WRAPPER_NONPIX_START_ADDR); + writel(fw_size, base + WRAPPER_NONPIX_END_ADDR); writel(0x0, base + WRAPPER_CPU_CGC_DIS); writel(0x0, base + WRAPPER_CPU_CLOCK_CONFIG); @@ -74,6 +75,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname, void *mem_va; int ret; + *mem_phys = 0; + *mem_size = 0; + dev = core->dev; node = of_parse_phandle(dev->of_node, "memory-region", 0); if (!node) { @@ -85,28 +89,30 @@ static int venus_load_fw(struct venus_core *core, const char *fwname, if (ret) return ret; + ret = request_firmware(&mdt, fwname, dev); + if (ret < 0) + return ret; + + fw_size = qcom_mdt_get_size(mdt); + if (fw_size < 0) { + ret = fw_size; + goto err_release_fw; + } + *mem_phys = r.start; *mem_size = resource_size(&r); - if (*mem_size < VENUS_FW_MEM_SIZE) - return -EINVAL; + if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) { + ret = -EINVAL; + goto err_release_fw; + } mem_va = memremap(r.start, *mem_size, MEMREMAP_WC); if (!mem_va) { dev_err(dev, "unable to map memory region: %pa+%zx\n", &r.start, *mem_size); - return -ENOMEM; - } - - ret = request_firmware(&mdt, fwname, dev); - if (ret < 0) - goto err_unmap; - - fw_size = qcom_mdt_get_size(mdt); - if (fw_size < 0) { - ret = fw_size; - release_firmware(mdt); - goto err_unmap; + ret = -ENOMEM; + goto err_release_fw; } if (core->use_tz) @@ -116,10 +122,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname, ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID, mem_va, *mem_phys, *mem_size, NULL); - release_firmware(mdt); - -err_unmap: memunmap(mem_va); +err_release_fw: + release_firmware(mdt); return ret; } @@ -135,6 +140,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys, return -EPROBE_DEFER; iommu = core->fw.iommu_domain; + core->fw.mapped_mem_size = mem_size; ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size, IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV); @@ -150,6 +156,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys, static int venus_shutdown_no_tz(struct venus_core *core) { + const size_t mapped = core->fw.mapped_mem_size; struct iommu_domain *iommu; size_t unmapped; u32 reg; @@ -166,8 +173,8 @@ static int venus_shutdown_no_tz(struct venus_core *core) iommu = core->fw.iommu_domain; - unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, VENUS_FW_MEM_SIZE); - if (unmapped != VENUS_FW_MEM_SIZE) + unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped); + if (unmapped != mapped) dev_err(dev, "failed to unmap firmware\n"); return 0; diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index e436385bc5ab..5cad601d4c57 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -439,9 +439,6 @@ session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf) fdata.flags = 0; fdata.clnt_data = 
vbuf->vb2_buf.index; - if (!fdata.timestamp) - fdata.flags |= HFI_BUFFERFLAG_TIMESTAMPINVALID; - if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { fdata.buffer_type = HFI_BUFFER_INPUT; fdata.filled_len = vb2_get_plane_payload(vb, 0); diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c index f0719ce24b97..594d80434004 100644 --- a/drivers/media/platform/rcar-vin/rcar-core.c +++ b/drivers/media/platform/rcar-vin/rcar-core.c @@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags, !is_media_entity_v4l2_video_device(link->sink->entity)) return 0; - /* If any entity is in use don't allow link changes. */ + /* + * Don't allow link changes if any entity in the graph is + * streaming, modifying the CHSEL register fields can disrupt + * running streams. + */ media_device_for_each_entity(entity, &group->mdev) - if (entity->use_count) + if (entity->stream_count) return -EBUSY; mutex_lock(&group->lock); @@ -542,9 +546,7 @@ static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier, vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name); - mutex_lock(&vin->lock); rvin_parallel_subdevice_detach(vin); - mutex_unlock(&vin->lock); } static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier, @@ -554,9 +556,7 @@ static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier, struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); int ret; - mutex_lock(&vin->lock); ret = rvin_parallel_subdevice_attach(vin, subdev); - mutex_unlock(&vin->lock); if (ret) return ret; @@ -664,7 +664,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier) } /* Create all media device links between VINs and CSI-2's. */ - mutex_lock(&vin->group->lock); for (route = vin->info->routes; route->mask; route++) { struct media_pad *source_pad, *sink_pad; struct media_entity *source, *sink; @@ -700,7 +699,6 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier) break; } } - mutex_unlock(&vin->group->lock); return ret; } @@ -716,8 +714,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier, if (vin->group->vin[i]) rvin_v4l2_unregister(vin->group->vin[i]); - mutex_lock(&vin->group->lock); - for (i = 0; i < RVIN_CSI_MAX; i++) { if (vin->group->csi[i].fwnode != asd->match.fwnode) continue; @@ -725,8 +721,6 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier, vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i); break; } - - mutex_unlock(&vin->group->lock); } static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier, @@ -736,8 +730,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier, struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); unsigned int i; - mutex_lock(&vin->group->lock); - for (i = 0; i < RVIN_CSI_MAX; i++) { if (vin->group->csi[i].fwnode != asd->match.fwnode) continue; @@ -746,8 +738,6 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier, break; } - mutex_unlock(&vin->group->lock); - return 0; } @@ -1145,6 +1135,10 @@ static const struct rvin_info rcar_info_r8a77995 = { }; static const struct of_device_id rvin_of_id_table[] = { + { + .compatible = "renesas,vin-r8a774c0", + .data = &rcar_info_r8a77990, + }, { .compatible = "renesas,vin-r8a7778", .data = &rcar_info_m1, diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c index 6d356f5a9456..f64528d2be3c 100644 --- 
a/drivers/media/platform/rcar-vin/rcar-csi2.c +++ b/drivers/media/platform/rcar-vin/rcar-csi2.c @@ -152,37 +152,37 @@ static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = { }; static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = { - { .mbps = 89, .reg = 0x00 }, - { .mbps = 99, .reg = 0x20 }, - { .mbps = 109, .reg = 0x40 }, - { .mbps = 129, .reg = 0x02 }, - { .mbps = 139, .reg = 0x22 }, - { .mbps = 149, .reg = 0x42 }, - { .mbps = 169, .reg = 0x04 }, - { .mbps = 179, .reg = 0x24 }, - { .mbps = 199, .reg = 0x44 }, - { .mbps = 219, .reg = 0x06 }, - { .mbps = 239, .reg = 0x26 }, - { .mbps = 249, .reg = 0x46 }, - { .mbps = 269, .reg = 0x08 }, - { .mbps = 299, .reg = 0x28 }, - { .mbps = 329, .reg = 0x0a }, - { .mbps = 359, .reg = 0x2a }, - { .mbps = 399, .reg = 0x4a }, - { .mbps = 449, .reg = 0x0c }, - { .mbps = 499, .reg = 0x2c }, - { .mbps = 549, .reg = 0x0e }, - { .mbps = 599, .reg = 0x2e }, - { .mbps = 649, .reg = 0x10 }, - { .mbps = 699, .reg = 0x30 }, - { .mbps = 749, .reg = 0x12 }, - { .mbps = 799, .reg = 0x32 }, - { .mbps = 849, .reg = 0x52 }, - { .mbps = 899, .reg = 0x72 }, - { .mbps = 949, .reg = 0x14 }, - { .mbps = 999, .reg = 0x34 }, - { .mbps = 1049, .reg = 0x54 }, - { .mbps = 1099, .reg = 0x74 }, + { .mbps = 80, .reg = 0x00 }, + { .mbps = 90, .reg = 0x20 }, + { .mbps = 100, .reg = 0x40 }, + { .mbps = 110, .reg = 0x02 }, + { .mbps = 130, .reg = 0x22 }, + { .mbps = 140, .reg = 0x42 }, + { .mbps = 150, .reg = 0x04 }, + { .mbps = 170, .reg = 0x24 }, + { .mbps = 180, .reg = 0x44 }, + { .mbps = 200, .reg = 0x06 }, + { .mbps = 220, .reg = 0x26 }, + { .mbps = 240, .reg = 0x46 }, + { .mbps = 250, .reg = 0x08 }, + { .mbps = 270, .reg = 0x28 }, + { .mbps = 300, .reg = 0x0a }, + { .mbps = 330, .reg = 0x2a }, + { .mbps = 360, .reg = 0x4a }, + { .mbps = 400, .reg = 0x0c }, + { .mbps = 450, .reg = 0x2c }, + { .mbps = 500, .reg = 0x0e }, + { .mbps = 550, .reg = 0x2e }, + { .mbps = 600, .reg = 0x10 }, + { .mbps = 650, .reg = 0x30 }, + { .mbps = 700, .reg = 0x12 }, + { .mbps = 750, .reg = 0x32 }, + { .mbps = 800, .reg = 0x52 }, + { .mbps = 850, .reg = 0x72 }, + { .mbps = 900, .reg = 0x14 }, + { .mbps = 950, .reg = 0x34 }, + { .mbps = 1000, .reg = 0x54 }, + { .mbps = 1050, .reg = 0x74 }, { .mbps = 1125, .reg = 0x16 }, { /* sentinel */ }, }; @@ -985,6 +985,10 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = { }; static const struct of_device_id rcar_csi2_of_table[] = { + { + .compatible = "renesas,r8a774c0-csi2", + .data = &rcar_csi2_info_r8a77990, + }, { .compatible = "renesas,r8a7795-csi2", .data = &rcar_csi2_info_r8a7795, diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c index 92323310f735..2207a31d355e 100644 --- a/drivers/media/platform/rcar-vin/rcar-dma.c +++ b/drivers/media/platform/rcar-vin/rcar-dma.c @@ -681,7 +681,7 @@ static int rvin_setup(struct rvin_dev *vin) break; } - /* Enable VSYNC Field Toogle mode after one VSYNC input */ + /* Enable VSYNC Field Toggle mode after one VSYNC input */ if (vin->info->model == RCAR_GEN3) dmr2 = VNDMR2_FTEV; else @@ -1341,5 +1341,5 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel) pm_runtime_put(vin->dev); - return ret; + return 0; } diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c index 7a2851790b91..7cbdcbf9b090 100644 --- a/drivers/media/platform/rcar-vin/rcar-v4l2.c +++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c @@ -665,7 +665,7 @@ static void rvin_mc_try_format(struct rvin_dev *vin, * The V4L2 specification 
clearly documents the colorspace fields * as being set by drivers for capture devices. Using the values * supplied by userspace thus wouldn't comply with the API. Until - * the API is updated force fixed vaules. + * the API is updated force fixed values. */ pix->colorspace = RVIN_DEFAULT_COLORSPACE; pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace); @@ -964,7 +964,7 @@ void rvin_v4l2_unregister(struct rvin_dev *vin) v4l2_info(&vin->v4l2_dev, "Removing %s\n", video_device_node_name(&vin->vdev)); - /* Checks internaly if vdev have been init or not */ + /* Checks internally if vdev have been init or not */ video_unregister_device(&vin->vdev); } diff --git a/drivers/media/platform/rockchip/rga/rga-hw.c b/drivers/media/platform/rockchip/rga/rga-hw.c index 96d1b1b3fe8e..843e50d17a72 100644 --- a/drivers/media/platform/rockchip/rga/rga-hw.c +++ b/drivers/media/platform/rockchip/rga/rga-hw.c @@ -256,7 +256,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) } /* - * Cacluate the up/down scaling mode/factor. + * Calculate the up/down scaling mode/factor. * * RGA used to scale the picture first, and then rotate second, * so we need to swap the w/h when rotate degree is 90/270. @@ -304,7 +304,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) } /* - * Cacluate the framebuffer virtual strides and active size, + * Calculate the framebuffer virtual strides and active size, * note that the step of vir_stride / vir_width is 4 byte words */ src_vir_info.data.vir_stride = ctx->in.stride >> 2; @@ -318,7 +318,7 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx) dst_act_info.data.act_width = dst_w - 1; /* - * Cacluate the source framebuffer base address with offset pixel. + * Calculate the source framebuffer base address with offset pixel. 
*/ src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y, src_w, src_h); diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c index 5c653287185f..b096227a9722 100644 --- a/drivers/media/platform/rockchip/rga/rga.c +++ b/drivers/media/platform/rockchip/rga/rga.c @@ -43,7 +43,7 @@ static void device_run(void *prv) { struct rga_ctx *ctx = prv; struct rockchip_rga *rga = ctx->rga; - struct vb2_buffer *src, *dst; + struct vb2_v4l2_buffer *src, *dst; unsigned long flags; spin_lock_irqsave(&rga->ctrl_lock, flags); @@ -53,8 +53,8 @@ static void device_run(void *prv) src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - rga_buf_map(src); - rga_buf_map(dst); + rga_buf_map(&src->vb2_buf); + rga_buf_map(&dst->vb2_buf); rga_hw_start(rga); diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h index 1f5c8c94ce89..be5e7357dffc 100644 --- a/drivers/media/platform/s3c-camif/camif-core.h +++ b/drivers/media/platform/s3c-camif/camif-core.h @@ -260,7 +260,7 @@ struct camif_vp { * @clock: clocks required for the CAMIF operation * @lock: mutex protecting this data structure * @slock: spinlock protecting CAMIF registers - * @io_base: start address of the mmaped CAMIF registers + * @io_base: start address of the mmapped CAMIF registers */ struct camif_dev { struct media_device media_dev; diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 57ab1d1085d1..971c47165010 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c @@ -513,7 +513,7 @@ static void device_run(void *prv) { struct g2d_ctx *ctx = prv; struct g2d_dev *dev = ctx->dev; - struct vb2_buffer *src, *dst; + struct vb2_v4l2_buffer *src, *dst; unsigned long flags; u32 cmd = 0; @@ -528,10 +528,10 @@ static void device_run(void *prv) spin_lock_irqsave(&dev->ctrl_lock, flags); g2d_set_src_size(dev, &ctx->in); - g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0)); + g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0)); g2d_set_dst_size(dev, &ctx->out); - g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0)); + g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0)); g2d_set_rop4(dev, ctx->rop); g2d_set_flip(dev, ctx->flip); diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c index 3f9000b70385..8cc730eccb6c 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c @@ -3,7 +3,7 @@ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * - * Author: Andrzej Pietrasiewicz + * Author: Andrzej Pietrasiewicz * Author: Jacek Anaszewski * * This program is free software; you can redistribute it and/or modify @@ -793,14 +793,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len); static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, x, components; jpeg_buffer.size = 2; /* Ls */ jpeg_buffer.data = - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2; + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2; jpeg_buffer.curr = 0; word = 0; @@ -830,14 +830,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, i, n, j; for (j = 0; j < ctx->out_q.dht.n; ++j) { jpeg_buffer.size = ctx->out_q.dht.len[j]; - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) + + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.dht.marker[j]; jpeg_buffer.curr = 0; @@ -889,13 +889,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; int c, x, components; jpeg_buffer.size = ctx->out_q.sof_len; jpeg_buffer.data = - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof; + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof; jpeg_buffer.curr = 0; skip(&jpeg_buffer, 5); /* P, Y, X */ @@ -920,14 +920,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); struct s5p_jpeg_buffer jpeg_buffer; unsigned int word; int c, i, j; for (j = 0; j < ctx->out_q.dqt.n; ++j) { jpeg_buffer.size = ctx->out_q.dqt.len[j]; - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) + + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.dqt.marker[j]; jpeg_buffer.curr = 0; @@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv, return 0; } -static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n, +static int enum_fmt(struct s5p_jpeg_ctx *ctx, + struct s5p_jpeg_fmt *sjpeg_formats, int n, struct v4l2_fmtdesc *f, u32 type) { int i, num = 0; + unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag; for (i = 0; i < n; ++i) { - if (sjpeg_formats[i].flags & type) { + if (sjpeg_formats[i].flags & type && + sjpeg_formats[i].flags & fmt_ver_flag) { /* index-th format of type type found ? 
*/ if (num == f->index) break; @@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv, struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, SJPEG_FMT_FLAG_ENC_CAPTURE); - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, - SJPEG_FMT_FLAG_DEC_CAPTURE); + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, + SJPEG_FMT_FLAG_DEC_CAPTURE); } static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv, @@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv, struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); if (ctx->mode == S5P_JPEG_ENCODE) - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, SJPEG_FMT_FLAG_ENC_OUTPUT); - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f, - SJPEG_FMT_FLAG_DEC_OUTPUT); + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f, + SJPEG_FMT_FLAG_DEC_OUTPUT); } static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx, @@ -2002,7 +2005,7 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx) v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, V4L2_CID_JPEG_RESTART_INTERVAL, - 0, 3, 0xffff, 0); + 0, 0xffff, 1, 0); if (ctx->jpeg->variant->version == SJPEG_S5P) mask = ~0x06; /* 422, 420 */ } @@ -2072,15 +2075,15 @@ static void s5p_jpeg_device_run(void *priv) { struct s5p_jpeg_ctx *ctx = priv; struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; unsigned long src_addr, dst_addr, flags; spin_lock_irqsave(&ctx->jpeg->slock, flags); src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); - src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0); - dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0); + src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); + dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); s5p_jpeg_reset(jpeg->regs); s5p_jpeg_poweron(jpeg->regs); @@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; struct s5p_jpeg_fmt *fmt; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; struct s5p_jpeg_addr jpeg_addr = {}; u32 pix_size, padding_bytes = 0; @@ -2172,7 +2175,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); } - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (fmt->colplanes == 2) { jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes; @@ -2190,7 +2193,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; unsigned int jpeg_addr = 0; if (ctx->mode == S5P_JPEG_ENCODE) @@ -2198,7 +2201,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) else vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (jpeg->variant->version == SJPEG_EXYNOS5433 && ctx->mode == S5P_JPEG_DECODE) jpeg_addr += ctx->out_q.sos; @@ -2314,7 +2317,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; 
struct s5p_jpeg_fmt *fmt; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; struct s5p_jpeg_addr jpeg_addr = {}; u32 pix_size; @@ -2328,7 +2331,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) fmt = ctx->cap_q.fmt; } - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); if (fmt->colplanes == 2) { jpeg_addr.cb = jpeg_addr.y + pix_size; @@ -2346,7 +2349,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) { struct s5p_jpeg *jpeg = ctx->jpeg; - struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vb; unsigned int jpeg_addr = 0; if (ctx->mode == S5P_JPEG_ENCODE) @@ -2354,7 +2357,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) else vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0); exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr); } @@ -3220,7 +3223,7 @@ static struct platform_driver s5p_jpeg_driver = { module_platform_driver(s5p_jpeg_driver); -MODULE_AUTHOR("Andrzej Pietrasiewicz "); +MODULE_AUTHOR("Andrzej Pietrasiewicz "); MODULE_AUTHOR("Jacek Anaszewski "); MODULE_DESCRIPTION("Samsung JPEG codec driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h index a46465e10351..144c102ff05f 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h @@ -3,7 +3,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Author: Andrzej Pietrasiewicz + * Author: Andrzej Pietrasiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -153,7 +153,7 @@ struct s5p_jpeg_variant { /** * struct jpeg_fmt - driver's internal color format data - * @name: format descritpion + * @name: format description * @fourcc: the fourcc code, 0 if not applicable * @depth: number of bits per pixel * @colplanes: number of color planes (1 for packed formats) @@ -193,7 +193,7 @@ struct s5p_jpeg_marker { * @sos: SOS marker's position relative to the buffer beginning * @dht: DHT markers' positions relative to the buffer beginning * @dqt: DQT markers' positions relative to the buffer beginning - * @sof: SOF0 marker's postition relative to the buffer beginning + * @sof: SOF0 marker's position relative to the buffer beginning * @sof_len: SOF0 marker's payload length (without length field itself) * @components: number of image components * @size: image buffer size in bytes diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c index b5f20e722b63..59c6263a71bf 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c +++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.c @@ -3,7 +3,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. 
* http://www.samsung.com * - * Author: Andrzej Pietrasiewicz + * Author: Andrzej Pietrasiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h index f208fa3ed738..bfe746f8f750 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-hw-s5p.h @@ -3,7 +3,7 @@ * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Author: Andrzej Pietrasiewicz + * Author: Andrzej Pietrasiewicz * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/media/platform/s5p-jpeg/jpeg-regs.h b/drivers/media/platform/s5p-jpeg/jpeg-regs.h index df790b10140c..574f0e8021e5 100644 --- a/drivers/media/platform/s5p-jpeg/jpeg-regs.h +++ b/drivers/media/platform/s5p-jpeg/jpeg-regs.h @@ -5,7 +5,7 @@ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. * http://www.samsung.com * - * Author: Andrzej Pietrasiewicz + * Author: Andrzej Pietrasiewicz * Author: Jacek Anaszewski * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index 8a5ba3bec3af..9a53d3908b52 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c @@ -1089,11 +1089,17 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev, device_initialize(child); dev_set_name(child, "%s:%s", dev_name(dev), name); child->parent = dev; - child->bus = dev->bus; child->coherent_dma_mask = dev->coherent_dma_mask; child->dma_mask = dev->dma_mask; child->release = s5p_mfc_memdev_release; + /* + * The memdevs are not proper OF platform devices, so in order for them + * to be treated as valid DMA masters we need a bit of a hack to force + * them to inherit the MFC node's DMA configuration. + */ + of_dma_configure(child, dev->of_node, true); + if (device_add(child) == 0) { ret = of_reserved_mem_device_init_by_idx(child, dev->of_node, idx); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index 20442a9b9f7a..24148020baa9 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h @@ -268,7 +268,7 @@ struct s5p_mfc_priv_buf { * @enc_ctrl_handler: control framework handler for encoding * @pm: power management control * @variant: MFC hardware variant information - * @num_inst: couter of active MFC instances + * @num_inst: counter of active MFC instances * @irqlock: lock for operations on videobuf2 queues * @condlock: lock for changing/checking if a context is ready to be * processed diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c index ee7b15b335e0..9f832ba7bc8c 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c @@ -51,7 +51,7 @@ int s5p_mfc_load_firmware(struct s5p_mfc_dev *dev) struct firmware *fw_blob; int i, err = -EINVAL; - /* Firmare has to be present as a separate file or compiled + /* Firmware has to be present as a separate file or compiled * into kernel. 
*/ mfc_debug_enter(); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index f4c0e3a8f27d..e111f9c47179 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c @@ -602,7 +602,7 @@ static int vidioc_querybuf(struct file *file, void *priv, int i; if (buf->memory != V4L2_MEMORY_MMAP) { - mfc_err("Only mmaped buffers can be used\n"); + mfc_err("Only mmapped buffers can be used\n"); return -EINVAL; } mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type); diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 0913881219ff..6144e95f6425 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c @@ -1293,7 +1293,7 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) * First set the output frame buffers */ if (ctx->capture_state != QUEUE_BUFS_MMAPED) { - mfc_err("It seems that not all destination buffers were mmaped\nMFC requires that all destination are mmaped before starting processing\n"); + mfc_err("It seems that not all destination buffers were mmapped\nMFC requires that all destination are mmapped before starting processing\n"); return -EAGAIN; } if (list_empty(&ctx->src_queue)) { diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index 7c629be43205..281699ab7fe1 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c @@ -53,7 +53,7 @@ static int s5p_mfc_alloc_dec_temp_buffers_v6(struct s5p_mfc_ctx *ctx) return 0; } -/* Release temproary buffers for decoding */ +/* Release temporary buffers for decoding */ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx) { /* NOP */ @@ -1928,7 +1928,7 @@ static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) if (ctx->capture_state != QUEUE_BUFS_MMAPED) { mfc_err("It seems that not all destination buffers were\n" - "mmaped.MFC requires that all destination are mmaped\n" + "mmapped.MFC requires that all destination are mmapped\n" "before starting processing.\n"); return -EAGAIN; } diff --git a/drivers/media/platform/seco-cec/seco-cec.h b/drivers/media/platform/seco-cec/seco-cec.h index e632c4a2a044..843de8c7dfd4 100644 --- a/drivers/media/platform/seco-cec/seco-cec.h +++ b/drivers/media/platform/seco-cec/seco-cec.h @@ -106,7 +106,7 @@ #define SECOCEC_IR_COMMAND_MASK 0x007F #define SECOCEC_IR_COMMAND_SHL 0 #define SECOCEC_IR_ADDRESS_MASK 0x1F00 -#define SECOCEC_IR_ADDRESS_SHL 7 +#define SECOCEC_IR_ADDRESS_SHL 8 #define SECOCEC_IR_TOGGLE_MASK 0x8000 #define SECOCEC_IR_TOGGLE_SHL 15 diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c index 09ae64a0004c..d277cc674349 100644 --- a/drivers/media/platform/sh_veu.c +++ b/drivers/media/platform/sh_veu.c @@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu, static void sh_veu_device_run(void *priv) { struct sh_veu_dev *veu = priv; - struct vb2_buffer *src_buf, *dst_buf; + struct vb2_v4l2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx); if (src_buf && dst_buf) - sh_veu_process(veu, src_buf, dst_buf); + sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf); } /* ========== video ioctls ========== */ diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig 
deleted file mode 100644 index 669d116b8f09..000000000000 --- a/drivers/media/platform/soc_camera/Kconfig +++ /dev/null @@ -1,26 +0,0 @@ -config SOC_CAMERA - tristate "SoC camera support" - depends on VIDEO_V4L2 && HAS_DMA && I2C - select VIDEOBUF2_CORE - help - SoC Camera is a common API to several cameras, not connecting - over a bus like PCI or USB. For example some i2c camera connected - directly to the data bus of an SoC. - -config SOC_CAMERA_SCALE_CROP - tristate - -config SOC_CAMERA_PLATFORM - tristate "platform camera support" - depends on SOC_CAMERA - help - This is a generic SoC camera platform driver, useful for testing - -config VIDEO_SH_MOBILE_CEU - tristate "SuperH Mobile CEU Interface driver" - depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK - depends on ARCH_SHMOBILE || COMPILE_TEST - select VIDEOBUF2_DMA_CONTIG - select SOC_CAMERA_SCALE_CROP - ---help--- - This is a v4l2 driver for the SuperH Mobile CEU Interface diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile deleted file mode 100644 index 07a451e8b228..000000000000 --- a/drivers/media/platform/soc_camera/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -obj-$(CONFIG_SOC_CAMERA) += soc_camera.o soc_mediabus.o -obj-$(CONFIG_SOC_CAMERA_SCALE_CROP) += soc_scale_crop.o - -# a platform subdevice driver stub, allowing to support cameras by adding a -# couple of callback functions to the board code -obj-$(CONFIG_SOC_CAMERA_PLATFORM) += soc_camera_platform.o - -# soc-camera host drivers have to be linked after camera drivers -obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c deleted file mode 100644 index 6803f744e307..000000000000 --- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c +++ /dev/null @@ -1,1810 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * V4L2 Driver for SuperH Mobile CEU interface - * - * Copyright (C) 2008 Magnus Damm - * - * Based on V4L2 Driver for PXA camera host - "pxa_camera.c", - * - * Copyright (C) 2006, Sascha Hauer, Pengutronix - * Copyright (C) 2008, Guennadi Liakhovetski - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "soc_scale_crop.h" - -/* register offsets for sh7722 / sh7723 */ - -#define CAPSR 0x00 /* Capture start register */ -#define CAPCR 0x04 /* Capture control register */ -#define CAMCR 0x08 /* Capture interface control register */ -#define CMCYR 0x0c /* Capture interface cycle register */ -#define CAMOR 0x10 /* Capture interface offset register */ -#define CAPWR 0x14 /* Capture interface width register */ -#define CAIFR 0x18 /* Capture interface input format register */ -#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */ -#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */ -#define CRCNTR 0x28 /* CEU register control register */ -#define CRCMPR 0x2c /* CEU register forcible control register */ -#define CFLCR 0x30 /* Capture filter control register */ -#define CFSZR 0x34 /* Capture filter size clip register */ -#define CDWDR 0x38 /* Capture destination width register */ -#define CDAYR 0x3c /* Capture data address Y register */ -#define CDACR 0x40 /* Capture data address C register */ -#define CDBYR 
0x44 /* Capture data bottom-field address Y register */ -#define CDBCR 0x48 /* Capture data bottom-field address C register */ -#define CBDSR 0x4c /* Capture bundle destination size register */ -#define CFWCR 0x5c /* Firewall operation control register */ -#define CLFCR 0x60 /* Capture low-pass filter control register */ -#define CDOCR 0x64 /* Capture data output control register */ -#define CDDCR 0x68 /* Capture data complexity level register */ -#define CDDAR 0x6c /* Capture data complexity level address register */ -#define CEIER 0x70 /* Capture event interrupt enable register */ -#define CETCR 0x74 /* Capture event flag clear register */ -#define CSTSR 0x7c /* Capture status register */ -#define CSRTR 0x80 /* Capture software reset register */ -#define CDSSR 0x84 /* Capture data size register */ -#define CDAYR2 0x90 /* Capture data address Y register 2 */ -#define CDACR2 0x94 /* Capture data address C register 2 */ -#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ -#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ - -#undef DEBUG_GEOMETRY -#ifdef DEBUG_GEOMETRY -#define dev_geo dev_info -#else -#define dev_geo dev_dbg -#endif - -/* per video frame buffer */ -struct sh_mobile_ceu_buffer { - struct vb2_v4l2_buffer vb; /* v4l buffer must be first */ - struct list_head queue; -}; - -struct sh_mobile_ceu_dev { - struct soc_camera_host ici; - - unsigned int irq; - void __iomem *base; - size_t video_limit; - size_t buf_total; - - spinlock_t lock; /* Protects video buffer lists */ - struct list_head capture; - struct vb2_v4l2_buffer *active; - - struct sh_mobile_ceu_info *pdata; - struct completion complete; - - u32 cflcr; - - /* static max sizes either from platform data or default */ - int max_width; - int max_height; - - enum v4l2_field field; - int sequence; - unsigned long flags; - - unsigned int image_mode:1; - unsigned int is_16bit:1; - unsigned int frozen:1; -}; - -struct sh_mobile_ceu_cam { - /* CEU offsets within the camera output, before the CEU scaler */ - unsigned int ceu_left; - unsigned int ceu_top; - /* Client output, as seen by the CEU */ - unsigned int width; - unsigned int height; - /* - * User window from S_SELECTION / G_SELECTION, produced by client cropping and - * scaling, CEU scaling and CEU cropping, mapped back onto the client - * input window - */ - struct v4l2_rect subrect; - /* Camera cropping rectangle */ - struct v4l2_rect rect; - const struct soc_mbus_pixelfmt *extra_fmt; - u32 code; -}; - -static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf) -{ - return container_of(vbuf, struct sh_mobile_ceu_buffer, vb); -} - -static void ceu_write(struct sh_mobile_ceu_dev *priv, - unsigned long reg_offs, u32 data) -{ - iowrite32(data, priv->base + reg_offs); -} - -static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs) -{ - return ioread32(priv->base + reg_offs); -} - -static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev) -{ - int i, success = 0; - - ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ - - /* wait CSTSR.CPTON bit */ - for (i = 0; i < 1000; i++) { - if (!(ceu_read(pcdev, CSTSR) & 1)) { - success++; - break; - } - udelay(1); - } - - /* wait CAPSR.CPKIL bit */ - for (i = 0; i < 1000; i++) { - if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) { - success++; - break; - } - udelay(1); - } - - if (2 != success) { - dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n"); - return -EIO; - } - - return 0; -} - -/* - * Videobuf operations - */ - -/* - * .queue_setup() is 
called to check, whether the driver can accept the - * requested number of buffers and to fill in plane sizes - * for the current frame format if required - */ -static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, - unsigned int *count, unsigned int *num_planes, - unsigned int sizes[], struct device *alloc_devs[]) -{ - struct soc_camera_device *icd = soc_camera_from_vb2q(vq); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - if (!vq->num_buffers) - pcdev->sequence = 0; - - if (!*count) - *count = 2; - - /* Called from VIDIOC_REQBUFS or in compatibility mode */ - if (!*num_planes) - sizes[0] = icd->sizeimage; - else if (sizes[0] < icd->sizeimage) - return -EINVAL; - - /* If *num_planes != 0, we have already verified *count. */ - if (pcdev->video_limit) { - size_t size = PAGE_ALIGN(sizes[0]) * *count; - - if (size + pcdev->buf_total > pcdev->video_limit) - *count = (pcdev->video_limit - pcdev->buf_total) / - PAGE_ALIGN(sizes[0]); - } - - *num_planes = 1; - - dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]); - - return 0; -} - -#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */ -#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */ -#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */ -#define CEU_CEIER_VBP (1 << 20) /* vbp error */ -#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */ -#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP) - - -/* - * return value doesn't reflex the success/failure to queue the new buffer, - * but rather the status of the previous buffer. - */ -static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) -{ - struct soc_camera_device *icd = pcdev->ici.icd; - dma_addr_t phys_addr_top, phys_addr_bottom; - unsigned long top1, top2; - unsigned long bottom1, bottom2; - u32 status; - bool planar; - int ret = 0; - - /* - * The hardware is _very_ picky about this sequence. Especially - * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge - * several not-so-well documented interrupt sources in CETCR. - */ - ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK); - status = ceu_read(pcdev, CETCR); - ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC); - if (!pcdev->frozen) - ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK); - ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP); - ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW); - - /* - * When a VBP interrupt occurs, a capture end interrupt does not occur - * and the image of that frame is not captured correctly. So, soft reset - * is needed here. 
- */ - if (status & CEU_CEIER_VBP) { - sh_mobile_ceu_soft_reset(pcdev); - ret = -EIO; - } - - if (pcdev->frozen) { - complete(&pcdev->complete); - return ret; - } - - if (!pcdev->active) - return ret; - - if (V4L2_FIELD_INTERLACED_BT == pcdev->field) { - top1 = CDBYR; - top2 = CDBCR; - bottom1 = CDAYR; - bottom2 = CDACR; - } else { - top1 = CDAYR; - top2 = CDACR; - bottom1 = CDBYR; - bottom2 = CDBCR; - } - - phys_addr_top = - vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0); - - switch (icd->current_fmt->host_fmt->fourcc) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - planar = true; - break; - default: - planar = false; - } - - ceu_write(pcdev, top1, phys_addr_top); - if (V4L2_FIELD_NONE != pcdev->field) { - phys_addr_bottom = phys_addr_top + icd->bytesperline; - ceu_write(pcdev, bottom1, phys_addr_bottom); - } - - if (planar) { - phys_addr_top += icd->bytesperline * icd->user_height; - ceu_write(pcdev, top2, phys_addr_top); - if (V4L2_FIELD_NONE != pcdev->field) { - phys_addr_bottom = phys_addr_top + icd->bytesperline; - ceu_write(pcdev, bottom2, phys_addr_bottom); - } - } - - ceu_write(pcdev, CAPSR, 0x1); /* start capture */ - - return ret; -} - -static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); - - /* Added list head initialization on alloc */ - WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb); - - return 0; -} - -static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); - unsigned long size; - - size = icd->sizeimage; - - if (vb2_plane_size(vb, 0) < size) { - dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n", - vb->index, vb2_plane_size(vb, 0), size); - goto error; - } - - vb2_set_plane_payload(vb, 0, size); - - dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, - vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0)); - -#ifdef DEBUG - /* - * This can be useful if you want to see if we actually fill - * the buffer with something - */ - if (vb2_plane_vaddr(vb, 0)) - memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); -#endif - - spin_lock_irq(&pcdev->lock); - list_add_tail(&buf->queue, &pcdev->capture); - - if (!pcdev->active) { - /* - * Because there were no active buffer at this moment, - * we are not interested in the return value of - * sh_mobile_ceu_capture here. 
- */ - pcdev->active = vbuf; - sh_mobile_ceu_capture(pcdev); - } - spin_unlock_irq(&pcdev->lock); - - return; - -error: - vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); -} - -static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - spin_lock_irq(&pcdev->lock); - - if (pcdev->active == vbuf) { - /* disable capture (release DMA buffer), reset */ - ceu_write(pcdev, CAPSR, 1 << 16); - pcdev->active = NULL; - } - - /* - * Doesn't hurt also if the list is empty, but it hurts, if queuing the - * buffer failed, and .buf_init() hasn't been called - */ - if (buf->queue.next) - list_del_init(&buf->queue); - - pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0)); - dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, - pcdev->buf_total); - - spin_unlock_irq(&pcdev->lock); -} - -static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0)); - dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, - pcdev->buf_total); - - /* This is for locking debugging only */ - INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue); - return 0; -} - -static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q) -{ - struct soc_camera_device *icd = soc_camera_from_vb2q(q); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct list_head *buf_head, *tmp; - struct vb2_v4l2_buffer *vbuf; - - spin_lock_irq(&pcdev->lock); - - pcdev->active = NULL; - - list_for_each_safe(buf_head, tmp, &pcdev->capture) { - vbuf = &list_entry(buf_head, struct sh_mobile_ceu_buffer, - queue)->vb; - vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); - list_del_init(buf_head); - } - - spin_unlock_irq(&pcdev->lock); - - sh_mobile_ceu_soft_reset(pcdev); -} - -static const struct vb2_ops sh_mobile_ceu_videobuf_ops = { - .queue_setup = sh_mobile_ceu_videobuf_setup, - .buf_prepare = sh_mobile_ceu_videobuf_prepare, - .buf_queue = sh_mobile_ceu_videobuf_queue, - .buf_cleanup = sh_mobile_ceu_videobuf_release, - .buf_init = sh_mobile_ceu_videobuf_init, - .wait_prepare = vb2_ops_wait_prepare, - .wait_finish = vb2_ops_wait_finish, - .stop_streaming = sh_mobile_ceu_stop_streaming, -}; - -static irqreturn_t sh_mobile_ceu_irq(int irq, void *data) -{ - struct sh_mobile_ceu_dev *pcdev = data; - struct vb2_v4l2_buffer *vbuf; - int ret; - - spin_lock(&pcdev->lock); - - vbuf = pcdev->active; - if (!vbuf) - /* Stale interrupt from a released buffer */ - goto out; - - list_del_init(&to_ceu_vb(vbuf)->queue); - - if (!list_empty(&pcdev->capture)) - pcdev->active = &list_entry(pcdev->capture.next, - struct sh_mobile_ceu_buffer, queue)->vb; - else - pcdev->active = NULL; - - ret = sh_mobile_ceu_capture(pcdev); - vbuf->vb2_buf.timestamp = ktime_get_ns(); - if (!ret) { - vbuf->field = pcdev->field; - vbuf->sequence = pcdev->sequence++; - } - vb2_buffer_done(&vbuf->vb2_buf, - ret < 0 ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); - -out: - spin_unlock(&pcdev->lock); - - return IRQ_HANDLED; -} - -static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) -{ - dev_info(icd->parent, - "SuperH Mobile CEU driver attached to camera %d\n", - icd->devnum); - - return 0; -} - -static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) -{ - dev_info(icd->parent, - "SuperH Mobile CEU driver detached from camera %d\n", - icd->devnum); -} - -/* Called with .host_lock held */ -static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici) -{ - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - pm_runtime_get_sync(ici->v4l2_dev.dev); - - pcdev->buf_total = 0; - - sh_mobile_ceu_soft_reset(pcdev); - - return 0; -} - -/* Called with .host_lock held */ -static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici) -{ - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - /* disable capture, disable interrupts */ - ceu_write(pcdev, CEIER, 0); - sh_mobile_ceu_soft_reset(pcdev); - - /* make sure active buffer is canceled */ - spin_lock_irq(&pcdev->lock); - if (pcdev->active) { - list_del_init(&to_ceu_vb(pcdev->active)->queue); - vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR); - pcdev->active = NULL; - } - spin_unlock_irq(&pcdev->lock); - - pm_runtime_put(ici->v4l2_dev.dev); -} - -/* - * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)" - * in SH7722 Hardware Manual - */ -static unsigned int size_dst(unsigned int src, unsigned int scale) -{ - unsigned int mant_pre = scale >> 12; - if (!src || !scale) - return src; - return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) * - mant_pre * 4096 / scale + 1; -} - -static u16 calc_scale(unsigned int src, unsigned int *dst) -{ - u16 scale; - - if (src == *dst) - return 0; - - scale = (src * 4096 / *dst) & ~7; - - while (scale > 4096 && size_dst(src, scale) < *dst) - scale -= 8; - - *dst = size_dst(src, scale); - - return scale; -} - -/* rect is guaranteed to not exceed the scaled camera rectangle */ -static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct sh_mobile_ceu_dev *pcdev = ici->priv; - unsigned int height, width, cdwdr_width, in_width, in_height; - unsigned int left_offset, top_offset; - u32 camor; - - dev_geo(icd->parent, "Crop %ux%u@%u:%u\n", - icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); - - left_offset = cam->ceu_left; - top_offset = cam->ceu_top; - - WARN_ON(icd->user_width & 3 || icd->user_height & 3); - - width = icd->user_width; - - if (pcdev->image_mode) { - in_width = cam->width; - if (!pcdev->is_16bit) { - in_width *= 2; - left_offset *= 2; - } - } else { - unsigned int w_factor; - - switch (icd->current_fmt->host_fmt->packing) { - case SOC_MBUS_PACKING_2X8_PADHI: - w_factor = 2; - break; - default: - w_factor = 1; - } - - in_width = cam->width * w_factor; - left_offset *= w_factor; - } - - cdwdr_width = icd->bytesperline; - - height = icd->user_height; - in_height = cam->height; - if (V4L2_FIELD_NONE != pcdev->field) { - height = (height / 2) & ~3; - in_height /= 2; - top_offset /= 2; - cdwdr_width *= 2; - } - - /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ - camor = left_offset | (top_offset << 16); - - dev_geo(icd->parent, - "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor, - (in_height << 16) | in_width, (height << 16) | width, - cdwdr_width); - - ceu_write(pcdev, CAMOR, camor); - ceu_write(pcdev, CAPWR, 
(in_height << 16) | in_width); - /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */ - ceu_write(pcdev, CFSZR, (height << 16) | width); - ceu_write(pcdev, CDWDR, cdwdr_width); -} - -static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev) -{ - u32 capsr = ceu_read(pcdev, CAPSR); - ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */ - return capsr; -} - -static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr) -{ - unsigned long timeout = jiffies + 10 * HZ; - - /* - * Wait until the end of the current frame. It can take a long time, - * but if it has been aborted by a CAPSR reset, it shoule exit sooner. - */ - while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout)) - msleep(1); - - if (time_after(jiffies, timeout)) { - dev_err(pcdev->ici.v4l2_dev.dev, - "Timeout waiting for frame end! Interface problem?\n"); - return; - } - - /* Wait until reset clears, this shall not hang... */ - while (ceu_read(pcdev, CAPSR) & (1 << 16)) - udelay(10); - - /* Anything to restore? */ - if (capsr & ~(1 << 16)) - ceu_write(pcdev, CAPSR, capsr); -} - -#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \ - V4L2_MBUS_PCLK_SAMPLE_RISING | \ - V4L2_MBUS_HSYNC_ACTIVE_HIGH | \ - V4L2_MBUS_HSYNC_ACTIVE_LOW | \ - V4L2_MBUS_VSYNC_ACTIVE_HIGH | \ - V4L2_MBUS_VSYNC_ACTIVE_LOW | \ - V4L2_MBUS_DATA_ACTIVE_HIGH) - -/* Capture is not running, no interrupts, no locking needed */ -static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; - unsigned long value, common_flags = CEU_BUS_FLAGS; - u32 capsr = capture_save_reset(pcdev); - unsigned int yuv_lineskip; - int ret; - - /* - * If the client doesn't implement g_mbus_config, we just use our - * platform data - */ - ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); - if (!ret) { - common_flags = soc_mbus_config_compatible(&cfg, - common_flags); - if (!common_flags) - return -EINVAL; - } else if (ret != -ENOIOCTLCMD) { - return ret; - } - - /* Make choises, based on platform preferences */ - if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) && - (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) { - if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW) - common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH; - else - common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW; - } - - if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) && - (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) { - if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW) - common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH; - else - common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW; - } - - cfg.flags = common_flags; - ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg); - if (ret < 0 && ret != -ENOIOCTLCMD) - return ret; - - if (icd->current_fmt->host_fmt->bits_per_sample > 8) - pcdev->is_16bit = 1; - else - pcdev->is_16bit = 0; - - ceu_write(pcdev, CRCNTR, 0); - ceu_write(pcdev, CRCMPR, 0); - - value = 0x00000010; /* data fetch by default */ - yuv_lineskip = 0x10; - - switch (icd->current_fmt->host_fmt->fourcc) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - /* convert 4:2:2 -> 4:2:0 */ - yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */ - /* fall-through */ - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - switch (cam->code) { - case MEDIA_BUS_FMT_UYVY8_2X8: - value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ - 
break; - case MEDIA_BUS_FMT_VYUY8_2X8: - value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ - break; - case MEDIA_BUS_FMT_YUYV8_2X8: - value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ - break; - case MEDIA_BUS_FMT_YVYU8_2X8: - value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ - break; - default: - BUG(); - } - } - - if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 || - icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61) - value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */ - - value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0; - value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0; - - if (pcdev->is_16bit) - value |= 1 << 12; - else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT) - value |= 2 << 12; - - ceu_write(pcdev, CAMCR, value); - - ceu_write(pcdev, CAPCR, 0x00300000); - - switch (pcdev->field) { - case V4L2_FIELD_INTERLACED_TB: - value = 0x101; - break; - case V4L2_FIELD_INTERLACED_BT: - value = 0x102; - break; - default: - value = 0; - break; - } - ceu_write(pcdev, CAIFR, value); - - sh_mobile_ceu_set_rect(icd); - mdelay(1); - - dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr); - ceu_write(pcdev, CFLCR, pcdev->cflcr); - - /* - * A few words about byte order (observed in Big Endian mode) - * - * In data fetch mode bytes are received in chunks of 8 bytes. - * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first) - * - * The data is however by default written to memory in reverse order: - * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte) - * - * The lowest three bits of CDOCR allows us to do swapping, - * using 7 we swap the data bytes to match the incoming order: - * D0, D1, D2, D3, D4, D5, D6, D7 - */ - value = 0x00000007 | yuv_lineskip; - - ceu_write(pcdev, CDOCR, value); - ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */ - - capture_restore(pcdev, capsr); - - /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */ - return 0; -} - -static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd, - unsigned char buswidth) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - unsigned long common_flags = CEU_BUS_FLAGS; - struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; - int ret; - - ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); - if (!ret) - common_flags = soc_mbus_config_compatible(&cfg, - common_flags); - else if (ret != -ENOIOCTLCMD) - return ret; - - if (!common_flags || buswidth > 16) - return -EINVAL; - - return 0; -} - -static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = { - { - .fourcc = V4L2_PIX_FMT_NV12, - .name = "NV12", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, - }, { - .fourcc = V4L2_PIX_FMT_NV21, - .name = "NV21", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_1_5X8, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, - }, { - .fourcc = V4L2_PIX_FMT_NV16, - .name = "NV16", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, - }, { - .fourcc = V4L2_PIX_FMT_NV61, - .name = "NV61", - .bits_per_sample = 8, - .packing = SOC_MBUS_PACKING_2X8_PADHI, - .order = SOC_MBUS_ORDER_LE, - .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, - }, -}; - -/* This will be corrected as we get more formats */ -static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) -{ - return fmt->packing == SOC_MBUS_PACKING_NONE || - (fmt->bits_per_sample == 8 && - fmt->packing == 
SOC_MBUS_PACKING_1_5X8) || - (fmt->bits_per_sample == 8 && - fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || - (fmt->bits_per_sample > 8 && - fmt->packing == SOC_MBUS_PACKING_EXTEND16); -} - -static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl) -{ - return container_of(ctrl->handler, struct soc_camera_device, - ctrl_handler); -} - -static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl) -{ - struct soc_camera_device *icd = ctrl_to_icd(ctrl); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - - switch (ctrl->id) { - case V4L2_CID_SHARPNESS: - switch (icd->current_fmt->host_fmt->fourcc) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - ceu_write(pcdev, CLFCR, !ctrl->val); - return 0; - } - break; - } - - return -EINVAL; -} - -static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = { - .s_ctrl = sh_mobile_ceu_s_ctrl, -}; - -static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, - struct soc_camera_format_xlate *xlate) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct device *dev = icd->parent; - struct soc_camera_host *ici = to_soc_camera_host(dev); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - int ret, k, n; - int formats = 0; - struct sh_mobile_ceu_cam *cam; - struct v4l2_subdev_mbus_code_enum code = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .index = idx, - }; - const struct soc_mbus_pixelfmt *fmt; - - ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code); - if (ret < 0) - /* No more formats */ - return 0; - - fmt = soc_mbus_get_fmtdesc(code.code); - if (!fmt) { - dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code); - return 0; - } - - ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); - if (ret < 0) - return 0; - - if (!icd->host_priv) { - struct v4l2_subdev_format fmt = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - struct v4l2_mbus_framefmt *mf = &fmt.format; - struct v4l2_rect rect; - int shift = 0; - - /* Add our control */ - v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops, - V4L2_CID_SHARPNESS, 0, 1, 1, 1); - if (icd->ctrl_handler.error) - return icd->ctrl_handler.error; - - /* FIXME: subwindow is lost between close / open */ - - /* Cache current client geometry */ - ret = soc_camera_client_g_rect(sd, &rect); - if (ret < 0) - return ret; - - /* First time */ - ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); - if (ret < 0) - return ret; - - /* - * All currently existing CEU implementations support 2560x1920 - * or larger frames. If the sensor is proposing too big a frame, - * don't bother with possibly supportred by the CEU larger - * sizes, just try VGA multiples. If needed, this can be - * adjusted in the future. 
- */ - while ((mf->width > pcdev->max_width || - mf->height > pcdev->max_height) && shift < 4) { - /* Try 2560x1920, 1280x960, 640x480, 320x240 */ - mf->width = 2560 >> shift; - mf->height = 1920 >> shift; - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, &fmt); - if (ret < 0) - return ret; - shift++; - } - - if (shift == 4) { - dev_err(dev, "Failed to configure the client below %ux%x\n", - mf->width, mf->height); - return -EIO; - } - - dev_geo(dev, "camera fmt %ux%u\n", mf->width, mf->height); - - cam = kzalloc(sizeof(*cam), GFP_KERNEL); - if (!cam) - return -ENOMEM; - - /* We are called with current camera crop, initialise subrect with it */ - cam->rect = rect; - cam->subrect = rect; - - cam->width = mf->width; - cam->height = mf->height; - - icd->host_priv = cam; - } else { - cam = icd->host_priv; - } - - /* Beginning of a pass */ - if (!idx) - cam->extra_fmt = NULL; - - switch (code.code) { - case MEDIA_BUS_FMT_UYVY8_2X8: - case MEDIA_BUS_FMT_VYUY8_2X8: - case MEDIA_BUS_FMT_YUYV8_2X8: - case MEDIA_BUS_FMT_YVYU8_2X8: - if (cam->extra_fmt) - break; - - /* - * Our case is simple so far: for any of the above four camera - * formats we add all our four synthesized NV* formats, so, - * just marking the device with a single flag suffices. If - * the format generation rules are more complex, you would have - * to actually hang your already added / counted formats onto - * the host_priv pointer and check whether the format you're - * going to add now is already there. - */ - cam->extra_fmt = sh_mobile_ceu_formats; - - n = ARRAY_SIZE(sh_mobile_ceu_formats); - formats += n; - for (k = 0; xlate && k < n; k++) { - xlate->host_fmt = &sh_mobile_ceu_formats[k]; - xlate->code = code.code; - xlate++; - dev_dbg(dev, "Providing format %s using code %d\n", - sh_mobile_ceu_formats[k].name, code.code); - } - break; - default: - if (!sh_mobile_ceu_packing_supported(fmt)) - return 0; - } - - /* Generic pass-through */ - formats++; - if (xlate) { - xlate->host_fmt = fmt; - xlate->code = code.code; - xlate++; - dev_dbg(dev, "Providing format %s in pass-through mode\n", - fmt->name); - } - - return formats; -} - -static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) -{ - kfree(icd->host_priv); - icd->host_priv = NULL; -} - -#define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale) -#define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out) - -/* - * CEU can scale and crop, but we don't want to waste bandwidth and kill the - * framerate by always requesting the maximum image from the client. See - * Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst for a description of - * scaling and cropping algorithms and for the meaning of referenced here steps. 
- */ -static int sh_mobile_ceu_set_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_rect *rect = &sel->r; - struct device *dev = icd->parent; - struct soc_camera_host *ici = to_soc_camera_host(dev); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct v4l2_selection cam_sel; - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct v4l2_rect *cam_rect = &cam_sel.r; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_format fmt = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - }; - struct v4l2_mbus_framefmt *mf = &fmt.format; - unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v, - out_width, out_height; - int interm_width, interm_height; - u32 capsr, cflcr; - int ret; - - dev_geo(dev, "S_SELECTION(%ux%u@%u:%u)\n", rect->width, rect->height, - rect->left, rect->top); - - /* During camera cropping its output window can change too, stop CEU */ - capsr = capture_save_reset(pcdev); - dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); - - /* - * 1. - 2. Apply iterative camera S_SELECTION for new input window, read back - * actual camera rectangle. - */ - ret = soc_camera_client_s_selection(sd, sel, &cam_sel, - &cam->rect, &cam->subrect); - if (ret < 0) - return ret; - - dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n", - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top); - - /* On success cam_crop contains current camera crop */ - - /* 3. Retrieve camera output window */ - ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); - if (ret < 0) - return ret; - - if (mf->width > pcdev->max_width || mf->height > pcdev->max_height) - return -EINVAL; - - /* 4. Calculate camera scales */ - scale_cam_h = calc_generic_scale(cam_rect->width, mf->width); - scale_cam_v = calc_generic_scale(cam_rect->height, mf->height); - - /* Calculate intermediate window */ - interm_width = scale_down(rect->width, scale_cam_h); - interm_height = scale_down(rect->height, scale_cam_v); - - if (interm_width < icd->user_width) { - u32 new_scale_h; - - new_scale_h = calc_generic_scale(rect->width, icd->user_width); - - mf->width = scale_down(cam_rect->width, new_scale_h); - } - - if (interm_height < icd->user_height) { - u32 new_scale_v; - - new_scale_v = calc_generic_scale(rect->height, icd->user_height); - - mf->height = scale_down(cam_rect->height, new_scale_v); - } - - if (interm_width < icd->user_width || interm_height < icd->user_height) { - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, &fmt); - if (ret < 0) - return ret; - - dev_geo(dev, "New camera output %ux%u\n", mf->width, mf->height); - scale_cam_h = calc_generic_scale(cam_rect->width, mf->width); - scale_cam_v = calc_generic_scale(cam_rect->height, mf->height); - interm_width = scale_down(rect->width, scale_cam_h); - interm_height = scale_down(rect->height, scale_cam_v); - } - - /* Cache camera output window */ - cam->width = mf->width; - cam->height = mf->height; - - if (pcdev->image_mode) { - out_width = min(interm_width, icd->user_width); - out_height = min(interm_height, icd->user_height); - } else { - out_width = interm_width; - out_height = interm_height; - } - - /* - * 5. Calculate CEU scales from camera scales from results of (5) and - * the user window - */ - scale_ceu_h = calc_scale(interm_width, &out_width); - scale_ceu_v = calc_scale(interm_height, &out_height); - - dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v); - - /* Apply CEU scales. 
*/ - cflcr = scale_ceu_h | (scale_ceu_v << 16); - if (cflcr != pcdev->cflcr) { - pcdev->cflcr = cflcr; - ceu_write(pcdev, CFLCR, cflcr); - } - - icd->user_width = out_width & ~3; - icd->user_height = out_height & ~3; - /* Offsets are applied at the CEU scaling filter input */ - cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1; - cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1; - - /* 6. Use CEU cropping to crop to the new window. */ - sh_mobile_ceu_set_rect(icd); - - cam->subrect = *rect; - - dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n", - icd->user_width, icd->user_height, - cam->ceu_left, cam->ceu_top); - - /* Restore capture. The CE bit can be cleared by the hardware */ - if (pcdev->active) - capsr |= 1; - capture_restore(pcdev, capsr); - - /* Even if only camera cropping succeeded */ - return ret; -} - -static int sh_mobile_ceu_get_selection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct sh_mobile_ceu_cam *cam = icd->host_priv; - - sel->r = cam->subrect; - - return 0; -} - -/* Similar to set_crop multistage iterative algorithm */ -static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct device *dev = icd->parent; - struct soc_camera_host *ici = to_soc_camera_host(dev); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - struct sh_mobile_ceu_cam *cam = icd->host_priv; - struct v4l2_pix_format *pix = &f->fmt.pix; - struct v4l2_mbus_framefmt mf; - __u32 pixfmt = pix->pixelformat; - const struct soc_camera_format_xlate *xlate; - unsigned int ceu_sub_width = pcdev->max_width, - ceu_sub_height = pcdev->max_height; - u16 scale_v, scale_h; - int ret; - bool image_mode; - enum v4l2_field field; - - switch (pix->field) { - default: - pix->field = V4L2_FIELD_NONE; - /* fall-through */ - case V4L2_FIELD_INTERLACED_TB: - case V4L2_FIELD_INTERLACED_BT: - case V4L2_FIELD_NONE: - field = pix->field; - break; - case V4L2_FIELD_INTERLACED: - field = V4L2_FIELD_INTERLACED_TB; - break; - } - - xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); - if (!xlate) { - dev_warn(dev, "Format %x not found\n", pixfmt); - return -EINVAL; - } - - /* 1.-4. Calculate desired client output geometry */ - soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12); - mf.field = pix->field; - mf.colorspace = pix->colorspace; - mf.code = xlate->code; - - switch (pixfmt) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - image_mode = true; - break; - default: - image_mode = false; - } - - dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code, - pix->width, pix->height); - - dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height); - - /* 5. - 9. */ - ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect, - &mf, &ceu_sub_width, &ceu_sub_height, - image_mode && V4L2_FIELD_NONE == field, 12); - - dev_geo(dev, "5-9: client scale return %d\n", ret); - - /* Done with the camera. Now see if we can improve the result */ - - dev_geo(dev, "fmt %ux%u, requested %ux%u\n", - mf.width, mf.height, pix->width, pix->height); - if (ret < 0) - return ret; - - if (mf.code != xlate->code) - return -EINVAL; - - /* 9. Prepare CEU crop */ - cam->width = mf.width; - cam->height = mf.height; - - /* 10. Use CEU scaling to scale to the requested user window. 
*/ - - /* We cannot scale up */ - if (pix->width > ceu_sub_width) - ceu_sub_width = pix->width; - - if (pix->height > ceu_sub_height) - ceu_sub_height = pix->height; - - pix->colorspace = mf.colorspace; - - if (image_mode) { - /* Scale pix->{width x height} down to width x height */ - scale_h = calc_scale(ceu_sub_width, &pix->width); - scale_v = calc_scale(ceu_sub_height, &pix->height); - } else { - pix->width = ceu_sub_width; - pix->height = ceu_sub_height; - scale_h = 0; - scale_v = 0; - } - - pcdev->cflcr = scale_h | (scale_v << 16); - - /* - * We have calculated CFLCR, the actual configuration will be performed - * in sh_mobile_ceu_set_bus_param() - */ - - dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", - ceu_sub_width, scale_h, pix->width, - ceu_sub_height, scale_v, pix->height); - - cam->code = xlate->code; - icd->current_fmt = xlate; - - pcdev->field = field; - pcdev->image_mode = image_mode; - - /* CFSZR requirement */ - pix->width &= ~3; - pix->height &= ~3; - - return 0; -} - -#define CEU_CHDW_MAX 8188U /* Maximum line stride */ - -static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, - struct v4l2_format *f) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - const struct soc_camera_format_xlate *xlate; - struct v4l2_pix_format *pix = &f->fmt.pix; - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct v4l2_subdev_pad_config pad_cfg; - struct v4l2_subdev_format format = { - .which = V4L2_SUBDEV_FORMAT_TRY, - }; - struct v4l2_mbus_framefmt *mf = &format.format; - __u32 pixfmt = pix->pixelformat; - int width, height; - int ret; - - dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n", - pixfmt, pix->width, pix->height); - - xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); - if (!xlate) { - xlate = icd->current_fmt; - dev_dbg(icd->parent, "Format %x not found, keeping %x\n", - pixfmt, xlate->host_fmt->fourcc); - pixfmt = xlate->host_fmt->fourcc; - pix->pixelformat = pixfmt; - pix->colorspace = icd->colorspace; - } - - /* FIXME: calculate using depth and bus width */ - - /* CFSZR requires height and width to be 4-pixel aligned */ - v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2, - &pix->height, 4, pcdev->max_height, 2, 0); - - width = pix->width; - height = pix->height; - - /* limit to sensor capabilities */ - mf->width = pix->width; - mf->height = pix->height; - mf->field = pix->field; - mf->code = xlate->code; - mf->colorspace = pix->colorspace; - - ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), - pad, set_fmt, &pad_cfg, &format); - if (ret < 0) - return ret; - - pix->width = mf->width; - pix->height = mf->height; - pix->field = mf->field; - pix->colorspace = mf->colorspace; - - switch (pixfmt) { - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - /* FIXME: check against rect_max after converting soc-camera */ - /* We can scale precisely, need a bigger image from camera */ - if (pix->width < width || pix->height < height) { - /* - * We presume, the sensor behaves sanely, i.e., if - * requested a bigger rectangle, it will not return a - * smaller one. - */ - mf->width = pcdev->max_width; - mf->height = pcdev->max_height; - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, &pad_cfg, &format); - if (ret < 0) { - /* Shouldn't actually happen... 
*/ - dev_err(icd->parent, - "FIXME: client try_fmt() = %d\n", ret); - return ret; - } - } - /* We will scale exactly */ - if (mf->width > width) - pix->width = width; - if (mf->height > height) - pix->height = height; - - pix->bytesperline = max(pix->bytesperline, pix->width); - pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX); - pix->bytesperline &= ~3; - break; - - default: - /* Configurable stride isn't supported in pass-through mode. */ - pix->bytesperline = 0; - } - - pix->width &= ~3; - pix->height &= ~3; - pix->sizeimage = 0; - - dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n", - __func__, ret, pix->pixelformat, pix->width, pix->height); - - return ret; -} - -static int sh_mobile_ceu_set_liveselection(struct soc_camera_device *icd, - struct v4l2_selection *sel) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - struct sh_mobile_ceu_dev *pcdev = ici->priv; - u32 out_width = icd->user_width, out_height = icd->user_height; - int ret; - - /* Freeze queue */ - pcdev->frozen = 1; - /* Wait for frame */ - ret = wait_for_completion_interruptible(&pcdev->complete); - /* Stop the client */ - ret = v4l2_subdev_call(sd, video, s_stream, 0); - if (ret < 0) - dev_warn(icd->parent, - "Client failed to stop the stream: %d\n", ret); - else - /* Do the crop, if it fails, there's nothing more we can do */ - sh_mobile_ceu_set_selection(icd, sel); - - dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height); - - if (icd->user_width != out_width || icd->user_height != out_height) { - struct v4l2_format f = { - .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, - .fmt.pix = { - .width = out_width, - .height = out_height, - .pixelformat = icd->current_fmt->host_fmt->fourcc, - .field = pcdev->field, - .colorspace = icd->colorspace, - }, - }; - ret = sh_mobile_ceu_set_fmt(icd, &f); - if (!ret && (out_width != f.fmt.pix.width || - out_height != f.fmt.pix.height)) - ret = -EINVAL; - if (!ret) { - icd->user_width = out_width & ~3; - icd->user_height = out_height & ~3; - ret = sh_mobile_ceu_set_bus_param(icd); - } - } - - /* Thaw the queue */ - pcdev->frozen = 0; - spin_lock_irq(&pcdev->lock); - sh_mobile_ceu_capture(pcdev); - spin_unlock_irq(&pcdev->lock); - /* Start the client */ - ret = v4l2_subdev_call(sd, video, s_stream, 1); - return ret; -} - -static __poll_t sh_mobile_ceu_poll(struct file *file, poll_table *pt) -{ - struct soc_camera_device *icd = file->private_data; - - return vb2_poll(&icd->vb2_vidq, file, pt); -} - -static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, - struct v4l2_capability *cap) -{ - strscpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); - strscpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver)); - strscpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info)); - cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; - cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; - - return 0; -} - -static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q, - struct soc_camera_device *icd) -{ - struct soc_camera_host *ici = to_soc_camera_host(icd->parent); - - q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR; - q->drv_priv = icd; - q->ops = &sh_mobile_ceu_videobuf_ops; - q->mem_ops = &vb2_dma_contig_memops; - q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer); - q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - q->lock = &ici->host_lock; - q->dev = ici->v4l2_dev.dev; - - return vb2_queue_init(q); -} - 
-static struct soc_camera_host_ops sh_mobile_ceu_host_ops = { - .owner = THIS_MODULE, - .add = sh_mobile_ceu_add_device, - .remove = sh_mobile_ceu_remove_device, - .clock_start = sh_mobile_ceu_clock_start, - .clock_stop = sh_mobile_ceu_clock_stop, - .get_formats = sh_mobile_ceu_get_formats, - .put_formats = sh_mobile_ceu_put_formats, - .get_selection = sh_mobile_ceu_get_selection, - .set_selection = sh_mobile_ceu_set_selection, - .set_liveselection = sh_mobile_ceu_set_liveselection, - .set_fmt = sh_mobile_ceu_set_fmt, - .try_fmt = sh_mobile_ceu_try_fmt, - .poll = sh_mobile_ceu_poll, - .querycap = sh_mobile_ceu_querycap, - .set_bus_param = sh_mobile_ceu_set_bus_param, - .init_videobuf2 = sh_mobile_ceu_init_videobuf, -}; - -struct bus_wait { - struct notifier_block notifier; - struct completion completion; - struct device *dev; -}; - -static int bus_notify(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - struct bus_wait *wait = container_of(nb, struct bus_wait, notifier); - - if (wait->dev != dev) - return NOTIFY_DONE; - - switch (action) { - case BUS_NOTIFY_UNBOUND_DRIVER: - /* Protect from module unloading */ - wait_for_completion(&wait->completion); - return NOTIFY_OK; - } - return NOTIFY_DONE; -} - -static int sh_mobile_ceu_probe(struct platform_device *pdev) -{ - struct sh_mobile_ceu_dev *pcdev; - struct resource *res; - void __iomem *base; - unsigned int irq; - int err; - struct bus_wait wait = { - .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), - .notifier.notifier_call = bus_notify, - }; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (!res || (int)irq <= 0) { - dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); - return -ENODEV; - } - - pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL); - if (!pcdev) { - dev_err(&pdev->dev, "Could not allocate pcdev\n"); - return -ENOMEM; - } - - INIT_LIST_HEAD(&pcdev->capture); - spin_lock_init(&pcdev->lock); - init_completion(&pcdev->complete); - - pcdev->pdata = pdev->dev.platform_data; - if (!pcdev->pdata && !pdev->dev.of_node) { - dev_err(&pdev->dev, "CEU platform data not set.\n"); - return -EINVAL; - } - - /* TODO: implement per-device bus flags */ - if (pcdev->pdata) { - pcdev->max_width = pcdev->pdata->max_width; - pcdev->max_height = pcdev->pdata->max_height; - pcdev->flags = pcdev->pdata->flags; - } - pcdev->field = V4L2_FIELD_NONE; - - if (!pcdev->max_width) { - unsigned int v; - err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v); - if (!err) - pcdev->max_width = v; - - if (!pcdev->max_width) - pcdev->max_width = 2560; - } - if (!pcdev->max_height) { - unsigned int v; - err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v); - if (!err) - pcdev->max_height = v; - - if (!pcdev->max_height) - pcdev->max_height = 1920; - } - - base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); - - pcdev->irq = irq; - pcdev->base = base; - pcdev->video_limit = 0; /* only enabled if second resource exists */ - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) { - err = dma_declare_coherent_memory(&pdev->dev, res->start, - res->start, - resource_size(res), - DMA_MEMORY_EXCLUSIVE); - if (err) { - dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); - return err; - } - - pcdev->video_limit = resource_size(res); - } - - /* request irq */ - err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, - 0, dev_name(&pdev->dev), 
pcdev); - if (err) { - dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); - goto exit_release_mem; - } - - pm_suspend_ignore_children(&pdev->dev, true); - pm_runtime_enable(&pdev->dev); - pm_runtime_resume(&pdev->dev); - - pcdev->ici.priv = pcdev; - pcdev->ici.v4l2_dev.dev = &pdev->dev; - pcdev->ici.nr = pdev->id; - pcdev->ici.drv_name = dev_name(&pdev->dev); - pcdev->ici.ops = &sh_mobile_ceu_host_ops; - pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE; - - if (pcdev->pdata && pcdev->pdata->asd_sizes) { - pcdev->ici.asd = pcdev->pdata->asd; - pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes; - } - - err = soc_camera_host_register(&pcdev->ici); - if (err) - goto exit_free_clk; - - return 0; - -exit_free_clk: - pm_runtime_disable(&pdev->dev); -exit_release_mem: - if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) - dma_release_declared_memory(&pdev->dev); - return err; -} - -static int sh_mobile_ceu_remove(struct platform_device *pdev) -{ - struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); - - soc_camera_host_unregister(soc_host); - pm_runtime_disable(&pdev->dev); - if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) - dma_release_declared_memory(&pdev->dev); - - return 0; -} - -static int sh_mobile_ceu_runtime_nop(struct device *dev) -{ - /* Runtime PM callback shared between ->runtime_suspend() - * and ->runtime_resume(). Simply returns success. - * - * This driver re-initializes all registers after - * pm_runtime_get_sync() anyway so there is no need - * to save and restore registers here. - */ - return 0; -} - -static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { - .runtime_suspend = sh_mobile_ceu_runtime_nop, - .runtime_resume = sh_mobile_ceu_runtime_nop, -}; - -static const struct of_device_id sh_mobile_ceu_of_match[] = { - { .compatible = "renesas,sh-mobile-ceu" }, - { } -}; -MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match); - -static struct platform_driver sh_mobile_ceu_driver = { - .driver = { - .name = "sh_mobile_ceu", - .pm = &sh_mobile_ceu_dev_pm_ops, - .of_match_table = sh_mobile_ceu_of_match, - }, - .probe = sh_mobile_ceu_probe, - .remove = sh_mobile_ceu_remove, -}; - -module_platform_driver(sh_mobile_ceu_driver); - -MODULE_DESCRIPTION("SuperH Mobile CEU driver"); -MODULE_AUTHOR("Magnus Damm"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("0.1.0"); -MODULE_ALIAS("platform:sh_mobile_ceu"); diff --git a/drivers/media/platform/soc_camera/soc_camera_platform.c b/drivers/media/platform/soc_camera/soc_camera_platform.c deleted file mode 100644 index 79fbe1fea95f..000000000000 --- a/drivers/media/platform/soc_camera/soc_camera_platform.c +++ /dev/null @@ -1,188 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Generic Platform Camera Driver - * - * Copyright (C) 2008 Magnus Damm - * Based on mt9m001 driver, - * Copyright (C) 2008, Guennadi Liakhovetski - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct soc_camera_platform_priv { - struct v4l2_subdev subdev; -}; - -static struct soc_camera_platform_priv *get_priv(struct platform_device *pdev) -{ - struct v4l2_subdev *subdev = platform_get_drvdata(pdev); - return container_of(subdev, struct soc_camera_platform_priv, subdev); -} - -static int soc_camera_platform_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - return p->set_capture(p, enable); -} - -static int soc_camera_platform_fill_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_format *format) -{ - 
struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - struct v4l2_mbus_framefmt *mf = &format->format; - - mf->width = p->format.width; - mf->height = p->format.height; - mf->code = p->format.code; - mf->colorspace = p->format.colorspace; - mf->field = p->format.field; - - return 0; -} - -static int soc_camera_platform_s_power(struct v4l2_subdev *sd, int on) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - return soc_camera_set_power(p->icd->control, &p->icd->sdesc->subdev_desc, NULL, on); -} - -static const struct v4l2_subdev_core_ops platform_subdev_core_ops = { - .s_power = soc_camera_platform_s_power, -}; - -static int soc_camera_platform_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_mbus_code_enum *code) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - if (code->pad || code->index) - return -EINVAL; - - code->code = p->format.code; - return 0; -} - -static int soc_camera_platform_get_selection(struct v4l2_subdev *sd, - struct v4l2_subdev_pad_config *cfg, - struct v4l2_subdev_selection *sel) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) - return -EINVAL; - - switch (sel->target) { - case V4L2_SEL_TGT_CROP_BOUNDS: - case V4L2_SEL_TGT_CROP_DEFAULT: - case V4L2_SEL_TGT_CROP: - sel->r.left = 0; - sel->r.top = 0; - sel->r.width = p->format.width; - sel->r.height = p->format.height; - return 0; - default: - return -EINVAL; - } -} - -static int soc_camera_platform_g_mbus_config(struct v4l2_subdev *sd, - struct v4l2_mbus_config *cfg) -{ - struct soc_camera_platform_info *p = v4l2_get_subdevdata(sd); - - cfg->flags = p->mbus_param; - cfg->type = p->mbus_type; - - return 0; -} - -static const struct v4l2_subdev_video_ops platform_subdev_video_ops = { - .s_stream = soc_camera_platform_s_stream, - .g_mbus_config = soc_camera_platform_g_mbus_config, -}; - -static const struct v4l2_subdev_pad_ops platform_subdev_pad_ops = { - .enum_mbus_code = soc_camera_platform_enum_mbus_code, - .get_selection = soc_camera_platform_get_selection, - .get_fmt = soc_camera_platform_fill_fmt, - .set_fmt = soc_camera_platform_fill_fmt, -}; - -static const struct v4l2_subdev_ops platform_subdev_ops = { - .core = &platform_subdev_core_ops, - .video = &platform_subdev_video_ops, - .pad = &platform_subdev_pad_ops, -}; - -static int soc_camera_platform_probe(struct platform_device *pdev) -{ - struct soc_camera_host *ici; - struct soc_camera_platform_priv *priv; - struct soc_camera_platform_info *p = pdev->dev.platform_data; - struct soc_camera_device *icd; - - if (!p) - return -EINVAL; - - if (!p->icd) { - dev_err(&pdev->dev, - "Platform has not set soc_camera_device pointer!\n"); - return -EINVAL; - } - - priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - icd = p->icd; - - /* soc-camera convention: control's drvdata points to the subdev */ - platform_set_drvdata(pdev, &priv->subdev); - /* Set the control device reference */ - icd->control = &pdev->dev; - - ici = to_soc_camera_host(icd->parent); - - v4l2_subdev_init(&priv->subdev, &platform_subdev_ops); - v4l2_set_subdevdata(&priv->subdev, p); - strscpy(priv->subdev.name, dev_name(&pdev->dev), - sizeof(priv->subdev.name)); - - return v4l2_device_register_subdev(&ici->v4l2_dev, &priv->subdev); -} - -static int soc_camera_platform_remove(struct platform_device *pdev) -{ - struct soc_camera_platform_priv *priv = get_priv(pdev); - struct soc_camera_platform_info 
*p = v4l2_get_subdevdata(&priv->subdev); - - p->icd->control = NULL; - v4l2_device_unregister_subdev(&priv->subdev); - return 0; -} - -static struct platform_driver soc_camera_platform_driver = { - .driver = { - .name = "soc_camera_platform", - }, - .probe = soc_camera_platform_probe, - .remove = soc_camera_platform_remove, -}; - -module_platform_driver(soc_camera_platform_driver); - -MODULE_DESCRIPTION("SoC Camera Platform driver"); -MODULE_AUTHOR("Magnus Damm"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:soc_camera_platform"); diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c deleted file mode 100644 index 8d25ca0490f7..000000000000 --- a/drivers/media/platform/soc_camera/soc_scale_crop.c +++ /dev/null @@ -1,426 +0,0 @@ -/* - * soc-camera generic scaling-cropping manipulation functions - * - * Copyright (C) 2013 Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include - -#include -#include - -#include "soc_scale_crop.h" - -#ifdef DEBUG_GEOMETRY -#define dev_geo dev_info -#else -#define dev_geo dev_dbg -#endif - -/* Check if any dimension of r1 is smaller than respective one of r2 */ -static bool is_smaller(const struct v4l2_rect *r1, const struct v4l2_rect *r2) -{ - return r1->width < r2->width || r1->height < r2->height; -} - -/* Check if r1 fails to cover r2 */ -static bool is_inside(const struct v4l2_rect *r1, const struct v4l2_rect *r2) -{ - return r1->left > r2->left || r1->top > r2->top || - r1->left + r1->width < r2->left + r2->width || - r1->top + r1->height < r2->top + r2->height; -} - -/* Get and store current client crop */ -int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect) -{ - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP, - }; - int ret; - - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (!ret) { - *rect = sdsel.r; - return ret; - } - - sdsel.target = V4L2_SEL_TGT_CROP_BOUNDS; - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (!ret) - *rect = sdsel.r; - - return ret; -} -EXPORT_SYMBOL(soc_camera_client_g_rect); - -/* Client crop has changed, update our sub-rectangle to remain within the area */ -static void move_and_crop_subrect(struct v4l2_rect *rect, - struct v4l2_rect *subrect) -{ - if (rect->width < subrect->width) - subrect->width = rect->width; - - if (rect->height < subrect->height) - subrect->height = rect->height; - - if (rect->left > subrect->left) - subrect->left = rect->left; - else if (rect->left + rect->width < - subrect->left + subrect->width) - subrect->left = rect->left + rect->width - - subrect->width; - - if (rect->top > subrect->top) - subrect->top = rect->top; - else if (rect->top + rect->height < - subrect->top + subrect->height) - subrect->top = rect->top + rect->height - - subrect->height; -} - -/* - * The common for both scaling and cropping iterative approach is: - * 1. try if the client can produce exactly what requested by the user - * 2. if (1) failed, try to double the client image until we get one big enough - * 3. 
if (2) failed, try to request the maximum image - */ -int soc_camera_client_s_selection(struct v4l2_subdev *sd, - struct v4l2_selection *sel, struct v4l2_selection *cam_sel, - struct v4l2_rect *target_rect, struct v4l2_rect *subrect) -{ - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = sel->target, - .flags = sel->flags, - .r = sel->r, - }; - struct v4l2_subdev_selection bounds = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP_BOUNDS, - }; - struct v4l2_rect *rect = &sel->r, *cam_rect = &cam_sel->r; - struct device *dev = sd->v4l2_dev->dev; - int ret; - unsigned int width, height; - - v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - sel->r = sdsel.r; - ret = soc_camera_client_g_rect(sd, cam_rect); - if (ret < 0) - return ret; - - /* - * Now cam_crop contains the current camera input rectangle, and it must - * be within camera cropcap bounds - */ - if (!memcmp(rect, cam_rect, sizeof(*rect))) { - /* Even if camera S_SELECTION failed, but camera rectangle matches */ - dev_dbg(dev, "Camera S_SELECTION successful for %dx%d@%d:%d\n", - rect->width, rect->height, rect->left, rect->top); - *target_rect = *cam_rect; - return 0; - } - - /* Try to fix cropping, that camera hasn't managed to set */ - dev_geo(dev, "Fix camera S_SELECTION for %dx%d@%d:%d to %dx%d@%d:%d\n", - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top, - rect->width, rect->height, rect->left, rect->top); - - /* We need sensor maximum rectangle */ - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &bounds); - if (ret < 0) - return ret; - - /* Put user requested rectangle within sensor bounds */ - soc_camera_limit_side(&rect->left, &rect->width, sdsel.r.left, 2, - bounds.r.width); - soc_camera_limit_side(&rect->top, &rect->height, sdsel.r.top, 4, - bounds.r.height); - - /* - * Popular special case - some cameras can only handle fixed sizes like - * QVGA, VGA,... Take care to avoid infinite loop. - */ - width = max_t(unsigned int, cam_rect->width, 2); - height = max_t(unsigned int, cam_rect->height, 2); - - /* - * Loop as long as sensor is not covering the requested rectangle and - * is still within its bounds - */ - while (!ret && (is_smaller(cam_rect, rect) || - is_inside(cam_rect, rect)) && - (bounds.r.width > width || bounds.r.height > height)) { - - width *= 2; - height *= 2; - - cam_rect->width = width; - cam_rect->height = height; - - /* - * We do not know what capabilities the camera has to set up - * left and top borders. We could try to be smarter in iterating - * them, e.g., if camera current left is to the right of the - * target left, set it to the middle point between the current - * left and minimum left. But that would add too much - * complexity: we would have to iterate each border separately. - * Instead we just drop to the left and top bounds. 
- */ - if (cam_rect->left > rect->left) - cam_rect->left = bounds.r.left; - - if (cam_rect->left + cam_rect->width < rect->left + rect->width) - cam_rect->width = rect->left + rect->width - - cam_rect->left; - - if (cam_rect->top > rect->top) - cam_rect->top = bounds.r.top; - - if (cam_rect->top + cam_rect->height < rect->top + rect->height) - cam_rect->height = rect->top + rect->height - - cam_rect->top; - - sdsel.r = *cam_rect; - v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - *cam_rect = sdsel.r; - ret = soc_camera_client_g_rect(sd, cam_rect); - dev_geo(dev, "Camera S_SELECTION %d for %dx%d@%d:%d\n", ret, - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top); - } - - /* S_SELECTION must not modify the rectangle */ - if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) { - /* - * The camera failed to configure a suitable cropping, - * we cannot use the current rectangle, set to max - */ - sdsel.r = bounds.r; - v4l2_subdev_call(sd, pad, set_selection, NULL, &sdsel); - *cam_rect = sdsel.r; - - ret = soc_camera_client_g_rect(sd, cam_rect); - dev_geo(dev, "Camera S_SELECTION %d for max %dx%d@%d:%d\n", ret, - cam_rect->width, cam_rect->height, - cam_rect->left, cam_rect->top); - } - - if (!ret) { - *target_rect = *cam_rect; - move_and_crop_subrect(target_rect, subrect); - } - - return ret; -} -EXPORT_SYMBOL(soc_camera_client_s_selection); - -/* Iterative set_fmt, also updates cached client crop on success */ -static int client_set_fmt(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - unsigned int max_width, unsigned int max_height, - struct v4l2_subdev_format *format, bool host_can_scale) -{ - struct v4l2_subdev *sd = soc_camera_to_subdev(icd); - struct device *dev = icd->parent; - struct v4l2_mbus_framefmt *mf = &format->format; - unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h; - struct v4l2_subdev_selection sdsel = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .target = V4L2_SEL_TGT_CROP_BOUNDS, - }; - bool host_1to1; - int ret; - - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, format); - if (ret < 0) - return ret; - - dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height); - - if (width == mf->width && height == mf->height) { - /* Perfect! The client has done it all. 
*/ - host_1to1 = true; - goto update_cache; - } - - host_1to1 = false; - if (!host_can_scale) - goto update_cache; - - ret = v4l2_subdev_call(sd, pad, get_selection, NULL, &sdsel); - if (ret < 0) - return ret; - - if (max_width > sdsel.r.width) - max_width = sdsel.r.width; - if (max_height > sdsel.r.height) - max_height = sdsel.r.height; - - /* Camera set a format, but geometry is not precise, try to improve */ - tmp_w = mf->width; - tmp_h = mf->height; - - /* width <= max_width && height <= max_height - guaranteed by try_fmt */ - while ((width > tmp_w || height > tmp_h) && - tmp_w < max_width && tmp_h < max_height) { - tmp_w = min(2 * tmp_w, max_width); - tmp_h = min(2 * tmp_h, max_height); - mf->width = tmp_w; - mf->height = tmp_h; - ret = v4l2_device_call_until_err(sd->v4l2_dev, - soc_camera_grp_id(icd), pad, - set_fmt, NULL, format); - dev_geo(dev, "Camera scaled to %ux%u\n", - mf->width, mf->height); - if (ret < 0) { - /* This shouldn't happen */ - dev_err(dev, "Client failed to set format: %d\n", ret); - return ret; - } - } - -update_cache: - /* Update cache */ - ret = soc_camera_client_g_rect(sd, rect); - if (ret < 0) - return ret; - - if (host_1to1) - *subrect = *rect; - else - move_and_crop_subrect(rect, subrect); - - return 0; -} - -/** - * soc_camera_client_scale - * @icd: soc-camera device - * @rect: camera cropping window - * @subrect: part of rect, sent to the user - * @mf: in- / output camera output window - * @width: on input: max host input width; - * on output: user width, mapped back to input - * @height: on input: max host input height; - * on output: user height, mapped back to input - * @host_can_scale: host can scale this pixel format - * @shift: shift, used for scaling - */ -int soc_camera_client_scale(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - struct v4l2_mbus_framefmt *mf, - unsigned int *width, unsigned int *height, - bool host_can_scale, unsigned int shift) -{ - struct device *dev = icd->parent; - struct v4l2_subdev_format fmt_tmp = { - .which = V4L2_SUBDEV_FORMAT_ACTIVE, - .format = *mf, - }; - struct v4l2_mbus_framefmt *mf_tmp = &fmt_tmp.format; - unsigned int scale_h, scale_v; - int ret; - - /* - * 5. Apply iterative camera S_FMT for camera user window (also updates - * client crop cache and the imaginary sub-rectangle). - */ - ret = client_set_fmt(icd, rect, subrect, *width, *height, - &fmt_tmp, host_can_scale); - if (ret < 0) - return ret; - - dev_geo(dev, "5: camera scaled to %ux%u\n", - mf_tmp->width, mf_tmp->height); - - /* 6. Retrieve camera output window (g_fmt) */ - - /* unneeded - it is already in "mf_tmp" */ - - /* 7. Calculate new client scales. */ - scale_h = soc_camera_calc_scale(rect->width, shift, mf_tmp->width); - scale_v = soc_camera_calc_scale(rect->height, shift, mf_tmp->height); - - mf->width = mf_tmp->width; - mf->height = mf_tmp->height; - mf->colorspace = mf_tmp->colorspace; - - /* - * 8. Calculate new host crop - apply camera scales to previously - * updated "effective" crop. - */ - *width = soc_camera_shift_scale(subrect->width, shift, scale_h); - *height = soc_camera_shift_scale(subrect->height, shift, scale_v); - - dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height); - - return 0; -} -EXPORT_SYMBOL(soc_camera_client_scale); - -/* - * Calculate real client output window by applying new scales to the current - * client crop. New scales are calculated from the requested output format and - * host crop, mapped backed onto the client input (subrect). 
- */ -void soc_camera_calc_client_output(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf, - unsigned int shift) -{ - struct device *dev = icd->parent; - unsigned int scale_v, scale_h; - - if (subrect->width == rect->width && - subrect->height == rect->height) { - /* No sub-cropping */ - mf->width = pix->width; - mf->height = pix->height; - return; - } - - /* 1.-2. Current camera scales and subwin - cached. */ - - dev_geo(dev, "2: subwin %ux%u@%u:%u\n", - subrect->width, subrect->height, - subrect->left, subrect->top); - - /* - * 3. Calculate new combined scales from input sub-window to requested - * user window. - */ - - /* - * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF - * (128x96) or larger than VGA. This and similar limitations have to be - * taken into account here. - */ - scale_h = soc_camera_calc_scale(subrect->width, shift, pix->width); - scale_v = soc_camera_calc_scale(subrect->height, shift, pix->height); - - dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v); - - /* - * 4. Calculate desired client output window by applying combined scales - * to client (real) input window. - */ - mf->width = soc_camera_shift_scale(rect->width, shift, scale_h); - mf->height = soc_camera_shift_scale(rect->height, shift, scale_v); -} -EXPORT_SYMBOL(soc_camera_calc_client_output); - -MODULE_DESCRIPTION("soc-camera scaling-cropping functions"); -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.h b/drivers/media/platform/soc_camera/soc_scale_crop.h deleted file mode 100644 index 9ca469312a1f..000000000000 --- a/drivers/media/platform/soc_camera/soc_scale_crop.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * soc-camera generic scaling-cropping manipulation functions - * - * Copyright (C) 2013 Guennadi Liakhovetski - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef SOC_SCALE_CROP_H -#define SOC_SCALE_CROP_H - -#include - -struct soc_camera_device; - -struct v4l2_selection; -struct v4l2_mbus_framefmt; -struct v4l2_pix_format; -struct v4l2_rect; -struct v4l2_subdev; - -static inline unsigned int soc_camera_shift_scale(unsigned int size, - unsigned int shift, unsigned int scale) -{ - return DIV_ROUND_CLOSEST(size << shift, scale); -} - -#define soc_camera_calc_scale(in, shift, out) soc_camera_shift_scale(in, shift, out) - -int soc_camera_client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect); -int soc_camera_client_s_selection(struct v4l2_subdev *sd, - struct v4l2_selection *sel, struct v4l2_selection *cam_sel, - struct v4l2_rect *target_rect, struct v4l2_rect *subrect); -int soc_camera_client_scale(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - struct v4l2_mbus_framefmt *mf, - unsigned int *width, unsigned int *height, - bool host_can_scale, unsigned int shift); -void soc_camera_calc_client_output(struct soc_camera_device *icd, - struct v4l2_rect *rect, struct v4l2_rect *subrect, - const struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf, - unsigned int shift); - -#endif diff --git a/drivers/media/platform/sti/bdisp/bdisp-debug.c b/drivers/media/platform/sti/bdisp/bdisp-debug.c index c6a4e2de5c0c..77ca7517fa3e 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-debug.c +++ b/drivers/media/platform/sti/bdisp/bdisp-debug.c @@ -315,7 +315,7 @@ static void bdisp_dbg_dump_ivmx(struct seq_file *s, seq_puts(s, "Unknown conversion\n"); } -static int bdisp_dbg_last_nodes(struct seq_file *s, void *data) +static int last_nodes_show(struct seq_file *s, void *data) { /* Not dumping all fields, focusing on significant ones */ struct bdisp_dev *bdisp = s->private; @@ -388,7 +388,7 @@ static int bdisp_dbg_last_nodes(struct seq_file *s, void *data) return 0; } -static int bdisp_dbg_last_nodes_raw(struct seq_file *s, void *data) +static int last_nodes_raw_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; struct bdisp_node *node; @@ -437,7 +437,7 @@ static const char *bdisp_fmt_to_str(struct bdisp_frame frame) } } -static int bdisp_dbg_last_request(struct seq_file *s, void *data) +static int last_request_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; struct bdisp_request *request = &bdisp->dbg.copy_request; @@ -474,7 +474,7 @@ static int bdisp_dbg_last_request(struct seq_file *s, void *data) #define DUMP(reg) seq_printf(s, #reg " \t0x%08X\n", readl(bdisp->regs + reg)) -static int bdisp_dbg_regs(struct seq_file *s, void *data) +static int regs_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; int ret; @@ -582,7 +582,7 @@ static int bdisp_dbg_regs(struct seq_file *s, void *data) #define SECOND 1000000 -static int bdisp_dbg_perf(struct seq_file *s, void *data) +static int perf_show(struct seq_file *s, void *data) { struct bdisp_dev *bdisp = s->private; struct bdisp_request *request = &bdisp->dbg.copy_request; @@ -627,27 +627,15 @@ static int bdisp_dbg_perf(struct seq_file *s, void *data) return 0; } -#define bdisp_dbg_declare(name) \ - static int bdisp_dbg_##name##_open(struct inode *i, struct file *f) \ - { \ - return single_open(f, bdisp_dbg_##name, i->i_private); \ - } \ - static const struct file_operations bdisp_dbg_##name##_fops = { \ - .open = bdisp_dbg_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ - } - #define bdisp_dbg_create_entry(name) \ 
debugfs_create_file(#name, S_IRUGO, bdisp->dbg.debugfs_entry, bdisp, \ - &bdisp_dbg_##name##_fops) + &name##_fops) -bdisp_dbg_declare(regs); -bdisp_dbg_declare(last_nodes); -bdisp_dbg_declare(last_nodes_raw); -bdisp_dbg_declare(last_request); -bdisp_dbg_declare(perf); +DEFINE_SHOW_ATTRIBUTE(regs); +DEFINE_SHOW_ATTRIBUTE(last_nodes); +DEFINE_SHOW_ATTRIBUTE(last_nodes_raw); +DEFINE_SHOW_ATTRIBUTE(last_request); +DEFINE_SHOW_ATTRIBUTE(perf); int bdisp_debugfs_create(struct bdisp_dev *bdisp) { diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h index 3dbb3a287cc0..c9d6021904cd 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h @@ -193,7 +193,7 @@ struct c8sectpfei { #define C8SECTPFE_SYS_ENABLE BIT(0) /* - * Ponter record data structure required for each input block + * Pointer record data structure required for each input block * see Table 82 on page 167 of functional specification. */ diff --git a/drivers/media/platform/sti/delta/delta.h b/drivers/media/platform/sti/delta/delta.h index 2ba99922c05b..914556030e70 100644 --- a/drivers/media/platform/sti/delta/delta.h +++ b/drivers/media/platform/sti/delta/delta.h @@ -286,7 +286,7 @@ struct delta_dec { * Header parsing must be done using decode(), giving * explicitly header access unit or first access unit of bitstream. * If no valid header is found, get_streaminfo will return -ENODATA, - * in this case the next bistream access unit must be decoded till + * in this case the next bitstream access unit must be decoded till * get_streaminfo becomes successful. */ int (*get_streaminfo)(struct delta_ctx *ctx, diff --git a/drivers/media/platform/sti/hva/hva-debugfs.c b/drivers/media/platform/sti/hva/hva-debugfs.c index 9f7e8ac875d1..7d12a5b5d914 100644 --- a/drivers/media/platform/sti/hva/hva-debugfs.c +++ b/drivers/media/platform/sti/hva/hva-debugfs.c @@ -271,7 +271,7 @@ static void hva_dbg_perf_compute(struct hva_ctx *ctx) * device debug info */ -static int hva_dbg_device(struct seq_file *s, void *data) +static int device_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; @@ -281,7 +281,7 @@ static int hva_dbg_device(struct seq_file *s, void *data) return 0; } -static int hva_dbg_encoders(struct seq_file *s, void *data) +static int encoders_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; unsigned int i = 0; @@ -299,7 +299,7 @@ static int hva_dbg_encoders(struct seq_file *s, void *data) return 0; } -static int hva_dbg_last(struct seq_file *s, void *data) +static int last_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; struct hva_ctx *last_ctx = &hva->dbg.last_ctx; @@ -316,7 +316,7 @@ static int hva_dbg_last(struct seq_file *s, void *data) return 0; } -static int hva_dbg_regs(struct seq_file *s, void *data) +static int regs_show(struct seq_file *s, void *data) { struct hva_dev *hva = s->private; @@ -325,26 +325,14 @@ static int hva_dbg_regs(struct seq_file *s, void *data) return 0; } -#define hva_dbg_declare(name) \ - static int hva_dbg_##name##_open(struct inode *i, struct file *f) \ - { \ - return single_open(f, hva_dbg_##name, i->i_private); \ - } \ - static const struct file_operations hva_dbg_##name##_fops = { \ - .open = hva_dbg_##name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release, \ - } - #define hva_dbg_create_entry(name) \ debugfs_create_file(#name, 0444, hva->dbg.debugfs_entry, hva, \ - 
&hva_dbg_##name##_fops) + &name##_fops) -hva_dbg_declare(device); -hva_dbg_declare(encoders); -hva_dbg_declare(last); -hva_dbg_declare(regs); +DEFINE_SHOW_ATTRIBUTE(device); +DEFINE_SHOW_ATTRIBUTE(encoders); +DEFINE_SHOW_ATTRIBUTE(last); +DEFINE_SHOW_ATTRIBUTE(regs); void hva_debugfs_create(struct hva_dev *hva) { @@ -380,7 +368,7 @@ void hva_debugfs_remove(struct hva_dev *hva) * context (instance) debug info */ -static int hva_dbg_ctx(struct seq_file *s, void *data) +static int ctx_show(struct seq_file *s, void *data) { struct hva_ctx *ctx = s->private; @@ -392,7 +380,7 @@ static int hva_dbg_ctx(struct seq_file *s, void *data) return 0; } -hva_dbg_declare(ctx); +DEFINE_SHOW_ATTRIBUTE(ctx); void hva_dbg_ctx_create(struct hva_ctx *ctx) { @@ -407,7 +395,7 @@ void hva_dbg_ctx_create(struct hva_ctx *ctx) ctx->dbg.debugfs_entry = debugfs_create_file(name, 0444, hva->dbg.debugfs_entry, - ctx, &hva_dbg_ctx_fops); + ctx, &ctx_fops); } void hva_dbg_ctx_remove(struct hva_ctx *ctx) diff --git a/drivers/media/platform/sti/hva/hva-h264.c b/drivers/media/platform/sti/hva/hva-h264.c index b61a5d337d2a..c34f7cf5aed2 100644 --- a/drivers/media/platform/sti/hva/hva-h264.c +++ b/drivers/media/platform/sti/hva/hva-h264.c @@ -626,7 +626,7 @@ static int hva_h264_prepare_task(struct hva_ctx *pctx, td->frame_width = frame_width; td->frame_height = frame_height; - /* set frame alignement */ + /* set frame alignment */ td->window_width = frame_width; td->window_height = frame_height; td->window_horizontal_offset = 0; diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c index 6732874114cf..5fe5b38fa901 100644 --- a/drivers/media/platform/stm32/stm32-dcmi.c +++ b/drivers/media/platform/stm32/stm32-dcmi.c @@ -1544,7 +1544,7 @@ static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier, dev_dbg(dcmi->dev, "Removing %s\n", video_device_node_name(dcmi->vdev)); - /* Checks internaly if vdev has been init or not */ + /* Checks internally if vdev has been init or not */ video_unregister_device(dcmi->vdev); } diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c index 6950585edb5a..4c79eb64a7a7 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -143,6 +144,15 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, break; } break; + + case V4L2_PIX_FMT_RGB565: + return (mbus_code == MEDIA_BUS_FMT_RGB565_2X8_LE); + case V4L2_PIX_FMT_RGB565X: + return (mbus_code == MEDIA_BUS_FMT_RGB565_2X8_BE); + + case V4L2_PIX_FMT_JPEG: + return (mbus_code == MEDIA_BUS_FMT_JPEG_1X8); + default: dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; @@ -154,6 +164,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) { struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); + struct device *dev = sdev->dev; struct regmap *regmap = sdev->regmap; int ret; @@ -161,6 +172,9 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0); clk_disable_unprepare(sdev->clk_ram); + if (of_device_is_compatible(dev->of_node, + "allwinner,sun50i-a64-csi")) + clk_rate_exclusive_put(sdev->clk_mod); clk_disable_unprepare(sdev->clk_mod); reset_control_assert(sdev->rstc_bus); return 0; @@ -172,6 +186,9 @@ int sun6i_csi_set_power(struct sun6i_csi 
*csi, bool enable) return ret; } + if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) + clk_set_rate_exclusive(sdev->clk_mod, 300000000); + ret = clk_prepare_enable(sdev->clk_ram); if (ret) { dev_err(sdev->dev, "Enable clk_dram_csi clk err %d\n", ret); @@ -191,6 +208,8 @@ int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) clk_ram_disable: clk_disable_unprepare(sdev->clk_ram); clk_mod_disable: + if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) + clk_rate_exclusive_put(sdev->clk_mod); clk_disable_unprepare(sdev->clk_mod); return ret; } @@ -198,8 +217,8 @@ clk_mod_disable: static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev, u32 mbus_code, u32 pixformat) { - /* bayer */ - if ((mbus_code & 0xF000) == 0x3000) + /* non-YUV */ + if ((mbus_code & 0xF000) != 0x2000) return CSI_INPUT_FORMAT_RAW; switch (pixformat) { @@ -268,6 +287,14 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, case V4L2_PIX_FMT_YUV422P: return buf_interlaced ? CSI_FRAME_PLANAR_YUV422 : CSI_FIELD_PLANAR_YUV422; + + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: + return buf_interlaced ? CSI_FRAME_RGB565 : CSI_FIELD_RGB565; + + case V4L2_PIX_FMT_JPEG: + return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8; + default: dev_warn(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; @@ -279,6 +306,10 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, u32 mbus_code, u32 pixformat) { + /* Input sequence does not apply to non-YUV formats */ + if ((mbus_code & 0xF000) != 0x2000) + return 0; + switch (pixformat) { case V4L2_PIX_FMT_HM12: case V4L2_PIX_FMT_NV12: @@ -793,7 +824,7 @@ static const struct regmap_config sun6i_csi_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x1000, + .max_register = 0x9c, }; static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev, @@ -893,7 +924,9 @@ static int sun6i_csi_remove(struct platform_device *pdev) static const struct of_device_id sun6i_csi_of_match[] = { { .compatible = "allwinner,sun6i-a31-csi", }, + { .compatible = "allwinner,sun8i-h3-csi", }, { .compatible = "allwinner,sun8i-v3s-csi", }, + { .compatible = "allwinner,sun50i-a64-csi", }, {}, }; MODULE_DEVICE_TABLE(of, sun6i_csi_of_match); diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h index 0bb000712c33..c626821aaedb 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h @@ -65,7 +65,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, u32 pixformat, int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable); /** - * sun6i_csi_update_config() - update the csi register setttings + * sun6i_csi_update_config() - update the csi register settings * @csi: pointer to the csi * @config: see struct sun6i_csi_config */ @@ -94,6 +94,7 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat) case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SRGGB8: + case V4L2_PIX_FMT_JPEG: return 8; case V4L2_PIX_FMT_SBGGR10: case V4L2_PIX_FMT_SGBRG10: @@ -117,6 +118,8 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat) case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: case V4L2_PIX_FMT_YUV422P: + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_RGB565X: return 16; case V4L2_PIX_FMT_RGB24: case V4L2_PIX_FMT_BGR24: diff --git 
a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c index b04300c3811f..1fd16861f111 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c @@ -56,6 +56,9 @@ static const u32 supported_pixformats[] = { V4L2_PIX_FMT_NV16, V4L2_PIX_FMT_NV61, V4L2_PIX_FMT_YUV422P, + V4L2_PIX_FMT_RGB565, + V4L2_PIX_FMT_RGB565X, + V4L2_PIX_FMT_JPEG, }; static bool is_pixformat_valid(unsigned int pixformat) diff --git a/drivers/media/platform/ti-vpe/vpdma.c b/drivers/media/platform/ti-vpe/vpdma.c index e2cf2b90e500..78d716c93649 100644 --- a/drivers/media/platform/ti-vpe/vpdma.c +++ b/drivers/media/platform/ti-vpe/vpdma.c @@ -404,7 +404,7 @@ EXPORT_SYMBOL(vpdma_map_desc_buf); /* * unmap descriptor/payload DMA buffer, disabling DMA access and - * allowing the main processor to acces the data + * allowing the main processor to access the data */ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) { @@ -501,7 +501,7 @@ void vpdma_reset_desc_list(struct vpdma_desc_list *list) EXPORT_SYMBOL(vpdma_reset_desc_list); /* - * free the buffer allocated fot the VPDMA descriptor list, this should be + * free the buffer allocated for the VPDMA descriptor list, this should be * called when the user doesn't want to use VPDMA any more. */ void vpdma_free_desc_list(struct vpdma_desc_list *list) @@ -790,7 +790,7 @@ static void dump_dtd(struct vpdma_dtd *dtd) * append an outbound data transfer descriptor to the given descriptor list, * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel * - * @list: vpdma desc list to which we add this decriptor + * @list: vpdma desc list to which we add this descriptor * @width: width of the image in pixels in memory * @c_rect: compose params of output image * @fmt: vpdma data format of the buffer @@ -798,7 +798,7 @@ static void dump_dtd(struct vpdma_dtd *dtd) * max_width: enum for maximum width of data transfer * max_height: enum for maximum height of data transfer * chan: VPDMA channel - * flags: VPDMA flags to configure some descriptor fileds + * flags: VPDMA flags to configure some descriptor fields */ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width, int stride, const struct v4l2_rect *c_rect, @@ -863,14 +863,14 @@ EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd); * append an inbound data transfer descriptor to the given descriptor list, * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel * - * @list: vpdma desc list to which we add this decriptor + * @list: vpdma desc list to which we add this descriptor * @width: width of the image in pixels in memory(not the cropped width) * @c_rect: crop params of input image * @fmt: vpdma data format of the buffer * dma_addr: dma address as seen by VPDMA * chan: VPDMA channel * field: top or bottom field info of the input image - * flags: VPDMA flags to configure some descriptor fileds + * flags: VPDMA flags to configure some descriptor fields * frame_width/height: the complete width/height of the image presented to the * client (this makes sense when multiple channels are * connected to the same client, forming a larger frame) @@ -1008,7 +1008,7 @@ unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num) } EXPORT_SYMBOL(vpdma_get_list_mask); -/* clear previosuly occured list intterupts in the LIST_STAT register */ +/* clear previously occurred list interrupts in the LIST_STAT register */ void vpdma_clear_list_stat(struct vpdma_data 
*vpdma, int irq_num, int list_num) { diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c index d70871d0ad2d..207e7e76c048 100644 --- a/drivers/media/platform/ti-vpe/vpe.c +++ b/drivers/media/platform/ti-vpe/vpe.c @@ -876,7 +876,7 @@ static int set_srcdst_params(struct vpe_ctx *ctx) /* * we make sure that the source image has a 16 byte aligned * stride, we need to do the same for the motion vector buffer - * by aligning it's stride to the next 16 byte boundry. this + * by aligning it's stride to the next 16 byte boundary. this * extra space will not be used by the de-interlacer, but will * ensure that vpdma operates correctly */ diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c index 5630f1dc45e6..d1d6085da9f1 100644 --- a/drivers/media/platform/vicodec/codec-fwht.c +++ b/drivers/media/platform/vicodec/codec-fwht.c @@ -10,8 +10,11 @@ */ #include +#include #include "codec-fwht.h" +#define OVERFLOW_BIT BIT(14) + /* * Note: bit 0 of the header must always be 0. Otherwise it cannot * be guaranteed that the magic 8 byte sequence (see below) can @@ -103,16 +106,21 @@ static int rlc(const s16 *in, __be16 *output, int blocktype) * This function will worst-case increase rlc_in by 65*2 bytes: * one s16 value for the header and 8 * 8 coefficients of type s16. */ -static s16 derlc(const __be16 **rlc_in, s16 *dwht_out) +static u16 derlc(const __be16 **rlc_in, s16 *dwht_out, + const __be16 *end_of_input) { /* header */ const __be16 *input = *rlc_in; - s16 ret = ntohs(*input++); + u16 stat; int dec_count = 0; s16 block[8 * 8 + 16]; s16 *wp = block; int i; + if (input > end_of_input) + return OVERFLOW_BIT; + stat = ntohs(*input++); + /* * Now de-compress, it expands one byte to up to 15 bytes * (or fills the remainder of the 64 bytes with zeroes if it @@ -122,9 +130,15 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out) * allow for overflow if the incoming data was malformed. 
*/ while (dec_count < 8 * 8) { - s16 in = ntohs(*input++); - int length = in & 0xf; - int coeff = in >> 4; + s16 in; + int length; + int coeff; + + if (input > end_of_input) + return OVERFLOW_BIT; + in = ntohs(*input++); + length = in & 0xf; + coeff = in >> 4; /* fill remainder with zeros */ if (length == 15) { @@ -149,7 +163,7 @@ static s16 derlc(const __be16 **rlc_in, s16 *dwht_out) dwht_out[x + y * 8] = *wp++; } *rlc_in = input; - return ret; + return stat; } static const int quant_table[] = { @@ -237,8 +251,6 @@ static void fwht(const u8 *block, s16 *output_block, unsigned int stride, unsigned int i; /* stage 1 */ - stride *= input_step; - for (i = 0; i < 8; i++, tmp += stride, out += 8) { switch (input_step) { case 1: @@ -562,7 +574,7 @@ static void fill_encoder_block(const u8 *input, s16 *dst, for (i = 0; i < 8; i++) { for (j = 0; j < 8; j++, input += input_step) *dst++ = *input; - input += (stride - 8) * input_step; + input += stride - 8 * input_step; } } @@ -660,7 +672,7 @@ static void add_deltas(s16 *deltas, const u8 *ref, int stride) static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, struct fwht_cframe *cf, u32 height, u32 width, - unsigned int input_step, + u32 stride, unsigned int input_step, bool is_intra, bool next_is_intra) { u8 *input_start = input; @@ -671,7 +683,11 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, unsigned int last_size = 0; unsigned int i, j; + width = round_up(width, 8); + height = round_up(height, 8); + for (j = 0; j < height / 8; j++) { + input = input_start + j * 8 * stride; for (i = 0; i < width / 8; i++) { /* intra code, first frame is always intra coded. */ int blocktype = IBLOCK; @@ -679,9 +695,9 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, if (!is_intra) blocktype = decide_blocktype(input, refp, - deltablock, width, input_step); + deltablock, stride, input_step); if (blocktype == IBLOCK) { - fwht(input, cf->coeffs, width, input_step, 1); + fwht(input, cf->coeffs, stride, input_step, 1); quantize_intra(cf->coeffs, cf->de_coeffs, cf->i_frame_qp); } else { @@ -722,12 +738,12 @@ static u32 encode_plane(u8 *input, u8 *refp, __be16 **rlco, __be16 *rlco_max, } last_size = size; } - input += width * 7 * input_step; } exit_loop: if (encoding & FWHT_FRAME_UNENCODED) { u8 *out = (u8 *)rlco_start; + u8 *p; input = input_start; /* @@ -736,8 +752,11 @@ exit_loop: * by 0xfe. Since YUV is limited range such values * shouldn't appear anyway. */ - for (i = 0; i < height * width; i++, input += input_step) - *out++ = (*input == 0xff) ? 0xfe : *input; + for (j = 0; j < height; j++) { + for (i = 0, p = input; i < width; i++, p += input_step) + *out++ = (*p == 0xff) ? 
0xfe : *p; + input += stride; + } *rlco = (__be16 *)out; encoding &= ~FWHT_FRAME_PCODED; } @@ -747,30 +766,32 @@ exit_loop: u32 fwht_encode_frame(struct fwht_raw_frame *frm, struct fwht_raw_frame *ref_frm, struct fwht_cframe *cf, - bool is_intra, bool next_is_intra) + bool is_intra, bool next_is_intra, + unsigned int width, unsigned int height, + unsigned int stride, unsigned int chroma_stride) { - unsigned int size = frm->height * frm->width; + unsigned int size = height * width; __be16 *rlco = cf->rlc_data; __be16 *rlco_max; u32 encoding; rlco_max = rlco + size / 2 - 256; encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf, - frm->height, frm->width, + height, width, stride, frm->luma_alpha_step, is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_LUMA_UNENCODED; encoding &= ~FWHT_FRAME_UNENCODED; if (frm->components_num >= 3) { - u32 chroma_h = frm->height / frm->height_div; - u32 chroma_w = frm->width / frm->width_div; + u32 chroma_h = height / frm->height_div; + u32 chroma_w = width / frm->width_div; unsigned int chroma_size = chroma_h * chroma_w; rlco_max = rlco + chroma_size / 2 - 256; encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max, cf, chroma_h, chroma_w, - frm->chroma_step, + chroma_stride, frm->chroma_step, is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_CB_UNENCODED; @@ -778,7 +799,7 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm, rlco_max = rlco + chroma_size / 2 - 256; encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max, cf, chroma_h, chroma_w, - frm->chroma_step, + chroma_stride, frm->chroma_step, is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_CR_UNENCODED; @@ -787,10 +808,10 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm, if (frm->components_num == 4) { rlco_max = rlco + size / 2 - 256; - encoding = encode_plane(frm->alpha, ref_frm->alpha, &rlco, - rlco_max, cf, frm->height, frm->width, - frm->luma_alpha_step, - is_intra, next_is_intra); + encoding |= encode_plane(frm->alpha, ref_frm->alpha, &rlco, + rlco_max, cf, height, width, + stride, frm->luma_alpha_step, + is_intra, next_is_intra); if (encoding & FWHT_FRAME_UNENCODED) encoding |= FWHT_ALPHA_UNENCODED; encoding &= ~FWHT_FRAME_UNENCODED; @@ -800,18 +821,24 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm, return encoding; } -static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, - u32 height, u32 width, bool uncompressed) +static bool decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, + u32 height, u32 width, u32 coded_width, + bool uncompressed, const __be16 *end_of_rlco_buf) { unsigned int copies = 0; s16 copy[8 * 8]; - s16 stat; + u16 stat; unsigned int i, j; + width = round_up(width, 8); + height = round_up(height, 8); + if (uncompressed) { + if (end_of_rlco_buf + 1 < *rlco + width * height / 2) + return false; memcpy(ref, *rlco, width * height); *rlco += width * height / 2; - return; + return true; } /* @@ -822,19 +849,22 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, */ for (j = 0; j < height / 8; j++) { for (i = 0; i < width / 8; i++) { - u8 *refp = ref + j * 8 * width + i * 8; + u8 *refp = ref + j * 8 * coded_width + i * 8; if (copies) { memcpy(cf->de_fwht, copy, sizeof(copy)); if (stat & PFRAME_BIT) - add_deltas(cf->de_fwht, refp, width); - fill_decoder_block(refp, cf->de_fwht, width); + add_deltas(cf->de_fwht, refp, + coded_width); + fill_decoder_block(refp, cf->de_fwht, + coded_width); 
copies--; continue; } - stat = derlc(rlco, cf->coeffs); - + stat = derlc(rlco, cf->coeffs, end_of_rlco_buf); + if (stat & OVERFLOW_BIT) + return false; if (stat & PFRAME_BIT) dequantize_inter(cf->coeffs); else @@ -847,35 +877,53 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref, if (copies) memcpy(copy, cf->de_fwht, sizeof(copy)); if (stat & PFRAME_BIT) - add_deltas(cf->de_fwht, refp, width); - fill_decoder_block(refp, cf->de_fwht, width); + add_deltas(cf->de_fwht, refp, coded_width); + fill_decoder_block(refp, cf->de_fwht, coded_width); } } + return true; } -void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, - u32 hdr_flags, unsigned int components_num) +bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, + u32 hdr_flags, unsigned int components_num, + unsigned int width, unsigned int height, + unsigned int coded_width) { const __be16 *rlco = cf->rlc_data; + const __be16 *end_of_rlco_buf = cf->rlc_data + + (cf->size / sizeof(*rlco)) - 1; - decode_plane(cf, &rlco, ref->luma, cf->height, cf->width, - hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED); + if (!decode_plane(cf, &rlco, ref->luma, height, width, coded_width, + hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; if (components_num >= 3) { - u32 h = cf->height; - u32 w = cf->width; + u32 h = height; + u32 w = width; + u32 c = coded_width; if (!(hdr_flags & FWHT_FL_CHROMA_FULL_HEIGHT)) h /= 2; - if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)) + if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)) { w /= 2; - decode_plane(cf, &rlco, ref->cb, h, w, - hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED); - decode_plane(cf, &rlco, ref->cr, h, w, - hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED); + c /= 2; + } + if (!decode_plane(cf, &rlco, ref->cb, h, w, c, + hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; + if (!decode_plane(cf, &rlco, ref->cr, h, w, c, + hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; } if (components_num == 4) - decode_plane(cf, &rlco, ref->alpha, cf->height, cf->width, - hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED); + if (!decode_plane(cf, &rlco, ref->alpha, height, width, + coded_width, + hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED, + end_of_rlco_buf)) + return false; + return true; } diff --git a/drivers/media/platform/vicodec/codec-fwht.h b/drivers/media/platform/vicodec/codec-fwht.h index 90ff8962fca7..c410512d47c5 100644 --- a/drivers/media/platform/vicodec/codec-fwht.h +++ b/drivers/media/platform/vicodec/codec-fwht.h @@ -56,7 +56,7 @@ #define FWHT_MAGIC1 0x4f4f4f4f #define FWHT_MAGIC2 0xffffffff -#define FWHT_VERSION 2 +#define FWHT_VERSION 3 /* Set if this is an interlaced format */ #define FWHT_FL_IS_INTERLACED BIT(0) @@ -76,11 +76,25 @@ #define FWHT_FL_CHROMA_FULL_HEIGHT BIT(7) #define FWHT_FL_CHROMA_FULL_WIDTH BIT(8) #define FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9) +#define FWHT_FL_I_FRAME BIT(10) /* A 4-values flag - the number of components - 1 */ -#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(17, 16) +#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(18, 16) #define FWHT_FL_COMPONENTS_NUM_OFFSET 16 +#define FWHT_FL_PIXENC_MSK GENMASK(20, 19) +#define FWHT_FL_PIXENC_OFFSET 19 +#define FWHT_FL_PIXENC_YUV (1 << FWHT_FL_PIXENC_OFFSET) +#define FWHT_FL_PIXENC_RGB (2 << FWHT_FL_PIXENC_OFFSET) +#define FWHT_FL_PIXENC_HSV (3 << FWHT_FL_PIXENC_OFFSET) + +/* + * A macro to calculate the needed padding in order to make sure + * both luma and chroma components resolutions are rounded up to + * a multiple of 8 + 
*/ +#define vic_round_dim(dim, div) (round_up((dim) / (div), 8) * (div)) + struct fwht_cframe_hdr { u32 magic1; u32 magic2; @@ -95,7 +109,6 @@ struct fwht_cframe_hdr { }; struct fwht_cframe { - unsigned int width, height; u16 i_frame_qp; u16 p_frame_qp; __be16 *rlc_data; @@ -106,7 +119,6 @@ struct fwht_cframe { }; struct fwht_raw_frame { - unsigned int width, height; unsigned int width_div; unsigned int height_div; unsigned int luma_alpha_step; @@ -125,8 +137,12 @@ struct fwht_raw_frame { u32 fwht_encode_frame(struct fwht_raw_frame *frm, struct fwht_raw_frame *ref_frm, struct fwht_cframe *cf, - bool is_intra, bool next_is_intra); -void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, - u32 hdr_flags, unsigned int components_num); + bool is_intra, bool next_is_intra, + unsigned int width, unsigned int height, + unsigned int stride, unsigned int chroma_stride); +bool fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref, + u32 hdr_flags, unsigned int components_num, + unsigned int width, unsigned int height, + unsigned int coded_width); #endif diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c index 8cb0212df67f..6573a471c5ca 100644 --- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c +++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c @@ -11,32 +11,53 @@ #include "codec-v4l2-fwht.h" static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = { - { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3}, - { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3}, - { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3}, - { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3}, - { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3}, - { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3}, - { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3}, - { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3}, - { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3}, - { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3}, - { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3}, - { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3}, - { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3}, - { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3}, - { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4}, - { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4}, - { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1}, + { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3, 3, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3, 3, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3, 2, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3, 1, FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3, 1, 
FWHT_FL_PIXENC_YUV}, + { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, + { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV}, + { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB}, + { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB}, }; +const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div, + u32 height_div, + u32 components_num, + u32 pixenc, + unsigned int start_idx) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(v4l2_fwht_pixfmts); i++) { + if (v4l2_fwht_pixfmts[i].width_div == width_div && + v4l2_fwht_pixfmts[i].height_div == height_div && + (!pixenc || v4l2_fwht_pixfmts[i].pixenc == pixenc) && + v4l2_fwht_pixfmts[i].components_num == components_num) { + if (start_idx == 0) + return v4l2_fwht_pixfmts + i; + start_idx--; + } + } + return NULL; +} + const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat) { unsigned int i; @@ -56,7 +77,8 @@ const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx) int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) { - unsigned int size = state->width * state->height; + unsigned int size = state->stride * state->coded_height; + unsigned int chroma_stride = state->stride; const struct v4l2_fwht_pixfmt_info *info = state->info; struct fwht_cframe_hdr *p_hdr; struct fwht_cframe cf; @@ -66,8 +88,7 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) if (!info) return -EINVAL; - rf.width = state->width; - rf.height = state->height; + rf.luma = p_in; rf.width_div = info->width_div; rf.height_div = info->height_div; @@ -84,14 +105,17 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) case V4L2_PIX_FMT_YUV420: rf.cb = rf.luma + size; rf.cr = rf.cb + size / 4; + chroma_stride /= 2; break; case V4L2_PIX_FMT_YVU420: rf.cr = rf.luma + size; rf.cb = rf.cr + size / 4; + chroma_stride /= 2; break; case V4L2_PIX_FMT_YUV422P: rf.cb = rf.luma + size; rf.cr = rf.cb + size / 2; + chroma_stride /= 2; break; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: @@ -163,15 +187,16 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) return -EINVAL; } - cf.width = state->width; - cf.height = state->height; cf.i_frame_qp = state->i_frame_qp; cf.p_frame_qp = state->p_frame_qp; cf.rlc_data = (__be16 *)(p_out + sizeof(*p_hdr)); encoding = fwht_encode_frame(&rf, &state->ref_frame, &cf, !state->gop_cnt, - state->gop_cnt == state->gop_size - 1); + state->gop_cnt == state->gop_size - 1, + state->visible_width, + state->visible_height, + state->stride, chroma_stride); if (!(encoding & FWHT_FRAME_PCODED)) state->gop_cnt = 0; if (++state->gop_cnt >= state->gop_size) @@ -181,9 +206,10 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) p_hdr->magic1 = FWHT_MAGIC1; p_hdr->magic2 = FWHT_MAGIC2; p_hdr->version = htonl(FWHT_VERSION); - p_hdr->width = htonl(cf.width); - p_hdr->height = htonl(cf.height); + p_hdr->width = htonl(state->visible_width); + p_hdr->height = 
htonl(state->visible_height); flags |= (info->components_num - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET; + flags |= info->pixenc; if (encoding & FWHT_LUMA_UNENCODED) flags |= FWHT_FL_LUMA_IS_UNCOMPRESSED; if (encoding & FWHT_CB_UNENCODED) @@ -192,6 +218,8 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) flags |= FWHT_FL_CR_IS_UNCOMPRESSED; if (encoding & FWHT_ALPHA_UNENCODED) flags |= FWHT_FL_ALPHA_IS_UNCOMPRESSED; + if (!(encoding & FWHT_FRAME_PCODED)) + flags |= FWHT_FL_I_FRAME; if (rf.height_div == 1) flags |= FWHT_FL_CHROMA_FULL_HEIGHT; if (rf.width_div == 1) @@ -202,65 +230,70 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) p_hdr->ycbcr_enc = htonl(state->ycbcr_enc); p_hdr->quantization = htonl(state->quantization); p_hdr->size = htonl(cf.size); - state->ref_frame.width = cf.width; - state->ref_frame.height = cf.height; return cf.size + sizeof(*p_hdr); } int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) { - unsigned int size = state->width * state->height; - unsigned int chroma_size = size; - unsigned int i; + unsigned int i, j, k; u32 flags; - struct fwht_cframe_hdr *p_hdr; struct fwht_cframe cf; - u8 *p; + u8 *p, *ref_p; unsigned int components_num = 3; unsigned int version; + const struct v4l2_fwht_pixfmt_info *info; + unsigned int hdr_width_div, hdr_height_div; if (!state->info) return -EINVAL; - p_hdr = (struct fwht_cframe_hdr *)p_in; - cf.width = ntohl(p_hdr->width); - cf.height = ntohl(p_hdr->height); + info = state->info; - version = ntohl(p_hdr->version); + version = ntohl(state->header.version); if (!version || version > FWHT_VERSION) { pr_err("version %d is not supported, current version is %d\n", version, FWHT_VERSION); return -EINVAL; } - if (p_hdr->magic1 != FWHT_MAGIC1 || - p_hdr->magic2 != FWHT_MAGIC2 || - (cf.width & 7) || (cf.height & 7)) + if (state->header.magic1 != FWHT_MAGIC1 || + state->header.magic2 != FWHT_MAGIC2) return -EINVAL; /* TODO: support resolution changes */ - if (cf.width != state->width || cf.height != state->height) + if (ntohl(state->header.width) != state->visible_width || + ntohl(state->header.height) != state->visible_height) return -EINVAL; - flags = ntohl(p_hdr->flags); + flags = ntohl(state->header.flags); - if (version == FWHT_VERSION) { + if (version >= 2) { + if ((flags & FWHT_FL_PIXENC_MSK) != info->pixenc) + return -EINVAL; components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >> - FWHT_FL_COMPONENTS_NUM_OFFSET); + FWHT_FL_COMPONENTS_NUM_OFFSET); } - state->colorspace = ntohl(p_hdr->colorspace); - state->xfer_func = ntohl(p_hdr->xfer_func); - state->ycbcr_enc = ntohl(p_hdr->ycbcr_enc); - state->quantization = ntohl(p_hdr->quantization); - cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr)); + if (components_num != info->components_num) + return -EINVAL; - if (!(flags & FWHT_FL_CHROMA_FULL_WIDTH)) - chroma_size /= 2; - if (!(flags & FWHT_FL_CHROMA_FULL_HEIGHT)) - chroma_size /= 2; + state->colorspace = ntohl(state->header.colorspace); + state->xfer_func = ntohl(state->header.xfer_func); + state->ycbcr_enc = ntohl(state->header.ycbcr_enc); + state->quantization = ntohl(state->header.quantization); + cf.rlc_data = (__be16 *)p_in; + cf.size = ntohl(state->header.size); - fwht_decode_frame(&cf, &state->ref_frame, flags, components_num); + hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; + hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 
1 : 2; + if (hdr_width_div != info->width_div || + hdr_height_div != info->height_div) + return -EINVAL; + + if (!fwht_decode_frame(&cf, &state->ref_frame, flags, components_num, + state->visible_width, state->visible_height, + state->coded_width)) + return -EINVAL; /* * TODO - handle the case where the compressed stream encodes a @@ -268,123 +301,226 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out) */ switch (state->info->id) { case V4L2_PIX_FMT_GREY: - memcpy(p_out, state->ref_frame.luma, size); + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } break; case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YUV422P: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - memcpy(p_out, state->ref_frame.cb, chroma_size); - p_out += chroma_size; - memcpy(p_out, state->ref_frame.cr, chroma_size); + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + ref_p = state->ref_frame.cb; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } + ref_p = state->ref_frame.cr; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } break; case V4L2_PIX_FMT_YVU420: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - memcpy(p_out, state->ref_frame.cr, chroma_size); - p_out += chroma_size; - memcpy(p_out, state->ref_frame.cb, chroma_size); + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + ref_p = state->ref_frame.cr; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } + ref_p = state->ref_frame.cb; + for (i = 0; i < state->coded_height / 2; i++) { + memcpy(p_out, ref_p, state->visible_width / 2); + p_out += state->stride / 2; + ref_p += state->coded_width / 2; + } break; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV24: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - for (i = 0, p = p_out; i < chroma_size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.cr[i]; + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + k = 0; + for (i = 0; i < state->coded_height / 2; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.cr[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV61: case V4L2_PIX_FMT_NV42: - memcpy(p_out, state->ref_frame.luma, size); - p_out += size; - for (i = 0, p = p_out; i < chroma_size; i++) { - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.cb[i]; + ref_p = state->ref_frame.luma; + for (i = 0; i < state->coded_height; i++) { + memcpy(p_out, ref_p, state->visible_width); + p_out += state->stride; + ref_p += state->coded_width; + } + + k = 0; + for (i = 0; i < state->coded_height / 2; i++) { + for (j = 0, p = 
p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_YUYV: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; - *p++ = state->ref_frame.cr[i / 2]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + *p++ = state->ref_frame.cr[k / 2]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_YVYU: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; - *p++ = state->ref_frame.cb[i / 2]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + *p++ = state->ref_frame.cb[k / 2]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_UYVY: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.cb[i / 2]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cb[k / 2]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_VYUY: - for (i = 0, p = p_out; i < size; i += 2) { - *p++ = state->ref_frame.cr[i / 2]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i / 2]; - *p++ = state->ref_frame.luma[i + 1]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width / 2; j++) { + *p++ = state->ref_frame.cr[k / 2]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k / 2]; + *p++ = state->ref_frame.luma[k + 1]; + k += 2; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_RGB24: case V4L2_PIX_FMT_HSV24: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_BGR24: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_RGB32: case V4L2_PIX_FMT_XRGB32: case V4L2_PIX_FMT_HSV32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = 0; - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = 0; + *p++ = state->ref_frame.cr[k]; + *p++ = 
state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_BGR32: case V4L2_PIX_FMT_XBGR32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i]; - *p++ = 0; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k]; + *p++ = 0; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_ARGB32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.alpha[i]; - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cb[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.alpha[k]; + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cb[k]; + k++; + } + p_out += state->stride; } break; case V4L2_PIX_FMT_ABGR32: - for (i = 0, p = p_out; i < size; i++) { - *p++ = state->ref_frame.cb[i]; - *p++ = state->ref_frame.luma[i]; - *p++ = state->ref_frame.cr[i]; - *p++ = state->ref_frame.alpha[i]; + k = 0; + for (i = 0; i < state->coded_height; i++) { + for (j = 0, p = p_out; j < state->coded_width; j++) { + *p++ = state->ref_frame.cb[k]; + *p++ = state->ref_frame.luma[k]; + *p++ = state->ref_frame.cr[k]; + *p++ = state->ref_frame.alpha[k]; + k++; + } + p_out += state->stride; } break; default: diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.h b/drivers/media/platform/vicodec/codec-v4l2-fwht.h index ed53e28d4f9c..aa6fa90a48be 100644 --- a/drivers/media/platform/vicodec/codec-v4l2-fwht.h +++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.h @@ -19,12 +19,17 @@ struct v4l2_fwht_pixfmt_info { unsigned int width_div; unsigned int height_div; unsigned int components_num; + unsigned int planes_num; + unsigned int pixenc; }; struct v4l2_fwht_state { const struct v4l2_fwht_pixfmt_info *info; - unsigned int width; - unsigned int height; + unsigned int visible_width; + unsigned int visible_height; + unsigned int coded_width; + unsigned int coded_height; + unsigned int stride; unsigned int gop_size; unsigned int gop_cnt; u16 i_frame_qp; @@ -36,11 +41,17 @@ struct v4l2_fwht_state { enum v4l2_quantization quantization; struct fwht_raw_frame ref_frame; + struct fwht_cframe_hdr header; u8 *compressed_frame; }; const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat); const struct v4l2_fwht_pixfmt_info *v4l2_fwht_get_pixfmt(u32 idx); +const struct v4l2_fwht_pixfmt_info *v4l2_fwht_default_fmt(u32 width_div, + u32 height_div, + u32 components_num, + u32 pixenc, + unsigned int start_idx); int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out); int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out); diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 0d7876f5acf0..d7636fe9e174 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c @@ -61,7 +61,7 @@ struct pixfmt_info { }; static const struct v4l2_fwht_pixfmt_info pixfmt_fwht = { - V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0 + V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0, 1 }; static void vicodec_dev_release(struct device *dev) @@ -75,8 +75,10 @@ static struct platform_device vicodec_pdev = { /* Per-queue, 
driver-specific private data */ struct vicodec_q_data { - unsigned int width; - unsigned int height; + unsigned int coded_width; + unsigned int coded_height; + unsigned int visible_width; + unsigned int visible_height; unsigned int sizeimage; unsigned int sequence; const struct v4l2_fwht_pixfmt_info *info; @@ -122,10 +124,12 @@ struct vicodec_ctx { u32 cur_buf_offset; u32 comp_max_size; u32 comp_size; + u32 header_size; u32 comp_magic_cnt; - u32 comp_frame_size; bool comp_has_frame; bool comp_has_next_frame; + bool first_source_change_sent; + bool source_changed; }; static inline struct vicodec_ctx *file2ctx(struct file *file) @@ -182,6 +186,10 @@ static int device_process(struct vicodec_ctx *ctx, return ret; vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret); } else { + unsigned int comp_frame_size = ntohl(ctx->state.header.size); + + if (comp_frame_size > ctx->comp_max_size) + return -EINVAL; state->info = q_dst->info; ret = v4l2_fwht_decode(state, p_src, p_dst); if (ret < 0) @@ -190,18 +198,8 @@ static int device_process(struct vicodec_ctx *ctx, } dst_vb->sequence = q_dst->sequence++; - dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; - - if (src_vb->flags & V4L2_BUF_FLAG_TIMECODE) - dst_vb->timecode = src_vb->timecode; - dst_vb->field = src_vb->field; dst_vb->flags &= ~V4L2_BUF_FLAG_LAST; - dst_vb->flags |= src_vb->flags & - (V4L2_BUF_FLAG_TIMECODE | - V4L2_BUF_FLAG_KEYFRAME | - V4L2_BUF_FLAG_PFRAME | - V4L2_BUF_FLAG_BFRAME | - V4L2_BUF_FLAG_TSTAMP_SRC_MASK); + v4l2_m2m_buf_copy_metadata(src_vb, dst_vb, !ctx->is_enc); return 0; } @@ -209,6 +207,63 @@ static int device_process(struct vicodec_ctx *ctx, /* * mem2mem callbacks */ +static enum vb2_buffer_state get_next_header(struct vicodec_ctx *ctx, + u8 **pp, u32 sz) +{ + static const u8 magic[] = { + 0x4f, 0x4f, 0x4f, 0x4f, 0xff, 0xff, 0xff, 0xff + }; + u8 *p = *pp; + u32 state; + u8 *header = (u8 *)&ctx->state.header; + + state = VB2_BUF_STATE_DONE; + + if (!ctx->header_size) { + state = VB2_BUF_STATE_ERROR; + for (; p < *pp + sz; p++) { + u32 copy; + + p = memchr(p, magic[ctx->comp_magic_cnt], + *pp + sz - p); + if (!p) { + ctx->comp_magic_cnt = 0; + p = *pp + sz; + break; + } + copy = sizeof(magic) - ctx->comp_magic_cnt; + if (*pp + sz - p < copy) + copy = *pp + sz - p; + + memcpy(header + ctx->comp_magic_cnt, p, copy); + ctx->comp_magic_cnt += copy; + if (!memcmp(header, magic, ctx->comp_magic_cnt)) { + p += copy; + state = VB2_BUF_STATE_DONE; + break; + } + ctx->comp_magic_cnt = 0; + } + if (ctx->comp_magic_cnt < sizeof(magic)) { + *pp = p; + return state; + } + ctx->header_size = sizeof(magic); + } + + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { + u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->header_size; + + if (*pp + sz - p < copy) + copy = *pp + sz - p; + + memcpy(header + ctx->header_size, p, copy); + p += copy; + ctx->header_size += copy; + } + *pp = p; + return state; +} /* device_run() - prepares and starts the device */ static void device_run(void *priv) @@ -249,6 +304,7 @@ static void device_run(void *priv) } v4l2_m2m_buf_done(dst_buf, state); ctx->comp_size = 0; + ctx->header_size = 0; ctx->comp_magic_cnt = 0; ctx->comp_has_frame = false; spin_unlock(ctx->lock); @@ -273,6 +329,96 @@ static void job_remove_src_buf(struct vicodec_ctx *ctx, u32 state) spin_unlock(ctx->lock); } +static const struct v4l2_fwht_pixfmt_info * +info_from_header(const struct fwht_cframe_hdr *p_hdr) +{ + unsigned int flags = ntohl(p_hdr->flags); + unsigned int width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 
1 : 2; + unsigned int height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2; + unsigned int components_num = 3; + unsigned int pixenc = 0; + unsigned int version = ntohl(p_hdr->version); + + if (version >= 2) { + components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >> + FWHT_FL_COMPONENTS_NUM_OFFSET); + pixenc = (flags & FWHT_FL_PIXENC_MSK); + } + return v4l2_fwht_default_fmt(width_div, height_div, + components_num, pixenc, 0); +} + +static bool is_header_valid(const struct fwht_cframe_hdr *p_hdr) +{ + const struct v4l2_fwht_pixfmt_info *info; + unsigned int w = ntohl(p_hdr->width); + unsigned int h = ntohl(p_hdr->height); + unsigned int version = ntohl(p_hdr->version); + unsigned int flags = ntohl(p_hdr->flags); + + if (!version || version > FWHT_VERSION) + return false; + + if (w < MIN_WIDTH || w > MAX_WIDTH || h < MIN_HEIGHT || h > MAX_HEIGHT) + return false; + + if (version >= 2) { + unsigned int components_num = 1 + + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >> + FWHT_FL_COMPONENTS_NUM_OFFSET); + unsigned int pixenc = flags & FWHT_FL_PIXENC_MSK; + + if (components_num == 0 || components_num > 4 || !pixenc) + return false; + } + + info = info_from_header(p_hdr); + if (!info) + return false; + return true; +} + +static void update_capture_data_from_header(struct vicodec_ctx *ctx) +{ + struct vicodec_q_data *q_dst = get_q_data(ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + const struct fwht_cframe_hdr *p_hdr = &ctx->state.header; + const struct v4l2_fwht_pixfmt_info *info = info_from_header(p_hdr); + unsigned int flags = ntohl(p_hdr->flags); + unsigned int hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; + unsigned int hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 1 : 2; + + q_dst->info = info; + q_dst->visible_width = ntohl(p_hdr->width); + q_dst->visible_height = ntohl(p_hdr->height); + q_dst->coded_width = vic_round_dim(q_dst->visible_width, hdr_width_div); + q_dst->coded_height = vic_round_dim(q_dst->visible_height, + hdr_height_div); + + q_dst->sizeimage = q_dst->coded_width * q_dst->coded_height * + q_dst->info->sizeimage_mult / q_dst->info->sizeimage_div; + ctx->state.colorspace = ntohl(p_hdr->colorspace); + + ctx->state.xfer_func = ntohl(p_hdr->xfer_func); + ctx->state.ycbcr_enc = ntohl(p_hdr->ycbcr_enc); + ctx->state.quantization = ntohl(p_hdr->quantization); +} + +static void set_last_buffer(struct vb2_v4l2_buffer *dst_buf, + const struct vb2_v4l2_buffer *src_buf, + struct vicodec_ctx *ctx) +{ + struct vicodec_q_data *q_dst = get_q_data(ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + + vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0); + dst_buf->sequence = q_dst->sequence++; + + v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, !ctx->is_enc); + dst_buf->flags |= V4L2_BUF_FLAG_LAST; + v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); +} + static int job_ready(void *priv) { static const u8 magic[] = { @@ -284,7 +430,16 @@ static int job_ready(void *priv) u8 *p; u32 sz; u32 state; - + struct vicodec_q_data *q_dst = get_q_data(ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + unsigned int flags; + unsigned int hdr_width_div; + unsigned int hdr_height_div; + unsigned int max_to_copy; + unsigned int comp_frame_size; + + if (ctx->source_changed) + return 0; if (ctx->is_enc || ctx->comp_has_frame) return 1; @@ -299,59 +454,27 @@ restart: state = VB2_BUF_STATE_DONE; - if (!ctx->comp_size) { - state = VB2_BUF_STATE_ERROR; - for (; p < p_src + sz; p++) { - u32 copy; - - p = memchr(p, magic[ctx->comp_magic_cnt], - p_src + sz - p); - if (!p) { - ctx->comp_magic_cnt = 0; - break; - } - copy = 
sizeof(magic) - ctx->comp_magic_cnt; - if (p_src + sz - p < copy) - copy = p_src + sz - p; - - memcpy(ctx->state.compressed_frame + ctx->comp_magic_cnt, - p, copy); - ctx->comp_magic_cnt += copy; - if (!memcmp(ctx->state.compressed_frame, magic, - ctx->comp_magic_cnt)) { - p += copy; - state = VB2_BUF_STATE_DONE; - break; - } - ctx->comp_magic_cnt = 0; - } - if (ctx->comp_magic_cnt < sizeof(magic)) { + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { + state = get_next_header(ctx, &p, p_src + sz - p); + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { job_remove_src_buf(ctx, state); goto restart; } - ctx->comp_size = sizeof(magic); } - if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) { - struct fwht_cframe_hdr *p_hdr = - (struct fwht_cframe_hdr *)ctx->state.compressed_frame; - u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->comp_size; - if (copy > p_src + sz - p) - copy = p_src + sz - p; - memcpy(ctx->state.compressed_frame + ctx->comp_size, - p, copy); - p += copy; - ctx->comp_size += copy; - if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) { - job_remove_src_buf(ctx, state); - goto restart; - } - ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr); - if (ctx->comp_frame_size > ctx->comp_max_size) - ctx->comp_frame_size = ctx->comp_max_size; - } - if (ctx->comp_size < ctx->comp_frame_size) { - u32 copy = ctx->comp_frame_size - ctx->comp_size; + comp_frame_size = ntohl(ctx->state.header.size); + + /* + * The current scanned frame might be the first frame of a new + * resolution so its size might be larger than ctx->comp_max_size. + * In that case it is copied up to the current buffer capacity and + * the copy will continue after allocating new large enough buffer + * when restreaming + */ + max_to_copy = min(comp_frame_size, ctx->comp_max_size); + + if (ctx->comp_size < max_to_copy) { + u32 copy = max_to_copy - ctx->comp_size; if (copy > p_src + sz - p) copy = p_src + sz - p; @@ -360,15 +483,17 @@ restart: p, copy); p += copy; ctx->comp_size += copy; - if (ctx->comp_size < ctx->comp_frame_size) { + if (ctx->comp_size < max_to_copy) { job_remove_src_buf(ctx, state); goto restart; } } ctx->cur_buf_offset = p - p_src; - ctx->comp_has_frame = true; + if (ctx->comp_size == comp_frame_size) + ctx->comp_has_frame = true; ctx->comp_has_next_frame = false; - if (sz - ctx->cur_buf_offset >= sizeof(struct fwht_cframe_hdr)) { + if (ctx->comp_has_frame && sz - ctx->cur_buf_offset >= + sizeof(struct fwht_cframe_hdr)) { struct fwht_cframe_hdr *p_hdr = (struct fwht_cframe_hdr *)p; u32 frame_size = ntohl(p_hdr->size); u32 remaining = sz - ctx->cur_buf_offset - sizeof(*p_hdr); @@ -376,6 +501,36 @@ restart: if (!memcmp(p, magic, sizeof(magic))) ctx->comp_has_next_frame = remaining >= frame_size; } + /* + * if the header is invalid the device_run will just drop the frame + * with an error + */ + if (!is_header_valid(&ctx->state.header) && ctx->comp_has_frame) + return 1; + flags = ntohl(ctx->state.header.flags); + hdr_width_div = (flags & FWHT_FL_CHROMA_FULL_WIDTH) ? 1 : 2; + hdr_height_div = (flags & FWHT_FL_CHROMA_FULL_HEIGHT) ? 
1 : 2; + + if (ntohl(ctx->state.header.width) != q_dst->visible_width || + ntohl(ctx->state.header.height) != q_dst->visible_height || + !q_dst->info || + hdr_width_div != q_dst->info->width_div || + hdr_height_div != q_dst->info->height_div) { + static const struct v4l2_event rs_event = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, + }; + + struct vb2_v4l2_buffer *dst_buf = + v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); + + update_capture_data_from_header(ctx); + ctx->first_source_change_sent = true; + v4l2_event_queue_fh(&ctx->fh, &rs_event); + set_last_buffer(dst_buf, src_buf, ctx); + ctx->source_changed = true; + return 0; + } return 1; } @@ -403,9 +558,10 @@ static int vidioc_querycap(struct file *file, void *priv, return 0; } -static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out) +static int enum_fmt(struct v4l2_fmtdesc *f, struct vicodec_ctx *ctx, + bool is_out) { - bool is_uncomp = (is_enc && is_out) || (!is_enc && !is_out); + bool is_uncomp = (ctx->is_enc && is_out) || (!ctx->is_enc && !is_out); if (V4L2_TYPE_IS_MULTIPLANAR(f->type) && !multiplanar) return -EINVAL; @@ -414,8 +570,16 @@ static int enum_fmt(struct v4l2_fmtdesc *f, bool is_enc, bool is_out) if (is_uncomp) { const struct v4l2_fwht_pixfmt_info *info = - v4l2_fwht_get_pixfmt(f->index); + get_q_data(ctx, f->type)->info; + if (!info || ctx->is_enc) + info = v4l2_fwht_get_pixfmt(f->index); + else + info = v4l2_fwht_default_fmt(info->width_div, + info->height_div, + info->components_num, + info->pixenc, + f->index); if (!info) return -EINVAL; f->pixelformat = info->id; @@ -432,7 +596,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, { struct vicodec_ctx *ctx = file2ctx(file); - return enum_fmt(f, ctx->is_enc, false); + return enum_fmt(f, ctx, false); } static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, @@ -440,7 +604,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, { struct vicodec_ctx *ctx = file2ctx(file); - return enum_fmt(f, ctx->is_enc, true); + return enum_fmt(f, ctx, true); } static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) @@ -458,17 +622,21 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) q_data = get_q_data(ctx, f->type); info = q_data->info; + if (!info) + info = v4l2_fwht_get_pixfmt(0); + switch (f->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: if (multiplanar) return -EINVAL; pix = &f->fmt.pix; - pix->width = q_data->width; - pix->height = q_data->height; + pix->width = q_data->coded_width; + pix->height = q_data->coded_height; pix->field = V4L2_FIELD_NONE; pix->pixelformat = info->id; - pix->bytesperline = q_data->width * info->bytesperline_mult; + pix->bytesperline = q_data->coded_width * + info->bytesperline_mult; pix->sizeimage = q_data->sizeimage; pix->colorspace = ctx->state.colorspace; pix->xfer_func = ctx->state.xfer_func; @@ -481,13 +649,13 @@ static int vidioc_g_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) if (!multiplanar) return -EINVAL; pix_mp = &f->fmt.pix_mp; - pix_mp->width = q_data->width; - pix_mp->height = q_data->height; + pix_mp->width = q_data->coded_width; + pix_mp->height = q_data->coded_height; pix_mp->field = V4L2_FIELD_NONE; pix_mp->pixelformat = info->id; pix_mp->num_planes = 1; pix_mp->plane_fmt[0].bytesperline = - q_data->width * info->bytesperline_mult; + q_data->coded_width * info->bytesperline_mult; pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage; pix_mp->colorspace = 
ctx->state.colorspace; pix_mp->xfer_func = ctx->state.xfer_func; @@ -528,8 +696,13 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) pix = &f->fmt.pix; if (pix->pixelformat != V4L2_PIX_FMT_FWHT) info = find_fmt(pix->pixelformat); - pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH) & ~7; - pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT) & ~7; + + pix->width = clamp(pix->width, MIN_WIDTH, MAX_WIDTH); + pix->width = vic_round_dim(pix->width, info->width_div); + + pix->height = clamp(pix->height, MIN_HEIGHT, MAX_HEIGHT); + pix->height = vic_round_dim(pix->height, info->height_div); + pix->field = V4L2_FIELD_NONE; pix->bytesperline = pix->width * info->bytesperline_mult; @@ -545,9 +718,14 @@ static int vidioc_try_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) if (pix_mp->pixelformat != V4L2_PIX_FMT_FWHT) info = find_fmt(pix_mp->pixelformat); pix_mp->num_planes = 1; - pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH) & ~7; - pix_mp->height = - clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT) & ~7; + + pix_mp->width = clamp(pix_mp->width, MIN_WIDTH, MAX_WIDTH); + pix_mp->width = vic_round_dim(pix_mp->width, info->width_div); + + pix_mp->height = clamp(pix_mp->height, MIN_HEIGHT, MAX_HEIGHT); + pix_mp->height = vic_round_dim(pix_mp->height, + info->height_div); + pix_mp->field = V4L2_FIELD_NONE; plane->bytesperline = pix_mp->width * info->bytesperline_mult; @@ -657,9 +835,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) pix = &f->fmt.pix; if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type)) fmt_changed = + !q_data->info || q_data->info->id != pix->pixelformat || - q_data->width != pix->width || - q_data->height != pix->height; + q_data->coded_width != pix->width || + q_data->coded_height != pix->height; if (vb2_is_busy(vq) && fmt_changed) return -EBUSY; @@ -668,8 +847,8 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) q_data->info = &pixfmt_fwht; else q_data->info = find_fmt(pix->pixelformat); - q_data->width = pix->width; - q_data->height = pix->height; + q_data->coded_width = pix->width; + q_data->coded_height = pix->height; q_data->sizeimage = pix->sizeimage; break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: @@ -677,9 +856,10 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) pix_mp = &f->fmt.pix_mp; if (ctx->is_enc && V4L2_TYPE_IS_OUTPUT(f->type)) fmt_changed = + !q_data->info || q_data->info->id != pix_mp->pixelformat || - q_data->width != pix_mp->width || - q_data->height != pix_mp->height; + q_data->coded_width != pix_mp->width || + q_data->coded_height != pix_mp->height; if (vb2_is_busy(vq) && fmt_changed) return -EBUSY; @@ -688,17 +868,24 @@ static int vidioc_s_fmt(struct vicodec_ctx *ctx, struct v4l2_format *f) q_data->info = &pixfmt_fwht; else q_data->info = find_fmt(pix_mp->pixelformat); - q_data->width = pix_mp->width; - q_data->height = pix_mp->height; + q_data->coded_width = pix_mp->width; + q_data->coded_height = pix_mp->height; q_data->sizeimage = pix_mp->plane_fmt[0].sizeimage; break; default: return -EINVAL; } + if (q_data->visible_width > q_data->coded_width) + q_data->visible_width = q_data->coded_width; + if (q_data->visible_height > q_data->coded_height) + q_data->visible_height = q_data->coded_height; + dprintk(ctx->dev, - "Setting format for type %d, wxh: %dx%d, fourcc: %08x\n", - f->type, q_data->width, q_data->height, q_data->info->id); + "Setting format for type %d, coded wxh: %dx%d, visible wxh: %dx%d, fourcc: %08x\n", + f->type, 
q_data->coded_width, q_data->coded_height, + q_data->visible_width, q_data->visible_height, + q_data->info->id); return 0; } @@ -753,6 +940,84 @@ static int vidioc_s_fmt_vid_out(struct file *file, void *priv, return ret; } +static int vidioc_g_selection(struct file *file, void *priv, + struct v4l2_selection *s) +{ + struct vicodec_ctx *ctx = file2ctx(file); + struct vicodec_q_data *q_data; + enum v4l2_buf_type valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + enum v4l2_buf_type valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + if (multiplanar) { + valid_cap_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; + valid_out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + } + + if (s->type != valid_cap_type && s->type != valid_out_type) + return -EINVAL; + + q_data = get_q_data(ctx, s->type); + if (!q_data) + return -EINVAL; + /* + * encoder supports only cropping on the OUTPUT buffer + * decoder supports only composing on the CAPTURE buffer + */ + if ((ctx->is_enc && s->type == valid_out_type) || + (!ctx->is_enc && s->type == valid_cap_type)) { + switch (s->target) { + case V4L2_SEL_TGT_COMPOSE: + case V4L2_SEL_TGT_CROP: + s->r.left = 0; + s->r.top = 0; + s->r.width = q_data->visible_width; + s->r.height = q_data->visible_height; + return 0; + case V4L2_SEL_TGT_COMPOSE_DEFAULT: + case V4L2_SEL_TGT_COMPOSE_BOUNDS: + case V4L2_SEL_TGT_CROP_DEFAULT: + case V4L2_SEL_TGT_CROP_BOUNDS: + s->r.left = 0; + s->r.top = 0; + s->r.width = q_data->coded_width; + s->r.height = q_data->coded_height; + return 0; + } + } + return -EINVAL; +} + +static int vidioc_s_selection(struct file *file, void *priv, + struct v4l2_selection *s) +{ + struct vicodec_ctx *ctx = file2ctx(file); + struct vicodec_q_data *q_data; + enum v4l2_buf_type out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + + if (multiplanar) + out_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; + + if (s->type != out_type) + return -EINVAL; + + q_data = get_q_data(ctx, s->type); + if (!q_data) + return -EINVAL; + + if (!ctx->is_enc || s->target != V4L2_SEL_TGT_CROP) + return -EINVAL; + + s->r.left = 0; + s->r.top = 0; + q_data->visible_width = clamp(s->r.width, MIN_WIDTH, + q_data->coded_width); + s->r.width = q_data->visible_width; + q_data->visible_height = clamp(s->r.height, MIN_HEIGHT, + q_data->coded_height); + s->r.height = q_data->visible_height; + return 0; +} + static void vicodec_mark_last_buf(struct vicodec_ctx *ctx) { static const struct v4l2_event eos_event = { @@ -853,7 +1118,13 @@ static int vicodec_enum_framesizes(struct file *file, void *fh, static int vicodec_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { + struct vicodec_ctx *ctx = container_of(fh, struct vicodec_ctx, fh); + switch (sub->type) { + case V4L2_EVENT_SOURCE_CHANGE: + if (ctx->is_enc) + return -EINVAL; + /* fall through */ case V4L2_EVENT_EOS: return v4l2_event_subscribe(fh, sub, 0, NULL); default: @@ -895,6 +1166,9 @@ static const struct v4l2_ioctl_ops vicodec_ioctl_ops = { .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + .vidioc_g_selection = vidioc_g_selection, + .vidioc_s_selection = vidioc_s_selection, + .vidioc_try_encoder_cmd = vicodec_try_encoder_cmd, .vidioc_encoder_cmd = vicodec_encoder_cmd, .vidioc_try_decoder_cmd = vicodec_try_decoder_cmd, @@ -960,7 +1234,71 @@ static void vicodec_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vicodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + unsigned int sz = vb2_get_plane_payload(&vbuf->vb2_buf, 0); + u8 *p_src = 
vb2_plane_vaddr(&vbuf->vb2_buf, 0); + u8 *p = p_src; + struct vb2_queue *vq_out = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, + V4L2_BUF_TYPE_VIDEO_OUTPUT); + struct vb2_queue *vq_cap = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, + V4L2_BUF_TYPE_VIDEO_CAPTURE); + bool header_valid = false; + static const struct v4l2_event rs_event = { + .type = V4L2_EVENT_SOURCE_CHANGE, + .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, + }; + /* buf_queue handles only the first source change event */ + if (ctx->first_source_change_sent) { + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); + return; + } + + /* + * if both queues are streaming, the source change event is + * handled in job_ready + */ + if (vb2_is_streaming(vq_cap) && vb2_is_streaming(vq_out)) { + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); + return; + } + + /* + * source change event is relevant only for the decoder + * in the compressed stream + */ + if (ctx->is_enc || !V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { + v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); + return; + } + + do { + enum vb2_buffer_state state = + get_next_header(ctx, &p, p_src + sz - p); + + if (ctx->header_size < sizeof(struct fwht_cframe_hdr)) { + v4l2_m2m_buf_done(vbuf, state); + return; + } + header_valid = is_header_valid(&ctx->state.header); + /* + * p points right after the end of the header in the + * buffer. If the header is invalid we set p to point + * to the next byte after the start of the header + */ + if (!header_valid) { + p = p - sizeof(struct fwht_cframe_hdr) + 1; + if (p < p_src) + p = p_src; + ctx->header_size = 0; + ctx->comp_magic_cnt = 0; + } + + } while (!header_valid); + + ctx->cur_buf_offset = p - p_src; + update_capture_data_from_header(ctx); + ctx->first_source_change_sent = true; + v4l2_event_queue_fh(&ctx->fh, &rs_event); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } @@ -988,47 +1326,70 @@ static int vicodec_start_streaming(struct vb2_queue *q, struct vicodec_ctx *ctx = vb2_get_drv_priv(q); struct vicodec_q_data *q_data = get_q_data(ctx, q->type); struct v4l2_fwht_state *state = &ctx->state; - unsigned int size = q_data->width * q_data->height; const struct v4l2_fwht_pixfmt_info *info = q_data->info; - unsigned int chroma_div = info->width_div * info->height_div; + unsigned int size = q_data->coded_width * q_data->coded_height; + unsigned int chroma_div; unsigned int total_planes_size; + u8 *new_comp_frame; - /* - * we don't know ahead how many components are in the encoding type - * V4L2_PIX_FMT_FWHT, so we will allocate space for 4 planes. 
- */ - if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4) + if (!info) + return -EINVAL; + + chroma_div = info->width_div * info->height_div; + q_data->sequence = 0; + + ctx->last_src_buf = NULL; + ctx->last_dst_buf = NULL; + state->gop_cnt = 0; + + if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) || + (!V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) + return 0; + + if (info->id == V4L2_PIX_FMT_FWHT) { + vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED); + return -EINVAL; + } + if (info->components_num == 4) total_planes_size = 2 * size + 2 * (size / chroma_div); else if (info->components_num == 3) total_planes_size = size + 2 * (size / chroma_div); else total_planes_size = size; - q_data->sequence = 0; + state->visible_width = q_data->visible_width; + state->visible_height = q_data->visible_height; + state->coded_width = q_data->coded_width; + state->coded_height = q_data->coded_height; + state->stride = q_data->coded_width * + info->bytesperline_mult; - if (!V4L2_TYPE_IS_OUTPUT(q->type)) { - if (!ctx->is_enc) { - state->width = q_data->width; - state->height = q_data->height; - } - return 0; - } - - if (ctx->is_enc) { - state->width = q_data->width; - state->height = q_data->height; - } - state->ref_frame.width = state->ref_frame.height = 0; state->ref_frame.luma = kvmalloc(total_planes_size, GFP_KERNEL); - ctx->comp_max_size = total_planes_size + sizeof(struct fwht_cframe_hdr); - state->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL); - if (!state->ref_frame.luma || !state->compressed_frame) { + ctx->comp_max_size = total_planes_size; + new_comp_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL); + + if (!state->ref_frame.luma || !new_comp_frame) { kvfree(state->ref_frame.luma); - kvfree(state->compressed_frame); + kvfree(new_comp_frame); vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED); return -ENOMEM; } - if (info->id == V4L2_PIX_FMT_FWHT || info->components_num >= 3) { + /* + * if state->compressed_frame was already allocated then + * it contain data of the first frame of the new resolution + */ + if (state->compressed_frame) { + if (ctx->comp_size > ctx->comp_max_size) + ctx->comp_size = ctx->comp_max_size; + + memcpy(new_comp_frame, + state->compressed_frame, ctx->comp_size); + } + + kvfree(state->compressed_frame); + state->compressed_frame = new_comp_frame; + + if (info->components_num >= 3) { state->ref_frame.cb = state->ref_frame.luma + size; state->ref_frame.cr = state->ref_frame.cb + size / chroma_div; } else { @@ -1036,20 +1397,11 @@ static int vicodec_start_streaming(struct vb2_queue *q, state->ref_frame.cr = NULL; } - if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4) + if (info->components_num == 4) state->ref_frame.alpha = state->ref_frame.cr + size / chroma_div; else state->ref_frame.alpha = NULL; - - ctx->last_src_buf = NULL; - ctx->last_dst_buf = NULL; - state->gop_cnt = 0; - ctx->cur_buf_offset = 0; - ctx->comp_size = 0; - ctx->comp_magic_cnt = 0; - ctx->comp_has_frame = false; - return 0; } @@ -1059,11 +1411,20 @@ static void vicodec_stop_streaming(struct vb2_queue *q) vicodec_return_bufs(q, VB2_BUF_STATE_ERROR); - if (!V4L2_TYPE_IS_OUTPUT(q->type)) - return; - - kvfree(ctx->state.ref_frame.luma); - kvfree(ctx->state.compressed_frame); + if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) || + (V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) { + kvfree(ctx->state.ref_frame.luma); + ctx->comp_max_size = 0; + ctx->source_changed = false; + } + if (V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) { + ctx->cur_buf_offset = 0; + ctx->comp_size = 0; 
+ ctx->header_size = 0; + ctx->comp_magic_cnt = 0; + ctx->comp_has_frame = 0; + ctx->comp_has_next_frame = 0; + } } static const struct vb2_ops vicodec_qops = { @@ -1204,8 +1565,10 @@ static int vicodec_open(struct file *file) ctx->q_data[V4L2_M2M_SRC].info = ctx->is_enc ? v4l2_fwht_get_pixfmt(0) : &pixfmt_fwht; - ctx->q_data[V4L2_M2M_SRC].width = 1280; - ctx->q_data[V4L2_M2M_SRC].height = 720; + ctx->q_data[V4L2_M2M_SRC].coded_width = 1280; + ctx->q_data[V4L2_M2M_SRC].coded_height = 720; + ctx->q_data[V4L2_M2M_SRC].visible_width = 1280; + ctx->q_data[V4L2_M2M_SRC].visible_height = 720; size = 1280 * 720 * ctx->q_data[V4L2_M2M_SRC].info->sizeimage_mult / ctx->q_data[V4L2_M2M_SRC].info->sizeimage_div; if (ctx->is_enc) @@ -1213,16 +1576,17 @@ static int vicodec_open(struct file *file) else ctx->q_data[V4L2_M2M_SRC].sizeimage = size + sizeof(struct fwht_cframe_hdr); - ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC]; - ctx->q_data[V4L2_M2M_DST].info = - ctx->is_enc ? &pixfmt_fwht : v4l2_fwht_get_pixfmt(0); - size = 1280 * 720 * ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult / - ctx->q_data[V4L2_M2M_DST].info->sizeimage_div; - if (ctx->is_enc) - ctx->q_data[V4L2_M2M_DST].sizeimage = - size + sizeof(struct fwht_cframe_hdr); - else - ctx->q_data[V4L2_M2M_DST].sizeimage = size; + if (ctx->is_enc) { + ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC]; + ctx->q_data[V4L2_M2M_DST].info = &pixfmt_fwht; + ctx->q_data[V4L2_M2M_DST].sizeimage = 1280 * 720 * + ctx->q_data[V4L2_M2M_DST].info->sizeimage_mult / + ctx->q_data[V4L2_M2M_DST].info->sizeimage_div + + sizeof(struct fwht_cframe_hdr); + } else { + ctx->q_data[V4L2_M2M_DST].info = NULL; + } + ctx->state.colorspace = V4L2_COLORSPACE_REC709; if (ctx->is_enc) { @@ -1310,6 +1674,8 @@ static int vicodec_probe(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, "vicodec", sizeof(dev->mdev.model)); + strscpy(dev->mdev.bus_info, "platform:vicodec", + sizeof(dev->mdev.bus_info)); media_device_init(&dev->mdev); dev->v4l2_dev.mdev = &dev->mdev; #endif diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c index c33900e3c23e..0ba30756e1e4 100644 --- a/drivers/media/platform/video-mux.c +++ b/drivers/media/platform/video-mux.c @@ -263,6 +263,26 @@ static int video_mux_set_format(struct v4l2_subdev *sd, case MEDIA_BUS_FMT_UYYVYY16_0_5X48: case MEDIA_BUS_FMT_JPEG_1X8: case MEDIA_BUS_FMT_AHSV8888_1X32: + case MEDIA_BUS_FMT_SBGGR8_1X8: + case MEDIA_BUS_FMT_SGBRG8_1X8: + case MEDIA_BUS_FMT_SGRBG8_1X8: + case MEDIA_BUS_FMT_SRGGB8_1X8: + case MEDIA_BUS_FMT_SBGGR10_1X10: + case MEDIA_BUS_FMT_SGBRG10_1X10: + case MEDIA_BUS_FMT_SGRBG10_1X10: + case MEDIA_BUS_FMT_SRGGB10_1X10: + case MEDIA_BUS_FMT_SBGGR12_1X12: + case MEDIA_BUS_FMT_SGBRG12_1X12: + case MEDIA_BUS_FMT_SGRBG12_1X12: + case MEDIA_BUS_FMT_SRGGB12_1X12: + case MEDIA_BUS_FMT_SBGGR14_1X14: + case MEDIA_BUS_FMT_SGBRG14_1X14: + case MEDIA_BUS_FMT_SGRBG14_1X14: + case MEDIA_BUS_FMT_SRGGB14_1X14: + case MEDIA_BUS_FMT_SBGGR16_1X16: + case MEDIA_BUS_FMT_SGBRG16_1X16: + case MEDIA_BUS_FMT_SGRBG16_1X16: + case MEDIA_BUS_FMT_SRGGB16_1X16: break; default: sdformat->format.code = MEDIA_BUS_FMT_Y8_1X8; diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index 89d9c4c21037..34dcaca45d8b 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * A virtual v4l2-mem2mem example device. 
* @@ -34,22 +35,34 @@ MODULE_DESCRIPTION("Virtual device for mem2mem framework testing"); MODULE_AUTHOR("Pawel Osciak, "); MODULE_LICENSE("GPL"); -MODULE_VERSION("0.1.1"); +MODULE_VERSION("0.2"); MODULE_ALIAS("mem2mem_testdev"); -static unsigned debug; +static unsigned int debug; module_param(debug, uint, 0644); -MODULE_PARM_DESC(debug, "activates debug info"); +MODULE_PARM_DESC(debug, "debug level"); + +/* Default transaction time in msec */ +static unsigned int default_transtime = 40; /* Max 25 fps */ +module_param(default_transtime, uint, 0644); +MODULE_PARM_DESC(default_transtime, "default transaction time in ms"); #define MIN_W 32 #define MIN_H 32 #define MAX_W 640 #define MAX_H 480 -#define DIM_ALIGN_MASK 7 /* 8-byte alignment for line length */ + +/* Pixel alignment for non-bayer formats */ +#define WIDTH_ALIGN 2 +#define HEIGHT_ALIGN 1 + +/* Pixel alignment for bayer formats */ +#define BAYER_WIDTH_ALIGN 2 +#define BAYER_HEIGHT_ALIGN 2 /* Flags that indicate a format can be used for capture/output */ -#define MEM2MEM_CAPTURE (1 << 0) -#define MEM2MEM_OUTPUT (1 << 1) +#define MEM2MEM_CAPTURE BIT(0) +#define MEM2MEM_OUTPUT BIT(1) #define MEM2MEM_NAME "vim2m" @@ -58,18 +71,12 @@ MODULE_PARM_DESC(debug, "activates debug info"); /* In bytes, per queue */ #define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024) -/* Default transaction time in msec */ -#define MEM2MEM_DEF_TRANSTIME 40 -#define MEM2MEM_COLOR_STEP (0xff >> 4) -#define MEM2MEM_NUM_TILES 8 - /* Flags that indicate processing mode */ -#define MEM2MEM_HFLIP (1 << 0) -#define MEM2MEM_VFLIP (1 << 1) - -#define dprintk(dev, fmt, arg...) \ - v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) +#define MEM2MEM_HFLIP BIT(0) +#define MEM2MEM_VFLIP BIT(1) +#define dprintk(dev, lvl, fmt, arg...) 
\ + v4l2_dbg(lvl, debug, &(dev)->v4l2_dev, "%s: " fmt, __func__, ## arg) static void vim2m_dev_release(struct device *dev) {} @@ -83,21 +90,46 @@ struct vim2m_fmt { u32 fourcc; int depth; /* Types the format can be used for */ - u32 types; + u32 types; }; static struct vim2m_fmt formats[] = { { - .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */ + .fourcc = V4L2_PIX_FMT_RGB565, /* rrrrrggg gggbbbbb */ .depth = 16, - /* Both capture and output format */ - .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, - }, - { + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { + .fourcc = V4L2_PIX_FMT_RGB565X, /* gggbbbbb rrrrrggg */ + .depth = 16, + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { + .fourcc = V4L2_PIX_FMT_RGB24, + .depth = 24, + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { + .fourcc = V4L2_PIX_FMT_BGR24, + .depth = 24, + .types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT, + }, { .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, - /* Output-only format */ - .types = MEM2MEM_OUTPUT, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SBGGR8, + .depth = 8, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SGBRG8, + .depth = 8, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SGRBG8, + .depth = 8, + .types = MEM2MEM_CAPTURE, + }, { + .fourcc = V4L2_PIX_FMT_SRGGB8, + .depth = 8, + .types = MEM2MEM_CAPTURE, }, }; @@ -120,14 +152,14 @@ enum { #define V4L2_CID_TRANS_TIME_MSEC (V4L2_CID_USER_BASE + 0x1000) #define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_USER_BASE + 0x1001) -static struct vim2m_fmt *find_format(struct v4l2_format *f) +static struct vim2m_fmt *find_format(u32 fourcc) { struct vim2m_fmt *fmt; unsigned int k; for (k = 0; k < NUM_FORMATS; k++) { fmt = &formats[k]; - if (fmt->fourcc == f->fmt.pix.pixelformat) + if (fmt->fourcc == fourcc) break; } @@ -137,6 +169,24 @@ static struct vim2m_fmt *find_format(struct v4l2_format *f) return &formats[k]; } +static void get_alignment(u32 fourcc, + unsigned int *walign, unsigned int *halign) +{ + switch (fourcc) { + case V4L2_PIX_FMT_SBGGR8: + case V4L2_PIX_FMT_SGBRG8: + case V4L2_PIX_FMT_SGRBG8: + case V4L2_PIX_FMT_SRGGB8: + *walign = BAYER_WIDTH_ALIGN; + *halign = BAYER_HEIGHT_ALIGN; + return; + default: + *walign = WIDTH_ALIGN; + *halign = HEIGHT_ALIGN; + return; + } +} + struct vim2m_dev { struct v4l2_device v4l2_dev; struct video_device vfd; @@ -146,9 +196,6 @@ struct vim2m_dev { atomic_t num_inst; struct mutex dev_mutex; - spinlock_t irqlock; - - struct delayed_work work_run; struct v4l2_m2m_dev *m2m_dev; }; @@ -167,6 +214,10 @@ struct vim2m_ctx { /* Transaction time (i.e. simulated processing time) in milliseconds */ u32 transtime; + struct mutex vb_mutex; + struct delayed_work work_run; + spinlock_t irqlock; + /* Abort requested by m2m */ int aborting; @@ -188,7 +239,7 @@ static inline struct vim2m_ctx *file2ctx(struct file *file) } static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx, - enum v4l2_buf_type type) + enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: @@ -196,28 +247,221 @@ static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx, case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &ctx->q_data[V4L2_M2M_DST]; default: - BUG(); + return NULL; + } +} + +static const char *type_name(enum v4l2_buf_type type) +{ + switch (type) { + case V4L2_BUF_TYPE_VIDEO_OUTPUT: + return "Output"; + case V4L2_BUF_TYPE_VIDEO_CAPTURE: + return "Capture"; + default: + return "Invalid"; + } +} + +#define CLIP(__color) \ + (u8)(((__color) > 0xff) ? 0xff : (((__color) < 0) ? 
0 : (__color))) + +static void copy_line(struct vim2m_q_data *q_data_out, + u8 *src, u8 *dst, bool reverse) +{ + int x, depth = q_data_out->fmt->depth >> 3; + + if (!reverse) { + memcpy(dst, src, q_data_out->width * depth); + } else { + for (x = 0; x < q_data_out->width >> 1; x++) { + memcpy(dst, src, depth); + memcpy(dst + depth, src - depth, depth); + src -= depth << 1; + dst += depth << 1; + } + return; } - return NULL; } +static void copy_two_pixels(struct vim2m_q_data *q_data_in, + struct vim2m_q_data *q_data_out, + u8 *src[2], u8 **dst, int ypos, bool reverse) +{ + struct vim2m_fmt *out = q_data_out->fmt; + struct vim2m_fmt *in = q_data_in->fmt; + u8 _r[2], _g[2], _b[2], *r, *g, *b; + int i; + + /* Step 1: read two consecutive pixels from src pointer */ + + r = _r; + g = _g; + b = _b; + + switch (in->fourcc) { + case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */ + for (i = 0; i < 2; i++) { + u16 pix = *(u16 *)(src[i]); + + *r++ = (u8)(((pix & 0xf800) >> 11) << 3) | 0x07; + *g++ = (u8)((((pix & 0x07e0) >> 5)) << 2) | 0x03; + *b++ = (u8)((pix & 0x1f) << 3) | 0x07; + } + break; + case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */ + for (i = 0; i < 2; i++) { + u16 pix = *(u16 *)(src[i]); + + *r++ = (u8)(((0x00f8 & pix) >> 3) << 3) | 0x07; + *g++ = (u8)(((pix & 0x7) << 2) | + ((pix & 0xe000) >> 5)) | 0x03; + *b++ = (u8)(((pix & 0x1f00) >> 8) << 3) | 0x07; + } + break; + default: + case V4L2_PIX_FMT_RGB24: + for (i = 0; i < 2; i++) { + *r++ = src[i][0]; + *g++ = src[i][1]; + *b++ = src[i][2]; + } + break; + case V4L2_PIX_FMT_BGR24: + for (i = 0; i < 2; i++) { + *b++ = src[i][0]; + *g++ = src[i][1]; + *r++ = src[i][2]; + } + break; + } + + /* Step 2: store two consecutive points, reversing them if needed */ + + r = _r; + g = _g; + b = _b; + + switch (out->fourcc) { + case V4L2_PIX_FMT_RGB565: /* rrrrrggg gggbbbbb */ + for (i = 0; i < 2; i++) { + u16 *pix = (u16 *)*dst; + + *pix = ((*r << 8) & 0xf800) | ((*g << 3) & 0x07e0) | + (*b >> 3); + + *dst += 2; + } + return; + case V4L2_PIX_FMT_RGB565X: /* gggbbbbb rrrrrggg */ + for (i = 0; i < 2; i++) { + u16 *pix = (u16 *)*dst; + u8 green = *g++ >> 2; + + *pix = ((green << 8) & 0xe000) | (green & 0x07) | + ((*b++ << 5) & 0x1f00) | ((*r++ & 0xf8)); + + *dst += 2; + } + return; + case V4L2_PIX_FMT_RGB24: + for (i = 0; i < 2; i++) { + *(*dst)++ = *r++; + *(*dst)++ = *g++; + *(*dst)++ = *b++; + } + return; + case V4L2_PIX_FMT_BGR24: + for (i = 0; i < 2; i++) { + *(*dst)++ = *b++; + *(*dst)++ = *g++; + *(*dst)++ = *r++; + } + return; + case V4L2_PIX_FMT_YUYV: + default: + { + u8 y, y1, u, v; + + y = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b) + + 524288) >> 15); + u = ((-4878 * (*r) - 9578 * (*g) + 14456 * (*b) + + 4210688) >> 15); + v = ((14456 * (*r++) - 12105 * (*g++) - 2351 * (*b++) + + 4210688) >> 15); + y1 = ((8453 * (*r) + 16594 * (*g) + 3223 * (*b) + + 524288) >> 15); + + *(*dst)++ = y; + *(*dst)++ = u; + + *(*dst)++ = y1; + *(*dst)++ = v; + return; + } + case V4L2_PIX_FMT_SBGGR8: + if (!(ypos & 1)) { + *(*dst)++ = *b; + *(*dst)++ = *++g; + } else { + *(*dst)++ = *g; + *(*dst)++ = *++r; + } + return; + case V4L2_PIX_FMT_SGBRG8: + if (!(ypos & 1)) { + *(*dst)++ = *g; + *(*dst)++ = *++b; + } else { + *(*dst)++ = *r; + *(*dst)++ = *++g; + } + return; + case V4L2_PIX_FMT_SGRBG8: + if (!(ypos & 1)) { + *(*dst)++ = *g; + *(*dst)++ = *++r; + } else { + *(*dst)++ = *b; + *(*dst)++ = *++g; + } + return; + case V4L2_PIX_FMT_SRGGB8: + if (!(ypos & 1)) { + *(*dst)++ = *r; + *(*dst)++ = *++g; + } else { + *(*dst)++ = *g; + *(*dst)++ = *++b; + } + return; + } 
+} static int device_process(struct vim2m_ctx *ctx, struct vb2_v4l2_buffer *in_vb, struct vb2_v4l2_buffer *out_vb) { struct vim2m_dev *dev = ctx->dev; - struct vim2m_q_data *q_data; - u8 *p_in, *p_out; - int x, y, t, w; - int tile_w, bytes_left; - int width, height, bytesperline; + struct vim2m_q_data *q_data_in, *q_data_out; + u8 *p_in, *p_line, *p_in_x[2], *p, *p_out; + unsigned int width, height, bytesperline, bytes_per_pixel; + unsigned int x, y, y_in, y_out, x_int, x_fract, x_err, x_offset; + int start, end, step; + + q_data_in = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); + if (!q_data_in) + return 0; + bytesperline = (q_data_in->width * q_data_in->fmt->depth) >> 3; + bytes_per_pixel = q_data_in->fmt->depth >> 3; - q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); + q_data_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); + if (!q_data_out) + return 0; - width = q_data->width; - height = q_data->height; - bytesperline = (q_data->width * q_data->fmt->depth) >> 3; + /* As we're doing scaling, use the output dimensions here */ + height = q_data_out->height; + width = q_data_out->width; p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0); p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0); @@ -227,109 +471,86 @@ static int device_process(struct vim2m_ctx *ctx, return -EFAULT; } - if (vb2_plane_size(&in_vb->vb2_buf, 0) > - vb2_plane_size(&out_vb->vb2_buf, 0)) { - v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); - return -EINVAL; - } + out_vb->sequence = q_data_out->sequence++; + in_vb->sequence = q_data_in->sequence++; + v4l2_m2m_buf_copy_metadata(in_vb, out_vb, true); - tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3)) - / MEM2MEM_NUM_TILES; - bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES; - w = 0; - - out_vb->sequence = - get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++; - in_vb->sequence = q_data->sequence++; - out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp; - - if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE) - out_vb->timecode = in_vb->timecode; - out_vb->field = in_vb->field; - out_vb->flags = in_vb->flags & - (V4L2_BUF_FLAG_TIMECODE | - V4L2_BUF_FLAG_KEYFRAME | - V4L2_BUF_FLAG_PFRAME | - V4L2_BUF_FLAG_BFRAME | - V4L2_BUF_FLAG_TSTAMP_SRC_MASK); - - switch (ctx->mode) { - case MEM2MEM_HFLIP | MEM2MEM_VFLIP: - p_out += bytesperline * height - bytes_left; - for (y = 0; y < height; ++y) { - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x1) { - for (x = 0; x < tile_w; ++x) - *--p_out = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) - *--p_out = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; - } - p_in += bytes_left; - p_out -= bytes_left; - } - break; + if (ctx->mode & MEM2MEM_VFLIP) { + start = height - 1; + end = -1; + step = -1; + } else { + start = 0; + end = height; + step = 1; + } + y_out = 0; + + /* + * When format and resolution are identical, + * we can use a faster copy logic + */ + if (q_data_in->fmt->fourcc == q_data_out->fmt->fourcc && + q_data_in->width == q_data_out->width && + q_data_in->height == q_data_out->height) { + for (y = start; y != end; y += step, y_out++) { + p = p_in + (y * bytesperline); + if (ctx->mode & MEM2MEM_HFLIP) + p += bytesperline - (q_data_in->fmt->depth >> 3); + + copy_line(q_data_out, p, p_out, + ctx->mode & MEM2MEM_HFLIP); - case MEM2MEM_HFLIP: - for (y = 0; y < height; ++y) { - p_out += MEM2MEM_NUM_TILES * tile_w; - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x01) { - for (x = 0; x < tile_w; ++x) - *--p_out = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) 
- *--p_out = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; - } - p_in += bytes_left; p_out += bytesperline; } - break; + return 0; + } + + /* Slower algorithm with format conversion, hflip, vflip and scaler */ + + /* To speed scaler up, use Bresenham for X dimension */ + x_int = q_data_in->width / q_data_out->width; + x_fract = q_data_in->width % q_data_out->width; + + for (y = start; y != end; y += step, y_out++) { + y_in = (y * q_data_in->height) / q_data_out->height; + x_offset = 0; + x_err = 0; - case MEM2MEM_VFLIP: - p_out += bytesperline * (height - 1); - for (y = 0; y < height; ++y) { - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x1) { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; + p_line = p_in + (y_in * bytesperline); + if (ctx->mode & MEM2MEM_HFLIP) + p_line += bytesperline - (q_data_in->fmt->depth >> 3); + p_in_x[0] = p_line; + + for (x = 0; x < width >> 1; x++) { + x_offset += x_int; + x_err += x_fract; + if (x_err > width) { + x_offset++; + x_err -= width; } - p_in += bytes_left; - p_out += bytes_left - 2 * bytesperline; - } - break; - default: - for (y = 0; y < height; ++y) { - for (t = 0; t < MEM2MEM_NUM_TILES; ++t) { - if (w & 0x1) { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ + - MEM2MEM_COLOR_STEP; - } else { - for (x = 0; x < tile_w; ++x) - *p_out++ = *p_in++ - - MEM2MEM_COLOR_STEP; - } - ++w; + if (ctx->mode & MEM2MEM_HFLIP) + p_in_x[1] = p_line - x_offset * bytes_per_pixel; + else + p_in_x[1] = p_line + x_offset * bytes_per_pixel; + + copy_two_pixels(q_data_in, q_data_out, + p_in_x, &p_out, y_out, + ctx->mode & MEM2MEM_HFLIP); + + /* Calculate the next p_in_x0 */ + x_offset += x_int; + x_err += x_fract; + if (x_err > width) { + x_offset++; + x_err -= width; } - p_in += bytes_left; - p_out += bytes_left; + + if (ctx->mode & MEM2MEM_HFLIP) + p_in_x[0] = p_line - x_offset * bytes_per_pixel; + else + p_in_x[0] = p_line + x_offset * bytes_per_pixel; } } @@ -349,7 +570,7 @@ static int job_ready(void *priv) if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) { - dprintk(ctx->dev, "Not enough buffers available\n"); + dprintk(ctx->dev, 1, "Not enough buffers available\n"); return 0; } @@ -373,7 +594,6 @@ static void job_abort(void *priv) static void device_run(void *priv) { struct vim2m_ctx *ctx = priv; - struct vim2m_dev *dev = ctx->dev; struct vb2_v4l2_buffer *src_buf, *dst_buf; src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); @@ -390,37 +610,38 @@ static void device_run(void *priv) &ctx->hdl); /* Run delayed work, which simulates a hardware irq */ - schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime)); + schedule_delayed_work(&ctx->work_run, msecs_to_jiffies(ctx->transtime)); } static void device_work(struct work_struct *w) { - struct vim2m_dev *vim2m_dev = - container_of(w, struct vim2m_dev, work_run.work); struct vim2m_ctx *curr_ctx; + struct vim2m_dev *vim2m_dev; struct vb2_v4l2_buffer *src_vb, *dst_vb; unsigned long flags; - curr_ctx = v4l2_m2m_get_curr_priv(vim2m_dev->m2m_dev); + curr_ctx = container_of(w, struct vim2m_ctx, work_run.work); - if (NULL == curr_ctx) { + if (!curr_ctx) { pr_err("Instance released before the end of transaction\n"); return; } + vim2m_dev = curr_ctx->dev; + src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx); curr_ctx->num_processed++; - 
spin_lock_irqsave(&vim2m_dev->irqlock, flags); + spin_lock_irqsave(&curr_ctx->irqlock, flags); v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); - spin_unlock_irqrestore(&vim2m_dev->irqlock, flags); + spin_unlock_irqrestore(&curr_ctx->irqlock, flags); if (curr_ctx->num_processed == curr_ctx->translen || curr_ctx->aborting) { - dprintk(curr_ctx->dev, "Finishing transaction\n"); + dprintk(curr_ctx->dev, 2, "Finishing capture buffer fill\n"); curr_ctx->num_processed = 0; v4l2_m2m_job_finish(vim2m_dev->m2m_dev, curr_ctx->fh.m2m_ctx); } else { @@ -437,7 +658,7 @@ static int vidioc_querycap(struct file *file, void *priv, strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); snprintf(cap->bus_info, sizeof(cap->bus_info), - "platform:%s", MEM2MEM_NAME); + "platform:%s", MEM2MEM_NAME); return 0; } @@ -453,8 +674,10 @@ static int enum_fmt(struct v4l2_fmtdesc *f, u32 type) /* index-th format of type type found ? */ if (num == f->index) break; - /* Correct type but haven't reached our index yet, - * just increment per-type index */ + /* + * Correct type but haven't reached our index yet, + * just increment per-type index + */ ++num; } } @@ -482,6 +705,27 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, return enum_fmt(f, MEM2MEM_OUTPUT); } +static int vidioc_enum_framesizes(struct file *file, void *priv, + struct v4l2_frmsizeenum *fsize) +{ + if (fsize->index != 0) + return -EINVAL; + + if (!find_format(fsize->pixel_format)) + return -EINVAL; + + fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; + fsize->stepwise.min_width = MIN_W; + fsize->stepwise.min_height = MIN_H; + fsize->stepwise.max_width = MAX_W; + fsize->stepwise.max_height = MAX_H; + + get_alignment(fsize->pixel_format, + &fsize->stepwise.step_width, + &fsize->stepwise.step_height); + return 0; +} + static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) { struct vb2_queue *vq; @@ -492,6 +736,8 @@ static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) return -EINVAL; q_data = get_q_data(ctx, f->type); + if (!q_data) + return -EINVAL; f->fmt.pix.width = q_data->width; f->fmt.pix.height = q_data->height; @@ -521,8 +767,11 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt) { - /* V4L2 specification suggests the driver corrects the format struct - * if any of the dimensions is unsupported */ + int walign, halign; + /* + * V4L2 specification specifies the driver corrects the + * format struct if any of the dimensions is unsupported + */ if (f->fmt.pix.height < MIN_H) f->fmt.pix.height = MIN_H; else if (f->fmt.pix.height > MAX_H) @@ -533,7 +782,9 @@ static int vidioc_try_fmt(struct v4l2_format *f, struct vim2m_fmt *fmt) else if (f->fmt.pix.width > MAX_W) f->fmt.pix.width = MAX_W; - f->fmt.pix.width &= ~DIM_ALIGN_MASK; + get_alignment(f->fmt.pix.pixelformat, &walign, &halign); + f->fmt.pix.width &= ~(walign - 1); + f->fmt.pix.height &= ~(halign - 1); f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.field = V4L2_FIELD_NONE; @@ -547,10 +798,10 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); - fmt = find_format(f); + fmt = find_format(f->fmt.pix.pixelformat); if (!fmt) { f->fmt.pix.pixelformat = formats[0].fourcc; - fmt = find_format(f); + fmt = 
find_format(f->fmt.pix.pixelformat); } if (!(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->dev->v4l2_dev, @@ -572,10 +823,10 @@ static int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct vim2m_fmt *fmt; struct vim2m_ctx *ctx = file2ctx(file); - fmt = find_format(f); + fmt = find_format(f->fmt.pix.pixelformat); if (!fmt) { f->fmt.pix.pixelformat = formats[0].fourcc; - fmt = find_format(f); + fmt = find_format(f->fmt.pix.pixelformat); } if (!(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->dev->v4l2_dev, @@ -607,15 +858,20 @@ static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) return -EBUSY; } - q_data->fmt = find_format(f); + q_data->fmt = find_format(f->fmt.pix.pixelformat); q_data->width = f->fmt.pix.width; q_data->height = f->fmt.pix.height; q_data->sizeimage = q_data->width * q_data->height * q_data->fmt->depth >> 3; - dprintk(ctx->dev, - "Setting format for type %d, wxh: %dx%d, fmt: %d\n", - f->type, q_data->width, q_data->height, q_data->fmt->fourcc); + dprintk(ctx->dev, 1, + "Format for type %s: %dx%d (%d bpp), fmt: %c%c%c%c\n", + type_name(f->type), q_data->width, q_data->height, + q_data->fmt->depth, + (q_data->fmt->fourcc & 0xff), + (q_data->fmt->fourcc >> 8) & 0xff, + (q_data->fmt->fourcc >> 16) & 0xff, + (q_data->fmt->fourcc >> 24) & 0xff); return 0; } @@ -674,6 +930,8 @@ static int vim2m_s_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_TRANS_TIME_MSEC: ctx->transtime = ctrl->val; + if (ctx->transtime < 1) + ctx->transtime = 1; break; case V4L2_CID_TRANS_NUM_BUFS: @@ -692,11 +950,11 @@ static const struct v4l2_ctrl_ops vim2m_ctrl_ops = { .s_ctrl = vim2m_s_ctrl, }; - static const struct v4l2_ioctl_ops vim2m_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, + .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, @@ -721,20 +979,23 @@ static const struct v4l2_ioctl_ops vim2m_ioctl_ops = { .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; - /* * Queue operations */ static int vim2m_queue_setup(struct vb2_queue *vq, - unsigned int *nbuffers, unsigned int *nplanes, - unsigned int sizes[], struct device *alloc_devs[]) + unsigned int *nbuffers, + unsigned int *nplanes, + unsigned int sizes[], + struct device *alloc_devs[]) { struct vim2m_ctx *ctx = vb2_get_drv_priv(vq); struct vim2m_q_data *q_data; unsigned int size, count = *nbuffers; q_data = get_q_data(ctx, vq->type); + if (!q_data) + return -EINVAL; size = q_data->width * q_data->height * q_data->fmt->depth >> 3; @@ -748,33 +1009,42 @@ static int vim2m_queue_setup(struct vb2_queue *vq, *nplanes = 1; sizes[0] = size; - dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); + dprintk(ctx->dev, 1, "%s: get %d buffer(s) of size %d each.\n", + type_name(vq->type), count, size); return 0; } -static int vim2m_buf_prepare(struct vb2_buffer *vb) +static int vim2m_buf_out_validate(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); + + if (vbuf->field == V4L2_FIELD_ANY) + vbuf->field = V4L2_FIELD_NONE; + if (vbuf->field != V4L2_FIELD_NONE) { + dprintk(ctx->dev, 1, "%s field isn't supported\n", __func__); + return -EINVAL; + } + + return 0; +} + +static int vim2m_buf_prepare(struct vb2_buffer *vb) +{ + struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct vim2m_q_data *q_data; - dprintk(ctx->dev, "type: %d\n", 
vb->vb2_queue->type); + dprintk(ctx->dev, 2, "type: %s\n", type_name(vb->vb2_queue->type)); q_data = get_q_data(ctx, vb->vb2_queue->type); - if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { - if (vbuf->field == V4L2_FIELD_ANY) - vbuf->field = V4L2_FIELD_NONE; - if (vbuf->field != V4L2_FIELD_NONE) { - dprintk(ctx->dev, "%s field isn't supported\n", - __func__); - return -EINVAL; - } - } - + if (!q_data) + return -EINVAL; if (vb2_plane_size(vb, 0) < q_data->sizeimage) { - dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n", - __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage); + dprintk(ctx->dev, 1, + "%s data will not fit into plane (%lu < %lu)\n", + __func__, vb2_plane_size(vb, 0), + (long)q_data->sizeimage); return -EINVAL; } @@ -791,11 +1061,14 @@ static void vim2m_buf_queue(struct vb2_buffer *vb) v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } -static int vim2m_start_streaming(struct vb2_queue *q, unsigned count) +static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count) { struct vim2m_ctx *ctx = vb2_get_drv_priv(q); struct vim2m_q_data *q_data = get_q_data(ctx, q->type); + if (!q_data) + return -EINVAL; + q_data->sequence = 0; return 0; } @@ -803,25 +1076,23 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count) static void vim2m_stop_streaming(struct vb2_queue *q) { struct vim2m_ctx *ctx = vb2_get_drv_priv(q); - struct vim2m_dev *dev = ctx->dev; struct vb2_v4l2_buffer *vbuf; unsigned long flags; - if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx) - cancel_delayed_work_sync(&dev->work_run); + cancel_delayed_work_sync(&ctx->work_run); for (;;) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); else vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); - if (vbuf == NULL) + if (!vbuf) return; v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req, &ctx->hdl); - spin_lock_irqsave(&ctx->dev->irqlock, flags); + spin_lock_irqsave(&ctx->irqlock, flags); v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); - spin_unlock_irqrestore(&ctx->dev->irqlock, flags); + spin_unlock_irqrestore(&ctx->irqlock, flags); } } @@ -834,6 +1105,7 @@ static void vim2m_buf_request_complete(struct vb2_buffer *vb) static const struct vb2_ops vim2m_qops = { .queue_setup = vim2m_queue_setup, + .buf_out_validate = vim2m_buf_out_validate, .buf_prepare = vim2m_buf_prepare, .buf_queue = vim2m_buf_queue, .start_streaming = vim2m_start_streaming, @@ -843,7 +1115,8 @@ static const struct vb2_ops vim2m_qops = { .buf_request_complete = vim2m_buf_request_complete, }; -static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) +static int queue_init(void *priv, struct vb2_queue *src_vq, + struct vb2_queue *dst_vq) { struct vim2m_ctx *ctx = priv; int ret; @@ -855,7 +1128,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds src_vq->ops = &vim2m_qops; src_vq->mem_ops = &vb2_vmalloc_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; - src_vq->lock = &ctx->dev->dev_mutex; + src_vq->lock = &ctx->vb_mutex; src_vq->supports_requests = true; ret = vb2_queue_init(src_vq); @@ -869,17 +1142,16 @@ static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *ds dst_vq->ops = &vim2m_qops; dst_vq->mem_ops = &vb2_vmalloc_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; - dst_vq->lock = &ctx->dev->dev_mutex; + dst_vq->lock = &ctx->vb_mutex; return vb2_queue_init(dst_vq); } -static const struct v4l2_ctrl_config vim2m_ctrl_trans_time_msec = { +static struct v4l2_ctrl_config 
vim2m_ctrl_trans_time_msec = { .ops = &vim2m_ctrl_ops, .id = V4L2_CID_TRANS_TIME_MSEC, .name = "Transaction Time (msec)", .type = V4L2_CTRL_TYPE_INTEGER, - .def = MEM2MEM_DEF_TRANSTIME, .min = 1, .max = 10001, .step = 1, @@ -921,6 +1193,8 @@ static int vim2m_open(struct file *file) v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(hdl, &vim2m_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); + + vim2m_ctrl_trans_time_msec.def = default_transtime; v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_time_msec, NULL); v4l2_ctrl_new_custom(hdl, &vim2m_ctrl_trans_num_bufs, NULL); if (hdl->error) { @@ -944,6 +1218,10 @@ static int vim2m_open(struct file *file) ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); + mutex_init(&ctx->vb_mutex); + spin_lock_init(&ctx->irqlock); + INIT_DELAYED_WORK(&ctx->work_run, device_work); + if (IS_ERR(ctx->fh.m2m_ctx)) { rc = PTR_ERR(ctx->fh.m2m_ctx); @@ -956,7 +1234,7 @@ static int vim2m_open(struct file *file) v4l2_fh_add(&ctx->fh); atomic_inc(&dev->num_inst); - dprintk(dev, "Created instance: %p, m2m_ctx: %p\n", + dprintk(dev, 1, "Created instance: %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx); open_unlock: @@ -969,7 +1247,7 @@ static int vim2m_release(struct file *file) struct vim2m_dev *dev = video_drvdata(file); struct vim2m_ctx *ctx = file2ctx(file); - dprintk(dev, "Releasing instance %p\n", ctx); + dprintk(dev, 1, "Releasing instance %p\n", ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); @@ -1024,8 +1302,6 @@ static int vim2m_probe(struct platform_device *pdev) if (!dev) return -ENOMEM; - spin_lock_init(&dev->irqlock); - ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) return ret; @@ -1037,7 +1313,6 @@ static int vim2m_probe(struct platform_device *pdev) vfd = &dev->vfd; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; - INIT_DELAYED_WORK(&dev->work_run, device_work); ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { @@ -1047,7 +1322,7 @@ static int vim2m_probe(struct platform_device *pdev) video_set_drvdata(vfd, dev); v4l2_info(&dev->v4l2_dev, - "Device registered as /dev/video%d\n", vfd->num); + "Device registered as /dev/video%d\n", vfd->num); platform_set_drvdata(pdev, dev); @@ -1061,12 +1336,14 @@ static int vim2m_probe(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER dev->mdev.dev = &pdev->dev; strscpy(dev->mdev.model, "vim2m", sizeof(dev->mdev.model)); + strscpy(dev->mdev.bus_info, "platform:vim2m", + sizeof(dev->mdev.bus_info)); media_device_init(&dev->mdev); dev->mdev.ops = &m2m_media_ops; dev->v4l2_dev.mdev = &dev->mdev; - ret = v4l2_m2m_register_media_controller(dev->m2m_dev, - vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER); + ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, + MEDIA_ENT_F_PROC_VIDEO_SCALER); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem media controller\n"); goto unreg_m2m; diff --git a/drivers/media/platform/vimc/Makefile b/drivers/media/platform/vimc/Makefile index 4b2e3de7856e..c4fc8e7d365a 100644 --- a/drivers/media/platform/vimc/Makefile +++ b/drivers/media/platform/vimc/Makefile @@ -5,6 +5,7 @@ vimc_common-objs := vimc-common.o vimc_debayer-objs := vimc-debayer.o vimc_scaler-objs := vimc-scaler.o vimc_sensor-objs := vimc-sensor.o +vimc_streamer-objs := vimc-streamer.o obj-$(CONFIG_VIDEO_VIMC) += vimc.o vimc_capture.o vimc_common.o vimc-debayer.o \ - vimc_scaler.o vimc_sensor.o + vimc_scaler.o vimc_sensor.o vimc_streamer.o diff --git a/drivers/media/platform/vimc/vimc-capture.c 
b/drivers/media/platform/vimc/vimc-capture.c index 3f7e9ed56633..ea869631a3f6 100644 --- a/drivers/media/platform/vimc/vimc-capture.c +++ b/drivers/media/platform/vimc/vimc-capture.c @@ -24,6 +24,7 @@ #include #include "vimc-common.h" +#include "vimc-streamer.h" #define VIMC_CAP_DRV_NAME "vimc-capture" @@ -44,7 +45,7 @@ struct vimc_cap_device { spinlock_t qlock; struct mutex lock; u32 sequence; - struct media_pipeline pipe; + struct vimc_stream stream; }; static const struct v4l2_pix_format fmt_default = { @@ -69,12 +70,10 @@ struct vimc_cap_buffer { static int vimc_cap_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { - struct vimc_cap_device *vcap = video_drvdata(file); - - strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); + strscpy(cap->driver, VIMC_PDEV_NAME, sizeof(cap->driver)); strscpy(cap->card, KBUILD_MODNAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), - "platform:%s", vcap->vdev.v4l2_dev->name); + "platform:%s", VIMC_PDEV_NAME); return 0; } @@ -248,14 +247,13 @@ static int vimc_cap_start_streaming(struct vb2_queue *vq, unsigned int count) vcap->sequence = 0; /* Start the media pipeline */ - ret = media_pipeline_start(entity, &vcap->pipe); + ret = media_pipeline_start(entity, &vcap->stream.pipe); if (ret) { vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED); return ret; } - /* Enable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vcap->vdev.entity, 1); + ret = vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 1); if (ret) { media_pipeline_stop(entity); vimc_cap_return_all_buffers(vcap, VB2_BUF_STATE_QUEUED); @@ -273,8 +271,7 @@ static void vimc_cap_stop_streaming(struct vb2_queue *vq) { struct vimc_cap_device *vcap = vb2_get_drv_priv(vq); - /* Disable streaming from the pipe */ - vimc_pipeline_s_stream(&vcap->vdev.entity, 0); + vimc_streamer_s_stream(&vcap->stream, &vcap->ved, 0); /* Stop the media pipeline */ media_pipeline_stop(&vcap->vdev.entity); @@ -355,8 +352,8 @@ static void vimc_cap_comp_unbind(struct device *comp, struct device *master, kfree(vcap); } -static void vimc_cap_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, const void *frame) +static void *vimc_cap_process_frame(struct vimc_ent_device *ved, + const void *frame) { struct vimc_cap_device *vcap = container_of(ved, struct vimc_cap_device, ved); @@ -370,7 +367,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved, typeof(*vimc_buf), list); if (!vimc_buf) { spin_unlock(&vcap->qlock); - return; + return ERR_PTR(-EAGAIN); } /* Remove this entry from the list */ @@ -391,6 +388,7 @@ static void vimc_cap_process_frame(struct vimc_ent_device *ved, vb2_set_plane_payload(&vimc_buf->vb2.vb2_buf, 0, vcap->format.sizeimage); vb2_buffer_done(&vimc_buf->vb2.vb2_buf, VB2_BUF_STATE_DONE); + return NULL; } static int vimc_cap_comp_bind(struct device *comp, struct device *master, @@ -431,7 +429,7 @@ static int vimc_cap_comp_bind(struct device *comp, struct device *master, /* Initialize the vb2 queue */ q = &vcap->queue; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - q->io_modes = VB2_MMAP | VB2_DMABUF; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR; q->drv_priv = vcap; q->buf_struct_size = sizeof(struct vimc_cap_buffer); q->ops = &vimc_cap_qops; diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c index 867e24dbd6b5..c1a74bb2df58 100644 --- a/drivers/media/platform/vimc/vimc-common.c +++ b/drivers/media/platform/vimc/vimc-common.c @@ -207,41 +207,6 @@ const struct vimc_pix_map 
*vimc_pix_map_by_pixelformat(u32 pixelformat) } EXPORT_SYMBOL_GPL(vimc_pix_map_by_pixelformat); -int vimc_propagate_frame(struct media_pad *src, const void *frame) -{ - struct media_link *link; - - if (!(src->flags & MEDIA_PAD_FL_SOURCE)) - return -EINVAL; - - /* Send this frame to all sink pads that are direct linked */ - list_for_each_entry(link, &src->entity->links, list) { - if (link->source == src && - (link->flags & MEDIA_LNK_FL_ENABLED)) { - struct vimc_ent_device *ved = NULL; - struct media_entity *entity = link->sink->entity; - - if (is_media_entity_v4l2_subdev(entity)) { - struct v4l2_subdev *sd = - container_of(entity, struct v4l2_subdev, - entity); - ved = v4l2_get_subdevdata(sd); - } else if (is_media_entity_v4l2_video_device(entity)) { - struct video_device *vdev = - container_of(entity, - struct video_device, - entity); - ved = video_get_drvdata(vdev); - } - if (ved && ved->process_frame) - ved->process_frame(ved, link->sink, frame); - } - } - - return 0; -} -EXPORT_SYMBOL_GPL(vimc_propagate_frame); - /* Helper function to allocate and initialize pads */ struct media_pad *vimc_pads_init(u16 num_pads, const unsigned long *pads_flag) { diff --git a/drivers/media/platform/vimc/vimc-common.h b/drivers/media/platform/vimc/vimc-common.h index 2e9981b18166..84539430b5e7 100644 --- a/drivers/media/platform/vimc/vimc-common.h +++ b/drivers/media/platform/vimc/vimc-common.h @@ -22,6 +22,8 @@ #include #include +#define VIMC_PDEV_NAME "vimc" + /* VIMC-specific controls */ #define VIMC_CID_VIMC_BASE (0x00f00000 | 0xf000) #define VIMC_CID_VIMC_CLASS (0x00f00000 | 1) @@ -113,23 +115,12 @@ struct vimc_pix_map { struct vimc_ent_device { struct media_entity *ent; struct media_pad *pads; - void (*process_frame)(struct vimc_ent_device *ved, - struct media_pad *sink, const void *frame); + void * (*process_frame)(struct vimc_ent_device *ved, + const void *frame); void (*vdev_get_format)(struct vimc_ent_device *ved, struct v4l2_pix_format *fmt); }; -/** - * vimc_propagate_frame - propagate a frame through the topology - * - * @src: the source pad where the frame is being originated - * @frame: the frame to be propagated - * - * This function will call the process_frame callback from the vimc_ent_device - * struct of the nodes directly connected to the @src pad - */ -int vimc_propagate_frame(struct media_pad *src, const void *frame); - /** * vimc_pads_init - initialize pads * diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c index ce809d2e3d53..0fbb7914098f 100644 --- a/drivers/media/platform/vimc/vimc-core.c +++ b/drivers/media/platform/vimc/vimc-core.c @@ -24,7 +24,6 @@ #include "vimc-common.h" -#define VIMC_PDEV_NAME "vimc" #define VIMC_MDEV_MODEL_NAME "VIMC MDEV" #define VIMC_ENT_LINK(src, srcpad, sink, sinkpad, link_flags) { \ @@ -221,6 +220,7 @@ static int vimc_comp_bind(struct device *master) err_mdev_unregister: media_device_unregister(&vimc->mdev); + media_device_cleanup(&vimc->mdev); err_comp_unbind_all: component_unbind_all(master, NULL); err_v4l2_unregister: @@ -237,6 +237,7 @@ static void vimc_comp_unbind(struct device *master) dev_dbg(master, "unbind"); media_device_unregister(&vimc->mdev); + media_device_cleanup(&vimc->mdev); component_unbind_all(master, NULL); v4l2_device_unregister(&vimc->v4l2_dev); } @@ -319,6 +320,8 @@ static int vimc_probe(struct platform_device *pdev) /* Initialize media device */ strscpy(vimc->mdev.model, VIMC_MDEV_MODEL_NAME, sizeof(vimc->mdev.model)); + snprintf(vimc->mdev.bus_info, sizeof(vimc->mdev.bus_info), + 
"platform:%s", VIMC_PDEV_NAME); vimc->mdev.dev = &pdev->dev; media_device_init(&vimc->mdev); diff --git a/drivers/media/platform/vimc/vimc-debayer.c b/drivers/media/platform/vimc/vimc-debayer.c index 77887f66f323..7d77c63b99d2 100644 --- a/drivers/media/platform/vimc/vimc-debayer.c +++ b/drivers/media/platform/vimc/vimc-debayer.c @@ -321,7 +321,6 @@ static void vimc_deb_set_rgb_mbus_fmt_rgb888_1x24(struct vimc_deb_device *vdeb, static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_deb_device *vdeb = v4l2_get_subdevdata(sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -351,22 +350,10 @@ static int vimc_deb_s_stream(struct v4l2_subdev *sd, int enable) if (!vdeb->src_frame) return -ENOMEM; - /* Turn the stream on in the subdevices directly connected */ - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 1); - if (ret) { - vfree(vdeb->src_frame); - vdeb->src_frame = NULL; - return ret; - } } else { if (!vdeb->src_frame) return 0; - /* Disable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vdeb->sd.entity, 0); - if (ret) - return ret; - vfree(vdeb->src_frame); vdeb->src_frame = NULL; } @@ -480,9 +467,8 @@ static void vimc_deb_calc_rgb_sink(struct vimc_deb_device *vdeb, } } -static void vimc_deb_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, - const void *sink_frame) +static void *vimc_deb_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { struct vimc_deb_device *vdeb = container_of(ved, struct vimc_deb_device, ved); @@ -491,7 +477,7 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved, /* If the stream in this node is not active, just return */ if (!vdeb->src_frame) - return; + return ERR_PTR(-EINVAL); for (i = 0; i < vdeb->sink_fmt.height; i++) for (j = 0; j < vdeb->sink_fmt.width; j++) { @@ -499,12 +485,8 @@ static void vimc_deb_process_frame(struct vimc_ent_device *ved, vdeb->set_rgb_src(vdeb, i, j, rgb); } - /* Propagate the frame through all source pads */ - for (i = 1; i < vdeb->sd.entity.num_pads; i++) { - struct media_pad *pad = &vdeb->sd.entity.pads[i]; + return vdeb->src_frame; - vimc_propagate_frame(pad, vdeb->src_frame); - } } static void vimc_deb_comp_unbind(struct device *comp, struct device *master, diff --git a/drivers/media/platform/vimc/vimc-scaler.c b/drivers/media/platform/vimc/vimc-scaler.c index b0952ee86296..39b2a73dfcc1 100644 --- a/drivers/media/platform/vimc/vimc-scaler.c +++ b/drivers/media/platform/vimc/vimc-scaler.c @@ -217,7 +217,6 @@ static const struct v4l2_subdev_pad_ops vimc_sca_pad_ops = { static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -245,22 +244,10 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable) if (!vsca->src_frame) return -ENOMEM; - /* Turn the stream on in the subdevices directly connected */ - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 1); - if (ret) { - vfree(vsca->src_frame); - vsca->src_frame = NULL; - return ret; - } } else { if (!vsca->src_frame) return 0; - /* Disable streaming from the pipe */ - ret = vimc_pipeline_s_stream(&vsca->sd.entity, 0); - if (ret) - return ret; - vfree(vsca->src_frame); vsca->src_frame = NULL; } @@ -346,26 +333,19 @@ static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca, vimc_sca_scale_pix(vsca, i, j, sink_frame); } -static void vimc_sca_process_frame(struct vimc_ent_device *ved, - struct media_pad *sink, - const void *sink_frame) 
+static void *vimc_sca_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { struct vimc_sca_device *vsca = container_of(ved, struct vimc_sca_device, ved); - unsigned int i; /* If the stream in this node is not active, just return */ if (!vsca->src_frame) - return; + return ERR_PTR(-EINVAL); vimc_sca_fill_src_frame(vsca, sink_frame); - /* Propagate the frame through all source pads */ - for (i = 1; i < vsca->sd.entity.num_pads; i++) { - struct media_pad *pad = &vsca->sd.entity.pads[i]; - - vimc_propagate_frame(pad, vsca->src_frame); - } + return vsca->src_frame; }; static void vimc_sca_comp_unbind(struct device *comp, struct device *master, diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c index 32ca9c6172b1..59195f262623 100644 --- a/drivers/media/platform/vimc/vimc-sensor.c +++ b/drivers/media/platform/vimc/vimc-sensor.c @@ -16,8 +16,6 @@ */ #include -#include -#include #include #include #include @@ -201,38 +199,20 @@ static const struct v4l2_subdev_pad_ops vimc_sen_pad_ops = { .set_fmt = vimc_sen_set_fmt, }; -static int vimc_sen_tpg_thread(void *data) +static void *vimc_sen_process_frame(struct vimc_ent_device *ved, + const void *sink_frame) { - struct vimc_sen_device *vsen = data; - unsigned int i; - - set_freezable(); - set_current_state(TASK_UNINTERRUPTIBLE); - - for (;;) { - try_to_freeze(); - if (kthread_should_stop()) - break; + struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device, + ved); - tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame); - - /* Send the frame to all source pads */ - for (i = 0; i < vsen->sd.entity.num_pads; i++) - vimc_propagate_frame(&vsen->sd.entity.pads[i], - vsen->frame); - - /* 60 frames per second */ - schedule_timeout(HZ/60); - } - - return 0; + tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame); + return vsen->frame; } static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) { struct vimc_sen_device *vsen = container_of(sd, struct vimc_sen_device, sd); - int ret; if (enable) { const struct vimc_pix_map *vpix; @@ -258,26 +238,8 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable) /* configure the test pattern generator */ vimc_sen_tpg_s_format(vsen); - /* Initialize the image generator thread */ - vsen->kthread_sen = kthread_run(vimc_sen_tpg_thread, vsen, - "%s-sen", vsen->sd.v4l2_dev->name); - if (IS_ERR(vsen->kthread_sen)) { - dev_err(vsen->dev, "%s: kernel_thread() failed\n", - vsen->sd.name); - vfree(vsen->frame); - vsen->frame = NULL; - return PTR_ERR(vsen->kthread_sen); - } } else { - if (!vsen->kthread_sen) - return 0; - - /* Stop image generator */ - ret = kthread_stop(vsen->kthread_sen); - if (ret) - return ret; - vsen->kthread_sen = NULL; vfree(vsen->frame); vsen->frame = NULL; return 0; @@ -413,6 +375,7 @@ static int vimc_sen_comp_bind(struct device *comp, struct device *master, if (ret) goto err_free_hdl; + vsen->ved.process_frame = vimc_sen_process_frame; dev_set_drvdata(comp, &vsen->ved); vsen->dev = comp; diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c new file mode 100644 index 000000000000..fcc897fb247b --- /dev/null +++ b/drivers/media/platform/vimc/vimc-streamer.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * vimc-streamer.c Virtual Media Controller Driver + * + * Copyright (C) 2018 Lucas A. M. 
Magalhães + * + */ + +#include +#include +#include +#include + +#include "vimc-streamer.h" + +/** + * vimc_get_source_entity - get the entity connected with the first sink pad + * + * @ent: reference media_entity + * + * Helper function that returns the media entity containing the source pad + * linked with the first sink pad from the given media entity pad list. + */ +static struct media_entity *vimc_get_source_entity(struct media_entity *ent) +{ + struct media_pad *pad; + int i; + + for (i = 0; i < ent->num_pads; i++) { + if (ent->pads[i].flags & MEDIA_PAD_FL_SOURCE) + continue; + pad = media_entity_remote_pad(&ent->pads[i]); + return pad ? pad->entity : NULL; + } + return NULL; +} + +/* + * vimc_streamer_pipeline_terminate - Disable stream in all ved in stream + * + * @stream: the pointer to the stream structure with the pipeline to be + * disabled. + * + * Calls s_stream to disable the stream in each entity of the pipeline + * + */ +static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream) +{ + struct media_entity *entity; + struct v4l2_subdev *sd; + + while (stream->pipe_size) { + stream->pipe_size--; + entity = stream->ved_pipeline[stream->pipe_size]->ent; + entity = vimc_get_source_entity(entity); + stream->ved_pipeline[stream->pipe_size] = NULL; + + if (!is_media_entity_v4l2_subdev(entity)) + continue; + + sd = media_entity_to_v4l2_subdev(entity); + v4l2_subdev_call(sd, video, s_stream, 0); + } +} + +/* + * vimc_streamer_pipeline_init - initializes the stream structure + * + * @stream: the pointer to the stream structure to be initialized + * @ved: the pointer to the vimc entity initializing the stream + * + * Initializes the stream structure. Walks through the entity graph to + * construct the pipeline used later on the streamer thread. + * Calls s_stream to enable stream in all entities of the pipeline. 
+ */ +static int vimc_streamer_pipeline_init(struct vimc_stream *stream, + struct vimc_ent_device *ved) +{ + struct media_entity *entity; + struct video_device *vdev; + struct v4l2_subdev *sd; + int ret = 0; + + stream->pipe_size = 0; + while (stream->pipe_size < VIMC_STREAMER_PIPELINE_MAX_SIZE) { + if (!ved) { + vimc_streamer_pipeline_terminate(stream); + return -EINVAL; + } + stream->ved_pipeline[stream->pipe_size++] = ved; + + entity = vimc_get_source_entity(ved->ent); + /* Check if the end of the pipeline was reached*/ + if (!entity) + return 0; + + if (is_media_entity_v4l2_subdev(entity)) { + sd = media_entity_to_v4l2_subdev(entity); + ret = v4l2_subdev_call(sd, video, s_stream, 1); + if (ret && ret != -ENOIOCTLCMD) { + vimc_streamer_pipeline_terminate(stream); + return ret; + } + ved = v4l2_get_subdevdata(sd); + } else { + vdev = container_of(entity, + struct video_device, + entity); + ved = video_get_drvdata(vdev); + } + } + + vimc_streamer_pipeline_terminate(stream); + return -EINVAL; +} + +static int vimc_streamer_thread(void *data) +{ + struct vimc_stream *stream = data; + int i; + + set_freezable(); + set_current_state(TASK_UNINTERRUPTIBLE); + + for (;;) { + try_to_freeze(); + if (kthread_should_stop()) + break; + + for (i = stream->pipe_size - 1; i >= 0; i--) { + stream->frame = stream->ved_pipeline[i]->process_frame( + stream->ved_pipeline[i], + stream->frame); + if (!stream->frame) + break; + if (IS_ERR(stream->frame)) + break; + } + //wait for 60hz + schedule_timeout(HZ / 60); + } + + return 0; +} + +int vimc_streamer_s_stream(struct vimc_stream *stream, + struct vimc_ent_device *ved, + int enable) +{ + int ret; + + if (!stream || !ved) + return -EINVAL; + + if (enable) { + if (stream->kthread) + return 0; + + ret = vimc_streamer_pipeline_init(stream, ved); + if (ret) + return ret; + + stream->kthread = kthread_run(vimc_streamer_thread, stream, + "vimc-streamer thread"); + + if (IS_ERR(stream->kthread)) + return PTR_ERR(stream->kthread); + + } else { + if (!stream->kthread) + return 0; + + ret = kthread_stop(stream->kthread); + if (ret) + return ret; + + stream->kthread = NULL; + + vimc_streamer_pipeline_terminate(stream); + } + + return 0; +} +EXPORT_SYMBOL_GPL(vimc_streamer_s_stream); + +MODULE_DESCRIPTION("Virtual Media Controller Driver (VIMC) Streamer"); +MODULE_AUTHOR("Lucas A. M. Magalhães "); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/vimc/vimc-streamer.h b/drivers/media/platform/vimc/vimc-streamer.h new file mode 100644 index 000000000000..752af2e2d5a2 --- /dev/null +++ b/drivers/media/platform/vimc/vimc-streamer.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * vimc-streamer.h Virtual Media Controller Driver + * + * Copyright (C) 2018 Lucas A. M. 
Magalhães + * + */ + +#ifndef _VIMC_STREAMER_H_ +#define _VIMC_STREAMER_H_ + +#include + +#include "vimc-common.h" + +#define VIMC_STREAMER_PIPELINE_MAX_SIZE 16 + +struct vimc_stream { + struct media_pipeline pipe; + struct vimc_ent_device *ved_pipeline[VIMC_STREAMER_PIPELINE_MAX_SIZE]; + unsigned int pipe_size; + u8 *frame; + struct task_struct *kthread; +}; + +/** + * vimc_streamer_s_streamer - start/stop the stream + * + * @stream: the pointer to the stream to start or stop + * @ved: The last entity of the streamer pipeline + * @enable: any non-zero number start the stream, zero stop + * + */ +int vimc_streamer_s_stream(struct vimc_stream *stream, + struct vimc_ent_device *ved, + int enable); + +#endif //_VIMC_STREAMER_H_ diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c index c931f007e5b0..342e0e6c103b 100644 --- a/drivers/media/platform/vivid/vivid-core.c +++ b/drivers/media/platform/vivid/vivid-core.c @@ -371,7 +371,7 @@ static int vidioc_s_parm(struct file *file, void *fh, if (vdev->vfl_dir == VFL_DIR_RX) return vivid_vid_cap_s_parm(file, fh, parm); - return vivid_vid_out_g_parm(file, fh, parm); + return -ENOTTY; } static int vidioc_log_status(struct file *file, void *fh) @@ -1094,7 +1094,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vid_cap_q; q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : V4L2_BUF_TYPE_VIDEO_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vid_cap_qops; @@ -1115,7 +1117,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vid_out_q; q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE : V4L2_BUF_TYPE_VIDEO_OUTPUT; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vid_out_qops; @@ -1136,7 +1140,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vbi_cap_q; q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE : V4L2_BUF_TYPE_SLICED_VBI_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vbi_cap_qops; @@ -1157,7 +1163,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) q = &dev->vb_vbi_out_q; q->type = dev->has_raw_vbi_out ? 
V4L2_BUF_TYPE_VBI_OUTPUT : V4L2_BUF_TYPE_SLICED_VBI_OUTPUT; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_vbi_out_qops; @@ -1177,7 +1185,9 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) /* initialize sdr_cap queue */ q = &dev->vb_sdr_cap_q; q->type = V4L2_BUF_TYPE_SDR_CAPTURE; - q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; + q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; + if (!allocator) + q->io_modes |= VB2_USERPTR; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivid_buffer); q->ops = &vivid_sdr_cap_qops; @@ -1468,9 +1478,6 @@ static int vivid_create_instance(struct platform_device *pdev, int inst) return 0; unreg_dev: -#ifdef CONFIG_MEDIA_CONTROLLER - media_device_unregister(&dev->mdev); -#endif video_unregister_device(&dev->radio_tx_dev); video_unregister_device(&dev->radio_rx_dev); video_unregister_device(&dev->sdr_cap_dev); @@ -1543,6 +1550,7 @@ static int vivid_remove(struct platform_device *pdev) #ifdef CONFIG_MEDIA_CONTROLLER media_device_unregister(&dev->mdev); + media_device_cleanup(&dev->mdev); #endif if (dev->has_vid_cap) { diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c index c059fc12668a..52eeda624d7e 100644 --- a/drivers/media/platform/vivid/vivid-vid-cap.c +++ b/drivers/media/platform/vivid/vivid-vid-cap.c @@ -124,7 +124,8 @@ static int vid_cap_queue_setup(struct vb2_queue *vq, } } else { for (p = 0; p < buffers; p++) - sizes[p] = tpg_g_line_width(&dev->tpg, p) * h + + sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) / + dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; } @@ -161,7 +162,9 @@ static int vid_cap_buf_prepare(struct vb2_buffer *vb) return -EINVAL; } for (p = 0; p < buffers; p++) { - size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height + + size = (tpg_g_line_width(&dev->tpg, p) * + dev->fmt_cap_rect.height) / + dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; if (vb2_plane_size(vb, p) < size) { @@ -545,7 +548,8 @@ int vivid_g_fmt_vid_cap(struct file *file, void *priv, for (p = 0; p < mp->num_planes; p++) { mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p); mp->plane_fmt[p].sizeimage = - tpg_g_line_width(&dev->tpg, p) * mp->height + + (tpg_g_line_width(&dev->tpg, p) * mp->height) / + dev->fmt_cap->vdownsampling[p] + dev->fmt_cap->data_offset[p]; } return 0; diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c index 661f4015fba1..74b83bcc6119 100644 --- a/drivers/media/platform/vivid/vivid-vid-common.c +++ b/drivers/media/platform/vivid/vivid-vid-common.c @@ -168,6 +168,36 @@ struct vivid_fmt vivid_formats[] = { .buffers = 1, .alpha_mask = 0x000000ff, }, + { + .fourcc = V4L2_PIX_FMT_AYUV32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + .alpha_mask = 0x000000ff, + }, + { + .fourcc = V4L2_PIX_FMT_XYUV32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + }, + { + .fourcc = V4L2_PIX_FMT_VUYA32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + .alpha_mask = 0xff000000, + }, + { + .fourcc = V4L2_PIX_FMT_VUYX32, + .vdownsampling = { 1 }, + .bit_depth = { 32 }, + .planes = 1, + .buffers = 1, + }, { .fourcc = V4L2_PIX_FMT_GREY, .vdownsampling = { 1 }, 
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c index ea250aee2b2e..e61b91b414f9 100644 --- a/drivers/media/platform/vivid/vivid-vid-out.c +++ b/drivers/media/platform/vivid/vivid-vid-out.c @@ -28,11 +28,12 @@ static int vid_out_queue_setup(struct vb2_queue *vq, const struct vivid_fmt *vfmt = dev->fmt_out; unsigned planes = vfmt->buffers; unsigned h = dev->fmt_out_rect.height; - unsigned size = dev->bytesperline_out[0] * h; + unsigned int size = dev->bytesperline_out[0] * h + vfmt->data_offset[0]; unsigned p; for (p = vfmt->buffers; p < vfmt->planes; p++) - size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; + size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p] + + vfmt->data_offset[p]; if (dev->field_out == V4L2_FIELD_ALTERNATE) { /* @@ -62,12 +63,14 @@ static int vid_out_queue_setup(struct vb2_queue *vq, if (sizes[0] < size) return -EINVAL; for (p = 1; p < planes; p++) { - if (sizes[p] < dev->bytesperline_out[p] * h) + if (sizes[p] < dev->bytesperline_out[p] * h + + vfmt->data_offset[p]) return -EINVAL; } } else { for (p = 0; p < planes; p++) - sizes[p] = p ? dev->bytesperline_out[p] * h : size; + sizes[p] = p ? dev->bytesperline_out[p] * h + + vfmt->data_offset[p] : size; } if (vq->num_buffers + *nbuffers < 2) @@ -81,21 +84,38 @@ static int vid_out_queue_setup(struct vb2_queue *vq, return 0; } -static int vid_out_buf_prepare(struct vb2_buffer *vb) +static int vid_out_buf_out_validate(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); - unsigned long size; - unsigned planes; + + dprintk(dev, 1, "%s\n", __func__); + + if (dev->field_out != V4L2_FIELD_ALTERNATE) + vbuf->field = dev->field_out; + else if (vbuf->field != V4L2_FIELD_TOP && + vbuf->field != V4L2_FIELD_BOTTOM) + return -EINVAL; + return 0; +} + +static int vid_out_buf_prepare(struct vb2_buffer *vb) +{ + struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue); + const struct vivid_fmt *vfmt = dev->fmt_out; + unsigned int planes = vfmt->buffers; + unsigned int h = dev->fmt_out_rect.height; + unsigned int size = dev->bytesperline_out[0] * h; unsigned p; + for (p = vfmt->buffers; p < vfmt->planes; p++) + size += dev->bytesperline_out[p] * h / vfmt->vdownsampling[p]; + dprintk(dev, 1, "%s\n", __func__); if (WARN_ON(NULL == dev->fmt_out)) return -EINVAL; - planes = dev->fmt_out->planes; - if (dev->buf_prepare_error) { /* * Error injection: test what happens if buf_prepare() returns @@ -105,18 +125,13 @@ static int vid_out_buf_prepare(struct vb2_buffer *vb) return -EINVAL; } - if (dev->field_out != V4L2_FIELD_ALTERNATE) - vbuf->field = dev->field_out; - else if (vbuf->field != V4L2_FIELD_TOP && - vbuf->field != V4L2_FIELD_BOTTOM) - return -EINVAL; - for (p = 0; p < planes; p++) { - size = dev->bytesperline_out[p] * dev->fmt_out_rect.height + - vb->planes[p].data_offset; + if (p) + size = dev->bytesperline_out[p] * h; + size += vb->planes[p].data_offset; if (vb2_get_plane_payload(vb, p) < size) { - dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %lu)\n", + dprintk(dev, 1, "%s the payload is too small for plane %u (%lu < %u)\n", __func__, p, vb2_get_plane_payload(vb, p), size); return -EINVAL; } @@ -188,6 +203,7 @@ static void vid_out_buf_request_complete(struct vb2_buffer *vb) const struct vb2_ops vivid_vid_out_qops = { .queue_setup = vid_out_queue_setup, + .buf_out_validate = vid_out_buf_out_validate, .buf_prepare = vid_out_buf_prepare, .buf_queue = 
vid_out_buf_queue, .start_streaming = vid_out_start_streaming, @@ -321,7 +337,8 @@ int vivid_g_fmt_vid_out(struct file *file, void *priv, for (p = 0; p < mp->num_planes; p++) { mp->plane_fmt[p].bytesperline = dev->bytesperline_out[p]; mp->plane_fmt[p].sizeimage = - mp->plane_fmt[p].bytesperline * mp->height; + mp->plane_fmt[p].bytesperline * mp->height + + fmt->data_offset[p]; } for (p = fmt->buffers; p < fmt->planes; p++) { unsigned stride = dev->bytesperline_out[p]; @@ -399,7 +416,7 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv, pfmt[p].bytesperline = bytesperline; pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) / - fmt->vdownsampling[p]; + fmt->vdownsampling[p] + fmt->data_offset[p]; memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved)); } diff --git a/drivers/media/platform/vsp1/vsp1_brx.c b/drivers/media/platform/vsp1/vsp1_brx.c index 5e50178b057d..58ad248cd7f7 100644 --- a/drivers/media/platform/vsp1/vsp1_brx.c +++ b/drivers/media/platform/vsp1/vsp1_brx.c @@ -296,7 +296,7 @@ static void brx_configure_stream(struct vsp1_entity *entity, /* * The hardware is extremely flexible but we have no userspace API to * expose all the parameters, nor is it clear whether we would have use - * cases for all the supported modes. Let's just harcode the parameters + * cases for all the supported modes. Let's just hardcode the parameters * to sane default values for now. */ @@ -373,7 +373,7 @@ static void brx_configure_stream(struct vsp1_entity *entity, vsp1_brx_write(brx, dlb, VI6_BRU_CTRL(i), ctrl); /* - * Harcode the blending formula to + * Hardcode the blending formula to * * DSTc = DSTc * (1 - SRCa) + SRCc * SRCa * DSTa = DSTa * (1 - SRCa) + SRCa diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c index 8d86f618ec77..84895385d2e5 100644 --- a/drivers/media/platform/vsp1/vsp1_drm.c +++ b/drivers/media/platform/vsp1/vsp1_drm.c @@ -333,19 +333,19 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1, * on the BRx sink pad 0 and propagated inside the entity, not on the * source pad. */ - format.pad = pipe->brx->source_pad; + format.pad = brx->source_pad; format.format.width = drm_pipe->width; format.format.height = drm_pipe->height; format.format.field = V4L2_FIELD_NONE; - ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_fmt, NULL, + ret = v4l2_subdev_call(&brx->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", __func__, format.format.width, format.format.height, - format.format.code, BRX_NAME(pipe->brx), pipe->brx->source_pad); + format.format.code, BRX_NAME(brx), brx->source_pad); if (format.format.width != drm_pipe->width || format.format.height != drm_pipe->height) { diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 771dfe1f7c20..7ceaf3222145 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -223,7 +223,7 @@ static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe, * If the modulus is less than half of the partition size, * the penultimate partition is reduced to half, which is added * to the final partition: |1234|1234|1234|12|341| - * to prevents this: |1234|1234|1234|1234|1|. + * to prevent this: |1234|1234|1234|1234|1|. 
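The vsp1_video comment above describes how the image is split into partitions and how the last slice is balanced so it never ends up tiny. A stand-alone sketch of that arithmetic, using a hypothetical helper that is not the driver's code:

/*
 * Minimal sketch of the balancing described above: split @width into
 * slices of at most @size pixels and, when the remainder is small,
 * steal half of the penultimate slice so the final one is not tiny.
 */
static unsigned int partition_width(unsigned int width, unsigned int size,
				    unsigned int index)
{
	unsigned int num = (width + size - 1) / size;	/* number of slices */
	unsigned int modulus = width % size;

	if (num < 2 || !modulus || modulus >= size / 2)
		return (index == num - 1 && modulus) ? modulus : size;

	if (index == num - 2)
		return size / 2;			/* penultimate: halved */
	if (index == num - 1)
		return modulus + (size - size / 2);	/* final: remainder + stolen half */
	return size;
}

/* width=17, size=4 yields 4,4,4,2,3 (|1234|1234|1234|12|341|) rather than 4,4,4,4,1. */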
*/ if (modulus) { /* diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c index 18f98838111b..08a825c3a3f6 100644 --- a/drivers/media/platform/xilinx/xilinx-vip.c +++ b/drivers/media/platform/xilinx/xilinx-vip.c @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(xvip_set_format_size); * the register, otherwise the bitmask is cleared from the register * when the flag @set is false. * - * Fox eample, this function can be used to set a control with a boolean value + * Fox example, this function can be used to set a control with a boolean value * requested by users. If the caller knows whether to set or clear in the first * place, the caller should call xvip_clr() or xvip_set() directly instead of * using this function. diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 269971145f88..0261f4d28f16 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -1550,7 +1550,7 @@ static int si476x_radio_probe(struct platform_device *pdev) rval = si476x_radio_init_debugfs(radio); if (rval < 0) { - dev_err(&pdev->dev, "Could not creat debugfs interface\n"); + dev_err(&pdev->dev, "Could not create debugfs interface\n"); goto exit; } diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index 9751ea1d80be..15eea2b2c90f 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "radio-si470x.h" @@ -350,7 +351,7 @@ static int si470x_i2c_probe(struct i2c_client *client, unsigned char version_warning = 0; /* private data allocation and initialization */ - radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL); + radio = devm_kzalloc(&client->dev, sizeof(*radio), GFP_KERNEL); if (!radio) { retval = -ENOMEM; goto err_initial; @@ -370,7 +371,7 @@ static int si470x_i2c_probe(struct i2c_client *client, retval = v4l2_device_register(&client->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&client->dev, "couldn't register v4l2_device\n"); - goto err_radio; + goto err_initial; } v4l2_ctrl_handler_init(&radio->hdl, 2); @@ -392,18 +393,29 @@ static int si470x_i2c_probe(struct i2c_client *client, radio->videodev.release = video_device_release_empty; video_set_drvdata(&radio->videodev, radio); + radio->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(radio->gpio_reset)) { + retval = PTR_ERR(radio->gpio_reset); + dev_err(&client->dev, "Failed to request gpio: %d\n", retval); + goto err_all; + } + + if (radio->gpio_reset) + gpiod_set_value(radio->gpio_reset, 1); + /* power up : need 110ms */ radio->registers[POWERCFG] = POWERCFG_ENABLE; if (si470x_set_register(radio, POWERCFG) < 0) { retval = -EIO; - goto err_ctrl; + goto err_all; } msleep(110); /* get device and chip versions */ if (si470x_get_all_registers(radio) < 0) { retval = -EIO; - goto err_ctrl; + goto err_all; } dev_info(&client->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n", radio->registers[DEVICEID], radio->registers[SI_CHIPID]); @@ -430,10 +442,10 @@ static int si470x_i2c_probe(struct i2c_client *client, /* rds buffer allocation */ radio->buf_size = rds_buf * 3; - radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL); + radio->buffer = devm_kmalloc(&client->dev, radio->buf_size, GFP_KERNEL); if (!radio->buffer) { retval = -EIO; - goto err_ctrl; + goto err_all; } /* rds buffer configuration */ @@ -441,12 +453,13 @@ static int 
si470x_i2c_probe(struct i2c_client *client, radio->rd_index = 0; init_waitqueue_head(&radio->read_queue); - retval = request_threaded_irq(client->irq, NULL, si470x_i2c_interrupt, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, DRIVER_NAME, - radio); + retval = devm_request_threaded_irq(&client->dev, client->irq, NULL, + si470x_i2c_interrupt, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + DRIVER_NAME, radio); if (retval) { dev_err(&client->dev, "Failed to register interrupt\n"); - goto err_rds; + goto err_all; } /* register video device */ @@ -460,15 +473,9 @@ static int si470x_i2c_probe(struct i2c_client *client, return 0; err_all: - free_irq(client->irq, radio); -err_rds: - kfree(radio->buffer); -err_ctrl: v4l2_ctrl_handler_free(&radio->hdl); err_dev: v4l2_device_unregister(&radio->v4l2_dev); -err_radio: - kfree(radio); err_initial: return retval; } @@ -481,9 +488,10 @@ static int si470x_i2c_remove(struct i2c_client *client) { struct si470x_device *radio = i2c_get_clientdata(client); - free_irq(client->irq, radio); video_unregister_device(&radio->videodev); - kfree(radio); + + if (radio->gpio_reset) + gpiod_set_value(radio->gpio_reset, 0); return 0; } @@ -527,6 +535,13 @@ static int si470x_i2c_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(si470x_i2c_pm, si470x_i2c_suspend, si470x_i2c_resume); #endif +#if IS_ENABLED(CONFIG_OF) +static const struct of_device_id si470x_of_match[] = { + { .compatible = "silabs,si470x" }, + { }, +}; +MODULE_DEVICE_TABLE(of, si470x_of_match); +#endif /* * si470x_i2c_driver - i2c driver interface @@ -534,6 +549,7 @@ static SIMPLE_DEV_PM_OPS(si470x_i2c_pm, si470x_i2c_suspend, si470x_i2c_resume); static struct i2c_driver si470x_i2c_driver = { .driver = { .name = "si470x", + .of_match_table = of_match_ptr(si470x_of_match), #ifdef CONFIG_PM_SLEEP .pm = &si470x_i2c_pm, #endif diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h index 35fa0f3bbdd2..6fd6a399cb77 100644 --- a/drivers/media/radio/si470x/radio-si470x.h +++ b/drivers/media/radio/si470x/radio-si470x.h @@ -189,6 +189,7 @@ struct si470x_device { #if IS_ENABLED(CONFIG_I2C_SI470X) struct i2c_client *client; + struct gpio_desc *gpio_reset; #endif }; diff --git a/drivers/media/radio/wl128x/fmdrv.h b/drivers/media/radio/wl128x/fmdrv.h index 1ff2eec4ed52..4c0d13539988 100644 --- a/drivers/media/radio/wl128x/fmdrv.h +++ b/drivers/media/radio/wl128x/fmdrv.h @@ -133,7 +133,7 @@ struct fm_rds { /* * Current RX channel Alternate Frequency cache. * This info is used to switch to other freq (AF) - * when current channel signal strengh is below RSSI threshold. + * when current channel signal strength is below RSSI threshold. 
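The si470x hunks above replace manual kzalloc()/kmalloc()/request_threaded_irq() calls with their device-managed counterparts, which is why the err_radio/err_ctrl/err_rds unwind labels and the frees in remove() disappear. A minimal sketch of that pattern, assuming a hypothetical foo driver rather than the real si470x code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical driver state, only meant to show the devm_* pattern. */
struct foo_radio {
	struct gpio_desc *gpio_reset;
	u8 *buffer;
};

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct i2c_client *client)
{
	struct foo_radio *radio;
	int ret;

	/* Freed automatically when the device is unbound. */
	radio = devm_kzalloc(&client->dev, sizeof(*radio), GFP_KERNEL);
	if (!radio)
		return -ENOMEM;

	/* Optional reset line: stays NULL if the firmware describes none. */
	radio->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset",
						    GPIOD_OUT_LOW);
	if (IS_ERR(radio->gpio_reset))
		return PTR_ERR(radio->gpio_reset);

	radio->buffer = devm_kmalloc(&client->dev, 256, GFP_KERNEL);
	if (!radio->buffer)
		return -ENOMEM;

	/* The IRQ is released for us on unbind as well. */
	ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					foo_irq, IRQF_ONESHOT, "foo", radio);
	if (ret)
		return ret;

	i2c_set_clientdata(client, radio);
	return 0;	/* no unwind labels needed */
}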
*/ struct tuned_station_info { u16 picode; @@ -228,7 +228,7 @@ struct fmdev { struct fm_rx rx; /* FM receiver info */ struct fmtx_data tx_data; - /* V4L2 ctrl framwork handler*/ + /* V4L2 ctrl framework handler*/ struct v4l2_ctrl_handler ctrl_handler; /* For core assisted locking */ diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 800d69c3f80b..3c8987af3772 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -908,7 +908,7 @@ static void fm_irq_afjump_setfreq(struct fmdev *fmdev) u16 frq_index; u16 payload; - fmdbg("Swtich to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]); + fmdbg("Switch to %d KHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]); frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] - fmdev->rx.region.bot_freq) / FM_FREQ_MUL; @@ -1047,7 +1047,7 @@ static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev) clear_bit(FM_INTTASK_RUNNING, &fmdev->flag); } -/* Returns availability of RDS data in internel buffer */ +/* Returns availability of RDS data in internal buffer */ int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file, struct poll_table_struct *pts) { diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 8a216068a35a..96ce3e5524e0 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig @@ -133,6 +133,19 @@ config IR_IMON_DECODER remote control and you would like to use it with a raw IR receiver, or if you wish to use an encoder to transmit this IR. +config IR_RCMM_DECODER + tristate "Enable IR raw decoder for the RC-MM protocol" + depends on RC_CORE + help + Enable this option when you have IR with RC-MM protocol, and + you need the software decoder. The driver supports 12, + 24 and 32 bits RC-MM variants. You can enable or disable the + different modes using the following RC protocol keywords: + 'rc-mm-12', 'rc-mm-24' and 'rc-mm-32'. + + To compile this driver as a module, choose M here: the module + will be called ir-rcmm-decoder. + endif #RC_DECODERS menuconfig RC_DEVICES @@ -240,7 +253,7 @@ config IR_FINTEK depends on RC_CORE ---help--- Say Y here to enable support for integrated infrared receiver - /transciever made by Fintek. This chip is found on assorted + /transceiver made by Fintek. This chip is found on assorted Jetway motherboards (and of course, possibly others). To compile this driver as a module, choose M here: the @@ -274,7 +287,7 @@ config IR_NUVOTON depends on RC_CORE ---help--- Say Y here to enable support for integrated infrared receiver - /transciever made by Nuvoton (formerly Winbond). This chip is + /transceiver made by Nuvoton (formerly Winbond). This chip is found in the ASRock ION 330HT, as well as assorted Intel DP55-series motherboards (and of course, possibly others). 
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 92c163816849..48d23433b3c0 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_IR_SHARP_DECODER) += ir-sharp-decoder.o obj-$(CONFIG_IR_MCE_KBD_DECODER) += ir-mce_kbd-decoder.o obj-$(CONFIG_IR_XMP_DECODER) += ir-xmp-decoder.o obj-$(CONFIG_IR_IMON_DECODER) += ir-imon-decoder.o +obj-$(CONFIG_IR_RCMM_DECODER) += ir-rcmm-decoder.o # stand-alone IR receivers/transmitters obj-$(CONFIG_RC_ATI_REMOTE) += ati_remote.o diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c index 265e91a2a70d..bc2da64858c3 100644 --- a/drivers/media/rc/ati_remote.c +++ b/drivers/media/rc/ati_remote.c @@ -304,7 +304,7 @@ static const struct { {KIND_LITERAL, 0x7c, BTN_RIGHT},/* right btn down */ {KIND_LITERAL, 0x7d, BTN_RIGHT},/* right btn up */ - /* Artificial "doubleclick" events are generated by the hardware. + /* Artificial "double-click" events are generated by the hardware. * They are mapped to the "side" and "extra" mouse buttons here. */ {KIND_FILTERED, 0x7a, BTN_SIDE}, /* left dblclick */ {KIND_FILTERED, 0x7e, BTN_EXTRA},/* right dblclick */ diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c index dd2fd307ef85..293ccee2c05e 100644 --- a/drivers/media/rc/ene_ir.c +++ b/drivers/media/rc/ene_ir.c @@ -184,7 +184,7 @@ static int ene_hw_detect(struct ene_device *dev) return 0; } -/* Read properities of hw sample buffer */ +/* Read properties of hw sample buffer */ static void ene_rx_setup_hw_buffer(struct ene_device *dev) { u16 tmp; diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h index 494646b2a284..0a45352efe40 100644 --- a/drivers/media/rc/ene_ir.h +++ b/drivers/media/rc/ene_ir.h @@ -118,7 +118,7 @@ #define ENE_CIRDAT_IN 0xFEC7 -/* RLC configuration - sample period (1us resulution) + idle mode */ +/* RLC configuration - sample period (1us resolution) + idle mode */ #define ENE_CIRRLC_CFG 0xFEC8 #define ENE_CIRRLC_CFG_OVERFLOW 0x80 /* interrupt on overflows if set */ #define ENE_DEFAULT_SAMPLE_PERIOD 50 diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h index ac34a774d018..dffe0bbfc6eb 100644 --- a/drivers/media/rc/fintek-cir.h +++ b/drivers/media/rc/fintek-cir.h @@ -176,7 +176,7 @@ struct fintek_dev { #define CIR_CR_IRCS 0x05 /* Before host writes command to IR, host must set to 1. When host finshes write command to IR, host must clear to 0. 
*/ -#define CIR_CR_COMMAND_DATA 0x06 /* Host read or write comand data */ +#define CIR_CR_COMMAND_DATA 0x06 /* Host read or write command data */ #define CIR_CR_CLASS 0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx, 0x33 = rx + 1 tx */ #define CIR_CR_DEV_EN 0x30 /* bit0 = 1 enables CIR */ diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c index d96aed1343e4..5cc302fa4daa 100644 --- a/drivers/media/rc/ir-rc6-decoder.c +++ b/drivers/media/rc/ir-rc6-decoder.c @@ -40,6 +40,7 @@ #define RC6_6A_MCE_TOGGLE_MASK 0x8000 /* for the body bits */ #define RC6_6A_LCC_MASK 0xffff0000 /* RC6-6A-32 long customer code mask */ #define RC6_6A_MCE_CC 0x800f0000 /* MCE customer code */ +#define RC6_6A_ZOTAC_CC 0x80340000 /* Zotac customer code */ #define RC6_6A_KATHREIN_CC 0x80460000 /* Kathrein RCU-676 customer code */ #ifndef CHAR_BIT #define CHAR_BIT 8 /* Normally in */ @@ -246,6 +247,7 @@ again: switch (scancode & RC6_6A_LCC_MASK) { case RC6_6A_MCE_CC: case RC6_6A_KATHREIN_CC: + case RC6_6A_ZOTAC_CC: protocol = RC_PROTO_RC6_MCE; toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK); scancode &= ~RC6_6A_MCE_TOGGLE_MASK; diff --git a/drivers/media/rc/ir-rcmm-decoder.c b/drivers/media/rc/ir-rcmm-decoder.c new file mode 100644 index 000000000000..f1096ac1e5c5 --- /dev/null +++ b/drivers/media/rc/ir-rcmm-decoder.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0+ +// ir-rcmm-decoder.c - A decoder for the RCMM IR protocol +// +// Copyright (C) 2018 by Patrick Lerda + +#include "rc-core-priv.h" +#include +#include + +#define RCMM_UNIT 166667 /* nanosecs */ +#define RCMM_PREFIX_PULSE 416666 /* 166666.666666666*2.5 */ +#define RCMM_PULSE_0 277777 /* 166666.666666666*(1+2/3) */ +#define RCMM_PULSE_1 444444 /* 166666.666666666*(2+2/3) */ +#define RCMM_PULSE_2 611111 /* 166666.666666666*(3+2/3) */ +#define RCMM_PULSE_3 777778 /* 166666.666666666*(4+2/3) */ + +enum rcmm_state { + STATE_INACTIVE, + STATE_LOW, + STATE_BUMP, + STATE_VALUE, + STATE_FINISHED, +}; + +static bool rcmm_mode(const struct rcmm_dec *data) +{ + return !((0x000c0000 & data->bits) == 0x000c0000); +} + +static int rcmm_miscmode(struct rc_dev *dev, struct rcmm_dec *data) +{ + switch (data->count) { + case 24: + if (dev->enabled_protocols & RC_PROTO_BIT_RCMM24) { + rc_keydown(dev, RC_PROTO_RCMM24, data->bits, 0); + data->state = STATE_INACTIVE; + return 0; + } + return -1; + + case 12: + if (dev->enabled_protocols & RC_PROTO_BIT_RCMM12) { + rc_keydown(dev, RC_PROTO_RCMM12, data->bits, 0); + data->state = STATE_INACTIVE; + return 0; + } + return -1; + } + + return -1; +} + +/** + * ir_rcmm_decode() - Decode one RCMM pulse or space + * @dev: the struct rc_dev descriptor of the device + * @ev: the struct ir_raw_event descriptor of the pulse/space + * + * This function returns -EINVAL if the pulse violates the state machine + */ +static int ir_rcmm_decode(struct rc_dev *dev, struct ir_raw_event ev) +{ + struct rcmm_dec *data = &dev->raw->rcmm; + u32 scancode; + u8 toggle; + int value; + + if (!(dev->enabled_protocols & (RC_PROTO_BIT_RCMM32 | + RC_PROTO_BIT_RCMM24 | + RC_PROTO_BIT_RCMM12))) + return 0; + + if (!is_timing_event(ev)) { + if (ev.reset) + data->state = STATE_INACTIVE; + return 0; + } + + switch (data->state) { + case STATE_INACTIVE: + if (!ev.pulse) + break; + + if (!eq_margin(ev.duration, RCMM_PREFIX_PULSE, RCMM_UNIT / 2)) + break; + + data->state = STATE_LOW; + data->count = 0; + data->bits = 0; + return 0; + + case STATE_LOW: + if (ev.pulse) + break; + + if (!eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2)) + 
break; + + data->state = STATE_BUMP; + return 0; + + case STATE_BUMP: + if (!ev.pulse) + break; + + if (!eq_margin(ev.duration, RCMM_UNIT, RCMM_UNIT / 2)) + break; + + data->state = STATE_VALUE; + return 0; + + case STATE_VALUE: + if (ev.pulse) + break; + + if (eq_margin(ev.duration, RCMM_PULSE_0, RCMM_UNIT / 2)) + value = 0; + else if (eq_margin(ev.duration, RCMM_PULSE_1, RCMM_UNIT / 2)) + value = 1; + else if (eq_margin(ev.duration, RCMM_PULSE_2, RCMM_UNIT / 2)) + value = 2; + else if (eq_margin(ev.duration, RCMM_PULSE_3, RCMM_UNIT / 2)) + value = 3; + else + value = -1; + + if (value == -1) { + if (!rcmm_miscmode(dev, data)) + return 0; + break; + } + + data->bits <<= 2; + data->bits |= value; + + data->count += 2; + + if (data->count < 32) + data->state = STATE_BUMP; + else + data->state = STATE_FINISHED; + + return 0; + + case STATE_FINISHED: + if (!ev.pulse) + break; + + if (!eq_margin(ev.duration, RCMM_UNIT, RCMM_UNIT / 2)) + break; + + if (rcmm_mode(data)) { + toggle = !!(0x8000 & data->bits); + scancode = data->bits & ~0x8000; + } else { + toggle = 0; + scancode = data->bits; + } + + if (dev->enabled_protocols & RC_PROTO_BIT_RCMM32) { + rc_keydown(dev, RC_PROTO_RCMM32, scancode, toggle); + data->state = STATE_INACTIVE; + return 0; + } + + break; + } + + data->state = STATE_INACTIVE; + return -EINVAL; +} + +static const int rcmmspace[] = { + RCMM_PULSE_0, + RCMM_PULSE_1, + RCMM_PULSE_2, + RCMM_PULSE_3, +}; + +static int ir_rcmm_rawencoder(struct ir_raw_event **ev, unsigned int max, + unsigned int n, u32 data) +{ + int i; + int ret; + + ret = ir_raw_gen_pulse_space(ev, &max, RCMM_PREFIX_PULSE, RCMM_PULSE_0); + if (ret) + return ret; + + for (i = n - 2; i >= 0; i -= 2) { + const unsigned int space = rcmmspace[(data >> i) & 3]; + + ret = ir_raw_gen_pulse_space(ev, &max, RCMM_UNIT, space); + if (ret) + return ret; + } + + return ir_raw_gen_pulse_space(ev, &max, RCMM_UNIT, RCMM_PULSE_3 * 2); +} + +static int ir_rcmm_encode(enum rc_proto protocol, u32 scancode, + struct ir_raw_event *events, unsigned int max) +{ + struct ir_raw_event *e = events; + int ret; + + switch (protocol) { + case RC_PROTO_RCMM32: + ret = ir_rcmm_rawencoder(&e, max, 32, scancode); + break; + case RC_PROTO_RCMM24: + ret = ir_rcmm_rawencoder(&e, max, 24, scancode); + break; + case RC_PROTO_RCMM12: + ret = ir_rcmm_rawencoder(&e, max, 12, scancode); + break; + default: + ret = -EINVAL; + } + + if (ret < 0) + return ret; + + return e - events; +} + +static struct ir_raw_handler rcmm_handler = { + .protocols = RC_PROTO_BIT_RCMM32 | + RC_PROTO_BIT_RCMM24 | + RC_PROTO_BIT_RCMM12, + .decode = ir_rcmm_decode, + .encode = ir_rcmm_encode, + .carrier = 36000, + .min_timeout = RCMM_PULSE_3 + RCMM_UNIT, +}; + +static int __init ir_rcmm_decode_init(void) +{ + ir_raw_handler_register(&rcmm_handler); + + pr_info("IR RCMM protocol handler initialized\n"); + return 0; +} + +static void __exit ir_rcmm_decode_exit(void) +{ + ir_raw_handler_unregister(&rcmm_handler); +} + +module_init(ir_rcmm_decode_init); +module_exit(ir_rcmm_decode_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Patrick Lerda"); +MODULE_DESCRIPTION("RCMM IR protocol decoder"); diff --git a/drivers/media/rc/ir-xmp-decoder.c b/drivers/media/rc/ir-xmp-decoder.c index c965f51df1c1..2639b0b6d2f8 100644 --- a/drivers/media/rc/ir-xmp-decoder.c +++ b/drivers/media/rc/ir-xmp-decoder.c @@ -94,7 +94,7 @@ static int ir_xmp_decode(struct rc_dev *dev, struct ir_raw_event ev) n = data->durations; /* * the 4th nibble should be 15 so base the divider on this - * to transform 
durations into nibbles. Substract 2000 from + * to transform durations into nibbles. Subtract 2000 from * the divider to compensate for fluctuations in the signal */ divider = (n[3] - XMP_NIBBLE_PREFIX) / 15 - 2000; diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index cd3c60ba8519..1d48a9e59f93 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -515,7 +515,7 @@ static int ite_tx_ir(struct rc_dev *rcdev, unsigned *txbuf, unsigned n) /* and set the carrier values for reception */ ite_set_carrier_params(dev); - /* reenable the receiver */ + /* re-enable the receiver */ if (dev->in_use) dev->params.enable_rx(dev); diff --git a/drivers/media/rc/keymaps/rc-behold-columbus.c b/drivers/media/rc/keymaps/rc-behold-columbus.c index e73057945bd1..b68380a76010 100644 --- a/drivers/media/rc/keymaps/rc-behold-columbus.c +++ b/drivers/media/rc/keymaps/rc-behold-columbus.c @@ -14,7 +14,7 @@ * The "ascii-art picture" below (in comments, first row * is the keycode in hex, and subsequent row(s) shows * the button labels (several variants when appropriate) - * helps to descide which keycodes to assign to the buttons. + * helps to decide which keycodes to assign to the buttons. */ static struct rc_map_table behold_columbus[] = { @@ -68,7 +68,7 @@ static struct rc_map_table behold_columbus[] = { { 0x18, KEY_VOLUMEDOWN }, /* 0x0E 0x1E 0x0F 0x1A * - * Stop Pause Previouse Next * + * Stop Pause Previous Next * * */ { 0x0E, KEY_STOP }, diff --git a/drivers/media/rc/keymaps/rc-behold.c b/drivers/media/rc/keymaps/rc-behold.c index e1b2c8e26883..2b7cddb2f36d 100644 --- a/drivers/media/rc/keymaps/rc-behold.c +++ b/drivers/media/rc/keymaps/rc-behold.c @@ -17,7 +17,7 @@ * The "ascii-art picture" below (in comments, first row * is the keycode in hex, and subsequent row(s) shows * the button labels (several variants when appropriate) - * helps to descide which keycodes to assign to the buttons. + * helps to decide which keycodes to assign to the buttons. */ static struct rc_map_table behold[] = { diff --git a/drivers/media/rc/keymaps/rc-manli.c b/drivers/media/rc/keymaps/rc-manli.c index 29c9feaf413b..5e9a49e2dd6a 100644 --- a/drivers/media/rc/keymaps/rc-manli.c +++ b/drivers/media/rc/keymaps/rc-manli.c @@ -14,7 +14,7 @@ The "ascii-art picture" below (in comments, first row is the keycode in hex, and subsequent row(s) shows the button labels (several variants when appropriate) - helps to descide which keycodes to assign to the buttons. + helps to decide which keycodes to assign to the buttons. 
*/ static struct rc_map_table manli[] = { diff --git a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c index 4988e71c524c..cf98cf8dc13c 100644 --- a/drivers/media/rc/keymaps/rc-powercolor-real-angel.c +++ b/drivers/media/rc/keymaps/rc-powercolor-real-angel.c @@ -26,7 +26,7 @@ static struct rc_map_table powercolor_real_angel[] = { { 0x07, KEY_7 }, { 0x08, KEY_8 }, { 0x09, KEY_9 }, - { 0x0a, KEY_DIGITS }, /* single, double, tripple digit */ + { 0x0a, KEY_DIGITS }, /* single, double, triple digit */ { 0x29, KEY_PREVIOUS }, /* previous channel */ { 0x12, KEY_BRIGHTNESSUP }, { 0x13, KEY_BRIGHTNESSDOWN }, diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 8d7d3ef88862..fa4840940486 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -79,7 +79,7 @@ #define MCE_CMD 0x1f #define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */ #define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */ -#define MCE_PORT_SER 0x6 /* 0xc0 thru 0xdf flush & 0x1f bytes */ +#define MCE_PORT_SER 0x6 /* 0xc0 through 0xdf flush & 0x1f bytes */ #define MCE_PORT_MASK 0xe0 /* Mask out command bits */ /* Command port headers */ diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index c2cbe7f6266c..9f21b3e8b377 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h @@ -131,6 +131,11 @@ struct ir_raw_event_ctrl { unsigned int bits; bool stick_keyboard; } imon; + struct rcmm_dec { + int state; + unsigned int count; + u32 bits; + } rcmm; }; /* Mutex for locking raw IR processing and handler change */ diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index e10b4644a442..39dd46bbd0c1 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c @@ -186,7 +186,7 @@ int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev) dev->raw->this_ev = *ev; } - /* Enter idle mode if nessesary */ + /* Enter idle mode if necessary */ if (!ev->pulse && dev->timeout && dev->raw->this_ev.duration >= dev->timeout) ir_raw_event_set_idle(dev, true); diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 66a174979b3c..e8fa28e20192 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -70,6 +70,12 @@ static const struct { [RC_PROTO_CEC] = { .name = "cec", .repeat_period = 0 }, [RC_PROTO_IMON] = { .name = "imon", .scancode_bits = 0x7fffffff, .repeat_period = 114 }, + [RC_PROTO_RCMM12] = { .name = "rc-mm-12", + .scancode_bits = 0x00000fff, .repeat_period = 114 }, + [RC_PROTO_RCMM24] = { .name = "rc-mm-24", + .scancode_bits = 0x00ffffff, .repeat_period = 114 }, + [RC_PROTO_RCMM32] = { .name = "rc-mm-32", + .scancode_bits = 0xffffffff, .repeat_period = 114 }, }; /* Used to keep track of known keymaps */ @@ -274,6 +280,7 @@ static unsigned int ir_update_mapping(struct rc_dev *dev, unsigned int new_keycode) { int old_keycode = rc_map->scan[index].keycode; + int i; /* Did the user wish to remove the mapping? */ if (new_keycode == KEY_RESERVED || new_keycode == KEY_UNKNOWN) { @@ -288,9 +295,20 @@ static unsigned int ir_update_mapping(struct rc_dev *dev, old_keycode == KEY_RESERVED ? "New" : "Replacing", rc_map->scan[index].scancode, new_keycode); rc_map->scan[index].keycode = new_keycode; + __set_bit(new_keycode, dev->input_dev->keybit); } if (old_keycode != KEY_RESERVED) { + /* A previous mapping was updated... */ + __clear_bit(old_keycode, dev->input_dev->keybit); + /* ... 
but another scancode might use the same keycode */ + for (i = 0; i < rc_map->len; i++) { + if (rc_map->scan[i].keycode == old_keycode) { + __set_bit(old_keycode, dev->input_dev->keybit); + break; + } + } + /* Possibly shrink the keytable, failure is not a problem */ ir_resize_table(dev, rc_map, GFP_ATOMIC); } @@ -1006,6 +1024,9 @@ static const struct { { RC_PROTO_BIT_XMP, "xmp", "ir-xmp-decoder" }, { RC_PROTO_BIT_CEC, "cec", NULL }, { RC_PROTO_BIT_IMON, "imon", "ir-imon-decoder" }, + { RC_PROTO_BIT_RCMM12 | + RC_PROTO_BIT_RCMM24 | + RC_PROTO_BIT_RCMM32, "rc-mm", "ir-rcmm-decoder" }, }; /** @@ -1035,7 +1056,7 @@ struct rc_filter_attribute { * @buf: a pointer to the output buffer * * This routine is a callback routine for input read the IR protocol type(s). - * it is trigged by reading /sys/class/rc/rc?/protocols. + * it is triggered by reading /sys/class/rc/rc?/protocols. * It returns the protocol names of supported protocols. * Enabled protocols are printed in brackets. * @@ -1206,7 +1227,7 @@ void ir_raw_load_modules(u64 *protocols) * @len: length of the input buffer * * This routine is for changing the IR protocol type. - * It is trigged by writing to /sys/class/rc/rc?/[wakeup_]protocols. + * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]protocols. * See parse_protocol_change() for the valid commands. * Returns @len on success or a negative error code. * @@ -1290,7 +1311,7 @@ out: * @buf: a pointer to the output buffer * * This routine is a callback routine to read a scancode filter value or mask. - * It is trigged by reading /sys/class/rc/rc?/[wakeup_]filter[_mask]. + * It is triggered by reading /sys/class/rc/rc?/[wakeup_]filter[_mask]. * It prints the current scancode filter value or mask of the appropriate filter * type in hexadecimal into @buf and returns the size of the buffer. * @@ -1333,7 +1354,7 @@ static ssize_t show_filter(struct device *device, * @len: length of the input buffer * * This routine is for changing a scancode filter value or mask. - * It is trigged by writing to /sys/class/rc/rc?/[wakeup_]filter[_mask]. + * It is triggered by writing to /sys/class/rc/rc?/[wakeup_]filter[_mask]. * Returns -EINVAL if an invalid filter value for the current protocol was * specified or if scancode filtering is not supported by the driver, otherwise * returns @len. @@ -1417,7 +1438,7 @@ unlock: * @buf: a pointer to the output buffer * * This routine is a callback routine for input read the IR protocol type(s). - * it is trigged by reading /sys/class/rc/rc?/wakeup_protocols. + * it is triggered by reading /sys/class/rc/rc?/wakeup_protocols. * It returns the protocol names of supported protocols. * The enabled protocols are printed in brackets. * @@ -1468,7 +1489,7 @@ static ssize_t show_wakeup_protocols(struct device *device, * @len: length of the input buffer * * This routine is for changing the IR protocol type. - * It is trigged by writing to /sys/class/rc/rc?/wakeup_protocols. + * It is triggered by writing to /sys/class/rc/rc?/wakeup_protocols. * Returns @len on success or a negative error code. 
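The rc-main.c change above keeps the input device's keybit map in sync with the keymap: a keycode's capability bit is set when a mapping is added and cleared only once no other scancode still maps to that keycode. A brute-force illustration of that invariant, using a simplified, hypothetical map_entry type rather than rc-core's structures:

struct map_entry {
	unsigned int scancode;
	unsigned int keycode;
};

/* Returns 1 if some scancode still produces @keycode, so its bit must stay set. */
static int keycode_still_mapped(const struct map_entry *map, unsigned int len,
				unsigned int keycode)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		if (map[i].keycode == keycode)
			return 1;	/* keep the capability bit set */
	return 0;			/* no user left: the bit can be cleared */
}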
* * dev->lock is taken to guard against races between @@ -1750,7 +1771,6 @@ static int rc_prepare_rx_device(struct rc_dev *dev) set_bit(EV_REP, dev->input_dev->evbit); set_bit(EV_MSC, dev->input_dev->evbit); set_bit(MSC_SCAN, dev->input_dev->mscbit); - bitmap_fill(dev->input_dev->keybit, KEY_CNT); /* Pointer/mouse events */ set_bit(EV_REL, dev->input_dev->evbit); diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c index 08c51ffd74a0..b82a5c9db12c 100644 --- a/drivers/media/rc/redrat3.c +++ b/drivers/media/rc/redrat3.c @@ -140,7 +140,7 @@ MODULE_PARM_DESC(length_fuzz, "Length Fuzz (0-127)"); * When receiving a continuous ir stream (for example when a user is * holding a button down on a remote), this specifies the minimum size * of a space when the redrat3 sends a irdata packet to the host. Specified - * in miliseconds. Default value 18ms. + * in milliseconds. Default value 18ms. * The value can be between 2 and 30 inclusive. */ static int minimum_pause = 18; diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c index d5c433e20d4a..4077217777f9 100644 --- a/drivers/media/spi/cxd2880-spi.c +++ b/drivers/media/spi/cxd2880-spi.c @@ -522,13 +522,15 @@ cxd2880_spi_probe(struct spi_device *spi) dvb_spi->vcc_supply = devm_regulator_get_optional(&spi->dev, "vcc"); if (IS_ERR(dvb_spi->vcc_supply)) { - if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto fail_adapter; + } dvb_spi->vcc_supply = NULL; } else { ret = regulator_enable(dvb_spi->vcc_supply); if (ret) - return ret; + goto fail_adapter; } dvb_spi->spi = spi; diff --git a/drivers/media/tuners/mxl5005s.c b/drivers/media/tuners/mxl5005s.c index ec584316c812..1c07e2225fb3 100644 --- a/drivers/media/tuners/mxl5005s.c +++ b/drivers/media/tuners/mxl5005s.c @@ -3584,7 +3584,7 @@ static u32 MXL_Ceiling(u32 value, u32 resolution) return value / resolution + (value % resolution > 0 ? 1 : 0); } -/* Retrieve the Initialzation Registers */ +/* Retrieve the Initialization Registers */ static u16 MXL_GetInitRegister(struct dvb_frontend *fe, u8 *RegNum, u8 *RegVal, int *count) { diff --git a/drivers/media/tuners/qm1d1b0004.h b/drivers/media/tuners/qm1d1b0004.h index 7734ed109a22..7950ecd56430 100644 --- a/drivers/media/tuners/qm1d1b0004.h +++ b/drivers/media/tuners/qm1d1b0004.h @@ -14,7 +14,7 @@ struct qm1d1b0004_config { struct dvb_frontend *fe; u32 lpf_freq; /* LPF frequency[kHz]. Default: symbol rate */ - bool half_step; /* use PLL frequency step of 500Hz istead of 1000Hz */ + bool half_step; /* use PLL frequency step of 500Hz instead of 1000Hz */ }; /* special values indicating to use the default in qm1d1b0004_config */ diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c index ba4be08a8551..aed2f130ec74 100644 --- a/drivers/media/tuners/r820t.c +++ b/drivers/media/tuners/r820t.c @@ -1664,7 +1664,7 @@ static int r820t_iq_tree(struct r820t_priv *priv, /* * record IMC results by input gain/phase location then adjust - * gain or phase positive 1 step and negtive 1 step, + * gain or phase positive 1 step and negative 1 step, * both record results */ @@ -2066,7 +2066,7 @@ static int r820t_imr_callibrate(struct r820t_priv *priv) } /* - * Disables IMR callibration. That emulates the same behaviour + * Disables IMR calibration. That emulates the same behaviour * as what is done by rtl-sdr userspace library. 
Useful for testing */ if (no_imr_cal) { diff --git a/drivers/media/tuners/tda18271-common.c b/drivers/media/tuners/tda18271-common.c index 054b3b747dae..d46a2e775e82 100644 --- a/drivers/media/tuners/tda18271-common.c +++ b/drivers/media/tuners/tda18271-common.c @@ -528,14 +528,14 @@ int tda18271_init_regs(struct dvb_frontend *fe) * Standby modes, EP3 [7:5] * * | SM || SM_LT || SM_XT || mode description - * |=====\\=======\\=======\\=================================== + * |=====\\=======\\=======\\==================================== * | 0 || 0 || 0 || normal mode - * |-----||-------||-------||----------------------------------- + * |-----||-------||-------||------------------------------------ * | || || || standby mode w/ slave tuner output - * | 1 || 0 || 0 || & loop thru & xtal oscillator on - * |-----||-------||-------||----------------------------------- + * | 1 || 0 || 0 || & loop through & xtal oscillator on + * |-----||-------||-------||------------------------------------ * | 1 || 1 || 0 || standby mode w/ xtal oscillator on - * |-----||-------||-------||----------------------------------- + * |-----||-------||-------||------------------------------------ * | 1 || 1 || 1 || power off * */ diff --git a/drivers/media/tuners/tda18271-fe.c b/drivers/media/tuners/tda18271-fe.c index 4d69029229e4..cac6b8e62b73 100644 --- a/drivers/media/tuners/tda18271-fe.c +++ b/drivers/media/tuners/tda18271-fe.c @@ -48,7 +48,7 @@ static int tda18271_toggle_output(struct dvb_frontend *fe, int standby) if (tda_fail(ret)) goto fail; - tda_dbg("%s mode: xtal oscillator %s, slave tuner loop thru %s\n", + tda_dbg("%s mode: xtal oscillator %s, slave tuner loop through %s\n", standby ? "standby" : "active", priv->output_opt & TDA18271_OUTPUT_XT_OFF ? "off" : "on", priv->output_opt & TDA18271_OUTPUT_LT_OFF ? "off" : "on"); diff --git a/drivers/media/tuners/tda18271.h b/drivers/media/tuners/tda18271.h index 7e07966c5ace..1a23532586ef 100644 --- a/drivers/media/tuners/tda18271.h +++ b/drivers/media/tuners/tda18271.h @@ -69,10 +69,10 @@ enum tda18271_i2c_gate { }; enum tda18271_output_options { - /* slave tuner output & loop thru & xtal oscillator always on */ + /* slave tuner output & loop through & xtal oscillator always on */ TDA18271_OUTPUT_LT_XT_ON = 0, - /* slave tuner output loop thru off */ + /* slave tuner output loop through off */ TDA18271_OUTPUT_LT_OFF = 1, /* xtal oscillator off */ diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index eb6d65dae748..a351390ee744 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -1471,8 +1471,8 @@ static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength) if (rc < 0) goto ret; - /* Informations from real testing of DVB-T and radio part, - coeficient for one dB is 0xff. + /* Information from real testing of DVB-T and radio part, + coefficient for one dB is 0xff. */ tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value); diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index 1fdb1601dc65..3f8c92a70116 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -234,7 +234,7 @@ static void au0828_media_graph_notify(struct media_entity *new, if (!new) { /* * Called during au0828 probe time to connect - * entites that were created prior to registering + * entities that were created prior to registering * the notify handler. Find mixer and decoder. 
*/ media_device_for_each_entity(entity, dev->media_dev) { diff --git a/drivers/media/usb/au0828/au0828-dvb.c b/drivers/media/usb/au0828/au0828-dvb.c index d9093a3c57c5..6e43028112d1 100644 --- a/drivers/media/usb/au0828/au0828-dvb.c +++ b/drivers/media/usb/au0828/au0828-dvb.c @@ -566,7 +566,7 @@ void au0828_dvb_unregister(struct au0828_dev *dev) dvb->frontend = NULL; } -/* All the DVB attach calls go here, this function get's modified +/* All the DVB attach calls go here, this function gets modified * for each new card. No other function in this file needs * to change. */ diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h index 004eadef55c7..425c35d16057 100644 --- a/drivers/media/usb/au0828/au0828.h +++ b/drivers/media/usb/au0828/au0828.h @@ -52,7 +52,7 @@ #define AU0828_INTERLACED_DEFAULT 1 -/* Defination for AU0828 USB transfer */ +/* Definition for AU0828 USB transfer */ #define AU0828_MAX_ISO_BUFS 12 /* maybe resize this value in the future */ #define AU0828_ISO_PACKETS_PER_URB 128 diff --git a/drivers/media/usb/cpia2/cpia2.h b/drivers/media/usb/cpia2/cpia2.h index ab238ac8bfc0..d0a464882510 100644 --- a/drivers/media/usb/cpia2/cpia2.h +++ b/drivers/media/usb/cpia2/cpia2.h @@ -350,7 +350,7 @@ struct cpia2_sbuf { }; struct framebuf { - struct timeval timestamp; + u64 ts; unsigned long seq; int num; int length; diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c index a771e0a52610..e5d8dee38fe4 100644 --- a/drivers/media/usb/cpia2/cpia2_usb.c +++ b/drivers/media/usb/cpia2/cpia2_usb.c @@ -324,7 +324,7 @@ static void cpia2_usb_complete(struct urb *urb) continue; } DBG("Start of frame pattern found\n"); - v4l2_get_timestamp(&cam->workbuff->timestamp); + cam->workbuff->ts = ktime_get_ns(); cam->workbuff->seq = cam->frame_count++; cam->workbuff->data[0] = 0xFF; cam->workbuff->data[1] = 0xD8; diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 748739c2b8b2..95c0bd4a19dc 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -833,7 +833,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) break; case FRAME_READY: buf->bytesused = cam->buffers[buf->index].length; - buf->timestamp = cam->buffers[buf->index].timestamp; + buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts); buf->sequence = cam->buffers[buf->index].seq; buf->flags = V4L2_BUF_FLAG_DONE; break; @@ -889,12 +889,7 @@ static int find_earliest_filled_buffer(struct camera_data *cam) found = i; } else { /* find which buffer is earlier */ - struct timeval *tv1, *tv2; - tv1 = &cam->buffers[i].timestamp; - tv2 = &cam->buffers[found].timestamp; - if(tv1->tv_sec < tv2->tv_sec || - (tv1->tv_sec == tv2->tv_sec && - tv1->tv_usec < tv2->tv_usec)) + if (cam->buffers[i].ts < cam->buffers[found].ts) found = i; } } @@ -945,7 +940,7 @@ static int cpia2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) buf->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; buf->field = V4L2_FIELD_NONE; - buf->timestamp = cam->buffers[buf->index].timestamp; + buf->timestamp = ns_to_timeval(cam->buffers[buf->index].ts); buf->sequence = cam->buffers[buf->index].seq; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; buf->length = cam->frame_size; diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index 1c48c497bd6a..0f8ae81f4820 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ 
b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1316,7 +1316,7 @@ static void buffer_copy(struct cx231xx *dev, char *data, int len, struct urb *ur buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); list_del(&buf->vb.queue); wake_up(&buf->vb.done); dma_q->mpeg_buffer_completed = 0; @@ -1347,7 +1347,7 @@ static void buffer_filled(char *data, int len, struct urb *urb, memcpy(vbuf, data, len); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); list_del(&buf->vb.queue); wake_up(&buf->vb.done); } diff --git a/drivers/media/usb/cx231xx/cx231xx-avcore.c b/drivers/media/usb/cx231xx/cx231xx-avcore.c index fdd3c221fa0d..3374888b3021 100644 --- a/drivers/media/usb/cx231xx/cx231xx-avcore.c +++ b/drivers/media/usb/cx231xx/cx231xx-avcore.c @@ -2987,7 +2987,7 @@ int cx231xx_gpio_i2c_write_ack(struct cx231xx *dev) { int status = 0; - /* set SDA to ouput */ + /* set SDA to output */ dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); diff --git a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h index 8f00b1d38277..bb4f817be0c5 100644 --- a/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h +++ b/drivers/media/usb/cx231xx/cx231xx-pcb-cfg.h @@ -86,7 +86,7 @@ enum TS_PORT{ #define EAVP_MASK 0x8 enum EAV_PRESENT{ NO_EXTERNAL_AV = 0x0, /* 0: No External A/V inputs - (no need for i2s blcok), + (no need for i2s block), Analog Tuner must be present */ EXTERNAL_AV = 0x8 /* 1: External A/V inputs present (requires i2s blk) */ diff --git a/drivers/media/usb/cx231xx/cx231xx-vbi.c b/drivers/media/usb/cx231xx/cx231xx-vbi.c index 10b2eb7338ad..d16b73c04445 100644 --- a/drivers/media/usb/cx231xx/cx231xx-vbi.c +++ b/drivers/media/usb/cx231xx/cx231xx-vbi.c @@ -528,7 +528,7 @@ static inline void vbi_buffer_filled(struct cx231xx *dev, buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); dev->vbi_mode.bulk_ctl.buf = NULL; diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c index 0d451c4ea3b9..aebbaf9d92a6 100644 --- a/drivers/media/usb/cx231xx/cx231xx-video.c +++ b/drivers/media/usb/cx231xx/cx231xx-video.c @@ -182,7 +182,7 @@ static inline void buffer_filled(struct cx231xx *dev, cx231xx_isocdbg("[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); if (dev->USE_ISO) dev->video_mode.isoc_ctl.buf = NULL; diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h index fa640bf20111..86b7f57492b1 100644 --- a/drivers/media/usb/cx231xx/cx231xx.h +++ b/drivers/media/usb/cx231xx/cx231xx.h @@ -646,7 +646,7 @@ struct cx231xx { /* frame properties */ int width; /* current frame width */ int height; /* current frame height */ - int interlaced; /* 1=interlace fileds, 0=just top fileds */ + int interlaced; /* 1=interlace fields, 0=just top fields */ struct cx231xx_audio adev; diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h index 3fd6cc0d6340..728ef5f3ada2 100644 --- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h +++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h @@ -146,7 +146,7 @@ struct dvb_usb_rc { }; /** - * usb streaming configration for adapter + * usb streaming configuration for adapter * @type: urb type * @count: count of used urbs 
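The cpia2 and cx231xx hunks above drop struct timeval from the drivers' frame bookkeeping in favour of a u64 count of monotonic nanoseconds, converting back only when a v4l2_buffer is returned to user space. A minimal sketch of that pattern; the frame struct and helpers are hypothetical:

#include <linux/ktime.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/videodev2.h>

struct frame {
	u64 ts;		/* monotonic timestamp in nanoseconds */
};

/* Taken when the hardware completes a frame. */
static void frame_mark_done(struct frame *f)
{
	f->ts = ktime_get_ns();
}

/* Converted to a timeval only at the V4L2 ioctl boundary. */
static void frame_fill_v4l2_buffer(const struct frame *f,
				   struct v4l2_buffer *buf)
{
	buf->timestamp = ns_to_timeval(f->ts);
	buf->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
}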
* @endpoint: stream usb endpoint number diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 602013cf3e69..15944b95970f 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -308,7 +308,7 @@ static void lme2510_int_response(struct urb *lme_urb) switch (ibuf[0]) { case 0xaa: - debug_data_snipet(1, "INT Remote data snipet", ibuf); + debug_data_snipet(1, "INT Remote data snippet", ibuf); if (!adap_to_d(adap)->rc_dev) break; @@ -358,13 +358,13 @@ static void lme2510_int_response(struct urb *lme_urb) lme2510_update_stats(adap); - debug_data_snipet(5, "INT Remote data snipet in", ibuf); + debug_data_snipet(5, "INT Remote data snippet in", ibuf); break; case 0xcc: - debug_data_snipet(1, "INT Control data snipet", ibuf); + debug_data_snipet(1, "INT Control data snippet", ibuf); break; default: - debug_data_snipet(1, "INT Unknown data snipet", ibuf); + debug_data_snipet(1, "INT Unknown data snippet", ibuf); break; } } diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c index 85cdf593a9ad..5e2d53af68c7 100644 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c @@ -140,7 +140,7 @@ int mxl111sf_write_reg_mask(struct mxl111sf_state *state, if (mask != 0xff) { ret = mxl111sf_read_reg(state, addr, &val); #if 1 - /* dont know why this usually errors out on the first try */ + /* don't know why this usually errors out on the first try */ if (mxl_fail(ret)) pr_err("error writing addr: 0x%02x, mask: 0x%02x, data: 0x%02x, retrying...", addr, mask, data); @@ -783,7 +783,7 @@ static int mxl111sf_attach_demod(struct dvb_usb_adapter *adap, u8 fe_id) if (mxl_fail(ret)) goto fail; - /* dont care if this fails */ + /* don't care if this fails */ mxl111sf_init_port_expander(state); adap->fe[fe_id] = dvb_attach(mxl111sf_demod_attach, state, diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c index 16e946e01d2c..0638d907c73e 100644 --- a/drivers/media/usb/dvb-usb/af9005.c +++ b/drivers/media/usb/dvb-usb/af9005.c @@ -845,7 +845,7 @@ static int af9005_rc_query(struct dvb_usb_device *d, u32 * event, int *state) /* deb_info("rc_query\n"); */ st->data[0] = 3; /* rest of packet length low */ - st->data[1] = 0; /* rest of packet lentgh high */ + st->data[1] = 0; /* rest of packet length high */ st->data[2] = 0x40; /* read remote */ st->data[3] = 1; /* rest of packet length */ st->data[4] = seq = st->sequence++; /* sequence number */ diff --git a/drivers/media/usb/dvb-usb/cinergyT2-fe.c b/drivers/media/usb/dvb-usb/cinergyT2-fe.c index df71df7ed524..4c9f83ba260d 100644 --- a/drivers/media/usb/dvb-usb/cinergyT2-fe.c +++ b/drivers/media/usb/dvb-usb/cinergyT2-fe.c @@ -33,7 +33,7 @@ * This function is probably reusable and may better get placed in a support * library. * - * We replace errornous fields by default TPS fields (the ones with value 0). + * We replace erroneous fields by default TPS fields (the ones with value 0). 
*/ static uint16_t compute_tps(struct dtv_frontend_properties *op) diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c index a51a45c60233..9ddb2000249e 100644 --- a/drivers/media/usb/dvb-usb/cxusb.c +++ b/drivers/media/usb/dvb-usb/cxusb.c @@ -1016,7 +1016,7 @@ static int cxusb_dualdig4_rev2_tuner_attach(struct dvb_usb_adapter *adap) /* * No need to call dvb7000p_attach here, as it was called * already, as frontend_attach method is called first, and - * tuner_attach is only called on sucess. + * tuner_attach is only called on success. */ tun_i2c = st->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c index 40ca4eafb137..99951e02a880 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c @@ -98,7 +98,7 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs) /* * when reloading the driver w/o replugging the device - * sometimes a timeout occures, this helps + * sometimes a timeout occurs, this helps */ if (d->props.generic_bulk_ctrl_endpoint != 0) { usb_clear_halt(d->udev, usb_sndbulkpipe(d->udev, d->props.generic_bulk_ctrl_endpoint)); diff --git a/drivers/media/usb/dvb-usb/dvb-usb.h b/drivers/media/usb/dvb-usb/dvb-usb.h index 317ed6a82d19..32829bdd5f22 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb.h +++ b/drivers/media/usb/dvb-usb/dvb-usb.h @@ -336,7 +336,7 @@ struct usb_data_stream { * struct dvb_usb_adapter - a DVB adapter on a USB device * @id: index of this adapter (starting with 0). * - * @feedcount: number of reqested feeds (used for streaming-activation) + * @feedcount: number of requested feeds (used for streaming-activation) * @pid_filtering: is hardware pid_filtering used or not. * * @pll_addr: I2C address of the tuner for programming diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c index 0af74383083d..150081128196 100644 --- a/drivers/media/usb/dvb-usb/pctv452e.c +++ b/drivers/media/usb/dvb-usb/pctv452e.c @@ -528,13 +528,13 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i) rx = b0 + 5; - /* hmm where shoud this should go? */ + /* hmm where should this should go? */ ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE); if (ret != 0) info("%s: Warning set interface returned: %d\n", __func__, ret); - /* this is a one-time initialization, dont know where to put */ + /* this is a one-time initialization, don't know where to put */ b0[0] = 0xaa; b0[1] = state->c++; b0[2] = PCTV_CMD_RESET; diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 02c13d71e6c1..a3155ec196cc 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c @@ -296,7 +296,7 @@ static int em28xx_i2c_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, u16 len) return ret; } /* - * NOTE: some devices with two i2c busses have the bad habit to return 0 + * NOTE: some devices with two i2c buses have the bad habit to return 0 * bytes if we are on bus B AND there was no write attempt to the * specified slave address before AND no device is present at the * requested slave address. 
@@ -427,7 +427,7 @@ static int em25xx_bus_B_recv_bytes(struct em28xx *dev, u16 addr, u8 *buf, return ret; } /* - * NOTE: some devices with two i2c busses have the bad habit to return 0 + * NOTE: some devices with two i2c buses have the bad habit to return 0 * bytes if we are on bus B AND there was no write attempt to the * specified slave address before AND no device is present at the * requested slave address. diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h index f53afe18e92d..d7c60862874a 100644 --- a/drivers/media/usb/em28xx/em28xx-reg.h +++ b/drivers/media/usb/em28xx/em28xx-reg.h @@ -67,7 +67,7 @@ #define EM28XX_I2C_CLK_WAIT_ENABLE 0x40 #define EM28XX_I2C_EEPROM_ON_BOARD 0x08 #define EM28XX_I2C_EEPROM_KEY_VALID 0x04 -#define EM2874_I2C_SECONDARY_BUS_SELECT 0x04 /* em2874 has two i2c busses */ +#define EM2874_I2C_SECONDARY_BUS_SELECT 0x04 /* em2874 has two i2c buses */ #define EM28XX_I2C_FREQ_1_5_MHZ 0x03 /* bus frequency (bits [1-0]) */ #define EM28XX_I2C_FREQ_25_KHZ 0x02 #define EM28XX_I2C_FREQ_400_KHZ 0x01 diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig index d3b6665c342d..088566e88467 100644 --- a/drivers/media/usb/gspca/Kconfig +++ b/drivers/media/usb/gspca/Kconfig @@ -46,7 +46,7 @@ config USB_GSPCA_CPIA1 depends on VIDEO_V4L2 && USB_GSPCA help Say Y here if you want support for USB cameras based on the cpia - CPiA chip. Note that you need atleast version 0.6.4 of libv4l for + CPiA chip. Note that you need at least version 0.6.4 of libv4l for applications to understand the videoformat generated by this driver. To compile this driver as a module, choose M here: the diff --git a/drivers/media/usb/gspca/autogain_functions.c b/drivers/media/usb/gspca/autogain_functions.c index 6dfab2b077f7..f915cc7c0c63 100644 --- a/drivers/media/usb/gspca/autogain_functions.c +++ b/drivers/media/usb/gspca/autogain_functions.c @@ -98,7 +98,7 @@ EXPORT_SYMBOL(gspca_expo_autogain); 80 %) and if that does not help, only then changes exposure. This leads to a much more stable image then using the knee algorithm which at certain points of the knee graph will only try to adjust exposure, - which leads to oscilating as one exposure step is huge. + which leads to oscillating as one exposure step is huge. Returns 0 if no changes were made, 1 if the gain and or exposure settings where changed. */ diff --git a/drivers/media/usb/gspca/benq.c b/drivers/media/usb/gspca/benq.c index 8a8db5eb6d5f..1744591b8ba0 100644 --- a/drivers/media/usb/gspca/benq.c +++ b/drivers/media/usb/gspca/benq.c @@ -205,12 +205,12 @@ static void sd_isoc_irq(struct urb *urb) * - 80 ba/bb 00 00 = start of image followed by 'ff d8' * - 04 ba/bb oo oo = image piece * where 'oo oo' is the image offset - (not cheked) + (not checked) * - (other -> bad frame) * The images are JPEG encoded with full header and * normal ff escape. * The end of image ('ff d9') may occur in any URB. 
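The gspca autogain comment above describes a policy of adjusting gain first and touching exposure only when gain alone cannot reach the target, because one coarse exposure step is huge. A small step function illustrating that policy; this is not the gspca implementation and every name here is invented:

static int coarse_autogain_step(int avg_lum, int desired_lum, int deadzone,
				int *gain, int gain_min, int gain_max,
				int *exposure, int expo_min, int expo_max)
{
	if (avg_lum >= desired_lum - deadzone &&
	    avg_lum <= desired_lum + deadzone)
		return 0;			/* close enough, change nothing */

	if (avg_lum < desired_lum) {		/* too dark */
		if (*gain < gain_max)
			(*gain)++;		/* fine-grained knob first */
		else if (*exposure < expo_max)
			(*exposure)++;		/* gain exhausted: coarse knob */
		else
			return 0;
	} else {				/* too bright */
		if (*gain > gain_min)
			(*gain)--;
		else if (*exposure > expo_min)
			(*exposure)--;
		else
			return 0;
	}
	return 1;				/* a control was changed */
}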
- * (not cheked) + * (not checked) */ data = (u8 *) urb0->transfer_buffer + urb0->iso_frame_desc[i].offset; diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c index 2b09af8865f4..7c817a4a93c4 100644 --- a/drivers/media/usb/gspca/cpia1.c +++ b/drivers/media/usb/gspca/cpia1.c @@ -547,10 +547,14 @@ static int do_command(struct gspca_dev *gspca_dev, u16 command, } if (sd->params.qx3.button) { /* button pressed - unlock the latch */ - do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, + ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 3, 0xdf, 0xdf, 0); - do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, + if (ret) + return ret; + ret = do_command(gspca_dev, CPIA_COMMAND_WriteMCPort, 3, 0xff, 0xff, 0); + if (ret) + return ret; } /* test whether microscope is cradled */ @@ -1430,6 +1434,7 @@ static int sd_config(struct gspca_dev *gspca_dev, { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; + int ret; sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ; reset_camera_params(gspca_dev); @@ -1441,7 +1446,10 @@ static int sd_config(struct gspca_dev *gspca_dev, cam->cam_mode = mode; cam->nmodes = ARRAY_SIZE(mode); - goto_low_power(gspca_dev); + ret = goto_low_power(gspca_dev); + if (ret) + gspca_err(gspca_dev, "Cannot go to low power mode: %d\n", + ret); /* Check the firmware version. */ sd->params.version.firmwareVersion = 0; get_version_information(gspca_dev); diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c index 3137f5d89d80..ac70b36d67b7 100644 --- a/drivers/media/usb/gspca/gspca.c +++ b/drivers/media/usb/gspca/gspca.c @@ -912,23 +912,30 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev) } static int wxh_to_mode(struct gspca_dev *gspca_dev, - int width, int height) + int width, int height, u32 pixelformat) { int i; for (i = 0; i < gspca_dev->cam.nmodes; i++) { if (width == gspca_dev->cam.cam_mode[i].width - && height == gspca_dev->cam.cam_mode[i].height) + && height == gspca_dev->cam.cam_mode[i].height + && pixelformat == gspca_dev->cam.cam_mode[i].pixelformat) return i; } return -EINVAL; } static int wxh_to_nearest_mode(struct gspca_dev *gspca_dev, - int width, int height) + int width, int height, u32 pixelformat) { int i; + for (i = gspca_dev->cam.nmodes; --i > 0; ) { + if (width >= gspca_dev->cam.cam_mode[i].width + && height >= gspca_dev->cam.cam_mode[i].height + && pixelformat == gspca_dev->cam.cam_mode[i].pixelformat) + return i; + } for (i = gspca_dev->cam.nmodes; --i > 0; ) { if (width >= gspca_dev->cam.cam_mode[i].width && height >= gspca_dev->cam.cam_mode[i].height) @@ -1058,7 +1065,7 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev, fmt->fmt.pix.pixelformat, w, h); /* search the nearest mode for width and height */ - mode = wxh_to_nearest_mode(gspca_dev, w, h); + mode = wxh_to_nearest_mode(gspca_dev, w, h, fmt->fmt.pix.pixelformat); /* OK if right palette */ if (gspca_dev->cam.cam_mode[mode].pixelformat @@ -1152,7 +1159,8 @@ static int vidioc_enum_frameintervals(struct file *filp, void *priv, int mode; __u32 i; - mode = wxh_to_mode(gspca_dev, fival->width, fival->height); + mode = wxh_to_mode(gspca_dev, fival->width, fival->height, + fival->pixel_format); if (mode < 0) return -EINVAL; diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c index c9947c4a0f63..8fac814f4779 100644 --- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c +++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c @@ -199,7 +199,7 @@ static const struct 
v4l2_ctrl_config mt9m111_greenbal_cfg = { int mt9m111_probe(struct sd *sd) { u8 data[2] = {0x00, 0x00}; - int i; + int i, rc = 0; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; if (force_sensor) { @@ -217,16 +217,18 @@ int mt9m111_probe(struct sd *sd) /* Do the preinit */ for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) { if (preinit_mt9m111[i][0] == BRIDGE) { - m5602_write_bridge(sd, + rc |= m5602_write_bridge(sd, preinit_mt9m111[i][1], preinit_mt9m111[i][2]); } else { data[0] = preinit_mt9m111[i][2]; data[1] = preinit_mt9m111[i][3]; - m5602_write_sensor(sd, + rc |= m5602_write_sensor(sd, preinit_mt9m111[i][1], data, 2); } } + if (rc < 0) + return rc; if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2)) return -ENODEV; diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c index 37d2891e5f5b..5e43b4782f02 100644 --- a/drivers/media/usb/gspca/m5602/m5602_po1030.c +++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c @@ -158,6 +158,7 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = { int po1030_probe(struct sd *sd) { + int rc = 0; u8 dev_id_h = 0, i; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; @@ -177,11 +178,14 @@ int po1030_probe(struct sd *sd) for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) { u8 data = preinit_po1030[i][2]; if (preinit_po1030[i][0] == SENSOR) - m5602_write_sensor(sd, + rc |= m5602_write_sensor(sd, preinit_po1030[i][1], &data, 1); else - m5602_write_bridge(sd, preinit_po1030[i][1], data); + rc |= m5602_write_bridge(sd, preinit_po1030[i][1], + data); } + if (rc < 0) + return rc; if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1)) return -ENODEV; diff --git a/drivers/media/usb/gspca/mr97310a.c b/drivers/media/usb/gspca/mr97310a.c index bea196361215..af454663e295 100644 --- a/drivers/media/usb/gspca/mr97310a.c +++ b/drivers/media/usb/gspca/mr97310a.c @@ -520,7 +520,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev) switch (gspca_dev->pixfmt.width) { case 160: data[9] |= 0x04; /* reg 8, 2:1 scale down from 320 */ - /* fall thru */ + /* fall through */ case 320: default: data[3] = 0x28; /* reg 2, H size/8 */ @@ -530,7 +530,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev) break; case 176: data[9] |= 0x04; /* reg 8, 2:1 scale down from 352 */ - /* fall thru */ + /* fall through */ case 352: data[3] = 0x2c; /* reg 2, H size/8 */ data[4] = 0x48; /* reg 3, V size/4 */ @@ -617,10 +617,10 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) switch (gspca_dev->pixfmt.width) { case 160: data[9] |= 0x0c; /* reg 8, 4:1 scale down */ - /* fall thru */ + /* fall through */ case 320: data[9] |= 0x04; /* reg 8, 2:1 scale down */ - /* fall thru */ + /* fall through */ case 640: default: data[3] = 0x50; /* reg 2, H size/8 */ @@ -637,7 +637,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev) case 176: data[9] |= 0x04; /* reg 8, 2:1 scale down */ - /* fall thru */ + /* fall through */ case 352: data[3] = 0x2c; /* reg 2, H size */ data[4] = 0x48; /* reg 3, V size */ diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c index 10fcbe9e8614..f2799e8cb8e7 100644 --- a/drivers/media/usb/gspca/ov519.c +++ b/drivers/media/usb/gspca/ov519.c @@ -1945,7 +1945,7 @@ static const struct ov_i2c_regvals norm_8610[] = { { 0x62, 0x5f }, /* was 0xd7, new from windrv 090403 */ { 0x63, 0xff }, { 0x64, 0x53 }, /* new windrv 090403 says 0x57, - * maybe thats wrong */ + * maybe that's wrong */ { 0x65, 0x00 }, { 0x66, 0x55 }, { 0x67, 0xb0 }, @@ -3658,7 +3658,7 @@ static void 
ov518_mode_init_regs(struct sd *sd) case SEN_OV7620AE: /* * HdG: 640x480 needs special handling on device - * revision 2, we check for device revison > 0 to + * revision 2, we check for device revision > 0 to * avoid regressions, as we don't know the correct * thing todo for revision 1. * diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index d06dc0755b9a..02c90ad96b76 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -103,6 +103,16 @@ static const struct v4l2_pix_format ov772x_mode[] = { .sizeimage = 640 * 480 * 2, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, + {320, 240, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, + .bytesperline = 320, + .sizeimage = 320 * 240, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 1}, + {640, 480, V4L2_PIX_FMT_SGRBG8, V4L2_FIELD_NONE, + .bytesperline = 640, + .sizeimage = 640 * 480, + .colorspace = V4L2_COLORSPACE_SRGB, + .priv = 0}, }; static const struct v4l2_pix_format ov767x_mode[] = { {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, @@ -127,6 +137,14 @@ static const struct framerates ov772x_framerates[] = { .rates = vga_rates, .nrates = ARRAY_SIZE(vga_rates), }, + { /* 320x240 SGBRG8 */ + .rates = qvga_rates, + .nrates = ARRAY_SIZE(qvga_rates), + }, + { /* 640x480 SGBRG8 */ + .rates = vga_rates, + .nrates = ARRAY_SIZE(vga_rates), + }, }; struct reg_array { @@ -411,9 +429,7 @@ static const u8 sensor_start_qvga_767x[][2] = { }; static const u8 bridge_init_772x[][2] = { - { 0xc2, 0x0c }, { 0x88, 0xf8 }, - { 0xc3, 0x69 }, { 0x89, 0xff }, { 0x76, 0x03 }, { 0x92, 0x01 }, @@ -439,7 +455,6 @@ static const u8 bridge_init_772x[][2] = { { 0x1f, 0x81 }, { 0x34, 0x05 }, { 0xe3, 0x04 }, - { 0x88, 0x00 }, { 0x89, 0x00 }, { 0x76, 0x00 }, { 0xe7, 0x2e }, @@ -447,26 +462,9 @@ static const u8 bridge_init_772x[][2] = { { 0x25, 0x42 }, { 0x21, 0xf0 }, - { 0x1c, 0x00 }, - { 0x1d, 0x40 }, - { 0x1d, 0x02 }, /* payload size 0x0200 * 4 = 2048 bytes */ - { 0x1d, 0x00 }, /* payload size */ - - { 0x1d, 0x02 }, /* frame size 0x025800 * 4 = 614400 */ - { 0x1d, 0x58 }, /* frame size */ - { 0x1d, 0x00 }, /* frame size */ - { 0x1c, 0x0a }, { 0x1d, 0x08 }, /* turn on UVC header */ { 0x1d, 0x0e }, /* .. 
*/ - - { 0x8d, 0x1c }, - { 0x8e, 0x80 }, - { 0xe5, 0x04 }, - - { 0xc0, 0x50 }, - { 0xc1, 0x3c }, - { 0xc2, 0x0c }, }; static const u8 sensor_init_772x[][2] = { { 0x12, 0x80 }, @@ -545,13 +543,10 @@ static const u8 sensor_init_772x[][2] = { { 0x8c, 0xe8 }, { 0x8d, 0x20 }, - { 0x0c, 0x90 }, - { 0x2b, 0x00 }, { 0x22, 0x7f }, { 0x23, 0x03 }, { 0x11, 0x01 }, - { 0x0c, 0xd0 }, { 0x64, 0xff }, { 0x0d, 0x41 }, @@ -559,9 +554,9 @@ static const u8 sensor_init_772x[][2] = { { 0x0e, 0xcd }, { 0xac, 0xbf }, { 0x8e, 0x00 }, /* De-noise threshold */ - { 0x0c, 0xd0 } }; -static const u8 bridge_start_vga_772x[][2] = { +static const u8 bridge_start_vga_yuyv_772x[][2] = { + {0x88, 0x00}, {0x1c, 0x00}, {0x1d, 0x40}, {0x1d, 0x02}, @@ -569,10 +564,14 @@ static const u8 bridge_start_vga_772x[][2] = { {0x1d, 0x02}, {0x1d, 0x58}, {0x1d, 0x00}, + {0x8d, 0x1c}, + {0x8e, 0x80}, {0xc0, 0x50}, {0xc1, 0x3c}, + {0xc2, 0x0c}, + {0xc3, 0x69}, }; -static const u8 sensor_start_vga_772x[][2] = { +static const u8 sensor_start_vga_yuyv_772x[][2] = { {0x12, 0x00}, {0x17, 0x26}, {0x18, 0xa0}, @@ -581,8 +580,10 @@ static const u8 sensor_start_vga_772x[][2] = { {0x29, 0xa0}, {0x2c, 0xf0}, {0x65, 0x20}, + {0x67, 0x00}, }; -static const u8 bridge_start_qvga_772x[][2] = { +static const u8 bridge_start_qvga_yuyv_772x[][2] = { + {0x88, 0x00}, {0x1c, 0x00}, {0x1d, 0x40}, {0x1d, 0x02}, @@ -590,10 +591,14 @@ static const u8 bridge_start_qvga_772x[][2] = { {0x1d, 0x01}, {0x1d, 0x4b}, {0x1d, 0x00}, + {0x8d, 0x1c}, + {0x8e, 0x80}, {0xc0, 0x28}, {0xc1, 0x1e}, + {0xc2, 0x0c}, + {0xc3, 0x69}, }; -static const u8 sensor_start_qvga_772x[][2] = { +static const u8 sensor_start_qvga_yuyv_772x[][2] = { {0x12, 0x40}, {0x17, 0x3f}, {0x18, 0x50}, @@ -602,6 +607,61 @@ static const u8 sensor_start_qvga_772x[][2] = { {0x29, 0x50}, {0x2c, 0x78}, {0x65, 0x2f}, + {0x67, 0x00}, +}; +static const u8 bridge_start_vga_gbrg_772x[][2] = { + {0x88, 0x08}, + {0x1c, 0x00}, + {0x1d, 0x00}, + {0x1d, 0x02}, + {0x1d, 0x00}, + {0x1d, 0x01}, + {0x1d, 0x2c}, + {0x1d, 0x00}, + {0x8d, 0x00}, + {0x8e, 0x00}, + {0xc0, 0x50}, + {0xc1, 0x3c}, + {0xc2, 0x01}, + {0xc3, 0x01}, +}; +static const u8 sensor_start_vga_gbrg_772x[][2] = { + {0x12, 0x01}, + {0x17, 0x26}, + {0x18, 0xa0}, + {0x19, 0x07}, + {0x1a, 0xf0}, + {0x29, 0xa0}, + {0x2c, 0xf0}, + {0x65, 0x20}, + {0x67, 0x02}, +}; +static const u8 bridge_start_qvga_gbrg_772x[][2] = { + {0x88, 0x08}, + {0x1c, 0x00}, + {0x1d, 0x00}, + {0x1d, 0x02}, + {0x1d, 0x00}, + {0x1d, 0x00}, + {0x1d, 0x4b}, + {0x1d, 0x00}, + {0x8d, 0x00}, + {0x8e, 0x00}, + {0xc0, 0x28}, + {0xc1, 0x1e}, + {0xc2, 0x01}, + {0xc3, 0x01}, +}; +static const u8 sensor_start_qvga_gbrg_772x[][2] = { + {0x12, 0x41}, + {0x17, 0x3f}, + {0x18, 0x50}, + {0x19, 0x03}, + {0x1a, 0x78}, + {0x29, 0x50}, + {0x2c, 0x78}, + {0x65, 0x2f}, + {0x67, 0x02}, }; static void ov534_reg_write(struct gspca_dev *gspca_dev, u16 reg, u8 val) @@ -679,7 +739,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev) int i; for (i = 0; i < 5; i++) { - msleep(10); + usleep_range(10000, 20000); data = ov534_reg_read(gspca_dev, OV534_REG_STATUS); switch (data) { @@ -1277,7 +1337,7 @@ static int sd_init(struct gspca_dev *gspca_dev) /* reset sensor */ sccb_reg_write(gspca_dev, 0x12, 0x80); - msleep(10); + usleep_range(10000, 20000); /* probe the sensor */ sccb_reg_read(gspca_dev, 0x0a); @@ -1315,25 +1375,33 @@ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode; - static const struct reg_array bridge_start[NSENSORS][2] = { + static const struct reg_array 
bridge_start[NSENSORS][4] = { [SENSOR_OV767x] = {{bridge_start_qvga_767x, ARRAY_SIZE(bridge_start_qvga_767x)}, {bridge_start_vga_767x, ARRAY_SIZE(bridge_start_vga_767x)}}, - [SENSOR_OV772x] = {{bridge_start_qvga_772x, - ARRAY_SIZE(bridge_start_qvga_772x)}, - {bridge_start_vga_772x, - ARRAY_SIZE(bridge_start_vga_772x)}}, + [SENSOR_OV772x] = {{bridge_start_qvga_yuyv_772x, + ARRAY_SIZE(bridge_start_qvga_yuyv_772x)}, + {bridge_start_vga_yuyv_772x, + ARRAY_SIZE(bridge_start_vga_yuyv_772x)}, + {bridge_start_qvga_gbrg_772x, + ARRAY_SIZE(bridge_start_qvga_gbrg_772x)}, + {bridge_start_vga_gbrg_772x, + ARRAY_SIZE(bridge_start_vga_gbrg_772x)} }, }; - static const struct reg_array sensor_start[NSENSORS][2] = { + static const struct reg_array sensor_start[NSENSORS][4] = { [SENSOR_OV767x] = {{sensor_start_qvga_767x, ARRAY_SIZE(sensor_start_qvga_767x)}, {sensor_start_vga_767x, ARRAY_SIZE(sensor_start_vga_767x)}}, - [SENSOR_OV772x] = {{sensor_start_qvga_772x, - ARRAY_SIZE(sensor_start_qvga_772x)}, - {sensor_start_vga_772x, - ARRAY_SIZE(sensor_start_vga_772x)}}, + [SENSOR_OV772x] = {{sensor_start_qvga_yuyv_772x, + ARRAY_SIZE(sensor_start_qvga_yuyv_772x)}, + {sensor_start_vga_yuyv_772x, + ARRAY_SIZE(sensor_start_vga_yuyv_772x)}, + {sensor_start_qvga_gbrg_772x, + ARRAY_SIZE(sensor_start_qvga_gbrg_772x)}, + {sensor_start_vga_gbrg_772x, + ARRAY_SIZE(sensor_start_vga_gbrg_772x)} }, }; /* (from ms-win trace) */ @@ -1439,10 +1507,9 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, /* If this packet is marked as EOF, end the frame */ } else if (data[1] & UVC_STREAM_EOF) { sd->last_pts = 0; - if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV + if (gspca_dev->pixfmt.pixelformat != V4L2_PIX_FMT_JPEG && gspca_dev->image_len + len - 12 != - gspca_dev->pixfmt.width * - gspca_dev->pixfmt.height * 2) { + gspca_dev->pixfmt.sizeimage) { gspca_dbg(gspca_dev, D_PACK, "wrong sized frame\n"); goto discard; } diff --git a/drivers/media/usb/gspca/pac_common.h b/drivers/media/usb/gspca/pac_common.h index 31f2a42af4dd..aae97a5534e3 100644 --- a/drivers/media/usb/gspca/pac_common.h +++ b/drivers/media/usb/gspca/pac_common.h @@ -21,7 +21,7 @@ /* We calculate the autogain at the end of the transfer of a frame, at this moment a frame with the old settings is being captured and transmitted. So - if we adjust the gain or exposure we must ignore atleast the next frame for + if we adjust the gain or exposure we must ignore at least the next frame for the new settings to come into effect before doing any other adjustments. */ #define PAC_AUTOGAIN_IGNORE_FRAMES 2 diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c index 5984bb12bcff..ab912903f8d7 100644 --- a/drivers/media/usb/gspca/sn9c20x.c +++ b/drivers/media/usb/gspca/sn9c20x.c @@ -1634,7 +1634,7 @@ static int sd_config(struct gspca_dev *gspca_dev, break; case SENSOR_HV7131R: sd->i2c_intf = 0x81; /* i2c 400 Kb/s */ - /* fall thru */ + /* fall through */ default: cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 5f3f2979540a..583c9f10198c 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -121,7 +121,7 @@ struct sensor_data { /* We calculate the autogain at the end of the transfer of a frame, at this moment a frame with the old settings is being captured and transmitted. 
So - if we adjust the gain or exposure we must ignore atleast the next frame for + if we adjust the gain or exposure we must ignore at least the next frame for the new settings to come into effect before doing any other adjustments. */ #define AUTOGAIN_IGNORE_FRAMES 1 @@ -757,7 +757,7 @@ static void setexposure(struct gspca_dev *gspca_dev) /* Don't allow this to get below 10 when using autogain, the steps become very large (relatively) when below 10 causing - the image to oscilate from much too dark, to much too bright + the image to oscillate from much too dark, to much too bright and back again. */ if (gspca_dev->autogain->val && reg10 < 10) reg10 = 10; diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c index df8d8482b795..a63f155f1515 100644 --- a/drivers/media/usb/gspca/sonixj.c +++ b/drivers/media/usb/gspca/sonixj.c @@ -2677,7 +2677,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, * which is 62 bytes long and is followed by various information * including statuses and luminosity. * - * A marker may be splitted on two packets. + * A marker may be split on two packets. * * The 6th byte of a marker contains the bits: * 0x08: USB full diff --git a/drivers/media/usb/gspca/spca501.c b/drivers/media/usb/gspca/spca501.c index 2cce74b166d8..3d215952af18 100644 --- a/drivers/media/usb/gspca/spca501.c +++ b/drivers/media/usb/gspca/spca501.c @@ -574,7 +574,7 @@ static const __u16 spca501_3com_open_data[][3] = { {0x0, 0x0001, 0x0010}, /* TG Start Clock */ /* {0x2, 0x006a, 0x0001}, * C/S Enable ISOSYNCH Packet Engine */ - {0x2, 0x0068, 0x0001}, /* C/S Diable ISOSYNCH Packet Engine */ + {0x2, 0x0068, 0x0001}, /* C/S Disable ISOSYNCH Packet Engine */ {0x2, 0x0000, 0x0005}, {0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */ {0x2, 0x0043, 0x0000}, /* C/S Set Timing Mode, Disable TG soft reset */ diff --git a/drivers/media/usb/gspca/sq905.c b/drivers/media/usb/gspca/sq905.c index ffea9c35b0a0..d5c48216deb7 100644 --- a/drivers/media/usb/gspca/sq905.c +++ b/drivers/media/usb/gspca/sq905.c @@ -18,7 +18,7 @@ * History and Acknowledgments * * The original Linux driver for SQ905 based cameras was written by - * Marcell Lengyel and furter developed by many other contributors + * Marcell Lengyel and further developed by many other contributors * and is available from http://sourceforge.net/projects/sqcam/ * * This driver takes advantage of the reverse engineering work done for diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c index 437a3367ab97..e1e2a605a46c 100644 --- a/drivers/media/usb/gspca/sunplus.c +++ b/drivers/media/usb/gspca/sunplus.c @@ -555,7 +555,7 @@ static void init_ctl_reg(struct gspca_dev *gspca_dev) case BRIDGE_SPCA504: case BRIDGE_SPCA504C: pollreg = 0; - /* fall thru */ + /* fall through */ default: /* case BRIDGE_SPCA533: */ /* case BRIDGE_SPCA504B: */ @@ -638,7 +638,7 @@ static int sd_init(struct gspca_dev *gspca_dev) reg_w_riv(gspca_dev, 0x00, 0x2000, 0x00); reg_w_riv(gspca_dev, 0x00, 0x2301, 0x13); reg_w_riv(gspca_dev, 0x00, 0x2306, 0x00); - /* fall thru */ + /* fall through */ case BRIDGE_SPCA533: spca504B_PollingDataReady(gspca_dev); spca50x_GetFirmware(gspca_dev); diff --git a/drivers/media/usb/gspca/t613.c b/drivers/media/usb/gspca/t613.c index 445782919446..ed9b925b723e 100644 --- a/drivers/media/usb/gspca/t613.c +++ b/drivers/media/usb/gspca/t613.c @@ -966,7 +966,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev) V4L2_CID_SATURATION, 0, 0xf, 1, 5); v4l2_ctrl_new_std(hdl, 
&sd_ctrl_ops, V4L2_CID_GAMMA, 0, GAMMA_MAX, 1, 10); - /* Activate lowlight, some apps dont bring up the + /* Activate lowlight, some apps don't bring up the backlight_compensation control) */ v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BACKLIGHT_COMPENSATION, 0, 1, 1, 1); diff --git a/drivers/media/usb/gspca/touptek.c b/drivers/media/usb/gspca/touptek.c index d1b9032d7863..6c056a448231 100644 --- a/drivers/media/usb/gspca/touptek.c +++ b/drivers/media/usb/gspca/touptek.c @@ -185,7 +185,7 @@ static const struct v4l2_pix_format vga_mode[] = { }; /* - * As theres no known frame sync, the only way to keep synced is to try hard + * As there's no known frame sync, the only way to keep synced is to try hard * to never miss any packets */ #if MAX_NURBS < 4 @@ -259,7 +259,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val) return; } gspca_dbg(gspca_dev, D_STREAM, "exposure: 0x%04X ms\n\n", value); - /* Wonder if theres a good reason for sending it twice */ + /* Wonder if there's a good reason for sending it twice */ /* probably not but leave it in because...why not */ reg_w(gspca_dev, value, REG_COARSE_INTEGRATION_TIME_); reg_w(gspca_dev, value, REG_COARSE_INTEGRATION_TIME_); diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c index abfab3de1866..36cc5a5ce77a 100644 --- a/drivers/media/usb/gspca/w996Xcf.c +++ b/drivers/media/usb/gspca/w996Xcf.c @@ -431,7 +431,7 @@ static void w9968cf_set_crop_window(struct sd *sd) start_cropy = 35; } - /* Work around to avoid FP arithmetics */ + /* Work around to avoid FP arithmetic */ #define SC(x) ((x) << 10) /* Scaling factors */ diff --git a/drivers/media/usb/gspca/zc3xx-reg.h b/drivers/media/usb/gspca/zc3xx-reg.h index 71fda38e85e0..26f6153b687f 100644 --- a/drivers/media/usb/gspca/zc3xx-reg.h +++ b/drivers/media/usb/gspca/zc3xx-reg.h @@ -26,7 +26,7 @@ /* Test mode */ #define ZC3XX_R00B_TESTMODECONTROL 0x000b -/* Frame retreiving */ +/* Frame retrieving */ #define ZC3XX_R00C_LASTACQTIME 0x000c #define ZC3XX_R00D_MONITORRES 0x000d #define ZC3XX_R00E_TIMESTAMPHIGH 0x000e diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c index cf21991e3d99..ad7194029031 100644 --- a/drivers/media/usb/gspca/zc3xx.c +++ b/drivers/media/usb/gspca/zc3xx.c @@ -3602,7 +3602,7 @@ static const struct usb_action pas106b_InitialScale[] = { /* 176x144 */ {0xaa, 0x14, 0x0081}, /* Other registers */ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, -/* Frame retreiving */ +/* Frame retrieving */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* Gains */ {0xa0, 0xa0, ZC3XX_R1A8_DIGITALGAIN}, @@ -3718,7 +3718,7 @@ static const struct usb_action pas106b_Initial[] = { /* 352x288 */ {0xaa, 0x14, 0x0081}, /* Other registers */ {0xa0, 0x37, ZC3XX_R101_SENSORCORRECTION}, -/* Frame retreiving */ +/* Frame retrieving */ {0xa0, 0x00, ZC3XX_R019_AUTOADJUSTFPS}, /* Gains */ {0xa0, 0xa0, ZC3XX_R1A8_DIGITALGAIN}, @@ -6775,7 +6775,7 @@ static int sd_start(struct gspca_dev *gspca_dev) case SENSOR_HV7131R: case SENSOR_TAS5130C: reg_r(gspca_dev, 0x0008); - /* fall thru */ + /* fall through */ case SENSOR_PO2030: reg_w(gspca_dev, 0x03, 0x0008); break; @@ -6824,7 +6824,7 @@ static int sd_start(struct gspca_dev *gspca_dev) case SENSOR_TAS5130C: reg_w(gspca_dev, 0x09, 0x01ad); /* (from win traces) */ reg_w(gspca_dev, 0x15, 0x01ae); - /* fall thru */ + /* fall through */ case SENSOR_PAS202B: case SENSOR_PO2030: /* reg_w(gspca_dev, 0x40, ZC3XX_R117_GGAIN); in win traces */ diff --git a/drivers/media/usb/hdpvr/hdpvr-i2c.c 
b/drivers/media/usb/hdpvr/hdpvr-i2c.c index 5a3cb614a211..d76173f1ced1 100644 --- a/drivers/media/usb/hdpvr/hdpvr-i2c.c +++ b/drivers/media/usb/hdpvr/hdpvr-i2c.c @@ -61,10 +61,10 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus, return -EINVAL; if (wlen) { - memcpy(&dev->i2c_buf, wdata, wlen); + memcpy(dev->i2c_buf, wdata, wlen); ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST, - (bus << 8) | addr, 0, &dev->i2c_buf, + (bus << 8) | addr, 0, dev->i2c_buf, wlen, 1000); if (ret < 0) return ret; @@ -72,10 +72,10 @@ static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus, ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), REQTYPE_I2C_READ, CTRL_READ_REQUEST, - (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000); + (bus << 8) | addr, 0, dev->i2c_buf, len, 1000); if (ret == len) { - memcpy(data, &dev->i2c_buf, len); + memcpy(data, dev->i2c_buf, len); ret = 0; } else if (ret >= 0) ret = -EIO; @@ -91,17 +91,17 @@ static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus, if (len > sizeof(dev->i2c_buf)) return -EINVAL; - memcpy(&dev->i2c_buf, data, len); + memcpy(dev->i2c_buf, data, len); ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST, - (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000); + (bus << 8) | addr, 0, dev->i2c_buf, len, 1000); if (ret < 0) return ret; ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST, - 0, 0, &dev->i2c_buf, 2, 1000); + 0, 0, dev->i2c_buf, 2, 1000); if ((ret == 2) && (dev->i2c_buf[1] == (len - 1))) ret = 0; diff --git a/drivers/media/usb/hdpvr/hdpvr.h b/drivers/media/usb/hdpvr/hdpvr.h index 1d65b4185f57..fa43e1d45ea9 100644 --- a/drivers/media/usb/hdpvr/hdpvr.h +++ b/drivers/media/usb/hdpvr/hdpvr.h @@ -215,7 +215,7 @@ enum { */ /* :0 s 38 01 1700 0003 0001 1 = 00 - * VIDEO STANDARD or FREQUNCY 0 = 60hz, 1 = 50hz + * VIDEO STANDARD or FREQUENCY 0 = 60hz, 1 = 50hz */ /* :0 s 38 01 3100 0003 0004 4 = 03030000 diff --git a/drivers/media/usb/pwc/pwc-dec23.c b/drivers/media/usb/pwc/pwc-dec23.c index 1283b3bd9800..854c36a5dec9 100644 --- a/drivers/media/usb/pwc/pwc-dec23.c +++ b/drivers/media/usb/pwc/pwc-dec23.c @@ -41,7 +41,7 @@ * UNROLL_LOOP_FOR_COPYING_BLOCK * 0: use a loop for a smaller code (but little slower) * 1: when unrolling the loop, gcc produces some faster code (perhaps only - * valid for intel processor class). Activating this option, automaticaly + * valid for intel processor class). 
Activating this option, automatically * activate USE_LOOKUP_TABLE_TO_CLAMP */ #define UNROLL_LOOP_FOR_COPY 1 @@ -332,7 +332,7 @@ void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd) build_table_color(TimonRomTable[version][1], pdec->table_0004_pass2, pdec->table_8004_pass2); } - /* Informations can be coded on a variable number of bits but never less than 8 */ + /* Information can be coded on a variable number of bits but never less than 8 */ shift = 8 - pdec->nbits; pdec->scalebits = SCALEBITS - shift; pdec->nbitsmask = 0xFF >> shift; diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c index 72704f4d5330..4e94197094ad 100644 --- a/drivers/media/usb/pwc/pwc-if.c +++ b/drivers/media/usb/pwc/pwc-if.c @@ -76,6 +76,9 @@ #include "pwc-dec23.h" #include "pwc-dec1.h" +#define CREATE_TRACE_POINTS +#include + /* Function prototypes and driver templates */ /* hotplug device table support */ @@ -156,6 +159,32 @@ static const struct video_device pwc_template = { /***************************************************************************/ /* Private functions */ +static void *pwc_alloc_urb_buffer(struct device *dev, + size_t size, dma_addr_t *dma_handle) +{ + void *buffer = kmalloc(size, GFP_KERNEL); + + if (!buffer) + return NULL; + + *dma_handle = dma_map_single(dev, buffer, size, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, *dma_handle)) { + kfree(buffer); + return NULL; + } + + return buffer; +} + +static void pwc_free_urb_buffer(struct device *dev, + size_t size, + void *buffer, + dma_addr_t dma_handle) +{ + dma_unmap_single(dev, dma_handle, size, DMA_FROM_DEVICE); + kfree(buffer); +} + static struct pwc_frame_buf *pwc_get_next_fill_buf(struct pwc_device *pdev) { unsigned long flags = 0; @@ -260,6 +289,8 @@ static void pwc_isoc_handler(struct urb *urb) int i, fst, flen; unsigned char *iso_buf = NULL; + trace_pwc_handler_enter(urb, pdev); + if (urb->status == -ENOENT || urb->status == -ECONNRESET || urb->status == -ESHUTDOWN) { PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronously.\n", @@ -301,6 +332,11 @@ static void pwc_isoc_handler(struct urb *urb) /* Reset ISOC error counter. We did get here, after all. 
*/ pdev->visoc_errors = 0; + dma_sync_single_for_cpu(&urb->dev->dev, + urb->transfer_dma, + urb->transfer_buffer_length, + DMA_FROM_DEVICE); + /* vsync: 0 = don't copy data 1 = sync-hunt 2 = synched @@ -347,7 +383,14 @@ static void pwc_isoc_handler(struct urb *urb) pdev->vlast_packet_size = flen; } + dma_sync_single_for_device(&urb->dev->dev, + urb->transfer_dma, + urb->transfer_buffer_length, + DMA_FROM_DEVICE); + handler_end: + trace_pwc_handler_exit(urb, pdev); + i = usb_submit_urb(urb, GFP_ATOMIC); if (i != 0) PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i); @@ -421,16 +464,15 @@ retry: urb->dev = udev; urb->pipe = usb_rcvisocpipe(udev, pdev->vendpoint); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; - urb->transfer_buffer = usb_alloc_coherent(udev, - ISO_BUFFER_SIZE, - GFP_KERNEL, - &urb->transfer_dma); + urb->transfer_buffer_length = ISO_BUFFER_SIZE; + urb->transfer_buffer = pwc_alloc_urb_buffer(&udev->dev, + urb->transfer_buffer_length, + &urb->transfer_dma); if (urb->transfer_buffer == NULL) { PWC_ERROR("Failed to allocate urb buffer %d\n", i); pwc_isoc_cleanup(pdev); return -ENOMEM; } - urb->transfer_buffer_length = ISO_BUFFER_SIZE; urb->complete = pwc_isoc_handler; urb->context = pdev; urb->start_frame = 0; @@ -481,15 +523,16 @@ static void pwc_iso_free(struct pwc_device *pdev) /* Freeing ISOC buffers one by one */ for (i = 0; i < MAX_ISO_BUFS; i++) { - if (pdev->urbs[i]) { + struct urb *urb = pdev->urbs[i]; + + if (urb) { PWC_DEBUG_MEMORY("Freeing URB\n"); - if (pdev->urbs[i]->transfer_buffer) { - usb_free_coherent(pdev->udev, - pdev->urbs[i]->transfer_buffer_length, - pdev->urbs[i]->transfer_buffer, - pdev->urbs[i]->transfer_dma); - } - usb_free_urb(pdev->urbs[i]); + if (urb->transfer_buffer) + pwc_free_urb_buffer(&urb->dev->dev, + urb->transfer_buffer_length, + urb->transfer_buffer, + urb->transfer_dma); + usb_free_urb(urb); pdev->urbs[i] = NULL; } } @@ -610,7 +653,7 @@ static int buffer_prepare(struct vb2_buffer *vb) { struct pwc_device *pdev = vb2_get_drv_priv(vb->vb2_queue); - /* Don't allow queing new buffers after device disconnection */ + /* Don't allow queueing new buffers after device disconnection */ if (!pdev->udev) return -ENODEV; diff --git a/drivers/media/usb/pwc/pwc-misc.c b/drivers/media/usb/pwc/pwc-misc.c index 9be5adffa874..03888fc3804d 100644 --- a/drivers/media/usb/pwc/pwc-misc.c +++ b/drivers/media/usb/pwc/pwc-misc.c @@ -59,7 +59,7 @@ int pwc_get_size(struct pwc_device *pdev, int width, int height) return i; } - /* Never reached there always is atleast one supported mode */ + /* Never reached there always is at least one supported mode */ return 0; } diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c index 2ffded08407b..4fc03ec8a4f1 100644 --- a/drivers/media/usb/siano/smsusb.c +++ b/drivers/media/usb/siano/smsusb.c @@ -75,7 +75,7 @@ static int smsusb_submit_urb(struct smsusb_device_t *dev, struct smsusb_urb_t *surb); /* - * Completing URB's callback handler - bottom half (proccess context) + * Completing URB's callback handler - bottom half (process context) * submits the URB prepared on smsusb_onresponse() */ static void do_submit_urb(struct work_struct *work) diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c index 468f5ccf4ae6..a44a44ff3bb1 100644 --- a/drivers/media/usb/stk1160/stk1160-core.c +++ b/drivers/media/usb/stk1160/stk1160-core.c @@ -297,7 +297,7 @@ static int stk1160_probe(struct usb_interface *interface, return -ENOMEM; /* - * Scan 
usb posibilities and populate alt_max_pkt_size array. + * Scan usb possibilities and populate alt_max_pkt_size array. * Also, check if device speed is fast enough. */ rc = stk1160_scan_usb(interface, udev, alt_max_pkt_size); @@ -426,7 +426,7 @@ static void stk1160_disconnect(struct usb_interface *interface) /* * This calls stk1160_release if it's the last reference. - * Otherwise, release is posponed until there are no users left. + * Otherwise, release is postponed until there are no users left. */ v4l2_device_put(&dev->v4l2_dev); } diff --git a/drivers/media/usb/stk1160/stk1160-reg.h b/drivers/media/usb/stk1160/stk1160-reg.h index 7b08a3cc4504..2e400db0ad0e 100644 --- a/drivers/media/usb/stk1160/stk1160-reg.h +++ b/drivers/media/usb/stk1160/stk1160-reg.h @@ -23,7 +23,7 @@ /* GPIO Control */ #define STK1160_GCTRL 0x000 -/* Remote Wakup Control */ +/* Remote Wakeup Control */ #define STK1160_RMCTL 0x00c /* Power-on Strapping Data */ @@ -104,7 +104,7 @@ #define STK1160_SBUSR_RA 0x208 #define STK1160_SBUSR_RD 0x209 -/* Alternate Serial Inteface Control */ +/* Alternate Serial Interface Control */ #define STK1160_ASIC 0x2fc /* PLL Select Options */ diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c index b8ec74d98e8d..8f545861471e 100644 --- a/drivers/media/usb/stkwebcam/stk-webcam.c +++ b/drivers/media/usb/stkwebcam/stk-webcam.c @@ -1006,7 +1006,7 @@ static int stk_setup_format(struct stk_camera *dev) stk_camera_write_reg(dev, 0x001c, 0x46); /* * Registers 0x0115 0x0114 are the size of each line (bytes), - * regs 0x0117 0x0116 are the heigth of the image. + * regs 0x0117 0x0116 are the height of the image. */ stk_camera_write_reg(dev, 0x0115, ((stk_sizes[i].w * depth) >> 8) & 0xff); @@ -1144,7 +1144,7 @@ static int stk_vidioc_dqbuf(struct file *filp, sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_QUEUED; sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_DONE; sbuf->v4lbuf.sequence = ++dev->sequence; - v4l2_get_timestamp(&sbuf->v4lbuf.timestamp); + sbuf->v4lbuf.timestamp = ns_to_timeval(ktime_get_ns()); *buf = sbuf->v4lbuf; return 0; diff --git a/drivers/media/usb/tm6000/tm6000-alsa.c b/drivers/media/usb/tm6000/tm6000-alsa.c index b965931793b5..d6c79c13b332 100644 --- a/drivers/media/usb/tm6000/tm6000-alsa.c +++ b/drivers/media/usb/tm6000/tm6000-alsa.c @@ -58,7 +58,7 @@ module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); /**************************************************************************** - Module specific funtions + Module specific functions ****************************************************************************/ /* diff --git a/drivers/media/usb/tm6000/tm6000-core.c b/drivers/media/usb/tm6000/tm6000-core.c index d3229aa45fcb..2c723706f8c8 100644 --- a/drivers/media/usb/tm6000/tm6000-core.c +++ b/drivers/media/usb/tm6000/tm6000-core.c @@ -668,7 +668,7 @@ int tm6000_set_audio_rinput(struct tm6000_core *dev) areg_f0 = 0x04; break; default: - printk(KERN_INFO "%s: audio input dosn't support\n", + printk(KERN_INFO "%s: audio input doesn't support\n", dev->name); return 0; break; @@ -690,7 +690,7 @@ int tm6000_set_audio_rinput(struct tm6000_core *dev) areg_eb = 0x04; break; default: - printk(KERN_INFO "%s: audio input dosn't support\n", + printk(KERN_INFO "%s: audio input doesn't support\n", dev->name); return 0; break; diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c index 3a4e545c6037..36eea1950e77 100644 --- a/drivers/media/usb/tm6000/tm6000-dvb.c +++ 
b/drivers/media/usb/tm6000/tm6000-dvb.c @@ -149,7 +149,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) ret, __func__); return ret; } else - printk(KERN_ERR "tm6000: pipe resetted\n"); + printk(KERN_ERR "tm6000: pipe reset\n"); /* mutex_lock(&tm6000_driver.open_close_mutex); */ ret = usb_submit_urb(dvb->bulk_urb, GFP_ATOMIC); diff --git a/drivers/media/usb/tm6000/tm6000-i2c.c b/drivers/media/usb/tm6000/tm6000-i2c.c index 8c0476dfe54f..b37782d6f79c 100644 --- a/drivers/media/usb/tm6000/tm6000-i2c.c +++ b/drivers/media/usb/tm6000/tm6000-i2c.c @@ -155,7 +155,7 @@ static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap, /* * The TM6000 only supports a read transaction * immediately after a 1 or 2 byte write to select - * a register. We cannot fulfil this request. + * a register. We cannot fulfill this request. */ i2c_dprintk(2, " read without preceding write not supported"); rc = -EOPNOTSUPP; diff --git a/drivers/media/usb/tm6000/tm6000-stds.c b/drivers/media/usb/tm6000/tm6000-stds.c index c0c75951246b..858cb4f3a9ca 100644 --- a/drivers/media/usb/tm6000/tm6000-stds.c +++ b/drivers/media/usb/tm6000/tm6000-stds.c @@ -323,7 +323,7 @@ static int tm6000_set_audio_std(struct tm6000_core *dev) { uint8_t areg_02 = 0x04; /* GC1 Fixed gain 0dB */ uint8_t areg_05 = 0x01; /* Auto 4.5 = M Japan, Auto 6.5 = DK */ - uint8_t areg_06 = 0x02; /* Auto de-emphasis, mannual channel mode */ + uint8_t areg_06 = 0x02; /* Auto de-emphasis, manual channel mode */ if (dev->radio) { tm6000_set_reg(dev, TM6010_REQ08_R01_A_INIT, 0x00); diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c index ee7b5318b351..072210f5f92f 100644 --- a/drivers/media/usb/tm6000/tm6000-video.c +++ b/drivers/media/usb/tm6000/tm6000-video.c @@ -106,7 +106,7 @@ static inline void buffer_filled(struct tm6000_core *dev, dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); list_del(&buf->vb.queue); wake_up(&buf->vb.done); @@ -180,7 +180,7 @@ static int copy_streams(u8 *data, unsigned long len, field = (header >> 11) & 0x1; line = (header >> 12) & 0x1ff; cmd = (header >> 21) & 0x7; - /* Validates haeder fields */ + /* Validates header fields */ if (size > TM6000_URB_MSG_LEN) size = TM6000_URB_MSG_LEN; pktsize = TM6000_URB_MSG_LEN; diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c index 6eb84cf007b4..4db7a013e049 100644 --- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c +++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c @@ -306,7 +306,7 @@ static int ttusb_boot_dsp(struct ttusb *ttusb) b[3] = 28; /* upload dsp code in 32 byte steps (36 didn't work for me ...) */ - /* 32 is max packet size, no messages should be splitted. */ + /* 32 is max packet size, no messages should be split. */ for (i = 0; i < fw->size; i += 28) { memcpy(&b[4], &fw->data[i], 28); diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c index 44ca66cb9b8f..897ef5e1da71 100644 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c @@ -284,7 +284,7 @@ static void ttusb_dec_handle_irq( struct urb *urb) * * this is an fact a bit too simple implementation; * the box also reports a keyrepeat signal - * (with buffer[3] == 0x40) in an intervall of ~100ms. + * (with buffer[3] == 0x40) in an interval of ~100ms. 
* But to handle this correctly we had to imlemenent some * kind of timer which signals a 'key up' event if no * keyrepeat signal is received for lets say 200ms. diff --git a/drivers/media/usb/usbvision/usbvision-core.c b/drivers/media/usb/usbvision/usbvision-core.c index 31e0e98d6daf..92d166bf8c12 100644 --- a/drivers/media/usb/usbvision/usbvision-core.c +++ b/drivers/media/usb/usbvision/usbvision-core.c @@ -900,7 +900,7 @@ static enum parse_state usbvision_parse_lines_420(struct usb_usbvision *usbvisio if ((frame->curline + 1) >= frame->frmheight) return parse_state_next_frame; - block_split = (pixel_per_line%y_block_size) ? 1 : 0; /* are some blocks splitted into different lines? */ + block_split = (pixel_per_line%y_block_size) ? 1 : 0; /* are some blocks split into different lines? */ y_odd_offset = (pixel_per_line / y_block_size) * (y_block_size + uv_block_size) + block_split * uv_block_size; @@ -1160,7 +1160,7 @@ static void usbvision_parse_data(struct usb_usbvision *usbvision) if (newstate == parse_state_next_frame) { frame->grabstate = frame_state_done; - v4l2_get_timestamp(&(frame->timestamp)); + frame->ts = ktime_get_ns(); frame->sequence = usbvision->frame_num; spin_lock_irqsave(&usbvision->queue_lock, lock_flags); @@ -1865,7 +1865,7 @@ static int usbvision_set_compress_params(struct usb_usbvision *usbvision) value[4] = 0xA2; /* Reg.48 BUF_THR I'm not sure if this does something in not compressed mode. */ value[5] = 0x00; /* Reg.49 DVI_YUV This has nothing to do with compression */ - /* catched values for NT1004 */ + /* caught values for NT1004 */ /* value[0] = 0xFF; Never apply intra mode automatically */ /* value[1] = 0xF1; Use full frame height for virtual strip width; One line per strip */ /* value[2] = 0x01; Force intra mode on all new frames */ @@ -1943,7 +1943,7 @@ int usbvision_set_input(struct usb_usbvision *usbvision) /* SAA7113 uses 8 bit output */ value[0] = USBVISION_8_422_SYNC; } else { - /* I'm sure only about d2-d0 [010] 16 bit 4:2:2 usin sync pulses + /* I'm sure only about d2-d0 [010] 16 bit 4:2:2 using sync pulses * as that is how saa7111 is configured */ value[0] = USBVISION_16_422_SYNC; /* | USBVISION_VSNC_POL | USBVISION_VCLK_POL);*/ @@ -2146,7 +2146,7 @@ int usbvision_power_on(struct usb_usbvision *usbvision) /* * usbvision_begin_streaming() - * Sure you have to put bit 7 to 0, if not incoming frames are droped, but no + * Sure you have to put bit 7 to 0, if not incoming frames are dropped, but no * idea about the rest */ int usbvision_begin_streaming(struct usb_usbvision *usbvision) diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c index dd2ff8ed6c6a..e611052ebf59 100644 --- a/drivers/media/usb/usbvision/usbvision-video.c +++ b/drivers/media/usb/usbvision/usbvision-video.c @@ -706,7 +706,7 @@ static int vidioc_querybuf(struct file *file, vb->length = usbvision->curwidth * usbvision->curheight * usbvision->palette.bytes_per_pixel; - vb->timestamp = usbvision->frame[vb->index].timestamp; + vb->timestamp = ns_to_timeval(usbvision->frame[vb->index].ts); vb->sequence = usbvision->frame[vb->index].sequence; return 0; } @@ -775,7 +775,7 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb) V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; vb->index = f->index; vb->sequence = f->sequence; - vb->timestamp = f->timestamp; + vb->timestamp = ns_to_timeval(f->ts); vb->field = V4L2_FIELD_NONE; vb->bytesused = f->scanlength; diff --git a/drivers/media/usb/usbvision/usbvision.h 
b/drivers/media/usb/usbvision/usbvision.h index 017e7baf5747..668167f8951d 100644 --- a/drivers/media/usb/usbvision/usbvision.h +++ b/drivers/media/usb/usbvision/usbvision.h @@ -135,11 +135,11 @@ #define MIN_FRAME_WIDTH 64 #define MAX_USB_WIDTH 320 /* 384 */ -#define MAX_FRAME_WIDTH 320 /* 384 */ /* streching sometimes causes crashes*/ +#define MAX_FRAME_WIDTH 320 /* 384 */ /* stretching sometimes causes crashes*/ #define MIN_FRAME_HEIGHT 48 #define MAX_USB_HEIGHT 240 /* 288 */ -#define MAX_FRAME_HEIGHT 240 /* 288 */ /* Streching sometimes causes crashes*/ +#define MAX_FRAME_HEIGHT 240 /* 288 */ /* Stretching sometimes causes crashes*/ #define MAX_FRAME_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * MAX_BYTES_PER_PIXEL) #define USBVISION_CLIPMASK_SIZE (MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT / 8) /* bytesize of clipmask */ @@ -177,7 +177,7 @@ enum { * G = 1.164*(Y-16) - 0.813*(U-128) - 0.391*(V-128) * R = 1.164*(Y-16) + 1.596*(U-128) * - * If you fancy integer arithmetics (as you should), hear this: + * If you fancy integer arithmetic (as you should), hear this: * * 65536*B = 76284*(Y-16) + 132252*(V-128) * 65536*G = 76284*(Y-16) - 53281*(U-128) - 25625*(V-128) @@ -316,7 +316,7 @@ struct usbvision_frame { long bytes_read; /* amount of scanlength that has been read from data */ struct usbvision_v4l2_format_st v4l2_format; /* format the user needs*/ int v4l2_linesize; /* bytes for one videoline*/ - struct timeval timestamp; + u64 ts; int sequence; /* How many video frames we send to user */ }; @@ -438,7 +438,7 @@ struct usb_usbvision { int last_compr_level; /* How strong (100) or weak (0) was compression */ int usb_bandwidth; /* Mbit/s */ - /* Statistics that can be overlayed on the screen */ + /* Statistics that can be overlaid on the screen */ unsigned long isoc_urb_count; /* How many URBs we received so far */ unsigned long urb_length; /* Length of last URB */ unsigned long isoc_data_count; /* How many bytes we received */ diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index d45415cbe6e7..14cff91b7aea 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -1212,7 +1212,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain, __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl); - memset(ev->reserved, 0, sizeof(ev->reserved)); + memset(ev, 0, sizeof(*ev)); ev->type = V4L2_EVENT_CTRL; ev->id = v4l2_ctrl.id; ev->u.ctrl.value = value; diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index b62cbd800111..10cfe8e51626 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1106,11 +1106,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev, return -EINVAL; } - /* Make sure the terminal type MSB is not null, otherwise it - * could be confused with a unit. + /* + * Reject invalid terminal types that would cause issues: + * + * - The high byte must be non-zero, otherwise it would be + * confused with a unit. + * + * - Bit 15 must be 0, as we use it internally as a terminal + * direction flag. + * + * Other unknown types are accepted. 
*/ type = get_unaligned_le16(&buffer[4]); - if ((type & 0xff00) == 0) { + if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) { uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol " "interface %d INPUT_TERMINAL %d has invalid " "type 0x%04x, skipping\n", udev->devnum, @@ -2175,7 +2183,7 @@ static int uvc_probe(struct usb_interface *intf, if (udev->serial) strscpy(dev->mdev.serial, udev->serial, sizeof(dev->mdev.serial)); - strscpy(dev->mdev.bus_info, udev->devpath, sizeof(dev->mdev.bus_info)); + usb_make_path(udev, dev->mdev.bus_info, sizeof(dev->mdev.bus_info)); dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); media_device_init(&dev->mdev); diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index 84525ff04745..182dcac49aa3 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -676,6 +676,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream, if (!uvc_hw_timestamps_param) return; + /* + * We will get called from __vb2_queue_cancel() if there are buffers + * done but not dequeued by the user, but the sample array has already + * been released at that time. Just bail out in that case. + */ + if (!clock->samples) + return; + spin_lock_irqsave(&clock->lock, flags); if (clock->count < clock->size) @@ -2000,7 +2008,7 @@ int uvc_video_init(struct uvc_streaming *stream) usb_set_interface(stream->dev->udev, stream->intfnum, 0); /* Set the streaming probe control with default streaming parameters - * retrieved from the device. Webcams that don't suport GET_DEF + * retrieved from the device. Webcams that don't support GET_DEF * requests on the probe control will just keep their current streaming * parameters. */ diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 9b41b14ce076..c7c1baa90dea 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -620,8 +620,10 @@ struct uvc_streaming { (uvc_urb) < &(uvc_streaming)->uvc_urb[UVC_URBS]; \ ++(uvc_urb)) -#define uvc_urb_index(uvc_urb) \ - (unsigned int)((uvc_urb) - (&(uvc_urb)->stream->uvc_urb[0])) +static inline u32 uvc_urb_index(const struct uvc_urb *uvc_urb) +{ + return uvc_urb - &uvc_urb->stream->uvc_urb[0]; +} struct uvc_device_info { u32 quirks; diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c index ab35554cbffa..96fee8d5b865 100644 --- a/drivers/media/usb/zr364xx/zr364xx.c +++ b/drivers/media/usb/zr364xx/zr364xx.c @@ -2,7 +2,7 @@ * Zoran 364xx based USB webcam module version 0.73 * * Allows you to use your USB webcam with V4L2 applications - * This is still in heavy developpement ! + * This is still in heavy development ! 
* * Copyright (C) 2004 Antoine Jacquet * http://royale.zerezo.com/zr364xx/ @@ -521,7 +521,7 @@ static void zr364xx_fillbuff(struct zr364xx_camera *cam, /* tell v4l buffer was filled */ buf->vb.field_count = cam->frame_count * 2; - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); buf->vb.state = VIDEOBUF_DONE; } @@ -549,7 +549,7 @@ static int zr364xx_got_frame(struct zr364xx_camera *cam, int jpgsize) goto unlock; } list_del(&buf->vb.queue); - v4l2_get_timestamp(&buf->vb.ts); + buf->vb.ts = ktime_get_ns(); DBG("[%p/%d] wakeup\n", buf, buf->vb.i); zr364xx_fillbuff(cam, buf, jpgsize); wake_up(&buf->vb.done); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 50763fb42a1b..663730f088cd 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -398,16 +398,6 @@ __v4l2_find_nearest_size(const void *array, size_t array_size, } EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size); -void v4l2_get_timestamp(struct timeval *tv) -{ - struct timespec ts; - - ktime_get_ts(&ts); - tv->tv_sec = ts.tv_sec; - tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC; -} -EXPORT_SYMBOL_GPL(v4l2_get_timestamp); - int v4l2_g_parm_cap(struct video_device *vdev, struct v4l2_subdev *sd, struct v4l2_streamparm *a) { diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 5e3806feb5d7..b79d3bbd8350 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c @@ -825,6 +825,9 @@ const char *v4l2_ctrl_get_name(u32 id) case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:return "H264 Number of HC Layers"; case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP: return "H264 Set QP Value for HC Layers"; + case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION: + return "H264 Constrained Intra Pred"; + case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET: return "H264 Chroma QP Index Offset"; case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP: return "MPEG4 I-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP: return "MPEG4 P-Frame QP Value"; case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP: return "MPEG4 B-Frame QP Value"; @@ -1387,7 +1390,7 @@ static u32 user_flags(const struct v4l2_ctrl *ctrl) static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes) { - memset(ev->reserved, 0, sizeof(ev->reserved)); + memset(ev, 0, sizeof(*ev)); ev->type = V4L2_EVENT_CTRL; ev->id = ctrl->id; ev->u.ctrl.changes = changes; @@ -1661,15 +1664,6 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, return -EINVAL; } - if (p_mpeg2_slice_params->backward_ref_index >= VIDEO_MAX_FRAME || - p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME) - return -EINVAL; - - if (p_mpeg2_slice_params->pad || - p_mpeg2_slice_params->picture.pad || - p_mpeg2_slice_params->sequence.pad) - return -EINVAL; - return 0; case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: @@ -4172,9 +4166,9 @@ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait) { struct v4l2_fh *fh = file->private_data; + poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) return EPOLLPRI; - poll_wait(file, &fh->wait, wait); return 0; } EXPORT_SYMBOL(v4l2_ctrl_poll); diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c index 481e3c65cf97..c46d14c996fc 100644 --- a/drivers/media/v4l2-core/v4l2-event.c +++ b/drivers/media/v4l2-core/v4l2-event.c @@ -52,6 +52,7 @@ static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) 
kev->event.pending = fh->navailable; *event = kev->event; + event->timestamp = ns_to_timespec(kev->ts); kev->sev->first = sev_pos(kev->sev, 1); kev->sev->in_use--; @@ -103,8 +104,8 @@ static struct v4l2_subscribed_event *v4l2_event_subscribed( return NULL; } -static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev, - const struct timespec *ts) +static void __v4l2_event_queue_fh(struct v4l2_fh *fh, + const struct v4l2_event *ev, u64 ts) { struct v4l2_subscribed_event *sev; struct v4l2_kevent *kev; @@ -144,7 +145,7 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e if (copy_payload) kev->event.u = ev->u; kev->event.id = ev->id; - kev->event.timestamp = *ts; + kev->ts = ts; kev->event.sequence = fh->sequence; sev->in_use++; list_add_tail(&kev->list, &fh->available); @@ -158,17 +159,17 @@ void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) { struct v4l2_fh *fh; unsigned long flags; - struct timespec timestamp; + u64 ts; if (vdev == NULL) return; - ktime_get_ts(×tamp); + ts = ktime_get_ns(); spin_lock_irqsave(&vdev->fh_lock, flags); list_for_each_entry(fh, &vdev->fh_list, list) - __v4l2_event_queue_fh(fh, ev, ×tamp); + __v4l2_event_queue_fh(fh, ev, ts); spin_unlock_irqrestore(&vdev->fh_lock, flags); } @@ -177,12 +178,10 @@ EXPORT_SYMBOL_GPL(v4l2_event_queue); void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) { unsigned long flags; - struct timespec timestamp; - - ktime_get_ts(×tamp); + u64 ts = ktime_get_ns(); spin_lock_irqsave(&fh->vdev->fh_lock, flags); - __v4l2_event_queue_fh(fh, ev, ×tamp); + __v4l2_event_queue_fh(fh, ev, ts); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); } EXPORT_SYMBOL_GPL(v4l2_event_queue_fh); diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c index 9bfedd7596a1..20571846e636 100644 --- a/drivers/media/v4l2-core/v4l2-fwnode.c +++ b/drivers/media/v4l2-core/v4l2-fwnode.c @@ -46,7 +46,7 @@ static const struct v4l2_fwnode_bus_conv { enum v4l2_fwnode_bus_type fwnode_bus_type; enum v4l2_mbus_type mbus_type; const char *name; -} busses[] = { +} buses[] = { { V4L2_FWNODE_BUS_TYPE_GUESS, V4L2_MBUS_UNKNOWN, @@ -83,9 +83,9 @@ get_v4l2_fwnode_bus_conv_by_fwnode_bus(enum v4l2_fwnode_bus_type type) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(busses); i++) - if (busses[i].fwnode_bus_type == type) - return &busses[i]; + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].fwnode_bus_type == type) + return &buses[i]; return NULL; } @@ -113,9 +113,9 @@ get_v4l2_fwnode_bus_conv_by_mbus(enum v4l2_mbus_type type) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(busses); i++) - if (busses[i].mbus_type == type) - return &busses[i]; + for (i = 0; i < ARRAY_SIZE(buses); i++) + if (buses[i].mbus_type == type) + return &buses[i]; return NULL; } @@ -809,7 +809,7 @@ error: * root node and the value of that property matching with the integer argument * of the reference, at the same index. * - * The child fwnode reched at the end of the iteration is then returned to the + * The child fwnode reached at the end of the iteration is then returned to the * caller. * * The core reason for this is that you cannot refer to just any node in ACPI. 
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 90aad465f9ed..f6d663934648 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -88,7 +88,7 @@ const char *v4l2_norm_to_name(v4l2_std_id id) int i; /* HACK: ppc32 architecture doesn't have __ucmpdi2 function to handle - 64 bit comparations. So, on that architecture, with some gcc + 64 bit comparisons. So, on that architecture, with some gcc variants, compilation fails. Currently, the max value is 30bit wide. */ BUG_ON(myid != id); @@ -1017,6 +1017,12 @@ static void v4l_sanitize_format(struct v4l2_format *fmt) { unsigned int offset; + /* Make sure num_planes is not bogus */ + if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE || + fmt->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + fmt->fmt.pix_mp.num_planes = min_t(u32, fmt->fmt.pix_mp.num_planes, + VIDEO_MAX_PLANES); + /* * The v4l2_pix_format structure has been extended with fields that were * not previously required to be set to zero by applications. The priv @@ -1214,6 +1220,10 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_YUV555: descr = "16-bit A/XYUV 1-5-5-5"; break; case V4L2_PIX_FMT_YUV565: descr = "16-bit YUV 5-6-5"; break; case V4L2_PIX_FMT_YUV32: descr = "32-bit A/XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_AYUV32: descr = "32-bit AYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_XYUV32: descr = "32-bit XYUV 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYA32: descr = "32-bit VUYA 8-8-8-8"; break; + case V4L2_PIX_FMT_VUYX32: descr = "32-bit VUYX 8-8-8-8"; break; case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break; case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break; case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break; @@ -1553,8 +1563,6 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); - if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) - break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], bytesperline); @@ -1586,8 +1594,6 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); - if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) - break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], bytesperline); @@ -1656,8 +1662,6 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); - if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) - break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], bytesperline); @@ -1689,8 +1693,6 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops, if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane)) break; CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func); - if (p->fmt.pix_mp.num_planes > VIDEO_MAX_PLANES) - break; for (i = 0; i < p->fmt.pix_mp.num_planes; i++) CLEAR_AFTER_FIELD(&p->fmt.pix_mp.plane_fmt[i], bytesperline); diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index 5bbdec55b7d7..3392833d9541 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c @@ -131,7 +131,7 @@ struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL(v4l2_m2m_get_vq); -void *v4l2_m2m_next_buf(struct 
v4l2_m2m_queue_ctx *q_ctx) +struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) { struct v4l2_m2m_buffer *b; unsigned long flags; @@ -149,7 +149,7 @@ void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) } EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); -void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) +struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) { struct v4l2_m2m_buffer *b; unsigned long flags; @@ -167,7 +167,7 @@ void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) } EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf); -void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) +struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) { struct v4l2_m2m_buffer *b; unsigned long flags; @@ -617,36 +617,35 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, __poll_t rc = 0; unsigned long flags; + src_q = v4l2_m2m_get_src_vq(m2m_ctx); + dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); + + poll_wait(file, &src_q->done_wq, wait); + poll_wait(file, &dst_q->done_wq, wait); + if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { struct v4l2_fh *fh = file->private_data; + poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) rc = EPOLLPRI; - else if (req_events & EPOLLPRI) - poll_wait(file, &fh->wait, wait); if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM))) return rc; } - src_q = v4l2_m2m_get_src_vq(m2m_ctx); - dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); - /* * There has to be at least one buffer queued on each queued_list, which * means either in driver already or waiting for driver to claim it * and start processing. */ - if ((!src_q->streaming || list_empty(&src_q->queued_list)) - && (!dst_q->streaming || list_empty(&dst_q->queued_list))) { + if ((!src_q->streaming || src_q->error || + list_empty(&src_q->queued_list)) && + (!dst_q->streaming || dst_q->error || + list_empty(&dst_q->queued_list))) { rc |= EPOLLERR; goto end; } - spin_lock_irqsave(&src_q->done_lock, flags); - if (list_empty(&src_q->done_list)) - poll_wait(file, &src_q->done_wq, wait); - spin_unlock_irqrestore(&src_q->done_lock, flags); - spin_lock_irqsave(&dst_q->done_lock, flags); if (list_empty(&dst_q->done_list)) { /* @@ -657,8 +656,6 @@ __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, spin_unlock_irqrestore(&dst_q->done_lock, flags); return rc | EPOLLIN | EPOLLRDNORM; } - - poll_wait(file, &dst_q->done_wq, wait); } spin_unlock_irqrestore(&dst_q->done_lock, flags); @@ -975,6 +972,27 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); +void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, + struct vb2_v4l2_buffer *cap_vb, + bool copy_frame_flags) +{ + u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK; + + if (copy_frame_flags) + mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | + V4L2_BUF_FLAG_BFRAME; + + cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; + + if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) + cap_vb->timecode = out_vb->timecode; + cap_vb->field = out_vb->field; + cap_vb->flags &= ~mask; + cap_vb->flags |= out_vb->flags & mask; + cap_vb->vb2_buf.copied_timestamp = 1; +} +EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata); + void v4l2_m2m_request_queue(struct media_request *req) { struct media_request_object *obj, *obj_safe; diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c index 7491b337002c..bf7dfb2a34af 100644 --- a/drivers/media/v4l2-core/videobuf-core.c +++ 
b/drivers/media/v4l2-core/videobuf-core.c @@ -214,7 +214,7 @@ int videobuf_queue_is_busy(struct videobuf_queue *q) return 1; } if (q->bufs[i]->state == VIDEOBUF_ACTIVE) { - dprintk(1, "busy: buffer #%d avtive\n", i); + dprintk(1, "busy: buffer #%d active\n", i); return 1; } } @@ -367,7 +367,7 @@ static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b, } b->field = vb->field; - b->timestamp = vb->ts; + b->timestamp = ns_to_timeval(vb->ts); b->bytesused = vb->size; b->sequence = vb->field_count >> 1; } @@ -581,7 +581,7 @@ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b) || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) { buf->size = b->bytesused; buf->field = b->field; - buf->ts = b->timestamp; + buf->ts = v4l2_timeval_to_ns(&b->timestamp); } break; case V4L2_MEMORY_USERPTR: @@ -1119,13 +1119,14 @@ done: EXPORT_SYMBOL_GPL(videobuf_read_stream); __poll_t videobuf_poll_stream(struct file *file, - struct videobuf_queue *q, - poll_table *wait) + struct videobuf_queue *q, + poll_table *wait) { __poll_t req_events = poll_requested_events(wait); struct videobuf_buffer *buf = NULL; __poll_t rc = 0; + poll_wait(file, &buf->done, wait); videobuf_queue_lock(q); if (q->streaming) { if (!list_empty(&q->stream)) @@ -1149,7 +1150,6 @@ __poll_t videobuf_poll_stream(struct file *file, rc = EPOLLERR; if (0 == rc) { - poll_wait(file, &buf->done, wait); if (buf->state == VIDEOBUF_DONE || buf->state == VIDEOBUF_ERROR) { switch (q->type) { diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c index f46132504d88..e1bf50df4c70 100644 --- a/drivers/media/v4l2-core/videobuf-dma-contig.c +++ b/drivers/media/v4l2-core/videobuf-dma-contig.c @@ -248,7 +248,7 @@ static int __videobuf_iolock(struct videobuf_queue *q, /* All handling should be done by __videobuf_mmap_mapper() */ if (!mem->vaddr) { - dev_err(q->dev, "memory is not alloced/mmapped.\n"); + dev_err(q->dev, "memory is not allocated/mmapped.\n"); return -EINVAL; } break; diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c index 45fe781aeeec..cb50f1957828 100644 --- a/drivers/media/v4l2-core/videobuf-vmalloc.c +++ b/drivers/media/v4l2-core/videobuf-vmalloc.c @@ -171,7 +171,7 @@ static int __videobuf_iolock(struct videobuf_queue *q, /* All handling should be done by __videobuf_mmap_mapper() */ if (!mem->vaddr) { - printk(KERN_ERR "memory is not alloced/mmapped.\n"); + printk(KERN_ERR "memory is not allocated/mmapped.\n"); return -EINVAL; } break; @@ -196,26 +196,6 @@ static int __videobuf_iolock(struct videobuf_queue *q, } dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages); - -#if 0 - int rc; - /* Kernel userptr is used also by read() method. In this case, - there's no need to remap, since data will be copied to user - */ - if (!vb->baddr) - return 0; - - /* FIXME: to properly support USERPTR, remap should occur. 
- The code below won't work, since mem->vma = NULL - */ - /* Try to remap memory */ - rc = remap_vmalloc_range(mem->vma, (void *)vb->baddr, 0); - if (rc < 0) { - printk(KERN_ERR "mmap: remap failed with error %d", rc); - return -ENOMEM; - } -#endif - break; case V4L2_MEMORY_OVERLAY: default: diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c index 24afc36833bf..0a53598d982f 100644 --- a/drivers/memory/tegra/mc.c +++ b/drivers/memory/tegra/mc.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,7 @@ #define MC_ERR_ADR 0x0c +#define MC_GART_ERROR_REQ 0x30 #define MC_DECERR_EMEM_OTHERS_STATUS 0x58 #define MC_SECURITY_VIOLATION_STATUS 0x74 @@ -51,7 +53,7 @@ static const struct of_device_id tegra_mc_of_match[] = { #ifdef CONFIG_ARCH_TEGRA_2x_SOC - { .compatible = "nvidia,tegra20-mc", .data = &tegra20_mc_soc }, + { .compatible = "nvidia,tegra20-mc-gart", .data = &tegra20_mc_soc }, #endif #ifdef CONFIG_ARCH_TEGRA_3x_SOC { .compatible = "nvidia,tegra30-mc", .data = &tegra30_mc_soc }, @@ -161,7 +163,7 @@ static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev, /* block clients DMA requests */ err = rst_ops->block_dma(mc, rst); if (err) { - dev_err(mc->dev, "Failed to block %s DMA: %d\n", + dev_err(mc->dev, "failed to block %s DMA: %d\n", rst->name, err); return err; } @@ -171,7 +173,7 @@ static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev, /* wait for completion of the outstanding DMA requests */ while (!rst_ops->dma_idling(mc, rst)) { if (!retries--) { - dev_err(mc->dev, "Failed to flush %s DMA\n", + dev_err(mc->dev, "failed to flush %s DMA\n", rst->name); return -EBUSY; } @@ -184,7 +186,7 @@ static int tegra_mc_hotreset_assert(struct reset_controller_dev *rcdev, /* clear clients DMA requests sitting before arbitration */ err = rst_ops->hotreset_assert(mc, rst); if (err) { - dev_err(mc->dev, "Failed to hot reset %s: %d\n", + dev_err(mc->dev, "failed to hot reset %s: %d\n", rst->name, err); return err; } @@ -213,7 +215,7 @@ static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev, /* take out client from hot reset */ err = rst_ops->hotreset_deassert(mc, rst); if (err) { - dev_err(mc->dev, "Failed to deassert hot reset %s: %d\n", + dev_err(mc->dev, "failed to deassert hot reset %s: %d\n", rst->name, err); return err; } @@ -223,7 +225,7 @@ static int tegra_mc_hotreset_deassert(struct reset_controller_dev *rcdev, /* allow new DMA requests to proceed to arbitration */ err = rst_ops->unblock_dma(mc, rst); if (err) { - dev_err(mc->dev, "Failed to unblock %s DMA : %d\n", + dev_err(mc->dev, "failed to unblock %s DMA : %d\n", rst->name, err); return err; } @@ -575,8 +577,15 @@ static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data) break; case MC_INT_INVALID_GART_PAGE: - dev_err_ratelimited(mc->dev, "%s\n", error); - continue; + reg = MC_GART_ERROR_REQ; + value = mc_readl(mc, reg); + + id = (value >> 1) & mc->soc->client_id_mask; + desc = error_names[2]; + + if (value & BIT(0)) + direction = "write"; + break; case MC_INT_SECURITY_VIOLATION: reg = MC_SECURITY_VIOLATION_STATUS; @@ -611,23 +620,18 @@ static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data) static int tegra_mc_probe(struct platform_device *pdev) { - const struct of_device_id *match; struct resource *res; struct tegra_mc *mc; void *isr; int err; - match = of_match_node(tegra_mc_of_match, pdev->dev.of_node); - if (!match) - return -ENODEV; - mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return 
-ENOMEM; platform_set_drvdata(pdev, mc); spin_lock_init(&mc->lock); - mc->soc = match->data; + mc->soc = of_device_get_match_data(&pdev->dev); mc->dev = &pdev->dev; /* length of MC tick in nanoseconds */ @@ -638,38 +642,35 @@ static int tegra_mc_probe(struct platform_device *pdev) if (IS_ERR(mc->regs)) return PTR_ERR(mc->regs); + mc->clk = devm_clk_get(&pdev->dev, "mc"); + if (IS_ERR(mc->clk)) { + dev_err(&pdev->dev, "failed to get MC clock: %ld\n", + PTR_ERR(mc->clk)); + return PTR_ERR(mc->clk); + } + #ifdef CONFIG_ARCH_TEGRA_2x_SOC if (mc->soc == &tegra20_mc_soc) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mc->regs2 = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(mc->regs2)) - return PTR_ERR(mc->regs2); - isr = tegra20_mc_irq; } else #endif { - mc->clk = devm_clk_get(&pdev->dev, "mc"); - if (IS_ERR(mc->clk)) { - dev_err(&pdev->dev, "failed to get MC clock: %ld\n", - PTR_ERR(mc->clk)); - return PTR_ERR(mc->clk); - } - err = tegra_mc_setup_latency_allowance(mc); if (err < 0) { - dev_err(&pdev->dev, "failed to setup latency allowance: %d\n", + dev_err(&pdev->dev, + "failed to setup latency allowance: %d\n", err); return err; } isr = tegra_mc_irq; - } - err = tegra_mc_setup_timings(mc); - if (err < 0) { - dev_err(&pdev->dev, "failed to setup timings: %d\n", err); - return err; + err = tegra_mc_setup_timings(mc); + if (err < 0) { + dev_err(&pdev->dev, "failed to setup timings: %d\n", + err); + return err; + } } mc->irq = platform_get_irq(pdev, 0); @@ -678,11 +679,11 @@ static int tegra_mc_probe(struct platform_device *pdev) return mc->irq; } - WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); + WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n"); mc_writel(mc, mc->soc->intmask, MC_INTMASK); - err = devm_request_irq(&pdev->dev, mc->irq, isr, IRQF_SHARED, + err = devm_request_irq(&pdev->dev, mc->irq, isr, 0, dev_name(&pdev->dev), mc); if (err < 0) { dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", mc->irq, @@ -695,20 +696,65 @@ static int tegra_mc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to register reset controller: %d\n", err); - if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU)) { + if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) { mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc); - if (IS_ERR(mc->smmu)) + if (IS_ERR(mc->smmu)) { dev_err(&pdev->dev, "failed to probe SMMU: %ld\n", PTR_ERR(mc->smmu)); + mc->smmu = NULL; + } + } + + if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) { + mc->gart = tegra_gart_probe(&pdev->dev, mc); + if (IS_ERR(mc->gart)) { + dev_err(&pdev->dev, "failed to probe GART: %ld\n", + PTR_ERR(mc->gart)); + mc->gart = NULL; + } + } + + return 0; +} + +static int tegra_mc_suspend(struct device *dev) +{ + struct tegra_mc *mc = dev_get_drvdata(dev); + int err; + + if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) { + err = tegra_gart_suspend(mc->gart); + if (err) + return err; } return 0; } +static int tegra_mc_resume(struct device *dev) +{ + struct tegra_mc *mc = dev_get_drvdata(dev); + int err; + + if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) { + err = tegra_gart_resume(mc->gart); + if (err) + return err; + } + + return 0; +} + +static const struct dev_pm_ops tegra_mc_pm_ops = { + .suspend = tegra_mc_suspend, + .resume = tegra_mc_resume, +}; + static struct platform_driver tegra_mc_driver = { .driver = { .name = "tegra-mc", .of_match_table = tegra_mc_of_match, + .pm = &tegra_mc_pm_ops, .suppress_bind_attrs = true, }, .prevent_deferred_probe = true, 
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h index 01065f12ebeb..887a3b07334f 100644 --- a/drivers/memory/tegra/mc.h +++ b/drivers/memory/tegra/mc.h @@ -26,19 +26,13 @@ static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset) { - if (mc->regs2 && offset >= 0x24) - return readl(mc->regs2 + offset - 0x3c); - - return readl(mc->regs + offset); + return readl_relaxed(mc->regs + offset); } static inline void mc_writel(struct tegra_mc *mc, u32 value, unsigned long offset) { - if (mc->regs2 && offset >= 0x24) - return writel(value, mc->regs2 + offset - 0x3c); - - writel(value, mc->regs + offset); + writel_relaxed(value, mc->regs + offset); } extern const struct tegra_mc_reset_ops terga_mc_reset_ops_common; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index f461460a2aeb..0ce2d8dfc5f1 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -215,7 +215,6 @@ config MFD_CROS_EC config MFD_CROS_EC_CHARDEV tristate "Chrome OS Embedded Controller userspace device interface" depends on MFD_CROS_EC - select CROS_EC_CTL ---help--- This driver adds support to talk with the ChromeOS EC from userspace. @@ -519,10 +518,10 @@ config INTEL_SOC_PMIC bool "Support for Crystal Cove PMIC" depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK depends on X86 || COMPILE_TEST + depends on I2C_DESIGNWARE_PLATFORM=y select MFD_CORE select REGMAP_I2C select REGMAP_IRQ - select I2C_DESIGNWARE_PLATFORM help Select this option to enable support for Crystal Cove PMIC on some Intel SoC systems. The PMIC provides ADC, GPIO, @@ -548,10 +547,10 @@ config INTEL_SOC_PMIC_CHTWC bool "Support for Intel Cherry Trail Whiskey Cove PMIC" depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK depends on X86 || COMPILE_TEST + depends on I2C_DESIGNWARE_PLATFORM=y select MFD_CORE select REGMAP_I2C select REGMAP_IRQ - select I2C_DESIGNWARE_PLATFORM help Select this option to enable support for the Intel Cherry Trail Whiskey Cove PMIC found on some Intel Cherry Trail systems. @@ -928,7 +927,7 @@ config UCB1400_CORE config MFD_PM8XXX tristate "Qualcomm PM8xxx PMIC chips driver" depends on (ARM || HEXAGON || COMPILE_TEST) - select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY select MFD_CORE select REGMAP help @@ -1066,6 +1065,8 @@ config MFD_SI476X_CORE config MFD_SM501 tristate "Silicon Motion SM501" + depends on HAS_DMA + select DMA_DECLARE_COHERENT ---help--- This is the core driver for the Silicon Motion SM501 multimedia companion chip. This device is a multifunction device which may @@ -1205,7 +1206,7 @@ config MFD_STMPE Currently supported devices are: - STMPE811: GPIO, Touchscreen + STMPE811: GPIO, Touchscreen, ADC STMPE1601: GPIO, Keypad STMPE1801: GPIO, Keypad STMPE2401: GPIO, Keypad @@ -1218,6 +1219,7 @@ config MFD_STMPE GPIO: stmpe-gpio Keypad: stmpe-keypad Touchscreen: stmpe-ts + ADC: stmpe-adc menu "STMicroelectronics STMPE Interface Drivers" depends on MFD_STMPE @@ -1419,10 +1421,10 @@ config MFD_TPS65217 config MFD_TPS68470 bool "TI TPS68470 Power Management / LED chips" - depends on ACPI && I2C=y + depends on ACPI && PCI && I2C=y + depends on I2C_DESIGNWARE_PLATFORM=y select MFD_CORE select REGMAP_I2C - select I2C_DESIGNWARE_PLATFORM help If you say yes here you get support for the TPS68470 series of Power Management / LED chips. 
@@ -1674,9 +1676,18 @@ config MFD_TC6393XB select GPIOLIB select MFD_CORE select MFD_TMIO + select DMA_DECLARE_COHERENT help Support for Toshiba Mobile IO Controller TC6393XB +config MFD_TQMX86 + tristate "TQ-Systems IO controller TQMX86" + select MFD_CORE + help + Say yes here to enable support for various functions of the + TQ-Systems IO controller and watchdog device, found on their + ComExpress CPU modules. + config MFD_VX855 tristate "VIA VX855/VX875 integrated south bridge" depends on PCI @@ -1686,6 +1697,14 @@ config MFD_VX855 VIA VX855/VX875 south bridge. You will need to enable the vx855_spi and/or vx855_gpio drivers for this to do anything useful. +config MFD_LOCHNAGAR + bool "Cirrus Logic Lochnagar Audio Development Board" + select MFD_CORE + select REGMAP_I2C + depends on I2C=y && OF + help + Support for Cirrus Logic Lochnagar audio development board. + config MFD_ARIZONA select REGMAP select REGMAP_IRQ @@ -1872,6 +1891,22 @@ config MFD_STM32_TIMERS for PWM and IIO Timer. This driver allow to share the registers between the others drivers. +config MFD_STPMIC1 + tristate "Support for STPMIC1 PMIC" + depends on (I2C=y && OF) + select REGMAP_I2C + select REGMAP_IRQ + select MFD_CORE + help + Support for ST Microelectronics STPMIC1 PMIC. STPMIC1 has power on + key, watchdog and regulator functionalities which are supported via + the relevant subsystems. This driver provides core support for the + STPMIC1. In order to use the actual functionaltiy of the device other + drivers must be enabled. + + To compile this driver as a module, choose M here: the + module will be called stpmic1. + menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 12980a4ad460..b4569ed7f3f3 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o obj-$(CONFIG_MFD_ACT8945A) += act8945a.o obj-$(CONFIG_MFD_SM501) += sm501.o obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o +obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o cros_ec_core-objs := cros_ec.o @@ -36,6 +37,9 @@ obj-$(CONFIG_MFD_TC3589X) += tc3589x.o obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o +obj-$(CONFIG_MFD_TQMX86) += tqmx86.o + +obj-$(CONFIG_MFD_LOCHNAGAR) += lochnagar-i2c.o obj-$(CONFIG_MFD_ARIZONA) += arizona-core.o obj-$(CONFIG_MFD_ARIZONA) += arizona-irq.o @@ -233,6 +237,7 @@ obj-$(CONFIG_INTEL_SOC_PMIC_CHTDC_TI) += intel_soc_pmic_chtdc_ti.o obj-$(CONFIG_MFD_MT6397) += mt6397-core.o obj-$(CONFIG_MFD_ALTERA_A10SR) += altera-a10sr.o +obj-$(CONFIG_MFD_STPMIC1) += stpmic1.o obj-$(CONFIG_MFD_SUN4I_GPADC) += sun4i-gpadc.o obj-$(CONFIG_MFD_STM32_LPTIMER) += stm32-lptimer.o diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c index 3ba19a45f199..9d3d90d386c2 100644 --- a/drivers/mfd/aat2870-core.c +++ b/drivers/mfd/aat2870-core.c @@ -20,7 +20,6 @@ */ #include -#include #include #include #include @@ -349,18 +348,10 @@ static void aat2870_init_debugfs(struct aat2870_data *aat2870) "Failed to create debugfs register file\n"); } -static void aat2870_uninit_debugfs(struct aat2870_data *aat2870) -{ - debugfs_remove_recursive(aat2870->dentry_root); -} #else static inline void aat2870_init_debugfs(struct aat2870_data *aat2870) { } - -static inline void aat2870_uninit_debugfs(struct aat2870_data *aat2870) -{ -} #endif 
/* CONFIG_DEBUG_FS */ static int aat2870_i2c_probe(struct i2c_client *client, @@ -440,20 +431,6 @@ out_disable: return ret; } -static int aat2870_i2c_remove(struct i2c_client *client) -{ - struct aat2870_data *aat2870 = i2c_get_clientdata(client); - - aat2870_uninit_debugfs(aat2870); - - mfd_remove_devices(aat2870->dev); - aat2870_disable(aat2870); - if (aat2870->uninit) - aat2870->uninit(aat2870); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int aat2870_i2c_suspend(struct device *dev) { @@ -492,15 +469,14 @@ static const struct i2c_device_id aat2870_i2c_id_table[] = { { "aat2870", 0 }, { } }; -MODULE_DEVICE_TABLE(i2c, aat2870_i2c_id_table); static struct i2c_driver aat2870_i2c_driver = { .driver = { - .name = "aat2870", - .pm = &aat2870_pm_ops, + .name = "aat2870", + .pm = &aat2870_pm_ops, + .suppress_bind_attrs = true, }, .probe = aat2870_i2c_probe, - .remove = aat2870_i2c_remove, .id_table = aat2870_i2c_id_table, }; @@ -509,13 +485,3 @@ static int __init aat2870_init(void) return i2c_add_driver(&aat2870_i2c_driver); } subsys_initcall(aat2870_init); - -static void __exit aat2870_exit(void) -{ - i2c_del_driver(&aat2870_i2c_driver); -} -module_exit(aat2870_exit); - -MODULE_DESCRIPTION("Core support for the AnalogicTech AAT2870"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jin Park "); diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c index be0497b96720..2cdd39cb8a18 100644 --- a/drivers/mfd/adp5520.c +++ b/drivers/mfd/adp5520.c @@ -7,6 +7,8 @@ * * Copyright 2009 Analog Devices Inc. * + * Author: Michael Hennerich + * * Derived from da903x: * Copyright (C) 2008 Compulab, Ltd. * Mike Rapoport @@ -18,7 +20,7 @@ */ #include -#include +#include #include #include #include @@ -304,18 +306,6 @@ out_free_irq: return ret; } -static int adp5520_remove(struct i2c_client *client) -{ - struct adp5520_chip *chip = dev_get_drvdata(&client->dev); - - if (chip->irq) - free_irq(chip->irq, chip); - - adp5520_remove_subdevs(chip); - adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); - return 0; -} - #ifdef CONFIG_PM_SLEEP static int adp5520_suspend(struct device *dev) { @@ -346,20 +336,14 @@ static const struct i2c_device_id adp5520_id[] = { { "pmic-adp5501", ID_ADP5501 }, { } }; -MODULE_DEVICE_TABLE(i2c, adp5520_id); static struct i2c_driver adp5520_driver = { .driver = { - .name = "adp5520", - .pm = &adp5520_pm, + .name = "adp5520", + .pm = &adp5520_pm, + .suppress_bind_attrs = true, }, .probe = adp5520_probe, - .remove = adp5520_remove, .id_table = adp5520_id, }; - -module_i2c_driver(adp5520_driver); - -MODULE_AUTHOR("Michael Hennerich "); -MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver"); -MODULE_LICENSE("GPL"); +builtin_i2c_driver(adp5520_driver); diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c index 67b12417585d..7a74a874b93c 100644 --- a/drivers/mfd/as3711.c +++ b/drivers/mfd/as3711.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -118,7 +117,6 @@ static const struct of_device_id as3711_of_match[] = { {.compatible = "ams,as3711",}, {} }; -MODULE_DEVICE_TABLE(of, as3711_of_match); #endif static int as3711_i2c_probe(struct i2c_client *client, @@ -202,8 +200,6 @@ static const struct i2c_device_id as3711_i2c_id[] = { {} }; -MODULE_DEVICE_TABLE(i2c, as3711_i2c_id); - static struct i2c_driver as3711_i2c_driver = { .driver = { .name = "as3711", @@ -219,13 +215,3 @@ static int __init as3711_i2c_init(void) } /* Initialise early */ subsys_initcall(as3711_i2c_init); - -static void __exit as3711_i2c_exit(void) -{ - i2c_del_driver(&as3711_i2c_driver); -} 
-module_exit(as3711_i2c_exit); - -MODULE_AUTHOR("Guennadi Liakhovetski "); -MODULE_DESCRIPTION("AS3711 PMIC driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/at91-usart.c b/drivers/mfd/at91-usart.c index d20747f612c1..6a8351a4588e 100644 --- a/drivers/mfd/at91-usart.c +++ b/drivers/mfd/at91-usart.c @@ -15,29 +15,29 @@ #include #include -static struct mfd_cell at91_usart_spi_subdev = { - .name = "at91_usart_spi", - .of_compatible = "microchip,at91sam9g45-usart-spi", - }; +static const struct mfd_cell at91_usart_spi_subdev = { + .name = "at91_usart_spi", + .of_compatible = "microchip,at91sam9g45-usart-spi", +}; -static struct mfd_cell at91_usart_serial_subdev = { - .name = "atmel_usart_serial", - .of_compatible = "atmel,at91rm9200-usart-serial", - }; +static const struct mfd_cell at91_usart_serial_subdev = { + .name = "atmel_usart_serial", + .of_compatible = "atmel,at91rm9200-usart-serial", +}; static int at91_usart_mode_probe(struct platform_device *pdev) { - struct mfd_cell cell; + const struct mfd_cell *cell; u32 opmode = AT91_USART_MODE_SERIAL; device_property_read_u32(&pdev->dev, "atmel,usart-mode", &opmode); switch (opmode) { case AT91_USART_MODE_SPI: - cell = at91_usart_spi_subdev; + cell = &at91_usart_spi_subdev; break; case AT91_USART_MODE_SERIAL: - cell = at91_usart_serial_subdev; + cell = &at91_usart_serial_subdev; break; default: dev_err(&pdev->dev, "atmel,usart-mode has an invalid value %u\n", @@ -45,7 +45,7 @@ static int at91_usart_mode_probe(struct platform_device *pdev) return -EINVAL; } - return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, &cell, 1, + return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, cell, 1, NULL, 0, NULL); } diff --git a/drivers/mfd/bcm2835-pm.c b/drivers/mfd/bcm2835-pm.c new file mode 100644 index 000000000000..42fe67f1538e --- /dev/null +++ b/drivers/mfd/bcm2835-pm.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PM MFD driver for Broadcom BCM2835 + * + * This driver binds to the PM block and creates the MFD device for + * the WDT and power drivers. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct mfd_cell bcm2835_pm_devs[] = { + { .name = "bcm2835-wdt" }, +}; + +static const struct mfd_cell bcm2835_power_devs[] = { + { .name = "bcm2835-power" }, +}; + +static int bcm2835_pm_probe(struct platform_device *pdev) +{ + struct resource *res; + struct device *dev = &pdev->dev; + struct bcm2835_pm *pm; + int ret; + + pm = devm_kzalloc(dev, sizeof(*pm), GFP_KERNEL); + if (!pm) + return -ENOMEM; + platform_set_drvdata(pdev, pm); + + pm->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pm->base = devm_ioremap_resource(dev, res); + if (IS_ERR(pm->base)) + return PTR_ERR(pm->base); + + ret = devm_mfd_add_devices(dev, -1, + bcm2835_pm_devs, ARRAY_SIZE(bcm2835_pm_devs), + NULL, 0, NULL); + if (ret) + return ret; + + /* We'll use the presence of the AXI ASB regs in the + * bcm2835-pm binding as the key for whether we can reference + * the full PM register range and support power domains. 
+ */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + pm->asb = devm_ioremap_resource(dev, res); + if (IS_ERR(pm->asb)) + return PTR_ERR(pm->asb); + + ret = devm_mfd_add_devices(dev, -1, + bcm2835_power_devs, + ARRAY_SIZE(bcm2835_power_devs), + NULL, 0, NULL); + if (ret) + return ret; + } + + return 0; +} + +static const struct of_device_id bcm2835_pm_of_match[] = { + { .compatible = "brcm,bcm2835-pm-wdt", }, + { .compatible = "brcm,bcm2835-pm", }, + {}, +}; +MODULE_DEVICE_TABLE(of, bcm2835_pm_of_match); + +static struct platform_driver bcm2835_pm_driver = { + .probe = bcm2835_pm_probe, + .driver = { + .name = "bcm2835-pm", + .of_match_table = bcm2835_pm_of_match, + }, +}; +module_platform_driver(bcm2835_pm_driver); + +MODULE_AUTHOR("Eric Anholt "); +MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM MFD"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c index fe6f83766144..6acfe036d522 100644 --- a/drivers/mfd/cros_ec.c +++ b/drivers/mfd/cros_ec.c @@ -129,8 +129,8 @@ int cros_ec_register(struct cros_ec_device *ec_dev) } } - err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, 1, - NULL, ec_dev->irq, NULL); + err = devm_mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_cell, + 1, NULL, ec_dev->irq, NULL); if (err) { dev_err(dev, "Failed to register Embedded Controller subdevice %d\n", @@ -147,7 +147,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev) * - the EC is responsive at init time (it is not true for a * sensor hub. */ - err = mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, + err = devm_mfd_add_devices(ec_dev->dev, PLATFORM_DEVID_AUTO, &ec_pd_cell, 1, NULL, ec_dev->irq, NULL); if (err) { dev_err(dev, @@ -181,14 +181,6 @@ int cros_ec_register(struct cros_ec_device *ec_dev) } EXPORT_SYMBOL(cros_ec_register); -int cros_ec_remove(struct cros_ec_device *ec_dev) -{ - mfd_remove_devices(ec_dev->dev); - - return 0; -} -EXPORT_SYMBOL(cros_ec_remove); - #ifdef CONFIG_PM_SLEEP int cros_ec_suspend(struct cros_ec_device *ec_dev) { diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index 2d0fee488c5a..d275deaecb12 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -34,17 +35,9 @@ #define CROS_MAX_DEV 128 static int ec_major; -static const struct attribute_group *cros_ec_groups[] = { - &cros_ec_attr_group, - &cros_ec_lightbar_attr_group, - &cros_ec_vbc_attr_group, - NULL, -}; - static struct class cros_class = { .owner = THIS_MODULE, .name = "chromeos", - .dev_groups = cros_ec_groups, }; /* Basic communication */ @@ -231,7 +224,7 @@ static long ec_device_ioctl_readmem(struct cros_ec_dev *ec, void __user *arg) if (copy_to_user((void __user *)arg, &s_mem, sizeof(s_mem))) return -EFAULT; - return 0; + return num; } static long ec_device_ioctl(struct file *filp, unsigned int cmd, @@ -395,9 +388,20 @@ static const struct mfd_cell cros_usbpd_charger_cells[] = { { .name = "cros-usbpd-charger" } }; +static const struct mfd_cell cros_ec_platform_cells[] = { + { .name = "cros-ec-debugfs" }, + { .name = "cros-ec-lightbar" }, + { .name = "cros-ec-sysfs" }, +}; + +static const struct mfd_cell cros_ec_vbc_cells[] = { + { .name = "cros-ec-vbc" } +}; + static int ec_device_probe(struct platform_device *pdev) { int retval = -ENOMEM; + struct device_node *node; struct device *dev = &pdev->dev; struct cros_ec_platform *ec_platform = dev_get_platdata(dev); struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); @@ 
-470,9 +474,6 @@ static int ec_device_probe(struct platform_device *pdev) retval); } - /* Take control of the lightbar from the EC. */ - lb_manual_suspend_ctrl(ec, 1); - /* We can now add the sysfs class, we know which parameter to show */ retval = cdev_device_add(&ec->cdev, &ec->class_dev); if (retval) { @@ -480,8 +481,26 @@ static int ec_device_probe(struct platform_device *pdev) goto failed; } - if (cros_ec_debugfs_init(ec)) - dev_warn(dev, "failed to create debugfs directory\n"); + retval = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO, + cros_ec_platform_cells, + ARRAY_SIZE(cros_ec_platform_cells), + NULL, 0, NULL); + if (retval) + dev_warn(ec->dev, + "failed to add cros-ec platform devices: %d\n", + retval); + + /* Check whether this EC instance has a VBC NVRAM */ + node = ec->ec_dev->dev->of_node; + if (of_property_read_bool(node, "google,has-vbc-nvram")) { + retval = mfd_add_devices(ec->dev, PLATFORM_DEVID_AUTO, + cros_ec_vbc_cells, + ARRAY_SIZE(cros_ec_vbc_cells), + NULL, 0, NULL); + if (retval) + dev_warn(ec->dev, "failed to add VBC devices: %d\n", + retval); + } return 0; @@ -494,69 +513,25 @@ static int ec_device_remove(struct platform_device *pdev) { struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev); - /* Let the EC take over the lightbar again. */ - lb_manual_suspend_ctrl(ec, 0); - - cros_ec_debugfs_remove(ec); - mfd_remove_devices(ec->dev); cdev_del(&ec->cdev); device_unregister(&ec->class_dev); return 0; } -static void ec_device_shutdown(struct platform_device *pdev) -{ - struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev); - - /* Be sure to clear up debugfs delayed works */ - cros_ec_debugfs_remove(ec); -} - static const struct platform_device_id cros_ec_id[] = { { DRV_NAME, 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, cros_ec_id); -static __maybe_unused int ec_device_suspend(struct device *dev) -{ - struct cros_ec_dev *ec = dev_get_drvdata(dev); - - cros_ec_debugfs_suspend(ec); - - lb_suspend(ec); - - return 0; -} - -static __maybe_unused int ec_device_resume(struct device *dev) -{ - struct cros_ec_dev *ec = dev_get_drvdata(dev); - - cros_ec_debugfs_resume(ec); - - lb_resume(ec); - - return 0; -} - -static const struct dev_pm_ops cros_ec_dev_pm_ops = { -#ifdef CONFIG_PM_SLEEP - .suspend = ec_device_suspend, - .resume = ec_device_resume, -#endif -}; - static struct platform_driver cros_ec_dev_driver = { .driver = { .name = DRV_NAME, - .pm = &cros_ec_dev_pm_ops, }, .id_table = cros_ec_id, .probe = ec_device_probe, .remove = ec_device_remove, - .shutdown = ec_device_shutdown, }; static int __init cros_ec_dev_init(void) diff --git a/drivers/mfd/cros_ec_dev.h b/drivers/mfd/cros_ec_dev.h index 978d836a0248..ec750433455a 100644 --- a/drivers/mfd/cros_ec_dev.h +++ b/drivers/mfd/cros_ec_dev.h @@ -44,10 +44,4 @@ struct cros_ec_readmem { #define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command) #define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem) -/* Lightbar utilities */ -extern bool ec_has_lightbar(struct cros_ec_dev *ec); -extern int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable); -extern int lb_suspend(struct cros_ec_dev *ec); -extern int lb_resume(struct cros_ec_dev *ec); - #endif /* _CROS_EC_DEV_H_ */ diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index aec20e1c7d3d..65666b624ae8 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -1,4 +1,6 @@ /* + * DB8500 PRCM Unit driver + * * Copyright (C) STMicroelectronics 2009 * Copyright (C) ST-Ericsson SA 2010 * 
@@ -10,7 +12,8 @@ * U8500 PRCM Unit interface driver * */ -#include +#include +#include #include #include #include @@ -3188,9 +3191,4 @@ static int __init db8500_prcmu_init(void) { return platform_driver_register(&db8500_prcmu_driver); } - core_initcall(db8500_prcmu_init); - -MODULE_AUTHOR("Mattias Nilsson "); -MODULE_DESCRIPTION("DB8500 PRCM Unit driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c index 01572b5e79e8..af3c66355270 100644 --- a/drivers/mfd/htc-i2cpld.c +++ b/drivers/mfd/htc-i2cpld.c @@ -27,7 +27,6 @@ #include #include -#include #include #include #include @@ -614,8 +613,6 @@ static const struct i2c_device_id htcpld_chip_id[] = { { "htcpld-chip", 0 }, { } }; -MODULE_DEVICE_TABLE(i2c, htcpld_chip_id); - static struct i2c_driver htcpld_chip_driver = { .driver = { @@ -643,17 +640,4 @@ static int __init htcpld_core_init(void) /* Probe for our chips */ return platform_driver_probe(&htcpld_core_driver, htcpld_core_probe); } - -static void __exit htcpld_core_exit(void) -{ - i2c_del_driver(&htcpld_chip_driver); - platform_driver_unregister(&htcpld_core_driver); -} - -module_init(htcpld_core_init); -module_exit(htcpld_core_exit); - -MODULE_AUTHOR("Cory Maccarrone "); -MODULE_DESCRIPTION("I2C HTC PLD Driver"); -MODULE_LICENSE("GPL"); - +device_initcall(htcpld_core_init); diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c index 7911b0a14a6d..6d9f03363ee7 100644 --- a/drivers/mfd/intel-lpss-acpi.c +++ b/drivers/mfd/intel-lpss-acpi.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 0e5282fc1467..cba2eb166650 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h index 865bbeaaf00c..3a120fecd2dc 100644 --- a/drivers/mfd/intel-lpss.h +++ b/drivers/mfd/intel-lpss.h @@ -14,6 +14,8 @@ #ifndef __MFD_INTEL_LPSS_H #define __MFD_INTEL_LPSS_H +#include + struct device; struct resource; struct property_entry; diff --git a/drivers/mfd/lochnagar-i2c.c b/drivers/mfd/lochnagar-i2c.c new file mode 100644 index 000000000000..3a65d9938902 --- /dev/null +++ b/drivers/mfd/lochnagar-i2c.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lochnagar I2C bus interface + * + * Copyright (c) 2012-2018 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
+ * + * Author: Charles Keepax + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define LOCHNAGAR_BOOT_RETRIES 10 +#define LOCHNAGAR_BOOT_DELAY_MS 350 + +#define LOCHNAGAR_CONFIG_POLL_US 10000 + +static bool lochnagar1_readable_register(struct device *dev, unsigned int reg) +{ + switch (reg) { + case LOCHNAGAR_SOFTWARE_RESET: + case LOCHNAGAR_FIRMWARE_ID1...LOCHNAGAR_FIRMWARE_ID2: + case LOCHNAGAR1_CDC_AIF1_SEL...LOCHNAGAR1_CDC_AIF3_SEL: + case LOCHNAGAR1_CDC_MCLK1_SEL...LOCHNAGAR1_CDC_MCLK2_SEL: + case LOCHNAGAR1_CDC_AIF_CTRL1...LOCHNAGAR1_CDC_AIF_CTRL2: + case LOCHNAGAR1_EXT_AIF_CTRL: + case LOCHNAGAR1_DSP_AIF1_SEL...LOCHNAGAR1_DSP_AIF2_SEL: + case LOCHNAGAR1_DSP_CLKIN_SEL: + case LOCHNAGAR1_DSP_AIF: + case LOCHNAGAR1_GF_AIF1...LOCHNAGAR1_GF_AIF2: + case LOCHNAGAR1_PSIA_AIF: + case LOCHNAGAR1_PSIA1_SEL...LOCHNAGAR1_PSIA2_SEL: + case LOCHNAGAR1_SPDIF_AIF_SEL: + case LOCHNAGAR1_GF_AIF3_SEL...LOCHNAGAR1_GF_AIF4_SEL: + case LOCHNAGAR1_GF_CLKOUT1_SEL: + case LOCHNAGAR1_GF_AIF1_SEL...LOCHNAGAR1_GF_AIF2_SEL: + case LOCHNAGAR1_GF_GPIO2...LOCHNAGAR1_GF_GPIO7: + case LOCHNAGAR1_RST: + case LOCHNAGAR1_LED1...LOCHNAGAR1_LED2: + case LOCHNAGAR1_I2C_CTRL: + return true; + default: + return false; + } +} + +static const struct regmap_config lochnagar1_i2c_regmap = { + .reg_bits = 8, + .val_bits = 8, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .val_format_endian = REGMAP_ENDIAN_BIG, + + .max_register = 0x50, + .readable_reg = lochnagar1_readable_register, + + .use_single_read = true, + .use_single_write = true, + + .cache_type = REGCACHE_RBTREE, +}; + +static const struct reg_sequence lochnagar1_patch[] = { + { 0x40, 0x0083 }, + { 0x47, 0x0018 }, + { 0x50, 0x0000 }, +}; + +static bool lochnagar2_readable_register(struct device *dev, unsigned int reg) +{ + switch (reg) { + case LOCHNAGAR_SOFTWARE_RESET: + case LOCHNAGAR_FIRMWARE_ID1...LOCHNAGAR_FIRMWARE_ID2: + case LOCHNAGAR2_CDC_AIF1_CTRL...LOCHNAGAR2_CDC_AIF3_CTRL: + case LOCHNAGAR2_DSP_AIF1_CTRL...LOCHNAGAR2_DSP_AIF2_CTRL: + case LOCHNAGAR2_PSIA1_CTRL...LOCHNAGAR2_PSIA2_CTRL: + case LOCHNAGAR2_GF_AIF3_CTRL...LOCHNAGAR2_GF_AIF4_CTRL: + case LOCHNAGAR2_GF_AIF1_CTRL...LOCHNAGAR2_GF_AIF2_CTRL: + case LOCHNAGAR2_SPDIF_AIF_CTRL: + case LOCHNAGAR2_USB_AIF1_CTRL...LOCHNAGAR2_USB_AIF2_CTRL: + case LOCHNAGAR2_ADAT_AIF_CTRL: + case LOCHNAGAR2_CDC_MCLK1_CTRL...LOCHNAGAR2_CDC_MCLK2_CTRL: + case LOCHNAGAR2_DSP_CLKIN_CTRL: + case LOCHNAGAR2_PSIA1_MCLK_CTRL...LOCHNAGAR2_PSIA2_MCLK_CTRL: + case LOCHNAGAR2_SPDIF_MCLK_CTRL: + case LOCHNAGAR2_GF_CLKOUT1_CTRL...LOCHNAGAR2_GF_CLKOUT2_CTRL: + case LOCHNAGAR2_ADAT_MCLK_CTRL: + case LOCHNAGAR2_SOUNDCARD_MCLK_CTRL: + case LOCHNAGAR2_GPIO_FPGA_GPIO1...LOCHNAGAR2_GPIO_FPGA_GPIO6: + case LOCHNAGAR2_GPIO_CDC_GPIO1...LOCHNAGAR2_GPIO_CDC_GPIO8: + case LOCHNAGAR2_GPIO_DSP_GPIO1...LOCHNAGAR2_GPIO_DSP_GPIO6: + case LOCHNAGAR2_GPIO_GF_GPIO2...LOCHNAGAR2_GPIO_GF_GPIO7: + case LOCHNAGAR2_GPIO_CDC_AIF1_BCLK...LOCHNAGAR2_GPIO_CDC_AIF3_TXDAT: + case LOCHNAGAR2_GPIO_DSP_AIF1_BCLK...LOCHNAGAR2_GPIO_DSP_AIF2_TXDAT: + case LOCHNAGAR2_GPIO_PSIA1_BCLK...LOCHNAGAR2_GPIO_PSIA2_TXDAT: + case LOCHNAGAR2_GPIO_GF_AIF3_BCLK...LOCHNAGAR2_GPIO_GF_AIF4_TXDAT: + case LOCHNAGAR2_GPIO_GF_AIF1_BCLK...LOCHNAGAR2_GPIO_GF_AIF2_TXDAT: + case LOCHNAGAR2_GPIO_DSP_UART1_RX...LOCHNAGAR2_GPIO_DSP_UART2_TX: + case LOCHNAGAR2_GPIO_GF_UART2_RX...LOCHNAGAR2_GPIO_GF_UART2_TX: + case LOCHNAGAR2_GPIO_USB_UART_RX: + case 
LOCHNAGAR2_GPIO_CDC_PDMCLK1...LOCHNAGAR2_GPIO_CDC_PDMDAT2: + case LOCHNAGAR2_GPIO_CDC_DMICCLK1...LOCHNAGAR2_GPIO_CDC_DMICDAT4: + case LOCHNAGAR2_GPIO_DSP_DMICCLK1...LOCHNAGAR2_GPIO_DSP_DMICDAT2: + case LOCHNAGAR2_GPIO_I2C2_SCL...LOCHNAGAR2_GPIO_I2C4_SDA: + case LOCHNAGAR2_GPIO_DSP_STANDBY: + case LOCHNAGAR2_GPIO_CDC_MCLK1...LOCHNAGAR2_GPIO_CDC_MCLK2: + case LOCHNAGAR2_GPIO_DSP_CLKIN: + case LOCHNAGAR2_GPIO_PSIA1_MCLK...LOCHNAGAR2_GPIO_PSIA2_MCLK: + case LOCHNAGAR2_GPIO_GF_GPIO1...LOCHNAGAR2_GPIO_GF_GPIO5: + case LOCHNAGAR2_GPIO_DSP_GPIO20: + case LOCHNAGAR2_GPIO_CHANNEL1...LOCHNAGAR2_GPIO_CHANNEL16: + case LOCHNAGAR2_MINICARD_RESETS: + case LOCHNAGAR2_ANALOGUE_PATH_CTRL1...LOCHNAGAR2_ANALOGUE_PATH_CTRL2: + case LOCHNAGAR2_COMMS_CTRL4: + case LOCHNAGAR2_SPDIF_CTRL: + case LOCHNAGAR2_IMON_CTRL1...LOCHNAGAR2_IMON_CTRL4: + case LOCHNAGAR2_IMON_DATA1...LOCHNAGAR2_IMON_DATA2: + case LOCHNAGAR2_POWER_CTRL: + case LOCHNAGAR2_MICVDD_CTRL1: + case LOCHNAGAR2_MICVDD_CTRL2: + case LOCHNAGAR2_VDDCORE_CDC_CTRL1: + case LOCHNAGAR2_VDDCORE_CDC_CTRL2: + case LOCHNAGAR2_SOUNDCARD_AIF_CTRL: + return true; + default: + return false; + } +} + +static bool lochnagar2_volatile_register(struct device *dev, unsigned int reg) +{ + switch (reg) { + case LOCHNAGAR2_GPIO_CHANNEL1...LOCHNAGAR2_GPIO_CHANNEL16: + case LOCHNAGAR2_ANALOGUE_PATH_CTRL1: + case LOCHNAGAR2_IMON_CTRL3...LOCHNAGAR2_IMON_CTRL4: + case LOCHNAGAR2_IMON_DATA1...LOCHNAGAR2_IMON_DATA2: + return true; + default: + return false; + } +} + +static const struct regmap_config lochnagar2_i2c_regmap = { + .reg_bits = 16, + .val_bits = 16, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .val_format_endian = REGMAP_ENDIAN_BIG, + + .max_register = 0x1F1F, + .readable_reg = lochnagar2_readable_register, + .volatile_reg = lochnagar2_volatile_register, + + .cache_type = REGCACHE_RBTREE, +}; + +static const struct reg_sequence lochnagar2_patch[] = { + { 0x00EE, 0x0000 }, +}; + +struct lochnagar_config { + int id; + const char * const name; + enum lochnagar_type type; + const struct regmap_config *regmap; + const struct reg_sequence *patch; + int npatch; +}; + +static struct lochnagar_config lochnagar_configs[] = { + { + .id = 0x50, + .name = "lochnagar1", + .type = LOCHNAGAR1, + .regmap = &lochnagar1_i2c_regmap, + .patch = lochnagar1_patch, + .npatch = ARRAY_SIZE(lochnagar1_patch), + }, + { + .id = 0xCB58, + .name = "lochnagar2", + .type = LOCHNAGAR2, + .regmap = &lochnagar2_i2c_regmap, + .patch = lochnagar2_patch, + .npatch = ARRAY_SIZE(lochnagar2_patch), + }, +}; + +static const struct of_device_id lochnagar_of_match[] = { + { .compatible = "cirrus,lochnagar1", .data = &lochnagar_configs[0] }, + { .compatible = "cirrus,lochnagar2", .data = &lochnagar_configs[1] }, + {}, +}; + +static int lochnagar_wait_for_boot(struct regmap *regmap, unsigned int *id) +{ + int i, ret; + + for (i = 0; i < LOCHNAGAR_BOOT_RETRIES; ++i) { + msleep(LOCHNAGAR_BOOT_DELAY_MS); + + /* The reset register will return the device ID when read */ + ret = regmap_read(regmap, LOCHNAGAR_SOFTWARE_RESET, id); + if (!ret) + return ret; + } + + return -ETIMEDOUT; +} + +/** + * lochnagar_update_config - Synchronise the boards analogue configuration to + * the hardware. + * + * @lochnagar: A pointer to the primary core data structure. + * + * Return: Zero on success or an appropriate negative error code on failure. 
+ */ +int lochnagar_update_config(struct lochnagar *lochnagar) +{ + struct regmap *regmap = lochnagar->regmap; + unsigned int done = LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_MASK; + int timeout_ms = LOCHNAGAR_BOOT_DELAY_MS * LOCHNAGAR_BOOT_RETRIES; + unsigned int val = 0; + int ret; + + lockdep_assert_held(&lochnagar->analogue_config_lock); + + if (lochnagar->type != LOCHNAGAR2) + return 0; + + /* + * Toggle the ANALOGUE_PATH_UPDATE bit and wait for the device to + * acknowledge that any outstanding changes to the analogue + * configuration have been applied. + */ + ret = regmap_write(regmap, LOCHNAGAR2_ANALOGUE_PATH_CTRL1, 0); + if (ret < 0) + return ret; + + ret = regmap_write(regmap, LOCHNAGAR2_ANALOGUE_PATH_CTRL1, + LOCHNAGAR2_ANALOGUE_PATH_UPDATE_MASK); + if (ret < 0) + return ret; + + ret = regmap_read_poll_timeout(regmap, + LOCHNAGAR2_ANALOGUE_PATH_CTRL1, val, + (val & done), LOCHNAGAR_CONFIG_POLL_US, + timeout_ms * 1000); + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(lochnagar_update_config); + +static int lochnagar_i2c_probe(struct i2c_client *i2c) +{ + struct device *dev = &i2c->dev; + const struct lochnagar_config *config = NULL; + const struct of_device_id *of_id; + struct lochnagar *lochnagar; + struct gpio_desc *reset, *present; + unsigned int val; + unsigned int firmwareid; + unsigned int devid, rev; + int ret; + + lochnagar = devm_kzalloc(dev, sizeof(*lochnagar), GFP_KERNEL); + if (!lochnagar) + return -ENOMEM; + + of_id = of_match_device(lochnagar_of_match, dev); + if (!of_id) + return -EINVAL; + + config = of_id->data; + + lochnagar->dev = dev; + mutex_init(&lochnagar->analogue_config_lock); + + dev_set_drvdata(dev, lochnagar); + + reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(reset)) { + ret = PTR_ERR(reset); + dev_err(dev, "Failed to get reset GPIO: %d\n", ret); + return ret; + } + + present = devm_gpiod_get_optional(dev, "present", GPIOD_OUT_HIGH); + if (IS_ERR(present)) { + ret = PTR_ERR(present); + dev_err(dev, "Failed to get present GPIO: %d\n", ret); + return ret; + } + + /* Leave the Lochnagar in reset for a reasonable amount of time */ + msleep(20); + + /* Bring Lochnagar out of reset */ + gpiod_set_value_cansleep(reset, 1); + + /* Identify Lochnagar */ + lochnagar->type = config->type; + + lochnagar->regmap = devm_regmap_init_i2c(i2c, config->regmap); + if (IS_ERR(lochnagar->regmap)) { + ret = PTR_ERR(lochnagar->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", ret); + return ret; + } + + /* Wait for Lochnagar to boot */ + ret = lochnagar_wait_for_boot(lochnagar->regmap, &val); + if (ret < 0) { + dev_err(dev, "Failed to read device ID: %d\n", ret); + return ret; + } + + devid = val & LOCHNAGAR_DEVICE_ID_MASK; + rev = val & LOCHNAGAR_REV_ID_MASK; + + if (devid != config->id) { + dev_err(dev, + "ID does not match %s (expected 0x%x got 0x%x)\n", + config->name, config->id, devid); + return -ENODEV; + } + + /* Identify firmware */ + ret = regmap_read(lochnagar->regmap, LOCHNAGAR_FIRMWARE_ID1, &val); + if (ret < 0) { + dev_err(dev, "Failed to read firmware id 1: %d\n", ret); + return ret; + } + + firmwareid = val; + + ret = regmap_read(lochnagar->regmap, LOCHNAGAR_FIRMWARE_ID2, &val); + if (ret < 0) { + dev_err(dev, "Failed to read firmware id 2: %d\n", ret); + return ret; + } + + firmwareid |= (val << config->regmap->val_bits); + + dev_info(dev, "Found %s (0x%x) revision %u firmware 0x%.6x\n", + config->name, devid, rev + 1, firmwareid); + + ret = regmap_register_patch(lochnagar->regmap, config->patch, + 
config->npatch); + if (ret < 0) { + dev_err(dev, "Failed to register patch: %d\n", ret); + return ret; + } + + ret = devm_of_platform_populate(dev); + if (ret < 0) { + dev_err(dev, "Failed to populate child nodes: %d\n", ret); + return ret; + } + + return ret; +} + +static struct i2c_driver lochnagar_i2c_driver = { + .driver = { + .name = "lochnagar", + .of_match_table = of_match_ptr(lochnagar_of_match), + .suppress_bind_attrs = true, + }, + .probe_new = lochnagar_i2c_probe, +}; + +static int __init lochnagar_i2c_init(void) +{ + int ret; + + ret = i2c_add_driver(&lochnagar_i2c_driver); + if (ret) + pr_err("Failed to register Lochnagar driver: %d\n", ret); + + return ret; +} +subsys_initcall(lochnagar_i2c_init); diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index fd8b15cd84fd..87c724ba9793 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -10,7 +10,7 @@ */ #include -#include +#include #include #include #include @@ -919,8 +919,3 @@ void max8925_device_exit(struct max8925_chip *chip) free_irq(chip->tsc_irq, chip); mfd_remove_devices(chip->dev); } - - -MODULE_DESCRIPTION("PMIC Driver for Maxim MAX8925"); -MODULE_AUTHOR("Haojian Zhuang dev, "Unsupported number of touchscreen wires (%d)\n" diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c index e6e8d81c15fd..8eb2528793f9 100644 --- a/drivers/mfd/qcom-pm8xxx.c +++ b/drivers/mfd/qcom-pm8xxx.c @@ -70,22 +70,23 @@ #define PM8XXX_NR_IRQS 256 #define PM8821_NR_IRQS 112 +struct pm_irq_data { + int num_irqs; + struct irq_chip *irq_chip; + void (*irq_handler)(struct irq_desc *desc); +}; + struct pm_irq_chip { struct regmap *regmap; spinlock_t pm_irq_lock; struct irq_domain *irqdomain; - unsigned int num_irqs; unsigned int num_blocks; unsigned int num_masters; + const struct pm_irq_data *pm_irq_data; + /* MUST BE AT THE END OF THIS STRUCT */ u8 config[0]; }; -struct pm_irq_data { - int num_irqs; - const struct irq_domain_ops *irq_domain_ops; - void (*irq_handler)(struct irq_desc *desc); -}; - static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int *ip) { @@ -375,21 +376,38 @@ static struct irq_chip pm8xxx_irq_chip = { .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; -static int pm8xxx_irq_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) +static void pm8xxx_irq_domain_map(struct pm_irq_chip *chip, + struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq, unsigned int type) { - struct pm_irq_chip *chip = d->host_data; - - irq_set_chip_and_handler(irq, &pm8xxx_irq_chip, handle_level_irq); - irq_set_chip_data(irq, chip); + irq_domain_set_info(domain, irq, hwirq, chip->pm_irq_data->irq_chip, + chip, handle_level_irq, NULL, NULL); irq_set_noprobe(irq); +} + +static int pm8xxx_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct pm_irq_chip *chip = domain->host_data; + struct irq_fwspec *fwspec = data; + irq_hw_number_t hwirq; + unsigned int type; + int ret, i; + + ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) + pm8xxx_irq_domain_map(chip, domain, virq + i, hwirq + i, type); return 0; } static const struct irq_domain_ops pm8xxx_irq_domain_ops = { - .xlate = irq_domain_xlate_twocell, - .map = pm8xxx_irq_domain_map, + .alloc = pm8xxx_irq_domain_alloc, + .free = irq_domain_free_irqs_common, + .translate = irq_domain_translate_twocell, }; static void pm8821_irq_mask_ack(struct 
irq_data *d) @@ -473,23 +491,6 @@ static struct irq_chip pm8821_irq_chip = { .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, }; -static int pm8821_irq_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - struct pm_irq_chip *chip = d->host_data; - - irq_set_chip_and_handler(irq, &pm8821_irq_chip, handle_level_irq); - irq_set_chip_data(irq, chip); - irq_set_noprobe(irq); - - return 0; -} - -static const struct irq_domain_ops pm8821_irq_domain_ops = { - .xlate = irq_domain_xlate_twocell, - .map = pm8821_irq_domain_map, -}; - static const struct regmap_config ssbi_regmap_config = { .reg_bits = 16, .val_bits = 8, @@ -501,13 +502,13 @@ static const struct regmap_config ssbi_regmap_config = { static const struct pm_irq_data pm8xxx_data = { .num_irqs = PM8XXX_NR_IRQS, - .irq_domain_ops = &pm8xxx_irq_domain_ops, + .irq_chip = &pm8xxx_irq_chip, .irq_handler = pm8xxx_irq_handler, }; static const struct pm_irq_data pm8821_data = { .num_irqs = PM8821_NR_IRQS, - .irq_domain_ops = &pm8821_irq_domain_ops, + .irq_chip = &pm8821_irq_chip, .irq_handler = pm8821_irq_handler, }; @@ -571,14 +572,14 @@ static int pm8xxx_probe(struct platform_device *pdev) platform_set_drvdata(pdev, chip); chip->regmap = regmap; - chip->num_irqs = data->num_irqs; - chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8); + chip->num_blocks = DIV_ROUND_UP(data->num_irqs, 8); chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8); + chip->pm_irq_data = data; spin_lock_init(&chip->pm_irq_lock); chip->irqdomain = irq_domain_add_linear(pdev->dev.of_node, data->num_irqs, - data->irq_domain_ops, + &pm8xxx_irq_domain_ops, chip); if (!chip->irqdomain) return -ENODEV; diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c index fd46de02b715..c5cc5cb3dde7 100644 --- a/drivers/mfd/rc5t583.c +++ b/drivers/mfd/rc5t583.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -298,8 +297,6 @@ static const struct i2c_device_id rc5t583_i2c_id[] = { {} }; -MODULE_DEVICE_TABLE(i2c, rc5t583_i2c_id); - static struct i2c_driver rc5t583_i2c_driver = { .driver = { .name = "rc5t583", @@ -313,14 +310,3 @@ static int __init rc5t583_i2c_init(void) return i2c_add_driver(&rc5t583_i2c_driver); } subsys_initcall(rc5t583_i2c_init); - -static void __exit rc5t583_i2c_exit(void) -{ - i2c_del_driver(&rc5t583_i2c_driver); -} - -module_exit(rc5t583_i2c_exit); - -MODULE_AUTHOR("Laxman Dewangan "); -MODULE_DESCRIPTION("RICOH RC5T583 power management system device driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c index e0835c9df7a1..521319086c81 100644 --- a/drivers/mfd/sec-core.c +++ b/drivers/mfd/sec-core.c @@ -114,7 +114,8 @@ static const struct mfd_cell s2mpu02_devs[] = { #ifdef CONFIG_OF static const struct of_device_id sec_dt_match[] = { - { .compatible = "samsung,s5m8767-pmic", + { + .compatible = "samsung,s5m8767-pmic", .data = (void *)S5M8767X, }, { .compatible = "samsung,s2mps11-pmic", @@ -309,8 +310,8 @@ static void sec_pmic_configure(struct sec_pmic_dev *sec_pmic) * the sub-modules need not instantiate another instance while parsing their * platform data. 
*/ -static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata( - struct device *dev) +static struct sec_platform_data * +sec_pmic_i2c_parse_dt_pdata(struct device *dev) { struct sec_platform_data *pd; @@ -331,8 +332,8 @@ static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata( return pd; } #else -static struct sec_platform_data *sec_pmic_i2c_parse_dt_pdata( - struct device *dev) +static struct sec_platform_data * +sec_pmic_i2c_parse_dt_pdata(struct device *dev) { return NULL; } @@ -471,8 +472,9 @@ static int sec_pmic_probe(struct i2c_client *i2c, num_sec_devs = ARRAY_SIZE(s2mpu02_devs); break; default: - /* If this happens the probe function is problem */ - BUG(); + dev_err(&i2c->dev, "Unsupported device type (%lu)\n", + sec_pmic->device_type); + return -ENODEV; } ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs, NULL, 0, NULL); diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index a530972c5a7e..d217debf382e 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -1142,9 +1142,11 @@ static int sm501_register_gpio_i2c_instance(struct sm501_devdata *sm, return -ENOMEM; /* Create a gpiod lookup using gpiochip-local offsets */ - lookup = devm_kzalloc(&pdev->dev, - sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup), + lookup = devm_kzalloc(&pdev->dev, struct_size(lookup, table, 3), GFP_KERNEL); + if (!lookup) + return -ENOMEM; + lookup->dev_id = "i2c-gpio"; if (iic->pin_sda < 32) lookup->table[0].chip_label = "SM501-LOW"; diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c index 3aeafa228baf..cab9aabcaa1f 100644 --- a/drivers/mfd/sta2x11-mfd.c +++ b/drivers/mfd/sta2x11-mfd.c @@ -1,4 +1,6 @@ /* + * STA2x11 mfd for GPIO, SCTL and APBREG + * * Copyright (c) 2009-2011 Wind River Systems, Inc. * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini, Davide Ciminaghi) * @@ -18,7 +20,8 @@ */ #include -#include +#include +#include #include #include #include @@ -653,8 +656,3 @@ static int __init sta2x11_mfd_init(void) */ subsys_initcall(sta2x11_drivers_init); rootfs_initcall(sta2x11_mfd_init); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Wind River"); -MODULE_DESCRIPTION("STA2x11 mfd for GPIO, SCTL and APBREG"); -MODULE_DEVICE_TABLE(pci, sta2x11_mfd_tbl); diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index 7569a4be0608..f2acb1f6a29c 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -463,6 +463,28 @@ static const struct mfd_cell stmpe_ts_cell = { .num_resources = ARRAY_SIZE(stmpe_ts_resources), }; +/* + * ADC (STMPE811) + */ + +static struct resource stmpe_adc_resources[] = { + { + .name = "STMPE_TEMP_SENS", + .flags = IORESOURCE_IRQ, + }, + { + .name = "STMPE_ADC", + .flags = IORESOURCE_IRQ, + }, +}; + +static const struct mfd_cell stmpe_adc_cell = { + .name = "stmpe-adc", + .of_compatible = "st,stmpe-adc", + .resources = stmpe_adc_resources, + .num_resources = ARRAY_SIZE(stmpe_adc_resources), +}; + /* * STMPE811 or STMPE610 */ @@ -497,6 +519,11 @@ static struct stmpe_variant_block stmpe811_blocks[] = { .irq = STMPE811_IRQ_TOUCH_DET, .block = STMPE_BLOCK_TOUCHSCREEN, }, + { + .cell = &stmpe_adc_cell, + .irq = STMPE811_IRQ_TEMP_SENS, + .block = STMPE_BLOCK_ADC, + }, }; static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks, @@ -517,6 +544,35 @@ static int stmpe811_enable(struct stmpe *stmpe, unsigned int blocks, enable ? 
0 : mask); } +int stmpe811_adc_common_init(struct stmpe *stmpe) +{ + int ret; + u8 adc_ctrl1, adc_ctrl1_mask; + + adc_ctrl1 = STMPE_SAMPLE_TIME(stmpe->sample_time) | + STMPE_MOD_12B(stmpe->mod_12b) | + STMPE_REF_SEL(stmpe->ref_sel); + adc_ctrl1_mask = STMPE_SAMPLE_TIME(0xff) | STMPE_MOD_12B(0xff) | + STMPE_REF_SEL(0xff); + + ret = stmpe_set_bits(stmpe, STMPE811_REG_ADC_CTRL1, + adc_ctrl1_mask, adc_ctrl1); + if (ret) { + dev_err(stmpe->dev, "Could not setup ADC\n"); + return ret; + } + + ret = stmpe_set_bits(stmpe, STMPE811_REG_ADC_CTRL2, + STMPE_ADC_FREQ(0xff), STMPE_ADC_FREQ(stmpe->adc_freq)); + if (ret) { + dev_err(stmpe->dev, "Could not setup ADC\n"); + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(stmpe811_adc_common_init); + static int stmpe811_get_altfunc(struct stmpe *stmpe, enum stmpe_block block) { /* 0 for touchscreen, 1 for GPIO */ @@ -1325,6 +1381,7 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum) struct device_node *np = ci->dev->of_node; struct stmpe *stmpe; int ret; + u32 val; pdata = devm_kzalloc(ci->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) @@ -1342,6 +1399,15 @@ int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum) mutex_init(&stmpe->irq_lock); mutex_init(&stmpe->lock); + if (!of_property_read_u32(np, "st,sample-time", &val)) + stmpe->sample_time = val; + if (!of_property_read_u32(np, "st,mod-12b", &val)) + stmpe->mod_12b = val; + if (!of_property_read_u32(np, "st,ref-sel", &val)) + stmpe->ref_sel = val; + if (!of_property_read_u32(np, "st,adc-freq", &val)) + stmpe->adc_freq = val; + stmpe->dev = ci->dev; stmpe->client = ci->client; stmpe->pdata = pdata; @@ -1433,6 +1499,8 @@ int stmpe_remove(struct stmpe *stmpe) if (!IS_ERR(stmpe->vcc)) regulator_disable(stmpe->vcc); + __stmpe_disable(stmpe, STMPE_BLOCK_ADC); + mfd_remove_devices(stmpe->dev); return 0; diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c new file mode 100644 index 000000000000..7dfbe8906cb8 --- /dev/null +++ b/drivers/mfd/stpmic1.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) STMicroelectronics 2018 +// Author: Pascal Paillet + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define STPMIC1_MAIN_IRQ 0 + +static const struct regmap_range stpmic1_readable_ranges[] = { + regmap_reg_range(TURN_ON_SR, VERSION_SR), + regmap_reg_range(SWOFF_PWRCTRL_CR, LDO6_STDBY_CR), + regmap_reg_range(BST_SW_CR, BST_SW_CR), + regmap_reg_range(INT_PENDING_R1, INT_PENDING_R4), + regmap_reg_range(INT_CLEAR_R1, INT_CLEAR_R4), + regmap_reg_range(INT_MASK_R1, INT_MASK_R4), + regmap_reg_range(INT_SET_MASK_R1, INT_SET_MASK_R4), + regmap_reg_range(INT_CLEAR_MASK_R1, INT_CLEAR_MASK_R4), + regmap_reg_range(INT_SRC_R1, INT_SRC_R1), +}; + +static const struct regmap_range stpmic1_writeable_ranges[] = { + regmap_reg_range(SWOFF_PWRCTRL_CR, LDO6_STDBY_CR), + regmap_reg_range(BST_SW_CR, BST_SW_CR), + regmap_reg_range(INT_CLEAR_R1, INT_CLEAR_R4), + regmap_reg_range(INT_SET_MASK_R1, INT_SET_MASK_R4), + regmap_reg_range(INT_CLEAR_MASK_R1, INT_CLEAR_MASK_R4), +}; + +static const struct regmap_range stpmic1_volatile_ranges[] = { + regmap_reg_range(TURN_ON_SR, VERSION_SR), + regmap_reg_range(WCHDG_CR, WCHDG_CR), + regmap_reg_range(INT_PENDING_R1, INT_PENDING_R4), + regmap_reg_range(INT_SRC_R1, INT_SRC_R4), +}; + +static const struct regmap_access_table stpmic1_readable_table = { + .yes_ranges = stpmic1_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(stpmic1_readable_ranges), +}; + +static 
const struct regmap_access_table stpmic1_writeable_table = { + .yes_ranges = stpmic1_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(stpmic1_writeable_ranges), +}; + +static const struct regmap_access_table stpmic1_volatile_table = { + .yes_ranges = stpmic1_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(stpmic1_volatile_ranges), +}; + +const struct regmap_config stpmic1_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .max_register = PMIC_MAX_REGISTER_ADDRESS, + .rd_table = &stpmic1_readable_table, + .wr_table = &stpmic1_writeable_table, + .volatile_table = &stpmic1_volatile_table, +}; + +static const struct regmap_irq stpmic1_irqs[] = { + REGMAP_IRQ_REG(IT_PONKEY_F, 0, 0x01), + REGMAP_IRQ_REG(IT_PONKEY_R, 0, 0x02), + REGMAP_IRQ_REG(IT_WAKEUP_F, 0, 0x04), + REGMAP_IRQ_REG(IT_WAKEUP_R, 0, 0x08), + REGMAP_IRQ_REG(IT_VBUS_OTG_F, 0, 0x10), + REGMAP_IRQ_REG(IT_VBUS_OTG_R, 0, 0x20), + REGMAP_IRQ_REG(IT_SWOUT_F, 0, 0x40), + REGMAP_IRQ_REG(IT_SWOUT_R, 0, 0x80), + + REGMAP_IRQ_REG(IT_CURLIM_BUCK1, 1, 0x01), + REGMAP_IRQ_REG(IT_CURLIM_BUCK2, 1, 0x02), + REGMAP_IRQ_REG(IT_CURLIM_BUCK3, 1, 0x04), + REGMAP_IRQ_REG(IT_CURLIM_BUCK4, 1, 0x08), + REGMAP_IRQ_REG(IT_OCP_OTG, 1, 0x10), + REGMAP_IRQ_REG(IT_OCP_SWOUT, 1, 0x20), + REGMAP_IRQ_REG(IT_OCP_BOOST, 1, 0x40), + REGMAP_IRQ_REG(IT_OVP_BOOST, 1, 0x80), + + REGMAP_IRQ_REG(IT_CURLIM_LDO1, 2, 0x01), + REGMAP_IRQ_REG(IT_CURLIM_LDO2, 2, 0x02), + REGMAP_IRQ_REG(IT_CURLIM_LDO3, 2, 0x04), + REGMAP_IRQ_REG(IT_CURLIM_LDO4, 2, 0x08), + REGMAP_IRQ_REG(IT_CURLIM_LDO5, 2, 0x10), + REGMAP_IRQ_REG(IT_CURLIM_LDO6, 2, 0x20), + REGMAP_IRQ_REG(IT_SHORT_SWOTG, 2, 0x40), + REGMAP_IRQ_REG(IT_SHORT_SWOUT, 2, 0x80), + + REGMAP_IRQ_REG(IT_TWARN_F, 3, 0x01), + REGMAP_IRQ_REG(IT_TWARN_R, 3, 0x02), + REGMAP_IRQ_REG(IT_VINLOW_F, 3, 0x04), + REGMAP_IRQ_REG(IT_VINLOW_R, 3, 0x08), + REGMAP_IRQ_REG(IT_SWIN_F, 3, 0x40), + REGMAP_IRQ_REG(IT_SWIN_R, 3, 0x80), +}; + +static const struct regmap_irq_chip stpmic1_regmap_irq_chip = { + .name = "pmic_irq", + .status_base = INT_PENDING_R1, + .mask_base = INT_CLEAR_MASK_R1, + .unmask_base = INT_SET_MASK_R1, + .ack_base = INT_CLEAR_R1, + .num_regs = STPMIC1_PMIC_NUM_IRQ_REGS, + .irqs = stpmic1_irqs, + .num_irqs = ARRAY_SIZE(stpmic1_irqs), +}; + +static int stpmic1_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct stpmic1 *ddata; + struct device *dev = &i2c->dev; + int ret; + struct device_node *np = dev->of_node; + u32 reg; + + ddata = devm_kzalloc(dev, sizeof(struct stpmic1), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + i2c_set_clientdata(i2c, ddata); + ddata->dev = dev; + + ddata->regmap = devm_regmap_init_i2c(i2c, &stpmic1_regmap_config); + if (IS_ERR(ddata->regmap)) + return PTR_ERR(ddata->regmap); + + ddata->irq = of_irq_get(np, STPMIC1_MAIN_IRQ); + if (ddata->irq < 0) { + dev_err(dev, "Failed to get main IRQ: %d\n", ddata->irq); + return ddata->irq; + } + + ret = regmap_read(ddata->regmap, VERSION_SR, &reg); + if (ret) { + dev_err(dev, "Unable to read PMIC version\n"); + return ret; + } + dev_info(dev, "PMIC Chip Version: 0x%x\n", reg); + + /* Initialize PMIC IRQ Chip & associated IRQ domains */ + ret = devm_regmap_add_irq_chip(dev, ddata->regmap, ddata->irq, + IRQF_ONESHOT | IRQF_SHARED, + 0, &stpmic1_regmap_irq_chip, + &ddata->irq_data); + if (ret) { + dev_err(dev, "IRQ Chip registration failed: %d\n", ret); + return ret; + } + + return devm_of_platform_populate(dev); +} + +#ifdef CONFIG_PM_SLEEP +static int stpmic1_suspend(struct device *dev) +{ + struct i2c_client *i2c =
to_i2c_client(dev); + struct stpmic1 *pmic_dev = i2c_get_clientdata(i2c); + + disable_irq(pmic_dev->irq); + + return 0; +} + +static int stpmic1_resume(struct device *dev) +{ + struct i2c_client *i2c = to_i2c_client(dev); + struct stpmic1 *pmic_dev = i2c_get_clientdata(i2c); + int ret; + + ret = regcache_sync(pmic_dev->regmap); + if (ret) + return ret; + + enable_irq(pmic_dev->irq); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(stpmic1_pm, stpmic1_suspend, stpmic1_resume); + +static const struct of_device_id stpmic1_of_match[] = { + { .compatible = "st,stpmic1", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stpmic1_of_match); + +static struct i2c_driver stpmic1_driver = { + .driver = { + .name = "stpmic1", + .of_match_table = of_match_ptr(stpmic1_of_match), + .pm = &stpmic1_pm, + }, + .probe = stpmic1_probe, +}; + +module_i2c_driver(stpmic1_driver); + +MODULE_DESCRIPTION("STPMIC1 PMIC Driver"); +MODULE_AUTHOR("Pascal Paillet "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index b6d05cd934e6..0ecdffb3d967 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -272,13 +272,3 @@ static int __init syscon_init(void) return platform_driver_register(&syscon_driver); } postcore_initcall(syscon_init); - -static void __exit syscon_exit(void) -{ - platform_driver_unregister(&syscon_driver); -} -module_exit(syscon_exit); - -MODULE_AUTHOR("Dong Aisheng "); -MODULE_DESCRIPTION("System Control driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c index f13e4cd06e89..6968df4d7828 100644 --- a/drivers/mfd/tps65090.c +++ b/drivers/mfd/tps65090.c @@ -2,7 +2,9 @@ * Core driver for TI TPS65090 PMIC family * * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. - + * + * Author: Venu Byravarasu + * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. 
@@ -19,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -171,7 +173,6 @@ static const struct of_device_id tps65090_of_match[] = { { .compatible = "ti,tps65090",}, {}, }; -MODULE_DEVICE_TABLE(of, tps65090_of_match); #endif static int tps65090_i2c_probe(struct i2c_client *client, @@ -236,30 +237,19 @@ err_irq_exit: return ret; } -static int tps65090_i2c_remove(struct i2c_client *client) -{ - struct tps65090 *tps65090 = i2c_get_clientdata(client); - - mfd_remove_devices(tps65090->dev); - if (client->irq) - regmap_del_irq_chip(client->irq, tps65090->irq_data); - - return 0; -} static const struct i2c_device_id tps65090_id_table[] = { { "tps65090", 0 }, { }, }; -MODULE_DEVICE_TABLE(i2c, tps65090_id_table); static struct i2c_driver tps65090_driver = { .driver = { .name = "tps65090", + .suppress_bind_attrs = true, .of_match_table = of_match_ptr(tps65090_of_match), }, .probe = tps65090_i2c_probe, - .remove = tps65090_i2c_remove, .id_table = tps65090_id_table, }; @@ -268,13 +258,3 @@ static int __init tps65090_init(void) return i2c_add_driver(&tps65090_driver); } subsys_initcall(tps65090_init); - -static void __exit tps65090_exit(void) -{ - i2c_del_driver(&tps65090_driver); -} -module_exit(tps65090_exit); - -MODULE_DESCRIPTION("TPS65090 core driver"); -MODULE_AUTHOR("Venu Byravarasu "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 8bcdecf494d0..a62ea4cb8be7 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -211,6 +211,83 @@ static const struct of_device_id of_tps65218_match_table[] = { }; MODULE_DEVICE_TABLE(of, of_tps65218_match_table); +static int tps65218_voltage_set_strict(struct tps65218 *tps) +{ + u32 strict; + + if (of_property_read_u32(tps->dev->of_node, + "ti,strict-supply-voltage-supervision", + &strict)) + return 0; + + if (strict != 0 && strict != 1) { + dev_err(tps->dev, + "Invalid ti,strict-supply-voltage-supervision value\n"); + return -EINVAL; + } + + tps65218_update_bits(tps, TPS65218_REG_CONFIG1, + TPS65218_CONFIG1_STRICT, + strict ? TPS65218_CONFIG1_STRICT : 0, + TPS65218_PROTECT_L1); + return 0; +} + +static int tps65218_voltage_set_uv_hyst(struct tps65218 *tps) +{ + u32 hyst; + + if (of_property_read_u32(tps->dev->of_node, + "ti,under-voltage-hyst-microvolt", &hyst)) + return 0; + + if (hyst != 400000 && hyst != 200000) { + dev_err(tps->dev, + "Invalid ti,under-voltage-hyst-microvolt value\n"); + return -EINVAL; + } + + tps65218_update_bits(tps, TPS65218_REG_CONFIG2, + TPS65218_CONFIG2_UVLOHYS, + hyst == 400000 ? 
TPS65218_CONFIG2_UVLOHYS : 0, + TPS65218_PROTECT_L1); + return 0; +} + +static int tps65218_voltage_set_uvlo(struct tps65218 *tps) +{ + u32 uvlo; + int uvloval; + + if (of_property_read_u32(tps->dev->of_node, + "ti,under-voltage-limit-microvolt", &uvlo)) + return 0; + + switch (uvlo) { + case 2750000: + uvloval = TPS65218_CONFIG1_UVLO_2750000; + break; + case 2950000: + uvloval = TPS65218_CONFIG1_UVLO_2950000; + break; + case 3250000: + uvloval = TPS65218_CONFIG1_UVLO_3250000; + break; + case 3350000: + uvloval = TPS65218_CONFIG1_UVLO_3350000; + break; + default: + dev_err(tps->dev, + "Invalid ti,under-voltage-limit-microvolt value\n"); + return -EINVAL; + } + + tps65218_update_bits(tps, TPS65218_REG_CONFIG1, + TPS65218_CONFIG1_UVLO_MASK, uvloval, + TPS65218_PROTECT_L1); + return 0; +} + static int tps65218_probe(struct i2c_client *client, const struct i2c_device_id *ids) { @@ -249,6 +326,18 @@ static int tps65218_probe(struct i2c_client *client, tps->rev = chipid & TPS65218_CHIPID_REV_MASK; + ret = tps65218_voltage_set_strict(tps); + if (ret) + return ret; + + ret = tps65218_voltage_set_uvlo(tps); + if (ret) + return ret; + + ret = tps65218_voltage_set_uv_hyst(tps); + if (ret) + return ret; + ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65218_cells, ARRAY_SIZE(tps65218_cells), NULL, 0, regmap_irq_get_domain(tps->irq_data)); diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c index bf16cbe6fd88..aa3d472a10ff 100644 --- a/drivers/mfd/tps65910.c +++ b/drivers/mfd/tps65910.c @@ -1,5 +1,5 @@ /* - * tps65910.c -- TI TPS6591x + * tps65910.c -- TI TPS6591x chip family multi-function driver * * Copyright 2010 Texas Instruments Inc. * @@ -13,8 +13,6 @@ * */ -#include -#include #include #include #include @@ -374,7 +372,6 @@ static const struct of_device_id tps65910_of_match[] = { { .compatible = "ti,tps65911", .data = (void *)TPS65911}, { }, }; -MODULE_DEVICE_TABLE(of, tps65910_of_match); static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client, unsigned long *chip_id) @@ -527,8 +524,6 @@ static const struct i2c_device_id tps65910_i2c_id[] = { { "tps65911", TPS65911 }, { } }; -MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id); - static struct i2c_driver tps65910_i2c_driver = { .driver = { @@ -545,14 +540,3 @@ static int __init tps65910_i2c_init(void) } /* init early so consumer devices can complete system boot */ subsys_initcall(tps65910_i2c_init); - -static void __exit tps65910_i2c_exit(void) -{ - i2c_del_driver(&tps65910_i2c_driver); -} -module_exit(tps65910_i2c_exit); - -MODULE_AUTHOR("Graeme Gregory "); -MODULE_AUTHOR("Jorge Eduardo Candelaria "); -MODULE_DESCRIPTION("TPS6591x chip family multi-function driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/tps68470.c b/drivers/mfd/tps68470.c index a5981a79b29a..4a4df4ffd18c 100644 --- a/drivers/mfd/tps68470.c +++ b/drivers/mfd/tps68470.c @@ -86,7 +86,6 @@ static const struct acpi_device_id tps68470_acpi_ids[] = { {"INT3472"}, {}, }; -MODULE_DEVICE_TABLE(acpi, tps68470_acpi_ids); static struct i2c_driver tps68470_driver = { .driver = { diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c index 608c7f77830e..865257ade8ac 100644 --- a/drivers/mfd/tps80031.c +++ b/drivers/mfd/tps80031.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -516,40 +515,18 @@ fail_client_reg: return ret; } -static int tps80031_remove(struct i2c_client *client) -{ - struct tps80031 *tps80031 = i2c_get_clientdata(client); - int i; - - if (tps80031_power_off_dev == tps80031) { - 
tps80031_power_off_dev = NULL; - pm_power_off = NULL; - } - - mfd_remove_devices(tps80031->dev); - - regmap_del_irq_chip(client->irq, tps80031->irq_data); - - for (i = 0; i < TPS80031_NUM_SLAVES; i++) { - if (tps80031->clients[i] != client) - i2c_unregister_device(tps80031->clients[i]); - } - return 0; -} - static const struct i2c_device_id tps80031_id_table[] = { { "tps80031", TPS80031 }, { "tps80032", TPS80032 }, { } }; -MODULE_DEVICE_TABLE(i2c, tps80031_id_table); static struct i2c_driver tps80031_driver = { .driver = { - .name = "tps80031", + .name = "tps80031", + .suppress_bind_attrs = true, }, .probe = tps80031_probe, - .remove = tps80031_remove, .id_table = tps80031_id_table, }; @@ -558,13 +535,3 @@ static int __init tps80031_init(void) return i2c_add_driver(&tps80031_driver); } subsys_initcall(tps80031_init); - -static void __exit tps80031_exit(void) -{ - i2c_del_driver(&tps80031_driver); -} -module_exit(tps80031_exit); - -MODULE_AUTHOR("Laxman Dewangan "); -MODULE_DESCRIPTION("TPS80031 core driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/tqmx86.c b/drivers/mfd/tqmx86.c new file mode 100644 index 000000000000..22d2f02d855c --- /dev/null +++ b/drivers/mfd/tqmx86.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * TQ-Systems PLD MFD core driver, based on vendor driver by + * Vadim V.Vlasov + * + * Copyright (c) 2015 TQ-Systems GmbH + * Copyright (c) 2019 Andrew Lunn + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define TQMX86_IOBASE 0x160 +#define TQMX86_IOSIZE 0x3f +#define TQMX86_IOBASE_I2C 0x1a0 +#define TQMX86_IOSIZE_I2C 0xa +#define TQMX86_IOBASE_WATCHDOG 0x18b +#define TQMX86_IOSIZE_WATCHDOG 0x2 +#define TQMX86_IOBASE_GPIO 0x18d +#define TQMX86_IOSIZE_GPIO 0x4 + +#define TQMX86_REG_BOARD_ID 0x20 +#define TQMX86_REG_BOARD_ID_E38M 1 +#define TQMX86_REG_BOARD_ID_50UC 2 +#define TQMX86_REG_BOARD_ID_E38C 3 +#define TQMX86_REG_BOARD_ID_60EB 4 +#define TQMX86_REG_BOARD_ID_E39M 5 +#define TQMX86_REG_BOARD_ID_E39C 6 +#define TQMX86_REG_BOARD_ID_E39x 7 +#define TQMX86_REG_BOARD_ID_70EB 8 +#define TQMX86_REG_BOARD_ID_80UC 9 +#define TQMX86_REG_BOARD_ID_90UC 10 +#define TQMX86_REG_BOARD_REV 0x21 +#define TQMX86_REG_IO_EXT_INT 0x26 +#define TQMX86_REG_IO_EXT_INT_NONE 0 +#define TQMX86_REG_IO_EXT_INT_7 1 +#define TQMX86_REG_IO_EXT_INT_9 2 +#define TQMX86_REG_IO_EXT_INT_12 3 +#define TQMX86_REG_IO_EXT_INT_MASK 0x3 +#define TQMX86_REG_IO_EXT_INT_GPIO_SHIFT 4 + +#define TQMX86_REG_I2C_DETECT 0x47 +#define TQMX86_REG_I2C_DETECT_SOFT 0xa5 +#define TQMX86_REG_I2C_INT_EN 0x49 + +static uint gpio_irq; +module_param(gpio_irq, uint, 0); +MODULE_PARM_DESC(gpio_irq, "GPIO IRQ number (7, 9, 12)"); + +static const struct resource tqmx_i2c_soft_resources[] = { + DEFINE_RES_IO(TQMX86_IOBASE_I2C, TQMX86_IOSIZE_I2C), +}; + +static const struct resource tqmx_watchdog_resources[] = { + DEFINE_RES_IO(TQMX86_IOBASE_WATCHDOG, TQMX86_IOSIZE_WATCHDOG), +}; + +/* + * The IRQ resource must be first, since it is updated with the + * configured IRQ in the probe function. 
+ */ +static struct resource tqmx_gpio_resources[] = { + DEFINE_RES_IRQ(0), + DEFINE_RES_IO(TQMX86_IOBASE_GPIO, TQMX86_IOSIZE_GPIO), +}; + +static struct i2c_board_info tqmx86_i2c_devices[] = { + { + /* 4K EEPROM at 0x50 */ + I2C_BOARD_INFO("24c32", 0x50), + }, +}; + +static struct ocores_i2c_platform_data ocores_platfom_data = { + .num_devices = ARRAY_SIZE(tqmx86_i2c_devices), + .devices = tqmx86_i2c_devices, +}; + +static const struct mfd_cell tqmx86_i2c_soft_dev[] = { + { + .name = "ocores-i2c", + .platform_data = &ocores_platfom_data, + .pdata_size = sizeof(ocores_platfom_data), + .resources = tqmx_i2c_soft_resources, + .num_resources = ARRAY_SIZE(tqmx_i2c_soft_resources), + }, +}; + +static const struct mfd_cell tqmx86_devs[] = { + { + .name = "tqmx86-wdt", + .resources = tqmx_watchdog_resources, + .num_resources = ARRAY_SIZE(tqmx_watchdog_resources), + .ignore_resource_conflicts = true, + }, + { + .name = "tqmx86-gpio", + .resources = tqmx_gpio_resources, + .num_resources = ARRAY_SIZE(tqmx_gpio_resources), + .ignore_resource_conflicts = true, + }, +}; + +static const char *tqmx86_board_id_to_name(u8 board_id) +{ + switch (board_id) { + case TQMX86_REG_BOARD_ID_E38M: + return "TQMxE38M"; + case TQMX86_REG_BOARD_ID_50UC: + return "TQMx50UC"; + case TQMX86_REG_BOARD_ID_E38C: + return "TQMxE38C"; + case TQMX86_REG_BOARD_ID_60EB: + return "TQMx60EB"; + case TQMX86_REG_BOARD_ID_E39M: + return "TQMxE39M"; + case TQMX86_REG_BOARD_ID_E39C: + return "TQMxE39C"; + case TQMX86_REG_BOARD_ID_E39x: + return "TQMxE39x"; + case TQMX86_REG_BOARD_ID_70EB: + return "TQMx70EB"; + case TQMX86_REG_BOARD_ID_80UC: + return "TQMx80UC"; + case TQMX86_REG_BOARD_ID_90UC: + return "TQMx90UC"; + default: + return "Unknown"; + } +} + +static int tqmx86_board_id_to_clk_rate(u8 board_id) +{ + switch (board_id) { + case TQMX86_REG_BOARD_ID_50UC: + case TQMX86_REG_BOARD_ID_60EB: + case TQMX86_REG_BOARD_ID_70EB: + case TQMX86_REG_BOARD_ID_80UC: + case TQMX86_REG_BOARD_ID_90UC: + return 24000; + case TQMX86_REG_BOARD_ID_E39M: + case TQMX86_REG_BOARD_ID_E39C: + case TQMX86_REG_BOARD_ID_E39x: + return 25000; + case TQMX86_REG_BOARD_ID_E38M: + case TQMX86_REG_BOARD_ID_E38C: + return 33000; + default: + return 0; + } +} + +static int tqmx86_probe(struct platform_device *pdev) +{ + u8 board_id, rev, i2c_det, i2c_ien, io_ext_int_val; + struct device *dev = &pdev->dev; + u8 gpio_irq_cfg, readback; + const char *board_name; + void __iomem *io_base; + int err; + + switch (gpio_irq) { + case 0: + gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_NONE; + break; + case 7: + gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_7; + break; + case 9: + gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_9; + break; + case 12: + gpio_irq_cfg = TQMX86_REG_IO_EXT_INT_12; + break; + default: + pr_err("tqmx86: Invalid GPIO IRQ (%d)\n", gpio_irq); + return -EINVAL; + } + + io_base = devm_ioport_map(dev, TQMX86_IOBASE, TQMX86_IOSIZE); + if (!io_base) + return -ENOMEM; + + board_id = ioread8(io_base + TQMX86_REG_BOARD_ID); + board_name = tqmx86_board_id_to_name(board_id); + rev = ioread8(io_base + TQMX86_REG_BOARD_REV); + + dev_info(dev, + "Found %s - Board ID %d, PCB Revision %d, PLD Revision %d\n", + board_name, board_id, rev >> 4, rev & 0xf); + + i2c_det = ioread8(io_base + TQMX86_REG_I2C_DETECT); + i2c_ien = ioread8(io_base + TQMX86_REG_I2C_INT_EN); + + if (gpio_irq_cfg) { + io_ext_int_val = + gpio_irq_cfg << TQMX86_REG_IO_EXT_INT_GPIO_SHIFT; + iowrite8(io_ext_int_val, io_base + TQMX86_REG_IO_EXT_INT); + readback = ioread8(io_base + TQMX86_REG_IO_EXT_INT); + if (readback != 
io_ext_int_val) { + dev_warn(dev, "GPIO interrupts not supported.\n"); + return -EINVAL; + } + + /* Assumes the IRQ resource is first. */ + tqmx_gpio_resources[0].start = gpio_irq; + } + + ocores_platfom_data.clock_khz = tqmx86_board_id_to_clk_rate(board_id); + + if (i2c_det == TQMX86_REG_I2C_DETECT_SOFT) { + err = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + tqmx86_i2c_soft_dev, + ARRAY_SIZE(tqmx86_i2c_soft_dev), + NULL, 0, NULL); + if (err) + return err; + } + + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + tqmx86_devs, + ARRAY_SIZE(tqmx86_devs), + NULL, 0, NULL); +} + +static int tqmx86_create_platform_device(const struct dmi_system_id *id) +{ + struct platform_device *pdev; + int err; + + pdev = platform_device_alloc("tqmx86", -1); + if (!pdev) + return -ENOMEM; + + err = platform_device_add(pdev); + if (err) + platform_device_put(pdev); + + return err; +} + +static const struct dmi_system_id tqmx86_dmi_table[] __initconst = { + { + .ident = "TQMX86", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TQ-Group"), + DMI_MATCH(DMI_PRODUCT_NAME, "TQMx"), + }, + .callback = tqmx86_create_platform_device, + }, + {} +}; +MODULE_DEVICE_TABLE(dmi, tqmx86_dmi_table); + +static struct platform_driver tqmx86_driver = { + .driver = { + .name = "tqmx86", + }, + .probe = tqmx86_probe, +}; + +static int __init tqmx86_init(void) +{ + if (!dmi_check_system(tqmx86_dmi_table)) + return -ENODEV; + + return platform_driver_register(&tqmx86_driver); +} + +module_init(tqmx86_init); + +MODULE_DESCRIPTION("TQx86 PLD Core Driver"); +MODULE_AUTHOR("Andrew Lunn "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:tqmx86"); diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index e70d35ef5c6d..25fbbaf39cb9 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -13,7 +13,8 @@ */ #include -#include +#include +#include #include #include #include @@ -1892,14 +1893,6 @@ err: return ret; } -void wm831x_device_exit(struct wm831x *wm831x) -{ - wm831x_otp_exit(wm831x); - mfd_remove_devices(wm831x->dev); - free_irq(wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA), wm831x); - wm831x_irq_exit(wm831x); -} - int wm831x_device_suspend(struct wm831x *wm831x) { int reg, mask; @@ -1944,7 +1937,3 @@ void wm831x_device_shutdown(struct wm831x *wm831x) } } EXPORT_SYMBOL_GPL(wm831x_device_shutdown); - -MODULE_DESCRIPTION("Core support for the WM831X AudioPlus PMIC"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mark Brown"); diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c index 22f7054d1b28..0f3af42f7268 100644 --- a/drivers/mfd/wm831x-i2c.c +++ b/drivers/mfd/wm831x-i2c.c @@ -13,7 +13,7 @@ */ #include -#include +#include #include #include #include @@ -68,15 +68,6 @@ static int wm831x_i2c_probe(struct i2c_client *i2c, return wm831x_device_init(wm831x, i2c->irq); } -static int wm831x_i2c_remove(struct i2c_client *i2c) -{ - struct wm831x *wm831x = i2c_get_clientdata(i2c); - - wm831x_device_exit(wm831x); - - return 0; -} - static int wm831x_i2c_suspend(struct device *dev) { struct wm831x *wm831x = dev_get_drvdata(dev); @@ -103,7 +94,6 @@ static const struct i2c_device_id wm831x_i2c_id[] = { { "wm8326", WM8326 }, { } }; -MODULE_DEVICE_TABLE(i2c, wm831x_i2c_id); static const struct dev_pm_ops wm831x_pm_ops = { .suspend = wm831x_i2c_suspend, @@ -115,9 +105,9 @@ static struct i2c_driver wm831x_i2c_driver = { .name = "wm831x", .pm = &wm831x_pm_ops, .of_match_table = of_match_ptr(wm831x_of_match), + .suppress_bind_attrs = true, }, .probe = wm831x_i2c_probe, - .remove = wm831x_i2c_remove, .id_table = 
wm831x_i2c_id, }; @@ -132,9 +122,3 @@ static int __init wm831x_i2c_init(void) return ret; } subsys_initcall(wm831x_i2c_init); - -static void __exit wm831x_i2c_exit(void) -{ - i2c_del_driver(&wm831x_i2c_driver); -} -module_exit(wm831x_i2c_exit); diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c index 018ce652ae57..dd4dab419940 100644 --- a/drivers/mfd/wm831x-spi.c +++ b/drivers/mfd/wm831x-spi.c @@ -13,7 +13,7 @@ */ #include -#include +#include #include #include #include @@ -67,15 +67,6 @@ static int wm831x_spi_probe(struct spi_device *spi) return wm831x_device_init(wm831x, spi->irq); } -static int wm831x_spi_remove(struct spi_device *spi) -{ - struct wm831x *wm831x = spi_get_drvdata(spi); - - wm831x_device_exit(wm831x); - - return 0; -} - static int wm831x_spi_suspend(struct device *dev) { struct wm831x *wm831x = dev_get_drvdata(dev); @@ -108,17 +99,16 @@ static const struct spi_device_id wm831x_spi_ids[] = { { "wm8326", WM8326 }, { }, }; -MODULE_DEVICE_TABLE(spi, wm831x_spi_ids); static struct spi_driver wm831x_spi_driver = { .driver = { .name = "wm831x", .pm = &wm831x_spi_pm, .of_match_table = of_match_ptr(wm831x_of_match), + .suppress_bind_attrs = true, }, .id_table = wm831x_spi_ids, .probe = wm831x_spi_probe, - .remove = wm831x_spi_remove, }; static int __init wm831x_spi_init(void) @@ -132,13 +122,3 @@ static int __init wm831x_spi_init(void) return 0; } subsys_initcall(wm831x_spi_init); - -static void __exit wm831x_spi_exit(void) -{ - spi_unregister_driver(&wm831x_spi_driver); -} -module_exit(wm831x_spi_exit); - -MODULE_DESCRIPTION("SPI support for WM831x/2x AudioPlus PMICs"); -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mark Brown"); diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index 8a07c5634aee..9e1070f26b11 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c @@ -13,7 +13,8 @@ */ #include -#include +#include +#include #include #include #include @@ -442,30 +443,3 @@ err: return ret; } EXPORT_SYMBOL_GPL(wm8350_device_init); - -void wm8350_device_exit(struct wm8350 *wm8350) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(wm8350->pmic.led); i++) - platform_device_unregister(wm8350->pmic.led[i].pdev); - - for (i = 0; i < ARRAY_SIZE(wm8350->pmic.pdev); i++) - platform_device_unregister(wm8350->pmic.pdev[i]); - - platform_device_unregister(wm8350->wdt.pdev); - platform_device_unregister(wm8350->rtc.pdev); - platform_device_unregister(wm8350->power.pdev); - platform_device_unregister(wm8350->hwmon.pdev); - platform_device_unregister(wm8350->gpio.pdev); - platform_device_unregister(wm8350->codec.pdev); - - if (wm8350->irq_base) - free_irq(wm8350->irq_base + WM8350_IRQ_AUXADC_DATARDY, wm8350); - - wm8350_irq_exit(wm8350); -} -EXPORT_SYMBOL_GPL(wm8350_device_exit); - -MODULE_DESCRIPTION("WM8350 AudioPlus PMIC core driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c index 9358f03b7938..b4194e068e1b 100644 --- a/drivers/mfd/wm8350-i2c.c +++ b/drivers/mfd/wm8350-i2c.c @@ -13,8 +13,6 @@ * */ -#include -#include #include #include #include @@ -48,30 +46,19 @@ static int wm8350_i2c_probe(struct i2c_client *i2c, return wm8350_device_init(wm8350, i2c->irq, pdata); } -static int wm8350_i2c_remove(struct i2c_client *i2c) -{ - struct wm8350 *wm8350 = i2c_get_clientdata(i2c); - - wm8350_device_exit(wm8350); - - return 0; -} - static const struct i2c_device_id wm8350_i2c_id[] = { { "wm8350", 0 }, { "wm8351", 0 }, { "wm8352", 0 }, { } }; -MODULE_DEVICE_TABLE(i2c, wm8350_i2c_id); - static struct i2c_driver 
wm8350_i2c_driver = { .driver = { .name = "wm8350", + .suppress_bind_attrs = true, }, .probe = wm8350_i2c_probe, - .remove = wm8350_i2c_remove, .id_table = wm8350_i2c_id, }; @@ -81,12 +68,3 @@ static int __init wm8350_i2c_init(void) } /* init early so consumer devices can complete system boot */ subsys_initcall(wm8350_i2c_init); - -static void __exit wm8350_i2c_exit(void) -{ - i2c_del_driver(&wm8350_i2c_driver); -} -module_exit(wm8350_i2c_exit); - -MODULE_DESCRIPTION("I2C support for the WM8350 AudioPlus PMIC"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c index 8a98a2fc74e1..79756c83f5f0 100644 --- a/drivers/mfd/wm8400-core.c +++ b/drivers/mfd/wm8400-core.c @@ -12,7 +12,7 @@ * */ -#include +#include #include #include #include @@ -150,7 +150,6 @@ static const struct i2c_device_id wm8400_i2c_id[] = { { "wm8400", 0 }, { } }; -MODULE_DEVICE_TABLE(i2c, wm8400_i2c_id); static struct i2c_driver wm8400_i2c_driver = { .driver = { @@ -161,7 +160,7 @@ static struct i2c_driver wm8400_i2c_driver = { }; #endif -static int __init wm8400_module_init(void) +static int __init wm8400_driver_init(void) { int ret = -ENODEV; @@ -173,15 +172,4 @@ static int __init wm8400_module_init(void) return ret; } -subsys_initcall(wm8400_module_init); - -static void __exit wm8400_module_exit(void) -{ -#if IS_ENABLED(CONFIG_I2C) - i2c_del_driver(&wm8400_i2c_driver); -#endif -} -module_exit(wm8400_module_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Mark Brown "); +subsys_initcall(wm8400_driver_init); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index f417b06e11c5..42ab8ec92a04 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -295,6 +295,17 @@ config QCOM_COINCELL to maintain PMIC register and RTC state in the absence of external power. +config QCOM_FASTRPC + tristate "Qualcomm FastRPC" + depends on ARCH_QCOM || COMPILE_TEST + depends on RPMSG + select DMA_SHARED_BUFFER + help + Provides a communication mechanism that allows for clients to + make remote method invocations across processor boundary to + applications DSP processor. Say M if you want to enable this + module. 
+ config SGI_GRU tristate "SGI GRU driver" depends on X86_UV && SMP @@ -535,4 +546,5 @@ source "drivers/misc/echo/Kconfig" source "drivers/misc/cxl/Kconfig" source "drivers/misc/ocxl/Kconfig" source "drivers/misc/cardreader/Kconfig" +source "drivers/misc/habanalabs/Kconfig" endmenu diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index e39ccbbc1b3a..d5b7d3404dc7 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_TIFM_CORE) += tifm_core.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o obj-$(CONFIG_QCOM_COINCELL) += qcom-coincell.o +obj-$(CONFIG_QCOM_FASTRPC) += fastrpc.o obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o obj-$(CONFIG_SGI_IOC4) += ioc4.o @@ -59,3 +60,4 @@ obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o obj-$(CONFIG_OCXL) += ocxl/ obj-y += cardreader/ obj-$(CONFIG_PVPANIC) += pvpanic.o +obj-$(CONFIG_HABANA_AI) += habanalabs/ diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c index a0afadefcc49..1f6d008e0036 100644 --- a/drivers/misc/ad525x_dpot.c +++ b/drivers/misc/ad525x_dpot.c @@ -202,22 +202,20 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg) return dpot_read_r8d8(dpot, ctrl); case DPOT_UID(AD5272_ID): case DPOT_UID(AD5274_ID): - dpot_write_r8d8(dpot, + dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_READ_RDAC << 2), 0); - value = dpot_read_r8d16(dpot, - DPOT_AD5270_1_2_4_RDAC << 2); - - if (value < 0) - return value; - /* - * AD5272/AD5274 returns high byte first, however - * underling smbus expects low byte first. - */ - value = swab16(value); + value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2); + if (value < 0) + return value; + /* + * AD5272/AD5274 returns high byte first, however + * underling smbus expects low byte first. 
+ */ + value = swab16(value); - if (dpot->uid == DPOT_UID(AD5274_ID)) - value = value >> 2; + if (dpot->uid == DPOT_UID(AD5274_ID)) + value = value >> 2; return value; default: if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256)) diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 024dcba8d6c8..5c98e2221889 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -170,35 +170,46 @@ static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card) { int err; + if (pcr->option.ocp_en) + rtsx_pci_enable_ocp(pcr); + rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, SD_POWER_MASK, SD_PARTIAL_POWER_ON); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x02); + err = rtsx_pci_send_cmd(pcr, 100); if (err < 0) return err; /* To avoid too large in-rush current */ - udelay(150); - + msleep(20); rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, SD_POWER_MASK, SD_POWER_ON); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x06); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, + SD_OUTPUT_EN, SD_OUTPUT_EN); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, + MS_OUTPUT_EN, MS_OUTPUT_EN); return rtsx_pci_send_cmd(pcr, 100); } static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card) { - rtsx_pci_init_cmd(pcr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, - SD_POWER_MASK | PMOS_STRG_MASK, - SD_POWER_OFF | PMOS_STRG_400mA); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, - LDO3318_PWR_MASK, 0X00); - return rtsx_pci_send_cmd(pcr, 100); + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK | + PMOS_STRG_MASK, SD_POWER_OFF | PMOS_STRG_400mA); + rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0X00); + + return 0; } static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) @@ -348,6 +359,32 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr) return 0; } +static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + int err; + + if (voltage == OUTPUT_3V3) { + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x57E4); + if (err < 0) + return err; + } else if (voltage == OUTPUT_1V8) { + err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02); + if (err < 0) + return err; + err = rtsx_pci_write_phy_register(pcr, 0x08, 0x54A4); + if (err < 0) + return err; + } else { + return -EINVAL; + } + + /* set pad drive */ + rtsx_pci_init_cmd(pcr); + rts5227_fill_driving(pcr, voltage); + return rtsx_pci_send_cmd(pcr, 100); +} + + /* rts522a operations mainly derived from rts5227, except phy/hw init setting. 
*/ static const struct pcr_ops rts522a_pcr_ops = { @@ -360,7 +397,7 @@ static const struct pcr_ops rts522a_pcr_ops = { .disable_auto_blink = rts5227_disable_auto_blink, .card_power_on = rts5227_card_power_on, .card_power_off = rts5227_card_power_off, - .switch_output_voltage = rts5227_switch_output_voltage, + .switch_output_voltage = rts522a_switch_output_voltage, .cd_deglitch = NULL, .conv_clk_and_div_n = NULL, .force_power_down = rts5227_force_power_down, @@ -371,4 +408,11 @@ void rts522a_init_params(struct rtsx_pcr *pcr) rts5227_init_params(pcr); pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3; + + pcr->option.ocp_en = 1; + if (pcr->option.ocp_en) + pcr->hw_param.interrupt_en |= SD_OC_INT_EN; + pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M; + pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800; + } diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index dbe013abdb83..0f72a7e0fdab 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -284,6 +284,10 @@ static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr) static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card) { int err; + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, @@ -306,12 +310,15 @@ static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card) static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card) { - rtsx_pci_init_cmd(pcr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, - SD_POWER_MASK, SD_POWER_OFF); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL, - LDO3318_PWR_MASK, 0x00); - return rtsx_pci_send_cmd(pcr, 100); + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_disable_ocp(pcr); + + rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK, SD_POWER_OFF); + + rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x00); + return 0; } static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) @@ -629,6 +636,13 @@ void rts524a_init_params(struct rtsx_pcr *pcr) pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3; pcr->ops = &rts524a_pcr_ops; + + pcr->option.ocp_en = 1; + if (pcr->option.ocp_en) + pcr->hw_param.interrupt_en |= SD_OC_INT_EN; + pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M; + pcr->option.sd_800mA_ocp_thd = RTS524A_OCP_THD_800; + } static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card) @@ -737,4 +751,10 @@ void rts525a_init_params(struct rtsx_pcr *pcr) pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3; pcr->ops = &rts525a_pcr_ops; + + pcr->option.ocp_en = 1; + if (pcr->option.ocp_en) + pcr->hw_param.interrupt_en |= SD_OC_INT_EN; + pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M; + pcr->option.sd_800mA_ocp_thd = RTS525A_OCP_THD_800; } diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index a493b01c5bc6..da22bcb62b04 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -64,11 +64,13 @@ static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage) drive_sel = pcr->sd30_drive_sel_1v8; } - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL, + rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL, 0xFF, driving[drive_sel][0]); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL, + + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, 0xFF, driving[drive_sel][1]); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL, + + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, 0xFF, 
driving[drive_sel][2]); } @@ -193,7 +195,7 @@ static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr) | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, - CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); return 0; @@ -207,22 +209,16 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card) if (option->ocp_en) rtsx_pci_enable_ocp(pcr); - rtsx_pci_init_cmd(pcr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2, - DV331812_VDD1, DV331812_VDD1); - err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); - if (err < 0) - return err; - rtsx_pci_init_cmd(pcr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG0, + rtsx_pci_write_register(pcr, LDO_CONFIG2, DV331812_VDD1, DV331812_VDD1); + rtsx_pci_write_register(pcr, LDO_VCC_CFG0, RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1, - LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_ON); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2, - DV331812_POWERON, DV331812_POWERON); - err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); + rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_POW_SDVDD1_MASK, + LDO_POW_SDVDD1_ON); + + rtsx_pci_write_register(pcr, LDO_CONFIG2, + DV331812_POWERON, DV331812_POWERON); msleep(20); if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || @@ -242,8 +238,8 @@ static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card) /* Reset SD_CFG3 register */ rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, - SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | - SD30_CLK_STOP_CFG0, 0); + SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | + SD30_CLK_STOP_CFG0, 0); rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0); @@ -273,9 +269,9 @@ static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) } /* set pad drive */ - rtsx_pci_init_cmd(pcr); rts5260_fill_driving(pcr, voltage); - return rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); + + return 0; } static void rts5260_stop_cmd(struct rtsx_pcr *pcr) @@ -290,13 +286,9 @@ static void rts5260_stop_cmd(struct rtsx_pcr *pcr) static void rts5260_card_before_power_off(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; - rts5260_stop_cmd(pcr); rts5260_switch_output_voltage(pcr, OUTPUT_3V3); - if (option->ocp_en) - rtsx_pci_disable_ocp(pcr); } static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card) @@ -304,13 +296,12 @@ static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card) int err = 0; rts5260_card_before_power_off(pcr); - - rtsx_pci_init_cmd(pcr); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_VCC_CFG1, + err = rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CONFIG2, + err = rtsx_pci_write_register(pcr, LDO_CONFIG2, DV331812_POWERON, DV331812_POWEROFF); - err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF); + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); return err; } @@ -322,41 +313,29 @@ static void rts5260_init_ocp(struct rtsx_pcr *pcr) if (option->ocp_en) { u8 mask, val; - rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, - RTS5260_DVCC_OCP_EN | - RTS5260_DVCC_OCP_CL_EN, - RTS5260_DVCC_OCP_EN | - RTS5260_DVCC_OCP_CL_EN); - rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL, - RTS5260_DVIO_OCP_EN | - RTS5260_DVIO_OCP_CL_EN, - RTS5260_DVIO_OCP_EN | - 
RTS5260_DVIO_OCP_CL_EN); rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, - RTS5260_DVCC_OCP_THD_MASK, - option->sd_400mA_ocp_thd); - - rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL, - RTS5260_DVIO_OCP_THD_MASK, - RTS5260_DVIO_OCP_THD_350); + RTS5260_DVCC_OCP_THD_MASK, + option->sd_800mA_ocp_thd); rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG, - RTS5260_DV331812_OCP_THD_MASK, - RTS5260_DV331812_OCP_THD_210); + RTS5260_DV331812_OCP_THD_MASK, + RTS5260_DV331812_OCP_THD_270); - mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK; + mask = SD_OCP_GLITCH_MASK; val = pcr->hw_param.ocp_glitch; rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); + rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN); rtsx_pci_enable_ocp(pcr); } else { rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, RTS5260_DVCC_OCP_EN | RTS5260_DVCC_OCP_CL_EN, 0); - rtsx_pci_write_register(pcr, RTS5260_DVIO_CTRL, - RTS5260_DVIO_OCP_EN | - RTS5260_DVIO_OCP_CL_EN, 0); } } @@ -364,14 +343,9 @@ static void rts5260_enable_ocp(struct rtsx_pcr *pcr) { u8 val = 0; - rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); - val = SD_OCP_INT_EN | SD_DETECT_EN; - val |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN; rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); - rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL, - DV3318_DETECT_EN | DV3318_OCP_INT_EN, - DV3318_DETECT_EN | DV3318_OCP_INT_EN); + } static void rts5260_disable_ocp(struct rtsx_pcr *pcr) @@ -379,15 +353,11 @@ static void rts5260_disable_ocp(struct rtsx_pcr *pcr) u8 mask = 0; mask = SD_OCP_INT_EN | SD_DETECT_EN; - mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN; rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); - rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL, - DV3318_DETECT_EN | DV3318_OCP_INT_EN, 0); - rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, - OC_POWER_DOWN); } + static int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val) { return rtsx_pci_read_register(pcr, REG_OCPSTAT, val); @@ -404,9 +374,7 @@ static void rts5260_clear_ocpstat(struct rtsx_pcr *pcr) u8 val = 0; mask = SD_OCP_INT_CLR | SD_OC_CLR; - mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR; val = SD_OCP_INT_CLR | SD_OC_CLR; - val |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR; rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL, @@ -425,36 +393,22 @@ static void rts5260_process_ocp(struct rtsx_pcr *pcr) rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2); - if (pcr->card_exist & SD_EXIST) - rtsx_sd_power_off_card3v3(pcr); - else if (pcr->card_exist & MS_EXIST) - rtsx_ms_power_off_card3v3(pcr); - - if (!(pcr->card_exist & MS_EXIST) && !(pcr->card_exist & SD_EXIST)) { - if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER | - SDVIO_OC_NOW | SDVIO_OC_EVER)) || - (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) - rtsx_pci_clear_ocpstat(pcr); + + if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) || + (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) { + rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + rtsx_pci_clear_ocpstat(pcr); pcr->ocp_stat = 0; pcr->ocp_stat2 = 0; } - if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER | - SDVIO_OC_NOW | SDVIO_OC_EVER)) || - (pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) { - if (pcr->card_exist & SD_EXIST) - rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); - else if (pcr->card_exist & MS_EXIST) - rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0); - } } 
static int rts5260_init_hw(struct rtsx_pcr *pcr) { int err; - rtsx_pci_init_ocp(pcr); - rtsx_pci_init_cmd(pcr); rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1, @@ -483,6 +437,8 @@ static int rts5260_init_hw(struct rtsx_pcr *pcr) if (err < 0) return err; + rtsx_pci_init_ocp(pcr); + return 0; } @@ -499,7 +455,13 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr) pcr_dbg(pcr, "Set parameters for L1.2."); rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL, 0xFF, PCIE_L1_2_EN); - rtsx_pci_write_register(pcr, PWR_FE_CTL, + rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN, + RTS5260_DVCC_OCP_EN | + RTS5260_DVCC_OCP_CL_EN); + + rtsx_pci_write_register(pcr, PWR_FE_CTL, 0xFF, PCIE_L1_2_PD_FE_EN); } else if (lss_l1_1) { pcr_dbg(pcr, "Set parameters for L1.1."); @@ -742,7 +704,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr) option->ocp_en = 1; if (option->ocp_en) hw_param->interrupt_en |= SD_OC_INT_EN; - hw_param->ocp_glitch = SD_OCP_GLITCH_10M | SDVIO_OCP_GLITCH_800U; + hw_param->ocp_glitch = SDVIO_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U; option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550; option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970; } diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index da445223f4cc..0d320e0ab4c9 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -703,7 +703,10 @@ EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable); static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr) { - pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN; + struct rtsx_hw_param *hw_param = &pcr->hw_param; + + pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN + | hw_param->interrupt_en; if (pcr->num_slots > 1) pcr->bier |= MS_INT_EN; @@ -969,8 +972,19 @@ static void rtsx_pci_card_detect(struct work_struct *work) static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr) { - if (pcr->ops->process_ocp) + if (pcr->ops->process_ocp) { pcr->ops->process_ocp(pcr); + } else { + if (!pcr->option.ocp_en) + return; + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) { + rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + rtsx_pci_clear_ocpstat(pcr); + pcr->ocp_stat = 0; + } + } } static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr) @@ -1039,7 +1053,7 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) } } - if (pcr->card_inserted || pcr->card_removed) + if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT)) schedule_delayed_work(&pcr->carddet_work, msecs_to_jiffies(200)); @@ -1144,10 +1158,12 @@ void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr) { u8 val = SD_OCP_INT_EN | SD_DETECT_EN; - if (pcr->ops->enable_ocp) + if (pcr->ops->enable_ocp) { pcr->ops->enable_ocp(pcr); - else + } else { + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val); + } } @@ -1155,10 +1171,13 @@ void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr) { u8 mask = SD_OCP_INT_EN | SD_DETECT_EN; - if (pcr->ops->disable_ocp) + if (pcr->ops->disable_ocp) { pcr->ops->disable_ocp(pcr); - else + } else { rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, + OC_POWER_DOWN); + } } void rtsx_pci_init_ocp(struct rtsx_pcr *pcr) @@ -1169,7 +1188,7 @@ void rtsx_pci_init_ocp(struct rtsx_pcr *pcr) struct rtsx_cr_option *option = &(pcr->option); if (option->ocp_en) { - u8 val = 
option->sd_400mA_ocp_thd; + u8 val = option->sd_800mA_ocp_thd; rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0); rtsx_pci_write_register(pcr, REG_OCPPARA1, @@ -1204,6 +1223,7 @@ void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr) u8 val = SD_OCP_INT_CLR | SD_OC_CLR; rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + udelay(100); rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); } } @@ -1213,7 +1233,6 @@ int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr) rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN | MS_CLK_EN | SD40_CLK_EN, 0); rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); - rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); msleep(50); @@ -1313,6 +1332,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) break; } + /*init ocp*/ + rtsx_pci_init_ocp(pcr); + /* Enable clk_request_n to enable clock power management */ rtsx_pci_write_config_byte(pcr, pcr->pcie_cap + PCI_EXP_LNKCTL + 1, 1); /* Enter L1 when host tx idle */ diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h index 6ea1655db0bb..300fc31d8e67 100644 --- a/drivers/misc/cardreader/rtsx_pcr.h +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -46,6 +46,11 @@ #define SSC_CLOCK_STABLE_WAIT 130 +#define RTS524A_OCP_THD_800 0x04 +#define RTS525A_OCP_THD_800 0x05 +#define RTS522A_OCP_THD_800 0x06 + + int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val); int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val); diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 5d28d9e454f5..08f4a512afad 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -267,6 +267,7 @@ static int guest_reset(struct cxl *adapter) int i, rc; pr_devel("Adapter reset request\n"); + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { if ((afu = adapter->afu[i])) { pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT, @@ -283,6 +284,7 @@ static int guest_reset(struct cxl *adapter) pci_error_handlers(afu, CXL_RESUME_EVENT, 0); } } + spin_unlock(&adapter->afu_list_lock); return rc; } diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index c79ba1c699ad..300531d6136f 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1805,7 +1805,7 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu, /* There should only be one entry, but go through the list * anyway */ - if (afu->phb == NULL) + if (afu == NULL || afu->phb == NULL) return result; list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { @@ -1832,7 +1832,8 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, { struct cxl *adapter = pci_get_drvdata(pdev); struct cxl_afu *afu; - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result; + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET; + pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET; int i; /* At this point, we could still have an interrupt pending. @@ -1843,6 +1844,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, /* If we're permanently dead, give up. 
*/ if (state == pci_channel_io_perm_failure) { + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; /* @@ -1851,6 +1853,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, */ cxl_vphb_error_detected(afu, state); } + spin_unlock(&adapter->afu_list_lock); return PCI_ERS_RESULT_DISCONNECT; } @@ -1932,11 +1935,17 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, * * In slot_reset, free the old resources and allocate new ones. * * In resume, clear the flag to allow things to start. */ + + /* Make sure no one else changes the afu list */ + spin_lock(&adapter->afu_list_lock); + for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; - afu_result = cxl_vphb_error_detected(afu, state); + if (afu == NULL) + continue; + afu_result = cxl_vphb_error_detected(afu, state); cxl_context_detach_all(afu); cxl_ops->afu_deactivate_mode(afu, afu->current_mode); pci_deconfigure_afu(afu); @@ -1948,6 +1957,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, (result == PCI_ERS_RESULT_NEED_RESET)) result = PCI_ERS_RESULT_NONE; } + spin_unlock(&adapter->afu_list_lock); /* should take the context lock here */ if (cxl_adapter_context_lock(adapter) != 0) @@ -1980,14 +1990,18 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) */ cxl_adapter_context_unlock(adapter); + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; + if (afu == NULL) + continue; + if (pci_configure_afu(afu, adapter, pdev)) - goto err; + goto err_unlock; if (cxl_afu_select_best_mode(afu)) - goto err; + goto err_unlock; if (afu->phb == NULL) continue; @@ -1999,16 +2013,16 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) ctx = cxl_get_context(afu_dev); if (ctx && cxl_release_context(ctx)) - goto err; + goto err_unlock; ctx = cxl_dev_context_init(afu_dev); if (IS_ERR(ctx)) - goto err; + goto err_unlock; afu_dev->dev.archdata.cxl_ctx = ctx; if (cxl_ops->afu_check_and_enable(afu)) - goto err; + goto err_unlock; afu_dev->error_state = pci_channel_io_normal; @@ -2029,8 +2043,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) result = PCI_ERS_RESULT_DISCONNECT; } } + + spin_unlock(&adapter->afu_list_lock); return result; +err_unlock: + spin_unlock(&adapter->afu_list_lock); + err: /* All the bits that happen in both error_detected and cxl_remove * should be idempotent, so we don't need to worry about leaving a mix @@ -2051,10 +2070,11 @@ static void cxl_pci_resume(struct pci_dev *pdev) * This is not the place to be checking if everything came back up * properly, because there's no return value: do that in slot_reset. 
*/ + spin_lock(&adapter->afu_list_lock); for (i = 0; i < adapter->slices; i++) { afu = adapter->afu[i]; - if (afu->phb == NULL) + if (afu == NULL || afu->phb == NULL) continue; list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { @@ -2063,6 +2083,7 @@ static void cxl_pci_resume(struct pci_dev *pdev) afu_dev->driver->err_handler->resume(afu_dev); } } + spin_unlock(&adapter->afu_list_lock); } static const struct pci_error_handlers cxl_err_handler = { diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c index 49da2f744bbf..631c5df246d4 100644 --- a/drivers/misc/cxl/vphb.c +++ b/drivers/misc/cxl/vphb.c @@ -43,8 +43,7 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev) return false; } - set_dma_ops(&dev->dev, &dma_nommu_ops); - set_dma_offset(&dev->dev, PAGE_OFFSET); + dev->dev.archdata.dma_offset = PAGE_OFFSET; /* * Allocate a context to do cxl things too. If we eventually do real diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index ddfcf4ade7bf..63aa541c9608 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -22,10 +22,24 @@ #include #include #include -#include #include #include +/* Address pointer is 16 bit. */ +#define AT24_FLAG_ADDR16 BIT(7) +/* sysfs-entry will be read-only. */ +#define AT24_FLAG_READONLY BIT(6) +/* sysfs-entry will be world-readable. */ +#define AT24_FLAG_IRUGO BIT(5) +/* Take always 8 addresses (24c00). */ +#define AT24_FLAG_TAKE8ADDR BIT(4) +/* Factory-programmed serial number. */ +#define AT24_FLAG_SERIAL BIT(3) +/* Factory-programmed mac address. */ +#define AT24_FLAG_MAC BIT(2) +/* Does not auto-rollover reads to the next slave address. */ +#define AT24_FLAG_NO_RDROL BIT(1) + /* * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable. * Differences between different vendor product lines (like Atmel AT24C or @@ -107,10 +121,6 @@ module_param_named(write_timeout, at24_write_timeout, uint, 0); MODULE_PARM_DESC(at24_write_timeout, "Time (in ms) to try writes (default 25)"); struct at24_chip_data { - /* - * these fields mirror their equivalents in - * struct at24_platform_data - */ u32 byte_len; u8 flags; }; @@ -471,63 +481,11 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) return 0; } -static void at24_properties_to_pdata(struct device *dev, - struct at24_platform_data *chip) -{ - int err; - u32 val; - - if (device_property_present(dev, "read-only")) - chip->flags |= AT24_FLAG_READONLY; - if (device_property_present(dev, "no-read-rollover")) - chip->flags |= AT24_FLAG_NO_RDROL; - - err = device_property_read_u32(dev, "address-width", &val); - if (!err) { - switch (val) { - case 8: - if (chip->flags & AT24_FLAG_ADDR16) - dev_warn(dev, "Override address width to be 8, while default is 16\n"); - chip->flags &= ~AT24_FLAG_ADDR16; - break; - case 16: - chip->flags |= AT24_FLAG_ADDR16; - break; - default: - dev_warn(dev, "Bad \"address-width\" property: %u\n", - val); - } - } - - err = device_property_read_u32(dev, "size", &val); - if (!err) - chip->byte_len = val; - - err = device_property_read_u32(dev, "pagesize", &val); - if (!err) { - chip->page_size = val; - } else { - /* - * This is slow, but we can't know all eeproms, so we better - * play safe. Specifying custom eeprom-types via platform_data - * is recommended anyhow. 
- */ - chip->page_size = 1; - } -} - -static int at24_get_pdata(struct device *dev, struct at24_platform_data *pdata) +static const struct at24_chip_data *at24_get_chip_data(struct device *dev) { struct device_node *of_node = dev->of_node; const struct at24_chip_data *cdata; const struct i2c_device_id *id; - struct at24_platform_data *pd; - - pd = dev_get_platdata(dev); - if (pd) { - memcpy(pdata, pd, sizeof(*pdata)); - return 0; - } id = i2c_match_id(at24_ids, to_i2c_client(dev)); @@ -544,13 +502,9 @@ static int at24_get_pdata(struct device *dev, struct at24_platform_data *pdata) cdata = acpi_device_get_match_data(dev); if (!cdata) - return -ENODEV; + return ERR_PTR(-ENODEV); - pdata->byte_len = cdata->byte_len; - pdata->flags = cdata->flags; - at24_properties_to_pdata(dev, pdata); - - return 0; + return cdata; } static void at24_remove_dummy_clients(struct at24_data *at24) @@ -619,7 +573,8 @@ static int at24_probe(struct i2c_client *client) { struct regmap_config regmap_config = { }; struct nvmem_config nvmem_config = { }; - struct at24_platform_data pdata = { }; + u32 byte_len, page_size, flags, addrw; + const struct at24_chip_data *cdata; struct device *dev = &client->dev; bool i2c_fn_i2c, i2c_fn_block; unsigned int i, num_addresses; @@ -634,35 +589,75 @@ static int at24_probe(struct i2c_client *client) i2c_fn_block = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_I2C_BLOCK); - err = at24_get_pdata(dev, &pdata); + cdata = at24_get_chip_data(dev); + if (IS_ERR(cdata)) + return PTR_ERR(cdata); + + err = device_property_read_u32(dev, "pagesize", &page_size); + if (err) + /* + * This is slow, but we can't know all eeproms, so we better + * play safe. Specifying custom eeprom-types via platform_data + * is recommended anyhow. + */ + page_size = 1; + + flags = cdata->flags; + if (device_property_present(dev, "read-only")) + flags |= AT24_FLAG_READONLY; + if (device_property_present(dev, "no-read-rollover")) + flags |= AT24_FLAG_NO_RDROL; + + err = device_property_read_u32(dev, "address-width", &addrw); + if (!err) { + switch (addrw) { + case 8: + if (flags & AT24_FLAG_ADDR16) + dev_warn(dev, + "Override address width to be 8, while default is 16\n"); + flags &= ~AT24_FLAG_ADDR16; + break; + case 16: + flags |= AT24_FLAG_ADDR16; + break; + default: + dev_warn(dev, "Bad \"address-width\" property: %u\n", + addrw); + } + } + + err = device_property_read_u32(dev, "size", &byte_len); if (err) - return err; + byte_len = cdata->byte_len; if (!i2c_fn_i2c && !i2c_fn_block) - pdata.page_size = 1; + page_size = 1; - if (!pdata.page_size) { + if (!page_size) { dev_err(dev, "page_size must not be 0!\n"); return -EINVAL; } - if (!is_power_of_2(pdata.page_size)) + if (!is_power_of_2(page_size)) dev_warn(dev, "page_size looks suspicious (no power of 2)!\n"); - if (pdata.flags & AT24_FLAG_TAKE8ADDR) - num_addresses = 8; - else - num_addresses = DIV_ROUND_UP(pdata.byte_len, - (pdata.flags & AT24_FLAG_ADDR16) ? 65536 : 256); + err = device_property_read_u32(dev, "num-addresses", &num_addresses); + if (err) { + if (flags & AT24_FLAG_TAKE8ADDR) + num_addresses = 8; + else + num_addresses = DIV_ROUND_UP(byte_len, + (flags & AT24_FLAG_ADDR16) ? 65536 : 256); + } - if ((pdata.flags & AT24_FLAG_SERIAL) && (pdata.flags & AT24_FLAG_MAC)) { + if ((flags & AT24_FLAG_SERIAL) && (flags & AT24_FLAG_MAC)) { dev_err(dev, "invalid device data - cannot have both AT24_FLAG_SERIAL & AT24_FLAG_MAC."); return -EINVAL; } regmap_config.val_bits = 8; - regmap_config.reg_bits = (pdata.flags & AT24_FLAG_ADDR16) ? 
16 : 8; + regmap_config.reg_bits = (flags & AT24_FLAG_ADDR16) ? 16 : 8; regmap_config.disable_locking = true; regmap = devm_regmap_init_i2c(client, ®map_config); @@ -675,11 +670,11 @@ static int at24_probe(struct i2c_client *client) return -ENOMEM; mutex_init(&at24->lock); - at24->byte_len = pdata.byte_len; - at24->page_size = pdata.page_size; - at24->flags = pdata.flags; + at24->byte_len = byte_len; + at24->page_size = page_size; + at24->flags = flags; at24->num_addresses = num_addresses; - at24->offset_adj = at24_get_offset_adj(pdata.flags, pdata.byte_len); + at24->offset_adj = at24_get_offset_adj(flags, byte_len); at24->client[0].client = client; at24->client[0].regmap = regmap; @@ -687,10 +682,10 @@ static int at24_probe(struct i2c_client *client) if (IS_ERR(at24->wp_gpio)) return PTR_ERR(at24->wp_gpio); - writable = !(pdata.flags & AT24_FLAG_READONLY); + writable = !(flags & AT24_FLAG_READONLY); if (writable) { at24->write_max = min_t(unsigned int, - pdata.page_size, at24_io_limit); + page_size, at24_io_limit); if (!i2c_fn_i2c && at24->write_max > I2C_SMBUS_BLOCK_MAX) at24->write_max = I2C_SMBUS_BLOCK_MAX; } @@ -733,7 +728,7 @@ static int at24_probe(struct i2c_client *client) nvmem_config.priv = at24; nvmem_config.stride = 1; nvmem_config.word_size = 1; - nvmem_config.size = pdata.byte_len; + nvmem_config.size = byte_len; at24->nvmem = devm_nvmem_register(dev, &nvmem_config); if (IS_ERR(at24->nvmem)) { @@ -742,13 +737,9 @@ static int at24_probe(struct i2c_client *client) } dev_info(dev, "%u byte %s EEPROM, %s, %u bytes/write\n", - pdata.byte_len, client->name, + byte_len, client->name, writable ? "writable" : "read-only", at24->write_max); - /* export data to kernel code */ - if (pdata.setup) - pdata.setup(at24->nvmem, pdata.context); - return 0; err_clients: diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 5a17bfeb80d3..74d4fda6c4a7 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -125,9 +125,7 @@ enclosure_register(struct device *dev, const char *name, int components, struct enclosure_component_callbacks *cb) { struct enclosure_device *edev = - kzalloc(sizeof(struct enclosure_device) + - sizeof(struct enclosure_component)*components, - GFP_KERNEL); + kzalloc(struct_size(edev, component, components), GFP_KERNEL); int err, i; BUG_ON(!cb); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c new file mode 100644 index 000000000000..39f832d27288 --- /dev/null +++ b/drivers/misc/fastrpc.c @@ -0,0 +1,1401 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
+// Copyright (c) 2018, Linaro Limited + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ADSP_DOMAIN_ID (0) +#define MDSP_DOMAIN_ID (1) +#define SDSP_DOMAIN_ID (2) +#define CDSP_DOMAIN_ID (3) +#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/ +#define FASTRPC_MAX_SESSIONS 9 /*8 compute, 1 cpz*/ +#define FASTRPC_ALIGN 128 +#define FASTRPC_MAX_FDLIST 16 +#define FASTRPC_MAX_CRCLIST 64 +#define FASTRPC_PHYS(p) ((p) & 0xffffffff) +#define FASTRPC_CTX_MAX (256) +#define FASTRPC_INIT_HANDLE 1 +#define FASTRPC_CTXID_MASK (0xFF0) +#define INIT_FILELEN_MAX (2 * 1024 * 1024) +#define INIT_MEMLEN_MAX (8 * 1024 * 1024) +#define FASTRPC_DEVICE_NAME "fastrpc" + +/* Retrives number of input buffers from the scalars parameter */ +#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff) + +/* Retrives number of output buffers from the scalars parameter */ +#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff) + +/* Retrives number of input handles from the scalars parameter */ +#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f) + +/* Retrives number of output handles from the scalars parameter */ +#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f) + +#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \ + REMOTE_SCALARS_OUTBUFS(sc) + \ + REMOTE_SCALARS_INHANDLES(sc)+ \ + REMOTE_SCALARS_OUTHANDLES(sc)) +#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \ + (((attr & 0x07) << 29) | \ + ((method & 0x1f) << 24) | \ + ((in & 0xff) << 16) | \ + ((out & 0xff) << 8) | \ + ((oin & 0x0f) << 4) | \ + (oout & 0x0f)) + +#define FASTRPC_SCALARS(method, in, out) \ + FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0) + +#define FASTRPC_CREATE_PROCESS_NARGS 6 +/* Remote Method id table */ +#define FASTRPC_RMID_INIT_ATTACH 0 +#define FASTRPC_RMID_INIT_RELEASE 1 +#define FASTRPC_RMID_INIT_CREATE 6 +#define FASTRPC_RMID_INIT_CREATE_ATTR 7 +#define FASTRPC_RMID_INIT_CREATE_STATIC 8 + +#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev) + +static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp", + "sdsp", "cdsp"}; +struct fastrpc_phy_page { + u64 addr; /* physical address */ + u64 size; /* size of contiguous region */ +}; + +struct fastrpc_invoke_buf { + u32 num; /* number of contiguous regions */ + u32 pgidx; /* index to start of contiguous region */ +}; + +struct fastrpc_remote_arg { + u64 pv; + u64 len; +}; + +struct fastrpc_msg { + int pid; /* process group id */ + int tid; /* thread id */ + u64 ctx; /* invoke caller context */ + u32 handle; /* handle to invoke */ + u32 sc; /* scalars structure describing the data */ + u64 addr; /* physical address */ + u64 size; /* size of contiguous region */ +}; + +struct fastrpc_invoke_rsp { + u64 ctx; /* invoke caller context */ + int retval; /* invoke return value */ +}; + +struct fastrpc_buf { + struct fastrpc_user *fl; + struct dma_buf *dmabuf; + struct device *dev; + void *virt; + u64 phys; + u64 size; + /* Lock for dma buf attachments */ + struct mutex lock; + struct list_head attachments; +}; + +struct fastrpc_dma_buf_attachment { + struct device *dev; + struct sg_table sgt; + struct list_head node; +}; + +struct fastrpc_map { + struct list_head node; + struct fastrpc_user *fl; + int fd; + struct dma_buf *buf; + struct sg_table *table; + struct dma_buf_attachment *attach; + u64 phys; + u64 size; + void *va; + u64 len; + struct kref refcount; +}; + +struct fastrpc_invoke_ctx { + int nscalars; + int 
nbufs; + int retval; + int pid; + int tgid; + u32 sc; + u32 *crc; + u64 ctxid; + u64 msg_sz; + struct kref refcount; + struct list_head node; /* list of ctxs */ + struct completion work; + struct fastrpc_msg msg; + struct fastrpc_user *fl; + struct fastrpc_remote_arg *rpra; + struct fastrpc_map **maps; + struct fastrpc_buf *buf; + struct fastrpc_invoke_args *args; + struct fastrpc_channel_ctx *cctx; +}; + +struct fastrpc_session_ctx { + struct device *dev; + int sid; + bool used; + bool valid; +}; + +struct fastrpc_channel_ctx { + int domain_id; + int sesscount; + struct rpmsg_device *rpdev; + struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS]; + spinlock_t lock; + struct idr ctx_idr; + struct list_head users; + struct miscdevice miscdev; +}; + +struct fastrpc_user { + struct list_head user; + struct list_head maps; + struct list_head pending; + + struct fastrpc_channel_ctx *cctx; + struct fastrpc_session_ctx *sctx; + struct fastrpc_buf *init_mem; + + int tgid; + int pd; + /* Lock for lists */ + spinlock_t lock; + /* lock for allocations */ + struct mutex mutex; +}; + +static void fastrpc_free_map(struct kref *ref) +{ + struct fastrpc_map *map; + + map = container_of(ref, struct fastrpc_map, refcount); + + if (map->table) { + dma_buf_unmap_attachment(map->attach, map->table, + DMA_BIDIRECTIONAL); + dma_buf_detach(map->buf, map->attach); + dma_buf_put(map->buf); + } + + kfree(map); +} + +static void fastrpc_map_put(struct fastrpc_map *map) +{ + if (map) + kref_put(&map->refcount, fastrpc_free_map); +} + +static void fastrpc_map_get(struct fastrpc_map *map) +{ + if (map) + kref_get(&map->refcount); +} + +static int fastrpc_map_find(struct fastrpc_user *fl, int fd, + struct fastrpc_map **ppmap) +{ + struct fastrpc_map *map = NULL; + + mutex_lock(&fl->mutex); + list_for_each_entry(map, &fl->maps, node) { + if (map->fd == fd) { + fastrpc_map_get(map); + *ppmap = map; + mutex_unlock(&fl->mutex); + return 0; + } + } + mutex_unlock(&fl->mutex); + + return -ENOENT; +} + +static void fastrpc_buf_free(struct fastrpc_buf *buf) +{ + dma_free_coherent(buf->dev, buf->size, buf->virt, + FASTRPC_PHYS(buf->phys)); + kfree(buf); +} + +static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, + u64 size, struct fastrpc_buf **obuf) +{ + struct fastrpc_buf *buf; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + INIT_LIST_HEAD(&buf->attachments); + mutex_init(&buf->lock); + + buf->fl = fl; + buf->virt = NULL; + buf->phys = 0; + buf->size = size; + buf->dev = dev; + + buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, + GFP_KERNEL); + if (!buf->virt) + return -ENOMEM; + + if (fl->sctx && fl->sctx->sid) + buf->phys += ((u64)fl->sctx->sid << 32); + + *obuf = buf; + + return 0; +} + +static void fastrpc_context_free(struct kref *ref) +{ + struct fastrpc_invoke_ctx *ctx; + struct fastrpc_channel_ctx *cctx; + int i; + + ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount); + cctx = ctx->cctx; + + for (i = 0; i < ctx->nscalars; i++) + fastrpc_map_put(ctx->maps[i]); + + if (ctx->buf) + fastrpc_buf_free(ctx->buf); + + spin_lock(&cctx->lock); + idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4); + spin_unlock(&cctx->lock); + + kfree(ctx->maps); + kfree(ctx); +} + +static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx) +{ + kref_get(&ctx->refcount); +} + +static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx) +{ + kref_put(&ctx->refcount, fastrpc_context_free); +} + +static struct fastrpc_invoke_ctx *fastrpc_context_alloc( + struct 
fastrpc_user *user, u32 kernel, u32 sc, + struct fastrpc_invoke_args *args) +{ + struct fastrpc_channel_ctx *cctx = user->cctx; + struct fastrpc_invoke_ctx *ctx = NULL; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ctx->node); + ctx->fl = user; + ctx->nscalars = REMOTE_SCALARS_LENGTH(sc); + ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) + + REMOTE_SCALARS_OUTBUFS(sc); + + if (ctx->nscalars) { + ctx->maps = kcalloc(ctx->nscalars, + sizeof(*ctx->maps), GFP_KERNEL); + if (!ctx->maps) { + kfree(ctx); + return ERR_PTR(-ENOMEM); + } + ctx->args = args; + } + + ctx->sc = sc; + ctx->retval = -1; + ctx->pid = current->pid; + ctx->tgid = user->tgid; + ctx->cctx = cctx; + init_completion(&ctx->work); + + spin_lock(&user->lock); + list_add_tail(&ctx->node, &user->pending); + spin_unlock(&user->lock); + + spin_lock(&cctx->lock); + ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1, + FASTRPC_CTX_MAX, GFP_ATOMIC); + if (ret < 0) { + spin_unlock(&cctx->lock); + goto err_idr; + } + ctx->ctxid = ret << 4; + spin_unlock(&cctx->lock); + + kref_init(&ctx->refcount); + + return ctx; +err_idr: + spin_lock(&user->lock); + list_del(&ctx->node); + spin_unlock(&user->lock); + kfree(ctx->maps); + kfree(ctx); + + return ERR_PTR(ret); +} + +static struct sg_table * +fastrpc_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct fastrpc_dma_buf_attachment *a = attachment->priv; + struct sg_table *table; + + table = &a->sgt; + + if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir)) + return ERR_PTR(-ENOMEM); + + return table; +} + +static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *table, + enum dma_data_direction dir) +{ + dma_unmap_sg(attach->dev, table->sgl, table->nents, dir); +} + +static void fastrpc_release(struct dma_buf *dmabuf) +{ + struct fastrpc_buf *buffer = dmabuf->priv; + + fastrpc_buf_free(buffer); +} + +static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct fastrpc_dma_buf_attachment *a; + struct fastrpc_buf *buffer = dmabuf->priv; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, + FASTRPC_PHYS(buffer->phys), buffer->size); + if (ret < 0) { + dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); + return -EINVAL; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->node); + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->node, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct fastrpc_dma_buf_attachment *a = attachment->priv; + struct fastrpc_buf *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + list_del(&a->node); + mutex_unlock(&buffer->lock); + kfree(a); +} + +static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + struct fastrpc_buf *buf = dmabuf->priv; + + return buf->virt ? 
buf->virt + pgnum * PAGE_SIZE : NULL; +} + +static void *fastrpc_vmap(struct dma_buf *dmabuf) +{ + struct fastrpc_buf *buf = dmabuf->priv; + + return buf->virt; +} + +static int fastrpc_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct fastrpc_buf *buf = dmabuf->priv; + size_t size = vma->vm_end - vma->vm_start; + + return dma_mmap_coherent(buf->dev, vma, buf->virt, + FASTRPC_PHYS(buf->phys), size); +} + +static const struct dma_buf_ops fastrpc_dma_buf_ops = { + .attach = fastrpc_dma_buf_attach, + .detach = fastrpc_dma_buf_detatch, + .map_dma_buf = fastrpc_map_dma_buf, + .unmap_dma_buf = fastrpc_unmap_dma_buf, + .mmap = fastrpc_mmap, + .map = fastrpc_kmap, + .vmap = fastrpc_vmap, + .release = fastrpc_release, +}; + +static int fastrpc_map_create(struct fastrpc_user *fl, int fd, + u64 len, struct fastrpc_map **ppmap) +{ + struct fastrpc_session_ctx *sess = fl->sctx; + struct fastrpc_map *map = NULL; + int err = 0; + + if (!fastrpc_map_find(fl, fd, ppmap)) + return 0; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) + return -ENOMEM; + + INIT_LIST_HEAD(&map->node); + map->fl = fl; + map->fd = fd; + map->buf = dma_buf_get(fd); + if (IS_ERR(map->buf)) { + err = PTR_ERR(map->buf); + goto get_err; + } + + map->attach = dma_buf_attach(map->buf, sess->dev); + if (IS_ERR(map->attach)) { + dev_err(sess->dev, "Failed to attach dmabuf\n"); + err = PTR_ERR(map->attach); + goto attach_err; + } + + map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL); + if (IS_ERR(map->table)) { + err = PTR_ERR(map->table); + goto map_err; + } + + map->phys = sg_dma_address(map->table->sgl); + map->phys += ((u64)fl->sctx->sid << 32); + map->size = len; + map->va = sg_virt(map->table->sgl); + map->len = len; + kref_init(&map->refcount); + + spin_lock(&fl->lock); + list_add_tail(&map->node, &fl->maps); + spin_unlock(&fl->lock); + *ppmap = map; + + return 0; + +map_err: + dma_buf_detach(map->buf, map->attach); +attach_err: + dma_buf_put(map->buf); +get_err: + kfree(map); + + return err; +} + +/* + * Fastrpc payload buffer with metadata looks like: + * + * >>>>>> START of METADATA <<<<<<<<< + * +---------------------------------+ + * | Arguments | + * | type:(struct fastrpc_remote_arg)| + * | (0 - N) | + * +---------------------------------+ + * | Invoke Buffer list | + * | type:(struct fastrpc_invoke_buf)| + * | (0 - N) | + * +---------------------------------+ + * | Page info list | + * | type:(struct fastrpc_phy_page) | + * | (0 - N) | + * +---------------------------------+ + * | Optional info | + * |(can be specific to SoC/Firmware)| + * +---------------------------------+ + * >>>>>>>> END of METADATA <<<<<<<<< + * +---------------------------------+ + * | Inline ARGS | + * | (0-N) | + * +---------------------------------+ + */ + +static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx) +{ + int size = 0; + + size = (sizeof(struct fastrpc_remote_arg) + + sizeof(struct fastrpc_invoke_buf) + + sizeof(struct fastrpc_phy_page)) * ctx->nscalars + + sizeof(u64) * FASTRPC_MAX_FDLIST + + sizeof(u32) * FASTRPC_MAX_CRCLIST; + + return size; +} + +static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen) +{ + u64 size = 0; + int i; + + size = ALIGN(metalen, FASTRPC_ALIGN); + for (i = 0; i < ctx->nscalars; i++) { + if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { + size = ALIGN(size, FASTRPC_ALIGN); + size += ctx->args[i].length; + } + } + + return size; +} + +static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx) +{ + struct device *dev = 
ctx->fl->sctx->dev; + int i, err; + + for (i = 0; i < ctx->nscalars; ++i) { + /* Make sure reserved field is set to 0 */ + if (ctx->args[i].reserved) + return -EINVAL; + + if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 || + ctx->args[i].length == 0) + continue; + + err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, &ctx->maps[i]); + if (err) { + dev_err(dev, "Error Creating map %d\n", err); + return -EINVAL; + } + + } + return 0; +} + +static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) +{ + struct device *dev = ctx->fl->sctx->dev; + struct fastrpc_remote_arg *rpra; + struct fastrpc_invoke_buf *list; + struct fastrpc_phy_page *pages; + int inbufs, i, err = 0; + u64 rlen, pkt_size; + uintptr_t args; + int metalen; + + + inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); + metalen = fastrpc_get_meta_size(ctx); + pkt_size = fastrpc_get_payload_size(ctx, metalen); + + err = fastrpc_create_maps(ctx); + if (err) + return err; + + ctx->msg_sz = pkt_size; + + err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); + if (err) + return err; + + rpra = ctx->buf->virt; + list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra); + pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) + + sizeof(*rpra)); + args = (uintptr_t)ctx->buf->virt + metalen; + rlen = pkt_size - metalen; + ctx->rpra = rpra; + + for (i = 0; i < ctx->nbufs; ++i) { + u64 len = ctx->args[i].length; + + rpra[i].pv = 0; + rpra[i].len = len; + list[i].num = len ? 1 : 0; + list[i].pgidx = i; + + if (!len) + continue; + + pages[i].size = roundup(len, PAGE_SIZE); + + if (ctx->maps[i]) { + rpra[i].pv = (u64) ctx->args[i].ptr; + pages[i].addr = ctx->maps[i]->phys; + } else { + rlen -= ALIGN(args, FASTRPC_ALIGN) - args; + args = ALIGN(args, FASTRPC_ALIGN); + if (rlen < len) + goto bail; + + rpra[i].pv = args; + pages[i].addr = ctx->buf->phys + (pkt_size - rlen); + pages[i].addr = pages[i].addr & PAGE_MASK; + args = args + len; + rlen -= len; + } + + if (i < inbufs && !ctx->maps[i]) { + void *dst = (void *)(uintptr_t)rpra[i].pv; + void *src = (void *)(uintptr_t)ctx->args[i].ptr; + + if (!kernel) { + if (copy_from_user(dst, (void __user *)src, + len)) { + err = -EFAULT; + goto bail; + } + } else { + memcpy(dst, src, len); + } + } + } + + for (i = ctx->nbufs; i < ctx->nscalars; ++i) { + rpra[i].pv = (u64) ctx->args[i].ptr; + rpra[i].len = ctx->args[i].length; + list[i].num = ctx->args[i].length ? 
1 : 0; + list[i].pgidx = i; + pages[i].addr = ctx->maps[i]->phys; + pages[i].size = ctx->maps[i]->size; + } + +bail: + if (err) + dev_err(dev, "Error: get invoke args failed:%d\n", err); + + return err; +} + +static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, + u32 kernel) +{ + struct fastrpc_remote_arg *rpra = ctx->rpra; + int i, inbufs; + + inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); + + for (i = inbufs; i < ctx->nbufs; ++i) { + void *src = (void *)(uintptr_t)rpra[i].pv; + void *dst = (void *)(uintptr_t)ctx->args[i].ptr; + u64 len = rpra[i].len; + + if (!kernel) { + if (copy_to_user((void __user *)dst, src, len)) + return -EFAULT; + } else { + memcpy(dst, src, len); + } + } + + return 0; +} + +static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, + struct fastrpc_invoke_ctx *ctx, + u32 kernel, uint32_t handle) +{ + struct fastrpc_channel_ctx *cctx; + struct fastrpc_user *fl = ctx->fl; + struct fastrpc_msg *msg = &ctx->msg; + + cctx = fl->cctx; + msg->pid = fl->tgid; + msg->tid = current->pid; + + if (kernel) + msg->pid = 0; + + msg->ctx = ctx->ctxid | fl->pd; + msg->handle = handle; + msg->sc = ctx->sc; + msg->addr = ctx->buf ? ctx->buf->phys : 0; + msg->size = roundup(ctx->msg_sz, PAGE_SIZE); + fastrpc_context_get(ctx); + + return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); +} + +static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, + u32 handle, u32 sc, + struct fastrpc_invoke_args *args) +{ + struct fastrpc_invoke_ctx *ctx = NULL; + int err = 0; + + if (!fl->sctx) + return -EINVAL; + + ctx = fastrpc_context_alloc(fl, kernel, sc, args); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + if (ctx->nscalars) { + err = fastrpc_get_args(kernel, ctx); + if (err) + goto bail; + } + /* Send invoke buffer to remote dsp */ + err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); + if (err) + goto bail; + + /* Wait for remote dsp to respond or time out */ + err = wait_for_completion_interruptible(&ctx->work); + if (err) + goto bail; + + /* Check the response from remote dsp */ + err = ctx->retval; + if (err) + goto bail; + + if (ctx->nscalars) { + /* populate all the output buffers with results */ + err = fastrpc_put_args(ctx, kernel); + if (err) + goto bail; + } + +bail: + /* We are done with this compute context, remove it from pending list */ + spin_lock(&fl->lock); + list_del(&ctx->node); + spin_unlock(&fl->lock); + fastrpc_context_put(ctx); + + if (err) + dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); + + return err; +} + +static int fastrpc_init_create_process(struct fastrpc_user *fl, + char __user *argp) +{ + struct fastrpc_init_create init; + struct fastrpc_invoke_args *args; + struct fastrpc_phy_page pages[1]; + struct fastrpc_map *map = NULL; + struct fastrpc_buf *imem = NULL; + int memlen; + int err; + struct { + int pgid; + u32 namelen; + u32 filelen; + u32 pageslen; + u32 attrs; + u32 siglen; + } inbuf; + u32 sc; + + args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(&init, argp, sizeof(init))) { + err = -EFAULT; + goto bail; + } + + if (init.filelen > INIT_FILELEN_MAX) { + err = -EINVAL; + goto bail; + } + + inbuf.pgid = fl->tgid; + inbuf.namelen = strlen(current->comm) + 1; + inbuf.filelen = init.filelen; + inbuf.pageslen = 1; + inbuf.attrs = init.attrs; + inbuf.siglen = init.siglen; + fl->pd = 1; + + if (init.filelen && init.filefd) { + err = fastrpc_map_create(fl, init.filefd, init.filelen, &map); + if (err) + goto bail; + } + + memlen = 
ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4), + 1024 * 1024); + err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, + &imem); + if (err) { + fastrpc_map_put(map); + goto bail; + } + + fl->init_mem = imem; + args[0].ptr = (u64)(uintptr_t)&inbuf; + args[0].length = sizeof(inbuf); + args[0].fd = -1; + + args[1].ptr = (u64)(uintptr_t)current->comm; + args[1].length = inbuf.namelen; + args[1].fd = -1; + + args[2].ptr = (u64) init.file; + args[2].length = inbuf.filelen; + args[2].fd = init.filefd; + + pages[0].addr = imem->phys; + pages[0].size = imem->size; + + args[3].ptr = (u64)(uintptr_t) pages; + args[3].length = 1 * sizeof(*pages); + args[3].fd = -1; + + args[4].ptr = (u64)(uintptr_t)&inbuf.attrs; + args[4].length = sizeof(inbuf.attrs); + args[4].fd = -1; + + args[5].ptr = (u64)(uintptr_t) &inbuf.siglen; + args[5].length = sizeof(inbuf.siglen); + args[5].fd = -1; + + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0); + if (init.attrs) + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0); + + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, args); + + if (err) { + fastrpc_map_put(map); + fastrpc_buf_free(imem); + } + +bail: + kfree(args); + + return err; +} + +static struct fastrpc_session_ctx *fastrpc_session_alloc( + struct fastrpc_channel_ctx *cctx) +{ + struct fastrpc_session_ctx *session = NULL; + int i; + + spin_lock(&cctx->lock); + for (i = 0; i < cctx->sesscount; i++) { + if (!cctx->session[i].used && cctx->session[i].valid) { + cctx->session[i].used = true; + session = &cctx->session[i]; + break; + } + } + spin_unlock(&cctx->lock); + + return session; +} + +static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx, + struct fastrpc_session_ctx *session) +{ + spin_lock(&cctx->lock); + session->used = false; + spin_unlock(&cctx->lock); +} + +static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) +{ + struct fastrpc_invoke_args args[1]; + int tgid = 0; + u32 sc; + + tgid = fl->tgid; + args[0].ptr = (u64)(uintptr_t) &tgid; + args[0].length = sizeof(tgid); + args[0].fd = -1; + args[0].reserved = 0; + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0); + + return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, &args[0]); +} + +static int fastrpc_device_release(struct inode *inode, struct file *file) +{ + struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; + struct fastrpc_channel_ctx *cctx = fl->cctx; + struct fastrpc_invoke_ctx *ctx, *n; + struct fastrpc_map *map, *m; + + fastrpc_release_current_dsp_process(fl); + + spin_lock(&cctx->lock); + list_del(&fl->user); + spin_unlock(&cctx->lock); + + if (fl->init_mem) + fastrpc_buf_free(fl->init_mem); + + list_for_each_entry_safe(ctx, n, &fl->pending, node) { + list_del(&ctx->node); + fastrpc_context_put(ctx); + } + + list_for_each_entry_safe(map, m, &fl->maps, node) { + list_del(&map->node); + fastrpc_map_put(map); + } + + fastrpc_session_free(cctx, fl->sctx); + + mutex_destroy(&fl->mutex); + kfree(fl); + file->private_data = NULL; + + return 0; +} + +static int fastrpc_device_open(struct inode *inode, struct file *filp) +{ + struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data); + struct fastrpc_user *fl = NULL; + + fl = kzalloc(sizeof(*fl), GFP_KERNEL); + if (!fl) + return -ENOMEM; + + filp->private_data = fl; + spin_lock_init(&fl->lock); + mutex_init(&fl->mutex); + INIT_LIST_HEAD(&fl->pending); + INIT_LIST_HEAD(&fl->maps); + INIT_LIST_HEAD(&fl->user); + fl->tgid = current->tgid; + fl->cctx = cctx; + + fl->sctx = 
fastrpc_session_alloc(cctx); + if (!fl->sctx) { + dev_err(&cctx->rpdev->dev, "No session available\n"); + mutex_destroy(&fl->mutex); + kfree(fl); + + return -EBUSY; + } + + spin_lock(&cctx->lock); + list_add_tail(&fl->user, &cctx->users); + spin_unlock(&cctx->lock); + + return 0; +} + +static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp) +{ + struct dma_buf *buf; + int info; + + if (copy_from_user(&info, argp, sizeof(info))) + return -EFAULT; + + buf = dma_buf_get(info); + if (IS_ERR_OR_NULL(buf)) + return -EINVAL; + /* + * one for the last get and other for the ALLOC_DMA_BUFF ioctl + */ + dma_buf_put(buf); + dma_buf_put(buf); + + return 0; +} + +static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_alloc_dma_buf bp; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct fastrpc_buf *buf = NULL; + int err; + + if (copy_from_user(&bp, argp, sizeof(bp))) + return -EFAULT; + + err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); + if (err) + return err; + exp_info.ops = &fastrpc_dma_buf_ops; + exp_info.size = bp.size; + exp_info.flags = O_RDWR; + exp_info.priv = buf; + buf->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(buf->dmabuf)) { + err = PTR_ERR(buf->dmabuf); + fastrpc_buf_free(buf); + return err; + } + + bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE); + if (bp.fd < 0) { + dma_buf_put(buf->dmabuf); + return -EINVAL; + } + + if (copy_to_user(argp, &bp, sizeof(bp))) { + dma_buf_put(buf->dmabuf); + return -EFAULT; + } + + get_dma_buf(buf->dmabuf); + + return 0; +} + +static int fastrpc_init_attach(struct fastrpc_user *fl) +{ + struct fastrpc_invoke_args args[1]; + int tgid = fl->tgid; + u32 sc; + + args[0].ptr = (u64)(uintptr_t) &tgid; + args[0].length = sizeof(tgid); + args[0].fd = -1; + args[0].reserved = 0; + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0); + fl->pd = 0; + + return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, &args[0]); +} + +static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_invoke_args *args = NULL; + struct fastrpc_invoke inv; + u32 nscalars; + int err; + + if (copy_from_user(&inv, argp, sizeof(inv))) + return -EFAULT; + + /* nscalars is truncated here to max supported value */ + nscalars = REMOTE_SCALARS_LENGTH(inv.sc); + if (nscalars) { + args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(args, (void __user *)(uintptr_t)inv.args, + nscalars * sizeof(*args))) { + kfree(args); + return -EFAULT; + } + } + + err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args); + kfree(args); + + return err; +} + +static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; + char __user *argp = (char __user *)arg; + int err; + + switch (cmd) { + case FASTRPC_IOCTL_INVOKE: + err = fastrpc_invoke(fl, argp); + break; + case FASTRPC_IOCTL_INIT_ATTACH: + err = fastrpc_init_attach(fl); + break; + case FASTRPC_IOCTL_INIT_CREATE: + err = fastrpc_init_create_process(fl, argp); + break; + case FASTRPC_IOCTL_FREE_DMA_BUFF: + err = fastrpc_dmabuf_free(fl, argp); + break; + case FASTRPC_IOCTL_ALLOC_DMA_BUFF: + err = fastrpc_dmabuf_alloc(fl, argp); + break; + default: + err = -ENOTTY; + break; + } + + return err; +} + +static const struct file_operations fastrpc_fops = { + .open = fastrpc_device_open, + .release = fastrpc_device_release, + .unlocked_ioctl = fastrpc_device_ioctl, + .compat_ioctl = 
fastrpc_device_ioctl, +}; + +static int fastrpc_cb_probe(struct platform_device *pdev) +{ + struct fastrpc_channel_ctx *cctx; + struct fastrpc_session_ctx *sess; + struct device *dev = &pdev->dev; + int i, sessions = 0; + + cctx = dev_get_drvdata(dev->parent); + if (!cctx) + return -EINVAL; + + of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions); + + spin_lock(&cctx->lock); + sess = &cctx->session[cctx->sesscount]; + sess->used = false; + sess->valid = true; + sess->dev = dev; + dev_set_drvdata(dev, sess); + + if (of_property_read_u32(dev->of_node, "reg", &sess->sid)) + dev_info(dev, "FastRPC Session ID not specified in DT\n"); + + if (sessions > 0) { + struct fastrpc_session_ctx *dup_sess; + + for (i = 1; i < sessions; i++) { + if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS) + break; + dup_sess = &cctx->session[cctx->sesscount]; + memcpy(dup_sess, sess, sizeof(*dup_sess)); + } + } + cctx->sesscount++; + spin_unlock(&cctx->lock); + dma_set_mask(dev, DMA_BIT_MASK(32)); + + return 0; +} + +static int fastrpc_cb_remove(struct platform_device *pdev) +{ + struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent); + struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev); + int i; + + spin_lock(&cctx->lock); + for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) { + if (cctx->session[i].sid == sess->sid) { + cctx->session[i].valid = false; + cctx->sesscount--; + } + } + spin_unlock(&cctx->lock); + + return 0; +} + +static const struct of_device_id fastrpc_match_table[] = { + { .compatible = "qcom,fastrpc-compute-cb", }, + {} +}; + +static struct platform_driver fastrpc_cb_driver = { + .probe = fastrpc_cb_probe, + .remove = fastrpc_cb_remove, + .driver = { + .name = "qcom,fastrpc-cb", + .of_match_table = fastrpc_match_table, + .suppress_bind_attrs = true, + }, +}; + +static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) +{ + struct device *rdev = &rpdev->dev; + struct fastrpc_channel_ctx *data; + int i, err, domain_id = -1; + const char *domain; + + data = devm_kzalloc(rdev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + err = of_property_read_string(rdev->of_node, "label", &domain); + if (err) { + dev_info(rdev, "FastRPC Domain not specified in DT\n"); + return err; + } + + for (i = 0; i <= CDSP_DOMAIN_ID; i++) { + if (!strcmp(domains[i], domain)) { + domain_id = i; + break; + } + } + + if (domain_id < 0) { + dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id); + return -EINVAL; + } + + data->miscdev.minor = MISC_DYNAMIC_MINOR; + data->miscdev.name = kasprintf(GFP_KERNEL, "fastrpc-%s", + domains[domain_id]); + data->miscdev.fops = &fastrpc_fops; + err = misc_register(&data->miscdev); + if (err) + return err; + + dev_set_drvdata(&rpdev->dev, data); + dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32)); + INIT_LIST_HEAD(&data->users); + spin_lock_init(&data->lock); + idr_init(&data->ctx_idr); + data->domain_id = domain_id; + data->rpdev = rpdev; + + return of_platform_populate(rdev->of_node, NULL, NULL, rdev); +} + +static void fastrpc_notify_users(struct fastrpc_user *user) +{ + struct fastrpc_invoke_ctx *ctx; + + spin_lock(&user->lock); + list_for_each_entry(ctx, &user->pending, node) + complete(&ctx->work); + spin_unlock(&user->lock); +} + +static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) +{ + struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); + struct fastrpc_user *user; + + spin_lock(&cctx->lock); + list_for_each_entry(user, &cctx->users, user) + fastrpc_notify_users(user); + spin_unlock(&cctx->lock); + + 
misc_deregister(&cctx->miscdev); + of_platform_depopulate(&rpdev->dev); + kfree(cctx); +} + +static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data, + int len, void *priv, u32 addr) +{ + struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); + struct fastrpc_invoke_rsp *rsp = data; + struct fastrpc_invoke_ctx *ctx; + unsigned long flags; + unsigned long ctxid; + + if (len < sizeof(*rsp)) + return -EINVAL; + + ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4); + + spin_lock_irqsave(&cctx->lock, flags); + ctx = idr_find(&cctx->ctx_idr, ctxid); + spin_unlock_irqrestore(&cctx->lock, flags); + + if (!ctx) { + dev_err(&rpdev->dev, "No context ID matches response\n"); + return -ENOENT; + } + + ctx->retval = rsp->retval; + complete(&ctx->work); + fastrpc_context_put(ctx); + + return 0; +} + +static const struct of_device_id fastrpc_rpmsg_of_match[] = { + { .compatible = "qcom,fastrpc" }, + { }, +}; +MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match); + +static struct rpmsg_driver fastrpc_driver = { + .probe = fastrpc_rpmsg_probe, + .remove = fastrpc_rpmsg_remove, + .callback = fastrpc_rpmsg_callback, + .drv = { + .name = "qcom,fastrpc", + .of_match_table = fastrpc_rpmsg_of_match, + }, +}; + +static int fastrpc_init(void) +{ + int ret; + + ret = platform_driver_register(&fastrpc_cb_driver); + if (ret < 0) { + pr_err("fastrpc: failed to register cb driver\n"); + return ret; + } + + ret = register_rpmsg_driver(&fastrpc_driver); + if (ret < 0) { + pr_err("fastrpc: failed to register rpmsg driver\n"); + platform_driver_unregister(&fastrpc_cb_driver); + return ret; + } + + return 0; +} +module_init(fastrpc_init); + +static void fastrpc_exit(void) +{ + platform_driver_unregister(&fastrpc_cb_driver); + unregister_rpmsg_driver(&fastrpc_driver); +} +module_exit(fastrpc_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig new file mode 100644 index 000000000000..99db2b82ada6 --- /dev/null +++ b/drivers/misc/habanalabs/Kconfig @@ -0,0 +1,25 @@ +# +# HabanaLabs AI accelerators driver +# + +config HABANA_AI + tristate "HabanaAI accelerators (habanalabs)" + depends on PCI && HAS_IOMEM + select FRAME_VECTOR + select DMA_SHARED_BUFFER + select GENERIC_ALLOCATOR + select HWMON + help + Enables PCIe card driver for Habana's AI Processors (AIP) that are + designed to accelerate Deep Learning inference and training workloads. + + The driver manages the PCIe devices and provides IOCTL interface for + the user to submit workloads to the devices. + + The user-space interface is described in + include/uapi/misc/habanalabs.h + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called habanalabs. 
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile new file mode 100644 index 000000000000..c6592db59b25 --- /dev/null +++ b/drivers/misc/habanalabs/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for HabanaLabs AI accelerators driver +# + +obj-m := habanalabs.o + +habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \ + command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \ + command_submission.o mmu.o + +habanalabs-$(CONFIG_DEBUG_FS) += debugfs.o + +include $(src)/goya/Makefile +habanalabs-y += $(HL_GOYA_FILES) diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c new file mode 100644 index 000000000000..f54e7971a762 --- /dev/null +++ b/drivers/misc/habanalabs/asid.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include + +int hl_asid_init(struct hl_device *hdev) +{ + hdev->asid_bitmap = kcalloc(BITS_TO_LONGS(hdev->asic_prop.max_asid), + sizeof(*hdev->asid_bitmap), GFP_KERNEL); + if (!hdev->asid_bitmap) + return -ENOMEM; + + mutex_init(&hdev->asid_mutex); + + /* ASID 0 is reserved for KMD */ + set_bit(0, hdev->asid_bitmap); + + return 0; +} + +void hl_asid_fini(struct hl_device *hdev) +{ + mutex_destroy(&hdev->asid_mutex); + kfree(hdev->asid_bitmap); +} + +unsigned long hl_asid_alloc(struct hl_device *hdev) +{ + unsigned long found; + + mutex_lock(&hdev->asid_mutex); + + found = find_first_zero_bit(hdev->asid_bitmap, + hdev->asic_prop.max_asid); + if (found == hdev->asic_prop.max_asid) + found = 0; + else + set_bit(found, hdev->asid_bitmap); + + mutex_unlock(&hdev->asid_mutex); + + return found; +} + +void hl_asid_free(struct hl_device *hdev, unsigned long asid) +{ + if (WARN((asid == 0 || asid >= hdev->asic_prop.max_asid), + "Invalid ASID %lu", asid)) + return; + clear_bit(asid, hdev->asid_bitmap); +} diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c new file mode 100644 index 000000000000..85f75806a9a7 --- /dev/null +++ b/drivers/misc/habanalabs/command_buffer.c @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include +#include "habanalabs.h" + +#include +#include + +static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) +{ + hdev->asic_funcs->dma_free_coherent(hdev, cb->size, + (void *) (uintptr_t) cb->kernel_address, + cb->bus_address); + kfree(cb); +} + +static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) +{ + if (cb->is_pool) { + spin_lock(&hdev->cb_pool_lock); + list_add(&cb->pool_list, &hdev->cb_pool); + spin_unlock(&hdev->cb_pool_lock); + } else { + cb_fini(hdev, cb); + } +} + +static void cb_release(struct kref *ref) +{ + struct hl_device *hdev; + struct hl_cb *cb; + + cb = container_of(ref, struct hl_cb, refcount); + hdev = cb->hdev; + + hl_debugfs_remove_cb(cb); + + cb_do_release(hdev, cb); +} + +static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size, + int ctx_id) +{ + struct hl_cb *cb; + void *p; + + /* + * We use of GFP_ATOMIC here because this function can be called from + * the latency-sensitive code path for command submission. Due to H/W + * limitations in some of the ASICs, the kernel must copy the user CB + * that is designated for an external queue and actually enqueue + * the kernel's copy. Hence, we must never sleep in this code section + * and must use GFP_ATOMIC for all memory allocations. 
+ */ + if (ctx_id == HL_KERNEL_ASID_ID) + cb = kzalloc(sizeof(*cb), GFP_ATOMIC); + else + cb = kzalloc(sizeof(*cb), GFP_KERNEL); + + if (!cb) + return NULL; + + if (ctx_id == HL_KERNEL_ASID_ID) + p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size, + &cb->bus_address, GFP_ATOMIC); + else + p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size, + &cb->bus_address, + GFP_USER | __GFP_ZERO); + if (!p) { + dev_err(hdev->dev, + "failed to allocate %d of dma memory for CB\n", + cb_size); + kfree(cb); + return NULL; + } + + cb->kernel_address = (u64) (uintptr_t) p; + cb->size = cb_size; + + return cb; +} + +int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 cb_size, u64 *handle, int ctx_id) +{ + struct hl_cb *cb; + bool alloc_new_cb = true; + int rc; + + /* + * Can't use generic function to check this because of special case + * where we create a CB as part of the reset process + */ + if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) && + (ctx_id != HL_KERNEL_ASID_ID))) { + dev_warn_ratelimited(hdev->dev, + "Device is disabled or in reset. Can't create new CBs\n"); + rc = -EBUSY; + goto out_err; + } + + if (cb_size > HL_MAX_CB_SIZE) { + dev_err(hdev->dev, + "CB size %d must be less then %d\n", + cb_size, HL_MAX_CB_SIZE); + rc = -EINVAL; + goto out_err; + } + + /* Minimum allocation must be PAGE SIZE */ + if (cb_size < PAGE_SIZE) + cb_size = PAGE_SIZE; + + if (ctx_id == HL_KERNEL_ASID_ID && + cb_size <= hdev->asic_prop.cb_pool_cb_size) { + + spin_lock(&hdev->cb_pool_lock); + if (!list_empty(&hdev->cb_pool)) { + cb = list_first_entry(&hdev->cb_pool, typeof(*cb), + pool_list); + list_del(&cb->pool_list); + spin_unlock(&hdev->cb_pool_lock); + alloc_new_cb = false; + } else { + spin_unlock(&hdev->cb_pool_lock); + dev_dbg(hdev->dev, "CB pool is empty\n"); + } + } + + if (alloc_new_cb) { + cb = hl_cb_alloc(hdev, cb_size, ctx_id); + if (!cb) { + rc = -ENOMEM; + goto out_err; + } + } + + cb->hdev = hdev; + cb->ctx_id = ctx_id; + + spin_lock(&mgr->cb_lock); + rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC); + spin_unlock(&mgr->cb_lock); + + if (rc < 0) { + dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n"); + goto release_cb; + } + + cb->id = rc; + + kref_init(&cb->refcount); + spin_lock_init(&cb->lock); + + /* + * idr is 32-bit so we can safely OR it with a mask that is above + * 32 bit + */ + *handle = cb->id | HL_MMAP_CB_MASK; + *handle <<= PAGE_SHIFT; + + hl_debugfs_add_cb(cb); + + return 0; + +release_cb: + cb_do_release(hdev, cb); +out_err: + *handle = 0; + + return rc; +} + +int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) +{ + struct hl_cb *cb; + u32 handle; + int rc = 0; + + /* + * handle was given to user to do mmap, I need to shift it back to + * how the idr module gave it to me + */ + cb_handle >>= PAGE_SHIFT; + handle = (u32) cb_handle; + + spin_lock(&mgr->cb_lock); + + cb = idr_find(&mgr->cb_handles, handle); + if (cb) { + idr_remove(&mgr->cb_handles, handle); + spin_unlock(&mgr->cb_lock); + kref_put(&cb->refcount, cb_release); + } else { + spin_unlock(&mgr->cb_lock); + dev_err(hdev->dev, + "CB destroy failed, no match to handle 0x%x\n", handle); + rc = -EINVAL; + } + + return rc; +} + +int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) +{ + union hl_cb_args *args = data; + struct hl_device *hdev = hpriv->hdev; + u64 handle; + int rc; + + switch (args->in.op) { + case HL_CB_OP_CREATE: + rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size, + &handle, hpriv->ctx->asid); + memset(args, 0, sizeof(*args)); + 
args->out.cb_handle = handle; + break; + case HL_CB_OP_DESTROY: + rc = hl_cb_destroy(hdev, &hpriv->cb_mgr, + args->in.cb_handle); + break; + default: + rc = -ENOTTY; + break; + } + + return rc; +} + +static void cb_vm_close(struct vm_area_struct *vma) +{ + struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data; + long new_mmap_size; + + new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start); + + if (new_mmap_size > 0) { + cb->mmap_size = new_mmap_size; + return; + } + + spin_lock(&cb->lock); + cb->mmap = false; + spin_unlock(&cb->lock); + + hl_cb_put(cb); + vma->vm_private_data = NULL; +} + +static const struct vm_operations_struct cb_vm_ops = { + .close = cb_vm_close +}; + +int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cb *cb; + phys_addr_t address; + u32 handle; + int rc; + + handle = vma->vm_pgoff; + + /* reference was taken here */ + cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle); + if (!cb) { + dev_err(hdev->dev, + "CB mmap failed, no match to handle %d\n", handle); + return -EINVAL; + } + + /* Validation check */ + if ((vma->vm_end - vma->vm_start) != ALIGN(cb->size, PAGE_SIZE)) { + dev_err(hdev->dev, + "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n", + vma->vm_end - vma->vm_start, cb->size); + rc = -EINVAL; + goto put_cb; + } + + spin_lock(&cb->lock); + + if (cb->mmap) { + dev_err(hdev->dev, + "CB mmap failed, CB already mmaped to user\n"); + rc = -EINVAL; + goto release_lock; + } + + cb->mmap = true; + + spin_unlock(&cb->lock); + + vma->vm_ops = &cb_vm_ops; + + /* + * Note: We're transferring the cb reference to + * vma->vm_private_data here. + */ + + vma->vm_private_data = cb; + + /* Calculate address for CB */ + address = virt_to_phys((void *) (uintptr_t) cb->kernel_address); + + rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address, + address, cb->size); + + if (rc) { + spin_lock(&cb->lock); + cb->mmap = false; + goto release_lock; + } + + cb->mmap_size = cb->size; + + return 0; + +release_lock: + spin_unlock(&cb->lock); +put_cb: + hl_cb_put(cb); + return rc; +} + +struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 handle) +{ + struct hl_cb *cb; + + spin_lock(&mgr->cb_lock); + cb = idr_find(&mgr->cb_handles, handle); + + if (!cb) { + spin_unlock(&mgr->cb_lock); + dev_warn(hdev->dev, + "CB get failed, no match to handle %d\n", handle); + return NULL; + } + + kref_get(&cb->refcount); + + spin_unlock(&mgr->cb_lock); + + return cb; + +} + +void hl_cb_put(struct hl_cb *cb) +{ + kref_put(&cb->refcount, cb_release); +} + +void hl_cb_mgr_init(struct hl_cb_mgr *mgr) +{ + spin_lock_init(&mgr->cb_lock); + idr_init(&mgr->cb_handles); +} + +void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr) +{ + struct hl_cb *cb; + struct idr *idp; + u32 id; + + idp = &mgr->cb_handles; + + idr_for_each_entry(idp, cb, id) { + if (kref_put(&cb->refcount, cb_release) != 1) + dev_err(hdev->dev, + "CB %d for CTX ID %d is still alive\n", + id, cb->ctx_id); + } + + idr_destroy(&mgr->cb_handles); +} + +struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size) +{ + u64 cb_handle; + struct hl_cb *cb; + int rc; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle, + HL_KERNEL_ASID_ID); + if (rc) { + dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc); + return NULL; + } + + cb_handle >>= PAGE_SHIFT; + cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!cb, "Kernel CB 
handle invalid 0x%x\n", (u32) cb_handle); + if (!cb) + goto destroy_cb; + + return cb; + +destroy_cb: + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT); + + return NULL; +} + +int hl_cb_pool_init(struct hl_device *hdev) +{ + struct hl_cb *cb; + int i; + + INIT_LIST_HEAD(&hdev->cb_pool); + spin_lock_init(&hdev->cb_pool_lock); + + for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) { + cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, + HL_KERNEL_ASID_ID); + if (cb) { + cb->is_pool = true; + list_add(&cb->pool_list, &hdev->cb_pool); + } else { + hl_cb_pool_fini(hdev); + return -ENOMEM; + } + } + + return 0; +} + +int hl_cb_pool_fini(struct hl_device *hdev) +{ + struct hl_cb *cb, *tmp; + + list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { + list_del(&cb->pool_list); + cb_fini(hdev, cb); + } + + return 0; +} diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c new file mode 100644 index 000000000000..3525236ed8d9 --- /dev/null +++ b/drivers/misc/habanalabs/command_submission.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include +#include "habanalabs.h" + +#include +#include + +static void job_wq_completion(struct work_struct *work); +static long _hl_cs_wait_ioctl(struct hl_device *hdev, + struct hl_ctx *ctx, u64 timeout_us, u64 seq); +static void cs_do_release(struct kref *ref); + +static const char *hl_fence_get_driver_name(struct dma_fence *fence) +{ + return "HabanaLabs"; +} + +static const char *hl_fence_get_timeline_name(struct dma_fence *fence) +{ + struct hl_dma_fence *hl_fence = + container_of(fence, struct hl_dma_fence, base_fence); + + return dev_name(hl_fence->hdev->dev); +} + +static bool hl_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void hl_fence_release(struct dma_fence *fence) +{ + struct hl_dma_fence *hl_fence = + container_of(fence, struct hl_dma_fence, base_fence); + + kfree_rcu(hl_fence, base_fence.rcu); +} + +static const struct dma_fence_ops hl_fence_ops = { + .get_driver_name = hl_fence_get_driver_name, + .get_timeline_name = hl_fence_get_timeline_name, + .enable_signaling = hl_fence_enable_signaling, + .wait = dma_fence_default_wait, + .release = hl_fence_release +}; + +static void cs_get(struct hl_cs *cs) +{ + kref_get(&cs->refcount); +} + +static int cs_get_unless_zero(struct hl_cs *cs) +{ + return kref_get_unless_zero(&cs->refcount); +} + +static void cs_put(struct hl_cs *cs) +{ + kref_put(&cs->refcount, cs_do_release); +} + +/* + * cs_parser - parse the user command submission + * + * @hpriv : pointer to the private data of the fd + * @job : pointer to the job that holds the command submission info + * + * The function parses the command submission of the user. 
It calls the + * ASIC specific parser, which returns a list of memory blocks to send + * to the device as different command buffers + * + */ +static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_parser parser; + int rc; + + parser.ctx_id = job->cs->ctx->asid; + parser.cs_sequence = job->cs->sequence; + parser.job_id = job->id; + + parser.hw_queue_id = job->hw_queue_id; + parser.job_userptr_list = &job->userptr_list; + parser.patched_cb = NULL; + parser.user_cb = job->user_cb; + parser.user_cb_size = job->user_cb_size; + parser.ext_queue = job->ext_queue; + job->patched_cb = NULL; + parser.use_virt_addr = hdev->mmu_enable; + + rc = hdev->asic_funcs->cs_parser(hdev, &parser); + if (job->ext_queue) { + if (!rc) { + job->patched_cb = parser.patched_cb; + job->job_cb_size = parser.patched_cb_size; + + spin_lock(&job->patched_cb->lock); + job->patched_cb->cs_cnt++; + spin_unlock(&job->patched_cb->lock); + } + + /* + * Whether the parsing worked or not, we don't need the + * original CB anymore because it was already parsed and + * won't be accessed again for this CS + */ + spin_lock(&job->user_cb->lock); + job->user_cb->cs_cnt--; + spin_unlock(&job->user_cb->lock); + hl_cb_put(job->user_cb); + job->user_cb = NULL; + } + + return rc; +} + +static void free_job(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct hl_cs *cs = job->cs; + + if (job->ext_queue) { + hl_userptr_delete_list(hdev, &job->userptr_list); + + /* + * We might arrive here from rollback and patched CB wasn't + * created, so we need to check it's not NULL + */ + if (job->patched_cb) { + spin_lock(&job->patched_cb->lock); + job->patched_cb->cs_cnt--; + spin_unlock(&job->patched_cb->lock); + + hl_cb_put(job->patched_cb); + } + } + + /* + * This is the only place where there can be multiple threads + * modifying the list at the same time + */ + spin_lock(&cs->job_lock); + list_del(&job->cs_node); + spin_unlock(&cs->job_lock); + + hl_debugfs_remove_job(hdev, job); + + if (job->ext_queue) + cs_put(cs); + + kfree(job); +} + +static void cs_do_release(struct kref *ref) +{ + struct hl_cs *cs = container_of(ref, struct hl_cs, + refcount); + struct hl_device *hdev = cs->ctx->hdev; + struct hl_cs_job *job, *tmp; + + cs->completed = true; + + /* + * Although if we reached here it means that all external jobs have + * finished, because each one of them took refcnt to CS, we still + * need to go over the internal jobs and free them. Otherwise, we + * will have leaked memory and what's worse, the CS object (and + * potentially the CTX object) could be released, while the JOB + * still holds a pointer to them (but no reference). 
+ */ + list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) + free_job(hdev, job); + + /* We also need to update CI for internal queues */ + if (cs->submitted) { + hl_int_hw_queue_update_ci(cs); + + spin_lock(&hdev->hw_queues_mirror_lock); + /* remove CS from hw_queues mirror list */ + list_del_init(&cs->mirror_node); + spin_unlock(&hdev->hw_queues_mirror_lock); + + /* + * Don't cancel TDR in case this CS was timedout because we + * might be running from the TDR context + */ + if ((!cs->timedout) && + (hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) { + struct hl_cs *next; + + if (cs->tdr_active) + cancel_delayed_work_sync(&cs->work_tdr); + + spin_lock(&hdev->hw_queues_mirror_lock); + + /* queue TDR for next CS */ + next = list_first_entry_or_null( + &hdev->hw_queues_mirror_list, + struct hl_cs, mirror_node); + + if ((next) && (!next->tdr_active)) { + next->tdr_active = true; + schedule_delayed_work(&next->work_tdr, + hdev->timeout_jiffies); + } + + spin_unlock(&hdev->hw_queues_mirror_lock); + } + } + + /* + * Must be called before hl_ctx_put because inside we use ctx to get + * the device + */ + hl_debugfs_remove_cs(cs); + + hl_ctx_put(cs->ctx); + + if (cs->timedout) + dma_fence_set_error(cs->fence, -ETIMEDOUT); + else if (cs->aborted) + dma_fence_set_error(cs->fence, -EIO); + + dma_fence_signal(cs->fence); + dma_fence_put(cs->fence); + + kfree(cs); +} + +static void cs_timedout(struct work_struct *work) +{ + struct hl_device *hdev; + int ctx_asid, rc; + struct hl_cs *cs = container_of(work, struct hl_cs, + work_tdr.work); + rc = cs_get_unless_zero(cs); + if (!rc) + return; + + if ((!cs->submitted) || (cs->completed)) { + cs_put(cs); + return; + } + + /* Mark the CS is timed out so we won't try to cancel its TDR */ + cs->timedout = true; + + hdev = cs->ctx->hdev; + ctx_asid = cs->ctx->asid; + + /* TODO: add information about last signaled seq and last emitted seq */ + dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence); + + cs_put(cs); + + if (hdev->reset_on_lockup) + hl_device_reset(hdev, false, false); +} + +static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx, + struct hl_cs **cs_new) +{ + struct hl_dma_fence *fence; + struct dma_fence *other = NULL; + struct hl_cs *cs; + int rc; + + cs = kzalloc(sizeof(*cs), GFP_ATOMIC); + if (!cs) + return -ENOMEM; + + cs->ctx = ctx; + cs->submitted = false; + cs->completed = false; + INIT_LIST_HEAD(&cs->job_list); + INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); + kref_init(&cs->refcount); + spin_lock_init(&cs->job_lock); + + fence = kmalloc(sizeof(*fence), GFP_ATOMIC); + if (!fence) { + rc = -ENOMEM; + goto free_cs; + } + + fence->hdev = hdev; + spin_lock_init(&fence->lock); + cs->fence = &fence->base_fence; + + spin_lock(&ctx->cs_lock); + + fence->cs_seq = ctx->cs_sequence; + other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)]; + if ((other) && (!dma_fence_is_signaled(other))) { + spin_unlock(&ctx->cs_lock); + rc = -EAGAIN; + goto free_fence; + } + + dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock, + ctx->asid, ctx->cs_sequence); + + cs->sequence = fence->cs_seq; + + ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] = + &fence->base_fence; + ctx->cs_sequence++; + + dma_fence_get(&fence->base_fence); + + dma_fence_put(other); + + spin_unlock(&ctx->cs_lock); + + *cs_new = cs; + + return 0; + +free_fence: + kfree(fence); +free_cs: + kfree(cs); + return rc; +} + +static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) +{ + struct hl_cs_job *job, *tmp; + + 
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) + free_job(hdev, job); +} + +void hl_cs_rollback_all(struct hl_device *hdev) +{ + struct hl_cs *cs, *tmp; + + /* flush all completions */ + flush_workqueue(hdev->cq_wq); + + /* Make sure we don't have leftovers in the H/W queues mirror list */ + list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list, + mirror_node) { + cs_get(cs); + cs->aborted = true; + dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", + cs->ctx->asid, cs->sequence); + cs_rollback(hdev, cs); + cs_put(cs); + } +} + +static void job_wq_completion(struct work_struct *work) +{ + struct hl_cs_job *job = container_of(work, struct hl_cs_job, + finish_work); + struct hl_cs *cs = job->cs; + struct hl_device *hdev = cs->ctx->hdev; + + /* job is no longer needed */ + free_job(hdev, job); +} + +static struct hl_cb *validate_queue_index(struct hl_device *hdev, + struct hl_cb_mgr *cb_mgr, + struct hl_cs_chunk *chunk, + bool *ext_queue) +{ + struct asic_fixed_properties *asic = &hdev->asic_prop; + struct hw_queue_properties *hw_queue_prop; + u32 cb_handle; + struct hl_cb *cb; + + /* Assume external queue */ + *ext_queue = true; + + hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; + + if ((chunk->queue_index >= HL_MAX_QUEUES) || + (hw_queue_prop->type == QUEUE_TYPE_NA)) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + return NULL; + } + + if (hw_queue_prop->kmd_only) { + dev_err(hdev->dev, "Queue index %d is restricted for KMD\n", + chunk->queue_index); + return NULL; + } else if (hw_queue_prop->type == QUEUE_TYPE_INT) { + *ext_queue = false; + return (struct hl_cb *) (uintptr_t) chunk->cb_handle; + } + + /* Retrieve CB object */ + cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT); + + cb = hl_cb_get(hdev, cb_mgr, cb_handle); + if (!cb) { + dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle); + return NULL; + } + + if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) { + dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size); + goto release_cb; + } + + spin_lock(&cb->lock); + cb->cs_cnt++; + spin_unlock(&cb->lock); + + return cb; + +release_cb: + hl_cb_put(cb); + return NULL; +} + +struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue) +{ + struct hl_cs_job *job; + + job = kzalloc(sizeof(*job), GFP_ATOMIC); + if (!job) + return NULL; + + job->ext_queue = ext_queue; + + if (job->ext_queue) { + INIT_LIST_HEAD(&job->userptr_list); + INIT_WORK(&job->finish_work, job_wq_completion); + } + + return job; +} + +static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks, + u32 num_chunks, u64 *cs_seq) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_chunk *cs_chunk_array; + struct hl_cs_job *job; + struct hl_cs *cs; + struct hl_cb *cb; + bool ext_queue_present = false; + u32 size_to_copy; + int rc, i, parse_cnt; + + *cs_seq = ULLONG_MAX; + + if (num_chunks > HL_MAX_JOBS_PER_CS) { + dev_err(hdev->dev, + "Number of chunks can NOT be larger than %d\n", + HL_MAX_JOBS_PER_CS); + rc = -EINVAL; + goto out; + } + + cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), + GFP_ATOMIC); + if (!cs_chunk_array) { + rc = -ENOMEM; + goto out; + } + + size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); + if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { + dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); + rc = -EFAULT; + goto free_cs_chunk_array; + } + + /* increment refcnt for context */ + hl_ctx_get(hdev, hpriv->ctx); + + rc = allocate_cs(hdev, 
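Just above, _hl_cs_ioctl() starts by capping num_chunks at HL_MAX_JOBS_PER_CS and then copying the user's hl_cs_chunk array with kmalloc_array() and copy_from_user(). The same bounded-copy pattern in a self-contained sketch; my_chunk and the cap of 64 are placeholders, not the driver's values:

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/uaccess.h>

    struct my_chunk {
            u64 handle;
            u32 size;
    };

    static int copy_chunks(void __user *uptr, u32 n, struct my_chunk **out)
    {
            struct my_chunk *arr;

            if (n == 0 || n > 64)           /* cap, like HL_MAX_JOBS_PER_CS */
                    return -EINVAL;

            arr = kmalloc_array(n, sizeof(*arr), GFP_KERNEL);
            if (!arr)
                    return -ENOMEM;

            if (copy_from_user(arr, uptr, n * sizeof(*arr))) {
                    kfree(arr);
                    return -EFAULT;
            }

            *out = arr;
            return 0;
    }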
hpriv->ctx, &cs); + if (rc) { + hl_ctx_put(hpriv->ctx); + goto free_cs_chunk_array; + } + + *cs_seq = cs->sequence; + + hl_debugfs_add_cs(cs); + + /* Validate ALL the CS chunks before submitting the CS */ + for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) { + struct hl_cs_chunk *chunk = &cs_chunk_array[i]; + bool ext_queue; + + cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk, + &ext_queue); + if (ext_queue) { + ext_queue_present = true; + if (!cb) { + rc = -EINVAL; + goto free_cs_object; + } + } + + job = hl_cs_allocate_job(hdev, ext_queue); + if (!job) { + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + if (ext_queue) + goto release_cb; + else + goto free_cs_object; + } + + job->id = i + 1; + job->cs = cs; + job->user_cb = cb; + job->user_cb_size = chunk->cb_size; + if (job->ext_queue) + job->job_cb_size = cb->size; + else + job->job_cb_size = chunk->cb_size; + job->hw_queue_id = chunk->queue_index; + + cs->jobs_in_queue_cnt[job->hw_queue_id]++; + + list_add_tail(&job->cs_node, &cs->job_list); + + /* + * Increment CS reference. When CS reference is 0, CS is + * done and can be signaled to user and free all its resources + * Only increment for JOB on external queues, because only + * for those JOBs we get completion + */ + if (job->ext_queue) + cs_get(cs); + + hl_debugfs_add_job(hdev, job); + + rc = cs_parser(hpriv, job); + if (rc) { + dev_err(hdev->dev, + "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n", + cs->ctx->asid, cs->sequence, job->id, rc); + goto free_cs_object; + } + } + + if (!ext_queue_present) { + dev_err(hdev->dev, + "Reject CS %d.%llu because no external queues jobs\n", + cs->ctx->asid, cs->sequence); + rc = -EINVAL; + goto free_cs_object; + } + + rc = hl_hw_queue_schedule_cs(cs); + if (rc) { + dev_err(hdev->dev, + "Failed to submit CS %d.%llu to H/W queues, error %d\n", + cs->ctx->asid, cs->sequence, rc); + goto free_cs_object; + } + + rc = HL_CS_STATUS_SUCCESS; + goto put_cs; + +release_cb: + spin_lock(&cb->lock); + cb->cs_cnt--; + spin_unlock(&cb->lock); + hl_cb_put(cb); +free_cs_object: + cs_rollback(hdev, cs); + *cs_seq = ULLONG_MAX; + /* The path below is both for good and erroneous exits */ +put_cs: + /* We finished with the CS in this function, so put the ref */ + cs_put(cs); +free_cs_chunk_array: + kfree(cs_chunk_array); +out: + return rc; +} + +int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) +{ + struct hl_device *hdev = hpriv->hdev; + union hl_cs_args *args = data; + struct hl_ctx *ctx = hpriv->ctx; + void __user *chunks; + u32 num_chunks; + u64 cs_seq = ULONG_MAX; + int rc, do_restore; + bool need_soft_reset = false; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn(hdev->dev, + "Device is %s. Can't submit new CS\n", + atomic_read(&hdev->in_reset) ? "in_reset" : "disabled"); + rc = -EBUSY; + goto out; + } + + do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0); + + if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) { + long ret; + + chunks = (void __user *)(uintptr_t)args->in.chunks_restore; + num_chunks = args->in.num_chunks_restore; + + mutex_lock(&hpriv->restore_phase_mutex); + + if (do_restore) { + rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); + if (rc) { + dev_err_ratelimited(hdev->dev, + "Failed to switch to context %d, rejecting CS! %d\n", + ctx->asid, rc); + /* + * If we timedout, or if the device is not IDLE + * while we want to do context-switch (-EBUSY), + * we need to soft-reset because QMAN is + * probably stuck. 
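For orientation, this is roughly what a caller on the other side of this ioctl might do. The field names (cb_handle, queue_index, cb_size, chunks_execute, num_chunks_execute) come from the handler above; the uAPI header path and the HL_IOCTL_CS request macro are assumptions, since the uAPI header is not part of this hunk:

    /* Hypothetical user-space sketch; header path and ioctl macro are assumed. */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>    /* uAPI header; exact path is an assumption */

    static int submit_one_cb(int fd, uint64_t cb_handle, uint32_t queue_index,
                             uint32_t cb_size)
    {
            struct hl_cs_chunk chunk;
            union hl_cs_args args;

            memset(&chunk, 0, sizeof(chunk));
            chunk.cb_handle = cb_handle;      /* handle returned by the CB ioctl */
            chunk.queue_index = queue_index;  /* external queue to run on */
            chunk.cb_size = cb_size;          /* bytes of the command buffer */

            memset(&args, 0, sizeof(args));
            args.in.chunks_execute = (uint64_t)(uintptr_t)&chunk;
            args.in.num_chunks_execute = 1;

            return ioctl(fd, HL_IOCTL_CS, &args);   /* HL_IOCTL_CS is assumed */
    }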
However, we can't call to + * reset here directly because of deadlock, so + * need to do it at the very end of this + * function + */ + if ((rc == -ETIMEDOUT) || (rc == -EBUSY)) + need_soft_reset = true; + mutex_unlock(&hpriv->restore_phase_mutex); + goto out; + } + } + + hdev->asic_funcs->restore_phase_topology(hdev); + + if (num_chunks == 0) { + dev_dbg(hdev->dev, + "Need to run restore phase but restore CS is empty\n"); + rc = 0; + } else { + rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, + &cs_seq); + } + + mutex_unlock(&hpriv->restore_phase_mutex); + + if (rc) { + dev_err(hdev->dev, + "Failed to submit restore CS for context %d (%d)\n", + ctx->asid, rc); + goto out; + } + + /* Need to wait for restore completion before execution phase */ + if (num_chunks > 0) { + ret = _hl_cs_wait_ioctl(hdev, ctx, + jiffies_to_usecs(hdev->timeout_jiffies), + cs_seq); + if (ret <= 0) { + dev_err(hdev->dev, + "Restore CS for context %d failed to complete %ld\n", + ctx->asid, ret); + rc = -ENOEXEC; + goto out; + } + } + + ctx->thread_restore_wait_token = 1; + } else if (!ctx->thread_restore_wait_token) { + u32 tmp; + + rc = hl_poll_timeout_memory(hdev, + (u64) (uintptr_t) &ctx->thread_restore_wait_token, + jiffies_to_usecs(hdev->timeout_jiffies), + &tmp); + + if (rc || !tmp) { + dev_err(hdev->dev, + "restore phase hasn't finished in time\n"); + rc = -ETIMEDOUT; + goto out; + } + } + + chunks = (void __user *)(uintptr_t)args->in.chunks_execute; + num_chunks = args->in.num_chunks_execute; + + if (num_chunks == 0) { + dev_err(hdev->dev, + "Got execute CS with 0 chunks, context %d\n", + ctx->asid); + rc = -EINVAL; + goto out; + } + + rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq); + +out: + if (rc != -EAGAIN) { + memset(args, 0, sizeof(*args)); + args->out.status = rc; + args->out.seq = cs_seq; + } + + if (((rc == -ETIMEDOUT) || (rc == -EBUSY)) && (need_soft_reset)) + hl_device_reset(hdev, false, false); + + return rc; +} + +static long _hl_cs_wait_ioctl(struct hl_device *hdev, + struct hl_ctx *ctx, u64 timeout_us, u64 seq) +{ + struct dma_fence *fence; + unsigned long timeout; + long rc; + + if (timeout_us == MAX_SCHEDULE_TIMEOUT) + timeout = timeout_us; + else + timeout = usecs_to_jiffies(timeout_us); + + hl_ctx_get(hdev, ctx); + + fence = hl_ctx_get_fence(ctx, seq); + if (IS_ERR(fence)) { + rc = PTR_ERR(fence); + } else if (fence) { + rc = dma_fence_wait_timeout(fence, true, timeout); + if (fence->error == -ETIMEDOUT) + rc = -ETIMEDOUT; + else if (fence->error == -EIO) + rc = -EIO; + dma_fence_put(fence); + } else + rc = 1; + + hl_ctx_put(ctx); + + return rc; +} + +int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) +{ + struct hl_device *hdev = hpriv->hdev; + union hl_wait_cs_args *args = data; + u64 seq = args->in.seq; + long rc; + + rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq); + + memset(args, 0, sizeof(*args)); + + if (rc < 0) { + dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n", + rc, seq); + if (rc == -ERESTARTSYS) { + args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED; + rc = -EINTR; + } else if (rc == -ETIMEDOUT) { + args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT; + } else if (rc == -EIO) { + args->out.status = HL_WAIT_CS_STATUS_ABORTED; + } + return rc; + } + + if (rc == 0) + args->out.status = HL_WAIT_CS_STATUS_BUSY; + else + args->out.status = HL_WAIT_CS_STATUS_COMPLETED; + + return 0; +} diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c new file mode 100644 index 000000000000..619ace1c4ef7 --- /dev/null +++ 
b/drivers/misc/habanalabs/context.c @@ -0,0 +1,215 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include + +static void hl_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + int i; + + /* + * If we arrived here, there are no jobs waiting for this context + * on its queues so we can safely remove it. + * This is because for each CS, we increment the ref count and for + * every CS that was finished we decrement it and we won't arrive + * to this function unless the ref count is 0 + */ + + for (i = 0 ; i < HL_MAX_PENDING_CS ; i++) + dma_fence_put(ctx->cs_pending[i]); + + if (ctx->asid != HL_KERNEL_ASID_ID) { + hl_vm_ctx_fini(ctx); + hl_asid_free(hdev, ctx->asid); + } +} + +void hl_ctx_do_release(struct kref *ref) +{ + struct hl_ctx *ctx; + + ctx = container_of(ref, struct hl_ctx, refcount); + + hl_ctx_fini(ctx); + + if (ctx->hpriv) + hl_hpriv_put(ctx->hpriv); + + kfree(ctx); +} + +int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv) +{ + struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr; + struct hl_ctx *ctx; + int rc; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto out_err; + } + + rc = hl_ctx_init(hdev, ctx, false); + if (rc) + goto free_ctx; + + hl_hpriv_get(hpriv); + ctx->hpriv = hpriv; + + /* TODO: remove for multiple contexts */ + hpriv->ctx = ctx; + hdev->user_ctx = ctx; + + mutex_lock(&mgr->ctx_lock); + rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL); + mutex_unlock(&mgr->ctx_lock); + + if (rc < 0) { + dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n"); + hl_ctx_free(hdev, ctx); + goto out_err; + } + + return 0; + +free_ctx: + kfree(ctx); +out_err: + return rc; +} + +void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx) +{ + if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1) + return; + + dev_warn(hdev->dev, + "Context %d closed or terminated but its CS are executing\n", + ctx->asid); +} + +int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) +{ + int rc = 0; + + ctx->hdev = hdev; + + kref_init(&ctx->refcount); + + ctx->cs_sequence = 1; + spin_lock_init(&ctx->cs_lock); + atomic_set(&ctx->thread_restore_token, 1); + ctx->thread_restore_wait_token = 0; + + if (is_kernel_ctx) { + ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */ + } else { + ctx->asid = hl_asid_alloc(hdev); + if (!ctx->asid) { + dev_err(hdev->dev, "No free ASID, failed to create context\n"); + return -ENOMEM; + } + + rc = hl_vm_ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "Failed to init mem ctx module\n"); + rc = -ENOMEM; + goto mem_ctx_err; + } + } + + return 0; + +mem_ctx_err: + if (ctx->asid != HL_KERNEL_ASID_ID) + hl_asid_free(hdev, ctx->asid); + + return rc; +} + +void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx) +{ + kref_get(&ctx->refcount); +} + +int hl_ctx_put(struct hl_ctx *ctx) +{ + return kref_put(&ctx->refcount, hl_ctx_do_release); +} + +struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq) +{ + struct hl_device *hdev = ctx->hdev; + struct dma_fence *fence; + + spin_lock(&ctx->cs_lock); + + if (seq >= ctx->cs_sequence) { + dev_notice(hdev->dev, + "Can't wait on seq %llu because current CS is at seq %llu\n", + seq, ctx->cs_sequence); + spin_unlock(&ctx->cs_lock); + return ERR_PTR(-EINVAL); + } + + + if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) { + dev_dbg(hdev->dev, + "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", + seq, 
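hl_ctx_get_fence() here indexes a fixed array of pending fences with seq & (HL_MAX_PENDING_CS - 1), which is only correct because the array size is a power of two; a sequence number more than HL_MAX_PENDING_CS behind the current one has had its slot recycled, so the fence is treated as long signaled and NULL is returned. The indexing rule in isolation; MY_MAX_PENDING and the ring size of 64 are illustrative:

    #define MY_MAX_PENDING 64   /* must be a power of two, like HL_MAX_PENDING_CS */

    struct my_fence_ring {
            void *pending[MY_MAX_PENDING];
            unsigned long long next_seq;    /* sequence of the next submission */
    };

    /* Returns the slot for @seq, or NULL when the slot was already recycled. */
    static void *my_ring_lookup(struct my_fence_ring *r, unsigned long long seq)
    {
            if (seq >= r->next_seq)
                    return NULL;                    /* not submitted yet */
            if (seq + MY_MAX_PENDING < r->next_seq)
                    return NULL;                    /* too old, slot reused */
            return r->pending[seq & (MY_MAX_PENDING - 1)];
    }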
ctx->cs_sequence); + spin_unlock(&ctx->cs_lock); + return NULL; + } + + fence = dma_fence_get( + ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]); + spin_unlock(&ctx->cs_lock); + + return fence; +} + +/* + * hl_ctx_mgr_init - initialize the context manager + * + * @mgr: pointer to context manager structure + * + * This manager is an object inside the hpriv object of the user process. + * The function is called when a user process opens the FD. + */ +void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr) +{ + mutex_init(&mgr->ctx_lock); + idr_init(&mgr->ctx_handles); +} + +/* + * hl_ctx_mgr_fini - finalize the context manager + * + * @hdev: pointer to device structure + * @mgr: pointer to context manager structure + * + * This function goes over all the contexts in the manager and frees them. + * It is called when a process closes the FD. + */ +void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr) +{ + struct hl_ctx *ctx; + struct idr *idp; + u32 id; + + idp = &mgr->ctx_handles; + + idr_for_each_entry(idp, ctx, id) + hl_ctx_free(hdev, ctx); + + idr_destroy(&mgr->ctx_handles); + mutex_destroy(&mgr->ctx_lock); +} diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c new file mode 100644 index 000000000000..a53c12aff6ad --- /dev/null +++ b/drivers/misc/habanalabs/debugfs.c @@ -0,0 +1,1077 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" +#include "include/hw_ip/mmu/mmu_general.h" + +#include +#include +#include + +#define MMU_ADDR_BUF_SIZE 40 +#define MMU_ASID_BUF_SIZE 10 +#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE) + +static struct dentry *hl_debug_root; + +static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, + u8 i2c_reg, u32 *val) +{ + struct armcp_packet pkt; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return -EBUSY; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_RD << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.i2c_bus = i2c_bus; + pkt.i2c_addr = i2c_addr; + pkt.i2c_reg = i2c_reg; + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_DEVICE_TIMEOUT_USEC, (long *) val); + + if (rc) + dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc); + + return rc; +} + +static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr, + u8 i2c_reg, u32 val) +{ + struct armcp_packet pkt; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return -EBUSY; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_I2C_WR << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.i2c_bus = i2c_bus; + pkt.i2c_addr = i2c_addr; + pkt.i2c_reg = i2c_reg; + pkt.value = __cpu_to_le64(val); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_DEVICE_TIMEOUT_USEC, NULL); + + if (rc) + dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc); + + return rc; +} + +static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state) +{ + struct armcp_packet pkt; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) + return; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_LED_SET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.led_index = __cpu_to_le32(led); + pkt.value = __cpu_to_le64(state); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_DEVICE_TIMEOUT_USEC, NULL); + + if (rc) + dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc); +} + +static int 
command_buffers_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_cb *cb; + bool first = true; + + spin_lock(&dev_entry->cb_spinlock); + + list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " CB ID CTX ID CB size CB RefCnt mmap? CS counter\n"); + seq_puts(s, "---------------------------------------------------------------\n"); + } + seq_printf(s, + " %03d %d 0x%08x %d %d %d\n", + cb->id, cb->ctx_id, cb->size, + kref_read(&cb->refcount), + cb->mmap, cb->cs_cnt); + } + + spin_unlock(&dev_entry->cb_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int command_submission_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_cs *cs; + bool first = true; + + spin_lock(&dev_entry->cs_spinlock); + + list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n"); + seq_puts(s, "------------------------------------------------------\n"); + } + seq_printf(s, + " %llu %d %d %d %d\n", + cs->sequence, cs->ctx->asid, + kref_read(&cs->refcount), + cs->submitted, cs->completed); + } + + spin_unlock(&dev_entry->cs_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int command_submission_jobs_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_cs_job *job; + bool first = true; + + spin_lock(&dev_entry->cs_job_spinlock); + + list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " JOB ID CS ID CTX ASID H/W Queue\n"); + seq_puts(s, "---------------------------------------\n"); + } + if (job->cs) + seq_printf(s, + " %02d %llu %d %d\n", + job->id, job->cs->sequence, job->cs->ctx->asid, + job->hw_queue_id); + else + seq_printf(s, + " %02d 0 %d %d\n", + job->id, HL_KERNEL_ASID_ID, job->hw_queue_id); + } + + spin_unlock(&dev_entry->cs_job_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int userptr_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_userptr *userptr; + char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", + "DMA_FROM_DEVICE", "DMA_NONE"}; + bool first = true; + + spin_lock(&dev_entry->userptr_spinlock); + + list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) { + if (first) { + first = false; + seq_puts(s, "\n"); + seq_puts(s, " user virtual address size dma dir\n"); + seq_puts(s, "----------------------------------------------------------\n"); + } + seq_printf(s, + " 0x%-14llx %-10u %-30s\n", + userptr->addr, userptr->size, dma_dir[userptr->dir]); + } + + spin_unlock(&dev_entry->userptr_spinlock); + + if (!first) + seq_puts(s, "\n"); + + return 0; +} + +static int vm_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_ctx *ctx; + struct hl_vm *vm; + struct hl_vm_hash_node *hnode; + struct hl_userptr *userptr; + struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + enum vm_type_t *vm_type; + bool once = true; + int 
i; + + if (!dev_entry->hdev->mmu_enable) + return 0; + + spin_lock(&dev_entry->ctx_mem_hash_spinlock); + + list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) { + once = false; + seq_puts(s, "\n\n----------------------------------------------------"); + seq_puts(s, "\n----------------------------------------------------\n\n"); + seq_printf(s, "ctx asid: %u\n", ctx->asid); + + seq_puts(s, "\nmappings:\n\n"); + seq_puts(s, " virtual address size handle\n"); + seq_puts(s, "----------------------------------------------------\n"); + mutex_lock(&ctx->mem_hash_lock); + hash_for_each(ctx->mem_hash, i, hnode, node) { + vm_type = hnode->ptr; + + if (*vm_type == VM_TYPE_USERPTR) { + userptr = hnode->ptr; + seq_printf(s, + " 0x%-14llx %-10u\n", + hnode->vaddr, userptr->size); + } else { + phys_pg_pack = hnode->ptr; + seq_printf(s, + " 0x%-14llx %-10u %-4u\n", + hnode->vaddr, phys_pg_pack->total_size, + phys_pg_pack->handle); + } + } + mutex_unlock(&ctx->mem_hash_lock); + + vm = &ctx->hdev->vm; + spin_lock(&vm->idr_lock); + + if (!idr_is_empty(&vm->phys_pg_pack_handles)) + seq_puts(s, "\n\nallocations:\n"); + + idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) { + if (phys_pg_pack->asid != ctx->asid) + continue; + + seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle); + seq_printf(s, "page size: %u\n\n", + phys_pg_pack->page_size); + seq_puts(s, " physical address\n"); + seq_puts(s, "---------------------\n"); + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + seq_printf(s, " 0x%-14llx\n", + phys_pg_pack->pages[i]); + } + } + spin_unlock(&vm->idr_lock); + + } + + spin_unlock(&dev_entry->ctx_mem_hash_spinlock); + + if (!once) + seq_puts(s, "\n"); + + return 0; +} + +/* these inline functions are copied from mmu.c */ +static inline u64 get_hop0_addr(struct hl_ctx *ctx) +{ + return ctx->hdev->asic_prop.mmu_pgt_addr + + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & HOP0_MASK) >> HOP0_SHIFT); +} + +static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & HOP1_MASK) >> HOP1_SHIFT); +} + +static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & HOP2_MASK) >> HOP2_SHIFT); +} + +static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & HOP3_MASK) >> HOP3_SHIFT); +} + +static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & HOP4_MASK) >> HOP4_SHIFT); +} + +static inline u64 get_next_hop_addr(u64 curr_pte) +{ + if (curr_pte & PAGE_PRESENT_MASK) + return curr_pte & PHYS_ADDR_MASK; + else + return ULLONG_MAX; +} + +static int mmu_show(struct seq_file *s, void *data) +{ + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_device *hdev = dev_entry->hdev; + struct hl_ctx *ctx = hdev->user_ctx; + + u64 hop0_addr = 0, hop0_pte_addr = 0, hop0_pte = 0, + hop1_addr = 0, hop1_pte_addr = 0, hop1_pte = 0, + hop2_addr = 0, hop2_pte_addr = 0, hop2_pte = 0, + hop3_addr = 0, hop3_pte_addr = 0, hop3_pte = 0, + hop4_addr = 0, hop4_pte_addr = 0, 
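The get_hop*_pte_addr() helpers above are all instances of one formula: the PTE address is the hop's table base plus mmu_pte_size times the index carved out of the virtual address by that hop's mask and shift. Written once, generically (the concrete masks and shifts live in the ASIC's mmu_general.h header, which is not shown here):

    #include <linux/types.h>

    /* Generic form of a single page-table-walk step; all parameters are
     * placeholders for the per-hop constants used above. */
    static inline u64 pte_addr_for_hop(u64 hop_table_base, u64 virt_addr,
                                       u64 hop_mask, u32 hop_shift,
                                       u32 pte_size)
    {
            return hop_table_base + (u64)pte_size *
                    ((virt_addr & hop_mask) >> hop_shift);
    }

Each hop's PTE then either points at the next hop's table base, as get_next_hop_addr() above extracts with PHYS_ADDR_MASK, or, once LAST_MASK is set, at the final physical page.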
hop4_pte = 0, + virt_addr = dev_entry->mmu_addr; + + if (!hdev->mmu_enable) + return 0; + + if (!ctx) { + dev_err(hdev->dev, "no ctx available\n"); + return 0; + } + + mutex_lock(&ctx->mmu_lock); + + /* the following lookup is copied from unmap() in mmu.c */ + + hop0_addr = get_hop0_addr(ctx); + hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr); + hop0_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); + hop1_addr = get_next_hop_addr(hop0_pte); + + if (hop1_addr == ULLONG_MAX) + goto not_mapped; + + hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr); + hop1_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); + hop2_addr = get_next_hop_addr(hop1_pte); + + if (hop2_addr == ULLONG_MAX) + goto not_mapped; + + hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr); + hop2_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); + hop3_addr = get_next_hop_addr(hop2_pte); + + if (hop3_addr == ULLONG_MAX) + goto not_mapped; + + hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr); + hop3_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); + + if (!(hop3_pte & LAST_MASK)) { + hop4_addr = get_next_hop_addr(hop3_pte); + + if (hop4_addr == ULLONG_MAX) + goto not_mapped; + + hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr); + hop4_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr); + if (!(hop4_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + } else { + if (!(hop3_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + } + + seq_printf(s, "asid: %u, virt_addr: 0x%llx\n", + dev_entry->mmu_asid, dev_entry->mmu_addr); + + seq_printf(s, "hop0_addr: 0x%llx\n", hop0_addr); + seq_printf(s, "hop0_pte_addr: 0x%llx\n", hop0_pte_addr); + seq_printf(s, "hop0_pte: 0x%llx\n", hop0_pte); + + seq_printf(s, "hop1_addr: 0x%llx\n", hop1_addr); + seq_printf(s, "hop1_pte_addr: 0x%llx\n", hop1_pte_addr); + seq_printf(s, "hop1_pte: 0x%llx\n", hop1_pte); + + seq_printf(s, "hop2_addr: 0x%llx\n", hop2_addr); + seq_printf(s, "hop2_pte_addr: 0x%llx\n", hop2_pte_addr); + seq_printf(s, "hop2_pte: 0x%llx\n", hop2_pte); + + seq_printf(s, "hop3_addr: 0x%llx\n", hop3_addr); + seq_printf(s, "hop3_pte_addr: 0x%llx\n", hop3_pte_addr); + seq_printf(s, "hop3_pte: 0x%llx\n", hop3_pte); + + if (!(hop3_pte & LAST_MASK)) { + seq_printf(s, "hop4_addr: 0x%llx\n", hop4_addr); + seq_printf(s, "hop4_pte_addr: 0x%llx\n", hop4_pte_addr); + seq_printf(s, "hop4_pte: 0x%llx\n", hop4_pte); + } + + goto out; + +not_mapped: + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); +out: + mutex_unlock(&ctx->mmu_lock); + + return 0; +} + +static ssize_t mmu_write(struct file *file, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct seq_file *s = file->private_data; + struct hl_debugfs_entry *entry = s->private; + struct hl_dbg_device_entry *dev_entry = entry->dev_entry; + struct hl_device *hdev = dev_entry->hdev; + char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE], + addr_kbuf[MMU_ADDR_BUF_SIZE]; + char *c; + ssize_t rc; + + if (!hdev->mmu_enable) + return count; + + memset(kbuf, 0, sizeof(kbuf)); + memset(asid_kbuf, 0, sizeof(asid_kbuf)); + memset(addr_kbuf, 0, sizeof(addr_kbuf)); + + if (copy_from_user(kbuf, buf, count)) + goto err; + + kbuf[MMU_KBUF_SIZE - 1] = 0; + + c = strchr(kbuf, ' '); + if (!c) + goto err; + + memcpy(asid_kbuf, kbuf, c - kbuf); + + rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid); + if (rc) + goto err; + + c = strstr(kbuf, " 0x"); + if (!c) + goto err; + + c += 3; + memcpy(addr_kbuf, c, (kbuf + count) - c); + + rc = kstrtoull(addr_kbuf, 
16, &dev_entry->mmu_addr); + if (rc) + goto err; + + return count; + +err: + dev_err(hdev->dev, "usage: echo <0xaddr> > mmu\n"); + + return -EINVAL; +} + +static ssize_t hl_data_read32(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[32]; + u32 val; + ssize_t rc; + + if (*ppos) + return 0; + + rc = hdev->asic_funcs->debugfs_read32(hdev, entry->addr, &val); + if (rc) { + dev_err(hdev->dev, "Failed to read from 0x%010llx\n", + entry->addr); + return rc; + } + + sprintf(tmp_buf, "0x%08x\n", val); + rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, + strlen(tmp_buf) + 1); + + return rc; +} + +static ssize_t hl_data_write32(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 16, &value); + if (rc) + return rc; + + rc = hdev->asic_funcs->debugfs_write32(hdev, entry->addr, value); + if (rc) { + dev_err(hdev->dev, "Failed to write 0x%08x to 0x%010llx\n", + value, entry->addr); + return rc; + } + + return count; +} + +static ssize_t hl_get_power_state(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[200]; + ssize_t rc; + int i; + + if (*ppos) + return 0; + + if (hdev->pdev->current_state == PCI_D0) + i = 1; + else if (hdev->pdev->current_state == PCI_D3hot) + i = 2; + else + i = 3; + + sprintf(tmp_buf, + "current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i); + rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, + strlen(tmp_buf) + 1); + + return rc; +} + +static ssize_t hl_set_power_state(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + if (value == 1) { + pci_set_power_state(hdev->pdev, PCI_D0); + pci_restore_state(hdev->pdev); + rc = pci_enable_device(hdev->pdev); + } else if (value == 2) { + pci_save_state(hdev->pdev); + pci_disable_device(hdev->pdev); + pci_set_power_state(hdev->pdev, PCI_D3hot); + } else { + dev_dbg(hdev->dev, "invalid power state value %u\n", value); + return -EINVAL; + } + + return count; +} + +static ssize_t hl_i2c_data_read(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char tmp_buf[32]; + u32 val; + ssize_t rc; + + if (*ppos) + return 0; + + rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr, + entry->i2c_reg, &val); + if (rc) { + dev_err(hdev->dev, + "Failed to read from I2C bus %d, addr %d, reg %d\n", + entry->i2c_bus, entry->i2c_addr, entry->i2c_reg); + return rc; + } + + sprintf(tmp_buf, "0x%02x\n", val); + rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, + strlen(tmp_buf) + 1); + + return rc; +} + +static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 
16, &value); + if (rc) + return rc; + + rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr, + entry->i2c_reg, value); + if (rc) { + dev_err(hdev->dev, + "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n", + value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg); + return rc; + } + + return count; +} + +static ssize_t hl_led0_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + value = value ? 1 : 0; + + hl_debugfs_led_set(hdev, 0, value); + + return count; +} + +static ssize_t hl_led1_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + value = value ? 1 : 0; + + hl_debugfs_led_set(hdev, 1, value); + + return count; +} + +static ssize_t hl_led2_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + u32 value; + ssize_t rc; + + rc = kstrtouint_from_user(buf, count, 10, &value); + if (rc) + return rc; + + value = value ? 1 : 0; + + hl_debugfs_led_set(hdev, 2, value); + + return count; +} + +static ssize_t hl_device_read(struct file *f, char __user *buf, + size_t count, loff_t *ppos) +{ + char tmp_buf[200]; + ssize_t rc; + + if (*ppos) + return 0; + + sprintf(tmp_buf, + "Valid values: disable, enable, suspend, resume, cpu_timeout\n"); + rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf, + strlen(tmp_buf) + 1); + + return rc; +} + +static ssize_t hl_device_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct hl_dbg_device_entry *entry = file_inode(f)->i_private; + struct hl_device *hdev = entry->hdev; + char data[30]; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + simple_write_to_buffer(data, 29, ppos, buf, count); + + if (strncmp("disable", data, strlen("disable")) == 0) { + hdev->disabled = true; + } else if (strncmp("enable", data, strlen("enable")) == 0) { + hdev->disabled = false; + } else if (strncmp("suspend", data, strlen("suspend")) == 0) { + hdev->asic_funcs->suspend(hdev); + } else if (strncmp("resume", data, strlen("resume")) == 0) { + hdev->asic_funcs->resume(hdev); + } else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) { + hdev->device_cpu_disabled = true; + } else { + dev_err(hdev->dev, + "Valid values: disable, enable, suspend, resume, cpu_timeout\n"); + count = -EINVAL; + } + + return count; +} + +static const struct file_operations hl_data32b_fops = { + .owner = THIS_MODULE, + .read = hl_data_read32, + .write = hl_data_write32 +}; + +static const struct file_operations hl_i2c_data_fops = { + .owner = THIS_MODULE, + .read = hl_i2c_data_read, + .write = hl_i2c_data_write +}; + +static const struct file_operations hl_power_fops = { + .owner = THIS_MODULE, + .read = hl_get_power_state, + .write = hl_set_power_state +}; + +static const struct file_operations hl_led0_fops = { + .owner = THIS_MODULE, + .write = hl_led0_write +}; + +static const struct file_operations hl_led1_fops = { + .owner = THIS_MODULE, + .write = hl_led1_write +}; + +static const struct file_operations 
hl_led2_fops = { + .owner = THIS_MODULE, + .write = hl_led2_write +}; + +static const struct file_operations hl_device_fops = { + .owner = THIS_MODULE, + .read = hl_device_read, + .write = hl_device_write +}; + +static const struct hl_info_list hl_debugfs_list[] = { + {"command_buffers", command_buffers_show, NULL}, + {"command_submission", command_submission_show, NULL}, + {"command_submission_jobs", command_submission_jobs_show, NULL}, + {"userptr", userptr_show, NULL}, + {"vm", vm_show, NULL}, + {"mmu", mmu_show, mmu_write}, +}; + +static int hl_debugfs_open(struct inode *inode, struct file *file) +{ + struct hl_debugfs_entry *node = inode->i_private; + + return single_open(file, node->info_ent->show, node); +} + +static ssize_t hl_debugfs_write(struct file *file, const char __user *buf, + size_t count, loff_t *f_pos) +{ + struct hl_debugfs_entry *node = file->f_inode->i_private; + + if (node->info_ent->write) + return node->info_ent->write(file, buf, count, f_pos); + else + return -EINVAL; + +} + +static const struct file_operations hl_debugfs_fops = { + .owner = THIS_MODULE, + .open = hl_debugfs_open, + .read = seq_read, + .write = hl_debugfs_write, + .llseek = seq_lseek, + .release = single_release, +}; + +void hl_debugfs_add_device(struct hl_device *hdev) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + int count = ARRAY_SIZE(hl_debugfs_list); + struct hl_debugfs_entry *entry; + struct dentry *ent; + int i; + + dev_entry->hdev = hdev; + dev_entry->entry_arr = kmalloc_array(count, + sizeof(struct hl_debugfs_entry), + GFP_KERNEL); + if (!dev_entry->entry_arr) + return; + + INIT_LIST_HEAD(&dev_entry->file_list); + INIT_LIST_HEAD(&dev_entry->cb_list); + INIT_LIST_HEAD(&dev_entry->cs_list); + INIT_LIST_HEAD(&dev_entry->cs_job_list); + INIT_LIST_HEAD(&dev_entry->userptr_list); + INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list); + mutex_init(&dev_entry->file_mutex); + spin_lock_init(&dev_entry->cb_spinlock); + spin_lock_init(&dev_entry->cs_spinlock); + spin_lock_init(&dev_entry->cs_job_spinlock); + spin_lock_init(&dev_entry->userptr_spinlock); + spin_lock_init(&dev_entry->ctx_mem_hash_spinlock); + + dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), + hl_debug_root); + + debugfs_create_x64("addr", + 0644, + dev_entry->root, + &dev_entry->addr); + + debugfs_create_file("data32", + 0644, + dev_entry->root, + dev_entry, + &hl_data32b_fops); + + debugfs_create_file("set_power_state", + 0200, + dev_entry->root, + dev_entry, + &hl_power_fops); + + debugfs_create_u8("i2c_bus", + 0644, + dev_entry->root, + &dev_entry->i2c_bus); + + debugfs_create_u8("i2c_addr", + 0644, + dev_entry->root, + &dev_entry->i2c_addr); + + debugfs_create_u8("i2c_reg", + 0644, + dev_entry->root, + &dev_entry->i2c_reg); + + debugfs_create_file("i2c_data", + 0644, + dev_entry->root, + dev_entry, + &hl_i2c_data_fops); + + debugfs_create_file("led0", + 0200, + dev_entry->root, + dev_entry, + &hl_led0_fops); + + debugfs_create_file("led1", + 0200, + dev_entry->root, + dev_entry, + &hl_led1_fops); + + debugfs_create_file("led2", + 0200, + dev_entry->root, + dev_entry, + &hl_led2_fops); + + debugfs_create_file("device", + 0200, + dev_entry->root, + dev_entry, + &hl_device_fops); + + for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) { + + ent = debugfs_create_file(hl_debugfs_list[i].name, + 0444, + dev_entry->root, + entry, + &hl_debugfs_fops); + entry->dent = ent; + entry->info_ent = &hl_debugfs_list[i]; + entry->dev_entry = dev_entry; + } +} + +void hl_debugfs_remove_device(struct 
hl_device *hdev) +{ + struct hl_dbg_device_entry *entry = &hdev->hl_debugfs; + + debugfs_remove_recursive(entry->root); + + mutex_destroy(&entry->file_mutex); + kfree(entry->entry_arr); +} + +void hl_debugfs_add_file(struct hl_fpriv *hpriv) +{ + struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs; + + mutex_lock(&dev_entry->file_mutex); + list_add(&hpriv->debugfs_list, &dev_entry->file_list); + mutex_unlock(&dev_entry->file_mutex); +} + +void hl_debugfs_remove_file(struct hl_fpriv *hpriv) +{ + struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs; + + mutex_lock(&dev_entry->file_mutex); + list_del(&hpriv->debugfs_list); + mutex_unlock(&dev_entry->file_mutex); +} + +void hl_debugfs_add_cb(struct hl_cb *cb) +{ + struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs; + + spin_lock(&dev_entry->cb_spinlock); + list_add(&cb->debugfs_list, &dev_entry->cb_list); + spin_unlock(&dev_entry->cb_spinlock); +} + +void hl_debugfs_remove_cb(struct hl_cb *cb) +{ + struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs; + + spin_lock(&dev_entry->cb_spinlock); + list_del(&cb->debugfs_list); + spin_unlock(&dev_entry->cb_spinlock); +} + +void hl_debugfs_add_cs(struct hl_cs *cs) +{ + struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_spinlock); + list_add(&cs->debugfs_list, &dev_entry->cs_list); + spin_unlock(&dev_entry->cs_spinlock); +} + +void hl_debugfs_remove_cs(struct hl_cs *cs) +{ + struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_spinlock); + list_del(&cs->debugfs_list); + spin_unlock(&dev_entry->cs_spinlock); +} + +void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_job_spinlock); + list_add(&job->debugfs_list, &dev_entry->cs_job_list); + spin_unlock(&dev_entry->cs_job_spinlock); +} + +void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->cs_job_spinlock); + list_del(&job->debugfs_list); + spin_unlock(&dev_entry->cs_job_spinlock); +} + +void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->userptr_spinlock); + list_add(&userptr->debugfs_list, &dev_entry->userptr_list); + spin_unlock(&dev_entry->userptr_spinlock); +} + +void hl_debugfs_remove_userptr(struct hl_device *hdev, + struct hl_userptr *userptr) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->userptr_spinlock); + list_del(&userptr->debugfs_list); + spin_unlock(&dev_entry->userptr_spinlock); +} + +void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->ctx_mem_hash_spinlock); + list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list); + spin_unlock(&dev_entry->ctx_mem_hash_spinlock); +} + +void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx) +{ + struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs; + + spin_lock(&dev_entry->ctx_mem_hash_spinlock); + list_del(&ctx->debugfs_list); + spin_unlock(&dev_entry->ctx_mem_hash_spinlock); +} + +void __init hl_debugfs_init(void) +{ + hl_debug_root = debugfs_create_dir("habanalabs", NULL); +} + +void hl_debugfs_fini(void) +{ + 
debugfs_remove_recursive(hl_debug_root); +} diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c new file mode 100644 index 000000000000..de46aa6ed154 --- /dev/null +++ b/drivers/misc/habanalabs/device.c @@ -0,0 +1,1140 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include +#include +#include + +bool hl_device_disabled_or_in_reset(struct hl_device *hdev) +{ + if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) + return true; + else + return false; +} + +static void hpriv_release(struct kref *ref) +{ + struct hl_fpriv *hpriv; + struct hl_device *hdev; + + hpriv = container_of(ref, struct hl_fpriv, refcount); + + hdev = hpriv->hdev; + + put_pid(hpriv->taskpid); + + hl_debugfs_remove_file(hpriv); + + mutex_destroy(&hpriv->restore_phase_mutex); + + kfree(hpriv); + + /* Now the FD is really closed */ + atomic_dec(&hdev->fd_open_cnt); + + /* This allows a new user context to open the device */ + hdev->user_ctx = NULL; +} + +void hl_hpriv_get(struct hl_fpriv *hpriv) +{ + kref_get(&hpriv->refcount); +} + +void hl_hpriv_put(struct hl_fpriv *hpriv) +{ + kref_put(&hpriv->refcount, hpriv_release); +} + +/* + * hl_device_release - release function for habanalabs device + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Called when process closes an habanalabs device + */ +static int hl_device_release(struct inode *inode, struct file *filp) +{ + struct hl_fpriv *hpriv = filp->private_data; + + hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); + hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); + + filp->private_data = NULL; + + hl_hpriv_put(hpriv); + + return 0; +} + +/* + * hl_mmap - mmap function for habanalabs device + * + * @*filp: pointer to file structure + * @*vma: pointer to vm_area_struct of the process + * + * Called when process does an mmap on habanalabs device. Call the device's mmap + * function at the end of the common code. + */ +static int hl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct hl_fpriv *hpriv = filp->private_data; + + if ((vma->vm_pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK) { + vma->vm_pgoff ^= HL_MMAP_CB_MASK; + return hl_cb_mmap(hpriv, vma); + } + + return -EINVAL; +} + +static const struct file_operations hl_ops = { + .owner = THIS_MODULE, + .open = hl_device_open, + .release = hl_device_release, + .mmap = hl_mmap, + .unlocked_ioctl = hl_ioctl, + .compat_ioctl = hl_ioctl +}; + +/* + * device_setup_cdev - setup cdev and device for habanalabs device + * + * @hdev: pointer to habanalabs device structure + * @hclass: pointer to the class object of the device + * @minor: minor number of the specific device + * @fpos : file operations to install for this device + * + * Create a cdev and a Linux device for habanalabs's device. 
Need to be + * called at the end of the habanalabs device initialization process, + * because this function exposes the device to the user + */ +static int device_setup_cdev(struct hl_device *hdev, struct class *hclass, + int minor, const struct file_operations *fops) +{ + int err, devno = MKDEV(hdev->major, minor); + struct cdev *hdev_cdev = &hdev->cdev; + char *name; + + name = kasprintf(GFP_KERNEL, "hl%d", hdev->id); + if (!name) + return -ENOMEM; + + cdev_init(hdev_cdev, fops); + hdev_cdev->owner = THIS_MODULE; + err = cdev_add(hdev_cdev, devno, 1); + if (err) { + pr_err("Failed to add char device %s\n", name); + goto err_cdev_add; + } + + hdev->dev = device_create(hclass, NULL, devno, NULL, "%s", name); + if (IS_ERR(hdev->dev)) { + pr_err("Failed to create device %s\n", name); + err = PTR_ERR(hdev->dev); + goto err_device_create; + } + + dev_set_drvdata(hdev->dev, hdev); + + kfree(name); + + return 0; + +err_device_create: + cdev_del(hdev_cdev); +err_cdev_add: + kfree(name); + return err; +} + +/* + * device_early_init - do some early initialization for the habanalabs device + * + * @hdev: pointer to habanalabs device structure + * + * Install the relevant function pointers and call the early_init function, + * if such a function exists + */ +static int device_early_init(struct hl_device *hdev) +{ + int rc; + + switch (hdev->asic_type) { + case ASIC_GOYA: + goya_set_asic_funcs(hdev); + strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name)); + break; + default: + dev_err(hdev->dev, "Unrecognized ASIC type %d\n", + hdev->asic_type); + return -EINVAL; + } + + rc = hdev->asic_funcs->early_init(hdev); + if (rc) + return rc; + + rc = hl_asid_init(hdev); + if (rc) + goto early_fini; + + hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0); + if (hdev->cq_wq == NULL) { + dev_err(hdev->dev, "Failed to allocate CQ workqueue\n"); + rc = -ENOMEM; + goto asid_fini; + } + + hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0); + if (hdev->eq_wq == NULL) { + dev_err(hdev->dev, "Failed to allocate EQ workqueue\n"); + rc = -ENOMEM; + goto free_cq_wq; + } + + hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info), + GFP_KERNEL); + if (!hdev->hl_chip_info) { + rc = -ENOMEM; + goto free_eq_wq; + } + + hl_cb_mgr_init(&hdev->kernel_cb_mgr); + + mutex_init(&hdev->fd_open_cnt_lock); + mutex_init(&hdev->send_cpu_message_lock); + INIT_LIST_HEAD(&hdev->hw_queues_mirror_list); + spin_lock_init(&hdev->hw_queues_mirror_lock); + atomic_set(&hdev->in_reset, 0); + atomic_set(&hdev->fd_open_cnt, 0); + + return 0; + +free_eq_wq: + destroy_workqueue(hdev->eq_wq); +free_cq_wq: + destroy_workqueue(hdev->cq_wq); +asid_fini: + hl_asid_fini(hdev); +early_fini: + if (hdev->asic_funcs->early_fini) + hdev->asic_funcs->early_fini(hdev); + + return rc; +} + +/* + * device_early_fini - finalize all that was done in device_early_init + * + * @hdev: pointer to habanalabs device structure + * + */ +static void device_early_fini(struct hl_device *hdev) +{ + mutex_destroy(&hdev->send_cpu_message_lock); + + hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); + + kfree(hdev->hl_chip_info); + + destroy_workqueue(hdev->eq_wq); + destroy_workqueue(hdev->cq_wq); + + hl_asid_fini(hdev); + + if (hdev->asic_funcs->early_fini) + hdev->asic_funcs->early_fini(hdev); + + mutex_destroy(&hdev->fd_open_cnt_lock); +} + +static void set_freq_to_low_job(struct work_struct *work) +{ + struct hl_device *hdev = container_of(work, struct hl_device, + work_freq.work); + + if (atomic_read(&hdev->fd_open_cnt) == 0) + hl_device_set_frequency(hdev, 
PLL_LOW);
+
+	schedule_delayed_work(&hdev->work_freq,
+			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+}
+
+static void hl_device_heartbeat(struct work_struct *work)
+{
+	struct hl_device *hdev = container_of(work, struct hl_device,
+				work_heartbeat.work);
+
+	if (hl_device_disabled_or_in_reset(hdev))
+		goto reschedule;
+
+	if (!hdev->asic_funcs->send_heartbeat(hdev))
+		goto reschedule;
+
+	dev_err(hdev->dev, "Device heartbeat failed!\n");
+	hl_device_reset(hdev, true, false);
+
+	return;
+
+reschedule:
+	schedule_delayed_work(&hdev->work_heartbeat,
+			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+}
+
+/*
+ * device_late_init - do late initialization for the habanalabs device
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ * Do stuff that either needs the device H/W queues to be active or needs
+ * to happen after all the rest of the initialization is finished
+ */
+static int device_late_init(struct hl_device *hdev)
+{
+	int rc;
+
+	INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
+	hdev->high_pll = hdev->asic_prop.high_pll;
+
+	/* force setting to low frequency */
+	atomic_set(&hdev->curr_pll_profile, PLL_LOW);
+
+	if (hdev->pm_mng_profile == PM_AUTO)
+		hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
+	else
+		hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
+
+	if (hdev->asic_funcs->late_init) {
+		rc = hdev->asic_funcs->late_init(hdev);
+		if (rc) {
+			dev_err(hdev->dev,
+				"failed late initialization for the H/W\n");
+			return rc;
+		}
+	}
+
+	schedule_delayed_work(&hdev->work_freq,
+			usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+
+	if (hdev->heartbeat) {
+		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
+		schedule_delayed_work(&hdev->work_heartbeat,
+				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+	}
+
+	hdev->late_init_done = true;
+
+	return 0;
+}
+
+/*
+ * device_late_fini - finalize all that was done in device_late_init
+ *
+ * @hdev: pointer to habanalabs device structure
+ *
+ */
+static void device_late_fini(struct hl_device *hdev)
+{
+	if (!hdev->late_init_done)
+		return;
+
+	cancel_delayed_work_sync(&hdev->work_freq);
+	if (hdev->heartbeat)
+		cancel_delayed_work_sync(&hdev->work_heartbeat);
+
+	if (hdev->asic_funcs->late_fini)
+		hdev->asic_funcs->late_fini(hdev);
+
+	hdev->late_init_done = false;
+}
+
+/*
+ * hl_device_set_frequency - set the frequency of the device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @freq: the new frequency value
+ *
+ * Change the frequency if needed.
+ * We allow setting the PLL to low only if there is no open user process.
+ * Returns 0 if no change was done, otherwise returns 1.
+ */
+int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+	enum hl_pll_frequency old_freq =
+			(freq == PLL_HIGH) ? PLL_LOW : PLL_HIGH;
+	int ret;
+
+	if (hdev->pm_mng_profile == PM_MANUAL)
+		return 0;
+
+	ret = atomic_cmpxchg(&hdev->curr_pll_profile, old_freq, freq);
+	if (ret == freq)
+		return 0;
+
+	/*
+	 * In case we want to lower the frequency, check that the device is
+	 * not open. We must have this check to work around a race condition
+	 * with hl_device_open.
+	 */
+	if ((freq == PLL_LOW) && (atomic_read(&hdev->fd_open_cnt) > 0)) {
+		atomic_set(&hdev->curr_pll_profile, PLL_HIGH);
+		return 0;
+	}
+
+	dev_dbg(hdev->dev, "Changing device frequency to %s\n",
+		freq == PLL_HIGH ?
"high" : "low"); + + hdev->asic_funcs->set_pll_profile(hdev, freq); + + return 1; +} + +/* + * hl_device_suspend - initiate device suspend + * + * @hdev: pointer to habanalabs device structure + * + * Puts the hw in the suspend state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver suspend. + */ +int hl_device_suspend(struct hl_device *hdev) +{ + int rc; + + pci_save_state(hdev->pdev); + + rc = hdev->asic_funcs->suspend(hdev); + if (rc) + dev_err(hdev->dev, + "Failed to disable PCI access of device CPU\n"); + + /* Shut down the device */ + pci_disable_device(hdev->pdev); + pci_set_power_state(hdev->pdev, PCI_D3hot); + + return 0; +} + +/* + * hl_device_resume - initiate device resume + * + * @hdev: pointer to habanalabs device structure + * + * Bring the hw back to operating state (all asics). + * Returns 0 for success or an error on failure. + * Called at driver resume. + */ +int hl_device_resume(struct hl_device *hdev) +{ + int rc; + + pci_set_power_state(hdev->pdev, PCI_D0); + pci_restore_state(hdev->pdev); + rc = pci_enable_device(hdev->pdev); + if (rc) { + dev_err(hdev->dev, + "Failed to enable PCI device in resume\n"); + return rc; + } + + rc = hdev->asic_funcs->resume(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to enable PCI access from device CPU\n"); + return rc; + } + + return 0; +} + +static void hl_device_hard_reset_pending(struct work_struct *work) +{ + struct hl_device_reset_work *device_reset_work = + container_of(work, struct hl_device_reset_work, reset_work); + struct hl_device *hdev = device_reset_work->hdev; + u16 pending_cnt = HL_PENDING_RESET_PER_SEC; + struct task_struct *task = NULL; + + /* Flush all processes that are inside hl_open */ + mutex_lock(&hdev->fd_open_cnt_lock); + + while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) { + + pending_cnt--; + + dev_info(hdev->dev, + "Can't HARD reset, waiting for user to close FD\n"); + ssleep(1); + } + + if (atomic_read(&hdev->fd_open_cnt)) { + task = get_pid_task(hdev->user_ctx->hpriv->taskpid, + PIDTYPE_PID); + if (task) { + dev_info(hdev->dev, "Killing user processes\n"); + send_sig(SIGKILL, task, 1); + msleep(100); + + put_task_struct(task); + } + } + + mutex_unlock(&hdev->fd_open_cnt_lock); + + hl_device_reset(hdev, true, true); + + kfree(device_reset_work); +} + +/* + * hl_device_reset - reset the device + * + * @hdev: pointer to habanalabs device structure + * @hard_reset: should we do hard reset to all engines or just reset the + * compute/dma engines + * + * Block future CS and wait for pending CS to be enqueued + * Call ASIC H/W fini + * Flush all completions + * Re-initialize all internal data structures + * Call ASIC H/W init, late_init + * Test queues + * Enable device + * + * Returns 0 for success or an error on failure. + */ +int hl_device_reset(struct hl_device *hdev, bool hard_reset, + bool from_hard_reset_thread) +{ + int i, rc; + + if (!hdev->init_done) { + dev_err(hdev->dev, + "Can't reset before initialization is done\n"); + return 0; + } + + /* + * Prevent concurrency in this function - only one reset should be + * done at any given time. 
Only need to perform this if we didn't + * get from the dedicated hard reset thread + */ + if (!from_hard_reset_thread) { + /* Block future CS/VM/JOB completion operations */ + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + if (rc) + return 0; + + /* This also blocks future CS/VM/JOB completion operations */ + hdev->disabled = true; + + /* + * Flush anyone that is inside the critical section of enqueue + * jobs to the H/W + */ + hdev->asic_funcs->hw_queues_lock(hdev); + hdev->asic_funcs->hw_queues_unlock(hdev); + + dev_err(hdev->dev, "Going to RESET device!\n"); + } + +again: + if ((hard_reset) && (!from_hard_reset_thread)) { + struct hl_device_reset_work *device_reset_work; + + if (!hdev->pdev) { + dev_err(hdev->dev, + "Reset action is NOT supported in simulator\n"); + rc = -EINVAL; + goto out_err; + } + + hdev->hard_reset_pending = true; + + device_reset_work = kzalloc(sizeof(*device_reset_work), + GFP_ATOMIC); + if (!device_reset_work) { + rc = -ENOMEM; + goto out_err; + } + + /* + * Because the reset function can't run from interrupt or + * from heartbeat work, we need to call the reset function + * from a dedicated work + */ + INIT_WORK(&device_reset_work->reset_work, + hl_device_hard_reset_pending); + device_reset_work->hdev = hdev; + schedule_work(&device_reset_work->reset_work); + + return 0; + } + + if (hard_reset) { + device_late_fini(hdev); + + /* + * Now that the heartbeat thread is closed, flush processes + * which are sending messages to CPU + */ + mutex_lock(&hdev->send_cpu_message_lock); + mutex_unlock(&hdev->send_cpu_message_lock); + } + + /* + * Halt the engines and disable interrupts so we won't get any more + * completions from H/W and we won't have any accesses from the + * H/W to the host machine + */ + hdev->asic_funcs->halt_engines(hdev, hard_reset); + + /* Go over all the queues, release all CS and their jobs */ + hl_cs_rollback_all(hdev); + + if (hard_reset) { + /* Release kernel context */ + if (hl_ctx_put(hdev->kernel_ctx) != 1) { + dev_err(hdev->dev, + "kernel ctx is alive during hard reset\n"); + rc = -EBUSY; + goto out_err; + } + + hdev->kernel_ctx = NULL; + } + + /* Reset the H/W. 
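The guard at the top of hl_device_reset() above relies on atomic_cmpxchg() on hdev->in_reset: only the caller that flips it from 0 to 1 proceeds, every concurrent caller sees a non-zero old value and backs off, so at most one reset runs at a time. The gate in isolation, with illustrative names:

    #include <linux/atomic.h>
    #include <linux/errno.h>

    static atomic_t in_reset = ATOMIC_INIT(0);

    static int try_begin_reset(void)
    {
            /* atomic_cmpxchg() returns the old value: 0 means we won the race */
            if (atomic_cmpxchg(&in_reset, 0, 1))
                    return -EBUSY;          /* somebody else is already resetting */
            return 0;
    }

    static void end_reset(void)
    {
            atomic_set(&in_reset, 0);
    }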
It will be in idle state after this returns */ + hdev->asic_funcs->hw_fini(hdev, hard_reset); + + if (hard_reset) { + hl_vm_fini(hdev); + hl_eq_reset(hdev, &hdev->event_queue); + } + + /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */ + hl_hw_queue_reset(hdev, hard_reset); + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + hl_cq_reset(hdev, &hdev->completion_queue[i]); + + /* Make sure the setup phase for the user context will run again */ + if (hdev->user_ctx) { + atomic_set(&hdev->user_ctx->thread_restore_token, 1); + hdev->user_ctx->thread_restore_wait_token = 0; + } + + /* Finished tear-down, starting to re-initialize */ + + if (hard_reset) { + hdev->device_cpu_disabled = false; + + /* Allocate the kernel context */ + hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), + GFP_KERNEL); + if (!hdev->kernel_ctx) { + rc = -ENOMEM; + goto out_err; + } + + hdev->user_ctx = NULL; + + rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); + if (rc) { + dev_err(hdev->dev, + "failed to init kernel ctx in hard reset\n"); + kfree(hdev->kernel_ctx); + hdev->kernel_ctx = NULL; + goto out_err; + } + } + + rc = hdev->asic_funcs->hw_init(hdev); + if (rc) { + dev_err(hdev->dev, + "failed to initialize the H/W after reset\n"); + goto out_err; + } + + hdev->disabled = false; + + /* Check that the communication with the device is working */ + rc = hdev->asic_funcs->test_queues(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to detect if device is alive after reset\n"); + goto out_err; + } + + if (hard_reset) { + rc = device_late_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed late init after hard reset\n"); + goto out_err; + } + + rc = hl_vm_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to init memory module after hard reset\n"); + goto out_err; + } + + hl_set_max_power(hdev, hdev->max_power); + + hdev->hard_reset_pending = false; + } else { + rc = hdev->asic_funcs->soft_reset_late_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed late init after soft reset\n"); + goto out_err; + } + } + + atomic_set(&hdev->in_reset, 0); + + if (hard_reset) + hdev->hard_reset_cnt++; + else + hdev->soft_reset_cnt++; + + return 0; + +out_err: + hdev->disabled = true; + + if (hard_reset) { + dev_err(hdev->dev, + "Failed to reset! Device is NOT usable\n"); + hdev->hard_reset_cnt++; + } else { + dev_err(hdev->dev, + "Failed to do soft-reset, trying hard reset\n"); + hdev->soft_reset_cnt++; + hard_reset = true; + goto again; + } + + atomic_set(&hdev->in_reset, 0); + + return rc; +} + +/* + * hl_device_init - main initialization function for habanalabs device + * + * @hdev: pointer to habanalabs device structure + * + * Allocate an id for the device, do early initialization and then call the + * ASIC specific initialization functions. Finally, create the cdev and the + * Linux device to expose it to the user + */ +int hl_device_init(struct hl_device *hdev, struct class *hclass) +{ + int i, rc, cq_ready_cnt; + + /* Create device */ + rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops); + + if (rc) + goto out_disabled; + + /* Initialize ASIC function pointers and perform early init */ + rc = device_early_init(hdev); + if (rc) + goto release_device; + + /* + * Start calling ASIC initialization. First S/W then H/W and finally + * late init + */ + rc = hdev->asic_funcs->sw_init(hdev); + if (rc) + goto early_fini; + + /* + * Initialize the H/W queues. 
Must be done before hw_init, because + * there the addresses of the kernel queue are being written to the + * registers of the device + */ + rc = hl_hw_queues_create(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize kernel queues\n"); + goto sw_fini; + } + + /* + * Initialize the completion queues. Must be done before hw_init, + * because there the addresses of the completion queues are being + * passed as arguments to request_irq + */ + hdev->completion_queue = + kcalloc(hdev->asic_prop.completion_queues_count, + sizeof(*hdev->completion_queue), GFP_KERNEL); + + if (!hdev->completion_queue) { + dev_err(hdev->dev, "failed to allocate completion queues\n"); + rc = -ENOMEM; + goto hw_queues_destroy; + } + + for (i = 0, cq_ready_cnt = 0; + i < hdev->asic_prop.completion_queues_count; + i++, cq_ready_cnt++) { + rc = hl_cq_init(hdev, &hdev->completion_queue[i], i); + if (rc) { + dev_err(hdev->dev, + "failed to initialize completion queue\n"); + goto cq_fini; + } + } + + /* + * Initialize the event queue. Must be done before hw_init, + * because there the address of the event queue is being + * passed as argument to request_irq + */ + rc = hl_eq_init(hdev, &hdev->event_queue); + if (rc) { + dev_err(hdev->dev, "failed to initialize event queue\n"); + goto cq_fini; + } + + /* Allocate the kernel context */ + hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL); + if (!hdev->kernel_ctx) { + rc = -ENOMEM; + goto eq_fini; + } + + hdev->user_ctx = NULL; + + rc = hl_ctx_init(hdev, hdev->kernel_ctx, true); + if (rc) { + dev_err(hdev->dev, "failed to initialize kernel context\n"); + goto free_ctx; + } + + rc = hl_cb_pool_init(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize CB pool\n"); + goto release_ctx; + } + + rc = hl_sysfs_init(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize sysfs\n"); + goto free_cb_pool; + } + + hl_debugfs_add_device(hdev); + + if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { + dev_info(hdev->dev, + "H/W state is dirty, must reset before initializing\n"); + hdev->asic_funcs->hw_fini(hdev, true); + } + + rc = hdev->asic_funcs->hw_init(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize the H/W\n"); + rc = 0; + goto out_disabled; + } + + hdev->disabled = false; + + /* Check that the communication with the device is working */ + rc = hdev->asic_funcs->test_queues(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to detect if device is alive\n"); + rc = 0; + goto out_disabled; + } + + /* After test_queues, KMD can start sending messages to device CPU */ + + rc = device_late_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed late initialization\n"); + rc = 0; + goto out_disabled; + } + + dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n", + hdev->asic_name, + hdev->asic_prop.dram_size / 1024 / 1024 / 1024); + + rc = hl_vm_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize memory module\n"); + rc = 0; + goto out_disabled; + } + + /* + * hl_hwmon_init must be called after device_late_init, because only + * there we get the information from the device about which + * hwmon-related sensors the device supports + */ + rc = hl_hwmon_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize hwmon\n"); + rc = 0; + goto out_disabled; + } + + dev_notice(hdev->dev, + "Successfully added device to habanalabs driver\n"); + + hdev->init_done = true; + + return 0; + +free_cb_pool: + hl_cb_pool_fini(hdev); +release_ctx: + if (hl_ctx_put(hdev->kernel_ctx) != 1) + dev_err(hdev->dev, + 
"kernel ctx is still alive on initialization failure\n"); +free_ctx: + kfree(hdev->kernel_ctx); +eq_fini: + hl_eq_fini(hdev, &hdev->event_queue); +cq_fini: + for (i = 0 ; i < cq_ready_cnt ; i++) + hl_cq_fini(hdev, &hdev->completion_queue[i]); + kfree(hdev->completion_queue); +hw_queues_destroy: + hl_hw_queues_destroy(hdev); +sw_fini: + hdev->asic_funcs->sw_fini(hdev); +early_fini: + device_early_fini(hdev); +release_device: + device_destroy(hclass, hdev->dev->devt); + cdev_del(&hdev->cdev); +out_disabled: + hdev->disabled = true; + if (hdev->pdev) + dev_err(&hdev->pdev->dev, + "Failed to initialize hl%d. Device is NOT usable !\n", + hdev->id); + else + pr_err("Failed to initialize hl%d. Device is NOT usable !\n", + hdev->id); + + return rc; +} + +/* + * hl_device_fini - main tear-down function for habanalabs device + * + * @hdev: pointer to habanalabs device structure + * + * Destroy the device, call ASIC fini functions and release the id + */ +void hl_device_fini(struct hl_device *hdev) +{ + int i, rc; + ktime_t timeout; + + dev_info(hdev->dev, "Removing device\n"); + + /* + * This function is competing with the reset function, so try to + * take the reset atomic and if we are already in middle of reset, + * wait until reset function is finished. Reset function is designed + * to always finish (could take up to a few seconds in worst case). + */ + + timeout = ktime_add_us(ktime_get(), + HL_PENDING_RESET_PER_SEC * 1000 * 1000 * 4); + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + while (rc) { + usleep_range(50, 200); + rc = atomic_cmpxchg(&hdev->in_reset, 0, 1); + if (ktime_compare(ktime_get(), timeout) > 0) { + WARN(1, "Failed to remove device because reset function did not finish\n"); + return; + } + }; + + /* Mark device as disabled */ + hdev->disabled = true; + + hl_hwmon_fini(hdev); + + device_late_fini(hdev); + + hl_debugfs_remove_device(hdev); + + hl_sysfs_fini(hdev); + + /* + * Halt the engines and disable interrupts so we won't get any more + * completions from H/W and we won't have any accesses from the + * H/W to the host machine + */ + hdev->asic_funcs->halt_engines(hdev, true); + + /* Go over all the queues, release all CS and their jobs */ + hl_cs_rollback_all(hdev); + + hl_cb_pool_fini(hdev); + + /* Release kernel context */ + if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1)) + dev_err(hdev->dev, "kernel ctx is still alive\n"); + + /* Reset the H/W. It will be in idle state after this returns */ + hdev->asic_funcs->hw_fini(hdev, true); + + hl_vm_fini(hdev); + + hl_eq_fini(hdev, &hdev->event_queue); + + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + hl_cq_fini(hdev, &hdev->completion_queue[i]); + kfree(hdev->completion_queue); + + hl_hw_queues_destroy(hdev); + + /* Call ASIC S/W finalize function */ + hdev->asic_funcs->sw_fini(hdev); + + device_early_fini(hdev); + + /* Hide device from user */ + device_destroy(hdev->dev->class, hdev->dev->devt); + cdev_del(&hdev->cdev); + + pr_info("removed device successfully\n"); +} + +/* + * hl_poll_timeout_memory - Periodically poll a host memory address + * until it is not zero or a timeout occurs + * @hdev: pointer to habanalabs device structure + * @addr: Address to poll + * @timeout_us: timeout in us + * @val: Variable to read the value into + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. 
+ * + * The function sleeps for 100us with timeout value of + * timeout_us + */ +int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, + u32 timeout_us, u32 *val) +{ + /* + * address in this function points always to a memory location in the + * host's (server's) memory. That location is updated asynchronously + * either by the direct access of the device or by another core + */ + u32 *paddr = (u32 *) (uintptr_t) addr; + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); + + might_sleep(); + + for (;;) { + /* + * Flush CPU read/write buffers to make sure we read updates + * done by other cores or by the device + */ + mb(); + *val = *paddr; + if (*val) + break; + if (ktime_compare(ktime_get(), timeout) > 0) { + *val = *paddr; + break; + } + usleep_range((100 >> 2) + 1, 100); + } + + return *val ? 0 : -ETIMEDOUT; +} + +/* + * hl_poll_timeout_devicememory - Periodically poll a device memory address + * until it is not zero or a timeout occurs + * @hdev: pointer to habanalabs device structure + * @addr: Device address to poll + * @timeout_us: timeout in us + * @val: Variable to read the value into + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. + * + * The function sleeps for 100us with timeout value of + * timeout_us + */ +int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr, + u32 timeout_us, u32 *val) +{ + ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); + + might_sleep(); + + for (;;) { + *val = readl(addr); + if (*val) + break; + if (ktime_compare(ktime_get(), timeout) > 0) { + *val = readl(addr); + break; + } + usleep_range((100 >> 2) + 1, 100); + } + + return *val ? 0 : -ETIMEDOUT; +} + +/* + * MMIO register access helper functions. + */ + +/* + * hl_rreg - Read an MMIO register + * + * @hdev: pointer to habanalabs device structure + * @reg: MMIO register offset (in bytes) + * + * Returns the value of the MMIO register we are asked to read + * + */ +inline u32 hl_rreg(struct hl_device *hdev, u32 reg) +{ + return readl(hdev->rmmio + reg); +} + +/* + * hl_wreg - Write to an MMIO register + * + * @hdev: pointer to habanalabs device structure + * @reg: MMIO register offset (in bytes) + * @val: 32-bit value + * + * Writes the 32-bit value into the MMIO register + * + */ +inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val) +{ + writel(val, hdev->rmmio + reg); +} diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile new file mode 100644 index 000000000000..e458e5ba500b --- /dev/null +++ b/drivers/misc/habanalabs/goya/Makefile @@ -0,0 +1,3 @@ +subdir-ccflags-y += -I$(src) + +HL_GOYA_FILES := goya/goya.o goya/goya_security.o goya/goya_hwmgr.o diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c new file mode 100644 index 000000000000..238dd57c541b --- /dev/null +++ b/drivers/misc/habanalabs/goya/goya.c @@ -0,0 +1,5391 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "goyaP.h" +#include "include/hw_ip/mmu/mmu_general.h" +#include "include/hw_ip/mmu/mmu_v1_0.h" +#include "include/goya/asic_reg/goya_masks.h" + +#include +#include +#include +#include +#include +#include + +/* + * GOYA security scheme: + * + * 1. Host is protected by: + * - Range registers (When MMU is enabled, DMA RR does NOT protect host) + * - MMU + * + * 2. 
DRAM is protected by: + * - Range registers (protect the first 512MB) + * - MMU (isolation between users) + * + * 3. Configuration is protected by: + * - Range registers + * - Protection bits + * + * When MMU is disabled: + * + * QMAN DMA: PQ, CQ, CP, DMA are secured. + * PQ, CB and the data are on the host. + * + * QMAN TPC/MME: + * PQ, CQ and CP are not secured. + * PQ, CB and the data are on the SRAM/DRAM. + * + * Since QMAN DMA is secured, KMD is parsing the DMA CB: + * - KMD checks DMA pointer + * - WREG, MSG_PROT are not allowed. + * - MSG_LONG/SHORT are allowed. + * + * A read/write transaction by the QMAN to a protected area will succeed if + * and only if the QMAN's CP is secured and MSG_PROT is used + * + * + * When MMU is enabled: + * + * QMAN DMA: PQ, CQ and CP are secured. + * MMU is set to bypass on the Secure props register of the QMAN. + * The reasons we don't enable MMU for PQ, CQ and CP are: + * - PQ entry is in kernel address space and KMD doesn't map it. + * - CP writes to MSIX register and to kernel address space (completion + * queue). + * + * DMA is not secured but because CP is secured, KMD still needs to parse the + * CB, but doesn't need to check the DMA addresses. + * + * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD + * doesn't map memory in MMU. + * + * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode) + * + * DMA RR does NOT protect host because DMA is not secured + * + */ + +#define GOYA_MMU_REGS_NUM 61 + +#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */ + +#define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */ +#define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */ +#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */ +#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */ +#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */ +#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */ +#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ +#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) +#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30) + +#define GOYA_QMAN0_FENCE_VAL 0xD169B243 + +#define GOYA_MAX_INITIATORS 20 + +#define GOYA_MAX_STRING_LEN 20 + +#define GOYA_CB_POOL_CB_CNT 512 +#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */ + +static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = { + "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3", + "goya cq 4", "goya cpu eq" +}; + +static u16 goya_packet_sizes[MAX_PACKET_ID] = { + [PACKET_WREG_32] = sizeof(struct packet_wreg32), + [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk), + [PACKET_MSG_LONG] = sizeof(struct packet_msg_long), + [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short), + [PACKET_CP_DMA] = sizeof(struct packet_cp_dma), + [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot), + [PACKET_FENCE] = sizeof(struct packet_fence), + [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma), + [PACKET_NOP] = sizeof(struct packet_nop), + [PACKET_STOP] = sizeof(struct packet_stop) +}; + +static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = { + mmDMA_QM_0_GLBL_NON_SECURE_PROPS, + mmDMA_QM_1_GLBL_NON_SECURE_PROPS, + mmDMA_QM_2_GLBL_NON_SECURE_PROPS, + mmDMA_QM_3_GLBL_NON_SECURE_PROPS, + mmDMA_QM_4_GLBL_NON_SECURE_PROPS, + mmTPC0_QM_GLBL_SECURE_PROPS, + mmTPC0_QM_GLBL_NON_SECURE_PROPS, + mmTPC0_CMDQ_GLBL_SECURE_PROPS, + mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC0_CFG_ARUSER, + mmTPC0_CFG_AWUSER, + mmTPC1_QM_GLBL_SECURE_PROPS, + mmTPC1_QM_GLBL_NON_SECURE_PROPS, + mmTPC1_CMDQ_GLBL_SECURE_PROPS, + mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC1_CFG_ARUSER, + 
mmTPC1_CFG_AWUSER, + mmTPC2_QM_GLBL_SECURE_PROPS, + mmTPC2_QM_GLBL_NON_SECURE_PROPS, + mmTPC2_CMDQ_GLBL_SECURE_PROPS, + mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC2_CFG_ARUSER, + mmTPC2_CFG_AWUSER, + mmTPC3_QM_GLBL_SECURE_PROPS, + mmTPC3_QM_GLBL_NON_SECURE_PROPS, + mmTPC3_CMDQ_GLBL_SECURE_PROPS, + mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC3_CFG_ARUSER, + mmTPC3_CFG_AWUSER, + mmTPC4_QM_GLBL_SECURE_PROPS, + mmTPC4_QM_GLBL_NON_SECURE_PROPS, + mmTPC4_CMDQ_GLBL_SECURE_PROPS, + mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC4_CFG_ARUSER, + mmTPC4_CFG_AWUSER, + mmTPC5_QM_GLBL_SECURE_PROPS, + mmTPC5_QM_GLBL_NON_SECURE_PROPS, + mmTPC5_CMDQ_GLBL_SECURE_PROPS, + mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC5_CFG_ARUSER, + mmTPC5_CFG_AWUSER, + mmTPC6_QM_GLBL_SECURE_PROPS, + mmTPC6_QM_GLBL_NON_SECURE_PROPS, + mmTPC6_CMDQ_GLBL_SECURE_PROPS, + mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC6_CFG_ARUSER, + mmTPC6_CFG_AWUSER, + mmTPC7_QM_GLBL_SECURE_PROPS, + mmTPC7_QM_GLBL_NON_SECURE_PROPS, + mmTPC7_CMDQ_GLBL_SECURE_PROPS, + mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC7_CFG_ARUSER, + mmTPC7_CFG_AWUSER, + mmMME_QM_GLBL_SECURE_PROPS, + mmMME_QM_GLBL_NON_SECURE_PROPS, + mmMME_CMDQ_GLBL_SECURE_PROPS, + mmMME_CMDQ_GLBL_NON_SECURE_PROPS, + mmMME_SBA_CONTROL_DATA, + mmMME_SBB_CONTROL_DATA, + mmMME_SBC_CONTROL_DATA, + mmMME_WBC_CONTROL_DATA +}; + +#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121 + +static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = { + GOYA_ASYNC_EVENT_ID_PCIE_IF, + GOYA_ASYNC_EVENT_ID_TPC0_ECC, + GOYA_ASYNC_EVENT_ID_TPC1_ECC, + GOYA_ASYNC_EVENT_ID_TPC2_ECC, + GOYA_ASYNC_EVENT_ID_TPC3_ECC, + GOYA_ASYNC_EVENT_ID_TPC4_ECC, + GOYA_ASYNC_EVENT_ID_TPC5_ECC, + GOYA_ASYNC_EVENT_ID_TPC6_ECC, + GOYA_ASYNC_EVENT_ID_TPC7_ECC, + GOYA_ASYNC_EVENT_ID_MME_ECC, + GOYA_ASYNC_EVENT_ID_MME_ECC_EXT, + GOYA_ASYNC_EVENT_ID_MMU_ECC, + GOYA_ASYNC_EVENT_ID_DMA_MACRO, + GOYA_ASYNC_EVENT_ID_DMA_ECC, + GOYA_ASYNC_EVENT_ID_CPU_IF_ECC, + GOYA_ASYNC_EVENT_ID_PSOC_MEM, + GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT, + GOYA_ASYNC_EVENT_ID_SRAM0, + GOYA_ASYNC_EVENT_ID_SRAM1, + GOYA_ASYNC_EVENT_ID_SRAM2, + GOYA_ASYNC_EVENT_ID_SRAM3, + GOYA_ASYNC_EVENT_ID_SRAM4, + GOYA_ASYNC_EVENT_ID_SRAM5, + GOYA_ASYNC_EVENT_ID_SRAM6, + GOYA_ASYNC_EVENT_ID_SRAM7, + GOYA_ASYNC_EVENT_ID_SRAM8, + GOYA_ASYNC_EVENT_ID_SRAM9, + GOYA_ASYNC_EVENT_ID_SRAM10, + GOYA_ASYNC_EVENT_ID_SRAM11, + GOYA_ASYNC_EVENT_ID_SRAM12, + GOYA_ASYNC_EVENT_ID_SRAM13, + GOYA_ASYNC_EVENT_ID_SRAM14, + GOYA_ASYNC_EVENT_ID_SRAM15, + GOYA_ASYNC_EVENT_ID_SRAM16, + GOYA_ASYNC_EVENT_ID_SRAM17, + GOYA_ASYNC_EVENT_ID_SRAM18, + GOYA_ASYNC_EVENT_ID_SRAM19, + GOYA_ASYNC_EVENT_ID_SRAM20, + GOYA_ASYNC_EVENT_ID_SRAM21, + GOYA_ASYNC_EVENT_ID_SRAM22, + GOYA_ASYNC_EVENT_ID_SRAM23, + GOYA_ASYNC_EVENT_ID_SRAM24, + GOYA_ASYNC_EVENT_ID_SRAM25, + GOYA_ASYNC_EVENT_ID_SRAM26, + GOYA_ASYNC_EVENT_ID_SRAM27, + GOYA_ASYNC_EVENT_ID_SRAM28, + GOYA_ASYNC_EVENT_ID_SRAM29, + GOYA_ASYNC_EVENT_ID_GIC500, + GOYA_ASYNC_EVENT_ID_PLL0, + GOYA_ASYNC_EVENT_ID_PLL1, + GOYA_ASYNC_EVENT_ID_PLL3, + GOYA_ASYNC_EVENT_ID_PLL4, + GOYA_ASYNC_EVENT_ID_PLL5, + GOYA_ASYNC_EVENT_ID_PLL6, + GOYA_ASYNC_EVENT_ID_AXI_ECC, + GOYA_ASYNC_EVENT_ID_L2_RAM_ECC, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT, + GOYA_ASYNC_EVENT_ID_PCIE_DEC, + GOYA_ASYNC_EVENT_ID_TPC0_DEC, + GOYA_ASYNC_EVENT_ID_TPC1_DEC, + GOYA_ASYNC_EVENT_ID_TPC2_DEC, + GOYA_ASYNC_EVENT_ID_TPC3_DEC, + GOYA_ASYNC_EVENT_ID_TPC4_DEC, + GOYA_ASYNC_EVENT_ID_TPC5_DEC, + GOYA_ASYNC_EVENT_ID_TPC6_DEC, + GOYA_ASYNC_EVENT_ID_TPC7_DEC, + 
GOYA_ASYNC_EVENT_ID_MME_WACS, + GOYA_ASYNC_EVENT_ID_MME_WACSD, + GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER, + GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC, + GOYA_ASYNC_EVENT_ID_PSOC, + GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC1_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC2_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC3_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC4_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC5_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC6_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC7_CMDQ, + GOYA_ASYNC_EVENT_ID_TPC0_QM, + GOYA_ASYNC_EVENT_ID_TPC1_QM, + GOYA_ASYNC_EVENT_ID_TPC2_QM, + GOYA_ASYNC_EVENT_ID_TPC3_QM, + GOYA_ASYNC_EVENT_ID_TPC4_QM, + GOYA_ASYNC_EVENT_ID_TPC5_QM, + GOYA_ASYNC_EVENT_ID_TPC6_QM, + GOYA_ASYNC_EVENT_ID_TPC7_QM, + GOYA_ASYNC_EVENT_ID_MME_QM, + GOYA_ASYNC_EVENT_ID_MME_CMDQ, + GOYA_ASYNC_EVENT_ID_DMA0_QM, + GOYA_ASYNC_EVENT_ID_DMA1_QM, + GOYA_ASYNC_EVENT_ID_DMA2_QM, + GOYA_ASYNC_EVENT_ID_DMA3_QM, + GOYA_ASYNC_EVENT_ID_DMA4_QM, + GOYA_ASYNC_EVENT_ID_DMA0_CH, + GOYA_ASYNC_EVENT_ID_DMA1_CH, + GOYA_ASYNC_EVENT_ID_DMA2_CH, + GOYA_ASYNC_EVENT_ID_DMA3_CH, + GOYA_ASYNC_EVENT_ID_DMA4_CH, + GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH0, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH1, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH2, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH3, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 +}; + +static int goya_armcp_info_get(struct hl_device *hdev); +static void goya_mmu_prepare(struct hl_device *hdev, u32 asid); +static int goya_mmu_clear_pgt_range(struct hl_device *hdev); +static int goya_mmu_set_dram_default_page(struct hl_device *hdev); +static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, + u64 phys_addr); + +static void goya_get_fixed_properties(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int i; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; + prop->hw_queues_props[i].kmd_only = 0; + } + + for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; + prop->hw_queues_props[i].kmd_only = 1; + } + + for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES + + NUMBER_OF_INT_HW_QUEUES; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_INT; + prop->hw_queues_props[i].kmd_only = 0; + } + + for (; i < HL_MAX_QUEUES; i++) + prop->hw_queues_props[i].type = QUEUE_TYPE_NA; + + prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; + + prop->dram_base_address = DRAM_PHYS_BASE; + prop->dram_size = DRAM_PHYS_DEFAULT_SIZE; + prop->dram_end_address = prop->dram_base_address + prop->dram_size; + prop->dram_user_base_address = DRAM_BASE_ADDR_USER; + + prop->sram_base_address = SRAM_BASE_ADDR; + prop->sram_size = SRAM_SIZE; + prop->sram_end_address = prop->sram_base_address + prop->sram_size; + prop->sram_user_base_address = prop->sram_base_address + + SRAM_USER_BASE_OFFSET; + + prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR; + prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR; + if (hdev->pldm) + prop->mmu_pgt_size = 
0x800000; /* 8MB */ + else + prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE; + prop->mmu_pte_size = HL_PTE_SIZE; + prop->mmu_hop_table_size = HOP_TABLE_SIZE; + prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; + prop->dram_page_size = PAGE_SIZE_2MB; + + prop->host_phys_base_address = HOST_PHYS_BASE; + prop->va_space_host_start_address = VA_HOST_SPACE_START; + prop->va_space_host_end_address = VA_HOST_SPACE_END; + prop->va_space_dram_start_address = VA_DDR_SPACE_START; + prop->va_space_dram_end_address = VA_DDR_SPACE_END; + prop->dram_size_for_default_page_mapping = + prop->va_space_dram_end_address; + prop->cfg_size = CFG_SIZE; + prop->max_asid = MAX_ASID; + prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE; + prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT; + prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE; + prop->max_power_default = MAX_POWER_DEFAULT; + prop->tpc_enabled_mask = TPC_ENABLED_MASK; + + prop->high_pll = PLL_HIGH_DEFAULT; +} + +int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode) +{ + struct armcp_packet pkt; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT); + + return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, + sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL); +} + +/* + * goya_pci_bars_map - Map PCI BARS of Goya device + * + * @hdev: pointer to hl_device structure + * + * Request PCI regions and map them to kernel virtual addresses. + * Returns 0 on success + * + */ +static int goya_pci_bars_map(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int rc; + + rc = pci_request_regions(pdev, HL_NAME); + if (rc) { + dev_err(hdev->dev, "Cannot obtain PCI resources\n"); + return rc; + } + + hdev->pcie_bar[SRAM_CFG_BAR_ID] = + pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID); + if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) { + dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n"); + rc = -ENODEV; + goto err_release_regions; + } + + hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID); + if (!hdev->pcie_bar[MSIX_BAR_ID]) { + dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n"); + rc = -ENODEV; + goto err_unmap_sram_cfg; + } + + hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID); + if (!hdev->pcie_bar[DDR_BAR_ID]) { + dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n"); + rc = -ENODEV; + goto err_unmap_msix; + } + + hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (CFG_BASE - SRAM_BASE_ADDR); + + return 0; + +err_unmap_msix: + iounmap(hdev->pcie_bar[MSIX_BAR_ID]); +err_unmap_sram_cfg: + iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]); +err_release_regions: + pci_release_regions(pdev); + + return rc; +} + +/* + * goya_pci_bars_unmap - Unmap PCI BARS of Goya device + * + * @hdev: pointer to hl_device structure + * + * Release all PCI BARS and unmap their virtual addresses + * + */ +static void goya_pci_bars_unmap(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + iounmap(hdev->pcie_bar[DDR_BAR_ID]); + iounmap(hdev->pcie_bar[MSIX_BAR_ID]); + iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]); + pci_release_regions(pdev); +} + +/* + * goya_elbi_write - Write through the ELBI interface + * + * @hdev: pointer to hl_device structure + * + * return 0 on success, -1 on failure + * + */ +static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data) +{ + struct pci_dev *pdev = hdev->pdev; + ktime_t timeout; + u32 val; + + /* Clear previous status */ + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0); + + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr); + 
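/* Latch the data word, then trigger the transaction through the
+	 * control register; the loop below polls the status register for
+	 * completion within a ~10ms budget.
+	 */
+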
pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, + PCI_CONFIG_ELBI_CTRL_WRITE); + + timeout = ktime_add_ms(ktime_get(), 10); + for (;;) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val); + if (val & PCI_CONFIG_ELBI_STS_MASK) + break; + if (ktime_compare(ktime_get(), timeout) > 0) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, + &val); + break; + } + usleep_range(300, 500); + } + + if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) + return 0; + + if (val & PCI_CONFIG_ELBI_STS_ERR) { + dev_err(hdev->dev, "Error writing to ELBI\n"); + return -EIO; + } + + if (!(val & PCI_CONFIG_ELBI_STS_MASK)) { + dev_err(hdev->dev, "ELBI write didn't finish in time\n"); + return -EIO; + } + + dev_err(hdev->dev, "ELBI write has undefined bits in status\n"); + return -EIO; +} + +/* + * goya_iatu_write - iatu write routine + * + * @hdev: pointer to hl_device structure + * + */ +static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data) +{ + u32 dbi_offset; + int rc; + + dbi_offset = addr & 0xFFF; + + rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000); + rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data); + + if (rc) + return -EIO; + + return 0; +} + +static void goya_reset_link_through_bridge(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct pci_dev *parent_port; + u16 val; + + parent_port = pdev->bus->self; + pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val); + val |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(1); + + val &= ~(PCI_BRIDGE_CTL_BUS_RESET); + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(3); +} + +/* + * goya_set_ddr_bar_base - set DDR bar to map specific device address + * + * @hdev: pointer to hl_device structure + * @addr: address in DDR. Must be aligned to DDR bar size + * + * This function configures the iATU so that the DDR bar will start at the + * specified addr. 
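+ *
+ * Illustrative sketch (hypothetical caller; bar_size stands for the DDR
+ * BAR length, i.e. prop->dram_pci_bar_size, and dram_addr for the device
+ * DRAM address being accessed):
+ *
+ *	u64 bar_base = dram_addr & ~(bar_size - 1);
+ *
+ *	rc = goya_set_ddr_bar_base(hdev, bar_base);
+ *	if (!rc)
+ *		val = readl(hdev->pcie_bar[DDR_BAR_ID] +
+ *				(dram_addr - bar_base));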
+ * + */ +static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr) +{ + struct goya_device *goya = hdev->asic_specific; + int rc; + + if ((goya) && (goya->ddr_bar_cur_addr == addr)) + return 0; + + /* Inbound Region 1 - Bar 4 - Point to DDR */ + rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr)); + rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr)); + rc |= goya_iatu_write(hdev, 0x300, 0); + /* Enable + Bar match + match enable + Bar 4 */ + rc |= goya_iatu_write(hdev, 0x304, 0xC0080400); + + /* Return the DBI window to the default location */ + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0); + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0); + + if (rc) { + dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr); + return -EIO; + } + + if (goya) + goya->ddr_bar_cur_addr = addr; + + return 0; +} + +/* + * goya_init_iatu - Initialize the iATU unit inside the PCI controller + * + * @hdev: pointer to hl_device structure + * + * This is needed in case the firmware doesn't initialize the iATU + * + */ +static int goya_init_iatu(struct hl_device *hdev) +{ + int rc; + + /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */ + rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR)); + rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR)); + rc |= goya_iatu_write(hdev, 0x100, 0); + /* Enable + Bar match + match enable */ + rc |= goya_iatu_write(hdev, 0x104, 0xC0080000); + + /* Inbound Region 1 - Bar 4 - Point to DDR */ + rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE); + + /* Outbound Region 0 - Point to Host */ + rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE)); + rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE)); + rc |= goya_iatu_write(hdev, 0x010, + lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1)); + rc |= goya_iatu_write(hdev, 0x014, 0); + rc |= goya_iatu_write(hdev, 0x018, 0); + rc |= goya_iatu_write(hdev, 0x020, + upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1)); + /* Increase region size */ + rc |= goya_iatu_write(hdev, 0x000, 0x00002000); + /* Enable */ + rc |= goya_iatu_write(hdev, 0x004, 0x80000000); + + /* Return the DBI window to the default location */ + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0); + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0); + + if (rc) + return -EIO; + + return 0; +} + +/* + * goya_early_init - GOYA early initialization code + * + * @hdev: pointer to hl_device structure + * + * Verify PCI bars + * Set DMA masks + * PCI controller initialization + * Map PCI bars + * + */ +static int goya_early_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct pci_dev *pdev = hdev->pdev; + u32 val; + int rc; + + goya_get_fixed_properties(hdev); + + /* Check BAR sizes */ + if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n", + SRAM_CFG_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + SRAM_CFG_BAR_ID), + CFG_BAR_SIZE); + return -ENODEV; + } + + if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? 
BAR %d size %llu, expecting %llu\n", + MSIX_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + MSIX_BAR_ID), + MSIX_BAR_SIZE); + return -ENODEV; + } + + prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); + + /* set DMA mask for GOYA */ + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); + if (rc) { + dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n"); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(hdev->dev, + "Unable to set pci dma mask to 32 bits\n"); + return rc; + } + } + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); + if (rc) { + dev_warn(hdev->dev, + "Unable to set pci consistent dma mask to 39 bits\n"); + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(hdev->dev, + "Unable to set pci consistent dma mask to 32 bits\n"); + return rc; + } + } + + if (hdev->reset_pcilink) + goya_reset_link_through_bridge(hdev); + + rc = pci_enable_device_mem(pdev); + if (rc) { + dev_err(hdev->dev, "can't enable PCI device\n"); + return rc; + } + + pci_set_master(pdev); + + rc = goya_init_iatu(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize iATU\n"); + goto disable_device; + } + + rc = goya_pci_bars_map(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize PCI BARS\n"); + goto disable_device; + } + + if (!hdev->pldm) { + val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); + if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK) + dev_warn(hdev->dev, + "PCI strap is not configured correctly, PCI bus errors may occur\n"); + } + + return 0; + +disable_device: + pci_clear_master(pdev); + pci_disable_device(pdev); + + return rc; +} + +/* + * goya_early_fini - GOYA early finalization code + * + * @hdev: pointer to hl_device structure + * + * Unmap PCI bars + * + */ +static int goya_early_fini(struct hl_device *hdev) +{ + goya_pci_bars_unmap(hdev); + + pci_clear_master(hdev->pdev); + pci_disable_device(hdev->pdev); + + return 0; +} + +/* + * goya_fetch_psoc_frequency - Fetch PSOC frequency values + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_fetch_psoc_frequency(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + + prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR); + prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF); + prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD); + prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1); +} + +/* + * goya_late_init - GOYA late initialization code + * + * @hdev: pointer to hl_device structure + * + * Get ArmCP info and send message to CPU to enable PCI access + */ +static int goya_late_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + int rc; + + rc = goya->armcp_info_get(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to get armcp info\n"); + return rc; + } + + /* Now that we have the DRAM size in ASIC prop, we need to check + * its size and configure the DMA_IF DDR wrap protection (which is in + * the MMU block) accordingly. 
The value is the log2 of the DRAM size + */ + WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size)); + + rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS); + if (rc) { + dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); + return rc; + } + + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_INTS_REGISTER); + + goya_fetch_psoc_frequency(hdev); + + rc = goya_mmu_clear_pgt_range(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to clear MMU page tables range\n"); + goto disable_pci_access; + } + + rc = goya_mmu_set_dram_default_page(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to set DRAM default page\n"); + goto disable_pci_access; + } + + return 0; + +disable_pci_access: + goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); + + return rc; +} + +/* + * goya_late_fini - GOYA late tear-down code + * + * @hdev: pointer to hl_device structure + * + * Free sensors allocated structures + */ +void goya_late_fini(struct hl_device *hdev) +{ + const struct hwmon_channel_info **channel_info_arr; + int i = 0; + + if (!hdev->hl_chip_info->info) + return; + + channel_info_arr = hdev->hl_chip_info->info; + + while (channel_info_arr[i]) { + kfree(channel_info_arr[i]->config); + kfree(channel_info_arr[i]); + i++; + } + + kfree(channel_info_arr); + + hdev->hl_chip_info->info = NULL; +} + +/* + * goya_sw_init - Goya software initialization code + * + * @hdev: pointer to hl_device structure + * + */ +static int goya_sw_init(struct hl_device *hdev) +{ + struct goya_device *goya; + int rc; + + /* Allocate device structure */ + goya = kzalloc(sizeof(*goya), GFP_KERNEL); + if (!goya) + return -ENOMEM; + + goya->test_cpu_queue = goya_test_cpu_queue; + goya->armcp_info_get = goya_armcp_info_get; + + /* according to goya_init_iatu */ + goya->ddr_bar_cur_addr = DRAM_PHYS_BASE; + + goya->mme_clk = GOYA_PLL_FREQ_LOW; + goya->tpc_clk = GOYA_PLL_FREQ_LOW; + goya->ic_clk = GOYA_PLL_FREQ_LOW; + + hdev->asic_specific = goya; + + /* Create DMA pool for small allocations */ + hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), + &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0); + if (!hdev->dma_pool) { + dev_err(hdev->dev, "failed to create DMA pool\n"); + rc = -ENOMEM; + goto free_goya_device; + } + + hdev->cpu_accessible_dma_mem = + hdev->asic_funcs->dma_alloc_coherent(hdev, + CPU_ACCESSIBLE_MEM_SIZE, + &hdev->cpu_accessible_dma_address, + GFP_KERNEL | __GFP_ZERO); + + if (!hdev->cpu_accessible_dma_mem) { + dev_err(hdev->dev, + "failed to allocate %d of dma memory for CPU accessible memory space\n", + CPU_ACCESSIBLE_MEM_SIZE); + rc = -ENOMEM; + goto free_dma_pool; + } + + hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1); + if (!hdev->cpu_accessible_dma_pool) { + dev_err(hdev->dev, + "Failed to create CPU accessible DMA pool\n"); + rc = -ENOMEM; + goto free_cpu_pq_dma_mem; + } + + rc = gen_pool_add(hdev->cpu_accessible_dma_pool, + (uintptr_t) hdev->cpu_accessible_dma_mem, + CPU_ACCESSIBLE_MEM_SIZE, -1); + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to CPU accessible DMA pool\n"); + rc = -EFAULT; + goto free_cpu_pq_pool; + } + + spin_lock_init(&goya->hw_queues_lock); + + return 0; + +free_cpu_pq_pool: + gen_pool_destroy(hdev->cpu_accessible_dma_pool); +free_cpu_pq_dma_mem: + hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); +free_dma_pool: + dma_pool_destroy(hdev->dma_pool); +free_goya_device: + kfree(goya); + + return rc; +} + +/* + * goya_sw_fini - Goya 
software tear-down code + * + * @hdev: pointer to hl_device structure + * + */ +static int goya_sw_fini(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + gen_pool_destroy(hdev->cpu_accessible_dma_pool); + + hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); + + dma_pool_destroy(hdev->dma_pool); + + kfree(goya); + + return 0; +} + +static void goya_init_dma_qman(struct hl_device *hdev, int dma_id, + dma_addr_t bus_address) +{ + struct goya_device *goya = hdev->asic_specific; + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address)); + WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address)); + + WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH)); + WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0); + WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0); + + WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id); + + /* PQ has buffer of 2 cache lines, while CQ has 8 lines */ + WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002); + WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008); + + if (goya->hw_cap_initialized & HW_CAP_MMU) + WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED); + else + WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED); + + WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN); + WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE); +} + +static void goya_init_dma_ch(struct hl_device *hdev, int dma_id) +{ + u32 gic_base_lo, gic_base_hi; + u64 sob_addr; + u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi); + WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id); + + if (dma_id) + sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 + + (dma_id - 1) * 4; + else + sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007; + + WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr)); + WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr)); + WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001); +} + +/* + * goya_init_dma_qmans - Initialize QMAN DMA registers + * + * @hdev: pointer to hl_device structure + * + * Initialize the H/W registers of the QMAN DMA channels + * + */ +static void 
goya_init_dma_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + struct hl_hw_queue *q; + dma_addr_t bus_address; + int i; + + if (goya->hw_cap_initialized & HW_CAP_DMA) + return; + + q = &hdev->kernel_queues[0]; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) { + bus_address = q->bus_address + + hdev->asic_prop.host_phys_base_address; + + goya_init_dma_qman(hdev, i, bus_address); + goya_init_dma_ch(hdev, i); + } + + goya->hw_cap_initialized |= HW_CAP_DMA; +} + +/* + * goya_disable_external_queues - Disable external queues + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_disable_external_queues(struct hl_device *hdev) +{ + WREG32(mmDMA_QM_0_GLBL_CFG0, 0); + WREG32(mmDMA_QM_1_GLBL_CFG0, 0); + WREG32(mmDMA_QM_2_GLBL_CFG0, 0); + WREG32(mmDMA_QM_3_GLBL_CFG0, 0); + WREG32(mmDMA_QM_4_GLBL_CFG0, 0); +} + +static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg, + u32 cp_sts_reg, u32 glbl_sts0_reg) +{ + int rc; + u32 status; + + /* use the values of TPC0 as they are all the same*/ + + WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + + status = RREG32(cp_sts_reg); + if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) { + rc = hl_poll_timeout( + hdev, + cp_sts_reg, + status, + !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK), + 1000, + QMAN_FENCE_TIMEOUT_USEC); + + /* if QMAN is stuck in fence no need to check for stop */ + if (rc) + return 0; + } + + rc = hl_poll_timeout( + hdev, + glbl_sts0_reg, + status, + (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK), + 1000, + QMAN_STOP_TIMEOUT_USEC); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for QMAN to stop\n"); + return -EINVAL; + } + + return 0; +} + +/* + * goya_stop_external_queues - Stop external queues + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +static int goya_stop_external_queues(struct hl_device *hdev) +{ + int rc, retval = 0; + + rc = goya_stop_queue(hdev, + mmDMA_QM_0_GLBL_CFG1, + mmDMA_QM_0_CP_STS, + mmDMA_QM_0_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 0\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_1_GLBL_CFG1, + mmDMA_QM_1_CP_STS, + mmDMA_QM_1_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 1\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_2_GLBL_CFG1, + mmDMA_QM_2_CP_STS, + mmDMA_QM_2_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 2\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_3_GLBL_CFG1, + mmDMA_QM_3_CP_STS, + mmDMA_QM_3_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 3\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_4_GLBL_CFG1, + mmDMA_QM_4_CP_STS, + mmDMA_QM_4_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 4\n"); + retval = -EIO; + } + + return retval; +} + +static void goya_resume_external_queues(struct hl_device *hdev) +{ + WREG32(mmDMA_QM_0_GLBL_CFG1, 0); + WREG32(mmDMA_QM_1_GLBL_CFG1, 0); + WREG32(mmDMA_QM_2_GLBL_CFG1, 0); + WREG32(mmDMA_QM_3_GLBL_CFG1, 0); + WREG32(mmDMA_QM_4_GLBL_CFG1, 0); +} + +/* + * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +static int goya_init_cpu_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + struct hl_eq *eq; + dma_addr_t bus_address; + u32 status; + struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ]; + int err; + 
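+	/*
+	 * Hand the PQ/EQ and CPU-accessible memory addresses and sizes to
+	 * the embedded CPU (ArmCP) through the PSOC scratchpad registers,
+	 * then wait for it to report PQ_INIT_STATUS_READY_FOR_HOST. Nothing
+	 * to do when CPU queues are disabled or already initialized.
+	 */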
+ if (!hdev->cpu_queues_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_CPU_Q) + return 0; + + eq = &hdev->event_queue; + + bus_address = cpu_pq->bus_address + + hdev->asic_prop.host_phys_base_address; + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address)); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address)); + + bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address; + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address)); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address)); + + bus_address = hdev->cpu_accessible_dma_address + + hdev->asic_prop.host_phys_base_address; + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address)); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address)); + + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE); + + /* Used for EQ CI */ + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0); + + WREG32(mmCPU_IF_PF_PQ_PI, 0); + + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP); + + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_PI_UPDATE); + + err = hl_poll_timeout( + hdev, + mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, + status, + (status == PQ_INIT_STATUS_READY_FOR_HOST), + 1000, + GOYA_CPU_TIMEOUT_USEC); + + if (err) { + dev_err(hdev->dev, + "Failed to communicate with ARM CPU (ArmCP timeout)\n"); + return -EIO; + } + + goya->hw_cap_initialized |= HW_CAP_CPU_Q; + return 0; +} + +static void goya_set_pll_refclk(struct hl_device *hdev) +{ + WREG32(mmCPU_PLL_DIV_SEL_0, 0x0); + WREG32(mmCPU_PLL_DIV_SEL_1, 0x0); + WREG32(mmCPU_PLL_DIV_SEL_2, 0x0); + WREG32(mmCPU_PLL_DIV_SEL_3, 0x0); + + WREG32(mmIC_PLL_DIV_SEL_0, 0x0); + WREG32(mmIC_PLL_DIV_SEL_1, 0x0); + WREG32(mmIC_PLL_DIV_SEL_2, 0x0); + WREG32(mmIC_PLL_DIV_SEL_3, 0x0); + + WREG32(mmMC_PLL_DIV_SEL_0, 0x0); + WREG32(mmMC_PLL_DIV_SEL_1, 0x0); + WREG32(mmMC_PLL_DIV_SEL_2, 0x0); + WREG32(mmMC_PLL_DIV_SEL_3, 0x0); + + WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0); + WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0); + WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0); + WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0); + + WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0); + WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0); + WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0); + WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0); + + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0); + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0); + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0); + WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0); + + WREG32(mmTPC_PLL_DIV_SEL_0, 0x0); + WREG32(mmTPC_PLL_DIV_SEL_1, 0x0); + WREG32(mmTPC_PLL_DIV_SEL_2, 0x0); + WREG32(mmTPC_PLL_DIV_SEL_3, 0x0); +} + +static void goya_disable_clk_rlx(struct hl_device *hdev) +{ + WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010); + WREG32(mmIC_PLL_CLK_RLX_0, 0x100010); +} + +static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id) +{ + u64 tpc_eml_address; + u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset; + int err, slm_index; + + tpc_offset = tpc_id * 0x40000; + tpc_eml_offset = tpc_id * 0x200000; + tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE); + tpc_slm_offset = tpc_eml_address + 0x100000; + + /* + * Workaround for Bug H2 #2443 : + * "TPC SB is not initialized on chip reset" + */ + + val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset); + if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK) + dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n", + 
tpc_id); + + WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000); + + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF); + WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF); + + WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset, + 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT); + + err = hl_poll_timeout( + hdev, + mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset, + val, + (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK), + 1000, + HL_DEVICE_TIMEOUT_USEC); + + if (err) + dev_err(hdev->dev, + "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id); + + WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset, + 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT); + + msleep(GOYA_RESET_WAIT_MSEC); + + WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset, + ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT)); + + msleep(GOYA_RESET_WAIT_MSEC); + + for (slm_index = 0 ; slm_index < 256 ; slm_index++) + WREG32(tpc_slm_offset + (slm_index << 2), 0); + + val = RREG32(tpc_slm_offset); +} + +static void goya_tpc_mbist_workaround(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i; + + if (hdev->pldm) + return; + + if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST) + return; + + /* Workaround for H2 #2443 */ + + for (i = 0 ; i < TPC_MAX_NUM ; i++) + _goya_tpc_mbist_workaround(hdev, i); + + goya->hw_cap_initialized |= HW_CAP_TPC_MBIST; +} + +/* + * goya_init_golden_registers - Initialize golden registers + * + * @hdev: pointer to hl_device structure + * + * Initialize the H/W registers of the device + * + */ +static void goya_init_golden_registers(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u32 polynom[10], tpc_intr_mask, offset; + int i; + + if (goya->hw_cap_initialized & HW_CAP_GOLDEN) + return; + + polynom[0] = 0x00020080; + polynom[1] = 0x00401000; + polynom[2] = 0x00200800; + polynom[3] = 0x00002000; + polynom[4] = 0x00080200; + polynom[5] = 0x00040100; + polynom[6] = 0x00100400; + polynom[7] = 0x00004000; + polynom[8] = 0x00010000; + polynom[9] = 0x00008000; + + /* Mask all arithmetic interrupts from TPC */ + tpc_intr_mask = 0x7FFF; + + for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) { + WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204); + WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204); + + + WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207); + WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + 
offset, 0x207); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207); + WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207); + WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206); + WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101); + WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102); + WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103); + WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104); + WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105); + + WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105); + WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104); + WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103); + WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102); + WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101); + } + + WREG32(mmMME_STORE_MAX_CREDIT, 0x21); + WREG32(mmMME_AGU, 0x0f0f0f10); + WREG32(mmMME_SEI_MASK, ~0x0); + + WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101); + WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101); + WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101); + WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701); + WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401); + WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401); + WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301); + WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101); + WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105); + WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501); + WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501); + WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301); + WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401); + WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101); + WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202); + WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101); + WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201); + WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701); + WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101); + WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101); + WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101); + WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101); + WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701); + WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201); + WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101); + WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102); + WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701); + WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701); + WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707); + WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201); + WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201); + WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201); + WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102); + WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102); + WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102); + WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102); + WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102); + WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107); + WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106); + WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102); + WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102); + WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102); + WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102); + WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102); + WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702); + WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702); + 
WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602); + WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402); + WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202); + WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102); + WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401); + WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101); + WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101); + WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101); + WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101); + WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101); + WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107); + WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107); + WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101); + WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101); + WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101); + WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101); + WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501); + WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501); + WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301); + WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401); + WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101); + WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101); + WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101); + WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101); + + WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101); + WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102); + WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202); + WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201); + WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201); + WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202); + WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101); + WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101); + WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101); + + WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101); + WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101); + WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201); + WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102); + WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101); + WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202); + WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201); + WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201); + WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202); + WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101); + WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101); + WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101); + + WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101); + WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101); + WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301); + WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102); + WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101); + WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301); + WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201); + WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201); + WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402); + WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101); + WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101); + WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401); + + WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 
0x01040101); + WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101); + WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401); + WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102); + WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101); + WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702); + WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201); + WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201); + WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602); + WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101); + WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101); + WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301); + + WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101); + WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101); + WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501); + WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102); + WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101); + WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602); + WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201); + WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201); + WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702); + WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101); + WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101); + WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501); + + WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601); + WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702); + WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702); + WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101); + WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101); + WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501); + + for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) { + WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + + WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + + WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7); + } + + for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) { + WREG32(mmMME1_RTR_SCRAMB_EN + offset, + 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset, + 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT); + } + + for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) { + /* + * Workaround for Bug H2 #2441 : + * "ST.NOP set trace event illegal opcode" + */ + WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask); + + WREG32(mmTPC0_NRTR_SCRAMB_EN + offset, + 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset, + 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT); + } + + WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmDMA_NRTR_NON_LIN_SCRAMB, + 1 << 
DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT); + + WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT); + WREG32(mmPCI_NRTR_NON_LIN_SCRAMB, + 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT); + + /* + * Workaround for H2 #HW-23 bug + * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it + * to 16 on KMD DMA + * We need to limit only these DMAs because the user can only read + * from Host using DMA CH 1 + */ + WREG32(mmDMA_CH_0_CFG0, 0x0fff0010); + WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0); + + goya->hw_cap_initialized |= HW_CAP_GOLDEN; +} + +static void goya_init_mme_qman(struct hl_device *hdev) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + qman_base_addr = hdev->asic_prop.sram_base_address + + MME_QMAN_BASE_OFFSET; + + WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr)); + WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr)); + WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH)); + WREG32(mmMME_QM_PQ_PI, 0); + WREG32(mmMME_QM_PQ_CI, 0); + WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0); + WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4); + WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8); + WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC); + + WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo); + WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi); + WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo); + WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi); + + /* QMAN CQ has 8 cache lines */ + WREG32(mmMME_QM_CQ_CFG1, 0x00080008); + + WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo); + WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi); + + WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM); + + WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN); + + WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT); + + WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE); +} + +static void goya_init_mme_cmdq(struct hl_device *hdev) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + qman_base_addr = hdev->asic_prop.sram_base_address + + MME_QMAN_BASE_OFFSET; + + WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo); + WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi); + WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo); + WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi); + + /* CMDQ CQ has 20 cache lines */ + WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014); + + WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo); + WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi); + + WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ); + + WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN); + + 
WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT); + + WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE); +} + +static void goya_init_mme_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u32 so_base_lo, so_base_hi; + + if (goya->hw_cap_initialized & HW_CAP_MME) + return; + + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo); + WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi); + + goya_init_mme_qman(hdev); + goya_init_mme_cmdq(hdev); + + goya->hw_cap_initialized |= HW_CAP_MME; +} + +static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; + u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + qman_base_addr = hdev->asic_prop.sram_base_address + base_off; + + WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr)); + WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr)); + WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH)); + WREG32(mmTPC0_QM_PQ_PI + reg_off, 0); + WREG32(mmTPC0_QM_PQ_CI + reg_off, 0); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4); + WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8); + WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC); + + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + + WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008); + + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + + WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id); + + WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN); + + WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT); + + WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE); +} + +static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + 
WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + + WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN); + + WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT); + + WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE); +} + +static void goya_init_tpc_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u32 so_base_lo, so_base_hi; + u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW - + mmTPC0_CFG_SM_BASE_ADDRESS_LOW; + int i; + + if (goya->hw_cap_initialized & HW_CAP_TPC) + return; + + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + for (i = 0 ; i < TPC_MAX_NUM ; i++) { + WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off, + so_base_lo); + WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off, + so_base_hi); + } + + goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0); + goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1); + goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2); + goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3); + goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4); + goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5); + goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6); + goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7); + + for (i = 0 ; i < TPC_MAX_NUM ; i++) + goya_init_tpc_cmdq(hdev, i); + + goya->hw_cap_initialized |= HW_CAP_TPC; +} + +/* + * goya_disable_internal_queues - Disable internal queues + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_disable_internal_queues(struct hl_device *hdev) +{ + WREG32(mmMME_QM_GLBL_CFG0, 0); + WREG32(mmMME_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC0_QM_GLBL_CFG0, 0); + WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC1_QM_GLBL_CFG0, 0); + WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC2_QM_GLBL_CFG0, 0); + WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC3_QM_GLBL_CFG0, 0); + WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC4_QM_GLBL_CFG0, 0); + WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC5_QM_GLBL_CFG0, 0); + WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC6_QM_GLBL_CFG0, 0); + WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC7_QM_GLBL_CFG0, 0); + WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0); +} + +/* + * goya_stop_internal_queues - Stop internal queues + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +static int goya_stop_internal_queues(struct hl_device *hdev) +{ + int rc, retval = 0; + + /* + * Each queue (QMAN) is a separate H/W logic. 
That means that each + * QMAN can be stopped independently and failure to stop one does NOT + * mandate we should not try to stop other QMANs + */ + + rc = goya_stop_queue(hdev, + mmMME_QM_GLBL_CFG1, + mmMME_QM_CP_STS, + mmMME_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop MME QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmMME_CMDQ_GLBL_CFG1, + mmMME_CMDQ_CP_STS, + mmMME_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop MME CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC0_QM_GLBL_CFG1, + mmTPC0_QM_CP_STS, + mmTPC0_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC0_CMDQ_GLBL_CFG1, + mmTPC0_CMDQ_CP_STS, + mmTPC0_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC1_QM_GLBL_CFG1, + mmTPC1_QM_CP_STS, + mmTPC1_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC1_CMDQ_GLBL_CFG1, + mmTPC1_CMDQ_CP_STS, + mmTPC1_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC2_QM_GLBL_CFG1, + mmTPC2_QM_CP_STS, + mmTPC2_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC2_CMDQ_GLBL_CFG1, + mmTPC2_CMDQ_CP_STS, + mmTPC2_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC3_QM_GLBL_CFG1, + mmTPC3_QM_CP_STS, + mmTPC3_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC3_CMDQ_GLBL_CFG1, + mmTPC3_CMDQ_CP_STS, + mmTPC3_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC4_QM_GLBL_CFG1, + mmTPC4_QM_CP_STS, + mmTPC4_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC4_CMDQ_GLBL_CFG1, + mmTPC4_CMDQ_CP_STS, + mmTPC4_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC5_QM_GLBL_CFG1, + mmTPC5_QM_CP_STS, + mmTPC5_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC5_CMDQ_GLBL_CFG1, + mmTPC5_CMDQ_CP_STS, + mmTPC5_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC6_QM_GLBL_CFG1, + mmTPC6_QM_CP_STS, + mmTPC6_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC6_CMDQ_GLBL_CFG1, + mmTPC6_CMDQ_CP_STS, + mmTPC6_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC7_QM_GLBL_CFG1, + mmTPC7_QM_CP_STS, + mmTPC7_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC7_CMDQ_GLBL_CFG1, + mmTPC7_CMDQ_CP_STS, + mmTPC7_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n"); + retval = -EIO; + } + + return 
retval; +} + +static void goya_resume_internal_queues(struct hl_device *hdev) +{ + WREG32(mmMME_QM_GLBL_CFG1, 0); + WREG32(mmMME_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC0_QM_GLBL_CFG1, 0); + WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC1_QM_GLBL_CFG1, 0); + WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC2_QM_GLBL_CFG1, 0); + WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC3_QM_GLBL_CFG1, 0); + WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC4_QM_GLBL_CFG1, 0); + WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC5_QM_GLBL_CFG1, 0); + WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC6_QM_GLBL_CFG1, 0); + WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC7_QM_GLBL_CFG1, 0); + WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0); +} + +static void goya_dma_stall(struct hl_device *hdev) +{ + WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT); + WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT); +} + +static void goya_tpc_stall(struct hl_device *hdev) +{ + WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT); + WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT); +} + +static void goya_mme_stall(struct hl_device *hdev) +{ + WREG32(mmMME_STALL, 0xFFFFFFFF); +} + +static int goya_enable_msix(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int cq_cnt = hdev->asic_prop.completion_queues_count; + int rc, i, irq_cnt_init, irq; + + if (goya->hw_cap_initialized & HW_CAP_MSIX) + return 0; + + rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES, + GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX); + if (rc < 0) { + dev_err(hdev->dev, + "MSI-X: Failed to enable support -- %d/%d\n", + GOYA_MSIX_ENTRIES, rc); + return rc; + } + + for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) { + irq = pci_irq_vector(hdev->pdev, i); + rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i], + &hdev->completion_queue[i]); + if (rc) { + dev_err(hdev->dev, "Failed to request IRQ %d", irq); + goto free_irqs; + } + } + + irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX); + + rc = request_irq(irq, hl_irq_handler_eq, 0, + goya_irq_name[EVENT_QUEUE_MSIX_IDX], + &hdev->event_queue); + if (rc) { + dev_err(hdev->dev, "Failed to request IRQ %d", irq); + goto free_irqs; + } + + goya->hw_cap_initialized |= HW_CAP_MSIX; + return 0; + +free_irqs: + for (i = 0 ; i < irq_cnt_init ; i++) + free_irq(pci_irq_vector(hdev->pdev, i), + &hdev->completion_queue[i]); + + pci_free_irq_vectors(hdev->pdev); + return rc; +} + +static void goya_sync_irqs(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i; + + if (!(goya->hw_cap_initialized & HW_CAP_MSIX)) + return; + + /* Wait for all pending IRQs to be finished */ + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + synchronize_irq(pci_irq_vector(hdev->pdev, i)); + + synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX)); +} + +static void 
goya_disable_msix(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i, irq; + + if (!(goya->hw_cap_initialized & HW_CAP_MSIX)) + return; + + goya_sync_irqs(hdev); + + irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX); + free_irq(irq, &hdev->event_queue); + + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) { + irq = pci_irq_vector(hdev->pdev, i); + free_irq(irq, &hdev->completion_queue[i]); + } + + pci_free_irq_vectors(hdev->pdev); + + goya->hw_cap_initialized &= ~HW_CAP_MSIX; +} + +static void goya_halt_engines(struct hl_device *hdev, bool hard_reset) +{ + u32 wait_timeout_ms, cpu_timeout_ms; + + dev_info(hdev->dev, + "Halting compute engines and disabling interrupts\n"); + + if (hdev->pldm) { + wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC; + cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC; + } else { + wait_timeout_ms = GOYA_RESET_WAIT_MSEC; + cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC; + } + + if (hard_reset) { + /* + * I don't know what is the state of the CPU so make sure it is + * stopped in any means necessary + */ + WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE); + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_HALT_MACHINE); + msleep(cpu_timeout_ms); + } + + goya_stop_external_queues(hdev); + goya_stop_internal_queues(hdev); + + msleep(wait_timeout_ms); + + goya_dma_stall(hdev); + goya_tpc_stall(hdev); + goya_mme_stall(hdev); + + msleep(wait_timeout_ms); + + goya_disable_external_queues(hdev); + goya_disable_internal_queues(hdev); + + if (hard_reset) + goya_disable_msix(hdev); + else + goya_sync_irqs(hdev); +} + +/* + * goya_push_fw_to_device - Push FW code to device + * + * @hdev: pointer to hl_device structure + * + * Copy fw code from firmware file to device memory. 
+ * Returns 0 on success + * + */ +static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name, + void __iomem *dst) +{ + const struct firmware *fw; + const u64 *fw_data; + size_t fw_size, i; + int rc; + + rc = request_firmware(&fw, fw_name, hdev->dev); + + if (rc) { + dev_err(hdev->dev, "Failed to request %s\n", fw_name); + goto out; + } + + fw_size = fw->size; + if ((fw_size % 4) != 0) { + dev_err(hdev->dev, "illegal %s firmware size %zu\n", + fw_name, fw_size); + rc = -EINVAL; + goto out; + } + + dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size); + + fw_data = (const u64 *) fw->data; + + if ((fw->size % 8) != 0) + fw_size -= 8; + + for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) { + if (!(i & (0x80000 - 1))) { + dev_dbg(hdev->dev, + "copied so far %zu out of %zu for %s firmware", + i, fw_size, fw_name); + usleep_range(20, 100); + } + + writeq(*fw_data, dst); + } + + if ((fw->size % 8) != 0) + writel(*(const u32 *) fw_data, dst); + +out: + release_firmware(fw); + return rc; +} + +static int goya_pldm_init_cpu(struct hl_device *hdev) +{ + char fw_name[200]; + void __iomem *dst; + u32 val, unit_rst_val; + int rc; + + /* Must initialize SRAM scrambler before pushing u-boot to SRAM */ + goya_init_golden_registers(hdev); + + /* Put ARM cores into reset */ + WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT); + val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + + /* Reset the CA53 MACRO */ + unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET); + val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val); + val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + + snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin"); + dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET; + rc = goya_push_fw_to_device(hdev, fw_name, dst); + if (rc) + return rc; + + snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb"); + dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET; + rc = goya_push_fw_to_device(hdev, fw_name, dst); + if (rc) + return rc; + + WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY); + WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA); + + WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0, + lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET)); + WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0, + upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET)); + + /* Release ARM core 0 from reset */ + WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, + CPU_RESET_CORE0_DEASSERT); + val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + + return 0; +} + +/* + * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx. + * The version string should be located by that offset. 
+ */ +static void goya_read_device_fw_version(struct hl_device *hdev, + enum goya_fw_component fwc) +{ + const char *name; + u32 ver_off; + char *dest; + + switch (fwc) { + case FW_COMP_UBOOT: + ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29); + dest = hdev->asic_prop.uboot_ver; + name = "U-Boot"; + break; + case FW_COMP_PREBOOT: + ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28); + dest = hdev->asic_prop.preboot_ver; + name = "Preboot"; + break; + default: + dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); + return; + } + + ver_off &= ~((u32)SRAM_BASE_ADDR); + + if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) { + memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off, + VERSION_MAX_LEN); + } else { + dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n", + name, ver_off); + strcpy(dest, "unavailable"); + } +} + +static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout) +{ + struct goya_device *goya = hdev->asic_specific; + char fw_name[200]; + void __iomem *dst; + u32 status; + int rc; + + if (!hdev->cpu_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_CPU) + return 0; + + /* + * Before pushing u-boot/linux to device, need to set the ddr bar to + * base address of dram + */ + rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE); + if (rc) { + dev_err(hdev->dev, + "failed to map DDR bar to DRAM base address\n"); + return rc; + } + + if (hdev->pldm) { + rc = goya_pldm_init_cpu(hdev); + if (rc) + return rc; + + goto out; + } + + /* Make sure CPU boot-loader is running */ + rc = hl_poll_timeout( + hdev, + mmPSOC_GLOBAL_CONF_WARM_REBOOT, + status, + (status == CPU_BOOT_STATUS_DRAM_RDY) || + (status == CPU_BOOT_STATUS_SRAM_AVAIL), + 10000, + cpu_timeout); + + if (rc) { + dev_err(hdev->dev, "Error in ARM u-boot!"); + switch (status) { + case CPU_BOOT_STATUS_NA: + dev_err(hdev->dev, + "ARM status %d - BTL did NOT run\n", status); + break; + case CPU_BOOT_STATUS_IN_WFE: + dev_err(hdev->dev, + "ARM status %d - Inside WFE loop\n", status); + break; + case CPU_BOOT_STATUS_IN_BTL: + dev_err(hdev->dev, + "ARM status %d - Stuck in BTL\n", status); + break; + case CPU_BOOT_STATUS_IN_PREBOOT: + dev_err(hdev->dev, + "ARM status %d - Stuck in Preboot\n", status); + break; + case CPU_BOOT_STATUS_IN_SPL: + dev_err(hdev->dev, + "ARM status %d - Stuck in SPL\n", status); + break; + case CPU_BOOT_STATUS_IN_UBOOT: + dev_err(hdev->dev, + "ARM status %d - Stuck in u-boot\n", status); + break; + case CPU_BOOT_STATUS_DRAM_INIT_FAIL: + dev_err(hdev->dev, + "ARM status %d - DDR initialization failed\n", + status); + break; + default: + dev_err(hdev->dev, + "ARM status %d - Invalid status code\n", + status); + break; + } + return -EIO; + } + + /* Read U-Boot version now in case we will later fail */ + goya_read_device_fw_version(hdev, FW_COMP_UBOOT); + goya_read_device_fw_version(hdev, FW_COMP_PREBOOT); + + if (status == CPU_BOOT_STATUS_SRAM_AVAIL) + goto out; + + if (!hdev->fw_loading) { + dev_info(hdev->dev, "Skip loading FW\n"); + goto out; + } + + snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb"); + dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET; + rc = goya_push_fw_to_device(hdev, fw_name, dst); + if (rc) + return rc; + + WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY); + + rc = hl_poll_timeout( + hdev, + mmPSOC_GLOBAL_CONF_WARM_REBOOT, + status, + (status == CPU_BOOT_STATUS_SRAM_AVAIL), + 10000, + cpu_timeout); + + if (rc) { + if (status == CPU_BOOT_STATUS_FIT_CORRUPTED) + dev_err(hdev->dev, + "ARM u-boot reports FIT image is corrupted\n"); + 
else + dev_err(hdev->dev, + "ARM Linux failed to load, %d\n", status); + WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA); + return -EIO; + } + + dev_info(hdev->dev, "Successfully loaded firmware to device\n"); + +out: + goya->hw_cap_initialized |= HW_CAP_CPU; + + return 0; +} + +static int goya_mmu_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + u64 hop0_addr; + int rc, i; + + if (!hdev->mmu_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_MMU) + return 0; + + hdev->dram_supports_virtual_memory = true; + hdev->dram_default_page_mapping = true; + + for (i = 0 ; i < prop->max_asid ; i++) { + hop0_addr = prop->mmu_pgt_addr + + (i * prop->mmu_hop_table_size); + + rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr); + if (rc) { + dev_err(hdev->dev, + "failed to set hop0 addr for asid %d\n", i); + goto err; + } + } + + goya->hw_cap_initialized |= HW_CAP_MMU; + + /* init MMU cache manage page */ + WREG32(mmSTLB_CACHE_INV_BASE_39_8, + lower_32_bits(MMU_CACHE_MNG_ADDR >> 8)); + WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40); + + /* Remove follower feature due to performance bug */ + WREG32_AND(mmSTLB_STLB_FEATURE_EN, + (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK)); + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true); + + WREG32(mmMMU_MMU_ENABLE, 1); + WREG32(mmMMU_SPI_MASK, 0xF); + + return 0; + +err: + return rc; +} + +/* + * goya_hw_init - Goya hardware initialization code + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +static int goya_hw_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u32 val; + int rc; + + dev_info(hdev->dev, "Starting initialization of H/W\n"); + + /* Perform read from the device to make sure device is up */ + val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + + /* + * Let's mark in the H/W that we have reached this point. We check + * this value in the reset_before_init function to understand whether + * we need to reset the chip before doing H/W init. 
This register is + * cleared by the H/W upon H/W reset + */ + WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY); + + rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC); + if (rc) { + dev_err(hdev->dev, "failed to initialize CPU\n"); + return rc; + } + + goya_tpc_mbist_workaround(hdev); + + goya_init_golden_registers(hdev); + + /* + * After CPU initialization is finished, change DDR bar mapping inside + * iATU to point to the start address of the MMU page tables + */ + rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE + + (MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull))); + if (rc) { + dev_err(hdev->dev, + "failed to map DDR bar to MMU page tables\n"); + return rc; + } + + rc = goya_mmu_init(hdev); + if (rc) + return rc; + + goya_init_security(hdev); + + goya_init_dma_qmans(hdev); + + goya_init_mme_qmans(hdev); + + goya_init_tpc_qmans(hdev); + + /* MSI-X must be enabled before CPU queues are initialized */ + rc = goya_enable_msix(hdev); + if (rc) + goto disable_queues; + + rc = goya_init_cpu_queues(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n", + rc); + goto disable_msix; + } + + /* CPU initialization is finished, we can now move to 48 bit DMA mask */ + rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48)); + if (rc) { + dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n"); + rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(hdev->dev, + "Unable to set pci dma mask to 32 bits\n"); + goto disable_pci_access; + } + } + + rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48)); + if (rc) { + dev_warn(hdev->dev, + "Unable to set pci consistent dma mask to 48 bits\n"); + rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(hdev->dev, + "Unable to set pci consistent dma mask to 32 bits\n"); + goto disable_pci_access; + } + } + + /* Perform read from the device to flush all MSI-X configuration */ + val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); + + return 0; + +disable_pci_access: + goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); +disable_msix: + goya_disable_msix(hdev); +disable_queues: + goya_disable_internal_queues(hdev); + goya_disable_external_queues(hdev); + + return rc; +} + +/* + * goya_hw_fini - Goya hardware tear-down code + * + * @hdev: pointer to hl_device structure + * @hard_reset: should we do hard reset to all engines or just reset the + * compute/dma engines + */ +static void goya_hw_fini(struct hl_device *hdev, bool hard_reset) +{ + struct goya_device *goya = hdev->asic_specific; + u32 reset_timeout_ms, status; + + if (hdev->pldm) + reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC; + else + reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC; + + if (hard_reset) { + goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE); + goya_disable_clk_rlx(hdev); + goya_set_pll_refclk(hdev); + + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL); + dev_info(hdev->dev, + "Issued HARD reset command, going to wait %dms\n", + reset_timeout_ms); + } else { + WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET); + dev_info(hdev->dev, + "Issued SOFT reset command, going to wait %dms\n", + reset_timeout_ms); + } + + /* + * After hard reset, we can't poll the BTM_FSM register because the PSOC + * itself is in reset. 
In either reset we need to wait until the reset + * is deasserted + */ + msleep(reset_timeout_ms); + + status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM); + if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK) + dev_err(hdev->dev, + "Timeout while waiting for device to reset 0x%x\n", + status); + + if (!hard_reset) { + goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME | + HW_CAP_GOLDEN | HW_CAP_TPC); + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_SOFT_RESET); + return; + } + + /* Chicken bit to re-initiate boot sequencer flow */ + WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, + 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT); + /* Move boot manager FSM to pre boot sequencer init state */ + WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM, + 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT); + + goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | + HW_CAP_DDR_0 | HW_CAP_DDR_1 | + HW_CAP_DMA | HW_CAP_MME | + HW_CAP_MMU | HW_CAP_TPC_MBIST | + HW_CAP_GOLDEN | HW_CAP_TPC); + memset(goya->events_stat, 0, sizeof(goya->events_stat)); + + if (!hdev->pldm) { + int rc; + /* In case we are running inside VM and the VM is + * shutting down, we need to make sure CPU boot-loader + * is running before we can continue the VM shutdown. + * That is because the VM will send an FLR signal that + * we must answer + */ + dev_info(hdev->dev, + "Going to wait up to %ds for CPU boot loader\n", + GOYA_CPU_TIMEOUT_USEC / 1000 / 1000); + + rc = hl_poll_timeout( + hdev, + mmPSOC_GLOBAL_CONF_WARM_REBOOT, + status, + (status == CPU_BOOT_STATUS_DRAM_RDY), + 10000, + GOYA_CPU_TIMEOUT_USEC); + if (rc) + dev_err(hdev->dev, + "failed to wait for CPU boot loader\n"); + } +} + +int goya_suspend(struct hl_device *hdev) +{ + int rc; + + rc = goya_stop_internal_queues(hdev); + + if (rc) { + dev_err(hdev->dev, "failed to stop internal queues\n"); + return rc; + } + + rc = goya_stop_external_queues(hdev); + + if (rc) { + dev_err(hdev->dev, "failed to stop external queues\n"); + return rc; + } + + rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); + if (rc) + dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); + + return rc; +} + +int goya_resume(struct hl_device *hdev) +{ + int rc; + + goya_resume_external_queues(hdev); + goya_resume_internal_queues(hdev); + + rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS); + if (rc) + dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); + return rc; +} + +static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + u64 kaddress, phys_addr_t paddress, u32 size) +{ + int rc; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | + VM_DONTCOPY | VM_NORESERVE; + + rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT, + size, vma->vm_page_prot); + if (rc) + dev_err(hdev->dev, "remap_pfn_range error %d", rc); + + return rc; +} + +static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) +{ + u32 db_reg_offset, db_value; + bool invalid_queue = false; + + switch (hw_queue_id) { + case GOYA_QUEUE_ID_DMA_0: + db_reg_offset = mmDMA_QM_0_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_1: + db_reg_offset = mmDMA_QM_1_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_2: + db_reg_offset = mmDMA_QM_2_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_3: + db_reg_offset = mmDMA_QM_3_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_4: + db_reg_offset = mmDMA_QM_4_PQ_PI; + break; + + case GOYA_QUEUE_ID_CPU_PQ: + if (hdev->cpu_queues_enable) + db_reg_offset = mmCPU_IF_PF_PQ_PI; + else + invalid_queue = true; + 
break; + + case GOYA_QUEUE_ID_MME: + db_reg_offset = mmMME_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC0: + db_reg_offset = mmTPC0_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC1: + db_reg_offset = mmTPC1_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC2: + db_reg_offset = mmTPC2_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC3: + db_reg_offset = mmTPC3_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC4: + db_reg_offset = mmTPC4_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC5: + db_reg_offset = mmTPC5_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC6: + db_reg_offset = mmTPC6_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC7: + db_reg_offset = mmTPC7_QM_PQ_PI; + break; + + default: + invalid_queue = true; + } + + if (invalid_queue) { + /* Should never get here */ + dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n", + hw_queue_id); + return; + } + + db_value = pi; + + /* ring the doorbell */ + WREG32(db_reg_offset, db_value); + + if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_PI_UPDATE); +} + +void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val) +{ + /* Not needed in Goya */ +} + +static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags); +} + +static void goya_dma_free_coherent(struct hl_device *hdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) +{ + dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle); +} + +void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len) +{ + void *base; + u32 offset; + + *dma_handle = hdev->asic_prop.sram_base_address; + + base = hdev->pcie_bar[SRAM_CFG_BAR_ID]; + + switch (queue_id) { + case GOYA_QUEUE_ID_MME: + offset = MME_QMAN_BASE_OFFSET; + *queue_len = MME_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC0: + offset = TPC0_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC1: + offset = TPC1_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC2: + offset = TPC2_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC3: + offset = TPC3_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC4: + offset = TPC4_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC5: + offset = TPC5_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC6: + offset = TPC6_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC7: + offset = TPC7_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + default: + dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id); + return NULL; + } + + base += offset; + *dma_handle += offset; + + return base; +} + +static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct goya_device *goya = hdev->asic_specific; + struct packet_msg_prot *fence_pkt; + u32 *fence_ptr; + dma_addr_t fence_dma_addr; + struct hl_cb *cb; + u32 tmp, timeout; + int rc; + + if (hdev->pldm) + timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC; + else + timeout = HL_DEVICE_TIMEOUT_USEC; + + if (!hdev->asic_funcs->is_device_idle(hdev)) { + dev_err_ratelimited(hdev->dev, + "Can't send KMD job on QMAN0 if device is not idle\n"); + return -EBUSY; + } + + fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + 
dev_err(hdev->dev, + "Failed to allocate fence memory for QMAN0\n"); + return -ENOMEM; + } + + *fence_ptr = 0; + + if (goya->hw_cap_initialized & HW_CAP_MMU) { + WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED); + RREG32(mmDMA_QM_0_GLBL_PROT); + } + + /* + * goya cs parser saves space for 2xpacket_msg_prot at end of CB. For + * synchronized kernel jobs we only need space for 1 packet_msg_prot + */ + job->job_cb_size -= sizeof(struct packet_msg_prot); + + cb = job->patched_cb; + + fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address + + job->job_cb_size - sizeof(struct packet_msg_prot)); + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + fence_pkt->ctl = cpu_to_le32(tmp); + fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL); + fence_pkt->addr = cpu_to_le64(fence_dma_addr + + hdev->asic_prop.host_phys_base_address); + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0, + job->job_cb_size, cb->bus_address); + if (rc) { + dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc); + goto free_fence_ptr; + } + + rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout, + &tmp); + + hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0); + + if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) { + dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n"); + rc = -ETIMEDOUT; + } + +free_fence_ptr: + hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + + if (goya->hw_cap_initialized & HW_CAP_MMU) { + WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED); + RREG32(mmDMA_QM_0_GLBL_PROT); + } + + return rc; +} + +int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, + u32 timeout, long *result) +{ + struct goya_device *goya = hdev->asic_specific; + struct armcp_packet *pkt; + dma_addr_t pkt_dma_addr; + u32 tmp; + int rc = 0; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) { + if (result) + *result = 0; + return 0; + } + + if (len > CPU_CB_SIZE) { + dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n", + len); + return -ENOMEM; + } + + pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len, + &pkt_dma_addr); + if (!pkt) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for packet to CPU\n"); + return -ENOMEM; + } + + memcpy(pkt, msg, len); + + mutex_lock(&hdev->send_cpu_message_lock); + + if (hdev->disabled) + goto out; + + if (hdev->device_cpu_disabled) { + rc = -EIO; + goto out; + } + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len, + pkt_dma_addr); + if (rc) { + dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc); + goto out; + } + + rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence, + timeout, &tmp); + + hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, "Timeout while waiting for device CPU\n"); + hdev->device_cpu_disabled = true; + goto out; + } + + if (tmp == ARMCP_PACKET_FENCE_VAL) { + u32 ctl = le32_to_cpu(pkt->ctl); + + rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT; + if (rc) { + dev_err(hdev->dev, + "F/W ERROR %d for CPU packet %d\n", + rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK) + >> ARMCP_PKT_CTL_OPCODE_SHIFT); + rc = -EINVAL; + } else if (result) { + *result = (long) le64_to_cpu(pkt->result); + } + } else { + dev_err(hdev->dev, "CPU packet wrong fence value\n"); + rc = -EINVAL; + } + +out: + mutex_unlock(&hdev->send_cpu_message_lock); + + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt); + + 
return rc; +} + +int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id) +{ + struct packet_msg_prot *fence_pkt; + dma_addr_t pkt_dma_addr; + u32 fence_val, tmp; + dma_addr_t fence_dma_addr; + u32 *fence_ptr; + int rc; + + fence_val = GOYA_QMAN0_FENCE_VAL; + + fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + dev_err(hdev->dev, + "Failed to allocate memory for queue testing\n"); + return -ENOMEM; + } + + *fence_ptr = 0; + + fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev, + sizeof(struct packet_msg_prot), + GFP_KERNEL, &pkt_dma_addr); + if (!fence_pkt) { + dev_err(hdev->dev, + "Failed to allocate packet for queue testing\n"); + rc = -ENOMEM; + goto free_fence_ptr; + } + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + fence_pkt->ctl = cpu_to_le32(tmp); + fence_pkt->value = cpu_to_le32(fence_val); + fence_pkt->addr = cpu_to_le64(fence_dma_addr + + hdev->asic_prop.host_phys_base_address); + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, + sizeof(struct packet_msg_prot), + pkt_dma_addr); + if (rc) { + dev_err(hdev->dev, + "Failed to send fence packet\n"); + goto free_pkt; + } + + rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, + GOYA_TEST_QUEUE_WAIT_USEC, &tmp); + + hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); + + if ((!rc) && (tmp == fence_val)) { + dev_info(hdev->dev, + "queue test on H/W queue %d succeeded\n", + hw_queue_id); + } else { + dev_err(hdev->dev, + "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n", + hw_queue_id, (unsigned long long) fence_dma_addr, tmp); + rc = -EINVAL; + } + +free_pkt: + hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt, + pkt_dma_addr); +free_fence_ptr: + hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + return rc; +} + +int goya_test_cpu_queue(struct hl_device *hdev) +{ + struct armcp_packet test_pkt; + long result; + int rc; + + /* cpu_queues_enable flag is always checked in send cpu message */ + + memset(&test_pkt, 0, sizeof(test_pkt)); + + test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST << + ARMCP_PKT_CTL_OPCODE_SHIFT); + test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt, + sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + + if (!rc) { + if (result == ARMCP_PACKET_FENCE_VAL) + dev_info(hdev->dev, + "queue test on CPU queue succeeded\n"); + else + dev_err(hdev->dev, + "CPU queue test failed (0x%08lX)\n", result); + } else { + dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc); + } + + return rc; +} + +static int goya_test_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i, rc, ret_val = 0; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { + rc = goya_test_queue(hdev, i); + if (rc) + ret_val = -EINVAL; + } + + if (hdev->cpu_queues_enable) { + rc = goya->test_cpu_queue(hdev); + if (rc) + ret_val = -EINVAL; + } + + return ret_val; +} + +static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size, + gfp_t mem_flags, dma_addr_t *dma_handle) +{ + if (size > GOYA_DMA_POOL_BLK_SIZE) + return NULL; + + return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); +} + +static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr, + dma_addr_t dma_addr) +{ + dma_pool_free(hdev->dma_pool, vaddr, dma_addr); +} + +static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, + size_t size, dma_addr_t 
*dma_handle) +{ + u64 kernel_addr; + + /* roundup to CPU_PKT_SIZE */ + size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK; + + kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size); + + *dma_handle = hdev->cpu_accessible_dma_address + + (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem); + + return (void *) (uintptr_t) kernel_addr; +} + +static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, + size_t size, void *vaddr) +{ + /* roundup to CPU_PKT_SIZE */ + size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK; + + gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr, + size); +} + +static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir)) + return -ENOMEM; + + return 0; +} + +static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir); +} + +u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt) +{ + struct scatterlist *sg, *sg_next_iter; + u32 count, dma_desc_cnt; + u64 len, len_next; + dma_addr_t addr, addr_next; + + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + + len = sg_dma_len(sg); + addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((addr + len == addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + dma_desc_cnt++; + } + + return dma_desc_cnt * sizeof(struct packet_lin_dma); +} + +static int goya_pin_memory_before_cs(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + u64 addr, enum dma_data_direction dir) +{ + struct hl_userptr *userptr; + int rc; + + if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), + parser->job_userptr_list, &userptr)) + goto already_pinned; + + userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC); + if (!userptr) + return -ENOMEM; + + rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), + userptr); + if (rc) + goto free_userptr; + + list_add_tail(&userptr->job_node, parser->job_userptr_list); + + rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, dir); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto unpin_memory; + } + + userptr->dma_mapped = true; + userptr->dir = dir; + +already_pinned: + parser->patched_cb_size += + goya_get_dma_desc_list_size(hdev, userptr->sgt); + + return 0; + +unpin_memory: + hl_unpin_host_memory(hdev, userptr); +free_userptr: + kfree(userptr); + return rc; +} + +static int goya_validate_dma_pkt_host(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + u64 device_memory_addr, addr; + enum dma_data_direction dir; + enum goya_dma_direction user_dir; + bool sram_addr = true; + bool skip_host_mem_pin = false; + bool user_memset; + u32 ctl; + int rc = 0; + + ctl = le32_to_cpu(user_dma_pkt->ctl); + + user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >> + GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT; + + switch (user_dir) { + case DMA_HOST_TO_DRAM: + dev_dbg(hdev->dev, 
"DMA direction is HOST --> DRAM\n"); + dir = DMA_TO_DEVICE; + sram_addr = false; + addr = le64_to_cpu(user_dma_pkt->src_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + if (user_memset) + skip_host_mem_pin = true; + break; + + case DMA_DRAM_TO_HOST: + dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n"); + dir = DMA_FROM_DEVICE; + sram_addr = false; + addr = le64_to_cpu(user_dma_pkt->dst_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + break; + + case DMA_HOST_TO_SRAM: + dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n"); + dir = DMA_TO_DEVICE; + addr = le64_to_cpu(user_dma_pkt->src_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + if (user_memset) + skip_host_mem_pin = true; + break; + + case DMA_SRAM_TO_HOST: + dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n"); + dir = DMA_FROM_DEVICE; + addr = le64_to_cpu(user_dma_pkt->dst_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + break; + default: + dev_err(hdev->dev, "DMA direction is undefined\n"); + return -EFAULT; + } + + if (parser->ctx_id != HL_KERNEL_ASID_ID) { + if (sram_addr) { + if (!hl_mem_area_inside_range(device_memory_addr, + le32_to_cpu(user_dma_pkt->tsize), + hdev->asic_prop.sram_user_base_address, + hdev->asic_prop.sram_end_address)) { + + dev_err(hdev->dev, + "SRAM address 0x%llx + 0x%x is invalid\n", + device_memory_addr, + user_dma_pkt->tsize); + return -EFAULT; + } + } else { + if (!hl_mem_area_inside_range(device_memory_addr, + le32_to_cpu(user_dma_pkt->tsize), + hdev->asic_prop.dram_user_base_address, + hdev->asic_prop.dram_end_address)) { + + dev_err(hdev->dev, + "DRAM address 0x%llx + 0x%x is invalid\n", + device_memory_addr, + user_dma_pkt->tsize); + return -EFAULT; + } + } + } + + if (skip_host_mem_pin) + parser->patched_cb_size += sizeof(*user_dma_pkt); + else { + if ((dir == DMA_TO_DEVICE) && + (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) { + dev_err(hdev->dev, + "Can't DMA from host on queue other then 1\n"); + return -EFAULT; + } + + rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt, + addr, dir); + } + + return rc; +} + +static int goya_validate_dma_pkt_no_host(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + u64 sram_memory_addr, dram_memory_addr; + enum goya_dma_direction user_dir; + u32 ctl; + + ctl = le32_to_cpu(user_dma_pkt->ctl); + user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + if (user_dir == DMA_DRAM_TO_SRAM) { + dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n"); + dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + } else { + dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n"); + sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + } + + if (!hl_mem_area_inside_range(sram_memory_addr, + le32_to_cpu(user_dma_pkt->tsize), + hdev->asic_prop.sram_user_base_address, + hdev->asic_prop.sram_end_address)) { + dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n", + sram_memory_addr, user_dma_pkt->tsize); + return -EFAULT; + } + + if (!hl_mem_area_inside_range(dram_memory_addr, + le32_to_cpu(user_dma_pkt->tsize), + hdev->asic_prop.dram_user_base_address, + hdev->asic_prop.dram_end_address)) { + dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n", + dram_memory_addr, user_dma_pkt->tsize); + return -EFAULT; + } + + parser->patched_cb_size += sizeof(*user_dma_pkt); + + 
return 0; +} + +static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + enum goya_dma_direction user_dir; + u32 ctl; + int rc; + + dev_dbg(hdev->dev, "DMA packet details:\n"); + dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); + dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); + dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); + + ctl = le32_to_cpu(user_dma_pkt->ctl); + user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + /* + * Special handling for DMA with size 0. The H/W has a bug where + * this can cause the QMAN DMA to get stuck, so block it here. + */ + if (user_dma_pkt->tsize == 0) { + dev_err(hdev->dev, + "Got DMA with size 0, might reset the device\n"); + return -EINVAL; + } + + if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM)) + rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt); + else + rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt); + + return rc; +} + +static int goya_validate_dma_pkt_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + dev_dbg(hdev->dev, "DMA packet details:\n"); + dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); + dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); + dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); + + /* + * WA for HW-23. + * We can't allow user to read from Host using QMANs other than 1. + */ + if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 && + hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr), + le32_to_cpu(user_dma_pkt->tsize), + hdev->asic_prop.va_space_host_start_address, + hdev->asic_prop.va_space_host_end_address)) { + dev_err(hdev->dev, + "Can't DMA from host on queue other than 1\n"); + return -EFAULT; + } + + if (user_dma_pkt->tsize == 0) { + dev_err(hdev->dev, + "Got DMA with size 0, might reset the device\n"); + return -EINVAL; + } + + parser->patched_cb_size += sizeof(*user_dma_pkt); + + return 0; +} + +static int goya_validate_wreg32(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_wreg32 *wreg_pkt) +{ + struct goya_device *goya = hdev->asic_specific; + u32 sob_start_addr, sob_end_addr; + u16 reg_offset; + + reg_offset = le32_to_cpu(wreg_pkt->ctl) & + GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK; + + dev_dbg(hdev->dev, "WREG32 packet details:\n"); + dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset); + dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value); + + if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) { + dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n", + reg_offset); + return -EPERM; + } + + /* + * With MMU, DMA channels are not secured, so it doesn't matter where + * the WR COMP will be written to because it will go out with + * non-secured property + */ + if (goya->hw_cap_initialized & HW_CAP_MMU) + return 0; + + sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023); + + if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) || + (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) { + + dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n", + wreg_pkt->value); + return -EPERM; + } + + return 0; +} + +static int goya_validate_cb(struct hl_device *hdev, + struct hl_cs_parser *parser, bool is_mmu) +{ + u32 cb_parsed_length = 0; + int rc = 0; + + parser->patched_cb_size = 0; + + /* 
cb_user_size is more than 0 so loop will always be executed */ + while (cb_parsed_length < parser->user_cb_size) { + enum packet_id pkt_id; + u16 pkt_size; + void *user_pkt; + + user_pkt = (void *) (uintptr_t) + (parser->user_cb->kernel_address + cb_parsed_length); + + pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & + PACKET_HEADER_PACKET_ID_MASK) >> + PACKET_HEADER_PACKET_ID_SHIFT); + + pkt_size = goya_packet_sizes[pkt_id]; + cb_parsed_length += pkt_size; + if (cb_parsed_length > parser->user_cb_size) { + dev_err(hdev->dev, + "packet 0x%x is out of CB boundary\n", pkt_id); + rc = -EINVAL; + break; + } + + switch (pkt_id) { + case PACKET_WREG_32: + /* + * Although it is validated after copy in patch_cb(), + * need to validate here as well because patch_cb() is + * not called in MMU path while this function is called + */ + rc = goya_validate_wreg32(hdev, parser, user_pkt); + break; + + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + + case PACKET_MSG_PROT: + dev_err(hdev->dev, + "User not allowed to use MSG_PROT\n"); + rc = -EPERM; + break; + + case PACKET_CP_DMA: + dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); + rc = -EPERM; + break; + + case PACKET_STOP: + dev_err(hdev->dev, "User not allowed to use STOP\n"); + rc = -EPERM; + break; + + case PACKET_LIN_DMA: + if (is_mmu) + rc = goya_validate_dma_pkt_mmu(hdev, parser, + user_pkt); + else + rc = goya_validate_dma_pkt_no_mmu(hdev, parser, + user_pkt); + break; + + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_FENCE: + case PACKET_NOP: + parser->patched_cb_size += pkt_size; + break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + /* + * The new CB should have space at the end for two MSG_PROT packets: + * 1. A packet that will act as a completion packet + * 2. 
A packet that will generate MSI-X interrupt + */ + parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2; + + return rc; +} + +static int goya_patch_dma_packet(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + struct packet_lin_dma *new_dma_pkt, + u32 *new_dma_pkt_size) +{ + struct hl_userptr *userptr; + struct scatterlist *sg, *sg_next_iter; + u32 count, dma_desc_cnt; + u64 len, len_next; + dma_addr_t dma_addr, dma_addr_next; + enum goya_dma_direction user_dir; + u64 device_memory_addr, addr; + enum dma_data_direction dir; + struct sg_table *sgt; + bool skip_host_mem_pin = false; + bool user_memset; + u32 user_rdcomp_mask, user_wrcomp_mask, ctl; + + ctl = le32_to_cpu(user_dma_pkt->ctl); + + user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >> + GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT; + + if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) || + (user_dma_pkt->tsize == 0)) { + memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt)); + *new_dma_pkt_size = sizeof(*new_dma_pkt); + return 0; + } + + if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) { + addr = le64_to_cpu(user_dma_pkt->src_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); + dir = DMA_TO_DEVICE; + if (user_memset) + skip_host_mem_pin = true; + } else { + addr = le64_to_cpu(user_dma_pkt->dst_addr); + device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); + dir = DMA_FROM_DEVICE; + } + + if ((!skip_host_mem_pin) && + (hl_userptr_is_pinned(hdev, addr, + le32_to_cpu(user_dma_pkt->tsize), + parser->job_userptr_list, &userptr) == false)) { + dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n", + addr, user_dma_pkt->tsize); + return -EFAULT; + } + + if ((user_memset) && (dir == DMA_TO_DEVICE)) { + memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt)); + *new_dma_pkt_size = sizeof(*user_dma_pkt); + return 0; + } + + user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK; + + user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK; + + sgt = userptr->sgt; + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + len = sg_dma_len(sg); + dma_addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + dma_addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((dma_addr + len == dma_addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + ctl = le32_to_cpu(user_dma_pkt->ctl); + if (likely(dma_desc_cnt)) + ctl &= ~GOYA_PKT_CTL_EB_MASK; + ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK | + GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK); + new_dma_pkt->ctl = cpu_to_le32(ctl); + new_dma_pkt->tsize = cpu_to_le32((u32) len); + + dma_addr += hdev->asic_prop.host_phys_base_address; + + if (dir == DMA_TO_DEVICE) { + new_dma_pkt->src_addr = cpu_to_le64(dma_addr); + new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr); + } else { + new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr); + new_dma_pkt->dst_addr = cpu_to_le64(dma_addr); + } + + if (!user_memset) + device_memory_addr += len; + dma_desc_cnt++; + new_dma_pkt++; + } + + if (!dma_desc_cnt) { + dev_err(hdev->dev, + "Error of 0 SG entries when patching DMA packet\n"); + return -EFAULT; + } + + /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */ + 
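+ /* + * The patching loop above cleared RDCOMP/WRCOMP on every descriptor, + * so only the final descriptor, adjusted below, carries the user's + * original completion settings. + */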
new_dma_pkt--; + new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask); + + *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma); + + return 0; +} + +static int goya_patch_cb(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u32 cb_parsed_length = 0; + u32 cb_patched_cur_length = 0; + int rc = 0; + + /* cb_user_size is more than 0 so loop will always be executed */ + while (cb_parsed_length < parser->user_cb_size) { + enum packet_id pkt_id; + u16 pkt_size; + u32 new_pkt_size = 0; + void *user_pkt, *kernel_pkt; + + user_pkt = (void *) (uintptr_t) + (parser->user_cb->kernel_address + cb_parsed_length); + kernel_pkt = (void *) (uintptr_t) + (parser->patched_cb->kernel_address + + cb_patched_cur_length); + + pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & + PACKET_HEADER_PACKET_ID_MASK) >> + PACKET_HEADER_PACKET_ID_SHIFT); + + pkt_size = goya_packet_sizes[pkt_id]; + cb_parsed_length += pkt_size; + if (cb_parsed_length > parser->user_cb_size) { + dev_err(hdev->dev, + "packet 0x%x is out of CB boundary\n", pkt_id); + rc = -EINVAL; + break; + } + + switch (pkt_id) { + case PACKET_LIN_DMA: + rc = goya_patch_dma_packet(hdev, parser, user_pkt, + kernel_pkt, &new_pkt_size); + cb_patched_cur_length += new_pkt_size; + break; + + case PACKET_WREG_32: + memcpy(kernel_pkt, user_pkt, pkt_size); + cb_patched_cur_length += pkt_size; + rc = goya_validate_wreg32(hdev, parser, kernel_pkt); + break; + + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + + case PACKET_MSG_PROT: + dev_err(hdev->dev, + "User not allowed to use MSG_PROT\n"); + rc = -EPERM; + break; + + case PACKET_CP_DMA: + dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); + rc = -EPERM; + break; + + case PACKET_STOP: + dev_err(hdev->dev, "User not allowed to use STOP\n"); + rc = -EPERM; + break; + + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_FENCE: + case PACKET_NOP: + memcpy(kernel_pkt, user_pkt, pkt_size); + cb_patched_cur_length += pkt_size; + break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + return rc; +} + +static int goya_parse_cb_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u64 patched_cb_handle; + u32 patched_cb_size; + struct hl_cb *user_cb; + int rc; + + /* + * The new CB should have space at the end for two MSG_PROT pkt: + * 1. A packet that will act as a completion packet + * 2. A packet that will generate MSI-X interrupt + */ + parser->patched_cb_size = parser->user_cb_size + + sizeof(struct packet_msg_prot) * 2; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, + parser->patched_cb_size, + &patched_cb_handle, HL_KERNEL_ASID_ID); + + if (rc) { + dev_err(hdev->dev, + "Failed to allocate patched CB for DMA CS %d\n", + rc); + return rc; + } + + patched_cb_handle >>= PAGE_SHIFT; + parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, + (u32) patched_cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); + if (!parser->patched_cb) { + rc = -EFAULT; + goto out; + } + + /* + * The check that parser->user_cb_size <= parser->user_cb->size was done + * in validate_queue_index(). 
+ */ + memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address, + (void *) (uintptr_t) parser->user_cb->kernel_address, + parser->user_cb_size); + + patched_cb_size = parser->patched_cb_size; + + /* validate patched CB instead of user CB */ + user_cb = parser->user_cb; + parser->user_cb = parser->patched_cb; + rc = goya_validate_cb(hdev, parser, true); + parser->user_cb = user_cb; + + if (rc) { + hl_cb_put(parser->patched_cb); + goto out; + } + + if (patched_cb_size != parser->patched_cb_size) { + dev_err(hdev->dev, "user CB size mismatch\n"); + hl_cb_put(parser->patched_cb); + rc = -EINVAL; + goto out; + } + +out: + /* + * Always call cb destroy here because we still have 1 reference + * to it by calling cb_get earlier. After the job will be completed, + * cb_put will release it, but here we want to remove it from the + * idr + */ + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, + patched_cb_handle << PAGE_SHIFT); + + return rc; +} + +static int goya_parse_cb_no_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u64 patched_cb_handle; + int rc; + + rc = goya_validate_cb(hdev, parser, false); + + if (rc) + goto free_userptr; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, + parser->patched_cb_size, + &patched_cb_handle, HL_KERNEL_ASID_ID); + if (rc) { + dev_err(hdev->dev, + "Failed to allocate patched CB for DMA CS %d\n", rc); + goto free_userptr; + } + + patched_cb_handle >>= PAGE_SHIFT; + parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, + (u32) patched_cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); + if (!parser->patched_cb) { + rc = -EFAULT; + goto out; + } + + rc = goya_patch_cb(hdev, parser); + + if (rc) + hl_cb_put(parser->patched_cb); + +out: + /* + * Always call cb destroy here because we still have 1 reference + * to it by calling cb_get earlier. 
After the job will be completed, + * cb_put will release it, but here we want to remove it from the + * idr + */ + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, + patched_cb_handle << PAGE_SHIFT); + +free_userptr: + if (rc) + hl_userptr_delete_list(hdev, parser->job_userptr_list); + return rc; +} + +static int goya_parse_cb_no_ext_queue(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + struct asic_fixed_properties *asic_prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) { + /* For internal queue jobs, just check if cb address is valid */ + if (hl_mem_area_inside_range( + (u64) (uintptr_t) parser->user_cb, + parser->user_cb_size, + asic_prop->sram_user_base_address, + asic_prop->sram_end_address)) + return 0; + + if (hl_mem_area_inside_range( + (u64) (uintptr_t) parser->user_cb, + parser->user_cb_size, + asic_prop->dram_user_base_address, + asic_prop->dram_end_address)) + return 0; + + dev_err(hdev->dev, + "Internal CB address %px + 0x%x is neither in SRAM nor in DRAM\n", + parser->user_cb, parser->user_cb_size); + + return -EFAULT; + } + + return 0; +} + +int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser) +{ + struct goya_device *goya = hdev->asic_specific; + + if (!parser->ext_queue) + return goya_parse_cb_no_ext_queue(hdev, parser); + + if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr) + return goya_parse_cb_mmu(hdev, parser); + else + return goya_parse_cb_no_mmu(hdev, parser); +} + +void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr, + u32 cq_val, u32 msix_vec) +{ + struct packet_msg_prot *cq_pkt; + u32 tmp; + + cq_pkt = (struct packet_msg_prot *) (uintptr_t) + (kernel_address + len - (sizeof(struct packet_msg_prot) * 2)); + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + cq_pkt->ctl = cpu_to_le32(tmp); + cq_pkt->value = cpu_to_le32(cq_val); + cq_pkt->addr = cpu_to_le64(cq_addr); + + cq_pkt++; + + tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + cq_pkt->ctl = cpu_to_le32(tmp); + cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF); + cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF); +} + +static void goya_update_eq_ci(struct hl_device *hdev, u32 val) +{ + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val); +} + +static void goya_restore_phase_topology(struct hl_device *hdev) +{ + int i, num_of_sob_in_longs, num_of_mon_in_longs; + + num_of_sob_in_longs = + ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4); + + num_of_mon_in_longs = + ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4); + + for (i = 0 ; i < num_of_sob_in_longs ; i += 4) + WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0); + + for (i = 0 ; i < num_of_mon_in_longs ; i += 4) + WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0); + + /* Flush all WREG to prevent race */ + i = RREG32(mmSYNC_MNGR_SOB_OBJ_0); +} + +/* + * goya_debugfs_read32 - read a 32bit value from a given device address + * + * @hdev: pointer to hl_device structure + * @addr: address in device + * @val: returned value + * + * In case of DDR address that is not mapped into the default aperture that + * the DDR bar exposes, the function will configure the iATU so that the DDR + * bar will be positioned at a base address that allows reading from the + * required address. 
Configuring the iATU during normal operation can + * lead to undefined behavior and therefore should be done with extreme care + * + */ +static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { + *val = RREG32(addr - CFG_BASE); + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr < SRAM_BASE_ADDR + SRAM_SIZE)) { + + *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + + } else if ((addr >= DRAM_PHYS_BASE) && + (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) { + + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + rc = goya_set_ddr_bar_base(hdev, bar_base_addr); + if (!rc) { + *val = readl(hdev->pcie_bar[DDR_BAR_ID] + + (addr - bar_base_addr)); + + rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE + + (MMU_PAGE_TABLES_ADDR & + ~(prop->dram_pci_bar_size - 0x1ull))); + } + } else { + rc = -EFAULT; + } + + return rc; +} + +/* + * goya_debugfs_write32 - write a 32bit value to a given device address + * + * @hdev: pointer to hl_device structure + * @addr: address in device + * @val: value to write + * + * In case of DDR address that is not mapped into the default aperture that + * the DDR bar exposes, the function will configure the iATU so that the DDR + * bar will be positioned at a base address that allows writing to the + * required address. Configuring the iATU during normal operation can + * lead to undefined behavior and therefore should be done with extreme care + * + */ +static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc = 0; + + if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) { + WREG32(addr - CFG_BASE, val); + + } else if ((addr >= SRAM_BASE_ADDR) && + (addr < SRAM_BASE_ADDR + SRAM_SIZE)) { + + writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (addr - SRAM_BASE_ADDR)); + + } else if ((addr >= DRAM_PHYS_BASE) && + (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) { + + u64 bar_base_addr = DRAM_PHYS_BASE + + (addr & ~(prop->dram_pci_bar_size - 0x1ull)); + + rc = goya_set_ddr_bar_base(hdev, bar_base_addr); + if (!rc) { + writel(val, hdev->pcie_bar[DDR_BAR_ID] + + (addr - bar_base_addr)); + + rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE + + (MMU_PAGE_TABLES_ADDR & + ~(prop->dram_pci_bar_size - 0x1ull))); + } + } else { + rc = -EFAULT; + } + + return rc; +} + +static u64 goya_read_pte(struct hl_device *hdev, u64 addr) +{ + struct goya_device *goya = hdev->asic_specific; + + return readq(hdev->pcie_bar[DDR_BAR_ID] + + (addr - goya->ddr_bar_cur_addr)); +} + +static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val) +{ + struct goya_device *goya = hdev->asic_specific; + + writeq(val, hdev->pcie_bar[DDR_BAR_ID] + + (addr - goya->ddr_bar_cur_addr)); +} + +static const char *_goya_get_event_desc(u16 event_type) +{ + switch (event_type) { + case GOYA_ASYNC_EVENT_ID_PCIE_DEC: + return "PCIe_dec"; + case GOYA_ASYNC_EVENT_ID_TPC0_DEC: + case GOYA_ASYNC_EVENT_ID_TPC1_DEC: + case GOYA_ASYNC_EVENT_ID_TPC2_DEC: + case GOYA_ASYNC_EVENT_ID_TPC3_DEC: + case GOYA_ASYNC_EVENT_ID_TPC4_DEC: + case GOYA_ASYNC_EVENT_ID_TPC5_DEC: + case GOYA_ASYNC_EVENT_ID_TPC6_DEC: + case GOYA_ASYNC_EVENT_ID_TPC7_DEC: + return "TPC%d_dec"; + case GOYA_ASYNC_EVENT_ID_MME_WACS: + return "MME_wacs"; + case GOYA_ASYNC_EVENT_ID_MME_WACSD: + return "MME_wacsd"; + case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER: 
+ return "CPU_axi_splitter"; + case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC: + return "PSOC_axi_dec"; + case GOYA_ASYNC_EVENT_ID_PSOC: + return "PSOC"; + case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR: + return "TPC%d_krn_err"; + case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ: + return "TPC%d_cq"; + case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM: + return "TPC%d_qm"; + case GOYA_ASYNC_EVENT_ID_MME_QM: + return "MME_qm"; + case GOYA_ASYNC_EVENT_ID_MME_CMDQ: + return "MME_cq"; + case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM: + return "DMA%d_qm"; + case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH: + return "DMA%d_ch"; + default: + return "N/A"; + } +} + +static void goya_get_event_desc(u16 event_type, char *desc, size_t size) +{ + u8 index; + + switch (event_type) { + case GOYA_ASYNC_EVENT_ID_TPC0_DEC: + case GOYA_ASYNC_EVENT_ID_TPC1_DEC: + case GOYA_ASYNC_EVENT_ID_TPC2_DEC: + case GOYA_ASYNC_EVENT_ID_TPC3_DEC: + case GOYA_ASYNC_EVENT_ID_TPC4_DEC: + case GOYA_ASYNC_EVENT_ID_TPC5_DEC: + case GOYA_ASYNC_EVENT_ID_TPC6_DEC: + case GOYA_ASYNC_EVENT_ID_TPC7_DEC: + index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR: + index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ: + index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM: + index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM: + index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + case GOYA_ASYNC_EVENT_ID_DMA0_CH ... 
GOYA_ASYNC_EVENT_ID_DMA4_CH: + index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH; + snprintf(desc, size, _goya_get_event_desc(event_type), index); + break; + default: + snprintf(desc, size, _goya_get_event_desc(event_type)); + break; + } +} + +static void goya_print_razwi_info(struct hl_device *hdev) +{ + if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) { + dev_err(hdev->dev, "Illegal write to LBW\n"); + WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0); + } + + if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) { + dev_err(hdev->dev, "Illegal read from LBW\n"); + WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0); + } + + if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) { + dev_err(hdev->dev, "Illegal write to HBW\n"); + WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0); + } + + if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) { + dev_err(hdev->dev, "Illegal read from HBW\n"); + WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0); + } +} + +static void goya_print_mmu_error_info(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u64 addr; + u32 val; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + val = RREG32(mmMMU_PAGE_ERROR_CAPTURE); + if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) { + addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK; + addr <<= 32; + addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA); + + dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr); + + WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0); + } +} + +static void goya_print_irq_info(struct hl_device *hdev, u16 event_type) +{ + char desc[20] = ""; + + goya_get_event_desc(event_type, desc, sizeof(desc)); + dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", + event_type, desc); + + goya_print_razwi_info(hdev); + goya_print_mmu_error_info(hdev); +} + +static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr, + size_t irq_arr_size) +{ + struct armcp_unmask_irq_arr_packet *pkt; + size_t total_pkt_size; + long result; + int rc; + + total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) + + irq_arr_size; + + /* data should be aligned to 8 bytes in order for ArmCP to copy it */ + total_pkt_size = (total_pkt_size + 0x7) & ~0x7; + + /* total_pkt_size is cast to u16 later on */ + if (total_pkt_size > USHRT_MAX) { + dev_err(hdev->dev, "too many elements in IRQ array\n"); + return -EINVAL; + } + + pkt = kzalloc(total_pkt_size, GFP_KERNEL); + if (!pkt) + return -ENOMEM; + + pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0])); + memcpy(&pkt->irqs, irq_arr, irq_arr_size); + + pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY << + ARMCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt, + total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result); + + if (rc) + dev_err(hdev->dev, "failed to unmask IRQ array\n"); + + kfree(pkt); + + return rc; +} + +static int goya_soft_reset_late_init(struct hl_device *hdev) +{ + /* + * Unmask all IRQs since some could have been received + * during the soft reset + */ + return goya_unmask_irq_arr(hdev, goya_non_fatal_events, + sizeof(goya_non_fatal_events)); +} + +static int goya_unmask_irq(struct hl_device *hdev, u16 event_type) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.value = cpu_to_le64(event_type); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + HL_DEVICE_TIMEOUT_USEC, &result); + + if (rc) + dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type); + + return rc; +} + +void 
goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) +{ + u32 ctl = le32_to_cpu(eq_entry->hdr.ctl); + u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) + >> EQ_CTL_EVENT_TYPE_SHIFT); + struct goya_device *goya = hdev->asic_specific; + + goya->events_stat[event_type]++; + + switch (event_type) { + case GOYA_ASYNC_EVENT_ID_PCIE_IF: + case GOYA_ASYNC_EVENT_ID_TPC0_ECC: + case GOYA_ASYNC_EVENT_ID_TPC1_ECC: + case GOYA_ASYNC_EVENT_ID_TPC2_ECC: + case GOYA_ASYNC_EVENT_ID_TPC3_ECC: + case GOYA_ASYNC_EVENT_ID_TPC4_ECC: + case GOYA_ASYNC_EVENT_ID_TPC5_ECC: + case GOYA_ASYNC_EVENT_ID_TPC6_ECC: + case GOYA_ASYNC_EVENT_ID_TPC7_ECC: + case GOYA_ASYNC_EVENT_ID_MME_ECC: + case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT: + case GOYA_ASYNC_EVENT_ID_MMU_ECC: + case GOYA_ASYNC_EVENT_ID_DMA_MACRO: + case GOYA_ASYNC_EVENT_ID_DMA_ECC: + case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC: + case GOYA_ASYNC_EVENT_ID_PSOC_MEM: + case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT: + case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29: + case GOYA_ASYNC_EVENT_ID_GIC500: + case GOYA_ASYNC_EVENT_ID_PLL0: + case GOYA_ASYNC_EVENT_ID_PLL1: + case GOYA_ASYNC_EVENT_ID_PLL3: + case GOYA_ASYNC_EVENT_ID_PLL4: + case GOYA_ASYNC_EVENT_ID_PLL5: + case GOYA_ASYNC_EVENT_ID_PLL6: + case GOYA_ASYNC_EVENT_ID_AXI_ECC: + case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC: + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET: + case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT: + dev_err(hdev->dev, + "Received H/W interrupt %d, reset the chip\n", + event_type); + hl_device_reset(hdev, true, false); + break; + + case GOYA_ASYNC_EVENT_ID_PCIE_DEC: + case GOYA_ASYNC_EVENT_ID_TPC0_DEC: + case GOYA_ASYNC_EVENT_ID_TPC1_DEC: + case GOYA_ASYNC_EVENT_ID_TPC2_DEC: + case GOYA_ASYNC_EVENT_ID_TPC3_DEC: + case GOYA_ASYNC_EVENT_ID_TPC4_DEC: + case GOYA_ASYNC_EVENT_ID_TPC5_DEC: + case GOYA_ASYNC_EVENT_ID_TPC6_DEC: + case GOYA_ASYNC_EVENT_ID_TPC7_DEC: + case GOYA_ASYNC_EVENT_ID_MME_WACS: + case GOYA_ASYNC_EVENT_ID_MME_WACSD: + case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER: + case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC: + case GOYA_ASYNC_EVENT_ID_PSOC: + case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR: + case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM: + case GOYA_ASYNC_EVENT_ID_MME_QM: + case GOYA_ASYNC_EVENT_ID_MME_CMDQ: + case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM: + case GOYA_ASYNC_EVENT_ID_DMA0_CH ... 
GOYA_ASYNC_EVENT_ID_DMA4_CH: + goya_print_irq_info(hdev, event_type); + goya_unmask_irq(hdev, event_type); + break; + + case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU: + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0: + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1: + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2: + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3: + case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4: + dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type); + break; + + default: + dev_err(hdev->dev, "Received invalid H/W interrupt %d\n", + event_type); + break; + } +} + +void *goya_get_events_stat(struct hl_device *hdev, u32 *size) +{ + struct goya_device *goya = hdev->asic_specific; + + *size = (u32) sizeof(goya->events_stat); + + return goya->events_stat; +} + +static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size, + u64 val, bool is_dram) +{ + struct packet_lin_dma *lin_dma_pkt; + struct hl_cs_parser parser; + struct hl_cs_job *job; + u32 cb_size, ctl; + struct hl_cb *cb; + int rc; + + cb = hl_cb_kernel_create(hdev, PAGE_SIZE); + if (!cb) + return -EFAULT; + + lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address; + + memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt)); + cb_size = sizeof(*lin_dma_pkt); + + ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) | + (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) | + (1 << GOYA_PKT_CTL_RB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT)); + ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) << + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + lin_dma_pkt->ctl = cpu_to_le32(ctl); + + lin_dma_pkt->src_addr = cpu_to_le64(val); + lin_dma_pkt->dst_addr = cpu_to_le64(addr); + lin_dma_pkt->tsize = cpu_to_le32(size); + + job = hl_cs_allocate_job(hdev, true); + if (!job) { + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + goto release_cb; + } + + job->id = 0; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + job->hw_queue_id = GOYA_QUEUE_ID_DMA_0; + + hl_debugfs_add_job(hdev, job); + + parser.ctx_id = HL_KERNEL_ASID_ID; + parser.cs_sequence = 0; + parser.job_id = job->id; + parser.hw_queue_id = job->hw_queue_id; + parser.job_userptr_list = &job->userptr_list; + parser.user_cb = job->user_cb; + parser.user_cb_size = job->user_cb_size; + parser.ext_queue = job->ext_queue; + parser.use_virt_addr = hdev->mmu_enable; + + rc = hdev->asic_funcs->cs_parser(hdev, &parser); + if (rc) { + dev_err(hdev->dev, "Failed to parse kernel CB\n"); + goto free_job; + } + + job->patched_cb = parser.patched_cb; + job->job_cb_size = parser.patched_cb_size; + job->patched_cb->cs_cnt++; + + rc = goya_send_job_on_qman0(hdev, job); + + job->patched_cb->cs_cnt--; + hl_cb_put(job->patched_cb); + +free_job: + hl_userptr_delete_list(hdev, &job->userptr_list); + hl_debugfs_remove_job(hdev, job); + kfree(job); + cb->cs_cnt--; + +release_cb: + hl_cb_put(cb); + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + return rc; +} + +static int goya_context_switch(struct hl_device *hdev, u32 asid) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 addr = prop->sram_base_address; + u32 size = hdev->pldm ? 
0x10000 : prop->sram_size; + u64 val = 0x7777777777777777ull; + int rc; + + rc = goya_memset_device_memory(hdev, addr, size, val, false); + if (rc) { + dev_err(hdev->dev, "Failed to clear SRAM in context switch\n"); + return rc; + } + + goya_mmu_prepare(hdev, asid); + + return 0; +} + +static int goya_mmu_clear_pgt_range(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + u64 addr = prop->mmu_pgt_addr; + u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE + + MMU_CACHE_MNG_SIZE; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + return goya_memset_device_memory(hdev, addr, size, 0, true); +} + +static int goya_mmu_set_dram_default_page(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u64 addr = hdev->asic_prop.mmu_dram_default_page_addr; + u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE; + u64 val = 0x9999999999999999ull; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + return goya_memset_device_memory(hdev, addr, size, val, true); +} + +static void goya_mmu_prepare(struct hl_device *hdev, u32 asid) +{ + struct goya_device *goya = hdev->asic_specific; + int i; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) { + WARN(1, "asid %u is too big\n", asid); + return; + } + + /* zero the MMBP and ASID bits and then set the ASID */ + for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) { + WREG32_AND(goya_mmu_regs[i], ~0x7FF); + WREG32_OR(goya_mmu_regs[i], asid); + } +} + +static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard) +{ + struct goya_device *goya = hdev->asic_specific; + u32 status, timeout_usec; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + /* no need for L1-only invalidation in Goya */ + if (!is_hard) + return; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + mutex_lock(&hdev->mmu_cache_lock); + + /* L0 & L1 invalidation */ + WREG32(mmSTLB_INV_ALL_START, 1); + + rc = hl_poll_timeout( + hdev, + mmSTLB_INV_ALL_START, + status, + !status, + 1000, + timeout_usec); + + mutex_unlock(&hdev->mmu_cache_lock); + + if (rc) + dev_notice_ratelimited(hdev->dev, + "Timeout when waiting for MMU cache invalidation\n"); +} + +static void goya_mmu_invalidate_cache_range(struct hl_device *hdev, + bool is_hard, u32 asid, u64 va, u64 size) +{ + struct goya_device *goya = hdev->asic_specific; + u32 status, timeout_usec, inv_data, pi; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + /* no need for L1-only invalidation in Goya */ + if (!is_hard) + return; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + mutex_lock(&hdev->mmu_cache_lock); + + /* + * TODO: currently invalidate entire L0 & L1 as in regular hard + * invalidation. Need to apply invalidation of specific cache lines with + * a mask of ASID & VA & size. + * Note that L1 will be flushed entirely in any case. 
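+ * + * The sequence below simply advances the 8-bit producer index in + * STLB_CACHE_INV and polls STLB_INV_CONSUMER_INDEX until the + * invalidation engine catches up, i.e. a full L0 & L1 invalidation.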
+ */ + + /* L0 & L1 invalidation */ + inv_data = RREG32(mmSTLB_CACHE_INV); + /* PI is 8 bit */ + pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF; + WREG32(mmSTLB_CACHE_INV, + (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi); + + rc = hl_poll_timeout( + hdev, + mmSTLB_INV_CONSUMER_INDEX, + status, + status == pi, + 1000, + timeout_usec); + + mutex_unlock(&hdev->mmu_cache_lock); + + if (rc) + dev_notice_ratelimited(hdev->dev, + "Timeout when waiting for MMU cache invalidation\n"); +} + +static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, + u64 phys_addr) +{ + u32 status, timeout_usec; + int rc; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT); + WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT); + WREG32(MMU_ASID_BUSY, 0x80000000 | asid); + + rc = hl_poll_timeout( + hdev, + MMU_ASID_BUSY, + status, + !(status & 0x80000000), + 1000, + timeout_usec); + + if (rc) { + dev_err(hdev->dev, + "Timeout during MMU hop0 config of asid %d\n", asid); + return rc; + } + + return 0; +} + +int goya_send_heartbeat(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + struct armcp_packet hb_pkt; + long result; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + memset(&hb_pkt, 0, sizeof(hb_pkt)); + + hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST << + ARMCP_PKT_CTL_OPCODE_SHIFT); + hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, + sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + + if ((rc) || (result != ARMCP_PACKET_FENCE_VAL)) + rc = -EIO; + + return rc; +} + +static int goya_armcp_info_get(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct armcp_packet pkt; + void *armcp_info_cpu_addr; + dma_addr_t armcp_info_dma_addr; + u64 dram_size; + long result; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + armcp_info_cpu_addr = + hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, + sizeof(struct armcp_info), &armcp_info_dma_addr); + if (!armcp_info_cpu_addr) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for ArmCP info packet\n"); + return -ENOMEM; + } + + memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info)); + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.addr = cpu_to_le64(armcp_info_dma_addr + + prop->host_phys_base_address); + pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info)); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + GOYA_ARMCP_INFO_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to send armcp info pkt, error %d\n", rc); + goto out; + } + + memcpy(&prop->armcp_info, armcp_info_cpu_addr, + sizeof(prop->armcp_info)); + + dram_size = le64_to_cpu(prop->armcp_info.dram_size); + if (dram_size) { + if ((!is_power_of_2(dram_size)) || + (dram_size < DRAM_PHYS_DEFAULT_SIZE)) { + dev_err(hdev->dev, + "F/W reported invalid DRAM size %llu. 
Trying to use default size\n", + dram_size); + dram_size = DRAM_PHYS_DEFAULT_SIZE; + } + + prop->dram_size = dram_size; + prop->dram_end_address = prop->dram_base_address + dram_size; + } + + rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors); + if (rc) { + dev_err(hdev->dev, + "Failed to build hwmon channel info, error %d\n", rc); + rc = -EFAULT; + goto out; + } + +out: + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, + sizeof(struct armcp_info), armcp_info_cpu_addr); + + return rc; +} + +static void goya_init_clock_gating(struct hl_device *hdev) +{ + +} + +static void goya_disable_clock_gating(struct hl_device *hdev) +{ + +} + +static bool goya_is_device_idle(struct hl_device *hdev) +{ + u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg; + int i; + + offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0; + + for (i = 0 ; i < DMA_MAX_NUM ; i++) { + dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset; + + if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) != + DMA_QM_IDLE_MASK) + return false; + } + + offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0; + + for (i = 0 ; i < TPC_MAX_NUM ; i++) { + tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset; + tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset; + tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset; + + if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) != + TPC_QM_IDLE_MASK) + return false; + + if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) != + TPC_CMDQ_IDLE_MASK) + return false; + + if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) != + TPC_CFG_IDLE_MASK) + return false; + } + + if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) != + MME_QM_IDLE_MASK) + return false; + + if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) != + MME_CMDQ_IDLE_MASK) + return false; + + if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) != + MME_ARCH_IDLE_MASK) + return false; + + if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK) + return false; + + return true; +} + +static void goya_hw_queues_lock(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + spin_lock(&goya->hw_queues_lock); +} + +static void goya_hw_queues_unlock(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + spin_unlock(&goya->hw_queues_lock); +} + +static u32 goya_get_pci_id(struct hl_device *hdev) +{ + return hdev->pdev->device; +} + +static int goya_get_eeprom_data(struct hl_device *hdev, void *data, + size_t max_size) +{ + struct goya_device *goya = hdev->asic_specific; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct armcp_packet pkt; + void *eeprom_info_cpu_addr; + dma_addr_t eeprom_info_dma_addr; + long result; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) + return 0; + + eeprom_info_cpu_addr = + hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, + max_size, &eeprom_info_dma_addr); + if (!eeprom_info_cpu_addr) { + dev_err(hdev->dev, + "Failed to allocate DMA memory for EEPROM info packet\n"); + return -ENOMEM; + } + + memset(eeprom_info_cpu_addr, 0, max_size); + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.addr = cpu_to_le64(eeprom_info_dma_addr + + prop->host_phys_base_address); + pkt.data_max_size = cpu_to_le32(max_size); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + GOYA_ARMCP_EEPROM_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to send armcp EEPROM pkt, error %d\n", rc); + goto out; + } + + /* result contains the actual size */ + memcpy(data, 
eeprom_info_cpu_addr, min((size_t)result, max_size)); + +out: + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size, + eeprom_info_cpu_addr); + + return rc; +} + +static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev) +{ + return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS); +} + +static const struct hl_asic_funcs goya_funcs = { + .early_init = goya_early_init, + .early_fini = goya_early_fini, + .late_init = goya_late_init, + .late_fini = goya_late_fini, + .sw_init = goya_sw_init, + .sw_fini = goya_sw_fini, + .hw_init = goya_hw_init, + .hw_fini = goya_hw_fini, + .halt_engines = goya_halt_engines, + .suspend = goya_suspend, + .resume = goya_resume, + .cb_mmap = goya_cb_mmap, + .ring_doorbell = goya_ring_doorbell, + .flush_pq_write = goya_flush_pq_write, + .dma_alloc_coherent = goya_dma_alloc_coherent, + .dma_free_coherent = goya_dma_free_coherent, + .get_int_queue_base = goya_get_int_queue_base, + .test_queues = goya_test_queues, + .dma_pool_zalloc = goya_dma_pool_zalloc, + .dma_pool_free = goya_dma_pool_free, + .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc, + .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free, + .hl_dma_unmap_sg = goya_dma_unmap_sg, + .cs_parser = goya_cs_parser, + .asic_dma_map_sg = goya_dma_map_sg, + .get_dma_desc_list_size = goya_get_dma_desc_list_size, + .add_end_of_cb_packets = goya_add_end_of_cb_packets, + .update_eq_ci = goya_update_eq_ci, + .context_switch = goya_context_switch, + .restore_phase_topology = goya_restore_phase_topology, + .debugfs_read32 = goya_debugfs_read32, + .debugfs_write32 = goya_debugfs_write32, + .add_device_attr = goya_add_device_attr, + .handle_eqe = goya_handle_eqe, + .set_pll_profile = goya_set_pll_profile, + .get_events_stat = goya_get_events_stat, + .read_pte = goya_read_pte, + .write_pte = goya_write_pte, + .mmu_invalidate_cache = goya_mmu_invalidate_cache, + .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, + .send_heartbeat = goya_send_heartbeat, + .enable_clock_gating = goya_init_clock_gating, + .disable_clock_gating = goya_disable_clock_gating, + .is_device_idle = goya_is_device_idle, + .soft_reset_late_init = goya_soft_reset_late_init, + .hw_queues_lock = goya_hw_queues_lock, + .hw_queues_unlock = goya_hw_queues_unlock, + .get_pci_id = goya_get_pci_id, + .get_eeprom_data = goya_get_eeprom_data, + .send_cpu_message = goya_send_cpu_message, + .get_hw_state = goya_get_hw_state +}; + +/* + * goya_set_asic_funcs - set Goya function pointers + * + * @*hdev: pointer to hl_device structure + * + */ +void goya_set_asic_funcs(struct hl_device *hdev) +{ + hdev->asic_funcs = &goya_funcs; +} diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h new file mode 100644 index 000000000000..830551b6b062 --- /dev/null +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYAP_H_ +#define GOYAP_H_ + +#include +#include "habanalabs.h" +#include "include/hl_boot_if.h" +#include "include/goya/goya_packets.h" +#include "include/goya/goya.h" +#include "include/goya/goya_async_events.h" +#include "include/goya/goya_fw_if.h" + +#define NUMBER_OF_CMPLT_QUEUES 5 +#define NUMBER_OF_EXT_HW_QUEUES 5 +#define NUMBER_OF_CPU_HW_QUEUES 1 +#define NUMBER_OF_INT_HW_QUEUES 9 +#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \ + NUMBER_OF_CPU_HW_QUEUES + \ + NUMBER_OF_INT_HW_QUEUES) + +/* + * Number of MSIX interrupts IDS: + * Each completion queue has 1 ID + * The event queue has 1 ID + */ +#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1) + +#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES) +#error "Number of H/W queues must be smaller than HL_MAX_QUEUES" +#endif + +#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES) +#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES" +#endif + +#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */ + +#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */ + +#define TPC_ENABLED_MASK 0xFF + +#define PLL_HIGH_DEFAULT 1575000000 /* 1.575 GHz */ + +#define MAX_POWER_DEFAULT 200000 /* 200W */ + +#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */ +#define GOYA_ARMCP_EEPROM_TIMEOUT 10000000 /* 10s */ + +#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */ + +/* DRAM Memory Map */ + +#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */ +#define MMU_PAGE_TABLES_SIZE 0x0DE00000 /* 222MB */ +#define MMU_DRAM_DEFAULT_PAGE_SIZE 0x00200000 /* 2MB */ +#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */ +#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */ +#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */ + +#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE +#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE) +#define MMU_DRAM_DEFAULT_PAGE_ADDR (MMU_PAGE_TABLES_ADDR + \ + MMU_PAGE_TABLES_SIZE) +#define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \ + MMU_DRAM_DEFAULT_PAGE_SIZE) +#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + \ + MMU_CACHE_MNG_SIZE) +#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE) +#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE) + +#if (DRAM_BASE_ADDR_USER != 0x20000000) +#error "KMD must reserve 512MB" +#endif + +/* + * SRAM Memory Map for KMD + * + * KMD occupies KMD_SRAM_SIZE bytes from the start of SRAM. 
It is used for + * MME/TPC QMANs + * + */ + +#define MME_QMAN_BASE_OFFSET 0x000000 /* Must be 0 */ +#define MME_QMAN_LENGTH 64 +#define TPC_QMAN_LENGTH 64 + +#define TPC0_QMAN_BASE_OFFSET (MME_QMAN_BASE_OFFSET + \ + (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC1_QMAN_BASE_OFFSET (TPC0_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC2_QMAN_BASE_OFFSET (TPC1_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC3_QMAN_BASE_OFFSET (TPC2_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC4_QMAN_BASE_OFFSET (TPC3_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC5_QMAN_BASE_OFFSET (TPC4_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC6_QMAN_BASE_OFFSET (TPC5_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) + +#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) + +#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START) +#error "MME/TPC QMANs SRAM space exceeds limit" +#endif + +#define SRAM_USER_BASE_OFFSET GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START + +/* Virtual address space */ +#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */ +#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 1TB */ +#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \ + VA_HOST_SPACE_START) /* 767TB */ + +#define VA_DDR_SPACE_START 0x800000000ull /* 32GB */ +#define VA_DDR_SPACE_END 0x2000000000ull /* 128GB */ +#define VA_DDR_SPACE_SIZE (VA_DDR_SPACE_END - \ + VA_DDR_SPACE_START) /* 128GB */ + +#define DMA_MAX_TRANSFER_SIZE U32_MAX + +#define HW_CAP_PLL 0x00000001 +#define HW_CAP_DDR_0 0x00000002 +#define HW_CAP_DDR_1 0x00000004 +#define HW_CAP_MME 0x00000008 +#define HW_CAP_CPU 0x00000010 +#define HW_CAP_DMA 0x00000020 +#define HW_CAP_MSIX 0x00000040 +#define HW_CAP_CPU_Q 0x00000080 +#define HW_CAP_MMU 0x00000100 +#define HW_CAP_TPC_MBIST 0x00000200 +#define HW_CAP_GOLDEN 0x00000400 +#define HW_CAP_TPC 0x00000800 + +#define CPU_PKT_SHIFT 5 +#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT) +#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1)) +#define CPU_MAX_PKTS_IN_CB 32 +#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB) +#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE) + +enum goya_fw_component { + FW_COMP_UBOOT, + FW_COMP_PREBOOT +}; + +struct goya_device { + int (*test_cpu_queue)(struct hl_device *hdev); + int (*armcp_info_get)(struct hl_device *hdev); + + /* TODO: remove hw_queues_lock after moving to scheduler code */ + spinlock_t hw_queues_lock; + + u64 mme_clk; + u64 tpc_clk; + u64 ic_clk; + + u64 ddr_bar_cur_addr; + u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE]; + u32 hw_cap_initialized; +}; + +int goya_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, + u8 i2c_addr, u8 i2c_reg, u32 *val); +int goya_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, + u8 i2c_addr, u8 i2c_reg, u32 val); +int goya_test_cpu_queue(struct hl_device *hdev); +int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, + u32 timeout, long *result); +long goya_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_current(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr); +long goya_get_pwm_info(struct hl_device *hdev, int 
sensor_index, u32 attr); +void goya_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, + long value); +void goya_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state); +void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq); +void goya_add_device_attr(struct hl_device *hdev, + struct attribute_group *dev_attr_grp); +void goya_init_security(struct hl_device *hdev); +u64 goya_get_max_power(struct hl_device *hdev); +void goya_set_max_power(struct hl_device *hdev, u64 value); + +int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode); +void goya_late_fini(struct hl_device *hdev); +int goya_suspend(struct hl_device *hdev); +int goya_resume(struct hl_device *hdev); +void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val); +void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry); +void *goya_get_events_stat(struct hl_device *hdev, u32 *size); +void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr, + u32 cq_val, u32 msix_vec); +int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser); +void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len); +u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt); +int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id); +int goya_send_heartbeat(struct hl_device *hdev); + +#endif /* GOYAP_H_ */ diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c new file mode 100644 index 000000000000..088692c852b6 --- /dev/null +++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "goyaP.h" + +void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq) +{ + struct goya_device *goya = hdev->asic_specific; + + switch (freq) { + case PLL_HIGH: + hl_set_frequency(hdev, MME_PLL, hdev->high_pll); + hl_set_frequency(hdev, TPC_PLL, hdev->high_pll); + hl_set_frequency(hdev, IC_PLL, hdev->high_pll); + break; + case PLL_LOW: + hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW); + hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW); + hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW); + break; + case PLL_LAST: + hl_set_frequency(hdev, MME_PLL, goya->mme_clk); + hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk); + hl_set_frequency(hdev, IC_PLL, goya->ic_clk); + break; + default: + dev_err(hdev->dev, "unknown frequency setting\n"); + } +} + +static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, false); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; + int rc; + long value; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto fail; + } + + if (hdev->pm_mng_profile == PM_AUTO) { + count = -EPERM; + goto fail; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto fail; + } + + hl_set_frequency(hdev, MME_PLL, value); + goya->mme_clk = value; + +fail: + return count; +} + +static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, TPC_PLL, false); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; + int rc; + long value; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto fail; + } + + if (hdev->pm_mng_profile == PM_AUTO) { + count = -EPERM; + goto fail; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto fail; + } + + hl_set_frequency(hdev, TPC_PLL, value); + goya->tpc_clk = value; + +fail: + return count; +} + +static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, IC_PLL, false); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + struct goya_device *goya = hdev->asic_specific; + int rc; + long value; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto fail; + } + + if (hdev->pm_mng_profile == PM_AUTO) { + count = -EPERM; + goto fail; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto fail; + } + + hl_set_frequency(hdev, IC_PLL, 
value); + goya->ic_clk = value; + +fail: + return count; +} + +static ssize_t mme_clk_curr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, MME_PLL, true); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t tpc_clk_curr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, TPC_PLL, true); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static ssize_t ic_clk_curr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + value = hl_get_frequency(hdev, IC_PLL, true); + + if (value < 0) + return value; + + return sprintf(buf, "%lu\n", value); +} + +static DEVICE_ATTR_RW(ic_clk); +static DEVICE_ATTR_RO(ic_clk_curr); +static DEVICE_ATTR_RW(mme_clk); +static DEVICE_ATTR_RO(mme_clk_curr); +static DEVICE_ATTR_RW(tpc_clk); +static DEVICE_ATTR_RO(tpc_clk_curr); + +static struct attribute *goya_dev_attrs[] = { + &dev_attr_ic_clk.attr, + &dev_attr_ic_clk_curr.attr, + &dev_attr_mme_clk.attr, + &dev_attr_mme_clk_curr.attr, + &dev_attr_tpc_clk.attr, + &dev_attr_tpc_clk_curr.attr, + NULL, +}; + +void goya_add_device_attr(struct hl_device *hdev, + struct attribute_group *dev_attr_grp) +{ + dev_attr_grp->attrs = goya_dev_attrs; +} diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/misc/habanalabs/goya/goya_security.c new file mode 100644 index 000000000000..575003238401 --- /dev/null +++ b/drivers/misc/habanalabs/goya/goya_security.c @@ -0,0 +1,2999 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "goyaP.h" + +/* + * goya_set_block_as_protected - set the given block as protected + * + * @hdev: pointer to hl_device structure + * @block: block base address + * + */ +static void goya_pb_set_block(struct hl_device *hdev, u64 base) +{ + u32 pb_addr = base - CFG_BASE + PROT_BITS_OFFS; + + while (pb_addr & 0xFFF) { + WREG32(pb_addr, 0); + pb_addr += 4; + } +} + +static void goya_init_mme_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + /* TODO: change to real reg name when Soc Online is updated */ + u64 mmMME_SBB_POWER_ECO1 = 0xDFF60, + mmMME_SBB_POWER_ECO2 = 0xDFF64; + + goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_0_BASE); + goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_1_BASE); + goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_2_BASE); + goya_pb_set_block(hdev, mmACC_MS_ECC_MEM_3_BASE); + + goya_pb_set_block(hdev, mmSBA_ECC_MEM_BASE); + goya_pb_set_block(hdev, mmSBB_ECC_MEM_BASE); + + goya_pb_set_block(hdev, mmMME1_RTR_BASE); + goya_pb_set_block(hdev, mmMME1_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME1_WR_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME2_RTR_BASE); + goya_pb_set_block(hdev, mmMME2_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME2_WR_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME3_RTR_BASE); + goya_pb_set_block(hdev, mmMME3_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME3_WR_REGULATOR_BASE); + + goya_pb_set_block(hdev, mmMME4_RTR_BASE); + goya_pb_set_block(hdev, mmMME4_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME4_WR_REGULATOR_BASE); + + goya_pb_set_block(hdev, mmMME5_RTR_BASE); + goya_pb_set_block(hdev, mmMME5_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME5_WR_REGULATOR_BASE); + + goya_pb_set_block(hdev, mmMME6_RTR_BASE); + goya_pb_set_block(hdev, mmMME6_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmMME6_WR_REGULATOR_BASE); + + pb_addr = (mmMME_DUMMY & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_DUMMY & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_DUMMY & 0x7F) >> 2); + mask |= 1 << ((mmMME_RESET & 0x7F) >> 2); + mask |= 1 << ((mmMME_STALL & 0x7F) >> 2); + mask |= 1 << ((mmMME_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmMME_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmMME_DBGMEM_ADD & 0x7F) >> 2); + mask |= 1 << ((mmMME_DBGMEM_DATA_WR & 0x7F) >> 2); + mask |= 1 << ((mmMME_DBGMEM_DATA_RD & 0x7F) >> 2); + mask |= 1 << ((mmMME_DBGMEM_CTRL & 0x7F) >> 2); + mask |= 1 << ((mmMME_DBGMEM_RC & 0x7F) >> 2); + mask |= 1 << ((mmMME_LOG_SHADOW & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_STORE_MAX_CREDIT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_STORE_MAX_CREDIT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_STORE_MAX_CREDIT & 0x7F) >> 2); + mask |= 1 << ((mmMME_AGU & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBA & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBB & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBC & 0x7F) >> 2); + mask |= 1 << ((mmMME_WBC & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBA_CONTROL_DATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBB_CONTROL_DATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBC_CONTROL_DATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_WBC_CONTROL_DATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_TE & 0x7F) >> 2); + mask |= 1 << ((mmMME_TE2DEC & 0x7F) >> 2); + mask |= 1 << ((mmMME_REI_STATUS & 0x7F) >> 2); + mask |= 1 << ((mmMME_REI_MASK & 0x7F) >> 2); + mask |= 1 << ((mmMME_SEI_STATUS & 0x7F) >> 2); + mask |= 1 << ((mmMME_SEI_MASK & 0x7F) >> 2); + mask |= 1 << ((mmMME_SPI_STATUS & 0x7F) >> 2); + mask |= 1 << ((mmMME_SPI_MASK & 0x7F) >> 2); + + 
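+ /*
+  * Note on the pattern used throughout this file: pb_addr points at the
+  * protection-bits area of the current register block (block base +
+  * PROT_BITS_OFFS), where each 32-bit protection word covers one
+  * 0x80-byte group of registers. word_offset selects that word and
+  * (reg & 0x7F) >> 2 is the register's bit within it. The registers
+  * accumulated in 'mask' are the ones being locked: writing ~mask
+  * clears exactly those bits, and the zero-fill in goya_pb_set_block()
+  * suggests a cleared bit marks a register as privileged, so only the
+  * selected registers end up protected while the rest of the group
+  * stays writable from user queues.
+  */
+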
WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << 
((mmMME_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_QM_CP_STS & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_QM_CP_STS & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_QM_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_PQ_BUF_RDATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmMME_QM_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_CQ_IFIFO_CNT & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + 
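+ /*
+  * Worked example of the bit arithmetic (illustrative numbers only): a
+  * register 0x168 bytes into its block falls in the third 0x80-byte
+  * group, so its protection bit lives in the word at byte offset
+  * (0x168 >> 7) << 2 = 8, at bit position (0x168 & 0x7F) >> 2 = 26;
+  * OR-ing 1 << 26 into 'mask' and writing ~mask locks just that
+  * register within its group.
+  */
+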
mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmMME_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmMME_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmMME_SBB_POWER_ECO1 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmMME_SBB_POWER_ECO1 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmMME_SBB_POWER_ECO1 & 0x7F) >> 2); + mask |= 1 << ((mmMME_SBB_POWER_ECO2 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); +} + +static void goya_init_dma_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + goya_pb_set_block(hdev, mmDMA_NRTR_BASE); + goya_pb_set_block(hdev, mmDMA_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmDMA_WR_REGULATOR_BASE); + + pb_addr = (mmDMA_QM_0_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_0_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_0_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_0_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_0_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_0_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << 
((mmDMA_QM_0_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_0_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_0_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_0_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_0_BASE); + + pb_addr = (mmDMA_QM_1_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_1_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_1_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_1_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_1_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_1_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PUSH2 & 
0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_1_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_1_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_1_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_1_BASE); + + pb_addr = (mmDMA_QM_2_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_2_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_2_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << 
((mmDMA_QM_2_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_2_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_2_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_2_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_2_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_2_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_2_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_2_BASE); + + pb_addr = (mmDMA_QM_3_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_3_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_3_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_LO 
& 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_3_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_3_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_3_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_3_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_3_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_3_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask 
|= 1 << ((mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_3_BASE); + + pb_addr = (mmDMA_QM_4_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_4_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_4_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_4_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_4_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_4_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmDMA_QM_4_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmDMA_QM_4_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmDMA_QM_4_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + 
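+ /*
+  * The same idiom repeats for every DMA queue manager and, further
+  * below, for the TPCs; only the register prefix changes. Purely as an
+  * illustration (goya_pb_bit() is hypothetical, not part of this
+  * patch), each 'mask |=' line ORs in the equivalent of
+  * goya_pb_bit(reg):
+  *
+  *	static inline u32 goya_pb_bit(u64 reg)
+  *	{
+  *		return 1 << ((reg & 0x7F) >> 2);
+  *	}
+  *
+  * one protection bit per register, collected before the group's word
+  * is written back as ~mask.
+  */
+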
mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmDMA_CH_4_BASE); +} + +static void goya_init_tpc_protection_bits(struct hl_device *hdev) +{ + u32 pb_addr, mask; + u8 word_offset; + + goya_pb_set_block(hdev, mmTPC0_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC0_WR_REGULATOR_BASE); + + pb_addr = (mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << 
((mmTPC0_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_SECURE_PROPS & 
0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC0_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC0_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC0_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC0_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC1_RTR_BASE); + goya_pb_set_block(hdev, mmTPC1_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC1_WR_REGULATOR_BASE); + + pb_addr = (mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = 
(mmTPC1_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC1_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_TSIZE_STS & 0x7F) >> 
2); + mask |= 1 << ((mmTPC1_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC1_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << 
((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC1_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC1_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC1_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC2_RTR_BASE); + goya_pb_set_block(hdev, mmTPC2_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC2_WR_REGULATOR_BASE); + + pb_addr = (mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CFG_FUNC_MBIST_CNTRL & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC2_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << 
((mmTPC2_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << 
((mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC2_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC2_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC2_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << 
((mmTPC2_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC2_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC3_RTR_BASE); + goya_pb_set_block(hdev, mmTPC3_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC3_WR_REGULATOR_BASE); + + pb_addr = (mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH + & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CFG_FUNC_MBIST_CNTRL + & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_STS0 & 0x7F) >> 2); + mask 
|= 1 << ((mmTPC3_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 
2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC3_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC3_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC3_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC3_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC4_RTR_BASE); + goya_pb_set_block(hdev, mmTPC4_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC4_WR_REGULATOR_BASE); + + pb_addr = (mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + 
mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 
2; + mask = 1 << ((mmTPC4_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC4_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << 
((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC4_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC4_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC4_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC5_RTR_BASE); + goya_pb_set_block(hdev, mmTPC5_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC5_WR_REGULATOR_BASE); + + pb_addr = (mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_BASE_LO & 0x7F) >> 2); 
+ mask |= 1 << ((mmTPC5_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << 
((mmTPC5_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC5_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC5_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC5_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC5_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC6_RTR_BASE); + goya_pb_set_block(hdev, mmTPC6_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC6_WR_REGULATOR_BASE); + + pb_addr = (mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << 
((mmTPC6_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << 
((mmTPC6_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + 
word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC6_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC6_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7) + << 2; + mask = 1 << ((mmTPC6_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CP_DBG_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2); + mask |= 1 << ((mmTPC6_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + goya_pb_set_block(hdev, mmTPC7_NRTR_BASE); + goya_pb_set_block(hdev, mmTPC7_RD_REGULATOR_BASE); + goya_pb_set_block(hdev, mmTPC7_WR_REGULATOR_BASE); + + pb_addr = (mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_CFG_SUBTRACT_VALUE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_LOW & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_SM_BASE_ADDRESS_HIGH & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CFG_ARUSER & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_ARUSER & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CFG_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_AWUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CFG_FUNC_MBIST_CNTRL & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CFG_FUNC_MBIST_CNTRL & + PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CFG_FUNC_MBIST_CNTRL & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_PAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_4 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_5 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_6 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_7 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_8 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CFG_FUNC_MBIST_MEM_9 & 0x7F) >> 2); + + WREG32(pb_addr + 
word_offset, ~mask); + + pb_addr = (mmTPC7_QM_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_QM_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_GLBL_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_BASE_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_BASE_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_SIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_CI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_ARUSER & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_PQ_PUSH0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_PQ_PUSH0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_QM_PQ_PUSH0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PUSH1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PUSH2 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_PUSH3 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_PQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_TSIZE & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_CTL & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_QM_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_QM_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_QM_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 
2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_QM_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CMDQ_GLBL_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CMDQ_GLBL_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CMDQ_GLBL_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_PROT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_CFG & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_ERR_WDATA & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_GLBL_STS1 & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CMDQ_CQ_CFG0 & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CMDQ_CQ_CFG0 & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CMDQ_CQ_CFG0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_CFG1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_ARUSER & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_LO_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_PTR_HI_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_TSIZE_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_CTL_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_STS0 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_STS1 & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CMDQ_CQ_IFIFO_CNT & ~0xFFF) + PROT_BITS_OFFS; + word_offset = ((mmTPC7_CMDQ_CQ_IFIFO_CNT & PROT_BITS_OFFS) >> 7) << 2; + mask = 1 << ((mmTPC7_CMDQ_CQ_IFIFO_CNT & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_STS & 0x7F) >> 2); + mask |= 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_LO & 0x7F) >> 2); + + WREG32(pb_addr + word_offset, ~mask); + + pb_addr = (mmTPC7_CMDQ_CP_CURRENT_INST_HI & ~0xFFF) + PROT_BITS_OFFS; + 
word_offset = ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & PROT_BITS_OFFS) >> 7)
+			<< 2;
+	mask = 1 << ((mmTPC7_CMDQ_CP_CURRENT_INST_HI & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_BARRIER_CFG & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CP_DBG_0 & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_ADDR & 0x7F) >> 2);
+	mask |= 1 << ((mmTPC7_CMDQ_CQ_BUF_RDATA & 0x7F) >> 2);
+
+	WREG32(pb_addr + word_offset, ~mask);
+}
+
+/*
+ * goya_init_protection_bits - Initialize protection bits for specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * All protection bits are 1 by default, meaning not protected. Each bit that
+ * belongs to a protected register needs to be set to 0.
+ *
+ */
+static void goya_init_protection_bits(struct hl_device *hdev)
+{
+	/*
+	 * In each 4K block of registers, the last 128 bytes are protection
+	 * bits - a total of 1024 bits, one for each register. Each bit
+	 * corresponds to a specific register, in register order. So to find
+	 * the bit that belongs to a given register, we calculate its word
+	 * offset and then the exact bit inside that word (a word is 4 bytes).
+	 *
+	 * Register address:
+	 *
+	 * 31                 12 11           7   6            2  1      0
+	 * -----------------------------------------------------------------
+	 * |      Don't         |    word       |  bit location  |    0    |
+	 * |      care          |   offset      |  inside word   |         |
+	 * -----------------------------------------------------------------
+	 *
+	 * Bits 7-11 represent the word offset inside the 128 bytes.
+	 * Bits 2-6 represent the bit location inside the word.
+	 */
+
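For reference, the calculation described in the comment above can be illustrated with a minimal standalone C sketch. It is not part of the patch: the PROT_BITS_OFFS value of 0xF80 is an assumption taken from the "last 128 bytes of each 4K block" description, and the register offset used is an arbitrary example. The sketch only shows the arithmetic that the generated code in this file repeats for every protected register.

#include <stdio.h>

#define PROT_BITS_OFFS	0xF80	/* assumed: last 128 bytes of each 4 KB block */

int main(void)
{
	unsigned int reg = 0x20A008;	/* hypothetical register offset */

	/* base of the protection-bits area of this register's 4 KB block */
	unsigned int pb_addr = (reg & ~0xFFFu) + PROT_BITS_OFFS;

	/* bits 11:7 of the offset pick one of the 32 protection words */
	unsigned int word_offset = ((reg & PROT_BITS_OFFS) >> 7) << 2;

	/* bits 6:2 pick the bit inside that 32-bit word */
	unsigned int bit = (reg & 0x7F) >> 2;
	unsigned int mask = 1u << bit;

	/* clearing the bit (writing ~mask) marks the register as protected */
	printf("reg 0x%06x -> prot word at 0x%06x, bit %u, value 0x%08x\n",
	       reg, pb_addr + word_offset, bit, ~mask);
	return 0;
}

The resulting value is what the code above writes with WREG32(pb_addr + word_offset, ~mask): every bit stays 1 (unprotected) except the ones explicitly cleared for protected registers.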
+	goya_pb_set_block(hdev, mmPCI_NRTR_BASE);
+	goya_pb_set_block(hdev, mmPCI_RD_REGULATOR_BASE);
+	goya_pb_set_block(hdev, mmPCI_WR_REGULATOR_BASE);
+
+	goya_pb_set_block(hdev, mmSRAM_Y0_X0_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X0_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X1_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X1_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X2_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X2_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X3_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X3_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X4_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y0_X4_RTR_BASE);
+
+	goya_pb_set_block(hdev, mmSRAM_Y1_X0_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X0_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X1_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X1_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X2_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X2_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X3_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X3_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X4_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y1_X4_RTR_BASE);
+
+	goya_pb_set_block(hdev, mmSRAM_Y2_X0_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X0_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X1_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X1_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X2_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X2_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X3_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X3_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X4_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y2_X4_RTR_BASE);
+
+	goya_pb_set_block(hdev, mmSRAM_Y3_X0_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X0_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X1_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X1_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X2_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X2_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X3_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X3_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X4_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y3_X4_RTR_BASE);
+
+	goya_pb_set_block(hdev, mmSRAM_Y4_X0_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X0_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X1_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X1_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X2_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X2_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X3_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X3_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X4_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y4_X4_RTR_BASE);
+
+	goya_pb_set_block(hdev, mmSRAM_Y5_X0_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X0_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X1_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X1_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X2_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X2_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X3_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X3_RTR_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X4_BANK_BASE);
+	goya_pb_set_block(hdev, mmSRAM_Y5_X4_RTR_BASE);
+
+	goya_pb_set_block(hdev, mmPCIE_WRAP_BASE);
+	goya_pb_set_block(hdev, mmPCIE_CORE_BASE);
+	goya_pb_set_block(hdev, mmPCIE_DB_CFG_BASE);
+	goya_pb_set_block(hdev, mmPCIE_DB_CMD_BASE);
+	goya_pb_set_block(hdev, mmPCIE_AUX_BASE);
+	goya_pb_set_block(hdev, mmPCIE_DB_RSV_BASE);
+	goya_pb_set_block(hdev, mmPCIE_PHY_BASE);
+
+	goya_init_mme_protection_bits(hdev);
+
+	goya_init_dma_protection_bits(hdev);
+
+	goya_init_tpc_protection_bits(hdev);
+}
+
+/*
+ * goya_init_security - Initialize security model
+ *
+ * @hdev: pointer to hl_device structure
+ *
+ * Initialize the security model of the device. That includes range registers
+ * and a protection bit per register.
+ *
+ */
+void goya_init_security(struct hl_device *hdev)
+{
+	struct goya_device *goya = hdev->asic_specific;
+
+	u32 dram_addr_lo = lower_32_bits(DRAM_PHYS_BASE);
+	u32 dram_addr_hi = upper_32_bits(DRAM_PHYS_BASE);
+
+	u32 lbw_rng0_base = 0xFC440000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng0_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng1_base = 0xFC480000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng1_mask = 0xFFF80000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng2_base = 0xFC600000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng2_mask = 0xFFE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng3_base = 0xFC800000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng3_mask = 0xFFF00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng4_base = 0xFCC02000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng4_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng5_base = 0xFCC40000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng5_mask = 0xFFFF8000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng6_base = 0xFCC48000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng6_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng7_base = 0xFCC4A000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng7_mask = 0xFFFFE000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng8_base = 0xFCC4C000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng8_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng9_base = 0xFCC50000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng9_mask = 0xFFFF0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+
+	u32 lbw_rng10_base = 0xFCC60000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK;
+	u32 lbw_rng10_mask =
0xFFFE0000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng11_base = 0xFCE00000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng11_mask = 0xFFFFC000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng12_base = 0xFE484000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng12_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + u32 lbw_rng13_base = 0xFEC43000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + u32 lbw_rng13_mask = 0xFFFFF000 & DMA_MACRO_LBW_RANGE_BASE_R_MASK; + + WREG32(mmDMA_MACRO_LBW_RANGE_HIT_BLOCK, 0xFFFF); + WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFF); + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) { + WREG32(mmDMA_MACRO_HBW_RANGE_HIT_BLOCK, 0xFE); + + /* Protect HOST */ + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_0, 0); + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_0, 0); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_0, 0); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_0, 0xFFF80); + } + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_31_0_1, dram_addr_lo); + WREG32(mmDMA_MACRO_HBW_RANGE_BASE_49_32_1, dram_addr_hi); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_31_0_1, 0xE0000000); + WREG32(mmDMA_MACRO_HBW_RANGE_MASK_49_32_1, 0x3FFFF); + + /* Protect registers */ + + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmDMA_MACRO_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmDMA_MACRO_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME1_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME2_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME3_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME4_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME5_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmMME6_RTR_LBW_RANGE_HIT, 0xFFFF); + + WREG32(mmMME1_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME2_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME3_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME4_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME5_RTR_HBW_RANGE_HIT, 0xFE); + WREG32(mmMME6_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_0, 
0xFFF80); + + WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmMME1_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmMME6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmMME6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmMME1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + 
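+	/*
+	 * Each lbw_rngN value above is a base/mask pair. Assuming the usual
+	 * range-register semantics (an address bit is compared against the
+	 * base only where the corresponding mask bit is set), lbw_rng0
+	 * (base 0xFC440000, mask 0xFFFF0000) leaves the low 16 bits free,
+	 * i.e. it describes a 64 KB window at 0xFC440000, while lbw_rng4
+	 * (mask 0xFFFFF000) describes a 4 KB window. The same 14 windows are
+	 * programmed into every router below.
+	 */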
WREG32(mmMME1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_1, 
lbw_rng1_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmMME6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_5, 
lbw_rng5_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmMME6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmMME6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC0_NRTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC0_NRTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC0_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC0_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC0_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC0_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC1_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC1_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_L_1, 
dram_addr_lo); + WREG32(mmTPC1_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC1_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC1_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC1_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC2_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC2_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC2_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC2_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + 
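+	/*
+	 * For the HBW range-1 (DDR) protection above, assuming the same
+	 * base/mask semantics, the low mask 0xE0000000 keeps only address
+	 * bits [31:29] compared, so bits [28:0] are don't-care: a 2^29 byte
+	 * (512 MB) window starting at DRAM_PHYS_BASE, which is what the
+	 * "mask protects the first 512MB" comments refer to.
+	 */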
WREG32(mmTPC2_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC2_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC2_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC3_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC3_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC3_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC3_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC3_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC3_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC4_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC4_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC4_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC4_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + 
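+	/*
+	 * The same range-register programming (two HBW ranges plus the 14 LBW
+	 * ranges) is repeated for the DMA macro, each MME router (1-6) and
+	 * each TPC router/NRTR (0-7), so every initiator on the chip is
+	 * subject to identical address windows.
+	 */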
WREG32(mmTPC4_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC4_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC4_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC5_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC5_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC5_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC5_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC5_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC5_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC6_RTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC6_RTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * 
Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC6_RTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC6_RTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_9, lbw_rng9_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC6_RTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC6_RTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + WREG32(mmTPC7_NRTR_LBW_RANGE_HIT, 0xFFFF); + WREG32(mmTPC7_NRTR_HBW_RANGE_HIT, 0xFE); + + /* Protect HOST */ + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_0, 0); + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_0, 0); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_0, 0); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_0, 0xFFF80); + + /* + * Protect DDR @ + * DRAM_VIRT_BASE : DRAM_VIRT_BASE + DRAM_VIRT_END + * The mask protects the first 512MB + */ + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_L_1, dram_addr_lo); + WREG32(mmTPC7_NRTR_HBW_RANGE_BASE_H_1, dram_addr_hi); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_L_1, 0xE0000000); + WREG32(mmTPC7_NRTR_HBW_RANGE_MASK_H_1, 0x3FFFF); + + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_0, lbw_rng0_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_0, lbw_rng0_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_1, lbw_rng1_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_1, lbw_rng1_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_2, lbw_rng2_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_2, lbw_rng2_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_3, lbw_rng3_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_3, lbw_rng3_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_4, lbw_rng4_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_4, lbw_rng4_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_5, lbw_rng5_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_5, lbw_rng5_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_6, lbw_rng6_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_6, lbw_rng6_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_7, lbw_rng7_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_7, lbw_rng7_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_8, lbw_rng8_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_8, lbw_rng8_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_9, lbw_rng9_base); + 
WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_9, lbw_rng9_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_10, lbw_rng10_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_10, lbw_rng10_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_11, lbw_rng11_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_11, lbw_rng11_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_12, lbw_rng12_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_12, lbw_rng12_mask); + WREG32(mmTPC7_NRTR_LBW_RANGE_BASE_13, lbw_rng13_base); + WREG32(mmTPC7_NRTR_LBW_RANGE_MASK_13, lbw_rng13_mask); + + goya_init_protection_bits(hdev); +} diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h new file mode 100644 index 000000000000..a7c95e9f9b9a --- /dev/null +++ b/drivers/misc/habanalabs/habanalabs.h @@ -0,0 +1,1464 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef HABANALABSP_H_ +#define HABANALABSP_H_ + +#include "include/armcp_if.h" +#include "include/qman_if.h" + +#define pr_fmt(fmt) "habanalabs: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#define HL_NAME "habanalabs" + +#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT) + +#define HL_PENDING_RESET_PER_SEC 5 + +#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */ + +#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */ + +#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */ + +#define HL_MAX_QUEUES 128 + +#define HL_MAX_JOBS_PER_CS 64 + +/* MUST BE POWER OF 2 and larger than 1 */ +#define HL_MAX_PENDING_CS 64 + +/* Memory */ +#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ + +/* MMU */ +#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ + +/** + * struct pgt_info - MMU hop page info. + * @node: hash linked-list node for the pgts hash of pgts. + * @addr: physical address of the pgt. + * @ctx: pointer to the owner ctx. + * @num_of_ptes: indicates how many ptes are used in the pgt. + * + * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop) + * is needed during mapping, a new page is allocated and this structure holds + * its essential information. During unmapping, if no valid PTEs remained in the + * page, it is freed with its pgt_info structure. + */ +struct pgt_info { + struct hlist_node node; + u64 addr; + struct hl_ctx *ctx; + int num_of_ptes; +}; + +struct hl_device; +struct hl_fpriv; + +/** + * enum hl_queue_type - Supported QUEUE types. + * @QUEUE_TYPE_NA: queue is not available. + * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the + * host. + * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's + * memories and/or operates the compute engines. + * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU. + */ +enum hl_queue_type { + QUEUE_TYPE_NA, + QUEUE_TYPE_EXT, + QUEUE_TYPE_INT, + QUEUE_TYPE_CPU +}; + +/** + * struct hw_queue_properties - queue information. + * @type: queue type. + * @kmd_only: true if only KMD is allowed to send a job to this queue, false + * otherwise. + */ +struct hw_queue_properties { + enum hl_queue_type type; + u8 kmd_only; +}; + +/** + * enum vm_type_t - virtual memory mapping request information. + * @VM_TYPE_USERPTR: mapping of user memory to device virtual address. + * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address. + */ +enum vm_type_t { + VM_TYPE_USERPTR, + VM_TYPE_PHYS_PACK +}; + +/** + * enum hl_device_hw_state - H/W device state. 
use this to understand whether + * to do reset before hw_init or not + * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset + * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute + * hw_init + */ +enum hl_device_hw_state { + HL_DEVICE_HW_STATE_CLEAN = 0, + HL_DEVICE_HW_STATE_DIRTY +}; + +/** + * struct asic_fixed_properties - ASIC specific immutable properties. + * @hw_queues_props: H/W queues properties. + * @armcp_info: received various information from ArmCP regarding the H/W. e.g. + * available sensors. + * @uboot_ver: F/W U-boot version. + * @preboot_ver: F/W Preboot version. + * @sram_base_address: SRAM physical start address. + * @sram_end_address: SRAM physical end address. + * @sram_user_base_address - SRAM physical start address for user access. + * @dram_base_address: DRAM physical start address. + * @dram_end_address: DRAM physical end address. + * @dram_user_base_address: DRAM physical start address for user access. + * @dram_size: DRAM total size. + * @dram_pci_bar_size: size of PCI bar towards DRAM. + * @host_phys_base_address: base physical address of host memory for + * transactions that the device generates. + * @max_power_default: max power of the device after reset + * @va_space_host_start_address: base address of virtual memory range for + * mapping host memory. + * @va_space_host_end_address: end address of virtual memory range for + * mapping host memory. + * @va_space_dram_start_address: base address of virtual memory range for + * mapping DRAM memory. + * @va_space_dram_end_address: end address of virtual memory range for + * mapping DRAM memory. + * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page + * fault. + * @mmu_pgt_addr: base physical address in DRAM of MMU page tables. + * @mmu_dram_default_page_addr: DRAM default page physical address. + * @mmu_pgt_size: MMU page tables total size. + * @mmu_pte_size: PTE size in MMU page tables. + * @mmu_hop_table_size: MMU hop table size. + * @mmu_hop0_tables_total_size: total size of MMU hop0 tables. + * @dram_page_size: page size for MMU DRAM allocation. + * @cfg_size: configuration space size on SRAM. + * @sram_size: total size of SRAM. + * @max_asid: maximum number of open contexts (ASIDs). + * @num_of_events: number of possible internal H/W IRQs. + * @psoc_pci_pll_nr: PCI PLL NR value. + * @psoc_pci_pll_nf: PCI PLL NF value. + * @psoc_pci_pll_od: PCI PLL OD value. + * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value. + * @completion_queues_count: number of completion queues. + * @high_pll: high PLL frequency used by the device. + * @cb_pool_cb_cnt: number of CBs in the CB pool. + * @cb_pool_cb_size: size of each CB in the CB pool. + * @tpc_enabled_mask: which TPCs are enabled. 
+ */ +struct asic_fixed_properties { + struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES]; + struct armcp_info armcp_info; + char uboot_ver[VERSION_MAX_LEN]; + char preboot_ver[VERSION_MAX_LEN]; + u64 sram_base_address; + u64 sram_end_address; + u64 sram_user_base_address; + u64 dram_base_address; + u64 dram_end_address; + u64 dram_user_base_address; + u64 dram_size; + u64 dram_pci_bar_size; + u64 host_phys_base_address; + u64 max_power_default; + u64 va_space_host_start_address; + u64 va_space_host_end_address; + u64 va_space_dram_start_address; + u64 va_space_dram_end_address; + u64 dram_size_for_default_page_mapping; + u64 mmu_pgt_addr; + u64 mmu_dram_default_page_addr; + u32 mmu_pgt_size; + u32 mmu_pte_size; + u32 mmu_hop_table_size; + u32 mmu_hop0_tables_total_size; + u32 dram_page_size; + u32 cfg_size; + u32 sram_size; + u32 max_asid; + u32 num_of_events; + u32 psoc_pci_pll_nr; + u32 psoc_pci_pll_nf; + u32 psoc_pci_pll_od; + u32 psoc_pci_pll_div_factor; + u32 high_pll; + u32 cb_pool_cb_cnt; + u32 cb_pool_cb_size; + u8 completion_queues_count; + u8 tpc_enabled_mask; +}; + +/** + * struct hl_dma_fence - wrapper for fence object used by command submissions. + * @base_fence: kernel fence object. + * @lock: spinlock to protect fence. + * @hdev: habanalabs device structure. + * @cs_seq: command submission sequence number. + */ +struct hl_dma_fence { + struct dma_fence base_fence; + spinlock_t lock; + struct hl_device *hdev; + u64 cs_seq; +}; + +/* + * Command Buffers + */ + +#define HL_MAX_CB_SIZE 0x200000 /* 2MB */ + +/** + * struct hl_cb_mgr - describes a Command Buffer Manager. + * @cb_lock: protects cb_handles. + * @cb_handles: an idr to hold all command buffer handles. + */ +struct hl_cb_mgr { + spinlock_t cb_lock; + struct idr cb_handles; /* protected by cb_lock */ +}; + +/** + * struct hl_cb - describes a Command Buffer. + * @refcount: reference counter for usage of the CB. + * @hdev: pointer to device this CB belongs to. + * @lock: spinlock to protect mmap/cs flows. + * @debugfs_list: node in debugfs list of command buffers. + * @pool_list: node in pool list of command buffers. + * @kernel_address: Holds the CB's kernel virtual address. + * @bus_address: Holds the CB's DMA address. + * @mmap_size: Holds the CB's size that was mmaped. + * @size: holds the CB's size. + * @id: the CB's ID. + * @cs_cnt: holds number of CS that this CB participates in. + * @ctx_id: holds the ID of the owner's context. + * @mmap: true if the CB is currently mmaped to user. + * @is_pool: true if CB was acquired from the pool, false otherwise. + */ +struct hl_cb { + struct kref refcount; + struct hl_device *hdev; + spinlock_t lock; + struct list_head debugfs_list; + struct list_head pool_list; + u64 kernel_address; + dma_addr_t bus_address; + u32 mmap_size; + u32 size; + u32 id; + u32 cs_cnt; + u32 ctx_id; + u8 mmap; + u8 is_pool; +}; + + +/* + * QUEUES + */ + +struct hl_cs_job; + +/* + * Currently, there are two limitations on the maximum length of a queue: + * + * 1. The memory footprint of the queue. The current allocated space for the + * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE, + * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE, + * which currently is 4096/16 = 256 entries. + * + * To increase that, we need either to decrease the size of the + * BD (difficult), or allocate more than a single page (easier). + * + * 2. 
Because the size of the JOB handle field in the BD CTL / completion queue + * is 10-bit, we can have up to 1024 open jobs per hardware queue. + * Therefore, each queue can hold up to 1024 entries. + * + * HL_QUEUE_LENGTH is in units of struct hl_bd. + * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE + */ + +#define HL_PAGE_SIZE 4096 /* minimum page size */ +/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */ +#define HL_QUEUE_LENGTH 256 +#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE) + +/* + * HL_CQ_LENGTH is in units of struct hl_cq_entry. + * HL_CQ_LENGTH should be <= HL_PAGE_SIZE + */ +#define HL_CQ_LENGTH HL_QUEUE_LENGTH +#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE) + +/* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */ +#define HL_EQ_LENGTH 64 +#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE) + + +/** + * struct hl_hw_queue - describes a H/W transport queue. + * @shadow_queue: pointer to a shadow queue that holds pointers to jobs. + * @queue_type: type of queue. + * @kernel_address: holds the queue's kernel virtual address. + * @bus_address: holds the queue's DMA address. + * @pi: holds the queue's pi value. + * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci). + * @hw_queue_id: the id of the H/W queue. + * @int_queue_len: length of internal queue (number of entries). + * @valid: is the queue valid (we have array of 32 queues, not all of them + * exists). + */ +struct hl_hw_queue { + struct hl_cs_job **shadow_queue; + enum hl_queue_type queue_type; + u64 kernel_address; + dma_addr_t bus_address; + u32 pi; + u32 ci; + u32 hw_queue_id; + u16 int_queue_len; + u8 valid; +}; + +/** + * struct hl_cq - describes a completion queue + * @hdev: pointer to the device structure + * @kernel_address: holds the queue's kernel virtual address + * @bus_address: holds the queue's DMA address + * @hw_queue_id: the id of the matching H/W queue + * @ci: ci inside the queue + * @pi: pi inside the queue + * @free_slots_cnt: counter of free slots in queue + */ +struct hl_cq { + struct hl_device *hdev; + u64 kernel_address; + dma_addr_t bus_address; + u32 hw_queue_id; + u32 ci; + u32 pi; + atomic_t free_slots_cnt; +}; + +/** + * struct hl_eq - describes the event queue (single one per device) + * @hdev: pointer to the device structure + * @kernel_address: holds the queue's kernel virtual address + * @bus_address: holds the queue's DMA address + * @ci: ci inside the queue + */ +struct hl_eq { + struct hl_device *hdev; + u64 kernel_address; + dma_addr_t bus_address; + u32 ci; +}; + + +/* + * ASICs + */ + +/** + * enum hl_asic_type - supported ASIC types. + * @ASIC_AUTO_DETECT: ASIC type will be automatically set. + * @ASIC_GOYA: Goya device. + * @ASIC_INVALID: Invalid ASIC type. + */ +enum hl_asic_type { + ASIC_AUTO_DETECT, + ASIC_GOYA, + ASIC_INVALID +}; + +struct hl_cs_parser; + +/** + * enum hl_pm_mng_profile - power management profile. + * @PM_AUTO: internal clock is set by KMD. + * @PM_MANUAL: internal clock is set by the user. + * @PM_LAST: last power management type. + */ +enum hl_pm_mng_profile { + PM_AUTO = 1, + PM_MANUAL, + PM_LAST +}; + +/** + * enum hl_pll_frequency - PLL frequency. + * @PLL_HIGH: high frequency. + * @PLL_LOW: low frequency. + * @PLL_LAST: last frequency values that were configured by the user. + */ +enum hl_pll_frequency { + PLL_HIGH = 1, + PLL_LOW, + PLL_LAST +}; + +/** + * struct hl_asic_funcs - ASIC specific functions that are can be called from + * common code. 
+ * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W. + * @early_fini: tears down what was done in early_init. + * @late_init: sets up late driver/hw state (post hw_init) - Optional. + * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional. + * @sw_init: sets up driver state, does not configure H/W. + * @sw_fini: tears down driver state, does not configure H/W. + * @hw_init: sets up the H/W state. + * @hw_fini: tears down the H/W state. + * @halt_engines: halt engines, needed for reset sequence. This also disables + * interrupts from the device. Should be called before + * hw_fini and before CS rollback. + * @suspend: handles IP specific H/W or SW changes for suspend. + * @resume: handles IP specific H/W or SW changes for resume. + * @cb_mmap: maps a CB. + * @ring_doorbell: increment PI on a given QMAN. + * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. + * @dma_alloc_coherent: Allocate coherent DMA memory by calling + * dma_alloc_coherent(). This is ASIC function because its + * implementation is not trivial when the driver is loaded + * in simulation mode (not upstreamed). + * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent(). + * This is ASIC function because its implementation is not + * trivial when the driver is loaded in simulation mode + * (not upstreamed). + * @get_int_queue_base: get the internal queue base address. + * @test_queues: run simple test on all queues for sanity check. + * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool. + * size of allocation is HL_DMA_POOL_BLK_SIZE. + * @dma_pool_free: free small DMA allocation from pool. + * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool. + * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool. + * @hl_dma_unmap_sg: DMA unmap scatter-gather list. + * @cs_parser: parse Command Submission. + * @asic_dma_map_sg: DMA map scatter-gather list. + * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB. + * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it. + * @update_eq_ci: update event queue CI. + * @context_switch: called upon ASID context switch. + * @restore_phase_topology: clear all SOBs amd MONs. + * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM. + * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM. + * @add_device_attr: add ASIC specific device attributes. + * @handle_eqe: handle event queue entry (IRQ) from ArmCP. + * @set_pll_profile: change PLL profile (manual/automatic). + * @get_events_stat: retrieve event queue entries histogram. + * @read_pte: read MMU page table entry from DRAM. + * @write_pte: write MMU page table entry to DRAM. + * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or + * hard (L0 & L1) flush. + * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with + * ASID-VA-size mask. + * @send_heartbeat: send is-alive packet to ArmCP and verify response. + * @enable_clock_gating: enable clock gating for reducing power consumption. + * @disable_clock_gating: disable clock for accessing registers on HBW. + * @is_device_idle: return true if device is idle, false otherwise. + * @soft_reset_late_init: perform certain actions needed after soft reset. + * @hw_queues_lock: acquire H/W queues lock. + * @hw_queues_unlock: release H/W queues lock. + * @get_pci_id: retrieve PCI ID. + * @get_eeprom_data: retrieve EEPROM data from F/W. 
+ * @send_cpu_message: send buffer to ArmCP. + * @get_hw_state: retrieve the H/W state + */ +struct hl_asic_funcs { + int (*early_init)(struct hl_device *hdev); + int (*early_fini)(struct hl_device *hdev); + int (*late_init)(struct hl_device *hdev); + void (*late_fini)(struct hl_device *hdev); + int (*sw_init)(struct hl_device *hdev); + int (*sw_fini)(struct hl_device *hdev); + int (*hw_init)(struct hl_device *hdev); + void (*hw_fini)(struct hl_device *hdev, bool hard_reset); + void (*halt_engines)(struct hl_device *hdev, bool hard_reset); + int (*suspend)(struct hl_device *hdev); + int (*resume)(struct hl_device *hdev); + int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, + u64 kaddress, phys_addr_t paddress, u32 size); + void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); + void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); + void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + void (*dma_free_coherent)(struct hl_device *hdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle); + void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len); + int (*test_queues)(struct hl_device *hdev); + void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size, + gfp_t mem_flags, dma_addr_t *dma_handle); + void (*dma_pool_free)(struct hl_device *hdev, void *vaddr, + dma_addr_t dma_addr); + void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev, + size_t size, dma_addr_t *dma_handle); + void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev, + size_t size, void *vaddr); + void (*hl_dma_unmap_sg)(struct hl_device *hdev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser); + int (*asic_dma_map_sg)(struct hl_device *hdev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + u32 (*get_dma_desc_list_size)(struct hl_device *hdev, + struct sg_table *sgt); + void (*add_end_of_cb_packets)(u64 kernel_address, u32 len, u64 cq_addr, + u32 cq_val, u32 msix_num); + void (*update_eq_ci)(struct hl_device *hdev, u32 val); + int (*context_switch)(struct hl_device *hdev, u32 asid); + void (*restore_phase_topology)(struct hl_device *hdev); + int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val); + int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val); + void (*add_device_attr)(struct hl_device *hdev, + struct attribute_group *dev_attr_grp); + void (*handle_eqe)(struct hl_device *hdev, + struct hl_eq_entry *eq_entry); + void (*set_pll_profile)(struct hl_device *hdev, + enum hl_pll_frequency freq); + void* (*get_events_stat)(struct hl_device *hdev, u32 *size); + u64 (*read_pte)(struct hl_device *hdev, u64 addr); + void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val); + void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard); + void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, + u32 asid, u64 va, u64 size); + int (*send_heartbeat)(struct hl_device *hdev); + void (*enable_clock_gating)(struct hl_device *hdev); + void (*disable_clock_gating)(struct hl_device *hdev); + bool (*is_device_idle)(struct hl_device *hdev); + int (*soft_reset_late_init)(struct hl_device *hdev); + void (*hw_queues_lock)(struct hl_device *hdev); + void (*hw_queues_unlock)(struct hl_device *hdev); + u32 (*get_pci_id)(struct hl_device *hdev); + int (*get_eeprom_data)(struct hl_device *hdev, void *data, + size_t 
max_size); + int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, + u16 len, u32 timeout, long *result); + enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev); +}; + + +/* + * CONTEXTS + */ + +#define HL_KERNEL_ASID_ID 0 + +/** + * struct hl_va_range - virtual addresses range. + * @lock: protects the virtual addresses list. + * @list: list of virtual addresses blocks available for mappings. + * @start_addr: range start address. + * @end_addr: range end address. + */ +struct hl_va_range { + struct mutex lock; + struct list_head list; + u64 start_addr; + u64 end_addr; +}; + +/** + * struct hl_ctx - user/kernel context. + * @mem_hash: holds mapping from virtual address to virtual memory area + * descriptor (hl_vm_phys_pg_list or hl_userptr). + * @mmu_hash: holds a mapping from virtual address to pgt_info structure. + * @hpriv: pointer to the private (KMD) data of the process (fd). + * @hdev: pointer to the device structure. + * @refcount: reference counter for the context. Context is released only when + * this hits 0. It is incremented on CS and CS_WAIT. + * @cs_pending: array of DMA fence objects representing pending CS. + * @host_va_range: holds available virtual addresses for host mappings. + * @dram_va_range: holds available virtual addresses for DRAM mappings. + * @mem_hash_lock: protects the mem_hash. + * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the + * MMU hash or walking the PGT requires taking this lock. + * @debugfs_list: node in debugfs list of contexts. + * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed + * to user so user could inquire about CS. It is used as + * index to cs_pending array. + * @dram_default_hops: array that holds all hops addresses needed for default + * DRAM mapping. + * @cs_lock: spinlock to protect cs_sequence. + * @dram_phys_mem: amount of used physical DRAM memory by this context. + * @thread_restore_token: token to prevent multiple threads of the same context + * from running the restore phase. Only one thread + * should run it. + * @thread_restore_wait_token: token to prevent the threads that didn't run + * the restore phase from moving to their execution + * phase before the restore phase has finished. + * @asid: context's unique address space ID in the device's MMU. + */ +struct hl_ctx { + DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS); + DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS); + struct hl_fpriv *hpriv; + struct hl_device *hdev; + struct kref refcount; + struct dma_fence *cs_pending[HL_MAX_PENDING_CS]; + struct hl_va_range host_va_range; + struct hl_va_range dram_va_range; + struct mutex mem_hash_lock; + struct mutex mmu_lock; + struct list_head debugfs_list; + u64 cs_sequence; + u64 *dram_default_hops; + spinlock_t cs_lock; + atomic64_t dram_phys_mem; + atomic_t thread_restore_token; + u32 thread_restore_wait_token; + u32 asid; +}; + +/** + * struct hl_ctx_mgr - for handling multiple contexts. + * @ctx_lock: protects ctx_handles. + * @ctx_handles: idr to hold all ctx handles. + */ +struct hl_ctx_mgr { + struct mutex ctx_lock; + struct idr ctx_handles; +}; + + + +/* + * COMMAND SUBMISSIONS + */ + +/** + * struct hl_userptr - memory mapping chunk information + * @vm_type: type of the VM. + * @job_node: linked-list node for hanging the object on the Job's list. + * @vec: pointer to the frame vector. + * @sgt: pointer to the scatter-gather table that holds the pages. + * @dir: for DMA unmapping, the direction must be supplied, so save it. 
+ * @debugfs_list: node in debugfs list of command submissions. + * @addr: user-space virtual pointer to the start of the memory area. + * @size: size of the memory area to pin & map. + * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise. + */ +struct hl_userptr { + enum vm_type_t vm_type; /* must be first */ + struct list_head job_node; + struct frame_vector *vec; + struct sg_table *sgt; + enum dma_data_direction dir; + struct list_head debugfs_list; + u64 addr; + u32 size; + u8 dma_mapped; +}; + +/** + * struct hl_cs - command submission. + * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs. + * @ctx: the context this CS belongs to. + * @job_list: list of the CS's jobs in the various queues. + * @job_lock: spinlock for the CS's jobs list. Needed for free_job. + * @refcount: reference counter for usage of the CS. + * @fence: pointer to the fence object of this CS. + * @work_tdr: delayed work node for TDR. + * @mirror_node: node in device mirror list of command submissions. + * @debugfs_list: node in debugfs list of command submissions. + * @sequence: the sequence number of this CS. + * @submitted: true if CS was submitted to H/W. + * @completed: true if CS was completed by device. + * @timedout: true if CS timed out. + * @tdr_active: true if TDR was activated for this CS (to prevent + * double TDR activation). + * @aborted: true if CS was aborted due to some device error. + */ +struct hl_cs { + u8 jobs_in_queue_cnt[HL_MAX_QUEUES]; + struct hl_ctx *ctx; + struct list_head job_list; + spinlock_t job_lock; + struct kref refcount; + struct dma_fence *fence; + struct delayed_work work_tdr; + struct list_head mirror_node; + struct list_head debugfs_list; + u64 sequence; + u8 submitted; + u8 completed; + u8 timedout; + u8 tdr_active; + u8 aborted; +}; + +/** + * struct hl_cs_job - command submission job. + * @cs_node: the node to hang on the CS jobs list. + * @cs: the CS this job belongs to. + * @user_cb: the CB we got from the user. + * @patched_cb: in case of patching, this is internal CB which is submitted on + * the queue instead of the CB we got from the IOCTL. + * @finish_work: workqueue object to run when job is completed. + * @userptr_list: linked-list of userptr mappings that belong to this job and + * wait for completion. + * @debugfs_list: node in debugfs list of command submission jobs. + * @id: the id of this job inside a CS. + * @hw_queue_id: the id of the H/W queue this job is submitted to. + * @user_cb_size: the actual size of the CB we got from the user. + * @job_cb_size: the actual size of the CB that we put on the queue. + * @ext_queue: whether the job is for external queue or internal queue. + */ +struct hl_cs_job { + struct list_head cs_node; + struct hl_cs *cs; + struct hl_cb *user_cb; + struct hl_cb *patched_cb; + struct work_struct finish_work; + struct list_head userptr_list; + struct list_head debugfs_list; + u32 id; + u32 hw_queue_id; + u32 user_cb_size; + u32 job_cb_size; + u8 ext_queue; +}; + +/** + * struct hl_cs_parser - command submission parser properties. + * @user_cb: the CB we got from the user. + * @patched_cb: in case of patching, this is internal CB which is submitted on + * the queue instead of the CB we got from the IOCTL. + * @job_userptr_list: linked-list of userptr mappings that belong to the related + * job and wait for completion. + * @cs_sequence: the sequence number of the related CS. + * @ctx_id: the ID of the context the related CS belongs to. 
+ * @hw_queue_id: the id of the H/W queue this job is submitted to. + * @user_cb_size: the actual size of the CB we got from the user. + * @patched_cb_size: the size of the CB after parsing. + * @ext_queue: whether the job is for external queue or internal queue. + * @job_id: the id of the related job inside the related CS. + * @use_virt_addr: whether to treat the addresses in the CB as virtual during + * parsing. + */ +struct hl_cs_parser { + struct hl_cb *user_cb; + struct hl_cb *patched_cb; + struct list_head *job_userptr_list; + u64 cs_sequence; + u32 ctx_id; + u32 hw_queue_id; + u32 user_cb_size; + u32 patched_cb_size; + u8 ext_queue; + u8 job_id; + u8 use_virt_addr; +}; + + +/* + * MEMORY STRUCTURE + */ + +/** + * struct hl_vm_hash_node - hash element from virtual address to virtual + * memory area descriptor (hl_vm_phys_pg_list or + * hl_userptr). + * @node: node to hang on the hash table in context object. + * @vaddr: key virtual address. + * @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr). + */ +struct hl_vm_hash_node { + struct hlist_node node; + u64 vaddr; + void *ptr; +}; + +/** + * struct hl_vm_phys_pg_pack - physical page pack. + * @vm_type: describes the type of the virtual area descriptor. + * @pages: the physical page array. + * @mapping_cnt: number of shared mappings. + * @asid: the context related to this list. + * @npages: num physical pages in the pack. + * @page_size: size of each page in the pack. + * @total_size: total size of all the pages in this list. + * @flags: HL_MEM_* flags related to this list. + * @handle: the provided handle related to this list. + * @offset: offset from the first page. + * @contiguous: is contiguous physical memory. + * @created_from_userptr: is product of host virtual address. + */ +struct hl_vm_phys_pg_pack { + enum vm_type_t vm_type; /* must be first */ + u64 *pages; + atomic_t mapping_cnt; + u32 asid; + u32 npages; + u32 page_size; + u32 total_size; + u32 flags; + u32 handle; + u32 offset; + u8 contiguous; + u8 created_from_userptr; +}; + +/** + * struct hl_vm_va_block - virtual range block information. + * @node: node to hang on the virtual range list in context object. + * @start: virtual range start address. + * @end: virtual range end address. + * @size: virtual range size. + */ +struct hl_vm_va_block { + struct list_head node; + u64 start; + u64 end; + u64 size; +}; + +/** + * struct hl_vm - virtual memory manager for MMU. + * @dram_pg_pool: pool for DRAM physical pages of 2MB. + * @dram_pg_pool_refcount: reference counter for the pool usage. + * @idr_lock: protects the phys_pg_list_handles. + * @phys_pg_pack_handles: idr to hold all device allocations handles. + * @init_done: whether initialization was done. We need this because VM + * initialization might be skipped during device initialization. + */ +struct hl_vm { + struct gen_pool *dram_pg_pool; + struct kref dram_pg_pool_refcount; + spinlock_t idr_lock; + struct idr phys_pg_pack_handles; + u8 init_done; +}; + +/* + * FILE PRIVATE STRUCTURE + */ + +/** + * struct hl_fpriv - process information stored in FD private data. + * @hdev: habanalabs device structure. + * @filp: pointer to the given file structure. + * @taskpid: current process ID. + * @ctx: current executing context. + * @ctx_mgr: context manager to handle multiple context for this FD. + * @cb_mgr: command buffer manager to handle multiple buffers for this FD. + * @debugfs_list: list of relevant ASIC debugfs. + * @refcount: number of related contexts. 
+ * @restore_phase_mutex: lock for context switch and restore phase.
+ */
+struct hl_fpriv {
+ struct hl_device *hdev;
+ struct file *filp;
+ struct pid *taskpid;
+ struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
+ struct hl_ctx_mgr ctx_mgr;
+ struct hl_cb_mgr cb_mgr;
+ struct list_head debugfs_list;
+ struct kref refcount;
+ struct mutex restore_phase_mutex;
+};
+
+
+/*
+ * DebugFS
+ */
+
+/**
+ * struct hl_info_list - debugfs file ops.
+ * @name: file name.
+ * @show: function to output information.
+ * @write: function to write to the file.
+ */
+struct hl_info_list {
+ const char *name;
+ int (*show)(struct seq_file *s, void *data);
+ ssize_t (*write)(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos);
+};
+
+/**
+ * struct hl_debugfs_entry - debugfs dentry wrapper.
+ * @dent: base debugfs entry structure.
+ * @info_ent: dentry related ops.
+ * @dev_entry: ASIC specific debugfs manager.
+ */
+struct hl_debugfs_entry {
+ struct dentry *dent;
+ const struct hl_info_list *info_ent;
+ struct hl_dbg_device_entry *dev_entry;
+};
+
+/**
+ * struct hl_dbg_device_entry - ASIC specific debugfs manager.
+ * @root: root dentry.
+ * @hdev: habanalabs device structure.
+ * @entry_arr: array of available hl_debugfs_entry.
+ * @file_list: list of available debugfs files.
+ * @file_mutex: protects file_list.
+ * @cb_list: list of available CBs.
+ * @cb_spinlock: protects cb_list.
+ * @cs_list: list of available CSs.
+ * @cs_spinlock: protects cs_list.
+ * @cs_job_list: list of available CB jobs.
+ * @cs_job_spinlock: protects cs_job_list.
+ * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
+ * @userptr_spinlock: protects userptr_list.
+ * @ctx_mem_hash_list: list of available contexts with MMU mappings.
+ * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
+ * @addr: next address to read/write from/to in read/write32.
+ * @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @mmu_asid: ASID to use while translating in mmu_show.
+ * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
+ * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
+ * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
+ */
+struct hl_dbg_device_entry {
+ struct dentry *root;
+ struct hl_device *hdev;
+ struct hl_debugfs_entry *entry_arr;
+ struct list_head file_list;
+ struct mutex file_mutex;
+ struct list_head cb_list;
+ spinlock_t cb_spinlock;
+ struct list_head cs_list;
+ spinlock_t cs_spinlock;
+ struct list_head cs_job_list;
+ spinlock_t cs_job_spinlock;
+ struct list_head userptr_list;
+ spinlock_t userptr_spinlock;
+ struct list_head ctx_mem_hash_list;
+ spinlock_t ctx_mem_hash_spinlock;
+ u64 addr;
+ u64 mmu_addr;
+ u32 mmu_asid;
+ u8 i2c_bus;
+ u8 i2c_addr;
+ u8 i2c_reg;
+};
+
+
+/*
+ * DEVICES
+ */
+
+/* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
+ * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards
+ */
+#define HL_MAX_MINORS 256
+
+/*
+ * Registers read & write functions.
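+ * Note: the RREG32/WREG32 style macros below reference a local variable
+ * named hdev, so they can only be used where such a variable is in scope.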
+ */
+
+u32 hl_rreg(struct hl_device *hdev, u32 reg);
+void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
+
+#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+ readl_poll_timeout(hdev->rmmio + addr, val, cond, sleep_us, timeout_us)
+
+#define RREG32(reg) hl_rreg(hdev, (reg))
+#define WREG32(reg, v) hl_wreg(hdev, (reg), (v))
+#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
+ hl_rreg(hdev, (reg)))
+
+#define WREG32_P(reg, val, mask) \
+ do { \
+ u32 tmp_ = RREG32(reg); \
+ tmp_ &= (mask); \
+ tmp_ |= ((val) & ~(mask)); \
+ WREG32(reg, tmp_); \
+ } while (0)
+#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
+
+#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
+#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
+#define WREG32_FIELD(reg, field, val) \
+ WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
+ (val) << REG_FIELD_SHIFT(reg, field))
+
+struct hwmon_chip_info;
+
+/**
+ * struct hl_device_reset_work - reset workqueue task wrapper.
+ * @reset_work: reset work to be done.
+ * @hdev: habanalabs device structure.
+ */
+struct hl_device_reset_work {
+ struct work_struct reset_work;
+ struct hl_device *hdev;
+};
+
+/**
+ * struct hl_device - habanalabs device structure.
+ * @pdev: pointer to PCI device, can be NULL in case of simulator device.
+ * @pcie_bar: array of available PCIe bars.
+ * @rmmio: configuration area address on SRAM.
+ * @cdev: related char device.
+ * @dev: related kernel basic device structure.
+ * @work_freq: delayed work to lower device frequency if possible.
+ * @work_heartbeat: delayed work for ArmCP is-alive check.
+ * @asic_name: ASIC specific name.
+ * @asic_type: ASIC specific type.
+ * @completion_queue: array of hl_cq.
+ * @cq_wq: work queue of completion queues for executing work in process context.
+ * @eq_wq: work queue of event queue for executing work in process context.
+ * @kernel_ctx: KMD context structure.
+ * @kernel_queues: array of hl_hw_queue.
+ * @hw_queues_mirror_list: CS mirror list for TDR.
+ * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
+ * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
+ * @event_queue: event queue for IRQ from ArmCP.
+ * @dma_pool: DMA pool for small allocations.
+ * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
+ * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
+ * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
+ * @asid_bitmap: holds used/available ASIDs.
+ * @asid_mutex: protects asid_bitmap.
+ * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
+ * fd_open_cnt is atomic, we need this lock to serialize
+ * the open function because the driver currently supports
+ * only a single process at a time. In addition, we need a
+ * lock here so we can flush user processes which are opening
+ * the device while we are trying to hard reset it
+ * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
+ * @asic_prop: ASIC specific immutable properties.
+ * @asic_funcs: ASIC specific functions.
+ * @asic_specific: ASIC specific information to use only from ASIC files.
+ * @mmu_pgt_pool: pool of available MMU hops.
+ * @vm: virtual memory manager for MMU.
+ * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context
+ * @hwmon_dev: H/W monitor device.
+ * @pm_mng_profile: current power management profile.
+ * @hl_chip_info: ASIC's sensors information.
+ * @hl_debugfs: device's debugfs manager. + * @cb_pool: list of preallocated CBs. + * @cb_pool_lock: protects the CB pool. + * @user_ctx: current user context executing. + * @dram_used_mem: current DRAM memory consumption. + * @in_reset: is device in reset flow. + * @curr_pll_profile: current PLL profile. + * @fd_open_cnt: number of open user processes. + * @timeout_jiffies: device CS timeout value. + * @max_power: the max power of the device, as configured by the sysadmin. This + * value is saved so in case of hard-reset, KMD will restore this + * value and update the F/W after the re-initialization + * @major: habanalabs KMD major. + * @high_pll: high PLL profile frequency. + * @soft_reset_cnt: number of soft reset since KMD loading. + * @hard_reset_cnt: number of hard reset since KMD loading. + * @id: device minor. + * @disabled: is device disabled. + * @late_init_done: is late init stage was done during initialization. + * @hwmon_initialized: is H/W monitor sensors was initialized. + * @hard_reset_pending: is there a hard reset work pending. + * @heartbeat: is heartbeat sanity check towards ArmCP enabled. + * @reset_on_lockup: true if a reset should be done in case of stuck CS, false + * otherwise. + * @dram_supports_virtual_memory: is MMU enabled towards DRAM. + * @dram_default_page_mapping: is DRAM default page mapping enabled. + * @init_done: is the initialization of the device done. + * @mmu_enable: is MMU enabled. + * @device_cpu_disabled: is the device CPU disabled (due to timeouts) + */ +struct hl_device { + struct pci_dev *pdev; + void __iomem *pcie_bar[6]; + void __iomem *rmmio; + struct cdev cdev; + struct device *dev; + struct delayed_work work_freq; + struct delayed_work work_heartbeat; + char asic_name[16]; + enum hl_asic_type asic_type; + struct hl_cq *completion_queue; + struct workqueue_struct *cq_wq; + struct workqueue_struct *eq_wq; + struct hl_ctx *kernel_ctx; + struct hl_hw_queue *kernel_queues; + struct list_head hw_queues_mirror_list; + spinlock_t hw_queues_mirror_lock; + struct hl_cb_mgr kernel_cb_mgr; + struct hl_eq event_queue; + struct dma_pool *dma_pool; + void *cpu_accessible_dma_mem; + dma_addr_t cpu_accessible_dma_address; + struct gen_pool *cpu_accessible_dma_pool; + unsigned long *asid_bitmap; + struct mutex asid_mutex; + /* TODO: remove fd_open_cnt_lock for multiple process support */ + struct mutex fd_open_cnt_lock; + struct mutex send_cpu_message_lock; + struct asic_fixed_properties asic_prop; + const struct hl_asic_funcs *asic_funcs; + void *asic_specific; + struct gen_pool *mmu_pgt_pool; + struct hl_vm vm; + struct mutex mmu_cache_lock; + struct device *hwmon_dev; + enum hl_pm_mng_profile pm_mng_profile; + struct hwmon_chip_info *hl_chip_info; + + struct hl_dbg_device_entry hl_debugfs; + + struct list_head cb_pool; + spinlock_t cb_pool_lock; + + /* TODO: remove user_ctx for multiple process support */ + struct hl_ctx *user_ctx; + + atomic64_t dram_used_mem; + atomic_t in_reset; + atomic_t curr_pll_profile; + atomic_t fd_open_cnt; + u64 timeout_jiffies; + u64 max_power; + u32 major; + u32 high_pll; + u32 soft_reset_cnt; + u32 hard_reset_cnt; + u16 id; + u8 disabled; + u8 late_init_done; + u8 hwmon_initialized; + u8 hard_reset_pending; + u8 heartbeat; + u8 reset_on_lockup; + u8 dram_supports_virtual_memory; + u8 dram_default_page_mapping; + u8 init_done; + u8 device_cpu_disabled; + + /* Parameters for bring-up */ + u8 mmu_enable; + u8 cpu_enable; + u8 reset_pcilink; + u8 cpu_queues_enable; + u8 fw_loading; + u8 pldm; +}; + + +/* + * IOCTLs + */ + 
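For illustration only (not part of this patch), the sketch below shows what the WREG32_P() read-modify-write helper defined above performs; SOME_CTRL_REG and the function name are hypothetical placeholders. Note that in WREG32_P() the mask selects the bits preserved from the current register value, while val supplies the remaining bits.

/* Illustration only - hypothetical register name, not part of this patch */
static void example_rmw_register(struct hl_device *hdev)
{
	u32 tmp;

	/* Open-coded equivalent of WREG32_P(SOME_CTRL_REG, 0x12, 0xFFFFFF00) */
	tmp = RREG32(SOME_CTRL_REG);	/* read the current value */
	tmp &= 0xFFFFFF00;		/* keep the bits selected by the mask */
	tmp |= (0x12 & ~0xFFFFFF00);	/* merge in the new low-byte value */
	WREG32(SOME_CTRL_REG, tmp);	/* write the result back */

	/* The same update in a single statement: */
	WREG32_P(SOME_CTRL_REG, 0x12, 0xFFFFFF00);
}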
+/** + * typedef hl_ioctl_t - typedef for ioctl function in the driver + * @hpriv: pointer to the FD's private data, which contains state of + * user process + * @data: pointer to the input/output arguments structure of the IOCTL + * + * Return: 0 for success, negative value for error + */ +typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data); + +/** + * struct hl_ioctl_desc - describes an IOCTL entry of the driver. + * @cmd: the IOCTL code as created by the kernel macros. + * @func: pointer to the driver's function that should be called for this IOCTL. + */ +struct hl_ioctl_desc { + unsigned int cmd; + hl_ioctl_t *func; +}; + + +/* + * Kernel module functions that can be accessed by entire module + */ + +/** + * hl_mem_area_inside_range() - Checks whether address+size are inside a range. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. + * @range_start_address: The start address of the valid range. + * @range_end_address: The end address of the valid range. + * + * Return: true if the area is inside the valid range, false otherwise. + */ +static inline bool hl_mem_area_inside_range(u64 address, u32 size, + u64 range_start_address, u64 range_end_address) +{ + u64 end_address = address + size; + + if ((address >= range_start_address) && + (end_address <= range_end_address) && + (end_address > address)) + return true; + + return false; +} + +/** + * hl_mem_area_crosses_range() - Checks whether address+size crossing a range. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. + * @range_start_address: The start address of the valid range. + * @range_end_address: The end address of the valid range. + * + * Return: true if the area overlaps part or all of the valid range, + * false otherwise. 
+ */ +static inline bool hl_mem_area_crosses_range(u64 address, u32 size, + u64 range_start_address, u64 range_end_address) +{ + u64 end_address = address + size; + + if ((address >= range_start_address) && + (address < range_end_address)) + return true; + + if ((end_address >= range_start_address) && + (end_address < range_end_address)) + return true; + + if ((address < range_start_address) && + (end_address >= range_end_address)) + return true; + + return false; +} + +int hl_device_open(struct inode *inode, struct file *filp); +bool hl_device_disabled_or_in_reset(struct hl_device *hdev); +int create_hdev(struct hl_device **dev, struct pci_dev *pdev, + enum hl_asic_type asic_type, int minor); +void destroy_hdev(struct hl_device *hdev); +int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us, + u32 *val); +int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr, + u32 timeout_us, u32 *val); +int hl_hw_queues_create(struct hl_device *hdev); +void hl_hw_queues_destroy(struct hl_device *hdev); +int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, + u32 cb_size, u64 cb_ptr); +int hl_hw_queue_schedule_cs(struct hl_cs *cs); +u32 hl_hw_queue_add_ptr(u32 ptr, u16 val); +void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id); +void hl_int_hw_queue_update_ci(struct hl_cs *cs); +void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset); + +#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1) +#define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1)) + +int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id); +void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q); +int hl_eq_init(struct hl_device *hdev, struct hl_eq *q); +void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q); +void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q); +void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q); +irqreturn_t hl_irq_handler_cq(int irq, void *arg); +irqreturn_t hl_irq_handler_eq(int irq, void *arg); +u32 hl_cq_inc_ptr(u32 ptr); + +int hl_asid_init(struct hl_device *hdev); +void hl_asid_fini(struct hl_device *hdev); +unsigned long hl_asid_alloc(struct hl_device *hdev); +void hl_asid_free(struct hl_device *hdev, unsigned long asid); + +int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv); +void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx); +int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx); +void hl_ctx_do_release(struct kref *ref); +void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx); +int hl_ctx_put(struct hl_ctx *ctx); +struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq); +void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr); +void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr); + +int hl_device_init(struct hl_device *hdev, struct class *hclass); +void hl_device_fini(struct hl_device *hdev); +int hl_device_suspend(struct hl_device *hdev); +int hl_device_resume(struct hl_device *hdev); +int hl_device_reset(struct hl_device *hdev, bool hard_reset, + bool from_hard_reset_thread); +void hl_hpriv_get(struct hl_fpriv *hpriv); +void hl_hpriv_put(struct hl_fpriv *hpriv); +int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq); + +int hl_build_hwmon_channel_info(struct hl_device *hdev, + struct armcp_sensor *sensors_arr); + +int hl_sysfs_init(struct hl_device *hdev); +void hl_sysfs_fini(struct hl_device *hdev); + +int hl_hwmon_init(struct hl_device *hdev); +void hl_hwmon_fini(struct hl_device *hdev); 
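For illustration only (not part of this patch), the sketch below shows how the two range helpers defined earlier in this header, hl_mem_area_inside_range() and hl_mem_area_crosses_range(), are typically combined to validate a user-supplied area. The function name and the reserved window are hypothetical; the DRAM bounds come from the asic_fixed_properties fields used elsewhere in the driver.

/* Illustration only - hypothetical helper and reserved window */
#define EXAMPLE_RESERVED_START	0x20000000ull
#define EXAMPLE_RESERVED_SIZE	0x1000000ull

static bool example_validate_user_area(struct hl_device *hdev, u64 addr,
					u32 size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	/* The area must lie entirely inside the user part of DRAM */
	if (!hl_mem_area_inside_range(addr, size,
				prop->dram_user_base_address,
				prop->dram_base_address + prop->dram_size))
		return false;

	/* ... and must not touch the (hypothetical) reserved window */
	if (hl_mem_area_crosses_range(addr, size, EXAMPLE_RESERVED_START,
			EXAMPLE_RESERVED_START + EXAMPLE_RESERVED_SIZE))
		return false;

	return true;
}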
+ +int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size, + u64 *handle, int ctx_id); +int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle); +int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); +struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 handle); +void hl_cb_put(struct hl_cb *cb); +void hl_cb_mgr_init(struct hl_cb_mgr *mgr); +void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr); +struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size); +int hl_cb_pool_init(struct hl_device *hdev); +int hl_cb_pool_fini(struct hl_device *hdev); + +void hl_cs_rollback_all(struct hl_device *hdev); +struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue); + +void goya_set_asic_funcs(struct hl_device *hdev); + +int hl_vm_ctx_init(struct hl_ctx *ctx); +void hl_vm_ctx_fini(struct hl_ctx *ctx); + +int hl_vm_init(struct hl_device *hdev); +void hl_vm_fini(struct hl_device *hdev); + +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, + struct hl_userptr *userptr); +int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); +void hl_userptr_delete_list(struct hl_device *hdev, + struct list_head *userptr_list); +bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size, + struct list_head *userptr_list, + struct hl_userptr **userptr); + +int hl_mmu_init(struct hl_device *hdev); +void hl_mmu_fini(struct hl_device *hdev); +int hl_mmu_ctx_init(struct hl_ctx *ctx); +void hl_mmu_ctx_fini(struct hl_ctx *ctx); +int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size); +int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size); +void hl_mmu_swap_out(struct hl_ctx *ctx); +void hl_mmu_swap_in(struct hl_ctx *ctx); + +long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); +void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq); +long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); +long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr); +long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr); +long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr); +long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr); +void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, + long value); +u64 hl_get_max_power(struct hl_device *hdev); +void hl_set_max_power(struct hl_device *hdev, u64 value); + +#ifdef CONFIG_DEBUG_FS + +void hl_debugfs_init(void); +void hl_debugfs_fini(void); +void hl_debugfs_add_device(struct hl_device *hdev); +void hl_debugfs_remove_device(struct hl_device *hdev); +void hl_debugfs_add_file(struct hl_fpriv *hpriv); +void hl_debugfs_remove_file(struct hl_fpriv *hpriv); +void hl_debugfs_add_cb(struct hl_cb *cb); +void hl_debugfs_remove_cb(struct hl_cb *cb); +void hl_debugfs_add_cs(struct hl_cs *cs); +void hl_debugfs_remove_cs(struct hl_cs *cs); +void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job); +void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job); +void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr); +void hl_debugfs_remove_userptr(struct hl_device *hdev, + struct hl_userptr *userptr); +void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx); +void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx); + +#else + +static inline void __init 
hl_debugfs_init(void) +{ +} + +static inline void hl_debugfs_fini(void) +{ +} + +static inline void hl_debugfs_add_device(struct hl_device *hdev) +{ +} + +static inline void hl_debugfs_remove_device(struct hl_device *hdev) +{ +} + +static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv) +{ +} + +static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv) +{ +} + +static inline void hl_debugfs_add_cb(struct hl_cb *cb) +{ +} + +static inline void hl_debugfs_remove_cb(struct hl_cb *cb) +{ +} + +static inline void hl_debugfs_add_cs(struct hl_cs *cs) +{ +} + +static inline void hl_debugfs_remove_cs(struct hl_cs *cs) +{ +} + +static inline void hl_debugfs_add_job(struct hl_device *hdev, + struct hl_cs_job *job) +{ +} + +static inline void hl_debugfs_remove_job(struct hl_device *hdev, + struct hl_cs_job *job) +{ +} + +static inline void hl_debugfs_add_userptr(struct hl_device *hdev, + struct hl_userptr *userptr) +{ +} + +static inline void hl_debugfs_remove_userptr(struct hl_device *hdev, + struct hl_userptr *userptr) +{ +} + +static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, + struct hl_ctx *ctx) +{ +} + +static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, + struct hl_ctx *ctx) +{ +} + +#endif + +/* IOCTLs */ +long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); +int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data); + +#endif /* HABANALABSP_H_ */ diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c new file mode 100644 index 000000000000..748601463f11 --- /dev/null +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -0,0 +1,461 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#include "habanalabs.h" + +#include +#include + +#define HL_DRIVER_AUTHOR "HabanaLabs Kernel Driver Team" + +#define HL_DRIVER_DESC "Driver for HabanaLabs's AI Accelerators" + +MODULE_AUTHOR(HL_DRIVER_AUTHOR); +MODULE_DESCRIPTION(HL_DRIVER_DESC); +MODULE_LICENSE("GPL v2"); + +static int hl_major; +static struct class *hl_class; +static DEFINE_IDR(hl_devs_idr); +static DEFINE_MUTEX(hl_devs_idr_lock); + +static int timeout_locked = 5; +static int reset_on_lockup = 1; + +module_param(timeout_locked, int, 0444); +MODULE_PARM_DESC(timeout_locked, + "Device lockup timeout in seconds (0 = disabled, default 5s)"); + +module_param(reset_on_lockup, int, 0444); +MODULE_PARM_DESC(reset_on_lockup, + "Do device reset on lockup (0 = no, 1 = yes, default yes)"); + +#define PCI_VENDOR_ID_HABANALABS 0x1da3 + +#define PCI_IDS_GOYA 0x0001 + +static const struct pci_device_id ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ids); + +/* + * get_asic_type - translate device id to asic type + * + * @device: id of the PCI device + * + * Translate device id to asic type. 
+ * In case of unidentified device, return -1 + */ +static enum hl_asic_type get_asic_type(u16 device) +{ + enum hl_asic_type asic_type; + + switch (device) { + case PCI_IDS_GOYA: + asic_type = ASIC_GOYA; + break; + default: + asic_type = ASIC_INVALID; + break; + } + + return asic_type; +} + +/* + * hl_device_open - open function for habanalabs device + * + * @inode: pointer to inode structure + * @filp: pointer to file structure + * + * Called when process opens an habanalabs device. + */ +int hl_device_open(struct inode *inode, struct file *filp) +{ + struct hl_device *hdev; + struct hl_fpriv *hpriv; + int rc; + + mutex_lock(&hl_devs_idr_lock); + hdev = idr_find(&hl_devs_idr, iminor(inode)); + mutex_unlock(&hl_devs_idr_lock); + + if (!hdev) { + pr_err("Couldn't find device %d:%d\n", + imajor(inode), iminor(inode)); + return -ENXIO; + } + + mutex_lock(&hdev->fd_open_cnt_lock); + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_err_ratelimited(hdev->dev, + "Can't open %s because it is disabled or in reset\n", + dev_name(hdev->dev)); + mutex_unlock(&hdev->fd_open_cnt_lock); + return -EPERM; + } + + if (atomic_read(&hdev->fd_open_cnt)) { + dev_info_ratelimited(hdev->dev, + "Device %s is already attached to application\n", + dev_name(hdev->dev)); + mutex_unlock(&hdev->fd_open_cnt_lock); + return -EBUSY; + } + + atomic_inc(&hdev->fd_open_cnt); + + mutex_unlock(&hdev->fd_open_cnt_lock); + + hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) { + rc = -ENOMEM; + goto close_device; + } + + hpriv->hdev = hdev; + filp->private_data = hpriv; + hpriv->filp = filp; + mutex_init(&hpriv->restore_phase_mutex); + kref_init(&hpriv->refcount); + nonseekable_open(inode, filp); + + hl_cb_mgr_init(&hpriv->cb_mgr); + hl_ctx_mgr_init(&hpriv->ctx_mgr); + + rc = hl_ctx_create(hdev, hpriv); + if (rc) { + dev_err(hdev->dev, "Failed to open FD (CTX fail)\n"); + goto out_err; + } + + hpriv->taskpid = find_get_pid(current->pid); + + /* + * Device is IDLE at this point so it is legal to change PLLs. 
There + * is no need to check anything because if the PLL is already HIGH, the + * set function will return without doing anything + */ + hl_device_set_frequency(hdev, PLL_HIGH); + + hl_debugfs_add_file(hpriv); + + return 0; + +out_err: + filp->private_data = NULL; + hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); + hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); + mutex_destroy(&hpriv->restore_phase_mutex); + kfree(hpriv); + +close_device: + atomic_dec(&hdev->fd_open_cnt); + return rc; +} + +/* + * create_hdev - create habanalabs device instance + * + * @dev: will hold the pointer to the new habanalabs device structure + * @pdev: pointer to the pci device + * @asic_type: in case of simulator device, which device is it + * @minor: in case of simulator device, the minor of the device + * + * Allocate memory for habanalabs device and initialize basic fields + * Identify the ASIC type + * Allocate ID (minor) for the device (only for real devices) + */ +int create_hdev(struct hl_device **dev, struct pci_dev *pdev, + enum hl_asic_type asic_type, int minor) +{ + struct hl_device *hdev; + int rc; + + *dev = NULL; + + hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); + if (!hdev) + return -ENOMEM; + + hdev->major = hl_major; + hdev->reset_on_lockup = reset_on_lockup; + + /* Parameters for bring-up - set them to defaults */ + hdev->mmu_enable = 1; + hdev->cpu_enable = 1; + hdev->reset_pcilink = 0; + hdev->cpu_queues_enable = 1; + hdev->fw_loading = 1; + hdev->pldm = 0; + hdev->heartbeat = 1; + + /* If CPU is disabled, no point in loading FW */ + if (!hdev->cpu_enable) + hdev->fw_loading = 0; + + /* If we don't load FW, no need to initialize CPU queues */ + if (!hdev->fw_loading) + hdev->cpu_queues_enable = 0; + + /* If CPU queues not enabled, no way to do heartbeat */ + if (!hdev->cpu_queues_enable) + hdev->heartbeat = 0; + + if (timeout_locked) + hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000); + else + hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT; + + hdev->disabled = true; + hdev->pdev = pdev; /* can be NULL in case of simulator device */ + + if (asic_type == ASIC_AUTO_DETECT) { + hdev->asic_type = get_asic_type(pdev->device); + if (hdev->asic_type == ASIC_INVALID) { + dev_err(&pdev->dev, "Unsupported ASIC\n"); + rc = -ENODEV; + goto free_hdev; + } + } else { + hdev->asic_type = asic_type; + } + + mutex_lock(&hl_devs_idr_lock); + + if (minor == -1) { + rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, + GFP_KERNEL); + } else { + void *old_idr = idr_replace(&hl_devs_idr, hdev, minor); + + if (IS_ERR_VALUE(old_idr)) { + rc = PTR_ERR(old_idr); + pr_err("Error %d when trying to replace minor %d\n", + rc, minor); + mutex_unlock(&hl_devs_idr_lock); + goto free_hdev; + } + rc = minor; + } + + mutex_unlock(&hl_devs_idr_lock); + + if (rc < 0) { + if (rc == -ENOSPC) { + pr_err("too many devices in the system\n"); + rc = -EBUSY; + } + goto free_hdev; + } + + hdev->id = rc; + + *dev = hdev; + + return 0; + +free_hdev: + kfree(hdev); + return rc; +} + +/* + * destroy_hdev - destroy habanalabs device instance + * + * @dev: pointer to the habanalabs device structure + * + */ +void destroy_hdev(struct hl_device *hdev) +{ + /* Remove device from the device list */ + mutex_lock(&hl_devs_idr_lock); + idr_remove(&hl_devs_idr, hdev->id); + mutex_unlock(&hl_devs_idr_lock); + + kfree(hdev); +} + +static int hl_pmops_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct hl_device *hdev = pci_get_drvdata(pdev); + + pr_debug("Going to suspend PCI device\n"); + + if (!hdev) { + 
pr_err("device pointer is NULL in suspend\n"); + return 0; + } + + return hl_device_suspend(hdev); +} + +static int hl_pmops_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct hl_device *hdev = pci_get_drvdata(pdev); + + pr_debug("Going to resume PCI device\n"); + + if (!hdev) { + pr_err("device pointer is NULL in resume\n"); + return 0; + } + + return hl_device_resume(hdev); +} + +/* + * hl_pci_probe - probe PCI habanalabs devices + * + * @pdev: pointer to pci device + * @id: pointer to pci device id structure + * + * Standard PCI probe function for habanalabs device. + * Create a new habanalabs device and initialize it according to the + * device's type + */ +static int hl_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct hl_device *hdev; + int rc; + + dev_info(&pdev->dev, HL_NAME + " device found [%04x:%04x] (rev %x)\n", + (int)pdev->vendor, (int)pdev->device, (int)pdev->revision); + + rc = create_hdev(&hdev, pdev, ASIC_AUTO_DETECT, -1); + if (rc) + return rc; + + pci_set_drvdata(pdev, hdev); + + rc = hl_device_init(hdev, hl_class); + if (rc) { + dev_err(&pdev->dev, "Fatal error during habanalabs device init\n"); + rc = -ENODEV; + goto disable_device; + } + + return 0; + +disable_device: + pci_set_drvdata(pdev, NULL); + destroy_hdev(hdev); + + return rc; +} + +/* + * hl_pci_remove - remove PCI habanalabs devices + * + * @pdev: pointer to pci device + * + * Standard PCI remove function for habanalabs device + */ +static void hl_pci_remove(struct pci_dev *pdev) +{ + struct hl_device *hdev; + + hdev = pci_get_drvdata(pdev); + if (!hdev) + return; + + hl_device_fini(hdev); + pci_set_drvdata(pdev, NULL); + + destroy_hdev(hdev); +} + +static const struct dev_pm_ops hl_pm_ops = { + .suspend = hl_pmops_suspend, + .resume = hl_pmops_resume, +}; + +static struct pci_driver hl_pci_driver = { + .name = HL_NAME, + .id_table = ids, + .probe = hl_pci_probe, + .remove = hl_pci_remove, + .driver.pm = &hl_pm_ops, +}; + +/* + * hl_init - Initialize the habanalabs kernel driver + */ +static int __init hl_init(void) +{ + int rc; + dev_t dev; + + pr_info("loading driver\n"); + + rc = alloc_chrdev_region(&dev, 0, HL_MAX_MINORS, HL_NAME); + if (rc < 0) { + pr_err("unable to get major\n"); + return rc; + } + + hl_major = MAJOR(dev); + + hl_class = class_create(THIS_MODULE, HL_NAME); + if (IS_ERR(hl_class)) { + pr_err("failed to allocate class\n"); + rc = PTR_ERR(hl_class); + goto remove_major; + } + + hl_debugfs_init(); + + rc = pci_register_driver(&hl_pci_driver); + if (rc) { + pr_err("failed to register pci device\n"); + goto remove_debugfs; + } + + pr_debug("driver loaded\n"); + + return 0; + +remove_debugfs: + hl_debugfs_fini(); + class_destroy(hl_class); +remove_major: + unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS); + return rc; +} + +/* + * hl_exit - Release all resources of the habanalabs kernel driver + */ +static void __exit hl_exit(void) +{ + pci_unregister_driver(&hl_pci_driver); + + /* + * Removing debugfs must be after all devices or simulator devices + * have been removed because otherwise we get a bug in the + * debugfs module for referencing NULL objects + */ + hl_debugfs_fini(); + + class_destroy(hl_class); + unregister_chrdev_region(MKDEV(hl_major, 0), HL_MAX_MINORS); + + idr_destroy(&hl_devs_idr); + + pr_debug("driver removed\n"); +} + +module_init(hl_init); +module_exit(hl_exit); diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c new file mode 100644 index 
000000000000..2c2739a3c5ec --- /dev/null +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include +#include "habanalabs.h" + +#include +#include +#include + +static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_hw_ip_info hw_ip = {0}; + u32 size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 sram_kmd_size, dram_kmd_size; + + if ((!size) || (!out)) + return -EINVAL; + + sram_kmd_size = (prop->sram_user_base_address - + prop->sram_base_address); + dram_kmd_size = (prop->dram_user_base_address - + prop->dram_base_address); + + hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev); + hw_ip.sram_base_address = prop->sram_user_base_address; + hw_ip.dram_base_address = prop->dram_user_base_address; + hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask; + hw_ip.sram_size = prop->sram_size - sram_kmd_size; + hw_ip.dram_size = prop->dram_size - dram_kmd_size; + if (hw_ip.dram_size > 0) + hw_ip.dram_enabled = 1; + hw_ip.num_of_events = prop->num_of_events; + memcpy(hw_ip.armcp_version, + prop->armcp_info.armcp_version, VERSION_MAX_LEN); + hw_ip.armcp_cpld_version = __le32_to_cpu(prop->armcp_info.cpld_version); + hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr; + hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf; + hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od; + hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor; + + return copy_to_user(out, &hw_ip, + min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0; +} + +static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args) +{ + u32 size, max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + void *arr; + + if ((!max_size) || (!out)) + return -EINVAL; + + arr = hdev->asic_funcs->get_events_stat(hdev, &size); + + return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; +} + +static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_dram_usage dram_usage = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 dram_kmd_size; + + if ((!max_size) || (!out)) + return -EINVAL; + + dram_kmd_size = (prop->dram_user_base_address - + prop->dram_base_address); + dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) - + atomic64_read(&hdev->dram_used_mem); + dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem); + + return copy_to_user(out, &dram_usage, + min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0; +} + +static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_hw_idle hw_idle = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev); + + return copy_to_user(out, &hw_idle, + min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0; +} + +static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data) +{ + struct hl_info_args *args = data; + struct hl_device *hdev = hpriv->hdev; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_err(hdev->dev, + "Device is disabled or in reset. 
Can't execute INFO IOCTL\n"); + return -EBUSY; + } + + switch (args->op) { + case HL_INFO_HW_IP_INFO: + rc = hw_ip_info(hdev, args); + break; + + case HL_INFO_HW_EVENTS: + rc = hw_events_info(hdev, args); + break; + + case HL_INFO_DRAM_USAGE: + rc = dram_usage_info(hdev, args); + break; + + case HL_INFO_HW_IDLE: + rc = hw_idle(hdev, args); + break; + + default: + dev_err(hdev->dev, "Invalid request %d\n", args->op); + rc = -ENOTTY; + break; + } + + return rc; +} + +#define HL_IOCTL_DEF(ioctl, _func) \ + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func} + +static const struct hl_ioctl_desc hl_ioctls[] = { + HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl), + HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl), + HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl), + HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl), + HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl) +}; + +#define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls) + +long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) +{ + struct hl_fpriv *hpriv = filep->private_data; + struct hl_device *hdev = hpriv->hdev; + hl_ioctl_t *func; + const struct hl_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + char stack_kdata[128] = {0}; + char *kdata = NULL; + unsigned int usize, asize; + int retcode; + + if (hdev->hard_reset_pending) { + dev_crit_ratelimited(hdev->dev, + "Device HARD reset pending! Please close FD\n"); + return -ENODEV; + } + + if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) { + u32 hl_size; + + ioctl = &hl_ioctls[nr]; + + hl_size = _IOC_SIZE(ioctl->cmd); + usize = asize = _IOC_SIZE(cmd); + if (hl_size > asize) + asize = hl_size; + + cmd = ioctl->cmd; + } else { + dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n", + task_pid_nr(current), nr); + return -ENOTTY; + } + + /* Do not trust userspace, use our own definition */ + func = ioctl->func; + + if (unlikely(!func)) { + dev_dbg(hdev->dev, "no function\n"); + retcode = -ENOTTY; + goto out_err; + } + + if (cmd & (IOC_IN | IOC_OUT)) { + if (asize <= sizeof(stack_kdata)) { + kdata = stack_kdata; + } else { + kdata = kzalloc(asize, GFP_KERNEL); + if (!kdata) { + retcode = -ENOMEM; + goto out_err; + } + } + } + + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, usize)) { + retcode = -EFAULT; + goto out_err; + } + } else if (cmd & IOC_OUT) { + memset(kdata, 0, usize); + } + + retcode = func(hpriv, kdata); + + if (cmd & IOC_OUT) + if (copy_to_user((void __user *)arg, kdata, usize)) + retcode = -EFAULT; + +out_err: + if (retcode) + dev_dbg(hdev->dev, + "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", + task_pid_nr(current), cmd, nr); + + if (kdata != stack_kdata) + kfree(kdata); + + return retcode; +} diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c new file mode 100644 index 000000000000..67bece26417c --- /dev/null +++ b/drivers/misc/habanalabs/hw_queue.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include + +/* + * hl_queue_add_ptr - add to pi or ci and checks if it wraps around + * + * @ptr: the current pi/ci value + * @val: the amount to add + * + * Add val to ptr. It can go until twice the queue length. 
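+ * Keeping pi/ci in a range of twice the queue length is what lets
+ * queue_free_slots() below tell a completely full queue apart from an
+ * empty one.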
+ */ +inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val) +{ + ptr += val; + ptr &= ((HL_QUEUE_LENGTH << 1) - 1); + return ptr; +} + +static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len) +{ + int delta = (q->pi - q->ci); + + if (delta >= 0) + return (queue_len - delta); + else + return (abs(delta) - queue_len); +} + +void hl_int_hw_queue_update_ci(struct hl_cs *cs) +{ + struct hl_device *hdev = cs->ctx->hdev; + struct hl_hw_queue *q; + int i; + + hdev->asic_funcs->hw_queues_lock(hdev); + + if (hdev->disabled) + goto out; + + q = &hdev->kernel_queues[0]; + for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) { + if (q->queue_type == QUEUE_TYPE_INT) { + q->ci += cs->jobs_in_queue_cnt[i]; + q->ci &= ((q->int_queue_len << 1) - 1); + } + } + +out: + hdev->asic_funcs->hw_queues_unlock(hdev); +} + +/* + * ext_queue_submit_bd - Submit a buffer descriptor to an external queue + * + * @hdev: pointer to habanalabs device structure + * @q: pointer to habanalabs queue structure + * @ctl: BD's control word + * @len: BD's length + * @ptr: BD's pointer + * + * This function assumes there is enough space on the queue to submit a new + * BD to it. It initializes the next BD and calls the device specific + * function to set the pi (and doorbell) + * + * This function must be called when the scheduler mutex is taken + * + */ +static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q, + u32 ctl, u32 len, u64 ptr) +{ + struct hl_bd *bd; + + bd = (struct hl_bd *) (uintptr_t) q->kernel_address; + bd += hl_pi_2_offset(q->pi); + bd->ctl = __cpu_to_le32(ctl); + bd->len = __cpu_to_le32(len); + bd->ptr = __cpu_to_le64(ptr + hdev->asic_prop.host_phys_base_address); + + q->pi = hl_queue_inc_ptr(q->pi); + hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); +} + +/* + * ext_queue_sanity_checks - perform some sanity checks on external queue + * + * @hdev : pointer to hl_device structure + * @q : pointer to hl_hw_queue structure + * @num_of_entries : how many entries to check for space + * @reserve_cq_entry : whether to reserve an entry in the cq + * + * H/W queues spinlock should be taken before calling this function + * + * Perform the following: + * - Make sure we have enough space in the h/w queue + * - Make sure we have enough space in the completion queue + * - Reserve space in the completion queue (needs to be reversed if there + * is a failure down the road before the actual submission of work). 
Only + * do this action if reserve_cq_entry is true + * + */ +static int ext_queue_sanity_checks(struct hl_device *hdev, + struct hl_hw_queue *q, int num_of_entries, + bool reserve_cq_entry) +{ + atomic_t *free_slots = + &hdev->completion_queue[q->hw_queue_id].free_slots_cnt; + int free_slots_cnt; + + /* Check we have enough space in the queue */ + free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH); + + if (free_slots_cnt < num_of_entries) { + dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", + q->hw_queue_id, num_of_entries); + return -EAGAIN; + } + + if (reserve_cq_entry) { + /* + * Check we have enough space in the completion queue + * Add -1 to counter (decrement) unless counter was already 0 + * In that case, CQ is full so we can't submit a new CB because + * we won't get ack on its completion + * atomic_add_unless will return 0 if counter was already 0 + */ + if (atomic_add_negative(num_of_entries * -1, free_slots)) { + dev_dbg(hdev->dev, "No space for %d on CQ %d\n", + num_of_entries, q->hw_queue_id); + atomic_add(num_of_entries, free_slots); + return -EAGAIN; + } + } + + return 0; +} + +/* + * int_queue_sanity_checks - perform some sanity checks on internal queue + * + * @hdev : pointer to hl_device structure + * @q : pointer to hl_hw_queue structure + * @num_of_entries : how many entries to check for space + * + * H/W queues spinlock should be taken before calling this function + * + * Perform the following: + * - Make sure we have enough space in the h/w queue + * + */ +static int int_queue_sanity_checks(struct hl_device *hdev, + struct hl_hw_queue *q, + int num_of_entries) +{ + int free_slots_cnt; + + /* Check we have enough space in the queue */ + free_slots_cnt = queue_free_slots(q, q->int_queue_len); + + if (free_slots_cnt < num_of_entries) { + dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", + q->hw_queue_id, num_of_entries); + return -EAGAIN; + } + + return 0; +} + +/* + * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion + * + * @hdev: pointer to hl_device structure + * @hw_queue_id: Queue's type + * @cb_size: size of CB + * @cb_ptr: pointer to CB location + * + * This function sends a single CB, that must NOT generate a completion entry + * + */ +int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, + u32 cb_size, u64 cb_ptr) +{ + struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; + int rc; + + /* + * The CPU queue is a synchronous queue with an effective depth of + * a single entry (although it is allocated with room for multiple + * entries). Therefore, there is a different lock, called + * send_cpu_message_lock, that serializes accesses to the CPU queue. 
+ * As a result, we don't need to lock the access to the entire H/W + * queues module when submitting a JOB to the CPU queue + */ + if (q->queue_type != QUEUE_TYPE_CPU) + hdev->asic_funcs->hw_queues_lock(hdev); + + if (hdev->disabled) { + rc = -EPERM; + goto out; + } + + rc = ext_queue_sanity_checks(hdev, q, 1, false); + if (rc) + goto out; + + ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr); + +out: + if (q->queue_type != QUEUE_TYPE_CPU) + hdev->asic_funcs->hw_queues_unlock(hdev); + + return rc; +} + +/* + * ext_hw_queue_schedule_job - submit an JOB to an external queue + * + * @job: pointer to the job that needs to be submitted to the queue + * + * This function must be called when the scheduler mutex is taken + * + */ +static void ext_hw_queue_schedule_job(struct hl_cs_job *job) +{ + struct hl_device *hdev = job->cs->ctx->hdev; + struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; + struct hl_cq_entry cq_pkt; + struct hl_cq *cq; + u64 cq_addr; + struct hl_cb *cb; + u32 ctl; + u32 len; + u64 ptr; + + /* + * Update the JOB ID inside the BD CTL so the device would know what + * to write in the completion queue + */ + ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK); + + cb = job->patched_cb; + len = job->job_cb_size; + ptr = cb->bus_address; + + cq_pkt.data = __cpu_to_le32( + ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT) + & CQ_ENTRY_SHADOW_INDEX_MASK) | + (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) | + (1 << CQ_ENTRY_READY_SHIFT)); + + /* + * No need to protect pi_offset because scheduling to the + * H/W queues is done under the scheduler mutex + * + * No need to check if CQ is full because it was already + * checked in hl_queue_sanity_checks + */ + cq = &hdev->completion_queue[q->hw_queue_id]; + cq_addr = cq->bus_address + + hdev->asic_prop.host_phys_base_address; + cq_addr += cq->pi * sizeof(struct hl_cq_entry); + + hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len, + cq_addr, + __le32_to_cpu(cq_pkt.data), + q->hw_queue_id); + + q->shadow_queue[hl_pi_2_offset(q->pi)] = job; + + cq->pi = hl_cq_inc_ptr(cq->pi); + + ext_queue_submit_bd(hdev, q, ctl, len, ptr); +} + +/* + * int_hw_queue_schedule_job - submit an JOB to an internal queue + * + * @job: pointer to the job that needs to be submitted to the queue + * + * This function must be called when the scheduler mutex is taken + * + */ +static void int_hw_queue_schedule_job(struct hl_cs_job *job) +{ + struct hl_device *hdev = job->cs->ctx->hdev; + struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; + struct hl_bd bd; + u64 *pi, *pbd = (u64 *) &bd; + + bd.ctl = 0; + bd.len = __cpu_to_le32(job->job_cb_size); + bd.ptr = __cpu_to_le64((u64) (uintptr_t) job->user_cb); + + pi = (u64 *) (uintptr_t) (q->kernel_address + + ((q->pi & (q->int_queue_len - 1)) * sizeof(bd))); + + pi[0] = pbd[0]; + pi[1] = pbd[1]; + + q->pi++; + q->pi &= ((q->int_queue_len << 1) - 1); + + /* Flush PQ entry write. 
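+ * The BD write must be visible in memory before the doorbell below is
+ * rung.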
Relevant only for specific ASICs */ + hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]); + + hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); +} + +/* + * hl_hw_queue_schedule_cs - schedule a command submission + * + * @job : pointer to the CS + * + */ +int hl_hw_queue_schedule_cs(struct hl_cs *cs) +{ + struct hl_device *hdev = cs->ctx->hdev; + struct hl_cs_job *job, *tmp; + struct hl_hw_queue *q; + int rc = 0, i, cq_cnt; + + hdev->asic_funcs->hw_queues_lock(hdev); + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_err(hdev->dev, + "device is disabled or in reset, CS rejected!\n"); + rc = -EPERM; + goto out; + } + + q = &hdev->kernel_queues[0]; + /* This loop assumes all external queues are consecutive */ + for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) { + if (q->queue_type == QUEUE_TYPE_EXT) { + if (cs->jobs_in_queue_cnt[i]) { + rc = ext_queue_sanity_checks(hdev, q, + cs->jobs_in_queue_cnt[i], true); + if (rc) + goto unroll_cq_resv; + cq_cnt++; + } + } else if (q->queue_type == QUEUE_TYPE_INT) { + if (cs->jobs_in_queue_cnt[i]) { + rc = int_queue_sanity_checks(hdev, q, + cs->jobs_in_queue_cnt[i]); + if (rc) + goto unroll_cq_resv; + } + } + } + + spin_lock(&hdev->hw_queues_mirror_lock); + list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list); + + /* Queue TDR if the CS is the first entry and if timeout is wanted */ + if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) && + (list_first_entry(&hdev->hw_queues_mirror_list, + struct hl_cs, mirror_node) == cs)) { + cs->tdr_active = true; + schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies); + spin_unlock(&hdev->hw_queues_mirror_lock); + } else { + spin_unlock(&hdev->hw_queues_mirror_lock); + } + + list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) { + if (job->ext_queue) + ext_hw_queue_schedule_job(job); + else + int_hw_queue_schedule_job(job); + } + + cs->submitted = true; + + goto out; + +unroll_cq_resv: + /* This loop assumes all external queues are consecutive */ + q = &hdev->kernel_queues[0]; + for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) { + if ((q->queue_type == QUEUE_TYPE_EXT) && + (cs->jobs_in_queue_cnt[i])) { + atomic_t *free_slots = + &hdev->completion_queue[i].free_slots_cnt; + atomic_add(cs->jobs_in_queue_cnt[i], free_slots); + cq_cnt--; + } + } + +out: + hdev->asic_funcs->hw_queues_unlock(hdev); + + return rc; +} + +/* + * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue + * + * @hdev: pointer to hl_device structure + * @hw_queue_id: which queue to increment its ci + */ +void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id) +{ + struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; + + q->ci = hl_queue_inc_ptr(q->ci); +} + +static int ext_and_cpu_hw_queue_init(struct hl_device *hdev, + struct hl_hw_queue *q) +{ + void *p; + int rc; + + p = hdev->asic_funcs->dma_alloc_coherent(hdev, + HL_QUEUE_SIZE_IN_BYTES, + &q->bus_address, GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->kernel_address = (u64) (uintptr_t) p; + + q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, + sizeof(*q->shadow_queue), + GFP_KERNEL); + if (!q->shadow_queue) { + dev_err(hdev->dev, + "Failed to allocate shadow queue for H/W queue %d\n", + q->hw_queue_id); + rc = -ENOMEM; + goto free_queue; + } + + /* Make sure read/write pointers are initialized to start of queue */ + q->ci = 0; + q->pi = 0; + + return 0; + +free_queue: + hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, + (void *) (uintptr_t) q->kernel_address, q->bus_address); + + 
return rc; +} + +static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + void *p; + + p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id, + &q->bus_address, &q->int_queue_len); + if (!p) { + dev_err(hdev->dev, + "Failed to get base address for internal queue %d\n", + q->hw_queue_id); + return -EFAULT; + } + + q->kernel_address = (u64) (uintptr_t) p; + q->pi = 0; + q->ci = 0; + + return 0; +} + +static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + return ext_and_cpu_hw_queue_init(hdev, q); +} + +static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + return ext_and_cpu_hw_queue_init(hdev, q); +} + +/* + * hw_queue_init - main initialization function for H/W queue object + * + * @hdev: pointer to hl_device device structure + * @q: pointer to hl_hw_queue queue structure + * @hw_queue_id: The id of the H/W queue + * + * Allocate dma-able memory for the queue and initialize fields + * Returns 0 on success + */ +static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q, + u32 hw_queue_id) +{ + int rc; + + BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE); + + q->hw_queue_id = hw_queue_id; + + switch (q->queue_type) { + case QUEUE_TYPE_EXT: + rc = ext_hw_queue_init(hdev, q); + break; + + case QUEUE_TYPE_INT: + rc = int_hw_queue_init(hdev, q); + break; + + case QUEUE_TYPE_CPU: + rc = cpu_hw_queue_init(hdev, q); + break; + + case QUEUE_TYPE_NA: + q->valid = 0; + return 0; + + default: + dev_crit(hdev->dev, "wrong queue type %d during init\n", + q->queue_type); + rc = -EINVAL; + break; + } + + if (rc) + return rc; + + q->valid = 1; + + return 0; +} + +/* + * hw_queue_fini - destroy queue + * + * @hdev: pointer to hl_device device structure + * @q: pointer to hl_hw_queue queue structure + * + * Free the queue memory + */ +static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q) +{ + if (!q->valid) + return; + + /* + * If we arrived here, there are no jobs waiting on this queue + * so we can safely remove it. + * This is because this function can only called when: + * 1. Either a context is deleted, which only can occur if all its + * jobs were finished + * 2. A context wasn't able to be created due to failure or timeout, + * which means there are no jobs on the queue yet + * + * The only exception are the queues of the kernel context, but + * if they are being destroyed, it means that the entire module is + * being removed. If the module is removed, it means there is no open + * user context. It also means that if a job was submitted by + * the kernel driver (e.g. context creation), the job itself was + * released by the kernel driver when a timeout occurred on its + * Completion. Thus, we don't need to release it again. 
+ */ + + if (q->queue_type == QUEUE_TYPE_INT) + return; + + kfree(q->shadow_queue); + + hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, + (void *) (uintptr_t) q->kernel_address, q->bus_address); +} + +int hl_hw_queues_create(struct hl_device *hdev) +{ + struct asic_fixed_properties *asic = &hdev->asic_prop; + struct hl_hw_queue *q; + int i, rc, q_ready_cnt; + + hdev->kernel_queues = kcalloc(HL_MAX_QUEUES, + sizeof(*hdev->kernel_queues), GFP_KERNEL); + + if (!hdev->kernel_queues) { + dev_err(hdev->dev, "Not enough memory for H/W queues\n"); + return -ENOMEM; + } + + /* Initialize the H/W queues */ + for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues; + i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) { + + q->queue_type = asic->hw_queues_props[i].type; + rc = hw_queue_init(hdev, q, i); + if (rc) { + dev_err(hdev->dev, + "failed to initialize queue %d\n", i); + goto release_queues; + } + } + + return 0; + +release_queues: + for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++) + hw_queue_fini(hdev, q); + + kfree(hdev->kernel_queues); + + return rc; +} + +void hl_hw_queues_destroy(struct hl_device *hdev) +{ + struct hl_hw_queue *q; + int i; + + for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) + hw_queue_fini(hdev, q); + + kfree(hdev->kernel_queues); +} + +void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset) +{ + struct hl_hw_queue *q; + int i; + + for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) { + if ((!q->valid) || + ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU))) + continue; + q->pi = q->ci = 0; + } +} diff --git a/drivers/misc/habanalabs/hwmon.c b/drivers/misc/habanalabs/hwmon.c new file mode 100644 index 000000000000..77facd25c4a2 --- /dev/null +++ b/drivers/misc/habanalabs/hwmon.c @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "habanalabs.h" + +#include +#include + +#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */ +#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1) + +int hl_build_hwmon_channel_info(struct hl_device *hdev, + struct armcp_sensor *sensors_arr) +{ + u32 counts[HWMON_NR_SENSOR_TYPES] = {0}; + u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL}; + u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0}; + struct hwmon_channel_info **channels_info; + u32 num_sensors_for_type, num_active_sensor_types = 0, + arr_size = 0, *curr_arr; + enum hwmon_sensor_types type; + int rc, i, j; + + for (i = 0 ; i < ARMCP_MAX_SENSORS ; i++) { + type = __le32_to_cpu(sensors_arr[i].type); + + if ((type == 0) && (sensors_arr[i].flags == 0)) + break; + + if (type >= HWMON_NR_SENSOR_TYPES) { + dev_err(hdev->dev, + "Got wrong sensor type %d from device\n", type); + return -EINVAL; + } + + counts[type]++; + arr_size++; + } + + for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) { + if (counts[i] == 0) + continue; + + num_sensors_for_type = counts[i] + 1; + curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr), + GFP_KERNEL); + if (!curr_arr) { + rc = -ENOMEM; + goto sensors_type_err; + } + + num_active_sensor_types++; + sensors_by_type[i] = curr_arr; + } + + for (i = 0 ; i < arr_size ; i++) { + type = __le32_to_cpu(sensors_arr[i].type); + curr_arr = sensors_by_type[type]; + curr_arr[sensors_by_type_next_index[type]++] = + __le32_to_cpu(sensors_arr[i].flags); + } + + channels_info = kcalloc(num_active_sensor_types + 1, + sizeof(*channels_info), GFP_KERNEL); + if (!channels_info) { + rc = -ENOMEM; + goto channels_info_array_err; + } + + for (i = 0 ; i < num_active_sensor_types ; i++) { + channels_info[i] = kzalloc(sizeof(*channels_info[i]), + GFP_KERNEL); + if (!channels_info[i]) { + rc = -ENOMEM; + goto channel_info_err; + } + } + + for (i = 0, j = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) { + if (!sensors_by_type[i]) + continue; + + channels_info[j]->type = i; + channels_info[j]->config = sensors_by_type[i]; + j++; + } + + hdev->hl_chip_info->info = + (const struct hwmon_channel_info **)channels_info; + + return 0; + +channel_info_err: + for (i = 0 ; i < num_active_sensor_types ; i++) + if (channels_info[i]) { + kfree(channels_info[i]->config); + kfree(channels_info[i]); + } + kfree(channels_info); +channels_info_array_err: +sensors_type_err: + for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++) + kfree(sensors_by_type[i]); + + return rc; +} + +static int hl_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max: + case hwmon_temp_crit: + case hwmon_temp_max_hyst: + case hwmon_temp_crit_hyst: + break; + default: + return -EINVAL; + } + + *val = hl_get_temperature(hdev, channel, attr); + break; + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_min: + case hwmon_in_max: + break; + default: + return -EINVAL; + } + + *val = hl_get_voltage(hdev, channel, attr); + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + case hwmon_curr_min: + case hwmon_curr_max: + break; + default: + return -EINVAL; + } + + *val = hl_get_current(hdev, channel, attr); + break; + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_min: + case hwmon_fan_max: + break; + default: + return -EINVAL; + } + *val = hl_get_fan_speed(hdev, 
channel, attr); + break; + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + case hwmon_pwm_enable: + break; + default: + return -EINVAL; + } + *val = hl_get_pwm_info(hdev, channel, attr); + break; + default: + return -EINVAL; + } + return 0; +} + +static int hl_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + switch (type) { + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + case hwmon_pwm_enable: + break; + default: + return -EINVAL; + } + hl_set_pwm_info(hdev, channel, attr, val); + break; + default: + return -EINVAL; + } + return 0; +} + +static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max: + case hwmon_temp_max_hyst: + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + return 0444; + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_input: + case hwmon_in_min: + case hwmon_in_max: + return 0444; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + case hwmon_curr_min: + case hwmon_curr_max: + return 0444; + } + break; + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + case hwmon_fan_min: + case hwmon_fan_max: + return 0444; + } + break; + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + case hwmon_pwm_enable: + return 0644; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_ops hl_hwmon_ops = { + .is_visible = hl_is_visible, + .read = hl_read, + .write = hl_write +}; + +long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_TEMPERATURE_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SENSORS_PKT_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to get temperature from sensor %d, error %d\n", + sensor_index, rc); + result = 0; + } + + return result; +} + +long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_VOLTAGE_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SENSORS_PKT_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to get voltage from sensor %d, error %d\n", + sensor_index, rc); + result = 0; + } + + return result; +} + +long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_CURRENT_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SENSORS_PKT_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to get current from sensor %d, error %d\n", + sensor_index, rc); + result = 0; + } + + return result; +} + +long hl_get_fan_speed(struct 
hl_device *hdev, int sensor_index, u32 attr) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FAN_SPEED_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SENSORS_PKT_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to get fan speed from sensor %d, error %d\n", + sensor_index, rc); + result = 0; + } + + return result; +} + +long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SENSORS_PKT_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to get pwm info from sensor %d, error %d\n", + sensor_index, rc); + result = 0; + } + + return result; +} + +void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, + long value) +{ + struct armcp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_PWM_SET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.sensor_index = __cpu_to_le16(sensor_index); + pkt.type = __cpu_to_le16(attr); + pkt.value = __cpu_to_le64(value); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SENSORS_PKT_TIMEOUT, NULL); + + if (rc) + dev_err(hdev->dev, + "Failed to set pwm info to sensor %d, error %d\n", + sensor_index, rc); +} + +int hl_hwmon_init(struct hl_device *hdev) +{ + struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev; + int rc; + + if ((hdev->hwmon_initialized) || !(hdev->fw_loading)) + return 0; + + if (hdev->hl_chip_info->info) { + hdev->hl_chip_info->ops = &hl_hwmon_ops; + + hdev->hwmon_dev = hwmon_device_register_with_info(dev, + "habanalabs", hdev, hdev->hl_chip_info, NULL); + if (IS_ERR(hdev->hwmon_dev)) { + rc = PTR_ERR(hdev->hwmon_dev); + dev_err(hdev->dev, + "Unable to register hwmon device: %d\n", rc); + return rc; + } + + dev_info(hdev->dev, "%s: add sensors information\n", + dev_name(hdev->hwmon_dev)); + + hdev->hwmon_initialized = true; + } else { + dev_info(hdev->dev, "no available sensors\n"); + } + + return 0; +} + +void hl_hwmon_fini(struct hl_device *hdev) +{ + if (!hdev->hwmon_initialized) + return; + + hwmon_device_unregister(hdev->hwmon_dev); +} diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h new file mode 100644 index 000000000000..9dddb917e72c --- /dev/null +++ b/drivers/misc/habanalabs/include/armcp_if.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef ARMCP_IF_H +#define ARMCP_IF_H + +#include <linux/types.h> + +/* + * EVENT QUEUE + */ + +struct hl_eq_header { + __le32 reserved; + __le32 ctl; +}; + +struct hl_eq_entry { + struct hl_eq_header hdr; + __le64 data[7]; +}; + +#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry) + +#define EQ_CTL_READY_SHIFT 31 +#define EQ_CTL_READY_MASK 0x80000000 + +#define EQ_CTL_EVENT_TYPE_SHIFT 16 +#define EQ_CTL_EVENT_TYPE_MASK 0x03FF0000 + +#define EVENT_QUEUE_MSIX_IDX 5 + +enum pq_init_status { + PQ_INIT_STATUS_NA = 0, + PQ_INIT_STATUS_READY_FOR_CP, + PQ_INIT_STATUS_READY_FOR_HOST +}; + +/* + * ArmCP Primary Queue Packets + * + * During normal operation, KMD needs to send various messages to ArmCP, + * usually either to SET some value into a H/W periphery or to GET the current + * value of some H/W periphery. For example, SET the frequency of MME/TPC and + * GET the value of the thermal sensor. + * + * These messages can be initiated either by the User application or by KMD + * itself, e.g. power management code. In either case, the communication from + * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will + * send a single message and poll until the message was acknowledged and the + * results are ready (if results are needed). + * + * This means that only a single message can be sent at a time and KMD must + * wait for its result before sending the next message. Having said that, + * because these are control messages which are sent in a relatively low + * frequency, this limitation seems acceptable. It's important to note that + * in case of multiple devices, messages to different devices *can* be sent + * at the same time. + * + * The message, inputs/outputs (if relevant) and fence object will be located + * on the device DDR at an address that will be determined by KMD. During + * device initialization phase, KMD will pass to ArmCP that address. Most of + * the message types will contain inputs/outputs inside the message itself. + * The common part of each message will contain the opcode of the message (its + * type) and a field representing a fence object. + * + * When KMD wishes to send a message to ArmCP, it will write the message + * contents to the device DDR, clear the fence object and then write the + * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue + * the 484 interrupt-id to the ARM core. + * + * Upon receiving the 484 interrupt-id, ArmCP will read the message from the + * DDR. In case the message is a SET operation, ArmCP will first perform the + * operation and then write to the fence object on the device DDR. In case the + * message is a GET operation, ArmCP will first fill the results section on the + * device DDR and then write to the fence object. If an error occurred, ArmCP + * will fill the rc field with the right error code. + * + * In the meantime, KMD will poll on the fence object. Once KMD sees that the + * fence object is signaled, it will read the results from the device DDR + * (if relevant) and resume the code execution in KMD. + * + * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8 + * so the value being put by the KMD matches the value read by ArmCP + * + * Non-QMAN packets should be limited to values 1 through (2^8 - 1) + * + * Detailed description: + * + * ARMCP_PACKET_DISABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU must NOT issue PCI + * transactions (read/write) towards the Host CPU. This also includes + * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state. + * + * ARMCP_PACKET_ENABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU is allowed to issue PCI + * transactions towards the Host CPU, including sending MSI-X interrupts. + * This packet is usually sent after the device is moved to D0 state. + * + * ARMCP_PACKET_TEMPERATURE_GET - + * Fetch the current temperature / Max / Max Hyst / Critical / + * Critical Hyst of a specified thermal sensor. The packet's + * arguments specify the desired sensor and the field to get. + * + * ARMCP_PACKET_VOLTAGE_GET - + * Fetch the voltage / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * ARMCP_PACKET_CURRENT_GET - + * Fetch the current / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * ARMCP_PACKET_FAN_SPEED_GET - + * Fetch the speed / Max / Min of a specified fan. The packet's + * arguments specify the sensor and type. + * + * ARMCP_PACKET_PWM_GET - + * Fetch the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor and type. + * + * ARMCP_PACKET_PWM_SET - + * Set the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor, type and value. + * + * ARMCP_PACKET_FREQUENCY_SET - + * Set the frequency of a specified PLL. The packet's arguments specify + * the PLL and the desired frequency. The actual frequency in the device + * might differ from the requested frequency. + * + * ARMCP_PACKET_FREQUENCY_GET - + * Fetch the frequency of a specified PLL. The packet's arguments specify + * the PLL. + * + * ARMCP_PACKET_LED_SET - + * Set the state of a specified led. The packet's arguments + * specify the led and the desired state. + * + * ARMCP_PACKET_I2C_WR - + * Write 32-bit value to I2C device. The packet's arguments specify the + * I2C bus, address and value. + * + * ARMCP_PACKET_I2C_RD - + * Read 32-bit value from I2C device. The packet's arguments specify the + * I2C bus and address. + * + * ARMCP_PACKET_INFO_GET - + * Fetch information from the device as specified in the packet's + * structure. KMD passes the max size it allows the ArmCP to write to + * the structure, to prevent data corruption in case of mismatched + * KMD/FW versions. + * + * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed + * + * ARMCP_PACKET_UNMASK_RAZWI_IRQ - + * Unmask the given IRQ. The IRQ number is specified in the value field. + * The packet is sent after receiving an interrupt and printing its + * relevant information. + * + * ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY - + * Unmask the given IRQs. The IRQ numbers are specified in an array right + * after the armcp_packet structure, where its first element is the array + * length. The packet is sent after a soft reset was done in order to + * handle any interrupts that were sent during the reset process. + * + * ARMCP_PACKET_TEST - + * Test packet for ArmCP connectivity. The CPU will put the fence value + * in the result field. + * + * ARMCP_PACKET_FREQUENCY_CURR_GET - + * Fetch the current frequency of a specified PLL. The packet's arguments + * specify the PLL. + * + * ARMCP_PACKET_MAX_POWER_GET - + * Fetch the maximal power of the device. + * + * ARMCP_PACKET_MAX_POWER_SET - + * Set the maximal power of the device. The packet's arguments specify + * the power. + * + * ARMCP_PACKET_EEPROM_DATA_GET - + * Get EEPROM data from the ArmCP kernel. The buffer is specified in the + * addr field.
The CPU will put the returned data size in the result + * field. In addition, KMD passes the max size it allows the ArmCP to + * write to the structure, to prevent data corruption in case of + * mismatched KMD/FW versions. + * + */ + +enum armcp_packet_id { + ARMCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */ + ARMCP_PACKET_ENABLE_PCI_ACCESS, /* internal */ + ARMCP_PACKET_TEMPERATURE_GET, /* sysfs */ + ARMCP_PACKET_VOLTAGE_GET, /* sysfs */ + ARMCP_PACKET_CURRENT_GET, /* sysfs */ + ARMCP_PACKET_FAN_SPEED_GET, /* sysfs */ + ARMCP_PACKET_PWM_GET, /* sysfs */ + ARMCP_PACKET_PWM_SET, /* sysfs */ + ARMCP_PACKET_FREQUENCY_SET, /* sysfs */ + ARMCP_PACKET_FREQUENCY_GET, /* sysfs */ + ARMCP_PACKET_LED_SET, /* debugfs */ + ARMCP_PACKET_I2C_WR, /* debugfs */ + ARMCP_PACKET_I2C_RD, /* debugfs */ + ARMCP_PACKET_INFO_GET, /* IOCTL */ + ARMCP_PACKET_FLASH_PROGRAM_REMOVED, + ARMCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */ + ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */ + ARMCP_PACKET_TEST, /* internal */ + ARMCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */ + ARMCP_PACKET_MAX_POWER_GET, /* sysfs */ + ARMCP_PACKET_MAX_POWER_SET, /* sysfs */ + ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */ +}; + +#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5 + +#define ARMCP_PKT_CTL_RC_SHIFT 12 +#define ARMCP_PKT_CTL_RC_MASK 0x0000F000 + +#define ARMCP_PKT_CTL_OPCODE_SHIFT 16 +#define ARMCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 + +struct armcp_packet { + union { + __le64 value; /* For SET packets */ + __le64 result; /* For GET packets */ + __le64 addr; /* For PQ */ + }; + + __le32 ctl; + + __le32 fence; /* Signal to KMD that message is completed */ + + union { + struct {/* For temperature/current/voltage/fan/pwm get/set */ + __le16 sensor_index; + __le16 type; + }; + + struct { /* For I2C read/write */ + __u8 i2c_bus; + __u8 i2c_addr; + __u8 i2c_reg; + __u8 pad; /* unused */ + }; + + /* For frequency get/set */ + __le32 pll_index; + + /* For led set */ + __le32 led_index; + + /* For get Armcp info/EEPROM data */ + __le32 data_max_size; + }; +}; + +struct armcp_unmask_irq_arr_packet { + struct armcp_packet armcp_pkt; + __le32 length; + __le32 irqs[0]; +}; + +enum armcp_packet_rc { + armcp_packet_success, + armcp_packet_invalid, + armcp_packet_fault +}; + +enum armcp_temp_type { + armcp_temp_input, + armcp_temp_max = 6, + armcp_temp_max_hyst, + armcp_temp_crit, + armcp_temp_crit_hyst +}; + +enum armcp_in_attributes { + armcp_in_input, + armcp_in_min, + armcp_in_max +}; + +enum armcp_curr_attributes { + armcp_curr_input, + armcp_curr_min, + armcp_curr_max +}; + +enum armcp_fan_attributes { + armcp_fan_input, + armcp_fan_min = 2, + armcp_fan_max +}; + +enum armcp_pwm_attributes { + armcp_pwm_input, + armcp_pwm_enable +}; + +/* Event Queue Packets */ + +struct eq_generic_event { + __le64 data[7]; +}; + +/* + * ArmCP info + */ + +#define VERSION_MAX_LEN 128 +#define ARMCP_MAX_SENSORS 128 + +struct armcp_sensor { + __le32 type; + __le32 flags; +}; + +struct armcp_info { + struct armcp_sensor sensors[ARMCP_MAX_SENSORS]; + __u8 kernel_version[VERSION_MAX_LEN]; + __le32 reserved[3]; + __le32 cpld_version; + __le32 infineon_version; + __u8 fuse_version[VERSION_MAX_LEN]; + __u8 thermal_version[VERSION_MAX_LEN]; + __u8 armcp_version[VERSION_MAX_LEN]; + __le64 dram_size; +}; + +#endif /* ARMCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h new file mode 100644 index 000000000000..2cf5c46b6e8e --- /dev/null +++ 
b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_CA53_CFG_MASKS_H_ +#define ASIC_REG_CPU_CA53_CFG_MASKS_H_ + +/* + ***************************************** + * CPU_CA53_CFG (Prototype: CA53_CFG) + ***************************************** + */ + +/* CPU_CA53_CFG_ARM_CFG */ +#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_SHIFT 0 +#define CPU_CA53_CFG_ARM_CFG_AA64NAA32_MASK 0x3 +#define CPU_CA53_CFG_ARM_CFG_END_SHIFT 4 +#define CPU_CA53_CFG_ARM_CFG_END_MASK 0x30 +#define CPU_CA53_CFG_ARM_CFG_TE_SHIFT 8 +#define CPU_CA53_CFG_ARM_CFG_TE_MASK 0x300 +#define CPU_CA53_CFG_ARM_CFG_VINITHI_SHIFT 12 +#define CPU_CA53_CFG_ARM_CFG_VINITHI_MASK 0x3000 + +/* CPU_CA53_CFG_RST_ADDR_LSB */ +#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_SHIFT 0 +#define CPU_CA53_CFG_RST_ADDR_LSB_VECTOR_MASK 0xFFFFFFFF + +/* CPU_CA53_CFG_RST_ADDR_MSB */ +#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_SHIFT 0 +#define CPU_CA53_CFG_RST_ADDR_MSB_VECTOR_MASK 0xFF + +/* CPU_CA53_CFG_ARM_RST_CONTROL */ +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT 0 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_MASK 0x3 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT 4 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_MASK 0x30 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT 8 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_MASK 0x100 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_SHIFT 12 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NPRESETDBG_MASK 0x1000 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT 16 +#define CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_MASK 0x10000 +#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_SHIFT 20 +#define CPU_CA53_CFG_ARM_RST_CONTROL_WARMRSTREQ_MASK 0x300000 + +/* CPU_CA53_CFG_ARM_AFFINITY */ +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_SHIFT 0 +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_1_MASK 0xFF +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_SHIFT 8 +#define CPU_CA53_CFG_ARM_AFFINITY_LEVEL_2_MASK 0xFF00 + +/* CPU_CA53_CFG_ARM_DISABLE */ +#define CPU_CA53_CFG_ARM_DISABLE_CP15S_SHIFT 0 +#define CPU_CA53_CFG_ARM_DISABLE_CP15S_MASK 0x3 +#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_SHIFT 4 +#define CPU_CA53_CFG_ARM_DISABLE_CRYPTO_MASK 0x30 +#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_SHIFT 8 +#define CPU_CA53_CFG_ARM_DISABLE_L2_RST_MASK 0x100 +#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_SHIFT 9 +#define CPU_CA53_CFG_ARM_DISABLE_DBG_L1_RST_MASK 0x200 + +/* CPU_CA53_CFG_ARM_GIC_PERIPHBASE */ +#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_SHIFT 0 +#define CPU_CA53_CFG_ARM_GIC_PERIPHBASE_PERIPHBASE_MASK 0x3FFFFF + +/* CPU_CA53_CFG_ARM_GIC_IRQ_CFG */ +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_SHIFT 0 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NREI_MASK 0x3 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_SHIFT 4 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NSEI_MASK 0x30 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_SHIFT 8 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NIRQ_MASK 0x300 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_SHIFT 12 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NFIQ_MASK 0x3000 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_SHIFT 16 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVFIQ_MASK 0x30000 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_SHIFT 20 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVIRQ_MASK 0x300000 +#define 
CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_SHIFT 24 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_NVSEI_MASK 0x3000000 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_SHIFT 31 +#define CPU_CA53_CFG_ARM_GIC_IRQ_CFG_GIC_EN_MASK 0x80000000 + +/* CPU_CA53_CFG_ARM_PWR_MNG */ +#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_SHIFT 0 +#define CPU_CA53_CFG_ARM_PWR_MNG_CLREXMONREQ_MASK 0x1 +#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_SHIFT 1 +#define CPU_CA53_CFG_ARM_PWR_MNG_EVENTI_MASK 0x2 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_SHIFT 2 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2FLUSHREQ_MASK 0x4 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_SHIFT 3 +#define CPU_CA53_CFG_ARM_PWR_MNG_L2QREQN_MASK 0x8 +#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_SHIFT 4 +#define CPU_CA53_CFG_ARM_PWR_MNG_CPUQREQN_MASK 0x30 +#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_SHIFT 8 +#define CPU_CA53_CFG_ARM_PWR_MNG_NEONQREQN_MASK 0x300 +#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_SHIFT 12 +#define CPU_CA53_CFG_ARM_PWR_MNG_DBGPWRDUP_MASK 0x3000 + +/* CPU_CA53_CFG_ARB_DBG_ROM_ADDR */ +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_SHIFT 0 +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_MASK 0xFFFFFFF +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_SHIFT 31 +#define CPU_CA53_CFG_ARB_DBG_ROM_ADDR_DEBUG_ROM_BASE_ADDR_VALID_MASK 0x80000000 + +/* CPU_CA53_CFG_ARM_DBG_MODES */ +#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_SHIFT 0 +#define CPU_CA53_CFG_ARM_DBG_MODES_EDBGRQ_MASK 0x3 +#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_SHIFT 4 +#define CPU_CA53_CFG_ARM_DBG_MODES_DBGEN_MASK 0x30 +#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_SHIFT 8 +#define CPU_CA53_CFG_ARM_DBG_MODES_NIDEN_MASK 0x300 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_SHIFT 12 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPIDEN_MASK 0x3000 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_SHIFT 16 +#define CPU_CA53_CFG_ARM_DBG_MODES_SPNIDEN_MASK 0x30000 + +/* CPU_CA53_CFG_ARM_PWR_STAT_0 */ +#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_SHIFT 0 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_CLREXMONACK_MASK 0x1 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_SHIFT 1 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_EVENTO_MASK 0x2 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_SHIFT 4 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFI_MASK 0x30 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_SHIFT 8 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFE_MASK 0x300 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_SHIFT 12 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_STANDBYWFIL2_MASK 0x1000 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_SHIFT 13 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_L2FLUSHDONE_MASK 0x2000 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_SHIFT 16 +#define CPU_CA53_CFG_ARM_PWR_STAT_0_SMPEN_MASK 0x30000 + +/* CPU_CA53_CFG_ARM_PWR_STAT_1 */ +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_SHIFT 0 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACTIVE_MASK 0x3 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_SHIFT 4 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQDENY_MASK 0x30 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_SHIFT 8 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_CPUQACCEPTN_MASK 0x300 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_SHIFT 12 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACTIVE_MASK 0x3000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_SHIFT 16 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQDENY_MASK 0x30000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_SHIFT 20 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_NEONQACCEPTN_MASK 0x300000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_SHIFT 24 +#define 
CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACTIVE_MASK 0x1000000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_SHIFT 25 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QDENY_MASK 0x2000000 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_SHIFT 26 +#define CPU_CA53_CFG_ARM_PWR_STAT_1_L2QACCEPTN_MASK 0x4000000 + +/* CPU_CA53_CFG_ARM_DBG_STATUS */ +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_SHIFT 0 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGACK_MASK 0x3 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_SHIFT 4 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMRX_MASK 0x30 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_SHIFT 8 +#define CPU_CA53_CFG_ARM_DBG_STATUS_COMMTX_MASK 0x300 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_SHIFT 12 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGRSTREQ_MASK 0x3000 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_SHIFT 16 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGNOPWRDWN_MASK 0x30000 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_SHIFT 20 +#define CPU_CA53_CFG_ARM_DBG_STATUS_DBGPWRUPREQ_MASK 0x300000 + +/* CPU_CA53_CFG_ARM_MEM_ATTR */ +#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_SHIFT 0 +#define CPU_CA53_CFG_ARM_MEM_ATTR_RDMEMATTR_MASK 0xFF +#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_SHIFT 8 +#define CPU_CA53_CFG_ARM_MEM_ATTR_WRMEMATTR_MASK 0xFF00 +#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_SHIFT 16 +#define CPU_CA53_CFG_ARM_MEM_ATTR_RACKM_MASK 0x10000 +#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_SHIFT 20 +#define CPU_CA53_CFG_ARM_MEM_ATTR_WACKM_MASK 0x100000 + +/* CPU_CA53_CFG_ARM_PMU */ +#define CPU_CA53_CFG_ARM_PMU_EVENT_SHIFT 0 +#define CPU_CA53_CFG_ARM_PMU_EVENT_MASK 0x3FFFFFFF + +#endif /* ASIC_REG_CPU_CA53_CFG_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h new file mode 100644 index 000000000000..840ccffa1081 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_CA53_CFG_REGS_H_ +#define ASIC_REG_CPU_CA53_CFG_REGS_H_ + +/* + ***************************************** + * CPU_CA53_CFG (Prototype: CA53_CFG) + ***************************************** + */ + +#define mmCPU_CA53_CFG_ARM_CFG 0x441100 + +#define mmCPU_CA53_CFG_RST_ADDR_LSB_0 0x441104 + +#define mmCPU_CA53_CFG_RST_ADDR_LSB_1 0x441108 + +#define mmCPU_CA53_CFG_RST_ADDR_MSB_0 0x441114 + +#define mmCPU_CA53_CFG_RST_ADDR_MSB_1 0x441118 + +#define mmCPU_CA53_CFG_ARM_RST_CONTROL 0x441124 + +#define mmCPU_CA53_CFG_ARM_AFFINITY 0x441128 + +#define mmCPU_CA53_CFG_ARM_DISABLE 0x44112C + +#define mmCPU_CA53_CFG_ARM_GIC_PERIPHBASE 0x441130 + +#define mmCPU_CA53_CFG_ARM_GIC_IRQ_CFG 0x441134 + +#define mmCPU_CA53_CFG_ARM_PWR_MNG 0x441138 + +#define mmCPU_CA53_CFG_ARB_DBG_ROM_ADDR 0x44113C + +#define mmCPU_CA53_CFG_ARM_DBG_MODES 0x441140 + +#define mmCPU_CA53_CFG_ARM_PWR_STAT_0 0x441200 + +#define mmCPU_CA53_CFG_ARM_PWR_STAT_1 0x441204 + +#define mmCPU_CA53_CFG_ARM_DBG_STATUS 0x441208 + +#define mmCPU_CA53_CFG_ARM_MEM_ATTR 0x44120C + +#define mmCPU_CA53_CFG_ARM_PMU_0 0x441210 + +#define mmCPU_CA53_CFG_ARM_PMU_1 0x441214 + +#endif /* ASIC_REG_CPU_CA53_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h new file mode 100644 index 000000000000..f23cb3e41c30 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_IF_REGS_H_ +#define ASIC_REG_CPU_IF_REGS_H_ + +/* + ***************************************** + * CPU_IF (Prototype: CPU_IF) + ***************************************** + */ + +#define mmCPU_IF_PF_PQ_PI 0x442100 + +#define mmCPU_IF_ARUSER_OVR 0x442104 + +#define mmCPU_IF_ARUSER_OVR_EN 0x442108 + +#define mmCPU_IF_AWUSER_OVR 0x44210C + +#define mmCPU_IF_AWUSER_OVR_EN 0x442110 + +#define mmCPU_IF_AXCACHE_OVR 0x442114 + +#define mmCPU_IF_LOCK_OVR 0x442118 + +#define mmCPU_IF_PROT_OVR 0x44211C + +#define mmCPU_IF_MAX_OUTSTANDING 0x442120 + +#define mmCPU_IF_EARLY_BRESP_EN 0x442124 + +#define mmCPU_IF_FORCE_RSP_OK 0x442128 + +#define mmCPU_IF_CPU_MSB_ADDR 0x44212C + +#define mmCPU_IF_AXI_SPLIT_INTR 0x442130 + +#endif /* ASIC_REG_CPU_IF_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h new file mode 100644 index 000000000000..8fc97f838ada --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_CPU_PLL_REGS_H_ +#define ASIC_REG_CPU_PLL_REGS_H_ + +/* + ***************************************** + * CPU_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmCPU_PLL_NR 0x4A2100 + +#define mmCPU_PLL_NF 0x4A2104 + +#define mmCPU_PLL_OD 0x4A2108 + +#define mmCPU_PLL_NB 0x4A210C + +#define mmCPU_PLL_CFG 0x4A2110 + +#define mmCPU_PLL_LOSE_MASK 0x4A2120 + +#define mmCPU_PLL_LOCK_INTR 0x4A2128 + +#define mmCPU_PLL_LOCK_BYPASS 0x4A212C + +#define mmCPU_PLL_DATA_CHNG 0x4A2130 + +#define mmCPU_PLL_RST 0x4A2134 + +#define mmCPU_PLL_SLIP_WD_CNTR 0x4A2150 + +#define mmCPU_PLL_DIV_FACTOR_0 0x4A2200 + +#define mmCPU_PLL_DIV_FACTOR_1 0x4A2204 + +#define mmCPU_PLL_DIV_FACTOR_2 0x4A2208 + +#define mmCPU_PLL_DIV_FACTOR_3 0x4A220C + +#define mmCPU_PLL_DIV_FACTOR_CMD_0 0x4A2220 + +#define mmCPU_PLL_DIV_FACTOR_CMD_1 0x4A2224 + +#define mmCPU_PLL_DIV_FACTOR_CMD_2 0x4A2228 + +#define mmCPU_PLL_DIV_FACTOR_CMD_3 0x4A222C + +#define mmCPU_PLL_DIV_SEL_0 0x4A2280 + +#define mmCPU_PLL_DIV_SEL_1 0x4A2284 + +#define mmCPU_PLL_DIV_SEL_2 0x4A2288 + +#define mmCPU_PLL_DIV_SEL_3 0x4A228C + +#define mmCPU_PLL_DIV_EN_0 0x4A22A0 + +#define mmCPU_PLL_DIV_EN_1 0x4A22A4 + +#define mmCPU_PLL_DIV_EN_2 0x4A22A8 + +#define mmCPU_PLL_DIV_EN_3 0x4A22AC + +#define mmCPU_PLL_DIV_FACTOR_BUSY_0 0x4A22C0 + +#define mmCPU_PLL_DIV_FACTOR_BUSY_1 0x4A22C4 + +#define mmCPU_PLL_DIV_FACTOR_BUSY_2 0x4A22C8 + +#define mmCPU_PLL_DIV_FACTOR_BUSY_3 0x4A22CC + +#define mmCPU_PLL_CLK_GATER 0x4A2300 + +#define mmCPU_PLL_CLK_RLX_0 0x4A2310 + +#define mmCPU_PLL_CLK_RLX_1 0x4A2314 + +#define mmCPU_PLL_CLK_RLX_2 0x4A2318 + +#define mmCPU_PLL_CLK_RLX_3 0x4A231C + +#define mmCPU_PLL_REF_CNTR_PERIOD 0x4A2400 + +#define mmCPU_PLL_REF_LOW_THRESHOLD 0x4A2410 + +#define mmCPU_PLL_REF_HIGH_THRESHOLD 0x4A2420 + +#define mmCPU_PLL_PLL_NOT_STABLE 0x4A2430 + +#define mmCPU_PLL_FREQ_CALC_EN 0x4A2440 + +#endif /* ASIC_REG_CPU_PLL_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h new file mode 100644 index 000000000000..61c8cd9ce58b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_0_REGS_H_ +#define ASIC_REG_DMA_CH_0_REGS_H_ + +/* + ***************************************** + * DMA_CH_0 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_0_CFG0 0x401000 + +#define mmDMA_CH_0_CFG1 0x401004 + +#define mmDMA_CH_0_ERRMSG_ADDR_LO 0x401008 + +#define mmDMA_CH_0_ERRMSG_ADDR_HI 0x40100C + +#define mmDMA_CH_0_ERRMSG_WDATA 0x401010 + +#define mmDMA_CH_0_RD_COMP_ADDR_LO 0x401014 + +#define mmDMA_CH_0_RD_COMP_ADDR_HI 0x401018 + +#define mmDMA_CH_0_RD_COMP_WDATA 0x40101C + +#define mmDMA_CH_0_WR_COMP_ADDR_LO 0x401020 + +#define mmDMA_CH_0_WR_COMP_ADDR_HI 0x401024 + +#define mmDMA_CH_0_WR_COMP_WDATA 0x401028 + +#define mmDMA_CH_0_LDMA_SRC_ADDR_LO 0x40102C + +#define mmDMA_CH_0_LDMA_SRC_ADDR_HI 0x401030 + +#define mmDMA_CH_0_LDMA_DST_ADDR_LO 0x401034 + +#define mmDMA_CH_0_LDMA_DST_ADDR_HI 0x401038 + +#define mmDMA_CH_0_LDMA_TSIZE 0x40103C + +#define mmDMA_CH_0_COMIT_TRANSFER 0x401040 + +#define mmDMA_CH_0_STS0 0x401044 + +#define mmDMA_CH_0_STS1 0x401048 + +#define mmDMA_CH_0_STS2 0x40104C + +#define mmDMA_CH_0_STS3 0x401050 + +#define mmDMA_CH_0_STS4 0x401054 + +#define mmDMA_CH_0_SRC_ADDR_LO_STS 0x401058 + +#define mmDMA_CH_0_SRC_ADDR_HI_STS 0x40105C + +#define mmDMA_CH_0_SRC_TSIZE_STS 0x401060 + +#define mmDMA_CH_0_DST_ADDR_LO_STS 0x401064 + +#define mmDMA_CH_0_DST_ADDR_HI_STS 0x401068 + +#define mmDMA_CH_0_DST_TSIZE_STS 0x40106C + +#define mmDMA_CH_0_RD_RATE_LIM_EN 0x401070 + +#define mmDMA_CH_0_RD_RATE_LIM_RST_TOKEN 0x401074 + +#define mmDMA_CH_0_RD_RATE_LIM_SAT 0x401078 + +#define mmDMA_CH_0_RD_RATE_LIM_TOUT 0x40107C + +#define mmDMA_CH_0_WR_RATE_LIM_EN 0x401080 + +#define mmDMA_CH_0_WR_RATE_LIM_RST_TOKEN 0x401084 + +#define mmDMA_CH_0_WR_RATE_LIM_SAT 0x401088 + +#define mmDMA_CH_0_WR_RATE_LIM_TOUT 0x40108C + +#define mmDMA_CH_0_CFG2 0x401090 + +#define mmDMA_CH_0_TDMA_CTL 0x401100 + +#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_LO 0x401104 + +#define mmDMA_CH_0_TDMA_SRC_BASE_ADDR_HI 0x401108 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_0 0x40110C + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_0 0x401110 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_0 0x401114 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_0 0x401118 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_0 0x40111C + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_1 0x401120 + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_1 0x401124 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_1 0x401128 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_1 0x40112C + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_1 0x401130 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_2 0x401134 + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_2 0x401138 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_2 0x40113C + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_2 0x401140 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_2 0x401144 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_3 0x401148 + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_3 0x40114C + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_3 0x401150 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_3 0x401154 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_3 0x401158 + +#define mmDMA_CH_0_TDMA_SRC_ROI_BASE_4 0x40115C + +#define mmDMA_CH_0_TDMA_SRC_ROI_SIZE_4 0x401160 + +#define mmDMA_CH_0_TDMA_SRC_VALID_ELEMENTS_4 0x401164 + +#define mmDMA_CH_0_TDMA_SRC_START_OFFSET_4 0x401168 + +#define mmDMA_CH_0_TDMA_SRC_STRIDE_4 0x40116C + +#define mmDMA_CH_0_TDMA_DST_BASE_ADDR_LO 0x401170 + +#define 
mmDMA_CH_0_TDMA_DST_BASE_ADDR_HI 0x401174 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_0 0x401178 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_0 0x40117C + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_0 0x401180 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_0 0x401184 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_0 0x401188 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_1 0x40118C + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_1 0x401190 + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_1 0x401194 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_1 0x401198 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_1 0x40119C + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_2 0x4011A0 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_2 0x4011A4 + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_2 0x4011A8 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_2 0x4011AC + +#define mmDMA_CH_0_TDMA_DST_STRIDE_2 0x4011B0 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_3 0x4011B4 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_3 0x4011B8 + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_3 0x4011BC + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_3 0x4011C0 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_3 0x4011C4 + +#define mmDMA_CH_0_TDMA_DST_ROI_BASE_4 0x4011C8 + +#define mmDMA_CH_0_TDMA_DST_ROI_SIZE_4 0x4011CC + +#define mmDMA_CH_0_TDMA_DST_VALID_ELEMENTS_4 0x4011D0 + +#define mmDMA_CH_0_TDMA_DST_START_OFFSET_4 0x4011D4 + +#define mmDMA_CH_0_TDMA_DST_STRIDE_4 0x4011D8 + +#define mmDMA_CH_0_MEM_INIT_BUSY 0x4011FC + +#endif /* ASIC_REG_DMA_CH_0_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h new file mode 100644 index 000000000000..92960ef5e308 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_1_REGS_H_ +#define ASIC_REG_DMA_CH_1_REGS_H_ + +/* + ***************************************** + * DMA_CH_1 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_1_CFG0 0x409000 + +#define mmDMA_CH_1_CFG1 0x409004 + +#define mmDMA_CH_1_ERRMSG_ADDR_LO 0x409008 + +#define mmDMA_CH_1_ERRMSG_ADDR_HI 0x40900C + +#define mmDMA_CH_1_ERRMSG_WDATA 0x409010 + +#define mmDMA_CH_1_RD_COMP_ADDR_LO 0x409014 + +#define mmDMA_CH_1_RD_COMP_ADDR_HI 0x409018 + +#define mmDMA_CH_1_RD_COMP_WDATA 0x40901C + +#define mmDMA_CH_1_WR_COMP_ADDR_LO 0x409020 + +#define mmDMA_CH_1_WR_COMP_ADDR_HI 0x409024 + +#define mmDMA_CH_1_WR_COMP_WDATA 0x409028 + +#define mmDMA_CH_1_LDMA_SRC_ADDR_LO 0x40902C + +#define mmDMA_CH_1_LDMA_SRC_ADDR_HI 0x409030 + +#define mmDMA_CH_1_LDMA_DST_ADDR_LO 0x409034 + +#define mmDMA_CH_1_LDMA_DST_ADDR_HI 0x409038 + +#define mmDMA_CH_1_LDMA_TSIZE 0x40903C + +#define mmDMA_CH_1_COMIT_TRANSFER 0x409040 + +#define mmDMA_CH_1_STS0 0x409044 + +#define mmDMA_CH_1_STS1 0x409048 + +#define mmDMA_CH_1_STS2 0x40904C + +#define mmDMA_CH_1_STS3 0x409050 + +#define mmDMA_CH_1_STS4 0x409054 + +#define mmDMA_CH_1_SRC_ADDR_LO_STS 0x409058 + +#define mmDMA_CH_1_SRC_ADDR_HI_STS 0x40905C + +#define mmDMA_CH_1_SRC_TSIZE_STS 0x409060 + +#define mmDMA_CH_1_DST_ADDR_LO_STS 0x409064 + +#define mmDMA_CH_1_DST_ADDR_HI_STS 0x409068 + +#define mmDMA_CH_1_DST_TSIZE_STS 0x40906C + +#define mmDMA_CH_1_RD_RATE_LIM_EN 0x409070 + +#define mmDMA_CH_1_RD_RATE_LIM_RST_TOKEN 0x409074 + +#define mmDMA_CH_1_RD_RATE_LIM_SAT 0x409078 + +#define mmDMA_CH_1_RD_RATE_LIM_TOUT 0x40907C + +#define mmDMA_CH_1_WR_RATE_LIM_EN 0x409080 + +#define mmDMA_CH_1_WR_RATE_LIM_RST_TOKEN 0x409084 + +#define mmDMA_CH_1_WR_RATE_LIM_SAT 0x409088 + +#define mmDMA_CH_1_WR_RATE_LIM_TOUT 0x40908C + +#define mmDMA_CH_1_CFG2 0x409090 + +#define mmDMA_CH_1_TDMA_CTL 0x409100 + +#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_LO 0x409104 + +#define mmDMA_CH_1_TDMA_SRC_BASE_ADDR_HI 0x409108 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_0 0x40910C + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_0 0x409110 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_0 0x409114 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_0 0x409118 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_0 0x40911C + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_1 0x409120 + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_1 0x409124 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_1 0x409128 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_1 0x40912C + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_1 0x409130 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_2 0x409134 + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_2 0x409138 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_2 0x40913C + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_2 0x409140 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_2 0x409144 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_3 0x409148 + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_3 0x40914C + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_3 0x409150 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_3 0x409154 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_3 0x409158 + +#define mmDMA_CH_1_TDMA_SRC_ROI_BASE_4 0x40915C + +#define mmDMA_CH_1_TDMA_SRC_ROI_SIZE_4 0x409160 + +#define mmDMA_CH_1_TDMA_SRC_VALID_ELEMENTS_4 0x409164 + +#define mmDMA_CH_1_TDMA_SRC_START_OFFSET_4 0x409168 + +#define mmDMA_CH_1_TDMA_SRC_STRIDE_4 0x40916C + +#define mmDMA_CH_1_TDMA_DST_BASE_ADDR_LO 0x409170 + +#define 
mmDMA_CH_1_TDMA_DST_BASE_ADDR_HI 0x409174 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_0 0x409178 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_0 0x40917C + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_0 0x409180 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_0 0x409184 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_0 0x409188 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_1 0x40918C + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_1 0x409190 + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_1 0x409194 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_1 0x409198 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_1 0x40919C + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_2 0x4091A0 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_2 0x4091A4 + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_2 0x4091A8 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_2 0x4091AC + +#define mmDMA_CH_1_TDMA_DST_STRIDE_2 0x4091B0 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_3 0x4091B4 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_3 0x4091B8 + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_3 0x4091BC + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_3 0x4091C0 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_3 0x4091C4 + +#define mmDMA_CH_1_TDMA_DST_ROI_BASE_4 0x4091C8 + +#define mmDMA_CH_1_TDMA_DST_ROI_SIZE_4 0x4091CC + +#define mmDMA_CH_1_TDMA_DST_VALID_ELEMENTS_4 0x4091D0 + +#define mmDMA_CH_1_TDMA_DST_START_OFFSET_4 0x4091D4 + +#define mmDMA_CH_1_TDMA_DST_STRIDE_4 0x4091D8 + +#define mmDMA_CH_1_MEM_INIT_BUSY 0x4091FC + +#endif /* ASIC_REG_DMA_CH_1_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h new file mode 100644 index 000000000000..4e37871a51bb --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_2_REGS_H_ +#define ASIC_REG_DMA_CH_2_REGS_H_ + +/* + ***************************************** + * DMA_CH_2 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_2_CFG0 0x411000 + +#define mmDMA_CH_2_CFG1 0x411004 + +#define mmDMA_CH_2_ERRMSG_ADDR_LO 0x411008 + +#define mmDMA_CH_2_ERRMSG_ADDR_HI 0x41100C + +#define mmDMA_CH_2_ERRMSG_WDATA 0x411010 + +#define mmDMA_CH_2_RD_COMP_ADDR_LO 0x411014 + +#define mmDMA_CH_2_RD_COMP_ADDR_HI 0x411018 + +#define mmDMA_CH_2_RD_COMP_WDATA 0x41101C + +#define mmDMA_CH_2_WR_COMP_ADDR_LO 0x411020 + +#define mmDMA_CH_2_WR_COMP_ADDR_HI 0x411024 + +#define mmDMA_CH_2_WR_COMP_WDATA 0x411028 + +#define mmDMA_CH_2_LDMA_SRC_ADDR_LO 0x41102C + +#define mmDMA_CH_2_LDMA_SRC_ADDR_HI 0x411030 + +#define mmDMA_CH_2_LDMA_DST_ADDR_LO 0x411034 + +#define mmDMA_CH_2_LDMA_DST_ADDR_HI 0x411038 + +#define mmDMA_CH_2_LDMA_TSIZE 0x41103C + +#define mmDMA_CH_2_COMIT_TRANSFER 0x411040 + +#define mmDMA_CH_2_STS0 0x411044 + +#define mmDMA_CH_2_STS1 0x411048 + +#define mmDMA_CH_2_STS2 0x41104C + +#define mmDMA_CH_2_STS3 0x411050 + +#define mmDMA_CH_2_STS4 0x411054 + +#define mmDMA_CH_2_SRC_ADDR_LO_STS 0x411058 + +#define mmDMA_CH_2_SRC_ADDR_HI_STS 0x41105C + +#define mmDMA_CH_2_SRC_TSIZE_STS 0x411060 + +#define mmDMA_CH_2_DST_ADDR_LO_STS 0x411064 + +#define mmDMA_CH_2_DST_ADDR_HI_STS 0x411068 + +#define mmDMA_CH_2_DST_TSIZE_STS 0x41106C + +#define mmDMA_CH_2_RD_RATE_LIM_EN 0x411070 + +#define mmDMA_CH_2_RD_RATE_LIM_RST_TOKEN 0x411074 + +#define mmDMA_CH_2_RD_RATE_LIM_SAT 0x411078 + +#define mmDMA_CH_2_RD_RATE_LIM_TOUT 0x41107C + +#define mmDMA_CH_2_WR_RATE_LIM_EN 0x411080 + +#define mmDMA_CH_2_WR_RATE_LIM_RST_TOKEN 0x411084 + +#define mmDMA_CH_2_WR_RATE_LIM_SAT 0x411088 + +#define mmDMA_CH_2_WR_RATE_LIM_TOUT 0x41108C + +#define mmDMA_CH_2_CFG2 0x411090 + +#define mmDMA_CH_2_TDMA_CTL 0x411100 + +#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_LO 0x411104 + +#define mmDMA_CH_2_TDMA_SRC_BASE_ADDR_HI 0x411108 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_0 0x41110C + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_0 0x411110 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_0 0x411114 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_0 0x411118 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_0 0x41111C + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_1 0x411120 + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_1 0x411124 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_1 0x411128 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_1 0x41112C + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_1 0x411130 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_2 0x411134 + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_2 0x411138 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_2 0x41113C + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_2 0x411140 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_2 0x411144 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_3 0x411148 + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_3 0x41114C + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_3 0x411150 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_3 0x411154 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_3 0x411158 + +#define mmDMA_CH_2_TDMA_SRC_ROI_BASE_4 0x41115C + +#define mmDMA_CH_2_TDMA_SRC_ROI_SIZE_4 0x411160 + +#define mmDMA_CH_2_TDMA_SRC_VALID_ELEMENTS_4 0x411164 + +#define mmDMA_CH_2_TDMA_SRC_START_OFFSET_4 0x411168 + +#define mmDMA_CH_2_TDMA_SRC_STRIDE_4 0x41116C + +#define mmDMA_CH_2_TDMA_DST_BASE_ADDR_LO 0x411170 + +#define 
mmDMA_CH_2_TDMA_DST_BASE_ADDR_HI 0x411174 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_0 0x411178 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_0 0x41117C + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_0 0x411180 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_0 0x411184 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_0 0x411188 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_1 0x41118C + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_1 0x411190 + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_1 0x411194 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_1 0x411198 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_1 0x41119C + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_2 0x4111A0 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_2 0x4111A4 + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_2 0x4111A8 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_2 0x4111AC + +#define mmDMA_CH_2_TDMA_DST_STRIDE_2 0x4111B0 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_3 0x4111B4 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_3 0x4111B8 + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_3 0x4111BC + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_3 0x4111C0 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_3 0x4111C4 + +#define mmDMA_CH_2_TDMA_DST_ROI_BASE_4 0x4111C8 + +#define mmDMA_CH_2_TDMA_DST_ROI_SIZE_4 0x4111CC + +#define mmDMA_CH_2_TDMA_DST_VALID_ELEMENTS_4 0x4111D0 + +#define mmDMA_CH_2_TDMA_DST_START_OFFSET_4 0x4111D4 + +#define mmDMA_CH_2_TDMA_DST_STRIDE_4 0x4111D8 + +#define mmDMA_CH_2_MEM_INIT_BUSY 0x4111FC + +#endif /* ASIC_REG_DMA_CH_2_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h new file mode 100644 index 000000000000..a2d6aeb32a18 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_3_REGS_H_ +#define ASIC_REG_DMA_CH_3_REGS_H_ + +/* + ***************************************** + * DMA_CH_3 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_3_CFG0 0x419000 + +#define mmDMA_CH_3_CFG1 0x419004 + +#define mmDMA_CH_3_ERRMSG_ADDR_LO 0x419008 + +#define mmDMA_CH_3_ERRMSG_ADDR_HI 0x41900C + +#define mmDMA_CH_3_ERRMSG_WDATA 0x419010 + +#define mmDMA_CH_3_RD_COMP_ADDR_LO 0x419014 + +#define mmDMA_CH_3_RD_COMP_ADDR_HI 0x419018 + +#define mmDMA_CH_3_RD_COMP_WDATA 0x41901C + +#define mmDMA_CH_3_WR_COMP_ADDR_LO 0x419020 + +#define mmDMA_CH_3_WR_COMP_ADDR_HI 0x419024 + +#define mmDMA_CH_3_WR_COMP_WDATA 0x419028 + +#define mmDMA_CH_3_LDMA_SRC_ADDR_LO 0x41902C + +#define mmDMA_CH_3_LDMA_SRC_ADDR_HI 0x419030 + +#define mmDMA_CH_3_LDMA_DST_ADDR_LO 0x419034 + +#define mmDMA_CH_3_LDMA_DST_ADDR_HI 0x419038 + +#define mmDMA_CH_3_LDMA_TSIZE 0x41903C + +#define mmDMA_CH_3_COMIT_TRANSFER 0x419040 + +#define mmDMA_CH_3_STS0 0x419044 + +#define mmDMA_CH_3_STS1 0x419048 + +#define mmDMA_CH_3_STS2 0x41904C + +#define mmDMA_CH_3_STS3 0x419050 + +#define mmDMA_CH_3_STS4 0x419054 + +#define mmDMA_CH_3_SRC_ADDR_LO_STS 0x419058 + +#define mmDMA_CH_3_SRC_ADDR_HI_STS 0x41905C + +#define mmDMA_CH_3_SRC_TSIZE_STS 0x419060 + +#define mmDMA_CH_3_DST_ADDR_LO_STS 0x419064 + +#define mmDMA_CH_3_DST_ADDR_HI_STS 0x419068 + +#define mmDMA_CH_3_DST_TSIZE_STS 0x41906C + +#define mmDMA_CH_3_RD_RATE_LIM_EN 0x419070 + +#define mmDMA_CH_3_RD_RATE_LIM_RST_TOKEN 0x419074 + +#define mmDMA_CH_3_RD_RATE_LIM_SAT 0x419078 + +#define mmDMA_CH_3_RD_RATE_LIM_TOUT 0x41907C + +#define mmDMA_CH_3_WR_RATE_LIM_EN 0x419080 + +#define mmDMA_CH_3_WR_RATE_LIM_RST_TOKEN 0x419084 + +#define mmDMA_CH_3_WR_RATE_LIM_SAT 0x419088 + +#define mmDMA_CH_3_WR_RATE_LIM_TOUT 0x41908C + +#define mmDMA_CH_3_CFG2 0x419090 + +#define mmDMA_CH_3_TDMA_CTL 0x419100 + +#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_LO 0x419104 + +#define mmDMA_CH_3_TDMA_SRC_BASE_ADDR_HI 0x419108 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_0 0x41910C + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_0 0x419110 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_0 0x419114 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_0 0x419118 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_0 0x41911C + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_1 0x419120 + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_1 0x419124 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_1 0x419128 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_1 0x41912C + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_1 0x419130 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_2 0x419134 + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_2 0x419138 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_2 0x41913C + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_2 0x419140 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_2 0x419144 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_3 0x419148 + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_3 0x41914C + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_3 0x419150 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_3 0x419154 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_3 0x419158 + +#define mmDMA_CH_3_TDMA_SRC_ROI_BASE_4 0x41915C + +#define mmDMA_CH_3_TDMA_SRC_ROI_SIZE_4 0x419160 + +#define mmDMA_CH_3_TDMA_SRC_VALID_ELEMENTS_4 0x419164 + +#define mmDMA_CH_3_TDMA_SRC_START_OFFSET_4 0x419168 + +#define mmDMA_CH_3_TDMA_SRC_STRIDE_4 0x41916C + +#define mmDMA_CH_3_TDMA_DST_BASE_ADDR_LO 0x419170 + +#define 
mmDMA_CH_3_TDMA_DST_BASE_ADDR_HI 0x419174 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_0 0x419178 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_0 0x41917C + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_0 0x419180 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_0 0x419184 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_0 0x419188 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_1 0x41918C + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_1 0x419190 + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_1 0x419194 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_1 0x419198 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_1 0x41919C + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_2 0x4191A0 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_2 0x4191A4 + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_2 0x4191A8 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_2 0x4191AC + +#define mmDMA_CH_3_TDMA_DST_STRIDE_2 0x4191B0 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_3 0x4191B4 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_3 0x4191B8 + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_3 0x4191BC + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_3 0x4191C0 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_3 0x4191C4 + +#define mmDMA_CH_3_TDMA_DST_ROI_BASE_4 0x4191C8 + +#define mmDMA_CH_3_TDMA_DST_ROI_SIZE_4 0x4191CC + +#define mmDMA_CH_3_TDMA_DST_VALID_ELEMENTS_4 0x4191D0 + +#define mmDMA_CH_3_TDMA_DST_START_OFFSET_4 0x4191D4 + +#define mmDMA_CH_3_TDMA_DST_STRIDE_4 0x4191D8 + +#define mmDMA_CH_3_MEM_INIT_BUSY 0x4191FC + +#endif /* ASIC_REG_DMA_CH_3_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h new file mode 100644 index 000000000000..400d6fd3acf5 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_CH_4_REGS_H_ +#define ASIC_REG_DMA_CH_4_REGS_H_ + +/* + ***************************************** + * DMA_CH_4 (Prototype: DMA_CH) + ***************************************** + */ + +#define mmDMA_CH_4_CFG0 0x421000 + +#define mmDMA_CH_4_CFG1 0x421004 + +#define mmDMA_CH_4_ERRMSG_ADDR_LO 0x421008 + +#define mmDMA_CH_4_ERRMSG_ADDR_HI 0x42100C + +#define mmDMA_CH_4_ERRMSG_WDATA 0x421010 + +#define mmDMA_CH_4_RD_COMP_ADDR_LO 0x421014 + +#define mmDMA_CH_4_RD_COMP_ADDR_HI 0x421018 + +#define mmDMA_CH_4_RD_COMP_WDATA 0x42101C + +#define mmDMA_CH_4_WR_COMP_ADDR_LO 0x421020 + +#define mmDMA_CH_4_WR_COMP_ADDR_HI 0x421024 + +#define mmDMA_CH_4_WR_COMP_WDATA 0x421028 + +#define mmDMA_CH_4_LDMA_SRC_ADDR_LO 0x42102C + +#define mmDMA_CH_4_LDMA_SRC_ADDR_HI 0x421030 + +#define mmDMA_CH_4_LDMA_DST_ADDR_LO 0x421034 + +#define mmDMA_CH_4_LDMA_DST_ADDR_HI 0x421038 + +#define mmDMA_CH_4_LDMA_TSIZE 0x42103C + +#define mmDMA_CH_4_COMIT_TRANSFER 0x421040 + +#define mmDMA_CH_4_STS0 0x421044 + +#define mmDMA_CH_4_STS1 0x421048 + +#define mmDMA_CH_4_STS2 0x42104C + +#define mmDMA_CH_4_STS3 0x421050 + +#define mmDMA_CH_4_STS4 0x421054 + +#define mmDMA_CH_4_SRC_ADDR_LO_STS 0x421058 + +#define mmDMA_CH_4_SRC_ADDR_HI_STS 0x42105C + +#define mmDMA_CH_4_SRC_TSIZE_STS 0x421060 + +#define mmDMA_CH_4_DST_ADDR_LO_STS 0x421064 + +#define mmDMA_CH_4_DST_ADDR_HI_STS 0x421068 + +#define mmDMA_CH_4_DST_TSIZE_STS 0x42106C + +#define mmDMA_CH_4_RD_RATE_LIM_EN 0x421070 + +#define mmDMA_CH_4_RD_RATE_LIM_RST_TOKEN 0x421074 + +#define mmDMA_CH_4_RD_RATE_LIM_SAT 0x421078 + +#define mmDMA_CH_4_RD_RATE_LIM_TOUT 0x42107C + +#define mmDMA_CH_4_WR_RATE_LIM_EN 0x421080 + +#define mmDMA_CH_4_WR_RATE_LIM_RST_TOKEN 0x421084 + +#define mmDMA_CH_4_WR_RATE_LIM_SAT 0x421088 + +#define mmDMA_CH_4_WR_RATE_LIM_TOUT 0x42108C + +#define mmDMA_CH_4_CFG2 0x421090 + +#define mmDMA_CH_4_TDMA_CTL 0x421100 + +#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_LO 0x421104 + +#define mmDMA_CH_4_TDMA_SRC_BASE_ADDR_HI 0x421108 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_0 0x42110C + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_0 0x421110 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_0 0x421114 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_0 0x421118 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_0 0x42111C + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_1 0x421120 + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_1 0x421124 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_1 0x421128 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_1 0x42112C + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_1 0x421130 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_2 0x421134 + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_2 0x421138 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_2 0x42113C + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_2 0x421140 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_2 0x421144 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_3 0x421148 + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_3 0x42114C + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_3 0x421150 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_3 0x421154 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_3 0x421158 + +#define mmDMA_CH_4_TDMA_SRC_ROI_BASE_4 0x42115C + +#define mmDMA_CH_4_TDMA_SRC_ROI_SIZE_4 0x421160 + +#define mmDMA_CH_4_TDMA_SRC_VALID_ELEMENTS_4 0x421164 + +#define mmDMA_CH_4_TDMA_SRC_START_OFFSET_4 0x421168 + +#define mmDMA_CH_4_TDMA_SRC_STRIDE_4 0x42116C + +#define mmDMA_CH_4_TDMA_DST_BASE_ADDR_LO 0x421170 + +#define 
mmDMA_CH_4_TDMA_DST_BASE_ADDR_HI 0x421174 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_0 0x421178 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_0 0x42117C + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_0 0x421180 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_0 0x421184 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_0 0x421188 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_1 0x42118C + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_1 0x421190 + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_1 0x421194 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_1 0x421198 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_1 0x42119C + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_2 0x4211A0 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_2 0x4211A4 + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_2 0x4211A8 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_2 0x4211AC + +#define mmDMA_CH_4_TDMA_DST_STRIDE_2 0x4211B0 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_3 0x4211B4 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_3 0x4211B8 + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_3 0x4211BC + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_3 0x4211C0 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_3 0x4211C4 + +#define mmDMA_CH_4_TDMA_DST_ROI_BASE_4 0x4211C8 + +#define mmDMA_CH_4_TDMA_DST_ROI_SIZE_4 0x4211CC + +#define mmDMA_CH_4_TDMA_DST_VALID_ELEMENTS_4 0x4211D0 + +#define mmDMA_CH_4_TDMA_DST_START_OFFSET_4 0x4211D4 + +#define mmDMA_CH_4_TDMA_DST_STRIDE_4 0x4211D8 + +#define mmDMA_CH_4_MEM_INIT_BUSY 0x4211FC + +#endif /* ASIC_REG_DMA_CH_4_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h new file mode 100644 index 000000000000..8d965443c51e --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_MACRO_MASKS_H_ +#define ASIC_REG_DMA_MACRO_MASKS_H_ + +/* + ***************************************** + * DMA_MACRO (Prototype: DMA_MACRO) + ***************************************** + */ + +/* DMA_MACRO_LBW_RANGE_HIT_BLOCK */ +#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_SHIFT 0 +#define DMA_MACRO_LBW_RANGE_HIT_BLOCK_R_MASK 0xFFFF + +/* DMA_MACRO_LBW_RANGE_MASK */ +#define DMA_MACRO_LBW_RANGE_MASK_R_SHIFT 0 +#define DMA_MACRO_LBW_RANGE_MASK_R_MASK 0x3FFFFFF + +/* DMA_MACRO_LBW_RANGE_BASE */ +#define DMA_MACRO_LBW_RANGE_BASE_R_SHIFT 0 +#define DMA_MACRO_LBW_RANGE_BASE_R_MASK 0x3FFFFFF + +/* DMA_MACRO_HBW_RANGE_HIT_BLOCK */ +#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_HIT_BLOCK_R_MASK 0xFF + +/* DMA_MACRO_HBW_RANGE_MASK_49_32 */ +#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_MASK_49_32_R_MASK 0x3FFFF + +/* DMA_MACRO_HBW_RANGE_MASK_31_0 */ +#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_MASK_31_0_R_MASK 0xFFFFFFFF + +/* DMA_MACRO_HBW_RANGE_BASE_49_32 */ +#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_BASE_49_32_R_MASK 0x3FFFF + +/* DMA_MACRO_HBW_RANGE_BASE_31_0 */ +#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_SHIFT 0 +#define DMA_MACRO_HBW_RANGE_BASE_31_0_R_MASK 0xFFFFFFFF + +/* DMA_MACRO_WRITE_EN */ +#define DMA_MACRO_WRITE_EN_R_SHIFT 0 +#define DMA_MACRO_WRITE_EN_R_MASK 0x1 + +/* DMA_MACRO_WRITE_CREDIT */ +#define DMA_MACRO_WRITE_CREDIT_R_SHIFT 0 +#define DMA_MACRO_WRITE_CREDIT_R_MASK 0x3FF + +/* DMA_MACRO_READ_EN */ +#define DMA_MACRO_READ_EN_R_SHIFT 0 +#define DMA_MACRO_READ_EN_R_MASK 0x1 + +/* DMA_MACRO_READ_CREDIT */ +#define DMA_MACRO_READ_CREDIT_R_SHIFT 0 +#define DMA_MACRO_READ_CREDIT_R_MASK 0x3FF + +/* DMA_MACRO_SRAM_BUSY */ + +/* DMA_MACRO_RAZWI_LBW_WT_VLD */ +#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_WT_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_LBW_WT_ID */ +#define DMA_MACRO_RAZWI_LBW_WT_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_WT_ID_R_MASK 0x7FFF + +/* DMA_MACRO_RAZWI_LBW_RD_VLD */ +#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_RD_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_LBW_RD_ID */ +#define DMA_MACRO_RAZWI_LBW_RD_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_LBW_RD_ID_R_MASK 0x7FFF + +/* DMA_MACRO_RAZWI_HBW_WT_VLD */ +#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_WT_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_HBW_WT_ID */ +#define DMA_MACRO_RAZWI_HBW_WT_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_WT_ID_R_MASK 0x1FFFFFFF + +/* DMA_MACRO_RAZWI_HBW_RD_VLD */ +#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_RD_VLD_R_MASK 0x1 + +/* DMA_MACRO_RAZWI_HBW_RD_ID */ +#define DMA_MACRO_RAZWI_HBW_RD_ID_R_SHIFT 0 +#define DMA_MACRO_RAZWI_HBW_RD_ID_R_MASK 0x1FFFFFFF + +#endif /* ASIC_REG_DMA_MACRO_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h new file mode 100644 index 000000000000..8bfcb001189d --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_MACRO_REGS_H_ +#define ASIC_REG_DMA_MACRO_REGS_H_ + +/* + ***************************************** + * DMA_MACRO (Prototype: DMA_MACRO) + ***************************************** + */ + +#define mmDMA_MACRO_LBW_RANGE_HIT_BLOCK 0x4B0000 + +#define mmDMA_MACRO_LBW_RANGE_MASK_0 0x4B0004 + +#define mmDMA_MACRO_LBW_RANGE_MASK_1 0x4B0008 + +#define mmDMA_MACRO_LBW_RANGE_MASK_2 0x4B000C + +#define mmDMA_MACRO_LBW_RANGE_MASK_3 0x4B0010 + +#define mmDMA_MACRO_LBW_RANGE_MASK_4 0x4B0014 + +#define mmDMA_MACRO_LBW_RANGE_MASK_5 0x4B0018 + +#define mmDMA_MACRO_LBW_RANGE_MASK_6 0x4B001C + +#define mmDMA_MACRO_LBW_RANGE_MASK_7 0x4B0020 + +#define mmDMA_MACRO_LBW_RANGE_MASK_8 0x4B0024 + +#define mmDMA_MACRO_LBW_RANGE_MASK_9 0x4B0028 + +#define mmDMA_MACRO_LBW_RANGE_MASK_10 0x4B002C + +#define mmDMA_MACRO_LBW_RANGE_MASK_11 0x4B0030 + +#define mmDMA_MACRO_LBW_RANGE_MASK_12 0x4B0034 + +#define mmDMA_MACRO_LBW_RANGE_MASK_13 0x4B0038 + +#define mmDMA_MACRO_LBW_RANGE_MASK_14 0x4B003C + +#define mmDMA_MACRO_LBW_RANGE_MASK_15 0x4B0040 + +#define mmDMA_MACRO_LBW_RANGE_BASE_0 0x4B0044 + +#define mmDMA_MACRO_LBW_RANGE_BASE_1 0x4B0048 + +#define mmDMA_MACRO_LBW_RANGE_BASE_2 0x4B004C + +#define mmDMA_MACRO_LBW_RANGE_BASE_3 0x4B0050 + +#define mmDMA_MACRO_LBW_RANGE_BASE_4 0x4B0054 + +#define mmDMA_MACRO_LBW_RANGE_BASE_5 0x4B0058 + +#define mmDMA_MACRO_LBW_RANGE_BASE_6 0x4B005C + +#define mmDMA_MACRO_LBW_RANGE_BASE_7 0x4B0060 + +#define mmDMA_MACRO_LBW_RANGE_BASE_8 0x4B0064 + +#define mmDMA_MACRO_LBW_RANGE_BASE_9 0x4B0068 + +#define mmDMA_MACRO_LBW_RANGE_BASE_10 0x4B006C + +#define mmDMA_MACRO_LBW_RANGE_BASE_11 0x4B0070 + +#define mmDMA_MACRO_LBW_RANGE_BASE_12 0x4B0074 + +#define mmDMA_MACRO_LBW_RANGE_BASE_13 0x4B0078 + +#define mmDMA_MACRO_LBW_RANGE_BASE_14 0x4B007C + +#define mmDMA_MACRO_LBW_RANGE_BASE_15 0x4B0080 + +#define mmDMA_MACRO_HBW_RANGE_HIT_BLOCK 0x4B0084 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_0 0x4B00A8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_1 0x4B00AC + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_2 0x4B00B0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_3 0x4B00B4 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_4 0x4B00B8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_5 0x4B00BC + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_6 0x4B00C0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_49_32_7 0x4B00C4 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_0 0x4B00C8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_1 0x4B00CC + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_2 0x4B00D0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_3 0x4B00D4 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_4 0x4B00D8 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_5 0x4B00DC + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_6 0x4B00E0 + +#define mmDMA_MACRO_HBW_RANGE_MASK_31_0_7 0x4B00E4 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_0 0x4B00E8 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_1 0x4B00EC + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_2 0x4B00F0 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_3 0x4B00F4 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_4 0x4B00F8 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_5 0x4B00FC + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_6 0x4B0100 + +#define mmDMA_MACRO_HBW_RANGE_BASE_49_32_7 0x4B0104 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_0 0x4B0108 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_1 0x4B010C + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_2 0x4B0110 + +#define 
mmDMA_MACRO_HBW_RANGE_BASE_31_0_3 0x4B0114 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_4 0x4B0118 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_5 0x4B011C + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_6 0x4B0120 + +#define mmDMA_MACRO_HBW_RANGE_BASE_31_0_7 0x4B0124 + +#define mmDMA_MACRO_WRITE_EN 0x4B0128 + +#define mmDMA_MACRO_WRITE_CREDIT 0x4B012C + +#define mmDMA_MACRO_READ_EN 0x4B0130 + +#define mmDMA_MACRO_READ_CREDIT 0x4B0134 + +#define mmDMA_MACRO_SRAM_BUSY 0x4B0138 + +#define mmDMA_MACRO_RAZWI_LBW_WT_VLD 0x4B013C + +#define mmDMA_MACRO_RAZWI_LBW_WT_ID 0x4B0140 + +#define mmDMA_MACRO_RAZWI_LBW_RD_VLD 0x4B0144 + +#define mmDMA_MACRO_RAZWI_LBW_RD_ID 0x4B0148 + +#define mmDMA_MACRO_RAZWI_HBW_WT_VLD 0x4B014C + +#define mmDMA_MACRO_RAZWI_HBW_WT_ID 0x4B0150 + +#define mmDMA_MACRO_RAZWI_HBW_RD_VLD 0x4B0154 + +#define mmDMA_MACRO_RAZWI_HBW_RD_ID 0x4B0158 + +#endif /* ASIC_REG_DMA_MACRO_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h new file mode 100644 index 000000000000..9f33f351a3c1 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_NRTR_MASKS_H_ +#define ASIC_REG_DMA_NRTR_MASKS_H_ + +/* + ***************************************** + * DMA_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +/* DMA_NRTR_HBW_MAX_CRED */ +#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0 +#define DMA_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F +#define DMA_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8 +#define DMA_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16 +#define DMA_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define DMA_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24 +#define DMA_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* DMA_NRTR_LBW_MAX_CRED */ +#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0 +#define DMA_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F +#define DMA_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8 +#define DMA_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16 +#define DMA_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define DMA_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24 +#define DMA_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* DMA_NRTR_DBG_E_ARB */ +#define DMA_NRTR_DBG_E_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_E_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_E_ARB_S_SHIFT 8 +#define DMA_NRTR_DBG_E_ARB_S_MASK 0x700 +#define DMA_NRTR_DBG_E_ARB_N_SHIFT 16 +#define DMA_NRTR_DBG_E_ARB_N_MASK 0x70000 +#define DMA_NRTR_DBG_E_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_E_ARB_L_MASK 0x7000000 + +/* DMA_NRTR_DBG_W_ARB */ +#define DMA_NRTR_DBG_W_ARB_E_SHIFT 0 +#define DMA_NRTR_DBG_W_ARB_E_MASK 0x7 +#define DMA_NRTR_DBG_W_ARB_S_SHIFT 8 +#define DMA_NRTR_DBG_W_ARB_S_MASK 0x700 +#define DMA_NRTR_DBG_W_ARB_N_SHIFT 16 +#define DMA_NRTR_DBG_W_ARB_N_MASK 0x70000 +#define DMA_NRTR_DBG_W_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_W_ARB_L_MASK 0x7000000 + +/* DMA_NRTR_DBG_N_ARB */ +#define DMA_NRTR_DBG_N_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_N_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_N_ARB_E_SHIFT 8 +#define DMA_NRTR_DBG_N_ARB_E_MASK 0x700 +#define DMA_NRTR_DBG_N_ARB_S_SHIFT 16 +#define DMA_NRTR_DBG_N_ARB_S_MASK 0x70000 +#define DMA_NRTR_DBG_N_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_N_ARB_L_MASK 0x7000000 + +/* 
DMA_NRTR_DBG_S_ARB */ +#define DMA_NRTR_DBG_S_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_S_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_S_ARB_E_SHIFT 8 +#define DMA_NRTR_DBG_S_ARB_E_MASK 0x700 +#define DMA_NRTR_DBG_S_ARB_N_SHIFT 16 +#define DMA_NRTR_DBG_S_ARB_N_MASK 0x70000 +#define DMA_NRTR_DBG_S_ARB_L_SHIFT 24 +#define DMA_NRTR_DBG_S_ARB_L_MASK 0x7000000 + +/* DMA_NRTR_DBG_L_ARB */ +#define DMA_NRTR_DBG_L_ARB_W_SHIFT 0 +#define DMA_NRTR_DBG_L_ARB_W_MASK 0x7 +#define DMA_NRTR_DBG_L_ARB_E_SHIFT 8 +#define DMA_NRTR_DBG_L_ARB_E_MASK 0x700 +#define DMA_NRTR_DBG_L_ARB_S_SHIFT 16 +#define DMA_NRTR_DBG_L_ARB_S_MASK 0x70000 +#define DMA_NRTR_DBG_L_ARB_N_SHIFT 24 +#define DMA_NRTR_DBG_L_ARB_N_MASK 0x7000000 + +/* DMA_NRTR_DBG_E_ARB_MAX */ +#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_W_ARB_MAX */ +#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_N_ARB_MAX */ +#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_S_ARB_MAX */ +#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_DBG_L_ARB_MAX */ +#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0 +#define DMA_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F + +/* DMA_NRTR_SPLIT_COEF */ +#define DMA_NRTR_SPLIT_COEF_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_CFG */ +#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0 +#define DMA_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1 +#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1 +#define DMA_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2 +#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2 +#define DMA_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC +#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4 +#define DMA_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10 +#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5 +#define DMA_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20 +#define DMA_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6 +#define DMA_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0 + +/* DMA_NRTR_SPLIT_RD_SAT */ +#define DMA_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_RD_RST_TOKEN */ +#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_RD_TIMEOUT */ +#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_SPLIT_WR_SAT */ +#define DMA_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF + +/* DMA_NRTR_WPLIT_WR_TST_TOLEN */ +#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0 +#define DMA_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF + +/* DMA_NRTR_SPLIT_WR_TIMEOUT */ +#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0 +#define DMA_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_HBW_RANGE_HIT */ +#define DMA_NRTR_HBW_RANGE_HIT_IND_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF + +/* DMA_NRTR_HBW_RANGE_MASK_L */ +#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_HBW_RANGE_MASK_H */ +#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF + +/* DMA_NRTR_HBW_RANGE_BASE_L */ +#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF + +/* DMA_NRTR_HBW_RANGE_BASE_H */ +#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0 +#define DMA_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF + +/* 
DMA_NRTR_LBW_RANGE_HIT */ +#define DMA_NRTR_LBW_RANGE_HIT_IND_SHIFT 0 +#define DMA_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF + +/* DMA_NRTR_LBW_RANGE_MASK */ +#define DMA_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0 +#define DMA_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF + +/* DMA_NRTR_LBW_RANGE_BASE */ +#define DMA_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0 +#define DMA_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF + +/* DMA_NRTR_RGLTR */ +#define DMA_NRTR_RGLTR_WR_EN_SHIFT 0 +#define DMA_NRTR_RGLTR_WR_EN_MASK 0x1 +#define DMA_NRTR_RGLTR_RD_EN_SHIFT 4 +#define DMA_NRTR_RGLTR_RD_EN_MASK 0x10 + +/* DMA_NRTR_RGLTR_WR_RESULT */ +#define DMA_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0 +#define DMA_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF + +/* DMA_NRTR_RGLTR_RD_RESULT */ +#define DMA_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0 +#define DMA_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF + +/* DMA_NRTR_SCRAMB_EN */ +#define DMA_NRTR_SCRAMB_EN_VAL_SHIFT 0 +#define DMA_NRTR_SCRAMB_EN_VAL_MASK 0x1 + +/* DMA_NRTR_NON_LIN_SCRAMB */ +#define DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0 +#define DMA_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1 + +#endif /* ASIC_REG_DMA_NRTR_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h new file mode 100644 index 000000000000..d8293745a02b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_NRTR_REGS_H_ +#define ASIC_REG_DMA_NRTR_REGS_H_ + +/* + ***************************************** + * DMA_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +#define mmDMA_NRTR_HBW_MAX_CRED 0x1C0100 + +#define mmDMA_NRTR_LBW_MAX_CRED 0x1C0120 + +#define mmDMA_NRTR_DBG_E_ARB 0x1C0300 + +#define mmDMA_NRTR_DBG_W_ARB 0x1C0304 + +#define mmDMA_NRTR_DBG_N_ARB 0x1C0308 + +#define mmDMA_NRTR_DBG_S_ARB 0x1C030C + +#define mmDMA_NRTR_DBG_L_ARB 0x1C0310 + +#define mmDMA_NRTR_DBG_E_ARB_MAX 0x1C0320 + +#define mmDMA_NRTR_DBG_W_ARB_MAX 0x1C0324 + +#define mmDMA_NRTR_DBG_N_ARB_MAX 0x1C0328 + +#define mmDMA_NRTR_DBG_S_ARB_MAX 0x1C032C + +#define mmDMA_NRTR_DBG_L_ARB_MAX 0x1C0330 + +#define mmDMA_NRTR_SPLIT_COEF_0 0x1C0400 + +#define mmDMA_NRTR_SPLIT_COEF_1 0x1C0404 + +#define mmDMA_NRTR_SPLIT_COEF_2 0x1C0408 + +#define mmDMA_NRTR_SPLIT_COEF_3 0x1C040C + +#define mmDMA_NRTR_SPLIT_COEF_4 0x1C0410 + +#define mmDMA_NRTR_SPLIT_COEF_5 0x1C0414 + +#define mmDMA_NRTR_SPLIT_COEF_6 0x1C0418 + +#define mmDMA_NRTR_SPLIT_COEF_7 0x1C041C + +#define mmDMA_NRTR_SPLIT_COEF_8 0x1C0420 + +#define mmDMA_NRTR_SPLIT_COEF_9 0x1C0424 + +#define mmDMA_NRTR_SPLIT_CFG 0x1C0440 + +#define mmDMA_NRTR_SPLIT_RD_SAT 0x1C0444 + +#define mmDMA_NRTR_SPLIT_RD_RST_TOKEN 0x1C0448 + +#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_0 0x1C044C + +#define mmDMA_NRTR_SPLIT_RD_TIMEOUT_1 0x1C0450 + +#define mmDMA_NRTR_SPLIT_WR_SAT 0x1C0454 + +#define mmDMA_NRTR_WPLIT_WR_TST_TOLEN 0x1C0458 + +#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_0 0x1C045C + +#define mmDMA_NRTR_SPLIT_WR_TIMEOUT_1 0x1C0460 + +#define mmDMA_NRTR_HBW_RANGE_HIT 0x1C0470 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_0 0x1C0480 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_1 0x1C0484 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_2 0x1C0488 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_3 0x1C048C + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_4 0x1C0490 + +#define 
mmDMA_NRTR_HBW_RANGE_MASK_L_5 0x1C0494 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_6 0x1C0498 + +#define mmDMA_NRTR_HBW_RANGE_MASK_L_7 0x1C049C + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_0 0x1C04A0 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_1 0x1C04A4 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_2 0x1C04A8 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_3 0x1C04AC + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_4 0x1C04B0 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_5 0x1C04B4 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_6 0x1C04B8 + +#define mmDMA_NRTR_HBW_RANGE_MASK_H_7 0x1C04BC + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_0 0x1C04C0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_1 0x1C04C4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_2 0x1C04C8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_3 0x1C04CC + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_4 0x1C04D0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_5 0x1C04D4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_6 0x1C04D8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_L_7 0x1C04DC + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_0 0x1C04E0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_1 0x1C04E4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_2 0x1C04E8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_3 0x1C04EC + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_4 0x1C04F0 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_5 0x1C04F4 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_6 0x1C04F8 + +#define mmDMA_NRTR_HBW_RANGE_BASE_H_7 0x1C04FC + +#define mmDMA_NRTR_LBW_RANGE_HIT 0x1C0500 + +#define mmDMA_NRTR_LBW_RANGE_MASK_0 0x1C0510 + +#define mmDMA_NRTR_LBW_RANGE_MASK_1 0x1C0514 + +#define mmDMA_NRTR_LBW_RANGE_MASK_2 0x1C0518 + +#define mmDMA_NRTR_LBW_RANGE_MASK_3 0x1C051C + +#define mmDMA_NRTR_LBW_RANGE_MASK_4 0x1C0520 + +#define mmDMA_NRTR_LBW_RANGE_MASK_5 0x1C0524 + +#define mmDMA_NRTR_LBW_RANGE_MASK_6 0x1C0528 + +#define mmDMA_NRTR_LBW_RANGE_MASK_7 0x1C052C + +#define mmDMA_NRTR_LBW_RANGE_MASK_8 0x1C0530 + +#define mmDMA_NRTR_LBW_RANGE_MASK_9 0x1C0534 + +#define mmDMA_NRTR_LBW_RANGE_MASK_10 0x1C0538 + +#define mmDMA_NRTR_LBW_RANGE_MASK_11 0x1C053C + +#define mmDMA_NRTR_LBW_RANGE_MASK_12 0x1C0540 + +#define mmDMA_NRTR_LBW_RANGE_MASK_13 0x1C0544 + +#define mmDMA_NRTR_LBW_RANGE_MASK_14 0x1C0548 + +#define mmDMA_NRTR_LBW_RANGE_MASK_15 0x1C054C + +#define mmDMA_NRTR_LBW_RANGE_BASE_0 0x1C0550 + +#define mmDMA_NRTR_LBW_RANGE_BASE_1 0x1C0554 + +#define mmDMA_NRTR_LBW_RANGE_BASE_2 0x1C0558 + +#define mmDMA_NRTR_LBW_RANGE_BASE_3 0x1C055C + +#define mmDMA_NRTR_LBW_RANGE_BASE_4 0x1C0560 + +#define mmDMA_NRTR_LBW_RANGE_BASE_5 0x1C0564 + +#define mmDMA_NRTR_LBW_RANGE_BASE_6 0x1C0568 + +#define mmDMA_NRTR_LBW_RANGE_BASE_7 0x1C056C + +#define mmDMA_NRTR_LBW_RANGE_BASE_8 0x1C0570 + +#define mmDMA_NRTR_LBW_RANGE_BASE_9 0x1C0574 + +#define mmDMA_NRTR_LBW_RANGE_BASE_10 0x1C0578 + +#define mmDMA_NRTR_LBW_RANGE_BASE_11 0x1C057C + +#define mmDMA_NRTR_LBW_RANGE_BASE_12 0x1C0580 + +#define mmDMA_NRTR_LBW_RANGE_BASE_13 0x1C0584 + +#define mmDMA_NRTR_LBW_RANGE_BASE_14 0x1C0588 + +#define mmDMA_NRTR_LBW_RANGE_BASE_15 0x1C058C + +#define mmDMA_NRTR_RGLTR 0x1C0590 + +#define mmDMA_NRTR_RGLTR_WR_RESULT 0x1C0594 + +#define mmDMA_NRTR_RGLTR_RD_RESULT 0x1C0598 + +#define mmDMA_NRTR_SCRAMB_EN 0x1C0600 + +#define mmDMA_NRTR_NON_LIN_SCRAMB 0x1C0604 + +#endif /* ASIC_REG_DMA_NRTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h new file mode 100644 index 000000000000..10619dbb9b17 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h @@ -0,0 +1,465 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 
HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_0_MASKS_H_ +#define ASIC_REG_DMA_QM_0_MASKS_H_ + +/* + ***************************************** + * DMA_QM_0 (Prototype: QMAN) + ***************************************** + */ + +/* DMA_QM_0_GLBL_CFG0 */ +#define DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT 0 +#define DMA_QM_0_GLBL_CFG0_PQF_EN_MASK 0x1 +#define DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT 1 +#define DMA_QM_0_GLBL_CFG0_CQF_EN_MASK 0x2 +#define DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT 2 +#define DMA_QM_0_GLBL_CFG0_CP_EN_MASK 0x4 +#define DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT 3 +#define DMA_QM_0_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* DMA_QM_0_GLBL_CFG1 */ +#define DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define DMA_QM_0_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define DMA_QM_0_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT 2 +#define DMA_QM_0_GLBL_CFG1_CP_STOP_MASK 0x4 +#define DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define DMA_QM_0_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define DMA_QM_0_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define DMA_QM_0_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define DMA_QM_0_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define DMA_QM_0_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* DMA_QM_0_GLBL_PROT */ +#define DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT 0 +#define DMA_QM_0_GLBL_PROT_PQF_PROT_MASK 0x1 +#define DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT 1 +#define DMA_QM_0_GLBL_PROT_CQF_PROT_MASK 0x2 +#define DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT 2 +#define DMA_QM_0_GLBL_PROT_CP_PROT_MASK 0x4 +#define DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT 3 +#define DMA_QM_0_GLBL_PROT_DMA_PROT_MASK 0x8 +#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define DMA_QM_0_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* DMA_QM_0_GLBL_ERR_CFG */ +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define 
DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* DMA_QM_0_GLBL_ERR_ADDR_LO */ +#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_GLBL_ERR_ADDR_HI */ +#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_GLBL_ERR_WDATA */ +#define DMA_QM_0_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define DMA_QM_0_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_GLBL_SECURE_PROPS */ +#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define DMA_QM_0_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define DMA_QM_0_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* DMA_QM_0_GLBL_NON_SECURE_PROPS */ +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define DMA_QM_0_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* DMA_QM_0_GLBL_STS0 */ +#define DMA_QM_0_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define DMA_QM_0_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define DMA_QM_0_GLBL_STS0_CP_IDLE_SHIFT 2 +#define DMA_QM_0_GLBL_STS0_CP_IDLE_MASK 0x4 +#define DMA_QM_0_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define DMA_QM_0_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define DMA_QM_0_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define DMA_QM_0_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define DMA_QM_0_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* DMA_QM_0_GLBL_STS1 */ +#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define DMA_QM_0_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define DMA_QM_0_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define DMA_QM_0_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define DMA_QM_0_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define DMA_QM_0_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define DMA_QM_0_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define DMA_QM_0_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define DMA_QM_0_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define DMA_QM_0_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 +#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define DMA_QM_0_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* DMA_QM_0_PQ_BASE_LO */ +#define DMA_QM_0_PQ_BASE_LO_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_BASE_HI */ +#define DMA_QM_0_PQ_BASE_HI_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_SIZE */ +#define DMA_QM_0_PQ_SIZE_VAL_SHIFT 0 +#define DMA_QM_0_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PI */ +#define DMA_QM_0_PQ_PI_VAL_SHIFT 0 +#define DMA_QM_0_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_CI */ +#define DMA_QM_0_PQ_CI_VAL_SHIFT 0 +#define DMA_QM_0_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_CFG0 */ +#define DMA_QM_0_PQ_CFG0_RESERVED_SHIFT 0 +#define DMA_QM_0_PQ_CFG0_RESERVED_MASK 0x1 + +/* 
DMA_QM_0_PQ_CFG1 */ +#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define DMA_QM_0_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define DMA_QM_0_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* DMA_QM_0_PQ_ARUSER */ +#define DMA_QM_0_PQ_ARUSER_NOSNOOP_SHIFT 0 +#define DMA_QM_0_PQ_ARUSER_NOSNOOP_MASK 0x1 +#define DMA_QM_0_PQ_ARUSER_WORD_SHIFT 1 +#define DMA_QM_0_PQ_ARUSER_WORD_MASK 0x2 + +/* DMA_QM_0_PQ_PUSH0 */ +#define DMA_QM_0_PQ_PUSH0_PTR_LO_SHIFT 0 +#define DMA_QM_0_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PUSH1 */ +#define DMA_QM_0_PQ_PUSH1_PTR_HI_SHIFT 0 +#define DMA_QM_0_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PUSH2 */ +#define DMA_QM_0_PQ_PUSH2_TSIZE_SHIFT 0 +#define DMA_QM_0_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_PUSH3 */ +#define DMA_QM_0_PQ_PUSH3_RPT_SHIFT 0 +#define DMA_QM_0_PQ_PUSH3_RPT_MASK 0xFFFF +#define DMA_QM_0_PQ_PUSH3_CTL_SHIFT 16 +#define DMA_QM_0_PQ_PUSH3_CTL_MASK 0xFFFF0000 + +/* DMA_QM_0_PQ_STS0 */ +#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define DMA_QM_0_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define DMA_QM_0_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* DMA_QM_0_PQ_STS1 */ +#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define DMA_QM_0_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define DMA_QM_0_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define DMA_QM_0_PQ_STS1_PQ_BUSY_SHIFT 31 +#define DMA_QM_0_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* DMA_QM_0_PQ_RD_RATE_LIM_EN */ +#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN */ +#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_QM_0_PQ_RD_RATE_LIM_SAT */ +#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* DMA_QM_0_PQ_RD_RATE_LIM_TOUT */ +#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define DMA_QM_0_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* DMA_QM_0_CQ_CFG0 */ +#define DMA_QM_0_CQ_CFG0_RESERVED_SHIFT 0 +#define DMA_QM_0_CQ_CFG0_RESERVED_MASK 0x1 + +/* DMA_QM_0_CQ_CFG1 */ +#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define DMA_QM_0_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define DMA_QM_0_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_ARUSER */ +#define DMA_QM_0_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define DMA_QM_0_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define DMA_QM_0_CQ_ARUSER_WORD_SHIFT 1 +#define DMA_QM_0_CQ_ARUSER_WORD_MASK 0x2 + +/* DMA_QM_0_CQ_PTR_LO */ +#define DMA_QM_0_CQ_PTR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_PTR_HI */ +#define DMA_QM_0_CQ_PTR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_TSIZE */ +#define DMA_QM_0_CQ_TSIZE_VAL_SHIFT 0 +#define DMA_QM_0_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_CTL */ +#define DMA_QM_0_CQ_CTL_RPT_SHIFT 0 +#define DMA_QM_0_CQ_CTL_RPT_MASK 0xFFFF +#define DMA_QM_0_CQ_CTL_CTL_SHIFT 16 +#define DMA_QM_0_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_PTR_LO_STS */ +#define DMA_QM_0_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_PTR_HI_STS */ +#define DMA_QM_0_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define DMA_QM_0_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_TSIZE_STS */ +#define DMA_QM_0_CQ_TSIZE_STS_VAL_SHIFT 0 +#define DMA_QM_0_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* 
DMA_QM_0_CQ_CTL_STS */ +#define DMA_QM_0_CQ_CTL_STS_RPT_SHIFT 0 +#define DMA_QM_0_CQ_CTL_STS_RPT_MASK 0xFFFF +#define DMA_QM_0_CQ_CTL_STS_CTL_SHIFT 16 +#define DMA_QM_0_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_STS0 */ +#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define DMA_QM_0_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define DMA_QM_0_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* DMA_QM_0_CQ_STS1 */ +#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define DMA_QM_0_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define DMA_QM_0_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define DMA_QM_0_CQ_STS1_CQ_BUSY_SHIFT 31 +#define DMA_QM_0_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* DMA_QM_0_CQ_RD_RATE_LIM_EN */ +#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN */ +#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* DMA_QM_0_CQ_RD_RATE_LIM_SAT */ +#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* DMA_QM_0_CQ_RD_RATE_LIM_TOUT */ +#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define DMA_QM_0_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* DMA_QM_0_CQ_IFIFO_CNT */ +#define DMA_QM_0_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* DMA_QM_0_CP_MSG_BASE0_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE0_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE1_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE1_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE2_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE2_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE3_ADDR_LO */ +#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_MSG_BASE3_ADDR_HI */ +#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_TSIZE_OFFSET */ +#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET */ +#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET */ +#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define DMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_LDMA_COMMIT_OFFSET */ +#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 
+#define DMA_QM_0_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_FENCE0_RDATA */ +#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE1_RDATA */ +#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE2_RDATA */ +#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE3_RDATA */ +#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* DMA_QM_0_CP_FENCE0_CNT */ +#define DMA_QM_0_CP_FENCE0_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE0_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_FENCE1_CNT */ +#define DMA_QM_0_CP_FENCE1_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_FENCE2_CNT */ +#define DMA_QM_0_CP_FENCE2_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_FENCE3_CNT */ +#define DMA_QM_0_CP_FENCE3_CNT_VAL_SHIFT 0 +#define DMA_QM_0_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* DMA_QM_0_CP_STS */ +#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define DMA_QM_0_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define DMA_QM_0_CP_STS_ERDY_SHIFT 16 +#define DMA_QM_0_CP_STS_ERDY_MASK 0x10000 +#define DMA_QM_0_CP_STS_RRDY_SHIFT 17 +#define DMA_QM_0_CP_STS_RRDY_MASK 0x20000 +#define DMA_QM_0_CP_STS_MRDY_SHIFT 18 +#define DMA_QM_0_CP_STS_MRDY_MASK 0x40000 +#define DMA_QM_0_CP_STS_SW_STOP_SHIFT 19 +#define DMA_QM_0_CP_STS_SW_STOP_MASK 0x80000 +#define DMA_QM_0_CP_STS_FENCE_ID_SHIFT 20 +#define DMA_QM_0_CP_STS_FENCE_ID_MASK 0x300000 +#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define DMA_QM_0_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* DMA_QM_0_CP_CURRENT_INST_LO */ +#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define DMA_QM_0_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_CURRENT_INST_HI */ +#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define DMA_QM_0_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CP_BARRIER_CFG */ +#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define DMA_QM_0_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* DMA_QM_0_CP_DBG_0 */ +#define DMA_QM_0_CP_DBG_0_VAL_SHIFT 0 +#define DMA_QM_0_CP_DBG_0_VAL_MASK 0xFF + +/* DMA_QM_0_PQ_BUF_ADDR */ +#define DMA_QM_0_PQ_BUF_ADDR_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_PQ_BUF_RDATA */ +#define DMA_QM_0_PQ_BUF_RDATA_VAL_SHIFT 0 +#define DMA_QM_0_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_BUF_ADDR */ +#define DMA_QM_0_CQ_BUF_ADDR_VAL_SHIFT 0 +#define DMA_QM_0_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* DMA_QM_0_CQ_BUF_RDATA */ +#define DMA_QM_0_CQ_BUF_RDATA_VAL_SHIFT 0 +#define DMA_QM_0_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_DMA_QM_0_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h new file mode 100644 index 000000000000..c693bc5dcb22 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_0_REGS_H_ +#define ASIC_REG_DMA_QM_0_REGS_H_ + +/* + ***************************************** + * DMA_QM_0 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_0_GLBL_CFG0 0x400000 + +#define mmDMA_QM_0_GLBL_CFG1 0x400004 + +#define mmDMA_QM_0_GLBL_PROT 0x400008 + +#define mmDMA_QM_0_GLBL_ERR_CFG 0x40000C + +#define mmDMA_QM_0_GLBL_ERR_ADDR_LO 0x400010 + +#define mmDMA_QM_0_GLBL_ERR_ADDR_HI 0x400014 + +#define mmDMA_QM_0_GLBL_ERR_WDATA 0x400018 + +#define mmDMA_QM_0_GLBL_SECURE_PROPS 0x40001C + +#define mmDMA_QM_0_GLBL_NON_SECURE_PROPS 0x400020 + +#define mmDMA_QM_0_GLBL_STS0 0x400024 + +#define mmDMA_QM_0_GLBL_STS1 0x400028 + +#define mmDMA_QM_0_PQ_BASE_LO 0x400060 + +#define mmDMA_QM_0_PQ_BASE_HI 0x400064 + +#define mmDMA_QM_0_PQ_SIZE 0x400068 + +#define mmDMA_QM_0_PQ_PI 0x40006C + +#define mmDMA_QM_0_PQ_CI 0x400070 + +#define mmDMA_QM_0_PQ_CFG0 0x400074 + +#define mmDMA_QM_0_PQ_CFG1 0x400078 + +#define mmDMA_QM_0_PQ_ARUSER 0x40007C + +#define mmDMA_QM_0_PQ_PUSH0 0x400080 + +#define mmDMA_QM_0_PQ_PUSH1 0x400084 + +#define mmDMA_QM_0_PQ_PUSH2 0x400088 + +#define mmDMA_QM_0_PQ_PUSH3 0x40008C + +#define mmDMA_QM_0_PQ_STS0 0x400090 + +#define mmDMA_QM_0_PQ_STS1 0x400094 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_EN 0x4000A0 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_RST_TOKEN 0x4000A4 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_SAT 0x4000A8 + +#define mmDMA_QM_0_PQ_RD_RATE_LIM_TOUT 0x4000AC + +#define mmDMA_QM_0_CQ_CFG0 0x4000B0 + +#define mmDMA_QM_0_CQ_CFG1 0x4000B4 + +#define mmDMA_QM_0_CQ_ARUSER 0x4000B8 + +#define mmDMA_QM_0_CQ_PTR_LO 0x4000C0 + +#define mmDMA_QM_0_CQ_PTR_HI 0x4000C4 + +#define mmDMA_QM_0_CQ_TSIZE 0x4000C8 + +#define mmDMA_QM_0_CQ_CTL 0x4000CC + +#define mmDMA_QM_0_CQ_PTR_LO_STS 0x4000D4 + +#define mmDMA_QM_0_CQ_PTR_HI_STS 0x4000D8 + +#define mmDMA_QM_0_CQ_TSIZE_STS 0x4000DC + +#define mmDMA_QM_0_CQ_CTL_STS 0x4000E0 + +#define mmDMA_QM_0_CQ_STS0 0x4000E4 + +#define mmDMA_QM_0_CQ_STS1 0x4000E8 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_EN 0x4000F0 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_RST_TOKEN 0x4000F4 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_SAT 0x4000F8 + +#define mmDMA_QM_0_CQ_RD_RATE_LIM_TOUT 0x4000FC + +#define mmDMA_QM_0_CQ_IFIFO_CNT 0x400108 + +#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO 0x400120 + +#define mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI 0x400124 + +#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO 0x400128 + +#define mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI 0x40012C + +#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_LO 0x400130 + +#define mmDMA_QM_0_CP_MSG_BASE2_ADDR_HI 0x400134 + +#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_LO 0x400138 + +#define mmDMA_QM_0_CP_MSG_BASE3_ADDR_HI 0x40013C + +#define mmDMA_QM_0_CP_LDMA_TSIZE_OFFSET 0x400140 + +#define mmDMA_QM_0_CP_LDMA_SRC_BASE_LO_OFFSET 0x400144 + +#define mmDMA_QM_0_CP_LDMA_SRC_BASE_HI_OFFSET 0x400148 + +#define mmDMA_QM_0_CP_LDMA_DST_BASE_LO_OFFSET 0x40014C + +#define mmDMA_QM_0_CP_LDMA_DST_BASE_HI_OFFSET 0x400150 + +#define mmDMA_QM_0_CP_LDMA_COMMIT_OFFSET 0x400154 + +#define mmDMA_QM_0_CP_FENCE0_RDATA 0x400158 + +#define mmDMA_QM_0_CP_FENCE1_RDATA 0x40015C + +#define mmDMA_QM_0_CP_FENCE2_RDATA 0x400160 + +#define mmDMA_QM_0_CP_FENCE3_RDATA 0x400164 + +#define mmDMA_QM_0_CP_FENCE0_CNT 0x400168 + +#define mmDMA_QM_0_CP_FENCE1_CNT 0x40016C + +#define mmDMA_QM_0_CP_FENCE2_CNT 0x400170 + +#define mmDMA_QM_0_CP_FENCE3_CNT 0x400174 + +#define mmDMA_QM_0_CP_STS 0x400178 + 
+#define mmDMA_QM_0_CP_CURRENT_INST_LO 0x40017C + +#define mmDMA_QM_0_CP_CURRENT_INST_HI 0x400180 + +#define mmDMA_QM_0_CP_BARRIER_CFG 0x400184 + +#define mmDMA_QM_0_CP_DBG_0 0x400188 + +#define mmDMA_QM_0_PQ_BUF_ADDR 0x400300 + +#define mmDMA_QM_0_PQ_BUF_RDATA 0x400304 + +#define mmDMA_QM_0_CQ_BUF_ADDR 0x400308 + +#define mmDMA_QM_0_CQ_BUF_RDATA 0x40030C + +#endif /* ASIC_REG_DMA_QM_0_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h new file mode 100644 index 000000000000..da928390f89c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_1_REGS_H_ +#define ASIC_REG_DMA_QM_1_REGS_H_ + +/* + ***************************************** + * DMA_QM_1 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_1_GLBL_CFG0 0x408000 + +#define mmDMA_QM_1_GLBL_CFG1 0x408004 + +#define mmDMA_QM_1_GLBL_PROT 0x408008 + +#define mmDMA_QM_1_GLBL_ERR_CFG 0x40800C + +#define mmDMA_QM_1_GLBL_ERR_ADDR_LO 0x408010 + +#define mmDMA_QM_1_GLBL_ERR_ADDR_HI 0x408014 + +#define mmDMA_QM_1_GLBL_ERR_WDATA 0x408018 + +#define mmDMA_QM_1_GLBL_SECURE_PROPS 0x40801C + +#define mmDMA_QM_1_GLBL_NON_SECURE_PROPS 0x408020 + +#define mmDMA_QM_1_GLBL_STS0 0x408024 + +#define mmDMA_QM_1_GLBL_STS1 0x408028 + +#define mmDMA_QM_1_PQ_BASE_LO 0x408060 + +#define mmDMA_QM_1_PQ_BASE_HI 0x408064 + +#define mmDMA_QM_1_PQ_SIZE 0x408068 + +#define mmDMA_QM_1_PQ_PI 0x40806C + +#define mmDMA_QM_1_PQ_CI 0x408070 + +#define mmDMA_QM_1_PQ_CFG0 0x408074 + +#define mmDMA_QM_1_PQ_CFG1 0x408078 + +#define mmDMA_QM_1_PQ_ARUSER 0x40807C + +#define mmDMA_QM_1_PQ_PUSH0 0x408080 + +#define mmDMA_QM_1_PQ_PUSH1 0x408084 + +#define mmDMA_QM_1_PQ_PUSH2 0x408088 + +#define mmDMA_QM_1_PQ_PUSH3 0x40808C + +#define mmDMA_QM_1_PQ_STS0 0x408090 + +#define mmDMA_QM_1_PQ_STS1 0x408094 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_EN 0x4080A0 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_RST_TOKEN 0x4080A4 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_SAT 0x4080A8 + +#define mmDMA_QM_1_PQ_RD_RATE_LIM_TOUT 0x4080AC + +#define mmDMA_QM_1_CQ_CFG0 0x4080B0 + +#define mmDMA_QM_1_CQ_CFG1 0x4080B4 + +#define mmDMA_QM_1_CQ_ARUSER 0x4080B8 + +#define mmDMA_QM_1_CQ_PTR_LO 0x4080C0 + +#define mmDMA_QM_1_CQ_PTR_HI 0x4080C4 + +#define mmDMA_QM_1_CQ_TSIZE 0x4080C8 + +#define mmDMA_QM_1_CQ_CTL 0x4080CC + +#define mmDMA_QM_1_CQ_PTR_LO_STS 0x4080D4 + +#define mmDMA_QM_1_CQ_PTR_HI_STS 0x4080D8 + +#define mmDMA_QM_1_CQ_TSIZE_STS 0x4080DC + +#define mmDMA_QM_1_CQ_CTL_STS 0x4080E0 + +#define mmDMA_QM_1_CQ_STS0 0x4080E4 + +#define mmDMA_QM_1_CQ_STS1 0x4080E8 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_EN 0x4080F0 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_RST_TOKEN 0x4080F4 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_SAT 0x4080F8 + +#define mmDMA_QM_1_CQ_RD_RATE_LIM_TOUT 0x4080FC + +#define mmDMA_QM_1_CQ_IFIFO_CNT 0x408108 + +#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_LO 0x408120 + +#define mmDMA_QM_1_CP_MSG_BASE0_ADDR_HI 0x408124 + +#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_LO 0x408128 + +#define mmDMA_QM_1_CP_MSG_BASE1_ADDR_HI 0x40812C + +#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_LO 0x408130 + +#define mmDMA_QM_1_CP_MSG_BASE2_ADDR_HI 0x408134 + +#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_LO 
0x408138 + +#define mmDMA_QM_1_CP_MSG_BASE3_ADDR_HI 0x40813C + +#define mmDMA_QM_1_CP_LDMA_TSIZE_OFFSET 0x408140 + +#define mmDMA_QM_1_CP_LDMA_SRC_BASE_LO_OFFSET 0x408144 + +#define mmDMA_QM_1_CP_LDMA_SRC_BASE_HI_OFFSET 0x408148 + +#define mmDMA_QM_1_CP_LDMA_DST_BASE_LO_OFFSET 0x40814C + +#define mmDMA_QM_1_CP_LDMA_DST_BASE_HI_OFFSET 0x408150 + +#define mmDMA_QM_1_CP_LDMA_COMMIT_OFFSET 0x408154 + +#define mmDMA_QM_1_CP_FENCE0_RDATA 0x408158 + +#define mmDMA_QM_1_CP_FENCE1_RDATA 0x40815C + +#define mmDMA_QM_1_CP_FENCE2_RDATA 0x408160 + +#define mmDMA_QM_1_CP_FENCE3_RDATA 0x408164 + +#define mmDMA_QM_1_CP_FENCE0_CNT 0x408168 + +#define mmDMA_QM_1_CP_FENCE1_CNT 0x40816C + +#define mmDMA_QM_1_CP_FENCE2_CNT 0x408170 + +#define mmDMA_QM_1_CP_FENCE3_CNT 0x408174 + +#define mmDMA_QM_1_CP_STS 0x408178 + +#define mmDMA_QM_1_CP_CURRENT_INST_LO 0x40817C + +#define mmDMA_QM_1_CP_CURRENT_INST_HI 0x408180 + +#define mmDMA_QM_1_CP_BARRIER_CFG 0x408184 + +#define mmDMA_QM_1_CP_DBG_0 0x408188 + +#define mmDMA_QM_1_PQ_BUF_ADDR 0x408300 + +#define mmDMA_QM_1_PQ_BUF_RDATA 0x408304 + +#define mmDMA_QM_1_CQ_BUF_ADDR 0x408308 + +#define mmDMA_QM_1_CQ_BUF_RDATA 0x40830C + +#endif /* ASIC_REG_DMA_QM_1_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h new file mode 100644 index 000000000000..b4f06e9b71d6 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_2_REGS_H_ +#define ASIC_REG_DMA_QM_2_REGS_H_ + +/* + ***************************************** + * DMA_QM_2 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_2_GLBL_CFG0 0x410000 + +#define mmDMA_QM_2_GLBL_CFG1 0x410004 + +#define mmDMA_QM_2_GLBL_PROT 0x410008 + +#define mmDMA_QM_2_GLBL_ERR_CFG 0x41000C + +#define mmDMA_QM_2_GLBL_ERR_ADDR_LO 0x410010 + +#define mmDMA_QM_2_GLBL_ERR_ADDR_HI 0x410014 + +#define mmDMA_QM_2_GLBL_ERR_WDATA 0x410018 + +#define mmDMA_QM_2_GLBL_SECURE_PROPS 0x41001C + +#define mmDMA_QM_2_GLBL_NON_SECURE_PROPS 0x410020 + +#define mmDMA_QM_2_GLBL_STS0 0x410024 + +#define mmDMA_QM_2_GLBL_STS1 0x410028 + +#define mmDMA_QM_2_PQ_BASE_LO 0x410060 + +#define mmDMA_QM_2_PQ_BASE_HI 0x410064 + +#define mmDMA_QM_2_PQ_SIZE 0x410068 + +#define mmDMA_QM_2_PQ_PI 0x41006C + +#define mmDMA_QM_2_PQ_CI 0x410070 + +#define mmDMA_QM_2_PQ_CFG0 0x410074 + +#define mmDMA_QM_2_PQ_CFG1 0x410078 + +#define mmDMA_QM_2_PQ_ARUSER 0x41007C + +#define mmDMA_QM_2_PQ_PUSH0 0x410080 + +#define mmDMA_QM_2_PQ_PUSH1 0x410084 + +#define mmDMA_QM_2_PQ_PUSH2 0x410088 + +#define mmDMA_QM_2_PQ_PUSH3 0x41008C + +#define mmDMA_QM_2_PQ_STS0 0x410090 + +#define mmDMA_QM_2_PQ_STS1 0x410094 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_EN 0x4100A0 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_RST_TOKEN 0x4100A4 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_SAT 0x4100A8 + +#define mmDMA_QM_2_PQ_RD_RATE_LIM_TOUT 0x4100AC + +#define mmDMA_QM_2_CQ_CFG0 0x4100B0 + +#define mmDMA_QM_2_CQ_CFG1 0x4100B4 + +#define mmDMA_QM_2_CQ_ARUSER 0x4100B8 + +#define mmDMA_QM_2_CQ_PTR_LO 0x4100C0 + +#define mmDMA_QM_2_CQ_PTR_HI 0x4100C4 + +#define mmDMA_QM_2_CQ_TSIZE 0x4100C8 + +#define mmDMA_QM_2_CQ_CTL 0x4100CC + +#define mmDMA_QM_2_CQ_PTR_LO_STS 0x4100D4 + +#define 
mmDMA_QM_2_CQ_PTR_HI_STS 0x4100D8 + +#define mmDMA_QM_2_CQ_TSIZE_STS 0x4100DC + +#define mmDMA_QM_2_CQ_CTL_STS 0x4100E0 + +#define mmDMA_QM_2_CQ_STS0 0x4100E4 + +#define mmDMA_QM_2_CQ_STS1 0x4100E8 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_EN 0x4100F0 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_RST_TOKEN 0x4100F4 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_SAT 0x4100F8 + +#define mmDMA_QM_2_CQ_RD_RATE_LIM_TOUT 0x4100FC + +#define mmDMA_QM_2_CQ_IFIFO_CNT 0x410108 + +#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_LO 0x410120 + +#define mmDMA_QM_2_CP_MSG_BASE0_ADDR_HI 0x410124 + +#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_LO 0x410128 + +#define mmDMA_QM_2_CP_MSG_BASE1_ADDR_HI 0x41012C + +#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_LO 0x410130 + +#define mmDMA_QM_2_CP_MSG_BASE2_ADDR_HI 0x410134 + +#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_LO 0x410138 + +#define mmDMA_QM_2_CP_MSG_BASE3_ADDR_HI 0x41013C + +#define mmDMA_QM_2_CP_LDMA_TSIZE_OFFSET 0x410140 + +#define mmDMA_QM_2_CP_LDMA_SRC_BASE_LO_OFFSET 0x410144 + +#define mmDMA_QM_2_CP_LDMA_SRC_BASE_HI_OFFSET 0x410148 + +#define mmDMA_QM_2_CP_LDMA_DST_BASE_LO_OFFSET 0x41014C + +#define mmDMA_QM_2_CP_LDMA_DST_BASE_HI_OFFSET 0x410150 + +#define mmDMA_QM_2_CP_LDMA_COMMIT_OFFSET 0x410154 + +#define mmDMA_QM_2_CP_FENCE0_RDATA 0x410158 + +#define mmDMA_QM_2_CP_FENCE1_RDATA 0x41015C + +#define mmDMA_QM_2_CP_FENCE2_RDATA 0x410160 + +#define mmDMA_QM_2_CP_FENCE3_RDATA 0x410164 + +#define mmDMA_QM_2_CP_FENCE0_CNT 0x410168 + +#define mmDMA_QM_2_CP_FENCE1_CNT 0x41016C + +#define mmDMA_QM_2_CP_FENCE2_CNT 0x410170 + +#define mmDMA_QM_2_CP_FENCE3_CNT 0x410174 + +#define mmDMA_QM_2_CP_STS 0x410178 + +#define mmDMA_QM_2_CP_CURRENT_INST_LO 0x41017C + +#define mmDMA_QM_2_CP_CURRENT_INST_HI 0x410180 + +#define mmDMA_QM_2_CP_BARRIER_CFG 0x410184 + +#define mmDMA_QM_2_CP_DBG_0 0x410188 + +#define mmDMA_QM_2_PQ_BUF_ADDR 0x410300 + +#define mmDMA_QM_2_PQ_BUF_RDATA 0x410304 + +#define mmDMA_QM_2_CQ_BUF_ADDR 0x410308 + +#define mmDMA_QM_2_CQ_BUF_RDATA 0x41030C + +#endif /* ASIC_REG_DMA_QM_2_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h new file mode 100644 index 000000000000..53e3cd78a06b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_3_REGS_H_ +#define ASIC_REG_DMA_QM_3_REGS_H_ + +/* + ***************************************** + * DMA_QM_3 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_3_GLBL_CFG0 0x418000 + +#define mmDMA_QM_3_GLBL_CFG1 0x418004 + +#define mmDMA_QM_3_GLBL_PROT 0x418008 + +#define mmDMA_QM_3_GLBL_ERR_CFG 0x41800C + +#define mmDMA_QM_3_GLBL_ERR_ADDR_LO 0x418010 + +#define mmDMA_QM_3_GLBL_ERR_ADDR_HI 0x418014 + +#define mmDMA_QM_3_GLBL_ERR_WDATA 0x418018 + +#define mmDMA_QM_3_GLBL_SECURE_PROPS 0x41801C + +#define mmDMA_QM_3_GLBL_NON_SECURE_PROPS 0x418020 + +#define mmDMA_QM_3_GLBL_STS0 0x418024 + +#define mmDMA_QM_3_GLBL_STS1 0x418028 + +#define mmDMA_QM_3_PQ_BASE_LO 0x418060 + +#define mmDMA_QM_3_PQ_BASE_HI 0x418064 + +#define mmDMA_QM_3_PQ_SIZE 0x418068 + +#define mmDMA_QM_3_PQ_PI 0x41806C + +#define mmDMA_QM_3_PQ_CI 0x418070 + +#define mmDMA_QM_3_PQ_CFG0 0x418074 + +#define mmDMA_QM_3_PQ_CFG1 0x418078 + +#define mmDMA_QM_3_PQ_ARUSER 0x41807C + +#define mmDMA_QM_3_PQ_PUSH0 0x418080 + +#define mmDMA_QM_3_PQ_PUSH1 0x418084 + +#define mmDMA_QM_3_PQ_PUSH2 0x418088 + +#define mmDMA_QM_3_PQ_PUSH3 0x41808C + +#define mmDMA_QM_3_PQ_STS0 0x418090 + +#define mmDMA_QM_3_PQ_STS1 0x418094 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_EN 0x4180A0 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_RST_TOKEN 0x4180A4 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_SAT 0x4180A8 + +#define mmDMA_QM_3_PQ_RD_RATE_LIM_TOUT 0x4180AC + +#define mmDMA_QM_3_CQ_CFG0 0x4180B0 + +#define mmDMA_QM_3_CQ_CFG1 0x4180B4 + +#define mmDMA_QM_3_CQ_ARUSER 0x4180B8 + +#define mmDMA_QM_3_CQ_PTR_LO 0x4180C0 + +#define mmDMA_QM_3_CQ_PTR_HI 0x4180C4 + +#define mmDMA_QM_3_CQ_TSIZE 0x4180C8 + +#define mmDMA_QM_3_CQ_CTL 0x4180CC + +#define mmDMA_QM_3_CQ_PTR_LO_STS 0x4180D4 + +#define mmDMA_QM_3_CQ_PTR_HI_STS 0x4180D8 + +#define mmDMA_QM_3_CQ_TSIZE_STS 0x4180DC + +#define mmDMA_QM_3_CQ_CTL_STS 0x4180E0 + +#define mmDMA_QM_3_CQ_STS0 0x4180E4 + +#define mmDMA_QM_3_CQ_STS1 0x4180E8 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_EN 0x4180F0 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_RST_TOKEN 0x4180F4 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_SAT 0x4180F8 + +#define mmDMA_QM_3_CQ_RD_RATE_LIM_TOUT 0x4180FC + +#define mmDMA_QM_3_CQ_IFIFO_CNT 0x418108 + +#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_LO 0x418120 + +#define mmDMA_QM_3_CP_MSG_BASE0_ADDR_HI 0x418124 + +#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_LO 0x418128 + +#define mmDMA_QM_3_CP_MSG_BASE1_ADDR_HI 0x41812C + +#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_LO 0x418130 + +#define mmDMA_QM_3_CP_MSG_BASE2_ADDR_HI 0x418134 + +#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_LO 0x418138 + +#define mmDMA_QM_3_CP_MSG_BASE3_ADDR_HI 0x41813C + +#define mmDMA_QM_3_CP_LDMA_TSIZE_OFFSET 0x418140 + +#define mmDMA_QM_3_CP_LDMA_SRC_BASE_LO_OFFSET 0x418144 + +#define mmDMA_QM_3_CP_LDMA_SRC_BASE_HI_OFFSET 0x418148 + +#define mmDMA_QM_3_CP_LDMA_DST_BASE_LO_OFFSET 0x41814C + +#define mmDMA_QM_3_CP_LDMA_DST_BASE_HI_OFFSET 0x418150 + +#define mmDMA_QM_3_CP_LDMA_COMMIT_OFFSET 0x418154 + +#define mmDMA_QM_3_CP_FENCE0_RDATA 0x418158 + +#define mmDMA_QM_3_CP_FENCE1_RDATA 0x41815C + +#define mmDMA_QM_3_CP_FENCE2_RDATA 0x418160 + +#define mmDMA_QM_3_CP_FENCE3_RDATA 0x418164 + +#define mmDMA_QM_3_CP_FENCE0_CNT 0x418168 + +#define mmDMA_QM_3_CP_FENCE1_CNT 0x41816C + +#define mmDMA_QM_3_CP_FENCE2_CNT 0x418170 + +#define mmDMA_QM_3_CP_FENCE3_CNT 0x418174 + +#define mmDMA_QM_3_CP_STS 0x418178 + 
+#define mmDMA_QM_3_CP_CURRENT_INST_LO 0x41817C + +#define mmDMA_QM_3_CP_CURRENT_INST_HI 0x418180 + +#define mmDMA_QM_3_CP_BARRIER_CFG 0x418184 + +#define mmDMA_QM_3_CP_DBG_0 0x418188 + +#define mmDMA_QM_3_PQ_BUF_ADDR 0x418300 + +#define mmDMA_QM_3_PQ_BUF_RDATA 0x418304 + +#define mmDMA_QM_3_CQ_BUF_ADDR 0x418308 + +#define mmDMA_QM_3_CQ_BUF_RDATA 0x41830C + +#endif /* ASIC_REG_DMA_QM_3_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h new file mode 100644 index 000000000000..e0eb5f260201 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_DMA_QM_4_REGS_H_ +#define ASIC_REG_DMA_QM_4_REGS_H_ + +/* + ***************************************** + * DMA_QM_4 (Prototype: QMAN) + ***************************************** + */ + +#define mmDMA_QM_4_GLBL_CFG0 0x420000 + +#define mmDMA_QM_4_GLBL_CFG1 0x420004 + +#define mmDMA_QM_4_GLBL_PROT 0x420008 + +#define mmDMA_QM_4_GLBL_ERR_CFG 0x42000C + +#define mmDMA_QM_4_GLBL_ERR_ADDR_LO 0x420010 + +#define mmDMA_QM_4_GLBL_ERR_ADDR_HI 0x420014 + +#define mmDMA_QM_4_GLBL_ERR_WDATA 0x420018 + +#define mmDMA_QM_4_GLBL_SECURE_PROPS 0x42001C + +#define mmDMA_QM_4_GLBL_NON_SECURE_PROPS 0x420020 + +#define mmDMA_QM_4_GLBL_STS0 0x420024 + +#define mmDMA_QM_4_GLBL_STS1 0x420028 + +#define mmDMA_QM_4_PQ_BASE_LO 0x420060 + +#define mmDMA_QM_4_PQ_BASE_HI 0x420064 + +#define mmDMA_QM_4_PQ_SIZE 0x420068 + +#define mmDMA_QM_4_PQ_PI 0x42006C + +#define mmDMA_QM_4_PQ_CI 0x420070 + +#define mmDMA_QM_4_PQ_CFG0 0x420074 + +#define mmDMA_QM_4_PQ_CFG1 0x420078 + +#define mmDMA_QM_4_PQ_ARUSER 0x42007C + +#define mmDMA_QM_4_PQ_PUSH0 0x420080 + +#define mmDMA_QM_4_PQ_PUSH1 0x420084 + +#define mmDMA_QM_4_PQ_PUSH2 0x420088 + +#define mmDMA_QM_4_PQ_PUSH3 0x42008C + +#define mmDMA_QM_4_PQ_STS0 0x420090 + +#define mmDMA_QM_4_PQ_STS1 0x420094 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_EN 0x4200A0 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_RST_TOKEN 0x4200A4 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_SAT 0x4200A8 + +#define mmDMA_QM_4_PQ_RD_RATE_LIM_TOUT 0x4200AC + +#define mmDMA_QM_4_CQ_CFG0 0x4200B0 + +#define mmDMA_QM_4_CQ_CFG1 0x4200B4 + +#define mmDMA_QM_4_CQ_ARUSER 0x4200B8 + +#define mmDMA_QM_4_CQ_PTR_LO 0x4200C0 + +#define mmDMA_QM_4_CQ_PTR_HI 0x4200C4 + +#define mmDMA_QM_4_CQ_TSIZE 0x4200C8 + +#define mmDMA_QM_4_CQ_CTL 0x4200CC + +#define mmDMA_QM_4_CQ_PTR_LO_STS 0x4200D4 + +#define mmDMA_QM_4_CQ_PTR_HI_STS 0x4200D8 + +#define mmDMA_QM_4_CQ_TSIZE_STS 0x4200DC + +#define mmDMA_QM_4_CQ_CTL_STS 0x4200E0 + +#define mmDMA_QM_4_CQ_STS0 0x4200E4 + +#define mmDMA_QM_4_CQ_STS1 0x4200E8 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_EN 0x4200F0 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_RST_TOKEN 0x4200F4 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_SAT 0x4200F8 + +#define mmDMA_QM_4_CQ_RD_RATE_LIM_TOUT 0x4200FC + +#define mmDMA_QM_4_CQ_IFIFO_CNT 0x420108 + +#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_LO 0x420120 + +#define mmDMA_QM_4_CP_MSG_BASE0_ADDR_HI 0x420124 + +#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_LO 0x420128 + +#define mmDMA_QM_4_CP_MSG_BASE1_ADDR_HI 0x42012C + +#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_LO 0x420130 + +#define mmDMA_QM_4_CP_MSG_BASE2_ADDR_HI 0x420134 + +#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_LO 
0x420138 + +#define mmDMA_QM_4_CP_MSG_BASE3_ADDR_HI 0x42013C + +#define mmDMA_QM_4_CP_LDMA_TSIZE_OFFSET 0x420140 + +#define mmDMA_QM_4_CP_LDMA_SRC_BASE_LO_OFFSET 0x420144 + +#define mmDMA_QM_4_CP_LDMA_SRC_BASE_HI_OFFSET 0x420148 + +#define mmDMA_QM_4_CP_LDMA_DST_BASE_LO_OFFSET 0x42014C + +#define mmDMA_QM_4_CP_LDMA_DST_BASE_HI_OFFSET 0x420150 + +#define mmDMA_QM_4_CP_LDMA_COMMIT_OFFSET 0x420154 + +#define mmDMA_QM_4_CP_FENCE0_RDATA 0x420158 + +#define mmDMA_QM_4_CP_FENCE1_RDATA 0x42015C + +#define mmDMA_QM_4_CP_FENCE2_RDATA 0x420160 + +#define mmDMA_QM_4_CP_FENCE3_RDATA 0x420164 + +#define mmDMA_QM_4_CP_FENCE0_CNT 0x420168 + +#define mmDMA_QM_4_CP_FENCE1_CNT 0x42016C + +#define mmDMA_QM_4_CP_FENCE2_CNT 0x420170 + +#define mmDMA_QM_4_CP_FENCE3_CNT 0x420174 + +#define mmDMA_QM_4_CP_STS 0x420178 + +#define mmDMA_QM_4_CP_CURRENT_INST_LO 0x42017C + +#define mmDMA_QM_4_CP_CURRENT_INST_HI 0x420180 + +#define mmDMA_QM_4_CP_BARRIER_CFG 0x420184 + +#define mmDMA_QM_4_CP_DBG_0 0x420188 + +#define mmDMA_QM_4_PQ_BUF_ADDR 0x420300 + +#define mmDMA_QM_4_PQ_BUF_RDATA 0x420304 + +#define mmDMA_QM_4_CQ_BUF_ADDR 0x420308 + +#define mmDMA_QM_4_CQ_BUF_RDATA 0x42030C + +#endif /* ASIC_REG_DMA_QM_4_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h new file mode 100644 index 000000000000..85b15010cd7a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h @@ -0,0 +1,1372 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef GOYA_BLOCKS_H_ +#define GOYA_BLOCKS_H_ + +#define mmPCI_NRTR_BASE 0x7FFC000000ull +#define PCI_NRTR_MAX_OFFSET 0x608 +#define PCI_NRTR_SECTION 0x4000 +#define mmPCI_RD_REGULATOR_BASE 0x7FFC004000ull +#define PCI_RD_REGULATOR_MAX_OFFSET 0x74 +#define PCI_RD_REGULATOR_SECTION 0x1000 +#define mmPCI_WR_REGULATOR_BASE 0x7FFC005000ull +#define PCI_WR_REGULATOR_MAX_OFFSET 0x74 +#define PCI_WR_REGULATOR_SECTION 0x3B000 +#define mmMME1_RTR_BASE 0x7FFC040000ull +#define MME1_RTR_MAX_OFFSET 0x608 +#define MME1_RTR_SECTION 0x4000 +#define mmMME1_RD_REGULATOR_BASE 0x7FFC044000ull +#define MME1_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME1_RD_REGULATOR_SECTION 0x1000 +#define mmMME1_WR_REGULATOR_BASE 0x7FFC045000ull +#define MME1_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME1_WR_REGULATOR_SECTION 0x3B000 +#define mmMME2_RTR_BASE 0x7FFC080000ull +#define MME2_RTR_MAX_OFFSET 0x608 +#define MME2_RTR_SECTION 0x4000 +#define mmMME2_RD_REGULATOR_BASE 0x7FFC084000ull +#define MME2_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME2_RD_REGULATOR_SECTION 0x1000 +#define mmMME2_WR_REGULATOR_BASE 0x7FFC085000ull +#define MME2_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME2_WR_REGULATOR_SECTION 0x3B000 +#define mmMME3_RTR_BASE 0x7FFC0C0000ull +#define MME3_RTR_MAX_OFFSET 0x608 +#define MME3_RTR_SECTION 0x4000 +#define mmMME3_RD_REGULATOR_BASE 0x7FFC0C4000ull +#define MME3_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME3_RD_REGULATOR_SECTION 0x1000 +#define mmMME3_WR_REGULATOR_BASE 0x7FFC0C5000ull +#define MME3_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME3_WR_REGULATOR_SECTION 0xB000 +#define mmMME_BASE 0x7FFC0D0000ull +#define MME_MAX_OFFSET 0xBB0 +#define MME_SECTION 0x8000 +#define mmMME_QM_BASE 0x7FFC0D8000ull +#define MME_QM_MAX_OFFSET 0x310 +#define MME_QM_SECTION 0x1000 +#define mmMME_CMDQ_BASE 
0x7FFC0D9000ull +#define MME_CMDQ_MAX_OFFSET 0x310 +#define MME_CMDQ_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_0_BASE 0x7FFC0DA000ull +#define ACC_MS_ECC_MEM_0_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_0_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_1_BASE 0x7FFC0DB000ull +#define ACC_MS_ECC_MEM_1_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_1_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_2_BASE 0x7FFC0DC000ull +#define ACC_MS_ECC_MEM_2_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_2_SECTION 0x1000 +#define mmACC_MS_ECC_MEM_3_BASE 0x7FFC0DD000ull +#define ACC_MS_ECC_MEM_3_MAX_OFFSET 0x0 +#define ACC_MS_ECC_MEM_3_SECTION 0x1000 +#define mmSBA_ECC_MEM_BASE 0x7FFC0DE000ull +#define SBA_ECC_MEM_MAX_OFFSET 0x0 +#define SBA_ECC_MEM_SECTION 0x1000 +#define mmSBB_ECC_MEM_BASE 0x7FFC0DF000ull +#define SBB_ECC_MEM_MAX_OFFSET 0x0 +#define SBB_ECC_MEM_SECTION 0x21000 +#define mmMME4_RTR_BASE 0x7FFC100000ull +#define MME4_RTR_MAX_OFFSET 0x608 +#define MME4_RTR_SECTION 0x4000 +#define mmMME4_RD_REGULATOR_BASE 0x7FFC104000ull +#define MME4_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME4_RD_REGULATOR_SECTION 0x1000 +#define mmMME4_WR_REGULATOR_BASE 0x7FFC105000ull +#define MME4_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME4_WR_REGULATOR_SECTION 0xB000 +#define mmSYNC_MNGR_BASE 0x7FFC110000ull +#define SYNC_MNGR_MAX_OFFSET 0x4400 +#define SYNC_MNGR_SECTION 0x30000 +#define mmMME5_RTR_BASE 0x7FFC140000ull +#define MME5_RTR_MAX_OFFSET 0x608 +#define MME5_RTR_SECTION 0x4000 +#define mmMME5_RD_REGULATOR_BASE 0x7FFC144000ull +#define MME5_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME5_RD_REGULATOR_SECTION 0x1000 +#define mmMME5_WR_REGULATOR_BASE 0x7FFC145000ull +#define MME5_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME5_WR_REGULATOR_SECTION 0x3B000 +#define mmMME6_RTR_BASE 0x7FFC180000ull +#define MME6_RTR_MAX_OFFSET 0x608 +#define MME6_RTR_SECTION 0x4000 +#define mmMME6_RD_REGULATOR_BASE 0x7FFC184000ull +#define MME6_RD_REGULATOR_MAX_OFFSET 0x74 +#define MME6_RD_REGULATOR_SECTION 0x1000 +#define mmMME6_WR_REGULATOR_BASE 0x7FFC185000ull +#define MME6_WR_REGULATOR_MAX_OFFSET 0x74 +#define MME6_WR_REGULATOR_SECTION 0x3B000 +#define mmDMA_NRTR_BASE 0x7FFC1C0000ull +#define DMA_NRTR_MAX_OFFSET 0x608 +#define DMA_NRTR_SECTION 0x4000 +#define mmDMA_RD_REGULATOR_BASE 0x7FFC1C4000ull +#define DMA_RD_REGULATOR_MAX_OFFSET 0x74 +#define DMA_RD_REGULATOR_SECTION 0x1000 +#define mmDMA_WR_REGULATOR_BASE 0x7FFC1C5000ull +#define DMA_WR_REGULATOR_MAX_OFFSET 0x74 +#define DMA_WR_REGULATOR_SECTION 0x3B000 +#define mmSRAM_Y0_X0_BANK_BASE 0x7FFC200000ull +#define SRAM_Y0_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X0_RTR_BASE 0x7FFC201000ull +#define SRAM_Y0_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X1_BANK_BASE 0x7FFC204000ull +#define SRAM_Y0_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X1_RTR_BASE 0x7FFC205000ull +#define SRAM_Y0_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X2_BANK_BASE 0x7FFC208000ull +#define SRAM_Y0_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X2_RTR_BASE 0x7FFC209000ull +#define SRAM_Y0_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X3_BANK_BASE 0x7FFC20C000ull +#define SRAM_Y0_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X3_RTR_BASE 0x7FFC20D000ull +#define SRAM_Y0_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y0_X4_BANK_BASE 0x7FFC210000ull +#define 
SRAM_Y0_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y0_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y0_X4_RTR_BASE 0x7FFC211000ull +#define SRAM_Y0_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y0_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y1_X0_BANK_BASE 0x7FFC220000ull +#define SRAM_Y1_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X0_RTR_BASE 0x7FFC221000ull +#define SRAM_Y1_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X1_BANK_BASE 0x7FFC224000ull +#define SRAM_Y1_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X1_RTR_BASE 0x7FFC225000ull +#define SRAM_Y1_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X2_BANK_BASE 0x7FFC228000ull +#define SRAM_Y1_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X2_RTR_BASE 0x7FFC229000ull +#define SRAM_Y1_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X3_BANK_BASE 0x7FFC22C000ull +#define SRAM_Y1_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X3_RTR_BASE 0x7FFC22D000ull +#define SRAM_Y1_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y1_X4_BANK_BASE 0x7FFC230000ull +#define SRAM_Y1_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y1_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y1_X4_RTR_BASE 0x7FFC231000ull +#define SRAM_Y1_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y1_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y2_X0_BANK_BASE 0x7FFC240000ull +#define SRAM_Y2_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X0_RTR_BASE 0x7FFC241000ull +#define SRAM_Y2_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X1_BANK_BASE 0x7FFC244000ull +#define SRAM_Y2_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X1_RTR_BASE 0x7FFC245000ull +#define SRAM_Y2_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X2_BANK_BASE 0x7FFC248000ull +#define SRAM_Y2_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X2_RTR_BASE 0x7FFC249000ull +#define SRAM_Y2_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X3_BANK_BASE 0x7FFC24C000ull +#define SRAM_Y2_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X3_RTR_BASE 0x7FFC24D000ull +#define SRAM_Y2_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y2_X4_BANK_BASE 0x7FFC250000ull +#define SRAM_Y2_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y2_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y2_X4_RTR_BASE 0x7FFC251000ull +#define SRAM_Y2_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y2_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y3_X0_BANK_BASE 0x7FFC260000ull +#define SRAM_Y3_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X0_RTR_BASE 0x7FFC261000ull +#define SRAM_Y3_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X1_BANK_BASE 0x7FFC264000ull +#define SRAM_Y3_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X1_RTR_BASE 0x7FFC265000ull +#define SRAM_Y3_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X2_BANK_BASE 0x7FFC268000ull +#define SRAM_Y3_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X2_RTR_BASE 0x7FFC269000ull +#define SRAM_Y3_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X3_BANK_BASE 
0x7FFC26C000ull +#define SRAM_Y3_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X3_RTR_BASE 0x7FFC26D000ull +#define SRAM_Y3_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y3_X4_BANK_BASE 0x7FFC270000ull +#define SRAM_Y3_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y3_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y3_X4_RTR_BASE 0x7FFC271000ull +#define SRAM_Y3_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y3_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y4_X0_BANK_BASE 0x7FFC280000ull +#define SRAM_Y4_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X0_RTR_BASE 0x7FFC281000ull +#define SRAM_Y4_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X1_BANK_BASE 0x7FFC284000ull +#define SRAM_Y4_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X1_RTR_BASE 0x7FFC285000ull +#define SRAM_Y4_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X2_BANK_BASE 0x7FFC288000ull +#define SRAM_Y4_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X2_RTR_BASE 0x7FFC289000ull +#define SRAM_Y4_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X3_BANK_BASE 0x7FFC28C000ull +#define SRAM_Y4_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X3_RTR_BASE 0x7FFC28D000ull +#define SRAM_Y4_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y4_X4_BANK_BASE 0x7FFC290000ull +#define SRAM_Y4_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y4_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y4_X4_RTR_BASE 0x7FFC291000ull +#define SRAM_Y4_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y4_X4_RTR_SECTION 0xF000 +#define mmSRAM_Y5_X0_BANK_BASE 0x7FFC2A0000ull +#define SRAM_Y5_X0_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X0_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X0_RTR_BASE 0x7FFC2A1000ull +#define SRAM_Y5_X0_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X0_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X1_BANK_BASE 0x7FFC2A4000ull +#define SRAM_Y5_X1_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X1_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X1_RTR_BASE 0x7FFC2A5000ull +#define SRAM_Y5_X1_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X1_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X2_BANK_BASE 0x7FFC2A8000ull +#define SRAM_Y5_X2_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X2_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X2_RTR_BASE 0x7FFC2A9000ull +#define SRAM_Y5_X2_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X2_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X3_BANK_BASE 0x7FFC2AC000ull +#define SRAM_Y5_X3_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X3_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X3_RTR_BASE 0x7FFC2AD000ull +#define SRAM_Y5_X3_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X3_RTR_SECTION 0x3000 +#define mmSRAM_Y5_X4_BANK_BASE 0x7FFC2B0000ull +#define SRAM_Y5_X4_BANK_MAX_OFFSET 0x4 +#define SRAM_Y5_X4_BANK_SECTION 0x1000 +#define mmSRAM_Y5_X4_RTR_BASE 0x7FFC2B1000ull +#define SRAM_Y5_X4_RTR_MAX_OFFSET 0x334 +#define SRAM_Y5_X4_RTR_SECTION 0x14F000 +#define mmDMA_QM_0_BASE 0x7FFC400000ull +#define DMA_QM_0_MAX_OFFSET 0x310 +#define DMA_QM_0_SECTION 0x1000 +#define mmDMA_CH_0_BASE 0x7FFC401000ull +#define DMA_CH_0_MAX_OFFSET 0x200 +#define DMA_CH_0_SECTION 0x7000 +#define mmDMA_QM_1_BASE 0x7FFC408000ull +#define DMA_QM_1_MAX_OFFSET 0x310 +#define DMA_QM_1_SECTION 0x1000 +#define mmDMA_CH_1_BASE 0x7FFC409000ull +#define DMA_CH_1_MAX_OFFSET 0x200 +#define DMA_CH_1_SECTION 0x7000 +#define mmDMA_QM_2_BASE 0x7FFC410000ull +#define DMA_QM_2_MAX_OFFSET 0x310 +#define 
DMA_QM_2_SECTION 0x1000 +#define mmDMA_CH_2_BASE 0x7FFC411000ull +#define DMA_CH_2_MAX_OFFSET 0x200 +#define DMA_CH_2_SECTION 0x7000 +#define mmDMA_QM_3_BASE 0x7FFC418000ull +#define DMA_QM_3_MAX_OFFSET 0x310 +#define DMA_QM_3_SECTION 0x1000 +#define mmDMA_CH_3_BASE 0x7FFC419000ull +#define DMA_CH_3_MAX_OFFSET 0x200 +#define DMA_CH_3_SECTION 0x7000 +#define mmDMA_QM_4_BASE 0x7FFC420000ull +#define DMA_QM_4_MAX_OFFSET 0x310 +#define DMA_QM_4_SECTION 0x1000 +#define mmDMA_CH_4_BASE 0x7FFC421000ull +#define DMA_CH_4_MAX_OFFSET 0x200 +#define DMA_CH_4_SECTION 0x20000 +#define mmCPU_CA53_CFG_BASE 0x7FFC441000ull +#define CPU_CA53_CFG_MAX_OFFSET 0x218 +#define CPU_CA53_CFG_SECTION 0x1000 +#define mmCPU_IF_BASE 0x7FFC442000ull +#define CPU_IF_MAX_OFFSET 0x134 +#define CPU_IF_SECTION 0x2000 +#define mmCPU_TIMESTAMP_BASE 0x7FFC444000ull +#define CPU_TIMESTAMP_MAX_OFFSET 0x1000 +#define CPU_TIMESTAMP_SECTION 0x3C000 +#define mmMMU_BASE 0x7FFC480000ull +#define MMU_MAX_OFFSET 0x44 +#define MMU_SECTION 0x10000 +#define mmSTLB_BASE 0x7FFC490000ull +#define STLB_MAX_OFFSET 0x50 +#define STLB_SECTION 0x10000 +#define mmNORTH_THERMAL_SENSOR_BASE 0x7FFC4A0000ull +#define NORTH_THERMAL_SENSOR_MAX_OFFSET 0xE64 +#define NORTH_THERMAL_SENSOR_SECTION 0x1000 +#define mmMC_PLL_BASE 0x7FFC4A1000ull +#define MC_PLL_MAX_OFFSET 0x444 +#define MC_PLL_SECTION 0x1000 +#define mmCPU_PLL_BASE 0x7FFC4A2000ull +#define CPU_PLL_MAX_OFFSET 0x444 +#define CPU_PLL_SECTION 0x1000 +#define mmIC_PLL_BASE 0x7FFC4A3000ull +#define IC_PLL_MAX_OFFSET 0x444 +#define IC_PLL_SECTION 0x1000 +#define mmDMA_PROCESS_MON_BASE 0x7FFC4A4000ull +#define DMA_PROCESS_MON_MAX_OFFSET 0x4 +#define DMA_PROCESS_MON_SECTION 0xC000 +#define mmDMA_MACRO_BASE 0x7FFC4B0000ull +#define DMA_MACRO_MAX_OFFSET 0x15C +#define DMA_MACRO_SECTION 0x150000 +#define mmDDR_PHY_CH0_BASE 0x7FFC600000ull +#define DDR_PHY_CH0_MAX_OFFSET 0x0 +#define DDR_PHY_CH0_SECTION 0x40000 +#define mmDDR_MC_CH0_BASE 0x7FFC640000ull +#define DDR_MC_CH0_MAX_OFFSET 0xF34 +#define DDR_MC_CH0_SECTION 0x8000 +#define mmDDR_MISC_CH0_BASE 0x7FFC648000ull +#define DDR_MISC_CH0_MAX_OFFSET 0x204 +#define DDR_MISC_CH0_SECTION 0xB8000 +#define mmDDR_PHY_CH1_BASE 0x7FFC700000ull +#define DDR_PHY_CH1_MAX_OFFSET 0x0 +#define DDR_PHY_CH1_SECTION 0x40000 +#define mmDDR_MC_CH1_BASE 0x7FFC740000ull +#define DDR_MC_CH1_MAX_OFFSET 0xF34 +#define DDR_MC_CH1_SECTION 0x8000 +#define mmDDR_MISC_CH1_BASE 0x7FFC748000ull +#define DDR_MISC_CH1_MAX_OFFSET 0x204 +#define DDR_MISC_CH1_SECTION 0xB8000 +#define mmGIC_BASE 0x7FFC800000ull +#define GIC_MAX_OFFSET 0x10000 +#define GIC_SECTION 0x401000 +#define mmPCIE_WRAP_BASE 0x7FFCC01000ull +#define PCIE_WRAP_MAX_OFFSET 0xDF4 +#define PCIE_WRAP_SECTION 0x1000 +#define mmPCIE_DBI_BASE 0x7FFCC02000ull +#define PCIE_DBI_MAX_OFFSET 0xC04 +#define PCIE_DBI_SECTION 0x2000 +#define mmPCIE_CORE_BASE 0x7FFCC04000ull +#define PCIE_CORE_MAX_OFFSET 0x9B8 +#define PCIE_CORE_SECTION 0x1000 +#define mmPCIE_DB_CFG_BASE 0x7FFCC05000ull +#define PCIE_DB_CFG_MAX_OFFSET 0xE34 +#define PCIE_DB_CFG_SECTION 0x1000 +#define mmPCIE_DB_CMD_BASE 0x7FFCC06000ull +#define PCIE_DB_CMD_MAX_OFFSET 0x810 +#define PCIE_DB_CMD_SECTION 0x1000 +#define mmPCIE_AUX_BASE 0x7FFCC07000ull +#define PCIE_AUX_MAX_OFFSET 0x9BC +#define PCIE_AUX_SECTION 0x1000 +#define mmPCIE_DB_RSV_BASE 0x7FFCC08000ull +#define PCIE_DB_RSV_MAX_OFFSET 0x800 +#define PCIE_DB_RSV_SECTION 0x8000 +#define mmPCIE_PHY_BASE 0x7FFCC10000ull +#define PCIE_PHY_MAX_OFFSET 0x924 +#define PCIE_PHY_SECTION 0x30000 +#define mmPSOC_I2C_M0_BASE 
0x7FFCC40000ull +#define PSOC_I2C_M0_MAX_OFFSET 0x100 +#define PSOC_I2C_M0_SECTION 0x1000 +#define mmPSOC_I2C_M1_BASE 0x7FFCC41000ull +#define PSOC_I2C_M1_MAX_OFFSET 0x100 +#define PSOC_I2C_M1_SECTION 0x1000 +#define mmPSOC_I2C_S_BASE 0x7FFCC42000ull +#define PSOC_I2C_S_MAX_OFFSET 0x100 +#define PSOC_I2C_S_SECTION 0x1000 +#define mmPSOC_SPI_BASE 0x7FFCC43000ull +#define PSOC_SPI_MAX_OFFSET 0x100 +#define PSOC_SPI_SECTION 0x1000 +#define mmPSOC_EMMC_BASE 0x7FFCC44000ull +#define PSOC_EMMC_MAX_OFFSET 0xF70 +#define PSOC_EMMC_SECTION 0x1000 +#define mmPSOC_UART_0_BASE 0x7FFCC45000ull +#define PSOC_UART_0_MAX_OFFSET 0x1000 +#define PSOC_UART_0_SECTION 0x1000 +#define mmPSOC_UART_1_BASE 0x7FFCC46000ull +#define PSOC_UART_1_MAX_OFFSET 0x1000 +#define PSOC_UART_1_SECTION 0x1000 +#define mmPSOC_TIMER_BASE 0x7FFCC47000ull +#define PSOC_TIMER_MAX_OFFSET 0x1000 +#define PSOC_TIMER_SECTION 0x1000 +#define mmPSOC_WDOG_BASE 0x7FFCC48000ull +#define PSOC_WDOG_MAX_OFFSET 0x1000 +#define PSOC_WDOG_SECTION 0x1000 +#define mmPSOC_TIMESTAMP_BASE 0x7FFCC49000ull +#define PSOC_TIMESTAMP_MAX_OFFSET 0x1000 +#define PSOC_TIMESTAMP_SECTION 0x1000 +#define mmPSOC_EFUSE_BASE 0x7FFCC4A000ull +#define PSOC_EFUSE_MAX_OFFSET 0x10C +#define PSOC_EFUSE_SECTION 0x1000 +#define mmPSOC_GLOBAL_CONF_BASE 0x7FFCC4B000ull +#define PSOC_GLOBAL_CONF_MAX_OFFSET 0xA48 +#define PSOC_GLOBAL_CONF_SECTION 0x1000 +#define mmPSOC_GPIO0_BASE 0x7FFCC4C000ull +#define PSOC_GPIO0_MAX_OFFSET 0x1000 +#define PSOC_GPIO0_SECTION 0x1000 +#define mmPSOC_GPIO1_BASE 0x7FFCC4D000ull +#define PSOC_GPIO1_MAX_OFFSET 0x1000 +#define PSOC_GPIO1_SECTION 0x1000 +#define mmPSOC_BTL_BASE 0x7FFCC4E000ull +#define PSOC_BTL_MAX_OFFSET 0x124 +#define PSOC_BTL_SECTION 0x1000 +#define mmPSOC_CS_TRACE_BASE 0x7FFCC4F000ull +#define PSOC_CS_TRACE_MAX_OFFSET 0x0 +#define PSOC_CS_TRACE_SECTION 0x1000 +#define mmPSOC_GPIO2_BASE 0x7FFCC50000ull +#define PSOC_GPIO2_MAX_OFFSET 0x1000 +#define PSOC_GPIO2_SECTION 0x1000 +#define mmPSOC_GPIO3_BASE 0x7FFCC51000ull +#define PSOC_GPIO3_MAX_OFFSET 0x1000 +#define PSOC_GPIO3_SECTION 0x1000 +#define mmPSOC_GPIO4_BASE 0x7FFCC52000ull +#define PSOC_GPIO4_MAX_OFFSET 0x1000 +#define PSOC_GPIO4_SECTION 0x1000 +#define mmPSOC_DFT_EFUSE_BASE 0x7FFCC53000ull +#define PSOC_DFT_EFUSE_MAX_OFFSET 0x10C +#define PSOC_DFT_EFUSE_SECTION 0x1000 +#define mmPSOC_PM_BASE 0x7FFCC54000ull +#define PSOC_PM_MAX_OFFSET 0x4 +#define PSOC_PM_SECTION 0x1000 +#define mmPSOC_TS_BASE 0x7FFCC55000ull +#define PSOC_TS_MAX_OFFSET 0xE64 +#define PSOC_TS_SECTION 0xB000 +#define mmPSOC_MII_BASE 0x7FFCC60000ull +#define PSOC_MII_MAX_OFFSET 0x105C +#define PSOC_MII_SECTION 0x10000 +#define mmPSOC_EMMC_PLL_BASE 0x7FFCC70000ull +#define PSOC_EMMC_PLL_MAX_OFFSET 0x444 +#define PSOC_EMMC_PLL_SECTION 0x1000 +#define mmPSOC_MME_PLL_BASE 0x7FFCC71000ull +#define PSOC_MME_PLL_MAX_OFFSET 0x444 +#define PSOC_MME_PLL_SECTION 0x1000 +#define mmPSOC_PCI_PLL_BASE 0x7FFCC72000ull +#define PSOC_PCI_PLL_MAX_OFFSET 0x444 +#define PSOC_PCI_PLL_SECTION 0x6000 +#define mmPSOC_PWM0_BASE 0x7FFCC78000ull +#define PSOC_PWM0_MAX_OFFSET 0x58 +#define PSOC_PWM0_SECTION 0x1000 +#define mmPSOC_PWM1_BASE 0x7FFCC79000ull +#define PSOC_PWM1_MAX_OFFSET 0x58 +#define PSOC_PWM1_SECTION 0x1000 +#define mmPSOC_PWM2_BASE 0x7FFCC7A000ull +#define PSOC_PWM2_MAX_OFFSET 0x58 +#define PSOC_PWM2_SECTION 0x1000 +#define mmPSOC_PWM3_BASE 0x7FFCC7B000ull +#define PSOC_PWM3_MAX_OFFSET 0x58 +#define PSOC_PWM3_SECTION 0x185000 +#define mmTPC0_NRTR_BASE 0x7FFCE00000ull +#define TPC0_NRTR_MAX_OFFSET 0x608 +#define 
TPC0_NRTR_SECTION 0x1000 +#define mmTPC_PLL_BASE 0x7FFCE01000ull +#define TPC_PLL_MAX_OFFSET 0x444 +#define TPC_PLL_SECTION 0x1000 +#define mmTPC_THEMAL_SENSOR_BASE 0x7FFCE02000ull +#define TPC_THEMAL_SENSOR_MAX_OFFSET 0xE64 +#define TPC_THEMAL_SENSOR_SECTION 0x1000 +#define mmTPC_PROCESS_MON_BASE 0x7FFCE03000ull +#define TPC_PROCESS_MON_MAX_OFFSET 0x4 +#define TPC_PROCESS_MON_SECTION 0x1000 +#define mmTPC0_RD_REGULATOR_BASE 0x7FFCE04000ull +#define TPC0_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC0_RD_REGULATOR_SECTION 0x1000 +#define mmTPC0_WR_REGULATOR_BASE 0x7FFCE05000ull +#define TPC0_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC0_WR_REGULATOR_SECTION 0x1000 +#define mmTPC0_CFG_BASE 0x7FFCE06000ull +#define TPC0_CFG_MAX_OFFSET 0xE30 +#define TPC0_CFG_SECTION 0x2000 +#define mmTPC0_QM_BASE 0x7FFCE08000ull +#define TPC0_QM_MAX_OFFSET 0x310 +#define TPC0_QM_SECTION 0x1000 +#define mmTPC0_CMDQ_BASE 0x7FFCE09000ull +#define TPC0_CMDQ_MAX_OFFSET 0x310 +#define TPC0_CMDQ_SECTION 0x37000 +#define mmTPC1_RTR_BASE 0x7FFCE40000ull +#define TPC1_RTR_MAX_OFFSET 0x608 +#define TPC1_RTR_SECTION 0x4000 +#define mmTPC1_WR_REGULATOR_BASE 0x7FFCE44000ull +#define TPC1_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC1_WR_REGULATOR_SECTION 0x1000 +#define mmTPC1_RD_REGULATOR_BASE 0x7FFCE45000ull +#define TPC1_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC1_RD_REGULATOR_SECTION 0x1000 +#define mmTPC1_CFG_BASE 0x7FFCE46000ull +#define TPC1_CFG_MAX_OFFSET 0xE30 +#define TPC1_CFG_SECTION 0x2000 +#define mmTPC1_QM_BASE 0x7FFCE48000ull +#define TPC1_QM_MAX_OFFSET 0x310 +#define TPC1_QM_SECTION 0x1000 +#define mmTPC1_CMDQ_BASE 0x7FFCE49000ull +#define TPC1_CMDQ_MAX_OFFSET 0x310 +#define TPC1_CMDQ_SECTION 0x37000 +#define mmTPC2_RTR_BASE 0x7FFCE80000ull +#define TPC2_RTR_MAX_OFFSET 0x608 +#define TPC2_RTR_SECTION 0x4000 +#define mmTPC2_RD_REGULATOR_BASE 0x7FFCE84000ull +#define TPC2_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC2_RD_REGULATOR_SECTION 0x1000 +#define mmTPC2_WR_REGULATOR_BASE 0x7FFCE85000ull +#define TPC2_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC2_WR_REGULATOR_SECTION 0x1000 +#define mmTPC2_CFG_BASE 0x7FFCE86000ull +#define TPC2_CFG_MAX_OFFSET 0xE30 +#define TPC2_CFG_SECTION 0x2000 +#define mmTPC2_QM_BASE 0x7FFCE88000ull +#define TPC2_QM_MAX_OFFSET 0x310 +#define TPC2_QM_SECTION 0x1000 +#define mmTPC2_CMDQ_BASE 0x7FFCE89000ull +#define TPC2_CMDQ_MAX_OFFSET 0x310 +#define TPC2_CMDQ_SECTION 0x37000 +#define mmTPC3_RTR_BASE 0x7FFCEC0000ull +#define TPC3_RTR_MAX_OFFSET 0x608 +#define TPC3_RTR_SECTION 0x4000 +#define mmTPC3_RD_REGULATOR_BASE 0x7FFCEC4000ull +#define TPC3_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC3_RD_REGULATOR_SECTION 0x1000 +#define mmTPC3_WR_REGULATOR_BASE 0x7FFCEC5000ull +#define TPC3_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC3_WR_REGULATOR_SECTION 0x1000 +#define mmTPC3_CFG_BASE 0x7FFCEC6000ull +#define TPC3_CFG_MAX_OFFSET 0xE30 +#define TPC3_CFG_SECTION 0x2000 +#define mmTPC3_QM_BASE 0x7FFCEC8000ull +#define TPC3_QM_MAX_OFFSET 0x310 +#define TPC3_QM_SECTION 0x1000 +#define mmTPC3_CMDQ_BASE 0x7FFCEC9000ull +#define TPC3_CMDQ_MAX_OFFSET 0x310 +#define TPC3_CMDQ_SECTION 0x37000 +#define mmTPC4_RTR_BASE 0x7FFCF00000ull +#define TPC4_RTR_MAX_OFFSET 0x608 +#define TPC4_RTR_SECTION 0x4000 +#define mmTPC4_RD_REGULATOR_BASE 0x7FFCF04000ull +#define TPC4_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC4_RD_REGULATOR_SECTION 0x1000 +#define mmTPC4_WR_REGULATOR_BASE 0x7FFCF05000ull +#define TPC4_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC4_WR_REGULATOR_SECTION 0x1000 +#define mmTPC4_CFG_BASE 0x7FFCF06000ull +#define 
TPC4_CFG_MAX_OFFSET 0xE30 +#define TPC4_CFG_SECTION 0x2000 +#define mmTPC4_QM_BASE 0x7FFCF08000ull +#define TPC4_QM_MAX_OFFSET 0x310 +#define TPC4_QM_SECTION 0x1000 +#define mmTPC4_CMDQ_BASE 0x7FFCF09000ull +#define TPC4_CMDQ_MAX_OFFSET 0x310 +#define TPC4_CMDQ_SECTION 0x37000 +#define mmTPC5_RTR_BASE 0x7FFCF40000ull +#define TPC5_RTR_MAX_OFFSET 0x608 +#define TPC5_RTR_SECTION 0x4000 +#define mmTPC5_RD_REGULATOR_BASE 0x7FFCF44000ull +#define TPC5_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC5_RD_REGULATOR_SECTION 0x1000 +#define mmTPC5_WR_REGULATOR_BASE 0x7FFCF45000ull +#define TPC5_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC5_WR_REGULATOR_SECTION 0x1000 +#define mmTPC5_CFG_BASE 0x7FFCF46000ull +#define TPC5_CFG_MAX_OFFSET 0xE30 +#define TPC5_CFG_SECTION 0x2000 +#define mmTPC5_QM_BASE 0x7FFCF48000ull +#define TPC5_QM_MAX_OFFSET 0x310 +#define TPC5_QM_SECTION 0x1000 +#define mmTPC5_CMDQ_BASE 0x7FFCF49000ull +#define TPC5_CMDQ_MAX_OFFSET 0x310 +#define TPC5_CMDQ_SECTION 0x37000 +#define mmTPC6_RTR_BASE 0x7FFCF80000ull +#define TPC6_RTR_MAX_OFFSET 0x608 +#define TPC6_RTR_SECTION 0x4000 +#define mmTPC6_RD_REGULATOR_BASE 0x7FFCF84000ull +#define TPC6_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC6_RD_REGULATOR_SECTION 0x1000 +#define mmTPC6_WR_REGULATOR_BASE 0x7FFCF85000ull +#define TPC6_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC6_WR_REGULATOR_SECTION 0x1000 +#define mmTPC6_CFG_BASE 0x7FFCF86000ull +#define TPC6_CFG_MAX_OFFSET 0xE30 +#define TPC6_CFG_SECTION 0x2000 +#define mmTPC6_QM_BASE 0x7FFCF88000ull +#define TPC6_QM_MAX_OFFSET 0x310 +#define TPC6_QM_SECTION 0x1000 +#define mmTPC6_CMDQ_BASE 0x7FFCF89000ull +#define TPC6_CMDQ_MAX_OFFSET 0x310 +#define TPC6_CMDQ_SECTION 0x37000 +#define mmTPC7_NRTR_BASE 0x7FFCFC0000ull +#define TPC7_NRTR_MAX_OFFSET 0x608 +#define TPC7_NRTR_SECTION 0x4000 +#define mmTPC7_RD_REGULATOR_BASE 0x7FFCFC4000ull +#define TPC7_RD_REGULATOR_MAX_OFFSET 0x74 +#define TPC7_RD_REGULATOR_SECTION 0x1000 +#define mmTPC7_WR_REGULATOR_BASE 0x7FFCFC5000ull +#define TPC7_WR_REGULATOR_MAX_OFFSET 0x74 +#define TPC7_WR_REGULATOR_SECTION 0x1000 +#define mmTPC7_CFG_BASE 0x7FFCFC6000ull +#define TPC7_CFG_MAX_OFFSET 0xE30 +#define TPC7_CFG_SECTION 0x2000 +#define mmTPC7_QM_BASE 0x7FFCFC8000ull +#define TPC7_QM_MAX_OFFSET 0x310 +#define TPC7_QM_SECTION 0x1000 +#define mmTPC7_CMDQ_BASE 0x7FFCFC9000ull +#define TPC7_CMDQ_MAX_OFFSET 0x310 +#define TPC7_CMDQ_SECTION 0x1037000 +#define mmMME_TOP_TABLE_BASE 0x7FFE000000ull +#define MME_TOP_TABLE_MAX_OFFSET 0x1000 +#define MME_TOP_TABLE_SECTION 0x1000 +#define mmMME0_RTR_FUNNEL_BASE 0x7FFE001000ull +#define MME0_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME0_RTR_FUNNEL_SECTION 0x40000 +#define mmMME1_RTR_FUNNEL_BASE 0x7FFE041000ull +#define MME1_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME1_RTR_FUNNEL_SECTION 0x1000 +#define mmMME1_SBA_STM_BASE 0x7FFE042000ull +#define MME1_SBA_STM_MAX_OFFSET 0x1000 +#define MME1_SBA_STM_SECTION 0x1000 +#define mmMME1_SBA_CTI_BASE 0x7FFE043000ull +#define MME1_SBA_CTI_MAX_OFFSET 0x1000 +#define MME1_SBA_CTI_SECTION 0x1000 +#define mmMME1_SBA_ETF_BASE 0x7FFE044000ull +#define MME1_SBA_ETF_MAX_OFFSET 0x1000 +#define MME1_SBA_ETF_SECTION 0x1000 +#define mmMME1_SBA_SPMU_BASE 0x7FFE045000ull +#define MME1_SBA_SPMU_MAX_OFFSET 0x1000 +#define MME1_SBA_SPMU_SECTION 0x1000 +#define mmMME1_SBA_CTI0_BASE 0x7FFE046000ull +#define MME1_SBA_CTI0_MAX_OFFSET 0x1000 +#define MME1_SBA_CTI0_SECTION 0x1000 +#define mmMME1_SBA_CTI1_BASE 0x7FFE047000ull +#define MME1_SBA_CTI1_MAX_OFFSET 0x1000 +#define MME1_SBA_CTI1_SECTION 0x1000 +#define 
mmMME1_SBA_BMON0_BASE 0x7FFE048000ull +#define MME1_SBA_BMON0_MAX_OFFSET 0x1000 +#define MME1_SBA_BMON0_SECTION 0x1000 +#define mmMME1_SBA_BMON1_BASE 0x7FFE049000ull +#define MME1_SBA_BMON1_MAX_OFFSET 0x1000 +#define MME1_SBA_BMON1_SECTION 0x38000 +#define mmMME2_RTR_FUNNEL_BASE 0x7FFE081000ull +#define MME2_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME2_RTR_FUNNEL_SECTION 0x40000 +#define mmMME3_RTR_FUNNEL_BASE 0x7FFE0C1000ull +#define MME3_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME3_RTR_FUNNEL_SECTION 0x1000 +#define mmMME3_SBB_STM_BASE 0x7FFE0C2000ull +#define MME3_SBB_STM_MAX_OFFSET 0x1000 +#define MME3_SBB_STM_SECTION 0x1000 +#define mmMME3_SBB_CTI_BASE 0x7FFE0C3000ull +#define MME3_SBB_CTI_MAX_OFFSET 0x1000 +#define MME3_SBB_CTI_SECTION 0x1000 +#define mmMME3_SBB_ETF_BASE 0x7FFE0C4000ull +#define MME3_SBB_ETF_MAX_OFFSET 0x1000 +#define MME3_SBB_ETF_SECTION 0x1000 +#define mmMME3_SBB_SPMU_BASE 0x7FFE0C5000ull +#define MME3_SBB_SPMU_MAX_OFFSET 0x1000 +#define MME3_SBB_SPMU_SECTION 0x1000 +#define mmMME3_SBB_CTI0_BASE 0x7FFE0C6000ull +#define MME3_SBB_CTI0_MAX_OFFSET 0x1000 +#define MME3_SBB_CTI0_SECTION 0x1000 +#define mmMME3_SBB_CTI1_BASE 0x7FFE0C7000ull +#define MME3_SBB_CTI1_MAX_OFFSET 0x1000 +#define MME3_SBB_CTI1_SECTION 0x1000 +#define mmMME3_SBB_BMON0_BASE 0x7FFE0C8000ull +#define MME3_SBB_BMON0_MAX_OFFSET 0x1000 +#define MME3_SBB_BMON0_SECTION 0x1000 +#define mmMME3_SBB_BMON1_BASE 0x7FFE0C9000ull +#define MME3_SBB_BMON1_MAX_OFFSET 0x1000 +#define MME3_SBB_BMON1_SECTION 0x38000 +#define mmMME4_RTR_FUNNEL_BASE 0x7FFE101000ull +#define MME4_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME4_RTR_FUNNEL_SECTION 0x1000 +#define mmMME4_WACS_STM_BASE 0x7FFE102000ull +#define MME4_WACS_STM_MAX_OFFSET 0x1000 +#define MME4_WACS_STM_SECTION 0x1000 +#define mmMME4_WACS_CTI_BASE 0x7FFE103000ull +#define MME4_WACS_CTI_MAX_OFFSET 0x1000 +#define MME4_WACS_CTI_SECTION 0x1000 +#define mmMME4_WACS_ETF_BASE 0x7FFE104000ull +#define MME4_WACS_ETF_MAX_OFFSET 0x1000 +#define MME4_WACS_ETF_SECTION 0x1000 +#define mmMME4_WACS_SPMU_BASE 0x7FFE105000ull +#define MME4_WACS_SPMU_MAX_OFFSET 0x1000 +#define MME4_WACS_SPMU_SECTION 0x1000 +#define mmMME4_WACS_CTI0_BASE 0x7FFE106000ull +#define MME4_WACS_CTI0_MAX_OFFSET 0x1000 +#define MME4_WACS_CTI0_SECTION 0x1000 +#define mmMME4_WACS_CTI1_BASE 0x7FFE107000ull +#define MME4_WACS_CTI1_MAX_OFFSET 0x1000 +#define MME4_WACS_CTI1_SECTION 0x1000 +#define mmMME4_WACS_BMON0_BASE 0x7FFE108000ull +#define MME4_WACS_BMON0_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON0_SECTION 0x1000 +#define mmMME4_WACS_BMON1_BASE 0x7FFE109000ull +#define MME4_WACS_BMON1_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON1_SECTION 0x1000 +#define mmMME4_WACS_BMON2_BASE 0x7FFE10A000ull +#define MME4_WACS_BMON2_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON2_SECTION 0x1000 +#define mmMME4_WACS_BMON3_BASE 0x7FFE10B000ull +#define MME4_WACS_BMON3_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON3_SECTION 0x1000 +#define mmMME4_WACS_BMON4_BASE 0x7FFE10C000ull +#define MME4_WACS_BMON4_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON4_SECTION 0x1000 +#define mmMME4_WACS_BMON5_BASE 0x7FFE10D000ull +#define MME4_WACS_BMON5_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON5_SECTION 0x1000 +#define mmMME4_WACS_BMON6_BASE 0x7FFE10E000ull +#define MME4_WACS_BMON6_MAX_OFFSET 0x1000 +#define MME4_WACS_BMON6_SECTION 0x4000 +#define mmMME4_WACS2_STM_BASE 0x7FFE112000ull +#define MME4_WACS2_STM_MAX_OFFSET 0x1000 +#define MME4_WACS2_STM_SECTION 0x1000 +#define mmMME4_WACS2_CTI_BASE 0x7FFE113000ull +#define MME4_WACS2_CTI_MAX_OFFSET 0x1000 +#define 
MME4_WACS2_CTI_SECTION 0x1000 +#define mmMME4_WACS2_ETF_BASE 0x7FFE114000ull +#define MME4_WACS2_ETF_MAX_OFFSET 0x1000 +#define MME4_WACS2_ETF_SECTION 0x1000 +#define mmMME4_WACS2_SPMU_BASE 0x7FFE115000ull +#define MME4_WACS2_SPMU_MAX_OFFSET 0x1000 +#define MME4_WACS2_SPMU_SECTION 0x1000 +#define mmMME4_WACS2_CTI0_BASE 0x7FFE116000ull +#define MME4_WACS2_CTI0_MAX_OFFSET 0x1000 +#define MME4_WACS2_CTI0_SECTION 0x1000 +#define mmMME4_WACS2_CTI1_BASE 0x7FFE117000ull +#define MME4_WACS2_CTI1_MAX_OFFSET 0x1000 +#define MME4_WACS2_CTI1_SECTION 0x1000 +#define mmMME4_WACS2_BMON0_BASE 0x7FFE118000ull +#define MME4_WACS2_BMON0_MAX_OFFSET 0x1000 +#define MME4_WACS2_BMON0_SECTION 0x1000 +#define mmMME4_WACS2_BMON1_BASE 0x7FFE119000ull +#define MME4_WACS2_BMON1_MAX_OFFSET 0x1000 +#define MME4_WACS2_BMON1_SECTION 0x1000 +#define mmMME4_WACS2_BMON2_BASE 0x7FFE11A000ull +#define MME4_WACS2_BMON2_MAX_OFFSET 0x1000 +#define MME4_WACS2_BMON2_SECTION 0x27000 +#define mmMME5_RTR_FUNNEL_BASE 0x7FFE141000ull +#define MME5_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define MME5_RTR_FUNNEL_SECTION 0x2BF000 +#define mmDMA_ROM_TABLE_BASE 0x7FFE400000ull +#define DMA_ROM_TABLE_MAX_OFFSET 0x1000 +#define DMA_ROM_TABLE_SECTION 0x1000 +#define mmDMA_CH_0_CS_STM_BASE 0x7FFE401000ull +#define DMA_CH_0_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_STM_SECTION 0x1000 +#define mmDMA_CH_0_CS_CTI_BASE 0x7FFE402000ull +#define DMA_CH_0_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_0_CS_ETF_BASE 0x7FFE403000ull +#define DMA_CH_0_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_0_CS_SPMU_BASE 0x7FFE404000ull +#define DMA_CH_0_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_0_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_0_BMON_CTI_BASE 0x7FFE405000ull +#define DMA_CH_0_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_0_USER_CTI_BASE 0x7FFE406000ull +#define DMA_CH_0_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_0_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_0_BMON_0_BASE 0x7FFE407000ull +#define DMA_CH_0_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_0_SECTION 0x1000 +#define mmDMA_CH_0_BMON_1_BASE 0x7FFE408000ull +#define DMA_CH_0_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_0_BMON_1_SECTION 0x9000 +#define mmDMA_CH_1_CS_STM_BASE 0x7FFE411000ull +#define DMA_CH_1_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_STM_SECTION 0x1000 +#define mmDMA_CH_1_CS_CTI_BASE 0x7FFE412000ull +#define DMA_CH_1_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_1_CS_ETF_BASE 0x7FFE413000ull +#define DMA_CH_1_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_1_CS_SPMU_BASE 0x7FFE414000ull +#define DMA_CH_1_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_1_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_1_BMON_CTI_BASE 0x7FFE415000ull +#define DMA_CH_1_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_1_USER_CTI_BASE 0x7FFE416000ull +#define DMA_CH_1_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_1_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_1_BMON_0_BASE 0x7FFE417000ull +#define DMA_CH_1_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_0_SECTION 0x1000 +#define mmDMA_CH_1_BMON_1_BASE 0x7FFE418000ull +#define DMA_CH_1_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_1_BMON_1_SECTION 0x9000 +#define mmDMA_CH_2_CS_STM_BASE 0x7FFE421000ull +#define DMA_CH_2_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_STM_SECTION 0x1000 +#define mmDMA_CH_2_CS_CTI_BASE 0x7FFE422000ull +#define DMA_CH_2_CS_CTI_MAX_OFFSET 
0x1000 +#define DMA_CH_2_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_2_CS_ETF_BASE 0x7FFE423000ull +#define DMA_CH_2_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_2_CS_SPMU_BASE 0x7FFE424000ull +#define DMA_CH_2_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_2_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_2_BMON_CTI_BASE 0x7FFE425000ull +#define DMA_CH_2_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_2_USER_CTI_BASE 0x7FFE426000ull +#define DMA_CH_2_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_2_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_2_BMON_0_BASE 0x7FFE427000ull +#define DMA_CH_2_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_0_SECTION 0x1000 +#define mmDMA_CH_2_BMON_1_BASE 0x7FFE428000ull +#define DMA_CH_2_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_2_BMON_1_SECTION 0x9000 +#define mmDMA_CH_3_CS_STM_BASE 0x7FFE431000ull +#define DMA_CH_3_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_STM_SECTION 0x1000 +#define mmDMA_CH_3_CS_CTI_BASE 0x7FFE432000ull +#define DMA_CH_3_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_3_CS_ETF_BASE 0x7FFE433000ull +#define DMA_CH_3_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_3_CS_SPMU_BASE 0x7FFE434000ull +#define DMA_CH_3_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_3_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_3_BMON_CTI_BASE 0x7FFE435000ull +#define DMA_CH_3_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_3_USER_CTI_BASE 0x7FFE436000ull +#define DMA_CH_3_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_3_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_3_BMON_0_BASE 0x7FFE437000ull +#define DMA_CH_3_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_0_SECTION 0x1000 +#define mmDMA_CH_3_BMON_1_BASE 0x7FFE438000ull +#define DMA_CH_3_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_3_BMON_1_SECTION 0x9000 +#define mmDMA_CH_4_CS_STM_BASE 0x7FFE441000ull +#define DMA_CH_4_CS_STM_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_STM_SECTION 0x1000 +#define mmDMA_CH_4_CS_CTI_BASE 0x7FFE442000ull +#define DMA_CH_4_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_CTI_SECTION 0x1000 +#define mmDMA_CH_4_CS_ETF_BASE 0x7FFE443000ull +#define DMA_CH_4_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_ETF_SECTION 0x1000 +#define mmDMA_CH_4_CS_SPMU_BASE 0x7FFE444000ull +#define DMA_CH_4_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_CH_4_CS_SPMU_SECTION 0x1000 +#define mmDMA_CH_4_BMON_CTI_BASE 0x7FFE445000ull +#define DMA_CH_4_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_CTI_SECTION 0x1000 +#define mmDMA_CH_4_USER_CTI_BASE 0x7FFE446000ull +#define DMA_CH_4_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_CH_4_USER_CTI_SECTION 0x1000 +#define mmDMA_CH_4_BMON_0_BASE 0x7FFE447000ull +#define DMA_CH_4_BMON_0_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_0_SECTION 0x1000 +#define mmDMA_CH_4_BMON_1_BASE 0x7FFE448000ull +#define DMA_CH_4_BMON_1_MAX_OFFSET 0x1000 +#define DMA_CH_4_BMON_1_SECTION 0x8000 +#define mmDMA_CH_FUNNEL_6_1_BASE 0x7FFE450000ull +#define DMA_CH_FUNNEL_6_1_MAX_OFFSET 0x1000 +#define DMA_CH_FUNNEL_6_1_SECTION 0x11000 +#define mmDMA_MACRO_CS_STM_BASE 0x7FFE461000ull +#define DMA_MACRO_CS_STM_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_STM_SECTION 0x1000 +#define mmDMA_MACRO_CS_CTI_BASE 0x7FFE462000ull +#define DMA_MACRO_CS_CTI_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_CTI_SECTION 0x1000 +#define mmDMA_MACRO_CS_ETF_BASE 0x7FFE463000ull +#define DMA_MACRO_CS_ETF_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_ETF_SECTION 0x1000 +#define mmDMA_MACRO_CS_SPMU_BASE 
0x7FFE464000ull +#define DMA_MACRO_CS_SPMU_MAX_OFFSET 0x1000 +#define DMA_MACRO_CS_SPMU_SECTION 0x1000 +#define mmDMA_MACRO_BMON_CTI_BASE 0x7FFE465000ull +#define DMA_MACRO_BMON_CTI_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_CTI_SECTION 0x1000 +#define mmDMA_MACRO_USER_CTI_BASE 0x7FFE466000ull +#define DMA_MACRO_USER_CTI_MAX_OFFSET 0x1000 +#define DMA_MACRO_USER_CTI_SECTION 0x1000 +#define mmDMA_MACRO_BMON_0_BASE 0x7FFE467000ull +#define DMA_MACRO_BMON_0_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_0_SECTION 0x1000 +#define mmDMA_MACRO_BMON_1_BASE 0x7FFE468000ull +#define DMA_MACRO_BMON_1_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_1_SECTION 0x1000 +#define mmDMA_MACRO_BMON_2_BASE 0x7FFE469000ull +#define DMA_MACRO_BMON_2_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_2_SECTION 0x1000 +#define mmDMA_MACRO_BMON_3_BASE 0x7FFE46A000ull +#define DMA_MACRO_BMON_3_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_3_SECTION 0x1000 +#define mmDMA_MACRO_BMON_4_BASE 0x7FFE46B000ull +#define DMA_MACRO_BMON_4_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_4_SECTION 0x1000 +#define mmDMA_MACRO_BMON_5_BASE 0x7FFE46C000ull +#define DMA_MACRO_BMON_5_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_5_SECTION 0x1000 +#define mmDMA_MACRO_BMON_6_BASE 0x7FFE46D000ull +#define DMA_MACRO_BMON_6_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_6_SECTION 0x1000 +#define mmDMA_MACRO_BMON_7_BASE 0x7FFE46E000ull +#define DMA_MACRO_BMON_7_MAX_OFFSET 0x1000 +#define DMA_MACRO_BMON_7_SECTION 0x2000 +#define mmDMA_MACRO_FUNNEL_3_1_BASE 0x7FFE470000ull +#define DMA_MACRO_FUNNEL_3_1_MAX_OFFSET 0x1000 +#define DMA_MACRO_FUNNEL_3_1_SECTION 0x10000 +#define mmCPU_ROM_TABLE_BASE 0x7FFE480000ull +#define CPU_ROM_TABLE_MAX_OFFSET 0x1000 +#define CPU_ROM_TABLE_SECTION 0x1000 +#define mmCPU_ETF_0_BASE 0x7FFE481000ull +#define CPU_ETF_0_MAX_OFFSET 0x1000 +#define CPU_ETF_0_SECTION 0x1000 +#define mmCPU_ETF_1_BASE 0x7FFE482000ull +#define CPU_ETF_1_MAX_OFFSET 0x1000 +#define CPU_ETF_1_SECTION 0x2000 +#define mmCPU_CTI_BASE 0x7FFE484000ull +#define CPU_CTI_MAX_OFFSET 0x1000 +#define CPU_CTI_SECTION 0x1000 +#define mmCPU_FUNNEL_BASE 0x7FFE485000ull +#define CPU_FUNNEL_MAX_OFFSET 0x1000 +#define CPU_FUNNEL_SECTION 0x1000 +#define mmCPU_STM_BASE 0x7FFE486000ull +#define CPU_STM_MAX_OFFSET 0x1000 +#define CPU_STM_SECTION 0x1000 +#define mmCPU_CTI_TRACE_BASE 0x7FFE487000ull +#define CPU_CTI_TRACE_MAX_OFFSET 0x1000 +#define CPU_CTI_TRACE_SECTION 0x1000 +#define mmCPU_ETF_TRACE_BASE 0x7FFE488000ull +#define CPU_ETF_TRACE_MAX_OFFSET 0x1000 +#define CPU_ETF_TRACE_SECTION 0x1000 +#define mmCPU_WR_BMON_BASE 0x7FFE489000ull +#define CPU_WR_BMON_MAX_OFFSET 0x1000 +#define CPU_WR_BMON_SECTION 0x1000 +#define mmCPU_RD_BMON_BASE 0x7FFE48A000ull +#define CPU_RD_BMON_MAX_OFFSET 0x1000 +#define CPU_RD_BMON_SECTION 0x37000 +#define mmMMU_CS_STM_BASE 0x7FFE4C1000ull +#define MMU_CS_STM_MAX_OFFSET 0x1000 +#define MMU_CS_STM_SECTION 0x1000 +#define mmMMU_CS_CTI_BASE 0x7FFE4C2000ull +#define MMU_CS_CTI_MAX_OFFSET 0x1000 +#define MMU_CS_CTI_SECTION 0x1000 +#define mmMMU_CS_ETF_BASE 0x7FFE4C3000ull +#define MMU_CS_ETF_MAX_OFFSET 0x1000 +#define MMU_CS_ETF_SECTION 0x1000 +#define mmMMU_CS_SPMU_BASE 0x7FFE4C4000ull +#define MMU_CS_SPMU_MAX_OFFSET 0x1000 +#define MMU_CS_SPMU_SECTION 0x1000 +#define mmMMU_BMON_CTI_BASE 0x7FFE4C5000ull +#define MMU_BMON_CTI_MAX_OFFSET 0x1000 +#define MMU_BMON_CTI_SECTION 0x1000 +#define mmMMU_USER_CTI_BASE 0x7FFE4C6000ull +#define MMU_USER_CTI_MAX_OFFSET 0x1000 +#define MMU_USER_CTI_SECTION 0x1000 +#define mmMMU_BMON_0_BASE 0x7FFE4C7000ull +#define 
MMU_BMON_0_MAX_OFFSET 0x1000 +#define MMU_BMON_0_SECTION 0x1000 +#define mmMMU_BMON_1_BASE 0x7FFE4C8000ull +#define MMU_BMON_1_MAX_OFFSET 0x1000 +#define MMU_BMON_1_SECTION 0x338000 +#define mmCA53_BASE 0x7FFE800000ull +#define CA53_MAX_OFFSET 0x1000 +#define CA53_SECTION 0x400000 +#define mmPCI_ROM_TABLE_BASE 0x7FFEC00000ull +#define PCI_ROM_TABLE_MAX_OFFSET 0x1000 +#define PCI_ROM_TABLE_SECTION 0x1000 +#define mmPCIE_STM_BASE 0x7FFEC01000ull +#define PCIE_STM_MAX_OFFSET 0x1000 +#define PCIE_STM_SECTION 0x1000 +#define mmPCIE_ETF_BASE 0x7FFEC02000ull +#define PCIE_ETF_MAX_OFFSET 0x1000 +#define PCIE_ETF_SECTION 0x1000 +#define mmPCIE_CTI_0_BASE 0x7FFEC03000ull +#define PCIE_CTI_0_MAX_OFFSET 0x1000 +#define PCIE_CTI_0_SECTION 0x1000 +#define mmPCIE_SPMU_BASE 0x7FFEC04000ull +#define PCIE_SPMU_MAX_OFFSET 0x1000 +#define PCIE_SPMU_SECTION 0x1000 +#define mmPCIE_CTI_1_BASE 0x7FFEC05000ull +#define PCIE_CTI_1_MAX_OFFSET 0x1000 +#define PCIE_CTI_1_SECTION 0x1000 +#define mmPCIE_FUNNEL_BASE 0x7FFEC06000ull +#define PCIE_FUNNEL_MAX_OFFSET 0x1000 +#define PCIE_FUNNEL_SECTION 0x1000 +#define mmPCIE_BMON_MSTR_WR_BASE 0x7FFEC07000ull +#define PCIE_BMON_MSTR_WR_MAX_OFFSET 0x1000 +#define PCIE_BMON_MSTR_WR_SECTION 0x1000 +#define mmPCIE_BMON_MSTR_RD_BASE 0x7FFEC08000ull +#define PCIE_BMON_MSTR_RD_MAX_OFFSET 0x1000 +#define PCIE_BMON_MSTR_RD_SECTION 0x1000 +#define mmPCIE_BMON_SLV_WR_BASE 0x7FFEC09000ull +#define PCIE_BMON_SLV_WR_MAX_OFFSET 0x1000 +#define PCIE_BMON_SLV_WR_SECTION 0x1000 +#define mmPCIE_BMON_SLV_RD_BASE 0x7FFEC0A000ull +#define PCIE_BMON_SLV_RD_MAX_OFFSET 0x1000 +#define PCIE_BMON_SLV_RD_SECTION 0x36000 +#define mmPSOC_CTI_BASE 0x7FFEC40000ull +#define PSOC_CTI_MAX_OFFSET 0x1000 +#define PSOC_CTI_SECTION 0x1000 +#define mmPSOC_STM_BASE 0x7FFEC41000ull +#define PSOC_STM_MAX_OFFSET 0x1000 +#define PSOC_STM_SECTION 0x1000 +#define mmPSOC_FUNNEL_BASE 0x7FFEC42000ull +#define PSOC_FUNNEL_MAX_OFFSET 0x1000 +#define PSOC_FUNNEL_SECTION 0x1000 +#define mmPSOC_ETR_BASE 0x7FFEC43000ull +#define PSOC_ETR_MAX_OFFSET 0x1000 +#define PSOC_ETR_SECTION 0x1000 +#define mmPSOC_ETF_BASE 0x7FFEC44000ull +#define PSOC_ETF_MAX_OFFSET 0x1000 +#define PSOC_ETF_SECTION 0x1000 +#define mmPSOC_TS_CTI_BASE 0x7FFEC45000ull +#define PSOC_TS_CTI_MAX_OFFSET 0x1000 +#define PSOC_TS_CTI_SECTION 0xB000 +#define mmTOP_ROM_TABLE_BASE 0x7FFEC50000ull +#define TOP_ROM_TABLE_MAX_OFFSET 0x1000 +#define TOP_ROM_TABLE_SECTION 0x1F0000 +#define mmTPC1_RTR_FUNNEL_BASE 0x7FFEE40000ull +#define TPC1_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC1_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC2_RTR_FUNNEL_BASE 0x7FFEE80000ull +#define TPC2_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC2_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC3_RTR_FUNNEL_BASE 0x7FFEEC0000ull +#define TPC3_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC3_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC4_RTR_FUNNEL_BASE 0x7FFEF00000ull +#define TPC4_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC4_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC5_RTR_FUNNEL_BASE 0x7FFEF40000ull +#define TPC5_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC5_RTR_FUNNEL_SECTION 0x40000 +#define mmTPC6_RTR_FUNNEL_BASE 0x7FFEF80000ull +#define TPC6_RTR_FUNNEL_MAX_OFFSET 0x1000 +#define TPC6_RTR_FUNNEL_SECTION 0x81000 +#define mmTPC0_EML_SPMU_BASE 0x7FFF001000ull +#define TPC0_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC0_EML_SPMU_SECTION 0x1000 +#define mmTPC0_EML_ETF_BASE 0x7FFF002000ull +#define TPC0_EML_ETF_MAX_OFFSET 0x1000 +#define TPC0_EML_ETF_SECTION 0x1000 +#define mmTPC0_EML_STM_BASE 0x7FFF003000ull +#define 
TPC0_EML_STM_MAX_OFFSET 0x1000 +#define TPC0_EML_STM_SECTION 0x1000 +#define mmTPC0_EML_ETM_R4_BASE 0x7FFF004000ull +#define TPC0_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC0_EML_ETM_R4_SECTION 0x1000 +#define mmTPC0_EML_CTI_BASE 0x7FFF005000ull +#define TPC0_EML_CTI_MAX_OFFSET 0x1000 +#define TPC0_EML_CTI_SECTION 0x1000 +#define mmTPC0_EML_FUNNEL_BASE 0x7FFF006000ull +#define TPC0_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC0_EML_FUNNEL_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_0_BASE 0x7FFF007000ull +#define TPC0_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_1_BASE 0x7FFF008000ull +#define TPC0_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_2_BASE 0x7FFF009000ull +#define TPC0_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC0_EML_BUSMON_3_BASE 0x7FFF00A000ull +#define TPC0_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC0_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC0_EML_CFG_BASE 0x7FFF040000ull +#define TPC0_EML_CFG_MAX_OFFSET 0x338 +#define TPC0_EML_CFG_SECTION 0x1BF000 +#define mmTPC0_EML_CS_BASE 0x7FFF1FF000ull +#define TPC0_EML_CS_MAX_OFFSET 0x1000 +#define TPC0_EML_CS_SECTION 0x2000 +#define mmTPC1_EML_SPMU_BASE 0x7FFF201000ull +#define TPC1_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC1_EML_SPMU_SECTION 0x1000 +#define mmTPC1_EML_ETF_BASE 0x7FFF202000ull +#define TPC1_EML_ETF_MAX_OFFSET 0x1000 +#define TPC1_EML_ETF_SECTION 0x1000 +#define mmTPC1_EML_STM_BASE 0x7FFF203000ull +#define TPC1_EML_STM_MAX_OFFSET 0x1000 +#define TPC1_EML_STM_SECTION 0x1000 +#define mmTPC1_EML_ETM_R4_BASE 0x7FFF204000ull +#define TPC1_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC1_EML_ETM_R4_SECTION 0x1000 +#define mmTPC1_EML_CTI_BASE 0x7FFF205000ull +#define TPC1_EML_CTI_MAX_OFFSET 0x1000 +#define TPC1_EML_CTI_SECTION 0x1000 +#define mmTPC1_EML_FUNNEL_BASE 0x7FFF206000ull +#define TPC1_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC1_EML_FUNNEL_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_0_BASE 0x7FFF207000ull +#define TPC1_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_1_BASE 0x7FFF208000ull +#define TPC1_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_2_BASE 0x7FFF209000ull +#define TPC1_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC1_EML_BUSMON_3_BASE 0x7FFF20A000ull +#define TPC1_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC1_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC1_EML_CFG_BASE 0x7FFF240000ull +#define TPC1_EML_CFG_MAX_OFFSET 0x338 +#define TPC1_EML_CFG_SECTION 0x1BF000 +#define mmTPC1_EML_CS_BASE 0x7FFF3FF000ull +#define TPC1_EML_CS_MAX_OFFSET 0x1000 +#define TPC1_EML_CS_SECTION 0x2000 +#define mmTPC2_EML_SPMU_BASE 0x7FFF401000ull +#define TPC2_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC2_EML_SPMU_SECTION 0x1000 +#define mmTPC2_EML_ETF_BASE 0x7FFF402000ull +#define TPC2_EML_ETF_MAX_OFFSET 0x1000 +#define TPC2_EML_ETF_SECTION 0x1000 +#define mmTPC2_EML_STM_BASE 0x7FFF403000ull +#define TPC2_EML_STM_MAX_OFFSET 0x1000 +#define TPC2_EML_STM_SECTION 0x1000 +#define mmTPC2_EML_ETM_R4_BASE 0x7FFF404000ull +#define TPC2_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC2_EML_ETM_R4_SECTION 0x1000 +#define mmTPC2_EML_CTI_BASE 0x7FFF405000ull +#define TPC2_EML_CTI_MAX_OFFSET 0x1000 +#define TPC2_EML_CTI_SECTION 0x1000 +#define mmTPC2_EML_FUNNEL_BASE 0x7FFF406000ull +#define TPC2_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC2_EML_FUNNEL_SECTION 0x1000 +#define 
mmTPC2_EML_BUSMON_0_BASE 0x7FFF407000ull +#define TPC2_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_1_BASE 0x7FFF408000ull +#define TPC2_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_2_BASE 0x7FFF409000ull +#define TPC2_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC2_EML_BUSMON_3_BASE 0x7FFF40A000ull +#define TPC2_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC2_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC2_EML_CFG_BASE 0x7FFF440000ull +#define TPC2_EML_CFG_MAX_OFFSET 0x338 +#define TPC2_EML_CFG_SECTION 0x1BF000 +#define mmTPC2_EML_CS_BASE 0x7FFF5FF000ull +#define TPC2_EML_CS_MAX_OFFSET 0x1000 +#define TPC2_EML_CS_SECTION 0x2000 +#define mmTPC3_EML_SPMU_BASE 0x7FFF601000ull +#define TPC3_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC3_EML_SPMU_SECTION 0x1000 +#define mmTPC3_EML_ETF_BASE 0x7FFF602000ull +#define TPC3_EML_ETF_MAX_OFFSET 0x1000 +#define TPC3_EML_ETF_SECTION 0x1000 +#define mmTPC3_EML_STM_BASE 0x7FFF603000ull +#define TPC3_EML_STM_MAX_OFFSET 0x1000 +#define TPC3_EML_STM_SECTION 0x1000 +#define mmTPC3_EML_ETM_R4_BASE 0x7FFF604000ull +#define TPC3_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC3_EML_ETM_R4_SECTION 0x1000 +#define mmTPC3_EML_CTI_BASE 0x7FFF605000ull +#define TPC3_EML_CTI_MAX_OFFSET 0x1000 +#define TPC3_EML_CTI_SECTION 0x1000 +#define mmTPC3_EML_FUNNEL_BASE 0x7FFF606000ull +#define TPC3_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC3_EML_FUNNEL_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_0_BASE 0x7FFF607000ull +#define TPC3_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_1_BASE 0x7FFF608000ull +#define TPC3_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_2_BASE 0x7FFF609000ull +#define TPC3_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC3_EML_BUSMON_3_BASE 0x7FFF60A000ull +#define TPC3_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC3_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC3_EML_CFG_BASE 0x7FFF640000ull +#define TPC3_EML_CFG_MAX_OFFSET 0x338 +#define TPC3_EML_CFG_SECTION 0x1BF000 +#define mmTPC3_EML_CS_BASE 0x7FFF7FF000ull +#define TPC3_EML_CS_MAX_OFFSET 0x1000 +#define TPC3_EML_CS_SECTION 0x2000 +#define mmTPC4_EML_SPMU_BASE 0x7FFF801000ull +#define TPC4_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC4_EML_SPMU_SECTION 0x1000 +#define mmTPC4_EML_ETF_BASE 0x7FFF802000ull +#define TPC4_EML_ETF_MAX_OFFSET 0x1000 +#define TPC4_EML_ETF_SECTION 0x1000 +#define mmTPC4_EML_STM_BASE 0x7FFF803000ull +#define TPC4_EML_STM_MAX_OFFSET 0x1000 +#define TPC4_EML_STM_SECTION 0x1000 +#define mmTPC4_EML_ETM_R4_BASE 0x7FFF804000ull +#define TPC4_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC4_EML_ETM_R4_SECTION 0x1000 +#define mmTPC4_EML_CTI_BASE 0x7FFF805000ull +#define TPC4_EML_CTI_MAX_OFFSET 0x1000 +#define TPC4_EML_CTI_SECTION 0x1000 +#define mmTPC4_EML_FUNNEL_BASE 0x7FFF806000ull +#define TPC4_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC4_EML_FUNNEL_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_0_BASE 0x7FFF807000ull +#define TPC4_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC4_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_1_BASE 0x7FFF808000ull +#define TPC4_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC4_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_2_BASE 0x7FFF809000ull +#define TPC4_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC4_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC4_EML_BUSMON_3_BASE 0x7FFF80A000ull +#define 
TPC4_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC4_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC4_EML_CFG_BASE 0x7FFF840000ull +#define TPC4_EML_CFG_MAX_OFFSET 0x338 +#define TPC4_EML_CFG_SECTION 0x1BF000 +#define mmTPC4_EML_CS_BASE 0x7FFF9FF000ull +#define TPC4_EML_CS_MAX_OFFSET 0x1000 +#define TPC4_EML_CS_SECTION 0x2000 +#define mmTPC5_EML_SPMU_BASE 0x7FFFA01000ull +#define TPC5_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC5_EML_SPMU_SECTION 0x1000 +#define mmTPC5_EML_ETF_BASE 0x7FFFA02000ull +#define TPC5_EML_ETF_MAX_OFFSET 0x1000 +#define TPC5_EML_ETF_SECTION 0x1000 +#define mmTPC5_EML_STM_BASE 0x7FFFA03000ull +#define TPC5_EML_STM_MAX_OFFSET 0x1000 +#define TPC5_EML_STM_SECTION 0x1000 +#define mmTPC5_EML_ETM_R4_BASE 0x7FFFA04000ull +#define TPC5_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC5_EML_ETM_R4_SECTION 0x1000 +#define mmTPC5_EML_CTI_BASE 0x7FFFA05000ull +#define TPC5_EML_CTI_MAX_OFFSET 0x1000 +#define TPC5_EML_CTI_SECTION 0x1000 +#define mmTPC5_EML_FUNNEL_BASE 0x7FFFA06000ull +#define TPC5_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC5_EML_FUNNEL_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_0_BASE 0x7FFFA07000ull +#define TPC5_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_1_BASE 0x7FFFA08000ull +#define TPC5_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_2_BASE 0x7FFFA09000ull +#define TPC5_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC5_EML_BUSMON_3_BASE 0x7FFFA0A000ull +#define TPC5_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC5_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC5_EML_CFG_BASE 0x7FFFA40000ull +#define TPC5_EML_CFG_MAX_OFFSET 0x338 +#define TPC5_EML_CFG_SECTION 0x1BF000 +#define mmTPC5_EML_CS_BASE 0x7FFFBFF000ull +#define TPC5_EML_CS_MAX_OFFSET 0x1000 +#define TPC5_EML_CS_SECTION 0x2000 +#define mmTPC6_EML_SPMU_BASE 0x7FFFC01000ull +#define TPC6_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC6_EML_SPMU_SECTION 0x1000 +#define mmTPC6_EML_ETF_BASE 0x7FFFC02000ull +#define TPC6_EML_ETF_MAX_OFFSET 0x1000 +#define TPC6_EML_ETF_SECTION 0x1000 +#define mmTPC6_EML_STM_BASE 0x7FFFC03000ull +#define TPC6_EML_STM_MAX_OFFSET 0x1000 +#define TPC6_EML_STM_SECTION 0x1000 +#define mmTPC6_EML_ETM_R4_BASE 0x7FFFC04000ull +#define TPC6_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC6_EML_ETM_R4_SECTION 0x1000 +#define mmTPC6_EML_CTI_BASE 0x7FFFC05000ull +#define TPC6_EML_CTI_MAX_OFFSET 0x1000 +#define TPC6_EML_CTI_SECTION 0x1000 +#define mmTPC6_EML_FUNNEL_BASE 0x7FFFC06000ull +#define TPC6_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC6_EML_FUNNEL_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_0_BASE 0x7FFFC07000ull +#define TPC6_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_1_BASE 0x7FFFC08000ull +#define TPC6_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_2_BASE 0x7FFFC09000ull +#define TPC6_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC6_EML_BUSMON_3_BASE 0x7FFFC0A000ull +#define TPC6_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC6_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC6_EML_CFG_BASE 0x7FFFC40000ull +#define TPC6_EML_CFG_MAX_OFFSET 0x338 +#define TPC6_EML_CFG_SECTION 0x1BF000 +#define mmTPC6_EML_CS_BASE 0x7FFFDFF000ull +#define TPC6_EML_CS_MAX_OFFSET 0x1000 +#define TPC6_EML_CS_SECTION 0x2000 +#define mmTPC7_EML_SPMU_BASE 0x7FFFE01000ull +#define TPC7_EML_SPMU_MAX_OFFSET 0x1000 +#define TPC7_EML_SPMU_SECTION 0x1000 +#define 
mmTPC7_EML_ETF_BASE 0x7FFFE02000ull +#define TPC7_EML_ETF_MAX_OFFSET 0x1000 +#define TPC7_EML_ETF_SECTION 0x1000 +#define mmTPC7_EML_STM_BASE 0x7FFFE03000ull +#define TPC7_EML_STM_MAX_OFFSET 0x1000 +#define TPC7_EML_STM_SECTION 0x1000 +#define mmTPC7_EML_ETM_R4_BASE 0x7FFFE04000ull +#define TPC7_EML_ETM_R4_MAX_OFFSET 0x0 +#define TPC7_EML_ETM_R4_SECTION 0x1000 +#define mmTPC7_EML_CTI_BASE 0x7FFFE05000ull +#define TPC7_EML_CTI_MAX_OFFSET 0x1000 +#define TPC7_EML_CTI_SECTION 0x1000 +#define mmTPC7_EML_FUNNEL_BASE 0x7FFFE06000ull +#define TPC7_EML_FUNNEL_MAX_OFFSET 0x1000 +#define TPC7_EML_FUNNEL_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_0_BASE 0x7FFFE07000ull +#define TPC7_EML_BUSMON_0_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_0_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_1_BASE 0x7FFFE08000ull +#define TPC7_EML_BUSMON_1_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_1_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_2_BASE 0x7FFFE09000ull +#define TPC7_EML_BUSMON_2_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_2_SECTION 0x1000 +#define mmTPC7_EML_BUSMON_3_BASE 0x7FFFE0A000ull +#define TPC7_EML_BUSMON_3_MAX_OFFSET 0x1000 +#define TPC7_EML_BUSMON_3_SECTION 0x36000 +#define mmTPC7_EML_CFG_BASE 0x7FFFE40000ull +#define TPC7_EML_CFG_MAX_OFFSET 0x338 +#define TPC7_EML_CFG_SECTION 0x1BF000 +#define mmTPC7_EML_CS_BASE 0x7FFFFFF000ull +#define TPC7_EML_CS_MAX_OFFSET 0x1000 + +#endif /* GOYA_BLOCKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h new file mode 100644 index 000000000000..a161ecfe74de --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef ASIC_REG_GOYA_MASKS_H_ +#define ASIC_REG_GOYA_MASKS_H_ + +#include "goya_regs.h" + +/* Useful masks for bits in various registers */ +#define QMAN_DMA_ENABLE (\ + (1 << DMA_QM_0_GLBL_CFG0_PQF_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG0_CP_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG0_DMA_EN_SHIFT)) + +#define QMAN_DMA_FULLY_TRUSTED (\ + (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_DMA_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define QMAN_DMA_PARTLY_TRUSTED (\ + (1 << DMA_QM_0_GLBL_PROT_PQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << DMA_QM_0_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define QMAN_DMA_STOP (\ + (1 << DMA_QM_0_GLBL_CFG1_PQF_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG1_CP_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT)) + +#define QMAN_DMA_IS_STOPPED (\ + (1 << DMA_QM_0_GLBL_STS0_PQF_IS_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_STS0_CQF_IS_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_STS0_CP_IS_STOP_SHIFT) | \ + (1 << DMA_QM_0_GLBL_STS0_DMA_IS_STOP_SHIFT)) + +#define QMAN_DMA_ERR_MSG_EN (\ + (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define QMAN_MME_ENABLE (\ + (1 << MME_QM_GLBL_CFG0_PQF_EN_SHIFT) | \ + (1 << MME_QM_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << MME_QM_GLBL_CFG0_CP_EN_SHIFT)) + +#define CMDQ_MME_ENABLE (\ + (1 << MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT)) + +#define QMAN_MME_STOP (\ + (1 << MME_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \ + (1 << MME_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << MME_QM_GLBL_CFG1_CP_STOP_SHIFT)) + +#define CMDQ_MME_STOP (\ + (1 << MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT)) + +#define QMAN_MME_ERR_MSG_EN (\ + (1 << MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define CMDQ_MME_ERR_MSG_EN (\ + (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << 
MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define QMAN_MME_ERR_PROT (\ + (1 << MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define CMDQ_MME_ERR_PROT (\ + (1 << MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define QMAN_TPC_ENABLE (\ + (1 << TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG0_CP_EN_SHIFT)) + +#define CMDQ_TPC_ENABLE (\ + (1 << TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT)) + +#define QMAN_TPC_STOP (\ + (1 << TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT)) + +#define CMDQ_TPC_STOP (\ + (1 << TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT)) + +#define QMAN_TPC_ERR_MSG_EN (\ + (1 << TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define CMDQ_TPC_ERR_MSG_EN (\ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT)) + +#define QMAN_TPC_ERR_PROT (\ + (1 << TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +#define CMDQ_TPC_ERR_PROT (\ + (1 << TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT) | \ + (1 << TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT)) + +/* RESETS */ +#define DMA_MME_TPC_RESET (\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT) + +#define RESET_ALL (\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT |\ + PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT |\ + 1 << PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT) + +#define CA53_RESET (\ + (~\ + (1 << PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT)\ + ) & 0x7FFFFF) + +#define CPU_RESET_ASSERT (\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT) + +#define CPU_RESET_CORE0_DEASSERT (\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCPUPORESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NCORERESET_SHIFT |\ + 1 << 
CPU_CA53_CFG_ARM_RST_CONTROL_NL2RESET_SHIFT |\ + 1 << CPU_CA53_CFG_ARM_RST_CONTROL_NMBISTRESET_SHIFT) + +/* PCI CONFIGURATION SPACE */ +#define mmPCI_CONFIG_ELBI_ADDR 0xFF0 +#define mmPCI_CONFIG_ELBI_DATA 0xFF4 +#define mmPCI_CONFIG_ELBI_CTRL 0xFF8 +#define PCI_CONFIG_ELBI_CTRL_WRITE (1 << 31) + +#define mmPCI_CONFIG_ELBI_STS 0xFFC +#define PCI_CONFIG_ELBI_STS_ERR (1 << 30) +#define PCI_CONFIG_ELBI_STS_DONE (1 << 31) +#define PCI_CONFIG_ELBI_STS_MASK (PCI_CONFIG_ELBI_STS_ERR | \ + PCI_CONFIG_ELBI_STS_DONE) + +#define GOYA_IRQ_HBW_ID_MASK 0x1FFF +#define GOYA_IRQ_HBW_ID_SHIFT 0 +#define GOYA_IRQ_HBW_INTERNAL_ID_MASK 0xE000 +#define GOYA_IRQ_HBW_INTERNAL_ID_SHIFT 13 +#define GOYA_IRQ_HBW_AGENT_ID_MASK 0x1F0000 +#define GOYA_IRQ_HBW_AGENT_ID_SHIFT 16 +#define GOYA_IRQ_HBW_Y_MASK 0xE00000 +#define GOYA_IRQ_HBW_Y_SHIFT 21 +#define GOYA_IRQ_HBW_X_MASK 0x7000000 +#define GOYA_IRQ_HBW_X_SHIFT 24 +#define GOYA_IRQ_LBW_ID_MASK 0xFF +#define GOYA_IRQ_LBW_ID_SHIFT 0 +#define GOYA_IRQ_LBW_INTERNAL_ID_MASK 0x700 +#define GOYA_IRQ_LBW_INTERNAL_ID_SHIFT 8 +#define GOYA_IRQ_LBW_AGENT_ID_MASK 0xF800 +#define GOYA_IRQ_LBW_AGENT_ID_SHIFT 11 +#define GOYA_IRQ_LBW_Y_MASK 0x70000 +#define GOYA_IRQ_LBW_Y_SHIFT 16 +#define GOYA_IRQ_LBW_X_MASK 0x380000 +#define GOYA_IRQ_LBW_X_SHIFT 19 + +#define DMA_QM_IDLE_MASK (DMA_QM_0_GLBL_STS0_PQF_IDLE_MASK | \ + DMA_QM_0_GLBL_STS0_CQF_IDLE_MASK | \ + DMA_QM_0_GLBL_STS0_CP_IDLE_MASK | \ + DMA_QM_0_GLBL_STS0_DMA_IDLE_MASK) + +#define TPC_QM_IDLE_MASK (TPC0_QM_GLBL_STS0_PQF_IDLE_MASK | \ + TPC0_QM_GLBL_STS0_CQF_IDLE_MASK | \ + TPC0_QM_GLBL_STS0_CP_IDLE_MASK) + +#define TPC_CMDQ_IDLE_MASK (TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \ + TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK) + +#define TPC_CFG_IDLE_MASK (TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \ + TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \ + TPC0_CFG_STATUS_IQ_EMPTY_MASK | \ + TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK) + +#define MME_QM_IDLE_MASK (MME_QM_GLBL_STS0_PQF_IDLE_MASK | \ + MME_QM_GLBL_STS0_CQF_IDLE_MASK | \ + MME_QM_GLBL_STS0_CP_IDLE_MASK) + +#define MME_CMDQ_IDLE_MASK (MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK | \ + MME_CMDQ_GLBL_STS0_CP_IDLE_MASK) + +#define MME_ARCH_IDLE_MASK (MME_ARCH_STATUS_SB_A_EMPTY_MASK | \ + MME_ARCH_STATUS_SB_B_EMPTY_MASK | \ + MME_ARCH_STATUS_SB_CIN_EMPTY_MASK | \ + MME_ARCH_STATUS_SB_COUT_EMPTY_MASK) + +#define MME_SHADOW_IDLE_MASK (MME_SHADOW_0_STATUS_A_MASK | \ + MME_SHADOW_0_STATUS_B_MASK | \ + MME_SHADOW_0_STATUS_CIN_MASK | \ + MME_SHADOW_0_STATUS_COUT_MASK | \ + MME_SHADOW_0_STATUS_TE_MASK | \ + MME_SHADOW_0_STATUS_LD_MASK | \ + MME_SHADOW_0_STATUS_ST_MASK) + +#define TPC1_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC2_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC3_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC4_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC5_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC6_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT +#define TPC7_CFG_TPC_STALL_V_SHIFT TPC0_CFG_TPC_STALL_V_SHIFT + +#define DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT +#define DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT +#define DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT +#define DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT + +#endif /* ASIC_REG_GOYA_MASKS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h new file mode 100644 index 
000000000000..6cb0b6e54d41 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef ASIC_REG_GOYA_REGS_H_ +#define ASIC_REG_GOYA_REGS_H_ + +#include "goya_blocks.h" +#include "stlb_regs.h" +#include "mmu_regs.h" +#include "pcie_aux_regs.h" +#include "psoc_global_conf_regs.h" +#include "psoc_spi_regs.h" +#include "psoc_mme_pll_regs.h" +#include "psoc_pci_pll_regs.h" +#include "psoc_emmc_pll_regs.h" +#include "cpu_if_regs.h" +#include "cpu_ca53_cfg_regs.h" +#include "cpu_pll_regs.h" +#include "ic_pll_regs.h" +#include "mc_pll_regs.h" +#include "tpc_pll_regs.h" +#include "dma_qm_0_regs.h" +#include "dma_qm_1_regs.h" +#include "dma_qm_2_regs.h" +#include "dma_qm_3_regs.h" +#include "dma_qm_4_regs.h" +#include "dma_ch_0_regs.h" +#include "dma_ch_1_regs.h" +#include "dma_ch_2_regs.h" +#include "dma_ch_3_regs.h" +#include "dma_ch_4_regs.h" +#include "dma_macro_regs.h" +#include "dma_nrtr_regs.h" +#include "pci_nrtr_regs.h" +#include "sram_y0_x0_rtr_regs.h" +#include "sram_y0_x1_rtr_regs.h" +#include "sram_y0_x2_rtr_regs.h" +#include "sram_y0_x3_rtr_regs.h" +#include "sram_y0_x4_rtr_regs.h" +#include "mme_regs.h" +#include "mme_qm_regs.h" +#include "mme_cmdq_regs.h" +#include "mme1_rtr_regs.h" +#include "mme2_rtr_regs.h" +#include "mme3_rtr_regs.h" +#include "mme4_rtr_regs.h" +#include "mme5_rtr_regs.h" +#include "mme6_rtr_regs.h" +#include "tpc0_cfg_regs.h" +#include "tpc1_cfg_regs.h" +#include "tpc2_cfg_regs.h" +#include "tpc3_cfg_regs.h" +#include "tpc4_cfg_regs.h" +#include "tpc5_cfg_regs.h" +#include "tpc6_cfg_regs.h" +#include "tpc7_cfg_regs.h" +#include "tpc0_qm_regs.h" +#include "tpc1_qm_regs.h" +#include "tpc2_qm_regs.h" +#include "tpc3_qm_regs.h" +#include "tpc4_qm_regs.h" +#include "tpc5_qm_regs.h" +#include "tpc6_qm_regs.h" +#include "tpc7_qm_regs.h" +#include "tpc0_cmdq_regs.h" +#include "tpc1_cmdq_regs.h" +#include "tpc2_cmdq_regs.h" +#include "tpc3_cmdq_regs.h" +#include "tpc4_cmdq_regs.h" +#include "tpc5_cmdq_regs.h" +#include "tpc6_cmdq_regs.h" +#include "tpc7_cmdq_regs.h" +#include "tpc0_nrtr_regs.h" +#include "tpc1_rtr_regs.h" +#include "tpc2_rtr_regs.h" +#include "tpc3_rtr_regs.h" +#include "tpc4_rtr_regs.h" +#include "tpc5_rtr_regs.h" +#include "tpc6_rtr_regs.h" +#include "tpc7_nrtr_regs.h" +#include "tpc0_eml_cfg_regs.h" + +#include "psoc_global_conf_masks.h" +#include "dma_macro_masks.h" +#include "dma_qm_0_masks.h" +#include "tpc0_qm_masks.h" +#include "tpc0_cmdq_masks.h" +#include "mme_qm_masks.h" +#include "mme_cmdq_masks.h" +#include "tpc0_cfg_masks.h" +#include "tpc0_eml_cfg_masks.h" +#include "mme1_rtr_masks.h" +#include "tpc0_nrtr_masks.h" +#include "dma_nrtr_masks.h" +#include "pci_nrtr_masks.h" +#include "stlb_masks.h" +#include "cpu_ca53_cfg_masks.h" +#include "mmu_masks.h" +#include "mme_masks.h" + +#define mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG 0xC02000 +#define mmPCIE_DBI_MSIX_DOORBELL_OFF 0xC02948 + +#define mmSYNC_MNGR_MON_PAY_ADDRL_0 0x113000 +#define mmSYNC_MNGR_SOB_OBJ_0 0x112000 +#define mmSYNC_MNGR_SOB_OBJ_1000 0x112FA0 +#define mmSYNC_MNGR_SOB_OBJ_1007 0x112FBC +#define mmSYNC_MNGR_SOB_OBJ_1023 0x112FFC +#define mmSYNC_MNGR_MON_STATUS_0 0x114000 +#define mmSYNC_MNGR_MON_STATUS_255 0x1143FC + +#define mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR 0x800040 + +#endif /* ASIC_REG_GOYA_REGS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h 
b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h new file mode 100644 index 000000000000..0a743817aad7 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_IC_PLL_REGS_H_ +#define ASIC_REG_IC_PLL_REGS_H_ + +/* + ***************************************** + * IC_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmIC_PLL_NR 0x4A3100 + +#define mmIC_PLL_NF 0x4A3104 + +#define mmIC_PLL_OD 0x4A3108 + +#define mmIC_PLL_NB 0x4A310C + +#define mmIC_PLL_CFG 0x4A3110 + +#define mmIC_PLL_LOSE_MASK 0x4A3120 + +#define mmIC_PLL_LOCK_INTR 0x4A3128 + +#define mmIC_PLL_LOCK_BYPASS 0x4A312C + +#define mmIC_PLL_DATA_CHNG 0x4A3130 + +#define mmIC_PLL_RST 0x4A3134 + +#define mmIC_PLL_SLIP_WD_CNTR 0x4A3150 + +#define mmIC_PLL_DIV_FACTOR_0 0x4A3200 + +#define mmIC_PLL_DIV_FACTOR_1 0x4A3204 + +#define mmIC_PLL_DIV_FACTOR_2 0x4A3208 + +#define mmIC_PLL_DIV_FACTOR_3 0x4A320C + +#define mmIC_PLL_DIV_FACTOR_CMD_0 0x4A3220 + +#define mmIC_PLL_DIV_FACTOR_CMD_1 0x4A3224 + +#define mmIC_PLL_DIV_FACTOR_CMD_2 0x4A3228 + +#define mmIC_PLL_DIV_FACTOR_CMD_3 0x4A322C + +#define mmIC_PLL_DIV_SEL_0 0x4A3280 + +#define mmIC_PLL_DIV_SEL_1 0x4A3284 + +#define mmIC_PLL_DIV_SEL_2 0x4A3288 + +#define mmIC_PLL_DIV_SEL_3 0x4A328C + +#define mmIC_PLL_DIV_EN_0 0x4A32A0 + +#define mmIC_PLL_DIV_EN_1 0x4A32A4 + +#define mmIC_PLL_DIV_EN_2 0x4A32A8 + +#define mmIC_PLL_DIV_EN_3 0x4A32AC + +#define mmIC_PLL_DIV_FACTOR_BUSY_0 0x4A32C0 + +#define mmIC_PLL_DIV_FACTOR_BUSY_1 0x4A32C4 + +#define mmIC_PLL_DIV_FACTOR_BUSY_2 0x4A32C8 + +#define mmIC_PLL_DIV_FACTOR_BUSY_3 0x4A32CC + +#define mmIC_PLL_CLK_GATER 0x4A3300 + +#define mmIC_PLL_CLK_RLX_0 0x4A3310 + +#define mmIC_PLL_CLK_RLX_1 0x4A3314 + +#define mmIC_PLL_CLK_RLX_2 0x4A3318 + +#define mmIC_PLL_CLK_RLX_3 0x4A331C + +#define mmIC_PLL_REF_CNTR_PERIOD 0x4A3400 + +#define mmIC_PLL_REF_LOW_THRESHOLD 0x4A3410 + +#define mmIC_PLL_REF_HIGH_THRESHOLD 0x4A3420 + +#define mmIC_PLL_PLL_NOT_STABLE 0x4A3430 + +#define mmIC_PLL_FREQ_CALC_EN 0x4A3440 + +#endif /* ASIC_REG_IC_PLL_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h new file mode 100644 index 000000000000..4408188aa067 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MC_PLL_REGS_H_ +#define ASIC_REG_MC_PLL_REGS_H_ + +/* + ***************************************** + * MC_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmMC_PLL_NR 0x4A1100 + +#define mmMC_PLL_NF 0x4A1104 + +#define mmMC_PLL_OD 0x4A1108 + +#define mmMC_PLL_NB 0x4A110C + +#define mmMC_PLL_CFG 0x4A1110 + +#define mmMC_PLL_LOSE_MASK 0x4A1120 + +#define mmMC_PLL_LOCK_INTR 0x4A1128 + +#define mmMC_PLL_LOCK_BYPASS 0x4A112C + +#define mmMC_PLL_DATA_CHNG 0x4A1130 + +#define mmMC_PLL_RST 0x4A1134 + +#define mmMC_PLL_SLIP_WD_CNTR 0x4A1150 + +#define mmMC_PLL_DIV_FACTOR_0 0x4A1200 + +#define mmMC_PLL_DIV_FACTOR_1 0x4A1204 + +#define mmMC_PLL_DIV_FACTOR_2 0x4A1208 + +#define mmMC_PLL_DIV_FACTOR_3 0x4A120C + +#define mmMC_PLL_DIV_FACTOR_CMD_0 0x4A1220 + +#define mmMC_PLL_DIV_FACTOR_CMD_1 0x4A1224 + +#define mmMC_PLL_DIV_FACTOR_CMD_2 0x4A1228 + +#define mmMC_PLL_DIV_FACTOR_CMD_3 0x4A122C + +#define mmMC_PLL_DIV_SEL_0 0x4A1280 + +#define mmMC_PLL_DIV_SEL_1 0x4A1284 + +#define mmMC_PLL_DIV_SEL_2 0x4A1288 + +#define mmMC_PLL_DIV_SEL_3 0x4A128C + +#define mmMC_PLL_DIV_EN_0 0x4A12A0 + +#define mmMC_PLL_DIV_EN_1 0x4A12A4 + +#define mmMC_PLL_DIV_EN_2 0x4A12A8 + +#define mmMC_PLL_DIV_EN_3 0x4A12AC + +#define mmMC_PLL_DIV_FACTOR_BUSY_0 0x4A12C0 + +#define mmMC_PLL_DIV_FACTOR_BUSY_1 0x4A12C4 + +#define mmMC_PLL_DIV_FACTOR_BUSY_2 0x4A12C8 + +#define mmMC_PLL_DIV_FACTOR_BUSY_3 0x4A12CC + +#define mmMC_PLL_CLK_GATER 0x4A1300 + +#define mmMC_PLL_CLK_RLX_0 0x4A1310 + +#define mmMC_PLL_CLK_RLX_1 0x4A1314 + +#define mmMC_PLL_CLK_RLX_2 0x4A1318 + +#define mmMC_PLL_CLK_RLX_3 0x4A131C + +#define mmMC_PLL_REF_CNTR_PERIOD 0x4A1400 + +#define mmMC_PLL_REF_LOW_THRESHOLD 0x4A1410 + +#define mmMC_PLL_REF_HIGH_THRESHOLD 0x4A1420 + +#define mmMC_PLL_PLL_NOT_STABLE 0x4A1430 + +#define mmMC_PLL_FREQ_CALC_EN 0x4A1440 + +#endif /* ASIC_REG_MC_PLL_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h new file mode 100644 index 000000000000..687bca5c5fe3 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h @@ -0,0 +1,653 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME1_RTR_MASKS_H_ +#define ASIC_REG_MME1_RTR_MASKS_H_ + +/* + ***************************************** + * MME1_RTR (Prototype: MME_RTR) + ***************************************** + */ + +/* MME1_RTR_HBW_RD_RQ_E_ARB */ +#define MME1_RTR_HBW_RD_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_W_ARB */ +#define MME1_RTR_HBW_RD_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_N_ARB */ +#define MME1_RTR_HBW_RD_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_S_ARB */ +#define MME1_RTR_HBW_RD_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_S_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RQ_L_ARB */ +#define MME1_RTR_HBW_RD_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_RD_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_HBW_E_ARB_MAX */ +#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_E_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_W_ARB_MAX */ +#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_W_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_N_ARB_MAX */ +#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_N_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_S_ARB_MAX */ +#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_S_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_L_ARB_MAX */ +#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_HBW_L_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_HBW_RD_RS_MAX_CREDIT */ +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_A_MASK 0x3F +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_MAX_CREDIT_B_MASK 0x3F00 + +/* MME1_RTR_HBW_WR_RQ_MAX_CREDIT */ +#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_MAX_CREDIT_VAL_MASK 0x3F + +/* MME1_RTR_HBW_RD_RQ_MAX_CREDIT */ +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_SHIFT 0 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_A_MASK 0x3F +#define 
MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_SHIFT 8 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_B_MASK 0x3F00 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_SHIFT 16 +#define MME1_RTR_HBW_RD_RQ_MAX_CREDIT_IC_MASK 0x3F0000 + +/* MME1_RTR_HBW_RD_RS_E_ARB */ +#define MME1_RTR_HBW_RD_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_W_ARB */ +#define MME1_RTR_HBW_RD_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_N_ARB */ +#define MME1_RTR_HBW_RD_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_S_ARB */ +#define MME1_RTR_HBW_RD_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_RD_RS_L_ARB */ +#define MME1_RTR_HBW_RD_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_RD_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_RD_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_RD_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_RD_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_RD_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_RD_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_RD_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_E_ARB */ +#define MME1_RTR_HBW_WR_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_W_ARB */ +#define MME1_RTR_HBW_WR_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_N_ARB */ +#define MME1_RTR_HBW_WR_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_S_ARB */ +#define MME1_RTR_HBW_WR_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_S_ARB_W_MASK 0x7 
+#define MME1_RTR_HBW_WR_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RQ_L_ARB */ +#define MME1_RTR_HBW_WR_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_WR_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_E_ARB */ +#define MME1_RTR_HBW_WR_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_W_ARB */ +#define MME1_RTR_HBW_WR_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_N_ARB */ +#define MME1_RTR_HBW_WR_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_N_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_S_ARB */ +#define MME1_RTR_HBW_WR_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_S_ARB_N_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_HBW_WR_RS_L_ARB */ +#define MME1_RTR_HBW_WR_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_HBW_WR_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_HBW_WR_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_HBW_WR_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_HBW_WR_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_HBW_WR_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_HBW_WR_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_HBW_WR_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_E_ARB */ +#define MME1_RTR_LBW_RD_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_W_ARB */ +#define MME1_RTR_LBW_RD_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_N_ARB */ 
+#define MME1_RTR_LBW_RD_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_S_ARB */ +#define MME1_RTR_LBW_RD_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RQ_L_ARB */ +#define MME1_RTR_LBW_RD_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_RD_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_E_ARB_MAX */ +#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_E_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_W_ARB_MAX */ +#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_W_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_N_ARB_MAX */ +#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_N_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_S_ARB_MAX */ +#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_S_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_L_ARB_MAX */ +#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_LBW_L_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_LBW_SRAM_MAX_CREDIT */ +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_SHIFT 0 +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_MSTR_MASK 0x3F +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_SHIFT 8 +#define MME1_RTR_LBW_SRAM_MAX_CREDIT_SLV_MASK 0x3F00 + +/* MME1_RTR_LBW_RD_RS_E_ARB */ +#define MME1_RTR_LBW_RD_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_W_ARB */ +#define MME1_RTR_LBW_RD_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_N_ARB */ +#define MME1_RTR_LBW_RD_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_N_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_S_ARB */ +#define MME1_RTR_LBW_RD_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_S_ARB_N_SHIFT 16 +#define 
MME1_RTR_LBW_RD_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_RD_RS_L_ARB */ +#define MME1_RTR_LBW_RD_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_RD_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_RD_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_RD_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_RD_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_RD_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_RD_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_RD_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_E_ARB */ +#define MME1_RTR_LBW_WR_RQ_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_W_ARB */ +#define MME1_RTR_LBW_WR_RQ_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_N_ARB */ +#define MME1_RTR_LBW_WR_RQ_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_N_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_S_ARB */ +#define MME1_RTR_LBW_WR_RQ_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_S_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RQ_L_ARB */ +#define MME1_RTR_LBW_WR_RQ_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RQ_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RQ_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RQ_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RQ_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RQ_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RQ_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_WR_RQ_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_E_ARB */ +#define MME1_RTR_LBW_WR_RS_E_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_E_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_E_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_E_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_E_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_E_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_E_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_W_ARB */ +#define MME1_RTR_LBW_WR_RS_W_ARB_E_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_W_ARB_E_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_W_ARB_S_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_W_ARB_S_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_W_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_W_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_W_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_N_ARB */ +#define MME1_RTR_LBW_WR_RS_N_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_N_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_N_ARB_E_SHIFT 8 +#define 
MME1_RTR_LBW_WR_RS_N_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_N_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_N_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_N_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_S_ARB */ +#define MME1_RTR_LBW_WR_RS_S_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_S_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_S_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_S_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_S_ARB_N_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_S_ARB_N_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_S_ARB_L_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_LBW_WR_RS_L_ARB */ +#define MME1_RTR_LBW_WR_RS_L_ARB_W_SHIFT 0 +#define MME1_RTR_LBW_WR_RS_L_ARB_W_MASK 0x7 +#define MME1_RTR_LBW_WR_RS_L_ARB_E_SHIFT 8 +#define MME1_RTR_LBW_WR_RS_L_ARB_E_MASK 0x700 +#define MME1_RTR_LBW_WR_RS_L_ARB_S_SHIFT 16 +#define MME1_RTR_LBW_WR_RS_L_ARB_S_MASK 0x70000 +#define MME1_RTR_LBW_WR_RS_L_ARB_N_SHIFT 24 +#define MME1_RTR_LBW_WR_RS_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_DBG_E_ARB */ +#define MME1_RTR_DBG_E_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_E_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_E_ARB_S_SHIFT 8 +#define MME1_RTR_DBG_E_ARB_S_MASK 0x700 +#define MME1_RTR_DBG_E_ARB_N_SHIFT 16 +#define MME1_RTR_DBG_E_ARB_N_MASK 0x70000 +#define MME1_RTR_DBG_E_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_E_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_W_ARB */ +#define MME1_RTR_DBG_W_ARB_E_SHIFT 0 +#define MME1_RTR_DBG_W_ARB_E_MASK 0x7 +#define MME1_RTR_DBG_W_ARB_S_SHIFT 8 +#define MME1_RTR_DBG_W_ARB_S_MASK 0x700 +#define MME1_RTR_DBG_W_ARB_N_SHIFT 16 +#define MME1_RTR_DBG_W_ARB_N_MASK 0x70000 +#define MME1_RTR_DBG_W_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_W_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_N_ARB */ +#define MME1_RTR_DBG_N_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_N_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_N_ARB_E_SHIFT 8 +#define MME1_RTR_DBG_N_ARB_E_MASK 0x700 +#define MME1_RTR_DBG_N_ARB_S_SHIFT 16 +#define MME1_RTR_DBG_N_ARB_S_MASK 0x70000 +#define MME1_RTR_DBG_N_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_N_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_S_ARB */ +#define MME1_RTR_DBG_S_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_S_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_S_ARB_E_SHIFT 8 +#define MME1_RTR_DBG_S_ARB_E_MASK 0x700 +#define MME1_RTR_DBG_S_ARB_N_SHIFT 16 +#define MME1_RTR_DBG_S_ARB_N_MASK 0x70000 +#define MME1_RTR_DBG_S_ARB_L_SHIFT 24 +#define MME1_RTR_DBG_S_ARB_L_MASK 0x7000000 + +/* MME1_RTR_DBG_L_ARB */ +#define MME1_RTR_DBG_L_ARB_W_SHIFT 0 +#define MME1_RTR_DBG_L_ARB_W_MASK 0x7 +#define MME1_RTR_DBG_L_ARB_E_SHIFT 8 +#define MME1_RTR_DBG_L_ARB_E_MASK 0x700 +#define MME1_RTR_DBG_L_ARB_S_SHIFT 16 +#define MME1_RTR_DBG_L_ARB_S_MASK 0x70000 +#define MME1_RTR_DBG_L_ARB_N_SHIFT 24 +#define MME1_RTR_DBG_L_ARB_N_MASK 0x7000000 + +/* MME1_RTR_DBG_E_ARB_MAX */ +#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_W_ARB_MAX */ +#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_N_ARB_MAX */ +#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_S_ARB_MAX */ +#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_DBG_L_ARB_MAX */ +#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0 +#define MME1_RTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F + +/* MME1_RTR_SPLIT_COEF */ +#define MME1_RTR_SPLIT_COEF_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_COEF_VAL_MASK 0xFFFF + +/* 
MME1_RTR_SPLIT_CFG */ +#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0 +#define MME1_RTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1 +#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1 +#define MME1_RTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2 +#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2 +#define MME1_RTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC +#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 4 +#define MME1_RTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x10 +#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 5 +#define MME1_RTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x20 +#define MME1_RTR_SPLIT_CFG_B2B_OPT_SHIFT 6 +#define MME1_RTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0 + +/* MME1_RTR_SPLIT_RD_SAT */ +#define MME1_RTR_SPLIT_RD_SAT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF + +/* MME1_RTR_SPLIT_RD_RST_TOKEN */ +#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME1_RTR_SPLIT_RD_TIMEOUT */ +#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_SPLIT_WR_SAT */ +#define MME1_RTR_SPLIT_WR_SAT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF + +/* MME1_RTR_WPLIT_WR_TST_TOLEN */ +#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0 +#define MME1_RTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF + +/* MME1_RTR_SPLIT_WR_TIMEOUT */ +#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0 +#define MME1_RTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_HBW_RANGE_HIT */ +#define MME1_RTR_HBW_RANGE_HIT_IND_SHIFT 0 +#define MME1_RTR_HBW_RANGE_HIT_IND_MASK 0xFF + +/* MME1_RTR_HBW_RANGE_MASK_L */ +#define MME1_RTR_HBW_RANGE_MASK_L_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_HBW_RANGE_MASK_H */ +#define MME1_RTR_HBW_RANGE_MASK_H_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF + +/* MME1_RTR_HBW_RANGE_BASE_L */ +#define MME1_RTR_HBW_RANGE_BASE_L_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF + +/* MME1_RTR_HBW_RANGE_BASE_H */ +#define MME1_RTR_HBW_RANGE_BASE_H_VAL_SHIFT 0 +#define MME1_RTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF + +/* MME1_RTR_LBW_RANGE_HIT */ +#define MME1_RTR_LBW_RANGE_HIT_IND_SHIFT 0 +#define MME1_RTR_LBW_RANGE_HIT_IND_MASK 0xFFFF + +/* MME1_RTR_LBW_RANGE_MASK */ +#define MME1_RTR_LBW_RANGE_MASK_VAL_SHIFT 0 +#define MME1_RTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF + +/* MME1_RTR_LBW_RANGE_BASE */ +#define MME1_RTR_LBW_RANGE_BASE_VAL_SHIFT 0 +#define MME1_RTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF + +/* MME1_RTR_RGLTR */ +#define MME1_RTR_RGLTR_WR_EN_SHIFT 0 +#define MME1_RTR_RGLTR_WR_EN_MASK 0x1 +#define MME1_RTR_RGLTR_RD_EN_SHIFT 4 +#define MME1_RTR_RGLTR_RD_EN_MASK 0x10 + +/* MME1_RTR_RGLTR_WR_RESULT */ +#define MME1_RTR_RGLTR_WR_RESULT_VAL_SHIFT 0 +#define MME1_RTR_RGLTR_WR_RESULT_VAL_MASK 0xFF + +/* MME1_RTR_RGLTR_RD_RESULT */ +#define MME1_RTR_RGLTR_RD_RESULT_VAL_SHIFT 0 +#define MME1_RTR_RGLTR_RD_RESULT_VAL_MASK 0xFF + +/* MME1_RTR_SCRAMB_EN */ +#define MME1_RTR_SCRAMB_EN_VAL_SHIFT 0 +#define MME1_RTR_SCRAMB_EN_VAL_MASK 0x1 + +/* MME1_RTR_NON_LIN_SCRAMB */ +#define MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT 0 +#define MME1_RTR_NON_LIN_SCRAMB_EN_MASK 0x1 + +#endif /* ASIC_REG_MME1_RTR_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h new file mode 100644 index 000000000000..c248339a1cbe --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 
HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME1_RTR_REGS_H_ +#define ASIC_REG_MME1_RTR_REGS_H_ + +/* + ***************************************** + * MME1_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME1_RTR_HBW_RD_RQ_E_ARB 0x40100 + +#define mmMME1_RTR_HBW_RD_RQ_W_ARB 0x40104 + +#define mmMME1_RTR_HBW_RD_RQ_N_ARB 0x40108 + +#define mmMME1_RTR_HBW_RD_RQ_S_ARB 0x4010C + +#define mmMME1_RTR_HBW_RD_RQ_L_ARB 0x40110 + +#define mmMME1_RTR_HBW_E_ARB_MAX 0x40120 + +#define mmMME1_RTR_HBW_W_ARB_MAX 0x40124 + +#define mmMME1_RTR_HBW_N_ARB_MAX 0x40128 + +#define mmMME1_RTR_HBW_S_ARB_MAX 0x4012C + +#define mmMME1_RTR_HBW_L_ARB_MAX 0x40130 + +#define mmMME1_RTR_HBW_RD_RS_MAX_CREDIT 0x40140 + +#define mmMME1_RTR_HBW_WR_RQ_MAX_CREDIT 0x40144 + +#define mmMME1_RTR_HBW_RD_RQ_MAX_CREDIT 0x40148 + +#define mmMME1_RTR_HBW_RD_RS_E_ARB 0x40150 + +#define mmMME1_RTR_HBW_RD_RS_W_ARB 0x40154 + +#define mmMME1_RTR_HBW_RD_RS_N_ARB 0x40158 + +#define mmMME1_RTR_HBW_RD_RS_S_ARB 0x4015C + +#define mmMME1_RTR_HBW_RD_RS_L_ARB 0x40160 + +#define mmMME1_RTR_HBW_WR_RQ_E_ARB 0x40170 + +#define mmMME1_RTR_HBW_WR_RQ_W_ARB 0x40174 + +#define mmMME1_RTR_HBW_WR_RQ_N_ARB 0x40178 + +#define mmMME1_RTR_HBW_WR_RQ_S_ARB 0x4017C + +#define mmMME1_RTR_HBW_WR_RQ_L_ARB 0x40180 + +#define mmMME1_RTR_HBW_WR_RS_E_ARB 0x40190 + +#define mmMME1_RTR_HBW_WR_RS_W_ARB 0x40194 + +#define mmMME1_RTR_HBW_WR_RS_N_ARB 0x40198 + +#define mmMME1_RTR_HBW_WR_RS_S_ARB 0x4019C + +#define mmMME1_RTR_HBW_WR_RS_L_ARB 0x401A0 + +#define mmMME1_RTR_LBW_RD_RQ_E_ARB 0x40200 + +#define mmMME1_RTR_LBW_RD_RQ_W_ARB 0x40204 + +#define mmMME1_RTR_LBW_RD_RQ_N_ARB 0x40208 + +#define mmMME1_RTR_LBW_RD_RQ_S_ARB 0x4020C + +#define mmMME1_RTR_LBW_RD_RQ_L_ARB 0x40210 + +#define mmMME1_RTR_LBW_E_ARB_MAX 0x40220 + +#define mmMME1_RTR_LBW_W_ARB_MAX 0x40224 + +#define mmMME1_RTR_LBW_N_ARB_MAX 0x40228 + +#define mmMME1_RTR_LBW_S_ARB_MAX 0x4022C + +#define mmMME1_RTR_LBW_L_ARB_MAX 0x40230 + +#define mmMME1_RTR_LBW_SRAM_MAX_CREDIT 0x40240 + +#define mmMME1_RTR_LBW_RD_RS_E_ARB 0x40250 + +#define mmMME1_RTR_LBW_RD_RS_W_ARB 0x40254 + +#define mmMME1_RTR_LBW_RD_RS_N_ARB 0x40258 + +#define mmMME1_RTR_LBW_RD_RS_S_ARB 0x4025C + +#define mmMME1_RTR_LBW_RD_RS_L_ARB 0x40260 + +#define mmMME1_RTR_LBW_WR_RQ_E_ARB 0x40270 + +#define mmMME1_RTR_LBW_WR_RQ_W_ARB 0x40274 + +#define mmMME1_RTR_LBW_WR_RQ_N_ARB 0x40278 + +#define mmMME1_RTR_LBW_WR_RQ_S_ARB 0x4027C + +#define mmMME1_RTR_LBW_WR_RQ_L_ARB 0x40280 + +#define mmMME1_RTR_LBW_WR_RS_E_ARB 0x40290 + +#define mmMME1_RTR_LBW_WR_RS_W_ARB 0x40294 + +#define mmMME1_RTR_LBW_WR_RS_N_ARB 0x40298 + +#define mmMME1_RTR_LBW_WR_RS_S_ARB 0x4029C + +#define mmMME1_RTR_LBW_WR_RS_L_ARB 0x402A0 + +#define mmMME1_RTR_DBG_E_ARB 0x40300 + +#define mmMME1_RTR_DBG_W_ARB 0x40304 + +#define mmMME1_RTR_DBG_N_ARB 0x40308 + +#define mmMME1_RTR_DBG_S_ARB 0x4030C + +#define mmMME1_RTR_DBG_L_ARB 0x40310 + +#define mmMME1_RTR_DBG_E_ARB_MAX 0x40320 + +#define mmMME1_RTR_DBG_W_ARB_MAX 0x40324 + +#define mmMME1_RTR_DBG_N_ARB_MAX 0x40328 + +#define mmMME1_RTR_DBG_S_ARB_MAX 0x4032C + +#define mmMME1_RTR_DBG_L_ARB_MAX 0x40330 + +#define mmMME1_RTR_SPLIT_COEF_0 0x40400 + +#define mmMME1_RTR_SPLIT_COEF_1 0x40404 + +#define mmMME1_RTR_SPLIT_COEF_2 0x40408 + +#define mmMME1_RTR_SPLIT_COEF_3 0x4040C + +#define mmMME1_RTR_SPLIT_COEF_4 0x40410 + +#define mmMME1_RTR_SPLIT_COEF_5 
0x40414 + +#define mmMME1_RTR_SPLIT_COEF_6 0x40418 + +#define mmMME1_RTR_SPLIT_COEF_7 0x4041C + +#define mmMME1_RTR_SPLIT_COEF_8 0x40420 + +#define mmMME1_RTR_SPLIT_COEF_9 0x40424 + +#define mmMME1_RTR_SPLIT_CFG 0x40440 + +#define mmMME1_RTR_SPLIT_RD_SAT 0x40444 + +#define mmMME1_RTR_SPLIT_RD_RST_TOKEN 0x40448 + +#define mmMME1_RTR_SPLIT_RD_TIMEOUT_0 0x4044C + +#define mmMME1_RTR_SPLIT_RD_TIMEOUT_1 0x40450 + +#define mmMME1_RTR_SPLIT_WR_SAT 0x40454 + +#define mmMME1_RTR_WPLIT_WR_TST_TOLEN 0x40458 + +#define mmMME1_RTR_SPLIT_WR_TIMEOUT_0 0x4045C + +#define mmMME1_RTR_SPLIT_WR_TIMEOUT_1 0x40460 + +#define mmMME1_RTR_HBW_RANGE_HIT 0x40470 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_0 0x40480 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_1 0x40484 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_2 0x40488 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_3 0x4048C + +#define mmMME1_RTR_HBW_RANGE_MASK_L_4 0x40490 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_5 0x40494 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_6 0x40498 + +#define mmMME1_RTR_HBW_RANGE_MASK_L_7 0x4049C + +#define mmMME1_RTR_HBW_RANGE_MASK_H_0 0x404A0 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_1 0x404A4 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_2 0x404A8 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_3 0x404AC + +#define mmMME1_RTR_HBW_RANGE_MASK_H_4 0x404B0 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_5 0x404B4 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_6 0x404B8 + +#define mmMME1_RTR_HBW_RANGE_MASK_H_7 0x404BC + +#define mmMME1_RTR_HBW_RANGE_BASE_L_0 0x404C0 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_1 0x404C4 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_2 0x404C8 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_3 0x404CC + +#define mmMME1_RTR_HBW_RANGE_BASE_L_4 0x404D0 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_5 0x404D4 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_6 0x404D8 + +#define mmMME1_RTR_HBW_RANGE_BASE_L_7 0x404DC + +#define mmMME1_RTR_HBW_RANGE_BASE_H_0 0x404E0 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_1 0x404E4 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_2 0x404E8 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_3 0x404EC + +#define mmMME1_RTR_HBW_RANGE_BASE_H_4 0x404F0 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_5 0x404F4 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_6 0x404F8 + +#define mmMME1_RTR_HBW_RANGE_BASE_H_7 0x404FC + +#define mmMME1_RTR_LBW_RANGE_HIT 0x40500 + +#define mmMME1_RTR_LBW_RANGE_MASK_0 0x40510 + +#define mmMME1_RTR_LBW_RANGE_MASK_1 0x40514 + +#define mmMME1_RTR_LBW_RANGE_MASK_2 0x40518 + +#define mmMME1_RTR_LBW_RANGE_MASK_3 0x4051C + +#define mmMME1_RTR_LBW_RANGE_MASK_4 0x40520 + +#define mmMME1_RTR_LBW_RANGE_MASK_5 0x40524 + +#define mmMME1_RTR_LBW_RANGE_MASK_6 0x40528 + +#define mmMME1_RTR_LBW_RANGE_MASK_7 0x4052C + +#define mmMME1_RTR_LBW_RANGE_MASK_8 0x40530 + +#define mmMME1_RTR_LBW_RANGE_MASK_9 0x40534 + +#define mmMME1_RTR_LBW_RANGE_MASK_10 0x40538 + +#define mmMME1_RTR_LBW_RANGE_MASK_11 0x4053C + +#define mmMME1_RTR_LBW_RANGE_MASK_12 0x40540 + +#define mmMME1_RTR_LBW_RANGE_MASK_13 0x40544 + +#define mmMME1_RTR_LBW_RANGE_MASK_14 0x40548 + +#define mmMME1_RTR_LBW_RANGE_MASK_15 0x4054C + +#define mmMME1_RTR_LBW_RANGE_BASE_0 0x40550 + +#define mmMME1_RTR_LBW_RANGE_BASE_1 0x40554 + +#define mmMME1_RTR_LBW_RANGE_BASE_2 0x40558 + +#define mmMME1_RTR_LBW_RANGE_BASE_3 0x4055C + +#define mmMME1_RTR_LBW_RANGE_BASE_4 0x40560 + +#define mmMME1_RTR_LBW_RANGE_BASE_5 0x40564 + +#define mmMME1_RTR_LBW_RANGE_BASE_6 0x40568 + +#define mmMME1_RTR_LBW_RANGE_BASE_7 0x4056C + +#define mmMME1_RTR_LBW_RANGE_BASE_8 0x40570 + +#define mmMME1_RTR_LBW_RANGE_BASE_9 0x40574 + +#define mmMME1_RTR_LBW_RANGE_BASE_10 0x40578 + +#define 
mmMME1_RTR_LBW_RANGE_BASE_11 0x4057C + +#define mmMME1_RTR_LBW_RANGE_BASE_12 0x40580 + +#define mmMME1_RTR_LBW_RANGE_BASE_13 0x40584 + +#define mmMME1_RTR_LBW_RANGE_BASE_14 0x40588 + +#define mmMME1_RTR_LBW_RANGE_BASE_15 0x4058C + +#define mmMME1_RTR_RGLTR 0x40590 + +#define mmMME1_RTR_RGLTR_WR_RESULT 0x40594 + +#define mmMME1_RTR_RGLTR_RD_RESULT 0x40598 + +#define mmMME1_RTR_SCRAMB_EN 0x40600 + +#define mmMME1_RTR_NON_LIN_SCRAMB 0x40604 + +#endif /* ASIC_REG_MME1_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h new file mode 100644 index 000000000000..7a2b777bdc4f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME2_RTR_REGS_H_ +#define ASIC_REG_MME2_RTR_REGS_H_ + +/* + ***************************************** + * MME2_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME2_RTR_HBW_RD_RQ_E_ARB 0x80100 + +#define mmMME2_RTR_HBW_RD_RQ_W_ARB 0x80104 + +#define mmMME2_RTR_HBW_RD_RQ_N_ARB 0x80108 + +#define mmMME2_RTR_HBW_RD_RQ_S_ARB 0x8010C + +#define mmMME2_RTR_HBW_RD_RQ_L_ARB 0x80110 + +#define mmMME2_RTR_HBW_E_ARB_MAX 0x80120 + +#define mmMME2_RTR_HBW_W_ARB_MAX 0x80124 + +#define mmMME2_RTR_HBW_N_ARB_MAX 0x80128 + +#define mmMME2_RTR_HBW_S_ARB_MAX 0x8012C + +#define mmMME2_RTR_HBW_L_ARB_MAX 0x80130 + +#define mmMME2_RTR_HBW_RD_RS_MAX_CREDIT 0x80140 + +#define mmMME2_RTR_HBW_WR_RQ_MAX_CREDIT 0x80144 + +#define mmMME2_RTR_HBW_RD_RQ_MAX_CREDIT 0x80148 + +#define mmMME2_RTR_HBW_RD_RS_E_ARB 0x80150 + +#define mmMME2_RTR_HBW_RD_RS_W_ARB 0x80154 + +#define mmMME2_RTR_HBW_RD_RS_N_ARB 0x80158 + +#define mmMME2_RTR_HBW_RD_RS_S_ARB 0x8015C + +#define mmMME2_RTR_HBW_RD_RS_L_ARB 0x80160 + +#define mmMME2_RTR_HBW_WR_RQ_E_ARB 0x80170 + +#define mmMME2_RTR_HBW_WR_RQ_W_ARB 0x80174 + +#define mmMME2_RTR_HBW_WR_RQ_N_ARB 0x80178 + +#define mmMME2_RTR_HBW_WR_RQ_S_ARB 0x8017C + +#define mmMME2_RTR_HBW_WR_RQ_L_ARB 0x80180 + +#define mmMME2_RTR_HBW_WR_RS_E_ARB 0x80190 + +#define mmMME2_RTR_HBW_WR_RS_W_ARB 0x80194 + +#define mmMME2_RTR_HBW_WR_RS_N_ARB 0x80198 + +#define mmMME2_RTR_HBW_WR_RS_S_ARB 0x8019C + +#define mmMME2_RTR_HBW_WR_RS_L_ARB 0x801A0 + +#define mmMME2_RTR_LBW_RD_RQ_E_ARB 0x80200 + +#define mmMME2_RTR_LBW_RD_RQ_W_ARB 0x80204 + +#define mmMME2_RTR_LBW_RD_RQ_N_ARB 0x80208 + +#define mmMME2_RTR_LBW_RD_RQ_S_ARB 0x8020C + +#define mmMME2_RTR_LBW_RD_RQ_L_ARB 0x80210 + +#define mmMME2_RTR_LBW_E_ARB_MAX 0x80220 + +#define mmMME2_RTR_LBW_W_ARB_MAX 0x80224 + +#define mmMME2_RTR_LBW_N_ARB_MAX 0x80228 + +#define mmMME2_RTR_LBW_S_ARB_MAX 0x8022C + +#define mmMME2_RTR_LBW_L_ARB_MAX 0x80230 + +#define mmMME2_RTR_LBW_SRAM_MAX_CREDIT 0x80240 + +#define mmMME2_RTR_LBW_RD_RS_E_ARB 0x80250 + +#define mmMME2_RTR_LBW_RD_RS_W_ARB 0x80254 + +#define mmMME2_RTR_LBW_RD_RS_N_ARB 0x80258 + +#define mmMME2_RTR_LBW_RD_RS_S_ARB 0x8025C + +#define mmMME2_RTR_LBW_RD_RS_L_ARB 0x80260 + +#define mmMME2_RTR_LBW_WR_RQ_E_ARB 0x80270 + +#define mmMME2_RTR_LBW_WR_RQ_W_ARB 0x80274 + +#define mmMME2_RTR_LBW_WR_RQ_N_ARB 0x80278 + +#define mmMME2_RTR_LBW_WR_RQ_S_ARB 0x8027C + +#define mmMME2_RTR_LBW_WR_RQ_L_ARB 0x80280 + +#define mmMME2_RTR_LBW_WR_RS_E_ARB 0x80290 + 
+#define mmMME2_RTR_LBW_WR_RS_W_ARB 0x80294 + +#define mmMME2_RTR_LBW_WR_RS_N_ARB 0x80298 + +#define mmMME2_RTR_LBW_WR_RS_S_ARB 0x8029C + +#define mmMME2_RTR_LBW_WR_RS_L_ARB 0x802A0 + +#define mmMME2_RTR_DBG_E_ARB 0x80300 + +#define mmMME2_RTR_DBG_W_ARB 0x80304 + +#define mmMME2_RTR_DBG_N_ARB 0x80308 + +#define mmMME2_RTR_DBG_S_ARB 0x8030C + +#define mmMME2_RTR_DBG_L_ARB 0x80310 + +#define mmMME2_RTR_DBG_E_ARB_MAX 0x80320 + +#define mmMME2_RTR_DBG_W_ARB_MAX 0x80324 + +#define mmMME2_RTR_DBG_N_ARB_MAX 0x80328 + +#define mmMME2_RTR_DBG_S_ARB_MAX 0x8032C + +#define mmMME2_RTR_DBG_L_ARB_MAX 0x80330 + +#define mmMME2_RTR_SPLIT_COEF_0 0x80400 + +#define mmMME2_RTR_SPLIT_COEF_1 0x80404 + +#define mmMME2_RTR_SPLIT_COEF_2 0x80408 + +#define mmMME2_RTR_SPLIT_COEF_3 0x8040C + +#define mmMME2_RTR_SPLIT_COEF_4 0x80410 + +#define mmMME2_RTR_SPLIT_COEF_5 0x80414 + +#define mmMME2_RTR_SPLIT_COEF_6 0x80418 + +#define mmMME2_RTR_SPLIT_COEF_7 0x8041C + +#define mmMME2_RTR_SPLIT_COEF_8 0x80420 + +#define mmMME2_RTR_SPLIT_COEF_9 0x80424 + +#define mmMME2_RTR_SPLIT_CFG 0x80440 + +#define mmMME2_RTR_SPLIT_RD_SAT 0x80444 + +#define mmMME2_RTR_SPLIT_RD_RST_TOKEN 0x80448 + +#define mmMME2_RTR_SPLIT_RD_TIMEOUT_0 0x8044C + +#define mmMME2_RTR_SPLIT_RD_TIMEOUT_1 0x80450 + +#define mmMME2_RTR_SPLIT_WR_SAT 0x80454 + +#define mmMME2_RTR_WPLIT_WR_TST_TOLEN 0x80458 + +#define mmMME2_RTR_SPLIT_WR_TIMEOUT_0 0x8045C + +#define mmMME2_RTR_SPLIT_WR_TIMEOUT_1 0x80460 + +#define mmMME2_RTR_HBW_RANGE_HIT 0x80470 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_0 0x80480 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_1 0x80484 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_2 0x80488 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_3 0x8048C + +#define mmMME2_RTR_HBW_RANGE_MASK_L_4 0x80490 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_5 0x80494 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_6 0x80498 + +#define mmMME2_RTR_HBW_RANGE_MASK_L_7 0x8049C + +#define mmMME2_RTR_HBW_RANGE_MASK_H_0 0x804A0 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_1 0x804A4 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_2 0x804A8 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_3 0x804AC + +#define mmMME2_RTR_HBW_RANGE_MASK_H_4 0x804B0 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_5 0x804B4 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_6 0x804B8 + +#define mmMME2_RTR_HBW_RANGE_MASK_H_7 0x804BC + +#define mmMME2_RTR_HBW_RANGE_BASE_L_0 0x804C0 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_1 0x804C4 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_2 0x804C8 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_3 0x804CC + +#define mmMME2_RTR_HBW_RANGE_BASE_L_4 0x804D0 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_5 0x804D4 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_6 0x804D8 + +#define mmMME2_RTR_HBW_RANGE_BASE_L_7 0x804DC + +#define mmMME2_RTR_HBW_RANGE_BASE_H_0 0x804E0 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_1 0x804E4 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_2 0x804E8 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_3 0x804EC + +#define mmMME2_RTR_HBW_RANGE_BASE_H_4 0x804F0 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_5 0x804F4 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_6 0x804F8 + +#define mmMME2_RTR_HBW_RANGE_BASE_H_7 0x804FC + +#define mmMME2_RTR_LBW_RANGE_HIT 0x80500 + +#define mmMME2_RTR_LBW_RANGE_MASK_0 0x80510 + +#define mmMME2_RTR_LBW_RANGE_MASK_1 0x80514 + +#define mmMME2_RTR_LBW_RANGE_MASK_2 0x80518 + +#define mmMME2_RTR_LBW_RANGE_MASK_3 0x8051C + +#define mmMME2_RTR_LBW_RANGE_MASK_4 0x80520 + +#define mmMME2_RTR_LBW_RANGE_MASK_5 0x80524 + +#define mmMME2_RTR_LBW_RANGE_MASK_6 0x80528 + +#define mmMME2_RTR_LBW_RANGE_MASK_7 0x8052C + +#define mmMME2_RTR_LBW_RANGE_MASK_8 0x80530 + +#define 
mmMME2_RTR_LBW_RANGE_MASK_9 0x80534 + +#define mmMME2_RTR_LBW_RANGE_MASK_10 0x80538 + +#define mmMME2_RTR_LBW_RANGE_MASK_11 0x8053C + +#define mmMME2_RTR_LBW_RANGE_MASK_12 0x80540 + +#define mmMME2_RTR_LBW_RANGE_MASK_13 0x80544 + +#define mmMME2_RTR_LBW_RANGE_MASK_14 0x80548 + +#define mmMME2_RTR_LBW_RANGE_MASK_15 0x8054C + +#define mmMME2_RTR_LBW_RANGE_BASE_0 0x80550 + +#define mmMME2_RTR_LBW_RANGE_BASE_1 0x80554 + +#define mmMME2_RTR_LBW_RANGE_BASE_2 0x80558 + +#define mmMME2_RTR_LBW_RANGE_BASE_3 0x8055C + +#define mmMME2_RTR_LBW_RANGE_BASE_4 0x80560 + +#define mmMME2_RTR_LBW_RANGE_BASE_5 0x80564 + +#define mmMME2_RTR_LBW_RANGE_BASE_6 0x80568 + +#define mmMME2_RTR_LBW_RANGE_BASE_7 0x8056C + +#define mmMME2_RTR_LBW_RANGE_BASE_8 0x80570 + +#define mmMME2_RTR_LBW_RANGE_BASE_9 0x80574 + +#define mmMME2_RTR_LBW_RANGE_BASE_10 0x80578 + +#define mmMME2_RTR_LBW_RANGE_BASE_11 0x8057C + +#define mmMME2_RTR_LBW_RANGE_BASE_12 0x80580 + +#define mmMME2_RTR_LBW_RANGE_BASE_13 0x80584 + +#define mmMME2_RTR_LBW_RANGE_BASE_14 0x80588 + +#define mmMME2_RTR_LBW_RANGE_BASE_15 0x8058C + +#define mmMME2_RTR_RGLTR 0x80590 + +#define mmMME2_RTR_RGLTR_WR_RESULT 0x80594 + +#define mmMME2_RTR_RGLTR_RD_RESULT 0x80598 + +#define mmMME2_RTR_SCRAMB_EN 0x80600 + +#define mmMME2_RTR_NON_LIN_SCRAMB 0x80604 + +#endif /* ASIC_REG_MME2_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h new file mode 100644 index 000000000000..b78f8bc387fc --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME3_RTR_REGS_H_ +#define ASIC_REG_MME3_RTR_REGS_H_ + +/* + ***************************************** + * MME3_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME3_RTR_HBW_RD_RQ_E_ARB 0xC0100 + +#define mmMME3_RTR_HBW_RD_RQ_W_ARB 0xC0104 + +#define mmMME3_RTR_HBW_RD_RQ_N_ARB 0xC0108 + +#define mmMME3_RTR_HBW_RD_RQ_S_ARB 0xC010C + +#define mmMME3_RTR_HBW_RD_RQ_L_ARB 0xC0110 + +#define mmMME3_RTR_HBW_E_ARB_MAX 0xC0120 + +#define mmMME3_RTR_HBW_W_ARB_MAX 0xC0124 + +#define mmMME3_RTR_HBW_N_ARB_MAX 0xC0128 + +#define mmMME3_RTR_HBW_S_ARB_MAX 0xC012C + +#define mmMME3_RTR_HBW_L_ARB_MAX 0xC0130 + +#define mmMME3_RTR_HBW_RD_RS_MAX_CREDIT 0xC0140 + +#define mmMME3_RTR_HBW_WR_RQ_MAX_CREDIT 0xC0144 + +#define mmMME3_RTR_HBW_RD_RQ_MAX_CREDIT 0xC0148 + +#define mmMME3_RTR_HBW_RD_RS_E_ARB 0xC0150 + +#define mmMME3_RTR_HBW_RD_RS_W_ARB 0xC0154 + +#define mmMME3_RTR_HBW_RD_RS_N_ARB 0xC0158 + +#define mmMME3_RTR_HBW_RD_RS_S_ARB 0xC015C + +#define mmMME3_RTR_HBW_RD_RS_L_ARB 0xC0160 + +#define mmMME3_RTR_HBW_WR_RQ_E_ARB 0xC0170 + +#define mmMME3_RTR_HBW_WR_RQ_W_ARB 0xC0174 + +#define mmMME3_RTR_HBW_WR_RQ_N_ARB 0xC0178 + +#define mmMME3_RTR_HBW_WR_RQ_S_ARB 0xC017C + +#define mmMME3_RTR_HBW_WR_RQ_L_ARB 0xC0180 + +#define mmMME3_RTR_HBW_WR_RS_E_ARB 0xC0190 + +#define mmMME3_RTR_HBW_WR_RS_W_ARB 0xC0194 + +#define mmMME3_RTR_HBW_WR_RS_N_ARB 0xC0198 + +#define mmMME3_RTR_HBW_WR_RS_S_ARB 0xC019C + +#define mmMME3_RTR_HBW_WR_RS_L_ARB 0xC01A0 + +#define mmMME3_RTR_LBW_RD_RQ_E_ARB 0xC0200 + +#define mmMME3_RTR_LBW_RD_RQ_W_ARB 0xC0204 + +#define mmMME3_RTR_LBW_RD_RQ_N_ARB 0xC0208 + +#define 
mmMME3_RTR_LBW_RD_RQ_S_ARB 0xC020C + +#define mmMME3_RTR_LBW_RD_RQ_L_ARB 0xC0210 + +#define mmMME3_RTR_LBW_E_ARB_MAX 0xC0220 + +#define mmMME3_RTR_LBW_W_ARB_MAX 0xC0224 + +#define mmMME3_RTR_LBW_N_ARB_MAX 0xC0228 + +#define mmMME3_RTR_LBW_S_ARB_MAX 0xC022C + +#define mmMME3_RTR_LBW_L_ARB_MAX 0xC0230 + +#define mmMME3_RTR_LBW_SRAM_MAX_CREDIT 0xC0240 + +#define mmMME3_RTR_LBW_RD_RS_E_ARB 0xC0250 + +#define mmMME3_RTR_LBW_RD_RS_W_ARB 0xC0254 + +#define mmMME3_RTR_LBW_RD_RS_N_ARB 0xC0258 + +#define mmMME3_RTR_LBW_RD_RS_S_ARB 0xC025C + +#define mmMME3_RTR_LBW_RD_RS_L_ARB 0xC0260 + +#define mmMME3_RTR_LBW_WR_RQ_E_ARB 0xC0270 + +#define mmMME3_RTR_LBW_WR_RQ_W_ARB 0xC0274 + +#define mmMME3_RTR_LBW_WR_RQ_N_ARB 0xC0278 + +#define mmMME3_RTR_LBW_WR_RQ_S_ARB 0xC027C + +#define mmMME3_RTR_LBW_WR_RQ_L_ARB 0xC0280 + +#define mmMME3_RTR_LBW_WR_RS_E_ARB 0xC0290 + +#define mmMME3_RTR_LBW_WR_RS_W_ARB 0xC0294 + +#define mmMME3_RTR_LBW_WR_RS_N_ARB 0xC0298 + +#define mmMME3_RTR_LBW_WR_RS_S_ARB 0xC029C + +#define mmMME3_RTR_LBW_WR_RS_L_ARB 0xC02A0 + +#define mmMME3_RTR_DBG_E_ARB 0xC0300 + +#define mmMME3_RTR_DBG_W_ARB 0xC0304 + +#define mmMME3_RTR_DBG_N_ARB 0xC0308 + +#define mmMME3_RTR_DBG_S_ARB 0xC030C + +#define mmMME3_RTR_DBG_L_ARB 0xC0310 + +#define mmMME3_RTR_DBG_E_ARB_MAX 0xC0320 + +#define mmMME3_RTR_DBG_W_ARB_MAX 0xC0324 + +#define mmMME3_RTR_DBG_N_ARB_MAX 0xC0328 + +#define mmMME3_RTR_DBG_S_ARB_MAX 0xC032C + +#define mmMME3_RTR_DBG_L_ARB_MAX 0xC0330 + +#define mmMME3_RTR_SPLIT_COEF_0 0xC0400 + +#define mmMME3_RTR_SPLIT_COEF_1 0xC0404 + +#define mmMME3_RTR_SPLIT_COEF_2 0xC0408 + +#define mmMME3_RTR_SPLIT_COEF_3 0xC040C + +#define mmMME3_RTR_SPLIT_COEF_4 0xC0410 + +#define mmMME3_RTR_SPLIT_COEF_5 0xC0414 + +#define mmMME3_RTR_SPLIT_COEF_6 0xC0418 + +#define mmMME3_RTR_SPLIT_COEF_7 0xC041C + +#define mmMME3_RTR_SPLIT_COEF_8 0xC0420 + +#define mmMME3_RTR_SPLIT_COEF_9 0xC0424 + +#define mmMME3_RTR_SPLIT_CFG 0xC0440 + +#define mmMME3_RTR_SPLIT_RD_SAT 0xC0444 + +#define mmMME3_RTR_SPLIT_RD_RST_TOKEN 0xC0448 + +#define mmMME3_RTR_SPLIT_RD_TIMEOUT_0 0xC044C + +#define mmMME3_RTR_SPLIT_RD_TIMEOUT_1 0xC0450 + +#define mmMME3_RTR_SPLIT_WR_SAT 0xC0454 + +#define mmMME3_RTR_WPLIT_WR_TST_TOLEN 0xC0458 + +#define mmMME3_RTR_SPLIT_WR_TIMEOUT_0 0xC045C + +#define mmMME3_RTR_SPLIT_WR_TIMEOUT_1 0xC0460 + +#define mmMME3_RTR_HBW_RANGE_HIT 0xC0470 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_0 0xC0480 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_1 0xC0484 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_2 0xC0488 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_3 0xC048C + +#define mmMME3_RTR_HBW_RANGE_MASK_L_4 0xC0490 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_5 0xC0494 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_6 0xC0498 + +#define mmMME3_RTR_HBW_RANGE_MASK_L_7 0xC049C + +#define mmMME3_RTR_HBW_RANGE_MASK_H_0 0xC04A0 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_1 0xC04A4 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_2 0xC04A8 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_3 0xC04AC + +#define mmMME3_RTR_HBW_RANGE_MASK_H_4 0xC04B0 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_5 0xC04B4 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_6 0xC04B8 + +#define mmMME3_RTR_HBW_RANGE_MASK_H_7 0xC04BC + +#define mmMME3_RTR_HBW_RANGE_BASE_L_0 0xC04C0 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_1 0xC04C4 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_2 0xC04C8 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_3 0xC04CC + +#define mmMME3_RTR_HBW_RANGE_BASE_L_4 0xC04D0 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_5 0xC04D4 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_6 0xC04D8 + +#define mmMME3_RTR_HBW_RANGE_BASE_L_7 0xC04DC + +#define 
mmMME3_RTR_HBW_RANGE_BASE_H_0 0xC04E0 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_1 0xC04E4 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_2 0xC04E8 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_3 0xC04EC + +#define mmMME3_RTR_HBW_RANGE_BASE_H_4 0xC04F0 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_5 0xC04F4 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_6 0xC04F8 + +#define mmMME3_RTR_HBW_RANGE_BASE_H_7 0xC04FC + +#define mmMME3_RTR_LBW_RANGE_HIT 0xC0500 + +#define mmMME3_RTR_LBW_RANGE_MASK_0 0xC0510 + +#define mmMME3_RTR_LBW_RANGE_MASK_1 0xC0514 + +#define mmMME3_RTR_LBW_RANGE_MASK_2 0xC0518 + +#define mmMME3_RTR_LBW_RANGE_MASK_3 0xC051C + +#define mmMME3_RTR_LBW_RANGE_MASK_4 0xC0520 + +#define mmMME3_RTR_LBW_RANGE_MASK_5 0xC0524 + +#define mmMME3_RTR_LBW_RANGE_MASK_6 0xC0528 + +#define mmMME3_RTR_LBW_RANGE_MASK_7 0xC052C + +#define mmMME3_RTR_LBW_RANGE_MASK_8 0xC0530 + +#define mmMME3_RTR_LBW_RANGE_MASK_9 0xC0534 + +#define mmMME3_RTR_LBW_RANGE_MASK_10 0xC0538 + +#define mmMME3_RTR_LBW_RANGE_MASK_11 0xC053C + +#define mmMME3_RTR_LBW_RANGE_MASK_12 0xC0540 + +#define mmMME3_RTR_LBW_RANGE_MASK_13 0xC0544 + +#define mmMME3_RTR_LBW_RANGE_MASK_14 0xC0548 + +#define mmMME3_RTR_LBW_RANGE_MASK_15 0xC054C + +#define mmMME3_RTR_LBW_RANGE_BASE_0 0xC0550 + +#define mmMME3_RTR_LBW_RANGE_BASE_1 0xC0554 + +#define mmMME3_RTR_LBW_RANGE_BASE_2 0xC0558 + +#define mmMME3_RTR_LBW_RANGE_BASE_3 0xC055C + +#define mmMME3_RTR_LBW_RANGE_BASE_4 0xC0560 + +#define mmMME3_RTR_LBW_RANGE_BASE_5 0xC0564 + +#define mmMME3_RTR_LBW_RANGE_BASE_6 0xC0568 + +#define mmMME3_RTR_LBW_RANGE_BASE_7 0xC056C + +#define mmMME3_RTR_LBW_RANGE_BASE_8 0xC0570 + +#define mmMME3_RTR_LBW_RANGE_BASE_9 0xC0574 + +#define mmMME3_RTR_LBW_RANGE_BASE_10 0xC0578 + +#define mmMME3_RTR_LBW_RANGE_BASE_11 0xC057C + +#define mmMME3_RTR_LBW_RANGE_BASE_12 0xC0580 + +#define mmMME3_RTR_LBW_RANGE_BASE_13 0xC0584 + +#define mmMME3_RTR_LBW_RANGE_BASE_14 0xC0588 + +#define mmMME3_RTR_LBW_RANGE_BASE_15 0xC058C + +#define mmMME3_RTR_RGLTR 0xC0590 + +#define mmMME3_RTR_RGLTR_WR_RESULT 0xC0594 + +#define mmMME3_RTR_RGLTR_RD_RESULT 0xC0598 + +#define mmMME3_RTR_SCRAMB_EN 0xC0600 + +#define mmMME3_RTR_NON_LIN_SCRAMB 0xC0604 + +#endif /* ASIC_REG_MME3_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h new file mode 100644 index 000000000000..d9a4a02cefa3 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME4_RTR_REGS_H_ +#define ASIC_REG_MME4_RTR_REGS_H_ + +/* + ***************************************** + * MME4_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME4_RTR_HBW_RD_RQ_E_ARB 0x100100 + +#define mmMME4_RTR_HBW_RD_RQ_W_ARB 0x100104 + +#define mmMME4_RTR_HBW_RD_RQ_N_ARB 0x100108 + +#define mmMME4_RTR_HBW_RD_RQ_S_ARB 0x10010C + +#define mmMME4_RTR_HBW_RD_RQ_L_ARB 0x100110 + +#define mmMME4_RTR_HBW_E_ARB_MAX 0x100120 + +#define mmMME4_RTR_HBW_W_ARB_MAX 0x100124 + +#define mmMME4_RTR_HBW_N_ARB_MAX 0x100128 + +#define mmMME4_RTR_HBW_S_ARB_MAX 0x10012C + +#define mmMME4_RTR_HBW_L_ARB_MAX 0x100130 + +#define mmMME4_RTR_HBW_RD_RS_MAX_CREDIT 0x100140 + +#define mmMME4_RTR_HBW_WR_RQ_MAX_CREDIT 0x100144 + +#define mmMME4_RTR_HBW_RD_RQ_MAX_CREDIT 0x100148 + +#define mmMME4_RTR_HBW_RD_RS_E_ARB 0x100150 + +#define mmMME4_RTR_HBW_RD_RS_W_ARB 0x100154 + +#define mmMME4_RTR_HBW_RD_RS_N_ARB 0x100158 + +#define mmMME4_RTR_HBW_RD_RS_S_ARB 0x10015C + +#define mmMME4_RTR_HBW_RD_RS_L_ARB 0x100160 + +#define mmMME4_RTR_HBW_WR_RQ_E_ARB 0x100170 + +#define mmMME4_RTR_HBW_WR_RQ_W_ARB 0x100174 + +#define mmMME4_RTR_HBW_WR_RQ_N_ARB 0x100178 + +#define mmMME4_RTR_HBW_WR_RQ_S_ARB 0x10017C + +#define mmMME4_RTR_HBW_WR_RQ_L_ARB 0x100180 + +#define mmMME4_RTR_HBW_WR_RS_E_ARB 0x100190 + +#define mmMME4_RTR_HBW_WR_RS_W_ARB 0x100194 + +#define mmMME4_RTR_HBW_WR_RS_N_ARB 0x100198 + +#define mmMME4_RTR_HBW_WR_RS_S_ARB 0x10019C + +#define mmMME4_RTR_HBW_WR_RS_L_ARB 0x1001A0 + +#define mmMME4_RTR_LBW_RD_RQ_E_ARB 0x100200 + +#define mmMME4_RTR_LBW_RD_RQ_W_ARB 0x100204 + +#define mmMME4_RTR_LBW_RD_RQ_N_ARB 0x100208 + +#define mmMME4_RTR_LBW_RD_RQ_S_ARB 0x10020C + +#define mmMME4_RTR_LBW_RD_RQ_L_ARB 0x100210 + +#define mmMME4_RTR_LBW_E_ARB_MAX 0x100220 + +#define mmMME4_RTR_LBW_W_ARB_MAX 0x100224 + +#define mmMME4_RTR_LBW_N_ARB_MAX 0x100228 + +#define mmMME4_RTR_LBW_S_ARB_MAX 0x10022C + +#define mmMME4_RTR_LBW_L_ARB_MAX 0x100230 + +#define mmMME4_RTR_LBW_SRAM_MAX_CREDIT 0x100240 + +#define mmMME4_RTR_LBW_RD_RS_E_ARB 0x100250 + +#define mmMME4_RTR_LBW_RD_RS_W_ARB 0x100254 + +#define mmMME4_RTR_LBW_RD_RS_N_ARB 0x100258 + +#define mmMME4_RTR_LBW_RD_RS_S_ARB 0x10025C + +#define mmMME4_RTR_LBW_RD_RS_L_ARB 0x100260 + +#define mmMME4_RTR_LBW_WR_RQ_E_ARB 0x100270 + +#define mmMME4_RTR_LBW_WR_RQ_W_ARB 0x100274 + +#define mmMME4_RTR_LBW_WR_RQ_N_ARB 0x100278 + +#define mmMME4_RTR_LBW_WR_RQ_S_ARB 0x10027C + +#define mmMME4_RTR_LBW_WR_RQ_L_ARB 0x100280 + +#define mmMME4_RTR_LBW_WR_RS_E_ARB 0x100290 + +#define mmMME4_RTR_LBW_WR_RS_W_ARB 0x100294 + +#define mmMME4_RTR_LBW_WR_RS_N_ARB 0x100298 + +#define mmMME4_RTR_LBW_WR_RS_S_ARB 0x10029C + +#define mmMME4_RTR_LBW_WR_RS_L_ARB 0x1002A0 + +#define mmMME4_RTR_DBG_E_ARB 0x100300 + +#define mmMME4_RTR_DBG_W_ARB 0x100304 + +#define mmMME4_RTR_DBG_N_ARB 0x100308 + +#define mmMME4_RTR_DBG_S_ARB 0x10030C + +#define mmMME4_RTR_DBG_L_ARB 0x100310 + +#define mmMME4_RTR_DBG_E_ARB_MAX 0x100320 + +#define mmMME4_RTR_DBG_W_ARB_MAX 0x100324 + +#define mmMME4_RTR_DBG_N_ARB_MAX 0x100328 + +#define mmMME4_RTR_DBG_S_ARB_MAX 0x10032C + +#define mmMME4_RTR_DBG_L_ARB_MAX 0x100330 + +#define mmMME4_RTR_SPLIT_COEF_0 0x100400 + +#define mmMME4_RTR_SPLIT_COEF_1 0x100404 + +#define mmMME4_RTR_SPLIT_COEF_2 0x100408 + +#define mmMME4_RTR_SPLIT_COEF_3 0x10040C + +#define mmMME4_RTR_SPLIT_COEF_4 0x100410 + 
+#define mmMME4_RTR_SPLIT_COEF_5 0x100414 + +#define mmMME4_RTR_SPLIT_COEF_6 0x100418 + +#define mmMME4_RTR_SPLIT_COEF_7 0x10041C + +#define mmMME4_RTR_SPLIT_COEF_8 0x100420 + +#define mmMME4_RTR_SPLIT_COEF_9 0x100424 + +#define mmMME4_RTR_SPLIT_CFG 0x100440 + +#define mmMME4_RTR_SPLIT_RD_SAT 0x100444 + +#define mmMME4_RTR_SPLIT_RD_RST_TOKEN 0x100448 + +#define mmMME4_RTR_SPLIT_RD_TIMEOUT_0 0x10044C + +#define mmMME4_RTR_SPLIT_RD_TIMEOUT_1 0x100450 + +#define mmMME4_RTR_SPLIT_WR_SAT 0x100454 + +#define mmMME4_RTR_WPLIT_WR_TST_TOLEN 0x100458 + +#define mmMME4_RTR_SPLIT_WR_TIMEOUT_0 0x10045C + +#define mmMME4_RTR_SPLIT_WR_TIMEOUT_1 0x100460 + +#define mmMME4_RTR_HBW_RANGE_HIT 0x100470 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_0 0x100480 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_1 0x100484 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_2 0x100488 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_3 0x10048C + +#define mmMME4_RTR_HBW_RANGE_MASK_L_4 0x100490 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_5 0x100494 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_6 0x100498 + +#define mmMME4_RTR_HBW_RANGE_MASK_L_7 0x10049C + +#define mmMME4_RTR_HBW_RANGE_MASK_H_0 0x1004A0 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_1 0x1004A4 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_2 0x1004A8 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_3 0x1004AC + +#define mmMME4_RTR_HBW_RANGE_MASK_H_4 0x1004B0 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_5 0x1004B4 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_6 0x1004B8 + +#define mmMME4_RTR_HBW_RANGE_MASK_H_7 0x1004BC + +#define mmMME4_RTR_HBW_RANGE_BASE_L_0 0x1004C0 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_1 0x1004C4 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_2 0x1004C8 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_3 0x1004CC + +#define mmMME4_RTR_HBW_RANGE_BASE_L_4 0x1004D0 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_5 0x1004D4 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_6 0x1004D8 + +#define mmMME4_RTR_HBW_RANGE_BASE_L_7 0x1004DC + +#define mmMME4_RTR_HBW_RANGE_BASE_H_0 0x1004E0 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_1 0x1004E4 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_2 0x1004E8 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_3 0x1004EC + +#define mmMME4_RTR_HBW_RANGE_BASE_H_4 0x1004F0 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_5 0x1004F4 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_6 0x1004F8 + +#define mmMME4_RTR_HBW_RANGE_BASE_H_7 0x1004FC + +#define mmMME4_RTR_LBW_RANGE_HIT 0x100500 + +#define mmMME4_RTR_LBW_RANGE_MASK_0 0x100510 + +#define mmMME4_RTR_LBW_RANGE_MASK_1 0x100514 + +#define mmMME4_RTR_LBW_RANGE_MASK_2 0x100518 + +#define mmMME4_RTR_LBW_RANGE_MASK_3 0x10051C + +#define mmMME4_RTR_LBW_RANGE_MASK_4 0x100520 + +#define mmMME4_RTR_LBW_RANGE_MASK_5 0x100524 + +#define mmMME4_RTR_LBW_RANGE_MASK_6 0x100528 + +#define mmMME4_RTR_LBW_RANGE_MASK_7 0x10052C + +#define mmMME4_RTR_LBW_RANGE_MASK_8 0x100530 + +#define mmMME4_RTR_LBW_RANGE_MASK_9 0x100534 + +#define mmMME4_RTR_LBW_RANGE_MASK_10 0x100538 + +#define mmMME4_RTR_LBW_RANGE_MASK_11 0x10053C + +#define mmMME4_RTR_LBW_RANGE_MASK_12 0x100540 + +#define mmMME4_RTR_LBW_RANGE_MASK_13 0x100544 + +#define mmMME4_RTR_LBW_RANGE_MASK_14 0x100548 + +#define mmMME4_RTR_LBW_RANGE_MASK_15 0x10054C + +#define mmMME4_RTR_LBW_RANGE_BASE_0 0x100550 + +#define mmMME4_RTR_LBW_RANGE_BASE_1 0x100554 + +#define mmMME4_RTR_LBW_RANGE_BASE_2 0x100558 + +#define mmMME4_RTR_LBW_RANGE_BASE_3 0x10055C + +#define mmMME4_RTR_LBW_RANGE_BASE_4 0x100560 + +#define mmMME4_RTR_LBW_RANGE_BASE_5 0x100564 + +#define mmMME4_RTR_LBW_RANGE_BASE_6 0x100568 + +#define mmMME4_RTR_LBW_RANGE_BASE_7 0x10056C + +#define mmMME4_RTR_LBW_RANGE_BASE_8 0x100570 + +#define 
mmMME4_RTR_LBW_RANGE_BASE_9 0x100574 + +#define mmMME4_RTR_LBW_RANGE_BASE_10 0x100578 + +#define mmMME4_RTR_LBW_RANGE_BASE_11 0x10057C + +#define mmMME4_RTR_LBW_RANGE_BASE_12 0x100580 + +#define mmMME4_RTR_LBW_RANGE_BASE_13 0x100584 + +#define mmMME4_RTR_LBW_RANGE_BASE_14 0x100588 + +#define mmMME4_RTR_LBW_RANGE_BASE_15 0x10058C + +#define mmMME4_RTR_RGLTR 0x100590 + +#define mmMME4_RTR_RGLTR_WR_RESULT 0x100594 + +#define mmMME4_RTR_RGLTR_RD_RESULT 0x100598 + +#define mmMME4_RTR_SCRAMB_EN 0x100600 + +#define mmMME4_RTR_NON_LIN_SCRAMB 0x100604 + +#endif /* ASIC_REG_MME4_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h new file mode 100644 index 000000000000..205adc988407 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME5_RTR_REGS_H_ +#define ASIC_REG_MME5_RTR_REGS_H_ + +/* + ***************************************** + * MME5_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME5_RTR_HBW_RD_RQ_E_ARB 0x140100 + +#define mmMME5_RTR_HBW_RD_RQ_W_ARB 0x140104 + +#define mmMME5_RTR_HBW_RD_RQ_N_ARB 0x140108 + +#define mmMME5_RTR_HBW_RD_RQ_S_ARB 0x14010C + +#define mmMME5_RTR_HBW_RD_RQ_L_ARB 0x140110 + +#define mmMME5_RTR_HBW_E_ARB_MAX 0x140120 + +#define mmMME5_RTR_HBW_W_ARB_MAX 0x140124 + +#define mmMME5_RTR_HBW_N_ARB_MAX 0x140128 + +#define mmMME5_RTR_HBW_S_ARB_MAX 0x14012C + +#define mmMME5_RTR_HBW_L_ARB_MAX 0x140130 + +#define mmMME5_RTR_HBW_RD_RS_MAX_CREDIT 0x140140 + +#define mmMME5_RTR_HBW_WR_RQ_MAX_CREDIT 0x140144 + +#define mmMME5_RTR_HBW_RD_RQ_MAX_CREDIT 0x140148 + +#define mmMME5_RTR_HBW_RD_RS_E_ARB 0x140150 + +#define mmMME5_RTR_HBW_RD_RS_W_ARB 0x140154 + +#define mmMME5_RTR_HBW_RD_RS_N_ARB 0x140158 + +#define mmMME5_RTR_HBW_RD_RS_S_ARB 0x14015C + +#define mmMME5_RTR_HBW_RD_RS_L_ARB 0x140160 + +#define mmMME5_RTR_HBW_WR_RQ_E_ARB 0x140170 + +#define mmMME5_RTR_HBW_WR_RQ_W_ARB 0x140174 + +#define mmMME5_RTR_HBW_WR_RQ_N_ARB 0x140178 + +#define mmMME5_RTR_HBW_WR_RQ_S_ARB 0x14017C + +#define mmMME5_RTR_HBW_WR_RQ_L_ARB 0x140180 + +#define mmMME5_RTR_HBW_WR_RS_E_ARB 0x140190 + +#define mmMME5_RTR_HBW_WR_RS_W_ARB 0x140194 + +#define mmMME5_RTR_HBW_WR_RS_N_ARB 0x140198 + +#define mmMME5_RTR_HBW_WR_RS_S_ARB 0x14019C + +#define mmMME5_RTR_HBW_WR_RS_L_ARB 0x1401A0 + +#define mmMME5_RTR_LBW_RD_RQ_E_ARB 0x140200 + +#define mmMME5_RTR_LBW_RD_RQ_W_ARB 0x140204 + +#define mmMME5_RTR_LBW_RD_RQ_N_ARB 0x140208 + +#define mmMME5_RTR_LBW_RD_RQ_S_ARB 0x14020C + +#define mmMME5_RTR_LBW_RD_RQ_L_ARB 0x140210 + +#define mmMME5_RTR_LBW_E_ARB_MAX 0x140220 + +#define mmMME5_RTR_LBW_W_ARB_MAX 0x140224 + +#define mmMME5_RTR_LBW_N_ARB_MAX 0x140228 + +#define mmMME5_RTR_LBW_S_ARB_MAX 0x14022C + +#define mmMME5_RTR_LBW_L_ARB_MAX 0x140230 + +#define mmMME5_RTR_LBW_SRAM_MAX_CREDIT 0x140240 + +#define mmMME5_RTR_LBW_RD_RS_E_ARB 0x140250 + +#define mmMME5_RTR_LBW_RD_RS_W_ARB 0x140254 + +#define mmMME5_RTR_LBW_RD_RS_N_ARB 0x140258 + +#define mmMME5_RTR_LBW_RD_RS_S_ARB 0x14025C + +#define mmMME5_RTR_LBW_RD_RS_L_ARB 0x140260 + +#define mmMME5_RTR_LBW_WR_RQ_E_ARB 0x140270 + +#define mmMME5_RTR_LBW_WR_RQ_W_ARB 0x140274 + +#define mmMME5_RTR_LBW_WR_RQ_N_ARB 
0x140278 + +#define mmMME5_RTR_LBW_WR_RQ_S_ARB 0x14027C + +#define mmMME5_RTR_LBW_WR_RQ_L_ARB 0x140280 + +#define mmMME5_RTR_LBW_WR_RS_E_ARB 0x140290 + +#define mmMME5_RTR_LBW_WR_RS_W_ARB 0x140294 + +#define mmMME5_RTR_LBW_WR_RS_N_ARB 0x140298 + +#define mmMME5_RTR_LBW_WR_RS_S_ARB 0x14029C + +#define mmMME5_RTR_LBW_WR_RS_L_ARB 0x1402A0 + +#define mmMME5_RTR_DBG_E_ARB 0x140300 + +#define mmMME5_RTR_DBG_W_ARB 0x140304 + +#define mmMME5_RTR_DBG_N_ARB 0x140308 + +#define mmMME5_RTR_DBG_S_ARB 0x14030C + +#define mmMME5_RTR_DBG_L_ARB 0x140310 + +#define mmMME5_RTR_DBG_E_ARB_MAX 0x140320 + +#define mmMME5_RTR_DBG_W_ARB_MAX 0x140324 + +#define mmMME5_RTR_DBG_N_ARB_MAX 0x140328 + +#define mmMME5_RTR_DBG_S_ARB_MAX 0x14032C + +#define mmMME5_RTR_DBG_L_ARB_MAX 0x140330 + +#define mmMME5_RTR_SPLIT_COEF_0 0x140400 + +#define mmMME5_RTR_SPLIT_COEF_1 0x140404 + +#define mmMME5_RTR_SPLIT_COEF_2 0x140408 + +#define mmMME5_RTR_SPLIT_COEF_3 0x14040C + +#define mmMME5_RTR_SPLIT_COEF_4 0x140410 + +#define mmMME5_RTR_SPLIT_COEF_5 0x140414 + +#define mmMME5_RTR_SPLIT_COEF_6 0x140418 + +#define mmMME5_RTR_SPLIT_COEF_7 0x14041C + +#define mmMME5_RTR_SPLIT_COEF_8 0x140420 + +#define mmMME5_RTR_SPLIT_COEF_9 0x140424 + +#define mmMME5_RTR_SPLIT_CFG 0x140440 + +#define mmMME5_RTR_SPLIT_RD_SAT 0x140444 + +#define mmMME5_RTR_SPLIT_RD_RST_TOKEN 0x140448 + +#define mmMME5_RTR_SPLIT_RD_TIMEOUT_0 0x14044C + +#define mmMME5_RTR_SPLIT_RD_TIMEOUT_1 0x140450 + +#define mmMME5_RTR_SPLIT_WR_SAT 0x140454 + +#define mmMME5_RTR_WPLIT_WR_TST_TOLEN 0x140458 + +#define mmMME5_RTR_SPLIT_WR_TIMEOUT_0 0x14045C + +#define mmMME5_RTR_SPLIT_WR_TIMEOUT_1 0x140460 + +#define mmMME5_RTR_HBW_RANGE_HIT 0x140470 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_0 0x140480 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_1 0x140484 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_2 0x140488 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_3 0x14048C + +#define mmMME5_RTR_HBW_RANGE_MASK_L_4 0x140490 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_5 0x140494 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_6 0x140498 + +#define mmMME5_RTR_HBW_RANGE_MASK_L_7 0x14049C + +#define mmMME5_RTR_HBW_RANGE_MASK_H_0 0x1404A0 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_1 0x1404A4 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_2 0x1404A8 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_3 0x1404AC + +#define mmMME5_RTR_HBW_RANGE_MASK_H_4 0x1404B0 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_5 0x1404B4 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_6 0x1404B8 + +#define mmMME5_RTR_HBW_RANGE_MASK_H_7 0x1404BC + +#define mmMME5_RTR_HBW_RANGE_BASE_L_0 0x1404C0 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_1 0x1404C4 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_2 0x1404C8 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_3 0x1404CC + +#define mmMME5_RTR_HBW_RANGE_BASE_L_4 0x1404D0 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_5 0x1404D4 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_6 0x1404D8 + +#define mmMME5_RTR_HBW_RANGE_BASE_L_7 0x1404DC + +#define mmMME5_RTR_HBW_RANGE_BASE_H_0 0x1404E0 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_1 0x1404E4 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_2 0x1404E8 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_3 0x1404EC + +#define mmMME5_RTR_HBW_RANGE_BASE_H_4 0x1404F0 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_5 0x1404F4 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_6 0x1404F8 + +#define mmMME5_RTR_HBW_RANGE_BASE_H_7 0x1404FC + +#define mmMME5_RTR_LBW_RANGE_HIT 0x140500 + +#define mmMME5_RTR_LBW_RANGE_MASK_0 0x140510 + +#define mmMME5_RTR_LBW_RANGE_MASK_1 0x140514 + +#define mmMME5_RTR_LBW_RANGE_MASK_2 0x140518 + +#define mmMME5_RTR_LBW_RANGE_MASK_3 0x14051C + +#define 
mmMME5_RTR_LBW_RANGE_MASK_4 0x140520 + +#define mmMME5_RTR_LBW_RANGE_MASK_5 0x140524 + +#define mmMME5_RTR_LBW_RANGE_MASK_6 0x140528 + +#define mmMME5_RTR_LBW_RANGE_MASK_7 0x14052C + +#define mmMME5_RTR_LBW_RANGE_MASK_8 0x140530 + +#define mmMME5_RTR_LBW_RANGE_MASK_9 0x140534 + +#define mmMME5_RTR_LBW_RANGE_MASK_10 0x140538 + +#define mmMME5_RTR_LBW_RANGE_MASK_11 0x14053C + +#define mmMME5_RTR_LBW_RANGE_MASK_12 0x140540 + +#define mmMME5_RTR_LBW_RANGE_MASK_13 0x140544 + +#define mmMME5_RTR_LBW_RANGE_MASK_14 0x140548 + +#define mmMME5_RTR_LBW_RANGE_MASK_15 0x14054C + +#define mmMME5_RTR_LBW_RANGE_BASE_0 0x140550 + +#define mmMME5_RTR_LBW_RANGE_BASE_1 0x140554 + +#define mmMME5_RTR_LBW_RANGE_BASE_2 0x140558 + +#define mmMME5_RTR_LBW_RANGE_BASE_3 0x14055C + +#define mmMME5_RTR_LBW_RANGE_BASE_4 0x140560 + +#define mmMME5_RTR_LBW_RANGE_BASE_5 0x140564 + +#define mmMME5_RTR_LBW_RANGE_BASE_6 0x140568 + +#define mmMME5_RTR_LBW_RANGE_BASE_7 0x14056C + +#define mmMME5_RTR_LBW_RANGE_BASE_8 0x140570 + +#define mmMME5_RTR_LBW_RANGE_BASE_9 0x140574 + +#define mmMME5_RTR_LBW_RANGE_BASE_10 0x140578 + +#define mmMME5_RTR_LBW_RANGE_BASE_11 0x14057C + +#define mmMME5_RTR_LBW_RANGE_BASE_12 0x140580 + +#define mmMME5_RTR_LBW_RANGE_BASE_13 0x140584 + +#define mmMME5_RTR_LBW_RANGE_BASE_14 0x140588 + +#define mmMME5_RTR_LBW_RANGE_BASE_15 0x14058C + +#define mmMME5_RTR_RGLTR 0x140590 + +#define mmMME5_RTR_RGLTR_WR_RESULT 0x140594 + +#define mmMME5_RTR_RGLTR_RD_RESULT 0x140598 + +#define mmMME5_RTR_SCRAMB_EN 0x140600 + +#define mmMME5_RTR_NON_LIN_SCRAMB 0x140604 + +#endif /* ASIC_REG_MME5_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h new file mode 100644 index 000000000000..fcec68388278 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME6_RTR_REGS_H_ +#define ASIC_REG_MME6_RTR_REGS_H_ + +/* + ***************************************** + * MME6_RTR (Prototype: MME_RTR) + ***************************************** + */ + +#define mmMME6_RTR_HBW_RD_RQ_E_ARB 0x180100 + +#define mmMME6_RTR_HBW_RD_RQ_W_ARB 0x180104 + +#define mmMME6_RTR_HBW_RD_RQ_N_ARB 0x180108 + +#define mmMME6_RTR_HBW_RD_RQ_S_ARB 0x18010C + +#define mmMME6_RTR_HBW_RD_RQ_L_ARB 0x180110 + +#define mmMME6_RTR_HBW_E_ARB_MAX 0x180120 + +#define mmMME6_RTR_HBW_W_ARB_MAX 0x180124 + +#define mmMME6_RTR_HBW_N_ARB_MAX 0x180128 + +#define mmMME6_RTR_HBW_S_ARB_MAX 0x18012C + +#define mmMME6_RTR_HBW_L_ARB_MAX 0x180130 + +#define mmMME6_RTR_HBW_RD_RS_MAX_CREDIT 0x180140 + +#define mmMME6_RTR_HBW_WR_RQ_MAX_CREDIT 0x180144 + +#define mmMME6_RTR_HBW_RD_RQ_MAX_CREDIT 0x180148 + +#define mmMME6_RTR_HBW_RD_RS_E_ARB 0x180150 + +#define mmMME6_RTR_HBW_RD_RS_W_ARB 0x180154 + +#define mmMME6_RTR_HBW_RD_RS_N_ARB 0x180158 + +#define mmMME6_RTR_HBW_RD_RS_S_ARB 0x18015C + +#define mmMME6_RTR_HBW_RD_RS_L_ARB 0x180160 + +#define mmMME6_RTR_HBW_WR_RQ_E_ARB 0x180170 + +#define mmMME6_RTR_HBW_WR_RQ_W_ARB 0x180174 + +#define mmMME6_RTR_HBW_WR_RQ_N_ARB 0x180178 + +#define mmMME6_RTR_HBW_WR_RQ_S_ARB 0x18017C + +#define mmMME6_RTR_HBW_WR_RQ_L_ARB 0x180180 + +#define mmMME6_RTR_HBW_WR_RS_E_ARB 0x180190 + +#define mmMME6_RTR_HBW_WR_RS_W_ARB 0x180194 + +#define mmMME6_RTR_HBW_WR_RS_N_ARB 0x180198 + +#define mmMME6_RTR_HBW_WR_RS_S_ARB 0x18019C + +#define mmMME6_RTR_HBW_WR_RS_L_ARB 0x1801A0 + +#define mmMME6_RTR_LBW_RD_RQ_E_ARB 0x180200 + +#define mmMME6_RTR_LBW_RD_RQ_W_ARB 0x180204 + +#define mmMME6_RTR_LBW_RD_RQ_N_ARB 0x180208 + +#define mmMME6_RTR_LBW_RD_RQ_S_ARB 0x18020C + +#define mmMME6_RTR_LBW_RD_RQ_L_ARB 0x180210 + +#define mmMME6_RTR_LBW_E_ARB_MAX 0x180220 + +#define mmMME6_RTR_LBW_W_ARB_MAX 0x180224 + +#define mmMME6_RTR_LBW_N_ARB_MAX 0x180228 + +#define mmMME6_RTR_LBW_S_ARB_MAX 0x18022C + +#define mmMME6_RTR_LBW_L_ARB_MAX 0x180230 + +#define mmMME6_RTR_LBW_SRAM_MAX_CREDIT 0x180240 + +#define mmMME6_RTR_LBW_RD_RS_E_ARB 0x180250 + +#define mmMME6_RTR_LBW_RD_RS_W_ARB 0x180254 + +#define mmMME6_RTR_LBW_RD_RS_N_ARB 0x180258 + +#define mmMME6_RTR_LBW_RD_RS_S_ARB 0x18025C + +#define mmMME6_RTR_LBW_RD_RS_L_ARB 0x180260 + +#define mmMME6_RTR_LBW_WR_RQ_E_ARB 0x180270 + +#define mmMME6_RTR_LBW_WR_RQ_W_ARB 0x180274 + +#define mmMME6_RTR_LBW_WR_RQ_N_ARB 0x180278 + +#define mmMME6_RTR_LBW_WR_RQ_S_ARB 0x18027C + +#define mmMME6_RTR_LBW_WR_RQ_L_ARB 0x180280 + +#define mmMME6_RTR_LBW_WR_RS_E_ARB 0x180290 + +#define mmMME6_RTR_LBW_WR_RS_W_ARB 0x180294 + +#define mmMME6_RTR_LBW_WR_RS_N_ARB 0x180298 + +#define mmMME6_RTR_LBW_WR_RS_S_ARB 0x18029C + +#define mmMME6_RTR_LBW_WR_RS_L_ARB 0x1802A0 + +#define mmMME6_RTR_DBG_E_ARB 0x180300 + +#define mmMME6_RTR_DBG_W_ARB 0x180304 + +#define mmMME6_RTR_DBG_N_ARB 0x180308 + +#define mmMME6_RTR_DBG_S_ARB 0x18030C + +#define mmMME6_RTR_DBG_L_ARB 0x180310 + +#define mmMME6_RTR_DBG_E_ARB_MAX 0x180320 + +#define mmMME6_RTR_DBG_W_ARB_MAX 0x180324 + +#define mmMME6_RTR_DBG_N_ARB_MAX 0x180328 + +#define mmMME6_RTR_DBG_S_ARB_MAX 0x18032C + +#define mmMME6_RTR_DBG_L_ARB_MAX 0x180330 + +#define mmMME6_RTR_SPLIT_COEF_0 0x180400 + +#define mmMME6_RTR_SPLIT_COEF_1 0x180404 + +#define mmMME6_RTR_SPLIT_COEF_2 0x180408 + +#define mmMME6_RTR_SPLIT_COEF_3 0x18040C + +#define mmMME6_RTR_SPLIT_COEF_4 0x180410 + 
+#define mmMME6_RTR_SPLIT_COEF_5 0x180414 + +#define mmMME6_RTR_SPLIT_COEF_6 0x180418 + +#define mmMME6_RTR_SPLIT_COEF_7 0x18041C + +#define mmMME6_RTR_SPLIT_COEF_8 0x180420 + +#define mmMME6_RTR_SPLIT_COEF_9 0x180424 + +#define mmMME6_RTR_SPLIT_CFG 0x180440 + +#define mmMME6_RTR_SPLIT_RD_SAT 0x180444 + +#define mmMME6_RTR_SPLIT_RD_RST_TOKEN 0x180448 + +#define mmMME6_RTR_SPLIT_RD_TIMEOUT_0 0x18044C + +#define mmMME6_RTR_SPLIT_RD_TIMEOUT_1 0x180450 + +#define mmMME6_RTR_SPLIT_WR_SAT 0x180454 + +#define mmMME6_RTR_WPLIT_WR_TST_TOLEN 0x180458 + +#define mmMME6_RTR_SPLIT_WR_TIMEOUT_0 0x18045C + +#define mmMME6_RTR_SPLIT_WR_TIMEOUT_1 0x180460 + +#define mmMME6_RTR_HBW_RANGE_HIT 0x180470 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_0 0x180480 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_1 0x180484 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_2 0x180488 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_3 0x18048C + +#define mmMME6_RTR_HBW_RANGE_MASK_L_4 0x180490 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_5 0x180494 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_6 0x180498 + +#define mmMME6_RTR_HBW_RANGE_MASK_L_7 0x18049C + +#define mmMME6_RTR_HBW_RANGE_MASK_H_0 0x1804A0 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_1 0x1804A4 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_2 0x1804A8 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_3 0x1804AC + +#define mmMME6_RTR_HBW_RANGE_MASK_H_4 0x1804B0 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_5 0x1804B4 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_6 0x1804B8 + +#define mmMME6_RTR_HBW_RANGE_MASK_H_7 0x1804BC + +#define mmMME6_RTR_HBW_RANGE_BASE_L_0 0x1804C0 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_1 0x1804C4 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_2 0x1804C8 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_3 0x1804CC + +#define mmMME6_RTR_HBW_RANGE_BASE_L_4 0x1804D0 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_5 0x1804D4 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_6 0x1804D8 + +#define mmMME6_RTR_HBW_RANGE_BASE_L_7 0x1804DC + +#define mmMME6_RTR_HBW_RANGE_BASE_H_0 0x1804E0 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_1 0x1804E4 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_2 0x1804E8 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_3 0x1804EC + +#define mmMME6_RTR_HBW_RANGE_BASE_H_4 0x1804F0 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_5 0x1804F4 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_6 0x1804F8 + +#define mmMME6_RTR_HBW_RANGE_BASE_H_7 0x1804FC + +#define mmMME6_RTR_LBW_RANGE_HIT 0x180500 + +#define mmMME6_RTR_LBW_RANGE_MASK_0 0x180510 + +#define mmMME6_RTR_LBW_RANGE_MASK_1 0x180514 + +#define mmMME6_RTR_LBW_RANGE_MASK_2 0x180518 + +#define mmMME6_RTR_LBW_RANGE_MASK_3 0x18051C + +#define mmMME6_RTR_LBW_RANGE_MASK_4 0x180520 + +#define mmMME6_RTR_LBW_RANGE_MASK_5 0x180524 + +#define mmMME6_RTR_LBW_RANGE_MASK_6 0x180528 + +#define mmMME6_RTR_LBW_RANGE_MASK_7 0x18052C + +#define mmMME6_RTR_LBW_RANGE_MASK_8 0x180530 + +#define mmMME6_RTR_LBW_RANGE_MASK_9 0x180534 + +#define mmMME6_RTR_LBW_RANGE_MASK_10 0x180538 + +#define mmMME6_RTR_LBW_RANGE_MASK_11 0x18053C + +#define mmMME6_RTR_LBW_RANGE_MASK_12 0x180540 + +#define mmMME6_RTR_LBW_RANGE_MASK_13 0x180544 + +#define mmMME6_RTR_LBW_RANGE_MASK_14 0x180548 + +#define mmMME6_RTR_LBW_RANGE_MASK_15 0x18054C + +#define mmMME6_RTR_LBW_RANGE_BASE_0 0x180550 + +#define mmMME6_RTR_LBW_RANGE_BASE_1 0x180554 + +#define mmMME6_RTR_LBW_RANGE_BASE_2 0x180558 + +#define mmMME6_RTR_LBW_RANGE_BASE_3 0x18055C + +#define mmMME6_RTR_LBW_RANGE_BASE_4 0x180560 + +#define mmMME6_RTR_LBW_RANGE_BASE_5 0x180564 + +#define mmMME6_RTR_LBW_RANGE_BASE_6 0x180568 + +#define mmMME6_RTR_LBW_RANGE_BASE_7 0x18056C + +#define mmMME6_RTR_LBW_RANGE_BASE_8 0x180570 + +#define 
mmMME6_RTR_LBW_RANGE_BASE_9 0x180574 + +#define mmMME6_RTR_LBW_RANGE_BASE_10 0x180578 + +#define mmMME6_RTR_LBW_RANGE_BASE_11 0x18057C + +#define mmMME6_RTR_LBW_RANGE_BASE_12 0x180580 + +#define mmMME6_RTR_LBW_RANGE_BASE_13 0x180584 + +#define mmMME6_RTR_LBW_RANGE_BASE_14 0x180588 + +#define mmMME6_RTR_LBW_RANGE_BASE_15 0x18058C + +#define mmMME6_RTR_RGLTR 0x180590 + +#define mmMME6_RTR_RGLTR_WR_RESULT 0x180594 + +#define mmMME6_RTR_RGLTR_RD_RESULT 0x180598 + +#define mmMME6_RTR_SCRAMB_EN 0x180600 + +#define mmMME6_RTR_NON_LIN_SCRAMB 0x180604 + +#endif /* ASIC_REG_MME6_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h new file mode 100644 index 000000000000..a0d4382fbbd0 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_CMDQ_MASKS_H_ +#define ASIC_REG_MME_CMDQ_MASKS_H_ + +/* + ***************************************** + * MME_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +/* MME_CMDQ_GLBL_CFG0 */ +#define MME_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0 +#define MME_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1 +#define MME_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1 +#define MME_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2 +#define MME_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2 +#define MME_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4 +#define MME_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3 +#define MME_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* MME_CMDQ_GLBL_CFG1 */ +#define MME_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define MME_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define MME_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define MME_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define MME_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2 +#define MME_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4 +#define MME_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define MME_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define MME_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define MME_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define MME_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define MME_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* MME_CMDQ_GLBL_PROT */ +#define MME_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0 +#define MME_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1 +#define MME_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1 +#define MME_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2 +#define MME_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2 +#define MME_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4 +#define MME_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3 +#define MME_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8 +#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define MME_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define MME_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define MME_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define MME_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* MME_CMDQ_GLBL_ERR_CFG */ +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define 
MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define MME_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define MME_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define MME_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define MME_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define MME_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* MME_CMDQ_GLBL_ERR_ADDR_LO */ +#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_GLBL_ERR_ADDR_HI */ +#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_GLBL_ERR_WDATA */ +#define MME_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define MME_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_GLBL_SECURE_PROPS */ +#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define MME_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_CMDQ_GLBL_NON_SECURE_PROPS */ +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_CMDQ_GLBL_STS0 */ +#define MME_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define MME_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define MME_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define MME_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define MME_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2 +#define MME_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4 +#define MME_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define MME_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define MME_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define MME_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define MME_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define MME_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* MME_CMDQ_GLBL_STS1 */ +#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define MME_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define MME_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define MME_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define MME_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define MME_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define MME_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define MME_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define MME_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define 
MME_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define MME_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 +#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define MME_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* MME_CMDQ_CQ_CFG0 */ +#define MME_CMDQ_CQ_CFG0_RESERVED_SHIFT 0 +#define MME_CMDQ_CQ_CFG0_RESERVED_MASK 0x1 + +/* MME_CMDQ_CQ_CFG1 */ +#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_ARUSER */ +#define MME_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define MME_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define MME_CMDQ_CQ_ARUSER_WORD_SHIFT 1 +#define MME_CMDQ_CQ_ARUSER_WORD_MASK 0x2 + +/* MME_CMDQ_CQ_PTR_LO */ +#define MME_CMDQ_CQ_PTR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_PTR_HI */ +#define MME_CMDQ_CQ_PTR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_TSIZE */ +#define MME_CMDQ_CQ_TSIZE_VAL_SHIFT 0 +#define MME_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_CTL */ +#define MME_CMDQ_CQ_CTL_RPT_SHIFT 0 +#define MME_CMDQ_CQ_CTL_RPT_MASK 0xFFFF +#define MME_CMDQ_CQ_CTL_CTL_SHIFT 16 +#define MME_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_PTR_LO_STS */ +#define MME_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_PTR_HI_STS */ +#define MME_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define MME_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_TSIZE_STS */ +#define MME_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0 +#define MME_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_CTL_STS */ +#define MME_CMDQ_CQ_CTL_STS_RPT_SHIFT 0 +#define MME_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF +#define MME_CMDQ_CQ_CTL_STS_CTL_SHIFT 16 +#define MME_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_STS0 */ +#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define MME_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define MME_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME_CMDQ_CQ_STS1 */ +#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define MME_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define MME_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define MME_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31 +#define MME_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* MME_CMDQ_CQ_RD_RATE_LIM_EN */ +#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */ +#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME_CMDQ_CQ_RD_RATE_LIM_SAT */ +#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* MME_CMDQ_CQ_RD_RATE_LIM_TOUT */ +#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define MME_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* MME_CMDQ_CQ_IFIFO_CNT */ +#define MME_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* MME_CMDQ_CP_MSG_BASE0_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE0_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE1_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define 
MME_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE1_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE2_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE2_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE3_ADDR_LO */ +#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_MSG_BASE3_ADDR_HI */ +#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_TSIZE_OFFSET */ +#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */ +#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */ +#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_LDMA_COMMIT_OFFSET */ +#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 +#define MME_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_FENCE0_RDATA */ +#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE1_RDATA */ +#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE2_RDATA */ +#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE3_RDATA */ +#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* MME_CMDQ_CP_FENCE0_CNT */ +#define MME_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_FENCE1_CNT */ +#define MME_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_FENCE2_CNT */ +#define MME_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_FENCE3_CNT */ +#define MME_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0 +#define MME_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* MME_CMDQ_CP_STS */ +#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define MME_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define MME_CMDQ_CP_STS_ERDY_SHIFT 16 +#define MME_CMDQ_CP_STS_ERDY_MASK 0x10000 +#define MME_CMDQ_CP_STS_RRDY_SHIFT 17 +#define MME_CMDQ_CP_STS_RRDY_MASK 0x20000 +#define MME_CMDQ_CP_STS_MRDY_SHIFT 18 +#define MME_CMDQ_CP_STS_MRDY_MASK 0x40000 +#define MME_CMDQ_CP_STS_SW_STOP_SHIFT 19 +#define MME_CMDQ_CP_STS_SW_STOP_MASK 0x80000 +#define MME_CMDQ_CP_STS_FENCE_ID_SHIFT 20 +#define MME_CMDQ_CP_STS_FENCE_ID_MASK 0x300000 +#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define MME_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* MME_CMDQ_CP_CURRENT_INST_LO */ +#define MME_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define 
MME_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_CURRENT_INST_HI */ +#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define MME_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CP_BARRIER_CFG */ +#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define MME_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* MME_CMDQ_CP_DBG_0 */ +#define MME_CMDQ_CP_DBG_0_VAL_SHIFT 0 +#define MME_CMDQ_CP_DBG_0_VAL_MASK 0xFF + +/* MME_CMDQ_CQ_BUF_ADDR */ +#define MME_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0 +#define MME_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* MME_CMDQ_CQ_BUF_RDATA */ +#define MME_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0 +#define MME_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_MME_CMDQ_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h new file mode 100644 index 000000000000..5c2f6b870a58 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_CMDQ_REGS_H_ +#define ASIC_REG_MME_CMDQ_REGS_H_ + +/* + ***************************************** + * MME_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmMME_CMDQ_GLBL_CFG0 0xD9000 + +#define mmMME_CMDQ_GLBL_CFG1 0xD9004 + +#define mmMME_CMDQ_GLBL_PROT 0xD9008 + +#define mmMME_CMDQ_GLBL_ERR_CFG 0xD900C + +#define mmMME_CMDQ_GLBL_ERR_ADDR_LO 0xD9010 + +#define mmMME_CMDQ_GLBL_ERR_ADDR_HI 0xD9014 + +#define mmMME_CMDQ_GLBL_ERR_WDATA 0xD9018 + +#define mmMME_CMDQ_GLBL_SECURE_PROPS 0xD901C + +#define mmMME_CMDQ_GLBL_NON_SECURE_PROPS 0xD9020 + +#define mmMME_CMDQ_GLBL_STS0 0xD9024 + +#define mmMME_CMDQ_GLBL_STS1 0xD9028 + +#define mmMME_CMDQ_CQ_CFG0 0xD90B0 + +#define mmMME_CMDQ_CQ_CFG1 0xD90B4 + +#define mmMME_CMDQ_CQ_ARUSER 0xD90B8 + +#define mmMME_CMDQ_CQ_PTR_LO 0xD90C0 + +#define mmMME_CMDQ_CQ_PTR_HI 0xD90C4 + +#define mmMME_CMDQ_CQ_TSIZE 0xD90C8 + +#define mmMME_CMDQ_CQ_CTL 0xD90CC + +#define mmMME_CMDQ_CQ_PTR_LO_STS 0xD90D4 + +#define mmMME_CMDQ_CQ_PTR_HI_STS 0xD90D8 + +#define mmMME_CMDQ_CQ_TSIZE_STS 0xD90DC + +#define mmMME_CMDQ_CQ_CTL_STS 0xD90E0 + +#define mmMME_CMDQ_CQ_STS0 0xD90E4 + +#define mmMME_CMDQ_CQ_STS1 0xD90E8 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_EN 0xD90F0 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xD90F4 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_SAT 0xD90F8 + +#define mmMME_CMDQ_CQ_RD_RATE_LIM_TOUT 0xD90FC + +#define mmMME_CMDQ_CQ_IFIFO_CNT 0xD9108 + +#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO 0xD9120 + +#define mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI 0xD9124 + +#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO 0xD9128 + +#define mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI 0xD912C + +#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_LO 0xD9130 + +#define mmMME_CMDQ_CP_MSG_BASE2_ADDR_HI 0xD9134 + +#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_LO 0xD9138 + +#define mmMME_CMDQ_CP_MSG_BASE3_ADDR_HI 0xD913C + +#define mmMME_CMDQ_CP_LDMA_TSIZE_OFFSET 0xD9140 + +#define mmMME_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xD9144 + +#define mmMME_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xD9148 + +#define mmMME_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xD914C + +#define mmMME_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xD9150 + +#define mmMME_CMDQ_CP_LDMA_COMMIT_OFFSET 0xD9154 + +#define mmMME_CMDQ_CP_FENCE0_RDATA 0xD9158 + +#define mmMME_CMDQ_CP_FENCE1_RDATA 
0xD915C + +#define mmMME_CMDQ_CP_FENCE2_RDATA 0xD9160 + +#define mmMME_CMDQ_CP_FENCE3_RDATA 0xD9164 + +#define mmMME_CMDQ_CP_FENCE0_CNT 0xD9168 + +#define mmMME_CMDQ_CP_FENCE1_CNT 0xD916C + +#define mmMME_CMDQ_CP_FENCE2_CNT 0xD9170 + +#define mmMME_CMDQ_CP_FENCE3_CNT 0xD9174 + +#define mmMME_CMDQ_CP_STS 0xD9178 + +#define mmMME_CMDQ_CP_CURRENT_INST_LO 0xD917C + +#define mmMME_CMDQ_CP_CURRENT_INST_HI 0xD9180 + +#define mmMME_CMDQ_CP_BARRIER_CFG 0xD9184 + +#define mmMME_CMDQ_CP_DBG_0 0xD9188 + +#define mmMME_CMDQ_CQ_BUF_ADDR 0xD9308 + +#define mmMME_CMDQ_CQ_BUF_RDATA 0xD930C + +#endif /* ASIC_REG_MME_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h new file mode 100644 index 000000000000..c7b1b0bb3384 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h @@ -0,0 +1,1537 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_MASKS_H_ +#define ASIC_REG_MME_MASKS_H_ + +/* + ***************************************** + * MME (Prototype: MME) + ***************************************** + */ + +/* MME_ARCH_STATUS */ +#define MME_ARCH_STATUS_A_SHIFT 0 +#define MME_ARCH_STATUS_A_MASK 0x1 +#define MME_ARCH_STATUS_B_SHIFT 1 +#define MME_ARCH_STATUS_B_MASK 0x2 +#define MME_ARCH_STATUS_CIN_SHIFT 2 +#define MME_ARCH_STATUS_CIN_MASK 0x4 +#define MME_ARCH_STATUS_COUT_SHIFT 3 +#define MME_ARCH_STATUS_COUT_MASK 0x8 +#define MME_ARCH_STATUS_TE_SHIFT 4 +#define MME_ARCH_STATUS_TE_MASK 0x10 +#define MME_ARCH_STATUS_LD_SHIFT 5 +#define MME_ARCH_STATUS_LD_MASK 0x20 +#define MME_ARCH_STATUS_ST_SHIFT 6 +#define MME_ARCH_STATUS_ST_MASK 0x40 +#define MME_ARCH_STATUS_SB_A_EMPTY_SHIFT 7 +#define MME_ARCH_STATUS_SB_A_EMPTY_MASK 0x80 +#define MME_ARCH_STATUS_SB_B_EMPTY_SHIFT 8 +#define MME_ARCH_STATUS_SB_B_EMPTY_MASK 0x100 +#define MME_ARCH_STATUS_SB_CIN_EMPTY_SHIFT 9 +#define MME_ARCH_STATUS_SB_CIN_EMPTY_MASK 0x200 +#define MME_ARCH_STATUS_SB_COUT_EMPTY_SHIFT 10 +#define MME_ARCH_STATUS_SB_COUT_EMPTY_MASK 0x400 +#define MME_ARCH_STATUS_SM_IDLE_SHIFT 11 +#define MME_ARCH_STATUS_SM_IDLE_MASK 0x800 +#define MME_ARCH_STATUS_WBC_AXI_IDLE_SHIFT 12 +#define MME_ARCH_STATUS_WBC_AXI_IDLE_MASK 0xF000 +#define MME_ARCH_STATUS_SBC_AXI_IDLE_SHIFT 16 +#define MME_ARCH_STATUS_SBC_AXI_IDLE_MASK 0x30000 +#define MME_ARCH_STATUS_SBB_AXI_IDLE_SHIFT 18 +#define MME_ARCH_STATUS_SBB_AXI_IDLE_MASK 0xC0000 +#define MME_ARCH_STATUS_SBA_AXI_IDLE_SHIFT 20 +#define MME_ARCH_STATUS_SBA_AXI_IDLE_MASK 0x300000 +#define MME_ARCH_STATUS_FREE_ACCUMS_SHIFT 22 +#define MME_ARCH_STATUS_FREE_ACCUMS_MASK 0x1C00000 + +/* MME_ARCH_A_BASE_ADDR_HIGH */ +#define MME_ARCH_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_BASE_ADDR_HIGH */ +#define MME_ARCH_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_CIN_BASE_ADDR_HIGH */ +#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_COUT_BASE_ADDR_HIGH */ +#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_ARCH_BIAS_BASE_ADDR_HIGH */ +#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_ARCH_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* 
MME_ARCH_A_BASE_ADDR_LOW */ +#define MME_ARCH_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_BASE_ADDR_LOW */ +#define MME_ARCH_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_CIN_BASE_ADDR_LOW */ +#define MME_ARCH_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_COUT_BASE_ADDR_LOW */ +#define MME_ARCH_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_BIAS_BASE_ADDR_LOW */ +#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_ARCH_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_ARCH_HEADER */ +#define MME_ARCH_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_ARCH_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_ARCH_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_ARCH_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_ARCH_HEADER_TRANS_A_SHIFT 6 +#define MME_ARCH_HEADER_TRANS_A_MASK 0x40 +#define MME_ARCH_HEADER_LOWER_A_SHIFT 7 +#define MME_ARCH_HEADER_LOWER_A_MASK 0x80 +#define MME_ARCH_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_ARCH_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_ARCH_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_ARCH_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_ARCH_HEADER_LOAD_CIN_SHIFT 13 +#define MME_ARCH_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_ARCH_HEADER_STORE_OUT_SHIFT 15 +#define MME_ARCH_HEADER_STORE_OUT_MASK 0x8000 +#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_ARCH_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_ARCH_HEADER_ADVANCE_A_SHIFT 17 +#define MME_ARCH_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_ARCH_HEADER_ADVANCE_B_SHIFT 18 +#define MME_ARCH_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_ARCH_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_ARCH_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_ARCH_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_ARCH_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_ARCH_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_ARCH_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_ARCH_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_ARCH_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_ARCH_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_ARCH_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_ARCH_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_ARCH_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_ARCH_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_ARCH_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_ARCH_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_ARCH_KERNEL_SIZE_MINUS_1 */ +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_ARCH_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_ARCH_ASSOCIATED_DIMS */ +#define MME_ARCH_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_ARCH_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_ARCH_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_ARCH_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_ARCH_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_ARCH_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_ARCH_ASSOCIATED_DIMS_B_1_SHIFT 19 
+#define MME_ARCH_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_ARCH_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_ARCH_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_ARCH_COUT_SCALE */ +#define MME_ARCH_COUT_SCALE_V_SHIFT 0 +#define MME_ARCH_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_CIN_SCALE */ +#define MME_ARCH_CIN_SCALE_V_SHIFT 0 +#define MME_ARCH_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_GEMMLOWP_ZP */ +#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_ARCH_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_ARCH_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_ARCH_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_ARCH_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_ARCH_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_ARCH_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_ARCH_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_ARCH_GEMMLOWP_EXPONENT */ +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_ARCH_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_ARCH_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_ARCH_A_ROI_BASE_OFFSET */ +#define MME_ARCH_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_ARCH_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_VALID_ELEMENTS */ +#define MME_ARCH_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_ARCH_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_LOOP_STRIDE */ +#define MME_ARCH_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_ARCH_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_ROI_SIZE */ +#define MME_ARCH_A_ROI_SIZE_V_SHIFT 0 +#define MME_ARCH_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_SPATIAL_START_OFFSET */ +#define MME_ARCH_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_ARCH_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_SPATIAL_STRIDE */ +#define MME_ARCH_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_ARCH_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_ARCH_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_ROI_BASE_OFFSET */ +#define MME_ARCH_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_ARCH_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_VALID_ELEMENTS */ +#define MME_ARCH_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_ARCH_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_LOOP_STRIDE */ +#define MME_ARCH_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_ARCH_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_ROI_SIZE */ +#define MME_ARCH_B_ROI_SIZE_V_SHIFT 0 +#define MME_ARCH_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_SPATIAL_START_OFFSET */ +#define MME_ARCH_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_ARCH_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_SPATIAL_STRIDE */ +#define MME_ARCH_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_ARCH_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 
+#define MME_ARCH_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_ROI_BASE_OFFSET */ +#define MME_ARCH_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_ARCH_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_VALID_ELEMENTS */ +#define MME_ARCH_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_ARCH_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_LOOP_STRIDE */ +#define MME_ARCH_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_ARCH_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_ROI_SIZE */ +#define MME_ARCH_C_ROI_SIZE_V_SHIFT 0 +#define MME_ARCH_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_SPATIAL_START_OFFSET */ +#define MME_ARCH_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_ARCH_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_SPATIAL_STRIDE */ +#define MME_ARCH_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_ARCH_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_ARCH_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_ARCH_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_SYNC_OBJECT_MESSAGE */ +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_ARCH_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_ARCH_E_PADDING_VALUE_A */ +#define MME_ARCH_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_ARCH_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_ARCH_E_NUM_ITERATION_MINUS_1 */ +#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_ARCH_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_ARCH_E_BUBBLES_PER_SPLIT */ +#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_ARCH_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_CMD */ +#define MME_CMD_EXECUTE_SHIFT 0 +#define MME_CMD_EXECUTE_MASK 0x1 + +/* MME_DUMMY */ +#define MME_DUMMY_V_SHIFT 0 +#define MME_DUMMY_V_MASK 0xFFFFFFFF + +/* MME_RESET */ +#define MME_RESET_V_SHIFT 0 +#define MME_RESET_V_MASK 0x1 + +/* MME_STALL */ +#define MME_STALL_V_SHIFT 0 +#define MME_STALL_V_MASK 0xFFFFFFFF + +/* MME_SM_BASE_ADDRESS_LOW */ +#define MME_SM_BASE_ADDRESS_LOW_V_SHIFT 0 +#define MME_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* MME_SM_BASE_ADDRESS_HIGH */ +#define MME_SM_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define MME_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_ADD */ +#define MME_DBGMEM_ADD_V_SHIFT 0 +#define MME_DBGMEM_ADD_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_DATA_WR */ +#define MME_DBGMEM_DATA_WR_V_SHIFT 0 +#define MME_DBGMEM_DATA_WR_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_DATA_RD */ +#define MME_DBGMEM_DATA_RD_V_SHIFT 0 +#define MME_DBGMEM_DATA_RD_V_MASK 0xFFFFFFFF + +/* MME_DBGMEM_CTRL */ +#define MME_DBGMEM_CTRL_WR_NRD_SHIFT 0 +#define MME_DBGMEM_CTRL_WR_NRD_MASK 0x1 + +/* MME_DBGMEM_RC */ +#define MME_DBGMEM_RC_VALID_SHIFT 0 +#define MME_DBGMEM_RC_VALID_MASK 0x1 +#define MME_DBGMEM_RC_FULL_SHIFT 1 +#define MME_DBGMEM_RC_FULL_MASK 0x2 + +/* MME_LOG_SHADOW */ +#define MME_LOG_SHADOW_MASK_0_SHIFT 0 +#define MME_LOG_SHADOW_MASK_0_MASK 0x7F +#define MME_LOG_SHADOW_MASK_1_SHIFT 8 +#define MME_LOG_SHADOW_MASK_1_MASK 0x7F00 +#define 
MME_LOG_SHADOW_MASK_2_SHIFT 16 +#define MME_LOG_SHADOW_MASK_2_MASK 0x7F0000 +#define MME_LOG_SHADOW_MASK_3_SHIFT 24 +#define MME_LOG_SHADOW_MASK_3_MASK 0x7F000000 + +/* MME_STORE_MAX_CREDIT */ +#define MME_STORE_MAX_CREDIT_V_SHIFT 0 +#define MME_STORE_MAX_CREDIT_V_MASK 0x3F + +/* MME_AGU */ +#define MME_AGU_SBA_MAX_CREDIT_SHIFT 0 +#define MME_AGU_SBA_MAX_CREDIT_MASK 0x1F +#define MME_AGU_SBB_MAX_CREDIT_SHIFT 8 +#define MME_AGU_SBB_MAX_CREDIT_MASK 0x1F00 +#define MME_AGU_SBC_MAX_CREDIT_SHIFT 16 +#define MME_AGU_SBC_MAX_CREDIT_MASK 0x1F0000 +#define MME_AGU_WBC_MAX_CREDIT_SHIFT 24 +#define MME_AGU_WBC_MAX_CREDIT_MASK 0x3F000000 + +/* MME_SBA */ +#define MME_SBA_MAX_SIZE_SHIFT 0 +#define MME_SBA_MAX_SIZE_MASK 0x3FF +#define MME_SBA_EU_MAX_CREDIT_SHIFT 16 +#define MME_SBA_EU_MAX_CREDIT_MASK 0x1F0000 + +/* MME_SBB */ +#define MME_SBB_MAX_SIZE_SHIFT 0 +#define MME_SBB_MAX_SIZE_MASK 0x3FF +#define MME_SBB_EU_MAX_CREDIT_SHIFT 16 +#define MME_SBB_EU_MAX_CREDIT_MASK 0x1F0000 + +/* MME_SBC */ +#define MME_SBC_MAX_SIZE_SHIFT 0 +#define MME_SBC_MAX_SIZE_MASK 0x3FF +#define MME_SBC_EU_MAX_CREDIT_SHIFT 16 +#define MME_SBC_EU_MAX_CREDIT_MASK 0x1F0000 + +/* MME_WBC */ +#define MME_WBC_MAX_OUTSTANDING_SHIFT 0 +#define MME_WBC_MAX_OUTSTANDING_MASK 0xFFF +#define MME_WBC_DISABLE_FAST_END_PE_SHIFT 12 +#define MME_WBC_DISABLE_FAST_END_PE_MASK 0x1000 +#define MME_WBC_LD_INSERT_BUBBLE_DIS_SHIFT 13 +#define MME_WBC_LD_INSERT_BUBBLE_DIS_MASK 0x2000 + +/* MME_SBA_CONTROL_DATA */ +#define MME_SBA_CONTROL_DATA_ASID_SHIFT 0 +#define MME_SBA_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_SBA_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_SBA_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_SBB_CONTROL_DATA */ +#define MME_SBB_CONTROL_DATA_ASID_SHIFT 0 +#define MME_SBB_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_SBB_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_SBB_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_SBC_CONTROL_DATA */ +#define MME_SBC_CONTROL_DATA_ASID_SHIFT 0 +#define MME_SBC_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_SBC_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_SBC_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_WBC_CONTROL_DATA */ +#define MME_WBC_CONTROL_DATA_ASID_SHIFT 0 +#define MME_WBC_CONTROL_DATA_ASID_MASK 0x3FF +#define MME_WBC_CONTROL_DATA_MMBP_SHIFT 10 +#define MME_WBC_CONTROL_DATA_MMBP_MASK 0x400 + +/* MME_TE */ +#define MME_TE_MAX_CREDIT_SHIFT 0 +#define MME_TE_MAX_CREDIT_MASK 0x1F +#define MME_TE_DESC_MAX_CREDIT_SHIFT 8 +#define MME_TE_DESC_MAX_CREDIT_MASK 0x1F00 + +/* MME_TE2DEC */ +#define MME_TE2DEC_MAX_CREDIT_SHIFT 0 +#define MME_TE2DEC_MAX_CREDIT_MASK 0x1F + +/* MME_REI_STATUS */ +#define MME_REI_STATUS_V_SHIFT 0 +#define MME_REI_STATUS_V_MASK 0xFFFFFFFF + +/* MME_REI_MASK */ +#define MME_REI_MASK_V_SHIFT 0 +#define MME_REI_MASK_V_MASK 0xFFFFFFFF + +/* MME_SEI_STATUS */ +#define MME_SEI_STATUS_V_SHIFT 0 +#define MME_SEI_STATUS_V_MASK 0xFFFFFFFF + +/* MME_SEI_MASK */ +#define MME_SEI_MASK_V_SHIFT 0 +#define MME_SEI_MASK_V_MASK 0xFFFFFFFF + +/* MME_SPI_STATUS */ +#define MME_SPI_STATUS_V_SHIFT 0 +#define MME_SPI_STATUS_V_MASK 0xFFFFFFFF + +/* MME_SPI_MASK */ +#define MME_SPI_MASK_V_SHIFT 0 +#define MME_SPI_MASK_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_STATUS */ +#define MME_SHADOW_0_STATUS_A_SHIFT 0 +#define MME_SHADOW_0_STATUS_A_MASK 0x1 +#define MME_SHADOW_0_STATUS_B_SHIFT 1 +#define MME_SHADOW_0_STATUS_B_MASK 0x2 +#define MME_SHADOW_0_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_0_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_0_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_0_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_0_STATUS_TE_SHIFT 4 +#define 
MME_SHADOW_0_STATUS_TE_MASK 0x10 +#define MME_SHADOW_0_STATUS_LD_SHIFT 5 +#define MME_SHADOW_0_STATUS_LD_MASK 0x20 +#define MME_SHADOW_0_STATUS_ST_SHIFT 6 +#define MME_SHADOW_0_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_0_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_0_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_BASE_ADDR_LOW */ +#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_BASE_ADDR_LOW */ +#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_0_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_HEADER */ +#define MME_SHADOW_0_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_0_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_0_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_0_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_0_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_0_HEADER_TRANS_A_MASK 0x40 +#define MME_SHADOW_0_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_0_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_0_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_0_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_0_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_0_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_0_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_0_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_0_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_0_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_0_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_0_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_0_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_0_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_0_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_SHADOW_0_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_0_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_0_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_0_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_0_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_0_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_0_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_0_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_0_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_0_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_0_HEADER_CIN_DATA_TYPE_MASK 
0x1C000000 +#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_0_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_0_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_0_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_0_ASSOCIATED_DIMS */ +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_SHADOW_0_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_0_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_0_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_0_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_0_COUT_SCALE */ +#define MME_SHADOW_0_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_0_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_CIN_SCALE */ +#define MME_SHADOW_0_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_0_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_GEMMLOWP_ZP */ +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_0_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_0_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_0_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_0_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_0_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_0_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_0_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_VALID_ELEMENTS */ +#define MME_SHADOW_0_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_0_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_LOOP_STRIDE */ +#define MME_SHADOW_0_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* 
MME_SHADOW_0_A_ROI_SIZE */ +#define MME_SHADOW_0_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_0_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_SPATIAL_STRIDE */ +#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_VALID_ELEMENTS */ +#define MME_SHADOW_0_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_0_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_LOOP_STRIDE */ +#define MME_SHADOW_0_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_ROI_SIZE */ +#define MME_SHADOW_0_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_0_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_SPATIAL_STRIDE */ +#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_VALID_ELEMENTS */ +#define MME_SHADOW_0_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_0_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_LOOP_STRIDE */ +#define MME_SHADOW_0_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_ROI_SIZE */ +#define MME_SHADOW_0_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_0_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_0_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_SPATIAL_STRIDE */ +#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_0_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_0_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_0_E_PADDING_VALUE_A */ +#define MME_SHADOW_0_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_0_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_0_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_0_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_0_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_A_MASK 
0xFF +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_0_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_SHADOW_1_STATUS */ +#define MME_SHADOW_1_STATUS_A_SHIFT 0 +#define MME_SHADOW_1_STATUS_A_MASK 0x1 +#define MME_SHADOW_1_STATUS_B_SHIFT 1 +#define MME_SHADOW_1_STATUS_B_MASK 0x2 +#define MME_SHADOW_1_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_1_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_1_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_1_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_1_STATUS_TE_SHIFT 4 +#define MME_SHADOW_1_STATUS_TE_MASK 0x10 +#define MME_SHADOW_1_STATUS_LD_SHIFT 5 +#define MME_SHADOW_1_STATUS_LD_MASK 0x20 +#define MME_SHADOW_1_STATUS_ST_SHIFT 6 +#define MME_SHADOW_1_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_1_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_1_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_BASE_ADDR_LOW */ +#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_BASE_ADDR_LOW */ +#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_1_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_HEADER */ +#define MME_SHADOW_1_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_1_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_1_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_1_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_1_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_1_HEADER_TRANS_A_MASK 0x40 +#define MME_SHADOW_1_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_1_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_1_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_1_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_1_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_1_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_1_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_1_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_1_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_1_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_1_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_1_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_1_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_1_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_1_HEADER_ADVANCE_B_MASK 0x40000 
+#define MME_SHADOW_1_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_1_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_1_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_1_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_1_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_1_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_1_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_1_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_1_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_1_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_1_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_1_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_1_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_1_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_1_ASSOCIATED_DIMS */ +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_SHADOW_1_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_1_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_1_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_1_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_1_COUT_SCALE */ +#define MME_SHADOW_1_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_1_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_CIN_SCALE */ +#define MME_SHADOW_1_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_1_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_GEMMLOWP_ZP */ +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_1_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_1_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_1_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_1_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_1_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 
8 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_1_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_1_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_VALID_ELEMENTS */ +#define MME_SHADOW_1_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_1_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_LOOP_STRIDE */ +#define MME_SHADOW_1_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_ROI_SIZE */ +#define MME_SHADOW_1_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_1_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_SPATIAL_STRIDE */ +#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_VALID_ELEMENTS */ +#define MME_SHADOW_1_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_1_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_LOOP_STRIDE */ +#define MME_SHADOW_1_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_ROI_SIZE */ +#define MME_SHADOW_1_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_1_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_SPATIAL_STRIDE */ +#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_VALID_ELEMENTS */ +#define MME_SHADOW_1_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_1_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_LOOP_STRIDE */ +#define MME_SHADOW_1_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_ROI_SIZE */ +#define MME_SHADOW_1_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_1_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_1_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_SPATIAL_STRIDE */ +#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_1_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define 
MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_1_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_1_E_PADDING_VALUE_A */ +#define MME_SHADOW_1_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_1_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_1_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_1_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_1_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_1_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_SHADOW_2_STATUS */ +#define MME_SHADOW_2_STATUS_A_SHIFT 0 +#define MME_SHADOW_2_STATUS_A_MASK 0x1 +#define MME_SHADOW_2_STATUS_B_SHIFT 1 +#define MME_SHADOW_2_STATUS_B_MASK 0x2 +#define MME_SHADOW_2_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_2_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_2_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_2_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_2_STATUS_TE_SHIFT 4 +#define MME_SHADOW_2_STATUS_TE_MASK 0x10 +#define MME_SHADOW_2_STATUS_LD_SHIFT 5 +#define MME_SHADOW_2_STATUS_LD_MASK 0x20 +#define MME_SHADOW_2_STATUS_ST_SHIFT 6 +#define MME_SHADOW_2_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_2_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_2_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_BASE_ADDR_LOW */ +#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_BASE_ADDR_LOW */ +#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_2_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_HEADER */ +#define MME_SHADOW_2_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_2_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_2_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_2_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_2_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_2_HEADER_TRANS_A_MASK 0x40 +#define 
MME_SHADOW_2_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_2_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_2_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_2_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_2_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_2_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_2_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_2_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_2_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_2_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_2_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_2_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_2_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_2_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_2_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_SHADOW_2_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_2_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_2_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_2_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_2_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_2_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_2_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_2_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_2_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_2_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_2_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_2_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_2_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_2_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_2_ASSOCIATED_DIMS */ +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define MME_SHADOW_2_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_2_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_2_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_2_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_2_COUT_SCALE */ +#define MME_SHADOW_2_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_2_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_CIN_SCALE */ +#define MME_SHADOW_2_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_2_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_GEMMLOWP_ZP */ +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define 
MME_SHADOW_2_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_2_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_2_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_2_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_2_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_2_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_2_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_2_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_VALID_ELEMENTS */ +#define MME_SHADOW_2_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_2_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_LOOP_STRIDE */ +#define MME_SHADOW_2_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_ROI_SIZE */ +#define MME_SHADOW_2_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_2_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_SPATIAL_STRIDE */ +#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_VALID_ELEMENTS */ +#define MME_SHADOW_2_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_2_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_LOOP_STRIDE */ +#define MME_SHADOW_2_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_ROI_SIZE */ +#define MME_SHADOW_2_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_2_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_SPATIAL_STRIDE */ +#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_VALID_ELEMENTS */ +#define MME_SHADOW_2_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_2_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_LOOP_STRIDE */ +#define 
MME_SHADOW_2_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_ROI_SIZE */ +#define MME_SHADOW_2_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_2_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_2_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_SPATIAL_STRIDE */ +#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_2_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_2_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_2_E_PADDING_VALUE_A */ +#define MME_SHADOW_2_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_2_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_2_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_2_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_2_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_2_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +/* MME_SHADOW_3_STATUS */ +#define MME_SHADOW_3_STATUS_A_SHIFT 0 +#define MME_SHADOW_3_STATUS_A_MASK 0x1 +#define MME_SHADOW_3_STATUS_B_SHIFT 1 +#define MME_SHADOW_3_STATUS_B_MASK 0x2 +#define MME_SHADOW_3_STATUS_CIN_SHIFT 2 +#define MME_SHADOW_3_STATUS_CIN_MASK 0x4 +#define MME_SHADOW_3_STATUS_COUT_SHIFT 3 +#define MME_SHADOW_3_STATUS_COUT_MASK 0x8 +#define MME_SHADOW_3_STATUS_TE_SHIFT 4 +#define MME_SHADOW_3_STATUS_TE_MASK 0x10 +#define MME_SHADOW_3_STATUS_LD_SHIFT 5 +#define MME_SHADOW_3_STATUS_LD_MASK 0x20 +#define MME_SHADOW_3_STATUS_ST_SHIFT 6 +#define MME_SHADOW_3_STATUS_ST_MASK 0x40 + +/* MME_SHADOW_3_A_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_A_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_B_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_CIN_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_CIN_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_COUT_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_COUT_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_BIAS_BASE_ADDR_HIGH */ +#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_SHIFT 0 +#define MME_SHADOW_3_BIAS_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_BASE_ADDR_LOW */ +#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_A_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_BASE_ADDR_LOW */ +#define MME_SHADOW_3_B_BASE_ADDR_LOW_V_SHIFT 0 +#define 
MME_SHADOW_3_B_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_CIN_BASE_ADDR_LOW */ +#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_CIN_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_COUT_BASE_ADDR_LOW */ +#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_COUT_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_BIAS_BASE_ADDR_LOW */ +#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_SHIFT 0 +#define MME_SHADOW_3_BIAS_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_HEADER */ +#define MME_SHADOW_3_HEADER_SIGNAL_MASK_SHIFT 0 +#define MME_SHADOW_3_HEADER_SIGNAL_MASK_MASK 0x1F +#define MME_SHADOW_3_HEADER_SIGNAL_EN_SHIFT 5 +#define MME_SHADOW_3_HEADER_SIGNAL_EN_MASK 0x20 +#define MME_SHADOW_3_HEADER_TRANS_A_SHIFT 6 +#define MME_SHADOW_3_HEADER_TRANS_A_MASK 0x40 +#define MME_SHADOW_3_HEADER_LOWER_A_SHIFT 7 +#define MME_SHADOW_3_HEADER_LOWER_A_MASK 0x80 +#define MME_SHADOW_3_HEADER_ACCUM_MASK_SHIFT 8 +#define MME_SHADOW_3_HEADER_ACCUM_MASK_MASK 0xF00 +#define MME_SHADOW_3_HEADER_LOAD_BIAS_SHIFT 12 +#define MME_SHADOW_3_HEADER_LOAD_BIAS_MASK 0x1000 +#define MME_SHADOW_3_HEADER_LOAD_CIN_SHIFT 13 +#define MME_SHADOW_3_HEADER_LOAD_CIN_MASK 0x2000 +#define MME_SHADOW_3_HEADER_STORE_OUT_SHIFT 15 +#define MME_SHADOW_3_HEADER_STORE_OUT_MASK 0x8000 +#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_SHIFT 16 +#define MME_SHADOW_3_HEADER_ACC_LD_INC_DISABLE_MASK 0x10000 +#define MME_SHADOW_3_HEADER_ADVANCE_A_SHIFT 17 +#define MME_SHADOW_3_HEADER_ADVANCE_A_MASK 0x20000 +#define MME_SHADOW_3_HEADER_ADVANCE_B_SHIFT 18 +#define MME_SHADOW_3_HEADER_ADVANCE_B_MASK 0x40000 +#define MME_SHADOW_3_HEADER_ADVANCE_CIN_SHIFT 19 +#define MME_SHADOW_3_HEADER_ADVANCE_CIN_MASK 0x80000 +#define MME_SHADOW_3_HEADER_ADVANCE_COUT_SHIFT 20 +#define MME_SHADOW_3_HEADER_ADVANCE_COUT_MASK 0x100000 +#define MME_SHADOW_3_HEADER_COMPRESSED_B_SHIFT 21 +#define MME_SHADOW_3_HEADER_COMPRESSED_B_MASK 0x200000 +#define MME_SHADOW_3_HEADER_MASK_CONV_END_SHIFT 22 +#define MME_SHADOW_3_HEADER_MASK_CONV_END_MASK 0x400000 +#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_SHIFT 23 +#define MME_SHADOW_3_HEADER_ACC_ST_INC_DISABLE_MASK 0x800000 +#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_SHIFT 24 +#define MME_SHADOW_3_HEADER_AB_DATA_TYPE_MASK 0x3000000 +#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_SHIFT 26 +#define MME_SHADOW_3_HEADER_CIN_DATA_TYPE_MASK 0x1C000000 +#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_SHIFT 29 +#define MME_SHADOW_3_HEADER_COUT_DATA_TYPE_MASK 0xE0000000 + +/* MME_SHADOW_3_KERNEL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_SHIFT 0 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_0_MASK 0xFF +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_SHIFT 8 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_1_MASK 0xFF00 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_SHIFT 16 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_2_MASK 0xFF0000 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_SHIFT 24 +#define MME_SHADOW_3_KERNEL_SIZE_MINUS_1_DIM_3_MASK 0xFF000000 + +/* MME_SHADOW_3_ASSOCIATED_DIMS */ +#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_SHIFT 0 +#define MME_SHADOW_3_ASSOCIATED_DIMS_A_0_MASK 0x7 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_SHIFT 3 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_0_MASK 0x38 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_SHIFT 6 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_0_MASK 0x1C0 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_SHIFT 9 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_0_MASK 0xE00 +#define MME_SHADOW_3_ASSOCIATED_DIMS_A_1_SHIFT 16 +#define 
MME_SHADOW_3_ASSOCIATED_DIMS_A_1_MASK 0x70000 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_SHIFT 19 +#define MME_SHADOW_3_ASSOCIATED_DIMS_B_1_MASK 0x380000 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_SHIFT 22 +#define MME_SHADOW_3_ASSOCIATED_DIMS_CIN_1_MASK 0x1C00000 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_SHIFT 25 +#define MME_SHADOW_3_ASSOCIATED_DIMS_COUT_1_MASK 0xE000000 + +/* MME_SHADOW_3_COUT_SCALE */ +#define MME_SHADOW_3_COUT_SCALE_V_SHIFT 0 +#define MME_SHADOW_3_COUT_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_CIN_SCALE */ +#define MME_SHADOW_3_CIN_SCALE_V_SHIFT 0 +#define MME_SHADOW_3_CIN_SCALE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_GEMMLOWP_ZP */ +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_SHIFT 0 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_CIN_MASK 0x1FF +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_SHIFT 9 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_COUT_MASK 0x3FE00 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_SHIFT 18 +#define MME_SHADOW_3_GEMMLOWP_ZP_ZP_B_MASK 0x7FC0000 +#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_SHIFT 27 +#define MME_SHADOW_3_GEMMLOWP_ZP_GEMMLOWP_EU_EN_MASK 0x8000000 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_SHIFT 28 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_MASK 0x10000000 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_SHIFT 29 +#define MME_SHADOW_3_GEMMLOWP_ZP_ACCUM_BIAS_MASK 0x20000000 +#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_SHIFT 30 +#define MME_SHADOW_3_GEMMLOWP_ZP_RELU_EN_MASK 0x40000000 + +/* MME_SHADOW_3_GEMMLOWP_EXPONENT */ +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_SHIFT 0 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_CIN_MASK 0x3F +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_SHIFT 8 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_EXPONENT_COUT_MASK 0x3F00 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_SHIFT 16 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_CIN_EN_MASK 0x10000 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_SHIFT 17 +#define MME_SHADOW_3_GEMMLOWP_EXPONENT_MUL_COUT_EN_MASK 0x20000 + +/* MME_SHADOW_3_A_ROI_BASE_OFFSET */ +#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_A_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_VALID_ELEMENTS */ +#define MME_SHADOW_3_A_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_3_A_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_LOOP_STRIDE */ +#define MME_SHADOW_3_A_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_A_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_ROI_SIZE */ +#define MME_SHADOW_3_A_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_3_A_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_SPATIAL_START_OFFSET */ +#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_A_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_SPATIAL_STRIDE */ +#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_A_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_ROI_BASE_OFFSET */ +#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_B_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_VALID_ELEMENTS */ +#define MME_SHADOW_3_B_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_3_B_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_LOOP_STRIDE */ +#define MME_SHADOW_3_B_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_B_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_ROI_SIZE */ +#define MME_SHADOW_3_B_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_3_B_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* 
MME_SHADOW_3_B_SPATIAL_START_OFFSET */ +#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_B_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_SPATIAL_STRIDE */ +#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_B_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_ROI_BASE_OFFSET */ +#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_C_ROI_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_VALID_ELEMENTS */ +#define MME_SHADOW_3_C_VALID_ELEMENTS_V_SHIFT 0 +#define MME_SHADOW_3_C_VALID_ELEMENTS_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_LOOP_STRIDE */ +#define MME_SHADOW_3_C_LOOP_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_C_LOOP_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_ROI_SIZE */ +#define MME_SHADOW_3_C_ROI_SIZE_V_SHIFT 0 +#define MME_SHADOW_3_C_ROI_SIZE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_SPATIAL_START_OFFSET */ +#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_SHIFT 0 +#define MME_SHADOW_3_C_SPATIAL_START_OFFSET_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_SPATIAL_STRIDE */ +#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_SHIFT 0 +#define MME_SHADOW_3_C_SPATIAL_STRIDE_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 */ +#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_SYNC_OBJECT_MESSAGE */ +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define MME_SHADOW_3_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* MME_SHADOW_3_E_PADDING_VALUE_A */ +#define MME_SHADOW_3_E_PADDING_VALUE_A_V_SHIFT 0 +#define MME_SHADOW_3_E_PADDING_VALUE_A_V_MASK 0xFFFF + +/* MME_SHADOW_3_E_NUM_ITERATION_MINUS_1 */ +#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_SHIFT 0 +#define MME_SHADOW_3_E_NUM_ITERATION_MINUS_1_V_MASK 0xFFFFFFFF + +/* MME_SHADOW_3_E_BUBBLES_PER_SPLIT */ +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_SHIFT 0 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_A_MASK 0xFF +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_SHIFT 8 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_B_MASK 0xFF00 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_SHIFT 16 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_CIN_MASK 0xFF0000 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_SHIFT 24 +#define MME_SHADOW_3_E_BUBBLES_PER_SPLIT_ID_MASK 0xFF000000 + +#endif /* ASIC_REG_MME_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h new file mode 100644 index 000000000000..d4bfa58dce19 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h @@ -0,0 +1,465 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_QM_MASKS_H_ +#define ASIC_REG_MME_QM_MASKS_H_ + +/* + ***************************************** + * MME_QM (Prototype: QMAN) + ***************************************** + */ + +/* MME_QM_GLBL_CFG0 */ +#define MME_QM_GLBL_CFG0_PQF_EN_SHIFT 0 +#define MME_QM_GLBL_CFG0_PQF_EN_MASK 0x1 +#define MME_QM_GLBL_CFG0_CQF_EN_SHIFT 1 +#define MME_QM_GLBL_CFG0_CQF_EN_MASK 0x2 +#define MME_QM_GLBL_CFG0_CP_EN_SHIFT 2 +#define MME_QM_GLBL_CFG0_CP_EN_MASK 0x4 +#define MME_QM_GLBL_CFG0_DMA_EN_SHIFT 3 +#define MME_QM_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* MME_QM_GLBL_CFG1 */ +#define MME_QM_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define MME_QM_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define MME_QM_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define MME_QM_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define MME_QM_GLBL_CFG1_CP_STOP_SHIFT 2 +#define MME_QM_GLBL_CFG1_CP_STOP_MASK 0x4 +#define MME_QM_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define MME_QM_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define MME_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define MME_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define MME_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define MME_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define MME_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define MME_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define MME_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define MME_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* MME_QM_GLBL_PROT */ +#define MME_QM_GLBL_PROT_PQF_PROT_SHIFT 0 +#define MME_QM_GLBL_PROT_PQF_PROT_MASK 0x1 +#define MME_QM_GLBL_PROT_CQF_PROT_SHIFT 1 +#define MME_QM_GLBL_PROT_CQF_PROT_MASK 0x2 +#define MME_QM_GLBL_PROT_CP_PROT_SHIFT 2 +#define MME_QM_GLBL_PROT_CP_PROT_MASK 0x4 +#define MME_QM_GLBL_PROT_DMA_PROT_SHIFT 3 +#define MME_QM_GLBL_PROT_DMA_PROT_MASK 0x8 +#define MME_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define MME_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define MME_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define MME_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define MME_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define MME_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define MME_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define MME_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* MME_QM_GLBL_ERR_CFG */ +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define MME_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define MME_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define MME_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define MME_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define MME_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define MME_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define MME_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define MME_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* MME_QM_GLBL_ERR_ADDR_LO */ +#define 
MME_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_GLBL_ERR_ADDR_HI */ +#define MME_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_GLBL_ERR_WDATA */ +#define MME_QM_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define MME_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* MME_QM_GLBL_SECURE_PROPS */ +#define MME_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define MME_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_QM_GLBL_NON_SECURE_PROPS */ +#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define MME_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define MME_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* MME_QM_GLBL_STS0 */ +#define MME_QM_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define MME_QM_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define MME_QM_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define MME_QM_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define MME_QM_GLBL_STS0_CP_IDLE_SHIFT 2 +#define MME_QM_GLBL_STS0_CP_IDLE_MASK 0x4 +#define MME_QM_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define MME_QM_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define MME_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define MME_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define MME_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define MME_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define MME_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define MME_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define MME_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define MME_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* MME_QM_GLBL_STS1 */ +#define MME_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define MME_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define MME_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define MME_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define MME_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define MME_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define MME_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define MME_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define MME_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define MME_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define MME_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define MME_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define MME_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define MME_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define MME_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 +#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define MME_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* MME_QM_PQ_BASE_LO */ +#define MME_QM_PQ_BASE_LO_VAL_SHIFT 0 +#define MME_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_BASE_HI */ +#define MME_QM_PQ_BASE_HI_VAL_SHIFT 0 +#define MME_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_SIZE */ +#define MME_QM_PQ_SIZE_VAL_SHIFT 0 +#define MME_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PI */ +#define MME_QM_PQ_PI_VAL_SHIFT 0 +#define MME_QM_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_CI */ +#define MME_QM_PQ_CI_VAL_SHIFT 0 +#define MME_QM_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_CFG0 */ +#define MME_QM_PQ_CFG0_RESERVED_SHIFT 0 +#define MME_QM_PQ_CFG0_RESERVED_MASK 0x1 + +/* MME_QM_PQ_CFG1 */ +#define MME_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME_QM_PQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME_QM_PQ_ARUSER */ +#define MME_QM_PQ_ARUSER_NOSNOOP_SHIFT 0 +#define MME_QM_PQ_ARUSER_NOSNOOP_MASK 0x1 +#define MME_QM_PQ_ARUSER_WORD_SHIFT 1 +#define 
MME_QM_PQ_ARUSER_WORD_MASK 0x2 + +/* MME_QM_PQ_PUSH0 */ +#define MME_QM_PQ_PUSH0_PTR_LO_SHIFT 0 +#define MME_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PUSH1 */ +#define MME_QM_PQ_PUSH1_PTR_HI_SHIFT 0 +#define MME_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PUSH2 */ +#define MME_QM_PQ_PUSH2_TSIZE_SHIFT 0 +#define MME_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF + +/* MME_QM_PQ_PUSH3 */ +#define MME_QM_PQ_PUSH3_RPT_SHIFT 0 +#define MME_QM_PQ_PUSH3_RPT_MASK 0xFFFF +#define MME_QM_PQ_PUSH3_CTL_SHIFT 16 +#define MME_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000 + +/* MME_QM_PQ_STS0 */ +#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define MME_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define MME_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define MME_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME_QM_PQ_STS1 */ +#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define MME_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define MME_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define MME_QM_PQ_STS1_PQ_BUSY_SHIFT 31 +#define MME_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* MME_QM_PQ_RD_RATE_LIM_EN */ +#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* MME_QM_PQ_RD_RATE_LIM_RST_TOKEN */ +#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME_QM_PQ_RD_RATE_LIM_SAT */ +#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* MME_QM_PQ_RD_RATE_LIM_TOUT */ +#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define MME_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* MME_QM_CQ_CFG0 */ +#define MME_QM_CQ_CFG0_RESERVED_SHIFT 0 +#define MME_QM_CQ_CFG0_RESERVED_MASK 0x1 + +/* MME_QM_CQ_CFG1 */ +#define MME_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define MME_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define MME_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define MME_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* MME_QM_CQ_ARUSER */ +#define MME_QM_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define MME_QM_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define MME_QM_CQ_ARUSER_WORD_SHIFT 1 +#define MME_QM_CQ_ARUSER_WORD_MASK 0x2 + +/* MME_QM_CQ_PTR_LO */ +#define MME_QM_CQ_PTR_LO_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_PTR_HI */ +#define MME_QM_CQ_PTR_HI_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_TSIZE */ +#define MME_QM_CQ_TSIZE_VAL_SHIFT 0 +#define MME_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_CTL */ +#define MME_QM_CQ_CTL_RPT_SHIFT 0 +#define MME_QM_CQ_CTL_RPT_MASK 0xFFFF +#define MME_QM_CQ_CTL_CTL_SHIFT 16 +#define MME_QM_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* MME_QM_CQ_PTR_LO_STS */ +#define MME_QM_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_PTR_HI_STS */ +#define MME_QM_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define MME_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_TSIZE_STS */ +#define MME_QM_CQ_TSIZE_STS_VAL_SHIFT 0 +#define MME_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_CTL_STS */ +#define MME_QM_CQ_CTL_STS_RPT_SHIFT 0 +#define MME_QM_CQ_CTL_STS_RPT_MASK 0xFFFF +#define MME_QM_CQ_CTL_STS_CTL_SHIFT 16 +#define MME_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* MME_QM_CQ_STS0 */ +#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define MME_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define MME_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define MME_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* MME_QM_CQ_STS1 */ +#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define MME_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF 
+#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define MME_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define MME_QM_CQ_STS1_CQ_BUSY_SHIFT 31 +#define MME_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* MME_QM_CQ_RD_RATE_LIM_EN */ +#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* MME_QM_CQ_RD_RATE_LIM_RST_TOKEN */ +#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* MME_QM_CQ_RD_RATE_LIM_SAT */ +#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* MME_QM_CQ_RD_RATE_LIM_TOUT */ +#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define MME_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* MME_QM_CQ_IFIFO_CNT */ +#define MME_QM_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define MME_QM_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* MME_QM_CP_MSG_BASE0_ADDR_LO */ +#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE0_ADDR_HI */ +#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE1_ADDR_LO */ +#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE1_ADDR_HI */ +#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE2_ADDR_LO */ +#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE2_ADDR_HI */ +#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE3_ADDR_LO */ +#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_MSG_BASE3_ADDR_HI */ +#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define MME_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_TSIZE_OFFSET */ +#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_DST_BASE_LO_OFFSET */ +#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_DST_BASE_HI_OFFSET */ +#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_LDMA_COMMIT_OFFSET */ +#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 +#define MME_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_FENCE0_RDATA */ +#define MME_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE1_RDATA */ +#define MME_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE2_RDATA */ +#define MME_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE3_RDATA */ +#define MME_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define MME_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* MME_QM_CP_FENCE0_CNT */ +#define MME_QM_CP_FENCE0_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE0_CNT_VAL_MASK 
0xFF + +/* MME_QM_CP_FENCE1_CNT */ +#define MME_QM_CP_FENCE1_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* MME_QM_CP_FENCE2_CNT */ +#define MME_QM_CP_FENCE2_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* MME_QM_CP_FENCE3_CNT */ +#define MME_QM_CP_FENCE3_CNT_VAL_SHIFT 0 +#define MME_QM_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* MME_QM_CP_STS */ +#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define MME_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define MME_QM_CP_STS_ERDY_SHIFT 16 +#define MME_QM_CP_STS_ERDY_MASK 0x10000 +#define MME_QM_CP_STS_RRDY_SHIFT 17 +#define MME_QM_CP_STS_RRDY_MASK 0x20000 +#define MME_QM_CP_STS_MRDY_SHIFT 18 +#define MME_QM_CP_STS_MRDY_MASK 0x40000 +#define MME_QM_CP_STS_SW_STOP_SHIFT 19 +#define MME_QM_CP_STS_SW_STOP_MASK 0x80000 +#define MME_QM_CP_STS_FENCE_ID_SHIFT 20 +#define MME_QM_CP_STS_FENCE_ID_MASK 0x300000 +#define MME_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define MME_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* MME_QM_CP_CURRENT_INST_LO */ +#define MME_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define MME_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_CURRENT_INST_HI */ +#define MME_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define MME_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CP_BARRIER_CFG */ +#define MME_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define MME_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* MME_QM_CP_DBG_0 */ +#define MME_QM_CP_DBG_0_VAL_SHIFT 0 +#define MME_QM_CP_DBG_0_VAL_MASK 0xFF + +/* MME_QM_PQ_BUF_ADDR */ +#define MME_QM_PQ_BUF_ADDR_VAL_SHIFT 0 +#define MME_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* MME_QM_PQ_BUF_RDATA */ +#define MME_QM_PQ_BUF_RDATA_VAL_SHIFT 0 +#define MME_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_BUF_ADDR */ +#define MME_QM_CQ_BUF_ADDR_VAL_SHIFT 0 +#define MME_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* MME_QM_CQ_BUF_RDATA */ +#define MME_QM_CQ_BUF_RDATA_VAL_SHIFT 0 +#define MME_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_MME_QM_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h new file mode 100644 index 000000000000..b5b1c776f6c3 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_QM_REGS_H_ +#define ASIC_REG_MME_QM_REGS_H_ + +/* + ***************************************** + * MME_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmMME_QM_GLBL_CFG0 0xD8000 + +#define mmMME_QM_GLBL_CFG1 0xD8004 + +#define mmMME_QM_GLBL_PROT 0xD8008 + +#define mmMME_QM_GLBL_ERR_CFG 0xD800C + +#define mmMME_QM_GLBL_ERR_ADDR_LO 0xD8010 + +#define mmMME_QM_GLBL_ERR_ADDR_HI 0xD8014 + +#define mmMME_QM_GLBL_ERR_WDATA 0xD8018 + +#define mmMME_QM_GLBL_SECURE_PROPS 0xD801C + +#define mmMME_QM_GLBL_NON_SECURE_PROPS 0xD8020 + +#define mmMME_QM_GLBL_STS0 0xD8024 + +#define mmMME_QM_GLBL_STS1 0xD8028 + +#define mmMME_QM_PQ_BASE_LO 0xD8060 + +#define mmMME_QM_PQ_BASE_HI 0xD8064 + +#define mmMME_QM_PQ_SIZE 0xD8068 + +#define mmMME_QM_PQ_PI 0xD806C + +#define mmMME_QM_PQ_CI 0xD8070 + +#define mmMME_QM_PQ_CFG0 0xD8074 + +#define mmMME_QM_PQ_CFG1 0xD8078 + +#define mmMME_QM_PQ_ARUSER 0xD807C + +#define mmMME_QM_PQ_PUSH0 0xD8080 + +#define mmMME_QM_PQ_PUSH1 0xD8084 + +#define mmMME_QM_PQ_PUSH2 0xD8088 + +#define mmMME_QM_PQ_PUSH3 0xD808C + +#define mmMME_QM_PQ_STS0 0xD8090 + +#define mmMME_QM_PQ_STS1 0xD8094 + +#define mmMME_QM_PQ_RD_RATE_LIM_EN 0xD80A0 + +#define mmMME_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xD80A4 + +#define mmMME_QM_PQ_RD_RATE_LIM_SAT 0xD80A8 + +#define mmMME_QM_PQ_RD_RATE_LIM_TOUT 0xD80AC + +#define mmMME_QM_CQ_CFG0 0xD80B0 + +#define mmMME_QM_CQ_CFG1 0xD80B4 + +#define mmMME_QM_CQ_ARUSER 0xD80B8 + +#define mmMME_QM_CQ_PTR_LO 0xD80C0 + +#define mmMME_QM_CQ_PTR_HI 0xD80C4 + +#define mmMME_QM_CQ_TSIZE 0xD80C8 + +#define mmMME_QM_CQ_CTL 0xD80CC + +#define mmMME_QM_CQ_PTR_LO_STS 0xD80D4 + +#define mmMME_QM_CQ_PTR_HI_STS 0xD80D8 + +#define mmMME_QM_CQ_TSIZE_STS 0xD80DC + +#define mmMME_QM_CQ_CTL_STS 0xD80E0 + +#define mmMME_QM_CQ_STS0 0xD80E4 + +#define mmMME_QM_CQ_STS1 0xD80E8 + +#define mmMME_QM_CQ_RD_RATE_LIM_EN 0xD80F0 + +#define mmMME_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xD80F4 + +#define mmMME_QM_CQ_RD_RATE_LIM_SAT 0xD80F8 + +#define mmMME_QM_CQ_RD_RATE_LIM_TOUT 0xD80FC + +#define mmMME_QM_CQ_IFIFO_CNT 0xD8108 + +#define mmMME_QM_CP_MSG_BASE0_ADDR_LO 0xD8120 + +#define mmMME_QM_CP_MSG_BASE0_ADDR_HI 0xD8124 + +#define mmMME_QM_CP_MSG_BASE1_ADDR_LO 0xD8128 + +#define mmMME_QM_CP_MSG_BASE1_ADDR_HI 0xD812C + +#define mmMME_QM_CP_MSG_BASE2_ADDR_LO 0xD8130 + +#define mmMME_QM_CP_MSG_BASE2_ADDR_HI 0xD8134 + +#define mmMME_QM_CP_MSG_BASE3_ADDR_LO 0xD8138 + +#define mmMME_QM_CP_MSG_BASE3_ADDR_HI 0xD813C + +#define mmMME_QM_CP_LDMA_TSIZE_OFFSET 0xD8140 + +#define mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xD8144 + +#define mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xD8148 + +#define mmMME_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xD814C + +#define mmMME_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xD8150 + +#define mmMME_QM_CP_LDMA_COMMIT_OFFSET 0xD8154 + +#define mmMME_QM_CP_FENCE0_RDATA 0xD8158 + +#define mmMME_QM_CP_FENCE1_RDATA 0xD815C + +#define mmMME_QM_CP_FENCE2_RDATA 0xD8160 + +#define mmMME_QM_CP_FENCE3_RDATA 0xD8164 + +#define mmMME_QM_CP_FENCE0_CNT 0xD8168 + +#define mmMME_QM_CP_FENCE1_CNT 0xD816C + +#define mmMME_QM_CP_FENCE2_CNT 0xD8170 + +#define mmMME_QM_CP_FENCE3_CNT 0xD8174 + +#define mmMME_QM_CP_STS 0xD8178 + +#define mmMME_QM_CP_CURRENT_INST_LO 0xD817C + +#define mmMME_QM_CP_CURRENT_INST_HI 0xD8180 + +#define mmMME_QM_CP_BARRIER_CFG 0xD8184 + +#define mmMME_QM_CP_DBG_0 0xD8188 + +#define mmMME_QM_PQ_BUF_ADDR 0xD8300 + 
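A minimal usage sketch for the definitions above (not part of the generated headers, and assuming a hypothetical 32-bit MMIO read helper named read_mme_reg()): the mm* macros name register offsets within the MME QMAN block, and the matching *_SHIFT/*_MASK pairs from mme_qm_masks.h extract individual bit fields from the value read at that offset.

	/* read the MME QMAN global status register and test the PQF idle bit */
	u32 sts0 = read_mme_reg(mmMME_QM_GLBL_STS0);
	bool pqf_idle = (sts0 & MME_QM_GLBL_STS0_PQF_IDLE_MASK) >>
			MME_QM_GLBL_STS0_PQF_IDLE_SHIFT;

The same pattern applies to every register/field pair in these headers; only the offset and field macros change.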
+#define mmMME_QM_PQ_BUF_RDATA 0xD8304 + +#define mmMME_QM_CQ_BUF_ADDR 0xD8308 + +#define mmMME_QM_CQ_BUF_RDATA 0xD830C + +#endif /* ASIC_REG_MME_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h new file mode 100644 index 000000000000..9436b1e2705a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h @@ -0,0 +1,1153 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MME_REGS_H_ +#define ASIC_REG_MME_REGS_H_ + +/* + ***************************************** + * MME (Prototype: MME) + ***************************************** + */ + +#define mmMME_ARCH_STATUS 0xD0000 + +#define mmMME_ARCH_A_BASE_ADDR_HIGH 0xD0008 + +#define mmMME_ARCH_B_BASE_ADDR_HIGH 0xD000C + +#define mmMME_ARCH_CIN_BASE_ADDR_HIGH 0xD0010 + +#define mmMME_ARCH_COUT_BASE_ADDR_HIGH 0xD0014 + +#define mmMME_ARCH_BIAS_BASE_ADDR_HIGH 0xD0018 + +#define mmMME_ARCH_A_BASE_ADDR_LOW 0xD001C + +#define mmMME_ARCH_B_BASE_ADDR_LOW 0xD0020 + +#define mmMME_ARCH_CIN_BASE_ADDR_LOW 0xD0024 + +#define mmMME_ARCH_COUT_BASE_ADDR_LOW 0xD0028 + +#define mmMME_ARCH_BIAS_BASE_ADDR_LOW 0xD002C + +#define mmMME_ARCH_HEADER 0xD0030 + +#define mmMME_ARCH_KERNEL_SIZE_MINUS_1 0xD0034 + +#define mmMME_ARCH_ASSOCIATED_DIMS_0 0xD0038 + +#define mmMME_ARCH_ASSOCIATED_DIMS_1 0xD003C + +#define mmMME_ARCH_COUT_SCALE 0xD0040 + +#define mmMME_ARCH_CIN_SCALE 0xD0044 + +#define mmMME_ARCH_GEMMLOWP_ZP 0xD0048 + +#define mmMME_ARCH_GEMMLOWP_EXPONENT 0xD004C + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_0 0xD0050 + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_1 0xD0054 + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_2 0xD0058 + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_3 0xD005C + +#define mmMME_ARCH_A_ROI_BASE_OFFSET_4 0xD0060 + +#define mmMME_ARCH_A_VALID_ELEMENTS_0 0xD0064 + +#define mmMME_ARCH_A_VALID_ELEMENTS_1 0xD0068 + +#define mmMME_ARCH_A_VALID_ELEMENTS_2 0xD006C + +#define mmMME_ARCH_A_VALID_ELEMENTS_3 0xD0070 + +#define mmMME_ARCH_A_VALID_ELEMENTS_4 0xD0074 + +#define mmMME_ARCH_A_LOOP_STRIDE_0 0xD0078 + +#define mmMME_ARCH_A_LOOP_STRIDE_1 0xD007C + +#define mmMME_ARCH_A_LOOP_STRIDE_2 0xD0080 + +#define mmMME_ARCH_A_LOOP_STRIDE_3 0xD0084 + +#define mmMME_ARCH_A_LOOP_STRIDE_4 0xD0088 + +#define mmMME_ARCH_A_ROI_SIZE_0 0xD008C + +#define mmMME_ARCH_A_ROI_SIZE_1 0xD0090 + +#define mmMME_ARCH_A_ROI_SIZE_2 0xD0094 + +#define mmMME_ARCH_A_ROI_SIZE_3 0xD0098 + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_0 0xD009C + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_1 0xD00A0 + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_2 0xD00A4 + +#define mmMME_ARCH_A_SPATIAL_START_OFFSET_3 0xD00A8 + +#define mmMME_ARCH_A_SPATIAL_STRIDE_0 0xD00AC + +#define mmMME_ARCH_A_SPATIAL_STRIDE_1 0xD00B0 + +#define mmMME_ARCH_A_SPATIAL_STRIDE_2 0xD00B4 + +#define mmMME_ARCH_A_SPATIAL_STRIDE_3 0xD00B8 + +#define mmMME_ARCH_A_SPATIAL_SIZE_MINUS_1 0xD00BC + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_0 0xD00C0 + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_1 0xD00C4 + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_2 0xD00C8 + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_3 0xD00CC + +#define mmMME_ARCH_B_ROI_BASE_OFFSET_4 0xD00D0 + +#define mmMME_ARCH_B_VALID_ELEMENTS_0 0xD00D4 + +#define mmMME_ARCH_B_VALID_ELEMENTS_1 0xD00D8 + +#define mmMME_ARCH_B_VALID_ELEMENTS_2 0xD00DC + +#define 
mmMME_ARCH_B_VALID_ELEMENTS_3 0xD00E0 + +#define mmMME_ARCH_B_VALID_ELEMENTS_4 0xD00E4 + +#define mmMME_ARCH_B_LOOP_STRIDE_0 0xD00E8 + +#define mmMME_ARCH_B_LOOP_STRIDE_1 0xD00EC + +#define mmMME_ARCH_B_LOOP_STRIDE_2 0xD00F0 + +#define mmMME_ARCH_B_LOOP_STRIDE_3 0xD00F4 + +#define mmMME_ARCH_B_LOOP_STRIDE_4 0xD00F8 + +#define mmMME_ARCH_B_ROI_SIZE_0 0xD00FC + +#define mmMME_ARCH_B_ROI_SIZE_1 0xD0100 + +#define mmMME_ARCH_B_ROI_SIZE_2 0xD0104 + +#define mmMME_ARCH_B_ROI_SIZE_3 0xD0108 + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_0 0xD010C + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_1 0xD0110 + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_2 0xD0114 + +#define mmMME_ARCH_B_SPATIAL_START_OFFSET_3 0xD0118 + +#define mmMME_ARCH_B_SPATIAL_STRIDE_0 0xD011C + +#define mmMME_ARCH_B_SPATIAL_STRIDE_1 0xD0120 + +#define mmMME_ARCH_B_SPATIAL_STRIDE_2 0xD0124 + +#define mmMME_ARCH_B_SPATIAL_STRIDE_3 0xD0128 + +#define mmMME_ARCH_B_SPATIAL_SIZE_MINUS_1 0xD012C + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_0 0xD0130 + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_1 0xD0134 + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_2 0xD0138 + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_3 0xD013C + +#define mmMME_ARCH_C_ROI_BASE_OFFSET_4 0xD0140 + +#define mmMME_ARCH_C_VALID_ELEMENTS_0 0xD0144 + +#define mmMME_ARCH_C_VALID_ELEMENTS_1 0xD0148 + +#define mmMME_ARCH_C_VALID_ELEMENTS_2 0xD014C + +#define mmMME_ARCH_C_VALID_ELEMENTS_3 0xD0150 + +#define mmMME_ARCH_C_VALID_ELEMENTS_4 0xD0154 + +#define mmMME_ARCH_C_LOOP_STRIDE_0 0xD0158 + +#define mmMME_ARCH_C_LOOP_STRIDE_1 0xD015C + +#define mmMME_ARCH_C_LOOP_STRIDE_2 0xD0160 + +#define mmMME_ARCH_C_LOOP_STRIDE_3 0xD0164 + +#define mmMME_ARCH_C_LOOP_STRIDE_4 0xD0168 + +#define mmMME_ARCH_C_ROI_SIZE_0 0xD016C + +#define mmMME_ARCH_C_ROI_SIZE_1 0xD0170 + +#define mmMME_ARCH_C_ROI_SIZE_2 0xD0174 + +#define mmMME_ARCH_C_ROI_SIZE_3 0xD0178 + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_0 0xD017C + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_1 0xD0180 + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_2 0xD0184 + +#define mmMME_ARCH_C_SPATIAL_START_OFFSET_3 0xD0188 + +#define mmMME_ARCH_C_SPATIAL_STRIDE_0 0xD018C + +#define mmMME_ARCH_C_SPATIAL_STRIDE_1 0xD0190 + +#define mmMME_ARCH_C_SPATIAL_STRIDE_2 0xD0194 + +#define mmMME_ARCH_C_SPATIAL_STRIDE_3 0xD0198 + +#define mmMME_ARCH_C_SPATIAL_SIZE_MINUS_1 0xD019C + +#define mmMME_ARCH_SYNC_OBJECT_MESSAGE 0xD01A0 + +#define mmMME_ARCH_E_PADDING_VALUE_A 0xD01A4 + +#define mmMME_ARCH_E_NUM_ITERATION_MINUS_1 0xD01A8 + +#define mmMME_ARCH_E_BUBBLES_PER_SPLIT 0xD01AC + +#define mmMME_CMD 0xD0200 + +#define mmMME_DUMMY 0xD0204 + +#define mmMME_RESET 0xD0208 + +#define mmMME_STALL 0xD020C + +#define mmMME_SM_BASE_ADDRESS_LOW 0xD0210 + +#define mmMME_SM_BASE_ADDRESS_HIGH 0xD0214 + +#define mmMME_DBGMEM_ADD 0xD0218 + +#define mmMME_DBGMEM_DATA_WR 0xD021C + +#define mmMME_DBGMEM_DATA_RD 0xD0220 + +#define mmMME_DBGMEM_CTRL 0xD0224 + +#define mmMME_DBGMEM_RC 0xD0228 + +#define mmMME_LOG_SHADOW 0xD022C + +#define mmMME_STORE_MAX_CREDIT 0xD0300 + +#define mmMME_AGU 0xD0304 + +#define mmMME_SBA 0xD0308 + +#define mmMME_SBB 0xD030C + +#define mmMME_SBC 0xD0310 + +#define mmMME_WBC 0xD0314 + +#define mmMME_SBA_CONTROL_DATA 0xD0318 + +#define mmMME_SBB_CONTROL_DATA 0xD031C + +#define mmMME_SBC_CONTROL_DATA 0xD0320 + +#define mmMME_WBC_CONTROL_DATA 0xD0324 + +#define mmMME_TE 0xD0328 + +#define mmMME_TE2DEC 0xD032C + +#define mmMME_REI_STATUS 0xD0330 + +#define mmMME_REI_MASK 0xD0334 + +#define mmMME_SEI_STATUS 0xD0338 + +#define mmMME_SEI_MASK 0xD033C + +#define mmMME_SPI_STATUS 0xD0340 
+ +#define mmMME_SPI_MASK 0xD0344 + +#define mmMME_SHADOW_0_STATUS 0xD0400 + +#define mmMME_SHADOW_0_A_BASE_ADDR_HIGH 0xD0408 + +#define mmMME_SHADOW_0_B_BASE_ADDR_HIGH 0xD040C + +#define mmMME_SHADOW_0_CIN_BASE_ADDR_HIGH 0xD0410 + +#define mmMME_SHADOW_0_COUT_BASE_ADDR_HIGH 0xD0414 + +#define mmMME_SHADOW_0_BIAS_BASE_ADDR_HIGH 0xD0418 + +#define mmMME_SHADOW_0_A_BASE_ADDR_LOW 0xD041C + +#define mmMME_SHADOW_0_B_BASE_ADDR_LOW 0xD0420 + +#define mmMME_SHADOW_0_CIN_BASE_ADDR_LOW 0xD0424 + +#define mmMME_SHADOW_0_COUT_BASE_ADDR_LOW 0xD0428 + +#define mmMME_SHADOW_0_BIAS_BASE_ADDR_LOW 0xD042C + +#define mmMME_SHADOW_0_HEADER 0xD0430 + +#define mmMME_SHADOW_0_KERNEL_SIZE_MINUS_1 0xD0434 + +#define mmMME_SHADOW_0_ASSOCIATED_DIMS_0 0xD0438 + +#define mmMME_SHADOW_0_ASSOCIATED_DIMS_1 0xD043C + +#define mmMME_SHADOW_0_COUT_SCALE 0xD0440 + +#define mmMME_SHADOW_0_CIN_SCALE 0xD0444 + +#define mmMME_SHADOW_0_GEMMLOWP_ZP 0xD0448 + +#define mmMME_SHADOW_0_GEMMLOWP_EXPONENT 0xD044C + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_0 0xD0450 + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_1 0xD0454 + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_2 0xD0458 + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_3 0xD045C + +#define mmMME_SHADOW_0_A_ROI_BASE_OFFSET_4 0xD0460 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_0 0xD0464 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_1 0xD0468 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_2 0xD046C + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_3 0xD0470 + +#define mmMME_SHADOW_0_A_VALID_ELEMENTS_4 0xD0474 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_0 0xD0478 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_1 0xD047C + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_2 0xD0480 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_3 0xD0484 + +#define mmMME_SHADOW_0_A_LOOP_STRIDE_4 0xD0488 + +#define mmMME_SHADOW_0_A_ROI_SIZE_0 0xD048C + +#define mmMME_SHADOW_0_A_ROI_SIZE_1 0xD0490 + +#define mmMME_SHADOW_0_A_ROI_SIZE_2 0xD0494 + +#define mmMME_SHADOW_0_A_ROI_SIZE_3 0xD0498 + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_0 0xD049C + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_1 0xD04A0 + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_2 0xD04A4 + +#define mmMME_SHADOW_0_A_SPATIAL_START_OFFSET_3 0xD04A8 + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_0 0xD04AC + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_1 0xD04B0 + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_2 0xD04B4 + +#define mmMME_SHADOW_0_A_SPATIAL_STRIDE_3 0xD04B8 + +#define mmMME_SHADOW_0_A_SPATIAL_SIZE_MINUS_1 0xD04BC + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_0 0xD04C0 + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_1 0xD04C4 + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_2 0xD04C8 + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_3 0xD04CC + +#define mmMME_SHADOW_0_B_ROI_BASE_OFFSET_4 0xD04D0 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_0 0xD04D4 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_1 0xD04D8 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_2 0xD04DC + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_3 0xD04E0 + +#define mmMME_SHADOW_0_B_VALID_ELEMENTS_4 0xD04E4 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_0 0xD04E8 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_1 0xD04EC + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_2 0xD04F0 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_3 0xD04F4 + +#define mmMME_SHADOW_0_B_LOOP_STRIDE_4 0xD04F8 + +#define mmMME_SHADOW_0_B_ROI_SIZE_0 0xD04FC + +#define mmMME_SHADOW_0_B_ROI_SIZE_1 0xD0500 + +#define mmMME_SHADOW_0_B_ROI_SIZE_2 0xD0504 + +#define mmMME_SHADOW_0_B_ROI_SIZE_3 0xD0508 + +#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_0 0xD050C + +#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_1 0xD0510 + 
+#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_2 0xD0514 + +#define mmMME_SHADOW_0_B_SPATIAL_START_OFFSET_3 0xD0518 + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_0 0xD051C + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_1 0xD0520 + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_2 0xD0524 + +#define mmMME_SHADOW_0_B_SPATIAL_STRIDE_3 0xD0528 + +#define mmMME_SHADOW_0_B_SPATIAL_SIZE_MINUS_1 0xD052C + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_0 0xD0530 + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_1 0xD0534 + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_2 0xD0538 + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_3 0xD053C + +#define mmMME_SHADOW_0_C_ROI_BASE_OFFSET_4 0xD0540 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_0 0xD0544 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_1 0xD0548 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_2 0xD054C + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_3 0xD0550 + +#define mmMME_SHADOW_0_C_VALID_ELEMENTS_4 0xD0554 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_0 0xD0558 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_1 0xD055C + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_2 0xD0560 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_3 0xD0564 + +#define mmMME_SHADOW_0_C_LOOP_STRIDE_4 0xD0568 + +#define mmMME_SHADOW_0_C_ROI_SIZE_0 0xD056C + +#define mmMME_SHADOW_0_C_ROI_SIZE_1 0xD0570 + +#define mmMME_SHADOW_0_C_ROI_SIZE_2 0xD0574 + +#define mmMME_SHADOW_0_C_ROI_SIZE_3 0xD0578 + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_0 0xD057C + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_1 0xD0580 + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_2 0xD0584 + +#define mmMME_SHADOW_0_C_SPATIAL_START_OFFSET_3 0xD0588 + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_0 0xD058C + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_1 0xD0590 + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_2 0xD0594 + +#define mmMME_SHADOW_0_C_SPATIAL_STRIDE_3 0xD0598 + +#define mmMME_SHADOW_0_C_SPATIAL_SIZE_MINUS_1 0xD059C + +#define mmMME_SHADOW_0_SYNC_OBJECT_MESSAGE 0xD05A0 + +#define mmMME_SHADOW_0_E_PADDING_VALUE_A 0xD05A4 + +#define mmMME_SHADOW_0_E_NUM_ITERATION_MINUS_1 0xD05A8 + +#define mmMME_SHADOW_0_E_BUBBLES_PER_SPLIT 0xD05AC + +#define mmMME_SHADOW_1_STATUS 0xD0600 + +#define mmMME_SHADOW_1_A_BASE_ADDR_HIGH 0xD0608 + +#define mmMME_SHADOW_1_B_BASE_ADDR_HIGH 0xD060C + +#define mmMME_SHADOW_1_CIN_BASE_ADDR_HIGH 0xD0610 + +#define mmMME_SHADOW_1_COUT_BASE_ADDR_HIGH 0xD0614 + +#define mmMME_SHADOW_1_BIAS_BASE_ADDR_HIGH 0xD0618 + +#define mmMME_SHADOW_1_A_BASE_ADDR_LOW 0xD061C + +#define mmMME_SHADOW_1_B_BASE_ADDR_LOW 0xD0620 + +#define mmMME_SHADOW_1_CIN_BASE_ADDR_LOW 0xD0624 + +#define mmMME_SHADOW_1_COUT_BASE_ADDR_LOW 0xD0628 + +#define mmMME_SHADOW_1_BIAS_BASE_ADDR_LOW 0xD062C + +#define mmMME_SHADOW_1_HEADER 0xD0630 + +#define mmMME_SHADOW_1_KERNEL_SIZE_MINUS_1 0xD0634 + +#define mmMME_SHADOW_1_ASSOCIATED_DIMS_0 0xD0638 + +#define mmMME_SHADOW_1_ASSOCIATED_DIMS_1 0xD063C + +#define mmMME_SHADOW_1_COUT_SCALE 0xD0640 + +#define mmMME_SHADOW_1_CIN_SCALE 0xD0644 + +#define mmMME_SHADOW_1_GEMMLOWP_ZP 0xD0648 + +#define mmMME_SHADOW_1_GEMMLOWP_EXPONENT 0xD064C + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_0 0xD0650 + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_1 0xD0654 + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_2 0xD0658 + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_3 0xD065C + +#define mmMME_SHADOW_1_A_ROI_BASE_OFFSET_4 0xD0660 + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_0 0xD0664 + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_1 0xD0668 + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_2 0xD066C + +#define mmMME_SHADOW_1_A_VALID_ELEMENTS_3 0xD0670 + +#define 
mmMME_SHADOW_1_A_VALID_ELEMENTS_4 0xD0674 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_0 0xD0678 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_1 0xD067C + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_2 0xD0680 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_3 0xD0684 + +#define mmMME_SHADOW_1_A_LOOP_STRIDE_4 0xD0688 + +#define mmMME_SHADOW_1_A_ROI_SIZE_0 0xD068C + +#define mmMME_SHADOW_1_A_ROI_SIZE_1 0xD0690 + +#define mmMME_SHADOW_1_A_ROI_SIZE_2 0xD0694 + +#define mmMME_SHADOW_1_A_ROI_SIZE_3 0xD0698 + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_0 0xD069C + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_1 0xD06A0 + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_2 0xD06A4 + +#define mmMME_SHADOW_1_A_SPATIAL_START_OFFSET_3 0xD06A8 + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_0 0xD06AC + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_1 0xD06B0 + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_2 0xD06B4 + +#define mmMME_SHADOW_1_A_SPATIAL_STRIDE_3 0xD06B8 + +#define mmMME_SHADOW_1_A_SPATIAL_SIZE_MINUS_1 0xD06BC + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_0 0xD06C0 + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_1 0xD06C4 + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_2 0xD06C8 + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_3 0xD06CC + +#define mmMME_SHADOW_1_B_ROI_BASE_OFFSET_4 0xD06D0 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_0 0xD06D4 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_1 0xD06D8 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_2 0xD06DC + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_3 0xD06E0 + +#define mmMME_SHADOW_1_B_VALID_ELEMENTS_4 0xD06E4 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_0 0xD06E8 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_1 0xD06EC + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_2 0xD06F0 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_3 0xD06F4 + +#define mmMME_SHADOW_1_B_LOOP_STRIDE_4 0xD06F8 + +#define mmMME_SHADOW_1_B_ROI_SIZE_0 0xD06FC + +#define mmMME_SHADOW_1_B_ROI_SIZE_1 0xD0700 + +#define mmMME_SHADOW_1_B_ROI_SIZE_2 0xD0704 + +#define mmMME_SHADOW_1_B_ROI_SIZE_3 0xD0708 + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_0 0xD070C + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_1 0xD0710 + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_2 0xD0714 + +#define mmMME_SHADOW_1_B_SPATIAL_START_OFFSET_3 0xD0718 + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_0 0xD071C + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_1 0xD0720 + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_2 0xD0724 + +#define mmMME_SHADOW_1_B_SPATIAL_STRIDE_3 0xD0728 + +#define mmMME_SHADOW_1_B_SPATIAL_SIZE_MINUS_1 0xD072C + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_0 0xD0730 + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_1 0xD0734 + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_2 0xD0738 + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_3 0xD073C + +#define mmMME_SHADOW_1_C_ROI_BASE_OFFSET_4 0xD0740 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_0 0xD0744 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_1 0xD0748 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_2 0xD074C + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_3 0xD0750 + +#define mmMME_SHADOW_1_C_VALID_ELEMENTS_4 0xD0754 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_0 0xD0758 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_1 0xD075C + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_2 0xD0760 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_3 0xD0764 + +#define mmMME_SHADOW_1_C_LOOP_STRIDE_4 0xD0768 + +#define mmMME_SHADOW_1_C_ROI_SIZE_0 0xD076C + +#define mmMME_SHADOW_1_C_ROI_SIZE_1 0xD0770 + +#define mmMME_SHADOW_1_C_ROI_SIZE_2 0xD0774 + +#define mmMME_SHADOW_1_C_ROI_SIZE_3 0xD0778 + +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_0 0xD077C + +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_1 0xD0780 
+ +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_2 0xD0784 + +#define mmMME_SHADOW_1_C_SPATIAL_START_OFFSET_3 0xD0788 + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_0 0xD078C + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_1 0xD0790 + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_2 0xD0794 + +#define mmMME_SHADOW_1_C_SPATIAL_STRIDE_3 0xD0798 + +#define mmMME_SHADOW_1_C_SPATIAL_SIZE_MINUS_1 0xD079C + +#define mmMME_SHADOW_1_SYNC_OBJECT_MESSAGE 0xD07A0 + +#define mmMME_SHADOW_1_E_PADDING_VALUE_A 0xD07A4 + +#define mmMME_SHADOW_1_E_NUM_ITERATION_MINUS_1 0xD07A8 + +#define mmMME_SHADOW_1_E_BUBBLES_PER_SPLIT 0xD07AC + +#define mmMME_SHADOW_2_STATUS 0xD0800 + +#define mmMME_SHADOW_2_A_BASE_ADDR_HIGH 0xD0808 + +#define mmMME_SHADOW_2_B_BASE_ADDR_HIGH 0xD080C + +#define mmMME_SHADOW_2_CIN_BASE_ADDR_HIGH 0xD0810 + +#define mmMME_SHADOW_2_COUT_BASE_ADDR_HIGH 0xD0814 + +#define mmMME_SHADOW_2_BIAS_BASE_ADDR_HIGH 0xD0818 + +#define mmMME_SHADOW_2_A_BASE_ADDR_LOW 0xD081C + +#define mmMME_SHADOW_2_B_BASE_ADDR_LOW 0xD0820 + +#define mmMME_SHADOW_2_CIN_BASE_ADDR_LOW 0xD0824 + +#define mmMME_SHADOW_2_COUT_BASE_ADDR_LOW 0xD0828 + +#define mmMME_SHADOW_2_BIAS_BASE_ADDR_LOW 0xD082C + +#define mmMME_SHADOW_2_HEADER 0xD0830 + +#define mmMME_SHADOW_2_KERNEL_SIZE_MINUS_1 0xD0834 + +#define mmMME_SHADOW_2_ASSOCIATED_DIMS_0 0xD0838 + +#define mmMME_SHADOW_2_ASSOCIATED_DIMS_1 0xD083C + +#define mmMME_SHADOW_2_COUT_SCALE 0xD0840 + +#define mmMME_SHADOW_2_CIN_SCALE 0xD0844 + +#define mmMME_SHADOW_2_GEMMLOWP_ZP 0xD0848 + +#define mmMME_SHADOW_2_GEMMLOWP_EXPONENT 0xD084C + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_0 0xD0850 + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_1 0xD0854 + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_2 0xD0858 + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_3 0xD085C + +#define mmMME_SHADOW_2_A_ROI_BASE_OFFSET_4 0xD0860 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_0 0xD0864 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_1 0xD0868 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_2 0xD086C + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_3 0xD0870 + +#define mmMME_SHADOW_2_A_VALID_ELEMENTS_4 0xD0874 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_0 0xD0878 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_1 0xD087C + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_2 0xD0880 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_3 0xD0884 + +#define mmMME_SHADOW_2_A_LOOP_STRIDE_4 0xD0888 + +#define mmMME_SHADOW_2_A_ROI_SIZE_0 0xD088C + +#define mmMME_SHADOW_2_A_ROI_SIZE_1 0xD0890 + +#define mmMME_SHADOW_2_A_ROI_SIZE_2 0xD0894 + +#define mmMME_SHADOW_2_A_ROI_SIZE_3 0xD0898 + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_0 0xD089C + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_1 0xD08A0 + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_2 0xD08A4 + +#define mmMME_SHADOW_2_A_SPATIAL_START_OFFSET_3 0xD08A8 + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_0 0xD08AC + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_1 0xD08B0 + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_2 0xD08B4 + +#define mmMME_SHADOW_2_A_SPATIAL_STRIDE_3 0xD08B8 + +#define mmMME_SHADOW_2_A_SPATIAL_SIZE_MINUS_1 0xD08BC + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_0 0xD08C0 + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_1 0xD08C4 + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_2 0xD08C8 + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_3 0xD08CC + +#define mmMME_SHADOW_2_B_ROI_BASE_OFFSET_4 0xD08D0 + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_0 0xD08D4 + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_1 0xD08D8 + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_2 0xD08DC + +#define mmMME_SHADOW_2_B_VALID_ELEMENTS_3 0xD08E0 + +#define 
mmMME_SHADOW_2_B_VALID_ELEMENTS_4 0xD08E4 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_0 0xD08E8 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_1 0xD08EC + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_2 0xD08F0 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_3 0xD08F4 + +#define mmMME_SHADOW_2_B_LOOP_STRIDE_4 0xD08F8 + +#define mmMME_SHADOW_2_B_ROI_SIZE_0 0xD08FC + +#define mmMME_SHADOW_2_B_ROI_SIZE_1 0xD0900 + +#define mmMME_SHADOW_2_B_ROI_SIZE_2 0xD0904 + +#define mmMME_SHADOW_2_B_ROI_SIZE_3 0xD0908 + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_0 0xD090C + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_1 0xD0910 + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_2 0xD0914 + +#define mmMME_SHADOW_2_B_SPATIAL_START_OFFSET_3 0xD0918 + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_0 0xD091C + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_1 0xD0920 + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_2 0xD0924 + +#define mmMME_SHADOW_2_B_SPATIAL_STRIDE_3 0xD0928 + +#define mmMME_SHADOW_2_B_SPATIAL_SIZE_MINUS_1 0xD092C + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_0 0xD0930 + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_1 0xD0934 + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_2 0xD0938 + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_3 0xD093C + +#define mmMME_SHADOW_2_C_ROI_BASE_OFFSET_4 0xD0940 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_0 0xD0944 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_1 0xD0948 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_2 0xD094C + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_3 0xD0950 + +#define mmMME_SHADOW_2_C_VALID_ELEMENTS_4 0xD0954 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_0 0xD0958 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_1 0xD095C + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_2 0xD0960 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_3 0xD0964 + +#define mmMME_SHADOW_2_C_LOOP_STRIDE_4 0xD0968 + +#define mmMME_SHADOW_2_C_ROI_SIZE_0 0xD096C + +#define mmMME_SHADOW_2_C_ROI_SIZE_1 0xD0970 + +#define mmMME_SHADOW_2_C_ROI_SIZE_2 0xD0974 + +#define mmMME_SHADOW_2_C_ROI_SIZE_3 0xD0978 + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_0 0xD097C + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_1 0xD0980 + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_2 0xD0984 + +#define mmMME_SHADOW_2_C_SPATIAL_START_OFFSET_3 0xD0988 + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_0 0xD098C + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_1 0xD0990 + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_2 0xD0994 + +#define mmMME_SHADOW_2_C_SPATIAL_STRIDE_3 0xD0998 + +#define mmMME_SHADOW_2_C_SPATIAL_SIZE_MINUS_1 0xD099C + +#define mmMME_SHADOW_2_SYNC_OBJECT_MESSAGE 0xD09A0 + +#define mmMME_SHADOW_2_E_PADDING_VALUE_A 0xD09A4 + +#define mmMME_SHADOW_2_E_NUM_ITERATION_MINUS_1 0xD09A8 + +#define mmMME_SHADOW_2_E_BUBBLES_PER_SPLIT 0xD09AC + +#define mmMME_SHADOW_3_STATUS 0xD0A00 + +#define mmMME_SHADOW_3_A_BASE_ADDR_HIGH 0xD0A08 + +#define mmMME_SHADOW_3_B_BASE_ADDR_HIGH 0xD0A0C + +#define mmMME_SHADOW_3_CIN_BASE_ADDR_HIGH 0xD0A10 + +#define mmMME_SHADOW_3_COUT_BASE_ADDR_HIGH 0xD0A14 + +#define mmMME_SHADOW_3_BIAS_BASE_ADDR_HIGH 0xD0A18 + +#define mmMME_SHADOW_3_A_BASE_ADDR_LOW 0xD0A1C + +#define mmMME_SHADOW_3_B_BASE_ADDR_LOW 0xD0A20 + +#define mmMME_SHADOW_3_CIN_BASE_ADDR_LOW 0xD0A24 + +#define mmMME_SHADOW_3_COUT_BASE_ADDR_LOW 0xD0A28 + +#define mmMME_SHADOW_3_BIAS_BASE_ADDR_LOW 0xD0A2C + +#define mmMME_SHADOW_3_HEADER 0xD0A30 + +#define mmMME_SHADOW_3_KERNEL_SIZE_MINUS_1 0xD0A34 + +#define mmMME_SHADOW_3_ASSOCIATED_DIMS_0 0xD0A38 + +#define mmMME_SHADOW_3_ASSOCIATED_DIMS_1 0xD0A3C + +#define mmMME_SHADOW_3_COUT_SCALE 0xD0A40 + +#define mmMME_SHADOW_3_CIN_SCALE 0xD0A44 + +#define 
mmMME_SHADOW_3_GEMMLOWP_ZP 0xD0A48 + +#define mmMME_SHADOW_3_GEMMLOWP_EXPONENT 0xD0A4C + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_0 0xD0A50 + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_1 0xD0A54 + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_2 0xD0A58 + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_3 0xD0A5C + +#define mmMME_SHADOW_3_A_ROI_BASE_OFFSET_4 0xD0A60 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_0 0xD0A64 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_1 0xD0A68 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_2 0xD0A6C + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_3 0xD0A70 + +#define mmMME_SHADOW_3_A_VALID_ELEMENTS_4 0xD0A74 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_0 0xD0A78 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_1 0xD0A7C + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_2 0xD0A80 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_3 0xD0A84 + +#define mmMME_SHADOW_3_A_LOOP_STRIDE_4 0xD0A88 + +#define mmMME_SHADOW_3_A_ROI_SIZE_0 0xD0A8C + +#define mmMME_SHADOW_3_A_ROI_SIZE_1 0xD0A90 + +#define mmMME_SHADOW_3_A_ROI_SIZE_2 0xD0A94 + +#define mmMME_SHADOW_3_A_ROI_SIZE_3 0xD0A98 + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_0 0xD0A9C + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_1 0xD0AA0 + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_2 0xD0AA4 + +#define mmMME_SHADOW_3_A_SPATIAL_START_OFFSET_3 0xD0AA8 + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_0 0xD0AAC + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_1 0xD0AB0 + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_2 0xD0AB4 + +#define mmMME_SHADOW_3_A_SPATIAL_STRIDE_3 0xD0AB8 + +#define mmMME_SHADOW_3_A_SPATIAL_SIZE_MINUS_1 0xD0ABC + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_0 0xD0AC0 + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_1 0xD0AC4 + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_2 0xD0AC8 + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_3 0xD0ACC + +#define mmMME_SHADOW_3_B_ROI_BASE_OFFSET_4 0xD0AD0 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_0 0xD0AD4 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_1 0xD0AD8 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_2 0xD0ADC + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_3 0xD0AE0 + +#define mmMME_SHADOW_3_B_VALID_ELEMENTS_4 0xD0AE4 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_0 0xD0AE8 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_1 0xD0AEC + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_2 0xD0AF0 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_3 0xD0AF4 + +#define mmMME_SHADOW_3_B_LOOP_STRIDE_4 0xD0AF8 + +#define mmMME_SHADOW_3_B_ROI_SIZE_0 0xD0AFC + +#define mmMME_SHADOW_3_B_ROI_SIZE_1 0xD0B00 + +#define mmMME_SHADOW_3_B_ROI_SIZE_2 0xD0B04 + +#define mmMME_SHADOW_3_B_ROI_SIZE_3 0xD0B08 + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_0 0xD0B0C + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_1 0xD0B10 + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_2 0xD0B14 + +#define mmMME_SHADOW_3_B_SPATIAL_START_OFFSET_3 0xD0B18 + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_0 0xD0B1C + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_1 0xD0B20 + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_2 0xD0B24 + +#define mmMME_SHADOW_3_B_SPATIAL_STRIDE_3 0xD0B28 + +#define mmMME_SHADOW_3_B_SPATIAL_SIZE_MINUS_1 0xD0B2C + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_0 0xD0B30 + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_1 0xD0B34 + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_2 0xD0B38 + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_3 0xD0B3C + +#define mmMME_SHADOW_3_C_ROI_BASE_OFFSET_4 0xD0B40 + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_0 0xD0B44 + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_1 0xD0B48 + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_2 0xD0B4C + +#define mmMME_SHADOW_3_C_VALID_ELEMENTS_3 0xD0B50 + +#define 
mmMME_SHADOW_3_C_VALID_ELEMENTS_4 0xD0B54 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_0 0xD0B58 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_1 0xD0B5C + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_2 0xD0B60 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_3 0xD0B64 + +#define mmMME_SHADOW_3_C_LOOP_STRIDE_4 0xD0B68 + +#define mmMME_SHADOW_3_C_ROI_SIZE_0 0xD0B6C + +#define mmMME_SHADOW_3_C_ROI_SIZE_1 0xD0B70 + +#define mmMME_SHADOW_3_C_ROI_SIZE_2 0xD0B74 + +#define mmMME_SHADOW_3_C_ROI_SIZE_3 0xD0B78 + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_0 0xD0B7C + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_1 0xD0B80 + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_2 0xD0B84 + +#define mmMME_SHADOW_3_C_SPATIAL_START_OFFSET_3 0xD0B88 + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_0 0xD0B8C + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_1 0xD0B90 + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_2 0xD0B94 + +#define mmMME_SHADOW_3_C_SPATIAL_STRIDE_3 0xD0B98 + +#define mmMME_SHADOW_3_C_SPATIAL_SIZE_MINUS_1 0xD0B9C + +#define mmMME_SHADOW_3_SYNC_OBJECT_MESSAGE 0xD0BA0 + +#define mmMME_SHADOW_3_E_PADDING_VALUE_A 0xD0BA4 + +#define mmMME_SHADOW_3_E_NUM_ITERATION_MINUS_1 0xD0BA8 + +#define mmMME_SHADOW_3_E_BUBBLES_PER_SPLIT 0xD0BAC + +#endif /* ASIC_REG_MME_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h new file mode 100644 index 000000000000..3a78078d3c4c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MMU_MASKS_H_ +#define ASIC_REG_MMU_MASKS_H_ + +/* + ***************************************** + * MMU (Prototype: MMU) + ***************************************** + */ + +/* MMU_INPUT_FIFO_THRESHOLD */ +#define MMU_INPUT_FIFO_THRESHOLD_PCI_SHIFT 0 +#define MMU_INPUT_FIFO_THRESHOLD_PCI_MASK 0x7 +#define MMU_INPUT_FIFO_THRESHOLD_PSOC_SHIFT 4 +#define MMU_INPUT_FIFO_THRESHOLD_PSOC_MASK 0x70 +#define MMU_INPUT_FIFO_THRESHOLD_DMA_SHIFT 8 +#define MMU_INPUT_FIFO_THRESHOLD_DMA_MASK 0x700 +#define MMU_INPUT_FIFO_THRESHOLD_CPU_SHIFT 12 +#define MMU_INPUT_FIFO_THRESHOLD_CPU_MASK 0x7000 +#define MMU_INPUT_FIFO_THRESHOLD_MME_SHIFT 16 +#define MMU_INPUT_FIFO_THRESHOLD_MME_MASK 0x70000 +#define MMU_INPUT_FIFO_THRESHOLD_TPC_SHIFT 20 +#define MMU_INPUT_FIFO_THRESHOLD_TPC_MASK 0x700000 +#define MMU_INPUT_FIFO_THRESHOLD_OTHER_SHIFT 24 +#define MMU_INPUT_FIFO_THRESHOLD_OTHER_MASK 0x7000000 + +/* MMU_MMU_ENABLE */ +#define MMU_MMU_ENABLE_R_SHIFT 0 +#define MMU_MMU_ENABLE_R_MASK 0x1 + +/* MMU_FORCE_ORDERING */ +#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_SHIFT 0 +#define MMU_FORCE_ORDERING_DMA_WEAK_ORDERING_MASK 0x1 +#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_SHIFT 1 +#define MMU_FORCE_ORDERING_PSOC_WEAK_ORDERING_MASK 0x2 +#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_SHIFT 2 +#define MMU_FORCE_ORDERING_PCI_WEAK_ORDERING_MASK 0x4 +#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_SHIFT 3 +#define MMU_FORCE_ORDERING_CPU_WEAK_ORDERING_MASK 0x8 +#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_SHIFT 4 +#define MMU_FORCE_ORDERING_MME_WEAK_ORDERING_MASK 0x10 +#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_SHIFT 5 +#define MMU_FORCE_ORDERING_TPC_WEAK_ORDERING_MASK 0x20 +#define MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_SHIFT 6 +#define 
MMU_FORCE_ORDERING_DEFAULT_WEAK_ORDERING_MASK 0x40 +#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_SHIFT 8 +#define MMU_FORCE_ORDERING_DMA_STRONG_ORDERING_MASK 0x100 +#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_SHIFT 9 +#define MMU_FORCE_ORDERING_PSOC_STRONG_ORDERING_MASK 0x200 +#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_SHIFT 10 +#define MMU_FORCE_ORDERING_PCI_STRONG_ORDERING_MASK 0x400 +#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_SHIFT 11 +#define MMU_FORCE_ORDERING_CPU_STRONG_ORDERING_MASK 0x800 +#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_SHIFT 12 +#define MMU_FORCE_ORDERING_MME_STRONG_ORDERING_MASK 0x1000 +#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_SHIFT 13 +#define MMU_FORCE_ORDERING_TPC_STRONG_ORDERING_MASK 0x2000 +#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_SHIFT 14 +#define MMU_FORCE_ORDERING_DEFAULT_STRONG_ORDERING_MASK 0x4000 + +/* MMU_FEATURE_ENABLE */ +#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_SHIFT 0 +#define MMU_FEATURE_ENABLE_VA_ORDERING_EN_MASK 0x1 +#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_SHIFT 1 +#define MMU_FEATURE_ENABLE_CLEAN_LINK_LIST_MASK 0x2 +#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_SHIFT 2 +#define MMU_FEATURE_ENABLE_HOP_OFFSET_EN_MASK 0x4 +#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_SHIFT 3 +#define MMU_FEATURE_ENABLE_OBI_ORDERING_EN_MASK 0x8 +#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_SHIFT 4 +#define MMU_FEATURE_ENABLE_STRONG_ORDERING_READ_EN_MASK 0x10 +#define MMU_FEATURE_ENABLE_TRACE_ENABLE_SHIFT 5 +#define MMU_FEATURE_ENABLE_TRACE_ENABLE_MASK 0x20 + +/* MMU_VA_ORDERING_MASK_31_7 */ +#define MMU_VA_ORDERING_MASK_31_7_R_SHIFT 0 +#define MMU_VA_ORDERING_MASK_31_7_R_MASK 0x1FFFFFF + +/* MMU_VA_ORDERING_MASK_49_32 */ +#define MMU_VA_ORDERING_MASK_49_32_R_SHIFT 0 +#define MMU_VA_ORDERING_MASK_49_32_R_MASK 0x3FFFF + +/* MMU_LOG2_DDR_SIZE */ +#define MMU_LOG2_DDR_SIZE_R_SHIFT 0 +#define MMU_LOG2_DDR_SIZE_R_MASK 0xFF + +/* MMU_SCRAMBLER */ +#define MMU_SCRAMBLER_ADDR_BIT_SHIFT 0 +#define MMU_SCRAMBLER_ADDR_BIT_MASK 0x3F +#define MMU_SCRAMBLER_SINGLE_DDR_EN_SHIFT 6 +#define MMU_SCRAMBLER_SINGLE_DDR_EN_MASK 0x40 +#define MMU_SCRAMBLER_SINGLE_DDR_ID_SHIFT 7 +#define MMU_SCRAMBLER_SINGLE_DDR_ID_MASK 0x80 + +/* MMU_MEM_INIT_BUSY */ +#define MMU_MEM_INIT_BUSY_DATA_SHIFT 0 +#define MMU_MEM_INIT_BUSY_DATA_MASK 0x3 +#define MMU_MEM_INIT_BUSY_OBI0_SHIFT 2 +#define MMU_MEM_INIT_BUSY_OBI0_MASK 0x4 +#define MMU_MEM_INIT_BUSY_OBI1_SHIFT 3 +#define MMU_MEM_INIT_BUSY_OBI1_MASK 0x8 + +/* MMU_SPI_MASK */ +#define MMU_SPI_MASK_R_SHIFT 0 +#define MMU_SPI_MASK_R_MASK 0xFF + +/* MMU_SPI_CAUSE */ +#define MMU_SPI_CAUSE_R_SHIFT 0 +#define MMU_SPI_CAUSE_R_MASK 0xFF + +/* MMU_PAGE_ERROR_CAPTURE */ +#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_SHIFT 0 +#define MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF +#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18 +#define MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000 + +/* MMU_PAGE_ERROR_CAPTURE_VA */ +#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0 +#define MMU_PAGE_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF + +/* MMU_ACCESS_ERROR_CAPTURE */ +#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_SHIFT 0 +#define MMU_ACCESS_ERROR_CAPTURE_VA_49_32_MASK 0x3FFFF +#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_SHIFT 18 +#define MMU_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK 0x40000 + +/* MMU_ACCESS_ERROR_CAPTURE_VA */ +#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_SHIFT 0 +#define MMU_ACCESS_ERROR_CAPTURE_VA_VA_31_0_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_MMU_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h 
b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h new file mode 100644 index 000000000000..bec6c014135c --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_MMU_REGS_H_ +#define ASIC_REG_MMU_REGS_H_ + +/* + ***************************************** + * MMU (Prototype: MMU) + ***************************************** + */ + +#define mmMMU_INPUT_FIFO_THRESHOLD 0x480000 + +#define mmMMU_MMU_ENABLE 0x48000C + +#define mmMMU_FORCE_ORDERING 0x480010 + +#define mmMMU_FEATURE_ENABLE 0x480014 + +#define mmMMU_VA_ORDERING_MASK_31_7 0x480018 + +#define mmMMU_VA_ORDERING_MASK_49_32 0x48001C + +#define mmMMU_LOG2_DDR_SIZE 0x480020 + +#define mmMMU_SCRAMBLER 0x480024 + +#define mmMMU_MEM_INIT_BUSY 0x480028 + +#define mmMMU_SPI_MASK 0x48002C + +#define mmMMU_SPI_CAUSE 0x480030 + +#define mmMMU_PAGE_ERROR_CAPTURE 0x480034 + +#define mmMMU_PAGE_ERROR_CAPTURE_VA 0x480038 + +#define mmMMU_ACCESS_ERROR_CAPTURE 0x48003C + +#define mmMMU_ACCESS_ERROR_CAPTURE_VA 0x480040 + +#endif /* ASIC_REG_MMU_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h new file mode 100644 index 000000000000..209e41402a11 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PCI_NRTR_MASKS_H_ +#define ASIC_REG_PCI_NRTR_MASKS_H_ + +/* + ***************************************** + * PCI_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +/* PCI_NRTR_HBW_MAX_CRED */ +#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0 +#define PCI_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F +#define PCI_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8 +#define PCI_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16 +#define PCI_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define PCI_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24 +#define PCI_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* PCI_NRTR_LBW_MAX_CRED */ +#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0 +#define PCI_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F +#define PCI_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8 +#define PCI_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16 +#define PCI_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define PCI_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24 +#define PCI_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* PCI_NRTR_DBG_E_ARB */ +#define PCI_NRTR_DBG_E_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_E_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_E_ARB_S_SHIFT 8 +#define PCI_NRTR_DBG_E_ARB_S_MASK 0x700 +#define PCI_NRTR_DBG_E_ARB_N_SHIFT 16 +#define PCI_NRTR_DBG_E_ARB_N_MASK 0x70000 +#define PCI_NRTR_DBG_E_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_E_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_W_ARB */ +#define PCI_NRTR_DBG_W_ARB_E_SHIFT 0 +#define PCI_NRTR_DBG_W_ARB_E_MASK 0x7 +#define PCI_NRTR_DBG_W_ARB_S_SHIFT 8 +#define PCI_NRTR_DBG_W_ARB_S_MASK 0x700 +#define PCI_NRTR_DBG_W_ARB_N_SHIFT 16 +#define PCI_NRTR_DBG_W_ARB_N_MASK 0x70000 +#define 
PCI_NRTR_DBG_W_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_W_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_N_ARB */ +#define PCI_NRTR_DBG_N_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_N_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_N_ARB_E_SHIFT 8 +#define PCI_NRTR_DBG_N_ARB_E_MASK 0x700 +#define PCI_NRTR_DBG_N_ARB_S_SHIFT 16 +#define PCI_NRTR_DBG_N_ARB_S_MASK 0x70000 +#define PCI_NRTR_DBG_N_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_N_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_S_ARB */ +#define PCI_NRTR_DBG_S_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_S_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_S_ARB_E_SHIFT 8 +#define PCI_NRTR_DBG_S_ARB_E_MASK 0x700 +#define PCI_NRTR_DBG_S_ARB_N_SHIFT 16 +#define PCI_NRTR_DBG_S_ARB_N_MASK 0x70000 +#define PCI_NRTR_DBG_S_ARB_L_SHIFT 24 +#define PCI_NRTR_DBG_S_ARB_L_MASK 0x7000000 + +/* PCI_NRTR_DBG_L_ARB */ +#define PCI_NRTR_DBG_L_ARB_W_SHIFT 0 +#define PCI_NRTR_DBG_L_ARB_W_MASK 0x7 +#define PCI_NRTR_DBG_L_ARB_E_SHIFT 8 +#define PCI_NRTR_DBG_L_ARB_E_MASK 0x700 +#define PCI_NRTR_DBG_L_ARB_S_SHIFT 16 +#define PCI_NRTR_DBG_L_ARB_S_MASK 0x70000 +#define PCI_NRTR_DBG_L_ARB_N_SHIFT 24 +#define PCI_NRTR_DBG_L_ARB_N_MASK 0x7000000 + +/* PCI_NRTR_DBG_E_ARB_MAX */ +#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_W_ARB_MAX */ +#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_N_ARB_MAX */ +#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_S_ARB_MAX */ +#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_DBG_L_ARB_MAX */ +#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0 +#define PCI_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F + +/* PCI_NRTR_SPLIT_COEF */ +#define PCI_NRTR_SPLIT_COEF_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_CFG */ +#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0 +#define PCI_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1 +#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1 +#define PCI_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2 +#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2 +#define PCI_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC +#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4 +#define PCI_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10 +#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5 +#define PCI_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20 +#define PCI_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6 +#define PCI_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0 + +/* PCI_NRTR_SPLIT_RD_SAT */ +#define PCI_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_RD_RST_TOKEN */ +#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_RD_TIMEOUT */ +#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_SPLIT_WR_SAT */ +#define PCI_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF + +/* PCI_NRTR_WPLIT_WR_TST_TOLEN */ +#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0 +#define PCI_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF + +/* PCI_NRTR_SPLIT_WR_TIMEOUT */ +#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0 +#define PCI_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_HBW_RANGE_HIT */ +#define PCI_NRTR_HBW_RANGE_HIT_IND_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF + +/* PCI_NRTR_HBW_RANGE_MASK_L */ +#define PCI_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0 +#define 
PCI_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_HBW_RANGE_MASK_H */ +#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF + +/* PCI_NRTR_HBW_RANGE_BASE_L */ +#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF + +/* PCI_NRTR_HBW_RANGE_BASE_H */ +#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0 +#define PCI_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF + +/* PCI_NRTR_LBW_RANGE_HIT */ +#define PCI_NRTR_LBW_RANGE_HIT_IND_SHIFT 0 +#define PCI_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF + +/* PCI_NRTR_LBW_RANGE_MASK */ +#define PCI_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0 +#define PCI_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF + +/* PCI_NRTR_LBW_RANGE_BASE */ +#define PCI_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0 +#define PCI_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF + +/* PCI_NRTR_RGLTR */ +#define PCI_NRTR_RGLTR_WR_EN_SHIFT 0 +#define PCI_NRTR_RGLTR_WR_EN_MASK 0x1 +#define PCI_NRTR_RGLTR_RD_EN_SHIFT 4 +#define PCI_NRTR_RGLTR_RD_EN_MASK 0x10 + +/* PCI_NRTR_RGLTR_WR_RESULT */ +#define PCI_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0 +#define PCI_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF + +/* PCI_NRTR_RGLTR_RD_RESULT */ +#define PCI_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0 +#define PCI_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF + +/* PCI_NRTR_SCRAMB_EN */ +#define PCI_NRTR_SCRAMB_EN_VAL_SHIFT 0 +#define PCI_NRTR_SCRAMB_EN_VAL_MASK 0x1 + +/* PCI_NRTR_NON_LIN_SCRAMB */ +#define PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0 +#define PCI_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1 + +#endif /* ASIC_REG_PCI_NRTR_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h new file mode 100644 index 000000000000..447e5d4e7dc8 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PCI_NRTR_REGS_H_ +#define ASIC_REG_PCI_NRTR_REGS_H_ + +/* + ***************************************** + * PCI_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +#define mmPCI_NRTR_HBW_MAX_CRED 0x100 + +#define mmPCI_NRTR_LBW_MAX_CRED 0x120 + +#define mmPCI_NRTR_DBG_E_ARB 0x300 + +#define mmPCI_NRTR_DBG_W_ARB 0x304 + +#define mmPCI_NRTR_DBG_N_ARB 0x308 + +#define mmPCI_NRTR_DBG_S_ARB 0x30C + +#define mmPCI_NRTR_DBG_L_ARB 0x310 + +#define mmPCI_NRTR_DBG_E_ARB_MAX 0x320 + +#define mmPCI_NRTR_DBG_W_ARB_MAX 0x324 + +#define mmPCI_NRTR_DBG_N_ARB_MAX 0x328 + +#define mmPCI_NRTR_DBG_S_ARB_MAX 0x32C + +#define mmPCI_NRTR_DBG_L_ARB_MAX 0x330 + +#define mmPCI_NRTR_SPLIT_COEF_0 0x400 + +#define mmPCI_NRTR_SPLIT_COEF_1 0x404 + +#define mmPCI_NRTR_SPLIT_COEF_2 0x408 + +#define mmPCI_NRTR_SPLIT_COEF_3 0x40C + +#define mmPCI_NRTR_SPLIT_COEF_4 0x410 + +#define mmPCI_NRTR_SPLIT_COEF_5 0x414 + +#define mmPCI_NRTR_SPLIT_COEF_6 0x418 + +#define mmPCI_NRTR_SPLIT_COEF_7 0x41C + +#define mmPCI_NRTR_SPLIT_COEF_8 0x420 + +#define mmPCI_NRTR_SPLIT_COEF_9 0x424 + +#define mmPCI_NRTR_SPLIT_CFG 0x440 + +#define mmPCI_NRTR_SPLIT_RD_SAT 0x444 + +#define mmPCI_NRTR_SPLIT_RD_RST_TOKEN 0x448 + +#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_0 0x44C + +#define mmPCI_NRTR_SPLIT_RD_TIMEOUT_1 0x450 + +#define mmPCI_NRTR_SPLIT_WR_SAT 0x454 + +#define mmPCI_NRTR_WPLIT_WR_TST_TOLEN 0x458 + +#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_0 0x45C + +#define mmPCI_NRTR_SPLIT_WR_TIMEOUT_1 0x460 + +#define mmPCI_NRTR_HBW_RANGE_HIT 0x470 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_0 0x480 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_1 0x484 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_2 0x488 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_3 0x48C + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_4 0x490 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_5 0x494 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_6 0x498 + +#define mmPCI_NRTR_HBW_RANGE_MASK_L_7 0x49C + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_0 0x4A0 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_1 0x4A4 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_2 0x4A8 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_3 0x4AC + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_4 0x4B0 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_5 0x4B4 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_6 0x4B8 + +#define mmPCI_NRTR_HBW_RANGE_MASK_H_7 0x4BC + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_0 0x4C0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_1 0x4C4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_2 0x4C8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_3 0x4CC + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_4 0x4D0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_5 0x4D4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_6 0x4D8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_L_7 0x4DC + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_0 0x4E0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_1 0x4E4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_2 0x4E8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_3 0x4EC + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_4 0x4F0 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_5 0x4F4 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_6 0x4F8 + +#define mmPCI_NRTR_HBW_RANGE_BASE_H_7 0x4FC + +#define mmPCI_NRTR_LBW_RANGE_HIT 0x500 + +#define mmPCI_NRTR_LBW_RANGE_MASK_0 0x510 + +#define mmPCI_NRTR_LBW_RANGE_MASK_1 0x514 + +#define mmPCI_NRTR_LBW_RANGE_MASK_2 0x518 + +#define mmPCI_NRTR_LBW_RANGE_MASK_3 0x51C + +#define mmPCI_NRTR_LBW_RANGE_MASK_4 0x520 + +#define mmPCI_NRTR_LBW_RANGE_MASK_5 0x524 + +#define mmPCI_NRTR_LBW_RANGE_MASK_6 
0x528 + +#define mmPCI_NRTR_LBW_RANGE_MASK_7 0x52C + +#define mmPCI_NRTR_LBW_RANGE_MASK_8 0x530 + +#define mmPCI_NRTR_LBW_RANGE_MASK_9 0x534 + +#define mmPCI_NRTR_LBW_RANGE_MASK_10 0x538 + +#define mmPCI_NRTR_LBW_RANGE_MASK_11 0x53C + +#define mmPCI_NRTR_LBW_RANGE_MASK_12 0x540 + +#define mmPCI_NRTR_LBW_RANGE_MASK_13 0x544 + +#define mmPCI_NRTR_LBW_RANGE_MASK_14 0x548 + +#define mmPCI_NRTR_LBW_RANGE_MASK_15 0x54C + +#define mmPCI_NRTR_LBW_RANGE_BASE_0 0x550 + +#define mmPCI_NRTR_LBW_RANGE_BASE_1 0x554 + +#define mmPCI_NRTR_LBW_RANGE_BASE_2 0x558 + +#define mmPCI_NRTR_LBW_RANGE_BASE_3 0x55C + +#define mmPCI_NRTR_LBW_RANGE_BASE_4 0x560 + +#define mmPCI_NRTR_LBW_RANGE_BASE_5 0x564 + +#define mmPCI_NRTR_LBW_RANGE_BASE_6 0x568 + +#define mmPCI_NRTR_LBW_RANGE_BASE_7 0x56C + +#define mmPCI_NRTR_LBW_RANGE_BASE_8 0x570 + +#define mmPCI_NRTR_LBW_RANGE_BASE_9 0x574 + +#define mmPCI_NRTR_LBW_RANGE_BASE_10 0x578 + +#define mmPCI_NRTR_LBW_RANGE_BASE_11 0x57C + +#define mmPCI_NRTR_LBW_RANGE_BASE_12 0x580 + +#define mmPCI_NRTR_LBW_RANGE_BASE_13 0x584 + +#define mmPCI_NRTR_LBW_RANGE_BASE_14 0x588 + +#define mmPCI_NRTR_LBW_RANGE_BASE_15 0x58C + +#define mmPCI_NRTR_RGLTR 0x590 + +#define mmPCI_NRTR_RGLTR_WR_RESULT 0x594 + +#define mmPCI_NRTR_RGLTR_RD_RESULT 0x598 + +#define mmPCI_NRTR_SCRAMB_EN 0x600 + +#define mmPCI_NRTR_NON_LIN_SCRAMB 0x604 + +#endif /* ASIC_REG_PCI_NRTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h new file mode 100644 index 000000000000..daaf5d9079dc --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PCIE_AUX_REGS_H_ +#define ASIC_REG_PCIE_AUX_REGS_H_ + +/* + ***************************************** + * PCIE_AUX (Prototype: PCIE_AUX) + ***************************************** + */ + +#define mmPCIE_AUX_APB_TIMEOUT 0xC07004 + +#define mmPCIE_AUX_PHY_INIT 0xC07100 + +#define mmPCIE_AUX_LTR_MAX_LATENCY 0xC07138 + +#define mmPCIE_AUX_BAR0_START_L 0xC07160 + +#define mmPCIE_AUX_BAR0_START_H 0xC07164 + +#define mmPCIE_AUX_BAR1_START 0xC07168 + +#define mmPCIE_AUX_BAR2_START_L 0xC0716C + +#define mmPCIE_AUX_BAR2_START_H 0xC07170 + +#define mmPCIE_AUX_BAR3_START 0xC07174 + +#define mmPCIE_AUX_BAR4_START_L 0xC07178 + +#define mmPCIE_AUX_BAR4_START_H 0xC0717C + +#define mmPCIE_AUX_BAR5_START 0xC07180 + +#define mmPCIE_AUX_BAR0_LIMIT_L 0xC07184 + +#define mmPCIE_AUX_BAR0_LIMIT_H 0xC07188 + +#define mmPCIE_AUX_BAR1_LIMIT 0xC0718C + +#define mmPCIE_AUX_BAR2_LIMIT_L 0xC07190 + +#define mmPCIE_AUX_BAR2_LIMIT_H 0xC07194 + +#define mmPCIE_AUX_BAR3_LIMIT 0xC07198 + +#define mmPCIE_AUX_BAR4_LIMIT_L 0xC0719C + +#define mmPCIE_AUX_BAR4_LIMIT_H 0xC07200 + +#define mmPCIE_AUX_BAR5_LIMIT 0xC07204 + +#define mmPCIE_AUX_BUS_MASTER_EN 0xC07208 + +#define mmPCIE_AUX_MEM_SPACE_EN 0xC0720C + +#define mmPCIE_AUX_MAX_RD_REQ_SIZE 0xC07210 + +#define mmPCIE_AUX_MAX_PAYLOAD_SIZE 0xC07214 + +#define mmPCIE_AUX_EXT_TAG_EN 0xC07218 + +#define mmPCIE_AUX_RCB 0xC0721C + +#define mmPCIE_AUX_PM_NO_SOFT_RST 0xC07220 + +#define mmPCIE_AUX_PBUS_NUM 0xC07224 + +#define mmPCIE_AUX_PBUS_DEV_NUM 0xC07228 + +#define mmPCIE_AUX_NO_SNOOP_EN 0xC0722C + +#define mmPCIE_AUX_RELAX_ORDER_EN 0xC07230 + +#define mmPCIE_AUX_HP_SLOT_CTRL_ACCESS 0xC07234 + +#define mmPCIE_AUX_DLL_STATE_CHGED_EN 0xC07238 + +#define mmPCIE_AUX_CMP_CPLED_INT_EN 0xC0723C + +#define mmPCIE_AUX_HP_INT_EN 0xC07340 + +#define mmPCIE_AUX_PRE_DET_CHGEN_EN 0xC07344 + +#define mmPCIE_AUX_MRL_SENSOR_CHGED_EN 0xC07348 + +#define mmPCIE_AUX_PWR_FAULT_DET_EN 0xC0734C + +#define mmPCIE_AUX_ATTEN_BUTTON_PRESSED_EN 0xC07350 + +#define mmPCIE_AUX_PF_FLR_ACTIVE 0xC07360 + +#define mmPCIE_AUX_PF_FLR_DONE 0xC07364 + +#define mmPCIE_AUX_FLR_INT 0xC07390 + +#define mmPCIE_AUX_LTR_M_EN 0xC073B0 + +#define mmPCIE_AUX_LTSSM_EN 0xC07428 + +#define mmPCIE_AUX_SYS_INTR 0xC07440 + +#define mmPCIE_AUX_INT_DISABLE 0xC07444 + +#define mmPCIE_AUX_SMLH_LINK_UP 0xC07448 + +#define mmPCIE_AUX_PM_CURR_STATE 0xC07450 + +#define mmPCIE_AUX_RDLH_LINK_UP 0xC07458 + +#define mmPCIE_AUX_BRDG_SLV_XFER_PENDING 0xC0745C + +#define mmPCIE_AUX_BRDG_DBI_XFER_PENDING 0xC07460 + +#define mmPCIE_AUX_AUTO_SP_DIS 0xC07478 + +#define mmPCIE_AUX_DBI 0xC07490 + +#define mmPCIE_AUX_DBI_32 0xC07494 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_0 0xC074A4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_1 0xC074A8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_2 0xC074AC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_3 0xC074B0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_4 0xC074B4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_5 0xC074B8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_6 0xC074BC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_7 0xC074C0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_8 0xC074C4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_9 0xC074C8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_10 0xC074CC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_11 0xC074D0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_12 0xC074D4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_13 0xC074D8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_14 0xC074DC + +#define 
mmPCIE_AUX_DIAG_STATUS_BUS_15 0xC074E0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_16 0xC074E4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_17 0xC074E8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_18 0xC074EC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_19 0xC074F0 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_20 0xC074F4 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_21 0xC074F8 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_22 0xC074FC + +#define mmPCIE_AUX_DIAG_STATUS_BUS_23 0xC07500 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_24 0xC07504 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_25 0xC07508 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_26 0xC0750C + +#define mmPCIE_AUX_DIAG_STATUS_BUS_27 0xC07510 + +#define mmPCIE_AUX_DIAG_STATUS_BUS_28 0xC07514 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_0 0xC07640 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_1 0xC07644 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_2 0xC07648 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_3 0xC0764C + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_4 0xC07650 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_5 0xC07654 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_6 0xC07658 + +#define mmPCIE_AUX_CDM_RAS_DES_EC_INFO_7 0xC0765C + +#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_0 0xC07744 + +#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_1 0xC07748 + +#define mmPCIE_AUX_CDM_RAS_DES_SD_COMMON_2 0xC0774C + +#define mmPCIE_AUX_APP_RAS_DES_TBA_CTRL 0xC07774 + +#define mmPCIE_AUX_PM_DSTATE 0xC07840 + +#define mmPCIE_AUX_PM_PME_EN 0xC07844 + +#define mmPCIE_AUX_PM_LINKST_IN_L0S 0xC07848 + +#define mmPCIE_AUX_PM_LINKST_IN_L1 0xC0784C + +#define mmPCIE_AUX_PM_LINKST_IN_L2 0xC07850 + +#define mmPCIE_AUX_PM_LINKST_L2_EXIT 0xC07854 + +#define mmPCIE_AUX_PM_STATUS 0xC07858 + +#define mmPCIE_AUX_APP_READY_ENTER_L23 0xC0785C + +#define mmPCIE_AUX_APP_XFER_PENDING 0xC07860 + +#define mmPCIE_AUX_APP_REQ_L1 0xC07930 + +#define mmPCIE_AUX_AUX_PM_EN 0xC07934 + +#define mmPCIE_AUX_APPS_PM_XMT_PME 0xC07938 + +#define mmPCIE_AUX_OUTBAND_PWRUP_CMD 0xC07940 + +#define mmPCIE_AUX_PERST 0xC079B8 + +#endif /* ASIC_REG_PCIE_AUX_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h new file mode 100644 index 000000000000..8eda4de58788 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_EMMC_PLL_REGS_H_ +#define ASIC_REG_PSOC_EMMC_PLL_REGS_H_ + +/* + ***************************************** + * PSOC_EMMC_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmPSOC_EMMC_PLL_NR 0xC70100 + +#define mmPSOC_EMMC_PLL_NF 0xC70104 + +#define mmPSOC_EMMC_PLL_OD 0xC70108 + +#define mmPSOC_EMMC_PLL_NB 0xC7010C + +#define mmPSOC_EMMC_PLL_CFG 0xC70110 + +#define mmPSOC_EMMC_PLL_LOSE_MASK 0xC70120 + +#define mmPSOC_EMMC_PLL_LOCK_INTR 0xC70128 + +#define mmPSOC_EMMC_PLL_LOCK_BYPASS 0xC7012C + +#define mmPSOC_EMMC_PLL_DATA_CHNG 0xC70130 + +#define mmPSOC_EMMC_PLL_RST 0xC70134 + +#define mmPSOC_EMMC_PLL_SLIP_WD_CNTR 0xC70150 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_0 0xC70200 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_1 0xC70204 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_2 0xC70208 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_3 0xC7020C + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_0 0xC70220 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_1 0xC70224 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_2 0xC70228 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_CMD_3 0xC7022C + +#define mmPSOC_EMMC_PLL_DIV_SEL_0 0xC70280 + +#define mmPSOC_EMMC_PLL_DIV_SEL_1 0xC70284 + +#define mmPSOC_EMMC_PLL_DIV_SEL_2 0xC70288 + +#define mmPSOC_EMMC_PLL_DIV_SEL_3 0xC7028C + +#define mmPSOC_EMMC_PLL_DIV_EN_0 0xC702A0 + +#define mmPSOC_EMMC_PLL_DIV_EN_1 0xC702A4 + +#define mmPSOC_EMMC_PLL_DIV_EN_2 0xC702A8 + +#define mmPSOC_EMMC_PLL_DIV_EN_3 0xC702AC + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_0 0xC702C0 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_1 0xC702C4 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_2 0xC702C8 + +#define mmPSOC_EMMC_PLL_DIV_FACTOR_BUSY_3 0xC702CC + +#define mmPSOC_EMMC_PLL_CLK_GATER 0xC70300 + +#define mmPSOC_EMMC_PLL_CLK_RLX_0 0xC70310 + +#define mmPSOC_EMMC_PLL_CLK_RLX_1 0xC70314 + +#define mmPSOC_EMMC_PLL_CLK_RLX_2 0xC70318 + +#define mmPSOC_EMMC_PLL_CLK_RLX_3 0xC7031C + +#define mmPSOC_EMMC_PLL_REF_CNTR_PERIOD 0xC70400 + +#define mmPSOC_EMMC_PLL_REF_LOW_THRESHOLD 0xC70410 + +#define mmPSOC_EMMC_PLL_REF_HIGH_THRESHOLD 0xC70420 + +#define mmPSOC_EMMC_PLL_PLL_NOT_STABLE 0xC70430 + +#define mmPSOC_EMMC_PLL_FREQ_CALC_EN 0xC70440 + +#endif /* ASIC_REG_PSOC_EMMC_PLL_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h new file mode 100644 index 000000000000..d4bf0e1db4df --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h @@ -0,0 +1,447 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ +#define ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ + +/* + ***************************************** + * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF) + ***************************************** + */ + +/* PSOC_GLOBAL_CONF_NON_RST_FLOPS */ +#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_NON_RST_FLOPS_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_PCI_FW_FSM */ +#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_SHIFT 0 +#define PSOC_GLOBAL_CONF_PCI_FW_FSM_EN_MASK 0x1 + +/* PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START */ +#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_BTM_FSM */ +#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_SHIFT 0 +#define PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK 0xF + +/* PSOC_GLOBAL_CONF_SW_BTM_FSM */ +#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_MASK 0xF + +/* PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM */ +#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM_CTRL_MASK 0xF + +/* PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT */ +#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SPI_MEM_EN */ +#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_SPI_MEM_EN_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PRSTN */ +#define PSOC_GLOBAL_CONF_PRSTN_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PRSTN_VAL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PCIE_EN */ +#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_SHIFT 0 +#define PSOC_GLOBAL_CONF_PCIE_EN_MASK_MASK 0x1 + +/* PSOC_GLOBAL_CONF_SPI_IMG_STS */ +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_SHIFT 0 +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRI_MASK 0x1 +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_SHIFT 1 +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_SEC_MASK 0x2 +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_SHIFT 2 +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PRSTN_MASK 0x4 +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_SHIFT 3 +#define PSOC_GLOBAL_CONF_SPI_IMG_STS_PCI_MASK 0x8 + +/* PSOC_GLOBAL_CONF_BOOT_SEQ_FSM */ +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_IDLE_MASK 0x1 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_SHIFT 1 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_BOOT_INIT_MASK 0x2 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_SHIFT 2 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRI_MASK 0x4 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_SHIFT 3 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_SEC_MASK 0x8 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_SHIFT 4 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PRSTN_MASK 0x10 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_SHIFT 5 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_SPI_PCIE_MASK 0x20 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_SHIFT 6 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_ROM_MASK 0x40 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_SHIFT 7 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_PCLK_READY_MASK 0x80 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_SHIFT 8 +#define PSOC_GLOBAL_CONF_BOOT_SEQ_FSM_LTSSM_EN_MASK 0x100 + +/* PSOC_GLOBAL_CONF_SCRATCHPAD */ +#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_SHIFT 0 +#define PSOC_GLOBAL_CONF_SCRATCHPAD_REG_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SEMAPHORE */ +#define PSOC_GLOBAL_CONF_SEMAPHORE_REG_SHIFT 0 +#define 
PSOC_GLOBAL_CONF_SEMAPHORE_REG_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_WARM_REBOOT */ +#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_SHIFT 0 +#define PSOC_GLOBAL_CONF_WARM_REBOOT_CNTR_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_UBOOT_MAGIC */ +#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_UBOOT_MAGIC_VAL_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_SPL_SOURCE */ +#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_SPL_SOURCE_VAL_MASK 0x7 + +/* PSOC_GLOBAL_CONF_I2C_MSTR1_DBG */ +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_SHIFT 0 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_S_GEN_MASK 0x1 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_SHIFT 1 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_P_GEN_MASK 0x2 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_SHIFT 2 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_DATA_MASK 0x4 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_SHIFT 3 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_MASK 0x8 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_SHIFT 4 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_RD_MASK 0x10 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_SHIFT 5 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_WR_MASK 0x20 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_SHIFT 6 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_HS_MASK 0x40 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_SHIFT 7 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MASTER_ACT_MASK 0x80 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_SHIFT 8 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLAVE_ACT_MASK 0x100 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_SHIFT 9 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_ADDR_10BIT_MASK 0x200 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_SHIFT 10 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_MST_CSTATE_MASK 0x7C00 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_SHIFT 15 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_SLV_CSTATE_MASK 0x78000 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_SHIFT 19 +#define PSOC_GLOBAL_CONF_I2C_MSTR1_DBG_IC_EN_MASK 0x80000 + +/* PSOC_GLOBAL_CONF_I2C_SLV */ +#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_SHIFT 0 +#define PSOC_GLOBAL_CONF_I2C_SLV_CPU_CTRL_MASK 0x1 + +/* PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK */ +#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_SHIFT 0 +#define PSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK_FLD_INT_MASK 0x1 + +/* PSOC_GLOBAL_CONF_APP_STATUS */ +#define PSOC_GLOBAL_CONF_APP_STATUS_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_APP_STATUS_IND_MASK 0xFFFFFFFF + +/* PSOC_GLOBAL_CONF_BTL_STS */ +#define PSOC_GLOBAL_CONF_BTL_STS_DONE_SHIFT 0 +#define PSOC_GLOBAL_CONF_BTL_STS_DONE_MASK 0x1 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_SHIFT 4 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_MASK 0x10 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_SHIFT 8 +#define PSOC_GLOBAL_CONF_BTL_STS_FAIL_CODE_MASK 0xF00 + +/* PSOC_GLOBAL_CONF_TIMEOUT_INTR */ +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_SHIFT 0 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_0_MASK 0x1 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_SHIFT 1 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_1_MASK 0x2 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_SHIFT 2 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_2_MASK 0x4 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_SHIFT 3 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_3_MASK 0x8 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_SHIFT 4 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_GPIO_4_MASK 0x10 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_SHIFT 5 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_TIMER_MASK 0x20 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_SHIFT 6 +#define 
PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_0_MASK 0x40 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_SHIFT 7 +#define PSOC_GLOBAL_CONF_TIMEOUT_INTR_UART_1_MASK 0x80 + +/* PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR */ +#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PERIPH_INTR */ +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_SHIFT 0 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TX_MASK 0x1 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_SHIFT 1 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RX_MASK 0x2 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_SHIFT 2 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_TXOVR_MASK 0x4 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_SHIFT 3 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_0_RXOVR_MASK 0x8 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_SHIFT 4 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TX_MASK 0x10 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_SHIFT 5 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RX_MASK 0x20 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_SHIFT 6 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_TXOVR_MASK 0x40 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_SHIFT 7 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_UART_1_RXOVR_MASK 0x80 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_SHIFT 12 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_MASK 0x1000 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_SHIFT 13 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_EMMC_WAKEUP_MASK 0x2000 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_SHIFT 16 +#define PSOC_GLOBAL_CONF_PERIPH_INTR_MII_MASK 0x10000 + +/* PSOC_GLOBAL_CONF_COMB_PERIPH_INTR */ +#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_COMB_PERIPH_INTR_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_AXI_ERR_INTR */ +#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_AXI_ERR_INTR_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_TARGETID */ +#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_SHIFT 1 +#define PSOC_GLOBAL_CONF_TARGETID_TDESIGNER_MASK 0xFFE +#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_SHIFT 12 +#define PSOC_GLOBAL_CONF_TARGETID_TPARTNO_MASK 0xFFFF000 +#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_SHIFT 28 +#define PSOC_GLOBAL_CONF_TARGETID_TREVISION_MASK 0xF0000000 + +/* PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE */ +#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_MII_ADDR */ +#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_MII_ADDR_VAL_MASK 0xFF + +/* PSOC_GLOBAL_CONF_MII_SPEED */ +#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_MII_SPEED_VAL_MASK 0x3 + +/* PSOC_GLOBAL_CONF_BOOT_STRAP_PINS */ +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_SHIFT 0 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPOL_MASK 0x1 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_SHIFT 1 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_CPHA_MASK 0x2 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_SHIFT 2 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_EN_MASK 0x4 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_SHIFT 3 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BTL_ROM_EN_MASK 0x8 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_SHIFT 4 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PCIE_EN_MASK 0x10 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_SHIFT 5 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_I2C_SLV_ADDR_MASK 0xFE0 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_SHIFT 12 +#define 
PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_BOOT_STG2_SRC_MASK 0x3000 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_SHIFT 14 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_BPS_MASK 0x1FC000 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_SHIFT 21 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK 0x200000 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_SHIFT 22 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_PLL_CFG_MASK 0x1C00000 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_SHIFT 25 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_MEM_REPAIR_BPS_MASK 0x2000000 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_SHIFT 26 +#define PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SPARE_MASK 0x1C000000 + +/* PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL */ +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_SHIFT 0 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_SET_MASK 0x1 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_SHIFT 1 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_CTRL_CLR_MASK 0x2 + +/* PSOC_GLOBAL_CONF_MEM_REPAIR_STS */ +#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_MEM_REPAIR_STS_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_OUTSTANT_TRANS */ +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_SHIFT 0 +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_RD_MASK 0x1 +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_SHIFT 1 +#define PSOC_GLOBAL_CONF_OUTSTANT_TRANS_WR_MASK 0x2 + +/* PSOC_GLOBAL_CONF_MASK_REQ */ +#define PSOC_GLOBAL_CONF_MASK_REQ_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_MASK_REQ_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_PRSTN_RST_CFG */ +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_SHIFT 0 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_MASK 0x1 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_SHIFT 1 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PCI_IF_MASK 0x2 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_SHIFT 2 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PLL_MASK 0x1FC +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_SHIFT 9 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_TPC_MASK 0x200 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_SHIFT 10 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MME_MASK 0x400 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_SHIFT 11 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_MC_MASK 0x800 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_SHIFT 12 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_CPU_MASK 0x1000 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_SHIFT 13 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_IC_IF_MASK 0x2000 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_SHIFT 14 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_PSOC_MASK 0x4000 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_SHIFT 15 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_SRAM_MASK 0x1F8000 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_SHIFT 21 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_MASK 0x200000 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_SHIFT 22 +#define PSOC_GLOBAL_CONF_PRSTN_RST_CFG_DMA_IF_MASK 0x400000 + +/* PSOC_GLOBAL_CONF_SW_ALL_RST_CFG */ +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_SHIFT 0 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_MASK 0x1 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_SHIFT 1 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PCI_IF_MASK 0x2 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_SHIFT 2 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PLL_MASK 0x1FC +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_SHIFT 9 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_TPC_MASK 0x200 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_SHIFT 10 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MME_MASK 0x400 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_SHIFT 11 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_MC_MASK 0x800 +#define 
PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_SHIFT 12 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_CPU_MASK 0x1000 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_SHIFT 13 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_IC_IF_MASK 0x2000 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_SHIFT 14 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_PSOC_MASK 0x4000 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_SHIFT 15 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_SRAM_MASK 0x1F8000 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_SHIFT 21 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_MASK 0x200000 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_SHIFT 22 +#define PSOC_GLOBAL_CONF_SW_ALL_RST_CFG_DMA_IF_MASK 0x400000 + +/* PSOC_GLOBAL_CONF_WD_RST_CFG */ +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_SHIFT 0 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_MASK 0x1 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_SHIFT 1 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PCI_IF_MASK 0x2 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_SHIFT 2 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PLL_MASK 0x1FC +#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_SHIFT 9 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_TPC_MASK 0x200 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_SHIFT 10 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_MME_MASK 0x400 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_SHIFT 11 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_MC_MASK 0x800 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_SHIFT 12 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_CPU_MASK 0x1000 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_SHIFT 13 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_IC_IF_MASK 0x2000 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_SHIFT 14 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_PSOC_MASK 0x4000 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_SHIFT 15 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_SRAM_MASK 0x1F8000 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_SHIFT 21 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_MASK 0x200000 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_SHIFT 22 +#define PSOC_GLOBAL_CONF_WD_RST_CFG_DMA_IF_MASK 0x400000 + +/* PSOC_GLOBAL_CONF_MNL_RST_CFG */ +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_SHIFT 0 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_MASK 0x1 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_SHIFT 1 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PCI_IF_MASK 0x2 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_SHIFT 2 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PLL_MASK 0x1FC +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_SHIFT 9 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_TPC_MASK 0x200 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_SHIFT 10 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MME_MASK 0x400 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_SHIFT 11 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_MC_MASK 0x800 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_SHIFT 12 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_CPU_MASK 0x1000 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_SHIFT 13 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_IC_IF_MASK 0x2000 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_SHIFT 14 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_PSOC_MASK 0x4000 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_SHIFT 15 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_SRAM_MASK 0x1F8000 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_SHIFT 21 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_MASK 0x200000 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_SHIFT 22 +#define PSOC_GLOBAL_CONF_MNL_RST_CFG_DMA_IF_MASK 0x400000 + +/* PSOC_GLOBAL_CONF_UNIT_RST_N */ +#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_SHIFT 0 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_MASK 0x1 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_SHIFT 1 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_PCI_IF_MASK 0x2 +#define 
PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_SHIFT 2 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_PLL_MASK 0x1FC +#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_SHIFT 9 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_TPC_MASK 0x200 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_SHIFT 10 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_MME_MASK 0x400 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_SHIFT 11 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_MC_MASK 0x800 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_SHIFT 12 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_CPU_MASK 0x1000 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_SHIFT 13 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_IC_IF_MASK 0x2000 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_SHIFT 14 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_PSOC_MASK 0x4000 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_SHIFT 15 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_SRAM_MASK 0x1F8000 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_SHIFT 21 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_MASK 0x200000 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_SHIFT 22 +#define PSOC_GLOBAL_CONF_UNIT_RST_N_DMA_IF_MASK 0x400000 + +/* PSOC_GLOBAL_CONF_PRSTN_MASK */ +#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_PRSTN_MASK_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_WD_MASK */ +#define PSOC_GLOBAL_CONF_WD_MASK_IND_SHIFT 0 +#define PSOC_GLOBAL_CONF_WD_MASK_IND_MASK 0x1 + +/* PSOC_GLOBAL_CONF_RST_SRC */ +#define PSOC_GLOBAL_CONF_RST_SRC_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_RST_SRC_VAL_MASK 0xF + +/* PSOC_GLOBAL_CONF_PAD_1V8_CFG */ +#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_1V8_CFG_VAL_MASK 0x7F + +/* PSOC_GLOBAL_CONF_PAD_3V3_CFG */ +#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_3V3_CFG_VAL_MASK 0x7F + +/* PSOC_GLOBAL_CONF_PAD_1V8_INPUT */ +#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_1V8_INPUT_CFG_MASK 0x7 + +/* PSOC_GLOBAL_CONF_BNK3V3_MS */ +#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_BNK3V3_MS_VAL_MASK 0x3 + +/* PSOC_GLOBAL_CONF_PAD_DEFAULT */ +#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_DEFAULT_VAL_MASK 0xF + +/* PSOC_GLOBAL_CONF_PAD_SEL */ +#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_SHIFT 0 +#define PSOC_GLOBAL_CONF_PAD_SEL_VAL_MASK 0x3 + +#endif /* ASIC_REG_PSOC_GLOBAL_CONF_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h new file mode 100644 index 000000000000..cfbdd2c9c5c7 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h @@ -0,0 +1,745 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ +#define ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ + +/* + ***************************************** + * PSOC_GLOBAL_CONF (Prototype: GLOBAL_CONF) + ***************************************** + */ + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0 0xC4B000 + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_1 0xC4B004 + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_2 0xC4B008 + +#define mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_3 0xC4B00C + +#define mmPSOC_GLOBAL_CONF_PCI_FW_FSM 0xC4B020 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START 0xC4B024 + +#define mmPSOC_GLOBAL_CONF_BTM_FSM 0xC4B028 + +#define mmPSOC_GLOBAL_CONF_SW_BTM_FSM 0xC4B030 + +#define mmPSOC_GLOBAL_CONF_SW_BOOT_SEQ_FSM 0xC4B034 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_TIMEOUT 0xC4B038 + +#define mmPSOC_GLOBAL_CONF_SPI_MEM_EN 0xC4B040 + +#define mmPSOC_GLOBAL_CONF_PRSTN 0xC4B044 + +#define mmPSOC_GLOBAL_CONF_PCIE_EN 0xC4B048 + +#define mmPSOC_GLOBAL_CONF_SPI_IMG_STS 0xC4B050 + +#define mmPSOC_GLOBAL_CONF_BOOT_SEQ_FSM 0xC4B054 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_0 0xC4B100 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_1 0xC4B104 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_2 0xC4B108 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_3 0xC4B10C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_4 0xC4B110 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_5 0xC4B114 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_6 0xC4B118 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_7 0xC4B11C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_8 0xC4B120 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_9 0xC4B124 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_10 0xC4B128 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_11 0xC4B12C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_12 0xC4B130 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_13 0xC4B134 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_14 0xC4B138 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_15 0xC4B13C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_16 0xC4B140 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_17 0xC4B144 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_18 0xC4B148 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_19 0xC4B14C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_20 0xC4B150 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_21 0xC4B154 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_22 0xC4B158 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_23 0xC4B15C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_24 0xC4B160 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_25 0xC4B164 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_26 0xC4B168 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_27 0xC4B16C + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_28 0xC4B170 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_29 0xC4B174 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_30 0xC4B178 + +#define mmPSOC_GLOBAL_CONF_SCRATCHPAD_31 0xC4B17C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_0 0xC4B200 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_1 0xC4B204 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_2 0xC4B208 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_3 0xC4B20C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_4 0xC4B210 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_5 0xC4B214 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_6 0xC4B218 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_7 0xC4B21C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_8 0xC4B220 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_9 0xC4B224 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_10 0xC4B228 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_11 0xC4B22C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_12 0xC4B230 + +#define 
mmPSOC_GLOBAL_CONF_SEMAPHORE_13 0xC4B234 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_14 0xC4B238 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_15 0xC4B23C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_16 0xC4B240 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_17 0xC4B244 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_18 0xC4B248 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_19 0xC4B24C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_20 0xC4B250 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_21 0xC4B254 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_22 0xC4B258 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_23 0xC4B25C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_24 0xC4B260 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_25 0xC4B264 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_26 0xC4B268 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_27 0xC4B26C + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_28 0xC4B270 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_29 0xC4B274 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_30 0xC4B278 + +#define mmPSOC_GLOBAL_CONF_SEMAPHORE_31 0xC4B27C + +#define mmPSOC_GLOBAL_CONF_WARM_REBOOT 0xC4B300 + +#define mmPSOC_GLOBAL_CONF_UBOOT_MAGIC 0xC4B304 + +#define mmPSOC_GLOBAL_CONF_SPL_SOURCE 0xC4B308 + +#define mmPSOC_GLOBAL_CONF_I2C_MSTR1_DBG 0xC4B30C + +#define mmPSOC_GLOBAL_CONF_I2C_SLV 0xC4B310 + +#define mmPSOC_GLOBAL_CONF_I2C_SLV_INTR_MASK 0xC4B314 + +#define mmPSOC_GLOBAL_CONF_APP_STATUS 0xC4B320 + +#define mmPSOC_GLOBAL_CONF_BTL_STS 0xC4B340 + +#define mmPSOC_GLOBAL_CONF_TIMEOUT_INTR 0xC4B350 + +#define mmPSOC_GLOBAL_CONF_COMB_TIMEOUT_INTR 0xC4B354 + +#define mmPSOC_GLOBAL_CONF_PERIPH_INTR 0xC4B358 + +#define mmPSOC_GLOBAL_CONF_COMB_PERIPH_INTR 0xC4B35C + +#define mmPSOC_GLOBAL_CONF_AXI_ERR_INTR 0xC4B360 + +#define mmPSOC_GLOBAL_CONF_TARGETID 0xC4B400 + +#define mmPSOC_GLOBAL_CONF_EMMC_INT_VOL_STABLE 0xC4B420 + +#define mmPSOC_GLOBAL_CONF_MII_ADDR 0xC4B424 + +#define mmPSOC_GLOBAL_CONF_MII_SPEED 0xC4B428 + +#define mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS 0xC4B430 + +#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_CTRL 0xC4B450 + +#define mmPSOC_GLOBAL_CONF_MEM_REPAIR_STS 0xC4B454 + +#define mmPSOC_GLOBAL_CONF_OUTSTANT_TRANS 0xC4B458 + +#define mmPSOC_GLOBAL_CONF_MASK_REQ 0xC4B45C + +#define mmPSOC_GLOBAL_CONF_PRSTN_RST_CFG 0xC4B470 + +#define mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG 0xC4B474 + +#define mmPSOC_GLOBAL_CONF_WD_RST_CFG 0xC4B478 + +#define mmPSOC_GLOBAL_CONF_MNL_RST_CFG 0xC4B47C + +#define mmPSOC_GLOBAL_CONF_UNIT_RST_N 0xC4B480 + +#define mmPSOC_GLOBAL_CONF_PRSTN_MASK 0xC4B484 + +#define mmPSOC_GLOBAL_CONF_WD_MASK 0xC4B488 + +#define mmPSOC_GLOBAL_CONF_RST_SRC 0xC4B490 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_0 0xC4B500 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_1 0xC4B504 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_2 0xC4B508 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_3 0xC4B50C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_4 0xC4B510 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_5 0xC4B514 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_6 0xC4B518 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_7 0xC4B51C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_8 0xC4B520 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_9 0xC4B524 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_10 0xC4B528 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_11 0xC4B52C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_12 0xC4B530 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_13 0xC4B534 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_14 0xC4B538 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_15 0xC4B53C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_16 0xC4B540 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_17 0xC4B544 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_18 0xC4B548 + +#define 
mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_19 0xC4B54C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_20 0xC4B550 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_21 0xC4B554 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_22 0xC4B558 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_23 0xC4B55C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_24 0xC4B560 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_25 0xC4B564 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_26 0xC4B568 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_27 0xC4B56C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_28 0xC4B570 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_29 0xC4B574 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_30 0xC4B578 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_31 0xC4B57C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_32 0xC4B580 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_33 0xC4B584 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_34 0xC4B588 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_35 0xC4B58C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_36 0xC4B590 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_37 0xC4B594 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_38 0xC4B598 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_39 0xC4B59C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_40 0xC4B5A0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_41 0xC4B5A4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_42 0xC4B5A8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_43 0xC4B5AC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_44 0xC4B5B0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_45 0xC4B5B4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_46 0xC4B5B8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_47 0xC4B5BC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_48 0xC4B5C0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_49 0xC4B5C4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_50 0xC4B5C8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_51 0xC4B5CC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_52 0xC4B5D0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_53 0xC4B5D4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_54 0xC4B5D8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_55 0xC4B5DC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_56 0xC4B5E0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_57 0xC4B5E4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_58 0xC4B5E8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_59 0xC4B5EC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_60 0xC4B5F0 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_61 0xC4B5F4 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_62 0xC4B5F8 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_63 0xC4B5FC + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_64 0xC4B600 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_65 0xC4B604 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_66 0xC4B608 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_67 0xC4B60C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_CFG_68 0xC4B610 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_0 0xC4B640 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_1 0xC4B644 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_2 0xC4B648 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_3 0xC4B64C + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_4 0xC4B650 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_5 0xC4B654 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_6 0xC4B658 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_7 0xC4B65C + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_8 0xC4B660 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_9 0xC4B664 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_10 0xC4B668 + +#define mmPSOC_GLOBAL_CONF_PAD_3V3_CFG_11 0xC4B66C + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_0 0xC4B680 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_1 0xC4B684 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_2 0xC4B688 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_3 0xC4B68C + 
+#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_4 0xC4B690 + +#define mmPSOC_GLOBAL_CONF_PAD_1V8_INPUT_5 0xC4B694 + +#define mmPSOC_GLOBAL_CONF_BNK3V3_MS 0xC4B6E0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_0 0xC4B700 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_1 0xC4B704 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_2 0xC4B708 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_3 0xC4B70C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_4 0xC4B710 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_5 0xC4B714 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_6 0xC4B718 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_7 0xC4B71C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_8 0xC4B720 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_9 0xC4B724 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_10 0xC4B728 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_11 0xC4B72C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_12 0xC4B730 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_13 0xC4B734 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_14 0xC4B738 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_15 0xC4B73C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_16 0xC4B740 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_17 0xC4B744 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_18 0xC4B748 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_19 0xC4B74C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_20 0xC4B750 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_21 0xC4B754 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_22 0xC4B758 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_23 0xC4B75C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_24 0xC4B760 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_25 0xC4B764 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_26 0xC4B768 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_27 0xC4B76C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_28 0xC4B770 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_29 0xC4B774 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_30 0xC4B778 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_31 0xC4B77C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_32 0xC4B780 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_33 0xC4B784 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_34 0xC4B788 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_35 0xC4B78C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_36 0xC4B790 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_37 0xC4B794 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_38 0xC4B798 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_39 0xC4B79C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_40 0xC4B7A0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_41 0xC4B7A4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_42 0xC4B7A8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_43 0xC4B7AC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_44 0xC4B7B0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_45 0xC4B7B4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_46 0xC4B7B8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_47 0xC4B7BC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_48 0xC4B7C0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_49 0xC4B7C4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_50 0xC4B7C8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_51 0xC4B7CC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_52 0xC4B7D0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_53 0xC4B7D4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_54 0xC4B7D8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_55 0xC4B7DC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_56 0xC4B7E0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_57 0xC4B7E4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_58 0xC4B7E8 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_59 0xC4B7EC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_60 0xC4B7F0 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_61 0xC4B7F4 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_62 0xC4B7F8 + 
+#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_63 0xC4B7FC + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_64 0xC4B800 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_65 0xC4B804 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_66 0xC4B808 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_67 0xC4B80C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_68 0xC4B810 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_69 0xC4B814 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_70 0xC4B818 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_71 0xC4B81C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_72 0xC4B820 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_73 0xC4B824 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_74 0xC4B828 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_75 0xC4B82C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_76 0xC4B830 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_77 0xC4B834 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_78 0xC4B838 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_79 0xC4B83C + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_80 0xC4B840 + +#define mmPSOC_GLOBAL_CONF_PAD_DEFAULT_81 0xC4B844 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_0 0xC4B900 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_1 0xC4B904 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_2 0xC4B908 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_3 0xC4B90C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_4 0xC4B910 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_5 0xC4B914 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_6 0xC4B918 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_7 0xC4B91C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_8 0xC4B920 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_9 0xC4B924 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_10 0xC4B928 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_11 0xC4B92C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_12 0xC4B930 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_13 0xC4B934 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_14 0xC4B938 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_15 0xC4B93C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_16 0xC4B940 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_17 0xC4B944 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_18 0xC4B948 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_19 0xC4B94C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_20 0xC4B950 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_21 0xC4B954 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_22 0xC4B958 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_23 0xC4B95C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_24 0xC4B960 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_25 0xC4B964 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_26 0xC4B968 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_27 0xC4B96C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_28 0xC4B970 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_29 0xC4B974 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_30 0xC4B978 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_31 0xC4B97C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_32 0xC4B980 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_33 0xC4B984 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_34 0xC4B988 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_35 0xC4B98C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_36 0xC4B990 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_37 0xC4B994 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_38 0xC4B998 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_39 0xC4B99C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_40 0xC4B9A0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_41 0xC4B9A4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_42 0xC4B9A8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_43 0xC4B9AC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_44 0xC4B9B0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_45 0xC4B9B4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_46 0xC4B9B8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_47 0xC4B9BC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_48 0xC4B9C0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_49 0xC4B9C4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_50 
0xC4B9C8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_51 0xC4B9CC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_52 0xC4B9D0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_53 0xC4B9D4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_54 0xC4B9D8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_55 0xC4B9DC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_56 0xC4B9E0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_57 0xC4B9E4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_58 0xC4B9E8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_59 0xC4B9EC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_60 0xC4B9F0 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_61 0xC4B9F4 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_62 0xC4B9F8 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_63 0xC4B9FC + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_64 0xC4BA00 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_65 0xC4BA04 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_66 0xC4BA08 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_67 0xC4BA0C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_68 0xC4BA10 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_69 0xC4BA14 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_70 0xC4BA18 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_71 0xC4BA1C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_72 0xC4BA20 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_73 0xC4BA24 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_74 0xC4BA28 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_75 0xC4BA2C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_76 0xC4BA30 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_77 0xC4BA34 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_78 0xC4BA38 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_79 0xC4BA3C + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_80 0xC4BA40 + +#define mmPSOC_GLOBAL_CONF_PAD_SEL_81 0xC4BA44 + +#endif /* ASIC_REG_PSOC_GLOBAL_CONF_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h new file mode 100644 index 000000000000..6723d8f76f30 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_MME_PLL_REGS_H_ +#define ASIC_REG_PSOC_MME_PLL_REGS_H_ + +/* + ***************************************** + * PSOC_MME_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmPSOC_MME_PLL_NR 0xC71100 + +#define mmPSOC_MME_PLL_NF 0xC71104 + +#define mmPSOC_MME_PLL_OD 0xC71108 + +#define mmPSOC_MME_PLL_NB 0xC7110C + +#define mmPSOC_MME_PLL_CFG 0xC71110 + +#define mmPSOC_MME_PLL_LOSE_MASK 0xC71120 + +#define mmPSOC_MME_PLL_LOCK_INTR 0xC71128 + +#define mmPSOC_MME_PLL_LOCK_BYPASS 0xC7112C + +#define mmPSOC_MME_PLL_DATA_CHNG 0xC71130 + +#define mmPSOC_MME_PLL_RST 0xC71134 + +#define mmPSOC_MME_PLL_SLIP_WD_CNTR 0xC71150 + +#define mmPSOC_MME_PLL_DIV_FACTOR_0 0xC71200 + +#define mmPSOC_MME_PLL_DIV_FACTOR_1 0xC71204 + +#define mmPSOC_MME_PLL_DIV_FACTOR_2 0xC71208 + +#define mmPSOC_MME_PLL_DIV_FACTOR_3 0xC7120C + +#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_0 0xC71220 + +#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_1 0xC71224 + +#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_2 0xC71228 + +#define mmPSOC_MME_PLL_DIV_FACTOR_CMD_3 0xC7122C + +#define mmPSOC_MME_PLL_DIV_SEL_0 0xC71280 + +#define mmPSOC_MME_PLL_DIV_SEL_1 0xC71284 + +#define mmPSOC_MME_PLL_DIV_SEL_2 0xC71288 + +#define mmPSOC_MME_PLL_DIV_SEL_3 0xC7128C + +#define mmPSOC_MME_PLL_DIV_EN_0 0xC712A0 + +#define mmPSOC_MME_PLL_DIV_EN_1 0xC712A4 + +#define mmPSOC_MME_PLL_DIV_EN_2 0xC712A8 + +#define mmPSOC_MME_PLL_DIV_EN_3 0xC712AC + +#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_0 0xC712C0 + +#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_1 0xC712C4 + +#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_2 0xC712C8 + +#define mmPSOC_MME_PLL_DIV_FACTOR_BUSY_3 0xC712CC + +#define mmPSOC_MME_PLL_CLK_GATER 0xC71300 + +#define mmPSOC_MME_PLL_CLK_RLX_0 0xC71310 + +#define mmPSOC_MME_PLL_CLK_RLX_1 0xC71314 + +#define mmPSOC_MME_PLL_CLK_RLX_2 0xC71318 + +#define mmPSOC_MME_PLL_CLK_RLX_3 0xC7131C + +#define mmPSOC_MME_PLL_REF_CNTR_PERIOD 0xC71400 + +#define mmPSOC_MME_PLL_REF_LOW_THRESHOLD 0xC71410 + +#define mmPSOC_MME_PLL_REF_HIGH_THRESHOLD 0xC71420 + +#define mmPSOC_MME_PLL_PLL_NOT_STABLE 0xC71430 + +#define mmPSOC_MME_PLL_FREQ_CALC_EN 0xC71440 + +#endif /* ASIC_REG_PSOC_MME_PLL_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h new file mode 100644 index 000000000000..abcded0531c9 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_PCI_PLL_REGS_H_ +#define ASIC_REG_PSOC_PCI_PLL_REGS_H_ + +/* + ***************************************** + * PSOC_PCI_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmPSOC_PCI_PLL_NR 0xC72100 + +#define mmPSOC_PCI_PLL_NF 0xC72104 + +#define mmPSOC_PCI_PLL_OD 0xC72108 + +#define mmPSOC_PCI_PLL_NB 0xC7210C + +#define mmPSOC_PCI_PLL_CFG 0xC72110 + +#define mmPSOC_PCI_PLL_LOSE_MASK 0xC72120 + +#define mmPSOC_PCI_PLL_LOCK_INTR 0xC72128 + +#define mmPSOC_PCI_PLL_LOCK_BYPASS 0xC7212C + +#define mmPSOC_PCI_PLL_DATA_CHNG 0xC72130 + +#define mmPSOC_PCI_PLL_RST 0xC72134 + +#define mmPSOC_PCI_PLL_SLIP_WD_CNTR 0xC72150 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_0 0xC72200 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_1 0xC72204 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_2 0xC72208 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_3 0xC7220C + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_0 0xC72220 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_1 0xC72224 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_2 0xC72228 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_CMD_3 0xC7222C + +#define mmPSOC_PCI_PLL_DIV_SEL_0 0xC72280 + +#define mmPSOC_PCI_PLL_DIV_SEL_1 0xC72284 + +#define mmPSOC_PCI_PLL_DIV_SEL_2 0xC72288 + +#define mmPSOC_PCI_PLL_DIV_SEL_3 0xC7228C + +#define mmPSOC_PCI_PLL_DIV_EN_0 0xC722A0 + +#define mmPSOC_PCI_PLL_DIV_EN_1 0xC722A4 + +#define mmPSOC_PCI_PLL_DIV_EN_2 0xC722A8 + +#define mmPSOC_PCI_PLL_DIV_EN_3 0xC722AC + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_0 0xC722C0 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_1 0xC722C4 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_2 0xC722C8 + +#define mmPSOC_PCI_PLL_DIV_FACTOR_BUSY_3 0xC722CC + +#define mmPSOC_PCI_PLL_CLK_GATER 0xC72300 + +#define mmPSOC_PCI_PLL_CLK_RLX_0 0xC72310 + +#define mmPSOC_PCI_PLL_CLK_RLX_1 0xC72314 + +#define mmPSOC_PCI_PLL_CLK_RLX_2 0xC72318 + +#define mmPSOC_PCI_PLL_CLK_RLX_3 0xC7231C + +#define mmPSOC_PCI_PLL_REF_CNTR_PERIOD 0xC72400 + +#define mmPSOC_PCI_PLL_REF_LOW_THRESHOLD 0xC72410 + +#define mmPSOC_PCI_PLL_REF_HIGH_THRESHOLD 0xC72420 + +#define mmPSOC_PCI_PLL_PLL_NOT_STABLE 0xC72430 + +#define mmPSOC_PCI_PLL_FREQ_CALC_EN 0xC72440 + +#endif /* ASIC_REG_PSOC_PCI_PLL_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h new file mode 100644 index 000000000000..5925c7477c25 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_PSOC_SPI_REGS_H_ +#define ASIC_REG_PSOC_SPI_REGS_H_ + +/* + ***************************************** + * PSOC_SPI (Prototype: SPI) + ***************************************** + */ + +#define mmPSOC_SPI_CTRLR0 0xC43000 + +#define mmPSOC_SPI_CTRLR1 0xC43004 + +#define mmPSOC_SPI_SSIENR 0xC43008 + +#define mmPSOC_SPI_MWCR 0xC4300C + +#define mmPSOC_SPI_SER 0xC43010 + +#define mmPSOC_SPI_BAUDR 0xC43014 + +#define mmPSOC_SPI_TXFTLR 0xC43018 + +#define mmPSOC_SPI_RXFTLR 0xC4301C + +#define mmPSOC_SPI_TXFLR 0xC43020 + +#define mmPSOC_SPI_RXFLR 0xC43024 + +#define mmPSOC_SPI_SR 0xC43028 + +#define mmPSOC_SPI_IMR 0xC4302C + +#define mmPSOC_SPI_ISR 0xC43030 + +#define mmPSOC_SPI_RISR 0xC43034 + +#define mmPSOC_SPI_TXOICR 0xC43038 + +#define mmPSOC_SPI_RXOICR 0xC4303C + +#define mmPSOC_SPI_RXUICR 0xC43040 + +#define mmPSOC_SPI_MSTICR 0xC43044 + +#define mmPSOC_SPI_ICR 0xC43048 + +#define mmPSOC_SPI_IDR 0xC43058 + +#define mmPSOC_SPI_SSI_VERSION_ID 0xC4305C + +#define mmPSOC_SPI_DR0 0xC43060 + +#define mmPSOC_SPI_DR1 0xC43064 + +#define mmPSOC_SPI_DR2 0xC43068 + +#define mmPSOC_SPI_DR3 0xC4306C + +#define mmPSOC_SPI_DR4 0xC43070 + +#define mmPSOC_SPI_DR5 0xC43074 + +#define mmPSOC_SPI_DR6 0xC43078 + +#define mmPSOC_SPI_DR7 0xC4307C + +#define mmPSOC_SPI_DR8 0xC43080 + +#define mmPSOC_SPI_DR9 0xC43084 + +#define mmPSOC_SPI_DR10 0xC43088 + +#define mmPSOC_SPI_DR11 0xC4308C + +#define mmPSOC_SPI_DR12 0xC43090 + +#define mmPSOC_SPI_DR13 0xC43094 + +#define mmPSOC_SPI_DR14 0xC43098 + +#define mmPSOC_SPI_DR15 0xC4309C + +#define mmPSOC_SPI_DR16 0xC430A0 + +#define mmPSOC_SPI_DR17 0xC430A4 + +#define mmPSOC_SPI_DR18 0xC430A8 + +#define mmPSOC_SPI_DR19 0xC430AC + +#define mmPSOC_SPI_DR20 0xC430B0 + +#define mmPSOC_SPI_DR21 0xC430B4 + +#define mmPSOC_SPI_DR22 0xC430B8 + +#define mmPSOC_SPI_DR23 0xC430BC + +#define mmPSOC_SPI_DR24 0xC430C0 + +#define mmPSOC_SPI_DR25 0xC430C4 + +#define mmPSOC_SPI_DR26 0xC430C8 + +#define mmPSOC_SPI_DR27 0xC430CC + +#define mmPSOC_SPI_DR28 0xC430D0 + +#define mmPSOC_SPI_DR29 0xC430D4 + +#define mmPSOC_SPI_DR30 0xC430D8 + +#define mmPSOC_SPI_DR31 0xC430DC + +#define mmPSOC_SPI_DR32 0xC430E0 + +#define mmPSOC_SPI_DR33 0xC430E4 + +#define mmPSOC_SPI_DR34 0xC430E8 + +#define mmPSOC_SPI_DR35 0xC430EC + +#define mmPSOC_SPI_RX_SAMPLE_DLY 0xC430F0 + +#define mmPSOC_SPI_RSVD_1 0xC430F8 + +#define mmPSOC_SPI_RSVD_2 0xC430FC + +#endif /* ASIC_REG_PSOC_SPI_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h new file mode 100644 index 000000000000..d56c9fa0e7ba --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X0_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_E_ARB 0x201100 + +#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_W_ARB 0x201104 + +#define mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB 0x201110 + +#define mmSRAM_Y0_X0_RTR_HBW_E_ARB_MAX 0x201120 + +#define mmSRAM_Y0_X0_RTR_HBW_W_ARB_MAX 0x201124 + +#define mmSRAM_Y0_X0_RTR_HBW_L_ARB_MAX 0x201130 + +#define mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB 0x201140 + +#define mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB 0x201144 + +#define mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB 0x201148 + +#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB 0x201160 + +#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB 0x201164 + +#define mmSRAM_Y0_X0_RTR_HBW_WR_RS_L_ARB 0x201168 + +#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_E_ARB 0x201200 + +#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_W_ARB 0x201204 + +#define mmSRAM_Y0_X0_RTR_LBW_RD_RQ_L_ARB 0x201210 + +#define mmSRAM_Y0_X0_RTR_LBW_E_ARB_MAX 0x201220 + +#define mmSRAM_Y0_X0_RTR_LBW_W_ARB_MAX 0x201224 + +#define mmSRAM_Y0_X0_RTR_LBW_L_ARB_MAX 0x201230 + +#define mmSRAM_Y0_X0_RTR_LBW_DATA_E_ARB 0x201240 + +#define mmSRAM_Y0_X0_RTR_LBW_DATA_W_ARB 0x201244 + +#define mmSRAM_Y0_X0_RTR_LBW_DATA_L_ARB 0x201248 + +#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_E_ARB 0x201260 + +#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_W_ARB 0x201264 + +#define mmSRAM_Y0_X0_RTR_LBW_WR_RS_L_ARB 0x201268 + +#define mmSRAM_Y0_X0_RTR_DBG_E_ARB 0x201300 + +#define mmSRAM_Y0_X0_RTR_DBG_W_ARB 0x201304 + +#define mmSRAM_Y0_X0_RTR_DBG_L_ARB 0x201310 + +#define mmSRAM_Y0_X0_RTR_DBG_E_ARB_MAX 0x201320 + +#define mmSRAM_Y0_X0_RTR_DBG_W_ARB_MAX 0x201324 + +#define mmSRAM_Y0_X0_RTR_DBG_L_ARB_MAX 0x201330 + +#endif /* ASIC_REG_SRAM_Y0_X0_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h new file mode 100644 index 000000000000..5624544303ca --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X1_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_E_ARB 0x205100 + +#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_W_ARB 0x205104 + +#define mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB 0x205110 + +#define mmSRAM_Y0_X1_RTR_HBW_E_ARB_MAX 0x205120 + +#define mmSRAM_Y0_X1_RTR_HBW_W_ARB_MAX 0x205124 + +#define mmSRAM_Y0_X1_RTR_HBW_L_ARB_MAX 0x205130 + +#define mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB 0x205140 + +#define mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB 0x205144 + +#define mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB 0x205148 + +#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB 0x205160 + +#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB 0x205164 + +#define mmSRAM_Y0_X1_RTR_HBW_WR_RS_L_ARB 0x205168 + +#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_E_ARB 0x205200 + +#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_W_ARB 0x205204 + +#define mmSRAM_Y0_X1_RTR_LBW_RD_RQ_L_ARB 0x205210 + +#define mmSRAM_Y0_X1_RTR_LBW_E_ARB_MAX 0x205220 + +#define mmSRAM_Y0_X1_RTR_LBW_W_ARB_MAX 0x205224 + +#define mmSRAM_Y0_X1_RTR_LBW_L_ARB_MAX 0x205230 + +#define mmSRAM_Y0_X1_RTR_LBW_DATA_E_ARB 0x205240 + +#define mmSRAM_Y0_X1_RTR_LBW_DATA_W_ARB 0x205244 + +#define mmSRAM_Y0_X1_RTR_LBW_DATA_L_ARB 0x205248 + +#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_E_ARB 0x205260 + +#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_W_ARB 0x205264 + +#define mmSRAM_Y0_X1_RTR_LBW_WR_RS_L_ARB 0x205268 + +#define mmSRAM_Y0_X1_RTR_DBG_E_ARB 0x205300 + +#define mmSRAM_Y0_X1_RTR_DBG_W_ARB 0x205304 + +#define mmSRAM_Y0_X1_RTR_DBG_L_ARB 0x205310 + +#define mmSRAM_Y0_X1_RTR_DBG_E_ARB_MAX 0x205320 + +#define mmSRAM_Y0_X1_RTR_DBG_W_ARB_MAX 0x205324 + +#define mmSRAM_Y0_X1_RTR_DBG_L_ARB_MAX 0x205330 + +#endif /* ASIC_REG_SRAM_Y0_X1_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h new file mode 100644 index 000000000000..3322bc0bd1df --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X2_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_E_ARB 0x209100 + +#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_W_ARB 0x209104 + +#define mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB 0x209110 + +#define mmSRAM_Y0_X2_RTR_HBW_E_ARB_MAX 0x209120 + +#define mmSRAM_Y0_X2_RTR_HBW_W_ARB_MAX 0x209124 + +#define mmSRAM_Y0_X2_RTR_HBW_L_ARB_MAX 0x209130 + +#define mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB 0x209140 + +#define mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB 0x209144 + +#define mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB 0x209148 + +#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB 0x209160 + +#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB 0x209164 + +#define mmSRAM_Y0_X2_RTR_HBW_WR_RS_L_ARB 0x209168 + +#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_E_ARB 0x209200 + +#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_W_ARB 0x209204 + +#define mmSRAM_Y0_X2_RTR_LBW_RD_RQ_L_ARB 0x209210 + +#define mmSRAM_Y0_X2_RTR_LBW_E_ARB_MAX 0x209220 + +#define mmSRAM_Y0_X2_RTR_LBW_W_ARB_MAX 0x209224 + +#define mmSRAM_Y0_X2_RTR_LBW_L_ARB_MAX 0x209230 + +#define mmSRAM_Y0_X2_RTR_LBW_DATA_E_ARB 0x209240 + +#define mmSRAM_Y0_X2_RTR_LBW_DATA_W_ARB 0x209244 + +#define mmSRAM_Y0_X2_RTR_LBW_DATA_L_ARB 0x209248 + +#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_E_ARB 0x209260 + +#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_W_ARB 0x209264 + +#define mmSRAM_Y0_X2_RTR_LBW_WR_RS_L_ARB 0x209268 + +#define mmSRAM_Y0_X2_RTR_DBG_E_ARB 0x209300 + +#define mmSRAM_Y0_X2_RTR_DBG_W_ARB 0x209304 + +#define mmSRAM_Y0_X2_RTR_DBG_L_ARB 0x209310 + +#define mmSRAM_Y0_X2_RTR_DBG_E_ARB_MAX 0x209320 + +#define mmSRAM_Y0_X2_RTR_DBG_W_ARB_MAX 0x209324 + +#define mmSRAM_Y0_X2_RTR_DBG_L_ARB_MAX 0x209330 + +#endif /* ASIC_REG_SRAM_Y0_X2_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h new file mode 100644 index 000000000000..81e393db2027 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X3_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_E_ARB 0x20D100 + +#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_W_ARB 0x20D104 + +#define mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB 0x20D110 + +#define mmSRAM_Y0_X3_RTR_HBW_E_ARB_MAX 0x20D120 + +#define mmSRAM_Y0_X3_RTR_HBW_W_ARB_MAX 0x20D124 + +#define mmSRAM_Y0_X3_RTR_HBW_L_ARB_MAX 0x20D130 + +#define mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB 0x20D140 + +#define mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB 0x20D144 + +#define mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB 0x20D148 + +#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB 0x20D160 + +#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB 0x20D164 + +#define mmSRAM_Y0_X3_RTR_HBW_WR_RS_L_ARB 0x20D168 + +#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_E_ARB 0x20D200 + +#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_W_ARB 0x20D204 + +#define mmSRAM_Y0_X3_RTR_LBW_RD_RQ_L_ARB 0x20D210 + +#define mmSRAM_Y0_X3_RTR_LBW_E_ARB_MAX 0x20D220 + +#define mmSRAM_Y0_X3_RTR_LBW_W_ARB_MAX 0x20D224 + +#define mmSRAM_Y0_X3_RTR_LBW_L_ARB_MAX 0x20D230 + +#define mmSRAM_Y0_X3_RTR_LBW_DATA_E_ARB 0x20D240 + +#define mmSRAM_Y0_X3_RTR_LBW_DATA_W_ARB 0x20D244 + +#define mmSRAM_Y0_X3_RTR_LBW_DATA_L_ARB 0x20D248 + +#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_E_ARB 0x20D260 + +#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_W_ARB 0x20D264 + +#define mmSRAM_Y0_X3_RTR_LBW_WR_RS_L_ARB 0x20D268 + +#define mmSRAM_Y0_X3_RTR_DBG_E_ARB 0x20D300 + +#define mmSRAM_Y0_X3_RTR_DBG_W_ARB 0x20D304 + +#define mmSRAM_Y0_X3_RTR_DBG_L_ARB 0x20D310 + +#define mmSRAM_Y0_X3_RTR_DBG_E_ARB_MAX 0x20D320 + +#define mmSRAM_Y0_X3_RTR_DBG_W_ARB_MAX 0x20D324 + +#define mmSRAM_Y0_X3_RTR_DBG_L_ARB_MAX 0x20D330 + +#endif /* ASIC_REG_SRAM_Y0_X3_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h new file mode 100644 index 000000000000..b2e11b1de385 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ +#define ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ + +/* + ***************************************** + * SRAM_Y0_X4_RTR (Prototype: IC_RTR) + ***************************************** + */ + +#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_E_ARB 0x211100 + +#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_W_ARB 0x211104 + +#define mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB 0x211110 + +#define mmSRAM_Y0_X4_RTR_HBW_E_ARB_MAX 0x211120 + +#define mmSRAM_Y0_X4_RTR_HBW_W_ARB_MAX 0x211124 + +#define mmSRAM_Y0_X4_RTR_HBW_L_ARB_MAX 0x211130 + +#define mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB 0x211140 + +#define mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB 0x211144 + +#define mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB 0x211148 + +#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB 0x211160 + +#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB 0x211164 + +#define mmSRAM_Y0_X4_RTR_HBW_WR_RS_L_ARB 0x211168 + +#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_E_ARB 0x211200 + +#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_W_ARB 0x211204 + +#define mmSRAM_Y0_X4_RTR_LBW_RD_RQ_L_ARB 0x211210 + +#define mmSRAM_Y0_X4_RTR_LBW_E_ARB_MAX 0x211220 + +#define mmSRAM_Y0_X4_RTR_LBW_W_ARB_MAX 0x211224 + +#define mmSRAM_Y0_X4_RTR_LBW_L_ARB_MAX 0x211230 + +#define mmSRAM_Y0_X4_RTR_LBW_DATA_E_ARB 0x211240 + +#define mmSRAM_Y0_X4_RTR_LBW_DATA_W_ARB 0x211244 + +#define mmSRAM_Y0_X4_RTR_LBW_DATA_L_ARB 0x211248 + +#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_E_ARB 0x211260 + +#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_W_ARB 0x211264 + +#define mmSRAM_Y0_X4_RTR_LBW_WR_RS_L_ARB 0x211268 + +#define mmSRAM_Y0_X4_RTR_DBG_E_ARB 0x211300 + +#define mmSRAM_Y0_X4_RTR_DBG_W_ARB 0x211304 + +#define mmSRAM_Y0_X4_RTR_DBG_L_ARB 0x211310 + +#define mmSRAM_Y0_X4_RTR_DBG_E_ARB_MAX 0x211320 + +#define mmSRAM_Y0_X4_RTR_DBG_W_ARB_MAX 0x211324 + +#define mmSRAM_Y0_X4_RTR_DBG_L_ARB_MAX 0x211330 + +#endif /* ASIC_REG_SRAM_Y0_X4_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h new file mode 100644 index 000000000000..b4ea8cae2757 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_STLB_MASKS_H_ +#define ASIC_REG_STLB_MASKS_H_ + +/* + ***************************************** + * STLB (Prototype: STLB) + ***************************************** + */ + +/* STLB_CACHE_INV */ +#define STLB_CACHE_INV_PRODUCER_INDEX_SHIFT 0 +#define STLB_CACHE_INV_PRODUCER_INDEX_MASK 0xFF +#define STLB_CACHE_INV_INDEX_MASK_SHIFT 8 +#define STLB_CACHE_INV_INDEX_MASK_MASK 0xFF00 + +/* STLB_CACHE_INV_BASE_39_8 */ +#define STLB_CACHE_INV_BASE_39_8_PA_SHIFT 0 +#define STLB_CACHE_INV_BASE_39_8_PA_MASK 0xFFFFFFFF + +/* STLB_CACHE_INV_BASE_49_40 */ +#define STLB_CACHE_INV_BASE_49_40_PA_SHIFT 0 +#define STLB_CACHE_INV_BASE_49_40_PA_MASK 0x3FF + +/* STLB_STLB_FEATURE_EN */ +#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_SHIFT 0 +#define STLB_STLB_FEATURE_EN_STLB_CTRL_MULTI_PAGE_SIZE_EN_MASK 0x1 +#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_SHIFT 1 +#define STLB_STLB_FEATURE_EN_MULTI_PAGE_SIZE_EN_MASK 0x2 +#define STLB_STLB_FEATURE_EN_LOOKUP_EN_SHIFT 2 +#define STLB_STLB_FEATURE_EN_LOOKUP_EN_MASK 0x4 +#define STLB_STLB_FEATURE_EN_BYPASS_SHIFT 3 +#define STLB_STLB_FEATURE_EN_BYPASS_MASK 0x8 +#define STLB_STLB_FEATURE_EN_BANK_STOP_SHIFT 4 +#define STLB_STLB_FEATURE_EN_BANK_STOP_MASK 0x10 +#define STLB_STLB_FEATURE_EN_TRACE_EN_SHIFT 5 +#define STLB_STLB_FEATURE_EN_TRACE_EN_MASK 0x20 +#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_SHIFT 6 +#define STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK 0x40 +#define STLB_STLB_FEATURE_EN_CACHING_EN_SHIFT 7 +#define STLB_STLB_FEATURE_EN_CACHING_EN_MASK 0xF80 + +/* STLB_STLB_AXI_CACHE */ +#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_SHIFT 0 +#define STLB_STLB_AXI_CACHE_STLB_CTRL_ARCACHE_MASK 0xF +#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_SHIFT 4 +#define STLB_STLB_AXI_CACHE_STLB_CTRL_AWCACHE_MASK 0xF0 +#define STLB_STLB_AXI_CACHE_INV_ARCACHE_SHIFT 8 +#define STLB_STLB_AXI_CACHE_INV_ARCACHE_MASK 0xF00 + +/* STLB_HOP_CONFIGURATION */ +#define STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT 0 +#define STLB_HOP_CONFIGURATION_FIRST_HOP_MASK 0x7 +#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SHIFT 4 +#define STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_MASK 0x70 +#define STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT 8 +#define STLB_HOP_CONFIGURATION_LAST_HOP_MASK 0x700 + +/* STLB_LINK_LIST_LOOKUP_MASK_49_32 */ +#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_SHIFT 0 +#define STLB_LINK_LIST_LOOKUP_MASK_49_32_R_MASK 0x3FFFF + +/* STLB_LINK_LIST_LOOKUP_MASK_31_0 */ +#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_SHIFT 0 +#define STLB_LINK_LIST_LOOKUP_MASK_31_0_R_MASK 0xFFFFFFFF + +/* STLB_LINK_LIST */ +#define STLB_LINK_LIST_CLEAR_SHIFT 0 +#define STLB_LINK_LIST_CLEAR_MASK 0x1 +#define STLB_LINK_LIST_EN_SHIFT 1 +#define STLB_LINK_LIST_EN_MASK 0x2 + +/* STLB_INV_ALL_START */ +#define STLB_INV_ALL_START_R_SHIFT 0 +#define STLB_INV_ALL_START_R_MASK 0x1 + +/* STLB_INV_ALL_SET */ +#define STLB_INV_ALL_SET_R_SHIFT 0 +#define STLB_INV_ALL_SET_R_MASK 0xFF + +/* STLB_INV_PS */ +#define STLB_INV_PS_R_SHIFT 0 +#define STLB_INV_PS_R_MASK 0x3 + +/* STLB_INV_CONSUMER_INDEX */ +#define STLB_INV_CONSUMER_INDEX_R_SHIFT 0 +#define STLB_INV_CONSUMER_INDEX_R_MASK 0xFF + +/* STLB_INV_HIT_COUNT */ +#define STLB_INV_HIT_COUNT_R_SHIFT 0 +#define STLB_INV_HIT_COUNT_R_MASK 0x7FF + +/* STLB_INV_SET */ +#define STLB_INV_SET_R_SHIFT 0 +#define STLB_INV_SET_R_MASK 0xFF + +/* STLB_SRAM_INIT */ +#define STLB_SRAM_INIT_BUSY_TAG_SHIFT 0 +#define 
STLB_SRAM_INIT_BUSY_TAG_MASK 0x3 +#define STLB_SRAM_INIT_BUSY_SLICE_SHIFT 2 +#define STLB_SRAM_INIT_BUSY_SLICE_MASK 0xC +#define STLB_SRAM_INIT_BUSY_DATA_SHIFT 4 +#define STLB_SRAM_INIT_BUSY_DATA_MASK 0x10 + +#endif /* ASIC_REG_STLB_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h new file mode 100644 index 000000000000..0f5281d3e65b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_STLB_REGS_H_ +#define ASIC_REG_STLB_REGS_H_ + +/* + ***************************************** + * STLB (Prototype: STLB) + ***************************************** + */ + +#define mmSTLB_CACHE_INV 0x490010 + +#define mmSTLB_CACHE_INV_BASE_39_8 0x490014 + +#define mmSTLB_CACHE_INV_BASE_49_40 0x490018 + +#define mmSTLB_STLB_FEATURE_EN 0x49001C + +#define mmSTLB_STLB_AXI_CACHE 0x490020 + +#define mmSTLB_HOP_CONFIGURATION 0x490024 + +#define mmSTLB_LINK_LIST_LOOKUP_MASK_49_32 0x490028 + +#define mmSTLB_LINK_LIST_LOOKUP_MASK_31_0 0x49002C + +#define mmSTLB_LINK_LIST 0x490030 + +#define mmSTLB_INV_ALL_START 0x490034 + +#define mmSTLB_INV_ALL_SET 0x490038 + +#define mmSTLB_INV_PS 0x49003C + +#define mmSTLB_INV_CONSUMER_INDEX 0x490040 + +#define mmSTLB_INV_HIT_COUNT 0x490044 + +#define mmSTLB_INV_SET 0x490048 + +#define mmSTLB_SRAM_INIT 0x49004C + +#endif /* ASIC_REG_STLB_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h new file mode 100644 index 000000000000..e5587b49eecd --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h @@ -0,0 +1,1607 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CFG_MASKS_H_ +#define ASIC_REG_TPC0_CFG_MASKS_H_ + +/* + ***************************************** + * TPC0_CFG (Prototype: TPC) + ***************************************** + */ + +/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_SHIFT 0 +#define 
TPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_SHIFT 0 
+#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE */ +#define 
TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE */ 
+#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 
0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define 
TPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW */ +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH */ +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE */ +#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG */ +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0 
+#define TPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW */ +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_0 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_0 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_1 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_1 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_2 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_2 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_3 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_3 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_BASE_DIM_4 */ +#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_TID_SIZE_DIM_4 */ +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_KERNEL_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_SRF */ +#define TPC0_CFG_KERNEL_SRF_V_SHIFT 0 +#define TPC0_CFG_KERNEL_SRF_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_KERNEL_KERNEL_CONFIG */ +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_SHIFT 0 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_SMALL_VLM_MASK 0x1 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8 +#define TPC0_CFG_KERNEL_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00 + +/* TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE */ +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define TPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* TPC0_CFG_RESERVED_DESC_END */ +#define TPC0_CFG_RESERVED_DESC_END_V_SHIFT 0 +#define TPC0_CFG_RESERVED_DESC_END_V_MASK 0xFFFFFFFF + 
+/* TPC0_CFG_ROUND_CSR */ +#define TPC0_CFG_ROUND_CSR_MODE_SHIFT 0 +#define TPC0_CFG_ROUND_CSR_MODE_MASK 0x7 + +/* TPC0_CFG_TBUF_BASE_ADDR_LOW */ +#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_TBUF_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_TBUF_BASE_ADDR_HIGH */ +#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_TBUF_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_SEMAPHORE */ +#define TPC0_CFG_SEMAPHORE_V_SHIFT 0 +#define TPC0_CFG_SEMAPHORE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_VFLAGS */ +#define TPC0_CFG_VFLAGS_V_SHIFT 0 +#define TPC0_CFG_VFLAGS_V_MASK 0xF + +/* TPC0_CFG_SFLAGS */ +#define TPC0_CFG_SFLAGS_V_SHIFT 0 +#define TPC0_CFG_SFLAGS_V_MASK 0xF + +/* TPC0_CFG_LFSR_POLYNOM */ +#define TPC0_CFG_LFSR_POLYNOM_V_SHIFT 0 +#define TPC0_CFG_LFSR_POLYNOM_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_STATUS */ +#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_SHIFT 1 +#define TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK 0x2 +#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_SHIFT 2 +#define TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK 0x4 +#define TPC0_CFG_STATUS_IQ_EMPTY_SHIFT 3 +#define TPC0_CFG_STATUS_IQ_EMPTY_MASK 0x8 +#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_SHIFT 4 +#define TPC0_CFG_STATUS_NO_INFLIGH_MEM_ACCESSES_MASK 0x10 + +/* TPC0_CFG_CFG_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_CFG_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_CFG_SUBTRACT_VALUE */ +#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_SHIFT 0 +#define TPC0_CFG_CFG_SUBTRACT_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_SM_BASE_ADDRESS_LOW */ +#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_SM_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_SM_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_SM_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_TPC_CMD */ +#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT 0 +#define TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_MASK 0x1 +#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_SHIFT 1 +#define TPC0_CFG_TPC_CMD_DCACHE_INVALIDATE_MASK 0x2 +#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_SHIFT 2 +#define TPC0_CFG_TPC_CMD_LCACHE_INVALIDATE_MASK 0x4 +#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_SHIFT 3 +#define TPC0_CFG_TPC_CMD_TCACHE_INVALIDATE_MASK 0x8 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT 4 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_MASK 0x10 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_SHIFT 5 +#define TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_32KB_MASK 0x20 +#define TPC0_CFG_TPC_CMD_QMAN_STOP_SHIFT 6 +#define TPC0_CFG_TPC_CMD_QMAN_STOP_MASK 0x40 + +/* TPC0_CFG_TPC_EXECUTE */ +#define TPC0_CFG_TPC_EXECUTE_V_SHIFT 0 +#define TPC0_CFG_TPC_EXECUTE_V_MASK 0x1 + +/* TPC0_CFG_TPC_STALL */ +#define TPC0_CFG_TPC_STALL_V_SHIFT 0 +#define TPC0_CFG_TPC_STALL_V_MASK 0x1 + +/* TPC0_CFG_ICACHE_BASE_ADDERESS_LOW */ +#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_SHIFT 0 +#define TPC0_CFG_ICACHE_BASE_ADDERESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH */ +#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_ICACHE_BASE_ADDERESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_MSS_CONFIG */ +#define TPC0_CFG_MSS_CONFIG_AWCACHE_SHIFT 0 +#define TPC0_CFG_MSS_CONFIG_AWCACHE_MASK 0xF +#define TPC0_CFG_MSS_CONFIG_ARCACHE_SHIFT 4 +#define TPC0_CFG_MSS_CONFIG_ARCACHE_MASK 0xF0 +#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT 8 +#define TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_MASK 0x300 +#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_SHIFT 10 +#define TPC0_CFG_MSS_CONFIG_EXPOSED_PIPE_DIS_MASK 
0x400 + +/* TPC0_CFG_TPC_INTR_CAUSE */ +#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_SHIFT 0 +#define TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK 0xFFFFFFFF + +/* TPC0_CFG_TPC_INTR_MASK */ +#define TPC0_CFG_TPC_INTR_MASK_MASK_SHIFT 0 +#define TPC0_CFG_TPC_INTR_MASK_MASK_MASK 0xFFFFFFFF + +/* TPC0_CFG_TSB_CONFIG */ +#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_SHIFT 0 +#define TPC0_CFG_TSB_CONFIG_TSB_AGU_MAX_CREDIT_MASK 0x1F +#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_SHIFT 5 +#define TPC0_CFG_TSB_CONFIG_TSB_EU_MAX_CREDIT_MASK 0x3E0 +#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_SHIFT 10 +#define TPC0_CFG_TSB_CONFIG_MAX_OUTSTANDING_MASK 0xFFC00 +#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_SHIFT 20 +#define TPC0_CFG_TSB_CONFIG_MAX_SIZE_MASK 0x3FF00000 + +/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define 
TPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET */ +#define 
TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_SHIFT 0 +#define 
TPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* 
TPC0_CFG_QM_TENSOR_5_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG */ +#define 
TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW */ +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH */ +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_PADDING_VALUE */ +#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_PADDING_VALUE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG */ +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_DATA_TYPE_MASK 0x3 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_SHIFT 8 +#define 
TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_VALID_DIM_MASK_MASK 0x1F00 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_SHIFT 16 +#define TPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG_LAST_DIM_MASK 0x70000 + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_SIZE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET */ +#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_SHIFT 0 +#define TPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW */ +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH */ +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_0 */ +#define TPC0_CFG_QM_TID_BASE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_0 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_0_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_1 */ +#define TPC0_CFG_QM_TID_BASE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_1 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_1_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_2 */ +#define TPC0_CFG_QM_TID_BASE_DIM_2_V_SHIFT 0 +#define 
TPC0_CFG_QM_TID_BASE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_2 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_2_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_3 */ +#define TPC0_CFG_QM_TID_BASE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_3 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_3_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_BASE_DIM_4 */ +#define TPC0_CFG_QM_TID_BASE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_QM_TID_BASE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_TID_SIZE_DIM_4 */ +#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_SHIFT 0 +#define TPC0_CFG_QM_TID_SIZE_DIM_4_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_SRF */ +#define TPC0_CFG_QM_SRF_V_SHIFT 0 +#define TPC0_CFG_QM_SRF_V_MASK 0xFFFFFFFF + +/* TPC0_CFG_QM_KERNEL_CONFIG */ +#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_SHIFT 0 +#define TPC0_CFG_QM_KERNEL_CONFIG_SMALL_VLM_MASK 0x1 +#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_SHIFT 1 +#define TPC0_CFG_QM_KERNEL_CONFIG_ASO_EVICT_L0_MASK 0x2 +#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_SHIFT 8 +#define TPC0_CFG_QM_KERNEL_CONFIG_NUM_VALID_SRFS_MASK 0x3F00 + +/* TPC0_CFG_QM_SYNC_OBJECT_MESSAGE */ +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_SHIFT 0 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_WRITE_VALUE_MASK 0xFFFF +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_SHIFT 16 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_ADDRESS_OFFSET_MASK 0x7FFF0000 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_SHIFT 31 +#define TPC0_CFG_QM_SYNC_OBJECT_MESSAGE_SO_OPERATION_MASK 0x80000000 + +/* TPC0_CFG_ARUSER */ +#define TPC0_CFG_ARUSER_ASID_SHIFT 0 +#define TPC0_CFG_ARUSER_ASID_MASK 0x3FF +#define TPC0_CFG_ARUSER_MMBP_SHIFT 10 +#define TPC0_CFG_ARUSER_MMBP_MASK 0x400 +#define TPC0_CFG_ARUSER_V_SHIFT 11 +#define TPC0_CFG_ARUSER_V_MASK 0xFFFFF800 + +/* TPC0_CFG_AWUSER */ +#define TPC0_CFG_AWUSER_ASID_SHIFT 0 +#define TPC0_CFG_AWUSER_ASID_MASK 0x3FF +#define TPC0_CFG_AWUSER_MMBP_SHIFT 10 +#define TPC0_CFG_AWUSER_MMBP_MASK 0x400 +#define TPC0_CFG_AWUSER_V_SHIFT 11 +#define TPC0_CFG_AWUSER_V_MASK 0xFFFFF800 + +/* TPC0_CFG_FUNC_MBIST_CNTRL */ +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT 0 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_MASK 0x1 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_SHIFT 1 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK 0x2 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_SHIFT 2 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK 0x4 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_SHIFT 16 +#define TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_FAILED_MASK 0x3FF0000 + +/* TPC0_CFG_FUNC_MBIST_PAT */ +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_SHIFT 0 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_EVEN_MASK 0x3 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_SHIFT 2 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN0_ODD_MASK 0xC +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_SHIFT 4 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_EVEN_MASK 0x30 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_SHIFT 6 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN1_ODD_MASK 0xC0 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_SHIFT 8 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_EVEN_MASK 0x300 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_SHIFT 10 +#define TPC0_CFG_FUNC_MBIST_PAT_MBIST_PATTERN2_ODD_MASK 0xC00 + +/* TPC0_CFG_FUNC_MBIST_MEM */ +#define TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_SHIFT 0 +#define 
TPC0_CFG_FUNC_MBIST_MEM_MAX_ADDR_MASK 0x7FF +#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_SHIFT 12 +#define TPC0_CFG_FUNC_MBIST_MEM_PATTERN_EN_MASK 0x7000 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_SHIFT 16 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_ADDR_MASK 0x7FF0000 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_SHIFT 28 +#define TPC0_CFG_FUNC_MBIST_MEM_LAST_FAILED_PATTERN_MASK 0x70000000 + +#endif /* ASIC_REG_TPC0_CFG_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h new file mode 100644 index 000000000000..2be28a63c50a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CFG_REGS_H_ +#define ASIC_REG_TPC0_CFG_REGS_H_ + +/* + ***************************************** + * TPC0_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE06400 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE06404 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE06408 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE0640C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE06410 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE06414 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE06418 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE0641C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE06420 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE06424 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE06428 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE0642C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE06430 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE06434 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE06438 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE0643C + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE06440 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE06444 + +#define mmTPC0_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE06448 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE0644C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE06450 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE06454 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE06458 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE0645C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE06460 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE06464 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE06468 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE0646C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE06470 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE06474 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE06478 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE0647C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE06480 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE06484 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE06488 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE0648C + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE06490 + +#define mmTPC0_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE06494 + +#define 
mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE06498 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE0649C + +#define mmTPC0_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE064A0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE064A4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE064A8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE064AC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE064B0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE064B4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE064B8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE064BC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE064C0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE064C4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE064C8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE064CC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE064D0 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE064D4 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE064D8 + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE064DC + +#define mmTPC0_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE064E0 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE064E4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE064E8 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE064EC + +#define mmTPC0_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE064F0 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE064F4 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE064F8 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE064FC + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE06500 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE06504 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE06508 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE0650C + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE06510 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE06514 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE06518 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE0651C + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE06520 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE06524 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE06528 + +#define mmTPC0_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE0652C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE06530 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE06534 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE06538 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE0653C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE06540 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE06544 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE06548 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE0654C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE06550 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE06554 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE06558 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE0655C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE06560 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE06564 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE06568 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE0656C + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE06570 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE06574 + +#define mmTPC0_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE06578 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE0657C + +#define 
mmTPC0_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE06580 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE06584 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE06588 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE0658C + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE06590 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE06594 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE06598 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE0659C + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE065A0 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE065A4 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE065A8 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE065AC + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE065B0 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE065B4 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE065B8 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE065BC + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE065C0 + +#define mmTPC0_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE065C4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE065C8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE065CC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE065D0 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE065D4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE065D8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE065DC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE065E0 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE065E4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE065E8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE065EC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE065F0 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE065F4 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE065F8 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE065FC + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE06600 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE06604 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE06608 + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE0660C + +#define mmTPC0_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE06610 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE06614 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE06618 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE0661C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE06620 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE06624 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE06628 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE0662C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE06630 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE06634 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE06638 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE0663C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE06640 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE06644 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE06648 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE0664C + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE06650 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE06654 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE06658 + +#define mmTPC0_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE0665C + +#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE06660 + +#define mmTPC0_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE06664 + +#define 
mmTPC0_CFG_KERNEL_TID_BASE_DIM_0 0xE06668 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_0 0xE0666C + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_1 0xE06670 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_1 0xE06674 + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_2 0xE06678 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_2 0xE0667C + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_3 0xE06680 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_3 0xE06684 + +#define mmTPC0_CFG_KERNEL_TID_BASE_DIM_4 0xE06688 + +#define mmTPC0_CFG_KERNEL_TID_SIZE_DIM_4 0xE0668C + +#define mmTPC0_CFG_KERNEL_SRF_0 0xE06690 + +#define mmTPC0_CFG_KERNEL_SRF_1 0xE06694 + +#define mmTPC0_CFG_KERNEL_SRF_2 0xE06698 + +#define mmTPC0_CFG_KERNEL_SRF_3 0xE0669C + +#define mmTPC0_CFG_KERNEL_SRF_4 0xE066A0 + +#define mmTPC0_CFG_KERNEL_SRF_5 0xE066A4 + +#define mmTPC0_CFG_KERNEL_SRF_6 0xE066A8 + +#define mmTPC0_CFG_KERNEL_SRF_7 0xE066AC + +#define mmTPC0_CFG_KERNEL_SRF_8 0xE066B0 + +#define mmTPC0_CFG_KERNEL_SRF_9 0xE066B4 + +#define mmTPC0_CFG_KERNEL_SRF_10 0xE066B8 + +#define mmTPC0_CFG_KERNEL_SRF_11 0xE066BC + +#define mmTPC0_CFG_KERNEL_SRF_12 0xE066C0 + +#define mmTPC0_CFG_KERNEL_SRF_13 0xE066C4 + +#define mmTPC0_CFG_KERNEL_SRF_14 0xE066C8 + +#define mmTPC0_CFG_KERNEL_SRF_15 0xE066CC + +#define mmTPC0_CFG_KERNEL_SRF_16 0xE066D0 + +#define mmTPC0_CFG_KERNEL_SRF_17 0xE066D4 + +#define mmTPC0_CFG_KERNEL_SRF_18 0xE066D8 + +#define mmTPC0_CFG_KERNEL_SRF_19 0xE066DC + +#define mmTPC0_CFG_KERNEL_SRF_20 0xE066E0 + +#define mmTPC0_CFG_KERNEL_SRF_21 0xE066E4 + +#define mmTPC0_CFG_KERNEL_SRF_22 0xE066E8 + +#define mmTPC0_CFG_KERNEL_SRF_23 0xE066EC + +#define mmTPC0_CFG_KERNEL_SRF_24 0xE066F0 + +#define mmTPC0_CFG_KERNEL_SRF_25 0xE066F4 + +#define mmTPC0_CFG_KERNEL_SRF_26 0xE066F8 + +#define mmTPC0_CFG_KERNEL_SRF_27 0xE066FC + +#define mmTPC0_CFG_KERNEL_SRF_28 0xE06700 + +#define mmTPC0_CFG_KERNEL_SRF_29 0xE06704 + +#define mmTPC0_CFG_KERNEL_SRF_30 0xE06708 + +#define mmTPC0_CFG_KERNEL_SRF_31 0xE0670C + +#define mmTPC0_CFG_KERNEL_KERNEL_CONFIG 0xE06710 + +#define mmTPC0_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE06714 + +#define mmTPC0_CFG_RESERVED_DESC_END 0xE06738 + +#define mmTPC0_CFG_ROUND_CSR 0xE067FC + +#define mmTPC0_CFG_TBUF_BASE_ADDR_LOW 0xE06800 + +#define mmTPC0_CFG_TBUF_BASE_ADDR_HIGH 0xE06804 + +#define mmTPC0_CFG_SEMAPHORE 0xE06808 + +#define mmTPC0_CFG_VFLAGS 0xE0680C + +#define mmTPC0_CFG_SFLAGS 0xE06810 + +#define mmTPC0_CFG_LFSR_POLYNOM 0xE06818 + +#define mmTPC0_CFG_STATUS 0xE0681C + +#define mmTPC0_CFG_CFG_BASE_ADDRESS_HIGH 0xE06820 + +#define mmTPC0_CFG_CFG_SUBTRACT_VALUE 0xE06824 + +#define mmTPC0_CFG_SM_BASE_ADDRESS_LOW 0xE06828 + +#define mmTPC0_CFG_SM_BASE_ADDRESS_HIGH 0xE0682C + +#define mmTPC0_CFG_TPC_CMD 0xE06830 + +#define mmTPC0_CFG_TPC_EXECUTE 0xE06838 + +#define mmTPC0_CFG_TPC_STALL 0xE0683C + +#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW 0xE06840 + +#define mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE06844 + +#define mmTPC0_CFG_MSS_CONFIG 0xE06854 + +#define mmTPC0_CFG_TPC_INTR_CAUSE 0xE06858 + +#define mmTPC0_CFG_TPC_INTR_MASK 0xE0685C + +#define mmTPC0_CFG_TSB_CONFIG 0xE06860 + +#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE06A00 + +#define mmTPC0_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE06A04 + +#define mmTPC0_CFG_QM_TENSOR_0_PADDING_VALUE 0xE06A08 + +#define mmTPC0_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE06A0C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE06A10 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE06A14 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE06A18 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE06A1C + +#define 
mmTPC0_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE06A20 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE06A24 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE06A28 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE06A2C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE06A30 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE06A34 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE06A38 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE06A3C + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE06A40 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE06A44 + +#define mmTPC0_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE06A48 + +#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE06A4C + +#define mmTPC0_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE06A50 + +#define mmTPC0_CFG_QM_TENSOR_1_PADDING_VALUE 0xE06A54 + +#define mmTPC0_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE06A58 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE06A5C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE06A60 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE06A64 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE06A68 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE06A6C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE06A70 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE06A74 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE06A78 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE06A7C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE06A80 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE06A84 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE06A88 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE06A8C + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE06A90 + +#define mmTPC0_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE06A94 + +#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE06A98 + +#define mmTPC0_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE06A9C + +#define mmTPC0_CFG_QM_TENSOR_2_PADDING_VALUE 0xE06AA0 + +#define mmTPC0_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE06AA4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE06AA8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE06AAC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE06AB0 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE06AB4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE06AB8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE06ABC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE06AC0 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE06AC4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE06AC8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE06ACC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE06AD0 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE06AD4 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE06AD8 + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE06ADC + +#define mmTPC0_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE06AE0 + +#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE06AE4 + +#define mmTPC0_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE06AE8 + +#define mmTPC0_CFG_QM_TENSOR_3_PADDING_VALUE 0xE06AEC + +#define mmTPC0_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE06AF0 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE06AF4 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE06AF8 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE06AFC + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE06B00 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE06B04 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE06B08 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE06B0C + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE06B10 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE06B14 + +#define 
mmTPC0_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE06B18 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE06B1C + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE06B20 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE06B24 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE06B28 + +#define mmTPC0_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE06B2C + +#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE06B30 + +#define mmTPC0_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE06B34 + +#define mmTPC0_CFG_QM_TENSOR_4_PADDING_VALUE 0xE06B38 + +#define mmTPC0_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE06B3C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE06B40 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE06B44 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE06B48 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE06B4C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE06B50 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE06B54 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE06B58 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE06B5C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE06B60 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE06B64 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE06B68 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE06B6C + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE06B70 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE06B74 + +#define mmTPC0_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE06B78 + +#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE06B7C + +#define mmTPC0_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE06B80 + +#define mmTPC0_CFG_QM_TENSOR_5_PADDING_VALUE 0xE06B84 + +#define mmTPC0_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE06B88 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE06B8C + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE06B90 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE06B94 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE06B98 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE06B9C + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE06BA0 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE06BA4 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE06BA8 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE06BAC + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE06BB0 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE06BB4 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE06BB8 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE06BBC + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE06BC0 + +#define mmTPC0_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE06BC4 + +#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE06BC8 + +#define mmTPC0_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE06BCC + +#define mmTPC0_CFG_QM_TENSOR_6_PADDING_VALUE 0xE06BD0 + +#define mmTPC0_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE06BD4 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE06BD8 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE06BDC + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE06BE0 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE06BE4 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE06BE8 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE06BEC + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE06BF0 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE06BF4 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE06BF8 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE06BFC + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE06C00 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE06C04 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE06C08 + +#define mmTPC0_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE06C0C + +#define 
mmTPC0_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE06C10 + +#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE06C14 + +#define mmTPC0_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE06C18 + +#define mmTPC0_CFG_QM_TENSOR_7_PADDING_VALUE 0xE06C1C + +#define mmTPC0_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE06C20 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE06C24 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE06C28 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE06C2C + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE06C30 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE06C34 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE06C38 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE06C3C + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE06C40 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE06C44 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE06C48 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE06C4C + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE06C50 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE06C54 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE06C58 + +#define mmTPC0_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE06C5C + +#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE06C60 + +#define mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE06C64 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_0 0xE06C68 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_0 0xE06C6C + +#define mmTPC0_CFG_QM_TID_BASE_DIM_1 0xE06C70 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_1 0xE06C74 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_2 0xE06C78 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_2 0xE06C7C + +#define mmTPC0_CFG_QM_TID_BASE_DIM_3 0xE06C80 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_3 0xE06C84 + +#define mmTPC0_CFG_QM_TID_BASE_DIM_4 0xE06C88 + +#define mmTPC0_CFG_QM_TID_SIZE_DIM_4 0xE06C8C + +#define mmTPC0_CFG_QM_SRF_0 0xE06C90 + +#define mmTPC0_CFG_QM_SRF_1 0xE06C94 + +#define mmTPC0_CFG_QM_SRF_2 0xE06C98 + +#define mmTPC0_CFG_QM_SRF_3 0xE06C9C + +#define mmTPC0_CFG_QM_SRF_4 0xE06CA0 + +#define mmTPC0_CFG_QM_SRF_5 0xE06CA4 + +#define mmTPC0_CFG_QM_SRF_6 0xE06CA8 + +#define mmTPC0_CFG_QM_SRF_7 0xE06CAC + +#define mmTPC0_CFG_QM_SRF_8 0xE06CB0 + +#define mmTPC0_CFG_QM_SRF_9 0xE06CB4 + +#define mmTPC0_CFG_QM_SRF_10 0xE06CB8 + +#define mmTPC0_CFG_QM_SRF_11 0xE06CBC + +#define mmTPC0_CFG_QM_SRF_12 0xE06CC0 + +#define mmTPC0_CFG_QM_SRF_13 0xE06CC4 + +#define mmTPC0_CFG_QM_SRF_14 0xE06CC8 + +#define mmTPC0_CFG_QM_SRF_15 0xE06CCC + +#define mmTPC0_CFG_QM_SRF_16 0xE06CD0 + +#define mmTPC0_CFG_QM_SRF_17 0xE06CD4 + +#define mmTPC0_CFG_QM_SRF_18 0xE06CD8 + +#define mmTPC0_CFG_QM_SRF_19 0xE06CDC + +#define mmTPC0_CFG_QM_SRF_20 0xE06CE0 + +#define mmTPC0_CFG_QM_SRF_21 0xE06CE4 + +#define mmTPC0_CFG_QM_SRF_22 0xE06CE8 + +#define mmTPC0_CFG_QM_SRF_23 0xE06CEC + +#define mmTPC0_CFG_QM_SRF_24 0xE06CF0 + +#define mmTPC0_CFG_QM_SRF_25 0xE06CF4 + +#define mmTPC0_CFG_QM_SRF_26 0xE06CF8 + +#define mmTPC0_CFG_QM_SRF_27 0xE06CFC + +#define mmTPC0_CFG_QM_SRF_28 0xE06D00 + +#define mmTPC0_CFG_QM_SRF_29 0xE06D04 + +#define mmTPC0_CFG_QM_SRF_30 0xE06D08 + +#define mmTPC0_CFG_QM_SRF_31 0xE06D0C + +#define mmTPC0_CFG_QM_KERNEL_CONFIG 0xE06D10 + +#define mmTPC0_CFG_QM_SYNC_OBJECT_MESSAGE 0xE06D14 + +#define mmTPC0_CFG_ARUSER 0xE06D18 + +#define mmTPC0_CFG_AWUSER 0xE06D1C + +#define mmTPC0_CFG_FUNC_MBIST_CNTRL 0xE06E00 + +#define mmTPC0_CFG_FUNC_MBIST_PAT 0xE06E04 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_0 0xE06E08 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_1 0xE06E0C + +#define mmTPC0_CFG_FUNC_MBIST_MEM_2 0xE06E10 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_3 0xE06E14 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_4 0xE06E18 
+ +#define mmTPC0_CFG_FUNC_MBIST_MEM_5 0xE06E1C + +#define mmTPC0_CFG_FUNC_MBIST_MEM_6 0xE06E20 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_7 0xE06E24 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_8 0xE06E28 + +#define mmTPC0_CFG_FUNC_MBIST_MEM_9 0xE06E2C + +#endif /* ASIC_REG_TPC0_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h new file mode 100644 index 000000000000..9aa2d8b53207 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CMDQ_MASKS_H_ +#define ASIC_REG_TPC0_CMDQ_MASKS_H_ + +/* + ***************************************** + * TPC0_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +/* TPC0_CMDQ_GLBL_CFG0 */ +#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_SHIFT 0 +#define TPC0_CMDQ_GLBL_CFG0_PQF_EN_MASK 0x1 +#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_SHIFT 1 +#define TPC0_CMDQ_GLBL_CFG0_CQF_EN_MASK 0x2 +#define TPC0_CMDQ_GLBL_CFG0_CP_EN_SHIFT 2 +#define TPC0_CMDQ_GLBL_CFG0_CP_EN_MASK 0x4 +#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_SHIFT 3 +#define TPC0_CMDQ_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* TPC0_CMDQ_GLBL_CFG1 */ +#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define TPC0_CMDQ_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define TPC0_CMDQ_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_SHIFT 2 +#define TPC0_CMDQ_GLBL_CFG1_CP_STOP_MASK 0x4 +#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define TPC0_CMDQ_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define TPC0_CMDQ_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define TPC0_CMDQ_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define TPC0_CMDQ_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define TPC0_CMDQ_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* TPC0_CMDQ_GLBL_PROT */ +#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_SHIFT 0 +#define TPC0_CMDQ_GLBL_PROT_PQF_PROT_MASK 0x1 +#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_SHIFT 1 +#define TPC0_CMDQ_GLBL_PROT_CQF_PROT_MASK 0x2 +#define TPC0_CMDQ_GLBL_PROT_CP_PROT_SHIFT 2 +#define TPC0_CMDQ_GLBL_PROT_CP_PROT_MASK 0x4 +#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_SHIFT 3 +#define TPC0_CMDQ_GLBL_PROT_DMA_PROT_MASK 0x8 +#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define TPC0_CMDQ_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define TPC0_CMDQ_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define TPC0_CMDQ_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define TPC0_CMDQ_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* TPC0_CMDQ_GLBL_ERR_CFG */ +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define TPC0_CMDQ_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define 
TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define TPC0_CMDQ_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define TPC0_CMDQ_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define TPC0_CMDQ_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define TPC0_CMDQ_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* TPC0_CMDQ_GLBL_ERR_ADDR_LO */ +#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define TPC0_CMDQ_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_GLBL_ERR_ADDR_HI */ +#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define TPC0_CMDQ_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_GLBL_ERR_WDATA */ +#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define TPC0_CMDQ_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_GLBL_SECURE_PROPS */ +#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define TPC0_CMDQ_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define TPC0_CMDQ_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* TPC0_CMDQ_GLBL_NON_SECURE_PROPS */ +#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define TPC0_CMDQ_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* TPC0_CMDQ_GLBL_STS0 */ +#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define TPC0_CMDQ_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define TPC0_CMDQ_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_SHIFT 2 +#define TPC0_CMDQ_GLBL_STS0_CP_IDLE_MASK 0x4 +#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define TPC0_CMDQ_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define TPC0_CMDQ_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define TPC0_CMDQ_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define TPC0_CMDQ_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define TPC0_CMDQ_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* TPC0_CMDQ_GLBL_STS1 */ +#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define TPC0_CMDQ_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define TPC0_CMDQ_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define TPC0_CMDQ_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define TPC0_CMDQ_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define TPC0_CMDQ_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define TPC0_CMDQ_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define TPC0_CMDQ_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define TPC0_CMDQ_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define TPC0_CMDQ_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 
+#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define TPC0_CMDQ_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* TPC0_CMDQ_CQ_CFG0 */ +#define TPC0_CMDQ_CQ_CFG0_RESERVED_SHIFT 0 +#define TPC0_CMDQ_CQ_CFG0_RESERVED_MASK 0x1 + +/* TPC0_CMDQ_CQ_CFG1 */ +#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define TPC0_CMDQ_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define TPC0_CMDQ_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* TPC0_CMDQ_CQ_ARUSER */ +#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define TPC0_CMDQ_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define TPC0_CMDQ_CQ_ARUSER_WORD_SHIFT 1 +#define TPC0_CMDQ_CQ_ARUSER_WORD_MASK 0x2 + +/* TPC0_CMDQ_CQ_PTR_LO */ +#define TPC0_CMDQ_CQ_PTR_LO_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CQ_PTR_HI */ +#define TPC0_CMDQ_CQ_PTR_HI_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CQ_TSIZE */ +#define TPC0_CMDQ_CQ_TSIZE_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CQ_CTL */ +#define TPC0_CMDQ_CQ_CTL_RPT_SHIFT 0 +#define TPC0_CMDQ_CQ_CTL_RPT_MASK 0xFFFF +#define TPC0_CMDQ_CQ_CTL_CTL_SHIFT 16 +#define TPC0_CMDQ_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* TPC0_CMDQ_CQ_PTR_LO_STS */ +#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CQ_PTR_HI_STS */ +#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CQ_TSIZE_STS */ +#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CQ_CTL_STS */ +#define TPC0_CMDQ_CQ_CTL_STS_RPT_SHIFT 0 +#define TPC0_CMDQ_CQ_CTL_STS_RPT_MASK 0xFFFF +#define TPC0_CMDQ_CQ_CTL_STS_CTL_SHIFT 16 +#define TPC0_CMDQ_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* TPC0_CMDQ_CQ_STS0 */ +#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define TPC0_CMDQ_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define TPC0_CMDQ_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* TPC0_CMDQ_CQ_STS1 */ +#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define TPC0_CMDQ_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define TPC0_CMDQ_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_SHIFT 31 +#define TPC0_CMDQ_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* TPC0_CMDQ_CQ_RD_RATE_LIM_EN */ +#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN */ +#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* TPC0_CMDQ_CQ_RD_RATE_LIM_SAT */ +#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT */ +#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* TPC0_CMDQ_CQ_IFIFO_CNT */ +#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO */ +#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI */ +#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO */ +#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* 
TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI */ +#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO */ +#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI */ +#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO */ +#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI */ +#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET */ +#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET */ +#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET */ +#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET */ +#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_FENCE0_RDATA */ +#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* TPC0_CMDQ_CP_FENCE1_RDATA */ +#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* TPC0_CMDQ_CP_FENCE2_RDATA */ +#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* TPC0_CMDQ_CP_FENCE3_RDATA */ +#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* TPC0_CMDQ_CP_FENCE0_CNT */ +#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE0_CNT_VAL_MASK 0xFF + +/* TPC0_CMDQ_CP_FENCE1_CNT */ +#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* TPC0_CMDQ_CP_FENCE2_CNT */ +#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* TPC0_CMDQ_CP_FENCE3_CNT */ +#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* TPC0_CMDQ_CP_STS */ +#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define TPC0_CMDQ_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_CMDQ_CP_STS_ERDY_SHIFT 16 +#define TPC0_CMDQ_CP_STS_ERDY_MASK 0x10000 +#define TPC0_CMDQ_CP_STS_RRDY_SHIFT 17 +#define TPC0_CMDQ_CP_STS_RRDY_MASK 0x20000 +#define TPC0_CMDQ_CP_STS_MRDY_SHIFT 18 +#define TPC0_CMDQ_CP_STS_MRDY_MASK 0x40000 +#define TPC0_CMDQ_CP_STS_SW_STOP_SHIFT 19 +#define TPC0_CMDQ_CP_STS_SW_STOP_MASK 0x80000 +#define TPC0_CMDQ_CP_STS_FENCE_ID_SHIFT 20 +#define TPC0_CMDQ_CP_STS_FENCE_ID_MASK 0x300000 +#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define TPC0_CMDQ_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* TPC0_CMDQ_CP_CURRENT_INST_LO */ +#define 
TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_CURRENT_INST_HI */ +#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CP_BARRIER_CFG */ +#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define TPC0_CMDQ_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* TPC0_CMDQ_CP_DBG_0 */ +#define TPC0_CMDQ_CP_DBG_0_VAL_SHIFT 0 +#define TPC0_CMDQ_CP_DBG_0_VAL_MASK 0xFF + +/* TPC0_CMDQ_CQ_BUF_ADDR */ +#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* TPC0_CMDQ_CQ_BUF_RDATA */ +#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_SHIFT 0 +#define TPC0_CMDQ_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_TPC0_CMDQ_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h new file mode 100644 index 000000000000..3572752ba66e --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_CMDQ_REGS_H_ +#define ASIC_REG_TPC0_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC0_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC0_CMDQ_GLBL_CFG0 0xE09000 + +#define mmTPC0_CMDQ_GLBL_CFG1 0xE09004 + +#define mmTPC0_CMDQ_GLBL_PROT 0xE09008 + +#define mmTPC0_CMDQ_GLBL_ERR_CFG 0xE0900C + +#define mmTPC0_CMDQ_GLBL_ERR_ADDR_LO 0xE09010 + +#define mmTPC0_CMDQ_GLBL_ERR_ADDR_HI 0xE09014 + +#define mmTPC0_CMDQ_GLBL_ERR_WDATA 0xE09018 + +#define mmTPC0_CMDQ_GLBL_SECURE_PROPS 0xE0901C + +#define mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS 0xE09020 + +#define mmTPC0_CMDQ_GLBL_STS0 0xE09024 + +#define mmTPC0_CMDQ_GLBL_STS1 0xE09028 + +#define mmTPC0_CMDQ_CQ_CFG0 0xE090B0 + +#define mmTPC0_CMDQ_CQ_CFG1 0xE090B4 + +#define mmTPC0_CMDQ_CQ_ARUSER 0xE090B8 + +#define mmTPC0_CMDQ_CQ_PTR_LO 0xE090C0 + +#define mmTPC0_CMDQ_CQ_PTR_HI 0xE090C4 + +#define mmTPC0_CMDQ_CQ_TSIZE 0xE090C8 + +#define mmTPC0_CMDQ_CQ_CTL 0xE090CC + +#define mmTPC0_CMDQ_CQ_PTR_LO_STS 0xE090D4 + +#define mmTPC0_CMDQ_CQ_PTR_HI_STS 0xE090D8 + +#define mmTPC0_CMDQ_CQ_TSIZE_STS 0xE090DC + +#define mmTPC0_CMDQ_CQ_CTL_STS 0xE090E0 + +#define mmTPC0_CMDQ_CQ_STS0 0xE090E4 + +#define mmTPC0_CMDQ_CQ_STS1 0xE090E8 + +#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_EN 0xE090F0 + +#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE090F4 + +#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_SAT 0xE090F8 + +#define mmTPC0_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE090FC + +#define mmTPC0_CMDQ_CQ_IFIFO_CNT 0xE09108 + +#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE09120 + +#define mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE09124 + +#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE09128 + +#define mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE0912C + +#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE09130 + +#define mmTPC0_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE09134 + +#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE09138 + +#define mmTPC0_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE0913C + +#define mmTPC0_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE09140 + +#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE09144 + +#define mmTPC0_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE09148 + +#define mmTPC0_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE0914C + +#define 
mmTPC0_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE09150 + +#define mmTPC0_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE09154 + +#define mmTPC0_CMDQ_CP_FENCE0_RDATA 0xE09158 + +#define mmTPC0_CMDQ_CP_FENCE1_RDATA 0xE0915C + +#define mmTPC0_CMDQ_CP_FENCE2_RDATA 0xE09160 + +#define mmTPC0_CMDQ_CP_FENCE3_RDATA 0xE09164 + +#define mmTPC0_CMDQ_CP_FENCE0_CNT 0xE09168 + +#define mmTPC0_CMDQ_CP_FENCE1_CNT 0xE0916C + +#define mmTPC0_CMDQ_CP_FENCE2_CNT 0xE09170 + +#define mmTPC0_CMDQ_CP_FENCE3_CNT 0xE09174 + +#define mmTPC0_CMDQ_CP_STS 0xE09178 + +#define mmTPC0_CMDQ_CP_CURRENT_INST_LO 0xE0917C + +#define mmTPC0_CMDQ_CP_CURRENT_INST_HI 0xE09180 + +#define mmTPC0_CMDQ_CP_BARRIER_CFG 0xE09184 + +#define mmTPC0_CMDQ_CP_DBG_0 0xE09188 + +#define mmTPC0_CMDQ_CQ_BUF_ADDR 0xE09308 + +#define mmTPC0_CMDQ_CQ_BUF_RDATA 0xE0930C + +#endif /* ASIC_REG_TPC0_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h new file mode 100644 index 000000000000..ed866d93c440 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_EML_CFG_MASKS_H_ +#define ASIC_REG_TPC0_EML_CFG_MASKS_H_ + +/* + ***************************************** + * TPC0_EML_CFG (Prototype: TPC_EML_CFG) + ***************************************** + */ + +/* TPC0_EML_CFG_DBG_CNT */ +#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_SHIFT 0 +#define TPC0_EML_CFG_DBG_CNT_DBG_ENTER_MASK 0x1 +#define TPC0_EML_CFG_DBG_CNT_DBG_EN_SHIFT 1 +#define TPC0_EML_CFG_DBG_CNT_DBG_EN_MASK 0x2 +#define TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT 2 +#define TPC0_EML_CFG_DBG_CNT_CORE_RST_MASK 0x4 +#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_SHIFT 4 +#define TPC0_EML_CFG_DBG_CNT_DCACHE_INV_MASK 0x10 +#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_SHIFT 5 +#define TPC0_EML_CFG_DBG_CNT_ICACHE_INV_MASK 0x20 +#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_SHIFT 6 +#define TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK 0x40 +#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_SHIFT 7 +#define TPC0_EML_CFG_DBG_CNT_SNG_STEP_MASK 0x80 +#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_SHIFT 16 +#define TPC0_EML_CFG_DBG_CNT_BP_DBGSW_EN_MASK 0x10000 + +/* TPC0_EML_CFG_DBG_STS */ +#define TPC0_EML_CFG_DBG_STS_DBG_MODE_SHIFT 0 +#define TPC0_EML_CFG_DBG_STS_DBG_MODE_MASK 0x1 +#define TPC0_EML_CFG_DBG_STS_CORE_READY_SHIFT 1 +#define TPC0_EML_CFG_DBG_STS_CORE_READY_MASK 0x2 +#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_SHIFT 2 +#define TPC0_EML_CFG_DBG_STS_DURING_KERNEL_MASK 0x4 +#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_SHIFT 3 +#define TPC0_EML_CFG_DBG_STS_ICACHE_IDLE_MASK 0x8 +#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_SHIFT 4 +#define TPC0_EML_CFG_DBG_STS_DCACHE_IDLE_MASK 0x10 +#define TPC0_EML_CFG_DBG_STS_QM_IDLE_SHIFT 5 +#define TPC0_EML_CFG_DBG_STS_QM_IDLE_MASK 0x20 +#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_SHIFT 6 +#define TPC0_EML_CFG_DBG_STS_WQ_IDLE_MASK 0x40 +#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_SHIFT 7 +#define TPC0_EML_CFG_DBG_STS_MSS_IDLE_MASK 0x80 +#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_SHIFT 8 +#define TPC0_EML_CFG_DBG_STS_DBG_CAUSE_MASK 0xFFFFFF00 + +/* TPC0_EML_CFG_DBG_PADD */ +#define TPC0_EML_CFG_DBG_PADD_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_PADD_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_PADD_COUNT */ +#define 
TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_PADD_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_PADD_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_PADD_COUNT_MATCH_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_PADD_EN */ +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_SHIFT 0 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE0_MASK 0x1 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_SHIFT 1 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE1_MASK 0x2 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_SHIFT 2 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE2_MASK 0x4 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_SHIFT 3 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE3_MASK 0x8 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_SHIFT 4 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE4_MASK 0x10 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_SHIFT 5 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE5_MASK 0x20 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_SHIFT 6 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE6_MASK 0x40 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_SHIFT 7 +#define TPC0_EML_CFG_DBG_PADD_EN_ENABLE7_MASK 0x80 + +/* TPC0_EML_CFG_DBG_VPADD_HIGH */ +#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_VPADD_HIGH_ADDRESS_MASK 0x1FF + +/* TPC0_EML_CFG_DBG_VPADD_LOW */ +#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_VPADD_LOW_ADDRESS_MASK 0x1FF + +/* TPC0_EML_CFG_DBG_VPADD_COUNT */ +#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_VPADD_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_VPADD_EN */ +#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_SHIFT 0 +#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE0_MASK 0x1 +#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_SHIFT 1 +#define TPC0_EML_CFG_DBG_VPADD_EN_ENABLE1_MASK 0x2 +#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_SHIFT 2 +#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N0_MASK 0x4 +#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_SHIFT 3 +#define TPC0_EML_CFG_DBG_VPADD_EN_RW_N1_MASK 0x8 + +/* TPC0_EML_CFG_DBG_SPADD_HIGH */ +#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPADD_HIGH_ADDRESS_MASK 0xFF + +/* TPC0_EML_CFG_DBG_SPADD_LOW */ +#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPADD_LOW_ADDRESS_MASK 0xFF + +/* TPC0_EML_CFG_DBG_SPADD_COUNT */ +#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPADD_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_SPADD_EN */ +#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE0_MASK 0x1 +#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_SHIFT 1 +#define TPC0_EML_CFG_DBG_SPADD_EN_ENABLE1_MASK 0x2 +#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_SHIFT 2 +#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N0_MASK 0x4 +#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_SHIFT 3 +#define TPC0_EML_CFG_DBG_SPADD_EN_RW_N1_MASK 0x8 + +/* TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH */ +#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AGUADD_MSB_LOW */ +#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AGUADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH */ +#define 
TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AGUADD_LSB_LOW */ +#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AGUADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AGUADD_COUNT */ +#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_AGUADD_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AGUADD_EN */ +#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_SHIFT 0 +#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE0_MASK 0x1 +#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_SHIFT 1 +#define TPC0_EML_CFG_DBG_AGUADD_EN_ENABLE1_MASK 0x2 +#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_SHIFT 2 +#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N0_MASK 0x4 +#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_SHIFT 3 +#define TPC0_EML_CFG_DBG_AGUADD_EN_RW_N1_MASK 0x8 + +/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH */ +#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW */ +#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH */ +#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW */ +#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT */ +#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_MATCH_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXIHBWADD_EN */ +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE0_MASK 0x1 +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_SHIFT 1 +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_ENABLE1_MASK 0x2 +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_SHIFT 2 +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N0_MASK 0x4 +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_SHIFT 3 +#define TPC0_EML_CFG_DBG_AXIHBWADD_EN_RW_N1_MASK 0x8 + +/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH */ +#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW */ +#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH */ +#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW */ +#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_ADDRESS_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT */ +#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_SHIFT 0 +#define 
TPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_MATCH_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXILBWADD_EN */ +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE0_MASK 0x1 +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_SHIFT 1 +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_ENABLE1_MASK 0x2 +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_SHIFT 2 +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N0_MASK 0x4 +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_SHIFT 3 +#define TPC0_EML_CFG_DBG_AXILBWADD_EN_RW_N1_MASK 0x8 + +/* TPC0_EML_CFG_DBG_SPDATA */ +#define TPC0_EML_CFG_DBG_SPDATA_DATA_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPDATA_DATA_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_SPDATA_COUNT */ +#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPDATA_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_MATCH_MASK 0xFF + +/* TPC0_EML_CFG_DBG_SPDATA_EN */ +#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_SHIFT 0 +#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE0_MASK 0x1 +#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_SHIFT 1 +#define TPC0_EML_CFG_DBG_SPDATA_EN_ENABLE1_MASK 0x2 +#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_SHIFT 2 +#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N0_MASK 0x4 +#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_SHIFT 3 +#define TPC0_EML_CFG_DBG_SPDATA_EN_RW_N1_MASK 0x8 + +/* TPC0_EML_CFG_DBG_AXIHBWDATA */ +#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWDATA_DATA_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT */ +#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWDATA_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXIHBWDATA_EN */ +#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_ENABLE_MASK 0x1 +#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_SHIFT 1 +#define TPC0_EML_CFG_DBG_AXIHBWDATA_EN_RW_N_MASK 0x2 + +/* TPC0_EML_CFG_DBG_AXILBWDATA */ +#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWDATA_DATA_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_AXILBWDATA_COUNT */ +#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWDATA_COUNT_COUNT_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH */ +#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH_MATCH_MASK 0xFF + +/* TPC0_EML_CFG_DBG_AXILBWDATA_EN */ +#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_SHIFT 0 +#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_ENABLE_MASK 0x1 +#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_SHIFT 1 +#define TPC0_EML_CFG_DBG_AXILBWDATA_EN_RW_N_MASK 0x2 + +/* TPC0_EML_CFG_DBG_D0_PC */ +#define TPC0_EML_CFG_DBG_D0_PC_PC_SHIFT 0 +#define TPC0_EML_CFG_DBG_D0_PC_PC_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_RTTCONFIG */ +#define TPC0_EML_CFG_RTTCONFIG_TR_EN_SHIFT 0 +#define TPC0_EML_CFG_RTTCONFIG_TR_EN_MASK 0x1 +#define TPC0_EML_CFG_RTTCONFIG_PRIO_SHIFT 1 +#define TPC0_EML_CFG_RTTCONFIG_PRIO_MASK 0x2 + +/* TPC0_EML_CFG_RTTPREDICATE */ +#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_SHIFT 0 +#define TPC0_EML_CFG_RTTPREDICATE_TR_EN_MASK 0x1 +#define TPC0_EML_CFG_RTTPREDICATE_GEN_SHIFT 1 +#define TPC0_EML_CFG_RTTPREDICATE_GEN_MASK 0x2 +#define TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_SHIFT 2 +#define 
TPC0_EML_CFG_RTTPREDICATE_USE_INTERVAL_MASK 0x4 +#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_SHIFT 16 +#define TPC0_EML_CFG_RTTPREDICATE_SPRF_MASK_MASK 0xFFFF0000 + +/* TPC0_EML_CFG_RTTPREDICATE_INTV */ +#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_SHIFT 0 +#define TPC0_EML_CFG_RTTPREDICATE_INTV_INTERVAL_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_RTTTS */ +#define TPC0_EML_CFG_RTTTS_TR_EN_SHIFT 0 +#define TPC0_EML_CFG_RTTTS_TR_EN_MASK 0x1 +#define TPC0_EML_CFG_RTTTS_GEN_SHIFT 1 +#define TPC0_EML_CFG_RTTTS_GEN_MASK 0x2 +#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_SHIFT 2 +#define TPC0_EML_CFG_RTTTS_COMPRESS_EN_MASK 0x4 + +/* TPC0_EML_CFG_RTTTS_INTV */ +#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_SHIFT 0 +#define TPC0_EML_CFG_RTTTS_INTV_INTERVAL_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_INST_INSERT */ +#define TPC0_EML_CFG_DBG_INST_INSERT_INST_SHIFT 0 +#define TPC0_EML_CFG_DBG_INST_INSERT_INST_MASK 0xFFFFFFFF + +/* TPC0_EML_CFG_DBG_INST_INSERT_CTL */ +#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_SHIFT 0 +#define TPC0_EML_CFG_DBG_INST_INSERT_CTL_INSERT_MASK 0x1 + +#endif /* ASIC_REG_TPC0_EML_CFG_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h new file mode 100644 index 000000000000..f1a1b4fa4841 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_EML_CFG_REGS_H_ +#define ASIC_REG_TPC0_EML_CFG_REGS_H_ + +/* + ***************************************** + * TPC0_EML_CFG (Prototype: TPC_EML_CFG) + ***************************************** + */ + +#define mmTPC0_EML_CFG_DBG_CNT 0x3040000 + +#define mmTPC0_EML_CFG_DBG_STS 0x3040004 + +#define mmTPC0_EML_CFG_DBG_PADD_0 0x3040008 + +#define mmTPC0_EML_CFG_DBG_PADD_1 0x304000C + +#define mmTPC0_EML_CFG_DBG_PADD_2 0x3040010 + +#define mmTPC0_EML_CFG_DBG_PADD_3 0x3040014 + +#define mmTPC0_EML_CFG_DBG_PADD_4 0x3040018 + +#define mmTPC0_EML_CFG_DBG_PADD_5 0x304001C + +#define mmTPC0_EML_CFG_DBG_PADD_6 0x3040020 + +#define mmTPC0_EML_CFG_DBG_PADD_7 0x3040024 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_0 0x3040028 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_1 0x304002C + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_2 0x3040030 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_3 0x3040034 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_4 0x3040038 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_5 0x304003C + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_6 0x3040040 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_7 0x3040044 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_0 0x3040048 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_1 0x304004C + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_2 0x3040050 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_3 0x3040054 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_4 0x3040058 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_5 0x304005C + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_6 0x3040060 + +#define mmTPC0_EML_CFG_DBG_PADD_COUNT_MATCH_7 0x3040064 + +#define mmTPC0_EML_CFG_DBG_PADD_EN 0x3040068 + +#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_0 0x304006C + +#define mmTPC0_EML_CFG_DBG_VPADD_HIGH_1 0x3040070 + +#define mmTPC0_EML_CFG_DBG_VPADD_LOW_0 0x3040074 + +#define mmTPC0_EML_CFG_DBG_VPADD_LOW_1 0x3040078 + +#define 
mmTPC0_EML_CFG_DBG_VPADD_COUNT_0 0x304007C + +#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_1 0x3040080 + +#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_0 0x3040084 + +#define mmTPC0_EML_CFG_DBG_VPADD_COUNT_MATCH_1 0x3040088 + +#define mmTPC0_EML_CFG_DBG_VPADD_EN 0x304008C + +#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_0 0x3040090 + +#define mmTPC0_EML_CFG_DBG_SPADD_HIGH_1 0x3040094 + +#define mmTPC0_EML_CFG_DBG_SPADD_LOW_0 0x3040098 + +#define mmTPC0_EML_CFG_DBG_SPADD_LOW_1 0x304009C + +#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_0 0x30400A0 + +#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_1 0x30400A4 + +#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_0 0x30400A8 + +#define mmTPC0_EML_CFG_DBG_SPADD_COUNT_MATCH_1 0x30400AC + +#define mmTPC0_EML_CFG_DBG_SPADD_EN 0x30400B0 + +#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_0 0x30400B4 + +#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_HIGH_1 0x30400B8 + +#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_0 0x30400BC + +#define mmTPC0_EML_CFG_DBG_AGUADD_MSB_LOW_1 0x30400C0 + +#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_0 0x30400C4 + +#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_HIGH_1 0x30400C8 + +#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_0 0x30400CC + +#define mmTPC0_EML_CFG_DBG_AGUADD_LSB_LOW_1 0x30400D0 + +#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_0 0x30400D4 + +#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_1 0x30400D8 + +#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_0 0x30400DC + +#define mmTPC0_EML_CFG_DBG_AGUADD_COUNT_MATCH_1 0x30400E0 + +#define mmTPC0_EML_CFG_DBG_AGUADD_EN 0x30400E4 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_0 0x30400E8 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_HIGH_1 0x30400EC + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_0 0x30400F0 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_MSB_LOW_1 0x30400F4 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_0 0x30400F8 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_HIGH_1 0x30400FC + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_0 0x3040100 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_LSB_LOW_1 0x3040104 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_0 0x3040108 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_1 0x304010C + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_0 0x3040110 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_COUNT_MATCH_1 0x3040114 + +#define mmTPC0_EML_CFG_DBG_AXIHBWADD_EN 0x3040118 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_0 0x304011C + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_HIGH_1 0x3040120 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_0 0x3040124 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_MSB_LOW_1 0x3040128 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_0 0x304012C + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_HIGH_1 0x3040130 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_0 0x3040134 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_LSB_LOW_1 0x3040138 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_0 0x304013C + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_1 0x3040140 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_0 0x3040144 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_COUNT_MATCH_1 0x3040148 + +#define mmTPC0_EML_CFG_DBG_AXILBWADD_EN 0x304014C + +#define mmTPC0_EML_CFG_DBG_SPDATA_0 0x3040150 + +#define mmTPC0_EML_CFG_DBG_SPDATA_1 0x3040154 + +#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_0 0x3040158 + +#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_1 0x304015C + +#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_0 0x3040160 + +#define mmTPC0_EML_CFG_DBG_SPDATA_COUNT_MATCH_1 0x3040164 + +#define mmTPC0_EML_CFG_DBG_SPDATA_EN 0x3040168 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_0 0x304016C + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_1 0x3040170 + +#define 
mmTPC0_EML_CFG_DBG_AXIHBWDATA_2 0x3040174 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_3 0x3040178 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_4 0x304017C + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_5 0x3040180 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_6 0x3040184 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_7 0x3040188 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_8 0x304018C + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_9 0x3040190 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_10 0x3040194 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_11 0x3040198 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_12 0x304019C + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_13 0x30401A0 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_14 0x30401A4 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_15 0x30401A8 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_16 0x30401AC + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_17 0x30401B0 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_18 0x30401B4 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_19 0x30401B8 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_20 0x30401BC + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_21 0x30401C0 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_22 0x30401C4 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_23 0x30401C8 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_24 0x30401CC + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_25 0x30401D0 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_26 0x30401D4 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_27 0x30401D8 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_28 0x30401DC + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_29 0x30401E0 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_30 0x30401E4 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_31 0x30401E8 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_COUNT 0x30401EC + +#define mmTPC0_EML_CFG_DBG_AXIHBWDAT_COUNT_MATCH 0x30401F0 + +#define mmTPC0_EML_CFG_DBG_AXIHBWDATA_EN 0x30401F4 + +#define mmTPC0_EML_CFG_DBG_AXILBWDATA 0x30401F8 + +#define mmTPC0_EML_CFG_DBG_AXILBWDATA_COUNT 0x30401FC + +#define mmTPC0_EML_CFG_DBG_AXILBWDAT_COUNT_MATCH 0x3040200 + +#define mmTPC0_EML_CFG_DBG_AXILBWDATA_EN 0x3040204 + +#define mmTPC0_EML_CFG_DBG_D0_PC 0x3040208 + +#define mmTPC0_EML_CFG_RTTCONFIG 0x3040300 + +#define mmTPC0_EML_CFG_RTTPREDICATE 0x3040304 + +#define mmTPC0_EML_CFG_RTTPREDICATE_INTV 0x3040308 + +#define mmTPC0_EML_CFG_RTTTS 0x304030C + +#define mmTPC0_EML_CFG_RTTTS_INTV 0x3040310 + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_0 0x3040314 + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_1 0x3040318 + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_2 0x304031C + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_3 0x3040320 + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_4 0x3040324 + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_5 0x3040328 + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_6 0x304032C + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_7 0x3040330 + +#define mmTPC0_EML_CFG_DBG_INST_INSERT_CTL 0x3040334 + +#endif /* ASIC_REG_TPC0_EML_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h new file mode 100644 index 000000000000..7f86621179a5 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_NRTR_MASKS_H_ +#define ASIC_REG_TPC0_NRTR_MASKS_H_ + +/* + ***************************************** + * TPC0_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +/* TPC0_NRTR_HBW_MAX_CRED */ +#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_SHIFT 0 +#define TPC0_NRTR_HBW_MAX_CRED_WR_RQ_MASK 0x3F +#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_SHIFT 8 +#define TPC0_NRTR_HBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_SHIFT 16 +#define TPC0_NRTR_HBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_SHIFT 24 +#define TPC0_NRTR_HBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* TPC0_NRTR_LBW_MAX_CRED */ +#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_SHIFT 0 +#define TPC0_NRTR_LBW_MAX_CRED_WR_RQ_MASK 0x3F +#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_SHIFT 8 +#define TPC0_NRTR_LBW_MAX_CRED_WR_RS_MASK 0x3F00 +#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_SHIFT 16 +#define TPC0_NRTR_LBW_MAX_CRED_RD_RQ_MASK 0x3F0000 +#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_SHIFT 24 +#define TPC0_NRTR_LBW_MAX_CRED_RD_RS_MASK 0x3F000000 + +/* TPC0_NRTR_DBG_E_ARB */ +#define TPC0_NRTR_DBG_E_ARB_W_SHIFT 0 +#define TPC0_NRTR_DBG_E_ARB_W_MASK 0x7 +#define TPC0_NRTR_DBG_E_ARB_S_SHIFT 8 +#define TPC0_NRTR_DBG_E_ARB_S_MASK 0x700 +#define TPC0_NRTR_DBG_E_ARB_N_SHIFT 16 +#define TPC0_NRTR_DBG_E_ARB_N_MASK 0x70000 +#define TPC0_NRTR_DBG_E_ARB_L_SHIFT 24 +#define TPC0_NRTR_DBG_E_ARB_L_MASK 0x7000000 + +/* TPC0_NRTR_DBG_W_ARB */ +#define TPC0_NRTR_DBG_W_ARB_E_SHIFT 0 +#define TPC0_NRTR_DBG_W_ARB_E_MASK 0x7 +#define TPC0_NRTR_DBG_W_ARB_S_SHIFT 8 +#define TPC0_NRTR_DBG_W_ARB_S_MASK 0x700 +#define TPC0_NRTR_DBG_W_ARB_N_SHIFT 16 +#define TPC0_NRTR_DBG_W_ARB_N_MASK 0x70000 +#define TPC0_NRTR_DBG_W_ARB_L_SHIFT 24 +#define TPC0_NRTR_DBG_W_ARB_L_MASK 0x7000000 + +/* TPC0_NRTR_DBG_N_ARB */ +#define TPC0_NRTR_DBG_N_ARB_W_SHIFT 0 +#define TPC0_NRTR_DBG_N_ARB_W_MASK 0x7 +#define TPC0_NRTR_DBG_N_ARB_E_SHIFT 8 +#define TPC0_NRTR_DBG_N_ARB_E_MASK 0x700 +#define TPC0_NRTR_DBG_N_ARB_S_SHIFT 16 +#define TPC0_NRTR_DBG_N_ARB_S_MASK 0x70000 +#define TPC0_NRTR_DBG_N_ARB_L_SHIFT 24 +#define TPC0_NRTR_DBG_N_ARB_L_MASK 0x7000000 + +/* TPC0_NRTR_DBG_S_ARB */ +#define TPC0_NRTR_DBG_S_ARB_W_SHIFT 0 +#define TPC0_NRTR_DBG_S_ARB_W_MASK 0x7 +#define TPC0_NRTR_DBG_S_ARB_E_SHIFT 8 +#define TPC0_NRTR_DBG_S_ARB_E_MASK 0x700 +#define TPC0_NRTR_DBG_S_ARB_N_SHIFT 16 +#define TPC0_NRTR_DBG_S_ARB_N_MASK 0x70000 +#define TPC0_NRTR_DBG_S_ARB_L_SHIFT 24 +#define TPC0_NRTR_DBG_S_ARB_L_MASK 0x7000000 + +/* TPC0_NRTR_DBG_L_ARB */ +#define TPC0_NRTR_DBG_L_ARB_W_SHIFT 0 +#define TPC0_NRTR_DBG_L_ARB_W_MASK 0x7 +#define TPC0_NRTR_DBG_L_ARB_E_SHIFT 8 +#define TPC0_NRTR_DBG_L_ARB_E_MASK 0x700 +#define TPC0_NRTR_DBG_L_ARB_S_SHIFT 16 +#define TPC0_NRTR_DBG_L_ARB_S_MASK 0x70000 +#define TPC0_NRTR_DBG_L_ARB_N_SHIFT 24 +#define TPC0_NRTR_DBG_L_ARB_N_MASK 0x7000000 + +/* TPC0_NRTR_DBG_E_ARB_MAX */ +#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_SHIFT 0 +#define TPC0_NRTR_DBG_E_ARB_MAX_CREDIT_MASK 0x3F + +/* TPC0_NRTR_DBG_W_ARB_MAX */ +#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_SHIFT 0 +#define TPC0_NRTR_DBG_W_ARB_MAX_CREDIT_MASK 0x3F + +/* TPC0_NRTR_DBG_N_ARB_MAX */ +#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_SHIFT 0 +#define TPC0_NRTR_DBG_N_ARB_MAX_CREDIT_MASK 0x3F + +/* TPC0_NRTR_DBG_S_ARB_MAX */ +#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_SHIFT 0 +#define TPC0_NRTR_DBG_S_ARB_MAX_CREDIT_MASK 0x3F + +/* 
TPC0_NRTR_DBG_L_ARB_MAX */ +#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_SHIFT 0 +#define TPC0_NRTR_DBG_L_ARB_MAX_CREDIT_MASK 0x3F + +/* TPC0_NRTR_SPLIT_COEF */ +#define TPC0_NRTR_SPLIT_COEF_VAL_SHIFT 0 +#define TPC0_NRTR_SPLIT_COEF_VAL_MASK 0xFFFF + +/* TPC0_NRTR_SPLIT_CFG */ +#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_SHIFT 0 +#define TPC0_NRTR_SPLIT_CFG_FORCE_WAK_ORDER_MASK 0x1 +#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_SHIFT 1 +#define TPC0_NRTR_SPLIT_CFG_FORCE_STRONG_ORDER_MASK 0x2 +#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_SHIFT 2 +#define TPC0_NRTR_SPLIT_CFG_DEFAULT_MESH_MASK 0xC +#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_SHIFT 4 +#define TPC0_NRTR_SPLIT_CFG_RD_RATE_LIM_EN_MASK 0x10 +#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_SHIFT 5 +#define TPC0_NRTR_SPLIT_CFG_WR_RATE_LIM_EN_MASK 0x20 +#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_SHIFT 6 +#define TPC0_NRTR_SPLIT_CFG_B2B_OPT_MASK 0x1C0 + +/* TPC0_NRTR_SPLIT_RD_SAT */ +#define TPC0_NRTR_SPLIT_RD_SAT_VAL_SHIFT 0 +#define TPC0_NRTR_SPLIT_RD_SAT_VAL_MASK 0xFFFF + +/* TPC0_NRTR_SPLIT_RD_RST_TOKEN */ +#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_SHIFT 0 +#define TPC0_NRTR_SPLIT_RD_RST_TOKEN_VAL_MASK 0xFFFF + +/* TPC0_NRTR_SPLIT_RD_TIMEOUT */ +#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_SHIFT 0 +#define TPC0_NRTR_SPLIT_RD_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* TPC0_NRTR_SPLIT_WR_SAT */ +#define TPC0_NRTR_SPLIT_WR_SAT_VAL_SHIFT 0 +#define TPC0_NRTR_SPLIT_WR_SAT_VAL_MASK 0xFFFF + +/* TPC0_NRTR_WPLIT_WR_TST_TOLEN */ +#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_SHIFT 0 +#define TPC0_NRTR_WPLIT_WR_TST_TOLEN_VAL_MASK 0xFFFF + +/* TPC0_NRTR_SPLIT_WR_TIMEOUT */ +#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_SHIFT 0 +#define TPC0_NRTR_SPLIT_WR_TIMEOUT_VAL_MASK 0xFFFFFFFF + +/* TPC0_NRTR_HBW_RANGE_HIT */ +#define TPC0_NRTR_HBW_RANGE_HIT_IND_SHIFT 0 +#define TPC0_NRTR_HBW_RANGE_HIT_IND_MASK 0xFF + +/* TPC0_NRTR_HBW_RANGE_MASK_L */ +#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_SHIFT 0 +#define TPC0_NRTR_HBW_RANGE_MASK_L_VAL_MASK 0xFFFFFFFF + +/* TPC0_NRTR_HBW_RANGE_MASK_H */ +#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_SHIFT 0 +#define TPC0_NRTR_HBW_RANGE_MASK_H_VAL_MASK 0x3FFFF + +/* TPC0_NRTR_HBW_RANGE_BASE_L */ +#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_SHIFT 0 +#define TPC0_NRTR_HBW_RANGE_BASE_L_VAL_MASK 0xFFFFFFFF + +/* TPC0_NRTR_HBW_RANGE_BASE_H */ +#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_SHIFT 0 +#define TPC0_NRTR_HBW_RANGE_BASE_H_VAL_MASK 0x3FFFF + +/* TPC0_NRTR_LBW_RANGE_HIT */ +#define TPC0_NRTR_LBW_RANGE_HIT_IND_SHIFT 0 +#define TPC0_NRTR_LBW_RANGE_HIT_IND_MASK 0xFFFF + +/* TPC0_NRTR_LBW_RANGE_MASK */ +#define TPC0_NRTR_LBW_RANGE_MASK_VAL_SHIFT 0 +#define TPC0_NRTR_LBW_RANGE_MASK_VAL_MASK 0x3FFFFFF + +/* TPC0_NRTR_LBW_RANGE_BASE */ +#define TPC0_NRTR_LBW_RANGE_BASE_VAL_SHIFT 0 +#define TPC0_NRTR_LBW_RANGE_BASE_VAL_MASK 0x3FFFFFF + +/* TPC0_NRTR_RGLTR */ +#define TPC0_NRTR_RGLTR_WR_EN_SHIFT 0 +#define TPC0_NRTR_RGLTR_WR_EN_MASK 0x1 +#define TPC0_NRTR_RGLTR_RD_EN_SHIFT 4 +#define TPC0_NRTR_RGLTR_RD_EN_MASK 0x10 + +/* TPC0_NRTR_RGLTR_WR_RESULT */ +#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_SHIFT 0 +#define TPC0_NRTR_RGLTR_WR_RESULT_VAL_MASK 0xFF + +/* TPC0_NRTR_RGLTR_RD_RESULT */ +#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_SHIFT 0 +#define TPC0_NRTR_RGLTR_RD_RESULT_VAL_MASK 0xFF + +/* TPC0_NRTR_SCRAMB_EN */ +#define TPC0_NRTR_SCRAMB_EN_VAL_SHIFT 0 +#define TPC0_NRTR_SCRAMB_EN_VAL_MASK 0x1 + +/* TPC0_NRTR_NON_LIN_SCRAMB */ +#define TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT 0 +#define TPC0_NRTR_NON_LIN_SCRAMB_EN_MASK 0x1 + +#endif /* ASIC_REG_TPC0_NRTR_MASKS_H_ */ + diff --git 
a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h new file mode 100644 index 000000000000..dc280f4e6608 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_NRTR_REGS_H_ +#define ASIC_REG_TPC0_NRTR_REGS_H_ + +/* + ***************************************** + * TPC0_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +#define mmTPC0_NRTR_HBW_MAX_CRED 0xE00100 + +#define mmTPC0_NRTR_LBW_MAX_CRED 0xE00120 + +#define mmTPC0_NRTR_DBG_E_ARB 0xE00300 + +#define mmTPC0_NRTR_DBG_W_ARB 0xE00304 + +#define mmTPC0_NRTR_DBG_N_ARB 0xE00308 + +#define mmTPC0_NRTR_DBG_S_ARB 0xE0030C + +#define mmTPC0_NRTR_DBG_L_ARB 0xE00310 + +#define mmTPC0_NRTR_DBG_E_ARB_MAX 0xE00320 + +#define mmTPC0_NRTR_DBG_W_ARB_MAX 0xE00324 + +#define mmTPC0_NRTR_DBG_N_ARB_MAX 0xE00328 + +#define mmTPC0_NRTR_DBG_S_ARB_MAX 0xE0032C + +#define mmTPC0_NRTR_DBG_L_ARB_MAX 0xE00330 + +#define mmTPC0_NRTR_SPLIT_COEF_0 0xE00400 + +#define mmTPC0_NRTR_SPLIT_COEF_1 0xE00404 + +#define mmTPC0_NRTR_SPLIT_COEF_2 0xE00408 + +#define mmTPC0_NRTR_SPLIT_COEF_3 0xE0040C + +#define mmTPC0_NRTR_SPLIT_COEF_4 0xE00410 + +#define mmTPC0_NRTR_SPLIT_COEF_5 0xE00414 + +#define mmTPC0_NRTR_SPLIT_COEF_6 0xE00418 + +#define mmTPC0_NRTR_SPLIT_COEF_7 0xE0041C + +#define mmTPC0_NRTR_SPLIT_COEF_8 0xE00420 + +#define mmTPC0_NRTR_SPLIT_COEF_9 0xE00424 + +#define mmTPC0_NRTR_SPLIT_CFG 0xE00440 + +#define mmTPC0_NRTR_SPLIT_RD_SAT 0xE00444 + +#define mmTPC0_NRTR_SPLIT_RD_RST_TOKEN 0xE00448 + +#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_0 0xE0044C + +#define mmTPC0_NRTR_SPLIT_RD_TIMEOUT_1 0xE00450 + +#define mmTPC0_NRTR_SPLIT_WR_SAT 0xE00454 + +#define mmTPC0_NRTR_WPLIT_WR_TST_TOLEN 0xE00458 + +#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_0 0xE0045C + +#define mmTPC0_NRTR_SPLIT_WR_TIMEOUT_1 0xE00460 + +#define mmTPC0_NRTR_HBW_RANGE_HIT 0xE00470 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_0 0xE00480 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_1 0xE00484 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_2 0xE00488 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_3 0xE0048C + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_4 0xE00490 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_5 0xE00494 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_6 0xE00498 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_L_7 0xE0049C + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_0 0xE004A0 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_1 0xE004A4 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_2 0xE004A8 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_3 0xE004AC + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_4 0xE004B0 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_5 0xE004B4 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_6 0xE004B8 + +#define mmTPC0_NRTR_HBW_RANGE_MASK_H_7 0xE004BC + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_0 0xE004C0 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_1 0xE004C4 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_2 0xE004C8 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_3 0xE004CC + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_4 0xE004D0 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_5 0xE004D4 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_6 0xE004D8 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_L_7 0xE004DC + +#define mmTPC0_NRTR_HBW_RANGE_BASE_H_0 0xE004E0 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_H_1 0xE004E4 + 
+#define mmTPC0_NRTR_HBW_RANGE_BASE_H_2 0xE004E8 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_H_3 0xE004EC + +#define mmTPC0_NRTR_HBW_RANGE_BASE_H_4 0xE004F0 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_H_5 0xE004F4 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_H_6 0xE004F8 + +#define mmTPC0_NRTR_HBW_RANGE_BASE_H_7 0xE004FC + +#define mmTPC0_NRTR_LBW_RANGE_HIT 0xE00500 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_0 0xE00510 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_1 0xE00514 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_2 0xE00518 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_3 0xE0051C + +#define mmTPC0_NRTR_LBW_RANGE_MASK_4 0xE00520 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_5 0xE00524 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_6 0xE00528 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_7 0xE0052C + +#define mmTPC0_NRTR_LBW_RANGE_MASK_8 0xE00530 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_9 0xE00534 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_10 0xE00538 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_11 0xE0053C + +#define mmTPC0_NRTR_LBW_RANGE_MASK_12 0xE00540 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_13 0xE00544 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_14 0xE00548 + +#define mmTPC0_NRTR_LBW_RANGE_MASK_15 0xE0054C + +#define mmTPC0_NRTR_LBW_RANGE_BASE_0 0xE00550 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_1 0xE00554 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_2 0xE00558 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_3 0xE0055C + +#define mmTPC0_NRTR_LBW_RANGE_BASE_4 0xE00560 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_5 0xE00564 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_6 0xE00568 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_7 0xE0056C + +#define mmTPC0_NRTR_LBW_RANGE_BASE_8 0xE00570 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_9 0xE00574 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_10 0xE00578 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_11 0xE0057C + +#define mmTPC0_NRTR_LBW_RANGE_BASE_12 0xE00580 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_13 0xE00584 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_14 0xE00588 + +#define mmTPC0_NRTR_LBW_RANGE_BASE_15 0xE0058C + +#define mmTPC0_NRTR_RGLTR 0xE00590 + +#define mmTPC0_NRTR_RGLTR_WR_RESULT 0xE00594 + +#define mmTPC0_NRTR_RGLTR_RD_RESULT 0xE00598 + +#define mmTPC0_NRTR_SCRAMB_EN 0xE00600 + +#define mmTPC0_NRTR_NON_LIN_SCRAMB 0xE00604 + +#endif /* ASIC_REG_TPC0_NRTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h new file mode 100644 index 000000000000..80d97ee3d8d6 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h @@ -0,0 +1,465 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_QM_MASKS_H_ +#define ASIC_REG_TPC0_QM_MASKS_H_ + +/* + ***************************************** + * TPC0_QM (Prototype: QMAN) + ***************************************** + */ + +/* TPC0_QM_GLBL_CFG0 */ +#define TPC0_QM_GLBL_CFG0_PQF_EN_SHIFT 0 +#define TPC0_QM_GLBL_CFG0_PQF_EN_MASK 0x1 +#define TPC0_QM_GLBL_CFG0_CQF_EN_SHIFT 1 +#define TPC0_QM_GLBL_CFG0_CQF_EN_MASK 0x2 +#define TPC0_QM_GLBL_CFG0_CP_EN_SHIFT 2 +#define TPC0_QM_GLBL_CFG0_CP_EN_MASK 0x4 +#define TPC0_QM_GLBL_CFG0_DMA_EN_SHIFT 3 +#define TPC0_QM_GLBL_CFG0_DMA_EN_MASK 0x8 + +/* TPC0_QM_GLBL_CFG1 */ +#define TPC0_QM_GLBL_CFG1_PQF_STOP_SHIFT 0 +#define TPC0_QM_GLBL_CFG1_PQF_STOP_MASK 0x1 +#define TPC0_QM_GLBL_CFG1_CQF_STOP_SHIFT 1 +#define TPC0_QM_GLBL_CFG1_CQF_STOP_MASK 0x2 +#define TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT 2 +#define TPC0_QM_GLBL_CFG1_CP_STOP_MASK 0x4 +#define TPC0_QM_GLBL_CFG1_DMA_STOP_SHIFT 3 +#define TPC0_QM_GLBL_CFG1_DMA_STOP_MASK 0x8 +#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_SHIFT 8 +#define TPC0_QM_GLBL_CFG1_PQF_FLUSH_MASK 0x100 +#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_SHIFT 9 +#define TPC0_QM_GLBL_CFG1_CQF_FLUSH_MASK 0x200 +#define TPC0_QM_GLBL_CFG1_CP_FLUSH_SHIFT 10 +#define TPC0_QM_GLBL_CFG1_CP_FLUSH_MASK 0x400 +#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_SHIFT 11 +#define TPC0_QM_GLBL_CFG1_DMA_FLUSH_MASK 0x800 + +/* TPC0_QM_GLBL_PROT */ +#define TPC0_QM_GLBL_PROT_PQF_PROT_SHIFT 0 +#define TPC0_QM_GLBL_PROT_PQF_PROT_MASK 0x1 +#define TPC0_QM_GLBL_PROT_CQF_PROT_SHIFT 1 +#define TPC0_QM_GLBL_PROT_CQF_PROT_MASK 0x2 +#define TPC0_QM_GLBL_PROT_CP_PROT_SHIFT 2 +#define TPC0_QM_GLBL_PROT_CP_PROT_MASK 0x4 +#define TPC0_QM_GLBL_PROT_DMA_PROT_SHIFT 3 +#define TPC0_QM_GLBL_PROT_DMA_PROT_MASK 0x8 +#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_SHIFT 4 +#define TPC0_QM_GLBL_PROT_PQF_ERR_PROT_MASK 0x10 +#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_SHIFT 5 +#define TPC0_QM_GLBL_PROT_CQF_ERR_PROT_MASK 0x20 +#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_SHIFT 6 +#define TPC0_QM_GLBL_PROT_CP_ERR_PROT_MASK 0x40 +#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_SHIFT 7 +#define TPC0_QM_GLBL_PROT_DMA_ERR_PROT_MASK 0x80 + +/* TPC0_QM_GLBL_ERR_CFG */ +#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_SHIFT 0 +#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_INT_EN_MASK 0x1 +#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT 1 +#define TPC0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_MASK 0x2 +#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT 2 +#define TPC0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_MASK 0x4 +#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_SHIFT 3 +#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_INT_EN_MASK 0x8 +#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT 4 +#define TPC0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_MASK 0x10 +#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT 5 +#define TPC0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_MASK 0x20 +#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_SHIFT 6 +#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_INT_EN_MASK 0x40 +#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT 7 +#define TPC0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_MASK 0x80 +#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT 8 +#define TPC0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_MASK 0x100 +#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_SHIFT 9 +#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_INT_EN_MASK 0x200 +#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_SHIFT 10 +#define TPC0_QM_GLBL_ERR_CFG_DMA_ERR_MSG_EN_MASK 0x400 +#define TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT 11 +#define 
TPC0_QM_GLBL_ERR_CFG_DMA_STOP_ON_ERR_MASK 0x800 + +/* TPC0_QM_GLBL_ERR_ADDR_LO */ +#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_GLBL_ERR_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_GLBL_ERR_ADDR_HI */ +#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_GLBL_ERR_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_GLBL_ERR_WDATA */ +#define TPC0_QM_GLBL_ERR_WDATA_VAL_SHIFT 0 +#define TPC0_QM_GLBL_ERR_WDATA_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_GLBL_SECURE_PROPS */ +#define TPC0_QM_GLBL_SECURE_PROPS_ASID_SHIFT 0 +#define TPC0_QM_GLBL_SECURE_PROPS_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_SECURE_PROPS_MMBP_MASK 0x400 + +/* TPC0_QM_GLBL_NON_SECURE_PROPS */ +#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_SHIFT 0 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_ASID_MASK 0x3FF +#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_SHIFT 10 +#define TPC0_QM_GLBL_NON_SECURE_PROPS_MMBP_MASK 0x400 + +/* TPC0_QM_GLBL_STS0 */ +#define TPC0_QM_GLBL_STS0_PQF_IDLE_SHIFT 0 +#define TPC0_QM_GLBL_STS0_PQF_IDLE_MASK 0x1 +#define TPC0_QM_GLBL_STS0_CQF_IDLE_SHIFT 1 +#define TPC0_QM_GLBL_STS0_CQF_IDLE_MASK 0x2 +#define TPC0_QM_GLBL_STS0_CP_IDLE_SHIFT 2 +#define TPC0_QM_GLBL_STS0_CP_IDLE_MASK 0x4 +#define TPC0_QM_GLBL_STS0_DMA_IDLE_SHIFT 3 +#define TPC0_QM_GLBL_STS0_DMA_IDLE_MASK 0x8 +#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_SHIFT 4 +#define TPC0_QM_GLBL_STS0_PQF_IS_STOP_MASK 0x10 +#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_SHIFT 5 +#define TPC0_QM_GLBL_STS0_CQF_IS_STOP_MASK 0x20 +#define TPC0_QM_GLBL_STS0_CP_IS_STOP_SHIFT 6 +#define TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK 0x40 +#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_SHIFT 7 +#define TPC0_QM_GLBL_STS0_DMA_IS_STOP_MASK 0x80 + +/* TPC0_QM_GLBL_STS1 */ +#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_SHIFT 0 +#define TPC0_QM_GLBL_STS1_PQF_RD_ERR_MASK 0x1 +#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_SHIFT 1 +#define TPC0_QM_GLBL_STS1_CQF_RD_ERR_MASK 0x2 +#define TPC0_QM_GLBL_STS1_CP_RD_ERR_SHIFT 2 +#define TPC0_QM_GLBL_STS1_CP_RD_ERR_MASK 0x4 +#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_SHIFT 3 +#define TPC0_QM_GLBL_STS1_CP_UNDEF_CMD_ERR_MASK 0x8 +#define TPC0_QM_GLBL_STS1_CP_STOP_OP_SHIFT 4 +#define TPC0_QM_GLBL_STS1_CP_STOP_OP_MASK 0x10 +#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_SHIFT 5 +#define TPC0_QM_GLBL_STS1_CP_MSG_WR_ERR_MASK 0x20 +#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_SHIFT 8 +#define TPC0_QM_GLBL_STS1_DMA_RD_ERR_MASK 0x100 +#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_SHIFT 9 +#define TPC0_QM_GLBL_STS1_DMA_WR_ERR_MASK 0x200 +#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_SHIFT 10 +#define TPC0_QM_GLBL_STS1_DMA_RD_MSG_ERR_MASK 0x400 +#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_SHIFT 11 +#define TPC0_QM_GLBL_STS1_DMA_WR_MSG_ERR_MASK 0x800 + +/* TPC0_QM_PQ_BASE_LO */ +#define TPC0_QM_PQ_BASE_LO_VAL_SHIFT 0 +#define TPC0_QM_PQ_BASE_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_BASE_HI */ +#define TPC0_QM_PQ_BASE_HI_VAL_SHIFT 0 +#define TPC0_QM_PQ_BASE_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_SIZE */ +#define TPC0_QM_PQ_SIZE_VAL_SHIFT 0 +#define TPC0_QM_PQ_SIZE_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PI */ +#define TPC0_QM_PQ_PI_VAL_SHIFT 0 +#define TPC0_QM_PQ_PI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_CI */ +#define TPC0_QM_PQ_CI_VAL_SHIFT 0 +#define TPC0_QM_PQ_CI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_CFG0 */ +#define TPC0_QM_PQ_CFG0_RESERVED_SHIFT 0 +#define TPC0_QM_PQ_CFG0_RESERVED_MASK 0x1 + +/* TPC0_QM_PQ_CFG1 */ +#define TPC0_QM_PQ_CFG1_CREDIT_LIM_SHIFT 0 +#define TPC0_QM_PQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define TPC0_QM_PQ_CFG1_MAX_INFLIGHT_MASK 
0xFFFF0000 + +/* TPC0_QM_PQ_ARUSER */ +#define TPC0_QM_PQ_ARUSER_NOSNOOP_SHIFT 0 +#define TPC0_QM_PQ_ARUSER_NOSNOOP_MASK 0x1 +#define TPC0_QM_PQ_ARUSER_WORD_SHIFT 1 +#define TPC0_QM_PQ_ARUSER_WORD_MASK 0x2 + +/* TPC0_QM_PQ_PUSH0 */ +#define TPC0_QM_PQ_PUSH0_PTR_LO_SHIFT 0 +#define TPC0_QM_PQ_PUSH0_PTR_LO_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PUSH1 */ +#define TPC0_QM_PQ_PUSH1_PTR_HI_SHIFT 0 +#define TPC0_QM_PQ_PUSH1_PTR_HI_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PUSH2 */ +#define TPC0_QM_PQ_PUSH2_TSIZE_SHIFT 0 +#define TPC0_QM_PQ_PUSH2_TSIZE_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_PUSH3 */ +#define TPC0_QM_PQ_PUSH3_RPT_SHIFT 0 +#define TPC0_QM_PQ_PUSH3_RPT_MASK 0xFFFF +#define TPC0_QM_PQ_PUSH3_CTL_SHIFT 16 +#define TPC0_QM_PQ_PUSH3_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_PQ_STS0 */ +#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_SHIFT 0 +#define TPC0_QM_PQ_STS0_PQ_CREDIT_CNT_MASK 0xFFFF +#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_SHIFT 16 +#define TPC0_QM_PQ_STS0_PQ_FREE_CNT_MASK 0xFFFF0000 + +/* TPC0_QM_PQ_STS1 */ +#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_PQ_STS1_PQ_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_SHIFT 30 +#define TPC0_QM_PQ_STS1_PQ_BUF_EMPTY_MASK 0x40000000 +#define TPC0_QM_PQ_STS1_PQ_BUSY_SHIFT 31 +#define TPC0_QM_PQ_STS1_PQ_BUSY_MASK 0x80000000 + +/* TPC0_QM_PQ_RD_RATE_LIM_EN */ +#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN */ +#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* TPC0_QM_PQ_RD_RATE_LIM_SAT */ +#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* TPC0_QM_PQ_RD_RATE_LIM_TOUT */ +#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define TPC0_QM_PQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* TPC0_QM_CQ_CFG0 */ +#define TPC0_QM_CQ_CFG0_RESERVED_SHIFT 0 +#define TPC0_QM_CQ_CFG0_RESERVED_MASK 0x1 + +/* TPC0_QM_CQ_CFG1 */ +#define TPC0_QM_CQ_CFG1_CREDIT_LIM_SHIFT 0 +#define TPC0_QM_CQ_CFG1_CREDIT_LIM_MASK 0xFFFF +#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_SHIFT 16 +#define TPC0_QM_CQ_CFG1_MAX_INFLIGHT_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_ARUSER */ +#define TPC0_QM_CQ_ARUSER_NOSNOOP_SHIFT 0 +#define TPC0_QM_CQ_ARUSER_NOSNOOP_MASK 0x1 +#define TPC0_QM_CQ_ARUSER_WORD_SHIFT 1 +#define TPC0_QM_CQ_ARUSER_WORD_MASK 0x2 + +/* TPC0_QM_CQ_PTR_LO */ +#define TPC0_QM_CQ_PTR_LO_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI */ +#define TPC0_QM_CQ_PTR_HI_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE */ +#define TPC0_QM_CQ_TSIZE_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL */ +#define TPC0_QM_CQ_CTL_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_PTR_LO_STS */ +#define TPC0_QM_CQ_PTR_LO_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_LO_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_PTR_HI_STS */ +#define TPC0_QM_CQ_PTR_HI_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_PTR_HI_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_TSIZE_STS */ +#define TPC0_QM_CQ_TSIZE_STS_VAL_SHIFT 0 +#define TPC0_QM_CQ_TSIZE_STS_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_CTL_STS */ +#define TPC0_QM_CQ_CTL_STS_RPT_SHIFT 0 +#define TPC0_QM_CQ_CTL_STS_RPT_MASK 0xFFFF +#define TPC0_QM_CQ_CTL_STS_CTL_SHIFT 16 +#define TPC0_QM_CQ_CTL_STS_CTL_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_STS0 */ +#define TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_SHIFT 0 +#define 
TPC0_QM_CQ_STS0_CQ_CREDIT_CNT_MASK 0xFFFF +#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_SHIFT 16 +#define TPC0_QM_CQ_STS0_CQ_FREE_CNT_MASK 0xFFFF0000 + +/* TPC0_QM_CQ_STS1 */ +#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_CQ_STS1_CQ_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_SHIFT 30 +#define TPC0_QM_CQ_STS1_CQ_BUF_EMPTY_MASK 0x40000000 +#define TPC0_QM_CQ_STS1_CQ_BUSY_SHIFT 31 +#define TPC0_QM_CQ_STS1_CQ_BUSY_MASK 0x80000000 + +/* TPC0_QM_CQ_RD_RATE_LIM_EN */ +#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_EN_VAL_MASK 0x1 + +/* TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN */ +#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN_VAL_MASK 0xFFFF + +/* TPC0_QM_CQ_RD_RATE_LIM_SAT */ +#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_SAT_VAL_MASK 0xFFFF + +/* TPC0_QM_CQ_RD_RATE_LIM_TOUT */ +#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_SHIFT 0 +#define TPC0_QM_CQ_RD_RATE_LIM_TOUT_VAL_MASK 0x7FFFFFFF + +/* TPC0_QM_CQ_IFIFO_CNT */ +#define TPC0_QM_CQ_IFIFO_CNT_VAL_SHIFT 0 +#define TPC0_QM_CQ_IFIFO_CNT_VAL_MASK 0x3 + +/* TPC0_QM_CP_MSG_BASE0_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE0_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE0_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE0_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE1_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE1_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE1_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE1_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE2_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE2_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE2_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE2_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE3_ADDR_LO */ +#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE3_ADDR_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_MSG_BASE3_ADDR_HI */ +#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_MSG_BASE3_ADDR_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_TSIZE_OFFSET */ +#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_TSIZE_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET */ +#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET */ +#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET */ +#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET */ +#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_LDMA_COMMIT_OFFSET */ +#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_SHIFT 0 +#define TPC0_QM_CP_LDMA_COMMIT_OFFSET_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_FENCE0_RDATA */ +#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE0_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE1_RDATA */ +#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE1_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE2_RDATA */ 
+#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE2_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE3_RDATA */ +#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE3_RDATA_INC_VAL_MASK 0xF + +/* TPC0_QM_CP_FENCE0_CNT */ +#define TPC0_QM_CP_FENCE0_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE0_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_FENCE1_CNT */ +#define TPC0_QM_CP_FENCE1_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE1_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_FENCE2_CNT */ +#define TPC0_QM_CP_FENCE2_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE2_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_FENCE3_CNT */ +#define TPC0_QM_CP_FENCE3_CNT_VAL_SHIFT 0 +#define TPC0_QM_CP_FENCE3_CNT_VAL_MASK 0xFF + +/* TPC0_QM_CP_STS */ +#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_SHIFT 0 +#define TPC0_QM_CP_STS_MSG_INFLIGHT_CNT_MASK 0xFFFF +#define TPC0_QM_CP_STS_ERDY_SHIFT 16 +#define TPC0_QM_CP_STS_ERDY_MASK 0x10000 +#define TPC0_QM_CP_STS_RRDY_SHIFT 17 +#define TPC0_QM_CP_STS_RRDY_MASK 0x20000 +#define TPC0_QM_CP_STS_MRDY_SHIFT 18 +#define TPC0_QM_CP_STS_MRDY_MASK 0x40000 +#define TPC0_QM_CP_STS_SW_STOP_SHIFT 19 +#define TPC0_QM_CP_STS_SW_STOP_MASK 0x80000 +#define TPC0_QM_CP_STS_FENCE_ID_SHIFT 20 +#define TPC0_QM_CP_STS_FENCE_ID_MASK 0x300000 +#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_SHIFT 22 +#define TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK 0x400000 + +/* TPC0_QM_CP_CURRENT_INST_LO */ +#define TPC0_QM_CP_CURRENT_INST_LO_VAL_SHIFT 0 +#define TPC0_QM_CP_CURRENT_INST_LO_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_CURRENT_INST_HI */ +#define TPC0_QM_CP_CURRENT_INST_HI_VAL_SHIFT 0 +#define TPC0_QM_CP_CURRENT_INST_HI_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CP_BARRIER_CFG */ +#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_SHIFT 0 +#define TPC0_QM_CP_BARRIER_CFG_EBGUARD_MASK 0xFFF + +/* TPC0_QM_CP_DBG_0 */ +#define TPC0_QM_CP_DBG_0_VAL_SHIFT 0 +#define TPC0_QM_CP_DBG_0_VAL_MASK 0xFF + +/* TPC0_QM_PQ_BUF_ADDR */ +#define TPC0_QM_PQ_BUF_ADDR_VAL_SHIFT 0 +#define TPC0_QM_PQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_PQ_BUF_RDATA */ +#define TPC0_QM_PQ_BUF_RDATA_VAL_SHIFT 0 +#define TPC0_QM_PQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_BUF_ADDR */ +#define TPC0_QM_CQ_BUF_ADDR_VAL_SHIFT 0 +#define TPC0_QM_CQ_BUF_ADDR_VAL_MASK 0xFFFFFFFF + +/* TPC0_QM_CQ_BUF_RDATA */ +#define TPC0_QM_CQ_BUF_RDATA_VAL_SHIFT 0 +#define TPC0_QM_CQ_BUF_RDATA_VAL_MASK 0xFFFFFFFF + +#endif /* ASIC_REG_TPC0_QM_MASKS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h new file mode 100644 index 000000000000..7552d4ba61fe --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC0_QM_REGS_H_ +#define ASIC_REG_TPC0_QM_REGS_H_ + +/* + ***************************************** + * TPC0_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC0_QM_GLBL_CFG0 0xE08000 + +#define mmTPC0_QM_GLBL_CFG1 0xE08004 + +#define mmTPC0_QM_GLBL_PROT 0xE08008 + +#define mmTPC0_QM_GLBL_ERR_CFG 0xE0800C + +#define mmTPC0_QM_GLBL_ERR_ADDR_LO 0xE08010 + +#define mmTPC0_QM_GLBL_ERR_ADDR_HI 0xE08014 + +#define mmTPC0_QM_GLBL_ERR_WDATA 0xE08018 + +#define mmTPC0_QM_GLBL_SECURE_PROPS 0xE0801C + +#define mmTPC0_QM_GLBL_NON_SECURE_PROPS 0xE08020 + +#define mmTPC0_QM_GLBL_STS0 0xE08024 + +#define mmTPC0_QM_GLBL_STS1 0xE08028 + +#define mmTPC0_QM_PQ_BASE_LO 0xE08060 + +#define mmTPC0_QM_PQ_BASE_HI 0xE08064 + +#define mmTPC0_QM_PQ_SIZE 0xE08068 + +#define mmTPC0_QM_PQ_PI 0xE0806C + +#define mmTPC0_QM_PQ_CI 0xE08070 + +#define mmTPC0_QM_PQ_CFG0 0xE08074 + +#define mmTPC0_QM_PQ_CFG1 0xE08078 + +#define mmTPC0_QM_PQ_ARUSER 0xE0807C + +#define mmTPC0_QM_PQ_PUSH0 0xE08080 + +#define mmTPC0_QM_PQ_PUSH1 0xE08084 + +#define mmTPC0_QM_PQ_PUSH2 0xE08088 + +#define mmTPC0_QM_PQ_PUSH3 0xE0808C + +#define mmTPC0_QM_PQ_STS0 0xE08090 + +#define mmTPC0_QM_PQ_STS1 0xE08094 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_EN 0xE080A0 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE080A4 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_SAT 0xE080A8 + +#define mmTPC0_QM_PQ_RD_RATE_LIM_TOUT 0xE080AC + +#define mmTPC0_QM_CQ_CFG0 0xE080B0 + +#define mmTPC0_QM_CQ_CFG1 0xE080B4 + +#define mmTPC0_QM_CQ_ARUSER 0xE080B8 + +#define mmTPC0_QM_CQ_PTR_LO 0xE080C0 + +#define mmTPC0_QM_CQ_PTR_HI 0xE080C4 + +#define mmTPC0_QM_CQ_TSIZE 0xE080C8 + +#define mmTPC0_QM_CQ_CTL 0xE080CC + +#define mmTPC0_QM_CQ_PTR_LO_STS 0xE080D4 + +#define mmTPC0_QM_CQ_PTR_HI_STS 0xE080D8 + +#define mmTPC0_QM_CQ_TSIZE_STS 0xE080DC + +#define mmTPC0_QM_CQ_CTL_STS 0xE080E0 + +#define mmTPC0_QM_CQ_STS0 0xE080E4 + +#define mmTPC0_QM_CQ_STS1 0xE080E8 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_EN 0xE080F0 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE080F4 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_SAT 0xE080F8 + +#define mmTPC0_QM_CQ_RD_RATE_LIM_TOUT 0xE080FC + +#define mmTPC0_QM_CQ_IFIFO_CNT 0xE08108 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_LO 0xE08120 + +#define mmTPC0_QM_CP_MSG_BASE0_ADDR_HI 0xE08124 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_LO 0xE08128 + +#define mmTPC0_QM_CP_MSG_BASE1_ADDR_HI 0xE0812C + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_LO 0xE08130 + +#define mmTPC0_QM_CP_MSG_BASE2_ADDR_HI 0xE08134 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_LO 0xE08138 + +#define mmTPC0_QM_CP_MSG_BASE3_ADDR_HI 0xE0813C + +#define mmTPC0_QM_CP_LDMA_TSIZE_OFFSET 0xE08140 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE08144 + +#define mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE08148 + +#define mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE0814C + +#define mmTPC0_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE08150 + +#define mmTPC0_QM_CP_LDMA_COMMIT_OFFSET 0xE08154 + +#define mmTPC0_QM_CP_FENCE0_RDATA 0xE08158 + +#define mmTPC0_QM_CP_FENCE1_RDATA 0xE0815C + +#define mmTPC0_QM_CP_FENCE2_RDATA 0xE08160 + +#define mmTPC0_QM_CP_FENCE3_RDATA 0xE08164 + +#define mmTPC0_QM_CP_FENCE0_CNT 0xE08168 + +#define mmTPC0_QM_CP_FENCE1_CNT 0xE0816C + +#define mmTPC0_QM_CP_FENCE2_CNT 0xE08170 + +#define mmTPC0_QM_CP_FENCE3_CNT 0xE08174 + +#define mmTPC0_QM_CP_STS 0xE08178 + +#define mmTPC0_QM_CP_CURRENT_INST_LO 0xE0817C + +#define 
mmTPC0_QM_CP_CURRENT_INST_HI 0xE08180 + +#define mmTPC0_QM_CP_BARRIER_CFG 0xE08184 + +#define mmTPC0_QM_CP_DBG_0 0xE08188 + +#define mmTPC0_QM_PQ_BUF_ADDR 0xE08300 + +#define mmTPC0_QM_PQ_BUF_RDATA 0xE08304 + +#define mmTPC0_QM_CQ_BUF_ADDR 0xE08308 + +#define mmTPC0_QM_CQ_BUF_RDATA 0xE0830C + +#endif /* ASIC_REG_TPC0_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h new file mode 100644 index 000000000000..19894413474a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_CFG_REGS_H_ +#define ASIC_REG_TPC1_CFG_REGS_H_ + +/* + ***************************************** + * TPC1_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE46400 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE46404 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE46408 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE4640C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE46410 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE46414 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE46418 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE4641C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE46420 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE46424 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE46428 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE4642C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE46430 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE46434 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE46438 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE4643C + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE46440 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE46444 + +#define mmTPC1_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE46448 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE4644C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE46450 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE46454 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE46458 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE4645C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE46460 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE46464 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE46468 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE4646C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE46470 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE46474 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE46478 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE4647C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE46480 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE46484 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE46488 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE4648C + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE46490 + +#define mmTPC1_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE46494 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE46498 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE4649C + +#define 
mmTPC1_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE464A0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE464A4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE464A8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE464AC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE464B0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE464B4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE464B8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE464BC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE464C0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE464C4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE464C8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE464CC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE464D0 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE464D4 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE464D8 + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE464DC + +#define mmTPC1_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE464E0 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE464E4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE464E8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE464EC + +#define mmTPC1_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE464F0 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE464F4 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE464F8 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE464FC + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE46500 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE46504 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE46508 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE4650C + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE46510 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE46514 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE46518 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE4651C + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE46520 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE46524 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE46528 + +#define mmTPC1_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE4652C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE46530 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE46534 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE46538 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE4653C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE46540 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE46544 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE46548 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE4654C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE46550 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE46554 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE46558 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE4655C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE46560 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE46564 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE46568 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE4656C + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE46570 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE46574 + +#define mmTPC1_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE46578 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE4657C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE46580 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE46584 + +#define 
mmTPC1_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE46588 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE4658C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE46590 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE46594 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE46598 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE4659C + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE465A0 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE465A4 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE465A8 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE465AC + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE465B0 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE465B4 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE465B8 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE465BC + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE465C0 + +#define mmTPC1_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE465C4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE465C8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE465CC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE465D0 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE465D4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE465D8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE465DC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE465E0 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE465E4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE465E8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE465EC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE465F0 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE465F4 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE465F8 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE465FC + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE46600 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE46604 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE46608 + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE4660C + +#define mmTPC1_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE46610 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE46614 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE46618 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE4661C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE46620 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE46624 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE46628 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE4662C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE46630 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE46634 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE46638 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE4663C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE46640 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE46644 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE46648 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE4664C + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE46650 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE46654 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE46658 + +#define mmTPC1_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE4665C + +#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE46660 + +#define mmTPC1_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE46664 + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_0 0xE46668 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_0 0xE4666C + +#define 
mmTPC1_CFG_KERNEL_TID_BASE_DIM_1 0xE46670 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_1 0xE46674 + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_2 0xE46678 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_2 0xE4667C + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_3 0xE46680 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_3 0xE46684 + +#define mmTPC1_CFG_KERNEL_TID_BASE_DIM_4 0xE46688 + +#define mmTPC1_CFG_KERNEL_TID_SIZE_DIM_4 0xE4668C + +#define mmTPC1_CFG_KERNEL_SRF_0 0xE46690 + +#define mmTPC1_CFG_KERNEL_SRF_1 0xE46694 + +#define mmTPC1_CFG_KERNEL_SRF_2 0xE46698 + +#define mmTPC1_CFG_KERNEL_SRF_3 0xE4669C + +#define mmTPC1_CFG_KERNEL_SRF_4 0xE466A0 + +#define mmTPC1_CFG_KERNEL_SRF_5 0xE466A4 + +#define mmTPC1_CFG_KERNEL_SRF_6 0xE466A8 + +#define mmTPC1_CFG_KERNEL_SRF_7 0xE466AC + +#define mmTPC1_CFG_KERNEL_SRF_8 0xE466B0 + +#define mmTPC1_CFG_KERNEL_SRF_9 0xE466B4 + +#define mmTPC1_CFG_KERNEL_SRF_10 0xE466B8 + +#define mmTPC1_CFG_KERNEL_SRF_11 0xE466BC + +#define mmTPC1_CFG_KERNEL_SRF_12 0xE466C0 + +#define mmTPC1_CFG_KERNEL_SRF_13 0xE466C4 + +#define mmTPC1_CFG_KERNEL_SRF_14 0xE466C8 + +#define mmTPC1_CFG_KERNEL_SRF_15 0xE466CC + +#define mmTPC1_CFG_KERNEL_SRF_16 0xE466D0 + +#define mmTPC1_CFG_KERNEL_SRF_17 0xE466D4 + +#define mmTPC1_CFG_KERNEL_SRF_18 0xE466D8 + +#define mmTPC1_CFG_KERNEL_SRF_19 0xE466DC + +#define mmTPC1_CFG_KERNEL_SRF_20 0xE466E0 + +#define mmTPC1_CFG_KERNEL_SRF_21 0xE466E4 + +#define mmTPC1_CFG_KERNEL_SRF_22 0xE466E8 + +#define mmTPC1_CFG_KERNEL_SRF_23 0xE466EC + +#define mmTPC1_CFG_KERNEL_SRF_24 0xE466F0 + +#define mmTPC1_CFG_KERNEL_SRF_25 0xE466F4 + +#define mmTPC1_CFG_KERNEL_SRF_26 0xE466F8 + +#define mmTPC1_CFG_KERNEL_SRF_27 0xE466FC + +#define mmTPC1_CFG_KERNEL_SRF_28 0xE46700 + +#define mmTPC1_CFG_KERNEL_SRF_29 0xE46704 + +#define mmTPC1_CFG_KERNEL_SRF_30 0xE46708 + +#define mmTPC1_CFG_KERNEL_SRF_31 0xE4670C + +#define mmTPC1_CFG_KERNEL_KERNEL_CONFIG 0xE46710 + +#define mmTPC1_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE46714 + +#define mmTPC1_CFG_RESERVED_DESC_END 0xE46738 + +#define mmTPC1_CFG_ROUND_CSR 0xE467FC + +#define mmTPC1_CFG_TBUF_BASE_ADDR_LOW 0xE46800 + +#define mmTPC1_CFG_TBUF_BASE_ADDR_HIGH 0xE46804 + +#define mmTPC1_CFG_SEMAPHORE 0xE46808 + +#define mmTPC1_CFG_VFLAGS 0xE4680C + +#define mmTPC1_CFG_SFLAGS 0xE46810 + +#define mmTPC1_CFG_LFSR_POLYNOM 0xE46818 + +#define mmTPC1_CFG_STATUS 0xE4681C + +#define mmTPC1_CFG_CFG_BASE_ADDRESS_HIGH 0xE46820 + +#define mmTPC1_CFG_CFG_SUBTRACT_VALUE 0xE46824 + +#define mmTPC1_CFG_SM_BASE_ADDRESS_LOW 0xE46828 + +#define mmTPC1_CFG_SM_BASE_ADDRESS_HIGH 0xE4682C + +#define mmTPC1_CFG_TPC_CMD 0xE46830 + +#define mmTPC1_CFG_TPC_EXECUTE 0xE46838 + +#define mmTPC1_CFG_TPC_STALL 0xE4683C + +#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_LOW 0xE46840 + +#define mmTPC1_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE46844 + +#define mmTPC1_CFG_MSS_CONFIG 0xE46854 + +#define mmTPC1_CFG_TPC_INTR_CAUSE 0xE46858 + +#define mmTPC1_CFG_TPC_INTR_MASK 0xE4685C + +#define mmTPC1_CFG_TSB_CONFIG 0xE46860 + +#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE46A00 + +#define mmTPC1_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE46A04 + +#define mmTPC1_CFG_QM_TENSOR_0_PADDING_VALUE 0xE46A08 + +#define mmTPC1_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE46A0C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE46A10 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE46A14 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE46A18 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE46A1C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE46A20 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE46A24 + +#define 
mmTPC1_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE46A28 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE46A2C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE46A30 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE46A34 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE46A38 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE46A3C + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE46A40 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE46A44 + +#define mmTPC1_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE46A48 + +#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE46A4C + +#define mmTPC1_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE46A50 + +#define mmTPC1_CFG_QM_TENSOR_1_PADDING_VALUE 0xE46A54 + +#define mmTPC1_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE46A58 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE46A5C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE46A60 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE46A64 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE46A68 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE46A6C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE46A70 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE46A74 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE46A78 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE46A7C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE46A80 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE46A84 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE46A88 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE46A8C + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE46A90 + +#define mmTPC1_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE46A94 + +#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE46A98 + +#define mmTPC1_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE46A9C + +#define mmTPC1_CFG_QM_TENSOR_2_PADDING_VALUE 0xE46AA0 + +#define mmTPC1_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE46AA4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE46AA8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE46AAC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE46AB0 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE46AB4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE46AB8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE46ABC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE46AC0 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE46AC4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE46AC8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE46ACC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE46AD0 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE46AD4 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE46AD8 + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE46ADC + +#define mmTPC1_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE46AE0 + +#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE46AE4 + +#define mmTPC1_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE46AE8 + +#define mmTPC1_CFG_QM_TENSOR_3_PADDING_VALUE 0xE46AEC + +#define mmTPC1_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE46AF0 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE46AF4 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE46AF8 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE46AFC + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE46B00 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE46B04 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE46B08 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE46B0C + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE46B10 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE46B14 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE46B18 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE46B1C + +#define 
mmTPC1_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE46B20 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE46B24 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE46B28 + +#define mmTPC1_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE46B2C + +#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE46B30 + +#define mmTPC1_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE46B34 + +#define mmTPC1_CFG_QM_TENSOR_4_PADDING_VALUE 0xE46B38 + +#define mmTPC1_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE46B3C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE46B40 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE46B44 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE46B48 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE46B4C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE46B50 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE46B54 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE46B58 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE46B5C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE46B60 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE46B64 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE46B68 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE46B6C + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE46B70 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE46B74 + +#define mmTPC1_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE46B78 + +#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE46B7C + +#define mmTPC1_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE46B80 + +#define mmTPC1_CFG_QM_TENSOR_5_PADDING_VALUE 0xE46B84 + +#define mmTPC1_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE46B88 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE46B8C + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE46B90 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE46B94 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE46B98 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE46B9C + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE46BA0 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE46BA4 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE46BA8 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE46BAC + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE46BB0 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE46BB4 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE46BB8 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE46BBC + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE46BC0 + +#define mmTPC1_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE46BC4 + +#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE46BC8 + +#define mmTPC1_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE46BCC + +#define mmTPC1_CFG_QM_TENSOR_6_PADDING_VALUE 0xE46BD0 + +#define mmTPC1_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE46BD4 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE46BD8 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE46BDC + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE46BE0 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE46BE4 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE46BE8 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE46BEC + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE46BF0 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE46BF4 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE46BF8 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE46BFC + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE46C00 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE46C04 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE46C08 + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE46C0C + +#define mmTPC1_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE46C10 + +#define mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE46C14 + +#define 
mmTPC1_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE46C18 + +#define mmTPC1_CFG_QM_TENSOR_7_PADDING_VALUE 0xE46C1C + +#define mmTPC1_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE46C20 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE46C24 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE46C28 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE46C2C + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE46C30 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE46C34 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE46C38 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE46C3C + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE46C40 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE46C44 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE46C48 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE46C4C + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE46C50 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE46C54 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE46C58 + +#define mmTPC1_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE46C5C + +#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE46C60 + +#define mmTPC1_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE46C64 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_0 0xE46C68 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_0 0xE46C6C + +#define mmTPC1_CFG_QM_TID_BASE_DIM_1 0xE46C70 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_1 0xE46C74 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_2 0xE46C78 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_2 0xE46C7C + +#define mmTPC1_CFG_QM_TID_BASE_DIM_3 0xE46C80 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_3 0xE46C84 + +#define mmTPC1_CFG_QM_TID_BASE_DIM_4 0xE46C88 + +#define mmTPC1_CFG_QM_TID_SIZE_DIM_4 0xE46C8C + +#define mmTPC1_CFG_QM_SRF_0 0xE46C90 + +#define mmTPC1_CFG_QM_SRF_1 0xE46C94 + +#define mmTPC1_CFG_QM_SRF_2 0xE46C98 + +#define mmTPC1_CFG_QM_SRF_3 0xE46C9C + +#define mmTPC1_CFG_QM_SRF_4 0xE46CA0 + +#define mmTPC1_CFG_QM_SRF_5 0xE46CA4 + +#define mmTPC1_CFG_QM_SRF_6 0xE46CA8 + +#define mmTPC1_CFG_QM_SRF_7 0xE46CAC + +#define mmTPC1_CFG_QM_SRF_8 0xE46CB0 + +#define mmTPC1_CFG_QM_SRF_9 0xE46CB4 + +#define mmTPC1_CFG_QM_SRF_10 0xE46CB8 + +#define mmTPC1_CFG_QM_SRF_11 0xE46CBC + +#define mmTPC1_CFG_QM_SRF_12 0xE46CC0 + +#define mmTPC1_CFG_QM_SRF_13 0xE46CC4 + +#define mmTPC1_CFG_QM_SRF_14 0xE46CC8 + +#define mmTPC1_CFG_QM_SRF_15 0xE46CCC + +#define mmTPC1_CFG_QM_SRF_16 0xE46CD0 + +#define mmTPC1_CFG_QM_SRF_17 0xE46CD4 + +#define mmTPC1_CFG_QM_SRF_18 0xE46CD8 + +#define mmTPC1_CFG_QM_SRF_19 0xE46CDC + +#define mmTPC1_CFG_QM_SRF_20 0xE46CE0 + +#define mmTPC1_CFG_QM_SRF_21 0xE46CE4 + +#define mmTPC1_CFG_QM_SRF_22 0xE46CE8 + +#define mmTPC1_CFG_QM_SRF_23 0xE46CEC + +#define mmTPC1_CFG_QM_SRF_24 0xE46CF0 + +#define mmTPC1_CFG_QM_SRF_25 0xE46CF4 + +#define mmTPC1_CFG_QM_SRF_26 0xE46CF8 + +#define mmTPC1_CFG_QM_SRF_27 0xE46CFC + +#define mmTPC1_CFG_QM_SRF_28 0xE46D00 + +#define mmTPC1_CFG_QM_SRF_29 0xE46D04 + +#define mmTPC1_CFG_QM_SRF_30 0xE46D08 + +#define mmTPC1_CFG_QM_SRF_31 0xE46D0C + +#define mmTPC1_CFG_QM_KERNEL_CONFIG 0xE46D10 + +#define mmTPC1_CFG_QM_SYNC_OBJECT_MESSAGE 0xE46D14 + +#define mmTPC1_CFG_ARUSER 0xE46D18 + +#define mmTPC1_CFG_AWUSER 0xE46D1C + +#define mmTPC1_CFG_FUNC_MBIST_CNTRL 0xE46E00 + +#define mmTPC1_CFG_FUNC_MBIST_PAT 0xE46E04 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_0 0xE46E08 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_1 0xE46E0C + +#define mmTPC1_CFG_FUNC_MBIST_MEM_2 0xE46E10 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_3 0xE46E14 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_4 0xE46E18 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_5 0xE46E1C + +#define mmTPC1_CFG_FUNC_MBIST_MEM_6 0xE46E20 + +#define 
mmTPC1_CFG_FUNC_MBIST_MEM_7 0xE46E24 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_8 0xE46E28 + +#define mmTPC1_CFG_FUNC_MBIST_MEM_9 0xE46E2C + +#endif /* ASIC_REG_TPC1_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h new file mode 100644 index 000000000000..9099ebd7ab23 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_CMDQ_REGS_H_ +#define ASIC_REG_TPC1_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC1_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC1_CMDQ_GLBL_CFG0 0xE49000 + +#define mmTPC1_CMDQ_GLBL_CFG1 0xE49004 + +#define mmTPC1_CMDQ_GLBL_PROT 0xE49008 + +#define mmTPC1_CMDQ_GLBL_ERR_CFG 0xE4900C + +#define mmTPC1_CMDQ_GLBL_ERR_ADDR_LO 0xE49010 + +#define mmTPC1_CMDQ_GLBL_ERR_ADDR_HI 0xE49014 + +#define mmTPC1_CMDQ_GLBL_ERR_WDATA 0xE49018 + +#define mmTPC1_CMDQ_GLBL_SECURE_PROPS 0xE4901C + +#define mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS 0xE49020 + +#define mmTPC1_CMDQ_GLBL_STS0 0xE49024 + +#define mmTPC1_CMDQ_GLBL_STS1 0xE49028 + +#define mmTPC1_CMDQ_CQ_CFG0 0xE490B0 + +#define mmTPC1_CMDQ_CQ_CFG1 0xE490B4 + +#define mmTPC1_CMDQ_CQ_ARUSER 0xE490B8 + +#define mmTPC1_CMDQ_CQ_PTR_LO 0xE490C0 + +#define mmTPC1_CMDQ_CQ_PTR_HI 0xE490C4 + +#define mmTPC1_CMDQ_CQ_TSIZE 0xE490C8 + +#define mmTPC1_CMDQ_CQ_CTL 0xE490CC + +#define mmTPC1_CMDQ_CQ_PTR_LO_STS 0xE490D4 + +#define mmTPC1_CMDQ_CQ_PTR_HI_STS 0xE490D8 + +#define mmTPC1_CMDQ_CQ_TSIZE_STS 0xE490DC + +#define mmTPC1_CMDQ_CQ_CTL_STS 0xE490E0 + +#define mmTPC1_CMDQ_CQ_STS0 0xE490E4 + +#define mmTPC1_CMDQ_CQ_STS1 0xE490E8 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_EN 0xE490F0 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE490F4 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_SAT 0xE490F8 + +#define mmTPC1_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE490FC + +#define mmTPC1_CMDQ_CQ_IFIFO_CNT 0xE49108 + +#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE49120 + +#define mmTPC1_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE49124 + +#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE49128 + +#define mmTPC1_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE4912C + +#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE49130 + +#define mmTPC1_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE49134 + +#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE49138 + +#define mmTPC1_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE4913C + +#define mmTPC1_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE49140 + +#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE49144 + +#define mmTPC1_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE49148 + +#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE4914C + +#define mmTPC1_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE49150 + +#define mmTPC1_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE49154 + +#define mmTPC1_CMDQ_CP_FENCE0_RDATA 0xE49158 + +#define mmTPC1_CMDQ_CP_FENCE1_RDATA 0xE4915C + +#define mmTPC1_CMDQ_CP_FENCE2_RDATA 0xE49160 + +#define mmTPC1_CMDQ_CP_FENCE3_RDATA 0xE49164 + +#define mmTPC1_CMDQ_CP_FENCE0_CNT 0xE49168 + +#define mmTPC1_CMDQ_CP_FENCE1_CNT 0xE4916C + +#define mmTPC1_CMDQ_CP_FENCE2_CNT 0xE49170 + +#define mmTPC1_CMDQ_CP_FENCE3_CNT 0xE49174 + +#define mmTPC1_CMDQ_CP_STS 0xE49178 + +#define mmTPC1_CMDQ_CP_CURRENT_INST_LO 0xE4917C + +#define mmTPC1_CMDQ_CP_CURRENT_INST_HI 0xE49180 + +#define 
mmTPC1_CMDQ_CP_BARRIER_CFG 0xE49184 + +#define mmTPC1_CMDQ_CP_DBG_0 0xE49188 + +#define mmTPC1_CMDQ_CQ_BUF_ADDR 0xE49308 + +#define mmTPC1_CMDQ_CQ_BUF_RDATA 0xE4930C + +#endif /* ASIC_REG_TPC1_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h new file mode 100644 index 000000000000..bc8b9a10391f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_QM_REGS_H_ +#define ASIC_REG_TPC1_QM_REGS_H_ + +/* + ***************************************** + * TPC1_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC1_QM_GLBL_CFG0 0xE48000 + +#define mmTPC1_QM_GLBL_CFG1 0xE48004 + +#define mmTPC1_QM_GLBL_PROT 0xE48008 + +#define mmTPC1_QM_GLBL_ERR_CFG 0xE4800C + +#define mmTPC1_QM_GLBL_ERR_ADDR_LO 0xE48010 + +#define mmTPC1_QM_GLBL_ERR_ADDR_HI 0xE48014 + +#define mmTPC1_QM_GLBL_ERR_WDATA 0xE48018 + +#define mmTPC1_QM_GLBL_SECURE_PROPS 0xE4801C + +#define mmTPC1_QM_GLBL_NON_SECURE_PROPS 0xE48020 + +#define mmTPC1_QM_GLBL_STS0 0xE48024 + +#define mmTPC1_QM_GLBL_STS1 0xE48028 + +#define mmTPC1_QM_PQ_BASE_LO 0xE48060 + +#define mmTPC1_QM_PQ_BASE_HI 0xE48064 + +#define mmTPC1_QM_PQ_SIZE 0xE48068 + +#define mmTPC1_QM_PQ_PI 0xE4806C + +#define mmTPC1_QM_PQ_CI 0xE48070 + +#define mmTPC1_QM_PQ_CFG0 0xE48074 + +#define mmTPC1_QM_PQ_CFG1 0xE48078 + +#define mmTPC1_QM_PQ_ARUSER 0xE4807C + +#define mmTPC1_QM_PQ_PUSH0 0xE48080 + +#define mmTPC1_QM_PQ_PUSH1 0xE48084 + +#define mmTPC1_QM_PQ_PUSH2 0xE48088 + +#define mmTPC1_QM_PQ_PUSH3 0xE4808C + +#define mmTPC1_QM_PQ_STS0 0xE48090 + +#define mmTPC1_QM_PQ_STS1 0xE48094 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_EN 0xE480A0 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE480A4 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_SAT 0xE480A8 + +#define mmTPC1_QM_PQ_RD_RATE_LIM_TOUT 0xE480AC + +#define mmTPC1_QM_CQ_CFG0 0xE480B0 + +#define mmTPC1_QM_CQ_CFG1 0xE480B4 + +#define mmTPC1_QM_CQ_ARUSER 0xE480B8 + +#define mmTPC1_QM_CQ_PTR_LO 0xE480C0 + +#define mmTPC1_QM_CQ_PTR_HI 0xE480C4 + +#define mmTPC1_QM_CQ_TSIZE 0xE480C8 + +#define mmTPC1_QM_CQ_CTL 0xE480CC + +#define mmTPC1_QM_CQ_PTR_LO_STS 0xE480D4 + +#define mmTPC1_QM_CQ_PTR_HI_STS 0xE480D8 + +#define mmTPC1_QM_CQ_TSIZE_STS 0xE480DC + +#define mmTPC1_QM_CQ_CTL_STS 0xE480E0 + +#define mmTPC1_QM_CQ_STS0 0xE480E4 + +#define mmTPC1_QM_CQ_STS1 0xE480E8 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_EN 0xE480F0 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE480F4 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_SAT 0xE480F8 + +#define mmTPC1_QM_CQ_RD_RATE_LIM_TOUT 0xE480FC + +#define mmTPC1_QM_CQ_IFIFO_CNT 0xE48108 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_LO 0xE48120 + +#define mmTPC1_QM_CP_MSG_BASE0_ADDR_HI 0xE48124 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_LO 0xE48128 + +#define mmTPC1_QM_CP_MSG_BASE1_ADDR_HI 0xE4812C + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_LO 0xE48130 + +#define mmTPC1_QM_CP_MSG_BASE2_ADDR_HI 0xE48134 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_LO 0xE48138 + +#define mmTPC1_QM_CP_MSG_BASE3_ADDR_HI 0xE4813C + +#define mmTPC1_QM_CP_LDMA_TSIZE_OFFSET 0xE48140 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE48144 + +#define mmTPC1_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE48148 + +#define 
mmTPC1_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE4814C + +#define mmTPC1_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE48150 + +#define mmTPC1_QM_CP_LDMA_COMMIT_OFFSET 0xE48154 + +#define mmTPC1_QM_CP_FENCE0_RDATA 0xE48158 + +#define mmTPC1_QM_CP_FENCE1_RDATA 0xE4815C + +#define mmTPC1_QM_CP_FENCE2_RDATA 0xE48160 + +#define mmTPC1_QM_CP_FENCE3_RDATA 0xE48164 + +#define mmTPC1_QM_CP_FENCE0_CNT 0xE48168 + +#define mmTPC1_QM_CP_FENCE1_CNT 0xE4816C + +#define mmTPC1_QM_CP_FENCE2_CNT 0xE48170 + +#define mmTPC1_QM_CP_FENCE3_CNT 0xE48174 + +#define mmTPC1_QM_CP_STS 0xE48178 + +#define mmTPC1_QM_CP_CURRENT_INST_LO 0xE4817C + +#define mmTPC1_QM_CP_CURRENT_INST_HI 0xE48180 + +#define mmTPC1_QM_CP_BARRIER_CFG 0xE48184 + +#define mmTPC1_QM_CP_DBG_0 0xE48188 + +#define mmTPC1_QM_PQ_BUF_ADDR 0xE48300 + +#define mmTPC1_QM_PQ_BUF_RDATA 0xE48304 + +#define mmTPC1_QM_CQ_BUF_ADDR 0xE48308 + +#define mmTPC1_QM_CQ_BUF_RDATA 0xE4830C + +#endif /* ASIC_REG_TPC1_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h new file mode 100644 index 000000000000..ae267f8f457e --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC1_RTR_REGS_H_ +#define ASIC_REG_TPC1_RTR_REGS_H_ + +/* + ***************************************** + * TPC1_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC1_RTR_HBW_RD_RQ_E_ARB 0xE40100 + +#define mmTPC1_RTR_HBW_RD_RQ_W_ARB 0xE40104 + +#define mmTPC1_RTR_HBW_RD_RQ_N_ARB 0xE40108 + +#define mmTPC1_RTR_HBW_RD_RQ_S_ARB 0xE4010C + +#define mmTPC1_RTR_HBW_RD_RQ_L_ARB 0xE40110 + +#define mmTPC1_RTR_HBW_E_ARB_MAX 0xE40120 + +#define mmTPC1_RTR_HBW_W_ARB_MAX 0xE40124 + +#define mmTPC1_RTR_HBW_N_ARB_MAX 0xE40128 + +#define mmTPC1_RTR_HBW_S_ARB_MAX 0xE4012C + +#define mmTPC1_RTR_HBW_L_ARB_MAX 0xE40130 + +#define mmTPC1_RTR_HBW_RD_RS_E_ARB 0xE40140 + +#define mmTPC1_RTR_HBW_RD_RS_W_ARB 0xE40144 + +#define mmTPC1_RTR_HBW_RD_RS_N_ARB 0xE40148 + +#define mmTPC1_RTR_HBW_RD_RS_S_ARB 0xE4014C + +#define mmTPC1_RTR_HBW_RD_RS_L_ARB 0xE40150 + +#define mmTPC1_RTR_HBW_WR_RQ_E_ARB 0xE40170 + +#define mmTPC1_RTR_HBW_WR_RQ_W_ARB 0xE40174 + +#define mmTPC1_RTR_HBW_WR_RQ_N_ARB 0xE40178 + +#define mmTPC1_RTR_HBW_WR_RQ_S_ARB 0xE4017C + +#define mmTPC1_RTR_HBW_WR_RQ_L_ARB 0xE40180 + +#define mmTPC1_RTR_HBW_WR_RS_E_ARB 0xE40190 + +#define mmTPC1_RTR_HBW_WR_RS_W_ARB 0xE40194 + +#define mmTPC1_RTR_HBW_WR_RS_N_ARB 0xE40198 + +#define mmTPC1_RTR_HBW_WR_RS_S_ARB 0xE4019C + +#define mmTPC1_RTR_HBW_WR_RS_L_ARB 0xE401A0 + +#define mmTPC1_RTR_LBW_RD_RQ_E_ARB 0xE40200 + +#define mmTPC1_RTR_LBW_RD_RQ_W_ARB 0xE40204 + +#define mmTPC1_RTR_LBW_RD_RQ_N_ARB 0xE40208 + +#define mmTPC1_RTR_LBW_RD_RQ_S_ARB 0xE4020C + +#define mmTPC1_RTR_LBW_RD_RQ_L_ARB 0xE40210 + +#define mmTPC1_RTR_LBW_E_ARB_MAX 0xE40220 + +#define mmTPC1_RTR_LBW_W_ARB_MAX 0xE40224 + +#define mmTPC1_RTR_LBW_N_ARB_MAX 0xE40228 + +#define mmTPC1_RTR_LBW_S_ARB_MAX 0xE4022C + +#define mmTPC1_RTR_LBW_L_ARB_MAX 0xE40230 + +#define mmTPC1_RTR_LBW_RD_RS_E_ARB 0xE40250 + +#define mmTPC1_RTR_LBW_RD_RS_W_ARB 0xE40254 + +#define mmTPC1_RTR_LBW_RD_RS_N_ARB 0xE40258 + +#define mmTPC1_RTR_LBW_RD_RS_S_ARB 0xE4025C + +#define 
mmTPC1_RTR_LBW_RD_RS_L_ARB 0xE40260 + +#define mmTPC1_RTR_LBW_WR_RQ_E_ARB 0xE40270 + +#define mmTPC1_RTR_LBW_WR_RQ_W_ARB 0xE40274 + +#define mmTPC1_RTR_LBW_WR_RQ_N_ARB 0xE40278 + +#define mmTPC1_RTR_LBW_WR_RQ_S_ARB 0xE4027C + +#define mmTPC1_RTR_LBW_WR_RQ_L_ARB 0xE40280 + +#define mmTPC1_RTR_LBW_WR_RS_E_ARB 0xE40290 + +#define mmTPC1_RTR_LBW_WR_RS_W_ARB 0xE40294 + +#define mmTPC1_RTR_LBW_WR_RS_N_ARB 0xE40298 + +#define mmTPC1_RTR_LBW_WR_RS_S_ARB 0xE4029C + +#define mmTPC1_RTR_LBW_WR_RS_L_ARB 0xE402A0 + +#define mmTPC1_RTR_DBG_E_ARB 0xE40300 + +#define mmTPC1_RTR_DBG_W_ARB 0xE40304 + +#define mmTPC1_RTR_DBG_N_ARB 0xE40308 + +#define mmTPC1_RTR_DBG_S_ARB 0xE4030C + +#define mmTPC1_RTR_DBG_L_ARB 0xE40310 + +#define mmTPC1_RTR_DBG_E_ARB_MAX 0xE40320 + +#define mmTPC1_RTR_DBG_W_ARB_MAX 0xE40324 + +#define mmTPC1_RTR_DBG_N_ARB_MAX 0xE40328 + +#define mmTPC1_RTR_DBG_S_ARB_MAX 0xE4032C + +#define mmTPC1_RTR_DBG_L_ARB_MAX 0xE40330 + +#define mmTPC1_RTR_SPLIT_COEF_0 0xE40400 + +#define mmTPC1_RTR_SPLIT_COEF_1 0xE40404 + +#define mmTPC1_RTR_SPLIT_COEF_2 0xE40408 + +#define mmTPC1_RTR_SPLIT_COEF_3 0xE4040C + +#define mmTPC1_RTR_SPLIT_COEF_4 0xE40410 + +#define mmTPC1_RTR_SPLIT_COEF_5 0xE40414 + +#define mmTPC1_RTR_SPLIT_COEF_6 0xE40418 + +#define mmTPC1_RTR_SPLIT_COEF_7 0xE4041C + +#define mmTPC1_RTR_SPLIT_COEF_8 0xE40420 + +#define mmTPC1_RTR_SPLIT_COEF_9 0xE40424 + +#define mmTPC1_RTR_SPLIT_CFG 0xE40440 + +#define mmTPC1_RTR_SPLIT_RD_SAT 0xE40444 + +#define mmTPC1_RTR_SPLIT_RD_RST_TOKEN 0xE40448 + +#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_0 0xE4044C + +#define mmTPC1_RTR_SPLIT_RD_TIMEOUT_1 0xE40450 + +#define mmTPC1_RTR_SPLIT_WR_SAT 0xE40454 + +#define mmTPC1_RTR_WPLIT_WR_TST_TOLEN 0xE40458 + +#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_0 0xE4045C + +#define mmTPC1_RTR_SPLIT_WR_TIMEOUT_1 0xE40460 + +#define mmTPC1_RTR_HBW_RANGE_HIT 0xE40470 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_0 0xE40480 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_1 0xE40484 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_2 0xE40488 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_3 0xE4048C + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_4 0xE40490 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_5 0xE40494 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_6 0xE40498 + +#define mmTPC1_RTR_HBW_RANGE_MASK_L_7 0xE4049C + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_0 0xE404A0 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_1 0xE404A4 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_2 0xE404A8 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_3 0xE404AC + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_4 0xE404B0 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_5 0xE404B4 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_6 0xE404B8 + +#define mmTPC1_RTR_HBW_RANGE_MASK_H_7 0xE404BC + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_0 0xE404C0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_1 0xE404C4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_2 0xE404C8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_3 0xE404CC + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_4 0xE404D0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_5 0xE404D4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_6 0xE404D8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_L_7 0xE404DC + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_0 0xE404E0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_1 0xE404E4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_2 0xE404E8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_3 0xE404EC + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_4 0xE404F0 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_5 0xE404F4 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_6 0xE404F8 + +#define mmTPC1_RTR_HBW_RANGE_BASE_H_7 0xE404FC + +#define mmTPC1_RTR_LBW_RANGE_HIT 0xE40500 + +#define mmTPC1_RTR_LBW_RANGE_MASK_0 0xE40510 + +#define 
mmTPC1_RTR_LBW_RANGE_MASK_1 0xE40514 + +#define mmTPC1_RTR_LBW_RANGE_MASK_2 0xE40518 + +#define mmTPC1_RTR_LBW_RANGE_MASK_3 0xE4051C + +#define mmTPC1_RTR_LBW_RANGE_MASK_4 0xE40520 + +#define mmTPC1_RTR_LBW_RANGE_MASK_5 0xE40524 + +#define mmTPC1_RTR_LBW_RANGE_MASK_6 0xE40528 + +#define mmTPC1_RTR_LBW_RANGE_MASK_7 0xE4052C + +#define mmTPC1_RTR_LBW_RANGE_MASK_8 0xE40530 + +#define mmTPC1_RTR_LBW_RANGE_MASK_9 0xE40534 + +#define mmTPC1_RTR_LBW_RANGE_MASK_10 0xE40538 + +#define mmTPC1_RTR_LBW_RANGE_MASK_11 0xE4053C + +#define mmTPC1_RTR_LBW_RANGE_MASK_12 0xE40540 + +#define mmTPC1_RTR_LBW_RANGE_MASK_13 0xE40544 + +#define mmTPC1_RTR_LBW_RANGE_MASK_14 0xE40548 + +#define mmTPC1_RTR_LBW_RANGE_MASK_15 0xE4054C + +#define mmTPC1_RTR_LBW_RANGE_BASE_0 0xE40550 + +#define mmTPC1_RTR_LBW_RANGE_BASE_1 0xE40554 + +#define mmTPC1_RTR_LBW_RANGE_BASE_2 0xE40558 + +#define mmTPC1_RTR_LBW_RANGE_BASE_3 0xE4055C + +#define mmTPC1_RTR_LBW_RANGE_BASE_4 0xE40560 + +#define mmTPC1_RTR_LBW_RANGE_BASE_5 0xE40564 + +#define mmTPC1_RTR_LBW_RANGE_BASE_6 0xE40568 + +#define mmTPC1_RTR_LBW_RANGE_BASE_7 0xE4056C + +#define mmTPC1_RTR_LBW_RANGE_BASE_8 0xE40570 + +#define mmTPC1_RTR_LBW_RANGE_BASE_9 0xE40574 + +#define mmTPC1_RTR_LBW_RANGE_BASE_10 0xE40578 + +#define mmTPC1_RTR_LBW_RANGE_BASE_11 0xE4057C + +#define mmTPC1_RTR_LBW_RANGE_BASE_12 0xE40580 + +#define mmTPC1_RTR_LBW_RANGE_BASE_13 0xE40584 + +#define mmTPC1_RTR_LBW_RANGE_BASE_14 0xE40588 + +#define mmTPC1_RTR_LBW_RANGE_BASE_15 0xE4058C + +#define mmTPC1_RTR_RGLTR 0xE40590 + +#define mmTPC1_RTR_RGLTR_WR_RESULT 0xE40594 + +#define mmTPC1_RTR_RGLTR_RD_RESULT 0xE40598 + +#define mmTPC1_RTR_SCRAMB_EN 0xE40600 + +#define mmTPC1_RTR_NON_LIN_SCRAMB 0xE40604 + +#endif /* ASIC_REG_TPC1_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h new file mode 100644 index 000000000000..9c33fc039036 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_CFG_REGS_H_ +#define ASIC_REG_TPC2_CFG_REGS_H_ + +/* + ***************************************** + * TPC2_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xE86400 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xE86404 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xE86408 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xE8640C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xE86410 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xE86414 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xE86418 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xE8641C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xE86420 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xE86424 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xE86428 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xE8642C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xE86430 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xE86434 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xE86438 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xE8643C + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xE86440 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xE86444 + +#define mmTPC2_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xE86448 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xE8644C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xE86450 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xE86454 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xE86458 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xE8645C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xE86460 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xE86464 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xE86468 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xE8646C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xE86470 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xE86474 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xE86478 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xE8647C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xE86480 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xE86484 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xE86488 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xE8648C + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xE86490 + +#define mmTPC2_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xE86494 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xE86498 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xE8649C + +#define mmTPC2_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xE864A0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xE864A4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xE864A8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xE864AC + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xE864B0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xE864B4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xE864B8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xE864BC + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xE864C0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xE864C4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xE864C8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xE864CC + +#define 
mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xE864D0 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xE864D4 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xE864D8 + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xE864DC + +#define mmTPC2_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xE864E0 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xE864E4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xE864E8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xE864EC + +#define mmTPC2_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xE864F0 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xE864F4 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xE864F8 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xE864FC + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xE86500 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xE86504 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xE86508 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xE8650C + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xE86510 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xE86514 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xE86518 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xE8651C + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xE86520 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xE86524 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xE86528 + +#define mmTPC2_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xE8652C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xE86530 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xE86534 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xE86538 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xE8653C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xE86540 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xE86544 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xE86548 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xE8654C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xE86550 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xE86554 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xE86558 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xE8655C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xE86560 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xE86564 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xE86568 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xE8656C + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xE86570 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xE86574 + +#define mmTPC2_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xE86578 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xE8657C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xE86580 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xE86584 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xE86588 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xE8658C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xE86590 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xE86594 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xE86598 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xE8659C + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xE865A0 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xE865A4 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xE865A8 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xE865AC + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xE865B0 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xE865B4 + +#define 
mmTPC2_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xE865B8 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xE865BC + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xE865C0 + +#define mmTPC2_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xE865C4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xE865C8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xE865CC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xE865D0 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xE865D4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xE865D8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xE865DC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xE865E0 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xE865E4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xE865E8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xE865EC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xE865F0 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xE865F4 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xE865F8 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xE865FC + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xE86600 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xE86604 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xE86608 + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xE8660C + +#define mmTPC2_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xE86610 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xE86614 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xE86618 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xE8661C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xE86620 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xE86624 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xE86628 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xE8662C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xE86630 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xE86634 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xE86638 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xE8663C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xE86640 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xE86644 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xE86648 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xE8664C + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xE86650 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xE86654 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xE86658 + +#define mmTPC2_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xE8665C + +#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xE86660 + +#define mmTPC2_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xE86664 + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_0 0xE86668 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_0 0xE8666C + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_1 0xE86670 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_1 0xE86674 + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_2 0xE86678 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_2 0xE8667C + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_3 0xE86680 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_3 0xE86684 + +#define mmTPC2_CFG_KERNEL_TID_BASE_DIM_4 0xE86688 + +#define mmTPC2_CFG_KERNEL_TID_SIZE_DIM_4 0xE8668C + +#define mmTPC2_CFG_KERNEL_SRF_0 0xE86690 + +#define mmTPC2_CFG_KERNEL_SRF_1 0xE86694 + +#define mmTPC2_CFG_KERNEL_SRF_2 0xE86698 + +#define mmTPC2_CFG_KERNEL_SRF_3 0xE8669C + +#define mmTPC2_CFG_KERNEL_SRF_4 0xE866A0 + +#define mmTPC2_CFG_KERNEL_SRF_5 0xE866A4 + +#define mmTPC2_CFG_KERNEL_SRF_6 0xE866A8 + +#define mmTPC2_CFG_KERNEL_SRF_7 
0xE866AC + +#define mmTPC2_CFG_KERNEL_SRF_8 0xE866B0 + +#define mmTPC2_CFG_KERNEL_SRF_9 0xE866B4 + +#define mmTPC2_CFG_KERNEL_SRF_10 0xE866B8 + +#define mmTPC2_CFG_KERNEL_SRF_11 0xE866BC + +#define mmTPC2_CFG_KERNEL_SRF_12 0xE866C0 + +#define mmTPC2_CFG_KERNEL_SRF_13 0xE866C4 + +#define mmTPC2_CFG_KERNEL_SRF_14 0xE866C8 + +#define mmTPC2_CFG_KERNEL_SRF_15 0xE866CC + +#define mmTPC2_CFG_KERNEL_SRF_16 0xE866D0 + +#define mmTPC2_CFG_KERNEL_SRF_17 0xE866D4 + +#define mmTPC2_CFG_KERNEL_SRF_18 0xE866D8 + +#define mmTPC2_CFG_KERNEL_SRF_19 0xE866DC + +#define mmTPC2_CFG_KERNEL_SRF_20 0xE866E0 + +#define mmTPC2_CFG_KERNEL_SRF_21 0xE866E4 + +#define mmTPC2_CFG_KERNEL_SRF_22 0xE866E8 + +#define mmTPC2_CFG_KERNEL_SRF_23 0xE866EC + +#define mmTPC2_CFG_KERNEL_SRF_24 0xE866F0 + +#define mmTPC2_CFG_KERNEL_SRF_25 0xE866F4 + +#define mmTPC2_CFG_KERNEL_SRF_26 0xE866F8 + +#define mmTPC2_CFG_KERNEL_SRF_27 0xE866FC + +#define mmTPC2_CFG_KERNEL_SRF_28 0xE86700 + +#define mmTPC2_CFG_KERNEL_SRF_29 0xE86704 + +#define mmTPC2_CFG_KERNEL_SRF_30 0xE86708 + +#define mmTPC2_CFG_KERNEL_SRF_31 0xE8670C + +#define mmTPC2_CFG_KERNEL_KERNEL_CONFIG 0xE86710 + +#define mmTPC2_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xE86714 + +#define mmTPC2_CFG_RESERVED_DESC_END 0xE86738 + +#define mmTPC2_CFG_ROUND_CSR 0xE867FC + +#define mmTPC2_CFG_TBUF_BASE_ADDR_LOW 0xE86800 + +#define mmTPC2_CFG_TBUF_BASE_ADDR_HIGH 0xE86804 + +#define mmTPC2_CFG_SEMAPHORE 0xE86808 + +#define mmTPC2_CFG_VFLAGS 0xE8680C + +#define mmTPC2_CFG_SFLAGS 0xE86810 + +#define mmTPC2_CFG_LFSR_POLYNOM 0xE86818 + +#define mmTPC2_CFG_STATUS 0xE8681C + +#define mmTPC2_CFG_CFG_BASE_ADDRESS_HIGH 0xE86820 + +#define mmTPC2_CFG_CFG_SUBTRACT_VALUE 0xE86824 + +#define mmTPC2_CFG_SM_BASE_ADDRESS_LOW 0xE86828 + +#define mmTPC2_CFG_SM_BASE_ADDRESS_HIGH 0xE8682C + +#define mmTPC2_CFG_TPC_CMD 0xE86830 + +#define mmTPC2_CFG_TPC_EXECUTE 0xE86838 + +#define mmTPC2_CFG_TPC_STALL 0xE8683C + +#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_LOW 0xE86840 + +#define mmTPC2_CFG_ICACHE_BASE_ADDERESS_HIGH 0xE86844 + +#define mmTPC2_CFG_MSS_CONFIG 0xE86854 + +#define mmTPC2_CFG_TPC_INTR_CAUSE 0xE86858 + +#define mmTPC2_CFG_TPC_INTR_MASK 0xE8685C + +#define mmTPC2_CFG_TSB_CONFIG 0xE86860 + +#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xE86A00 + +#define mmTPC2_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xE86A04 + +#define mmTPC2_CFG_QM_TENSOR_0_PADDING_VALUE 0xE86A08 + +#define mmTPC2_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xE86A0C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_SIZE 0xE86A10 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xE86A14 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xE86A18 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_SIZE 0xE86A1C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xE86A20 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xE86A24 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_SIZE 0xE86A28 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xE86A2C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xE86A30 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_SIZE 0xE86A34 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xE86A38 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xE86A3C + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_SIZE 0xE86A40 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xE86A44 + +#define mmTPC2_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xE86A48 + +#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xE86A4C + +#define mmTPC2_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xE86A50 + +#define mmTPC2_CFG_QM_TENSOR_1_PADDING_VALUE 0xE86A54 + +#define mmTPC2_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xE86A58 + +#define 
mmTPC2_CFG_QM_TENSOR_1_DIM_0_SIZE 0xE86A5C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xE86A60 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xE86A64 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_SIZE 0xE86A68 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xE86A6C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xE86A70 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_SIZE 0xE86A74 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xE86A78 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xE86A7C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_SIZE 0xE86A80 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xE86A84 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xE86A88 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_SIZE 0xE86A8C + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xE86A90 + +#define mmTPC2_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xE86A94 + +#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xE86A98 + +#define mmTPC2_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xE86A9C + +#define mmTPC2_CFG_QM_TENSOR_2_PADDING_VALUE 0xE86AA0 + +#define mmTPC2_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xE86AA4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_SIZE 0xE86AA8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xE86AAC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xE86AB0 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_SIZE 0xE86AB4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xE86AB8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xE86ABC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_SIZE 0xE86AC0 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xE86AC4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xE86AC8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_SIZE 0xE86ACC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xE86AD0 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xE86AD4 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_SIZE 0xE86AD8 + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xE86ADC + +#define mmTPC2_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xE86AE0 + +#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xE86AE4 + +#define mmTPC2_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xE86AE8 + +#define mmTPC2_CFG_QM_TENSOR_3_PADDING_VALUE 0xE86AEC + +#define mmTPC2_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xE86AF0 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_SIZE 0xE86AF4 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xE86AF8 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xE86AFC + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_SIZE 0xE86B00 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xE86B04 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xE86B08 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_SIZE 0xE86B0C + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xE86B10 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xE86B14 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_SIZE 0xE86B18 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xE86B1C + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xE86B20 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_SIZE 0xE86B24 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xE86B28 + +#define mmTPC2_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xE86B2C + +#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xE86B30 + +#define mmTPC2_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xE86B34 + +#define mmTPC2_CFG_QM_TENSOR_4_PADDING_VALUE 0xE86B38 + +#define mmTPC2_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xE86B3C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_SIZE 0xE86B40 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xE86B44 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xE86B48 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_SIZE 0xE86B4C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xE86B50 + +#define 
mmTPC2_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xE86B54 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_SIZE 0xE86B58 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xE86B5C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xE86B60 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_SIZE 0xE86B64 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xE86B68 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xE86B6C + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_SIZE 0xE86B70 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xE86B74 + +#define mmTPC2_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xE86B78 + +#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xE86B7C + +#define mmTPC2_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xE86B80 + +#define mmTPC2_CFG_QM_TENSOR_5_PADDING_VALUE 0xE86B84 + +#define mmTPC2_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xE86B88 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_SIZE 0xE86B8C + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xE86B90 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xE86B94 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_SIZE 0xE86B98 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xE86B9C + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xE86BA0 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_SIZE 0xE86BA4 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xE86BA8 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xE86BAC + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_SIZE 0xE86BB0 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xE86BB4 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xE86BB8 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_SIZE 0xE86BBC + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xE86BC0 + +#define mmTPC2_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xE86BC4 + +#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xE86BC8 + +#define mmTPC2_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xE86BCC + +#define mmTPC2_CFG_QM_TENSOR_6_PADDING_VALUE 0xE86BD0 + +#define mmTPC2_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xE86BD4 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_SIZE 0xE86BD8 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xE86BDC + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xE86BE0 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_SIZE 0xE86BE4 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xE86BE8 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xE86BEC + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_SIZE 0xE86BF0 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xE86BF4 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xE86BF8 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_SIZE 0xE86BFC + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xE86C00 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xE86C04 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_SIZE 0xE86C08 + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xE86C0C + +#define mmTPC2_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xE86C10 + +#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xE86C14 + +#define mmTPC2_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xE86C18 + +#define mmTPC2_CFG_QM_TENSOR_7_PADDING_VALUE 0xE86C1C + +#define mmTPC2_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xE86C20 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_SIZE 0xE86C24 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xE86C28 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xE86C2C + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_SIZE 0xE86C30 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xE86C34 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xE86C38 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_SIZE 0xE86C3C + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xE86C40 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xE86C44 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_SIZE 0xE86C48 + +#define 
mmTPC2_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xE86C4C + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xE86C50 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_SIZE 0xE86C54 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xE86C58 + +#define mmTPC2_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xE86C5C + +#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xE86C60 + +#define mmTPC2_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xE86C64 + +#define mmTPC2_CFG_QM_TID_BASE_DIM_0 0xE86C68 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_0 0xE86C6C + +#define mmTPC2_CFG_QM_TID_BASE_DIM_1 0xE86C70 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_1 0xE86C74 + +#define mmTPC2_CFG_QM_TID_BASE_DIM_2 0xE86C78 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_2 0xE86C7C + +#define mmTPC2_CFG_QM_TID_BASE_DIM_3 0xE86C80 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_3 0xE86C84 + +#define mmTPC2_CFG_QM_TID_BASE_DIM_4 0xE86C88 + +#define mmTPC2_CFG_QM_TID_SIZE_DIM_4 0xE86C8C + +#define mmTPC2_CFG_QM_SRF_0 0xE86C90 + +#define mmTPC2_CFG_QM_SRF_1 0xE86C94 + +#define mmTPC2_CFG_QM_SRF_2 0xE86C98 + +#define mmTPC2_CFG_QM_SRF_3 0xE86C9C + +#define mmTPC2_CFG_QM_SRF_4 0xE86CA0 + +#define mmTPC2_CFG_QM_SRF_5 0xE86CA4 + +#define mmTPC2_CFG_QM_SRF_6 0xE86CA8 + +#define mmTPC2_CFG_QM_SRF_7 0xE86CAC + +#define mmTPC2_CFG_QM_SRF_8 0xE86CB0 + +#define mmTPC2_CFG_QM_SRF_9 0xE86CB4 + +#define mmTPC2_CFG_QM_SRF_10 0xE86CB8 + +#define mmTPC2_CFG_QM_SRF_11 0xE86CBC + +#define mmTPC2_CFG_QM_SRF_12 0xE86CC0 + +#define mmTPC2_CFG_QM_SRF_13 0xE86CC4 + +#define mmTPC2_CFG_QM_SRF_14 0xE86CC8 + +#define mmTPC2_CFG_QM_SRF_15 0xE86CCC + +#define mmTPC2_CFG_QM_SRF_16 0xE86CD0 + +#define mmTPC2_CFG_QM_SRF_17 0xE86CD4 + +#define mmTPC2_CFG_QM_SRF_18 0xE86CD8 + +#define mmTPC2_CFG_QM_SRF_19 0xE86CDC + +#define mmTPC2_CFG_QM_SRF_20 0xE86CE0 + +#define mmTPC2_CFG_QM_SRF_21 0xE86CE4 + +#define mmTPC2_CFG_QM_SRF_22 0xE86CE8 + +#define mmTPC2_CFG_QM_SRF_23 0xE86CEC + +#define mmTPC2_CFG_QM_SRF_24 0xE86CF0 + +#define mmTPC2_CFG_QM_SRF_25 0xE86CF4 + +#define mmTPC2_CFG_QM_SRF_26 0xE86CF8 + +#define mmTPC2_CFG_QM_SRF_27 0xE86CFC + +#define mmTPC2_CFG_QM_SRF_28 0xE86D00 + +#define mmTPC2_CFG_QM_SRF_29 0xE86D04 + +#define mmTPC2_CFG_QM_SRF_30 0xE86D08 + +#define mmTPC2_CFG_QM_SRF_31 0xE86D0C + +#define mmTPC2_CFG_QM_KERNEL_CONFIG 0xE86D10 + +#define mmTPC2_CFG_QM_SYNC_OBJECT_MESSAGE 0xE86D14 + +#define mmTPC2_CFG_ARUSER 0xE86D18 + +#define mmTPC2_CFG_AWUSER 0xE86D1C + +#define mmTPC2_CFG_FUNC_MBIST_CNTRL 0xE86E00 + +#define mmTPC2_CFG_FUNC_MBIST_PAT 0xE86E04 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_0 0xE86E08 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_1 0xE86E0C + +#define mmTPC2_CFG_FUNC_MBIST_MEM_2 0xE86E10 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_3 0xE86E14 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_4 0xE86E18 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_5 0xE86E1C + +#define mmTPC2_CFG_FUNC_MBIST_MEM_6 0xE86E20 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_7 0xE86E24 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_8 0xE86E28 + +#define mmTPC2_CFG_FUNC_MBIST_MEM_9 0xE86E2C + +#endif /* ASIC_REG_TPC2_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h new file mode 100644 index 000000000000..7a643887d6e1 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_CMDQ_REGS_H_ +#define ASIC_REG_TPC2_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC2_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC2_CMDQ_GLBL_CFG0 0xE89000 + +#define mmTPC2_CMDQ_GLBL_CFG1 0xE89004 + +#define mmTPC2_CMDQ_GLBL_PROT 0xE89008 + +#define mmTPC2_CMDQ_GLBL_ERR_CFG 0xE8900C + +#define mmTPC2_CMDQ_GLBL_ERR_ADDR_LO 0xE89010 + +#define mmTPC2_CMDQ_GLBL_ERR_ADDR_HI 0xE89014 + +#define mmTPC2_CMDQ_GLBL_ERR_WDATA 0xE89018 + +#define mmTPC2_CMDQ_GLBL_SECURE_PROPS 0xE8901C + +#define mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS 0xE89020 + +#define mmTPC2_CMDQ_GLBL_STS0 0xE89024 + +#define mmTPC2_CMDQ_GLBL_STS1 0xE89028 + +#define mmTPC2_CMDQ_CQ_CFG0 0xE890B0 + +#define mmTPC2_CMDQ_CQ_CFG1 0xE890B4 + +#define mmTPC2_CMDQ_CQ_ARUSER 0xE890B8 + +#define mmTPC2_CMDQ_CQ_PTR_LO 0xE890C0 + +#define mmTPC2_CMDQ_CQ_PTR_HI 0xE890C4 + +#define mmTPC2_CMDQ_CQ_TSIZE 0xE890C8 + +#define mmTPC2_CMDQ_CQ_CTL 0xE890CC + +#define mmTPC2_CMDQ_CQ_PTR_LO_STS 0xE890D4 + +#define mmTPC2_CMDQ_CQ_PTR_HI_STS 0xE890D8 + +#define mmTPC2_CMDQ_CQ_TSIZE_STS 0xE890DC + +#define mmTPC2_CMDQ_CQ_CTL_STS 0xE890E0 + +#define mmTPC2_CMDQ_CQ_STS0 0xE890E4 + +#define mmTPC2_CMDQ_CQ_STS1 0xE890E8 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_EN 0xE890F0 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xE890F4 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_SAT 0xE890F8 + +#define mmTPC2_CMDQ_CQ_RD_RATE_LIM_TOUT 0xE890FC + +#define mmTPC2_CMDQ_CQ_IFIFO_CNT 0xE89108 + +#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_LO 0xE89120 + +#define mmTPC2_CMDQ_CP_MSG_BASE0_ADDR_HI 0xE89124 + +#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_LO 0xE89128 + +#define mmTPC2_CMDQ_CP_MSG_BASE1_ADDR_HI 0xE8912C + +#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_LO 0xE89130 + +#define mmTPC2_CMDQ_CP_MSG_BASE2_ADDR_HI 0xE89134 + +#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_LO 0xE89138 + +#define mmTPC2_CMDQ_CP_MSG_BASE3_ADDR_HI 0xE8913C + +#define mmTPC2_CMDQ_CP_LDMA_TSIZE_OFFSET 0xE89140 + +#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xE89144 + +#define mmTPC2_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xE89148 + +#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xE8914C + +#define mmTPC2_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xE89150 + +#define mmTPC2_CMDQ_CP_LDMA_COMMIT_OFFSET 0xE89154 + +#define mmTPC2_CMDQ_CP_FENCE0_RDATA 0xE89158 + +#define mmTPC2_CMDQ_CP_FENCE1_RDATA 0xE8915C + +#define mmTPC2_CMDQ_CP_FENCE2_RDATA 0xE89160 + +#define mmTPC2_CMDQ_CP_FENCE3_RDATA 0xE89164 + +#define mmTPC2_CMDQ_CP_FENCE0_CNT 0xE89168 + +#define mmTPC2_CMDQ_CP_FENCE1_CNT 0xE8916C + +#define mmTPC2_CMDQ_CP_FENCE2_CNT 0xE89170 + +#define mmTPC2_CMDQ_CP_FENCE3_CNT 0xE89174 + +#define mmTPC2_CMDQ_CP_STS 0xE89178 + +#define mmTPC2_CMDQ_CP_CURRENT_INST_LO 0xE8917C + +#define mmTPC2_CMDQ_CP_CURRENT_INST_HI 0xE89180 + +#define mmTPC2_CMDQ_CP_BARRIER_CFG 0xE89184 + +#define mmTPC2_CMDQ_CP_DBG_0 0xE89188 + +#define mmTPC2_CMDQ_CQ_BUF_ADDR 0xE89308 + +#define mmTPC2_CMDQ_CQ_BUF_RDATA 0xE8930C + +#endif /* ASIC_REG_TPC2_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h new file mode 100644 index 000000000000..f3e32c018064 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. 
+ * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_QM_REGS_H_ +#define ASIC_REG_TPC2_QM_REGS_H_ + +/* + ***************************************** + * TPC2_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC2_QM_GLBL_CFG0 0xE88000 + +#define mmTPC2_QM_GLBL_CFG1 0xE88004 + +#define mmTPC2_QM_GLBL_PROT 0xE88008 + +#define mmTPC2_QM_GLBL_ERR_CFG 0xE8800C + +#define mmTPC2_QM_GLBL_ERR_ADDR_LO 0xE88010 + +#define mmTPC2_QM_GLBL_ERR_ADDR_HI 0xE88014 + +#define mmTPC2_QM_GLBL_ERR_WDATA 0xE88018 + +#define mmTPC2_QM_GLBL_SECURE_PROPS 0xE8801C + +#define mmTPC2_QM_GLBL_NON_SECURE_PROPS 0xE88020 + +#define mmTPC2_QM_GLBL_STS0 0xE88024 + +#define mmTPC2_QM_GLBL_STS1 0xE88028 + +#define mmTPC2_QM_PQ_BASE_LO 0xE88060 + +#define mmTPC2_QM_PQ_BASE_HI 0xE88064 + +#define mmTPC2_QM_PQ_SIZE 0xE88068 + +#define mmTPC2_QM_PQ_PI 0xE8806C + +#define mmTPC2_QM_PQ_CI 0xE88070 + +#define mmTPC2_QM_PQ_CFG0 0xE88074 + +#define mmTPC2_QM_PQ_CFG1 0xE88078 + +#define mmTPC2_QM_PQ_ARUSER 0xE8807C + +#define mmTPC2_QM_PQ_PUSH0 0xE88080 + +#define mmTPC2_QM_PQ_PUSH1 0xE88084 + +#define mmTPC2_QM_PQ_PUSH2 0xE88088 + +#define mmTPC2_QM_PQ_PUSH3 0xE8808C + +#define mmTPC2_QM_PQ_STS0 0xE88090 + +#define mmTPC2_QM_PQ_STS1 0xE88094 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_EN 0xE880A0 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xE880A4 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_SAT 0xE880A8 + +#define mmTPC2_QM_PQ_RD_RATE_LIM_TOUT 0xE880AC + +#define mmTPC2_QM_CQ_CFG0 0xE880B0 + +#define mmTPC2_QM_CQ_CFG1 0xE880B4 + +#define mmTPC2_QM_CQ_ARUSER 0xE880B8 + +#define mmTPC2_QM_CQ_PTR_LO 0xE880C0 + +#define mmTPC2_QM_CQ_PTR_HI 0xE880C4 + +#define mmTPC2_QM_CQ_TSIZE 0xE880C8 + +#define mmTPC2_QM_CQ_CTL 0xE880CC + +#define mmTPC2_QM_CQ_PTR_LO_STS 0xE880D4 + +#define mmTPC2_QM_CQ_PTR_HI_STS 0xE880D8 + +#define mmTPC2_QM_CQ_TSIZE_STS 0xE880DC + +#define mmTPC2_QM_CQ_CTL_STS 0xE880E0 + +#define mmTPC2_QM_CQ_STS0 0xE880E4 + +#define mmTPC2_QM_CQ_STS1 0xE880E8 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_EN 0xE880F0 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xE880F4 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_SAT 0xE880F8 + +#define mmTPC2_QM_CQ_RD_RATE_LIM_TOUT 0xE880FC + +#define mmTPC2_QM_CQ_IFIFO_CNT 0xE88108 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_LO 0xE88120 + +#define mmTPC2_QM_CP_MSG_BASE0_ADDR_HI 0xE88124 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_LO 0xE88128 + +#define mmTPC2_QM_CP_MSG_BASE1_ADDR_HI 0xE8812C + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_LO 0xE88130 + +#define mmTPC2_QM_CP_MSG_BASE2_ADDR_HI 0xE88134 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_LO 0xE88138 + +#define mmTPC2_QM_CP_MSG_BASE3_ADDR_HI 0xE8813C + +#define mmTPC2_QM_CP_LDMA_TSIZE_OFFSET 0xE88140 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xE88144 + +#define mmTPC2_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xE88148 + +#define mmTPC2_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xE8814C + +#define mmTPC2_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xE88150 + +#define mmTPC2_QM_CP_LDMA_COMMIT_OFFSET 0xE88154 + +#define mmTPC2_QM_CP_FENCE0_RDATA 0xE88158 + +#define mmTPC2_QM_CP_FENCE1_RDATA 0xE8815C + +#define mmTPC2_QM_CP_FENCE2_RDATA 0xE88160 + +#define mmTPC2_QM_CP_FENCE3_RDATA 0xE88164 + +#define mmTPC2_QM_CP_FENCE0_CNT 0xE88168 + +#define mmTPC2_QM_CP_FENCE1_CNT 0xE8816C + +#define mmTPC2_QM_CP_FENCE2_CNT 0xE88170 + +#define mmTPC2_QM_CP_FENCE3_CNT 0xE88174 + +#define mmTPC2_QM_CP_STS 0xE88178 + +#define mmTPC2_QM_CP_CURRENT_INST_LO 0xE8817C + 
+#define mmTPC2_QM_CP_CURRENT_INST_HI 0xE88180 + +#define mmTPC2_QM_CP_BARRIER_CFG 0xE88184 + +#define mmTPC2_QM_CP_DBG_0 0xE88188 + +#define mmTPC2_QM_PQ_BUF_ADDR 0xE88300 + +#define mmTPC2_QM_PQ_BUF_RDATA 0xE88304 + +#define mmTPC2_QM_CQ_BUF_ADDR 0xE88308 + +#define mmTPC2_QM_CQ_BUF_RDATA 0xE8830C + +#endif /* ASIC_REG_TPC2_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h new file mode 100644 index 000000000000..0eb0cd1fbd19 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC2_RTR_REGS_H_ +#define ASIC_REG_TPC2_RTR_REGS_H_ + +/* + ***************************************** + * TPC2_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC2_RTR_HBW_RD_RQ_E_ARB 0xE80100 + +#define mmTPC2_RTR_HBW_RD_RQ_W_ARB 0xE80104 + +#define mmTPC2_RTR_HBW_RD_RQ_N_ARB 0xE80108 + +#define mmTPC2_RTR_HBW_RD_RQ_S_ARB 0xE8010C + +#define mmTPC2_RTR_HBW_RD_RQ_L_ARB 0xE80110 + +#define mmTPC2_RTR_HBW_E_ARB_MAX 0xE80120 + +#define mmTPC2_RTR_HBW_W_ARB_MAX 0xE80124 + +#define mmTPC2_RTR_HBW_N_ARB_MAX 0xE80128 + +#define mmTPC2_RTR_HBW_S_ARB_MAX 0xE8012C + +#define mmTPC2_RTR_HBW_L_ARB_MAX 0xE80130 + +#define mmTPC2_RTR_HBW_RD_RS_E_ARB 0xE80140 + +#define mmTPC2_RTR_HBW_RD_RS_W_ARB 0xE80144 + +#define mmTPC2_RTR_HBW_RD_RS_N_ARB 0xE80148 + +#define mmTPC2_RTR_HBW_RD_RS_S_ARB 0xE8014C + +#define mmTPC2_RTR_HBW_RD_RS_L_ARB 0xE80150 + +#define mmTPC2_RTR_HBW_WR_RQ_E_ARB 0xE80170 + +#define mmTPC2_RTR_HBW_WR_RQ_W_ARB 0xE80174 + +#define mmTPC2_RTR_HBW_WR_RQ_N_ARB 0xE80178 + +#define mmTPC2_RTR_HBW_WR_RQ_S_ARB 0xE8017C + +#define mmTPC2_RTR_HBW_WR_RQ_L_ARB 0xE80180 + +#define mmTPC2_RTR_HBW_WR_RS_E_ARB 0xE80190 + +#define mmTPC2_RTR_HBW_WR_RS_W_ARB 0xE80194 + +#define mmTPC2_RTR_HBW_WR_RS_N_ARB 0xE80198 + +#define mmTPC2_RTR_HBW_WR_RS_S_ARB 0xE8019C + +#define mmTPC2_RTR_HBW_WR_RS_L_ARB 0xE801A0 + +#define mmTPC2_RTR_LBW_RD_RQ_E_ARB 0xE80200 + +#define mmTPC2_RTR_LBW_RD_RQ_W_ARB 0xE80204 + +#define mmTPC2_RTR_LBW_RD_RQ_N_ARB 0xE80208 + +#define mmTPC2_RTR_LBW_RD_RQ_S_ARB 0xE8020C + +#define mmTPC2_RTR_LBW_RD_RQ_L_ARB 0xE80210 + +#define mmTPC2_RTR_LBW_E_ARB_MAX 0xE80220 + +#define mmTPC2_RTR_LBW_W_ARB_MAX 0xE80224 + +#define mmTPC2_RTR_LBW_N_ARB_MAX 0xE80228 + +#define mmTPC2_RTR_LBW_S_ARB_MAX 0xE8022C + +#define mmTPC2_RTR_LBW_L_ARB_MAX 0xE80230 + +#define mmTPC2_RTR_LBW_RD_RS_E_ARB 0xE80250 + +#define mmTPC2_RTR_LBW_RD_RS_W_ARB 0xE80254 + +#define mmTPC2_RTR_LBW_RD_RS_N_ARB 0xE80258 + +#define mmTPC2_RTR_LBW_RD_RS_S_ARB 0xE8025C + +#define mmTPC2_RTR_LBW_RD_RS_L_ARB 0xE80260 + +#define mmTPC2_RTR_LBW_WR_RQ_E_ARB 0xE80270 + +#define mmTPC2_RTR_LBW_WR_RQ_W_ARB 0xE80274 + +#define mmTPC2_RTR_LBW_WR_RQ_N_ARB 0xE80278 + +#define mmTPC2_RTR_LBW_WR_RQ_S_ARB 0xE8027C + +#define mmTPC2_RTR_LBW_WR_RQ_L_ARB 0xE80280 + +#define mmTPC2_RTR_LBW_WR_RS_E_ARB 0xE80290 + +#define mmTPC2_RTR_LBW_WR_RS_W_ARB 0xE80294 + +#define mmTPC2_RTR_LBW_WR_RS_N_ARB 0xE80298 + +#define mmTPC2_RTR_LBW_WR_RS_S_ARB 0xE8029C + +#define mmTPC2_RTR_LBW_WR_RS_L_ARB 0xE802A0 + +#define mmTPC2_RTR_DBG_E_ARB 0xE80300 + +#define mmTPC2_RTR_DBG_W_ARB 0xE80304 + +#define mmTPC2_RTR_DBG_N_ARB 
0xE80308 + +#define mmTPC2_RTR_DBG_S_ARB 0xE8030C + +#define mmTPC2_RTR_DBG_L_ARB 0xE80310 + +#define mmTPC2_RTR_DBG_E_ARB_MAX 0xE80320 + +#define mmTPC2_RTR_DBG_W_ARB_MAX 0xE80324 + +#define mmTPC2_RTR_DBG_N_ARB_MAX 0xE80328 + +#define mmTPC2_RTR_DBG_S_ARB_MAX 0xE8032C + +#define mmTPC2_RTR_DBG_L_ARB_MAX 0xE80330 + +#define mmTPC2_RTR_SPLIT_COEF_0 0xE80400 + +#define mmTPC2_RTR_SPLIT_COEF_1 0xE80404 + +#define mmTPC2_RTR_SPLIT_COEF_2 0xE80408 + +#define mmTPC2_RTR_SPLIT_COEF_3 0xE8040C + +#define mmTPC2_RTR_SPLIT_COEF_4 0xE80410 + +#define mmTPC2_RTR_SPLIT_COEF_5 0xE80414 + +#define mmTPC2_RTR_SPLIT_COEF_6 0xE80418 + +#define mmTPC2_RTR_SPLIT_COEF_7 0xE8041C + +#define mmTPC2_RTR_SPLIT_COEF_8 0xE80420 + +#define mmTPC2_RTR_SPLIT_COEF_9 0xE80424 + +#define mmTPC2_RTR_SPLIT_CFG 0xE80440 + +#define mmTPC2_RTR_SPLIT_RD_SAT 0xE80444 + +#define mmTPC2_RTR_SPLIT_RD_RST_TOKEN 0xE80448 + +#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_0 0xE8044C + +#define mmTPC2_RTR_SPLIT_RD_TIMEOUT_1 0xE80450 + +#define mmTPC2_RTR_SPLIT_WR_SAT 0xE80454 + +#define mmTPC2_RTR_WPLIT_WR_TST_TOLEN 0xE80458 + +#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_0 0xE8045C + +#define mmTPC2_RTR_SPLIT_WR_TIMEOUT_1 0xE80460 + +#define mmTPC2_RTR_HBW_RANGE_HIT 0xE80470 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_0 0xE80480 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_1 0xE80484 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_2 0xE80488 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_3 0xE8048C + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_4 0xE80490 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_5 0xE80494 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_6 0xE80498 + +#define mmTPC2_RTR_HBW_RANGE_MASK_L_7 0xE8049C + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_0 0xE804A0 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_1 0xE804A4 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_2 0xE804A8 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_3 0xE804AC + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_4 0xE804B0 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_5 0xE804B4 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_6 0xE804B8 + +#define mmTPC2_RTR_HBW_RANGE_MASK_H_7 0xE804BC + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_0 0xE804C0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_1 0xE804C4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_2 0xE804C8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_3 0xE804CC + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_4 0xE804D0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_5 0xE804D4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_6 0xE804D8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_L_7 0xE804DC + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_0 0xE804E0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_1 0xE804E4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_2 0xE804E8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_3 0xE804EC + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_4 0xE804F0 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_5 0xE804F4 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_6 0xE804F8 + +#define mmTPC2_RTR_HBW_RANGE_BASE_H_7 0xE804FC + +#define mmTPC2_RTR_LBW_RANGE_HIT 0xE80500 + +#define mmTPC2_RTR_LBW_RANGE_MASK_0 0xE80510 + +#define mmTPC2_RTR_LBW_RANGE_MASK_1 0xE80514 + +#define mmTPC2_RTR_LBW_RANGE_MASK_2 0xE80518 + +#define mmTPC2_RTR_LBW_RANGE_MASK_3 0xE8051C + +#define mmTPC2_RTR_LBW_RANGE_MASK_4 0xE80520 + +#define mmTPC2_RTR_LBW_RANGE_MASK_5 0xE80524 + +#define mmTPC2_RTR_LBW_RANGE_MASK_6 0xE80528 + +#define mmTPC2_RTR_LBW_RANGE_MASK_7 0xE8052C + +#define mmTPC2_RTR_LBW_RANGE_MASK_8 0xE80530 + +#define mmTPC2_RTR_LBW_RANGE_MASK_9 0xE80534 + +#define mmTPC2_RTR_LBW_RANGE_MASK_10 0xE80538 + +#define mmTPC2_RTR_LBW_RANGE_MASK_11 0xE8053C + +#define mmTPC2_RTR_LBW_RANGE_MASK_12 0xE80540 + +#define mmTPC2_RTR_LBW_RANGE_MASK_13 0xE80544 + 
+#define mmTPC2_RTR_LBW_RANGE_MASK_14 0xE80548 + +#define mmTPC2_RTR_LBW_RANGE_MASK_15 0xE8054C + +#define mmTPC2_RTR_LBW_RANGE_BASE_0 0xE80550 + +#define mmTPC2_RTR_LBW_RANGE_BASE_1 0xE80554 + +#define mmTPC2_RTR_LBW_RANGE_BASE_2 0xE80558 + +#define mmTPC2_RTR_LBW_RANGE_BASE_3 0xE8055C + +#define mmTPC2_RTR_LBW_RANGE_BASE_4 0xE80560 + +#define mmTPC2_RTR_LBW_RANGE_BASE_5 0xE80564 + +#define mmTPC2_RTR_LBW_RANGE_BASE_6 0xE80568 + +#define mmTPC2_RTR_LBW_RANGE_BASE_7 0xE8056C + +#define mmTPC2_RTR_LBW_RANGE_BASE_8 0xE80570 + +#define mmTPC2_RTR_LBW_RANGE_BASE_9 0xE80574 + +#define mmTPC2_RTR_LBW_RANGE_BASE_10 0xE80578 + +#define mmTPC2_RTR_LBW_RANGE_BASE_11 0xE8057C + +#define mmTPC2_RTR_LBW_RANGE_BASE_12 0xE80580 + +#define mmTPC2_RTR_LBW_RANGE_BASE_13 0xE80584 + +#define mmTPC2_RTR_LBW_RANGE_BASE_14 0xE80588 + +#define mmTPC2_RTR_LBW_RANGE_BASE_15 0xE8058C + +#define mmTPC2_RTR_RGLTR 0xE80590 + +#define mmTPC2_RTR_RGLTR_WR_RESULT 0xE80594 + +#define mmTPC2_RTR_RGLTR_RD_RESULT 0xE80598 + +#define mmTPC2_RTR_SCRAMB_EN 0xE80600 + +#define mmTPC2_RTR_NON_LIN_SCRAMB 0xE80604 + +#endif /* ASIC_REG_TPC2_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h new file mode 100644 index 000000000000..0baf63c69b25 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_CFG_REGS_H_ +#define ASIC_REG_TPC3_CFG_REGS_H_ + +/* + ***************************************** + * TPC3_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xEC6400 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xEC6404 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xEC6408 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xEC640C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xEC6410 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xEC6414 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6418 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xEC641C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xEC6420 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6424 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xEC6428 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xEC642C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6430 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xEC6434 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xEC6438 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xEC643C + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xEC6440 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xEC6444 + +#define mmTPC3_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6448 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xEC644C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xEC6450 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xEC6454 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xEC6458 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xEC645C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xEC6460 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6464 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xEC6468 + +#define 
mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xEC646C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6470 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xEC6474 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xEC6478 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xEC647C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xEC6480 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xEC6484 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6488 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xEC648C + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xEC6490 + +#define mmTPC3_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6494 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xEC6498 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xEC649C + +#define mmTPC3_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xEC64A0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xEC64A4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xEC64A8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xEC64AC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xEC64B0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xEC64B4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xEC64B8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xEC64BC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xEC64C0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xEC64C4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xEC64C8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xEC64CC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xEC64D0 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xEC64D4 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xEC64D8 + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xEC64DC + +#define mmTPC3_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xEC64E0 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xEC64E4 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xEC64E8 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xEC64EC + +#define mmTPC3_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xEC64F0 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xEC64F4 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xEC64F8 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xEC64FC + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xEC6500 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xEC6504 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6508 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xEC650C + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xEC6510 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6514 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xEC6518 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xEC651C + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6520 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xEC6524 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xEC6528 + +#define mmTPC3_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xEC652C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xEC6530 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xEC6534 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xEC6538 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xEC653C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xEC6540 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xEC6544 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6548 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xEC654C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xEC6550 + +#define 
mmTPC3_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6554 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xEC6558 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xEC655C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6560 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xEC6564 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xEC6568 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xEC656C + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xEC6570 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xEC6574 + +#define mmTPC3_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6578 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xEC657C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xEC6580 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xEC6584 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xEC6588 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xEC658C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xEC6590 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6594 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xEC6598 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xEC659C + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xEC65A0 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xEC65A4 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xEC65A8 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xEC65AC + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xEC65B0 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xEC65B4 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xEC65B8 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xEC65BC + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xEC65C0 + +#define mmTPC3_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xEC65C4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xEC65C8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xEC65CC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xEC65D0 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xEC65D4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xEC65D8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xEC65DC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xEC65E0 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xEC65E4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xEC65E8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xEC65EC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xEC65F0 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xEC65F4 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xEC65F8 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xEC65FC + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xEC6600 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6604 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xEC6608 + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xEC660C + +#define mmTPC3_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6610 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xEC6614 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xEC6618 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xEC661C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xEC6620 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xEC6624 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xEC6628 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xEC662C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xEC6630 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xEC6634 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6638 + +#define 
mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xEC663C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xEC6640 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6644 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xEC6648 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xEC664C + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6650 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xEC6654 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xEC6658 + +#define mmTPC3_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xEC665C + +#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xEC6660 + +#define mmTPC3_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xEC6664 + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_0 0xEC6668 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_0 0xEC666C + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_1 0xEC6670 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_1 0xEC6674 + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_2 0xEC6678 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_2 0xEC667C + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_3 0xEC6680 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_3 0xEC6684 + +#define mmTPC3_CFG_KERNEL_TID_BASE_DIM_4 0xEC6688 + +#define mmTPC3_CFG_KERNEL_TID_SIZE_DIM_4 0xEC668C + +#define mmTPC3_CFG_KERNEL_SRF_0 0xEC6690 + +#define mmTPC3_CFG_KERNEL_SRF_1 0xEC6694 + +#define mmTPC3_CFG_KERNEL_SRF_2 0xEC6698 + +#define mmTPC3_CFG_KERNEL_SRF_3 0xEC669C + +#define mmTPC3_CFG_KERNEL_SRF_4 0xEC66A0 + +#define mmTPC3_CFG_KERNEL_SRF_5 0xEC66A4 + +#define mmTPC3_CFG_KERNEL_SRF_6 0xEC66A8 + +#define mmTPC3_CFG_KERNEL_SRF_7 0xEC66AC + +#define mmTPC3_CFG_KERNEL_SRF_8 0xEC66B0 + +#define mmTPC3_CFG_KERNEL_SRF_9 0xEC66B4 + +#define mmTPC3_CFG_KERNEL_SRF_10 0xEC66B8 + +#define mmTPC3_CFG_KERNEL_SRF_11 0xEC66BC + +#define mmTPC3_CFG_KERNEL_SRF_12 0xEC66C0 + +#define mmTPC3_CFG_KERNEL_SRF_13 0xEC66C4 + +#define mmTPC3_CFG_KERNEL_SRF_14 0xEC66C8 + +#define mmTPC3_CFG_KERNEL_SRF_15 0xEC66CC + +#define mmTPC3_CFG_KERNEL_SRF_16 0xEC66D0 + +#define mmTPC3_CFG_KERNEL_SRF_17 0xEC66D4 + +#define mmTPC3_CFG_KERNEL_SRF_18 0xEC66D8 + +#define mmTPC3_CFG_KERNEL_SRF_19 0xEC66DC + +#define mmTPC3_CFG_KERNEL_SRF_20 0xEC66E0 + +#define mmTPC3_CFG_KERNEL_SRF_21 0xEC66E4 + +#define mmTPC3_CFG_KERNEL_SRF_22 0xEC66E8 + +#define mmTPC3_CFG_KERNEL_SRF_23 0xEC66EC + +#define mmTPC3_CFG_KERNEL_SRF_24 0xEC66F0 + +#define mmTPC3_CFG_KERNEL_SRF_25 0xEC66F4 + +#define mmTPC3_CFG_KERNEL_SRF_26 0xEC66F8 + +#define mmTPC3_CFG_KERNEL_SRF_27 0xEC66FC + +#define mmTPC3_CFG_KERNEL_SRF_28 0xEC6700 + +#define mmTPC3_CFG_KERNEL_SRF_29 0xEC6704 + +#define mmTPC3_CFG_KERNEL_SRF_30 0xEC6708 + +#define mmTPC3_CFG_KERNEL_SRF_31 0xEC670C + +#define mmTPC3_CFG_KERNEL_KERNEL_CONFIG 0xEC6710 + +#define mmTPC3_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xEC6714 + +#define mmTPC3_CFG_RESERVED_DESC_END 0xEC6738 + +#define mmTPC3_CFG_ROUND_CSR 0xEC67FC + +#define mmTPC3_CFG_TBUF_BASE_ADDR_LOW 0xEC6800 + +#define mmTPC3_CFG_TBUF_BASE_ADDR_HIGH 0xEC6804 + +#define mmTPC3_CFG_SEMAPHORE 0xEC6808 + +#define mmTPC3_CFG_VFLAGS 0xEC680C + +#define mmTPC3_CFG_SFLAGS 0xEC6810 + +#define mmTPC3_CFG_LFSR_POLYNOM 0xEC6818 + +#define mmTPC3_CFG_STATUS 0xEC681C + +#define mmTPC3_CFG_CFG_BASE_ADDRESS_HIGH 0xEC6820 + +#define mmTPC3_CFG_CFG_SUBTRACT_VALUE 0xEC6824 + +#define mmTPC3_CFG_SM_BASE_ADDRESS_LOW 0xEC6828 + +#define mmTPC3_CFG_SM_BASE_ADDRESS_HIGH 0xEC682C + +#define mmTPC3_CFG_TPC_CMD 0xEC6830 + +#define mmTPC3_CFG_TPC_EXECUTE 0xEC6838 + +#define mmTPC3_CFG_TPC_STALL 0xEC683C + +#define mmTPC3_CFG_ICACHE_BASE_ADDERESS_LOW 0xEC6840 + +#define 
mmTPC3_CFG_ICACHE_BASE_ADDERESS_HIGH 0xEC6844 + +#define mmTPC3_CFG_MSS_CONFIG 0xEC6854 + +#define mmTPC3_CFG_TPC_INTR_CAUSE 0xEC6858 + +#define mmTPC3_CFG_TPC_INTR_MASK 0xEC685C + +#define mmTPC3_CFG_TSB_CONFIG 0xEC6860 + +#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xEC6A00 + +#define mmTPC3_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xEC6A04 + +#define mmTPC3_CFG_QM_TENSOR_0_PADDING_VALUE 0xEC6A08 + +#define mmTPC3_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xEC6A0C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_SIZE 0xEC6A10 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xEC6A14 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xEC6A18 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_SIZE 0xEC6A1C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xEC6A20 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xEC6A24 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_SIZE 0xEC6A28 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xEC6A2C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xEC6A30 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_SIZE 0xEC6A34 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xEC6A38 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xEC6A3C + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_SIZE 0xEC6A40 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xEC6A44 + +#define mmTPC3_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xEC6A48 + +#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xEC6A4C + +#define mmTPC3_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xEC6A50 + +#define mmTPC3_CFG_QM_TENSOR_1_PADDING_VALUE 0xEC6A54 + +#define mmTPC3_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xEC6A58 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_SIZE 0xEC6A5C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xEC6A60 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xEC6A64 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_SIZE 0xEC6A68 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xEC6A6C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xEC6A70 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_SIZE 0xEC6A74 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xEC6A78 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xEC6A7C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_SIZE 0xEC6A80 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xEC6A84 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xEC6A88 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_SIZE 0xEC6A8C + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xEC6A90 + +#define mmTPC3_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xEC6A94 + +#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xEC6A98 + +#define mmTPC3_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xEC6A9C + +#define mmTPC3_CFG_QM_TENSOR_2_PADDING_VALUE 0xEC6AA0 + +#define mmTPC3_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xEC6AA4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_SIZE 0xEC6AA8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xEC6AAC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xEC6AB0 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_SIZE 0xEC6AB4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xEC6AB8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xEC6ABC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_SIZE 0xEC6AC0 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xEC6AC4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xEC6AC8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_SIZE 0xEC6ACC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xEC6AD0 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xEC6AD4 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_SIZE 0xEC6AD8 + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xEC6ADC + +#define mmTPC3_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xEC6AE0 + +#define mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xEC6AE4 + +#define 
mmTPC3_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xEC6AE8 + +#define mmTPC3_CFG_QM_TENSOR_3_PADDING_VALUE 0xEC6AEC + +#define mmTPC3_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xEC6AF0 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_SIZE 0xEC6AF4 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xEC6AF8 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xEC6AFC + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_SIZE 0xEC6B00 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xEC6B04 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xEC6B08 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_SIZE 0xEC6B0C + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xEC6B10 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xEC6B14 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_SIZE 0xEC6B18 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xEC6B1C + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xEC6B20 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_SIZE 0xEC6B24 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xEC6B28 + +#define mmTPC3_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xEC6B2C + +#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xEC6B30 + +#define mmTPC3_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xEC6B34 + +#define mmTPC3_CFG_QM_TENSOR_4_PADDING_VALUE 0xEC6B38 + +#define mmTPC3_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xEC6B3C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_SIZE 0xEC6B40 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xEC6B44 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xEC6B48 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_SIZE 0xEC6B4C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xEC6B50 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xEC6B54 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_SIZE 0xEC6B58 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xEC6B5C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xEC6B60 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_SIZE 0xEC6B64 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xEC6B68 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xEC6B6C + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_SIZE 0xEC6B70 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xEC6B74 + +#define mmTPC3_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xEC6B78 + +#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xEC6B7C + +#define mmTPC3_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xEC6B80 + +#define mmTPC3_CFG_QM_TENSOR_5_PADDING_VALUE 0xEC6B84 + +#define mmTPC3_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xEC6B88 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_SIZE 0xEC6B8C + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xEC6B90 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xEC6B94 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_SIZE 0xEC6B98 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xEC6B9C + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xEC6BA0 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_SIZE 0xEC6BA4 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xEC6BA8 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xEC6BAC + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_SIZE 0xEC6BB0 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xEC6BB4 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xEC6BB8 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_SIZE 0xEC6BBC + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xEC6BC0 + +#define mmTPC3_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xEC6BC4 + +#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xEC6BC8 + +#define mmTPC3_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xEC6BCC + +#define mmTPC3_CFG_QM_TENSOR_6_PADDING_VALUE 0xEC6BD0 + +#define mmTPC3_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xEC6BD4 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_SIZE 0xEC6BD8 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xEC6BDC + +#define 
mmTPC3_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xEC6BE0 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_SIZE 0xEC6BE4 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xEC6BE8 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xEC6BEC + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_SIZE 0xEC6BF0 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xEC6BF4 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xEC6BF8 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_SIZE 0xEC6BFC + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xEC6C00 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xEC6C04 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_SIZE 0xEC6C08 + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xEC6C0C + +#define mmTPC3_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xEC6C10 + +#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xEC6C14 + +#define mmTPC3_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xEC6C18 + +#define mmTPC3_CFG_QM_TENSOR_7_PADDING_VALUE 0xEC6C1C + +#define mmTPC3_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xEC6C20 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_SIZE 0xEC6C24 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xEC6C28 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xEC6C2C + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_SIZE 0xEC6C30 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xEC6C34 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xEC6C38 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_SIZE 0xEC6C3C + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xEC6C40 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xEC6C44 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_SIZE 0xEC6C48 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xEC6C4C + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xEC6C50 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_SIZE 0xEC6C54 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xEC6C58 + +#define mmTPC3_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xEC6C5C + +#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xEC6C60 + +#define mmTPC3_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xEC6C64 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_0 0xEC6C68 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_0 0xEC6C6C + +#define mmTPC3_CFG_QM_TID_BASE_DIM_1 0xEC6C70 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_1 0xEC6C74 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_2 0xEC6C78 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_2 0xEC6C7C + +#define mmTPC3_CFG_QM_TID_BASE_DIM_3 0xEC6C80 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_3 0xEC6C84 + +#define mmTPC3_CFG_QM_TID_BASE_DIM_4 0xEC6C88 + +#define mmTPC3_CFG_QM_TID_SIZE_DIM_4 0xEC6C8C + +#define mmTPC3_CFG_QM_SRF_0 0xEC6C90 + +#define mmTPC3_CFG_QM_SRF_1 0xEC6C94 + +#define mmTPC3_CFG_QM_SRF_2 0xEC6C98 + +#define mmTPC3_CFG_QM_SRF_3 0xEC6C9C + +#define mmTPC3_CFG_QM_SRF_4 0xEC6CA0 + +#define mmTPC3_CFG_QM_SRF_5 0xEC6CA4 + +#define mmTPC3_CFG_QM_SRF_6 0xEC6CA8 + +#define mmTPC3_CFG_QM_SRF_7 0xEC6CAC + +#define mmTPC3_CFG_QM_SRF_8 0xEC6CB0 + +#define mmTPC3_CFG_QM_SRF_9 0xEC6CB4 + +#define mmTPC3_CFG_QM_SRF_10 0xEC6CB8 + +#define mmTPC3_CFG_QM_SRF_11 0xEC6CBC + +#define mmTPC3_CFG_QM_SRF_12 0xEC6CC0 + +#define mmTPC3_CFG_QM_SRF_13 0xEC6CC4 + +#define mmTPC3_CFG_QM_SRF_14 0xEC6CC8 + +#define mmTPC3_CFG_QM_SRF_15 0xEC6CCC + +#define mmTPC3_CFG_QM_SRF_16 0xEC6CD0 + +#define mmTPC3_CFG_QM_SRF_17 0xEC6CD4 + +#define mmTPC3_CFG_QM_SRF_18 0xEC6CD8 + +#define mmTPC3_CFG_QM_SRF_19 0xEC6CDC + +#define mmTPC3_CFG_QM_SRF_20 0xEC6CE0 + +#define mmTPC3_CFG_QM_SRF_21 0xEC6CE4 + +#define mmTPC3_CFG_QM_SRF_22 0xEC6CE8 + +#define mmTPC3_CFG_QM_SRF_23 0xEC6CEC + +#define mmTPC3_CFG_QM_SRF_24 0xEC6CF0 + +#define mmTPC3_CFG_QM_SRF_25 0xEC6CF4 + +#define mmTPC3_CFG_QM_SRF_26 0xEC6CF8 + +#define mmTPC3_CFG_QM_SRF_27 
0xEC6CFC + +#define mmTPC3_CFG_QM_SRF_28 0xEC6D00 + +#define mmTPC3_CFG_QM_SRF_29 0xEC6D04 + +#define mmTPC3_CFG_QM_SRF_30 0xEC6D08 + +#define mmTPC3_CFG_QM_SRF_31 0xEC6D0C + +#define mmTPC3_CFG_QM_KERNEL_CONFIG 0xEC6D10 + +#define mmTPC3_CFG_QM_SYNC_OBJECT_MESSAGE 0xEC6D14 + +#define mmTPC3_CFG_ARUSER 0xEC6D18 + +#define mmTPC3_CFG_AWUSER 0xEC6D1C + +#define mmTPC3_CFG_FUNC_MBIST_CNTRL 0xEC6E00 + +#define mmTPC3_CFG_FUNC_MBIST_PAT 0xEC6E04 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_0 0xEC6E08 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_1 0xEC6E0C + +#define mmTPC3_CFG_FUNC_MBIST_MEM_2 0xEC6E10 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_3 0xEC6E14 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_4 0xEC6E18 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_5 0xEC6E1C + +#define mmTPC3_CFG_FUNC_MBIST_MEM_6 0xEC6E20 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_7 0xEC6E24 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_8 0xEC6E28 + +#define mmTPC3_CFG_FUNC_MBIST_MEM_9 0xEC6E2C + +#endif /* ASIC_REG_TPC3_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h new file mode 100644 index 000000000000..82a5261e852f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_CMDQ_REGS_H_ +#define ASIC_REG_TPC3_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC3_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC3_CMDQ_GLBL_CFG0 0xEC9000 + +#define mmTPC3_CMDQ_GLBL_CFG1 0xEC9004 + +#define mmTPC3_CMDQ_GLBL_PROT 0xEC9008 + +#define mmTPC3_CMDQ_GLBL_ERR_CFG 0xEC900C + +#define mmTPC3_CMDQ_GLBL_ERR_ADDR_LO 0xEC9010 + +#define mmTPC3_CMDQ_GLBL_ERR_ADDR_HI 0xEC9014 + +#define mmTPC3_CMDQ_GLBL_ERR_WDATA 0xEC9018 + +#define mmTPC3_CMDQ_GLBL_SECURE_PROPS 0xEC901C + +#define mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS 0xEC9020 + +#define mmTPC3_CMDQ_GLBL_STS0 0xEC9024 + +#define mmTPC3_CMDQ_GLBL_STS1 0xEC9028 + +#define mmTPC3_CMDQ_CQ_CFG0 0xEC90B0 + +#define mmTPC3_CMDQ_CQ_CFG1 0xEC90B4 + +#define mmTPC3_CMDQ_CQ_ARUSER 0xEC90B8 + +#define mmTPC3_CMDQ_CQ_PTR_LO 0xEC90C0 + +#define mmTPC3_CMDQ_CQ_PTR_HI 0xEC90C4 + +#define mmTPC3_CMDQ_CQ_TSIZE 0xEC90C8 + +#define mmTPC3_CMDQ_CQ_CTL 0xEC90CC + +#define mmTPC3_CMDQ_CQ_PTR_LO_STS 0xEC90D4 + +#define mmTPC3_CMDQ_CQ_PTR_HI_STS 0xEC90D8 + +#define mmTPC3_CMDQ_CQ_TSIZE_STS 0xEC90DC + +#define mmTPC3_CMDQ_CQ_CTL_STS 0xEC90E0 + +#define mmTPC3_CMDQ_CQ_STS0 0xEC90E4 + +#define mmTPC3_CMDQ_CQ_STS1 0xEC90E8 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_EN 0xEC90F0 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xEC90F4 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_SAT 0xEC90F8 + +#define mmTPC3_CMDQ_CQ_RD_RATE_LIM_TOUT 0xEC90FC + +#define mmTPC3_CMDQ_CQ_IFIFO_CNT 0xEC9108 + +#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_LO 0xEC9120 + +#define mmTPC3_CMDQ_CP_MSG_BASE0_ADDR_HI 0xEC9124 + +#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_LO 0xEC9128 + +#define mmTPC3_CMDQ_CP_MSG_BASE1_ADDR_HI 0xEC912C + +#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_LO 0xEC9130 + +#define mmTPC3_CMDQ_CP_MSG_BASE2_ADDR_HI 0xEC9134 + +#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_LO 0xEC9138 + +#define mmTPC3_CMDQ_CP_MSG_BASE3_ADDR_HI 0xEC913C + +#define mmTPC3_CMDQ_CP_LDMA_TSIZE_OFFSET 0xEC9140 + +#define 
mmTPC3_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC9144 + +#define mmTPC3_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC9148 + +#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xEC914C + +#define mmTPC3_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xEC9150 + +#define mmTPC3_CMDQ_CP_LDMA_COMMIT_OFFSET 0xEC9154 + +#define mmTPC3_CMDQ_CP_FENCE0_RDATA 0xEC9158 + +#define mmTPC3_CMDQ_CP_FENCE1_RDATA 0xEC915C + +#define mmTPC3_CMDQ_CP_FENCE2_RDATA 0xEC9160 + +#define mmTPC3_CMDQ_CP_FENCE3_RDATA 0xEC9164 + +#define mmTPC3_CMDQ_CP_FENCE0_CNT 0xEC9168 + +#define mmTPC3_CMDQ_CP_FENCE1_CNT 0xEC916C + +#define mmTPC3_CMDQ_CP_FENCE2_CNT 0xEC9170 + +#define mmTPC3_CMDQ_CP_FENCE3_CNT 0xEC9174 + +#define mmTPC3_CMDQ_CP_STS 0xEC9178 + +#define mmTPC3_CMDQ_CP_CURRENT_INST_LO 0xEC917C + +#define mmTPC3_CMDQ_CP_CURRENT_INST_HI 0xEC9180 + +#define mmTPC3_CMDQ_CP_BARRIER_CFG 0xEC9184 + +#define mmTPC3_CMDQ_CP_DBG_0 0xEC9188 + +#define mmTPC3_CMDQ_CQ_BUF_ADDR 0xEC9308 + +#define mmTPC3_CMDQ_CQ_BUF_RDATA 0xEC930C + +#endif /* ASIC_REG_TPC3_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h new file mode 100644 index 000000000000..b05b1e18e664 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_QM_REGS_H_ +#define ASIC_REG_TPC3_QM_REGS_H_ + +/* + ***************************************** + * TPC3_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC3_QM_GLBL_CFG0 0xEC8000 + +#define mmTPC3_QM_GLBL_CFG1 0xEC8004 + +#define mmTPC3_QM_GLBL_PROT 0xEC8008 + +#define mmTPC3_QM_GLBL_ERR_CFG 0xEC800C + +#define mmTPC3_QM_GLBL_ERR_ADDR_LO 0xEC8010 + +#define mmTPC3_QM_GLBL_ERR_ADDR_HI 0xEC8014 + +#define mmTPC3_QM_GLBL_ERR_WDATA 0xEC8018 + +#define mmTPC3_QM_GLBL_SECURE_PROPS 0xEC801C + +#define mmTPC3_QM_GLBL_NON_SECURE_PROPS 0xEC8020 + +#define mmTPC3_QM_GLBL_STS0 0xEC8024 + +#define mmTPC3_QM_GLBL_STS1 0xEC8028 + +#define mmTPC3_QM_PQ_BASE_LO 0xEC8060 + +#define mmTPC3_QM_PQ_BASE_HI 0xEC8064 + +#define mmTPC3_QM_PQ_SIZE 0xEC8068 + +#define mmTPC3_QM_PQ_PI 0xEC806C + +#define mmTPC3_QM_PQ_CI 0xEC8070 + +#define mmTPC3_QM_PQ_CFG0 0xEC8074 + +#define mmTPC3_QM_PQ_CFG1 0xEC8078 + +#define mmTPC3_QM_PQ_ARUSER 0xEC807C + +#define mmTPC3_QM_PQ_PUSH0 0xEC8080 + +#define mmTPC3_QM_PQ_PUSH1 0xEC8084 + +#define mmTPC3_QM_PQ_PUSH2 0xEC8088 + +#define mmTPC3_QM_PQ_PUSH3 0xEC808C + +#define mmTPC3_QM_PQ_STS0 0xEC8090 + +#define mmTPC3_QM_PQ_STS1 0xEC8094 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_EN 0xEC80A0 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xEC80A4 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_SAT 0xEC80A8 + +#define mmTPC3_QM_PQ_RD_RATE_LIM_TOUT 0xEC80AC + +#define mmTPC3_QM_CQ_CFG0 0xEC80B0 + +#define mmTPC3_QM_CQ_CFG1 0xEC80B4 + +#define mmTPC3_QM_CQ_ARUSER 0xEC80B8 + +#define mmTPC3_QM_CQ_PTR_LO 0xEC80C0 + +#define mmTPC3_QM_CQ_PTR_HI 0xEC80C4 + +#define mmTPC3_QM_CQ_TSIZE 0xEC80C8 + +#define mmTPC3_QM_CQ_CTL 0xEC80CC + +#define mmTPC3_QM_CQ_PTR_LO_STS 0xEC80D4 + +#define mmTPC3_QM_CQ_PTR_HI_STS 0xEC80D8 + +#define mmTPC3_QM_CQ_TSIZE_STS 0xEC80DC + +#define mmTPC3_QM_CQ_CTL_STS 0xEC80E0 + +#define mmTPC3_QM_CQ_STS0 0xEC80E4 + +#define mmTPC3_QM_CQ_STS1 0xEC80E8 + +#define mmTPC3_QM_CQ_RD_RATE_LIM_EN 
0xEC80F0 + +#define mmTPC3_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xEC80F4 + +#define mmTPC3_QM_CQ_RD_RATE_LIM_SAT 0xEC80F8 + +#define mmTPC3_QM_CQ_RD_RATE_LIM_TOUT 0xEC80FC + +#define mmTPC3_QM_CQ_IFIFO_CNT 0xEC8108 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_LO 0xEC8120 + +#define mmTPC3_QM_CP_MSG_BASE0_ADDR_HI 0xEC8124 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_LO 0xEC8128 + +#define mmTPC3_QM_CP_MSG_BASE1_ADDR_HI 0xEC812C + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_LO 0xEC8130 + +#define mmTPC3_QM_CP_MSG_BASE2_ADDR_HI 0xEC8134 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_LO 0xEC8138 + +#define mmTPC3_QM_CP_MSG_BASE3_ADDR_HI 0xEC813C + +#define mmTPC3_QM_CP_LDMA_TSIZE_OFFSET 0xEC8140 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xEC8144 + +#define mmTPC3_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xEC8148 + +#define mmTPC3_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xEC814C + +#define mmTPC3_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xEC8150 + +#define mmTPC3_QM_CP_LDMA_COMMIT_OFFSET 0xEC8154 + +#define mmTPC3_QM_CP_FENCE0_RDATA 0xEC8158 + +#define mmTPC3_QM_CP_FENCE1_RDATA 0xEC815C + +#define mmTPC3_QM_CP_FENCE2_RDATA 0xEC8160 + +#define mmTPC3_QM_CP_FENCE3_RDATA 0xEC8164 + +#define mmTPC3_QM_CP_FENCE0_CNT 0xEC8168 + +#define mmTPC3_QM_CP_FENCE1_CNT 0xEC816C + +#define mmTPC3_QM_CP_FENCE2_CNT 0xEC8170 + +#define mmTPC3_QM_CP_FENCE3_CNT 0xEC8174 + +#define mmTPC3_QM_CP_STS 0xEC8178 + +#define mmTPC3_QM_CP_CURRENT_INST_LO 0xEC817C + +#define mmTPC3_QM_CP_CURRENT_INST_HI 0xEC8180 + +#define mmTPC3_QM_CP_BARRIER_CFG 0xEC8184 + +#define mmTPC3_QM_CP_DBG_0 0xEC8188 + +#define mmTPC3_QM_PQ_BUF_ADDR 0xEC8300 + +#define mmTPC3_QM_PQ_BUF_RDATA 0xEC8304 + +#define mmTPC3_QM_CQ_BUF_ADDR 0xEC8308 + +#define mmTPC3_QM_CQ_BUF_RDATA 0xEC830C + +#endif /* ASIC_REG_TPC3_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h new file mode 100644 index 000000000000..5a2fd7652650 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC3_RTR_REGS_H_ +#define ASIC_REG_TPC3_RTR_REGS_H_ + +/* + ***************************************** + * TPC3_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC3_RTR_HBW_RD_RQ_E_ARB 0xEC0100 + +#define mmTPC3_RTR_HBW_RD_RQ_W_ARB 0xEC0104 + +#define mmTPC3_RTR_HBW_RD_RQ_N_ARB 0xEC0108 + +#define mmTPC3_RTR_HBW_RD_RQ_S_ARB 0xEC010C + +#define mmTPC3_RTR_HBW_RD_RQ_L_ARB 0xEC0110 + +#define mmTPC3_RTR_HBW_E_ARB_MAX 0xEC0120 + +#define mmTPC3_RTR_HBW_W_ARB_MAX 0xEC0124 + +#define mmTPC3_RTR_HBW_N_ARB_MAX 0xEC0128 + +#define mmTPC3_RTR_HBW_S_ARB_MAX 0xEC012C + +#define mmTPC3_RTR_HBW_L_ARB_MAX 0xEC0130 + +#define mmTPC3_RTR_HBW_RD_RS_E_ARB 0xEC0140 + +#define mmTPC3_RTR_HBW_RD_RS_W_ARB 0xEC0144 + +#define mmTPC3_RTR_HBW_RD_RS_N_ARB 0xEC0148 + +#define mmTPC3_RTR_HBW_RD_RS_S_ARB 0xEC014C + +#define mmTPC3_RTR_HBW_RD_RS_L_ARB 0xEC0150 + +#define mmTPC3_RTR_HBW_WR_RQ_E_ARB 0xEC0170 + +#define mmTPC3_RTR_HBW_WR_RQ_W_ARB 0xEC0174 + +#define mmTPC3_RTR_HBW_WR_RQ_N_ARB 0xEC0178 + +#define mmTPC3_RTR_HBW_WR_RQ_S_ARB 0xEC017C + +#define mmTPC3_RTR_HBW_WR_RQ_L_ARB 0xEC0180 + +#define mmTPC3_RTR_HBW_WR_RS_E_ARB 0xEC0190 + +#define mmTPC3_RTR_HBW_WR_RS_W_ARB 0xEC0194 + +#define mmTPC3_RTR_HBW_WR_RS_N_ARB 0xEC0198 + +#define mmTPC3_RTR_HBW_WR_RS_S_ARB 0xEC019C + +#define mmTPC3_RTR_HBW_WR_RS_L_ARB 0xEC01A0 + +#define mmTPC3_RTR_LBW_RD_RQ_E_ARB 0xEC0200 + +#define mmTPC3_RTR_LBW_RD_RQ_W_ARB 0xEC0204 + +#define mmTPC3_RTR_LBW_RD_RQ_N_ARB 0xEC0208 + +#define mmTPC3_RTR_LBW_RD_RQ_S_ARB 0xEC020C + +#define mmTPC3_RTR_LBW_RD_RQ_L_ARB 0xEC0210 + +#define mmTPC3_RTR_LBW_E_ARB_MAX 0xEC0220 + +#define mmTPC3_RTR_LBW_W_ARB_MAX 0xEC0224 + +#define mmTPC3_RTR_LBW_N_ARB_MAX 0xEC0228 + +#define mmTPC3_RTR_LBW_S_ARB_MAX 0xEC022C + +#define mmTPC3_RTR_LBW_L_ARB_MAX 0xEC0230 + +#define mmTPC3_RTR_LBW_RD_RS_E_ARB 0xEC0250 + +#define mmTPC3_RTR_LBW_RD_RS_W_ARB 0xEC0254 + +#define mmTPC3_RTR_LBW_RD_RS_N_ARB 0xEC0258 + +#define mmTPC3_RTR_LBW_RD_RS_S_ARB 0xEC025C + +#define mmTPC3_RTR_LBW_RD_RS_L_ARB 0xEC0260 + +#define mmTPC3_RTR_LBW_WR_RQ_E_ARB 0xEC0270 + +#define mmTPC3_RTR_LBW_WR_RQ_W_ARB 0xEC0274 + +#define mmTPC3_RTR_LBW_WR_RQ_N_ARB 0xEC0278 + +#define mmTPC3_RTR_LBW_WR_RQ_S_ARB 0xEC027C + +#define mmTPC3_RTR_LBW_WR_RQ_L_ARB 0xEC0280 + +#define mmTPC3_RTR_LBW_WR_RS_E_ARB 0xEC0290 + +#define mmTPC3_RTR_LBW_WR_RS_W_ARB 0xEC0294 + +#define mmTPC3_RTR_LBW_WR_RS_N_ARB 0xEC0298 + +#define mmTPC3_RTR_LBW_WR_RS_S_ARB 0xEC029C + +#define mmTPC3_RTR_LBW_WR_RS_L_ARB 0xEC02A0 + +#define mmTPC3_RTR_DBG_E_ARB 0xEC0300 + +#define mmTPC3_RTR_DBG_W_ARB 0xEC0304 + +#define mmTPC3_RTR_DBG_N_ARB 0xEC0308 + +#define mmTPC3_RTR_DBG_S_ARB 0xEC030C + +#define mmTPC3_RTR_DBG_L_ARB 0xEC0310 + +#define mmTPC3_RTR_DBG_E_ARB_MAX 0xEC0320 + +#define mmTPC3_RTR_DBG_W_ARB_MAX 0xEC0324 + +#define mmTPC3_RTR_DBG_N_ARB_MAX 0xEC0328 + +#define mmTPC3_RTR_DBG_S_ARB_MAX 0xEC032C + +#define mmTPC3_RTR_DBG_L_ARB_MAX 0xEC0330 + +#define mmTPC3_RTR_SPLIT_COEF_0 0xEC0400 + +#define mmTPC3_RTR_SPLIT_COEF_1 0xEC0404 + +#define mmTPC3_RTR_SPLIT_COEF_2 0xEC0408 + +#define mmTPC3_RTR_SPLIT_COEF_3 0xEC040C + +#define mmTPC3_RTR_SPLIT_COEF_4 0xEC0410 + +#define mmTPC3_RTR_SPLIT_COEF_5 0xEC0414 + +#define mmTPC3_RTR_SPLIT_COEF_6 0xEC0418 + +#define mmTPC3_RTR_SPLIT_COEF_7 0xEC041C + +#define mmTPC3_RTR_SPLIT_COEF_8 0xEC0420 + +#define mmTPC3_RTR_SPLIT_COEF_9 
0xEC0424 + +#define mmTPC3_RTR_SPLIT_CFG 0xEC0440 + +#define mmTPC3_RTR_SPLIT_RD_SAT 0xEC0444 + +#define mmTPC3_RTR_SPLIT_RD_RST_TOKEN 0xEC0448 + +#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_0 0xEC044C + +#define mmTPC3_RTR_SPLIT_RD_TIMEOUT_1 0xEC0450 + +#define mmTPC3_RTR_SPLIT_WR_SAT 0xEC0454 + +#define mmTPC3_RTR_WPLIT_WR_TST_TOLEN 0xEC0458 + +#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_0 0xEC045C + +#define mmTPC3_RTR_SPLIT_WR_TIMEOUT_1 0xEC0460 + +#define mmTPC3_RTR_HBW_RANGE_HIT 0xEC0470 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_0 0xEC0480 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_1 0xEC0484 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_2 0xEC0488 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_3 0xEC048C + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_4 0xEC0490 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_5 0xEC0494 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_6 0xEC0498 + +#define mmTPC3_RTR_HBW_RANGE_MASK_L_7 0xEC049C + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_0 0xEC04A0 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_1 0xEC04A4 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_2 0xEC04A8 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_3 0xEC04AC + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_4 0xEC04B0 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_5 0xEC04B4 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_6 0xEC04B8 + +#define mmTPC3_RTR_HBW_RANGE_MASK_H_7 0xEC04BC + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_0 0xEC04C0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_1 0xEC04C4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_2 0xEC04C8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_3 0xEC04CC + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_4 0xEC04D0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_5 0xEC04D4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_6 0xEC04D8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_L_7 0xEC04DC + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_0 0xEC04E0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_1 0xEC04E4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_2 0xEC04E8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_3 0xEC04EC + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_4 0xEC04F0 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_5 0xEC04F4 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_6 0xEC04F8 + +#define mmTPC3_RTR_HBW_RANGE_BASE_H_7 0xEC04FC + +#define mmTPC3_RTR_LBW_RANGE_HIT 0xEC0500 + +#define mmTPC3_RTR_LBW_RANGE_MASK_0 0xEC0510 + +#define mmTPC3_RTR_LBW_RANGE_MASK_1 0xEC0514 + +#define mmTPC3_RTR_LBW_RANGE_MASK_2 0xEC0518 + +#define mmTPC3_RTR_LBW_RANGE_MASK_3 0xEC051C + +#define mmTPC3_RTR_LBW_RANGE_MASK_4 0xEC0520 + +#define mmTPC3_RTR_LBW_RANGE_MASK_5 0xEC0524 + +#define mmTPC3_RTR_LBW_RANGE_MASK_6 0xEC0528 + +#define mmTPC3_RTR_LBW_RANGE_MASK_7 0xEC052C + +#define mmTPC3_RTR_LBW_RANGE_MASK_8 0xEC0530 + +#define mmTPC3_RTR_LBW_RANGE_MASK_9 0xEC0534 + +#define mmTPC3_RTR_LBW_RANGE_MASK_10 0xEC0538 + +#define mmTPC3_RTR_LBW_RANGE_MASK_11 0xEC053C + +#define mmTPC3_RTR_LBW_RANGE_MASK_12 0xEC0540 + +#define mmTPC3_RTR_LBW_RANGE_MASK_13 0xEC0544 + +#define mmTPC3_RTR_LBW_RANGE_MASK_14 0xEC0548 + +#define mmTPC3_RTR_LBW_RANGE_MASK_15 0xEC054C + +#define mmTPC3_RTR_LBW_RANGE_BASE_0 0xEC0550 + +#define mmTPC3_RTR_LBW_RANGE_BASE_1 0xEC0554 + +#define mmTPC3_RTR_LBW_RANGE_BASE_2 0xEC0558 + +#define mmTPC3_RTR_LBW_RANGE_BASE_3 0xEC055C + +#define mmTPC3_RTR_LBW_RANGE_BASE_4 0xEC0560 + +#define mmTPC3_RTR_LBW_RANGE_BASE_5 0xEC0564 + +#define mmTPC3_RTR_LBW_RANGE_BASE_6 0xEC0568 + +#define mmTPC3_RTR_LBW_RANGE_BASE_7 0xEC056C + +#define mmTPC3_RTR_LBW_RANGE_BASE_8 0xEC0570 + +#define mmTPC3_RTR_LBW_RANGE_BASE_9 0xEC0574 + +#define mmTPC3_RTR_LBW_RANGE_BASE_10 0xEC0578 + +#define mmTPC3_RTR_LBW_RANGE_BASE_11 0xEC057C + +#define mmTPC3_RTR_LBW_RANGE_BASE_12 0xEC0580 + +#define 
mmTPC3_RTR_LBW_RANGE_BASE_13 0xEC0584 + +#define mmTPC3_RTR_LBW_RANGE_BASE_14 0xEC0588 + +#define mmTPC3_RTR_LBW_RANGE_BASE_15 0xEC058C + +#define mmTPC3_RTR_RGLTR 0xEC0590 + +#define mmTPC3_RTR_RGLTR_WR_RESULT 0xEC0594 + +#define mmTPC3_RTR_RGLTR_RD_RESULT 0xEC0598 + +#define mmTPC3_RTR_SCRAMB_EN 0xEC0600 + +#define mmTPC3_RTR_NON_LIN_SCRAMB 0xEC0604 + +#endif /* ASIC_REG_TPC3_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h new file mode 100644 index 000000000000..d64a100075f2 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_CFG_REGS_H_ +#define ASIC_REG_TPC4_CFG_REGS_H_ + +/* + ***************************************** + * TPC4_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF06400 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF06404 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF06408 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF0640C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF06410 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF06414 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF06418 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF0641C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF06420 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF06424 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF06428 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF0642C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF06430 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF06434 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF06438 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF0643C + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF06440 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF06444 + +#define mmTPC4_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF06448 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF0644C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF06450 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF06454 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF06458 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF0645C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF06460 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF06464 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF06468 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF0646C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF06470 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF06474 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF06478 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF0647C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF06480 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF06484 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF06488 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF0648C + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF06490 + +#define mmTPC4_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF06494 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF06498 + +#define 
mmTPC4_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF0649C + +#define mmTPC4_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF064A0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF064A4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF064A8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF064AC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF064B0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF064B4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF064B8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF064BC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF064C0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF064C4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF064C8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF064CC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF064D0 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF064D4 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF064D8 + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF064DC + +#define mmTPC4_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF064E0 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF064E4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF064E8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF064EC + +#define mmTPC4_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF064F0 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF064F4 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF064F8 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF064FC + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF06500 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF06504 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF06508 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF0650C + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF06510 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF06514 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF06518 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF0651C + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF06520 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF06524 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF06528 + +#define mmTPC4_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF0652C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF06530 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF06534 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF06538 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF0653C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF06540 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF06544 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF06548 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF0654C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF06550 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF06554 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF06558 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF0655C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF06560 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF06564 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF06568 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF0656C + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF06570 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF06574 + +#define mmTPC4_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF06578 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF0657C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF06580 + +#define 
mmTPC4_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF06584 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF06588 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF0658C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF06590 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF06594 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF06598 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF0659C + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF065A0 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF065A4 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF065A8 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF065AC + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF065B0 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF065B4 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF065B8 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF065BC + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF065C0 + +#define mmTPC4_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF065C4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF065C8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF065CC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF065D0 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF065D4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF065D8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF065DC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF065E0 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF065E4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF065E8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF065EC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF065F0 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF065F4 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF065F8 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF065FC + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF06600 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF06604 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF06608 + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF0660C + +#define mmTPC4_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF06610 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF06614 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF06618 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF0661C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF06620 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF06624 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF06628 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF0662C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF06630 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF06634 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF06638 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF0663C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF06640 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF06644 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF06648 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF0664C + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF06650 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF06654 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF06658 + +#define mmTPC4_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF0665C + +#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF06660 + +#define mmTPC4_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF06664 + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_0 0xF06668 + +#define 
mmTPC4_CFG_KERNEL_TID_SIZE_DIM_0 0xF0666C + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_1 0xF06670 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_1 0xF06674 + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_2 0xF06678 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_2 0xF0667C + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_3 0xF06680 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_3 0xF06684 + +#define mmTPC4_CFG_KERNEL_TID_BASE_DIM_4 0xF06688 + +#define mmTPC4_CFG_KERNEL_TID_SIZE_DIM_4 0xF0668C + +#define mmTPC4_CFG_KERNEL_SRF_0 0xF06690 + +#define mmTPC4_CFG_KERNEL_SRF_1 0xF06694 + +#define mmTPC4_CFG_KERNEL_SRF_2 0xF06698 + +#define mmTPC4_CFG_KERNEL_SRF_3 0xF0669C + +#define mmTPC4_CFG_KERNEL_SRF_4 0xF066A0 + +#define mmTPC4_CFG_KERNEL_SRF_5 0xF066A4 + +#define mmTPC4_CFG_KERNEL_SRF_6 0xF066A8 + +#define mmTPC4_CFG_KERNEL_SRF_7 0xF066AC + +#define mmTPC4_CFG_KERNEL_SRF_8 0xF066B0 + +#define mmTPC4_CFG_KERNEL_SRF_9 0xF066B4 + +#define mmTPC4_CFG_KERNEL_SRF_10 0xF066B8 + +#define mmTPC4_CFG_KERNEL_SRF_11 0xF066BC + +#define mmTPC4_CFG_KERNEL_SRF_12 0xF066C0 + +#define mmTPC4_CFG_KERNEL_SRF_13 0xF066C4 + +#define mmTPC4_CFG_KERNEL_SRF_14 0xF066C8 + +#define mmTPC4_CFG_KERNEL_SRF_15 0xF066CC + +#define mmTPC4_CFG_KERNEL_SRF_16 0xF066D0 + +#define mmTPC4_CFG_KERNEL_SRF_17 0xF066D4 + +#define mmTPC4_CFG_KERNEL_SRF_18 0xF066D8 + +#define mmTPC4_CFG_KERNEL_SRF_19 0xF066DC + +#define mmTPC4_CFG_KERNEL_SRF_20 0xF066E0 + +#define mmTPC4_CFG_KERNEL_SRF_21 0xF066E4 + +#define mmTPC4_CFG_KERNEL_SRF_22 0xF066E8 + +#define mmTPC4_CFG_KERNEL_SRF_23 0xF066EC + +#define mmTPC4_CFG_KERNEL_SRF_24 0xF066F0 + +#define mmTPC4_CFG_KERNEL_SRF_25 0xF066F4 + +#define mmTPC4_CFG_KERNEL_SRF_26 0xF066F8 + +#define mmTPC4_CFG_KERNEL_SRF_27 0xF066FC + +#define mmTPC4_CFG_KERNEL_SRF_28 0xF06700 + +#define mmTPC4_CFG_KERNEL_SRF_29 0xF06704 + +#define mmTPC4_CFG_KERNEL_SRF_30 0xF06708 + +#define mmTPC4_CFG_KERNEL_SRF_31 0xF0670C + +#define mmTPC4_CFG_KERNEL_KERNEL_CONFIG 0xF06710 + +#define mmTPC4_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF06714 + +#define mmTPC4_CFG_RESERVED_DESC_END 0xF06738 + +#define mmTPC4_CFG_ROUND_CSR 0xF067FC + +#define mmTPC4_CFG_TBUF_BASE_ADDR_LOW 0xF06800 + +#define mmTPC4_CFG_TBUF_BASE_ADDR_HIGH 0xF06804 + +#define mmTPC4_CFG_SEMAPHORE 0xF06808 + +#define mmTPC4_CFG_VFLAGS 0xF0680C + +#define mmTPC4_CFG_SFLAGS 0xF06810 + +#define mmTPC4_CFG_LFSR_POLYNOM 0xF06818 + +#define mmTPC4_CFG_STATUS 0xF0681C + +#define mmTPC4_CFG_CFG_BASE_ADDRESS_HIGH 0xF06820 + +#define mmTPC4_CFG_CFG_SUBTRACT_VALUE 0xF06824 + +#define mmTPC4_CFG_SM_BASE_ADDRESS_LOW 0xF06828 + +#define mmTPC4_CFG_SM_BASE_ADDRESS_HIGH 0xF0682C + +#define mmTPC4_CFG_TPC_CMD 0xF06830 + +#define mmTPC4_CFG_TPC_EXECUTE 0xF06838 + +#define mmTPC4_CFG_TPC_STALL 0xF0683C + +#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_LOW 0xF06840 + +#define mmTPC4_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF06844 + +#define mmTPC4_CFG_MSS_CONFIG 0xF06854 + +#define mmTPC4_CFG_TPC_INTR_CAUSE 0xF06858 + +#define mmTPC4_CFG_TPC_INTR_MASK 0xF0685C + +#define mmTPC4_CFG_TSB_CONFIG 0xF06860 + +#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF06A00 + +#define mmTPC4_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF06A04 + +#define mmTPC4_CFG_QM_TENSOR_0_PADDING_VALUE 0xF06A08 + +#define mmTPC4_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF06A0C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF06A10 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF06A14 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF06A18 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF06A1C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF06A20 + +#define 
mmTPC4_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF06A24 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF06A28 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF06A2C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF06A30 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF06A34 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF06A38 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF06A3C + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF06A40 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF06A44 + +#define mmTPC4_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF06A48 + +#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF06A4C + +#define mmTPC4_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF06A50 + +#define mmTPC4_CFG_QM_TENSOR_1_PADDING_VALUE 0xF06A54 + +#define mmTPC4_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF06A58 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF06A5C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF06A60 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF06A64 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF06A68 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF06A6C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF06A70 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF06A74 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF06A78 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF06A7C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF06A80 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF06A84 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF06A88 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF06A8C + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF06A90 + +#define mmTPC4_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF06A94 + +#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF06A98 + +#define mmTPC4_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF06A9C + +#define mmTPC4_CFG_QM_TENSOR_2_PADDING_VALUE 0xF06AA0 + +#define mmTPC4_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF06AA4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF06AA8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF06AAC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF06AB0 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF06AB4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF06AB8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF06ABC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF06AC0 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF06AC4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF06AC8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF06ACC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF06AD0 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF06AD4 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF06AD8 + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF06ADC + +#define mmTPC4_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF06AE0 + +#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF06AE4 + +#define mmTPC4_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF06AE8 + +#define mmTPC4_CFG_QM_TENSOR_3_PADDING_VALUE 0xF06AEC + +#define mmTPC4_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF06AF0 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF06AF4 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF06AF8 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF06AFC + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF06B00 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF06B04 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF06B08 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF06B0C + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF06B10 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF06B14 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF06B18 + +#define 
mmTPC4_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF06B1C + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF06B20 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF06B24 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF06B28 + +#define mmTPC4_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF06B2C + +#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF06B30 + +#define mmTPC4_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF06B34 + +#define mmTPC4_CFG_QM_TENSOR_4_PADDING_VALUE 0xF06B38 + +#define mmTPC4_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF06B3C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF06B40 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF06B44 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF06B48 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF06B4C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF06B50 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF06B54 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF06B58 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF06B5C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF06B60 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF06B64 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF06B68 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF06B6C + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF06B70 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF06B74 + +#define mmTPC4_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF06B78 + +#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF06B7C + +#define mmTPC4_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF06B80 + +#define mmTPC4_CFG_QM_TENSOR_5_PADDING_VALUE 0xF06B84 + +#define mmTPC4_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF06B88 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF06B8C + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF06B90 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF06B94 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF06B98 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF06B9C + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF06BA0 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF06BA4 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF06BA8 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF06BAC + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF06BB0 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF06BB4 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF06BB8 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF06BBC + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF06BC0 + +#define mmTPC4_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF06BC4 + +#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF06BC8 + +#define mmTPC4_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF06BCC + +#define mmTPC4_CFG_QM_TENSOR_6_PADDING_VALUE 0xF06BD0 + +#define mmTPC4_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF06BD4 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF06BD8 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF06BDC + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF06BE0 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF06BE4 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF06BE8 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF06BEC + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF06BF0 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF06BF4 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF06BF8 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF06BFC + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF06C00 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF06C04 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF06C08 + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF06C0C + +#define mmTPC4_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF06C10 + +#define 
mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF06C14 + +#define mmTPC4_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF06C18 + +#define mmTPC4_CFG_QM_TENSOR_7_PADDING_VALUE 0xF06C1C + +#define mmTPC4_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF06C20 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF06C24 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF06C28 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF06C2C + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF06C30 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF06C34 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF06C38 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF06C3C + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF06C40 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF06C44 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF06C48 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF06C4C + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF06C50 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF06C54 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF06C58 + +#define mmTPC4_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF06C5C + +#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF06C60 + +#define mmTPC4_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF06C64 + +#define mmTPC4_CFG_QM_TID_BASE_DIM_0 0xF06C68 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_0 0xF06C6C + +#define mmTPC4_CFG_QM_TID_BASE_DIM_1 0xF06C70 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_1 0xF06C74 + +#define mmTPC4_CFG_QM_TID_BASE_DIM_2 0xF06C78 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_2 0xF06C7C + +#define mmTPC4_CFG_QM_TID_BASE_DIM_3 0xF06C80 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_3 0xF06C84 + +#define mmTPC4_CFG_QM_TID_BASE_DIM_4 0xF06C88 + +#define mmTPC4_CFG_QM_TID_SIZE_DIM_4 0xF06C8C + +#define mmTPC4_CFG_QM_SRF_0 0xF06C90 + +#define mmTPC4_CFG_QM_SRF_1 0xF06C94 + +#define mmTPC4_CFG_QM_SRF_2 0xF06C98 + +#define mmTPC4_CFG_QM_SRF_3 0xF06C9C + +#define mmTPC4_CFG_QM_SRF_4 0xF06CA0 + +#define mmTPC4_CFG_QM_SRF_5 0xF06CA4 + +#define mmTPC4_CFG_QM_SRF_6 0xF06CA8 + +#define mmTPC4_CFG_QM_SRF_7 0xF06CAC + +#define mmTPC4_CFG_QM_SRF_8 0xF06CB0 + +#define mmTPC4_CFG_QM_SRF_9 0xF06CB4 + +#define mmTPC4_CFG_QM_SRF_10 0xF06CB8 + +#define mmTPC4_CFG_QM_SRF_11 0xF06CBC + +#define mmTPC4_CFG_QM_SRF_12 0xF06CC0 + +#define mmTPC4_CFG_QM_SRF_13 0xF06CC4 + +#define mmTPC4_CFG_QM_SRF_14 0xF06CC8 + +#define mmTPC4_CFG_QM_SRF_15 0xF06CCC + +#define mmTPC4_CFG_QM_SRF_16 0xF06CD0 + +#define mmTPC4_CFG_QM_SRF_17 0xF06CD4 + +#define mmTPC4_CFG_QM_SRF_18 0xF06CD8 + +#define mmTPC4_CFG_QM_SRF_19 0xF06CDC + +#define mmTPC4_CFG_QM_SRF_20 0xF06CE0 + +#define mmTPC4_CFG_QM_SRF_21 0xF06CE4 + +#define mmTPC4_CFG_QM_SRF_22 0xF06CE8 + +#define mmTPC4_CFG_QM_SRF_23 0xF06CEC + +#define mmTPC4_CFG_QM_SRF_24 0xF06CF0 + +#define mmTPC4_CFG_QM_SRF_25 0xF06CF4 + +#define mmTPC4_CFG_QM_SRF_26 0xF06CF8 + +#define mmTPC4_CFG_QM_SRF_27 0xF06CFC + +#define mmTPC4_CFG_QM_SRF_28 0xF06D00 + +#define mmTPC4_CFG_QM_SRF_29 0xF06D04 + +#define mmTPC4_CFG_QM_SRF_30 0xF06D08 + +#define mmTPC4_CFG_QM_SRF_31 0xF06D0C + +#define mmTPC4_CFG_QM_KERNEL_CONFIG 0xF06D10 + +#define mmTPC4_CFG_QM_SYNC_OBJECT_MESSAGE 0xF06D14 + +#define mmTPC4_CFG_ARUSER 0xF06D18 + +#define mmTPC4_CFG_AWUSER 0xF06D1C + +#define mmTPC4_CFG_FUNC_MBIST_CNTRL 0xF06E00 + +#define mmTPC4_CFG_FUNC_MBIST_PAT 0xF06E04 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_0 0xF06E08 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_1 0xF06E0C + +#define mmTPC4_CFG_FUNC_MBIST_MEM_2 0xF06E10 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_3 0xF06E14 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_4 0xF06E18 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_5 0xF06E1C + +#define 
mmTPC4_CFG_FUNC_MBIST_MEM_6 0xF06E20 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_7 0xF06E24 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_8 0xF06E28 + +#define mmTPC4_CFG_FUNC_MBIST_MEM_9 0xF06E2C + +#endif /* ASIC_REG_TPC4_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h new file mode 100644 index 000000000000..565b42885b0d --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_CMDQ_REGS_H_ +#define ASIC_REG_TPC4_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC4_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC4_CMDQ_GLBL_CFG0 0xF09000 + +#define mmTPC4_CMDQ_GLBL_CFG1 0xF09004 + +#define mmTPC4_CMDQ_GLBL_PROT 0xF09008 + +#define mmTPC4_CMDQ_GLBL_ERR_CFG 0xF0900C + +#define mmTPC4_CMDQ_GLBL_ERR_ADDR_LO 0xF09010 + +#define mmTPC4_CMDQ_GLBL_ERR_ADDR_HI 0xF09014 + +#define mmTPC4_CMDQ_GLBL_ERR_WDATA 0xF09018 + +#define mmTPC4_CMDQ_GLBL_SECURE_PROPS 0xF0901C + +#define mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS 0xF09020 + +#define mmTPC4_CMDQ_GLBL_STS0 0xF09024 + +#define mmTPC4_CMDQ_GLBL_STS1 0xF09028 + +#define mmTPC4_CMDQ_CQ_CFG0 0xF090B0 + +#define mmTPC4_CMDQ_CQ_CFG1 0xF090B4 + +#define mmTPC4_CMDQ_CQ_ARUSER 0xF090B8 + +#define mmTPC4_CMDQ_CQ_PTR_LO 0xF090C0 + +#define mmTPC4_CMDQ_CQ_PTR_HI 0xF090C4 + +#define mmTPC4_CMDQ_CQ_TSIZE 0xF090C8 + +#define mmTPC4_CMDQ_CQ_CTL 0xF090CC + +#define mmTPC4_CMDQ_CQ_PTR_LO_STS 0xF090D4 + +#define mmTPC4_CMDQ_CQ_PTR_HI_STS 0xF090D8 + +#define mmTPC4_CMDQ_CQ_TSIZE_STS 0xF090DC + +#define mmTPC4_CMDQ_CQ_CTL_STS 0xF090E0 + +#define mmTPC4_CMDQ_CQ_STS0 0xF090E4 + +#define mmTPC4_CMDQ_CQ_STS1 0xF090E8 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_EN 0xF090F0 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF090F4 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_SAT 0xF090F8 + +#define mmTPC4_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF090FC + +#define mmTPC4_CMDQ_CQ_IFIFO_CNT 0xF09108 + +#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF09120 + +#define mmTPC4_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF09124 + +#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF09128 + +#define mmTPC4_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF0912C + +#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF09130 + +#define mmTPC4_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF09134 + +#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF09138 + +#define mmTPC4_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF0913C + +#define mmTPC4_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF09140 + +#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF09144 + +#define mmTPC4_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF09148 + +#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF0914C + +#define mmTPC4_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF09150 + +#define mmTPC4_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF09154 + +#define mmTPC4_CMDQ_CP_FENCE0_RDATA 0xF09158 + +#define mmTPC4_CMDQ_CP_FENCE1_RDATA 0xF0915C + +#define mmTPC4_CMDQ_CP_FENCE2_RDATA 0xF09160 + +#define mmTPC4_CMDQ_CP_FENCE3_RDATA 0xF09164 + +#define mmTPC4_CMDQ_CP_FENCE0_CNT 0xF09168 + +#define mmTPC4_CMDQ_CP_FENCE1_CNT 0xF0916C + +#define mmTPC4_CMDQ_CP_FENCE2_CNT 0xF09170 + +#define mmTPC4_CMDQ_CP_FENCE3_CNT 0xF09174 + +#define mmTPC4_CMDQ_CP_STS 0xF09178 + +#define mmTPC4_CMDQ_CP_CURRENT_INST_LO 0xF0917C + +#define 
mmTPC4_CMDQ_CP_CURRENT_INST_HI 0xF09180 + +#define mmTPC4_CMDQ_CP_BARRIER_CFG 0xF09184 + +#define mmTPC4_CMDQ_CP_DBG_0 0xF09188 + +#define mmTPC4_CMDQ_CQ_BUF_ADDR 0xF09308 + +#define mmTPC4_CMDQ_CQ_BUF_RDATA 0xF0930C + +#endif /* ASIC_REG_TPC4_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h new file mode 100644 index 000000000000..196da3f12710 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_QM_REGS_H_ +#define ASIC_REG_TPC4_QM_REGS_H_ + +/* + ***************************************** + * TPC4_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC4_QM_GLBL_CFG0 0xF08000 + +#define mmTPC4_QM_GLBL_CFG1 0xF08004 + +#define mmTPC4_QM_GLBL_PROT 0xF08008 + +#define mmTPC4_QM_GLBL_ERR_CFG 0xF0800C + +#define mmTPC4_QM_GLBL_ERR_ADDR_LO 0xF08010 + +#define mmTPC4_QM_GLBL_ERR_ADDR_HI 0xF08014 + +#define mmTPC4_QM_GLBL_ERR_WDATA 0xF08018 + +#define mmTPC4_QM_GLBL_SECURE_PROPS 0xF0801C + +#define mmTPC4_QM_GLBL_NON_SECURE_PROPS 0xF08020 + +#define mmTPC4_QM_GLBL_STS0 0xF08024 + +#define mmTPC4_QM_GLBL_STS1 0xF08028 + +#define mmTPC4_QM_PQ_BASE_LO 0xF08060 + +#define mmTPC4_QM_PQ_BASE_HI 0xF08064 + +#define mmTPC4_QM_PQ_SIZE 0xF08068 + +#define mmTPC4_QM_PQ_PI 0xF0806C + +#define mmTPC4_QM_PQ_CI 0xF08070 + +#define mmTPC4_QM_PQ_CFG0 0xF08074 + +#define mmTPC4_QM_PQ_CFG1 0xF08078 + +#define mmTPC4_QM_PQ_ARUSER 0xF0807C + +#define mmTPC4_QM_PQ_PUSH0 0xF08080 + +#define mmTPC4_QM_PQ_PUSH1 0xF08084 + +#define mmTPC4_QM_PQ_PUSH2 0xF08088 + +#define mmTPC4_QM_PQ_PUSH3 0xF0808C + +#define mmTPC4_QM_PQ_STS0 0xF08090 + +#define mmTPC4_QM_PQ_STS1 0xF08094 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_EN 0xF080A0 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF080A4 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_SAT 0xF080A8 + +#define mmTPC4_QM_PQ_RD_RATE_LIM_TOUT 0xF080AC + +#define mmTPC4_QM_CQ_CFG0 0xF080B0 + +#define mmTPC4_QM_CQ_CFG1 0xF080B4 + +#define mmTPC4_QM_CQ_ARUSER 0xF080B8 + +#define mmTPC4_QM_CQ_PTR_LO 0xF080C0 + +#define mmTPC4_QM_CQ_PTR_HI 0xF080C4 + +#define mmTPC4_QM_CQ_TSIZE 0xF080C8 + +#define mmTPC4_QM_CQ_CTL 0xF080CC + +#define mmTPC4_QM_CQ_PTR_LO_STS 0xF080D4 + +#define mmTPC4_QM_CQ_PTR_HI_STS 0xF080D8 + +#define mmTPC4_QM_CQ_TSIZE_STS 0xF080DC + +#define mmTPC4_QM_CQ_CTL_STS 0xF080E0 + +#define mmTPC4_QM_CQ_STS0 0xF080E4 + +#define mmTPC4_QM_CQ_STS1 0xF080E8 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_EN 0xF080F0 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF080F4 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_SAT 0xF080F8 + +#define mmTPC4_QM_CQ_RD_RATE_LIM_TOUT 0xF080FC + +#define mmTPC4_QM_CQ_IFIFO_CNT 0xF08108 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_LO 0xF08120 + +#define mmTPC4_QM_CP_MSG_BASE0_ADDR_HI 0xF08124 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_LO 0xF08128 + +#define mmTPC4_QM_CP_MSG_BASE1_ADDR_HI 0xF0812C + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_LO 0xF08130 + +#define mmTPC4_QM_CP_MSG_BASE2_ADDR_HI 0xF08134 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_LO 0xF08138 + +#define mmTPC4_QM_CP_MSG_BASE3_ADDR_HI 0xF0813C + +#define mmTPC4_QM_CP_LDMA_TSIZE_OFFSET 0xF08140 + +#define mmTPC4_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF08144 + +#define 
mmTPC4_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF08148 + +#define mmTPC4_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF0814C + +#define mmTPC4_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF08150 + +#define mmTPC4_QM_CP_LDMA_COMMIT_OFFSET 0xF08154 + +#define mmTPC4_QM_CP_FENCE0_RDATA 0xF08158 + +#define mmTPC4_QM_CP_FENCE1_RDATA 0xF0815C + +#define mmTPC4_QM_CP_FENCE2_RDATA 0xF08160 + +#define mmTPC4_QM_CP_FENCE3_RDATA 0xF08164 + +#define mmTPC4_QM_CP_FENCE0_CNT 0xF08168 + +#define mmTPC4_QM_CP_FENCE1_CNT 0xF0816C + +#define mmTPC4_QM_CP_FENCE2_CNT 0xF08170 + +#define mmTPC4_QM_CP_FENCE3_CNT 0xF08174 + +#define mmTPC4_QM_CP_STS 0xF08178 + +#define mmTPC4_QM_CP_CURRENT_INST_LO 0xF0817C + +#define mmTPC4_QM_CP_CURRENT_INST_HI 0xF08180 + +#define mmTPC4_QM_CP_BARRIER_CFG 0xF08184 + +#define mmTPC4_QM_CP_DBG_0 0xF08188 + +#define mmTPC4_QM_PQ_BUF_ADDR 0xF08300 + +#define mmTPC4_QM_PQ_BUF_RDATA 0xF08304 + +#define mmTPC4_QM_CQ_BUF_ADDR 0xF08308 + +#define mmTPC4_QM_CQ_BUF_RDATA 0xF0830C + +#endif /* ASIC_REG_TPC4_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h new file mode 100644 index 000000000000..8b54041d144a --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC4_RTR_REGS_H_ +#define ASIC_REG_TPC4_RTR_REGS_H_ + +/* + ***************************************** + * TPC4_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC4_RTR_HBW_RD_RQ_E_ARB 0xF00100 + +#define mmTPC4_RTR_HBW_RD_RQ_W_ARB 0xF00104 + +#define mmTPC4_RTR_HBW_RD_RQ_N_ARB 0xF00108 + +#define mmTPC4_RTR_HBW_RD_RQ_S_ARB 0xF0010C + +#define mmTPC4_RTR_HBW_RD_RQ_L_ARB 0xF00110 + +#define mmTPC4_RTR_HBW_E_ARB_MAX 0xF00120 + +#define mmTPC4_RTR_HBW_W_ARB_MAX 0xF00124 + +#define mmTPC4_RTR_HBW_N_ARB_MAX 0xF00128 + +#define mmTPC4_RTR_HBW_S_ARB_MAX 0xF0012C + +#define mmTPC4_RTR_HBW_L_ARB_MAX 0xF00130 + +#define mmTPC4_RTR_HBW_RD_RS_E_ARB 0xF00140 + +#define mmTPC4_RTR_HBW_RD_RS_W_ARB 0xF00144 + +#define mmTPC4_RTR_HBW_RD_RS_N_ARB 0xF00148 + +#define mmTPC4_RTR_HBW_RD_RS_S_ARB 0xF0014C + +#define mmTPC4_RTR_HBW_RD_RS_L_ARB 0xF00150 + +#define mmTPC4_RTR_HBW_WR_RQ_E_ARB 0xF00170 + +#define mmTPC4_RTR_HBW_WR_RQ_W_ARB 0xF00174 + +#define mmTPC4_RTR_HBW_WR_RQ_N_ARB 0xF00178 + +#define mmTPC4_RTR_HBW_WR_RQ_S_ARB 0xF0017C + +#define mmTPC4_RTR_HBW_WR_RQ_L_ARB 0xF00180 + +#define mmTPC4_RTR_HBW_WR_RS_E_ARB 0xF00190 + +#define mmTPC4_RTR_HBW_WR_RS_W_ARB 0xF00194 + +#define mmTPC4_RTR_HBW_WR_RS_N_ARB 0xF00198 + +#define mmTPC4_RTR_HBW_WR_RS_S_ARB 0xF0019C + +#define mmTPC4_RTR_HBW_WR_RS_L_ARB 0xF001A0 + +#define mmTPC4_RTR_LBW_RD_RQ_E_ARB 0xF00200 + +#define mmTPC4_RTR_LBW_RD_RQ_W_ARB 0xF00204 + +#define mmTPC4_RTR_LBW_RD_RQ_N_ARB 0xF00208 + +#define mmTPC4_RTR_LBW_RD_RQ_S_ARB 0xF0020C + +#define mmTPC4_RTR_LBW_RD_RQ_L_ARB 0xF00210 + +#define mmTPC4_RTR_LBW_E_ARB_MAX 0xF00220 + +#define mmTPC4_RTR_LBW_W_ARB_MAX 0xF00224 + +#define mmTPC4_RTR_LBW_N_ARB_MAX 0xF00228 + +#define mmTPC4_RTR_LBW_S_ARB_MAX 0xF0022C + +#define mmTPC4_RTR_LBW_L_ARB_MAX 0xF00230 + +#define mmTPC4_RTR_LBW_RD_RS_E_ARB 0xF00250 + +#define mmTPC4_RTR_LBW_RD_RS_W_ARB 0xF00254 + +#define mmTPC4_RTR_LBW_RD_RS_N_ARB 0xF00258 + +#define 
mmTPC4_RTR_LBW_RD_RS_S_ARB 0xF0025C + +#define mmTPC4_RTR_LBW_RD_RS_L_ARB 0xF00260 + +#define mmTPC4_RTR_LBW_WR_RQ_E_ARB 0xF00270 + +#define mmTPC4_RTR_LBW_WR_RQ_W_ARB 0xF00274 + +#define mmTPC4_RTR_LBW_WR_RQ_N_ARB 0xF00278 + +#define mmTPC4_RTR_LBW_WR_RQ_S_ARB 0xF0027C + +#define mmTPC4_RTR_LBW_WR_RQ_L_ARB 0xF00280 + +#define mmTPC4_RTR_LBW_WR_RS_E_ARB 0xF00290 + +#define mmTPC4_RTR_LBW_WR_RS_W_ARB 0xF00294 + +#define mmTPC4_RTR_LBW_WR_RS_N_ARB 0xF00298 + +#define mmTPC4_RTR_LBW_WR_RS_S_ARB 0xF0029C + +#define mmTPC4_RTR_LBW_WR_RS_L_ARB 0xF002A0 + +#define mmTPC4_RTR_DBG_E_ARB 0xF00300 + +#define mmTPC4_RTR_DBG_W_ARB 0xF00304 + +#define mmTPC4_RTR_DBG_N_ARB 0xF00308 + +#define mmTPC4_RTR_DBG_S_ARB 0xF0030C + +#define mmTPC4_RTR_DBG_L_ARB 0xF00310 + +#define mmTPC4_RTR_DBG_E_ARB_MAX 0xF00320 + +#define mmTPC4_RTR_DBG_W_ARB_MAX 0xF00324 + +#define mmTPC4_RTR_DBG_N_ARB_MAX 0xF00328 + +#define mmTPC4_RTR_DBG_S_ARB_MAX 0xF0032C + +#define mmTPC4_RTR_DBG_L_ARB_MAX 0xF00330 + +#define mmTPC4_RTR_SPLIT_COEF_0 0xF00400 + +#define mmTPC4_RTR_SPLIT_COEF_1 0xF00404 + +#define mmTPC4_RTR_SPLIT_COEF_2 0xF00408 + +#define mmTPC4_RTR_SPLIT_COEF_3 0xF0040C + +#define mmTPC4_RTR_SPLIT_COEF_4 0xF00410 + +#define mmTPC4_RTR_SPLIT_COEF_5 0xF00414 + +#define mmTPC4_RTR_SPLIT_COEF_6 0xF00418 + +#define mmTPC4_RTR_SPLIT_COEF_7 0xF0041C + +#define mmTPC4_RTR_SPLIT_COEF_8 0xF00420 + +#define mmTPC4_RTR_SPLIT_COEF_9 0xF00424 + +#define mmTPC4_RTR_SPLIT_CFG 0xF00440 + +#define mmTPC4_RTR_SPLIT_RD_SAT 0xF00444 + +#define mmTPC4_RTR_SPLIT_RD_RST_TOKEN 0xF00448 + +#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_0 0xF0044C + +#define mmTPC4_RTR_SPLIT_RD_TIMEOUT_1 0xF00450 + +#define mmTPC4_RTR_SPLIT_WR_SAT 0xF00454 + +#define mmTPC4_RTR_WPLIT_WR_TST_TOLEN 0xF00458 + +#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_0 0xF0045C + +#define mmTPC4_RTR_SPLIT_WR_TIMEOUT_1 0xF00460 + +#define mmTPC4_RTR_HBW_RANGE_HIT 0xF00470 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_0 0xF00480 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_1 0xF00484 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_2 0xF00488 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_3 0xF0048C + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_4 0xF00490 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_5 0xF00494 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_6 0xF00498 + +#define mmTPC4_RTR_HBW_RANGE_MASK_L_7 0xF0049C + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_0 0xF004A0 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_1 0xF004A4 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_2 0xF004A8 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_3 0xF004AC + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_4 0xF004B0 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_5 0xF004B4 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_6 0xF004B8 + +#define mmTPC4_RTR_HBW_RANGE_MASK_H_7 0xF004BC + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_0 0xF004C0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_1 0xF004C4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_2 0xF004C8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_3 0xF004CC + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_4 0xF004D0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_5 0xF004D4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_6 0xF004D8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_L_7 0xF004DC + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_0 0xF004E0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_1 0xF004E4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_2 0xF004E8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_3 0xF004EC + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_4 0xF004F0 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_5 0xF004F4 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_6 0xF004F8 + +#define mmTPC4_RTR_HBW_RANGE_BASE_H_7 0xF004FC + +#define mmTPC4_RTR_LBW_RANGE_HIT 0xF00500 + +#define 
mmTPC4_RTR_LBW_RANGE_MASK_0 0xF00510 + +#define mmTPC4_RTR_LBW_RANGE_MASK_1 0xF00514 + +#define mmTPC4_RTR_LBW_RANGE_MASK_2 0xF00518 + +#define mmTPC4_RTR_LBW_RANGE_MASK_3 0xF0051C + +#define mmTPC4_RTR_LBW_RANGE_MASK_4 0xF00520 + +#define mmTPC4_RTR_LBW_RANGE_MASK_5 0xF00524 + +#define mmTPC4_RTR_LBW_RANGE_MASK_6 0xF00528 + +#define mmTPC4_RTR_LBW_RANGE_MASK_7 0xF0052C + +#define mmTPC4_RTR_LBW_RANGE_MASK_8 0xF00530 + +#define mmTPC4_RTR_LBW_RANGE_MASK_9 0xF00534 + +#define mmTPC4_RTR_LBW_RANGE_MASK_10 0xF00538 + +#define mmTPC4_RTR_LBW_RANGE_MASK_11 0xF0053C + +#define mmTPC4_RTR_LBW_RANGE_MASK_12 0xF00540 + +#define mmTPC4_RTR_LBW_RANGE_MASK_13 0xF00544 + +#define mmTPC4_RTR_LBW_RANGE_MASK_14 0xF00548 + +#define mmTPC4_RTR_LBW_RANGE_MASK_15 0xF0054C + +#define mmTPC4_RTR_LBW_RANGE_BASE_0 0xF00550 + +#define mmTPC4_RTR_LBW_RANGE_BASE_1 0xF00554 + +#define mmTPC4_RTR_LBW_RANGE_BASE_2 0xF00558 + +#define mmTPC4_RTR_LBW_RANGE_BASE_3 0xF0055C + +#define mmTPC4_RTR_LBW_RANGE_BASE_4 0xF00560 + +#define mmTPC4_RTR_LBW_RANGE_BASE_5 0xF00564 + +#define mmTPC4_RTR_LBW_RANGE_BASE_6 0xF00568 + +#define mmTPC4_RTR_LBW_RANGE_BASE_7 0xF0056C + +#define mmTPC4_RTR_LBW_RANGE_BASE_8 0xF00570 + +#define mmTPC4_RTR_LBW_RANGE_BASE_9 0xF00574 + +#define mmTPC4_RTR_LBW_RANGE_BASE_10 0xF00578 + +#define mmTPC4_RTR_LBW_RANGE_BASE_11 0xF0057C + +#define mmTPC4_RTR_LBW_RANGE_BASE_12 0xF00580 + +#define mmTPC4_RTR_LBW_RANGE_BASE_13 0xF00584 + +#define mmTPC4_RTR_LBW_RANGE_BASE_14 0xF00588 + +#define mmTPC4_RTR_LBW_RANGE_BASE_15 0xF0058C + +#define mmTPC4_RTR_RGLTR 0xF00590 + +#define mmTPC4_RTR_RGLTR_WR_RESULT 0xF00594 + +#define mmTPC4_RTR_RGLTR_RD_RESULT 0xF00598 + +#define mmTPC4_RTR_SCRAMB_EN 0xF00600 + +#define mmTPC4_RTR_NON_LIN_SCRAMB 0xF00604 + +#endif /* ASIC_REG_TPC4_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h new file mode 100644 index 000000000000..3f00954fcdba --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_CFG_REGS_H_ +#define ASIC_REG_TPC5_CFG_REGS_H_ + +/* + ***************************************** + * TPC5_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF46400 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF46404 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF46408 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF4640C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF46410 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF46414 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF46418 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF4641C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF46420 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF46424 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF46428 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF4642C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF46430 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF46434 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF46438 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF4643C + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF46440 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF46444 + +#define mmTPC5_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF46448 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF4644C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF46450 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF46454 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF46458 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF4645C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF46460 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF46464 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF46468 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF4646C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF46470 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF46474 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF46478 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF4647C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF46480 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF46484 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF46488 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF4648C + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF46490 + +#define mmTPC5_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF46494 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF46498 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF4649C + +#define mmTPC5_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF464A0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF464A4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF464A8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF464AC + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF464B0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF464B4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF464B8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF464BC + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF464C0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF464C4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF464C8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF464CC + +#define 
mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF464D0 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF464D4 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF464D8 + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF464DC + +#define mmTPC5_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF464E0 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF464E4 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF464E8 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF464EC + +#define mmTPC5_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF464F0 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF464F4 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF464F8 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF464FC + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF46500 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF46504 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF46508 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF4650C + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF46510 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF46514 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF46518 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF4651C + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF46520 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF46524 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF46528 + +#define mmTPC5_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF4652C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF46530 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF46534 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF46538 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF4653C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF46540 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF46544 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF46548 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF4654C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF46550 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF46554 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF46558 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF4655C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF46560 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF46564 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF46568 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF4656C + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF46570 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF46574 + +#define mmTPC5_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF46578 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF4657C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF46580 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF46584 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF46588 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF4658C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF46590 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF46594 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF46598 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF4659C + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF465A0 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF465A4 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF465A8 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF465AC + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF465B0 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF465B4 + +#define 
mmTPC5_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF465B8 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF465BC + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF465C0 + +#define mmTPC5_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF465C4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF465C8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF465CC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF465D0 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF465D4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF465D8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF465DC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF465E0 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF465E4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF465E8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF465EC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF465F0 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF465F4 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF465F8 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF465FC + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF46600 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF46604 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF46608 + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF4660C + +#define mmTPC5_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF46610 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF46614 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF46618 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF4661C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF46620 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF46624 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF46628 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF4662C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF46630 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF46634 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF46638 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF4663C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF46640 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF46644 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF46648 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF4664C + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF46650 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF46654 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF46658 + +#define mmTPC5_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF4665C + +#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF46660 + +#define mmTPC5_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF46664 + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_0 0xF46668 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_0 0xF4666C + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_1 0xF46670 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_1 0xF46674 + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_2 0xF46678 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_2 0xF4667C + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_3 0xF46680 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_3 0xF46684 + +#define mmTPC5_CFG_KERNEL_TID_BASE_DIM_4 0xF46688 + +#define mmTPC5_CFG_KERNEL_TID_SIZE_DIM_4 0xF4668C + +#define mmTPC5_CFG_KERNEL_SRF_0 0xF46690 + +#define mmTPC5_CFG_KERNEL_SRF_1 0xF46694 + +#define mmTPC5_CFG_KERNEL_SRF_2 0xF46698 + +#define mmTPC5_CFG_KERNEL_SRF_3 0xF4669C + +#define mmTPC5_CFG_KERNEL_SRF_4 0xF466A0 + +#define mmTPC5_CFG_KERNEL_SRF_5 0xF466A4 + +#define mmTPC5_CFG_KERNEL_SRF_6 0xF466A8 + +#define mmTPC5_CFG_KERNEL_SRF_7 
0xF466AC + +#define mmTPC5_CFG_KERNEL_SRF_8 0xF466B0 + +#define mmTPC5_CFG_KERNEL_SRF_9 0xF466B4 + +#define mmTPC5_CFG_KERNEL_SRF_10 0xF466B8 + +#define mmTPC5_CFG_KERNEL_SRF_11 0xF466BC + +#define mmTPC5_CFG_KERNEL_SRF_12 0xF466C0 + +#define mmTPC5_CFG_KERNEL_SRF_13 0xF466C4 + +#define mmTPC5_CFG_KERNEL_SRF_14 0xF466C8 + +#define mmTPC5_CFG_KERNEL_SRF_15 0xF466CC + +#define mmTPC5_CFG_KERNEL_SRF_16 0xF466D0 + +#define mmTPC5_CFG_KERNEL_SRF_17 0xF466D4 + +#define mmTPC5_CFG_KERNEL_SRF_18 0xF466D8 + +#define mmTPC5_CFG_KERNEL_SRF_19 0xF466DC + +#define mmTPC5_CFG_KERNEL_SRF_20 0xF466E0 + +#define mmTPC5_CFG_KERNEL_SRF_21 0xF466E4 + +#define mmTPC5_CFG_KERNEL_SRF_22 0xF466E8 + +#define mmTPC5_CFG_KERNEL_SRF_23 0xF466EC + +#define mmTPC5_CFG_KERNEL_SRF_24 0xF466F0 + +#define mmTPC5_CFG_KERNEL_SRF_25 0xF466F4 + +#define mmTPC5_CFG_KERNEL_SRF_26 0xF466F8 + +#define mmTPC5_CFG_KERNEL_SRF_27 0xF466FC + +#define mmTPC5_CFG_KERNEL_SRF_28 0xF46700 + +#define mmTPC5_CFG_KERNEL_SRF_29 0xF46704 + +#define mmTPC5_CFG_KERNEL_SRF_30 0xF46708 + +#define mmTPC5_CFG_KERNEL_SRF_31 0xF4670C + +#define mmTPC5_CFG_KERNEL_KERNEL_CONFIG 0xF46710 + +#define mmTPC5_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF46714 + +#define mmTPC5_CFG_RESERVED_DESC_END 0xF46738 + +#define mmTPC5_CFG_ROUND_CSR 0xF467FC + +#define mmTPC5_CFG_TBUF_BASE_ADDR_LOW 0xF46800 + +#define mmTPC5_CFG_TBUF_BASE_ADDR_HIGH 0xF46804 + +#define mmTPC5_CFG_SEMAPHORE 0xF46808 + +#define mmTPC5_CFG_VFLAGS 0xF4680C + +#define mmTPC5_CFG_SFLAGS 0xF46810 + +#define mmTPC5_CFG_LFSR_POLYNOM 0xF46818 + +#define mmTPC5_CFG_STATUS 0xF4681C + +#define mmTPC5_CFG_CFG_BASE_ADDRESS_HIGH 0xF46820 + +#define mmTPC5_CFG_CFG_SUBTRACT_VALUE 0xF46824 + +#define mmTPC5_CFG_SM_BASE_ADDRESS_LOW 0xF46828 + +#define mmTPC5_CFG_SM_BASE_ADDRESS_HIGH 0xF4682C + +#define mmTPC5_CFG_TPC_CMD 0xF46830 + +#define mmTPC5_CFG_TPC_EXECUTE 0xF46838 + +#define mmTPC5_CFG_TPC_STALL 0xF4683C + +#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_LOW 0xF46840 + +#define mmTPC5_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF46844 + +#define mmTPC5_CFG_MSS_CONFIG 0xF46854 + +#define mmTPC5_CFG_TPC_INTR_CAUSE 0xF46858 + +#define mmTPC5_CFG_TPC_INTR_MASK 0xF4685C + +#define mmTPC5_CFG_TSB_CONFIG 0xF46860 + +#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF46A00 + +#define mmTPC5_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF46A04 + +#define mmTPC5_CFG_QM_TENSOR_0_PADDING_VALUE 0xF46A08 + +#define mmTPC5_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF46A0C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF46A10 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF46A14 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF46A18 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF46A1C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF46A20 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF46A24 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF46A28 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF46A2C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF46A30 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF46A34 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF46A38 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF46A3C + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF46A40 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF46A44 + +#define mmTPC5_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF46A48 + +#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF46A4C + +#define mmTPC5_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF46A50 + +#define mmTPC5_CFG_QM_TENSOR_1_PADDING_VALUE 0xF46A54 + +#define mmTPC5_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF46A58 + +#define 
mmTPC5_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF46A5C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF46A60 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF46A64 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF46A68 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF46A6C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF46A70 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF46A74 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF46A78 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF46A7C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF46A80 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF46A84 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF46A88 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF46A8C + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF46A90 + +#define mmTPC5_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF46A94 + +#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF46A98 + +#define mmTPC5_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF46A9C + +#define mmTPC5_CFG_QM_TENSOR_2_PADDING_VALUE 0xF46AA0 + +#define mmTPC5_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF46AA4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF46AA8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF46AAC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF46AB0 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF46AB4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF46AB8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF46ABC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF46AC0 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF46AC4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF46AC8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF46ACC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF46AD0 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF46AD4 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF46AD8 + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF46ADC + +#define mmTPC5_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF46AE0 + +#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF46AE4 + +#define mmTPC5_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF46AE8 + +#define mmTPC5_CFG_QM_TENSOR_3_PADDING_VALUE 0xF46AEC + +#define mmTPC5_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF46AF0 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF46AF4 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF46AF8 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF46AFC + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF46B00 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF46B04 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF46B08 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF46B0C + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF46B10 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF46B14 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF46B18 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF46B1C + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF46B20 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF46B24 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF46B28 + +#define mmTPC5_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF46B2C + +#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF46B30 + +#define mmTPC5_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF46B34 + +#define mmTPC5_CFG_QM_TENSOR_4_PADDING_VALUE 0xF46B38 + +#define mmTPC5_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF46B3C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF46B40 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF46B44 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF46B48 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF46B4C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF46B50 + +#define 
mmTPC5_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF46B54 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF46B58 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF46B5C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF46B60 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF46B64 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF46B68 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF46B6C + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF46B70 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF46B74 + +#define mmTPC5_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF46B78 + +#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF46B7C + +#define mmTPC5_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF46B80 + +#define mmTPC5_CFG_QM_TENSOR_5_PADDING_VALUE 0xF46B84 + +#define mmTPC5_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF46B88 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF46B8C + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF46B90 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF46B94 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF46B98 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF46B9C + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF46BA0 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF46BA4 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF46BA8 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF46BAC + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF46BB0 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF46BB4 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF46BB8 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF46BBC + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF46BC0 + +#define mmTPC5_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF46BC4 + +#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF46BC8 + +#define mmTPC5_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF46BCC + +#define mmTPC5_CFG_QM_TENSOR_6_PADDING_VALUE 0xF46BD0 + +#define mmTPC5_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF46BD4 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF46BD8 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF46BDC + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF46BE0 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF46BE4 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF46BE8 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF46BEC + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF46BF0 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF46BF4 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF46BF8 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF46BFC + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF46C00 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF46C04 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF46C08 + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF46C0C + +#define mmTPC5_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF46C10 + +#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF46C14 + +#define mmTPC5_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF46C18 + +#define mmTPC5_CFG_QM_TENSOR_7_PADDING_VALUE 0xF46C1C + +#define mmTPC5_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF46C20 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF46C24 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF46C28 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF46C2C + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF46C30 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF46C34 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF46C38 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF46C3C + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF46C40 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF46C44 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF46C48 + +#define 
mmTPC5_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF46C4C + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF46C50 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF46C54 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF46C58 + +#define mmTPC5_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF46C5C + +#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF46C60 + +#define mmTPC5_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF46C64 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_0 0xF46C68 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_0 0xF46C6C + +#define mmTPC5_CFG_QM_TID_BASE_DIM_1 0xF46C70 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_1 0xF46C74 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_2 0xF46C78 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_2 0xF46C7C + +#define mmTPC5_CFG_QM_TID_BASE_DIM_3 0xF46C80 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_3 0xF46C84 + +#define mmTPC5_CFG_QM_TID_BASE_DIM_4 0xF46C88 + +#define mmTPC5_CFG_QM_TID_SIZE_DIM_4 0xF46C8C + +#define mmTPC5_CFG_QM_SRF_0 0xF46C90 + +#define mmTPC5_CFG_QM_SRF_1 0xF46C94 + +#define mmTPC5_CFG_QM_SRF_2 0xF46C98 + +#define mmTPC5_CFG_QM_SRF_3 0xF46C9C + +#define mmTPC5_CFG_QM_SRF_4 0xF46CA0 + +#define mmTPC5_CFG_QM_SRF_5 0xF46CA4 + +#define mmTPC5_CFG_QM_SRF_6 0xF46CA8 + +#define mmTPC5_CFG_QM_SRF_7 0xF46CAC + +#define mmTPC5_CFG_QM_SRF_8 0xF46CB0 + +#define mmTPC5_CFG_QM_SRF_9 0xF46CB4 + +#define mmTPC5_CFG_QM_SRF_10 0xF46CB8 + +#define mmTPC5_CFG_QM_SRF_11 0xF46CBC + +#define mmTPC5_CFG_QM_SRF_12 0xF46CC0 + +#define mmTPC5_CFG_QM_SRF_13 0xF46CC4 + +#define mmTPC5_CFG_QM_SRF_14 0xF46CC8 + +#define mmTPC5_CFG_QM_SRF_15 0xF46CCC + +#define mmTPC5_CFG_QM_SRF_16 0xF46CD0 + +#define mmTPC5_CFG_QM_SRF_17 0xF46CD4 + +#define mmTPC5_CFG_QM_SRF_18 0xF46CD8 + +#define mmTPC5_CFG_QM_SRF_19 0xF46CDC + +#define mmTPC5_CFG_QM_SRF_20 0xF46CE0 + +#define mmTPC5_CFG_QM_SRF_21 0xF46CE4 + +#define mmTPC5_CFG_QM_SRF_22 0xF46CE8 + +#define mmTPC5_CFG_QM_SRF_23 0xF46CEC + +#define mmTPC5_CFG_QM_SRF_24 0xF46CF0 + +#define mmTPC5_CFG_QM_SRF_25 0xF46CF4 + +#define mmTPC5_CFG_QM_SRF_26 0xF46CF8 + +#define mmTPC5_CFG_QM_SRF_27 0xF46CFC + +#define mmTPC5_CFG_QM_SRF_28 0xF46D00 + +#define mmTPC5_CFG_QM_SRF_29 0xF46D04 + +#define mmTPC5_CFG_QM_SRF_30 0xF46D08 + +#define mmTPC5_CFG_QM_SRF_31 0xF46D0C + +#define mmTPC5_CFG_QM_KERNEL_CONFIG 0xF46D10 + +#define mmTPC5_CFG_QM_SYNC_OBJECT_MESSAGE 0xF46D14 + +#define mmTPC5_CFG_ARUSER 0xF46D18 + +#define mmTPC5_CFG_AWUSER 0xF46D1C + +#define mmTPC5_CFG_FUNC_MBIST_CNTRL 0xF46E00 + +#define mmTPC5_CFG_FUNC_MBIST_PAT 0xF46E04 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_0 0xF46E08 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_1 0xF46E0C + +#define mmTPC5_CFG_FUNC_MBIST_MEM_2 0xF46E10 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_3 0xF46E14 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_4 0xF46E18 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_5 0xF46E1C + +#define mmTPC5_CFG_FUNC_MBIST_MEM_6 0xF46E20 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_7 0xF46E24 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_8 0xF46E28 + +#define mmTPC5_CFG_FUNC_MBIST_MEM_9 0xF46E2C + +#endif /* ASIC_REG_TPC5_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h new file mode 100644 index 000000000000..d8e72a8e18d7 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_CMDQ_REGS_H_ +#define ASIC_REG_TPC5_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC5_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC5_CMDQ_GLBL_CFG0 0xF49000 + +#define mmTPC5_CMDQ_GLBL_CFG1 0xF49004 + +#define mmTPC5_CMDQ_GLBL_PROT 0xF49008 + +#define mmTPC5_CMDQ_GLBL_ERR_CFG 0xF4900C + +#define mmTPC5_CMDQ_GLBL_ERR_ADDR_LO 0xF49010 + +#define mmTPC5_CMDQ_GLBL_ERR_ADDR_HI 0xF49014 + +#define mmTPC5_CMDQ_GLBL_ERR_WDATA 0xF49018 + +#define mmTPC5_CMDQ_GLBL_SECURE_PROPS 0xF4901C + +#define mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS 0xF49020 + +#define mmTPC5_CMDQ_GLBL_STS0 0xF49024 + +#define mmTPC5_CMDQ_GLBL_STS1 0xF49028 + +#define mmTPC5_CMDQ_CQ_CFG0 0xF490B0 + +#define mmTPC5_CMDQ_CQ_CFG1 0xF490B4 + +#define mmTPC5_CMDQ_CQ_ARUSER 0xF490B8 + +#define mmTPC5_CMDQ_CQ_PTR_LO 0xF490C0 + +#define mmTPC5_CMDQ_CQ_PTR_HI 0xF490C4 + +#define mmTPC5_CMDQ_CQ_TSIZE 0xF490C8 + +#define mmTPC5_CMDQ_CQ_CTL 0xF490CC + +#define mmTPC5_CMDQ_CQ_PTR_LO_STS 0xF490D4 + +#define mmTPC5_CMDQ_CQ_PTR_HI_STS 0xF490D8 + +#define mmTPC5_CMDQ_CQ_TSIZE_STS 0xF490DC + +#define mmTPC5_CMDQ_CQ_CTL_STS 0xF490E0 + +#define mmTPC5_CMDQ_CQ_STS0 0xF490E4 + +#define mmTPC5_CMDQ_CQ_STS1 0xF490E8 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_EN 0xF490F0 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF490F4 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_SAT 0xF490F8 + +#define mmTPC5_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF490FC + +#define mmTPC5_CMDQ_CQ_IFIFO_CNT 0xF49108 + +#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF49120 + +#define mmTPC5_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF49124 + +#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF49128 + +#define mmTPC5_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF4912C + +#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF49130 + +#define mmTPC5_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF49134 + +#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF49138 + +#define mmTPC5_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF4913C + +#define mmTPC5_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF49140 + +#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF49144 + +#define mmTPC5_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF49148 + +#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF4914C + +#define mmTPC5_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF49150 + +#define mmTPC5_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF49154 + +#define mmTPC5_CMDQ_CP_FENCE0_RDATA 0xF49158 + +#define mmTPC5_CMDQ_CP_FENCE1_RDATA 0xF4915C + +#define mmTPC5_CMDQ_CP_FENCE2_RDATA 0xF49160 + +#define mmTPC5_CMDQ_CP_FENCE3_RDATA 0xF49164 + +#define mmTPC5_CMDQ_CP_FENCE0_CNT 0xF49168 + +#define mmTPC5_CMDQ_CP_FENCE1_CNT 0xF4916C + +#define mmTPC5_CMDQ_CP_FENCE2_CNT 0xF49170 + +#define mmTPC5_CMDQ_CP_FENCE3_CNT 0xF49174 + +#define mmTPC5_CMDQ_CP_STS 0xF49178 + +#define mmTPC5_CMDQ_CP_CURRENT_INST_LO 0xF4917C + +#define mmTPC5_CMDQ_CP_CURRENT_INST_HI 0xF49180 + +#define mmTPC5_CMDQ_CP_BARRIER_CFG 0xF49184 + +#define mmTPC5_CMDQ_CP_DBG_0 0xF49188 + +#define mmTPC5_CMDQ_CQ_BUF_ADDR 0xF49308 + +#define mmTPC5_CMDQ_CQ_BUF_RDATA 0xF4930C + +#endif /* ASIC_REG_TPC5_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h new file mode 100644 index 000000000000..be2e68624709 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. 
+ * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_QM_REGS_H_ +#define ASIC_REG_TPC5_QM_REGS_H_ + +/* + ***************************************** + * TPC5_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC5_QM_GLBL_CFG0 0xF48000 + +#define mmTPC5_QM_GLBL_CFG1 0xF48004 + +#define mmTPC5_QM_GLBL_PROT 0xF48008 + +#define mmTPC5_QM_GLBL_ERR_CFG 0xF4800C + +#define mmTPC5_QM_GLBL_ERR_ADDR_LO 0xF48010 + +#define mmTPC5_QM_GLBL_ERR_ADDR_HI 0xF48014 + +#define mmTPC5_QM_GLBL_ERR_WDATA 0xF48018 + +#define mmTPC5_QM_GLBL_SECURE_PROPS 0xF4801C + +#define mmTPC5_QM_GLBL_NON_SECURE_PROPS 0xF48020 + +#define mmTPC5_QM_GLBL_STS0 0xF48024 + +#define mmTPC5_QM_GLBL_STS1 0xF48028 + +#define mmTPC5_QM_PQ_BASE_LO 0xF48060 + +#define mmTPC5_QM_PQ_BASE_HI 0xF48064 + +#define mmTPC5_QM_PQ_SIZE 0xF48068 + +#define mmTPC5_QM_PQ_PI 0xF4806C + +#define mmTPC5_QM_PQ_CI 0xF48070 + +#define mmTPC5_QM_PQ_CFG0 0xF48074 + +#define mmTPC5_QM_PQ_CFG1 0xF48078 + +#define mmTPC5_QM_PQ_ARUSER 0xF4807C + +#define mmTPC5_QM_PQ_PUSH0 0xF48080 + +#define mmTPC5_QM_PQ_PUSH1 0xF48084 + +#define mmTPC5_QM_PQ_PUSH2 0xF48088 + +#define mmTPC5_QM_PQ_PUSH3 0xF4808C + +#define mmTPC5_QM_PQ_STS0 0xF48090 + +#define mmTPC5_QM_PQ_STS1 0xF48094 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_EN 0xF480A0 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF480A4 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_SAT 0xF480A8 + +#define mmTPC5_QM_PQ_RD_RATE_LIM_TOUT 0xF480AC + +#define mmTPC5_QM_CQ_CFG0 0xF480B0 + +#define mmTPC5_QM_CQ_CFG1 0xF480B4 + +#define mmTPC5_QM_CQ_ARUSER 0xF480B8 + +#define mmTPC5_QM_CQ_PTR_LO 0xF480C0 + +#define mmTPC5_QM_CQ_PTR_HI 0xF480C4 + +#define mmTPC5_QM_CQ_TSIZE 0xF480C8 + +#define mmTPC5_QM_CQ_CTL 0xF480CC + +#define mmTPC5_QM_CQ_PTR_LO_STS 0xF480D4 + +#define mmTPC5_QM_CQ_PTR_HI_STS 0xF480D8 + +#define mmTPC5_QM_CQ_TSIZE_STS 0xF480DC + +#define mmTPC5_QM_CQ_CTL_STS 0xF480E0 + +#define mmTPC5_QM_CQ_STS0 0xF480E4 + +#define mmTPC5_QM_CQ_STS1 0xF480E8 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_EN 0xF480F0 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF480F4 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_SAT 0xF480F8 + +#define mmTPC5_QM_CQ_RD_RATE_LIM_TOUT 0xF480FC + +#define mmTPC5_QM_CQ_IFIFO_CNT 0xF48108 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_LO 0xF48120 + +#define mmTPC5_QM_CP_MSG_BASE0_ADDR_HI 0xF48124 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_LO 0xF48128 + +#define mmTPC5_QM_CP_MSG_BASE1_ADDR_HI 0xF4812C + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_LO 0xF48130 + +#define mmTPC5_QM_CP_MSG_BASE2_ADDR_HI 0xF48134 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_LO 0xF48138 + +#define mmTPC5_QM_CP_MSG_BASE3_ADDR_HI 0xF4813C + +#define mmTPC5_QM_CP_LDMA_TSIZE_OFFSET 0xF48140 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF48144 + +#define mmTPC5_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF48148 + +#define mmTPC5_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF4814C + +#define mmTPC5_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF48150 + +#define mmTPC5_QM_CP_LDMA_COMMIT_OFFSET 0xF48154 + +#define mmTPC5_QM_CP_FENCE0_RDATA 0xF48158 + +#define mmTPC5_QM_CP_FENCE1_RDATA 0xF4815C + +#define mmTPC5_QM_CP_FENCE2_RDATA 0xF48160 + +#define mmTPC5_QM_CP_FENCE3_RDATA 0xF48164 + +#define mmTPC5_QM_CP_FENCE0_CNT 0xF48168 + +#define mmTPC5_QM_CP_FENCE1_CNT 0xF4816C + +#define mmTPC5_QM_CP_FENCE2_CNT 0xF48170 + +#define mmTPC5_QM_CP_FENCE3_CNT 0xF48174 + +#define mmTPC5_QM_CP_STS 0xF48178 + +#define mmTPC5_QM_CP_CURRENT_INST_LO 0xF4817C + 
+#define mmTPC5_QM_CP_CURRENT_INST_HI 0xF48180 + +#define mmTPC5_QM_CP_BARRIER_CFG 0xF48184 + +#define mmTPC5_QM_CP_DBG_0 0xF48188 + +#define mmTPC5_QM_PQ_BUF_ADDR 0xF48300 + +#define mmTPC5_QM_PQ_BUF_RDATA 0xF48304 + +#define mmTPC5_QM_CQ_BUF_ADDR 0xF48308 + +#define mmTPC5_QM_CQ_BUF_RDATA 0xF4830C + +#endif /* ASIC_REG_TPC5_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h new file mode 100644 index 000000000000..6f301c7bbc2f --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC5_RTR_REGS_H_ +#define ASIC_REG_TPC5_RTR_REGS_H_ + +/* + ***************************************** + * TPC5_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC5_RTR_HBW_RD_RQ_E_ARB 0xF40100 + +#define mmTPC5_RTR_HBW_RD_RQ_W_ARB 0xF40104 + +#define mmTPC5_RTR_HBW_RD_RQ_N_ARB 0xF40108 + +#define mmTPC5_RTR_HBW_RD_RQ_S_ARB 0xF4010C + +#define mmTPC5_RTR_HBW_RD_RQ_L_ARB 0xF40110 + +#define mmTPC5_RTR_HBW_E_ARB_MAX 0xF40120 + +#define mmTPC5_RTR_HBW_W_ARB_MAX 0xF40124 + +#define mmTPC5_RTR_HBW_N_ARB_MAX 0xF40128 + +#define mmTPC5_RTR_HBW_S_ARB_MAX 0xF4012C + +#define mmTPC5_RTR_HBW_L_ARB_MAX 0xF40130 + +#define mmTPC5_RTR_HBW_RD_RS_E_ARB 0xF40140 + +#define mmTPC5_RTR_HBW_RD_RS_W_ARB 0xF40144 + +#define mmTPC5_RTR_HBW_RD_RS_N_ARB 0xF40148 + +#define mmTPC5_RTR_HBW_RD_RS_S_ARB 0xF4014C + +#define mmTPC5_RTR_HBW_RD_RS_L_ARB 0xF40150 + +#define mmTPC5_RTR_HBW_WR_RQ_E_ARB 0xF40170 + +#define mmTPC5_RTR_HBW_WR_RQ_W_ARB 0xF40174 + +#define mmTPC5_RTR_HBW_WR_RQ_N_ARB 0xF40178 + +#define mmTPC5_RTR_HBW_WR_RQ_S_ARB 0xF4017C + +#define mmTPC5_RTR_HBW_WR_RQ_L_ARB 0xF40180 + +#define mmTPC5_RTR_HBW_WR_RS_E_ARB 0xF40190 + +#define mmTPC5_RTR_HBW_WR_RS_W_ARB 0xF40194 + +#define mmTPC5_RTR_HBW_WR_RS_N_ARB 0xF40198 + +#define mmTPC5_RTR_HBW_WR_RS_S_ARB 0xF4019C + +#define mmTPC5_RTR_HBW_WR_RS_L_ARB 0xF401A0 + +#define mmTPC5_RTR_LBW_RD_RQ_E_ARB 0xF40200 + +#define mmTPC5_RTR_LBW_RD_RQ_W_ARB 0xF40204 + +#define mmTPC5_RTR_LBW_RD_RQ_N_ARB 0xF40208 + +#define mmTPC5_RTR_LBW_RD_RQ_S_ARB 0xF4020C + +#define mmTPC5_RTR_LBW_RD_RQ_L_ARB 0xF40210 + +#define mmTPC5_RTR_LBW_E_ARB_MAX 0xF40220 + +#define mmTPC5_RTR_LBW_W_ARB_MAX 0xF40224 + +#define mmTPC5_RTR_LBW_N_ARB_MAX 0xF40228 + +#define mmTPC5_RTR_LBW_S_ARB_MAX 0xF4022C + +#define mmTPC5_RTR_LBW_L_ARB_MAX 0xF40230 + +#define mmTPC5_RTR_LBW_RD_RS_E_ARB 0xF40250 + +#define mmTPC5_RTR_LBW_RD_RS_W_ARB 0xF40254 + +#define mmTPC5_RTR_LBW_RD_RS_N_ARB 0xF40258 + +#define mmTPC5_RTR_LBW_RD_RS_S_ARB 0xF4025C + +#define mmTPC5_RTR_LBW_RD_RS_L_ARB 0xF40260 + +#define mmTPC5_RTR_LBW_WR_RQ_E_ARB 0xF40270 + +#define mmTPC5_RTR_LBW_WR_RQ_W_ARB 0xF40274 + +#define mmTPC5_RTR_LBW_WR_RQ_N_ARB 0xF40278 + +#define mmTPC5_RTR_LBW_WR_RQ_S_ARB 0xF4027C + +#define mmTPC5_RTR_LBW_WR_RQ_L_ARB 0xF40280 + +#define mmTPC5_RTR_LBW_WR_RS_E_ARB 0xF40290 + +#define mmTPC5_RTR_LBW_WR_RS_W_ARB 0xF40294 + +#define mmTPC5_RTR_LBW_WR_RS_N_ARB 0xF40298 + +#define mmTPC5_RTR_LBW_WR_RS_S_ARB 0xF4029C + +#define mmTPC5_RTR_LBW_WR_RS_L_ARB 0xF402A0 + +#define mmTPC5_RTR_DBG_E_ARB 0xF40300 + +#define mmTPC5_RTR_DBG_W_ARB 0xF40304 + +#define mmTPC5_RTR_DBG_N_ARB 
0xF40308 + +#define mmTPC5_RTR_DBG_S_ARB 0xF4030C + +#define mmTPC5_RTR_DBG_L_ARB 0xF40310 + +#define mmTPC5_RTR_DBG_E_ARB_MAX 0xF40320 + +#define mmTPC5_RTR_DBG_W_ARB_MAX 0xF40324 + +#define mmTPC5_RTR_DBG_N_ARB_MAX 0xF40328 + +#define mmTPC5_RTR_DBG_S_ARB_MAX 0xF4032C + +#define mmTPC5_RTR_DBG_L_ARB_MAX 0xF40330 + +#define mmTPC5_RTR_SPLIT_COEF_0 0xF40400 + +#define mmTPC5_RTR_SPLIT_COEF_1 0xF40404 + +#define mmTPC5_RTR_SPLIT_COEF_2 0xF40408 + +#define mmTPC5_RTR_SPLIT_COEF_3 0xF4040C + +#define mmTPC5_RTR_SPLIT_COEF_4 0xF40410 + +#define mmTPC5_RTR_SPLIT_COEF_5 0xF40414 + +#define mmTPC5_RTR_SPLIT_COEF_6 0xF40418 + +#define mmTPC5_RTR_SPLIT_COEF_7 0xF4041C + +#define mmTPC5_RTR_SPLIT_COEF_8 0xF40420 + +#define mmTPC5_RTR_SPLIT_COEF_9 0xF40424 + +#define mmTPC5_RTR_SPLIT_CFG 0xF40440 + +#define mmTPC5_RTR_SPLIT_RD_SAT 0xF40444 + +#define mmTPC5_RTR_SPLIT_RD_RST_TOKEN 0xF40448 + +#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_0 0xF4044C + +#define mmTPC5_RTR_SPLIT_RD_TIMEOUT_1 0xF40450 + +#define mmTPC5_RTR_SPLIT_WR_SAT 0xF40454 + +#define mmTPC5_RTR_WPLIT_WR_TST_TOLEN 0xF40458 + +#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_0 0xF4045C + +#define mmTPC5_RTR_SPLIT_WR_TIMEOUT_1 0xF40460 + +#define mmTPC5_RTR_HBW_RANGE_HIT 0xF40470 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_0 0xF40480 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_1 0xF40484 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_2 0xF40488 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_3 0xF4048C + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_4 0xF40490 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_5 0xF40494 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_6 0xF40498 + +#define mmTPC5_RTR_HBW_RANGE_MASK_L_7 0xF4049C + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_0 0xF404A0 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_1 0xF404A4 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_2 0xF404A8 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_3 0xF404AC + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_4 0xF404B0 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_5 0xF404B4 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_6 0xF404B8 + +#define mmTPC5_RTR_HBW_RANGE_MASK_H_7 0xF404BC + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_0 0xF404C0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_1 0xF404C4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_2 0xF404C8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_3 0xF404CC + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_4 0xF404D0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_5 0xF404D4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_6 0xF404D8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_L_7 0xF404DC + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_0 0xF404E0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_1 0xF404E4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_2 0xF404E8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_3 0xF404EC + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_4 0xF404F0 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_5 0xF404F4 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_6 0xF404F8 + +#define mmTPC5_RTR_HBW_RANGE_BASE_H_7 0xF404FC + +#define mmTPC5_RTR_LBW_RANGE_HIT 0xF40500 + +#define mmTPC5_RTR_LBW_RANGE_MASK_0 0xF40510 + +#define mmTPC5_RTR_LBW_RANGE_MASK_1 0xF40514 + +#define mmTPC5_RTR_LBW_RANGE_MASK_2 0xF40518 + +#define mmTPC5_RTR_LBW_RANGE_MASK_3 0xF4051C + +#define mmTPC5_RTR_LBW_RANGE_MASK_4 0xF40520 + +#define mmTPC5_RTR_LBW_RANGE_MASK_5 0xF40524 + +#define mmTPC5_RTR_LBW_RANGE_MASK_6 0xF40528 + +#define mmTPC5_RTR_LBW_RANGE_MASK_7 0xF4052C + +#define mmTPC5_RTR_LBW_RANGE_MASK_8 0xF40530 + +#define mmTPC5_RTR_LBW_RANGE_MASK_9 0xF40534 + +#define mmTPC5_RTR_LBW_RANGE_MASK_10 0xF40538 + +#define mmTPC5_RTR_LBW_RANGE_MASK_11 0xF4053C + +#define mmTPC5_RTR_LBW_RANGE_MASK_12 0xF40540 + +#define mmTPC5_RTR_LBW_RANGE_MASK_13 0xF40544 + 
+#define mmTPC5_RTR_LBW_RANGE_MASK_14 0xF40548 + +#define mmTPC5_RTR_LBW_RANGE_MASK_15 0xF4054C + +#define mmTPC5_RTR_LBW_RANGE_BASE_0 0xF40550 + +#define mmTPC5_RTR_LBW_RANGE_BASE_1 0xF40554 + +#define mmTPC5_RTR_LBW_RANGE_BASE_2 0xF40558 + +#define mmTPC5_RTR_LBW_RANGE_BASE_3 0xF4055C + +#define mmTPC5_RTR_LBW_RANGE_BASE_4 0xF40560 + +#define mmTPC5_RTR_LBW_RANGE_BASE_5 0xF40564 + +#define mmTPC5_RTR_LBW_RANGE_BASE_6 0xF40568 + +#define mmTPC5_RTR_LBW_RANGE_BASE_7 0xF4056C + +#define mmTPC5_RTR_LBW_RANGE_BASE_8 0xF40570 + +#define mmTPC5_RTR_LBW_RANGE_BASE_9 0xF40574 + +#define mmTPC5_RTR_LBW_RANGE_BASE_10 0xF40578 + +#define mmTPC5_RTR_LBW_RANGE_BASE_11 0xF4057C + +#define mmTPC5_RTR_LBW_RANGE_BASE_12 0xF40580 + +#define mmTPC5_RTR_LBW_RANGE_BASE_13 0xF40584 + +#define mmTPC5_RTR_LBW_RANGE_BASE_14 0xF40588 + +#define mmTPC5_RTR_LBW_RANGE_BASE_15 0xF4058C + +#define mmTPC5_RTR_RGLTR 0xF40590 + +#define mmTPC5_RTR_RGLTR_WR_RESULT 0xF40594 + +#define mmTPC5_RTR_RGLTR_RD_RESULT 0xF40598 + +#define mmTPC5_RTR_SCRAMB_EN 0xF40600 + +#define mmTPC5_RTR_NON_LIN_SCRAMB 0xF40604 + +#endif /* ASIC_REG_TPC5_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h new file mode 100644 index 000000000000..1e1168601c41 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_CFG_REGS_H_ +#define ASIC_REG_TPC6_CFG_REGS_H_ + +/* + ***************************************** + * TPC6_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xF86400 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xF86404 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xF86408 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xF8640C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xF86410 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xF86414 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xF86418 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xF8641C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xF86420 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xF86424 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xF86428 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xF8642C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xF86430 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xF86434 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xF86438 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xF8643C + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xF86440 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xF86444 + +#define mmTPC6_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xF86448 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xF8644C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xF86450 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xF86454 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xF86458 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xF8645C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xF86460 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xF86464 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xF86468 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xF8646C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xF86470 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xF86474 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xF86478 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xF8647C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xF86480 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xF86484 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xF86488 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xF8648C + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xF86490 + +#define mmTPC6_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xF86494 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xF86498 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xF8649C + +#define mmTPC6_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xF864A0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xF864A4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xF864A8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xF864AC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xF864B0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xF864B4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xF864B8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xF864BC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xF864C0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xF864C4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xF864C8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xF864CC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xF864D0 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xF864D4 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xF864D8 + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xF864DC + +#define mmTPC6_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xF864E0 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xF864E4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xF864E8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xF864EC + +#define mmTPC6_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xF864F0 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xF864F4 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xF864F8 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xF864FC + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xF86500 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xF86504 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xF86508 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xF8650C + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xF86510 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xF86514 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xF86518 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xF8651C + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xF86520 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xF86524 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xF86528 + +#define mmTPC6_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xF8652C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xF86530 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xF86534 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xF86538 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xF8653C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xF86540 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xF86544 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xF86548 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xF8654C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xF86550 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xF86554 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xF86558 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xF8655C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xF86560 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xF86564 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xF86568 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xF8656C + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xF86570 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xF86574 + +#define mmTPC6_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xF86578 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xF8657C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xF86580 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xF86584 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xF86588 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xF8658C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xF86590 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xF86594 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xF86598 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xF8659C + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xF865A0 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xF865A4 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xF865A8 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xF865AC + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xF865B0 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xF865B4 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xF865B8 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xF865BC + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xF865C0 + +#define mmTPC6_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xF865C4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xF865C8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xF865CC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xF865D0 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xF865D4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xF865D8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xF865DC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xF865E0 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xF865E4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xF865E8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xF865EC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xF865F0 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xF865F4 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xF865F8 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xF865FC + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xF86600 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xF86604 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xF86608 + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xF8660C + +#define mmTPC6_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xF86610 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xF86614 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xF86618 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xF8661C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xF86620 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xF86624 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xF86628 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xF8662C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xF86630 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xF86634 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xF86638 + +#define 
mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xF8663C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xF86640 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xF86644 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xF86648 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xF8664C + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xF86650 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xF86654 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xF86658 + +#define mmTPC6_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xF8665C + +#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xF86660 + +#define mmTPC6_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xF86664 + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_0 0xF86668 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_0 0xF8666C + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_1 0xF86670 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_1 0xF86674 + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_2 0xF86678 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_2 0xF8667C + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_3 0xF86680 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_3 0xF86684 + +#define mmTPC6_CFG_KERNEL_TID_BASE_DIM_4 0xF86688 + +#define mmTPC6_CFG_KERNEL_TID_SIZE_DIM_4 0xF8668C + +#define mmTPC6_CFG_KERNEL_SRF_0 0xF86690 + +#define mmTPC6_CFG_KERNEL_SRF_1 0xF86694 + +#define mmTPC6_CFG_KERNEL_SRF_2 0xF86698 + +#define mmTPC6_CFG_KERNEL_SRF_3 0xF8669C + +#define mmTPC6_CFG_KERNEL_SRF_4 0xF866A0 + +#define mmTPC6_CFG_KERNEL_SRF_5 0xF866A4 + +#define mmTPC6_CFG_KERNEL_SRF_6 0xF866A8 + +#define mmTPC6_CFG_KERNEL_SRF_7 0xF866AC + +#define mmTPC6_CFG_KERNEL_SRF_8 0xF866B0 + +#define mmTPC6_CFG_KERNEL_SRF_9 0xF866B4 + +#define mmTPC6_CFG_KERNEL_SRF_10 0xF866B8 + +#define mmTPC6_CFG_KERNEL_SRF_11 0xF866BC + +#define mmTPC6_CFG_KERNEL_SRF_12 0xF866C0 + +#define mmTPC6_CFG_KERNEL_SRF_13 0xF866C4 + +#define mmTPC6_CFG_KERNEL_SRF_14 0xF866C8 + +#define mmTPC6_CFG_KERNEL_SRF_15 0xF866CC + +#define mmTPC6_CFG_KERNEL_SRF_16 0xF866D0 + +#define mmTPC6_CFG_KERNEL_SRF_17 0xF866D4 + +#define mmTPC6_CFG_KERNEL_SRF_18 0xF866D8 + +#define mmTPC6_CFG_KERNEL_SRF_19 0xF866DC + +#define mmTPC6_CFG_KERNEL_SRF_20 0xF866E0 + +#define mmTPC6_CFG_KERNEL_SRF_21 0xF866E4 + +#define mmTPC6_CFG_KERNEL_SRF_22 0xF866E8 + +#define mmTPC6_CFG_KERNEL_SRF_23 0xF866EC + +#define mmTPC6_CFG_KERNEL_SRF_24 0xF866F0 + +#define mmTPC6_CFG_KERNEL_SRF_25 0xF866F4 + +#define mmTPC6_CFG_KERNEL_SRF_26 0xF866F8 + +#define mmTPC6_CFG_KERNEL_SRF_27 0xF866FC + +#define mmTPC6_CFG_KERNEL_SRF_28 0xF86700 + +#define mmTPC6_CFG_KERNEL_SRF_29 0xF86704 + +#define mmTPC6_CFG_KERNEL_SRF_30 0xF86708 + +#define mmTPC6_CFG_KERNEL_SRF_31 0xF8670C + +#define mmTPC6_CFG_KERNEL_KERNEL_CONFIG 0xF86710 + +#define mmTPC6_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xF86714 + +#define mmTPC6_CFG_RESERVED_DESC_END 0xF86738 + +#define mmTPC6_CFG_ROUND_CSR 0xF867FC + +#define mmTPC6_CFG_TBUF_BASE_ADDR_LOW 0xF86800 + +#define mmTPC6_CFG_TBUF_BASE_ADDR_HIGH 0xF86804 + +#define mmTPC6_CFG_SEMAPHORE 0xF86808 + +#define mmTPC6_CFG_VFLAGS 0xF8680C + +#define mmTPC6_CFG_SFLAGS 0xF86810 + +#define mmTPC6_CFG_LFSR_POLYNOM 0xF86818 + +#define mmTPC6_CFG_STATUS 0xF8681C + +#define mmTPC6_CFG_CFG_BASE_ADDRESS_HIGH 0xF86820 + +#define mmTPC6_CFG_CFG_SUBTRACT_VALUE 0xF86824 + +#define mmTPC6_CFG_SM_BASE_ADDRESS_LOW 0xF86828 + +#define mmTPC6_CFG_SM_BASE_ADDRESS_HIGH 0xF8682C + +#define mmTPC6_CFG_TPC_CMD 0xF86830 + +#define mmTPC6_CFG_TPC_EXECUTE 0xF86838 + +#define mmTPC6_CFG_TPC_STALL 0xF8683C + +#define mmTPC6_CFG_ICACHE_BASE_ADDERESS_LOW 0xF86840 + +#define 
mmTPC6_CFG_ICACHE_BASE_ADDERESS_HIGH 0xF86844 + +#define mmTPC6_CFG_MSS_CONFIG 0xF86854 + +#define mmTPC6_CFG_TPC_INTR_CAUSE 0xF86858 + +#define mmTPC6_CFG_TPC_INTR_MASK 0xF8685C + +#define mmTPC6_CFG_TSB_CONFIG 0xF86860 + +#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xF86A00 + +#define mmTPC6_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xF86A04 + +#define mmTPC6_CFG_QM_TENSOR_0_PADDING_VALUE 0xF86A08 + +#define mmTPC6_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xF86A0C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_SIZE 0xF86A10 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xF86A14 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xF86A18 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_SIZE 0xF86A1C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xF86A20 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xF86A24 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_SIZE 0xF86A28 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xF86A2C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xF86A30 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_SIZE 0xF86A34 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xF86A38 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xF86A3C + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_SIZE 0xF86A40 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xF86A44 + +#define mmTPC6_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xF86A48 + +#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xF86A4C + +#define mmTPC6_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xF86A50 + +#define mmTPC6_CFG_QM_TENSOR_1_PADDING_VALUE 0xF86A54 + +#define mmTPC6_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xF86A58 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_SIZE 0xF86A5C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xF86A60 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xF86A64 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_SIZE 0xF86A68 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xF86A6C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xF86A70 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_SIZE 0xF86A74 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xF86A78 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xF86A7C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_SIZE 0xF86A80 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xF86A84 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xF86A88 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_SIZE 0xF86A8C + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xF86A90 + +#define mmTPC6_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xF86A94 + +#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xF86A98 + +#define mmTPC6_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xF86A9C + +#define mmTPC6_CFG_QM_TENSOR_2_PADDING_VALUE 0xF86AA0 + +#define mmTPC6_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xF86AA4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_SIZE 0xF86AA8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xF86AAC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xF86AB0 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_SIZE 0xF86AB4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xF86AB8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xF86ABC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_SIZE 0xF86AC0 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xF86AC4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xF86AC8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_SIZE 0xF86ACC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xF86AD0 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xF86AD4 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_SIZE 0xF86AD8 + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xF86ADC + +#define mmTPC6_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xF86AE0 + +#define mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xF86AE4 + +#define 
mmTPC6_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xF86AE8 + +#define mmTPC6_CFG_QM_TENSOR_3_PADDING_VALUE 0xF86AEC + +#define mmTPC6_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xF86AF0 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_SIZE 0xF86AF4 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xF86AF8 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xF86AFC + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_SIZE 0xF86B00 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xF86B04 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xF86B08 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_SIZE 0xF86B0C + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xF86B10 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xF86B14 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_SIZE 0xF86B18 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xF86B1C + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xF86B20 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_SIZE 0xF86B24 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xF86B28 + +#define mmTPC6_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xF86B2C + +#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xF86B30 + +#define mmTPC6_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xF86B34 + +#define mmTPC6_CFG_QM_TENSOR_4_PADDING_VALUE 0xF86B38 + +#define mmTPC6_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xF86B3C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_SIZE 0xF86B40 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xF86B44 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xF86B48 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_SIZE 0xF86B4C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xF86B50 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xF86B54 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_SIZE 0xF86B58 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xF86B5C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xF86B60 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_SIZE 0xF86B64 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xF86B68 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xF86B6C + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_SIZE 0xF86B70 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xF86B74 + +#define mmTPC6_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xF86B78 + +#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xF86B7C + +#define mmTPC6_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xF86B80 + +#define mmTPC6_CFG_QM_TENSOR_5_PADDING_VALUE 0xF86B84 + +#define mmTPC6_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xF86B88 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_SIZE 0xF86B8C + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xF86B90 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xF86B94 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_SIZE 0xF86B98 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xF86B9C + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xF86BA0 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_SIZE 0xF86BA4 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xF86BA8 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xF86BAC + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_SIZE 0xF86BB0 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xF86BB4 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xF86BB8 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_SIZE 0xF86BBC + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xF86BC0 + +#define mmTPC6_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xF86BC4 + +#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xF86BC8 + +#define mmTPC6_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xF86BCC + +#define mmTPC6_CFG_QM_TENSOR_6_PADDING_VALUE 0xF86BD0 + +#define mmTPC6_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xF86BD4 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_SIZE 0xF86BD8 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xF86BDC + +#define 
mmTPC6_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xF86BE0 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_SIZE 0xF86BE4 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xF86BE8 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xF86BEC + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_SIZE 0xF86BF0 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xF86BF4 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xF86BF8 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_SIZE 0xF86BFC + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xF86C00 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xF86C04 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_SIZE 0xF86C08 + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xF86C0C + +#define mmTPC6_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xF86C10 + +#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xF86C14 + +#define mmTPC6_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xF86C18 + +#define mmTPC6_CFG_QM_TENSOR_7_PADDING_VALUE 0xF86C1C + +#define mmTPC6_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xF86C20 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_SIZE 0xF86C24 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xF86C28 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xF86C2C + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_SIZE 0xF86C30 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xF86C34 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xF86C38 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_SIZE 0xF86C3C + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xF86C40 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xF86C44 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_SIZE 0xF86C48 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xF86C4C + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xF86C50 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_SIZE 0xF86C54 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xF86C58 + +#define mmTPC6_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xF86C5C + +#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xF86C60 + +#define mmTPC6_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xF86C64 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_0 0xF86C68 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_0 0xF86C6C + +#define mmTPC6_CFG_QM_TID_BASE_DIM_1 0xF86C70 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_1 0xF86C74 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_2 0xF86C78 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_2 0xF86C7C + +#define mmTPC6_CFG_QM_TID_BASE_DIM_3 0xF86C80 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_3 0xF86C84 + +#define mmTPC6_CFG_QM_TID_BASE_DIM_4 0xF86C88 + +#define mmTPC6_CFG_QM_TID_SIZE_DIM_4 0xF86C8C + +#define mmTPC6_CFG_QM_SRF_0 0xF86C90 + +#define mmTPC6_CFG_QM_SRF_1 0xF86C94 + +#define mmTPC6_CFG_QM_SRF_2 0xF86C98 + +#define mmTPC6_CFG_QM_SRF_3 0xF86C9C + +#define mmTPC6_CFG_QM_SRF_4 0xF86CA0 + +#define mmTPC6_CFG_QM_SRF_5 0xF86CA4 + +#define mmTPC6_CFG_QM_SRF_6 0xF86CA8 + +#define mmTPC6_CFG_QM_SRF_7 0xF86CAC + +#define mmTPC6_CFG_QM_SRF_8 0xF86CB0 + +#define mmTPC6_CFG_QM_SRF_9 0xF86CB4 + +#define mmTPC6_CFG_QM_SRF_10 0xF86CB8 + +#define mmTPC6_CFG_QM_SRF_11 0xF86CBC + +#define mmTPC6_CFG_QM_SRF_12 0xF86CC0 + +#define mmTPC6_CFG_QM_SRF_13 0xF86CC4 + +#define mmTPC6_CFG_QM_SRF_14 0xF86CC8 + +#define mmTPC6_CFG_QM_SRF_15 0xF86CCC + +#define mmTPC6_CFG_QM_SRF_16 0xF86CD0 + +#define mmTPC6_CFG_QM_SRF_17 0xF86CD4 + +#define mmTPC6_CFG_QM_SRF_18 0xF86CD8 + +#define mmTPC6_CFG_QM_SRF_19 0xF86CDC + +#define mmTPC6_CFG_QM_SRF_20 0xF86CE0 + +#define mmTPC6_CFG_QM_SRF_21 0xF86CE4 + +#define mmTPC6_CFG_QM_SRF_22 0xF86CE8 + +#define mmTPC6_CFG_QM_SRF_23 0xF86CEC + +#define mmTPC6_CFG_QM_SRF_24 0xF86CF0 + +#define mmTPC6_CFG_QM_SRF_25 0xF86CF4 + +#define mmTPC6_CFG_QM_SRF_26 0xF86CF8 + +#define mmTPC6_CFG_QM_SRF_27 
0xF86CFC + +#define mmTPC6_CFG_QM_SRF_28 0xF86D00 + +#define mmTPC6_CFG_QM_SRF_29 0xF86D04 + +#define mmTPC6_CFG_QM_SRF_30 0xF86D08 + +#define mmTPC6_CFG_QM_SRF_31 0xF86D0C + +#define mmTPC6_CFG_QM_KERNEL_CONFIG 0xF86D10 + +#define mmTPC6_CFG_QM_SYNC_OBJECT_MESSAGE 0xF86D14 + +#define mmTPC6_CFG_ARUSER 0xF86D18 + +#define mmTPC6_CFG_AWUSER 0xF86D1C + +#define mmTPC6_CFG_FUNC_MBIST_CNTRL 0xF86E00 + +#define mmTPC6_CFG_FUNC_MBIST_PAT 0xF86E04 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_0 0xF86E08 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_1 0xF86E0C + +#define mmTPC6_CFG_FUNC_MBIST_MEM_2 0xF86E10 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_3 0xF86E14 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_4 0xF86E18 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_5 0xF86E1C + +#define mmTPC6_CFG_FUNC_MBIST_MEM_6 0xF86E20 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_7 0xF86E24 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_8 0xF86E28 + +#define mmTPC6_CFG_FUNC_MBIST_MEM_9 0xF86E2C + +#endif /* ASIC_REG_TPC6_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h new file mode 100644 index 000000000000..fbca6b47284e --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_CMDQ_REGS_H_ +#define ASIC_REG_TPC6_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC6_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC6_CMDQ_GLBL_CFG0 0xF89000 + +#define mmTPC6_CMDQ_GLBL_CFG1 0xF89004 + +#define mmTPC6_CMDQ_GLBL_PROT 0xF89008 + +#define mmTPC6_CMDQ_GLBL_ERR_CFG 0xF8900C + +#define mmTPC6_CMDQ_GLBL_ERR_ADDR_LO 0xF89010 + +#define mmTPC6_CMDQ_GLBL_ERR_ADDR_HI 0xF89014 + +#define mmTPC6_CMDQ_GLBL_ERR_WDATA 0xF89018 + +#define mmTPC6_CMDQ_GLBL_SECURE_PROPS 0xF8901C + +#define mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS 0xF89020 + +#define mmTPC6_CMDQ_GLBL_STS0 0xF89024 + +#define mmTPC6_CMDQ_GLBL_STS1 0xF89028 + +#define mmTPC6_CMDQ_CQ_CFG0 0xF890B0 + +#define mmTPC6_CMDQ_CQ_CFG1 0xF890B4 + +#define mmTPC6_CMDQ_CQ_ARUSER 0xF890B8 + +#define mmTPC6_CMDQ_CQ_PTR_LO 0xF890C0 + +#define mmTPC6_CMDQ_CQ_PTR_HI 0xF890C4 + +#define mmTPC6_CMDQ_CQ_TSIZE 0xF890C8 + +#define mmTPC6_CMDQ_CQ_CTL 0xF890CC + +#define mmTPC6_CMDQ_CQ_PTR_LO_STS 0xF890D4 + +#define mmTPC6_CMDQ_CQ_PTR_HI_STS 0xF890D8 + +#define mmTPC6_CMDQ_CQ_TSIZE_STS 0xF890DC + +#define mmTPC6_CMDQ_CQ_CTL_STS 0xF890E0 + +#define mmTPC6_CMDQ_CQ_STS0 0xF890E4 + +#define mmTPC6_CMDQ_CQ_STS1 0xF890E8 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_EN 0xF890F0 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xF890F4 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_SAT 0xF890F8 + +#define mmTPC6_CMDQ_CQ_RD_RATE_LIM_TOUT 0xF890FC + +#define mmTPC6_CMDQ_CQ_IFIFO_CNT 0xF89108 + +#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_LO 0xF89120 + +#define mmTPC6_CMDQ_CP_MSG_BASE0_ADDR_HI 0xF89124 + +#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_LO 0xF89128 + +#define mmTPC6_CMDQ_CP_MSG_BASE1_ADDR_HI 0xF8912C + +#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_LO 0xF89130 + +#define mmTPC6_CMDQ_CP_MSG_BASE2_ADDR_HI 0xF89134 + +#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_LO 0xF89138 + +#define mmTPC6_CMDQ_CP_MSG_BASE3_ADDR_HI 0xF8913C + +#define mmTPC6_CMDQ_CP_LDMA_TSIZE_OFFSET 0xF89140 + +#define 
mmTPC6_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xF89144 + +#define mmTPC6_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xF89148 + +#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xF8914C + +#define mmTPC6_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xF89150 + +#define mmTPC6_CMDQ_CP_LDMA_COMMIT_OFFSET 0xF89154 + +#define mmTPC6_CMDQ_CP_FENCE0_RDATA 0xF89158 + +#define mmTPC6_CMDQ_CP_FENCE1_RDATA 0xF8915C + +#define mmTPC6_CMDQ_CP_FENCE2_RDATA 0xF89160 + +#define mmTPC6_CMDQ_CP_FENCE3_RDATA 0xF89164 + +#define mmTPC6_CMDQ_CP_FENCE0_CNT 0xF89168 + +#define mmTPC6_CMDQ_CP_FENCE1_CNT 0xF8916C + +#define mmTPC6_CMDQ_CP_FENCE2_CNT 0xF89170 + +#define mmTPC6_CMDQ_CP_FENCE3_CNT 0xF89174 + +#define mmTPC6_CMDQ_CP_STS 0xF89178 + +#define mmTPC6_CMDQ_CP_CURRENT_INST_LO 0xF8917C + +#define mmTPC6_CMDQ_CP_CURRENT_INST_HI 0xF89180 + +#define mmTPC6_CMDQ_CP_BARRIER_CFG 0xF89184 + +#define mmTPC6_CMDQ_CP_DBG_0 0xF89188 + +#define mmTPC6_CMDQ_CQ_BUF_ADDR 0xF89308 + +#define mmTPC6_CMDQ_CQ_BUF_RDATA 0xF8930C + +#endif /* ASIC_REG_TPC6_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h new file mode 100644 index 000000000000..bf32465dabcb --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_QM_REGS_H_ +#define ASIC_REG_TPC6_QM_REGS_H_ + +/* + ***************************************** + * TPC6_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC6_QM_GLBL_CFG0 0xF88000 + +#define mmTPC6_QM_GLBL_CFG1 0xF88004 + +#define mmTPC6_QM_GLBL_PROT 0xF88008 + +#define mmTPC6_QM_GLBL_ERR_CFG 0xF8800C + +#define mmTPC6_QM_GLBL_ERR_ADDR_LO 0xF88010 + +#define mmTPC6_QM_GLBL_ERR_ADDR_HI 0xF88014 + +#define mmTPC6_QM_GLBL_ERR_WDATA 0xF88018 + +#define mmTPC6_QM_GLBL_SECURE_PROPS 0xF8801C + +#define mmTPC6_QM_GLBL_NON_SECURE_PROPS 0xF88020 + +#define mmTPC6_QM_GLBL_STS0 0xF88024 + +#define mmTPC6_QM_GLBL_STS1 0xF88028 + +#define mmTPC6_QM_PQ_BASE_LO 0xF88060 + +#define mmTPC6_QM_PQ_BASE_HI 0xF88064 + +#define mmTPC6_QM_PQ_SIZE 0xF88068 + +#define mmTPC6_QM_PQ_PI 0xF8806C + +#define mmTPC6_QM_PQ_CI 0xF88070 + +#define mmTPC6_QM_PQ_CFG0 0xF88074 + +#define mmTPC6_QM_PQ_CFG1 0xF88078 + +#define mmTPC6_QM_PQ_ARUSER 0xF8807C + +#define mmTPC6_QM_PQ_PUSH0 0xF88080 + +#define mmTPC6_QM_PQ_PUSH1 0xF88084 + +#define mmTPC6_QM_PQ_PUSH2 0xF88088 + +#define mmTPC6_QM_PQ_PUSH3 0xF8808C + +#define mmTPC6_QM_PQ_STS0 0xF88090 + +#define mmTPC6_QM_PQ_STS1 0xF88094 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_EN 0xF880A0 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xF880A4 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_SAT 0xF880A8 + +#define mmTPC6_QM_PQ_RD_RATE_LIM_TOUT 0xF880AC + +#define mmTPC6_QM_CQ_CFG0 0xF880B0 + +#define mmTPC6_QM_CQ_CFG1 0xF880B4 + +#define mmTPC6_QM_CQ_ARUSER 0xF880B8 + +#define mmTPC6_QM_CQ_PTR_LO 0xF880C0 + +#define mmTPC6_QM_CQ_PTR_HI 0xF880C4 + +#define mmTPC6_QM_CQ_TSIZE 0xF880C8 + +#define mmTPC6_QM_CQ_CTL 0xF880CC + +#define mmTPC6_QM_CQ_PTR_LO_STS 0xF880D4 + +#define mmTPC6_QM_CQ_PTR_HI_STS 0xF880D8 + +#define mmTPC6_QM_CQ_TSIZE_STS 0xF880DC + +#define mmTPC6_QM_CQ_CTL_STS 0xF880E0 + +#define mmTPC6_QM_CQ_STS0 0xF880E4 + +#define mmTPC6_QM_CQ_STS1 0xF880E8 + +#define mmTPC6_QM_CQ_RD_RATE_LIM_EN 
0xF880F0 + +#define mmTPC6_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xF880F4 + +#define mmTPC6_QM_CQ_RD_RATE_LIM_SAT 0xF880F8 + +#define mmTPC6_QM_CQ_RD_RATE_LIM_TOUT 0xF880FC + +#define mmTPC6_QM_CQ_IFIFO_CNT 0xF88108 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_LO 0xF88120 + +#define mmTPC6_QM_CP_MSG_BASE0_ADDR_HI 0xF88124 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_LO 0xF88128 + +#define mmTPC6_QM_CP_MSG_BASE1_ADDR_HI 0xF8812C + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_LO 0xF88130 + +#define mmTPC6_QM_CP_MSG_BASE2_ADDR_HI 0xF88134 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_LO 0xF88138 + +#define mmTPC6_QM_CP_MSG_BASE3_ADDR_HI 0xF8813C + +#define mmTPC6_QM_CP_LDMA_TSIZE_OFFSET 0xF88140 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xF88144 + +#define mmTPC6_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xF88148 + +#define mmTPC6_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xF8814C + +#define mmTPC6_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xF88150 + +#define mmTPC6_QM_CP_LDMA_COMMIT_OFFSET 0xF88154 + +#define mmTPC6_QM_CP_FENCE0_RDATA 0xF88158 + +#define mmTPC6_QM_CP_FENCE1_RDATA 0xF8815C + +#define mmTPC6_QM_CP_FENCE2_RDATA 0xF88160 + +#define mmTPC6_QM_CP_FENCE3_RDATA 0xF88164 + +#define mmTPC6_QM_CP_FENCE0_CNT 0xF88168 + +#define mmTPC6_QM_CP_FENCE1_CNT 0xF8816C + +#define mmTPC6_QM_CP_FENCE2_CNT 0xF88170 + +#define mmTPC6_QM_CP_FENCE3_CNT 0xF88174 + +#define mmTPC6_QM_CP_STS 0xF88178 + +#define mmTPC6_QM_CP_CURRENT_INST_LO 0xF8817C + +#define mmTPC6_QM_CP_CURRENT_INST_HI 0xF88180 + +#define mmTPC6_QM_CP_BARRIER_CFG 0xF88184 + +#define mmTPC6_QM_CP_DBG_0 0xF88188 + +#define mmTPC6_QM_PQ_BUF_ADDR 0xF88300 + +#define mmTPC6_QM_PQ_BUF_RDATA 0xF88304 + +#define mmTPC6_QM_CQ_BUF_ADDR 0xF88308 + +#define mmTPC6_QM_CQ_BUF_RDATA 0xF8830C + +#endif /* ASIC_REG_TPC6_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h new file mode 100644 index 000000000000..609bb90e1046 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC6_RTR_REGS_H_ +#define ASIC_REG_TPC6_RTR_REGS_H_ + +/* + ***************************************** + * TPC6_RTR (Prototype: TPC_RTR) + ***************************************** + */ + +#define mmTPC6_RTR_HBW_RD_RQ_E_ARB 0xF80100 + +#define mmTPC6_RTR_HBW_RD_RQ_W_ARB 0xF80104 + +#define mmTPC6_RTR_HBW_RD_RQ_N_ARB 0xF80108 + +#define mmTPC6_RTR_HBW_RD_RQ_S_ARB 0xF8010C + +#define mmTPC6_RTR_HBW_RD_RQ_L_ARB 0xF80110 + +#define mmTPC6_RTR_HBW_E_ARB_MAX 0xF80120 + +#define mmTPC6_RTR_HBW_W_ARB_MAX 0xF80124 + +#define mmTPC6_RTR_HBW_N_ARB_MAX 0xF80128 + +#define mmTPC6_RTR_HBW_S_ARB_MAX 0xF8012C + +#define mmTPC6_RTR_HBW_L_ARB_MAX 0xF80130 + +#define mmTPC6_RTR_HBW_RD_RS_E_ARB 0xF80140 + +#define mmTPC6_RTR_HBW_RD_RS_W_ARB 0xF80144 + +#define mmTPC6_RTR_HBW_RD_RS_N_ARB 0xF80148 + +#define mmTPC6_RTR_HBW_RD_RS_S_ARB 0xF8014C + +#define mmTPC6_RTR_HBW_RD_RS_L_ARB 0xF80150 + +#define mmTPC6_RTR_HBW_WR_RQ_E_ARB 0xF80170 + +#define mmTPC6_RTR_HBW_WR_RQ_W_ARB 0xF80174 + +#define mmTPC6_RTR_HBW_WR_RQ_N_ARB 0xF80178 + +#define mmTPC6_RTR_HBW_WR_RQ_S_ARB 0xF8017C + +#define mmTPC6_RTR_HBW_WR_RQ_L_ARB 0xF80180 + +#define mmTPC6_RTR_HBW_WR_RS_E_ARB 0xF80190 + +#define mmTPC6_RTR_HBW_WR_RS_W_ARB 0xF80194 + +#define mmTPC6_RTR_HBW_WR_RS_N_ARB 0xF80198 + +#define mmTPC6_RTR_HBW_WR_RS_S_ARB 0xF8019C + +#define mmTPC6_RTR_HBW_WR_RS_L_ARB 0xF801A0 + +#define mmTPC6_RTR_LBW_RD_RQ_E_ARB 0xF80200 + +#define mmTPC6_RTR_LBW_RD_RQ_W_ARB 0xF80204 + +#define mmTPC6_RTR_LBW_RD_RQ_N_ARB 0xF80208 + +#define mmTPC6_RTR_LBW_RD_RQ_S_ARB 0xF8020C + +#define mmTPC6_RTR_LBW_RD_RQ_L_ARB 0xF80210 + +#define mmTPC6_RTR_LBW_E_ARB_MAX 0xF80220 + +#define mmTPC6_RTR_LBW_W_ARB_MAX 0xF80224 + +#define mmTPC6_RTR_LBW_N_ARB_MAX 0xF80228 + +#define mmTPC6_RTR_LBW_S_ARB_MAX 0xF8022C + +#define mmTPC6_RTR_LBW_L_ARB_MAX 0xF80230 + +#define mmTPC6_RTR_LBW_RD_RS_E_ARB 0xF80250 + +#define mmTPC6_RTR_LBW_RD_RS_W_ARB 0xF80254 + +#define mmTPC6_RTR_LBW_RD_RS_N_ARB 0xF80258 + +#define mmTPC6_RTR_LBW_RD_RS_S_ARB 0xF8025C + +#define mmTPC6_RTR_LBW_RD_RS_L_ARB 0xF80260 + +#define mmTPC6_RTR_LBW_WR_RQ_E_ARB 0xF80270 + +#define mmTPC6_RTR_LBW_WR_RQ_W_ARB 0xF80274 + +#define mmTPC6_RTR_LBW_WR_RQ_N_ARB 0xF80278 + +#define mmTPC6_RTR_LBW_WR_RQ_S_ARB 0xF8027C + +#define mmTPC6_RTR_LBW_WR_RQ_L_ARB 0xF80280 + +#define mmTPC6_RTR_LBW_WR_RS_E_ARB 0xF80290 + +#define mmTPC6_RTR_LBW_WR_RS_W_ARB 0xF80294 + +#define mmTPC6_RTR_LBW_WR_RS_N_ARB 0xF80298 + +#define mmTPC6_RTR_LBW_WR_RS_S_ARB 0xF8029C + +#define mmTPC6_RTR_LBW_WR_RS_L_ARB 0xF802A0 + +#define mmTPC6_RTR_DBG_E_ARB 0xF80300 + +#define mmTPC6_RTR_DBG_W_ARB 0xF80304 + +#define mmTPC6_RTR_DBG_N_ARB 0xF80308 + +#define mmTPC6_RTR_DBG_S_ARB 0xF8030C + +#define mmTPC6_RTR_DBG_L_ARB 0xF80310 + +#define mmTPC6_RTR_DBG_E_ARB_MAX 0xF80320 + +#define mmTPC6_RTR_DBG_W_ARB_MAX 0xF80324 + +#define mmTPC6_RTR_DBG_N_ARB_MAX 0xF80328 + +#define mmTPC6_RTR_DBG_S_ARB_MAX 0xF8032C + +#define mmTPC6_RTR_DBG_L_ARB_MAX 0xF80330 + +#define mmTPC6_RTR_SPLIT_COEF_0 0xF80400 + +#define mmTPC6_RTR_SPLIT_COEF_1 0xF80404 + +#define mmTPC6_RTR_SPLIT_COEF_2 0xF80408 + +#define mmTPC6_RTR_SPLIT_COEF_3 0xF8040C + +#define mmTPC6_RTR_SPLIT_COEF_4 0xF80410 + +#define mmTPC6_RTR_SPLIT_COEF_5 0xF80414 + +#define mmTPC6_RTR_SPLIT_COEF_6 0xF80418 + +#define mmTPC6_RTR_SPLIT_COEF_7 0xF8041C + +#define mmTPC6_RTR_SPLIT_COEF_8 0xF80420 + +#define mmTPC6_RTR_SPLIT_COEF_9 
0xF80424 + +#define mmTPC6_RTR_SPLIT_CFG 0xF80440 + +#define mmTPC6_RTR_SPLIT_RD_SAT 0xF80444 + +#define mmTPC6_RTR_SPLIT_RD_RST_TOKEN 0xF80448 + +#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_0 0xF8044C + +#define mmTPC6_RTR_SPLIT_RD_TIMEOUT_1 0xF80450 + +#define mmTPC6_RTR_SPLIT_WR_SAT 0xF80454 + +#define mmTPC6_RTR_WPLIT_WR_TST_TOLEN 0xF80458 + +#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_0 0xF8045C + +#define mmTPC6_RTR_SPLIT_WR_TIMEOUT_1 0xF80460 + +#define mmTPC6_RTR_HBW_RANGE_HIT 0xF80470 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_0 0xF80480 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_1 0xF80484 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_2 0xF80488 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_3 0xF8048C + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_4 0xF80490 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_5 0xF80494 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_6 0xF80498 + +#define mmTPC6_RTR_HBW_RANGE_MASK_L_7 0xF8049C + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_0 0xF804A0 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_1 0xF804A4 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_2 0xF804A8 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_3 0xF804AC + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_4 0xF804B0 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_5 0xF804B4 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_6 0xF804B8 + +#define mmTPC6_RTR_HBW_RANGE_MASK_H_7 0xF804BC + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_0 0xF804C0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_1 0xF804C4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_2 0xF804C8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_3 0xF804CC + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_4 0xF804D0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_5 0xF804D4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_6 0xF804D8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_L_7 0xF804DC + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_0 0xF804E0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_1 0xF804E4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_2 0xF804E8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_3 0xF804EC + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_4 0xF804F0 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_5 0xF804F4 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_6 0xF804F8 + +#define mmTPC6_RTR_HBW_RANGE_BASE_H_7 0xF804FC + +#define mmTPC6_RTR_LBW_RANGE_HIT 0xF80500 + +#define mmTPC6_RTR_LBW_RANGE_MASK_0 0xF80510 + +#define mmTPC6_RTR_LBW_RANGE_MASK_1 0xF80514 + +#define mmTPC6_RTR_LBW_RANGE_MASK_2 0xF80518 + +#define mmTPC6_RTR_LBW_RANGE_MASK_3 0xF8051C + +#define mmTPC6_RTR_LBW_RANGE_MASK_4 0xF80520 + +#define mmTPC6_RTR_LBW_RANGE_MASK_5 0xF80524 + +#define mmTPC6_RTR_LBW_RANGE_MASK_6 0xF80528 + +#define mmTPC6_RTR_LBW_RANGE_MASK_7 0xF8052C + +#define mmTPC6_RTR_LBW_RANGE_MASK_8 0xF80530 + +#define mmTPC6_RTR_LBW_RANGE_MASK_9 0xF80534 + +#define mmTPC6_RTR_LBW_RANGE_MASK_10 0xF80538 + +#define mmTPC6_RTR_LBW_RANGE_MASK_11 0xF8053C + +#define mmTPC6_RTR_LBW_RANGE_MASK_12 0xF80540 + +#define mmTPC6_RTR_LBW_RANGE_MASK_13 0xF80544 + +#define mmTPC6_RTR_LBW_RANGE_MASK_14 0xF80548 + +#define mmTPC6_RTR_LBW_RANGE_MASK_15 0xF8054C + +#define mmTPC6_RTR_LBW_RANGE_BASE_0 0xF80550 + +#define mmTPC6_RTR_LBW_RANGE_BASE_1 0xF80554 + +#define mmTPC6_RTR_LBW_RANGE_BASE_2 0xF80558 + +#define mmTPC6_RTR_LBW_RANGE_BASE_3 0xF8055C + +#define mmTPC6_RTR_LBW_RANGE_BASE_4 0xF80560 + +#define mmTPC6_RTR_LBW_RANGE_BASE_5 0xF80564 + +#define mmTPC6_RTR_LBW_RANGE_BASE_6 0xF80568 + +#define mmTPC6_RTR_LBW_RANGE_BASE_7 0xF8056C + +#define mmTPC6_RTR_LBW_RANGE_BASE_8 0xF80570 + +#define mmTPC6_RTR_LBW_RANGE_BASE_9 0xF80574 + +#define mmTPC6_RTR_LBW_RANGE_BASE_10 0xF80578 + +#define mmTPC6_RTR_LBW_RANGE_BASE_11 0xF8057C + +#define mmTPC6_RTR_LBW_RANGE_BASE_12 0xF80580 + +#define 
mmTPC6_RTR_LBW_RANGE_BASE_13 0xF80584 + +#define mmTPC6_RTR_LBW_RANGE_BASE_14 0xF80588 + +#define mmTPC6_RTR_LBW_RANGE_BASE_15 0xF8058C + +#define mmTPC6_RTR_RGLTR 0xF80590 + +#define mmTPC6_RTR_RGLTR_WR_RESULT 0xF80594 + +#define mmTPC6_RTR_RGLTR_RD_RESULT 0xF80598 + +#define mmTPC6_RTR_SCRAMB_EN 0xF80600 + +#define mmTPC6_RTR_NON_LIN_SCRAMB 0xF80604 + +#endif /* ASIC_REG_TPC6_RTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h new file mode 100644 index 000000000000..bf2fd0f73906 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h @@ -0,0 +1,887 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_CFG_REGS_H_ +#define ASIC_REG_TPC7_CFG_REGS_H_ + +/* + ***************************************** + * TPC7_CFG (Prototype: TPC) + ***************************************** + */ + +#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_LOW 0xFC6400 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_BASE_ADDR_HIGH 0xFC6404 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_PADDING_VALUE 0xFC6408 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_TENSOR_CONFIG 0xFC640C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_SIZE 0xFC6410 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_STRIDE 0xFC6414 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6418 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_SIZE 0xFC641C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_STRIDE 0xFC6420 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6424 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_SIZE 0xFC6428 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_STRIDE 0xFC642C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6430 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_SIZE 0xFC6434 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_STRIDE 0xFC6438 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_3_BASE_OFFSET 0xFC643C + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_SIZE 0xFC6440 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_STRIDE 0xFC6444 + +#define mmTPC7_CFG_KERNEL_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6448 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_LOW 0xFC644C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_BASE_ADDR_HIGH 0xFC6450 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_PADDING_VALUE 0xFC6454 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_TENSOR_CONFIG 0xFC6458 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_SIZE 0xFC645C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_STRIDE 0xFC6460 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6464 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_SIZE 0xFC6468 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_STRIDE 0xFC646C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6470 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_SIZE 0xFC6474 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_STRIDE 0xFC6478 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_2_BASE_OFFSET 0xFC647C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_SIZE 0xFC6480 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_STRIDE 0xFC6484 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6488 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_SIZE 0xFC648C + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_STRIDE 0xFC6490 + +#define mmTPC7_CFG_KERNEL_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6494 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_LOW 0xFC6498 + +#define 
mmTPC7_CFG_KERNEL_TENSOR_2_BASE_ADDR_HIGH 0xFC649C + +#define mmTPC7_CFG_KERNEL_TENSOR_2_PADDING_VALUE 0xFC64A0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_TENSOR_CONFIG 0xFC64A4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_SIZE 0xFC64A8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_STRIDE 0xFC64AC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_0_BASE_OFFSET 0xFC64B0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_SIZE 0xFC64B4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_STRIDE 0xFC64B8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_1_BASE_OFFSET 0xFC64BC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_SIZE 0xFC64C0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_STRIDE 0xFC64C4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_2_BASE_OFFSET 0xFC64C8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_SIZE 0xFC64CC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_STRIDE 0xFC64D0 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_3_BASE_OFFSET 0xFC64D4 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_SIZE 0xFC64D8 + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_STRIDE 0xFC64DC + +#define mmTPC7_CFG_KERNEL_TENSOR_2_DIM_4_BASE_OFFSET 0xFC64E0 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_LOW 0xFC64E4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_BASE_ADDR_HIGH 0xFC64E8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_PADDING_VALUE 0xFC64EC + +#define mmTPC7_CFG_KERNEL_TENSOR_3_TENSOR_CONFIG 0xFC64F0 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_SIZE 0xFC64F4 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_STRIDE 0xFC64F8 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_0_BASE_OFFSET 0xFC64FC + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_SIZE 0xFC6500 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_STRIDE 0xFC6504 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6508 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_SIZE 0xFC650C + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_STRIDE 0xFC6510 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6514 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_SIZE 0xFC6518 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_STRIDE 0xFC651C + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6520 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_SIZE 0xFC6524 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_STRIDE 0xFC6528 + +#define mmTPC7_CFG_KERNEL_TENSOR_3_DIM_4_BASE_OFFSET 0xFC652C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_LOW 0xFC6530 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_BASE_ADDR_HIGH 0xFC6534 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_PADDING_VALUE 0xFC6538 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_TENSOR_CONFIG 0xFC653C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_SIZE 0xFC6540 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_STRIDE 0xFC6544 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6548 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_SIZE 0xFC654C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_STRIDE 0xFC6550 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6554 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_SIZE 0xFC6558 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_STRIDE 0xFC655C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6560 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_SIZE 0xFC6564 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_STRIDE 0xFC6568 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_3_BASE_OFFSET 0xFC656C + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_SIZE 0xFC6570 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_STRIDE 0xFC6574 + +#define mmTPC7_CFG_KERNEL_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6578 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_LOW 0xFC657C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_BASE_ADDR_HIGH 0xFC6580 + +#define 
mmTPC7_CFG_KERNEL_TENSOR_5_PADDING_VALUE 0xFC6584 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_TENSOR_CONFIG 0xFC6588 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_SIZE 0xFC658C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_STRIDE 0xFC6590 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6594 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_SIZE 0xFC6598 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_STRIDE 0xFC659C + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_1_BASE_OFFSET 0xFC65A0 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_SIZE 0xFC65A4 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_STRIDE 0xFC65A8 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_2_BASE_OFFSET 0xFC65AC + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_SIZE 0xFC65B0 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_STRIDE 0xFC65B4 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_3_BASE_OFFSET 0xFC65B8 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_SIZE 0xFC65BC + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_STRIDE 0xFC65C0 + +#define mmTPC7_CFG_KERNEL_TENSOR_5_DIM_4_BASE_OFFSET 0xFC65C4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_LOW 0xFC65C8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_BASE_ADDR_HIGH 0xFC65CC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_PADDING_VALUE 0xFC65D0 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_TENSOR_CONFIG 0xFC65D4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_SIZE 0xFC65D8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_STRIDE 0xFC65DC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_0_BASE_OFFSET 0xFC65E0 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_SIZE 0xFC65E4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_STRIDE 0xFC65E8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_1_BASE_OFFSET 0xFC65EC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_SIZE 0xFC65F0 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_STRIDE 0xFC65F4 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_2_BASE_OFFSET 0xFC65F8 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_SIZE 0xFC65FC + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_STRIDE 0xFC6600 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6604 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_SIZE 0xFC6608 + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_STRIDE 0xFC660C + +#define mmTPC7_CFG_KERNEL_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6610 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_LOW 0xFC6614 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_BASE_ADDR_HIGH 0xFC6618 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_PADDING_VALUE 0xFC661C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_TENSOR_CONFIG 0xFC6620 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_SIZE 0xFC6624 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_STRIDE 0xFC6628 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_0_BASE_OFFSET 0xFC662C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_SIZE 0xFC6630 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_STRIDE 0xFC6634 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6638 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_SIZE 0xFC663C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_STRIDE 0xFC6640 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6644 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_SIZE 0xFC6648 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_STRIDE 0xFC664C + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6650 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_SIZE 0xFC6654 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_STRIDE 0xFC6658 + +#define mmTPC7_CFG_KERNEL_TENSOR_7_DIM_4_BASE_OFFSET 0xFC665C + +#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_LOW 0xFC6660 + +#define mmTPC7_CFG_KERNEL_KERNEL_BASE_ADDRESS_HIGH 0xFC6664 + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_0 0xFC6668 + +#define 
mmTPC7_CFG_KERNEL_TID_SIZE_DIM_0 0xFC666C + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_1 0xFC6670 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_1 0xFC6674 + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_2 0xFC6678 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_2 0xFC667C + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_3 0xFC6680 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_3 0xFC6684 + +#define mmTPC7_CFG_KERNEL_TID_BASE_DIM_4 0xFC6688 + +#define mmTPC7_CFG_KERNEL_TID_SIZE_DIM_4 0xFC668C + +#define mmTPC7_CFG_KERNEL_SRF_0 0xFC6690 + +#define mmTPC7_CFG_KERNEL_SRF_1 0xFC6694 + +#define mmTPC7_CFG_KERNEL_SRF_2 0xFC6698 + +#define mmTPC7_CFG_KERNEL_SRF_3 0xFC669C + +#define mmTPC7_CFG_KERNEL_SRF_4 0xFC66A0 + +#define mmTPC7_CFG_KERNEL_SRF_5 0xFC66A4 + +#define mmTPC7_CFG_KERNEL_SRF_6 0xFC66A8 + +#define mmTPC7_CFG_KERNEL_SRF_7 0xFC66AC + +#define mmTPC7_CFG_KERNEL_SRF_8 0xFC66B0 + +#define mmTPC7_CFG_KERNEL_SRF_9 0xFC66B4 + +#define mmTPC7_CFG_KERNEL_SRF_10 0xFC66B8 + +#define mmTPC7_CFG_KERNEL_SRF_11 0xFC66BC + +#define mmTPC7_CFG_KERNEL_SRF_12 0xFC66C0 + +#define mmTPC7_CFG_KERNEL_SRF_13 0xFC66C4 + +#define mmTPC7_CFG_KERNEL_SRF_14 0xFC66C8 + +#define mmTPC7_CFG_KERNEL_SRF_15 0xFC66CC + +#define mmTPC7_CFG_KERNEL_SRF_16 0xFC66D0 + +#define mmTPC7_CFG_KERNEL_SRF_17 0xFC66D4 + +#define mmTPC7_CFG_KERNEL_SRF_18 0xFC66D8 + +#define mmTPC7_CFG_KERNEL_SRF_19 0xFC66DC + +#define mmTPC7_CFG_KERNEL_SRF_20 0xFC66E0 + +#define mmTPC7_CFG_KERNEL_SRF_21 0xFC66E4 + +#define mmTPC7_CFG_KERNEL_SRF_22 0xFC66E8 + +#define mmTPC7_CFG_KERNEL_SRF_23 0xFC66EC + +#define mmTPC7_CFG_KERNEL_SRF_24 0xFC66F0 + +#define mmTPC7_CFG_KERNEL_SRF_25 0xFC66F4 + +#define mmTPC7_CFG_KERNEL_SRF_26 0xFC66F8 + +#define mmTPC7_CFG_KERNEL_SRF_27 0xFC66FC + +#define mmTPC7_CFG_KERNEL_SRF_28 0xFC6700 + +#define mmTPC7_CFG_KERNEL_SRF_29 0xFC6704 + +#define mmTPC7_CFG_KERNEL_SRF_30 0xFC6708 + +#define mmTPC7_CFG_KERNEL_SRF_31 0xFC670C + +#define mmTPC7_CFG_KERNEL_KERNEL_CONFIG 0xFC6710 + +#define mmTPC7_CFG_KERNEL_SYNC_OBJECT_MESSAGE 0xFC6714 + +#define mmTPC7_CFG_RESERVED_DESC_END 0xFC6738 + +#define mmTPC7_CFG_ROUND_CSR 0xFC67FC + +#define mmTPC7_CFG_TBUF_BASE_ADDR_LOW 0xFC6800 + +#define mmTPC7_CFG_TBUF_BASE_ADDR_HIGH 0xFC6804 + +#define mmTPC7_CFG_SEMAPHORE 0xFC6808 + +#define mmTPC7_CFG_VFLAGS 0xFC680C + +#define mmTPC7_CFG_SFLAGS 0xFC6810 + +#define mmTPC7_CFG_LFSR_POLYNOM 0xFC6818 + +#define mmTPC7_CFG_STATUS 0xFC681C + +#define mmTPC7_CFG_CFG_BASE_ADDRESS_HIGH 0xFC6820 + +#define mmTPC7_CFG_CFG_SUBTRACT_VALUE 0xFC6824 + +#define mmTPC7_CFG_SM_BASE_ADDRESS_LOW 0xFC6828 + +#define mmTPC7_CFG_SM_BASE_ADDRESS_HIGH 0xFC682C + +#define mmTPC7_CFG_TPC_CMD 0xFC6830 + +#define mmTPC7_CFG_TPC_EXECUTE 0xFC6838 + +#define mmTPC7_CFG_TPC_STALL 0xFC683C + +#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_LOW 0xFC6840 + +#define mmTPC7_CFG_ICACHE_BASE_ADDERESS_HIGH 0xFC6844 + +#define mmTPC7_CFG_MSS_CONFIG 0xFC6854 + +#define mmTPC7_CFG_TPC_INTR_CAUSE 0xFC6858 + +#define mmTPC7_CFG_TPC_INTR_MASK 0xFC685C + +#define mmTPC7_CFG_TSB_CONFIG 0xFC6860 + +#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_LOW 0xFC6A00 + +#define mmTPC7_CFG_QM_TENSOR_0_BASE_ADDR_HIGH 0xFC6A04 + +#define mmTPC7_CFG_QM_TENSOR_0_PADDING_VALUE 0xFC6A08 + +#define mmTPC7_CFG_QM_TENSOR_0_TENSOR_CONFIG 0xFC6A0C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_SIZE 0xFC6A10 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_STRIDE 0xFC6A14 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_0_BASE_OFFSET 0xFC6A18 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_SIZE 0xFC6A1C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_1_STRIDE 0xFC6A20 + +#define 
mmTPC7_CFG_QM_TENSOR_0_DIM_1_BASE_OFFSET 0xFC6A24 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_SIZE 0xFC6A28 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_STRIDE 0xFC6A2C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_2_BASE_OFFSET 0xFC6A30 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_SIZE 0xFC6A34 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_STRIDE 0xFC6A38 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_3_BASE_OFFSET 0xFC6A3C + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_SIZE 0xFC6A40 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_STRIDE 0xFC6A44 + +#define mmTPC7_CFG_QM_TENSOR_0_DIM_4_BASE_OFFSET 0xFC6A48 + +#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_LOW 0xFC6A4C + +#define mmTPC7_CFG_QM_TENSOR_1_BASE_ADDR_HIGH 0xFC6A50 + +#define mmTPC7_CFG_QM_TENSOR_1_PADDING_VALUE 0xFC6A54 + +#define mmTPC7_CFG_QM_TENSOR_1_TENSOR_CONFIG 0xFC6A58 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_SIZE 0xFC6A5C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_STRIDE 0xFC6A60 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_0_BASE_OFFSET 0xFC6A64 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_SIZE 0xFC6A68 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_STRIDE 0xFC6A6C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_1_BASE_OFFSET 0xFC6A70 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_SIZE 0xFC6A74 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_STRIDE 0xFC6A78 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_2_BASE_OFFSET 0xFC6A7C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_SIZE 0xFC6A80 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_STRIDE 0xFC6A84 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_3_BASE_OFFSET 0xFC6A88 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_SIZE 0xFC6A8C + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_STRIDE 0xFC6A90 + +#define mmTPC7_CFG_QM_TENSOR_1_DIM_4_BASE_OFFSET 0xFC6A94 + +#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_LOW 0xFC6A98 + +#define mmTPC7_CFG_QM_TENSOR_2_BASE_ADDR_HIGH 0xFC6A9C + +#define mmTPC7_CFG_QM_TENSOR_2_PADDING_VALUE 0xFC6AA0 + +#define mmTPC7_CFG_QM_TENSOR_2_TENSOR_CONFIG 0xFC6AA4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_SIZE 0xFC6AA8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_STRIDE 0xFC6AAC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_0_BASE_OFFSET 0xFC6AB0 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_SIZE 0xFC6AB4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_STRIDE 0xFC6AB8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_1_BASE_OFFSET 0xFC6ABC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_SIZE 0xFC6AC0 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_STRIDE 0xFC6AC4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_2_BASE_OFFSET 0xFC6AC8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_SIZE 0xFC6ACC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_STRIDE 0xFC6AD0 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_3_BASE_OFFSET 0xFC6AD4 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_SIZE 0xFC6AD8 + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_STRIDE 0xFC6ADC + +#define mmTPC7_CFG_QM_TENSOR_2_DIM_4_BASE_OFFSET 0xFC6AE0 + +#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_LOW 0xFC6AE4 + +#define mmTPC7_CFG_QM_TENSOR_3_BASE_ADDR_HIGH 0xFC6AE8 + +#define mmTPC7_CFG_QM_TENSOR_3_PADDING_VALUE 0xFC6AEC + +#define mmTPC7_CFG_QM_TENSOR_3_TENSOR_CONFIG 0xFC6AF0 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_SIZE 0xFC6AF4 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_STRIDE 0xFC6AF8 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_0_BASE_OFFSET 0xFC6AFC + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_SIZE 0xFC6B00 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_STRIDE 0xFC6B04 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_1_BASE_OFFSET 0xFC6B08 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_SIZE 0xFC6B0C + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_STRIDE 0xFC6B10 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_2_BASE_OFFSET 0xFC6B14 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_SIZE 0xFC6B18 + +#define 
mmTPC7_CFG_QM_TENSOR_3_DIM_3_STRIDE 0xFC6B1C + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_3_BASE_OFFSET 0xFC6B20 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_SIZE 0xFC6B24 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_STRIDE 0xFC6B28 + +#define mmTPC7_CFG_QM_TENSOR_3_DIM_4_BASE_OFFSET 0xFC6B2C + +#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_LOW 0xFC6B30 + +#define mmTPC7_CFG_QM_TENSOR_4_BASE_ADDR_HIGH 0xFC6B34 + +#define mmTPC7_CFG_QM_TENSOR_4_PADDING_VALUE 0xFC6B38 + +#define mmTPC7_CFG_QM_TENSOR_4_TENSOR_CONFIG 0xFC6B3C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_SIZE 0xFC6B40 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_STRIDE 0xFC6B44 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_0_BASE_OFFSET 0xFC6B48 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_SIZE 0xFC6B4C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_STRIDE 0xFC6B50 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_1_BASE_OFFSET 0xFC6B54 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_SIZE 0xFC6B58 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_STRIDE 0xFC6B5C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_2_BASE_OFFSET 0xFC6B60 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_SIZE 0xFC6B64 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_STRIDE 0xFC6B68 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_3_BASE_OFFSET 0xFC6B6C + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_SIZE 0xFC6B70 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_STRIDE 0xFC6B74 + +#define mmTPC7_CFG_QM_TENSOR_4_DIM_4_BASE_OFFSET 0xFC6B78 + +#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_LOW 0xFC6B7C + +#define mmTPC7_CFG_QM_TENSOR_5_BASE_ADDR_HIGH 0xFC6B80 + +#define mmTPC7_CFG_QM_TENSOR_5_PADDING_VALUE 0xFC6B84 + +#define mmTPC7_CFG_QM_TENSOR_5_TENSOR_CONFIG 0xFC6B88 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_SIZE 0xFC6B8C + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_STRIDE 0xFC6B90 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_0_BASE_OFFSET 0xFC6B94 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_SIZE 0xFC6B98 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_STRIDE 0xFC6B9C + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_1_BASE_OFFSET 0xFC6BA0 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_SIZE 0xFC6BA4 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_STRIDE 0xFC6BA8 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_2_BASE_OFFSET 0xFC6BAC + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_SIZE 0xFC6BB0 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_STRIDE 0xFC6BB4 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_3_BASE_OFFSET 0xFC6BB8 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_SIZE 0xFC6BBC + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_STRIDE 0xFC6BC0 + +#define mmTPC7_CFG_QM_TENSOR_5_DIM_4_BASE_OFFSET 0xFC6BC4 + +#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_LOW 0xFC6BC8 + +#define mmTPC7_CFG_QM_TENSOR_6_BASE_ADDR_HIGH 0xFC6BCC + +#define mmTPC7_CFG_QM_TENSOR_6_PADDING_VALUE 0xFC6BD0 + +#define mmTPC7_CFG_QM_TENSOR_6_TENSOR_CONFIG 0xFC6BD4 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_SIZE 0xFC6BD8 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_STRIDE 0xFC6BDC + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_0_BASE_OFFSET 0xFC6BE0 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_SIZE 0xFC6BE4 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_STRIDE 0xFC6BE8 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_1_BASE_OFFSET 0xFC6BEC + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_SIZE 0xFC6BF0 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_STRIDE 0xFC6BF4 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_2_BASE_OFFSET 0xFC6BF8 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_SIZE 0xFC6BFC + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_STRIDE 0xFC6C00 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_3_BASE_OFFSET 0xFC6C04 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_SIZE 0xFC6C08 + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_STRIDE 0xFC6C0C + +#define mmTPC7_CFG_QM_TENSOR_6_DIM_4_BASE_OFFSET 0xFC6C10 + +#define 
mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_LOW 0xFC6C14 + +#define mmTPC7_CFG_QM_TENSOR_7_BASE_ADDR_HIGH 0xFC6C18 + +#define mmTPC7_CFG_QM_TENSOR_7_PADDING_VALUE 0xFC6C1C + +#define mmTPC7_CFG_QM_TENSOR_7_TENSOR_CONFIG 0xFC6C20 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_SIZE 0xFC6C24 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_STRIDE 0xFC6C28 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_0_BASE_OFFSET 0xFC6C2C + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_SIZE 0xFC6C30 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_STRIDE 0xFC6C34 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_1_BASE_OFFSET 0xFC6C38 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_SIZE 0xFC6C3C + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_STRIDE 0xFC6C40 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_2_BASE_OFFSET 0xFC6C44 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_SIZE 0xFC6C48 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_STRIDE 0xFC6C4C + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_3_BASE_OFFSET 0xFC6C50 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_SIZE 0xFC6C54 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_STRIDE 0xFC6C58 + +#define mmTPC7_CFG_QM_TENSOR_7_DIM_4_BASE_OFFSET 0xFC6C5C + +#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_LOW 0xFC6C60 + +#define mmTPC7_CFG_QM_KERNEL_BASE_ADDRESS_HIGH 0xFC6C64 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_0 0xFC6C68 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_0 0xFC6C6C + +#define mmTPC7_CFG_QM_TID_BASE_DIM_1 0xFC6C70 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_1 0xFC6C74 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_2 0xFC6C78 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_2 0xFC6C7C + +#define mmTPC7_CFG_QM_TID_BASE_DIM_3 0xFC6C80 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_3 0xFC6C84 + +#define mmTPC7_CFG_QM_TID_BASE_DIM_4 0xFC6C88 + +#define mmTPC7_CFG_QM_TID_SIZE_DIM_4 0xFC6C8C + +#define mmTPC7_CFG_QM_SRF_0 0xFC6C90 + +#define mmTPC7_CFG_QM_SRF_1 0xFC6C94 + +#define mmTPC7_CFG_QM_SRF_2 0xFC6C98 + +#define mmTPC7_CFG_QM_SRF_3 0xFC6C9C + +#define mmTPC7_CFG_QM_SRF_4 0xFC6CA0 + +#define mmTPC7_CFG_QM_SRF_5 0xFC6CA4 + +#define mmTPC7_CFG_QM_SRF_6 0xFC6CA8 + +#define mmTPC7_CFG_QM_SRF_7 0xFC6CAC + +#define mmTPC7_CFG_QM_SRF_8 0xFC6CB0 + +#define mmTPC7_CFG_QM_SRF_9 0xFC6CB4 + +#define mmTPC7_CFG_QM_SRF_10 0xFC6CB8 + +#define mmTPC7_CFG_QM_SRF_11 0xFC6CBC + +#define mmTPC7_CFG_QM_SRF_12 0xFC6CC0 + +#define mmTPC7_CFG_QM_SRF_13 0xFC6CC4 + +#define mmTPC7_CFG_QM_SRF_14 0xFC6CC8 + +#define mmTPC7_CFG_QM_SRF_15 0xFC6CCC + +#define mmTPC7_CFG_QM_SRF_16 0xFC6CD0 + +#define mmTPC7_CFG_QM_SRF_17 0xFC6CD4 + +#define mmTPC7_CFG_QM_SRF_18 0xFC6CD8 + +#define mmTPC7_CFG_QM_SRF_19 0xFC6CDC + +#define mmTPC7_CFG_QM_SRF_20 0xFC6CE0 + +#define mmTPC7_CFG_QM_SRF_21 0xFC6CE4 + +#define mmTPC7_CFG_QM_SRF_22 0xFC6CE8 + +#define mmTPC7_CFG_QM_SRF_23 0xFC6CEC + +#define mmTPC7_CFG_QM_SRF_24 0xFC6CF0 + +#define mmTPC7_CFG_QM_SRF_25 0xFC6CF4 + +#define mmTPC7_CFG_QM_SRF_26 0xFC6CF8 + +#define mmTPC7_CFG_QM_SRF_27 0xFC6CFC + +#define mmTPC7_CFG_QM_SRF_28 0xFC6D00 + +#define mmTPC7_CFG_QM_SRF_29 0xFC6D04 + +#define mmTPC7_CFG_QM_SRF_30 0xFC6D08 + +#define mmTPC7_CFG_QM_SRF_31 0xFC6D0C + +#define mmTPC7_CFG_QM_KERNEL_CONFIG 0xFC6D10 + +#define mmTPC7_CFG_QM_SYNC_OBJECT_MESSAGE 0xFC6D14 + +#define mmTPC7_CFG_ARUSER 0xFC6D18 + +#define mmTPC7_CFG_AWUSER 0xFC6D1C + +#define mmTPC7_CFG_FUNC_MBIST_CNTRL 0xFC6E00 + +#define mmTPC7_CFG_FUNC_MBIST_PAT 0xFC6E04 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_0 0xFC6E08 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_1 0xFC6E0C + +#define mmTPC7_CFG_FUNC_MBIST_MEM_2 0xFC6E10 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_3 0xFC6E14 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_4 0xFC6E18 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_5 0xFC6E1C + +#define 
mmTPC7_CFG_FUNC_MBIST_MEM_6 0xFC6E20 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_7 0xFC6E24 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_8 0xFC6E28 + +#define mmTPC7_CFG_FUNC_MBIST_MEM_9 0xFC6E2C + +#endif /* ASIC_REG_TPC7_CFG_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h new file mode 100644 index 000000000000..65d83043bf63 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_CMDQ_REGS_H_ +#define ASIC_REG_TPC7_CMDQ_REGS_H_ + +/* + ***************************************** + * TPC7_CMDQ (Prototype: CMDQ) + ***************************************** + */ + +#define mmTPC7_CMDQ_GLBL_CFG0 0xFC9000 + +#define mmTPC7_CMDQ_GLBL_CFG1 0xFC9004 + +#define mmTPC7_CMDQ_GLBL_PROT 0xFC9008 + +#define mmTPC7_CMDQ_GLBL_ERR_CFG 0xFC900C + +#define mmTPC7_CMDQ_GLBL_ERR_ADDR_LO 0xFC9010 + +#define mmTPC7_CMDQ_GLBL_ERR_ADDR_HI 0xFC9014 + +#define mmTPC7_CMDQ_GLBL_ERR_WDATA 0xFC9018 + +#define mmTPC7_CMDQ_GLBL_SECURE_PROPS 0xFC901C + +#define mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS 0xFC9020 + +#define mmTPC7_CMDQ_GLBL_STS0 0xFC9024 + +#define mmTPC7_CMDQ_GLBL_STS1 0xFC9028 + +#define mmTPC7_CMDQ_CQ_CFG0 0xFC90B0 + +#define mmTPC7_CMDQ_CQ_CFG1 0xFC90B4 + +#define mmTPC7_CMDQ_CQ_ARUSER 0xFC90B8 + +#define mmTPC7_CMDQ_CQ_PTR_LO 0xFC90C0 + +#define mmTPC7_CMDQ_CQ_PTR_HI 0xFC90C4 + +#define mmTPC7_CMDQ_CQ_TSIZE 0xFC90C8 + +#define mmTPC7_CMDQ_CQ_CTL 0xFC90CC + +#define mmTPC7_CMDQ_CQ_PTR_LO_STS 0xFC90D4 + +#define mmTPC7_CMDQ_CQ_PTR_HI_STS 0xFC90D8 + +#define mmTPC7_CMDQ_CQ_TSIZE_STS 0xFC90DC + +#define mmTPC7_CMDQ_CQ_CTL_STS 0xFC90E0 + +#define mmTPC7_CMDQ_CQ_STS0 0xFC90E4 + +#define mmTPC7_CMDQ_CQ_STS1 0xFC90E8 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_EN 0xFC90F0 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_RST_TOKEN 0xFC90F4 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_SAT 0xFC90F8 + +#define mmTPC7_CMDQ_CQ_RD_RATE_LIM_TOUT 0xFC90FC + +#define mmTPC7_CMDQ_CQ_IFIFO_CNT 0xFC9108 + +#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_LO 0xFC9120 + +#define mmTPC7_CMDQ_CP_MSG_BASE0_ADDR_HI 0xFC9124 + +#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_LO 0xFC9128 + +#define mmTPC7_CMDQ_CP_MSG_BASE1_ADDR_HI 0xFC912C + +#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_LO 0xFC9130 + +#define mmTPC7_CMDQ_CP_MSG_BASE2_ADDR_HI 0xFC9134 + +#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_LO 0xFC9138 + +#define mmTPC7_CMDQ_CP_MSG_BASE3_ADDR_HI 0xFC913C + +#define mmTPC7_CMDQ_CP_LDMA_TSIZE_OFFSET 0xFC9140 + +#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC9144 + +#define mmTPC7_CMDQ_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC9148 + +#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_LO_OFFSET 0xFC914C + +#define mmTPC7_CMDQ_CP_LDMA_DST_BASE_HI_OFFSET 0xFC9150 + +#define mmTPC7_CMDQ_CP_LDMA_COMMIT_OFFSET 0xFC9154 + +#define mmTPC7_CMDQ_CP_FENCE0_RDATA 0xFC9158 + +#define mmTPC7_CMDQ_CP_FENCE1_RDATA 0xFC915C + +#define mmTPC7_CMDQ_CP_FENCE2_RDATA 0xFC9160 + +#define mmTPC7_CMDQ_CP_FENCE3_RDATA 0xFC9164 + +#define mmTPC7_CMDQ_CP_FENCE0_CNT 0xFC9168 + +#define mmTPC7_CMDQ_CP_FENCE1_CNT 0xFC916C + +#define mmTPC7_CMDQ_CP_FENCE2_CNT 0xFC9170 + +#define mmTPC7_CMDQ_CP_FENCE3_CNT 0xFC9174 + +#define mmTPC7_CMDQ_CP_STS 0xFC9178 + +#define mmTPC7_CMDQ_CP_CURRENT_INST_LO 0xFC917C + +#define 
mmTPC7_CMDQ_CP_CURRENT_INST_HI 0xFC9180 + +#define mmTPC7_CMDQ_CP_BARRIER_CFG 0xFC9184 + +#define mmTPC7_CMDQ_CP_DBG_0 0xFC9188 + +#define mmTPC7_CMDQ_CQ_BUF_ADDR 0xFC9308 + +#define mmTPC7_CMDQ_CQ_BUF_RDATA 0xFC930C + +#endif /* ASIC_REG_TPC7_CMDQ_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h new file mode 100644 index 000000000000..3d5848d87304 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h @@ -0,0 +1,227 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_NRTR_REGS_H_ +#define ASIC_REG_TPC7_NRTR_REGS_H_ + +/* + ***************************************** + * TPC7_NRTR (Prototype: IF_NRTR) + ***************************************** + */ + +#define mmTPC7_NRTR_HBW_MAX_CRED 0xFC0100 + +#define mmTPC7_NRTR_LBW_MAX_CRED 0xFC0120 + +#define mmTPC7_NRTR_DBG_E_ARB 0xFC0300 + +#define mmTPC7_NRTR_DBG_W_ARB 0xFC0304 + +#define mmTPC7_NRTR_DBG_N_ARB 0xFC0308 + +#define mmTPC7_NRTR_DBG_S_ARB 0xFC030C + +#define mmTPC7_NRTR_DBG_L_ARB 0xFC0310 + +#define mmTPC7_NRTR_DBG_E_ARB_MAX 0xFC0320 + +#define mmTPC7_NRTR_DBG_W_ARB_MAX 0xFC0324 + +#define mmTPC7_NRTR_DBG_N_ARB_MAX 0xFC0328 + +#define mmTPC7_NRTR_DBG_S_ARB_MAX 0xFC032C + +#define mmTPC7_NRTR_DBG_L_ARB_MAX 0xFC0330 + +#define mmTPC7_NRTR_SPLIT_COEF_0 0xFC0400 + +#define mmTPC7_NRTR_SPLIT_COEF_1 0xFC0404 + +#define mmTPC7_NRTR_SPLIT_COEF_2 0xFC0408 + +#define mmTPC7_NRTR_SPLIT_COEF_3 0xFC040C + +#define mmTPC7_NRTR_SPLIT_COEF_4 0xFC0410 + +#define mmTPC7_NRTR_SPLIT_COEF_5 0xFC0414 + +#define mmTPC7_NRTR_SPLIT_COEF_6 0xFC0418 + +#define mmTPC7_NRTR_SPLIT_COEF_7 0xFC041C + +#define mmTPC7_NRTR_SPLIT_COEF_8 0xFC0420 + +#define mmTPC7_NRTR_SPLIT_COEF_9 0xFC0424 + +#define mmTPC7_NRTR_SPLIT_CFG 0xFC0440 + +#define mmTPC7_NRTR_SPLIT_RD_SAT 0xFC0444 + +#define mmTPC7_NRTR_SPLIT_RD_RST_TOKEN 0xFC0448 + +#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_0 0xFC044C + +#define mmTPC7_NRTR_SPLIT_RD_TIMEOUT_1 0xFC0450 + +#define mmTPC7_NRTR_SPLIT_WR_SAT 0xFC0454 + +#define mmTPC7_NRTR_WPLIT_WR_TST_TOLEN 0xFC0458 + +#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_0 0xFC045C + +#define mmTPC7_NRTR_SPLIT_WR_TIMEOUT_1 0xFC0460 + +#define mmTPC7_NRTR_HBW_RANGE_HIT 0xFC0470 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_0 0xFC0480 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_1 0xFC0484 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_2 0xFC0488 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_3 0xFC048C + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_4 0xFC0490 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_5 0xFC0494 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_6 0xFC0498 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_L_7 0xFC049C + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_0 0xFC04A0 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_1 0xFC04A4 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_2 0xFC04A8 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_3 0xFC04AC + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_4 0xFC04B0 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_5 0xFC04B4 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_6 0xFC04B8 + +#define mmTPC7_NRTR_HBW_RANGE_MASK_H_7 0xFC04BC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_0 0xFC04C0 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_1 0xFC04C4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_2 0xFC04C8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_3 0xFC04CC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_4 
0xFC04D0 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_5 0xFC04D4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_6 0xFC04D8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_L_7 0xFC04DC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_0 0xFC04E0 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_1 0xFC04E4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_2 0xFC04E8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_3 0xFC04EC + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_4 0xFC04F0 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_5 0xFC04F4 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_6 0xFC04F8 + +#define mmTPC7_NRTR_HBW_RANGE_BASE_H_7 0xFC04FC + +#define mmTPC7_NRTR_LBW_RANGE_HIT 0xFC0500 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_0 0xFC0510 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_1 0xFC0514 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_2 0xFC0518 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_3 0xFC051C + +#define mmTPC7_NRTR_LBW_RANGE_MASK_4 0xFC0520 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_5 0xFC0524 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_6 0xFC0528 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_7 0xFC052C + +#define mmTPC7_NRTR_LBW_RANGE_MASK_8 0xFC0530 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_9 0xFC0534 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_10 0xFC0538 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_11 0xFC053C + +#define mmTPC7_NRTR_LBW_RANGE_MASK_12 0xFC0540 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_13 0xFC0544 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_14 0xFC0548 + +#define mmTPC7_NRTR_LBW_RANGE_MASK_15 0xFC054C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_0 0xFC0550 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_1 0xFC0554 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_2 0xFC0558 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_3 0xFC055C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_4 0xFC0560 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_5 0xFC0564 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_6 0xFC0568 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_7 0xFC056C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_8 0xFC0570 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_9 0xFC0574 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_10 0xFC0578 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_11 0xFC057C + +#define mmTPC7_NRTR_LBW_RANGE_BASE_12 0xFC0580 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_13 0xFC0584 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_14 0xFC0588 + +#define mmTPC7_NRTR_LBW_RANGE_BASE_15 0xFC058C + +#define mmTPC7_NRTR_RGLTR 0xFC0590 + +#define mmTPC7_NRTR_RGLTR_WR_RESULT 0xFC0594 + +#define mmTPC7_NRTR_RGLTR_RD_RESULT 0xFC0598 + +#define mmTPC7_NRTR_SCRAMB_EN 0xFC0600 + +#define mmTPC7_NRTR_NON_LIN_SCRAMB 0xFC0604 + +#endif /* ASIC_REG_TPC7_NRTR_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h new file mode 100644 index 000000000000..25f5095f68fb --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h @@ -0,0 +1,179 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC7_QM_REGS_H_ +#define ASIC_REG_TPC7_QM_REGS_H_ + +/* + ***************************************** + * TPC7_QM (Prototype: QMAN) + ***************************************** + */ + +#define mmTPC7_QM_GLBL_CFG0 0xFC8000 + +#define mmTPC7_QM_GLBL_CFG1 0xFC8004 + +#define mmTPC7_QM_GLBL_PROT 0xFC8008 + +#define mmTPC7_QM_GLBL_ERR_CFG 0xFC800C + +#define mmTPC7_QM_GLBL_ERR_ADDR_LO 0xFC8010 + +#define mmTPC7_QM_GLBL_ERR_ADDR_HI 0xFC8014 + +#define mmTPC7_QM_GLBL_ERR_WDATA 0xFC8018 + +#define mmTPC7_QM_GLBL_SECURE_PROPS 0xFC801C + +#define mmTPC7_QM_GLBL_NON_SECURE_PROPS 0xFC8020 + +#define mmTPC7_QM_GLBL_STS0 0xFC8024 + +#define mmTPC7_QM_GLBL_STS1 0xFC8028 + +#define mmTPC7_QM_PQ_BASE_LO 0xFC8060 + +#define mmTPC7_QM_PQ_BASE_HI 0xFC8064 + +#define mmTPC7_QM_PQ_SIZE 0xFC8068 + +#define mmTPC7_QM_PQ_PI 0xFC806C + +#define mmTPC7_QM_PQ_CI 0xFC8070 + +#define mmTPC7_QM_PQ_CFG0 0xFC8074 + +#define mmTPC7_QM_PQ_CFG1 0xFC8078 + +#define mmTPC7_QM_PQ_ARUSER 0xFC807C + +#define mmTPC7_QM_PQ_PUSH0 0xFC8080 + +#define mmTPC7_QM_PQ_PUSH1 0xFC8084 + +#define mmTPC7_QM_PQ_PUSH2 0xFC8088 + +#define mmTPC7_QM_PQ_PUSH3 0xFC808C + +#define mmTPC7_QM_PQ_STS0 0xFC8090 + +#define mmTPC7_QM_PQ_STS1 0xFC8094 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_EN 0xFC80A0 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_RST_TOKEN 0xFC80A4 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_SAT 0xFC80A8 + +#define mmTPC7_QM_PQ_RD_RATE_LIM_TOUT 0xFC80AC + +#define mmTPC7_QM_CQ_CFG0 0xFC80B0 + +#define mmTPC7_QM_CQ_CFG1 0xFC80B4 + +#define mmTPC7_QM_CQ_ARUSER 0xFC80B8 + +#define mmTPC7_QM_CQ_PTR_LO 0xFC80C0 + +#define mmTPC7_QM_CQ_PTR_HI 0xFC80C4 + +#define mmTPC7_QM_CQ_TSIZE 0xFC80C8 + +#define mmTPC7_QM_CQ_CTL 0xFC80CC + +#define mmTPC7_QM_CQ_PTR_LO_STS 0xFC80D4 + +#define mmTPC7_QM_CQ_PTR_HI_STS 0xFC80D8 + +#define mmTPC7_QM_CQ_TSIZE_STS 0xFC80DC + +#define mmTPC7_QM_CQ_CTL_STS 0xFC80E0 + +#define mmTPC7_QM_CQ_STS0 0xFC80E4 + +#define mmTPC7_QM_CQ_STS1 0xFC80E8 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_EN 0xFC80F0 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_RST_TOKEN 0xFC80F4 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_SAT 0xFC80F8 + +#define mmTPC7_QM_CQ_RD_RATE_LIM_TOUT 0xFC80FC + +#define mmTPC7_QM_CQ_IFIFO_CNT 0xFC8108 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_LO 0xFC8120 + +#define mmTPC7_QM_CP_MSG_BASE0_ADDR_HI 0xFC8124 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_LO 0xFC8128 + +#define mmTPC7_QM_CP_MSG_BASE1_ADDR_HI 0xFC812C + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_LO 0xFC8130 + +#define mmTPC7_QM_CP_MSG_BASE2_ADDR_HI 0xFC8134 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_LO 0xFC8138 + +#define mmTPC7_QM_CP_MSG_BASE3_ADDR_HI 0xFC813C + +#define mmTPC7_QM_CP_LDMA_TSIZE_OFFSET 0xFC8140 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_LO_OFFSET 0xFC8144 + +#define mmTPC7_QM_CP_LDMA_SRC_BASE_HI_OFFSET 0xFC8148 + +#define mmTPC7_QM_CP_LDMA_DST_BASE_LO_OFFSET 0xFC814C + +#define mmTPC7_QM_CP_LDMA_DST_BASE_HI_OFFSET 0xFC8150 + +#define mmTPC7_QM_CP_LDMA_COMMIT_OFFSET 0xFC8154 + +#define mmTPC7_QM_CP_FENCE0_RDATA 0xFC8158 + +#define mmTPC7_QM_CP_FENCE1_RDATA 0xFC815C + +#define mmTPC7_QM_CP_FENCE2_RDATA 0xFC8160 + +#define mmTPC7_QM_CP_FENCE3_RDATA 0xFC8164 + +#define mmTPC7_QM_CP_FENCE0_CNT 0xFC8168 + +#define mmTPC7_QM_CP_FENCE1_CNT 0xFC816C + +#define mmTPC7_QM_CP_FENCE2_CNT 0xFC8170 + +#define mmTPC7_QM_CP_FENCE3_CNT 0xFC8174 + +#define mmTPC7_QM_CP_STS 0xFC8178 + +#define mmTPC7_QM_CP_CURRENT_INST_LO 0xFC817C + +#define 
mmTPC7_QM_CP_CURRENT_INST_HI 0xFC8180 + +#define mmTPC7_QM_CP_BARRIER_CFG 0xFC8184 + +#define mmTPC7_QM_CP_DBG_0 0xFC8188 + +#define mmTPC7_QM_PQ_BUF_ADDR 0xFC8300 + +#define mmTPC7_QM_PQ_BUF_RDATA 0xFC8304 + +#define mmTPC7_QM_CQ_BUF_ADDR 0xFC8308 + +#define mmTPC7_QM_CQ_BUF_RDATA 0xFC830C + +#endif /* ASIC_REG_TPC7_QM_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h new file mode 100644 index 000000000000..920231d0afa5 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +/************************************ + ** This is an auto-generated file ** + ** DO NOT EDIT BELOW ** + ************************************/ + +#ifndef ASIC_REG_TPC_PLL_REGS_H_ +#define ASIC_REG_TPC_PLL_REGS_H_ + +/* + ***************************************** + * TPC_PLL (Prototype: PLL) + ***************************************** + */ + +#define mmTPC_PLL_NR 0xE01100 + +#define mmTPC_PLL_NF 0xE01104 + +#define mmTPC_PLL_OD 0xE01108 + +#define mmTPC_PLL_NB 0xE0110C + +#define mmTPC_PLL_CFG 0xE01110 + +#define mmTPC_PLL_LOSE_MASK 0xE01120 + +#define mmTPC_PLL_LOCK_INTR 0xE01128 + +#define mmTPC_PLL_LOCK_BYPASS 0xE0112C + +#define mmTPC_PLL_DATA_CHNG 0xE01130 + +#define mmTPC_PLL_RST 0xE01134 + +#define mmTPC_PLL_SLIP_WD_CNTR 0xE01150 + +#define mmTPC_PLL_DIV_FACTOR_0 0xE01200 + +#define mmTPC_PLL_DIV_FACTOR_1 0xE01204 + +#define mmTPC_PLL_DIV_FACTOR_2 0xE01208 + +#define mmTPC_PLL_DIV_FACTOR_3 0xE0120C + +#define mmTPC_PLL_DIV_FACTOR_CMD_0 0xE01220 + +#define mmTPC_PLL_DIV_FACTOR_CMD_1 0xE01224 + +#define mmTPC_PLL_DIV_FACTOR_CMD_2 0xE01228 + +#define mmTPC_PLL_DIV_FACTOR_CMD_3 0xE0122C + +#define mmTPC_PLL_DIV_SEL_0 0xE01280 + +#define mmTPC_PLL_DIV_SEL_1 0xE01284 + +#define mmTPC_PLL_DIV_SEL_2 0xE01288 + +#define mmTPC_PLL_DIV_SEL_3 0xE0128C + +#define mmTPC_PLL_DIV_EN_0 0xE012A0 + +#define mmTPC_PLL_DIV_EN_1 0xE012A4 + +#define mmTPC_PLL_DIV_EN_2 0xE012A8 + +#define mmTPC_PLL_DIV_EN_3 0xE012AC + +#define mmTPC_PLL_DIV_FACTOR_BUSY_0 0xE012C0 + +#define mmTPC_PLL_DIV_FACTOR_BUSY_1 0xE012C4 + +#define mmTPC_PLL_DIV_FACTOR_BUSY_2 0xE012C8 + +#define mmTPC_PLL_DIV_FACTOR_BUSY_3 0xE012CC + +#define mmTPC_PLL_CLK_GATER 0xE01300 + +#define mmTPC_PLL_CLK_RLX_0 0xE01310 + +#define mmTPC_PLL_CLK_RLX_1 0xE01314 + +#define mmTPC_PLL_CLK_RLX_2 0xE01318 + +#define mmTPC_PLL_CLK_RLX_3 0xE0131C + +#define mmTPC_PLL_REF_CNTR_PERIOD 0xE01400 + +#define mmTPC_PLL_REF_LOW_THRESHOLD 0xE01410 + +#define mmTPC_PLL_REF_HIGH_THRESHOLD 0xE01420 + +#define mmTPC_PLL_PLL_NOT_STABLE 0xE01430 + +#define mmTPC_PLL_FREQ_CALC_EN 0xE01440 + +#endif /* ASIC_REG_TPC_PLL_REGS_H_ */ + diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h new file mode 100644 index 000000000000..614149efa412 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYA_H +#define GOYA_H + +#include "asic_reg/goya_regs.h" + +#include + +#define SRAM_CFG_BAR_ID 0 +#define MSIX_BAR_ID 2 +#define DDR_BAR_ID 4 + +#define CFG_BAR_SIZE 0x10000000ull /* 256MB */ +#define MSIX_BAR_SIZE 0x1000ull /* 4KB */ + +#define CFG_BASE 0x7FFC000000ull +#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG*/ + +#define SRAM_BASE_ADDR 0x7FF0000000ull +#define SRAM_SIZE 0x32A0000 /* 50.625MB */ + +#define DRAM_PHYS_BASE 0x0ull + +#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */ +#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */ + +#define GOYA_MSIX_ENTRIES 8 + +#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */ + +#define MAX_ASID 1024 + +#define PROT_BITS_OFFS 0xF80 + +#define DMA_MAX_NUM 5 + +#define TPC_MAX_NUM 8 + +#endif /* GOYA_H */ diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h new file mode 100644 index 000000000000..497937a17ee9 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef __GOYA_ASYNC_EVENTS_H_ +#define __GOYA_ASYNC_EVENTS_H_ + +enum goya_async_event_id { + GOYA_ASYNC_EVENT_ID_PCIE_IF = 33, + GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36, + GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39, + GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42, + GOYA_ASYNC_EVENT_ID_TPC3_ECC = 45, + GOYA_ASYNC_EVENT_ID_TPC4_ECC = 48, + GOYA_ASYNC_EVENT_ID_TPC5_ECC = 51, + GOYA_ASYNC_EVENT_ID_TPC6_ECC = 54, + GOYA_ASYNC_EVENT_ID_TPC7_ECC = 57, + GOYA_ASYNC_EVENT_ID_MME_ECC = 60, + GOYA_ASYNC_EVENT_ID_MME_ECC_EXT = 61, + GOYA_ASYNC_EVENT_ID_MMU_ECC = 63, + GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64, + GOYA_ASYNC_EVENT_ID_DMA_ECC = 66, + GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75, + GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78, + GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79, + GOYA_ASYNC_EVENT_ID_SRAM0 = 81, + GOYA_ASYNC_EVENT_ID_SRAM1 = 82, + GOYA_ASYNC_EVENT_ID_SRAM2 = 83, + GOYA_ASYNC_EVENT_ID_SRAM3 = 84, + GOYA_ASYNC_EVENT_ID_SRAM4 = 85, + GOYA_ASYNC_EVENT_ID_SRAM5 = 86, + GOYA_ASYNC_EVENT_ID_SRAM6 = 87, + GOYA_ASYNC_EVENT_ID_SRAM7 = 88, + GOYA_ASYNC_EVENT_ID_SRAM8 = 89, + GOYA_ASYNC_EVENT_ID_SRAM9 = 90, + GOYA_ASYNC_EVENT_ID_SRAM10 = 91, + GOYA_ASYNC_EVENT_ID_SRAM11 = 92, + GOYA_ASYNC_EVENT_ID_SRAM12 = 93, + GOYA_ASYNC_EVENT_ID_SRAM13 = 94, + GOYA_ASYNC_EVENT_ID_SRAM14 = 95, + GOYA_ASYNC_EVENT_ID_SRAM15 = 96, + GOYA_ASYNC_EVENT_ID_SRAM16 = 97, + GOYA_ASYNC_EVENT_ID_SRAM17 = 98, + GOYA_ASYNC_EVENT_ID_SRAM18 = 99, + GOYA_ASYNC_EVENT_ID_SRAM19 = 100, + GOYA_ASYNC_EVENT_ID_SRAM20 = 101, + GOYA_ASYNC_EVENT_ID_SRAM21 = 102, + GOYA_ASYNC_EVENT_ID_SRAM22 = 103, + GOYA_ASYNC_EVENT_ID_SRAM23 = 104, + GOYA_ASYNC_EVENT_ID_SRAM24 = 105, + GOYA_ASYNC_EVENT_ID_SRAM25 = 106, + GOYA_ASYNC_EVENT_ID_SRAM26 = 107, + GOYA_ASYNC_EVENT_ID_SRAM27 = 108, + GOYA_ASYNC_EVENT_ID_SRAM28 = 109, + GOYA_ASYNC_EVENT_ID_SRAM29 = 110, + GOYA_ASYNC_EVENT_ID_GIC500 = 112, + GOYA_ASYNC_EVENT_ID_PCIE_DEC = 115, + GOYA_ASYNC_EVENT_ID_TPC0_DEC = 117, + GOYA_ASYNC_EVENT_ID_TPC1_DEC = 120, + GOYA_ASYNC_EVENT_ID_TPC2_DEC = 123, + GOYA_ASYNC_EVENT_ID_TPC3_DEC = 126, + GOYA_ASYNC_EVENT_ID_TPC4_DEC = 129, + GOYA_ASYNC_EVENT_ID_TPC5_DEC = 132, + GOYA_ASYNC_EVENT_ID_TPC6_DEC = 135, + GOYA_ASYNC_EVENT_ID_TPC7_DEC = 138, + GOYA_ASYNC_EVENT_ID_AXI_ECC = 139, + GOYA_ASYNC_EVENT_ID_L2_RAM_ECC = 140, + GOYA_ASYNC_EVENT_ID_MME_WACS = 141, + GOYA_ASYNC_EVENT_ID_MME_WACSD = 142, + GOYA_ASYNC_EVENT_ID_PLL0 = 143, + 
GOYA_ASYNC_EVENT_ID_PLL1 = 144, + GOYA_ASYNC_EVENT_ID_PLL3 = 146, + GOYA_ASYNC_EVENT_ID_PLL4 = 147, + GOYA_ASYNC_EVENT_ID_PLL5 = 148, + GOYA_ASYNC_EVENT_ID_PLL6 = 149, + GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER = 155, + GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC = 159, + GOYA_ASYNC_EVENT_ID_PSOC = 160, + GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171, + GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG3 = 177, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG0 = 178, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG1 = 179, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG2 = 180, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG3 = 181, + GOYA_ASYNC_EVENT_ID_PCIE_APB = 182, + GOYA_ASYNC_EVENT_ID_PCIE_QDB = 183, + GOYA_ASYNC_EVENT_ID_PCIE_BM_D_P_WR = 184, + GOYA_ASYNC_EVENT_ID_PCIE_BM_D_RD = 185, + GOYA_ASYNC_EVENT_ID_PCIE_BM_U_P_WR = 186, + GOYA_ASYNC_EVENT_ID_PCIE_BM_U_RD = 187, + GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU = 190, + GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR = 191, + GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU = 200, + GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR = 201, + GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU = 210, + GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR = 211, + GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU = 220, + GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR = 221, + GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU = 230, + GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR = 231, + GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU = 240, + GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR = 241, + GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU = 250, + GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR = 251, + GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU = 260, + GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR = 261, + GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU0 = 270, + GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU1 = 271, + GOYA_ASYNC_EVENT_ID_MME_WACS_UP = 272, + GOYA_ASYNC_EVENT_ID_MME_WACS_DOWN = 273, + GOYA_ASYNC_EVENT_ID_MMU_PAGE_FAULT = 280, + GOYA_ASYNC_EVENT_ID_MMU_WR_PERM = 281, + GOYA_ASYNC_EVENT_ID_MMU_DBG_BM = 282, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 = 290, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH1 = 291, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH2 = 292, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH3 = 293, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 = 294, + GOYA_ASYNC_EVENT_ID_DDR0_PHY_DFI = 300, + GOYA_ASYNC_EVENT_ID_DDR0_ECC_SCRUB = 301, + GOYA_ASYNC_EVENT_ID_DDR0_DB_ECC = 302, + GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC = 303, + GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC_MC = 304, + GOYA_ASYNC_EVENT_ID_DDR0_AXI_RD = 305, + GOYA_ASYNC_EVENT_ID_DDR0_AXI_WR = 306, + GOYA_ASYNC_EVENT_ID_DDR1_PHY_DFI = 310, + GOYA_ASYNC_EVENT_ID_DDR1_ECC_SCRUB = 311, + GOYA_ASYNC_EVENT_ID_DDR1_DB_ECC = 312, + GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC = 313, + GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC_MC = 314, + GOYA_ASYNC_EVENT_ID_DDR1_AXI_RD = 315, + GOYA_ASYNC_EVENT_ID_DDR1_AXI_WR = 316, + GOYA_ASYNC_EVENT_ID_CPU_BMON = 320, + GOYA_ASYNC_EVENT_ID_TS_EAST = 322, + GOYA_ASYNC_EVENT_ID_TS_WEST = 323, + GOYA_ASYNC_EVENT_ID_TS_NORTH = 324, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430, + GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431, + GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432, + GOYA_ASYNC_EVENT_ID_TPC3_CMDQ = 433, + GOYA_ASYNC_EVENT_ID_TPC4_CMDQ = 434, + GOYA_ASYNC_EVENT_ID_TPC5_CMDQ = 435, + GOYA_ASYNC_EVENT_ID_TPC6_CMDQ = 436, + GOYA_ASYNC_EVENT_ID_TPC7_CMDQ = 437, + GOYA_ASYNC_EVENT_ID_TPC0_QM = 438, + GOYA_ASYNC_EVENT_ID_TPC1_QM = 439, + GOYA_ASYNC_EVENT_ID_TPC2_QM = 
440, + GOYA_ASYNC_EVENT_ID_TPC3_QM = 441, + GOYA_ASYNC_EVENT_ID_TPC4_QM = 442, + GOYA_ASYNC_EVENT_ID_TPC5_QM = 443, + GOYA_ASYNC_EVENT_ID_TPC6_QM = 444, + GOYA_ASYNC_EVENT_ID_TPC7_QM = 445, + GOYA_ASYNC_EVENT_ID_MME_QM = 447, + GOYA_ASYNC_EVENT_ID_MME_CMDQ = 448, + GOYA_ASYNC_EVENT_ID_DMA0_QM = 449, + GOYA_ASYNC_EVENT_ID_DMA1_QM = 450, + GOYA_ASYNC_EVENT_ID_DMA2_QM = 451, + GOYA_ASYNC_EVENT_ID_DMA3_QM = 452, + GOYA_ASYNC_EVENT_ID_DMA4_QM = 453, + GOYA_ASYNC_EVENT_ID_DMA_ON_HBW = 454, + GOYA_ASYNC_EVENT_ID_DMA0_CH = 455, + GOYA_ASYNC_EVENT_ID_DMA1_CH = 456, + GOYA_ASYNC_EVENT_ID_DMA2_CH = 457, + GOYA_ASYNC_EVENT_ID_DMA3_CH = 458, + GOYA_ASYNC_EVENT_ID_DMA4_CH = 459, + GOYA_ASYNC_EVENT_ID_PI_UPDATE = 484, + GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485, + GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486, + GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487, + GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023, + GOYA_ASYNC_EVENT_ID_SIZE +}; + +#endif /* __GOYA_ASYNC_EVENTS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/misc/habanalabs/include/goya/goya_fw_if.h new file mode 100644 index 000000000000..a9920cb4a07b --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_fw_if.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GOYA_FW_IF_H +#define GOYA_FW_IF_H + +#define CPU_BOOT_ADDR 0x7FF8040000ull + +#define UBOOT_FW_OFFSET 0x100000 /* 1MB in SRAM */ +#define LINUX_FW_OFFSET 0x800000 /* 8MB in DDR */ + +enum goya_pll_index { + CPU_PLL = 0, + IC_PLL, + MC_PLL, + MME_PLL, + PCI_PLL, + EMMC_PLL, + TPC_PLL +}; + +#define GOYA_PLL_FREQ_LOW 50000000 /* 50 MHz */ + +#endif /* GOYA_FW_IF_H */ diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h new file mode 100644 index 000000000000..a14407b975e4 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_packets.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2017-2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYA_PACKETS_H +#define GOYA_PACKETS_H + +#include + +#define PACKET_HEADER_PACKET_ID_SHIFT 56 +#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull + +enum packet_id { + PACKET_WREG_32 = 0x1, + PACKET_WREG_BULK = 0x2, + PACKET_MSG_LONG = 0x3, + PACKET_MSG_SHORT = 0x4, + PACKET_CP_DMA = 0x5, + PACKET_MSG_PROT = 0x7, + PACKET_FENCE = 0x8, + PACKET_LIN_DMA = 0x9, + PACKET_NOP = 0xA, + PACKET_STOP = 0xB, + MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >> + PACKET_HEADER_PACKET_ID_SHIFT) + 1 +}; + +enum goya_dma_direction { + DMA_HOST_TO_DRAM, + DMA_HOST_TO_SRAM, + DMA_DRAM_TO_SRAM, + DMA_SRAM_TO_DRAM, + DMA_SRAM_TO_HOST, + DMA_DRAM_TO_HOST, + DMA_DRAM_TO_DRAM, + DMA_SRAM_TO_SRAM, + DMA_ENUM_MAX +}; + +#define GOYA_PKT_CTL_OPCODE_SHIFT 24 +#define GOYA_PKT_CTL_OPCODE_MASK 0x1F000000 + +#define GOYA_PKT_CTL_EB_SHIFT 29 +#define GOYA_PKT_CTL_EB_MASK 0x20000000 + +#define GOYA_PKT_CTL_RB_SHIFT 30 +#define GOYA_PKT_CTL_RB_MASK 0x40000000 + +#define GOYA_PKT_CTL_MB_SHIFT 31 +#define GOYA_PKT_CTL_MB_MASK 0x80000000 + +struct packet_nop { + __le32 reserved; + __le32 ctl; +}; + +struct packet_stop { + __le32 reserved; + __le32 ctl; +}; + +#define GOYA_PKT_WREG32_CTL_REG_OFFSET_SHIFT 0 +#define GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK 0x0000FFFF + +struct packet_wreg32 { + __le32 value; + __le32 ctl; +}; + +struct packet_wreg_bulk { + __le32 size64; + __le32 ctl; + __le64 values[0]; /* data starts here */ +}; + +struct packet_msg_long { + __le32 value; + __le32 ctl; + __le64 addr; +}; + +struct packet_msg_short { + __le32 value; + __le32 ctl; +}; + +struct packet_msg_prot { + __le32 value; + __le32 ctl; + __le64 addr; +}; + +struct packet_fence { + __le32 cfg; + __le32 ctl; +}; + +#define GOYA_PKT_LIN_DMA_CTL_WO_SHIFT 0 +#define GOYA_PKT_LIN_DMA_CTL_WO_MASK 0x00000001 + +#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_SHIFT 1 +#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK 0x00000002 + +#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_SHIFT 2 +#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK 0x00000004 + +#define GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT 6 +#define GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000040 + +#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT 20 +#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK 0x00700000 + +struct packet_lin_dma { + __le32 tsize; + __le32 ctl; + __le64 src_addr; + __le64 dst_addr; +}; + +struct packet_cp_dma { + __le32 tsize; + __le32 ctl; + __le64 src_addr; +}; + +#endif /* GOYA_PACKETS_H */ diff --git a/drivers/misc/habanalabs/include/hl_boot_if.h b/drivers/misc/habanalabs/include/hl_boot_if.h new file mode 100644 index 000000000000..7475732b9996 --- /dev/null +++ b/drivers/misc/habanalabs/include/hl_boot_if.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef HL_BOOT_IF_H +#define HL_BOOT_IF_H + +enum cpu_boot_status { + CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */ + CPU_BOOT_STATUS_IN_WFE, + CPU_BOOT_STATUS_DRAM_RDY, + CPU_BOOT_STATUS_SRAM_AVAIL, + CPU_BOOT_STATUS_IN_BTL, /* BTL is H/W FSM */ + CPU_BOOT_STATUS_IN_PREBOOT, + CPU_BOOT_STATUS_IN_SPL, + CPU_BOOT_STATUS_IN_UBOOT, + CPU_BOOT_STATUS_DRAM_INIT_FAIL, + CPU_BOOT_STATUS_FIT_CORRUPTED +}; + +enum kmd_msg { + KMD_MSG_NA = 0, + KMD_MSG_GOTO_WFE, + KMD_MSG_FIT_RDY +}; + +#endif /* HL_BOOT_IF_H */ diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h new file mode 100644 index 000000000000..b680052ee3f0 --- /dev/null +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef INCLUDE_MMU_GENERAL_H_ +#define INCLUDE_MMU_GENERAL_H_ + +#define PAGE_SHIFT_4KB 12 +#define PAGE_SHIFT_2MB 21 +#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB) +#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB) +#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1)) + +#define PAGE_PRESENT_MASK 0x0000000000001 +#define SWAP_OUT_MASK 0x0000000000004 +#define LAST_MASK 0x0000000000800 +#define PHYS_ADDR_MASK 0x3FFFFFFFFF000ull +#define HOP0_MASK 0x3000000000000ull +#define HOP1_MASK 0x0FF8000000000ull +#define HOP2_MASK 0x0007FC0000000ull +#define HOP3_MASK 0x000003FE00000 +#define HOP4_MASK 0x00000001FF000 +#define OFFSET_MASK 0x0000000000FFF + +#define HOP0_SHIFT 48 +#define HOP1_SHIFT 39 +#define HOP2_SHIFT 30 +#define HOP3_SHIFT 21 +#define HOP4_SHIFT 12 + +#define PTE_PHYS_ADDR_SHIFT 12 +#define PTE_PHYS_ADDR_MASK ~0xFFF + +#define HL_PTE_SIZE sizeof(u64) +#define HOP_TABLE_SIZE PAGE_SIZE_4KB +#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE) +#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID) + +#define MMU_HOP0_PA43_12_SHIFT 12 +#define MMU_HOP0_PA49_44_SHIFT (12 + 32) + +#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */ + +#endif /* INCLUDE_MMU_GENERAL_H_ */ diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h new file mode 100644 index 000000000000..8539dd041f2c --- /dev/null +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef INCLUDE_MMU_V1_0_H_ +#define INCLUDE_MMU_V1_0_H_ + +#define MMU_HOP0_PA43_12 0x490004 +#define MMU_HOP0_PA49_44 0x490008 +#define MMU_ASID_BUSY 0x490000 + +#endif /* INCLUDE_MMU_V1_0_H_ */ diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h new file mode 100644 index 000000000000..bf59bbe27fdc --- /dev/null +++ b/drivers/misc/habanalabs/include/qman_if.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef QMAN_IF_H +#define QMAN_IF_H + +#include + +/* + * PRIMARY QUEUE + */ + +struct hl_bd { + __le64 ptr; + __le32 len; + __le32 ctl; +}; + +#define HL_BD_SIZE sizeof(struct hl_bd) + +/* + * BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is + * valid. 1 means the repeat field is valid, 0 means not-valid, + * i.e. 
repeat == 1 + */ +#define BD_CTL_REPEAT_VALID_SHIFT 24 +#define BD_CTL_REPEAT_VALID_MASK 0x01000000 + +#define BD_CTL_SHADOW_INDEX_SHIFT 0 +#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF + +/* + * COMPLETION QUEUE + */ + +struct hl_cq_entry { + __le32 data; +}; + +#define HL_CQ_ENTRY_SIZE sizeof(struct hl_cq_entry) + +#define CQ_ENTRY_READY_SHIFT 31 +#define CQ_ENTRY_READY_MASK 0x80000000 + +#define CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT 30 +#define CQ_ENTRY_SHADOW_INDEX_VALID_MASK 0x40000000 + +#define CQ_ENTRY_SHADOW_INDEX_SHIFT BD_CTL_SHADOW_INDEX_SHIFT +#define CQ_ENTRY_SHADOW_INDEX_MASK BD_CTL_SHADOW_INDEX_MASK + + +#endif /* QMAN_IF_H */ diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c new file mode 100644 index 000000000000..e69a09c10e3f --- /dev/null +++ b/drivers/misc/habanalabs/irq.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include + +/** + * This structure is used to schedule work of EQ entry and armcp_reset event + * + * @eq_work - workqueue object to run when EQ entry is received + * @hdev - pointer to device structure + * @eq_entry - copy of the EQ entry + */ +struct hl_eqe_work { + struct work_struct eq_work; + struct hl_device *hdev; + struct hl_eq_entry eq_entry; +}; + +/* + * hl_cq_inc_ptr - increment ci or pi of cq + * + * @ptr: the current ci or pi value of the completion queue + * + * Increment ptr by 1. If it reaches the number of completion queue + * entries, set it to 0 + */ +inline u32 hl_cq_inc_ptr(u32 ptr) +{ + ptr++; + if (unlikely(ptr == HL_CQ_LENGTH)) + ptr = 0; + return ptr; +} + +/* + * hl_eq_inc_ptr - increment ci of eq + * + * @ptr: the current ci value of the event queue + * + * Increment ptr by 1. If it reaches the number of event queue + * entries, set it to 0 + */ +inline u32 hl_eq_inc_ptr(u32 ptr) +{ + ptr++; + if (unlikely(ptr == HL_EQ_LENGTH)) + ptr = 0; + return ptr; +} + +static void irq_handle_eqe(struct work_struct *work) +{ + struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work, + eq_work); + struct hl_device *hdev = eqe_work->hdev; + + hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry); + + kfree(eqe_work); +} + +/* + * hl_irq_handler_cq - irq handler for completion queue + * + * @irq: irq number + * @arg: pointer to completion queue structure + * + */ +irqreturn_t hl_irq_handler_cq(int irq, void *arg) +{ + struct hl_cq *cq = arg; + struct hl_device *hdev = cq->hdev; + struct hl_hw_queue *queue; + struct hl_cs_job *job; + bool shadow_index_valid; + u16 shadow_index; + u32 *cq_entry; + u32 *cq_base; + + if (hdev->disabled) { + dev_dbg(hdev->dev, + "Device disabled but received IRQ %d for CQ %d\n", + irq, cq->hw_queue_id); + return IRQ_HANDLED; + } + + cq_base = (u32 *) (uintptr_t) cq->kernel_address; + + while (1) { + bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK) + >> CQ_ENTRY_READY_SHIFT); + + if (!entry_ready) + break; + + cq_entry = (u32 *) &cq_base[cq->ci]; + + /* + * Make sure we read CQ entry contents after we've + * checked the ownership bit. 
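+ * The dma_rmb() below is the barrier that enforces this ordering: without
+ * it the CPU could load the shadow index fields before the ready bit is
+ * observed as set.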
+ */ + dma_rmb(); + + shadow_index_valid = + ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) + >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); + + shadow_index = (u16) + ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK) + >> CQ_ENTRY_SHADOW_INDEX_SHIFT); + + queue = &hdev->kernel_queues[cq->hw_queue_id]; + + if ((shadow_index_valid) && (!hdev->disabled)) { + job = queue->shadow_queue[hl_pi_2_offset(shadow_index)]; + queue_work(hdev->cq_wq, &job->finish_work); + } + + /* + * Update ci of the context's queue. There is no + * need to protect it with spinlock because this update is + * done only inside IRQ and there is a different IRQ per + * queue + */ + queue->ci = hl_queue_inc_ptr(queue->ci); + + /* Clear CQ entry ready bit */ + cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK; + + cq->ci = hl_cq_inc_ptr(cq->ci); + + /* Increment free slots */ + atomic_inc(&cq->free_slots_cnt); + } + + return IRQ_HANDLED; +} + +/* + * hl_irq_handler_eq - irq handler for event queue + * + * @irq: irq number + * @arg: pointer to event queue structure + * + */ +irqreturn_t hl_irq_handler_eq(int irq, void *arg) +{ + struct hl_eq *eq = arg; + struct hl_device *hdev = eq->hdev; + struct hl_eq_entry *eq_entry; + struct hl_eq_entry *eq_base; + struct hl_eqe_work *handle_eqe_work; + + eq_base = (struct hl_eq_entry *) (uintptr_t) eq->kernel_address; + + while (1) { + bool entry_ready = + ((__le32_to_cpu(eq_base[eq->ci].hdr.ctl) & + EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT); + + if (!entry_ready) + break; + + eq_entry = &eq_base[eq->ci]; + + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. + */ + dma_rmb(); + + if (hdev->disabled) { + dev_warn(hdev->dev, + "Device disabled but received IRQ %d for EQ\n", + irq); + goto skip_irq; + } + + handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC); + if (handle_eqe_work) { + INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe); + handle_eqe_work->hdev = hdev; + + memcpy(&handle_eqe_work->eq_entry, eq_entry, + sizeof(*eq_entry)); + + queue_work(hdev->eq_wq, &handle_eqe_work->eq_work); + } +skip_irq: + /* Clear EQ entry ready bit */ + eq_entry->hdr.ctl = + __cpu_to_le32(__le32_to_cpu(eq_entry->hdr.ctl) & + ~EQ_CTL_READY_MASK); + + eq->ci = hl_eq_inc_ptr(eq->ci); + + hdev->asic_funcs->update_eq_ci(hdev, eq->ci); + } + + return IRQ_HANDLED; +} + +/* + * hl_cq_init - main initialization function for an cq object + * + * @hdev: pointer to device structure + * @q: pointer to cq structure + * @hw_queue_id: The H/W queue ID this completion queue belongs to + * + * Allocate dma-able memory for the completion queue and initialize fields + * Returns 0 on success + */ +int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id) +{ + void *p; + + BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE); + + p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, + &q->bus_address, GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->hdev = hdev; + q->kernel_address = (u64) (uintptr_t) p; + q->hw_queue_id = hw_queue_id; + q->ci = 0; + q->pi = 0; + + atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH); + + return 0; +} + +/* + * hl_cq_fini - destroy completion queue + * + * @hdev: pointer to device structure + * @q: pointer to cq structure + * + * Free the completion queue memory + */ +void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q) +{ + hdev->asic_funcs->dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, + (void *) (uintptr_t) q->kernel_address, q->bus_address); +} + +void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q) +{ + q->ci = 0; + q->pi = 
0; + + atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH); + + /* + * It's not enough to just reset the PI/CI because the H/W may have + * written valid completion entries before it was halted and therefore + * we need to clean the actual queues so we won't process old entries + * when the device is operational again + */ + + memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES); +} + +/* + * hl_eq_init - main initialization function for an event queue object + * + * @hdev: pointer to device structure + * @q: pointer to eq structure + * + * Allocate dma-able memory for the event queue and initialize fields + * Returns 0 on success + */ +int hl_eq_init(struct hl_device *hdev, struct hl_eq *q) +{ + void *p; + + BUILD_BUG_ON(HL_EQ_SIZE_IN_BYTES > HL_PAGE_SIZE); + + p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_EQ_SIZE_IN_BYTES, + &q->bus_address, GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->hdev = hdev; + q->kernel_address = (u64) (uintptr_t) p; + q->ci = 0; + + return 0; +} + +/* + * hl_eq_fini - destroy event queue + * + * @hdev: pointer to device structure + * @q: pointer to eq structure + * + * Free the event queue memory + */ +void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q) +{ + flush_workqueue(hdev->eq_wq); + + hdev->asic_funcs->dma_free_coherent(hdev, HL_EQ_SIZE_IN_BYTES, + (void *) (uintptr_t) q->kernel_address, q->bus_address); +} + +void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q) +{ + q->ci = 0; + + /* + * It's not enough to just reset the PI/CI because the H/W may have + * written valid completion entries before it was halted and therefore + * we need to clean the actual queues so we won't process old entries + * when the device is operational again + */ + + memset((void *) (uintptr_t) q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES); +} diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c new file mode 100644 index 000000000000..3a12fd1a5274 --- /dev/null +++ b/drivers/misc/habanalabs/memory.c @@ -0,0 +1,1723 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include +#include "habanalabs.h" +#include "include/hw_ip/mmu/mmu_general.h" + +#include +#include +#include + +#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT) +#define HL_MMU_DEBUG 0 + +/* + * The va ranges in context object contain a list with the available chunks of + * device virtual memory. + * There is one range for host allocations and one for DRAM allocations. + * + * On initialization each range contains one chunk of all of its available + * virtual range which is a half of the total device virtual range. + * + * On each mapping of physical pages, a suitable virtual range chunk (with a + * minimum size) is selected from the list. If the chunk size equals the + * requested size, the chunk is returned. Otherwise, the chunk is split into + * two chunks - one to return as result and a remainder to stay in the list. + * + * On each Unmapping of a virtual address, the relevant virtual chunk is + * returned to the list. The chunk is added to the list and if its edges match + * the edges of the adjacent chunks (means a contiguous chunk can be created), + * the chunks are merged. + * + * On finish, the list is checked to have only one chunk of all the relevant + * virtual range (which is a half of the device total virtual range). + * If not (means not all mappings were unmapped), a warning is printed. 
+ */ + +/* + * alloc_device_memory - allocate device memory + * + * @ctx : current context + * @args : host parameters containing the requested size + * @ret_handle : result handle + * + * This function does the following: + * - Allocate the requested size rounded up to 2MB pages + * - Return unique handle + */ +static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, + u32 *ret_handle) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + u64 paddr = 0; + u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift; + int handle, rc, i; + bool contiguous; + + num_curr_pgs = 0; + page_size = hdev->asic_prop.dram_page_size; + page_shift = __ffs(page_size); + num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; + total_size = num_pgs << page_shift; + + contiguous = args->flags & HL_MEM_CONTIGUOUS; + + if (contiguous) { + paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); + if (!paddr) { + dev_err(hdev->dev, + "failed to allocate %u huge contiguous pages\n", + num_pgs); + return -ENOMEM; + } + } + + phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); + if (!phys_pg_pack) { + rc = -ENOMEM; + goto pages_pack_err; + } + + phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK; + phys_pg_pack->asid = ctx->asid; + phys_pg_pack->npages = num_pgs; + phys_pg_pack->page_size = page_size; + phys_pg_pack->total_size = total_size; + phys_pg_pack->flags = args->flags; + phys_pg_pack->contiguous = contiguous; + + phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL); + if (!phys_pg_pack->pages) { + rc = -ENOMEM; + goto pages_arr_err; + } + + if (phys_pg_pack->contiguous) { + for (i = 0 ; i < num_pgs ; i++) + phys_pg_pack->pages[i] = paddr + i * page_size; + } else { + for (i = 0 ; i < num_pgs ; i++) { + phys_pg_pack->pages[i] = (u64) gen_pool_alloc( + vm->dram_pg_pool, + page_size); + if (!phys_pg_pack->pages[i]) { + dev_err(hdev->dev, + "ioctl failed to allocate page\n"); + rc = -ENOMEM; + goto page_err; + } + + num_curr_pgs++; + } + } + + spin_lock(&vm->idr_lock); + handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0, + GFP_ATOMIC); + spin_unlock(&vm->idr_lock); + + if (handle < 0) { + dev_err(hdev->dev, "Failed to get handle for page\n"); + rc = -EFAULT; + goto idr_err; + } + + for (i = 0 ; i < num_pgs ; i++) + kref_get(&vm->dram_pg_pool_refcount); + + phys_pg_pack->handle = handle; + + atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem); + atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem); + + *ret_handle = handle; + + return 0; + +idr_err: +page_err: + if (!phys_pg_pack->contiguous) + for (i = 0 ; i < num_curr_pgs ; i++) + gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], + page_size); + + kfree(phys_pg_pack->pages); +pages_arr_err: + kfree(phys_pg_pack); +pages_pack_err: + if (contiguous) + gen_pool_free(vm->dram_pg_pool, paddr, total_size); + + return rc; +} + +/* + * get_userptr_from_host_va - initialize userptr structure from given host + * virtual address + * + * @hdev : habanalabs device structure + * @args : parameters containing the virtual address and size + * @p_userptr : pointer to result userptr structure + * + * This function does the following: + * - Allocate userptr structure + * - Pin the given host memory using the userptr structure + * - Perform DMA mapping to have the DMA addresses of the pages + */ +static int get_userptr_from_host_va(struct hl_device *hdev, + struct hl_mem_in *args, struct hl_userptr **p_userptr) +{ + struct hl_userptr 
*userptr; + int rc; + + userptr = kzalloc(sizeof(*userptr), GFP_KERNEL); + if (!userptr) { + rc = -ENOMEM; + goto userptr_err; + } + + rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr, + args->map_host.mem_size, userptr); + if (rc) { + dev_err(hdev->dev, "Failed to pin host memory\n"); + goto pin_err; + } + + rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, DMA_BIDIRECTIONAL); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto dma_map_err; + } + + userptr->dma_mapped = true; + userptr->dir = DMA_BIDIRECTIONAL; + userptr->vm_type = VM_TYPE_USERPTR; + + *p_userptr = userptr; + + return 0; + +dma_map_err: + hl_unpin_host_memory(hdev, userptr); +pin_err: + kfree(userptr); +userptr_err: + + return rc; +} + +/* + * free_userptr - free userptr structure + * + * @hdev : habanalabs device structure + * @userptr : userptr to free + * + * This function does the following: + * - Unpins the physical pages + * - Frees the userptr structure + */ +static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr) +{ + hl_unpin_host_memory(hdev, userptr); + kfree(userptr); +} + +/* + * dram_pg_pool_do_release - free DRAM pages pool + * + * @ref : pointer to reference object + * + * This function does the following: + * - Frees the idr structure of physical pages handles + * - Frees the generic pool of DRAM physical pages + */ +static void dram_pg_pool_do_release(struct kref *ref) +{ + struct hl_vm *vm = container_of(ref, struct hl_vm, + dram_pg_pool_refcount); + + /* + * free the idr here as only here we know for sure that there are no + * allocated physical pages and hence there are no handles in use + */ + idr_destroy(&vm->phys_pg_pack_handles); + gen_pool_destroy(vm->dram_pg_pool); +} + +/* + * free_phys_pg_pack - free physical page pack + * + * @hdev : habanalabs device structure + * @phys_pg_pack : physical page pack to free + * + * This function does the following: + * - For DRAM memory only, iterate over the pack and free each physical block + * structure by returning it to the general pool + * - Free the hl_vm_phys_pg_pack structure + */ +static void free_phys_pg_pack(struct hl_device *hdev, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct hl_vm *vm = &hdev->vm; + int i; + + if (!phys_pg_pack->created_from_userptr) { + if (phys_pg_pack->contiguous) { + gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0], + phys_pg_pack->total_size); + + for (i = 0; i < phys_pg_pack->npages ; i++) + kref_put(&vm->dram_pg_pool_refcount, + dram_pg_pool_do_release); + } else { + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + gen_pool_free(vm->dram_pg_pool, + phys_pg_pack->pages[i], + phys_pg_pack->page_size); + kref_put(&vm->dram_pg_pool_refcount, + dram_pg_pool_do_release); + } + } + } + + kfree(phys_pg_pack->pages); + kfree(phys_pg_pack); +} + +/* + * free_device_memory - free device memory + * + * @ctx : current context + * @handle : handle of the memory chunk to free + * + * This function does the following: + * - Free the device memory related to the given handle + */ +static int free_device_memory(struct hl_ctx *ctx, u32 handle) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (phys_pg_pack) { + if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) { + dev_err(hdev->dev, "handle %u is mapped, cannot free\n", + handle); + spin_unlock(&vm->idr_lock); + return 
-EINVAL; + } + + /* + * must remove from idr before the freeing of the physical + * pages as the refcount of the pool is also the trigger of the + * idr destroy + */ + idr_remove(&vm->phys_pg_pack_handles, handle); + spin_unlock(&vm->idr_lock); + + atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem); + atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem); + + free_phys_pg_pack(hdev, phys_pg_pack); + } else { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, + "free device memory failed, no match for handle %u\n", + handle); + return -EINVAL; + } + + return 0; +} + +/* + * clear_va_list_locked - free virtual addresses list + * + * @hdev : habanalabs device structure + * @va_list : list of virtual addresses to free + * + * This function does the following: + * - Iterate over the list and free each virtual addresses block + * + * This function should be called only when va_list lock is taken + */ +static void clear_va_list_locked(struct hl_device *hdev, + struct list_head *va_list) +{ + struct hl_vm_va_block *va_block, *tmp; + + list_for_each_entry_safe(va_block, tmp, va_list, node) { + list_del(&va_block->node); + kfree(va_block); + } +} + +/* + * print_va_list_locked - print virtual addresses list + * + * @hdev : habanalabs device structure + * @va_list : list of virtual addresses to print + * + * This function does the following: + * - Iterate over the list and print each virtual addresses block + * + * This function should be called only when va_list lock is taken + */ +static void print_va_list_locked(struct hl_device *hdev, + struct list_head *va_list) +{ +#if HL_MMU_DEBUG + struct hl_vm_va_block *va_block; + + dev_dbg(hdev->dev, "print va list:\n"); + + list_for_each_entry(va_block, va_list, node) + dev_dbg(hdev->dev, + "va block, start: 0x%llx, end: 0x%llx, size: %llu\n", + va_block->start, va_block->end, va_block->size); +#endif +} + +/* + * merge_va_blocks_locked - merge a virtual block if possible + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @va_block : virtual block to merge with adjacent blocks + * + * This function does the following: + * - Merge the given blocks with the adjacent blocks if their virtual ranges + * create a contiguous virtual range + * + * This Function should be called only when va_list lock is taken + */ +static void merge_va_blocks_locked(struct hl_device *hdev, + struct list_head *va_list, struct hl_vm_va_block *va_block) +{ + struct hl_vm_va_block *prev, *next; + + prev = list_prev_entry(va_block, node); + if (&prev->node != va_list && prev->end + 1 == va_block->start) { + prev->end = va_block->end; + prev->size = prev->end - prev->start; + list_del(&va_block->node); + kfree(va_block); + va_block = prev; + } + + next = list_next_entry(va_block, node); + if (&next->node != va_list && va_block->end + 1 == next->start) { + next->start = va_block->start; + next->size = next->end - next->start; + list_del(&va_block->node); + kfree(va_block); + } +} + +/* + * add_va_block_locked - add a virtual block to the virtual addresses list + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @start : start virtual address + * @end : end virtual address + * + * This function does the following: + * - Add the given block to the virtual blocks list and merge with other + * blocks if a contiguous virtual block can be created + * + * This Function should be called only when va_list lock is taken + */ +static int 
add_va_block_locked(struct hl_device *hdev, + struct list_head *va_list, u64 start, u64 end) +{ + struct hl_vm_va_block *va_block, *res = NULL; + u64 size = end - start; + + print_va_list_locked(hdev, va_list); + + list_for_each_entry(va_block, va_list, node) { + /* TODO: remove upon matureness */ + if (hl_mem_area_crosses_range(start, size, va_block->start, + va_block->end)) { + dev_err(hdev->dev, + "block crossing ranges at start 0x%llx, end 0x%llx\n", + va_block->start, va_block->end); + return -EINVAL; + } + + if (va_block->end < start) + res = va_block; + } + + va_block = kmalloc(sizeof(*va_block), GFP_KERNEL); + if (!va_block) + return -ENOMEM; + + va_block->start = start; + va_block->end = end; + va_block->size = size; + + if (!res) + list_add(&va_block->node, va_list); + else + list_add(&va_block->node, &res->node); + + merge_va_blocks_locked(hdev, va_list, va_block); + + print_va_list_locked(hdev, va_list); + + return 0; +} + +/* + * add_va_block - wrapper for add_va_block_locked + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @start : start virtual address + * @end : end virtual address + * + * This function does the following: + * - Takes the list lock and calls add_va_block_locked + */ +static inline int add_va_block(struct hl_device *hdev, + struct hl_va_range *va_range, u64 start, u64 end) +{ + int rc; + + mutex_lock(&va_range->lock); + rc = add_va_block_locked(hdev, &va_range->list, start, end); + mutex_unlock(&va_range->lock); + + return rc; +} + +/* + * get_va_block - get a virtual block with the requested size + * + * @hdev : pointer to the habanalabs device structure + * @va_range : pointer to the virtual addresses range + * @size : requested block size + * @hint_addr : hint for request address by the user + * @is_userptr : is host or DRAM memory + * + * This function does the following: + * - Iterate on the virtual block list to find a suitable virtual block for the + * requested size + * - Reserve the requested block and update the list + * - Return the start address of the virtual block + */ +static u64 get_va_block(struct hl_device *hdev, + struct hl_va_range *va_range, u32 size, u64 hint_addr, + bool is_userptr) +{ + struct hl_vm_va_block *va_block, *new_va_block = NULL; + u64 valid_start, valid_size, prev_start, prev_end, page_mask, + res_valid_start = 0, res_valid_size = 0; + u32 page_size; + bool add_prev = false; + + if (is_userptr) { + /* + * We cannot know if the user allocated memory with huge pages + * or not, hence we continue with the biggest possible + * granularity. 
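+ * As a result the device virtual block is always carved out on a
+ * 2MB-aligned boundary, even when the host buffer itself is only
+ * 4KB aligned.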
+ */ + page_size = PAGE_SIZE_2MB; + page_mask = PAGE_MASK_2MB; + } else { + page_size = hdev->asic_prop.dram_page_size; + page_mask = ~((u64)page_size - 1); + } + + mutex_lock(&va_range->lock); + + print_va_list_locked(hdev, &va_range->list); + + list_for_each_entry(va_block, &va_range->list, node) { + /* calc the first possible aligned addr */ + valid_start = va_block->start; + + + if (valid_start & (page_size - 1)) { + valid_start &= page_mask; + valid_start += page_size; + if (valid_start > va_block->end) + continue; + } + + valid_size = va_block->end - valid_start; + + if (valid_size >= size && + (!new_va_block || valid_size < res_valid_size)) { + + new_va_block = va_block; + res_valid_start = valid_start; + res_valid_size = valid_size; + } + + if (hint_addr && hint_addr >= valid_start && + ((hint_addr + size) <= va_block->end)) { + new_va_block = va_block; + res_valid_start = hint_addr; + res_valid_size = valid_size; + break; + } + } + + if (!new_va_block) { + dev_err(hdev->dev, "no available va block for size %u\n", size); + goto out; + } + + if (res_valid_start > new_va_block->start) { + prev_start = new_va_block->start; + prev_end = res_valid_start - 1; + + new_va_block->start = res_valid_start; + new_va_block->size = res_valid_size; + + add_prev = true; + } + + if (new_va_block->size > size) { + new_va_block->start += size; + new_va_block->size = new_va_block->end - new_va_block->start; + } else { + list_del(&new_va_block->node); + kfree(new_va_block); + } + + if (add_prev) + add_va_block_locked(hdev, &va_range->list, prev_start, + prev_end); + + print_va_list_locked(hdev, &va_range->list); +out: + mutex_unlock(&va_range->lock); + + return res_valid_start; +} + +/* + * get_sg_info - get number of pages and the DMA address from SG list + * + * @sg : the SG list + * @dma_addr : pointer to DMA address to return + * + * Calculate the number of consecutive pages described by the SG list. Take the + * offset of the address in the first page, add to it the length and round it up + * to the number of needed pages. + */ +static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr) +{ + *dma_addr = sg_dma_address(sg); + + return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) + + (PAGE_SIZE - 1)) >> PAGE_SHIFT; +} + +/* + * init_phys_pg_pack_from_userptr - initialize physical page pack from host + * memory + * + * @ctx : current context + * @userptr : userptr to initialize from + * @pphys_pg_pack : res pointer + * + * This function does the following: + * - Pin the physical pages related to the given virtual block + * - Create a physical page pack from the physical pages related to the given + * virtual block + */ +static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, + struct hl_userptr *userptr, + struct hl_vm_phys_pg_pack **pphys_pg_pack) +{ + struct hl_vm_phys_pg_pack *phys_pg_pack; + struct scatterlist *sg; + dma_addr_t dma_addr; + u64 page_mask; + u32 npages, total_npages, page_size = PAGE_SIZE; + bool first = true, is_huge_page_opt = true; + int rc, i, j; + + phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); + if (!phys_pg_pack) + return -ENOMEM; + + phys_pg_pack->vm_type = userptr->vm_type; + phys_pg_pack->created_from_userptr = true; + phys_pg_pack->asid = ctx->asid; + atomic_set(&phys_pg_pack->mapping_cnt, 1); + + /* Only if all dma_addrs are aligned to 2MB and their + * sizes is at least 2MB, we can use huge page mapping. 
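+ * (roughly: each scatter-gather entry must span a whole number of 2MB
+ * pages and fall on a 2MB boundary, the first entry's in-page offset
+ * excepted).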
+ * We limit the 2MB optimization to this condition, + * since later on we acquire the related VA range as one + * consecutive block. + */ + total_npages = 0; + for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + npages = get_sg_info(sg, &dma_addr); + + total_npages += npages; + + if (first) { + first = false; + dma_addr &= PAGE_MASK_2MB; + } + + if ((npages % PGS_IN_2MB_PAGE) || + (dma_addr & (PAGE_SIZE_2MB - 1))) + is_huge_page_opt = false; + } + + if (is_huge_page_opt) { + page_size = PAGE_SIZE_2MB; + total_npages /= PGS_IN_2MB_PAGE; + } + + page_mask = ~(((u64) page_size) - 1); + + phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL); + if (!phys_pg_pack->pages) { + rc = -ENOMEM; + goto page_pack_arr_mem_err; + } + + phys_pg_pack->npages = total_npages; + phys_pg_pack->page_size = page_size; + phys_pg_pack->total_size = total_npages * page_size; + + j = 0; + first = true; + for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + npages = get_sg_info(sg, &dma_addr); + + /* align down to physical page size and save the offset */ + if (first) { + first = false; + phys_pg_pack->offset = dma_addr & (page_size - 1); + dma_addr &= page_mask; + } + + while (npages) { + phys_pg_pack->pages[j++] = dma_addr; + dma_addr += page_size; + + if (is_huge_page_opt) + npages -= PGS_IN_2MB_PAGE; + else + npages--; + } + } + + *pphys_pg_pack = phys_pg_pack; + + return 0; + +page_pack_arr_mem_err: + kfree(phys_pg_pack); + + return rc; +} + +/* + * map_phys_page_pack - maps the physical page pack + * + * @ctx : current context + * @vaddr : start address of the virtual area to map from + * @phys_pg_pack : the pack of physical pages to map to + * + * This function does the following: + * - Maps each chunk of virtual memory to matching physical chunk + * - Stores number of successful mappings in the given argument + * - Returns 0 on success, error code otherwise. 
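+ * - On failure, unmaps the pages that were already mapped before returning
+ *   the error code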
+ */ +static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct hl_device *hdev = ctx->hdev; + u64 next_vaddr = vaddr, paddr; + u32 page_size = phys_pg_pack->page_size; + int i, rc = 0, mapped_pg_cnt = 0; + + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + paddr = phys_pg_pack->pages[i]; + + /* For accessing the host we need to turn on bit 39 */ + if (phys_pg_pack->created_from_userptr) + paddr += hdev->asic_prop.host_phys_base_address; + + rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); + if (rc) { + dev_err(hdev->dev, + "map failed for handle %u, npages: %d, mapped: %d", + phys_pg_pack->handle, phys_pg_pack->npages, + mapped_pg_cnt); + goto err; + } + + mapped_pg_cnt++; + next_vaddr += page_size; + } + + return 0; + +err: + next_vaddr = vaddr; + for (i = 0 ; i < mapped_pg_cnt ; i++) { + if (hl_mmu_unmap(ctx, next_vaddr, page_size)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n", + phys_pg_pack->handle, next_vaddr, + phys_pg_pack->pages[i], page_size); + + next_vaddr += page_size; + } + + return rc; +} + +static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, + u64 *paddr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + u32 handle; + + handle = lower_32_bits(args->map_device.handle); + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (!phys_pg_pack) { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, "no match for handle %u\n", handle); + return -EINVAL; + } + + *paddr = phys_pg_pack->pages[0]; + + spin_unlock(&vm->idr_lock); + + return 0; +} + +/* + * map_device_va - map the given memory + * + * @ctx : current context + * @args : host parameters with handle/host virtual address + * @device_addr : pointer to result device virtual address + * + * This function does the following: + * - If given a physical device memory handle, map to a device virtual block + * and return the start address of this block + * - If given a host virtual address and size, find the related physical pages, + * map a device virtual block to this pages and return the start address of + * this block + */ +static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, + u64 *device_addr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + struct hl_userptr *userptr = NULL; + struct hl_vm_hash_node *hnode; + enum vm_type_t *vm_type; + u64 ret_vaddr, hint_addr; + u32 handle = 0; + int rc; + bool is_userptr = args->flags & HL_MEM_USERPTR; + + /* Assume failure */ + *device_addr = 0; + + if (is_userptr) { + rc = get_userptr_from_host_va(hdev, args, &userptr); + if (rc) { + dev_err(hdev->dev, "failed to get userptr from va\n"); + return rc; + } + + rc = init_phys_pg_pack_from_userptr(ctx, userptr, + &phys_pg_pack); + if (rc) { + dev_err(hdev->dev, + "unable to init page pack for vaddr 0x%llx\n", + args->map_host.host_virt_addr); + goto init_page_pack_err; + } + + vm_type = (enum vm_type_t *) userptr; + hint_addr = args->map_host.hint_addr; + } else { + handle = lower_32_bits(args->map_device.handle); + + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (!phys_pg_pack) { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, + "no match for handle %u\n", handle); + return -EINVAL; + } + + /* increment now to avoid freeing device memory while mapping */ + 
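+ /* the matching decrement is done in unmap_device_va, or below on failure */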
atomic_inc(&phys_pg_pack->mapping_cnt); + + spin_unlock(&vm->idr_lock); + + vm_type = (enum vm_type_t *) phys_pg_pack; + + hint_addr = args->map_device.hint_addr; + } + + /* + * relevant for mapping device physical memory only, as host memory is + * implicitly shared + */ + if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) && + phys_pg_pack->asid != ctx->asid) { + dev_err(hdev->dev, + "Failed to map memory, handle %u is not shared\n", + handle); + rc = -EPERM; + goto shared_err; + } + + hnode = kzalloc(sizeof(*hnode), GFP_KERNEL); + if (!hnode) { + rc = -ENOMEM; + goto hnode_err; + } + + ret_vaddr = get_va_block(hdev, + is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, + phys_pg_pack->total_size, hint_addr, is_userptr); + if (!ret_vaddr) { + dev_err(hdev->dev, "no available va block for handle %u\n", + handle); + rc = -ENOMEM; + goto va_block_err; + } + + mutex_lock(&ctx->mmu_lock); + + rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack); + if (rc) { + mutex_unlock(&ctx->mmu_lock); + dev_err(hdev->dev, "mapping page pack failed for handle %u\n", + handle); + goto map_err; + } + + hdev->asic_funcs->mmu_invalidate_cache(hdev, false); + + mutex_unlock(&ctx->mmu_lock); + + ret_vaddr += phys_pg_pack->offset; + + hnode->ptr = vm_type; + hnode->vaddr = ret_vaddr; + + mutex_lock(&ctx->mem_hash_lock); + hash_add(ctx->mem_hash, &hnode->node, ret_vaddr); + mutex_unlock(&ctx->mem_hash_lock); + + *device_addr = ret_vaddr; + + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); + + return 0; + +map_err: + if (add_va_block(hdev, + is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, + ret_vaddr, + ret_vaddr + phys_pg_pack->total_size - 1)) + dev_warn(hdev->dev, + "release va block failed for handle 0x%x, vaddr: 0x%llx\n", + handle, ret_vaddr); + +va_block_err: + kfree(hnode); +hnode_err: +shared_err: + atomic_dec(&phys_pg_pack->mapping_cnt); + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); +init_page_pack_err: + if (is_userptr) + free_userptr(hdev, userptr); + + return rc; +} + +/* + * unmap_device_va - unmap the given device virtual address + * + * @ctx : current context + * @vaddr : device virtual address to unmap + * + * This function does the following: + * - Unmap the physical pages related to the given virtual address + * - return the device virtual block to the virtual block list + */ +static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + struct hl_vm_hash_node *hnode = NULL; + struct hl_userptr *userptr = NULL; + enum vm_type_t *vm_type; + u64 next_vaddr; + u32 page_size; + bool is_userptr; + int i, rc; + + /* protect from double entrance */ + mutex_lock(&ctx->mem_hash_lock); + hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr) + if (vaddr == hnode->vaddr) + break; + + if (!hnode) { + mutex_unlock(&ctx->mem_hash_lock); + dev_err(hdev->dev, + "unmap failed, no mem hnode for vaddr 0x%llx\n", + vaddr); + return -EINVAL; + } + + hash_del(&hnode->node); + mutex_unlock(&ctx->mem_hash_lock); + + vm_type = hnode->ptr; + + if (*vm_type == VM_TYPE_USERPTR) { + is_userptr = true; + userptr = hnode->ptr; + rc = init_phys_pg_pack_from_userptr(ctx, userptr, + &phys_pg_pack); + if (rc) { + dev_err(hdev->dev, + "unable to init page pack for vaddr 0x%llx\n", + vaddr); + goto vm_type_err; + } + } else if (*vm_type == VM_TYPE_PHYS_PACK) { + is_userptr = false; + phys_pg_pack = hnode->ptr; + } else { + dev_warn(hdev->dev, + "unmap failed, unknown vm desc for vaddr 
0x%llx\n", + vaddr); + rc = -EFAULT; + goto vm_type_err; + } + + if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) { + dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr); + rc = -EINVAL; + goto mapping_cnt_err; + } + + page_size = phys_pg_pack->page_size; + vaddr &= ~(((u64) page_size) - 1); + + next_vaddr = vaddr; + + mutex_lock(&ctx->mmu_lock); + + for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) + if (hl_mmu_unmap(ctx, next_vaddr, page_size)) + dev_warn_ratelimited(hdev->dev, + "unmap failed for vaddr: 0x%llx\n", next_vaddr); + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true); + + mutex_unlock(&ctx->mmu_lock); + + if (add_va_block(hdev, + is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, + vaddr, + vaddr + phys_pg_pack->total_size - 1)) + dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n", + vaddr); + + atomic_dec(&phys_pg_pack->mapping_cnt); + kfree(hnode); + + if (is_userptr) { + free_phys_pg_pack(hdev, phys_pg_pack); + free_userptr(hdev, userptr); + } + + return 0; + +mapping_cnt_err: + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); +vm_type_err: + mutex_lock(&ctx->mem_hash_lock); + hash_add(ctx->mem_hash, &hnode->node, vaddr); + mutex_unlock(&ctx->mem_hash_lock); + + return rc; +} + +int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) +{ + union hl_mem_args *args = data; + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; + u64 device_addr = 0; + u32 handle = 0; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(hdev->dev, + "Device is disabled or in reset. Can't execute memory IOCTL\n"); + return -EBUSY; + } + + if (hdev->mmu_enable) { + switch (args->in.op) { + case HL_MEM_OP_ALLOC: + if (!hdev->dram_supports_virtual_memory) { + dev_err(hdev->dev, + "DRAM alloc is not supported\n"); + rc = -EINVAL; + goto out; + } + if (args->in.alloc.mem_size == 0) { + dev_err(hdev->dev, + "alloc size must be larger than 0\n"); + rc = -EINVAL; + goto out; + } + rc = alloc_device_memory(ctx, &args->in, &handle); + + memset(args, 0, sizeof(*args)); + args->out.handle = (__u64) handle; + break; + + case HL_MEM_OP_FREE: + if (!hdev->dram_supports_virtual_memory) { + dev_err(hdev->dev, + "DRAM free is not supported\n"); + rc = -EINVAL; + goto out; + } + rc = free_device_memory(ctx, args->in.free.handle); + break; + + case HL_MEM_OP_MAP: + rc = map_device_va(ctx, &args->in, &device_addr); + + memset(args, 0, sizeof(*args)); + args->out.device_virt_addr = device_addr; + break; + + case HL_MEM_OP_UNMAP: + rc = unmap_device_va(ctx, + args->in.unmap.device_virt_addr); + break; + + default: + dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); + rc = -ENOTTY; + break; + } + } else { + switch (args->in.op) { + case HL_MEM_OP_ALLOC: + if (args->in.alloc.mem_size == 0) { + dev_err(hdev->dev, + "alloc size must be larger than 0\n"); + rc = -EINVAL; + goto out; + } + + /* Force contiguous as there are no real MMU + * translations to overcome physical memory gaps + */ + args->in.flags |= HL_MEM_CONTIGUOUS; + rc = alloc_device_memory(ctx, &args->in, &handle); + + memset(args, 0, sizeof(*args)); + args->out.handle = (__u64) handle; + break; + + case HL_MEM_OP_FREE: + rc = free_device_memory(ctx, args->in.free.handle); + break; + + case HL_MEM_OP_MAP: + if (args->in.flags & HL_MEM_USERPTR) { + device_addr = args->in.map_host.host_virt_addr; + rc = 0; + } else { + rc = get_paddr_from_handle(ctx, &args->in, + &device_addr); + } + + memset(args, 0, sizeof(*args)); + args->out.device_virt_addr = 
device_addr; + break; + + case HL_MEM_OP_UNMAP: + rc = 0; + break; + + default: + dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); + rc = -ENOTTY; + break; + } + } + +out: + return rc; +} + +/* + * hl_pin_host_memory - pins a chunk of host memory + * + * @hdev : pointer to the habanalabs device structure + * @addr : the user-space virtual address of the memory area + * @size : the size of the memory area + * @userptr : pointer to hl_userptr structure + * + * This function does the following: + * - Pins the physical pages + * - Create a SG list from those pages + */ +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, + struct hl_userptr *userptr) +{ + u64 start, end; + u32 npages, offset; + int rc; + + if (!size) { + dev_err(hdev->dev, "size to pin is invalid - %llu\n", size); + return -EINVAL; + } + + if (!access_ok((void __user *) (uintptr_t) addr, size)) { + dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr); + return -EFAULT; + } + + /* + * If the combination of the address and size requested for this memory + * region causes an integer overflow, return error. + */ + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) { + dev_err(hdev->dev, + "user pointer 0x%llx + %llu causes integer overflow\n", + addr, size); + return -EINVAL; + } + + start = addr & PAGE_MASK; + offset = addr & ~PAGE_MASK; + end = PAGE_ALIGN(addr + size); + npages = (end - start) >> PAGE_SHIFT; + + userptr->size = size; + userptr->addr = addr; + userptr->dma_mapped = false; + INIT_LIST_HEAD(&userptr->job_node); + + userptr->vec = frame_vector_create(npages); + if (!userptr->vec) { + dev_err(hdev->dev, "Failed to create frame vector\n"); + return -ENOMEM; + } + + rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, + userptr->vec); + + if (rc != npages) { + dev_err(hdev->dev, + "Failed to map host memory, user ptr probably wrong\n"); + if (rc < 0) + goto destroy_framevec; + rc = -EFAULT; + goto put_framevec; + } + + if (frame_vector_to_pages(userptr->vec) < 0) { + dev_err(hdev->dev, + "Failed to translate frame vector to pages\n"); + rc = -EFAULT; + goto put_framevec; + } + + userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC); + if (!userptr->sgt) { + rc = -ENOMEM; + goto put_framevec; + } + + rc = sg_alloc_table_from_pages(userptr->sgt, + frame_vector_pages(userptr->vec), + npages, offset, size, GFP_ATOMIC); + if (rc < 0) { + dev_err(hdev->dev, "failed to create SG table from pages\n"); + goto free_sgt; + } + + hl_debugfs_add_userptr(hdev, userptr); + + return 0; + +free_sgt: + kfree(userptr->sgt); +put_framevec: + put_vaddr_frames(userptr->vec); +destroy_framevec: + frame_vector_destroy(userptr->vec); + return rc; +} + +/* + * hl_unpin_host_memory - unpins a chunk of host memory + * + * @hdev : pointer to the habanalabs device structure + * @userptr : pointer to hl_userptr structure + * + * This function does the following: + * - Unpins the physical pages related to the host memory + * - Free the SG list + */ +int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) +{ + struct page **pages; + + hl_debugfs_remove_userptr(hdev, userptr); + + if (userptr->dma_mapped) + hdev->asic_funcs->hl_dma_unmap_sg(hdev, + userptr->sgt->sgl, + userptr->sgt->nents, + userptr->dir); + + pages = frame_vector_pages(userptr->vec); + if (!IS_ERR(pages)) { + int i; + + for (i = 0; i < frame_vector_count(userptr->vec); i++) + set_page_dirty_lock(pages[i]); + } + put_vaddr_frames(userptr->vec); + frame_vector_destroy(userptr->vec); + + 
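	/*
	 * The pinned pages were released and marked dirty above; all that is
	 * left is to detach the userptr from its job list and free the SG
	 * table built by hl_pin_host_memory(). A typical caller pairs the two
	 * helpers roughly like this (sketch only; allocating and freeing the
	 * hl_userptr is assumed to be the caller's responsibility):
	 *
	 *	struct hl_userptr *u = kzalloc(sizeof(*u), GFP_KERNEL);
	 *
	 *	rc = hl_pin_host_memory(hdev, user_addr, size, u);
	 *	if (!rc) {
	 *		... use u->sgt for DMA mapping ...
	 *		hl_unpin_host_memory(hdev, u);
	 *	}
	 *	kfree(u);
	 */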
list_del(&userptr->job_node);
+
+	sg_free_table(userptr->sgt);
+	kfree(userptr->sgt);
+
+	return 0;
+}
+
+/*
+ * hl_userptr_delete_list - clear a userptr list
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @userptr_list : pointer to the list to clear
+ *
+ * This function does the following:
+ * - Iterates over the list, unpins the host memory and frees each userptr
+ *   structure
+ */
+void hl_userptr_delete_list(struct hl_device *hdev,
+				struct list_head *userptr_list)
+{
+	struct hl_userptr *userptr, *tmp;
+
+	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
+		hl_unpin_host_memory(hdev, userptr);
+		kfree(userptr);
+	}
+
+	INIT_LIST_HEAD(userptr_list);
+}
+
+/*
+ * hl_userptr_is_pinned - returns whether the given host memory is pinned
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @addr : user virtual address of the memory area
+ * @size : size of the memory area
+ * @userptr_list : pointer to the list to search
+ * @userptr : output pointer to the matching userptr, if found
+ *
+ * This function does the following:
+ * - Iterates over the list and checks if a userptr with the given address and
+ *   size is in it, which means it is pinned. If so, returns true and sets
+ *   *userptr, otherwise returns false
+ */
+bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
+				u32 size, struct list_head *userptr_list,
+				struct hl_userptr **userptr)
+{
+	list_for_each_entry((*userptr), userptr_list, job_node) {
+		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * hl_va_range_init - initialize virtual addresses range
+ *
+ * @hdev : pointer to the habanalabs device structure
+ * @va_range : pointer to the range to initialize
+ * @start : range start address
+ * @end : range end address
+ *
+ * This function does the following:
+ * - Initializes the virtual addresses list of the given range with the given
+ *   addresses.
+ */ +static int hl_va_range_init(struct hl_device *hdev, + struct hl_va_range *va_range, u64 start, u64 end) +{ + int rc; + + INIT_LIST_HEAD(&va_range->list); + + /* PAGE_SIZE alignment */ + + if (start & (PAGE_SIZE - 1)) { + start &= PAGE_MASK; + start += PAGE_SIZE; + } + + if (end & (PAGE_SIZE - 1)) + end &= PAGE_MASK; + + if (start >= end) { + dev_err(hdev->dev, "too small vm range for va list\n"); + return -EFAULT; + } + + rc = add_va_block(hdev, va_range, start, end); + + if (rc) { + dev_err(hdev->dev, "Failed to init host va list\n"); + return rc; + } + + va_range->start_addr = start; + va_range->end_addr = end; + + return 0; +} + +/* + * hl_vm_ctx_init_with_ranges - initialize virtual memory for context + * + * @ctx : pointer to the habanalabs context structure + * @host_range_start : host virtual addresses range start + * @host_range_end : host virtual addresses range end + * @dram_range_start : dram virtual addresses range start + * @dram_range_end : dram virtual addresses range end + * + * This function initializes the following: + * - MMU for context + * - Virtual address to area descriptor hashtable + * - Virtual block list of available virtual memory + */ +static int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start, + u64 host_range_end, u64 dram_range_start, + u64 dram_range_end) +{ + struct hl_device *hdev = ctx->hdev; + int rc; + + rc = hl_mmu_ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "failed to init context %d\n", ctx->asid); + return rc; + } + + mutex_init(&ctx->mem_hash_lock); + hash_init(ctx->mem_hash); + + mutex_init(&ctx->host_va_range.lock); + + rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start, + host_range_end); + if (rc) { + dev_err(hdev->dev, "failed to init host vm range\n"); + goto host_vm_err; + } + + mutex_init(&ctx->dram_va_range.lock); + + rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start, + dram_range_end); + if (rc) { + dev_err(hdev->dev, "failed to init dram vm range\n"); + goto dram_vm_err; + } + + hl_debugfs_add_ctx_mem_hash(hdev, ctx); + + return 0; + +dram_vm_err: + mutex_destroy(&ctx->dram_va_range.lock); + + mutex_lock(&ctx->host_va_range.lock); + clear_va_list_locked(hdev, &ctx->host_va_range.list); + mutex_unlock(&ctx->host_va_range.lock); +host_vm_err: + mutex_destroy(&ctx->host_va_range.lock); + mutex_destroy(&ctx->mem_hash_lock); + hl_mmu_ctx_fini(ctx); + + return rc; +} + +int hl_vm_ctx_init(struct hl_ctx *ctx) +{ + struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; + u64 host_range_start, host_range_end, dram_range_start, + dram_range_end; + + atomic64_set(&ctx->dram_phys_mem, 0); + + /* + * - If MMU is enabled, init the ranges as usual. + * - If MMU is disabled, in case of host mapping, the returned address + * is the given one. + * In case of DRAM mapping, the returned address is the physical + * address of the memory related to the given handle. 
+ */ + if (ctx->hdev->mmu_enable) { + dram_range_start = prop->va_space_dram_start_address; + dram_range_end = prop->va_space_dram_end_address; + host_range_start = prop->va_space_host_start_address; + host_range_end = prop->va_space_host_end_address; + } else { + dram_range_start = prop->dram_user_base_address; + dram_range_end = prop->dram_end_address; + host_range_start = prop->dram_user_base_address; + host_range_end = prop->dram_end_address; + } + + return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, + dram_range_start, dram_range_end); +} + +/* + * hl_va_range_fini - clear a virtual addresses range + * + * @hdev : pointer to the habanalabs structure + * va_range : pointer to virtual addresses range + * + * This function initializes the following: + * - Checks that the given range contains the whole initial range + * - Frees the virtual addresses block list and its lock + */ +static void hl_va_range_fini(struct hl_device *hdev, + struct hl_va_range *va_range) +{ + struct hl_vm_va_block *va_block; + + if (list_empty(&va_range->list)) { + dev_warn(hdev->dev, + "va list should not be empty on cleanup!\n"); + goto out; + } + + if (!list_is_singular(&va_range->list)) { + dev_warn(hdev->dev, + "va list should not contain multiple blocks on cleanup!\n"); + goto free_va_list; + } + + va_block = list_first_entry(&va_range->list, typeof(*va_block), node); + + if (va_block->start != va_range->start_addr || + va_block->end != va_range->end_addr) { + dev_warn(hdev->dev, + "wrong va block on cleanup, from 0x%llx to 0x%llx\n", + va_block->start, va_block->end); + goto free_va_list; + } + +free_va_list: + mutex_lock(&va_range->lock); + clear_va_list_locked(hdev, &va_range->list); + mutex_unlock(&va_range->lock); + +out: + mutex_destroy(&va_range->lock); +} + +/* + * hl_vm_ctx_fini - virtual memory teardown of context + * + * @ctx : pointer to the habanalabs context structure + * + * This function perform teardown the following: + * - Virtual block list of available virtual memory + * - Virtual address to area descriptor hashtable + * - MMU for context + * + * In addition this function does the following: + * - Unmaps the existing hashtable nodes if the hashtable is not empty. The + * hashtable should be empty as no valid mappings should exist at this + * point. + * - Frees any existing physical page list from the idr which relates to the + * current context asid. + * - This function checks the virtual block list for correctness. At this point + * the list should contain one element which describes the whole virtual + * memory range of the context. Otherwise, a warning is printed. 
+ */ +void hl_vm_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_list; + struct hl_vm_hash_node *hnode; + struct hlist_node *tmp_node; + int i; + + hl_debugfs_remove_ctx_mem_hash(hdev, ctx); + + if (!hash_empty(ctx->mem_hash)) + dev_notice(hdev->dev, "ctx is freed while it has va in use\n"); + + hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) { + dev_dbg(hdev->dev, + "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n", + hnode->vaddr, ctx->asid); + unmap_device_va(ctx, hnode->vaddr); + } + + spin_lock(&vm->idr_lock); + idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) + if (phys_pg_list->asid == ctx->asid) { + dev_dbg(hdev->dev, + "page list 0x%p of asid %d is still alive\n", + phys_pg_list, ctx->asid); + free_phys_pg_pack(hdev, phys_pg_list); + idr_remove(&vm->phys_pg_pack_handles, i); + } + spin_unlock(&vm->idr_lock); + + hl_va_range_fini(hdev, &ctx->dram_va_range); + hl_va_range_fini(hdev, &ctx->host_va_range); + + mutex_destroy(&ctx->mem_hash_lock); + hl_mmu_ctx_fini(ctx); +} + +/* + * hl_vm_init - initialize virtual memory module + * + * @hdev : pointer to the habanalabs device structure + * + * This function initializes the following: + * - MMU module + * - DRAM physical pages pool of 2MB + * - Idr for device memory allocation handles + */ +int hl_vm_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_vm *vm = &hdev->vm; + int rc; + + rc = hl_mmu_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to init MMU\n"); + return rc; + } + + vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1); + if (!vm->dram_pg_pool) { + dev_err(hdev->dev, "Failed to create dram page pool\n"); + rc = -ENOMEM; + goto pool_create_err; + } + + kref_init(&vm->dram_pg_pool_refcount); + + rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address, + prop->dram_end_address - prop->dram_user_base_address, + -1); + + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to dram page pool %d\n", rc); + goto pool_add_err; + } + + spin_lock_init(&vm->idr_lock); + idr_init(&vm->phys_pg_pack_handles); + + atomic64_set(&hdev->dram_used_mem, 0); + + vm->init_done = true; + + return 0; + +pool_add_err: + gen_pool_destroy(vm->dram_pg_pool); +pool_create_err: + hl_mmu_fini(hdev); + + return rc; +} + +/* + * hl_vm_fini - virtual memory module teardown + * + * @hdev : pointer to the habanalabs device structure + * + * This function perform teardown to the following: + * - Idr for device memory allocation handles + * - DRAM physical pages pool of 2MB + * - MMU module + */ +void hl_vm_fini(struct hl_device *hdev) +{ + struct hl_vm *vm = &hdev->vm; + + if (!vm->init_done) + return; + + /* + * At this point all the contexts should be freed and hence no DRAM + * memory should be in use. Hence the DRAM pool should be freed here. + */ + if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1) + dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n", + __func__); + + hl_mmu_fini(hdev); + + vm->init_done = false; +} diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c new file mode 100644 index 000000000000..2f2e99cb2743 --- /dev/null +++ b/drivers/misc/habanalabs/mmu.c @@ -0,0 +1,906 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "habanalabs.h" +#include "include/hw_ip/mmu/mmu_general.h" + +#include +#include + +static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr) +{ + struct pgt_info *pgt_info = NULL; + + hash_for_each_possible(ctx->mmu_hash, pgt_info, node, + (unsigned long) addr) + if (addr == pgt_info->addr) + break; + + return pgt_info; +} + +static void free_hop(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + + gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr, + ctx->hdev->asic_prop.mmu_hop_table_size); + hash_del(&pgt_info->node); + + kfree(pgt_info); +} + +static u64 alloc_hop(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct pgt_info *pgt_info; + u64 addr; + + pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL); + if (!pgt_info) + return ULLONG_MAX; + + addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool, + hdev->asic_prop.mmu_hop_table_size); + if (!addr) { + dev_err(hdev->dev, "failed to allocate page\n"); + kfree(pgt_info); + return ULLONG_MAX; + } + + pgt_info->addr = addr; + pgt_info->ctx = ctx; + pgt_info->num_of_ptes = 0; + hash_add(ctx->mmu_hash, &pgt_info->node, addr); + + return addr; +} + +static inline void clear_pte(struct hl_device *hdev, u64 pte_addr) +{ + /* clear the last and present bits */ + hdev->asic_funcs->write_pte(hdev, pte_addr, 0); +} + +static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + get_pgt_info(ctx, hop_addr)->num_of_ptes++; +} + +/* + * put_pte - decrement the num of ptes and free the hop if possible + * + * @ctx: pointer to the context structure + * @hop_addr: addr of the hop + * + * This function returns the number of ptes left on this hop. If the number is + * 0, it means the pte was freed. + */ +static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + int num_of_ptes_left; + + pgt_info->num_of_ptes--; + + /* + * Need to save the number of ptes left because free_hop might free + * the pgt_info + */ + num_of_ptes_left = pgt_info->num_of_ptes; + if (!num_of_ptes_left) + free_hop(ctx, hop_addr); + + return num_of_ptes_left; +} + +static inline u64 get_hop0_addr(struct hl_ctx *ctx) +{ + return ctx->hdev->asic_prop.mmu_pgt_addr + + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr, u64 mask, u64 shift) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & mask) >> shift); +} + +static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT); +} + +static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT); +} + +static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT); +} + +static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT); +} + +static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT); +} + +static inline u64 get_next_hop_addr(u64 curr_pte) +{ + if (curr_pte & PAGE_PRESENT_MASK) + return curr_pte & PHYS_ADDR_MASK; + else + return ULLONG_MAX; +} + +static inline u64 
get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, + bool *is_new_hop) +{ + u64 hop_addr = get_next_hop_addr(curr_pte); + + if (hop_addr == ULLONG_MAX) { + hop_addr = alloc_hop(ctx); + *is_new_hop = (hop_addr != ULLONG_MAX); + } + + return hop_addr; +} + +/* + * hl_mmu_init - init the mmu module + * + * @hdev: pointer to the habanalabs device structure + * + * This function does the following: + * - Allocate max_asid zeroed hop0 pgts so no mapping is available + * - Enable mmu in hw + * - Invalidate the mmu cache + * - Create a pool of pages for pgts + * - Returns 0 on success + * + * This function depends on DMA QMAN to be working! + */ +int hl_mmu_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc; + + if (!hdev->mmu_enable) + return 0; + + /* MMU HW init was already done in device hw_init() */ + + mutex_init(&hdev->mmu_cache_lock); + + hdev->mmu_pgt_pool = + gen_pool_create(__ffs(prop->mmu_hop_table_size), -1); + + if (!hdev->mmu_pgt_pool) { + dev_err(hdev->dev, "Failed to create page gen pool\n"); + rc = -ENOMEM; + goto err_pool_create; + } + + rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr + + prop->mmu_hop0_tables_total_size, + prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size, + -1); + if (rc) { + dev_err(hdev->dev, "Failed to add memory to page gen pool\n"); + goto err_pool_add; + } + + return 0; + +err_pool_add: + gen_pool_destroy(hdev->mmu_pgt_pool); +err_pool_create: + mutex_destroy(&hdev->mmu_cache_lock); + + return rc; +} + +/* + * hl_mmu_fini - release the mmu module. + * + * @hdev: pointer to the habanalabs device structure + * + * This function does the following: + * - Disable mmu in hw + * - free the pgts pool + * + * All ctxs should be freed before calling this func + */ +void hl_mmu_fini(struct hl_device *hdev) +{ + if (!hdev->mmu_enable) + return; + + gen_pool_destroy(hdev->mmu_pgt_pool); + + mutex_destroy(&hdev->mmu_cache_lock); + + /* MMU HW fini will be done in device hw_fini() */ +} + +/** + * hl_mmu_ctx_init() - initialize a context for using the MMU module. + * @ctx: pointer to the context structure to initialize. + * + * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all + * page tables hops related to this context and an optional DRAM default page + * mapping. + * Return: 0 on success, non-zero otherwise. 
+ */ +int hl_mmu_ctx_init(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr, + hop3_pte_addr, pte_val; + int rc, i, j, hop3_allocated = 0; + + if (!hdev->mmu_enable) + return 0; + + mutex_init(&ctx->mmu_lock); + hash_init(ctx->mmu_hash); + + if (!hdev->dram_supports_virtual_memory || + !hdev->dram_default_page_mapping) + return 0; + + num_of_hop3 = prop->dram_size_for_default_page_mapping; + do_div(num_of_hop3, prop->dram_page_size); + do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + + /* add hop1 and hop2 */ + total_hops = num_of_hop3 + 2; + + ctx->dram_default_hops = kzalloc(HL_PTE_SIZE * total_hops, GFP_KERNEL); + if (!ctx->dram_default_hops) { + rc = -ENOMEM; + goto alloc_err; + } + + hop1_addr = alloc_hop(ctx); + if (hop1_addr == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 1\n"); + rc = -ENOMEM; + goto hop1_err; + } + + ctx->dram_default_hops[total_hops - 1] = hop1_addr; + + hop2_addr = alloc_hop(ctx); + if (hop2_addr == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 2\n"); + rc = -ENOMEM; + goto hop2_err; + } + + ctx->dram_default_hops[total_hops - 2] = hop2_addr; + + for (i = 0 ; i < num_of_hop3 ; i++) { + ctx->dram_default_hops[i] = alloc_hop(ctx); + if (ctx->dram_default_hops[i] == ULLONG_MAX) { + dev_err(hdev->dev, "failed to alloc hop 3, i: %d\n", i); + rc = -ENOMEM; + goto hop3_err; + } + hop3_allocated++; + } + + /* need only pte 0 in hops 0 and 1 */ + pte_val = (hop1_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + hdev->asic_funcs->write_pte(hdev, get_hop0_addr(ctx), pte_val); + + pte_val = (hop2_addr & PTE_PHYS_ADDR_MASK) | PAGE_PRESENT_MASK; + hdev->asic_funcs->write_pte(hdev, hop1_addr, pte_val); + get_pte(ctx, hop1_addr); + + hop2_pte_addr = hop2_addr; + for (i = 0 ; i < num_of_hop3 ; i++) { + pte_val = (ctx->dram_default_hops[i] & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + hdev->asic_funcs->write_pte(hdev, hop2_pte_addr, pte_val); + get_pte(ctx, hop2_addr); + hop2_pte_addr += HL_PTE_SIZE; + } + + pte_val = (prop->mmu_dram_default_page_addr & PTE_PHYS_ADDR_MASK) | + LAST_MASK | PAGE_PRESENT_MASK; + + for (i = 0 ; i < num_of_hop3 ; i++) { + hop3_pte_addr = ctx->dram_default_hops[i]; + for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { + hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, + pte_val); + get_pte(ctx, ctx->dram_default_hops[i]); + hop3_pte_addr += HL_PTE_SIZE; + } + } + + /* flush all writes to reach PCI */ + mb(); + hdev->asic_funcs->read_pte(hdev, hop2_addr); + + return 0; + +hop3_err: + for (i = 0 ; i < hop3_allocated ; i++) + free_hop(ctx, ctx->dram_default_hops[i]); + free_hop(ctx, hop2_addr); +hop2_err: + free_hop(ctx, hop1_addr); +hop1_err: + kfree(ctx->dram_default_hops); +alloc_err: + mutex_destroy(&ctx->mmu_lock); + + return rc; +} + +/* + * hl_mmu_ctx_fini - disable a ctx from using the mmu module + * + * @ctx: pointer to the context structure + * + * This function does the following: + * - Free any pgts which were not freed yet + * - Free the mutex + * - Free DRAM default page mapping hops + */ +void hl_mmu_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct pgt_info *pgt_info; + struct hlist_node *tmp; + u64 num_of_hop3, total_hops, hop1_addr, hop2_addr, hop2_pte_addr, + hop3_pte_addr; + int i, j; + + if (!ctx->hdev->mmu_enable) + return; + + if (hdev->dram_supports_virtual_memory && + hdev->dram_default_page_mapping) { + 
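	/*
	 * This block undoes the default DRAM page mapping that
	 * hl_mmu_ctx_init() built: every hop3 PTE is cleared, the hop2, hop1
	 * and hop0 entries that pointed at them are cleared as well, and each
	 * hop table is released once its PTE refcount drops to zero.
	 *
	 * Sizing sketch (assumed numbers, for illustration only): with a 16GB
	 * default-mapping region, 2MB DRAM pages and 512 PTEs per hop table,
	 * num_of_hop3 = 16GB / 2MB / 512 = 16 hop3 tables.
	 */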
+ num_of_hop3 = prop->dram_size_for_default_page_mapping; + do_div(num_of_hop3, prop->dram_page_size); + do_div(num_of_hop3, PTE_ENTRIES_IN_HOP); + + /* add hop1 and hop2 */ + total_hops = num_of_hop3 + 2; + hop1_addr = ctx->dram_default_hops[total_hops - 1]; + hop2_addr = ctx->dram_default_hops[total_hops - 2]; + + for (i = 0 ; i < num_of_hop3 ; i++) { + hop3_pte_addr = ctx->dram_default_hops[i]; + for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) { + clear_pte(hdev, hop3_pte_addr); + put_pte(ctx, ctx->dram_default_hops[i]); + hop3_pte_addr += HL_PTE_SIZE; + } + } + + hop2_pte_addr = hop2_addr; + for (i = 0 ; i < num_of_hop3 ; i++) { + clear_pte(hdev, hop2_pte_addr); + put_pte(ctx, hop2_addr); + hop2_pte_addr += HL_PTE_SIZE; + } + + clear_pte(hdev, hop1_addr); + put_pte(ctx, hop1_addr); + clear_pte(hdev, get_hop0_addr(ctx)); + + kfree(ctx->dram_default_hops); + + /* flush all writes to reach PCI */ + mb(); + hdev->asic_funcs->read_pte(hdev, hop2_addr); + } + + if (!hash_empty(ctx->mmu_hash)) + dev_err(hdev->dev, "ctx is freed while it has pgts in use\n"); + + hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) { + dev_err(hdev->dev, + "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n", + pgt_info->addr, ctx->asid, pgt_info->num_of_ptes); + free_hop(ctx, pgt_info->addr); + } + + mutex_destroy(&ctx->mmu_lock); +} + +static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 hop0_addr = 0, hop0_pte_addr = 0, + hop1_addr = 0, hop1_pte_addr = 0, + hop2_addr = 0, hop2_pte_addr = 0, + hop3_addr = 0, hop3_pte_addr = 0, + hop4_addr = 0, hop4_pte_addr = 0, + curr_pte; + int clear_hop3 = 1; + bool is_dram_addr, is_huge, is_dram_default_page_mapping; + + is_dram_addr = hl_mem_area_inside_range(virt_addr, PAGE_SIZE_2MB, + prop->va_space_dram_start_address, + prop->va_space_dram_end_address); + + hop0_addr = get_hop0_addr(ctx); + + hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); + + hop1_addr = get_next_hop_addr(curr_pte); + + if (hop1_addr == ULLONG_MAX) + goto not_mapped; + + hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); + + hop2_addr = get_next_hop_addr(curr_pte); + + if (hop2_addr == ULLONG_MAX) + goto not_mapped; + + hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); + + hop3_addr = get_next_hop_addr(curr_pte); + + if (hop3_addr == ULLONG_MAX) + goto not_mapped; + + hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); + + is_huge = curr_pte & LAST_MASK; + + if (is_dram_addr && !is_huge) { + dev_err(hdev->dev, + "DRAM unmapping should use huge pages only\n"); + return -EFAULT; + } + + is_dram_default_page_mapping = + hdev->dram_default_page_mapping && is_dram_addr; + + if (!is_huge) { + hop4_addr = get_next_hop_addr(curr_pte); + + if (hop4_addr == ULLONG_MAX) + goto not_mapped; + + hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr); + + clear_hop3 = 0; + } + + if (is_dram_default_page_mapping) { + u64 zero_pte = (prop->mmu_dram_default_page_addr & + PTE_PHYS_ADDR_MASK) | LAST_MASK | + PAGE_PRESENT_MASK; + if (curr_pte == zero_pte) { + dev_err(hdev->dev, + "DRAM: hop3 PTE points to zero page, can't unmap, 
va: 0x%llx\n", + virt_addr); + goto not_mapped; + } + + if (!(curr_pte & PAGE_PRESENT_MASK)) { + dev_err(hdev->dev, + "DRAM: hop3 PTE is cleared! can't unmap, va: 0x%llx\n", + virt_addr); + goto not_mapped; + } + + hdev->asic_funcs->write_pte(hdev, hop3_pte_addr, zero_pte); + put_pte(ctx, hop3_addr); + } else { + if (!(curr_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + + clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr); + + if (hop4_addr && !put_pte(ctx, hop4_addr)) + clear_hop3 = 1; + + if (!clear_hop3) + goto flush; + clear_pte(hdev, hop3_pte_addr); + + if (put_pte(ctx, hop3_addr)) + goto flush; + clear_pte(hdev, hop2_pte_addr); + + if (put_pte(ctx, hop2_addr)) + goto flush; + clear_pte(hdev, hop1_pte_addr); + + if (put_pte(ctx, hop1_addr)) + goto flush; + clear_pte(hdev, hop0_pte_addr); + } + +flush: + /* flush all writes from all cores to reach PCI */ + mb(); + + hdev->asic_funcs->read_pte(hdev, + hop4_addr ? hop4_pte_addr : hop3_pte_addr); + + return 0; + +not_mapped: + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + + return -EINVAL; +} + +/* + * hl_mmu_unmap - unmaps a virtual addr + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @page_size: size of the page to unmap + * + * This function does the following: + * - Check that the virt addr is mapped + * - Unmap the virt addr and frees pgts if possible + * - Returns 0 on success, -EINVAL if the given addr is not mapped + * + * Because this function changes the page tables in the device and because it + * changes the MMU hash, it must be protected by a lock. + * However, because it maps only a single page, the lock should be implemented + * in a higher level in order to protect the entire mapping of the memory area + */ +int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size) +{ + struct hl_device *hdev = ctx->hdev; + u64 real_virt_addr; + u32 real_page_size, npages; + int i, rc; + + if (!hdev->mmu_enable) + return 0; + + /* + * The H/W handles mapping of 4KB/2MB page. Hence if the host page size + * is bigger, we break it to sub-pages and unmap them separately. + */ + if ((page_size % PAGE_SIZE_2MB) == 0) { + real_page_size = PAGE_SIZE_2MB; + } else if ((page_size % PAGE_SIZE_4KB) == 0) { + real_page_size = PAGE_SIZE_4KB; + } else { + dev_err(hdev->dev, + "page size of %u is not 4KB nor 2MB aligned, can't unmap\n", + page_size); + + return -EFAULT; + } + + npages = page_size / real_page_size; + real_virt_addr = virt_addr; + + for (i = 0 ; i < npages ; i++) { + rc = _hl_mmu_unmap(ctx, real_virt_addr); + if (rc) + return rc; + + real_virt_addr += real_page_size; + } + + return 0; +} + +static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, + u32 page_size) +{ + struct hl_device *hdev = ctx->hdev; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 hop0_addr = 0, hop0_pte_addr = 0, + hop1_addr = 0, hop1_pte_addr = 0, + hop2_addr = 0, hop2_pte_addr = 0, + hop3_addr = 0, hop3_pte_addr = 0, + hop4_addr = 0, hop4_pte_addr = 0, + curr_pte = 0; + bool hop1_new = false, hop2_new = false, hop3_new = false, + hop4_new = false, is_huge, is_dram_addr, + is_dram_default_page_mapping; + int rc = -ENOMEM; + + /* + * This mapping function can map a 4KB/2MB page. For 2MB page there are + * only 3 hops rather than 4. Currently the DRAM allocation uses 2MB + * pages only but user memory could have been allocated with one of the + * two page sizes. 
Since this is a common code for all the three cases, + * we need this hugs page check. + */ + is_huge = page_size == PAGE_SIZE_2MB; + + is_dram_addr = hl_mem_area_inside_range(virt_addr, page_size, + prop->va_space_dram_start_address, + prop->va_space_dram_end_address); + + if (is_dram_addr && !is_huge) { + dev_err(hdev->dev, "DRAM mapping should use huge pages only\n"); + return -EFAULT; + } + + is_dram_default_page_mapping = + hdev->dram_default_page_mapping && is_dram_addr; + + hop0_addr = get_hop0_addr(ctx); + + hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); + + hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new); + + if (hop1_addr == ULLONG_MAX) + goto err; + + hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); + + hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new); + + if (hop2_addr == ULLONG_MAX) + goto err; + + hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); + + hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new); + + if (hop3_addr == ULLONG_MAX) + goto err; + + hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); + + if (!is_huge) { + hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new); + + if (hop4_addr == ULLONG_MAX) + goto err; + + hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr); + } + + if (is_dram_default_page_mapping) { + u64 zero_pte = (prop->mmu_dram_default_page_addr & + PTE_PHYS_ADDR_MASK) | LAST_MASK | + PAGE_PRESENT_MASK; + + if (curr_pte != zero_pte) { + dev_err(hdev->dev, + "DRAM: mapping already exists for virt_addr 0x%llx\n", + virt_addr); + rc = -EINVAL; + goto err; + } + + if (hop1_new || hop2_new || hop3_new || hop4_new) { + dev_err(hdev->dev, + "DRAM mapping should not allocate more hops\n"); + rc = -EFAULT; + goto err; + } + } else if (curr_pte & PAGE_PRESENT_MASK) { + dev_err(hdev->dev, + "mapping already exists for virt_addr 0x%llx\n", + virt_addr); + + dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n", + hdev->asic_funcs->read_pte(hdev, hop0_pte_addr), + hop0_pte_addr); + dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n", + hdev->asic_funcs->read_pte(hdev, hop1_pte_addr), + hop1_pte_addr); + dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n", + hdev->asic_funcs->read_pte(hdev, hop2_pte_addr), + hop2_pte_addr); + dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n", + hdev->asic_funcs->read_pte(hdev, hop3_pte_addr), + hop3_pte_addr); + + if (!is_huge) + dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n", + hdev->asic_funcs->read_pte(hdev, + hop4_pte_addr), + hop4_pte_addr); + + rc = -EINVAL; + goto err; + } + + curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK + | PAGE_PRESENT_MASK; + + hdev->asic_funcs->write_pte(hdev, + is_huge ? 
hop3_pte_addr : hop4_pte_addr, + curr_pte); + + if (hop1_new) { + curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr, + curr_pte); + } + if (hop2_new) { + curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr, + curr_pte); + get_pte(ctx, hop1_addr); + } + if (hop3_new) { + curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr, + curr_pte); + get_pte(ctx, hop2_addr); + } + + if (!is_huge) { + if (hop4_new) { + curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, + hop3_pte_addr, curr_pte); + get_pte(ctx, hop3_addr); + } + + get_pte(ctx, hop4_addr); + } else { + get_pte(ctx, hop3_addr); + } + + /* flush all writes from all cores to reach PCI */ + mb(); + + hdev->asic_funcs->read_pte(hdev, + is_huge ? hop3_pte_addr : hop4_pte_addr); + + return 0; + +err: + if (hop4_new) + free_hop(ctx, hop4_addr); + if (hop3_new) + free_hop(ctx, hop3_addr); + if (hop2_new) + free_hop(ctx, hop2_addr); + if (hop1_new) + free_hop(ctx, hop1_addr); + + return rc; +} + +/* + * hl_mmu_map - maps a virtual addr to physical addr + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @phys_addr: phys addr to map to + * @page_size: physical page size + * + * This function does the following: + * - Check that the virt addr is not mapped + * - Allocate pgts as necessary in order to map the virt addr to the phys + * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM. + * + * Because this function changes the page tables in the device and because it + * changes the MMU hash, it must be protected by a lock. + * However, because it maps only a single page, the lock should be implemented + * in a higher level in order to protect the entire mapping of the memory area + */ +int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) +{ + struct hl_device *hdev = ctx->hdev; + u64 real_virt_addr; + u32 real_page_size, npages; + int i, rc, mapped_cnt = 0; + + if (!hdev->mmu_enable) + return 0; + + /* + * The H/W handles mapping of 4KB/2MB page. Hence if the host page size + * is bigger, we break it to sub-pages and map them separately. 
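 * For example (illustrative numbers, not taken from the patch): a 64KB host
 * page is mapped as sixteen 4KB device pages, while a 4MB chunk is mapped as
 * two 2MB pages.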
+ */ + if ((page_size % PAGE_SIZE_2MB) == 0) { + real_page_size = PAGE_SIZE_2MB; + } else if ((page_size % PAGE_SIZE_4KB) == 0) { + real_page_size = PAGE_SIZE_4KB; + } else { + dev_err(hdev->dev, + "page size of %u is not 4KB nor 2MB aligned, can't map\n", + page_size); + + return -EFAULT; + } + + npages = page_size / real_page_size; + real_virt_addr = virt_addr; + + for (i = 0 ; i < npages ; i++) { + rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr, + real_page_size); + if (rc) + goto err; + + real_virt_addr += real_page_size; + mapped_cnt++; + } + + return 0; + +err: + real_virt_addr = virt_addr; + for (i = 0 ; i < mapped_cnt ; i++) { + if (_hl_mmu_unmap(ctx, real_virt_addr)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap va: 0x%llx\n", real_virt_addr); + + real_virt_addr += real_page_size; + } + + return rc; +} + +/* + * hl_mmu_swap_out - marks all mapping of the given ctx as swapped out + * + * @ctx: pointer to the context structure + * + */ +void hl_mmu_swap_out(struct hl_ctx *ctx) +{ + +} + +/* + * hl_mmu_swap_in - marks all mapping of the given ctx as swapped in + * + * @ctx: pointer to the context structure + * + */ +void hl_mmu_swap_in(struct hl_ctx *ctx) +{ + +} diff --git a/drivers/misc/habanalabs/sysfs.c b/drivers/misc/habanalabs/sysfs.c new file mode 100644 index 000000000000..c900ab15cceb --- /dev/null +++ b/drivers/misc/habanalabs/sysfs.c @@ -0,0 +1,539 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include + +#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */ +#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */ + +long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + if (curr) + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_CURR_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + else + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.pll_index = __cpu_to_le32(pll_index); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SET_CLK_PKT_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, + "Failed to get frequency of PLL %d, error %d\n", + pll_index, rc); + result = rc; + } + + return result; +} + +void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq) +{ + struct armcp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_FREQUENCY_SET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.pll_index = __cpu_to_le32(pll_index); + pkt.value = __cpu_to_le64(freq); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SET_CLK_PKT_TIMEOUT, NULL); + + if (rc) + dev_err(hdev->dev, + "Failed to set frequency to PLL %d, error %d\n", + pll_index, rc); +} + +u64 hl_get_max_power(struct hl_device *hdev) +{ + struct armcp_packet pkt; + long result; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_GET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SET_PWR_PKT_TIMEOUT, &result); + + if (rc) { + dev_err(hdev->dev, "Failed to get max power, error %d\n", rc); + result = rc; + } + + return result; +} + +void hl_set_max_power(struct hl_device *hdev, u64 value) +{ + struct armcp_packet pkt; + int rc; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = __cpu_to_le32(ARMCP_PACKET_MAX_POWER_SET << + ARMCP_PKT_CTL_OPCODE_SHIFT); + pkt.value = __cpu_to_le64(value); + 
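	/*
	 * The packet is sent to the device CPU (ArmCP) with a 1 second timeout
	 * (SET_PWR_PKT_TIMEOUT). Unlike hl_get_max_power(), no result is read
	 * back, so only the return code of send_cpu_message() is checked.
	 */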
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), + SET_PWR_PKT_TIMEOUT, NULL); + + if (rc) + dev_err(hdev->dev, "Failed to set max power, error %d\n", rc); +} + +static ssize_t pm_mng_profile_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + return sprintf(buf, "%s\n", + (hdev->pm_mng_profile == PM_AUTO) ? "auto" : + (hdev->pm_mng_profile == PM_MANUAL) ? "manual" : + "unknown"); +} + +static ssize_t pm_mng_profile_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto out; + } + + mutex_lock(&hdev->fd_open_cnt_lock); + + if (atomic_read(&hdev->fd_open_cnt) > 0) { + dev_err(hdev->dev, + "Can't change PM profile while user process is opened on the device\n"); + count = -EPERM; + goto unlock_mutex; + } + + if (strncmp("auto", buf, strlen("auto")) == 0) { + /* Make sure we are in LOW PLL when changing modes */ + if (hdev->pm_mng_profile == PM_MANUAL) { + atomic_set(&hdev->curr_pll_profile, PLL_HIGH); + hl_device_set_frequency(hdev, PLL_LOW); + hdev->pm_mng_profile = PM_AUTO; + } + } else if (strncmp("manual", buf, strlen("manual")) == 0) { + /* Make sure we are in LOW PLL when changing modes */ + if (hdev->pm_mng_profile == PM_AUTO) { + flush_delayed_work(&hdev->work_freq); + hdev->pm_mng_profile = PM_MANUAL; + } + } else { + dev_err(hdev->dev, "value should be auto or manual\n"); + count = -EINVAL; + goto unlock_mutex; + } + +unlock_mutex: + mutex_unlock(&hdev->fd_open_cnt_lock); +out: + return count; +} + +static ssize_t high_pll_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + return sprintf(buf, "%u\n", hdev->high_pll); +} + +static ssize_t high_pll_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto out; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + hdev->high_pll = value; + +out: + return count; +} + +static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.uboot_ver); +} + +static ssize_t armcp_kernel_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s", hdev->asic_prop.armcp_info.kernel_version); +} + +static ssize_t armcp_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.armcp_version); +} + +static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "0x%08x\n", + hdev->asic_prop.armcp_info.cpld_version); +} + +static ssize_t infineon_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "0x%04x\n", + 
hdev->asic_prop.armcp_info.infineon_version); +} + +static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.armcp_info.fuse_version); +} + +static ssize_t thermal_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s", hdev->asic_prop.armcp_info.thermal_version); +} + +static ssize_t preboot_btl_ver_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", hdev->asic_prop.preboot_ver); +} + +static ssize_t soft_reset_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + int rc; + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + hl_device_reset(hdev, false, false); + +out: + return count; +} + +static ssize_t hard_reset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long value; + int rc; + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + hl_device_reset(hdev, true, false); + +out: + return count; +} + +static ssize_t device_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + char *str; + + switch (hdev->asic_type) { + case ASIC_GOYA: + str = "GOYA"; + break; + default: + dev_err(hdev->dev, "Unrecognized ASIC type %d\n", + hdev->asic_type); + return -EINVAL; + } + + return sprintf(buf, "%s\n", str); +} + +static ssize_t pci_addr_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + /* Use dummy, fixed address for simulator */ + if (!hdev->pdev) + return sprintf(buf, "0000:%02d:00.0\n", hdev->id); + + return sprintf(buf, "%04x:%02x:%02x.%x\n", + pci_domain_nr(hdev->pdev->bus), + hdev->pdev->bus->number, + PCI_SLOT(hdev->pdev->devfn), + PCI_FUNC(hdev->pdev->devfn)); +} + +static ssize_t status_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + char *str; + + if (atomic_read(&hdev->in_reset)) + str = "In reset"; + else if (hdev->disabled) + str = "Malfunction"; + else + str = "Operational"; + + return sprintf(buf, "%s\n", str); +} + +static ssize_t write_open_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", hdev->user_ctx ? 
1 : 0); +} + +static ssize_t soft_reset_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", hdev->soft_reset_cnt); +} + +static ssize_t hard_reset_cnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", hdev->hard_reset_cnt); +} + +static ssize_t max_power_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + long val; + + if (hl_device_disabled_or_in_reset(hdev)) + return -ENODEV; + + val = hl_get_max_power(hdev); + + return sprintf(buf, "%lu\n", val); +} + +static ssize_t max_power_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct hl_device *hdev = dev_get_drvdata(dev); + unsigned long value; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + count = -ENODEV; + goto out; + } + + rc = kstrtoul(buf, 0, &value); + + if (rc) { + count = -EINVAL; + goto out; + } + + hdev->max_power = value; + hl_set_max_power(hdev, value); + +out: + return count; +} + +static ssize_t eeprom_read_handler(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, loff_t offset, + size_t max_size) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct hl_device *hdev = dev_get_drvdata(dev); + char *data; + int rc; + + if (!max_size) + return -EINVAL; + + data = kzalloc(max_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + rc = hdev->asic_funcs->get_eeprom_data(hdev, data, max_size); + if (rc) + goto out; + + memcpy(buf, data, max_size); + +out: + kfree(data); + + return max_size; +} + +static DEVICE_ATTR_RO(armcp_kernel_ver); +static DEVICE_ATTR_RO(armcp_ver); +static DEVICE_ATTR_RO(cpld_ver); +static DEVICE_ATTR_RO(device_type); +static DEVICE_ATTR_RO(fuse_ver); +static DEVICE_ATTR_WO(hard_reset); +static DEVICE_ATTR_RO(hard_reset_cnt); +static DEVICE_ATTR_RW(high_pll); +static DEVICE_ATTR_RO(infineon_ver); +static DEVICE_ATTR_RW(max_power); +static DEVICE_ATTR_RO(pci_addr); +static DEVICE_ATTR_RW(pm_mng_profile); +static DEVICE_ATTR_RO(preboot_btl_ver); +static DEVICE_ATTR_WO(soft_reset); +static DEVICE_ATTR_RO(soft_reset_cnt); +static DEVICE_ATTR_RO(status); +static DEVICE_ATTR_RO(thermal_ver); +static DEVICE_ATTR_RO(uboot_ver); +static DEVICE_ATTR_RO(write_open_cnt); + +static struct bin_attribute bin_attr_eeprom = { + .attr = {.name = "eeprom", .mode = (0444)}, + .size = PAGE_SIZE, + .read = eeprom_read_handler +}; + +static struct attribute *hl_dev_attrs[] = { + &dev_attr_armcp_kernel_ver.attr, + &dev_attr_armcp_ver.attr, + &dev_attr_cpld_ver.attr, + &dev_attr_device_type.attr, + &dev_attr_fuse_ver.attr, + &dev_attr_hard_reset.attr, + &dev_attr_hard_reset_cnt.attr, + &dev_attr_high_pll.attr, + &dev_attr_infineon_ver.attr, + &dev_attr_max_power.attr, + &dev_attr_pci_addr.attr, + &dev_attr_pm_mng_profile.attr, + &dev_attr_preboot_btl_ver.attr, + &dev_attr_soft_reset.attr, + &dev_attr_soft_reset_cnt.attr, + &dev_attr_status.attr, + &dev_attr_thermal_ver.attr, + &dev_attr_uboot_ver.attr, + &dev_attr_write_open_cnt.attr, + NULL, +}; + +static struct bin_attribute *hl_dev_bin_attrs[] = { + &bin_attr_eeprom, + NULL +}; + +static struct attribute_group hl_dev_attr_group = { + .attrs = hl_dev_attrs, + .bin_attrs = hl_dev_bin_attrs, +}; + +static struct attribute_group hl_dev_clks_attr_group; + +static const struct attribute_group 
*hl_dev_attr_groups[] = { + &hl_dev_attr_group, + &hl_dev_clks_attr_group, + NULL, +}; + +int hl_sysfs_init(struct hl_device *hdev) +{ + int rc; + + hdev->pm_mng_profile = PM_AUTO; + hdev->max_power = hdev->asic_prop.max_power_default; + + hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group); + + rc = device_add_groups(hdev->dev, hl_dev_attr_groups); + if (rc) { + dev_err(hdev->dev, + "Failed to add groups to device, error %d\n", rc); + return rc; + } + + return 0; +} + +void hl_sysfs_fini(struct hl_device *hdev) +{ + device_remove_groups(hdev->dev, hl_dev_attr_groups); +} diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index e9c9ef52c76a..927309b86bab 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -29,6 +29,13 @@ static struct class *ilo_class; static unsigned int ilo_major; static unsigned int max_ccb = 16; static char ilo_hwdev[MAX_ILO_DEV]; +static const struct pci_device_id ilo_blacklist[] = { + /* auxiliary iLO */ + {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)}, + /* CL */ + {PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)}, + {} +}; static inline int get_entry_id(int entry) { @@ -763,9 +770,10 @@ static int ilo_probe(struct pci_dev *pdev, int devnum, minor, start, error = 0; struct ilo_hwinfo *ilo_hw; - /* Ignore subsystem_device = 0x1979 (set by BIOS) */ - if (pdev->subsystem_device == 0x1979) - return 0; + if (pci_match_id(ilo_blacklist, pdev)) { + dev_dbg(&pdev->dev, "Not supported on this device\n"); + return -ENODEV; + } if (max_ccb > MAX_CCB) max_ccb = MAX_CCB; diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c index 81a0541ef3ac..294fb2f66bfe 100644 --- a/drivers/misc/ics932s401.c +++ b/drivers/misc/ics932s401.c @@ -146,6 +146,8 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev) */ for (i = 0; i < NUM_MIRRORED_REGS; i++) { temp = i2c_smbus_read_word_data(client, regs_to_copy[i]); + if (temp < 0) + data->regs[regs_to_copy[i]] = 0; data->regs[regs_to_copy[i]] = temp >> 8; } diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 2837dc77478e..b51cf182b031 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -37,16 +37,9 @@ #include #include #include -#include -#include #include -#include #include -#ifdef CONFIG_IDE -#include -#endif - #define DEFAULT_COUNT 10 static int lkdtm_debugfs_open(struct inode *inode, struct file *file); @@ -102,9 +95,7 @@ static struct crashpoint crashpoints[] = { CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"), CRASHPOINT("TIMERADD", "hrtimer_start"), CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"), -# ifdef CONFIG_IDE CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"), -# endif #endif }; @@ -152,7 +143,9 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(EXEC_VMALLOC), CRASHTYPE(EXEC_RODATA), CRASHTYPE(EXEC_USERSPACE), + CRASHTYPE(EXEC_NULL), CRASHTYPE(ACCESS_USERSPACE), + CRASHTYPE(ACCESS_NULL), CRASHTYPE(WRITE_RO), CRASHTYPE(WRITE_RO_AFTER_INIT), CRASHTYPE(WRITE_KERN), @@ -347,9 +340,9 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf, if (buf == NULL) return -ENOMEM; - n = snprintf(buf, PAGE_SIZE, "Available crash types:\n"); + n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n"); for (i = 0; i < ARRAY_SIZE(crashtypes); i++) { - n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", + n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n", crashtypes[i].name); } buf[n] = '\0'; diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 
3c6fd327e166..b69ee004a3f7 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -45,7 +45,9 @@ void lkdtm_EXEC_KMALLOC(void); void lkdtm_EXEC_VMALLOC(void); void lkdtm_EXEC_RODATA(void); void lkdtm_EXEC_USERSPACE(void); +void lkdtm_EXEC_NULL(void); void lkdtm_ACCESS_USERSPACE(void); +void lkdtm_ACCESS_NULL(void); /* lkdtm_refcount.c */ void lkdtm_REFCOUNT_INC_OVERFLOW(void); diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c index 53b85c9d16b8..62f76d506f04 100644 --- a/drivers/misc/lkdtm/perms.c +++ b/drivers/misc/lkdtm/perms.c @@ -47,7 +47,7 @@ static noinline void execute_location(void *dst, bool write) { void (*func)(void) = dst; - pr_info("attempting ok execution at %p\n", do_nothing); + pr_info("attempting ok execution at %px\n", do_nothing); do_nothing(); if (write == CODE_WRITE) { @@ -55,7 +55,7 @@ static noinline void execute_location(void *dst, bool write) flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); } - pr_info("attempting bad execution at %p\n", func); + pr_info("attempting bad execution at %px\n", func); func(); } @@ -66,14 +66,14 @@ static void execute_user_location(void *dst) /* Intentionally crossing kernel/user memory boundary. */ void (*func)(void) = dst; - pr_info("attempting ok execution at %p\n", do_nothing); + pr_info("attempting ok execution at %px\n", do_nothing); do_nothing(); copied = access_process_vm(current, (unsigned long)dst, do_nothing, EXEC_SIZE, FOLL_WRITE); if (copied < EXEC_SIZE) return; - pr_info("attempting bad execution at %p\n", func); + pr_info("attempting bad execution at %px\n", func); func(); } @@ -82,7 +82,7 @@ void lkdtm_WRITE_RO(void) /* Explicitly cast away "const" for the test. */ unsigned long *ptr = (unsigned long *)&rodata; - pr_info("attempting bad rodata write at %p\n", ptr); + pr_info("attempting bad rodata write at %px\n", ptr); *ptr ^= 0xabcd1234; } @@ -100,7 +100,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void) return; } - pr_info("attempting bad ro_after_init write at %p\n", ptr); + pr_info("attempting bad ro_after_init write at %px\n", ptr); *ptr ^= 0xabcd1234; } @@ -112,7 +112,7 @@ void lkdtm_WRITE_KERN(void) size = (unsigned long)do_overwritten - (unsigned long)do_nothing; ptr = (unsigned char *)do_overwritten; - pr_info("attempting bad %zu byte write at %p\n", size, ptr); + pr_info("attempting bad %zu byte write at %px\n", size, ptr); memcpy(ptr, (unsigned char *)do_nothing, size); flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size)); @@ -164,6 +164,11 @@ void lkdtm_EXEC_USERSPACE(void) vm_munmap(user_addr, PAGE_SIZE); } +void lkdtm_EXEC_NULL(void) +{ + execute_location(NULL, CODE_AS_IS); +} + void lkdtm_ACCESS_USERSPACE(void) { unsigned long user_addr, tmp = 0; @@ -185,16 +190,29 @@ void lkdtm_ACCESS_USERSPACE(void) ptr = (unsigned long *)user_addr; - pr_info("attempting bad read at %p\n", ptr); + pr_info("attempting bad read at %px\n", ptr); tmp = *ptr; tmp += 0xc0dec0de; - pr_info("attempting bad write at %p\n", ptr); + pr_info("attempting bad write at %px\n", ptr); *ptr = tmp; vm_munmap(user_addr, PAGE_SIZE); } +void lkdtm_ACCESS_NULL(void) +{ + unsigned long tmp; + unsigned long *ptr = (unsigned long *)NULL; + + pr_info("attempting bad read at %px\n", ptr); + tmp = *ptr; + tmp += 0xc0dec0de; + + pr_info("attempting bad write at %px\n", ptr); + *ptr = tmp; +} + void __init lkdtm_perms_init(void) { /* Make sure we can write to __ro_after_init values during __init */ diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index 
c49e1d2269af..74e2c667dce0 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -43,3 +43,13 @@ config INTEL_MEI_TXE Supported SoCs: Intel Bay Trail + +config INTEL_MEI_HDCP + tristate "Intel HDCP2.2 services of ME Interface" + select INTEL_MEI_ME + depends on DRM_I915 + help + MEI Support for HDCP2.2 Services on Intel platforms. + + Enables the ME FW services required for HDCP2.2 support through + I915 display driver of Intel. diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index d9215fc4e499..8c2d9565a4cb 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -24,3 +24,5 @@ mei-txe-objs += hw-txe.o mei-$(CONFIG_EVENT_TRACING) += mei-trace.o CFLAGS_mei-trace.o = -I$(src) + +obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/ diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 80215c312f0e..5fcac02233af 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -40,6 +40,9 @@ static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO; #define MEI_UUID_MKHIF_FIX UUID_LE(0x55213584, 0x9a29, 0x4916, \ 0xba, 0xdf, 0xf, 0xb7, 0xed, 0x68, 0x2a, 0xeb) +#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, \ + 0xA5, 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04) + #define MEI_UUID_ANY NULL_UUID_LE /** @@ -71,6 +74,18 @@ static void blacklist(struct mei_cl_device *cldev) cldev->do_match = 0; } +/** + * whitelist - forcefully whitelist client + * + * @cldev: me clients device + */ +static void whitelist(struct mei_cl_device *cldev) +{ + dev_dbg(&cldev->dev, "running hook %s\n", __func__); + + cldev->do_match = 1; +} + #define OSTYPE_LINUX 2 struct mei_os_ver { __le16 build; @@ -472,6 +487,7 @@ static struct mei_fixup { MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc), MEI_FIXUP(MEI_UUID_WD, mei_wd), MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix), + MEI_FIXUP(MEI_UUID_HDCP, whitelist), }; /** diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index fc3872fe7b25..65bec998eb6e 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -28,7 +28,6 @@ #include "client.h" #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) -#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) /** * __mei_cl_send - internal client send (write) @@ -541,17 +540,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev) goto out; } - if (!mei_cl_bus_module_get(cldev)) { - dev_err(&cldev->dev, "get hw module failed"); - ret = -ENODEV; - goto out; - } - ret = mei_cl_connect(cl, cldev->me_cl, NULL); - if (ret < 0) { + if (ret < 0) dev_err(&cldev->dev, "cannot connect\n"); - mei_cl_bus_module_put(cldev); - } out: mutex_unlock(&bus->device_lock); @@ -614,7 +605,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev) if (err < 0) dev_err(bus->dev, "Could not disconnect from the ME client\n"); - mei_cl_bus_module_put(cldev); out: /* Flush queues and remove any pending read */ mei_cl_flush_queues(cl, NULL); @@ -725,9 +715,16 @@ static int mei_cl_device_probe(struct device *dev) if (!id) return -ENODEV; + if (!mei_cl_bus_module_get(cldev)) { + dev_err(&cldev->dev, "get hw module failed"); + return -ENODEV; + } + ret = cldrv->probe(cldev, id); - if (ret) + if (ret) { + mei_cl_bus_module_put(cldev); return ret; + } __module_get(THIS_MODULE); return 0; @@ -755,6 +752,7 @@ static int mei_cl_device_remove(struct device *dev) mei_cldev_unregister_callbacks(cldev); + mei_cl_bus_module_put(cldev); module_put(THIS_MODULE); dev->driver = NULL; return ret; diff --git a/drivers/misc/mei/client.c 
b/drivers/misc/mei/client.c index 1fc8ea0f519b..ca4c9cc218a2 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head, struct mei_cl_cb *cb, *next; list_for_each_entry_safe(cb, next, head, list) { - if (cl == cb->cl) + if (cl == cb->cl) { list_del_init(&cb->list); + if (cb->fop_type == MEI_FOP_READ) + mei_io_cb_free(cb); + } } } diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 8f7616557c97..e6207f614816 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1029,29 +1029,36 @@ static void mei_hbm_config_features(struct mei_device *dev) dev->version.minor_version >= HBM_MINOR_VERSION_PGI) dev->hbm_f_pg_supported = 1; + dev->hbm_f_dc_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_DC) dev->hbm_f_dc_supported = 1; + dev->hbm_f_ie_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_IE) dev->hbm_f_ie_supported = 1; /* disconnect on connect timeout instead of link reset */ + dev->hbm_f_dot_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT) dev->hbm_f_dot_supported = 1; /* Notification Event Support */ + dev->hbm_f_ev_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_EV) dev->hbm_f_ev_supported = 1; /* Fixed Address Client Support */ + dev->hbm_f_fa_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_FA) dev->hbm_f_fa_supported = 1; /* OS ver message Support */ + dev->hbm_f_os_supported = 0; if (dev->version.major_version >= HBM_MAJOR_VERSION_OS) dev->hbm_f_os_supported = 1; /* DMA Ring Support */ + dev->hbm_f_dr_supported = 0; if (dev->version.major_version > HBM_MAJOR_VERSION_DR || (dev->version.major_version == HBM_MAJOR_VERSION_DR && dev->version.minor_version >= HBM_MINOR_VERSION_DR)) diff --git a/drivers/misc/mei/hdcp/Makefile b/drivers/misc/mei/hdcp/Makefile new file mode 100644 index 000000000000..adbe7506282d --- /dev/null +++ b/drivers/misc/mei/hdcp/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright (c) 2019, Intel Corporation. +# +# Makefile - HDCP client driver for Intel MEI Bus Driver. + +obj-$(CONFIG_INTEL_MEI_HDCP) += mei_hdcp.o diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c new file mode 100644 index 000000000000..90b6ae8e9dae --- /dev/null +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -0,0 +1,849 @@ +// SPDX-License-Identifier: (GPL-2.0) +/* + * Copyright © 2019 Intel Corporation + * + * Mei_hdcp.c: HDCP client driver for mei bus + * + * Author: + * Ramalingam C + */ + +/** + * DOC: MEI_HDCP Client Driver + * + * This is a client driver to the mei_bus to make the HDCP2.2 services of + * ME FW available for the interested consumers like I915. + * + * This module will act as a translation layer between HDCP protocol + * implementor(I915) and ME FW by translating HDCP2.2 authentication + * messages to ME FW command payloads and vice versa. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mei_hdcp.h" + +static inline u8 mei_get_ddi_index(enum port port) +{ + switch (port) { + case PORT_A: + return MEI_DDI_A; + case PORT_B ... PORT_F: + return (u8)port; + default: + return MEI_DDI_INVALID_PORT; + } +} + +/** + * mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @ake_data: AKE_Init msg output. + * + * Return: 0 on Success, <0 on Failure. 
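[Illustrative sketch, not part of the patch.] Every mei_hdcp_* handler in this new file repeats the same HECI round trip that the DOC comment above describes: fill the little-endian hdcp_cmd_header (api_version = HDCP_API_VERSION, command_id, status = ME_HDCP_STATUS_SUCCESS, buffer_len = payload size excluding the header) and the hdcp_port_id via mei_get_ddi_index(data->port), send the request over the mei client bus, receive the reply, and treat any non-SUCCESS status in the reply header as -EIO. The helper below condenses that shared tail; the helper name is hypothetical, everything else uses the mei bus API and the definitions from mei_hdcp.h assumed to be in scope in this file.

#include <linux/mei_cl_bus.h>	/* mei_cldev_send/recv, to_mei_cl_device */
#include "mei_hdcp.h"		/* hdcp_cmd_header, ME_HDCP_STATUS_SUCCESS */

/* Hypothetical helper: the send/receive/status-check tail every handler repeats. */
static int mei_hdcp_roundtrip(struct device *dev, u32 cmd_id,
			      void *req, size_t req_sz,
			      void *resp, size_t resp_sz)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct hdcp_cmd_header *resp_hdr = resp;
	ssize_t byte;

	/* request header and port id are assumed to be filled by the caller */
	byte = mei_cldev_send(cldev, req, req_sz);
	if (byte < 0) {
		dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
		return byte;
	}

	byte = mei_cldev_recv(cldev, resp, resp_sz);
	if (byte < 0) {
		dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
		return byte;
	}

	/* firmware reports per-command failure in the reply header */
	if (resp_hdr->status != ME_HDCP_STATUS_SUCCESS) {
		dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n",
			cmd_id, resp_hdr->status);
		return -EIO;
	}

	return 0;
}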
+ */ +static int +mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data) +{ + struct wired_cmd_initiate_hdcp2_session_in session_init_in = { { 0 } }; + struct wired_cmd_initiate_hdcp2_session_out + session_init_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !ake_data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + session_init_in.header.api_version = HDCP_API_VERSION; + session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION; + session_init_in.header.status = ME_HDCP_STATUS_SUCCESS; + session_init_in.header.buffer_len = + WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; + + session_init_in.port.integrated_port_type = data->port_type; + session_init_in.port.physical_port = mei_get_ddi_index(data->port); + session_init_in.protocol = data->protocol; + + byte = mei_cldev_send(cldev, (u8 *)&session_init_in, + sizeof(session_init_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&session_init_out, + sizeof(session_init_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (session_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_INITIATE_HDCP2_SESSION, + session_init_out.header.status); + return -EIO; + } + + ake_data->msg_id = HDCP_2_2_AKE_INIT; + ake_data->tx_caps = session_init_out.tx_caps; + memcpy(ake_data->r_tx, session_init_out.r_tx, HDCP_2_2_RTX_LEN); + + return 0; +} + +/** + * mei_hdcp_verify_receiver_cert_prepare_km() - Verify the Receiver Certificate + * AKE_Send_Cert and prepare AKE_Stored_Km/AKE_No_Stored_Km + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rx_cert: AKE_Send_Cert for verification + * @km_stored: Pairing status flag output + * @ek_pub_km: AKE_Stored_Km/AKE_No_Stored_Km output msg + * @msg_sz : size of AKE_XXXXX_Km output msg + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz) +{ + struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = { { 0 } }; + struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + verify_rxcert_in.header.api_version = HDCP_API_VERSION; + verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT; + verify_rxcert_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_rxcert_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; + + verify_rxcert_in.port.integrated_port_type = data->port_type; + verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port); + + verify_rxcert_in.cert_rx = rx_cert->cert_rx; + memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); + memcpy(verify_rxcert_in.rx_caps, rx_cert->rx_caps, HDCP_2_2_RXCAPS_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&verify_rxcert_in, + sizeof(verify_rxcert_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed: %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_rxcert_out, + sizeof(verify_rxcert_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv 
failed: %zd\n", byte); + return byte; + } + + if (verify_rxcert_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. Status: 0x%X\n", + WIRED_VERIFY_RECEIVER_CERT, + verify_rxcert_out.header.status); + return -EIO; + } + + *km_stored = !!verify_rxcert_out.km_stored; + if (verify_rxcert_out.km_stored) { + ek_pub_km->msg_id = HDCP_2_2_AKE_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_stored_km); + } else { + ek_pub_km->msg_id = HDCP_2_2_AKE_NO_STORED_KM; + *msg_sz = sizeof(struct hdcp2_ake_no_stored_km); + } + + memcpy(ek_pub_km->e_kpub_km, &verify_rxcert_out.ekm_buff, + sizeof(verify_rxcert_out.ekm_buff)); + + return 0; +} + +/** + * mei_hdcp_verify_hprime() - Verify AKE_Send_H_prime at ME FW. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rx_hprime: AKE_Send_H_prime msg for ME FW verification + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime) +{ + struct wired_cmd_ake_send_hprime_in send_hprime_in = { { 0 } }; + struct wired_cmd_ake_send_hprime_out send_hprime_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !rx_hprime) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + send_hprime_in.header.api_version = HDCP_API_VERSION; + send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME; + send_hprime_in.header.status = ME_HDCP_STATUS_SUCCESS; + send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; + + send_hprime_in.port.integrated_port_type = data->port_type; + send_hprime_in.port.physical_port = mei_get_ddi_index(data->port); + + memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, + HDCP_2_2_H_PRIME_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&send_hprime_in, + sizeof(send_hprime_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&send_hprime_out, + sizeof(send_hprime_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (send_hprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. 
Status: 0x%X\n", + WIRED_AKE_SEND_HPRIME, send_hprime_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_store_pairing_info() - Store pairing info received at ME FW + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @pairing_info: AKE_Send_Pairing_Info msg input to ME FW + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info *pairing_info) +{ + struct wired_cmd_ake_send_pairing_info_in pairing_info_in = { { 0 } }; + struct wired_cmd_ake_send_pairing_info_out pairing_info_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !pairing_info) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + pairing_info_in.header.api_version = HDCP_API_VERSION; + pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO; + pairing_info_in.header.status = ME_HDCP_STATUS_SUCCESS; + pairing_info_in.header.buffer_len = + WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; + + pairing_info_in.port.integrated_port_type = data->port_type; + pairing_info_in.port.physical_port = mei_get_ddi_index(data->port); + + memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, + HDCP_2_2_E_KH_KM_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&pairing_info_in, + sizeof(pairing_info_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&pairing_info_out, + sizeof(pairing_info_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (pairing_info_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. Status: 0x%X\n", + WIRED_AKE_SEND_PAIRING_INFO, + pairing_info_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_initiate_locality_check() - Prepare LC_Init + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @lc_init_data: LC_Init msg output + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_initiate_locality_check(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data) +{ + struct wired_cmd_init_locality_check_in lc_init_in = { { 0 } }; + struct wired_cmd_init_locality_check_out lc_init_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !lc_init_data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + lc_init_in.header.api_version = HDCP_API_VERSION; + lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK; + lc_init_in.header.status = ME_HDCP_STATUS_SUCCESS; + lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; + + lc_init_in.port.integrated_port_type = data->port_type; + lc_init_in.port.physical_port = mei_get_ddi_index(data->port); + + byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&lc_init_out, sizeof(lc_init_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (lc_init_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X Failed. 
status: 0x%X\n", + WIRED_INIT_LOCALITY_CHECK, lc_init_out.header.status); + return -EIO; + } + + lc_init_data->msg_id = HDCP_2_2_LC_INIT; + memcpy(lc_init_data->r_n, lc_init_out.r_n, HDCP_2_2_RN_LEN); + + return 0; +} + +/** + * mei_hdcp_verify_lprime() - Verify lprime. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rx_lprime: LC_Send_L_prime msg for ME FW verification + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime) +{ + struct wired_cmd_validate_locality_in verify_lprime_in = { { 0 } }; + struct wired_cmd_validate_locality_out verify_lprime_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !rx_lprime) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + verify_lprime_in.header.api_version = HDCP_API_VERSION; + verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY; + verify_lprime_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_lprime_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; + + verify_lprime_in.port.integrated_port_type = data->port_type; + verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port); + + memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, + HDCP_2_2_L_PRIME_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&verify_lprime_in, + sizeof(verify_lprime_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_lprime_out, + sizeof(verify_lprime_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (verify_lprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_VALIDATE_LOCALITY, + verify_lprime_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_get_session_key() - Prepare SKE_Send_Eks. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @ske_data: SKE_Send_Eks msg output from ME FW. + * + * Return: 0 on Success, <0 on Failure + */ +static int mei_hdcp_get_session_key(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data) +{ + struct wired_cmd_get_session_key_in get_skey_in = { { 0 } }; + struct wired_cmd_get_session_key_out get_skey_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data || !ske_data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + get_skey_in.header.api_version = HDCP_API_VERSION; + get_skey_in.header.command_id = WIRED_GET_SESSION_KEY; + get_skey_in.header.status = ME_HDCP_STATUS_SUCCESS; + get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; + + get_skey_in.port.integrated_port_type = data->port_type; + get_skey_in.port.physical_port = mei_get_ddi_index(data->port); + + byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&get_skey_out, sizeof(get_skey_out)); + + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (get_skey_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. 
status: 0x%X\n", + WIRED_GET_SESSION_KEY, get_skey_out.header.status); + return -EIO; + } + + ske_data->msg_id = HDCP_2_2_SKE_SEND_EKS; + memcpy(ske_data->e_dkey_ks, get_skey_out.e_dkey_ks, + HDCP_2_2_E_DKEY_KS_LEN); + memcpy(ske_data->riv, get_skey_out.r_iv, HDCP_2_2_RIV_LEN); + + return 0; +} + +/** + * mei_hdcp_repeater_check_flow_prepare_ack() - Validate the Downstream topology + * and prepare rep_ack. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @rep_topology: Receiver ID List to be validated + * @rep_send_ack : repeater ack from ME FW. + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack) +{ + struct wired_cmd_verify_repeater_in verify_repeater_in = { { 0 } }; + struct wired_cmd_verify_repeater_out verify_repeater_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !rep_topology || !rep_send_ack || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + verify_repeater_in.header.api_version = HDCP_API_VERSION; + verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER; + verify_repeater_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_repeater_in.header.buffer_len = + WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; + + verify_repeater_in.port.integrated_port_type = data->port_type; + verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port); + + memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, + HDCP_2_2_RXINFO_LEN); + memcpy(verify_repeater_in.seq_num_v, rep_topology->seq_num_v, + HDCP_2_2_SEQ_NUM_LEN); + memcpy(verify_repeater_in.v_prime, rep_topology->v_prime, + HDCP_2_2_V_PRIME_HALF_LEN); + memcpy(verify_repeater_in.receiver_ids, rep_topology->receiver_ids, + HDCP_2_2_RECEIVER_IDS_MAX_LEN); + + byte = mei_cldev_send(cldev, (u8 *)&verify_repeater_in, + sizeof(verify_repeater_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_repeater_out, + sizeof(verify_repeater_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (verify_repeater_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_VERIFY_REPEATER, + verify_repeater_out.header.status); + return -EIO; + } + + memcpy(rep_send_ack->v, verify_repeater_out.v, + HDCP_2_2_V_PRIME_HALF_LEN); + rep_send_ack->msg_id = HDCP_2_2_REP_SEND_ACK; + + return 0; +} + +/** + * mei_hdcp_verify_mprime() - Verify mprime. + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * @stream_ready: RepeaterAuth_Stream_Ready msg for ME FW verification. 
+ * + * Return: 0 on Success, <0 on Failure + */ +static int mei_hdcp_verify_mprime(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready) +{ + struct wired_cmd_repeater_auth_stream_req_in + verify_mprime_in = { { 0 } }; + struct wired_cmd_repeater_auth_stream_req_out + verify_mprime_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !stream_ready || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + verify_mprime_in.header.api_version = HDCP_API_VERSION; + verify_mprime_in.header.command_id = WIRED_REPEATER_AUTH_STREAM_REQ; + verify_mprime_in.header.status = ME_HDCP_STATUS_SUCCESS; + verify_mprime_in.header.buffer_len = + WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN; + + verify_mprime_in.port.integrated_port_type = data->port_type; + verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port); + + memcpy(verify_mprime_in.m_prime, stream_ready->m_prime, + HDCP_2_2_MPRIME_LEN); + drm_hdcp2_u32_to_seq_num(verify_mprime_in.seq_num_m, data->seq_num_m); + memcpy(verify_mprime_in.streams, data->streams, + (data->k * sizeof(struct hdcp2_streamid_type))); + + verify_mprime_in.k = cpu_to_be16(data->k); + + byte = mei_cldev_send(cldev, (u8 *)&verify_mprime_in, + sizeof(verify_mprime_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&verify_mprime_out, + sizeof(verify_mprime_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (verify_mprime_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_REPEATER_AUTH_STREAM_REQ, + verify_mprime_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_enable_authentication() - Mark a port as authenticated + * through ME FW + * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * + * Return: 0 on Success, <0 on Failure + */ +static int mei_hdcp_enable_authentication(struct device *dev, + struct hdcp_port_data *data) +{ + struct wired_cmd_enable_auth_in enable_auth_in = { { 0 } }; + struct wired_cmd_enable_auth_out enable_auth_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + enable_auth_in.header.api_version = HDCP_API_VERSION; + enable_auth_in.header.command_id = WIRED_ENABLE_AUTH; + enable_auth_in.header.status = ME_HDCP_STATUS_SUCCESS; + enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; + + enable_auth_in.port.integrated_port_type = data->port_type; + enable_auth_in.port.physical_port = mei_get_ddi_index(data->port); + enable_auth_in.stream_type = data->streams[0].stream_type; + + byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in, + sizeof(enable_auth_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&enable_auth_out, + sizeof(enable_auth_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (enable_auth_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "ME cmd 0x%08X failed. status: 0x%X\n", + WIRED_ENABLE_AUTH, enable_auth_out.header.status); + return -EIO; + } + + return 0; +} + +/** + * mei_hdcp_close_session() - Close the Wired HDCP Tx session of ME FW per port. + * This also disables the authenticated state of the port. 
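[Illustrative sketch, not part of the patch.] For orientation on the consumer side: once the component master registered below has bound, the display driver is handed the i915_hdcp_comp_master (whose ->ops and ->mei_dev are set in mei_component_master_bind below) and drives the ME FW session through that ops table. The flow and the function name here are assumptions for illustration; the ops, structures and field names come from the interface header this driver builds against (assumed to be <drm/i915_mei_hdcp_interface.h>).

/* Hypothetical consumer-side helper: authenticate a port via the bound ops. */
static int hdcp2_authenticate_port(struct i915_hdcp_comp_master *master,
				   struct hdcp_port_data *data)
{
	struct hdcp2_ake_init ake_init;
	int ret;

	/* AKE_Init: opens the wired HDCP2.2 Tx session in ME FW */
	ret = master->ops->initiate_hdcp2_session(master->mei_dev, data,
						  &ake_init);
	if (ret < 0)
		return ret;

	/* ... remaining AKE / LC / SKE / repeater steps via the other ops ... */

	/* mark the port authenticated, or tear the session down on failure */
	ret = master->ops->enable_hdcp_authentication(master->mei_dev, data);
	if (ret < 0)
		master->ops->close_hdcp_session(master->mei_dev, data);

	return ret;
}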
+ * @dev: device corresponding to the mei_cl_device + * @data: Intel HW specific hdcp data + * + * Return: 0 on Success, <0 on Failure + */ +static int +mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) +{ + struct wired_cmd_close_session_in session_close_in = { { 0 } }; + struct wired_cmd_close_session_out session_close_out = { { 0 } }; + struct mei_cl_device *cldev; + ssize_t byte; + + if (!dev || !data) + return -EINVAL; + + cldev = to_mei_cl_device(dev); + + session_close_in.header.api_version = HDCP_API_VERSION; + session_close_in.header.command_id = WIRED_CLOSE_SESSION; + session_close_in.header.status = ME_HDCP_STATUS_SUCCESS; + session_close_in.header.buffer_len = + WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; + + session_close_in.port.integrated_port_type = data->port_type; + session_close_in.port.physical_port = mei_get_ddi_index(data->port); + + byte = mei_cldev_send(cldev, (u8 *)&session_close_in, + sizeof(session_close_in)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte); + return byte; + } + + byte = mei_cldev_recv(cldev, (u8 *)&session_close_out, + sizeof(session_close_out)); + if (byte < 0) { + dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte); + return byte; + } + + if (session_close_out.header.status != ME_HDCP_STATUS_SUCCESS) { + dev_dbg(dev, "Session Close Failed. status: 0x%X\n", + session_close_out.header.status); + return -EIO; + } + + return 0; +} + +static const struct i915_hdcp_component_ops mei_hdcp_ops = { + .owner = THIS_MODULE, + .initiate_hdcp2_session = mei_hdcp_initiate_session, + .verify_receiver_cert_prepare_km = + mei_hdcp_verify_receiver_cert_prepare_km, + .verify_hprime = mei_hdcp_verify_hprime, + .store_pairing_info = mei_hdcp_store_pairing_info, + .initiate_locality_check = mei_hdcp_initiate_locality_check, + .verify_lprime = mei_hdcp_verify_lprime, + .get_session_key = mei_hdcp_get_session_key, + .repeater_check_flow_prepare_ack = + mei_hdcp_repeater_check_flow_prepare_ack, + .verify_mprime = mei_hdcp_verify_mprime, + .enable_hdcp_authentication = mei_hdcp_enable_authentication, + .close_hdcp_session = mei_hdcp_close_session, +}; + +static int mei_component_master_bind(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct i915_hdcp_comp_master *comp_master = + mei_cldev_get_drvdata(cldev); + int ret; + + dev_dbg(dev, "%s\n", __func__); + comp_master->ops = &mei_hdcp_ops; + comp_master->mei_dev = dev; + ret = component_bind_all(dev, comp_master); + if (ret < 0) + return ret; + + return 0; +} + +static void mei_component_master_unbind(struct device *dev) +{ + struct mei_cl_device *cldev = to_mei_cl_device(dev); + struct i915_hdcp_comp_master *comp_master = + mei_cldev_get_drvdata(cldev); + + dev_dbg(dev, "%s\n", __func__); + component_unbind_all(dev, comp_master); +} + +static const struct component_master_ops mei_component_master_ops = { + .bind = mei_component_master_bind, + .unbind = mei_component_master_unbind, +}; + +static int mei_hdcp_component_match(struct device *dev, int subcomponent, + void *data) +{ + return !strcmp(dev->driver->name, "i915") && + subcomponent == I915_COMPONENT_HDCP; +} + +static int mei_hdcp_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct i915_hdcp_comp_master *comp_master; + struct component_match *master_match; + int ret; + + ret = mei_cldev_enable(cldev); + if (ret < 0) { + dev_err(&cldev->dev, "mei_cldev_enable Failed. 
%d\n", ret); + goto enable_err_exit; + } + + comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL); + if (!comp_master) { + ret = -ENOMEM; + goto err_exit; + } + + master_match = NULL; + component_match_add_typed(&cldev->dev, &master_match, + mei_hdcp_component_match, comp_master); + if (IS_ERR_OR_NULL(master_match)) { + ret = -ENOMEM; + goto err_exit; + } + + mei_cldev_set_drvdata(cldev, comp_master); + ret = component_master_add_with_match(&cldev->dev, + &mei_component_master_ops, + master_match); + if (ret < 0) { + dev_err(&cldev->dev, "Master comp add failed %d\n", ret); + goto err_exit; + } + + return 0; + +err_exit: + mei_cldev_set_drvdata(cldev, NULL); + kfree(comp_master); + mei_cldev_disable(cldev); +enable_err_exit: + return ret; +} + +static int mei_hdcp_remove(struct mei_cl_device *cldev) +{ + struct i915_hdcp_comp_master *comp_master = + mei_cldev_get_drvdata(cldev); + + component_master_del(&cldev->dev, &mei_component_master_ops); + kfree(comp_master); + mei_cldev_set_drvdata(cldev, NULL); + + return mei_cldev_disable(cldev); +} + +#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \ + 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04) + +static struct mei_cl_device_id mei_hdcp_tbl[] = { + { .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY }, + { } +}; +MODULE_DEVICE_TABLE(mei, mei_hdcp_tbl); + +static struct mei_cl_driver mei_hdcp_driver = { + .id_table = mei_hdcp_tbl, + .name = KBUILD_MODNAME, + .probe = mei_hdcp_probe, + .remove = mei_hdcp_remove, +}; + +module_mei_cl_driver(mei_hdcp_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MEI HDCP"); diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h new file mode 100644 index 000000000000..5f74b908e486 --- /dev/null +++ b/drivers/misc/mei/hdcp/mei_hdcp.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: (GPL-2.0+) */ +/* + * Copyright © 2019 Intel Corporation + * + * Authors: + * Ramalingam C + */ + +#ifndef __MEI_HDCP_H__ +#define __MEI_HDCP_H__ + +#include + +/* me_hdcp_status: Enumeration of all HDCP Status Codes */ +enum me_hdcp_status { + ME_HDCP_STATUS_SUCCESS = 0x0000, + + /* WiDi Generic Status Codes */ + ME_HDCP_STATUS_INTERNAL_ERROR = 0x1000, + ME_HDCP_STATUS_UNKNOWN_ERROR = 0x1001, + ME_HDCP_STATUS_INCORRECT_API_VERSION = 0x1002, + ME_HDCP_STATUS_INVALID_FUNCTION = 0x1003, + ME_HDCP_STATUS_INVALID_BUFFER_LENGTH = 0x1004, + ME_HDCP_STATUS_INVALID_PARAMS = 0x1005, + ME_HDCP_STATUS_AUTHENTICATION_FAILED = 0x1006, + + /* WiDi Status Codes */ + ME_HDCP_INVALID_SESSION_STATE = 0x6000, + ME_HDCP_SRM_FRAGMENT_UNEXPECTED = 0x6001, + ME_HDCP_SRM_INVALID_LENGTH = 0x6002, + ME_HDCP_SRM_FRAGMENT_OFFSET_INVALID = 0x6003, + ME_HDCP_SRM_VERIFICATION_FAILED = 0x6004, + ME_HDCP_SRM_VERSION_TOO_OLD = 0x6005, + ME_HDCP_RX_CERT_VERIFICATION_FAILED = 0x6006, + ME_HDCP_RX_REVOKED = 0x6007, + ME_HDCP_H_VERIFICATION_FAILED = 0x6008, + ME_HDCP_REPEATER_CHECK_UNEXPECTED = 0x6009, + ME_HDCP_TOPOLOGY_MAX_EXCEEDED = 0x600A, + ME_HDCP_V_VERIFICATION_FAILED = 0x600B, + ME_HDCP_L_VERIFICATION_FAILED = 0x600C, + ME_HDCP_STREAM_KEY_ALLOC_FAILED = 0x600D, + ME_HDCP_BASE_KEY_RESET_FAILED = 0x600E, + ME_HDCP_NONCE_GENERATION_FAILED = 0x600F, + ME_HDCP_STATUS_INVALID_E_KEY_STATE = 0x6010, + ME_HDCP_STATUS_INVALID_CS_ICV = 0x6011, + ME_HDCP_STATUS_INVALID_KB_KEY_STATE = 0x6012, + ME_HDCP_STATUS_INVALID_PAVP_MODE_ICV = 0x6013, + ME_HDCP_STATUS_INVALID_PAVP_MODE = 0x6014, + ME_HDCP_STATUS_LC_MAX_ATTEMPTS = 0x6015, + + /* New status for HDCP 2.1 */ + ME_HDCP_STATUS_MISMATCH_IN_M 
= 0x6016, + + /* New status code for HDCP 2.2 Rx */ + ME_HDCP_STATUS_RX_PROV_NOT_ALLOWED = 0x6017, + ME_HDCP_STATUS_RX_PROV_WRONG_SUBJECT = 0x6018, + ME_HDCP_RX_NEEDS_PROVISIONING = 0x6019, + ME_HDCP_BKSV_ICV_AUTH_FAILED = 0x6020, + ME_HDCP_STATUS_INVALID_STREAM_ID = 0x6021, + ME_HDCP_STATUS_CHAIN_NOT_INITIALIZED = 0x6022, + ME_HDCP_FAIL_NOT_EXPECTED = 0x6023, + ME_HDCP_FAIL_HDCP_OFF = 0x6024, + ME_HDCP_FAIL_INVALID_PAVP_MEMORY_MODE = 0x6025, + ME_HDCP_FAIL_AES_ECB_FAILURE = 0x6026, + ME_HDCP_FEATURE_NOT_SUPPORTED = 0x6027, + ME_HDCP_DMA_READ_ERROR = 0x6028, + ME_HDCP_DMA_WRITE_ERROR = 0x6029, + ME_HDCP_FAIL_INVALID_PACKET_SIZE = 0x6030, + ME_HDCP_H264_PARSING_ERROR = 0x6031, + ME_HDCP_HDCP2_ERRATA_VIDEO_VIOLATION = 0x6032, + ME_HDCP_HDCP2_ERRATA_AUDIO_VIOLATION = 0x6033, + ME_HDCP_TX_ACTIVE_ERROR = 0x6034, + ME_HDCP_MODE_CHANGE_ERROR = 0x6035, + ME_HDCP_STREAM_TYPE_ERROR = 0x6036, + ME_HDCP_STREAM_MANAGE_NOT_POSSIBLE = 0x6037, + + ME_HDCP_STATUS_PORT_INVALID_COMMAND = 0x6038, + ME_HDCP_STATUS_UNSUPPORTED_PROTOCOL = 0x6039, + ME_HDCP_STATUS_INVALID_PORT_INDEX = 0x603a, + ME_HDCP_STATUS_TX_AUTH_NEEDED = 0x603b, + ME_HDCP_STATUS_NOT_INTEGRATED_PORT = 0x603c, + ME_HDCP_STATUS_SESSION_MAX_REACHED = 0x603d, + + /* hdcp capable bit is not set in rx_caps(error is unique to DP) */ + ME_HDCP_STATUS_NOT_HDCP_CAPABLE = 0x6041, + + ME_HDCP_STATUS_INVALID_STREAM_COUNT = 0x6042, +}; + +#define HDCP_API_VERSION 0x00010000 + +#define HDCP_M_LEN 16 +#define HDCP_KH_LEN 16 + +/* Payload Buffer size(Excluding Header) for CMDs and corresponding response */ +/* Wired_Tx_AKE */ +#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN (4 + 1) +#define WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_OUT (4 + 8 + 3) + +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN (4 + 522 + 8 + 3) +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MIN_OUT (4 + 1 + 3 + 16 + 16) +#define WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_MAX_OUT (4 + 1 + 3 + 128) + +#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN (4 + 32) +#define WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_OUT (4) + +#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN (4 + 16) +#define WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_OUT (4) + +#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN (4) +#define WIRED_CMD_BUF_LEN_CLOSE_SESSION_OUT (4) + +/* Wired_Tx_LC */ +#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN (4) +#define WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_OUT (4 + 8) + +#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN (4 + 32) +#define WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_OUT (4) + +/* Wired_Tx_SKE */ +#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN (4) +#define WIRED_CMD_BUF_LEN_GET_SESSION_KEY_OUT (4 + 16 + 8) + +/* Wired_Tx_SKE */ +#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN (4 + 1) +#define WIRED_CMD_BUF_LEN_ENABLE_AUTH_OUT (4) + +/* Wired_Tx_Repeater */ +#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN (4 + 2 + 3 + 16 + 155) +#define WIRED_CMD_BUF_LEN_VERIFY_REPEATER_OUT (4 + 1 + 16) + +#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN (4 + 3 + \ + 32 + 2 + 2) + +#define WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_OUT (4) + +/* hdcp_command_id: Enumeration of all WIRED HDCP Command IDs */ +enum hdcp_command_id { + _WIDI_COMMAND_BASE = 0x00030000, + WIDI_INITIATE_HDCP2_SESSION = _WIDI_COMMAND_BASE, + HDCP_GET_SRM_STATUS, + HDCP_SEND_SRM_FRAGMENT, + + /* The wired HDCP Tx commands */ + _WIRED_COMMAND_BASE = 0x00031000, + WIRED_INITIATE_HDCP2_SESSION = _WIRED_COMMAND_BASE, + WIRED_VERIFY_RECEIVER_CERT, + WIRED_AKE_SEND_HPRIME, + WIRED_AKE_SEND_PAIRING_INFO, + WIRED_INIT_LOCALITY_CHECK, + WIRED_VALIDATE_LOCALITY, + 
WIRED_GET_SESSION_KEY, + WIRED_ENABLE_AUTH, + WIRED_VERIFY_REPEATER, + WIRED_REPEATER_AUTH_STREAM_REQ, + WIRED_CLOSE_SESSION, + + _WIRED_COMMANDS_COUNT, +}; + +union encrypted_buff { + u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN]; + u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN]; + struct { + u8 e_kh_km[HDCP_KH_LEN]; + u8 m[HDCP_M_LEN]; + } __packed; +}; + +/* HDCP HECI message header. All header values are little endian. */ +struct hdcp_cmd_header { + u32 api_version; + u32 command_id; + enum me_hdcp_status status; + /* Length of the HECI message (excluding the header) */ + u32 buffer_len; +} __packed; + +/* Empty command request or response. No data follows the header. */ +struct hdcp_cmd_no_data { + struct hdcp_cmd_header header; +} __packed; + +/* Uniquely identifies the hdcp port being addressed for a given command. */ +struct hdcp_port_id { + u8 integrated_port_type; + u8 physical_port; + u16 reserved; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in + * support of the AKE protocol + */ +/* HECI struct for integrated wired HDCP Tx session initiation. */ +struct wired_cmd_initiate_hdcp2_session_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 protocol; /* for HDMI vs DP */ +} __packed; + +struct wired_cmd_initiate_hdcp2_session_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 r_tx[HDCP_2_2_RTX_LEN]; + struct hdcp2_tx_caps tx_caps; +} __packed; + +/* HECI struct for ending an integrated wired HDCP Tx session. */ +struct wired_cmd_close_session_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_close_session_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* HECI struct for integrated wired HDCP Tx Rx Cert verification. */ +struct wired_cmd_verify_receiver_cert_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + struct hdcp2_cert_rx cert_rx; + u8 r_rx[HDCP_2_2_RRX_LEN]; + u8 rx_caps[HDCP_2_2_RXCAPS_LEN]; +} __packed; + +struct wired_cmd_verify_receiver_cert_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 km_stored; + u8 reserved[3]; + union encrypted_buff ekm_buff; +} __packed; + +/* HECI struct for verification of Rx's Hprime in a HDCP Tx session */ +struct wired_cmd_ake_send_hprime_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 h_prime[HDCP_2_2_H_PRIME_LEN]; +} __packed; + +struct wired_cmd_ake_send_hprime_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * HECI struct for sending in AKE pairing data generated by the Rx in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_ake_send_pairing_info_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN]; +} __packed; + +struct wired_cmd_ake_send_pairing_info_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* Data structures for integrated wired HDCP2 Tx in support of the LC protocol*/ +/* + * HECI struct for initiating locality check with an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_init_locality_check_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_init_locality_check_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 r_n[HDCP_2_2_RN_LEN]; +} __packed; + +/* + * HECI struct for validating an Rx's LPrime value in an + * integrated wired HDCP Tx session. 
+ */ +struct wired_cmd_validate_locality_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 l_prime[HDCP_2_2_L_PRIME_LEN]; +} __packed; + +struct wired_cmd_validate_locality_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in support of the + * SKE protocol + */ +/* HECI struct for creating session key */ +struct wired_cmd_get_session_key_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +struct wired_cmd_get_session_key_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN]; + u8 r_iv[HDCP_2_2_RIV_LEN]; +} __packed; + +/* HECI struct for the Tx enable authentication command */ +struct wired_cmd_enable_auth_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 stream_type; +} __packed; + +struct wired_cmd_enable_auth_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +/* + * Data structures for integrated wired HDCP2 Tx in support of + * the repeater protocols + */ +/* + * HECI struct for verifying the downstream repeater's HDCP topology in an + * integrated wired HDCP Tx session. + */ +struct wired_cmd_verify_repeater_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN]; + u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN]; + u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN]; +} __packed; + +struct wired_cmd_verify_repeater_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 content_type_supported; + u8 v[HDCP_2_2_V_PRIME_HALF_LEN]; +} __packed; + +/* + * HECI struct in support of stream management in an + * integrated wired HDCP Tx session. 
+ */ +struct wired_cmd_repeater_auth_stream_req_in { + struct hdcp_cmd_header header; + struct hdcp_port_id port; + u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN]; + u8 m_prime[HDCP_2_2_MPRIME_LEN]; + __be16 k; + struct hdcp2_streamid_type streams[1]; +} __packed; + +struct wired_cmd_repeater_auth_stream_req_out { + struct hdcp_cmd_header header; + struct hdcp_port_id port; +} __packed; + +enum mei_fw_ddi { + MEI_DDI_INVALID_PORT = 0x0, + + MEI_DDI_B = 1, + MEI_DDI_C, + MEI_DDI_D, + MEI_DDI_E, + MEI_DDI_F, + MEI_DDI_A = 7, + MEI_DDI_RANGE_END = MEI_DDI_A, +}; +#endif /* __MEI_HDCP_H__ */ diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 23739a60517f..bb1ee9834a02 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -139,6 +139,8 @@ #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ +#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h index 2b7f7677f8cc..b7d2487b8409 100644 --- a/drivers/misc/mei/hw.h +++ b/drivers/misc/mei/hw.h @@ -311,7 +311,8 @@ struct mei_client_properties { u8 protocol_version; u8 max_number_of_connections; u8 fixed_address; - u8 single_recv_buf; + u8 single_recv_buf:1; + u8 reserved:7; u32 max_msg_length; } __packed; diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index e89497f858ae..3ab946ad3257 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index 227cc7443671..242dcee14689 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig @@ -38,7 +38,6 @@ comment "VOP Bus Driver" config VOP_BUS tristate "VOP Bus Driver" - depends on 64BIT && PCI && X86 && X86_DEV_DMA_OPS help This option is selected by any driver which registers a device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST @@ -132,7 +131,7 @@ comment "VOP Driver" config VOP tristate "VOP Driver" - depends on 64BIT && PCI && X86 && VOP_BUS + depends on VOP_BUS select VHOST_RING select VIRTIO help diff --git a/drivers/misc/mic/bus/scif_bus.h b/drivers/misc/mic/bus/scif_bus.h index ff59568219ad..377a4f38cd7e 100644 --- a/drivers/misc/mic/bus/scif_bus.h +++ b/drivers/misc/mic/bus/scif_bus.h @@ -88,8 +88,8 @@ struct scif_driver { * @send_intr: Send an interrupt to the remote node on a specified doorbell. * @send_p2p_intr: Send an interrupt to the peer node on a specified doorbell * which is specifically targeted for a peer to peer node. - * @ioremap: Map a buffer with the specified physical address and length. - * @iounmap: Unmap a buffer previously mapped. + * @remap: Map a buffer with the specified physical address and length. + * @unmap: Unmap a buffer previously mapped. 
*/ struct scif_hw_ops { int (*next_db)(struct scif_hw_dev *sdev); @@ -104,9 +104,9 @@ struct scif_hw_ops { void (*send_intr)(struct scif_hw_dev *sdev, int db); void (*send_p2p_intr)(struct scif_hw_dev *sdev, int db, struct mic_mw *mw); - void __iomem * (*ioremap)(struct scif_hw_dev *sdev, + void __iomem * (*remap)(struct scif_hw_dev *sdev, phys_addr_t pa, size_t len); - void (*iounmap)(struct scif_hw_dev *sdev, void __iomem *va); + void (*unmap)(struct scif_hw_dev *sdev, void __iomem *va); }; int scif_register_driver(struct scif_driver *driver); diff --git a/drivers/misc/mic/bus/vop_bus.h b/drivers/misc/mic/bus/vop_bus.h index fff7a865d721..cf5f3fae573c 100644 --- a/drivers/misc/mic/bus/vop_bus.h +++ b/drivers/misc/mic/bus/vop_bus.h @@ -87,8 +87,8 @@ struct vop_driver { * @get_dp: Get access to the virtio device page used by the self * node to add/remove/configure virtio devices. * @send_intr: Send an interrupt to the peer node on a specified doorbell. - * @ioremap: Map a buffer with the specified DMA address and length. - * @iounmap: Unmap a buffer previously mapped. + * @remap: Map a buffer with the specified DMA address and length. + * @unmap: Unmap a buffer previously mapped. * @dma_filter: The DMA filter function to use for obtaining access to * a DMA channel on the peer node. */ @@ -104,9 +104,9 @@ struct vop_hw_ops { void __iomem * (*get_remote_dp)(struct vop_device *vpdev); void * (*get_dp)(struct vop_device *vpdev); void (*send_intr)(struct vop_device *vpdev, int db); - void __iomem * (*ioremap)(struct vop_device *vpdev, + void __iomem * (*remap)(struct vop_device *vpdev, dma_addr_t pa, size_t len); - void (*iounmap)(struct vop_device *vpdev, void __iomem *va); + void (*unmap)(struct vop_device *vpdev, void __iomem *va); }; struct vop_device * diff --git a/drivers/misc/mic/card/mic_device.c b/drivers/misc/mic/card/mic_device.c index e749af48f736..dcd07ef29801 100644 --- a/drivers/misc/mic/card/mic_device.c +++ b/drivers/misc/mic/card/mic_device.c @@ -245,8 +245,8 @@ static struct scif_hw_ops scif_hw_ops = { .next_db = ___mic_next_db, .send_intr = ___mic_send_intr, .send_p2p_intr = ___mic_send_p2p_intr, - .ioremap = ___mic_ioremap, - .iounmap = ___mic_iounmap, + .remap = ___mic_ioremap, + .unmap = ___mic_iounmap, }; static inline struct mic_driver *vpdev_to_mdrv(struct vop_device *vpdev) @@ -316,8 +316,8 @@ static struct vop_hw_ops vop_hw_ops = { .next_db = __mic_next_db, .get_remote_dp = __mic_get_remote_dp, .send_intr = __mic_send_intr, - .ioremap = __mic_ioremap, - .iounmap = __mic_iounmap, + .remap = __mic_ioremap, + .unmap = __mic_iounmap, }; static int mic_request_dma_chans(struct mic_driver *mdrv) diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c index 6479435ac96b..079c36f0ce6e 100644 --- a/drivers/misc/mic/host/mic_boot.c +++ b/drivers/misc/mic/host/mic_boot.c @@ -133,8 +133,8 @@ static struct vop_hw_ops vop_hw_ops = { .get_dp = __mic_get_dp, .get_remote_dp = __mic_get_remote_dp, .send_intr = __mic_send_intr, - .ioremap = __mic_ioremap, - .iounmap = __mic_iounmap, + .remap = __mic_ioremap, + .unmap = __mic_iounmap, }; static inline struct mic_device *scdev_to_mdev(struct scif_hw_dev *scdev) @@ -315,8 +315,8 @@ static struct scif_hw_ops scif_hw_ops = { .ack_interrupt = ___mic_ack_interrupt, .next_db = ___mic_next_db, .send_intr = ___mic_send_intr, - .ioremap = ___mic_ioremap, - .iounmap = ___mic_iounmap, + .remap = ___mic_ioremap, + .unmap = ___mic_iounmap, }; static inline struct mic_device *mbdev_to_mdev(struct mbus_device *mbdev) diff --git 
a/drivers/misc/mic/scif/scif_map.h b/drivers/misc/mic/scif/scif_map.h index 3e86360ba5a6..7b380534eba1 100644 --- a/drivers/misc/mic/scif/scif_map.h +++ b/drivers/misc/mic/scif/scif_map.h @@ -97,7 +97,7 @@ scif_ioremap(dma_addr_t phys, size_t size, struct scif_dev *scifdev) out_virt = phys_to_virt(phys); else out_virt = (void __force *) - sdev->hw_ops->ioremap(sdev, phys, size); + sdev->hw_ops->remap(sdev, phys, size); return out_virt; } @@ -107,7 +107,7 @@ scif_iounmap(void *virt, size_t len, struct scif_dev *scifdev) if (!scifdev_self(scifdev)) { struct scif_hw_dev *sdev = scifdev->sdev; - sdev->hw_ops->iounmap(sdev, (void __force __iomem *)virt); + sdev->hw_ops->unmap(sdev, (void __force __iomem *)virt); } } diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c index 749321eb91ae..0fb9b440dc70 100644 --- a/drivers/misc/mic/scif/scif_rma.c +++ b/drivers/misc/mic/scif/scif_rma.c @@ -272,21 +272,12 @@ static inline void __scif_release_mm(struct mm_struct *mm) static inline int __scif_dec_pinned_vm_lock(struct mm_struct *mm, - int nr_pages, bool try_lock) + int nr_pages) { if (!mm || !nr_pages || !scif_ulimit_check) return 0; - if (try_lock) { - if (!down_write_trylock(&mm->mmap_sem)) { - dev_err(scif_info.mdev.this_device, - "%s %d err\n", __func__, __LINE__); - return -1; - } - } else { - down_write(&mm->mmap_sem); - } - mm->pinned_vm -= nr_pages; - up_write(&mm->mmap_sem); + + atomic64_sub(nr_pages, &mm->pinned_vm); return 0; } @@ -298,16 +289,16 @@ static inline int __scif_check_inc_pinned_vm(struct mm_struct *mm, if (!mm || !nr_pages || !scif_ulimit_check) return 0; - locked = nr_pages; - locked += mm->pinned_vm; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + locked = atomic64_add_return(nr_pages, &mm->pinned_vm); + if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { + atomic64_sub(nr_pages, &mm->pinned_vm); dev_err(scif_info.mdev.this_device, "locked(%lu) > lock_limit(%lu)\n", locked, lock_limit); return -ENOMEM; } - mm->pinned_vm = locked; return 0; } @@ -326,7 +317,7 @@ int scif_destroy_window(struct scif_endpt *ep, struct scif_window *window) might_sleep(); if (!window->temp && window->mm) { - __scif_dec_pinned_vm_lock(window->mm, window->nr_pages, 0); + __scif_dec_pinned_vm_lock(window->mm, window->nr_pages); __scif_release_mm(window->mm); window->mm = NULL; } @@ -672,8 +663,8 @@ int scif_unregister_window(struct scif_window *window) { window->unreg_state = OP_IN_PROGRESS; send_msg = true; - /* fall through */ } + /* fall through */ case OP_IN_PROGRESS: { scif_get_window(window, 1); @@ -737,7 +728,7 @@ done: ep->rma_info.dma_chan); } else { if (!__scif_dec_pinned_vm_lock(window->mm, - window->nr_pages, 1)) { + window->nr_pages)) { __scif_release_mm(window->mm); window->mm = NULL; } @@ -1385,28 +1376,23 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot, prot |= SCIF_PROT_WRITE; retry: mm = current->mm; - down_write(&mm->mmap_sem); if (ulimit) { err = __scif_check_inc_pinned_vm(mm, nr_pages); if (err) { - up_write(&mm->mmap_sem); pinned_pages->nr_pages = 0; goto error_unmap; } } - pinned_pages->nr_pages = get_user_pages( + pinned_pages->nr_pages = get_user_pages_fast( (u64)addr, nr_pages, (prot & SCIF_PROT_WRITE) ? 
FOLL_WRITE : 0, - pinned_pages->pages, - NULL); - up_write(&mm->mmap_sem); + pinned_pages->pages); if (nr_pages != pinned_pages->nr_pages) { if (try_upgrade) { if (ulimit) - __scif_dec_pinned_vm_lock(mm, - nr_pages, 0); + __scif_dec_pinned_vm_lock(mm, nr_pages); /* Roll back any pinned pages */ for (i = 0; i < pinned_pages->nr_pages; i++) { if (pinned_pages->pages[i]) @@ -1433,7 +1419,7 @@ retry: return err; dec_pinned: if (ulimit) - __scif_dec_pinned_vm_lock(mm, nr_pages, 0); + __scif_dec_pinned_vm_lock(mm, nr_pages); /* Something went wrong! Rollback */ error_unmap: pinned_pages->nr_pages = nr_pages; diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 2bfa3a903bf9..e37b2c2152a2 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "vop_main.h" @@ -47,7 +48,8 @@ * @dc: Virtio device control * @vpdev: VOP device which is the parent for this virtio device * @vr: Buffer for accessing the VRING - * @used: Buffer for used + * @used_virt: Virtual address of used ring + * @used: DMA address of used ring * @used_size: Size of the used buffer * @reset_done: Track whether VOP reset is complete * @virtio_cookie: Cookie returned upon requesting a interrupt @@ -61,6 +63,7 @@ struct _vop_vdev { struct mic_device_ctrl __iomem *dc; struct vop_device *vpdev; void __iomem *vr[VOP_MAX_VRINGS]; + void *used_virt[VOP_MAX_VRINGS]; dma_addr_t used[VOP_MAX_VRINGS]; int used_size[VOP_MAX_VRINGS]; struct completion reset_done; @@ -116,7 +119,7 @@ _vop_total_desc_size(struct mic_device_desc __iomem *desc) static u64 vop_get_features(struct virtio_device *vdev) { unsigned int i, bits; - u32 features = 0; + u64 features = 0; struct mic_device_desc __iomem *desc = to_vopvdev(vdev)->desc; u8 __iomem *in_features = _vop_vq_features(desc); int feature_len = ioread8(&desc->feature_len); @@ -124,7 +127,7 @@ static u64 vop_get_features(struct virtio_device *vdev) bits = min_t(unsigned, feature_len, sizeof(vdev->features)) * 8; for (i = 0; i < bits; i++) if (ioread8(&in_features[i / 8]) & (BIT(i % 8))) - features |= BIT(i); + features |= BIT_ULL(i); return features; } @@ -226,7 +229,7 @@ static void vop_reset_inform_host(struct virtio_device *dev) if (ioread8(&dc->host_ack)) break; msleep(100); - }; + } dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); @@ -260,14 +263,14 @@ static bool vop_notify(struct virtqueue *vq) static void vop_del_vq(struct virtqueue *vq, int n) { struct _vop_vdev *vdev = to_vopvdev(vq->vdev); - struct vring *vr = (struct vring *)(vq + 1); struct vop_device *vpdev = vdev->vpdev; dma_unmap_single(&vpdev->dev, vdev->used[n], vdev->used_size[n], DMA_BIDIRECTIONAL); - free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); + free_pages((unsigned long)vdev->used_virt[n], + get_order(vdev->used_size[n])); vring_del_virtqueue(vq); - vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); + vpdev->hw_ops->unmap(vpdev, vdev->vr[n]); vdev->vr[n] = NULL; } @@ -283,6 +286,26 @@ static void vop_del_vqs(struct virtio_device *dev) vop_del_vq(vq, idx++); } +static struct virtqueue *vop_new_virtqueue(unsigned int index, + unsigned int num, + struct virtio_device *vdev, + bool context, + void *pages, + bool (*notify)(struct virtqueue *vq), + void (*callback)(struct virtqueue *vq), + const char *name, + void *used) +{ + bool weak_barriers = false; + struct vring vring; + + vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN); + vring.used = used; + + return 
__vring_new_virtqueue(index, vring, vdev, weak_barriers, context, + notify, callback, name); +} + /* * This routine will assign vring's allocated in host/io memory. Code in * virtio_ring.c however continues to access this io memory as if it were local @@ -302,7 +325,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, struct _mic_vring_info __iomem *info; void *used; int vr_size, _vr_size, err, magic; - struct vring *vr; u8 type = ioread8(&vdev->desc->type); if (index >= ioread8(&vdev->desc->num_vq)) @@ -316,23 +338,12 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, memcpy_fromio(&config, vqconfig, sizeof(config)); _vr_size = vring_size(le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN); vr_size = PAGE_ALIGN(_vr_size + sizeof(struct _mic_vring_info)); - va = vpdev->hw_ops->ioremap(vpdev, le64_to_cpu(config.address), - vr_size); + va = vpdev->hw_ops->remap(vpdev, le64_to_cpu(config.address), vr_size); if (!va) return ERR_PTR(-ENOMEM); vdev->vr[index] = va; memset_io(va, 0x0, _vr_size); - vq = vring_new_virtqueue( - index, - le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, - dev, - false, - ctx, - (void __force *)va, vop_notify, callback, name); - if (!vq) { - err = -ENOMEM; - goto unmap; - } + info = va + _vr_size; magic = ioread32(&info->magic); @@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, goto unmap; } - /* Allocate and reassign used ring now */ vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * le16_to_cpu(config.num)); used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(vdev->used_size[index])); + vdev->used_virt[index] = used; if (!used) { err = -ENOMEM; dev_err(_vop_dev(vdev), "%s %d err %d\n", __func__, __LINE__, err); - goto del_vq; + goto unmap; } + + vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx, + (void __force *)va, vop_notify, callback, + name, used); + if (!vq) { + err = -ENOMEM; + goto free_used; + } + vdev->used[index] = dma_map_single(&vpdev->dev, used, vdev->used_size[index], DMA_BIDIRECTIONAL); @@ -360,28 +380,19 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, err = -ENOMEM; dev_err(_vop_dev(vdev), "%s %d err %d\n", __func__, __LINE__, err); - goto free_used; + goto del_vq; } writeq(vdev->used[index], &vqconfig->used_address); - /* - * To reassign the used ring here we are directly accessing - * struct vring_virtqueue which is a private data structure - * in virtio_ring.c. 
At the minimum, a BUILD_BUG_ON() in - * vring_new_virtqueue() would ensure that - * (&vq->vring == (struct vring *) (&vq->vq + 1)); - */ - vr = (struct vring *)(vq + 1); - vr->used = used; vq->priv = vdev; return vq; +del_vq: + vring_del_virtqueue(vq); free_used: free_pages((unsigned long)used, get_order(vdev->used_size[index])); -del_vq: - vring_del_virtqueue(vq); unmap: - vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); + vpdev->hw_ops->unmap(vpdev, vdev->vr[index]); return ERR_PTR(err); } @@ -426,7 +437,7 @@ static int vop_find_vqs(struct virtio_device *dev, unsigned nvqs, if (!ioread8(&dc->used_address_updated)) break; msleep(100); - }; + } dev_dbg(_vop_dev(vdev), "%s: retry: %d\n", __func__, retry); if (!retry) { @@ -502,7 +513,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d, vdev->desc = d; vdev->dc = (void __iomem *)d + _vop_aligned_desc_size(d); vdev->dnode = dnode; - vdev->vdev.priv = (void *)(u64)dnode; + vdev->vdev.priv = (void *)(unsigned long)dnode; init_completion(&vdev->reset_done); vdev->h2c_vdev_db = vpdev->hw_ops->next_db(vpdev); @@ -524,7 +535,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d, offset, type); goto free_irq; } - writeq((u64)vdev, &vdev->dc->vdev); + writeq((unsigned long)vdev, &vdev->dc->vdev); dev_dbg(_vop_dev(vdev), "%s: registered vop device %u type %u vdev %p\n", __func__, offset, type, vdev); @@ -551,13 +562,18 @@ static int vop_match_desc(struct device *dev, void *data) return vdev->desc == (void __iomem *)data; } +static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc) +{ + return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev); +} + static void _vop_handle_config_change(struct mic_device_desc __iomem *d, unsigned int offset, struct vop_device *vpdev) { struct mic_device_ctrl __iomem *dc = (void __iomem *)d + _vop_aligned_desc_size(d); - struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev); + struct _vop_vdev *vdev = vop_dc_to_vdev(dc); if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED) return; @@ -576,11 +592,13 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, { struct mic_device_ctrl __iomem *dc = (void __iomem *)d + _vop_aligned_desc_size(d); - struct _vop_vdev *vdev = (struct _vop_vdev *)readq(&dc->vdev); + struct _vop_vdev *vdev = vop_dc_to_vdev(dc); u8 status; int ret = -1; if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { + struct device *dev = get_device(&vdev->vdev.dev); + dev_dbg(&vpdev->dev, "%s %d config_change %d type %d vdev %p\n", __func__, __LINE__, @@ -592,7 +610,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, iowrite8(-1, &dc->h2c_vdev_db); if (status & VIRTIO_CONFIG_S_DRIVER_OK) wait_for_completion(&vdev->reset_done); - put_device(&vdev->vdev.dev); + put_device(dev); iowrite8(1, &dc->guest_ack); dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", __func__, __LINE__, ioread8(&dc->guest_ack)); diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c index cbc8ebcff5cf..3632fce40590 100644 --- a/drivers/misc/mic/vop/vop_vringh.c +++ b/drivers/misc/mic/vop/vop_vringh.c @@ -80,7 +80,7 @@ static void vop_virtio_init_post(struct vop_vdev *vdev) continue; } vdev->vvr[i].vrh.vring.used = - (void __force *)vpdev->hw_ops->ioremap( + (void __force *)vpdev->hw_ops->remap( vpdev, le64_to_cpu(vqconfig[i].used_address), used_size); @@ -528,15 +528,15 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf, int vr_idx) { struct vop_device *vpdev = vdev->vpdev; - void __iomem 
*dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len); + void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len); struct vop_vringh *vvr = &vdev->vvr[vr_idx]; struct vop_info *vi = dev_get_drvdata(&vpdev->dev); - size_t dma_alignment = 1 << vi->dma_ch->device->copy_align; - bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); + size_t dma_alignment; + bool x200; size_t dma_offset, partlen; int err; - if (!VOP_USE_DMA) { + if (!VOP_USE_DMA || !vi->dma_ch) { if (copy_to_user(ubuf, (void __force *)dbuf, len)) { err = -EFAULT; dev_err(vop_dev(vdev), "%s %d err %d\n", @@ -548,6 +548,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf, goto err; } + dma_alignment = 1 << vi->dma_ch->device->copy_align; + x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); + dma_offset = daddr - round_down(daddr, dma_alignment); daddr -= dma_offset; len += dma_offset; @@ -585,9 +588,9 @@ static int vop_virtio_copy_to_user(struct vop_vdev *vdev, void __user *ubuf, } err = 0; err: - vpdev->hw_ops->iounmap(vpdev, dbuf); + vpdev->hw_ops->unmap(vpdev, dbuf); dev_dbg(vop_dev(vdev), - "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n", + "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n", __func__, ubuf, dbuf, len, vr_idx); return err; } @@ -603,21 +606,26 @@ static int vop_virtio_copy_from_user(struct vop_vdev *vdev, void __user *ubuf, int vr_idx) { struct vop_device *vpdev = vdev->vpdev; - void __iomem *dbuf = vpdev->hw_ops->ioremap(vpdev, daddr, len); + void __iomem *dbuf = vpdev->hw_ops->remap(vpdev, daddr, len); struct vop_vringh *vvr = &vdev->vvr[vr_idx]; struct vop_info *vi = dev_get_drvdata(&vdev->vpdev->dev); - size_t dma_alignment = 1 << vi->dma_ch->device->copy_align; - bool x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); + size_t dma_alignment; + bool x200; size_t partlen; - bool dma = VOP_USE_DMA; + bool dma = VOP_USE_DMA && vi->dma_ch; int err = 0; - if (daddr & (dma_alignment - 1)) { - vdev->tx_dst_unaligned += len; - dma = false; - } else if (ALIGN(len, dma_alignment) > dlen) { - vdev->tx_len_unaligned += len; - dma = false; + if (dma) { + dma_alignment = 1 << vi->dma_ch->device->copy_align; + x200 = is_dma_copy_aligned(vi->dma_ch->device, 1, 1, 1); + + if (daddr & (dma_alignment - 1)) { + vdev->tx_dst_unaligned += len; + dma = false; + } else if (ALIGN(len, dma_alignment) > dlen) { + vdev->tx_len_unaligned += len; + dma = false; + } } if (!dma) @@ -668,9 +676,9 @@ memcpy: vdev->out_bytes += len; err = 0; err: - vpdev->hw_ops->iounmap(vpdev, dbuf); + vpdev->hw_ops->unmap(vpdev, dbuf); dev_dbg(vop_dev(vdev), - "%s: ubuf %p dbuf %p len 0x%lx vr_idx 0x%x\n", + "%s: ubuf %p dbuf %p len 0x%zx vr_idx 0x%x\n", __func__, ubuf, dbuf, len, vr_idx); return err; } @@ -704,16 +712,17 @@ static int vop_vringh_copy(struct vop_vdev *vdev, struct vringh_kiov *iov, while (len && iov->i < iov->used) { struct kvec *kiov = &iov->iov[iov->i]; + unsigned long daddr = (unsigned long)kiov->iov_base; partlen = min(kiov->iov_len, len); if (read) ret = vop_virtio_copy_to_user(vdev, ubuf, partlen, - (u64)kiov->iov_base, + daddr, kiov->iov_len, vr_idx); else ret = vop_virtio_copy_from_user(vdev, ubuf, partlen, - (u64)kiov->iov_base, + daddr, kiov->iov_len, vr_idx); if (ret) { diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c index 540845651b8c..309703e9c42e 100644 --- a/drivers/misc/pch_phub.c +++ b/drivers/misc/pch_phub.c @@ -64,7 +64,6 @@ #define CLKCFG_UARTCLKSEL (1 << 18) /* Macros for ML7213 */ -#define PCI_VENDOR_ID_ROHM 0x10db #define PCI_DEVICE_ID_ROHM_ML7213_PHUB 0x801A 
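The reshuffling in vop_virtio_copy_to_user() and vop_virtio_copy_from_user() above exists because vi->dma_ch may be NULL when no DMA channel was obtained, and the old initializers dereferenced it before the memcpy fallback check. A minimal, self-contained sketch of the same guard pattern, using hypothetical names rather than the driver's real API:

#include <stddef.h>
#include <string.h>

struct dma_chan { unsigned int copy_align; };

/* Only touch the DMA channel after confirming one exists; otherwise fall
 * back to a plain CPU copy, as the vop change does. */
static void copy_buf(void *dst, const void *src, size_t len,
		     const struct dma_chan *ch)
{
	size_t dma_alignment;

	if (!ch) {			/* no channel: no dereference at all */
		memcpy(dst, src, len);
		return;
	}

	dma_alignment = (size_t)1 << ch->copy_align;	/* safe: ch checked */
	(void)dma_alignment;		/* an aligned DMA transfer would go here */
	memcpy(dst, src, len);		/* placeholder for the DMA path */
}

Deferring the dma_alignment and x200 computations until after the early return is what lets the copy_to_user/copy_from_user fallback run on hosts that never got a DMA channel.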
/* Macros for ML7223 */ diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 896e2df9400f..29582fe57151 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -788,6 +788,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) static const struct pci_device_id pci_endpoint_test_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, { } }; diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 93be82fc338a..2ec5808ba464 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -616,8 +616,8 @@ irqreturn_t gru_intr_mblade(int irq, void *dev_id) for_each_possible_blade(blade) { if (uv_blade_nr_possible_cpus(blade)) continue; - gru_intr(0, blade); - gru_intr(1, blade); + gru_intr(0, blade); + gru_intr(1, blade); } return IRQ_HANDLED; } diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 0441abe87880..9e443df44b3b 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #if defined CONFIG_X86_64 #include @@ -61,7 +62,7 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv; XPC_NOTIFY_MSG_SIZE_UV) #define XPC_NOTIFY_IRQ_NAME "xpc_notify" -static int xpc_mq_node = -1; +static int xpc_mq_node = NUMA_NO_NODE; static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index f8240b87df22..ad807d5a3141 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -34,7 +34,6 @@ MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); -MODULE_VERSION("1.5.0.0-k"); MODULE_ALIAS("dmi:*:svnVMware*:*"); MODULE_ALIAS("vmware_vmmemctl"); MODULE_LICENSE("GPL"); @@ -73,15 +72,26 @@ enum vmwballoon_capabilities { VMW_BALLOON_BATCHED_CMDS = (1 << 2), VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3), VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4), + VMW_BALLOON_64_BIT_TARGET = (1 << 5) }; -#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_BASIC_CMDS \ +#define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \ | VMW_BALLOON_BATCHED_CMDS \ | VMW_BALLOON_BATCHED_2M_CMDS \ | VMW_BALLOON_SIGNALLED_WAKEUP_CMD) #define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT) +/* + * 64-bit targets are only supported in 64-bit + */ +#ifdef CONFIG_64BIT +#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \ + | VMW_BALLOON_64_BIT_TARGET) +#else +#define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON +#endif + enum vmballoon_page_size_type { VMW_BALLOON_4K_PAGE, VMW_BALLOON_2M_PAGE, @@ -556,6 +566,36 @@ vmballoon_page_in_frames(enum vmballoon_page_size_type page_size) return 1 << vmballoon_page_order(page_size); } +/** + * vmballoon_mark_page_offline() - mark a page as offline + * @page: pointer for the page. + * @page_size: the size of the page. + */ +static void +vmballoon_mark_page_offline(struct page *page, + enum vmballoon_page_size_type page_size) +{ + int i; + + for (i = 0; i < vmballoon_page_in_frames(page_size); i++) + __SetPageOffline(page + i); +} + +/** + * vmballoon_mark_page_online() - mark a page as online + * @page: pointer for the page. + * @page_size: the size of the page. 
+ */ +static void +vmballoon_mark_page_online(struct page *page, + enum vmballoon_page_size_type page_size) +{ + int i; + + for (i = 0; i < vmballoon_page_in_frames(page_size); i++) + __ClearPageOffline(page + i); +} + /** * vmballoon_send_get_target() - Retrieve desired balloon size from the host. * @@ -572,8 +612,9 @@ static int vmballoon_send_get_target(struct vmballoon *b) limit = totalram_pages(); - /* Ensure limit fits in 32-bits */ - if (limit != (u32)limit) + /* Ensure limit fits in 32-bits if 64-bit targets are not supported */ + if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) && + limit != (u32)limit) return -EINVAL; status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0); @@ -612,6 +653,7 @@ static int vmballoon_alloc_page_list(struct vmballoon *b, ctl->page_size); if (page) { + vmballoon_mark_page_offline(page, ctl->page_size); /* Success. Add the page to the list and continue. */ list_add(&page->lru, &ctl->pages); continue; @@ -850,6 +892,7 @@ static void vmballoon_release_page_list(struct list_head *page_list, list_for_each_entry_safe(page, tmp, page_list, lru) { list_del(&page->lru); + vmballoon_mark_page_online(page, page_size); __free_pages(page, vmballoon_page_order(page_size)); } @@ -1287,7 +1330,7 @@ static void vmballoon_reset(struct vmballoon *b) vmballoon_pop(b); if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES)) - return; + goto unlock; if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) { if (vmballoon_init_batching(b)) { @@ -1298,7 +1341,7 @@ static void vmballoon_reset(struct vmballoon *b) * The guest will retry in one second. */ vmballoon_send_start(b, 0); - return; + goto unlock; } } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) { vmballoon_deinit_batching(b); @@ -1314,6 +1357,7 @@ static void vmballoon_reset(struct vmballoon *b) if (vmballoon_send_guest_id(b)) pr_err("failed to send guest ID to the host\n"); +unlock: up_write(&b->conf_sem); } diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c index b3fa738ae005..7824c7494916 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.c +++ b/drivers/misc/vmw_vmci/vmci_doorbell.c @@ -330,7 +330,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle) /* * Register the notification bitmap with the host. 
*/ -bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn) +bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn) { int result; struct vmci_notify_bm_set_msg bitmap_set_msg; @@ -340,11 +340,14 @@ bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn) bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE; bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) - VMCI_DG_HEADERSIZE; - bitmap_set_msg.bitmap_ppn = bitmap_ppn; + if (vmci_use_ppn64()) + bitmap_set_msg.bitmap_ppn64 = bitmap_ppn; + else + bitmap_set_msg.bitmap_ppn32 = (u32) bitmap_ppn; result = vmci_send_datagram(&bitmap_set_msg.hdr); if (result != VMCI_SUCCESS) { - pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n", + pr_devel("Failed to register (PPN=%llu) as notification bitmap (error=%d)\n", bitmap_ppn, result); return false; } diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h index e4c0b17486a5..410a21f8436f 100644 --- a/drivers/misc/vmw_vmci/vmci_doorbell.h +++ b/drivers/misc/vmw_vmci/vmci_doorbell.h @@ -45,7 +45,7 @@ struct dbell_cpt_state { int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle); int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags); -bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn); +bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn); void vmci_dbell_scan_notification_entries(u8 *bitmap); #endif /* VMCI_DOORBELL_H */ diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h index cee9e977d318..2fbf4a0ac657 100644 --- a/drivers/misc/vmw_vmci/vmci_driver.h +++ b/drivers/misc/vmw_vmci/vmci_driver.h @@ -54,4 +54,6 @@ void vmci_guest_exit(void); bool vmci_guest_code_active(void); u32 vmci_get_vm_context_id(void); +bool vmci_use_ppn64(void); + #endif /* _VMCI_DRIVER_H_ */ diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index dad5abee656e..928708128177 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c @@ -64,6 +64,13 @@ struct vmci_guest_device { dma_addr_t notification_base; }; +static bool use_ppn64; + +bool vmci_use_ppn64(void) +{ + return use_ppn64; +} + /* vmci_dev singleton device and supporting data*/ struct pci_dev *vmci_pdev; static struct vmci_guest_device *vmci_dev_g; @@ -432,6 +439,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, struct vmci_guest_device *vmci_dev; void __iomem *iobase; unsigned int capabilities; + unsigned int caps_in_use; unsigned long cmd; int vmci_err; int error; @@ -496,6 +504,23 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, error = -ENXIO; goto err_free_data_buffer; } + caps_in_use = VMCI_CAPS_DATAGRAM; + + /* + * Use 64-bit PPNs if the device supports. + * + * There is no check for the return value of dma_set_mask_and_coherent + * since this driver can handle the default mask values if + * dma_set_mask_and_coherent fails. 
+ */ + if (capabilities & VMCI_CAPS_PPN64) { + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + use_ppn64 = true; + caps_in_use |= VMCI_CAPS_PPN64; + } else { + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + use_ppn64 = false; + } /* * If the hardware supports notifications, we will use that as @@ -510,14 +535,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, "Unable to allocate notification bitmap\n"); } else { memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); - capabilities |= VMCI_CAPS_NOTIFICATIONS; + caps_in_use |= VMCI_CAPS_NOTIFICATIONS; } } - dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities); + dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use); /* Let the host know which capabilities we intend to use. */ - iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR); + iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR); /* Set up global device so that we can start sending datagrams */ spin_lock_irq(&vmci_dev_spinlock); @@ -529,13 +554,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, * Register notification bitmap with device if that capability is * used. */ - if (capabilities & VMCI_CAPS_NOTIFICATIONS) { + if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) { unsigned long bitmap_ppn = vmci_dev->notification_base >> PAGE_SHIFT; if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { dev_warn(&pdev->dev, - "VMCI device unable to register notification bitmap with PPN 0x%x\n", - (u32) bitmap_ppn); + "VMCI device unable to register notification bitmap with PPN 0x%lx\n", + bitmap_ppn); error = -ENXIO; goto err_remove_vmci_dev_g; } @@ -611,7 +636,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, /* Enable specific interrupt bits. */ cmd = VMCI_IMR_DATAGRAM; - if (capabilities & VMCI_CAPS_NOTIFICATIONS) + if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) cmd |= VMCI_IMR_NOTIFICATION; iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 264f4ed8eef2..f5f1aac9d163 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -435,8 +435,8 @@ static int qp_alloc_ppn_set(void *prod_q, void *cons_q, u64 num_consume_pages, struct ppn_set *ppn_set) { - u32 *produce_ppns; - u32 *consume_ppns; + u64 *produce_ppns; + u64 *consume_ppns; struct vmci_queue *produce_q = prod_q; struct vmci_queue *consume_q = cons_q; u64 i; @@ -462,31 +462,13 @@ static int qp_alloc_ppn_set(void *prod_q, return VMCI_ERROR_NO_MEM; } - for (i = 0; i < num_produce_pages; i++) { - unsigned long pfn; - + for (i = 0; i < num_produce_pages; i++) produce_ppns[i] = produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; - pfn = produce_ppns[i]; - - /* Fail allocation if PFN isn't supported by hypervisor. */ - if (sizeof(pfn) > sizeof(*produce_ppns) - && pfn != produce_ppns[i]) - goto ppn_error; - } - - for (i = 0; i < num_consume_pages; i++) { - unsigned long pfn; + for (i = 0; i < num_consume_pages; i++) consume_ppns[i] = consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; - pfn = consume_ppns[i]; - - /* Fail allocation if PFN isn't supported by hypervisor. 
*/ - if (sizeof(pfn) > sizeof(*consume_ppns) - && pfn != consume_ppns[i]) - goto ppn_error; - } ppn_set->num_produce_pages = num_produce_pages; ppn_set->num_consume_pages = num_consume_pages; @@ -494,11 +476,6 @@ static int qp_alloc_ppn_set(void *prod_q, ppn_set->consume_ppns = consume_ppns; ppn_set->initialized = true; return VMCI_SUCCESS; - - ppn_error: - kfree(produce_ppns); - kfree(consume_ppns); - return VMCI_ERROR_INVALID_ARGS; } /* @@ -520,12 +497,28 @@ static void qp_free_ppn_set(struct ppn_set *ppn_set) */ static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set) { - memcpy(call_buf, ppn_set->produce_ppns, - ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns)); - memcpy(call_buf + - ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns), - ppn_set->consume_ppns, - ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns)); + if (vmci_use_ppn64()) { + memcpy(call_buf, ppn_set->produce_ppns, + ppn_set->num_produce_pages * + sizeof(*ppn_set->produce_ppns)); + memcpy(call_buf + + ppn_set->num_produce_pages * + sizeof(*ppn_set->produce_ppns), + ppn_set->consume_ppns, + ppn_set->num_consume_pages * + sizeof(*ppn_set->consume_ppns)); + } else { + int i; + u32 *ppns = (u32 *) call_buf; + + for (i = 0; i < ppn_set->num_produce_pages; i++) + ppns[i] = (u32) ppn_set->produce_ppns[i]; + + ppns = &ppns[ppn_set->num_produce_pages]; + + for (i = 0; i < ppn_set->num_consume_pages; i++) + ppns[i] = (u32) ppn_set->consume_ppns[i]; + } return VMCI_SUCCESS; } @@ -951,13 +944,15 @@ static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry) { struct vmci_qp_alloc_msg *alloc_msg; size_t msg_size; + size_t ppn_size; int result; if (!entry || entry->num_ppns <= 2) return VMCI_ERROR_INVALID_ARGS; + ppn_size = vmci_use_ppn64() ? 
sizeof(u64) : sizeof(u32); msg_size = sizeof(*alloc_msg) + - (size_t) entry->num_ppns * sizeof(u32); + (size_t) entry->num_ppns * ppn_size; alloc_msg = kmalloc(msg_size, GFP_KERNEL); if (!alloc_msg) return VMCI_ERROR_NO_MEM; diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h index ed177f04ef24..46c0b6c7bafb 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.h +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h @@ -28,8 +28,8 @@ typedef int (*vmci_event_release_cb) (void *client_data); struct ppn_set { u64 num_produce_pages; u64 num_consume_pages; - u32 *produce_ppns; - u32 *consume_ppns; + u64 *produce_ppns; + u64 *consume_ppns; bool initialized; }; diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index abba078f7f49..95ffe008ebdf 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -8,7 +8,7 @@ mmc_core-y := core.o bus.o host.o \ mmc.o mmc_ops.o sd.o sd_ops.o \ sdio.o sdio_ops.o sdio_bus.o \ sdio_cis.o sdio_io.o sdio_irq.o \ - slot-gpio.o + slot-gpio.o regulator.o mmc_core-$(CONFIG_OF) += pwrseq.o obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o obj-$(CONFIG_PWRSEQ_SD8787) += pwrseq_sd8787.o diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index aef1185f383d..2c71a434c915 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1124,7 +1124,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = md->queue.card; - unsigned int from, nr, arg; + unsigned int from, nr; int err = 0, type = MMC_BLK_DISCARD; blk_status_t status = BLK_STS_OK; @@ -1136,24 +1136,18 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) from = blk_rq_pos(req); nr = blk_rq_sectors(req); - if (mmc_can_discard(card)) - arg = MMC_DISCARD_ARG; - else if (mmc_can_trim(card)) - arg = MMC_TRIM_ARG; - else - arg = MMC_ERASE_ARG; do { err = 0; if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, - arg == MMC_TRIM_ARG ? + card->erase_arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, 0); } if (!err) - err = mmc_erase(card, from, nr, arg); + err = mmc_erase(card, from, nr, card->erase_arg); } while (err == -EIO && !mmc_blk_reset(md, card->host, type)); if (err) status = BLK_STS_IOERR; @@ -2112,7 +2106,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) if (waiting) wake_up(&mq->wait); else - kblockd_schedule_work(&mq->complete_work); + queue_work(mq->card->complete_wq, &mq->complete_work); return; } @@ -2380,12 +2374,6 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%u%s", card->host->index, subname ? 
subname : ""); - if (mmc_card_mmc(card)) - blk_queue_logical_block_size(md->queue.queue, - card->ext_csd.data_sector_size); - else - blk_queue_logical_block_size(md->queue.queue, 512); - set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { @@ -2774,8 +2762,8 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) return ret; } -DEFINE_SIMPLE_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, - NULL, "%08llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(mmc_dbg_card_status_fops, mmc_dbg_card_status_get, + NULL, "%08llx\n"); /* That is two digits * 512 + 1 for newline */ #define EXT_CSD_STR_LEN 1025 @@ -2863,8 +2851,9 @@ static int mmc_blk_add_debugfs(struct mmc_card *card, struct mmc_blk_data *md) if (mmc_card_mmc(card) || mmc_card_sd(card)) { md->status_dentry = - debugfs_create_file("status", S_IRUSR, root, card, - &mmc_dbg_card_status_fops); + debugfs_create_file_unsafe("status", 0400, root, + card, + &mmc_dbg_card_status_fops); if (!md->status_dentry) return -EIO; } @@ -2924,6 +2913,13 @@ static int mmc_blk_probe(struct mmc_card *card) mmc_fixup_device(card, mmc_blk_fixups); + card->complete_wq = alloc_workqueue("mmc_complete", + WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); + if (unlikely(!card->complete_wq)) { + pr_err("Failed to create mmc completion workqueue"); + return -ENOMEM; + } + md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); @@ -2987,6 +2983,7 @@ static void mmc_blk_remove(struct mmc_card *card) pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); + destroy_workqueue(card->complete_wq); } static int _mmc_blk_suspend(struct mmc_card *card) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 5bd58b95d318..6db36dc870b5 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -52,6 +51,7 @@ /* The max erase timeout, used when host->max_busy_timeout isn't specified */ #define MMC_ERASE_TIMEOUT_MS (60 * 1000) /* 60 s */ +#define SD_DISCARD_TIMEOUT_MS (250) static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; @@ -95,7 +95,7 @@ static void mmc_should_fail_request(struct mmc_host *host, if (!data) return; - if (cmd->error || data->error || + if ((cmd && cmd->error) || data->error || !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) return; @@ -758,33 +758,6 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) } EXPORT_SYMBOL(mmc_set_data_timeout); -/** - * mmc_align_data_size - pads a transfer size to a more optimal value - * @card: the MMC card associated with the data transfer - * @sz: original transfer size - * - * Pads the original data size with a number of extra bytes in - * order to avoid controller bugs and/or performance hits - * (e.g. some controllers revert to PIO for certain sizes). - * - * Returns the improved size, which might be unmodified. - * - * Note that this function is only relevant when issuing a - * single scatter gather entry. - */ -unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) -{ - /* - * FIXME: We don't have a system for the controller to tell - * the core about its problems yet, so for now we just 32-bit - * align the size. - */ - sz = ((sz + 3) / 4) * 4; - - return sz; -} -EXPORT_SYMBOL(mmc_align_data_size); - /* * Allow claiming an already claimed host if the context is the same or there is * no context but the task is the same. 
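The block.c hunks above move request completion off kblockd and onto a per-card workqueue created in mmc_blk_probe() and destroyed in mmc_blk_remove(). A compressed sketch of that lifecycle, with the driver-specific pieces replaced by hypothetical placeholders (the real driver hangs complete_wq off struct mmc_card and complete_work off struct mmc_queue):

#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_dev {
	struct workqueue_struct *complete_wq;
	struct work_struct complete_work;
};

static void my_complete_work(struct work_struct *work)
{
	/* finish the request that could not be completed in issue context */
}

static int my_probe(struct my_dev *dev)
{
	/* WQ_MEM_RECLAIM gives the queue a rescuer thread so completion work
	 * cannot stall under memory pressure; WQ_HIGHPRI keeps latency low. */
	dev->complete_wq = alloc_workqueue("my_complete",
					   WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!dev->complete_wq)
		return -ENOMEM;
	INIT_WORK(&dev->complete_work, my_complete_work);
	return 0;
}

static void my_done(struct my_dev *dev)
{
	queue_work(dev->complete_wq, &dev->complete_work);	/* was kblockd */
}

static void my_remove(struct my_dev *dev)
{
	destroy_workqueue(dev->complete_wq);	/* drains pending work */
}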
@@ -1112,55 +1085,6 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) return mask; } -EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); - -#ifdef CONFIG_OF - -/** - * mmc_of_parse_voltage - return mask of supported voltages - * @np: The device node need to be parsed. - * @mask: mask of voltages available for MMC/SD/SDIO - * - * Parse the "voltage-ranges" DT property, returning zero if it is not - * found, negative errno if the voltage-range specification is invalid, - * or one if the voltage-range is specified and successfully parsed. - */ -int mmc_of_parse_voltage(struct device_node *np, u32 *mask) -{ - const u32 *voltage_ranges; - int num_ranges, i; - - voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); - num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; - if (!voltage_ranges) { - pr_debug("%pOF: voltage-ranges unspecified\n", np); - return 0; - } - if (!num_ranges) { - pr_err("%pOF: voltage-ranges empty\n", np); - return -EINVAL; - } - - for (i = 0; i < num_ranges; i++) { - const int j = i * 2; - u32 ocr_mask; - - ocr_mask = mmc_vddrange_to_ocrmask( - be32_to_cpu(voltage_ranges[j]), - be32_to_cpu(voltage_ranges[j + 1])); - if (!ocr_mask) { - pr_err("%pOF: voltage-range #%d is invalid\n", - np, i); - return -EINVAL; - } - *mask |= ocr_mask; - } - - return 1; -} -EXPORT_SYMBOL(mmc_of_parse_voltage); - -#endif /* CONFIG_OF */ static int mmc_of_get_func_num(struct device_node *node) { @@ -1190,246 +1114,6 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host, return NULL; } -#ifdef CONFIG_REGULATOR - -/** - * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage - * @vdd_bit: OCR bit number - * @min_uV: minimum voltage value (mV) - * @max_uV: maximum voltage value (mV) - * - * This function returns the voltage range according to the provided OCR - * bit number. If conversion is not possible a negative errno value returned. - */ -static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV) -{ - int tmp; - - if (!vdd_bit) - return -EINVAL; - - /* - * REVISIT mmc_vddrange_to_ocrmask() may have set some - * bits this regulator doesn't quite support ... don't - * be too picky, most cards and regulators are OK with - * a 0.1V range goof (it's a small error percentage). - */ - tmp = vdd_bit - ilog2(MMC_VDD_165_195); - if (tmp == 0) { - *min_uV = 1650 * 1000; - *max_uV = 1950 * 1000; - } else { - *min_uV = 1900 * 1000 + tmp * 100 * 1000; - *max_uV = *min_uV + 100 * 1000; - } - - return 0; -} - -/** - * mmc_regulator_get_ocrmask - return mask of supported voltages - * @supply: regulator to use - * - * This returns either a negative errno, or a mask of voltages that - * can be provided to MMC/SD/SDIO devices using the specified voltage - * regulator. This would normally be called before registering the - * MMC host adapter. 
- */ -int mmc_regulator_get_ocrmask(struct regulator *supply) -{ - int result = 0; - int count; - int i; - int vdd_uV; - int vdd_mV; - - count = regulator_count_voltages(supply); - if (count < 0) - return count; - - for (i = 0; i < count; i++) { - vdd_uV = regulator_list_voltage(supply, i); - if (vdd_uV <= 0) - continue; - - vdd_mV = vdd_uV / 1000; - result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); - } - - if (!result) { - vdd_uV = regulator_get_voltage(supply); - if (vdd_uV <= 0) - return vdd_uV; - - vdd_mV = vdd_uV / 1000; - result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); - } - - return result; -} -EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask); - -/** - * mmc_regulator_set_ocr - set regulator to match host->ios voltage - * @mmc: the host to regulate - * @supply: regulator to use - * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) - * - * Returns zero on success, else negative errno. - * - * MMC host drivers may use this to enable or disable a regulator using - * a particular supply voltage. This would normally be called from the - * set_ios() method. - */ -int mmc_regulator_set_ocr(struct mmc_host *mmc, - struct regulator *supply, - unsigned short vdd_bit) -{ - int result = 0; - int min_uV, max_uV; - - if (vdd_bit) { - mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV); - - result = regulator_set_voltage(supply, min_uV, max_uV); - if (result == 0 && !mmc->regulator_enabled) { - result = regulator_enable(supply); - if (!result) - mmc->regulator_enabled = true; - } - } else if (mmc->regulator_enabled) { - result = regulator_disable(supply); - if (result == 0) - mmc->regulator_enabled = false; - } - - if (result) - dev_err(mmc_dev(mmc), - "could not set regulator OCR (%d)\n", result); - return result; -} -EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); - -static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator, - int min_uV, int target_uV, - int max_uV) -{ - /* - * Check if supported first to avoid errors since we may try several - * signal levels during power up and don't want to show errors. - */ - if (!regulator_is_supported_voltage(regulator, min_uV, max_uV)) - return -EINVAL; - - return regulator_set_voltage_triplet(regulator, min_uV, target_uV, - max_uV); -} - -/** - * mmc_regulator_set_vqmmc - Set VQMMC as per the ios - * - * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible. - * That will match the behavior of old boards where VQMMC and VMMC were supplied - * by the same supply. The Bus Operating conditions for 3.3V signaling in the - * SD card spec also define VQMMC in terms of VMMC. - * If this is not possible we'll try the full 2.7-3.6V of the spec. - * - * For 1.2V and 1.8V signaling we'll try to get as close as possible to the - * requested voltage. This is definitely a good idea for UHS where there's a - * separate regulator on the card that's trying to make 1.8V and it's best if - * we match. - * - * This function is expected to be used by a controller's - * start_signal_voltage_switch() function. 
- */ -int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios) -{ - struct device *dev = mmc_dev(mmc); - int ret, volt, min_uV, max_uV; - - /* If no vqmmc supply then we can't change the voltage */ - if (IS_ERR(mmc->supply.vqmmc)) - return -EINVAL; - - switch (ios->signal_voltage) { - case MMC_SIGNAL_VOLTAGE_120: - return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - 1100000, 1200000, 1300000); - case MMC_SIGNAL_VOLTAGE_180: - return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - 1700000, 1800000, 1950000); - case MMC_SIGNAL_VOLTAGE_330: - ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV); - if (ret < 0) - return ret; - - dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n", - __func__, volt, max_uV); - - min_uV = max(volt - 300000, 2700000); - max_uV = min(max_uV + 200000, 3600000); - - /* - * Due to a limitation in the current implementation of - * regulator_set_voltage_triplet() which is taking the lowest - * voltage possible if below the target, search for a suitable - * voltage in two steps and try to stay close to vmmc - * with a 0.3V tolerance at first. - */ - if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - min_uV, volt, max_uV)) - return 0; - - return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, - 2700000, volt, 3600000); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc); - -#endif /* CONFIG_REGULATOR */ - -/** - * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host - * @mmc: the host to regulate - * - * Returns 0 or errno. errno should be handled, it is either a critical error - * or -EPROBE_DEFER. 0 means no critical error but it does not mean all - * regulators have been found because they all are optional. If you require - * certain regulators, you need to check separately in your driver if they got - * populated after calling this function. - */ -int mmc_regulator_get_supply(struct mmc_host *mmc) -{ - struct device *dev = mmc_dev(mmc); - int ret; - - mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); - mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); - - if (IS_ERR(mmc->supply.vmmc)) { - if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_dbg(dev, "No vmmc regulator found\n"); - } else { - ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); - if (ret > 0) - mmc->ocr_avail = ret; - else - dev_warn(dev, "Failed getting OCR mask: %d\n", ret); - } - - if (IS_ERR(mmc->supply.vqmmc)) { - if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dev_dbg(dev, "No vqmmc regulator found\n"); - } - - return 0; -} -EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); - /* * Mask off any voltages we don't support and select * the lowest voltage @@ -1936,6 +1620,12 @@ static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, { unsigned int erase_timeout; + /* for DISCARD none of the below calculation applies. + * the busy timeout is 250msec per discard command. + */ + if (arg == SD_DISCARD_ARG) + return SD_DISCARD_TIMEOUT_MS; + if (card->ssr.erase_timeout) { /* Erase timeout specified in SD Status Register (SSR) */ erase_timeout = card->ssr.erase_timeout * qty + @@ -2164,7 +1854,7 @@ static unsigned int mmc_align_erase_size(struct mmc_card *card, * @card: card to erase * @from: first sector to erase * @nr: number of sectors to erase - * @arg: erase command argument (SD supports only %MMC_ERASE_ARG) + * @arg: erase command argument * * Caller must claim host before calling this function. 
*/ @@ -2181,14 +1871,14 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, if (!card->erase_size) return -EOPNOTSUPP; - if (mmc_card_sd(card) && arg != MMC_ERASE_ARG) + if (mmc_card_sd(card) && arg != SD_ERASE_ARG && arg != SD_DISCARD_ARG) return -EOPNOTSUPP; - if ((arg & MMC_SECURE_ARGS) && + if (mmc_card_mmc(card) && (arg & MMC_SECURE_ARGS) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)) return -EOPNOTSUPP; - if ((arg & MMC_TRIM_ARGS) && + if (mmc_card_mmc(card) && (arg & MMC_TRIM_ARGS) && !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)) return -EOPNOTSUPP; @@ -2381,9 +2071,9 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card) return card->pref_erase; max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); - if (max_discard && mmc_can_trim(card)) { + if (mmc_can_trim(card)) { max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); - if (max_trim < max_discard) + if (max_trim < max_discard || max_discard == 0) max_discard = max_trim; } else if (max_discard < card->erase_size) { max_discard = 0; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 8fb6bc37f808..b5083b13d594 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -59,6 +59,7 @@ void mmc_power_up(struct mmc_host *host, u32 ocr); void mmc_power_off(struct mmc_host *host); void mmc_power_cycle(struct mmc_host *host, u32 ocr); void mmc_set_initial_state(struct mmc_host *host); +u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); static inline void mmc_delay(unsigned int ms) { diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index cf58ccaf22d5..3a4402a79904 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -194,7 +194,7 @@ int mmc_of_parse(struct mmc_host *host) switch (bus_width) { case 8: host->caps |= MMC_CAP_8_BIT_DATA; - /* Hosts capable of 8-bit transfers can also do 4 bits */ + /* fall through - Hosts capable of 8-bit can also do 4 bits */ case 4: host->caps |= MMC_CAP_4_BIT_DATA; break; @@ -260,7 +260,7 @@ int mmc_of_parse(struct mmc_host *host) /* Parse Write Protection */ ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); + ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) @@ -348,6 +348,50 @@ int mmc_of_parse(struct mmc_host *host) EXPORT_SYMBOL(mmc_of_parse); +/** + * mmc_of_parse_voltage - return mask of supported voltages + * @np: The device node need to be parsed. + * @mask: mask of voltages available for MMC/SD/SDIO + * + * Parse the "voltage-ranges" DT property, returning zero if it is not + * found, negative errno if the voltage-range specification is invalid, + * or one if the voltage-range is specified and successfully parsed. 
+ */ +int mmc_of_parse_voltage(struct device_node *np, u32 *mask) +{ + const u32 *voltage_ranges; + int num_ranges, i; + + voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); + num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; + if (!voltage_ranges) { + pr_debug("%pOF: voltage-ranges unspecified\n", np); + return 0; + } + if (!num_ranges) { + pr_err("%pOF: voltage-ranges empty\n", np); + return -EINVAL; + } + + for (i = 0; i < num_ranges; i++) { + const int j = i * 2; + u32 ocr_mask; + + ocr_mask = mmc_vddrange_to_ocrmask( + be32_to_cpu(voltage_ranges[j]), + be32_to_cpu(voltage_ranges[j + 1])); + if (!ocr_mask) { + pr_err("%pOF: voltage-range #%d is invalid\n", + np, i); + return -EINVAL; + } + *mask |= ocr_mask; + } + + return 1; +} +EXPORT_SYMBOL(mmc_of_parse_voltage); + /** * mmc_alloc_host - initialise the per-host structure. * @extra: sizeof private data structure diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index da892a599524..3e786ba204c3 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1594,6 +1594,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (oldcard) { if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) { + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); err = -ENOENT; goto err; } @@ -1743,6 +1745,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, card->ext_csd.power_off_notification = EXT_CSD_POWER_ON; } + /* set erase_arg */ + if (mmc_can_discard(card)) + card->erase_arg = MMC_DISCARD_ARG; + else if (mmc_can_trim(card)) + card->erase_arg = MMC_TRIM_ARG; + else + card->erase_arg = MMC_ERASE_ARG; + /* * Select timing interface */ diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 9054329fe903..c5208fb312ae 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -562,7 +562,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, if (index == EXT_CSD_SANITIZE_START) cmd.sanitize_busy = true; - err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); + err = mmc_wait_for_cmd(host, &cmd, 0); if (err) goto out; diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 35cc138b096d..7c364a9c4eeb 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -355,6 +355,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; + unsigned block_size = 512; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; @@ -368,7 +369,13 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); blk_queue_max_segments(mq->queue, host->max_segs); - blk_queue_max_segment_size(mq->queue, host->max_seg_size); + + if (mmc_card_mmc(card)) + block_size = card->ext_csd.data_sector_size; + + blk_queue_logical_block_size(mq->queue, block_size); + blk_queue_max_segment_size(mq->queue, + round_down(host->max_seg_size, block_size)); INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler); INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work); @@ -410,8 +417,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) else mq->tag_set.queue_depth = MMC_QUEUE_DEPTH; mq->tag_set.numa_node = NUMA_NO_NODE; - mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE | - BLK_MQ_F_BLOCKING; + mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; 
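The mmc_setup_queue() hunk above now derives the logical block size from the card (ext_csd.data_sector_size for eMMC, 512 otherwise) and clamps the maximum segment size down to a whole number of those blocks. The clamp is an ordinary power-of-two round-down; a tiny standalone illustration with made-up limits:

#include <stdio.h>

/* round_down() as in the kernel: x truncated to a multiple of y, where y
 * is a power of two. */
#define round_down(x, y) ((x) & ~((__typeof__(x))(y) - 1))

int main(void)
{
	unsigned int max_seg_size = 65535;	/* an odd host limit, for illustration */
	unsigned int block_size = 4096;		/* data_sector_size on 4K-native eMMC */

	printf("max segment size: %u -> %u\n",
	       max_seg_size, round_down(max_seg_size, block_size));
	/* prints: max segment size: 65535 -> 61440 */
	return 0;
}

Rounding down rather than up keeps each segment a whole number of logical blocks without ever exceeding the host's advertised limit.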
mq->tag_set.nr_hw_queues = 1; mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); mq->tag_set.driver_data = mq; diff --git a/drivers/mmc/core/regulator.c b/drivers/mmc/core/regulator.c new file mode 100644 index 000000000000..b6febbcf8978 --- /dev/null +++ b/drivers/mmc/core/regulator.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Helper functions for MMC regulators. + */ + +#include +#include +#include +#include + +#include + +#include "core.h" +#include "host.h" + +#ifdef CONFIG_REGULATOR + +/** + * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage + * @vdd_bit: OCR bit number + * @min_uV: minimum voltage value (mV) + * @max_uV: maximum voltage value (mV) + * + * This function returns the voltage range according to the provided OCR + * bit number. If conversion is not possible a negative errno value returned. + */ +static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV) +{ + int tmp; + + if (!vdd_bit) + return -EINVAL; + + /* + * REVISIT mmc_vddrange_to_ocrmask() may have set some + * bits this regulator doesn't quite support ... don't + * be too picky, most cards and regulators are OK with + * a 0.1V range goof (it's a small error percentage). + */ + tmp = vdd_bit - ilog2(MMC_VDD_165_195); + if (tmp == 0) { + *min_uV = 1650 * 1000; + *max_uV = 1950 * 1000; + } else { + *min_uV = 1900 * 1000 + tmp * 100 * 1000; + *max_uV = *min_uV + 100 * 1000; + } + + return 0; +} + +/** + * mmc_regulator_get_ocrmask - return mask of supported voltages + * @supply: regulator to use + * + * This returns either a negative errno, or a mask of voltages that + * can be provided to MMC/SD/SDIO devices using the specified voltage + * regulator. This would normally be called before registering the + * MMC host adapter. + */ +static int mmc_regulator_get_ocrmask(struct regulator *supply) +{ + int result = 0; + int count; + int i; + int vdd_uV; + int vdd_mV; + + count = regulator_count_voltages(supply); + if (count < 0) + return count; + + for (i = 0; i < count; i++) { + vdd_uV = regulator_list_voltage(supply, i); + if (vdd_uV <= 0) + continue; + + vdd_mV = vdd_uV / 1000; + result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); + } + + if (!result) { + vdd_uV = regulator_get_voltage(supply); + if (vdd_uV <= 0) + return vdd_uV; + + vdd_mV = vdd_uV / 1000; + result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV); + } + + return result; +} + +/** + * mmc_regulator_set_ocr - set regulator to match host->ios voltage + * @mmc: the host to regulate + * @supply: regulator to use + * @vdd_bit: zero for power off, else a bit number (host->ios.vdd) + * + * Returns zero on success, else negative errno. + * + * MMC host drivers may use this to enable or disable a regulator using + * a particular supply voltage. This would normally be called from the + * set_ios() method. 
+ */ +int mmc_regulator_set_ocr(struct mmc_host *mmc, + struct regulator *supply, + unsigned short vdd_bit) +{ + int result = 0; + int min_uV, max_uV; + + if (vdd_bit) { + mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV); + + result = regulator_set_voltage(supply, min_uV, max_uV); + if (result == 0 && !mmc->regulator_enabled) { + result = regulator_enable(supply); + if (!result) + mmc->regulator_enabled = true; + } + } else if (mmc->regulator_enabled) { + result = regulator_disable(supply); + if (result == 0) + mmc->regulator_enabled = false; + } + + if (result) + dev_err(mmc_dev(mmc), + "could not set regulator OCR (%d)\n", result); + return result; +} +EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); + +static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator, + int min_uV, int target_uV, + int max_uV) +{ + /* + * Check if supported first to avoid errors since we may try several + * signal levels during power up and don't want to show errors. + */ + if (!regulator_is_supported_voltage(regulator, min_uV, max_uV)) + return -EINVAL; + + return regulator_set_voltage_triplet(regulator, min_uV, target_uV, + max_uV); +} + +/** + * mmc_regulator_set_vqmmc - Set VQMMC as per the ios + * + * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible. + * That will match the behavior of old boards where VQMMC and VMMC were supplied + * by the same supply. The Bus Operating conditions for 3.3V signaling in the + * SD card spec also define VQMMC in terms of VMMC. + * If this is not possible we'll try the full 2.7-3.6V of the spec. + * + * For 1.2V and 1.8V signaling we'll try to get as close as possible to the + * requested voltage. This is definitely a good idea for UHS where there's a + * separate regulator on the card that's trying to make 1.8V and it's best if + * we match. + * + * This function is expected to be used by a controller's + * start_signal_voltage_switch() function. + */ +int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct device *dev = mmc_dev(mmc); + int ret, volt, min_uV, max_uV; + + /* If no vqmmc supply then we can't change the voltage */ + if (IS_ERR(mmc->supply.vqmmc)) + return -EINVAL; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_120: + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 1100000, 1200000, 1300000); + case MMC_SIGNAL_VOLTAGE_180: + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 1700000, 1800000, 1950000); + case MMC_SIGNAL_VOLTAGE_330: + ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV); + if (ret < 0) + return ret; + + dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n", + __func__, volt, max_uV); + + min_uV = max(volt - 300000, 2700000); + max_uV = min(max_uV + 200000, 3600000); + + /* + * Due to a limitation in the current implementation of + * regulator_set_voltage_triplet() which is taking the lowest + * voltage possible if below the target, search for a suitable + * voltage in two steps and try to stay close to vmmc + * with a 0.3V tolerance at first. 
+ */ + if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + min_uV, volt, max_uV)) + return 0; + + return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc, + 2700000, volt, 3600000); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc); + +#else + +static inline int mmc_regulator_get_ocrmask(struct regulator *supply) +{ + return 0; +} + +#endif /* CONFIG_REGULATOR */ + +/** + * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host + * @mmc: the host to regulate + * + * Returns 0 or errno. errno should be handled, it is either a critical error + * or -EPROBE_DEFER. 0 means no critical error but it does not mean all + * regulators have been found because they all are optional. If you require + * certain regulators, you need to check separately in your driver if they got + * populated after calling this function. + */ +int mmc_regulator_get_supply(struct mmc_host *mmc) +{ + struct device *dev = mmc_dev(mmc); + int ret; + + mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); + mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); + + if (IS_ERR(mmc->supply.vmmc)) { + if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_dbg(dev, "No vmmc regulator found\n"); + } else { + ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); + if (ret > 0) + mmc->ocr_avail = ret; + else + dev_warn(dev, "Failed getting OCR mask: %d\n", ret); + } + + if (IS_ERR(mmc->supply.vqmmc)) { + if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_dbg(dev, "No vqmmc regulator found\n"); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index d0d9f90e7cdf..265e1aeeb9d8 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -209,6 +209,11 @@ static int mmc_decode_scr(struct mmc_card *card) /* Check if Physical Layer Spec v3.0 is supported */ scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1); + if (scr->sda_spec3) { + scr->sda_spec4 = UNSTUFF_BITS(resp, 42, 1); + scr->sda_specx = UNSTUFF_BITS(resp, 38, 4); + } + if (UNSTUFF_BITS(resp, 55, 1)) card->erased_byte = 0xFF; else @@ -226,6 +231,8 @@ static int mmc_read_ssr(struct mmc_card *card) { unsigned int au, es, et, eo; __be32 *raw_ssr; + u32 resp[4] = {}; + u8 discard_support; int i; if (!(card->csd.cmdclass & CCC_APP_SPEC)) { @@ -271,6 +278,14 @@ static int mmc_read_ssr(struct mmc_card *card) } } + /* + * starting SD5.1 discard is supported if DISCARD_SUPPORT (b313) is set + */ + resp[3] = card->raw_ssr[6]; + discard_support = UNSTUFF_BITS(resp, 313 - 288, 1); + card->erase_arg = (card->scr.sda_specx && discard_support) ? 
+ SD_DISCARD_ARG : SD_ERASE_ARG; + return 0; } @@ -936,8 +951,11 @@ retry: return err; if (oldcard) { - if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) + if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) { + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; + } card = oldcard; } else { diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 47056d8d1bac..0bb0b8419016 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -52,36 +52,17 @@ int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) } EXPORT_SYMBOL_GPL(mmc_app_cmd); -/** - * mmc_wait_for_app_cmd - start an application command and wait for - completion - * @host: MMC host to start command - * @card: Card to send MMC_APP_CMD to - * @cmd: MMC command to start - * @retries: maximum number of retries - * - * Sends a MMC_APP_CMD, checks the card response, sends the command - * in the parameter and waits for it to complete. Return any error - * that occurred while the command was executing. Do not attempt to - * parse the response. - */ -int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, - struct mmc_command *cmd, int retries) +static int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, + struct mmc_command *cmd) { struct mmc_request mrq = {}; - - int i, err; - - if (retries < 0) - retries = MMC_CMD_RETRIES; - - err = -EIO; + int i, err = -EIO; /* * We have to resend MMC_APP_CMD for each attempt so * we cannot use the retries field in mmc_command. */ - for (i = 0;i <= retries;i++) { + for (i = 0; i <= MMC_CMD_RETRIES; i++) { err = mmc_app_cmd(host, card); if (err) { /* no point in retrying; no APP commands allowed */ @@ -116,8 +97,6 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, return err; } -EXPORT_SYMBOL(mmc_wait_for_app_cmd); - int mmc_app_set_bus_width(struct mmc_card *card, int width) { struct mmc_command cmd = {}; @@ -136,7 +115,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width) return -EINVAL; } - return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES); + return mmc_wait_for_app_cmd(card->host, card, &cmd); } int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) @@ -152,7 +131,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; for (i = 100; i; i--) { - err = mmc_wait_for_app_cmd(host, NULL, &cmd, MMC_CMD_RETRIES); + err = mmc_wait_for_app_cmd(host, NULL, &cmd); if (err) break; diff --git a/drivers/mmc/core/sd_ops.h b/drivers/mmc/core/sd_ops.h index 0e6c3d51e66d..ffaed5cacc88 100644 --- a/drivers/mmc/core/sd_ops.h +++ b/drivers/mmc/core/sd_ops.h @@ -16,7 +16,6 @@ struct mmc_card; struct mmc_host; -struct mmc_command; int mmc_app_set_bus_width(struct mmc_card *card, int width); int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr); @@ -27,8 +26,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp); int mmc_app_sd_status(struct mmc_card *card, void *ssr); int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card); -int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, - struct mmc_command *cmd, int retries); #endif diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index d8e17ea6126d..6718fc8bb40f 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -617,6 +617,8 @@ try_again: if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || memcmp(card->raw_cid, oldcard->raw_cid, 
sizeof(card->raw_cid)) != 0)) { mmc_remove_card(card); + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; } } else { @@ -624,6 +626,8 @@ try_again: if (oldcard && oldcard->type != MMC_TYPE_SDIO) { mmc_remove_card(card); + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; } } @@ -736,8 +740,11 @@ try_again: int same = (card->cis.vendor == oldcard->cis.vendor && card->cis.device == oldcard->cis.device); mmc_remove_card(card); - if (!same) + if (!same) { + pr_debug("%s: Perhaps the card was replaced\n", + mmc_hostname(host)); return -ENOENT; + } card = oldcard; } diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index b6d8203e46eb..62b0f5ecc7f7 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -179,7 +179,6 @@ static int sdio_bus_remove(struct device *dev) { struct sdio_driver *drv = to_sdio_driver(dev->driver); struct sdio_func *func = dev_to_sdio_func(dev); - int ret = 0; /* Make sure card is powered before invoking ->remove() */ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) @@ -205,7 +204,7 @@ static int sdio_bus_remove(struct device *dev) dev_pm_domain_detach(dev, false); - return ret; + return 0; } static const struct dev_pm_ops sdio_bus_pm_ops = { diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index d40744bbafa9..3f67fbbe0d75 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -203,6 +204,21 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func) return min(mval, 512u); /* maximum size for byte mode */ } +/* + * This is legacy code, which needs to be re-worked some day. Basically we need + * to take into account the properties of the host, as to enable the SDIO func + * driver layer to allocate optimal buffers. + */ +static inline unsigned int _sdio_align_size(unsigned int sz) +{ + /* + * FIXME: We don't have a system for the controller to tell + * the core about its problems yet, so for now we just 32-bit + * align the size. + */ + return ALIGN(sz, 4); +} + /** * sdio_align_size - pads a transfer size to a more optimal value * @func: SDIO function @@ -230,7 +246,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) * wants to increase the size up to a point where it * might need more than one block. */ - sz = mmc_align_data_size(func->card, sz); + sz = _sdio_align_size(sz); /* * If we can still do this with just a byte transfer, then @@ -252,7 +268,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) */ blk_sz = ((sz + func->cur_blksize - 1) / func->cur_blksize) * func->cur_blksize; - blk_sz = mmc_align_data_size(func->card, blk_sz); + blk_sz = _sdio_align_size(blk_sz); /* * This value is only good if it is still just @@ -265,8 +281,7 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) * We failed to do one request, but at least try to * pad the remainder properly. 
*/ - byte_sz = mmc_align_data_size(func->card, - sz % func->cur_blksize); + byte_sz = _sdio_align_size(sz % func->cur_blksize); if (byte_sz <= sdio_max_byte_size(func)) { blk_sz = sz / func->cur_blksize; return blk_sz * func->cur_blksize + byte_sz; @@ -276,16 +291,14 @@ unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz) * We need multiple requests, so first check that the * controller can handle the chunk size; */ - chunk_sz = mmc_align_data_size(func->card, - sdio_max_byte_size(func)); + chunk_sz = _sdio_align_size(sdio_max_byte_size(func)); if (chunk_sz == sdio_max_byte_size(func)) { /* * Fix up the size of the remainder (if any) */ byte_sz = orig_sz % chunk_sz; if (byte_sz) { - byte_sz = mmc_align_data_size(func->card, - byte_sz); + byte_sz = _sdio_align_size(byte_sz); } return (orig_sz / chunk_sz) * chunk_sz + byte_sz; diff --git a/drivers/mmc/core/sdio_ops.h b/drivers/mmc/core/sdio_ops.h index 96945cafbf0b..1f6d0447ea0f 100644 --- a/drivers/mmc/core/sdio_ops.h +++ b/drivers/mmc/core/sdio_ops.h @@ -25,7 +25,6 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn, int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz); int sdio_reset(struct mmc_host *host); -unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz); void sdio_irq_work(struct work_struct *work); static inline bool sdio_is_io_busy(u32 opcode, u32 arg) diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 319ccd93383d..4afc6b87b465 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -22,7 +22,6 @@ struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; - bool override_ro_active_level; bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; @@ -71,10 +70,6 @@ int mmc_gpio_get_ro(struct mmc_host *host) if (!ctx || !ctx->ro_gpio) return -ENOSYS; - if (ctx->override_ro_active_level) - return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^ - !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); - return gpiod_get_value_cansleep(ctx->ro_gpio); } EXPORT_SYMBOL(mmc_gpio_get_ro); @@ -225,7 +220,6 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * @host: mmc host * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer - * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds * @gpio_invert: will return whether the GPIO line is inverted or not, * set to NULL to ignore @@ -233,7 +227,7 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * Returns zero on success, else an error. */ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, bool override_active_level, + unsigned int idx, unsigned int debounce, bool *gpio_invert) { struct mmc_gpio *ctx = host->slot.handler_priv; @@ -253,7 +247,6 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, if (gpio_invert) *gpio_invert = !gpiod_is_active_low(desc); - ctx->override_ro_active_level = override_active_level; ctx->ro_gpio = desc; return 0; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index a44ec8bb5418..28fcd8f580a1 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -224,6 +224,7 @@ config MMC_SDHCI_ESDHC_IMX depends on ARCH_MXC depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Freescale eSDHC/uSDHC controller support found on i.MX25, i.MX35 i.MX5x and i.MX6x. 
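The sdio_io.c changes above replace mmc_align_data_size() with a local _sdio_align_size() that simply rounds a transfer size up to the next 4-byte boundary, since the core no longer exports a per-card alignment hook. The rounding is the standard power-of-two ALIGN(); a standalone illustration:

#include <stdio.h>

/* ALIGN() as used by _sdio_align_size(): round x up to a multiple of a,
 * where a is a power of two (4 bytes here). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int sizes[] = { 3, 4, 62, 129 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%u -> %u\n", sizes[i], ALIGN(sizes[i], 4u));
	/* prints: 3 -> 4, 4 -> 4, 62 -> 64, 129 -> 132 */
	return 0;
}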
@@ -250,6 +251,7 @@ config MMC_SDHCI_TEGRA depends on ARCH_TEGRA depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Tegra SD/MMC controller. If you have a Tegra platform with SD or MMC devices, say Y or M here. diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 47189f9ed4e2..735aa5871358 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -1410,6 +1410,9 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_BUS_WIDTH_4: slot->sdc_reg |= ATMCI_SDCBUS_4BIT; break; + case MMC_BUS_WIDTH_8: + slot->sdc_reg |= ATMCI_SDCBUS_8BIT; + break; } if (ios->clock) { @@ -2275,8 +2278,11 @@ static int atmci_init_slot(struct atmel_mci *host, * use only one bit for data to prevent fifo underruns and overruns * which will corrupt data. */ - if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) + if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) { mmc->caps |= MMC_CAP_4_BIT_DATA; + if (slot_data->bus_width >= 8) + mmc->caps |= MMC_CAP_8_BIT_DATA; + } if (atmci_get_version(host) < 0x200) { mmc->max_segs = 256; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 50293529d6de..7e0d3a49c06d 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -148,7 +148,6 @@ struct bcm2835_host { void __iomem *ioaddr; u32 phys_addr; - struct mmc_host *mmc; struct platform_device *pdev; int clock; /* Current clock speed */ @@ -618,7 +617,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host) "failed to terminate DMA (%d)\n", err); } - mmc_request_done(host->mmc, mrq); + mmc_request_done(mmc_from_priv(host), mrq); } static @@ -837,7 +836,7 @@ static void bcm2835_timeout(struct work_struct *work) dev_err(dev, "timeout waiting for hardware interrupt.\n"); bcm2835_dumpregs(host); - bcm2835_reset(host->mmc); + bcm2835_reset(mmc_from_priv(host)); if (host->data) { host->data->error = -ETIMEDOUT; @@ -1100,6 +1099,7 @@ static void bcm2835_dma_complete_work(struct work_struct *work) static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock) { + struct mmc_host *mmc = mmc_from_priv(host); int div; /* The SDCDIV register has 11 bits, and holds (div - 2). But @@ -1143,18 +1143,18 @@ static void bcm2835_set_clock(struct bcm2835_host *host, unsigned int clock) div = SDCDIV_MAX_CDIV; clock = host->max_clk / (div + 2); - host->mmc->actual_clock = clock; + mmc->actual_clock = clock; /* Calibrate some delays */ host->ns_per_fifo_word = (1000000000 / clock) * - ((host->mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32); + ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 8 : 32); host->cdiv = div; writel(host->cdiv, host->ioaddr + SDCDIV); /* Set the timeout to 500ms */ - writel(host->mmc->actual_clock / 2, host->ioaddr + SDTOUT); + writel(mmc->actual_clock / 2, host->ioaddr + SDTOUT); } static void bcm2835_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -1264,7 +1264,7 @@ static const struct mmc_host_ops bcm2835_ops = { static int bcm2835_add_host(struct bcm2835_host *host) { - struct mmc_host *mmc = host->mmc; + struct mmc_host *mmc = mmc_from_priv(host); struct device *dev = &host->pdev->dev; char pio_limit_string[20]; int ret; @@ -1286,7 +1286,7 @@ static int bcm2835_add_host(struct bcm2835_host *host) spin_lock_init(&host->lock); mutex_init(&host->mutex); - if (IS_ERR_OR_NULL(host->dma_chan_rxtx)) { + if (!host->dma_chan_rxtx) { dev_warn(dev, "unable to initialise DMA channel. 
Falling back to PIO\n"); host->use_dma = false; } else { @@ -1370,7 +1370,6 @@ static int bcm2835_probe(struct platform_device *pdev) mmc->ops = &bcm2835_ops; host = mmc_priv(mmc); - host->mmc = mmc; host->pdev = pdev; spin_lock_init(&host->lock); @@ -1431,6 +1430,8 @@ static int bcm2835_probe(struct platform_device *pdev) err: dev_dbg(dev, "%s -> err %d\n", __func__, ret); + if (host->dma_chan_rxtx) + dma_release_channel(host->dma_chan_rxtx); mmc_free_host(mmc); return ret; @@ -1439,8 +1440,9 @@ err: static int bcm2835_remove(struct platform_device *pdev) { struct bcm2835_host *host = platform_get_drvdata(pdev); + struct mmc_host *mmc = mmc_from_priv(host); - mmc_remove_host(host->mmc); + mmc_remove_host(mmc); writel(SDVDD_POWER_OFF, host->ioaddr + SDVDD); @@ -1452,8 +1454,7 @@ static int bcm2835_remove(struct platform_device *pdev) if (host->dma_chan_rxtx) dma_release_channel(host->dma_chan_rxtx); - mmc_free_host(host->mmc); - platform_set_drvdata(pdev, NULL); + mmc_free_host(mmc); return 0; } diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 1087b4c79cd6..4c477dcd2ada 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -566,30 +566,32 @@ static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) cb710_mmc_select_clock_divider(mmc, ios->clock); - if (ios->power_mode != reader->last_power_mode) - switch (ios->power_mode) { - case MMC_POWER_ON: - err = cb710_mmc_powerup(slot); - if (err) { - dev_warn(cb710_slot_dev(slot), - "powerup failed (%d)- retrying\n", err); - cb710_mmc_powerdown(slot); - udelay(1); + if (ios->power_mode != reader->last_power_mode) { + switch (ios->power_mode) { + case MMC_POWER_ON: err = cb710_mmc_powerup(slot); - if (err) + if (err) { dev_warn(cb710_slot_dev(slot), - "powerup retry failed (%d) - expect errors\n", + "powerup failed (%d)- retrying\n", err); + cb710_mmc_powerdown(slot); + udelay(1); + err = cb710_mmc_powerup(slot); + if (err) + dev_warn(cb710_slot_dev(slot), + "powerup retry failed (%d) - expect errors\n", err); + } + reader->last_power_mode = MMC_POWER_ON; + break; + case MMC_POWER_OFF: + cb710_mmc_powerdown(slot); + reader->last_power_mode = MMC_POWER_OFF; + break; + case MMC_POWER_UP: + default: + /* ignore */ + break; } - reader->last_power_mode = MMC_POWER_ON; - break; - case MMC_POWER_OFF: - cb710_mmc_powerdown(slot); - reader->last_power_mode = MMC_POWER_OFF; - break; - case MMC_POWER_UP: - default: - /* ignore */; } cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1); diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c index 159270e947cf..a8af682a9182 100644 --- a/drivers/mmc/host/cqhci.c +++ b/drivers/mmc/host/cqhci.c @@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots; cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs * - (cq_host->num_slots - 1); + cq_host->mmc->cqe_qdepth; pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n", mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size, @@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host) cq_host->desc_size, &cq_host->desc_dma_base, GFP_KERNEL); + if (!cq_host->desc_base) + return -ENOMEM; + cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc), cq_host->data_size, &cq_host->trans_desc_dma_base, GFP_KERNEL); - if (!cq_host->desc_base || !cq_host->trans_desc_base) + if (!cq_host->trans_desc_base) { + 
dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size, + cq_host->desc_base, + cq_host->desc_dma_base); + cq_host->desc_base = NULL; + cq_host->desc_dma_base = 0; return -ENOMEM; + } pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n", mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base, diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 9e68c3645e22..49e0daf2ef5e 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1193,7 +1193,7 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc) else if (ret) mmc->caps |= MMC_CAP_NEEDS_POLL; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 33215d66afa2..63303022669c 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -36,7 +35,6 @@ #include #include -#include #define JZ_REG_MMC_STRPCL 0x00 #define JZ_REG_MMC_STATUS 0x04 @@ -148,9 +146,7 @@ enum jz4780_cookie { struct jz4740_mmc_host { struct mmc_host *mmc; struct platform_device *pdev; - struct jz4740_mmc_platform_data *pdata; struct clk *clk; - struct gpio_desc *power; enum jz4740_mmc_version version; @@ -743,6 +739,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; jz_mmc_prepare_data_transfer(host); + /* fall through */ case JZ4740_MMC_STATE_TRANSFER_DATA: if (host->use_dma) { @@ -777,6 +774,7 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; } jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE); + /* fall through */ case JZ4740_MMC_STATE_SEND_STOP: if (!req->stop) @@ -894,16 +892,16 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_UP: jz4740_mmc_reset(host); - if (host->power) - gpiod_set_value(host->power, 1); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); host->cmdat |= JZ_MMC_CMDAT_INIT; clk_prepare_enable(host->clk); break; case MMC_POWER_ON: break; default: - if (host->power) - gpiod_set_value(host->power, 0); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); clk_disable_unprepare(host->clk); break; } @@ -936,38 +934,6 @@ static const struct mmc_host_ops jz4740_mmc_ops = { .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, }; -static int jz4740_mmc_request_gpios(struct jz4740_mmc_host *host, - struct mmc_host *mmc, - struct platform_device *pdev) -{ - struct jz4740_mmc_platform_data *pdata = dev_get_platdata(&pdev->dev); - int ret = 0; - - if (!pdata) - return 0; - - if (!pdata->card_detect_active_low) - mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; - if (!pdata->read_only_active_low) - mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - - /* - * Get optional card detect and write protect GPIOs, - * only back out on probe deferral. 
- */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); - if (ret == -EPROBE_DEFER) - return ret; - - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); - if (ret == -EPROBE_DEFER) - return ret; - - host->power = devm_gpiod_get_optional(&pdev->dev, "power", - GPIOD_OUT_HIGH); - return PTR_ERR_OR_ZERO(host->power); -} - static const struct of_device_id jz4740_mmc_of_match[] = { { .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 }, { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B }, @@ -982,9 +948,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev) struct mmc_host *mmc; struct jz4740_mmc_host *host; const struct of_device_id *match; - struct jz4740_mmc_platform_data *pdata; - - pdata = dev_get_platdata(&pdev->dev); mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); if (!mmc) { @@ -993,29 +956,25 @@ static int jz4740_mmc_probe(struct platform_device* pdev) } host = mmc_priv(mmc); - host->pdata = pdata; match = of_match_device(jz4740_mmc_of_match, &pdev->dev); if (match) { host->version = (enum jz4740_mmc_version)match->data; - ret = mmc_of_parse(mmc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "could not parse of data: %d\n", ret); - goto err_free_host; - } } else { /* JZ4740 should be the only one using legacy probe */ host->version = JZ_MMC_JZ4740; - mmc->caps |= MMC_CAP_SDIO_IRQ; - if (!(pdata && pdata->data_1bit)) - mmc->caps |= MMC_CAP_4_BIT_DATA; - ret = jz4740_mmc_request_gpios(host, mmc, pdev); - if (ret) - goto err_free_host; } + ret = mmc_of_parse(mmc); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "could not parse device properties: %d\n", ret); + goto err_free_host; + } + + mmc_regulator_get_supply(mmc); + host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = host->irq; diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index f19ec60bcbdc..2eba507790e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1338,7 +1338,8 @@ static int meson_mmc_probe(struct platform_device *pdev) host->regs + SD_EMMC_IRQ_EN); ret = request_threaded_irq(host->irq, meson_mmc_irq, - meson_mmc_irq_thread, IRQF_SHARED, NULL, host); + meson_mmc_irq_thread, IRQF_SHARED, + dev_name(&pdev->dev), host); if (ret) goto err_init_clk; diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 10ba46b728e8..1b1498805972 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1450,9 +1450,10 @@ static int mmc_spi_probe(struct spi_device *spi) mmc->caps &= ~MMC_CAP_NEEDS_POLL; mmc_gpiod_request_cd_irq(mmc); } + mmc_detect_change(mmc, 0); /* Index 1 is write protect/read only */ - status = mmc_gpiod_request_ro(mmc, NULL, 1, false, 0, NULL); + status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index e352f5ad5801..387ff14587b8 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1127,6 +1127,12 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c) writel(c, base + MMCICOMMAND); } +static void mmci_stop_command(struct mmci_host *host) +{ + host->stop_abort.error = 0; + mmci_start_command(host, &host->stop_abort, 0); +} + static void mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) @@ -1196,10 +1202,16 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, /* The error clause is 
handled above, success! */ data->bytes_xfered = data->blksz * data->blocks; - if (!data->stop || (host->mrq->sbc && !data->error)) + if (!data->stop) { + if (host->variant->cmdreg_stop && data->error) + mmci_stop_command(host); + else + mmci_request_end(host, data->mrq); + } else if (host->mrq->sbc && !data->error) { mmci_request_end(host, data->mrq); - else + } else { mmci_start_command(host, data->stop, 0); + } } } @@ -1298,6 +1310,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, mmci_dma_error(host); mmci_stop_data(host); + if (host->variant->cmdreg_stop && cmd->error) { + mmci_stop_command(host); + return; + } } mmci_request_end(host, host->mrq); } else if (sbc) { @@ -1956,6 +1972,11 @@ static int mmci_probe(struct amba_device *dev, mmc->max_busy_timeout = 0; } + /* Prepare a CMD12 - needed to clear the DPSM on some variants. */ + host->stop_abort.opcode = MMC_STOP_TRANSMISSION; + host->stop_abort.arg = 0; + host->stop_abort.flags = MMC_RSP_R1B | MMC_CMD_AC; + mmc->ops = &mmci_ops; /* We support these PM capabilities. */ @@ -2011,7 +2032,7 @@ static int mmci_probe(struct amba_device *dev, if (ret == -EPROBE_DEFER) goto clk_disable; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); if (ret == -EPROBE_DEFER) goto clk_disable; } diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 24229097d05c..14df81054438 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -377,6 +377,7 @@ struct mmci_host { void __iomem *base; struct mmc_request *mrq; struct mmc_command *cmd; + struct mmc_command stop_abort; struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 8afeaf81ae66..833ef0590af8 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (timing == MMC_TIMING_MMC_HS400 && host->dev_comp->hs400_tune) - sdr_set_field(host->base + PAD_CMD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, host->hs400_cmd_int_delay); dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 4d17032d15ee..d54612257b06 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -31,14 +31,12 @@ #include #include #include -#include #include #include #include #include #include #include -#include #include #include diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index add1e70195ea..4f06fb03c0a2 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -39,7 +38,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index b294b221f225..8a274b91804e 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -61,9 +61,6 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct of_mmc_spi *oms; - const __be32 *voltage_ranges; - int num_ranges; - int i; if (dev->platform_data || !np) return dev->platform_data; @@ -72,25 +69,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) if (!oms) return NULL; - voltage_ranges = of_get_property(np, "voltage-ranges", 
&num_ranges); - num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; - if (!voltage_ranges || !num_ranges) { - dev_err(dev, "OF: voltage-ranges unspecified\n"); + if (mmc_of_parse_voltage(np, &oms->pdata.ocr_mask) <= 0) goto err_ocr; - } - - for (i = 0; i < num_ranges; i++) { - const int j = i * 2; - u32 mask; - - mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]), - be32_to_cpu(voltage_ranges[j + 1])); - if (!mask) { - dev_err(dev, "OF: voltage-range #%d is invalid\n", i); - goto err_ocr; - } - oms->pdata.ocr_mask |= mask; - } oms->detect_irq = irq_of_parse_and_map(np, 0); if (oms->detect_irq != 0) { diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index c60a7625b1fa..b2873a2432b6 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques reg &= ~(1 << 5); OMAP_MMC_WRITE(host, SDIO, reg); /* Set maximum timeout */ - OMAP_MMC_WRITE(host, CTO, 0xff); + OMAP_MMC_WRITE(host, CTO, 0xfd); } static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req) diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 8779bbaa6b69..c907bf502a12 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -743,7 +743,7 @@ static int pxamci_probe(struct platform_device *pdev) goto out; } - ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_ro\n"); goto out; diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index da1e49c45bec..8394a7bb1fc1 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -15,6 +15,7 @@ struct renesas_sdhi_scc { unsigned long clk_rate; /* clock rate for SDR104 */ u32 tap; /* sampling clock position for SDR104 */ + u32 tap_hs400; /* sampling clock position for HS400 */ }; struct renesas_sdhi_of_data { @@ -49,6 +50,7 @@ struct renesas_sdhi { struct pinctrl_state *pins_default, *pins_uhs; void __iomem *scc_ctl; u32 scc_tappos; + u32 scc_tappos_hs400; }; #define host_to_priv(host) \ diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 31a351a20dc0..71e13844df6c 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -337,6 +337,10 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host) /* Set HS400 mode */ sd_ctrl_write16(host, CTL_SDIF_MODE, 0x0001 | sd_ctrl_read16(host, CTL_SDIF_MODE)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, + priv->scc_tappos_hs400); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2, (SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN | SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) | @@ -396,6 +400,9 @@ static void renesas_sdhi_reset_hs400_mode(struct tmio_mmc_host *host, /* Reset HS400 mode */ sd_ctrl_write16(host, CTL_SDIF_MODE, ~0x0001 & sd_ctrl_read16(host, CTL_SDIF_MODE)); + + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DT2FF, priv->scc_tappos); + sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2, ~(SH_MOBILE_SDHI_SCC_TMPPORT2_HS400EN | SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) & @@ -723,6 +730,13 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->ops.start_signal_voltage_switch = renesas_sdhi_start_signal_voltage_switch; host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27; + + /* SDR and HS200/400 registers requires HW reset */ + if (of_data && of_data->scc_offset) { + 
priv->scc_ctl = host->ctl + of_data->scc_offset; + host->mmc->caps |= MMC_CAP_HW_RESET; + host->hw_reset = renesas_sdhi_hw_reset; + } } /* Orginally registers were 16 bit apart, could be 32 or 64 nowadays */ @@ -775,12 +789,11 @@ int renesas_sdhi_probe(struct platform_device *pdev, const struct renesas_sdhi_scc *taps = of_data->taps; bool hit = false; - host->mmc->caps |= MMC_CAP_HW_RESET; - for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { priv->scc_tappos = taps->tap; + priv->scc_tappos_hs400 = taps->tap_hs400; hit = true; break; } @@ -789,12 +802,10 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!hit) dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n"); - priv->scc_ctl = host->ctl + of_data->scc_offset; host->init_tuning = renesas_sdhi_init_tuning; host->prepare_tuning = renesas_sdhi_prepare_tuning; host->select_tuning = renesas_sdhi_select_tuning; host->check_scc_error = renesas_sdhi_check_scc_error; - host->hw_reset = renesas_sdhi_hw_reset; host->prepare_hs400_tuning = renesas_sdhi_prepare_hs400_tuning; host->hs400_downgrade = renesas_sdhi_disable_scc; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 92c9b15252da..9dfafa2a90a3 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -81,6 +81,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { .clk_rate = 0, .tap = 0x00000300, + .tap_hs400 = 0x00000704, }, }; diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index 8471160316e0..02cd878e209f 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -65,6 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { .scc_offset = 0x0300, .taps = rcar_gen2_scc_taps, .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), + .max_blk_count = 0xffffffff, }; /* Definitions for sampling clocks */ diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 10f5219b3b40..f31333e831a7 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1530,7 +1530,7 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host) return ret; } - ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL); + ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); if (ret != -ENOENT) { dev_err(&pdev->dev, "error requesting GPIO for WP %d\n", ret); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c index bdbd4897c0f7..a6c2bd202b45 100644 --- a/drivers/mmc/host/sdhci-bcm-kona.c +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -18,12 +18,10 @@ #include #include #include -#include #include #include #include #include -#include #include #include "sdhci-pltfm.h" diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 552bddc5096c..1cd10356fc14 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -55,7 +55,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev) } sdhci_get_of_property(pdev); - mmc_of_parse(host->mmc); + res = mmc_of_parse(host->mmc); + if (res) + goto err; /* * Supply the existing CAPS, but clear the UHS modes. 
This diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index d0d319398a54..8dbbc1f62b70 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -25,6 +25,7 @@ #include #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" +#include "cqhci.h" #define ESDHC_SYS_CTRL_DTOCV_MASK 0x0f #define ESDHC_CTRL_D3CD 0x08 @@ -50,6 +51,7 @@ #define ESDHC_MIX_CTRL_AUTO_TUNE_EN (1 << 24) #define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) #define ESDHC_MIX_CTRL_HS400_EN (1 << 26) +#define ESDHC_MIX_CTRL_HS400_ES_EN (1 << 27) /* Bits 3 and 6 are not SDHCI standard definitions */ #define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 /* Tuning bits */ @@ -76,6 +78,9 @@ #define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1) #define ESDHC_STROBE_DLL_STS_SLV_LOCK 0x1 +#define ESDHC_VEND_SPEC2 0xc8 +#define ESDHC_VEND_SPEC2_EN_BUSY_IRQ (1 << 8) + #define ESDHC_TUNING_CTRL 0xcc #define ESDHC_STD_TUNING_EN (1 << 24) /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ @@ -103,6 +108,9 @@ */ #define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28) +/* the address offset of CQHCI */ +#define ESDHC_CQHCI_ADDR_OFFSET 0x100 + /* * The CMDTYPE of the CMD register (offset 0xE) should be set to * "11" when the STOP CMD12 is issued on imx53 to abort one @@ -138,51 +146,71 @@ #define ESDHC_FLAG_HS200 BIT(8) /* The IP supports HS400 mode */ #define ESDHC_FLAG_HS400 BIT(9) - -/* A clock frequency higher than this rate requires strobe dll control */ -#define ESDHC_STROBE_DLL_CLK_FREQ 100000000 +/* + * The IP has errata ERR010450 + * uSDHC: Due to the I/O timing limit, for SDR mode, SD card clock can't + * exceed 150MHz, for DDR mode, SD card clock can't exceed 45MHz. + */ +#define ESDHC_FLAG_ERR010450 BIT(10) +/* The IP supports HS400ES mode */ +#define ESDHC_FLAG_HS400_ES BIT(11) +/* The IP has Host Controller Interface for Command Queuing */ +#define ESDHC_FLAG_CQHCI BIT(12) struct esdhc_soc_data { u32 flags; }; -static struct esdhc_soc_data esdhc_imx25_data = { +static const struct esdhc_soc_data esdhc_imx25_data = { .flags = ESDHC_FLAG_ERR004536, }; -static struct esdhc_soc_data esdhc_imx35_data = { +static const struct esdhc_soc_data esdhc_imx35_data = { .flags = ESDHC_FLAG_ERR004536, }; -static struct esdhc_soc_data esdhc_imx51_data = { +static const struct esdhc_soc_data esdhc_imx51_data = { .flags = 0, }; -static struct esdhc_soc_data esdhc_imx53_data = { +static const struct esdhc_soc_data esdhc_imx53_data = { .flags = ESDHC_FLAG_MULTIBLK_NO_INT, }; -static struct esdhc_soc_data usdhc_imx6q_data = { +static const struct esdhc_soc_data usdhc_imx6q_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING, }; -static struct esdhc_soc_data usdhc_imx6sl_data = { +static const struct esdhc_soc_data usdhc_imx6sl_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_ERR004536 | ESDHC_FLAG_HS200, }; -static struct esdhc_soc_data usdhc_imx6sx_data = { +static const struct esdhc_soc_data usdhc_imx6sx_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200, }; -static struct esdhc_soc_data usdhc_imx7d_data = { +static const struct esdhc_soc_data usdhc_imx6ull_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_ERR010450, +}; + +static const struct esdhc_soc_data usdhc_imx7d_data = { .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 | ESDHC_FLAG_HS400, }; +static struct esdhc_soc_data usdhc_imx8qxp_data 
= { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200 + | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES + | ESDHC_FLAG_CQHCI, +}; + struct pltfm_imx_data { u32 scratchpad; struct pinctrl *pinctrl; @@ -227,7 +255,9 @@ static const struct of_device_id imx_esdhc_dt_ids[] = { { .compatible = "fsl,imx6sx-usdhc", .data = &usdhc_imx6sx_data, }, { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, + { .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, }, { .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, }, + { .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); @@ -733,6 +763,14 @@ static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + if (imx_data->socdata->flags & ESDHC_FLAG_ERR010450) { + unsigned int max_clock; + + max_clock = imx_data->is_ddr ? 45000000 : 150000000; + + clock = min(clock, max_clock); + } + while (host_clock / (16 * pre_div * ddr_pre_div) > clock && pre_div < 256) pre_div *= 2; @@ -801,6 +839,20 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) SDHCI_HOST_CONTROL); } +static int usdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* + * i.MX uSDHC internally already uses a fixed optimized timing for + * DDR50, normally does not require tuning for DDR50 mode. + */ + if (host->timing == MMC_TIMING_UHS_DDR50) + return 0; + + return sdhci_execute_tuning(mmc, opcode); +} + static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) { u32 reg; @@ -864,6 +916,19 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) return ret; } +static void esdhc_hs400_enhanced_strobe(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 m; + + m = readl(host->ioaddr + ESDHC_MIX_CTRL); + if (ios->enhanced_strobe) + m |= ESDHC_MIX_CTRL_HS400_ES_EN; + else + m &= ~ESDHC_MIX_CTRL_HS400_ES_EN; + writel(m, host->ioaddr + ESDHC_MIX_CTRL); +} + static int esdhc_change_pinstate(struct sdhci_host *host, unsigned int uhs) { @@ -905,39 +970,35 @@ static int esdhc_change_pinstate(struct sdhci_host *host, * edge of data_strobe line. Due to the time delay between CLK line and * data_strobe line, if the delay time is larger than one clock cycle, * then CLK and data_strobe line will be misaligned, read error shows up. - * So when the CLK is higher than 100MHz, each clock cycle is short enough, - * host should configure the delay target. 
*/ static void esdhc_set_strobe_dll(struct sdhci_host *host) { u32 v; - if (host->mmc->actual_clock > ESDHC_STROBE_DLL_CLK_FREQ) { - /* disable clock before enabling strobe dll */ - writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) & - ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, - host->ioaddr + ESDHC_VENDOR_SPEC); + /* disable clock before enabling strobe dll */ + writel(readl(host->ioaddr + ESDHC_VENDOR_SPEC) & + ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); - /* force a reset on strobe dll */ - writel(ESDHC_STROBE_DLL_CTRL_RESET, - host->ioaddr + ESDHC_STROBE_DLL_CTRL); - /* - * enable strobe dll ctrl and adjust the delay target - * for the uSDHC loopback read clock - */ - v = ESDHC_STROBE_DLL_CTRL_ENABLE | - (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); - writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL); - /* wait 1us to make sure strobe dll status register stable */ - udelay(1); - v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS); - if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK)) - dev_warn(mmc_dev(host->mmc), - "warning! HS400 strobe DLL status REF not lock!\n"); - if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK)) - dev_warn(mmc_dev(host->mmc), - "warning! HS400 strobe DLL status SLV not lock!\n"); - } + /* force a reset on strobe dll */ + writel(ESDHC_STROBE_DLL_CTRL_RESET, + host->ioaddr + ESDHC_STROBE_DLL_CTRL); + /* + * enable strobe dll ctrl and adjust the delay target + * for the uSDHC loopback read clock + */ + v = ESDHC_STROBE_DLL_CTRL_ENABLE | + (7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT); + writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL); + /* wait 1us to make sure strobe dll status register stable */ + udelay(1); + v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS); + if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK)) + dev_warn(mmc_dev(host->mmc), + "warning! HS400 strobe DLL status REF not lock!\n"); + if (!(v & ESDHC_STROBE_DLL_STS_SLV_LOCK)) + dev_warn(mmc_dev(host->mmc), + "warning! 
HS400 strobe DLL status SLV not lock!\n"); } static void esdhc_reset_tuning(struct sdhci_host *host) @@ -979,6 +1040,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) case MMC_TIMING_UHS_SDR25: case MMC_TIMING_UHS_SDR50: case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS: case MMC_TIMING_MMC_HS200: writel(m, host->ioaddr + ESDHC_MIX_CTRL); break; @@ -1042,6 +1104,19 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) SDHCI_TIMEOUT_CONTROL); } +static u32 esdhc_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, @@ -1058,6 +1133,7 @@ static struct sdhci_ops sdhci_esdhc_ops = { .set_bus_width = esdhc_pltfm_set_bus_width, .set_uhs_signaling = esdhc_set_uhs_signaling, .reset = esdhc_reset, + .irq = esdhc_cqhci_irq, }; static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { @@ -1095,16 +1171,34 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) writel(readl(host->ioaddr + SDHCI_HOST_CONTROL) | ESDHC_BURST_LEN_EN_INCR, host->ioaddr + SDHCI_HOST_CONTROL); + /* - * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL - * TO1.1, it's harmless for MX6SL - */ - writel(readl(host->ioaddr + 0x6c) | BIT(7), + * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL + * TO1.1, it's harmless for MX6SL + */ + writel(readl(host->ioaddr + 0x6c) & ~BIT(7), host->ioaddr + 0x6c); /* disable DLL_CTRL delay line settings */ writel(0x0, host->ioaddr + ESDHC_DLL_CTRL); + /* + * For the case of command with busy, if set the bit + * ESDHC_VEND_SPEC2_EN_BUSY_IRQ, USDHC will generate a + * transfer complete interrupt when busy is deasserted. + * When CQHCI use DCMD to send a CMD need R1b respons, + * CQHCI require to set ESDHC_VEND_SPEC2_EN_BUSY_IRQ, + * otherwise DCMD will always meet timeout waiting for + * hardware interrupt issue. + */ + if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) { + tmp = readl(host->ioaddr + ESDHC_VEND_SPEC2); + tmp |= ESDHC_VEND_SPEC2_EN_BUSY_IRQ; + writel(tmp, host->ioaddr + ESDHC_VEND_SPEC2); + + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; + } + if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); tmp |= ESDHC_STD_TUNING_EN | @@ -1120,10 +1214,81 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host) << ESDHC_TUNING_STEP_SHIFT; } writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL); + } else if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + /* + * ESDHC_STD_TUNING_EN may be configed in bootloader + * or ROM code, so clear this bit here to make sure + * the manual tuning can work. + */ + tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL); + tmp &= ~ESDHC_STD_TUNING_EN; + writel(tmp, host->ioaddr + ESDHC_TUNING_CTRL); } } } +static void esdhc_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct cqhci_host *cq_host = mmc->cqe_private; + u32 reg; + u16 mode; + int count = 10; + + /* + * CQE gets stuck if it sees Buffer Read Enable bit set, which can be + * the case after tuning, so ensure the buffer is drained. 
+ */ + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + if (count-- == 0) { + dev_warn(mmc_dev(host->mmc), + "CQE may get stuck because the Buffer Read Enable bit is set\n"); + break; + } + mdelay(1); + } + + /* + * Runtime resume will reset the entire host controller, which + * will also clear the DMAEN/BCEN of register ESDHC_MIX_CTRL. + * Here set DMAEN and BCEN when enable CMDQ. + */ + mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); + if (host->flags & SDHCI_REQ_USE_DMA) + mode |= SDHCI_TRNS_DMA; + if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) + mode |= SDHCI_TRNS_BLK_CNT_EN; + sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); + + /* + * Though Runtime resume reset the entire host controller, + * but do not impact the CQHCI side, need to clear the + * HALT bit, avoid CQHCI stuck in the first request when + * system resume back. + */ + cqhci_writel(cq_host, 0, CQHCI_CTL); + if (cqhci_readl(cq_host, CQHCI_CTL) && CQHCI_HALT) + dev_err(mmc_dev(host->mmc), + "failed to exit halt state when enable CQE\n"); + + + sdhci_cqe_enable(mmc); +} + +static void esdhc_sdhci_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static const struct cqhci_host_ops esdhc_cqhci_ops = { + .enable = esdhc_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = esdhc_sdhci_dumpregs, +}; + #ifdef CONFIG_OF static int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, @@ -1200,7 +1365,7 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, host->mmc->parent->platform_data); /* write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, false, 0, NULL); + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); if (err) { dev_err(mmc_dev(host->mmc), "failed to request write-protect gpio!\n"); @@ -1255,6 +1420,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) of_match_device(imx_esdhc_dt_ids, &pdev->dev); struct sdhci_pltfm_host *pltfm_host; struct sdhci_host *host; + struct cqhci_host *cq_host; int err; struct pltfm_imx_data *imx_data; @@ -1321,6 +1487,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) writel(0x0, host->ioaddr + ESDHC_MIX_CTRL); writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS); writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); + + /* + * Link usdhc specific mmc_host_ops execute_tuning function, + * to replace the standard one in sdhci_ops. 
+ */ + host->mmc_host_ops.execute_tuning = usdhc_execute_tuning; } if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) @@ -1333,6 +1505,28 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_HS400) host->quirks2 |= SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400; + if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + host->mmc->caps2 |= MMC_CAP2_HS400_ES; + host->mmc_host_ops.hs400_enhanced_strobe = + esdhc_hs400_enhanced_strobe; + } + + if (imx_data->socdata->flags & ESDHC_FLAG_CQHCI) { + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + err = -ENOMEM; + goto disable_ahb_clk; + } + + cq_host->mmio = host->ioaddr + ESDHC_CQHCI_ADDR_OFFSET; + cq_host->ops = &esdhc_cqhci_ops; + + err = cqhci_init(cq_host, host->mmc, false); + if (err) + goto disable_ahb_clk; + } + if (of_id) err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); else @@ -1340,6 +1534,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (err) goto disable_ahb_clk; + host->tuning_delay = 1; + sdhci_esdhc_imx_hwinit(host); err = sdhci_add_host(host); @@ -1391,6 +1587,13 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev) static int sdhci_esdhc_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + int ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); @@ -1401,11 +1604,19 @@ static int sdhci_esdhc_suspend(struct device *dev) static int sdhci_esdhc_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); + int ret; /* re-initialize hw state in case it's lost in low power mode */ sdhci_esdhc_imx_hwinit(host); - return sdhci_resume_host(host); + ret = sdhci_resume_host(host); + if (ret) + return ret; + + if (host->mmc->caps2 & MMC_CAP2_CQE) + ret = cqhci_resume(host->mmc); + + return ret; } #endif @@ -1417,6 +1628,12 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev) struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host); int ret; + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + ret = sdhci_runtime_suspend_host(host); if (ret) return ret; @@ -1460,7 +1677,10 @@ static int sdhci_esdhc_runtime_resume(struct device *dev) if (err) goto disable_ipg_clk; - return 0; + if (host->mmc->caps2 & MMC_CAP2_CQE) + err = cqhci_resume(host->mmc); + + return err; disable_ipg_clk: if (!sdhci_sdio_irq_enabled(host)) diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index c11c18a9aacb..b1a66ca3821a 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1097,7 +1097,6 @@ static int sdhci_omap_probe(struct platform_device *pdev) goto err_put_sync; } - host->mmc_host_ops.get_ro = mmc_gpio_get_ro; host->mmc_host_ops.start_signal_voltage_switch = sdhci_omap_start_signal_voltage_switch; host->mmc_host_ops.set_ios = sdhci_omap_set_ios; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 2a6eba74b94e..99b0fec2836b 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1257,16 +1257,6 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) } #endif -static const struct sdhci_pci_fixes sdhci_o2 = { - .probe = sdhci_pci_o2_probe, - .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, - .quirks2 = 
SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, - .probe_slot = sdhci_pci_o2_probe_slot, -#ifdef CONFIG_PM_SLEEP - .resume = sdhci_pci_o2_resume, -#endif -}; - static const struct sdhci_pci_fixes sdhci_jmicron = { .probe = jmicron_probe, diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index cc3ffeffd7a2..05a012a694b2 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -60,6 +60,13 @@ #define O2_SD_VENDOR_SETTING2 0x1C8 #define O2_SD_HW_TUNING_DISABLE BIT(4) +#define O2_PLL_WDT_CONTROL1 0x1CC +#define O2_PLL_FORCE_ACTIVE BIT(18) +#define O2_PLL_LOCK_STATUS BIT(14) +#define O2_PLL_SOFT_RESET BIT(12) + +#define O2_SD_DETECT_SETTING 0x324 + static void sdhci_o2_set_tuning_mode(struct sdhci_host *host) { u16 reg; @@ -283,6 +290,113 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip, host->irq = pci_irq_vector(chip->pdev, 0); } +static void sdhci_o2_wait_card_detect_stable(struct sdhci_host *host) +{ + ktime_t timeout; + u32 scratch32; + + /* Wait max 50 ms */ + timeout = ktime_add_ms(ktime_get(), 50); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + scratch32 = sdhci_readl(host, SDHCI_PRESENT_STATE); + if ((scratch32 & SDHCI_CARD_PRESENT) >> SDHCI_CARD_PRES_SHIFT + == (scratch32 & SDHCI_CD_LVL) >> SDHCI_CD_LVL_SHIFT) + break; + + if (timedout) { + pr_err("%s: Card Detect debounce never finished.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + return; + } + udelay(10); + } +} + +static void sdhci_o2_enable_internal_clock(struct sdhci_host *host) +{ + ktime_t timeout; + u16 scratch; + u32 scratch32; + + /* PLL software reset */ + scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1); + scratch32 |= O2_PLL_SOFT_RESET; + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); + udelay(1); + scratch32 &= ~(O2_PLL_SOFT_RESET); + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); + + /* PLL force active */ + scratch32 |= O2_PLL_FORCE_ACTIVE; + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + while (1) { + bool timedout = ktime_after(ktime_get(), timeout); + + scratch = sdhci_readw(host, O2_PLL_WDT_CONTROL1); + if (scratch & O2_PLL_LOCK_STATUS) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + sdhci_dumpregs(host); + goto out; + } + udelay(10); + } + + /* Wait for card detect finish */ + udelay(1); + sdhci_o2_wait_card_detect_stable(host); + +out: + /* Cancel PLL force active */ + scratch32 = sdhci_readl(host, O2_PLL_WDT_CONTROL1); + scratch32 &= ~O2_PLL_FORCE_ACTIVE; + sdhci_writel(host, scratch32, O2_PLL_WDT_CONTROL1); +} + +static int sdhci_o2_get_cd(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + + sdhci_o2_enable_internal_clock(host); + + return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +} + +static void sdhci_o2_enable_clk(struct sdhci_host *host, u16 clk) +{ + /* Enable internal clock */ + clk |= SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (sdhci_o2_get_cd(host->mmc)) { + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + } +} + +void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_o2_enable_clk(host, clk); +} + int 
sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) { struct sdhci_pci_chip *chip; @@ -314,9 +428,14 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) mmc_hostname(host->mmc)); host->flags &= ~SDHCI_SIGNALING_330; host->flags |= SDHCI_SIGNALING_180; + host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; host->mmc->caps2 |= MMC_CAP2_NO_SD; host->mmc->caps2 |= MMC_CAP2_NO_SDIO; + pci_write_config_dword(chip->pdev, + O2_SD_DETECT_SETTING, 3); } + + slot->host->mmc_host_ops.get_cd = sdhci_o2_get_cd; } host->mmc_host_ops.execute_tuning = sdhci_o2_execute_tuning; @@ -490,9 +609,6 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); break; case PCI_DEVICE_ID_O2_SEABIRD0: - if (chip->pdev->revision == 0x01) - chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER; - /* fall through */ case PCI_DEVICE_ID_O2_SEABIRD1: /* UnLock WP */ ret = pci_read_config_byte(chip->pdev, @@ -550,3 +666,21 @@ int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) return sdhci_pci_resume_host(chip); } #endif + +static const struct sdhci_ops sdhci_pci_o2_ops = { + .set_clock = sdhci_pci_o2_set_clock, + .enable_dma = sdhci_pci_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +const struct sdhci_pci_fixes sdhci_o2 = { + .probe = sdhci_pci_o2_probe, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .probe_slot = sdhci_pci_o2_probe_slot, +#ifdef CONFIG_PM_SLEEP + .resume = sdhci_pci_o2_resume, +#endif + .ops = &sdhci_pci_o2_ops, +}; diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 2ef0bdca9197..4ddb69a15cd7 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -179,13 +179,9 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot) int sdhci_pci_resume_host(struct sdhci_pci_chip *chip); #endif int sdhci_pci_enable_dma(struct sdhci_host *host); -int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); -int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); -#ifdef CONFIG_PM_SLEEP -int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); -#endif extern const struct sdhci_pci_fixes sdhci_arasan; extern const struct sdhci_pci_fixes sdhci_snps; +extern const struct sdhci_pci_fixes sdhci_o2; #endif /* __SDHCI_PCI_H */ diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index 2c3827f54927..cdc8e16b4567 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index e6ace31e2a41..32e62904c0d3 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -33,6 +33,7 @@ #include #include "sdhci-pltfm.h" +#include "cqhci.h" /* Tegra SDHOST controller vendor register definitions */ #define SDHCI_TEGRA_VENDOR_CLOCK_CTRL 0x100 @@ -75,6 +76,7 @@ #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK 0x0000000f #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL 0x7 #define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD BIT(31) +#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK 0x07FFF000 #define SDHCI_TEGRA_AUTO_CAL_STATUS 0x1ec #define SDHCI_TEGRA_AUTO_CAL_ACTIVE BIT(31) @@ -89,6 +91,9 @@ #define NVQUIRK_NEEDS_PAD_CONTROL BIT(7) #define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8) +/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */ +#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000 + struct 
sdhci_tegra_soc_data { const struct sdhci_pltfm_data *pdata; u32 nvquirks; @@ -121,6 +126,8 @@ struct sdhci_tegra { struct pinctrl *pinctrl_sdmmc; struct pinctrl_state *pinctrl_state_3v3; struct pinctrl_state *pinctrl_state_1v8; + struct pinctrl_state *pinctrl_state_3v3_drv; + struct pinctrl_state *pinctrl_state_1v8_drv; struct sdhci_tegra_autocal_offsets autocal_offsets; ktime_t last_calib; @@ -128,6 +135,7 @@ struct sdhci_tegra { u32 default_tap; u32 default_trim; u32 dqs_trim; + bool enable_hwcq; }; static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) @@ -237,11 +245,6 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg) } } -static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) -{ - return mmc_gpio_get_ro(host->mmc); -} - static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -411,6 +414,76 @@ static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host, sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); } +static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage, + bool state_drvupdn) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct sdhci_tegra_autocal_offsets *offsets = + &tegra_host->autocal_offsets; + struct pinctrl_state *pinctrl_drvupdn = NULL; + int ret = 0; + u8 drvup = 0, drvdn = 0; + u32 reg; + + if (!state_drvupdn) { + /* PADS Drive Strength */ + if (voltage == MMC_SIGNAL_VOLTAGE_180) { + if (tegra_host->pinctrl_state_1v8_drv) { + pinctrl_drvupdn = + tegra_host->pinctrl_state_1v8_drv; + } else { + drvup = offsets->pull_up_1v8_timeout; + drvdn = offsets->pull_down_1v8_timeout; + } + } else { + if (tegra_host->pinctrl_state_3v3_drv) { + pinctrl_drvupdn = + tegra_host->pinctrl_state_3v3_drv; + } else { + drvup = offsets->pull_up_3v3_timeout; + drvdn = offsets->pull_down_3v3_timeout; + } + } + + if (pinctrl_drvupdn != NULL) { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + pinctrl_drvupdn); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "failed pads drvupdn, ret: %d\n", ret); + } else if ((drvup) || (drvdn)) { + reg = sdhci_readl(host, + SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK; + reg |= (drvup << 20) | (drvdn << 12); + sdhci_writel(host, reg, + SDHCI_TEGRA_SDMEM_COMP_PADCTRL); + } + + } else { + /* Dual Voltage PADS Voltage selection */ + if (!tegra_host->pad_control_available) + return 0; + + if (voltage == MMC_SIGNAL_VOLTAGE_180) { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_1v8); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 1.8V failed, ret: %d\n", ret); + } else { + ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, + tegra_host->pinctrl_state_3v3); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "setting 3.3V failed, ret: %d\n", ret); + } + } + + return ret; +} + static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -437,6 +510,7 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3; } + /* Set initial offset before auto-calibration */ tegra_sdhci_set_pad_autocal_offset(host, pdpu); card_clk_enabled = tegra_sdhci_configure_card_clk(host, false); @@ -460,19 +534,15 @@ static void tegra_sdhci_pad_autocalib(struct sdhci_host *host) if (ret) { dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n"); - 
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) - pdpu = offsets.pull_down_1v8_timeout << 8 | - offsets.pull_up_1v8_timeout; - else - pdpu = offsets.pull_down_3v3_timeout << 8 | - offsets.pull_up_3v3_timeout; - - /* Disable automatic calibration and use fixed offsets */ + /* Disable automatic cal and use fixed Drive Strengths */ reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG); reg &= ~SDHCI_AUTO_CAL_ENABLE; sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG); - tegra_sdhci_set_pad_autocal_offset(host, pdpu); + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false); + if (ret < 0) + dev_err(mmc_dev(host->mmc), + "Setting drive strengths failed: %d\n", ret); } } @@ -511,26 +581,46 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host) err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-up-offset-3v3-timeout", &autocal->pull_up_3v3_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_3v3) && + (tegra_host->pinctrl_state_3v3_drv == NULL)) + pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_up_3v3_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-down-offset-3v3-timeout", &autocal->pull_down_3v3_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_3v3) && + (tegra_host->pinctrl_state_3v3_drv == NULL)) + pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_down_3v3_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-up-offset-1v8-timeout", &autocal->pull_up_1v8_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_1v8) && + (tegra_host->pinctrl_state_1v8_drv == NULL)) + pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_up_1v8_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-down-offset-1v8-timeout", &autocal->pull_down_1v8_timeout); - if (err) + if (err) { + if (!IS_ERR(tegra_host->pinctrl_state_1v8) && + (tegra_host->pinctrl_state_1v8_drv == NULL)) + pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n", + mmc_hostname(host->mmc)); autocal->pull_down_1v8_timeout = 0; + } err = device_property_read_u32(host->mmc->parent, "nvidia,pad-autocal-pull-up-offset-sdr104", @@ -595,6 +685,20 @@ static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host) tegra_host->dqs_trim = 0x11; } +static void tegra_sdhci_parse_dt(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + + if (device_property_read_bool(host->mmc->parent, "supports-cqe")) + tegra_host->enable_hwcq = true; + else + tegra_host->enable_hwcq = false; + + tegra_sdhci_parse_pad_autocal_dt(host); + tegra_sdhci_parse_tap_and_trim(host); +} + static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -743,32 +847,6 @@ static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) return mmc_send_tuning(host->mmc, opcode, NULL); } -static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage) -{ - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); - int ret; - - if (!tegra_host->pad_control_available) - return 0; - - if (voltage == MMC_SIGNAL_VOLTAGE_180) { - ret = 
pinctrl_select_state(tegra_host->pinctrl_sdmmc, - tegra_host->pinctrl_state_1v8); - if (ret < 0) - dev_err(mmc_dev(host->mmc), - "setting 1.8V failed, ret: %d\n", ret); - } else { - ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc, - tegra_host->pinctrl_state_3v3); - if (ret < 0) - dev_err(mmc_dev(host->mmc), - "setting 3.3V failed, ret: %d\n", ret); - } - - return ret; -} - static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -778,7 +856,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, int ret = 0; if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { - ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage); + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); if (ret < 0) return ret; ret = sdhci_start_signal_voltage_switch(mmc, ios); @@ -786,7 +864,7 @@ static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc, ret = sdhci_start_signal_voltage_switch(mmc, ios); if (ret < 0) return ret; - ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage); + ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true); } if (tegra_host->pad_calib_required) @@ -805,6 +883,20 @@ static int tegra_sdhci_init_pinctrl_info(struct device *dev, return -1; } + tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state( + tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv"); + if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) { + if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV) + tegra_host->pinctrl_state_1v8_drv = NULL; + } + + tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state( + tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv"); + if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) { + if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV) + tegra_host->pinctrl_state_3v3_drv = NULL; + } + tegra_host->pinctrl_state_3v3 = pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3"); if (IS_ERR(tegra_host->pinctrl_state_3v3)) { @@ -836,8 +928,50 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host) tegra_host->pad_calib_required = true; } +static void sdhci_tegra_cqe_enable(struct mmc_host *mmc) +{ + struct cqhci_host *cq_host = mmc->cqe_private; + u32 cqcfg = 0; + + /* + * Tegra SDMMC Controller design prevents write access to BLOCK_COUNT + * registers when CQE is enabled. 
+ */ + cqcfg = cqhci_readl(cq_host, CQHCI_CFG); + if (cqcfg & CQHCI_ENABLE) + cqhci_writel(cq_host, (cqcfg & ~CQHCI_ENABLE), CQHCI_CFG); + + sdhci_cqe_enable(mmc); + + if (cqcfg & CQHCI_ENABLE) + cqhci_writel(cq_host, cqcfg, CQHCI_CFG); +} + +static void sdhci_tegra_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = { + .enable = sdhci_tegra_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_tegra_dumpregs, +}; + static const struct sdhci_ops tegra_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, @@ -893,7 +1027,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra30 = { }; static const struct sdhci_ops tegra114_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -947,7 +1080,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra124 = { }; static const struct sdhci_ops tegra210_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_w = tegra210_sdhci_writew, .write_l = tegra_sdhci_writel, @@ -980,7 +1112,6 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = { }; static const struct sdhci_ops tegra186_sdhci_ops = { - .get_ro = tegra_sdhci_get_ro, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, .set_clock = tegra_sdhci_set_clock, @@ -989,6 +1120,7 @@ static const struct sdhci_ops tegra186_sdhci_ops = { .set_uhs_signaling = tegra_sdhci_set_uhs_signaling, .voltage_switch = tegra_sdhci_voltage_switch, .get_max_clock = tegra_sdhci_get_max_clock, + .irq = sdhci_tegra_cqhci_irq, }; static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { @@ -1030,6 +1162,54 @@ static const struct of_device_id sdhci_tegra_dt_match[] = { }; MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match); +static int sdhci_tegra_add_host(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!tegra_host->enable_hwcq) + return sdhci_add_host(host); + + sdhci_enable_v4_mode(host); + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + + cq_host = devm_kzalloc(host->mmc->parent, + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR; + cq_host->ops = &sdhci_tegra_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + static int sdhci_tegra_probe(struct platform_device *pdev) { const struct of_device_id *match; @@ -1077,9 +1257,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) host->mmc->caps |= MMC_CAP_1_8V_DDR; - tegra_sdhci_parse_pad_autocal_dt(host); - - 
tegra_sdhci_parse_tap_and_trim(host); + tegra_sdhci_parse_dt(host); tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power", GPIOD_OUT_HIGH); @@ -1117,7 +1295,7 @@ static int sdhci_tegra_probe(struct platform_device *pdev) usleep_range(2000, 4000); - rc = sdhci_add_host(host); + rc = sdhci_tegra_add_host(host); if (rc) goto err_add_host; diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c index 5b5eb53a63d2..8d07ee1b8f08 100644 --- a/drivers/mmc/host/sdhci-xenon-phy.c +++ b/drivers/mmc/host/sdhci-xenon-phy.c @@ -530,7 +530,7 @@ static bool xenon_emmc_phy_slow_mode(struct sdhci_host *host, ret = true; break; } - /* else: fall through */ + /* fall through */ default: reg &= ~XENON_TIMING_ADJUST_SLOW_MODE; ret = false; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index eba9bcc92ad3..a8141ff9be03 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -883,7 +883,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, bool *too_big) { u8 count; - struct mmc_data *data = cmd->data; + struct mmc_data *data; unsigned target_timeout, current_timeout; *too_big = true; @@ -897,6 +897,11 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) return 0xE; + /* Unspecified command, asume max */ + if (cmd == NULL) + return 0xE; + + data = cmd->data; /* Unspecified timeout, assume max */ if (!data && !cmd->busy_timeout) return 0xE; @@ -2048,6 +2053,8 @@ static int sdhci_check_ro(struct sdhci_host *host) is_readonly = 0; else if (host->ops->get_ro) is_readonly = host->ops->get_ro(host); + else if (mmc_can_gpio_ro(host->mmc)) + is_readonly = mmc_gpio_get_ro(host->mmc); else is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_WRITE_PROTECT); @@ -2376,6 +2383,10 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) return -ETIMEDOUT; } + /* Spec does not require a delay between tuning cycles */ + if (host->tuning_delay > 0) + mdelay(host->tuning_delay); + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { if (ctrl & SDHCI_CTRL_TUNED_CLK) @@ -2383,9 +2394,6 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) break; } - /* Spec does not require a delay between tuning cycles */ - if (host->tuning_delay > 0) - mdelay(host->tuning_delay); } pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", @@ -3353,7 +3361,14 @@ void sdhci_cqe_enable(struct mmc_host *mmc) ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); ctrl &= ~SDHCI_CTRL_DMA_MASK; - if (host->flags & SDHCI_USE_64_BIT_DMA) + /* + * Host from V4.10 supports ADMA3 DMA type. + * ADMA3 performs integrated descriptor which is more suitable + * for cmd queuing to fetch both command and transfer descriptors. 
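+ * (i.e. one integrated descriptor carries both the command descriptor and
+ * the data transfer descriptors for a request)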
+ */ + if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) + ctrl |= SDHCI_CTRL_ADMA3; + else if (host->flags & SDHCI_USE_64_BIT_DMA) ctrl |= SDHCI_CTRL_ADMA64; else ctrl |= SDHCI_CTRL_ADMA32; @@ -3363,7 +3378,7 @@ void sdhci_cqe_enable(struct mmc_host *mmc) SDHCI_BLOCK_SIZE); /* Set maximum timeout */ - sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL); + sdhci_set_timeout(host, NULL); host->ier = host->cqe_ier; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 6cc9a3c2ac66..01002cba1359 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -73,6 +73,10 @@ #define SDHCI_SPACE_AVAILABLE 0x00000400 #define SDHCI_DATA_AVAILABLE 0x00000800 #define SDHCI_CARD_PRESENT 0x00010000 +#define SDHCI_CARD_PRES_SHIFT 16 +#define SDHCI_CD_STABLE 0x00020000 +#define SDHCI_CD_LVL 0x00040000 +#define SDHCI_CD_LVL_SHIFT 18 #define SDHCI_WRITE_PROTECT 0x00080000 #define SDHCI_DATA_LVL_MASK 0x00F00000 #define SDHCI_DATA_LVL_SHIFT 20 @@ -88,6 +92,7 @@ #define SDHCI_CTRL_ADMA1 0x08 #define SDHCI_CTRL_ADMA32 0x10 #define SDHCI_CTRL_ADMA64 0x18 +#define SDHCI_CTRL_ADMA3 0x18 #define SDHCI_CTRL_8BITBUS 0x20 #define SDHCI_CTRL_CDTEST_INS 0x40 #define SDHCI_CTRL_CDTEST_EN 0x80 @@ -230,6 +235,7 @@ #define SDHCI_RETUNING_MODE_SHIFT 14 #define SDHCI_CLOCK_MUL_MASK 0x00FF0000 #define SDHCI_CLOCK_MUL_SHIFT 16 +#define SDHCI_CAN_DO_ADMA3 0x08000000 #define SDHCI_SUPPORT_HS400 0x80000000 /* Non-standard */ #define SDHCI_CAPABILITIES_1 0x44 diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 8c05879850a0..eea183e90f1b 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -158,7 +158,7 @@ static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode, sdhci_set_power_noreg(host, mode, vdd); } -struct sdhci_ops sdhci_am654_ops = { +static struct sdhci_ops sdhci_am654_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, .set_uhs_signaling = sdhci_set_uhs_signaling, diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 279e326e397e..2901a5773d83 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -32,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -1399,13 +1397,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev) mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; - if (host->cfg->clk_delays || host->use_new_timings) + /* + * Some H5 devices do not have signal traces precise enough to + * use HS DDR mode for their eMMC chips. + * + * We still enable HS DDR modes for all the other controller + * variants that support them. + */ + if ((host->cfg->clk_delays || host->use_new_timings) && + !of_device_is_compatible(pdev->dev.of_node, + "allwinner,sun50i-h5-emmc")) mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; ret = mmc_of_parse(mmc); if (ret) goto error_free_dma; + /* + * If we don't support delay chains in the SoC, we can't use any + * of the higher speed modes. Mask them out in case the device + * tree specifies the properties for them, which gets added to + * the caps by mmc_of_parse() above. 
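+ * (e.g. "mmc-ddr-1_8v", "sd-uhs-sdr104" or "mmc-hs200-1_8v" in the device
+ * tree would otherwise leave the corresponding caps set)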
+ */ + if (!(host->cfg->clk_delays || host->use_new_timings)) { + mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | + MMC_CAP_1_2V_DDR | MMC_CAP_UHS); + mmc->caps2 &= ~MMC_CAP2_HS200; + } + + /* TODO: This driver doesn't support HS400 mode yet */ + mmc->caps2 &= ~MMC_CAP2_HS400; + ret = sunxi_mmc_init_host(host); if (ret) goto error_free_dma; diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index c03529e3f01a..2adb0d24360f 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -277,6 +277,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); } +static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) +{ + iowrite32(val, host->ctl + (addr << host->bus_shift)); +} + static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr, const u32 *buf, int count) { diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 085a0fab769c..595949f1f001 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -629,7 +630,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg, return false; } -static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) +static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) { struct mmc_host *mmc = host->mmc; struct tmio_mmc_data *pdata = host->pdata; @@ -637,7 +638,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) unsigned int sdio_status; if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) - return; + return false; status = sd_ctrl_read16(host, CTL_SDIO_STATUS); ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask; @@ -650,6 +651,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host) if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) mmc_signal_sdio_irq(mmc); + + return ireg; } irqreturn_t tmio_mmc_irq(int irq, void *devid) @@ -668,9 +671,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid) if (__tmio_mmc_sdcard_irq(host, ireg, status)) return IRQ_HANDLED; - __tmio_mmc_sdio_irq(host); + if (__tmio_mmc_sdio_irq(host)) + return IRQ_HANDLED; - return IRQ_HANDLED; + return IRQ_NONE; } EXPORT_SYMBOL_GPL(tmio_mmc_irq); @@ -700,7 +704,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, /* Set transfer length / blocksize */ sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); - sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); + if (host->mmc->max_blk_count >= SZ_64K) + sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks); + else + sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); tmio_mmc_start_dma(host, data); @@ -1066,7 +1073,7 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) /* use ocr_mask if no regulator */ if (!mmc->ocr_avail) - mmc->ocr_avail = pdata->ocr_mask; + mmc->ocr_avail = pdata->ocr_mask; /* * try again. 
@@ -1287,6 +1294,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_release_dma(host); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); } diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c index 3ba42f508014..4fd6da29489e 100644 --- a/drivers/mmc/host/wmt-sdmmc.c +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 6e8e7b1bb34b..79a53cb8507b 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -756,7 +756,8 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, } numvirtchips = cfi->numchips * numparts; - newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL); + newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips), + GFP_KERNEL); if (!newcfi) return -ENOMEM; shared = kmalloc_array(cfi->numchips, diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c index 837b04ab96a9..839ed40625d6 100644 --- a/drivers/mtd/chips/gen_probe.c +++ b/drivers/mtd/chips/gen_probe.c @@ -135,7 +135,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi * our caller, and copy the appropriate data into them. */ - retcfi = kmalloc(sizeof(struct cfi_private) + cfi.numchips * sizeof(struct flchip), GFP_KERNEL); + retcfi = kmalloc(struct_size(retcfi, chips, cfi.numchips), GFP_KERNEL); if (!retcfi) { kfree(cfi.cfiq); diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 4c94fc096696..7754803e3463 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1767,8 +1767,8 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) switch (chip_id) { case DOC_CHIPID_G3: - mtd->name = kasprintf(GFP_KERNEL, "docg3.%d", - docg3->device_id); + mtd->name = devm_kasprintf(docg3->dev, GFP_KERNEL, "docg3.%d", + docg3->device_id); if (!mtd->name) return -ENOMEM; docg3->max_block = 2047; @@ -1872,7 +1872,7 @@ nomem3: nomem2: kfree(docg3); nomem1: - return ERR_PTR(ret); + return ret ? 
ERR_PTR(ret) : NULL; } /** @@ -1886,7 +1886,6 @@ static void doc_release_device(struct mtd_info *mtd) mtd_device_unregister(mtd); kfree(docg3->bbt); kfree(docg3); - kfree(mtd->name); kfree(mtd); } diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index c4a1d04b8c80..651bab6d4e31 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -195,7 +195,14 @@ static int m25p_probe(struct spi_mem *spimem) spi_mem_set_drvdata(spimem, flash); flash->spimem = spimem; - if (spi->mode & SPI_RX_QUAD) { + if (spi->mode & SPI_RX_OCTAL) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; + + if (spi->mode & SPI_TX_OCTAL) + hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 | + SNOR_HWCAPS_PP_1_1_8 | + SNOR_HWCAPS_PP_1_8_8); + } else if (spi->mode & SPI_RX_QUAD) { hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; if (spi->mode & SPI_TX_QUAD) diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index 46238796145f..1c97fabc4bf9 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c @@ -24,14 +24,12 @@ static unsigned long writebuf_size = 64; #define MTDRAM_TOTAL_SIZE (total_size * 1024) #define MTDRAM_ERASE_SIZE (erase_size * 1024) -#ifdef MODULE module_param(total_size, ulong, 0); MODULE_PARM_DESC(total_size, "Total device size in KiB"); module_param(erase_size, ulong, 0); MODULE_PARM_DESC(erase_size, "Device erase block size in KiB"); module_param(writebuf_size, ulong, 0); MODULE_PARM_DESC(writebuf_size, "Device write buf size in Bytes (Default: 64)"); -#endif // We could store these in the mtd structure, but we only support 1 device.. static struct mtd_info *mtd_info; diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 22f753e555ac..83f88b8b5d9f 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c @@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev, * Going to have to check what details I need to set and how to * get them */ - mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); + mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); mtd->type = MTD_NORFLASH; mtd->flags = MTD_WRITEABLE; mtd->size = size; diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c index 69f2112340b1..175bdc3b72f4 100644 --- a/drivers/mtd/lpddr/qinfo_probe.c +++ b/drivers/mtd/lpddr/qinfo_probe.c @@ -181,8 +181,8 @@ static struct lpddr_private *lpddr_probe_chip(struct map_info *map) lpddr.numchips = 1; numvirtchips = lpddr.numchips * lpddr.qinfo->HWPartsNum; - retlpddr = kzalloc(sizeof(struct lpddr_private) + - numvirtchips * sizeof(struct flchip), GFP_KERNEL); + retlpddr = kzalloc(struct_size(retlpddr, chips, numvirtchips), + GFP_KERNEL); if (!retlpddr) return NULL; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 999b705769a8..76b4264936ff 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -155,7 +155,6 @@ static ssize_t mtd_flags_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags); - } static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL); @@ -166,7 +165,6 @@ static ssize_t mtd_size_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)mtd->size); - } static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL); @@ -176,7 +174,6 @@ static ssize_t mtd_erasesize_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%lu\n", 
(unsigned long)mtd->erasesize); - } static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL); @@ -186,7 +183,6 @@ static ssize_t mtd_writesize_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize); - } static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL); @@ -197,7 +193,6 @@ static ssize_t mtd_subpagesize_show(struct device *dev, unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize); - } static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL); @@ -207,7 +202,6 @@ static ssize_t mtd_oobsize_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize); - } static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL); @@ -226,7 +220,6 @@ static ssize_t mtd_numeraseregions_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions); - } static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show, NULL); @@ -237,7 +230,6 @@ static ssize_t mtd_name_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name); - } static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); @@ -507,6 +499,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) { struct nvmem_config config = {}; + config.id = -1; config.dev = &mtd->dev; config.name = mtd->name; config.owner = THIS_MODULE; @@ -559,6 +552,14 @@ int add_mtd_device(struct mtd_info *mtd) BUG_ON(mtd->writesize == 0); + /* + * MTD drivers should implement ->_{write,read}() or + * ->_{write,read}_oob(), but not both. + */ + if (WARN_ON((mtd->_write && mtd->_write_oob) || + (mtd->_read && mtd->_read_oob))) + return -EINVAL; + if (WARN_ON((!mtd->erasesize || !mtd->_erase) && !(mtd->flags & MTD_NO_ERASE))) return -EINVAL; @@ -1089,67 +1090,32 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { - int ret_code; - *retlen = 0; - if (from < 0 || from >= mtd->size || len > mtd->size - from) - return -EINVAL; - if (!len) - return 0; + struct mtd_oob_ops ops = { + .len = len, + .datbuf = buf, + }; + int ret; - ledtrig_mtd_activity(); - /* - * In the absence of an error, drivers return a non-negative integer - * representing the maximum number of bitflips that were corrected on - * any one ecc region (if applicable; zero otherwise). - */ - if (mtd->_read) { - ret_code = mtd->_read(mtd, from, len, retlen, buf); - } else if (mtd->_read_oob) { - struct mtd_oob_ops ops = { - .len = len, - .datbuf = buf, - }; - - ret_code = mtd->_read_oob(mtd, from, &ops); - *retlen = ops.retlen; - } else { - return -ENOTSUPP; - } + ret = mtd_read_oob(mtd, from, &ops); + *retlen = ops.retlen; - if (unlikely(ret_code < 0)) - return ret_code; - if (mtd->ecc_strength == 0) - return 0; /* device lacks ecc */ - return ret_code >= mtd->bitflip_threshold ? 
-EUCLEAN : 0; + return ret; } EXPORT_SYMBOL_GPL(mtd_read); int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { - *retlen = 0; - if (to < 0 || to >= mtd->size || len > mtd->size - to) - return -EINVAL; - if ((!mtd->_write && !mtd->_write_oob) || - !(mtd->flags & MTD_WRITEABLE)) - return -EROFS; - if (!len) - return 0; - ledtrig_mtd_activity(); + struct mtd_oob_ops ops = { + .len = len, + .datbuf = (u8 *)buf, + }; + int ret; - if (!mtd->_write) { - struct mtd_oob_ops ops = { - .len = len, - .datbuf = (u8 *)buf, - }; - int ret; + ret = mtd_write_oob(mtd, to, &ops); + *retlen = ops.retlen; - ret = mtd->_write_oob(mtd, to, &ops); - *retlen = ops.retlen; - return ret; - } - - return mtd->_write(mtd, to, len, retlen, buf); + return ret; } EXPORT_SYMBOL_GPL(mtd_write); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 60104e1079c5..37f174ccbcec 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, /* let's register it anyway to preserve ordering */ slave->offset = 0; slave->mtd.size = 0; + + /* Initialize ->erasesize to make add_mtd_device() happy. */ + slave->mtd.erasesize = parent->erasesize; + printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", part->name); goto out_register; @@ -632,7 +636,6 @@ err_remove_part: mutex_unlock(&mtd_partitions_mutex); free_partition(new); - pr_info("%s:%i\n", __func__, __LINE__); return ret; } diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 1a55d3e3d4c5..e604625e2dfa 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -541,4 +541,21 @@ config MTD_NAND_TEGRA is supported. Extra OOB bytes when using HW ECC are currently not supported. +config MTD_NAND_STM32_FMC2 + tristate "Support for NAND controller on STM32MP SoCs" + depends on MACH_STM32MP157 || COMPILE_TEST + help + Enables support for NAND Flash chips on SoCs containing the FMC2 + NAND controller. This controller is found on STM32MP SoCs. + The controller supports a maximum 8k page size and supports + a maximum 8-bit correction error per sector of 512 bytes. + +config MTD_NAND_MESON + tristate "Support for NAND controller on Amlogic's Meson SoCs" + depends on ARCH_MESON || COMPILE_TEST + select MFD_SYSCON + help + Enables support for NAND controller on Amlogic's Meson SoCs. + This controller is found on Meson SoCs. 
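For context on the drivers/mtd/mtdcore.c hunk above: mtd_read() and mtd_write() become thin wrappers that describe the plain data transfer with a struct mtd_oob_ops and delegate to mtd_read_oob()/mtd_write_oob(), so both entry points share one code path and the range/ECC bookkeeping is expected to live in the *_oob() helpers. A condensed sketch of the new write path, mirroring the hunk (example_mtd_write is an illustrative name only, not part of the patch):

	static int example_mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
				     size_t *retlen, const u_char *buf)
	{
		struct mtd_oob_ops ops = {
			.len    = len,		/* main-area bytes only */
			.datbuf = (u8 *)buf,	/* no OOB buffer supplied */
		};
		int ret;

		ret = mtd_write_oob(mtd, to, &ops);	/* common checks happen here */
		*retlen = ops.retlen;			/* bytes actually written */
		return ret;
	}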
+ endif # MTD_NAND diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index 57159b349054..5a5a72f0793e 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -56,6 +56,8 @@ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o +obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o +obj-$(CONFIG_MTD_NAND_MESON) += meson_nand.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 555a74e15269..9d3997840889 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -876,23 +876,32 @@ static struct atmel_pmecc *atmel_pmecc_get_by_node(struct device *userdev, { struct platform_device *pdev; struct atmel_pmecc *pmecc, **ptr; + int ret; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); + pmecc = platform_get_drvdata(pdev); + if (!pmecc) { + ret = -EPROBE_DEFER; + goto err_put_device; + } ptr = devres_alloc(devm_atmel_pmecc_put, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return ERR_PTR(-ENOMEM); - - get_device(&pdev->dev); - pmecc = platform_get_drvdata(pdev); + if (!ptr) { + ret = -ENOMEM; + goto err_put_device; + } *ptr = pmecc; devres_add(userdev, ptr); return pmecc; + +err_put_device: + put_device(&pdev->dev); + return ERR_PTR(ret); } static const int atmel_pmecc_strengths[] = { 2, 4, 8, 12, 24, 32 }; diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 6e8edc9375dd..24aeafc67cd4 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -37,9 +37,6 @@ #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ -/* MAP10 commands */ -#define DENALI_ERASE 0x01 - #define DENALI_BANK(denali) ((denali)->active_bank << 24) #define DENALI_INVALID_BANK -1 @@ -476,7 +473,7 @@ static void denali_setup_dma32(struct denali_nand_info *denali, } static int denali_pio_read(struct denali_nand_info *denali, void *buf, - size_t size, int page, int raw) + size_t size, int page) { u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; uint32_t *buf32 = (uint32_t *)buf; @@ -504,7 +501,7 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf, } static int denali_pio_write(struct denali_nand_info *denali, - const void *buf, size_t size, int page, int raw) + const void *buf, size_t size, int page) { u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; const uint32_t *buf32 = (uint32_t *)buf; @@ -525,16 +522,16 @@ static int denali_pio_write(struct denali_nand_info *denali, } static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, - size_t size, int page, int raw, int write) + size_t size, int page, int write) { if (write) - return denali_pio_write(denali, buf, size, page, raw); + return denali_pio_write(denali, buf, size, page); else - return denali_pio_read(denali, buf, size, page, raw); + return denali_pio_read(denali, buf, size, page); } static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, - size_t size, int page, int raw, int write) + size_t size, int page, int write) { dma_addr_t dma_addr; uint32_t irq_mask, irq_status, ecc_err_mask; @@ -544,7 +541,7 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, 
dma_addr = dma_map_single(denali->dev, buf, size, dir); if (dma_mapping_error(denali->dev, dma_addr)) { dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); - return denali_pio_xfer(denali, buf, size, page, raw, write); + return denali_pio_xfer(denali, buf, size, page, write); } if (write) { @@ -598,9 +595,9 @@ static int denali_data_xfer(struct denali_nand_info *denali, void *buf, denali->reg + TRANSFER_SPARE_REG); if (denali->dma_avail) - return denali_dma_xfer(denali, buf, size, page, raw, write); + return denali_dma_xfer(denali, buf, size, page, write); else - return denali_pio_xfer(denali, buf, size, page, raw, write); + return denali_pio_xfer(denali, buf, size, page, write); } static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, @@ -754,9 +751,6 @@ static int denali_read_oob(struct nand_chip *chip, int page) static int denali_write_oob(struct nand_chip *chip, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - struct denali_nand_info *denali = mtd_to_denali(mtd); - - denali_reset_irq(denali); denali_oob_xfer(mtd, chip, page, 1); @@ -903,23 +897,6 @@ static int denali_waitfunc(struct nand_chip *chip) return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; } -static int denali_erase(struct nand_chip *chip, int page) -{ - struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip)); - uint32_t irq_status; - - denali_reset_irq(denali); - - denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, - DENALI_ERASE); - - /* wait for erase to complete or failure to occur */ - irq_status = denali_wait_for_irq(denali, - INTR__ERASE_COMP | INTR__ERASE_FAIL); - - return irq_status & INTR__ERASE_COMP ? 0 : -EIO; -} - static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, const struct nand_data_interface *conf) { @@ -1244,7 +1221,6 @@ static int denali_attach_chip(struct nand_chip *chip) chip->ecc.write_page_raw = denali_write_page_raw; chip->ecc.read_oob = denali_read_oob; chip->ecc.write_oob = denali_write_oob; - chip->legacy.erase = denali_erase; ret = denali_multidev_fixup(denali); if (ret) diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h index 25c00601b8b3..c8c2620fc736 100644 --- a/drivers/mtd/nand/raw/denali.h +++ b/drivers/mtd/nand/raw/denali.h @@ -304,7 +304,6 @@ struct denali_nand_info { u32 irq_status; /* interrupts that have happened */ int irq; void *buf; /* for syndrome layout conversion */ - dma_addr_t dma_addr; int dma_avail; /* can support DMA? */ int devs_per_cs; /* devices connected in parallel */ int oob_skip_bytes; /* number of bytes reserved for BBM */ diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c index 7c6a8a426606..0b5ae2418815 100644 --- a/drivers/mtd/nand/raw/denali_dt.c +++ b/drivers/mtd/nand/raw/denali_dt.c @@ -109,25 +109,17 @@ static int denali_dt_probe(struct platform_device *pdev) if (IS_ERR(denali->host)) return PTR_ERR(denali->host); - /* - * A single anonymous clock is supported for the backward compatibility. - * New platforms should support all the named clocks. 
- */ dt->clk = devm_clk_get(dev, "nand"); if (IS_ERR(dt->clk)) - dt->clk = devm_clk_get(dev, NULL); - if (IS_ERR(dt->clk)) { - dev_err(dev, "no clk available\n"); return PTR_ERR(dt->clk); - } dt->clk_x = devm_clk_get(dev, "nand_x"); if (IS_ERR(dt->clk_x)) - dt->clk_x = NULL; + return PTR_ERR(dt->clk_x); dt->clk_ecc = devm_clk_get(dev, "ecc"); if (IS_ERR(dt->clk_ecc)) - dt->clk_ecc = NULL; + return PTR_ERR(dt->clk_ecc); ret = clk_prepare_enable(dt->clk); if (ret) @@ -141,19 +133,8 @@ static int denali_dt_probe(struct platform_device *pdev) if (ret) goto out_disable_clk_x; - if (dt->clk_x) { - denali->clk_rate = clk_get_rate(dt->clk); - denali->clk_x_rate = clk_get_rate(dt->clk_x); - } else { - /* - * Hardcode the clock rates for the backward compatibility. - * This works for both SOCFPGA and UniPhier. - */ - dev_notice(dev, - "necessary clock is missing. default clock rates are used.\n"); - denali->clk_rate = 50000000; - denali->clk_x_rate = 200000000; - } + denali->clk_rate = clk_get_rate(dt->clk); + denali->clk_x_rate = clk_get_rate(dt->clk_x); ret = denali_init(denali); if (ret) diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index c9149a37f8f0..6c7ca41354be 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -965,6 +965,19 @@ static const struct nand_controller_ops fsmc_nand_controller_ops = { .setup_data_interface = fsmc_setup_data_interface, }; +/** + * fsmc_nand_disable() - Disables the NAND bank + * @host: The instance to disable + */ +static void fsmc_nand_disable(struct fsmc_nand_data *host) +{ + u32 val; + + val = readl(host->regs_va + FSMC_PC); + val &= ~FSMC_ENABLE; + writel(val, host->regs_va + FSMC_PC); +} + /* * fsmc_nand_probe - Probe function * @pdev: platform device structure @@ -1120,6 +1133,7 @@ release_dma_read_chan: if (host->mode == USE_DMA_ACCESS) dma_release_channel(host->read_dma_chan); disable_clk: + fsmc_nand_disable(host); clk_disable_unprepare(host->clk); return ret; @@ -1134,6 +1148,7 @@ static int fsmc_nand_remove(struct platform_device *pdev) if (host) { nand_release(&host->nand); + fsmc_nand_disable(host); if (host->mode == USE_DMA_ACCESS) { dma_release_channel(host->write_dma_chan); @@ -1164,6 +1179,7 @@ static int fsmc_nand_resume(struct device *dev) clk_prepare_enable(host->clk); if (host->dev_timings) fsmc_nand_setup(host, host->dev_timings); + nand_reset(&host->nand, 0); } return 0; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c index bd4cfac6b5aa..a4768df5083f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c @@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this) /* * Reset BCH here, too. We got failures otherwise :( - * See later BCH reset for explanation of MX23 handling + * See later BCH reset for explanation of MX23 and MX28 handling */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; @@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this) /* * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. - * On the other hand, the MX28 needs the reset, because one case has been - * seen where the BCH produced ECC errors constantly after 10000 - * consecutive reboots. 
The latter case has not been seen on the MX23 - * yet, still we don't know if it could happen there as well. + * and MX28. */ - ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); + ret = gpmi_reset_block(r->bch_regs, + GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); if (ret) goto err_out; diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/jz4780_bch.c index 7201827809e9..c5f74ed85862 100644 --- a/drivers/mtd/nand/raw/jz4780_bch.c +++ b/drivers/mtd/nand/raw/jz4780_bch.c @@ -281,12 +281,15 @@ static struct jz4780_bch *jz4780_bch_get(struct device_node *np) struct jz4780_bch *bch; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); - get_device(&pdev->dev); - bch = platform_get_drvdata(pdev); + if (!bch) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + clk_prepare_enable(bch->clk); return bch; diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 84283c6bb0ff..f38e5c1b87e4 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2550,9 +2550,8 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, } /* Alloc the nand chip structure */ - marvell_nand = devm_kzalloc(dev, sizeof(*marvell_nand) + - (nsels * - sizeof(struct marvell_nand_chip_sel)), + marvell_nand = devm_kzalloc(dev, + struct_size(marvell_nand, sels, nsels), GFP_KERNEL); if (!marvell_nand) { dev_err(dev, "could not allocate chip structure\n"); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c new file mode 100644 index 000000000000..3e8aa71407b5 --- /dev/null +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -0,0 +1,1464 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR MIT) +/* + * Amlogic Meson Nand Flash Controller Driver + * + * Copyright (c) 2018 Amlogic, inc. 
+ * Author: Liang Yang + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NFC_REG_CMD 0x00 +#define NFC_CMD_IDLE (0xc << 14) +#define NFC_CMD_CLE (0x5 << 14) +#define NFC_CMD_ALE (0x6 << 14) +#define NFC_CMD_ADL ((0 << 16) | (3 << 20)) +#define NFC_CMD_ADH ((1 << 16) | (3 << 20)) +#define NFC_CMD_AIL ((2 << 16) | (3 << 20)) +#define NFC_CMD_AIH ((3 << 16) | (3 << 20)) +#define NFC_CMD_SEED ((8 << 16) | (3 << 20)) +#define NFC_CMD_M2N ((0 << 17) | (2 << 20)) +#define NFC_CMD_N2M ((1 << 17) | (2 << 20)) +#define NFC_CMD_RB BIT(20) +#define NFC_CMD_SCRAMBLER_ENABLE BIT(19) +#define NFC_CMD_SCRAMBLER_DISABLE 0 +#define NFC_CMD_SHORTMODE_DISABLE 0 +#define NFC_CMD_RB_INT BIT(14) + +#define NFC_CMD_GET_SIZE(x) (((x) >> 22) & GENMASK(4, 0)) + +#define NFC_REG_CFG 0x04 +#define NFC_REG_DADR 0x08 +#define NFC_REG_IADR 0x0c +#define NFC_REG_BUF 0x10 +#define NFC_REG_INFO 0x14 +#define NFC_REG_DC 0x18 +#define NFC_REG_ADR 0x1c +#define NFC_REG_DL 0x20 +#define NFC_REG_DH 0x24 +#define NFC_REG_CADR 0x28 +#define NFC_REG_SADR 0x2c +#define NFC_REG_PINS 0x30 +#define NFC_REG_VER 0x38 + +#define NFC_RB_IRQ_EN BIT(21) + +#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \ + ( \ + (cmd_dir) | \ + ((ran) << 19) | \ + ((bch) << 14) | \ + ((short_mode) << 13) | \ + (((page_size) & 0x7f) << 6) | \ + ((pages) & 0x3f) \ + ) + +#define GENCMDDADDRL(adl, addr) ((adl) | ((addr) & 0xffff)) +#define GENCMDDADDRH(adh, addr) ((adh) | (((addr) >> 16) & 0xffff)) +#define GENCMDIADDRL(ail, addr) ((ail) | ((addr) & 0xffff)) +#define GENCMDIADDRH(aih, addr) ((aih) | (((addr) >> 16) & 0xffff)) + +#define DMA_DIR(dir) ((dir) ? NFC_CMD_N2M : NFC_CMD_M2N) + +#define ECC_CHECK_RETURN_FF (-1) + +#define NAND_CE0 (0xe << 10) +#define NAND_CE1 (0xd << 10) + +#define DMA_BUSY_TIMEOUT 0x100000 +#define CMD_FIFO_EMPTY_TIMEOUT 1000 + +#define MAX_CE_NUM 2 + +/* eMMC clock register, misc control */ +#define CLK_SELECT_NAND BIT(31) + +#define NFC_CLK_CYCLE 6 + +/* nand flash controller delay 3 ns */ +#define NFC_DEFAULT_DELAY 3000 + +#define ROW_ADDER(page, index) (((page) >> (8 * (index))) & 0xff) +#define MAX_CYCLE_ADDRS 5 +#define DIRREAD 1 +#define DIRWRITE 0 + +#define ECC_PARITY_BCH8_512B 14 +#define ECC_COMPLETE BIT(31) +#define ECC_ERR_CNT(x) (((x) >> 24) & GENMASK(5, 0)) +#define ECC_ZERO_CNT(x) (((x) >> 16) & GENMASK(5, 0)) +#define ECC_UNCORRECTABLE 0x3f + +#define PER_INFO_BYTE 8 + +struct meson_nfc_nand_chip { + struct list_head node; + struct nand_chip nand; + unsigned long clk_rate; + unsigned long level1_divider; + u32 bus_timing; + u32 twb; + u32 tadl; + u32 tbers_max; + + u32 bch_mode; + u8 *data_buf; + __le64 *info_buf; + u32 nsels; + u8 sels[0]; +}; + +struct meson_nand_ecc { + u32 bch; + u32 strength; +}; + +struct meson_nfc_data { + const struct nand_ecc_caps *ecc_caps; +}; + +struct meson_nfc_param { + u32 chip_select; + u32 rb_select; +}; + +struct nand_rw_cmd { + u32 cmd0; + u32 addrs[MAX_CYCLE_ADDRS]; + u32 cmd1; +}; + +struct nand_timing { + u32 twb; + u32 tadl; + u32 tbers_max; +}; + +struct meson_nfc { + struct nand_controller controller; + struct clk *core_clk; + struct clk *device_clk; + struct clk *phase_tx; + struct clk *phase_rx; + + unsigned long clk_rate; + u32 bus_timing; + + struct device *dev; + void __iomem *reg_base; + struct regmap *reg_clk; + struct completion completion; + struct list_head chips; + const struct meson_nfc_data *data; + struct meson_nfc_param param; + struct 
nand_timing timing; + union { + int cmd[32]; + struct nand_rw_cmd rw; + } cmdfifo; + + dma_addr_t daddr; + dma_addr_t iaddr; + + unsigned long assigned_cs; +}; + +enum { + NFC_ECC_BCH8_1K = 2, + NFC_ECC_BCH24_1K, + NFC_ECC_BCH30_1K, + NFC_ECC_BCH40_1K, + NFC_ECC_BCH50_1K, + NFC_ECC_BCH60_1K, +}; + +#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)} + +static struct meson_nand_ecc meson_ecc[] = { + MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8), + MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24), + MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30), + MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40), + MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50), + MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60), +}; + +static int meson_nand_calc_ecc_bytes(int step_size, int strength) +{ + int ecc_bytes; + + if (step_size == 512 && strength == 8) + return ECC_PARITY_BCH8_512B; + + ecc_bytes = DIV_ROUND_UP(strength * fls(step_size * 8), 8); + ecc_bytes = ALIGN(ecc_bytes, 2); + + return ecc_bytes; +} + +NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps, + meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60); +NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps, + meson_nand_calc_ecc_bytes, 1024, 8); + +static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand) +{ + return container_of(nand, struct meson_nfc_nand_chip, nand); +} + +static void meson_nfc_select_chip(struct nand_chip *nand, int chip) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + int ret, value; + + if (chip < 0 || WARN_ON_ONCE(chip >= meson_chip->nsels)) + return; + + nfc->param.chip_select = meson_chip->sels[chip] ? NAND_CE1 : NAND_CE0; + nfc->param.rb_select = nfc->param.chip_select; + nfc->timing.twb = meson_chip->twb; + nfc->timing.tadl = meson_chip->tadl; + nfc->timing.tbers_max = meson_chip->tbers_max; + + if (nfc->clk_rate != meson_chip->clk_rate) { + ret = clk_set_rate(nfc->device_clk, meson_chip->clk_rate); + if (ret) { + dev_err(nfc->dev, "failed to set clock rate\n"); + return; + } + nfc->clk_rate = meson_chip->clk_rate; + } + if (nfc->bus_timing != meson_chip->bus_timing) { + value = (NFC_CLK_CYCLE - 1) | (meson_chip->bus_timing << 5); + writel(value, nfc->reg_base + NFC_REG_CFG); + writel((1 << 31), nfc->reg_base + NFC_REG_CMD); + nfc->bus_timing = meson_chip->bus_timing; + } +} + +static void meson_nfc_cmd_idle(struct meson_nfc *nfc, u32 time) +{ + writel(nfc->param.chip_select | NFC_CMD_IDLE | (time & 0x3ff), + nfc->reg_base + NFC_REG_CMD); +} + +static void meson_nfc_cmd_seed(struct meson_nfc *nfc, u32 seed) +{ + writel(NFC_CMD_SEED | (0xc2 + (seed & 0x7fff)), + nfc->reg_base + NFC_REG_CMD); +} + +static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir, + int scrambler) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + u32 bch = meson_chip->bch_mode, cmd; + int len = mtd->writesize, pagesize, pages; + + pagesize = nand->ecc.size; + + if (raw) { + len = mtd->writesize + mtd->oobsize; + cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + return; + } + + pages = len / nand->ecc.size; + + cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch, + NFC_CMD_SHORTMODE_DISABLE, pagesize, pages); + + writel(cmd, nfc->reg_base + NFC_REG_CMD); +} + +static void meson_nfc_drain_cmd(struct meson_nfc *nfc) +{ + /* + * Insert two commands to make sure all valid commands are finished. 
+ * + * The Nand flash controller is designed as two stages pipleline - + * a) fetch and b) excute. + * There might be cases when the driver see command queue is empty, + * but the Nand flash controller still has two commands buffered, + * one is fetched into NFC request queue (ready to run), and another + * is actively executing. So pushing 2 "IDLE" commands guarantees that + * the pipeline is emptied. + */ + meson_nfc_cmd_idle(nfc, 0); + meson_nfc_cmd_idle(nfc, 0); +} + +static int meson_nfc_wait_cmd_finish(struct meson_nfc *nfc, + unsigned int timeout_ms) +{ + u32 cmd_size = 0; + int ret; + + /* wait cmd fifo is empty */ + ret = readl_relaxed_poll_timeout(nfc->reg_base + NFC_REG_CMD, cmd_size, + !NFC_CMD_GET_SIZE(cmd_size), + 10, timeout_ms * 1000); + if (ret) + dev_err(nfc->dev, "wait for empty CMD FIFO time out\n"); + + return ret; +} + +static int meson_nfc_wait_dma_finish(struct meson_nfc *nfc) +{ + meson_nfc_drain_cmd(nfc); + + return meson_nfc_wait_cmd_finish(nfc, DMA_BUSY_TIMEOUT); +} + +static u8 *meson_nfc_oob_ptr(struct nand_chip *nand, int i) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int len; + + len = nand->ecc.size * (i + 1) + (nand->ecc.bytes + 2) * i; + + return meson_chip->data_buf + len; +} + +static u8 *meson_nfc_data_ptr(struct nand_chip *nand, int i) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int len, temp; + + temp = nand->ecc.size + nand->ecc.bytes; + len = (temp + 2) * i; + + return meson_chip->data_buf + len; +} + +static void meson_nfc_get_data_oob(struct nand_chip *nand, + u8 *buf, u8 *oobbuf) +{ + int i, oob_len = 0; + u8 *dsrc, *osrc; + + oob_len = nand->ecc.bytes + 2; + for (i = 0; i < nand->ecc.steps; i++) { + if (buf) { + dsrc = meson_nfc_data_ptr(nand, i); + memcpy(buf, dsrc, nand->ecc.size); + buf += nand->ecc.size; + } + osrc = meson_nfc_oob_ptr(nand, i); + memcpy(oobbuf, osrc, oob_len); + oobbuf += oob_len; + } +} + +static void meson_nfc_set_data_oob(struct nand_chip *nand, + const u8 *buf, u8 *oobbuf) +{ + int i, oob_len = 0; + u8 *dsrc, *osrc; + + oob_len = nand->ecc.bytes + 2; + for (i = 0; i < nand->ecc.steps; i++) { + if (buf) { + dsrc = meson_nfc_data_ptr(nand, i); + memcpy(dsrc, buf, nand->ecc.size); + buf += nand->ecc.size; + } + osrc = meson_nfc_oob_ptr(nand, i); + memcpy(osrc, oobbuf, oob_len); + oobbuf += oob_len; + } +} + +static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms) +{ + u32 cmd, cfg; + int ret = 0; + + meson_nfc_cmd_idle(nfc, nfc->timing.twb); + meson_nfc_drain_cmd(nfc); + meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT); + + cfg = readl(nfc->reg_base + NFC_REG_CFG); + cfg |= NFC_RB_IRQ_EN; + writel(cfg, nfc->reg_base + NFC_REG_CFG); + + init_completion(&nfc->completion); + + /* use the max erase time as the maximum clock for waiting R/B */ + cmd = NFC_CMD_RB | NFC_CMD_RB_INT + | nfc->param.chip_select | nfc->timing.tbers_max; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + ret = wait_for_completion_timeout(&nfc->completion, + msecs_to_jiffies(timeout_ms)); + if (ret == 0) + ret = -1; + + return ret; +} + +static void meson_nfc_set_user_byte(struct nand_chip *nand, u8 *oob_buf) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + __le64 *info; + int i, count; + + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { + info = &meson_chip->info_buf[i]; + *info |= oob_buf[count]; + *info |= oob_buf[count + 1] << 8; + } +} + +static void meson_nfc_get_user_byte(struct nand_chip *nand, u8 *oob_buf) +{ + struct meson_nfc_nand_chip 
*meson_chip = to_meson_nand(nand); + __le64 *info; + int i, count; + + for (i = 0, count = 0; i < nand->ecc.steps; i++, count += 2) { + info = &meson_chip->info_buf[i]; + oob_buf[count] = *info; + oob_buf[count + 1] = *info >> 8; + } +} + +static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips, + u64 *correct_bitmap) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + __le64 *info; + int ret = 0, i; + + for (i = 0; i < nand->ecc.steps; i++) { + info = &meson_chip->info_buf[i]; + if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) { + mtd->ecc_stats.corrected += ECC_ERR_CNT(*info); + *bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info)); + *correct_bitmap |= 1 >> i; + continue; + } + if ((nand->options & NAND_NEED_SCRAMBLING) && + ECC_ZERO_CNT(*info) < nand->ecc.strength) { + mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info); + *bitflips = max_t(u32, *bitflips, + ECC_ZERO_CNT(*info)); + ret = ECC_CHECK_RETURN_FF; + } else { + ret = -EBADMSG; + } + } + return ret; +} + +static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, u8 *databuf, + int datalen, u8 *infobuf, int infolen, + enum dma_data_direction dir) +{ + struct meson_nfc *nfc = nand_get_controller_data(nand); + u32 cmd; + int ret = 0; + + nfc->daddr = dma_map_single(nfc->dev, (void *)databuf, datalen, dir); + ret = dma_mapping_error(nfc->dev, nfc->daddr); + if (ret) { + dev_err(nfc->dev, "DMA mapping error\n"); + return ret; + } + cmd = GENCMDDADDRL(NFC_CMD_ADL, nfc->daddr); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + cmd = GENCMDDADDRH(NFC_CMD_ADH, nfc->daddr); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + if (infobuf) { + nfc->iaddr = dma_map_single(nfc->dev, infobuf, infolen, dir); + ret = dma_mapping_error(nfc->dev, nfc->iaddr); + if (ret) { + dev_err(nfc->dev, "DMA mapping error\n"); + dma_unmap_single(nfc->dev, + nfc->daddr, datalen, dir); + return ret; + } + cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + cmd = GENCMDIADDRH(NFC_CMD_AIH, nfc->iaddr); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + } + + return ret; +} + +static void meson_nfc_dma_buffer_release(struct nand_chip *nand, + int infolen, int datalen, + enum dma_data_direction dir) +{ + struct meson_nfc *nfc = nand_get_controller_data(nand); + + dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir); + if (infolen) + dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir); +} + +static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len) +{ + struct meson_nfc *nfc = nand_get_controller_data(nand); + int ret = 0; + u32 cmd; + u8 *info; + + info = kzalloc(PER_INFO_BYTE, GFP_KERNEL); + ret = meson_nfc_dma_buffer_setup(nand, buf, len, info, + PER_INFO_BYTE, DMA_FROM_DEVICE); + if (ret) + return ret; + + cmd = NFC_CMD_N2M | (len & GENMASK(5, 0)); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + meson_nfc_drain_cmd(nfc); + meson_nfc_wait_cmd_finish(nfc, 1000); + meson_nfc_dma_buffer_release(nand, len, PER_INFO_BYTE, DMA_FROM_DEVICE); + kfree(info); + + return ret; +} + +static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len) +{ + struct meson_nfc *nfc = nand_get_controller_data(nand); + int ret = 0; + u32 cmd; + + ret = meson_nfc_dma_buffer_setup(nand, buf, len, NULL, + 0, DMA_TO_DEVICE); + if (ret) + return ret; + + cmd = NFC_CMD_M2N | (len & GENMASK(5, 0)); + writel(cmd, nfc->reg_base + NFC_REG_CMD); + + meson_nfc_drain_cmd(nfc); + meson_nfc_wait_cmd_finish(nfc, 1000); + meson_nfc_dma_buffer_release(nand, len, 0, 
DMA_TO_DEVICE); + + return ret; +} + +static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, + int page, bool in) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(&nand->data_interface); + u32 *addrs = nfc->cmdfifo.rw.addrs; + u32 cs = nfc->param.chip_select; + u32 cmd0, cmd_num, row_start; + int ret = 0, i; + + cmd_num = sizeof(struct nand_rw_cmd) / sizeof(int); + + cmd0 = in ? NAND_CMD_READ0 : NAND_CMD_SEQIN; + nfc->cmdfifo.rw.cmd0 = cs | NFC_CMD_CLE | cmd0; + + addrs[0] = cs | NFC_CMD_ALE | 0; + if (mtd->writesize <= 512) { + cmd_num--; + row_start = 1; + } else { + addrs[1] = cs | NFC_CMD_ALE | 0; + row_start = 2; + } + + addrs[row_start] = cs | NFC_CMD_ALE | ROW_ADDER(page, 0); + addrs[row_start + 1] = cs | NFC_CMD_ALE | ROW_ADDER(page, 1); + + if (nand->options & NAND_ROW_ADDR_3) + addrs[row_start + 2] = + cs | NFC_CMD_ALE | ROW_ADDER(page, 2); + else + cmd_num--; + + /* subtract cmd1 */ + cmd_num--; + + for (i = 0; i < cmd_num; i++) + writel_relaxed(nfc->cmdfifo.cmd[i], + nfc->reg_base + NFC_REG_CMD); + + if (in) { + nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART; + writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD); + meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max)); + } else { + meson_nfc_cmd_idle(nfc, nfc->timing.tadl); + } + + return ret; +} + +static int meson_nfc_write_page_sub(struct nand_chip *nand, + int page, int raw) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(&nand->data_interface); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + int data_len, info_len; + u32 cmd; + int ret; + + meson_nfc_select_chip(nand, nand->cur_cs); + + data_len = mtd->writesize + mtd->oobsize; + info_len = nand->ecc.steps * PER_INFO_BYTE; + + ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRWRITE); + if (ret) + return ret; + + ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf, + data_len, (u8 *)meson_chip->info_buf, + info_len, DMA_TO_DEVICE); + if (ret) + return ret; + + if (nand->options & NAND_NEED_SCRAMBLING) { + meson_nfc_cmd_seed(nfc, page); + meson_nfc_cmd_access(nand, raw, DIRWRITE, + NFC_CMD_SCRAMBLER_ENABLE); + } else { + meson_nfc_cmd_access(nand, raw, DIRWRITE, + NFC_CMD_SCRAMBLER_DISABLE); + } + + cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max)); + + meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE); + + return ret; +} + +static int meson_nfc_write_page_raw(struct nand_chip *nand, const u8 *buf, + int oob_required, int page) +{ + u8 *oob_buf = nand->oob_poi; + + meson_nfc_set_data_oob(nand, buf, oob_buf); + + return meson_nfc_write_page_sub(nand, page, 1); +} + +static int meson_nfc_write_page_hwecc(struct nand_chip *nand, + const u8 *buf, int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + u8 *oob_buf = nand->oob_poi; + + memcpy(meson_chip->data_buf, buf, mtd->writesize); + memset(meson_chip->info_buf, 0, nand->ecc.steps * PER_INFO_BYTE); + meson_nfc_set_user_byte(nand, oob_buf); + + return meson_nfc_write_page_sub(nand, page, 0); +} + +static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc, + struct nand_chip *nand, int raw) +{ + struct 
meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + __le64 *info; + u32 neccpages; + int ret; + + neccpages = raw ? 1 : nand->ecc.steps; + info = &meson_chip->info_buf[neccpages - 1]; + do { + usleep_range(10, 15); + /* info is updated by nfc dma engine*/ + smp_rmb(); + ret = *info & ECC_COMPLETE; + } while (!ret); +} + +static int meson_nfc_read_page_sub(struct nand_chip *nand, + int page, int raw) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int data_len, info_len; + int ret; + + meson_nfc_select_chip(nand, nand->cur_cs); + + data_len = mtd->writesize + mtd->oobsize; + info_len = nand->ecc.steps * PER_INFO_BYTE; + + ret = meson_nfc_rw_cmd_prepare_and_execute(nand, page, DIRREAD); + if (ret) + return ret; + + ret = meson_nfc_dma_buffer_setup(nand, meson_chip->data_buf, + data_len, (u8 *)meson_chip->info_buf, + info_len, DMA_FROM_DEVICE); + if (ret) + return ret; + + if (nand->options & NAND_NEED_SCRAMBLING) { + meson_nfc_cmd_seed(nfc, page); + meson_nfc_cmd_access(nand, raw, DIRREAD, + NFC_CMD_SCRAMBLER_ENABLE); + } else { + meson_nfc_cmd_access(nand, raw, DIRREAD, + NFC_CMD_SCRAMBLER_DISABLE); + } + + ret = meson_nfc_wait_dma_finish(nfc); + meson_nfc_check_ecc_pages_valid(nfc, nand, raw); + + meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_FROM_DEVICE); + + return ret; +} + +static int meson_nfc_read_page_raw(struct nand_chip *nand, u8 *buf, + int oob_required, int page) +{ + u8 *oob_buf = nand->oob_poi; + int ret; + + ret = meson_nfc_read_page_sub(nand, page, 1); + if (ret) + return ret; + + meson_nfc_get_data_oob(nand, buf, oob_buf); + + return 0; +} + +static int meson_nfc_read_page_hwecc(struct nand_chip *nand, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; + u64 correct_bitmap = 0; + u32 bitflips = 0; + u8 *oob_buf = nand->oob_poi; + int ret, i; + + ret = meson_nfc_read_page_sub(nand, page, 0); + if (ret) + return ret; + + meson_nfc_get_user_byte(nand, oob_buf); + ret = meson_nfc_ecc_correct(nand, &bitflips, &correct_bitmap); + if (ret == ECC_CHECK_RETURN_FF) { + if (buf) + memset(buf, 0xff, mtd->writesize); + memset(oob_buf, 0xff, mtd->oobsize); + } else if (ret < 0) { + if ((nand->options & NAND_NEED_SCRAMBLING) || !buf) { + mtd->ecc_stats.failed++; + return bitflips; + } + ret = meson_nfc_read_page_raw(nand, buf, 0, page); + if (ret) + return ret; + + for (i = 0; i < nand->ecc.steps ; i++) { + u8 *data = buf + i * ecc->size; + u8 *oob = nand->oob_poi + i * (ecc->bytes + 2); + + if (correct_bitmap & (1 << i)) + continue; + ret = nand_check_erased_ecc_chunk(data, ecc->size, + oob, ecc->bytes + 2, + NULL, 0, + ecc->strength); + if (ret < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += ret; + bitflips = max_t(u32, bitflips, ret); + } + } + } else if (buf && buf != meson_chip->data_buf) { + memcpy(buf, meson_chip->data_buf, mtd->writesize); + } + + return bitflips; +} + +static int meson_nfc_read_oob_raw(struct nand_chip *nand, int page) +{ + return meson_nfc_read_page_raw(nand, NULL, 1, page); +} + +static int meson_nfc_read_oob(struct nand_chip *nand, int page) +{ + return meson_nfc_read_page_hwecc(nand, NULL, 1, page); +} + +static bool meson_nfc_is_buffer_dma_safe(const void *buffer) +{ + if (virt_addr_valid(buffer) && (!object_is_on_stack(buffer))) + return true; + return 
false; +} + +static void * +meson_nand_op_get_dma_safe_input_buf(const struct nand_op_instr *instr) +{ + if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR)) + return NULL; + + if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.in)) + return instr->ctx.data.buf.in; + + return kzalloc(instr->ctx.data.len, GFP_KERNEL); +} + +static void +meson_nand_op_put_dma_safe_input_buf(const struct nand_op_instr *instr, + void *buf) +{ + if (WARN_ON(instr->type != NAND_OP_DATA_IN_INSTR) || + WARN_ON(!buf)) + return; + + if (buf == instr->ctx.data.buf.in) + return; + + memcpy(instr->ctx.data.buf.in, buf, instr->ctx.data.len); + kfree(buf); +} + +static void * +meson_nand_op_get_dma_safe_output_buf(const struct nand_op_instr *instr) +{ + if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR)) + return NULL; + + if (meson_nfc_is_buffer_dma_safe(instr->ctx.data.buf.out)) + return (void *)instr->ctx.data.buf.out; + + return kmemdup(instr->ctx.data.buf.out, + instr->ctx.data.len, GFP_KERNEL); +} + +static void +meson_nand_op_put_dma_safe_output_buf(const struct nand_op_instr *instr, + const void *buf) +{ + if (WARN_ON(instr->type != NAND_OP_DATA_OUT_INSTR) || + WARN_ON(!buf)) + return; + + if (buf != instr->ctx.data.buf.out) + kfree(buf); +} + +static int meson_nfc_exec_op(struct nand_chip *nand, + const struct nand_operation *op, bool check_only) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct meson_nfc *nfc = nand_get_controller_data(nand); + const struct nand_op_instr *instr = NULL; + void *buf; + u32 op_id, delay_idle, cmd; + int i; + + meson_nfc_select_chip(nand, op->cs); + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + delay_idle = DIV_ROUND_UP(PSEC_TO_NSEC(instr->delay_ns), + meson_chip->level1_divider * + NFC_CLK_CYCLE); + switch (instr->type) { + case NAND_OP_CMD_INSTR: + cmd = nfc->param.chip_select | NFC_CMD_CLE; + cmd |= instr->ctx.cmd.opcode & 0xff; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + meson_nfc_cmd_idle(nfc, delay_idle); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + cmd = nfc->param.chip_select | NFC_CMD_ALE; + cmd |= instr->ctx.addr.addrs[i] & 0xff; + writel(cmd, nfc->reg_base + NFC_REG_CMD); + } + meson_nfc_cmd_idle(nfc, delay_idle); + break; + + case NAND_OP_DATA_IN_INSTR: + buf = meson_nand_op_get_dma_safe_input_buf(instr); + if (!buf) + return -ENOMEM; + meson_nfc_read_buf(nand, buf, instr->ctx.data.len); + meson_nand_op_put_dma_safe_input_buf(instr, buf); + break; + + case NAND_OP_DATA_OUT_INSTR: + buf = meson_nand_op_get_dma_safe_output_buf(instr); + if (!buf) + return -ENOMEM; + meson_nfc_write_buf(nand, buf, instr->ctx.data.len); + meson_nand_op_put_dma_safe_output_buf(instr, buf); + break; + + case NAND_OP_WAITRDY_INSTR: + meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms); + if (instr->delay_ns) + meson_nfc_cmd_idle(nfc, delay_idle); + break; + } + } + meson_nfc_wait_cmd_finish(nfc, 1000); + return 0; +} + +static int meson_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + + if (section >= nand->ecc.steps) + return -ERANGE; + + oobregion->offset = 2 + (section * (2 + nand->ecc.bytes)); + oobregion->length = nand->ecc.bytes; + + return 0; +} + +static int meson_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + + if (section >= nand->ecc.steps) + return -ERANGE; + + oobregion->offset = section * (2 + 
nand->ecc.bytes); + oobregion->length = 2; + + return 0; +} + +static const struct mtd_ooblayout_ops meson_ooblayout_ops = { + .ecc = meson_ooblayout_ecc, + .free = meson_ooblayout_free, +}; + +static int meson_nfc_clk_init(struct meson_nfc *nfc) +{ + int ret; + + /* request core clock */ + nfc->core_clk = devm_clk_get(nfc->dev, "core"); + if (IS_ERR(nfc->core_clk)) { + dev_err(nfc->dev, "failed to get core clock\n"); + return PTR_ERR(nfc->core_clk); + } + + nfc->device_clk = devm_clk_get(nfc->dev, "device"); + if (IS_ERR(nfc->device_clk)) { + dev_err(nfc->dev, "failed to get device clock\n"); + return PTR_ERR(nfc->device_clk); + } + + nfc->phase_tx = devm_clk_get(nfc->dev, "tx"); + if (IS_ERR(nfc->phase_tx)) { + dev_err(nfc->dev, "failed to get TX clk\n"); + return PTR_ERR(nfc->phase_tx); + } + + nfc->phase_rx = devm_clk_get(nfc->dev, "rx"); + if (IS_ERR(nfc->phase_rx)) { + dev_err(nfc->dev, "failed to get RX clk\n"); + return PTR_ERR(nfc->phase_rx); + } + + /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ + regmap_update_bits(nfc->reg_clk, + 0, CLK_SELECT_NAND, CLK_SELECT_NAND); + + ret = clk_prepare_enable(nfc->core_clk); + if (ret) { + dev_err(nfc->dev, "failed to enable core clock\n"); + return ret; + } + + ret = clk_prepare_enable(nfc->device_clk); + if (ret) { + dev_err(nfc->dev, "failed to enable device clock\n"); + goto err_device_clk; + } + + ret = clk_prepare_enable(nfc->phase_tx); + if (ret) { + dev_err(nfc->dev, "failed to enable TX clock\n"); + goto err_phase_tx; + } + + ret = clk_prepare_enable(nfc->phase_rx); + if (ret) { + dev_err(nfc->dev, "failed to enable RX clock\n"); + goto err_phase_rx; + } + + ret = clk_set_rate(nfc->device_clk, 24000000); + if (ret) + goto err_phase_rx; + + return 0; +err_phase_rx: + clk_disable_unprepare(nfc->phase_tx); +err_phase_tx: + clk_disable_unprepare(nfc->device_clk); +err_device_clk: + clk_disable_unprepare(nfc->core_clk); + return ret; +} + +static void meson_nfc_disable_clk(struct meson_nfc *nfc) +{ + clk_disable_unprepare(nfc->phase_rx); + clk_disable_unprepare(nfc->phase_tx); + clk_disable_unprepare(nfc->device_clk); + clk_disable_unprepare(nfc->core_clk); +} + +static void meson_nfc_free_buffer(struct nand_chip *nand) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + + kfree(meson_chip->info_buf); + kfree(meson_chip->data_buf); +} + +static int meson_chip_buffer_init(struct nand_chip *nand) +{ + struct mtd_info *mtd = nand_to_mtd(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + u32 page_bytes, info_bytes, nsectors; + + nsectors = mtd->writesize / nand->ecc.size; + + page_bytes = mtd->writesize + mtd->oobsize; + info_bytes = nsectors * PER_INFO_BYTE; + + meson_chip->data_buf = kmalloc(page_bytes, GFP_KERNEL); + if (!meson_chip->data_buf) + return -ENOMEM; + + meson_chip->info_buf = kmalloc(info_bytes, GFP_KERNEL); + if (!meson_chip->info_buf) { + kfree(meson_chip->data_buf); + return -ENOMEM; + } + + return 0; +} + +static +int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline, + const struct nand_data_interface *conf) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + const struct nand_sdr_timings *timings; + u32 div, bt_min, bt_max, tbers_clocks; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return -ENOTSUPP; + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + div = DIV_ROUND_UP((timings->tRC_min / 1000), NFC_CLK_CYCLE); + bt_min = (timings->tREA_max + NFC_DEFAULT_DELAY) / div; + bt_max = (NFC_DEFAULT_DELAY + 
timings->tRHOH_min + + timings->tRC_min / 2) / div; + + meson_chip->twb = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tWB_max), + div * NFC_CLK_CYCLE); + meson_chip->tadl = DIV_ROUND_UP(PSEC_TO_NSEC(timings->tADL_min), + div * NFC_CLK_CYCLE); + tbers_clocks = DIV_ROUND_UP_ULL(PSEC_TO_NSEC(timings->tBERS_max), + div * NFC_CLK_CYCLE); + meson_chip->tbers_max = ilog2(tbers_clocks); + if (!is_power_of_2(tbers_clocks)) + meson_chip->tbers_max++; + + bt_min = DIV_ROUND_UP(bt_min, 1000); + bt_max = DIV_ROUND_UP(bt_max, 1000); + + if (bt_max < bt_min) + return -EINVAL; + + meson_chip->level1_divider = div; + meson_chip->clk_rate = 1000000000 / meson_chip->level1_divider; + meson_chip->bus_timing = (bt_min + bt_max) / 2 + 1; + + return 0; +} + +static int meson_nand_bch_mode(struct nand_chip *nand) +{ + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + int i; + + if (nand->ecc.strength > 60 || nand->ecc.strength < 8) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) { + if (meson_ecc[i].strength == nand->ecc.strength) { + meson_chip->bch_mode = meson_ecc[i].bch; + return 0; + } + } + + return -EINVAL; +} + +static void meson_nand_detach_chip(struct nand_chip *nand) +{ + meson_nfc_free_buffer(nand); +} + +static int meson_nand_attach_chip(struct nand_chip *nand) +{ + struct meson_nfc *nfc = nand_get_controller_data(nand); + struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); + struct mtd_info *mtd = nand_to_mtd(nand); + int nsectors = mtd->writesize / 1024; + int ret; + + if (!mtd->name) { + mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, + "%s:nand%d", + dev_name(nfc->dev), + meson_chip->sels[0]); + if (!mtd->name) + return -ENOMEM; + } + + if (nand->bbt_options & NAND_BBT_USE_FLASH) + nand->bbt_options |= NAND_BBT_NO_OOB; + + nand->options |= NAND_NO_SUBPAGE_WRITE; + + ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps, + mtd->oobsize - 2 * nsectors); + if (ret) { + dev_err(nfc->dev, "failed to ECC init\n"); + return -EINVAL; + } + + ret = meson_nand_bch_mode(nand); + if (ret) + return -EINVAL; + + nand->ecc.mode = NAND_ECC_HW; + nand->ecc.write_page_raw = meson_nfc_write_page_raw; + nand->ecc.write_page = meson_nfc_write_page_hwecc; + nand->ecc.write_oob_raw = nand_write_oob_std; + nand->ecc.write_oob = nand_write_oob_std; + + nand->ecc.read_page_raw = meson_nfc_read_page_raw; + nand->ecc.read_page = meson_nfc_read_page_hwecc; + nand->ecc.read_oob_raw = meson_nfc_read_oob_raw; + nand->ecc.read_oob = meson_nfc_read_oob; + + if (nand->options & NAND_BUSWIDTH_16) { + dev_err(nfc->dev, "16bits bus width not supported"); + return -EINVAL; + } + ret = meson_chip_buffer_init(nand); + if (ret) + return -ENOMEM; + + return ret; +} + +static const struct nand_controller_ops meson_nand_controller_ops = { + .attach_chip = meson_nand_attach_chip, + .detach_chip = meson_nand_detach_chip, + .setup_data_interface = meson_nfc_setup_data_interface, + .exec_op = meson_nfc_exec_op, +}; + +static int +meson_nfc_nand_chip_init(struct device *dev, + struct meson_nfc *nfc, struct device_node *np) +{ + struct meson_nfc_nand_chip *meson_chip; + struct nand_chip *nand; + struct mtd_info *mtd; + int ret, i; + u32 tmp, nsels; + + if (!of_get_property(np, "reg", &nsels)) + return -EINVAL; + + nsels /= sizeof(u32); + if (!nsels || nsels > MAX_CE_NUM) { + dev_err(dev, "invalid register property size\n"); + return -EINVAL; + } + + meson_chip = devm_kzalloc(dev, + sizeof(*meson_chip) + (nsels * sizeof(u8)), + GFP_KERNEL); + if (!meson_chip) + return -ENOMEM; + + meson_chip->nsels = nsels; + + for 
(i = 0; i < nsels; i++) { + ret = of_property_read_u32_index(np, "reg", i, &tmp); + if (ret) { + dev_err(dev, "could not retrieve register property: %d\n", + ret); + return ret; + } + + if (test_and_set_bit(tmp, &nfc->assigned_cs)) { + dev_err(dev, "CS %d already assigned\n", tmp); + return -EINVAL; + } + } + + nand = &meson_chip->nand; + nand->controller = &nfc->controller; + nand->controller->ops = &meson_nand_controller_ops; + nand_set_flash_node(nand, np); + nand_set_controller_data(nand, nfc); + + nand->options |= NAND_USE_BOUNCE_BUFFER; + mtd = nand_to_mtd(nand); + mtd->owner = THIS_MODULE; + mtd->dev.parent = dev; + + ret = nand_scan(nand, nsels); + if (ret) + return ret; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + dev_err(dev, "failed to register MTD device: %d\n", ret); + nand_cleanup(nand); + return ret; + } + + list_add_tail(&meson_chip->node, &nfc->chips); + + return 0; +} + +static int meson_nfc_nand_chip_cleanup(struct meson_nfc *nfc) +{ + struct meson_nfc_nand_chip *meson_chip; + struct mtd_info *mtd; + int ret; + + while (!list_empty(&nfc->chips)) { + meson_chip = list_first_entry(&nfc->chips, + struct meson_nfc_nand_chip, node); + mtd = nand_to_mtd(&meson_chip->nand); + ret = mtd_device_unregister(mtd); + if (ret) + return ret; + + meson_nfc_free_buffer(&meson_chip->nand); + nand_cleanup(&meson_chip->nand); + list_del(&meson_chip->node); + } + + return 0; +} + +static int meson_nfc_nand_chips_init(struct device *dev, + struct meson_nfc *nfc) +{ + struct device_node *np = dev->of_node; + struct device_node *nand_np; + int ret; + + for_each_child_of_node(np, nand_np) { + ret = meson_nfc_nand_chip_init(dev, nfc, nand_np); + if (ret) { + meson_nfc_nand_chip_cleanup(nfc); + return ret; + } + } + + return 0; +} + +static irqreturn_t meson_nfc_irq(int irq, void *id) +{ + struct meson_nfc *nfc = id; + u32 cfg; + + cfg = readl(nfc->reg_base + NFC_REG_CFG); + if (!(cfg & NFC_RB_IRQ_EN)) + return IRQ_NONE; + + cfg &= ~(NFC_RB_IRQ_EN); + writel(cfg, nfc->reg_base + NFC_REG_CFG); + + complete(&nfc->completion); + return IRQ_HANDLED; +} + +static const struct meson_nfc_data meson_gxl_data = { + .ecc_caps = &meson_gxl_ecc_caps, +}; + +static const struct meson_nfc_data meson_axg_data = { + .ecc_caps = &meson_axg_ecc_caps, +}; + +static const struct of_device_id meson_nfc_id_table[] = { + { + .compatible = "amlogic,meson-gxl-nfc", + .data = &meson_gxl_data, + }, { + .compatible = "amlogic,meson-axg-nfc", + .data = &meson_axg_data, + }, + {} +}; +MODULE_DEVICE_TABLE(of, meson_nfc_id_table); + +static int meson_nfc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct meson_nfc *nfc; + struct resource *res; + int ret, irq; + + nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->data = of_device_get_match_data(&pdev->dev); + if (!nfc->data) + return -ENODEV; + + nand_controller_init(&nfc->controller); + INIT_LIST_HEAD(&nfc->chips); + + nfc->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + nfc->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(nfc->reg_base)) + return PTR_ERR(nfc->reg_base); + + nfc->reg_clk = + syscon_regmap_lookup_by_phandle(dev->of_node, + "amlogic,mmc-syscon"); + if (IS_ERR(nfc->reg_clk)) { + dev_err(dev, "Failed to lookup clock base\n"); + return PTR_ERR(nfc->reg_clk); + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "no NFC IRQ resource\n"); + return -EINVAL; + } + + ret = meson_nfc_clk_init(nfc); + if (ret) { + dev_err(dev, "failed to 
initialize NAND clock\n"); + return ret; + } + + writel(0, nfc->reg_base + NFC_REG_CFG); + ret = devm_request_irq(dev, irq, meson_nfc_irq, 0, dev_name(dev), nfc); + if (ret) { + dev_err(dev, "failed to request NFC IRQ\n"); + ret = -EINVAL; + goto err_clk; + } + + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "failed to set DMA mask\n"); + goto err_clk; + } + + platform_set_drvdata(pdev, nfc); + + ret = meson_nfc_nand_chips_init(dev, nfc); + if (ret) { + dev_err(dev, "failed to init NAND chips\n"); + goto err_clk; + } + + return 0; +err_clk: + meson_nfc_disable_clk(nfc); + return ret; +} + +static int meson_nfc_remove(struct platform_device *pdev) +{ + struct meson_nfc *nfc = platform_get_drvdata(pdev); + int ret; + + ret = meson_nfc_nand_chip_cleanup(nfc); + if (ret) + return ret; + + meson_nfc_disable_clk(nfc); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver meson_nfc_driver = { + .probe = meson_nfc_probe, + .remove = meson_nfc_remove, + .driver = { + .name = "meson-nand", + .of_match_table = meson_nfc_id_table, + }, +}; +module_platform_driver(meson_nfc_driver); + +MODULE_LICENSE("Dual MIT/GPL"); +MODULE_AUTHOR("Liang Yang "); +MODULE_DESCRIPTION("Amlogic's Meson NAND Flash Controller driver"); diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 6432bd70c3b3..05b0c19d72d9 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -267,11 +267,15 @@ static struct mtk_ecc *mtk_ecc_get(struct device_node *np) struct mtk_ecc *ecc; pdev = of_find_device_by_node(np); - if (!pdev || !platform_get_drvdata(pdev)) + if (!pdev) return ERR_PTR(-EPROBE_DEFER); - get_device(&pdev->dev); ecc = platform_get_drvdata(pdev); + if (!ecc) { + put_device(&pdev->dev); + return ERR_PTR(-EPROBE_DEFER); + } + clk_prepare_enable(ecc->clk); mtk_ecc_hw_init(ecc); diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index b6b4602f5132..2c0e09187773 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -1451,8 +1451,7 @@ static int mtk_nfc_probe(struct platform_device *pdev) if (!nfc) return -ENOMEM; - spin_lock_init(&nfc->controller.lock); - init_waitqueue_head(&nfc->controller.wq); + nand_controller_init(&nfc->controller); INIT_LIST_HEAD(&nfc->chips); nfc->controller.ops = &mtk_nfc_controller_ops; diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index cca4b24d2ffa..ddd396e93e32 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -278,11 +278,8 @@ EXPORT_SYMBOL_GPL(nand_deselect_target); static void nand_release_device(struct nand_chip *chip) { /* Release the controller and the chip */ - spin_lock(&chip->controller->lock); - chip->controller->active = NULL; - chip->state = FL_READY; - wake_up(&chip->controller->wq); - spin_unlock(&chip->controller->lock); + mutex_unlock(&chip->controller->lock); + mutex_unlock(&chip->lock); } /** @@ -330,58 +327,24 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs) return nand_block_bad(chip, ofs); } -/** - * panic_nand_get_device - [GENERIC] Get chip for selected access - * @chip: the nand chip descriptor - * @new_state: the state which is requested - * - * Used when in panic, no locks are taken. 
- */ -static void panic_nand_get_device(struct nand_chip *chip, int new_state) -{ - /* Hardware controller shared among independent devices */ - chip->controller->active = chip; - chip->state = new_state; -} - /** * nand_get_device - [GENERIC] Get chip for selected access * @chip: NAND chip structure - * @new_state: the state which is requested * - * Get the device and lock it for exclusive access + * Lock the device and its controller for exclusive access + * + * Return: -EBUSY if the chip has been suspended, 0 otherwise */ -static int -nand_get_device(struct nand_chip *chip, int new_state) +static int nand_get_device(struct nand_chip *chip) { - spinlock_t *lock = &chip->controller->lock; - wait_queue_head_t *wq = &chip->controller->wq; - DECLARE_WAITQUEUE(wait, current); -retry: - spin_lock(lock); - - /* Hardware controller shared among independent devices */ - if (!chip->controller->active) - chip->controller->active = chip; - - if (chip->controller->active == chip && chip->state == FL_READY) { - chip->state = new_state; - spin_unlock(lock); - return 0; - } - if (new_state == FL_PM_SUSPENDED) { - if (chip->controller->active->state == FL_PM_SUSPENDED) { - chip->state = FL_PM_SUSPENDED; - spin_unlock(lock); - return 0; - } + mutex_lock(&chip->lock); + if (chip->suspended) { + mutex_unlock(&chip->lock); + return -EBUSY; } - set_current_state(TASK_UNINTERRUPTIBLE); - add_wait_queue(wq, &wait); - spin_unlock(lock); - schedule(); - remove_wait_queue(wq, &wait); - goto retry; + mutex_lock(&chip->controller->lock); + + return 0; } /** @@ -410,6 +373,7 @@ static int nand_check_wp(struct nand_chip *chip) /** * nand_fill_oob - [INTERN] Transfer client buffer to oob + * @chip: NAND chip object * @oob: oob data buffer * @len: oob data write length * @ops: oob ops structure @@ -457,7 +421,7 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to, struct mtd_oob_ops *ops) { struct mtd_info *mtd = nand_to_mtd(chip); - int chipnr, page, status, len; + int chipnr, page, status, len, ret; pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, (int)ops->ooblen); @@ -479,7 +443,9 @@ static int nand_do_write_oob(struct nand_chip *chip, loff_t to, * if we don't do this. I have no clue why, but I seem to have 'fixed' * it in the doc2000 driver in August 1999. dwmw2. 
*/ - nand_reset(chip, chipnr); + ret = nand_reset(chip, chipnr); + if (ret) + return ret; nand_select_target(chip, chipnr); @@ -602,7 +568,10 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs) nand_erase_nand(chip, &einfo, 0); /* Write bad block marker to OOB */ - nand_get_device(chip, FL_WRITING); + ret = nand_get_device(chip); + if (ret) + return ret; + ret = nand_markbad_bbm(chip, ofs); nand_release_device(chip); } @@ -3580,7 +3549,9 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, ops->mode != MTD_OPS_RAW) return -ENOTSUPP; - nand_get_device(chip, FL_READING); + ret = nand_get_device(chip); + if (ret) + return ret; if (!ops->datbuf) ret = nand_do_read_oob(chip, from, ops); @@ -4099,9 +4070,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, struct mtd_oob_ops ops; int ret; - /* Grab the device */ - panic_nand_get_device(chip, FL_WRITING); - nand_select_target(chip, chipnr); /* Wait for the device to get ready */ @@ -4132,7 +4100,9 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, ops->retlen = 0; - nand_get_device(chip, FL_WRITING); + ret = nand_get_device(chip); + if (ret) + return ret; switch (ops->mode) { case MTD_OPS_PLACE_OOB: @@ -4154,23 +4124,6 @@ out: return ret; } -/** - * single_erase - [GENERIC] NAND standard block erase command function - * @chip: NAND chip object - * @page: the page address of the block which will be erased - * - * Standard erase command for NAND chips. Returns NAND status. - */ -static int single_erase(struct nand_chip *chip, int page) -{ - unsigned int eraseblock; - - /* Send commands to erase a block */ - eraseblock = page >> (chip->phys_erase_shift - chip->page_shift); - - return nand_erase_op(chip, eraseblock); -} - /** * nand_erase - [MTD Interface] erase block(s) * @mtd: MTD device structure @@ -4194,7 +4147,7 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, int allowbbt) { - int page, status, pages_per_block, ret, chipnr; + int page, pages_per_block, ret, chipnr; loff_t len; pr_debug("%s: start = 0x%012llx, len = %llu\n", @@ -4205,7 +4158,9 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, return -EINVAL; /* Grab the lock and see if the device is available */ - nand_get_device(chip, FL_ERASING); + ret = nand_get_device(chip); + if (ret) + return ret; /* Shift to get first page */ page = (int)(instr->addr >> chip->page_shift); @@ -4246,17 +4201,11 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, (page + pages_per_block)) chip->pagebuf = -1; - if (chip->legacy.erase) - status = chip->legacy.erase(chip, - page & chip->pagemask); - else - status = single_erase(chip, page & chip->pagemask); - - /* See if block erase succeeded */ - if (status) { + ret = nand_erase_op(chip, (page & chip->pagemask) >> + (chip->phys_erase_shift - chip->page_shift)); + if (ret) { pr_debug("%s: failed erase, page 0x%08x\n", __func__, page); - ret = -EIO; instr->fail_addr = ((loff_t)page << chip->page_shift); goto erase_exit; @@ -4298,7 +4247,7 @@ static void nand_sync(struct mtd_info *mtd) pr_debug("%s: called\n", __func__); /* Grab the lock and see if the device is available */ - nand_get_device(chip, FL_SYNCING); + WARN_ON(nand_get_device(chip)); /* Release it and go back */ nand_release_device(chip); } @@ -4315,7 +4264,10 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) int ret; /* Select the NAND device */ - nand_get_device(chip, 
FL_READING); + ret = nand_get_device(chip); + if (ret) + return ret; + nand_select_target(chip, chipnr); ret = nand_block_checkbad(chip, offs, 0); @@ -4388,7 +4340,13 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len) */ static int nand_suspend(struct mtd_info *mtd) { - return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED); + struct nand_chip *chip = mtd_to_nand(mtd); + + mutex_lock(&chip->lock); + chip->suspended = 1; + mutex_unlock(&chip->lock); + + return 0; } /** @@ -4399,11 +4357,13 @@ static void nand_resume(struct mtd_info *mtd) { struct nand_chip *chip = mtd_to_nand(mtd); - if (chip->state == FL_PM_SUSPENDED) - nand_release_device(chip); + mutex_lock(&chip->lock); + if (chip->suspended) + chip->suspended = 0; else pr_err("%s called for a chip which is not in suspended state\n", __func__); + mutex_unlock(&chip->lock); } /** @@ -4413,7 +4373,7 @@ static void nand_resume(struct mtd_info *mtd) */ static void nand_shutdown(struct mtd_info *mtd) { - nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED); + nand_suspend(mtd); } /* Set default functions */ @@ -5018,6 +4978,8 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, /* Assume all dies are deselected when we enter nand_scan_ident(). */ chip->cur_cs = -1; + mutex_init(&chip->lock); + /* Enforce the right timings for reset/detection */ onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0); @@ -5060,11 +5022,15 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, u8 id[2]; /* See comment in nand_get_flash_type for reset */ - nand_reset(chip, i); + ret = nand_reset(chip, i); + if (ret) + break; nand_select_target(chip, i); /* Send the command for reading device ID */ - nand_readid_op(chip, 0, id, sizeof(id)); + ret = nand_readid_op(chip, 0, id, sizeof(id)); + if (ret) + break; /* Read manufacturer and device IDs */ if (nand_maf_id != id[0] || nand_dev_id != id[1]) { nand_deselect_target(chip); @@ -5555,6 +5521,7 @@ static int nand_scan_tail(struct nand_chip *chip) } if (!ecc->read_page) ecc->read_page = nand_read_page_hwecc_oob_first; + /* fall through */ case NAND_ECC_HW: /* Use standard hwecc read page function? 
*/ @@ -5574,6 +5541,7 @@ static int nand_scan_tail(struct nand_chip *chip) ecc->read_subpage = nand_read_subpage; if (!ecc->write_subpage && ecc->hwctl && ecc->calculate) ecc->write_subpage = nand_write_subpage_hwecc; + /* fall through */ case NAND_ECC_HW_SYNDROME: if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) && @@ -5611,6 +5579,7 @@ static int nand_scan_tail(struct nand_chip *chip) ecc->size, mtd->writesize); ecc->mode = NAND_ECC_SOFT; ecc->algo = NAND_ECC_HAMMING; + /* fall through */ case NAND_ECC_SOFT: ret = nand_set_ecc_soft_ops(chip); @@ -5717,9 +5686,6 @@ static int nand_scan_tail(struct nand_chip *chip) } chip->subpagesize = mtd->writesize >> mtd->subpage_sft; - /* Initialize state */ - chip->state = FL_READY; - /* Invalidate the pagebuffer reference */ chip->pagebuf = -1; diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 1b722fe9213c..19a2b563acdf 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td) /** * read_bbt - [GENERIC] Read the bad block table starting from page - * @chip: NAND chip object + * @this: NAND chip object * @buf: temporary buffer * @page: the starting page * @num: the number of bbt descriptors to read diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index 43575943f13b..f2526ec616a6 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -331,6 +331,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command, */ if (column == -1 && page_addr == -1) return; + /* fall through */ default: /* @@ -483,7 +484,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command, chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); - /* This applies to read commands */ + /* fall through - This applies to read commands */ default: /* * If we don't have access to the busy pin, we apply the given diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index 68e8b9f7f372..8f280a2962c8 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -994,12 +994,9 @@ static int omap_wait(struct nand_chip *this) { struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this)); unsigned long timeo = jiffies; - int status, state = this->state; + int status; - if (state == FL_ERASING) - timeo += msecs_to_jiffies(400); - else - timeo += msecs_to_jiffies(20); + timeo += msecs_to_jiffies(400); writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command); while (time_before(jiffies, timeo)) { @@ -2173,11 +2170,8 @@ static const struct nand_controller_ops omap_nand_controller_ops = { }; /* Shared among all NAND instances to synchronize access to the ECC Engine */ -static struct nand_controller omap_gpmc_controller = { - .lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock), - .wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq), - .ops = &omap_nand_controller_ops, -}; +static struct nand_controller omap_gpmc_controller; +static bool omap_gpmc_controller_initialized; static int omap_nand_probe(struct platform_device *pdev) { @@ -2227,6 +2221,12 @@ static int omap_nand_probe(struct platform_device *pdev) info->phys_base = res->start; + if (!omap_gpmc_controller_initialized) { + omap_gpmc_controller.ops = &omap_nand_controller_ops; + nand_controller_init(&omap_gpmc_controller); + omap_gpmc_controller_initialized = true; + } + nand_chip->controller = &omap_gpmc_controller; 
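/*
 * Context sketch (not part of the change itself): the hunk above drops the
 * statically initialized spinlock/waitqueue pair from the shared
 * omap_gpmc_controller and instead performs a one-time, lazy
 * nand_controller_init() in probe, matching the nand_base switch from the
 * FL_* chip-state machinery to plain mutexes. Assuming probe() calls are
 * serialized, the pattern reduces to:
 *
 *	static struct nand_controller ctrl;
 *	static bool ctrl_initialized;
 *
 *	if (!ctrl_initialized) {
 *		ctrl.ops = &omap_nand_controller_ops;
 *		nand_controller_init(&ctrl);
 *		ctrl_initialized = true;
 *	}
 *
 * "ctrl" and "ctrl_initialized" are illustrative names only.
 */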
nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R; diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index c01422d953dd..86456216fb93 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -369,8 +369,7 @@ static int r852_wait(struct nand_chip *chip) unsigned long timeout; u8 status; - timeout = jiffies + (chip->state == FL_ERASING ? - msecs_to_jiffies(400) : msecs_to_jiffies(20)); + timeout = jiffies + msecs_to_jiffies(400); while (time_before(jiffies, timeout)) if (chip->legacy.dev_ready(chip)) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c new file mode 100644 index 000000000000..999ca6a66036 --- /dev/null +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -0,0 +1,2073 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2018 + * Author: Christophe Kerello + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Bad block marker length */ +#define FMC2_BBM_LEN 2 + +/* ECC step size */ +#define FMC2_ECC_STEP_SIZE 512 + +/* BCHDSRx registers length */ +#define FMC2_BCHDSRS_LEN 20 + +/* HECCR length */ +#define FMC2_HECCR_LEN 4 + +/* Max requests done for a 8k nand page size */ +#define FMC2_MAX_SG 16 + +/* Max chip enable */ +#define FMC2_MAX_CE 2 + +/* Max ECC buffer length */ +#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) + +/* Timings */ +#define FMC2_THIZ 1 +#define FMC2_TIO 8000 +#define FMC2_TSYNC 3000 +#define FMC2_PCR_TIMING_MASK 0xf +#define FMC2_PMEM_PATT_TIMING_MASK 0xff + +/* FMC2 Controller Registers */ +#define FMC2_BCR1 0x0 +#define FMC2_PCR 0x80 +#define FMC2_SR 0x84 +#define FMC2_PMEM 0x88 +#define FMC2_PATT 0x8c +#define FMC2_HECCR 0x94 +#define FMC2_CSQCR 0x200 +#define FMC2_CSQCFGR1 0x204 +#define FMC2_CSQCFGR2 0x208 +#define FMC2_CSQCFGR3 0x20c +#define FMC2_CSQAR1 0x210 +#define FMC2_CSQAR2 0x214 +#define FMC2_CSQIER 0x220 +#define FMC2_CSQISR 0x224 +#define FMC2_CSQICR 0x228 +#define FMC2_CSQEMSR 0x230 +#define FMC2_BCHIER 0x250 +#define FMC2_BCHISR 0x254 +#define FMC2_BCHICR 0x258 +#define FMC2_BCHPBR1 0x260 +#define FMC2_BCHPBR2 0x264 +#define FMC2_BCHPBR3 0x268 +#define FMC2_BCHPBR4 0x26c +#define FMC2_BCHDSR0 0x27c +#define FMC2_BCHDSR1 0x280 +#define FMC2_BCHDSR2 0x284 +#define FMC2_BCHDSR3 0x288 +#define FMC2_BCHDSR4 0x28c + +/* Register: FMC2_BCR1 */ +#define FMC2_BCR1_FMC2EN BIT(31) + +/* Register: FMC2_PCR */ +#define FMC2_PCR_PWAITEN BIT(1) +#define FMC2_PCR_PBKEN BIT(2) +#define FMC2_PCR_PWID_MASK GENMASK(5, 4) +#define FMC2_PCR_PWID(x) (((x) & 0x3) << 4) +#define FMC2_PCR_PWID_BUSWIDTH_8 0 +#define FMC2_PCR_PWID_BUSWIDTH_16 1 +#define FMC2_PCR_ECCEN BIT(6) +#define FMC2_PCR_ECCALG BIT(8) +#define FMC2_PCR_TCLR_MASK GENMASK(12, 9) +#define FMC2_PCR_TCLR(x) (((x) & 0xf) << 9) +#define FMC2_PCR_TCLR_DEFAULT 0xf +#define FMC2_PCR_TAR_MASK GENMASK(16, 13) +#define FMC2_PCR_TAR(x) (((x) & 0xf) << 13) +#define FMC2_PCR_TAR_DEFAULT 0xf +#define FMC2_PCR_ECCSS_MASK GENMASK(19, 17) +#define FMC2_PCR_ECCSS(x) (((x) & 0x7) << 17) +#define FMC2_PCR_ECCSS_512 1 +#define FMC2_PCR_ECCSS_2048 3 +#define FMC2_PCR_BCHECC BIT(24) +#define FMC2_PCR_WEN BIT(25) + +/* Register: FMC2_SR */ +#define FMC2_SR_NWRF BIT(6) + +/* Register: FMC2_PMEM */ +#define FMC2_PMEM_MEMSET(x) (((x) & 0xff) << 0) +#define FMC2_PMEM_MEMWAIT(x) (((x) & 0xff) << 8) +#define FMC2_PMEM_MEMHOLD(x) (((x) & 0xff) << 16) +#define FMC2_PMEM_MEMHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PMEM_DEFAULT 
0x0a0a0a0a + +/* Register: FMC2_PATT */ +#define FMC2_PATT_ATTSET(x) (((x) & 0xff) << 0) +#define FMC2_PATT_ATTWAIT(x) (((x) & 0xff) << 8) +#define FMC2_PATT_ATTHOLD(x) (((x) & 0xff) << 16) +#define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) +#define FMC2_PATT_DEFAULT 0x0a0a0a0a + +/* Register: FMC2_CSQCR */ +#define FMC2_CSQCR_CSQSTART BIT(0) + +/* Register: FMC2_CSQCFGR1 */ +#define FMC2_CSQCFGR1_CMD2EN BIT(1) +#define FMC2_CSQCFGR1_DMADEN BIT(2) +#define FMC2_CSQCFGR1_ACYNBR(x) (((x) & 0x7) << 4) +#define FMC2_CSQCFGR1_CMD1(x) (((x) & 0xff) << 8) +#define FMC2_CSQCFGR1_CMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR1_CMD1T BIT(24) +#define FMC2_CSQCFGR1_CMD2T BIT(25) + +/* Register: FMC2_CSQCFGR2 */ +#define FMC2_CSQCFGR2_SQSDTEN BIT(0) +#define FMC2_CSQCFGR2_RCMD2EN BIT(1) +#define FMC2_CSQCFGR2_DMASEN BIT(2) +#define FMC2_CSQCFGR2_RCMD1(x) (((x) & 0xff) << 8) +#define FMC2_CSQCFGR2_RCMD2(x) (((x) & 0xff) << 16) +#define FMC2_CSQCFGR2_RCMD1T BIT(24) +#define FMC2_CSQCFGR2_RCMD2T BIT(25) + +/* Register: FMC2_CSQCFGR3 */ +#define FMC2_CSQCFGR3_SNBR(x) (((x) & 0x1f) << 8) +#define FMC2_CSQCFGR3_AC1T BIT(16) +#define FMC2_CSQCFGR3_AC2T BIT(17) +#define FMC2_CSQCFGR3_AC3T BIT(18) +#define FMC2_CSQCFGR3_AC4T BIT(19) +#define FMC2_CSQCFGR3_AC5T BIT(20) +#define FMC2_CSQCFGR3_SDT BIT(21) +#define FMC2_CSQCFGR3_RAC1T BIT(22) +#define FMC2_CSQCFGR3_RAC2T BIT(23) + +/* Register: FMC2_CSQCAR1 */ +#define FMC2_CSQCAR1_ADDC1(x) (((x) & 0xff) << 0) +#define FMC2_CSQCAR1_ADDC2(x) (((x) & 0xff) << 8) +#define FMC2_CSQCAR1_ADDC3(x) (((x) & 0xff) << 16) +#define FMC2_CSQCAR1_ADDC4(x) (((x) & 0xff) << 24) + +/* Register: FMC2_CSQCAR2 */ +#define FMC2_CSQCAR2_ADDC5(x) (((x) & 0xff) << 0) +#define FMC2_CSQCAR2_NANDCEN(x) (((x) & 0x3) << 10) +#define FMC2_CSQCAR2_SAO(x) (((x) & 0xffff) << 16) + +/* Register: FMC2_CSQIER */ +#define FMC2_CSQIER_TCIE BIT(0) + +/* Register: FMC2_CSQICR */ +#define FMC2_CSQICR_CLEAR_IRQ GENMASK(4, 0) + +/* Register: FMC2_CSQEMSR */ +#define FMC2_CSQEMSR_SEM GENMASK(15, 0) + +/* Register: FMC2_BCHIER */ +#define FMC2_BCHIER_DERIE BIT(1) +#define FMC2_BCHIER_EPBRIE BIT(4) + +/* Register: FMC2_BCHICR */ +#define FMC2_BCHICR_CLEAR_IRQ GENMASK(4, 0) + +/* Register: FMC2_BCHDSR0 */ +#define FMC2_BCHDSR0_DUE BIT(0) +#define FMC2_BCHDSR0_DEF BIT(1) +#define FMC2_BCHDSR0_DEN_MASK GENMASK(7, 4) +#define FMC2_BCHDSR0_DEN_SHIFT 4 + +/* Register: FMC2_BCHDSR1 */ +#define FMC2_BCHDSR1_EBP1_MASK GENMASK(12, 0) +#define FMC2_BCHDSR1_EBP2_MASK GENMASK(28, 16) +#define FMC2_BCHDSR1_EBP2_SHIFT 16 + +/* Register: FMC2_BCHDSR2 */ +#define FMC2_BCHDSR2_EBP3_MASK GENMASK(12, 0) +#define FMC2_BCHDSR2_EBP4_MASK GENMASK(28, 16) +#define FMC2_BCHDSR2_EBP4_SHIFT 16 + +/* Register: FMC2_BCHDSR3 */ +#define FMC2_BCHDSR3_EBP5_MASK GENMASK(12, 0) +#define FMC2_BCHDSR3_EBP6_MASK GENMASK(28, 16) +#define FMC2_BCHDSR3_EBP6_SHIFT 16 + +/* Register: FMC2_BCHDSR4 */ +#define FMC2_BCHDSR4_EBP7_MASK GENMASK(12, 0) +#define FMC2_BCHDSR4_EBP8_MASK GENMASK(28, 16) +#define FMC2_BCHDSR4_EBP8_SHIFT 16 + +enum stm32_fmc2_ecc { + FMC2_ECC_HAM = 1, + FMC2_ECC_BCH4 = 4, + FMC2_ECC_BCH8 = 8 +}; + +enum stm32_fmc2_irq_state { + FMC2_IRQ_UNKNOWN = 0, + FMC2_IRQ_BCH, + FMC2_IRQ_SEQ +}; + +struct stm32_fmc2_timings { + u8 tclr; + u8 tar; + u8 thiz; + u8 twait; + u8 thold_mem; + u8 tset_mem; + u8 thold_att; + u8 tset_att; +}; + +struct stm32_fmc2_nand { + struct nand_chip chip; + struct stm32_fmc2_timings timings; + int ncs; + int cs_used[FMC2_MAX_CE]; +}; + +static inline struct stm32_fmc2_nand *to_fmc2_nand(struct nand_chip 
*chip) +{ + return container_of(chip, struct stm32_fmc2_nand, chip); +} + +struct stm32_fmc2_nfc { + struct nand_controller base; + struct stm32_fmc2_nand nand; + struct device *dev; + void __iomem *io_base; + void __iomem *data_base[FMC2_MAX_CE]; + void __iomem *cmd_base[FMC2_MAX_CE]; + void __iomem *addr_base[FMC2_MAX_CE]; + phys_addr_t io_phys_addr; + phys_addr_t data_phys_addr[FMC2_MAX_CE]; + struct clk *clk; + u8 irq_state; + + struct dma_chan *dma_tx_ch; + struct dma_chan *dma_rx_ch; + struct dma_chan *dma_ecc_ch; + struct sg_table dma_data_sg; + struct sg_table dma_ecc_sg; + u8 *ecc_buf; + int dma_ecc_len; + + struct completion complete; + struct completion dma_data_complete; + struct completion dma_ecc_complete; + + u8 cs_assigned; + int cs_sel; +}; + +static inline struct stm32_fmc2_nfc *to_stm32_nfc(struct nand_controller *base) +{ + return container_of(base, struct stm32_fmc2_nfc, base); +} + +/* Timings configuration */ +static void stm32_fmc2_timings_init(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); + struct stm32_fmc2_timings *timings = &nand->timings; + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 pmem, patt; + + /* Set tclr/tar timings */ + pcr &= ~FMC2_PCR_TCLR_MASK; + pcr |= FMC2_PCR_TCLR(timings->tclr); + pcr &= ~FMC2_PCR_TAR_MASK; + pcr |= FMC2_PCR_TAR(timings->tar); + + /* Set tset/twait/thold/thiz timings in common bank */ + pmem = FMC2_PMEM_MEMSET(timings->tset_mem); + pmem |= FMC2_PMEM_MEMWAIT(timings->twait); + pmem |= FMC2_PMEM_MEMHOLD(timings->thold_mem); + pmem |= FMC2_PMEM_MEMHIZ(timings->thiz); + + /* Set tset/twait/thold/thiz timings in attribut bank */ + patt = FMC2_PATT_ATTSET(timings->tset_att); + patt |= FMC2_PATT_ATTWAIT(timings->twait); + patt |= FMC2_PATT_ATTHOLD(timings->thold_att); + patt |= FMC2_PATT_ATTHIZ(timings->thiz); + + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(pmem, fmc2->io_base + FMC2_PMEM); + writel_relaxed(patt, fmc2->io_base + FMC2_PATT); +} + +/* Controller configuration */ +static void stm32_fmc2_setup(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + /* Configure ECC algorithm (default configuration is Hamming) */ + pcr &= ~FMC2_PCR_ECCALG; + pcr &= ~FMC2_PCR_BCHECC; + if (chip->ecc.strength == FMC2_ECC_BCH8) { + pcr |= FMC2_PCR_ECCALG; + pcr |= FMC2_PCR_BCHECC; + } else if (chip->ecc.strength == FMC2_ECC_BCH4) { + pcr |= FMC2_PCR_ECCALG; + } + + /* Set buswidth */ + pcr &= ~FMC2_PCR_PWID_MASK; + if (chip->options & NAND_BUSWIDTH_16) + pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); + + /* Set ECC sector size */ + pcr &= ~FMC2_PCR_ECCSS_MASK; + pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_512); + + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); +} + +/* Select target */ +static int stm32_fmc2_select_chip(struct nand_chip *chip, int chipnr) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); + struct dma_slave_config dma_cfg; + int ret; + + if (nand->cs_used[chipnr] == fmc2->cs_sel) + return 0; + + fmc2->cs_sel = nand->cs_used[chipnr]; + + /* FMC2 setup routine */ + stm32_fmc2_setup(chip); + + /* Apply timings */ + stm32_fmc2_timings_init(chip); + + if (fmc2->dma_tx_ch && fmc2->dma_rx_ch) { + memset(&dma_cfg, 0, sizeof(dma_cfg)); + dma_cfg.src_addr = fmc2->data_phys_addr[fmc2->cs_sel]; + dma_cfg.dst_addr = fmc2->data_phys_addr[fmc2->cs_sel]; + 
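/*
 * Context note (not part of the change itself): src_addr and dst_addr are
 * both set to the FMC2 data register of the selected chip-select because
 * the rx channel (DMA_DEV_TO_MEM) only consumes src_addr and the tx channel
 * (DMA_MEM_TO_DEV) only consumes dst_addr, so one dma_slave_config can be
 * reused for both dmaengine_slave_config() calls below. The 4-byte access
 * width and maxburst of 32 are this driver's choices, not dmaengine
 * requirements.
 */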
dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + dma_cfg.src_maxburst = 32; + dma_cfg.dst_maxburst = 32; + + ret = dmaengine_slave_config(fmc2->dma_tx_ch, &dma_cfg); + if (ret) { + dev_err(fmc2->dev, "tx DMA engine slave config failed\n"); + return ret; + } + + ret = dmaengine_slave_config(fmc2->dma_rx_ch, &dma_cfg); + if (ret) { + dev_err(fmc2->dev, "rx DMA engine slave config failed\n"); + return ret; + } + } + + if (fmc2->dma_ecc_ch) { + /* + * Hamming: we read HECCR register + * BCH4/BCH8: we read BCHDSRSx registers + */ + memset(&dma_cfg, 0, sizeof(dma_cfg)); + dma_cfg.src_addr = fmc2->io_phys_addr; + dma_cfg.src_addr += chip->ecc.strength == FMC2_ECC_HAM ? + FMC2_HECCR : FMC2_BCHDSR0; + dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + + ret = dmaengine_slave_config(fmc2->dma_ecc_ch, &dma_cfg); + if (ret) { + dev_err(fmc2->dev, "ECC DMA engine slave config failed\n"); + return ret; + } + + /* Calculate ECC length needed for one sector */ + fmc2->dma_ecc_len = chip->ecc.strength == FMC2_ECC_HAM ? + FMC2_HECCR_LEN : FMC2_BCHDSRS_LEN; + } + + return 0; +} + +/* Set bus width to 16-bit or 8-bit */ +static void stm32_fmc2_set_buswidth_16(struct stm32_fmc2_nfc *fmc2, bool set) +{ + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + pcr &= ~FMC2_PCR_PWID_MASK; + if (set) + pcr |= FMC2_PCR_PWID(FMC2_PCR_PWID_BUSWIDTH_16); + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); +} + +/* Enable/disable ECC */ +static void stm32_fmc2_set_ecc(struct stm32_fmc2_nfc *fmc2, bool enable) +{ + u32 pcr = readl(fmc2->io_base + FMC2_PCR); + + pcr &= ~FMC2_PCR_ECCEN; + if (enable) + pcr |= FMC2_PCR_ECCEN; + writel(pcr, fmc2->io_base + FMC2_PCR); +} + +/* Enable irq sources in case of the sequencer is used */ +static inline void stm32_fmc2_enable_seq_irq(struct stm32_fmc2_nfc *fmc2) +{ + u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + + csqier |= FMC2_CSQIER_TCIE; + + fmc2->irq_state = FMC2_IRQ_SEQ; + + writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); +} + +/* Disable irq sources in case of the sequencer is used */ +static inline void stm32_fmc2_disable_seq_irq(struct stm32_fmc2_nfc *fmc2) +{ + u32 csqier = readl_relaxed(fmc2->io_base + FMC2_CSQIER); + + csqier &= ~FMC2_CSQIER_TCIE; + + writel_relaxed(csqier, fmc2->io_base + FMC2_CSQIER); + + fmc2->irq_state = FMC2_IRQ_UNKNOWN; +} + +/* Clear irq sources in case of the sequencer is used */ +static inline void stm32_fmc2_clear_seq_irq(struct stm32_fmc2_nfc *fmc2) +{ + writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, fmc2->io_base + FMC2_CSQICR); +} + +/* Enable irq sources in case of bch is used */ +static inline void stm32_fmc2_enable_bch_irq(struct stm32_fmc2_nfc *fmc2, + int mode) +{ + u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + + if (mode == NAND_ECC_WRITE) + bchier |= FMC2_BCHIER_EPBRIE; + else + bchier |= FMC2_BCHIER_DERIE; + + fmc2->irq_state = FMC2_IRQ_BCH; + + writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); +} + +/* Disable irq sources in case of bch is used */ +static inline void stm32_fmc2_disable_bch_irq(struct stm32_fmc2_nfc *fmc2) +{ + u32 bchier = readl_relaxed(fmc2->io_base + FMC2_BCHIER); + + bchier &= ~FMC2_BCHIER_DERIE; + bchier &= ~FMC2_BCHIER_EPBRIE; + + writel_relaxed(bchier, fmc2->io_base + FMC2_BCHIER); + + fmc2->irq_state = FMC2_IRQ_UNKNOWN; +} + +/* Clear irq sources in case of bch is used */ +static inline void stm32_fmc2_clear_bch_irq(struct stm32_fmc2_nfc *fmc2) +{ + writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, fmc2->io_base + FMC2_BCHICR); +} + +/* 
+ * Enable ECC logic and reset syndrome/parity bits previously calculated + * Syndrome/parity bits is cleared by setting the ECCEN bit to 0 + */ +static void stm32_fmc2_hwctl(struct nand_chip *chip, int mode) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + + stm32_fmc2_set_ecc(fmc2, false); + + if (chip->ecc.strength != FMC2_ECC_HAM) { + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + if (mode == NAND_ECC_WRITE) + pcr |= FMC2_PCR_WEN; + else + pcr &= ~FMC2_PCR_WEN; + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + + reinit_completion(&fmc2->complete); + stm32_fmc2_clear_bch_irq(fmc2); + stm32_fmc2_enable_bch_irq(fmc2, mode); + } + + stm32_fmc2_set_ecc(fmc2, true); +} + +/* + * ECC Hamming calculation + * ECC is 3 bytes for 512 bytes of data (supports error correction up to + * max of 1-bit) + */ +static inline void stm32_fmc2_ham_set_ecc(const u32 ecc_sta, u8 *ecc) +{ + ecc[0] = ecc_sta; + ecc[1] = ecc_sta >> 8; + ecc[2] = ecc_sta >> 16; +} + +static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 sr, heccr; + int ret; + + ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR, + sr, sr & FMC2_SR_NWRF, 10, 1000); + if (ret) { + dev_err(fmc2->dev, "ham timeout\n"); + return ret; + } + + heccr = readl_relaxed(fmc2->io_base + FMC2_HECCR); + + stm32_fmc2_ham_set_ecc(heccr, ecc); + + /* Disable ECC */ + stm32_fmc2_set_ecc(fmc2, false); + + return 0; +} + +static int stm32_fmc2_ham_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) +{ + u8 bit_position = 0, b0, b1, b2; + u32 byte_addr = 0, b; + u32 i, shifting = 1; + + /* Indicate which bit and byte is faulty (if any) */ + b0 = read_ecc[0] ^ calc_ecc[0]; + b1 = read_ecc[1] ^ calc_ecc[1]; + b2 = read_ecc[2] ^ calc_ecc[2]; + b = b0 | (b1 << 8) | (b2 << 16); + + /* No errors */ + if (likely(!b)) + return 0; + + /* Calculate bit position */ + for (i = 0; i < 3; i++) { + switch (b % 4) { + case 2: + bit_position += shifting; + case 1: + break; + default: + return -EBADMSG; + } + shifting <<= 1; + b >>= 2; + } + + /* Calculate byte position */ + shifting = 1; + for (i = 0; i < 9; i++) { + switch (b % 4) { + case 2: + byte_addr += shifting; + case 1: + break; + default: + return -EBADMSG; + } + shifting <<= 1; + b >>= 2; + } + + /* Flip the bit */ + dat[byte_addr] ^= (1 << bit_position); + + return 1; +} + +/* + * ECC BCH calculation and correction + * ECC is 7/13 bytes for 512 bytes of data (supports error correction up to + * max of 4-bit/8-bit) + */ +static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data, + u8 *ecc) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 bchpbr; + + /* Wait until the BCH code is ready */ + if (!wait_for_completion_timeout(&fmc2->complete, + msecs_to_jiffies(1000))) { + dev_err(fmc2->dev, "bch timeout\n"); + stm32_fmc2_disable_bch_irq(fmc2); + return -ETIMEDOUT; + } + + /* Read parity bits */ + bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR1); + ecc[0] = bchpbr; + ecc[1] = bchpbr >> 8; + ecc[2] = bchpbr >> 16; + ecc[3] = bchpbr >> 24; + + bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR2); + ecc[4] = bchpbr; + ecc[5] = bchpbr >> 8; + ecc[6] = bchpbr >> 16; + + if (chip->ecc.strength == FMC2_ECC_BCH8) { + ecc[7] = bchpbr >> 24; + + bchpbr = readl_relaxed(fmc2->io_base + FMC2_BCHPBR3); + ecc[8] = bchpbr; + ecc[9] = bchpbr >> 8; + ecc[10] = bchpbr >> 16; + ecc[11] = bchpbr >> 24; + + bchpbr = readl_relaxed(fmc2->io_base + 
FMC2_BCHPBR4); + ecc[12] = bchpbr; + } + + /* Disable ECC */ + stm32_fmc2_set_ecc(fmc2, false); + + return 0; +} + +/* BCH algorithm correction */ +static int stm32_fmc2_bch_decode(int eccsize, u8 *dat, u32 *ecc_sta) +{ + u32 bchdsr0 = ecc_sta[0]; + u32 bchdsr1 = ecc_sta[1]; + u32 bchdsr2 = ecc_sta[2]; + u32 bchdsr3 = ecc_sta[3]; + u32 bchdsr4 = ecc_sta[4]; + u16 pos[8]; + int i, den; + unsigned int nb_errs = 0; + + /* No errors found */ + if (likely(!(bchdsr0 & FMC2_BCHDSR0_DEF))) + return 0; + + /* Too many errors detected */ + if (unlikely(bchdsr0 & FMC2_BCHDSR0_DUE)) + return -EBADMSG; + + pos[0] = bchdsr1 & FMC2_BCHDSR1_EBP1_MASK; + pos[1] = (bchdsr1 & FMC2_BCHDSR1_EBP2_MASK) >> FMC2_BCHDSR1_EBP2_SHIFT; + pos[2] = bchdsr2 & FMC2_BCHDSR2_EBP3_MASK; + pos[3] = (bchdsr2 & FMC2_BCHDSR2_EBP4_MASK) >> FMC2_BCHDSR2_EBP4_SHIFT; + pos[4] = bchdsr3 & FMC2_BCHDSR3_EBP5_MASK; + pos[5] = (bchdsr3 & FMC2_BCHDSR3_EBP6_MASK) >> FMC2_BCHDSR3_EBP6_SHIFT; + pos[6] = bchdsr4 & FMC2_BCHDSR4_EBP7_MASK; + pos[7] = (bchdsr4 & FMC2_BCHDSR4_EBP8_MASK) >> FMC2_BCHDSR4_EBP8_SHIFT; + + den = (bchdsr0 & FMC2_BCHDSR0_DEN_MASK) >> FMC2_BCHDSR0_DEN_SHIFT; + for (i = 0; i < den; i++) { + if (pos[i] < eccsize * 8) { + change_bit(pos[i], (unsigned long *)dat); + nb_errs++; + } + } + + return nb_errs; +} + +static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u32 ecc_sta[5]; + + /* Wait until the decoding error is ready */ + if (!wait_for_completion_timeout(&fmc2->complete, + msecs_to_jiffies(1000))) { + dev_err(fmc2->dev, "bch timeout\n"); + stm32_fmc2_disable_bch_irq(fmc2); + return -ETIMEDOUT; + } + + ecc_sta[0] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR0); + ecc_sta[1] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR1); + ecc_sta[2] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR2); + ecc_sta[3] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR3); + ecc_sta[4] = readl_relaxed(fmc2->io_base + FMC2_BCHDSR4); + + /* Disable ECC */ + stm32_fmc2_set_ecc(fmc2, false); + + return stm32_fmc2_bch_decode(chip->ecc.size, dat, ecc_sta); +} + +static int stm32_fmc2_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret, i, s, stat, eccsize = chip->ecc.size; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + int eccstrength = chip->ecc.strength; + u8 *p = buf; + u8 *ecc_calc = chip->ecc.calc_buf; + u8 *ecc_code = chip->ecc.code_buf; + unsigned int max_bitflips = 0; + + ret = nand_read_page_op(chip, page, 0, NULL, 0); + if (ret) + return ret; + + for (i = mtd->writesize + FMC2_BBM_LEN, s = 0; s < eccsteps; + s++, i += eccbytes, p += eccsize) { + chip->ecc.hwctl(chip, NAND_ECC_READ); + + /* Read the nand page sector (512 bytes) */ + ret = nand_change_read_column_op(chip, s * eccsize, p, + eccsize, false); + if (ret) + return ret; + + /* Read the corresponding ECC bytes */ + ret = nand_change_read_column_op(chip, i, ecc_code, + eccbytes, false); + if (ret) + return ret; + + /* Correct the data */ + stat = chip->ecc.correct(chip, p, ecc_code, ecc_calc); + if (stat == -EBADMSG) + /* Check for empty pages with bitflips */ + stat = nand_check_erased_ecc_chunk(p, eccsize, + ecc_code, eccbytes, + NULL, 0, + eccstrength); + + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + /* Read oob */ + if (oob_required) { + ret = 
nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, mtd->oobsize, + false); + if (ret) + return ret; + } + + return max_bitflips; +} + +/* Sequencer read/write configuration */ +static void stm32_fmc2_rw_page_init(struct nand_chip *chip, int page, + int raw, bool write_data) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + u32 csqcfgr1, csqcfgr2, csqcfgr3; + u32 csqar1, csqar2; + u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN; + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + + if (write_data) + pcr |= FMC2_PCR_WEN; + else + pcr &= ~FMC2_PCR_WEN; + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + + /* + * - Set Program Page/Page Read command + * - Enable DMA request data + * - Set timings + */ + csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T; + if (write_data) + csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_SEQIN); + else + csqcfgr1 |= FMC2_CSQCFGR1_CMD1(NAND_CMD_READ0) | + FMC2_CSQCFGR1_CMD2EN | + FMC2_CSQCFGR1_CMD2(NAND_CMD_READSTART) | + FMC2_CSQCFGR1_CMD2T; + + /* + * - Set Random Data Input/Random Data Read command + * - Enable the sequencer to access the Spare data area + * - Enable DMA request status decoding for read + * - Set timings + */ + if (write_data) + csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDIN); + else + csqcfgr2 = FMC2_CSQCFGR2_RCMD1(NAND_CMD_RNDOUT) | + FMC2_CSQCFGR2_RCMD2EN | + FMC2_CSQCFGR2_RCMD2(NAND_CMD_RNDOUTSTART) | + FMC2_CSQCFGR2_RCMD1T | + FMC2_CSQCFGR2_RCMD2T; + if (!raw) { + csqcfgr2 |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN; + csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN; + } + + /* + * - Set the number of sectors to be written + * - Set timings + */ + csqcfgr3 = FMC2_CSQCFGR3_SNBR(chip->ecc.steps - 1); + if (write_data) { + csqcfgr3 |= FMC2_CSQCFGR3_RAC2T; + if (chip->options & NAND_ROW_ADDR_3) + csqcfgr3 |= FMC2_CSQCFGR3_AC5T; + else + csqcfgr3 |= FMC2_CSQCFGR3_AC4T; + } + + /* + * Set the fourth first address cycles + * Byte 1 and byte 2 => column, we start at 0x0 + * Byte 3 and byte 4 => page + */ + csqar1 = FMC2_CSQCAR1_ADDC3(page); + csqar1 |= FMC2_CSQCAR1_ADDC4(page >> 8); + + /* + * - Set chip enable number + * - Set ECC byte offset in the spare area + * - Calculate the number of address cycles to be issued + * - Set byte 5 of address cycle if needed + */ + csqar2 = FMC2_CSQCAR2_NANDCEN(fmc2->cs_sel); + if (chip->options & NAND_BUSWIDTH_16) + csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset >> 1); + else + csqar2 |= FMC2_CSQCAR2_SAO(ecc_offset); + if (chip->options & NAND_ROW_ADDR_3) { + csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(5); + csqar2 |= FMC2_CSQCAR2_ADDC5(page >> 16); + } else { + csqcfgr1 |= FMC2_CSQCFGR1_ACYNBR(4); + } + + writel_relaxed(csqcfgr1, fmc2->io_base + FMC2_CSQCFGR1); + writel_relaxed(csqcfgr2, fmc2->io_base + FMC2_CSQCFGR2); + writel_relaxed(csqcfgr3, fmc2->io_base + FMC2_CSQCFGR3); + writel_relaxed(csqar1, fmc2->io_base + FMC2_CSQAR1); + writel_relaxed(csqar2, fmc2->io_base + FMC2_CSQAR2); +} + +static void stm32_fmc2_dma_callback(void *arg) +{ + complete((struct completion *)arg); +} + +/* Read/write data from/to a page */ +static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf, + int raw, bool write_data) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct dma_async_tx_descriptor *desc_data, *desc_ecc; + struct scatterlist *sg; + struct dma_chan *dma_ch = fmc2->dma_rx_ch; + enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE; + enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM; + u32 csqcr = readl_relaxed(fmc2->io_base + 
FMC2_CSQCR); + int eccsteps = chip->ecc.steps; + int eccsize = chip->ecc.size; + const u8 *p = buf; + int s, ret; + + /* Configure DMA data */ + if (write_data) { + dma_data_dir = DMA_TO_DEVICE; + dma_transfer_dir = DMA_MEM_TO_DEV; + dma_ch = fmc2->dma_tx_ch; + } + + for_each_sg(fmc2->dma_data_sg.sgl, sg, eccsteps, s) { + sg_set_buf(sg, p, eccsize); + p += eccsize; + } + + ret = dma_map_sg(fmc2->dev, fmc2->dma_data_sg.sgl, + eccsteps, dma_data_dir); + if (ret < 0) + return ret; + + desc_data = dmaengine_prep_slave_sg(dma_ch, fmc2->dma_data_sg.sgl, + eccsteps, dma_transfer_dir, + DMA_PREP_INTERRUPT); + if (!desc_data) { + ret = -ENOMEM; + goto err_unmap_data; + } + + reinit_completion(&fmc2->dma_data_complete); + reinit_completion(&fmc2->complete); + desc_data->callback = stm32_fmc2_dma_callback; + desc_data->callback_param = &fmc2->dma_data_complete; + ret = dma_submit_error(dmaengine_submit(desc_data)); + if (ret) + goto err_unmap_data; + + dma_async_issue_pending(dma_ch); + + if (!write_data && !raw) { + /* Configure DMA ECC status */ + p = fmc2->ecc_buf; + for_each_sg(fmc2->dma_ecc_sg.sgl, sg, eccsteps, s) { + sg_set_buf(sg, p, fmc2->dma_ecc_len); + p += fmc2->dma_ecc_len; + } + + ret = dma_map_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + eccsteps, dma_data_dir); + if (ret < 0) + goto err_unmap_data; + + desc_ecc = dmaengine_prep_slave_sg(fmc2->dma_ecc_ch, + fmc2->dma_ecc_sg.sgl, + eccsteps, dma_transfer_dir, + DMA_PREP_INTERRUPT); + if (!desc_ecc) { + ret = -ENOMEM; + goto err_unmap_ecc; + } + + reinit_completion(&fmc2->dma_ecc_complete); + desc_ecc->callback = stm32_fmc2_dma_callback; + desc_ecc->callback_param = &fmc2->dma_ecc_complete; + ret = dma_submit_error(dmaengine_submit(desc_ecc)); + if (ret) + goto err_unmap_ecc; + + dma_async_issue_pending(fmc2->dma_ecc_ch); + } + + stm32_fmc2_clear_seq_irq(fmc2); + stm32_fmc2_enable_seq_irq(fmc2); + + /* Start the transfer */ + csqcr |= FMC2_CSQCR_CSQSTART; + writel_relaxed(csqcr, fmc2->io_base + FMC2_CSQCR); + + /* Wait end of sequencer transfer */ + if (!wait_for_completion_timeout(&fmc2->complete, + msecs_to_jiffies(1000))) { + dev_err(fmc2->dev, "seq timeout\n"); + stm32_fmc2_disable_seq_irq(fmc2); + dmaengine_terminate_all(dma_ch); + if (!write_data && !raw) + dmaengine_terminate_all(fmc2->dma_ecc_ch); + ret = -ETIMEDOUT; + goto err_unmap_ecc; + } + + /* Wait DMA data transfer completion */ + if (!wait_for_completion_timeout(&fmc2->dma_data_complete, + msecs_to_jiffies(100))) { + dev_err(fmc2->dev, "data DMA timeout\n"); + dmaengine_terminate_all(dma_ch); + ret = -ETIMEDOUT; + } + + /* Wait DMA ECC transfer completion */ + if (!write_data && !raw) { + if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete, + msecs_to_jiffies(100))) { + dev_err(fmc2->dev, "ECC DMA timeout\n"); + dmaengine_terminate_all(fmc2->dma_ecc_ch); + ret = -ETIMEDOUT; + } + } + +err_unmap_ecc: + if (!write_data && !raw) + dma_unmap_sg(fmc2->dev, fmc2->dma_ecc_sg.sgl, + eccsteps, dma_data_dir); + +err_unmap_data: + dma_unmap_sg(fmc2->dev, fmc2->dma_data_sg.sgl, eccsteps, dma_data_dir); + + return ret; +} + +static int stm32_fmc2_sequencer_write(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page, int raw) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + /* Configure the sequencer */ + stm32_fmc2_rw_page_init(chip, page, raw, true); + + /* Write the page */ + ret = stm32_fmc2_xfer(chip, buf, raw, true); + if (ret) + return ret; + + /* Write oob */ + if (oob_required) { + ret = nand_change_write_column_op(chip, mtd->writesize, + 
chip->oob_poi, mtd->oobsize, + false); + if (ret) + return ret; + } + + return nand_prog_page_end_op(chip); +} + +static int stm32_fmc2_sequencer_write_page(struct nand_chip *chip, + const u8 *buf, + int oob_required, + int page) +{ + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, false); +} + +static int stm32_fmc2_sequencer_write_page_raw(struct nand_chip *chip, + const u8 *buf, + int oob_required, + int page) +{ + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + return stm32_fmc2_sequencer_write(chip, buf, oob_required, page, true); +} + +/* Get a status indicating which sectors have errors */ +static inline u16 stm32_fmc2_get_mapping_status(struct stm32_fmc2_nfc *fmc2) +{ + u32 csqemsr = readl_relaxed(fmc2->io_base + FMC2_CSQEMSR); + + return csqemsr & FMC2_CSQEMSR_SEM; +} + +static int stm32_fmc2_sequencer_correct(struct nand_chip *chip, u8 *dat, + u8 *read_ecc, u8 *calc_ecc) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + int eccstrength = chip->ecc.strength; + int i, s, eccsize = chip->ecc.size; + u32 *ecc_sta = (u32 *)fmc2->ecc_buf; + u16 sta_map = stm32_fmc2_get_mapping_status(fmc2); + unsigned int max_bitflips = 0; + + for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, dat += eccsize) { + int stat = 0; + + if (eccstrength == FMC2_ECC_HAM) { + /* Ecc_sta = FMC2_HECCR */ + if (sta_map & BIT(s)) { + stm32_fmc2_ham_set_ecc(*ecc_sta, &calc_ecc[i]); + stat = stm32_fmc2_ham_correct(chip, dat, + &read_ecc[i], + &calc_ecc[i]); + } + ecc_sta++; + } else { + /* + * Ecc_sta[0] = FMC2_BCHDSR0 + * Ecc_sta[1] = FMC2_BCHDSR1 + * Ecc_sta[2] = FMC2_BCHDSR2 + * Ecc_sta[3] = FMC2_BCHDSR3 + * Ecc_sta[4] = FMC2_BCHDSR4 + */ + if (sta_map & BIT(s)) + stat = stm32_fmc2_bch_decode(eccsize, dat, + ecc_sta); + ecc_sta += 5; + } + + if (stat == -EBADMSG) + /* Check for empty pages with bitflips */ + stat = nand_check_erased_ecc_chunk(dat, eccsize, + &read_ecc[i], + eccbytes, + NULL, 0, + eccstrength); + + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } + } + + return max_bitflips; +} + +static int stm32_fmc2_sequencer_read_page(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + u8 *ecc_calc = chip->ecc.calc_buf; + u8 *ecc_code = chip->ecc.code_buf; + u16 sta_map; + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + /* Configure the sequencer */ + stm32_fmc2_rw_page_init(chip, page, 0, false); + + /* Read the page */ + ret = stm32_fmc2_xfer(chip, buf, 0, false); + if (ret) + return ret; + + sta_map = stm32_fmc2_get_mapping_status(fmc2); + + /* Check if errors happen */ + if (likely(!sta_map)) { + if (oob_required) + return nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, + mtd->oobsize, false); + + return 0; + } + + /* Read oob */ + ret = nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, mtd->oobsize, false); + if (ret) + return ret; + + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + 
/* Correct data */ + return chip->ecc.correct(chip, buf, ecc_code, ecc_calc); +} + +static int stm32_fmc2_sequencer_read_page_raw(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + /* Select the target */ + ret = stm32_fmc2_select_chip(chip, chip->cur_cs); + if (ret) + return ret; + + /* Configure the sequencer */ + stm32_fmc2_rw_page_init(chip, page, 1, false); + + /* Read the page */ + ret = stm32_fmc2_xfer(chip, buf, 1, false); + if (ret) + return ret; + + /* Read oob */ + if (oob_required) + return nand_change_read_column_op(chip, mtd->writesize, + chip->oob_poi, mtd->oobsize, + false); + + return 0; +} + +static irqreturn_t stm32_fmc2_irq(int irq, void *dev_id) +{ + struct stm32_fmc2_nfc *fmc2 = (struct stm32_fmc2_nfc *)dev_id; + + if (fmc2->irq_state == FMC2_IRQ_SEQ) + /* Sequencer is used */ + stm32_fmc2_disable_seq_irq(fmc2); + else if (fmc2->irq_state == FMC2_IRQ_BCH) + /* BCH is used */ + stm32_fmc2_disable_bch_irq(fmc2); + + complete(&fmc2->complete); + + return IRQ_HANDLED; +} + +static void stm32_fmc2_read_data(struct nand_chip *chip, void *buf, + unsigned int len, bool force_8bit) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + void __iomem *io_addr_r = fmc2->data_base[fmc2->cs_sel]; + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 8-bit */ + stm32_fmc2_set_buswidth_16(fmc2, false); + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { + *(u8 *)buf = readb_relaxed(io_addr_r); + buf += sizeof(u8); + len -= sizeof(u8); + } + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) && + len >= sizeof(u16)) { + *(u16 *)buf = readw_relaxed(io_addr_r); + buf += sizeof(u16); + len -= sizeof(u16); + } + } + + /* Buf is aligned */ + while (len >= sizeof(u32)) { + *(u32 *)buf = readl_relaxed(io_addr_r); + buf += sizeof(u32); + len -= sizeof(u32); + } + + /* Read remaining bytes */ + if (len >= sizeof(u16)) { + *(u16 *)buf = readw_relaxed(io_addr_r); + buf += sizeof(u16); + len -= sizeof(u16); + } + + if (len) + *(u8 *)buf = readb_relaxed(io_addr_r); + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 16-bit */ + stm32_fmc2_set_buswidth_16(fmc2, true); +} + +static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, + unsigned int len, bool force_8bit) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + void __iomem *io_addr_w = fmc2->data_base[fmc2->cs_sel]; + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 8-bit */ + stm32_fmc2_set_buswidth_16(fmc2, false); + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32))) { + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u16)) && len) { + writeb_relaxed(*(u8 *)buf, io_addr_w); + buf += sizeof(u8); + len -= sizeof(u8); + } + + if (!IS_ALIGNED((uintptr_t)buf, sizeof(u32)) && + len >= sizeof(u16)) { + writew_relaxed(*(u16 *)buf, io_addr_w); + buf += sizeof(u16); + len -= sizeof(u16); + } + } + + /* Buf is aligned */ + while (len >= sizeof(u32)) { + writel_relaxed(*(u32 *)buf, io_addr_w); + buf += sizeof(u32); + len -= sizeof(u32); + } + + /* Write remaining bytes */ + if (len >= sizeof(u16)) { + writew_relaxed(*(u16 *)buf, io_addr_w); + buf += sizeof(u16); + len -= sizeof(u16); + } + + if (len) + writeb_relaxed(*(u8 *)buf, io_addr_w); + + if (force_8bit && chip->options & NAND_BUSWIDTH_16) + /* Reconfigure bus width to 16-bit */ + stm32_fmc2_set_buswidth_16(fmc2, true); +} + +static 
int stm32_fmc2_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + const struct nand_op_instr *instr = NULL; + unsigned int op_id, i; + int ret; + + ret = stm32_fmc2_select_chip(chip, op->cs); + if (ret) + return ret; + + if (check_only) + return ret; + + for (op_id = 0; op_id < op->ninstrs; op_id++) { + instr = &op->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb_relaxed(instr->ctx.cmd.opcode, + fmc2->cmd_base[fmc2->cs_sel]); + break; + + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + writeb_relaxed(instr->ctx.addr.addrs[i], + fmc2->addr_base[fmc2->cs_sel]); + break; + + case NAND_OP_DATA_IN_INSTR: + stm32_fmc2_read_data(chip, instr->ctx.data.buf.in, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_DATA_OUT_INSTR: + stm32_fmc2_write_data(chip, instr->ctx.data.buf.out, + instr->ctx.data.len, + instr->ctx.data.force_8bit); + break; + + case NAND_OP_WAITRDY_INSTR: + ret = nand_soft_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); + break; + } + } + + return ret; +} + +/* Controller initialization */ +static void stm32_fmc2_init(struct stm32_fmc2_nfc *fmc2) +{ + u32 pcr = readl_relaxed(fmc2->io_base + FMC2_PCR); + u32 bcr1 = readl_relaxed(fmc2->io_base + FMC2_BCR1); + + /* Set CS used to undefined */ + fmc2->cs_sel = -1; + + /* Enable wait feature and nand flash memory bank */ + pcr |= FMC2_PCR_PWAITEN; + pcr |= FMC2_PCR_PBKEN; + + /* Set buswidth to 8 bits mode for identification */ + pcr &= ~FMC2_PCR_PWID_MASK; + + /* ECC logic is disabled */ + pcr &= ~FMC2_PCR_ECCEN; + + /* Default mode */ + pcr &= ~FMC2_PCR_ECCALG; + pcr &= ~FMC2_PCR_BCHECC; + pcr &= ~FMC2_PCR_WEN; + + /* Set default ECC sector size */ + pcr &= ~FMC2_PCR_ECCSS_MASK; + pcr |= FMC2_PCR_ECCSS(FMC2_PCR_ECCSS_2048); + + /* Set default tclr/tar timings */ + pcr &= ~FMC2_PCR_TCLR_MASK; + pcr |= FMC2_PCR_TCLR(FMC2_PCR_TCLR_DEFAULT); + pcr &= ~FMC2_PCR_TAR_MASK; + pcr |= FMC2_PCR_TAR(FMC2_PCR_TAR_DEFAULT); + + /* Enable FMC2 controller */ + bcr1 |= FMC2_BCR1_FMC2EN; + + writel_relaxed(bcr1, fmc2->io_base + FMC2_BCR1); + writel_relaxed(pcr, fmc2->io_base + FMC2_PCR); + writel_relaxed(FMC2_PMEM_DEFAULT, fmc2->io_base + FMC2_PMEM); + writel_relaxed(FMC2_PATT_DEFAULT, fmc2->io_base + FMC2_PATT); +} + +/* Controller timings */ +static void stm32_fmc2_calc_timings(struct nand_chip *chip, + const struct nand_sdr_timings *sdrt) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); + struct stm32_fmc2_timings *tims = &nand->timings; + unsigned long hclk = clk_get_rate(fmc2->clk); + unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000); + int tar, tclr, thiz, twait, tset_mem, tset_att, thold_mem, thold_att; + + tar = hclkp; + if (tar < sdrt->tAR_min) + tar = sdrt->tAR_min; + tims->tar = DIV_ROUND_UP(tar, hclkp) - 1; + if (tims->tar > FMC2_PCR_TIMING_MASK) + tims->tar = FMC2_PCR_TIMING_MASK; + + tclr = hclkp; + if (tclr < sdrt->tCLR_min) + tclr = sdrt->tCLR_min; + tims->tclr = DIV_ROUND_UP(tclr, hclkp) - 1; + if (tims->tclr > FMC2_PCR_TIMING_MASK) + tims->tclr = FMC2_PCR_TIMING_MASK; + + tims->thiz = FMC2_THIZ; + thiz = (tims->thiz + 1) * hclkp; + + /* + * tWAIT > tRP + * tWAIT > tWP + * tWAIT > tREA + tIO + */ + twait = hclkp; + if (twait < sdrt->tRP_min) + twait = sdrt->tRP_min; + if (twait < sdrt->tWP_min) + twait = sdrt->tWP_min; + if (twait < sdrt->tREA_max + FMC2_TIO) + twait = 
sdrt->tREA_max + FMC2_TIO; + tims->twait = DIV_ROUND_UP(twait, hclkp); + if (tims->twait == 0) + tims->twait = 1; + else if (tims->twait > FMC2_PMEM_PATT_TIMING_MASK) + tims->twait = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tSETUP_MEM > tCS - tWAIT + * tSETUP_MEM > tALS - tWAIT + * tSETUP_MEM > tDS - (tWAIT - tHIZ) + */ + tset_mem = hclkp; + if (sdrt->tCS_min > twait && (tset_mem < sdrt->tCS_min - twait)) + tset_mem = sdrt->tCS_min - twait; + if (sdrt->tALS_min > twait && (tset_mem < sdrt->tALS_min - twait)) + tset_mem = sdrt->tALS_min - twait; + if (twait > thiz && (sdrt->tDS_min > twait - thiz) && + (tset_mem < sdrt->tDS_min - (twait - thiz))) + tset_mem = sdrt->tDS_min - (twait - thiz); + tims->tset_mem = DIV_ROUND_UP(tset_mem, hclkp); + if (tims->tset_mem == 0) + tims->tset_mem = 1; + else if (tims->tset_mem > FMC2_PMEM_PATT_TIMING_MASK) + tims->tset_mem = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tHOLD_MEM > tCH + * tHOLD_MEM > tREH - tSETUP_MEM + * tHOLD_MEM > max(tRC, tWC) - (tSETUP_MEM + tWAIT) + */ + thold_mem = hclkp; + if (thold_mem < sdrt->tCH_min) + thold_mem = sdrt->tCH_min; + if (sdrt->tREH_min > tset_mem && + (thold_mem < sdrt->tREH_min - tset_mem)) + thold_mem = sdrt->tREH_min - tset_mem; + if ((sdrt->tRC_min > tset_mem + twait) && + (thold_mem < sdrt->tRC_min - (tset_mem + twait))) + thold_mem = sdrt->tRC_min - (tset_mem + twait); + if ((sdrt->tWC_min > tset_mem + twait) && + (thold_mem < sdrt->tWC_min - (tset_mem + twait))) + thold_mem = sdrt->tWC_min - (tset_mem + twait); + tims->thold_mem = DIV_ROUND_UP(thold_mem, hclkp); + if (tims->thold_mem == 0) + tims->thold_mem = 1; + else if (tims->thold_mem > FMC2_PMEM_PATT_TIMING_MASK) + tims->thold_mem = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tSETUP_ATT > tCS - tWAIT + * tSETUP_ATT > tCLS - tWAIT + * tSETUP_ATT > tALS - tWAIT + * tSETUP_ATT > tRHW - tHOLD_MEM + * tSETUP_ATT > tDS - (tWAIT - tHIZ) + */ + tset_att = hclkp; + if (sdrt->tCS_min > twait && (tset_att < sdrt->tCS_min - twait)) + tset_att = sdrt->tCS_min - twait; + if (sdrt->tCLS_min > twait && (tset_att < sdrt->tCLS_min - twait)) + tset_att = sdrt->tCLS_min - twait; + if (sdrt->tALS_min > twait && (tset_att < sdrt->tALS_min - twait)) + tset_att = sdrt->tALS_min - twait; + if (sdrt->tRHW_min > thold_mem && + (tset_att < sdrt->tRHW_min - thold_mem)) + tset_att = sdrt->tRHW_min - thold_mem; + if (twait > thiz && (sdrt->tDS_min > twait - thiz) && + (tset_att < sdrt->tDS_min - (twait - thiz))) + tset_att = sdrt->tDS_min - (twait - thiz); + tims->tset_att = DIV_ROUND_UP(tset_att, hclkp); + if (tims->tset_att == 0) + tims->tset_att = 1; + else if (tims->tset_att > FMC2_PMEM_PATT_TIMING_MASK) + tims->tset_att = FMC2_PMEM_PATT_TIMING_MASK; + + /* + * tHOLD_ATT > tALH + * tHOLD_ATT > tCH + * tHOLD_ATT > tCLH + * tHOLD_ATT > tCOH + * tHOLD_ATT > tDH + * tHOLD_ATT > tWB + tIO + tSYNC - tSETUP_MEM + * tHOLD_ATT > tADL - tSETUP_MEM + * tHOLD_ATT > tWH - tSETUP_MEM + * tHOLD_ATT > tWHR - tSETUP_MEM + * tHOLD_ATT > tRC - (tSETUP_ATT + tWAIT) + * tHOLD_ATT > tWC - (tSETUP_ATT + tWAIT) + */ + thold_att = hclkp; + if (thold_att < sdrt->tALH_min) + thold_att = sdrt->tALH_min; + if (thold_att < sdrt->tCH_min) + thold_att = sdrt->tCH_min; + if (thold_att < sdrt->tCLH_min) + thold_att = sdrt->tCLH_min; + if (thold_att < sdrt->tCOH_min) + thold_att = sdrt->tCOH_min; + if (thold_att < sdrt->tDH_min) + thold_att = sdrt->tDH_min; + if ((sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC > tset_mem) && + (thold_att < sdrt->tWB_max + FMC2_TIO + FMC2_TSYNC - tset_mem)) + thold_att = sdrt->tWB_max + FMC2_TIO + 
FMC2_TSYNC - tset_mem; + if (sdrt->tADL_min > tset_mem && + (thold_att < sdrt->tADL_min - tset_mem)) + thold_att = sdrt->tADL_min - tset_mem; + if (sdrt->tWH_min > tset_mem && + (thold_att < sdrt->tWH_min - tset_mem)) + thold_att = sdrt->tWH_min - tset_mem; + if (sdrt->tWHR_min > tset_mem && + (thold_att < sdrt->tWHR_min - tset_mem)) + thold_att = sdrt->tWHR_min - tset_mem; + if ((sdrt->tRC_min > tset_att + twait) && + (thold_att < sdrt->tRC_min - (tset_att + twait))) + thold_att = sdrt->tRC_min - (tset_att + twait); + if ((sdrt->tWC_min > tset_att + twait) && + (thold_att < sdrt->tWC_min - (tset_att + twait))) + thold_att = sdrt->tWC_min - (tset_att + twait); + tims->thold_att = DIV_ROUND_UP(thold_att, hclkp); + if (tims->thold_att == 0) + tims->thold_att = 1; + else if (tims->thold_att > FMC2_PMEM_PATT_TIMING_MASK) + tims->thold_att = FMC2_PMEM_PATT_TIMING_MASK; +} + +static int stm32_fmc2_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf) +{ + const struct nand_sdr_timings *sdrt; + + sdrt = nand_get_sdr_timings(conf); + if (IS_ERR(sdrt)) + return PTR_ERR(sdrt); + + if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + stm32_fmc2_calc_timings(chip, sdrt); + + /* Apply timings */ + stm32_fmc2_timings_init(chip); + + return 0; +} + +/* DMA configuration */ +static int stm32_fmc2_dma_setup(struct stm32_fmc2_nfc *fmc2) +{ + int ret; + + fmc2->dma_tx_ch = dma_request_slave_channel(fmc2->dev, "tx"); + fmc2->dma_rx_ch = dma_request_slave_channel(fmc2->dev, "rx"); + fmc2->dma_ecc_ch = dma_request_slave_channel(fmc2->dev, "ecc"); + + if (!fmc2->dma_tx_ch || !fmc2->dma_rx_ch || !fmc2->dma_ecc_ch) { + dev_warn(fmc2->dev, "DMAs not defined in the device tree, polling mode is used\n"); + return 0; + } + + ret = sg_alloc_table(&fmc2->dma_ecc_sg, FMC2_MAX_SG, GFP_KERNEL); + if (ret) + return ret; + + /* Allocate a buffer to store ECC status registers */ + fmc2->ecc_buf = devm_kzalloc(fmc2->dev, FMC2_MAX_ECC_BUF_LEN, + GFP_KERNEL); + if (!fmc2->ecc_buf) + return -ENOMEM; + + ret = sg_alloc_table(&fmc2->dma_data_sg, FMC2_MAX_SG, GFP_KERNEL); + if (ret) + return ret; + + init_completion(&fmc2->dma_data_complete); + init_completion(&fmc2->dma_ecc_complete); + + return 0; +} + +/* NAND callbacks setup */ +static void stm32_fmc2_nand_callbacks_setup(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + + /* + * Specific callbacks to read/write a page depending on + * the mode (polling/sequencer) and the algo used (Hamming, BCH). 
+ */ + if (fmc2->dma_tx_ch && fmc2->dma_rx_ch && fmc2->dma_ecc_ch) { + /* DMA => use sequencer mode callbacks */ + chip->ecc.correct = stm32_fmc2_sequencer_correct; + chip->ecc.write_page = stm32_fmc2_sequencer_write_page; + chip->ecc.read_page = stm32_fmc2_sequencer_read_page; + chip->ecc.write_page_raw = stm32_fmc2_sequencer_write_page_raw; + chip->ecc.read_page_raw = stm32_fmc2_sequencer_read_page_raw; + } else { + /* No DMA => use polling mode callbacks */ + chip->ecc.hwctl = stm32_fmc2_hwctl; + if (chip->ecc.strength == FMC2_ECC_HAM) { + /* Hamming is used */ + chip->ecc.calculate = stm32_fmc2_ham_calculate; + chip->ecc.correct = stm32_fmc2_ham_correct; + chip->ecc.options |= NAND_ECC_GENERIC_ERASED_CHECK; + } else { + /* BCH is used */ + chip->ecc.calculate = stm32_fmc2_bch_calculate; + chip->ecc.correct = stm32_fmc2_bch_correct; + chip->ecc.read_page = stm32_fmc2_read_page; + } + } + + /* Specific configurations depending on the algo used */ + if (chip->ecc.strength == FMC2_ECC_HAM) + chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 4 : 3; + else if (chip->ecc.strength == FMC2_ECC_BCH8) + chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 14 : 13; + else + chip->ecc.bytes = chip->options & NAND_BUSWIDTH_16 ? 8 : 7; +} + +/* FMC2 layout */ +static int stm32_fmc2_nand_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section) + return -ERANGE; + + oobregion->length = ecc->total; + oobregion->offset = FMC2_BBM_LEN; + + return 0; +} + +static int stm32_fmc2_nand_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_ecc_ctrl *ecc = &chip->ecc; + + if (section) + return -ERANGE; + + oobregion->length = mtd->oobsize - ecc->total - FMC2_BBM_LEN; + oobregion->offset = ecc->total + FMC2_BBM_LEN; + + return 0; +} + +static const struct mtd_ooblayout_ops stm32_fmc2_nand_ooblayout_ops = { + .ecc = stm32_fmc2_nand_ooblayout_ecc, + .free = stm32_fmc2_nand_ooblayout_free, +}; + +/* FMC2 caps */ +static int stm32_fmc2_calc_ecc_bytes(int step_size, int strength) +{ + /* Hamming */ + if (strength == FMC2_ECC_HAM) + return 4; + + /* BCH8 */ + if (strength == FMC2_ECC_BCH8) + return 14; + + /* BCH4 */ + return 8; +} + +NAND_ECC_CAPS_SINGLE(stm32_fmc2_ecc_caps, stm32_fmc2_calc_ecc_bytes, + FMC2_ECC_STEP_SIZE, + FMC2_ECC_HAM, FMC2_ECC_BCH4, FMC2_ECC_BCH8); + +/* FMC2 controller ops */ +static int stm32_fmc2_attach_chip(struct nand_chip *chip) +{ + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + /* + * Only NAND_ECC_HW mode is actually supported + * Hamming => ecc.strength = 1 + * BCH4 => ecc.strength = 4 + * BCH8 => ecc.strength = 8 + * ECC sector size = 512 + */ + if (chip->ecc.mode != NAND_ECC_HW) { + dev_err(fmc2->dev, "nand_ecc_mode is not well defined in the DT\n"); + return -EINVAL; + } + + ret = nand_ecc_choose_conf(chip, &stm32_fmc2_ecc_caps, + mtd->oobsize - FMC2_BBM_LEN); + if (ret) { + dev_err(fmc2->dev, "no valid ECC settings set\n"); + return ret; + } + + if (mtd->writesize / chip->ecc.size > FMC2_MAX_SG) { + dev_err(fmc2->dev, "nand page size is not supported\n"); + return -EINVAL; + } + + if (chip->bbt_options & NAND_BBT_USE_FLASH) + chip->bbt_options |= NAND_BBT_NO_OOB; + + /* NAND callbacks setup */ + stm32_fmc2_nand_callbacks_setup(chip); + + /* Define ECC layout */ + 
mtd_set_ooblayout(mtd, &stm32_fmc2_nand_ooblayout_ops); + + /* Configure bus width to 16-bit */ + if (chip->options & NAND_BUSWIDTH_16) + stm32_fmc2_set_buswidth_16(fmc2, true); + + return 0; +} + +static const struct nand_controller_ops stm32_fmc2_nand_controller_ops = { + .attach_chip = stm32_fmc2_attach_chip, + .exec_op = stm32_fmc2_exec_op, + .setup_data_interface = stm32_fmc2_setup_interface, +}; + +/* FMC2 probe */ +static int stm32_fmc2_parse_child(struct stm32_fmc2_nfc *fmc2, + struct device_node *dn) +{ + struct stm32_fmc2_nand *nand = &fmc2->nand; + u32 cs; + int ret, i; + + if (!of_get_property(dn, "reg", &nand->ncs)) + return -EINVAL; + + nand->ncs /= sizeof(u32); + if (!nand->ncs) { + dev_err(fmc2->dev, "invalid reg property size\n"); + return -EINVAL; + } + + for (i = 0; i < nand->ncs; i++) { + ret = of_property_read_u32_index(dn, "reg", i, &cs); + if (ret) { + dev_err(fmc2->dev, "could not retrieve reg property: %d\n", + ret); + return ret; + } + + if (cs > FMC2_MAX_CE) { + dev_err(fmc2->dev, "invalid reg value: %d\n", cs); + return -EINVAL; + } + + if (fmc2->cs_assigned & BIT(cs)) { + dev_err(fmc2->dev, "cs already assigned: %d\n", cs); + return -EINVAL; + } + + fmc2->cs_assigned |= BIT(cs); + nand->cs_used[i] = cs; + } + + nand_set_flash_node(&nand->chip, dn); + + return 0; +} + +static int stm32_fmc2_parse_dt(struct stm32_fmc2_nfc *fmc2) +{ + struct device_node *dn = fmc2->dev->of_node; + struct device_node *child; + int nchips = of_get_child_count(dn); + int ret = 0; + + if (!nchips) { + dev_err(fmc2->dev, "NAND chip not defined\n"); + return -EINVAL; + } + + if (nchips > 1) { + dev_err(fmc2->dev, "too many NAND chips defined\n"); + return -EINVAL; + } + + for_each_child_of_node(dn, child) { + ret = stm32_fmc2_parse_child(fmc2, child); + if (ret < 0) { + of_node_put(child); + return ret; + } + } + + return ret; +} + +static int stm32_fmc2_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct reset_control *rstc; + struct stm32_fmc2_nfc *fmc2; + struct stm32_fmc2_nand *nand; + struct resource *res; + struct mtd_info *mtd; + struct nand_chip *chip; + int chip_cs, mem_region, ret, irq; + + fmc2 = devm_kzalloc(dev, sizeof(*fmc2), GFP_KERNEL); + if (!fmc2) + return -ENOMEM; + + fmc2->dev = dev; + nand_controller_init(&fmc2->base); + fmc2->base.ops = &stm32_fmc2_nand_controller_ops; + + ret = stm32_fmc2_parse_dt(fmc2); + if (ret) + return ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fmc2->io_base = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->io_base)) + return PTR_ERR(fmc2->io_base); + + fmc2->io_phys_addr = res->start; + + for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE; + chip_cs++, mem_region += 3) { + if (!(fmc2->cs_assigned & BIT(chip_cs))) + continue; + + res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region); + fmc2->data_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->data_base[chip_cs])) + return PTR_ERR(fmc2->data_base[chip_cs]); + + fmc2->data_phys_addr[chip_cs] = res->start; + + res = platform_get_resource(pdev, IORESOURCE_MEM, + mem_region + 1); + fmc2->cmd_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->cmd_base[chip_cs])) + return PTR_ERR(fmc2->cmd_base[chip_cs]); + + res = platform_get_resource(pdev, IORESOURCE_MEM, + mem_region + 2); + fmc2->addr_base[chip_cs] = devm_ioremap_resource(dev, res); + if (IS_ERR(fmc2->addr_base[chip_cs])) + return PTR_ERR(fmc2->addr_base[chip_cs]); + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(dev, 
irq, stm32_fmc2_irq, 0, + dev_name(dev), fmc2); + if (ret) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + init_completion(&fmc2->complete); + + fmc2->clk = devm_clk_get(dev, NULL); + if (IS_ERR(fmc2->clk)) + return PTR_ERR(fmc2->clk); + + ret = clk_prepare_enable(fmc2->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + rstc = devm_reset_control_get(dev, NULL); + if (!IS_ERR(rstc)) { + reset_control_assert(rstc); + reset_control_deassert(rstc); + } + + /* DMA setup */ + ret = stm32_fmc2_dma_setup(fmc2); + if (ret) + return ret; + + /* FMC2 init routine */ + stm32_fmc2_init(fmc2); + + nand = &fmc2->nand; + chip = &nand->chip; + mtd = nand_to_mtd(chip); + mtd->dev.parent = dev; + + chip->controller = &fmc2->base; + chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | + NAND_USE_BOUNCE_BUFFER; + + /* Default ECC settings */ + chip->ecc.mode = NAND_ECC_HW; + chip->ecc.size = FMC2_ECC_STEP_SIZE; + chip->ecc.strength = FMC2_ECC_BCH8; + + /* Scan to find existence of the device */ + ret = nand_scan(chip, nand->ncs); + if (ret) + goto err_scan; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) + goto err_device_register; + + platform_set_drvdata(pdev, fmc2); + + return 0; + +err_device_register: + nand_cleanup(chip); + +err_scan: + if (fmc2->dma_ecc_ch) + dma_release_channel(fmc2->dma_ecc_ch); + if (fmc2->dma_tx_ch) + dma_release_channel(fmc2->dma_tx_ch); + if (fmc2->dma_rx_ch) + dma_release_channel(fmc2->dma_rx_ch); + + sg_free_table(&fmc2->dma_data_sg); + sg_free_table(&fmc2->dma_ecc_sg); + + clk_disable_unprepare(fmc2->clk); + + return ret; +} + +static int stm32_fmc2_remove(struct platform_device *pdev) +{ + struct stm32_fmc2_nfc *fmc2 = platform_get_drvdata(pdev); + struct stm32_fmc2_nand *nand = &fmc2->nand; + + nand_release(&nand->chip); + + if (fmc2->dma_ecc_ch) + dma_release_channel(fmc2->dma_ecc_ch); + if (fmc2->dma_tx_ch) + dma_release_channel(fmc2->dma_tx_ch); + if (fmc2->dma_rx_ch) + dma_release_channel(fmc2->dma_rx_ch); + + sg_free_table(&fmc2->dma_data_sg); + sg_free_table(&fmc2->dma_ecc_sg); + + clk_disable_unprepare(fmc2->clk); + + return 0; +} + +static int __maybe_unused stm32_fmc2_suspend(struct device *dev) +{ + struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); + + clk_disable_unprepare(fmc2->clk); + + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int __maybe_unused stm32_fmc2_resume(struct device *dev) +{ + struct stm32_fmc2_nfc *fmc2 = dev_get_drvdata(dev); + struct stm32_fmc2_nand *nand = &fmc2->nand; + int chip_cs, ret; + + pinctrl_pm_select_default_state(dev); + + ret = clk_prepare_enable(fmc2->clk); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + return ret; + } + + stm32_fmc2_init(fmc2); + + for (chip_cs = 0; chip_cs < FMC2_MAX_CE; chip_cs++) { + if (!(fmc2->cs_assigned & BIT(chip_cs))) + continue; + + nand_reset(&nand->chip, chip_cs); + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(stm32_fmc2_pm_ops, stm32_fmc2_suspend, + stm32_fmc2_resume); + +static const struct of_device_id stm32_fmc2_match[] = { + {.compatible = "st,stm32mp15-fmc2"}, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_fmc2_match); + +static struct platform_driver stm32_fmc2_driver = { + .probe = stm32_fmc2_probe, + .remove = stm32_fmc2_remove, + .driver = { + .name = "stm32_fmc2_nand", + .of_match_table = stm32_fmc2_match, + .pm = &stm32_fmc2_pm_ops, + }, +}; +module_platform_driver(stm32_fmc2_driver); + +MODULE_ALIAS("platform:stm32_fmc2_nand"); +MODULE_AUTHOR("Christophe Kerello "); 
+MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 nand driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index e828ee50a201..4282bc477761 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2013 Boris BREZILLON * @@ -10,16 +11,6 @@ * * Copyright (C) 2013 Dmitriy B. * Copyright (C) 2013 Sergey Lapin - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include @@ -163,38 +154,36 @@ #define NFC_MAX_CS 7 -/* - * Chip Select structure: stores information related to NAND Chip Select +/** + * struct sunxi_nand_chip_sel - stores information related to NAND Chip Select * - * @cs: the NAND CS id used to communicate with a NAND Chip - * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the - * NFC + * @cs: the NAND CS id used to communicate with a NAND Chip + * @rb: the Ready/Busy pin ID. -1 means no R/B pin connected to the NFC */ struct sunxi_nand_chip_sel { u8 cs; s8 rb; }; -/* - * sunxi HW ECC infos: stores information related to HW ECC support +/** + * struct sunxi_nand_hw_ecc - stores information related to HW ECC support * - * @mode: the sunxi ECC mode field deduced from ECC requirements + * @mode: the sunxi ECC mode field deduced from ECC requirements */ struct sunxi_nand_hw_ecc { int mode; }; -/* - * NAND chip structure: stores NAND chip device related information +/** + * struct sunxi_nand_chip - stores NAND chip device related information * - * @node: used to store NAND chips into a list - * @nand: base NAND chip structure - * @mtd: base MTD structure - * @clk_rate: clk_rate required for this NAND chip - * @timing_cfg TIMING_CFG register value for this NAND chip - * @selected: current active CS - * @nsels: number of CS lines required by the NAND chip - * @sels: array of CS lines descriptions + * @node: used to store NAND chips into a list + * @nand: base NAND chip structure + * @clk_rate: clk_rate required for this NAND chip + * @timing_cfg: TIMING_CFG register value for this NAND chip + * @timing_ctl: TIMING_CTL register value for this NAND chip + * @nsels: number of CS lines required by the NAND chip + * @sels: array of CS lines descriptions */ struct sunxi_nand_chip { struct list_head node; @@ -202,11 +191,6 @@ struct sunxi_nand_chip { unsigned long clk_rate; u32 timing_cfg; u32 timing_ctl; - int selected; - int addr_cycles; - u32 addr[2]; - int cmd_cycles; - u8 cmd[2]; int nsels; struct sunxi_nand_chip_sel sels[0]; }; @@ -216,20 +200,21 @@ static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand) return container_of(nand, struct sunxi_nand_chip, nand); } -/* - * NAND Controller structure: stores sunxi NAND controller information +/** + * struct sunxi_nfc - stores sunxi NAND controller information * - * @controller: base controller structure - * @dev: parent device (used to print error messages) - * @regs: NAND controller registers - * @ahb_clk: NAND Controller AHB clock - * @mod_clk: NAND Controller mod clock - * @assigned_cs: 
bitmask describing already assigned CS lines - * @clk_rate: NAND controller current clock rate - * @chips: a list containing all the NAND chips attached to - * this NAND controller - * @complete: a completion object used to wait for NAND - * controller events + * @controller: base controller structure + * @dev: parent device (used to print error messages) + * @regs: NAND controller registers + * @ahb_clk: NAND controller AHB clock + * @mod_clk: NAND controller mod clock + * @reset: NAND controller reset line + * @assigned_cs: bitmask describing already assigned CS lines + * @clk_rate: NAND controller current clock rate + * @chips: a list containing all the NAND chips attached to this NAND + * controller + * @complete: a completion object used to wait for NAND controller events + * @dmac: the DMA channel attached to the NAND controller */ struct sunxi_nfc { struct nand_controller controller; @@ -339,13 +324,11 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc) return ret; } -static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf, +static int sunxi_nfc_dma_op_prepare(struct sunxi_nfc *nfc, const void *buf, int chunksize, int nchunks, enum dma_data_direction ddir, struct scatterlist *sg) { - struct nand_chip *nand = mtd_to_nand(mtd); - struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct dma_async_tx_descriptor *dmad; enum dma_transfer_direction tdir; dma_cookie_t dmat; @@ -388,38 +371,16 @@ err_unmap_buf: return ret; } -static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd, +static void sunxi_nfc_dma_op_cleanup(struct sunxi_nfc *nfc, enum dma_data_direction ddir, struct scatterlist *sg) { - struct nand_chip *nand = mtd_to_nand(mtd); - struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); - dma_unmap_sg(nfc->dev, sg, 1, ddir); writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, nfc->regs + NFC_REG_CTL); } -static int sunxi_nfc_dev_ready(struct nand_chip *nand) -{ - struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); - struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); - u32 mask; - - if (sunxi_nand->selected < 0) - return 0; - - if (sunxi_nand->sels[sunxi_nand->selected].rb < 0) { - dev_err(nfc->dev, "cannot check R/B NAND status!\n"); - return 0; - } - - mask = NFC_RB_STATE(sunxi_nand->sels[sunxi_nand->selected].rb); - - return !!(readl(nfc->regs + NFC_REG_ST) & mask); -} - -static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip) +static void sunxi_nfc_select_chip(struct nand_chip *nand, unsigned int cs) { struct mtd_info *mtd = nand_to_mtd(nand); struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); @@ -427,40 +388,27 @@ static void sunxi_nfc_select_chip(struct nand_chip *nand, int chip) struct sunxi_nand_chip_sel *sel; u32 ctl; - if (chip > 0 && chip >= sunxi_nand->nsels) - return; - - if (chip == sunxi_nand->selected) + if (cs > 0 && cs >= sunxi_nand->nsels) return; ctl = readl(nfc->regs + NFC_REG_CTL) & ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN); - if (chip >= 0) { - sel = &sunxi_nand->sels[chip]; + sel = &sunxi_nand->sels[cs]; + ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | NFC_PAGE_SHIFT(nand->page_shift); + if (sel->rb >= 0) + ctl |= NFC_RB_SEL(sel->rb); - ctl |= NFC_CE_SEL(sel->cs) | NFC_EN | - NFC_PAGE_SHIFT(nand->page_shift); - if (sel->rb < 0) { - nand->legacy.dev_ready = NULL; - } else { - nand->legacy.dev_ready = sunxi_nfc_dev_ready; - ctl |= NFC_RB_SEL(sel->rb); - } - - writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA); + writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA); - if 
(nfc->clk_rate != sunxi_nand->clk_rate) { - clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate); - nfc->clk_rate = sunxi_nand->clk_rate; - } + if (nfc->clk_rate != sunxi_nand->clk_rate) { + clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate); + nfc->clk_rate = sunxi_nand->clk_rate; } writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL); writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG); writel(ctl, nfc->regs + NFC_REG_CTL); - - sunxi_nand->selected = chip; } static void sunxi_nfc_read_buf(struct nand_chip *nand, uint8_t *buf, int len) @@ -537,71 +485,6 @@ static void sunxi_nfc_write_buf(struct nand_chip *nand, const uint8_t *buf, } } -static uint8_t sunxi_nfc_read_byte(struct nand_chip *nand) -{ - uint8_t ret = 0; - - sunxi_nfc_read_buf(nand, &ret, 1); - - return ret; -} - -static void sunxi_nfc_cmd_ctrl(struct nand_chip *nand, int dat, - unsigned int ctrl) -{ - struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); - struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); - int ret; - - if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) && - !(ctrl & (NAND_CLE | NAND_ALE))) { - u32 cmd = 0; - - if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles) - return; - - if (sunxi_nand->cmd_cycles--) - cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0]; - - if (sunxi_nand->cmd_cycles--) { - cmd |= NFC_SEND_CMD2; - writel(sunxi_nand->cmd[1], - nfc->regs + NFC_REG_RCMD_SET); - } - - sunxi_nand->cmd_cycles = 0; - - if (sunxi_nand->addr_cycles) { - cmd |= NFC_SEND_ADR | - NFC_ADR_NUM(sunxi_nand->addr_cycles); - writel(sunxi_nand->addr[0], - nfc->regs + NFC_REG_ADDR_LOW); - } - - if (sunxi_nand->addr_cycles > 4) - writel(sunxi_nand->addr[1], - nfc->regs + NFC_REG_ADDR_HIGH); - - ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); - if (ret) - return; - - writel(cmd, nfc->regs + NFC_REG_CMD); - sunxi_nand->addr[0] = 0; - sunxi_nand->addr[1] = 0; - sunxi_nand->addr_cycles = 0; - sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); - } - - if (ctrl & NAND_CLE) { - sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat; - } else if (ctrl & NAND_ALE) { - sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |= - dat << ((sunxi_nand->addr_cycles % 4) * 8); - sunxi_nand->addr_cycles++; - } -} - /* These seed values have been extracted from Allwinner's BSP */ static const u16 sunxi_nfc_randomizer_page_seeds[] = { 0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72, @@ -684,8 +567,10 @@ static u16 sunxi_nfc_randomizer_step(u16 state, int count) return state; } -static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc) +static u16 sunxi_nfc_randomizer_state(struct nand_chip *nand, int page, + bool ecc) { + struct mtd_info *mtd = nand_to_mtd(nand); const u16 *seeds = sunxi_nfc_randomizer_page_seeds; int mod = mtd_div_by_ws(mtd->erasesize, mtd); @@ -702,10 +587,9 @@ static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc) return seeds[page % mod]; } -static void sunxi_nfc_randomizer_config(struct mtd_info *mtd, - int page, bool ecc) +static void sunxi_nfc_randomizer_config(struct nand_chip *nand, int page, + bool ecc) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); u16 state; @@ -714,14 +598,13 @@ static void sunxi_nfc_randomizer_config(struct mtd_info *mtd, return; ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL); - state = sunxi_nfc_randomizer_state(mtd, page, ecc); + state = sunxi_nfc_randomizer_state(nand, page, ecc); ecc_ctl = readl(nfc->regs + 
NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK; writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd) +static void sunxi_nfc_randomizer_enable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); if (!(nand->options & NAND_NEED_SCRAMBLING)) @@ -731,9 +614,8 @@ static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd) nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd) +static void sunxi_nfc_randomizer_disable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); if (!(nand->options & NAND_NEED_SCRAMBLING)) @@ -743,36 +625,35 @@ static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd) nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm) +static void sunxi_nfc_randomize_bbm(struct nand_chip *nand, int page, u8 *bbm) { - u16 state = sunxi_nfc_randomizer_state(mtd, page, true); + u16 state = sunxi_nfc_randomizer_state(nand, page, true); bbm[0] ^= state; bbm[1] ^= sunxi_nfc_randomizer_step(state, 8); } -static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd, +static void sunxi_nfc_randomizer_write_buf(struct nand_chip *nand, const uint8_t *buf, int len, bool ecc, int page) { - sunxi_nfc_randomizer_config(mtd, page, ecc); - sunxi_nfc_randomizer_enable(mtd); - sunxi_nfc_write_buf(mtd_to_nand(mtd), buf, len); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_config(nand, page, ecc); + sunxi_nfc_randomizer_enable(nand); + sunxi_nfc_write_buf(nand, buf, len); + sunxi_nfc_randomizer_disable(nand); } -static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf, +static void sunxi_nfc_randomizer_read_buf(struct nand_chip *nand, uint8_t *buf, int len, bool ecc, int page) { - sunxi_nfc_randomizer_config(mtd, page, ecc); - sunxi_nfc_randomizer_enable(mtd); - sunxi_nfc_read_buf(mtd_to_nand(mtd), buf, len); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_config(nand, page, ecc); + sunxi_nfc_randomizer_enable(nand); + sunxi_nfc_read_buf(nand, buf, len); + sunxi_nfc_randomizer_disable(nand); } -static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd) +static void sunxi_nfc_hw_ecc_enable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct sunxi_nand_hw_ecc *data = nand->ecc.priv; u32 ecc_ctl; @@ -789,9 +670,8 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd) writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL); } -static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd) +static void sunxi_nfc_hw_ecc_disable(struct nand_chip *nand) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN, @@ -811,10 +691,9 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf) return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); } -static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob, +static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct nand_chip *nand, u8 *oob, int step, bool bbm, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)), @@ -822,21 +701,20 @@ static void 
sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob, /* De-randomize the Bad Block Marker. */ if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) - sunxi_nfc_randomize_bbm(mtd, page, oob); + sunxi_nfc_randomize_bbm(nand, page, oob); } -static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct nand_chip *nand, const u8 *oob, int step, bool bbm, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); u8 user_data[4]; /* Randomize the Bad Block Marker. */ if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) { memcpy(user_data, oob, sizeof(user_data)); - sunxi_nfc_randomize_bbm(mtd, page, user_data); + sunxi_nfc_randomize_bbm(nand, page, user_data); oob = user_data; } @@ -844,9 +722,11 @@ static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd, nfc->regs + NFC_REG_USER_DATA(step)); } -static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_update_stats(struct nand_chip *nand, unsigned int *max_bitflips, int ret) { + struct mtd_info *mtd = nand_to_mtd(nand); + if (ret < 0) { mtd->ecc_stats.failed++; } else { @@ -855,10 +735,9 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, } } -static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, +static int sunxi_nfc_hw_ecc_correct(struct nand_chip *nand, u8 *data, u8 *oob, int step, u32 status, bool *erased) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; u32 tmp; @@ -892,14 +771,13 @@ static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, return NFC_ECC_ERR_CNT(step, tmp); } -static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, +static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand, u8 *data, int data_off, u8 *oob, int oob_off, int *cur_off, unsigned int *max_bitflips, bool bbm, bool oob_required, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; int raw_mode = 0; @@ -909,7 +787,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, if (*cur_off != data_off) nand_change_read_column_op(nand, data_off, NULL, 0, false); - sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page); + sunxi_nfc_randomizer_read_buf(nand, NULL, ecc->size, false, page); if (data_off + ecc->size != oob_off) nand_change_read_column_op(nand, oob_off, NULL, 0, false); @@ -918,18 +796,18 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, if (ret) return ret; - sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_randomizer_enable(nand); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_disable(nand); if (ret) return ret; *cur_off = oob_off + ecc->bytes + 4; - ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, + ret = sunxi_nfc_hw_ecc_correct(nand, data, oob_required ? 
oob : NULL, 0, readl(nfc->regs + NFC_REG_ECC_ST), &erased); if (erased) @@ -961,24 +839,24 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, if (oob_required) { nand_change_read_column_op(nand, oob_off, NULL, 0, false); - sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, + sunxi_nfc_randomizer_read_buf(nand, oob, ecc->bytes + 4, true, page); - sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0, + sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, 0, bbm, page); } } - sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret); + sunxi_nfc_hw_ecc_update_stats(nand, max_bitflips, ret); return raw_mode; } -static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_read_extra_oob(struct nand_chip *nand, u8 *oob, int *cur_off, bool randomize, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); + struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; int offset = ((ecc->bytes + 4) * ecc->steps); int len = mtd->oobsize - offset; @@ -993,20 +871,20 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, if (!randomize) sunxi_nfc_read_buf(nand, oob + offset, len); else - sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len, + sunxi_nfc_randomizer_read_buf(nand, oob + offset, len, false, page); if (cur_off) *cur_off = mtd->oobsize + mtd->writesize; } -static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, +static int sunxi_nfc_hw_ecc_read_chunks_dma(struct nand_chip *nand, uint8_t *buf, int oob_required, int page, int nchunks) { - struct nand_chip *nand = mtd_to_nand(mtd); bool randomized = nand->options & NAND_NEED_SCRAMBLING; struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; unsigned int max_bitflips = 0; int ret, i, raw_mode = 0; @@ -1017,14 +895,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, if (ret) return ret; - ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks, + ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, nchunks, DMA_FROM_DEVICE, &sg); if (ret) return ret; - sunxi_nfc_hw_ecc_enable(mtd); - sunxi_nfc_randomizer_config(mtd, page, false); - sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); + sunxi_nfc_randomizer_config(nand, page, false); + sunxi_nfc_randomizer_enable(nand); writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); @@ -1038,10 +916,10 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, if (ret) dmaengine_terminate_all(nfc->dmac); - sunxi_nfc_randomizer_disable(mtd); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_randomizer_disable(nand); + sunxi_nfc_hw_ecc_disable(nand); - sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg); + sunxi_nfc_dma_op_cleanup(nfc, DMA_FROM_DEVICE, &sg); if (ret) return ret; @@ -1055,7 +933,7 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, u8 *oob = nand->oob_poi + oob_off; bool erased; - ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL, + ret = sunxi_nfc_hw_ecc_correct(nand, randomized ? data : NULL, oob_required ? 
oob : NULL, i, status, &erased); @@ -1069,14 +947,14 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, mtd->writesize + oob_off, oob, ecc->bytes + 4, false); - sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i, + sunxi_nfc_hw_ecc_get_prot_oob_bytes(nand, oob, i, !i, page); } if (erased) raw_mode = 1; - sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); + sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret); } if (status & NFC_ECC_ERR_MSK) { @@ -1111,25 +989,24 @@ static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, if (ret >= 0) raw_mode = 1; - sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); + sunxi_nfc_hw_ecc_update_stats(nand, &max_bitflips, ret); } } if (oob_required) - sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi, + sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, NULL, !raw_mode, page); return max_bitflips; } -static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, +static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand, const u8 *data, int data_off, const u8 *oob, int oob_off, int *cur_off, bool bbm, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; int ret; @@ -1137,7 +1014,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, if (data_off != *cur_off) nand_change_write_column_op(nand, data_off, NULL, 0, false); - sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page); + sunxi_nfc_randomizer_write_buf(nand, data, ecc->size, false, page); if (data_off + ecc->size != oob_off) nand_change_write_column_op(nand, oob_off, NULL, 0, false); @@ -1146,15 +1023,15 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, if (ret) return ret; - sunxi_nfc_randomizer_enable(mtd); - sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page); + sunxi_nfc_randomizer_enable(nand); + sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page); writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | NFC_ECC_OP, nfc->regs + NFC_REG_CMD); ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, false, 0); - sunxi_nfc_randomizer_disable(mtd); + sunxi_nfc_randomizer_disable(nand); if (ret) return ret; @@ -1163,11 +1040,11 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, return 0; } -static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, +static void sunxi_nfc_hw_ecc_write_extra_oob(struct nand_chip *nand, u8 *oob, int *cur_off, int page) { - struct nand_chip *nand = mtd_to_nand(mtd); + struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; int offset = ((ecc->bytes + 4) * ecc->steps); int len = mtd->oobsize - offset; @@ -1179,32 +1056,34 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd, nand_change_write_column_op(nand, offset + mtd->writesize, NULL, 0, false); - sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page); + sunxi_nfc_randomizer_write_buf(nand, oob + offset, len, false, page); if (cur_off) *cur_off = mtd->oobsize + mtd->writesize; } -static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf, +static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *nand, uint8_t *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; unsigned int max_bitflips = 0; int ret, i, cur_off = 0; bool raw_mode = false; - 
nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); for (i = 0; i < ecc->steps; i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); u8 *data = buf + data_off; - u8 *oob = chip->oob_poi + oob_off; + u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob, + ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, &max_bitflips, !i, oob_required, page); @@ -1215,52 +1094,55 @@ static int sunxi_nfc_hw_ecc_read_page(struct nand_chip *chip, uint8_t *buf, } if (oob_required) - sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off, + sunxi_nfc_hw_ecc_read_extra_oob(nand, nand->oob_poi, &cur_off, !raw_mode, page); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); return max_bitflips; } -static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *chip, u8 *buf, +static int sunxi_nfc_hw_ecc_read_page_dma(struct nand_chip *nand, u8 *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); int ret; - nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page, - chip->ecc.steps); + ret = sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, oob_required, page, + nand->ecc.steps); if (ret >= 0) return ret; /* Fallback to PIO mode */ - return sunxi_nfc_hw_ecc_read_page(chip, buf, oob_required, page); + return sunxi_nfc_hw_ecc_read_page(nand, buf, oob_required, page); } -static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *nand, u32 data_offs, u32 readlen, u8 *bufpoi, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; int ret, i, cur_off = 0; unsigned int max_bitflips = 0; - nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); for (i = data_offs / ecc->size; i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); u8 *data = bufpoi + data_off; - u8 *oob = chip->oob_poi + oob_off; + u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, + ret = sunxi_nfc_hw_ecc_read_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, &max_bitflips, !i, @@ -1269,113 +1151,118 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct nand_chip *chip, return ret; } - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); return max_bitflips; } -static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_read_subpage_dma(struct nand_chip *nand, u32 data_offs, u32 readlen, u8 *buf, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); + int nchunks = DIV_ROUND_UP(data_offs + readlen, nand->ecc.size); int ret; - nand_read_page_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_read_page_op(nand, page, 0, NULL, 0); - ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks); + ret = 
sunxi_nfc_hw_ecc_read_chunks_dma(nand, buf, false, page, nchunks); if (ret >= 0) return ret; /* Fallback to PIO mode */ - return sunxi_nfc_hw_ecc_read_subpage(chip, data_offs, readlen, + return sunxi_nfc_hw_ecc_read_subpage(nand, data_offs, readlen, buf, page); } -static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_write_page(struct nand_chip *nand, const uint8_t *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; int ret, i, cur_off = 0; - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); + + nand_prog_page_begin_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); for (i = 0; i < ecc->steps; i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); const u8 *data = buf + data_off; - const u8 *oob = chip->oob_poi + oob_off; + const u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, + ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, !i, page); if (ret) return ret; } - if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) - sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + if (oob_required || (nand->options & NAND_NEED_SCRAMBLING)) + sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi, &cur_off, page); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); } -static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_write_subpage(struct nand_chip *nand, u32 data_offs, u32 data_len, const u8 *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_ecc_ctrl *ecc = &chip->ecc; + struct mtd_info *mtd = nand_to_mtd(nand); + struct nand_ecc_ctrl *ecc = &nand->ecc; int ret, i, cur_off = 0; - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + sunxi_nfc_select_chip(nand, nand->cur_cs); - sunxi_nfc_hw_ecc_enable(mtd); + nand_prog_page_begin_op(nand, page, 0, NULL, 0); + + sunxi_nfc_hw_ecc_enable(nand); for (i = data_offs / ecc->size; i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) { int data_off = i * ecc->size; int oob_off = i * (ecc->bytes + 4); const u8 *data = buf + data_off; - const u8 *oob = chip->oob_poi + oob_off; + const u8 *oob = nand->oob_poi + oob_off; - ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, + ret = sunxi_nfc_hw_ecc_write_chunk(nand, data, data_off, oob, oob_off + mtd->writesize, &cur_off, !i, page); if (ret) return ret; } - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_hw_ecc_disable(nand); - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); } -static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip, +static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *nand, const u8 *buf, int oob_required, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); - struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); struct nand_ecc_ctrl *ecc = &nand->ecc; struct scatterlist sg; int ret, i; + sunxi_nfc_select_chip(nand, nand->cur_cs); + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); if (ret) return ret; - ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps, + ret = sunxi_nfc_dma_op_prepare(nfc, buf, ecc->size, ecc->steps, 
DMA_TO_DEVICE, &sg); if (ret) goto pio_fallback; @@ -1383,14 +1270,14 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip, for (i = 0; i < ecc->steps; i++) { const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4)); - sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page); + sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, i, !i, page); } - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + nand_prog_page_begin_op(nand, page, 0, NULL, 0); - sunxi_nfc_hw_ecc_enable(mtd); - sunxi_nfc_randomizer_config(mtd, page, false); - sunxi_nfc_randomizer_enable(mtd); + sunxi_nfc_hw_ecc_enable(nand); + sunxi_nfc_randomizer_config(nand, page, false); + sunxi_nfc_randomizer_enable(nand); writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, nfc->regs + NFC_REG_WCMD_SET); @@ -1405,46 +1292,46 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip, if (ret) dmaengine_terminate_all(nfc->dmac); - sunxi_nfc_randomizer_disable(mtd); - sunxi_nfc_hw_ecc_disable(mtd); + sunxi_nfc_randomizer_disable(nand); + sunxi_nfc_hw_ecc_disable(nand); - sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg); + sunxi_nfc_dma_op_cleanup(nfc, DMA_TO_DEVICE, &sg); if (ret) return ret; - if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) + if (oob_required || (nand->options & NAND_NEED_SCRAMBLING)) /* TODO: use DMA to transfer extra OOB bytes ? */ - sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, + sunxi_nfc_hw_ecc_write_extra_oob(nand, nand->oob_poi, NULL, page); - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); pio_fallback: - return sunxi_nfc_hw_ecc_write_page(chip, buf, oob_required, page); + return sunxi_nfc_hw_ecc_write_page(nand, buf, oob_required, page); } -static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *chip, int page) +static int sunxi_nfc_hw_ecc_read_oob(struct nand_chip *nand, int page) { - chip->pagebuf = -1; + nand->pagebuf = -1; - return chip->ecc.read_page(chip, chip->data_buf, 1, page); + return nand->ecc.read_page(nand, nand->data_buf, 1, page); } -static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *chip, int page) +static int sunxi_nfc_hw_ecc_write_oob(struct nand_chip *nand, int page) { - struct mtd_info *mtd = nand_to_mtd(chip); + struct mtd_info *mtd = nand_to_mtd(nand); int ret; - chip->pagebuf = -1; + nand->pagebuf = -1; - memset(chip->data_buf, 0xff, mtd->writesize); - ret = chip->ecc.write_page(chip, chip->data_buf, 1, page); + memset(nand->data_buf, 0xff, mtd->writesize); + ret = nand->ecc.write_page(nand, nand->data_buf, 1, page); if (ret) return ret; /* Send command to program the OOB data */ - return nand_prog_page_end_op(chip); + return nand_prog_page_end_op(nand); } static const s32 tWB_lut[] = {6, 12, 16, 20}; @@ -1471,8 +1358,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, const struct nand_data_interface *conf) { - struct sunxi_nand_chip *chip = to_sunxi_nand(nand); - struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller); + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); const struct nand_sdr_timings *timings; u32 min_clk_period = 0; s32 tWB, tADL, tWHR, tRHW, tCAD; @@ -1555,6 +1442,20 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, if (timings->tRHW_min > (min_clk_period * 20)) min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20); + /* + * In non-EDO, tREA should be less than tRP to guarantee 
that the + * controller does not sample the IO lines too early. Unfortunately, + * the sunxi NAND controller does not allow us to have different + * values for tRP and tREH (tRP = tREH = tRW / 2). + * + * We have 2 options to overcome this limitation: + * + * 1/ Extend tRC to fulfil the tREA <= tRC / 2 constraint + * 2/ Use EDO mode (only works if timings->tRLOH > 0) + */ + if (timings->tREA_max > min_clk_period && !timings->tRLOH_min) + min_clk_period = timings->tREA_max; + tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max, min_clk_period); if (tWB < 0) { @@ -1591,7 +1492,7 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, tCAD = 0x7; /* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */ - chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD); + sunxi_nand->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD); /* Convert min_clk_period from picoseconds to nanoseconds */ min_clk_period = DIV_ROUND_UP(min_clk_period, 1000); @@ -1602,21 +1503,24 @@ static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, * This new formula was verified with a scope and validated by * Allwinner engineers. */ - chip->clk_rate = NSEC_PER_SEC / min_clk_period; - real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate); + sunxi_nand->clk_rate = NSEC_PER_SEC / min_clk_period; + real_clk_rate = clk_round_rate(nfc->mod_clk, sunxi_nand->clk_rate); if (real_clk_rate <= 0) { - dev_err(nfc->dev, "Unable to round clk %lu\n", chip->clk_rate); + dev_err(nfc->dev, "Unable to round clk %lu\n", + sunxi_nand->clk_rate); return -EINVAL; } + sunxi_nand->timing_ctl = 0; + /* * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data * output cycle timings shall be used if the host drives tRC less than - * 30 ns. + * 30 ns. We should also use EDO mode if tREA is bigger than tRP. */ min_clk_period = NSEC_PER_SEC / real_clk_rate; - chip->timing_ctl = ((min_clk_period * 2) < 30) ? 
- NFC_TIMING_CTL_EDO : 0; + if (min_clk_period * 2 < 30 || min_clk_period * 1000 < timings->tREA_max) + sunxi_nand->timing_ctl = NFC_TIMING_CTL_EDO; return 0; } @@ -1677,14 +1581,13 @@ static void sunxi_nand_hw_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc) kfree(ecc->priv); } -static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, +static int sunxi_nand_hw_ecc_ctrl_init(struct nand_chip *nand, struct nand_ecc_ctrl *ecc, struct device_node *np) { static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 }; - struct nand_chip *nand = mtd_to_nand(mtd); - struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); - struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + struct mtd_info *mtd = nand_to_mtd(nand); struct sunxi_nand_hw_ecc *data; int nsectors; int ret; @@ -1808,7 +1711,6 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc) static int sunxi_nand_attach_chip(struct nand_chip *nand) { - struct mtd_info *mtd = nand_to_mtd(nand); struct nand_ecc_ctrl *ecc = &nand->ecc; struct device_node *np = nand_get_flash_node(nand); int ret; @@ -1831,7 +1733,7 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand) switch (ecc->mode) { case NAND_ECC_HW: - ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np); + ret = sunxi_nand_hw_ecc_ctrl_init(nand, ecc, np); if (ret) return ret; break; @@ -1845,15 +1747,165 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand) return 0; } +static int sunxi_nfc_exec_subop(struct nand_chip *nand, + const struct nand_subop *subop) +{ + struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); + u32 cmd = 0, extcmd = 0, cnt = 0, addrs[2] = { }; + unsigned int i, j, remaining, start; + void *inbuf = NULL; + int ret; + + for (i = 0; i < subop->ninstrs; i++) { + const struct nand_op_instr *instr = &subop->instrs[i]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (cmd & NFC_SEND_CMD1) { + if (WARN_ON(cmd & NFC_SEND_CMD2)) + return -EINVAL; + + cmd |= NFC_SEND_CMD2; + extcmd |= instr->ctx.cmd.opcode; + } else { + cmd |= NFC_SEND_CMD1 | + NFC_CMD(instr->ctx.cmd.opcode); + } + break; + + case NAND_OP_ADDR_INSTR: + remaining = nand_subop_get_num_addr_cyc(subop, i); + start = nand_subop_get_addr_start_off(subop, i); + for (j = 0; j < 8 && j + start < remaining; j++) { + u32 addr = instr->ctx.addr.addrs[j + start]; + + addrs[j / 4] |= addr << (j % 4) * 8; + } + + if (j) + cmd |= NFC_SEND_ADR | NFC_ADR_NUM(j); + + break; + + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + start = nand_subop_get_data_start_off(subop, i); + remaining = nand_subop_get_data_len(subop, i); + cnt = min_t(u32, remaining, NFC_SRAM_SIZE); + cmd |= NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD; + + if (instr->type == NAND_OP_DATA_OUT_INSTR) { + cmd |= NFC_ACCESS_DIR; + memcpy_toio(nfc->regs + NFC_RAM0_BASE, + instr->ctx.data.buf.out + start, + cnt); + } else { + inbuf = instr->ctx.data.buf.in + start; + } + + break; + + case NAND_OP_WAITRDY_INSTR: + cmd |= NFC_WAIT_FLAG; + break; + } + } + + ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); + if (ret) + return ret; + + if (cmd & NFC_SEND_ADR) { + writel(addrs[0], nfc->regs + NFC_REG_ADDR_LOW); + writel(addrs[1], nfc->regs + NFC_REG_ADDR_HIGH); + } + + if (cmd & NFC_SEND_CMD2) + writel(extcmd, + nfc->regs + + (cmd & NFC_ACCESS_DIR ? 
+ NFC_REG_WCMD_SET : NFC_REG_RCMD_SET)); + + if (cmd & NFC_DATA_TRANS) + writel(cnt, nfc->regs + NFC_REG_CNT); + + writel(cmd, nfc->regs + NFC_REG_CMD); + + ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, + !(cmd & NFC_WAIT_FLAG) && cnt < 64, + 0); + if (ret) + return ret; + + if (inbuf) + memcpy_fromio(inbuf, nfc->regs + NFC_RAM0_BASE, cnt); + + return 0; +} + +static int sunxi_nfc_soft_waitrdy(struct nand_chip *nand, + const struct nand_subop *subop) +{ + return nand_soft_waitrdy(nand, + subop->instrs[0].ctx.waitrdy.timeout_ms); +} + +static const struct nand_op_parser sunxi_nfc_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)), + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), +); + +static const struct nand_op_parser sunxi_nfc_norb_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 1024)), + NAND_OP_PARSER_PATTERN(sunxi_nfc_exec_subop, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 8), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(true, 1024), + NAND_OP_PARSER_PAT_CMD_ELEM(true)), + NAND_OP_PARSER_PATTERN(sunxi_nfc_soft_waitrdy, + NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), +); + +static int sunxi_nfc_exec_op(struct nand_chip *nand, + const struct nand_operation *op, bool check_only) +{ + struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); + const struct nand_op_parser *parser; + + sunxi_nfc_select_chip(nand, op->cs); + + if (sunxi_nand->sels[op->cs].rb >= 0) + parser = &sunxi_nfc_op_parser; + else + parser = &sunxi_nfc_norb_op_parser; + + return nand_op_parser_exec_op(nand, parser, op, check_only); +} + static const struct nand_controller_ops sunxi_nand_controller_ops = { .attach_chip = sunxi_nand_attach_chip, .setup_data_interface = sunxi_nfc_setup_data_interface, + .exec_op = sunxi_nfc_exec_op, }; static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, struct device_node *np) { - struct sunxi_nand_chip *chip; + struct sunxi_nand_chip *sunxi_nand; struct mtd_info *mtd; struct nand_chip *nand; int nsels; @@ -1870,17 +1922,14 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, return -EINVAL; } - chip = devm_kzalloc(dev, - sizeof(*chip) + - (nsels * sizeof(struct sunxi_nand_chip_sel)), - GFP_KERNEL); - if (!chip) { + sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels), + GFP_KERNEL); + if (!sunxi_nand) { dev_err(dev, "could not allocate chip\n"); return -ENOMEM; } - chip->nsels = nsels; - chip->selected = -1; + sunxi_nand->nsels = nsels; for (i = 0; i < nsels; i++) { ret = of_property_read_u32_index(np, "reg", i, &tmp); @@ -1902,18 +1951,17 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, return -EINVAL; } - chip->sels[i].cs = tmp; + sunxi_nand->sels[i].cs = tmp; if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) && tmp < 2) - chip->sels[i].rb = tmp; + sunxi_nand->sels[i].rb = tmp; else - chip->sels[i].rb = -1; + sunxi_nand->sels[i].rb = -1; } - nand = &chip->nand; + 
nand = &sunxi_nand->nand; /* Default tR value specified in the ONFI spec (chapter 4.15.1) */ - nand->legacy.chip_delay = 200; nand->controller = &nfc->controller; nand->controller->ops = &sunxi_nand_controller_ops; @@ -1923,11 +1971,6 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, */ nand->ecc.mode = NAND_ECC_HW; nand_set_flash_node(nand, np); - nand->legacy.select_chip = sunxi_nfc_select_chip; - nand->legacy.cmd_ctrl = sunxi_nfc_cmd_ctrl; - nand->legacy.read_buf = sunxi_nfc_read_buf; - nand->legacy.write_buf = sunxi_nfc_write_buf; - nand->legacy.read_byte = sunxi_nfc_read_byte; mtd = nand_to_mtd(nand); mtd->dev.parent = dev; @@ -1943,7 +1986,7 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, return ret; } - list_add_tail(&chip->node, &nfc->chips); + list_add_tail(&sunxi_nand->node, &nfc->chips); return 0; } @@ -1973,14 +2016,15 @@ static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc) static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) { - struct sunxi_nand_chip *chip; + struct sunxi_nand_chip *sunxi_nand; while (!list_empty(&nfc->chips)) { - chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip, - node); - nand_release(&chip->nand); - sunxi_nand_ecc_cleanup(&chip->nand.ecc); - list_del(&chip->node); + sunxi_nand = list_first_entry(&nfc->chips, + struct sunxi_nand_chip, + node); + nand_release(&sunxi_nand->nand); + sunxi_nand_ecc_cleanup(&sunxi_nand->nand.ecc); + list_del(&sunxi_nand->node); } } @@ -2124,7 +2168,7 @@ static struct platform_driver sunxi_nfc_driver = { }; module_platform_driver(sunxi_nfc_driver); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); MODULE_AUTHOR("Boris BREZILLON"); MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver"); MODULE_ALIAS("platform:sunxi_nand"); diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c index f3b59e649b7d..db030f1701ee 100644 --- a/drivers/mtd/nand/raw/tmio_nand.c +++ b/drivers/mtd/nand/raw/tmio_nand.c @@ -104,6 +104,7 @@ struct tmio_nand { struct nand_chip chip; + struct completion comp; struct platform_device *dev; @@ -168,15 +169,11 @@ static int tmio_nand_dev_ready(struct nand_chip *chip) static irqreturn_t tmio_irq(int irq, void *__tmio) { struct tmio_nand *tmio = __tmio; - struct nand_chip *nand_chip = &tmio->chip; /* disable RDYREQ interrupt */ tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); + complete(&tmio->comp); - if (unlikely(!waitqueue_active(&nand_chip->controller->wq))) - dev_warn(&tmio->dev->dev, "spurious interrupt\n"); - - wake_up(&nand_chip->controller->wq); return IRQ_HANDLED; } @@ -193,18 +190,18 @@ static int tmio_nand_wait(struct nand_chip *nand_chip) u8 status; /* enable RDYREQ interrupt */ + tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR); + reinit_completion(&tmio->comp); tmio_iowrite8(0x81, tmio->fcr + FCR_IMR); - timeout = wait_event_timeout(nand_chip->controller->wq, - tmio_nand_dev_ready(nand_chip), - msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20)); + timeout = 400; + timeout = wait_for_completion_timeout(&tmio->comp, + msecs_to_jiffies(timeout)); if (unlikely(!tmio_nand_dev_ready(nand_chip))) { tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); - dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n", - nand_chip->state == FL_ERASING ? "erase" : "program", - nand_chip->state == FL_ERASING ? 
400 : 20); + dev_warn(&tmio->dev->dev, "still busy after 400 ms\n"); } else if (unlikely(!timeout)) { tmio_iowrite8(0x00, tmio->fcr + FCR_IMR); @@ -378,6 +375,8 @@ static int tmio_probe(struct platform_device *dev) if (!tmio) return -ENOMEM; + init_completion(&tmio->comp); + tmio->dev = dev; platform_set_drvdata(dev, tmio); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 479c2f2cf17f..fa87ae28cdfe 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, struct nand_device *nand = spinand_to_nand(spinand); struct mtd_info *mtd = nanddev_to_mtd(nand); struct nand_page_io_req adjreq = *req; - unsigned int nbytes = 0; - void *buf = NULL; + void *buf = spinand->databuf; + unsigned int nbytes; u16 column = 0; int ret; - memset(spinand->databuf, 0xff, - nanddev_page_size(nand) + - nanddev_per_page_oobsize(nand)); + /* + * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset + * the cache content to 0xFF (depends on vendor implementation), so we + * must fill the page cache entirely even if we only want to program + * the data portion of the page, otherwise we might corrupt the BBM or + * user data previously programmed in OOB area. + */ + nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); + memset(spinand->databuf, 0xff, nbytes); + adjreq.dataoffs = 0; + adjreq.datalen = nanddev_page_size(nand); + adjreq.databuf.out = spinand->databuf; + adjreq.ooblen = nanddev_per_page_oobsize(nand); + adjreq.ooboffs = 0; + adjreq.oobbuf.out = spinand->oobbuf; - if (req->datalen) { + if (req->datalen) memcpy(spinand->databuf + req->dataoffs, req->databuf.out, req->datalen); - adjreq.dataoffs = 0; - adjreq.datalen = nanddev_page_size(nand); - adjreq.databuf.out = spinand->databuf; - nbytes = adjreq.datalen; - buf = spinand->databuf; - } if (req->ooblen) { if (req->mode == MTD_OPS_AUTO_OOB) @@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, else memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, req->ooblen); - - adjreq.ooblen = nanddev_per_page_oobsize(nand); - adjreq.ooboffs = 0; - nbytes += nanddev_per_page_oobsize(nand); - if (!buf) { - buf = spinand->oobbuf; - column = nanddev_page_size(nand); - } } spinand_cache_op_adjust_colum(spinand, &adjreq, &column); @@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, /* * We need to use the RANDOM LOAD CACHE operation if there's - * more than one iteration, because the LOAD operation resets - * the cache to 0xff. + * more than one iteration, because the LOAD operation might + * reset the cache to 0xff. 
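For illustration, the buffer-filling approach adopted by the spinand_write_to_cache_op() change above can be sketched in plain userspace C: the whole page-plus-OOB cache image is first set to 0xff (the erased state), and only the bytes the caller actually asked to program are copied in, so the bad-block marker and any previously programmed OOB bytes are never overwritten with stale data. The sizes and the io_req struct below are illustrative assumptions, not the driver's real types.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE 2048
#define OOB_SIZE  64

struct io_req {
	size_t dataoffs, datalen;   /* data region the caller wants to write */
	size_t ooboffs, ooblen;     /* OOB region the caller wants to write */
	const uint8_t *databuf;
	const uint8_t *oobbuf;
};

/* Build the full page + OOB image that would be sent with PROGRAM LOAD. */
static void build_cache_image(uint8_t *cache, const struct io_req *req)
{
	/* Erased flash reads back as 0xff, so pad everything we do not touch. */
	memset(cache, 0xff, PAGE_SIZE + OOB_SIZE);

	if (req->datalen)
		memcpy(cache + req->dataoffs, req->databuf, req->datalen);
	if (req->ooblen)
		memcpy(cache + PAGE_SIZE + req->ooboffs, req->oobbuf, req->ooblen);
}

int main(void)
{
	static uint8_t cache[PAGE_SIZE + OOB_SIZE];
	const uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };
	struct io_req req = { .dataoffs = 512, .datalen = 4, .databuf = payload };

	build_cache_image(cache, &req);
	/* Bytes outside [512, 516) stay 0xff, so the BBM byte in OOB is untouched. */
	printf("%02x %02x\n", cache[511], cache[512]);  /* prints: ff de */
	return 0;
}

Sending a deterministic full-page image is what makes the write safe even on parts whose PROGRAM LOAD does not reset the cache to 0xff.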
*/ if (nbytes) { column = op.addr.val; @@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand) for (i = 0; i < nand->memorg.ntargets; i++) { ret = spinand_select_target(spinand, i); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); if (ret) - goto err_free_bufs; + goto err_manuf_cleanup; } ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c index e4141c20947a..0b49d8264bef 100644 --- a/drivers/mtd/nand/spi/gigadevice.c +++ b/drivers/mtd/nand/spi/gigadevice.c @@ -12,6 +12,8 @@ #define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4) #define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4) +#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0 + static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -81,11 +83,83 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand, return -EINVAL; } +static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section) + return -ERANGE; + + region->offset = 64; + region->length = 64; + + return 0; +} + +static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *region) +{ + if (section) + return -ERANGE; + + /* Reserve 1 bytes for the BBM. */ + region->offset = 1; + region->length = 63; + + return 0; +} + +static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, + u8 status) +{ + u8 status2; + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQ4UEXXG_REG_STATUS2, + &status2); + int ret; + + switch (status & STATUS_ECC_MASK) { + case STATUS_ECC_NO_BITFLIPS: + return 0; + + case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS: + /* + * Read status2 register to determine a more fine grained + * bit error status + */ + ret = spi_mem_exec_op(spinand->spimem, &op); + if (ret) + return ret; + + /* + * 4 ... 
7 bits are flipped (1..4 can't be detected, so + * report the maximum of 4 in this case + */ + /* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */ + return ((status & STATUS_ECC_MASK) >> 2) | + ((status2 & STATUS_ECC_MASK) >> 4); + + case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS: + return 8; + + case STATUS_ECC_UNCOR_ERROR: + return -EBADMSG; + + default: + break; + } + + return -EINVAL; +} + static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = { .ecc = gd5fxgq4xa_ooblayout_ecc, .free = gd5fxgq4xa_ooblayout_free, }; +static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = { + .ecc = gd5fxgq4uexxg_ooblayout_ecc, + .free = gd5fxgq4uexxg_ooblayout_free, +}; + static const struct spinand_info gigadevice_spinand_table[] = { SPINAND_INFO("GD5F1GQ4xA", 0xF1, NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1), @@ -114,6 +188,15 @@ static const struct spinand_info gigadevice_spinand_table[] = { 0, SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout, gd5fxgq4xa_ecc_get_status)), + SPINAND_INFO("GD5F1GQ4UExxG", 0xd1, + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout, + gd5fxgq4uexxg_ecc_get_status)), }; static int gigadevice_spinand_detect(struct spinand_device *spinand) diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 98f6b9c4b684..d16b57081c95 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -10,6 +10,7 @@ #include #define SPINAND_MFR_MACRONIX 0xC2 +#define MACRONIX_ECCSR_MASK 0x0F static SPINAND_OP_VARIANTS(read_cache_variants, SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), @@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr) SPI_MEM_OP_DUMMY(1, 1), SPI_MEM_OP_DATA_IN(1, eccsr, 1)); - return spi_mem_exec_op(spinand->spimem, &op); + int ret = spi_mem_exec_op(spinand->spimem, &op); + if (ret) + return ret; + + *eccsr &= MACRONIX_ECCSR_MASK; + return 0; } static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand, diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 081265557e70..db8021da45b5 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -25,19 +25,19 @@ static SPINAND_OP_VARIANTS(write_cache_variants, static SPINAND_OP_VARIANTS(update_cache_variants, SPINAND_PROG_LOAD(false, 0, NULL, 0)); -static int tc58cvg2s0h_ooblayout_ecc(struct mtd_info *mtd, int section, +static int tc58cxgxsx_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { - if (section > 7) + if (section > 0) return -ERANGE; - region->offset = 128 + 16 * section; - region->length = 16; + region->offset = mtd->oobsize / 2; + region->length = mtd->oobsize / 2; return 0; } -static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section, +static int tc58cxgxsx_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *region) { if (section > 0) @@ -45,17 +45,17 @@ static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section, /* 2 bytes reserved for BBM */ region->offset = 2; - region->length = 126; + region->length = (mtd->oobsize / 2) - 2; return 0; } -static const struct mtd_ooblayout_ops tc58cvg2s0h_ooblayout = { - .ecc = tc58cvg2s0h_ooblayout_ecc, - .free = tc58cvg2s0h_ooblayout_free, +static const struct mtd_ooblayout_ops tc58cxgxsx_ooblayout = { + .ecc = tc58cxgxsx_ooblayout_ecc, + .free = 
tc58cxgxsx_ooblayout_free, }; -static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand, +static int tc58cxgxsx_ecc_get_status(struct spinand_device *spinand, u8 status) { struct nand_device *nand = spinand_to_nand(spinand); @@ -94,15 +94,66 @@ static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand, } static const struct spinand_info toshiba_spinand_table[] = { - SPINAND_INFO("TC58CVG2S0H", 0xCD, + /* 3.3V 1Gb */ + SPINAND_INFO("TC58CVG0S3", 0xC2, + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 3.3V 2Gb */ + SPINAND_INFO("TC58CVG1S3", 0xCB, + NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 3.3V 4Gb */ + SPINAND_INFO("TC58CVG2S0", 0xCD, + NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 1.8V 1Gb */ + SPINAND_INFO("TC58CYG0S3", 0xB2, + NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 1.8V 2Gb */ + SPINAND_INFO("TC58CYG1S3", 0xBB, + NAND_MEMORG(1, 2048, 128, 64, 2048, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), + /* 1.8V 4Gb */ + SPINAND_INFO("TC58CYG2S0", 0xBD, NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1), NAND_ECCREQ(8, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, &write_cache_variants, &update_cache_variants), - SPINAND_HAS_QE_BIT, - SPINAND_ECCINFO(&tc58cvg2s0h_ooblayout, - tc58cvg2s0h_ecc_get_status)), + 0, + SPINAND_ECCINFO(&tc58cxgxsx_ooblayout, + tc58cxgxsx_ecc_get_status)), }; static int toshiba_spinand_detect(struct spinand_device *spinand) diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 44fe8018733c..dab986691267 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -7,14 +7,6 @@ menuconfig MTD_SPI_NOR if MTD_SPI_NOR -config MTD_MT81xx_NOR - tristate "Mediatek MT81xx SPI NOR flash controller" - depends on HAS_IOMEM - help - This enables access to SPI NOR flash, using MT81xx SPI NOR flash - controller. This controller does not support generic SPI BUS, it only - supports SPI NOR Flash. - config MTD_SPI_NOR_USE_4K_SECTORS bool "Use small 4096 B erase sectors" default y @@ -50,15 +42,6 @@ config SPI_CADENCE_QUADSPI device with a Cadence QSPI controller and want to access the Flash as an MTD device. -config SPI_FSL_QUADSPI - tristate "Freescale Quad SPI controller" - depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST - depends on HAS_IOMEM - help - This enables support for the Quad SPI controller in master mode. - This controller does not support generic SPI. It only supports - SPI NOR. 
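As a side note on the Toshiba SPI-NAND rework earlier in this patch: the generalized tc58cxgxsx layout callbacks derive both OOB regions from mtd->oobsize instead of hard-coding the 256-byte layout of the TC58CVG2S0H. A minimal standalone sketch of that split (plain C, not the mtd_ooblayout_ops API; the function name is illustrative) is:

#include <stdio.h>

struct oob_region { unsigned int offset, length; };

/* Upper half of the OOB area holds ECC; lower half, minus the 2-byte
 * bad-block marker, is free for the user. */
static void tc58cxgxsx_split(unsigned int oobsize,
			     struct oob_region *ecc, struct oob_region *user)
{
	ecc->offset  = oobsize / 2;
	ecc->length  = oobsize / 2;
	user->offset = 2;                 /* 2 bytes reserved for the BBM */
	user->length = oobsize / 2 - 2;
}

int main(void)
{
	struct oob_region ecc, user;

	/* 128-byte OOB (1Gb/2Gb parts) and 256-byte OOB (4Gb parts). */
	for (unsigned int oobsize = 128; oobsize <= 256; oobsize *= 2) {
		tc58cxgxsx_split(oobsize, &ecc, &user);
		printf("oobsize %3u: ecc %u+%u, free %u+%u\n", oobsize,
		       ecc.offset, ecc.length, user.offset, user.length);
	}
	return 0;
}

For the 256-byte-OOB parts this reproduces the old fixed layout (free area of 126 bytes, 128 bytes of ECC), while also covering the newly added 128-byte-OOB devices.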
- config SPI_HISI_SFC tristate "Hisilicon SPI-NOR Flash Controller(SFC)" depends on ARCH_HISI || COMPILE_TEST @@ -66,6 +49,14 @@ config SPI_HISI_SFC help This enables support for hisilicon SPI-NOR flash controller. +config SPI_MTK_QUADSPI + tristate "MediaTek Quad SPI controller" + depends on HAS_IOMEM + help + This enables support for the Quad SPI controller in master mode. + This controller does not support generic SPI. It only supports + SPI NOR. + config SPI_NXP_SPIFI tristate "NXP SPI Flash Interface (SPIFI)" depends on OF && (ARCH_LPC18XX || COMPILE_TEST) diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index a552efd22958..189a15cca3ec 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -2,9 +2,8 @@ obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o -obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o -obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o +obj-$(CONFIG_SPI_MTK_QUADSPI) += mtk-quadspi.o obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 04cedd3a2bf6..792628750eec 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -44,6 +44,12 @@ /* Quirks */ #define CQSPI_NEEDS_WR_DELAY BIT(0) +/* Capabilities mask */ +#define CQSPI_BASE_HWCAPS_MASK \ + (SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | \ + SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_READ_1_1_4 | \ + SNOR_HWCAPS_PP) + struct cqspi_st; struct cqspi_flash_pdata { @@ -93,6 +99,11 @@ struct cqspi_st { struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; }; +struct cqspi_driver_platdata { + u32 hwcaps_mask; + u8 quirks; +}; + /* Operation timeout value */ #define CQSPI_TIMEOUT_MS 500 #define CQSPI_READ_TIMEOUT_MS 10 @@ -101,6 +112,7 @@ struct cqspi_st { #define CQSPI_INST_TYPE_SINGLE 0 #define CQSPI_INST_TYPE_DUAL 1 #define CQSPI_INST_TYPE_QUAD 2 +#define CQSPI_INST_TYPE_OCTAL 3 #define CQSPI_DUMMY_CLKS_PER_BYTE 8 #define CQSPI_DUMMY_BYTES_MAX 4 @@ -418,9 +430,10 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, void __iomem *reg_base = cqspi->iobase; unsigned int reg; unsigned int data; + u32 write_len; int ret; - if (n_tx > 4 || (n_tx && !txbuf)) { + if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) { dev_err(nor->dev, "Invalid input argument, cmdlen %d txbuf 0x%p\n", n_tx, txbuf); @@ -433,10 +446,18 @@ static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) << CQSPI_REG_CMDCTRL_WR_BYTES_LSB; data = 0; - memcpy(&data, txbuf, n_tx); + write_len = (n_tx > 4) ? 
4 : n_tx; + memcpy(&data, txbuf, write_len); + txbuf += write_len; writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER); - } + if (n_tx > 4) { + data = 0; + write_len = n_tx - 4; + memcpy(&data, txbuf, write_len); + writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER); + } + } ret = cqspi_exec_flash_cmd(cqspi, reg); return ret; } @@ -911,6 +932,9 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read) case SNOR_PROTO_1_1_4: f_pdata->data_width = CQSPI_INST_TYPE_QUAD; break; + case SNOR_PROTO_1_1_8: + f_pdata->data_width = CQSPI_INST_TYPE_OCTAL; + break; default: return -EINVAL; } @@ -1213,21 +1237,23 @@ static void cqspi_request_mmap_dma(struct cqspi_st *cqspi) static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) { - const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ | - SNOR_HWCAPS_READ_FAST | - SNOR_HWCAPS_READ_1_1_2 | - SNOR_HWCAPS_READ_1_1_4 | - SNOR_HWCAPS_PP, - }; struct platform_device *pdev = cqspi->pdev; struct device *dev = &pdev->dev; + const struct cqspi_driver_platdata *ddata; + struct spi_nor_hwcaps hwcaps; struct cqspi_flash_pdata *f_pdata; struct spi_nor *nor; struct mtd_info *mtd; unsigned int cs; int i, ret; + ddata = of_device_get_match_data(dev); + if (!ddata) { + dev_err(dev, "Couldn't find driver data\n"); + return -EINVAL; + } + hwcaps.mask = ddata->hwcaps_mask; + /* Get flash device data */ for_each_available_child_of_node(dev->of_node, np) { ret = of_property_read_u32(np, "reg", &cs); @@ -1310,7 +1336,7 @@ static int cqspi_probe(struct platform_device *pdev) struct cqspi_st *cqspi; struct resource *res; struct resource *res_ahb; - unsigned long data; + const struct cqspi_driver_platdata *ddata; int ret; int irq; @@ -1377,8 +1403,8 @@ static int cqspi_probe(struct platform_device *pdev) } cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); - data = (unsigned long)of_device_get_match_data(dev); - if (data & CQSPI_NEEDS_WR_DELAY) + ddata = of_device_get_match_data(dev); + if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY)) cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, cqspi->master_ref_clk_hz); @@ -1460,14 +1486,32 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = { #define CQSPI_DEV_PM_OPS NULL #endif +static const struct cqspi_driver_platdata cdns_qspi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK, +}; + +static const struct cqspi_driver_platdata k2g_qspi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK, + .quirks = CQSPI_NEEDS_WR_DELAY, +}; + +static const struct cqspi_driver_platdata am654_ospi = { + .hwcaps_mask = CQSPI_BASE_HWCAPS_MASK | SNOR_HWCAPS_READ_1_1_8, + .quirks = CQSPI_NEEDS_WR_DELAY, +}; + static const struct of_device_id cqspi_dt_ids[] = { { .compatible = "cdns,qspi-nor", - .data = (void *)0, + .data = &cdns_qspi, }, { .compatible = "ti,k2g-qspi", - .data = (void *)CQSPI_NEEDS_WR_DELAY, + .data = &k2g_qspi, + }, + { + .compatible = "ti,am654-ospi", + .data = &am654_ospi, }, { /* end of table */ } }; diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c deleted file mode 100644 index 1ff3430f82c8..000000000000 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ /dev/null @@ -1,1224 +0,0 @@ -/* - * Freescale QuadSPI driver. - * - * Copyright (C) 2013 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
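For context on the cadence-quadspi change above: instead of casting the of_device_id .data pointer to an integer of quirk flags, each compatible now points at a small platdata struct, so more than one field (a capabilities mask plus quirks) can be carried per SoC. A standalone sketch of that match-data pattern follows; the names, mask values, and the get_match_data() helper are illustrative stand-ins, not the driver's or the OF core's real API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NEEDS_WR_DELAY (1u << 0)

struct driver_platdata {
	uint32_t hwcaps_mask;
	uint8_t quirks;
};

struct of_match {
	const char *compatible;
	const struct driver_platdata *data;
};

static const struct driver_platdata base_qspi = { .hwcaps_mask = 0x1f };
static const struct driver_platdata k2g_qspi  = { .hwcaps_mask = 0x1f,
						  .quirks = NEEDS_WR_DELAY };

static const struct of_match match_table[] = {
	{ "cdns,qspi-nor", &base_qspi },
	{ "ti,k2g-qspi",   &k2g_qspi  },
	{ NULL, NULL },
};

/* Stand-in for looking up the per-compatible data at probe time. */
static const struct driver_platdata *get_match_data(const char *compatible)
{
	for (const struct of_match *m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct driver_platdata *ddata = get_match_data("ti,k2g-qspi");

	if (ddata && (ddata->quirks & NEEDS_WR_DELAY))
		printf("hwcaps 0x%x, write delay quirk enabled\n",
		       (unsigned int)ddata->hwcaps_mask);
	return 0;
}

The struct also makes it straightforward to mark the new am654 compatible as octal-capable without further casts.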
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Controller needs driver to swap endian */ -#define QUADSPI_QUIRK_SWAP_ENDIAN (1 << 0) -/* Controller needs 4x internal clock */ -#define QUADSPI_QUIRK_4X_INT_CLK (1 << 1) -/* - * TKT253890, Controller needs driver to fill txfifo till 16 byte to - * trigger data transfer even though extern data will not transferred. - */ -#define QUADSPI_QUIRK_TKT253890 (1 << 2) -/* Controller cannot wake up from wait mode, TKT245618 */ -#define QUADSPI_QUIRK_TKT245618 (1 << 3) - -/* The registers */ -#define QUADSPI_MCR 0x00 -#define QUADSPI_MCR_RESERVED_SHIFT 16 -#define QUADSPI_MCR_RESERVED_MASK (0xF << QUADSPI_MCR_RESERVED_SHIFT) -#define QUADSPI_MCR_MDIS_SHIFT 14 -#define QUADSPI_MCR_MDIS_MASK (1 << QUADSPI_MCR_MDIS_SHIFT) -#define QUADSPI_MCR_CLR_TXF_SHIFT 11 -#define QUADSPI_MCR_CLR_TXF_MASK (1 << QUADSPI_MCR_CLR_TXF_SHIFT) -#define QUADSPI_MCR_CLR_RXF_SHIFT 10 -#define QUADSPI_MCR_CLR_RXF_MASK (1 << QUADSPI_MCR_CLR_RXF_SHIFT) -#define QUADSPI_MCR_DDR_EN_SHIFT 7 -#define QUADSPI_MCR_DDR_EN_MASK (1 << QUADSPI_MCR_DDR_EN_SHIFT) -#define QUADSPI_MCR_END_CFG_SHIFT 2 -#define QUADSPI_MCR_END_CFG_MASK (3 << QUADSPI_MCR_END_CFG_SHIFT) -#define QUADSPI_MCR_SWRSTHD_SHIFT 1 -#define QUADSPI_MCR_SWRSTHD_MASK (1 << QUADSPI_MCR_SWRSTHD_SHIFT) -#define QUADSPI_MCR_SWRSTSD_SHIFT 0 -#define QUADSPI_MCR_SWRSTSD_MASK (1 << QUADSPI_MCR_SWRSTSD_SHIFT) - -#define QUADSPI_IPCR 0x08 -#define QUADSPI_IPCR_SEQID_SHIFT 24 -#define QUADSPI_IPCR_SEQID_MASK (0xF << QUADSPI_IPCR_SEQID_SHIFT) - -#define QUADSPI_BUF0CR 0x10 -#define QUADSPI_BUF1CR 0x14 -#define QUADSPI_BUF2CR 0x18 -#define QUADSPI_BUFXCR_INVALID_MSTRID 0xe - -#define QUADSPI_BUF3CR 0x1c -#define QUADSPI_BUF3CR_ALLMST_SHIFT 31 -#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT) -#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8 -#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT) - -#define QUADSPI_BFGENCR 0x20 -#define QUADSPI_BFGENCR_PAR_EN_SHIFT 16 -#define QUADSPI_BFGENCR_PAR_EN_MASK (1 << (QUADSPI_BFGENCR_PAR_EN_SHIFT)) -#define QUADSPI_BFGENCR_SEQID_SHIFT 12 -#define QUADSPI_BFGENCR_SEQID_MASK (0xF << QUADSPI_BFGENCR_SEQID_SHIFT) - -#define QUADSPI_BUF0IND 0x30 -#define QUADSPI_BUF1IND 0x34 -#define QUADSPI_BUF2IND 0x38 -#define QUADSPI_SFAR 0x100 - -#define QUADSPI_SMPR 0x108 -#define QUADSPI_SMPR_DDRSMP_SHIFT 16 -#define QUADSPI_SMPR_DDRSMP_MASK (7 << QUADSPI_SMPR_DDRSMP_SHIFT) -#define QUADSPI_SMPR_FSDLY_SHIFT 6 -#define QUADSPI_SMPR_FSDLY_MASK (1 << QUADSPI_SMPR_FSDLY_SHIFT) -#define QUADSPI_SMPR_FSPHS_SHIFT 5 -#define QUADSPI_SMPR_FSPHS_MASK (1 << QUADSPI_SMPR_FSPHS_SHIFT) -#define QUADSPI_SMPR_HSENA_SHIFT 0 -#define QUADSPI_SMPR_HSENA_MASK (1 << QUADSPI_SMPR_HSENA_SHIFT) - -#define QUADSPI_RBSR 0x10c -#define QUADSPI_RBSR_RDBFL_SHIFT 8 -#define QUADSPI_RBSR_RDBFL_MASK (0x3F << QUADSPI_RBSR_RDBFL_SHIFT) - -#define QUADSPI_RBCT 0x110 -#define QUADSPI_RBCT_WMRK_MASK 0x1F -#define QUADSPI_RBCT_RXBRD_SHIFT 8 -#define QUADSPI_RBCT_RXBRD_USEIPS (0x1 << QUADSPI_RBCT_RXBRD_SHIFT) - -#define QUADSPI_TBSR 0x150 -#define QUADSPI_TBDR 0x154 -#define QUADSPI_SR 0x15c -#define QUADSPI_SR_IP_ACC_SHIFT 1 -#define QUADSPI_SR_IP_ACC_MASK (0x1 << QUADSPI_SR_IP_ACC_SHIFT) -#define QUADSPI_SR_AHB_ACC_SHIFT 2 -#define QUADSPI_SR_AHB_ACC_MASK (0x1 << QUADSPI_SR_AHB_ACC_SHIFT) - -#define QUADSPI_FR 0x160 -#define QUADSPI_FR_TFF_MASK 
0x1 - -#define QUADSPI_SFA1AD 0x180 -#define QUADSPI_SFA2AD 0x184 -#define QUADSPI_SFB1AD 0x188 -#define QUADSPI_SFB2AD 0x18c -#define QUADSPI_RBDR 0x200 - -#define QUADSPI_LUTKEY 0x300 -#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0 - -#define QUADSPI_LCKCR 0x304 -#define QUADSPI_LCKER_LOCK 0x1 -#define QUADSPI_LCKER_UNLOCK 0x2 - -#define QUADSPI_RSER 0x164 -#define QUADSPI_RSER_TFIE (0x1 << 0) - -#define QUADSPI_LUT_BASE 0x310 - -/* - * The definition of the LUT register shows below: - * - * --------------------------------------------------- - * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | - * --------------------------------------------------- - */ -#define OPRND0_SHIFT 0 -#define PAD0_SHIFT 8 -#define INSTR0_SHIFT 10 -#define OPRND1_SHIFT 16 - -/* Instruction set for the LUT register. */ -#define LUT_STOP 0 -#define LUT_CMD 1 -#define LUT_ADDR 2 -#define LUT_DUMMY 3 -#define LUT_MODE 4 -#define LUT_MODE2 5 -#define LUT_MODE4 6 -#define LUT_FSL_READ 7 -#define LUT_FSL_WRITE 8 -#define LUT_JMP_ON_CS 9 -#define LUT_ADDR_DDR 10 -#define LUT_MODE_DDR 11 -#define LUT_MODE2_DDR 12 -#define LUT_MODE4_DDR 13 -#define LUT_FSL_READ_DDR 14 -#define LUT_FSL_WRITE_DDR 15 -#define LUT_DATA_LEARN 16 - -/* - * The PAD definitions for LUT register. - * - * The pad stands for the lines number of IO[0:3]. - * For example, the Quad read need four IO lines, so you should - * set LUT_PAD4 which means we use four IO lines. - */ -#define LUT_PAD1 0 -#define LUT_PAD2 1 -#define LUT_PAD4 2 - -/* Oprands for the LUT register. */ -#define ADDR24BIT 0x18 -#define ADDR32BIT 0x20 - -/* Macros for constructing the LUT register. */ -#define LUT0(ins, pad, opr) \ - (((opr) << OPRND0_SHIFT) | ((LUT_##pad) << PAD0_SHIFT) | \ - ((LUT_##ins) << INSTR0_SHIFT)) - -#define LUT1(ins, pad, opr) (LUT0(ins, pad, opr) << OPRND1_SHIFT) - -/* other macros for LUT register. */ -#define QUADSPI_LUT(x) (QUADSPI_LUT_BASE + (x) * 4) -#define QUADSPI_LUT_NUM 64 - -/* SEQID -- we can have 16 seqids at most. 
*/ -#define SEQID_READ 0 -#define SEQID_WREN 1 -#define SEQID_WRDI 2 -#define SEQID_RDSR 3 -#define SEQID_SE 4 -#define SEQID_CHIP_ERASE 5 -#define SEQID_PP 6 -#define SEQID_RDID 7 -#define SEQID_WRSR 8 -#define SEQID_RDCR 9 -#define SEQID_EN4B 10 -#define SEQID_BRWR 11 - -#define QUADSPI_MIN_IOMAP SZ_4M - -enum fsl_qspi_devtype { - FSL_QUADSPI_VYBRID, - FSL_QUADSPI_IMX6SX, - FSL_QUADSPI_IMX7D, - FSL_QUADSPI_IMX6UL, - FSL_QUADSPI_LS1021A, - FSL_QUADSPI_LS2080A, -}; - -struct fsl_qspi_devtype_data { - enum fsl_qspi_devtype devtype; - int rxfifo; - int txfifo; - int ahb_buf_size; - int driver_data; -}; - -static const struct fsl_qspi_devtype_data vybrid_data = { - .devtype = FSL_QUADSPI_VYBRID, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_SWAP_ENDIAN, -}; - -static const struct fsl_qspi_devtype_data imx6sx_data = { - .devtype = FSL_QUADSPI_IMX6SX, - .rxfifo = 128, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_4X_INT_CLK - | QUADSPI_QUIRK_TKT245618, -}; - -static const struct fsl_qspi_devtype_data imx7d_data = { - .devtype = FSL_QUADSPI_IMX7D, - .rxfifo = 512, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890 - | QUADSPI_QUIRK_4X_INT_CLK, -}; - -static const struct fsl_qspi_devtype_data imx6ul_data = { - .devtype = FSL_QUADSPI_IMX6UL, - .rxfifo = 128, - .txfifo = 512, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890 - | QUADSPI_QUIRK_4X_INT_CLK, -}; - -static struct fsl_qspi_devtype_data ls1021a_data = { - .devtype = FSL_QUADSPI_LS1021A, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = 0, -}; - -static const struct fsl_qspi_devtype_data ls2080a_data = { - .devtype = FSL_QUADSPI_LS2080A, - .rxfifo = 128, - .txfifo = 64, - .ahb_buf_size = 1024, - .driver_data = QUADSPI_QUIRK_TKT253890, -}; - - -#define FSL_QSPI_MAX_CHIP 4 -struct fsl_qspi { - struct spi_nor nor[FSL_QSPI_MAX_CHIP]; - void __iomem *iobase; - void __iomem *ahb_addr; - u32 memmap_phy; - u32 memmap_offs; - u32 memmap_len; - struct clk *clk, *clk_en; - struct device *dev; - struct completion c; - const struct fsl_qspi_devtype_data *devtype_data; - u32 nor_size; - u32 nor_num; - u32 clk_rate; - unsigned int chip_base_addr; /* We may support two chips. */ - bool has_second_chip; - bool big_endian; - struct mutex lock; - struct pm_qos_request pm_qos_req; -}; - -static inline int needs_swap_endian(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN; -} - -static inline int needs_4x_clock(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK; -} - -static inline int needs_fill_txfifo(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890; -} - -static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) -{ - return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618; -} - -/* - * R/W functions for big- or little-endian registers: - * The qSPI controller's endian is independent of the CPU core's endian. - * So far, although the CPU core is little-endian but the qSPI have two - * versions for big-endian and little-endian. - */ -static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) -{ - if (q->big_endian) - iowrite32be(val, addr); - else - iowrite32(val, addr); -} - -static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) -{ - if (q->big_endian) - return ioread32be(addr); - else - return ioread32(addr); -} - -/* - * An IC bug makes us to re-arrange the 32-bit data. 
- * The following chips, such as IMX6SLX, have fixed this bug. - */ -static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) -{ - return needs_swap_endian(q) ? __swab32(a) : a; -} - -static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) -{ - qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); -} - -static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) -{ - qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); - qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); -} - -static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) -{ - struct fsl_qspi *q = dev_id; - u32 reg; - - /* clear interrupt */ - reg = qspi_readl(q, q->iobase + QUADSPI_FR); - qspi_writel(q, reg, q->iobase + QUADSPI_FR); - - if (reg & QUADSPI_FR_TFF_MASK) - complete(&q->c); - - dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", q->chip_base_addr, reg); - return IRQ_HANDLED; -} - -static void fsl_qspi_init_lut(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - int rxfifo = q->devtype_data->rxfifo; - u32 lut_base; - int i; - - struct spi_nor *nor = &q->nor[0]; - u8 addrlen = (nor->addr_width == 3) ? ADDR24BIT : ADDR32BIT; - u8 read_op = nor->read_opcode; - u8 read_dm = nor->read_dummy; - - fsl_qspi_unlock_lut(q); - - /* Clear all the LUT table */ - for (i = 0; i < QUADSPI_LUT_NUM; i++) - qspi_writel(q, 0, base + QUADSPI_LUT_BASE + i * 4); - - /* Read */ - lut_base = SEQID_READ * 4; - - qspi_writel(q, LUT0(CMD, PAD1, read_op) | LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - qspi_writel(q, LUT0(DUMMY, PAD1, read_dm) | - LUT1(FSL_READ, PAD4, rxfifo), - base + QUADSPI_LUT(lut_base + 1)); - - /* Write enable */ - lut_base = SEQID_WREN * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WREN), - base + QUADSPI_LUT(lut_base)); - - /* Page Program */ - lut_base = SEQID_PP * 4; - - qspi_writel(q, LUT0(CMD, PAD1, nor->program_opcode) | - LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - qspi_writel(q, LUT0(FSL_WRITE, PAD1, 0), - base + QUADSPI_LUT(lut_base + 1)); - - /* Read Status */ - lut_base = SEQID_RDSR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDSR) | - LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Erase a sector */ - lut_base = SEQID_SE * 4; - - qspi_writel(q, LUT0(CMD, PAD1, nor->erase_opcode) | - LUT1(ADDR, PAD1, addrlen), - base + QUADSPI_LUT(lut_base)); - - /* Erase the whole chip */ - lut_base = SEQID_CHIP_ERASE * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_CHIP_ERASE), - base + QUADSPI_LUT(lut_base)); - - /* READ ID */ - lut_base = SEQID_RDID * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDID) | - LUT1(FSL_READ, PAD1, 0x8), - base + QUADSPI_LUT(lut_base)); - - /* Write Register */ - lut_base = SEQID_WRSR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRSR) | - LUT1(FSL_WRITE, PAD1, 0x2), - base + QUADSPI_LUT(lut_base)); - - /* Read Configuration Register */ - lut_base = SEQID_RDCR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_RDCR) | - LUT1(FSL_READ, PAD1, 0x1), - base + QUADSPI_LUT(lut_base)); - - /* Write disable */ - lut_base = SEQID_WRDI * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_WRDI), - base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Micron) */ - lut_base = SEQID_EN4B * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_EN4B), - base + QUADSPI_LUT(lut_base)); - - /* Enter 4 Byte Mode (Spansion) */ - lut_base = SEQID_BRWR * 4; - qspi_writel(q, LUT0(CMD, PAD1, SPINOR_OP_BRWR), - base + QUADSPI_LUT(lut_base)); - - 
fsl_qspi_lock_lut(q); -} - -/* Get the SEQID for the command */ -static int fsl_qspi_get_seqid(struct fsl_qspi *q, u8 cmd) -{ - switch (cmd) { - case SPINOR_OP_READ_1_1_4: - case SPINOR_OP_READ_1_1_4_4B: - return SEQID_READ; - case SPINOR_OP_WREN: - return SEQID_WREN; - case SPINOR_OP_WRDI: - return SEQID_WRDI; - case SPINOR_OP_RDSR: - return SEQID_RDSR; - case SPINOR_OP_SE: - return SEQID_SE; - case SPINOR_OP_CHIP_ERASE: - return SEQID_CHIP_ERASE; - case SPINOR_OP_PP: - return SEQID_PP; - case SPINOR_OP_RDID: - return SEQID_RDID; - case SPINOR_OP_WRSR: - return SEQID_WRSR; - case SPINOR_OP_RDCR: - return SEQID_RDCR; - case SPINOR_OP_EN4B: - return SEQID_EN4B; - case SPINOR_OP_BRWR: - return SEQID_BRWR; - default: - if (cmd == q->nor[0].erase_opcode) - return SEQID_SE; - dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd); - break; - } - return -EINVAL; -} - -static int -fsl_qspi_runcmd(struct fsl_qspi *q, u8 cmd, unsigned int addr, int len) -{ - void __iomem *base = q->iobase; - int seqid; - u32 reg, reg2; - int err; - - init_completion(&q->c); - dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len:%d, cmd:%.2x\n", - q->chip_base_addr, addr, len, cmd); - - /* save the reg */ - reg = qspi_readl(q, base + QUADSPI_MCR); - - qspi_writel(q, q->memmap_phy + q->chip_base_addr + addr, - base + QUADSPI_SFAR); - qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | QUADSPI_RBCT_RXBRD_USEIPS, - base + QUADSPI_RBCT); - qspi_writel(q, reg | QUADSPI_MCR_CLR_RXF_MASK, base + QUADSPI_MCR); - - do { - reg2 = qspi_readl(q, base + QUADSPI_SR); - if (reg2 & (QUADSPI_SR_IP_ACC_MASK | QUADSPI_SR_AHB_ACC_MASK)) { - udelay(1); - dev_dbg(q->dev, "The controller is busy, 0x%x\n", reg2); - continue; - } - break; - } while (1); - - /* trigger the LUT now */ - seqid = fsl_qspi_get_seqid(q, cmd); - if (seqid < 0) - return seqid; - - qspi_writel(q, (seqid << QUADSPI_IPCR_SEQID_SHIFT) | len, - base + QUADSPI_IPCR); - - /* Wait for the interrupt. */ - if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) { - dev_err(q->dev, - "cmd 0x%.2x timeout, addr@%.8x, FR:0x%.8x, SR:0x%.8x\n", - cmd, addr, qspi_readl(q, base + QUADSPI_FR), - qspi_readl(q, base + QUADSPI_SR)); - err = -ETIMEDOUT; - } else { - err = 0; - } - - /* restore the MCR */ - qspi_writel(q, reg, base + QUADSPI_MCR); - - return err; -} - -/* Read out the data from the QUADSPI_RBDR buffer registers. */ -static void fsl_qspi_read_data(struct fsl_qspi *q, int len, u8 *rxbuf) -{ - u32 tmp; - int i = 0; - - while (len > 0) { - tmp = qspi_readl(q, q->iobase + QUADSPI_RBDR + i * 4); - tmp = fsl_qspi_endian_xchg(q, tmp); - dev_dbg(q->dev, "chip addr:0x%.8x, rcv:0x%.8x\n", - q->chip_base_addr, tmp); - - if (len >= 4) { - *((u32 *)rxbuf) = tmp; - rxbuf += 4; - } else { - memcpy(rxbuf, &tmp, len); - break; - } - - len -= 4; - i++; - } -} - -/* - * If we have changed the content of the flash by writing or erasing, - * we need to invalidate the AHB buffer. If we do not do so, we may read out - * the wrong data. The spec tells us reset the AHB domain and Serial Flash - * domain at the same time. - */ -static inline void fsl_qspi_invalid(struct fsl_qspi *q) -{ - u32 reg; - - reg = qspi_readl(q, q->iobase + QUADSPI_MCR); - reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; - qspi_writel(q, reg, q->iobase + QUADSPI_MCR); - - /* - * The minimum delay : 1 AHB + 2 SFCK clocks. - * Delay 1 us is enough. 
- */ - udelay(1); - - reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); - qspi_writel(q, reg, q->iobase + QUADSPI_MCR); -} - -static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, - u8 opcode, unsigned int to, u32 *txbuf, - unsigned count) -{ - int ret, i, j; - u32 tmp; - - dev_dbg(q->dev, "to 0x%.8x:0x%.8x, len : %d\n", - q->chip_base_addr, to, count); - - /* clear the TX FIFO. */ - tmp = qspi_readl(q, q->iobase + QUADSPI_MCR); - qspi_writel(q, tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR); - - /* fill the TX data to the FIFO */ - for (j = 0, i = ((count + 3) / 4); j < i; j++) { - tmp = fsl_qspi_endian_xchg(q, *txbuf); - qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - txbuf++; - } - - /* fill the TXFIFO upto 16 bytes for i.MX7d */ - if (needs_fill_txfifo(q)) - for (; i < 4; i++) - qspi_writel(q, tmp, q->iobase + QUADSPI_TBDR); - - /* Trigger it */ - ret = fsl_qspi_runcmd(q, opcode, to, count); - - if (ret == 0) - return count; - - return ret; -} - -static void fsl_qspi_set_map_addr(struct fsl_qspi *q) -{ - int nor_size = q->nor_size; - void __iomem *base = q->iobase; - - qspi_writel(q, nor_size + q->memmap_phy, base + QUADSPI_SFA1AD); - qspi_writel(q, nor_size * 2 + q->memmap_phy, base + QUADSPI_SFA2AD); - qspi_writel(q, nor_size * 3 + q->memmap_phy, base + QUADSPI_SFB1AD); - qspi_writel(q, nor_size * 4 + q->memmap_phy, base + QUADSPI_SFB2AD); -} - -/* - * There are two different ways to read out the data from the flash: - * the "IP Command Read" and the "AHB Command Read". - * - * The IC guy suggests we use the "AHB Command Read" which is faster - * then the "IP Command Read". (What's more is that there is a bug in - * the "IP Command Read" in the Vybrid.) - * - * After we set up the registers for the "AHB Command Read", we can use - * the memcpy to read the data directly. A "missed" access to the buffer - * causes the controller to clear the buffer, and use the sequence pointed - * by the QUADSPI_BFGENCR[SEQID] to initiate a read from the flash. - */ -static int fsl_qspi_init_ahb_read(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - int seqid; - - /* AHB configuration for access buffer 0/1/2 .*/ - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); - qspi_writel(q, QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); - /* - * Set ADATSZ with the maximum AHB buffer size to improve the - * read performance. - */ - qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | - ((q->devtype_data->ahb_buf_size / 8) - << QUADSPI_BUF3CR_ADATSZ_SHIFT), - base + QUADSPI_BUF3CR); - - /* We only use the buffer3 */ - qspi_writel(q, 0, base + QUADSPI_BUF0IND); - qspi_writel(q, 0, base + QUADSPI_BUF1IND); - qspi_writel(q, 0, base + QUADSPI_BUF2IND); - - /* Set the default lut sequence for AHB Read. 
*/ - seqid = fsl_qspi_get_seqid(q, q->nor[0].read_opcode); - if (seqid < 0) - return seqid; - - qspi_writel(q, seqid << QUADSPI_BFGENCR_SEQID_SHIFT, - q->iobase + QUADSPI_BFGENCR); - - return 0; -} - -/* This function was used to prepare and enable QSPI clock */ -static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) -{ - int ret; - - ret = clk_prepare_enable(q->clk_en); - if (ret) - return ret; - - ret = clk_prepare_enable(q->clk); - if (ret) { - clk_disable_unprepare(q->clk_en); - return ret; - } - - if (needs_wakeup_wait_mode(q)) - pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); - - return 0; -} - -/* This function was used to disable and unprepare QSPI clock */ -static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) -{ - if (needs_wakeup_wait_mode(q)) - pm_qos_remove_request(&q->pm_qos_req); - - clk_disable_unprepare(q->clk); - clk_disable_unprepare(q->clk_en); - -} - -/* We use this function to do some basic init for spi_nor_scan(). */ -static int fsl_qspi_nor_setup(struct fsl_qspi *q) -{ - void __iomem *base = q->iobase; - u32 reg; - int ret; - - /* disable and unprepare clock to avoid glitch pass to controller */ - fsl_qspi_clk_disable_unprep(q); - - /* the default frequency, we will change it in the future. */ - ret = clk_set_rate(q->clk, 66000000); - if (ret) - return ret; - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - /* Reset the module */ - qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, - base + QUADSPI_MCR); - udelay(1); - - /* Init the LUT table. */ - fsl_qspi_init_lut(q); - - /* Disable the module */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, - base + QUADSPI_MCR); - - reg = qspi_readl(q, base + QUADSPI_SMPR); - qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK - | QUADSPI_SMPR_FSPHS_MASK - | QUADSPI_SMPR_HSENA_MASK - | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); - - /* Enable the module */ - qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, - base + QUADSPI_MCR); - - /* clear all interrupt status */ - qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); - - /* enable the interrupt */ - qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); - - return 0; -} - -static int fsl_qspi_nor_setup_last(struct fsl_qspi *q) -{ - unsigned long rate = q->clk_rate; - int ret; - - if (needs_4x_clock(q)) - rate *= 4; - - /* disable and unprepare clock to avoid glitch pass to controller */ - fsl_qspi_clk_disable_unprep(q); - - ret = clk_set_rate(q->clk, rate); - if (ret) - return ret; - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - /* Init the LUT table again. 
*/ - fsl_qspi_init_lut(q); - - /* Init for AHB read */ - return fsl_qspi_init_ahb_read(q); -} - -static const struct of_device_id fsl_qspi_dt_ids[] = { - { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, }, - { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, }, - { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, }, - { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, }, - { .compatible = "fsl,ls1021a-qspi", .data = (void *)&ls1021a_data, }, - { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); - -static void fsl_qspi_set_base_addr(struct fsl_qspi *q, struct spi_nor *nor) -{ - q->chip_base_addr = q->nor_size * (nor - q->nor); -} - -static int fsl_qspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) -{ - int ret; - struct fsl_qspi *q = nor->priv; - - ret = fsl_qspi_runcmd(q, opcode, 0, len); - if (ret) - return ret; - - fsl_qspi_read_data(q, len, buf); - return 0; -} - -static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - if (!buf) { - ret = fsl_qspi_runcmd(q, opcode, 0, 1); - if (ret) - return ret; - - if (opcode == SPINOR_OP_CHIP_ERASE) - fsl_qspi_invalid(q); - - } else if (len > 0) { - ret = fsl_qspi_nor_write(q, nor, opcode, 0, - (u32 *)buf, len); - if (ret > 0) - return 0; - } else { - dev_err(q->dev, "invalid cmd %d\n", opcode); - ret = -EINVAL; - } - - return ret; -} - -static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to, - size_t len, const u_char *buf) -{ - struct fsl_qspi *q = nor->priv; - ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, - (u32 *)buf, len); - - /* invalid the data in the AHB buffer. */ - fsl_qspi_invalid(q); - return ret; -} - -static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, - size_t len, u_char *buf) -{ - struct fsl_qspi *q = nor->priv; - u8 cmd = nor->read_opcode; - - /* if necessary,ioremap buffer before AHB read, */ - if (!q->ahb_addr) { - q->memmap_offs = q->chip_base_addr + from; - q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP; - - q->ahb_addr = ioremap_nocache( - q->memmap_phy + q->memmap_offs, - q->memmap_len); - if (!q->ahb_addr) { - dev_err(q->dev, "ioremap failed\n"); - return -ENOMEM; - } - /* ioremap if the data requested is out of range */ - } else if (q->chip_base_addr + from < q->memmap_offs - || q->chip_base_addr + from + len > - q->memmap_offs + q->memmap_len) { - iounmap(q->ahb_addr); - - q->memmap_offs = q->chip_base_addr + from; - q->memmap_len = len > QUADSPI_MIN_IOMAP ? 
len : QUADSPI_MIN_IOMAP; - q->ahb_addr = ioremap_nocache( - q->memmap_phy + q->memmap_offs, - q->memmap_len); - if (!q->ahb_addr) { - dev_err(q->dev, "ioremap failed\n"); - return -ENOMEM; - } - } - - dev_dbg(q->dev, "cmd [%x],read from %p, len:%zd\n", - cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, - len); - - /* Read out the data directly from the AHB buffer.*/ - memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, - len); - - return len; -} - -static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - dev_dbg(nor->dev, "%dKiB at 0x%08x:0x%08x\n", - nor->mtd.erasesize / 1024, q->chip_base_addr, (u32)offs); - - ret = fsl_qspi_runcmd(q, nor->erase_opcode, offs, 0); - if (ret) - return ret; - - fsl_qspi_invalid(q); - return 0; -} - -static int fsl_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct fsl_qspi *q = nor->priv; - int ret; - - mutex_lock(&q->lock); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - goto err_mutex; - - fsl_qspi_set_base_addr(q, nor); - return 0; - -err_mutex: - mutex_unlock(&q->lock); - return ret; -} - -static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) -{ - struct fsl_qspi *q = nor->priv; - - fsl_qspi_clk_disable_unprep(q); - mutex_unlock(&q->lock); -} - -static int fsl_qspi_probe(struct platform_device *pdev) -{ - const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ_1_1_4 | - SNOR_HWCAPS_PP, - }; - struct device_node *np = pdev->dev.of_node; - struct device *dev = &pdev->dev; - struct fsl_qspi *q; - struct resource *res; - struct spi_nor *nor; - struct mtd_info *mtd; - int ret, i = 0; - - q = devm_kzalloc(dev, sizeof(*q), GFP_KERNEL); - if (!q) - return -ENOMEM; - - q->nor_num = of_get_child_count(dev->of_node); - if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP) - return -ENODEV; - - q->dev = dev; - q->devtype_data = of_device_get_match_data(dev); - if (!q->devtype_data) - return -ENODEV; - platform_set_drvdata(pdev, q); - - /* find the resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); - q->iobase = devm_ioremap_resource(dev, res); - if (IS_ERR(q->iobase)) - return PTR_ERR(q->iobase); - - q->big_endian = of_property_read_bool(np, "big-endian"); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "QuadSPI-memory"); - if (!devm_request_mem_region(dev, res->start, resource_size(res), - res->name)) { - dev_err(dev, "can't request region for resource %pR\n", res); - return -EBUSY; - } - - q->memmap_phy = res->start; - - /* find the clocks */ - q->clk_en = devm_clk_get(dev, "qspi_en"); - if (IS_ERR(q->clk_en)) - return PTR_ERR(q->clk_en); - - q->clk = devm_clk_get(dev, "qspi"); - if (IS_ERR(q->clk)) - return PTR_ERR(q->clk); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) { - dev_err(dev, "can not enable the clock\n"); - goto clk_failed; - } - - /* find the irq */ - ret = platform_get_irq(pdev, 0); - if (ret < 0) { - dev_err(dev, "failed to get the irq: %d\n", ret); - goto irq_failed; - } - - ret = devm_request_irq(dev, ret, - fsl_qspi_irq_handler, 0, pdev->name, q); - if (ret) { - dev_err(dev, "failed to request irq: %d\n", ret); - goto irq_failed; - } - - ret = fsl_qspi_nor_setup(q); - if (ret) - goto irq_failed; - - if (of_get_property(np, "fsl,qspi-has-second-chip", NULL)) - q->has_second_chip = true; - - mutex_init(&q->lock); - - /* iterate the subnodes. 
*/ - for_each_available_child_of_node(dev->of_node, np) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - - nor = &q->nor[i]; - mtd = &nor->mtd; - - nor->dev = dev; - spi_nor_set_flash_node(nor, np); - nor->priv = q; - - if (q->nor_num > 1 && !mtd->name) { - int spiflash_idx; - - ret = of_property_read_u32(np, "reg", &spiflash_idx); - if (!ret) { - mtd->name = devm_kasprintf(dev, GFP_KERNEL, - "%s-%d", - dev_name(dev), - spiflash_idx); - if (!mtd->name) { - ret = -ENOMEM; - goto mutex_failed; - } - } else { - dev_warn(dev, "reg property is missing\n"); - } - } - - /* fill the hooks */ - nor->read_reg = fsl_qspi_read_reg; - nor->write_reg = fsl_qspi_write_reg; - nor->read = fsl_qspi_read; - nor->write = fsl_qspi_write; - nor->erase = fsl_qspi_erase; - - nor->prepare = fsl_qspi_prep; - nor->unprepare = fsl_qspi_unprep; - - ret = of_property_read_u32(np, "spi-max-frequency", - &q->clk_rate); - if (ret < 0) - goto mutex_failed; - - /* set the chip address for READID */ - fsl_qspi_set_base_addr(q, nor); - - ret = spi_nor_scan(nor, NULL, &hwcaps); - if (ret) - goto mutex_failed; - - ret = mtd_device_register(mtd, NULL, 0); - if (ret) - goto mutex_failed; - - /* Set the correct NOR size now. */ - if (q->nor_size == 0) { - q->nor_size = mtd->size; - - /* Map the SPI NOR to accessiable address */ - fsl_qspi_set_map_addr(q); - } - - /* - * The TX FIFO is 64 bytes in the Vybrid, but the Page Program - * may writes 265 bytes per time. The write is working in the - * unit of the TX FIFO, not in the unit of the SPI NOR's page - * size. - * - * So shrink the spi_nor->page_size if it is larger then the - * TX FIFO. - */ - if (nor->page_size > q->devtype_data->txfifo) - nor->page_size = q->devtype_data->txfifo; - - i++; - } - - /* finish the rest init. 
*/ - ret = fsl_qspi_nor_setup_last(q); - if (ret) - goto last_init_failed; - - fsl_qspi_clk_disable_unprep(q); - return 0; - -last_init_failed: - for (i = 0; i < q->nor_num; i++) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - mtd_device_unregister(&q->nor[i].mtd); - } -mutex_failed: - mutex_destroy(&q->lock); -irq_failed: - fsl_qspi_clk_disable_unprep(q); -clk_failed: - dev_err(dev, "Freescale QuadSPI probe failed\n"); - return ret; -} - -static int fsl_qspi_remove(struct platform_device *pdev) -{ - struct fsl_qspi *q = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < q->nor_num; i++) { - /* skip the holes */ - if (!q->has_second_chip) - i *= 2; - mtd_device_unregister(&q->nor[i].mtd); - } - - /* disable the hardware */ - qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); - qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); - - mutex_destroy(&q->lock); - - if (q->ahb_addr) - iounmap(q->ahb_addr); - - return 0; -} - -static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state) -{ - return 0; -} - -static int fsl_qspi_resume(struct platform_device *pdev) -{ - int ret; - struct fsl_qspi *q = platform_get_drvdata(pdev); - - ret = fsl_qspi_clk_prep_enable(q); - if (ret) - return ret; - - fsl_qspi_nor_setup(q); - fsl_qspi_set_map_addr(q); - fsl_qspi_nor_setup_last(q); - - fsl_qspi_clk_disable_unprep(q); - - return 0; -} - -static struct platform_driver fsl_qspi_driver = { - .driver = { - .name = "fsl-quadspi", - .of_match_table = fsl_qspi_dt_ids, - }, - .probe = fsl_qspi_probe, - .remove = fsl_qspi_remove, - .suspend = fsl_qspi_suspend, - .resume = fsl_qspi_resume, -}; -module_platform_driver(fsl_qspi_driver); - -MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver"); -MODULE_AUTHOR("Freescale Semiconductor Inc."); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index 5442993b71ff..d9eed6844ba1 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -431,7 +431,8 @@ static int mtk_nor_init(struct mtk_nor *mtk_nor, struct device_node *flash_node) { const struct spi_nor_hwcaps hwcaps = { - .mask = SNOR_HWCAPS_READ_FAST | + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | SNOR_HWCAPS_READ_1_1_2 | SNOR_HWCAPS_PP, }; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 6e13bbd1aaa5..fae147452aff 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -68,7 +68,7 @@ enum spi_nor_read_command_index { SNOR_CMD_READ_4_4_4, SNOR_CMD_READ_1_4_4_DTR, - /* Octo SPI */ + /* Octal SPI */ SNOR_CMD_READ_1_1_8, SNOR_CMD_READ_1_8_8, SNOR_CMD_READ_8_8_8, @@ -85,7 +85,7 @@ enum spi_nor_pp_command_index { SNOR_CMD_PP_1_4_4, SNOR_CMD_PP_4_4_4, - /* Octo SPI */ + /* Octal SPI */ SNOR_CMD_PP_1_1_8, SNOR_CMD_PP_1_8_8, SNOR_CMD_PP_8_8_8, @@ -278,6 +278,7 @@ struct flash_info { #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ #define USE_CLSR BIT(14) /* use CLSR command */ +#define SPI_NOR_OCTAL_READ BIT(15) /* Flash supports Octal Read */ /* Part specific fixup hooks. 
*/ const struct spi_nor_fixups *fixups; @@ -398,6 +399,8 @@ static u8 spi_nor_convert_3to4_read(u8 opcode) { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, + { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, + { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, @@ -414,6 +417,8 @@ static u8 spi_nor_convert_3to4_program(u8 opcode) { SPINOR_OP_PP, SPINOR_OP_PP_4B }, { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, + { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, + { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, }; return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, @@ -1740,7 +1745,11 @@ static const struct flash_info spi_nor_ids[] = { { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, + { "en25q80a", INFO(0x1c3014, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ) }, { "en25qh32", INFO(0x1c7016, 0, 64 * 1024, 64, 0) }, + { "en25qh64", INFO(0x1c7017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ) }, { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, @@ -1836,6 +1845,8 @@ static const struct flash_info spi_nor_ids[] = { { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) }, { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, + { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) }, { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) }, { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, @@ -1847,6 +1858,8 @@ static const struct flash_info spi_nor_ids[] = { SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) .fixups = &mx25l25635_fixups }, { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, + { "mx25v8035f", INFO(0xc22314, 0, 64 * 1024, 16, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, @@ -1872,7 +1885,8 @@ static const struct flash_info spi_nor_ids[] = { /* Micron */ { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512, - SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES) + SECT_4K | USE_FSR | SPI_NOR_OCTAL_READ | + SPI_NOR_4B_OPCODES) }, /* PMC */ @@ -1885,13 +1899,17 @@ static const struct flash_info spi_nor_ids[] = { */ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, + SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) }, { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ 
| USE_CLSR) }, - { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, + { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, - { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) }, { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, @@ -3591,6 +3609,13 @@ static int spi_nor_init_params(struct spi_nor *nor, SNOR_PROTO_1_1_4); } + if (info->flags & SPI_NOR_OCTAL_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8], + 0, 8, SPINOR_OP_READ_1_1_8, + SNOR_PROTO_1_1_8); + } + /* Page Program settings. */ params->hwcaps.mask |= SNOR_HWCAPS_PP; spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 22547d7a84ea..947a8adbc799 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -974,6 +974,36 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, break; } + /* Check a specific PEB for bitflips and scrub it if needed */ + case UBI_IOCRPEB: + { + int pnum; + + err = get_user(pnum, (__user int32_t *)argp); + if (err) { + err = -EFAULT; + break; + } + + err = ubi_bitflip_check(ubi, pnum, 0); + break; + } + + /* Force scrubbing for a specific PEB */ + case UBI_IOCSPEB: + { + int pnum; + + err = get_user(pnum, (__user int32_t *)argp); + if (err) { + err = -EFAULT; + break; + } + + err = ubi_bitflip_check(ubi, pnum, 1); + break; + } + default: err = -ENOTTY; break; diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index d47b9e436e67..a1b9e764d489 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -929,6 +929,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e, int ubi_is_erase_work(struct ubi_work *wrk); void ubi_refill_pools(struct ubi_device *ubi); int ubi_ensure_anchor_pebs(struct ubi_device *ubi); +int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub); /* io.c */ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 6f2ac865ff05..2709dc02fc24 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -277,6 +277,27 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) return 0; } +/** + * in_pq - check if a wear-leveling entry is present in the protection queue. + * @ubi: UBI device description object + * @e: the wear-leveling entry to check + * + * This function returns non-zero if @e is in the protection queue and zero + * if it is not. + */ +static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e) +{ + struct ubi_wl_entry *p; + int i; + + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) + list_for_each_entry(p, &ubi->pq[i], u.list) + if (p == e) + return 1; + + return 0; +} + /** * prot_queue_add - add physical eraseblock to the protection queue. 
* @ubi: UBI device description object @@ -1419,6 +1440,150 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) return err; } +static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e) +{ + if (in_wl_tree(e, &ubi->scrub)) + return false; + else if (in_wl_tree(e, &ubi->erroneous)) + return false; + else if (ubi->move_from == e) + return false; + else if (ubi->move_to == e) + return false; + + return true; +} + +/** + * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed. + * @ubi: UBI device description object + * @pnum: the physical eraseblock to schedule + * @force: don't read the block, assume bitflips happened and take action. + * + * This function reads the given eraseblock and checks if bitflips occurred. + * In case of bitflips, the eraseblock is scheduled for scrubbing. + * If scrubbing is forced with @force, the eraseblock is not read, + * but scheduled for scrubbing right away. + * + * Returns: + * %EINVAL, PEB is out of range + * %ENOENT, PEB is no longer used by UBI + * %EBUSY, PEB cannot be checked now or a check is currently running on it + * %EAGAIN, bit flips happened but scrubbing is currently not possible + * %EUCLEAN, bit flips happened and PEB is scheduled for scrubbing + * %0, no bit flips detected + */ +int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force) +{ + int err; + struct ubi_wl_entry *e; + + if (pnum < 0 || pnum >= ubi->peb_count) { + err = -EINVAL; + goto out; + } + + /* + * Pause all parallel work, otherwise it can happen that the + * erase worker frees a wl entry under us. + */ + down_write(&ubi->work_sem); + + /* + * Make sure that the wl entry does not change state while + * inspecting it. + */ + spin_lock(&ubi->wl_lock); + e = ubi->lookuptbl[pnum]; + if (!e) { + spin_unlock(&ubi->wl_lock); + err = -ENOENT; + goto out_resume; + } + + /* + * Does it make sense to check this PEB? + */ + if (!scrub_possible(ubi, e)) { + spin_unlock(&ubi->wl_lock); + err = -EBUSY; + goto out_resume; + } + spin_unlock(&ubi->wl_lock); + + if (!force) { + mutex_lock(&ubi->buf_mutex); + err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size); + mutex_unlock(&ubi->buf_mutex); + } + + if (force || err == UBI_IO_BITFLIPS) { + /* + * Okay, bit flip happened, let's figure out what we can do. + */ + spin_lock(&ubi->wl_lock); + + /* + * Recheck. We released wl_lock, UBI might have killed the + * wl entry under us. + */ + e = ubi->lookuptbl[pnum]; + if (!e) { + spin_unlock(&ubi->wl_lock); + err = -ENOENT; + goto out_resume; + } + + /* + * Need to re-check state + */ + if (!scrub_possible(ubi, e)) { + spin_unlock(&ubi->wl_lock); + err = -EBUSY; + goto out_resume; + } + + if (in_pq(ubi, e)) { + prot_queue_del(ubi, e->pnum); + wl_tree_add(e, &ubi->scrub); + spin_unlock(&ubi->wl_lock); + + err = ensure_wear_leveling(ubi, 1); + } else if (in_wl_tree(e, &ubi->used)) { + rb_erase(&e->u.rb, &ubi->used); + wl_tree_add(e, &ubi->scrub); + spin_unlock(&ubi->wl_lock); + + err = ensure_wear_leveling(ubi, 1); + } else if (in_wl_tree(e, &ubi->free)) { + rb_erase(&e->u.rb, &ubi->free); + ubi->free_count--; + spin_unlock(&ubi->wl_lock); + + /* + * This PEB is empty, we can schedule it for + * erasure right away. No wear leveling needed. + */ + err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN, + force ? 
0 : 1, true); + } else { + spin_unlock(&ubi->wl_lock); + err = -EAGAIN; + } + + if (!err && !force) + err = -EUCLEAN; + } else { + err = 0; + } + +out_resume: + up_write(&ubi->work_sem); +out: + + return err; +} + /** * tree_destroy - destroy an RB-tree. * @ubi: UBI device description object @@ -1848,16 +2013,11 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi, static int self_check_in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e) { - struct ubi_wl_entry *p; - int i; - if (!ubi_dbg_chk_gen(ubi)) return 0; - for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) - list_for_each_entry(p, &ubi->pq[i], u.list) - if (p == e) - return 0; + if (in_pq(ubi, e)) + return 0; ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue", e->pnum, e->ec); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index edb1c023a753..5e4ca082cfcd 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -145,13 +145,16 @@ config MACVTAP To compile this driver as a module, choose M here: the module will be called macvtap. +config IPVLAN_L3S + depends on NETFILTER + depends on IPVLAN + def_bool y + select NET_L3_MASTER_DEV config IPVLAN tristate "IP-VLAN support" depends on INET depends on IPV6 || !IPV6 - depends on NETFILTER - select NET_L3_MASTER_DEV ---help--- This allows one to create virtual devices off of a main interface and packets will be delivered based on the dest L3 (IPv6/IPv4 addr) @@ -197,9 +200,9 @@ config VXLAN config GENEVE tristate "Generic Network Virtualization Encapsulation" - depends on INET && NET_UDP_TUNNEL + depends on INET depends on IPV6 || !IPV6 - select NET_IP_TUNNEL + select NET_UDP_TUNNEL select GRO_CELLS ---help--- This allows one to create geneve virtual interfaces that provide @@ -502,7 +505,6 @@ source "drivers/net/hyperv/Kconfig" config NETDEVSIM tristate "Simulated networking device" depends on DEBUG_FS - depends on MAY_USE_DEVLINK help This driver is a developer testing tool and software model that can be used to test various control path networking APIs, especially diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index f90bb723985f..b3c63d2f16aa 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -301,7 +301,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr) dev->irq = cops_irq(ioaddr, board); if (dev->irq) break; - /* No IRQ found on this port, fallthrough */ + /* fall through - Once no IRQ found on this port. 
*/ case 1: retval = -EINVAL; goto err_out; diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 7c46d9f4fefd..9274dcc6e9b0 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -31,6 +31,7 @@ #include #include #include +#include /* General definitions */ #define AD_SHORT_TIMEOUT 1 @@ -851,6 +852,9 @@ static int ad_lacpdu_send(struct port *port) if (!skb) return -ENOMEM; + atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_tx); + atomic64_inc(&BOND_AD_INFO(slave->bond).stats.lacpdu_tx); + skb->dev = slave->dev; skb_reset_mac_header(skb); skb->network_header = skb->mac_header + ETH_HLEN; @@ -892,6 +896,17 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker) if (!skb) return -ENOMEM; + switch (marker->tlv_type) { + case AD_MARKER_INFORMATION_SUBTYPE: + atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_tx); + atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_tx); + break; + case AD_MARKER_RESPONSE_SUBTYPE: + atomic64_inc(&SLAVE_AD_INFO(slave)->stats.marker_resp_tx); + atomic64_inc(&BOND_AD_INFO(slave->bond).stats.marker_resp_tx); + break; + } + skb_reserve(skb, 16); skb->dev = slave->dev; @@ -1086,6 +1101,10 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) */ last_state = port->sm_rx_state; + if (lacpdu) { + atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.lacpdu_rx); + atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.lacpdu_rx); + } /* check if state machine should change state */ /* first, check if port was reinitialized */ @@ -1922,6 +1941,9 @@ static void ad_marker_info_received(struct bond_marker *marker_info, { struct bond_marker marker; + atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_rx); + atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_rx); + /* copy the received marker data to the response marker */ memcpy(&marker, marker_info, sizeof(struct bond_marker)); /* change the marker subtype to marker response */ @@ -1946,6 +1968,9 @@ static void ad_marker_info_received(struct bond_marker *marker_info, static void ad_marker_response_received(struct bond_marker *marker, struct port *port) { + atomic64_inc(&SLAVE_AD_INFO(port->slave)->stats.marker_resp_rx); + atomic64_inc(&BOND_AD_INFO(port->slave->bond).stats.marker_resp_rx); + /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */ } @@ -2348,66 +2373,68 @@ re_arm: * bond_3ad_rx_indication - handle a received frame * @lacpdu: received lacpdu * @slave: slave struct to work on - * @length: length of the data received * * It is assumed that frames that were sent on this NIC don't returned as new * received frames (loopback). Since only the payload is given to this * function, it check for loopback. 
*/ -static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, - u16 length) +static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave) { - struct port *port; + struct bonding *bond = slave->bond; int ret = RX_HANDLER_ANOTHER; + struct bond_marker *marker; + struct port *port; + atomic64_t *stat; - if (length >= sizeof(struct lacpdu)) { - - port = &(SLAVE_AD_INFO(slave)->port); - - if (!port->slave) { - net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n", - slave->dev->name, slave->bond->dev->name); - return ret; - } + port = &(SLAVE_AD_INFO(slave)->port); + if (!port->slave) { + net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n", + slave->dev->name, slave->bond->dev->name); + return ret; + } - switch (lacpdu->subtype) { - case AD_TYPE_LACPDU: - ret = RX_HANDLER_CONSUMED; - netdev_dbg(slave->bond->dev, - "Received LACPDU on port %d slave %s\n", - port->actor_port_number, - slave->dev->name); - /* Protect against concurrent state machines */ - spin_lock(&slave->bond->mode_lock); - ad_rx_machine(lacpdu, port); - spin_unlock(&slave->bond->mode_lock); + switch (lacpdu->subtype) { + case AD_TYPE_LACPDU: + ret = RX_HANDLER_CONSUMED; + netdev_dbg(slave->bond->dev, + "Received LACPDU on port %d slave %s\n", + port->actor_port_number, slave->dev->name); + /* Protect against concurrent state machines */ + spin_lock(&slave->bond->mode_lock); + ad_rx_machine(lacpdu, port); + spin_unlock(&slave->bond->mode_lock); + break; + case AD_TYPE_MARKER: + ret = RX_HANDLER_CONSUMED; + /* No need to convert fields to Little Endian since we + * don't use the marker's fields. + */ + marker = (struct bond_marker *)lacpdu; + switch (marker->tlv_type) { + case AD_MARKER_INFORMATION_SUBTYPE: + netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n", + port->actor_port_number); + ad_marker_info_received(marker, port); break; - - case AD_TYPE_MARKER: - ret = RX_HANDLER_CONSUMED; - /* No need to convert fields to Little Endian since we - * don't use the marker's fields. 
- */ - - switch (((struct bond_marker *)lacpdu)->tlv_type) { - case AD_MARKER_INFORMATION_SUBTYPE: - netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n", - port->actor_port_number); - ad_marker_info_received((struct bond_marker *)lacpdu, port); - break; - - case AD_MARKER_RESPONSE_SUBTYPE: - netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n", - port->actor_port_number); - ad_marker_response_received((struct bond_marker *)lacpdu, port); - break; - - default: - netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n", - port->actor_port_number); - } + case AD_MARKER_RESPONSE_SUBTYPE: + netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n", + port->actor_port_number); + ad_marker_response_received(marker, port); + break; + default: + netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n", + port->actor_port_number); + stat = &SLAVE_AD_INFO(slave)->stats.marker_unknown_rx; + atomic64_inc(stat); + stat = &BOND_AD_INFO(bond).stats.marker_unknown_rx; + atomic64_inc(stat); } + break; + default: + atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_unknown_rx); + atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_unknown_rx); } + return ret; } @@ -2643,10 +2670,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, return RX_HANDLER_ANOTHER; lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu); - if (!lacpdu) + if (!lacpdu) { + atomic64_inc(&SLAVE_AD_INFO(slave)->stats.lacpdu_illegal_rx); + atomic64_inc(&BOND_AD_INFO(bond).stats.lacpdu_illegal_rx); return RX_HANDLER_ANOTHER; + } - return bond_3ad_rx_indication(lacpdu, slave, skb->len); + return bond_3ad_rx_indication(lacpdu, slave); } /** @@ -2678,3 +2708,61 @@ void bond_3ad_update_lacp_rate(struct bonding *bond) } spin_unlock_bh(&bond->mode_lock); } + +size_t bond_3ad_stats_size(void) +{ + return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */ + nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_TX */ + nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_UNKNOWN_RX */ + nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_ILLEGAL_RX */ + nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RX */ + nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_TX */ + nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_RX */ + nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_MARKER_RESP_TX */ + nla_total_size_64bit(sizeof(u64)); /* BOND_3AD_STAT_MARKER_UNKNOWN_RX */ +} + +int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats) +{ + u64 val; + + val = atomic64_read(&stats->lacpdu_rx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_RX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + val = atomic64_read(&stats->lacpdu_tx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_TX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + val = atomic64_read(&stats->lacpdu_unknown_rx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_UNKNOWN_RX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + val = atomic64_read(&stats->lacpdu_illegal_rx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_LACPDU_ILLEGAL_RX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + + val = atomic64_read(&stats->marker_rx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + val = atomic64_read(&stats->marker_tx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_TX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + val = 
atomic64_read(&stats->marker_resp_rx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_RX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + val = atomic64_read(&stats->marker_resp_tx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_RESP_TX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + val = atomic64_read(&stats->marker_unknown_rx); + if (nla_put_u64_64bit(skb, BOND_3AD_STAT_MARKER_UNKNOWN_RX, val, + BOND_3AD_STAT_PAD)) + return -EMSGSIZE; + + return 0; +} diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 485462d3087f..b59708c35faf 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -77,7 +77,6 @@ #include #include #include -#include #include #include #include @@ -1183,29 +1182,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) } } - /* Link-local multicast packets should be passed to the - * stack on the link they arrive as well as pass them to the - * bond-master device. These packets are mostly usable when - * stack receives it with the link on which they arrive - * (e.g. LLDP) they also must be available on master. Some of - * the use cases include (but are not limited to): LLDP agents - * that must be able to operate both on enslaved interfaces as - * well as on bonds themselves; linux bridges that must be able - * to process/pass BPDUs from attached bonds when any kind of - * STP version is enabled on the network. + /* + * For packets determined by bond_should_deliver_exact_match() call to + * be suppressed we want to make an exception for link-local packets. + * This is necessary for e.g. LLDP daemons to be able to monitor + * inactive slave links without being forced to bind to them + * explicitly. + * + * At the same time, packets that are passed to the bonding master + * (including link-local ones) can have their originating interface + * determined via PACKET_ORIGDEV socket option. 
*/ - if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); - - if (nskb) { - nskb->dev = bond->dev; - nskb->queue_mapping = 0; - netif_rx(nskb); - } - return RX_HANDLER_PASS; - } - if (bond_should_deliver_exact_match(skb, slave, bond)) + if (bond_should_deliver_exact_match(skb, slave, bond)) { + if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) + return RX_HANDLER_PASS; return RX_HANDLER_EXACT; + } skb->dev = bond->dev; diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 6b9ad8673218..b286f591242e 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -675,6 +675,71 @@ nla_put_failure: return -EMSGSIZE; } +static size_t bond_get_linkxstats_size(const struct net_device *dev, int attr) +{ + switch (attr) { + case IFLA_STATS_LINK_XSTATS: + case IFLA_STATS_LINK_XSTATS_SLAVE: + break; + default: + return 0; + } + + return bond_3ad_stats_size() + nla_total_size(0); +} + +static int bond_fill_linkxstats(struct sk_buff *skb, + const struct net_device *dev, + int *prividx, int attr) +{ + struct nlattr *nla __maybe_unused; + struct slave *slave = NULL; + struct nlattr *nest, *nest2; + struct bonding *bond; + + switch (attr) { + case IFLA_STATS_LINK_XSTATS: + bond = netdev_priv(dev); + break; + case IFLA_STATS_LINK_XSTATS_SLAVE: + slave = bond_slave_get_rtnl(dev); + if (!slave) + return 0; + bond = slave->bond; + break; + default: + return -EINVAL; + } + + nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BOND); + if (!nest) + return -EMSGSIZE; + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + struct bond_3ad_stats *stats; + + if (slave) + stats = &SLAVE_AD_INFO(slave)->stats; + else + stats = &BOND_AD_INFO(bond).stats; + + nest2 = nla_nest_start(skb, BOND_XSTATS_3AD); + if (!nest2) { + nla_nest_end(skb, nest); + return -EMSGSIZE; + } + + if (bond_3ad_stats_fill(skb, stats)) { + nla_nest_cancel(skb, nest2); + nla_nest_end(skb, nest); + return -EMSGSIZE; + } + nla_nest_end(skb, nest2); + } + nla_nest_end(skb, nest); + + return 0; +} + struct rtnl_link_ops bond_link_ops __read_mostly = { .kind = "bond", .priv_size = sizeof(struct bonding), @@ -689,6 +754,8 @@ struct rtnl_link_ops bond_link_ops __read_mostly = { .get_num_tx_queues = bond_get_num_tx_queues, .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number as for TX queues */ + .fill_linkxstats = bond_fill_linkxstats, + .get_linkxstats_size = bond_get_linkxstats_size, .slave_maxtype = IFLA_BOND_SLAVE_MAX, .slave_policy = bond_slave_policy, .slave_changelink = bond_slave_changelink, diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 4d5d01cb8141..da1fc17295d9 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1375,6 +1375,7 @@ static int bond_option_slaves_set(struct bonding *bond, sscanf(newval->string, "%16s", command); /* IFNAMSIZ*/ ifname = command + 1; if ((strlen(command) <= 1) || + (command[0] != '+' && command[0] != '-') || !dev_valid_name(ifname)) goto err_no_cmd; @@ -1398,6 +1399,7 @@ static int bond_option_slaves_set(struct bonding *bond, break; default: + /* should not run here. 
*/ goto err_no_cmd; } diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index a0f954f36c09..44e6c7b1b222 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser) if (skb->len == 0) { struct sk_buff *tmp = skb_dequeue(&ser->head); WARN_ON(tmp != skb); - if (in_interrupt()) - dev_kfree_skb_irq(skb); - else - kfree_skb(skb); + dev_consume_skb_any(skb); } } /* Send flow off if queue is empty */ diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index d28a1398c091..7608bc3e00df 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -73,35 +73,37 @@ MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment."); #define LOW_WATER_MARK 100 #define HIGH_WATER_MARK (LOW_WATER_MARK*5) -#ifdef CONFIG_UML +#ifndef CONFIG_HAS_DMA /* * We sometimes use UML for debugging, but it cannot handle * dma_alloc_coherent so we have to wrap it. */ -static inline void *dma_alloc(dma_addr_t *daddr) +static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr) { return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL); } -static inline void dma_free(void *cpu_addr, dma_addr_t handle) +static inline void dma_free(struct cfspi *cfspi, void *cpu_addr, + dma_addr_t handle) { kfree(cpu_addr); } #else -static inline void *dma_alloc(dma_addr_t *daddr) +static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr) { - return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr, + return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr, GFP_KERNEL); } -static inline void dma_free(void *cpu_addr, dma_addr_t handle) +static inline void dma_free(struct cfspi *cfspi, void *cpu_addr, + dma_addr_t handle) { - dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle); + dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle); } -#endif /* CONFIG_UML */ +#endif /* CONFIG_HAS_DMA */ #ifdef CONFIG_DEBUG_FS @@ -610,13 +612,13 @@ static int cfspi_init(struct net_device *dev) } /* Allocate DMA buffers. */ - cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]); + cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]); if (!cfspi->xfer.va_tx[0]) { res = -ENODEV; goto err_dma_alloc_tx_0; } - cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx); + cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx); if (!cfspi->xfer.va_rx) { res = -ENODEV; @@ -665,9 +667,9 @@ static int cfspi_init(struct net_device *dev) return 0; err_create_wq: - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx); err_dma_alloc_rx: - dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); + dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); err_dma_alloc_tx_0: return res; } @@ -683,8 +685,8 @@ static void cfspi_uninit(struct net_device *dev) cfspi->ndev = NULL; /* Free DMA buffers. 
*/ - dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); - dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); + dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx); + dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); set_bit(SPI_TERMINATE, &cfspi->state); wake_up_interruptible(&cfspi->wait); destroy_workqueue(cfspi->wq); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index d516def846ab..b388406ac0f5 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -127,7 +127,7 @@ static u8 *pcan_msg_init_empty(struct pcan_usb_pro_msg *pm, /* * add one record to a message being built */ -static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) +static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...) { int len, i; u8 *pc; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0e4bbdcc614f..0852e5e08177 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); } -static void b53_enable_vlan(struct b53_device *dev, bool enable) +static void b53_enable_vlan(struct b53_device *dev, bool enable, + bool enable_filtering) { u8 mgmt, vc0, vc1, vc4 = 0, vc5; @@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; - vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; - vc5 |= VC5_DROP_VTABLE_MISS; + if (enable_filtering) { + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + } else { + vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + vc5 &= ~VC5_DROP_VTABLE_MISS; + } if (is5325(dev)) vc0 &= ~VC0_RESERVED_1; @@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) } b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + + dev->vlan_enabled = enable; + dev->vlan_filtering_enabled = enable_filtering; } static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) @@ -534,7 +543,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) } EXPORT_SYMBOL(b53_enable_port); -void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy) +void b53_disable_port(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; u8 reg; @@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev) b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); } +static u16 b53_default_pvid(struct b53_device *dev) +{ + if (is5325(dev) || is5365(dev)) + return 1; + else + return 0; +} + int b53_configure_vlan(struct dsa_switch *ds) { struct b53_device *dev = ds->priv; struct b53_vlan vl = { 0 }; - int i; + int i, def_vid; + + def_vid = b53_default_pvid(dev); /* clear all vlan entries */ if (is5325(dev) || is5365(dev)) { - for (i = 1; i < dev->num_vlans; i++) + for (i = def_vid; i < dev->num_vlans; i++) b53_set_vlan_entry(dev, i, &vl); } else { b53_do_vlan_op(dev, VTA_CMD_CLEAR); } - b53_enable_vlan(dev, false); + b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); b53_for_each_port(dev, i) b53_write16(dev, B53_VLAN_PAGE, - B53_VLAN_PORT_DEF_TAG(i), 1); + B53_VLAN_PORT_DEF_TAG(i), def_vid); if (!is5325(dev) && !is5365(dev)) b53_set_jumbo(dev, dev->enable_jumbo, false); @@ -944,7 
+963,7 @@ static int b53_setup(struct dsa_switch *ds) if (dsa_is_cpu_port(ds, port)) b53_enable_cpu_port(dev, port); else if (dsa_is_unused_port(ds, port)) - b53_disable_port(ds, port, NULL); + b53_disable_port(ds, port); } return ret; @@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { + struct b53_device *dev = ds->priv; + struct net_device *bridge_dev; + unsigned int i; + u16 pvid, new_pvid; + + /* Handle the case where multiple bridges span the same switch device + * and one of them has a different setting than what is being requested + * which would be breaking filtering semantics for any of the other + * bridge devices. + */ + b53_for_each_port(dev, i) { + bridge_dev = dsa_to_port(ds, i)->bridge_dev; + if (bridge_dev && + bridge_dev != dsa_to_port(ds, port)->bridge_dev && + br_vlan_enabled(bridge_dev) != vlan_filtering) { + netdev_err(bridge_dev, + "VLAN filtering is global to the switch!\n"); + return -EINVAL; + } + } + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + new_pvid = pvid; + if (dev->vlan_filtering_enabled && !vlan_filtering) { + /* Filtering is currently enabled, use the default PVID since + * the bridge does not expect tagging anymore + */ + dev->ports[port].pvid = pvid; + new_pvid = b53_default_pvid(dev); + } else if (!dev->vlan_filtering_enabled && vlan_filtering) { + /* Filtering is currently disabled, restore the previous PVID */ + new_pvid = dev->ports[port].pvid; + } + + if (pvid != new_pvid) + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), + new_pvid); + + b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); + return 0; } EXPORT_SYMBOL(b53_vlan_filtering); @@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, if (vlan->vid_end > dev->num_vlans) return -ERANGE; - b53_enable_vlan(dev, true); + b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); return 0; } @@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, b53_fast_age_vlan(dev, vid); } - if (pvid) { + if (pvid && !dsa_is_cpu_port(ds, port)) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), vlan->vid_end); b53_fast_age_vlan(dev, vid); @@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, vl->members &= ~BIT(port); - if (pvid == vid) { - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; - } + if (pvid == vid) + pvid = b53_default_pvid(dev); if (untagged && !dsa_is_cpu_port(ds, port)) vl->untag &= ~(BIT(port)); @@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); dev->ports[port].vlan_ctl_mask = pvlan; - if (is5325(dev) || is5365(dev)) - pvid = 1; - else - pvid = 0; + pvid = b53_default_pvid(dev); /* Make this port join all VLANs without VLAN entries */ if (is58xx(dev)) { diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index ec796482792d..e3441dcf2d21 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -91,6 +91,7 @@ enum { struct b53_port { u16 vlan_ctl_mask; struct ethtool_eee eee; + u16 pvid; }; struct b53_vlan { @@ -137,6 +138,8 @@ struct b53_device { unsigned int num_vlans; struct b53_vlan *vlans; + bool vlan_enabled; + bool vlan_filtering_enabled; unsigned int num_ports; struct b53_port *ports; }; @@ -353,7 +356,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port); void b53_mirror_del(struct 
dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); -void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy); +void b53_disable_port(struct dsa_switch *ds, int port); void b53_brcm_hdr_setup(struct dsa_switch *ds, int port); void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable); int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy); diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 90f514252987..d9c56a779c08 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev) /* Clear all pending interrupts */ writel(0xffffffff, priv->regs + B53_SRAB_INTR); - if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) - return; - for (i = 0; i < B53_N_PORTS; i++) { port = &priv->port_intrs[i]; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 361fbde76654..c8e3f05e1d72 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -221,8 +221,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, return b53_enable_port(ds, port, phy); } -static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phy) +static void bcm_sf2_port_disable(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 reg; @@ -241,7 +240,7 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, if (priv->int_phy_mask & 1 << port && priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, false); - b53_disable_port(ds, port, phy); + b53_disable_port(ds, port); /* Power down the port memory */ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL); @@ -690,9 +689,9 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) * port, the other ones have already been disabled during * bcm_sf2_sw_setup */ - for (port = 0; port < DSA_MAX_PORTS; port++) { + for (port = 0; port < ds->num_ports; port++) { if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) - bcm_sf2_port_disable(ds, port, NULL); + bcm_sf2_port_disable(ds, port); } return 0; @@ -726,10 +725,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, { struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; /* Get the parent device WoL settings */ - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); /* Advertise the parent device supported settings */ wol->supported = pwol.supported; @@ -750,9 +750,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct net_device *p = ds->ports[port].cpu_dp->master; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->ports[port].cpu_dp->index; - struct ethtool_wolinfo pwol; + struct ethtool_wolinfo pwol = { }; - p->ethtool_ops->get_wol(p, &pwol); + if (p->ethtool_ops->get_wol) + p->ethtool_ops->get_wol(p, &pwol); if (wol->wolopts & ~pwol.supported) return -EINVAL; @@ -786,7 +787,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) else if (dsa_is_cpu_port(ds, port)) bcm_sf2_imp_setup(ds, port); else - bcm_sf2_port_disable(ds, port, NULL); + bcm_sf2_port_disable(ds, port); } b53_configure_vlan(ds); @@ -894,12 +895,44 @@ static const struct b53_io_ops bcm_sf2_io_ops = { .write64 = bcm_sf2_core_write64, }; +static void 
bcm_sf2_sw_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) +{ + int cnt = b53_get_sset_count(ds, port, stringset); + + b53_get_strings(ds, port, stringset, data); + bcm_sf2_cfp_get_strings(ds, port, stringset, + data + cnt * ETH_GSTRING_LEN); +} + +static void bcm_sf2_sw_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + int cnt = b53_get_sset_count(ds, port, ETH_SS_STATS); + + b53_get_ethtool_stats(ds, port, data); + bcm_sf2_cfp_get_ethtool_stats(ds, port, data + cnt); +} + +static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port, + int sset) +{ + int cnt = b53_get_sset_count(ds, port, sset); + + if (cnt < 0) + return cnt; + + cnt += bcm_sf2_cfp_get_sset_count(ds, port, sset); + + return cnt; +} + static const struct dsa_switch_ops bcm_sf2_ops = { .get_tag_protocol = b53_get_tag_protocol, .setup = bcm_sf2_sw_setup, - .get_strings = b53_get_strings, - .get_ethtool_stats = b53_get_ethtool_stats, - .get_sset_count = b53_get_sset_count, + .get_strings = bcm_sf2_sw_get_strings, + .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, + .get_sset_count = bcm_sf2_sw_get_sset_count, .get_ethtool_phy_stats = b53_get_ethtool_phy_stats, .get_phy_flags = bcm_sf2_sw_get_phy_flags, .phylink_validate = bcm_sf2_sw_validate, @@ -1062,7 +1095,6 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); spin_lock_init(&priv->indir_lock); - mutex_init(&priv->stats_mutex); mutex_init(&priv->cfp.lock); INIT_LIST_HEAD(&priv->cfp.rules_list); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index faaef320ec48..eb3655bea467 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -87,9 +87,6 @@ struct bcm_sf2_priv { /* Backing b53_device */ struct b53_device *dev; - /* Mutex protecting access to the MIB counters */ - struct mutex stats_mutex; - struct bcm_sf2_hw_params hw_params; struct bcm_sf2_port_status port_sts[DSA_MAX_PORTS]; @@ -216,5 +213,10 @@ int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv); void bcm_sf2_cfp_exit(struct dsa_switch *ds); int bcm_sf2_cfp_resume(struct dsa_switch *ds); +void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data); +void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data); +int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset); #endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index e14663ab6dbc..e6234d209787 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "bcm_sf2.h" #include "bcm_sf2_regs.h" @@ -212,6 +213,7 @@ static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv) static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv, unsigned int rule_index, + int src_port, unsigned int port_num, unsigned int queue_num, bool fwd_map_change) @@ -229,6 +231,10 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv, else reg = 0; + /* Enable looping back to the original port */ + if (src_port == port_num) + reg |= LOOP_BK_EN; + core_writel(priv, reg, CORE_ACT_POL_DATA0); /* Set classification ID that needs to be put in Broadcom tag */ @@ -257,7 +263,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv, } static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, - struct ethtool_tcpip4_spec *v4_spec, + struct 
flow_dissector_key_ipv4_addrs *addrs, + struct flow_dissector_key_ports *ports, unsigned int slice_num, bool mask) { @@ -278,7 +285,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * UDF_n_A6 [23:8] * UDF_n_A5 [7:0] */ - reg = be16_to_cpu(v4_spec->pdst) >> 8; + reg = be16_to_cpu(ports->dst) >> 8; if (mask) offset = CORE_CFP_MASK_PORT(3); else @@ -289,9 +296,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * UDF_n_A4 [23:8] * UDF_n_A3 [7:0] */ - reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 | - (u32)be16_to_cpu(v4_spec->psrc) << 8 | - (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8; + reg = (be16_to_cpu(ports->dst) & 0xff) << 24 | + (u32)be16_to_cpu(ports->src) << 8 | + (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8; if (mask) offset = CORE_CFP_MASK_PORT(2); else @@ -302,9 +309,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * UDF_n_A2 [23:8] * UDF_n_A1 [7:0] */ - reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 | - (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 | - (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8; + reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 | + (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 | + (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8; if (mask) offset = CORE_CFP_MASK_PORT(1); else @@ -317,8 +324,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv, * Slice ID [3:2] * Slice valid [1:0] */ - reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 | - (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 | + reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 | + (u32)(be32_to_cpu(addrs->src) >> 16) << 8 | SLICE_NUM(slice_num) | SLICE_VALID; if (mask) offset = CORE_CFP_MASK_PORT(0); @@ -332,9 +339,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int queue_num, struct ethtool_rx_flow_spec *fs) { - struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec; + struct ethtool_rx_flow_spec_input input = {}; const struct cfp_udf_layout *layout; unsigned int slice_num, rule_index; + struct ethtool_rx_flow_rule *flow; + struct flow_match_ipv4_addrs ipv4; + struct flow_match_ports ports; + struct flow_match_ip ip; u8 ip_proto, ip_frag; u8 num_udf; u32 reg; @@ -343,13 +354,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, switch (fs->flow_type & ~FLOW_EXT) { case TCP_V4_FLOW: ip_proto = IPPROTO_TCP; - v4_spec = &fs->h_u.tcp_ip4_spec; - v4_m_spec = &fs->m_u.tcp_ip4_spec; break; case UDP_V4_FLOW: ip_proto = IPPROTO_UDP; - v4_spec = &fs->h_u.udp_ip4_spec; - v4_m_spec = &fs->m_u.udp_ip4_spec; break; default: return -EINVAL; @@ -367,11 +374,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, if (rule_index > bcm_sf2_cfp_rule_size(priv)) return -ENOSPC; + input.fs = fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) + return PTR_ERR(flow); + + flow_rule_match_ipv4_addrs(flow->rule, &ipv4); + flow_rule_match_ports(flow->rule, &ports); + flow_rule_match_ip(flow->rule, &ip); + layout = &udf_tcpip4_layout; /* We only use one UDF slice for now */ slice_num = bcm_sf2_get_slice_number(layout, 0); - if (slice_num == UDF_NUM_SLICES) - return -EINVAL; + if (slice_num == UDF_NUM_SLICES) { + ret = -EINVAL; + goto out_err_flow_rule; + } num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); @@ -398,7 +416,7 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, * Reserved [1] * UDF_Valid[8] [0] */ - core_writel(priv, v4_spec->tos << IPTOS_SHIFT | + core_writel(priv, ip.key->tos << IPTOS_SHIFT | 
ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf), CORE_CFP_DATA_PORT(6)); @@ -417,8 +435,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5)); /* Program the match and the mask */ - bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false); - bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true); + bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false); + bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true); /* Insert into TCAM now */ bcm_sf2_cfp_rule_addr_set(priv, rule_index); @@ -426,14 +444,14 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); if (ret) { pr_err("TCAM entry at addr %d failed\n", rule_index); - return ret; + goto out_err_flow_rule; } /* Insert into Action and policer RAMs now */ - ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num, + ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num, queue_num, true); if (ret) - return ret; + goto out_err_flow_rule; /* Turn on CFP for this rule now */ reg = core_readl(priv, CORE_CFP_CTL_REG); @@ -446,6 +464,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, fs->location = rule_index; return 0; + +out_err_flow_rule: + ethtool_rx_flow_rule_destroy(flow); + return ret; } static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, @@ -581,9 +603,12 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int queue_num, struct ethtool_rx_flow_spec *fs) { - struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec; + struct ethtool_rx_flow_spec_input input = {}; unsigned int slice_num, rule_index[2]; const struct cfp_udf_layout *layout; + struct ethtool_rx_flow_rule *flow; + struct flow_match_ipv6_addrs ipv6; + struct flow_match_ports ports; u8 ip_proto, ip_frag; int ret = 0; u8 num_udf; @@ -592,13 +617,9 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, switch (fs->flow_type & ~FLOW_EXT) { case TCP_V6_FLOW: ip_proto = IPPROTO_TCP; - v6_spec = &fs->h_u.tcp_ip6_spec; - v6_m_spec = &fs->m_u.tcp_ip6_spec; break; case UDP_V6_FLOW: ip_proto = IPPROTO_UDP; - v6_spec = &fs->h_u.udp_ip6_spec; - v6_m_spec = &fs->m_u.udp_ip6_spec; break; default: return -EINVAL; @@ -645,6 +666,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, goto out_err; } + input.fs = fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) { + ret = PTR_ERR(flow); + goto out_err; + } + flow_rule_match_ipv6_addrs(flow->rule, &ipv6); + flow_rule_match_ports(flow->rule, &ports); + /* Apply the UDF layout for this filter */ bcm_sf2_cfp_udf_set(priv, layout, slice_num); @@ -688,10 +718,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5)); /* Slice the IPv6 source address and port */ - bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc, - slice_num, false); - bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc, - SLICE_NUM_MASK, true); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32, + ports.key->src, slice_num, false); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32, + ports.mask->src, SLICE_NUM_MASK, true); /* Insert into TCAM now because we need to insert a second rule */ bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]); @@ -699,20 +729,20 @@ static int 
bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); if (ret) { pr_err("TCAM entry at addr %d failed\n", rule_index[0]); - goto out_err; + goto out_err_flow_rule; } /* Insert into Action and policer RAMs now */ - ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num, + ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num, queue_num, false); if (ret) - goto out_err; + goto out_err_flow_rule; /* Now deal with the second slice to chain this rule */ slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1); if (slice_num == UDF_NUM_SLICES) { ret = -EINVAL; - goto out_err; + goto out_err_flow_rule; } num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices); @@ -748,10 +778,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, /* Mask all */ core_writel(priv, 0, CORE_CFP_MASK_PORT(5)); - bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num, - false); - bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst, - SLICE_NUM_MASK, true); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32, + ports.key->dst, slice_num, false); + bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32, + ports.mask->dst, SLICE_NUM_MASK, true); /* Insert into TCAM now */ bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]); @@ -759,16 +789,16 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL); if (ret) { pr_err("TCAM entry at addr %d failed\n", rule_index[1]); - goto out_err; + goto out_err_flow_rule; } /* Insert into Action and policer RAMs now, set chain ID to * the one we are chained to */ - ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num, + ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num, queue_num, true); if (ret) - goto out_err; + goto out_err_flow_rule; /* Turn on CFP for this rule now */ reg = core_readl(priv, CORE_CFP_CTL_REG); @@ -784,6 +814,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, return ret; +out_err_flow_rule: + ethtool_rx_flow_rule_destroy(flow); out_err: clear_bit(rule_index[1], priv->cfp.used); return ret; @@ -1169,3 +1201,91 @@ int bcm_sf2_cfp_resume(struct dsa_switch *ds) return ret; } + +static const struct bcm_sf2_cfp_stat { + unsigned int offset; + unsigned int ram_loc; + const char *name; +} bcm_sf2_cfp_stats[] = { + { + .offset = CORE_STAT_GREEN_CNTR, + .ram_loc = GREEN_STAT_RAM, + .name = "Green" + }, + { + .offset = CORE_STAT_YELLOW_CNTR, + .ram_loc = YELLOW_STAT_RAM, + .name = "Yellow" + }, + { + .offset = CORE_STAT_RED_CNTR, + .ram_loc = RED_STAT_RAM, + .name = "Red" + }, +}; + +void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *data) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats); + char buf[ETH_GSTRING_LEN]; + unsigned int i, j, iter; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 1; i < priv->num_cfp_rules; i++) { + for (j = 0; j < s; j++) { + snprintf(buf, sizeof(buf), + "CFP%03d_%sCntr", + i, bcm_sf2_cfp_stats[j].name); + iter = (i - 1) * s + j; + strlcpy(data + iter * ETH_GSTRING_LEN, + buf, ETH_GSTRING_LEN); + } + } +} + +void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats); + const struct bcm_sf2_cfp_stat *stat; + unsigned int i, j, iter; + struct cfp_rule 
*rule; + int ret; + + mutex_lock(&priv->cfp.lock); + for (i = 1; i < priv->num_cfp_rules; i++) { + rule = bcm_sf2_cfp_rule_find(priv, port, i); + if (!rule) + continue; + + for (j = 0; j < s; j++) { + stat = &bcm_sf2_cfp_stats[j]; + + bcm_sf2_cfp_rule_addr_set(priv, i); + ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ); + if (ret) + continue; + + iter = (i - 1) * s + j; + data[iter] = core_readl(priv, stat->offset); + } + + } + mutex_unlock(&priv->cfp.lock); +} + +int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + + if (sset != ETH_SS_STATS) + return 0; + + /* 3 counters per CFP rules */ + return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats); +} diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 0a1e530d52b7..67f056206f37 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -400,6 +400,10 @@ enum bcm_sf2_reg_offs { #define CORE_RATE_METER6 0x281e0 #define CIR_REF_CNT_MASK 0x7ffff +#define CORE_STAT_GREEN_CNTR 0x28200 +#define CORE_STAT_YELLOW_CNTR 0x28210 +#define CORE_STAT_RED_CNTR 0x28220 + #define CORE_CFP_CTL_REG 0x28400 #define CFP_EN_MAP_MASK 0x1ff diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 816f34d64736..17482ae09aa5 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -343,7 +343,7 @@ static int __init dsa_loop_init(void) unsigned int i; for (i = 0; i < NUM_FIXED_PHYS; i++) - phydevs[i] = fixed_phy_register(PHY_POLL, &status, -1, NULL); + phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL); return mdio_driver_register(&dsa_loop_drv); } diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index b4f6e1a67dd9..2ffab7ee3d80 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1091,8 +1091,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, return lan9303_enable_processing_port(chip, port); } -static void lan9303_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phy) +static void lan9303_port_disable(struct dsa_switch *ds, int port) { struct lan9303 *chip = ds->priv; diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 693a67f45bef..d8328866908c 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -480,8 +480,7 @@ static int gswip_port_enable(struct dsa_switch *ds, int port, return 0; } -static void gswip_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phy) +static void gswip_port_disable(struct dsa_switch *ds, int port) { struct gswip_priv *priv = ds->priv; @@ -549,7 +548,7 @@ static int gswip_setup(struct dsa_switch *ds) /* disable port fetch/store dma on all ports */ for (i = 0; i < priv->hw_info->max_ports; i++) - gswip_port_disable(ds, i, NULL); + gswip_port_disable(ds, i); /* enable Switch */ gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); @@ -1069,10 +1068,10 @@ static int gswip_probe(struct platform_device *pdev) version = gswip_switch_r(priv, GSWIP_VERSION); /* bring up the mdio bus */ - gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL, - "lantiq,gphy-fw"); + gphy_fw_np = of_get_compatible_child(dev->of_node, "lantiq,gphy-fw"); if (gphy_fw_np) { err = gswip_gphy_fw_list(priv, gphy_fw_np, version); + of_node_put(gphy_fw_np); if (err) { dev_err(dev, "gphy fw probe failed\n"); return err; @@ -1080,13 +1079,12 @@ static int gswip_probe(struct platform_device *pdev) } 
/* bring up the mdio bus */ - mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL, - "lantiq,xrx200-mdio"); + mdio_np = of_get_compatible_child(dev->of_node, "lantiq,xrx200-mdio"); if (mdio_np) { err = gswip_mdio(priv, mdio_np); if (err) { dev_err(dev, "mdio probe failed\n"); - goto gphy_fw; + goto put_mdio_node; } } @@ -1099,7 +1097,7 @@ static int gswip_probe(struct platform_device *pdev) dev_err(dev, "wrong CPU port defined, HW only supports port: %i", priv->hw_info->cpu_port); err = -EINVAL; - goto mdio_bus; + goto disable_switch; } platform_set_drvdata(pdev, priv); @@ -1109,10 +1107,14 @@ static int gswip_probe(struct platform_device *pdev) (version & GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT); return 0; +disable_switch: + gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); + dsa_unregister_switch(priv->ds); mdio_bus: if (mdio_np) mdiobus_unregister(priv->ds->slave_mii_bus); -gphy_fw: +put_mdio_node: + of_node_put(mdio_np); for (i = 0; i < priv->num_gphy_fw; i++) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); return err; @@ -1123,16 +1125,15 @@ static int gswip_remove(struct platform_device *pdev) struct gswip_priv *priv = platform_get_drvdata(pdev); int i; - if (!priv) - return 0; - /* disable the switch */ gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); dsa_unregister_switch(priv->ds); - if (priv->ds->slave_mii_bus) + if (priv->ds->slave_mii_bus) { mdiobus_unregister(priv->ds->slave_mii_bus); + of_node_put(priv->ds->slave_mii_bus->dev.of_node); + } for (i = 0; i < priv->num_gphy_fw; i++) gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); @@ -1162,6 +1163,12 @@ static struct platform_driver gswip_driver = { module_platform_driver(gswip_driver); +MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin"); +MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin"); MODULE_AUTHOR("Hauke Mehrtens "); MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 89ed059bb576..f16e1d7d8615 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -2,24 +2,26 @@ /* * Microchip KSZ9477 switch driver main logic * - * Copyright (C) 2017-2018 Microchip Technology Inc. + * Copyright (C) 2017-2019 Microchip Technology Inc. */ -#include -#include -#include #include #include +#include #include #include -#include #include #include #include #include "ksz_priv.h" -#include "ksz_common.h" #include "ksz9477_reg.h" +#include "ksz_common.h" + +/* Used with variable features to indicate capabilities. */ +#define GBIT_SUPPORT BIT(0) +#define NEW_XMII BIT(1) +#define IS_9893 BIT(2) static const struct { int index; @@ -259,10 +261,84 @@ static int ksz9477_reset_switch(struct ksz_device *dev) return 0; } +static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, + u64 *cnt) +{ + struct ksz_poll_ctx ctx = { + .dev = dev, + .port = port, + .offset = REG_PORT_MIB_CTRL_STAT__4, + }; + struct ksz_port *p = &dev->ports[port]; + u32 data; + int ret; + + /* retain the flush/freeze bit */ + data = p->freeze ? 
MIB_COUNTER_FLUSH_FREEZE : 0; + data |= MIB_COUNTER_READ; + data |= (addr << MIB_COUNTER_INDEX_S); + ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data); + + ret = readx_poll_timeout(ksz_pread32_poll, &ctx, data, + !(data & MIB_COUNTER_READ), 10, 1000); + + /* failed to read MIB. get out of loop */ + if (ret < 0) { + dev_dbg(dev->dev, "Failed to get MIB\n"); + return; + } + + /* count resets upon read */ + ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data); + *cnt += data; +} + +static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt) +{ + addr = ksz9477_mib_names[addr].index; + ksz9477_r_mib_cnt(dev, port, addr, cnt); +} + +static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze) +{ + u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0; + struct ksz_port *p = &dev->ports[port]; + + /* enable/disable the port for flush/freeze function */ + mutex_lock(&p->mib.cnt_mutex); + ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val); + + /* used by MIB counter reading code to know freeze is enabled */ + p->freeze = freeze; + mutex_unlock(&p->mib.cnt_mutex); +} + +static void ksz9477_port_init_cnt(struct ksz_device *dev, int port) +{ + struct ksz_port_mib *mib = &dev->ports[port].mib; + + /* flush all enabled port MIB counters */ + mutex_lock(&mib->cnt_mutex); + ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, + MIB_COUNTER_FLUSH_FREEZE); + ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH); + ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0); + mutex_unlock(&mib->cnt_mutex); + + mib->cnt_ptr = 0; + memset(mib->counters, 0, dev->mib_cnt * sizeof(u64)); +} + static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds, int port) { - return DSA_TAG_PROTO_KSZ9477; + enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477; + struct ksz_device *dev = ds->priv; + + if (dev->features & IS_9893) + proto = DSA_TAG_PROTO_KSZ9893; + return proto; } static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg) @@ -323,6 +399,10 @@ static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg, /* No real PHY after this. */ if (addr >= dev->phy_port_cnt) return 0; + + /* No gigabit support. Do not write to this register. */ + if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000) + return 0; ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val); return 0; @@ -342,47 +422,6 @@ static void ksz9477_get_strings(struct dsa_switch *ds, int port, } } -static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *buf) -{ - struct ksz_device *dev = ds->priv; - int i; - u32 data; - int timeout; - - mutex_lock(&dev->stats_mutex); - - for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { - data = MIB_COUNTER_READ; - data |= ((ksz9477_mib_names[i].index & 0xFF) << - MIB_COUNTER_INDEX_S); - ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data); - - timeout = 1000; - do { - ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4, - &data); - usleep_range(1, 10); - if (!(data & MIB_COUNTER_READ)) - break; - } while (timeout-- > 0); - - /* failed to read MIB. 
get out of loop */ - if (!timeout) { - dev_dbg(dev->dev, "Failed to get MIB\n"); - break; - } - - /* count resets upon read */ - ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data); - - dev->mib_value[i] += (uint64_t)data; - buf[i] = dev->mib_value[i]; - } - - mutex_unlock(&dev->stats_mutex); -} - static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member) { @@ -397,6 +436,7 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, struct ksz_port *p = &dev->ports[port]; u8 data; int member = -1; + int forward = dev->member; ksz_pread8(dev, port, P_STP_CTRL, &data); data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); @@ -424,12 +464,14 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, break; member = dev->host_mask | p->vid_member; + mutex_lock(&dev->dev_mutex); /* Port is a member of a bridge. */ if (dev->br_member & (1 << port)) { dev->member |= (1 << port); member = dev->member; } + mutex_unlock(&dev->dev_mutex); break; case BR_STATE_BLOCKING: data |= PORT_LEARN_DISABLE; @@ -444,6 +486,7 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, ksz_pwrite8(dev, port, P_STP_CTRL, data); p->stp_state = state; + mutex_lock(&dev->dev_mutex); if (data & PORT_RX_ENABLE) dev->rx_ports |= (1 << port); else @@ -464,10 +507,11 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, } /* When topology has changed the function ksz_update_port_member - * should be called to modify port forwarding behavior. However - * as the offload_fwd_mark indication cannot be reported here - * the switch forwarding function is not enabled. + * should be called to modify port forwarding behavior. */ + if (forward != dev->member) + ksz_update_port_member(dev, port); + mutex_unlock(&dev->dev_mutex); } static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) @@ -965,6 +1009,161 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, PORT_MIRROR_SNIFFER, false); } +static void ksz9477_phy_setup(struct ksz_device *dev, int port, + struct phy_device *phy) +{ + /* Only apply to port with PHY. */ + if (port >= dev->phy_port_cnt) + return; + + /* The MAC actually cannot run in 1000 half-duplex mode. */ + phy_remove_link_mode(phy, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + /* PHY does not support gigabit. 
*/ + if (!(dev->features & GBIT_SUPPORT)) + phy_remove_link_mode(phy, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); +} + +static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data) +{ + bool gbit; + + if (dev->features & NEW_XMII) + gbit = !(data & PORT_MII_NOT_1GBIT); + else + gbit = !!(data & PORT_MII_1000MBIT_S1); + return gbit; +} + +static void ksz9477_set_gbit(struct ksz_device *dev, bool gbit, u8 *data) +{ + if (dev->features & NEW_XMII) { + if (gbit) + *data &= ~PORT_MII_NOT_1GBIT; + else + *data |= PORT_MII_NOT_1GBIT; + } else { + if (gbit) + *data |= PORT_MII_1000MBIT_S1; + else + *data &= ~PORT_MII_1000MBIT_S1; + } +} + +static int ksz9477_get_xmii(struct ksz_device *dev, u8 data) +{ + int mode; + + if (dev->features & NEW_XMII) { + switch (data & PORT_MII_SEL_M) { + case PORT_MII_SEL: + mode = 0; + break; + case PORT_RMII_SEL: + mode = 1; + break; + case PORT_GMII_SEL: + mode = 2; + break; + default: + mode = 3; + } + } else { + switch (data & PORT_MII_SEL_M) { + case PORT_MII_SEL_S1: + mode = 0; + break; + case PORT_RMII_SEL_S1: + mode = 1; + break; + case PORT_GMII_SEL_S1: + mode = 2; + break; + default: + mode = 3; + } + } + return mode; +} + +static void ksz9477_set_xmii(struct ksz_device *dev, int mode, u8 *data) +{ + u8 xmii; + + if (dev->features & NEW_XMII) { + switch (mode) { + case 0: + xmii = PORT_MII_SEL; + break; + case 1: + xmii = PORT_RMII_SEL; + break; + case 2: + xmii = PORT_GMII_SEL; + break; + default: + xmii = PORT_RGMII_SEL; + break; + } + } else { + switch (mode) { + case 0: + xmii = PORT_MII_SEL_S1; + break; + case 1: + xmii = PORT_RMII_SEL_S1; + break; + case 2: + xmii = PORT_GMII_SEL_S1; + break; + default: + xmii = PORT_RGMII_SEL_S1; + break; + } + } + *data &= ~PORT_MII_SEL_M; + *data |= xmii; +} + +static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port) +{ + phy_interface_t interface; + bool gbit; + int mode; + u8 data8; + + if (port < dev->phy_port_cnt) + return PHY_INTERFACE_MODE_NA; + ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8); + gbit = ksz9477_get_gbit(dev, data8); + mode = ksz9477_get_xmii(dev, data8); + switch (mode) { + case 2: + interface = PHY_INTERFACE_MODE_GMII; + if (gbit) + break; + case 0: + interface = PHY_INTERFACE_MODE_MII; + break; + case 1: + interface = PHY_INTERFACE_MODE_RMII; + break; + default: + interface = PHY_INTERFACE_MODE_RGMII; + if (data8 & PORT_RGMII_ID_EG_ENABLE) + interface = PHY_INTERFACE_MODE_RGMII_TXID; + if (data8 & PORT_RGMII_ID_IG_ENABLE) { + interface = PHY_INTERFACE_MODE_RGMII_RXID; + if (data8 & PORT_RGMII_ID_EG_ENABLE) + interface = PHY_INTERFACE_MODE_RGMII_ID; + } + break; + } + return interface; +} + static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) { u8 data8; @@ -1011,24 +1210,25 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* configure MAC to 1G & RGMII mode */ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8); - data8 &= ~PORT_MII_NOT_1GBIT; - data8 &= ~PORT_MII_SEL_M; switch (dev->interface) { case PHY_INTERFACE_MODE_MII: - data8 |= PORT_MII_NOT_1GBIT; - data8 |= PORT_MII_SEL; + ksz9477_set_xmii(dev, 0, &data8); + ksz9477_set_gbit(dev, false, &data8); p->phydev.speed = SPEED_100; break; case PHY_INTERFACE_MODE_RMII: - data8 |= PORT_MII_NOT_1GBIT; - data8 |= PORT_RMII_SEL; + ksz9477_set_xmii(dev, 1, &data8); + ksz9477_set_gbit(dev, false, &data8); p->phydev.speed = SPEED_100; break; case PHY_INTERFACE_MODE_GMII: - data8 |= PORT_GMII_SEL; + ksz9477_set_xmii(dev, 2, &data8); + ksz9477_set_gbit(dev, true, 
&data8); p->phydev.speed = SPEED_1000; break; default: + ksz9477_set_xmii(dev, 3, &data8); + ksz9477_set_gbit(dev, true, &data8); data8 &= ~PORT_RGMII_ID_IG_ENABLE; data8 &= ~PORT_RGMII_ID_EG_ENABLE; if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || @@ -1037,13 +1237,13 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || dev->interface == PHY_INTERFACE_MODE_RGMII_TXID) data8 |= PORT_RGMII_ID_EG_ENABLE; - data8 |= PORT_RGMII_SEL; p->phydev.speed = SPEED_1000; break; } ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8); p->phydev.duplex = 1; } + mutex_lock(&dev->dev_mutex); if (cpu_port) { member = dev->port_mask; dev->on_ports = dev->host_mask; @@ -1056,6 +1256,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) if (p->phydev.link) dev->live_ports |= (1 << port); } + mutex_unlock(&dev->dev_mutex); ksz9477_cfg_port_member(dev, port, member); /* clear pending interrupts */ @@ -1073,10 +1274,25 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) for (i = 0; i < dev->port_cnt; i++) { if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) { + phy_interface_t interface; + dev->cpu_port = i; dev->host_mask = (1 << dev->cpu_port); dev->port_mask |= dev->host_mask; + /* Read from XMII register to determine host port + * interface. If set specifically in device tree + * note the difference to help debugging. + */ + interface = ksz9477_get_interface(dev, i); + if (!dev->interface) + dev->interface = interface; + if (interface && interface != dev->interface) + dev_info(dev->dev, + "use %s instead of %s\n", + phy_modes(dev->interface), + phy_modes(interface)); + /* enable cpu port */ ksz9477_port_setup(dev, i, true); p = &dev->ports[dev->cpu_port]; @@ -1130,6 +1346,9 @@ static int ksz9477_setup(struct dsa_switch *ds) ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, true); + /* Do not work correctly with tail tagging. */ + ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false); + /* accept packet up to 2000bytes */ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true); @@ -1140,9 +1359,14 @@ static int ksz9477_setup(struct dsa_switch *ds) /* queue based egress rate limit */ ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true); + /* enable global MIB counter freeze function */ + ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true); + /* start switch */ ksz_cfg(dev, REG_SW_OPERATION, SW_START, true); + ksz_init_mib_timer(dev); + return 0; } @@ -1151,6 +1375,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .setup = ksz9477_setup, .phy_read = ksz9477_phy_read16, .phy_write = ksz9477_phy_write16, + .adjust_link = ksz_adjust_link, .port_enable = ksz_enable_port, .port_disable = ksz_disable_port, .get_strings = ksz9477_get_strings, @@ -1182,6 +1407,8 @@ static u32 ksz9477_get_port_addr(int port, int offset) static int ksz9477_switch_detect(struct ksz_device *dev) { u8 data8; + u8 id_hi; + u8 id_lo; u32 id32; int ret; @@ -1197,6 +1424,9 @@ static int ksz9477_switch_detect(struct ksz_device *dev) /* read chip id */ ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32); + if (ret) + return ret; + ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8); if (ret) return ret; @@ -1204,6 +1434,32 @@ static int ksz9477_switch_detect(struct ksz_device *dev) dev->mib_port_cnt = TOTAL_PORT_NUM; dev->phy_port_cnt = 5; + /* Default capability is gigabit capable. 
*/ + dev->features = GBIT_SUPPORT; + + id_hi = (u8)(id32 >> 16); + id_lo = (u8)(id32 >> 8); + if ((id_lo & 0xf) == 3) { + /* Chip is from KSZ9893 design. */ + dev->features |= IS_9893; + + /* Chip does not support gigabit. */ + if (data8 & SW_QW_ABLE) + dev->features &= ~GBIT_SUPPORT; + dev->mib_port_cnt = 3; + dev->phy_port_cnt = 2; + } else { + /* Chip uses new XMII register definitions. */ + dev->features |= NEW_XMII; + + /* Chip does not support gigabit. */ + if (!(data8 & SW_GIGABIT_ABLE)) + dev->features &= ~GBIT_SUPPORT; + } + + /* Change chip id to known ones so it can be matched against them. */ + id32 = (id_hi << 16) | (id_lo << 8); + dev->chip_id = id32; return 0; @@ -1238,6 +1494,15 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = { .cpu_ports = 0x7F, /* can be configured as cpu port */ .port_cnt = 7, /* total physical port count */ }, + { + .chip_id = 0x00989300, + .dev_name = "KSZ9893", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x07, /* can be configured as cpu port */ + .port_cnt = 3, /* total port count */ + }, }; static int ksz9477_switch_init(struct ksz_device *dev) @@ -1276,6 +1541,7 @@ static int ksz9477_switch_init(struct ksz_device *dev) if (!dev->ports) return -ENOMEM; for (i = 0; i < dev->mib_port_cnt; i++) { + mutex_init(&dev->ports[i].mib.cnt_mutex); dev->ports[i].mib.counters = devm_kzalloc(dev->dev, sizeof(u64) * @@ -1284,7 +1550,6 @@ static int ksz9477_switch_init(struct ksz_device *dev) if (!dev->ports[i].mib.counters) return -ENOMEM; } - dev->interface = PHY_INTERFACE_MODE_RGMII_TXID; return 0; } @@ -1298,7 +1563,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = { .get_port_addr = ksz9477_get_port_addr, .cfg_port_member = ksz9477_cfg_port_member, .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, + .phy_setup = ksz9477_phy_setup, .port_setup = ksz9477_port_setup, + .r_mib_cnt = ksz9477_r_mib_cnt, + .r_mib_pkt = ksz9477_r_mib_pkt, + .freeze_mib = ksz9477_freeze_mib, + .port_init_cnt = ksz9477_port_init_cnt, .shutdown = ksz9477_reset_switch, .detect = ksz9477_switch_detect, .init = ksz9477_switch_init, diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c index d757ba151cb1..75178624d3f5 100644 --- a/drivers/net/dsa/microchip/ksz9477_spi.c +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -2,7 +2,7 @@ /* * Microchip KSZ9477 series register access through SPI * - * Copyright (C) 2017-2018 Microchip Technology Inc. + * Copyright (C) 2017-2019 Microchip Technology Inc. */ #include @@ -155,6 +155,8 @@ static void ksz9477_spi_shutdown(struct spi_device *spi) static const struct of_device_id ksz9477_dt_ids[] = { { .compatible = "microchip,ksz9477" }, { .compatible = "microchip,ksz9897" }, + { .compatible = "microchip,ksz9893" }, + { .compatible = "microchip,ksz9563" }, {}, }; MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8a5111f9414c..39dace8e3512 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -2,7 +2,7 @@ /* * Microchip switch driver main logic * - * Copyright (C) 2017-2018 Microchip Technology Inc. + * Copyright (C) 2017-2019 Microchip Technology Inc. */ #include @@ -20,6 +20,16 @@ #include "ksz_priv.h" +void ksz_port_cleanup(struct ksz_device *dev, int port) +{ + /* Common code for port cleanup. 
*/ + mutex_lock(&dev->dev_mutex); + dev->on_ports &= ~(1 << port); + dev->live_ports &= ~(1 << port); + mutex_unlock(&dev->dev_mutex); +} +EXPORT_SYMBOL_GPL(ksz_port_cleanup); + void ksz_update_port_member(struct ksz_device *dev, int port) { struct ksz_port *p; @@ -40,6 +50,85 @@ void ksz_update_port_member(struct ksz_device *dev, int port) } EXPORT_SYMBOL_GPL(ksz_update_port_member); +static void port_r_cnt(struct ksz_device *dev, int port) +{ + struct ksz_port_mib *mib = &dev->ports[port].mib; + u64 *dropped; + + /* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */ + while (mib->cnt_ptr < dev->reg_mib_cnt) { + dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr, + &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + + /* last one in storage */ + dropped = &mib->counters[dev->mib_cnt]; + + /* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */ + while (mib->cnt_ptr < dev->mib_cnt) { + dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr, + dropped, &mib->counters[mib->cnt_ptr]); + ++mib->cnt_ptr; + } + mib->cnt_ptr = 0; +} + +static void ksz_mib_read_work(struct work_struct *work) +{ + struct ksz_device *dev = container_of(work, struct ksz_device, + mib_read); + struct ksz_port_mib *mib; + struct ksz_port *p; + int i; + + for (i = 0; i < dev->mib_port_cnt; i++) { + p = &dev->ports[i]; + mib = &p->mib; + mutex_lock(&mib->cnt_mutex); + + /* Only read MIB counters when the port is told to do. + * If not, read only dropped counters when link is not up. + */ + if (!p->read) { + const struct dsa_port *dp = dsa_to_port(dev->ds, i); + + if (!netif_carrier_ok(dp->slave)) + mib->cnt_ptr = dev->reg_mib_cnt; + } + port_r_cnt(dev, i); + p->read = false; + mutex_unlock(&mib->cnt_mutex); + } +} + +static void mib_monitor(struct timer_list *t) +{ + struct ksz_device *dev = from_timer(dev, t, mib_read_timer); + + mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval); + schedule_work(&dev->mib_read); +} + +void ksz_init_mib_timer(struct ksz_device *dev) +{ + int i; + + /* Read MIB counters every 30 seconds to avoid overflow. */ + dev->mib_read_interval = msecs_to_jiffies(30000); + + INIT_WORK(&dev->mib_read, ksz_mib_read_work); + timer_setup(&dev->mib_read_timer, mib_monitor, 0); + + for (i = 0; i < dev->mib_port_cnt; i++) + dev->dev_ops->port_init_cnt(dev, i); + + /* Start the timer 2 seconds later. */ + dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000); + add_timer(&dev->mib_read_timer); +} +EXPORT_SYMBOL_GPL(ksz_init_mib_timer); + int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg) { struct ksz_device *dev = ds->priv; @@ -61,6 +150,27 @@ int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) } EXPORT_SYMBOL_GPL(ksz_phy_write16); +void ksz_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p = &dev->ports[port]; + + /* Read all MIB counters when the link is going down. */ + if (!phydev->link) { + p->read = true; + schedule_work(&dev->mib_read); + } + mutex_lock(&dev->dev_mutex); + if (!phydev->link) + dev->live_ports &= ~(1 << port); + else + /* Remember which port is connected and active. 
*/ + dev->live_ports |= (1 << port) & dev->on_ports; + mutex_unlock(&dev->dev_mutex); +} +EXPORT_SYMBOL_GPL(ksz_adjust_link); + int ksz_sset_count(struct dsa_switch *ds, int port, int sset) { struct ksz_device *dev = ds->priv; @@ -72,12 +182,32 @@ int ksz_sset_count(struct dsa_switch *ds, int port, int sset) } EXPORT_SYMBOL_GPL(ksz_sset_count); +void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf) +{ + const struct dsa_port *dp = dsa_to_port(ds, port); + struct ksz_device *dev = ds->priv; + struct ksz_port_mib *mib; + + mib = &dev->ports[port].mib; + mutex_lock(&mib->cnt_mutex); + + /* Only read dropped counters if no link. */ + if (!netif_carrier_ok(dp->slave)) + mib->cnt_ptr = dev->reg_mib_cnt; + port_r_cnt(dev, port); + memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64)); + mutex_unlock(&mib->cnt_mutex); +} +EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats); + int ksz_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br) { struct ksz_device *dev = ds->priv; + mutex_lock(&dev->dev_mutex); dev->br_member |= (1 << port); + mutex_unlock(&dev->dev_mutex); /* port_stp_state_set() will be called after to put the port in * appropriate state so there is no need to do anything. @@ -92,8 +222,10 @@ void ksz_port_bridge_leave(struct dsa_switch *ds, int port, { struct ksz_device *dev = ds->priv; + mutex_lock(&dev->dev_mutex); dev->br_member &= ~(1 << port); dev->member &= ~(1 << port); + mutex_unlock(&dev->dev_mutex); /* port_stp_state_set() will be called after to put the port in * forwarding state so there is no need to do anything. @@ -238,6 +370,7 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) /* setup slave port */ dev->dev_ops->port_setup(dev, port, false); + dev->dev_ops->phy_setup(dev, port, phy); /* port_stp_state_set() will be called after to enable the port so * there is no need to do anything. @@ -247,7 +380,7 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) } EXPORT_SYMBOL_GPL(ksz_enable_port); -void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy) +void ksz_disable_port(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; @@ -305,6 +438,7 @@ int ksz_switch_register(struct ksz_device *dev, gpiod_set_value(dev->reset_gpio, 0); } + mutex_init(&dev->dev_mutex); mutex_init(&dev->reg_mutex); mutex_init(&dev->stats_mutex); mutex_init(&dev->alu_mutex); @@ -319,7 +453,9 @@ int ksz_switch_register(struct ksz_device *dev, if (ret) return ret; - dev->interface = PHY_INTERFACE_MODE_MII; + /* Host port interface will be self detected, or specifically set in + * device tree. + */ if (dev->dev->of_node) { ret = of_get_phy_mode(dev->dev->of_node); if (ret >= 0) @@ -338,6 +474,12 @@ EXPORT_SYMBOL(ksz_switch_register); void ksz_switch_remove(struct ksz_device *dev) { + /* timer started */ + if (dev->mib_read_timer.expires) { + del_timer_sync(&dev->mib_read_timer); + flush_work(&dev->mib_read); + } + dev->dev_ops->exit(dev); dsa_unregister_switch(dev->ds); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 2dd832de0d52..21cd794e18f1 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -1,19 +1,24 @@ /* SPDX-License-Identifier: GPL-2.0 * Microchip switch driver common header * - * Copyright (C) 2017-2018 Microchip Technology Inc. + * Copyright (C) 2017-2019 Microchip Technology Inc. 
*/ #ifndef __KSZ_COMMON_H #define __KSZ_COMMON_H +void ksz_port_cleanup(struct ksz_device *dev, int port); void ksz_update_port_member(struct ksz_device *dev, int port); +void ksz_init_mib_timer(struct ksz_device *dev); /* Common DSA access functions */ int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg); int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val); +void ksz_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev); int ksz_sset_count(struct dsa_switch *ds, int port, int sset); +void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf); int ksz_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br); void ksz_port_bridge_leave(struct dsa_switch *ds, int port, @@ -30,7 +35,7 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port, int ksz_port_mdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb); int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); -void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy); +void ksz_disable_port(struct dsa_switch *ds, int port); /* Common register access functions */ @@ -211,4 +216,18 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, ksz_write8(dev, addr, data); } +struct ksz_poll_ctx { + struct ksz_device *dev; + int port; + int offset; +}; + +static inline u32 ksz_pread32_poll(struct ksz_poll_ctx *ctx) +{ + u32 data; + + ksz_pread32(ctx->dev, ctx->port, ctx->offset, &data); + return data; +} + #endif diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h index 60b49010904b..b52e5ca17ab4 100644 --- a/drivers/net/dsa/microchip/ksz_priv.h +++ b/drivers/net/dsa/microchip/ksz_priv.h @@ -2,7 +2,7 @@ * * Microchip KSZ series switch common definitions * - * Copyright (C) 2017-2018 Microchip Technology Inc. + * Copyright (C) 2017-2019 Microchip Technology Inc. 
*/ #ifndef __KSZ_PRIV_H @@ -14,8 +14,6 @@ #include #include -#include "ksz9477_reg.h" - struct ksz_io_ops; struct vlan_table { @@ -23,6 +21,7 @@ struct vlan_table { }; struct ksz_port_mib { + struct mutex cnt_mutex; /* structure access */ u8 cnt_ptr; u64 *counters; }; @@ -38,7 +37,8 @@ struct ksz_port { u32 fiber:1; /* port is fiber */ u32 sgmii:1; /* port is SGMII */ u32 force:1; - u32 link_just_down:1; /* link just goes down */ + u32 read:1; /* read MIB counters in background */ + u32 freeze:1; /* MIB counter freeze is enabled */ struct ksz_port_mib mib; }; @@ -48,6 +48,7 @@ struct ksz_device { struct ksz_platform_data *pdata; const char *name; + struct mutex dev_mutex; /* device access */ struct mutex reg_mutex; /* register access */ struct mutex stats_mutex; /* status access */ struct mutex alu_mutex; /* ALU access */ @@ -79,8 +80,6 @@ struct ksz_device { struct vlan_table *vlan_cache; - u64 mib_value[TOTAL_SWITCH_COUNTER_NUM]; - u8 *txbuf; struct ksz_port *ports; @@ -137,6 +136,9 @@ struct ksz_dev_ops { u32 (*get_port_addr)(int port, int offset); void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); + void (*phy_setup)(struct ksz_device *dev, int port, + struct phy_device *phy); + void (*port_cleanup)(struct ksz_device *dev, int port); void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val); @@ -151,6 +153,7 @@ struct ksz_dev_ops { u64 *cnt); void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr, u64 *dropped, u64 *cnt); + void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze); void (*port_init_cnt)(struct ksz_device *dev, int port); int (*shutdown)(struct ksz_device *dev); int (*detect)(struct ksz_device *dev); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index a8a2c728afba..7357b4fc0185 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -621,17 +621,19 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, struct mt7530_priv *priv = ds->priv; if (phy_is_pseudo_fixed_link(phydev)) { - dev_dbg(priv->dev, "phy-mode for master device = %x\n", - phydev->interface); - - /* Setup TX circuit incluing relevant PAD and driving */ - mt7530_pad_clk_setup(ds, phydev->interface); - - /* Setup RX circuit, relevant PAD and driving on the host - * which must be placed after the setup on the device side is - * all finished. - */ - mt7623_pad_clk_setup(ds); + if (priv->id == ID_MT7530) { + dev_dbg(priv->dev, "phy-mode for master device = %x\n", + phydev->interface); + + /* Setup TX circuit incluing relevant PAD and driving */ + mt7530_pad_clk_setup(ds, phydev->interface); + + /* Setup RX circuit, relevant PAD and driving on the + * host which must be placed after the setup on the + * device side is all finished. 
+ */ + mt7623_pad_clk_setup(ds); + } } else { u16 lcl_adv = 0, rmt_adv = 0; u8 flowctrl; @@ -644,7 +646,7 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, case SPEED_100: mcr |= PMCR_FORCE_SPEED_100; break; - }; + } if (phydev->link) mcr |= PMCR_FORCE_LNK; @@ -687,6 +689,10 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv, /* Unknown unicast frame fordwarding to the cpu port */ mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port))); + /* Set CPU port number */ + if (priv->id == ID_MT7621) + mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port)); + /* CPU port gets connected to all user ports of * the switch */ @@ -723,8 +729,7 @@ mt7530_port_enable(struct dsa_switch *ds, int port, } static void -mt7530_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phy) +mt7530_port_disable(struct dsa_switch *ds, int port) { struct mt7530_priv *priv = ds->priv; @@ -1219,24 +1224,27 @@ mt7530_setup(struct dsa_switch *ds) * as two netdev instances. */ dn = ds->ports[MT7530_CPU_PORT].master->dev.of_node->parent; - priv->ethernet = syscon_node_to_regmap(dn); - if (IS_ERR(priv->ethernet)) - return PTR_ERR(priv->ethernet); - regulator_set_voltage(priv->core_pwr, 1000000, 1000000); - ret = regulator_enable(priv->core_pwr); - if (ret < 0) { - dev_err(priv->dev, - "Failed to enable core power: %d\n", ret); - return ret; - } + if (priv->id == ID_MT7530) { + priv->ethernet = syscon_node_to_regmap(dn); + if (IS_ERR(priv->ethernet)) + return PTR_ERR(priv->ethernet); + + regulator_set_voltage(priv->core_pwr, 1000000, 1000000); + ret = regulator_enable(priv->core_pwr); + if (ret < 0) { + dev_err(priv->dev, + "Failed to enable core power: %d\n", ret); + return ret; + } - regulator_set_voltage(priv->io_pwr, 3300000, 3300000); - ret = regulator_enable(priv->io_pwr); - if (ret < 0) { - dev_err(priv->dev, "Failed to enable io pwr: %d\n", - ret); - return ret; + regulator_set_voltage(priv->io_pwr, 3300000, 3300000); + ret = regulator_enable(priv->io_pwr); + if (ret < 0) { + dev_err(priv->dev, "Failed to enable io pwr: %d\n", + ret); + return ret; + } } /* Reset whole chip through gpio pin or memory-mapped registers for @@ -1292,7 +1300,7 @@ mt7530_setup(struct dsa_switch *ds) if (dsa_is_cpu_port(ds, i)) mt7530_cpu_port_enable(priv, i); else - mt7530_port_disable(ds, i, NULL); + mt7530_port_disable(ds, i); } /* Flush the FDB table */ @@ -1326,6 +1334,13 @@ static const struct dsa_switch_ops mt7530_switch_ops = { .port_vlan_del = mt7530_port_vlan_del, }; +static const struct of_device_id mt7530_of_match[] = { + { .compatible = "mediatek,mt7621", .data = (void *)ID_MT7621, }, + { .compatible = "mediatek,mt7530", .data = (void *)ID_MT7530, }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, mt7530_of_match); + static int mt7530_probe(struct mdio_device *mdiodev) { @@ -1356,13 +1371,21 @@ mt7530_probe(struct mdio_device *mdiodev) } } - priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core"); - if (IS_ERR(priv->core_pwr)) - return PTR_ERR(priv->core_pwr); + /* Get the hardware identifier from the devicetree node. + * We will need it for some of the clock and regulator setup. 
+ */ + priv->id = (unsigned int)(unsigned long) + of_device_get_match_data(&mdiodev->dev); + + if (priv->id == ID_MT7530) { + priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core"); + if (IS_ERR(priv->core_pwr)) + return PTR_ERR(priv->core_pwr); - priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io"); - if (IS_ERR(priv->io_pwr)) - return PTR_ERR(priv->io_pwr); + priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io"); + if (IS_ERR(priv->io_pwr)) + return PTR_ERR(priv->io_pwr); + } /* Not MCM that indicates switch works as the remote standalone * integrated circuit so the GPIO pin would be used to complete @@ -1408,12 +1431,6 @@ mt7530_remove(struct mdio_device *mdiodev) mutex_destroy(&priv->reg_mutex); } -static const struct of_device_id mt7530_of_match[] = { - { .compatible = "mediatek,mt7530" }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, mt7530_of_match); - static struct mdio_driver mt7530_mdio_driver = { .probe = mt7530_probe, .remove = mt7530_remove, diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index d9b407a22a58..a95ed958df5b 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -19,6 +19,11 @@ #define MT7530_NUM_FDB_RECORDS 2048 #define MT7530_ALL_MEMBERS 0xff +enum { + ID_MT7530 = 0, + ID_MT7621 = 1, +}; + #define NUM_TRGMII_CTRL 5 #define TRGMII_BASE(x) (0x10000 + (x)) @@ -36,6 +41,9 @@ #define UNM_FFP(x) (((x) & 0xff) << 16) #define UNU_FFP(x) (((x) & 0xff) << 8) #define UNU_FFP_MASK UNU_FFP(~0) +#define CPU_EN BIT(7) +#define CPU_PORT(x) ((x) << 4) +#define CPU_MASK (0xf << 4) /* Registers for address table access */ #define MT7530_ATA1 0x74 @@ -430,6 +438,7 @@ struct mt7530_priv { struct regulator *core_pwr; struct regulator *io_pwr; struct gpio_desc *reset; + unsigned int id; bool mcm; struct mt7530_port ports[MT7530_NUM_PORTS]; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dca2c949e73..f4e2db44ad91 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) unsigned int sub_irq; unsigned int n; u16 reg; + u16 ctl1; int err; mutex_lock(&chip->reg_lock); @@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) if (err) goto out; - for (n = 0; n < chip->g1_irq.nirqs; ++n) { - if (reg & (1 << n)) { - sub_irq = irq_find_mapping(chip->g1_irq.domain, n); - handle_nested_irq(sub_irq); - ++nhandled; + do { + for (n = 0; n < chip->g1_irq.nirqs; ++n) { + if (reg & (1 << n)) { + sub_irq = irq_find_mapping(chip->g1_irq.domain, + n); + handle_nested_irq(sub_irq); + ++nhandled; + } } - } + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); + if (err) + goto unlock; + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); +unlock: + mutex_unlock(&chip->reg_lock); + if (err) + goto out; + ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); + } while (reg & ctl1); + out: return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); } @@ -426,16 +442,26 @@ out_mapping: static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) { + static struct lock_class_key lock_key; + static struct lock_class_key request_key; int err; err = mv88e6xxx_g1_irq_setup_common(chip); if (err) return err; + /* These lock classes tells lockdep that global 1 irqs are in + * a different category than their parent GPIO, so it won't + * report false recursion. 
+ */ + irq_set_lockdep_class(chip->irq, &lock_key, &request_key); + + mutex_unlock(&chip->reg_lock); err = request_threaded_irq(chip->irq, NULL, mv88e6xxx_g1_irq_thread_fn, IRQF_ONESHOT | IRQF_SHARED, dev_name(chip->dev), chip); + mutex_lock(&chip->reg_lock); if (err) mv88e6xxx_g1_irq_free_common(chip); @@ -464,7 +490,7 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip) kthread_init_delayed_work(&chip->irq_poll_work, mv88e6xxx_irq_poll); - chip->kworker = kthread_create_worker(0, dev_name(chip->dev)); + chip->kworker = kthread_create_worker(0, "%s", dev_name(chip->dev)); if (IS_ERR(chip->kworker)) return PTR_ERR(chip->kworker); @@ -523,9 +549,9 @@ int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update) return mv88e6xxx_write(chip, addr, reg, val); } -static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, - int link, int speed, int duplex, int pause, - phy_interface_t mode) +int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, + int speed, int duplex, int pause, + phy_interface_t mode) { int err; @@ -543,6 +569,9 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, goto restore_link; } + if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode) + mode = chip->info->ops->port_max_speed_mode(port); + if (chip->info->ops->port_set_pause) { err = chip->info->ops->port_set_pause(chip, port, pause); if (err) @@ -632,6 +661,20 @@ static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port, mv88e6065_phylink_validate(chip, port, mask, state); } +static void mv88e6341_phylink_validate(struct mv88e6xxx_chip *chip, int port, + unsigned long *mask, + struct phylink_link_state *state) +{ + if (port >= 5) + phylink_set(mask, 2500baseX_Full); + + /* No ethtool bits for 200Mbps */ + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + mv88e6065_phylink_validate(chip, port, mask, state); +} + static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port, unsigned long *mask, struct phylink_link_state *state) @@ -647,8 +690,10 @@ static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port, unsigned long *mask, struct phylink_link_state *state) { - if (port >= 9) + if (port >= 9) { phylink_set(mask, 2500baseX_Full); + phylink_set(mask, 2500baseT_Full); + } /* No ethtool bits for 200Mbps */ phylink_set(mask, 1000baseT_Full); @@ -880,7 +925,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, default: return U64_MAX; } - value = (((u64)high) << 16) | low; + value = (((u64)high) << 32) | low; return value; } @@ -2360,8 +2405,7 @@ static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port, return err; } -static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port) { struct mv88e6xxx_chip *chip = ds->priv; @@ -3026,6 +3070,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6341_port_set_speed, + .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3052,7 +3097,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6341_serdes_power, 
.gpio_ops = &mv88e6352_gpio_ops, - .phylink_validate = mv88e6390_phylink_validate, + .phylink_validate = mv88e6341_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6161_ops = { @@ -3077,7 +3122,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6185_port_get_cmode, - .stats_snapshot = mv88e6320_g1_stats_snapshot, + .stats_snapshot = mv88e6xxx_g1_stats_snapshot, .stats_set_histogram = mv88e6095_g1_stats_set_histogram, .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, @@ -3344,6 +3389,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6390_port_set_speed, + .port_max_speed_mode = mv88e6390_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3388,6 +3434,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6390x_port_set_speed, + .port_max_speed_mode = mv88e6390x_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3432,6 +3479,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6390_port_set_speed, + .port_max_speed_mode = mv88e6390_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3525,6 +3573,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6390_port_set_speed, + .port_max_speed_mode = mv88e6390_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3656,6 +3705,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6341_port_set_speed, + .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3684,7 +3734,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, - .phylink_validate = mv88e6390_phylink_validate, + .phylink_validate = mv88e6341_phylink_validate, }; static const struct mv88e6xxx_ops mv88e6350_ops = { @@ -3831,6 +3881,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6390_port_set_speed, + .port_max_speed_mode = mv88e6390_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, 
.port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -3879,6 +3930,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, .port_set_speed = mv88e6390x_port_set_speed, + .port_max_speed_mode = mv88e6390x_port_max_speed_mode, .port_tag_remap = mv88e6390_port_tag_remap, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, @@ -4206,7 +4258,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4229,7 +4281,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6190X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4252,7 +4304,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6191", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .max_vid = 8191, .port_base_addr = 0x0, .phy_base_addr = 0x0, @@ -4299,7 +4351,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6290", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4461,7 +4513,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4484,7 +4536,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .name = "Marvell 88E6390X", .num_databases = 4096, .num_ports = 11, /* 10 + Z80 */ - .num_internal_phys = 11, + .num_internal_phys = 9, .num_gpio = 16, .max_vid = 8191, .port_base_addr = 0x0, @@ -4579,6 +4631,14 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, return 0; } +static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip) +{ + int i; + + for (i = 0; i < mv88e6xxx_num_ports(chip); i++) + chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID; +} + static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds, int port) { @@ -4615,6 +4675,8 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, if (err) goto free; + mv88e6xxx_ports_cmode_init(chip); + mutex_lock(&chip->reg_lock); err = mv88e6xxx_switch_reset(chip); mutex_unlock(&chip->reg_lock); @@ -4674,6 +4736,22 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, return err; } +static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port, + bool unicast, bool multicast) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err = -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + if (chip->info->ops->port_set_egress_floods) + err = chip->info->ops->port_set_egress_floods(chip, port, + unicast, + multicast); + mutex_unlock(&chip->reg_lock); + + return err; +} + static const struct dsa_switch_ops mv88e6xxx_switch_ops = { #if IS_ENABLED(CONFIG_NET_DSA_LEGACY) .probe = mv88e6xxx_drv_probe, @@ -4701,6 +4779,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .set_ageing_time = mv88e6xxx_set_ageing_time, .port_bridge_join = mv88e6xxx_port_bridge_join, 
.port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_egress_floods = mv88e6xxx_port_egress_floods, .port_stp_state_set = mv88e6xxx_port_stp_state_set, .port_fast_age = mv88e6xxx_port_fast_age, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, @@ -4764,6 +4843,21 @@ static const void *pdata_device_get_match_data(struct device *dev) return NULL; } +/* There is no suspend to RAM support at the DSA level yet; the switch configuration + * would be lost after a power cycle, so prevent it from being suspended. + */ +static int __maybe_unused mv88e6xxx_suspend(struct device *dev) +{ + return -EOPNOTSUPP; +} + +static int __maybe_unused mv88e6xxx_resume(struct device *dev) +{ + return 0; +} + +static SIMPLE_DEV_PM_OPS(mv88e6xxx_pm_ops, mv88e6xxx_suspend, mv88e6xxx_resume); + static int mv88e6xxx_probe(struct mdio_device *mdiodev) { struct dsa_mv88e6xxx_pdata *pdata = mdiodev->dev.platform_data; @@ -4821,6 +4915,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out; + mv88e6xxx_ports_cmode_init(chip); mv88e6xxx_phy_init(chip); if (chip->info->ops->get_eeprom) { @@ -4948,6 +5043,7 @@ static struct mdio_driver mv88e6xxx_driver = { .mdiodrv.driver = { .name = "mv88e6085", .of_match_table = mv88e6xxx_of_match, + .pm = &mv88e6xxx_pm_ops, }, }; diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 546651d8c3e1..19c07dff0440 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -377,6 +377,9 @@ struct mv88e6xxx_ops { */ int (*port_set_speed)(struct mv88e6xxx_chip *chip, int port, int speed); + /* What interface mode should be used for maximum speed? */ + phy_interface_t (*port_max_speed_mode)(int port); + int (*port_tag_remap)(struct mv88e6xxx_chip *chip, int port); int (*port_set_frame_mode)(struct mv88e6xxx_chip *chip, int port, @@ -579,6 +582,9 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update); int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask); +int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link, + int speed, int duplex, int pause, + phy_interface_t mode); struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip); #endif /* _MV88E6XXX_CHIP_H */ diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 5200e4bdce93..ea243840ee0f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c @@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) { struct mv88e6xxx_chip *chip = dev_id; struct mv88e6xxx_atu_entry entry; + int spid; int err; u16 val; @@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (err) goto out; + spid = entry.state; + if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { dev_err_ratelimited(chip->dev, "ATU age out violation for %pM\n", @@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU member violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_member_violation++; + "ATU member violation for %pM portvec %x spid %d\n", - entry.mac, entry.portvec, spid); + chip->ports[spid].atu_member_violation++; } if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { dev_err_ratelimited(chip->dev, -
"ATU miss violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_miss_violation++; + "ATU miss violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_miss_violation++; } if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { dev_err_ratelimited(chip->dev, - "ATU full violation for %pM portvec %x\n", - entry.mac, entry.portvec); - chip->ports[entry.portvec].atu_full_violation++; + "ATU full violation for %pM portvec %x spid %d\n", + entry.mac, entry.portvec, spid); + chip->ports[spid].atu_full_violation++; } mutex_unlock(&chip->reg_lock); diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index ebd26b6a93e6..dce84a2a65c7 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -190,7 +190,7 @@ int mv88e6xxx_port_set_duplex(struct mv88e6xxx_chip *chip, int port, int dup) /* normal duplex detection */ break; default: - return -EINVAL; + return -EOPNOTSUPP; } err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg); @@ -312,6 +312,14 @@ int mv88e6341_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) return mv88e6xxx_port_set_speed(chip, port, speed, !port, true); } +phy_interface_t mv88e6341_port_max_speed_mode(int port) +{ + if (port == 5) + return PHY_INTERFACE_MODE_2500BASEX; + + return PHY_INTERFACE_MODE_NA; +} + /* Support 10, 100, 200, 1000 Mbps (e.g. 88E6352 family) */ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) { @@ -345,6 +353,14 @@ int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) return mv88e6xxx_port_set_speed(chip, port, speed, true, true); } +phy_interface_t mv88e6390_port_max_speed_mode(int port) +{ + if (port == 9 || port == 10) + return PHY_INTERFACE_MODE_2500BASEX; + + return PHY_INTERFACE_MODE_NA; +} + /* Support 10, 100, 200, 1000, 2500, 10000 Mbps (e.g. 
88E6190X) */ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) { @@ -360,6 +376,14 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed) return mv88e6xxx_port_set_speed(chip, port, speed, true, true); } +phy_interface_t mv88e6390x_port_max_speed_mode(int port) +{ + if (port == 9 || port == 10) + return PHY_INTERFACE_MODE_XAUI; + + return PHY_INTERFACE_MODE_NA; +} + int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode) { @@ -398,6 +422,10 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, cmode = 0; } + /* cmode doesn't change, nothing to do for us */ + if (cmode == chip->ports[port].cmode) + return 0; + lane = mv88e6390x_serdes_get_lane(chip, port); if (lane < 0) return lane; @@ -408,7 +436,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return err; } - err = mv88e6390_serdes_power(chip, port, false); + err = mv88e6390x_serdes_power(chip, port, false); if (err) return err; @@ -424,7 +452,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, if (err) return err; - err = mv88e6390_serdes_power(chip, port, true); + err = mv88e6390x_serdes_power(chip, port, true); if (err) return err; @@ -444,6 +472,8 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode) { switch (mode) { + case PHY_INTERFACE_MODE_NA: + return 0; case PHY_INTERFACE_MODE_XGMII: case PHY_INTERFACE_MODE_XAUI: case PHY_INTERFACE_MODE_RXAUI: diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index e583641de758..c7bed263a0f4 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -52,6 +52,7 @@ #define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005 #define MV88E6185_PORT_STS_CMODE_PHY 0x0006 #define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007 +#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff /* Offset 0x01: MAC (or PCS or Physical) Control Register */ #define MV88E6XXX_PORT_MAC_CTL 0x01 @@ -284,6 +285,10 @@ int mv88e6352_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); int mv88e6390_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed); +phy_interface_t mv88e6341_port_max_speed_mode(int port); +phy_interface_t mv88e6390_port_max_speed_mode(int port); +phy_interface_t mv88e6390x_port_max_speed_mode(int port); + int mv88e6xxx_port_set_state(struct mv88e6xxx_chip *chip, int port, u8 state); int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map); diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c index 4b336d8d4c67..42872d21857b 100644 --- a/drivers/net/dsa/mv88e6xxx/ptp.c +++ b/drivers/net/dsa/mv88e6xxx/ptp.c @@ -400,7 +400,7 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip) chip->ptp_clock_info.owner = THIS_MODULE; snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name), - dev_name(chip->dev)); + "%s", dev_name(chip->dev)); chip->ptp_clock_info.max_adj = 1000000; chip->ptp_clock_info.n_ext_ts = ptp_ops->n_ext_ts; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 2caa8c8b4b55..6a5de1b72f6c 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -510,21 +510,48 @@ static void mv88e6390_serdes_irq_link_sgmii(struct mv88e6xxx_chip *chip, int port, int lane) { struct dsa_switch *ds = chip->ds; + int duplex = DUPLEX_UNKNOWN; + int speed 
= SPEED_UNKNOWN; + int link, err; u16 status; - bool up; - mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_STATUS, &status); + err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, + MV88E6390_SGMII_PHY_STATUS, &status); + if (err) { + dev_err(chip->dev, "can't read SGMII PHY status: %d\n", err); + return; + } - /* Status must be read twice in order to give the current link - * status. Otherwise the change in link status since the last - * read of the register is returned. - */ - mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_STATUS, &status); - up = status & MV88E6390_SGMII_STATUS_LINK; + link = status & MV88E6390_SGMII_PHY_STATUS_LINK ? + LINK_FORCED_UP : LINK_FORCED_DOWN; + + if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { + duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ? + DUPLEX_FULL : DUPLEX_HALF; + + switch (status & MV88E6390_SGMII_PHY_STATUS_SPEED_MASK) { + case MV88E6390_SGMII_PHY_STATUS_SPEED_1000: + speed = SPEED_1000; + break; + case MV88E6390_SGMII_PHY_STATUS_SPEED_100: + speed = SPEED_100; + break; + case MV88E6390_SGMII_PHY_STATUS_SPEED_10: + speed = SPEED_10; + break; + default: + dev_err(chip->dev, "invalid PHY speed\n"); + return; + } + } - dsa_port_phylink_mac_change(ds, port, up); + err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, + PAUSE_OFF, PHY_INTERFACE_MODE_NA); + if (err) + dev_err(chip->dev, "can't propagate PHY settings to MAC: %d\n", + err); + else + dsa_port_phylink_mac_change(ds, port, link == LINK_FORCED_UP); } static int mv88e6390_serdes_irq_enable_sgmii(struct mv88e6xxx_chip *chip, @@ -664,7 +691,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) if (port < 9) return 0; - return mv88e6390_serdes_irq_setup(chip, port); + return mv88e6390x_serdes_irq_setup(chip, port); } void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 573dce8b1eb4..c2e7eedfa9b9 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -69,6 +69,14 @@ #define MV88E6390_SGMII_INT_SYMBOL_ERROR BIT(8) #define MV88E6390_SGMII_INT_FALSE_CARRIER BIT(7) #define MV88E6390_SGMII_INT_STATUS 0xa002 +#define MV88E6390_SGMII_PHY_STATUS 0xa003 +#define MV88E6390_SGMII_PHY_STATUS_SPEED_MASK GENMASK(15, 14) +#define MV88E6390_SGMII_PHY_STATUS_SPEED_1000 0x8000 +#define MV88E6390_SGMII_PHY_STATUS_SPEED_100 0x4000 +#define MV88E6390_SGMII_PHY_STATUS_SPEED_10 0x0000 +#define MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL BIT(13) +#define MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID BIT(11) +#define MV88E6390_SGMII_PHY_STATUS_LINK BIT(10) int mv88e6390x_serdes_get_lane(struct mv88e6xxx_chip *chip, int port); int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 7e97e620bd44..576b37d12a63 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -420,7 +420,7 @@ qca8k_mib_init(struct qca8k_priv *priv) static int qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) { - u32 reg; + u32 reg, val; switch (port) { case 0: @@ -439,15 +439,19 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) */ switch (mode) { case PHY_INTERFACE_MODE_RGMII: - qca8k_write(priv, reg, - QCA8K_PORT_PAD_RGMII_EN | - QCA8K_PORT_PAD_RGMII_TX_DELAY(3) | - QCA8K_PORT_PAD_RGMII_RX_DELAY(3)); - - /* According to the datasheet, RGMII delay is enabled through + /* RGMII mode means no 
delay so don't enable the delay */ + val = QCA8K_PORT_PAD_RGMII_EN; + qca8k_write(priv, reg, val); + break; + case PHY_INTERFACE_MODE_RGMII_ID: + /* RGMII_ID needs internal delay. This is enabled through * PORT5_PAD_CTRL for all ports, rather than individual port * registers */ + qca8k_write(priv, reg, + QCA8K_PORT_PAD_RGMII_EN | + QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) | + QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY)); qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); break; @@ -797,8 +801,7 @@ qca8k_port_enable(struct dsa_switch *ds, int port, } static void -qca8k_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phy) +qca8k_port_disable(struct dsa_switch *ds, int port) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 613fe5c50236..d146e54c8a6c 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -40,6 +40,7 @@ ((0x8 + (x & 0x3)) << 22) #define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \ ((0x10 + (x & 0x3)) << 20) +#define QCA8K_MAX_DELAY 3 #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) #define QCA8K_PORT_PAD_SGMII_EN BIT(7) #define QCA8K_REG_MODULE_EN 0x030 diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index a4d5049df692..40b3974970c6 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -1073,8 +1073,7 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port, } static void -rtl8366rb_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phy) +rtl8366rb_port_disable(struct dsa_switch *ds, int port) { struct realtek_smi *smi = ds->priv; int ret; diff --git a/drivers/net/dsa/vitesse-vsc73xx.c b/drivers/net/dsa/vitesse-vsc73xx.c index 9f1b5f2e8a64..d4780610ea8a 100644 --- a/drivers/net/dsa/vitesse-vsc73xx.c +++ b/drivers/net/dsa/vitesse-vsc73xx.c @@ -1013,8 +1013,7 @@ static int vsc73xx_port_enable(struct dsa_switch *ds, int port, return 0; } -static void vsc73xx_port_disable(struct dsa_switch *ds, int port, - struct phy_device *phy) +static void vsc73xx_port_disable(struct dsa_switch *ds, int port) { struct vsc73xx *vsc = ds->priv; diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index b223769d6a5e..3da97996bdf3 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -1266,12 +1266,14 @@ el3_up(struct net_device *dev) pr_cont("Forcing 3c5x9b full-duplex mode"); break; } + /* fall through */ case 8: /* set full-duplex mode based on eeprom config setting */ if ((sw_info & 0x000f) && (sw_info & 0x8000)) { pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)"); break; } + /* fall through */ default: /* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */ pr_cont("Setting 3c5x9/3c5x9B half-duplex mode"); diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index b648e3f95c01..808abb6b3671 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -1177,7 +1177,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry])) break; /* It still hasn't been processed. 
*/ if (lp->tx_skbuff[entry]) { - dev_kfree_skb_irq(lp->tx_skbuff[entry]); + dev_consume_skb_irq(lp->tx_skbuff[entry]); lp->tx_skbuff[entry] = NULL; } dirty_tx++; @@ -1192,7 +1192,7 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) #ifdef VORTEX_BUS_MASTER if (status & DMADone) { outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ - dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */ + dev_consume_skb_irq(lp->tx_skb); /* Release the transferred buffer */ netif_wake_queue(dev); } #endif diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 40f421dbdf57..147051404194 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -2307,7 +2307,7 @@ _vortex_interrupt(int irq, struct net_device *dev) dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE); pkts_compl++; bytes_compl += vp->tx_skb->len; - dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ + dev_consume_skb_irq(vp->tx_skb); /* Release the transferred buffer */ if (ioread16(ioaddr + TxFree) > 1536) { /* * AKPM: FIXME: I don't think we need this. If the queue was stopped due to @@ -2449,7 +2449,7 @@ _boomerang_interrupt(int irq, struct net_device *dev) #endif pkts_compl++; bytes_compl += skb->len; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); vp->tx_skbuff[entry] = NULL; } else { pr_debug("boomerang_interrupt: no skb!\n"); diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 097467f44b0d..816540e6beac 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -1390,7 +1390,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) } } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } np->tx_done_q[np->tx_done].status = 0; np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE; diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 4f11f98347ed..1827ef1f6d55 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev, if (skb) { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); info->skb = NULL; } diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 0fb986ba3290..0ae723f75341 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) & 0xffff; if (inuse) { /* Tx FIFO is not empty */ - ready = priv->tx_prod - priv->tx_cons - inuse - 1; + ready = max_t(int, + priv->tx_prod - priv->tx_cons - inuse - 1, 0); } else { /* Check for buffered last packet */ status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a70bb1bb90e7..a6eacf2099c3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter) goto err_device_destroy; } - clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); - /* Make sure we don't have a race with AENQ Links state handler */ - if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) - netif_carrier_on(adapter->netdev); - rc = 
ena_enable_msix_and_set_admin_interrupts(adapter, adapter->num_queues); if (rc) { @@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter) } set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); + + clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); + if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) + netif_carrier_on(adapter->netdev); + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); dev_err(&pdev->dev, "Device reset completed successfully, Driver info: %s\n", diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index dc8b6173d8d8..63870072cbbd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -45,7 +45,7 @@ #define DRV_MODULE_VER_MAJOR 2 #define DRV_MODULE_VER_MINOR 0 -#define DRV_MODULE_VER_SUBMINOR 2 +#define DRV_MODULE_VER_SUBMINOR 3 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index a90080f12e67..145fe71fd155 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -435,7 +435,7 @@ static int amd8111e_restart(struct net_device *dev) int i,reg_val; /* stop the chip */ - writel(RUN, mmio + CMD0); + writel(RUN, mmio + CMD0); if(amd8111e_init_ring(dev)) return -ENOMEM; @@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev) pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], lp->tx_skbuff[tx_index]->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); + dev_consume_skb_irq(lp->tx_skbuff[tx_index]); lp->tx_skbuff[tx_index] = NULL; lp->tx_dma_addr[tx_index] = 0; } @@ -1720,7 +1720,7 @@ static void amd8111e_config_ipg(struct timer_list *t) writew((u32)tmp_ipg, mmio + IPG); writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1); } - mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES); + mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES); return; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index e833d1b3fe18..e5073aeea06a 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1167,7 +1167,7 @@ static int au1000_probe(struct platform_device *pdev) /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_attrs(NULL, MAX_BUF_SIZE * + aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), &aup->dma_addr, 0, DMA_ATTR_NON_CONSISTENT); @@ -1349,7 +1349,7 @@ err_remap3: err_remap2: iounmap(aup->mac); err_remap1: - dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), (void *)aup->vaddr, aup->dma_addr, DMA_ATTR_NON_CONSISTENT); err_vaddr: @@ -1383,7 +1383,7 @@ static int au1000_remove(struct platform_device *pdev) if (aup->tx_db_inuse[i]) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); - dma_free_attrs(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), + dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), (void *)aup->vaddr, aup->dma_addr, DMA_ATTR_NON_CONSISTENT); diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index b56d84c7df46..f90b454b1642 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -1084,7 +1084,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id) /* We must free the original skb if it's 
not a data-only copy in the bounce buffer. */ if (lp->tx_skbuff[entry]) { - dev_kfree_skb_irq(lp->tx_skbuff[entry]); + dev_consume_skb_irq(lp->tx_skbuff[entry]); lp->tx_skbuff[entry] = NULL; } dirty_tx++; diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 8931ce6bab7b..87ff5d6d1b22 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -1028,7 +1028,7 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0) #ifdef XMT_VIA_SKB if(p->tmd_skb[p->tmdlast]) { - dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]); + dev_consume_skb_irq(p->tmd_skb[p->tmdlast]); p->tmd_skb[p->tmdlast] = NULL; } #endif diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 6a8e2567f2bd..4d3855ceb500 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) if (bp->tx_bufs[bp->tx_empty]) { ++dev->stats.tx_packets; - dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); + dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]); } bp->tx_bufs[bp->tx_empty] = NULL; bp->tx_fullup = 0; diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 68b9ee489489..4d9819d2894d 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -764,7 +764,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) dev->stats.tx_bytes += mp->tx_bufs[i]->len; ++dev->stats.tx_packets; } - dev_kfree_skb_irq(mp->tx_bufs[i]); + dev_consume_skb_irq(mp->tx_bufs[i]); --mp->tx_active; if (++i >= N_TX_RING) i = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 38e87eed76b9..a718d7a1f76c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -138,7 +138,7 @@ static void aq_ethtool_get_strings(struct net_device *ndev, u8 *p = data; if (stringset == ETH_SS_STATS) { - memcpy(p, *aq_ethtool_stat_names, + memcpy(p, aq_ethtool_stat_names, sizeof(aq_ethtool_stat_names)); p = p + sizeof(aq_ethtool_stat_names); for (i = 0; i < cfg->vecs; i++) { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h index dc88a1221f1d..bc711238ca0c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -14,6 +14,8 @@ #ifndef AQ_HW_UTILS_H #define AQ_HW_UTILS_H +#include + #include "aq_common.h" #ifndef HIDWORD @@ -23,18 +25,6 @@ #define AQ_HW_SLEEP(_US_) mdelay(_US_) -#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \ -do { \ - unsigned int AQ_HW_WAIT_FOR_i; \ - for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\ - --AQ_HW_WAIT_FOR_i) {\ - udelay(_US_); \ - } \ - if (!AQ_HW_WAIT_FOR_i) {\ - err = -ETIME; \ - } \ -} while (0) - #define aq_pr_err(...) pr_err(AQ_CFG_DRV_NAME ": " __VA_ARGS__) #define aq_pr_trace(...) 
pr_info(AQ_CFG_DRV_NAME ": " __VA_ARGS__) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 0147c037ca96..ff83667410bd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -986,4 +986,4 @@ void aq_nic_shutdown(struct aq_nic_s *self) err_exit: rtnl_unlock(); -} \ No newline at end of file +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index c8b44cdb91c1..0217ff4669a4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -170,6 +170,8 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self) for (i = 32U; i--;) { if (!((1U << i) & self->msix_entry_mask)) continue; + if (i >= AQ_CFG_VECS_MAX) + continue; if (pdev->msix_enabled) irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 2469ed4d86b9..f6f8338153a2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -85,6 +85,7 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = { static int hw_atl_a0_hw_reset(struct aq_hw_s *self) { int err = 0; + u32 val; hw_atl_glb_glb_reg_res_dis_set(self, 1U); hw_atl_pci_pci_reg_res_dis_set(self, 0U); @@ -95,7 +96,9 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self) hw_atl_glb_soft_res_set(self, 1); /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U); + err = readx_poll_timeout_atomic(hw_atl_glb_soft_res_get, + self, val, val == 0, + 1000U, 10000U); if (err < 0) goto err_exit; @@ -103,7 +106,9 @@ static int hw_atl_a0_hw_reset(struct aq_hw_s *self) hw_atl_itr_res_irq_set(self, 1U); /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U); + err = readx_poll_timeout_atomic(hw_atl_itr_res_irq_get, + self, val, val == 0, + 1000U, 10000U); if (err < 0) goto err_exit; @@ -181,6 +186,7 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, int err = 0; unsigned int i = 0U; unsigned int addr = 0U; + u32 val; for (i = 10, addr = 0U; i--; ++addr) { u32 key_data = cfg->is_rss ? 
@@ -188,8 +194,9 @@ static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self, hw_atl_rpf_rss_key_wr_data_set(self, key_data); hw_atl_rpf_rss_key_addr_set(self, addr); hw_atl_rpf_rss_key_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0, - 1000U, 10U); + err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get, + self, val, val == 0, + 1000U, 10000U); if (err < 0) goto err_exit; } @@ -207,8 +214,9 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, u32 i = 0U; u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); int err = 0; - u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX * - HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)]; + u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX * + HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)]; + u32 val; memset(bitary, 0, sizeof(bitary)); @@ -222,8 +230,9 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self, hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); hw_atl_rpf_rss_redir_tbl_addr_set(self, i); hw_atl_rpf_rss_redir_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0, - 1000U, 10U); + err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get, + self, val, val == 0, + 1000U, 10000U); if (err < 0) goto err_exit; } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index b58ca7cb8e9d..b31dba1b1a55 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -173,6 +173,7 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, int err = 0; unsigned int i = 0U; unsigned int addr = 0U; + u32 val; for (i = 10, addr = 0U; i--; ++addr) { u32 key_data = cfg->is_rss ? @@ -180,8 +181,9 @@ static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self, hw_atl_rpf_rss_key_wr_data_set(self, key_data); hw_atl_rpf_rss_key_addr_set(self, addr); hw_atl_rpf_rss_key_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0, - 1000U, 10U); + err = readx_poll_timeout_atomic(hw_atl_rpf_rss_key_wr_en_get, + self, val, val == 0, + 1000U, 10000U); if (err < 0) goto err_exit; } @@ -199,8 +201,9 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, u32 i = 0U; u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues); int err = 0; - u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX * - HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; + u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX * + HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)]; + u32 val; memset(bitary, 0, sizeof(bitary)); @@ -214,8 +217,9 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self, hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]); hw_atl_rpf_rss_redir_tbl_addr_set(self, i); hw_atl_rpf_rss_redir_wr_en_set(self, 1U); - AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0, - 1000U, 10U); + err = readx_poll_timeout_atomic(hw_atl_rpf_rss_redir_wr_en_get, + self, val, val == 0, + 1000U, 10000U); if (err < 0) goto err_exit; } @@ -275,6 +279,9 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self, static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self) { + /* Tx TC/Queue number config */ + hw_atl_rpb_tps_tx_tc_mode_set(self, 1U); + hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 939f77e2e117..0722b8e01964 100644 --- 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1274,6 +1274,15 @@ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en) HW_ATL_TPB_TX_BUF_EN_SHIFT, tx_buff_en); } +void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw, + u32 tx_traf_class_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TPB_TX_TC_MODE_ADDR, + HW_ATL_TPB_TX_TC_MODE_MSK, + HW_ATL_TPB_TX_TC_MODE_SHIFT, + tx_traf_class_mode); +} + void hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw, u32 tx_buff_hi_threshold_per_tc, u32 buffer) @@ -1585,3 +1594,24 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, HW_ATL_RPF_L3_DSTA_ADR(location + i), ipv6_dest[i]); } + +u32 hw_atl_sem_ram_get(struct aq_hw_s *self) +{ + return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); +} + +u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp) +{ + return aq_hw_read_reg(aq_hw, + HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp)); +} + +u32 hw_atl_scrpad12_get(struct aq_hw_s *self) +{ + return hw_atl_scrpad_get(self, 0xB); +} + +u32 hw_atl_scrpad25_get(struct aq_hw_s *self) +{ + return hw_atl_scrpad_get(self, 0x18); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 03c570d115fe..d46351890b16 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -605,6 +605,10 @@ void hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw, /* tpb */ +/* set TX Traffic Class Mode */ +void hw_atl_rpb_tps_tx_tc_mode_set(struct aq_hw_s *aq_hw, + u32 tx_traf_class_mode); + /* set tx buffer enable */ void hw_atl_tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en); @@ -752,4 +756,16 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, u32 *ipv6_dest); +/* get global microprocessor ram semaphore */ +u32 hw_atl_sem_ram_get(struct aq_hw_s *self); + +/* get global microprocessor scratch pad register */ +u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp); + +/* get global microprocessor scratch pad 12 register */ +u32 hw_atl_scrpad12_get(struct aq_hw_s *self); + +/* get global microprocessor scratch pad 25 register */ +u32 hw_atl_scrpad25_get(struct aq_hw_s *self); + #endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 8470d92db812..fb45bc2d99cf 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -1948,6 +1948,19 @@ /* default value of bitfield tx_buf_en */ #define HW_ATL_TPB_TX_BUF_EN_DEFAULT 0x0 +/* register address for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_ADDR 0x00007900 +/* bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSK 0x00000100 +/* inverted bitmask for bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_MSKN 0xFFFFFEFF +/* lower bit position of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_SHIFT 8 +/* width of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_WIDTH 1 +/* default value of bitfield tx_tc_mode */ +#define HW_ATL_TPB_TX_TC_MODE_DEFAULT 0x0 + /* tx tx{b}_hi_thresh[c:0] bitfield definitions * preprocessor definitions for the bitfield 
"tx{b}_hi_thresh[c:0]". * parameter: buffer {b} | stride size 0x10 | range [0, 7] @@ -2519,4 +2532,6 @@ /* Default value of bitfield l3_da0[1F:0] */ #define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0 +#define HW_ATL_FW_SM_RAM 0x2U + #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 9b74a3197d7f..eb4b99d56081 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -25,7 +25,9 @@ #define HW_ATL_MIF_ADDR 0x0208U #define HW_ATL_MIF_VAL 0x020CU -#define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_RPC_CONTROL_ADR 0x0338U +#define HW_ATL_RPC_STATE_ADR 0x033CU + #define HW_ATL_MPI_FW_VERSION 0x18 #define HW_ATL_MPI_CONTROL_ADR 0x0368U #define HW_ATL_MPI_STATE_ADR 0x036CU @@ -53,6 +55,12 @@ static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); +static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self); +static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self); +static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self); +static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self); +static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self); + int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) { int err = 0; @@ -234,6 +242,7 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) { int k; u32 boot_exit_code = 0; + u32 val; for (k = 0; k < 1000; ++k) { u32 flb_status = aq_hw_read_reg(self, @@ -260,9 +269,11 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) int err = 0; hw_atl_utils_mpi_set_state(self, MPI_DEINIT); - AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & - HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, - 10, 1000U); + err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state, + self, val, + (val & HW_ATL_MPI_STATE_MSK) == + MPI_DEINIT, + 10, 10000U); if (err) return err; } @@ -277,16 +288,17 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt) { int err = 0; + u32 val; - AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self, - HW_ATL_FW_SM_RAM) == 1U, - 1U, 10000U); + err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, + self, val, val == 1U, + 1U, 10000U); if (err < 0) { bool is_locked; hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); - is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + is_locked = hw_atl_sem_ram_get(self); if (!is_locked) { err = -ETIME; goto err_exit; @@ -299,13 +311,14 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U); if (IS_CHIP_FEATURE(REVISION_B1)) - AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self, - HW_ATL_MIF_ADDR), - 1, 1000U); + err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get, + self, val, val != a, + 1U, 1000U); else - AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self, - HW_ATL_MIF_CMD)), - 1, 1000U); + err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get, + self, val, + !(val & 0x100), + 1U, 1000U); *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL); a += 4; @@ -320,10 +333,11 @@ err_exit: static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt) { + u32 val; int err = 0; bool is_locked; - is_locked = hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM); + is_locked = hw_atl_sem_ram_get(self); if (!is_locked) { err = -ETIME; goto err_exit; @@ -337,10 +351,11 @@ static int 
hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, (0x80000000 | (0xFFFF & (offset * 4)))); hw_atl_mcp_up_force_intr_set(self, 1); /* 1000 times by 10us = 10ms */ - AQ_HW_WAIT_FOR((aq_hw_read_reg(self, - 0x32C) & 0xF0000000) != - 0x80000000, - 10, 1000); + err = readx_poll_timeout_atomic(hw_atl_scrpad12_get, + self, val, + (val & 0xF0000000) == + 0x80000000, + 10U, 10000U); } } else { u32 offset = 0; @@ -351,8 +366,10 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, aq_hw_write_reg(self, 0x20C, p[offset]); aq_hw_write_reg(self, 0x200, 0xC000); - AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) & - 0x100) == 0, 10, 1000); + err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get, + self, val, + (val & 0x100) == 0, + 1000U, 10000U); } } @@ -395,15 +412,14 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U); /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + err = readx_poll_timeout_atomic(hw_atl_scrpad25_get, + self, self->mbox_addr, + self->mbox_addr != 0U, + 1000U, 10000U); return err; } -#define HW_ATL_RPC_CONTROL_ADR 0x0338U -#define HW_ATL_RPC_STATE_ADR 0x033CU - struct aq_hw_atl_utils_fw_rpc_tid_s { union { u32 val; @@ -452,10 +468,10 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, self->rpc_tid = sw.tid; - AQ_HW_WAIT_FOR(sw.tid == - (fw.val = - aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), - fw.tid), 1000U, 100U); + err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get, + self, fw.val, + sw.tid == fw.tid, + 1000U, 100000U); if (fw.len == 0xFFFFU) { err = hw_atl_utils_fw_rpc_call(self, sw.len); @@ -559,10 +575,11 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, transaction_id = mbox.transaction_id; - AQ_HW_WAIT_FOR(transaction_id != - (hw_atl_utils_mpi_read_mbox(self, &mbox), - mbox.transaction_id), - 1000U, 100U); + err = readx_poll_timeout_atomic(hw_atl_utils_get_mpi_mbox_tid, + self, mbox.transaction_id, + transaction_id != + mbox.transaction_id, + 1000U, 100000U); if (err < 0) goto err_exit; } @@ -585,7 +602,7 @@ err_exit: int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) { - u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); + u32 cp0x036C = hw_atl_utils_mpi_get_state(self); u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT; struct aq_hw_link_status_s *link_status = &self->aq_link_status; @@ -905,6 +922,35 @@ err_exit: return err; } +static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self) +{ + struct hw_atl_utils_mbox_header mbox; + + hw_atl_utils_mpi_read_mbox(self, &mbox); + + return mbox.transaction_id; +} + +static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR); +} + +static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_MIF_CMD); +} + +static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_MIF_ADDR); +} + +static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR); +} + const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, .deinit = hw_atl_fw1x_deinit, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 7de3220d9cab..fe6c5658e016 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ 
b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -20,15 +20,14 @@ #include "hw_atl_utils.h" #include "hw_atl_llh.h" -#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 -#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 #define HW_ATL_FW2X_MPI_RPC_ADDR 0x334 +#define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 +#define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 #define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368 #define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C - #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 -#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +#define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 #define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE) #define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE) @@ -72,17 +71,24 @@ static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); +static u32 aq_fw2x_mbox_get(struct aq_hw_s *self); +static u32 aq_fw2x_rpc_get(struct aq_hw_s *self); +static u32 aq_fw2x_state2_get(struct aq_hw_s *self); + static int aq_fw2x_init(struct aq_hw_s *self) { int err = 0; /* check 10 times by 1ms */ - AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), - 1000U, 10U); - AQ_HW_WAIT_FOR(0U != (self->rpc_addr = - aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)), - 1000U, 100U); + err = readx_poll_timeout_atomic(aq_fw2x_mbox_get, + self, self->mbox_addr, + self->mbox_addr != 0U, + 1000U, 10000U); + + err = readx_poll_timeout_atomic(aq_fw2x_rpc_get, + self, self->rpc_addr, + self->rpc_addr != 0U, + 1000U, 100000U); return err; } @@ -286,16 +292,18 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) int err = 0; u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); u32 orig_stats_val = mpi_opts & BIT(CAPS_HI_STATISTICS); + u32 stats_val; /* Toggle statistics bit for FW to update */ mpi_opts = mpi_opts ^ BIT(CAPS_HI_STATISTICS); aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); /* Wait FW to report back */ - AQ_HW_WAIT_FOR(orig_stats_val != - (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & - BIT(CAPS_HI_STATISTICS)), - 1U, 10000U); + err = readx_poll_timeout_atomic(aq_fw2x_state2_get, + self, stats_val, + orig_stats_val != (stats_val & + BIT(CAPS_HI_STATISTICS)), + 1U, 10000U); if (err) return err; @@ -309,6 +317,7 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) unsigned int rpc_size = 0U; u32 mpi_opts; int err = 0; + u32 val; rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg); @@ -337,8 +346,10 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY; aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); - AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & - HW_ATL_FW2X_CTRL_SLEEP_PROXY), 1U, 10000U); + err = readx_poll_timeout_atomic(aq_fw2x_state2_get, + self, val, + val & HW_ATL_FW2X_CTRL_SLEEP_PROXY, + 1U, 10000U); err_exit: return err; @@ -350,6 +361,7 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac) struct fw2x_msg_wol *msg = NULL; u32 mpi_opts; int err = 0; + u32 val; err = hw_atl_utils_fw_rpc_wait(self, &rpc); if (err < 0) @@ -374,8 +386,9 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac) mpi_opts |= HW_ATL_FW2X_CTRL_WOL; aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); - AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & - HW_ATL_FW2X_CTRL_WOL), 1U, 10000U); + err = readx_poll_timeout_atomic(aq_fw2x_state2_get, + self, val, val & HW_ATL_FW2X_CTRL_WOL, + 1U, 10000U); 
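/*
 * Editor's illustrative sketch -- not part of the patch above. The
 * aquantia hunks in this area replace the driver-private AQ_HW_WAIT_FOR()
 * macro with readx_poll_timeout_atomic(), which repeatedly invokes a
 * small getter (e.g. aq_fw2x_state2_get) until a caller-supplied
 * condition holds or a timeout expires, returning -ETIMEDOUT in the
 * latter case. The standalone helper below mimics that contract in
 * userspace C; demo_poll_timeout, demo_udelay and fake_reg are
 * hypothetical names used only for this example.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Sleep for roughly @us microseconds between polls. */
static void demo_udelay(unsigned int us)
{
	struct timespec ts = { .tv_sec = us / 1000000,
			       .tv_nsec = (us % 1000000) * 1000L };
	nanosleep(&ts, NULL);
}

/*
 * Poll @get(ctx) every @delay_us microseconds until @done(value) is
 * true or @timeout_us elapses. On success, *val holds the last read.
 */
static int demo_poll_timeout(uint32_t (*get)(void *ctx), void *ctx,
			     int (*done)(uint32_t val), uint32_t *val,
			     unsigned int delay_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	for (;;) {
		*val = get(ctx);
		if (done(*val))
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		demo_udelay(delay_us);
		waited += delay_us;
	}
}

/* A fake "register" that becomes ready after a few reads. */
static uint32_t fake_reg(void *ctx)
{
	unsigned int *reads = ctx;

	return ++(*reads) >= 3 ? 1U : 0U;
}

static int is_nonzero(uint32_t v)
{
	return v != 0;
}

int main(void)
{
	unsigned int reads = 0;
	uint32_t val;
	int err = demo_poll_timeout(fake_reg, &reads, is_nonzero, &val,
				    1000U, 10000U);

	printf("err=%d val=%u after %u reads\n", err, val, reads);
	return 0;
}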
err_exit: return err; @@ -425,7 +438,7 @@ static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate, *supported_rates = fw2x_to_eee_mask(caps_hi); - mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); + mpi_state = aq_fw2x_state2_get(self); *rate = fw2x_to_eee_mask(mpi_state); return err; @@ -455,7 +468,7 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self) static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode) { - u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); + u32 mpi_state = aq_fw2x_state2_get(self); if (mpi_state & HW_ATL_FW2X_CAP_PAUSE) if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE) @@ -471,6 +484,21 @@ static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode) return 0; } +static u32 aq_fw2x_mbox_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR); +} + +static u32 aq_fw2x_rpc_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR); +} + +static u32 aq_fw2x_state2_get(struct aq_hw_s *self) +{ + return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); +} + const struct aq_fw_ops aq_fw_2x_ops = { .init = aq_fw2x_init, .deinit = aq_fw2x_deinit, diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 4406325fdd9f..ff3d68532f5f 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -148,7 +148,7 @@ static void arc_emac_tx_clean(struct net_device *ndev) dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); /* return the sk_buff to system */ - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); txbd->data = 0; txbd->info = 0; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 3a3b35b5df67..0f1eb1981469 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1833,10 +1833,10 @@ rrs_checked: atl1c_clean_rrd(rrd_ring, rrs, rfd_num); if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) { atl1c_clean_rfd(rfd_ring, rrs, rfd_num); - if (netif_msg_rx_err(adapter)) - dev_warn(&pdev->dev, - "wrong packet! rrs word3 is %x\n", - rrs->word3); + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "wrong packet! 
rrs word3 is %x\n", + rrs->word3); continue; } diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 3164aad29bcf..9dfe6a9431e6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1259,7 +1259,7 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) } if (tx_buffer->skb) { - dev_kfree_skb_irq(tx_buffer->skb); + dev_consume_skb_irq(tx_buffer->skb); tx_buffer->skb = NULL; } diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 63edc5706c09..9e07b469066a 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2088,7 +2088,7 @@ static int atl1_intr_tx(struct atl1_adapter *adapter) } if (buffer_info->skb) { - dev_kfree_skb_irq(buffer_info->skb); + dev_consume_skb_irq(buffer_info->skb); buffer_info->skb = NULL; } diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index bb41becb6609..d99317b3d891 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -909,7 +909,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, (adapter->txd_write_ptr >> 2)); mmiowb(); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); return NETDEV_TX_OK; } @@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; struct atl2_adapter *adapter; - static int cards_found; + static int cards_found = 0; unsigned long mmio_start; int mmio_len; int err; - cards_found = 0; - err = pci_enable_device(pdev); if (err) return err; @@ -2946,7 +2944,7 @@ static int atl2_validate_option(int *value, struct atl2_option *opt) if (*value == ent->i) { if (ent->str[0] != '\0') printk(KERN_INFO "%s\n", ent->str); - return 0; + return 0; } } break; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index c1d3ee9baf7e..716bfbba59cf 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -194,7 +194,6 @@ config SYSTEMPORT config BNXT tristate "Broadcom NetXtreme-C/E support" depends on PCI - depends on MAY_USE_DEVLINK select FW_LOADER select LIBCRC32C ---help--- diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index f44808959ff3..97ab0dd25552 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp) bytes_compl += skb->len; pkts_compl++; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); @@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) } skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); skb = bounce_skb; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9521d0274b7..bc3ac369cbe3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev, priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); reg = rxchk_readl(priv, RXCHK_CONTROL); + /* Clear L2 header checks, which would prevent BPDUs + * from being received. 
+ */ + reg &= ~RXCHK_L2_HDR_DIS; if (priv->rx_chk_en) reg |= RXCHK_EN; else @@ -520,7 +524,6 @@ static void bcm_sysport_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bcm_sysport_priv *priv = netdev_priv(dev); - u32 reg; wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; wol->wolopts = priv->wolopts; @@ -528,11 +531,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, if (!(priv->wolopts & WAKE_MAGICSECURE)) return; - /* Return the programmed SecureOn password */ - reg = umac_readl(priv, UMAC_PSW_MS); - put_unaligned_be16(reg, &wol->sopass[0]); - reg = umac_readl(priv, UMAC_PSW_LS); - put_unaligned_be32(reg, &wol->sopass[2]); + memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); } static int bcm_sysport_set_wol(struct net_device *dev, @@ -548,13 +547,8 @@ static int bcm_sysport_set_wol(struct net_device *dev, if (wol->wolopts & ~supported) return -EINVAL; - /* Program the SecureOn password */ - if (wol->wolopts & WAKE_MAGICSECURE) { - umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), - UMAC_PSW_MS); - umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), - UMAC_PSW_LS); - } + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { @@ -2649,13 +2643,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) unsigned int index, i = 0; u32 reg; - /* Password has already been programmed */ reg = umac_readl(priv, UMAC_MPD_CTRL); if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) reg |= MPD_EN; reg &= ~PSW_EN; - if (priv->wolopts & WAKE_MAGICSECURE) + if (priv->wolopts & WAKE_MAGICSECURE) { + /* Program the SecureOn password */ + umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), + UMAC_PSW_MS); + umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), + UMAC_PSW_LS); reg |= PSW_EN; + } umac_writel(priv, reg, UMAC_MPD_CTRL); if (priv->wolopts & WAKE_FILTER) { diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 0887e6356649..0b192fea9c5d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -12,6 +12,7 @@ #define __BCM_SYSPORT_H #include +#include #include #include @@ -778,6 +779,7 @@ struct bcm_sysport_priv { unsigned int crc_fwd:1; u16 rev; u32 wolopts; + u8 sopass[SOPASS_MAX]; unsigned int wol_irq_disabled:1; /* MIB related fields */ diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 2d3a44c40221..4632dd5dbad1 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1446,7 +1446,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) struct phy_device *phy_dev; int err; - phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL); + phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); if (!phy_dev || IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 03d131f777bc..6026b53137aa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -32,7 +32,7 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.712.30-0" +#define DRV_MODULE_VERSION "1.713.36-0" #define DRV_MODULE_RELDATE "2014/02/10" #define BNX2X_BC_VER 0x040200 diff --git 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h index 46ee2c01f4c5..066765fbef06 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h @@ -449,7 +449,7 @@ static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data, ccd[cos] = (u32)input_data->cos_min_rate[cos] * 100 * (T_FAIR_COEF / (8 * 100 * cosWeightSum)); - if (ccd[cos] < pdata->fair_vars.fair_threshold + if (ccd[cos] < pdata->fair_vars.fair_threshold + MIN_ABOVE_THRESH) { ccd[cos] = pdata->fair_vars.fair_threshold + diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 98d4c5a3ff21..d581d0ae6584 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -837,49 +837,45 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp, switch (cos_entry) { case 0: - nig_reg_adress_crd_weight = - (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : - NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0; - pbf_reg_adress_crd_weight = (port) ? - PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0; - break; + nig_reg_adress_crd_weight = + (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0; + pbf_reg_adress_crd_weight = (port) ? + PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0; + break; case 1: - nig_reg_adress_crd_weight = (port) ? - NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : - NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1; - pbf_reg_adress_crd_weight = (port) ? - PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0; - break; + nig_reg_adress_crd_weight = (port) ? + NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1; + pbf_reg_adress_crd_weight = (port) ? + PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0; + break; case 2: - nig_reg_adress_crd_weight = (port) ? - NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : - NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2; + nig_reg_adress_crd_weight = (port) ? + NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 : + NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2; - pbf_reg_adress_crd_weight = (port) ? - PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0; - break; + pbf_reg_adress_crd_weight = (port) ? 
+ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0; + break; case 3: - if (port) + if (port) return -EINVAL; - nig_reg_adress_crd_weight = - NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3; - pbf_reg_adress_crd_weight = - PBF_REG_COS3_WEIGHT_P0; - break; + nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3; + pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0; + break; case 4: - if (port) - return -EINVAL; - nig_reg_adress_crd_weight = - NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4; - pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0; - break; + if (port) + return -EINVAL; + nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4; + pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0; + break; case 5: - if (port) - return -EINVAL; - nig_reg_adress_crd_weight = - NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5; - pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0; - break; + if (port) + return -EINVAL; + nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5; + pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0; + break; } REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig); @@ -966,7 +962,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, if (pri >= max_num_of_cos) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " "parameter Illegal strict priority\n"); - return -EINVAL; + return -EINVAL; } if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { @@ -1845,28 +1841,28 @@ static int bnx2x_emac_enable(struct link_params *params, bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET); - /* pause enable/disable */ - bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, - EMAC_RX_MODE_FLOW_EN); + /* pause enable/disable */ + bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); - bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, - (EMAC_TX_MODE_EXT_PAUSE_EN | - EMAC_TX_MODE_FLOW_EN)); - if (!(params->feature_config_flags & - FEATURE_CONFIG_PFC_ENABLED)) { - if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) - bnx2x_bits_en(bp, emac_base + - EMAC_REG_EMAC_RX_MODE, - EMAC_RX_MODE_FLOW_EN); - - if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) - bnx2x_bits_en(bp, emac_base + - EMAC_REG_EMAC_TX_MODE, - (EMAC_TX_MODE_EXT_PAUSE_EN | - EMAC_TX_MODE_FLOW_EN)); - } else - bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, - EMAC_TX_MODE_FLOW_EN); + bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + if (!(params->feature_config_flags & + FEATURE_CONFIG_PFC_ENABLED)) { + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) + bnx2x_bits_en(bp, emac_base + + EMAC_REG_EMAC_RX_MODE, + EMAC_RX_MODE_FLOW_EN); + + if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) + bnx2x_bits_en(bp, emac_base + + EMAC_REG_EMAC_TX_MODE, + (EMAC_TX_MODE_EXT_PAUSE_EN | + EMAC_TX_MODE_FLOW_EN)); + } else + bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, + EMAC_TX_MODE_FLOW_EN); /* KEEP_VLAN_TAG, promiscuous */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); @@ -6339,7 +6335,7 @@ int bnx2x_set_led(struct link_params *params, */ if (!vars->link_up) break; - /* else: fall through */ + /* fall through */ case LED_MODE_ON: if (((params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) || @@ -6478,9 +6474,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, MDIO_REG_BANK_GP_STATUS, MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status); - /* Link is up only if both local phy and external phy are up */ - if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) - return -ESRCH; + /* Link is up only if both local 
phy and external phy are up */ + if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) + return -ESRCH; } /* In XGXS loopback mode, do not check external PHY */ if (params->loopback_mode == LOOPBACK_XGXS) @@ -7293,8 +7289,8 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) DP(NETIF_MSG_LINK, "XAUI workaround has completed\n"); return 0; - } - usleep_range(3000, 6000); + } + usleep_range(3000, 6000); } break; } @@ -12675,39 +12671,39 @@ static void bnx2x_init_bmac_loopback(struct link_params *params, struct link_vars *vars) { struct bnx2x *bp = params->bp; - vars->link_up = 1; - vars->line_speed = SPEED_10000; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_BMAC; + vars->link_up = 1; + vars->line_speed = SPEED_10000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_BMAC; - vars->phy_flags = PHY_XGXS_FLAG; + vars->phy_flags = PHY_XGXS_FLAG; - bnx2x_xgxs_deassert(params); + bnx2x_xgxs_deassert(params); - /* Set bmac loopback */ - bnx2x_bmac_enable(params, vars, 1, 1); + /* Set bmac loopback */ + bnx2x_bmac_enable(params, vars, 1, 1); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); } static void bnx2x_init_emac_loopback(struct link_params *params, struct link_vars *vars) { struct bnx2x *bp = params->bp; - vars->link_up = 1; - vars->line_speed = SPEED_1000; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->mac_type = MAC_TYPE_EMAC; + vars->link_up = 1; + vars->line_speed = SPEED_1000; + vars->duplex = DUPLEX_FULL; + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; + vars->mac_type = MAC_TYPE_EMAC; - vars->phy_flags = PHY_XGXS_FLAG; + vars->phy_flags = PHY_XGXS_FLAG; - bnx2x_xgxs_deassert(params); - /* Set bmac loopback */ - bnx2x_emac_enable(params, vars, 1); - bnx2x_emac_program(params, vars); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + bnx2x_xgxs_deassert(params); + /* Set bmac loopback */ + bnx2x_emac_enable(params, vars, 1); + bnx2x_emac_program(params, vars); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0); } static void bnx2x_init_xmac_loopback(struct link_params *params, @@ -13073,12 +13069,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); } - if (!CHIP_IS_E3(bp)) { - bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); - } else { - bnx2x_set_xmac_rxtx(params, 0); - bnx2x_set_umac_rxtx(params, 0); - } + if (!CHIP_IS_E3(bp)) { + bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); + } else { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 3b5b47e98c73..626b491f7674 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11298,7 +11298,7 @@ static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg) dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(bp, dev_info.port_hw_config[port].external_phy_config2)); - return; + return; } if (CHIP_IS_E3(bp)) @@ -11998,7 +11998,7 @@ static void validate_set_si_mode(struct bnx2x *bp) static int bnx2x_get_hwinfo(struct bnx2x *bp) { int /*abs*/func = BP_ABS_FUNC(bp); - int vn, mfw_vn; + int vn; u32 val = 
0, val2 = 0; int rc = 0; @@ -12083,12 +12083,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp) /* * Initialize MF configuration */ - bp->mf_ov = 0; bp->mf_mode = 0; bp->mf_sub_mode = 0; vn = BP_VN(bp); - mfw_vn = BP_FW_MB_IDX(bp); if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n", diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index a9eaaf3e73a4..7b22a6d8514c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2977,8 +2977,8 @@ static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp, cmd_pos->data.macs_num--; - DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", - cmd_pos->data.macs_num, cnt); + DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n", + cmd_pos->data.macs_num, cnt); /* Break if we reached the maximum * number of rules. @@ -3597,8 +3597,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp, /* RESTORE command will restore the entire multicast configuration */ case BNX2X_MCAST_CMD_RESTORE: p->mcast_list_len = reg_sz; - DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", - cmd, p->mcast_list_len); + DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n", + cmd, p->mcast_list_len); break; case BNX2X_MCAST_CMD_ADD: @@ -3735,8 +3735,8 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1( i++; - DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", - cfg_data.mac); + DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n", + cfg_data.mac); } *rdata_idx = i; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index c835f6c7ecd0..c97b642e6537 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2230,7 +2230,7 @@ int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf) rc = bnx2x_vf_close(bp, vf); if (rc) goto op_err; - /* Fallthrough to release resources */ + /* Fall through - to release resources */ case VF_ACQUIRED: DP(BNX2X_MSG_IOV, "about to free resources\n"); bnx2x_vf_free_resc(bp, vf); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 8e0a317b31f7..a9bdc21873d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -1654,13 +1654,9 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp, { int i, j; struct bnx2x_vf_mac_vlan_filters *fl = NULL; - size_t fsz; - fsz = tlv->n_mac_vlan_filters * - sizeof(struct bnx2x_vf_mac_vlan_filter) + - sizeof(struct bnx2x_vf_mac_vlan_filters); - - fl = kzalloc(fsz, GFP_KERNEL); + fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters), + GFP_KERNEL); if (!fl) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a512871176b..0bb9d7b3a2b6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. 
* * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2019 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -112,6 +113,7 @@ enum board_idx { BCM57454, BCM5745x_NPAR, BCM57508, + BCM57504, BCM58802, BCM58804, BCM58808, @@ -155,6 +157,7 @@ static const struct { [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, + [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, @@ -201,6 +204,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, + { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV @@ -500,6 +504,12 @@ normal_tx: } length >>= 9; + if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { + dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", + skb->len); + i = 0; + goto tx_dma_error; + } flags |= bnxt_lhint_arr[length]; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); @@ -3903,7 +3913,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, if (len) break; /* on first few passes, just barely sleep */ - if (i < DFLT_HWRM_CMD_TIMEOUT) + if (i < HWRM_SHORT_TIMEOUT_COUNTER) usleep_range(HWRM_SHORT_MIN_TIMEOUT, HWRM_SHORT_MAX_TIMEOUT); else @@ -3926,7 +3936,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, dma_rmb(); if (*valid) break; - udelay(1); + usleep_range(1, 5); } if (j >= HWRM_VALID_BIT_DELAY_USEC) { @@ -4973,12 +4983,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; u32 map_idx = ring->map_idx; + unsigned int vector; + vector = bp->irq_tbl[map_idx].vector; + disable_irq_nosync(vector); rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); - if (rc) + if (rc) { + enable_irq(vector); goto err_out; + } bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); + enable_irq(vector); bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; if (!i) { @@ -6674,6 +6690,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -8608,24 +8628,88 @@ static int bnxt_close(struct net_device *dev) return 0; } +static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 *val) +{ + struct hwrm_port_phy_mdio_read_output *resp = 
bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_mdio_read_input req = {0}; + int rc; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.phy_addr = phy_addr; + req.reg_addr = cpu_to_le16(reg & 0x1f); + if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { + req.cl45_mdio = 1; + req.phy_addr = mdio_phy_id_prtad(phy_addr); + req.dev_addr = mdio_phy_id_devad(phy_addr); + req.reg_addr = cpu_to_le16(reg); + } + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *val = le16_to_cpu(resp->reg_data); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 val) +{ + struct hwrm_port_phy_mdio_write_input req = {0}; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); + req.port_id = cpu_to_le16(bp->pf.port_id); + req.phy_addr = phy_addr; + req.reg_addr = cpu_to_le16(reg & 0x1f); + if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) { + req.cl45_mdio = 1; + req.phy_addr = mdio_phy_id_prtad(phy_addr); + req.dev_addr = mdio_phy_id_devad(phy_addr); + req.reg_addr = cpu_to_le16(reg); + } + req.reg_data = cpu_to_le16(val); + + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + /* rtnl_lock held */ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct mii_ioctl_data *mdio = if_mii(ifr); + struct bnxt *bp = netdev_priv(dev); + int rc; + switch (cmd) { case SIOCGMIIPHY: + mdio->phy_id = bp->link_info.phy_addr; + /* fallthru */ case SIOCGMIIREG: { + u16 mii_regval = 0; + if (!netif_running(dev)) return -EAGAIN; - return 0; + rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, + &mii_regval); + mdio->val_out = mii_regval; + return rc; } case SIOCSMIIREG: if (!netif_running(dev)) return -EAGAIN; - return 0; + return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, + mdio->val_in); default: /* do nothing */ @@ -9981,8 +10065,11 @@ static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, return 0; } -int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) +int bnxt_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { + struct bnxt *bp = netdev_priv(dev); + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) return -EOPNOTSUPP; @@ -9990,27 +10077,12 @@ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) if (!BNXT_PF(bp)) return -EOPNOTSUPP; - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(bp->switch_id); - memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len); - break; - default: - return -EOPNOTSUPP; - } - return 0; -} + ppid->id_len = sizeof(bp->switch_id); + memcpy(ppid->id, bp->switch_id, ppid->id_len); -static int bnxt_swdev_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - return bnxt_port_attr_get(netdev_priv(dev), attr); + return 0; } -static const struct switchdev_ops bnxt_switchdev_ops = { - .switchdev_port_attr_get = bnxt_swdev_port_attr_get -}; - static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@ -10042,6 +10114,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_bpf = bnxt_xdp, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = 
bnxt_bridge_setlink, + .ndo_get_port_parent_id = bnxt_get_port_parent_id, .ndo_get_phys_port_name = bnxt_get_phys_port_name }; @@ -10400,7 +10473,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; - SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a451796deefe..cf81ace7a6e6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -582,7 +581,7 @@ struct nqe_cn { (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) -#define HWRM_VALID_BIT_DELAY_USEC 20 +#define HWRM_VALID_BIT_DELAY_USEC 150 #define BNXT_HWRM_CHNL_CHIMP 0 #define BNXT_HWRM_CHNL_KONG 1 @@ -946,6 +945,7 @@ struct bnxt_vf_info { * stored by PF. */ u16 vlan; + u16 func_qcfg_flags; u32 flags; #define BNXT_VF_QOS 0x1 #define BNXT_VF_SPOOFCHK 0x2 @@ -1479,6 +1479,7 @@ struct bnxt { #define BNXT_FW_CAP_IF_CHANGE 0x00000010 #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 + #define BNXT_FW_CAP_TRUSTED_VF 0x00000800 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; @@ -1609,6 +1610,7 @@ struct bnxt { /* devlink interface and vf-rep structs */ struct devlink *dl; + struct devlink_port dl_port; enum devlink_eswitch_mode eswitch_mode; struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ u16 *cfa_code_map; /* cfa_code -> vf_idx map */ @@ -1794,7 +1796,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); int bnxt_restore_pf_fw_resources(struct bnxt *bp); -int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); +int bnxt_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid); void bnxt_dim_work(struct work_struct *work); int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 7f56032e44ac..e1feb97bcd81 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -188,6 +188,9 @@ static const struct devlink_param bnxt_dl_params[] = { NULL), }; +static const struct devlink_param bnxt_dl_port_params[] = { +}; + int bnxt_dl_register(struct bnxt *bp) { struct devlink *dl; @@ -225,8 +228,29 @@ int bnxt_dl_register(struct bnxt *bp) goto err_dl_unreg; } + rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); + if (rc) { + netdev_err(bp->dev, "devlink_port_register failed"); + goto err_dl_param_unreg; + } + devlink_port_type_eth_set(&bp->dl_port, bp->dev); + + rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); + if (rc) { + netdev_err(bp->dev, "devlink_port_params_register failed"); + goto err_dl_port_unreg; + } + + devlink_params_publish(dl); + return 0; +err_dl_port_unreg: + devlink_port_unregister(&bp->dl_port); +err_dl_param_unreg: + devlink_params_unregister(dl, bnxt_dl_params, + ARRAY_SIZE(bnxt_dl_params)); err_dl_unreg: devlink_unregister(dl); err_dl_free: 
@@ -242,6 +266,9 @@ void bnxt_dl_unregister(struct bnxt *bp) if (!dl) return; + devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params, + ARRAY_SIZE(bnxt_dl_port_params)); + devlink_port_unregister(&bp->dl_port); devlink_params_unregister(dl, bnxt_dl_params, ARRAY_SIZE(bnxt_dl_params)); devlink_unregister(dl); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 0a0995894ddb..b6c610339501 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2016-2019 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -98,6 +98,7 @@ struct hwrm_short_input { struct cmd_nums { __le16 req_type; #define HWRM_VER_GET 0x0UL + #define HWRM_ERROR_RECOVERY_QCFG 0xcUL #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL #define HWRM_FUNC_BUF_UNRGTR 0xeUL #define HWRM_FUNC_VF_CFG 0xfUL @@ -221,6 +222,7 @@ struct cmd_nums { #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL + #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL #define HWRM_CFA_VFR_ALLOC 0xfdUL #define HWRM_CFA_VFR_FREE 0xfeUL #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL @@ -269,6 +271,7 @@ struct cmd_nums { #define HWRM_ENGINE_CKV_FLUSH 0x133UL #define HWRM_ENGINE_CKV_RNG_GET 0x134UL #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL + #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL #define HWRM_ENGINE_QG_QUERY 0x13dUL #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL @@ -296,6 +299,7 @@ struct cmd_nums { #define HWRM_ENGINE_NQ_ALLOC 0x162UL #define HWRM_ENGINE_NQ_FREE 0x163UL #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL + #define HWRM_ENGINE_FUNC_QCFG 0x165UL #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL @@ -379,15 +383,15 @@ struct hwrm_err_output { }; #define HWRM_NA_SIGNATURE ((__le32)(-1)) #define HWRM_MAX_REQ_LEN 128 -#define HWRM_MAX_RESP_LEN 280 +#define HWRM_MAX_RESP_LEN 704 #define HW_HASH_INDEX_SIZE 0x80 #define HW_HASH_KEY_SIZE 40 #define HWRM_RESP_VALID_KEY 1 #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 35 -#define HWRM_VERSION_STR "1.10.0.35" +#define HWRM_VERSION_RSVD 47 +#define HWRM_VERSION_STR "1.10.0.47" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -580,6 +584,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL @@ -595,6 +600,9 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define 
ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR @@ -724,6 +732,30 @@ struct hwrm_async_event_cmpl_reset_notify { #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 }; +/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_recovery { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL +}; + /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_vf_cfg_change { __le16 type; @@ -1014,6 +1046,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1185,6 +1218,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1390,6 +1424,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL + #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -2024,6 +2059,89 @@ struct hwrm_func_backing_store_cfg_output { u8 valid; }; +/* hwrm_error_recovery_qcfg_input (size:192b/24B) */ +struct hwrm_error_recovery_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */ +struct hwrm_error_recovery_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL + __le32 driver_polling_freq; + __le32 master_func_wait_period; + __le32 normal_func_wait_period; + __le32 master_func_wait_period_after_reset; + 
__le32 max_bailout_time_after_reset; + __le32 fw_health_status_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2 + __le32 fw_heartbeat_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2 + __le32 fw_reset_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg; + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg_mask; + u8 unused_0[3]; + u8 reg_array_cnt; + __le32 reset_reg[16]; + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL 
+ #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2 + __le32 reset_reg_val[16]; + u8 delay_after_reset[16]; + u8 unused_1[7]; + u8 valid; +}; + /* hwrm_func_drv_if_change_input (size:192b/24B) */ struct hwrm_func_drv_if_change_input { __le16 req_type; @@ -2955,6 +3073,7 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL __le16 supported_speeds_auto_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL @@ -2970,6 +3089,7 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL __le16 supported_speeds_eee_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL @@ -4919,6 +5039,35 @@ struct hwrm_ring_free_output { u8 valid; }; +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[4]; + u8 consumer_idx[3]; + u8 valid; +}; + /* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ struct hwrm_ring_aggint_qcaps_input { __le16 req_type; @@ -5446,19 +5595,21 @@ struct hwrm_cfa_encap_record_alloc_input { __le64 resp_addr; __le32 flags; #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL u8 encap_type; - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL - #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL + #define 
CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 u8 unused_0[3]; __le32 encap_data[20]; }; @@ -5506,6 +5657,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL __le32 enables; #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL @@ -5627,7 +5779,8 @@ struct hwrm_cfa_ntuple_filter_cfg_input { #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL - u8 unused_0[4]; + __le32 flags; + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL __le64 ntuple_filter_id; __le32 new_dst_id; __le32 new_mirror_vnic_id; @@ -5892,13 +6045,15 @@ struct hwrm_cfa_flow_info_input { __le64 ext_flow_handle; }; -/* hwrm_cfa_flow_info_output (size:448b/56B) */ +/* hwrm_cfa_flow_info_output (size:5632b/704B) */ struct hwrm_cfa_flow_info_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; u8 flags; + #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL + #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL u8 profile; __le16 src_fid; __le16 dst_fid; @@ -5910,7 +6065,10 @@ struct hwrm_cfa_flow_info_output { __le16 flow_handle; __le32 tunnel_handle; __le16 flow_timer; - u8 unused_0[5]; + u8 unused_0[6]; + __le32 flow_key_data[130]; + __le32 flow_action_info[30]; + u8 unused_1[7]; u8 valid; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d80f5c981d90..2b90a2bb1a1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -121,6 +121,54 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) return rc; } +static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) { + mutex_unlock(&bp->hwrm_cmd_lock); + return -EIO; + } + vf->func_qcfg_flags = le16_to_cpu(resp->flags); + mutex_unlock(&bp->hwrm_cmd_lock); + return 0; +} + +static bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) + return !!(vf->flags & BNXT_VF_TRUST); + + bnxt_hwrm_func_qcfg_flags(bp, vf); + return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF); +} + +static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_func_cfg_input req = 
{0}; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(vf->fw_fid); + if (vf->flags & BNXT_VF_TRUST) + req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + else + req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + return -EIO; + return 0; +} + int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) { struct bnxt *bp = netdev_priv(dev); @@ -135,6 +183,7 @@ int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) else vf->flags &= ~BNXT_VF_TRUST; + bnxt_hwrm_set_trusted_vf(bp, vf); return 0; } @@ -164,7 +213,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, else ivi->qos = 0; ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); - ivi->trusted = !!(vf->flags & BNXT_VF_TRUST); + ivi->trusted = bnxt_is_trusted_vf(bp, vf); if (!(vf->flags & BNXT_VF_LINK_FORCED)) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->flags & BNXT_VF_LINK_UP) @@ -935,9 +984,10 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) * if the PF assigned MAC address is zero */ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { + bool trust = bnxt_is_trusted_vf(bp, vf); + if (is_valid_ether_addr(req->dflt_mac_addr) && - ((vf->flags & BNXT_VF_TRUST) || - !is_valid_ether_addr(vf->mac_addr) || + (trust || !is_valid_ether_addr(vf->mac_addr) || ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); @@ -962,7 +1012,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) * Otherwise, it must match the VF MAC address if firmware spec >= * 1.2.2 */ - if (vf->flags & BNXT_VF_TRUST) { + if (bnxt_is_trusted_vf(bp, vf)) { mac_ok = true; } else if (is_valid_ether_addr(vf->mac_addr)) { if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index c683b5e96b1d..44d6c5743fb9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -45,7 +45,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) struct bnxt *bp; /* check if dev belongs to the same switch */ - if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) { + if (!netdev_port_same_parent_id(pf_bp->dev, dev)) { netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch", dev->ifindex); return BNXT_FID_INVALID; @@ -61,9 +61,9 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) static int bnxt_tc_parse_redir(struct bnxt *bp, struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) + const struct flow_action_entry *act) { - struct net_device *dev = tcf_mirred_dev(tc_act); + struct net_device *dev = act->dev; if (!dev) { netdev_info(bp->dev, "no dev in mirred action"); @@ -77,16 +77,16 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, static int bnxt_tc_parse_vlan(struct bnxt *bp, struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) + const struct flow_action_entry *act) { - switch (tcf_vlan_action(tc_act)) { - case TCA_VLAN_ACT_POP: + switch (act->id) { + case FLOW_ACTION_VLAN_POP: actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; break; - case TCA_VLAN_ACT_PUSH: + case FLOW_ACTION_VLAN_PUSH: actions->flags |= 
BNXT_TC_ACTION_FLAG_PUSH_VLAN; - actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); - actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + actions->push_vlan_tci = htons(act->vlan.vid); + actions->push_vlan_tpid = act->vlan.proto; break; default: return -EOPNOTSUPP; @@ -96,10 +96,10 @@ static int bnxt_tc_parse_vlan(struct bnxt *bp, static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) + const struct flow_action_entry *act) { - struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act); - struct ip_tunnel_key *tun_key = &tun_info->key; + const struct ip_tunnel_info *tun_info = act->tunnel; + const struct ip_tunnel_key *tun_key = &tun_info->key; if (ip_tunnel_info_af(tun_info) != AF_INET) { netdev_info(bp->dev, "only IPv4 tunnel-encap is supported"); @@ -113,51 +113,43 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, static int bnxt_tc_parse_actions(struct bnxt *bp, struct bnxt_tc_actions *actions, - struct tcf_exts *tc_exts) + struct flow_action *flow_action) { - const struct tc_action *tc_act; + struct flow_action_entry *act; int i, rc; - if (!tcf_exts_has_actions(tc_exts)) { + if (!flow_action_has_entries(flow_action)) { netdev_info(bp->dev, "no actions"); return -EINVAL; } - tcf_exts_for_each_action(i, tc_act, tc_exts) { - /* Drop action */ - if (is_tcf_gact_shot(tc_act)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: actions->flags |= BNXT_TC_ACTION_FLAG_DROP; return 0; /* don't bother with other actions */ - } - - /* Redirect action */ - if (is_tcf_mirred_egress_redirect(tc_act)) { - rc = bnxt_tc_parse_redir(bp, actions, tc_act); + case FLOW_ACTION_REDIRECT: + rc = bnxt_tc_parse_redir(bp, actions, act); if (rc) return rc; - continue; - } - - /* Push/pop VLAN */ - if (is_tcf_vlan(tc_act)) { - rc = bnxt_tc_parse_vlan(bp, actions, tc_act); + break; + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: + rc = bnxt_tc_parse_vlan(bp, actions, act); if (rc) return rc; - continue; - } - - /* Tunnel encap */ - if (is_tcf_tunnel_set(tc_act)) { - rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act); + break; + case FLOW_ACTION_TUNNEL_ENCAP: + rc = bnxt_tc_parse_tunnel_set(bp, actions, act); if (rc) return rc; - continue; - } - - /* Tunnel decap */ - if (is_tcf_tunnel_release(tc_act)) { + break; + case FLOW_ACTION_TUNNEL_DECAP: actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; - continue; + break; + default: + break; } } @@ -177,18 +169,12 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, return 0; } -#define GET_KEY(flow_cmd, key_type) \ - skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ - (flow_cmd)->key) -#define GET_MASK(flow_cmd, key_type) \ - skb_flow_dissector_target((flow_cmd)->dissector, key_type,\ - (flow_cmd)->mask) - static int bnxt_tc_parse_flow(struct bnxt *bp, struct tc_cls_flower_offload *tc_flow_cmd, struct bnxt_tc_flow *flow) { - struct flow_dissector *dissector = tc_flow_cmd->dissector; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(tc_flow_cmd); + struct flow_dissector *dissector = rule->match.dissector; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || @@ -198,143 +184,123 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, return -EOPNOTSUPP; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); 
- struct flow_dissector_key_basic *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - flow->l2_key.ether_type = key->n_proto; - flow->l2_mask.ether_type = mask->n_proto; + flow_rule_match_basic(rule, &match); + flow->l2_key.ether_type = match.key->n_proto; + flow->l2_mask.ether_type = match.mask->n_proto; - if (key->n_proto == htons(ETH_P_IP) || - key->n_proto == htons(ETH_P_IPV6)) { - flow->l4_key.ip_proto = key->ip_proto; - flow->l4_mask.ip_proto = mask->ip_proto; + if (match.key->n_proto == htons(ETH_P_IP) || + match.key->n_proto == htons(ETH_P_IPV6)) { + flow->l4_key.ip_proto = match.key->ip_proto; + flow->l4_mask.ip_proto = match.mask->ip_proto; } } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); - struct flow_dissector_key_eth_addrs *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + flow_rule_match_eth_addrs(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; - ether_addr_copy(flow->l2_key.dmac, key->dst); - ether_addr_copy(flow->l2_mask.dmac, mask->dst); - ether_addr_copy(flow->l2_key.smac, key->src); - ether_addr_copy(flow->l2_mask.smac, mask->src); + ether_addr_copy(flow->l2_key.dmac, match.key->dst); + ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); + ether_addr_copy(flow->l2_key.smac, match.key->src); + ether_addr_copy(flow->l2_mask.smac, match.mask->src); } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); - struct flow_dissector_key_vlan *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + flow_rule_match_vlan(rule, &match); flow->l2_key.inner_vlan_tci = - cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority)); + cpu_to_be16(VLAN_TCI(match.key->vlan_id, + match.key->vlan_priority)); flow->l2_mask.inner_vlan_tci = - cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority))); + cpu_to_be16((VLAN_TCI(match.mask->vlan_id, + match.mask->vlan_priority))); flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); flow->l2_mask.inner_vlan_tpid = htons(0xffff); flow->l2_key.num_vlans = 1; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - struct flow_dissector_key_ipv4_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); - struct flow_dissector_key_ipv4_addrs *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + flow_rule_match_ipv4_addrs(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; - flow->l3_key.ipv4.daddr.s_addr = key->dst; - flow->l3_mask.ipv4.daddr.s_addr = mask->dst; - flow->l3_key.ipv4.saddr.s_addr = key->src; - flow->l3_mask.ipv4.saddr.s_addr = mask->src; - } else if (dissector_uses_key(dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - struct flow_dissector_key_ipv6_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); - struct flow_dissector_key_ipv6_addrs *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS); - + flow->l3_key.ipv4.daddr.s_addr = match.key->dst; + flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst; + flow->l3_key.ipv4.saddr.s_addr = 
match.key->src; + flow->l3_mask.ipv4.saddr.s_addr = match.mask->src; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; - flow->l3_key.ipv6.daddr = key->dst; - flow->l3_mask.ipv6.daddr = mask->dst; - flow->l3_key.ipv6.saddr = key->src; - flow->l3_mask.ipv6.saddr = mask->src; + flow->l3_key.ipv6.daddr = match.key->dst; + flow->l3_mask.ipv6.daddr = match.mask->dst; + flow->l3_key.ipv6.saddr = match.key->src; + flow->l3_mask.ipv6.saddr = match.mask->src; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); - struct flow_dissector_key_ports *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + flow_rule_match_ports(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; - flow->l4_key.ports.dport = key->dst; - flow->l4_mask.ports.dport = mask->dst; - flow->l4_key.ports.sport = key->src; - flow->l4_mask.ports.sport = mask->src; + flow->l4_key.ports.dport = match.key->dst; + flow->l4_mask.ports.dport = match.mask->dst; + flow->l4_key.ports.sport = match.key->src; + flow->l4_mask.ports.sport = match.mask->src; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) { - struct flow_dissector_key_icmp *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); - struct flow_dissector_key_icmp *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_match_icmp match; + flow_rule_match_icmp(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; - flow->l4_key.icmp.type = key->type; - flow->l4_key.icmp.code = key->code; - flow->l4_mask.icmp.type = mask->type; - flow->l4_mask.icmp.code = mask->code; + flow->l4_key.icmp.type = match.key->type; + flow->l4_key.icmp.code = match.key->code; + flow->l4_mask.icmp.type = match.mask->type; + flow->l4_mask.icmp.code = match.mask->code; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { - struct flow_dissector_key_ipv4_addrs *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); - struct flow_dissector_key_ipv4_addrs *mask = - GET_MASK(tc_flow_cmd, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + flow_rule_match_enc_ipv4_addrs(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; - flow->tun_key.u.ipv4.dst = key->dst; - flow->tun_mask.u.ipv4.dst = mask->dst; - flow->tun_key.u.ipv4.src = key->src; - flow->tun_mask.u.ipv4.src = mask->src; - } else if (dissector_uses_key(dissector, + flow->tun_key.u.ipv4.dst = match.key->dst; + flow->tun_mask.u.ipv4.dst = match.mask->dst; + flow->tun_key.u.ipv4.src = match.key->src; + flow->tun_mask.u.ipv4.src = match.mask->src; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { return -EOPNOTSUPP; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); - struct flow_dissector_key_keyid *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; + flow_rule_match_enc_keyid(rule, &match); flow->flags |= 
BNXT_TC_FLOW_FLAGS_TUNL_ID; - flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid); - flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid); + flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid); + flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid); } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { - struct flow_dissector_key_ports *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); - struct flow_dissector_key_ports *mask = - GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_match_ports match; + flow_rule_match_enc_ports(rule, &match); flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; - flow->tun_key.tp_dst = key->dst; - flow->tun_mask.tp_dst = mask->dst; - flow->tun_key.tp_src = key->src; - flow->tun_mask.tp_src = mask->src; + flow->tun_key.tp_dst = match.key->dst; + flow->tun_mask.tp_dst = match.mask->dst; + flow->tun_key.tp_src = match.key->src; + flow->tun_mask.tp_src = match.mask->src; } - return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); + return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action); } static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, @@ -1324,7 +1290,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, bnxt_tc_set_flow_dir(bp, flow, src_fid); if (!bnxt_tc_can_offload(bp, flow)) { - rc = -ENOSPC; + rc = -EOPNOTSUPP; goto free_node; } @@ -1422,8 +1388,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, lastused = flow->lastused; spin_unlock(&flow->stats_lock); - tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, - lastused); + flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, + lastused); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index ea45a9b8179e..cf475873ce81 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -43,9 +43,6 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, if (ulp_id == BNXT_ROCE_ULP) { unsigned int max_stat_ctxs; - if (bp->flags & BNXT_FLAG_CHIP_P5) - return -EOPNOTSUPP; - max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS || bp->cp_nr_rings == max_stat_ctxs) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 9a25c05aa571..2bdd2da9aac7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -237,21 +237,17 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } -static int bnxt_vf_rep_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) +static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct bnxt_vf_rep *vf_rep = netdev_priv(dev); /* as only PORT_PARENT_ID is supported currently use common code * between PF and VF-rep for now. 
*/ - return bnxt_port_attr_get(vf_rep->bp, attr); + return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid); } -static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { - .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get -}; - static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { .get_drvinfo = bnxt_vf_rep_get_drvinfo }; @@ -262,6 +258,7 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = { .ndo_start_xmit = bnxt_vf_rep_xmit, .ndo_get_stats64 = bnxt_vf_rep_get_stats64, .ndo_setup_tc = bnxt_vf_rep_setup_tc, + .ndo_get_port_parent_id = bnxt_vf_rep_get_port_parent_id, .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name }; @@ -392,7 +389,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->netdev_ops = &bnxt_vf_rep_netdev_ops; dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; - SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); /* Just inherit all the featues of the parent PF as the VF-R * uses the RX/TX rings of the parent PF */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index aceb9b7b55bd..51880d83131a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -525,7 +525,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) .asym_pause = 0, }; - phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL); + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); if (!phydev || IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return -ENODEV; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5db9f4158e62..134ae2862efa 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, * for transmits, we just free buffers. */ - dev_kfree_skb_irq(sb); + dev_consume_skb_irq(sb); /* * .. and advance to the next buffer. diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index b1627dd5f2fd..328373e0578f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -721,7 +721,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return 0; - /* else: fall through */ + /* fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) @@ -782,7 +782,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum) case TG3_APE_LOCK_GPIO: if (tg3_asic_rev(tp) == ASIC_REV_5761) return; - /* else: fall through */ + /* fall through */ case TG3_APE_LOCK_GRC: case TG3_APE_LOCK_MEM: if (!tp->pci_fn) diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index a36e38676640..84741d288ffa 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -743,7 +743,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_TIMEOUT: bfa_nw_ioc_hw_sem_release(ioc); - bfa_ioc_pf_failed(ioc); + bfa_ioc_pf_failed(ioc); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); break; @@ -788,9 +788,8 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_INITFAIL: del_timer(&ioc->iocpf_timer); - /* - * !!! fall through !!! 
- */ + /* fall through */ + case IOCPF_E_TIMEOUT: bfa_nw_ioc_hw_sem_release(ioc); if (event == IOCPF_E_TIMEOUT) @@ -858,9 +857,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) case IOCPF_E_FAIL: del_timer(&ioc->iocpf_timer); - /* - * !!! fall through !!! - */ + /* fall through*/ case IOCPF_E_TIMEOUT: bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 3d45f4c92cf6..acc66a7e7b95 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -643,6 +643,7 @@ #define MACB_CAPS_JUMBO 0x00000020 #define MACB_CAPS_GEM_HAS_PTP 0x00000040 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 +#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 #define MACB_CAPS_FIFO_MODE 0x10000000 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 #define MACB_CAPS_SG_DISABLED 0x40000000 @@ -714,6 +715,8 @@ __v; \ }) +#define MACB_READ_NSR(bp) macb_readl(bp, NSR) + /* struct macb_dma_desc - Hardware DMA descriptor * @addr: DMA address of data buffer * @ctrl: Control and status bits @@ -1082,7 +1085,7 @@ struct macb_config { unsigned int dma_burst_length; int (*clk_init)(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, - struct clk **rx_clk); + struct clk **rx_clk, struct clk **tsu_clk); int (*init)(struct platform_device *pdev); int jumbo_max_len; }; @@ -1162,6 +1165,7 @@ struct macb { struct clk *hclk; struct clk *tx_clk; struct clk *rx_clk; + struct clk *tsu_clk; struct net_device *dev; union { struct macb_stats macb; @@ -1214,6 +1218,8 @@ struct macb { int rx_bd_rd_prefetch; int tx_bd_rd_prefetch; + + u32 rx_intr_mask; }; #ifdef CONFIG_MACB_USE_HWSTAMP diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 66cc7927061a..ad099fd01b45 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -36,6 +36,8 @@ #include #include #include +#include +#include #include "macb.h" #define MACB_RX_BUFFER_SIZE 128 @@ -56,8 +58,7 @@ /* level of occupied TX descriptors under which we wake up TX process */ #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) -#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ - | MACB_BIT(ISR_ROVR)) +#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ | MACB_BIT(ISR_RLE) \ | MACB_BIT(TXERR)) @@ -80,6 +81,10 @@ */ #define MACB_HALT_TIMEOUT 1230 +#define MACB_PM_TIMEOUT 100 /* ms */ + +#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */ + /* DMA buffer descriptor might be different size * depends on hardware configuration: * @@ -319,10 +324,26 @@ static void macb_get_hwaddr(struct macb *bp) eth_hw_addr_random(bp->dev); } +static int macb_mdio_wait_for_idle(struct macb *bp) +{ + u32 val; + + return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), + 1, MACB_MDIO_TIMEOUT); +} + static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct macb *bp = bus->priv; - int value; + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); + if (status < 0) + goto mdio_pm_exit; + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_read_exit; macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | MACB_BF(RW, MACB_MAN_READ) @@ -330,19 +351,32 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | MACB_BF(REGA, regnum) | MACB_BF(CODE, MACB_MAN_CODE))); - /* wait for end of transfer */ - while 
(!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) - cpu_relax(); + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_read_exit; - value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); + status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); - return value; +mdio_read_exit: + pm_runtime_mark_last_busy(&bp->pdev->dev); + pm_runtime_put_autosuspend(&bp->pdev->dev); +mdio_pm_exit: + return status; } static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct macb *bp = bus->priv; + int status; + + status = pm_runtime_get_sync(&bp->pdev->dev); + if (status < 0) + goto mdio_pm_exit; + + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_write_exit; macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | MACB_BF(RW, MACB_MAN_WRITE) @@ -351,11 +385,15 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | MACB_BF(CODE, MACB_MAN_CODE) | MACB_BF(DATA, value))); - /* wait for end of transfer */ - while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) - cpu_relax(); + status = macb_mdio_wait_for_idle(bp); + if (status < 0) + goto mdio_write_exit; - return 0; +mdio_write_exit: + pm_runtime_mark_last_busy(&bp->pdev->dev); + pm_runtime_put_autosuspend(&bp->pdev->dev); +mdio_pm_exit: + return status; } /** @@ -1270,7 +1308,7 @@ static int macb_poll(struct napi_struct *napi, int budget) queue_writel(queue, ISR, MACB_BIT(RCOMP)); napi_reschedule(napi); } else { - queue_writel(queue, IER, MACB_RX_INT_FLAGS); + queue_writel(queue, IER, bp->rx_intr_mask); } } @@ -1288,7 +1326,7 @@ static void macb_hresp_error_task(unsigned long data) u32 ctrl; for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { - queue_writel(queue, IDR, MACB_RX_INT_FLAGS | + queue_writel(queue, IDR, bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -1318,7 +1356,7 @@ static void macb_hresp_error_task(unsigned long data) /* Enable interrupts */ queue_writel(queue, IER, - MACB_RX_INT_FLAGS | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -1372,14 +1410,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) (unsigned int)(queue - bp->queues), (unsigned long)status); - if (status & MACB_RX_INT_FLAGS) { + if (status & bp->rx_intr_mask) { /* There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts * now. */ - queue_writel(queue, IDR, MACB_RX_INT_FLAGS); + queue_writel(queue, IDR, bp->rx_intr_mask); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(RCOMP)); @@ -1412,8 +1450,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) /* There is a hardware issue under heavy load where DMA can * stop, this causes endless "used buffer descriptor read" * interrupts but it can be cleared by re-enabling RX. See - * the at91 manual, section 41.3.1 or the Zynq manual - * section 16.7.4 for details. + * the at91rm9200 manual, section 41.3.1 or the Zynq manual + * section 16.7.4 for details. RXUBR is only enabled for + * these two versions. 
*/ if (status & MACB_BIT(RXUBR)) { ctrl = macb_readl(bp, NCR); @@ -1734,7 +1773,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) if (!nskb) return -ENOMEM; - dev_kfree_skb_any(*skb); + dev_consume_skb_any(*skb); *skb = nskb; } @@ -2259,7 +2298,7 @@ static void macb_init_hw(struct macb *bp) /* Enable interrupts */ queue_writel(queue, IER, - MACB_RX_INT_FLAGS | + bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); } @@ -2397,12 +2436,18 @@ static int macb_open(struct net_device *dev) netdev_dbg(bp->dev, "open\n"); + err = pm_runtime_get_sync(&bp->pdev->dev); + if (err < 0) + goto pm_exit; + /* carrier starts down */ netif_carrier_off(dev); /* if the phy is not yet register, retry later*/ - if (!dev->phydev) - return -EAGAIN; + if (!dev->phydev) { + err = -EAGAIN; + goto pm_exit; + } /* RX buffers initialization */ macb_init_rx_buffer_size(bp, bufsz); @@ -2411,7 +2456,7 @@ static int macb_open(struct net_device *dev) if (err) { netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", err); - return err; + goto pm_exit; } bp->macbgem_ops.mog_init_rings(bp); @@ -2428,6 +2473,11 @@ static int macb_open(struct net_device *dev) if (bp->ptp_info) bp->ptp_info->ptp_init(dev); +pm_exit: + if (err) { + pm_runtime_put_sync(&bp->pdev->dev); + return err; + } return 0; } @@ -2456,6 +2506,8 @@ static int macb_close(struct net_device *dev) if (bp->ptp_info) bp->ptp_info->ptp_remove(dev); + pm_runtime_put(&bp->pdev->dev); + return 0; } @@ -3304,7 +3356,7 @@ static void macb_probe_queues(void __iomem *mem, static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, - struct clk **rx_clk) + struct clk **rx_clk, struct clk **tsu_clk) { struct macb_platform_data *pdata; int err; @@ -3338,6 +3390,10 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, if (IS_ERR(*rx_clk)) *rx_clk = NULL; + *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk"); + if (IS_ERR(*tsu_clk)) + *tsu_clk = NULL; + err = clk_prepare_enable(*pclk); if (err) { dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); @@ -3362,8 +3418,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, goto err_disable_txclk; } + err = clk_prepare_enable(*tsu_clk); + if (err) { + dev_err(&pdev->dev, "failed to enable tsu_clk (%u)\n", err); + goto err_disable_rxclk; + } + return 0; +err_disable_rxclk: + clk_disable_unprepare(*rx_clk); + err_disable_txclk: clk_disable_unprepare(*tx_clk); @@ -3673,9 +3738,9 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb, /* Store packet information (to free when Tx completed) */ lp->skb = skb; lp->skb_length = skb->len; - lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(NULL, lp->skb_physaddr)) { + lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) { dev_kfree_skb_any(skb); dev->stats.tx_dropped++; netdev_err(dev, "%s: DMA mapping error\n", __func__); @@ -3763,9 +3828,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) dev->stats.tx_errors++; if (lp->skb) { - dev_kfree_skb_irq(lp->skb); + dev_consume_skb_irq(lp->skb); lp->skb = NULL; - dma_unmap_single(NULL, lp->skb_physaddr, + dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); dev->stats.tx_packets++; dev->stats.tx_bytes += lp->skb_length; @@ -3814,13 +3879,14 @@ static const struct net_device_ops at91ether_netdev_ops = { static 
int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk, struct clk **hclk, struct clk **tx_clk, - struct clk **rx_clk) + struct clk **rx_clk, struct clk **tsu_clk) { int err; *hclk = NULL; *tx_clk = NULL; *rx_clk = NULL; + *tsu_clk = NULL; *pclk = devm_clk_get(&pdev->dev, "ether_clk"); if (IS_ERR(*pclk)) @@ -3907,6 +3973,7 @@ static const struct macb_config sama5d4_config = { }; static const struct macb_config emac_config = { + .caps = MACB_CAPS_NEEDS_RSTONUBR, .clk_init = at91ether_clk_init, .init = at91ether_init, }; @@ -3928,7 +3995,8 @@ static const struct macb_config zynqmp_config = { }; static const struct macb_config zynq_config = { - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | + MACB_CAPS_NEEDS_RSTONUBR, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, @@ -3941,6 +4009,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,np4-macb", .data = &np4_config }, { .compatible = "cdns,pc302-gem", .data = &pc302gem_config }, { .compatible = "cdns,gem", .data = &pc302gem_config }, + { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, @@ -3968,11 +4037,12 @@ static int macb_probe(struct platform_device *pdev) { const struct macb_config *macb_config = &default_gem_config; int (*clk_init)(struct platform_device *, struct clk **, - struct clk **, struct clk **, struct clk **) - = macb_config->clk_init; + struct clk **, struct clk **, struct clk **, + struct clk **) = macb_config->clk_init; int (*init)(struct platform_device *) = macb_config->init; struct device_node *np = pdev->dev.of_node; struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; + struct clk *tsu_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; bool native_io; @@ -4000,10 +4070,15 @@ static int macb_probe(struct platform_device *pdev) } } - err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk); + err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); if (err) return err; + pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); native_io = hw_is_native_io(mem); macb_probe_queues(mem, native_io, &queue_mask, &num_queues); @@ -4037,6 +4112,7 @@ static int macb_probe(struct platform_device *pdev) bp->hclk = hclk; bp->tx_clk = tx_clk; bp->rx_clk = rx_clk; + bp->tsu_clk = tsu_clk; if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; @@ -4083,6 +4159,10 @@ static int macb_probe(struct platform_device *pdev) macb_dma_desc_get_size(bp); } + bp->rx_intr_mask = MACB_RX_INT_FLAGS; + if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) + bp->rx_intr_mask |= MACB_BIT(RXUBR); + mac = of_get_mac_address(np); if (mac) { ether_addr_copy(bp->dev->dev_addr, mac); @@ -4134,6 +4214,9 @@ static int macb_probe(struct platform_device *pdev) macb_is_gem(bp) ? 
"GEM" : "MACB", macb_readl(bp, MID), dev->base_addr, dev->irq, dev->dev_addr); + pm_runtime_mark_last_busy(&bp->pdev->dev); + pm_runtime_put_autosuspend(&bp->pdev->dev); + return 0; err_out_unregister_mdio: @@ -4152,6 +4235,10 @@ err_disable_clocks: clk_disable_unprepare(hclk); clk_disable_unprepare(pclk); clk_disable_unprepare(rx_clk); + clk_disable_unprepare(tsu_clk); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); return err; } @@ -4175,10 +4262,16 @@ static int macb_remove(struct platform_device *pdev) mdiobus_free(bp->mii_bus); unregister_netdev(dev); - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); - clk_disable_unprepare(bp->rx_clk); + pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + if (!pm_runtime_suspended(&pdev->dev)) { + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + clk_disable_unprepare(bp->rx_clk); + clk_disable_unprepare(bp->tsu_clk); + pm_runtime_set_suspended(&pdev->dev); + } of_node_put(bp->phy_node); free_netdev(dev); } @@ -4190,21 +4283,36 @@ static int __maybe_unused macb_suspend(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); + struct macb_queue *queue = bp->queues; + unsigned long flags; + unsigned int q; + + if (!netif_running(netdev)) + return 0; - netif_carrier_off(netdev); - netif_device_detach(netdev); if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IER, MACB_BIT(WOL)); macb_writel(bp, WOL, MACB_BIT(MAG)); enable_irq_wake(bp->queues[0].irq); + netif_device_detach(netdev); } else { - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); - clk_disable_unprepare(bp->rx_clk); + netif_device_detach(netdev); + for (q = 0, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) + napi_disable(&queue->napi); + phy_stop(netdev->phydev); + phy_suspend(netdev->phydev); + spin_lock_irqsave(&bp->lock, flags); + macb_reset_hw(bp); + spin_unlock_irqrestore(&bp->lock, flags); } + netif_carrier_off(netdev); + if (bp->ptp_info) + bp->ptp_info->ptp_remove(netdev); + pm_runtime_force_suspend(dev); + return 0; } @@ -4212,24 +4320,76 @@ static int __maybe_unused macb_resume(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); + struct macb_queue *queue = bp->queues; + unsigned int q; + + if (!netif_running(netdev)) + return 0; + + pm_runtime_force_resume(dev); if (bp->wol & MACB_WOL_ENABLED) { macb_writel(bp, IDR, MACB_BIT(WOL)); macb_writel(bp, WOL, 0); disable_irq_wake(bp->queues[0].irq); } else { + macb_writel(bp, NCR, MACB_BIT(MPE)); + for (q = 0, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) + napi_enable(&queue->napi); + phy_resume(netdev->phydev); + phy_init_hw(netdev->phydev); + phy_start(netdev->phydev); + } + + bp->macbgem_ops.mog_init_rings(bp); + macb_init_hw(bp); + macb_set_rx_mode(netdev); + netif_device_attach(netdev); + if (bp->ptp_info) + bp->ptp_info->ptp_init(netdev); + + return 0; +} + +static int __maybe_unused macb_runtime_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *netdev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(netdev); + + if (!(device_may_wakeup(&bp->dev->dev))) { + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + 
clk_disable_unprepare(bp->rx_clk); + } + clk_disable_unprepare(bp->tsu_clk); + + return 0; +} + +static int __maybe_unused macb_runtime_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *netdev = platform_get_drvdata(pdev); + struct macb *bp = netdev_priv(netdev); + + if (!(device_may_wakeup(&bp->dev->dev))) { clk_prepare_enable(bp->pclk); clk_prepare_enable(bp->hclk); clk_prepare_enable(bp->tx_clk); clk_prepare_enable(bp->rx_clk); } - - netif_device_attach(netdev); + clk_prepare_enable(bp->tsu_clk); return 0; } -static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); +static const struct dev_pm_ops macb_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume) + SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL) +}; static struct platform_driver macb_driver = { .probe = macb_probe, diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 5f03199a3acf..6650e2a5f171 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -54,7 +54,6 @@ config CAVIUM_PTP tristate "Cavium PTP coprocessor as PTP clock" depends on 64BIT && PCI imply PTP_1588_CLOCK - default y ---help--- This driver adds support for the Precision Time Protocol Clocks and Timestamping coprocessor (PTP) found on Cavium processors. @@ -65,7 +64,6 @@ config CAVIUM_PTP config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT && PCI - depends on MAY_USE_DEVLINK depends on PCI imply PTP_1588_CLOCK select FW_LOADER diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 9f4f3c1d5043..43d11c38b38a 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1450,7 +1450,7 @@ void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, mbox_cmd.recv_len = 0; mbox_cmd.recv_status = 0; mbox_cmd.fn = NULL; - mbox_cmd.fn_arg = 0; + mbox_cmd.fn_arg = NULL; ether_addr_copy(mbox_cmd.msg.s.params, mac); mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf; octeon_mbox_write(oct, &mbox_cmd); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 825a28e5b544..e21bf3724611 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -661,7 +661,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), (((rh->r_dh.encap_on) && (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || (!(rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) + ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) == + CNNIC_CSUM_VERIFIED)))) /* checksum has already been verified */ skb->ip_summed = CHECKSUM_UNNECESSARY; else diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 3d24133e5e49..9b7819fdc9de 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -21,7 +21,6 @@ #include #include #include -#include #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -2908,7 +2907,7 @@ static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, nctrl.ncmd.s.param2 = enable; nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.cb_fn = 0; + nctrl.cb_fn = NULL; retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); @@ -3184,7 
+3183,8 @@ static const struct devlink_ops liquidio_devlink_ops = { }; static int -lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr) +liquidio_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct lio *lio = GET_LIO(dev); struct octeon_device *oct = lio->oct_dev; @@ -3192,24 +3192,12 @@ lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr) if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) return -EOPNOTSUPP; - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = ETH_ALEN; - ether_addr_copy(attr->u.ppid.id, - (void *)&lio->linfo.hw_addr + 2); - break; - - default: - return -EOPNOTSUPP; - } + ppid->id_len = ETH_ALEN; + ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); return 0; } -static const struct switchdev_ops lio_pf_switchdev_ops = { - .switchdev_port_attr_get = lio_pf_switchdev_attr_get, -}; - static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, struct ifla_vf_stats *vf_stats) { @@ -3259,6 +3247,7 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, .ndo_get_vf_stats = liquidio_get_vf_stats, + .ndo_get_port_parent_id = liquidio_get_port_parent_id, }; /** \brief Entry point for the liquidio module @@ -3534,7 +3523,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) * netdev tasks. */ netdev->netdev_ops = &lionetdevops; - SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops); retval = netif_set_real_num_rx_queues(netdev, num_oqueues); if (retval) { diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index de61060721c4..f3f2e71431ac 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -25,7 +25,6 @@ #include "octeon_nic.h" #include "octeon_main.h" #include "octeon_network.h" -#include #include "lio_vf_rep.h" static int lio_vf_rep_open(struct net_device *ndev); @@ -38,6 +37,8 @@ static int lio_vf_rep_phys_port_name(struct net_device *dev, static void lio_vf_rep_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64); static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu); +static int lio_vf_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid); static const struct net_device_ops lio_vf_rep_ndev_ops = { .ndo_open = lio_vf_rep_open, @@ -47,6 +48,7 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = { .ndo_get_phys_port_name = lio_vf_rep_phys_port_name, .ndo_get_stats64 = lio_vf_rep_get_stats64, .ndo_change_mtu = lio_vf_rep_change_mtu, + .ndo_get_port_parent_id = lio_vf_get_port_parent_id, }; static int @@ -443,31 +445,19 @@ xmit_failed: return NETDEV_TX_OK; } -static int -lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr) +static int lio_vf_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); struct net_device *parent_ndev = vf_rep->parent_ndev; struct lio *lio = GET_LIO(parent_ndev); - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = ETH_ALEN; - ether_addr_copy(attr->u.ppid.id, - (void *)&lio->linfo.hw_addr + 2); - break; - - default: - return -EOPNOTSUPP; - } + ppid->id_len = ETH_ALEN; + ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); return 0; } -static const struct switchdev_ops 
lio_vf_rep_switchdev_ops = { - .switchdev_port_attr_get = lio_vf_rep_attr_get, -}; - static void lio_vf_rep_fetch_stats(struct work_struct *work) { @@ -524,7 +514,6 @@ lio_vf_rep_create(struct octeon_device *oct) ndev->min_mtu = LIO_MIN_MTU_SIZE; ndev->max_mtu = LIO_MAX_MTU_SIZE; ndev->netdev_ops = &lio_vf_rep_ndev_ops; - SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops); vf_rep = netdev_priv(ndev); memset(vf_rep, 0, sizeof(*vf_rep)); diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index f4d81765221e..62636c1ed141 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -271,7 +271,7 @@ struct xcast_addr_list { }; struct nicvf_work { - struct delayed_work work; + struct work_struct work; u8 mode; struct xcast_addr_list *mc; }; @@ -327,7 +327,11 @@ struct nicvf { struct nicvf_work rx_mode_work; /* spinlock to protect workqueue arguments from concurrent access */ spinlock_t rx_mode_wq_lock; - + /* workqueue for handling kernel ndo_set_rx_mode() calls */ + struct workqueue_struct *nicvf_rx_mode_wq; + /* mutex to protect VF's mailbox contents from concurrent access */ + struct mutex rx_mode_mtx; + struct delayed_work link_change_work; /* PTP timestamp */ struct cavium_ptp *ptp_clock; /* Inbound timestamping is on */ @@ -575,10 +579,8 @@ struct set_ptp { struct xcast { u8 msg; - union { - u8 mode; - u64 mac; - } data; + u8 mode; + u64 mac:48; }; /* 128 bit shared memory between PF and each VF */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 6c8dcb65ff03..c90252829ed3 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -57,14 +57,8 @@ struct nicpf { #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) u8 *vf_lmac_map; - struct delayed_work dwork; - struct workqueue_struct *check_link; - u8 *link; - u8 *duplex; - u32 *speed; u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; - bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; /* MSI-X */ u8 num_vec; @@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp) nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); } +/* Get BGX LMAC link status and update corresponding VF + * if there is a change, valid only if internal L2 switch + * is not present otherwise VF link is always treated as up + */ +static void nic_link_status_get(struct nicpf *nic, u8 vf) +{ + union nic_mbx mbx = {}; + struct bgx_link_status link; + u8 bgx, lmac; + + mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + + /* Get BGX, LMAC indices for the VF */ + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + /* Get interface link status */ + bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); + + /* Send a mbox message to VF with current link status */ + mbx.link_status.link_up = link.link_up; + mbx.link_status.duplex = link.duplex; + mbx.link_status.speed = link.speed; + mbx.link_status.mac_type = link.mac_type; + + /* reply with link status */ + nic_send_msg_to_vf(nic, vf, &mbx); +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) int i; int ret = 0; - nic->mbx_lock[vf] = true; - mbx_addr = 
nic_get_mbx_addr(vf); mbx_data = (u64 *)&mbx; @@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) switch (mbx.msg.msg) { case NIC_MBOX_MSG_READY: nic_mbx_send_ready(nic, vf); - if (vf < nic->num_vf_en) { - nic->link[vf] = 0; - nic->duplex[vf] = 0; - nic->speed[vf] = 0; - } - goto unlock; + return; case NIC_MBOX_MSG_QS_CFG: reg_addr = NIC_PF_QSET_0_127_CFG | (mbx.qs.num << NIC_QS_ID_SHIFT); @@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_RSS_SIZE: nic_send_rss_size(nic, vf); - goto unlock; + return; case NIC_MBOX_MSG_RSS_CFG: case NIC_MBOX_MSG_RSS_CFG_CONT: nic_config_rss(nic, &mbx.rss_cfg); @@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ nic_enable_vf(nic, vf, true); - goto unlock; + break; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ if (vf >= nic->num_vf_en) @@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_ALLOC_SQS: nic_alloc_sqs(nic, &mbx.sqs_alloc); - goto unlock; + return; case NIC_MBOX_MSG_NICVF_PTR: nic->nicvf[vf] = mbx.nicvf.nicvf; break; case NIC_MBOX_MSG_PNICVF_PTR: nic_send_pnicvf(nic, vf); - goto unlock; + return; case NIC_MBOX_MSG_SNICVF_PTR: nic_send_snicvf(nic, &mbx.nicvf); - goto unlock; + return; case NIC_MBOX_MSG_BGX_STATS: nic_get_bgx_stats(nic, &mbx.bgx_stats); - goto unlock; + return; case NIC_MBOX_MSG_LOOPBACK: ret = nic_config_loopback(nic, &mbx.lbk); break; @@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_PFC: nic_pause_frame(nic, vf, &mbx.pfc); - goto unlock; + return; case NIC_MBOX_MSG_PTP_CFG: nic_config_timestamp(nic, vf, &mbx.ptp); break; @@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); bgx_set_dmac_cam_filter(nic->node, bgx, lmac, - mbx.xcast.data.mac, + mbx.xcast.mac, vf < NIC_VF_PER_MBX_REG ? 
vf : vf - NIC_VF_PER_MBX_REG); break; @@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) } bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); + bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode); break; + case NIC_MBOX_MSG_BGX_LINK_CHANGE: + if (vf >= nic->num_vf_en) { + ret = -1; /* NACK */ + break; + } + nic_link_status_get(nic, vf); + return; default: dev_err(&nic->pdev->dev, "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); @@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) mbx.msg.msg, vf); nic_mbx_send_nack(nic, vf); } -unlock: - nic->mbx_lock[vf] = false; } static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) @@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) return 0; } -/* Poll for BGX LMAC link status and update corresponding VF - * if there is a change, valid only if internal L2 switch - * is not present otherwise VF link is always treated as up - */ -static void nic_poll_for_link(struct work_struct *work) -{ - union nic_mbx mbx = {}; - struct nicpf *nic; - struct bgx_link_status link; - u8 vf, bgx, lmac; - - nic = container_of(work, struct nicpf, dwork.work); - - mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; - - for (vf = 0; vf < nic->num_vf_en; vf++) { - /* Poll only if VF is UP */ - if (!nic->vf_enabled[vf]) - continue; - - /* Get BGX, LMAC indices for the VF */ - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - /* Get interface link status */ - bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); - - /* Inform VF only if link status changed */ - if (nic->link[vf] == link.link_up) - continue; - - if (!nic->mbx_lock[vf]) { - nic->link[vf] = link.link_up; - nic->duplex[vf] = link.duplex; - nic->speed[vf] = link.speed; - - /* Send a mbox message to VF with current link status */ - mbx.link_status.link_up = link.link_up; - mbx.link_status.duplex = link.duplex; - mbx.link_status.speed = link.speed; - mbx.link_status.mac_type = link.mac_type; - nic_send_msg_to_vf(nic, vf, &mbx); - } - } - queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); -} - static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; @@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!nic->vf_lmac_map) goto err_release_regions; - nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->link) - goto err_release_regions; - - nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->duplex) - goto err_release_regions; - - nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL); - if (!nic->speed) - goto err_release_regions; - /* Initialize hardware */ nic_init_hw(nic); @@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_unregister_interrupts; - /* Register a physical link status poll fn() */ - nic->check_link = alloc_workqueue("check_link_status", - WQ_UNBOUND | WQ_MEM_RECLAIM, 1); - if (!nic->check_link) { - err = -ENOMEM; - goto err_disable_sriov; - } - - INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link); - queue_delayed_work(nic->check_link, &nic->dwork, 0); - return 0; -err_disable_sriov: - if (nic->flags & NIC_SRIOV_ENABLED) - 
pci_disable_sriov(pdev); err_unregister_interrupts: nic_unregister_interrupts(nic); err_release_regions: @@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev) if (nic->flags & NIC_SRIOV_ENABLED) pci_disable_sriov(pdev); - if (nic->check_link) { - /* Destroy work Queue */ - cancel_delayed_work_sync(&nic->dwork); - destroy_workqueue(nic->check_link); - } - nic_unregister_interrupts(nic); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 88f8a8fa93cd..503cfadff4ac 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444); MODULE_PARM_DESC(cpi_alg, "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); -/* workqueue for handling kernel ndo_set_rx_mode() calls */ -static struct workqueue_struct *nicvf_rx_mode_wq; - static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) { if (nic->sqs_mode) @@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) { int timeout = NIC_MBOX_MSG_TIMEOUT; int sleep = 10; + int ret = 0; + + mutex_lock(&nic->rx_mode_mtx); nic->pf_acked = false; nic->pf_nacked = false; @@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) netdev_err(nic->netdev, "PF NACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); - return -EINVAL; + ret = -EINVAL; + break; } msleep(sleep); if (nic->pf_acked) @@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) netdev_err(nic->netdev, "PF didn't ACK to mbox msg 0x%02x from VF%d\n", (mbx->msg.msg & 0xFF), nic->vf_id); - return -EBUSY; + ret = -EBUSY; + break; } } - return 0; + mutex_unlock(&nic->rx_mode_mtx); + return ret; } /* Checks if VF is able to comminicate with PF @@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic) return 1; } +static void nicvf_send_cfg_done(struct nicvf *nic) +{ + union nic_mbx mbx = {}; + + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + if (nicvf_send_msg_to_pf(nic, &mbx)) { + netdev_err(nic->netdev, + "PF didn't respond to CFG DONE msg\n"); + } +} + static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) { if (bgx->rx) @@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) break; case NIC_MBOX_MSG_BGX_LINK_CHANGE: nic->pf_acked = true; - nic->link_up = mbx.link_status.link_up; - nic->duplex = mbx.link_status.duplex; - nic->speed = mbx.link_status.speed; - nic->mac_type = mbx.link_status.mac_type; - if (nic->link_up) { - netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", - nic->speed, - nic->duplex == DUPLEX_FULL ? - "Full" : "Half"); - netif_carrier_on(nic->netdev); - netif_tx_start_all_queues(nic->netdev); - } else { - netdev_info(nic->netdev, "Link is Down\n"); - netif_carrier_off(nic->netdev); - netif_tx_stop_all_queues(nic->netdev); + if (nic->link_up != mbx.link_status.link_up) { + nic->link_up = mbx.link_status.link_up; + nic->duplex = mbx.link_status.duplex; + nic->speed = mbx.link_status.speed; + nic->mac_type = mbx.link_status.mac_type; + if (nic->link_up) { + netdev_info(nic->netdev, + "Link is Up %d Mbps %s duplex\n", + nic->speed, + nic->duplex == DUPLEX_FULL ? 
+ "Full" : "Half"); + netif_carrier_on(nic->netdev); + netif_tx_start_all_queues(nic->netdev); + } else { + netdev_info(nic->netdev, "Link is Down\n"); + netif_carrier_off(nic->netdev); + netif_tx_stop_all_queues(nic->netdev); + } } break; case NIC_MBOX_MSG_ALLOC_SQS: @@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev) struct nicvf_cq_poll *cq_poll = NULL; union nic_mbx mbx = {}; + cancel_delayed_work_sync(&nic->link_change_work); + + /* wait till all queued set_rx_mode tasks completes */ + drain_workqueue(nic->nicvf_rx_mode_wq); + mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; nicvf_send_msg_to_pf(nic, &mbx); @@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) return nicvf_send_msg_to_pf(nic, &mbx); } +static void nicvf_link_status_check_task(struct work_struct *work_arg) +{ + struct nicvf *nic = container_of(work_arg, + struct nicvf, + link_change_work.work); + union nic_mbx mbx = {}; + mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; + nicvf_send_msg_to_pf(nic, &mbx); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 2 * HZ); +} + int nicvf_open(struct net_device *netdev) { int cpu, err, qidx; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct nicvf_cq_poll *cq_poll = NULL; - union nic_mbx mbx = {}; + + /* wait till all queued set_rx_mode tasks completes if any */ + drain_workqueue(nic->nicvf_rx_mode_wq); netif_carrier_off(netdev); @@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); /* Send VF config done msg to PF */ - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; - nicvf_write_to_mbx(nic, &mbx); + nicvf_send_cfg_done(nic); + + INIT_DELAYED_WORK(&nic->link_change_work, + nicvf_link_status_check_task); + queue_delayed_work(nic->nicvf_rx_mode_wq, + &nic->link_change_work, 0); return 0; cleanup: @@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, /* flush DMAC filters and reset RX mode */ mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; - nicvf_send_msg_to_pf(nic, &mbx); + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; if (mode & BGX_XCAST_MCAST_FILTER) { /* once enabling filtering, we need to signal to PF to add * its' own LMAC to the filter to accept packets for it. 
*/ mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; - mbx.xcast.data.mac = 0; - nicvf_send_msg_to_pf(nic, &mbx); + mbx.xcast.mac = 0; + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } /* check if we have any specific MACs to be added to PF DMAC filter */ @@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, /* now go through kernel list of MACs and add them one by one */ for (idx = 0; idx < mc_addrs->count; idx++) { mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; - mbx.xcast.data.mac = mc_addrs->mc[idx]; - nicvf_send_msg_to_pf(nic, &mbx); + mbx.xcast.mac = mc_addrs->mc[idx]; + if (nicvf_send_msg_to_pf(nic, &mbx) < 0) + goto free_mc; } - kfree(mc_addrs); } /* and finally set rx mode for PF accordingly */ mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; - mbx.xcast.data.mode = mode; + mbx.xcast.mode = mode; nicvf_send_msg_to_pf(nic, &mbx); +free_mc: + kfree(mc_addrs); } static void nicvf_set_rx_mode_task(struct work_struct *work_arg) { struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, - work.work); + work); struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); u8 mode; struct xcast_addr_list *mc; @@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev) kfree(nic->rx_mode_work.mc); nic->rx_mode_work.mc = mc_list; nic->rx_mode_work.mode = mode; - queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); + queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work); spin_unlock(&nic->rx_mode_wq_lock); } @@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&nic->reset_task, nicvf_reset_task); - INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); + nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d", + WQ_MEM_RECLAIM, + nic->vf_id); + INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); spin_lock_init(&nic->rx_mode_wq_lock); + mutex_init(&nic->rx_mode_mtx); err = register_netdev(netdev); if (err) { @@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev) nic = netdev_priv(netdev); pnetdev = nic->pnicvf->netdev; - cancel_delayed_work_sync(&nic->rx_mode_work.work); - /* Check if this Qset is assigned to different VF. * If yes, clean primary and all secondary Qsets. 
*/ if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) unregister_netdev(pnetdev); + if (nic->nicvf_rx_mode_wq) { + destroy_workqueue(nic->nicvf_rx_mode_wq); + nic->nicvf_rx_mode_wq = NULL; + } nicvf_unregister_interrupts(nic); pci_set_drvdata(pdev, NULL); if (nic->drv_stats) @@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = { static int __init nicvf_init_module(void) { pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); - nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic", - WQ_MEM_RECLAIM); return pci_register_driver(&nicvf_driver); } static void __exit nicvf_cleanup_module(void) { - if (nicvf_rx_mode_wq) { - destroy_workqueue(nicvf_rx_mode_wq); - nicvf_rx_mode_wq = NULL; - } pci_unregister_driver(&nicvf_driver); } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index e337da6ba2a4..673c57b8023f 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx) /* Disable MAC steering (NCSI traffic) */ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) - bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); + bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00); } static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index cbdd20b9ee6f..5cbc54e9eb19 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -60,7 +60,7 @@ #define RX_DMACX_CAM_EN BIT_ULL(48) #define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) #define RX_DMAC_COUNT 32 -#define BGX_CMR_RX_STREERING 0x300 +#define BGX_CMR_RX_STEERING 0x300 #define RX_TRAFFIC_STEER_RULE_COUNT 8 #define BGX_CMR_CHAN_MSK_AND 0x450 #define BGX_CMR_BIST_STATUS 0x460 diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 30de26ef3da4..47b5c8e2104b 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -585,8 +585,7 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p) sizeof(struct cpl_rx_data) + sge->freelQ[!sge->jumbo_fl].dma_offset; - size = (16 * 1024) - - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + size = (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); sge->freelQ[sge->jumbo_fl].rx_buffer_size = size; diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 5701272aa7f7..ce28820c57c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -289,8 +289,7 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start, if (clipt_size < CLIPT_MIN_HASH_BUCKETS) return NULL; - ctbl = kvzalloc(sizeof(*ctbl) + - clipt_size*sizeof(struct list_head), GFP_KERNEL); + ctbl = kvzalloc(struct_size(ctbl, hash_list, clipt_size), GFP_KERNEL); if (!ctbl) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 127b1f624413..7c5bfc931128 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -81,7 +81,7 @@ static int is_fw_attached(struct cudbg_init *pdbg_init) { struct adapter *padap = pdbg_init->adap; - if (!(padap->flags & FW_OK) || padap->use_bd) + if (!(padap->flags & CXGB4_FW_OK) || 
padap->use_bd) return 0; return 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 2d1ca920601e..956219c178e1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -568,7 +568,7 @@ struct sge_rspq; struct port_info { struct adapter *adapter; u16 viid; - s16 xact_addr_filt; /* index of exact MAC address filter */ + int xact_addr_filt; /* index of exact MAC address filter */ u16 rss_size; /* size of VI's RSS table slice */ s8 mdio_addr; enum fw_port_type port_type; @@ -606,17 +606,18 @@ struct dentry; struct work_struct; enum { /* adapter flags */ - FULL_INIT_DONE = (1 << 0), - DEV_ENABLED = (1 << 1), - USING_MSI = (1 << 2), - USING_MSIX = (1 << 3), - FW_OK = (1 << 4), - RSS_TNLALLLOOKUP = (1 << 5), - USING_SOFT_PARAMS = (1 << 6), - MASTER_PF = (1 << 7), - FW_OFLD_CONN = (1 << 9), - ROOT_NO_RELAXED_ORDERING = (1 << 10), - SHUTTING_DOWN = (1 << 11), + CXGB4_FULL_INIT_DONE = (1 << 0), + CXGB4_DEV_ENABLED = (1 << 1), + CXGB4_USING_MSI = (1 << 2), + CXGB4_USING_MSIX = (1 << 3), + CXGB4_FW_OK = (1 << 4), + CXGB4_RSS_TNLALLLOOKUP = (1 << 5), + CXGB4_USING_SOFT_PARAMS = (1 << 6), + CXGB4_MASTER_PF = (1 << 7), + CXGB4_FW_OFLD_CONN = (1 << 9), + CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10), + CXGB4_SHUTTING_DOWN = (1 << 11), + CXGB4_SGE_DBQ_TIMER = (1 << 12), }; enum { @@ -756,6 +757,8 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ #ifdef CONFIG_CHELSIO_T4_DCB u8 dcb_prio; /* DCB Priority bound to queue */ #endif + u8 dbqt; /* SGE Doorbell Queue Timer in use */ + unsigned int dbqtimerix; /* SGE Doorbell Queue Timer Index */ unsigned long tso; /* # of TSO requests */ unsigned long tx_cso; /* # of Tx checksum offloads */ unsigned long vlan_ins; /* # of Tx VLAN insertions */ @@ -816,6 +819,8 @@ struct sge { u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; + u16 dbqtimer_tick; + u16 dbqtimer_val[SGE_NDBQTIMERS]; u32 fl_pg_order; /* large page allocation size */ u32 stat_len; /* length of status page at ring end */ u32 pktshift; /* padding between CPL & packet data */ @@ -860,6 +865,7 @@ struct doorbell_stats { struct hash_mac_addr { struct list_head list; u8 addr[ETH_ALEN]; + unsigned int iface_mac; }; struct uld_msix_bmap { @@ -879,6 +885,7 @@ struct vf_info { unsigned int tx_rate; bool pf_set_mac; u16 vlan; + int link_state; }; enum { @@ -1401,7 +1408,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, rspq_flush_handler_t flush_handler, int cong); int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *netdevq, - unsigned int iqid); + unsigned int iqid, u8 dbqt); int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, struct net_device *dev, unsigned int iqid, unsigned int cmplqid); @@ -1414,6 +1421,8 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); +int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q, + int maxreclaim); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb); @@ -1820,6 +1829,8 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_ofld_eq_free(struct adapter *adap, unsigned 
int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type); +int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers, + u16 *dbqtimers); void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); int t4_update_port_info(struct port_info *pi); int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index b0ff9fa183f4..3130b43bba52 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3143,7 +3143,7 @@ static int tid_info_show(struct seq_file *seq, void *v) seq_printf(seq, ", in use: %u/%u\n", atomic_read(&t->tids_in_use), atomic_read(&t->hash_tids_in_use)); - } else if (adap->flags & FW_OFLD_CONN) { + } else if (adap->flags & CXGB4_FW_OFLD_CONN) { seq_printf(seq, "TID range: %u..%u/%u..%u", t->aftid_base, t->aftid_end, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index d07230c892a5..bec4711005cc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -446,8 +446,10 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, unsigned long *link_mode_mask) { #define SET_LMM(__lmm_name) \ - __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ - link_mode_mask) + do { \ + __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ + link_mode_mask); \ + } while (0) #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ do { \ @@ -541,7 +543,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); - FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full); @@ -552,6 +554,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, break; } + if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) { + FW_CAPS_TO_LMM(FEC_RS, FEC_RS); + FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER); + } else { + SET_LMM(FEC_NONE); + } + FW_CAPS_TO_LMM(ANEG, Autoneg); FW_CAPS_TO_LMM(802_3_PAUSE, Pause); FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); @@ -679,18 +688,15 @@ static int set_link_ksettings(struct net_device *dev, base->autoneg == AUTONEG_DISABLE) { fw_caps = speed_to_fw_caps(base->speed); - /* Must only specify a single speed which must be supported - * as part of the Physical Port Capabilities. - */ - if ((fw_caps & (fw_caps - 1)) != 0 || - !(lc->pcaps & fw_caps)) + /* Speed must be supported by Physical Port Capabilities. 
*/ + if (!(lc->pcaps & fw_caps)) return -EINVAL; lc->speed_caps = fw_caps; lc->acaps = fw_caps; } else { fw_caps = - lmm_to_fw_caps(link_ksettings->link_modes.advertising); + lmm_to_fw_caps(link_ksettings->link_modes.advertising); if (!(lc->pcaps & fw_caps)) return -EINVAL; lc->speed_caps = 0; @@ -869,7 +875,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) return -EINVAL; - if (adapter->flags & FULL_INIT_DONE) + if (adapter->flags & CXGB4_FULL_INIT_DONE) return -EBUSY; for (i = 0; i < pi->nqsets; ++i) { @@ -926,11 +932,190 @@ static int get_adaptive_rx_setting(struct net_device *dev) return q->rspq.adaptive_rx; } -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +/* Return the current global Adapter SGE Doorbell Queue Timer Tick for all + * Ethernet TX Queues. + */ +static int get_dbqtimer_tick(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + + if (!(adap->flags & CXGB4_SGE_DBQ_TIMER)) + return 0; + + return adap->sge.dbqtimer_tick; +} + +/* Return the SGE Doorbell Queue Timer Value for the Ethernet TX Queues + * associated with a Network Device. + */ +static int get_dbqtimer(struct net_device *dev) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge_eth_txq *txq; + + txq = &adap->sge.ethtxq[pi->first_qset]; + + if (!(adap->flags & CXGB4_SGE_DBQ_TIMER)) + return 0; + + /* all of the TX Queues use the same Timer Index */ + return adap->sge.dbqtimer_val[txq->dbqtimerix]; +} + +/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX + * Queues. This is the fundamental "Tick" that sets the scale of values which + * can be used. Individual Ethernet TX Queues index into a relatively small + * array of Tick Multipliers. Changing the base Tick will thus change all of + * the resulting Timer Values associated with those multipliers for all + * Ethernet TX Queues. + */ +static int set_dbqtimer_tick(struct net_device *dev, int usecs) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + u32 param, val; + int ret; + + if (!(adap->flags & CXGB4_SGE_DBQ_TIMER)) + return 0; + + /* return early if it's the same Timer Tick we're already using */ + if (s->dbqtimer_tick == usecs) + return 0; + + /* attempt to set the new Timer Tick value */ + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK)); + val = usecs; + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); + if (ret) + return ret; + s->dbqtimer_tick = usecs; + + /* if successful, reread resulting dependent Timer values */ + ret = t4_read_sge_dbqtimers(adap, ARRAY_SIZE(s->dbqtimer_val), + s->dbqtimer_val); + return ret; +} + +/* Set the SGE Doorbell Queue Timer Value for the Ethernet TX Queues + * associated with a Network Device. There is a relatively small array of + * possible Timer Values so we need to pick the closest value available. + */ +static int set_dbqtimer(struct net_device *dev, int usecs) +{ + int qix, timerix, min_timerix, delta, min_delta; + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct sge *s = &adap->sge; + struct sge_eth_txq *txq; + u32 param, val; + int ret; + + if (!(adap->flags & CXGB4_SGE_DBQ_TIMER)) + return 0; + + /* Find the SGE Doorbell Timer Value that's closest to the requested + * value. 
+ */ + min_delta = INT_MAX; + min_timerix = 0; + for (timerix = 0; timerix < ARRAY_SIZE(s->dbqtimer_val); timerix++) { + delta = s->dbqtimer_val[timerix] - usecs; + if (delta < 0) + delta = -delta; + if (delta < min_delta) { + min_delta = delta; + min_timerix = timerix; + } + } + + /* Return early if it's the same Timer Index we're already using. + * We use the same Timer Index for all of the TX Queues for an + * interface so it's only necessary to check the first one. + */ + txq = &s->ethtxq[pi->first_qset]; + if (txq->dbqtimerix == min_timerix) + return 0; + + for (qix = 0; qix < pi->nqsets; qix++, txq++) { + if (adap->flags & CXGB4_FULL_INIT_DONE) { + param = + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) | + FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); + val = min_timerix; + ret = t4_set_params(adap, adap->mbox, adap->pf, 0, + 1, &param, &val); + if (ret) + return ret; + } + txq->dbqtimerix = min_timerix; + } + return 0; +} + +/* Set the global Adapter SGE Doorbell Queue Timer Tick for all Ethernet TX + * Queues and the Timer Value for the Ethernet TX Queues associated with a + * Network Device. Since changing the global Tick changes all of the + * available Timer Values, we need to do this first before selecting the + * resulting closest Timer Value. Moreover, since the Tick is global, + * changing it affects the Timer Values for all Network Devices on the + * adapter. So, before changing the Tick, we grab all of the current Timer + * Values for other Network Devices on this Adapter and then attempt to select + * new Timer Values which are close to the old values ... + */ +static int set_dbqtimer_tickval(struct net_device *dev, + int tick_usecs, int timer_usecs) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + int timer[MAX_NPORTS]; + unsigned int port; + int ret; + + /* Grab the other adapter Network Interface current timers and fill in + * the new one for this Network Interface. + */ + for_each_port(adap, port) + if (port == pi->port_id) + timer[port] = timer_usecs; + else + timer[port] = get_dbqtimer(adap->port[port]); + + /* Change the global Tick first ... */ + ret = set_dbqtimer_tick(dev, tick_usecs); + if (ret) + return ret; + + /* ... and then set all of the Network Interface Timer Values ... */ + for_each_port(adap, port) { + ret = set_dbqtimer(adap->port[port], timer[port]); + if (ret) + return ret; + } + + return 0; +} + +static int set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coalesce) { - set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); - return set_rx_intr_params(dev, c->rx_coalesce_usecs, - c->rx_max_coalesced_frames); + int ret; + + set_adaptive_rx_setting(dev, coalesce->use_adaptive_rx_coalesce); + + ret = set_rx_intr_params(dev, coalesce->rx_coalesce_usecs, + coalesce->rx_max_coalesced_frames); + if (ret) + return ret; + + return set_dbqtimer_tickval(dev, + coalesce->tx_coalesce_usecs_irq, + coalesce->tx_coalesce_usecs); } static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) @@ -943,6 +1128,8 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ? 
adap->sge.counter_val[rq->pktcnt_idx] : 0; c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); + c->tx_coalesce_usecs_irq = get_dbqtimer_tick(dev); + c->tx_coalesce_usecs = get_dbqtimer(dev); return 0; } @@ -1076,7 +1263,7 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) * firmware image otherwise we'll try to do the entire job from the * host ... and we always "force" the operation in this path. */ - if (adap->flags & FULL_INIT_DONE) + if (adap->flags & CXGB4_FULL_INIT_DONE) mbox = adap->mbox; ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); @@ -1155,7 +1342,7 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, return 0; /* Interface must be brought up atleast once */ - if (pi->adapter->flags & FULL_INIT_DONE) { + if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) { for (i = 0; i < pi->rss_size; i++) pi->rss[i] = p[i]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c index 6c8a62eefe51..33b2c0c45509 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c @@ -74,7 +74,7 @@ int cxgb_fcoe_enable(struct net_device *netdev) if (is_t4(adap->params.chip)) return -EINVAL; - if (!(adap->flags & FULL_INIT_DONE)) + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) return -EINVAL; dev_info(adap->pdev_dev, "Enabling FCoE offload features\n"); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 7dddb9e748b8..5afb43000049 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -524,7 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx) return -ENOMEM; fwr = __skb_put(skb, len); - t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1 + t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1 : adapter->sge.fw_evtq.abs_id); /* Mark the filter as "pending" and ship off the Filter Work Request. @@ -1569,7 +1569,7 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id, int ret; /* If we are shutting down the adapter do not wait for completion */ - if (netdev2adap(dev)->flags & SHUTTING_DOWN) + if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN) return __cxgb4_del_filter(dev, filter_id, fs, NULL); init_completion(&ctx.completion); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6ba9099ca7fe..89179e316687 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -433,6 +433,60 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) } /** + * cxgb4_change_mac - Update match filter for a MAC address. + * @pi: the port_info + * @viid: the VI id + * @tcam_idx: TCAM index of existing filter for old value of MAC address, + * or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an MPS filter and sets it to the new MAC address if + * @tcam_idx >= 0, or adds the MAC address to a new filter if + * @tcam_idx < 0. In the latter case the address is added persistently + * if @persist is %true. + * Addresses are programmed to hash region, if tcam runs out of entries. 
+ * + */ +static int cxgb4_change_mac(struct port_info *pi, unsigned int viid, + int *tcam_idx, const u8 *addr, bool persist, + u8 *smt_idx) +{ + struct adapter *adapter = pi->adapter; + struct hash_mac_addr *entry, *new_entry; + int ret; + + ret = t4_change_mac(adapter, adapter->mbox, viid, + *tcam_idx, addr, persist, smt_idx); + /* We ran out of TCAM entries. try programming hash region. */ + if (ret == -ENOMEM) { + /* If the MAC address to be updated is in the hash addr + * list, update it from the list + */ + list_for_each_entry(entry, &adapter->mac_hlist, list) { + if (entry->iface_mac) { + ether_addr_copy(entry->addr, addr); + goto set_hash; + } + } + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + ether_addr_copy(new_entry->addr, addr); + new_entry->iface_mac = true; + list_add_tail(&new_entry->list, &adapter->mac_hlist); +set_hash: + ret = cxgb4_set_addr_hash(pi); + } else if (ret >= 0) { + *tcam_idx = ret; + ret = 0; + } + + return ret; +} + +/* * link_start - enable a port * @dev: the port to enable * @@ -450,15 +504,9 @@ static int link_start(struct net_device *dev) */ ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); - if (ret == 0) { - ret = t4_change_mac(pi->adapter, mb, pi->viid, - pi->xact_addr_filt, dev->dev_addr, true, - &pi->smt_idx); - if (ret >= 0) { - pi->xact_addr_filt = ret; - ret = 0; - } - } + if (ret == 0) + ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt, + dev->dev_addr, true, &pi->smt_idx); if (ret == 0) ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan, &pi->link_cfg); @@ -527,7 +575,7 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, struct sge_eth_txq *eq; eq = container_of(txq, struct sge_eth_txq, q); - netif_tx_wake_queue(eq->txq); + t4_sge_eth_txq_egress_update(q->adap, eq, -1); } else { struct sge_uld_txq *oq; @@ -603,12 +651,12 @@ out: static void disable_msi(struct adapter *adapter) { - if (adapter->flags & USING_MSIX) { + if (adapter->flags & CXGB4_USING_MSIX) { pci_disable_msix(adapter->pdev); - adapter->flags &= ~USING_MSIX; - } else if (adapter->flags & USING_MSI) { + adapter->flags &= ~CXGB4_USING_MSIX; + } else if (adapter->flags & CXGB4_USING_MSI) { pci_disable_msi(adapter->pdev); - adapter->flags &= ~USING_MSI; + adapter->flags &= ~CXGB4_USING_MSI; } } @@ -624,7 +672,7 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie) adap->swintr = 1; t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); } - if (adap->flags & MASTER_PF) + if (adap->flags & CXGB4_MASTER_PF) t4_slow_intr_handler(adap); return IRQ_HANDLED; } @@ -789,9 +837,9 @@ static void quiesce_rx(struct adapter *adap) /* Disable interrupt and napi handler */ static void disable_interrupts(struct adapter *adap) { - if (adap->flags & FULL_INIT_DONE) { + if (adap->flags & CXGB4_FULL_INIT_DONE) { t4_intr_disable(adap); - if (adap->flags & USING_MSIX) { + if (adap->flags & CXGB4_USING_MSIX) { free_msix_queue_irqs(adap); free_irq(adap->msix_info[0].vec, adap); } else { @@ -832,7 +880,7 @@ static int setup_fw_sge_queues(struct adapter *adap) bitmap_zero(s->starving_fl, s->egr_sz); bitmap_zero(s->txq_maperr, s->egr_sz); - if (adap->flags & USING_MSIX) + if (adap->flags & CXGB4_USING_MSIX) adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */ else { err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, @@ -885,10 +933,13 @@ static int setup_sge_queues(struct adapter *adap) q->rspq.idx = j; memset(&q->stats, 0, sizeof(q->stats)); } - 
for (j = 0; j < pi->nqsets; j++, t++) { + + q = &s->ethrxq[pi->first_qset]; + for (j = 0; j < pi->nqsets; j++, t++, q++) { err = t4_sge_alloc_eth_txq(adap, t, dev, netdev_get_tx_queue(dev, j), - s->fw_evtq.cntxt_id); + q->rspq.cntxt_id, + !!(adap->flags & CXGB4_SGE_DBQ_TIMER)); if (err) goto freeout; } @@ -910,7 +961,7 @@ static int setup_sge_queues(struct adapter *adap) if (!is_t4(adap->params.chip)) { err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0], netdev_get_tx_queue(adap->port[0], 0) - , s->fw_evtq.cntxt_id); + , s->fw_evtq.cntxt_id, false); if (err) goto freeout; } @@ -2229,7 +2280,7 @@ static int cxgb_up(struct adapter *adap) if (err) goto freeq; - if (adap->flags & USING_MSIX) { + if (adap->flags & CXGB4_USING_MSIX) { name_msix_vecs(adap); err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, adap->msix_info[0].desc, adap); @@ -2242,7 +2293,8 @@ static int cxgb_up(struct adapter *adap) } } else { err = request_irq(adap->pdev->irq, t4_intr_handler(adap), - (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, + (adap->flags & CXGB4_USING_MSI) ? 0 + : IRQF_SHARED, adap->port[0]->name, adap); if (err) goto irq_err; @@ -2251,7 +2303,7 @@ static int cxgb_up(struct adapter *adap) enable_rx(adap); t4_sge_start(adap); t4_intr_enable(adap); - adap->flags |= FULL_INIT_DONE; + adap->flags |= CXGB4_FULL_INIT_DONE; mutex_unlock(&uld_mutex); notify_ulds(adap, CXGB4_STATE_UP); @@ -2280,7 +2332,7 @@ static void cxgb_down(struct adapter *adapter) t4_sge_stop(adapter); t4_free_sge_resources(adapter); - adapter->flags &= ~FULL_INIT_DONE; + adapter->flags &= ~CXGB4_FULL_INIT_DONE; } /* @@ -2294,7 +2346,7 @@ static int cxgb_open(struct net_device *dev) netif_carrier_off(dev); - if (!(adapter->flags & FULL_INIT_DONE)) { + if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) { err = cxgb_up(adapter); if (err < 0) return err; @@ -2689,6 +2741,7 @@ static int cxgb4_mgmt_get_vf_config(struct net_device *dev, ivi->min_tx_rate = 0; ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr); ivi->vlan = vfinfo->vlan; + ivi->linkstate = vfinfo->link_state; return 0; } @@ -2828,6 +2881,49 @@ static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf, ret, (vlan ? 
"setting" : "clearing"), adap->pf, vf); return ret; } + +static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf, + int link) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + u32 param, val; + int ret = 0; + + if (vf >= adap->num_vfs) + return -EINVAL; + + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + val = FW_VF_LINK_STATE_AUTO; + break; + + case IFLA_VF_LINK_STATE_ENABLE: + val = FW_VF_LINK_STATE_ENABLE; + break; + + case IFLA_VF_LINK_STATE_DISABLE: + val = FW_VF_LINK_STATE_DISABLE; + break; + + default: + return -EINVAL; + } + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE)); + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, + ¶m, &val); + if (ret) { + dev_err(adap->pdev_dev, + "Error %d in setting PF %d VF %d link state\n", + ret, adap->pf, vf); + return -EINVAL; + } + + adap->vfinfo[vf].link_state = link; + return ret; +} #endif /* CONFIG_PCI_IOV */ static int cxgb_set_mac_addr(struct net_device *dev, void *p) @@ -2839,9 +2935,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid, - pi->xact_addr_filt, addr->sa_data, true, - &pi->smt_idx); + ret = cxgb4_change_mac(pi, pi->viid, &pi->xact_addr_filt, + addr->sa_data, true, &pi->smt_idx); if (ret < 0) return ret; @@ -2856,7 +2951,7 @@ static void cxgb_netpoll(struct net_device *dev) struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; - if (adap->flags & USING_MSIX) { + if (adap->flags & CXGB4_USING_MSIX) { int i; struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; @@ -2883,7 +2978,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) if (index < 0 || index > pi->nqsets - 1) return -EINVAL; - if (!(adap->flags & FULL_INIT_DONE)) { + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { dev_err(adap->pdev_dev, "Failed to rate limit on queue %d. Link Down?\n", index); @@ -2984,7 +3079,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); - if (!(adap->flags & FULL_INIT_DONE)) { + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) { dev_err(adap->pdev_dev, "Failed to setup tc on port %d. Link Down?\n", pi->port_id); @@ -3244,12 +3339,13 @@ static const struct net_device_ops cxgb4_netdev_ops = { #ifdef CONFIG_PCI_IOV static const struct net_device_ops cxgb4_mgmt_netdev_ops = { - .ndo_open = cxgb4_mgmt_open, - .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac, - .ndo_get_vf_config = cxgb4_mgmt_get_vf_config, - .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate, - .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id, - .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan, + .ndo_open = cxgb4_mgmt_open, + .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac, + .ndo_get_vf_config = cxgb4_mgmt_get_vf_config, + .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate, + .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id, + .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan, + .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state, }; #endif @@ -4115,7 +4211,7 @@ static int adap_init0(struct adapter *adap) return ret; } if (ret == adap->mbox) - adap->flags |= MASTER_PF; + adap->flags |= CXGB4_MASTER_PF; /* * If we're the Master PF Driver and the device is uninitialized, @@ -4130,7 +4226,7 @@ static int adap_init0(struct adapter *adap) /* If firmware is too old (not supported by driver) force an update. 
*/ if (ret) state = DEV_STATE_UNINIT; - if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { + if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) { struct fw_info *fw_info; struct fw_hdr *card_fw; const struct firmware *fw; @@ -4192,7 +4288,7 @@ static int adap_init0(struct adapter *adap) ret); dev_info(adap->pdev_dev, "Coming up as %s: "\ "Adapter already initialized\n", - adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); + adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE"); } else { dev_info(adap->pdev_dev, "Coming up as MASTER: "\ "Initializing adapter\n"); @@ -4278,6 +4374,24 @@ static int adap_init0(struct adapter *adap) if (ret < 0) goto bye; + /* Grab the SGE Doorbell Queue Timer values. If successful, that + * indicates that the Firmware and Hardware support this. + */ + params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK)); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + + if (!ret) { + adap->sge.dbqtimer_tick = val[0]; + ret = t4_read_sge_dbqtimers(adap, + ARRAY_SIZE(adap->sge.dbqtimer_val), + adap->sge.dbqtimer_val); + } + + if (!ret) + adap->flags |= CXGB4_SGE_DBQ_TIMER; + if (is_bypass_device(adap->pdev->device)) adap->params.bypass = 1; @@ -4400,7 +4514,7 @@ static int adap_init0(struct adapter *adap) * offload connection through firmware work request */ if ((val[0] != val[1]) && (ret >= 0)) { - adap->flags |= FW_OFLD_CONN; + adap->flags |= CXGB4_FW_OFLD_CONN; adap->tids.aftid_base = val[0]; adap->tids.aftid_end = val[1]; } @@ -4493,7 +4607,7 @@ static int adap_init0(struct adapter *adap) * 2. Server filter: This are special filters which are used * to redirect SYN packets to offload queue. */ - if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { + if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) { adap->tids.sftid_base = adap->tids.ftid_base + DIV_ROUND_UP(adap->tids.nftids, 3); adap->tids.nsftids = adap->tids.nftids - @@ -4672,7 +4786,7 @@ static int adap_init0(struct adapter *adap) adap->params.b_wnd); } t4_init_sge_params(adap); - adap->flags |= FW_OK; + adap->flags |= CXGB4_FW_OK; t4_init_tp_params(adap, true); return 0; @@ -4707,7 +4821,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, goto out; rtnl_lock(); - adap->flags &= ~FW_OK; + adap->flags &= ~CXGB4_FW_OK; notify_ulds(adap, CXGB4_STATE_START_RECOVERY); spin_lock(&adap->stats_lock); for_each_port(adap, i) { @@ -4719,12 +4833,12 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, } spin_unlock(&adap->stats_lock); disable_interrupts(adap); - if (adap->flags & FULL_INIT_DONE) + if (adap->flags & CXGB4_FULL_INIT_DONE) cxgb_down(adap); rtnl_unlock(); - if ((adap->flags & DEV_ENABLED)) { + if ((adap->flags & CXGB4_DEV_ENABLED)) { pci_disable_device(pdev); - adap->flags &= ~DEV_ENABLED; + adap->flags &= ~CXGB4_DEV_ENABLED; } out: return state == pci_channel_io_perm_failure ? 
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; @@ -4742,13 +4856,13 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } - if (!(adap->flags & DEV_ENABLED)) { + if (!(adap->flags & CXGB4_DEV_ENABLED)) { if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Cannot reenable PCI " "device after reset\n"); return PCI_ERS_RESULT_DISCONNECT; } - adap->flags |= DEV_ENABLED; + adap->flags |= CXGB4_DEV_ENABLED; } pci_set_master(pdev); @@ -4759,7 +4873,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) return PCI_ERS_RESULT_DISCONNECT; - adap->flags |= FW_OK; + adap->flags |= CXGB4_FW_OK; if (adap_init1(adap, &c)) return PCI_ERS_RESULT_DISCONNECT; @@ -4871,7 +4985,7 @@ static int cfg_queues(struct adapter *adap) * at all is problematic ... */ niqflint = adap->params.pfres.niqflint - 1; - if (!(adap->flags & USING_MSIX)) + if (!(adap->flags & CXGB4_USING_MSIX)) niqflint--; neq = adap->params.pfres.neq / 2; avail_eth_qsets = min(niqflint, neq); @@ -5153,8 +5267,8 @@ static void print_adapter_info(struct adapter *adapter) /* Software/Hardware configuration */ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", is_offload(adapter) ? "R" : "", - ((adapter->flags & USING_MSIX) ? "MSI-X" : - (adapter->flags & USING_MSI) ? "MSI" : ""), + ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" : + (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""), is_offload(adapter) ? "Offload" : "non-Offload"); } @@ -5229,13 +5343,13 @@ static void free_some_resources(struct adapter *adapter) kfree(adap2pinfo(adapter, i)->rss); free_netdev(adapter->port[i]); } - if (adapter->flags & FW_OK) + if (adapter->flags & CXGB4_FW_OK) t4_fw_bye(adapter, adapter->pf); } #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ - NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) + NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) #define SEGMENT_SIZE 128 static int t4_get_chip_type(struct adapter *adap, int ver) @@ -5533,7 +5647,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } /* PCI device has been enabled */ - adapter->flags |= DEV_ENABLED; + adapter->flags |= CXGB4_DEV_ENABLED; memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); /* If possible, we use PCIe Relaxed Ordering Attribute to deliver @@ -5551,7 +5665,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * using Relaxed Ordering. 
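Throughout the patch the adapter flag names gain a CXGB4_ prefix (FULL_INIT_DONE becomes CXGB4_FULL_INIT_DONE, USING_MSIX becomes CXGB4_USING_MSIX, and so on) so that these generic-sounding names no longer leak out of the driver. The definitions themselves live in cxgb4.h, which is not shown in this excerpt; they presumably take a shape along the lines of the sketch below, where the bit assignments are illustrative only:

    /* Illustrative sketch only; the real definitions are in cxgb4.h,
     * which is not part of this diff excerpt.
     */
    enum cxgb4_adapter_flags {
            CXGB4_FULL_INIT_DONE            = 1 << 0,
            CXGB4_DEV_ENABLED               = 1 << 1,
            CXGB4_USING_MSI                 = 1 << 2,
            CXGB4_USING_MSIX                = 1 << 3,
            CXGB4_FW_OK                     = 1 << 4,
            CXGB4_FW_OFLD_CONN              = 1 << 5,
            CXGB4_MASTER_PF                 = 1 << 6,
            CXGB4_SHUTTING_DOWN             = 1 << 7,
            CXGB4_ROOT_NO_RELAXED_ORDERING  = 1 << 8,
            CXGB4_SGE_DBQ_TIMER             = 1 << 9,
    };

Call sites such as if (adap->flags & CXGB4_FULL_INIT_DONE) are otherwise unchanged; only the namespace moves.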
*/ if (!pcie_relaxed_ordering_enabled(pdev)) - adapter->flags |= ROOT_NO_RELAXED_ORDERING; + adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING; spin_lock_init(&adapter->stats_lock); spin_lock_init(&adapter->tid_release_lock); @@ -5642,7 +5756,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | NETIF_F_RXHASH | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; @@ -5651,9 +5765,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_HW_TLS_RECORD; } if (highdma) @@ -5680,7 +5797,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, adapter); - if (adapter->flags & FW_OK) { + if (adapter->flags & CXGB4_FW_OK) { err = t4_port_init(adapter, func, func, 0); if (err) goto out_free_dev; @@ -5702,7 +5819,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } - if (!(adapter->flags & FW_OK)) + if (!(adapter->flags & CXGB4_FW_OK)) goto fw_attach_fail; /* Configure queues and allocate tables now, they can be needed as @@ -5796,9 +5913,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* See what interrupts we'll be using */ if (msi > 1 && enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; + adapter->flags |= CXGB4_USING_MSIX; else if (msi > 0 && pci_enable_msi(pdev) == 0) { - adapter->flags |= USING_MSI; + adapter->flags |= CXGB4_USING_MSI; if (msi > 1) free_msix_info(adapter); } @@ -5866,7 +5983,7 @@ fw_attach_fail: cxgb4_ptp_init(adapter); if (IS_REACHABLE(CONFIG_THERMAL) && - !is_t4(adapter->params.chip) && (adapter->flags & FW_OK)) + !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK)) cxgb4_thermal_init(adapter); print_adapter_info(adapter); @@ -5875,7 +5992,7 @@ fw_attach_fail: out_free_dev: t4_free_sge_resources(adapter); free_some_resources(adapter); - if (adapter->flags & USING_MSIX) + if (adapter->flags & CXGB4_USING_MSIX) free_msix_info(adapter); if (adapter->num_uld || adapter->num_ofld_uld) t4_uld_mem_free(adapter); @@ -5908,7 +6025,7 @@ static void remove_one(struct pci_dev *pdev) return; } - adapter->flags |= SHUTTING_DOWN; + adapter->flags |= CXGB4_SHUTTING_DOWN; if (adapter->pf == 4) { int i; @@ -5943,10 +6060,10 @@ static void remove_one(struct pci_dev *pdev) */ clear_all_filters(adapter); - if (adapter->flags & FULL_INIT_DONE) + if (adapter->flags & CXGB4_FULL_INIT_DONE) cxgb_down(adapter); - if (adapter->flags & USING_MSIX) + if (adapter->flags & CXGB4_USING_MSIX) free_msix_info(adapter); if (adapter->num_uld || adapter->num_ofld_uld) t4_uld_mem_free(adapter); @@ -5970,9 +6087,9 @@ static void remove_one(struct pci_dev *pdev) #endif iounmap(adapter->regs); pci_disable_pcie_error_reporting(pdev); - if ((adapter->flags & DEV_ENABLED)) { + if ((adapter->flags & CXGB4_DEV_ENABLED)) { pci_disable_device(pdev); - adapter->flags &= ~DEV_ENABLED; + adapter->flags &= ~CXGB4_DEV_ENABLED; } pci_release_regions(pdev); kfree(adapter->mbox_log); @@ -5998,7 +6115,7 @@ static void shutdown_one(struct pci_dev *pdev) return; } - adapter->flags |= SHUTTING_DOWN; + adapter->flags |= 
CXGB4_SHUTTING_DOWN; if (adapter->pf == 4) { int i; @@ -6016,7 +6133,7 @@ static void shutdown_one(struct pci_dev *pdev) disable_msi(adapter); t4_sge_stop(adapter); - if (adapter->flags & FW_OK) + if (adapter->flags & CXGB4_FW_OK) t4_fw_bye(adapter, adapter->mbox); } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index c116f96956fe..82a8d1970060 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -83,28 +83,23 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct tc_cls_flower_offload *cls, struct ch_filter_specification *fs) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls); u16 addr_type = 0; - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - cls->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->mask); - u16 ethtype_key = ntohs(key->n_proto); - u16 ethtype_mask = ntohs(mask->n_proto); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + u16 ethtype_key, ethtype_mask; + + flow_rule_match_basic(rule, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); if (ethtype_key == ETH_P_ALL) { ethtype_key = 0; @@ -116,115 +111,89 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->val.ethtype = ethtype_key; fs->mask.ethtype = ethtype_mask; - fs->val.proto = key->ip_proto; - fs->mask.proto = mask->ip_proto; + fs->val.proto = match.key->ip_proto; + fs->mask.proto = match.mask->ip_proto; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - cls->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - cls->mask); + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); fs->type = 0; - memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst)); - memcpy(&fs->val.fip[0], &key->src, sizeof(key->src)); - memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst)); - memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src)); + memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst)); + memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src)); + memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst)); + memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src)); /* also initialize nat_lip/fip to same values */ - memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst)); - memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src)); - + memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst)); + memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src)); } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - 
skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - cls->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - cls->mask); + struct flow_match_ipv6_addrs match; + flow_rule_match_ipv6_addrs(rule, &match); fs->type = 1; - memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst)); - memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src)); - memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst)); - memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src)); + memcpy(&fs->val.lip[0], match.key->dst.s6_addr, + sizeof(match.key->dst)); + memcpy(&fs->val.fip[0], match.key->src.s6_addr, + sizeof(match.key->src)); + memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr, + sizeof(match.mask->dst)); + memcpy(&fs->mask.fip[0], match.mask->src.s6_addr, + sizeof(match.mask->src)); /* also initialize nat_lip/fip to same values */ - memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst)); - memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src)); + memcpy(&fs->nat_lip[0], match.key->dst.s6_addr, + sizeof(match.key->dst)); + memcpy(&fs->nat_fip[0], match.key->src.s6_addr, + sizeof(match.key->src)); } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key, *mask; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_PORTS, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_PORTS, - cls->mask); - fs->val.lport = cpu_to_be16(key->dst); - fs->mask.lport = cpu_to_be16(mask->dst); - fs->val.fport = cpu_to_be16(key->src); - fs->mask.fport = cpu_to_be16(mask->src); + flow_rule_match_ports(rule, &match); + fs->val.lport = cpu_to_be16(match.key->dst); + fs->mask.lport = cpu_to_be16(match.mask->dst); + fs->val.fport = cpu_to_be16(match.key->src); + fs->mask.fport = cpu_to_be16(match.mask->src); /* also initialize nat_lport/fport to same values */ - fs->nat_lport = cpu_to_be16(key->dst); - fs->nat_fport = cpu_to_be16(key->src); + fs->nat_lport = cpu_to_be16(match.key->dst); + fs->nat_fport = cpu_to_be16(match.key->src); } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) { - struct flow_dissector_key_ip *key, *mask; - - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IP, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IP, - cls->mask); - fs->val.tos = key->tos; - fs->mask.tos = mask->tos; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; + + flow_rule_match_ip(rule, &match); + fs->val.tos = match.key->tos; + fs->mask.tos = match.mask->tos; } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key, *mask; - - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - cls->mask); - fs->val.vni = be32_to_cpu(key->keyid); - fs->mask.vni = be32_to_cpu(mask->keyid); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; + + flow_rule_match_enc_keyid(rule, &match); + fs->val.vni = be32_to_cpu(match.key->keyid); + fs->mask.vni = be32_to_cpu(match.mask->keyid); if (fs->mask.vni) { fs->val.encap_vld = 1; fs->mask.encap_vld = 1; } } - if (dissector_uses_key(cls->dissector, 
FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key, *mask; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; u16 vlan_tci, vlan_tci_mask; - key = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_VLAN, - cls->key); - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_VLAN, - cls->mask); - vlan_tci = key->vlan_id | (key->vlan_priority << - VLAN_PRIO_SHIFT); - vlan_tci_mask = mask->vlan_id | (mask->vlan_priority << - VLAN_PRIO_SHIFT); + flow_rule_match_vlan(rule, &match); + vlan_tci = match.key->vlan_id | (match.key->vlan_priority << + VLAN_PRIO_SHIFT); + vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority << + VLAN_PRIO_SHIFT); fs->val.ivlan = vlan_tci; fs->mask.ivlan = vlan_tci_mask; @@ -255,10 +224,12 @@ static void cxgb4_process_flow_match(struct net_device *dev, static int cxgb4_validate_flow_match(struct net_device *dev, struct tc_cls_flower_offload *cls) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; u16 ethtype_mask = 0; u16 ethtype_key = 0; - if (cls->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | @@ -268,36 +239,29 @@ static int cxgb4_validate_flow_match(struct net_device *dev, BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IP))) { netdev_warn(dev, "Unsupported key used: 0x%x\n", - cls->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_BASIC, - cls->mask); - ethtype_key = ntohs(key->n_proto); - ethtype_mask = ntohs(mask->n_proto); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + ethtype_key = ntohs(match.key->n_proto); + ethtype_mask = ntohs(match.mask->n_proto); } - if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { u16 eth_ip_type = ethtype_key & ethtype_mask; - struct flow_dissector_key_ip *mask; + struct flow_match_ip match; if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) { netdev_err(dev, "IP Key supported only with IPv4/v6"); return -EINVAL; } - mask = skb_flow_dissector_target(cls->dissector, - FLOW_DISSECTOR_KEY_IP, - cls->mask); - if (mask->ttl) { + flow_rule_match_ip(rule, &match); + if (match.mask->ttl) { netdev_warn(dev, "ttl match unsupported for offload"); return -EOPNOTSUPP; } @@ -328,7 +292,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, u32 mask, u32 offset, u8 htype) { switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: switch (offset) { case PEDIT_ETH_DMAC_31_0: fs->newdmac = 1; @@ -346,7 +310,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, offload_pedit(fs, val, mask, ETH_SMAC_47_16); } break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: switch (offset) { case PEDIT_IP4_SRC: offload_pedit(fs, val, mask, IP4_SRC); @@ -356,7 +320,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } fs->nat_mode = NAT_MODE_ALL; break; - 
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: switch (offset) { case PEDIT_IP6_SRC_31_0: offload_pedit(fs, val, mask, IP6_SRC_31_0); @@ -384,7 +348,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } fs->nat_mode = NAT_MODE_ALL; break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: switch (offset) { case PEDIT_TCP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) @@ -397,7 +361,7 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, } fs->nat_mode = NAT_MODE_ALL; break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: switch (offset) { case PEDIT_UDP_SPORT_DPORT: if (~mask & PEDIT_TCP_UDP_SPORT_MASK) @@ -416,56 +380,63 @@ static void cxgb4_process_flow_actions(struct net_device *in, struct tc_cls_flower_offload *cls, struct ch_filter_specification *fs) { - const struct tc_action *a; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls); + struct flow_action_entry *act; int i; - tcf_exts_for_each_action(i, a, cls->exts) { - if (is_tcf_gact_ok(a)) { + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_ACCEPT: fs->action = FILTER_PASS; - } else if (is_tcf_gact_shot(a)) { + break; + case FLOW_ACTION_DROP: fs->action = FILTER_DROP; - } else if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *out = tcf_mirred_dev(a); + break; + case FLOW_ACTION_REDIRECT: { + struct net_device *out = act->dev; struct port_info *pi = netdev_priv(out); fs->action = FILTER_SWITCH; fs->eport = pi->port_id; - } else if (is_tcf_vlan(a)) { - u32 vlan_action = tcf_vlan_action(a); - u8 prio = tcf_vlan_push_prio(a); - u16 vid = tcf_vlan_push_vid(a); + } + break; + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: { + u8 prio = act->vlan.prio; + u16 vid = act->vlan.vid; u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid; - - switch (vlan_action) { - case TCA_VLAN_ACT_POP: + switch (act->id) { + case FLOW_ACTION_VLAN_POP: fs->newvlan |= VLAN_REMOVE; break; - case TCA_VLAN_ACT_PUSH: + case FLOW_ACTION_VLAN_PUSH: fs->newvlan |= VLAN_INSERT; fs->vlan = vlan_tci; break; - case TCA_VLAN_ACT_MODIFY: + case FLOW_ACTION_VLAN_MANGLE: fs->newvlan |= VLAN_REWRITE; fs->vlan = vlan_tci; break; default: break; } - } else if (is_tcf_pedit(a)) { + } + break; + case FLOW_ACTION_MANGLE: { u32 mask, val, offset; - int nkeys, i; u8 htype; - nkeys = tcf_pedit_nkeys(a); - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); - mask = tcf_pedit_mask(a, i); - val = tcf_pedit_val(a, i); - offset = tcf_pedit_offset(a, i); + htype = act->mangle.htype; + mask = act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; - process_pedit_field(fs, val, mask, offset, - htype); + process_pedit_field(fs, val, mask, offset, htype); } + break; + default: + break; } } } @@ -484,101 +455,89 @@ static bool valid_l4_mask(u32 mask) } static bool valid_pedit_action(struct net_device *dev, - const struct tc_action *a) + const struct flow_action_entry *act) { u32 mask, offset; - u8 cmd, htype; - int nkeys, i; - - nkeys = tcf_pedit_nkeys(a); - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); - cmd = tcf_pedit_cmd(a, i); - mask = tcf_pedit_mask(a, i); - offset = tcf_pedit_offset(a, i); - - if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) { - netdev_err(dev, "%s: Unsupported pedit cmd\n", + u8 htype; + + htype = act->mangle.htype; + mask = act->mangle.mask; + offset = act->mangle.offset; + + switch (htype) { + case 
FLOW_ACT_MANGLE_HDR_TYPE_ETH: + switch (offset) { + case PEDIT_ETH_DMAC_31_0: + case PEDIT_ETH_DMAC_47_32_SMAC_15_0: + case PEDIT_ETH_SMAC_47_16: + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", __func__); return false; } - - switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: - switch (offset) { - case PEDIT_ETH_DMAC_31_0: - case PEDIT_ETH_DMAC_47_32_SMAC_15_0: - case PEDIT_ETH_SMAC_47_16: - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); - return false; - } - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: - switch (offset) { - case PEDIT_IP4_SRC: - case PEDIT_IP4_DST: - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); - return false; - } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + switch (offset) { + case PEDIT_IP4_SRC: + case PEDIT_IP4_DST: break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: - switch (offset) { - case PEDIT_IP6_SRC_31_0: - case PEDIT_IP6_SRC_63_32: - case PEDIT_IP6_SRC_95_64: - case PEDIT_IP6_SRC_127_96: - case PEDIT_IP6_DST_31_0: - case PEDIT_IP6_DST_63_32: - case PEDIT_IP6_DST_95_64: - case PEDIT_IP6_DST_127_96: - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); - return false; - } + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + switch (offset) { + case PEDIT_IP6_SRC_31_0: + case PEDIT_IP6_SRC_63_32: + case PEDIT_IP6_SRC_95_64: + case PEDIT_IP6_SRC_127_96: + case PEDIT_IP6_DST_31_0: + case PEDIT_IP6_DST_63_32: + case PEDIT_IP6_DST_95_64: + case PEDIT_IP6_DST_127_96: break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: - switch (offset) { - case PEDIT_TCP_SPORT_DPORT: - if (!valid_l4_mask(~mask)) { - netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n", - __func__); - return false; - } - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + switch (offset) { + case PEDIT_TCP_SPORT_DPORT: + if (!valid_l4_mask(~mask)) { + netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n", __func__); return false; } break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: - switch (offset) { - case PEDIT_UDP_SPORT_DPORT: - if (!valid_l4_mask(~mask)) { - netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n", - __func__); - return false; - } - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + switch (offset) { + case PEDIT_UDP_SPORT_DPORT: + if (!valid_l4_mask(~mask)) { + netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n", __func__); return false; } break; default: - netdev_err(dev, "%s: Unsupported pedit type\n", + netdev_err(dev, "%s: Unsupported pedit field\n", __func__); return false; } + break; + default: + netdev_err(dev, "%s: Unsupported pedit type\n", __func__); + return false; } return true; } @@ -586,24 +545,26 @@ static bool valid_pedit_action(struct net_device *dev, static int cxgb4_validate_flow_actions(struct net_device *dev, struct tc_cls_flower_offload *cls) { - const struct tc_action *a; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(cls); + struct flow_action_entry *act; bool act_redir = false; bool act_pedit = false; bool act_vlan = false; int i; - tcf_exts_for_each_action(i, a, cls->exts) { - if (is_tcf_gact_ok(a)) { - /* Do 
nothing */ - } else if (is_tcf_gact_shot(a)) { + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_ACCEPT: + case FLOW_ACTION_DROP: /* Do nothing */ - } else if (is_tcf_mirred_egress_redirect(a)) { + break; + case FLOW_ACTION_REDIRECT: { struct adapter *adap = netdev2adap(dev); struct net_device *n_dev, *target_dev; unsigned int i; bool found = false; - target_dev = tcf_mirred_dev(a); + target_dev = act->dev; for_each_port(adap, i) { n_dev = adap->port[i]; if (target_dev == n_dev) { @@ -621,15 +582,18 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, return -EINVAL; } act_redir = true; - } else if (is_tcf_vlan(a)) { - u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); - u32 vlan_action = tcf_vlan_action(a); + } + break; + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: { + u16 proto = be16_to_cpu(act->vlan.proto); - switch (vlan_action) { - case TCA_VLAN_ACT_POP: + switch (act->id) { + case FLOW_ACTION_VLAN_POP: break; - case TCA_VLAN_ACT_PUSH: - case TCA_VLAN_ACT_MODIFY: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: if (proto != ETH_P_8021Q) { netdev_err(dev, "%s: Unsupported vlan proto\n", __func__); @@ -642,13 +606,17 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, return -EOPNOTSUPP; } act_vlan = true; - } else if (is_tcf_pedit(a)) { - bool pedit_valid = valid_pedit_action(dev, a); + } + break; + case FLOW_ACTION_MANGLE: { + bool pedit_valid = valid_pedit_action(dev, act); if (!pedit_valid) return -EOPNOTSUPP; act_pedit = true; - } else { + } + break; + default: netdev_err(dev, "%s: Unsupported action\n", __func__); return -EOPNOTSUPP; } @@ -843,9 +811,9 @@ int cxgb4_tc_flower_stats(struct net_device *dev, if (ofld_stats->packet_count != packets) { if (ofld_stats->prev_packet_count != packets) ofld_stats->last_used = jiffies; - tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count, - packets - ofld_stats->packet_count, - ofld_stats->last_used); + flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count, + packets - ofld_stats->packet_count, + ofld_stats->last_used); ofld_stats->packet_count = packets; ofld_stats->byte_count = bytes; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index c7d2b4dc7568..02fc63fa7f25 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -444,8 +444,7 @@ struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap) if (!max_tids) return NULL; - t = kvzalloc(sizeof(*t) + - (max_tids * sizeof(struct cxgb4_link)), GFP_KERNEL); + t = kvzalloc(struct_size(t, table, max_tids), GFP_KERNEL); if (!t) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index c041f44324db..6c685b920713 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -147,7 +147,7 @@ static int alloc_uld_rxqs(struct adapter *adap, per_chan = rxq_info->nrxq / adap->params.nports; - if (adap->flags & USING_MSIX) + if (adap->flags & CXGB4_USING_MSIX) msi_idx = 1; else msi_idx = -((int)s->intrq.abs_id + 1); @@ -195,7 +195,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; int i, ret = 0; - if (adap->flags & USING_MSIX) { + if (adap->flags & CXGB4_USING_MSIX) { rxq_info->msix_tbl = 
kcalloc((rxq_info->nrxq + rxq_info->nciq), sizeof(unsigned short), GFP_KERNEL); @@ -206,7 +206,7 @@ setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro) ret = !(!alloc_uld_rxqs(adap, rxq_info, lro)); /* Tell uP to route control queue completions to rdma rspq */ - if (adap->flags & FULL_INIT_DONE && + if (adap->flags & CXGB4_FULL_INIT_DONE && !ret && uld_type == CXGB4_ULD_RDMA) { struct sge *s = &adap->sge; unsigned int cmplqid; @@ -239,7 +239,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) { struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; - if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) { + if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) { struct sge *s = &adap->sge; u32 param, cmdop, cmplqid = 0; int i; @@ -258,7 +258,7 @@ static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type) t4_free_uld_rxqs(adap, rxq_info->nciq, rxq_info->uldrxq + rxq_info->nrxq); t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq); - if (adap->flags & USING_MSIX) + if (adap->flags & CXGB4_USING_MSIX) kfree(rxq_info->msix_tbl); } @@ -273,7 +273,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, if (!rxq_info) return -ENOMEM; - if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) { + if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) { i = s->nqs_per_uld; rxq_info->nrxq = roundup(i, adap->params.nports); } else { @@ -284,7 +284,7 @@ static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type, if (!uld_info->ciq) { rxq_info->nciq = 0; } else { - if (adap->flags & USING_MSIX) + if (adap->flags & CXGB4_USING_MSIX) rxq_info->nciq = min_t(int, s->nqs_per_uld, num_online_cpus()); else @@ -611,10 +611,10 @@ static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type adap->uld[type].add = NULL; release_sge_txq_uld(adap, type); - if (adap->flags & FULL_INIT_DONE) + if (adap->flags & CXGB4_FULL_INIT_DONE) quiesce_rx_uld(adap, type); - if (adap->flags & USING_MSIX) + if (adap->flags & CXGB4_USING_MSIX) free_msix_queue_irqs_uld(adap, type); free_sge_queues_uld(adap, type); @@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; lld->udb_density = 1 << adap->params.sge.eq_qpp; lld->ucq_density = 1 << adap->params.sge.iq_qpp; + lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10); lld->filt_mode = adap->params.tp.vlan_pri_map; /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ for (i = 0; i < NCHAN; i++) @@ -672,7 +673,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) lld->sge_egrstatuspagesize = adap->sge.stat_len; lld->sge_pktshift = adap->sge.pktshift; lld->ulp_crypto = adap->params.crypto; - lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; + lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN; lld->max_ordird_qp = adap->params.max_ordird_qp; lld->max_ird_adapter = adap->params.max_ird_adapter; lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; @@ -701,7 +702,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld) adap->uld[uld].handle = handle; t4_register_netevent_notifier(); - if (adap->flags & FULL_INIT_DONE) + if (adap->flags & CXGB4_FULL_INIT_DONE) adap->uld[uld].state_change(handle, CXGB4_STATE_UP); } @@ -736,13 +737,13 @@ void cxgb4_register_uld(enum cxgb4_uld type, ret = setup_sge_queues_uld(adap, type, p->lro); if (ret) goto free_queues; - if 
(adap->flags & USING_MSIX) { + if (adap->flags & CXGB4_USING_MSIX) { name_msix_vecs_uld(adap, type); ret = request_msix_queue_irqs_uld(adap, type); if (ret) goto free_rxq; } - if (adap->flags & FULL_INIT_DONE) + if (adap->flags & CXGB4_FULL_INIT_DONE) enable_rx_uld(adap, type); if (adap->uld[type].add) goto free_irq; @@ -753,9 +754,9 @@ void cxgb4_register_uld(enum cxgb4_uld type, uld_attach(adap, type); continue; free_irq: - if (adap->flags & FULL_INIT_DONE) + if (adap->flags & CXGB4_FULL_INIT_DONE) quiesce_rx_uld(adap, type); - if (adap->flags & USING_MSIX) + if (adap->flags & CXGB4_USING_MSIX) free_msix_queue_irqs_uld(adap, type); free_rxq: free_sge_queues_uld(adap, type); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 5fa9a2d5fc4b..21da34a4ca24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -336,6 +336,7 @@ struct cxgb4_lld_info { unsigned int cclk_ps; /* Core clock period in psec */ unsigned short udb_density; /* # of user DB/page */ unsigned short ucq_density; /* # of user CQs/page */ + unsigned int sge_host_page_size; /* SGE host page size */ unsigned short filt_mode; /* filter optional components */ unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ /* scheduler queue */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 4852febbfec3..1a407d3c1d67 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -646,7 +646,7 @@ struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end) if (l2t_size < L2T_MIN_HASH_BUCKETS) return NULL; - d = kvzalloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry), GFP_KERNEL); + d = kvzalloc(struct_size(d, l2tab, l2t_size), GFP_KERNEL); if (!d) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c index 52edb688942b..ba6c153ee45c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sched.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c @@ -478,7 +478,7 @@ struct sched_table *t4_init_sched(unsigned int sched_size) struct sched_table *s; unsigned int i; - s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class), GFP_KERNEL); + s = kvzalloc(struct_size(s, tab, sched_size), GFP_KERNEL); if (!s) return NULL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index fc0bc6458e84..88773ca58e6b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -80,9 +80,10 @@ * Max number of Tx descriptors we clean up at a time. Should be modest as * freeing skbs isn't cheap and it happens while holding locks. We just need * to free packets faster than they arrive, we eventually catch up and keep - * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. + * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should + * also match the CIDX Flush Threshold. */ -#define MAX_TX_RECLAIM 16 +#define MAX_TX_RECLAIM 32 /* * Max number of Rx buffers we replenish at a time. 
Again keep this modest, @@ -401,31 +402,52 @@ static inline int reclaimable(const struct sge_txq *q) } /** - * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors + * reclaim_completed_tx - reclaims completed TX Descriptors * @adap: the adapter * @q: the Tx queue to reclaim completed descriptors from + * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 * @unmap: whether the buffers should be unmapped for DMA * - * Reclaims Tx descriptors that the SGE has indicated it has processed, - * and frees the associated buffers if possible. Called with the Tx - * queue locked. + * Reclaims Tx Descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. If @maxreclaim == -1, then + * we'll use a default maximum. Called with the TX Queue locked. */ -inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, - bool unmap) +static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + int maxreclaim, bool unmap) { - int avail = reclaimable(q); + int reclaim = reclaimable(q); - if (avail) { + if (reclaim) { /* * Limit the amount of clean up work we do at a time to keep * the Tx lock hold time O(1). */ - if (avail > MAX_TX_RECLAIM) - avail = MAX_TX_RECLAIM; + if (maxreclaim < 0) + maxreclaim = MAX_TX_RECLAIM; + if (reclaim > maxreclaim) + reclaim = maxreclaim; - free_tx_desc(adap, q, avail, unmap); - q->in_use -= avail; + free_tx_desc(adap, q, reclaim, unmap); + q->in_use -= reclaim; } + + return reclaim; +} + +/** + * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors + * @adap: the adapter + * @q: the Tx queue to reclaim completed descriptors from + * @unmap: whether the buffers should be unmapped for DMA + * + * Reclaims Tx descriptors that the SGE has indicated it has processed, + * and frees the associated buffers if possible. Called with the Tx + * queue locked. + */ +void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, + bool unmap) +{ + (void)reclaim_completed_tx(adap, q, -1, unmap); } EXPORT_SYMBOL(cxgb4_reclaim_completed_tx); @@ -1287,6 +1309,44 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb, tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); } +/** + * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update + * @adap: the adapter + * @eq: the Ethernet TX Queue + * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1 + * + * We're typically called here to update the state of an Ethernet TX + * Queue with respect to the hardware's progress in consuming the TX + * Work Requests that we've put on that Egress Queue. This happens + * when we get Egress Queue Update messages and also prophylactically + * in regular timer-based Ethernet TX Queue maintenance. + */ +int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, + int maxreclaim) +{ + struct sge_txq *q = &eq->q; + unsigned int reclaimed; + + if (!q->in_use || !__netif_tx_trylock(eq->txq)) + return 0; + + /* Reclaim pending completed TX Descriptors. */ + reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); + + /* If the TX Queue is currently stopped and there's now more than half + * the queue available, restart it. Otherwise bail out since the rest + * of what we want to do here is with the possibility of shipping any + * currently buffered Coalesced TX Work Request.
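The new t4_sge_eth_txq_egress_update() helper follows the usual contract for a stopped TX ring: reclaim whatever the hardware has finished with, then wake the queue only once at least half of it is free again, so the xmit path does not immediately stop it a second time. A compact standalone model of that logic (hypothetical types, not driver code):

    #include <stdbool.h>

    struct tx_ring {
            unsigned int size;      /* total descriptors in the ring */
            unsigned int in_use;    /* descriptors not yet reclaimed */
            bool stopped;           /* queue stopped by the xmit path */
    };

    /* The hardware reports that 'done' more descriptors have completed. */
    static void ring_reclaim(struct tx_ring *r, unsigned int done)
    {
            r->in_use -= done;
    }

    /* Wake the queue only once at least half of it is free again. */
    static bool ring_maybe_wake(struct tx_ring *r)
    {
            unsigned int avail = r->size - r->in_use;

            if (r->stopped && avail > r->size / 2) {
                    r->stopped = false;     /* netif_tx_wake_queue() in the driver */
                    return true;
            }
            return false;
    }

In the driver the same sequence runs under __netif_tx_trylock(), so the timer callback, the completion handler and the xmit path never touch the queue state concurrently.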
+ */ + if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) { + netif_tx_wake_queue(eq->txq); + eq->q.restarts++; + } + + __netif_tx_unlock(eq->txq); + return reclaimed; +} + /** * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet @@ -1357,7 +1417,7 @@ out_free: dev_kfree_skb_any(skb); } skb_tx_timestamp(skb); - cxgb4_reclaim_completed_tx(adap, &q->q, true); + reclaim_completed_tx(adap, &q->q, -1, true); cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F; #ifdef CONFIG_CHELSIO_T4_FCOE @@ -1400,8 +1460,25 @@ out_free: dev_kfree_skb_any(skb); wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)); if (unlikely(credits < ETHTXQ_STOP_THRES)) { + /* After we're done injecting the Work Request for this + * packet, we'll be below our "stop threshold" so stop the TX + * Queue now and schedule a request for an SGE Egress Queue + * Update message. The queue will get started later on when + * the firmware processes this Work Request and sends us an + * Egress Queue Status Update message indicating that space + * has opened up. + */ eth_txq_stop(q); - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + + /* If we're using the SGE Doorbell Queue Timer facility, we + * don't need to ask the Firmware to send us Egress Queue CIDX + * Updates: the Hardware will do this automatically. And + * since we send the Ingress Queue CIDX Updates to the + * corresponding Ethernet Response Queue, we'll get them very + * quickly. + */ + if (!q->dbqt) + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } wr = (void *)&q->q.desc[q->q.pidx]; @@ -1671,7 +1748,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, /* Take this opportunity to reclaim any TX Descriptors whose DMA * transfers have completed. */ - cxgb4_reclaim_completed_tx(adapter, &txq->q, true); + reclaim_completed_tx(adapter, &txq->q, -1, true); /* Calculate the number of flits and TX Descriptors we're going to * need along with how many TX Descriptors will be left over after @@ -1715,7 +1792,16 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb, * has opened up. */ eth_txq_stop(txq); - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; + + /* If we're using the SGE Doorbell Queue Timer facility, we + * don't need to ask the Firmware to send us Egress Queue CIDX + * Updates: the Hardware will do this automatically. And + * since we send the Ingress Queue CIDX Updates to the + * corresponding Ethernet Response Queue, we'll get them very + * quickly. + */ + if (!txq->dbqt) + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F; } /* Start filling in our Work Request. Note that we do _not_ handle @@ -2793,6 +2879,74 @@ static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb, return 1; } +/** + * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages + * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue + * @rsp: Response Entry pointer into Response Queue + * @gl: Gather List pointer + * + * For adapters which support the SGE Doorbell Queue Timer facility, + * we configure the Ethernet TX Queues to send CIDX Updates to the + * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE + * messages. This adds a small load to PCIe Link RX bandwidth and, + * potentially, higher CPU Interrupt load, but allows us to respond + * much more quickly to the CIDX Updates. This is important for + * Upper Layer Software which isn't willing to have a large amount + * of TX Data outstanding before receiving DMA Completions. 
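On the transmit side, the cxgb4_eth_xmit() and cxgb4_vf_eth_xmit() hunks above stop the queue as soon as the post-WR credit count falls below the stop threshold, and only ask the firmware for an Egress Queue update (FW_WR_EQUEQ_F | FW_WR_EQUIQ_F) when the Doorbell Queue Timer is not in use, since in that mode the hardware produces CIDX updates on its own. The shape of that decision, as a hedged sketch with simplified names:

    #include <stdbool.h>

    #define STOP_THRES 8    /* illustrative stop threshold, in descriptors */

    struct txq_state {
            unsigned int credits;   /* free descriptors left after this WR */
            bool dbqt;              /* SGE Doorbell Queue Timer in use? */
            bool stopped;
    };

    /* Returns true when this Work Request must also ask the firmware for
     * an Egress Queue update; with the Doorbell Queue Timer the hardware
     * generates CIDX updates by itself, so no request is needed.
     */
    static bool maybe_stop_queue(struct txq_state *q)
    {
            if (q->credits >= STOP_THRES)
                    return false;
            q->stopped = true;      /* eth_txq_stop() in the driver */
            return !q->dbqt;        /* caller ORs in FW_WR_EQUEQ_F | FW_WR_EQUIQ_F */
    }

The caller ORs the firmware flags into wr_mid only when the helper returns true, mirroring the if (!q->dbqt) test in the hunks above.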
+ */ +static void t4_tx_completion_handler(struct sge_rspq *rspq, + const __be64 *rsp, + const struct pkt_gl *gl) +{ + u8 opcode = ((const struct rss_header *)rsp)->opcode; + struct port_info *pi = netdev_priv(rspq->netdev); + struct adapter *adapter = rspq->adap; + struct sge *s = &adapter->sge; + struct sge_eth_txq *txq; + + /* skip RSS header */ + rsp++; + + /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. + */ + if (unlikely(opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)rsp)->type == + FW_TYPE_RSSCPL)) { + rsp++; + opcode = ((const struct rss_header *)rsp)->opcode; + rsp++; + } + + if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) { + pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n", + __func__, opcode); + return; + } + + txq = &s->ethtxq[pi->first_qset + rspq->idx]; + + /* We've got the Hardware Consumer Index Update in the Egress Update + * message. If we're using the SGE Doorbell Queue Timer mechanism, + * these Egress Update messages will be our sole CIDX Updates we get + * since we don't want to chew up PCIe bandwidth for both Ingress + * Messages and Status Page writes. However, the code which manages + * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value + * stored in the Status Page at the end of the TX Queue. It's easiest + * to simply copy the CIDX Update value from the Egress Update message + * to the Status Page. Also note that no Endian issues need to be + * considered here since both are Big Endian and we're just copying + * bytes consistently ... + */ + if (txq->dbqt) { + struct cpl_sge_egr_update *egr; + + egr = (struct cpl_sge_egr_update *)rsp; + WRITE_ONCE(txq->q.stat->cidx, egr->cidx); + } + + t4_sge_eth_txq_egress_update(adapter, txq, -1); +} + /** * t4_ethrx_handler - process an ingress ethernet packet * @q: the response queue that received the packet @@ -2816,6 +2970,15 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct port_info *pi; int ret = 0; + /* If we're looking at a TX Queue CIDX Update, handle that separately + * and return. + */ + if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) || + (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) { + t4_tx_completion_handler(q, rsp, si); + return 0; + } + if (unlikely(*(u8 *)rsp == cpl_trace_pkt)) return handle_trace_pkt(q->adap, si); @@ -3212,7 +3375,7 @@ static irqreturn_t t4_intr_msi(int irq, void *cookie) { struct adapter *adap = cookie; - if (adap->flags & MASTER_PF) + if (adap->flags & CXGB4_MASTER_PF) t4_slow_intr_handler(adap); process_intrq(adap); return IRQ_HANDLED; @@ -3228,7 +3391,7 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie) struct adapter *adap = cookie; t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); - if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | + if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | process_intrq(adap)) return IRQ_HANDLED; return IRQ_NONE; /* probably shared interrupt */ @@ -3243,9 +3406,9 @@ static irqreturn_t t4_intr_intx(int irq, void *cookie) */ irq_handler_t t4_intr_handler(struct adapter *adap) { - if (adap->flags & USING_MSIX) + if (adap->flags & CXGB4_USING_MSIX) return t4_sge_intr_msix; - if (adap->flags & USING_MSI) + if (adap->flags & CXGB4_USING_MSI) return t4_intr_msi; return t4_intr_intx; } @@ -3278,7 +3441,7 @@ static void sge_rx_timer_cb(struct timer_list *t) * global Master PF activities like checking for chip ingress stalls, * etc.
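The reworked sge_tx_timer_cb() that follows walks the Ethernet TX queues round-robin from a saved rover, spends a bounded reclaim budget across them via t4_sge_eth_txq_egress_update(), and comes back sooner when the budget runs out. Stripped of driver specifics, the scheduling pattern is roughly this (hypothetical helpers, not driver code):

    #define TX_RECLAIM_BUDGET 128   /* illustrative per-tick budget */

    struct txq;                                             /* opaque here */
    extern int txq_reclaim(struct txq *q, int budget);      /* returns <= budget */
    extern void reschedule_timer(unsigned long period);

    static unsigned int rover;

    static void tx_timer_tick(struct txq **queues, unsigned int nqueues,
                              unsigned long normal_period,
                              unsigned long short_period)
    {
            int budget = TX_RECLAIM_BUDGET;
            unsigned int i = rover;

            do {
                    budget -= txq_reclaim(queues[i], budget);
                    if (!budget)
                            break;
                    if (++i >= nqueues)
                            i = 0;
            } while (i != rover);
            rover = i;

            /* Budget exhausted: come back soon; otherwise wait a full period. */
            reschedule_timer(budget ? normal_period : short_period);
    }

In the driver the short reschedule period is two jiffies and the normal one is TX_QCHECK_PERIOD, and the rover is kept in the SGE state so successive ticks resume where the last one stopped.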
*/ - if (!(adap->flags & MASTER_PF)) + if (!(adap->flags & CXGB4_MASTER_PF)) goto done; t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); @@ -3289,10 +3452,10 @@ done: static void sge_tx_timer_cb(struct timer_list *t) { - unsigned long m; - unsigned int i, budget; struct adapter *adap = from_timer(adap, t, sge.tx_timer); struct sge *s = &adap->sge; + unsigned long m, period; + unsigned int i, budget; for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) for (m = s->txq_maperr[i]; m; m &= m - 1) { @@ -3320,29 +3483,29 @@ static void sge_tx_timer_cb(struct timer_list *t) budget = MAX_TIMER_TX_RECLAIM; i = s->ethtxq_rover; do { - struct sge_eth_txq *q = &s->ethtxq[i]; - - if (q->q.in_use && - time_after_eq(jiffies, q->txq->trans_start + HZ / 100) && - __netif_tx_trylock(q->txq)) { - int avail = reclaimable(&q->q); - - if (avail) { - if (avail > budget) - avail = budget; - - free_tx_desc(adap, &q->q, avail, true); - q->q.in_use -= avail; - budget -= avail; - } - __netif_tx_unlock(q->txq); - } + budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], + budget); + if (!budget) + break; if (++i >= s->ethqsets) i = 0; - } while (budget && i != s->ethtxq_rover); + } while (i != s->ethtxq_rover); s->ethtxq_rover = i; - mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2)); + + if (budget == 0) { + /* If we found too many reclaimable packets schedule a timer + * in the near future to continue where we left off. + */ + period = 2; + } else { + /* We reclaimed all reclaimable TX Descriptors, so reschedule + * at the normal period. + */ + period = TX_QCHECK_PERIOD; + } + + mod_timer(&s->tx_timer, jiffies + period); } /** @@ -3386,7 +3549,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, struct fw_iq_cmd c; struct sge *s = &adap->sge; struct port_info *pi = netdev_priv(dev); - int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING); + int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); /* Size needs to be multiple of 16, including status entry. */ iq->size = roundup(iq->size, 16); @@ -3421,7 +3584,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, : FW_IQ_IQTYPE_OFLD)); if (fl) { - enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + unsigned int chip_ver = + CHELSIO_CHIP_VERSION(adap->params.chip); /* Allocate the ring for the hardware free list (with space * for its status page) along with the associated software @@ -3459,10 +3623,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * the smaller 64-byte value there). */ c.fl0dcaen_to_fl0cidxfthresh = - htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? + htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ? FETCHBURSTMIN_128B_X : - FETCHBURSTMIN_64B_X) | - FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? + FETCHBURSTMIN_64B_T6_X) | + FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? 
FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); c.fl0size = htons(flsz); @@ -3584,14 +3748,24 @@ static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) adap->sge.egr_map[id - adap->sge.egr_start] = q; } +/** + * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue + * @adap: the adapter + * @txq: the SGE Ethernet TX Queue to initialize + * @dev: the Linux Network Device + * @netdevq: the corresponding Linux TX Queue + * @iqid: the Ingress Queue to which to deliver CIDX Update messages + * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers + */ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *netdevq, - unsigned int iqid) + unsigned int iqid, u8 dbqt) { - int ret, nentries; - struct fw_eq_eth_cmd c; - struct sge *s = &adap->sge; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); struct port_info *pi = netdev_priv(dev); + struct sge *s = &adap->sge; + struct fw_eq_eth_cmd c; + int ret, nentries; /* Add status entries */ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); @@ -3610,19 +3784,47 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, FW_EQ_ETH_CMD_VFN_V(0)); c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c)); - c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F | - FW_EQ_ETH_CMD_VIID_V(pi->viid)); + + /* For TX Ethernet Queues using the SGE Doorbell Queue Timer + * mechanism, we use Ingress Queue messages for Hardware Consumer + * Index Updates on the TX Queue. Otherwise we have the Hardware + * write the CIDX Updates into the Status Page at the end of the + * TX Queue. + */ + c.autoequiqe_to_viid = htonl((dbqt + ? FW_EQ_ETH_CMD_AUTOEQUIQE_F + : FW_EQ_ETH_CMD_AUTOEQUEQE_F) | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); + c.fetchszm_to_iqid = - htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) | + htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt + ? HOSTFCMODE_INGRESS_QUEUE_X + : HOSTFCMODE_STATUS_PAGE_X) | FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid)); + + /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */ c.dcaen_to_eqsize = - htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | + htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ? FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the + * currently configured Timer Index. This can be changed later via an + * ethtool -C tx-usecs {Timer Val} command. Note that the SGE + * Doorbell Queue mode is currently automatically enabled in the + * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
+ */ + if (dbqt) + c.timeren_timerix = + cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F | + FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); if (ret) { kfree(txq->q.sdesc); @@ -3639,6 +3841,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, txq->txq = netdevq; txq->tso = txq->tx_cso = txq->vlan_ins = 0; txq->mapping_err = 0; + txq->dbqt = dbqt; + return 0; } @@ -3646,10 +3850,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, struct net_device *dev, unsigned int iqid, unsigned int cmplqid) { - int ret, nentries; - struct fw_eq_ctrl_cmd c; - struct sge *s = &adap->sge; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); struct port_info *pi = netdev_priv(dev); + struct sge *s = &adap->sge; + struct fw_eq_ctrl_cmd c; + int ret, nentries; /* Add status entries */ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); @@ -3673,7 +3878,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid)); c.dcaen_to_eqsize = - htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | + htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ? FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_CTRL_CMD_EQSIZE_V(nentries)); @@ -3713,6 +3920,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, struct net_device *dev, unsigned int iqid, unsigned int uld_type) { + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); int ret, nentries; struct fw_eq_ofld_cmd c; struct sge *s = &adap->sge; @@ -3743,7 +3951,9 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq, FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid)); c.dcaen_to_eqsize = - htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) | + htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ? 
FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) | FW_EQ_OFLD_CMD_EQSIZE_V(nentries)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c index 7b2207a2a130..eaf1fb74689c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.c +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c @@ -47,8 +47,7 @@ struct smt_data *t4_init_smt(void) smt_size = SMT_SIZE; - s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry), - GFP_KERNEL); + s = kvzalloc(struct_size(s, smtab, smt_size), GFP_KERNEL); if (!s) return NULL; s->smt_size = smt_size; diff --git a/drivers/net/ethernet/chelsio/cxgb4/srq.c b/drivers/net/ethernet/chelsio/cxgb4/srq.c index 82b70a565e24..9a54302bb046 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/srq.c +++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c @@ -77,7 +77,7 @@ int cxgb4_get_srq_entry(struct net_device *dev, adap = netdev2adap(dev); s = adap->srq; - if (!(adap->flags & FULL_INIT_DONE) || !s) + if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s) goto out; skb = alloc_skb(sizeof(*req), GFP_KERNEL); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 2b03f6187a24..a3544041ad32 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -198,7 +198,7 @@ static void t4_report_fw_error(struct adapter *adap) if (pcie_fw & PCIE_FW_ERR_F) { dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", reason[PCIE_FW_EVAL_G(pcie_fw)]); - adap->flags &= ~FW_OK; + adap->flags &= ~CXGB4_FW_OK; } } @@ -4105,6 +4105,9 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) * @mbox: the Firmware Mailbox to use * @port: the Port ID * @lc: the Port's Link Configuration + * @sleep_ok: if true we may sleep while awaiting command completion + * @timeout: time to wait for command to finish before timing out + * (negative implies @sleep_ok=false) * * Set up a port's MAC and PHY according to a desired link configuration. * - If the PHY can auto-negotiate first decide what to advertise, then @@ -4124,6 +4127,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, int ret; fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps); + /* Convert driver coding of Pause Frame Flow Control settings into the * Firmware's API. */ @@ -4143,8 +4147,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, fw_fec = cc_to_fwcap_fec(cc_fec); /* Figure out what our Requested Port Capabilities are going to be. + * Note parallel structure in t4_handle_get_port_info() and + * init_link_config(). */ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + if (lc->autoneg == AUTONEG_ENABLE) + return -EINVAL; + rcap = lc->acaps | fw_fc | fw_fec; lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; lc->fec = cc_fec; @@ -4156,7 +4165,11 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } - /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so + /* Some Requested Port Capabilities are trivially wrong if they exceed + * the Physical Port Capabilities. We can check that here and provide + * moderately useful feedback in the system log. + * + * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so * we need to exclude this from this check in order to maintain * compatibility ... 
*/ @@ -4185,6 +4198,13 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox, ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL, sleep_ok, timeout); + + /* Unfortunately, even if the Requested Port Capabilities "fit" within + * the Physical Port Capabilities, some combinations of features may + * still not be legal. For example, 40Gb/s and Reed-Solomon Forward + * Error Correction. So if the Firmware rejects the L1 Configure + * request, flag that here. + */ if (ret) { dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x rejected, error %d\n", @@ -4942,7 +4962,13 @@ static void pl_intr_handler(struct adapter *adap) */ int t4_slow_intr_handler(struct adapter *adapter) { - u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A); + /* There are rare cases where a PL_INT_CAUSE bit may end up getting + * set when the corresponding PL_INT_ENABLE bit isn't set. It's + * easiest just to mask that case here. + */ + u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A); + u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A); + u32 cause = raw_cause & enable; if (!(cause & GLBL_INTR_MASK)) return 0; @@ -4994,7 +5020,7 @@ int t4_slow_intr_handler(struct adapter *adapter) ulptx_intr_handler(adapter); /* Clear the interrupts just processed for which we are the master. */ - t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK); + t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK); (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */ return 1; } @@ -5217,7 +5243,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map) static unsigned int t4_use_ldst(struct adapter *adap) { - return (adap->flags & FW_OK) && !adap->use_bd; + return (adap->flags & CXGB4_FW_OK) && !adap->use_bd; } /** @@ -6106,7 +6132,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx) * ( MPSBGMAP[Port 1] << 8 ) | * ( MPSBGMAP[Port 0] << 0 )) */ - if (adapter->flags & FW_OK) { + if (adapter->flags & CXGB4_FW_OK) { u32 param, val; int ret; @@ -6692,6 +6718,47 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type) return ret; } +/** + * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values + * @adap: the adapter + * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table + * @dbqtimers: SGE Doorbell Queue Timer table + * + * Reads the SGE Doorbell Queue Timer values into the provided table. + * Returns 0 on success (Firmware and Hardware support this feature), + * an error on failure.
+ */ +int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers, + u16 *dbqtimers) +{ + int ret, dbqtimerix; + + ret = 0; + dbqtimerix = 0; + while (dbqtimerix < ndbqtimers) { + int nparams, param; + u32 params[7], vals[7]; + + nparams = ndbqtimers - dbqtimerix; + if (nparams > ARRAY_SIZE(params)) + nparams = ARRAY_SIZE(params); + + for (param = 0; param < nparams; param++) + params[param] = + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) | + FW_PARAMS_PARAM_Y_V(dbqtimerix + param)); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + nparams, params, vals); + if (ret) + break; + + for (param = 0; param < nparams; param++) + dbqtimers[dbqtimerix++] = vals[param]; + } + return ret; +} + /** * t4_fw_hello - establish communication with FW * @adap: the adapter @@ -7026,10 +7093,10 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, if (!t4_fw_matches_chip(adap, fw_hdr)) return -EINVAL; - /* Disable FW_OK flag so that mbox commands with FW_OK flag set - * wont be sent when we are flashing FW. + /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag + * set wont be sent when we are flashing FW. */ - adap->flags &= ~FW_OK; + adap->flags &= ~CXGB4_FW_OK; ret = t4_fw_halt(adap, mbox, force); if (ret < 0 && !force) @@ -7068,7 +7135,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, */ (void)t4_init_devlog_params(adap); out: - adap->flags |= FW_OK; + adap->flags |= CXGB4_FW_OK; return ret; } @@ -8461,6 +8528,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) fc = fwcap_to_cc_pause(linkattr); speed = fwcap_to_speed(linkattr); + /* Reset state for communicating new Transceiver Module status and + * whether the OS-dependent layer wants us to redo the current + * "sticky" L1 Configure Link Parameters. + */ lc->new_module = false; lc->redo_l1cfg = false; @@ -8497,9 +8568,15 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) */ pi->port_type = port_type; + /* Record new Module Type information. + */ pi->mod_type = mod_type; + /* Let the OS-dependent layer know if we have a new + * Transceiver Module inserted. + */ lc->new_module = t4_is_inserted_mod_type(mod_type); + t4_os_portmod_changed(adapter, pi->port_id); } @@ -8507,8 +8584,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) fc != lc->fc || fec != lc->fec) { /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; - dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n", - pi->tx_chan, t4_link_down_rc_str(linkdnrc)); + dev_warn_ratelimited(adapter->pdev_dev, + "Port %d link down, reason: %s\n", + pi->tx_chan, + t4_link_down_rc_str(linkdnrc)); } lc->link_ok = link_ok; lc->speed = speed; @@ -8518,6 +8597,11 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->lpacaps = lpacaps; lc->acaps = acaps & ADVERT_MASK; + /* If we're not physically capable of Auto-Negotiation, note + * this as Auto-Negotiation disabled. Otherwise, we track + * what Auto-Negotiation settings we have. Note parallel + * structure in t4_link_l1cfg_core() and init_link_config(). 
+ */ if (!(lc->acaps & FW_PORT_CAP32_ANEG)) { lc->autoneg = AUTONEG_DISABLE; } else if (lc->acaps & FW_PORT_CAP32_ANEG) { @@ -8535,6 +8619,10 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) t4_os_link_changed(adapter, pi->port_id, link_ok); } + /* If we have a new Transceiver Module and the OS-dependent code has + * told us that it wants us to redo whatever "sticky" L1 Configuration + * Link Parameters are set, do that now. + */ if (lc->new_module && lc->redo_l1cfg) { struct link_config old_lc; int ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 361d5032c288..002fc62ea726 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -91,6 +91,7 @@ enum { SGE_CTXT_SIZE = 24, /* size of SGE context */ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ + SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */ SGE_MAX_IQ_SIZE = 65520, SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index c62a0c830705..38dd41eb959e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -56,6 +56,7 @@ enum { CPL_TX_DATA_ISO = 0x1F, CPL_CLOSE_LISTSRV_RPL = 0x20, + CPL_GET_TCB_RPL = 0x22, CPL_L2T_WRITE_RPL = 0x23, CPL_PASS_OPEN_RPL = 0x24, CPL_ACT_OPEN_RPL = 0x25, @@ -688,6 +689,13 @@ struct cpl_get_tcb { #define NO_REPLY_V(x) ((x) << NO_REPLY_S) #define NO_REPLY_F NO_REPLY_V(1U) +struct cpl_get_tcb_rpl { + union opcode_tid ot; + __u8 cookie; + __u8 status; + __be16 len; +}; + struct cpl_set_tcb_field { WR_HDR; union opcode_tid ot; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index bf7325f6d553..0c5373462ced 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -218,6 +218,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */ + CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h index 3297ce025e8b..1b9afb192f7f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h @@ -41,6 +41,14 @@ #define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S) #define TCB_T_FLAGS_W 1 +#define TCB_T_FLAGS_S 0 +#define TCB_T_FLAGS_M 0xffffffffffffffffULL +#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S) + +#define TCB_RQ_START_W 30 +#define TCB_RQ_START_S 0 +#define TCB_RQ_START_M 0x3ffffffULL +#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S) #define TF_CCTRL_ECE_S 60 #define TF_CCTRL_CWR_S 61 @@ -66,4 +74,8 @@ #define TCB_RX_FRAG3_LEN_RAW_W 29 #define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30 #define TCB_PDU_HDR_LEN_W 31 + +#define TF_RX_PDU_OUT_S 49 +#define TF_RX_PDU_OUT_V(x) ((__u64)(x) << TF_RX_PDU_OUT_S) + #endif /* __T4_TCB_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index f6558cbfc54e..eb1aa82149db 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ 
b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -71,12 +71,18 @@ #define FETCHBURSTMIN_64B_X 2 #define FETCHBURSTMIN_128B_X 3 +/* T6 and later use a single-bit encoding for FetchBurstMin */ +#define FETCHBURSTMIN_64B_T6_X 0 +#define FETCHBURSTMIN_128B_T6_X 1 + #define FETCHBURSTMAX_256B_X 2 #define FETCHBURSTMAX_512B_X 3 +#define HOSTFCMODE_INGRESS_QUEUE_X 1 #define HOSTFCMODE_STATUS_PAGE_X 2 #define CIDXFLUSHTHRESH_32_X 5 +#define CIDXFLUSHTHRESH_128_X 7 #define UPDATEDELIVERY_INTERRUPT_X 1 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 1d9b3e1e5f94..b2a618e72fcf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1254,6 +1254,8 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21, FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24, FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27, + FW_PARAMS_PARAM_DEV_DBQ_TIMER = 0x29, + FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK = 0x2A, }; /* @@ -1310,6 +1312,14 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37, FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39, FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, + FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40, +}; + +/* Virtual link state as seen by the specified VF */ +enum vf_link_states { + FW_VF_LINK_STATE_AUTO = 0x00, + FW_VF_LINK_STATE_ENABLE = 0x01, + FW_VF_LINK_STATE_DISABLE = 0x02, }; /* @@ -1322,6 +1332,7 @@ enum fw_params_param_dmaq { FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11, FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12, FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13, + FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX = 0x15, FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20, }; @@ -1751,8 +1762,8 @@ struct fw_eq_eth_cmd { __be32 fetchszm_to_iqid; __be32 dcaen_to_eqsize; __be64 eqaddr; - __be32 viid_pkd; - __be32 r8_lo; + __be32 autoequiqe_to_viid; + __be32 timeren_timerix; __be64 r9; }; @@ -1847,6 +1858,10 @@ struct fw_eq_eth_cmd { #define FW_EQ_ETH_CMD_EQSIZE_S 0 #define FW_EQ_ETH_CMD_EQSIZE_V(x) ((x) << FW_EQ_ETH_CMD_EQSIZE_S) +#define FW_EQ_ETH_CMD_AUTOEQUIQE_S 31 +#define FW_EQ_ETH_CMD_AUTOEQUIQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUIQE_S) +#define FW_EQ_ETH_CMD_AUTOEQUIQE_F FW_EQ_ETH_CMD_AUTOEQUIQE_V(1U) + #define FW_EQ_ETH_CMD_AUTOEQUEQE_S 30 #define FW_EQ_ETH_CMD_AUTOEQUEQE_V(x) ((x) << FW_EQ_ETH_CMD_AUTOEQUEQE_S) #define FW_EQ_ETH_CMD_AUTOEQUEQE_F FW_EQ_ETH_CMD_AUTOEQUEQE_V(1U) @@ -1854,6 +1869,19 @@ struct fw_eq_eth_cmd { #define FW_EQ_ETH_CMD_VIID_S 16 #define FW_EQ_ETH_CMD_VIID_V(x) ((x) << FW_EQ_ETH_CMD_VIID_S) +#define FW_EQ_ETH_CMD_TIMEREN_S 3 +#define FW_EQ_ETH_CMD_TIMEREN_M 0x1 +#define FW_EQ_ETH_CMD_TIMEREN_V(x) ((x) << FW_EQ_ETH_CMD_TIMEREN_S) +#define FW_EQ_ETH_CMD_TIMEREN_G(x) \ + (((x) >> FW_EQ_ETH_CMD_TIMEREN_S) & FW_EQ_ETH_CMD_TIMEREN_M) +#define FW_EQ_ETH_CMD_TIMEREN_F FW_EQ_ETH_CMD_TIMEREN_V(1U) + +#define FW_EQ_ETH_CMD_TIMERIX_S 0 +#define FW_EQ_ETH_CMD_TIMERIX_M 0x7 +#define FW_EQ_ETH_CMD_TIMERIX_V(x) ((x) << FW_EQ_ETH_CMD_TIMERIX_S) +#define FW_EQ_ETH_CMD_TIMERIX_G(x) \ + (((x) >> FW_EQ_ETH_CMD_TIMERIX_S) & FW_EQ_ETH_CMD_TIMERIX_M) + struct fw_eq_ctrl_cmd { __be32 op_to_vfn; __be32 alloc_to_len16; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index a844296135b4..9125ddd89dd1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h @@ -36,8 +36,8 @@ #define __T4FW_VERSION_H__ #define T4FW_VERSION_MAJOR 0x01 -#define 
T4FW_VERSION_MINOR 0x14 -#define T4FW_VERSION_MICRO 0x08 +#define T4FW_VERSION_MINOR 0x16 +#define T4FW_VERSION_MICRO 0x09 #define T4FW_VERSION_BUILD 0x00 #define T4FW_MIN_VERSION_MAJOR 0x01 @@ -45,8 +45,8 @@ #define T4FW_MIN_VERSION_MICRO 0x00 #define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x14 -#define T5FW_VERSION_MICRO 0x08 +#define T5FW_VERSION_MINOR 0x16 +#define T5FW_VERSION_MICRO 0x09 #define T5FW_VERSION_BUILD 0x00 #define T5FW_MIN_VERSION_MAJOR 0x00 @@ -54,8 +54,8 @@ #define T5FW_MIN_VERSION_MICRO 0x00 #define T6FW_VERSION_MAJOR 0x01 -#define T6FW_VERSION_MINOR 0x14 -#define T6FW_VERSION_MICRO 0x08 +#define T6FW_VERSION_MINOR 0x16 +#define T6FW_VERSION_MICRO 0x09 #define T6FW_VERSION_BUILD 0x00 #define T6FW_MIN_VERSION_MAJOR 0x00 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 5883f09e3804..3782e48dada2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -94,7 +94,7 @@ struct port_info { struct adapter *adapter; /* our adapter */ u32 vlan_id; /* vlan id for VST */ u16 viid; /* virtual interface ID */ - s16 xact_addr_filt; /* index of our MAC address filter */ + int xact_addr_filt; /* index of our MAC address filter */ u16 rss_size; /* size of VI's RSS table slice */ u8 pidx; /* index into adapter port[] */ s8 mdio_addr; @@ -352,6 +352,7 @@ struct sge { struct hash_mac_addr { struct list_head list; u8 addr[ETH_ALEN]; + unsigned int iface_mac; }; struct mbox_list { @@ -405,11 +406,12 @@ struct adapter { }; enum { /* adapter flags */ - FULL_INIT_DONE = (1UL << 0), - USING_MSI = (1UL << 1), - USING_MSIX = (1UL << 2), - QUEUES_BOUND = (1UL << 3), - ROOT_NO_RELAXED_ORDERING = (1UL << 4), + CXGB4VF_FULL_INIT_DONE = (1UL << 0), + CXGB4VF_USING_MSI = (1UL << 1), + CXGB4VF_USING_MSIX = (1UL << 2), + CXGB4VF_QUEUES_BOUND = (1UL << 3), + CXGB4VF_ROOT_NO_RELAXED_ORDERING = (1UL << 4), + CXGB4VF_FW_OK = (1UL << 5), }; /* diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2fab87e86561..adc4d481815b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -155,6 +155,8 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) const char *fc; const struct port_info *pi = netdev_priv(dev); + netif_carrier_on(dev); + switch (pi->link_cfg.speed) { case 100: s = "100Mbps"; @@ -200,6 +202,7 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc); } else { + netif_carrier_off(dev); netdev_info(dev, "link down\n"); } } @@ -236,6 +239,73 @@ void t4vf_os_portmod_changed(struct adapter *adapter, int pidx) "inserted\n", dev->name, pi->mod_type); } +static int cxgb4vf_set_addr_hash(struct port_info *pi) +{ + struct adapter *adapter = pi->adapter; + u64 vec = 0; + bool ucast = false; + struct hash_mac_addr *entry; + + /* Calculate the hash vector for the updated list and program it */ + list_for_each_entry(entry, &adapter->mac_hlist, list) { + ucast |= is_unicast_ether_addr(entry->addr); + vec |= (1ULL << hash_mac_addr(entry->addr)); + } + return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false); +} + +/** + * cxgb4vf_change_mac - Update match filter for a MAC address. 
+ * @pi: the port_info + * @viid: the VI id + * @tcam_idx: TCAM index of existing filter for old value of MAC address, + * or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an MPS filter and sets it to the new MAC address if + * @tcam_idx >= 0, or adds the MAC address to a new filter if + * @tcam_idx < 0. In the latter case the address is added persistently + * if @persist is %true. + * Addresses are programmed to hash region, if tcam runs out of entries. + * + */ +static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid, + int *tcam_idx, const u8 *addr, bool persistent) +{ + struct hash_mac_addr *new_entry, *entry; + struct adapter *adapter = pi->adapter; + int ret; + + ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent); + /* We ran out of TCAM entries. try programming hash region. */ + if (ret == -ENOMEM) { + /* If the MAC address to be updated is in the hash addr + * list, update it from the list + */ + list_for_each_entry(entry, &adapter->mac_hlist, list) { + if (entry->iface_mac) { + ether_addr_copy(entry->addr, addr); + goto set_hash; + } + } + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + ether_addr_copy(new_entry->addr, addr); + new_entry->iface_mac = true; + list_add_tail(&new_entry->list, &adapter->mac_hlist); +set_hash: + ret = cxgb4vf_set_addr_hash(pi); + } else if (ret >= 0) { + *tcam_idx = ret; + ret = 0; + } + + return ret; +} + /* * Net device operations. * ====================== @@ -259,14 +329,10 @@ static int link_start(struct net_device *dev) */ ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1, true); - if (ret == 0) { - ret = t4vf_change_mac(pi->adapter, pi->viid, - pi->xact_addr_filt, dev->dev_addr, true); - if (ret >= 0) { - pi->xact_addr_filt = ret; - ret = 0; - } - } + if (ret == 0) + ret = cxgb4vf_change_mac(pi, pi->viid, + &pi->xact_addr_filt, + dev->dev_addr, true); /* * We don't need to actually "start the link" itself since the @@ -276,16 +342,6 @@ static int link_start(struct net_device *dev) if (ret == 0) ret = t4vf_enable_pi(pi->adapter, pi, true, true); - /* The Virtual Interfaces are connected to an internal switch on the - * chip which allows VIs attached to the same port to talk to each - * other even when the port link is down. As a result, we generally - * want to always report a VI's link as being "up", provided there are - * no errors in enabling vi. - */ - - if (ret == 0) - netif_carrier_on(dev); - return ret; } @@ -406,7 +462,7 @@ static void enable_rx(struct adapter *adapter) * The interrupt queue doesn't use NAPI so we do the 0-increment of * its Going To Sleep register here to get it started. */ - if (adapter->flags & USING_MSI) + if (adapter->flags & CXGB4VF_USING_MSI) t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS, CIDXINC_V(0) | SEINTARM_V(s->intrq.intr_params) | @@ -550,7 +606,7 @@ static int setup_sge_queues(struct adapter *adapter) * the intrq's queue ID as the interrupt forwarding queue for the * subsequent calls ... */ - if (adapter->flags & USING_MSI) { + if (adapter->flags & CXGB4VF_USING_MSI) { err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false, adapter->port[0], 0, NULL, NULL); if (err) @@ -710,7 +766,7 @@ static int adapter_up(struct adapter *adapter) * adapter setup. Once we've done this, many of our adapter * parameters can no longer be changed ... 
*/ - if ((adapter->flags & FULL_INIT_DONE) == 0) { + if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) { err = setup_sge_queues(adapter); if (err) return err; @@ -720,17 +776,18 @@ static int adapter_up(struct adapter *adapter) return err; } - if (adapter->flags & USING_MSIX) + if (adapter->flags & CXGB4VF_USING_MSIX) name_msix_vecs(adapter); - adapter->flags |= FULL_INIT_DONE; + adapter->flags |= CXGB4VF_FULL_INIT_DONE; } /* * Acquire our interrupt resources. We only support MSI-X and MSI. */ - BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); - if (adapter->flags & USING_MSIX) + BUG_ON((adapter->flags & + (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0); + if (adapter->flags & CXGB4VF_USING_MSIX) err = request_msix_queue_irqs(adapter); else err = request_irq(adapter->pdev->irq, @@ -761,7 +818,7 @@ static void adapter_down(struct adapter *adapter) /* * Free interrupt resources. */ - if (adapter->flags & USING_MSIX) + if (adapter->flags & CXGB4VF_USING_MSIX) free_msix_queue_irqs(adapter); else free_irq(adapter->pdev->irq, adapter); @@ -781,6 +838,13 @@ static int cxgb4vf_open(struct net_device *dev) struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; + /* + * If we don't have a connection to the firmware there's nothing we + * can do. + */ + if (!(adapter->flags & CXGB4VF_FW_OK)) + return -ENXIO; + /* * If this is the first interface that we're opening on the "adapter", * bring the "adapter" up now. @@ -791,6 +855,13 @@ static int cxgb4vf_open(struct net_device *dev) return err; } + /* It's possible that the basic port information could have + * changed since we first read it. + */ + err = t4vf_update_port_info(pi); + if (err < 0) + return err; + /* * Note that this interface is up and start everything up ... */ @@ -863,21 +934,6 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev) return ns; } -static inline int cxgb4vf_set_addr_hash(struct port_info *pi) -{ - struct adapter *adapter = pi->adapter; - u64 vec = 0; - bool ucast = false; - struct hash_mac_addr *entry; - - /* Calculate the hash vector for the updated list and program it */ - list_for_each_entry(entry, &adapter->mac_hlist, list) { - ucast |= is_unicast_ether_addr(entry->addr); - vec |= (1ULL << hash_mac_addr(entry->addr)); - } - return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false); -} - static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr) { struct port_info *pi = netdev_priv(netdev); @@ -1159,13 +1215,12 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt, - addr->sa_data, true); + ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt, + addr->sa_data, true); if (ret < 0) return ret; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - pi->xact_addr_filt = ret; return 0; } @@ -1179,7 +1234,7 @@ static void cxgb4vf_poll_controller(struct net_device *dev) struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - if (adapter->flags & USING_MSIX) { + if (adapter->flags & CXGB4VF_USING_MSIX) { struct sge_eth_rxq *rxq; int nqsets; @@ -1354,7 +1409,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); - FW_CAPS_TO_LMM(SPEED_10G, 10000baseSR_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); FW_CAPS_TO_LMM(SPEED_25G, 
25000baseCR_Full); FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full); @@ -1365,6 +1420,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, break; } + if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) { + FW_CAPS_TO_LMM(FEC_RS, FEC_RS); + FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER); + } else { + SET_LMM(FEC_NONE); + } + FW_CAPS_TO_LMM(ANEG, Autoneg); FW_CAPS_TO_LMM(802_3_PAUSE, Pause); FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); @@ -1587,7 +1649,7 @@ static int cxgb4vf_set_ringparam(struct net_device *dev, rp->tx_pending < MIN_TXQ_ENTRIES) return -EINVAL; - if (adapter->flags & FULL_INIT_DONE) + if (adapter->flags & CXGB4VF_FULL_INIT_DONE) return -EBUSY; for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) { @@ -1871,6 +1933,8 @@ static void cxgb4vf_get_wol(struct net_device *dev, * TCP Segmentation Offload flags which we support. */ #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) +#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ + NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) static const struct ethtool_ops cxgb4vf_ethtool_ops = { .get_link_ksettings = cxgb4vf_get_link_ksettings, @@ -2102,7 +2166,7 @@ static int sge_qinfo_show(struct seq_file *seq, void *v) static int sge_queue_entries(const struct adapter *adapter) { return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + - ((adapter->flags & USING_MSI) != 0); + ((adapter->flags & CXGB4VF_USING_MSI) != 0); } static void *sge_queue_start(struct seq_file *seq, loff_t *pos) @@ -2248,7 +2312,7 @@ static int sge_qstats_show(struct seq_file *seq, void *v) static int sge_qstats_entries(const struct adapter *adapter) { return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 + - ((adapter->flags & USING_MSI) != 0); + ((adapter->flags & CXGB4VF_USING_MSI) != 0); } static void *sge_qstats_start(struct seq_file *seq, loff_t *pos) @@ -2657,6 +2721,7 @@ static int adap_init0(struct adapter *adapter) */ size_nports_qsets(adapter); + adapter->flags |= CXGB4VF_FW_OK; return 0; } @@ -2691,7 +2756,8 @@ static void cfg_queues(struct adapter *adapter) * support. In particular, this means that we need to know what kind * of interrupts we'll be using ... */ - BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); + BUG_ON((adapter->flags & + (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0); /* * Count the number of 10GbE Virtual Interfaces that we have. @@ -3017,11 +3083,13 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * using Relaxed Ordering. */ if (!pcie_relaxed_ordering_enabled(pdev)) - adapter->flags |= ROOT_NO_RELAXED_ORDERING; + adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING; err = adap_init0(adapter); if (err) - goto err_unmap_bar; + dev_err(&pdev->dev, + "Adapter initialization failed, error %d. Continuing in debug mode\n", + err); /* Initialize hash mac addr list */ INIT_LIST_HEAD(&adapter->mac_hlist); @@ -3046,13 +3114,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, break; port_id = ffs(pmask) - 1; pmask &= ~(1 << port_id); - viid = t4vf_alloc_vi(adapter, port_id); - if (viid < 0) { - dev_err(&pdev->dev, "cannot allocate VI for port %d:" - " err=%d\n", port_id, viid); - err = viid; - goto err_free_dev; - } /* * Allocate our network device and stitch things together. 
@@ -3060,7 +3121,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, netdev = alloc_etherdev_mq(sizeof(struct port_info), MAX_PORT_QSETS); if (netdev == NULL) { - t4vf_free_vi(adapter, viid); err = -ENOMEM; goto err_free_dev; } @@ -3070,26 +3130,21 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, pi->adapter = adapter; pi->pidx = pidx; pi->port_id = port_id; - pi->viid = viid; /* * Initialize the starting state of our "port" and register * it. */ pi->xact_addr_filt = -1; - netif_carrier_off(netdev); netdev->irq = pdev->irq; - netdev->hw_features = NETIF_F_SG | TSO_FLAGS | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM; - netdev->vlan_features = NETIF_F_SG | TSO_FLAGS | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HIGHDMA; - netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = netdev->hw_features; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features = netdev->features & VLAN_FEAT; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->min_mtu = 81; @@ -3099,6 +3154,23 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, netdev->ethtool_ops = &cxgb4vf_ethtool_ops; netdev->dev_port = pi->port_id; + /* + * If we haven't been able to contact the firmware, there's + * nothing else we can do for this "port" ... + */ + if (!(adapter->flags & CXGB4VF_FW_OK)) + continue; + + viid = t4vf_alloc_vi(adapter, port_id); + if (viid < 0) { + dev_err(&pdev->dev, + "cannot allocate VI for port %d: err=%d\n", + port_id, viid); + err = viid; + goto err_free_dev; + } + pi->viid = viid; + /* * Initialize the hardware/software state for the port. */ @@ -3136,7 +3208,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * get MSI interrupts we bail with the error. */ if (msi == MSI_MSIX && enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; + adapter->flags |= CXGB4VF_USING_MSIX; else { if (msi == MSI_MSIX) { dev_info(adapter->pdev_dev, @@ -3156,7 +3228,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, " err=%d\n", err); goto err_free_dev; } - adapter->flags |= USING_MSI; + adapter->flags |= CXGB4VF_USING_MSI; } /* Now that we know how many "ports" we have and what interrupt @@ -3186,6 +3258,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, continue; } + netif_carrier_off(netdev); set_bit(pidx, &adapter->registered_device_map); } if (adapter->registered_device_map == 0) { @@ -3214,8 +3287,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, for_each_port(adapter, pidx) { dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n", adapter->port[pidx]->name, - (adapter->flags & USING_MSIX) ? "MSI-X" : - (adapter->flags & USING_MSI) ? "MSI" : ""); + (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" : + (adapter->flags & CXGB4VF_USING_MSI) ? "MSI" : ""); } /* @@ -3228,12 +3301,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * so far and return the error. 
*/ err_disable_interrupts: - if (adapter->flags & USING_MSIX) { + if (adapter->flags & CXGB4VF_USING_MSIX) { pci_disable_msix(adapter->pdev); - adapter->flags &= ~USING_MSIX; - } else if (adapter->flags & USING_MSI) { + adapter->flags &= ~CXGB4VF_USING_MSIX; + } else if (adapter->flags & CXGB4VF_USING_MSI) { pci_disable_msi(adapter->pdev); - adapter->flags &= ~USING_MSI; + adapter->flags &= ~CXGB4VF_USING_MSI; } err_free_dev: @@ -3242,13 +3315,13 @@ err_free_dev: if (netdev == NULL) continue; pi = netdev_priv(netdev); - t4vf_free_vi(adapter, pi->viid); + if (pi->viid) + t4vf_free_vi(adapter, pi->viid); if (test_bit(pidx, &adapter->registered_device_map)) unregister_netdev(netdev); free_netdev(netdev); } -err_unmap_bar: if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); @@ -3293,12 +3366,12 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) if (test_bit(pidx, &adapter->registered_device_map)) unregister_netdev(adapter->port[pidx]); t4vf_sge_stop(adapter); - if (adapter->flags & USING_MSIX) { + if (adapter->flags & CXGB4VF_USING_MSIX) { pci_disable_msix(adapter->pdev); - adapter->flags &= ~USING_MSIX; - } else if (adapter->flags & USING_MSI) { + adapter->flags &= ~CXGB4VF_USING_MSIX; + } else if (adapter->flags & CXGB4VF_USING_MSI) { pci_disable_msi(adapter->pdev); - adapter->flags &= ~USING_MSI; + adapter->flags &= ~CXGB4VF_USING_MSI; } /* @@ -3321,7 +3394,8 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) continue; pi = netdev_priv(netdev); - t4vf_free_vi(adapter, pi->viid); + if (pi->viid) + t4vf_free_vi(adapter, pi->viid); free_netdev(netdev); } iounmap(adapter->regs); @@ -3369,12 +3443,12 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev) * Interrupts allowing various internal pathways to drain. */ t4vf_sge_stop(adapter); - if (adapter->flags & USING_MSIX) { + if (adapter->flags & CXGB4VF_USING_MSIX) { pci_disable_msix(adapter->pdev); - adapter->flags &= ~USING_MSIX; - } else if (adapter->flags & USING_MSI) { + adapter->flags &= ~CXGB4VF_USING_MSIX; + } else if (adapter->flags & CXGB4VF_USING_MSI) { pci_disable_msi(adapter->pdev); - adapter->flags &= ~USING_MSI; + adapter->flags &= ~CXGB4VF_USING_MSI; } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 1d534f0baa69..f71c973398ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2044,8 +2044,9 @@ static irqreturn_t t4vf_intr_msi(int irq, void *cookie) */ irq_handler_t t4vf_intr_handler(struct adapter *adapter) { - BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0); - if (adapter->flags & USING_MSIX) + BUG_ON((adapter->flags & + (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0); + if (adapter->flags & CXGB4VF_USING_MSIX) return t4vf_sge_intr_msix; else return t4vf_intr_msi; @@ -2209,7 +2210,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, struct port_info *pi = netdev_priv(dev); struct fw_iq_cmd cmd, rpl; int ret, iqandst, flsz = 0; - int relaxed = !(adapter->flags & ROOT_NO_RELAXED_ORDERING); + int relaxed = !(adapter->flags & CXGB4VF_ROOT_NO_RELAXED_ORDERING); /* * If we're using MSI interrupts and we're not initializing the @@ -2218,7 +2219,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, * the Forwarded Interrupt Queue must be set up before any other * ingress queue ... 
*/ - if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) { + if ((adapter->flags & CXGB4VF_USING_MSI) && + rspq != &adapter->sge.intrq) { iqandst = SGE_INTRDST_IQ; intr_dest = adapter->sge.intrq.abs_id; } else @@ -2268,7 +2270,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, cmd.iqaddr = cpu_to_be64(rspq->phys_addr); if (fl) { - enum chip_type chip = + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); /* * Allocate the ring for the hardware free list (with space @@ -2319,10 +2321,10 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, */ cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( - FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? - FETCHBURSTMIN_128B_X : - FETCHBURSTMIN_64B_X) | - FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? + FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 + ? FETCHBURSTMIN_128B_X + : FETCHBURSTMIN_64B_T6_X) | + FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); cmd.fl0size = cpu_to_be16(flsz); @@ -2411,10 +2413,11 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, struct net_device *dev, struct netdev_queue *devq, unsigned int iqid) { + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + struct port_info *pi = netdev_priv(dev); + struct fw_eq_eth_cmd cmd, rpl; struct sge *s = &adapter->sge; int ret, nentries; - struct fw_eq_eth_cmd cmd, rpl; - struct port_info *pi = netdev_priv(dev); /* * Calculate the size of the hardware TX Queue (including the Status @@ -2448,17 +2451,19 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq, cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F | FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(cmd)); - cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | - FW_EQ_ETH_CMD_VIID_V(pi->viid)); + cmd.autoequiqe_to_viid = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F | + FW_EQ_ETH_CMD_VIID_V(pi->viid)); cmd.fetchszm_to_iqid = cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) | FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) | FW_EQ_ETH_CMD_IQID_V(iqid)); cmd.dcaen_to_eqsize = - cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) | - FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) | + cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5 + ? 
FETCHBURSTMIN_64B_X + : FETCHBURSTMIN_64B_T6_X) | + FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) | FW_EQ_ETH_CMD_CIDXFTHRESH_V( - SGE_CIDXFLUSHTHRESH_32) | + CIDXFLUSHTHRESH_32_X) | FW_EQ_ETH_CMD_EQSIZE_V(nentries)); cmd.eqaddr = cpu_to_be64(txq->q.phys_addr); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 5b8c08cf523f..84dff74ca9cd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -2005,8 +2005,10 @@ static void t4vf_handle_get_port_info(struct port_info *pi, fc != lc->fc || fec != lc->fec) { /* something changed */ if (!link_ok && lc->link_ok) { lc->link_down_rc = linkdnrc; - dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n", - pi->port_id, t4vf_link_down_rc_str(linkdnrc)); + dev_warn_ratelimited(adapter->pdev_dev, + "Port %d link down, reason: %s\n", + pi->port_id, + t4vf_link_down_rc_str(linkdnrc)); } lc->link_ok = link_ok; lc->speed = speed; diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c index 99038dfc7fbe..9900993b6aea 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.c +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c @@ -32,7 +32,8 @@ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) break; default: return -EPROTONOSUPPORT; - }; + } + data.type = FILTER_IPV4_5TUPLE; data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src); data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst); diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 60641e202534..733d9172425b 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -119,7 +119,7 @@ static void enic_init_affinity_hint(struct enic *enic) for (i = 0; i < enic->intr_count; i++) { if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || - (enic->msix[i].affinity_mask && + (cpumask_available(enic->msix[i].affinity_mask) && !cpumask_empty(enic->msix[i].affinity_mask))) continue; if (zalloc_cpumask_var(&enic->msix[i].affinity_mask, @@ -148,7 +148,7 @@ static void enic_set_affinity_hint(struct enic *enic) for (i = 0; i < enic->intr_count; i++) { if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) || - !enic->msix[i].affinity_mask || + !cpumask_available(enic->msix[i].affinity_mask) || cpumask_empty(enic->msix[i].affinity_mask)) continue; err = irq_set_affinity_hint(enic->msix_entry[i].vector, @@ -161,7 +161,7 @@ static void enic_set_affinity_hint(struct enic *enic) for (i = 0; i < enic->wq_count; i++) { int wq_intr = enic_msix_wq_intr(enic, i); - if (enic->msix[wq_intr].affinity_mask && + if (cpumask_available(enic->msix[wq_intr].affinity_mask) && !cpumask_empty(enic->msix[wq_intr].affinity_mask)) netif_set_xps_queue(enic->netdev, enic->msix[wq_intr].affinity_mask, @@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, * csum is correct or is zero. 
*/ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && - tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { + tcp_udp_csum_ok && outer_csum_ok && + (ipv4_csum_ok || ipv6)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = encap; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 0a82fcf16d35..c2586f44c29d 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -395,6 +395,7 @@ static void dm9000_set_io(struct board_info *db, int byte_width) case 3: dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n"); + /* fall through */ case 2: db->dumpblk = dm9000_dumpblk_16bit; db->outblk = dm9000_outblk_16bit; diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496c..f1a2da15dd0a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -585,7 +585,7 @@ static void de_tx (struct de_private *de) netif_dbg(de, tx_done, de->dev, "tx done, slot %d\n", tx_tail); } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } next: diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c index 1812f4916917..ba0a69b363f8 100644 --- a/drivers/net/ethernet/dec/tulip/eeprom.c +++ b/drivers/net/ethernet/dec/tulip/eeprom.c @@ -224,9 +224,7 @@ subsequent_board: return; } - mtable = kmalloc(sizeof(struct mediatable) + - count * sizeof(struct medialeaf), - GFP_KERNEL); + mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL); if (mtable == NULL) return; /* Horrible, impossible failure. */ last_mediatable = tp->mtable = mtable; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index d8d423f22c4f..cfcdfee718b0 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -843,9 +843,9 @@ rio_free_tx (struct net_device *dev, int irq) desc_to_dma(&np->tx_ring[entry]), skb->len, PCI_DMA_TODEVICE); if (irq) - dev_kfree_skb_irq (skb); + dev_consume_skb_irq(skb); else - dev_kfree_skb (skb); + dev_kfree_skb(skb); np->tx_skbuff[entry] = NULL; entry = (entry + 1) % TX_RING_SIZE; diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 1a27176381fb..4a37a69764ce 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1193,7 +1193,6 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) int handled = 0; int i; - do { int intr_status = ioread16(ioaddr + IntrStatus); iowrite16(intr_status, ioaddr + IntrStatus); @@ -1286,7 +1285,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag[0].addr), skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq (np->tx_skbuff[entry]); + dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag[0].addr = 0; np->tx_ring[entry].frag[0].length = 0; @@ -1305,7 +1304,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag[0].addr), skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq (np->tx_skbuff[entry]); + dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag[0].addr = 0; np->tx_ring[entry].frag[0].length = 0; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d5026909dec5..3c7c04406a2b 100644 --- 
a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1270,10 +1270,6 @@ static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo) #define is_arp_allowed_on_bmc(adapter, skb) \ (is_arp(skb) && is_arp_filt_enabled(adapter)) -#define is_broadcast_packet(eh, adapter) \ - (is_multicast_ether_addr(eh->h_dest) && \ - !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast)) - #define is_arp(skb) (skb->protocol == htons(ETH_P_ARP)) #define is_arp_filt_enabled(adapter) \ diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 3e5e97186fc4..b17b79e612a3 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1637,7 +1637,7 @@ static int ftgmac100_setup_mdio(struct net_device *netdev) reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR); reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE; iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR); - }; + } /* Get PHY mode from device-tree */ if (np) { diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index ae55da60ed0e..c24fd56a2c71 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1531,7 +1531,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) /* Free the original skb. */ pci_unmap_single(np->pci_dev, np->cur_tx->buffer, np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(np->cur_tx->skbuff); + dev_consume_skb_irq(np->cur_tx->skbuff); np->cur_tx->skbuff = NULL; --np->really_tx_count; if (np->cur_tx->control & TXLD) { diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index d3a62bc1f1c6..71793e03c3c8 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -97,5 +97,6 @@ config GIANFAR source "drivers/net/ethernet/freescale/dpaa/Kconfig" source "drivers/net/ethernet/freescale/dpaa2/Kconfig" +source "drivers/net/ethernet/freescale/enetc/Kconfig" endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 3b4ff08e3841..6a93293d31e0 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -23,3 +23,6 @@ obj-$(CONFIG_FSL_FMAN) += fman/ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/ obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/ + +obj-$(CONFIG_FSL_ENETC) += enetc/ +obj-$(CONFIG_FSL_ENETC_VF) += enetc/ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 62497119c85f..bdee441bc3b7 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -501,7 +501,7 @@ static int dpaa_get_ts_info(struct net_device *net_dev, struct device_node *mac_node = dev->of_node; struct device_node *fman_node = NULL, *ptp_node = NULL; struct platform_device *ptp_dev = NULL; - struct qoriq_ptp *ptp = NULL; + struct ptp_qoriq *ptp = NULL; info->phc_index = -1; diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index 2f424e0a8225..d1e78cdd512f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o +fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o 
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o # Needed by the tracing framework diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c new file mode 100644 index 000000000000..a027f4a9d0cc --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2015 Freescale Semiconductor Inc. + * Copyright 2018-2019 NXP + */ +#include +#include +#include "dpaa2-eth.h" +#include "dpaa2-eth-debugfs.h" + +#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" + +static struct dentry *dpaa2_dbg_root; + +static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct rtnl_link_stats64 *stats; + struct dpaa2_eth_drv_stats *extras; + int i; + + seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n", + "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", + "Tx SG", "Tx realloc", "Enq busy"); + + for_each_online_cpu(i) { + stats = per_cpu_ptr(priv->percpu_stats, i); + extras = per_cpu_ptr(priv->percpu_extras, i); + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", + i, + stats->rx_packets, + stats->rx_errors, + extras->rx_sg_frames, + stats->tx_packets, + stats->tx_errors, + extras->tx_conf_frames, + extras->tx_sg_frames, + extras->tx_reallocs, + extras->tx_portal_busy); + } + + return 0; +} + +static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_cpu_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_cpu_ops = { + .open = dpaa2_dbg_cpu_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static char *fq_type_to_str(struct dpaa2_eth_fq *fq) +{ + switch (fq->type) { + case DPAA2_RX_FQ: + return "Rx"; + case DPAA2_TX_CONF_FQ: + return "Tx conf"; + default: + return "N/A"; + } +} + +static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct dpaa2_eth_fq *fq; + u32 fcnt, bcnt; + int i, err; + + seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s\n", + "VFQID", "CPU", "Type", "Frames", "Pending frames"); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); + if (err) + fcnt = 0; + + seq_printf(file, "%5d%16d%16s%16llu%16u\n", + fq->fqid, + fq->target_cpu, + fq_type_to_str(fq), + fq->stats.frames, + fcnt); + } + + return 0; +} + +static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_fqs_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_fq_ops = { + .open = dpaa2_dbg_fqs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; + struct dpaa2_eth_channel *ch; + int i; + + 
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); + seq_printf(file, "%s%16s%16s%16s%16s\n", + "CHID", "CPU", "Deq busy", "CDANs", "Buf count"); + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + seq_printf(file, "%4d%16d%16llu%16llu%16d\n", + ch->ch_id, + ch->nctx.desired_cpu, + ch->stats.dequeue_portal_busy, + ch->stats.cdan, + ch->buf_count); + } + + return 0; +} + +static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) +{ + int err; + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; + + err = single_open(file, dpaa2_dbg_ch_show, priv); + if (err < 0) + netdev_err(priv->net_dev, "single_open() failed\n"); + + return err; +} + +static const struct file_operations dpaa2_dbg_ch_ops = { + .open = dpaa2_dbg_ch_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) +{ + if (!dpaa2_dbg_root) + return; + + /* Create a directory for the interface */ + priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, + dpaa2_dbg_root); + if (!priv->dbg.dir) { + netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); + return; + } + + /* per-cpu stats file */ + priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_cpu_ops); + if (!priv->dbg.cpu_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_cpu_stats; + } + + /* per-fq stats file */ + priv->dbg.fq_stats = debugfs_create_file("fq_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_fq_ops); + if (!priv->dbg.fq_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_fq_stats; + } + + /* per-channel stats file */ + priv->dbg.ch_stats = debugfs_create_file("ch_stats", 0444, + priv->dbg.dir, priv, + &dpaa2_dbg_ch_ops); + if (!priv->dbg.ch_stats) { + netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); + goto err_ch_stats; + } + + return; + +err_ch_stats: + debugfs_remove(priv->dbg.fq_stats); +err_fq_stats: + debugfs_remove(priv->dbg.cpu_stats); +err_cpu_stats: + debugfs_remove(priv->dbg.dir); +} + +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) +{ + debugfs_remove(priv->dbg.fq_stats); + debugfs_remove(priv->dbg.ch_stats); + debugfs_remove(priv->dbg.cpu_stats); + debugfs_remove(priv->dbg.dir); +} + +void dpaa2_eth_dbg_init(void) +{ + dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); + if (!dpaa2_dbg_root) { + pr_err("DPAA2-ETH: debugfs create failed\n"); + return; + } + + pr_debug("DPAA2-ETH: debugfs created\n"); +} + +void dpaa2_eth_dbg_exit(void) +{ + debugfs_remove(dpaa2_dbg_root); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h new file mode 100644 index 000000000000..4f63de997a26 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP + */ +#ifndef DPAA2_ETH_DEBUGFS_H +#define DPAA2_ETH_DEBUGFS_H + +#include + +struct dpaa2_eth_priv; + +struct dpaa2_debugfs { + struct dentry *dir; + struct dentry *fq_stats; + struct dentry *ch_stats; + struct dentry *cpu_stats; +}; + +#ifdef CONFIG_DEBUG_FS +void dpaa2_eth_dbg_init(void); +void dpaa2_eth_dbg_exit(void); +void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); +void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); +#else +static inline void dpaa2_eth_dbg_init(void) {} +static inline void dpaa2_eth_dbg_exit(void) {} +static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} +static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} +#endif /* CONFIG_DEBUG_FS */ + +#endif /* DPAA2_ETH_DEBUGFS_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 1ca9a18139ec..2ba49e959c3f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { addr = dpaa2_sg_get_addr(&sgt[i]); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); - skb_free_frag(sg_vaddr); + free_pages((unsigned long)sg_vaddr, 0); if (dpaa2_sg_is_final(&sgt[i])) break; } free_buf: - skb_free_frag(vaddr); + free_pages((unsigned long)vaddr, 0); } /* Build a linear skb based on a single-buffer frame descriptor */ @@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, ch->buf_count--; - skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); + skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) return NULL; @@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, /* Get the address and length from the S/G entry */ sg_addr = dpaa2_sg_get_addr(sge); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); - dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); sg_length = dpaa2_sg_get_len(sge); if (i == 0) { /* We build the skb around the first data buffer */ - skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); + skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE); if (unlikely(!skb)) { /* Free the first SG entry now, since we already * unmapped it and obtained the virtual address */ - skb_free_frag(sg_vaddr); + free_pages((unsigned long)sg_vaddr, 0); /* We still need to subtract the buffers used * by this FD from our software counter @@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) for (i = 0; i < count; i++) { vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); - skb_free_frag(vaddr); + dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)vaddr, 0); } } @@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd, fq = &priv->fq[queue_id]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { - err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, - priv->tx_qdid, 0, - fq->tx_qdbin, fd); + err = priv->enqueue(priv, fq, fd, 0); if (err != -EBUSY) break; } @@ -298,6 +296,7 @@ static u32 run_xdp(struct 
dpaa2_eth_priv *priv, xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; xdp_set_data_meta_invalid(&xdp); + xdp.rxq = &ch->xdp_rxq; xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -330,8 +329,20 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv, xdp_release_buf(priv, ch, addr); ch->stats.xdp_drop++; break; + case XDP_REDIRECT: + dma_unmap_page(priv->net_dev->dev.parent, addr, + DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL); + ch->buf_count--; + xdp.data_hard_start = vaddr; + err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog); + if (unlikely(err)) + ch->stats.xdp_drop++; + else + ch->stats.xdp_redirect++; + break; } + ch->xdp.res |= xdp_act; out: rcu_read_unlock(); return xdp_act; @@ -378,16 +389,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, return; } - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_frag_skb(priv, ch, buf_data); - skb_free_frag(vaddr); + free_pages((unsigned long)vaddr, 0); percpu_extras->rx_sg_frames++; percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); } else { @@ -573,10 +584,11 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, * all of them on Tx Conf. */ swa = (struct dpaa2_eth_swa *)sgt_buf; - swa->skb = skb; - swa->scl = scl; - swa->num_sg = num_sg; - swa->sgt_size = sgt_buf_size; + swa->type = DPAA2_ETH_SWA_SG; + swa->sg.skb = skb; + swa->sg.scl = scl; + swa->sg.num_sg = num_sg; + swa->sg.sgt_size = sgt_buf_size; /* Separately map the SGT buffer */ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); @@ -611,7 +623,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, { struct device *dev = priv->net_dev->dev.parent; u8 *buffer_start, *aligned_start; - struct sk_buff **skbh; + struct dpaa2_eth_swa *swa; dma_addr_t addr; buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); @@ -628,8 +640,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, * (in the private data area) such that we can release it * on Tx confirm */ - skbh = (struct sk_buff **)buffer_start; - *skbh = skb; + swa = (struct dpaa2_eth_swa *)buffer_start; + swa->type = DPAA2_ETH_SWA_SINGLE; + swa->single.skb = skb; addr = dma_map_single(dev, buffer_start, skb_tail_pointer(skb) - buffer_start, @@ -657,47 +670,65 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, * dpaa2_eth_tx(). */ static void free_tx_fd(const struct dpaa2_eth_priv *priv, - const struct dpaa2_fd *fd) + struct dpaa2_eth_fq *fq, + const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; dma_addr_t fd_addr; - struct sk_buff **skbh, *skb; + struct sk_buff *skb = NULL; unsigned char *buffer_start; struct dpaa2_eth_swa *swa; u8 fd_format = dpaa2_fd_get_format(fd); + u32 fd_len = dpaa2_fd_get_len(fd); fd_addr = dpaa2_fd_get_addr(fd); - skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); + buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); + swa = (struct dpaa2_eth_swa *)buffer_start; if (fd_format == dpaa2_fd_single) { - skb = *skbh; - buffer_start = (unsigned char *)skbh; - /* Accessing the skb buffer is safe before dma unmap, because - * we didn't map the actual skb shell. 
- */ - dma_unmap_single(dev, fd_addr, - skb_tail_pointer(skb) - buffer_start, - DMA_BIDIRECTIONAL); + if (swa->type == DPAA2_ETH_SWA_SINGLE) { + skb = swa->single.skb; + /* Accessing the skb buffer is safe before dma unmap, + * because we didn't map the actual skb shell. + */ + dma_unmap_single(dev, fd_addr, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + } else { + WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type"); + dma_unmap_single(dev, fd_addr, swa->xdp.dma_size, + DMA_BIDIRECTIONAL); + } } else if (fd_format == dpaa2_fd_sg) { - swa = (struct dpaa2_eth_swa *)skbh; - skb = swa->skb; + skb = swa->sg.skb; /* Unmap the scatterlist */ - dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL); - kfree(swa->scl); + dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, + DMA_BIDIRECTIONAL); + kfree(swa->sg.scl); /* Unmap the SGT buffer */ - dma_unmap_single(dev, fd_addr, swa->sgt_size, + dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, DMA_BIDIRECTIONAL); } else { netdev_dbg(priv->net_dev, "Invalid FD format\n"); return; } + if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) { + fq->dq_frames++; + fq->dq_bytes += fd_len; + } + + if (swa->type == DPAA2_ETH_SWA_XDP) { + xdp_return_frame(swa->xdp.xdpf); + return; + } + /* Get the timestamp value */ if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { struct skb_shared_hwtstamps shhwtstamps; - __le64 *ts = dpaa2_get_ts(skbh, true); + __le64 *ts = dpaa2_get_ts(buffer_start, true); u64 ns; memset(&shhwtstamps, 0, sizeof(shhwtstamps)); @@ -709,10 +740,10 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, /* Free SGT buffer allocated on tx */ if (fd_format != dpaa2_fd_single) - skb_free_frag(skbh); + skb_free_frag(buffer_start); /* Move on with skb release */ - dev_kfree_skb(skb); + napi_consume_skb(skb, in_napi); } static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) @@ -785,9 +816,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) queue_mapping = skb_get_queue_mapping(skb); fq = &priv->fq[queue_mapping]; for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { - err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, - priv->tx_qdid, 0, - fq->tx_qdbin, &fd); + err = priv->enqueue(priv, fq, &fd, 0); if (err != -EBUSY) break; } @@ -795,7 +824,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) if (unlikely(err < 0)) { percpu_stats->tx_errors++; /* Clean up everything, including freeing the skb */ - free_tx_fd(priv, &fd); + free_tx_fd(priv, fq, &fd, false); } else { fd_len = dpaa2_fd_get_len(&fd); percpu_stats->tx_packets++; @@ -832,12 +861,9 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, percpu_extras->tx_conf_frames++; percpu_extras->tx_conf_bytes += fd_len; - fq->dq_frames++; - fq->dq_bytes += fd_len; - /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; - free_tx_fd(priv, fd); + free_tx_fd(priv, fq, fd, true); if (likely(!fd_errors)) return; @@ -903,7 +929,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, { struct device *dev = priv->net_dev->dev.parent; u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; - void *buf; + struct page *page; dma_addr_t addr; int i, err; @@ -911,14 +937,16 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* Allocate buffer visible to WRIOP + skb shared info + * alignment padding */ - buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); - if (unlikely(!buf)) + /* allocate one page for each Rx buffer. 
WRIOP sees + * the entire page except for a tailroom reserved for + * skb shared info + */ + page = dev_alloc_pages(0); + if (!page) goto err_alloc; - buf = PTR_ALIGN(buf, priv->rx_buf_align); - - addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, - DMA_BIDIRECTIONAL); + addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, addr))) goto err_map; @@ -926,7 +954,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, /* tracing point */ trace_dpaa2_eth_buf_seed(priv->net_dev, - buf, dpaa2_eth_buf_raw_size(priv), + page, DPAA2_ETH_RX_BUF_RAW_SIZE, addr, DPAA2_ETH_RX_BUF_SIZE, bpid); } @@ -948,7 +976,7 @@ release_bufs: return i; err_map: - skb_free_frag(buf); + __free_pages(page, 0); err_alloc: /* If we managed to allocate at least some buffers, * release them to hardware @@ -1083,6 +1111,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) int err; ch = container_of(napi, struct dpaa2_eth_channel, napi); + ch->xdp.res = 0; priv = ch->priv; do { @@ -1128,7 +1157,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) work_done = max(rx_cleaned, 1); out: - if (txc_fq) { + if (txc_fq && txc_fq->dq_frames) { nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); netdev_tx_completed_queue(nq, txc_fq->dq_frames, txc_fq->dq_bytes); @@ -1136,6 +1165,9 @@ out: txc_fq->dq_bytes = 0; } + if (ch->xdp.res & XDP_REDIRECT) + xdp_do_flush_map(); + return work_done; } @@ -1243,34 +1275,36 @@ enable_err: return err; } -/* The DPIO store must be empty when we call this, - * at the end of every NAPI cycle. - */ -static u32 drain_channel(struct dpaa2_eth_channel *ch) +/* Total number of in-flight frames on ingress queues */ +static u32 ingress_fq_count(struct dpaa2_eth_priv *priv) { - u32 drained = 0, total = 0; + struct dpaa2_eth_fq *fq; + u32 fcnt = 0, bcnt = 0, total = 0; + int i, err; - do { - pull_channel(ch); - drained = consume_frames(ch, NULL); - total += drained; - } while (drained); + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); + if (err) { + netdev_warn(priv->net_dev, "query_fq_count failed"); + break; + } + total += fcnt; + } return total; } -static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) +static void wait_for_fq_empty(struct dpaa2_eth_priv *priv) { - struct dpaa2_eth_channel *ch; - int i; - u32 drained = 0; - - for (i = 0; i < priv->num_channels; i++) { - ch = priv->channel[i]; - drained += drain_channel(ch); - } + int retries = 10; + u32 pending; - return drained; + do { + pending = ingress_fq_count(priv); + if (pending) + msleep(100); + } while (pending && --retries); } static int dpaa2_eth_stop(struct net_device *net_dev) @@ -1278,14 +1312,22 @@ static int dpaa2_eth_stop(struct net_device *net_dev) struct dpaa2_eth_priv *priv = netdev_priv(net_dev); int dpni_enabled = 0; int retries = 10; - u32 drained; netif_tx_stop_all_queues(net_dev); netif_carrier_off(net_dev); - /* Loop while dpni_disable() attempts to drain the egress FQs - * and confirm them back to us. + /* On dpni_disable(), the MC firmware will: + * - stop MAC Rx and wait for all Rx frames to be enqueued to software + * - cut off WRIOP dequeues from egress FQs and wait until transmission + * of all in flight Tx frames is finished (and corresponding Tx conf + * frames are enqueued back to software) + * + * Before calling dpni_disable(), we wait for all Tx frames to arrive + * on WRIOP. 
After it finishes, wait until all remaining frames on Rx + * and Tx conf queues are consumed on NAPI poll. */ + msleep(500); + do { dpni_disable(priv->mc_io, 0, priv->mc_token); dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); @@ -1300,19 +1342,9 @@ static int dpaa2_eth_stop(struct net_device *net_dev) */ } - /* Wait for NAPI to complete on every core and disable it. - * In particular, this will also prevent NAPI from being rescheduled if - * a new CDAN is serviced, effectively discarding the CDAN. We therefore - * don't even need to disarm the channels, except perhaps for the case - * of a huge coalescing value. - */ + wait_for_fq_empty(priv); disable_ch_napi(priv); - /* Manually drain the Rx and TxConf queues */ - drained = drain_ingress_frames(priv); - if (drained) - netdev_dbg(net_dev, "Drained %d frames.\n", drained); - /* Empty the buffer pool */ drain_pool(priv); @@ -1730,6 +1762,105 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) return 0; } +static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev, + struct xdp_frame *xdpf) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + unsigned int needed_headroom; + struct dpaa2_eth_swa *swa; + struct dpaa2_eth_fq *fq; + struct dpaa2_fd fd; + void *buffer_start, *aligned_start; + dma_addr_t addr; + int err, i; + + /* We require a minimum headroom to be able to transmit the frame. + * Otherwise return an error and let the original net_device handle it + */ + needed_headroom = dpaa2_eth_needed_headroom(priv, NULL); + if (xdpf->headroom < needed_headroom) + return -EINVAL; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + /* Setup the FD fields */ + memset(&fd, 0, sizeof(fd)); + + /* Align FD address, if possible */ + buffer_start = xdpf->data - needed_headroom; + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, + DPAA2_ETH_TX_BUF_ALIGN); + if (aligned_start >= xdpf->data - xdpf->headroom) + buffer_start = aligned_start; + + swa = (struct dpaa2_eth_swa *)buffer_start; + /* fill in necessary fields here */ + swa->type = DPAA2_ETH_SWA_XDP; + swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; + swa->xdp.xdpf = xdpf; + + addr = dma_map_single(dev, buffer_start, + swa->xdp.dma_size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) { + percpu_stats->tx_dropped++; + return -ENOMEM; + } + + dpaa2_fd_set_addr(&fd, addr); + dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start); + dpaa2_fd_set_len(&fd, xdpf->len); + dpaa2_fd_set_format(&fd, dpaa2_fd_single); + dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); + + fq = &priv->fq[smp_processor_id()]; + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { + err = priv->enqueue(priv, fq, &fd, 0); + if (err != -EBUSY) + break; + } + percpu_extras->tx_portal_busy += i; + if (unlikely(err < 0)) { + percpu_stats->tx_errors++; + /* let the Rx device handle the cleanup */ + return err; + } + + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); + + return 0; +} + +static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, + struct xdp_frame **frames, u32 flags) +{ + int drops = 0; + int i, err; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (!netif_running(net_dev)) + return -ENETDOWN; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + + err = 
dpaa2_eth_xdp_xmit_frame(net_dev, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + return n - drops; +} + static const struct net_device_ops dpaa2_eth_ops = { .ndo_open = dpaa2_eth_open, .ndo_start_xmit = dpaa2_eth_tx, @@ -1741,6 +1872,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_do_ioctl = dpaa2_eth_ioctl, .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, + .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, }; static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) @@ -1902,7 +2034,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv) /* Register the new context */ channel->dpio = dpaa2_io_service_select(i); - err = dpaa2_io_service_register(channel->dpio, nctx); + err = dpaa2_io_service_register(channel->dpio, nctx, dev); if (err) { dev_dbg(dev, "No affine DPIO for cpu %d\n", i); /* If no affine DPIO for this core, there's probably @@ -1942,7 +2074,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv) return 0; err_set_cdan: - dpaa2_io_service_deregister(channel->dpio, nctx); + dpaa2_io_service_deregister(channel->dpio, nctx, dev); err_service_reg: free_channel(priv, channel); err_alloc_ch: @@ -1962,13 +2094,14 @@ err_alloc_ch: static void free_dpio(struct dpaa2_eth_priv *priv) { - int i; + struct device *dev = priv->net_dev->dev.parent; struct dpaa2_eth_channel *ch; + int i; /* deregister CDAN notifications and free channels */ for (i = 0; i < priv->num_channels; i++) { ch = priv->channel[i]; - dpaa2_io_service_deregister(ch->dpio, &ch->nctx); + dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); free_channel(priv, ch); } } @@ -2134,6 +2267,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) { struct device *dev = priv->net_dev->dev.parent; struct dpni_buffer_layout buf_layout = {0}; + u16 rx_buf_align; int err; /* We need to check for WRIOP version 1.0.0, but depending on the MC @@ -2142,9 +2276,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) */ if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) - priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; else - priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; /* tx buffer */ buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; @@ -2184,7 +2318,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) /* rx buffer */ buf_layout.pass_frame_status = true; buf_layout.pass_parser_result = true; - buf_layout.data_align = priv->rx_buf_align; + buf_layout.data_align = rx_buf_align; buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); buf_layout.private_data_size = 0; buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | @@ -2202,6 +2336,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv) return 0; } +#define DPNI_ENQUEUE_FQID_VER_MAJOR 7 +#define DPNI_ENQUEUE_FQID_VER_MINOR 9 + +static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, u8 prio) +{ + return dpaa2_io_service_enqueue_qd(fq->channel->dpio, + priv->tx_qdid, prio, + fq->tx_qdbin, fd); +} + +static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, + u8 prio __always_unused) +{ + return dpaa2_io_service_enqueue_fq(fq->channel->dpio, + fq->tx_fqid, fd); +} + +static void set_enqueue_mode(struct dpaa2_eth_priv *priv) +{ + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, + 
DPNI_ENQUEUE_FQID_VER_MINOR) < 0) + priv->enqueue = dpaa2_eth_enqueue_qd; + else + priv->enqueue = dpaa2_eth_enqueue_fq; +} + /* Configure the DPNI object this interface is associated with */ static int setup_dpni(struct fsl_mc_device *ls_dev) { @@ -2255,6 +2419,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) if (err) goto close; + set_enqueue_mode(priv); + priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * dpaa2_eth_fs_count(priv), GFP_KERNEL); if (!priv->cls_rules) @@ -2302,9 +2468,14 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, queue.destination.type = DPNI_DEST_DPCON; queue.destination.priority = 1; queue.user_context = (u64)(uintptr_t)fq; + queue.flc.stash_control = 1; + queue.flc.value &= 0xFFFFFFFFFFFFFFC0; + /* 01 01 00 - data, annotation, flow context */ + queue.flc.value |= 0x14; err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_RX, 0, fq->flowid, - DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | + DPNI_QUEUE_OPT_FLC, &queue); if (err) { dev_err(dev, "dpni_set_queue(RX) failed\n"); @@ -2320,6 +2491,21 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv, return err; } + /* xdp_rxq setup */ + err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, + fq->flowid); + if (err) { + dev_err(dev, "xdp_rxq_info_reg failed\n"); + return err; + } + + err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, NULL); + if (err) { + dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); + return err; + } + return 0; } @@ -2339,6 +2525,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv, } fq->tx_qdbin = qid.qdbin; + fq->tx_fqid = qid.fqid; err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, @@ -3083,6 +3270,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) goto err_netdev_reg; } +#ifdef CONFIG_DEBUG_FS + dpaa2_dbg_add(priv); +#endif + dev_info(dev, "Probed interface %s\n", net_dev->name); return 0; @@ -3126,6 +3317,9 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) net_dev = dev_get_drvdata(dev); priv = netdev_priv(net_dev); +#ifdef CONFIG_DEBUG_FS + dpaa2_dbg_remove(priv); +#endif unregister_netdev(net_dev); if (priv->do_link_poll) @@ -3170,4 +3364,25 @@ static struct fsl_mc_driver dpaa2_eth_driver = { .match_id_table = dpaa2_eth_match_id_table }; -module_fsl_mc_driver(dpaa2_eth_driver); +static int __init dpaa2_eth_driver_init(void) +{ + int err; + + dpaa2_eth_dbg_init(); + err = fsl_mc_driver_register(&dpaa2_eth_driver); + if (err) { + dpaa2_eth_dbg_exit(); + return err; + } + + return 0; +} + +static void __exit dpaa2_eth_driver_exit(void) +{ + dpaa2_eth_dbg_exit(); + fsl_mc_driver_unregister(&dpaa2_eth_driver); +} + +module_init(dpaa2_eth_driver_init); +module_exit(dpaa2_eth_driver_exit); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 69c965de192b..7879622aa3e6 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -16,6 +16,7 @@ #include "dpni-cmd.h" #include "dpaa2-eth-trace.h" +#include "dpaa2-eth-debugfs.h" #define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0) @@ -52,7 +53,8 @@ */ #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) #define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) -#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE +#define DPAA2_ETH_REFILL_THRESH \ + 
(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD) /* Maximum number of buffers that can be acquired/released through a single * QBMan command @@ -62,9 +64,11 @@ /* Hardware requires alignment for ingress/egress buffer addresses */ #define DPAA2_ETH_TX_BUF_ALIGN 64 -#define DPAA2_ETH_RX_BUF_SIZE 2048 -#define DPAA2_ETH_SKB_SIZE \ - (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define DPAA2_ETH_RX_BUF_RAW_SIZE PAGE_SIZE +#define DPAA2_ETH_RX_BUF_TAILROOM \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define DPAA2_ETH_RX_BUF_SIZE \ + (DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM) /* Hardware annotation area in RX/TX buffers */ #define DPAA2_ETH_RX_HWA_SIZE 64 @@ -85,12 +89,33 @@ */ #define DPAA2_ETH_SWA_SIZE 64 +/* We store different information in the software annotation area of a Tx frame + * based on what type of frame it is + */ +enum dpaa2_eth_swa_type { + DPAA2_ETH_SWA_SINGLE, + DPAA2_ETH_SWA_SG, + DPAA2_ETH_SWA_XDP, +}; + /* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ struct dpaa2_eth_swa { - struct sk_buff *skb; - struct scatterlist *scl; - int num_sg; - int sgt_size; + enum dpaa2_eth_swa_type type; + union { + struct { + struct sk_buff *skb; + } single; + struct { + struct sk_buff *skb; + struct scatterlist *scl; + int num_sg; + int sgt_size; + } sg; + struct { + int dma_size; + struct xdp_frame *xdpf; + } xdp; + }; }; /* Annotation valid bits in FD FRC */ @@ -253,6 +278,7 @@ struct dpaa2_eth_ch_stats { __u64 xdp_drop; __u64 xdp_tx; __u64 xdp_tx_err; + __u64 xdp_redirect; }; /* Maximum number of queues associated with a DPNI */ @@ -273,6 +299,7 @@ struct dpaa2_eth_priv; struct dpaa2_eth_fq { u32 fqid; u32 tx_qdbin; + u32 tx_fqid; u16 flowid; int target_cpu; u32 dq_frames; @@ -291,6 +318,7 @@ struct dpaa2_eth_ch_xdp { struct bpf_prog *prog; u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD]; int drop_cnt; + unsigned int res; }; struct dpaa2_eth_channel { @@ -305,6 +333,7 @@ struct dpaa2_eth_channel { int buf_count; struct dpaa2_eth_ch_stats stats; struct dpaa2_eth_ch_xdp xdp; + struct xdp_rxq_info xdp_rxq; }; struct dpaa2_eth_dist_fields { @@ -325,6 +354,9 @@ struct dpaa2_eth_priv { u8 num_fqs; struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; + int (*enqueue)(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq, + struct dpaa2_fd *fd, u8 prio); u8 num_channels; struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; @@ -342,7 +374,6 @@ struct dpaa2_eth_priv { bool rx_tstamp; /* Rx timestamping enabled */ u16 tx_qdid; - u16 rx_buf_align; struct fsl_mc_io *mc_io; /* Cores which have an affine DPIO/DPCON. 
* This is the cpu set on which Rx and Tx conf frames are processed @@ -365,6 +396,9 @@ struct dpaa2_eth_priv { struct dpaa2_eth_cls_rule *cls_rules; u8 rx_cls_enabled; struct bpf_prog *xdp_prog; +#ifdef CONFIG_DEBUG_FS + struct dpaa2_debugfs dbg; +#endif }; #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ @@ -405,26 +439,27 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv, #define dpaa2_eth_fs_count(priv) \ ((priv)->dpni_attrs.fs_entries) +/* We have exactly one {Rx, Tx conf} queue per channel */ +#define dpaa2_eth_queue_count(priv) \ + ((priv)->num_channels) + enum dpaa2_eth_rx_dist { DPAA2_ETH_RX_DIST_HASH, DPAA2_ETH_RX_DIST_CLS }; -/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around - * the buffer also needs space for its shared info struct, and we need - * to allocate enough to accommodate hardware alignment restrictions - */ -static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv) -{ - return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align; -} - static inline unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, struct sk_buff *skb) { unsigned int headroom = DPAA2_ETH_SWA_SIZE; + /* If we don't have an skb (e.g. XDP buffer), we only need space for + * the software annotation area + */ + if (!skb) + return headroom; + /* For non-linear skbs we have no headroom requirement, as we build a * SG frame with a newly allocated SGT buffer */ @@ -443,14 +478,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, */ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) { - return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - - DPAA2_ETH_RX_HWA_SIZE; -} - -/* We have exactly one {Rx, Tx conf} queue per channel */ -static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) -{ - return priv->num_channels; + return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE; } int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index a7389e722c49..591dfcf76adb 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -48,6 +48,7 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { "[drv] xdp drop", "[drv] xdp tx", "[drv] xdp tx errors", + "[drv] xdp redirect", /* FQ stats */ "[qbman] rx pending frames", "[qbman] rx pending bytes", diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig new file mode 100644 index 000000000000..8429f5c1d810 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +config FSL_ENETC + tristate "ENETC PF driver" + depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) + help + This driver supports NXP ENETC gigabit ethernet controller PCIe + physical function (PF) devices, managing ENETC Ports at a privileged + level. + + If compiled as module (M), the module name is fsl-enetc. + +config FSL_ENETC_VF + tristate "ENETC VF driver" + depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST) + help + This driver supports NXP ENETC gigabit ethernet controller PCIe + virtual function (VF) devices enabled by the ENETC PF driver. + + If compiled as module (M), the module name is fsl-enetc-vf. 
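# (Illustrative note) Assuming an ARCH_LAYERSCAPE kernel with PCI and PCI_MSI enabled, a config fragment such as the following would build both drivers as the modules named in the help texts above (fsl-enetc.ko and fsl-enetc-vf.ko):
#   CONFIG_FSL_ENETC=m
#   CONFIG_FSL_ENETC_VF=m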
+ +config FSL_ENETC_PTP_CLOCK + tristate "ENETC PTP clock driver" + depends on PTP_1588_CLOCK_QORIQ && (FSL_ENETC || FSL_ENETC_VF) + default y + help + This driver adds support for using the ENETC 1588 timer + as a PTP clock. This clock is only useful if your PTP + programs are getting hardware time stamps on the PTP Ethernet + packets using the SO_TIMESTAMPING API. + + If compiled as module (M), the module name is fsl-enetc-ptp. diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile new file mode 100644 index 000000000000..7139e414dccf --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o +fsl-enetc-$(CONFIG_FSL_ENETC) += enetc.o enetc_cbdr.o enetc_ethtool.o \ + enetc_mdio.o +fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o +fsl-enetc-objs := enetc_pf.o $(fsl-enetc-y) + +obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o + +ifeq ($(CONFIG_FSL_ENETC)$(CONFIG_FSL_ENETC_VF), yy) +fsl-enetc-vf-objs := enetc_vf.o +else +fsl-enetc-vf-$(CONFIG_FSL_ENETC_VF) += enetc.o enetc_cbdr.o \ + enetc_ethtool.o +fsl-enetc-vf-objs := enetc_vf.o $(fsl-enetc-vf-y) +endif + +obj-$(CONFIG_FSL_ENETC_PTP_CLOCK) += fsl-enetc-ptp.o +fsl-enetc-ptp-$(CONFIG_FSL_ENETC_PTP_CLOCK) += enetc_ptp.o diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c new file mode 100644 index 000000000000..5bb9eb35d76d --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -0,0 +1,1604 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" +#include +#include +#include +#include + +/* ENETC overhead: optional extension BD + 1 BD gap */ +#define ENETC_TXBDS_NEEDED(val) ((val) + 2) +/* max # of chained Tx BDs is 15, including head and extension BD */ +#define ENETC_MAX_SKB_FRAGS 13 +#define ENETC_TXBDS_MAX_NEEDED ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1) + +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb); + +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_bdr *tx_ring; + int count; + + tx_ring = priv->tx_ring[skb->queue_mapping]; + + if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS)) + if (unlikely(skb_linearize(skb))) + goto drop_packet_err; + + count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */ + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) { + netif_stop_subqueue(ndev, tx_ring->index); + return NETDEV_TX_BUSY; + } + + count = enetc_map_tx_buffs(tx_ring, skb); + if (unlikely(!count)) + goto drop_packet_err; + + if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED) + netif_stop_subqueue(ndev, tx_ring->index); + + return NETDEV_TX_OK; + +drop_packet_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd) +{ + int l3_start, l3_hsize; + u16 l3_flags, l4_flags; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + l4_flags = ENETC_TXBD_L4_TCP; + break; + case offsetof(struct udphdr, check): + l4_flags = ENETC_TXBD_L4_UDP; + break; + default: + skb_checksum_help(skb); + return false; + } + + l3_start = skb_network_offset(skb); + l3_hsize = skb_network_header_len(skb); + + l3_flags = 0; + if (skb->protocol == htons(ETH_P_IPV6)) + l3_flags = ENETC_TXBD_L3_IPV6; + + /* write 
BD fields */ + txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags); + txbd->l4_csoff = l4_flags; + + return true; +} + +static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->is_dma_page) + dma_unmap_page(tx_ring->dev, tx_swbd->dma, + tx_swbd->len, DMA_TO_DEVICE); + else + dma_unmap_single(tx_ring->dev, tx_swbd->dma, + tx_swbd->len, DMA_TO_DEVICE); + tx_swbd->dma = 0; +} + +static void enetc_free_tx_skb(struct enetc_bdr *tx_ring, + struct enetc_tx_swbd *tx_swbd) +{ + if (tx_swbd->dma) + enetc_unmap_tx_buff(tx_ring, tx_swbd); + + if (tx_swbd->skb) { + dev_kfree_skb_any(tx_swbd->skb); + tx_swbd->skb = NULL; + } +} + +static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) +{ + struct enetc_tx_swbd *tx_swbd; + struct skb_frag_struct *frag; + int len = skb_headlen(skb); + union enetc_tx_bd temp_bd; + union enetc_tx_bd *txbd; + bool do_vlan, do_tstamp; + int i, count = 0; + unsigned int f; + dma_addr_t dma; + u8 flags = 0; + + i = tx_ring->next_to_use; + txbd = ENETC_TXBD(*tx_ring, i); + prefetchw(txbd); + + dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tx_ring->dev, dma))) + goto dma_err; + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + temp_bd.lstatus = 0; + + tx_swbd = &tx_ring->tx_swbd[i]; + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 0; + count++; + + do_vlan = skb_vlan_tag_present(skb); + do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; + + if (do_vlan || do_tstamp) + flags |= ENETC_TXBD_FLAGS_EX; + + if (enetc_tx_csum(skb, &temp_bd)) + flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS; + + /* first BD needs frm_len and offload flags set */ + temp_bd.frm_len = cpu_to_le16(skb->len); + temp_bd.flags = flags; + + if (flags & ENETC_TXBD_FLAGS_EX) { + u8 e_flags = 0; + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + /* add extension BD for VLAN and/or timestamping */ + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + if (do_vlan) { + temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb)); + temp_bd.ext.tpid = 0; /* < C-TAG */ + e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS; + } + + if (do_tstamp) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP; + } + + temp_bd.ext.e_flags = e_flags; + count++; + } + + frag = &skb_shinfo(skb)->frags[0]; + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) { + len = skb_frag_size(frag); + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len, + DMA_TO_DEVICE); + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_err; + + *txbd = temp_bd; + enetc_clear_tx_bd(&temp_bd); + + flags = 0; + tx_swbd++; + txbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + txbd = ENETC_TXBD(*tx_ring, 0); + } + prefetchw(txbd); + + temp_bd.addr = cpu_to_le64(dma); + temp_bd.buf_len = cpu_to_le16(len); + + tx_swbd->dma = dma; + tx_swbd->len = len; + tx_swbd->is_dma_page = 1; + count++; + } + + /* last BD needs 'F' bit set */ + flags |= ENETC_TXBD_FLAGS_F; + temp_bd.flags = flags; + *txbd = temp_bd; + + tx_ring->tx_swbd[i].skb = skb; + + enetc_bdr_idx_inc(tx_ring, &i); + tx_ring->next_to_use = i; + + /* let H/W know BD ring has been updated */ + enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */ + + return count; + +dma_err: + dev_err(tx_ring->dev, "DMA map 
error"); + + do { + tx_swbd = &tx_ring->tx_swbd[i]; + enetc_free_tx_skb(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } while (count--); + + return 0; +} + +static irqreturn_t enetc_msix(int irq, void *data) +{ + struct enetc_int_vector *v = data; + int i; + + /* disable interrupts */ + enetc_wr_reg(v->rbier, 0); + + for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); + + napi_schedule_irqoff(&v->napi); + + return IRQ_HANDLED; +} + +static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget); +static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit); + +static int enetc_poll(struct napi_struct *napi, int budget) +{ + struct enetc_int_vector + *v = container_of(napi, struct enetc_int_vector, napi); + bool complete = true; + int work_done; + int i; + + for (i = 0; i < v->count_tx_rings; i++) + if (!enetc_clean_tx_ring(&v->tx_ring[i], budget)) + complete = false; + + work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); + if (work_done == budget) + complete = false; + + if (!complete) + return budget; + + napi_complete_done(napi, work_done); + + /* enable interrupts */ + enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); + + for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings) + enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), + ENETC_TBIER_TXTIE); + + return work_done; +} + +static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci) +{ + int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK; + + return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi; +} + +static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget) +{ + struct net_device *ndev = tx_ring->ndev; + int tx_frm_cnt = 0, tx_byte_cnt = 0; + struct enetc_tx_swbd *tx_swbd; + int i, bds_to_clean; + + i = tx_ring->next_to_clean; + tx_swbd = &tx_ring->tx_swbd[i]; + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + + while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { + bool is_eof = !!tx_swbd->skb; + + enetc_unmap_tx_buff(tx_ring, tx_swbd); + if (is_eof) { + napi_consume_skb(tx_swbd->skb, napi_budget); + tx_swbd->skb = NULL; + } + + tx_byte_cnt += tx_swbd->len; + + bds_to_clean--; + tx_swbd++; + i++; + if (unlikely(i == tx_ring->bd_count)) { + i = 0; + tx_swbd = tx_ring->tx_swbd; + } + + /* BD iteration loop end */ + if (is_eof) { + tx_frm_cnt++; + /* re-arm interrupt source */ + enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) | + BIT(16 + tx_ring->index)); + } + + if (unlikely(!bds_to_clean)) + bds_to_clean = enetc_bd_ready_count(tx_ring, i); + } + + tx_ring->next_to_clean = i; + tx_ring->stats.packets += tx_frm_cnt; + tx_ring->stats.bytes += tx_byte_cnt; + + if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) && + __netif_subqueue_stopped(ndev, tx_ring->index) && + (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) { + netif_wake_subqueue(ndev, tx_ring->index); + } + + return tx_frm_cnt != ENETC_DEFAULT_TX_WORK; +} + +static bool enetc_new_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + struct page *page; + dma_addr_t addr; + + page = dev_alloc_page(); + if (unlikely(!page)) + return false; + + addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rx_ring->dev, addr))) { + __free_page(page); + + return false; + } + + rx_swbd->dma = addr; + rx_swbd->page = page; + rx_swbd->page_offset = ENETC_RXB_PAD; + + return true; +} + +static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, 
const int buff_cnt) +{ + struct enetc_rx_swbd *rx_swbd; + union enetc_rx_bd *rxbd; + int i, j; + + i = rx_ring->next_to_use; + rx_swbd = &rx_ring->rx_swbd[i]; + rxbd = ENETC_RXBD(*rx_ring, i); + + for (j = 0; j < buff_cnt; j++) { + /* try reuse page */ + if (unlikely(!rx_swbd->page)) { + if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) { + rx_ring->stats.rx_alloc_errs++; + break; + } + } + + /* update RxBD */ + rxbd->w.addr = cpu_to_le64(rx_swbd->dma + + rx_swbd->page_offset); + /* clear 'R" as well */ + rxbd->r.lstatus = 0; + + rx_swbd++; + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rx_swbd = rx_ring->rx_swbd; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + } + + if (likely(j)) { + rx_ring->next_to_alloc = i; /* keep track from page reuse */ + rx_ring->next_to_use = i; + /* update ENETC's consumer index */ + enetc_wr_reg(rx_ring->rcir, i); + } + + return j; +} + +static void enetc_get_offloads(struct enetc_bdr *rx_ring, + union enetc_rx_bd *rxbd, struct sk_buff *skb) +{ + /* TODO: add tstamp, hashing */ + if (rx_ring->ndev->features & NETIF_F_RXCSUM) { + u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum); + + skb->csum = csum_unfold((__force __sum16)~htons(inet_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + } + + /* copy VLAN to skb, if one is extracted, for now we assume it's a + * standard TPID, but HW also supports custom values + */ + if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + le16_to_cpu(rxbd->r.vlan_opt)); +} + +static void enetc_process_skb(struct enetc_bdr *rx_ring, + struct sk_buff *skb) +{ + skb_record_rx_queue(skb, rx_ring->index); + skb->protocol = eth_type_trans(skb, rx_ring->ndev); +} + +static bool enetc_page_reusable(struct page *page) +{ + return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1); +} + +static void enetc_reuse_page(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *old) +{ + struct enetc_rx_swbd *new; + + new = &rx_ring->rx_swbd[rx_ring->next_to_alloc]; + + /* next buf that may reuse a page */ + enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc); + + /* copy page reference */ + *new = *old; +} + +static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + size, DMA_FROM_DEVICE); + return rx_swbd; +} + +static void enetc_put_rx_buff(struct enetc_bdr *rx_ring, + struct enetc_rx_swbd *rx_swbd) +{ + if (likely(enetc_page_reusable(rx_swbd->page))) { + rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE; + page_ref_inc(rx_swbd->page); + + enetc_reuse_page(rx_ring, rx_swbd); + + /* sync for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma, + rx_swbd->page_offset, + ENETC_RXB_DMA_SIZE, + DMA_FROM_DEVICE); + } else { + dma_unmap_page(rx_ring->dev, rx_swbd->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + rx_swbd->page = NULL; +} + +static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring, + int i, u16 size) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + struct sk_buff *skb; + void *ba; + + ba = page_address(rx_swbd->page) + rx_swbd->page_offset; + skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE); + if (unlikely(!skb)) { + rx_ring->stats.rx_alloc_errs++; + return NULL; + } + + skb_reserve(skb, ENETC_RXB_PAD); + __skb_put(skb, size); + + enetc_put_rx_buff(rx_ring, rx_swbd); + + return skb; +} + +static void 
enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i, + u16 size, struct sk_buff *skb) +{ + struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page, + rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE); + + enetc_put_rx_buff(rx_ring, rx_swbd); +} + +#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */ + +static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, + struct napi_struct *napi, int work_limit) +{ + int rx_frm_cnt = 0, rx_byte_cnt = 0; + int cleaned_cnt, i; + + cleaned_cnt = enetc_bd_unused(rx_ring); + /* next descriptor to process */ + i = rx_ring->next_to_clean; + + while (likely(rx_frm_cnt < work_limit)) { + union enetc_rx_bd *rxbd; + struct sk_buff *skb; + u32 bd_status; + u16 size; + + if (cleaned_cnt >= ENETC_RXBD_BUNDLE) { + int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt); + + cleaned_cnt -= count; + } + + rxbd = ENETC_RXBD(*rx_ring, i); + bd_status = le32_to_cpu(rxbd->r.lstatus); + if (!bd_status) + break; + + enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index)); + dma_rmb(); /* for reading other rxbd fields */ + size = le16_to_cpu(rxbd->r.buf_len); + skb = enetc_map_rx_buff_to_skb(rx_ring, i, size); + if (!skb) + break; + + enetc_get_offloads(rx_ring, rxbd, skb); + + cleaned_cnt++; + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + + if (unlikely(bd_status & + ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) { + dev_kfree_skb(skb); + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + dma_rmb(); + bd_status = le32_to_cpu(rxbd->r.lstatus); + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + } + + rx_ring->ndev->stats.rx_dropped++; + rx_ring->ndev->stats.rx_errors++; + + break; + } + + /* not last BD in frame? 
*/ + while (!(bd_status & ENETC_RXBD_LSTATUS_F)) { + bd_status = le32_to_cpu(rxbd->r.lstatus); + size = ENETC_RXB_DMA_SIZE; + + if (bd_status & ENETC_RXBD_LSTATUS_F) { + dma_rmb(); + size = le16_to_cpu(rxbd->r.buf_len); + } + + enetc_add_rx_buff_to_skb(rx_ring, i, size, skb); + + cleaned_cnt++; + rxbd++; + i++; + if (unlikely(i == rx_ring->bd_count)) { + i = 0; + rxbd = ENETC_RXBD(*rx_ring, 0); + } + } + + rx_byte_cnt += skb->len; + + enetc_process_skb(rx_ring, skb); + + napi_gro_receive(napi, skb); + + rx_frm_cnt++; + } + + rx_ring->next_to_clean = i; + + rx_ring->stats.packets += rx_frm_cnt; + rx_ring->stats.bytes += rx_byte_cnt; + + return rx_frm_cnt; +} + +/* Probing and Init */ +#define ENETC_MAX_RFS_SIZE 64 +void enetc_get_si_caps(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + u32 val; + + /* find out how many of various resources we have to work with */ + val = enetc_rd(hw, ENETC_SICAPR0); + si->num_rx_rings = (val >> 16) & 0xff; + si->num_tx_rings = val & 0xff; + + val = enetc_rd(hw, ENETC_SIRFSCAPR); + si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val); + si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE); + + si->num_rss = 0; + val = enetc_rd(hw, ENETC_SIPCAPR0); + if (val & ENETC_SIPCAPR0_RSS) { + val = enetc_rd(hw, ENETC_SIRSSCAPR); + si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val); + } +} + +static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size) +{ + r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size, + &r->bd_dma_base, GFP_KERNEL); + if (!r->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(r->bd_dma_base, 128)) { + dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base, + r->bd_dma_base); + return -EINVAL; + } + + return 0; +} + +static int enetc_alloc_txbdr(struct enetc_bdr *txr) +{ + int err; + + txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd)); + if (!txr->tx_swbd) + return -ENOMEM; + + err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); + if (err) { + vfree(txr->tx_swbd); + return err; + } + + txr->next_to_clean = 0; + txr->next_to_use = 0; + + return 0; +} + +static void enetc_free_txbdr(struct enetc_bdr *txr) +{ + int size, i; + + for (i = 0; i < txr->bd_count; i++) + enetc_free_tx_skb(txr, &txr->tx_swbd[i]); + + size = txr->bd_count * sizeof(union enetc_tx_bd); + + dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); + txr->bd_base = NULL; + + vfree(txr->tx_swbd); + txr->tx_swbd = NULL; +} + +static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) +{ + int i, err; + + for (i = 0; i < priv->num_tx_rings; i++) { + err = enetc_alloc_txbdr(priv->tx_ring[i]); + + if (err) + goto fail; + } + + return 0; + +fail: + while (i-- > 0) + enetc_free_txbdr(priv->tx_ring[i]); + + return err; +} + +static void enetc_free_tx_resources(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_free_txbdr(priv->tx_ring[i]); +} + +static int enetc_alloc_rxbdr(struct enetc_bdr *rxr) +{ + int err; + + rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); + if (!rxr->rx_swbd) + return -ENOMEM; + + err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd)); + if (err) { + vfree(rxr->rx_swbd); + return err; + } + + rxr->next_to_clean = 0; + rxr->next_to_use = 0; + rxr->next_to_alloc = 0; + + return 0; +} + +static void enetc_free_rxbdr(struct enetc_bdr *rxr) +{ + int size; + + size = rxr->bd_count * sizeof(union enetc_rx_bd); + + dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base); + 
rxr->bd_base = NULL; + + vfree(rxr->rx_swbd); + rxr->rx_swbd = NULL; +} + +static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv) +{ + int i, err; + + for (i = 0; i < priv->num_rx_rings; i++) { + err = enetc_alloc_rxbdr(priv->rx_ring[i]); + + if (err) + goto fail; + } + + return 0; + +fail: + while (i-- > 0) + enetc_free_rxbdr(priv->rx_ring[i]); + + return err; +} + +static void enetc_free_rx_resources(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_free_rxbdr(priv->rx_ring[i]); +} + +static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) +{ + int i; + + if (!tx_ring->tx_swbd) + return; + + for (i = 0; i < tx_ring->bd_count; i++) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_skb(tx_ring, tx_swbd); + } + + tx_ring->next_to_clean = 0; + tx_ring->next_to_use = 0; +} + +static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) +{ + int i; + + if (!rx_ring->rx_swbd) + return; + + for (i = 0; i < rx_ring->bd_count; i++) { + struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; + + if (!rx_swbd->page) + continue; + + dma_unmap_page(rx_ring->dev, rx_swbd->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rx_swbd->page); + rx_swbd->page = NULL; + } + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->next_to_alloc = 0; +} + +static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_free_rx_ring(priv->rx_ring[i]); + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_free_tx_ring(priv->tx_ring[i]); +} + +static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base, + GFP_KERNEL); + if (!cbdr->bd_base) + return -ENOMEM; + + /* h/w requires 128B alignment */ + if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) { + dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); + return -EINVAL; + } + + cbdr->next_to_clean = 0; + cbdr->next_to_use = 0; + + return 0; +} + +static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr) +{ + int size = cbdr->bd_count * sizeof(struct enetc_cbd); + + dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base); + cbdr->bd_base = NULL; +} + +static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr) +{ + /* set CBDR cache attributes */ + enetc_wr(hw, ENETC_SICAR2, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + + enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base)); + enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count)); + + enetc_wr(hw, ENETC_SICBDRPIR, 0); + enetc_wr(hw, ENETC_SICBDRCIR, 0); + + /* enable ring */ + enetc_wr(hw, ENETC_SICBDRMR, BIT(31)); + + cbdr->pir = hw->reg + ENETC_SICBDRPIR; + cbdr->cir = hw->reg + ENETC_SICBDRCIR; +} + +static void enetc_clear_cbdr(struct enetc_hw *hw) +{ + enetc_wr(hw, ENETC_SICBDRMR, 0); +} + +static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups) +{ + int *rss_table; + int i; + + rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return -ENOMEM; + + /* Set up RSS table defaults */ + for (i = 0; i < si->num_rss; i++) + rss_table[i] = i % num_groups; + + enetc_set_rss_table(si, rss_table, si->num_rss); + + kfree(rss_table); + + return 0; +} + +static int enetc_configure_si(struct enetc_ndev_priv *priv) +{ + struct enetc_si 
*si = priv->si; + struct enetc_hw *hw = &si->hw; + int err; + + enetc_setup_cbdr(hw, &si->cbd_ring); + /* set SI cache attributes */ + enetc_wr(hw, ENETC_SICAR0, + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); + enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); + /* enable SI */ + enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); + + if (si->num_rss) { + err = enetc_setup_default_rss_table(si, priv->num_rx_rings); + if (err) + return err; + } + + return 0; +} + +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + int cpus = num_online_cpus(); + + priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE; + priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE; + + /* Enable all available TX rings in order to configure as many + * priorities as possible, when needed. + * TODO: Make # of TX rings run-time configurable + */ + priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); + priv->num_tx_rings = si->num_tx_rings; + priv->bdr_int_num = cpus; + + /* SI specific */ + si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; +} + +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + int err; + + err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring); + if (err) + return err; + + priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules), + GFP_KERNEL); + if (!priv->cls_rules) { + err = -ENOMEM; + goto err_alloc_cls; + } + + err = enetc_configure_si(priv); + if (err) + goto err_config_si; + + return 0; + +err_config_si: + kfree(priv->cls_rules); +err_alloc_cls: + enetc_clear_cbdr(&si->hw); + enetc_free_cbdr(priv->dev, &si->cbd_ring); + + return err; +} + +void enetc_free_si_resources(struct enetc_ndev_priv *priv) +{ + struct enetc_si *si = priv->si; + + enetc_clear_cbdr(&si->hw); + enetc_free_cbdr(priv->dev, &si->cbd_ring); + + kfree(priv->cls_rules); +} + +static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int idx = tx_ring->index; + u32 tbmr; + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR0, + lower_32_bits(tx_ring->bd_dma_base)); + + enetc_txbdr_wr(hw, idx, ENETC_TBBAR1, + upper_32_bits(tx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_txbdr_wr(hw, idx, ENETC_TBLENR, + ENETC_RTBLENR_LEN(tx_ring->bd_count)); + + /* clearing PI/CI registers for Tx not supported, adjust sw indexes */ + tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR); + tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); + + /* enable Tx ints by setting pkt thr to 1 */ + enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1); + + tbmr = ENETC_TBMR_EN; + if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) + tbmr |= ENETC_TBMR_VIH; + + /* enable ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); + + tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR); + tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR); + tx_ring->idr = hw->reg + ENETC_SITXIDR; +} + +static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + u32 rbmr; + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, + lower_32_bits(rx_ring->bd_dma_base)); + + enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1, + upper_32_bits(rx_ring->bd_dma_base)); + + WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */ + enetc_rxbdr_wr(hw, idx, ENETC_RBLENR, + ENETC_RTBLENR_LEN(rx_ring->bd_count)); + + enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + + enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); + + /* enable Rx ints by setting pkt thr to 1 */ + 
enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1); + + rbmr = ENETC_RBMR_EN; + if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) + rbmr |= ENETC_RBMR_VTE; + + rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); + rx_ring->idr = hw->reg + ENETC_SIRXIDR; + + enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); + + /* enable ring */ + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); +} + +static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]); +} + +static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) +{ + int idx = rx_ring->index; + + /* disable EN bit on ring */ + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); +} + +static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) +{ + int delay = 8, timeout = 100; + int idx = tx_ring->index; + + /* disable EN bit on ring */ + enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); + + /* wait for busy to clear */ + while (delay < timeout && + enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) { + msleep(delay); + delay *= 2; + } + + if (delay >= timeout) + netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n", + idx); +} + +static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]); + + udelay(1); +} + +static int enetc_setup_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + cpumask_t cpu_mask; + int i, j, err; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + struct enetc_int_vector *v = priv->int_vector[i]; + int entry = ENETC_BDR_INT_BASE_IDX + i; + struct enetc_hw *hw = &priv->si->hw; + + snprintf(v->name, sizeof(v->name), "%s-rxtx%d", + priv->ndev->name, i); + err = request_irq(irq, enetc_msix, 0, v->name, v); + if (err) { + dev_err(priv->dev, "request_irq() failed!\n"); + goto irq_err; + } + + v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); + v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); + + enetc_wr(hw, ENETC_SIMSIRRV(i), entry); + + for (j = 0; j < v->count_tx_rings; j++) { + int idx = v->tx_ring[j].index; + + enetc_wr(hw, ENETC_SIMSITRV(idx), entry); + } + cpumask_clear(&cpu_mask); + cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); + irq_set_affinity_hint(irq, &cpu_mask); + } + + return 0; + +irq_err: + while (i--) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } + + return err; +} + +static void enetc_free_irqs(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i); + + irq_set_affinity_hint(irq, NULL); + free_irq(irq, priv->int_vector[i]); + } +} + +static void enetc_enable_interrupts(struct enetc_ndev_priv *priv) +{ + int i; + + /* enable Tx & Rx event indication */ + for (i = 0; i < priv->num_rx_rings; i++) { + enetc_rxbdr_wr(&priv->si->hw, i, + ENETC_RBIER, ENETC_RBIER_RXTIE); + } + + for (i = 0; i < priv->num_tx_rings; i++) { + enetc_txbdr_wr(&priv->si->hw, i, + ENETC_TBIER, ENETC_TBIER_TXTIE); + } +} + +static void 
enetc_disable_interrupts(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_tx_rings; i++) + enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0); + + for (i = 0; i < priv->num_rx_rings; i++) + enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0); +} + +static void adjust_link(struct net_device *ndev) +{ + struct phy_device *phydev = ndev->phydev; + + phy_print_status(phydev); +} + +static int enetc_phy_connect(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct phy_device *phydev; + + if (!priv->phy_node) + return 0; /* phy-less mode */ + + phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link, + 0, priv->if_mode); + if (!phydev) { + dev_err(&ndev->dev, "could not attach to PHY\n"); + return -ENODEV; + } + + phy_attached_info(phydev); + + return 0; +} + +int enetc_open(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i, err; + + err = enetc_setup_irqs(priv); + if (err) + return err; + + err = enetc_phy_connect(ndev); + if (err) + goto err_phy_connect; + + err = enetc_alloc_tx_resources(priv); + if (err) + goto err_alloc_tx; + + err = enetc_alloc_rx_resources(priv); + if (err) + goto err_alloc_rx; + + enetc_setup_bdrs(priv); + + err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); + if (err) + goto err_set_queues; + + err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings); + if (err) + goto err_set_queues; + + for (i = 0; i < priv->bdr_int_num; i++) + napi_enable(&priv->int_vector[i]->napi); + + enetc_enable_interrupts(priv); + + if (ndev->phydev) + phy_start(ndev->phydev); + else + netif_carrier_on(ndev); + + netif_tx_start_all_queues(ndev); + + return 0; + +err_set_queues: + enetc_free_rx_resources(priv); +err_alloc_rx: + enetc_free_tx_resources(priv); +err_alloc_tx: + if (ndev->phydev) + phy_disconnect(ndev->phydev); +err_phy_connect: + enetc_free_irqs(priv); + + return err; +} + +int enetc_close(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + netif_tx_stop_all_queues(ndev); + + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + } else { + netif_carrier_off(ndev); + } + + for (i = 0; i < priv->bdr_int_num; i++) { + napi_synchronize(&priv->int_vector[i]->napi); + napi_disable(&priv->int_vector[i]->napi); + } + + enetc_disable_interrupts(priv); + enetc_clear_bdrs(priv); + + enetc_free_rxtx_rings(priv); + enetc_free_rx_resources(priv); + enetc_free_tx_resources(priv); + enetc_free_irqs(priv); + + return 0; +} + +struct net_device_stats *enetc_get_stats(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + unsigned long packets = 0, bytes = 0; + int i; + + for (i = 0; i < priv->num_rx_rings; i++) { + packets += priv->rx_ring[i]->stats.packets; + bytes += priv->rx_ring[i]->stats.bytes; + } + + stats->rx_packets = packets; + stats->rx_bytes = bytes; + bytes = 0; + packets = 0; + + for (i = 0; i < priv->num_tx_rings; i++) { + packets += priv->tx_ring[i]->stats.packets; + bytes += priv->tx_ring[i]->stats.bytes; + } + + stats->tx_packets = packets; + stats->tx_bytes = bytes; + + return stats; +} + +static int enetc_set_rss(struct net_device *ndev, int en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings); + + reg = enetc_rd(hw, ENETC_SIMR); + reg &= ~ENETC_SIMR_RSSE; + reg |= (en) ? 
ENETC_SIMR_RSSE : 0; + enetc_wr(hw, ENETC_SIMR, reg); + + return 0; +} + +int enetc_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_RXHASH) + enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH)); + + return 0; +} + +int enetc_alloc_msix(struct enetc_ndev_priv *priv) +{ + struct pci_dev *pdev = priv->si->pdev; + int size, v_tx_rings; + int i, n, err, nvec; + + nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; + /* allocate MSIX for both messaging and Rx/Tx interrupts */ + n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX); + + if (n < 0) + return n; + + if (n != nvec) + return -EPERM; + + /* # of tx rings per int vector */ + v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; + size = sizeof(struct enetc_int_vector) + + sizeof(struct enetc_bdr) * v_tx_rings; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v; + struct enetc_bdr *bdr; + int j; + + v = kzalloc(size, GFP_KERNEL); + if (!v) { + err = -ENOMEM; + goto fail; + } + + priv->int_vector[i] = v; + + netif_napi_add(priv->ndev, &v->napi, enetc_poll, + NAPI_POLL_WEIGHT); + v->count_tx_rings = v_tx_rings; + + for (j = 0; j < v_tx_rings; j++) { + int idx; + + /* default tx ring mapping policy */ + if (priv->bdr_int_num == ENETC_MAX_BDR_INT) + idx = 2 * j + i; /* 2 CPUs */ + else + idx = j + i * v_tx_rings; /* default */ + + __set_bit(idx, &v->tx_rings_map); + bdr = &v->tx_ring[j]; + bdr->index = idx; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->tx_bd_count; + priv->tx_ring[idx] = bdr; + } + + bdr = &v->rx_ring; + bdr->index = i; + bdr->ndev = priv->ndev; + bdr->dev = priv->dev; + bdr->bd_count = priv->rx_bd_count; + priv->rx_ring[i] = bdr; + } + + return 0; + +fail: + while (i--) { + netif_napi_del(&priv->int_vector[i]->napi); + kfree(priv->int_vector[i]); + } + + pci_free_irq_vectors(pdev); + + return err; +} + +void enetc_free_msix(struct enetc_ndev_priv *priv) +{ + int i; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v = priv->int_vector[i]; + + netif_napi_del(&v->napi); + } + + for (i = 0; i < priv->num_rx_rings; i++) + priv->rx_ring[i] = NULL; + + for (i = 0; i < priv->num_tx_rings; i++) + priv->tx_ring[i] = NULL; + + for (i = 0; i < priv->bdr_int_num; i++) { + kfree(priv->int_vector[i]); + priv->int_vector[i] = NULL; + } + + /* disable all MSIX for this device */ + pci_free_irq_vectors(priv->si->pdev); +} + +static void enetc_kfree_si(struct enetc_si *si) +{ + char *p = (char *)si - si->pad; + + kfree(p); +} + +static void enetc_detect_errata(struct enetc_si *si) +{ + if (si->pdev->revision == ENETC_REV1) + si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL | + ENETC_ERR_UCMCSWP; +} + +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv) +{ + struct enetc_si *si, *p; + struct enetc_hw *hw; + size_t alloc_size; + int err, len; + + pcie_flr(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + return err; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + 
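For orientation, the default Tx-ring-to-vector assignment in enetc_alloc_msix() above interleaves rings across the two vectors when bdr_int_num equals ENETC_MAX_BDR_INT, and otherwise hands each vector a contiguous block. A minimal stand-alone sketch of the two index formulas, assuming 8 Tx rings and 2 vectors (hypothetical counts; not part of the patch):

#include <stdio.h>

/* Mirrors the mapping in enetc_alloc_msix(): idx = 2 * j + i when both
 * vectors are in use, idx = j + i * v_tx_rings otherwise.
 */
int main(void)
{
        int bdr_int_num = 2, num_tx_rings = 8;
        int v_tx_rings = num_tx_rings / bdr_int_num;
        int i, j;

        for (i = 0; i < bdr_int_num; i++)
                for (j = 0; j < v_tx_rings; j++) {
                        int idx = (bdr_int_num == 2) ? 2 * j + i
                                                     : j + i * v_tx_rings;

                        printf("vector %d, slot %d -> tx ring %d\n", i, j, idx);
                }

        /* vector 0 serves rings 0,2,4,6 and vector 1 serves rings 1,3,5,7 */
        return 0;
}
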
alloc_size = sizeof(struct enetc_si); + if (sizeof_priv) { + /* align priv to 32B */ + alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN); + alloc_size += sizeof_priv; + } + /* force 32B alignment for enetc_si */ + alloc_size += ENETC_SI_ALIGN - 1; + + p = kzalloc(alloc_size, GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto err_alloc_si; + } + + si = PTR_ALIGN(p, ENETC_SI_ALIGN); + si->pad = (char *)si - (char *)p; + + pci_set_drvdata(pdev, si); + si->pdev = pdev; + hw = &si->hw; + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!hw->reg) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + if (len > ENETC_PORT_BASE) + hw->port = hw->reg + ENETC_PORT_BASE; + if (len > ENETC_GLOBAL_BASE) + hw->global = hw->reg + ENETC_GLOBAL_BASE; + + enetc_detect_errata(si); + + return 0; + +err_ioremap: + enetc_kfree_si(si); +err_alloc_si: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +void enetc_pci_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_hw *hw = &si->hw; + + iounmap(hw->reg); + enetc_kfree_si(si); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h new file mode 100644 index 000000000000..b274135c5103 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "enetc_hw.h" + +#define ENETC_MAC_MAXFRM_SIZE 9600 +#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \ + (ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN)) + +struct enetc_tx_swbd { + struct sk_buff *skb; + dma_addr_t dma; + u16 len; + u16 is_dma_page; +}; + +#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE +#define ENETC_RXB_TRUESIZE 2048 /* PAGE_SIZE >> 1 */ +#define ENETC_RXB_PAD NET_SKB_PAD /* add extra space if needed */ +#define ENETC_RXB_DMA_SIZE \ + (SKB_WITH_OVERHEAD(ENETC_RXB_TRUESIZE) - ENETC_RXB_PAD) + +struct enetc_rx_swbd { + dma_addr_t dma; + struct page *page; + u16 page_offset; +}; + +struct enetc_ring_stats { + unsigned int packets; + unsigned int bytes; + unsigned int rx_alloc_errs; +}; + +#define ENETC_BDR_DEFAULT_SIZE 1024 +#define ENETC_DEFAULT_TX_WORK 256 + +struct enetc_bdr { + struct device *dev; /* for DMA mapping */ + struct net_device *ndev; + void *bd_base; /* points to Rx or Tx BD ring */ + union { + void __iomem *tpir; + void __iomem *rcir; + }; + u16 index; + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + union { + struct enetc_tx_swbd *tx_swbd; + struct enetc_rx_swbd *rx_swbd; + }; + union { + void __iomem *tcir; /* Tx */ + int next_to_alloc; /* Rx */ + }; + void __iomem *idr; /* Interrupt Detect Register pointer */ + + struct enetc_ring_stats stats; + + dma_addr_t bd_dma_base; +} ____cacheline_aligned_in_smp; + +static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i) +{ + if (unlikely(++*i == bdr->bd_count)) + *i = 0; +} + +static inline int enetc_bd_unused(struct enetc_bdr *bdr) +{ + if (bdr->next_to_clean > bdr->next_to_use) + return bdr->next_to_clean - bdr->next_to_use - 1; + + return bdr->bd_count + bdr->next_to_clean - bdr->next_to_use - 1; +} + +/* Control BD ring */ +#define ENETC_CBDR_DEFAULT_SIZE 64 +struct enetc_cbdr { + void 
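The ring accounting above keeps one descriptor as a guard slot, so enetc_bd_unused() never reports the ring completely full from the producer side. A worked, stand-alone example of the wraparound arithmetic, assuming a 1024-entry ring in a hypothetical state (not part of the patch):

#include <stdio.h>

struct bdr_state {              /* only the fields the helper looks at */
        int bd_count;
        int next_to_use;
        int next_to_clean;
};

/* Same arithmetic as enetc_bd_unused() */
static int bd_unused(const struct bdr_state *b)
{
        if (b->next_to_clean > b->next_to_use)
                return b->next_to_clean - b->next_to_use - 1;

        return b->bd_count + b->next_to_clean - b->next_to_use - 1;
}

int main(void)
{
        struct bdr_state b = { .bd_count = 1024, .next_to_use = 1020,
                               .next_to_clean = 4 };

        /* producer is near the wrap point: 1024 + 4 - 1020 - 1 = 7 free BDs */
        printf("%d free BDs\n", bd_unused(&b));
        return 0;
}
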
*bd_base; /* points to Rx or Tx BD ring */ + void __iomem *pir; + void __iomem *cir; + + int bd_count; /* # of BDs */ + int next_to_use; + int next_to_clean; + + dma_addr_t bd_dma_base; +}; + +#define ENETC_TXBD(BDR, i) (&(((union enetc_tx_bd *)((BDR).bd_base))[i])) +#define ENETC_RXBD(BDR, i) (&(((union enetc_rx_bd *)((BDR).bd_base))[i])) + +struct enetc_msg_swbd { + void *vaddr; + dma_addr_t dma; + int size; +}; + +#define ENETC_REV1 0x1 +enum enetc_errata { + ENETC_ERR_TXCSUM = BIT(0), + ENETC_ERR_VLAN_ISOL = BIT(1), + ENETC_ERR_UCMCSWP = BIT(2), +}; + +/* PCI IEP device data */ +struct enetc_si { + struct pci_dev *pdev; + struct enetc_hw hw; + enum enetc_errata errata; + + struct net_device *ndev; /* back ref. */ + + struct enetc_cbdr cbd_ring; + + int num_rx_rings; /* how many rings are available in the SI */ + int num_tx_rings; + int num_fs_entries; + int num_rss; /* number of RSS buckets */ + unsigned short pad; +}; + +#define ENETC_SI_ALIGN 32 + +static inline void *enetc_si_priv(const struct enetc_si *si) +{ + return (char *)si + ALIGN(sizeof(struct enetc_si), ENETC_SI_ALIGN); +} + +static inline bool enetc_si_is_pf(struct enetc_si *si) +{ + return !!(si->hw.port); +} + +#define ENETC_MAX_NUM_TXQS 8 +#define ENETC_INT_NAME_MAX (IFNAMSIZ + 8) + +struct enetc_int_vector { + void __iomem *rbier; + void __iomem *tbier_base; + unsigned long tx_rings_map; + int count_tx_rings; + struct napi_struct napi; + char name[ENETC_INT_NAME_MAX]; + + struct enetc_bdr rx_ring ____cacheline_aligned_in_smp; + struct enetc_bdr tx_ring[0]; +}; + +struct enetc_cls_rule { + struct ethtool_rx_flow_spec fs; + int used; +}; + +#define ENETC_MAX_BDR_INT 2 /* fixed to max # of available cpus */ + +struct enetc_ndev_priv { + struct net_device *ndev; + struct device *dev; /* dma-mapping device */ + struct enetc_si *si; + + int bdr_int_num; /* number of Rx/Tx ring interrupts */ + struct enetc_int_vector *int_vector[ENETC_MAX_BDR_INT]; + u16 num_rx_rings, num_tx_rings; + u16 rx_bd_count, tx_bd_count; + + u16 msg_enable; + + struct enetc_bdr *tx_ring[16]; + struct enetc_bdr *rx_ring[16]; + + struct enetc_cls_rule *cls_rules; + + struct device_node *phy_node; + phy_interface_t if_mode; +}; + +/* Messaging */ + +/* VF-PF set primary MAC address message format */ +struct enetc_msg_cmd_set_primary_mac { + struct enetc_msg_cmd_header header; + struct sockaddr mac; +}; + +#define ENETC_CBD(R, i) (&(((struct enetc_cbd *)((R).bd_base))[i])) + +#define ENETC_CBDR_TIMEOUT 1000 /* usecs */ + +/* SI common */ +int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv); +void enetc_pci_remove(struct pci_dev *pdev); +int enetc_alloc_msix(struct enetc_ndev_priv *priv); +void enetc_free_msix(struct enetc_ndev_priv *priv); +void enetc_get_si_caps(struct enetc_si *si); +void enetc_init_si_rings_params(struct enetc_ndev_priv *priv); +int enetc_alloc_si_resources(struct enetc_ndev_priv *priv); +void enetc_free_si_resources(struct enetc_ndev_priv *priv); + +int enetc_open(struct net_device *ndev); +int enetc_close(struct net_device *ndev); +netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); +struct net_device_stats *enetc_get_stats(struct net_device *ndev); +int enetc_set_features(struct net_device *ndev, + netdev_features_t features); +/* ethtool */ +void enetc_set_ethtool_ops(struct net_device *ndev); + +/* control buffer descriptor ring (CBDR) */ +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map); +int enetc_clear_mac_flt_entry(struct enetc_si *si, int 
index); +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index); +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes); +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count); +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c new file mode 100644 index 000000000000..de466b71bf8f --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" + +static void enetc_clean_cbdr(struct enetc_si *si) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + struct enetc_cbd *dest_cbd; + int i, status; + + i = ring->next_to_clean; + + while (enetc_rd_reg(ring->cir) != i) { + dest_cbd = ENETC_CBD(*ring, i); + status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK; + if (status) + dev_warn(&si->pdev->dev, "CMD err %04x for cmd %04x\n", + status, dest_cbd->cmd); + + memset(dest_cbd, 0, sizeof(*dest_cbd)); + + i = (i + 1) % ring->bd_count; + } + + ring->next_to_clean = i; +} + +static int enetc_cbd_unused(struct enetc_cbdr *r) +{ + return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) % + r->bd_count; +} + +static int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd) +{ + struct enetc_cbdr *ring = &si->cbd_ring; + int timeout = ENETC_CBDR_TIMEOUT; + struct enetc_cbd *dest_cbd; + int i; + + if (unlikely(!ring->bd_base)) + return -EIO; + + if (unlikely(!enetc_cbd_unused(ring))) + enetc_clean_cbdr(si); + + i = ring->next_to_use; + dest_cbd = ENETC_CBD(*ring, i); + + /* copy command to the ring */ + *dest_cbd = *cbd; + i = (i + 1) % ring->bd_count; + + ring->next_to_use = i; + /* let H/W know BD ring has been updated */ + enetc_wr_reg(ring->pir, i); + + do { + if (enetc_rd_reg(ring->cir) == i) + break; + udelay(10); /* cannot sleep, rtnl_lock() */ + timeout -= 10; + } while (timeout); + + if (!timeout) + return -EBUSY; + + enetc_clean_cbdr(si); + + return 0; +} + +int enetc_clear_mac_flt_entry(struct enetc_si *si, int index) +{ + struct enetc_cbd cbd; + + memset(&cbd, 0, sizeof(cbd)); + + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + + return enetc_send_cmd(si, &cbd); +} + +int enetc_set_mac_flt_entry(struct enetc_si *si, int index, + char *mac_addr, int si_map) +{ + struct enetc_cbd cbd; + u32 upper; + u16 lower; + + memset(&cbd, 0, sizeof(cbd)); + + /* fill up the "set" descriptor */ + cbd.cls = 1; + cbd.status_flags = ENETC_CBD_FLAGS_SF; + cbd.index = cpu_to_le16(index); + cbd.opt[3] = cpu_to_le32(si_map); + /* enable entry */ + cbd.opt[0] = cpu_to_le32(BIT(31)); + + upper = *(const u32 *)mac_addr; + lower = *(const u16 *)(mac_addr + 4); + cbd.addr[0] = cpu_to_le32(upper); + cbd.addr[1] = cpu_to_le32(lower); + + return enetc_send_cmd(si, &cbd); +} + +#define RFSE_ALIGN 64 +/* Set entry in RFS table */ +int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse, + int index) +{ + struct enetc_cbd cbd = {.cmd = 0}; + dma_addr_t dma, dma_align; + void *tmp, *tmp_align; + int err; + + /* fill up the "set" descriptor */ + cbd.cmd = 0; + cbd.cls = 4; + cbd.index = cpu_to_le16(index); + cbd.length = cpu_to_le16(sizeof(*rfse)); + cbd.opt[3] = cpu_to_le32(0); /* SI */ + + tmp = dma_alloc_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&si->pdev->dev, "DMA mapping of RFS entry 
failed!\n"); + return -ENOMEM; + } + + dma_align = ALIGN(dma, RFSE_ALIGN); + tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN); + memcpy(tmp_align, rfse, sizeof(*rfse)); + + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(&si->pdev->dev, "FS entry add failed (%d)!", err); + + dma_free_coherent(&si->pdev->dev, sizeof(*rfse) + RFSE_ALIGN, + tmp, dma); + + return err; +} + +#define RSSE_ALIGN 64 +static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count, + bool read) +{ + struct enetc_cbd cbd = {.cmd = 0}; + dma_addr_t dma, dma_align; + u8 *tmp, *tmp_align; + int err, i; + + if (count < RSSE_ALIGN) + /* HW only takes in a full 64 entry table */ + return -EINVAL; + + tmp = dma_alloc_coherent(&si->pdev->dev, count + RSSE_ALIGN, + &dma, GFP_KERNEL); + if (!tmp) { + dev_err(&si->pdev->dev, "DMA mapping of RSS table failed!\n"); + return -ENOMEM; + } + dma_align = ALIGN(dma, RSSE_ALIGN); + tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN); + + if (!read) + for (i = 0; i < count; i++) + tmp_align[i] = (u8)(table[i]); + + /* fill up the descriptor */ + cbd.cmd = read ? 2 : 1; + cbd.cls = 3; + cbd.length = cpu_to_le16(count); + + cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align)); + cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align)); + + err = enetc_send_cmd(si, &cbd); + if (err) + dev_err(&si->pdev->dev, "RSS cmd failed (%d)!", err); + + if (read) + for (i = 0; i < count; i++) + table[i] = tmp_align[i]; + + dma_free_coherent(&si->pdev->dev, count + RSSE_ALIGN, tmp, dma); + + return err; +} + +/* Get RSS table */ +int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count) +{ + return enetc_cmd_rss_table(si, table, count, true); +} + +/* Set RSS table */ +int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count) +{ + return enetc_cmd_rss_table(si, (u32 *)table, count, false); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c new file mode 100644 index 000000000000..1ecad9ffabae --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include +#include +#include "enetc.h" + +static const u32 enetc_si_regs[] = { + ENETC_SIMR, ENETC_SIPMAR0, ENETC_SIPMAR1, ENETC_SICBDRMR, + ENETC_SICBDRSR, ENETC_SICBDRBAR0, ENETC_SICBDRBAR1, ENETC_SICBDRPIR, + ENETC_SICBDRCIR, ENETC_SICBDRLENR, ENETC_SICAPR0, ENETC_SICAPR1, + ENETC_SIUEFDCR +}; + +static const u32 enetc_txbdr_regs[] = { + ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1, + ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER +}; + +static const u32 enetc_rxbdr_regs[] = { + ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0, + ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER +}; + +static const u32 enetc_port_regs[] = { + ENETC_PMR, ENETC_PSR, ENETC_PSIPMR, ENETC_PSIPMAR0(0), + ENETC_PSIPMAR1(0), ENETC_PTXMBAR, ENETC_PCAPR0, ENETC_PCAPR1, + ENETC_PSICFGR0(0), ENETC_PRFSCAPR, ENETC_PTCMSDUR(0), + ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE +}; + +static int enetc_get_reglen(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int len; + + len = ARRAY_SIZE(enetc_si_regs); + len += ARRAY_SIZE(enetc_txbdr_regs) * priv->num_tx_rings; + len += ARRAY_SIZE(enetc_rxbdr_regs) * priv->num_rx_rings; + + if (hw->port) + len += 
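Since enetc_cmd_rss_table() only accepts a full 64-entry table, a caller reprogramming the indirection table always passes the whole array. A hedged sketch of such a caller spreading traffic over two rings (enetc_set_rss_table() is the helper defined above; the wrapper function and the fixed 64-entry table are hypothetical, kernel context assumed; not part of the patch):

/* Hypothetical example, kernel context assumed */
static int example_spread_rss_over_two_rings(struct enetc_si *si)
{
        u32 table[64];  /* hardware consumes the full 64-entry table */
        int i;

        for (i = 0; i < 64; i++)
                table[i] = i & 1;       /* even hashes -> ring 0, odd -> ring 1 */

        /* enetc_set_rss_table() wraps the table into a CBD and sends it */
        return enetc_set_rss_table(si, table, 64);
}
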
ARRAY_SIZE(enetc_port_regs); + + len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */ + + return len; +} + +static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs, + void *regbuf) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 *buf = (u32 *)regbuf; + int i, j; + u32 addr; + + for (i = 0; i < ARRAY_SIZE(enetc_si_regs); i++) { + *buf++ = enetc_si_regs[i]; + *buf++ = enetc_rd(hw, enetc_si_regs[i]); + } + + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_txbdr_regs); j++) { + addr = ENETC_BDR(TX, i, enetc_txbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(enetc_rxbdr_regs); j++) { + addr = ENETC_BDR(RX, i, enetc_rxbdr_regs[j]); + + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } + } + + if (!hw->port) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_regs); i++) { + addr = ENETC_PORT_BASE + enetc_port_regs[i]; + *buf++ = addr; + *buf++ = enetc_rd(hw, addr); + } +} + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_si_counters[] = { + { ENETC_SIROCT, "SI rx octets" }, + { ENETC_SIRFRM, "SI rx frames" }, + { ENETC_SIRUCA, "SI rx u-cast frames" }, + { ENETC_SIRMCA, "SI rx m-cast frames" }, + { ENETC_SITOCT, "SI tx octets" }, + { ENETC_SITFRM, "SI tx frames" }, + { ENETC_SITUCA, "SI tx u-cast frames" }, + { ENETC_SITMCA, "SI tx m-cast frames" }, + { ENETC_RBDCR(0), "Rx ring 0 discarded frames" }, + { ENETC_RBDCR(1), "Rx ring 1 discarded frames" }, + { ENETC_RBDCR(2), "Rx ring 2 discarded frames" }, + { ENETC_RBDCR(3), "Rx ring 3 discarded frames" }, + { ENETC_RBDCR(4), "Rx ring 4 discarded frames" }, + { ENETC_RBDCR(5), "Rx ring 5 discarded frames" }, + { ENETC_RBDCR(6), "Rx ring 6 discarded frames" }, + { ENETC_RBDCR(7), "Rx ring 7 discarded frames" }, + { ENETC_RBDCR(8), "Rx ring 8 discarded frames" }, + { ENETC_RBDCR(9), "Rx ring 9 discarded frames" }, + { ENETC_RBDCR(10), "Rx ring 10 discarded frames" }, + { ENETC_RBDCR(11), "Rx ring 11 discarded frames" }, + { ENETC_RBDCR(12), "Rx ring 12 discarded frames" }, + { ENETC_RBDCR(13), "Rx ring 13 discarded frames" }, + { ENETC_RBDCR(14), "Rx ring 14 discarded frames" }, + { ENETC_RBDCR(15), "Rx ring 15 discarded frames" }, +}; + +static const struct { + int reg; + char name[ETH_GSTRING_LEN]; +} enetc_port_counters[] = { + { ENETC_PM0_REOCT, "MAC rx ethernet octets" }, + { ENETC_PM0_RALN, "MAC rx alignment errors" }, + { ENETC_PM0_RXPF, "MAC rx valid pause frames" }, + { ENETC_PM0_RFRM, "MAC rx valid frames" }, + { ENETC_PM0_RFCS, "MAC rx fcs errors" }, + { ENETC_PM0_RVLAN, "MAC rx VLAN frames" }, + { ENETC_PM0_RERR, "MAC rx frame errors" }, + { ENETC_PM0_RUCA, "MAC rx unicast frames" }, + { ENETC_PM0_RMCA, "MAC rx multicast frames" }, + { ENETC_PM0_RBCA, "MAC rx broadcast frames" }, + { ENETC_PM0_RDRP, "MAC rx dropped packets" }, + { ENETC_PM0_RPKT, "MAC rx packets" }, + { ENETC_PM0_RUND, "MAC rx undersized packets" }, + { ENETC_PM0_R64, "MAC rx 64 byte packets" }, + { ENETC_PM0_R127, "MAC rx 65-127 byte packets" }, + { ENETC_PM0_R255, "MAC rx 128-255 byte packets" }, + { ENETC_PM0_R511, "MAC rx 256-511 byte packets" }, + { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" }, + { ENETC_PM0_R1518, "MAC rx 1024-1518 byte packets" }, + { ENETC_PM0_R1519X, "MAC rx 1519 to max-octet packets" }, + { ENETC_PM0_ROVR, "MAC rx oversized packets" }, + { ENETC_PM0_RJBR, "MAC rx jabber packets" }, + { ENETC_PM0_RFRG, 
"MAC rx fragment packets" }, + { ENETC_PM0_RCNP, "MAC rx control packets" }, + { ENETC_PM0_RDRNTP, "MAC rx fifo drop" }, + { ENETC_PM0_TEOCT, "MAC tx ethernet octets" }, + { ENETC_PM0_TOCT, "MAC tx octets" }, + { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" }, + { ENETC_PM0_TXPF, "MAC tx valid pause frames" }, + { ENETC_PM0_TFRM, "MAC tx frames" }, + { ENETC_PM0_TFCS, "MAC tx fcs errors" }, + { ENETC_PM0_TVLAN, "MAC tx VLAN frames" }, + { ENETC_PM0_TERR, "MAC tx frames" }, + { ENETC_PM0_TUCA, "MAC tx unicast frames" }, + { ENETC_PM0_TMCA, "MAC tx multicast frames" }, + { ENETC_PM0_TBCA, "MAC tx broadcast frames" }, + { ENETC_PM0_TPKT, "MAC tx packets" }, + { ENETC_PM0_TUND, "MAC tx undersized packets" }, + { ENETC_PM0_T127, "MAC tx 65-127 byte packets" }, + { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" }, + { ENETC_PM0_T1518, "MAC tx 1024-1518 byte packets" }, + { ENETC_PM0_TCNP, "MAC tx control packets" }, + { ENETC_PM0_TDFR, "MAC tx deferred packets" }, + { ENETC_PM0_TMCOL, "MAC tx multiple collisions" }, + { ENETC_PM0_TSCOL, "MAC tx single collisions" }, + { ENETC_PM0_TLCOL, "MAC tx late collisions" }, + { ENETC_PM0_TECOL, "MAC tx excessive collisions" }, + { ENETC_UFDMF, "SI MAC nomatch u-cast discards" }, + { ENETC_MFDMF, "SI MAC nomatch m-cast discards" }, + { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" }, + { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" }, + { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" }, + { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" }, + { ENETC_PFDMSAPR, "SI pruning discarded frames" }, + { ENETC_PICDR(0), "ICM DR0 discarded frames" }, + { ENETC_PICDR(1), "ICM DR1 discarded frames" }, + { ENETC_PICDR(2), "ICM DR2 discarded frames" }, + { ENETC_PICDR(3), "ICM DR3 discarded frames" }, +}; + +static const char rx_ring_stats[][ETH_GSTRING_LEN] = { + "Rx ring %2d frames", + "Rx ring %2d alloc errors", +}; + +static const char tx_ring_stats[][ETH_GSTRING_LEN] = { + "Tx ring %2d frames", +}; + +static int enetc_get_sset_count(struct net_device *ndev, int sset) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(enetc_si_counters) + + ARRAY_SIZE(tx_ring_stats) * priv->num_tx_rings + + ARRAY_SIZE(rx_ring_stats) * priv->num_rx_rings + + (enetc_si_is_pf(priv->si) ? 
+ ARRAY_SIZE(enetc_port_counters) : 0); + + return -EOPNOTSUPP; +} + +static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u8 *p = data; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) { + strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < priv->num_tx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(tx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, tx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + for (i = 0; i < priv->num_rx_rings; i++) { + for (j = 0; j < ARRAY_SIZE(rx_ring_stats); j++) { + snprintf(p, ETH_GSTRING_LEN, rx_ring_stats[j], + i); + p += ETH_GSTRING_LEN; + } + } + + if (!enetc_si_is_pf(priv->si)) + break; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) { + strlcpy(p, enetc_port_counters[i].name, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void enetc_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int i, o = 0; + + for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) + data[o++] = enetc_rd64(hw, enetc_si_counters[i].reg); + + for (i = 0; i < priv->num_tx_rings; i++) + data[o++] = priv->tx_ring[i]->stats.packets; + + for (i = 0; i < priv->num_rx_rings; i++) { + data[o++] = priv->rx_ring[i]->stats.packets; + data[o++] = priv->rx_ring[i]->stats.rx_alloc_errs; + } + + if (!enetc_si_is_pf(priv->si)) + return; + + for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) + data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg); +} + +#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \ + RXH_IP_DST) +#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int enetc_get_rsshash(struct ethtool_rxnfc *rxnfc) +{ + static const u32 rsshash[] = { + [TCP_V4_FLOW] = ENETC_RSSHASH_L4, + [UDP_V4_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V4_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V4_FLOW] = ENETC_RSSHASH_L3, + [IPV4_FLOW] = ENETC_RSSHASH_L3, + [TCP_V6_FLOW] = ENETC_RSSHASH_L4, + [UDP_V6_FLOW] = ENETC_RSSHASH_L4, + [SCTP_V6_FLOW] = ENETC_RSSHASH_L4, + [AH_ESP_V6_FLOW] = ENETC_RSSHASH_L3, + [IPV6_FLOW] = ENETC_RSSHASH_L3, + [ETHER_FLOW] = 0, + }; + + if (rxnfc->flow_type >= ARRAY_SIZE(rsshash)) + return -EINVAL; + + rxnfc->data = rsshash[rxnfc->flow_type]; + + return 0; +} + +/* current HW spec does byte reversal on everything including MAC addresses */ +static void ether_addr_copy_swap(u8 *dst, const u8 *src) +{ + int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[i] = src[ETH_ALEN - i - 1]; +} + +static int enetc_set_cls_entry(struct enetc_si *si, + struct ethtool_rx_flow_spec *fs, bool en) +{ + struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; + struct ethtool_usrip4_spec *l3ip4_h, *l3ip4_m; + struct ethhdr *eth_h, *eth_m; + struct enetc_cmd_rfse rfse = { {0} }; + + if (!en) + goto done; + + switch (fs->flow_type & 0xff) { + case TCP_V4_FLOW: + l4ip4_h = &fs->h_u.tcp_ip4_spec; + l4ip4_m = &fs->m_u.tcp_ip4_spec; + goto l4ip4; + case UDP_V4_FLOW: + l4ip4_h = &fs->h_u.udp_ip4_spec; + l4ip4_m = &fs->m_u.udp_ip4_spec; + goto l4ip4; + case SCTP_V4_FLOW: + l4ip4_h = &fs->h_u.sctp_ip4_spec; + l4ip4_m = &fs->m_u.sctp_ip4_spec; +l4ip4: + rfse.sip_h[0] = l4ip4_h->ip4src; + rfse.sip_m[0] = l4ip4_m->ip4src; + rfse.dip_h[0] = l4ip4_h->ip4dst; + rfse.dip_m[0] = l4ip4_m->ip4dst; + rfse.sport_h = 
ntohs(l4ip4_h->psrc); + rfse.sport_m = ntohs(l4ip4_m->psrc); + rfse.dport_h = ntohs(l4ip4_h->pdst); + rfse.dport_m = ntohs(l4ip4_m->pdst); + if (l4ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case IP_USER_FLOW: + l3ip4_h = &fs->h_u.usr_ip4_spec; + l3ip4_m = &fs->m_u.usr_ip4_spec; + + rfse.sip_h[0] = l3ip4_h->ip4src; + rfse.sip_m[0] = l3ip4_m->ip4src; + rfse.dip_h[0] = l3ip4_h->ip4dst; + rfse.dip_m[0] = l3ip4_m->ip4dst; + if (l3ip4_m->tos) + netdev_warn(si->ndev, "ToS field is not supported and was ignored\n"); + rfse.ethtype_h = ETH_P_IP; /* IPv4 */ + rfse.ethtype_m = 0xffff; + break; + case ETHER_FLOW: + eth_h = &fs->h_u.ether_spec; + eth_m = &fs->m_u.ether_spec; + + ether_addr_copy_swap(rfse.smac_h, eth_h->h_source); + ether_addr_copy_swap(rfse.smac_m, eth_m->h_source); + ether_addr_copy_swap(rfse.dmac_h, eth_h->h_dest); + ether_addr_copy_swap(rfse.dmac_m, eth_m->h_dest); + rfse.ethtype_h = ntohs(eth_h->h_proto); + rfse.ethtype_m = ntohs(eth_m->h_proto); + break; + default: + return -EOPNOTSUPP; + } + + rfse.mode |= ENETC_RFSE_EN; + if (fs->ring_cookie != RX_CLS_FLOW_DISC) { + rfse.mode |= ENETC_RFSE_MODE_BD; + rfse.result = fs->ring_cookie; + } +done: + return enetc_set_fs_entry(si, &rfse, fs->location); +} + +static int enetc_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc, + u32 *rule_locs) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i, j; + + switch (rxnfc->cmd) { + case ETHTOOL_GRXRINGS: + rxnfc->data = priv->num_rx_rings; + break; + case ETHTOOL_GRXFH: + /* get RSS hash config */ + return enetc_get_rsshash(rxnfc); + case ETHTOOL_GRXCLSRLCNT: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* number of entries in use */ + rxnfc->rule_cnt = 0; + for (i = 0; i < priv->si->num_fs_entries; i++) + if (priv->cls_rules[i].used) + rxnfc->rule_cnt++; + break; + case ETHTOOL_GRXCLSRULE: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + /* get entry x */ + rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs; + break; + case ETHTOOL_GRXCLSRLALL: + /* total number of entries */ + rxnfc->data = priv->si->num_fs_entries; + /* array of indexes of used entries */ + j = 0; + for (i = 0; i < priv->si->num_fs_entries; i++) { + if (!priv->cls_rules[i].used) + continue; + if (j == rxnfc->rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = i; + } + /* number of entries in use */ + rxnfc->rule_cnt = j; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int enetc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *rxnfc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int err; + + switch (rxnfc->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + if (rxnfc->fs.ring_cookie >= priv->num_rx_rings && + rxnfc->fs.ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, true); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].fs = rxnfc->fs; + priv->cls_rules[rxnfc->fs.location].used = 1; + break; + case ETHTOOL_SRXCLSRLDEL: + if (rxnfc->fs.location >= priv->si->num_fs_entries) + return -EINVAL; + + err = enetc_set_cls_entry(priv->si, &rxnfc->fs, false); + if (err) + return err; + priv->cls_rules[rxnfc->fs.location].used = 0; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static u32 enetc_get_rxfh_key_size(struct net_device *ndev) +{ + struct 
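For context on what enetc_set_cls_entry() consumes: the ethtool_rx_flow_spec it translates arrives from user space via ETHTOOL_SRXCLSRLINS. A hedged, user-space-style sketch of a rule that would steer TCP/IPv4 traffic with destination port 5201 to Rx ring 2 (the struct and flow-type names come from the ethtool UAPI; the helper function itself is hypothetical and not part of the patch):

#include <string.h>
#include <arpa/inet.h>
#include <linux/ethtool.h>

/* Illustrative only: the spec a tool such as ethtool -N would populate */
static void example_fill_tcp4_rule(struct ethtool_rx_flow_spec *fs)
{
        memset(fs, 0, sizeof(*fs));
        fs->flow_type = TCP_V4_FLOW;
        fs->h_u.tcp_ip4_spec.pdst = htons(5201);        /* match dst port */
        fs->m_u.tcp_ip4_spec.pdst = htons(0xffff);      /* exact-match mask */
        fs->ring_cookie = 2;                            /* deliver to Rx ring 2 */
        fs->location = 0;                               /* RFS table slot 0 */
}
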
enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash key. PF only */ + return (priv->si->hw.port) ? ENETC_RSSHASH_KEY_SIZE : 0; +} + +static u32 enetc_get_rxfh_indir_size(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + /* return the size of the RX flow hash indirection table */ + return priv->si->num_rss; +} + +static int enetc_get_rxfh(struct net_device *ndev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0, i; + + /* return hash function */ + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* return hash key */ + if (key && hw->port) + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + ((u32 *)key)[i] = enetc_port_rd(hw, ENETC_PRSSK(i)); + + /* return RSS table */ + if (indir) + err = enetc_get_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes) +{ + int i; + + for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++) + enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]); +} + +static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + int err = 0; + + /* set hash key, if PF */ + if (key && hw->port) + enetc_set_rss_key(hw, key); + + /* set RSS table */ + if (indir) + err = enetc_set_rss_table(priv->si, indir, priv->si->num_rss); + + return err; +} + +static void enetc_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + ring->rx_pending = priv->rx_bd_count; + ring->tx_pending = priv->tx_bd_count; + + /* do some h/w sanity checks for BDR length */ + if (netif_running(ndev)) { + struct enetc_hw *hw = &priv->si->hw; + u32 val = enetc_rxbdr_rd(hw, 0, ENETC_RBLENR); + + if (val != priv->rx_bd_count) + netif_err(priv, hw, ndev, "RxBDR[RBLENR] = %d!\n", val); + + val = enetc_txbdr_rd(hw, 0, ENETC_TBLENR); + + if (val != priv->tx_bd_count) + netif_err(priv, hw, ndev, "TxBDR[TBLENR] = %d!\n", val); + } +} + +static const struct ethtool_ops enetc_pf_ethtool_ops = { + .get_regs_len = enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_key_size = enetc_get_rxfh_key_size, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct ethtool_ops enetc_vf_ethtool_ops = { + .get_regs_len = enetc_get_reglen, + .get_regs = enetc_get_regs, + .get_sset_count = enetc_get_sset_count, + .get_strings = enetc_get_strings, + .get_ethtool_stats = enetc_get_ethtool_stats, + .get_rxnfc = enetc_get_rxnfc, + .set_rxnfc = enetc_set_rxnfc, + .get_rxfh_indir_size = enetc_get_rxfh_indir_size, + .get_rxfh = enetc_get_rxfh, + .set_rxfh = enetc_set_rxfh, + .get_ringparam = enetc_get_ringparam, +}; + +void enetc_set_ethtool_ops(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (enetc_si_is_pf(priv->si)) + ndev->ethtool_ops = &enetc_pf_ethtool_ops; + else + ndev->ethtool_ops = &enetc_vf_ethtool_ops; +} diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h new file mode 100644 index 000000000000..df8eb8882d92 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -0,0 +1,533 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include + +/* ENETC device IDs */ +#define ENETC_DEV_ID_PF 0xe100 +#define ENETC_DEV_ID_VF 0xef00 +#define ENETC_DEV_ID_PTP 0xee02 + +/* ENETC register block BAR */ +#define ENETC_BAR_REGS 0 + +/** SI regs, offset: 0h */ +#define ENETC_SIMR 0 +#define ENETC_SIMR_EN BIT(31) +#define ENETC_SIMR_RSSE BIT(0) +#define ENETC_SICTR0 0x18 +#define ENETC_SICTR1 0x1c +#define ENETC_SIPCAPR0 0x20 +#define ENETC_SIPCAPR0_RSS BIT(8) +#define ENETC_SIPCAPR1 0x24 +#define ENETC_SITGTGR 0x30 +#define ENETC_SIRBGCR 0x38 +/* cache attribute registers for transactions initiated by ENETC */ +#define ENETC_SICAR0 0x40 +#define ENETC_SICAR1 0x44 +#define ENETC_SICAR2 0x48 +/* rd snoop, no alloc + * wr snoop, no alloc, partial cache line update for BDs and full cache line + * update for data + */ +#define ENETC_SICAR_RD_COHERENT 0x2b2b0000 +#define ENETC_SICAR_WR_COHERENT 0x00006727 +#define ENETC_SICAR_MSI 0x00300030 /* rd/wr device, no snoop, no alloc */ + +#define ENETC_SIPMAR0 0x80 +#define ENETC_SIPMAR1 0x84 + +/* VF-PF Message passing */ +#define ENETC_DEFAULT_MSG_SIZE 1024 /* and max size */ +/* msg size encoding: default and max msg value of 1024B encoded as 0 */ +static inline u32 enetc_vsi_set_msize(u32 size) +{ + return size < ENETC_DEFAULT_MSG_SIZE ? size >> 5 : 0; +} + +#define ENETC_PSIMSGRR 0x204 +#define ENETC_PSIMSGRR_MR_MASK GENMASK(2, 1) +#define ENETC_PSIMSGRR_MR(n) BIT((n) + 1) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR0(n) (0x210 + (n) * 0x8) /* n = VSI index */ +#define ENETC_PSIVMSGRCVAR1(n) (0x214 + (n) * 0x8) + +#define ENETC_VSIMSGSR 0x204 /* RO */ +#define ENETC_VSIMSGSR_MB BIT(0) +#define ENETC_VSIMSGSR_MS BIT(1) +#define ENETC_VSIMSGSNDAR0 0x210 +#define ENETC_VSIMSGSNDAR1 0x214 + +#define ENETC_SIMSGSR_SET_MC(val) ((val) << 16) +#define ENETC_SIMSGSR_GET_MC(val) ((val) >> 16) + +/* SI statistics */ +#define ENETC_SIROCT 0x300 +#define ENETC_SIRFRM 0x308 +#define ENETC_SIRUCA 0x310 +#define ENETC_SIRMCA 0x318 +#define ENETC_SITOCT 0x320 +#define ENETC_SITFRM 0x328 +#define ENETC_SITUCA 0x330 +#define ENETC_SITMCA 0x338 +#define ENETC_RBDCR(n) (0x8180 + (n) * 0x200) + +/* Control BDR regs */ +#define ENETC_SICBDRMR 0x800 +#define ENETC_SICBDRSR 0x804 /* RO */ +#define ENETC_SICBDRBAR0 0x810 +#define ENETC_SICBDRBAR1 0x814 +#define ENETC_SICBDRPIR 0x818 +#define ENETC_SICBDRCIR 0x81c +#define ENETC_SICBDRLENR 0x820 + +#define ENETC_SICAPR0 0x900 +#define ENETC_SICAPR1 0x904 + +#define ENETC_PSIIER 0xa00 +#define ENETC_PSIIER_MR_MASK GENMASK(2, 1) +#define ENETC_PSIIDR 0xa08 +#define ENETC_SITXIDR 0xa18 +#define ENETC_SIRXIDR 0xa28 +#define ENETC_SIMSIVR 0xa30 + +#define ENETC_SIMSITRV(n) (0xB00 + (n) * 0x4) +#define ENETC_SIMSIRRV(n) (0xB80 + (n) * 0x4) + +#define ENETC_SIUEFDCR 0xe28 + +#define ENETC_SIRFSCAPR 0x1200 +#define ENETC_SIRFSCAPR_GET_NUM_RFS(val) ((val) & 0x7f) +#define ENETC_SIRSSCAPR 0x1600 +#define ENETC_SIRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) + +/** SI BDR sub-blocks, n = 0..7 */ +enum enetc_bdr_type {TX, RX}; +#define ENETC_BDR_OFF(i) ((i) * 0x200) +#define ENETC_BDR(t, i, r) (0x8000 + (t) * 0x100 + ENETC_BDR_OFF(i) + (r)) +/* RX BDR reg offsets */ +#define ENETC_RBMR 0 +#define ENETC_RBMR_BDS BIT(2) +#define ENETC_RBMR_VTE 
BIT(5) +#define ENETC_RBMR_EN BIT(31) +#define ENETC_RBSR 0x4 +#define ENETC_RBBSR 0x8 +#define ENETC_RBCIR 0xc +#define ENETC_RBBAR0 0x10 +#define ENETC_RBBAR1 0x14 +#define ENETC_RBPIR 0x18 +#define ENETC_RBLENR 0x20 +#define ENETC_RBIER 0xa0 +#define ENETC_RBIER_RXTIE BIT(0) +#define ENETC_RBIDR 0xa4 +#define ENETC_RBICIR0 0xa8 +#define ENETC_RBICIR0_ICEN BIT(31) + +/* TX BDR reg offsets */ +#define ENETC_TBMR 0 +#define ENETC_TBSR_BUSY BIT(0) +#define ENETC_TBMR_VIH BIT(9) +#define ENETC_TBMR_PRIO_MASK GENMASK(2, 0) +#define ENETC_TBMR_PRIO_SET(val) val +#define ENETC_TBMR_EN BIT(31) +#define ENETC_TBSR 0x4 +#define ENETC_TBBAR0 0x10 +#define ENETC_TBBAR1 0x14 +#define ENETC_TBPIR 0x18 +#define ENETC_TBCIR 0x1c +#define ENETC_TBCIR_IDX_MASK 0xffff +#define ENETC_TBLENR 0x20 +#define ENETC_TBIER 0xa0 +#define ENETC_TBIER_TXTIE BIT(0) +#define ENETC_TBIDR 0xa4 +#define ENETC_TBICIR0 0xa8 +#define ENETC_TBICIR0_ICEN BIT(31) + +#define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) + +/* Port regs, offset: 1_0000h */ +#define ENETC_PORT_BASE 0x10000 +#define ENETC_PMR 0x0000 +#define ENETC_PMR_EN GENMASK(18, 16) +#define ENETC_PSR 0x0004 /* RO */ +#define ENETC_PSIPMR 0x0018 +#define ENETC_PSIPMR_SET_UP(n) BIT(n) /* n = SI index */ +#define ENETC_PSIPMR_SET_MP(n) BIT((n) + 16) +#define ENETC_PSIPVMR 0x001c +#define ENETC_VLAN_PROMISC_MAP_ALL 0x7 +#define ENETC_PSIPVMR_SET_VP(simap) ((simap) & 0x7) +#define ENETC_PSIPVMR_SET_VUTA(simap) (((simap) & 0x7) << 16) +#define ENETC_PSIPMAR0(n) (0x0100 + (n) * 0x8) /* n = SI index */ +#define ENETC_PSIPMAR1(n) (0x0104 + (n) * 0x8) +#define ENETC_PVCLCTR 0x0208 +#define ENETC_VLAN_TYPE_C BIT(0) +#define ENETC_VLAN_TYPE_S BIT(1) +#define ENETC_PVCLCTR_OVTPIDL(bmp) ((bmp) & 0xff) /* VLAN_TYPE */ +#define ENETC_PSIVLANR(n) (0x0240 + (n) * 4) /* n = SI index */ +#define ENETC_PSIVLAN_EN BIT(31) +#define ENETC_PSIVLAN_SET_QOS(val) ((u32)(val) << 12) +#define ENETC_PTXMBAR 0x0608 +#define ENETC_PCAPR0 0x0900 +#define ENETC_PCAPR0_RXBDR(val) ((val) >> 24) +#define ENETC_PCAPR0_TXBDR(val) (((val) >> 16) & 0xff) +#define ENETC_PCAPR1 0x0904 +#define ENETC_PSICFGR0(n) (0x0940 + (n) * 0xc) /* n = SI index */ +#define ENETC_PSICFGR0_SET_TXBDR(val) ((val) & 0xff) +#define ENETC_PSICFGR0_SET_RXBDR(val) (((val) & 0xff) << 16) +#define ENETC_PSICFGR0_VTE BIT(12) +#define ENETC_PSICFGR0_SIVIE BIT(14) +#define ENETC_PSICFGR0_ASE BIT(15) +#define ENETC_PSICFGR0_SIVC(bmp) (((bmp) & 0xff) << 24) /* VLAN_TYPE */ + +#define ENETC_PTCCBSR0(n) (0x1110 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/ +#define ENETC_RSSHASH_KEY_SIZE 40 +#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */ +#define ENETC_PSIVLANFMR 0x1700 +#define ENETC_PSIVLANFMR_VS BIT(0) +#define ENETC_PRFSMR 0x1800 +#define ENETC_PRFSMR_RFSE BIT(31) +#define ENETC_PRFSCAPR 0x1804 +#define ENETC_PRFSCAPR_GET_NUM_RFS(val) ((((val) & 0xf) + 1) * 16) +#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */ +#define ENETC_PFPMR 0x1900 +#define ENETC_PFPMR_PMACE BIT(1) +#define ENETC_PFPMR_MWLM BIT(0) +#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10) +#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10) +#define ENETC_PSIMMHFR0(n, err) (((err) ? 
0x1d00 : 0x1d08) + (n) * 0x10) +#define ENETC_PSIMMHFR1(n) (0x1d0c + (n) * 0x10) +#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */ +#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */ +#define ENETC_MMCSR 0x1f00 +#define ENETC_MMCSR_ME BIT(16) +#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */ + +#define ENETC_PM0_CMD_CFG 0x8008 +#define ENETC_PM1_CMD_CFG 0x9008 +#define ENETC_PM0_TX_EN BIT(0) +#define ENETC_PM0_RX_EN BIT(1) +#define ENETC_PM0_PROMISC BIT(4) +#define ENETC_PM0_CMD_XGLP BIT(10) +#define ENETC_PM0_CMD_TXP BIT(11) +#define ENETC_PM0_CMD_PHY_TX_EN BIT(15) +#define ENETC_PM0_CMD_SFD BIT(21) +#define ENETC_PM0_MAXFRM 0x8014 +#define ENETC_SET_TX_MTU(val) ((val) << 16) +#define ENETC_SET_MAXFRM(val) ((val) & 0xffff) +#define ENETC_PM0_IF_MODE 0x8300 +#define ENETC_PMO_IFM_RG BIT(2) +#define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) +#define ENETC_PM0_IFM_RGAUTO (BIT(15) | ENETC_PMO_IFM_RG | BIT(1)) +#define ENETC_PM0_IFM_XGMII BIT(12) + +/* MAC counters */ +#define ENETC_PM0_REOCT 0x8100 +#define ENETC_PM0_RALN 0x8110 +#define ENETC_PM0_RXPF 0x8118 +#define ENETC_PM0_RFRM 0x8120 +#define ENETC_PM0_RFCS 0x8128 +#define ENETC_PM0_RVLAN 0x8130 +#define ENETC_PM0_RERR 0x8138 +#define ENETC_PM0_RUCA 0x8140 +#define ENETC_PM0_RMCA 0x8148 +#define ENETC_PM0_RBCA 0x8150 +#define ENETC_PM0_RDRP 0x8158 +#define ENETC_PM0_RPKT 0x8160 +#define ENETC_PM0_RUND 0x8168 +#define ENETC_PM0_R64 0x8170 +#define ENETC_PM0_R127 0x8178 +#define ENETC_PM0_R255 0x8180 +#define ENETC_PM0_R511 0x8188 +#define ENETC_PM0_R1023 0x8190 +#define ENETC_PM0_R1518 0x8198 +#define ENETC_PM0_R1519X 0x81A0 +#define ENETC_PM0_ROVR 0x81A8 +#define ENETC_PM0_RJBR 0x81B0 +#define ENETC_PM0_RFRG 0x81B8 +#define ENETC_PM0_RCNP 0x81C0 +#define ENETC_PM0_RDRNTP 0x81C8 +#define ENETC_PM0_TEOCT 0x8200 +#define ENETC_PM0_TOCT 0x8208 +#define ENETC_PM0_TCRSE 0x8210 +#define ENETC_PM0_TXPF 0x8218 +#define ENETC_PM0_TFRM 0x8220 +#define ENETC_PM0_TFCS 0x8228 +#define ENETC_PM0_TVLAN 0x8230 +#define ENETC_PM0_TERR 0x8238 +#define ENETC_PM0_TUCA 0x8240 +#define ENETC_PM0_TMCA 0x8248 +#define ENETC_PM0_TBCA 0x8250 +#define ENETC_PM0_TPKT 0x8260 +#define ENETC_PM0_TUND 0x8268 +#define ENETC_PM0_T127 0x8278 +#define ENETC_PM0_T1023 0x8290 +#define ENETC_PM0_T1518 0x8298 +#define ENETC_PM0_TCNP 0x82C0 +#define ENETC_PM0_TDFR 0x82D0 +#define ENETC_PM0_TMCOL 0x82D8 +#define ENETC_PM0_TSCOL 0x82E0 +#define ENETC_PM0_TLCOL 0x82E8 +#define ENETC_PM0_TECOL 0x82F0 + +/* Port counters */ +#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */ +#define ENETC_PBFDSIR 0x0810 +#define ENETC_PFDMSAPR 0x0814 +#define ENETC_UFDMF 0x1680 +#define ENETC_MFDMF 0x1684 +#define ENETC_PUFDVFR 0x1780 +#define ENETC_PMFDVFR 0x1784 +#define ENETC_PBFDVFR 0x1788 + +/** Global regs, offset: 2_0000h */ +#define ENETC_GLOBAL_BASE 0x20000 +#define ENETC_G_EIPBRR0 0x0bf8 +#define ENETC_G_EIPBRR1 0x0bfc +#define ENETC_G_EPFBLPR(n) (0xd00 + 4 * (n)) +#define ENETC_G_EPFBLPR1_XGMII 0x80000000 + +/* PCI device info */ +struct enetc_hw { + /* SI registers, used by all PCI functions */ + void __iomem *reg; + /* Port registers, PF only */ + void __iomem *port; + /* IP global registers, PF only */ + void __iomem *global; +}; + +/* general register accessors */ +#define enetc_rd_reg(reg) ioread32((reg)) +#define enetc_wr_reg(reg, val) iowrite32((val), (reg)) +#ifdef ioread64 +#define enetc_rd_reg64(reg) ioread64((reg)) +#else +/* using this to read out stats on 32b systems */ +static inline u64 enetc_rd_reg64(void __iomem *reg) 
+{ + u32 low, high, tmp; + + do { + high = ioread32(reg + 4); + low = ioread32(reg); + tmp = ioread32(reg + 4); + } while (high != tmp); + + return le64_to_cpu((__le64)high << 32 | low); +} +#endif + +#define enetc_rd(hw, off) enetc_rd_reg((hw)->reg + (off)) +#define enetc_wr(hw, off, val) enetc_wr_reg((hw)->reg + (off), val) +#define enetc_rd64(hw, off) enetc_rd_reg64((hw)->reg + (off)) +/* port register accessors - PF only */ +#define enetc_port_rd(hw, off) enetc_rd_reg((hw)->port + (off)) +#define enetc_port_wr(hw, off, val) enetc_wr_reg((hw)->port + (off), val) +/* global register accessors - PF only */ +#define enetc_global_rd(hw, off) enetc_rd_reg((hw)->global + (off)) +#define enetc_global_wr(hw, off, val) enetc_wr_reg((hw)->global + (off), val) +/* BDR register accessors, see ENETC_BDR() */ +#define enetc_bdr_rd(hw, t, n, off) \ + enetc_rd(hw, ENETC_BDR(t, n, off)) +#define enetc_bdr_wr(hw, t, n, off, val) \ + enetc_wr(hw, ENETC_BDR(t, n, off), val) +#define enetc_txbdr_rd(hw, n, off) enetc_bdr_rd(hw, TX, n, off) +#define enetc_rxbdr_rd(hw, n, off) enetc_bdr_rd(hw, RX, n, off) +#define enetc_txbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, TX, n, off, val) +#define enetc_rxbdr_wr(hw, n, off, val) \ + enetc_bdr_wr(hw, RX, n, off, val) + +/* Buffer Descriptors (BD) */ +union enetc_tx_bd { + struct { + __le64 addr; + __le16 buf_len; + __le16 frm_len; + union { + struct { + __le16 l3_csoff; + u8 l4_csoff; + u8 flags; + }; /* default layout */ + __le32 lstatus; + }; + }; + struct { + __le32 tstamp; + __le16 tpid; + __le16 vid; + u8 reserved[6]; + u8 e_flags; + u8 flags; + } ext; /* Tx BD extension */ +}; + +#define ENETC_TXBD_FLAGS_L4CS BIT(0) +#define ENETC_TXBD_FLAGS_W BIT(2) +#define ENETC_TXBD_FLAGS_CSUM BIT(3) +#define ENETC_TXBD_FLAGS_EX BIT(6) +#define ENETC_TXBD_FLAGS_F BIT(7) + +static inline void enetc_clear_tx_bd(union enetc_tx_bd *txbd) +{ + memset(txbd, 0, sizeof(*txbd)); +} + +/* L3 csum flags */ +#define ENETC_TXBD_L3_IPCS BIT(7) +#define ENETC_TXBD_L3_IPV6 BIT(15) + +#define ENETC_TXBD_L3_START_MASK GENMASK(6, 0) +#define ENETC_TXBD_L3_SET_HSIZE(val) ((((val) >> 2) & 0x7f) << 8) + +/* Extension flags */ +#define ENETC_TXBD_E_FLAGS_VLAN_INS BIT(0) +#define ENETC_TXBD_E_FLAGS_TWO_STEP_PTP BIT(2) + +static inline __le16 enetc_txbd_l3_csoff(int start, int hdr_sz, u16 l3_flags) +{ + return cpu_to_le16(l3_flags | ENETC_TXBD_L3_SET_HSIZE(hdr_sz) | + (start & ENETC_TXBD_L3_START_MASK)); +} + +/* L4 csum flags */ +#define ENETC_TXBD_L4_UDP BIT(5) +#define ENETC_TXBD_L4_TCP BIT(6) + +union enetc_rx_bd { + struct { + __le64 addr; + u8 reserved[8]; + } w; + struct { + __le16 inet_csum; + __le16 parse_summary; + __le32 rss_hash; + __le16 buf_len; + __le16 vlan_opt; + union { + struct { + __le16 flags; + __le16 error; + }; + __le32 lstatus; + }; + } r; +}; + +#define ENETC_RXBD_LSTATUS_R BIT(30) +#define ENETC_RXBD_LSTATUS_F BIT(31) +#define ENETC_RXBD_ERR_MASK 0xff +#define ENETC_RXBD_LSTATUS(flags) ((flags) << 16) +#define ENETC_RXBD_FLAG_VLAN BIT(9) +#define ENETC_RXBD_FLAG_TSTMP BIT(10) + +#define ENETC_MAC_ADDR_FILT_CNT 8 /* # of supported entries per port */ +#define EMETC_MAC_ADDR_FILT_RES 3 /* # of reserved entries at the beginning */ +#define ENETC_MAX_NUM_VFS 2 + +struct enetc_cbd { + union { + struct { + __le32 addr[2]; + __le32 opt[4]; + }; + __le32 data[6]; + }; + __le16 index; + __le16 length; + u8 cmd; + u8 cls; + u8 _res; + u8 status_flags; +}; + +#define ENETC_CBD_FLAGS_SF BIT(7) /* short format */ +#define ENETC_CBD_STATUS_MASK 0xf + +struct enetc_cmd_rfse { + u8 
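The checksum-offload hints in the Tx BD above are built from enetc_txbd_l3_csoff() plus the ENETC_TXBD_L3_* and ENETC_TXBD_L4_* flags. A hedged sketch of filling them for a TCP-over-IPv4 frame with a 14-byte Ethernet header and a 20-byte IPv4 header (the helper, flag and field names are from this header; the wrapper function and the fixed offsets are hypothetical, kernel context assumed; not part of the patch):

/* Illustrative only, not part of the patch */
static void example_fill_csum_offload(union enetc_tx_bd *txbd)
{
        enetc_clear_tx_bd(txbd);

        /* L3 starts at offset 14, IPv4 header is 20 bytes, request IP csum */
        txbd->l3_csoff = enetc_txbd_l3_csoff(14, 20, ENETC_TXBD_L3_IPCS);
        /* L4 is TCP */
        txbd->l4_csoff = ENETC_TXBD_L4_TCP;
        /* ask hardware to insert both the L3 and L4 checksums */
        txbd->flags = ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
}
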
smac_h[6]; + u8 smac_m[6]; + u8 dmac_h[6]; + u8 dmac_m[6]; + u32 sip_h[4]; + u32 sip_m[4]; + u32 dip_h[4]; + u32 dip_m[4]; + u16 ethtype_h; + u16 ethtype_m; + u16 ethtype4_h; + u16 ethtype4_m; + u16 sport_h; + u16 sport_m; + u16 dport_h; + u16 dport_m; + u16 vlan_h; + u16 vlan_m; + u8 proto_h; + u8 proto_m; + u16 flags; + u16 result; + u16 mode; +}; + +#define ENETC_RFSE_EN BIT(15) +#define ENETC_RFSE_MODE_BD 2 + +static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr) +{ + *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); + *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); +} + +#define ENETC_SI_INT_IDX 0 +/* base index for Rx/Tx interrupts */ +#define ENETC_BDR_INT_BASE_IDX 1 + +/* Messaging */ + +/* Command completion status */ +enum enetc_msg_cmd_status { + ENETC_MSG_CMD_STATUS_OK, + ENETC_MSG_CMD_STATUS_FAIL +}; + +/* VSI-PSI command message types */ +enum enetc_msg_cmd_type { + ENETC_MSG_CMD_MNG_MAC = 1, /* manage MAC address */ + ENETC_MSG_CMD_MNG_RX_MAC_FILTER,/* manage RX MAC table */ + ENETC_MSG_CMD_MNG_RX_VLAN_FILTER /* manage RX VLAN table */ +}; + +/* VSI-PSI command action types */ +enum enetc_msg_cmd_action_type { + ENETC_MSG_CMD_MNG_ADD = 1, + ENETC_MSG_CMD_MNG_REMOVE +}; + +/* PSI-VSI command header format */ +struct enetc_msg_cmd_header { + u16 type; /* command class type */ + u16 id; /* denotes the specific required action */ +}; + +/* Common H/W utility functions */ + +static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx, + bool en) +{ + u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR); + + val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0); + enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val); +} + +static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx, + bool en) +{ + u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR); + + val = (val & ~ENETC_TBMR_VIH) | (en ? 
ENETC_TBMR_VIH : 0); + enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c new file mode 100644 index 000000000000..77b9cd10ba2b --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include +#include +#include +#include + +#include "enetc_pf.h" + +struct enetc_mdio_regs { + u32 mdio_cfg; /* MDIO configuration and status */ + u32 mdio_ctl; /* MDIO control */ + u32 mdio_data; /* MDIO data */ + u32 mdio_addr; /* MDIO address */ +}; + +#define bus_to_enetc_regs(bus) (struct enetc_mdio_regs __iomem *)((bus)->priv) + +#define ENETC_MDIO_REG_OFFSET 0x1c00 +#define ENETC_MDC_DIV 258 + +#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8) +#define MDIO_CFG_BSY BIT(0) +#define MDIO_CFG_RD_ER BIT(1) +#define MDIO_CFG_ENC45 BIT(6) + /* external MDIO only - driven on neg MDC edge */ +#define MDIO_CFG_NEG BIT(23) + +#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f) +#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5) +#define MDIO_CTL_READ BIT(15) +#define MDIO_DATA(x) ((x) & 0xffff) + +#define TIMEOUT 1000 +static int enetc_mdio_wait_complete(struct enetc_mdio_regs __iomem *regs) +{ + u32 val; + + return readx_poll_timeout(enetc_rd_reg, ®s->mdio_cfg, val, + !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT); +} + +static int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, + u16 value) +{ + struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + u32 mdio_ctl, mdio_cfg; + u16 dev_addr; + int ret; + + mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG; + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_cfg |= MDIO_CFG_ENC45; + } else { + /* clause 22 (ie 1G) */ + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + } + + enetc_wr_reg(®s->mdio_cfg, mdio_cfg); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + /* set port and dev addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_wr_reg(®s->mdio_ctl, mdio_ctl); + + /* set the register address */ + if (regnum & MII_ADDR_C45) { + enetc_wr_reg(®s->mdio_addr, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + } + + /* write the value */ + enetc_wr_reg(®s->mdio_data, MDIO_DATA(value)); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + return 0; +} + +static int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct enetc_mdio_regs __iomem *regs = bus_to_enetc_regs(bus); + u32 mdio_ctl, mdio_cfg; + u16 dev_addr, value; + int ret; + + mdio_cfg = MDIO_CFG_CLKDIV(ENETC_MDC_DIV) | MDIO_CFG_NEG; + if (regnum & MII_ADDR_C45) { + dev_addr = (regnum >> 16) & 0x1f; + mdio_cfg |= MDIO_CFG_ENC45; + } else { + dev_addr = regnum & 0x1f; + mdio_cfg &= ~MDIO_CFG_ENC45; + } + + enetc_wr_reg(®s->mdio_cfg, mdio_cfg); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + /* set port and device addr */ + mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); + enetc_wr_reg(®s->mdio_ctl, mdio_ctl); + + /* set the register address */ + if (regnum & MII_ADDR_C45) { + enetc_wr_reg(®s->mdio_addr, regnum & 0xffff); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + } + + /* initiate the read */ + enetc_wr_reg(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); + + ret = enetc_mdio_wait_complete(regs); + if (ret) + return ret; + + /* return all Fs if nothing 
was there */ + if (enetc_rd_reg(®s->mdio_cfg) & MDIO_CFG_RD_ER) { + dev_dbg(&bus->dev, + "Error while reading PHY%d reg at %d.%hhu\n", + phy_id, dev_addr, regnum); + return 0xffff; + } + + value = enetc_rd_reg(®s->mdio_data) & 0xffff; + + return value; +} + +int enetc_mdio_probe(struct enetc_pf *pf) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_regs __iomem *regs; + struct device_node *np; + struct mii_bus *bus; + int ret; + + bus = mdiobus_alloc_size(sizeof(regs)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC MDIO Bus"; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + + /* store the enetc mdio base address for this bus */ + regs = pf->si->hw.port + ENETC_MDIO_REG_OFFSET; + bus->priv = regs; + + np = of_get_child_by_name(dev->of_node, "mdio"); + if (!np) { + dev_err(dev, "MDIO node missing\n"); + ret = -EINVAL; + goto err_registration; + } + + ret = of_mdiobus_register(bus, np); + if (ret) { + of_node_put(np); + dev_err(dev, "cannot register MDIO bus\n"); + goto err_registration; + } + + of_node_put(np); + pf->mdio = bus; + + return 0; + +err_registration: + mdiobus_free(bus); + + return ret; +} + +void enetc_mdio_remove(struct enetc_pf *pf) +{ + if (pf->mdio) { + mdiobus_unregister(pf->mdio); + mdiobus_free(pf->mdio); + } +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_msg.c b/drivers/net/ethernet/freescale/enetc/enetc_msg.c new file mode 100644 index 000000000000..40d22ebe9224 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_msg.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include "enetc_pf.h" + +static void enetc_msg_disable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + /* disable MR int source(s) */ + enetc_wr(hw, ENETC_PSIIER, psiier & ~ENETC_PSIIER_MR_MASK); +} + +static void enetc_msg_enable_mr_int(struct enetc_hw *hw) +{ + u32 psiier = enetc_rd(hw, ENETC_PSIIER); + + enetc_wr(hw, ENETC_PSIIER, psiier | ENETC_PSIIER_MR_MASK); +} + +static irqreturn_t enetc_msg_psi_msix(int irq, void *data) +{ + struct enetc_si *si = (struct enetc_si *)data; + struct enetc_pf *pf = enetc_si_priv(si); + + enetc_msg_disable_mr_int(&si->hw); + schedule_work(&pf->msg_task); + + return IRQ_HANDLED; +} + +static void enetc_msg_task(struct work_struct *work) +{ + struct enetc_pf *pf = container_of(work, struct enetc_pf, msg_task); + struct enetc_hw *hw = &pf->si->hw; + unsigned long mr_mask; + int i; + + for (;;) { + mr_mask = enetc_rd(hw, ENETC_PSIMSGRR) & ENETC_PSIMSGRR_MR_MASK; + if (!mr_mask) { + /* re-arm MR interrupts, w1c the IDR reg */ + enetc_wr(hw, ENETC_PSIIDR, ENETC_PSIIER_MR_MASK); + enetc_msg_enable_mr_int(hw); + return; + } + + for (i = 0; i < pf->num_vfs; i++) { + u32 psimsgrr; + u16 msg_code; + + if (!(ENETC_PSIMSGRR_MR(i) & mr_mask)) + continue; + + enetc_msg_handle_rxmsg(pf, i, &msg_code); + + psimsgrr = ENETC_SIMSGSR_SET_MC(msg_code); + psimsgrr |= ENETC_PSIMSGRR_MR(i); /* w1c */ + enetc_wr(hw, ENETC_PSIMSGRR, psimsgrr); + } + } +} + +/* Init */ +static int enetc_msg_alloc_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct device *dev = &si->pdev->dev; + struct enetc_hw *hw = &si->hw; + struct enetc_msg_swbd *msg; + u32 val; + + msg = &pf->rxmsg[idx]; + /* allocate and set receive buffer */ + msg->size = ENETC_DEFAULT_MSG_SIZE; + + msg->vaddr = dma_alloc_coherent(dev, msg->size, &msg->dma, + 
GFP_KERNEL); + if (!msg->vaddr) { + dev_err(dev, "msg: fail to alloc dma buffer of size: %d\n", + msg->size); + return -ENOMEM; + } + + /* set multiple of 32 bytes */ + val = lower_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), val); + val = upper_32_bits(msg->dma); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), val); + + return 0; +} + +static void enetc_msg_free_mbx(struct enetc_si *si, int idx) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + struct enetc_msg_swbd *msg; + + msg = &pf->rxmsg[idx]; + dma_free_coherent(&si->pdev->dev, msg->size, msg->vaddr, msg->dma); + memset(msg, 0, sizeof(*msg)); + + enetc_wr(hw, ENETC_PSIVMSGRCVAR0(idx), 0); + enetc_wr(hw, ENETC_PSIVMSGRCVAR1(idx), 0); +} + +int enetc_msg_psi_init(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int vector, i, err; + + /* register message passing interrupt handler */ + snprintf(pf->msg_int_name, sizeof(pf->msg_int_name), "%s-vfmsg", + si->ndev->name); + vector = pci_irq_vector(si->pdev, ENETC_SI_INT_IDX); + err = request_irq(vector, enetc_msg_psi_msix, 0, pf->msg_int_name, si); + if (err) { + dev_err(&si->pdev->dev, + "PSI messaging: request_irq() failed!\n"); + return err; + } + + /* set one IRQ entry for PSI message receive notification (SI int) */ + enetc_wr(&si->hw, ENETC_SIMSIVR, ENETC_SI_INT_IDX); + + /* initialize PSI mailbox */ + INIT_WORK(&pf->msg_task, enetc_msg_task); + + for (i = 0; i < pf->num_vfs; i++) { + err = enetc_msg_alloc_mbx(si, i); + if (err) + goto err_init_mbx; + } + + /* enable MR interrupts */ + enetc_msg_enable_mr_int(&si->hw); + + return 0; + +err_init_mbx: + for (i--; i >= 0; i--) + enetc_msg_free_mbx(si, i); + + free_irq(vector, si); + + return err; +} + +void enetc_msg_psi_free(struct enetc_pf *pf) +{ + struct enetc_si *si = pf->si; + int i; + + cancel_work_sync(&pf->msg_task); + + /* disable MR interrupts */ + enetc_msg_disable_mr_int(&si->hw); + + for (i = 0; i < pf->num_vfs; i++) + enetc_msg_free_mbx(si, i); + + /* de-register message passing interrupt handler */ + free_irq(pci_irq_vector(si->pdev, ENETC_SI_INT_IDX), si); +} diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c new file mode 100644 index 000000000000..15876a6e7598 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -0,0 +1,943 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include +#include +#include +#include "enetc_pf.h" + +#define ENETC_DRV_VER_MAJ 1 +#define ENETC_DRV_VER_MIN 0 + +#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." 
\ + __stringify(ENETC_DRV_VER_MIN) +static const char enetc_drv_ver[] = ENETC_DRV_VER_STR; +#define ENETC_DRV_NAME_STR "ENETC PF driver" +static const char enetc_drv_name[] = ENETC_DRV_NAME_STR; + +static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr) +{ + u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si)); + u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si)); + + *(u32 *)addr = upper; + *(u16 *)(addr + 4) = lower; +} + +static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si, + const u8 *addr) +{ + u32 upper = *(const u32 *)addr; + u16 lower = *(const u16 *)(addr + 4); + + __raw_writel(upper, hw->port + ENETC_PSIPMAR0(si)); + __raw_writew(lower, hw->port + ENETC_PSIPMAR1(si)); +} + +static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len); + enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data); + + return 0; +} + +static void enetc_set_vlan_promisc(struct enetc_hw *hw, char si_map) +{ + u32 val = enetc_port_rd(hw, ENETC_PSIPVMR); + + val &= ~ENETC_PSIPVMR_SET_VP(ENETC_VLAN_PROMISC_MAP_ALL); + enetc_port_wr(hw, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VP(si_map) | val); +} + +static bool enetc_si_vlan_promisc_is_on(struct enetc_pf *pf, int si_idx) +{ + return pf->vlan_promisc_simap & BIT(si_idx); +} + +static bool enetc_vlan_filter_is_on(struct enetc_pf *pf) +{ + int i; + + for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) + return true; + + return false; +} + +static void enetc_enable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + pf->vlan_promisc_simap |= BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_disable_si_vlan_promisc(struct enetc_pf *pf, int si_idx) +{ + pf->vlan_promisc_simap &= ~BIT(si_idx); + enetc_set_vlan_promisc(&pf->si->hw, pf->vlan_promisc_simap); +} + +static void enetc_set_isol_vlan(struct enetc_hw *hw, int si, u16 vlan, u8 qos) +{ + u32 val = 0; + + if (vlan) + val = ENETC_PSIVLAN_EN | ENETC_PSIVLAN_SET_QOS(qos) | vlan; + + enetc_port_wr(hw, ENETC_PSIVLANR(si), val); +} + +static int enetc_mac_addr_hash_idx(const u8 *addr) +{ + u64 fold = __swab64(ether_addr_to_u64(addr)) >> 16; + u64 mask = 0; + int res = 0; + int i; + + for (i = 0; i < 8; i++) + mask |= BIT_ULL(i * 6); + + for (i = 0; i < 6; i++) + res |= (hweight64(fold & (mask << i)) & 0x1) << i; + + return res; +} + +static void enetc_reset_mac_addr_filter(struct enetc_mac_filter *filter) +{ + filter->mac_addr_cnt = 0; + + bitmap_zero(filter->mac_hash_table, + ENETC_MADDR_HASH_TBL_SZ); +} + +static void enetc_add_mac_addr_em_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + /* add exact match addr */ + ether_addr_copy(filter->mac_addr, addr); + filter->mac_addr_cnt++; +} + +static void enetc_add_mac_addr_ht_filter(struct enetc_mac_filter *filter, + const unsigned char *addr) +{ + int idx = enetc_mac_addr_hash_idx(addr); + + /* add hash table entry */ + __set_bit(idx, filter->mac_hash_table); + filter->mac_addr_cnt++; +} + +static void enetc_clear_mac_ht_flt(struct enetc_si *si, int si_idx, int type) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), 0); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), 0); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), 
0); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), 0); + } +} + +static void enetc_set_mac_ht_flt(struct enetc_si *si, int si_idx, int type, + u32 *hash) +{ + bool err = si->errata & ENETC_ERR_UCMCSWP; + + if (type == UC) { + enetc_port_wr(&si->hw, ENETC_PSIUMHFR0(si_idx, err), *hash); + enetc_port_wr(&si->hw, ENETC_PSIUMHFR1(si_idx), *(hash + 1)); + } else { /* MC */ + enetc_port_wr(&si->hw, ENETC_PSIMMHFR0(si_idx, err), *hash); + enetc_port_wr(&si->hw, ENETC_PSIMMHFR1(si_idx), *(hash + 1)); + } +} + +static void enetc_sync_mac_filters(struct enetc_pf *pf) +{ + struct enetc_mac_filter *f = pf->mac_filter; + struct enetc_si *si = pf->si; + int i, pos; + + pos = EMETC_MAC_ADDR_FILT_RES; + + for (i = 0; i < MADDR_TYPE; i++, f++) { + bool em = (f->mac_addr_cnt == 1) && (i == UC); + bool clear = !f->mac_addr_cnt; + + if (clear) { + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_clear_mac_ht_flt(si, 0, i); + continue; + } + + /* exact match filter */ + if (em) { + int err; + + enetc_clear_mac_ht_flt(si, 0, UC); + + err = enetc_set_mac_flt_entry(si, pos, f->mac_addr, + BIT(0)); + if (!err) + continue; + + /* fallback to HT filtering */ + dev_warn(&si->pdev->dev, "fallback to HT filt (%d)\n", + err); + } + + /* hash table filter, clear EM filter for UC entries */ + if (i == UC) + enetc_clear_mac_flt_entry(si, pos); + + enetc_set_mac_ht_flt(si, 0, i, (u32 *)f->mac_hash_table); + } +} + +static void enetc_pf_set_rx_mode(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct enetc_hw *hw = &priv->si->hw; + bool uprom = false, mprom = false; + struct enetc_mac_filter *filter; + struct netdev_hw_addr *ha; + u32 psipmr = 0; + bool em; + + if (ndev->flags & IFF_PROMISC) { + /* enable promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); + uprom = true; + mprom = true; + /* enable VLAN promisc mode for SI0 */ + if (!enetc_si_vlan_promisc_is_on(pf, 0)) + enetc_enable_si_vlan_promisc(pf, 0); + + } else if (ndev->flags & IFF_ALLMULTI) { + /* enable multi cast promisc mode for SI0 (PF) */ + psipmr = ENETC_PSIPMR_SET_MP(0); + mprom = true; + } + + /* first 2 filter entries belong to PF */ + if (!uprom) { + /* Update unicast filters */ + filter = &pf->mac_filter[UC]; + enetc_reset_mac_addr_filter(filter); + + em = (netdev_uc_count(ndev) == 1); + netdev_for_each_uc_addr(ha, ndev) { + if (em) { + enetc_add_mac_addr_em_filter(filter, ha->addr); + break; + } + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!mprom) { + /* Update multicast filters */ + filter = &pf->mac_filter[MC]; + enetc_reset_mac_addr_filter(filter); + + netdev_for_each_mc_addr(ha, ndev) { + if (!is_multicast_ether_addr(ha->addr)) + continue; + + enetc_add_mac_addr_ht_filter(filter, ha->addr); + } + } + + if (!uprom || !mprom) + /* update PF entries */ + enetc_sync_mac_filters(pf); + + psipmr |= enetc_port_rd(hw, ENETC_PSIPMR) & + ~(ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0)); + enetc_port_wr(hw, ENETC_PSIPMR, psipmr); +} + +static void enetc_set_vlan_ht_filter(struct enetc_hw *hw, int si_idx, + u32 *hash) +{ + enetc_port_wr(hw, ENETC_PSIVHFR0(si_idx), *hash); + enetc_port_wr(hw, ENETC_PSIVHFR1(si_idx), *(hash + 1)); +} + +static int enetc_vid_hash_idx(unsigned int vid) +{ + int res = 0; + int i; + + for (i = 0; i < 6; i++) + res |= (hweight8(vid & (BIT(i) | BIT(i + 6))) & 0x1) << i; + + return res; +} + +static void enetc_sync_vlan_ht_filter(struct enetc_pf *pf, bool rehash) +{ + int i; + 
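+ /* When rehash is requested (on VID removal), rebuild the 64-entry hash bitmap from scratch out of active_vlans: enetc_vid_hash_idx() folds the 12-bit VID (bit i XOR bit i+6) down to a 6-bit index, so several VIDs can share one bit and a stale bit can only be cleared by a full rebuild. */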
+ if (rehash) { + bitmap_zero(pf->vlan_ht_filter, ENETC_VLAN_HT_SIZE); + + for_each_set_bit(i, pf->active_vlans, VLAN_N_VID) { + int hidx = enetc_vid_hash_idx(i); + + __set_bit(hidx, pf->vlan_ht_filter); + } + } + + enetc_set_vlan_ht_filter(&pf->si->hw, 0, (u32 *)pf->vlan_ht_filter); +} + +static int enetc_vlan_rx_add_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + int idx; + + if (enetc_si_vlan_promisc_is_on(pf, 0)) + enetc_disable_si_vlan_promisc(pf, 0); + + __set_bit(vid, pf->active_vlans); + + idx = enetc_vid_hash_idx(vid); + if (!__test_and_set_bit(idx, pf->vlan_ht_filter)) + enetc_sync_vlan_ht_filter(pf, false); + + return 0; +} + +static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + __clear_bit(vid, pf->active_vlans); + enetc_sync_vlan_ht_filter(pf, true); + + if (!enetc_vlan_filter_is_on(pf)) + enetc_enable_si_vlan_promisc(pf, 0); + + return 0; +} + +static void enetc_set_loopback(struct net_device *ndev, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_hw *hw = &priv->si->hw; + u32 reg; + + reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE); + if (reg & ENETC_PMO_IFM_RG) { + /* RGMII mode */ + reg = (reg & ~ENETC_PM0_IFM_RLP) | + (en ? ENETC_PM0_IFM_RLP : 0); + enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg); + } else { + /* assume SGMII mode */ + reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG); + reg = (reg & ~ENETC_PM0_CMD_XGLP) | + (en ? ENETC_PM0_CMD_XGLP : 0); + reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) | + (en ? ENETC_PM0_CMD_PHY_TX_EN : 0); + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg); + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg); + } +} + +static int enetc_pf_set_vf_mac(struct net_device *ndev, int vf, u8 *mac) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct enetc_vf_state *vf_state; + + if (vf >= pf->total_vfs) + return -EINVAL; + + if (!is_valid_ether_addr(mac)) + return -EADDRNOTAVAIL; + + vf_state = &pf->vf_state[vf]; + vf_state->flags |= ENETC_VF_FLAG_PF_SET_MAC; + enetc_pf_set_primary_mac_addr(&priv->si->hw, vf + 1, mac); + return 0; +} + +static int enetc_pf_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, + u8 qos, __be16 proto) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + + if (priv->si->errata & ENETC_ERR_VLAN_ISOL) + return -EOPNOTSUPP; + + if (vf >= pf->total_vfs) + return -EINVAL; + + if (proto != htons(ETH_P_8021Q)) + /* only C-tags supported for now */ + return -EPROTONOSUPPORT; + + enetc_set_isol_vlan(&priv->si->hw, vf + 1, vlan, qos); + return 0; +} + +static int enetc_pf_set_vf_spoofchk(struct net_device *ndev, int vf, bool en) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct enetc_pf *pf = enetc_si_priv(priv->si); + u32 cfgr; + + if (vf >= pf->total_vfs) + return -EINVAL; + + cfgr = enetc_port_rd(&priv->si->hw, ENETC_PSICFGR0(vf + 1)); + cfgr = (cfgr & ~ENETC_PSICFGR0_ASE) | (en ? 
ENETC_PSICFGR0_ASE : 0); + enetc_port_wr(&priv->si->hw, ENETC_PSICFGR0(vf + 1), cfgr); + + return 0; +} + +static void enetc_port_setup_primary_mac_address(struct enetc_si *si) +{ + unsigned char mac_addr[MAX_ADDR_LEN]; + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int i; + + /* check MAC addresses for PF and all VFs, if any is 0 set it ro rand */ + for (i = 0; i < pf->total_vfs + 1; i++) { + enetc_pf_get_primary_mac_addr(hw, i, mac_addr); + if (!is_zero_ether_addr(mac_addr)) + continue; + eth_random_addr(mac_addr); + dev_info(&si->pdev->dev, "no MAC address specified for SI%d, using %pM\n", + i, mac_addr); + enetc_pf_set_primary_mac_addr(hw, i, mac_addr); + } +} + +static void enetc_port_assign_rfs_entries(struct enetc_si *si) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int num_entries, vf_entries, i; + u32 val; + + /* split RFS entries between functions */ + val = enetc_port_rd(hw, ENETC_PRFSCAPR); + num_entries = ENETC_PRFSCAPR_GET_NUM_RFS(val); + vf_entries = num_entries / (pf->total_vfs + 1); + + for (i = 0; i < pf->total_vfs; i++) + enetc_port_wr(hw, ENETC_PSIRFSCFGR(i + 1), vf_entries); + enetc_port_wr(hw, ENETC_PSIRFSCFGR(0), + num_entries - vf_entries * pf->total_vfs); + + /* enable RFS on port */ + enetc_port_wr(hw, ENETC_PRFSMR, ENETC_PRFSMR_RFSE); +} + +static void enetc_port_si_configure(struct enetc_si *si) +{ + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_hw *hw = &si->hw; + int num_rings, i; + u32 val; + + val = enetc_port_rd(hw, ENETC_PCAPR0); + num_rings = min(ENETC_PCAPR0_RXBDR(val), ENETC_PCAPR0_TXBDR(val)); + + val = ENETC_PSICFGR0_SET_TXBDR(ENETC_PF_NUM_RINGS); + val |= ENETC_PSICFGR0_SET_RXBDR(ENETC_PF_NUM_RINGS); + + if (unlikely(num_rings < ENETC_PF_NUM_RINGS)) { + val = ENETC_PSICFGR0_SET_TXBDR(num_rings); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rings); + + dev_warn(&si->pdev->dev, "Found %d rings, expected %d!\n", + num_rings, ENETC_PF_NUM_RINGS); + + num_rings = 0; + } + + /* Add default one-time settings for SI0 (PF) */ + val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + + enetc_port_wr(hw, ENETC_PSICFGR0(0), val); + + if (num_rings) + num_rings -= ENETC_PF_NUM_RINGS; + + /* Configure the SIs for each available VF */ + val = ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + val |= ENETC_PSICFGR0_VTE | ENETC_PSICFGR0_SIVIE; + + if (num_rings) { + num_rings /= pf->total_vfs; + val |= ENETC_PSICFGR0_SET_TXBDR(num_rings); + val |= ENETC_PSICFGR0_SET_RXBDR(num_rings); + } + + for (i = 0; i < pf->total_vfs; i++) + enetc_port_wr(hw, ENETC_PSICFGR0(i + 1), val); + + /* Port level VLAN settings */ + val = ENETC_PVCLCTR_OVTPIDL(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); + enetc_port_wr(hw, ENETC_PVCLCTR, val); + /* use outer tag for VLAN filtering */ + enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS); +} + +static void enetc_configure_port_mac(struct enetc_hw *hw) +{ + enetc_port_wr(hw, ENETC_PM0_MAXFRM, + ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); + + enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE); + enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE); + + enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC | + ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); + + enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN | + ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC | + ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); + /* set auto-speed for RGMII */ + if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) + 
enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO); + if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) + enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII); +} + +static void enetc_configure_port_pmac(struct enetc_hw *hw) +{ + u32 temp; + + /* Set pMAC step lock */ + temp = enetc_port_rd(hw, ENETC_PFPMR); + enetc_port_wr(hw, ENETC_PFPMR, + temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM); + + temp = enetc_port_rd(hw, ENETC_MMCSR); + enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME); +} + +static void enetc_configure_port(struct enetc_pf *pf) +{ + u8 hash_key[ENETC_RSSHASH_KEY_SIZE]; + struct enetc_hw *hw = &pf->si->hw; + + enetc_configure_port_pmac(hw); + + enetc_configure_port_mac(hw); + + enetc_port_si_configure(pf->si); + + /* set up hash key */ + get_random_bytes(hash_key, ENETC_RSSHASH_KEY_SIZE); + enetc_set_rss_key(hw, hash_key); + + /* split up RFS entries */ + enetc_port_assign_rfs_entries(pf->si); + + /* fix-up primary MAC addresses, if not set already */ + enetc_port_setup_primary_mac_address(pf->si); + + /* enforce VLAN promisc mode for all SIs */ + pf->vlan_promisc_simap = ENETC_VLAN_PROMISC_MAP_ALL; + enetc_set_vlan_promisc(hw, pf->vlan_promisc_simap); + + enetc_port_wr(hw, ENETC_PSIPMR, 0); + + /* enable port */ + enetc_port_wr(hw, ENETC_PMR, ENETC_PMR_EN); +} + +/* Messaging */ +static u16 enetc_msg_pf_set_vf_primary_mac_addr(struct enetc_pf *pf, + int vf_id) +{ + struct enetc_vf_state *vf_state = &pf->vf_state[vf_id]; + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct enetc_msg_cmd_set_primary_mac *cmd; + struct device *dev = &pf->si->pdev->dev; + u16 cmd_id; + char *addr; + + cmd = (struct enetc_msg_cmd_set_primary_mac *)msg->vaddr; + cmd_id = cmd->header.id; + if (cmd_id != ENETC_MSG_CMD_MNG_ADD) + return ENETC_MSG_CMD_STATUS_FAIL; + + addr = cmd->mac.sa_data; + if (vf_state->flags & ENETC_VF_FLAG_PF_SET_MAC) + dev_warn(dev, "Attempt to override PF set mac addr for VF%d\n", + vf_id); + else + enetc_pf_set_primary_mac_addr(&pf->si->hw, vf_id + 1, addr); + + return ENETC_MSG_CMD_STATUS_OK; +} + +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int vf_id, u16 *status) +{ + struct enetc_msg_swbd *msg = &pf->rxmsg[vf_id]; + struct device *dev = &pf->si->pdev->dev; + struct enetc_msg_cmd_header *cmd_hdr; + u16 cmd_type; + + *status = ENETC_MSG_CMD_STATUS_OK; + cmd_hdr = (struct enetc_msg_cmd_header *)msg->vaddr; + cmd_type = cmd_hdr->type; + + switch (cmd_type) { + case ENETC_MSG_CMD_MNG_MAC: + *status = enetc_msg_pf_set_vf_primary_mac_addr(pf, vf_id); + break; + default: + dev_err(dev, "command not supported (cmd_type: 0x%x)\n", + cmd_type); + } +} + +#ifdef CONFIG_PCI_IOV +static int enetc_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + int err; + + if (!num_vfs) { + enetc_msg_psi_free(pf); + kfree(pf->vf_state); + pf->num_vfs = 0; + pci_disable_sriov(pdev); + } else { + pf->num_vfs = num_vfs; + + pf->vf_state = kcalloc(num_vfs, sizeof(struct enetc_vf_state), + GFP_KERNEL); + if (!pf->vf_state) { + pf->num_vfs = 0; + return -ENOMEM; + } + + err = enetc_msg_psi_init(pf); + if (err) { + dev_err(&pdev->dev, "enetc_msg_psi_init (%d)\n", err); + goto err_msg_psi; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + dev_err(&pdev->dev, "pci_enable_sriov err %d\n", err); + goto err_en_sriov; + } + } + + return num_vfs; + +err_en_sriov: + enetc_msg_psi_free(pf); +err_msg_psi: + kfree(pf->vf_state); + pf->num_vfs = 0; + + return err; 
+} +#else +#define enetc_sriov_configure(pdev, num_vfs) (void)0 +#endif + +static int enetc_pf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + enetc_enable_rxvlan(&priv->si->hw, 0, + !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + + if (changed & NETIF_F_HW_VLAN_CTAG_TX) + enetc_enable_txvlan(&priv->si->hw, 0, + !!(features & NETIF_F_HW_VLAN_CTAG_TX)); + + if (changed & NETIF_F_LOOPBACK) + enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK)); + + return enetc_set_features(ndev, features); +} + +static const struct net_device_ops enetc_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_pf_set_mac_addr, + .ndo_set_rx_mode = enetc_pf_set_rx_mode, + .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = enetc_vlan_rx_del_vid, + .ndo_set_vf_mac = enetc_pf_set_vf_mac, + .ndo_set_vf_vlan = enetc_pf_set_vf_vlan, + .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, + .ndo_set_features = enetc_pf_set_features, +}; + +static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_WOL << 1) - 1; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_LOOPBACK; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + if (si->num_rss) + ndev->hw_features |= NETIF_F_RXHASH; + + if (si->errata & ENETC_ERR_TXCSUM) { + ndev->hw_features &= ~NETIF_F_HW_CSUM; + ndev->features &= ~NETIF_F_HW_CSUM; + } + + ndev->priv_flags |= IFF_UNICAST_FLT; + + /* pick up primary MAC address from SI */ + enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); +} + +static int enetc_of_get_phy(struct enetc_ndev_priv *priv) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + struct device_node *np = priv->dev->of_node; + int err; + + if (!np) { + dev_err(priv->dev, "missing ENETC port node\n"); + return -ENODEV; + } + + priv->phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!priv->phy_node) { + if (!of_phy_is_fixed_link(np)) { + dev_err(priv->dev, "PHY not specified\n"); + return -ENODEV; + } + + err = of_phy_register_fixed_link(np); + if (err < 0) { + dev_err(priv->dev, "fixed link registration failed\n"); + return err; + } + + priv->phy_node = of_node_get(np); + } + + if (!of_phy_is_fixed_link(np)) { + err = enetc_mdio_probe(pf); + if (err) { + of_node_put(priv->phy_node); + return err; + } + } + + priv->if_mode = of_get_phy_mode(np); + if (priv->if_mode < 0) { + dev_err(priv->dev, "missing phy type\n"); + of_node_put(priv->phy_node); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + else + enetc_mdio_remove(pf); + + return -EINVAL; + } + + return 0; +} + +static void enetc_of_put_phy(struct enetc_ndev_priv *priv) +{ + struct device_node *np = priv->dev->of_node; + + if (np && of_phy_is_fixed_link(np)) + 
of_phy_deregister_fixed_link(np); + if (priv->phy_node) + of_node_put(priv->phy_node); +} + +static int enetc_pf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + struct enetc_pf *pf; + int err; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf)); + if (err) { + dev_err(&pdev->dev, "PCI probing failed\n"); + return err; + } + + si = pci_get_drvdata(pdev); + if (!si->hw.port || !si->hw.global) { + err = -ENODEV; + dev_err(&pdev->dev, "could not map PF space, probing a VF?\n"); + goto err_map_pf_space; + } + + pf = enetc_si_priv(si); + pf->si = si; + pf->total_vfs = pci_sriov_get_totalvfs(pdev); + + enetc_configure_port(pf); + + enetc_get_si_caps(si); + + ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS); + if (!ndev) { + err = -ENOMEM; + dev_err(&pdev->dev, "netdev creation failed\n"); + goto err_alloc_netdev; + } + + enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops); + + priv = netdev_priv(ndev); + + enetc_init_si_rings_params(priv); + + err = enetc_alloc_si_resources(priv); + if (err) { + dev_err(&pdev->dev, "SI resource alloc failed\n"); + goto err_alloc_si_res; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); + goto err_alloc_msix; + } + + err = enetc_of_get_phy(priv); + if (err) + dev_warn(&pdev->dev, "Fallback to PHY-less operation\n"); + + err = register_netdev(ndev); + if (err) + goto err_reg_netdev; + + netif_carrier_off(ndev); + + netif_info(priv, probe, ndev, "%s v%s\n", + enetc_drv_name, enetc_drv_ver); + + return 0; + +err_reg_netdev: + enetc_of_put_phy(priv); + enetc_free_msix(priv); +err_alloc_msix: + enetc_free_si_resources(priv); +err_alloc_si_res: + si->ndev = NULL; + free_netdev(ndev); +err_alloc_netdev: +err_map_pf_space: + enetc_pci_remove(pdev); + + return err; +} + +static void enetc_pf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_pf *pf = enetc_si_priv(si); + struct enetc_ndev_priv *priv; + + if (pf->num_vfs) + enetc_sriov_configure(pdev, 0); + + priv = netdev_priv(si->ndev); + netif_info(priv, drv, si->ndev, "%s v%s remove\n", + enetc_drv_name, enetc_drv_ver); + + unregister_netdev(si->ndev); + + enetc_mdio_remove(pf); + enetc_of_put_phy(priv); + + enetc_free_msix(priv); + + enetc_free_si_resources(priv); + + free_netdev(si->ndev); + + enetc_pci_remove(pdev); +} + +static const struct pci_device_id enetc_pf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc_pf_id_table); + +static struct pci_driver enetc_pf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_pf_id_table, + .probe = enetc_pf_probe, + .remove = enetc_pf_remove, +#ifdef CONFIG_PCI_IOV + .sriov_configure = enetc_sriov_configure, +#endif +}; +module_pci_driver(enetc_pf_driver); + +MODULE_DESCRIPTION(ENETC_DRV_NAME_STR); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(ENETC_DRV_VER_STR); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h new file mode 100644 index 000000000000..10dd1b53bb08 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2017-2019 NXP */ + +#include "enetc.h" + +#define ENETC_PF_NUM_RINGS 8 + +enum enetc_mac_addr_type {UC, MC, MADDR_TYPE}; +#define ENETC_MAX_NUM_MAC_FLT ((ENETC_MAX_NUM_VFS + 1) * MADDR_TYPE) + +#define ENETC_MADDR_HASH_TBL_SZ 64 +struct enetc_mac_filter { + union { + char mac_addr[ETH_ALEN]; + DECLARE_BITMAP(mac_hash_table, ENETC_MADDR_HASH_TBL_SZ); + }; + int mac_addr_cnt; +}; + +#define ENETC_VLAN_HT_SIZE 64 + +enum enetc_vf_flags { + ENETC_VF_FLAG_PF_SET_MAC = BIT(0), +}; + +struct enetc_vf_state { + enum enetc_vf_flags flags; +}; + +struct enetc_pf { + struct enetc_si *si; + int num_vfs; /* number of active VFs, after sriov_init */ + int total_vfs; /* max number of VFs, set for PF at probe */ + struct enetc_vf_state *vf_state; + + struct enetc_mac_filter mac_filter[ENETC_MAX_NUM_MAC_FLT]; + + struct enetc_msg_swbd rxmsg[ENETC_MAX_NUM_VFS]; + struct work_struct msg_task; + char msg_int_name[ENETC_INT_NAME_MAX]; + + char vlan_promisc_simap; /* bitmap of SIs in VLAN promisc mode */ + DECLARE_BITMAP(vlan_ht_filter, ENETC_VLAN_HT_SIZE); + DECLARE_BITMAP(active_vlans, VLAN_N_VID); + + struct mii_bus *mdio; /* saved for cleanup */ +}; + +int enetc_msg_psi_init(struct enetc_pf *pf); +void enetc_msg_psi_free(struct enetc_pf *pf); +void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status); + +/* MDIO */ +int enetc_mdio_probe(struct enetc_pf *pf); +void enetc_mdio_remove(struct enetc_pf *pf); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c new file mode 100644 index 000000000000..8c1497e7d9c5 --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2019 NXP */ + +#include +#include +#include + +#include "enetc.h" + +static struct ptp_clock_info enetc_ptp_caps = { + .owner = THIS_MODULE, + .name = "ENETC PTP clock", + .max_adj = 512000, + .n_alarm = 0, + .n_ext_ts = 2, + .n_per_out = 0, + .n_pins = 0, + .pps = 1, + .adjfine = ptp_qoriq_adjfine, + .adjtime = ptp_qoriq_adjtime, + .gettime64 = ptp_qoriq_gettime, + .settime64 = ptp_qoriq_settime, + .enable = ptp_qoriq_enable, +}; + +static int enetc_ptp_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct ptp_qoriq *ptp_qoriq; + void __iomem *base; + int err, len, n; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + return err; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, 
DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_mem_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err); + goto err_pci_mem_reg; + } + + pci_set_master(pdev); + + ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL); + if (!ptp_qoriq) { + err = -ENOMEM; + goto err_alloc_ptp; + } + + len = pci_resource_len(pdev, ENETC_BAR_REGS); + + base = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len); + if (!base) { + err = -ENXIO; + dev_err(&pdev->dev, "ioremap() failed\n"); + goto err_ioremap; + } + + /* Allocate 1 interrupt */ + n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (n != 1) { + err = -EPERM; + goto err_irq; + } + + ptp_qoriq->irq = pci_irq_vector(pdev, 0); + + err = request_irq(ptp_qoriq->irq, ptp_qoriq_isr, 0, DRIVER, ptp_qoriq); + if (err) { + dev_err(&pdev->dev, "request_irq() failed!\n"); + goto err_irq; + } + + ptp_qoriq->dev = &pdev->dev; + + err = ptp_qoriq_init(ptp_qoriq, base, &enetc_ptp_caps); + if (err) + goto err_no_clock; + + pci_set_drvdata(pdev, ptp_qoriq); + + return 0; + +err_no_clock: + free_irq(ptp_qoriq->irq, ptp_qoriq); +err_irq: + iounmap(base); +err_ioremap: + kfree(ptp_qoriq); +err_alloc_ptp: + pci_release_mem_regions(pdev); +err_pci_mem_reg: +err_dma: + pci_disable_device(pdev); + + return err; +} + +static void enetc_ptp_remove(struct pci_dev *pdev) +{ + struct ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev); + + ptp_qoriq_free(ptp_qoriq); + kfree(ptp_qoriq); + + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id enetc_ptp_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PTP) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, enetc_ptp_id_table); + +static struct pci_driver enetc_ptp_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_ptp_id_table, + .probe = enetc_ptp_probe, + .remove = enetc_ptp_remove, +}; +module_pci_driver(enetc_ptp_driver); + +MODULE_DESCRIPTION("ENETC PTP clock driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c new file mode 100644 index 000000000000..64bebee9f52a --- /dev/null +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2017-2019 NXP */ + +#include +#include "enetc.h" + +#define ENETC_DRV_VER_MAJ 1 +#define ENETC_DRV_VER_MIN 0 + +#define ENETC_DRV_VER_STR __stringify(ENETC_DRV_VER_MAJ) "." 
\ + __stringify(ENETC_DRV_VER_MIN) +static const char enetc_drv_ver[] = ENETC_DRV_VER_STR; +#define ENETC_DRV_NAME_STR "ENETC VF driver" +static const char enetc_drv_name[] = ENETC_DRV_NAME_STR; + +/* Messaging */ +static void enetc_msg_vsi_write_msg(struct enetc_hw *hw, + struct enetc_msg_swbd *msg) +{ + u32 val; + + val = enetc_vsi_set_msize(msg->size) | lower_32_bits(msg->dma); + enetc_wr(hw, ENETC_VSIMSGSNDAR1, upper_32_bits(msg->dma)); + enetc_wr(hw, ENETC_VSIMSGSNDAR0, val); +} + +static int enetc_msg_vsi_send(struct enetc_si *si, struct enetc_msg_swbd *msg) +{ + int timeout = 100; + u32 vsimsgsr; + + enetc_msg_vsi_write_msg(&si->hw, msg); + + do { + vsimsgsr = enetc_rd(&si->hw, ENETC_VSIMSGSR); + if (!(vsimsgsr & ENETC_VSIMSGSR_MB)) + break; + + usleep_range(1000, 2000); + } while (--timeout); + + if (!timeout) + return -ETIMEDOUT; + + /* check for message delivery error */ + if (vsimsgsr & ENETC_VSIMSGSR_MS) { + dev_err(&si->pdev->dev, "VSI command execute error: %d\n", + ENETC_SIMSGSR_GET_MC(vsimsgsr)); + return -EIO; + } + + return 0; +} + +static int enetc_msg_vsi_set_primary_mac_addr(struct enetc_ndev_priv *priv, + struct sockaddr *saddr) +{ + struct enetc_msg_cmd_set_primary_mac *cmd; + struct enetc_msg_swbd msg; + int err; + + msg.size = ALIGN(sizeof(struct enetc_msg_cmd_set_primary_mac), 64); + msg.vaddr = dma_alloc_coherent(priv->dev, msg.size, &msg.dma, + GFP_KERNEL); + if (!msg.vaddr) { + dev_err(priv->dev, "Failed to alloc Tx msg (size: %d)\n", + msg.size); + return -ENOMEM; + } + + cmd = (struct enetc_msg_cmd_set_primary_mac *)msg.vaddr; + cmd->header.type = ENETC_MSG_CMD_MNG_MAC; + cmd->header.id = ENETC_MSG_CMD_MNG_ADD; + memcpy(&cmd->mac, saddr, sizeof(struct sockaddr)); + + /* send the command and wait */ + err = enetc_msg_vsi_send(priv->si, &msg); + + dma_free_coherent(priv->dev, msg.size, msg.vaddr, msg.dma); + + return err; +} + +static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr); + if (err) + return err; + + return 0; +} + +static int enetc_vf_set_features(struct net_device *ndev, + netdev_features_t features) +{ + return enetc_set_features(ndev, features); +} + +/* Probing/ Init */ +static const struct net_device_ops enetc_ndev_ops = { + .ndo_open = enetc_open, + .ndo_stop = enetc_close, + .ndo_start_xmit = enetc_xmit, + .ndo_get_stats = enetc_get_stats, + .ndo_set_mac_address = enetc_vf_set_mac_addr, + .ndo_set_features = enetc_vf_set_features, +}; + +static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev, + const struct net_device_ops *ndev_ops) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + SET_NETDEV_DEV(ndev, &si->pdev->dev); + priv->ndev = ndev; + priv->si = si; + priv->dev = &si->pdev->dev; + si->ndev = ndev; + + priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1; + ndev->netdev_ops = ndev_ops; + enetc_set_ethtool_ops(ndev); + ndev->watchdog_timeo = 5 * HZ; + ndev->max_mtu = ENETC_MAX_MTU; + + ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | + NETIF_F_RXCSUM | NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + if (si->num_rss) + ndev->hw_features |= NETIF_F_RXHASH; + + if (si->errata & ENETC_ERR_TXCSUM) { + ndev->hw_features &= ~NETIF_F_HW_CSUM; + 
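+ /* also clear it from the enabled feature set so checksum offload is never requested on parts affected by the TXCSUM erratum */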
ndev->features &= ~NETIF_F_HW_CSUM; + } + + /* pick up primary MAC address from SI */ + enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr); +} + +static int enetc_vf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct enetc_ndev_priv *priv; + struct net_device *ndev; + struct enetc_si *si; + int err; + + err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0); + if (err) { + dev_err(&pdev->dev, "PCI probing failed\n"); + return err; + } + + si = pci_get_drvdata(pdev); + + enetc_get_si_caps(si); + + ndev = alloc_etherdev_mq(sizeof(*priv), ENETC_MAX_NUM_TXQS); + if (!ndev) { + err = -ENOMEM; + dev_err(&pdev->dev, "netdev creation failed\n"); + goto err_alloc_netdev; + } + + enetc_vf_netdev_setup(si, ndev, &enetc_ndev_ops); + + priv = netdev_priv(ndev); + + enetc_init_si_rings_params(priv); + + err = enetc_alloc_si_resources(priv); + if (err) { + dev_err(&pdev->dev, "SI resource alloc failed\n"); + goto err_alloc_si_res; + } + + err = enetc_alloc_msix(priv); + if (err) { + dev_err(&pdev->dev, "MSIX alloc failed\n"); + goto err_alloc_msix; + } + + err = register_netdev(ndev); + if (err) + goto err_reg_netdev; + + netif_carrier_off(ndev); + + netif_info(priv, probe, ndev, "%s v%s\n", + enetc_drv_name, enetc_drv_ver); + + return 0; + +err_reg_netdev: + enetc_free_msix(priv); +err_alloc_msix: + enetc_free_si_resources(priv); +err_alloc_si_res: + si->ndev = NULL; + free_netdev(ndev); +err_alloc_netdev: + enetc_pci_remove(pdev); + + return err; +} + +static void enetc_vf_remove(struct pci_dev *pdev) +{ + struct enetc_si *si = pci_get_drvdata(pdev); + struct enetc_ndev_priv *priv; + + priv = netdev_priv(si->ndev); + netif_info(priv, drv, si->ndev, "%s v%s remove\n", + enetc_drv_name, enetc_drv_ver); + unregister_netdev(si->ndev); + + enetc_free_msix(priv); + + enetc_free_si_resources(priv); + + free_netdev(si->ndev); + + enetc_pci_remove(pdev); +} + +static const struct pci_device_id enetc_vf_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) }, + { 0, } /* End of table. 
*/ +}; +MODULE_DEVICE_TABLE(pci, enetc_vf_id_table); + +static struct pci_driver enetc_vf_driver = { + .name = KBUILD_MODNAME, + .id_table = enetc_vf_id_table, + .probe = enetc_vf_probe, + .remove = enetc_vf_remove, +}; +module_pci_driver(enetc_vf_driver); + +MODULE_DESCRIPTION(ENETC_DRV_NAME_STR); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(ENETC_DRV_VER_STR); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2370dc204202..697c2427f2b7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) +static __u32 fec_enet_register_version = 2; static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = { IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; #else +static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, @@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev, u32 *buf = (u32 *)regbuf; u32 i, off; + regs->version = fec_enet_register_version; + memset(buf, 0, regs->len); for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b90bab72efdb..c1968b3ecec8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } spin_unlock(&priv->lock); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 71f4205f14e7..3c21486c6c84 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -855,9 +855,7 @@ static int mac_probe(struct platform_device *_of_dev) if (err < 0) dev_err(dev, "fman_set_mac_active_pause() = %d\n", err); - dev_info(dev, "FMan MAC address: %02hx:%02hx:%02hx:%02hx:%02hx:%02hx\n", - mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], - mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); + dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr); priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev); if (IS_ERR(priv->eth_dev)) { diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 241325c35cb4..27ed995f439a 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1492,7 +1492,7 @@ static int gfar_get_ts_info(struct net_device *dev, struct gfar_private *priv = netdev_priv(dev); struct platform_device *ptp_dev; struct device_node *ptp_node; - struct qoriq_ptp *ptp = NULL; + struct ptp_qoriq *ptp = NULL; info->phc_index = -1; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index c3d539e209ed..eb3e65e8868f 100644 --- 
a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) u16 i, j; u8 __iomem *bd; + netdev_reset_queue(ugeth->ndev); + ug_info = ugeth->ug_info; uf_info = &ug_info->uf_info; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 3b9e74be5fbd..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_dev = dev_get_drvdata(&pdev->dev); if (!dsaf_dev) { dev_err(&pdev->dev, "dsaf_dev is NULL\n"); + put_device(&pdev->dev); return -ENODEV; } @@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", dsaf_dev->ae_dev.name); + put_device(&pdev->dev); return -ENODEV; } @@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); } + + put_device(&pdev->dev); + return 0; } EXPORT_SYMBOL(hns_dsaf_roce_reset); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 0942e4916d9d..3d07c8a7639d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -83,8 +83,9 @@ static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) else ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM; - ppe_common = devm_kzalloc(dsaf_dev->dev, sizeof(*ppe_common) + - ppe_num * sizeof(struct hns_ppe_cb), GFP_KERNEL); + ppe_common = devm_kzalloc(dsaf_dev->dev, + struct_size(ppe_common, ppe_cb, ppe_num), + GFP_KERNEL); if (!ppe_common) return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 5d64519b9b1d..6bf346c11b25 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -788,8 +788,9 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int ring_num = hns_rcb_get_ring_num(dsaf_dev); rcb_common = - devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) + - ring_num * sizeof(struct ring_pair_cb), GFP_KERNEL); + devm_kzalloc(dsaf_dev->dev, + struct_size(rcb_common, ring_pair_cb, ring_num), + GFP_KERNEL); if (!rcb_common) { dev_err(dsaf_dev->dev, "rcb common devm_kzalloc fail!\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 5b33238c6680..60e7d7ae3787 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2418,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); out_read_prop_fail: + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); free_netdev(ndev); return ret; } @@ -2447,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev) set_bit(NIC_STATE_REMOVING, &priv->state); (void)cancel_work_sync(&priv->service_task); + /* safe for ACPI FW */ + of_node_put(to_of_node(priv->fwnode)); + free_netdev(ndev); return 0; } diff --git 
a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 8e9b95871d30..ce15d2350db9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev) */ static int hns_nic_nway_reset(struct net_device *netdev) { - int ret = 0; struct phy_device *phy = netdev->phydev; - if (netif_running(netdev)) { - /* if autoneg is disabled, don't restart auto-negotiation */ - if (phy && phy->autoneg == AUTONEG_ENABLE) - ret = genphy_restart_aneg(phy); - } + if (!netif_running(netdev)) + return 0; - return ret; + if (!phy) + return -EOPNOTSUPP; + + if (phy->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + return genphy_restart_aneg(phy); } static u32 diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 691d12174902..299b277bc7ae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -21,6 +21,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */ + HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */ HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */ @@ -40,6 +41,10 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ + HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ + HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + + HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ }; /* below are per-VF mac-vlan subcodes */ @@ -60,7 +65,7 @@ enum hclge_mbx_vlan_cfg_subcode { }; #define HCLGE_MBX_MAX_MSG_SIZE 16 -#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 +#define HCLGE_MBX_MAX_RESP_DATA_SIZE 16 #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 781e5dee3c70..17ab4f4af6ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -32,6 +32,9 @@ static bool hnae3_client_match(enum hnae3_client_type client_type, void hnae3_set_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, int inited) { + if (!client || !ae_dev) + return; + switch (client->type) { case HNAE3_CLIENT_KNIC: hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited); @@ -109,6 +112,9 @@ int hnae3_register_client(struct hnae3_client *client) struct hnae3_ae_dev *ae_dev; int ret = 0; + if (!client) + return -ENODEV; + mutex_lock(&hnae3_common_lock); /* one system should only have one client for every type */ list_for_each_entry(client_tmp, &hnae3_client_list, node) { @@ -141,6 +147,9 @@ void hnae3_unregister_client(struct hnae3_client *client) { struct hnae3_ae_dev *ae_dev; + if (!client) + return; + mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { @@ -163,6 +172,9 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) struct hnae3_client *client; int ret = 0; + if (!ae_algo) + return; + 
mutex_lock(&hnae3_common_lock); list_add_tail(&ae_algo->node, &hnae3_ae_algo_list); @@ -173,8 +185,12 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) if (!id) continue; - /* ae_dev init should set flag */ + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + continue; + } ae_dev->ops = ae_algo->ops; + ret = ae_algo->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -182,6 +198,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) continue; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); /* check the client list for the match with this ae_dev type and @@ -209,6 +226,9 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) struct hnae3_ae_dev *ae_dev; struct hnae3_client *client; + if (!ae_algo) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_dev */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { @@ -238,13 +258,16 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo); * @ae_dev: the AE device * NOTE: the duplicated name will not be checked */ -void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) { const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; int ret = 0; + if (!ae_dev) + return -ENODEV; + mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); @@ -255,14 +278,13 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!id) continue; - ae_dev->ops = ae_algo->ops; - - if (!ae_dev->ops) { - dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + if (!ae_algo->ops) { + dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n"); + ret = -EOPNOTSUPP; goto out_err; } + ae_dev->ops = ae_algo->ops; - /* ae_dev init should set flag */ ret = ae_dev->ops->init_ae_dev(ae_dev); if (ret) { dev_err(&ae_dev->pdev->dev, @@ -270,6 +292,7 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) goto out_err; } + /* ae_dev init should set flag */ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); break; } @@ -285,8 +308,15 @@ void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) ret); } + mutex_unlock(&hnae3_common_lock); + + return 0; + out_err: + list_del(&ae_dev->node); mutex_unlock(&hnae3_common_lock); + + return ret; } EXPORT_SYMBOL(hnae3_register_ae_dev); @@ -299,6 +329,9 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; + if (!ae_dev) + return; + mutex_lock(&hnae3_common_lock); /* Check if there are matched ae_algo */ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 36eab37d8a40..38b430f11fc1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -87,7 +87,8 @@ struct hnae3_queue { struct hnae3_handle *handle; int tqp_index; /* index in a handle */ u32 buf_size; /* size for hnae_desc->addr, preset by AE */ - u16 desc_num; /* total number of desc */ + u16 tx_desc_num;/* total number of tx desc */ + u16 rx_desc_num;/* total number of rx desc */ }; /*hnae3 loop mode*/ @@ -124,6 +125,7 @@ enum hnae3_reset_notify_type { HNAE3_DOWN_CLIENT, HNAE3_INIT_CLIENT, HNAE3_UNINIT_CLIENT, + HNAE3_RESTORE_CLIENT, }; enum hnae3_reset_type { @@ -192,6 +194,7 @@ struct hnae3_ae_dev { const struct hnae3_ae_ops *ops; struct list_head node; u32 flag; + u8 override_pci_need_reset; /* fix to stop multiple 
reset happening */ enum hnae3_dev_type dev_type; enum hnae3_reset_type reset_type; void *priv; @@ -432,7 +435,8 @@ struct hnae3_ae_ops { struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, u16 *alloc_tqps, u16 *max_rss_size); - int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num); + int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured); void (*get_flowctrl_adv)(struct hnae3_handle *handle, u32 *flowctrl_adv); int (*set_led_id)(struct hnae3_handle *handle, @@ -459,9 +463,11 @@ struct hnae3_ae_ops { bool (*get_hw_reset_stat)(struct hnae3_handle *handle); bool (*ae_dev_resetting)(struct hnae3_handle *handle); unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); - int (*set_gro_en)(struct hnae3_handle *handle, int enable); + int (*set_gro_en)(struct hnae3_handle *handle, bool enable); u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id); void (*set_timer_task)(struct hnae3_handle *handle, bool enable); + int (*mac_connect_phy)(struct hnae3_handle *handle); + void (*mac_disconnect_phy)(struct hnae3_handle *handle); }; struct hnae3_dcb_ops { @@ -475,7 +481,6 @@ struct hnae3_dcb_ops { u8 (*getdcbx)(struct hnae3_handle *); u8 (*setdcbx)(struct hnae3_handle *, u8); - int (*map_update)(struct hnae3_handle *); int (*setup_tc)(struct hnae3_handle *, u8, u8 *); }; @@ -500,8 +505,10 @@ struct hnae3_tc_info { struct hnae3_knic_private_info { struct net_device *netdev; /* Set by KNIC client when init instance */ u16 rss_size; /* Allocated RSS queues */ + u16 req_rss_size; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; + u16 num_rx_desc; u8 num_tc; /* Total number of enabled TCs */ u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ @@ -533,7 +540,9 @@ struct hnae3_roce_private_info { struct hnae3_unic_private_info { struct net_device *netdev; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; + u16 num_rx_desc; + u16 num_tqps; /* total number of tqps in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; @@ -585,7 +594,7 @@ struct hnae3_handle { #define hnae3_get_bit(origin, shift) \ hnae3_get_field((origin), (0x1 << (shift)), (shift)) -void hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1bf7a5f116a0..1c1f17ec6be2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -21,6 +21,8 @@ #include "hnae3.h" #include "hns3_enet.h" +#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) + static void hns3_clear_all_ring(struct hnae3_handle *h); static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); static void hns3_remove_hw_addr(struct net_device *netdev); @@ -348,6 +350,8 @@ static int hns3_nic_net_up(struct net_device *netdev) return ret; } + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + /* enable the vectors */ for (i = 0; i < priv->vector_num; i++) hns3_vector_enable(&priv->tqp_vector[i]); @@ -361,11 +365,10 @@ static int hns3_nic_net_up(struct net_device *netdev) if (ret) goto out_start_err; - clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - return 0; out_start_err: + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); while (j--) hns3_tqp_disable(h->kinfo.tqp[j]); @@ 
-377,6 +380,29 @@ out_start_err: return ret; } +static void hns3_config_xps(struct hns3_nic_priv *priv) +{ + int i; + + for (i = 0; i < priv->vector_num; i++) { + struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; + struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; + + while (ring) { + int ret; + + ret = netif_set_xps_queue(priv->netdev, + &tqp_vector->affinity_mask, + ring->tqp->tqp_index); + if (ret) + netdev_warn(priv->netdev, + "set xps queue failed: %d", ret); + + ring = ring->next; + } + } +} + static int hns3_nic_net_open(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -409,6 +435,7 @@ static int hns3_nic_net_open(struct net_device *netdev) if (h->ae_algo->ops->set_timer_task) h->ae_algo->ops->set_timer_task(priv->ae_handle, true); + hns3_config_xps(priv); return 0; } @@ -506,7 +533,7 @@ static u8 hns3_get_netdev_flags(struct net_device *netdev) u8 flags = 0; if (netdev->flags & IFF_PROMISC) { - flags = HNAE3_USER_UPE | HNAE3_USER_MPE; + flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE; } else { flags |= HNAE3_VLAN_FLTR; if (netdev->flags & IFF_ALLMULTI) @@ -541,13 +568,13 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev) } } - hns3_update_promisc_mode(netdev, new_flags); /* User mode Promisc mode enable and vlan filtering is disabled to * let all packets in. MAC-VLAN Table overflow Promisc enabled and * vlan fitering is enabled */ hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR); h->netdev_flags = new_flags; + hns3_update_promisc_mode(netdev, new_flags); } int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags) @@ -594,7 +621,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, return 0; ret = skb_cow_head(skb, 0); - if (ret) + if (unlikely(ret)) return ret; l3.hdr = skb_network_header(skb); @@ -633,7 +660,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* normal or tunnel packet*/ l4_offset = l4.hdr - skb->data; - hdr_len = (l4.tcp->doff * 4) + l4_offset; + hdr_len = (l4.tcp->doff << 2) + l4_offset; /* remove payload length from inner pseudo checksum when tso*/ l4_paylen = skb->len - l4_offset; @@ -642,8 +669,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, /* find the txbd field values */ *paylen = skb->len - hdr_len; - hnae3_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1); /* get MSS for TSO */ *mss = skb_shinfo(skb)->gso_size; @@ -654,11 +680,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, u8 *il4_proto) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; + union l3_hdr_info l3; unsigned char *l4_hdr; unsigned char *exthdr; u8 l4_proto_tmp; @@ -711,17 +733,8 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; + union l3_hdr_info l3; + union l4_hdr_info l4; unsigned char *l2_hdr; u8 l4_proto = ol4_proto; u32 ol2_len; @@ -735,21 +748,19 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute L2 header size for normal packet, defined in 2 Bytes */ l2_len = l3.hdr - skb->data; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + 
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); /* tunnel packet*/ if (skb->encapsulation) { /* compute OL2 header size, defined in 2 Bytes */ ol2_len = l2_len; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_S, ol2_len >> 1); /* compute OL3 header size, defined in 4 Bytes */ ol3_len = l4.hdr - l3.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, ol3_len >> 2); + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, + ol3_len >> 2); /* MAC in UDP, MAC in GRE (0x6558)*/ if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { @@ -758,17 +769,16 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute OL4 header size, defined in 4 Bytes. */ ol4_len = l2_hdr - l4.hdr; - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S, - ol4_len >> 2); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L4LEN_S, ol4_len >> 2); /* switch IP header ptr from outer to inner header */ l3.hdr = skb_inner_network_header(skb); /* compute inner l2 header size, defined in 2 Bytes. */ l2_len = l3.hdr - l2_hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, + l2_len >> 1); } else { /* skb packet types not supported by hardware, * txbd len fild doesn't be filled. @@ -784,24 +794,21 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, /* compute inner(/normal) L3 header size, defined in 4 Bytes */ l3_len = l4.hdr - l3.hdr; - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + l4.tcp->doff); break; case IPPROTO_SCTP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct sctphdr) >> 2)); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; case IPPROTO_UDP: - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, - (sizeof(struct udphdr) >> 2)); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; default: /* skb packet types not supported by hardware, @@ -820,12 +827,7 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { #define IANA_VXLAN_PORT 4789 - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; + union l4_hdr_info l4; l4.hdr = skb_transport_header(skb); @@ -841,11 +843,7 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, u8 il4_proto, u32 *type_cs_vlan_tso, u32 *ol_type_vlan_len_msec) { - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; + union l3_hdr_info l3; u32 l4_proto = ol4_proto; l3.hdr = skb_network_header(skb); @@ -855,34 +853,30 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, /* define outer network header type.*/ if (skb->protocol == htons(ETH_P_IP)) { if (skb_is_gso(skb)) - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); + 
hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); else - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); } /* define tunnel type(OL4).*/ switch (l4_proto) { case IPPROTO_UDP: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); break; case IPPROTO_GRE: - hnae3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); + hns3_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -903,43 +897,37 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, } if (l3.v4->version == 4) { - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV4); /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ if (skb_is_gso(skb)) - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); } else if (l3.v6->version == 6) { - hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, + HNS3_L3T_IPV6); } switch (l4_proto) { case IPPROTO_TCP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_TCP); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) break; - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_UDP); break; case IPPROTO_SCTP: - hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - hnae3_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -961,11 +949,8 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) { /* Config bd buffer end */ - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_S, 0); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); + hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); } static int hns3_fill_desc_vtags(struct sk_buff *skb, @@ -998,10 +983,10 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use 
inner_vtag in one tag case. */ if (skb->protocol == htons(ETH_P_8021Q)) { - hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); *out_vtag = vlan_tag; } else { - hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; } } else if (skb->protocol == htons(ETH_P_8021Q)) { @@ -1009,7 +994,7 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, int rc; rc = skb_cow_head(skb, 0); - if (rc < 0) + if (unlikely(rc < 0)) return rc; vhdr = (struct vlan_ethhdr *)skb->data; vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) @@ -1026,26 +1011,21 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - u32 ol_type_vlan_len_msec = 0; u16 bdtp_fe_sc_vld_ra_ri = 0; struct skb_frag_struct *frag; unsigned int frag_buf_num; - u32 type_cs_vlan_tso = 0; - struct sk_buff *skb; - u16 inner_vtag = 0; - u16 out_vtag = 0; - unsigned int k; - int sizeoflast; - u32 paylen = 0; + int k, sizeoflast; dma_addr_t dma; - u16 mss = 0; - u8 ol4_proto; - u8 il4_proto; - int ret; if (type == DESC_TYPE_SKB) { - skb = (struct sk_buff *)priv; - paylen = skb->len; + struct sk_buff *skb = (struct sk_buff *)priv; + u32 ol_type_vlan_len_msec = 0; + u32 type_cs_vlan_tso = 0; + u32 paylen = skb->len; + u16 inner_vtag = 0; + u16 out_vtag = 0; + u16 mss = 0; + int ret; ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, &ol_type_vlan_len_msec, @@ -1054,10 +1034,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return ret; if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ol4_proto, il4_proto; + skb_reset_mac_len(skb); ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (ret) + if (unlikely(ret)) return ret; hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, @@ -1065,12 +1047,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, &ol_type_vlan_len_msec); - if (ret) + if (unlikely(ret)) return ret; ret = hns3_set_tso(skb, &paylen, &mss, &type_cs_vlan_tso); - if (ret) + if (unlikely(ret)) return ret; } @@ -1090,15 +1072,15 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); } - if (dma_mapping_error(ring->dev, dma)) { + if (unlikely(dma_mapping_error(ring->dev, dma))) { ring->stats.sw_err_cnt++; return -ENOMEM; } desc_cb->length = size; - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - sizeoflast = size % HNS3_MAX_BD_SIZE; + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; + sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? 
sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ @@ -1133,6 +1115,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring) { struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; struct skb_frag_struct *frag; int bdnum_for_frag; int frag_num; @@ -1141,21 +1124,34 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, int i; size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; frag_num = skb_shinfo(skb)->nr_frags; for (i = 0; i < frag_num; i++) { frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); - bdnum_for_frag = - (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >> + HNS3_MAX_BD_SIZE_OFFSET; + if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) return -ENOMEM; buf_num += bdnum_for_frag; } - if (buf_num > ring_space(ring)) + if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { + buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> + HNS3_MAX_BD_SIZE_OFFSET; + if (ring_space(ring) < buf_num) + return -EBUSY; + /* manual split the send packet */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + dev_kfree_skb_any(skb); + *out_skb = new_skb; + } + + if (unlikely(ring_space(ring) < buf_num)) return -EBUSY; *bnum = buf_num; @@ -1166,11 +1162,24 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring) { struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; int buf_num; /* No. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; + if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { + buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + if (ring_space(ring) < buf_num) + return -EBUSY; + /* manual split the send packet */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + dev_kfree_skb_any(skb); + *out_skb = new_skb; + } + if (unlikely(ring_space(ring) < buf_num)) return -EBUSY; @@ -1252,9 +1261,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) next_to_use_head = ring->next_to_use; - ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); - if (ret) + ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); + if (unlikely(ret)) goto head_fill_err; next_to_use_frag = ring->next_to_use; @@ -1263,11 +1272,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); - ret = priv->ops.fill_desc(ring, frag, size, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); + ret = hns3_fill_desc(ring, frag, size, + seg_num - 1 == i ? 
1 : 0, + DESC_TYPE_PAGE); - if (ret) + if (unlikely(ret)) goto frag_fill_err; } @@ -1344,6 +1353,7 @@ static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t changed = netdev->features ^ features; struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; + bool enable; int ret; if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { @@ -1354,38 +1364,29 @@ static int hns3_nic_set_features(struct net_device *netdev, } if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { - if (features & NETIF_F_GRO_HW) - ret = h->ae_algo->ops->set_gro_en(h, true); - else - ret = h->ae_algo->ops->set_gro_en(h, false); + enable = !!(features & NETIF_F_GRO_HW); + ret = h->ae_algo->ops->set_gro_en(h, enable); if (ret) return ret; } if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && h->ae_algo->ops->enable_vlan_filter) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - h->ae_algo->ops->enable_vlan_filter(h, true); - else - h->ae_algo->ops->enable_vlan_filter(h, false); + enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); + h->ae_algo->ops->enable_vlan_filter(h, enable); } if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && h->ae_algo->ops->enable_hw_strip_rxvtag) { - if (features & NETIF_F_HW_VLAN_CTAG_RX) - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); - else - ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); - + enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); if (ret) return ret; } if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { - if (features & NETIF_F_NTUPLE) - h->ae_algo->ops->enable_fd(h, true); - else - h->ae_algo->ops->enable_fd(h, false); + enable = !!(features & NETIF_F_NTUPLE); + h->ae_algo->ops->enable_fd(h, enable); } netdev->features = features; @@ -1399,7 +1400,12 @@ static void hns3_nic_get_stats64(struct net_device *netdev, int queue_num = priv->ae_handle->kinfo.num_tqps; struct hnae3_handle *handle = priv->ae_handle; struct hns3_enet_ring *ring; + u64 rx_length_errors = 0; + u64 rx_crc_errors = 0; + u64 rx_multicast = 0; unsigned int start; + u64 tx_errors = 0; + u64 rx_errors = 0; unsigned int idx; u64 tx_bytes = 0; u64 rx_bytes = 0; @@ -1420,8 +1426,8 @@ static void hns3_nic_get_stats64(struct net_device *netdev, start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; - tx_drop += ring->stats.tx_busy; tx_drop += ring->stats.sw_err_cnt; + tx_errors += ring->stats.sw_err_cnt; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -1431,8 +1437,13 @@ static void hns3_nic_get_stats64(struct net_device *netdev, rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; rx_drop += ring->stats.non_vld_descs; - rx_drop += ring->stats.err_pkt_len; rx_drop += ring->stats.l2_err; + rx_errors += ring->stats.non_vld_descs; + rx_errors += ring->stats.l2_err; + rx_crc_errors += ring->stats.l2_err; + rx_crc_errors += ring->stats.l3l4_csum_err; + rx_multicast += ring->stats.rx_multicast; + rx_length_errors += ring->stats.err_pkt_len; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); } @@ -1441,15 +1452,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev, stats->rx_bytes = rx_bytes; stats->rx_packets = rx_pkts; - stats->rx_errors = netdev->stats.rx_errors; - stats->multicast = netdev->stats.multicast; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_errors = rx_errors; + 
stats->multicast = rx_multicast; + stats->rx_length_errors = rx_length_errors; + stats->rx_crc_errors = rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; - stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; + stats->tx_errors = tx_errors; + stats->rx_dropped = rx_drop; + stats->tx_dropped = tx_drop; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1472,8 +1483,6 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) u8 tc = mqprio_qopt->qopt.num_tc; u16 mode = mqprio_qopt->mode; u8 hw = mqprio_qopt->qopt.hw; - bool if_running; - int ret; if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) @@ -1485,24 +1494,8 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data) if (!netdev) return -EINVAL; - if_running = netif_running(netdev); - if (if_running) { - hns3_nic_net_stop(netdev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? + return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; - if (ret) - goto out; - - ret = hns3_nic_set_real_num_queue(netdev); - -out: - if (if_running) - hns3_nic_net_open(netdev); - - return ret; } static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -1755,9 +1748,13 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hns3_get_dev_capability(pdev, ae_dev); pci_set_drvdata(pdev, ae_dev); - hnae3_register_ae_dev(ae_dev); + ret = hnae3_register_ae_dev(ae_dev); + if (ret) { + devm_kfree(&pdev->dev, ae_dev); + pci_set_drvdata(pdev, NULL); + } - return 0; + return ret; } /* hns3_remove - Device removal routine @@ -1771,6 +1768,7 @@ static void hns3_remove(struct pci_dev *pdev) hns3_disable_sriov(pdev); hnae3_unregister_ae_dev(ae_dev); + pci_set_drvdata(pdev, NULL); } /** @@ -1852,7 +1850,9 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) /* request the reset */ if (ae_dev->ops->reset_event) { - ae_dev->ops->reset_event(pdev, NULL); + if (!ae_dev->override_pci_need_reset) + ae_dev->ops->reset_event(pdev, NULL); + return PCI_ERS_RESULT_RECOVERED; } @@ -1935,8 +1935,7 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; if (pdev->revision >= 0x21) { - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_GRO_HW; + netdev->hw_features |= NETIF_F_GRO_HW; netdev->features |= NETIF_F_GRO_HW; if (!(h->flags & HNAE3_SUPPORT_VF)) { @@ -2321,13 +2320,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, } /* check if hardware has done checksum */ - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) { + if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | + BIT(HNS3_RXD_OL3E_B) | + BIT(HNS3_RXD_OL4E_B)))) { u64_stats_update_begin(&ring->syncp); ring->stats.l3l4_csum_err++; u64_stats_update_end(&ring->syncp); @@ -2335,11 +2333,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - l3_type = hnae3_get_field(l234info, 
HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); - ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); switch (ol4_type) { @@ -2348,6 +2341,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, skb->csum_level = 1; /* fall through */ case HNS3_OL4_TYPE_NO_TUN: + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ if ((l3_type == HNS3_L3_TYPE_IPV4 || l3_type == HNS3_L3_TYPE_IPV6) && @@ -2472,11 +2470,13 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); } - while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + /* make sure HW write desc complete */ + dma_rmb(); + if (!(bd_base_info & BIT(HNS3_RXD_VLD_B))) return -ENXIO; if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { @@ -2572,6 +2572,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, struct sk_buff **out_skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + enum hns3_pkt_l2t_type l2_frame_type; struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; @@ -2589,7 +2590,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Check valid BD */ - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) return -ENXIO; if (!skb) @@ -2652,7 +2653,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, vlan_tag); } - if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { u64_stats_update_begin(&ring->syncp); ring->stats.non_vld_descs++; u64_stats_update_end(&ring->syncp); @@ -2662,25 +2663,26 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } if (unlikely((!desc->rx.pkt_len) || - hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { u64_stats_update_begin(&ring->syncp); - ring->stats.err_pkt_len++; + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; u64_stats_update_end(&ring->syncp); dev_kfree_skb_any(skb); return -EFAULT; } - if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l2_err++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); u64_stats_update_begin(&ring->syncp); + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + ring->stats.rx_pkts++; ring->stats.rx_bytes += skb->len; u64_stats_update_end(&ring->syncp); @@ -2769,7 +2771,7 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) u32 time_passed_ms; u16 new_int_gl; - if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies) + if (!tqp_vector->last_jiffies) return false; if (ring_group->total_packets == 0) { @@ -2872,7 +2874,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) } if 
(tx_group->coal.gl_adapt_enable) { - tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group); + tx_update = hns3_get_new_int_gl(tx_group); if (tx_update) hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_group->coal.int_gl); @@ -3175,25 +3177,23 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) group->count = 0; } -static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { struct hnae3_ring_chain_node vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; - int i, ret; + int i; for (i = 0; i < priv->vector_num; i++) { tqp_vector = &priv->tqp_vector[i]; - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) - return ret; + if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) + continue; - ret = h->ae_algo->ops->unmap_ring_from_vector(h, + hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); + + h->ae_algo->ops->unmap_ring_from_vector(h, tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - return ret; hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); @@ -3205,13 +3205,10 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED; } - priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); netif_napi_del(&priv->tqp_vector[i].napi); } - - return 0; } static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) @@ -3240,16 +3237,19 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, int queue_num = priv->ae_handle->kinfo.num_tqps; struct pci_dev *pdev = priv->ae_handle->pdev; struct hns3_enet_ring *ring; + int desc_num; ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); if (!ring) return -ENOMEM; if (ring_type == HNAE3_RING_TYPE_TX) { + desc_num = priv->ae_handle->kinfo.num_tx_desc; ring_data[q->tqp_index].ring = ring; ring_data[q->tqp_index].queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { + desc_num = priv->ae_handle->kinfo.num_rx_desc; ring_data[q->tqp_index + queue_num].ring = ring; ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; ring->io_base = q->io_base; @@ -3263,7 +3263,7 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->dev = priv->dev; ring->desc_dma_addr = 0; ring->buf_size = q->buf_size; - ring->desc_num = q->desc_num; + ring->desc_num = desc_num; ring->next_to_use = 0; ring->next_to_clean = 0; @@ -3375,6 +3375,11 @@ static void hns3_fini_ring(struct hns3_enet_ring *ring) ring->desc_cb = NULL; ring->next_to_clean = 0; ring->next_to_use = 0; + ring->pending_buf = 0; + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + } } static int hns3_buf_size2type(u32 buf_size) @@ -3515,6 +3520,25 @@ static int hns3_init_mac_addr(struct net_device *netdev, bool init) return ret; } +static int hns3_init_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = 0; + + if (h->ae_algo->ops->mac_connect_phy) + ret = h->ae_algo->ops->mac_connect_phy(h); + + return ret; +} + +static void hns3_uninit_phy(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->mac_disconnect_phy) + h->ae_algo->ops->mac_disconnect_phy(h); +} + static int hns3_restore_fd_rules(struct net_device *netdev) { 
struct hnae3_handle *h = hns3_get_handle(netdev); @@ -3538,7 +3562,6 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - priv->ops.fill_desc = hns3_fill_desc; if ((netdev->features & NETIF_F_TSO) || (netdev->features & NETIF_F_TSO6)) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; @@ -3581,6 +3604,7 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->netdev = netdev; priv->ae_handle = handle; priv->tx_timeout_count = 0; + set_bit(HNS3_NIC_STATE_DOWN, &priv->state); handle->kinfo.netdev = netdev; handle->priv = (void *)priv; @@ -3623,6 +3647,10 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_init_ring_data; } + ret = hns3_init_phy(netdev); + if (ret) + goto out_init_phy; + ret = register_netdev(netdev); if (ret) { dev_err(priv->dev, "probe register netdev fail!\n"); @@ -3632,7 +3660,7 @@ static int hns3_client_init(struct hnae3_handle *handle) ret = hns3_client_start(handle); if (ret) { dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); - goto out_reg_netdev_fail; + goto out_client_start; } hns3_dcbnl_setup(handle); @@ -3646,9 +3674,14 @@ static int hns3_client_init(struct hnae3_handle *handle) return ret; +out_client_start: + unregister_netdev(netdev); out_reg_netdev_fail: + hns3_uninit_phy(netdev); +out_init_phy: + hns3_uninit_all_ring(priv); out_init_ring_data: - (void)hns3_nic_uninit_vector_data(priv); + hns3_nic_uninit_vector_data(priv); out_init_vector_data: hns3_nic_dealloc_vector_data(priv); out_alloc_vector_data: @@ -3681,9 +3714,9 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_force_clear_all_rx_ring(handle); - ret = hns3_nic_uninit_vector_data(priv); - if (ret) - netdev_err(netdev, "uninit vector error\n"); + hns3_uninit_phy(netdev); + + hns3_nic_uninit_vector_data(priv); ret = hns3_nic_dealloc_vector_data(priv); if (ret) @@ -3725,8 +3758,6 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; - bool if_running; - int ret; if (tc > HNAE3_MAX_TC) return -EINVAL; @@ -3734,25 +3765,7 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if (!ndev) return -ENODEV; - if_running = netif_running(ndev); - - if (if_running) { - (void)hns3_nic_net_stop(ndev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? - kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; - if (ret) - goto err_out; - - ret = hns3_nic_set_real_num_queue(ndev); - -err_out: - if (if_running) - (void)hns3_nic_net_open(ndev); - - return ret; + return hns3_nic_set_real_num_queue(ndev); } static int hns3_recover_hw_addr(struct net_device *ndev) @@ -4013,41 +4026,18 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); - bool vlan_filter_enable; int ret; - ret = hns3_init_mac_addr(netdev, false); - if (ret) - return ret; - - ret = hns3_recover_hw_addr(netdev); - if (ret) - return ret; - - ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); - if (ret) - return ret; - - vlan_filter_enable = netdev->flags & IFF_PROMISC ? 
false : true; - hns3_enable_vlan_filter(netdev, vlan_filter_enable); - - /* Hardware table is only clear when pf resets */ - if (!(handle->flags & HNAE3_SUPPORT_VF)) { - ret = hns3_restore_vlan(netdev); - if (ret) - return ret; - } + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); - ret = hns3_restore_fd_rules(netdev); + ret = hns3_get_ring_config(priv); if (ret) return ret; - /* Carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - ret = hns3_nic_alloc_vector_data(priv); if (ret) - return ret; + goto err_put_ring; hns3_restore_coal(priv); @@ -4068,10 +4058,44 @@ err_uninit_vector: priv->ring_data = NULL; err_dealloc_vector: hns3_nic_dealloc_vector_data(priv); +err_put_ring: + hns3_put_ring_config(priv); + priv->ring_data = NULL; return ret; } +static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + bool vlan_filter_enable; + int ret; + + ret = hns3_init_mac_addr(netdev, false); + if (ret) + return ret; + + ret = hns3_recover_hw_addr(netdev); + if (ret) + return ret; + + ret = hns3_update_promisc_mode(netdev, handle->netdev_flags); + if (ret) + return ret; + + vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true; + hns3_enable_vlan_filter(netdev, vlan_filter_enable); + + /* Hardware table is only clear when pf resets */ + if (!(handle->flags & HNAE3_SUPPORT_VF)) { + ret = hns3_restore_vlan(netdev); + if (ret) + return ret; + } + + return hns3_restore_fd_rules(netdev); +} + static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { struct net_device *netdev = handle->kinfo.netdev; @@ -4085,11 +4109,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) hns3_force_clear_all_rx_ring(handle); - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - netdev_err(netdev, "uninit vector error\n"); - return ret; - } + hns3_nic_uninit_vector_data(priv); hns3_store_coal(priv); @@ -4101,6 +4121,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) if (ret) netdev_err(netdev, "uninit ring error\n"); + hns3_put_ring_config(priv); + priv->ring_data = NULL; + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; @@ -4124,6 +4147,9 @@ static int hns3_reset_notify(struct hnae3_handle *handle, case HNAE3_UNINIT_CLIENT: ret = hns3_reset_notify_uninit_enet(handle); break; + case HNAE3_RESTORE_CLIENT: + ret = hns3_reset_notify_restore_enet(handle); + break; default: break; } @@ -4131,57 +4157,12 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } -static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret; - - ret = h->ae_algo->ops->set_channels(h, new_tqp_num); - if (ret) - return ret; - - ret = hns3_get_ring_config(priv); - if (ret) - return ret; - - ret = hns3_nic_alloc_vector_data(priv); - if (ret) - goto err_alloc_vector; - - hns3_restore_coal(priv); - - ret = hns3_nic_init_vector_data(priv); - if (ret) - goto err_uninit_vector; - - ret = hns3_init_all_ring(priv); - if (ret) - goto err_put_ring; - - return 0; - -err_put_ring: - hns3_put_ring_config(priv); -err_uninit_vector: - hns3_nic_uninit_vector_data(priv); -err_alloc_vector: - hns3_nic_dealloc_vector_data(priv); - return ret; -} - -static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) -{ - return (new_tqp_num / num_tc) * num_tc; -} - int 
hns3_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { - struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; - bool if_running = netif_running(netdev); + bool rxfh_configured = netif_is_rxfh_configured(netdev); u32 new_tqp_num = ch->combined_count; u16 org_tqp_num; int ret; @@ -4190,39 +4171,29 @@ int hns3_set_channels(struct net_device *netdev, return -EINVAL; if (new_tqp_num > hns3_get_max_available_channels(h) || - new_tqp_num < kinfo->num_tc) { + new_tqp_num < 1) { dev_err(&netdev->dev, - "Change tqps fail, the tqp range is from %d to %d", - kinfo->num_tc, + "Change tqps fail, the tqp range is from 1 to %d", hns3_get_max_available_channels(h)); return -EINVAL; } - new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); - if (kinfo->num_tqps == new_tqp_num) + if (kinfo->rss_size == new_tqp_num) return 0; - if (if_running) - hns3_nic_net_stop(netdev); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - dev_err(&netdev->dev, - "Unbind vector with tqp fail, nothing is changed"); - goto open_netdev; - } - - hns3_store_coal(priv); - - hns3_nic_dealloc_vector_data(priv); + ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) + return ret; - hns3_uninit_all_ring(priv); - hns3_put_ring_config(priv); + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; org_tqp_num = h->kinfo.num_tqps; - ret = hns3_modify_tqp_num(netdev, new_tqp_num); + ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured); if (ret) { - ret = hns3_modify_tqp_num(netdev, org_tqp_num); + ret = h->ae_algo->ops->set_channels(h, org_tqp_num, + rxfh_configured); if (ret) { /* If revert to old tqp failed, fatal error occurred */ dev_err(&netdev->dev, @@ -4232,12 +4203,11 @@ int hns3_set_channels(struct net_device *netdev, dev_info(&netdev->dev, "Change tqp num fail, Revert to old tqp num"); } + ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) + return ret; -open_netdev: - if (if_running) - hns3_nic_net_open(netdev); - - return ret; + return hns3_reset_notify(h, HNAE3_UP_CLIENT); } static const struct hnae3_client_ops client_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index e55995e93bb0..1db0bd41d209 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -74,7 +74,7 @@ enum hns3_nic_state { #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 #define HNS3_RING_MAX_PENDING 32768 -#define HNS3_RING_MIN_PENDING 8 +#define HNS3_RING_MIN_PENDING 24 #define HNS3_RING_BD_MULTIPLE 8 /* max frame size of mac */ #define HNS3_MAC_MAX_FRAME 9728 @@ -184,6 +184,8 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) +#define HNS3_TX_LAST_SIZE_M 0xffff + #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) @@ -191,6 +193,7 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_SIZE_OFFSET 16 #define HNS3_MAX_BD_PER_FRAG 8 #define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS @@ -202,6 +205,13 @@ enum hns3_nic_state { #define HNS3_RING_EN_B 0 +enum hns3_pkt_l2t_type { + HNS3_L2_TYPE_UNICAST, + HNS3_L2_TYPE_MULTICAST, + HNS3_L2_TYPE_BROADCAST, + HNS3_L2_TYPE_INVALID, +}; + enum hns3_pkt_l3t_type { HNS3_L3T_NONE, HNS3_L3T_IPV6, @@ -376,6 +386,7 @@ struct ring_stats { u64 err_bd_num; u64 l2_err; u64 l3l4_csum_err; + 
u64 rx_multicast; }; }; }; @@ -412,7 +423,6 @@ struct hns3_enet_ring { unsigned char *va; /* first buffer address for current packet */ u32 flag; /* ring attribute */ - int irq_init_flag; int numa_node; cpumask_t affinity_mask; @@ -434,11 +444,8 @@ struct hns3_nic_ring_data { }; struct hns3_nic_ops { - int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, int frag_end, enum hns_desc_type type); int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum, struct hns3_enet_ring *ring); - void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); }; enum hns3_flow_level_range { @@ -567,6 +574,7 @@ union l3_hdr_info { union l4_hdr_info { struct tcphdr *tcp; struct udphdr *udp; + struct gre_base_hdr *gre; unsigned char *hdr; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index e678b6939da3..359d4731fb2d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -47,6 +47,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { HNS3_TQP_STAT("err_bd_num", err_bd_num), HNS3_TQP_STAT("l2_err", l2_err), HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), + HNS3_TQP_STAT("multicast", rx_multicast), }; #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) @@ -116,7 +117,7 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); - return 0; + return ret; } static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) @@ -333,10 +334,10 @@ static void hns3_self_test(struct net_device *ndev, continue; data[test_index] = hns3_lp_up(ndev, loop_type); - if (!data[test_index]) { + if (!data[test_index]) data[test_index] = hns3_lp_run_test(ndev, loop_type); - hns3_lp_down(ndev, loop_type); - } + + hns3_lp_down(ndev, loop_type); if (data[test_index]) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -620,12 +621,11 @@ static int hns3_get_link_ksettings(struct net_device *netdev, hns3_get_ksettings(h, cmd); break; case HNAE3_MEDIA_TYPE_COPPER: - if (!netdev->phydev) - return -EOPNOTSUPP; - cmd->base.port = PORT_TP; - phy_ethtool_ksettings_get(netdev->phydev, cmd); - + if (!netdev->phydev) + hns3_get_ksettings(h, cmd); + else + phy_ethtool_ksettings_get(netdev->phydev, cmd); break; default: @@ -748,15 +748,19 @@ static int hns3_get_rxnfc(struct net_device *netdev, } static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, - u32 new_desc_num) + u32 tx_desc_num, u32 rx_desc_num) { struct hnae3_handle *h = priv->ae_handle; int i; - h->kinfo.num_desc = new_desc_num; + h->kinfo.num_tx_desc = tx_desc_num; + h->kinfo.num_rx_desc = rx_desc_num; - for (i = 0; i < h->kinfo.num_tqps * 2; i++) - priv->ring_data[i].ring->desc_num = new_desc_num; + for (i = 0; i < h->kinfo.num_tqps; i++) { + priv->ring_data[i].ring->desc_num = tx_desc_num; + priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num = + rx_desc_num; + } return hns3_init_all_ring(priv); } @@ -767,7 +771,9 @@ static int hns3_set_ringparam(struct net_device *ndev, struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; bool if_running = netif_running(ndev); - u32 old_desc_num, new_desc_num; + u32 old_tx_desc_num, new_tx_desc_num; + u32 old_rx_desc_num, new_rx_desc_num; + int queue_num = h->kinfo.num_tqps; int ret; if (hns3_nic_resetting(ndev)) @@ -776,43 +782,41 @@ static int hns3_set_ringparam(struct net_device *ndev, if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; 
- if (param->tx_pending != param->rx_pending) { - netdev_err(ndev, - "Descriptors of tx and rx must be equal"); - return -EINVAL; - } - if (param->tx_pending > HNS3_RING_MAX_PENDING || - param->tx_pending < HNS3_RING_MIN_PENDING) { - netdev_err(ndev, - "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", - param->tx_pending, HNS3_RING_MIN_PENDING, - HNS3_RING_MAX_PENDING); + param->tx_pending < HNS3_RING_MIN_PENDING || + param->rx_pending > HNS3_RING_MAX_PENDING || + param->rx_pending < HNS3_RING_MIN_PENDING) { + netdev_err(ndev, "Queue depth out of range [%d-%d]\n", + HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING); return -EINVAL; } - new_desc_num = param->tx_pending; - /* Hardware requires that its descriptors must be multiple of eight */ - new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); - old_desc_num = h->kinfo.num_desc; - if (old_desc_num == new_desc_num) + new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); + new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); + old_tx_desc_num = priv->ring_data[0].ring->desc_num; + old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num; + if (old_tx_desc_num == new_tx_desc_num && + old_rx_desc_num == new_rx_desc_num) return 0; netdev_info(ndev, - "Changing descriptor count from %d to %d.\n", - old_desc_num, new_desc_num); + "Changing Tx/Rx ring depth from %d/%d to %d/%d\n", + old_tx_desc_num, old_rx_desc_num, + new_tx_desc_num, new_rx_desc_num); if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); ret = hns3_uninit_all_ring(priv); if (ret) return ret; - ret = hns3_change_all_ring_bd_num(priv, new_desc_num); + ret = hns3_change_all_ring_bd_num(priv, new_tx_desc_num, + new_rx_desc_num); if (ret) { - ret = hns3_change_all_ring_bd_num(priv, old_desc_num); + ret = hns3_change_all_ring_bd_num(priv, old_tx_desc_num, + old_rx_desc_num); if (ret) { netdev_err(ndev, "Revert to old bd num fail, ret=%d.\n", ret); @@ -821,7 +825,7 @@ static int hns3_set_ringparam(struct net_device *ndev, } if (if_running) - ret = dev_open(ndev, NULL); + ret = ndev->netdev_ops->ndo_open(ndev); return ret; } @@ -1114,6 +1118,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_channels = hns3_get_channels, .get_coalesce = hns3_get_coalesce, .set_coalesce = hns3_set_coalesce, + .get_regs_len = hns3_get_regs_len, + .get_regs = hns3_get_regs, .get_link = hns3_get_link, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index e483a6e730e6..3a093a92eac5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -170,8 +170,12 @@ static bool hclge_is_special_opcode(u16 opcode) /* these commands have several descriptors, * and use the first one to save opcode and return value */ - u16 spec_opcode[3] = {HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, HCLGE_OPC_STATS_MAC}; + u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -182,6 +186,38 @@ static bool hclge_is_special_opcode(u16 opcode) return false; } +static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc, + int num, int ntc) +{ + u16 opcode, desc_ret; + int handle; + int retval; + + opcode = le16_to_cpu(desc[0].opcode); + for (handle = 0; handle < num; handle++) { + desc[handle] = 
hw->cmq.csq.desc[ntc]; + ntc++; + if (ntc >= hw->cmq.csq.desc_num) + ntc = 0; + } + if (likely(!hclge_is_special_opcode(opcode))) + desc_ret = le16_to_cpu(desc[num - 1].retval); + else + desc_ret = le16_to_cpu(desc[0].retval); + + if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) + retval = 0; + else if (desc_ret == HCLGE_CMD_NO_AUTH) + retval = -EPERM; + else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED) + retval = -EOPNOTSUPP; + else + retval = -EIO; + hw->cmq.last_status = desc_ret; + + return retval; +} + /** * hclge_cmd_send - send command to command queue * @hw: pointer to the hw struct @@ -199,7 +235,6 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) u32 timeout = 0; int handle = 0; int retval = 0; - u16 opcode, desc_ret; int ntc; spin_lock_bh(&hw->cmq.csq.lock); @@ -215,12 +250,11 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) * which will be use for hardware to write back */ ntc = hw->cmq.csq.next_to_use; - opcode = le16_to_cpu(desc[0].opcode); while (handle < num) { desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; *desc_to_use = desc[handle]; (hw->cmq.csq.next_to_use)++; - if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num) hw->cmq.csq.next_to_use = 0; handle++; } @@ -246,27 +280,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) if (!complete) { retval = -EAGAIN; } else { - handle = 0; - while (handle < num) { - /* Get the result of hardware write back */ - desc_to_use = &hw->cmq.csq.desc[ntc]; - desc[handle] = *desc_to_use; - - if (likely(!hclge_is_special_opcode(opcode))) - desc_ret = le16_to_cpu(desc[handle].retval); - else - desc_ret = le16_to_cpu(desc[0].retval); - - if (desc_ret == HCLGE_CMD_EXEC_SUCCESS) - retval = 0; - else - retval = -EIO; - hw->cmq.last_status = desc_ret; - ntc++; - handle++; - if (ntc == hw->cmq.csq.desc_num) - ntc = 0; - } + retval = hclge_cmd_check_retval(hw, desc, num, ntc); } /* Clean the command send queue */ @@ -376,6 +390,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) return 0; } +static void hclge_cmd_uninit_regs(struct hclge_hw *hw) +{ + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); +} + static void hclge_destroy_queue(struct hclge_cmq_ring *ring) { spin_lock(&ring->lock); @@ -388,3 +416,15 @@ void hclge_destroy_cmd_queue(struct hclge_hw *hw) hclge_destroy_queue(&hw->cmq.csq); hclge_destroy_queue(&hw->cmq.crq); } + +void hclge_cmd_uninit(struct hclge_dev *hdev) +{ + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + hclge_cmd_uninit_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + hclge_destroy_cmd_queue(&hdev->hw); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index f23042b24c09..3714733c96d9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -39,7 
+39,7 @@ struct hclge_cmq_ring { enum hclge_cmd_return_status { HCLGE_CMD_EXEC_SUCCESS = 0, HCLGE_CMD_NO_AUTH = 1, - HCLGE_CMD_NOT_EXEC = 2, + HCLGE_CMD_NOT_SUPPORTED = 2, HCLGE_CMD_QUEUE_FULL = 3, }; @@ -82,6 +82,8 @@ enum hclge_opcode_type { HCLGE_OPC_STATS_64_BIT = 0x0030, HCLGE_OPC_STATS_32_BIT = 0x0031, HCLGE_OPC_STATS_MAC = 0x0032, + HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033, + HCLGE_OPC_STATS_MAC_ALL = 0x0034, HCLGE_OPC_QUERY_REG_NUM = 0x0040, HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, @@ -310,16 +312,16 @@ struct hclge_ctrl_vector_chain_cmd { u8 rsv; }; -#define HCLGE_TC_NUM 8 +#define HCLGE_MAX_TC_NUM 8 #define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ #define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ struct hclge_tx_buff_alloc_cmd { - __le16 tx_pkt_buff[HCLGE_TC_NUM]; + __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM]; u8 tx_buff_rsv[8]; }; struct hclge_rx_priv_buff_cmd { - __le16 buf_num[HCLGE_TC_NUM]; + __le16 buf_num[HCLGE_MAX_TC_NUM]; __le16 shared_buf; u8 rsv[6]; }; @@ -365,7 +367,6 @@ struct hclge_priv_buf { u32 enable; /* Enable TC private buffer or not */ }; -#define HCLGE_MAX_TC_NUM 8 struct hclge_shared_buf { struct hclge_waterline self; struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; @@ -692,7 +693,9 @@ struct hclge_mac_vlan_remove_cmd { struct hclge_vlan_filter_ctrl_cmd { u8 vlan_type; u8 vlan_fe; - u8 rsv[22]; + u8 rsv1[2]; + u8 vf_id; + u8 rsv2[19]; }; struct hclge_vlan_filter_pf_cfg_cmd { @@ -974,6 +977,6 @@ enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, struct hclge_desc *desc); -void hclge_destroy_cmd_queue(struct hclge_hw *hw); +void hclge_cmd_uninit(struct hclge_dev *hdev); int hclge_cmd_queue_init(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index f6323b2501dc..1161361a973b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -93,13 +93,11 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, } } - for (i = 0; i < hdev->num_alloc_vport; i++) { - if (num_tc > hdev->vport[i].alloc_tqps) { - dev_err(&hdev->pdev->dev, - "allocated tqp(%u) checking failed, %u > tqp(%u)\n", - i, num_tc, hdev->vport[i].alloc_tqps); - return -EINVAL; - } + if (num_tc > hdev->vport[0].alloc_tqps) { + dev_err(&hdev->pdev->dev, + "allocated tqp checking failed, %u > tqp(%u)\n", + num_tc, hdev->vport[0].alloc_tqps); + return -EINVAL; } return 0; @@ -156,21 +154,15 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, return 0; } -static int hclge_map_update(struct hnae3_handle *h) +static int hclge_map_update(struct hclge_dev *hdev) { - struct hclge_vport *vport = hclge_get_vport(h); - struct hclge_dev *hdev = vport->back; int ret; - ret = hclge_tm_map_cfg(hdev); + ret = hclge_tm_schd_setup_hw(hdev); if (ret) return ret; - ret = hclge_tm_schd_mode_hw(hdev); - if (ret) - return ret; - - ret = hclge_pause_setup_hw(hdev); + ret = hclge_pause_setup_hw(hdev, false); if (ret) return ret; @@ -222,19 +214,51 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; + if (map_changed) { + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + } + hclge_tm_schd_info_update(hdev, num_tc); ret = hclge_ieee_ets_to_tm_info(hdev, ets); 
if (ret) - return ret; + goto err_out; if (map_changed) { + ret = hclge_map_update(hdev); + if (ret) + goto err_out; + ret = hclge_client_setup_tc(hdev); + if (ret) + goto err_out; + + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); if (ret) return ret; } return hclge_tm_dwrr_cfg(hdev); + +err_out: + if (!map_changed) + return ret; + + if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT)) + return ret; + + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return ret; } static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) @@ -283,6 +307,9 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) return -EINVAL; + if (pfc->pfc_en == hdev->tm_info.pfc_en) + return 0; + prio_tc = hdev->tm_info.prio_tc; pfc_map = 0; @@ -295,12 +322,10 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) } } - if (pfc_map == hdev->tm_info.hw_pfc_map) - return 0; - hdev->tm_info.hw_pfc_map = pfc_map; + hdev->tm_info.pfc_en = pfc->pfc_en; - return hclge_pause_setup_hw(hdev); + return hclge_pause_setup_hw(hdev, false); } /* DCBX configuration */ @@ -345,12 +370,24 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) if (ret) return -EINVAL; + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + hclge_tm_schd_info_update(hdev, tc); hclge_tm_prio_tc_info_update(hdev, prio_tc); - ret = hclge_tm_init_hw(hdev); + ret = hclge_tm_init_hw(hdev, false); if (ret) - return ret; + goto err_out; + + ret = hclge_client_setup_tc(hdev); + if (ret) + goto err_out; hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; @@ -359,7 +396,18 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) else hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE; - return 0; + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); + +err_out: + if (hclge_notify_client(hdev, HNAE3_INIT_CLIENT)) + return ret; + + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + return ret; } static const struct hnae3_dcb_ops hns3_dcb_ops = { @@ -369,7 +417,6 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = { .ieee_setpfc = hclge_ieee_setpfc, .getdcbx = hclge_getdcbx, .setdcbx = hclge_setdcbx, - .map_update = hclge_map_update, .setup_tc = hclge_setup_tc, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 26d80504c730..1192cf6f2321 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -691,7 +691,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; - for (i = 0; i < HCLGE_TC_NUM; i++) + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, tx_buf_cmd->tx_pkt_buff[i]); @@ -703,7 +703,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "\n"); rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; - for (i = 0; i < HCLGE_TC_NUM; i++) + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, rx_buf_cmd->buf_num[i]); diff --git 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index d0f654123b9b..1f52d11f77b5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -80,7 +80,7 @@ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_erre" }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, @@ -219,6 +219,12 @@ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" }, { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" }, + { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" }, + { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" }, + { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" }, + { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" }, + { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" }, + { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" }, { /* sentinel */ } }; @@ -277,6 +283,45 @@ static const struct hclge_hw_error hclge_ssu_com_err_int[] = { { /* sentinel */ } }; +#define HCLGE_SSU_MEM_ECC_ERR(x) \ + { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" } + +static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { + HCLGE_SSU_MEM_ECC_ERR(0), + HCLGE_SSU_MEM_ECC_ERR(1), + HCLGE_SSU_MEM_ECC_ERR(2), + HCLGE_SSU_MEM_ECC_ERR(3), + HCLGE_SSU_MEM_ECC_ERR(4), + HCLGE_SSU_MEM_ECC_ERR(5), + HCLGE_SSU_MEM_ECC_ERR(6), + HCLGE_SSU_MEM_ECC_ERR(7), + HCLGE_SSU_MEM_ECC_ERR(8), + HCLGE_SSU_MEM_ECC_ERR(9), + HCLGE_SSU_MEM_ECC_ERR(10), + HCLGE_SSU_MEM_ECC_ERR(11), + HCLGE_SSU_MEM_ECC_ERR(12), + HCLGE_SSU_MEM_ECC_ERR(13), + HCLGE_SSU_MEM_ECC_ERR(14), + HCLGE_SSU_MEM_ECC_ERR(15), + HCLGE_SSU_MEM_ECC_ERR(16), + HCLGE_SSU_MEM_ECC_ERR(17), + HCLGE_SSU_MEM_ECC_ERR(18), + HCLGE_SSU_MEM_ECC_ERR(19), + HCLGE_SSU_MEM_ECC_ERR(20), + HCLGE_SSU_MEM_ECC_ERR(21), + HCLGE_SSU_MEM_ECC_ERR(22), + HCLGE_SSU_MEM_ECC_ERR(23), + HCLGE_SSU_MEM_ECC_ERR(24), + HCLGE_SSU_MEM_ECC_ERR(25), + HCLGE_SSU_MEM_ECC_ERR(26), + HCLGE_SSU_MEM_ECC_ERR(27), + HCLGE_SSU_MEM_ECC_ERR(28), + HCLGE_SSU_MEM_ECC_ERR(29), + HCLGE_SSU_MEM_ECC_ERR(30), + HCLGE_SSU_MEM_ECC_ERR(31), + { /* sentinel */ } +}; + static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, @@ -835,13 +880,15 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*(desc_data + 2)); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n"); + hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], status); HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { - dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n"); + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", + status); 
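The HCLGE_SSU_MEM_ECC_ERR(x) macro above uses preprocessor stringification (#x) to emit one { BIT(x), "ssu_memX_ecc_mbit_err" } entry per SSU memory instance, and hclge_log_error() then walks the sentinel-terminated table against the status word. The sketch below shows that table-driven decoding pattern in isolation; the walker is a simplified stand-in for illustration, not the driver's hclge_log_error().

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    struct hw_error {
        unsigned int int_msk;
        const char *msg;
    };

    #define SSU_MEM_ECC_ERR(x) \
        { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }

    static const struct hw_error ssu_mem_ecc_err_int[] = {
        SSU_MEM_ECC_ERR(0),
        SSU_MEM_ECC_ERR(1),
        SSU_MEM_ECC_ERR(2),
        { 0 } /* sentinel */
    };

    /* print every error whose mask bit is set in status */
    static void log_error(const char *reg, const struct hw_error *err,
                          unsigned int status)
    {
        for (; err->msg; err++)
            if (err->int_msk & status)
                printf("%s %s found [error status=0x%x]\n",
                       reg, err->msg, status);
    }

    int main(void)
    {
        /* bits 0 and 2 set -> reports ssu_mem0 and ssu_mem2 multi-bit errors */
        log_error("SSU_ECC_MULTI_BIT_INT_0", ssu_mem_ecc_err_int,
                  BIT(0) | BIT(2));
        return 0;
    }

Keeping the log string next to the bit definition via the macro is what lets the earlier "ssu_ecc_mbit_int[31:0]" catch-all warning be replaced with one named message per memory.
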
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } @@ -997,6 +1044,13 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", &hclge_igu_egu_tnl_int[0], status); + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], status); + /* clear all PF RAS errors */ hclge_cmd_reuse_desc(&desc[0], false); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); @@ -1094,10 +1148,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) return 0; } -static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +static enum hnae3_reset_type +hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) { - enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET; - struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; unsigned int status; @@ -1110,17 +1164,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); /* reset everything for now */ - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); - return ret; + return HNAE3_GLOBAL_RESET; } status = le32_to_cpu(desc[0].data[0]); - if (status & HCLGE_ROCEE_RERR_INT_MASK) + if (status & HCLGE_ROCEE_RERR_INT_MASK) { dev_warn(dev, "ROCEE RAS AXI rresp error\n"); + reset_type = HNAE3_FUNC_RESET; + } - if (status & HCLGE_ROCEE_BERR_INT_MASK) + if (status & HCLGE_ROCEE_BERR_INT_MASK) { dev_warn(dev, "ROCEE RAS AXI bresp error\n"); + reset_type = HNAE3_FUNC_RESET; + } if (status & HCLGE_ROCEE_ECC_INT_MASK) { dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); @@ -1132,9 +1189,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to process ovf error\n", ret); /* reset everything for now */ - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); - return ret; + return HNAE3_GLOBAL_RESET; } + reset_type = HNAE3_FUNC_RESET; } /* clear error status */ @@ -1143,12 +1200,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) if (ret) { dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); /* reset everything for now */ - reset_type = HNAE3_GLOBAL_RESET; + return HNAE3_GLOBAL_RESET; } - HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type); - - return ret; + return reset_type; } static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) @@ -1178,15 +1233,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) return ret; } -static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) +static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) { + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET; struct hclge_dev *hdev = ae_dev->priv; if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || hdev->pdev->revision < 0x21) - return HNAE3_NONE_RESET; + return; - return hclge_log_and_clear_rocee_ras_error(hdev); + reset_type = hclge_log_and_clear_rocee_ras_error(hdev); + if (reset_type != HNAE3_NONE_RESET) + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type); } static const struct hclge_hw_blk hw_blk[] = { @@ -1259,8 +1317,10 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) hclge_handle_all_ras_errors(hdev); } else { if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || - hdev->pdev->revision < 
0x21) + hdev->pdev->revision < 0x21) { + ae_dev->override_pci_need_reset = 1; return PCI_ERS_RESULT_RECOVERED; + } } if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { @@ -1269,8 +1329,11 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) } if (status & HCLGE_RAS_REG_NFE_MASK || - status & HCLGE_RAS_REG_ROCEE_ERR_MASK) + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { + ae_dev->override_pci_need_reset = 0; return PCI_ERS_RESULT_NEED_RESET; + } + ae_dev->override_pci_need_reset = 1; return PCI_ERS_RESULT_RECOVERED; } @@ -1332,14 +1395,13 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, set_bit(HNAE3_GLOBAL_RESET, reset_requests); } - /* log PPU(RCB) errors */ + /* log PPU(RCB) MPF errors */ desc_data = (__le32 *)&desc[5]; status = le32_to_cpu(*(desc_data + 2)) & HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; if (status) { - dev_warn(dev, - "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n", - status); + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], status); set_bit(HNAE3_CORE_RESET, reset_requests); } @@ -1386,7 +1448,7 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", &hclge_ppp_pf_abnormal_int[0], status); - /* PPU(RCB) PF errors */ + /* log PPU(RCB) PF errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; if (status) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index 51a7d4eb066a..fc068280d391 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -45,8 +45,8 @@ #define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF #define HCLGE_NCSI_ERR_INT_EN 0x3 #define HCLGE_NCSI_ERR_INT_TYPE 0x9 -#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0) -#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0) +#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) @@ -79,6 +79,7 @@ #define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) #define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) #define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28) +#define HCLGE_PPU_PF_INT_RAS_MASK 0x18 #define HCLGE_PPU_PF_INT_MSIX_MASK 0x27 #define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) #define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index f7637c08bb3a..deda606c51e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -118,6 +118,12 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_control_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, + {"mac_rx_control_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, + {"mac_tx_pfc_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, {"mac_tx_pfc_pri1_pkt_num", @@ -134,6 +140,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, {"mac_tx_pfc_pri7_pkt_num", 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, {"mac_rx_pfc_pri1_pkt_num", @@ -287,10 +295,17 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { }, }; -static int hclge_mac_update_stats(struct hclge_dev *hdev) +static const u8 hclge_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + +static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) { #define HCLGE_MAC_CMD_NUM 21 -#define HCLGE_RTN_DATA_NUM 4 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; @@ -308,15 +323,18 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev) } for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { + /* for special opcode 0032, only the first desc has the head */ if (unlikely(i == 0)) { desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RTN_DATA_NUM - 2; + n = HCLGE_RD_FIRST_STATS_NUM; } else { desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RTN_DATA_NUM; + n = HCLGE_RD_OTHER_STATS_NUM; } + for (k = 0; k < n; k++) { - *data++ += le64_to_cpu(*desc_data); + *data += le64_to_cpu(*desc_data); + data++; desc_data++; } } @@ -324,6 +342,85 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev) return 0; } +static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) +{ + u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); + struct hclge_desc *desc; + __le64 *desc_data; + u16 i, k, n; + int ret; + + desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true); + ret = hclge_cmd_send(&hdev->hw, desc, desc_num); + if (ret) { + kfree(desc); + return ret; + } + + for (i = 0; i < desc_num; i++) { + /* for special opcode 0034, only the first desc has the head */ + if (i == 0) { + desc_data = (__le64 *)(&desc[i].data[0]); + n = HCLGE_RD_FIRST_STATS_NUM; + } else { + desc_data = (__le64 *)(&desc[i]); + n = HCLGE_RD_OTHER_STATS_NUM; + } + + for (k = 0; k < n; k++) { + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; + } + } + + kfree(desc); + + return 0; +} + +static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) +{ + struct hclge_desc desc; + __le32 *desc_data; + u32 reg_num; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + desc_data = (__le32 *)(&desc.data[0]); + reg_num = le32_to_cpu(*desc_data); + + *desc_num = 1 + ((reg_num - 3) >> 2) + + (u32)(((reg_num - 3) & 0x3) ? 
1 : 0); + + return 0; +} + +static int hclge_mac_update_stats(struct hclge_dev *hdev) +{ + u32 desc_num; + int ret; + + ret = hclge_mac_query_reg_num(hdev, &desc_num); + + /* The firmware supports the new statistics acquisition method */ + if (!ret) + ret = hclge_mac_update_stats_complete(hdev, desc_num); + else if (ret == -EOPNOTSUPP) + ret = hclge_mac_update_stats_defective(hdev); + else + dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); + + return ret; +} + static int hclge_tqps_update_stats(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -461,26 +558,6 @@ static u8 *hclge_comm_get_strings(u32 stringset, return (u8 *)buff; } -static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, - struct net_device_stats *net_stats) -{ - net_stats->tx_dropped = 0; - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; - net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; - - net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; - net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; - - net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; - net_stats->rx_length_errors = - hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_length_errors += - hw_stats->mac_stats.mac_rx_oversize_pkt_num; - net_stats->rx_over_errors = - hw_stats->mac_stats.mac_rx_oversize_pkt_num; -} - static void hclge_update_stats_for_all(struct hclge_dev *hdev) { struct hnae3_handle *handle; @@ -500,8 +577,6 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev) if (status) dev_err(&hdev->pdev->dev, "Update MAC stats fail, status = %d.\n", status); - - hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); } static void hclge_update_stats(struct hnae3_handle *handle, @@ -509,7 +584,6 @@ static void hclge_update_stats(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_hw_stats *hw_stats = &hdev->hw_stats; int status; if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) @@ -527,8 +601,6 @@ static void hclge_update_stats(struct hnae3_handle *handle, "Update TQPS stats fail, status = %d.\n", status); - hclge_update_netstat(hw_stats, net_stats); - clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); } @@ -767,37 +839,67 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, unsigned long *supported = hdev->hw.mac.supported; if (speed_ability & HCLGE_SUPPORT_1G_BIT) - set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_10G_BIT) - set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_25G_BIT) - set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_50G_BIT) - set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + supported); if (speed_ability & HCLGE_SUPPORT_100G_BIT) - set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); + 
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); +} + +static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + unsigned long *supported = hdev->hw.mac.supported; + + /* default to support all speed for GE port */ + if (!speed_ability) + speed_ability = HCLGE_SUPPORT_GE; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + } - set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); } static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) { u8 media_type = hdev->hw.mac.media_type; - if (media_type != HNAE3_MEDIA_TYPE_FIBER) - return; - - hclge_parse_fiber_link_mode(hdev, speed_ability); + if (media_type == HNAE3_MEDIA_TYPE_FIBER) + hclge_parse_fiber_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_COPPER) + hclge_parse_copper_link_mode(hdev, speed_ability); } static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) @@ -931,12 +1033,16 @@ static int hclge_configure(struct hclge_dev *hdev) ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; hdev->hw.mac.phy_addr = cfg.phy_addr; - hdev->num_desc = cfg.tqp_desc_num; + hdev->num_tx_desc = cfg.tqp_desc_num; + hdev->num_rx_desc = cfg.tqp_desc_num; hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; hdev->wanted_umv_size = cfg.umv_space; + if (hnae3_dev_fd_supported(hdev)) + hdev->fd_en = true; + ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); if (ret) { dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); @@ -1035,7 +1141,8 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev) tqp->q.ae_algo = &ae_algo; tqp->q.buf_size = hdev->rx_buf_len; - tqp->q.desc_num = hdev->num_desc; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + i * HCLGE_TQP_REG_SIZE; @@ -1068,64 +1175,51 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, return ret; } -static int hclge_assign_tqp(struct hclge_vport *vport) +static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; int i, alloced; for (i = 0, alloced = 0; i < hdev->num_tqps && - alloced < kinfo->num_tqps; i++) { + alloced < num_tqps; i++) { if (!hdev->htqp[i].alloced) { hdev->htqp[i].q.handle = &vport->nic; hdev->htqp[i].q.tqp_index = alloced; - hdev->htqp[i].q.desc_num = kinfo->num_desc; + hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; + hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; kinfo->tqp[alloced] = &hdev->htqp[i].q; hdev->htqp[i].alloced = true; alloced++; } } - vport->alloc_tqps = kinfo->num_tqps; + vport->alloc_tqps = alloced; + kinfo->rss_size = min_t(u16, hdev->rss_size_max, + vport->alloc_tqps / hdev->tm_info.num_tc); 
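A few hunks above, hclge_mac_query_reg_num() turns the register count reported by the HCLGE_OPC_QUERY_MAC_REG_NUM command into the number of descriptors to fetch: the first descriptor carries fewer statistics words than the rest, and the shift/mask expression is an open-coded ceiling division. The small check below verifies that reading of the arithmetic; the reg_num values are arbitrary test inputs, not values taken from hardware.

    #include <assert.h>
    #include <stdio.h>

    /* ceiling division, as the kernel's DIV_ROUND_UP() would compute it */
    #define CEIL_DIV(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int reg_num;

        for (reg_num = 4; reg_num < 200; reg_num++) {
            /* open-coded form used in hclge_mac_query_reg_num() */
            unsigned int open_coded = 1 + ((reg_num - 3) >> 2) +
                                      (((reg_num - 3) & 0x3) ? 1 : 0);
            /* same quantity written as a ceiling division */
            unsigned int closed_form = 1 + CEIL_DIV(reg_num - 3, 4);

            assert(open_coded == closed_form);
        }
        printf("desc_num == 1 + ceil((reg_num - 3) / 4) for all tested reg_num\n");
        return 0;
    }
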
return 0; } -static int hclge_knic_setup(struct hclge_vport *vport, - u16 num_tqps, u16 num_desc) +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps, + u16 num_tx_desc, u16 num_rx_desc) + { struct hnae3_handle *nic = &vport->nic; struct hnae3_knic_private_info *kinfo = &nic->kinfo; struct hclge_dev *hdev = vport->back; - int i, ret; + int ret; - kinfo->num_desc = num_desc; - kinfo->rx_buf_len = hdev->rx_buf_len; - kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); - kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; + kinfo->num_tx_desc = num_tx_desc; + kinfo->num_rx_desc = num_rx_desc; - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (hdev->hw_tc_map & BIT(i)) { - kinfo->tc_info[i].enable = true; - kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; - kinfo->tc_info[i].tqp_count = kinfo->rss_size; - kinfo->tc_info[i].tc = i; - } else { - /* Set to default queue if TC is disable */ - kinfo->tc_info[i].enable = false; - kinfo->tc_info[i].tqp_offset = 0; - kinfo->tc_info[i].tqp_count = 1; - kinfo->tc_info[i].tc = 0; - } - } + kinfo->rx_buf_len = hdev->rx_buf_len; - kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) return -ENOMEM; - ret = hclge_assign_tqp(vport); + ret = hclge_assign_tqp(vport, num_tqps); if (ret) dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); @@ -1140,7 +1234,7 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, u16 i; kinfo = &nic->kinfo; - for (i = 0; i < kinfo->num_tqps; i++) { + for (i = 0; i < vport->alloc_tqps; i++) { struct hclge_tqp *q = container_of(kinfo->tqp[i], struct hclge_tqp, q); bool is_pf; @@ -1191,7 +1285,9 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->numa_node_mask = hdev->numa_node_mask; if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { - ret = hclge_knic_setup(vport, num_tqps, hdev->num_desc); + ret = hclge_knic_setup(vport, num_tqps, + hdev->num_tx_desc, hdev->num_rx_desc); + if (ret) { dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); @@ -1241,6 +1337,9 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->back = hdev; vport->vport_id = i; vport->mps = HCLGE_MAC_DEFAULT_FRAME; + INIT_LIST_HEAD(&vport->vlan_list); + INIT_LIST_HEAD(&vport->uc_mac_list); + INIT_LIST_HEAD(&vport->mc_mac_list); if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -1273,7 +1372,7 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, req = (struct hclge_tx_buff_alloc_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); - for (i = 0; i < HCLGE_TC_NUM; i++) { + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; req->tx_pkt_buff[i] = @@ -1448,13 +1547,14 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; - if (total_size < hdev->tx_buf_size) - return -ENOMEM; + if (hdev->hw_tc_map & BIT(i)) { + if (total_size < hdev->tx_buf_size) + return -ENOMEM; - if (hdev->hw_tc_map & BIT(i)) priv->tx_buf_size = hdev->tx_buf_size; - else + } else { priv->tx_buf_size = 0; + } total_size -= priv->tx_buf_size; } @@ -1462,66 +1562,15 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, return 0; } -/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs - * @hdev: pointer to struct hclge_dev 
- * @buf_alloc: pointer to buffer calculation data - * @return: 0: calculate sucessful, negative: fail - */ -static int hclge_rx_buffer_calc(struct hclge_dev *hdev, - struct hclge_pkt_buf_alloc *buf_alloc) +static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, + struct hclge_pkt_buf_alloc *buf_alloc) { - u32 rx_all = hdev->pkt_buf_size, aligned_mps; - int no_pfc_priv_num, pfc_priv_num; - struct hclge_priv_buf *priv; + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); int i; - aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); - rx_all -= hclge_get_tx_buff_alloced(buf_alloc); - - /* When DCB is not supported, rx private - * buffer is not allocated. - */ - if (!hnae3_dev_dcb_supported(hdev)) { - if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return -ENOMEM; - - return 0; - } - - /* step 1, try to alloc private buffer for all enabled tc */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; - if (hdev->hw_tc_map & BIT(i)) { - priv->enable = 1; - if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = aligned_mps; - priv->wl.high = - roundup(priv->wl.low + aligned_mps, - HCLGE_BUF_SIZE_UNIT); - priv->buf_size = priv->wl.high + - hdev->dv_buf_size; - } else { - priv->wl.low = 0; - priv->wl.high = 2 * aligned_mps; - priv->buf_size = priv->wl.high + - hdev->dv_buf_size; - } - } else { - priv->enable = 0; - priv->wl.low = 0; - priv->wl.high = 0; - priv->buf_size = 0; - } - } - - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; - - /* step 2, try to decrease the buffer size of - * no pfc TC's private buffer - */ - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; priv->enable = 0; priv->wl.low = 0; @@ -1534,28 +1583,30 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = 256; - priv->wl.high = priv->wl.low + aligned_mps; - priv->buf_size = priv->wl.high + hdev->dv_buf_size; + priv->wl.low = max ? aligned_mps : 256; + priv->wl.high = roundup(priv->wl.low + aligned_mps, + HCLGE_BUF_SIZE_UNIT); } else { priv->wl.low = 0; - priv->wl.high = aligned_mps; - priv->buf_size = priv->wl.high + hdev->dv_buf_size; + priv->wl.high = max ? 
(aligned_mps * 2) : aligned_mps; } + + priv->buf_size = priv->wl.high + hdev->dv_buf_size; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 3, try to reduce the number of pfc disabled TCs, - * which have private buffer - */ - /* get the total no pfc enable TC number, which have private buffer */ - no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); +static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); + int i; /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && !(hdev->tm_info.hw_pfc_map & BIT(i))) { @@ -1572,17 +1623,19 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, break; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) - return 0; + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} - /* step 4, try to reduce the number of pfc enabled TCs - * which have private buffer. - */ - pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); +static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); + int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); + int i; /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &buf_alloc->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && hdev->tm_info.hw_pfc_map & BIT(i)) { @@ -1598,7 +1651,40 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, pfc_priv_num == 0) break; } - if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + + return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @buf_alloc: pointer to buffer calculation data + * @return: 0: calculate sucessful, negative: fail + */ +static int hclge_rx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) +{ + /* When DCB is not supported, rx private buffer is not allocated. 
*/ + if (!hnae3_dev_dcb_supported(hdev)) { + u32 rx_all = hdev->pkt_buf_size; + + rx_all -= hclge_get_tx_buff_alloced(buf_alloc); + if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) + return -ENOMEM; + + return 0; + } + + if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) + return 0; + + /* try to decrease the buffer size */ + if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) + return 0; + + if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) + return 0; + + if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) return 0; return -ENOMEM; @@ -2122,7 +2208,9 @@ static int hclge_get_mac_phy_link(struct hclge_dev *hdev) static void hclge_update_link_status(struct hclge_dev *hdev) { + struct hnae3_client *rclient = hdev->roce_client; struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *rhandle; struct hnae3_handle *handle; int state; int i; @@ -2134,6 +2222,10 @@ static void hclge_update_link_status(struct hclge_dev *hdev) for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; client->ops->link_status_change(handle, state); + rhandle = &hdev->vport[i].roce; + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, + state); } hdev->hw.mac.link = state; } @@ -2418,8 +2510,8 @@ static void hclge_misc_irq_uninit(struct hclge_dev *hdev) hclge_free_vector(hdev, 0); } -static int hclge_notify_client(struct hclge_dev *hdev, - enum hnae3_reset_notify_type type) +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) { struct hnae3_client *client = hdev->nic_client; u16 i; @@ -2548,7 +2640,7 @@ static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) return hclge_cmd_send(&hdev->hw, &desc, 1); } -int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) { int i; @@ -2883,6 +2975,10 @@ static void hclge_reset(struct hclge_dev *hdev) if (ret) goto err_reset_lock; + ret = hclge_notify_client(hdev, HNAE3_RESTORE_CLIENT); + if (ret) + goto err_reset_lock; + hclge_clear_reset_cause(hdev); ret = hclge_reset_prepare_up(hdev); @@ -3597,8 +3693,11 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev) static void hclge_rss_init_cfg(struct hclge_dev *hdev) { + int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; struct hclge_vport *vport = hdev->vport; - int i; + + if (hdev->pdev->revision >= 0x21) + rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE; for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport[i].rss_tuple_sets.ipv4_tcp_en = @@ -3618,9 +3717,10 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev) vport[i].rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; - vport[i].rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + vport[i].rss_algo = rss_algo; - netdev_rss_key_fill(vport[i].rss_hash_key, HCLGE_RSS_KEY_SIZE); + memcpy(vport[i].rss_hash_key, hclge_hash_key, + HCLGE_RSS_KEY_SIZE); } hclge_rss_indir_init_cfg(hdev); @@ -3788,8 +3888,16 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; struct hclge_promisc_param param; + bool en_bc_pmc = true; - hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, true, + /* For revision 0x20, if broadcast promisc enabled, vlan filter is + * always bypassed. So broadcast promisc should be disabled until + * user enable promisc mode + */ + if (handle->pdev->revision == 0x20) + en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? 
true : false; + + hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, vport->vport_id); return hclge_cmd_set_promisc_mode(hdev, ¶m); } @@ -3898,7 +4006,6 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) return -EOPNOTSUPP; } - hdev->fd_cfg.fd_en = true; hdev->fd_cfg.proto_support = TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW | UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW; @@ -4656,7 +4763,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; - if (!hdev->fd_cfg.fd_en) { + if (!hdev->fd_en) { dev_warn(&hdev->pdev->dev, "Please enable flow director first\n"); return -EOPNOTSUPP; @@ -4809,7 +4916,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) return 0; /* if fd is disabled, should not restore it when reset */ - if (!hdev->fd_cfg.fd_en) + if (!hdev->fd_en) return 0; hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { @@ -5095,7 +5202,7 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - hdev->fd_cfg.fd_en = enable; + hdev->fd_en = enable; if (!enable) hclge_del_all_fd_entries(handle, false); else @@ -5174,8 +5281,15 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, { #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 + +#define HCLGE_MAC_LINK_STATUS_MS 20 +#define HCLGE_MAC_LINK_STATUS_NUM 10 +#define HCLGE_MAC_LINK_STATUS_DOWN 0 +#define HCLGE_MAC_LINK_STATUS_UP 1 + struct hclge_serdes_lb_cmd *req; struct hclge_desc desc; + int mac_link_ret = 0; int ret, i = 0; u8 loop_mode_b; @@ -5198,8 +5312,10 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, if (en) { req->enable = loop_mode_b; req->mask = loop_mode_b; + mac_link_ret = HCLGE_MAC_LINK_STATUS_UP; } else { req->mask = loop_mode_b; + mac_link_ret = HCLGE_MAC_LINK_STATUS_DOWN; } ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -5231,7 +5347,19 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, } hclge_cfg_mac_mode(hdev, en); - return 0; + + i = 0; + do { + /* serdes Internal loopback, independent of the network cable.*/ + msleep(HCLGE_MAC_LINK_STATUS_MS); + ret = hclge_get_mac_link_status(hdev); + if (ret == mac_link_ret) + return 0; + } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + + dev_err(&hdev->pdev->dev, "config mac mode timeout\n"); + + return -EBUSY; } static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, @@ -5258,6 +5386,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en) { struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo; struct hclge_dev *hdev = vport->back; int i, ret; @@ -5276,7 +5405,11 @@ static int hclge_set_loopback(struct hnae3_handle *handle, break; } - for (i = 0; i < vport->alloc_tqps; i++) { + if (ret) + return ret; + + kinfo = &vport->nic.kinfo; + for (i = 0; i < kinfo->num_tqps; i++) { ret = hclge_tqp_enable(hdev, i, 0, en); if (ret) return ret; @@ -5288,11 +5421,13 @@ static int hclge_set_loopback(struct hnae3_handle *handle, static void hclge_reset_tqp_stats(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo; struct hnae3_queue *queue; struct hclge_tqp *tqp; int i; - for (i = 0; i < vport->alloc_tqps; i++) { + kinfo = &vport->nic.kinfo; + for (i = 0; i < kinfo->num_tqps; i++) { queue = 
handle->kinfo.tqp[i]; tqp = container_of(queue, struct hclge_tqp, q); memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); @@ -5493,13 +5628,19 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) } static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, - const u8 *addr) + const u8 *addr, bool is_mc) { const unsigned char *mac_addr = addr; u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | (mac_addr[0]) | (mac_addr[1] << 8); u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + if (is_mc) { + hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + } + new_req->mac_addr_hi32 = cpu_to_le32(high_val); new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } @@ -5730,9 +5871,12 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free) if (is_free) { if (vport->used_umv_num > hdev->priv_umv_size) hdev->share_umv_size++; - vport->used_umv_num--; + + if (vport->used_umv_num > 0) + vport->used_umv_num--; } else { - if (vport->used_umv_num >= hdev->priv_umv_size) + if (vport->used_umv_num >= hdev->priv_umv_size && + hdev->share_umv_size > 0) hdev->share_umv_size--; vport->used_umv_num++; } @@ -5770,14 +5914,13 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, HCLGE_MAC_EPORT_VFID_S, vport->vport_id); req.egress_port = cpu_to_le16(egress_port); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, false); /* Lookup the mac address in the mac_vlan table, and add * it if the entry is inexistent. 
Repeated unicast entry @@ -5835,9 +5978,8 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, false); ret = hclge_remove_mac_vlan_tbl(vport, &req); if (!ret) hclge_update_umv_space(vport, true); @@ -5869,11 +6011,8 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, return -EINVAL; } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { /* This mac addr exist, update VFID for it */ @@ -5919,11 +6058,8 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } memset(&req, 0, sizeof(req)); - hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); - hclge_prepare_mac_addr(&req, addr); + hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { /* This mac addr exist, remove this handle's VFID for it */ @@ -5949,6 +6085,103 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, return status; } +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg; + struct list_head *list; + + if (!vport->vport_id) + return; + + mac_cfg = kzalloc(sizeof(*mac_cfg), GFP_KERNEL); + if (!mac_cfg) + return; + + mac_cfg->hd_tbl_status = true; + memcpy(mac_cfg->mac_addr, mac_addr, ETH_ALEN); + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + list_add_tail(&mac_cfg->node, list); +} + +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct list_head *list; + bool uc_flag, mc_flag; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? + &vport->uc_mac_list : &vport->mc_mac_list; + + uc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_UC; + mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) { + if (uc_flag && mac_cfg->hd_tbl_status) + hclge_rm_uc_addr_common(vport, mac_addr); + + if (mc_flag && mac_cfg->hd_tbl_status) + hclge_rm_mc_addr_common(vport, mac_addr); + + list_del(&mac_cfg->node); + kfree(mac_cfg); + break; + } + } +} + +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type) +{ + struct hclge_vport_mac_addr_cfg *mac_cfg, *tmp; + struct list_head *list; + + list = (mac_type == HCLGE_MAC_ADDR_UC) ? 
+ &vport->uc_mac_list : &vport->mc_mac_list; + + list_for_each_entry_safe(mac_cfg, tmp, list, node) { + if (mac_type == HCLGE_MAC_ADDR_UC && mac_cfg->hd_tbl_status) + hclge_rm_uc_addr_common(vport, mac_cfg->mac_addr); + + if (mac_type == HCLGE_MAC_ADDR_MC && mac_cfg->hd_tbl_status) + hclge_rm_mc_addr_common(vport, mac_cfg->mac_addr); + + mac_cfg->hd_tbl_status = false; + if (is_del_list) { + list_del(&mac_cfg->node); + kfree(mac_cfg); + } + } +} + +void hclge_uninit_vport_mac_table(struct hclge_dev *hdev) +{ + struct hclge_vport_mac_addr_cfg *mac, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_cfg_mutex); + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(mac, tmp, &vport->uc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } + + list_for_each_entry_safe(mac, tmp, &vport->mc_mac_list, node) { + list_del(&mac->node); + kfree(mac); + } + } + mutex_unlock(&hdev->vport_cfg_mutex); +} + static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, u16 cmdq_resp, u8 resp_code) { @@ -6104,7 +6337,7 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr, } static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, - u8 fe_type, bool filter_en) + u8 fe_type, bool filter_en, u8 vf_id) { struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; @@ -6115,6 +6348,7 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; req->vlan_fe = filter_en ? fe_type : 0; + req->vf_id = vf_id; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -6143,12 +6377,13 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) if (hdev->pdev->revision >= 0x21) { hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, enable); + HCLGE_FILTER_FE_EGRESS, enable, 0); hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, enable); + HCLGE_FILTER_FE_INGRESS, enable, 0); } else { hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS_V1_B, enable); + HCLGE_FILTER_FE_EGRESS_V1_B, enable, + 0); } if (enable) handle->netdev_flags |= HNAE3_VLAN_FLTR; @@ -6456,19 +6691,27 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) int i; if (hdev->pdev->revision >= 0x21) { - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, true); - if (ret) - return ret; + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + ret = hclge_set_vlan_filter_ctrl(hdev, + HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS, + true, + vport->vport_id); + if (ret) + return ret; + } ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true); + HCLGE_FILTER_FE_INGRESS, true, + 0); if (ret) return ret; } else { ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, HCLGE_FILTER_FE_EGRESS_V1_B, - true); + true, 0); if (ret) return ret; } @@ -6522,6 +6765,84 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } +void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id) +{ + struct hclge_vport_vlan_cfg *vlan; + + /* vlan 0 is reserved */ + if (!vlan_id) + return; + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return; + + vlan->hd_tbl_status = true; + vlan->vlan_id = 
vlan_id; + + list_add_tail(&vlan->node, &vport->vlan_list); +} + +void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->vlan_id == vlan_id) { + if (is_write_tbl && vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan_id, 0, + true); + + list_del(&vlan->node); + kfree(vlan); + break; + } + } +} + +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (vlan->hd_tbl_status) + hclge_set_vlan_filter_hw(hdev, + htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, 0, + true); + + vlan->hd_tbl_status = false; + if (is_del_list) { + list_del(&vlan->node); + kfree(vlan); + } + } +} + +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_vport *vport; + int i; + + mutex_lock(&hdev->vport_cfg_mutex); + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + list_del(&vlan->node); + kfree(vlan); + } + } + mutex_unlock(&hdev->vport_cfg_mutex); +} + int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -6959,16 +7280,6 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, *tp_mdix = ETH_TP_MDI; } -static int hclge_init_instance_hw(struct hclge_dev *hdev) -{ - return hclge_mac_connect_phy(hdev); -} - -static void hclge_uninit_instance_hw(struct hclge_dev *hdev) -{ - hclge_mac_disconnect_phy(hdev); -} - static int hclge_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -6988,13 +7299,6 @@ static int hclge_init_client_instance(struct hnae3_client *client, if (ret) goto clear_nic; - ret = hclge_init_instance_hw(hdev); - if (ret) { - client->ops->uninit_instance(&vport->nic, - 0); - goto clear_nic; - } - hnae3_set_client_init_flag(client, ae_dev, 1); if (hdev->roce_client && @@ -7079,7 +7383,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, if (client->type == HNAE3_CLIENT_ROCE) return; if (hdev->nic_client && client->ops->uninit_instance) { - hclge_uninit_instance_hw(hdev); client->ops->uninit_instance(&vport->nic, 0); hdev->nic_client = NULL; vport->nic.client = NULL; @@ -7222,6 +7525,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; mutex_init(&hdev->vport_lock); + mutex_init(&hdev->vport_cfg_mutex); ret = hclge_pci_init(hdev); if (ret) { @@ -7298,7 +7602,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_init_umv_space(hdev); if (ret) { dev_err(&pdev->dev, "umv space init error, ret=%d.\n", ret); - goto err_msi_irq_uninit; + goto err_mdiobus_unreg; } ret = hclge_mac_init(hdev); @@ -7383,7 +7687,7 @@ err_msi_irq_uninit: err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); @@ -7456,7 +7760,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_tm_init_hw(hdev); + ret = hclge_tm_init_hw(hdev, true); if (ret) { 
dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret); return ret; @@ -7510,10 +7814,13 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) synchronize_irq(hdev->misc_vector.vector_irq); hclge_hw_error_set_state(hdev, false); - hclge_destroy_cmd_queue(&hdev->hw); + hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); + hclge_uninit_vport_mac_table(hdev); + hclge_uninit_vport_vlan_table(hdev); + mutex_destroy(&hdev->vport_cfg_mutex); ae_dev->priv = NULL; } @@ -7523,18 +7830,17 @@ static u32 hclge_get_max_channels(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); + return min_t(u32, hdev->rss_size_max, + vport->alloc_tqps / kinfo->num_tc); } static void hclge_get_channels(struct hnae3_handle *handle, struct ethtool_channels *ch) { - struct hclge_vport *vport = hclge_get_vport(handle); - ch->max_combined = hclge_get_max_channels(handle); ch->other_count = 1; ch->max_other = 1; - ch->combined_count = vport->alloc_tqps; + ch->combined_count = handle->kinfo.rss_size; } static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, @@ -7547,26 +7853,8 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, *max_rss_size = hdev->rss_size_max; } -static void hclge_release_tqp(struct hclge_vport *vport) -{ - struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; - struct hclge_dev *hdev = vport->back; - int i; - - for (i = 0; i < kinfo->num_tqps; i++) { - struct hclge_tqp *tqp = - container_of(kinfo->tqp[i], struct hclge_tqp, q); - - tqp->q.handle = NULL; - tqp->q.tqp_index = 0; - tqp->alloced = false; - } - - devm_kfree(&hdev->pdev->dev, kinfo->tqp); - kinfo->tqp = NULL; -} - -static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) +static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, + bool rxfh_configured) { struct hclge_vport *vport = hclge_get_vport(handle); struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; @@ -7580,24 +7868,11 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) u32 *rss_indir; int ret, i; - /* Free old tqps, and reallocate with new tqp number when nic setup */ - hclge_release_tqp(vport); - - ret = hclge_knic_setup(vport, new_tqps_num, kinfo->num_desc); - if (ret) { - dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); - return ret; - } - - ret = hclge_map_tqp_to_vport(hdev, vport); - if (ret) { - dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); - return ret; - } + kinfo->req_rss_size = new_tqps_num; - ret = hclge_tm_schd_init(hdev); + ret = hclge_tm_vport_map_update(hdev); if (ret) { - dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); + dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); return ret; } @@ -7618,6 +7893,10 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) if (ret) return ret; + /* RSS indirection table has been configuared by user */ + if (rxfh_configured) + goto out; + /* Reinitializes the rss indirect table according to the new RSS size */ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); if (!rss_indir) @@ -7633,6 +7912,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) kfree(rss_indir); +out: if (!ret) dev_info(&hdev->pdev->dev, "Channels changed, rss_size from %d to %d, tqps from %d to %d", @@ -7927,7 +8207,7 @@ 
static void hclge_get_link_mode(struct hnae3_handle *handle, } } -static int hclge_gro_en(struct hnae3_handle *handle, int enable) +static int hclge_gro_en(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -8012,6 +8292,8 @@ static const struct hnae3_ae_ops hclge_ops = { .set_gro_en = hclge_gro_en, .get_global_queue_id = hclge_covert_handle_qid_global, .set_timer_task = hclge_set_timer_task, + .mac_connect_phy = hclge_mac_connect_phy, + .mac_disconnect_phy = hclge_mac_disconnect_phy, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 6615b85a1c52..b57ac4beb313 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -16,6 +16,9 @@ #define HCLGE_MAX_PF_NUM 8 +#define HCLGE_RD_FIRST_STATS_NUM 2 +#define HCLGE_RD_OTHER_STATS_NUM 4 + #define HCLGE_INVALID_VPORT 0xffff #define HCLGE_PF_CFG_BLOCK_SIZE 32 @@ -185,6 +188,10 @@ enum HLCGE_PORT_TYPE { #define HCLGE_SUPPORT_25G_BIT BIT(2) #define HCLGE_SUPPORT_50G_BIT BIT(3) #define HCLGE_SUPPORT_100G_BIT BIT(4) +#define HCLGE_SUPPORT_100M_BIT BIT(6) +#define HCLGE_SUPPORT_10M_BIT BIT(7) +#define HCLGE_SUPPORT_GE \ + (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT) enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, @@ -322,6 +329,7 @@ struct hclge_tm_info { struct hclge_tc_info tc_info[HNAE3_MAX_TC]; enum hclge_fc_mode fc_mode; u8 hw_pfc_map; /* Allow for packet drop or not on this TC */ + u8 pfc_en; /* PFC enabled or not for user priority */ }; struct hclge_comm_stats_str { @@ -415,6 +423,10 @@ struct hclge_mac_stats { u64 mac_rx_fcs_err_pkt_num; u64 mac_rx_send_app_good_pkt_num; u64 mac_rx_send_app_bad_pkt_num; + u64 mac_tx_pfc_pause_pkt_num; + u64 mac_rx_pfc_pause_pkt_num; + u64 mac_tx_ctrl_pkt_num; + u64 mac_rx_ctrl_pkt_num; }; #define HCLGE_STATS_TIMER_INTERVAL (60 * 5) @@ -575,7 +587,6 @@ struct hclge_fd_key_cfg { struct hclge_fd_cfg { u8 fd_mode; - u8 fd_en; u16 max_key_length; u32 proto_support; u32 rule_num[2]; /* rule entry number */ @@ -621,6 +632,23 @@ struct hclge_fd_ad_data { u16 rule_id; }; +struct hclge_vport_mac_addr_cfg { + struct list_head node; + int hd_tbl_status; + u8 mac_addr[ETH_ALEN]; +}; + +enum HCLGE_MAC_ADDR_TYPE { + HCLGE_MAC_ADDR_UC, + HCLGE_MAC_ADDR_MC +}; + +struct hclge_vport_vlan_cfg { + struct list_head node; + int hd_tbl_status; + u16 vlan_id; +}; + /* For each bit of TCAM entry, it uses a pair of 'x' and * 'y' to indicate which value to match, like below: * ---------------------------------- @@ -678,7 +706,8 @@ struct hclge_dev { u16 num_alloc_vport; /* Num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ u8 hw_tc_map; u8 tc_num_last_time; enum hclge_fc_mode fc_mode_last_time; @@ -750,6 +779,7 @@ struct hclge_dev { struct hclge_fd_cfg fd_cfg; struct hlist_head fd_rule_list; u16 hclge_fd_rule_num; + u8 fd_en; u16 wanted_umv_size; /* max available unicast mac vlan space */ @@ -759,6 +789,8 @@ struct hclge_dev { /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; struct mutex umv_mutex; /* protect share_umv_size */ + + struct mutex vport_cfg_mutex; /* Protect stored vf table */ }; /* VPort level vlan tag configuration for TX direction */ @@ -826,6 +858,10 @@ 
struct hclge_vport { unsigned long state; unsigned long last_active_jiffies; u32 mps; /* Max packet size */ + + struct list_head uc_mac_list; /* Store VF unicast table */ + struct list_head mc_mac_list; /* Store VF multicast table */ + struct list_head vlan_list; /* Store VF vlan table */ }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -878,4 +914,19 @@ void hclge_vport_stop(struct hclge_vport *vport); int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf); u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); +int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type); +void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, + bool is_write_tbl, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, + enum HCLGE_MAC_ADDR_TYPE mac_type); +void hclge_uninit_vport_mac_table(struct hclge_dev *hdev); +void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id); +void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl); +void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); +void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index a1de451a85df..306a23e486de 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -203,12 +203,11 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { - bool en_uc = req->msg[1] ? true : false; - bool en_mc = req->msg[2] ? true : false; + bool en_bc = req->msg[1] ? 
true : false; struct hclge_promisc_param param; - /* always enable broadcast promisc bit */ - hclge_promisc_param_init(&param, en_uc, en_mc, true, vport->vport_id); + /* vf is not allowed to enable unicast/multicast broadcast */ + hclge_promisc_param_init(&param, false, false, en_bc, vport->vport_id); return hclge_cmd_set_promisc_mode(vport->back, &param); } @@ -225,12 +224,24 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, hclge_rm_uc_addr_common(vport, old_addr); status = hclge_add_uc_addr_common(vport, mac_addr); - if (status) + if (status) { hclge_add_uc_addr_common(vport, old_addr); + } else { + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_UC); + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); + } } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_ADD) { status = hclge_add_uc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_UC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_REMOVE) { status = hclge_rm_uc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_UC); } else { dev_err(&hdev->pdev->dev, "failed to set unicast mac addr, unknown subcode %d\n", @@ -256,8 +267,14 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_ADD) { status = hclge_add_mc_addr_common(vport, mac_addr); + if (!status) + hclge_add_vport_mac_table(vport, mac_addr, + HCLGE_MAC_ADDR_MC); } else if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_MC_REMOVE) { status = hclge_rm_mc_addr_common(vport, mac_addr); + if (!status) + hclge_rm_vport_mac_table(vport, mac_addr, + false, HCLGE_MAC_ADDR_MC); } else { dev_err(&hdev->pdev->dev, "failed to set mcast mac addr, unknown subcode %d\n", @@ -288,6 +305,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); + if (!status) + is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false) + : hclge_add_vport_vlan_table(vport, vlan); } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { struct hnae3_handle *handle = &vport->nic; bool en = mbx_req->msg[2] ?
true : false; @@ -320,10 +340,14 @@ static int hclge_get_vf_tcinfo(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { - struct hclge_dev *hdev = vport->back; - int ret; + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + u8 vf_tc_map = 0; + int i, ret; - ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &hdev->hw_tc_map, + for (i = 0; i < kinfo->num_tc; i++) + vf_tc_map |= BIT(i); + + ret = hclge_gen_resp_to_vf(vport, mbx_req, 0, &vf_tc_map, sizeof(u8)); return ret; @@ -333,35 +357,52 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) { -#define HCLGE_TQPS_RSS_INFO_LEN 8 +#define HCLGE_TQPS_RSS_INFO_LEN 6 u8 resp_data[HCLGE_TQPS_RSS_INFO_LEN]; struct hclge_dev *hdev = vport->back; /* get the queue related info */ memcpy(&resp_data[0], &vport->alloc_tqps, sizeof(u16)); memcpy(&resp_data[2], &vport->nic.kinfo.rss_size, sizeof(u16)); - memcpy(&resp_data[4], &hdev->num_desc, sizeof(u16)); - memcpy(&resp_data[6], &hdev->rx_buf_len, sizeof(u16)); + memcpy(&resp_data[4], &hdev->rx_buf_len, sizeof(u16)); return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, HCLGE_TQPS_RSS_INFO_LEN); } +static int hclge_get_vf_queue_depth(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ +#define HCLGE_TQPS_DEPTH_INFO_LEN 4 + u8 resp_data[HCLGE_TQPS_DEPTH_INFO_LEN]; + struct hclge_dev *hdev = vport->back; + + /* get the queue depth info */ + memcpy(&resp_data[0], &hdev->num_tx_desc, sizeof(u16)); + memcpy(&resp_data[2], &hdev->num_rx_desc, sizeof(u16)); + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + HCLGE_TQPS_DEPTH_INFO_LEN); +} + static int hclge_get_link_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[8]; + u8 msg_data[10]; + u16 media_type; u8 dest_vfid; u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; duplex = hdev->hw.mac.duplex; + media_type = hdev->hw.mac.media_type; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); + memcpy(&msg_data[8], &media_type, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ @@ -369,6 +410,29 @@ static int hclge_get_link_info(struct hclge_vport *vport, HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid); } +static void hclge_get_link_mode(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_SUPPORTED 1 + struct hclge_dev *hdev = vport->back; + unsigned long advertising; + unsigned long supported; + unsigned long send_data; + u8 msg_data[10]; + u8 dest_vfid; + + advertising = hdev->hw.mac.advertising[0]; + supported = hdev->hw.mac.supported[0]; + dest_vfid = mbx_req->mbx_src_vfid; + msg_data[0] = mbx_req->msg[2]; + + send_data = msg_data[0] == HCLGE_SUPPORTED ? 
supported : advertising; + + memcpy(&msg_data[2], &send_data, sizeof(unsigned long)); + hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HCLGE_MBX_LINK_STAT_MODE, dest_vfid); +} + static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { @@ -426,6 +490,24 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2); } +static int hclge_get_rss_key(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ +#define HCLGE_RSS_MBX_RESP_LEN 8 + u8 resp_data[HCLGE_RSS_MBX_RESP_LEN]; + struct hclge_dev *hdev = vport->back; + u8 index; + + index = mbx_req->msg[2]; + + memcpy(&resp_data[0], + &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], + HCLGE_RSS_MBX_RESP_LEN); + + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + HCLGE_RSS_MBX_RESP_LEN); +} + static bool hclge_cmd_crq_empty(struct hclge_hw *hw) { u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG); @@ -517,6 +599,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF failed(%d) to get Q info for VF\n", ret); break; + case HCLGE_MBX_GET_QDEPTH: + ret = hclge_get_vf_queue_depth(vport, req, true); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get Q depth for VF\n", + ret); + break; + case HCLGE_MBX_GET_TCINFO: ret = hclge_get_vf_tcinfo(vport, req, true); if (ret) @@ -553,6 +643,25 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF failed(%d) to get qid for VF\n", ret); break; + case HCLGE_MBX_GET_RSS_KEY: + ret = hclge_get_rss_key(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to get rss key for VF\n", + ret); + break; + case HCLGE_MBX_GET_LINK_MODE: + hclge_get_link_mode(vport, req); + break; + case HCLGE_MBX_GET_VF_FLR_STATUS: + mutex_lock(&hdev->vport_cfg_mutex); + hclge_rm_vport_all_mac_table(vport, true, + HCLGE_MAC_ADDR_UC); + hclge_rm_vport_all_mac_table(vport, true, + HCLGE_MAC_ADDR_MC); + hclge_rm_vport_all_vlan_table(vport, true); + mutex_unlock(&hdev->vport_cfg_mutex); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index dabb8437f8dc..48eda2c6fdae 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -8,12 +8,6 @@ #include "hclge_main.h" #include "hclge_mdio.h" -#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ - SUPPORTED_TP | \ - PHY_10BT_FEATURES | \ - PHY_100BT_FEATURES | \ - SUPPORTED_1000baseT_Full) - enum hclge_mdio_c22_op_seq { HCLGE_MDIO_C22_WRITE = 1, HCLGE_MDIO_C22_READ = 2 @@ -195,8 +189,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev) netdev_err(netdev, "failed to configure flow control.\n"); } -int hclge_mac_connect_phy(struct hclge_dev *hdev) +int hclge_mac_connect_phy(struct hnae3_handle *handle) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; @@ -215,22 +211,17 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) return ret; } - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - mask); - 
linkmode_set_bit_array(phy_gbit_features_array, - ARRAY_SIZE(phy_gbit_features_array), - mask); + linkmode_copy(mask, hdev->hw.mac.supported); linkmode_and(phydev->supported, phydev->supported, mask); - phy_support_asym_pause(phydev); + linkmode_copy(phydev->advertising, phydev->supported); return 0; } -void hclge_mac_disconnect_phy(struct hclge_dev *hdev) +void hclge_mac_disconnect_phy(struct hnae3_handle *handle) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; struct phy_device *phydev = hdev->hw.mac.phydev; if (!phydev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index 5fbf7dddb5d9..ef095d9c566f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -5,8 +5,8 @@ #define __HCLGE_MDIO_H int hclge_mac_mdio_config(struct hclge_dev *hdev); -int hclge_mac_connect_phy(struct hclge_dev *hdev); -void hclge_mac_disconnect_phy(struct hclge_dev *hdev); +int hclge_mac_connect_phy(struct hnae3_handle *handle); +void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void hclge_mac_start_phy(struct hclge_dev *hdev); void hclge_mac_stop_phy(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 00458da67503..aafc69f4bfdd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -517,20 +517,39 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; + u16 max_rss_size; u8 i; - vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - kinfo->num_tc = - min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc); - kinfo->rss_size - = min_t(u16, hdev->rss_size_max, - kinfo->num_tqps / kinfo->num_tc); - vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; + /* TC configuration is shared by PF/VF in one port, only allow + * one tc for VF for simplicity. VF's vport_id is non zero. + */ + kinfo->num_tc = vport->vport_id ? 1 : + min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); + vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) + + (vport->vport_id ? 
(vport->vport_id - 1) : 0); + + max_rss_size = min_t(u16, hdev->rss_size_max, + vport->alloc_tqps / kinfo->num_tc); + + if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && + kinfo->req_rss_size <= max_rss_size) { + dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", + kinfo->rss_size, kinfo->req_rss_size); + kinfo->rss_size = kinfo->req_rss_size; + } else if (kinfo->rss_size > max_rss_size || + (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) { + dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", + kinfo->rss_size, max_rss_size); + kinfo->rss_size = max_rss_size; + } + + kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size; vport->dwrr = 100; /* 100 percent as init */ vport->alloc_rss_size = kinfo->rss_size; + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - for (i = 0; i < kinfo->num_tc; i++) { - if (hdev->hw_tc_map & BIT(i)) { + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) { kinfo->tc_info[i].enable = true; kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; kinfo->tc_info[i].tqp_count = kinfo->rss_size; @@ -753,13 +772,17 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { /* Cfg qs -> pri mapping, one by one mapping */ - for (k = 0; k < hdev->num_alloc_vport; k++) - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = + &vport[k].nic.kinfo; + + for (i = 0; i < kinfo->num_tc; i++) { ret = hclge_tm_qs_to_pri_map_cfg( hdev, vport[k].qs_offset + i, i); if (ret) return ret; } + } } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ for (k = 0; k < hdev->num_alloc_vport; k++) @@ -934,6 +957,36 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return 0; } +static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev) +{ +#define DEFAULT_TC_WEIGHT 1 +#define DEFAULT_TC_OFFSET 14 + + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false); + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hclge_pg_info *pg_info; + + ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + ets_weight->tc_weight[i] = pg_info->tc_dwrr[i]; + } + + ets_weight->weight_offset = DEFAULT_TC_OFFSET; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; @@ -983,6 +1036,19 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); if (ret) return ret; + + if (!hnae3_dev_dcb_supported(hdev)) + return 0; + + ret = hclge_tm_ets_tc_dwrr_cfg(hdev); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "fw %08x does't support ets tc weight cmd\n", + hdev->fw_version); + ret = 0; + } + + return ret; } else { ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); if (ret) @@ -992,7 +1058,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) return 0; } -int hclge_tm_map_cfg(struct hclge_dev *hdev) +static int hclge_tm_map_cfg(struct hclge_dev *hdev) { int ret; @@ -1107,7 +1173,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) return 0; } -int 
hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) { int ret; @@ -1118,7 +1184,7 @@ int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) return hclge_tm_lvl34_schd_mode_cfg(hdev); } -static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) { int ret; @@ -1159,7 +1225,7 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev) HCLGE_RX_MAC_PAUSE_EN_MSK; return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, - hdev->tm_info.hw_pfc_map); + hdev->tm_info.pfc_en); } /* Each Tc has a 1024 queue sets to backpress, it divides to @@ -1228,10 +1294,23 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); } -int hclge_pause_setup_hw(struct hclge_dev *hdev) +static int hclge_tm_bp_setup(struct hclge_dev *hdev) +{ + int ret = 0; + int i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_bp_setup_hw(hdev, i); + if (ret) + return ret; + } + + return ret; +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) { int ret; - u8 i; ret = hclge_pause_param_setup_hw(hdev); if (ret) @@ -1245,18 +1324,17 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) if (!hnae3_dev_dcb_supported(hdev)) return 0; - /* When MAC is GE Mode, hdev does not support pfc setting */ + /* GE MAC does not support PFC, when driver is initializing and MAC + * is in GE Mode, ignore the error here, otherwise initialization + * will fail. + */ ret = hclge_pfc_setup_hw(hdev); - if (ret) - dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); - - for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_bp_setup_hw(hdev, i); - if (ret) - return ret; - } + if (init && ret == -EOPNOTSUPP) + dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); + else + return ret; - return 0; + return hclge_tm_bp_setup(hdev); } void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) @@ -1294,7 +1372,7 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hclge_tm_schd_info_init(hdev); } -int hclge_tm_init_hw(struct hclge_dev *hdev) +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init) { int ret; @@ -1306,7 +1384,7 @@ int hclge_tm_init_hw(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_pause_setup_hw(hdev); + ret = hclge_pause_setup_hw(hdev, init); if (ret) return ret; @@ -1325,5 +1403,22 @@ int hclge_tm_schd_init(struct hclge_dev *hdev) if (ret) return ret; - return hclge_tm_init_hw(hdev); + return hclge_tm_init_hw(hdev, true); +} + +int hclge_tm_vport_map_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + + hclge_tm_vport_tc_info_update(vport); + + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) + return 0; + + return hclge_tm_bp_setup(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index b6496a439304..f60e540c7a62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -142,13 +142,13 @@ struct hclge_port_shapping_cmd { (HCLGE_TM_SHAP_##string##_LSH)) int hclge_tm_schd_init(struct hclge_dev *hdev); -int hclge_pause_setup_hw(struct hclge_dev *hdev); -int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); +int hclge_tm_vport_map_update(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init); +int hclge_tm_schd_setup_hw(struct hclge_dev 
*hdev); void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); -int hclge_tm_map_cfg(struct hclge_dev *hdev); -int hclge_tm_init_hw(struct hclge_dev *hdev); +int hclge_tm_init_hw(struct hclge_dev *hdev, bool init); int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 4e78e8812a04..9441b453d38d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -362,8 +362,28 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) return 0; } +static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) +{ + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); +} + void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); + clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + hclgevf_cmd_uninit_regs(&hdev->hw); + spin_unlock(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 82103d5fa815..8bc28e6f465f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -21,6 +21,14 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { {0, } }; +static const u8 hclgevf_hash_key[] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA +}; + MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, @@ -78,7 +86,12 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, static inline struct hclgevf_dev *hclgevf_ae_get_hdev( struct hnae3_handle *handle) { - return container_of(handle, struct hclgevf_dev, nic); + if (!handle->client) + return container_of(handle, struct hclgevf_dev, nic); + else if (handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclgevf_dev, roce); + else + return container_of(handle, struct hclgevf_dev, nic); } static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) @@ -234,7 +247,7 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { -#define HCLGEVF_TQPS_RSS_INFO_LEN 8 +#define HCLGEVF_TQPS_RSS_INFO_LEN 6 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; int 
status; @@ -250,8 +263,29 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16)); memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16)); - memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16)); - memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16)); + memcpy(&hdev->rx_buf_len, &resp_msg[4], sizeof(u16)); + + return 0; +} + +static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 + u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QDEPTH, 0, NULL, 0, + true, resp_msg, + HCLGEVF_TQPS_DEPTH_INFO_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get tqp depth info from PF failed %d", + ret); + return ret; + } + + memcpy(&hdev->num_tx_desc, &resp_msg[0], sizeof(u16)); + memcpy(&hdev->num_rx_desc, &resp_msg[2], sizeof(u16)); return 0; } @@ -291,7 +325,8 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) tqp->q.ae_algo = &ae_algovf; tqp->q.buf_size = hdev->rx_buf_len; - tqp->q.desc_num = hdev->num_desc; + tqp->q.tx_desc_num = hdev->num_tx_desc; + tqp->q.rx_desc_num = hdev->num_rx_desc; tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET + i * HCLGEVF_TQP_REG_SIZE; @@ -310,7 +345,8 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) kinfo = &nic->kinfo; kinfo->num_tc = 0; - kinfo->num_desc = hdev->num_desc; + kinfo->num_tx_desc = hdev->num_tx_desc; + kinfo->num_rx_desc = hdev->num_rx_desc; kinfo->rx_buf_len = hdev->rx_buf_len; for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) if (hdev->hw_tc_map & BIT(i)) @@ -349,20 +385,40 @@ static void hclgevf_request_link_info(struct hclgevf_dev *hdev) void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) { + struct hnae3_handle *rhandle = &hdev->roce; struct hnae3_handle *handle = &hdev->nic; + struct hnae3_client *rclient; struct hnae3_client *client; client = handle->client; + rclient = hdev->roce_client; link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; if (link_state != hdev->hw.mac.link) { client->ops->link_status_change(handle, !!link_state); + if (rclient && rclient->ops->link_status_change) + rclient->ops->link_status_change(rhandle, !!link_state); hdev->hw.mac.link = link_state; } } +void hclgevf_update_link_mode(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_ADVERTISING 0 +#define HCLGEVF_SUPPORTED 1 + u8 send_msg; + u8 resp_msg; + + send_msg = HCLGEVF_ADVERTISING; + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, + sizeof(u8), false, &resp_msg, sizeof(u8)); + send_msg = HCLGEVF_SUPPORTED; + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_MODE, 0, &send_msg, + sizeof(u8), false, &resp_msg, sizeof(u8)); +} + static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) { struct hnae3_handle *nic = &hdev->nic; @@ -564,12 +620,50 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size) return status; } +/* for revision 0x20, vf shared the same rss config with pf */ +static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) +{ +#define HCLGEVF_RSS_MBX_RESP_LEN 8 + + struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; + u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; + u16 msg_num, hash_key_index; + u8 index; + int ret; + + msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / + HCLGEVF_RSS_MBX_RESP_LEN; + for (index = 0; index < msg_num; index++) { + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_RSS_KEY, 0, + &index, sizeof(index), + true, resp_msg, + HCLGEVF_RSS_MBX_RESP_LEN); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF get rss hash key from PF failed, ret=%d", + ret); + return ret; + } + + hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; + if (index == msg_num - 1) + memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], + HCLGEVF_RSS_KEY_SIZE - hash_key_index); + else + memcpy(&rss_cfg->rss_hash_key[hash_key_index], + &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); + } + + return 0; +} + static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, u8 *hfunc) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; - int i; + int i, ret; if (handle->pdev->revision >= 0x21) { /* Get hash algorithm */ @@ -591,6 +685,16 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, if (key) memcpy(key, rss_cfg->rss_hash_key, HCLGEVF_RSS_KEY_SIZE); + } else { + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (key) { + ret = hclgevf_get_rss_hash_key(hdev); + if (ret) + return ret; + memcpy(key, rss_cfg->rss_hash_key, + HCLGEVF_RSS_KEY_SIZE); + } } if (indir) @@ -964,33 +1068,29 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) } static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, - bool en_uc_pmc, bool en_mc_pmc) + bool en_bc_pmc) { struct hclge_mbx_vf_to_pf_cmd *req; struct hclgevf_desc desc; - int status; + int ret; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; - req->msg[1] = en_uc_pmc ? 1 : 0; - req->msg[2] = en_mc_pmc ? 1 : 0; + req->msg[1] = en_bc_pmc ? 
1 : 0; - status = hclgevf_cmd_send(&hdev->hw, &desc, 1); - if (status) + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) dev_err(&hdev->pdev->dev, - "Set promisc mode fail, status is %d.\n", status); + "Set promisc mode fail, status is %d.\n", ret); - return status; + return ret; } -static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, - bool en_uc_pmc, bool en_mc_pmc) +static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - - return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc); + return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc); } static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, @@ -1264,7 +1364,7 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) if (ret) return ret; - return 0; + return hclgevf_notify_client(hdev, HNAE3_RESTORE_CLIENT); } static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) @@ -1610,6 +1710,10 @@ static void hclgevf_keep_alive_task(struct work_struct *work) int ret; hdev = container_of(work, struct hclgevf_dev, keep_alive_task); + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return; + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, 0, false, &respmsg, sizeof(u8)); if (ret) @@ -1628,6 +1732,8 @@ static void hclgevf_service_task(struct work_struct *work) */ hclgevf_request_link_info(hdev); + hclgevf_update_link_mode(hdev); + hclgevf_deferred_task_schedule(hdev); clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); @@ -1708,12 +1814,16 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; - hdev->hw.mac.media_type = HNAE3_MEDIA_TYPE_NONE; - /* get queue configuration from PF */ ret = hclgevf_get_queue_info(hdev); if (ret) return ret; + + /* get queue depth info from PF */ + ret = hclgevf_get_queue_depth(hdev); + if (ret) + return ret; + /* get tc configuration from PF */ return hclgevf_get_tc_info(hdev); } @@ -1788,9 +1898,9 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) rss_cfg->rss_size = hdev->rss_size_max; if (hdev->pdev->revision >= 0x21) { - rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ; - netdev_rss_key_fill(rss_cfg->rss_hash_key, - HCLGEVF_RSS_KEY_SIZE); + rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE; + memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key, + HCLGEVF_RSS_KEY_SIZE); ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo, rss_cfg->rss_hash_key); @@ -1862,6 +1972,8 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) hclgevf_request_link_info(hdev); + hclgevf_update_link_mode(hdev); + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); return 0; @@ -2377,6 +2489,15 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_config; + /* vf is not allowed to enable unicast/multicast promisc mode. + * For revision 0x20, default to disable broadcast promisc mode, + * firmware makes sure broadcast packets can be accepted. + * For revision 0x21, default to enable broadcast promisc mode. 
+ */ + ret = hclgevf_set_promisc_mode(hdev, true); + if (ret) + goto err_config; + /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -2461,7 +2582,8 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) struct hnae3_handle *nic = &hdev->nic; struct hnae3_knic_private_info *kinfo = &nic->kinfo; - return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); + return min_t(u32, hdev->rss_size_max, + hdev->num_tqps / kinfo->num_tc); } /** @@ -2482,7 +2604,7 @@ static void hclgevf_get_channels(struct hnae3_handle *handle, ch->max_combined = hclgevf_get_max_channels(hdev); ch->other_count = 0; ch->max_other = 0; - ch->combined_count = hdev->num_tqps; + ch->combined_count = handle->kinfo.rss_size; } static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, @@ -2522,7 +2644,7 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, hdev->hw.mac.duplex = duplex; } -static int hclgevf_gro_en(struct hnae3_handle *handle, int enable) +static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@ -2558,6 +2680,16 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) return hdev->reset_count; } +static void hclgevf_get_link_mode(struct hnae3_handle *handle, + unsigned long *supported, + unsigned long *advertising) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + *supported = hdev->hw.mac.supported; + *advertising = hdev->hw.mac.advertising; +} + #define MAX_SEPARATE_NUM 4 #define SEPARATOR_VALUE 0xFFFFFFFF #define REG_NUM_PER_LINE 4 @@ -2640,7 +2772,6 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_vector = hclgevf_get_vector, .put_vector = hclgevf_put_vector, .reset_queue = hclgevf_reset_tqp, - .set_promisc_mode = hclgevf_set_promisc_mode, .get_mac_addr = hclgevf_get_mac_addr, .set_mac_addr = hclgevf_set_mac_addr, .add_uc_addr = hclgevf_add_uc_addr, @@ -2677,6 +2808,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_mtu = hclgevf_set_mtu, .get_global_queue_id = hclgevf_get_qid_global, .set_timer_task = hclgevf_set_timer_task, + .get_link_mode = hclgevf_get_link_mode, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 787bc06944e5..c128863ee7d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -145,6 +145,8 @@ struct hclgevf_mac { int link; u8 duplex; u32 speed; + u64 supported; + u64 advertising; }; struct hclgevf_hw { @@ -237,7 +239,8 @@ struct hclgevf_dev { u16 num_alloc_vport; /* num vports this driver supports */ u32 numa_node_mask; u16 rx_buf_len; - u16 num_desc; + u16 num_tx_desc; /* desc num of per tx queue */ + u16 num_rx_desc; /* desc num of per rx queue */ u8 hw_tc_map; u16 num_msi; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 84653f58b2d1..7dc3c9f79169 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -197,6 +197,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: + case HCLGE_MBX_LINK_STAT_MODE: /* set this mbx event as pending. This is required as we * might loose interrupt event when mbx task is busy * handling. 
This shall be cleared when mbx task just @@ -247,6 +248,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) u8 duplex; u32 speed; u32 tail; + u8 idx; /* we can safely clear it now as we are at start of the async message * processing @@ -270,12 +272,22 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) link_status = le16_to_cpu(msg_q[1]); memcpy(&speed, &msg_q[2], sizeof(speed)); duplex = (u8)le16_to_cpu(msg_q[4]); + hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]); /* update upper layer with new link link status */ hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); break; + case HCLGE_MBX_LINK_STAT_MODE: + idx = (u8)le16_to_cpu(msg_q[1]); + if (idx) + memcpy(&hdev->hw.mac.supported, &msg_q[2], + sizeof(unsigned long)); + else + memcpy(&hdev->hw.mac.advertising, &msg_q[2], + sizeof(unsigned long)); + break; case HCLGE_MBX_ASSERTING_RESET: /* PF has asserted reset hence VF should go in pending * state and poll for the hardware reset status till it diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 017e08452d8c..baf5cc251f32 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) } hns_mdio_cmd_write(mdio_dev, is_c45, - MDIO_C45_WRITE_ADDR, phy_id, devad); + MDIO_C45_READ, phy_id, devad); } /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 6b19607a4caa..3875f39f43bb 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -1008,3 +1008,16 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, &hw_ci, sizeof(hw_ci), NULL, NULL, HINIC_MGMT_MSG_SYNC); } + +/** + * hinic_hwdev_set_msix_state- set msix state + * @hwdev: the NIC HW device + * @msix_index: IRQ corresponding index number + * @flag: msix state + * + **/ +void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index, + enum hinic_msix_state flag) +{ + hinic_set_msix_state(hwdev->hwif, msix_index, flag); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index d1a7d2522d82..c9e621e19dd0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -240,4 +240,7 @@ int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, u8 pending_limit, u8 coalesc_timer); +void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index, + enum hinic_msix_state flag); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c index 823a17061a97..9b160f076904 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -168,6 +168,22 @@ void hinic_db_state_set(struct hinic_hwif *hwif, hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); } +void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx, + enum hinic_msix_state flag) +{ + u32 offset = msix_idx * HINIC_PCI_MSIX_ENTRY_SIZE + + HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL; + u32 mask_bits; + + mask_bits = readl(hwif->intr_regs_base + offset); + mask_bits &= 
~HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + + if (flag) + mask_bits |= HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT; + + writel(mask_bits, hwif->intr_regs_base + offset); +} + /** * hwif_ready - test if the HW is ready for use * @hwif: the HW interface of a pci function device @@ -321,6 +337,13 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev) return -ENOMEM; } + hwif->intr_regs_base = pci_ioremap_bar(pdev, HINIC_PCI_INTR_REGS_BAR); + if (!hwif->intr_regs_base) { + dev_err(&pdev->dev, "Failed to map configuration regs\n"); + err = -ENOMEM; + goto err_map_intr_bar; + } + err = hwif_ready(hwif); if (err) { dev_err(&pdev->dev, "HW interface is not ready\n"); @@ -337,7 +360,11 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev) return 0; err_hwif_ready: + iounmap(hwif->intr_regs_base); + +err_map_intr_bar: iounmap(hwif->cfg_regs_bar); + return err; } @@ -347,5 +374,6 @@ err_hwif_ready: **/ void hinic_free_hwif(struct hinic_hwif *hwif) { + iounmap(hwif->intr_regs_base); iounmap(hwif->cfg_regs_bar); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index 5b4760c0e9f5..22ec7f73e0a6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -152,6 +152,7 @@ #define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF) #define HINIC_PCI_CFG_REGS_BAR 0 +#define HINIC_PCI_INTR_REGS_BAR 2 #define HINIC_PCI_DB_BAR 4 #define HINIC_PCIE_ST_DISABLE 0 @@ -164,6 +165,10 @@ #define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */ #define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */ +#define HINIC_PCI_MSIX_ENTRY_SIZE 16 +#define HINIC_PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define HINIC_PCI_MSIX_ENTRY_CTRL_MASKBIT 1 + enum hinic_pcie_nosnoop { HINIC_PCIE_SNOOP = 0, HINIC_PCIE_NO_SNOOP = 1, @@ -207,6 +212,11 @@ enum hinic_db_state { HINIC_DB_DISABLE = 1, }; +enum hinic_msix_state { + HINIC_MSIX_ENABLE, + HINIC_MSIX_DISABLE, +}; + struct hinic_func_attr { u16 func_idx; u8 pf_idx; @@ -226,6 +236,7 @@ struct hinic_func_attr { struct hinic_hwif { struct pci_dev *pdev; void __iomem *cfg_regs_bar; + void __iomem *intr_regs_base; struct hinic_func_attr attr; }; @@ -251,6 +262,9 @@ int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, u8 *lli_timer, u8 *lli_credit_limit, u8 *resend_timer); +void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx, + enum hinic_msix_state flag); + int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index); void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index da323b9e1f62..e64bc664f687 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -51,9 +51,10 @@ static unsigned int rx_weight = 64; module_param(rx_weight, uint, 0644); MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); -#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 -#define HINIC_DEV_ID_DUAL_PORT_25GE 0x0200 -#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0201 +#define HINIC_DEV_ID_QUAD_PORT_25GE 0x1822 +#define HINIC_DEV_ID_DUAL_PORT_100GE 0x0200 +#define HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ 0x0205 +#define HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ 0x0210 #define HINIC_WQ_NAME "hinic_dev" @@ -1113,8 +1114,9 @@ static void hinic_shutdown(struct pci_dev *pdev) static const struct pci_device_id hinic_pci_table[] = { { PCI_VDEVICE(HUAWEI, 
HINIC_DEV_ID_QUAD_PORT_25GE), 0}, - { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_25GE), 0}, { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_DUAL_PORT_100GE_MEZZ), 0}, + { PCI_VDEVICE(HUAWEI, HINIC_DEV_ID_QUAD_PORT_25GE_MEZZ), 0}, { 0, 0} }; MODULE_DEVICE_TABLE(pci, hinic_pci_table); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 0098b206e7e9..b6d218768ec1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -381,6 +381,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) static int rx_poll(struct napi_struct *napi, int budget) { struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi); + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); struct hinic_rq *rq = rxq->rq; int pkts; @@ -389,7 +390,10 @@ static int rx_poll(struct napi_struct *napi, int budget) return budget; napi_complete(napi); - enable_irq(rq->irq); + hinic_hwdev_set_msix_state(nic_dev->hwdev, + rq->msix_entry, + HINIC_MSIX_ENABLE); + return pkts; } @@ -414,7 +418,10 @@ static irqreturn_t rx_irq(int irq, void *data) struct hinic_dev *nic_dev; /* Disable the interrupt until napi will be completed */ - disable_irq_nosync(rq->irq); + nic_dev = netdev_priv(rxq->netdev); + hinic_hwdev_set_msix_state(nic_dev->hwdev, + rq->msix_entry, + HINIC_MSIX_DISABLE); nic_dev = netdev_priv(rxq->netdev); hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 11e73e67358d..e17bf33eba0c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -655,7 +655,9 @@ static int free_tx_poll(struct napi_struct *napi, int budget) if (pkts < budget) { napi_complete(napi); - enable_irq(sq->irq); + hinic_hwdev_set_msix_state(nic_dev->hwdev, + sq->msix_entry, + HINIC_MSIX_ENABLE); return pkts; } @@ -682,7 +684,9 @@ static irqreturn_t tx_irq(int irq, void *data) nic_dev = netdev_priv(txq->netdev); /* Disable the interrupt until napi will be completed */ - disable_irq_nosync(txq->sq->irq); + hinic_hwdev_set_msix_state(nic_dev->hwdev, + txq->sq->msix_entry, + HINIC_MSIX_DISABLE); hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index d719668a6684..92929750f832 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) dev->stats.tx_aborted_errors++; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); tx_cmd->cmd.command = 0; /* Mark free */ break; diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index 2f7ae118217f..1274ad24d6af 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -1194,7 +1194,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) dma_unmap_single(dev->dev.parent, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); tx_cmd->cmd.command = 0; /* Mark free */ break; diff --git a/drivers/net/ethernet/ibm/emac/Kconfig b/drivers/net/ethernet/ibm/emac/Kconfig index 90d49191beb3..eacf7e141fdc 100644 --- a/drivers/net/ethernet/ibm/emac/Kconfig +++ b/drivers/net/ethernet/ibm/emac/Kconfig @@ -28,18 +28,6 @@ config IBM_EMAC_RX_COPY_THRESHOLD depends on 
IBM_EMAC default "256" -config IBM_EMAC_RX_SKB_HEADROOM - int "Additional RX skb headroom (bytes)" - depends on IBM_EMAC - default "0" - help - Additional receive skb headroom. Note, that driver - will always reserve at least 2 bytes to make IP header - aligned, so usually there is no need to add any additional - headroom. - - If unsure, set to 0. - config IBM_EMAC_DEBUG bool "Debugging" depends on IBM_EMAC diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 209255495bc9..3c2a5759844a 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -1071,7 +1071,9 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) /* Second pass, allocate new skbs */ for (i = 0; i < NUM_RX_BUFF; ++i) { - struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC); + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(dev->ndev, rx_skb_size); if (!skb) { ret = -ENOMEM; goto oom; @@ -1080,10 +1082,10 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) BUG_ON(!dev->rx_skb[i]); dev_kfree_skb(dev->rx_skb[i]); - skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); dev->rx_desc[i].data_ptr = - dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size, - DMA_FROM_DEVICE) + 2; + dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN, + rx_sync_size, DMA_FROM_DEVICE) + + NET_IP_ALIGN; dev->rx_skb[i] = skb; } skip: @@ -1174,20 +1176,18 @@ static void emac_clean_rx_ring(struct emac_instance *dev) } } -static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot, - gfp_t flags) +static int +__emac_prepare_rx_skb(struct sk_buff *skb, struct emac_instance *dev, int slot) { - struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags); if (unlikely(!skb)) return -ENOMEM; dev->rx_skb[slot] = skb; dev->rx_desc[slot].data_len = 0; - skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2); dev->rx_desc[slot].data_ptr = - dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size, - DMA_FROM_DEVICE) + 2; + dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN, + dev->rx_sync_size, DMA_FROM_DEVICE) + NET_IP_ALIGN; wmb(); dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY | (slot == (NUM_RX_BUFF - 1) ? 
MAL_RX_CTRL_WRAP : 0); @@ -1195,6 +1195,27 @@ static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot, return 0; } +static int +emac_alloc_rx_skb(struct emac_instance *dev, int slot) +{ + struct sk_buff *skb; + + skb = __netdev_alloc_skb_ip_align(dev->ndev, dev->rx_skb_size, + GFP_KERNEL); + + return __emac_prepare_rx_skb(skb, dev, slot); +} + +static int +emac_alloc_rx_skb_napi(struct emac_instance *dev, int slot) +{ + struct sk_buff *skb; + + skb = napi_alloc_skb(&dev->mal->napi, dev->rx_skb_size); + + return __emac_prepare_rx_skb(skb, dev, slot); +} + static void emac_print_link_status(struct emac_instance *dev) { if (netif_carrier_ok(dev->ndev)) @@ -1225,7 +1246,7 @@ static int emac_open(struct net_device *ndev) /* Allocate RX ring */ for (i = 0; i < NUM_RX_BUFF; ++i) - if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) { + if (emac_alloc_rx_skb(dev, i)) { printk(KERN_ERR "%s: failed to allocate RX ring\n", ndev->name); goto oom; @@ -1660,8 +1681,9 @@ static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot, DBG2(dev, "recycle %d %d" NL, slot, len); if (len) - dma_map_single(&dev->ofdev->dev, skb->data - 2, - EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE); + dma_map_single(&dev->ofdev->dev, skb->data - NET_IP_ALIGN, + SKB_DATA_ALIGN(len + NET_IP_ALIGN), + DMA_FROM_DEVICE); dev->rx_desc[slot].data_len = 0; wmb(); @@ -1713,7 +1735,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot) int len = dev->rx_desc[slot].data_len; int tot_len = dev->rx_sg_skb->len + len; - if (unlikely(tot_len + 2 > dev->rx_skb_size)) { + if (unlikely(tot_len + NET_IP_ALIGN > dev->rx_skb_size)) { ++dev->estats.rx_dropped_mtu; dev_kfree_skb(dev->rx_sg_skb); dev->rx_sg_skb = NULL; @@ -1769,16 +1791,18 @@ static int emac_poll_rx(void *param, int budget) } if (len && len < EMAC_RX_COPY_THRESH) { - struct sk_buff *copy_skb = - alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC); + struct sk_buff *copy_skb; + + copy_skb = napi_alloc_skb(&dev->mal->napi, len); if (unlikely(!copy_skb)) goto oom; - skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2); - memcpy(copy_skb->data - 2, skb->data - 2, len + 2); + memcpy(copy_skb->data - NET_IP_ALIGN, + skb->data - NET_IP_ALIGN, + len + NET_IP_ALIGN); emac_recycle_rx_skb(dev, slot, len); skb = copy_skb; - } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) + } else if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) goto oom; skb_put(skb, len); @@ -1799,7 +1823,7 @@ static int emac_poll_rx(void *param, int budget) sg: if (ctrl & MAL_RX_CTRL_FIRST) { BUG_ON(dev->rx_sg_skb); - if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) { + if (unlikely(emac_alloc_rx_skb_napi(dev, slot))) { DBG(dev, "rx OOM %d" NL, slot); ++dev->estats.rx_dropped_oom; emac_recycle_rx_skb(dev, slot, 0); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 84caa4a3fc52..187689cd8212 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -68,22 +68,18 @@ static inline int emac_rx_size(int mtu) return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD); } -#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment()) - -#define EMAC_RX_SKB_HEADROOM \ - EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM) - /* Size of RX skb for the given MTU */ static inline int emac_rx_skb_size(int mtu) { int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu)); - return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM; + + return SKB_DATA_ALIGN(size + NET_IP_ALIGN) + NET_SKB_PAD; } /* RX DMA sync 
size */ static inline int emac_rx_sync_size(int mtu) { - return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2); + return SKB_DATA_ALIGN(emac_rx_size(mtu) + NET_IP_ALIGN); } /* Driver statistcs is split into two parts to make it more cache friendly: diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 257bd59bc9c6..f86d55657959 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -696,11 +696,16 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) ret_val = e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data); - if (ret_val) - return ret_val; - kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; - e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + e_dbg("Error disabling far-end loopback\n"); + } else { + e_dbg("Error disabling far-end loopback\n"); + } ret_val = e1000e_get_auto_rd_done(hw); if (ret_val) @@ -754,11 +759,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) return ret_val; /* Disable IBIST slave mode (far-end loopback) */ - e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - &kum_reg_data); - kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; - e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, - kum_reg_data); + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (!ret_val) { + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, + E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + if (ret_val) + e_dbg("Error disabling far-end loopback\n"); + } else { + e_dbg("Error disabling far-end loopback\n"); + } /* Set the transmit descriptor write-back policy */ reg_data = er32(TXDCTL(0)); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 189f231075c2..7acc61e4f645 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2106,7 +2106,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) if (strlen(netdev->name) < (IFNAMSIZ - 5)) snprintf(adapter->rx_ring->name, sizeof(adapter->rx_ring->name) - 1, - "%s-rx-0", netdev->name); + "%.14s-rx-0", netdev->name); else memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, @@ -2122,7 +2122,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter) if (strlen(netdev->name) < (IFNAMSIZ - 5)) snprintf(adapter->tx_ring->name, sizeof(adapter->tx_ring->name) - 1, - "%s-tx-0", netdev->name); + "%.14s-tx-0", netdev->name); else memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, @@ -5309,8 +5309,13 @@ static void e1000_watchdog_task(struct work_struct *work) /* 8000ES2LAN requires a Rx packet buffer work-around * on link down event; reset the controller to flush * the Rx packet buffer. + * + * If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. So + * reset the controller to flush the Tx packet buffers. 
*/ - if (adapter->flags & FLAG_RX_NEEDS_RESTART) + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || + e1000_desc_unused(tx_ring) + 1 < tx_ring->count) adapter->flags |= FLAG_RESTART_NOW; else pm_schedule_suspend(netdev->dev.parent, @@ -5333,14 +5338,6 @@ link_up: adapter->gotc_old = adapter->stats.gotc; spin_unlock(&adapter->stats64_lock); - /* If the link is lost the controller stops DMA, but - * if there is queued Tx work it cannot be done. So - * reset the controller to flush the Tx packet buffers. - */ - if (!netif_carrier_ok(netdev) && - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) - adapter->flags |= FLAG_RESTART_NOW; - /* If reset is necessary, do it outside of interrupt context. */ if (adapter->flags & FLAG_RESTART_NOW) { schedule_work(&adapter->reset_task); @@ -7351,6 +7348,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) e1000_print_device_info(adapter); + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP); + if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 6fd15a734324..5a0419421511 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1603,14 +1603,12 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface, { struct fm10k_q_vector *q_vector; struct fm10k_ring *ring; - int ring_count, size; + int ring_count; ring_count = txr_count + rxr_count; - size = sizeof(struct fm10k_q_vector) + - (sizeof(struct fm10k_ring) * ring_count); /* allocate q_vector and rings */ - q_vector = kzalloc(size, GFP_KERNEL); + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), GFP_KERNEL); if (!q_vector) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 8f0a99b6a537..cb4d02629b86 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1148,7 +1148,7 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, * @results: Pointer array to message, results[0] is pointer to message * @mbx: Pointer to mailbox information structure * - * This function is a default handler for MSI-X requests from the VF. The + * This function is a default handler for MSI-X requests from the VF. The * assumption is that in this case it is acceptable to just directly * hand off the message from the VF to the underlying shared code. 
**/ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 8de9085bba9e..d684998ba2b0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -34,6 +34,7 @@ #include #include #include +#include #include "i40e_type.h" #include "i40e_prototype.h" #include "i40e_client.h" @@ -523,6 +524,8 @@ struct i40e_pf { #define I40E_FLAG_FD_SB_INACTIVE BIT(22) #define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23) #define I40E_FLAG_DISABLE_FW_LLDP BIT(24) +#define I40E_FLAG_RS_FEC BIT(25) +#define I40E_FLAG_BASE_R_FEC BIT(26) struct i40e_client_instance *cinst; bool stat_offsets_loaded; @@ -787,11 +790,6 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); - - /* AF_XDP zero-copy */ - struct xdp_umem **xsk_umems; - u16 num_xsk_umems_used; - u16 num_xsk_umems; } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -1091,6 +1089,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf); void i40e_print_link_message(struct i40e_vsi *vsi, bool isup); +void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags); + static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) { return !!vsi->xdp_prog; @@ -1104,10 +1104,10 @@ static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) if (ring_is_xdp(ring)) qid -= ring->vsi->alloc_queue_pairs; - if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on) + if (!xdp_on) return NULL; - return ring->vsi->xsk_umems[qid]; + return xdp_get_umem_from_qid(ring->vsi->netdev, qid); } int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index a20d1cf058ad..c67d485d6f99 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1642,30 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, count = buf_tmp - i40e_dbg_netdev_ops_buf + 1; } - if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { - cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); - if (cnt != 1) { - dev_info(&pf->pdev->dev, "tx_timeout \n"); - goto netdev_ops_write_done; - } - vsi = i40e_dbg_find_vsi(pf, vsi_seid); - if (!vsi) { - dev_info(&pf->pdev->dev, - "tx_timeout: VSI %d not found\n", vsi_seid); - } else if (!vsi->netdev) { - dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", - vsi_seid); - } else if (test_bit(__I40E_VSI_DOWN, vsi->state)) { - dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", - vsi_seid); - } else if (rtnl_trylock()) { - vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev); - rtnl_unlock(); - dev_info(&pf->pdev->dev, "tx_timeout called\n"); - } else { - dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); - } - } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { + if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { int mtu; cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i", @@ -1733,7 +1710,6 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, dev_info(&pf->pdev->dev, "unknown command '%s'\n", i40e_dbg_netdev_ops_buf); dev_info(&pf->pdev->dev, "available commands\n"); - dev_info(&pf->pdev->dev, " tx_timeout \n"); dev_info(&pf->pdev->dev, " change_mtu \n"); dev_info(&pf->pdev->dev, " set_rx_mode \n"); dev_info(&pf->pdev->dev, " napi \n"); diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a6bc7847346b..4c885801fa26 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -438,6 +438,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("disable-source-pruning", I40E_FLAG_SOURCE_PRUNING_DISABLED, 0), I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0), + I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0), + I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) @@ -606,6 +608,24 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); } + if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) { + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) { + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); + } + } /* need to add new 10G PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) { @@ -721,6 +741,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -825,6 +852,9 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, supported, @@ -838,6 +868,10 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 40000baseKR4_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, advertising, 20000baseKR2_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -855,6 +889,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, 25000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); + 
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: @@ -862,9 +903,15 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, advertising, + FEC_BASER); ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseCR_Full); ethtool_link_ksettings_add_link_mode(ks, advertising, @@ -1261,6 +1308,154 @@ done: return err; } +static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + i40e_status status = 0; + u32 flags = 0; + int err = 0; + + flags = READ_ONCE(pf->flags); + i40e_set_fec_in_flags(fec_cfg, &flags); + + /* Get the current phy config */ + memset(&abilities, 0, sizeof(abilities)); + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + if (abilities.fec_cfg_curr_mod_ext_info != fec_cfg) { + struct i40e_aq_set_phy_config config; + + memset(&config, 0, sizeof(config)); + config.phy_type = abilities.phy_type; + config.abilities = abilities.abilities; + config.phy_type_ext = abilities.phy_type_ext; + config.link_speed = abilities.link_speed; + config.eee_capability = abilities.eee_capability; + config.eeer = abilities.eeer_val; + config.low_power_ctrl = abilities.d3_lpan; + config.fec_config = fec_cfg & I40E_AQ_PHY_FEC_CONFIG_MASK; + status = i40e_aq_set_phy_config(hw, &config, NULL); + if (status) { + netdev_info(netdev, + "Set phy config failed, err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + err = -EAGAIN; + goto done; + } + pf->flags = flags; + status = i40e_update_link_info(hw); + if (status) + /* debug level message only due to relation to the link + * itself rather than to the FEC settings + * (e.g. no physical connection etc.) 
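/*
 * Context for the FEC helpers being added here: they are exposed through
 * struct ethtool_ops (registered later in this patch as .get_fecparam and
 * .set_fecparam), which is the kernel path behind "ethtool --show-fec <dev>"
 * and "ethtool --set-fec <dev> encoding rs". A minimal sketch of that wiring;
 * demo_get_fecparam() and demo_set_fecparam() are hypothetical stand-ins,
 * not i40e functions.
 */
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	/* Report what the link supports and what is currently negotiated. */
	fec->fec = ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;
	fec->active_fec = ETHTOOL_FEC_RS;
	return 0;
}

static int demo_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fec)
{
	/* A real driver would translate fec->fec and push it to firmware. */
	return fec->fec ? 0 : -EINVAL;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_fecparam = demo_get_fecparam,
	.set_fecparam = demo_set_fecparam,
};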
+ */ + netdev_dbg(netdev, + "Updating link info failed with err %s aq_err %s\n", + i40e_stat_str(hw, status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + } + +done: + return err; +} + +static int i40e_get_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_aq_get_phy_abilities_resp abilities; + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + i40e_status status = 0; + int err = 0; + + /* Get the current phy config */ + memset(&abilities, 0, sizeof(abilities)); + status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, + NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + fecparam->fec = 0; + if (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_AUTO) + fecparam->fec |= ETHTOOL_FEC_AUTO; + if ((abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_SET_FEC_REQUEST_RS) || + (abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_SET_FEC_ABILITY_RS)) + fecparam->fec |= ETHTOOL_FEC_RS; + if ((abilities.fec_cfg_curr_mod_ext_info & + I40E_AQ_SET_FEC_REQUEST_KR) || + (abilities.fec_cfg_curr_mod_ext_info & I40E_AQ_SET_FEC_ABILITY_KR)) + fecparam->fec |= ETHTOOL_FEC_BASER; + if (abilities.fec_cfg_curr_mod_ext_info == 0) + fecparam->fec |= ETHTOOL_FEC_OFF; + + if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) + fecparam->active_fec = ETHTOOL_FEC_BASER; + else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) + fecparam->active_fec = ETHTOOL_FEC_RS; + else + fecparam->active_fec = ETHTOOL_FEC_OFF; +done: + return err; +} + +static int i40e_set_fec_param(struct net_device *netdev, + struct ethtool_fecparam *fecparam) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_hw *hw = &pf->hw; + u8 fec_cfg = 0; + int err = 0; + + if (hw->device_id != I40E_DEV_ID_25G_SFP28 && + hw->device_id != I40E_DEV_ID_25G_B) { + err = -EPERM; + goto done; + } + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + fec_cfg = I40E_AQ_SET_FEC_AUTO; + break; + case ETHTOOL_FEC_RS: + fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS | + I40E_AQ_SET_FEC_ABILITY_RS); + break; + case ETHTOOL_FEC_BASER: + fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR | + I40E_AQ_SET_FEC_ABILITY_KR); + break; + case ETHTOOL_FEC_OFF: + case ETHTOOL_FEC_NONE: + fec_cfg = 0; + break; + default: + dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", + fecparam->fec); + err = -EINVAL; + goto done; + } + + err = i40e_set_fec_cfg(netdev, fec_cfg); + +done: + return err; +} + static int i40e_nway_reset(struct net_device *netdev) { /* restart autonegotiation */ @@ -1376,7 +1571,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, else if (!pause->rx_pause && !pause->tx_pause) hw->fc.requested_mode = I40E_FC_NONE; else - return -EINVAL; + return -EINVAL; /* Tell the OS link is going down, the link will go back up when fw * says it is ready asynchronously @@ -2175,7 +2370,7 @@ static int i40e_get_ts_info(struct net_device *dev, return 0; } -static int i40e_link_test(struct net_device *netdev, u64 *data) +static u64 i40e_link_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -2198,7 +2393,7 @@ static int i40e_link_test(struct net_device *netdev, u64 *data) return *data; } -static int i40e_reg_test(struct net_device *netdev, u64 *data) +static u64 i40e_reg_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ 
-2209,7 +2404,7 @@ static int i40e_reg_test(struct net_device *netdev, u64 *data) return *data; } -static int i40e_eeprom_test(struct net_device *netdev, u64 *data) +static u64 i40e_eeprom_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -2223,7 +2418,7 @@ static int i40e_eeprom_test(struct net_device *netdev, u64 *data) return *data; } -static int i40e_intr_test(struct net_device *netdev, u64 *data) +static u64 i40e_intr_test(struct net_device *netdev, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -2440,10 +2635,10 @@ static int i40e_set_phys_id(struct net_device *netdev, default: break; } - if (ret) - return -ENOENT; - else - return 0; + if (ret) + return -ENOENT; + else + return 0; } /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt @@ -4676,6 +4871,15 @@ flags_complete: } } + if (((changed_flags & I40E_FLAG_RS_FEC) || + (changed_flags & I40E_FLAG_BASE_R_FEC)) && + pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && + pf->hw.device_id != I40E_DEV_ID_25G_B) { + dev_warn(&pf->pdev->dev, + "Device does not support changing FEC configuration\n"); + return -EOPNOTSUPP; + } + /* Now that we've checked to ensure that the new flags are valid, load * them into place. Since we only modify flags either (a) during * initialization or (b) while holding the RTNL lock, we don't need @@ -4714,6 +4918,24 @@ flags_complete: } } + if ((changed_flags & I40E_FLAG_RS_FEC) || + (changed_flags & I40E_FLAG_BASE_R_FEC)) { + u8 fec_cfg = 0; + + if (pf->flags & I40E_FLAG_RS_FEC && + pf->flags & I40E_FLAG_BASE_R_FEC) { + fec_cfg = I40E_AQ_SET_FEC_AUTO; + } else if (pf->flags & I40E_FLAG_RS_FEC) { + fec_cfg = (I40E_AQ_SET_FEC_REQUEST_RS | + I40E_AQ_SET_FEC_ABILITY_RS); + } else if (pf->flags & I40E_FLAG_BASE_R_FEC) { + fec_cfg = (I40E_AQ_SET_FEC_REQUEST_KR | + I40E_AQ_SET_FEC_ABILITY_KR); + } + if (i40e_set_fec_cfg(dev, fec_cfg)) + dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); + } + if ((changed_flags & pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && (pf->flags & I40E_FLAG_MFP_ENABLED)) @@ -4948,6 +5170,8 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_per_queue_coalesce = i40e_set_per_queue_coalesce, .get_link_ksettings = i40e_get_link_ksettings, .set_link_ksettings = i40e_set_link_ksettings, + .get_fecparam = i40e_get_fec_param, + .set_fecparam = i40e_set_fec_param, }; void i40e_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f52e2c46e6a7..da62218eb70a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -26,8 +26,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 2 -#define DRV_VERSION_MINOR 7 -#define DRV_VERSION_BUILD 6 +#define DRV_VERSION_MINOR 8 +#define DRV_VERSION_BUILD 10 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); if (!ok) { + /* Log this in case the user has forgotten to give the kernel + * any buffers, even later in the application. 
+ */ dev_info(&vsi->back->pdev->dev, - "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", + "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", ring->xsk_umem ? "UMEM enabled " : "", ring->queue_index, pf_q); } @@ -3610,7 +3613,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - wr32(hw, I40E_QINT_TQCTL(nextqp), val); + wr32(hw, I40E_QINT_TQCTL(nextqp), val); } val = I40E_QINT_TQCTL_CAUSE_ENA_MASK | @@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_clean_tx_ring(vsi->tx_rings[i]); - if (i40e_enabled_xdp_vsi(vsi)) + if (i40e_enabled_xdp_vsi(vsi)) { + /* Make sure that in-progress ndo_xdp_xmit + * calls are completed. + */ + synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[i]); + } i40e_clean_rx_ring(vsi->rx_rings[i]); } @@ -7169,11 +7177,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, struct tc_cls_flower_offload *f, struct i40e_cloud_filter *filter) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; struct i40e_pf *pf = vsi->back; u8 field_flags = 0; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -7183,143 +7193,109 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", - f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); - - if (mask->keyid != 0) + flow_rule_match_enc_keyid(rule, &match); + if (match.mask->keyid != 0) field_flags |= I40E_CLOUD_FIELD_TEN_ID; - filter->tenant_id = be32_to_cpu(key->keyid); + filter->tenant_id = be32_to_cpu(match.key->keyid); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - n_proto_key = ntohs(key->n_proto); - n_proto_mask = ntohs(mask->n_proto); + flow_rule_match_basic(rule, &match); + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; n_proto_mask = 0; } filter->n_proto = n_proto_key & n_proto_mask; - filter->ip_proto = key->ip_proto; + filter->ip_proto = match.key->ip_proto; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; - struct flow_dissector_key_eth_addrs *mask = - 
skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); + flow_rule_match_eth_addrs(rule, &match); /* use is_broadcast and is_zero to check for all 0xf or 0 */ - if (!is_zero_ether_addr(mask->dst)) { - if (is_broadcast_ether_addr(mask->dst)) { + if (!is_zero_ether_addr(match.mask->dst)) { + if (is_broadcast_ether_addr(match.mask->dst)) { field_flags |= I40E_CLOUD_FIELD_OMAC; } else { dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", - mask->dst); + match.mask->dst); return I40E_ERR_CONFIG; } } - if (!is_zero_ether_addr(mask->src)) { - if (is_broadcast_ether_addr(mask->src)) { + if (!is_zero_ether_addr(match.mask->src)) { + if (is_broadcast_ether_addr(match.mask->src)) { field_flags |= I40E_CLOUD_FIELD_IMAC; } else { dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", - mask->src); + match.mask->src); return I40E_ERR_CONFIG; } } - ether_addr_copy(filter->dst_mac, key->dst); - ether_addr_copy(filter->src_mac, key->src); + ether_addr_copy(filter->dst_mac, match.key->dst); + ether_addr_copy(filter->src_mac, match.key->src); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; - if (mask->vlan_id) { - if (mask->vlan_id == VLAN_VID_MASK) { + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id) { + if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= I40E_CLOUD_FIELD_IVLAN; } else { dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", - mask->vlan_id); + match.mask->vlan_id); return I40E_ERR_CONFIG; } } - filter->vlan_id = cpu_to_be16(key->vlan_id); + filter->vlan_id = cpu_to_be16(match.key->vlan_id); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); - - if (mask->dst) { - if (mask->dst == cpu_to_be32(0xffffffff)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", - &mask->dst); + &match.mask->dst); return I40E_ERR_CONFIG; } } - if (mask->src) { - if (mask->src == cpu_to_be32(0xffffffff)) { + if (match.mask->src) { + if (match.mask->src == cpu_to_be32(0xffffffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", - &mask->src); + &match.mask->src); return I40E_ERR_CONFIG; } } @@ -7328,70 +7304,60 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } - filter->dst_ipv4 = key->dst; - filter->src_ipv4 = 
key->src; + filter->dst_ipv4 = match.key->dst; + filter->src_ipv4 = match.key->src; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); /* src and dest IPV6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1), which can be represented as ::1 */ - if (ipv6_addr_loopback(&key->dst) || - ipv6_addr_loopback(&key->src)) { + if (ipv6_addr_loopback(&match.key->dst) || + ipv6_addr_loopback(&match.key->src)) { dev_err(&pf->pdev->dev, "Bad ipv6, addr is LOOPBACK\n"); return I40E_ERR_CONFIG; } - if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) + if (!ipv6_addr_any(&match.mask->dst) || + !ipv6_addr_any(&match.mask->src)) field_flags |= I40E_CLOUD_FIELD_IIP; - memcpy(&filter->src_ipv6, &key->src.s6_addr32, + memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, sizeof(filter->src_ipv6)); - memcpy(&filter->dst_ipv6, &key->dst.s6_addr32, + memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, sizeof(filter->dst_ipv6)); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; - if (mask->src) { - if (mask->src == cpu_to_be16(0xffff)) { + flow_rule_match_ports(rule, &match); + if (match.mask->src) { + if (match.mask->src == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", - be16_to_cpu(mask->src)); + be16_to_cpu(match.mask->src)); return I40E_ERR_CONFIG; } } - if (mask->dst) { - if (mask->dst == cpu_to_be16(0xffff)) { + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be16(0xffff)) { field_flags |= I40E_CLOUD_FIELD_IIP; } else { dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", - be16_to_cpu(mask->dst)); + be16_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } - filter->dst_port = key->dst; - filter->src_port = key->src; + filter->dst_port = match.key->dst; + filter->src_port = match.key->src; switch (filter->ip_proto) { case IPPROTO_TCP: @@ -8131,8 +8097,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); - set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); - set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); + set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); + set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); } exit: @@ -11042,6 +11008,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) return 0; + queue_count = min_t(int, queue_count, num_online_cpus()); new_rss_size = min_t(int, queue_count, pf->rss_size_max); if (queue_count != vsi->num_queue_pairs) { @@ -11644,7 +11611,8 @@ static int i40e_get_phys_port_id(struct net_device *netdev, static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags) + u16 flags, + struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_pf 
*pf = np->vsi->back; @@ -11895,6 +11863,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, if (old_prog) bpf_prog_put(old_prog); + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. + */ + if (need_reset && prog) + for (i = 0; i < vsi->num_queue_pairs; i++) + if (vsi->xdp_rings[i]->xsk_umem) + (void)i40e_xsk_async_xmit(vsi->netdev, i); + return 0; } @@ -11955,8 +11931,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) { i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); - if (i40e_enabled_xdp_vsi(vsi)) + if (i40e_enabled_xdp_vsi(vsi)) { + /* Make sure that in-progress ndo_xdp_xmit calls are + * completed. + */ + synchronize_rcu(); i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); + } i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); } @@ -12168,9 +12149,6 @@ static int i40e_xdp(struct net_device *dev, case XDP_QUERY_PROG: xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; - case XDP_QUERY_XSK_UMEM: - return i40e_xsk_umem_query(vsi, &xdp->xsk.umem, - xdp->xsk.queue_id); case XDP_SETUP_XSK_UMEM: return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); @@ -13858,6 +13836,29 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); } +/** + * i40e_set_fec_in_flags - helper function for setting FEC options in flags + * @fec_cfg: FEC option to set in flags + * @flags: ptr to flags in which we set FEC option + **/ +void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) +{ + if (fec_cfg & I40E_AQ_SET_FEC_AUTO) + *flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC; + if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) || + (fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) { + *flags |= I40E_FLAG_RS_FEC; + *flags &= ~I40E_FLAG_BASE_R_FEC; + } + if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) || + (fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) { + *flags |= I40E_FLAG_BASE_R_FEC; + *flags &= ~I40E_FLAG_RS_FEC; + } + if (fec_cfg == 0) + *flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC); +} + /** * i40e_probe - Device initialization routine * @pdev: PCI device information struct @@ -14349,6 +14350,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->hw.phy.link_info.requested_speeds = abilities.link_speed; + /* set the FEC config due to the board capabilities */ + i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags); + /* get the supported phy types from the fw */ err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL); if (err) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a7e14e98889f..6c97667d20ef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct i40e_netdev_priv *np = netdev_priv(dev); unsigned int queue_index = smp_processor_id(); struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; struct i40e_ring *xdp_ring; int drops = 0; int i; @@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (test_bit(__I40E_VSI_DOWN, vsi->state)) return -ENETDOWN; - if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) + if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= 
vsi->num_queue_pairs || + test_bit(__I40E_CONFIG_BUSY, pf->state)) return -ENXIO; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 2ac23ebfbf31..831d52bc3c9a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2069,6 +2069,11 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) goto error_param; } + if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) { + aq_ret = I40E_ERR_PARAM; + goto error_param; + } + for (i = 0; i < qci->num_queue_pairs; i++) { qpi = &qci->qpair[i]; @@ -3384,8 +3389,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg) dev_info(&pf->pdev->dev, "VF %d: Invalid input/s, can't apply cloud filter\n", vf->vf_id); - aq_ret = I40E_ERR_PARAM; - goto err; + aq_ret = I40E_ERR_PARAM; + goto err; } cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL); @@ -3656,7 +3661,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, int ret; pf->vf_aq_requests++; - if (local_vf_id >= pf->num_alloc_vfs) + if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs) return -EINVAL; vf = &(pf->vf[local_vf_id]); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 870cf654e436..b5c182e688e3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -9,69 +9,6 @@ #include "i40e_txrx_common.h" #include "i40e_xsk.h" -/** - * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs - * @vsi: Current VSI - * - * Returns 0 on success, <0 on failure - **/ -static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi) -{ - if (vsi->xsk_umems) - return 0; - - vsi->num_xsk_umems_used = 0; - vsi->num_xsk_umems = vsi->alloc_queue_pairs; - vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems), - GFP_KERNEL); - if (!vsi->xsk_umems) { - vsi->num_xsk_umems = 0; - return -ENOMEM; - } - - return 0; -} - -/** - * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid - * @vsi: Current VSI - * @umem: UMEM to store - * @qid: Ring/qid to associate with the UMEM - * - * Returns 0 on success, <0 on failure - **/ -static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem, - u16 qid) -{ - int err; - - err = i40e_alloc_xsk_umems(vsi); - if (err) - return err; - - vsi->xsk_umems[qid] = umem; - vsi->num_xsk_umems_used++; - - return 0; -} - -/** - * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid - * @vsi: Current VSI - * @qid: Ring/qid associated with the UMEM - **/ -static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid) -{ - vsi->xsk_umems[qid] = NULL; - vsi->num_xsk_umems_used--; - - if (vsi->num_xsk_umems == 0) { - kfree(vsi->xsk_umems); - vsi->xsk_umems = NULL; - vsi->num_xsk_umems = 0; - } -} - /** * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev * @vsi: Current VSI @@ -140,6 +77,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem) static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, u16 qid) { + struct net_device *netdev = vsi->netdev; struct xdp_umem_fq_reuse *reuseq; bool if_running; int err; @@ -150,12 +88,9 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, if (qid >= vsi->num_queue_pairs) return -EINVAL; - if (vsi->xsk_umems) { - if (qid >= vsi->num_xsk_umems) - return -EINVAL; - if (vsi->xsk_umems[qid]) - return -EBUSY; - } + if 
(qid >= netdev->real_num_rx_queues || + qid >= netdev->real_num_tx_queues) + return -EINVAL; reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count); if (!reuseq) @@ -173,16 +108,15 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, err = i40e_queue_pair_disable(vsi, qid); if (err) return err; - } - err = i40e_add_xsk_umem(vsi, umem, qid); - if (err) - return err; - - if (if_running) { err = i40e_queue_pair_enable(vsi, qid); if (err) return err; + + /* Kick start the NAPI context so that receiving will start */ + err = i40e_xsk_async_xmit(vsi->netdev, qid); + if (err) + return err; } return 0; @@ -197,11 +131,13 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, **/ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) { + struct net_device *netdev = vsi->netdev; + struct xdp_umem *umem; bool if_running; int err; - if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems || - !vsi->xsk_umems[qid]) + umem = xdp_get_umem_from_qid(netdev, qid); + if (!umem) return -EINVAL; if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); @@ -212,8 +148,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) return err; } - i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]); - i40e_remove_xsk_umem(vsi, qid); + i40e_xsk_umem_dma_unmap(vsi, umem); if (if_running) { err = i40e_queue_pair_enable(vsi, qid); @@ -224,36 +159,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) return 0; } -/** - * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM - * @vsi: Current VSI - * @umem: UMEM associated to the ring, if any - * @qid: Rx ring to associate UMEM to - * - * This function will store, if any, the UMEM associated to certain ring. - * - * Returns 0 on success, <0 on failure - **/ -int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, - u16 qid) -{ - if (vsi->type != I40E_VSI_MAIN) - return -EINVAL; - - if (qid >= vsi->num_queue_pairs) - return -EINVAL; - - if (vsi->xsk_umems) { - if (qid >= vsi->num_xsk_umems) - return -EINVAL; - *umem = vsi->xsk_umems[qid]; - return 0; - } - - *umem = NULL; - return 0; -} - /** * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid * @vsi: Current VSI @@ -945,13 +850,11 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring) **/ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi) { + struct net_device *netdev = vsi->netdev; int i; - if (!vsi->xsk_umems) - return false; - for (i = 0; i < vsi->num_queue_pairs; i++) { - if (vsi->xsk_umems[i]) + if (xdp_get_umem_from_qid(netdev, i)) return true; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index 9038c5d5cf08..8cc0a2e7d9a2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -10,8 +10,6 @@ struct zero_copy_allocator; int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair); -int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, - u16 qid); int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, u16 qid); void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 9f2b7b7adf6b..4569d69a2b55 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2439,6 +2439,8 @@ static int 
iavf_parse_cls_flower(struct iavf_adapter *adapter, struct tc_cls_flower_offload *f, struct iavf_cloud_filter *filter) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0; u16 n_proto_key = 0; u8 field_flags = 0; @@ -2447,7 +2449,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, int i = 0; struct virtchnl_filter *vf = &filter->f; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -2457,32 +2459,24 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n", - f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; - if (mask->keyid != 0) + flow_rule_match_enc_keyid(rule, &match); + if (match.mask->keyid != 0) field_flags |= IAVF_CLOUD_FIELD_TEN_ID; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - n_proto_key = ntohs(key->n_proto); - n_proto_mask = ntohs(mask->n_proto); + flow_rule_match_basic(rule, &match); + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; @@ -2496,122 +2490,103 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, vf->flow_type = VIRTCHNL_TCP_V6_FLOW; } - if (key->ip_proto != IPPROTO_TCP) { + if (match.key->ip_proto != IPPROTO_TCP) { dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n"); return -EINVAL; } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); - struct flow_dissector_key_eth_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); /* use is_broadcast and is_zero to check for all 0xf or 0 */ - if (!is_zero_ether_addr(mask->dst)) { - if (is_broadcast_ether_addr(mask->dst)) { + if (!is_zero_ether_addr(match.mask->dst)) { + if (is_broadcast_ether_addr(match.mask->dst)) { field_flags |= IAVF_CLOUD_FIELD_OMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n", - mask->dst); + match.mask->dst); return I40E_ERR_CONFIG; } } - if (!is_zero_ether_addr(mask->src)) { - if (is_broadcast_ether_addr(mask->src)) { + if (!is_zero_ether_addr(match.mask->src)) { + if (is_broadcast_ether_addr(match.mask->src)) { field_flags |= IAVF_CLOUD_FIELD_IMAC; } else { dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n", - mask->src); + match.mask->src); return I40E_ERR_CONFIG; } } - if 
(!is_zero_ether_addr(key->dst)) - if (is_valid_ether_addr(key->dst) || - is_multicast_ether_addr(key->dst)) { + if (!is_zero_ether_addr(match.key->dst)) + if (is_valid_ether_addr(match.key->dst) || + is_multicast_ether_addr(match.key->dst)) { /* set the mask if a valid dst_mac address */ for (i = 0; i < ETH_ALEN; i++) vf->mask.tcp_spec.dst_mac[i] |= 0xff; ether_addr_copy(vf->data.tcp_spec.dst_mac, - key->dst); + match.key->dst); } - if (!is_zero_ether_addr(key->src)) - if (is_valid_ether_addr(key->src) || - is_multicast_ether_addr(key->src)) { + if (!is_zero_ether_addr(match.key->src)) + if (is_valid_ether_addr(match.key->src) || + is_multicast_ether_addr(match.key->src)) { /* set the mask if a valid dst_mac address */ for (i = 0; i < ETH_ALEN; i++) vf->mask.tcp_spec.src_mac[i] |= 0xff; ether_addr_copy(vf->data.tcp_spec.src_mac, - key->src); + match.key->src); } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; - if (mask->vlan_id) { - if (mask->vlan_id == VLAN_VID_MASK) { + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id) { + if (match.mask->vlan_id == VLAN_VID_MASK) { field_flags |= IAVF_CLOUD_FIELD_IVLAN; } else { dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n", - mask->vlan_id); + match.mask->vlan_id); return I40E_ERR_CONFIG; } } vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff); - vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id); + vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); - - if (mask->dst) { - if (mask->dst == cpu_to_be32(0xffffffff)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be32(0xffffffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n", - be32_to_cpu(mask->dst)); + be32_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } - if (mask->src) { - if (mask->src == cpu_to_be32(0xffffffff)) { + if (match.mask->src) { + if (match.mask->src == cpu_to_be32(0xffffffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", - be32_to_cpu(mask->dst)); + be32_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } @@ -2620,28 +2595,23 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n"); return I40E_ERR_CONFIG; } - if (key->dst) { + if (match.key->dst) { vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff); - 
vf->data.tcp_spec.dst_ip[0] = key->dst; + vf->data.tcp_spec.dst_ip[0] = match.key->dst; } - if (key->src) { + if (match.key->src) { vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff); - vf->data.tcp_spec.src_ip[0] = key->src; + vf->data.tcp_spec.src_ip[0] = match.key->src; } } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); /* validate mask, make sure it is not IPV6_ADDR_ANY */ - if (ipv6_addr_any(&mask->dst)) { + if (ipv6_addr_any(&match.mask->dst)) { dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n", IPV6_ADDR_ANY); return I40E_ERR_CONFIG; @@ -2650,61 +2620,56 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter, /* src and dest IPv6 address should not be LOOPBACK * (0:0:0:0:0:0:0:1) which can be represented as ::1 */ - if (ipv6_addr_loopback(&key->dst) || - ipv6_addr_loopback(&key->src)) { + if (ipv6_addr_loopback(&match.key->dst) || + ipv6_addr_loopback(&match.key->src)) { dev_err(&adapter->pdev->dev, "ipv6 addr should not be loopback\n"); return I40E_ERR_CONFIG; } - if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) + if (!ipv6_addr_any(&match.mask->dst) || + !ipv6_addr_any(&match.mask->src)) field_flags |= IAVF_CLOUD_FIELD_IIP; for (i = 0; i < 4; i++) vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32, + memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32, sizeof(vf->data.tcp_spec.dst_ip)); for (i = 0; i < 4; i++) vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff); - memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32, + memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32, sizeof(vf->data.tcp_spec.src_ip)); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); - - if (mask->src) { - if (mask->src == cpu_to_be16(0xffff)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + if (match.mask->src) { + if (match.mask->src == cpu_to_be16(0xffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad src port mask %u\n", - be16_to_cpu(mask->src)); + be16_to_cpu(match.mask->src)); return I40E_ERR_CONFIG; } } - if (mask->dst) { - if (mask->dst == cpu_to_be16(0xffff)) { + if (match.mask->dst) { + if (match.mask->dst == cpu_to_be16(0xffff)) { field_flags |= IAVF_CLOUD_FIELD_IIP; } else { dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n", - be16_to_cpu(mask->dst)); + be16_to_cpu(match.mask->dst)); return I40E_ERR_CONFIG; } } - if (key->dst) { + if (match.key->dst) { vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.dst_port = key->dst; + vf->data.tcp_spec.dst_port = match.key->dst; } - if (key->src) { + if (match.key->src) { vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff); - vf->data.tcp_spec.src_port = key->src; + vf->data.tcp_spec.src_port = match.key->src; } } vf->field_flags = field_flags; diff --git 
a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index a385575600f6..89440775aea1 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -82,7 +83,7 @@ extern const char ice_drv_ver[]; #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) #define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - \ - ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))) #define ICE_UP_TABLE_TRANSLATE(val, i) \ (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \ @@ -110,6 +111,9 @@ extern const char ice_drv_ver[]; #define ice_for_each_alloc_rxq(vsi, i) \ for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++) +#define ice_for_each_q_vector(vsi, i) \ + for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++) + struct ice_tc_info { u16 qoffset; u16 qcount_tx; @@ -129,6 +133,17 @@ struct ice_res_tracker { u16 list[1]; }; +struct ice_qs_cfg { + struct mutex *qs_mutex; /* will be assgined to &pf->avail_q_mutex */ + unsigned long *pf_map; + unsigned long pf_map_size; + unsigned int q_count; + unsigned int scatter_count; + u16 *vsi_map; + u16 vsi_map_offset; + u8 mapping_mode; +}; + struct ice_sw { struct ice_pf *pf; u16 sw_id; /* switch ID for this switch */ @@ -270,6 +285,7 @@ enum ice_pf_flags { ICE_FLAG_RSS_ENA, ICE_FLAG_SRIOV_ENA, ICE_FLAG_SRIOV_CAPABLE, + ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, ICE_PF_FLAGS_NBITS /* must be last */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index fcdcd80b18e7..242c78469181 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -657,8 +657,13 @@ struct ice_aqc_get_topo { /* Update TSE (indirect 0x0403) * Get TSE (indirect 0x0404) + * Add TSE (indirect 0x0401) + * Delete TSE (indirect 0x040F) + * Move TSE (indirect 0x0408) + * Suspend Nodes (indirect 0x0409) + * Resume Nodes (indirect 0x040A) */ -struct ice_aqc_get_cfg_elem { +struct ice_aqc_sched_elem_cmd { __le16 num_elem_req; /* Used by commands */ __le16 num_elem_resp; /* Used by responses */ __le32 reserved; @@ -674,18 +679,6 @@ struct ice_aqc_suspend_resume_elem { __le32 teid[1]; }; -/* Add TSE (indirect 0x0401) - * Delete TSE (indirect 0x040F) - * Move TSE (indirect 0x0408) - */ -struct ice_aqc_add_move_delete_elem { - __le16 num_grps_req; - __le16 num_grps_updated; - __le32 reserved; - __le32 addr_high; - __le32 addr_low; -}; - struct ice_aqc_elem_info_bw { __le16 bw_profile_idx; __le16 bw_alloc; @@ -854,11 +847,46 @@ struct ice_aqc_get_phy_caps { #define ICE_PHY_TYPE_LOW_40GBASE_KR4 BIT_ULL(33) #define ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC BIT_ULL(34) #define ICE_PHY_TYPE_LOW_40G_XLAUI BIT_ULL(35) +#define ICE_PHY_TYPE_LOW_50GBASE_CR2 BIT_ULL(36) +#define ICE_PHY_TYPE_LOW_50GBASE_SR2 BIT_ULL(37) +#define ICE_PHY_TYPE_LOW_50GBASE_LR2 BIT_ULL(38) +#define ICE_PHY_TYPE_LOW_50GBASE_KR2 BIT_ULL(39) +#define ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC BIT_ULL(40) +#define ICE_PHY_TYPE_LOW_50G_LAUI2 BIT_ULL(41) +#define ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC BIT_ULL(42) +#define ICE_PHY_TYPE_LOW_50G_AUI2 BIT_ULL(43) +#define ICE_PHY_TYPE_LOW_50GBASE_CP BIT_ULL(44) +#define ICE_PHY_TYPE_LOW_50GBASE_SR BIT_ULL(45) +#define ICE_PHY_TYPE_LOW_50GBASE_FR BIT_ULL(46) +#define ICE_PHY_TYPE_LOW_50GBASE_LR BIT_ULL(47) +#define ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4 BIT_ULL(48) +#define ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC BIT_ULL(49) +#define ICE_PHY_TYPE_LOW_50G_AUI1 
BIT_ULL(50) +#define ICE_PHY_TYPE_LOW_100GBASE_CR4 BIT_ULL(51) +#define ICE_PHY_TYPE_LOW_100GBASE_SR4 BIT_ULL(52) +#define ICE_PHY_TYPE_LOW_100GBASE_LR4 BIT_ULL(53) +#define ICE_PHY_TYPE_LOW_100GBASE_KR4 BIT_ULL(54) +#define ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC BIT_ULL(55) +#define ICE_PHY_TYPE_LOW_100G_CAUI4 BIT_ULL(56) +#define ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC BIT_ULL(57) +#define ICE_PHY_TYPE_LOW_100G_AUI4 BIT_ULL(58) +#define ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 BIT_ULL(59) +#define ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 BIT_ULL(60) +#define ICE_PHY_TYPE_LOW_100GBASE_CP2 BIT_ULL(61) +#define ICE_PHY_TYPE_LOW_100GBASE_SR2 BIT_ULL(62) +#define ICE_PHY_TYPE_LOW_100GBASE_DR BIT_ULL(63) #define ICE_PHY_TYPE_LOW_MAX_INDEX 63 +/* The second set of defines is for phy_type_high. */ +#define ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 BIT_ULL(0) +#define ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC BIT_ULL(1) +#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2) +#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3) +#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4) +#define ICE_PHY_TYPE_HIGH_MAX_INDEX 19 struct ice_aqc_get_phy_caps_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ - __le64 reserved; + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; #define ICE_AQC_PHY_EN_TX_LINK_PAUSE BIT(0) #define ICE_AQC_PHY_EN_RX_LINK_PAUSE BIT(1) @@ -923,7 +951,7 @@ struct ice_aqc_set_phy_cfg { /* Set PHY config command data structure */ struct ice_aqc_set_phy_cfg_data { __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ - __le64 rsvd0; + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ u8 caps; #define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) #define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) @@ -1032,10 +1060,12 @@ struct ice_aqc_get_link_status_data { #define ICE_AQ_LINK_SPEED_20GB BIT(6) #define ICE_AQ_LINK_SPEED_25GB BIT(7) #define ICE_AQ_LINK_SPEED_40GB BIT(8) +#define ICE_AQ_LINK_SPEED_50GB BIT(9) +#define ICE_AQ_LINK_SPEED_100GB BIT(10) #define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15) __le32 reserved3; /* Aligns next field to 8-byte boundary */ __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */ - __le64 reserved4; + __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */ }; /* Set event mask command (direct 0x0613) */ @@ -1055,6 +1085,16 @@ struct ice_aqc_set_event_mask { u8 reserved1[6]; }; +/* Set Port Identification LED (direct, 0x06E9) */ +struct ice_aqc_set_port_id_led { + u8 lport_num; + u8 lport_num_valid; + u8 ident_mode; +#define ICE_AQC_PORT_IDENT_LED_BLINK BIT(0) +#define ICE_AQC_PORT_IDENT_LED_ORIG 0 + u8 rsvd[13]; +}; + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) @@ -1341,12 +1381,12 @@ struct ice_aq_desc { struct ice_aqc_get_phy_caps get_phy; struct ice_aqc_set_phy_cfg set_phy; struct ice_aqc_restart_an restart_an; + struct ice_aqc_set_port_id_led set_port_id_led; struct ice_aqc_get_sw_cfg get_sw_conf; struct ice_aqc_sw_rules sw_rules; struct ice_aqc_get_topo get_topo; - struct ice_aqc_get_cfg_elem get_update_elem; + struct ice_aqc_sched_elem_cmd sched_elem_cmd; struct ice_aqc_query_txsched_res query_sched_res; - struct ice_aqc_add_move_delete_elem add_move_delete_elem; struct ice_aqc_nvm nvm; struct ice_aqc_pf_vf_msg virt; struct ice_aqc_get_set_rss_lut get_set_rss_lut; @@ -1442,6 +1482,7 @@ enum ice_adminq_opc { ice_aqc_opc_restart_an = 0x0605, ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, + ice_aqc_opc_set_port_id_led = 0x06E9, /* NVM 
commands */ ice_aqc_opc_nvm_read = 0x0701, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 4c1d35da940d..63f003441300 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -165,8 +165,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, cmd->param0 |= cpu_to_le16(report_mode); status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd); - if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) + if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); + pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); + } return status; } @@ -183,6 +185,9 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) return ICE_MEDIA_UNKNOWN; hw_link_info = &pi->phy.link_info; + if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) + /* If more than one media type is selected, report unknown */ + return ICE_MEDIA_UNKNOWN; if (hw_link_info->phy_type_low) { switch (hw_link_info->phy_type_low) { @@ -196,6 +201,15 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_25G_AUI_C2C: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: + case ICE_PHY_TYPE_LOW_50GBASE_SR2: + case ICE_PHY_TYPE_LOW_50GBASE_LR2: + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + case ICE_PHY_TYPE_LOW_100GBASE_SR4: + case ICE_PHY_TYPE_LOW_100GBASE_LR4: + case ICE_PHY_TYPE_LOW_100GBASE_SR2: + case ICE_PHY_TYPE_LOW_100GBASE_DR: return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_1000BASE_T: @@ -209,6 +223,11 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_25GBASE_CR_S: case ICE_PHY_TYPE_LOW_25GBASE_CR1: case ICE_PHY_TYPE_LOW_40GBASE_CR4: + case ICE_PHY_TYPE_LOW_50GBASE_CR2: + case ICE_PHY_TYPE_LOW_50GBASE_CP: + case ICE_PHY_TYPE_LOW_100GBASE_CR4: + case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_CP2: return ICE_MEDIA_DA; case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_KX: @@ -219,10 +238,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_25GBASE_KR1: case ICE_PHY_TYPE_LOW_25GBASE_KR_S: case ICE_PHY_TYPE_LOW_40GBASE_KR4: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_50GBASE_KR2: + case ICE_PHY_TYPE_LOW_100GBASE_KR4: + case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: + return ICE_MEDIA_BACKPLANE; + } + } else { + switch (hw_link_info->phy_type_high) { + case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; } } - return ICE_MEDIA_UNKNOWN; } @@ -274,6 +301,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, /* update current link status information */ hw_link_info->link_speed = le16_to_cpu(link_data.link_speed); hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low); + hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high); *hw_media_type = ice_get_media_type(pi); hw_link_info->link_info = link_data.link_info; hw_link_info->an_info = link_data.an_info; @@ -750,6 +778,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) status = ICE_ERR_CFG; goto err_unroll_sched; } + INIT_LIST_HEAD(&hw->agg_list); status = ice_init_fltr_mgmt_struct(hw); if (status) @@ -800,6 +829,7 @@ void ice_deinit_hw(struct ice_hw *hw) ice_cleanup_fltr_mgmt_struct(hw); ice_sched_cleanup_all(hw); 
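/*
 * Aside on the ice_aq_manage_mac_write() const casts above: sah/sal are simply
 * the six MAC bytes split 2+4 and carried in network byte order, which is all
 * the htons()/htonl() conversions express. A small sketch of the packing; the
 * demo structure is an assumption for illustration, not the ice admin-queue
 * command layout.
 */
#include <linux/etherdevice.h>
#include <linux/types.h>

struct demo_mac_write {
	__be16 sah;	/* first two bytes of the MAC  */
	__be32 sal;	/* remaining four bytes        */
};

static void demo_pack_mac(struct demo_mac_write *cmd, const u8 *mac)
{
	/* e.g. 00:11:22:33:44:55 -> sah = 0x0011, sal = 0x22334455 */
	cmd->sah = htons(*((const u16 *)mac));
	cmd->sal = htonl(*((const u32 *)(mac + 2)));
}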
+ ice_sched_clear_agg(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); @@ -1655,7 +1685,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw) * This function is used to write MAC address to the NVM (0x0108). */ enum ice_status -ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, +ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd) { struct ice_aqc_manage_mac_write *cmd; @@ -1667,8 +1697,8 @@ ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, cmd->flags = flags; /* Prep values for flags, sah, sal */ - cmd->sah = htons(*((u16 *)mac_addr)); - cmd->sal = htonl(*((u32 *)(mac_addr + 2))); + cmd->sah = htons(*((const u16 *)mac_addr)); + cmd->sal = htonl(*((const u32 *)(mac_addr + 2))); return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -1705,16 +1735,20 @@ void ice_clear_pxe_mode(struct ice_hw *hw) /** * ice_get_link_speed_based_on_phy_type - returns link speed * @phy_type_low: lower part of phy_type + * @phy_type_high: higher part of phy_type * - * This helper function will convert a phy_type_low to its corresponding link + * This helper function will convert an entry in phy type structure + * [phy_type_low, phy_type_high] to its corresponding link speed. + * Note: In the structure of [phy_type_low, phy_type_high], there should + * be one bit set, as this function will convert one phy type to its * speed. - * Note: In the structure of phy_type_low, there should be one bit set, as - * this function will convert one phy type to its speed. * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned */ -static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low) +static u16 +ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) { + u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; switch (phy_type_low) { @@ -1768,41 +1802,110 @@ static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low) case ICE_PHY_TYPE_LOW_40G_XLAUI: speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; break; + case ICE_PHY_TYPE_LOW_50GBASE_CR2: + case ICE_PHY_TYPE_LOW_50GBASE_SR2: + case ICE_PHY_TYPE_LOW_50GBASE_LR2: + case ICE_PHY_TYPE_LOW_50GBASE_KR2: + case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_LAUI2: + case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI2: + case ICE_PHY_TYPE_LOW_50GBASE_CP: + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI1: + speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; + break; + case ICE_PHY_TYPE_LOW_100GBASE_CR4: + case ICE_PHY_TYPE_LOW_100GBASE_SR4: + case ICE_PHY_TYPE_LOW_100GBASE_LR4: + case ICE_PHY_TYPE_LOW_100GBASE_KR4: + case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_CAUI4: + case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_AUI4: + case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_100GBASE_CP2: + case ICE_PHY_TYPE_LOW_100GBASE_SR2: + case ICE_PHY_TYPE_LOW_100GBASE_DR: + speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; + break; default: speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; break; } - return speed_phy_type_low; + switch (phy_type_high) { + case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: + case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: + case 
ICE_PHY_TYPE_HIGH_100G_CAUI2: + case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: + case ICE_PHY_TYPE_HIGH_100G_AUI2: + speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; + break; + default: + speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + + if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && + speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) + return ICE_AQ_LINK_SPEED_UNKNOWN; + else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && + speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) + return ICE_AQ_LINK_SPEED_UNKNOWN; + else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && + speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) + return speed_phy_type_low; + else + return speed_phy_type_high; } /** * ice_update_phy_type * @phy_type_low: pointer to the lower part of phy_type + * @phy_type_high: pointer to the higher part of phy_type * @link_speeds_bitmap: targeted link speeds bitmap * * Note: For the link_speeds_bitmap structure, you can check it at * [ice_aqc_get_link_status->link_speed]. Caller can pass in * link_speeds_bitmap include multiple speeds. * - * The value of phy_type_low will present a certain link speed. This helper - * function will turn on bits in the phy_type_low based on the value of + * Each entry in this [phy_type_low, phy_type_high] structure will + * present a certain link speed. This helper function will turn on bits + * in [phy_type_low, phy_type_high] structure based on the value of * link_speeds_bitmap input parameter. */ -void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap) +void +ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, + u16 link_speeds_bitmap) { u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN; + u64 pt_high; u64 pt_low; int index; /* We first check with low part of phy_type */ for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { pt_low = BIT_ULL(index); - speed = ice_get_link_speed_based_on_phy_type(pt_low); + speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); if (link_speeds_bitmap & speed) *phy_type_low |= BIT_ULL(index); } + + /* We then check with high part of phy_type */ + for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { + pt_high = BIT_ULL(index); + speed = ice_get_link_speed_based_on_phy_type(0, pt_high); + + if (link_speeds_bitmap & speed) + *phy_type_high |= BIT_ULL(index); + } } /** @@ -1934,6 +2037,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) if (ena_auto_link_update) cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Copy over all the old settings */ + cfg.phy_type_high = pcaps->phy_type_high; cfg.phy_type_low = pcaps->phy_type_low; cfg.low_power_ctrl = pcaps->low_power_ctrl; cfg.eee_cap = pcaps->eee_cap; @@ -2031,6 +2135,34 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); } +/** + * ice_aq_set_port_id_led + * @pi: pointer to the port information + * @is_orig_mode: is this LED set to original mode (by the net-list) + * @cd: pointer to command details structure or NULL + * + * Set LED value for the given port (0x06e9) + */ +enum ice_status +ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, + struct ice_sq_cd *cd) +{ + struct ice_aqc_set_port_id_led *cmd; + struct ice_hw *hw = pi->hw; + struct ice_aq_desc desc; + + cmd = &desc.params.set_port_id_led; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); + + if (is_orig_mode) + cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; + else + cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; + + 
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + /** * __ice_aq_get_set_rss_lut * @hw: pointer to the hardware structure @@ -2318,6 +2450,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, { struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; + enum ice_status status; u16 i, sz = 0; cmd = &desc.params.dis_txqs; @@ -2353,6 +2486,8 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, break; } + /* flush pipe on time out */ + cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; /* If no queue group info, we are in a reset flow. Issue the AQ */ if (!qg_list) goto do_aq; @@ -2378,7 +2513,17 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, return ICE_ERR_PARAM; do_aq: - return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); + status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); + if (status) { + if (!qg_list) + ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", + vmvf_num, hw->adminq.sq_last_status); + else + ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n", + le16_to_cpu(qg_list[0].q_id[0]), + hw->adminq.sq_last_status); + } + return status; } /* End of FW Admin Queue command wrappers */ @@ -2664,8 +2809,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps, /* add the lan q */ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); - if (status) + if (status) { + ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n", + le16_to_cpu(buf->txqs[0].txq_id), + hw->adminq.sq_last_status); goto ena_txq_exit; + } node.node_teid = buf->txqs[0].q_teid; node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index cf760c24a6aa..d7c7c2ed8823 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -28,6 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_init_nvm(struct ice_hw *hw); +enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, + u16 *data); enum ice_status ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc, void *buf, u16 buf_size, @@ -70,9 +72,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); void -ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap); +ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, + u16 link_speeds_bitmap); enum ice_status -ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, +ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status @@ -86,6 +89,10 @@ enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); enum ice_status +ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, + struct ice_sq_cd *cd); + +enum ice_status ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids, u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cmd_details); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 3b6e387f5440..eb8d149e317c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -63,45 +63,45 @@ static const 
struct ice_stats ice_gstrings_vsi_stats[] = { * is queried on the base PF netdev. */ static const struct ice_stats ice_gstrings_pf_stats[] = { - ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes), - ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes), - ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast), - ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast), - ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast), - ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast), - ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast), - ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast), - ICE_PF_STAT("tx_errors", stats.eth.tx_errors), - ICE_PF_STAT("tx_size_64", stats.tx_size_64), - ICE_PF_STAT("rx_size_64", stats.rx_size_64), - ICE_PF_STAT("tx_size_127", stats.tx_size_127), - ICE_PF_STAT("rx_size_127", stats.rx_size_127), - ICE_PF_STAT("tx_size_255", stats.tx_size_255), - ICE_PF_STAT("rx_size_255", stats.rx_size_255), - ICE_PF_STAT("tx_size_511", stats.tx_size_511), - ICE_PF_STAT("rx_size_511", stats.rx_size_511), - ICE_PF_STAT("tx_size_1023", stats.tx_size_1023), - ICE_PF_STAT("rx_size_1023", stats.rx_size_1023), - ICE_PF_STAT("tx_size_1522", stats.tx_size_1522), - ICE_PF_STAT("rx_size_1522", stats.rx_size_1522), - ICE_PF_STAT("tx_size_big", stats.tx_size_big), - ICE_PF_STAT("rx_size_big", stats.rx_size_big), - ICE_PF_STAT("link_xon_tx", stats.link_xon_tx), - ICE_PF_STAT("link_xon_rx", stats.link_xon_rx), - ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx), - ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx), - ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down), - ICE_PF_STAT("rx_undersize", stats.rx_undersize), - ICE_PF_STAT("rx_fragments", stats.rx_fragments), - ICE_PF_STAT("rx_oversize", stats.rx_oversize), - ICE_PF_STAT("rx_jabber", stats.rx_jabber), - ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error), - ICE_PF_STAT("rx_length_errors", stats.rx_len_errors), - ICE_PF_STAT("rx_dropped", stats.eth.rx_discards), - ICE_PF_STAT("rx_crc_errors", stats.crc_errors), - ICE_PF_STAT("illegal_bytes", stats.illegal_bytes), - ICE_PF_STAT("mac_local_faults", stats.mac_local_faults), - ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults), + ICE_PF_STAT("port.tx_bytes", stats.eth.tx_bytes), + ICE_PF_STAT("port.rx_bytes", stats.eth.rx_bytes), + ICE_PF_STAT("port.tx_unicast", stats.eth.tx_unicast), + ICE_PF_STAT("port.rx_unicast", stats.eth.rx_unicast), + ICE_PF_STAT("port.tx_multicast", stats.eth.tx_multicast), + ICE_PF_STAT("port.rx_multicast", stats.eth.rx_multicast), + ICE_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast), + ICE_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast), + ICE_PF_STAT("port.tx_errors", stats.eth.tx_errors), + ICE_PF_STAT("port.tx_size_64", stats.tx_size_64), + ICE_PF_STAT("port.rx_size_64", stats.rx_size_64), + ICE_PF_STAT("port.tx_size_127", stats.tx_size_127), + ICE_PF_STAT("port.rx_size_127", stats.rx_size_127), + ICE_PF_STAT("port.tx_size_255", stats.tx_size_255), + ICE_PF_STAT("port.rx_size_255", stats.rx_size_255), + ICE_PF_STAT("port.tx_size_511", stats.tx_size_511), + ICE_PF_STAT("port.rx_size_511", stats.rx_size_511), + ICE_PF_STAT("port.tx_size_1023", stats.tx_size_1023), + ICE_PF_STAT("port.rx_size_1023", stats.rx_size_1023), + ICE_PF_STAT("port.tx_size_1522", stats.tx_size_1522), + ICE_PF_STAT("port.rx_size_1522", stats.rx_size_1522), + ICE_PF_STAT("port.tx_size_big", stats.tx_size_big), + ICE_PF_STAT("port.rx_size_big", stats.rx_size_big), + ICE_PF_STAT("port.link_xon_tx", stats.link_xon_tx), + ICE_PF_STAT("port.link_xon_rx", stats.link_xon_rx), + ICE_PF_STAT("port.link_xoff_tx", 
stats.link_xoff_tx), + ICE_PF_STAT("port.link_xoff_rx", stats.link_xoff_rx), + ICE_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down), + ICE_PF_STAT("port.rx_undersize", stats.rx_undersize), + ICE_PF_STAT("port.rx_fragments", stats.rx_fragments), + ICE_PF_STAT("port.rx_oversize", stats.rx_oversize), + ICE_PF_STAT("port.rx_jabber", stats.rx_jabber), + ICE_PF_STAT("port.rx_csum_bad", hw_csum_rx_error), + ICE_PF_STAT("port.rx_length_errors", stats.rx_len_errors), + ICE_PF_STAT("port.rx_dropped", stats.eth.rx_discards), + ICE_PF_STAT("port.rx_crc_errors", stats.crc_errors), + ICE_PF_STAT("port.illegal_bytes", stats.illegal_bytes), + ICE_PF_STAT("port.mac_local_faults", stats.mac_local_faults), + ICE_PF_STAT("port.mac_remote_faults", stats.mac_remote_faults), }; static const u32 ice_regs_dump_list[] = { @@ -114,6 +114,22 @@ static const u32 ice_regs_dump_list[] = { QRX_ITR(0), }; +struct ice_priv_flag { + char name[ETH_GSTRING_LEN]; + u32 bitno; /* bit position in pf->flags */ +}; + +#define ICE_PRIV_FLAG(_name, _bitno) { \ + .name = _name, \ + .bitno = _bitno, \ +} + +static const struct ice_priv_flag ice_gstrings_priv_flags[] = { + ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA), +}; + +#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags) + /** * ice_nvm_version_str - format the NVM version strings * @hw: ptr to the hardware info @@ -152,6 +168,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE; } static int ice_get_regs_len(struct net_device __always_unused *netdev) @@ -203,6 +220,55 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data) #endif /* !CONFIG_DYNAMIC_DEBUG */ } +static int ice_get_eeprom_len(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + return (int)(pf->hw.nvm.sr_words * sizeof(u16)); +} + +static int +ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + u16 first_word, last_word, nwords; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + struct device *dev; + int ret = 0; + u16 *buf; + + dev = &pf->pdev->dev; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + nwords = last_word - first_word + 1; + + buf = devm_kcalloc(dev, nwords, sizeof(u16), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + status = ice_read_sr_buf(hw, first_word, &nwords, buf); + if (status) { + dev_err(dev, "ice_read_sr_buf failed, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + eeprom->len = sizeof(u16) * nwords; + ret = -EIO; + goto out; + } + + memcpy(bytes, (u8 *)buf + (eeprom->offset & 1), eeprom->len); +out: + devm_kfree(dev, buf); + return ret; +} + static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -238,17 +304,105 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data) return; for (i = 0; i < ICE_PF_STATS_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "port.%s", + snprintf(p, ETH_GSTRING_LEN, "%s", ice_gstrings_pf_stats[i].stat_string); p += ETH_GSTRING_LEN; } + break; + case ETH_SS_PRIV_FLAGS: + for 
(i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", + ice_gstrings_priv_flags[i].name); + p += ETH_GSTRING_LEN; + } break; default: break; } } +static int +ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + bool led_active; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + led_active = true; + break; + case ETHTOOL_ID_INACTIVE: + led_active = false; + break; + default: + return -EINVAL; + } + + if (ice_aq_set_port_id_led(np->vsi->port_info, !led_active, NULL)) + return -EIO; + + return 0; +} + +/** + * ice_get_priv_flags - report device private flags + * @netdev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the ice_gstrings_priv_flags + * array. + * + * Returns a u32 bitmap of flags. + */ +static u32 ice_get_priv_flags(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u32 i, ret_flags = 0; + + for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { + const struct ice_priv_flag *priv_flag; + + priv_flag = &ice_gstrings_priv_flags[i]; + + if (test_bit(priv_flag->bitno, pf->flags)) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +/** + * ice_set_priv_flags - set private flags + * @netdev: network interface device structure + * @flags: bit flags to be set + */ +static int ice_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u32 i; + + if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE)) + return -EINVAL; + + for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) { + const struct ice_priv_flag *priv_flag; + + priv_flag = &ice_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + set_bit(priv_flag->bitno, pf->flags); + else + clear_bit(priv_flag->bitno, pf->flags); + } + + return 0; +} + static int ice_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { @@ -272,6 +426,8 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) * not safe. 
*/ return ICE_ALL_STATS_LEN(netdev); + case ETH_SS_PRIV_FLAGS: + return ICE_PRIV_FLAG_ARRAY_SIZE; default: return -EOPNOTSUPP; } @@ -337,16 +493,20 @@ ice_get_ethtool_stats(struct net_device *netdev, * @netdev: network interface device structure * @ks: ethtool link ksettings struct to fill out */ -static void ice_phy_type_to_ethtool(struct net_device *netdev, - struct ethtool_link_ksettings *ks) +static void +ice_phy_type_to_ethtool(struct net_device *netdev, + struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_link_status *hw_link_info; + bool need_add_adv_mode = false; struct ice_vsi *vsi = np->vsi; + u64 phy_types_high; u64 phy_types_low; hw_link_info = &vsi->port_info->phy.link_info; phy_types_low = vsi->port_info->phy.phy_type_low; + phy_types_high = vsi->port_info->phy.phy_type_high; ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); @@ -495,6 +655,95 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseLR4_Full); } + if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || + phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 || + phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR || + phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseCR2_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseKR2_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseSR2_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseSR2_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 || + phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 || + phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC || + phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 || + phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC || + phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + need_add_adv_mode = true; + } + if (need_add_adv_mode) { + need_add_adv_mode = false; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { + 
ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + need_add_adv_mode = true; + } + if (need_add_adv_mode) { + need_add_adv_mode = false; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseSR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + need_add_adv_mode = true; + } + if (need_add_adv_mode) { + need_add_adv_mode = false; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseLR4_ER4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || + phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseKR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) + need_add_adv_mode = true; + } + if (need_add_adv_mode) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); /* Autoneg PHY types */ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || @@ -520,6 +769,24 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); } + if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || + phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || + phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + } } #define TEST_SET_BITS_TIMEOUT 50 @@ -531,13 +798,15 @@ static void ice_phy_type_to_ethtool(struct net_device *netdev, * @ks: ethtool ksettings to fill in * @netdev: network interface device structure */ -static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks, - struct net_device *netdev) +static void +ice_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ethtool_link_ksettings cap_ksettings; struct ice_link_status *link_info; struct ice_vsi *vsi = np->vsi; + bool unrecog_phy_high = false; bool unrecog_phy_low = false; link_info = &vsi->port_info->phy.link_info; @@ -699,25 +968,133 @@ static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks, ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseKR4_Full); break; + case ICE_PHY_TYPE_LOW_50GBASE_CR2: + case ICE_PHY_TYPE_LOW_50GBASE_CP: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseCR2_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseCR2_Full); + break; + case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_LAUI2: + case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI2: + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case 
ICE_PHY_TYPE_LOW_50G_AUI1: + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseCR2_Full); + break; + case ICE_PHY_TYPE_LOW_50GBASE_KR2: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseKR2_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 50000baseKR2_Full); + break; + case ICE_PHY_TYPE_LOW_50GBASE_SR2: + case ICE_PHY_TYPE_LOW_50GBASE_LR2: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 50000baseSR2_Full); + break; + case ICE_PHY_TYPE_LOW_100GBASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_CAUI4: + case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_AUI4: + case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_100GBASE_CP2: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_100GBASE_SR4: + case ICE_PHY_TYPE_LOW_100GBASE_SR2: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseSR4_Full); + break; + case ICE_PHY_TYPE_LOW_100GBASE_LR4: + case ICE_PHY_TYPE_LOW_100GBASE_DR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseLR4_ER4_Full); + break; + case ICE_PHY_TYPE_LOW_100GBASE_KR4: + case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); + break; default: unrecog_phy_low = true; } - if (unrecog_phy_low) { + switch (link_info->phy_type_high) { + case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100000baseKR4_Full); + break; + case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: + case ICE_PHY_TYPE_HIGH_100G_CAUI2: + case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: + case ICE_PHY_TYPE_HIGH_100G_AUI2: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100000baseCR4_Full); + break; + default: + unrecog_phy_high = true; + } + + if (unrecog_phy_low && unrecog_phy_high) { /* if we got here and link is up something bad is afoot */ - netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n", + netdev_info(netdev, + "WARNING: Unrecognized PHY_Low (0x%llx).\n", (u64)link_info->phy_type_low); + netdev_info(netdev, + "WARNING: Unrecognized PHY_High (0x%llx).\n", + (u64)link_info->phy_type_high); } /* Now that we've worked out everything that could be supported by the * current 
PHY type, get what is supported by the NVM and intersect * them to get what is truly supported */ - memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); + memset(&cap_ksettings, 0, sizeof(cap_ksettings)); ice_phy_type_to_ethtool(netdev, &cap_ksettings); ethtool_intersect_link_masks(ks, &cap_ksettings); switch (link_info->link_speed) { + case ICE_AQ_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + break; + case ICE_AQ_LINK_SPEED_50GB: + ks->base.speed = SPEED_50000; + break; case ICE_AQ_LINK_SPEED_40GB: ks->base.speed = SPEED_40000; break; @@ -911,6 +1288,23 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) ethtool_link_ksettings_test_link_mode(ks, advertising, 40000baseKR4_Full)) adv_link_speed |= ICE_AQ_LINK_SPEED_40GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseCR2_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseKR2_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 50000baseSR2_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_50GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseLR4_ER4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 100000baseKR4_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_100GB; return adv_link_speed; } @@ -981,8 +1375,9 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, * * Set speed/duplex per media_types advertised/forced */ -static int ice_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *ks) +static int +ice_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) { u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; struct ice_netdev_priv *np = netdev_priv(netdev); @@ -994,6 +1389,7 @@ static int ice_set_link_ksettings(struct net_device *netdev, struct ice_port_info *p; u8 autoneg_changed = 0; enum ice_status status; + u64 phy_type_high; u64 phy_type_low; int err = 0; bool linkup; @@ -1020,7 +1416,7 @@ static int ice_set_link_ksettings(struct net_device *netdev, return -EOPNOTSUPP; /* copy the ksettings to copy_ks to avoid modifying the original */ - memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); + memcpy(©_ks, ks, sizeof(copy_ks)); /* save autoneg out of ksettings */ autoneg = copy_ks.base.autoneg; @@ -1039,7 +1435,7 @@ static int ice_set_link_ksettings(struct net_device *netdev, return -EINVAL; /* get our own copy of the bits to check against */ - memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + memset(&safe_ks, 0, sizeof(safe_ks)); safe_ks.base.cmd = copy_ks.base.cmd; safe_ks.base.link_mode_masks_nwords = copy_ks.base.link_mode_masks_nwords; @@ -1053,8 +1449,7 @@ static int ice_set_link_ksettings(struct net_device *netdev, /* If copy_ks.base and safe_ks.base are not the same now, then they are * trying to set something that we do not support. 
*/ - if (memcmp(©_ks.base, &safe_ks.base, - sizeof(struct ethtool_link_settings))) + if (memcmp(©_ks.base, &safe_ks.base, sizeof(copy_ks.base))) return -EOPNOTSUPP; while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { @@ -1078,7 +1473,7 @@ static int ice_set_link_ksettings(struct net_device *netdev, } /* Copy abilities to config in case autoneg is not set below */ - memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data)); + memset(&config, 0, sizeof(config)); config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; if (abilities->caps & ICE_AQC_PHY_AN_MODE) config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; @@ -1109,7 +1504,7 @@ static int ice_set_link_ksettings(struct net_device *netdev, adv_link_speed = curr_link_speed; /* Convert the advertise link speeds to their corresponded PHY_TYPE */ - ice_update_phy_type(&phy_type_low, adv_link_speed); + ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); if (!autoneg_changed && adv_link_speed == curr_link_speed) { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -1128,7 +1523,9 @@ static int ice_set_link_ksettings(struct net_device *netdev, /* set link and auto negotiation so changes take effect */ config.caps |= ICE_AQ_PHY_ENA_LINK; - if (phy_type_low) { + if (phy_type_low || phy_type_high) { + config.phy_type_high = cpu_to_le64(phy_type_high) & + abilities->phy_type_high; config.phy_type_low = cpu_to_le64(phy_type_low) & abilities->phy_type_low; } else { @@ -1270,7 +1667,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) vsi->tx_rings[0]->count, new_tx_cnt); tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, - sizeof(struct ice_ring), GFP_KERNEL); + sizeof(*tx_rings), GFP_KERNEL); if (!tx_rings) { err = -ENOMEM; goto done; @@ -1302,7 +1699,7 @@ process_rx: vsi->rx_rings[0]->count, new_rx_cnt); rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, - sizeof(struct ice_ring), GFP_KERNEL); + sizeof(*rx_rings), GFP_KERNEL); if (!rx_rings) { err = -ENOMEM; goto done; @@ -1421,21 +1818,36 @@ static void ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_port_info *pi; + struct ice_port_info *pi = np->vsi->port_info; + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_vsi *vsi = np->vsi; + enum ice_status status; - pi = np->vsi->port_info; - pause->autoneg = - ((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + /* Initialize pause params */ + pause->rx_pause = 0; + pause->tx_pause = 0; - if (pi->fc.current_mode == ICE_FC_RX_PAUSE) { - pause->rx_pause = 1; - } else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) { + pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps), + GFP_KERNEL); + if (!pcaps) + return; + + /* Get current phy config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) + goto out; + + pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? 
+ AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) pause->tx_pause = 1; - } else if (pi->fc.current_mode == ICE_FC_FULL) { + if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) pause->rx_pause = 1; - pause->tx_pause = 1; - } + +out: + devm_kfree(&vsi->back->pdev->dev, pcaps); } /** @@ -1667,6 +2079,258 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; } +enum ice_container_type { + ICE_RX_CONTAINER, + ICE_TX_CONTAINER, +}; + +/** + * ice_get_rc_coalesce - get ITR values for specific ring container + * @ec: ethtool structure to fill with driver's coalesce settings + * @c_type: container type, RX or TX + * @rc: ring container that the ITR values will come from + * + * Query the device for ice_ring_container specific ITR values. This is + * done per ice_ring_container because each q_vector can have 1 or more rings + * and all of said ring(s) will have the same ITR values. + * + * Returns 0 on success, negative otherwise. + */ +static int +ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type, + struct ice_ring_container *rc) +{ + struct ice_pf *pf = rc->ring->vsi->back; + + switch (c_type) { + case ICE_RX_CONTAINER: + ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); + ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; + break; + case ICE_TX_CONTAINER: + ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting); + ec->tx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC; + break; + default: + dev_dbg(&pf->pdev->dev, "Invalid c_type %d\n", c_type); + return -EINVAL; + } + + return 0; +} + +/** + * __ice_get_coalesce - get ITR/INTRL values for the device + * @netdev: pointer to the netdev associated with this query + * @ec: ethtool structure to fill with driver's coalesce settings + * @q_num: queue number to get the coalesce settings for + */ +static int +__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + int q_num) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + int tx = -EINVAL, rx = -EINVAL; + struct ice_vsi *vsi = np->vsi; + + if (q_num < 0) { + rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER, + &vsi->rx_rings[0]->q_vector->rx); + tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER, + &vsi->tx_rings[0]->q_vector->tx); + + goto update_coalesced_frames; + } + + if (q_num < vsi->num_rxq && q_num < vsi->num_txq) { + rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER, + &vsi->rx_rings[q_num]->q_vector->rx); + tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER, + &vsi->tx_rings[q_num]->q_vector->tx); + } else if (q_num < vsi->num_rxq) { + rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER, + &vsi->rx_rings[q_num]->q_vector->rx); + } else if (q_num < vsi->num_txq) { + tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER, + &vsi->tx_rings[q_num]->q_vector->tx); + } else { + /* q_num is invalid for both Rx and Tx queues */ + return -EINVAL; + } + +update_coalesced_frames: + /* either q_num is invalid for both Rx and Tx queues or setting coalesce + * failed completely + */ + if (tx && rx) + return -EINVAL; + + if (q_num < vsi->num_txq) + ec->tx_max_coalesced_frames_irq = vsi->work_lmt; + + if (q_num < vsi->num_rxq) + ec->rx_max_coalesced_frames_irq = vsi->work_lmt; + + return 0; +} + +static int +ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +{ + return __ice_get_coalesce(netdev, ec, -1); +} + +static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) +{ + return __ice_get_coalesce(netdev, ec, q_num); +} 
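/* Illustrative usage sketch (hypothetical, not part of the patch sources):
 * once the coalesce handlers in this file are wired into ice_ethtool_ops,
 * ITR can be read and programmed either device-wide or per queue from
 * userspace, assuming an ethtool binary new enough to support --per-queue:
 *
 *   ethtool -c eth0
 *   ethtool -C eth0 adaptive-rx off rx-usecs 50
 *   ethtool --per-queue eth0 queue_mask 0x1 --coalesce tx-usecs 64
 *
 * The interface name, queue mask and option spellings above are assumptions
 * for illustration only. In the driver paths, q_num is the queue index
 * selected by that mask, while a negative q_num is the device-wide path
 * taken by ice_get_coalesce() and ice_set_coalesce().
 */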
+ +/** + * ice_set_rc_coalesce - set ITR values for specific ring container + * @c_type: container type, RX or TX + * @ec: ethtool structure from user to update ITR settings + * @rc: ring container that the ITR values will come from + * @vsi: VSI associated to the ring container + * + * Set specific ITR values. This is done per ice_ring_container because each + * q_vector can have 1 or more rings and all of said ring(s) will have the same + * ITR values. + * + * Returns 0 on success, negative otherwise. + */ +static int +ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec, + struct ice_ring_container *rc, struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + u16 itr_setting; + + if (!rc->ring) + return -EINVAL; + + itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC; + + switch (c_type) { + case ICE_RX_CONTAINER: + if (ec->rx_coalesce_usecs != itr_setting && + ec->use_adaptive_rx_coalesce) { + netdev_info(vsi->netdev, + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n"); + return -EINVAL; + } + + if (ec->rx_coalesce_usecs > ICE_ITR_MAX) { + netdev_info(vsi->netdev, + "Invalid value, rx-usecs range is 0-%d\n", + ICE_ITR_MAX); + return -EINVAL; + } + + if (ec->use_adaptive_rx_coalesce) { + rc->itr_setting |= ICE_ITR_DYNAMIC; + } else { + rc->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); + rc->target_itr = ITR_TO_REG(rc->itr_setting); + } + break; + case ICE_TX_CONTAINER: + if (ec->tx_coalesce_usecs != itr_setting && + ec->use_adaptive_tx_coalesce) { + netdev_info(vsi->netdev, + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n"); + return -EINVAL; + } + + if (ec->tx_coalesce_usecs > ICE_ITR_MAX) { + netdev_info(vsi->netdev, + "Invalid value, tx-usecs range is 0-%d\n", + ICE_ITR_MAX); + return -EINVAL; + } + + if (ec->use_adaptive_tx_coalesce) { + rc->itr_setting |= ICE_ITR_DYNAMIC; + } else { + rc->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); + rc->target_itr = ITR_TO_REG(rc->itr_setting); + } + break; + default: + dev_dbg(&pf->pdev->dev, "Invalid container type %d\n", c_type); + return -EINVAL; + } + + return 0; +} + +static int +__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, + int q_num) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + int rx = -EINVAL, tx = -EINVAL; + struct ice_vsi *vsi = np->vsi; + + if (q_num < 0) { + int i; + + ice_for_each_q_vector(vsi, i) { + struct ice_q_vector *q_vector = vsi->q_vectors[i]; + + if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, + &q_vector->rx, vsi) || + ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, + &q_vector->tx, vsi)) + return -EINVAL; + } + + goto set_work_lmt; + } + + if (q_num < vsi->num_rxq && q_num < vsi->num_txq) { + rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, + &vsi->rx_rings[q_num]->q_vector->rx, + vsi); + tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, + &vsi->tx_rings[q_num]->q_vector->tx, + vsi); + } else if (q_num < vsi->num_rxq) { + rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec, + &vsi->rx_rings[q_num]->q_vector->rx, + vsi); + } else if (q_num < vsi->num_txq) { + tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec, + &vsi->tx_rings[q_num]->q_vector->tx, + vsi); + } + + /* either q_num is invalid for both Rx and Tx queues or setting coalesce + * failed completely + */ + if (rx && tx) + return -EINVAL; + +set_work_lmt: + if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) + vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq, + ec->rx_max_coalesced_frames_irq); + + return 0; +} + +static int 
+ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +{ + return __ice_set_coalesce(netdev, ec, -1); +} + +static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num, + struct ethtool_coalesce *ec) +{ + return __ice_set_coalesce(netdev, ec, q_num); +} + static const struct ethtool_ops ice_ethtool_ops = { .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, @@ -1676,8 +2340,15 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_msglevel = ice_get_msglevel, .set_msglevel = ice_set_msglevel, .get_link = ethtool_op_get_link, + .get_eeprom_len = ice_get_eeprom_len, + .get_eeprom = ice_get_eeprom, + .get_coalesce = ice_get_coalesce, + .set_coalesce = ice_set_coalesce, .get_strings = ice_get_strings, + .set_phys_id = ice_set_phys_id, .get_ethtool_stats = ice_get_ethtool_stats, + .get_priv_flags = ice_get_priv_flags, + .set_priv_flags = ice_set_priv_flags, .get_sset_count = ice_get_sset_count, .get_rxnfc = ice_get_rxnfc, .get_ringparam = ice_get_ringparam, @@ -1689,6 +2360,9 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_rxfh_indir_size = ice_get_rxfh_indir_size, .get_rxfh = ice_get_rxfh, .set_rxfh = ice_set_rxfh, + .get_ts_info = ethtool_op_get_ts_info, + .get_per_queue_coalesce = ice_get_per_q_coalesce, + .set_per_queue_coalesce = ice_set_per_q_coalesce, }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 5507928c8fbe..6bf5cc064270 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -30,6 +30,7 @@ #define PF_FW_ATQLEN_ATQVFE_M BIT(28) #define PF_FW_ATQLEN_ATQOVFL_M BIT(29) #define PF_FW_ATQLEN_ATQCRIT_M BIT(30) +#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4)) #define PF_FW_ATQLEN_ATQENABLE_M BIT(31) #define PF_FW_ATQT 0x00080400 #define PF_MBX_ARQBAH 0x0022E400 @@ -110,6 +111,7 @@ #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) #define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) #define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_INTERVAL_S 5 #define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) #define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) #define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index bb51dd7defb5..ef4c79b5aa32 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -346,6 +346,7 @@ enum ice_tx_desc_cmd_bits { ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ }; @@ -488,5 +489,7 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) #define ICE_LINK_SPEED_20000MBPS 20000 #define ICE_LINK_SPEED_25000MBPS 25000 #define ICE_LINK_SPEED_40000MBPS 40000 +#define ICE_LINK_SPEED_50000MBPS 50000 +#define ICE_LINK_SPEED_100000MBPS 100000 #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 29b1dcfd4331..fa61203bee26 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -249,12 +249,12 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) /* allocate memory for both Tx and Rx ring pointers */ vsi->tx_rings = 
devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq, - sizeof(struct ice_ring *), GFP_KERNEL); + sizeof(*vsi->tx_rings), GFP_KERNEL); if (!vsi->tx_rings) goto err_txrings; vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq, - sizeof(struct ice_ring *), GFP_KERNEL); + sizeof(*vsi->rx_rings), GFP_KERNEL); if (!vsi->rx_rings) goto err_rxrings; @@ -262,7 +262,7 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors) /* allocate memory for q_vector pointers */ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors, - sizeof(struct ice_q_vector *), + sizeof(*vsi->q_vectors), GFP_KERNEL); if (!vsi->q_vectors) goto err_vectors; @@ -348,19 +348,25 @@ static int ice_get_free_slot(void *array, int size, int curr) void ice_vsi_delete(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; - struct ice_vsi_ctx ctxt; + struct ice_vsi_ctx *ctxt; enum ice_status status; + ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return; + if (vsi->type == ICE_VSI_VF) - ctxt.vf_num = vsi->vf_id; - ctxt.vsi_num = vsi->vsi_num; + ctxt->vf_num = vsi->vf_id; + ctxt->vsi_num = vsi->vsi_num; - memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); + memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info)); - status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); + status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL); if (status) dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", vsi->vsi_num); + + devm_kfree(&pf->pdev->dev, ctxt); } /** @@ -514,109 +520,88 @@ unlock_pf: } /** - * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI - * @vsi: the VSI getting queues + * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI + * @qs_cfg: gathered variables needed for PF->VSI queues assignment * - * Return 0 on success and a negative value on error + * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap */ -static int ice_vsi_get_qs_contig(struct ice_vsi *vsi) +static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg) { - struct ice_pf *pf = vsi->back; - int offset, ret = 0; + int offset, i; - mutex_lock(&pf->avail_q_mutex); - /* look for contiguous block of queues for Tx */ - offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS, - 0, vsi->alloc_txq, 0); - if (offset < ICE_MAX_TXQS) { - int i; - - bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq); - for (i = 0; i < vsi->alloc_txq; i++) - vsi->txq_map[i] = i + offset; - } else { - ret = -ENOMEM; - vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER; + mutex_lock(qs_cfg->qs_mutex); + offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size, + 0, qs_cfg->q_count, 0); + if (offset >= qs_cfg->pf_map_size) { + mutex_unlock(qs_cfg->qs_mutex); + return -ENOMEM; } - /* look for contiguous block of queues for Rx */ - offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS, - 0, vsi->alloc_rxq, 0); - if (offset < ICE_MAX_RXQS) { - int i; - - bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq); - for (i = 0; i < vsi->alloc_rxq; i++) - vsi->rxq_map[i] = i + offset; - } else { - ret = -ENOMEM; - vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER; - } - mutex_unlock(&pf->avail_q_mutex); + bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count); + for (i = 0; i < qs_cfg->q_count; i++) + qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset; + mutex_unlock(qs_cfg->qs_mutex); - return ret; + return 0; } /** - * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI - * @vsi: the VSI getting queues + * __ice_vsi_get_qs_sc - Assign 
a scattered queues from PF to VSI + * @qs_cfg: gathered variables needed for PF->VSI queues assignment * - * Return 0 on success and a negative value on error + * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap */ -static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi) +static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg) { - struct ice_pf *pf = vsi->back; int i, index = 0; - mutex_lock(&pf->avail_q_mutex); - - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) { - for (i = 0; i < vsi->alloc_txq; i++) { - index = find_next_zero_bit(pf->avail_txqs, - ICE_MAX_TXQS, index); - if (index < ICE_MAX_TXQS) { - set_bit(index, pf->avail_txqs); - vsi->txq_map[i] = index; - } else { - goto err_scatter_tx; - } - } - } - - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) { - for (i = 0; i < vsi->alloc_rxq; i++) { - index = find_next_zero_bit(pf->avail_rxqs, - ICE_MAX_RXQS, index); - if (index < ICE_MAX_RXQS) { - set_bit(index, pf->avail_rxqs); - vsi->rxq_map[i] = index; - } else { - goto err_scatter_rx; - } - } + mutex_lock(qs_cfg->qs_mutex); + for (i = 0; i < qs_cfg->q_count; i++) { + index = find_next_zero_bit(qs_cfg->pf_map, + qs_cfg->pf_map_size, index); + if (index >= qs_cfg->pf_map_size) + goto err_scatter; + set_bit(index, qs_cfg->pf_map); + qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index; } + mutex_unlock(qs_cfg->qs_mutex); - mutex_unlock(&pf->avail_q_mutex); return 0; - -err_scatter_rx: - /* unflag any queues we have grabbed (i is failed position) */ - for (index = 0; index < i; index++) { - clear_bit(vsi->rxq_map[index], pf->avail_rxqs); - vsi->rxq_map[index] = 0; - } - i = vsi->alloc_txq; -err_scatter_tx: - /* i is either position of failed attempt or vsi->alloc_txq */ +err_scatter: for (index = 0; index < i; index++) { - clear_bit(vsi->txq_map[index], pf->avail_txqs); - vsi->txq_map[index] = 0; + clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map); + qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0; } + mutex_unlock(qs_cfg->qs_mutex); - mutex_unlock(&pf->avail_q_mutex); return -ENOMEM; } +/** + * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI + * @qs_cfg: gathered variables needed for PF->VSI queues assignment + * + * This is an internal function for assigning queues from the PF to VSI and + * initially tries to find contiguous space. If it is not successful to find + * contiguous space, then it tries with the scatter approach. 
+ * + * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap + */ +static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg) +{ + int ret = 0; + + ret = __ice_vsi_get_qs_contig(qs_cfg); + if (ret) { + /* contig failed, so try with scatter approach */ + qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER; + qs_cfg->q_count = min_t(u16, qs_cfg->q_count, + qs_cfg->scatter_count); + ret = __ice_vsi_get_qs_sc(qs_cfg); + } + return ret; +} + /** * ice_vsi_get_qs - Assign queues from PF to VSI * @vsi: the VSI to assign queues to @@ -625,25 +610,35 @@ err_scatter_tx: */ static int ice_vsi_get_qs(struct ice_vsi *vsi) { + struct ice_pf *pf = vsi->back; + struct ice_qs_cfg tx_qs_cfg = { + .qs_mutex = &pf->avail_q_mutex, + .pf_map = pf->avail_txqs, + .pf_map_size = ICE_MAX_TXQS, + .q_count = vsi->alloc_txq, + .scatter_count = ICE_MAX_SCATTER_TXQS, + .vsi_map = vsi->txq_map, + .vsi_map_offset = 0, + .mapping_mode = vsi->tx_mapping_mode + }; + struct ice_qs_cfg rx_qs_cfg = { + .qs_mutex = &pf->avail_q_mutex, + .pf_map = pf->avail_rxqs, + .pf_map_size = ICE_MAX_RXQS, + .q_count = vsi->alloc_rxq, + .scatter_count = ICE_MAX_SCATTER_RXQS, + .vsi_map = vsi->rxq_map, + .vsi_map_offset = 0, + .mapping_mode = vsi->rx_mapping_mode + }; int ret = 0; vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG; vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG; - /* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping - * modes individually to scatter if assigning contiguous queues - * to Rx or Tx fails - */ - ret = ice_vsi_get_qs_contig(vsi); - if (ret < 0) { - if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) - vsi->alloc_txq = max_t(u16, vsi->alloc_txq, - ICE_MAX_SCATTER_TXQS); - if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) - vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq, - ICE_MAX_SCATTER_RXQS); - ret = ice_vsi_get_qs_scatter(vsi); - } + ret = __ice_vsi_get_qs(&tx_qs_cfg); + if (!ret) + ret = __ice_vsi_get_qs(&rx_qs_cfg); return ret; } @@ -919,37 +914,41 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) */ static int ice_vsi_init(struct ice_vsi *vsi) { - struct ice_vsi_ctx ctxt = { 0 }; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; + struct ice_vsi_ctx *ctxt; int ret = 0; + ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + switch (vsi->type) { case ICE_VSI_PF: - ctxt.flags = ICE_AQ_VSI_TYPE_PF; + ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; case ICE_VSI_VF: - ctxt.flags = ICE_AQ_VSI_TYPE_VF; + ctxt->flags = ICE_AQ_VSI_TYPE_VF; /* VF number here is the absolute VF number (0-255) */ - ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; + ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id; break; default: return -ENODEV; } - ice_set_dflt_vsi_ctx(&ctxt); + ice_set_dflt_vsi_ctx(ctxt); /* if the switch is in VEB mode, allow VSI loopback */ if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB) - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; /* Set LUT type and HASH type if RSS is enabled */ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) - ice_set_rss_vsi_ctx(&ctxt, vsi); + ice_set_rss_vsi_ctx(ctxt, vsi); - ctxt.info.sw_id = vsi->port_info->sw_id; - ice_vsi_setup_q_map(vsi, &ctxt); + ctxt->info.sw_id = vsi->port_info->sw_id; + ice_vsi_setup_q_map(vsi, ctxt); - ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); + ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL); if (ret) { dev_err(&pf->pdev->dev, "Add VSI failed, err %d\n", ret); @@ -957,11 +956,12 @@ static int ice_vsi_init(struct ice_vsi *vsi) } /* 
keep context for update VSI operations */ - vsi->info = ctxt.info; + vsi->info = ctxt->info; /* record VSI number returned */ - vsi->vsi_num = ctxt.vsi_num; + vsi->vsi_num = ctxt->vsi_num; + devm_kfree(&pf->pdev->dev, ctxt); return ret; } @@ -1614,11 +1614,14 @@ setup_rings: /** * ice_vsi_cfg_txqs - Configure the VSI for Tx * @vsi: the VSI being configured + * @rings: Tx ring array to be configured + * @offset: offset within vsi->txq_map * * Return 0 on success and a negative value on error * Configure the Tx VSI for operation. */ -int ice_vsi_cfg_txqs(struct ice_vsi *vsi) +static int +ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset) { struct ice_aqc_add_tx_qgrp *qg_buf; struct ice_aqc_add_txqs_perq *txq; @@ -1626,9 +1629,9 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) u8 num_q_grps, q_idx = 0; enum ice_status status; u16 buf_len, i, pf_q; - int err = 0, tc = 0; + int err = 0, tc; - buf_len = sizeof(struct ice_aqc_add_tx_qgrp); + buf_len = sizeof(*qg_buf); qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); if (!qg_buf) return -ENOMEM; @@ -1644,9 +1647,8 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { struct ice_tlan_ctx tlan_ctx = { 0 }; - pf_q = vsi->txq_map[q_idx]; - ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx, - pf_q); + pf_q = vsi->txq_map[q_idx + offset]; + ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, @@ -1655,7 +1657,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. */ - vsi->tx_rings[q_idx]->tail = + rings[q_idx]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, num_q_grps, qg_buf, buf_len, @@ -1674,7 +1676,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) */ txq = &qg_buf->txqs[0]; if (pf_q == le16_to_cpu(txq->txq_id)) - vsi->tx_rings[q_idx]->txq_teid = + rings[q_idx]->txq_teid = le32_to_cpu(txq->q_teid); q_idx++; @@ -1685,6 +1687,18 @@ err_cfg_txqs: return err; } +/** + * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Tx VSI for operation. 
+ */ +int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) +{ + return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0); +} + /** * ice_intrl_usec_to_reg - convert interrupt rate limit to register value * @intrl: interrupt rate limit in usecs @@ -1714,22 +1728,34 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran) static void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector) { - u8 itr_gran = hw->itr_gran; - if (q_vector->num_ring_rx) { struct ice_ring_container *rc = &q_vector->rx; - rc->itr = ITR_TO_REG(ICE_DFLT_RX_ITR, itr_gran); + /* if this value is set then don't overwrite with default */ + if (!rc->itr_setting) + rc->itr_setting = ICE_DFLT_RX_ITR; + + rc->target_itr = ITR_TO_REG(rc->itr_setting); + rc->next_update = jiffies + 1; + rc->current_itr = rc->target_itr; rc->latency_range = ICE_LOW_LATENCY; - wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); + wr32(hw, GLINT_ITR(rc->itr_idx, vector), + ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } if (q_vector->num_ring_tx) { struct ice_ring_container *rc = &q_vector->tx; - rc->itr = ITR_TO_REG(ICE_DFLT_TX_ITR, itr_gran); + /* if this value is set then don't overwrite with default */ + if (!rc->itr_setting) + rc->itr_setting = ICE_DFLT_TX_ITR; + + rc->target_itr = ITR_TO_REG(rc->itr_setting); + rc->next_update = jiffies + 1; + rc->current_itr = rc->target_itr; rc->latency_range = ICE_LOW_LATENCY; - wr32(hw, GLINT_ITR(rc->itr_idx, vector), rc->itr); + wr32(hw, GLINT_ITR(rc->itr_idx, vector), + ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S); } } @@ -1808,26 +1834,34 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi) { struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; + struct ice_vsi_ctx *ctxt; enum ice_status status; + int ret = 0; + + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; /* Here we are configuring the VSI to let the driver add VLAN tags by * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag * insertion happens in the Tx hot path, in ice_tx_map. */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL; - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); - return -EIO; + ret = -EIO; + goto out; } - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; + vsi->info.vlan_flags = ctxt->info.vlan_flags; +out: + devm_kfree(dev, ctxt); + return ret; } /** @@ -1839,35 +1873,42 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena) { struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; + struct ice_vsi_ctx *ctxt; enum ice_status status; + int ret = 0; + + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; /* Here we are configuring what the VSI should do with the VLAN tag in * the Rx packet. We can either leave the tag in the packet or put it in * the Rx descriptor. */ - if (ena) { + if (ena) /* Strip VLAN tag from Rx packet and put it in the desc */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; - } else { + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH; + else /* Disable stripping. 
Leave tag in packet */ - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; - } + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING; /* Allow all packets untagged/tagged */ - ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; + ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL; - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n", ena, status, hw->adminq.sq_last_status); - return -EIO; + ret = -EIO; + goto out; } - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; + vsi->info.vlan_flags = ctxt->info.vlan_flags; +out: + devm_kfree(dev, ctxt); + return ret; } /** @@ -1897,9 +1938,12 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) * @vsi: the VSI being configured * @rst_src: reset source * @rel_vmvf_num: Relative id of VF/VM + * @rings: Tx ring array to be stopped + * @offset: offset within vsi->txq_map */ -int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num) +static int +ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, + u16 rel_vmvf_num, struct ice_ring **rings, int offset) { struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; @@ -1927,19 +1971,18 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, ice_for_each_txq(vsi, i) { u16 v_idx; - if (!vsi->tx_rings || !vsi->tx_rings[i] || - !vsi->tx_rings[i]->q_vector) { + if (!rings || !rings[i] || !rings[i]->q_vector) { err = -EINVAL; goto err_out; } - q_ids[i] = vsi->txq_map[i]; - q_teids[i] = vsi->tx_rings[i]->txq_teid; + q_ids[i] = vsi->txq_map[i + offset]; + q_teids[i] = rings[i]->txq_teid; /* clear cause_ena bit for disabled queues */ - val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); + val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx)); val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); + wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val); /* software is expected to wait for 100 ns */ ndelay(100); @@ -1947,7 +1990,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, /* trigger a software interrupt for the vector associated to * the queue to schedule NAPI handler */ - v_idx = vsi->tx_rings[i]->q_vector->v_idx; + v_idx = rings[i]->q_vector->v_idx; wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx), GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); } @@ -1976,6 +2019,19 @@ err_alloc_q_ids: return err; } +/** + * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings + * @vsi: the VSI being configured + * @rst_src: reset source + * @rel_vmvf_num: Relative id of VF/VM + */ +int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, + enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) +{ + return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, + 0); +} + /** * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI * @vsi: VSI to enable or disable VLAN pruning on @@ -2462,13 +2518,15 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) */ int ice_vsi_release(struct ice_vsi *vsi) { + struct ice_vf *vf = NULL; struct ice_pf *pf; - struct ice_vf *vf; if (!vsi->back) return -ENODEV; pf = vsi->back; - vf = &pf->vf[vsi->vf_id]; + + if (vsi->type == ICE_VSI_VF) + vf = &pf->vf[vsi->vf_id]; /* do not unregister and free netdevs while driver is in the reset * recovery pending state. 
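/* [Editor's note -- illustrative sketch, not part of the patch.]
 * A recurring change in these hunks (ice_vsi_init and the VLAN insertion/
 * stripping helpers above, the PVID and spoof-check helpers further down)
 * is moving "struct ice_vsi_ctx ctxt = { 0 };" off the kernel stack and
 * into a devm_kzalloc()'d buffer that is freed on a single exit path.
 * A user-space equivalent of the pattern, with invented names (big_ctx,
 * update_hw):
 */
#include <errno.h>
#include <stdlib.h>

struct big_ctx { unsigned char info[512]; };	/* too big for a stack frame */

static int update_hw(const struct big_ctx *ctx) { (void)ctx; return 0; }

static int configure(void)
{
	struct big_ctx *ctx;
	int ret = 0;

	ctx = calloc(1, sizeof(*ctx));		/* was: struct big_ctx ctx = {0}; */
	if (!ctx)
		return -ENOMEM;

	ctx->info[0] = 1;			/* set only the fields we need */
	if (update_hw(ctx)) {
		ret = -EIO;
		goto out;			/* every error path frees ctx */
	}
out:
	free(ctx);
	return ret;
}

int main(void)
{
	return configure();
}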
Since reset/rebuild happens through PF * service task workqueue, its not a good idea to unregister netdev @@ -2581,6 +2639,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) goto err_vectors; ice_vsi_map_rings_to_vectors(vsi); + /* Do not exit if configuring RSS had an issue, at least + * receive traffic on first queue. Hence no need to capture + * return value + */ + if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) + ice_vsi_cfg_rss_lut_key(vsi); break; case ICE_VSI_VF: ret = ice_vsi_alloc_q_vectors(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 3831b4f0960a..7988a53729a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -15,7 +15,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi); int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); -int ice_vsi_cfg_txqs(struct ice_vsi *vsi); +int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); void ice_vsi_cfg_msix(struct ice_vsi *vsi); @@ -31,7 +31,8 @@ int ice_vsi_start_rx_rings(struct ice_vsi *vsi); int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); -int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, +int +ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num); int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8725569d11f0..47cc3f905b7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -609,7 +609,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi) } } - ice_vc_notify_link_state(pf); + if (!new_link_same_as_old && pf->num_alloc_vfs) + ice_vc_notify_link_state(pf); return 0; } @@ -1355,15 +1356,40 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) return ret; } +/** + * ice_dis_ctrlq_interrupts - disable control queue interrupts + * @hw: pointer to HW structure + */ +static void ice_dis_ctrlq_interrupts(struct ice_hw *hw) +{ + /* disable Admin queue Interrupt causes */ + wr32(hw, PFINT_FW_CTL, + rd32(hw, PFINT_FW_CTL) & ~PFINT_FW_CTL_CAUSE_ENA_M); + + /* disable Mailbox queue Interrupt causes */ + wr32(hw, PFINT_MBX_CTL, + rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M); + + /* disable Control queue Interrupt causes */ + wr32(hw, PFINT_OICR_CTL, + rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); + + ice_flush(hw); +} + /** * ice_free_irq_msix_misc - Unroll misc vector setup * @pf: board private structure */ static void ice_free_irq_msix_misc(struct ice_pf *pf) { + struct ice_hw *hw = &pf->hw; + + ice_dis_ctrlq_interrupts(hw); + /* disable OICR interrupt */ - wr32(&pf->hw, PFINT_OICR_ENA, 0); - ice_flush(&pf->hw); + wr32(hw, PFINT_OICR_ENA, 0); + ice_flush(hw); if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { synchronize_irq(pf->msix_entries[pf->sw_oicr_idx].vector); @@ -1377,6 +1403,32 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) ice_free_res(pf->hw_irq_tracker, pf->hw_oicr_idx, ICE_RES_MISC_VEC_ID); } +/** + * ice_ena_ctrlq_interrupts - enable control queue interrupts + * @hw: pointer to HW structure + * @v_idx: HW vector index to associate the control queue interrupts with + */ +static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx) +{ + u32 val; + + val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) | + PFINT_OICR_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_OICR_CTL, val); + + /* enable Admin queue Interrupt causes */ + val = ((v_idx & 
PFINT_FW_CTL_MSIX_INDX_M) | + PFINT_FW_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_FW_CTL, val); + + /* enable Mailbox queue Interrupt causes */ + val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) | + PFINT_MBX_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_MBX_CTL, val); + + ice_flush(hw); +} + /** * ice_req_irq_msix_misc - Setup the misc vector to handle non queue events * @pf: board private structure @@ -1389,8 +1441,6 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; int oicr_idx, err = 0; - u8 itr_gran; - u32 val; if (!pf->int_name[0]) snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc", @@ -1439,24 +1489,9 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf) skip_req_irq: ice_ena_misc_vector(pf); - val = ((pf->hw_oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) | - PFINT_OICR_CTL_CAUSE_ENA_M); - wr32(hw, PFINT_OICR_CTL, val); - - /* This enables Admin queue Interrupt causes */ - val = ((pf->hw_oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) | - PFINT_FW_CTL_CAUSE_ENA_M); - wr32(hw, PFINT_FW_CTL, val); - - /* This enables Mailbox queue Interrupt causes */ - val = ((pf->hw_oicr_idx & PFINT_MBX_CTL_MSIX_INDX_M) | - PFINT_MBX_CTL_CAUSE_ENA_M); - wr32(hw, PFINT_MBX_CTL, val); - - itr_gran = hw->itr_gran; - + ice_ena_ctrlq_interrupts(hw, pf->hw_oicr_idx); wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->hw_oicr_idx), - ITR_TO_REG(ICE_ITR_8K, itr_gran)); + ITR_REG_ALIGN(ICE_ITR_8K) >> ICE_ITR_GRAN_S); ice_flush(hw); ice_irq_dynamic_ena(hw, NULL, NULL); @@ -1516,8 +1551,8 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) u8 mac_addr[ETH_ALEN]; int err; - netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv), - vsi->alloc_txq, vsi->alloc_rxq); + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); if (!netdev) return -ENOMEM; @@ -1531,6 +1566,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) csumo_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_SCTP_CRC | NETIF_F_IPV6_CSUM; vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | @@ -1869,7 +1905,7 @@ static int ice_ena_msix_range(struct ice_pf *pf) v_left -= pf->num_lan_msix; pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget, - sizeof(struct msix_entry), GFP_KERNEL); + sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { err = -ENOMEM; @@ -1957,7 +1993,6 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf) static int ice_init_interrupt_scheme(struct ice_pf *pf) { int vectors = 0, hw_vectors = 0; - ssize_t size; if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) vectors = ice_ena_msix_range(pf); @@ -1968,9 +2003,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) return vectors; /* set up vector assignment tracking */ - size = sizeof(struct ice_res_tracker) + (sizeof(u16) * vectors); - - pf->sw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + pf->sw_irq_tracker = + devm_kzalloc(&pf->pdev->dev, sizeof(*pf->sw_irq_tracker) + + (sizeof(u16) * vectors), GFP_KERNEL); if (!pf->sw_irq_tracker) { ice_dis_msix(pf); return -ENOMEM; @@ -1982,9 +2017,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) /* set up HW vector assignment tracking */ hw_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - size = sizeof(struct ice_res_tracker) + (sizeof(u16) * hw_vectors); - - pf->hw_irq_tracker = devm_kzalloc(&pf->pdev->dev, size, GFP_KERNEL); + pf->hw_irq_tracker = + devm_kzalloc(&pf->pdev->dev, sizeof(*pf->hw_irq_tracker) + + (sizeof(u16) * hw_vectors), GFP_KERNEL); if (!pf->hw_irq_tracker) { ice_clear_interrupt_scheme(pf); return -ENOMEM; @@ -1997,6 +2032,23 @@ static int 
ice_init_interrupt_scheme(struct ice_pf *pf) return 0; } +/** + * ice_verify_itr_gran - verify driver's assumption of ITR granularity + * @pf: pointer to the PF structure + * + * There is no error returned here because the driver will be able to handle a + * different ITR granularity, but interrupt moderation will not be accurate if + * the driver's assumptions are not verified. This assumption is made so we can + * use constants in the hot path instead of accessing structure members. + */ +static void ice_verify_itr_gran(struct ice_pf *pf) +{ + if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1)) + dev_warn(&pf->pdev->dev, + "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n", + (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran); +} + /** * ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines * @pf: pointer to the PF structure @@ -2101,7 +2153,7 @@ static int ice_probe(struct pci_dev *pdev, } pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi, - sizeof(struct ice_vsi *), GFP_KERNEL); + sizeof(*pf->vsi), GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_init_pf_unroll; @@ -2133,7 +2185,7 @@ static int ice_probe(struct pci_dev *pdev, } /* create switch struct for the switch element created by FW on boot */ - pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(struct ice_sw), + pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw), GFP_KERNEL); if (!pf->first_sw) { err = -ENOMEM; @@ -2163,6 +2215,7 @@ static int ice_probe(struct pci_dev *pdev, mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); ice_verify_cacheline_size(pf); + ice_verify_itr_gran(pf); return 0; @@ -2419,10 +2472,12 @@ static void ice_set_rx_mode(struct net_device *netdev) * @addr: the MAC address entry being added * @vid: VLAN id * @flags: instructions from stack about fdb operation + * @extack: netlink extended ack */ -static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid, u16 flags) +static int +ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], + struct net_device *dev, const unsigned char *addr, u16 vid, + u16 flags, struct netlink_ext_ack __always_unused *extack) { int err; @@ -2546,7 +2601,8 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) if (err) return err; } - err = ice_vsi_cfg_txqs(vsi); + + err = ice_vsi_cfg_lan_txqs(vsi); if (!err) err = ice_vsi_cfg_rxqs(vsi); @@ -2944,13 +3000,92 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) } } +/** + * ice_force_phys_link_state - Force the physical link state + * @vsi: VSI to force the physical link state to up/down + * @link_up: true/false indicates to set the physical link to up/down + * + * Force the physical link state by getting the current PHY capabilities from + * hardware and setting the PHY config based on the determined capabilities. If + * link changes a link event will be triggered because both the Enable Automatic + * Link Update and LESM Enable bits are set when setting the PHY capabilities. 
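/* [Editor's note -- illustrative sketch, not part of the patch.]
 * ice_force_phys_link_state() below is a read-modify-write of the PHY
 * configuration: read the current capabilities, bail out if the requested
 * state already matches, copy the capabilities into a config structure,
 * flip only the link-enable bit, and write the config back.  A compact
 * sketch of that flow; phy_caps, phy_cfg and CAP_LINK_UP are invented
 * names, not the driver's.
 */
#include <stdbool.h>
#include <string.h>

#define CAP_LINK_UP 0x01u

struct phy_caps { unsigned int caps; unsigned long long type_low, type_high; };
struct phy_cfg  { unsigned int caps; unsigned long long type_low, type_high; };

static void get_caps(struct phy_caps *c)
{
	memset(c, 0, sizeof(*c));
	c->caps = CAP_LINK_UP;			/* pretend link is currently enabled */
}

static void set_cfg(const struct phy_cfg *c) { (void)c; }

static void force_link(bool link_up)
{
	struct phy_caps caps;
	struct phy_cfg cfg;

	get_caps(&caps);
	if (link_up == !!(caps.caps & CAP_LINK_UP))
		return;				/* no change needed */

	cfg.type_low  = caps.type_low;		/* preserve existing settings */
	cfg.type_high = caps.type_high;
	cfg.caps      = caps.caps;
	if (link_up)
		cfg.caps |= CAP_LINK_UP;
	else
		cfg.caps &= ~CAP_LINK_UP;
	set_cfg(&cfg);				/* only the link bit changed */
}

int main(void)
{
	force_link(false);
	return 0;
}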
+ * + * Returns 0 on success, negative on failure + */ +static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_set_phy_cfg_data *cfg; + struct ice_port_info *pi; + struct device *dev; + int retcode; + + if (!vsi || !vsi->port_info || !vsi->back) + return -EINVAL; + if (vsi->type != ICE_VSI_PF) + return 0; + + dev = &vsi->back->pdev->dev; + + pi = vsi->port_info; + + pcaps = devm_kzalloc(dev, sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (retcode) { + dev_err(dev, + "Failed to get phy capabilities, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + goto out; + } + + /* No change in link */ + if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) && + link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) + goto out; + + cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + retcode = -ENOMEM; + goto out; + } + + cfg->phy_type_low = pcaps->phy_type_low; + cfg->phy_type_high = pcaps->phy_type_high; + cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + cfg->low_power_ctrl = pcaps->low_power_ctrl; + cfg->eee_cap = pcaps->eee_cap; + cfg->eeer_value = pcaps->eeer_value; + cfg->link_fec_opt = pcaps->link_fec_options; + if (link_up) + cfg->caps |= ICE_AQ_PHY_ENA_LINK; + else + cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; + + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); + if (retcode) { + dev_err(dev, "Failed to set phy config, VSI %d error %d\n", + vsi->vsi_num, retcode); + retcode = -EIO; + } + + devm_kfree(dev, cfg); +out: + devm_kfree(dev, pcaps); + return retcode; +} + /** * ice_down - Shutdown the connection * @vsi: The VSI being stopped */ int ice_down(struct ice_vsi *vsi) { - int i, tx_err, rx_err; + int i, tx_err, rx_err, link_err = 0; /* Caller of this function is expected to set the * vsi->state __ICE_DOWN bit @@ -2961,7 +3096,8 @@ int ice_down(struct ice_vsi *vsi) } ice_vsi_dis_irq(vsi); - tx_err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0); + + tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0); if (tx_err) netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", @@ -2975,13 +3111,21 @@ int ice_down(struct ice_vsi *vsi) ice_napi_disable_all(vsi); + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { + link_err = ice_force_phys_link_state(vsi, false); + if (link_err) + netdev_err(vsi->netdev, + "Failed to set physical link down, VSI %d error %d\n", + vsi->vsi_num, link_err); + } + ice_for_each_txq(vsi, i) ice_clean_tx_ring(vsi->tx_rings[i]); ice_for_each_rxq(vsi, i) ice_clean_rx_ring(vsi->rx_rings[i]); - if (tx_err || rx_err) { + if (tx_err || rx_err || link_err) { netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); @@ -3601,30 +3745,39 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) struct device *dev = &vsi->back->pdev->dev; struct ice_aqc_vsi_props *vsi_props; struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; + struct ice_vsi_ctx *ctxt; enum ice_status status; + int ret = 0; vsi_props = &vsi->info; - ctxt.info = vsi->info; + + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; if (bmode == BRIDGE_MODE_VEB) /* change from VEPA to VEB mode */ - ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; else /* change from 
VEB to VEPA mode */ - ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", bmode, status, hw->adminq.sq_last_status); - return -EIO; + ret = -EIO; + goto out; } /* Update sw flags for book keeping */ - vsi_props->sw_flags = ctxt.info.sw_flags; + vsi_props->sw_flags = ctxt->info.sw_flags; - return 0; +out: + devm_kfree(dev, ctxt); + return ret; } /** @@ -3641,7 +3794,8 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) */ static int ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, - u16 __always_unused flags, struct netlink_ext_ack *extack) + u16 __always_unused flags, + struct netlink_ext_ack __always_unused *extack) { struct ice_netdev_priv *np = netdev_priv(dev); struct ice_pf *pf = np->vsi->back; @@ -3814,8 +3968,14 @@ static int ice_open(struct net_device *netdev) netif_carrier_off(netdev); - err = ice_vsi_open(vsi); + err = ice_force_phys_link_state(vsi, true); + if (err) { + netdev_err(netdev, + "Failed to set physical link up, error %d\n", err); + return err; + } + err = ice_vsi_open(vsi); if (err) netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", vsi->vsi_num, vsi->vsw->sw_id); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 3274c543283c..413fdbbcc4d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -124,6 +124,63 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data) return status; } +/** + * ice_read_sr_buf_aq - Reads Shadow RAM buf via AQ + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buf) from the SR using the ice_read_sr_aq + * method. Ownership of the NVM is taken before reading the buffer and later + * released. + */ +static enum ice_status +ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) +{ + enum ice_status status; + bool last_cmd = false; + u16 words_read = 0; + u16 i = 0; + + do { + u16 read_size, off_w; + + /* Calculate number of bytes we should read in this step. + * It's not allowed to read more than one page at a time or + * to cross page boundaries. + */ + off_w = offset % ICE_SR_SECTOR_SIZE_IN_WORDS; + read_size = off_w ? 
+ min_t(u16, *words, + (ICE_SR_SECTOR_SIZE_IN_WORDS - off_w)) : + min_t(u16, (*words - words_read), + ICE_SR_SECTOR_SIZE_IN_WORDS); + + /* Check if this is last command, if so set proper flag */ + if ((words_read + read_size) >= *words) + last_cmd = true; + + status = ice_read_sr_aq(hw, offset, read_size, + data + words_read, last_cmd); + if (status) + goto read_nvm_buf_aq_exit; + + /* Increment counter for words already read and move offset to + * new read location + */ + words_read += read_size; + offset += read_size; + } while (words_read < *words); + + for (i = 0; i < *words; i++) + data[i] = le16_to_cpu(((__le16 *)data)[i]); + +read_nvm_buf_aq_exit: + *words = words_read; + return status; +} + /** * ice_acquire_nvm - Generic request for acquiring the NVM ownership * @hw: pointer to the HW structure @@ -234,3 +291,28 @@ enum ice_status ice_init_nvm(struct ice_hw *hw) return status; } + +/** + * ice_read_sr_buf - Reads Shadow RAM buf and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buf) from the SR using the ice_read_nvm_buf_aq + * method. The buf read is preceded by the NVM ownership take + * and followed by the release. + */ +enum ice_status +ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data) +{ + enum ice_status status; + + status = ice_acquire_nvm(hw, ICE_RES_READ); + if (!status) { + status = ice_read_sr_buf_aq(hw, offset, words, data); + ice_release_nvm(hw); + } + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index a1681853df2e..56049739a250 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -85,36 +85,58 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid) } /** - * ice_aq_query_sched_elems - query scheduler elements + * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd * @hw: pointer to the hw struct - * @elems_req: number of elements to query + * @cmd_opc: cmd opcode + * @elems_req: number of elements to request * @buf: pointer to buffer * @buf_size: buffer size in bytes - * @elems_ret: returns total number of elements returned + * @elems_resp: returns total number of elements response * @cd: pointer to command details structure or NULL * - * Query scheduling elements (0x0404) + * This function sends a scheduling elements cmd (cmd_opc) */ static enum ice_status -ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, - struct ice_aqc_get_elem *buf, u16 buf_size, - u16 *elems_ret, struct ice_sq_cd *cd) +ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, + u16 elems_req, void *buf, u16 buf_size, + u16 *elems_resp, struct ice_sq_cd *cd) { - struct ice_aqc_get_cfg_elem *cmd; + struct ice_aqc_sched_elem_cmd *cmd; struct ice_aq_desc desc; enum ice_status status; - cmd = &desc.params.get_update_elem; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sched_elems); + cmd = &desc.params.sched_elem_cmd; + ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); cmd->num_elem_req = cpu_to_le16(elems_req); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); - if (!status && elems_ret) - *elems_ret = le16_to_cpu(cmd->num_elem_resp); + if (!status && elems_resp) + *elems_resp = 
le16_to_cpu(cmd->num_elem_resp); return status; } +/** + * ice_aq_query_sched_elems - query scheduler elements + * @hw: pointer to the hw struct + * @elems_req: number of elements to query + * @buf: pointer to buffer + * @buf_size: buffer size in bytes + * @elems_ret: returns total number of elements returned + * @cd: pointer to command details structure or NULL + * + * Query scheduling elements (0x0404) + */ +static enum ice_status +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, + struct ice_aqc_get_elem *buf, u16 buf_size, + u16 *elems_ret, struct ice_sq_cd *cd) +{ + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, + elems_req, (void *)buf, buf_size, + elems_ret, cd); +} + /** * ice_sched_query_elem - query element information from hw * @hw: pointer to the hw struct @@ -218,20 +240,9 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_delete_elem *buf, u16 buf_size, u16 *grps_del, struct ice_sq_cd *cd) { - struct ice_aqc_add_move_delete_elem *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - cmd = &desc.params.add_move_delete_elem; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - cmd->num_grps_req = cpu_to_le16(grps_req); - - status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); - if (!status && grps_del) - *grps_del = le16_to_cpu(cmd->num_grps_updated); - - return status; + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems, + grps_req, (void *)buf, buf_size, + grps_del, cd); } /** @@ -442,52 +453,9 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, struct ice_aqc_add_elem *buf, u16 buf_size, u16 *grps_added, struct ice_sq_cd *cd) { - struct ice_aqc_add_move_delete_elem *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - cmd = &desc.params.add_move_delete_elem; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - - cmd->num_grps_req = cpu_to_le16(grps_req); - status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); - if (!status && grps_added) - *grps_added = le16_to_cpu(cmd->num_grps_updated); - - return status; -} - -/** - * ice_suspend_resume_elems - suspend/resume scheduler elements - * @hw: pointer to the hw struct - * @elems_req: number of elements to suspend - * @buf: pointer to buffer - * @buf_size: buffer size in bytes - * @elems_ret: returns total number of elements suspended - * @cd: pointer to command details structure or NULL - * @cmd_code: command code for suspend or resume - * - * suspend/resume scheduler elements - */ -static enum ice_status -ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req, - struct ice_aqc_suspend_resume_elem *buf, u16 buf_size, - u16 *elems_ret, struct ice_sq_cd *cd, - enum ice_adminq_opc cmd_code) -{ - struct ice_aqc_get_cfg_elem *cmd; - struct ice_aq_desc desc; - enum ice_status status; - - cmd = &desc.params.get_update_elem; - ice_fill_dflt_direct_cmd_desc(&desc, cmd_code); - cmd->num_elem_req = cpu_to_le16(elems_req); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); - if (!status && elems_ret) - *elems_ret = le16_to_cpu(cmd->num_elem_resp); - return status; + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems, + grps_req, (void *)buf, buf_size, + grps_added, cd); } /** @@ -506,8 +474,9 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_suspend_resume_elem *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd 
*cd) { - return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, - cd, ice_aqc_opc_suspend_sched_elems); + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems, + elems_req, (void *)buf, buf_size, + elems_ret, cd); } /** @@ -526,8 +495,9 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, struct ice_aqc_suspend_resume_elem *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { - return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, - cd, ice_aqc_opc_resume_sched_elems); + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems, + elems_req, (void *)buf, buf_size, + elems_ret, cd); } /** @@ -591,23 +561,18 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, } /** - * ice_sched_clear_tx_topo - clears the schduler tree nodes - * @pi: port information structure + * ice_sched_clear_agg - clears the agg related information + * @hw: pointer to the hardware structure * - * This function removes all the nodes from HW as well as from SW DB. + * This function removes agg list and free up agg related memory + * previously allocated. */ -static void ice_sched_clear_tx_topo(struct ice_port_info *pi) +void ice_sched_clear_agg(struct ice_hw *hw) { struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *atmp; - struct ice_hw *hw; - - if (!pi) - return; - - hw = pi->hw; - list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) { + list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *vtmp; @@ -616,8 +581,21 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi) list_del(&agg_vsi_info->list_entry); devm_kfree(ice_hw_to_dev(hw), agg_vsi_info); } + list_del(&agg_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), agg_info); } +} +/** + * ice_sched_clear_tx_topo - clears the scheduler tree nodes + * @pi: port information structure + * + * This function removes all the nodes from HW as well as from SW DB. + */ +static void ice_sched_clear_tx_topo(struct ice_port_info *pi) +{ + if (!pi) + return; if (pi->root) { ice_free_sched_node(pi, pi->root); pi->root = NULL; @@ -1035,7 +1013,6 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) /* initialize the port for handling the scheduler tree */ pi->port_state = ICE_SCHED_PORT_STATE_READY; mutex_init(&pi->sched_lock); - INIT_LIST_HEAD(&pi->agg_list); err_init_port: if (status && pi->root) { @@ -1089,11 +1066,10 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) hw->max_children[i] = le16_to_cpu(max_sibl); } - hw->layer_info = (struct ice_aqc_layer_props *) - devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, - (hw->num_tx_sched_layers * - sizeof(*hw->layer_info)), - GFP_KERNEL); + hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, + (hw->num_tx_sched_layers * + sizeof(*hw->layer_info)), + GFP_KERNEL); if (!hw->layer_info) { status = ICE_ERR_NO_MEMORY; goto sched_query_out; @@ -1367,9 +1343,14 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, node = node->sibling; } + /* tree has one intermediate node to add this new VSI. + * So no need to calculate supported nodes for below + * layers. 
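/* [Editor's note -- illustrative sketch, not part of the patch.]
 * The scheduler hunks above collapse the near-identical admin-queue
 * wrappers (query, add, delete, suspend, resume elements) into a single
 * ice_aqc_send_sched_elem_cmd() that only varies by opcode, with thin
 * wrappers preserving the old entry points.  A minimal sketch of that
 * shape; send_sched_cmd and the OPC_* values are placeholders, not the
 * real AQ opcodes.
 */
#include <stdint.h>
#include <stdio.h>

enum opcode { OPC_QUERY = 1, OPC_SUSPEND = 2, OPC_RESUME = 3 };

static int send_sched_cmd(enum opcode opc, const void *buf, uint16_t len)
{
	/* one place that builds the descriptor and issues the command */
	printf("opcode %u, %u byte buffer\n", (unsigned)opc, (unsigned)len);
	return buf ? 0 : -1;
}

/* thin per-opcode wrappers keep the callers unchanged */
static int query_elems(const void *buf, uint16_t len)
{
	return send_sched_cmd(OPC_QUERY, buf, len);
}

static int suspend_elems(const void *buf, uint16_t len)
{
	return send_sched_cmd(OPC_SUSPEND, buf, len);
}

int main(void)
{
	uint16_t elem = 0;

	query_elems(&elem, sizeof(elem));
	suspend_elems(&elem, sizeof(elem));
	return 0;
}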
+ */ + if (node) + break; /* all the nodes are full, allocate a new one */ - if (!node) - num_nodes[i]++; + num_nodes[i]++; } } @@ -1618,7 +1599,8 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) struct ice_sched_agg_info *agg_info; struct ice_sched_agg_info *atmp; - list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) { + list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list, + list_entry) { struct ice_sched_agg_vsi_info *agg_vsi_info; struct ice_sched_agg_vsi_info *vtmp; @@ -1633,6 +1615,23 @@ ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) } } +/** + * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree + * @node: pointer to the sub-tree node + * + * This function checks for a leaf node presence in a given sub-tree node. + */ +static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) +{ + u8 i; + + for (i = 0; i < node->num_children; i++) + if (ice_sched_is_leaf_node_present(node->children[i])) + return true; + /* check for a leaf node */ + return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); +} + /** * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes * @pi: port information structure @@ -1667,6 +1666,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) if (!vsi_node) continue; + if (ice_sched_is_leaf_node_present(vsi_node)) { + ice_debug(pi->hw, ICE_DBG_SCHED, + "VSI has leaf nodes in TC %d\n", i); + status = ICE_ERR_IN_USE; + goto exit_sched_rm_vsi_cfg; + } while (j < vsi_node->num_children) { if (vsi_node->children[j]->owner == owner) { ice_free_sched_node(pi, vsi_node->children[j]); diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index da5b4c166da8..bee8221ad146 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -28,6 +28,8 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi); enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); void ice_sched_clear_port(struct ice_port_info *pi); void ice_sched_cleanup_all(struct ice_hw *hw); +void ice_sched_clear_agg(struct ice_hw *hw); + struct ice_sched_node * ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 533b989a23e1..d2db0d04e117 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -85,6 +85,12 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) case ICE_AQ_LINK_SPEED_40GB: speed = ICE_LINK_SPEED_40000MBPS; break; + case ICE_AQ_LINK_SPEED_50GB: + speed = ICE_LINK_SPEED_50000MBPS; + break; + case ICE_AQ_LINK_SPEED_100GB: + speed = ICE_LINK_SPEED_100000MBPS; + break; default: speed = ICE_LINK_SPEED_UNKNOWN; break; @@ -116,6 +122,9 @@ u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) break; case ICE_AQ_LINK_SPEED_40GB: /* fall through */ + case ICE_AQ_LINK_SPEED_50GB: + /* fall through */ + case ICE_AQ_LINK_SPEED_100GB: speed = (u32)VIRTCHNL_LINK_SPEED_40GB; break; default: diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h index f49f299ddf2c..683f48824a29 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -22,6 +22,7 @@ enum ice_status { ICE_ERR_OUT_OF_RANGE = -13, ICE_ERR_ALREADY_EXISTS = -14, ICE_ERR_DOES_NOT_EXIST 
= -15, + ICE_ERR_IN_USE = -16, ICE_ERR_MAX_LIMIT = -17, ICE_ERR_RESET_ONGOING = -18, ICE_ERR_BUF_TOO_SHORT = -52, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 2e5693107fa4..09d1c314b68f 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -98,7 +98,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) u8 i; recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, - sizeof(struct ice_sw_recipe), GFP_KERNEL); + sizeof(*recps), GFP_KERNEL); if (!recps) return ICE_ERR_NO_MEMORY; @@ -1538,9 +1538,20 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, } else if (!list_elem->vsi_list_info) { status = ICE_ERR_DOES_NOT_EXIST; goto exit; + } else if (list_elem->vsi_list_info->ref_cnt > 1) { + /* a ref_cnt > 1 indicates that the vsi_list is being + * shared by multiple rules. Decrement the ref_cnt and + * remove this rule, but do not modify the list, as it + * is in-use by other rules. + */ + list_elem->vsi_list_info->ref_cnt--; + remove_rule = true; } else { - if (list_elem->vsi_list_info->ref_cnt > 1) - list_elem->vsi_list_info->ref_cnt--; + /* a ref_cnt of 1 indicates the vsi_list is only used + * by one rule. However, the original removal request is only + * for a single VSI. Update the vsi_list first, and only + * remove the rule if there are no further VSIs in this list. + */ vsi_handle = f_entry->fltr_info.vsi_handle; status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem); if (status) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 49fc38094185..c289d97f477d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -48,7 +48,6 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring) */ void ice_clean_tx_ring(struct ice_ring *tx_ring) { - unsigned long size; u16 i; /* ring already cleared, nothing to do */ @@ -59,8 +58,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) for (i = 0; i < tx_ring->count; i++) ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); - size = sizeof(struct ice_tx_buf) * tx_ring->count; - memset(tx_ring->tx_buf, 0, size); + memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count); /* Zero out the descriptor ring */ memset(tx_ring->desc, 0, tx_ring->size); @@ -226,21 +224,21 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int ice_setup_tx_ring(struct ice_ring *tx_ring) { struct device *dev = tx_ring->dev; - int bi_size; if (!dev) return -ENOMEM; /* warn if we are about to overwrite the pointer */ WARN_ON(tx_ring->tx_buf); - bi_size = sizeof(struct ice_tx_buf) * tx_ring->count; - tx_ring->tx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); + tx_ring->tx_buf = + devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count, + GFP_KERNEL); if (!tx_ring->tx_buf) return -ENOMEM; /* round up to nearest 4K */ - tx_ring->size = tx_ring->count * sizeof(struct ice_tx_desc); - tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc), + 4096); tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); if (!tx_ring->desc) { @@ -267,7 +265,6 @@ err: void ice_clean_rx_ring(struct ice_ring *rx_ring) { struct device *dev = rx_ring->dev; - unsigned long size; u16 i; /* ring already cleared, nothing to do */ @@ -292,8 +289,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring) rx_buf->page_offset = 0; } - size 
= sizeof(struct ice_rx_buf) * rx_ring->count; - memset(rx_ring->rx_buf, 0, size); + memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count); /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); @@ -331,15 +327,15 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) int ice_setup_rx_ring(struct ice_ring *rx_ring) { struct device *dev = rx_ring->dev; - int bi_size; if (!dev) return -ENOMEM; /* warn if we are about to overwrite the pointer */ WARN_ON(rx_ring->rx_buf); - bi_size = sizeof(struct ice_rx_buf) * rx_ring->count; - rx_ring->rx_buf = devm_kzalloc(dev, bi_size, GFP_KERNEL); + rx_ring->rx_buf = + devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count, + GFP_KERNEL); if (!rx_ring->rx_buf) return -ENOMEM; @@ -1052,6 +1048,69 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) return failure ? budget : (int)total_rx_pkts; } +/** + * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register + * @itr_idx: interrupt throttling index + * @reg_itr: interrupt throttling value adjusted based on ITR granularity + */ +static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr) +{ + return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M | + (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) | + (reg_itr << GLINT_DYN_CTL_INTERVAL_S); +} + +/** + * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt + * @vsi: the VSI associated with the q_vector + * @q_vector: q_vector for which ITR is being updated and interrupt enabled + */ +static void +ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_ring_container *rc; + u32 itr_val; + + /* This block of logic allows us to get away with only updating + * one ITR value with each interrupt. The idea is to perform a + * pseudo-lazy update with the following criteria. + * + * 1. Rx is given higher priority than Tx if both are in same state + * 2. If we must reduce an ITR that is given highest priority. + * 3. We then give priority to increasing ITR based on amount. 
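/* [Editor's note -- illustrative sketch, not part of the patch.]
 * The value that ice_buildreg_itr() above assembles and ice_update_ena_itr()
 * below writes is the interrupt-enable and clear-PBA flags plus the ITR
 * index and interval packed into bit fields, where the interval is the
 * usec setting divided by the assumed 2 usec granularity (the
 * ICE_ITR_GRAN_S shift).  A standalone example of the packing; the flag
 * and shift values here are placeholders, not the real register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define INTENA		0x1u		/* placeholder bit positions */
#define CLEARPBA	0x2u
#define ITR_INDX_S	3
#define INTERVAL_S	5
#define ITR_GRAN_S	1		/* divide usecs by 2 us granularity */

static uint32_t buildreg_itr(unsigned int itr_idx, unsigned int reg_itr)
{
	return INTENA | CLEARPBA |
	       (itr_idx << ITR_INDX_S) |
	       (reg_itr << INTERVAL_S);
}

int main(void)
{
	unsigned int itr_usecs = 124;	/* ICE_ITR_8K in the patch */

	/* caller converts usecs to register units first: 124 us / 2 us = 62 */
	printf("0x%08x\n", (unsigned)buildreg_itr(0, itr_usecs >> ITR_GRAN_S));
	return 0;
}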
+ */ + if (q_vector->rx.target_itr < q_vector->rx.current_itr) { + rc = &q_vector->rx; + /* Rx ITR needs to be reduced, this is highest priority */ + itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); + rc->current_itr = rc->target_itr; + } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || + ((q_vector->rx.target_itr - q_vector->rx.current_itr) < + (q_vector->tx.target_itr - q_vector->tx.current_itr))) { + rc = &q_vector->tx; + /* Tx ITR needs to be reduced, this is second priority + * Tx ITR needs to be increased more than Rx, fourth priority + */ + itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); + rc->current_itr = rc->target_itr; + } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { + rc = &q_vector->rx; + /* Rx ITR needs to be increased, third priority */ + itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr); + rc->current_itr = rc->target_itr; + } else { + /* Still have to re-enable the interrupts */ + itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0); + } + + if (!test_bit(__ICE_DOWN, vsi->state)) { + int vector = vsi->hw_base_vector + q_vector->v_idx; + + wr32(hw, GLINT_DYN_CTL(vector), itr_val); + } +} + /** * ice_napi_poll - NAPI polling Rx/Tx cleanup routine * @napi: napi struct with our devices info in it @@ -1108,9 +1167,9 @@ int ice_napi_poll(struct napi_struct *napi, int budget) */ if (likely(napi_complete_done(napi, work_done))) if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector); + ice_update_ena_itr(vsi, q_vector); - return min(work_done, budget - 1); + return min_t(int, work_done, budget - 1); } /* helper function for building cmd/type/offset */ @@ -1402,6 +1461,12 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; break; case IPPROTO_SCTP: + /* enable SCTP checksum offload */ + cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP; + l4_len = sizeof(struct sctphdr) >> 2; + offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S; + break; + default: if (first->tx_flags & ICE_TX_FLAGS_TSO) return -1; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 75d0eaf6c9dd..fc358ea81816 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -116,16 +116,17 @@ enum ice_rx_dtype { /* indices into GLINT_ITR registers */ #define ICE_RX_ITR ICE_IDX_ITR0 #define ICE_TX_ITR ICE_IDX_ITR1 -#define ICE_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ -#define ICE_ITR_8K 125 +#define ICE_ITR_8K 124 #define ICE_ITR_20K 50 -#define ICE_DFLT_TX_ITR ICE_ITR_20K -#define ICE_DFLT_RX_ITR ICE_ITR_20K -/* apply ITR granularity translation to program the register. 
itr_gran is either - * 2 or 4 usecs so we need to divide by 2 first then shift by that value - */ -#define ITR_TO_REG(val, itr_gran) (((val) & ~ICE_ITR_DYNAMIC) >> \ - ((itr_gran) / 2)) +#define ICE_ITR_MAX 8160 +#define ICE_DFLT_TX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC) +#define ICE_DFLT_RX_ITR (ICE_ITR_20K | ICE_ITR_DYNAMIC) +#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */ +#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC)) +#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC) +#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */ +#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */ +#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK) #define ICE_DFLT_INTRL 0 @@ -180,13 +181,20 @@ enum ice_latency_range { }; struct ice_ring_container { - /* array of pointers to rings */ + /* head of linked-list of rings */ struct ice_ring *ring; + unsigned long next_update; /* jiffies value of next queue update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_pkts; /* total packets processed this int */ enum ice_latency_range latency_range; - int itr_idx; /* index in the interrupt vector */ - u16 itr; + int itr_idx; /* index in the interrupt vector */ + u16 target_itr; /* value in usecs divided by the hw->itr_gran */ + u16 current_itr; /* value in usecs divided by the hw->itr_gran */ + /* high bit set means dynamic ITR, rest is used to store user + * readable ITR value in usecs and must be converted before programming + * to a register. + */ + u16 itr_setting; }; /* iterator for handling rings in ring container */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 0ea428104215..17086d5b5c33 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -90,6 +90,7 @@ enum ice_vsi_type { struct ice_link_status { /* Refer to ice_aq_phy_type for bits definition */ u64 phy_type_low; + u64 phy_type_high; u16 max_frame_size; u16 link_speed; u16 req_speeds; @@ -118,6 +119,7 @@ struct ice_phy_info { struct ice_link_status link_info; struct ice_link_status link_info_old; u64 phy_type_low; + u64 phy_type_high; enum ice_media_type media_type; u8 get_link_info; }; @@ -272,7 +274,6 @@ struct ice_port_info { struct ice_mac_info mac; struct ice_phy_info phy; struct mutex sched_lock; /* protect access to TXSched tree */ - struct list_head agg_list; /* lists all aggregator */ u8 lport; #define ICE_LPORT_MASK 0xff u8 is_vf; @@ -326,6 +327,7 @@ struct ice_hw { u8 max_cgds; u8 sw_entry_point_layer; u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM]; + struct list_head agg_list; /* lists all aggregator */ struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; u8 evb_veb; /* true for VEB, false for VEPA */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 05ff4f910649..57155b4a59dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -173,7 +173,8 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) wr32(hw, VPINT_ALLOC(vf->vf_id), 0); wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0); - first = vf->first_vector_idx; + first = vf->first_vector_idx + + hw->func_caps.common_cap.msix_vector_first_id; last = first + pf->num_vf_msix - 1; for (v = first; v <= last; v++) { u32 reg; @@ -224,13 +225,15 @@ void ice_free_vfs(struct ice_pf *pf) /* Avoid wait time by stopping all VFs at the same time */ for (i = 0; i < 
pf->num_alloc_vfs; i++) { + struct ice_vsi *vsi; + if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) continue; + vsi = pf->vsi[pf->vf[i].lan_vsi_idx]; /* stop rings without wait time */ - ice_vsi_stop_tx_rings(pf->vsi[pf->vf[i].lan_vsi_idx], - ICE_NO_RESET, i); - ice_vsi_stop_rx_rings(pf->vsi[pf->vf[i].lan_vsi_idx]); + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i); + ice_vsi_stop_rx_rings(vsi); clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); } @@ -308,6 +311,11 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr) */ clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + /* Clear the VF's ARQLEN register. This is how the VF detects reset, + * since the VFGEN_RSTAT register doesn't stick at 0 after reset. + */ + wr32(hw, VF_MBX_ARQLEN(vf_abs_id), 0); + /* In the case of a VFLR, the HW has already reset the VF and we * just need to clean up, so don't hit the VFRTRIG register. */ @@ -343,25 +351,33 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid) { struct device *dev = &vsi->back->pdev->dev; struct ice_hw *hw = &vsi->back->hw; - struct ice_vsi_ctx ctxt = { 0 }; + struct ice_vsi_ctx *ctxt; enum ice_status status; + int ret = 0; + + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; - ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED | - ICE_AQ_VSI_PVLAN_INSERT_PVID | - ICE_AQ_VSI_VLAN_EMOD_STR; - ctxt.info.pvid = cpu_to_le16(vid); - ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); + ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED | + ICE_AQ_VSI_PVLAN_INSERT_PVID | + ICE_AQ_VSI_VLAN_EMOD_STR); + ctxt->info.pvid = cpu_to_le16(vid); + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); - status = ice_update_vsi(hw, vsi->idx, &ctxt, NULL); + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); if (status) { dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n", status, hw->adminq.sq_last_status); - return -EIO; + ret = -EIO; + goto out; } - vsi->info.pvid = ctxt.info.pvid; - vsi->info.vlan_flags = ctxt.info.vlan_flags; - return 0; + vsi->info.pvid = ctxt->info.pvid; + vsi->info.vlan_flags = ctxt->info.vlan_flags; +out: + devm_kfree(dev, ctxt); + return ret; } /** @@ -508,7 +524,8 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) hw = &pf->hw; vsi = pf->vsi[vf->lan_vsi_idx]; - first = vf->first_vector_idx; + first = vf->first_vector_idx + + hw->func_caps.common_cap.msix_vector_first_id; last = (first + pf->num_vf_msix) - 1; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; @@ -831,6 +848,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) { struct ice_pf *pf = vf->pf; struct ice_hw *hw = &pf->hw; + struct ice_vsi *vsi; bool rsd = false; u32 reg; int i; @@ -843,17 +861,18 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) ice_trigger_vf_reset(vf, is_vflr); + vsi = pf->vsi[vf->lan_vsi_idx]; + if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) { - ice_vsi_stop_tx_rings(pf->vsi[vf->lan_vsi_idx], ICE_VF_RESET, - vf->vf_id); - ice_vsi_stop_rx_rings(pf->vsi[vf->lan_vsi_idx]); + ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id); + ice_vsi_stop_rx_rings(vsi); clear_bit(ICE_VF_STATE_ENA, vf->vf_states); } else { /* Call Disable LAN Tx queue AQ call even when queues are not * enabled. 
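/* [Editor's note -- illustrative sketch, not part of the patch.]
 * The VF mapping hunks above now add the device's first MSI-X vector id
 * when converting a VF's PF-relative vector index into the absolute
 * vector numbers programmed into the mapping registers.  A worked example
 * with invented values:
 */
#include <stdio.h>

int main(void)
{
	unsigned int msix_vector_first_id = 1;	/* from device capabilities */
	unsigned int first_vector_idx = 5;	/* VF's PF-relative base */
	unsigned int num_vf_msix = 4;

	unsigned int first = first_vector_idx + msix_vector_first_id;
	unsigned int last = first + num_vf_msix - 1;

	printf("VF owns absolute vectors %u..%u\n", first, last);	/* 6..9 */
	return 0;
}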
This is needed for successful completiom of VFR */ - ice_dis_vsi_txq(pf->vsi[vf->lan_vsi_idx]->port_info, 0, - NULL, NULL, ICE_VF_RESET, vf->vf_id, NULL); + ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET, + vf->vf_id, NULL); } /* poll VPGEN_VFRSTAT reg to make sure @@ -1614,7 +1633,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) goto error_param; } - if (ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { + if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) { dev_err(&vsi->back->pdev->dev, "Failed to stop tx rings on VSI %d\n", vsi->vsi_num); @@ -1784,7 +1803,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->num_txq = qci->num_queue_pairs; vsi->num_rxq = qci->num_queue_pairs; - if (!ice_vsi_cfg_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) + if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi)) aq_ret = 0; else aq_ret = ICE_ERR_PARAM; @@ -2475,11 +2494,12 @@ int ice_get_vf_cfg(struct net_device *netdev, int vf_id, int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi_ctx ctx = { 0 }; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; + struct ice_vsi_ctx *ctx; + enum ice_status status; struct ice_vf *vf; - int status; + int ret = 0; /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { @@ -2499,25 +2519,31 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) return 0; } - ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); + ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID); if (ena) { - ctx.info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; - ctx.info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; + ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF; + ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M; } - status = ice_update_vsi(&pf->hw, vsi->idx, &ctx, NULL); + status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); if (status) { dev_dbg(&pf->pdev->dev, "Error %d, failed to update VSI* parameters\n", status); - return -EIO; + ret = -EIO; + goto out; } vf->spoofchk = ena; - vsi->info.sec_flags = ctx.info.sec_flags; - vsi->info.sw_flags2 = ctx.info.sw_flags2; - - return status; + vsi->info.sec_flags = ctx->info.sec_flags; + vsi->info.sw_flags2 = ctx->info.sw_flags2; +out: + devm_kfree(&pf->pdev->dev, ctx); + return ret; } /** diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7137e7f9c7f3..69b230c53fed 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -39,7 +39,7 @@ #include "igb.h" #define MAJ 5 -#define MIN 4 +#define MIN 6 #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ __stringify(BUILD) "-k" @@ -1189,15 +1189,15 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, { struct igb_q_vector *q_vector; struct igb_ring *ring; - int ring_count, size; + int ring_count; + size_t size; /* igb only supports 1 Tx and/or 1 Rx queue per vector */ if (txr_count > 1 || rxr_count > 1) return -ENOMEM; ring_count = txr_count + rxr_count; - size = sizeof(struct igb_q_vector) + - (sizeof(struct igb_ring) * ring_count); + size = struct_size(q_vector, ring, ring_count); /* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; @@ -2486,7 +2486,8 @@ static int igb_set_features(struct net_device *netdev, static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags) + u16 flags, + struct netlink_ext_ack *extack) { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { @@ -2580,9 +2581,11 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, int traffic_class, struct igb_nfc_filter *input) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; struct netlink_ext_ack *extack = f->common.extack; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -2592,78 +2595,60 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter, return -EOPNOTSUPP; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key, *mask; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); - - if (!is_zero_ether_addr(mask->dst)) { - if (!is_broadcast_ether_addr(mask->dst)) { + flow_rule_match_eth_addrs(rule, &match); + if (!is_zero_ether_addr(match.mask->dst)) { + if (!is_broadcast_ether_addr(match.mask->dst)) { NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_DST_MAC_ADDR; - ether_addr_copy(input->filter.dst_addr, key->dst); + ether_addr_copy(input->filter.dst_addr, match.key->dst); } - if (!is_zero_ether_addr(mask->src)) { - if (!is_broadcast_ether_addr(mask->src)) { + if (!is_zero_ether_addr(match.mask->src)) { + if (!is_broadcast_ether_addr(match.mask->src)) { NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_SRC_MAC_ADDR; - ether_addr_copy(input->filter.src_addr, key->src); + ether_addr_copy(input->filter.src_addr, match.key->src); } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - if (mask->n_proto) { - if (mask->n_proto != ETHER_TYPE_FULL_MASK) { + flow_rule_match_basic(rule, &match); + if (match.mask->n_proto) { + if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) { NL_SET_ERR_MSG_MOD(extack, "Only full mask 
is supported for EtherType filter"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE; - input->filter.etype = key->n_proto; + input->filter.etype = match.key->n_proto; } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; - if (mask->vlan_priority) { - if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) { + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_priority) { + if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) { NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority"); return -EINVAL; } input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; - input->filter.vlan_tci = key->vlan_priority; + input->filter.vlan_tci = match.key->vlan_priority; } } diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 4387f6ba8e67..88c6f88baac5 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile @@ -7,4 +7,5 @@ obj-$(CONFIG_IGC) += igc.o -igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o +igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ +igc_ethtool.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index b1039dd3dd13..80faccc34cda 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -13,19 +13,43 @@ #include "igc_hw.h" -/* main */ +/* forward declaration */ +void igc_set_ethtool_ops(struct net_device *); + +struct igc_adapter; +struct igc_ring; + +void igc_up(struct igc_adapter *adapter); +void igc_down(struct igc_adapter *adapter); +int igc_setup_tx_resources(struct igc_ring *ring); +int igc_setup_rx_resources(struct igc_ring *ring); +void igc_free_tx_resources(struct igc_ring *ring); +void igc_free_rx_resources(struct igc_ring *ring); +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter); +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues); +int igc_reinit_queues(struct igc_adapter *adapter); +bool igc_has_link(struct igc_adapter *adapter); +void igc_reset(struct igc_adapter *adapter); +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx); + extern char igc_driver_name[]; extern char igc_driver_version[]; +#define IGC_REGS_LEN 740 +#define IGC_RETA_SIZE 128 + /* Interrupt defines */ #define IGC_START_ITR 648 /* ~6000 ints/sec */ #define IGC_FLAG_HAS_MSI BIT(0) -#define IGC_FLAG_QUEUE_PAIRS BIT(4) +#define IGC_FLAG_QUEUE_PAIRS BIT(3) +#define IGC_FLAG_DMAC BIT(4) #define IGC_FLAG_NEED_LINK_UPDATE BIT(9) #define IGC_FLAG_MEDIA_RESET BIT(10) #define IGC_FLAG_MAS_ENABLE BIT(12) #define IGC_FLAG_HAS_MSIX BIT(13) #define IGC_FLAG_VLAN_PROMISC BIT(15) +#define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_START_ITR 648 /* ~6000 ints/sec */ #define IGC_4K_ITR 980 @@ -60,6 +84,7 @@ extern char igc_driver_version[]; #define IGC_RXBUFFER_2048 2048 #define IGC_RXBUFFER_3072 3072 +#define AUTO_ALL_MODES 0 #define IGC_RX_HDR_LEN IGC_RXBUFFER_256 /* RX and TX descriptor control thresholds. 
@@ -340,6 +365,8 @@ struct igc_adapter { struct igc_mac_addr *mac_table; + u8 rss_indir_tbl[IGC_RETA_SIZE]; + unsigned long link_check_timeout; struct igc_info ei; }; @@ -418,6 +445,9 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) return 0; } +/* forward declaration */ +void igc_reinit_locked(struct igc_adapter *); + #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) #define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index df40af759542..51a8b8769c67 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -53,22 +53,6 @@ out: return ret_val; } -/** - * igc_check_for_link_base - Check for link - * @hw: pointer to the HW structure - * - * If sgmii is enabled, then use the pcs register to determine link, otherwise - * use the generic interface for determining link. - */ -static s32 igc_check_for_link_base(struct igc_hw *hw) -{ - s32 ret_val = 0; - - ret_val = igc_check_for_copper_link(hw); - - return ret_val; -} - /** * igc_reset_hw_base - Reset hardware * @hw: pointer to the HW structure @@ -123,22 +107,6 @@ static s32 igc_reset_hw_base(struct igc_hw *hw) return ret_val; } -/** - * igc_get_phy_id_base - Retrieve PHY addr and id - * @hw: pointer to the HW structure - * - * Retrieves the PHY address and ID for both PHY's which do and do not use - * sgmi interface. - */ -static s32 igc_get_phy_id_base(struct igc_hw *hw) -{ - s32 ret_val = 0; - - ret_val = igc_get_phy_id(hw); - - return ret_val; -} - /** * igc_init_nvm_params_base - Init NVM func ptrs. * @hw: pointer to the HW structure @@ -163,6 +131,7 @@ static s32 igc_init_nvm_params_base(struct igc_hw *hw) if (size > 15) size = 15; + nvm->type = igc_nvm_eeprom_spi; nvm->word_size = BIT(size); nvm->opcode_bits = 8; nvm->delay_usec = 1; @@ -261,11 +230,11 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) goto out; } - ret_val = igc_get_phy_id_base(hw); + ret_val = igc_get_phy_id(hw); if (ret_val) return ret_val; - igc_check_for_link_base(hw); + igc_check_for_copper_link(hw); /* Verify phy id and set remaining function pointers */ switch (phy->id) { @@ -349,26 +318,6 @@ static void igc_release_phy_base(struct igc_hw *hw) hw->mac.ops.release_swfw_sync(hw, mask); } -/** - * igc_get_link_up_info_base - Get link speed/duplex info - * @hw: pointer to the HW structure - * @speed: stores the current speed - * @duplex: stores the current duplex - * - * This is a wrapper function, if using the serial gigabit media independent - * interface, use PCS to retrieve the link speed and duplex information. - * Otherwise, use the generic function to get the link speed and duplex info. 
- */ -static s32 igc_get_link_up_info_base(struct igc_hw *hw, u16 *speed, - u16 *duplex) -{ - s32 ret_val; - - ret_val = igc_get_speed_and_duplex_copper(hw, speed, duplex); - - return ret_val; -} - /** * igc_init_hw_base - Initialize hardware * @hw: pointer to the HW structure @@ -407,19 +356,6 @@ static s32 igc_init_hw_base(struct igc_hw *hw) return ret_val; } -/** - * igc_read_mac_addr_base - Read device MAC address - * @hw: pointer to the HW structure - */ -static s32 igc_read_mac_addr_base(struct igc_hw *hw) -{ - s32 ret_val = 0; - - ret_val = igc_read_mac_addr(hw); - - return ret_val; -} - /** * igc_power_down_phy_copper_base - Remove link during PHY power down * @hw: pointer to the HW structure @@ -512,10 +448,10 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw) static struct igc_mac_operations igc_mac_ops_base = { .init_hw = igc_init_hw_base, - .check_for_link = igc_check_for_link_base, + .check_for_link = igc_check_for_copper_link, .rar_set = igc_rar_set, - .read_mac_addr = igc_read_mac_addr_base, - .get_speed_and_duplex = igc_get_link_up_info_base, + .read_mac_addr = igc_read_mac_addr, + .get_speed_and_duplex = igc_get_speed_and_duplex_copper, }; static const struct igc_phy_operations igc_phy_ops_base = { diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index 35588fa7b8c5..76d4991d7284 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -36,28 +36,6 @@ union igc_adv_tx_desc { #define IGC_RAR_ENTRIES 16 -struct igc_adv_data_desc { - __le64 buffer_addr; /* Address of the descriptor's data buffer */ - union { - u32 data; - struct { - u32 datalen:16; /* Data buffer length */ - u32 rsvd:4; - u32 dtyp:4; /* Descriptor type */ - u32 dcmd:8; /* Descriptor command */ - } config; - } lower; - union { - u32 data; - struct { - u32 status:4; /* Descriptor status */ - u32 idx:4; - u32 popts:6; /* Packet Options */ - u32 paylen:18; /* Payload length */ - } options; - } upper; -}; - /* Receive Descriptor - Advanced */ union igc_adv_rx_desc { struct { @@ -90,9 +68,6 @@ union igc_adv_rx_desc { } wb; /* writeback */ }; -/* Adv Transmit Descriptor Config Masks */ -#define IGC_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ - /* Additional Transmit Descriptor Control definitions */ #define IGC_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 8740754ea1fd..7d1bdcd1225a 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -4,6 +4,10 @@ #ifndef _IGC_DEFINES_H_ #define _IGC_DEFINES_H_ +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ /* PCI Bus Info */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c new file mode 100644 index 000000000000..eff37a6c0afa --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -0,0 +1,1032 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018 Intel Corporation */ + +/* ethtool support for igc */ +#include + +#include "igc.h" + +static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = { +#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0) + "legacy-rx", +}; + +#define IGC_PRIV_FLAGS_STR_LEN ARRAY_SIZE(igc_priv_flags_strings) + +static void 
igc_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version)); + + /* add fw_version here */ + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + + drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN; +} + +static int igc_get_regs_len(struct net_device *netdev) +{ + return IGC_REGS_LEN * sizeof(u32); +} + +static void igc_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 i; + + memset(p, 0, IGC_REGS_LEN * sizeof(u32)); + + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; + + /* General Registers */ + regs_buff[0] = rd32(IGC_CTRL); + regs_buff[1] = rd32(IGC_STATUS); + regs_buff[2] = rd32(IGC_CTRL_EXT); + regs_buff[3] = rd32(IGC_MDIC); + regs_buff[4] = rd32(IGC_CONNSW); + + /* NVM Register */ + regs_buff[5] = rd32(IGC_EECD); + + /* Interrupt */ + /* Reading EICS for EICR because they read the + * same but EICS does not clear on read + */ + regs_buff[6] = rd32(IGC_EICS); + regs_buff[7] = rd32(IGC_EICS); + regs_buff[8] = rd32(IGC_EIMS); + regs_buff[9] = rd32(IGC_EIMC); + regs_buff[10] = rd32(IGC_EIAC); + regs_buff[11] = rd32(IGC_EIAM); + /* Reading ICS for ICR because they read the + * same but ICS does not clear on read + */ + regs_buff[12] = rd32(IGC_ICS); + regs_buff[13] = rd32(IGC_ICS); + regs_buff[14] = rd32(IGC_IMS); + regs_buff[15] = rd32(IGC_IMC); + regs_buff[16] = rd32(IGC_IAC); + regs_buff[17] = rd32(IGC_IAM); + + /* Flow Control */ + regs_buff[18] = rd32(IGC_FCAL); + regs_buff[19] = rd32(IGC_FCAH); + regs_buff[20] = rd32(IGC_FCTTV); + regs_buff[21] = rd32(IGC_FCRTL); + regs_buff[22] = rd32(IGC_FCRTH); + regs_buff[23] = rd32(IGC_FCRTV); + + /* Receive */ + regs_buff[24] = rd32(IGC_RCTL); + regs_buff[25] = rd32(IGC_RXCSUM); + regs_buff[26] = rd32(IGC_RLPML); + regs_buff[27] = rd32(IGC_RFCTL); + + /* Transmit */ + regs_buff[28] = rd32(IGC_TCTL); + regs_buff[29] = rd32(IGC_TIPG); + + /* Wake Up */ + + /* MAC */ + + /* Statistics */ + regs_buff[30] = adapter->stats.crcerrs; + regs_buff[31] = adapter->stats.algnerrc; + regs_buff[32] = adapter->stats.symerrs; + regs_buff[33] = adapter->stats.rxerrc; + regs_buff[34] = adapter->stats.mpc; + regs_buff[35] = adapter->stats.scc; + regs_buff[36] = adapter->stats.ecol; + regs_buff[37] = adapter->stats.mcc; + regs_buff[38] = adapter->stats.latecol; + regs_buff[39] = adapter->stats.colc; + regs_buff[40] = adapter->stats.dc; + regs_buff[41] = adapter->stats.tncrs; + regs_buff[42] = adapter->stats.sec; + regs_buff[43] = adapter->stats.htdpmc; + regs_buff[44] = adapter->stats.rlec; + regs_buff[45] = adapter->stats.xonrxc; + regs_buff[46] = adapter->stats.xontxc; + regs_buff[47] = adapter->stats.xoffrxc; + regs_buff[48] = adapter->stats.xofftxc; + regs_buff[49] = adapter->stats.fcruc; + regs_buff[50] = adapter->stats.prc64; + regs_buff[51] = adapter->stats.prc127; + regs_buff[52] = adapter->stats.prc255; + regs_buff[53] = adapter->stats.prc511; + regs_buff[54] = adapter->stats.prc1023; + regs_buff[55] = adapter->stats.prc1522; + regs_buff[56] = adapter->stats.gprc; + regs_buff[57] = adapter->stats.bprc; + regs_buff[58] = adapter->stats.mprc; + regs_buff[59] = adapter->stats.gptc; + regs_buff[60] = adapter->stats.gorc; + regs_buff[61] = 
adapter->stats.gotc; + regs_buff[62] = adapter->stats.rnbc; + regs_buff[63] = adapter->stats.ruc; + regs_buff[64] = adapter->stats.rfc; + regs_buff[65] = adapter->stats.roc; + regs_buff[66] = adapter->stats.rjc; + regs_buff[67] = adapter->stats.mgprc; + regs_buff[68] = adapter->stats.mgpdc; + regs_buff[69] = adapter->stats.mgptc; + regs_buff[70] = adapter->stats.tor; + regs_buff[71] = adapter->stats.tot; + regs_buff[72] = adapter->stats.tpr; + regs_buff[73] = adapter->stats.tpt; + regs_buff[74] = adapter->stats.ptc64; + regs_buff[75] = adapter->stats.ptc127; + regs_buff[76] = adapter->stats.ptc255; + regs_buff[77] = adapter->stats.ptc511; + regs_buff[78] = adapter->stats.ptc1023; + regs_buff[79] = adapter->stats.ptc1522; + regs_buff[80] = adapter->stats.mptc; + regs_buff[81] = adapter->stats.bptc; + regs_buff[82] = adapter->stats.tsctc; + regs_buff[83] = adapter->stats.iac; + regs_buff[84] = adapter->stats.rpthc; + regs_buff[85] = adapter->stats.hgptc; + regs_buff[86] = adapter->stats.hgorc; + regs_buff[87] = adapter->stats.hgotc; + regs_buff[88] = adapter->stats.lenerrs; + regs_buff[89] = adapter->stats.scvpc; + regs_buff[90] = adapter->stats.hrmpc; + + for (i = 0; i < 4; i++) + regs_buff[91 + i] = rd32(IGC_SRRCTL(i)); + for (i = 0; i < 4; i++) + regs_buff[95 + i] = rd32(IGC_PSRTYPE(i)); + for (i = 0; i < 4; i++) + regs_buff[99 + i] = rd32(IGC_RDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[103 + i] = rd32(IGC_RDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[107 + i] = rd32(IGC_RDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[111 + i] = rd32(IGC_RDH(i)); + for (i = 0; i < 4; i++) + regs_buff[115 + i] = rd32(IGC_RDT(i)); + for (i = 0; i < 4; i++) + regs_buff[119 + i] = rd32(IGC_RXDCTL(i)); + + for (i = 0; i < 10; i++) + regs_buff[123 + i] = rd32(IGC_EITR(i)); + for (i = 0; i < 16; i++) + regs_buff[139 + i] = rd32(IGC_RAL(i)); + for (i = 0; i < 16; i++) + regs_buff[145 + i] = rd32(IGC_RAH(i)); + + for (i = 0; i < 4; i++) + regs_buff[149 + i] = rd32(IGC_TDBAL(i)); + for (i = 0; i < 4; i++) + regs_buff[152 + i] = rd32(IGC_TDBAH(i)); + for (i = 0; i < 4; i++) + regs_buff[156 + i] = rd32(IGC_TDLEN(i)); + for (i = 0; i < 4; i++) + regs_buff[160 + i] = rd32(IGC_TDH(i)); + for (i = 0; i < 4; i++) + regs_buff[164 + i] = rd32(IGC_TDT(i)); + for (i = 0; i < 4; i++) + regs_buff[168 + i] = rd32(IGC_TXDCTL(i)); +} + +static u32 igc_get_msglevel(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void igc_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int igc_nway_reset(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + igc_reinit_locked(adapter); + return 0; +} + +static u32 igc_get_link(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_mac_info *mac = &adapter->hw.mac; + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. 
+ */ + if (!netif_carrier_ok(netdev)) + mac->get_link_status = 1; + + return igc_has_link(adapter); +} + +static int igc_get_eeprom_len(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + return adapter->hw.nvm.word_size * 2; +} + +static int igc_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int first_word, last_word; + u16 *eeprom_buff; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == igc_nvm_eeprom_spi) { + ret_val = hw->nvm.ops.read(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = hw->nvm.ops.read(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int igc_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int max_len, first_word, last_word, ret_val = 0; + u16 *eeprom_buff; + void *ptr; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (hw->mac.type >= igc_i225 && + !igc_get_flash_presence_i225(hw)) { + return -EOPNOTSUPP; + } + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && ret_val == 0) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = hw->nvm.ops.read(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = hw->nvm.ops.write(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum if nvm write succeeded */ + if (ret_val == 0) + hw->nvm.ops.update(hw); + + /* check if need: igc_set_fw_version(adapter); */ + kfree(eeprom_buff); + return ret_val; +} + +static void igc_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = IGC_MAX_RXD; + ring->tx_max_pending = IGC_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + 
ring->tx_pending = adapter->tx_ring_count; +} + +static int igc_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_ring *temp_ring; + u16 new_rx_count, new_tx_count; + int i, err = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + new_rx_count = min_t(u32, ring->rx_pending, IGC_MAX_RXD); + new_rx_count = max_t(u16, new_rx_count, IGC_MIN_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = min_t(u32, ring->tx_pending, IGC_MAX_TXD); + new_tx_count = max_t(u16, new_tx_count, IGC_MIN_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == adapter->tx_ring_count && + new_rx_count == adapter->rx_ring_count) { + /* nothing to do */ + return 0; + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + if (adapter->num_tx_queues > adapter->num_rx_queues) + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_tx_queues)); + else + temp_ring = vmalloc(array_size(sizeof(struct igc_ring), + adapter->num_rx_queues)); + + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igc_down(adapter); + + /* We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the Tx and Rx ring structs. + */ + if (new_tx_count != adapter->tx_ring_count) { + for (i = 0; i < adapter->num_tx_queues; i++) { + memcpy(&temp_ring[i], adapter->tx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_tx_count; + err = igc_setup_tx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_tx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_tx_queues; i++) { + igc_free_tx_resources(adapter->tx_ring[i]); + + memcpy(adapter->tx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->tx_ring_count = new_tx_count; + } + + if (new_rx_count != adapter->rx_ring_count) { + for (i = 0; i < adapter->num_rx_queues; i++) { + memcpy(&temp_ring[i], adapter->rx_ring[i], + sizeof(struct igc_ring)); + + temp_ring[i].count = new_rx_count; + err = igc_setup_rx_resources(&temp_ring[i]); + if (err) { + while (i) { + i--; + igc_free_rx_resources(&temp_ring[i]); + } + goto err_setup; + } + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_free_rx_resources(adapter->rx_ring[i]); + + memcpy(adapter->rx_ring[i], &temp_ring[i], + sizeof(struct igc_ring)); + } + + adapter->rx_ring_count = new_rx_count; + } +err_setup: + igc_up(adapter); + vfree(temp_ring); +clear_reset: + clear_bit(__IGC_RESETTING, &adapter->state); + return err; +} + +static void igc_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == igc_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == igc_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int igc_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = igc_fc_default; + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = igc_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = igc_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + retval = ((hw->phy.media_type == igc_media_type_copper) ? + igc_force_mac_fc(hw) : igc_setup_link(hw)); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + return retval; +} + +static int igc_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + if (adapter->rx_itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->rx_itr_setting; + else + ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; + + if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) { + if (adapter->tx_itr_setting <= 3) + ec->tx_coalesce_usecs = adapter->tx_itr_setting; + else + ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2; + } + + return 0; +} + +static int igc_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (ec->rx_max_coalesced_frames || + ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_max_coalesced_frames || + ec->tx_coalesce_usecs_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_rx_coalesce || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) + return -ENOTSUPP; + + if (ec->rx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->rx_coalesce_usecs > 3 && + ec->rx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->rx_coalesce_usecs == 2) + return -EINVAL; + + if (ec->tx_coalesce_usecs > IGC_MAX_ITR_USECS || + (ec->tx_coalesce_usecs > 3 && + ec->tx_coalesce_usecs < IGC_MIN_ITR_USECS) || + ec->tx_coalesce_usecs == 2) + return -EINVAL; + + if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) + return -EINVAL; + + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGC_FLAG_DMAC) + adapter->flags &= ~IGC_FLAG_DMAC; + } + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) + adapter->rx_itr_setting = ec->rx_coalesce_usecs; + else + adapter->rx_itr_setting = 
ec->rx_coalesce_usecs << 2; + + /* convert to rate of irq's per second */ + if (adapter->flags & IGC_FLAG_QUEUE_PAIRS) + adapter->tx_itr_setting = adapter->rx_itr_setting; + else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3) + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + else + adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; + + for (i = 0; i < adapter->num_q_vectors; i++) { + struct igc_q_vector *q_vector = adapter->q_vector[i]; + + q_vector->tx.work_limit = adapter->tx_work_limit; + if (q_vector->rx.ring) + q_vector->itr_val = adapter->rx_itr_setting; + else + q_vector->itr_val = adapter->tx_itr_setting; + if (q_vector->itr_val && q_vector->itr_val <= 3) + q_vector->itr_val = IGC_START_ITR; + q_vector->set_itr = 1; + } + + return 0; +} + +void igc_write_rss_indir_tbl(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 reg = IGC_RETA(0); + u32 shift = 0; + int i = 0; + + while (i < IGC_RETA_SIZE) { + u32 val = 0; + int j; + + for (j = 3; j >= 0; j--) { + val <<= 8; + val |= adapter->rss_indir_tbl[i + j]; + } + + wr32(reg, val << shift); + reg += 4; + i += 4; + } +} + +static u32 igc_get_rxfh_indir_size(struct net_device *netdev) +{ + return IGC_RETA_SIZE; +} + +static int igc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (!indir) + return 0; + for (i = 0; i < IGC_RETA_SIZE; i++) + indir[i] = adapter->rss_indir_tbl[i]; + + return 0; +} + +static int igc_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 num_queues; + int i; + + /* We do not allow change in unsupported parameters */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + num_queues = adapter->rss_queues; + + /* Verify user input. 
*/ + for (i = 0; i < IGC_RETA_SIZE; i++) + if (indir[i] >= num_queues) + return -EINVAL; + + for (i = 0; i < IGC_RETA_SIZE; i++) + adapter->rss_indir_tbl[i] = indir[i]; + + igc_write_rss_indir_tbl(adapter); + + return 0; +} + +static unsigned int igc_max_channels(struct igc_adapter *adapter) +{ + return igc_get_max_rss_queues(adapter); +} + +static void igc_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + /* Report maximum channels */ + ch->max_combined = igc_max_channels(adapter); + + /* Report info for other vector */ + if (adapter->flags & IGC_FLAG_HAS_MSIX) { + ch->max_other = NON_Q_VECTORS; + ch->other_count = NON_Q_VECTORS; + } + + ch->combined_count = adapter->rss_queues; +} + +static int igc_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; + unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) + return -EINVAL; + + /* Verify other_count is valid and has not been changed */ + if (ch->other_count != NON_Q_VECTORS) + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ + max_combined = igc_max_channels(adapter); + if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; + igc_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. + */ + return igc_reinit_queues(adapter); + } + + return 0; +} + +static u32 igc_get_priv_flags(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + u32 priv_flags = 0; + + if (adapter->flags & IGC_FLAG_RX_LEGACY) + priv_flags |= IGC_PRIV_FLAGS_LEGACY_RX; + + return priv_flags; +} + +static int igc_set_priv_flags(struct net_device *netdev, u32 priv_flags) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + unsigned int flags = adapter->flags; + + flags &= ~IGC_FLAG_RX_LEGACY; + if (priv_flags & IGC_PRIV_FLAGS_LEGACY_RX) + flags |= IGC_FLAG_RX_LEGACY; + + if (flags != adapter->flags) { + adapter->flags = flags; + + /* reset interface to repopulate queues */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + } + + return 0; +} + +static int igc_ethtool_begin(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_get_sync(&adapter->pdev->dev); + return 0; +} + +static void igc_ethtool_complete(struct net_device *netdev) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + + pm_runtime_put(&adapter->pdev->dev); +} + +static int igc_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 status; + u32 speed; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* supported link modes */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 2500baseT_Full); + + /* twisted pair */ + 
cmd->base.port = PORT_TP; + cmd->base.phy_address = hw->phy.addr; + + /* advertising link modes */ + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + + /* set autoneg settings */ + if (hw->mac.autoneg == 1) { + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + } + + switch (hw->fc.requested_mode) { + case igc_fc_full: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + break; + case igc_fc_rx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + case igc_fc_tx_pause: + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + } + + status = rd32(IGC_STATUS); + + if (status & IGC_STATUS_LU) { + if (status & IGC_STATUS_SPEED_1000) { + /* For I225, STATUS will indicate 1G speed in both + * 1 Gbps and 2.5 Gbps link modes. + * An additional bit is used + * to differentiate between 1 Gbps and 2.5 Gbps. + */ + if (hw->mac.type == igc_i225 && + (status & IGC_STATUS_SPEED_2500)) { + speed = SPEED_2500; + hw_dbg("2500 Mbs, "); + } else { + speed = SPEED_1000; + hw_dbg("1000 Mbs, "); + } + } else if (status & IGC_STATUS_SPEED_100) { + speed = SPEED_100; + hw_dbg("100 Mbs, "); + } else { + speed = SPEED_10; + hw_dbg("10 Mbs, "); + } + if ((status & IGC_STATUS_FD) || + hw->phy.media_type != igc_media_type_copper) + cmd->base.duplex = DUPLEX_FULL; + else + cmd->base.duplex = DUPLEX_HALF; + } else { + speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + cmd->base.speed = speed; + if (hw->mac.autoneg) + cmd->base.autoneg = AUTONEG_ENABLE; + else + cmd->base.autoneg = AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == igc_media_type_copper) + cmd->base.eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + cmd->base.eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int igc_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 advertising; + + /* When adapter in resetting mode, autoneg/speed/duplex + * cannot be changed + */ + if (igc_check_reset_block(hw)) { + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when reset is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (cmd->base.eth_tp_mdix_ctrl) { + if (cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO && + cmd->base.autoneg != AUTONEG_ENABLE) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + hw->phy.autoneg_advertised = advertising; + if (adapter->fc_autoneg) + hw->fc.requested_mode = igc_fc_default; + } else { + /* calling this overrides forced MDI setting */ + dev_info(&adapter->pdev->dev, + "Force mode currently not supported\n"); + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (cmd->base.eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = cmd->base.eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + igc_down(adapter); + igc_up(adapter); + } else { + igc_reset(adapter); + } + + clear_bit(__IGC_RESETTING, &adapter->state); + + return 0; +} + +static const struct ethtool_ops igc_ethtool_ops = { + .get_drvinfo = igc_get_drvinfo, + .get_regs_len = igc_get_regs_len, + .get_regs = igc_get_regs, + .get_msglevel = igc_get_msglevel, + .set_msglevel = igc_set_msglevel, + .nway_reset = igc_nway_reset, + .get_link = igc_get_link, + .get_eeprom_len = igc_get_eeprom_len, + .get_eeprom = igc_get_eeprom, + .set_eeprom = igc_set_eeprom, + .get_ringparam = igc_get_ringparam, + .set_ringparam = igc_set_ringparam, + .get_pauseparam = igc_get_pauseparam, + .set_pauseparam = igc_set_pauseparam, + .get_coalesce = igc_get_coalesce, + .set_coalesce = igc_set_coalesce, + .get_rxfh_indir_size = igc_get_rxfh_indir_size, + .get_rxfh = igc_get_rxfh, + .set_rxfh = igc_set_rxfh, + .get_channels = igc_get_channels, + .set_channels = igc_set_channels, + .get_priv_flags = igc_get_priv_flags, + .set_priv_flags = igc_set_priv_flags, + .begin = igc_ethtool_begin, + .complete = igc_ethtool_complete, + .get_link_ksettings = igc_get_link_ksettings, + .set_link_ksettings = igc_set_link_ksettings, +}; + +void igc_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &igc_ethtool_ops; +} diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index c50414f48f0d..7c88b7bd4799 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -55,6 +55,7 @@ enum igc_media_type { enum igc_nvm_type { igc_nvm_unknown = 0, + igc_nvm_eeprom_spi, igc_nvm_flash_hw, igc_nvm_invm, }; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index f20183037fb2..87a11879bf2d 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -12,6 +12,8 @@ #define DRV_VERSION "0.0.1-k" #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + static int debug = -1; MODULE_AUTHOR("Intel Corporation, "); @@ -66,7 +68,7 @@ enum latency_range { latency_invalid = 255 }; -static void igc_reset(struct igc_adapter *adapter) +void igc_reset(struct igc_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct igc_hw *hw = &adapter->hw; @@ -150,7 +152,7 @@ static 
void igc_get_hw_control(struct igc_adapter *adapter) * * Free all transmit software resources */ -static void igc_free_tx_resources(struct igc_ring *tx_ring) +void igc_free_tx_resources(struct igc_ring *tx_ring) { igc_clean_tx_ring(tx_ring); @@ -261,7 +263,7 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter) * * Return 0 on success, negative on failure */ -static int igc_setup_tx_resources(struct igc_ring *tx_ring) +int igc_setup_tx_resources(struct igc_ring *tx_ring) { struct device *dev = tx_ring->dev; int size = 0; @@ -381,7 +383,7 @@ static void igc_clean_all_rx_rings(struct igc_adapter *adapter) * * Free all receive software resources */ -static void igc_free_rx_resources(struct igc_ring *rx_ring) +void igc_free_rx_resources(struct igc_ring *rx_ring) { igc_clean_rx_ring(rx_ring); @@ -418,7 +420,7 @@ static void igc_free_all_rx_resources(struct igc_adapter *adapter) * * Returns 0 on success, negative on failure */ -static int igc_setup_rx_resources(struct igc_ring *rx_ring) +int igc_setup_rx_resources(struct igc_ring *rx_ring) { struct device *dev = rx_ring->dev; int size, desc_len; @@ -1703,7 +1705,7 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) * igc_up - Open the interface and prepare it to handle traffic * @adapter: board private structure */ -static void igc_up(struct igc_adapter *adapter) +void igc_up(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; int i = 0; @@ -1748,7 +1750,7 @@ static void igc_nfc_filter_exit(struct igc_adapter *adapter) * igc_down - Close the interface * @adapter: board private structure */ -static void igc_down(struct igc_adapter *adapter) +void igc_down(struct igc_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct igc_hw *hw = &adapter->hw; @@ -1810,7 +1812,7 @@ static void igc_down(struct igc_adapter *adapter) igc_clean_all_rx_rings(adapter); } -static void igc_reinit_locked(struct igc_adapter *adapter) +void igc_reinit_locked(struct igc_adapter *adapter) { WARN_ON(in_interrupt()); while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) @@ -1922,7 +1924,7 @@ static void igc_configure(struct igc_adapter *adapter) /** * igc_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table - * @adapter: Pointer to adapter structure + * @adapter: address of board private structure * @index: Index of the RAR entry which need to be synced with MAC table */ static void igc_rar_set_index(struct igc_adapter *adapter, u32 index) @@ -2298,7 +2300,7 @@ static void igc_update_phy_info(struct timer_list *t) * igc_has_link - check shared code for link and determine up/down * @adapter: pointer to driver private info */ -static bool igc_has_link(struct igc_adapter *adapter) +bool igc_has_link(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; bool link_active = false; @@ -2956,22 +2958,21 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter, { struct igc_q_vector *q_vector; struct igc_ring *ring; - int ring_count, size; + int ring_count; /* igc only supports 1 Tx and/or 1 Rx queue per vector */ if (txr_count > 1 || rxr_count > 1) return -ENOMEM; ring_count = txr_count + rxr_count; - size = sizeof(struct igc_q_vector) + - (sizeof(struct igc_ring) * ring_count); /* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; if (!q_vector) - q_vector = kzalloc(size, GFP_KERNEL); + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); else - memset(q_vector, 0, size); + memset(q_vector, 0, struct_size(q_vector, ring, ring_count)); 
if (!q_vector) return -ENOMEM; @@ -3501,6 +3502,57 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) return value; } +int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx) +{ + struct pci_dev *pdev = adapter->pdev; + struct igc_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + goto err_inval; + case SPEED_2500 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; + break; + case SPEED_2500 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + /** * igc_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -3568,7 +3620,7 @@ static int igc_probe(struct pci_dev *pdev, hw = &adapter->hw; hw->back = adapter; adapter->port_num = hw->bus.func; - adapter->msg_enable = GENMASK(debug - 1, 0); + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); err = pci_save_state(pdev); if (err) @@ -3584,7 +3636,7 @@ static int igc_probe(struct pci_dev *pdev, hw->hw_addr = adapter->io_addr; netdev->netdev_ops = &igc_netdev_ops; - + igc_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; netdev->mem_start = pci_resource_start(pdev, 0); @@ -3744,8 +3796,8 @@ static struct pci_driver igc_driver = { .remove = igc_remove, }; -static void igc_set_flag_queue_pairs(struct igc_adapter *adapter, - const u32 max_rss_queues) +void igc_set_flag_queue_pairs(struct igc_adapter *adapter, + const u32 max_rss_queues) { /* Determine if we need to pair queues. 
*/ /* If rss_queues > half of max_rss_queues, pair the queues in @@ -3757,7 +3809,7 @@ static void igc_set_flag_queue_pairs(struct igc_adapter *adapter, adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; } -static unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) +unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter) { unsigned int max_rss_queues; @@ -3836,6 +3888,32 @@ static int igc_sw_init(struct igc_adapter *adapter) return 0; } +/** + * igc_reinit_queues - return error + * @adapter: pointer to adapter structure + */ +int igc_reinit_queues(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err = 0; + + if (netif_running(netdev)) + igc_close(netdev); + + igc_reset_interrupt_capability(adapter); + + if (igc_init_interrupt_scheme(adapter, true)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + if (netif_running(netdev)) + err = igc_open(netdev); + + return err; +} + /** * igc_get_hw_dev - return device * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 38e43e6fc1c7..4c8f96a9a148 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -152,7 +152,6 @@ void igc_power_down_phy_copper(struct igc_hw *hw) s32 igc_check_downshift(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; - u16 phy_data, offset, mask; s32 ret_val; switch (phy->type) { @@ -161,15 +160,8 @@ s32 igc_check_downshift(struct igc_hw *hw) /* speed downshift not supported */ phy->speed_downgraded = false; ret_val = 0; - goto out; } - ret_val = phy->ops.read_reg(hw, offset, &phy_data); - - if (!ret_val) - phy->speed_downgraded = (phy_data & mask) ? true : false; - -out: return ret_val; } diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index a1bd3216c906..5afe7a8d3faf 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -80,6 +80,9 @@ /* MSI-X Table Register Descriptions */ #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ +/* Redirection Table - RW Array */ +#define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) + /* Receive Register Descriptions */ #define IGC_RCTL 0x00100 /* Rx Control - RW */ #define IGC_SRRCTL(_n) (0x0C00C + ((_n) * 0x40)) @@ -188,7 +191,6 @@ #define IGC_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ #define IGC_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ #define IGC_LENERRS 0x04138 /* Length Errors Count */ -#define IGC_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ #define IGC_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ /* Management registers */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 1e49716f52bc..109f8de5a1c2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1048,7 +1048,7 @@ mac_reset_top: * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. 
*/ - hw->mac.num_rar_entries = 128; + hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 62e6499e4146..cc3196ae5aea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -836,12 +836,10 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring; int node = NUMA_NO_NODE; int cpu = -1; - int ring_count, size; + int ring_count; u8 tcs = adapter->hw_tcs; ring_count = txr_count + rxr_count + xdp_count; - size = sizeof(struct ixgbe_q_vector) + - (sizeof(struct ixgbe_ring) * ring_count); /* customize cpu for Flow Director mapping */ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { @@ -855,9 +853,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, } /* allocate q_vector and rings */ - q_vector = kzalloc_node(size, GFP_KERNEL, node); + q_vector = kzalloc_node(struct_size(q_vector, ring, ring_count), + GFP_KERNEL, node); if (!q_vector) - q_vector = kzalloc(size, GFP_KERNEL); + q_vector = kzalloc(struct_size(q_vector, ring, ring_count), + GFP_KERNEL); if (!q_vector) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index daff8183534b..e100054a3765 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -3953,8 +3954,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) else mrqc = IXGBE_MRQC_VMDQRSS64EN; - /* Enable L3/L4 for Tx Switched packets */ - mrqc |= IXGBE_MRQC_L3L4TXSWEN; + /* Enable L3/L4 for Tx Switched packets only for X550, + * older devices do not support this feature + */ + if (hw->mac.type >= ixgbe_mac_X550) + mrqc |= IXGBE_MRQC_L3L4TXSWEN; } else { if (tcs > 4) mrqc = IXGBE_MRQC_RTRSS8TCEN; @@ -6415,7 +6419,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) { struct device *dev = tx_ring->dev; int orig_node = dev_to_node(dev); - int ring_node = -1; + int ring_node = NUMA_NO_NODE; int size; size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; @@ -6509,7 +6513,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, { struct device *dev = rx_ring->dev; int orig_node = dev_to_node(dev); - int ring_node = -1; + int ring_node = NUMA_NO_NODE; int size; size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; @@ -9910,7 +9914,8 @@ static void ixgbe_del_udp_tunnel_port(struct net_device *dev, static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags) + u16 flags, + struct netlink_ext_ack *extack) { /* guarantee we can provide a unique filter for the unicast address */ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { @@ -10225,6 +10230,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; struct ixgbe_adapter *adapter = netdev_priv(dev); struct bpf_prog *old_prog; + bool need_reset; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) return -EINVAL; @@ -10247,9 +10253,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return -ENOMEM; old_prog = xchg(&adapter->xdp_prog, prog); + need_reset = (!!prog != !!old_prog); /* If transitioning 
XDP modes reconfigure rings */ - if (!!prog != !!old_prog) { + if (need_reset) { int err = ixgbe_setup_tc(dev, adapter->hw_tcs); if (err) { @@ -10265,6 +10272,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) if (old_prog) bpf_prog_put(old_prog); + /* Kick start the NAPI context if there is an AF_XDP socket open + * on that queue id. This so that receiving will start. + */ + if (need_reset && prog) + for (i = 0; i < adapter->num_rx_queues; i++) + if (adapter->xdp_ring[i]->xsk_umem) + (void)ixgbe_xsk_async_xmit(adapter->netdev, i); + return 0; } @@ -10279,9 +10294,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) xdp->prog_id = adapter->xdp_prog ? adapter->xdp_prog->aux->id : 0; return 0; - case XDP_QUERY_XSK_UMEM: - return ixgbe_xsk_umem_query(adapter, &xdp->xsk.umem, - xdp->xsk.queue_id); case XDP_SETUP_XSK_UMEM: return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h index 53d4089f5644..d93a690aff74 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h @@ -30,8 +30,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring); struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring); -int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, - u16 qid); int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, u16 qid); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 65c3e2c979d4..bfe95ce0bd7f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, ixgbe_txrx_ring_disable(adapter, qid); err = ixgbe_add_xsk_umem(adapter, umem, qid); + if (err) + return err; - if (if_running) + if (if_running) { ixgbe_txrx_ring_enable(adapter, qid); - return err; + /* Kick start the NAPI context so that receiving will start */ + err = ixgbe_xsk_async_xmit(adapter->netdev, qid); + if (err) + return err; + } + + return 0; } static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) @@ -174,23 +182,6 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) return 0; } -int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem, - u16 qid) -{ - if (qid >= adapter->num_rx_queues) - return -EINVAL; - - if (adapter->xsk_umems) { - if (qid >= adapter->num_xsk_umems) - return -EINVAL; - *umem = adapter->xsk_umems[qid]; - return 0; - } - - *umem = NULL; - return 0; -} - int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem, u16 qid) { @@ -634,7 +625,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) dma_addr_t dma; while (budget-- > 0) { - if (unlikely(!ixgbe_desc_unused(xdp_ring))) { + if (unlikely(!ixgbe_desc_unused(xdp_ring)) || + !netif_carrier_ok(xdp_ring->netdev)) { work_done = false; break; } diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index a5ab6f3403ae..763ee5281177 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -2034,10 +2034,9 @@ static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) ctxbi->len, PCI_DMA_TODEVICE); - ctxbi->mapping = 0; - ctxbi->len = 0; + ctxbi->mapping = 0; + 
ctxbi->len = 0; } - } static int diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 32ac9045cdae..f9bb890733b5 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -112,10 +112,12 @@ struct ltq_etop_priv { static int ltq_etop_alloc_skb(struct ltq_etop_chan *ch) { + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN); if (!ch->skb[ch->dma.desc]) return -ENOMEM; - ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, DMA_FROM_DEVICE); ch->dma.desc_base[ch->dma.desc].addr = @@ -487,7 +489,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) netif_trans_update(dev); spin_lock_irqsave(&priv->lock, flags); - desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, + desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len, DMA_TO_DEVICE)) - byte_offset; wmb(); desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 2d4d10a017e5..d29104de0d53 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -335,7 +335,6 @@ static const struct net_device_ops xrx200_netdev_ops = { .ndo_start_xmit = xrx200_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = eth_change_mtu, }; static irqreturn_t xrx200_dma_irq(int irq, void *ptr) diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2f427271a793..292a668ce88e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) ret = mv643xx_eth_shared_of_probe(pdev); if (ret) - return ret; + goto err_put_clk; pd = dev_get_platdata(&pdev->dev); msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) infer_hw_params(msp); return 0; + +err_put_clk: + if (!IS_ERR(msp->clk)) + clk_disable_unprepare(msp->clk); + return ret; } static int mv643xx_eth_shared_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9d4568eb2297..c0a3718b2e2a 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -436,6 +437,7 @@ struct mvneta_port { struct device_node *dn; unsigned int tx_csum_limit; struct phylink *phylink; + struct phy *comphy; struct mvneta_bm *bm_priv; struct mvneta_bm_pool *pool_long; @@ -2146,7 +2148,7 @@ err_drop_frame: if (unlikely(!skb)) goto err_drop_frame_ret_pool; - dma_sync_single_range_for_cpu(dev->dev.parent, + dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, rx_desc->buf_phys_addr, MVNETA_MH_SIZE + NET_SKB_PAD, rx_bytes, @@ -3147,10 +3149,27 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) return 0; } +static int mvneta_comphy_init(struct mvneta_port *pp) +{ + int ret; + + if (!pp->comphy) + return 0; + + ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, + pp->phy_interface); + if (ret) + return ret; + + return phy_power_on(pp->comphy); +} + static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; + WARN_ON(mvneta_comphy_init(pp)); + mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3213,6 +3232,8 @@ static void mvneta_stop_dev(struct mvneta_port *pp) mvneta_tx_reset(pp); mvneta_rx_reset(pp); + + WARN_ON(phy_power_off(pp->comphy)); } static void mvneta_percpu_enable(void *arg) @@ -3338,6 +3359,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) static void mvneta_validate(struct net_device *ndev, unsigned long *supported, struct phylink_link_state *state) { + struct mvneta_port *pp = netdev_priv(ndev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ @@ -3358,8 +3380,13 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, phylink_set(mask, Pause); /* Half-duplex at speeds higher than 100Mbit is unsupported */ - phylink_set(mask, 1000baseT_Full); - phylink_set(mask, 1000baseX_Full); + if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + } + if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) { + phylink_set(mask, 2500baseX_Full); + } if (!phy_interface_mode_is_8023z(state->interface)) { /* 10M and 100M are only supported in non-802.3z mode */ @@ -3373,6 +3400,11 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported, __ETHTOOL_LINK_MODE_MASK_NBITS); bitmap_and(state->advertising, state->advertising, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + + /* We can only operate at 2500BaseX or 1000BaseX. If requested + * to advertise both, only report advertising at 2500BaseX. + */ + phylink_helper_basex_speed(state); } static int mvneta_mac_link_state(struct net_device *ndev, @@ -3384,7 +3416,9 @@ static int mvneta_mac_link_state(struct net_device *ndev, gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); if (gmac_stat & MVNETA_GMAC_SPEED_1000) - state->speed = SPEED_1000; + state->speed = + state->interface == PHY_INTERFACE_MODE_2500BASEX ? 
+ SPEED_2500 : SPEED_1000; else if (gmac_stat & MVNETA_GMAC_SPEED_100) state->speed = SPEED_100; else @@ -3499,12 +3533,23 @@ static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, MVNETA_GMAC_FORCE_LINK_DOWN); } + /* When at 2.5G, the link partner can send frames with shortened * preambles. */ if (state->speed == SPEED_2500) new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE; + if (pp->comphy && pp->phy_interface != state->interface && + (state->interface == PHY_INTERFACE_MODE_SGMII || + state->interface == PHY_INTERFACE_MODE_1000BASEX || + state->interface == PHY_INTERFACE_MODE_2500BASEX)) { + pp->phy_interface = state->interface; + + WARN_ON(phy_power_off(pp->comphy)); + WARN_ON(mvneta_comphy_init(pp)); + } + if (new_ctrl0 != gmac_ctrl0) mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); if (new_ctrl2 != gmac_ctrl2) @@ -4404,7 +4449,7 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) if (phy_mode == PHY_INTERFACE_MODE_QSGMII) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); else if (phy_mode == PHY_INTERFACE_MODE_SGMII || - phy_mode == PHY_INTERFACE_MODE_1000BASEX) + phy_interface_mode_is_8023z(phy_mode)) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); else if (!phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; @@ -4421,6 +4466,7 @@ static int mvneta_probe(struct platform_device *pdev) struct mvneta_port *pp; struct net_device *dev; struct phylink *phylink; + struct phy *comphy; const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; const char *mac_from; @@ -4446,6 +4492,14 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_irq; } + comphy = devm_of_phy_get(&pdev->dev, dn, NULL); + if (comphy == ERR_PTR(-EPROBE_DEFER)) { + err = -EPROBE_DEFER; + goto err_free_irq; + } else if (IS_ERR(comphy)) { + comphy = NULL; + } + phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode, &mvneta_phylink_ops); if (IS_ERR(phylink)) { @@ -4462,6 +4516,7 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); spin_lock_init(&pp->lock); pp->phylink = phylink; + pp->comphy = comphy; pp->phy_interface = phy_mode; pp->dn = dn; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 398328f10743..ff0f4c503f53 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -389,7 +389,7 @@ #define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) #define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) #define MVPP2_GMAC_IN_BAND_RESTART_AN BIT(4) -#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVPP2_GMAC_AN_SPEED_EN BIT(7) #define MVPP2_GMAC_FC_ADV_EN BIT(9) @@ -402,8 +402,8 @@ #define MVPP2_GMAC_STATUS0_GMII_SPEED BIT(1) #define MVPP2_GMAC_STATUS0_MII_SPEED BIT(2) #define MVPP2_GMAC_STATUS0_FULL_DUPLEX BIT(3) -#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(6) -#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(7) +#define MVPP2_GMAC_STATUS0_RX_PAUSE BIT(4) +#define MVPP2_GMAC_STATUS0_TX_PAUSE BIT(5) #define MVPP2_GMAC_STATUS0_AN_COMPLETE BIT(11) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 @@ -430,6 +430,8 @@ #define MVPP22_XLG_CTRL0_REG 0x100 #define MVPP22_XLG_CTRL0_PORT_EN BIT(0) #define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_FORCE_LINK_DOWN BIT(2) +#define MVPP22_XLG_CTRL0_FORCE_LINK_PASS BIT(3) #define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) #define MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN 
BIT(8) #define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) @@ -481,6 +483,7 @@ /* XPCS registers. PPv2.2 only */ #define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) #define MVPP22_XPCS_CFG0 0x0 +#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0) #define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) #define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) @@ -549,8 +552,8 @@ #define MVPP2_MAX_TSO_SEGS 300 #define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) -/* Dfault number of RXQs in use */ -#define MVPP2_DEFAULT_RXQ 1 +/* Max number of RXQs per port */ +#define MVPP2_PORT_MAX_RXQ 32 /* Max number of Rx descriptors */ #define MVPP2_MAX_RXD_MAX 1024 @@ -803,7 +806,7 @@ struct mvpp2_port { u8 id; /* Index of the port from the "group of ports" complex point - * of view + * of view. This is specific to PPv2.2. */ int gop_id; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 16066c2d5b3a..25fbed2b8d94 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -965,6 +965,11 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) } /* Port configuration routines */ +static bool mvpp2_is_xlg(phy_interface_t interface) +{ + return interface == PHY_INTERFACE_MODE_10GKR || + interface == PHY_INTERFACE_MODE_XAUI; +} static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) { @@ -1010,27 +1015,20 @@ static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); u32 val; - /* XPCS */ val = readl(xpcs + MVPP22_XPCS_CFG0); val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); writel(val, xpcs + MVPP22_XPCS_CFG0); - /* MPCS */ val = readl(mpcs + MVPP22_MPCS_CTRL); val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; writel(val, mpcs + MVPP22_MPCS_CTRL); val = readl(mpcs + MVPP22_MPCS_CLK_RESET); - val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | - MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); + val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7); val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); writel(val, mpcs + MVPP22_MPCS_CLK_RESET); - - val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; - val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; - writel(val, mpcs + MVPP22_MPCS_CLK_RESET); } static int mvpp22_gop_init(struct mvpp2_port *port) @@ -1090,9 +1088,8 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) u32 val; if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { /* Enable the GMAC link status irq for this port */ val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; @@ -1102,7 +1099,7 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) if (port->gop_id == 0) { /* Enable the XLG/GIG irqs for this port */ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); - if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) + if (mvpp2_is_xlg(port->phy_interface)) val |= MVPP22_XLG_EXT_INT_MASK_XLG; else val |= MVPP22_XLG_EXT_INT_MASK_GIG; @@ -1122,9 +1119,8 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port) } if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == 
PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK); val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); @@ -1135,10 +1131,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port) { u32 val; - if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + if (port->phylink || + phy_interface_mode_is_rgmii(port->phy_interface) || + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { val = readl(port->base + MVPP22_GMAC_INT_MASK); val |= MVPP22_GMAC_INT_MASK_LINK_STAT; writel(val, port->base + MVPP22_GMAC_INT_MASK); @@ -1183,12 +1179,9 @@ static void mvpp2_port_enable(struct mvpp2_port *port) u32 val; /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val |= MVPP22_XLG_CTRL0_PORT_EN | - MVPP22_XLG_CTRL0_MAC_RESET_DIS; + val |= MVPP22_XLG_CTRL0_PORT_EN; val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; writel(val, port->base + MVPP22_XLG_CTRL0_REG); } else { @@ -1204,21 +1197,15 @@ static void mvpp2_port_disable(struct mvpp2_port *port) u32 val; /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val &= ~MVPP22_XLG_CTRL0_PORT_EN; writel(val, port->base + MVPP22_XLG_CTRL0_REG); - - /* Disable & reset should be done separately */ - val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); - } else { - val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~(MVPP2_GMAC_PORT_EN_MASK); - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } + + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~(MVPP2_GMAC_PORT_EN_MASK); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ @@ -1244,9 +1231,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port, else val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; - if (port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) + if (phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) val |= MVPP2_GMAC_PCS_LB_EN_MASK; else val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; @@ -1371,22 +1357,75 @@ static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) return -EOPNOTSUPP; } -static void mvpp2_port_reset(struct mvpp2_port *port) +static void mvpp2_mac_reset_assert(struct mvpp2_port *port) { - u32 val; unsigned int i; + u32 val; /* Read the GOP statistics to reset the hardware counters */ for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - 
~MVPP2_GMAC_PORT_RESET_MASK; + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) | + MVPP2_GMAC_PORT_RESET_MASK; writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & - MVPP2_GMAC_PORT_RESET_MASK) - continue; + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG) & + ~MVPP22_XLG_CTRL0_MAC_RESET_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } +} + +static void mvpp22_pcs_reset_assert(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs, *xpcs; + u32 val; + + if (port->priv->hw_version != MVPP22 || port->gop_id != 0) + return; + + mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); + val |= MVPP22_MPCS_CLK_RESET_DIV_SET; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + + val = readl(xpcs + MVPP22_XPCS_CFG0); + writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); +} + +static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs, *xpcs; + u32 val; + + if (port->priv->hw_version != MVPP22 || port->gop_id != 0) + return; + + mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_10GKR: + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | + MAC_CLK_RESET_SD_TX; + val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + break; + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_RXAUI: + val = readl(xpcs + MVPP22_XPCS_CFG0); + writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0); + break; + default: + break; + } } /* Change maximum receive size of the port */ @@ -2462,8 +2501,7 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) mvpp22_gop_mask_irq(port); - if (port->gop_id == 0 && - port->phy_interface == PHY_INTERFACE_MODE_10GKR) { + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_INT_STAT); if (val & MVPP22_XLG_INT_STAT_LINK) { event = true; @@ -2472,9 +2510,8 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) link = true; } } else if (phy_interface_mode_is_rgmii(port->phy_interface) || - port->phy_interface == PHY_INTERFACE_MODE_SGMII || - port->phy_interface == PHY_INTERFACE_MODE_1000BASEX || - port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) { + phy_interface_mode_is_8023z(port->phy_interface) || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) { val = readl(port->base + MVPP22_GMAC_INT_STAT); if (val & MVPP22_GMAC_INT_STAT_LINK) { event = true; @@ -3143,19 +3180,26 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port) { u32 ctrl3; + /* Set the GMAC & XLG MAC in reset */ + mvpp2_mac_reset_assert(port); + + /* Set the MPCS and XPCS in reset */ + mvpp22_pcs_reset_assert(port); + /* comphy reconfiguration */ mvpp22_comphy_init(port); /* gop reconfiguration */ mvpp22_gop_init(port); + mvpp22_pcs_reset_deassert(port); + /* Only GOP port 0 has an XLG MAC */ if (port->gop_id == 0) { ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR) + if 
(mvpp2_is_xlg(port->phy_interface)) ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; else ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; @@ -3163,9 +3207,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port) writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); } - if (port->gop_id == 0 && - (port->phy_interface == PHY_INTERFACE_MODE_XAUI || - port->phy_interface == PHY_INTERFACE_MODE_10GKR)) + if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) mvpp2_xlg_max_rx_size_set(port); else mvpp2_gmac_max_rx_size_set(port); @@ -3483,6 +3525,9 @@ static int mvpp2_stop(struct net_device *dev) cancel_delayed_work_sync(&port->stats_work); + mvpp2_mac_reset_assert(port); + mvpp22_pcs_reset_assert(port); + return 0; } @@ -4072,8 +4117,8 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, snprintf(irqname, sizeof(irqname), "hif%d", i); if (queue_mode == MVPP2_QDIST_MULTI_MODE) { - v->first_rxq = i * MVPP2_DEFAULT_RXQ; - v->nrxqs = MVPP2_DEFAULT_RXQ; + v->first_rxq = i; + v->nrxqs = 1; } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && i == (port->nqvecs - 1)) { v->first_rxq = 0; @@ -4166,8 +4211,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; - if (port->nrxqs % MVPP2_DEFAULT_RXQ || - port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) + if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ) return -EINVAL; /* Disable port */ @@ -4374,7 +4418,7 @@ static void mvpp2_phylink_validate(struct net_device *dev, case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - if (port->gop_id == 0) + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) goto empty_set; break; default: @@ -4414,6 +4458,7 @@ static void mvpp2_phylink_validate(struct net_device *dev, case PHY_INTERFACE_MODE_2500BASEX: phylink_set(mask, 1000baseT_Full); phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseT_Full); phylink_set(mask, 2500baseX_Full); break; default: @@ -4505,131 +4550,198 @@ static int mvpp2_phylink_mac_link_state(struct net_device *dev, static void mvpp2_mac_an_restart(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); - u32 val; - - if (port->phy_interface != PHY_INTERFACE_MODE_SGMII) - return; + u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - /* The RESTART_AN bit is cleared by the h/w after restarting the AN - * process. 
- */ - val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); + writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN, + port->base + MVPP2_GMAC_AUTONEG_CONFIG); } static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { - u32 ctrl0, ctrl4; + u32 old_ctrl0, ctrl0; + u32 old_ctrl4, ctrl4; - ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); - ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); + old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); + old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); + + ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS; if (state->pause & MLO_PAUSE_TX) ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + else + ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + if (state->pause & MLO_PAUSE_RX) ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + else + ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC | MVPP22_XLG_CTRL4_EN_IDLE_CHECK; - writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); - writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); + if (old_ctrl0 != ctrl0) + writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); + if (old_ctrl4 != ctrl4) + writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); + + if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) { + while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) & + MVPP22_XLG_CTRL0_MAC_RESET_DIS)) + continue; + } } static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { - u32 an, ctrl0, ctrl2, ctrl4; - - an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + u32 old_an, an; + u32 old_ctrl0, ctrl0; + u32 old_ctrl2, ctrl2; + u32 old_ctrl4, ctrl4; - /* Force link down */ - an &= ~MVPP2_GMAC_FORCE_LINK_PASS; - an |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); - - /* Set the GMAC in a reset state */ - ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG); an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN | - MVPP2_GMAC_FORCE_LINK_DOWN); + MVPP2_GMAC_IN_BAND_AUTONEG | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS); ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK; - ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK); - - if (state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX) { - /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can - * they negotiate duplex: they are always operating with a fixed - * speed of 1000/2500Mbps in full duplex, so force 1000/2500 - * speed and full duplex here. 
- */ - ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; - an |= MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX; - } else if (!phy_interface_mode_is_rgmii(state->interface)) { - an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG; + ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK | + MVPP2_GMAC_PCS_ENABLE_MASK); + ctrl4 &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); + + /* Configure port type */ + if (phy_interface_mode_is_8023z(state->interface)) { + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK; + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK; + ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + } else if (phy_interface_mode_is_rgmii(state->interface)) { + ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; + ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; } - if (state->duplex) - an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; + /* Configure advertisement bits */ if (phylink_test(state->advertising, Pause)) an |= MVPP2_GMAC_FC_ADV_EN; if (phylink_test(state->advertising, Asym_Pause)) an |= MVPP2_GMAC_FC_ADV_ASM_EN; - if (state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == PHY_INTERFACE_MODE_2500BASEX) { - an |= MVPP2_GMAC_IN_BAND_AUTONEG; - ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; + /* Configure negotiation style */ + if (!phylink_autoneg_inband(mode)) { + /* Phy or fixed speed - no in-band AN */ + if (state->duplex) + an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; - ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); - ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_DP_CLK_SEL | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + if (state->speed == SPEED_1000 || state->speed == SPEED_2500) + an |= MVPP2_GMAC_CONFIG_GMII_SPEED; + else if (state->speed == SPEED_100) + an |= MVPP2_GMAC_CONFIG_MII_SPEED; if (state->pause & MLO_PAUSE_TX) ctrl4 |= MVPP22_CTRL4_TX_FC_EN; if (state->pause & MLO_PAUSE_RX) ctrl4 |= MVPP22_CTRL4_RX_FC_EN; - } else if (phy_interface_mode_is_rgmii(state->interface)) { - an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII in-band mode receives the speed and duplex from + * the PHY. Flow control information is not received. */ + an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS); + an |= MVPP2_GMAC_IN_BAND_AUTONEG | + MVPP2_GMAC_AN_SPEED_EN | + MVPP2_GMAC_AN_DUPLEX_EN; - if (state->speed == SPEED_1000) - an |= MVPP2_GMAC_CONFIG_GMII_SPEED; - else if (state->speed == SPEED_100) - an |= MVPP2_GMAC_CONFIG_MII_SPEED; + if (state->pause & MLO_PAUSE_TX) + ctrl4 |= MVPP22_CTRL4_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl4 |= MVPP22_CTRL4_RX_FC_EN; + } else if (phy_interface_mode_is_8023z(state->interface)) { + /* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can + * they negotiate duplex: they are always operating with a fixed + * speed of 1000/2500Mbps in full duplex, so force 1000/2500 + * speed and full duplex here. 
+ */ + ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK; + an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_FORCE_LINK_PASS); + an |= MVPP2_GMAC_IN_BAND_AUTONEG | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX; - ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL; - ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | - MVPP22_CTRL4_SYNC_BYPASS_DIS | - MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + if (state->pause & MLO_PAUSE_AN && state->an_enabled) { + an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG; + } else { + if (state->pause & MLO_PAUSE_TX) + ctrl4 |= MVPP22_CTRL4_TX_FC_EN; + if (state->pause & MLO_PAUSE_RX) + ctrl4 |= MVPP22_CTRL4_RX_FC_EN; + } } - writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); - writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); - writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); - writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +/* Some fields of the auto-negotiation register require the port to be down when + * their value is updated. + */ +#define MVPP2_GMAC_AN_PORT_DOWN_MASK \ + (MVPP2_GMAC_IN_BAND_AUTONEG | \ + MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \ + MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \ + MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \ + MVPP2_GMAC_AN_DUPLEX_EN) + + if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK || + (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK || + (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) { + /* Force link down */ + old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS; + old_an |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* Set the GMAC in a reset state - do this in a way that + * ensures we clear it below. + */ + old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK; + writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + } + + if (old_ctrl0 != ctrl0) + writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG); + if (old_ctrl2 != ctrl2) + writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG); + if (old_ctrl4 != ctrl4) + writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG); + if (old_an != an) + writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) { + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; + } } static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, const struct phylink_link_state *state) { struct mvpp2_port *port = netdev_priv(dev); + bool change_interface = port->phy_interface != state->interface; /* Check for invalid configuration */ - if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) { + if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) { netdev_err(dev, "Invalid mode on %s\n", dev->name); return; } @@ -4637,8 +4749,9 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, /* Make sure the port is disabled when reconfiguring the mode */ mvpp2_port_disable(port); - if (port->priv->hw_version == MVPP22 && - port->phy_interface != state->interface) { + if (port->priv->hw_version == MVPP22 && change_interface) { + mvpp22_gop_mask_irq(port); + port->phy_interface = state->interface; /* Reconfigure the serdes lanes */ @@ -4647,17 +4760,19 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, } /* mac (re)configuration */ - if (state->interface == PHY_INTERFACE_MODE_10GKR) + if (mvpp2_is_xlg(state->interface)) mvpp2_xlg_config(port, mode, state); else if (phy_interface_mode_is_rgmii(state->interface) || - state->interface == PHY_INTERFACE_MODE_SGMII || - state->interface == PHY_INTERFACE_MODE_1000BASEX || - state->interface == 
PHY_INTERFACE_MODE_2500BASEX) + phy_interface_mode_is_8023z(state->interface) || + state->interface == PHY_INTERFACE_MODE_SGMII) mvpp2_gmac_config(port, mode, state); if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) mvpp2_port_loopback_set(port, state); + if (port->priv->hw_version == MVPP22 && change_interface) + mvpp22_gop_unmask_irq(port); + mvpp2_port_enable(port); } @@ -4667,13 +4782,18 @@ static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, struct mvpp2_port *port = netdev_priv(dev); u32 val; - if (!phylink_autoneg_inband(mode) && - interface != PHY_INTERFACE_MODE_10GKR) { - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; - if (phy_interface_mode_is_rgmii(interface)) + if (!phylink_autoneg_inband(mode)) { + if (mvpp2_is_xlg(interface)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; + val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; val |= MVPP2_GMAC_FORCE_LINK_PASS; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } } mvpp2_port_enable(port); @@ -4689,25 +4809,24 @@ static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode, struct mvpp2_port *port = netdev_priv(dev); u32 val; - if (!phylink_autoneg_inband(mode) && - interface != PHY_INTERFACE_MODE_10GKR) { - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~MVPP2_GMAC_FORCE_LINK_PASS; - val |= MVPP2_GMAC_FORCE_LINK_DOWN; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + if (!phylink_autoneg_inband(mode)) { + if (mvpp2_is_xlg(interface)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; + val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + } } netif_tx_stop_all_queues(dev); mvpp2_egress_disable(port); mvpp2_ingress_disable(port); - /* When using link interrupts to notify phylink of a MAC state change, - * we do not want the port to be disabled (we want to receive further - * interrupts, to be notified when the port will have a link later). - */ - if (!port->has_phy) - return; - mvpp2_port_disable(port); } @@ -4749,10 +4868,18 @@ static int mvpp2_port_probe(struct platform_device *pdev, } ntxqs = MVPP2_MAX_TXQ; - if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) - nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); - else - nrxqs = MVPP2_DEFAULT_RXQ; + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE) { + nrxqs = 1; + } else { + /* According to the PPv2.2 datasheet and our experiments on + * PPv2.1, RX queues have an allocation granularity of 4 (when + * more than a single one on PPv2.2). + * Round up to nearest multiple of 4. 
+ */ + nrxqs = (num_possible_cpus() + 3) & ~0x3; + if (nrxqs > MVPP2_PORT_MAX_RXQ) + nrxqs = MVPP2_PORT_MAX_RXQ; + } dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); if (!dev) @@ -4883,7 +5010,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, mvpp2_port_periodic_xon_disable(port); - mvpp2_port_reset(port); + mvpp2_mac_reset_assert(port); + mvpp22_pcs_reset_assert(port); port->pcpu = alloc_percpu(struct mvpp2_port_pcpu); if (!port->pcpu) { diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index f8a6d6e3cb7a..35f2142aac5e 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -201,6 +201,7 @@ struct tx_desc { }; struct pxa168_eth_private { + struct platform_device *pdev; int port_num; /* User Ethernet port number */ int phy_addr; int phy_speed; @@ -331,7 +332,7 @@ static void rxq_refill(struct net_device *dev) used_rx_desc = pep->rx_used_desc_q; p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc]; size = skb_end_pointer(skb) - skb->data; - p_used_rx_desc->buf_ptr = dma_map_single(NULL, + p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, size, DMA_FROM_DEVICE); @@ -743,7 +744,7 @@ static int txq_reclaim(struct net_device *dev, int force) netdev_err(dev, "Error in TX\n"); dev->stats.tx_errors++; } - dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); + dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE); if (skb) dev_kfree_skb_irq(skb); released++; @@ -805,7 +806,7 @@ static int rxq_process(struct net_device *dev, int budget) if (rx_next_curr_desc == rx_used_desc) pep->rx_resource_err = 1; pep->rx_desc_count--; - dma_unmap_single(NULL, rx_desc->buf_ptr, + dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr, rx_desc->buf_size, DMA_FROM_DEVICE); received_packets++; @@ -1274,7 +1275,8 @@ pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) length = skb->len; pep->tx_skb[tx_index] = skb; desc->byte_cnt = length; - desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); + desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length, + DMA_TO_DEVICE); skb_tx_timestamp(skb); @@ -1528,6 +1530,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) if (err) goto err_free_mdio; + pep->pdev = pdev; SET_NETDEV_DEV(dev, &pdev->dev); pxa168_init_hw(pep); err = register_netdev(dev); diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 04fd1f135011..654ac534b10e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, memset(p, 0, regs->len); memcpy_fromio(p, io, B3_RAM_ADDR); - memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, - regs->len - B3_RI_WTO_R1); + if (regs->len > B3_RI_WTO_R1) { + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); + } } /* Wake on Lan only supported on Yukon chips with rev 1 or above */ diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f3a5fa84860f..8b3495ee2b6e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -46,6 +46,7 @@ #include #include #include +#include #include @@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128; module_param(copybreak, int, 0); MODULE_PARM_DESC(copybreak, "Receive copy threshold"); -static int disable_msi = 0; +static int disable_msi = -1; module_param(disable_msi, int, 0); 
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); @@ -4917,6 +4918,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz) return buf; } +static const struct dmi_system_id msi_blacklist[] = { + { + .ident = "Dell Inspiron 1545", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"), + }, + }, + { + .ident = "Gateway P-79", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Gateway"), + DMI_MATCH(DMI_PRODUCT_NAME, "P-79"), + }, + }, + {} +}; + static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev, *dev1; @@ -5028,6 +5047,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_pci; } + if (disable_msi == -1) + disable_msi = !!dmi_check_system(msi_blacklist); + if (!disable_msi && pci_enable_msi(pdev) == 0) { err = sky2_test_msi(hw); if (err) { @@ -5073,7 +5095,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); pci_set_drvdata(pdev, hw); - pdev->d3_delay = 200; + pdev->d3_delay = 300; return 0; diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index f9149d2a4694..43656f961891 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -1,6 +1,6 @@ config NET_VENDOR_MEDIATEK bool "MediaTek ethernet driver" - depends on ARCH_MEDIATEK + depends on ARCH_MEDIATEK || SOC_MT7621 ---help--- If you have a Mediatek SoC with ethernet, say Y. diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 49f926b7a91c..549d36497b8c 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -226,7 +226,7 @@ static void mtk_phy_link_adjust(struct net_device *dev) case SPEED_100: mcr |= MAC_MCR_SPEED_100; break; - }; + } if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && !mac->id && !mac->trgmii) @@ -1745,6 +1745,22 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) return IRQ_HANDLED; } +static irqreturn_t mtk_handle_irq(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + + if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) { + if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT) + mtk_handle_irq_rx(irq, _eth); + } + if (mtk_r32(eth, MTK_QDMA_INT_MASK) & MTK_TX_DONE_INT) { + if (mtk_r32(eth, MTK_QMTK_INT_STATUS) & MTK_TX_DONE_INT) + mtk_handle_irq_tx(irq, _eth); + } + + return IRQ_HANDLED; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void mtk_poll_controller(struct net_device *dev) { @@ -2485,7 +2501,10 @@ static int mtk_probe(struct platform_device *pdev) } for (i = 0; i < 3; i++) { - eth->irq[i] = platform_get_irq(pdev, i); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) + eth->irq[i] = eth->irq[0]; + else + eth->irq[i] = platform_get_irq(pdev, i); if (eth->irq[i] < 0) { dev_err(&pdev->dev, "no IRQ%d resource found\n", i); return -ENXIO; @@ -2528,13 +2547,21 @@ static int mtk_probe(struct platform_device *pdev) goto err_deinit_hw; } - err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, - dev_name(eth->dev), eth); - if (err) - goto err_free_dev; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { + err = devm_request_irq(eth->dev, eth->irq[0], + mtk_handle_irq, 0, + dev_name(eth->dev), eth); + } else { + err = devm_request_irq(eth->dev, eth->irq[1], + mtk_handle_irq_tx, 0, + dev_name(eth->dev), eth); + if (err) + goto err_free_dev; - 
err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, - dev_name(eth->dev), eth); + err = devm_request_irq(eth->dev, eth->irq[2], + mtk_handle_irq_rx, 0, + dev_name(eth->dev), eth); + } if (err) goto err_free_dev; @@ -2607,6 +2634,12 @@ static const struct mtk_soc_data mt2701_data = { .required_pctl = true, }; +static const struct mtk_soc_data mt7621_data = { + .caps = MTK_SHARED_INT, + .required_clks = MT7621_CLKS_BITMAP, + .required_pctl = false, +}; + static const struct mtk_soc_data mt7622_data = { .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW | MTK_HWLRO, .required_clks = MT7622_CLKS_BITMAP, @@ -2621,6 +2654,7 @@ static const struct mtk_soc_data mt7623_data = { const struct of_device_id of_mtk_match[] = { { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, + { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data}, { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, {}, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 46819297fc3e..f7501997cea0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -363,6 +363,7 @@ #define ETHSYS_CHIPID4_7 0x4 #define MT7623_ETH 7623 #define MT7622_ETH 7622 +#define MT7621_ETH 7621 /* ethernet subsystem config register */ #define ETHSYS_SYSCFG0 0x14 @@ -488,6 +489,8 @@ enum mtk_clks_map { BIT(MTK_CLK_SGMII_CDR_FB) | \ BIT(MTK_CLK_SGMII_CK) | \ BIT(MTK_CLK_ETH2PLL)) +#define MT7621_CLKS_BITMAP (0) + enum mtk_dev_state { MTK_HW_INIT, MTK_RESETTING @@ -567,6 +570,7 @@ struct mtk_rx_ring { #define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \ MTK_GMAC2_SGMII) #define MTK_HWLRO BIT(12) +#define MTK_SHARED_INT BIT(13) #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) /* struct mtk_eth_data - This is the structure holding all differences diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index f200b8c420d5..ff8057ed97ee 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -4,7 +4,6 @@ config MLX4_EN tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" - depends on MAY_USE_DEVLINK depends on PCI && NETDEVICES && ETHERNET && INET select MLX4_CORE imply PTP_1588_CLOCK diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index dbc483e4a2ef..b330020dc0d6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -185,8 +185,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, bitmap->avail = num - reserved_top - reserved_bot; bitmap->effective_len = bitmap->avail; spin_lock_init(&bitmap->lock); - bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long), - GFP_KERNEL); + bitmap->table = bitmap_zalloc(bitmap->max, GFP_KERNEL); if (!bitmap->table) return -ENOMEM; @@ -197,7 +196,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap) { - kfree(bitmap->table); + bitmap_free(bitmap->table); } struct mlx4_zone_allocator { diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index e65bc3c95630..c19e74e6ac94 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -3274,7 +3274,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int 
vf, int link_stat mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n", link_state, slave, port); return -EINVAL; - }; + } s_info = &priv->mfunc.master.vf_admin[slave].vport[port]; s_info->link_state = link_state; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6b88881b8e35..c1438ae52a11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, dev->addr_len = ETH_ALEN; mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); if (!is_valid_ether_addr(dev->dev_addr)) { - en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", + en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", priv->port, dev->dev_addr); err = -EINVAL; goto out; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9a0881cb7f51..6c01314e87b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, } #endif +#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) + /* We reach this function only after checking that any of * the (IPv4 | IPv6) bits are set in cqe->status. */ @@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, netdev_features_t dev_features) { __wsum hw_checksum = 0; + void *hdr; + + /* CQE csum doesn't cover padding octets in short ethernet + * frames. And the pad field is appended prior to calculating + * and appending the FCS field. + * + * Detecting these padded frames requires to verify and parse + * IP headers, so we simply force all those small frames to skip + * checksum complete. + */ + if (short_frame(skb->len)) + return -EINVAL; - void *hdr = (u8 *)va + sizeof(struct ethhdr); - + hdr = (u8 *)va + sizeof(struct ethhdr); hw_checksum = csum_unfold((__force __sum16)cqe->checksum); if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && @@ -819,6 +832,11 @@ xdp_drop_no_cnt: skb_record_rx_queue(skb, cq_ring); if (likely(dev->features & NETIF_F_RXCSUM)) { + /* TODO: For IP non TCP/UDP packets when csum complete is + * not an option (not supported or any other reason) we can + * actually check cqe IPOK status bit and report + * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE + */ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) && (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 2df92dbd38e1..a5be27772b8e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -100,7 +100,7 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not) req_not << 31), eq->doorbell); /* We still want ordering, just not swabbing, so add a barrier */ - mb(); + wmb(); } static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, @@ -558,6 +558,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. 
srq_no=0x%x, eq 0x%x\n", __func__, be32_to_cpu(eqe->event.srq.srqn), eq->eqn); + /* fall through */ case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ @@ -820,7 +821,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? "HW" : "SW"); break; - }; + } ++eq->cons_index; eqes_found = 1; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index bdb8dd161923..1f6e16d5ea6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3981,6 +3981,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) goto err_params_unregister; + devlink_params_publish(devlink); pci_save_state(pdev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 37a551436e4a..6debffb8336b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -4,7 +4,6 @@ config MLX5_CORE tristate "Mellanox 5th generation network adapters (ConnectX series) core driver" - depends on MAY_USE_DEVLINK depends on PCI imply PTP_1588_CLOCK imply VXLAN diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9de9abacf7f6..1a16f6d73cbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o # mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ + transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o @@ -22,7 +22,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ # mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_selftest.o en/port.o en/monitor_stats.o + en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o # # Netdev extra @@ -30,12 +30,12 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o -mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tun.o lag_mp.o # # Core extra # -mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 421b9c3c8bf7..9008e17126db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -186,10 +186,7 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, if (!pgdir) return NULL; - pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page), - sizeof(unsigned long), - GFP_KERNEL); - + pgdir->bitmap = bitmap_zalloc(db_per_page, 
GFP_KERNEL); if (!pgdir->bitmap) { kfree(pgdir); return NULL; @@ -200,7 +197,7 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev, pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE, &pgdir->db_dma, node); if (!pgdir->db_page) { - kfree(pgdir->bitmap); + bitmap_free(pgdir->bitmap); kfree(pgdir); return NULL; } @@ -280,7 +277,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, db->u.pgdir->db_page, db->u.pgdir->db_dma); list_del(&db->u.pgdir->list); - kfree(db->u.pgdir->bitmap); + bitmap_free(db->u.pgdir->bitmap); kfree(db->u.pgdir); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3e0fa8a8077b..be48c6440251 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -316,6 +316,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: case MLX5_CMD_OP_DEALLOC_MEMIC: case MLX5_CMD_OP_PAGE_FAULT_RESUME: + case MLX5_CMD_OP_QUERY_HOST_PARAMS: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -627,6 +628,7 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); MLX5_COMMAND_STR_CASE(ALLOC_MEMIC); MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC); + MLX5_COMMAND_STR_CASE(QUERY_HOST_PARAMS); default: return "unknown command opcode"; } } @@ -1583,6 +1585,24 @@ no_trig: spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); } +void mlx5_cmd_flush(struct mlx5_core_dev *dev) +{ + struct mlx5_cmd *cmd = &dev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + while (down_trylock(&cmd->sem)) + mlx5_cmd_trigger_completions(dev); + + while (down_trylock(&cmd->pages_sem)) + mlx5_cmd_trigger_completions(dev); + + /* Unlock cmdif */ + up(&cmd->pages_sem); + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + static int status_to_err(u8 status) { return status ? -1 : 0; /* TBD more meaningful codes */ @@ -1711,12 +1731,57 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, } EXPORT_SYMBOL(mlx5_cmd_exec); -int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, - void *out, int out_size, mlx5_cmd_cbk_t callback, - void *context) +void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, + struct mlx5_async_ctx *ctx) +{ + ctx->dev = dev; + /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ + atomic_set(&ctx->num_inflight, 1); + init_waitqueue_head(&ctx->wait); +} +EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); + +/** + * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx + * @ctx: The ctx to clean + * + * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The + * caller must ensure that mlx5_cmd_exec_cb() is not called during or after + * the call mlx5_cleanup_async_ctx(). 
+ */ +void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) +{ + atomic_dec(&ctx->num_inflight); + wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0); +} +EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); + +static void mlx5_cmd_exec_cb_handler(int status, void *_work) +{ + struct mlx5_async_work *work = _work; + struct mlx5_async_ctx *ctx = work->ctx; + + work->user_callback(status, work); + if (atomic_dec_and_test(&ctx->num_inflight)) + wake_up(&ctx->wait); +} + +int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, + void *out, int out_size, mlx5_async_cbk_t callback, + struct mlx5_async_work *work) { - return cmd_exec(dev, in, in_size, out, out_size, callback, context, - false); + int ret; + + work->ctx = ctx; + work->user_callback = callback; + if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) + return -EIO; + ret = cmd_exec(ctx->dev, in, in_size, out, out_size, + mlx5_cmd_exec_cb_handler, work, false); + if (ret && atomic_dec_and_test(&ctx->num_inflight)) + wake_up(&ctx->wait); + + return ret; } EXPORT_SYMBOL(mlx5_cmd_exec_cb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 424457ff9759..8ecac81a385d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -258,6 +258,8 @@ const char *parse_fs_dst(struct trace_seq *p, return ret; } +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_ft); +EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_ft); EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_fg); EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fg); EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_set_fte); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index d027ce00c8ce..a4cf123e3f17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h @@ -61,6 +61,41 @@ const char *parse_fs_dst(struct trace_seq *p, const struct mlx5_flow_destination *dst, u32 counter_id); +TRACE_EVENT(mlx5_fs_add_ft, + TP_PROTO(const struct mlx5_flow_table *ft), + TP_ARGS(ft), + TP_STRUCT__entry( + __field(const struct mlx5_flow_table *, ft) + __field(u32, id) + __field(u32, level) + __field(u32, type) + ), + TP_fast_assign( + __entry->ft = ft; + __entry->id = ft->id; + __entry->level = ft->level; + __entry->type = ft->type; + ), + TP_printk("ft=%p id=%u level=%u type=%u \n", + __entry->ft, __entry->id, __entry->level, __entry->type) + ); + +TRACE_EVENT(mlx5_fs_del_ft, + TP_PROTO(const struct mlx5_flow_table *ft), + TP_ARGS(ft), + TP_STRUCT__entry( + __field(const struct mlx5_flow_table *, ft) + __field(u32, id) + ), + TP_fast_assign( + __entry->ft = ft; + __entry->id = ft->id; + + ), + TP_printk("ft=%p id=%u\n", + __entry->ft, __entry->id) + ); + TRACE_EVENT(mlx5_fs_add_fg, TP_PROTO(const struct mlx5_flow_group *fg), TP_ARGS(fg), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c new file mode 100644 index 000000000000..4746f2d28fb6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#include "ecpf.h" + +bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) +{ + return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1; +} + +static int mlx5_peer_pf_enable_hca(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {}; + + MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); + MLX5_SET(enable_hca_in, in, function_id, 0); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +} + +static int mlx5_peer_pf_disable_hca(struct mlx5_core_dev *dev) +{ + u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {}; + u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {}; + + MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); + MLX5_SET(disable_hca_in, in, function_id, 0); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_peer_pf_init(struct mlx5_core_dev *dev) +{ + int err; + + err = mlx5_peer_pf_enable_hca(dev); + if (err) + mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n", + err); + + return err; +} + +static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev) +{ + int err; + + err = mlx5_peer_pf_disable_hca(dev); + if (err) { + mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n", + err); + return; + } + + err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages); + if (err) + mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n", + err); +} + +int mlx5_ec_init(struct mlx5_core_dev *dev) +{ + int err = 0; + + if (!mlx5_core_is_ecpf(dev)) + return 0; + + /* ECPF shall enable HCA for peer PF in the same way a PF + * does this for its VFs. + */ + err = mlx5_peer_pf_init(dev); + if (err) + return err; + + return 0; +} + +void mlx5_ec_cleanup(struct mlx5_core_dev *dev) +{ + if (!mlx5_core_is_ecpf(dev)) + return; + + mlx5_peer_pf_cleanup(dev); +} + +static int mlx5_query_host_params_context(struct mlx5_core_dev *dev, + u32 *out, int outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_host_params_in)] = {}; + + MLX5_SET(query_host_params_in, in, opcode, + MLX5_CMD_OP_QUERY_HOST_PARAMS); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); +} + +int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf) +{ + u32 out[MLX5_ST_SZ_DW(query_host_params_out)] = {}; + int err; + + err = mlx5_query_host_params_context(dev, out, sizeof(out)); + if (err) + return err; + + *num_vf = MLX5_GET(query_host_params_out, out, + host_params_context.host_num_of_vfs); + mlx5_core_dbg(dev, "host_num_of_vfs %d\n", *num_vf); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h new file mode 100644 index 000000000000..346372df218f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. 
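 *
 * On the command-buffer helpers used throughout ecpf.c above (existing mlx5
 * conventions, restated here for readability): MLX5_ST_SZ_DW(foo) is the
 * size of firmware structure 'foo' in 32-bit words as laid out in
 * mlx5_ifc.h, so "u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};" declares a
 * zeroed command buffer of exactly the right size, and
 * MLX5_SET(foo, buf, field, val) writes 'field' at its ifc-defined offset
 * in that buffer before it is handed to mlx5_cmd_exec().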
*/ + +#ifndef __MLX5_ECPF_H__ +#define __MLX5_ECPF_H__ + +#include +#include "mlx5_core.h" + +#ifdef CONFIG_MLX5_ESWITCH + +enum { + MLX5_ECPU_BIT_NUM = 23, +}; + +bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev); +int mlx5_ec_init(struct mlx5_core_dev *dev); +void mlx5_ec_cleanup(struct mlx5_core_dev *dev); +int mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf); + +#else /* CONFIG_MLX5_ESWITCH */ + +static inline bool +mlx5_read_embedded_cpu(struct mlx5_core_dev *dev) { return false; } +static inline int mlx5_ec_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_ec_cleanup(struct mlx5_core_dev *dev) {} +static inline int +mlx5_query_host_params_num_vfs(struct mlx5_core_dev *dev, int *num_vf) +{ return -EOPNOTSUPP; } + +#endif /* CONFIG_MLX5_ESWITCH */ + +#endif /* __MLX5_ECPF_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8fa8fdd30b85..71c65cc17904 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -76,15 +76,14 @@ struct page_pool; #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define MLX5E_RX_MAX_HEAD (256) + #define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ #define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \ max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req) -#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6) -#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8) -#define MLX5E_MPWQE_STRIDE_SZ(mdev, cqe_cmprs) \ - (cqe_cmprs ? MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) : \ - MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)) +#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \ + MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD)) #define MLX5_MPWRQ_LOG_WQE_SZ 18 #define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? 
\ @@ -119,8 +118,6 @@ struct page_pool; #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5E_RX_MAX_HEAD (256) - #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_DEFAULT_LRO_TIMEOUT 32 #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 @@ -309,16 +306,18 @@ struct mlx5e_cq { struct mlx5_core_cq mcq; struct mlx5e_channel *channel; + /* control */ + struct mlx5_core_dev *mdev; + struct mlx5_wq_ctrl wq_ctrl; +} ____cacheline_aligned_in_smp; + +struct mlx5e_cq_decomp { /* cqe decompression */ struct mlx5_cqe64 title; struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE]; u8 mini_arr_idx; - u16 decmprs_left; - u16 decmprs_wqe_counter; - - /* control */ - struct mlx5_core_dev *mdev; - struct mlx5_wq_ctrl wq_ctrl; + u16 left; + u16 wqe_counter; } ____cacheline_aligned_in_smp; struct mlx5e_tx_wqe_info { @@ -388,10 +387,7 @@ struct mlx5e_txqsq { struct mlx5e_channel *channel; int txq_ix; u32 rate_limit; - struct mlx5e_txqsq_recover { - struct work_struct recover_work; - u64 last_recover; - } recover; + struct work_struct recover_work; } ____cacheline_aligned_in_smp; struct mlx5e_dma_info { @@ -581,6 +577,7 @@ struct mlx5e_rq { struct net_device *netdev; struct mlx5e_rq_stats *stats; struct mlx5e_cq cq; + struct mlx5e_cq_decomp cqd; struct mlx5e_page_cache page_cache; struct hwtstamp_config *tstamp; struct mlx5_clock *clock; @@ -638,6 +635,7 @@ struct mlx5e_channel { struct hwtstamp_config *tstamp; int ix; int cpu; + cpumask_var_t xps_cpumask; }; struct mlx5e_channels { @@ -657,6 +655,7 @@ struct mlx5e_channel_stats { enum { MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, + MLX5E_STATE_XDP_TX_ENABLED, }; struct mlx5e_rqt { @@ -682,6 +681,13 @@ struct mlx5e_rss_params { u8 hfunc; }; +struct mlx5e_modify_sq_param { + int curr_state; + int next_state; + int rl_update; + int rl_index; +}; + struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; @@ -737,6 +743,7 @@ struct mlx5e_priv { #ifdef CONFIG_MLX5_EN_TLS struct mlx5e_tls *tls; #endif + struct devlink_health_reporter *tx_reporter; }; struct mlx5e_profile { @@ -803,6 +810,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, void mlx5e_update_stats(struct mlx5e_priv *priv); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); +void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); @@ -850,9 +858,9 @@ void mlx5e_close_channels(struct mlx5e_channels *chs); * switching channels */ typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); -void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_hw_modify hw_modify); +int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify); void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); @@ -866,6 +874,11 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); +int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, + struct mlx5e_modify_sq_param *p); +void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); +void mlx5e_tx_disable_queue(struct netdev_queue *txq); + static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) { return 
(MLX5_CAP_ETH(mdev, tunnel_stateless_gre) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c index 2ce420851e77..7cd5b02e0f10 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c @@ -66,7 +66,7 @@ static int mlx5e_monitor_event_handler(struct notifier_block *nb, return NOTIFY_OK; } -void mlx5e_monitor_counter_start(struct mlx5e_priv *priv) +static void mlx5e_monitor_counter_start(struct mlx5e_priv *priv) { MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler, MONITOR_COUNTER); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 4a37713023be..122927f3a600 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -63,66 +63,168 @@ static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = { [MLX5E_50GBASE_KR2] = 50000, }; -u32 mlx5e_port_ptys2speed(u32 eth_proto_oper) +static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { + [MLX5E_SGMII_100M] = 100, + [MLX5E_1000BASE_X_SGMII] = 1000, + [MLX5E_5GBASE_R] = 5000, + [MLX5E_10GBASE_XFI_XAUI_1] = 10000, + [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000, + [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000, + [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000, + [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000, + [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, + [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, + [MLX5E_400GAUI_8] = 400000, +}; + +static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, + const u32 **arr, u32 *size) +{ + bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + + *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) : + ARRAY_SIZE(mlx5e_link_speed); + *arr = ext ? 
mlx5e_ext_link_speed : mlx5e_link_speed; +} + +int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, + struct mlx5e_port_eth_proto *eproto) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + int err; + + if (!eproto) + return -EINVAL; + + if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet)) + return -EOPNOTSUPP; + + err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); + if (err) + return err; + + eproto->cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_capability); + eproto->admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_admin); + eproto->oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper); + return 0; +} + +void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + + *an_status = 0; + *an_disable_cap = 0; + *an_disable_admin = 0; + + if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1)) + return; + + *an_status = MLX5_GET(ptys_reg, out, an_status); + *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap); + *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); +} + +int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, bool ext) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u8 an_disable_admin; + u8 an_disable_cap; + u8 an_status; + + mlx5_port_query_eth_autoneg(dev, &an_status, &an_disable_cap, + &an_disable_admin); + if (!an_disable_cap && an_disable) + return -EPERM; + + memset(in, 0, sizeof(in)); + + MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, an_disable_admin, an_disable); + MLX5_SET(ptys_reg, in, proto_mask, MLX5_PTYS_EN); + if (ext) + MLX5_SET(ptys_reg, in, ext_eth_proto_admin, proto_admin); + else + MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PTYS, 0, 1); +} + +u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper) { unsigned long temp = eth_proto_oper; + const u32 *table; u32 speed = 0; + u32 max_size; int i; - i = find_first_bit(&temp, MLX5E_LINK_MODES_NUMBER); - if (i < MLX5E_LINK_MODES_NUMBER) - speed = mlx5e_link_speed[i]; - + mlx5e_port_get_speed_arr(mdev, &table, &max_size); + i = find_first_bit(&temp, max_size); + if (i < max_size) + speed = table[i]; return speed; } int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) { - u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {}; - u32 eth_proto_oper; + struct mlx5e_port_eth_proto eproto; + bool ext; int err; - err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); + ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) - return err; + goto out; - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - *speed = mlx5e_port_ptys2speed(eth_proto_oper); + *speed = mlx5e_port_ptys2speed(mdev, eproto.oper); if (!(*speed)) err = -EINVAL; +out: return err; } int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) { + struct mlx5e_port_eth_proto eproto; u32 max_speed = 0; - u32 proto_cap; + const u32 *table; + u32 max_size; + bool ext; int err; int i; - err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); + ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); if (err) return err; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) - if (proto_cap & MLX5E_PROT_MASK(i)) - max_speed = 
max(max_speed, mlx5e_link_speed[i]); + mlx5e_port_get_speed_arr(mdev, &table, &max_size); + for (i = 0; i < max_size; ++i) + if (eproto.cap & MLX5E_PROT_MASK(i)) + max_speed = max(max_speed, table[i]); *speed = max_speed; return 0; } -u32 mlx5e_port_speed2linkmodes(u32 speed) +u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed) { u32 link_modes = 0; + const u32 *table; + u32 max_size; int i; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (mlx5e_link_speed[i] == speed) + mlx5e_port_get_speed_arr(mdev, &table, &max_size); + for (i = 0; i < max_size; ++i) { + if (table[i] == speed) link_modes |= MLX5E_PROT_MASK(i); } - return link_modes; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index cd2160b8c9bf..70f536ec51c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -36,10 +36,22 @@ #include #include "en.h" -u32 mlx5e_port_ptys2speed(u32 eth_proto_oper); +struct mlx5e_port_eth_proto { + u32 cap; + u32 admin; + u32 oper; +}; + +int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, + struct mlx5e_port_eth_proto *eproto); +void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin); +int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, bool ext); +u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper); int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); -u32 mlx5e_port_speed2linkmodes(u32 speed); +u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed); int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h new file mode 100644 index 000000000000..e78e92753d73 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5E_EN_REPORTER_H +#define __MLX5E_EN_REPORTER_H + +#include +#include "en.h" + +int mlx5e_tx_reporter_create(struct mlx5e_priv *priv); +void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv); +void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq); +int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c new file mode 100644 index 000000000000..9d38e62cdf24 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 Mellanox Technologies. 
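 *
 * The reporter declared in reporter.h above is registered with devlink
 * health under the name "tx" (see mlx5_tx_reporter_ops further down), so
 * with a reasonably recent iproute2 it can typically be driven from
 * userspace along these lines (the PCI address is a placeholder):
 *
 *	devlink health show pci/0000:03:00.0 reporter tx
 *	devlink health diagnose pci/0000:03:00.0 reporter tx
 *	devlink health recover pci/0000:03:00.0 reporter tx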
*/ + +#include +#include "reporter.h" +#include "lib/eq.h" + +#define MLX5E_TX_REPORTER_PER_SQ_MAX_LEN 256 + +struct mlx5e_tx_err_ctx { + int (*recover)(struct mlx5e_txqsq *sq); + struct mlx5e_txqsq *sq; +}; + +static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) +{ + unsigned long exp_time = jiffies + msecs_to_jiffies(2000); + + while (time_before(jiffies, exp_time)) { + if (sq->cc == sq->pc) + return 0; + + msleep(20); + } + + netdev_err(sq->channel->netdev, + "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", + sq->sqn, sq->cc, sq->pc); + + return -ETIMEDOUT; +} + +static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) +{ + WARN_ONCE(sq->cc != sq->pc, + "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", + sq->sqn, sq->cc, sq->pc); + sq->cc = 0; + sq->dma_fifo_cc = 0; + sq->pc = 0; +} + +static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + struct mlx5e_modify_sq_param msp = {0}; + int err; + + msp.curr_state = curr_state; + msp.next_state = MLX5_SQC_STATE_RST; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); + return err; + } + + memset(&msp, 0, sizeof(msp)); + msp.curr_state = MLX5_SQC_STATE_RST; + msp.next_state = MLX5_SQC_STATE_RDY; + + err = mlx5e_modify_sq(mdev, sq->sqn, &msp); + if (err) { + netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); + return err; + } + + return 0; +} + +static int mlx5e_tx_reporter_err_cqe_recover(struct mlx5e_txqsq *sq) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + struct net_device *dev = sq->channel->netdev; + u8 state; + int err; + + if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) + return 0; + + err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); + if (err) { + netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n", + sq->sqn, err); + return err; + } + + if (state != MLX5_SQC_STATE_ERR) { + netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); + return -EINVAL; + } + + mlx5e_tx_disable_queue(sq->txq); + + err = mlx5e_wait_for_sq_flush(sq); + if (err) + return err; + + /* At this point, no new packets will arrive from the stack as TXQ is + * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all + * pending WQEs. SQ can safely reset the SQ. 
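 *
 * In other words, the err-CQE recovery path above is: stop the txq with
 * mlx5e_tx_disable_queue(), wait for the SQ to drain (cc == pc) in
 * mlx5e_wait_for_sq_flush(), walk the SQ through ERR -> RST -> RDY via
 * mlx5e_sq_to_ready(), zero the producer/consumer counters, and only then
 * re-activate the queue.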
+ */ + + err = mlx5e_sq_to_ready(sq, state); + if (err) + return err; + + mlx5e_reset_txqsq_cc_pc(sq); + sq->stats->recover++; + mlx5e_activate_txqsq(sq); + + return 0; +} + +static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter, + char *err_str, + struct mlx5e_tx_err_ctx *err_ctx) +{ + if (IS_ERR_OR_NULL(tx_reporter)) { + netdev_err(err_ctx->sq->channel->netdev, err_str); + return err_ctx->recover(err_ctx->sq); + } + + return devlink_health_report(tx_reporter, err_str, err_ctx); +} + +void mlx5e_tx_reporter_err_cqe(struct mlx5e_txqsq *sq) +{ + char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; + struct mlx5e_tx_err_ctx err_ctx = {0}; + + err_ctx.sq = sq; + err_ctx.recover = mlx5e_tx_reporter_err_cqe_recover; + sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn); + + mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str, + &err_ctx); +} + +static int mlx5e_tx_reporter_timeout_recover(struct mlx5e_txqsq *sq) +{ + struct mlx5_eq_comp *eq = sq->cq.mcq.eq; + u32 eqe_count; + int ret; + + netdev_err(sq->channel->netdev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", + eq->core.eqn, eq->core.cons_index, eq->core.irqn); + + eqe_count = mlx5_eq_poll_irq_disabled(eq); + ret = eqe_count ? false : true; + if (!eqe_count) { + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + return ret; + } + + netdev_err(sq->channel->netdev, "Recover %d eqes on EQ 0x%x\n", + eqe_count, eq->core.eqn); + sq->channel->stats->eq_rearm++; + return ret; +} + +int mlx5e_tx_reporter_timeout(struct mlx5e_txqsq *sq) +{ + char err_str[MLX5E_TX_REPORTER_PER_SQ_MAX_LEN]; + struct mlx5e_tx_err_ctx err_ctx; + + err_ctx.sq = sq; + err_ctx.recover = mlx5e_tx_reporter_timeout_recover; + sprintf(err_str, + "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", + sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, + jiffies_to_usecs(jiffies - sq->txq->trans_start)); + + return mlx5_tx_health_report(sq->channel->priv->tx_reporter, err_str, + &err_ctx); +} + +/* state lock cannot be grabbed within this function. + * It can cause a dead lock or a read-after-free. + */ +static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx) +{ + return err_ctx->recover(err_ctx->sq); +} + +static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) +{ + int err; + + rtnl_lock(); + mutex_lock(&priv->state_lock); + mlx5e_close_locked(priv->netdev); + err = mlx5e_open_locked(priv->netdev); + mutex_unlock(&priv->state_lock); + rtnl_unlock(); + + return err; +} + +static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter, + void *context) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + struct mlx5e_tx_err_ctx *err_ctx = context; + + return err_ctx ? 
mlx5e_tx_reporter_recover_from_ctx(err_ctx) : + mlx5e_tx_reporter_recover_all(priv); +} + +static int +mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, + u32 sqn, u8 state, bool stopped) +{ + int err; + + err = devlink_fmsg_obj_nest_start(fmsg); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sqn); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state); + if (err) + return err; + + err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped); + if (err) + return err; + + err = devlink_fmsg_obj_nest_end(fmsg); + if (err) + return err; + + return 0; +} + +static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + int i, err = 0; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + + err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs"); + if (err) + goto unlock; + + for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; + i++) { + struct mlx5e_txqsq *sq = priv->txq2sq[i]; + u8 state; + + err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state); + if (err) + break; + + err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq->sqn, + state, + netif_xmit_stopped(sq->txq)); + if (err) + break; + } + err = devlink_fmsg_arr_pair_nest_end(fmsg); + if (err) + goto unlock; + +unlock: + mutex_unlock(&priv->state_lock); + return err; +} + +static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { + .name = "tx", + .recover = mlx5e_tx_reporter_recover, + .diagnose = mlx5e_tx_reporter_diagnose, +}; + +#define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500 + +int mlx5e_tx_reporter_create(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct devlink *devlink = priv_to_devlink(mdev); + + priv->tx_reporter = + devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops, + MLX5_REPORTER_TX_GRACEFUL_PERIOD, + true, priv); + if (IS_ERR(priv->tx_reporter)) + netdev_warn(priv->netdev, + "Failed to create tx reporter, err = %ld\n", + PTR_ERR(priv->tx_reporter)); + return IS_ERR_OR_NULL(priv->tx_reporter); +} + +void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv) +{ + if (IS_ERR_OR_NULL(priv->tx_reporter)) + return; + + devlink_health_reporter_destroy(priv->tx_reporter); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 046948ead152..fa2a3c444cdc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -25,7 +25,7 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, /* if the egress device isn't on the same HW e-switch or * it's a LAG device, use the uplink */ - if (!switchdev_port_same_parent_id(priv->netdev, dev) || + if (!netdev_port_same_parent_id(priv->netdev, dev) || dst_is_lag_dev) { *route_dev = uplink_dev; *out_dev = *route_dev; @@ -54,12 +54,24 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, struct neighbour *n = NULL; #if IS_ENABLED(CONFIG_INET) + struct mlx5_core_dev *mdev = priv->mdev; + struct net_device *uplink_dev; int ret; + if (mlx5_lag_is_multipath(mdev)) { + struct mlx5_eswitch *esw = mdev->priv.eswitch; + + uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + fl4->flowi4_oif = uplink_dev->ifindex; + } + rt = ip_route_output_key(dev_net(mirred_dev), fl4); ret = PTR_ERR_OR_ZERO(rt); if (ret) return ret; + + if 
(mlx5_lag_is_multipath(mdev) && !rt->rt_gateway) + return -ENETUNREACH; #else return -EOPNOTSUPP; #endif @@ -256,6 +268,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, e->m_neigh.family = n->ops->family; memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); e->out_dev = out_dev; + e->route_dev = route_dev; /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the @@ -294,7 +307,9 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, if (!(nud_state & NUD_VALID)) { neigh_event_send(n, NULL); - err = -EAGAIN; + /* the encap entry will be made valid on neigh update event + * and not used before that. + */ goto out; } @@ -369,6 +384,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->m_neigh.family = n->ops->family; memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); e->out_dev = out_dev; + e->route_dev = route_dev; /* It's importent to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the @@ -406,7 +422,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, if (!(nud_state & NUD_VALID)) { neigh_event_send(n, NULL); - err = -EAGAIN; + /* the encap entry will be made valid on neigh update event + * and not used before that. + */ goto out; } @@ -496,25 +514,21 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, void *headers_c, void *headers_v) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - f->mask); void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + struct flow_match_ports enc_ports; + + flow_rule_match_enc_ports(rule, &enc_ports); /* Full udp dst port must be given */ - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) || - memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) { + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) || + memchr_inv(&enc_ports.mask->dst, 0xff, sizeof(enc_ports.mask->dst))) { NL_SET_ERR_MSG_MOD(extack, "VXLAN decap filter must include enc_dst_port condition"); netdev_warn(priv->netdev, @@ -523,12 +537,12 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, } /* udp dst port must be knonwn as a VXLAN port */ - if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) { + if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(enc_ports.key->dst))) { NL_SET_ERR_MSG_MOD(extack, "Matched UDP port is not registered as a VXLAN port"); netdev_warn(priv->netdev, "UDP port %d is not registered as a VXLAN port\n", - be16_to_cpu(key->dst)); + be16_to_cpu(enc_ports.key->dst)); return -EOPNOTSUPP; } @@ -536,26 +550,26 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, + ntohs(enc_ports.mask->dst)); + 
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, + ntohs(enc_ports.key->dst)); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, + ntohs(enc_ports.mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, + ntohs(enc_ports.key->src)); /* match on VNI */ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->key); - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid enc_keyid; + + flow_rule_match_enc_keyid(rule, &enc_keyid); + MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, - be32_to_cpu(mask->keyid)); + be32_to_cpu(enc_keyid.mask->keyid)); MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, - be32_to_cpu(key->keyid)); + be32_to_cpu(enc_keyid.key->keyid)); } return 0; } @@ -570,6 +584,7 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) { NL_SET_ERR_MSG_MOD(f->common.extack, @@ -587,21 +602,14 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB); /* gre key */ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *mask = NULL; - struct flow_dissector_key_keyid *key = NULL; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid enc_keyid; - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); + flow_rule_match_enc_keyid(rule, &enc_keyid); MLX5_SET(fte_match_set_misc, misc_c, - gre_key.key, be32_to_cpu(mask->keyid)); - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->key); + gre_key.key, be32_to_cpu(enc_keyid.mask->keyid)); MLX5_SET(fte_match_set_misc, misc_v, - gre_key.key, be32_to_cpu(key->keyid)); + gre_key.key, be32_to_cpu(enc_keyid.key->keyid)); } return 0; @@ -612,16 +620,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, void *headers_c, - void *headers_v) + void *headers_v, u8 *match_level) { int tunnel_type; int err = 0; tunnel_type = mlx5e_tc_tun_get_type(filter_dev); if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + *match_level = MLX5_MATCH_L4; err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, headers_c, headers_v); } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { + *match_level = MLX5_MATCH_L3; err = mlx5e_tc_tun_parse_gretap(priv, spec, f, headers_c, headers_v); } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 706ce7bf15e7..b63f15de899d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, void *headers_c, - void *headers_v); + void *headers_v, u8 *match_level); #endif //__MLX5_EN_TC_TUNNEL_H__ diff 
--git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 3740177eed09..03b2a9f9c589 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, int sq_num; int i; - if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) + /* this flag is sufficient, no need to test internal sq state */ + if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) return -ENETDOWN; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) @@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, sq = &priv->channels.c[sq_num]->xdpsq; - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) - return -ENETDOWN; - for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; struct mlx5e_xdp_info xdpi; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 3a67cb3cd179..ee27a7c8cd87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) +{ + set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + +static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) +{ + clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); + /* let other device's napi(s) see our new state */ + synchronize_rcu(); +} + +static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) +{ + return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); +} + static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { if (sq->doorbell_cseg) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 722998d68564..554672edf8c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1126,9 +1126,7 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) priv->channels.params.tx_min_inline_mode) goto out; - if (mlx5e_open_channels(priv, &new_channels)) - goto out; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + mlx5e_safe_switch_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3bbccead2f63..0804b478ad19 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -63,76 +63,147 @@ struct ptys2ethtool_config { __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); }; -static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; +static +struct ptys2ethtool_config ptys2legacy_ethtool_table[MLX5E_LINK_MODES_NUMBER]; +static +struct ptys2ethtool_config ptys2ext_ethtool_table[MLX5E_EXT_LINK_MODES_NUMBER]; -#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, ...) \ +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, table, ...) 
\ ({ \ struct ptys2ethtool_config *cfg; \ const unsigned int modes[] = { __VA_ARGS__ }; \ - unsigned int i; \ - cfg = &ptys2ethtool_table[reg_]; \ + unsigned int i, bit, idx; \ + cfg = &ptys2##table##_ethtool_table[reg_]; \ bitmap_zero(cfg->supported, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ bitmap_zero(cfg->advertised, \ __ETHTOOL_LINK_MODE_MASK_NBITS); \ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ - __set_bit(modes[i], cfg->supported); \ - __set_bit(modes[i], cfg->advertised); \ + bit = modes[i] % 64; \ + idx = modes[i] / 64; \ + __set_bit(bit, &cfg->supported[idx]); \ + __set_bit(bit, &cfg->advertised[idx]); \ } \ }) void mlx5e_build_ptys2ethtool_map(void) { - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, + memset(ptys2legacy_ethtool_table, 0, sizeof(ptys2legacy_ethtool_table)); + memset(ptys2ext_ethtool_table, 0, sizeof(ptys2ext_ethtool_table)); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, legacy, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, legacy, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, legacy, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, legacy, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, legacy, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, legacy, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, legacy, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, legacy, ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, legacy, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, legacy, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, legacy, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, legacy, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, legacy, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, legacy, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, legacy, 
ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy, ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy, ETHTOOL_LINK_MODE_10000baseT_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, legacy, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, legacy, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, legacy, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); - MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, legacy, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_SGMII_100M, ext, + ETHTOOL_LINK_MODE_100baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_X_SGMII, ext, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_5GBASE_R, ext, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_XFI_XAUI_1, ext, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_XLAUI_4_XLPPI_4, ext, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GAUI_1_25GBASE_CR_KR, ext, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, + ext, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR, ext, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_CAUI_4_100GBASE_CR4_KR4, ext, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_2_100GBASE_CR2_KR2, ext, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_4_200GBASE_CR4_KR4, ext, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + 
ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT); +} + +static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, + struct ptys2ethtool_config **arr, + u32 *size) +{ + bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + + *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; + *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : + ARRAY_SIZE(ptys2legacy_ethtool_table); } typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); @@ -298,11 +369,7 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, goto unlock; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto unlock; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); unlock: mutex_unlock(&priv->state_lock); @@ -354,32 +421,29 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, new_channels.params = priv->channels.params; new_channels.params.num_channels = count; - if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; goto out; } - /* Create fresh channels with new parameters */ - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto out; - arfs_enabled = priv->netdev->features & NETIF_F_NTUPLE; if (arfs_enabled) mlx5e_arfs_disable(priv); + if (!netif_is_rxfh_configured(priv->netdev)) + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, + MLX5E_INDIR_RQT_SIZE, count); + /* Switch to new channels, set new parameters and close old ones */ - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (arfs_enabled) { - err = mlx5e_arfs_enable(priv); - if (err) + int err2 = mlx5e_arfs_enable(priv); + + if (err2) netdev_err(priv->netdev, "%s: mlx5e_arfs_enable failed: %d\n", - __func__, err); + __func__, err2); } out: @@ -505,12 +569,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, goto out; } - /* open fresh channels with new coal parameters */ - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto out; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); @@ -525,27 +584,35 @@ static int mlx5e_set_coalesce(struct net_device *netdev, return mlx5e_ethtool_set_coalesce(priv, coal); } -static void ptys2ethtool_supported_link(unsigned long *supported_modes, +static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, + unsigned long *supported_modes, u32 eth_proto_cap) { unsigned long proto_cap = eth_proto_cap; + struct ptys2ethtool_config *table; + u32 max_size; int proto; - for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); + for_each_set_bit(proto, &proto_cap, max_size) bitmap_or(supported_modes, supported_modes, - ptys2ethtool_table[proto].supported, + table[proto].supported, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static void ptys2ethtool_adver_link(unsigned long *advertising_modes, +static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, + unsigned long *advertising_modes, u32 eth_proto_cap) { unsigned long proto_cap = eth_proto_cap; + struct ptys2ethtool_config *table; + u32 max_size; int proto; - for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + 
mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); + for_each_set_bit(proto, &proto_cap, max_size) bitmap_or(advertising_modes, advertising_modes, - ptys2ethtool_table[proto].advertised, + table[proto].advertised, __ETHTOOL_LINK_MODE_MASK_NBITS); } @@ -695,13 +762,14 @@ static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, struct ethtool_link_ksettings *link_ksettings) { + struct mlx5e_priv *priv = netdev_priv(netdev); u32 speed = SPEED_UNKNOWN; u8 duplex = DUPLEX_UNKNOWN; if (!netif_carrier_ok(netdev)) goto out; - speed = mlx5e_port_ptys2speed(eth_proto_oper); + speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper); if (!speed) { speed = SPEED_UNKNOWN; goto out; @@ -714,22 +782,22 @@ out: link_ksettings->base.duplex = duplex; } -static void get_supported(u32 eth_proto_cap, +static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, struct ethtool_link_ksettings *link_ksettings) { unsigned long *supported = link_ksettings->link_modes.supported; + ptys2ethtool_supported_link(mdev, supported, eth_proto_cap); - ptys2ethtool_supported_link(supported, eth_proto_cap); ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); } -static void get_advertising(u32 eth_proto_cap, u8 tx_pause, - u8 rx_pause, +static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, + u8 tx_pause, u8 rx_pause, struct ethtool_link_ksettings *link_ksettings) { unsigned long *advertising = link_ksettings->link_modes.advertising; + ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); - ptys2ethtool_adver_link(advertising, eth_proto_cap); if (rx_pause) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); if (tx_pause ^ rx_pause) @@ -779,12 +847,12 @@ static u8 get_connector_port(u32 eth_proto, u8 connector_type) return PORT_OTHER; } -static void get_lp_advertising(u32 eth_proto_lp, +static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, struct ethtool_link_ksettings *link_ksettings) { unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; - ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); + ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); } int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, @@ -801,6 +869,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, u8 an_disable_admin; u8 an_status; u8 connector_type; + bool ext; int err; err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); @@ -809,22 +878,25 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, __func__, err); goto err_query_regs; } - - eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); - an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); - an_status = MLX5_GET(ptys_reg, out, an_status); - connector_type = MLX5_GET(ptys_reg, out, connector_type); + ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_capability); + eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_admin); + eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, + eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status); + connector_type = 
MLX5_GET(ptys_reg, out, connector_type); mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - get_supported(eth_proto_cap, link_ksettings); - get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings); + get_supported(mdev, eth_proto_cap, link_ksettings); + get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; @@ -833,7 +905,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, connector_type); ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin, connector_type); - get_lp_advertising(eth_proto_lp, link_ksettings); + get_lp_advertising(mdev, eth_proto_lp, link_ksettings); if (an_status == MLX5_AN_COMPLETE) ethtool_link_ksettings_add_link_mode(link_ksettings, @@ -872,7 +944,9 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) u32 i, ptys_modes = 0; for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (bitmap_intersects(ptys2ethtool_table[i].advertised, + if (*ptys2legacy_ethtool_table[i].advertised == 0) + continue; + if (bitmap_intersects(ptys2legacy_ethtool_table[i].advertised, link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); @@ -881,13 +955,34 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) return ptys_modes; } +static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes) +{ + u32 i, ptys_modes = 0; + unsigned long modes[2]; + + for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) { + if (*ptys2ext_ethtool_table[i].advertised == 0) + continue; + memset(modes, 0, sizeof(modes)); + bitmap_and(modes, ptys2ext_ethtool_table[i].advertised, + link_modes, __ETHTOOL_LINK_MODE_MASK_NBITS); + + if (modes[0] == ptys2ext_ethtool_table[i].advertised[0] && + modes[1] == ptys2ext_ethtool_table[i].advertised[1]) + ptys_modes |= MLX5E_PROT_MASK(i); + } + return ptys_modes; +} + int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, const struct ethtool_link_ksettings *link_ksettings) { struct mlx5_core_dev *mdev = priv->mdev; - u32 eth_proto_cap, eth_proto_admin; + struct mlx5e_port_eth_proto eproto; bool an_changes = false; u8 an_disable_admin; + bool ext_supported; + bool ext_requested; u8 an_disable_cap; bool an_disable; u32 link_modes; @@ -895,20 +990,33 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, u32 speed; int err; - speed = link_ksettings->base.speed; + u32 (*ethtool2ptys_adver_func)(const unsigned long *adver); - link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? - mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : - mlx5e_port_speed2linkmodes(speed); +#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) - err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN); + ext_requested = (link_ksettings->link_modes.advertising[0] > + MLX5E_PTYS_EXT); + ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); + + /*when ptys_extended_ethernet is set legacy link modes are deprecated */ + if (ext_requested != ext_supported) + return -EPROTONOSUPPORT; + + speed = link_ksettings->base.speed; + ethtool2ptys_adver_func = ext_requested ? 
+ mlx5e_ethtool2ptys_ext_adver_link : + mlx5e_ethtool2ptys_adver_link; + err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); if (err) { - netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n", + netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", __func__, err); goto out; } + link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? + ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) : + mlx5e_port_speed2linkmodes(mdev, speed); - link_modes = link_modes & eth_proto_cap; + link_modes = link_modes & eproto.cap; if (!link_modes) { netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", __func__); @@ -916,24 +1024,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, goto out; } - err = mlx5_query_port_proto_admin(mdev, ð_proto_admin, MLX5_PTYS_EN); - if (err) { - netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n", - __func__, err); - goto out; - } - - mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status, - &an_disable_cap, &an_disable_admin); + mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap, + &an_disable_admin); an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; an_changes = ((!an_disable && an_disable_admin) || (an_disable && !an_disable_admin)); - if (!an_changes && link_modes == eth_proto_admin) + if (!an_changes && link_modes == eproto.admin) goto out; - mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN); + mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); mlx5_toggle_port_link(mdev); out: @@ -1521,7 +1622,6 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, struct mlx5e_channels new_channels = {}; bool mode_changed; u8 cq_period_mode, current_cq_period_mode; - int err = 0; cq_period_mode = enable ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : @@ -1549,12 +1649,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, return 0; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - return err; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - return 0; + return mlx5e_safe_switch_channels(priv, &new_channels, NULL); } static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) @@ -1587,11 +1682,10 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val return 0; } - err = mlx5e_open_channels(priv, &new_channels); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) return err; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n", MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS) ? 
"ON" : "OFF"); @@ -1624,7 +1718,6 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; - int err; if (enable) { if (!mlx5e_check_fragmented_striding_rq_cap(mdev)) @@ -1646,12 +1739,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) return 0; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - return err; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - return 0; + return mlx5e_safe_switch_channels(priv, &new_channels, NULL); } static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) @@ -1694,12 +1782,8 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) return 0; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - return err; - - mlx5e_switch_priv_channels(priv, &new_channels, NULL); - return 0; + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); + return err; } static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8cfd2ec7c0a2..b5fdbd3190d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include "eswitch.h" #include "en.h" @@ -51,6 +52,7 @@ #include "en/xdp.h" #include "lib/eq.h" #include "en/monitor_stats.h" +#include "en/reporter.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -171,8 +173,7 @@ static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev, if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params)) return order_base_2(mlx5e_rx_get_linear_frag_sz(params)); - return MLX5E_MPWQE_STRIDE_SZ(mdev, - MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)); + return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev); } static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, @@ -950,7 +951,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (params->rx_dim_enabled) __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); - if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) + if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); return 0; @@ -1160,7 +1161,7 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) return 0; } -static void mlx5e_sq_recover(struct work_struct *work); +static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int txq_ix, struct mlx5e_params *params, @@ -1182,7 +1183,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->uar_map = mdev->mlx5e_res.bfreg.map; sq->min_inline_mode = params->tx_min_inline_mode; sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; - INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover); + INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); if (MLX5_IPSEC_DEV(c->priv->mdev)) set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); if (mlx5_accel_is_tls_device(c->priv->mdev)) @@ -1270,15 +1271,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev, return err; } -struct mlx5e_modify_sq_param { - int curr_state; - int next_state; - bool rl_update; - int rl_index; -}; - -static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, - struct mlx5e_modify_sq_param *p) +int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, + struct 
mlx5e_modify_sq_param *p) { void *in; void *sqc; @@ -1376,17 +1370,7 @@ err_free_txqsq: return err; } -static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) -{ - WARN_ONCE(sq->cc != sq->pc, - "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", - sq->sqn, sq->cc, sq->pc); - sq->cc = 0; - sq->dma_fifo_cc = 0; - sq->pc = 0; -} - -static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) +void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) { sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix); clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state); @@ -1395,7 +1379,7 @@ static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) netif_tx_start_queue(sq->txq); } -static inline void netif_tx_disable_queue(struct netdev_queue *txq) +void mlx5e_tx_disable_queue(struct netdev_queue *txq) { __netif_tx_lock_bh(txq); netif_tx_stop_queue(txq); @@ -1411,7 +1395,7 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) /* prevent netif_tx_wake_queue */ napi_synchronize(&c->napi); - netif_tx_disable_queue(sq->txq); + mlx5e_tx_disable_queue(sq->txq); /* last doorbell out, godspeed .. */ if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { @@ -1431,6 +1415,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) struct mlx5_rate_limit rl = {0}; cancel_work_sync(&sq->dim.work); + cancel_work_sync(&sq->recover_work); mlx5e_destroy_sq(mdev, sq->sqn); if (sq->rate_limit) { rl.rate = sq->rate_limit; @@ -1440,105 +1425,12 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) mlx5e_free_txqsq(sq); } -static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) -{ - unsigned long exp_time = jiffies + msecs_to_jiffies(2000); - - while (time_before(jiffies, exp_time)) { - if (sq->cc == sq->pc) - return 0; - - msleep(20); - } - - netdev_err(sq->channel->netdev, - "Wait for SQ 0x%x flush timeout (sq cc = 0x%x, sq pc = 0x%x)\n", - sq->sqn, sq->cc, sq->pc); - - return -ETIMEDOUT; -} - -static int mlx5e_sq_to_ready(struct mlx5e_txqsq *sq, int curr_state) -{ - struct mlx5_core_dev *mdev = sq->channel->mdev; - struct net_device *dev = sq->channel->netdev; - struct mlx5e_modify_sq_param msp = {0}; - int err; - - msp.curr_state = curr_state; - msp.next_state = MLX5_SQC_STATE_RST; - - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); - if (err) { - netdev_err(dev, "Failed to move sq 0x%x to reset\n", sq->sqn); - return err; - } - - memset(&msp, 0, sizeof(msp)); - msp.curr_state = MLX5_SQC_STATE_RST; - msp.next_state = MLX5_SQC_STATE_RDY; - - err = mlx5e_modify_sq(mdev, sq->sqn, &msp); - if (err) { - netdev_err(dev, "Failed to move sq 0x%x to ready\n", sq->sqn); - return err; - } - - return 0; -} - -static void mlx5e_sq_recover(struct work_struct *work) +static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) { - struct mlx5e_txqsq_recover *recover = - container_of(work, struct mlx5e_txqsq_recover, - recover_work); - struct mlx5e_txqsq *sq = container_of(recover, struct mlx5e_txqsq, - recover); - struct mlx5_core_dev *mdev = sq->channel->mdev; - struct net_device *dev = sq->channel->netdev; - u8 state; - int err; - - err = mlx5_core_query_sq_state(mdev, sq->sqn, &state); - if (err) { - netdev_err(dev, "Failed to query SQ 0x%x state. 
err = %d\n", - sq->sqn, err); - return; - } - - if (state != MLX5_RQC_STATE_ERR) { - netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn); - return; - } - - netif_tx_disable_queue(sq->txq); + struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, + recover_work); - if (mlx5e_wait_for_sq_flush(sq)) - return; - - /* If the interval between two consecutive recovers per SQ is too - * short, don't recover to avoid infinite loop of ERR_CQE -> recover. - * If we reached this state, there is probably a bug that needs to be - * fixed. let's keep the queue close and let tx timeout cleanup. - */ - if (jiffies_to_msecs(jiffies - recover->last_recover) < - MLX5E_SQ_RECOVER_MIN_INTERVAL) { - netdev_err(dev, "Recover SQ 0x%x canceled, too many error CQEs\n", - sq->sqn); - return; - } - - /* At this point, no new packets will arrive from the stack as TXQ is - * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all - * pending WQEs. SQ can safely reset the SQ. - */ - if (mlx5e_sq_to_ready(sq, state)) - return; - - mlx5e_reset_txqsq_cc_pc(sq); - sq->stats->recover++; - recover->last_recover = jiffies; - mlx5e_activate_txqsq(sq); + mlx5e_tx_reporter_err_cqe(sq); } static int mlx5e_open_icosq(struct mlx5e_channel *c, @@ -1950,6 +1842,29 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c, + struct mlx5e_params *params) +{ + int num_comp_vectors = mlx5_comp_vectors_count(c->mdev); + int irq; + + if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL)) + return -ENOMEM; + + for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) { + int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq)); + + cpumask_set_cpu(cpu, c->xps_cpumask); + } + + return 0; +} + +static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c) +{ + free_cpumask_var(c->xps_cpumask); +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_params *params, struct mlx5e_channel_param *cparam, @@ -1982,9 +1897,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->num_tc = params->num_tc; c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; - c->irq_desc = irq_to_desc(irq); + err = mlx5e_alloc_xps_cpumask(c, params); + if (err) + goto err_free_channel; + netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); @@ -2067,6 +1985,9 @@ err_close_icosq_cq: err_napi_del: netif_napi_del(&c->napi); + mlx5e_free_xps_cpumask(c); + +err_free_channel: kvfree(c); return err; @@ -2079,7 +2000,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); mlx5e_activate_rq(&c->rq); - netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix); + netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix); } static void mlx5e_deactivate_channel(struct mlx5e_channel *c) @@ -2107,6 +2028,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) mlx5e_close_tx_cqs(c); mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); + mlx5e_free_xps_cpumask(c); kvfree(c); } @@ -2380,6 +2302,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv, goto err_close_channels; } + if (!IS_ERR_OR_NULL(priv->tx_reporter)) + devlink_health_reporter_state_update(priv->tx_reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); + kvfree(cparam); return 0; @@ -2938,6 +2864,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) 
mlx5e_build_tx2sq_maps(priv); mlx5e_activate_channels(&priv->channels); + mlx5e_xdp_tx_enable(priv); netif_tx_start_all_queues(priv->netdev); if (mlx5e_is_vport_rep(priv)) @@ -2959,16 +2886,18 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) */ netif_tx_stop_all_queues(priv->netdev); netif_tx_disable(priv->netdev); + mlx5e_xdp_tx_disable(priv); mlx5e_deactivate_channels(&priv->channels); } -void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *new_chs, - mlx5e_fp_hw_modify hw_modify) +static void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify) { struct net_device *netdev = priv->netdev; int new_num_txqs; int carrier_ok; + new_num_txqs = new_chs->num * new_chs->params.num_tc; carrier_ok = netif_carrier_ok(netdev); @@ -2994,6 +2923,20 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, netif_carrier_on(netdev); } +int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, + struct mlx5e_channels *new_chs, + mlx5e_fp_hw_modify hw_modify) +{ + int err; + + err = mlx5e_open_channels(priv, new_chs); + if (err) + return err; + + mlx5e_switch_priv_channels(priv, new_chs, hw_modify); + return 0; +} + void mlx5e_timestamp_init(struct mlx5e_priv *priv) { priv->tstamp.tx_type = HWTSTAMP_TX_OFF; @@ -3207,6 +3150,7 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { int tc; + mlx5e_tx_reporter_destroy(priv); for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); } @@ -3409,13 +3353,12 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev, goto out; } - err = mlx5e_open_channels(priv, &new_channels); + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) goto out; priv->max_opened_tc = max_t(u8, priv->max_opened_tc, new_channels.params.num_tc); - mlx5e_switch_priv_channels(priv, &new_channels, NULL); out: mutex_unlock(&priv->state_lock); return err; @@ -3492,11 +3435,32 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, } } +void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) +{ + int i; + + for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) { + struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i]; + struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; + int j; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_dropped += sq_stats->dropped; + } + } +} + void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_sw_stats *sstats = &priv->stats.sw; struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; @@ -3511,12 +3475,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); } else { - mlx5e_grp_sw_update_stats(priv); - stats->rx_packets = sstats->rx_packets; - stats->rx_bytes = sstats->rx_bytes; - stats->tx_packets = sstats->tx_packets; - stats->tx_bytes = sstats->tx_bytes; - stats->tx_dropped = sstats->tx_queue_dropped; + mlx5e_fold_sw_stats64(priv, stats); } stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; @@ -3609,11 +3568,7 
@@ static int set_feature_lro(struct net_device *netdev, bool enable) goto out; } - err = mlx5e_open_channels(priv, &new_channels); - if (err) - goto out; - - mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro); + err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_modify_tirs_lro); out: mutex_unlock(&priv->state_lock); return err; @@ -3831,11 +3786,10 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, goto out; } - err = mlx5e_open_channels(priv, &new_channels); + err = mlx5e_safe_switch_channels(priv, &new_channels, set_mtu_cb); if (err) goto out; - mlx5e_switch_priv_channels(priv, &new_channels, set_mtu_cb); netdev->mtu = new_channels.params.sw_mtu; out: @@ -4178,31 +4132,13 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb, return features; } -static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, - struct mlx5e_txqsq *sq) -{ - struct mlx5_eq_comp *eq = sq->cq.mcq.eq; - u32 eqe_count; - - netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", - eq->core.eqn, eq->core.cons_index, eq->core.irqn); - - eqe_count = mlx5_eq_poll_irq_disabled(eq); - if (!eqe_count) - return false; - - netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn); - sq->channel->stats->eq_rearm++; - return true; -} - static void mlx5e_tx_timeout_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, tx_timeout_work); - struct net_device *dev = priv->netdev; - bool reopen_channels = false; - int i, err; + bool report_failed = false; + int err; + int i; rtnl_lock(); mutex_lock(&priv->state_lock); @@ -4211,31 +4147,22 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) goto unlock; for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) { - struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i); + struct netdev_queue *dev_queue = + netdev_get_tx_queue(priv->netdev, i); struct mlx5e_txqsq *sq = priv->txq2sq[i]; if (!netif_xmit_stopped(dev_queue)) continue; - netdev_err(dev, - "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n", - i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, - jiffies_to_usecs(jiffies - dev_queue->trans_start)); - - /* If we recover a lost interrupt, most likely TX timeout will - * be resolved, skip reopening channels - */ - if (!mlx5e_tx_timeout_eq_recover(dev, sq)) { - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - reopen_channels = true; - } + if (mlx5e_tx_reporter_timeout(sq)) + report_failed = true; } - if (!reopen_channels) + if (!report_failed) goto unlock; - mlx5e_close_locked(dev); - err = mlx5e_open_locked(dev); + mlx5e_close_locked(priv->netdev); + err = mlx5e_open_locked(priv->netdev); if (err) netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", @@ -4383,6 +4310,61 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +#ifdef CONFIG_MLX5_ESWITCH +static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, + int nlflags) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 mode, setting; + int err; + + err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting); + if (err) + return err; + mode = setting ? 
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB; + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + mode, + 0, 0, nlflags, filter_mask, NULL); +} + +static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags, struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct nlattr *attr, *br_spec; + u16 mode = BRIDGE_MODE_UNDEF; + u8 setting; + int rem; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + if (mode > BRIDGE_MODE_VEPA) + return -EINVAL; + + break; + } + + if (mode == BRIDGE_MODE_UNDEF) + return -EINVAL; + + setting = (mode == BRIDGE_MODE_VEPA) ? 1 : 0; + return mlx5_eswitch_set_vepa(mdev->priv.eswitch, setting); +} +#endif + const struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@ -4409,6 +4391,9 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif #ifdef CONFIG_MLX5_ESWITCH + .ndo_bridge_setlink = mlx5e_bridge_setlink, + .ndo_bridge_getlink = mlx5e_bridge_getlink, + /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, @@ -4908,6 +4893,7 @@ static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_initialize(priv); #endif + mlx5e_tx_reporter_create(priv); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 04736212a21c..a1a3e2774989 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -44,6 +44,7 @@ #include "en_tc.h" #include "en/tc_tun.h" #include "fs_core.h" +#include "lib/port_tun.h" #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) @@ -153,7 +154,7 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_uplink_rep_update_hw_counters(priv); else mlx5e_vf_rep_update_hw_counters(priv); @@ -162,27 +163,16 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) { struct mlx5e_sw_stats *s = &priv->stats.sw; - struct mlx5e_rq_stats *rq_stats; - struct mlx5e_sq_stats *sq_stats; - int i, j; + struct rtnl_link_stats64 stats64 = {}; memset(s, 0, sizeof(*s)); - for (i = 0; i < priv->channels.num; i++) { - struct mlx5e_channel *c = priv->channels.c[i]; - - rq_stats = c->rq.stats; - - s->rx_packets += rq_stats->packets; - s->rx_bytes += rq_stats->bytes; - - for (j = 0; j < priv->channels.params.num_tc; j++) { - sq_stats = c->sq[j].stats; + mlx5e_fold_sw_stats64(priv, &stats64); - s->tx_packets += sq_stats->packets; - s->tx_bytes += sq_stats->bytes; - s->tx_queue_dropped += sq_stats->dropped; - } - } + s->rx_packets = stats64.rx_packets; + s->rx_bytes = stats64.rx_bytes; + s->tx_packets = stats64.tx_packets; + s->tx_bytes = stats64.tx_bytes; + s->tx_queue_dropped = stats64.tx_dropped; } static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, @@ -195,8 +185,7 @@ static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, return; 
mutex_lock(&priv->state_lock); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_rep_update_sw_counters(priv); + mlx5e_rep_update_sw_counters(priv); mlx5e_rep_update_hw_counters(priv); mutex_unlock(&priv->state_lock); @@ -393,7 +382,8 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = { .set_pauseparam = mlx5e_uplink_rep_set_pauseparam, }; -static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) +static int mlx5e_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -410,20 +400,14 @@ static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) uplink_priv = netdev_priv(uplink_dev); } - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = ETH_ALEN; - if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) { - ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr); - } else { - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; + ppid->id_len = ETH_ALEN; + if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) { + ether_addr_copy(ppid->id, uplink_upper->dev_addr); + } else { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; - ether_addr_copy(attr->u.ppid.id, rep->hw_id); - } - break; - default: - return -EOPNOTSUPP; + ether_addr_copy(ppid->id, rep->hw_id); } return 0; @@ -596,6 +580,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { ether_addr_copy(e->h_dest, ha); ether_addr_copy(eth->h_dest, ha); + /* Update the encap source mac, in case that we delete + * the flows when encap source mac changed. 
+ */ + ether_addr_copy(eth->h_source, e->route_dev->dev_addr); mlx5e_tc_encap_flows_add(priv, e); } @@ -1057,14 +1045,23 @@ static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv, int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy; struct mlx5e_neigh_hash_entry *nhe; int err; + err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type); + if (err) + return err; nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh); if (!nhe) { err = mlx5e_rep_neigh_entry_create(priv, e, &nhe); - if (err) + if (err) { + mlx5_tun_entropy_refcount_dec(tun_entropy, + e->reformat_type); return err; + } } list_add(&e->encap_list, &nhe->encap_list); return 0; @@ -1073,6 +1070,9 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv; + struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy; struct mlx5e_neigh_hash_entry *nhe; list_del(&e->encap_list); @@ -1080,6 +1080,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, if (list_empty(&nhe->encap_list)) mlx5e_rep_neigh_entry_destroy(priv, nhe); + mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type); } static int mlx5e_vf_rep_open(struct net_device *dev) @@ -1096,7 +1097,8 @@ static int mlx5e_vf_rep_open(struct net_device *dev) if (!mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - rep->vport, MLX5_VPORT_ADMIN_STATE_UP)) + rep->vport, 1, + MLX5_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); unlock: @@ -1114,7 +1116,8 @@ static int mlx5e_vf_rep_close(struct net_device *dev) mutex_lock(&priv->state_lock); mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - rep->vport, MLX5_VPORT_ADMIN_STATE_DOWN); + rep->vport, 1, + MLX5_VPORT_ADMIN_STATE_DOWN); ret = mlx5e_close_locked(dev); mutex_unlock(&priv->state_lock); return ret; @@ -1126,9 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - int ret; + int ret, pf_num; + + ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num); + if (ret) + return ret; + + if (rep->vport == MLX5_VPORT_UPLINK) + ret = snprintf(buf, len, "p%d", pf_num); + else + ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1); - ret = snprintf(buf, len, "%d", rep->vport - 1); if (ret >= len) return -EOPNOTSUPP; @@ -1211,7 +1222,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) return false; rep = rpriv->rep; - return (rep->vport == FDB_UPLINK_VPORT); + return (rep->vport == MLX5_VPORT_UPLINK); } static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) @@ -1229,17 +1240,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_sw_stats *sstats = &priv->stats.sw; - - mlx5e_rep_update_sw_counters(priv); - - stats->rx_packets = sstats->rx_packets; - stats->rx_bytes = sstats->rx_bytes; - stats->tx_packets = sstats->tx_packets; - stats->tx_bytes = sstats->tx_bytes; - - stats->tx_dropped = sstats->tx_queue_dropped; + mlx5e_fold_sw_stats64(priv, stats); return 0; } @@ 
-1285,9 +1287,17 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) return 0; } -static const struct switchdev_ops mlx5e_rep_switchdev_ops = { - .switchdev_port_attr_get = mlx5e_attr_get, -}; +static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n"); + + if (vlan != 0) + return -EOPNOTSUPP; + + /* allow setting 0-vid for compatibility with libvirt */ + return 0; +} static const struct net_device_ops mlx5e_netdev_ops_vf_rep = { .ndo_open = mlx5e_vf_rep_open, @@ -1299,6 +1309,7 @@ static const struct net_device_ops mlx5e_netdev_ops_vf_rep = { .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, .ndo_change_mtu = mlx5e_vf_rep_change_mtu, + .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id, }; static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { @@ -1319,6 +1330,8 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { .ndo_set_vf_rate = mlx5e_set_vf_rate, .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_get_vf_stats = mlx5e_get_vf_stats, + .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, + .ndo_get_port_parent_id = mlx5e_rep_get_port_parent_id, }; bool mlx5e_eswitch_rep(struct net_device *netdev) @@ -1347,7 +1360,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev) params->sw_mtu = netdev->mtu; /* SQ */ - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; else params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE; @@ -1374,7 +1387,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_core_dev *mdev = priv->mdev; - if (rep->vport == FDB_UPLINK_VPORT) { + if (rep->vport == MLX5_VPORT_UPLINK) { SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev); netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; /* we want a persistent mac for the uplink rep */ @@ -1393,8 +1406,6 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->watchdog_timeo = 15 * HZ; - netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; - netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; @@ -1406,7 +1417,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_RXCSUM; - if (rep->vport != FDB_UPLINK_VPORT) + if (rep->vport != MLX5_VPORT_UPLINK) netdev->features |= NETIF_F_VLAN_CHALLENGED; netdev->features |= netdev->hw_features; @@ -1559,14 +1570,18 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } - if (rpriv->rep->vport == FDB_UPLINK_VPORT) { + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { uplink_priv = &rpriv->uplink_priv; + INIT_LIST_HEAD(&uplink_priv->unready_flows); + /* init shared tc flow table */ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht); if (err) goto destroy_tises; + mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev); + /* init indirect block notifications */ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list); uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event; @@ -1595,7 +1610,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); - if (rpriv->rep->vport == FDB_UPLINK_VPORT) { + if (rpriv->rep->vport == 
MLX5_VPORT_UPLINK) { /* clean indirect TC block notifications */ unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb); mlx5e_rep_indr_clean_block_privs(rpriv); @@ -1619,27 +1634,38 @@ static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv) static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data) { struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb); - struct mlx5_eqe *eqe = data; - if (event != MLX5_EVENT_TYPE_PORT_CHANGE) - return NOTIFY_DONE; + if (event == MLX5_EVENT_TYPE_PORT_CHANGE) { + struct mlx5_eqe *eqe = data; - switch (eqe->sub_type) { - case MLX5_PORT_CHANGE_SUBTYPE_DOWN: - case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: - queue_work(priv->wq, &priv->update_carrier_work); - break; - default: - return NOTIFY_DONE; + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + queue_work(priv->wq, &priv->update_carrier_work); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; } - return NOTIFY_OK; + if (event == MLX5_DEV_EVENT_PORT_AFFINITY) { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + + queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work); + + return NOTIFY_OK; + } + + return NOTIFY_DONE; } static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_rep_priv *rpriv = priv->ppriv; u16 max_mtu; netdev->min_mtu = ETH_MIN_MTU; @@ -1647,6 +1673,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); mlx5e_set_dev_port_mtu(priv); + INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work, + mlx5e_tc_reoffload_flows_work); + mlx5_lag_add(mdev, netdev); priv->events_nb.notifier_call = uplink_rep_async_event; mlx5_notifier_register(mdev, &priv->events_nb); @@ -1659,11 +1688,13 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_rep_priv *rpriv = priv->ppriv; #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_delete_app(priv); #endif mlx5_notifier_unregister(mdev, &priv->events_nb); + cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work); mlx5_lag_remove(mdev); } @@ -1714,7 +1745,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) rpriv->rep = rep; nch = mlx5e_get_max_num_channels(dev); - profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile; + profile = (rep->vport == MLX5_VPORT_UPLINK) ? 
&mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile; netdev = mlx5e_create_netdev(dev, profile, nch, rpriv); if (!netdev) { pr_warn("Failed to create representor netdev for vport %d\n", @@ -1727,7 +1758,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) rep->rep_if[REP_ETH].priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); - if (rep->vport == FDB_UPLINK_VPORT) { + if (rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_create_mdev_resources(dev); if (err) goto err_destroy_netdev; @@ -1763,7 +1794,7 @@ err_detach_netdev: mlx5e_detach_netdev(netdev_priv(netdev)); err_destroy_mdev_resources: - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_destroy_mdev_resources(dev); err_destroy_netdev: @@ -1783,7 +1814,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) unregister_netdev(netdev); mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); - if (rep->vport == FDB_UPLINK_VPORT) + if (rep->vport == MLX5_VPORT_UPLINK) mlx5e_destroy_mdev_resources(priv->mdev); mlx5e_destroy_netdev(priv); kfree(ppriv); /* mlx5e_rep_priv */ @@ -1801,25 +1832,18 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - int vport; + struct mlx5_eswitch_rep_if rep_if = {}; - for (vport = 0; vport < total_vfs; vport++) { - struct mlx5_eswitch_rep_if rep_if = {}; + rep_if.load = mlx5e_vport_rep_load; + rep_if.unload = mlx5e_vport_rep_unload; + rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; - rep_if.load = mlx5e_vport_rep_load; - rep_if.unload = mlx5e_vport_rep_unload; - rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; - mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH); - } + mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_ETH); } void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev) { struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - int vport; - for (vport = total_vfs - 1; vport >= 0; vport--) - mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH); + mlx5_eswitch_unregister_vport_reps(esw, REP_ETH); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index edd722824697..83b573b1abac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -37,6 +37,7 @@ #include #include "eswitch.h" #include "en.h" +#include "lib/port_tun.h" #ifdef CONFIG_MLX5_ESWITCH struct mlx5e_neigh_update_table { @@ -71,6 +72,11 @@ struct mlx5_rep_uplink_priv { */ struct list_head tc_indr_block_priv_list; struct notifier_block netdevice_nb; + + struct mlx5_tun_entropy tun_entropy; + + struct list_head unready_flows; + struct work_struct reoffload_flows_work; }; struct mlx5e_rep_priv { @@ -148,6 +154,7 @@ struct mlx5e_encap_entry { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ struct net_device *out_dev; + struct net_device *route_dev; int tunnel_type; int tunnel_hlen; int reformat_type; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f86e4804e83e..be396e5e4e39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -52,40 +52,45 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) return config->rx_filter == HWTSTAMP_FILTER_ALL; } -static 
inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, - void *data) +static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq, + u32 cqcc, void *data) { - u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc); + u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); - memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); + memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64)); } static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, - struct mlx5e_cq *cq, u32 cqcc) + struct mlx5_cqwq *wq, + u32 cqcc) { - mlx5e_read_cqe_slot(cq, cqcc, &cq->title); - cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt); - cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter); + struct mlx5e_cq_decomp *cqd = &rq->cqd; + struct mlx5_cqe64 *title = &cqd->title; + + mlx5e_read_cqe_slot(wq, cqcc, title); + cqd->left = be32_to_cpu(title->byte_cnt); + cqd->wqe_counter = be16_to_cpu(title->wqe_counter); rq->stats->cqe_compress_blks++; } -static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) +static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq, + struct mlx5e_cq_decomp *cqd, + u32 cqcc) { - mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr); - cq->mini_arr_idx = 0; + mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr); + cqd->mini_arr_idx = 0; } -static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) +static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n) { - struct mlx5_cqwq *wq = &cq->wq; - + u32 cqcc = wq->cc; u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1; u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); u32 wq_sz = mlx5_cqwq_get_size(wq); u32 ci_top = min_t(u32, wq_sz, ci + n); for (; ci < ci_top; ci++, n--) { - struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); cqe->op_own = op_own; } @@ -93,7 +98,7 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) if (unlikely(ci == wq_sz)) { op_own = !op_own; for (ci = 0; ci < n; ci++) { - struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); cqe->op_own = op_own; } @@ -101,68 +106,79 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) } static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, - struct mlx5e_cq *cq, u32 cqcc) + struct mlx5_cqwq *wq, + u32 cqcc) { - cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; - cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; - cq->title.op_own &= 0xf0; - cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz); - cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); + struct mlx5e_cq_decomp *cqd = &rq->cqd; + struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx]; + struct mlx5_cqe64 *title = &cqd->title; + + title->byte_cnt = mini_cqe->byte_cnt; + title->check_sum = mini_cqe->checksum; + title->op_own &= 0xf0; + title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz); + title->wqe_counter = cpu_to_be16(cqd->wqe_counter); if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) - cq->decmprs_wqe_counter += - mpwrq_get_cqe_consumed_strides(&cq->title); + cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title); else - cq->decmprs_wqe_counter = - mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1); + cqd->wqe_counter = + mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1); } static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, - struct mlx5e_cq *cq, u32 cqcc) + struct mlx5_cqwq *wq, + u32 cqcc) { - 
mlx5e_decompress_cqe(rq, cq, cqcc); - cq->title.rss_hash_type = 0; - cq->title.rss_hash_result = 0; + struct mlx5e_cq_decomp *cqd = &rq->cqd; + + mlx5e_decompress_cqe(rq, wq, cqcc); + cqd->title.rss_hash_type = 0; + cqd->title.rss_hash_result = 0; } static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, - struct mlx5e_cq *cq, + struct mlx5_cqwq *wq, int update_owner_only, int budget_rem) { - u32 cqcc = cq->wq.cc + update_owner_only; + struct mlx5e_cq_decomp *cqd = &rq->cqd; + u32 cqcc = wq->cc + update_owner_only; u32 cqe_count; u32 i; - cqe_count = min_t(u32, cq->decmprs_left, budget_rem); + cqe_count = min_t(u32, cqd->left, budget_rem); for (i = update_owner_only; i < cqe_count; - i++, cq->mini_arr_idx++, cqcc++) { - if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE) - mlx5e_read_mini_arr_slot(cq, cqcc); + i++, cqd->mini_arr_idx++, cqcc++) { + if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE) + mlx5e_read_mini_arr_slot(wq, cqd, cqcc); - mlx5e_decompress_cqe_no_hash(rq, cq, cqcc); - rq->handle_rx_cqe(rq, &cq->title); + mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); + rq->handle_rx_cqe(rq, &cqd->title); } - mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc); - cq->wq.cc = cqcc; - cq->decmprs_left -= cqe_count; + mlx5e_cqes_update_owner(wq, cqcc - wq->cc); + wq->cc = cqcc; + cqd->left -= cqe_count; rq->stats->cqe_compress_pkts += cqe_count; return cqe_count; } static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, - struct mlx5e_cq *cq, + struct mlx5_cqwq *wq, int budget_rem) { - mlx5e_read_title_slot(rq, cq, cq->wq.cc); - mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1); - mlx5e_decompress_cqe(rq, cq, cq->wq.cc); - rq->handle_rx_cqe(rq, &cq->title); - cq->mini_arr_idx++; + struct mlx5e_cq_decomp *cqd = &rq->cqd; + u32 cc = wq->cc; - return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; + mlx5e_read_title_slot(rq, wq, cc); + mlx5e_read_mini_arr_slot(wq, cqd, cc + 1); + mlx5e_decompress_cqe(rq, wq, cc); + rq->handle_rx_cqe(rq, &cqd->title); + cqd->mini_arr_idx++; + + return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; } static inline bool mlx5e_page_is_reserved(struct page *page) @@ -369,7 +385,7 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, static inline void mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, struct mlx5e_dma_info *dma_info, - int offset_from, int offset_to, u32 headlen) + int offset_from, u32 headlen) { const void *from = page_address(dma_info->page) + offset_from; /* Aligning len to sizeof(long) optimizes memcpy performance */ @@ -377,24 +393,7 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len, DMA_FROM_DEVICE); - skb_copy_to_linear_data_offset(skb, offset_to, from, len); -} - -static inline void -mlx5e_copy_skb_header_mpwqe(struct device *pdev, - struct sk_buff *skb, - struct mlx5e_dma_info *dma_info, - u32 offset, u32 headlen) -{ - u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); - - mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg); - - if (unlikely(offset + headlen > PAGE_SIZE)) { - dma_info++; - mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg, - headlen - headlen_pg); - } + skb_copy_to_linear_data(skb, from, len); } static void @@ -973,8 +972,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, } /* copy header */ - mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, - 0, headlen); + mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, 
head_wi->offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; @@ -1096,8 +1094,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w di++; } /* copy header */ - mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di, - head_offset, headlen); + mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; @@ -1203,16 +1200,17 @@ mpwrq_cqe_out: int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); + struct mlx5_cqwq *cqwq = &cq->wq; struct mlx5_cqe64 *cqe; int work_done = 0; if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) return 0; - if (cq->decmprs_left) - work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); + if (rq->cqd.left) + work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget); - cqe = mlx5_cqwq_get_cqe(&cq->wq); + cqe = mlx5_cqwq_get_cqe(cqwq); if (!cqe) { if (unlikely(work_done)) goto out; @@ -1222,21 +1220,21 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) do { if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { work_done += - mlx5e_decompress_cqes_start(rq, cq, + mlx5e_decompress_cqes_start(rq, cqwq, budget - work_done); continue; } - mlx5_cqwq_pop(&cq->wq); + mlx5_cqwq_pop(cqwq); rq->handle_rx_cqe(rq, cqe); - } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); + } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); out: if (rq->xdp_prog) mlx5e_xdp_rx_poll_complete(rq); - mlx5_cqwq_update_db_record(&cq->wq); + mlx5_cqwq_update_db_record(cqwq); /* ensure cq space is freed before enabling more cqes */ wmb(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index d3fe48ff9da9..1a78e05cbba8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -127,9 +127,9 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } -void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) +static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) { - struct mlx5e_sw_stats temp, *s = &temp; + struct mlx5e_sw_stats *s = &priv->stats.sw; int i; memset(s, 0, sizeof(*s)); @@ -212,8 +212,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->tx_cqes += sq_stats->cqes; } } - - memcpy(&priv->stats.sw, s, sizeof(*s)); } static const struct counter_desc q_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index fe91ec06e3c7..4640d4f986f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -277,7 +277,6 @@ struct mlx5e_stats_grp { extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; extern const int mlx5e_num_stats_grps; -void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv); void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv); #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index cae6c6d48984..b4967a0ff8c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -76,6 +75,7 @@ 
enum { MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2), MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3), MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4), + MLX5E_TC_FLOW_NOT_READY = BIT(MLX5E_TC_FLOW_BASE + 5), }; #define MLX5E_TC_MAX_SPLITS 1 @@ -117,6 +117,7 @@ struct mlx5e_tc_flow { struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ struct list_head hairpin; /* flows sharing the same hairpin */ struct list_head peer; /* flows with peer flow */ + struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */ union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; @@ -128,6 +129,7 @@ struct mlx5e_tc_flow_parse_attr { struct net_device *filter_dev; struct mlx5_flow_spec spec; int num_mod_hdr_actions; + int max_mod_hdr_actions; void *mod_hdr_actions; int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; }; @@ -850,12 +852,12 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, int out_index); static int mlx5e_attach_encap(struct mlx5e_priv *priv, - struct ip_tunnel_info *tun_info, - struct net_device *mirred_dev, - struct net_device **encap_dev, struct mlx5e_tc_flow *flow, + struct net_device *mirred_dev, + int out_index, struct netlink_ext_ack *extack, - int out_index); + struct net_device **encap_dev, + bool *encap_valid); static struct mlx5_flow_handle * mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, @@ -927,21 +929,42 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, flow->flags &= ~MLX5E_TC_FLOW_SLOW; } +static void add_unready_flow(struct mlx5e_tc_flow *flow) +{ + struct mlx5_rep_uplink_priv *uplink_priv; + struct mlx5e_rep_priv *rpriv; + struct mlx5_eswitch *esw; + + esw = flow->priv->mdev->priv.eswitch; + rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + uplink_priv = &rpriv->uplink_priv; + + flow->flags |= MLX5E_TC_FLOW_NOT_READY; + list_add_tail(&flow->unready, &uplink_priv->unready_flows); +} + +static void remove_unready_flow(struct mlx5e_tc_flow *flow) +{ + list_del(&flow->unready); + flow->flags &= ~MLX5E_TC_FLOW_NOT_READY; +} + static int mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; u32 max_chain = mlx5_eswitch_get_chain_range(esw); struct mlx5_esw_flow_attr *attr = flow->esw_attr; + struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr; u16 max_prio = mlx5_eswitch_get_prio_range(esw); struct net_device *out_dev, *encap_dev = NULL; struct mlx5_fc *counter = NULL; struct mlx5e_rep_priv *rpriv; struct mlx5e_priv *out_priv; - int err = 0, encap_err = 0; + bool encap_valid = true; + int err = 0; int out_index; if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) { @@ -967,17 +990,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) continue; - mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index]; + mirred_ifindex = parse_attr->mirred_ifindex[out_index]; out_dev = __dev_get_by_index(dev_net(priv->netdev), mirred_ifindex); - err = mlx5e_attach_encap(priv, - &parse_attr->tun_info[out_index], - out_dev, &encap_dev, flow, - extack, out_index); - if (err && err != -EAGAIN) + err = mlx5e_attach_encap(priv, flow, out_dev, out_index, + extack, &encap_dev, &encap_valid); + if (err) goto err_attach_encap; - if (err == -EAGAIN) - encap_err = err; + out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; 
attr->dests[out_index].rep = rpriv->rep; @@ -1005,10 +1025,11 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, attr->counter = counter; } - /* we get here if (1) there's no error or when - * (2) there's an encap action and we're on -EAGAIN (no valid neigh) + /* we get here if one of the following takes place: + * (1) there's no error + * (2) there's an encap action and we don't have valid neigh */ - if (encap_err == -EAGAIN) { + if (!encap_valid) { /* continue with goto slow path rule instead */ struct mlx5_esw_flow_attr slow_attr; @@ -1048,6 +1069,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr slow_attr; int out_index; + if (flow->flags & MLX5E_TC_FLOW_NOT_READY) { + remove_unready_flow(flow); + kvfree(attr->parse_attr); + return; + } + if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { if (flow->flags & MLX5E_TC_FLOW_SLOW) mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr); @@ -1302,101 +1329,89 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, - struct net_device *filter_dev) + struct net_device *filter_dev, u8 *match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); - - struct flow_dissector_key_control *enc_control = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - f->key); - int err = 0; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_control enc_control; + int err; err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, - headers_c, headers_v); + headers_c, headers_v, match_level); if (err) { NL_SET_ERR_MSG_MOD(extack, "failed to parse tunnel attributes"); return err; } - if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - f->mask); + flow_rule_match_enc_control(rule, &enc_control); + + if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_enc_ipv4_addrs(rule, &match); MLX5_SET(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4, - ntohl(mask->src)); + ntohl(match.mask->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4, - ntohl(key->src)); + ntohl(match.key->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4, - ntohl(mask->dst)); + ntohl(match.mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4, - ntohl(key->dst)); + ntohl(match.key->dst)); MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP); - } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, - f->mask); + } else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + 
flow_rule_match_enc_ipv6_addrs(rule, &match); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); + &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6)); MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { - struct flow_dissector_key_ip *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IP, - f->key); - struct flow_dissector_key_ip *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IP, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_match_ip match; - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3); + flow_rule_match_enc_ip(rule, &match); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, + match.mask->tos & 0x3); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, + match.key->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, + match.mask->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, + match.key->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, + match.mask->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, + match.key->ttl); - if (mask->ttl && + if (match.mask->ttl && !MLX5_CAP_ESW_FLOWTABLE_FDB (priv->mdev, ft_field_support.outer_ipv4_ttl)) { @@ -1426,7 +1441,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, struct net_device *filter_dev, - u8 *match_level) + u8 *match_level, u8 *tunnel_match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -1437,12 +1452,14 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, misc_parameters); void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 addr_type = 0; u8 ip_proto = 0; *match_level = MLX5_MATCH_NONE; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -1461,23 +1478,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_ENC_IP))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", - 
f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } - if ((dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) || - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) && - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - f->key); - switch (key->addr_type) { + if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) && + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_enc_control(rule, &match); + switch (match.key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - if (parse_tunnel_attr(priv, spec, f, filter_dev)) + if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) return -EOPNOTSUPP; break; default: @@ -1493,35 +1508,27 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, inner_headers); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, - ntohs(mask->n_proto)); + ntohs(match.mask->n_proto)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, - ntohs(key->n_proto)); + ntohs(match.key->n_proto)); - if (mask->n_proto) + if (match.mask->n_proto) *match_level = MLX5_MATCH_L2; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); - if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { - if (key->vlan_tpid == htons(ETH_P_8021AD)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id || + match.mask->vlan_priority || + match.mask->vlan_tpid) { + if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, @@ -1533,11 +1540,15 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, cvlan_tag, 1); } - MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, + match.mask->vlan_id); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, + match.key->vlan_id); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, + match.mask->vlan_priority); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, + match.key->vlan_priority); *match_level = MLX5_MATCH_L2; } @@ -1547,17 +1558,14 @@ static 
int __parse_cls_flower(struct mlx5e_priv *priv, *match_level = MLX5_MATCH_L2; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CVLAN, - f->key); - struct flow_dissector_key_vlan *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CVLAN, - f->mask); - if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { - if (key->vlan_tpid == htons(ETH_P_8021AD)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + if (match.mask->vlan_id || + match.mask->vlan_priority || + match.mask->vlan_tpid) { + if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { MLX5_SET(fte_match_set_misc, misc_c, outer_second_svlan_tag, 1); MLX5_SET(fte_match_set_misc, misc_v, @@ -1570,69 +1578,58 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, - mask->vlan_id); + match.mask->vlan_id); MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, - key->vlan_id); + match.key->vlan_id); MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, - mask->vlan_priority); + match.mask->vlan_priority); MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, - key->vlan_priority); + match.key->vlan_priority); *match_level = MLX5_MATCH_L2; } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); - struct flow_dissector_key_eth_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + flow_rule_match_eth_addrs(rule, &match); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dmac_47_16), - mask->dst); + match.mask->dst); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16), - key->dst); + match.key->dst); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, smac_47_16), - mask->src); + match.mask->src); ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16), - key->src); + match.key->src); - if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) + if (!is_zero_ether_addr(match.mask->src) || + !is_zero_ether_addr(match.mask->dst)) *match_level = MLX5_MATCH_L2; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; - struct flow_dissector_key_control *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->mask); - addr_type = key->addr_type; + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; /* the HW doesn't support frag first/later */ - if (mask->flags & FLOW_DIS_FIRST_FRAG) + if (match.mask->flags & FLOW_DIS_FIRST_FRAG) return -EOPNOTSUPP; - if (mask->flags & FLOW_DIS_IS_FRAGMENT) { + if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, - key->flags & FLOW_DIS_IS_FRAGMENT); + match.key->flags & FLOW_DIS_IS_FRAGMENT); /* the HW doesn't need L3 inline to match on frag=no */ - if (!(key->flags & 
FLOW_DIS_IS_FRAGMENT)) + if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT)) *match_level = MLX5_MATCH_L2; /* *** L2 attributes parsing up to here *** */ else @@ -1640,102 +1637,85 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, } } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - ip_proto = key->ip_proto; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + ip_proto = match.key->ip_proto; MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, - mask->ip_proto); + match.mask->ip_proto); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, - key->ip_proto); + match.key->ip_proto); - if (mask->ip_proto) + if (match.mask->ip_proto) *match_level = MLX5_MATCH_L3; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); + struct flow_match_ipv4_addrs match; + flow_rule_match_ipv4_addrs(rule, &match); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4), - &mask->src, sizeof(mask->src)); + &match.mask->src, sizeof(match.mask->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4), - &key->src, sizeof(key->src)); + &match.key->src, sizeof(match.key->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - &mask->dst, sizeof(mask->dst)); + &match.mask->dst, sizeof(match.mask->dst)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), - &key->dst, sizeof(key->dst)); + &match.key->dst, sizeof(match.key->dst)); - if (mask->src || mask->dst) + if (match.mask->src || match.mask->dst) *match_level = MLX5_MATCH_L3; } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + flow_rule_match_ipv6_addrs(rule, &match); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &mask->src, sizeof(mask->src)); + &match.mask->src, sizeof(match.mask->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6), - &key->src, sizeof(key->src)); + &match.key->src, sizeof(match.key->src)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &mask->dst, sizeof(mask->dst)); + &match.mask->dst, sizeof(match.mask->dst)); memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), - &key->dst, sizeof(key->dst)); + &match.key->dst, sizeof(match.key->dst)); - if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY || - ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY) + if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY || + ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY) *match_level = MLX5_MATCH_L3; } - if 
(dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) { - struct flow_dissector_key_ip *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->key); - struct flow_dissector_key_ip *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3); + flow_rule_match_ip(rule, &match); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, + match.mask->tos & 0x3); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, + match.key->tos & 0x3); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, + match.mask->tos >> 2); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, + match.key->tos >> 2); - MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, + match.mask->ttl); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, + match.key->ttl); - if (mask->ttl && + if (match.mask->ttl && !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ft_field_support.outer_ipv4_ttl)) { NL_SET_ERR_MSG_MOD(extack, @@ -1743,44 +1723,39 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - if (mask->tos || mask->ttl) + if (match.mask->tos || match.mask->ttl) *match_level = MLX5_MATCH_L3; } /* *** L3 attributes parsing up to here *** */ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); switch (ip_proto) { case IPPROTO_TCP: MLX5_SET(fte_match_set_lyr_2_4, headers_c, - tcp_sport, ntohs(mask->src)); + tcp_sport, ntohs(match.mask->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - tcp_sport, ntohs(key->src)); + tcp_sport, ntohs(match.key->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_c, - tcp_dport, ntohs(mask->dst)); + tcp_dport, ntohs(match.mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - tcp_dport, ntohs(key->dst)); + tcp_dport, ntohs(match.key->dst)); break; case IPPROTO_UDP: MLX5_SET(fte_match_set_lyr_2_4, headers_c, - udp_sport, ntohs(mask->src)); + udp_sport, ntohs(match.mask->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - udp_sport, ntohs(key->src)); + udp_sport, ntohs(match.key->src)); MLX5_SET(fte_match_set_lyr_2_4, headers_c, - udp_dport, ntohs(mask->dst)); + udp_dport, ntohs(match.mask->dst)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, - udp_dport, ntohs(key->dst)); + udp_dport, ntohs(match.key->dst)); break; default: NL_SET_ERR_MSG_MOD(extack, @@ -1790,26 +1765,20 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, return -EINVAL; } - if (mask->src || mask->dst) + if (match.mask->src || match.mask->dst) *match_level = MLX5_MATCH_L4; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) { - struct flow_dissector_key_tcp *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->key); - 
struct flow_dissector_key_tcp *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { + struct flow_match_tcp match; + flow_rule_match_tcp(rule, &match); MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags, - ntohs(mask->flags)); + ntohs(match.mask->flags)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags, - ntohs(key->flags)); + ntohs(match.key->flags)); - if (mask->flags) + if (match.mask->flags) *match_level = MLX5_MATCH_L4; } @@ -1826,15 +1795,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; + u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; struct mlx5_eswitch_rep *rep; - u8 match_level; int err; - err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); + err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { rep = rpriv->rep; - if (rep->vport != FDB_UPLINK_VPORT && + if (rep->vport != MLX5_VPORT_UPLINK && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && esw->offloads.inline_mode < match_level)) { NL_SET_ERR_MSG_MOD(extack, @@ -1846,10 +1815,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv, } } - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { flow->esw_attr->match_level = match_level; - else + flow->esw_attr->tunnel_match_level = tunnel_match_level; + } else { flow->nic_attr->match_level = match_level; + } return err; } @@ -1862,27 +1833,29 @@ struct pedit_headers { struct udphdr udp; }; +struct pedit_headers_action { + struct pedit_headers vals; + struct pedit_headers masks; + u32 pedits; +}; + static int pedit_header_offsets[] = { - [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), - [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), - [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), - [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), - [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), + [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth), + [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4), + [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6), + [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp), + [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp), }; #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype]) static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset, - struct pedit_headers *masks, - struct pedit_headers *vals) + struct pedit_headers_action *hdrs) { u32 *curr_pmask, *curr_pval; - if (hdr_type >= __PEDIT_HDR_TYPE_MAX) - goto out_err; - - curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset); - curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset); + curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset); + curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset); if (*curr_pmask & mask) /* disallow acting twice on the same location */ goto out_err; @@ -1934,12 +1907,11 @@ static struct mlx5_fields fields[] = { OFFLOAD(UDP_DPORT, 2, udp.dest, 0), }; -/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at - * max from the SW pedit action. 
On success, it says how many HW actions were - * actually parsed. +/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at + * max from the SW pedit action. On success, attr->num_mod_hdr_actions + * says how many HW actions were actually parsed. */ -static int offload_pedit_fields(struct pedit_headers *masks, - struct pedit_headers *vals, +static int offload_pedit_fields(struct pedit_headers_action *hdrs, struct mlx5e_tc_flow_parse_attr *parse_attr, struct netlink_ext_ack *extack) { @@ -1954,15 +1926,17 @@ static int offload_pedit_fields(struct pedit_headers *masks, __be16 mask_be16; void *action; - set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET]; - add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD]; - set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET]; - add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; + set_masks = &hdrs[0].masks; + add_masks = &hdrs[1].masks; + set_vals = &hdrs[0].vals; + add_vals = &hdrs[1].vals; action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); - action = parse_attr->mod_hdr_actions; - max_actions = parse_attr->num_mod_hdr_actions; - nactions = 0; + action = parse_attr->mod_hdr_actions + + parse_attr->num_mod_hdr_actions * action_size; + + max_actions = parse_attr->max_mod_hdr_actions; + nactions = parse_attr->num_mod_hdr_actions; for (i = 0; i < ARRAY_SIZE(fields); i++) { f = &fields[i]; @@ -2053,12 +2027,14 @@ static int offload_pedit_fields(struct pedit_headers *masks, } static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, - const struct tc_action *a, int namespace, + struct pedit_headers_action *hdrs, + int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr) { int nkeys, action_size, max_actions; - nkeys = tcf_pedit_nkeys(a); + nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits + + hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits; action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */ @@ -2073,62 +2049,67 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, if (!parse_attr->mod_hdr_actions) return -ENOMEM; - parse_attr->num_mod_hdr_actions = max_actions; + parse_attr->max_mod_hdr_actions = max_actions; return 0; } static const struct pedit_headers zero_masks = {}; static int parse_tc_pedit_action(struct mlx5e_priv *priv, - const struct tc_action *a, int namespace, + const struct flow_action_entry *act, int namespace, struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, struct netlink_ext_ack *extack) { - struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks; - int nkeys, i, err = -EOPNOTSUPP; + u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 
0 : 1; + int err = -EOPNOTSUPP; u32 mask, val, offset; - u8 cmd, htype; + u8 htype; - nkeys = tcf_pedit_nkeys(a); + htype = act->mangle.htype; + err = -EOPNOTSUPP; /* can't be all optimistic */ - memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX); - memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX); + if (htype == FLOW_ACT_MANGLE_UNSPEC) { + NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded"); + goto out_err; + } - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); - cmd = tcf_pedit_cmd(a, i); - err = -EOPNOTSUPP; /* can't be all optimistic */ + mask = act->mangle.mask; + val = act->mangle.val; + offset = act->mangle.offset; - if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) { - NL_SET_ERR_MSG_MOD(extack, - "legacy pedit isn't offloaded"); - goto out_err; - } + err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]); + if (err) + goto out_err; - if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) { - NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded"); - goto out_err; - } + hdrs[cmd].pedits++; + + return 0; +out_err: + return err; +} - mask = tcf_pedit_mask(a, i); - val = tcf_pedit_val(a, i); - offset = tcf_pedit_offset(a, i); +static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct pedit_headers_action *hdrs, + struct netlink_ext_ack *extack) +{ + struct pedit_headers *cmd_masks; + int err; + u8 cmd; - err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]); + if (!parse_attr->mod_hdr_actions) { + err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr); if (err) goto out_err; } - err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); - if (err) - goto out_err; - - err = offload_pedit_fields(masks, vals, parse_attr, extack); + err = offload_pedit_fields(hdrs, parse_attr, extack); if (err < 0) goto out_dealloc_parsed_actions; for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) { - cmd_masks = &masks[cmd]; + cmd_masks = &hdrs[cmd].masks; if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) { NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field"); @@ -2178,17 +2159,22 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, } static bool modify_header_match_supported(struct mlx5_flow_spec *spec, - struct tcf_exts *exts, + struct flow_action *flow_action, + u32 actions, struct netlink_ext_ack *extack) { - const struct tc_action *a; + const struct flow_action_entry *act; bool modify_ip_header; u8 htype, ip_proto; void *headers_v; u16 ethertype; - int nkeys, i; + int i; + + if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); + else + headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); - headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); /* for non-IP we only re-write MACs, so we're okay */ @@ -2196,20 +2182,16 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, goto out_ok; modify_ip_header = false; - tcf_exts_for_each_action(i, a, exts) { - int k; - - if (!is_tcf_pedit(a)) + flow_action_for_each(i, act, flow_action) { + if (act->id != FLOW_ACTION_MANGLE && + act->id != FLOW_ACTION_ADD) continue; - nkeys = tcf_pedit_nkeys(a); - for (k = 0; k < nkeys; k++) { - htype = tcf_pedit_htype(a, k); - if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || - htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { - modify_ip_header 
= true; - break; - } + htype = act->mangle.htype; + if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 || + htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) { + modify_ip_header = true; + break; } } @@ -2227,7 +2209,7 @@ out_ok: } static bool actions_match_supported(struct mlx5e_priv *priv, - struct tcf_exts *exts, + struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) @@ -2244,7 +2226,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return false; if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(&parse_attr->spec, exts, + return modify_header_match_supported(&parse_attr->spec, + flow_action, actions, extack); return true; @@ -2264,52 +2247,50 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) return (fsystem_guid == psystem_guid); } -static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, +static int parse_tc_nic_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { struct mlx5_nic_flow_attr *attr = flow->nic_attr; - const struct tc_action *a; + struct pedit_headers_action hdrs[2] = {}; + const struct flow_action_entry *act; u32 action = 0; int err, i; - if (!tcf_exts_has_actions(exts)) + if (!flow_action_has_entries(flow_action)) return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; - tcf_exts_for_each_action(i, a, exts) { - if (is_tcf_gact_shot(a)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: action |= MLX5_FLOW_CONTEXT_ACTION_DROP; if (MLX5_CAP_FLOWTABLE(priv->mdev, flow_table_properties_nic_receive.flow_counter)) action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; - continue; - } - - if (is_tcf_pedit(a)) { - err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL, - parse_attr, extack); + break; + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_ADD: + err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr, hdrs, extack); if (err) return err; action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - continue; - } - - if (is_tcf_csum(a)) { + break; + case FLOW_ACTION_CSUM: if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a), + act->csum_flags, extack)) - continue; + break; return -EOPNOTSUPP; - } - - if (is_tcf_mirred_egress_redirect(a)) { - struct net_device *peer_dev = tcf_mirred_dev(a); + case FLOW_ACTION_REDIRECT: { + struct net_device *peer_dev = act->dev; if (priv->netdev->netdev_ops == peer_dev->netdev_ops && same_hw_devs(priv, netdev_priv(peer_dev))) { @@ -2324,11 +2305,10 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, peer_dev->name); return -EINVAL; } - continue; - } - - if (is_tcf_skbedit_mark(a)) { - u32 mark = tcf_skbedit_mark(a); + } + break; + case FLOW_ACTION_MARK: { + u32 mark = act->mark; if (mark & ~MLX5E_TC_FLOW_ID_MASK) { NL_SET_ERR_MSG_MOD(extack, @@ -2338,14 +2318,23 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, attr->flow_tag = mark; action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - continue; + } + break; + default: + return -EINVAL; } + } - return -EINVAL; + if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || + hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { + err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr, hdrs, extack); + if (err) + return err; } attr->action = action; - if 
(!actions_match_supported(priv, exts, parse_attr, flow, extack)) + if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; return 0; @@ -2371,31 +2360,37 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, peer_priv = netdev_priv(peer_netdev); return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && - (priv->netdev->netdev_ops == peer_netdev->netdev_ops) && - same_hw_devs(priv, peer_priv) && - MLX5_VPORT_MANAGER(peer_priv->mdev) && - (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS)); + mlx5e_eswitch_rep(priv->netdev) && + mlx5e_eswitch_rep(peer_netdev) && + same_hw_devs(priv, peer_priv)); } static int mlx5e_attach_encap(struct mlx5e_priv *priv, - struct ip_tunnel_info *tun_info, - struct net_device *mirred_dev, - struct net_device **encap_dev, struct mlx5e_tc_flow *flow, + struct net_device *mirred_dev, + int out_index, struct netlink_ext_ack *extack, - int out_index) + struct net_device **encap_dev, + bool *encap_valid) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - unsigned short family = ip_tunnel_info_af(tun_info); struct mlx5_esw_flow_attr *attr = flow->esw_attr; - struct ip_tunnel_key *key = &tun_info->key; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct ip_tunnel_info *tun_info; + struct ip_tunnel_key *key; struct mlx5e_encap_entry *e; + unsigned short family; uintptr_t hash_key; bool found = false; int err = 0; + parse_attr = attr->parse_attr; + tun_info = &parse_attr->tun_info[out_index]; + family = ip_tunnel_info_af(tun_info); + key = &tun_info->key; + hash_key = hash_encap_info(key); hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, @@ -2426,7 +2421,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, else if (family == AF_INET6) err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); - if (err && err != -EAGAIN) + if (err) goto out_err; hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); @@ -2438,8 +2433,9 @@ attach_flow: if (e->flags & MLX5_ENCAP_ENTRY_VALID) { attr->dests[out_index].encap_id = e->encap_id; attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; + *encap_valid = true; } else { - err = -EAGAIN; + *encap_valid = false; } return err; @@ -2450,7 +2446,7 @@ out_err: } static int parse_tc_vlan_action(struct mlx5e_priv *priv, - const struct tc_action *a, + const struct flow_action_entry *act, struct mlx5_esw_flow_attr *attr, u32 *action) { @@ -2459,7 +2455,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, if (vlan_idx >= MLX5_FS_VLAN_DEPTH) return -EOPNOTSUPP; - if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { + switch (act->id) { + case FLOW_ACTION_VLAN_POP: if (vlan_idx) { if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, MLX5_FS_VLAN_DEPTH)) @@ -2469,10 +2466,11 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, } else { *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; } - } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { - attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a); - attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a); - attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a); + break; + case FLOW_ACTION_VLAN_PUSH: + attr->vlan_vid[vlan_idx] = act->vlan.vid; + attr->vlan_prio[vlan_idx] = act->vlan.prio; + attr->vlan_proto[vlan_idx] = act->vlan.proto; if (!attr->vlan_proto[vlan_idx]) attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); @@ -2484,13 +2482,15 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; } else { if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && - 
(tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || - tcf_vlan_push_prio(a))) + (act->vlan.proto != htons(ETH_P_8021Q) || + act->vlan.prio)) return -EOPNOTSUPP; *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; } - } else { /* action is TCA_VLAN_ACT_MODIFY */ + break; + default: + /* action is FLOW_ACT_VLAN_MANGLE */ return -EOPNOTSUPP; } @@ -2499,58 +2499,56 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv, return 0; } -static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, +static int parse_tc_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { + struct pedit_headers_action hdrs[2] = {}; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct ip_tunnel_info *info = NULL; - const struct tc_action *a; + const struct ip_tunnel_info *info = NULL; + const struct flow_action_entry *act; bool encap = false; u32 action = 0; int err, i; - if (!tcf_exts_has_actions(exts)) + if (!flow_action_has_entries(flow_action)) return -EINVAL; attr->in_rep = rpriv->rep; attr->in_mdev = priv->mdev; - tcf_exts_for_each_action(i, a, exts) { - if (is_tcf_gact_shot(a)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT; - continue; - } - - if (is_tcf_pedit(a)) { - err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, - parse_attr, extack); + break; + case FLOW_ACTION_MANGLE: + case FLOW_ACTION_ADD: + err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB, + parse_attr, hdrs, extack); if (err) return err; action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; attr->split_count = attr->out_count; - continue; - } - - if (is_tcf_csum(a)) { + break; + case FLOW_ACTION_CSUM: if (csum_offload_supported(priv, action, - tcf_csum_update_flags(a), - extack)) - continue; + act->csum_flags, extack)) + break; return -EOPNOTSUPP; - } - - if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_MIRRED: { struct mlx5e_priv *out_priv; struct net_device *out_dev; - out_dev = tcf_mirred_dev(a); + out_dev = act->dev; if (!out_dev) { /* out_dev is NULL when filters with * non-existing mirred device are replayed to @@ -2569,8 +2567,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; - if (switchdev_port_same_parent_id(priv->netdev, - out_dev) || + if (netdev_port_same_parent_id(priv->netdev, + out_dev) || is_merged_eswitch_dev(priv, out_dev)) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); @@ -2615,35 +2613,29 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, priv->netdev->name, out_dev->name); return -EINVAL; } - continue; - } - - if (is_tcf_tunnel_set(a)) { - info = tcf_tunnel_info(a); + } + break; + case FLOW_ACTION_TUNNEL_ENCAP: + info = act->tunnel; if (info) encap = true; else return -EOPNOTSUPP; - continue; - } - - if (is_tcf_vlan(a)) { - err = parse_tc_vlan_action(priv, a, attr, &action); + break; + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_POP: + err = parse_tc_vlan_action(priv, act, attr, &action); if (err) return err; attr->split_count = attr->out_count; 
- continue; - } - - if (is_tcf_tunnel_release(a)) { + break; + case FLOW_ACTION_TUNNEL_DECAP: action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; - continue; - } - - if (is_tcf_gact_goto_chain(a)) { - u32 dest_chain = tcf_gact_goto_chain_index(a); + break; + case FLOW_ACTION_GOTO: { + u32 dest_chain = act->chain_index; u32 max_chain = mlx5_eswitch_get_chain_range(esw); if (dest_chain <= attr->chain) { @@ -2656,15 +2648,23 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, } action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = dest_chain; - - continue; + break; + } + default: + return -EINVAL; } + } - return -EINVAL; + if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || + hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { + err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, + parse_attr, hdrs, extack); + if (err) + return err; } attr->action = action; - if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) + if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack)) return -EOPNOTSUPP; if (attr->dest_chain) { @@ -2724,15 +2724,22 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags) static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) { struct mlx5_esw_flow_attr *attr = flow->esw_attr; - bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT && + bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK && flow->flags & MLX5E_TC_FLOW_INGRESS; bool act_is_encap = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS); - return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) && - (is_rep_ingress || act_is_encap); + if (!esw_paired) + return false; + + if ((mlx5_lag_is_sriov(attr->in_mdev) || + mlx5_lag_is_multipath(attr->in_mdev)) && + (is_rep_ingress || act_is_encap)) + return true; + + return false; } static int @@ -2767,17 +2774,40 @@ err_free: return err; } -static int +static void +mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr, + struct mlx5e_priv *priv, + struct mlx5e_tc_flow_parse_attr *parse_attr, + struct tc_cls_flower_offload *f, + struct mlx5_eswitch_rep *in_rep, + struct mlx5_core_dev *in_mdev) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + esw_attr->parse_attr = parse_attr; + esw_attr->chain = f->common.chain_index; + esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; + + esw_attr->in_rep = in_rep; + esw_attr->in_mdev = in_mdev; + + if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) == + MLX5_COUNTER_SOURCE_ESWITCH) + esw_attr->counter_dev = in_mdev; + else + esw_attr->counter_dev = priv->mdev; +} + +static struct mlx5e_tc_flow * __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, u16 flow_flags, struct net_device *filter_dev, struct mlx5_eswitch_rep *in_rep, - struct mlx5_core_dev *in_mdev, - struct mlx5e_tc_flow **__flow) + struct mlx5_core_dev *in_mdev) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow *flow; int attr_size, err; @@ -2788,45 +2818,41 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, &parse_attr, &flow); if (err) goto out; + parse_attr->filter_dev = filter_dev; - flow->esw_attr->parse_attr = parse_attr; + mlx5e_flow_esw_attr_init(flow->esw_attr, + priv, parse_attr, + f, in_rep, in_mdev); + err = parse_cls_flower(flow->priv, flow, 
&parse_attr->spec, f, filter_dev); if (err) goto err_free; - flow->esw_attr->chain = f->common.chain_index; - flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; - err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack); + err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack); if (err) goto err_free; - flow->esw_attr->in_rep = in_rep; - flow->esw_attr->in_mdev = in_mdev; - - if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) == - MLX5_COUNTER_SOURCE_ESWITCH) - flow->esw_attr->counter_dev = in_mdev; - else - flow->esw_attr->counter_dev = priv->mdev; - - err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack); - if (err) - goto err_free; + err = mlx5e_tc_add_fdb_flow(priv, flow, extack); + if (err) { + if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev))) + goto err_free; - *__flow = flow; + add_unready_flow(flow); + } - return 0; + return flow; err_free: kfree(flow); kvfree(parse_attr); out: - return err; + return ERR_PTR(err); } static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, + u16 flow_flags) { struct mlx5e_priv *priv = flow->priv, *peer_priv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw; @@ -2849,17 +2875,19 @@ static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f, * original flow and packets redirected from uplink use the * peer mdev. */ - if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT) + if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK) in_mdev = peer_priv->mdev; else in_mdev = priv->mdev; parse_attr = flow->esw_attr->parse_attr; - err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags, - parse_attr->filter_dev, - flow->esw_attr->in_rep, in_mdev, &peer_flow); - if (err) + peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags, + parse_attr->filter_dev, + flow->esw_attr->in_rep, in_mdev); + if (IS_ERR(peer_flow)) { + err = PTR_ERR(peer_flow); goto out; + } flow->peer_flow = peer_flow; flow->flags |= MLX5E_TC_FLOW_DUP; @@ -2885,13 +2913,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; int err; - err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep, - in_mdev, &flow); - if (err) - goto out; + flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep, + in_mdev); + if (IS_ERR(flow)) + return PTR_ERR(flow); if (is_peer_flow_needed(flow)) { - err = mlx5e_tc_add_fdb_peer_flow(f, flow); + err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags); if (err) { mlx5e_tc_del_fdb_flow(priv, flow); goto out; @@ -2913,6 +2941,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow *flow; @@ -2935,7 +2964,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack); + err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack); if (err) goto err_free; @@ -3055,23 +3084,25 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, struct mlx5_eswitch *peer_esw; struct mlx5e_tc_flow *flow; struct mlx5_fc *counter; - u64 bytes; - u64 packets; - u64 lastuse; + u64 lastuse = 0; + u64 packets = 0; + u64 bytes = 0; flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); if (!flow || !same_flow_direction(flow, flags)) return -EINVAL; - 
if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED)) - return 0; - - counter = mlx5e_tc_get_counter(flow); - if (!counter) - return 0; + if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + counter = mlx5e_tc_get_counter(flow); + if (!counter) + return 0; - mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + } + /* Under multipath it's possible for one rule to be currently + * un-offloaded while the other rule is offloaded. + */ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); if (!peer_esw) goto out; @@ -3083,6 +3114,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, u64 lastuse2; counter = mlx5e_tc_get_counter(flow->peer_flow); + if (!counter) + goto no_peer_counter; mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2); bytes += bytes2; @@ -3090,10 +3123,10 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, lastuse = max_t(u64, lastuse, lastuse2); } +no_peer_counter: mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); - out: - tcf_exts_stats_update(f->exts, bytes, packets, lastuse); + flow_stats_update(&f->stats, bytes, packets, lastuse); return 0; } @@ -3213,3 +3246,18 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer) __mlx5e_tc_del_fdb_peer_flow(flow); } + +void mlx5e_tc_reoffload_flows_work(struct work_struct *work) +{ + struct mlx5_rep_uplink_priv *rpriv = + container_of(work, struct mlx5_rep_uplink_priv, + reoffload_flows_work); + struct mlx5e_tc_flow *flow, *tmp; + + rtnl_lock(); + list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { + if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) + remove_unready_flow(flow); + } + rtnl_unlock(); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index d2d87f978c06..f62e81902d27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -72,6 +72,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags); +void mlx5e_tc_reoffload_flows_work(struct work_struct *work); #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 598ad7e4d5c9..25a8f8260c14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -148,12 +148,8 @@ static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb) { - struct flow_keys keys; - if (skb_transport_header_was_set(skb)) return skb_transport_offset(skb); - else if (skb_flow_dissect_flow_keys(skb, &keys, 0)) - return keys.control.thoff; else return mlx5e_skb_l2_header_offset(skb); } @@ -172,15 +168,8 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode, hlen += VLAN_HLEN; break; case MLX5_INLINE_MODE_IP: - /* When transport header is set to zero, it means no transport - * header. When transport header is set to 0xff's, it means - * transport header wasn't set. 
- */ - if (skb_transport_offset(skb)) { - hlen = mlx5e_skb_l3_header_offset(skb); - break; - } - /* fall through */ + hlen = mlx5e_skb_l3_header_offset(skb); + break; case MLX5_INLINE_MODE_L2: default: hlen = mlx5e_skb_l2_header_offset(skb); @@ -387,8 +376,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); if (unlikely(contig_wqebbs_room < num_wqebbs)) { +#ifdef CONFIG_MLX5_EN_IPSEC + struct mlx5_wqe_eth_seg cur_eth = wqe->eth; +#endif mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); mlx5e_sq_fetch_wqe(sq, &wqe, &pi); +#ifdef CONFIG_MLX5_EN_IPSEC + wqe->eth = cur_eth; +#endif } /* fill wqe */ @@ -514,7 +509,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) mlx5e_dump_error_cqe(sq, (struct mlx5_err_cqe *)cqe); queue_work(cq->channel->priv->wq, - &sq->recover.recover_work); + &sq->recover_work); } stats->cqe_err++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ee04aab65a9f..bb6e5b5d9681 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #ifdef CONFIG_RFS_ACCEL @@ -114,11 +115,11 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) struct mlx5_cq_table *table = &eq->cq_table; struct mlx5_core_cq *cq = NULL; - spin_lock(&table->lock); + rcu_read_lock(); cq = radix_tree_lookup(&table->tree, cqn); if (likely(cq)) mlx5_cq_hold(cq); - spin_unlock(&table->lock); + rcu_read_unlock(); return cq; } @@ -371,9 +372,9 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) struct mlx5_cq_table *table = &eq->cq_table; int err; - spin_lock_irq(&table->lock); + spin_lock(&table->lock); err = radix_tree_insert(&table->tree, cq->cqn, cq); - spin_unlock_irq(&table->lock); + spin_unlock(&table->lock); return err; } @@ -383,9 +384,9 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) struct mlx5_cq_table *table = &eq->cq_table; struct mlx5_core_cq *tmp; - spin_lock_irq(&table->lock); + spin_lock(&table->lock); tmp = radix_tree_delete(&table->tree, cq->cqn); - spin_unlock_irq(&table->lock); + spin_unlock(&table->lock); if (!tmp) { mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn); @@ -530,6 +531,9 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER); + if (mlx5_core_is_ecpf_esw_manager(dev)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE); + return async_event_mask; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index a44ea7b85614..d0b28251abf2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -39,8 +39,7 @@ #include "lib/eq.h" #include "eswitch.h" #include "fs_core.h" - -#define UPLINK_VPORT 0xFFFF +#include "ecpf.h" enum { MLX5_ACTION_NONE = 0, @@ -52,7 +51,7 @@ enum { struct vport_addr { struct l2addr_node node; u8 action; - u32 vport; + u16 vport; struct mlx5_flow_handle *flow_rule; bool mpfs; /* UC MAC was added to MPFs */ /* A flag indicating that mac was added due to mc promiscuous vport */ @@ -65,11 +64,36 @@ enum { PROMISC_CHANGE = BIT(3), }; +static void 
esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw); +static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); + /* Vport context events */ #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \ MC_ADDR_CHANGE | \ PROMISC_CHANGE) +/* The vport getter/iterator are only valid after esw->total_vports + * and vport->vport are initialized in mlx5_eswitch_init. + */ +#define mlx5_esw_for_all_vports(esw, i, vport) \ + for ((i) = MLX5_VPORT_PF; \ + (vport) = &(esw)->vports[i], \ + (i) < (esw)->total_vports; (i)++) + +#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ + for ((i) = MLX5_VPORT_FIRST_VF; \ + (vport) = &(esw)->vports[i], \ + (i) <= (nvfs); (i)++) + +static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, + u16 vport_num) +{ + u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); + + WARN_ON(idx > esw->total_vports - 1); + return &esw->vports[idx]; +} + static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { @@ -115,7 +139,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); } -static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, +static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport, u16 vlan, u8 qos, u8 set_flags) { u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0}; @@ -152,7 +176,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, /* E-Switch FDB */ static struct mlx5_flow_handle * -__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, +__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule, u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) { int match_header = (is_zero_ether_addr(mac_c) ? 
0 : @@ -188,7 +212,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, misc_parameters); mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); - MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); + MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK); MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); } @@ -215,7 +239,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, } static struct mlx5_flow_handle * -esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) +esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport) { u8 mac_c[ETH_ALEN]; @@ -224,7 +248,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) } static struct mlx5_flow_handle * -esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) +esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport) { u8 mac_c[ETH_ALEN]; u8 mac_v[ETH_ALEN]; @@ -237,7 +261,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) } static struct mlx5_flow_handle * -esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) +esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport) { u8 mac_c[ETH_ALEN]; u8 mac_v[ETH_ALEN]; @@ -247,6 +271,37 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); } +enum { + LEGACY_VEPA_PRIO = 0, + LEGACY_FDB_PRIO, +}; + +static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb; + int err; + + root_ns = mlx5_get_fdb_sub_ns(dev, 0); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + return -EOPNOTSUPP; + } + + /* num FTE 2, num FG 2 */ + fdb = mlx5_create_auto_grouped_flow_table(root_ns, LEGACY_VEPA_PRIO, + 2, 2, 0, 0); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create VEPA FDB err %d\n", err); + return err; + } + esw->fdb_table.legacy.vepa_fdb = fdb; + + return 0; +} + static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -275,8 +330,8 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) return -ENOMEM; table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - ft_attr.max_fte = table_size; + ft_attr.prio = LEGACY_FDB_PRIO; fdb = mlx5_create_flow_table(root_ns, &ft_attr); if (IS_ERR(fdb)) { err = PTR_ERR(fdb); @@ -335,41 +390,65 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) esw->fdb_table.legacy.promisc_grp = g; out: - if (err) { - if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) { - mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); - esw->fdb_table.legacy.allmulti_grp = NULL; - } - if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) { - mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); - esw->fdb_table.legacy.addr_grp = NULL; - } - if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.fdb)) { - mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); - esw->fdb_table.legacy.fdb = NULL; - } - } + if (err) + esw_destroy_legacy_fdb_table(esw); kvfree(flow_group_in); return err; } +static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw) +{ + esw_debug(esw->dev, "Destroy VEPA Table\n"); + if (!esw->fdb_table.legacy.vepa_fdb) + return; + + 
mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb); + esw->fdb_table.legacy.vepa_fdb = NULL; +} + static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) { + esw_debug(esw->dev, "Destroy FDB Table\n"); if (!esw->fdb_table.legacy.fdb) return; - esw_debug(esw->dev, "Destroy FDB Table\n"); - mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); - mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); - mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); + if (esw->fdb_table.legacy.promisc_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); + if (esw->fdb_table.legacy.allmulti_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); + if (esw->fdb_table.legacy.addr_grp) + mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb); + esw->fdb_table.legacy.fdb = NULL; esw->fdb_table.legacy.addr_grp = NULL; esw->fdb_table.legacy.allmulti_grp = NULL; esw->fdb_table.legacy.promisc_grp = NULL; } +static int esw_create_legacy_table(struct mlx5_eswitch *esw) +{ + int err; + + err = esw_create_legacy_vepa_table(esw); + if (err) + return err; + + err = esw_create_legacy_fdb_table(esw); + if (err) + esw_destroy_legacy_vepa_table(esw); + + return err; +} + +static void esw_destroy_legacy_table(struct mlx5_eswitch *esw) +{ + esw_cleanup_vepa_rules(esw); + esw_destroy_legacy_fdb_table(esw); + esw_destroy_legacy_vepa_table(esw); +} + /* E-Switch vport UC/MC lists management */ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, struct vport_addr *vaddr); @@ -377,19 +456,19 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; int err; - /* Skip mlx5_mpfs_add_mac for PFs, - * it is already done by the PF netdev in mlx5e_execute_l2_action + /* Skip mlx5_mpfs_add_mac for eswitch_managers, + * it is already done by its netdev in mlx5e_execute_l2_action */ - if (!vport) + if (esw->manager_vport == vport) goto fdb_add; err = mlx5_mpfs_add_mac(esw->dev, mac); if (err) { esw_warn(esw->dev, - "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n", + "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n", mac, vport, err); return err; } @@ -409,13 +488,13 @@ fdb_add: static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; int err = 0; - /* Skip mlx5_mpfs_del_mac for PFs, - * it is already done by the PF netdev in mlx5e_execute_l2_action + /* Skip mlx5_mpfs_del_mac for eswitch managerss, + * it is already done by its netdev in mlx5e_execute_l2_action */ - if (!vport || !vaddr->mpfs) + if (!vaddr->mpfs || esw->manager_vport == vport) goto fdb_del; err = mlx5_mpfs_del_mac(esw->dev, mac); @@ -438,17 +517,18 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, struct esw_mc_addr *esw_mc) { u8 *mac = vaddr->node.addr; - u32 vport_idx = 0; + struct mlx5_vport *vport; + u16 i, vport_num; - for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) { - struct mlx5_vport *vport = &esw->vports[vport_idx]; + mlx5_esw_for_all_vports(esw, i, vport) { struct hlist_head *vport_hash = vport->mc_list; struct vport_addr *iter_vaddr = l2addr_hash_find(vport_hash, mac, struct vport_addr); + vport_num = vport->vport; if (IS_ERR_OR_NULL(vport->allmulti_rule) || - vaddr->vport == vport_idx) + vaddr->vport == vport_num) 
continue; switch (vaddr->action) { case MLX5_ACTION_ADD: @@ -460,14 +540,14 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, if (!iter_vaddr) { esw_warn(esw->dev, "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n", - mac, vport_idx); + mac, vport_num); continue; } - iter_vaddr->vport = vport_idx; + iter_vaddr->vport = vport_num; iter_vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, - vport_idx); + vport_num); iter_vaddr->mc_promisc = true; break; case MLX5_ACTION_DEL: @@ -485,7 +565,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) struct hlist_head *hash = esw->mc_table; struct esw_mc_addr *esw_mc; u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; if (!esw->fdb_table.legacy.fdb) return 0; @@ -499,7 +579,7 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) return -ENOMEM; esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ - esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT); + esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK); /* Add this multicast mac to all the mc promiscuous vports */ update_allmulti_vports(esw, vaddr, esw_mc); @@ -525,7 +605,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) struct hlist_head *hash = esw->mc_table; struct esw_mc_addr *esw_mc; u8 *mac = vaddr->node.addr; - u32 vport = vaddr->vport; + u16 vport = vaddr->vport; if (!esw->fdb_table.legacy.fdb) return 0; @@ -564,9 +644,9 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) /* Apply vport UC/MC list to HW l2 table and FDB table */ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, - u32 vport_num, int list_type) + u16 vport_num, int list_type) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; vport_addr_action vport_addr_add; vport_addr_action vport_addr_del; @@ -599,9 +679,9 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw, /* Sync vport UC/MC list from vport context */ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, - u32 vport_num, int list_type) + u16 vport_num, int list_type) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC; u8 (*mac_list)[ETH_ALEN]; struct l2addr_node *node; @@ -686,9 +766,9 @@ out: /* Sync vport UC/MC list from vport context * Must be called after esw_update_vport_addr_list */ -static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num) +static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); struct l2addr_node *node; struct vport_addr *addr; struct hlist_head *hash; @@ -721,11 +801,11 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num) } /* Apply vport rx mode to HW FDB table */ -static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, +static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num, bool promisc, bool mc_promisc) { + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); struct esw_mc_addr *allmulti_addr = &esw->mc_promisc; - struct mlx5_vport *vport = &esw->vports[vport_num]; if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) goto promisc; @@ 
-736,7 +816,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, if (!allmulti_addr->uplink_rule) allmulti_addr->uplink_rule = esw_fdb_set_vport_allmulti_rule(esw, - UPLINK_VPORT); + MLX5_VPORT_UPLINK); allmulti_addr->refcnt++; } else if (vport->allmulti_rule) { mlx5_del_flow_rules(vport->allmulti_rule); @@ -764,9 +844,9 @@ promisc: } /* Sync vport rx mode from vport context */ -static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num) +static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); int promisc_all = 0; int promisc_uc = 0; int promisc_mc = 0; @@ -1134,13 +1214,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, int err = 0; u8 *smac_v; - if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) { - mlx5_core_warn(esw->dev, - "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", - vport->vport); - return -EPERM; - } - esw_vport_cleanup_ingress_rules(esw, vport); if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { @@ -1350,8 +1423,8 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw) static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, u32 initial_max_rate, u32 initial_bw_share) { + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_vport *vport = &esw->vports[vport_num]; struct mlx5_core_dev *dev = esw->dev; void *vport_elem; int err = 0; @@ -1390,7 +1463,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); int err = 0; if (!vport->qos.enabled) @@ -1409,8 +1482,8 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num) static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, u32 max_rate, u32 bw_share) { + struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_vport *vport = &esw->vports[vport_num]; struct mlx5_core_dev *dev = esw->dev; void *vport_elem; u32 bitmask = 0; @@ -1466,15 +1539,22 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, { int vport_num = vport->vport; - if (!vport_num) + if (esw->manager_vport == vport_num) return; mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport_num, + vport_num, 1, vport->info.link_state); - mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac); - mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid); + + /* Host PF has its own mac/guid. 
*/ + if (vport_num) { + mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, + vport->info.mac); + mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, + vport->info.node_guid); + } + modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos, (vport->info.vlan || vport->info.qos)); @@ -1520,10 +1600,10 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) mlx5_fc_destroy(dev, vport->egress.drop_counter); } -static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, +static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int enable_events) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + u16 vport_num = vport->vport; mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); @@ -1546,8 +1626,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, vport->enabled_events = enable_events; vport->enabled = true; - /* only PF is trusted by default */ - if (!vport_num) + /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well + * in smartNIC as it's a vport group manager. + */ + if (esw->manager_vport == vport_num || + (!vport_num && mlx5_core_is_ecpf(esw->dev))) vport->info.trusted = true; esw_vport_change_handle_locked(vport); @@ -1557,9 +1640,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, mutex_unlock(&esw->state_lock); } -static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) +static void esw_disable_vport(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) { - struct mlx5_vport *vport = &esw->vports[vport_num]; + u16 vport_num = vport->vport; if (!vport->enabled) return; @@ -1580,10 +1664,11 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) esw_vport_change_handle_locked(vport); vport->enabled_events = 0; esw_vport_disable_qos(esw, vport_num); - if (vport_num && esw->mode == SRIOV_LEGACY) { + if (esw->manager_vport != vport_num && + esw->mode == SRIOV_LEGACY) { mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport_num, + vport_num, 1, MLX5_VPORT_ADMIN_STATE_DOWN); esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); @@ -1602,7 +1687,7 @@ static int eswitch_vport_event(struct notifier_block *nb, u16 vport_num; vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); - vport = &esw->vports[vport_num]; + vport = mlx5_eswitch_get_vport(esw, vport_num); if (vport->enabled) queue_work(esw->work_queue, &vport->vport_change_handler); @@ -1614,6 +1699,8 @@ static int eswitch_vport_event(struct notifier_block *nb, int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { + int vf_nvports = 0, total_nvports = 0; + struct mlx5_vport *vport; int err; int i, enabled_events; @@ -1631,16 +1718,30 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); + if (mode == SRIOV_OFFLOADS) { + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + err = mlx5_query_host_params_num_vfs(esw->dev, &vf_nvports); + if (err) + return err; + total_nvports = esw->total_vports; + } else { + vf_nvports = nvfs; + total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev); + } + } + esw->mode = mode; mlx5_lag_update(esw->dev); if (mode == SRIOV_LEGACY) { - err = esw_create_legacy_fdb_table(esw); + err = esw_create_legacy_table(esw); + if (err) + goto abort; } else { mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); mlx5_reload_interface(esw->dev, 
MLX5_INTERFACE_PROTOCOL_IB); - err = esw_offloads_init(esw, nvfs + 1); + err = esw_offloads_init(esw, vf_nvports, total_nvports); } if (err) @@ -1655,8 +1756,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) * 2. FDB/Eswitch is programmed by user space tools */ enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : 0; - for (i = 0; i <= nvfs; i++) - esw_enable_vport(esw, i, enabled_events); + + /* Enable PF vport */ + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); + esw_enable_vport(esw, vport, enabled_events); + + /* Enable ECPF vports */ + if (mlx5_ecpf_vport_exists(esw->dev)) { + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + esw_enable_vport(esw, vport, enabled_events); + } + + /* Enable VF vports */ + mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) + esw_enable_vport(esw, vport, enabled_events); if (mode == SRIOV_LEGACY) { MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); @@ -1681,8 +1794,8 @@ abort: void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { struct esw_mc_addr *mc_promisc; + struct mlx5_vport *vport; int old_mode; - int nvports; int i; if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE) @@ -1692,13 +1805,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw->enabled_vports, esw->mode); mc_promisc = &esw->mc_promisc; - nvports = esw->enabled_vports; if (esw->mode == SRIOV_LEGACY) mlx5_eq_notifier_unregister(esw->dev, &esw->nb); - for (i = 0; i < esw->total_vports; i++) - esw_disable_vport(esw, i); + mlx5_esw_for_all_vports(esw, i, vport) + esw_disable_vport(esw, vport); if (mc_promisc && mc_promisc->uplink_rule) mlx5_del_flow_rules(mc_promisc->uplink_rule); @@ -1706,9 +1818,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_destroy_tsar(esw); if (esw->mode == SRIOV_LEGACY) - esw_destroy_legacy_fdb_table(esw); + esw_destroy_legacy_table(esw); else if (esw->mode == SRIOV_OFFLOADS) - esw_offloads_cleanup(esw, nvports); + esw_offloads_cleanup(esw); old_mode = esw->mode; esw->mode = SRIOV_NONE; @@ -1725,10 +1837,10 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) { int total_vports = MLX5_TOTAL_VPORTS(dev); struct mlx5_eswitch *esw; - int vport_num; - int err; + struct mlx5_vport *vport; + int err, i; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!MLX5_VPORT_MANAGER(dev)) return 0; esw_info(dev, @@ -1742,6 +1854,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) return -ENOMEM; esw->dev = dev; + esw->manager_vport = mlx5_eswitch_manager_vport(dev); esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { @@ -1756,6 +1869,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } + esw->total_vports = total_vports; + err = esw_offloads_init_reps(esw); if (err) goto abort; @@ -1764,17 +1879,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) hash_init(esw->offloads.mod_hdr_tbl); mutex_init(&esw->state_lock); - for (vport_num = 0; vport_num < total_vports; vport_num++) { - struct mlx5_vport *vport = &esw->vports[vport_num]; - - vport->vport = vport_num; + mlx5_esw_for_all_vports(esw, i, vport) { + vport->vport = mlx5_eswitch_index_to_vport_num(esw, i); vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; vport->dev = dev; INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); } - esw->total_vports = total_vports; esw->enabled_vports = 0; esw->mode = SRIOV_NONE; esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE; @@ -1797,7 +1909,7 @@ abort: void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || 
!MLX5_ESWITCH_MANAGER(esw->dev)) + if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); @@ -1827,13 +1939,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, mutex_lock(&esw->state_lock); evport = &esw->vports[vport]; - if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { + if (evport->info.spoofchk && !is_valid_ether_addr(mac)) mlx5_core_warn(esw->dev, - "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", + "Set invalid MAC while spoofchk is on, vport(%d)\n", vport); - err = -EPERM; - goto unlock; - } err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); if (err) { @@ -1876,7 +1985,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, err = mlx5_modify_vport_admin_state(esw->dev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, - vport, link_state); + vport, 1, link_state); if (err) { mlx5_core_warn(esw->dev, "Failed to set vport %d link state, err = %d", @@ -1979,6 +2088,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, evport = &esw->vports[vport]; pschk = evport->info.spoofchk; evport->info.spoofchk = spoofchk; + if (pschk && !is_valid_ether_addr(evport->info.mac)) + mlx5_core_warn(esw->dev, + "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); if (evport->enabled && esw->mode == SRIOV_LEGACY) err = esw_vport_ingress_config(esw, evport); if (err) @@ -1988,6 +2101,127 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, return err; } +static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw) +{ + if (esw->fdb_table.legacy.vepa_uplink_rule) + mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule); + + if (esw->fdb_table.legacy.vepa_star_rule) + mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule); + + esw->fdb_table.legacy.vepa_uplink_rule = NULL; + esw->fdb_table.legacy.vepa_star_rule = NULL; +} + +static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw, + u8 setting) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *flow_rule; + struct mlx5_flow_spec *spec; + int err = 0; + void *misc; + + if (!setting) { + esw_cleanup_vepa_rules(esw); + return 0; + } + + if (esw->fdb_table.legacy.vepa_uplink_rule) + return 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + /* Uplink rule forward uplink traffic to FDB */ + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = esw->fdb_table.legacy.fdb; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto out; + } else { + esw->fdb_table.legacy.vepa_uplink_rule = flow_rule; + } + + /* Star rule to forward all traffic to uplink vport */ + memset(spec, 0, sizeof(*spec)); + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport.num = MLX5_VPORT_UPLINK; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec, + &flow_act, &dest, 1); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto out; + } else { + esw->fdb_table.legacy.vepa_star_rule 
= flow_rule; + } + +out: + kvfree(spec); + if (err) + esw_cleanup_vepa_rules(esw); + return err; +} + +int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting) +{ + int err = 0; + + if (!esw) + return -EOPNOTSUPP; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + + mutex_lock(&esw->state_lock); + if (esw->mode != SRIOV_LEGACY) { + err = -EOPNOTSUPP; + goto out; + } + + err = _mlx5_eswitch_set_vepa_locked(esw, setting); + +out: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) +{ + int err = 0; + + if (!esw) + return -EOPNOTSUPP; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + + mutex_lock(&esw->state_lock); + if (esw->mode != SRIOV_LEGACY) { + err = -EOPNOTSUPP; + goto out; + } + + *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; + +out: + mutex_unlock(&esw->state_lock); + return err; +} + int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport, bool setting) { @@ -2015,8 +2249,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) u32 max_guarantee = 0; int i; - for (i = 0; i < esw->total_vports; i++) { - evport = &esw->vports[i]; + mlx5_esw_for_all_vports(esw, i, evport) { if (!evport->enabled || evport->info.min_rate < max_guarantee) continue; max_guarantee = evport->info.min_rate; @@ -2035,8 +2268,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) int err; int i; - for (i = 0; i < esw->total_vports; i++) { - evport = &esw->vports[i]; + mlx5_esw_for_all_vports(esw, i, evport) { if (!evport->enabled) continue; vport_min_rate = evport->info.min_rate; @@ -2051,7 +2283,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) if (bw_share == evport->qos.bw_share) continue; - err = esw_vport_qos_config(esw, i, vport_max_rate, + err = esw_vport_qos_config(esw, evport->vport, vport_max_rate, bw_share); if (!err) evport->qos.bw_share = bw_share; @@ -2134,7 +2366,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, !MLX5_CAP_GEN(dev, transmit_discard_vport_down)) return 0; - err = mlx5_query_vport_down_stats(dev, vport_idx, + err = mlx5_query_vport_down_stats(dev, vport_idx, 1, &rx_discard_vport_down, &tx_discard_vport_down); if (err) @@ -2171,8 +2403,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); MLX5_SET(query_vport_counter_in, in, vport_number, vport); - if (vport) - MLX5_SET(query_vport_counter_in, in, other_vport, 1); + MLX5_SET(query_vport_counter_in, in, other_vport, 1); memset(out, 0, outlen); err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen); @@ -2245,3 +2476,10 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) return false; } + +bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, + struct mlx5_core_dev *dev1) +{ + return (dev0->priv.eswitch->mode == SRIOV_OFFLOADS && + dev1->priv.eswitch->mode == SRIOV_OFFLOADS); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9c89eea9b2c3..3f3cd32ae60a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include "lib/mpfs.h" @@ -49,8 +50,6 @@ #define MLX5_MAX_MC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) -#define FDB_UPLINK_VPORT 0xffff - #define MLX5_MIN_BW_SHARE 1 #define 
MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ @@ -138,6 +137,9 @@ struct mlx5_eswitch_fdb { struct mlx5_flow_group *addr_grp; struct mlx5_flow_group *allmulti_grp; struct mlx5_flow_group *promisc_grp; + struct mlx5_flow_table *vepa_fdb; + struct mlx5_flow_handle *vepa_uplink_rule; + struct mlx5_flow_handle *vepa_star_rule; } legacy; struct offloads_fdb { @@ -183,6 +185,16 @@ struct esw_mc_addr { /* SRIOV only */ u32 refcnt; }; +struct mlx5_host_work { + struct work_struct work; + struct mlx5_eswitch *esw; +}; + +struct mlx5_host_info { + struct mlx5_nb nb; + u16 num_vfs; +}; + struct mlx5_eswitch { struct mlx5_core_dev *dev; struct mlx5_nb nb; @@ -206,10 +218,13 @@ struct mlx5_eswitch { struct mlx5_esw_offload offloads; int mode; int nvports; + u16 manager_vport; + struct mlx5_host_info host_info; }; -void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); -int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); +void esw_offloads_cleanup(struct mlx5_eswitch *esw); +int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, + int total_nvports); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); @@ -230,6 +245,8 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, int vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, u32 max_rate, u32 min_rate); +int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); +int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, @@ -312,6 +329,7 @@ struct mlx5_esw_flow_attr { } dests[MLX5_MAX_FLOW_FWD_VPORTS]; u32 mod_hdr_id; u8 match_level; + u8 tunnel_match_level; struct mlx5_fc *counter; u32 chain; u16 prio; @@ -353,6 +371,8 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1); +bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0, + struct mlx5_core_dev *dev1); #define MLX5_DEBUG_ESWITCH_MASK BIT(3) @@ -364,6 +384,53 @@ bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, #define esw_debug(dev, format, ...) \ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) + +/* The returned number is valid only when the dev is eswitch manager. */ +static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev) +{ + return mlx5_core_is_ecpf_esw_manager(dev) ? 
+ MLX5_VPORT_ECPF : MLX5_VPORT_PF; +} + +static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw) +{ + /* Uplink always locate at the last element of the array.*/ + return esw->total_vports - 1; +} + +static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw) +{ + return esw->total_vports - 2; +} + +static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw, + u16 vport_num) +{ + if (vport_num == MLX5_VPORT_ECPF) { + if (!mlx5_ecpf_vport_exists(esw->dev)) + esw_warn(esw->dev, "ECPF vport doesn't exist!\n"); + return mlx5_eswitch_ecpf_idx(esw); + } + + if (vport_num == MLX5_VPORT_UPLINK) + return mlx5_eswitch_uplink_idx(esw); + + return vport_num; +} + +static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, + int index) +{ + if (index == mlx5_eswitch_ecpf_idx(esw) && + mlx5_ecpf_vport_exists(esw->dev)) + return MLX5_VPORT_ECPF; + + if (index == mlx5_eswitch_uplink_idx(esw)) + return MLX5_VPORT_UPLINK; + + return index; +} + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 53065b6ae593..f2260391be5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -40,15 +40,59 @@ #include "en.h" #include "fs_core.h" #include "lib/devcom.h" +#include "ecpf.h" +#include "lib/eq.h" enum { FDB_FAST_PATH = 0, FDB_SLOW_PATH }; +/* There are two match-all miss flows, one for unicast dst mac and + * one for multicast. + */ +#define MLX5_ESW_MISS_FLOWS (2) + #define fdb_prio_table(esw, chain, prio, level) \ (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)] +#define UPLINK_REP_INDEX 0 + +/* The rep getter/iterator are only valid after esw->total_vports + * and vport->vport are initialized in mlx5_eswitch_init. 
+ */ +#define mlx5_esw_for_all_reps(esw, i, rep) \ + for ((i) = MLX5_VPORT_PF; \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) < (esw)->total_vports; (i)++) + +#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \ + for ((i) = MLX5_VPORT_FIRST_VF; \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) <= (nvfs); (i)++) + +#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \ + for ((i) = (nvfs); \ + (rep) = &(esw)->offloads.vport_reps[i], \ + (i) >= MLX5_VPORT_FIRST_VF; (i)--) + +#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \ + for ((vport) = MLX5_VPORT_FIRST_VF; \ + (vport) <= (nvfs); (vport)++) + +#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \ + for ((vport) = (nvfs); \ + (vport) >= MLX5_VPORT_FIRST_VF; (vport)--) + +static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, + u16 vport_num) +{ + u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); + + WARN_ON(idx > esw->total_vports - 1); + return &esw->offloads.vport_reps[idx]; +} + static struct mlx5_flow_table * esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level); static void @@ -160,14 +204,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_eswitch_owner_vhca_id); - if (attr->match_level == MLX5_MATCH_NONE) - spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; - else - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | - MLX5_MATCH_MISC_PARAMETERS; - - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + if (attr->tunnel_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + if (attr->match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; + } else if (attr->match_level != MLX5_MATCH_NONE) { + spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; + } if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; @@ -318,7 +363,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? 
"pop" : "none"); for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { rep = &esw->offloads.vport_reps[vf_vport]; - if (!rep->rep_if[REP_ETH].valid) + if (rep->rep_if[REP_ETH].state != REP_LOADED) continue; err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); @@ -359,15 +404,15 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, in_rep = attr->in_rep; out_rep = attr->dests[0].rep; - if (push && in_rep->vport == FDB_UPLINK_VPORT) + if (push && in_rep->vport == MLX5_VPORT_UPLINK) goto out_notsupp; - if (pop && out_rep->vport == FDB_UPLINK_VPORT) + if (pop && out_rep->vport == MLX5_VPORT_UPLINK) goto out_notsupp; /* vport has vlan push configured, can't offload VF --> wire rules w.o it */ if (!push && !pop && fwd) - if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT) + if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK) goto out_notsupp; /* protects against (1) setting rules with different vlans to push and @@ -409,7 +454,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) { + if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) { vport->vlan_refcount++; attr->vlan_handled = true; } @@ -469,7 +514,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) + if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) vport->vlan_refcount--; return 0; @@ -516,7 +561,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); - MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */ + /* source vport is the esw manager */ + MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); @@ -561,7 +607,7 @@ static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev, source_eswitch_owner_vhca_id); dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest->vport.num = 0; + dest->vport.num = peer_dev->priv.eswitch->manager_vport; dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; } @@ -595,14 +641,35 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - for (i = 1; i < nvports; i++) { + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF); + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + spec, &flow_act, &dest, 1); + if (IS_ERR(flow)) { + err = PTR_ERR(flow); + goto add_pf_flow_err; + } + flows[MLX5_VPORT_PF] = flow; + } + + if (mlx5_ecpf_vport_exists(esw->dev)) { + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + spec, &flow_act, &dest, 1); + if (IS_ERR(flow)) { + err = PTR_ERR(flow); + goto add_ecpf_flow_err; + } + flows[mlx5_eswitch_ecpf_idx(esw)] = flow; + } + + mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) { MLX5_SET(fte_match_set_misc, misc, source_port, i); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 
1); if (IS_ERR(flow)) { err = PTR_ERR(flow); - esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); - goto add_flow_err; + goto add_vf_flow_err; } flows[i] = flow; } @@ -612,9 +679,18 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, kvfree(spec); return 0; -add_flow_err: - for (i--; i > 0; i--) +add_vf_flow_err: + nvports = --i; + mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports) mlx5_del_flow_rules(flows[i]); + + if (mlx5_ecpf_vport_exists(esw->dev)) + mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); +add_ecpf_flow_err: + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) + mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); +add_pf_flow_err: + esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); kvfree(flows); alloc_flows_err: kvfree(spec); @@ -628,9 +704,15 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) flows = esw->fdb_table.offloads.peer_miss_rules; - for (i = 1; i < esw->total_vports; i++) + mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev)) mlx5_del_flow_rules(flows[i]); + if (mlx5_ecpf_vport_exists(esw->dev)) + mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]); + + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) + mlx5_del_flow_rules(flows[MLX5_VPORT_PF]); + kvfree(flows); } @@ -660,7 +742,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) dmac_c[0] = 0x01; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest.vport.num = 0; + dest.vport.num = esw->manager_vport; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, @@ -904,8 +986,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) esw->fdb_table.offloads.fdb_left[i] = ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0; - table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 + - esw->total_vports; + table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + + MLX5_ESW_MISS_FLOWS + esw->total_vports; /* create the slow path fdb with encap set, so further table instances * can be created at run time while VFs are probed if the FW allows that. 
@@ -999,7 +1081,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) dmac[0] = 0x01; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + ix + MLX5_ESW_MISS_FLOWS); g = mlx5_create_flow_group(fdb, flow_group_in); if (IS_ERR(g)) { @@ -1048,7 +1131,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) esw_destroy_offloads_fast_fdb_tables(esw); } -static int esw_create_offloads_table(struct mlx5_eswitch *esw) +static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; @@ -1062,7 +1145,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) return -EOPNOTSUPP; } - ft_attr.max_fte = dev->priv.sriov.num_vfs + 2; + ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS; ft_offloads = mlx5_create_flow_table(ns, &ft_attr); if (IS_ERR(ft_offloads)) { @@ -1082,16 +1165,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) mlx5_destroy_flow_table(offloads->ft_offloads); } -static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) +static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *g; - struct mlx5_priv *priv = &esw->dev->priv; u32 *flow_group_in; void *match_criteria, *misc; int err = 0; - int nvports = priv->sriov.num_vfs + 2; + nvports = nvports + MLX5_ESW_MISS_FLOWS; flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -1168,7 +1250,8 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, { int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; - if (esw->mode != SRIOV_LEGACY) { + if (esw->mode != SRIOV_LEGACY && + !mlx5_core_is_ecpf_esw_manager(esw->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can't set offloads mode, SRIOV legacy not enabled"); return -EINVAL; @@ -1206,9 +1289,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw) { int total_vfs = MLX5_TOTAL_VPORTS(esw->dev); struct mlx5_core_dev *dev = esw->dev; - struct mlx5_esw_offload *offloads; struct mlx5_eswitch_rep *rep; - u8 hw_id[ETH_ALEN]; + u8 hw_id[ETH_ALEN], rep_type; int vport; esw->offloads.vport_reps = kcalloc(total_vfs, @@ -1217,75 +1299,203 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw) if (!esw->offloads.vport_reps) return -ENOMEM; - offloads = &esw->offloads; mlx5_query_nic_vport_mac_address(dev, 0, hw_id); - for (vport = 0; vport < total_vfs; vport++) { - rep = &offloads->vport_reps[vport]; - - rep->vport = vport; + mlx5_esw_for_all_reps(esw, vport, rep) { + rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport); ether_addr_copy(rep->hw_id, hw_id); - } - offloads->vport_reps[0].vport = FDB_UPLINK_VPORT; + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) + rep->rep_if[rep_type].state = REP_UNREGISTERED; + } return 0; } -static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports, - u8 rep_type) +static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, u8 rep_type) +{ + if (rep->rep_if[rep_type].state != REP_LOADED) + return; + + rep->rep_if[rep_type].unload(rep); + rep->rep_if[rep_type].state = REP_REGISTERED; +} + +static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; - int vport; - for (vport = nvports - 1; vport >= 0; vport--) { - 
rep = &esw->offloads.vport_reps[vport]; - if (!rep->rep_if[rep_type].valid) - continue; + if (mlx5_ecpf_vport_exists(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); + __esw_offloads_unload_rep(esw, rep, rep_type); + } - rep->rep_if[rep_type].unload(rep); + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); + __esw_offloads_unload_rep(esw, rep, rep_type); } + + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + __esw_offloads_unload_rep(esw, rep, rep_type); } -static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports) +static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int i; + + mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports) + __esw_offloads_unload_rep(esw, rep, rep_type); +} + +static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 rep_type = NUM_REP_TYPES; + + while (rep_type-- > 0) + __unload_reps_vf_vport(esw, nvports, rep_type); +} + +static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + __unload_reps_vf_vport(esw, nvports, rep_type); + + /* Special vports must be the last to unload. */ + __unload_reps_special_vport(esw, rep_type); +} + +static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports) { u8 rep_type = NUM_REP_TYPES; while (rep_type-- > 0) - esw_offloads_unload_reps_type(esw, nvports, rep_type); + __unload_reps_all_vport(esw, nvports, rep_type); +} + +static int __esw_offloads_load_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, u8 rep_type) +{ + int err = 0; + + if (rep->rep_if[rep_type].state != REP_REGISTERED) + return 0; + + err = rep->rep_if[rep_type].load(esw->dev, rep); + if (err) + return err; + + rep->rep_if[rep_type].state = REP_LOADED; + + return 0; } -static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports, - u8 rep_type) +static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type) { struct mlx5_eswitch_rep *rep; - int vport; int err; - for (vport = 0; vport < nvports; vport++) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->rep_if[rep_type].valid) - continue; + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + return err; - err = rep->rep_if[rep_type].load(esw->dev, rep); + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); + err = __esw_offloads_load_rep(esw, rep, rep_type); if (err) - goto err_reps; + goto err_pf; + } + + if (mlx5_ecpf_vport_exists(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF); + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + goto err_ecpf; + } + + return 0; + +err_ecpf: + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF); + __esw_offloads_unload_rep(esw, rep, rep_type); + } + +err_pf: + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + __esw_offloads_unload_rep(esw, rep, rep_type); + return err; +} + +static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int err, i; + + mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) { + err = __esw_offloads_load_rep(esw, rep, rep_type); + if (err) + goto err_vf; } return 0; +err_vf: + __unload_reps_vf_vport(esw, --i, rep_type); + return err; +} + +static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 
rep_type = 0; + int err; + + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + err = __load_reps_vf_vport(esw, nvports, rep_type); + if (err) + goto err_reps; + } + + return err; + err_reps: - esw_offloads_unload_reps_type(esw, vport, rep_type); + while (rep_type-- > 0) + __unload_reps_vf_vport(esw, nvports, rep_type); + return err; +} + +static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + int err; + + /* Special vports must be loaded first. */ + err = __load_reps_special_vport(esw, rep_type); + if (err) + return err; + + err = __load_reps_vf_vport(esw, nvports, rep_type); + if (err) + goto err_vfs; + + return 0; + +err_vfs: + __unload_reps_special_vport(esw, rep_type); return err; } -static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports) +static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports) { u8 rep_type = 0; int err; for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { - err = esw_offloads_load_reps_type(esw, nvports, rep_type); + err = __load_reps_all_vport(esw, nvports, rep_type); if (err) goto err_reps; } @@ -1294,7 +1504,7 @@ static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports) err_reps: while (rep_type-- > 0) - esw_offloads_unload_reps_type(esw, nvports, rep_type); + __unload_reps_all_vport(esw, nvports, rep_type); return err; } @@ -1397,7 +1607,7 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); } -int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) { int err; @@ -1407,24 +1617,16 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) if (err) return err; - err = esw_create_offloads_table(esw); + err = esw_create_offloads_table(esw, nvports); if (err) goto create_ft_err; - err = esw_create_vport_rx_group(esw); + err = esw_create_vport_rx_group(esw, nvports); if (err) goto create_fg_err; - err = esw_offloads_load_reps(esw, nvports); - if (err) - goto err_reps; - - esw_offloads_devcom_init(esw); return 0; -err_reps: - esw_destroy_vport_rx_group(esw); - create_fg_err: esw_destroy_offloads_table(esw); @@ -1434,6 +1636,95 @@ create_ft_err: return err; } +static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) +{ + esw_destroy_vport_rx_group(esw); + esw_destroy_offloads_table(esw); + esw_destroy_offloads_fdb_tables(esw); +} + +static void esw_host_params_event_handler(struct work_struct *work) +{ + struct mlx5_host_work *host_work; + struct mlx5_eswitch *esw; + int err, num_vf = 0; + + host_work = container_of(work, struct mlx5_host_work, work); + esw = host_work->esw; + + err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf); + if (err || num_vf == esw->host_info.num_vfs) + goto out; + + /* Number of VFs can only change from "0 to x" or "x to 0". 
*/ + if (esw->host_info.num_vfs > 0) { + esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs); + } else { + err = esw_offloads_load_vf_reps(esw, num_vf); + + if (err) + goto out; + } + + esw->host_info.num_vfs = num_vf; + +out: + kfree(host_work); +} + +static int esw_host_params_event(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct mlx5_host_work *host_work; + struct mlx5_host_info *host_info; + struct mlx5_eswitch *esw; + + host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC); + if (!host_work) + return NOTIFY_DONE; + + host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb); + esw = container_of(host_info, struct mlx5_eswitch, host_info); + + host_work->esw = esw; + + INIT_WORK(&host_work->work, esw_host_params_event_handler); + queue_work(esw->work_queue, &host_work->work); + + return NOTIFY_OK; +} + +int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports, + int total_nvports) +{ + int err; + + mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); + + err = esw_offloads_steering_init(esw, total_nvports); + if (err) + return err; + + err = esw_offloads_load_all_reps(esw, vf_nvports); + if (err) + goto err_reps; + + esw_offloads_devcom_init(esw); + + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event, + HOST_PARAMS_CHANGE); + mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb); + esw->host_info.num_vfs = vf_nvports; + } + + return 0; + +err_reps: + esw_offloads_steering_cleanup(esw); + return err; +} + static int esw_offloads_stop(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { @@ -1453,13 +1744,21 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, return err; } -void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) +void esw_offloads_cleanup(struct mlx5_eswitch *esw) { + u16 num_vfs; + + if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { + mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb); + flush_workqueue(esw->work_queue); + num_vfs = esw->host_info.num_vfs; + } else { + num_vfs = esw->dev->priv.sriov.num_vfs; + } + esw_offloads_devcom_cleanup(esw); - esw_offloads_unload_reps(esw, nvports); - esw_destroy_vport_rx_group(esw); - esw_destroy_offloads_table(esw); - esw_destroy_offloads_fdb_tables(esw); + esw_offloads_unload_all_reps(esw, num_vfs); + esw_offloads_steering_cleanup(esw); } static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) @@ -1548,7 +1847,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink) if(!MLX5_ESWITCH_MANAGER(dev)) return -EPERM; - if (dev->priv.eswitch->mode == SRIOV_NONE) + if (dev->priv.eswitch->mode == SRIOV_NONE && + !mlx5_core_is_ecpf_esw_manager(dev)) return -EOPNOTSUPP; return 0; @@ -1760,47 +2060,45 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) return 0; } -void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, - int vport_index, - struct mlx5_eswitch_rep_if *__rep_if, - u8 rep_type) +void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep_if *__rep_if, + u8 rep_type) { - struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep_if *rep_if; + struct mlx5_eswitch_rep *rep; + int i; - rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type]; - - rep_if->load = __rep_if->load; - rep_if->unload = __rep_if->unload; - rep_if->get_proto_dev = __rep_if->get_proto_dev; - rep_if->priv = __rep_if->priv; + mlx5_esw_for_all_reps(esw, i, rep) { + rep_if = &rep->rep_if[rep_type]; + rep_if->load = __rep_if->load; + rep_if->unload = 
__rep_if->unload; + rep_if->get_proto_dev = __rep_if->get_proto_dev; + rep_if->priv = __rep_if->priv; - rep_if->valid = true; + rep_if->state = REP_REGISTERED; + } } -EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep); +EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps); -void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index, u8 rep_type) +void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type) { - struct mlx5_esw_offload *offloads = &esw->offloads; + u16 max_vf = mlx5_core_max_vfs(esw->dev); struct mlx5_eswitch_rep *rep; + int i; - rep = &offloads->vport_reps[vport_index]; - - if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) - rep->rep_if[rep_type].unload(rep); + if (esw->mode == SRIOV_OFFLOADS) + __unload_reps_all_vport(esw, max_vf, rep_type); - rep->rep_if[rep_type].valid = false; + mlx5_esw_for_all_reps(esw, i, rep) + rep->rep_if[rep_type].state = REP_UNREGISTERED; } -EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep); +EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) { -#define UPLINK_REP_INDEX 0 - struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; - rep = &offloads->vport_reps[UPLINK_REP_INDEX]; + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); return rep->rep_if[rep_type].priv; } @@ -1808,15 +2106,11 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, int vport, u8 rep_type) { - struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; - if (vport == FDB_UPLINK_VPORT) - vport = UPLINK_REP_INDEX; - - rep = &offloads->vport_reps[vport]; + rep = mlx5_eswitch_get_rep(esw, vport); - if (rep->rep_if[rep_type].valid && + if (rep->rep_if[rep_type].state == REP_LOADED && rep->rep_if[rep_type].get_proto_dev) return rep->rep_if[rep_type].get_proto_dev(rep); return NULL; @@ -1825,13 +2119,13 @@ EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev); void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type) { - return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type); + return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type); } EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, int vport) { - return &esw->offloads.vport_reps[vport]; + return mlx5_eswitch_get_rep(esw, vport); } EXPORT_SYMBOL(mlx5_eswitch_vport_rep); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index fbc42b7252a9..5d5864e8df3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -103,6 +103,8 @@ static const char *eqe_type_str(u8 type) return "MLX5_EVENT_TYPE_STALL_EVENT"; case MLX5_EVENT_TYPE_CMD: return "MLX5_EVENT_TYPE_CMD"; + case MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE: + return "MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE"; case MLX5_EVENT_TYPE_PAGE_REQUEST: return "MLX5_EVENT_TYPE_PAGE_REQUEST"; case MLX5_EVENT_TYPE_PAGE_FAULT: @@ -211,11 +213,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data enum port_module_event_status_type module_status; enum port_module_event_error_type error_type; struct mlx5_eqe_port_module *module_event_eqe; - const char *status_str, *error_str; + const char *status_str; u8 module_num; module_event_eqe = &eqe->data.port_module; - module_num = module_event_eqe->module; module_status = module_event_eqe->module_status & 
PORT_MODULE_EVENT_MODULE_STATUS_MASK; error_type = module_event_eqe->error_type & @@ -223,25 +224,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data if (module_status < MLX5_MODULE_STATUS_NUM) events->pme_stats.status_counters[module_status]++; - status_str = mlx5_pme_status_to_string(module_status); - if (module_status == MLX5_MODULE_STATUS_ERROR) { + if (module_status == MLX5_MODULE_STATUS_ERROR) if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) events->pme_stats.error_counters[error_type]++; - error_str = mlx5_pme_error_to_string(error_type); - } if (!printk_ratelimit()) return NOTIFY_OK; - if (module_status == MLX5_MODULE_STATUS_ERROR) + module_num = module_event_eqe->module; + status_str = mlx5_pme_status_to_string(module_status); + if (module_status == MLX5_MODULE_STATUS_ERROR) { + const char *error_str = mlx5_pme_error_to_string(error_type); + mlx5_core_err(events->dev, "Port module event[error]: module %u, %s, %s\n", module_num, status_str, error_str); - else + } else { mlx5_core_info(events->dev, "Port module event: module %u, %s\n", module_num, status_str); + } return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 27c5f6c7d36a..d046d1ec2a86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -317,7 +317,6 @@ static int mlx5_fpga_event(struct mlx5_fpga_device *fdev, const char *event_name; bool teardown = false; unsigned long flags; - u32 fpga_qpn; u8 syndrome; switch (event) { @@ -328,7 +327,6 @@ static int mlx5_fpga_event(struct mlx5_fpga_device *fdev, case MLX5_EVENT_TYPE_FPGA_QP_ERROR: syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome); event_name = mlx5_fpga_qp_syndrome_to_string(syndrome); - fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn); break; default: return NOTIFY_DONE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 79f122b45def..f2cfa012315e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -32,6 +32,7 @@ #include #include +#include #include #include "mlx5_core.h" @@ -397,6 +398,7 @@ static void del_hw_flow_table(struct fs_node *node) fs_get_obj(ft, node); dev = get_dev(&ft->node); root = find_root(&ft->node); + trace_mlx5_fs_del_ft(ft); if (node->active) { err = root->cmds->destroy_flow_table(dev, ft); @@ -618,7 +620,8 @@ static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steer if (ret) { kmem_cache_free(steering->fgs_cache, fg); return ERR_PTR(ret); -} + } + ida_init(&fg->fte_allocator); fg->mask.match_criteria_enable = match_criteria_enable; memcpy(&fg->mask.match_criteria, match_criteria, @@ -1019,6 +1022,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa fs_prio->num_ft++; up_write_ref_node(&fs_prio->node); mutex_unlock(&root->chain_lock); + trace_mlx5_fs_add_ft(ft); return ft; destroy_ft: root->cmds->destroy_flow_table(root->dev, ft); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 196c07383082..cb9fa3430c53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) mlx5_core_err(dev, "start\n"); if (pci_channel_offline(dev->pdev) || 
in_fatal(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; - mlx5_cmd_trigger_completions(dev); + mlx5_cmd_flush(dev); } mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index bfc0f6581729..4eac42555c7d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -446,11 +446,11 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu) new_channels.params = *params; new_channels.params.sw_mtu = new_mtu; - err = mlx5e_open_channels(priv, &new_channels); + + err = mlx5e_safe_switch_channels(priv, &new_channels, NULL); if (err) goto out; - mlx5e_switch_priv_channels(priv, &new_channels, NULL); netdev->mtu = new_channels.params.sw_mtu; out: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 3a6baed722d8..48aa6e030bcf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -35,37 +35,8 @@ #include #include "mlx5_core.h" #include "eswitch.h" - -enum { - MLX5_LAG_FLAG_ROCE = 1 << 0, - MLX5_LAG_FLAG_SRIOV = 1 << 1, -}; - -#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV) - -struct lag_func { - struct mlx5_core_dev *dev; - struct net_device *netdev; -}; - -/* Used for collection of netdev event info. */ -struct lag_tracker { - enum netdev_lag_tx_type tx_type; - struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS]; - bool is_bonded; -}; - -/* LAG data of a ConnectX card. - * It serves both its phys functions. - */ -struct mlx5_lag { - u8 flags; - u8 v2p_map[MLX5_MAX_PORTS]; - struct lag_func pf[MLX5_MAX_PORTS]; - struct lag_tracker tracker; - struct delayed_work bond_work; - struct notifier_block nb; -}; +#include "lag.h" +#include "lag_mp.h" /* General purpose, use for short periods of time. 
* Beware of lock dependencies (preferably, no locks should be acquired @@ -147,13 +118,8 @@ static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); } -static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev) -{ - return dev->priv.lag; -} - -static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, - struct net_device *ndev) +int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, + struct net_device *ndev) { int i; @@ -174,11 +140,6 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV); } -static bool __mlx5_lag_is_active(struct mlx5_lag *ldev) -{ - return !!(ldev->flags & MLX5_LAG_MODE_FLAGS); -} - static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, u8 *port1, u8 *port2) { @@ -195,8 +156,8 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, *port2 = 1; } -static void mlx5_modify_lag(struct mlx5_lag *ldev, - struct lag_tracker *tracker) +void mlx5_modify_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker) { struct mlx5_core_dev *dev0 = ldev->pf[0].dev; u8 v2p_port1, v2p_port2; @@ -241,9 +202,9 @@ static int mlx5_create_lag(struct mlx5_lag *ldev, return err; } -static int mlx5_activate_lag(struct mlx5_lag *ldev, - struct lag_tracker *tracker, - u8 flags) +int mlx5_activate_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + u8 flags) { bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE); struct mlx5_core_dev *dev0 = ldev->pf[0].dev; @@ -343,6 +304,11 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) roce_lag = !mlx5_sriov_is_enabled(dev0) && !mlx5_sriov_is_enabled(dev1); +#ifdef CONFIG_MLX5_ESWITCH + roce_lag &= dev0->priv.eswitch->mode == SRIOV_NONE && + dev1->priv.eswitch->mode == SRIOV_NONE; +#endif + if (roce_lag) mlx5_lag_remove_ib_devices(ldev); @@ -381,7 +347,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) { - schedule_delayed_work(&ldev->bond_work, delay); + queue_delayed_work(ldev->wq, &ldev->bond_work, delay); } static void mlx5_do_bond_work(struct work_struct *work) @@ -533,6 +499,12 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void) if (!ldev) return NULL; + ldev->wq = create_singlethread_workqueue("mlx5_lag"); + if (!ldev->wq) { + kfree(ldev); + return NULL; + } + INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work); return ldev; @@ -540,6 +512,7 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void) static void mlx5_lag_dev_free(struct mlx5_lag *ldev) { + destroy_workqueue(ldev->wq); kfree(ldev); } @@ -587,6 +560,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) { struct mlx5_lag *ldev = NULL; struct mlx5_core_dev *tmp_dev; + int err; if (!MLX5_CAP_GEN(dev, vport_group_manager) || !MLX5_CAP_GEN(dev, lag_master) || @@ -614,6 +588,32 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) mlx5_core_err(dev, "Failed to register LAG netdev notifier\n"); } } + + err = mlx5_lag_mp_init(ldev); + if (err) + mlx5_core_err(dev, "Failed to init multipath lag err=%d\n", + err); +} + +int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num) +{ + struct mlx5_lag *ldev; + int n; + + ldev = mlx5_lag_dev_get(dev); + if (!ldev) { + mlx5_core_warn(dev, "no lag device, can't get pf num\n"); + return -EINVAL; + } + + for (n = 0; n < MLX5_MAX_PORTS; n++) + if (ldev->pf[n].dev == dev) { + *pf_num = n; + return 0; + } + + mlx5_core_warn(dev, "wasn't able to locate pf in the lag 
device\n"); + return -EINVAL; } /* Must be called with intf_mutex held */ @@ -638,6 +638,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev) if (i == MLX5_MAX_PORTS) { if (ldev->nb.notifier_call) unregister_netdevice_notifier(&ldev->nb); + mlx5_lag_mp_cleanup(ldev); cancel_delayed_work_sync(&ldev->bond_work); mlx5_lag_dev_free(ldev); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h new file mode 100644 index 000000000000..1dea0b1c9826 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5_LAG_H__ +#define __MLX5_LAG_H__ + +#include "mlx5_core.h" +#include "lag_mp.h" + +enum { + MLX5_LAG_FLAG_ROCE = 1 << 0, + MLX5_LAG_FLAG_SRIOV = 1 << 1, + MLX5_LAG_FLAG_MULTIPATH = 1 << 2, +}; + +#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\ + MLX5_LAG_FLAG_MULTIPATH) + +struct lag_func { + struct mlx5_core_dev *dev; + struct net_device *netdev; +}; + +/* Used for collection of netdev event info. */ +struct lag_tracker { + enum netdev_lag_tx_type tx_type; + struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS]; + unsigned int is_bonded:1; +}; + +/* LAG data of a ConnectX card. + * It serves both its phys functions. + */ +struct mlx5_lag { + u8 flags; + u8 v2p_map[MLX5_MAX_PORTS]; + struct lag_func pf[MLX5_MAX_PORTS]; + struct lag_tracker tracker; + struct workqueue_struct *wq; + struct delayed_work bond_work; + struct notifier_block nb; + struct lag_mp lag_mp; +}; + +static inline struct mlx5_lag * +mlx5_lag_dev_get(struct mlx5_core_dev *dev) +{ + return dev->priv.lag; +} + +static inline bool +__mlx5_lag_is_active(struct mlx5_lag *ldev) +{ + return !!(ldev->flags & MLX5_LAG_MODE_FLAGS); +} + +void mlx5_modify_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker); +int mlx5_activate_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + u8 flags); +int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, + struct net_device *ndev); + +#endif /* __MLX5_LAG_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c new file mode 100644 index 000000000000..5633f8572800 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2019 Mellanox Technologies. */ + +#include +#include "lag.h" +#include "lag_mp.h" +#include "mlx5_core.h" +#include "eswitch.h" +#include "lib/mlx5.h" + +static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev) +{ + if (!ldev->pf[0].dev || !ldev->pf[1].dev) + return false; + + return mlx5_esw_multipath_prereq(ldev->pf[0].dev, ldev->pf[1].dev); +} + +static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev) +{ + return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH); +} + +bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + bool res; + + ldev = mlx5_lag_dev_get(dev); + res = ldev && __mlx5_lag_is_multipath(ldev); + + return res; +} + +/** + * Set lag port affinity + * + * @ldev: lag device + * @port: + * 0 - set normal affinity. + * 1 - set affinity to port 1. + * 2 - set affinity to port 2. 
+ * + **/ +static void mlx5_lag_set_port_affinity(struct mlx5_lag *ldev, int port) +{ + struct lag_tracker tracker; + + if (!__mlx5_lag_is_multipath(ldev)) + return; + + switch (port) { + case 0: + tracker.netdev_state[0].tx_enabled = true; + tracker.netdev_state[1].tx_enabled = true; + tracker.netdev_state[0].link_up = true; + tracker.netdev_state[1].link_up = true; + break; + case 1: + tracker.netdev_state[0].tx_enabled = true; + tracker.netdev_state[0].link_up = true; + tracker.netdev_state[1].tx_enabled = false; + tracker.netdev_state[1].link_up = false; + break; + case 2: + tracker.netdev_state[0].tx_enabled = false; + tracker.netdev_state[0].link_up = false; + tracker.netdev_state[1].tx_enabled = true; + tracker.netdev_state[1].link_up = true; + break; + default: + mlx5_core_warn(ldev->pf[0].dev, "Invalid affinity port %d", + port); + return; + } + + if (tracker.netdev_state[0].tx_enabled) + mlx5_notifier_call_chain(ldev->pf[0].dev->priv.events, + MLX5_DEV_EVENT_PORT_AFFINITY, + (void *)0); + + if (tracker.netdev_state[1].tx_enabled) + mlx5_notifier_call_chain(ldev->pf[1].dev->priv.events, + MLX5_DEV_EVENT_PORT_AFFINITY, + (void *)0); + + mlx5_modify_lag(ldev, &tracker); +} + +static void mlx5_lag_fib_event_flush(struct notifier_block *nb) +{ + struct lag_mp *mp = container_of(nb, struct lag_mp, fib_nb); + struct mlx5_lag *ldev = container_of(mp, struct mlx5_lag, lag_mp); + + flush_workqueue(ldev->wq); +} + +struct mlx5_fib_event_work { + struct work_struct work; + struct mlx5_lag *ldev; + unsigned long event; + union { + struct fib_entry_notifier_info fen_info; + struct fib_nh_notifier_info fnh_info; + }; +}; + +static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, + unsigned long event, + struct fib_info *fi) +{ + struct lag_mp *mp = &ldev->lag_mp; + + /* Handle delete event */ + if (event == FIB_EVENT_ENTRY_DEL) { + /* stop track */ + if (mp->mfi == fi) + mp->mfi = NULL; + return; + } + + /* Handle add/replace event */ + if (fi->fib_nhs == 1) { + if (__mlx5_lag_is_active(ldev)) { + struct net_device *nh_dev = fi->fib_nh[0].nh_dev; + int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev); + + mlx5_lag_set_port_affinity(ldev, ++i); + } + return; + } + + if (fi->fib_nhs != 2) + return; + + /* Verify next hops are ports of the same hca */ + if (!(fi->fib_nh[0].nh_dev == ldev->pf[0].netdev && + fi->fib_nh[1].nh_dev == ldev->pf[1].netdev) && + !(fi->fib_nh[0].nh_dev == ldev->pf[1].netdev && + fi->fib_nh[1].nh_dev == ldev->pf[0].netdev)) { + mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n"); + return; + } + + /* First time we see multipath route */ + if (!mp->mfi && !__mlx5_lag_is_active(ldev)) { + struct lag_tracker tracker; + + tracker = ldev->tracker; + mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH); + } + + mlx5_lag_set_port_affinity(ldev, 0); + mp->mfi = fi; +} + +static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev, + unsigned long event, + struct fib_nh *fib_nh, + struct fib_info *fi) +{ + struct lag_mp *mp = &ldev->lag_mp; + + /* Check the nh event is related to the route */ + if (!mp->mfi || mp->mfi != fi) + return; + + /* nh added/removed */ + if (event == FIB_EVENT_NH_DEL) { + int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->nh_dev); + + if (i >= 0) { + i = (i + 1) % 2 + 1; /* peer port */ + mlx5_lag_set_port_affinity(ldev, i); + } + } else if (event == FIB_EVENT_NH_ADD && + fi->fib_nhs == 2) { + mlx5_lag_set_port_affinity(ldev, 0); + } +} + +static void mlx5_lag_fib_update(struct work_struct *work) +{ + struct 
mlx5_fib_event_work *fib_work = + container_of(work, struct mlx5_fib_event_work, work); + struct mlx5_lag *ldev = fib_work->ldev; + struct fib_nh *fib_nh; + + /* Protect internal structures from changes */ + rtnl_lock(); + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + mlx5_lag_fib_route_event(ldev, fib_work->event, + fib_work->fen_info.fi); + fib_info_put(fib_work->fen_info.fi); + break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + fib_nh = fib_work->fnh_info.fib_nh; + mlx5_lag_fib_nexthop_event(ldev, + fib_work->event, + fib_work->fnh_info.fib_nh, + fib_nh->nh_parent); + fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); + break; + } + + rtnl_unlock(); + kfree(fib_work); +} + +static struct mlx5_fib_event_work * +mlx5_lag_init_fib_work(struct mlx5_lag *ldev, unsigned long event) +{ + struct mlx5_fib_event_work *fib_work; + + fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); + if (WARN_ON(!fib_work)) + return NULL; + + INIT_WORK(&fib_work->work, mlx5_lag_fib_update); + fib_work->ldev = ldev; + fib_work->event = event; + + return fib_work; +} + +static int mlx5_lag_fib_event(struct notifier_block *nb, + unsigned long event, + void *ptr) +{ + struct lag_mp *mp = container_of(nb, struct lag_mp, fib_nb); + struct mlx5_lag *ldev = container_of(mp, struct mlx5_lag, lag_mp); + struct fib_notifier_info *info = ptr; + struct mlx5_fib_event_work *fib_work; + struct fib_entry_notifier_info *fen_info; + struct fib_nh_notifier_info *fnh_info; + struct fib_info *fi; + + if (info->family != AF_INET) + return NOTIFY_DONE; + + if (!mlx5_lag_multipath_check_prereq(ldev)) + return NOTIFY_DONE; + + switch (event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + fen_info = container_of(info, struct fib_entry_notifier_info, + info); + fi = fen_info->fi; + if (fi->fib_dev != ldev->pf[0].netdev && + fi->fib_dev != ldev->pf[1].netdev) { + return NOTIFY_DONE; + } + fib_work = mlx5_lag_init_fib_work(ldev, event); + if (!fib_work) + return NOTIFY_DONE; + fib_work->fen_info = *fen_info; + /* Take reference on fib_info to prevent it from being + * freed while work is queued. Release it afterwards. 
+ */ + fib_info_hold(fib_work->fen_info.fi); + break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + fnh_info = container_of(info, struct fib_nh_notifier_info, + info); + fib_work = mlx5_lag_init_fib_work(ldev, event); + if (!fib_work) + return NOTIFY_DONE; + fib_work->fnh_info = *fnh_info; + fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + break; + default: + return NOTIFY_DONE; + } + + queue_work(ldev->wq, &fib_work->work); + + return NOTIFY_DONE; +} + +int mlx5_lag_mp_init(struct mlx5_lag *ldev) +{ + struct lag_mp *mp = &ldev->lag_mp; + int err; + + if (mp->fib_nb.notifier_call) + return 0; + + mp->fib_nb.notifier_call = mlx5_lag_fib_event; + err = register_fib_notifier(&mp->fib_nb, + mlx5_lag_fib_event_flush); + if (err) + mp->fib_nb.notifier_call = NULL; + + return err; +} + +void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) +{ + struct lag_mp *mp = &ldev->lag_mp; + + if (!mp->fib_nb.notifier_call) + return; + + unregister_fib_notifier(&mp->fib_nb); + mp->fib_nb.notifier_call = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h new file mode 100644 index 000000000000..6d14b1100be9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5_LAG_MP_H__ +#define __MLX5_LAG_MP_H__ + +#include "lag.h" +#include "mlx5_core.h" + +struct lag_mp { + struct notifier_block fib_nb; + struct fib_info *mfi; /* used in tracking fib events */ +}; + +#ifdef CONFIG_MLX5_ESWITCH + +int mlx5_lag_mp_init(struct mlx5_lag *ldev); +void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev); + +#else /* CONFIG_MLX5_ESWITCH */ + +static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; } +static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {} + +#endif /* CONFIG_MLX5_ESWITCH */ +#endif /* __MLX5_LAG_MP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 98359559c77e..a71d5b9c7ab2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -108,8 +108,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev) mutex_init(&mpfs->lock); mpfs->size = l2table_size; - mpfs->bitmap = kcalloc(BITS_TO_LONGS(l2table_size), - sizeof(uintptr_t), GFP_KERNEL); + mpfs->bitmap = bitmap_zalloc(l2table_size, GFP_KERNEL); if (!mpfs->bitmap) { kfree(mpfs); return -ENOMEM; @@ -127,7 +126,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) return; WARN_ON(!hlist_empty(mpfs->hash)); - kfree(mpfs->bitmap); + bitmap_free(mpfs->bitmap); kfree(mpfs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c new file mode 100644 index 000000000000..40f4a19b1ce1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. 
*/ + +#include +#include +#include +#include +#include "mlx5_core.h" +#include "lib/port_tun.h" + +struct mlx5_port_tun_entropy_flags { + bool force_supported, force_enabled; + bool calc_supported, calc_enabled; + bool gre_calc_supported, gre_calc_enabled; +}; + +static void mlx5_query_port_tun_entropy(struct mlx5_core_dev *mdev, + struct mlx5_port_tun_entropy_flags *entropy_flags) +{ + u32 out[MLX5_ST_SZ_DW(pcmr_reg)]; + /* Default values for FW which do not support MLX5_REG_PCMR */ + entropy_flags->force_supported = false; + entropy_flags->calc_supported = false; + entropy_flags->gre_calc_supported = false; + entropy_flags->force_enabled = false; + entropy_flags->calc_enabled = true; + entropy_flags->gre_calc_enabled = true; + + if (!MLX5_CAP_GEN(mdev, ports_check)) + return; + + if (mlx5_query_ports_check(mdev, out, sizeof(out))) + return; + + entropy_flags->force_supported = !!(MLX5_GET(pcmr_reg, out, entropy_force_cap)); + entropy_flags->calc_supported = !!(MLX5_GET(pcmr_reg, out, entropy_calc_cap)); + entropy_flags->gre_calc_supported = !!(MLX5_GET(pcmr_reg, out, entropy_gre_calc_cap)); + entropy_flags->force_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_force)); + entropy_flags->calc_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_calc)); + entropy_flags->gre_calc_enabled = !!(MLX5_GET(pcmr_reg, out, entropy_gre_calc)); +} + +static int mlx5_set_port_tun_entropy_calc(struct mlx5_core_dev *mdev, u8 enable, + u8 force) +{ + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; + int err; + + err = mlx5_query_ports_check(mdev, in, sizeof(in)); + if (err) + return err; + MLX5_SET(pcmr_reg, in, local_port, 1); + MLX5_SET(pcmr_reg, in, entropy_force, force); + MLX5_SET(pcmr_reg, in, entropy_calc, enable); + return mlx5_set_ports_check(mdev, in, sizeof(in)); +} + +static int mlx5_set_port_gre_tun_entropy_calc(struct mlx5_core_dev *mdev, + u8 enable, u8 force) +{ + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; + int err; + + err = mlx5_query_ports_check(mdev, in, sizeof(in)); + if (err) + return err; + MLX5_SET(pcmr_reg, in, local_port, 1); + MLX5_SET(pcmr_reg, in, entropy_force, force); + MLX5_SET(pcmr_reg, in, entropy_gre_calc, enable); + return mlx5_set_ports_check(mdev, in, sizeof(in)); +} + +void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy, + struct mlx5_core_dev *mdev) +{ + struct mlx5_port_tun_entropy_flags entropy_flags; + + tun_entropy->mdev = mdev; + mutex_init(&tun_entropy->lock); + mlx5_query_port_tun_entropy(mdev, &entropy_flags); + tun_entropy->num_enabling_entries = 0; + tun_entropy->num_disabling_entries = 0; + tun_entropy->enabled = entropy_flags.calc_enabled; + tun_entropy->enabled = + (entropy_flags.calc_supported) ? + entropy_flags.calc_enabled : true; +} + +static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy, + int reformat_type, bool enable) +{ + struct mlx5_port_tun_entropy_flags entropy_flags; + int err; + + mlx5_query_port_tun_entropy(tun_entropy->mdev, &entropy_flags); + /* Tunnel entropy calculation may be controlled either on port basis + * for all tunneling protocols or specifically for GRE protocol. + * Prioritize GRE protocol control (if capable) over global port + * configuration. + */ + if (entropy_flags.gre_calc_supported && + reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) { + /* Other applications may change the global FW entropy + * calculations settings. Check that the current entropy value + * is the negative of the updated value. 
+ */ + if (entropy_flags.force_enabled && + enable == entropy_flags.gre_calc_enabled) { + mlx5_core_warn(tun_entropy->mdev, + "Unexpected GRE entropy calc setting - expected %d", + !entropy_flags.gre_calc_enabled); + return -EOPNOTSUPP; + } + err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, enable, + entropy_flags.force_supported); + if (err) + return err; + /* if we turn on the entropy we don't need to force it anymore */ + if (entropy_flags.force_supported && enable) { + err = mlx5_set_port_gre_tun_entropy_calc(tun_entropy->mdev, 1, 0); + if (err) + return err; + } + } else if (entropy_flags.calc_supported) { + /* Other applications may change the global FW entropy + * calculations settings. Check that the current entropy value + * is the negative of the updated value. + */ + if (entropy_flags.force_enabled && + enable == entropy_flags.calc_enabled) { + mlx5_core_warn(tun_entropy->mdev, + "Unexpected entropy calc setting - expected %d", + !entropy_flags.calc_enabled); + return -EOPNOTSUPP; + } + /* GRE requires disabling entropy calculation. if there are + * enabling entries (i.e VXLAN) we cannot turn it off for them, + * thus fail. + */ + if (tun_entropy->num_enabling_entries) + return -EOPNOTSUPP; + err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, enable, + entropy_flags.force_supported); + if (err) + return err; + tun_entropy->enabled = enable; + /* if we turn on the entropy we don't need to force it anymore */ + if (entropy_flags.force_supported && enable) { + err = mlx5_set_port_tun_entropy_calc(tun_entropy->mdev, 1, 0); + if (err) + return err; + } + } + + return 0; +} + +/* the function manages the refcount for enabling/disabling tunnel types. + * the return value indicates if the inc is successful or not, depending on + * entropy capabilities and configuration. + */ +int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy, + int reformat_type) +{ + /* the default is error for unknown (non VXLAN/GRE tunnel types) */ + int err = -EOPNOTSUPP; + + mutex_lock(&tun_entropy->lock); + if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN && + tun_entropy->enabled) { + /* in case entropy calculation is enabled for all tunneling + * types, it is ok for VXLAN, so approve. + * otherwise keep the error default. + */ + tun_entropy->num_enabling_entries++; + err = 0; + } else if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE) { + /* turn off the entropy only for the first GRE rule. + * for the next rules the entropy was already disabled + * successfully. 
+ */ + if (tun_entropy->num_disabling_entries == 0) + err = mlx5_set_entropy(tun_entropy, reformat_type, 0); + else + err = 0; + if (!err) + tun_entropy->num_disabling_entries++; + } + mutex_unlock(&tun_entropy->lock); + + return err; +} + +void mlx5_tun_entropy_refcount_dec(struct mlx5_tun_entropy *tun_entropy, + int reformat_type) +{ + mutex_lock(&tun_entropy->lock); + if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_VXLAN) + tun_entropy->num_enabling_entries--; + else if (reformat_type == MLX5_REFORMAT_TYPE_L2_TO_NVGRE && + --tun_entropy->num_disabling_entries == 0) + mlx5_set_entropy(tun_entropy, reformat_type, 1); + mutex_unlock(&tun_entropy->lock); +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h new file mode 100644 index 000000000000..54c42a88705e --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2019 Mellanox Technologies. */ + +#ifndef __MLX5_PORT_TUN_H__ +#define __MLX5_PORT_TUN_H__ + +#include + +struct mlx5_tun_entropy { + struct mlx5_core_dev *mdev; + u32 num_enabling_entries; + u32 num_disabling_entries; + u8 enabled; + struct mutex lock; /* lock the entropy fields */ +}; + +void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy, + struct mlx5_core_dev *mdev); +int mlx5_tun_entropy_refcount_inc(struct mlx5_tun_entropy *tun_entropy, + int reformat_type); +void mlx5_tun_entropy_refcount_dec(struct mlx5_tun_entropy *tun_entropy, + int reformat_type); + +#endif /* __MLX5_PORT_TUN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c deleted file mode 100644 index 3a3b0005fd2b..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include -#include -#include -#include -#include "mlx5_core.h" - -int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, - u16 opmod, u8 port) -{ - int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out); - int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in); - int err = -ENOMEM; - void *data; - void *resp; - u32 *out; - u32 *in; - - in = kzalloc(inlen, GFP_KERNEL); - out = kzalloc(outlen, GFP_KERNEL); - if (!in || !out) - goto out; - - MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC); - MLX5_SET(mad_ifc_in, in, op_mod, opmod); - MLX5_SET(mad_ifc_in, in, port, port); - - data = MLX5_ADDR_OF(mad_ifc_in, in, mad); - memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad)); - - err = mlx5_cmd_exec(dev, in, inlen, out, outlen); - if (err) - goto out; - - resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet); - memcpy(outb, resp, - MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet)); - -out: - kfree(out); - kfree(in); - return err; -} -EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index be81b319b0dc..70cc906a102b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -65,6 +65,7 @@ #include "lib/vxlan.h" #include "lib/devcom.h" #include "diag/fw_tracer.h" +#include "ecpf.h" MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); @@ -459,6 +460,58 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev) return err; } +static int handle_hca_cap_odp(struct mlx5_core_dev *dev) +{ + void *set_hca_cap; + void *set_ctx; + int set_sz; + bool do_set = false; + int err; + + if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) || + !MLX5_CAP_GEN(dev, pg)) + return 0; + + err = mlx5_core_get_caps(dev, MLX5_CAP_ODP); + if (err) + return err; + + set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); + set_ctx = kzalloc(set_sz, GFP_KERNEL); + if (!set_ctx) + return -ENOMEM; + + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); + memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP], + MLX5_ST_SZ_BYTES(odp_cap)); + +#define ODP_CAP_SET_MAX(dev, field) \ + do { \ + u32 _res = MLX5_CAP_ODP_MAX(dev, field); \ + if (_res) { \ + do_set = true; \ + MLX5_SET(odp_cap, set_hca_cap, field, _res); \ + } \ + } while (0) + + ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.send); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.write); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.read); + ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic); + + if (do_set) + err = set_caps(dev, set_ctx, set_sz, + MLX5_SET_HCA_CAP_OP_MOD_ODP); + + kfree(set_ctx); + + return err; +} + static int handle_hca_cap(struct mlx5_core_dev *dev) { void *set_ctx = NULL; @@ -532,6 +585,33 @@ query_ex: return err; } +static int set_hca_cap(struct mlx5_core_dev *dev) +{ + struct pci_dev *pdev = dev->pdev; + int err; + + err = handle_hca_cap(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap failed\n"); + goto out; + } + + err = handle_hca_cap_atomic(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n"); + goto out; + } + + err = handle_hca_cap_odp(dev); + if (err) { + dev_err(&pdev->dev, "handle_hca_cap_odp failed\n"); + goto out; + } + +out: + return err; +} + static int set_hca_ctrl(struct mlx5_core_dev *dev) { struct 
mlx5_reg_host_endianness he_in; @@ -567,6 +647,8 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id) MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA); MLX5_SET(enable_hca_in, in, function_id, func_id); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, + dev->caps.embedded_cpu); return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); } @@ -577,6 +659,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); MLX5_SET(disable_hca_in, in, function_id, func_id); + MLX5_SET(enable_hca_in, in, embedded_cpu_function, + dev->caps.embedded_cpu); return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } @@ -693,6 +777,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_clr_master; } + if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) && + pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) && + pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128)) + mlx5_core_dbg(dev, "Enabling pci atomics failed\n"); + dev->iseg_base = pci_resource_start(dev->pdev, 0); dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg)); if (!dev->iseg) { @@ -849,6 +938,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, struct pci_dev *pdev = dev->pdev; int err; + dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mutex_lock(&dev->intf_state_mutex); if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", @@ -914,15 +1004,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto reclaim_boot_pages; } - err = handle_hca_cap(dev); - if (err) { - dev_err(&pdev->dev, "handle_hca_cap failed\n"); - goto reclaim_boot_pages; - } - - err = handle_hca_cap_atomic(dev); + err = set_hca_cap(dev); if (err) { - dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n"); + dev_err(&pdev->dev, "set_hca_cap failed\n"); goto reclaim_boot_pages; } @@ -1014,6 +1098,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_sriov; } + err = mlx5_ec_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init embedded CPU\n"); + goto err_ec; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@ -1031,6 +1121,9 @@ out: return 0; err_reg_dev: + mlx5_ec_cleanup(dev); + +err_ec: mlx5_sriov_detach(dev); err_sriov: @@ -1105,6 +1198,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, if (mlx5_device_registered(dev)) mlx5_detach_device(dev); + mlx5_ec_cleanup(dev); mlx5_sriov_detach(dev); mlx5_cleanup_fs(dev); mlx5_accel_ipsec_cleanup(dev); @@ -1415,6 +1509,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */ { PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */ { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */ + { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ + { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { 0, } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index c68dcea5985b..9529cf9623e3 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -121,11 +121,12 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 modify_bitmask); int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 element_id); -int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); +int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages); u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, struct ptp_system_timestamp *sts); void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); +void mlx5_cmd_flush(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); @@ -187,6 +188,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) MLX5_CAP_GEN(dev, lag_master); } +int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num); + void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); void mlx5_lag_update(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c index 0670165afd5f..ea744d8466ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c @@ -51,9 +51,10 @@ void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev) int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in, int inlen, - u32 *out, int outlen, - mlx5_cmd_cbk_t callback, void *context) + struct mlx5_async_ctx *async_ctx, u32 *in, + int inlen, u32 *out, int outlen, + mlx5_async_cbk_t callback, + struct mlx5_async_work *context) { struct mlx5_mkey_table *table = &dev->priv.mkey_table; u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0}; @@ -71,7 +72,7 @@ int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, MLX5_SET(mkc, mkc, mkey_7_0, key); if (callback) - return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen, + return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen, callback, context); err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout)); @@ -105,7 +106,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, u32 *in, int inlen) { - return mlx5_core_create_mkey_cb(dev, mkey, in, inlen, + return mlx5_core_create_mkey_cb(dev, mkey, NULL, in, inlen, NULL, 0, NULL, NULL); } EXPORT_SYMBOL(mlx5_core_create_mkey); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index a83b517b0714..41025387ff2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -48,6 +48,7 @@ enum { struct mlx5_pages_req { struct mlx5_core_dev *dev; u16 func_id; + u8 ec_function; s32 npages; struct work_struct work; }; @@ -143,6 +144,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, MLX5_SET(query_pages_in, in, op_mod, boot ? 
MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES : MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES); + MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev)); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) @@ -253,7 +255,8 @@ err_mapping: return err; } -static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) +static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id, + bool ec_function) { u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; @@ -262,6 +265,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES); MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE); MLX5_SET(manage_pages_in, in, function_id, func_id); + MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) @@ -270,7 +274,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id) } static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, - int notify_fail) + int notify_fail, bool ec_function) { u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); @@ -305,6 +309,7 @@ retry: MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE); MLX5_SET(manage_pages_in, in, function_id, func_id); MLX5_SET(manage_pages_in, in, input_num_entries, npages); + MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (err) { @@ -316,8 +321,11 @@ retry: dev->priv.fw_pages += npages; if (func_id) dev->priv.vfs_pages += npages; + else if (mlx5_core_is_ecpf(dev) && !ec_function) + dev->priv.peer_pf_pages += npages; - mlx5_core_dbg(dev, "err %d\n", err); + mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n", + npages, ec_function, func_id, err); kvfree(in); return 0; @@ -328,7 +336,7 @@ out_4k: out_free: kvfree(in); if (notify_fail) - page_notify_fail(dev, func_id); + page_notify_fail(dev, func_id, ec_function); return err; } @@ -364,7 +372,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, } static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, - int *nclaimed) + int *nclaimed, bool ec_function) { int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0}; @@ -385,6 +393,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE); MLX5_SET(manage_pages_in, in, function_id, func_id); MLX5_SET(manage_pages_in, in, input_num_entries, npages); + MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); @@ -410,6 +419,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, dev->priv.fw_pages -= num_claimed; if (func_id) dev->priv.vfs_pages -= num_claimed; + else if (mlx5_core_is_ecpf(dev) && !ec_function) + dev->priv.peer_pf_pages -= num_claimed; out_free: kvfree(out); @@ -423,9 +434,10 @@ static void pages_work_handler(struct work_struct *work) int err = 0; if (req->npages < 0) - err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL); + err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL, + req->ec_function); else if (req->npages > 0) - err = give_pages(dev, req->func_id, req->npages, 1); + err = give_pages(dev, req->func_id, 
req->npages, 1, req->ec_function); if (err) mlx5_core_warn(dev, "%s fail %d\n", @@ -434,6 +446,10 @@ static void pages_work_handler(struct work_struct *work) kfree(req); } +enum { + EC_FUNCTION_MASK = 0x8000, +}; + static int req_pages_handler(struct notifier_block *nb, unsigned long type, void *data) { @@ -441,6 +457,7 @@ static int req_pages_handler(struct notifier_block *nb, struct mlx5_core_dev *dev; struct mlx5_priv *priv; struct mlx5_eqe *eqe; + bool ec_function; u16 func_id; s32 npages; @@ -450,6 +467,7 @@ static int req_pages_handler(struct notifier_block *nb, func_id = be16_to_cpu(eqe->data.req_pages.func_id); npages = be32_to_cpu(eqe->data.req_pages.num_pages); + ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK; mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages); req = kzalloc(sizeof(*req), GFP_ATOMIC); @@ -461,6 +479,7 @@ static int req_pages_handler(struct notifier_block *nb, req->dev = dev; req->func_id = func_id; req->npages = npages; + req->ec_function = ec_function; INIT_WORK(&req->work, pages_work_handler); queue_work(dev->priv.pg_wq, &req->work); return NOTIFY_OK; @@ -479,7 +498,7 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", npages, boot ? "boot" : "init", func_id); - return give_pages(dev, func_id, npages, 0); + return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev)); } enum { @@ -513,7 +532,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) fwp = rb_entry(p, struct fw_page, rb_node); err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), - &nclaimed); + &nclaimed, mlx5_core_is_ecpf(dev)); if (err) { mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", @@ -535,6 +554,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) WARN(dev->priv.vfs_pages, "VFs FW pages counter is %d after reclaiming all pages\n", dev->priv.vfs_pages); + WARN(dev->priv.peer_pf_pages, + "Peer PF FW pages counter is %d after reclaiming all pages\n", + dev->priv.peer_pf_pages); return 0; } @@ -567,10 +589,10 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) flush_workqueue(dev->priv.pg_wq); } -int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) +int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages) { unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); - int prev_vfs_pages = dev->priv.vfs_pages; + int prev_pages = *pages; /* In case of internal error we will free the pages manually later */ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { @@ -578,16 +600,16 @@ int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) return 0; } - mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages, + mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages, dev->priv.name); - while (dev->priv.vfs_pages) { + while (*pages) { if (time_after(jiffies, end)) { - mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages); + mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages); return -ETIMEDOUT; } - if (dev->priv.vfs_pages < prev_vfs_pages) { + if (*pages < prev_pages) { end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS); - prev_vfs_pages = dev->priv.vfs_pages; + prev_pages = *pages; } msleep(50); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 2b82f35f4c35..21b7f05b16a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -30,10 +30,7 @@ * SOFTWARE. */ -#include -#include #include -#include #include "mlx5_core.h" int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, @@ -157,44 +154,6 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration) sizeof(out), MLX5_REG_MLCR, 0, 1); } -int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, - u32 *proto_cap, int proto_mask) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - int err; - - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); - if (err) - return err; - - if (proto_mask == MLX5_PTYS_EN) - *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - else - *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability); - - return 0; -} -EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap); - -int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, - u32 *proto_admin, int proto_mask) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - int err; - - err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1); - if (err) - return err; - - if (proto_mask == MLX5_PTYS_EN) - *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - else - *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin); - - return 0; -} -EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin); - int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, u8 *link_width_oper, u8 local_port) { @@ -211,23 +170,6 @@ int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); -int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, - u32 *proto_oper, u8 local_port) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - int err; - - err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, - local_port); - if (err) - return err; - - *proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - - return 0; -} -EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper); - int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, u8 *proto_oper, u8 local_port) { @@ -245,35 +187,6 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper); -int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, - u32 proto_admin, int proto_mask) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - u32 in[MLX5_ST_SZ_DW(ptys_reg)]; - u8 an_disable_admin; - u8 an_disable_cap; - u8 an_status; - - mlx5_query_port_autoneg(dev, proto_mask, &an_status, - &an_disable_cap, &an_disable_admin); - if (!an_disable_cap && an_disable) - return -EPERM; - - memset(in, 0, sizeof(in)); - - MLX5_SET(ptys_reg, in, local_port, 1); - MLX5_SET(ptys_reg, in, an_disable_admin, an_disable); - MLX5_SET(ptys_reg, in, proto_mask, proto_mask); - if (proto_mask == MLX5_PTYS_EN) - MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); - else - MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin); - - return mlx5_core_access_reg(dev, in, sizeof(in), out, - sizeof(out), MLX5_REG_PTYS, 0, 1); -} -EXPORT_SYMBOL_GPL(mlx5_set_port_ptys); - /* This function should be used after setting a port register only */ void mlx5_toggle_port_link(struct mlx5_core_dev *dev) { @@ -606,25 +519,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) } EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); -void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, - u8 *an_status, - u8 *an_disable_cap, u8 *an_disable_admin) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - - *an_status = 0; - *an_disable_cap = 0; - *an_disable_admin = 0; - - if (mlx5_query_port_ptys(dev, out, sizeof(out), 
proto_mask, 1)) - return; - - *an_status = MLX5_GET(ptys_reg, out, an_status); - *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap); - *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); -} -EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg); - int mlx5_max_tc(struct mlx5_core_dev *mdev) { u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8; @@ -870,8 +764,7 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) } EXPORT_SYMBOL_GPL(mlx5_query_port_wol); -static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, - int outlen) +int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen) { u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; @@ -880,7 +773,7 @@ static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, outlen, MLX5_REG_PCMR, 0, 0); } -static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen) +int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen) { u32 out[MLX5_ST_SZ_DW(pcmr_reg)]; @@ -891,7 +784,11 @@ static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen) int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable) { u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0}; + int err; + err = mlx5_query_ports_check(mdev, in, sizeof(in)); + if (err) + return err; MLX5_SET(pcmr_reg, in, local_port, 1); MLX5_SET(pcmr_reg, in, fcs_chk, enable); return mlx5_set_ports_check(mdev, in, sizeof(in)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 388f205a497f..370ca94b6775 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common * mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) { struct mlx5_core_rsc_common *common; + unsigned long flags; - spin_lock(&table->lock); + spin_lock_irqsave(&table->lock, flags); common = radix_tree_lookup(&table->tree, rsn); if (common) atomic_inc(&common->refcount); - spin_unlock(&table->lock); + spin_unlock_irqrestore(&table->lock, flags); return common; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 6e178030d8fb..7b23fa8d2d60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -147,7 +147,7 @@ out: if (MLX5_ESWITCH_MANAGER(dev)) mlx5_eswitch_disable_sriov(dev->priv.eswitch); - if (mlx5_wait_for_vf_pages(dev)) + if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 8b97066dd1f1..94464723ff77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -90,8 +90,8 @@ static void up_rel_func(struct kref *kref) iounmap(up->map); if (mlx5_cmd_free_uar(up->mdev, up->index)) mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index); - kfree(up->reg_bitmap); - kfree(up->fp_bitmap); + bitmap_free(up->reg_bitmap); + bitmap_free(up->fp_bitmap); kfree(up); } @@ -110,11 +110,11 @@ static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev, return ERR_PTR(err); up->mdev = mdev; - up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL); + up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL); if (!up->reg_bitmap) goto error1; - up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), 
GFP_KERNEL); + up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL); if (!up->fp_bitmap) goto error1; @@ -157,8 +157,8 @@ error2: if (mlx5_cmd_free_uar(mdev, up->index)) mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index); error1: - kfree(up->fp_bitmap); - kfree(up->reg_bitmap); + bitmap_free(up->fp_bitmap); + bitmap_free(up->reg_bitmap); kfree(up); return ERR_PTR(err); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 9b150ce9d315..ef95feca9961 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -64,7 +64,7 @@ u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport) } int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, - u16 vport, u8 state) + u16 vport, u8 other_vport, u8 state) { u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0}; u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0}; @@ -73,8 +73,7 @@ int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, MLX5_CMD_OP_MODIFY_VPORT_STATE); MLX5_SET(modify_vport_state_in, in, op_mod, opmod); MLX5_SET(modify_vport_state_in, in, vport_number, vport); - if (vport) - MLX5_SET(modify_vport_state_in, in, other_vport, 1); + MLX5_SET(modify_vport_state_in, in, other_vport, other_vport); MLX5_SET(modify_vport_state_in, in, admin_state, state); return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); @@ -255,7 +254,7 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu); int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, - u32 vport, + u16 vport, enum mlx5_list_type list_type, u8 addr_list[][ETH_ALEN], int *list_size) @@ -373,7 +372,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list); int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, - u32 vport, + u16 vport, u16 vlans[], int *size) { @@ -526,7 +525,7 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, - u32 vport, u64 node_guid) + u16 vport, u64 node_guid) { int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); void *nic_vport_context; @@ -827,7 +826,7 @@ int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid); int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, - u32 vport, + u16 vport, int *promisc_uc, int *promisc_mc, int *promisc_all) @@ -1057,7 +1056,7 @@ free: EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter); int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, - u64 *rx_discard_vport_down, + u8 other_vport, u64 *rx_discard_vport_down, u64 *tx_discard_vport_down) { u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {0}; @@ -1068,8 +1067,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, MLX5_CMD_OP_QUERY_VNIC_ENV); MLX5_SET(query_vnic_env_in, in, op_mod, 0); MLX5_SET(query_vnic_env_in, in, vport_number, vport); - if (vport) - MLX5_SET(query_vnic_env_in, in, other_vport, 1); + MLX5_SET(query_vnic_env_in, in, other_vport, other_vport); err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h index 7a712b6b09ec..14c0c62f8e73 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h +++ 
b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ #ifndef _MLXFW_H #define _MLXFW_H diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index 2cf89126fb23..240c027e5f07 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ #define pr_fmt(fmt) "mlxfw: " fmt diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c index 993cb5ba934e..544344ac4894 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2019 Mellanox Technologies. 
All rights reserved */ #define pr_fmt(fmt) "mlxfw_mfa2: " fmt diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h index 20472aa139cd..5bba6ad79d34 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ #ifndef _MLXFW_MFA2_H #define _MLXFW_MFA2_H diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h index f667942b1ea3..874c0a2474ae 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_file.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ #ifndef _MLXFW_MFA2_FILE_H #define _MLXFW_MFA2_FILE_H diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h index dd66737c033d..b001e5258091 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h @@ -1,36 +1,6 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ + #ifndef _MLXFW_MFA2_FORMAT_H #define _MLXFW_MFA2_FORMAT_H diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h index cc013e77b326..33c971190bba 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ #ifndef _MLXFW_MFA2_TLV_H #define _MLXFW_MFA2_TLV_H diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c index 0094b92a233b..017d68f1e123 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c @@ -1,36 +1,5 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ #define pr_fmt(fmt) "MFA2: " fmt diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h index 2c667894f3a2..633284eeded7 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h @@ -1,36 +1,6 @@ -/* - * drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.h - * Copyright (c) 2017 Mellanox Technologies. All rights reserved. - * Copyright (c) 2017 Yotam Gigi - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the names of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * Alternatively, this software may be distributed under the terms of the - * GNU General Public License ("GPL") version 2 as published by the Free - * Software Foundation. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. 
- */ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2017-2019 Mellanox Technologies. All rights reserved */ + #ifndef _MLXFW_MFA2_TLV_MULTI_H #define _MLXFW_MFA2_TLV_MULTI_H diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index b9a25aed5d11..9c195dfed031 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -4,7 +4,6 @@ config MLXSW_CORE tristate "Mellanox Technologies Switch ASICs support" - depends on MAY_USE_DEVLINK ---help--- This driver supports Mellanox Technologies Switch ASICs family. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index bbf45f10c208..a01d15546e37 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_MLXSW_CORE) += mlxsw_core.o mlxsw_core-objs := core.o core_acl_flex_keys.o \ - core_acl_flex_actions.o + core_acl_flex_actions.o core_env.o mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index ddedf8ab5b64..d23d53c0e284 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1062,6 +1062,9 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_driver_init; } + if (mlxsw_driver->params_register && !reload) + devlink_params_publish(devlink); + return 0; err_driver_init: @@ -1131,6 +1134,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; } + if (mlxsw_core->driver->params_unregister && !reload) + devlink_params_unpublish(devlink); if (mlxsw_core->driver->fini) mlxsw_core->driver->fini(mlxsw_core); mlxsw_thermal_fini(mlxsw_core->thermal); @@ -1460,13 +1465,17 @@ static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) if (trans->retries) dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n", trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid); - if (err) + if (err) { dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n", trans->tid, trans->reg->id, mlxsw_reg_id_str(trans->reg->id), mlxsw_core_reg_access_type_str(trans->type), trans->emad_status, mlxsw_emad_op_tlv_status_str(trans->emad_status)); + trace_devlink_hwerr(priv_to_devlink(mlxsw_core), + trans->emad_status, + mlxsw_emad_op_tlv_status_str(trans->emad_status)); + } list_del(&trans->bulk_list); kfree_rcu(trans, rcu); @@ -1908,6 +1917,43 @@ void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core) } EXPORT_SYMBOL(mlxsw_core_fw_flash_end); +int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox, + struct mlxsw_res *res) +{ + int index, i; + u64 data; + u16 id; + int err; + + if (!res) + return 0; + + mlxsw_cmd_mbox_zero(mbox); + + for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; + index++) { + err = mlxsw_cmd_query_resources(mlxsw_core, mbox, index); + if (err) + return err; + + for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { + id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); + data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); + + if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) + return 0; + + mlxsw_res_parse(res, id, data); + } + } + + /* If after 
MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get + * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. + */ + return -EIO; +} +EXPORT_SYMBOL(mlxsw_core_resources_query); + static int __init mlxsw_core_module_init(void) { int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 4e114f35ee0d..8ec53f027575 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -182,6 +182,8 @@ int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core, int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); bool mlxsw_core_schedule_work(struct work_struct *work); void mlxsw_core_flush_owq(void); +int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox, + struct mlxsw_res *res); #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 @@ -344,6 +346,7 @@ struct mlxsw_bus_info { struct mlxsw_fw_rev fw_rev; u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN]; u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN]; + u8 low_frequency; }; struct mlxsw_hwmon; @@ -394,4 +397,9 @@ static inline void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) #endif +enum mlxsw_devlink_param_id { + MLXSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, +}; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index df78d23b3ec3..cb3e663b1d37 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -236,12 +236,10 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_element_usage *elusage) { struct mlxsw_afk_key_info *key_info; - size_t alloc_size; int err; - alloc_size = sizeof(*key_info) + - sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks; - key_info = kzalloc(alloc_size, GFP_KERNEL); + key_info = kzalloc(struct_size(key_info, blocks, mlxsw_afk->max_blocks), + GFP_KERNEL); if (!key_info) return ERR_PTR(-ENOMEM); err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c new file mode 100644 index 000000000000..7a15e932ed2f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ + +#include +#include + +#include "core.h" +#include "core_env.h" +#include "item.h" +#include "reg.h" + +static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, + bool *qsfp) +{ + char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; + char mcia_pl[MLXSW_REG_MCIA_LEN]; + u8 ident; + int err; + + mlxsw_reg_mcia_pack(mcia_pl, id, 0, MLXSW_REG_MCIA_PAGE0_LO_OFF, 0, 1, + MLXSW_REG_MCIA_I2C_ADDR_LOW); + err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); + if (err) + return err; + mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); + ident = eeprom_tmp[0]; + switch (ident) { + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: + *qsfp = false; + break; + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD: + *qsfp = true; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, + u16 offset, u16 size, void *data, + unsigned int *p_read_size) +{ + char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; + char mcia_pl[MLXSW_REG_MCIA_LEN]; + u16 i2c_addr; + int status; + int err; + + size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE); + + if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH && + offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset; + + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW; + if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) { + i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH; + offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH; + } + + mlxsw_reg_mcia_pack(mcia_pl, module, 0, 0, offset, size, i2c_addr); + + err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcia), mcia_pl); + if (err) + return err; + + status = mlxsw_reg_mcia_status_get(mcia_pl); + if (status) + return -EIO; + + mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); + memcpy(data, eeprom_tmp, size); + *p_read_size = size; + + return 0; +} + +int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, + int off, int *temp) +{ + char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; + union { + u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE]; + u16 temp; + } temp_thresh; + char mcia_pl[MLXSW_REG_MCIA_LEN] = {0}; + char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; + u16 module_temp; + bool qsfp; + int err; + + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, + 1); + err = mlxsw_reg_query(core, MLXSW_REG(mtbr), mtbr_pl); + if (err) + return err; + + /* Don't read temperature thresholds for module with no valid info. */ + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &module_temp, NULL); + switch (module_temp) { + case MLXSW_REG_MTBR_BAD_SENS_INFO: /* fall-through */ + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: + *temp = 0; + return 0; + default: + /* Do not consider thresholds for zero temperature. */ + if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) { + *temp = 0; + return 0; + } + break; + } + + /* Read Free Side Device Temperature Thresholds from page 03h + * (MSB at lower byte address). 
+ * Bytes: + * 128-129 - Temp High Alarm (SFP_TEMP_HIGH_ALARM); + * 130-131 - Temp Low Alarm (SFP_TEMP_LOW_ALARM); + * 132-133 - Temp High Warning (SFP_TEMP_HIGH_WARN); + * 134-135 - Temp Low Warning (SFP_TEMP_LOW_WARN); + */ + + /* Validate module identifier value. */ + err = mlxsw_env_validate_cable_ident(core, module, &qsfp); + if (err) + return err; + + if (qsfp) + mlxsw_reg_mcia_pack(mcia_pl, module, 0, + MLXSW_REG_MCIA_TH_PAGE_NUM, + MLXSW_REG_MCIA_TH_PAGE_OFF + off, + MLXSW_REG_MCIA_TH_ITEM_SIZE, + MLXSW_REG_MCIA_I2C_ADDR_LOW); + else + mlxsw_reg_mcia_pack(mcia_pl, module, 0, + MLXSW_REG_MCIA_PAGE0_LO, + off, MLXSW_REG_MCIA_TH_ITEM_SIZE, + MLXSW_REG_MCIA_I2C_ADDR_HIGH); + + err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); + if (err) + return err; + + mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); + memcpy(temp_thresh.buf, eeprom_tmp, MLXSW_REG_MCIA_TH_ITEM_SIZE); + *temp = temp_thresh.temp * 1000; + + return 0; +} + +int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, + struct ethtool_modinfo *modinfo) +{ + u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE]; + u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE; + u8 module_rev_id, module_id; + unsigned int read_size; + int err; + + err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset, + module_info, &read_size); + if (err) + return err; + + if (read_size < offset) + return -EIO; + + module_rev_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID]; + module_id = module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID]; + + switch (module_id) { + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: + if (module_id == MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 || + module_rev_id >= + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(mlxsw_env_get_module_info); + +int mlxsw_env_get_module_eeprom(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, int module, + struct ethtool_eeprom *ee, u8 *data) +{ + int offset = ee->offset; + unsigned int read_size; + int i = 0; + int err; + + if (!ee->len) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset, + ee->len - i, data + i, + &read_size); + if (err) { + netdev_err(netdev, "Eeprom query failed\n"); + return err; + } + + i += read_size; + offset += read_size; + } + + return 0; +} +EXPORT_SYMBOL(mlxsw_env_get_module_eeprom); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h new file mode 100644 index 000000000000..064d0e770c01 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. 
All rights reserved */ + +#ifndef _MLXSW_CORE_ENV_H +#define _MLXSW_CORE_ENV_H + +int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, + int off, int *temp); + +int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, + struct ethtool_modinfo *modinfo); + +int mlxsw_env_get_module_eeprom(struct net_device *netdev, + struct mlxsw_core *mlxsw_core, int module, + struct ethtool_eeprom *ee, u8 *data); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index e04e8162aa14..6956bbebe2f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c @@ -7,8 +7,10 @@ #include #include #include +#include #include "core.h" +#include "core_env.h" #define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127 #define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \ @@ -30,6 +32,7 @@ struct mlxsw_hwmon { struct attribute *attrs[MLXSW_HWMON_ATTR_COUNT + 1]; struct mlxsw_hwmon_attr hwmon_attrs[MLXSW_HWMON_ATTR_COUNT]; unsigned int attrs_count; + u8 sensor_count; }; static ssize_t mlxsw_hwmon_temp_show(struct device *dev, @@ -121,6 +124,27 @@ static ssize_t mlxsw_hwmon_fan_rpm_show(struct device *dev, return sprintf(buf, "%u\n", mlxsw_reg_mfsm_rpm_get(mfsm_pl)); } +static ssize_t mlxsw_hwmon_fan_fault_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char fore_pl[MLXSW_REG_FORE_LEN]; + bool fault; + int err; + + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(fore), fore_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query fan\n"); + return err; + } + mlxsw_reg_fore_unpack(fore_pl, mlwsw_hwmon_attr->type_index, &fault); + + return sprintf(buf, "%u\n", fault); +} + static ssize_t mlxsw_hwmon_pwm_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -167,12 +191,160 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev, return len; } +static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; + u16 temp; + u8 module; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, + 1); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); + if (err) { + dev_err(dev, "Failed to query module temperature sensor\n"); + return err; + } + + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL); + /* Update status and temperature cache. */ + switch (temp) { + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: + temp = 0; + break; + case MLXSW_REG_MTBR_BAD_SENS_INFO: + /* Untrusted cable is connected. Reading temperature from its + * sensor is faulty. 
+ */ + temp = 0; + break; + default: + temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + break; + } + + return sprintf(buf, "%u\n", temp); +} + +static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + char mtbr_pl[MLXSW_REG_MTBR_LEN] = {0}; + u8 module, fault; + u16 temp; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, + 1); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl); + if (err) { + dev_err(dev, "Failed to query module temperature sensor\n"); + return err; + } + + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL); + + /* Update status and temperature cache. */ + switch (temp) { + case MLXSW_REG_MTBR_BAD_SENS_INFO: + /* Untrusted cable is connected. Reading temperature from its + * sensor is faulty. + */ + fault = 1; + break; + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: + default: + fault = 0; + break; + } + + return sprintf(buf, "%u\n", fault); +} + +static ssize_t +mlxsw_hwmon_module_temp_critical_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + int temp; + u8 module; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, + SFP_TEMP_HIGH_WARN, &temp); + if (err) { + dev_err(dev, "Failed to query module temperature thresholds\n"); + return err; + } + + return sprintf(buf, "%u\n", temp); +} + +static ssize_t +mlxsw_hwmon_module_temp_emergency_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon; + u8 module; + int temp; + int err; + + module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count; + err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module, + SFP_TEMP_HIGH_ALARM, &temp); + if (err) { + dev_err(dev, "Failed to query module temperature thresholds\n"); + return err; + } + + return sprintf(buf, "%u\n", temp); +} + +static ssize_t +mlxsw_hwmon_module_temp_label_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct mlxsw_hwmon_attr *mlwsw_hwmon_attr = + container_of(attr, struct mlxsw_hwmon_attr, dev_attr); + + return sprintf(buf, "front panel %03u\n", + mlwsw_hwmon_attr->type_index); +} + enum mlxsw_hwmon_attr_type { MLXSW_HWMON_ATTR_TYPE_TEMP, MLXSW_HWMON_ATTR_TYPE_TEMP_MAX, MLXSW_HWMON_ATTR_TYPE_TEMP_RST, MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + MLXSW_HWMON_ATTR_TYPE_FAN_FAULT, MLXSW_HWMON_ATTR_TYPE_PWM, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, }; static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, @@ -209,6 +381,12 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "fan%u_input", 
num + 1); break; + case MLXSW_HWMON_ATTR_TYPE_FAN_FAULT: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_fan_fault_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "fan%u_fault", num + 1); + break; case MLXSW_HWMON_ATTR_TYPE_PWM: mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_pwm_show; mlxsw_hwmon_attr->dev_attr.store = mlxsw_hwmon_pwm_store; @@ -216,6 +394,40 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon, snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), "pwm%u", num + 1); break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE: + mlxsw_hwmon_attr->dev_attr.show = mlxsw_hwmon_module_temp_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_input", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_fault_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_fault", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_critical_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_crit", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_emergency_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_emergency", num + 1); + break; + case MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL: + mlxsw_hwmon_attr->dev_attr.show = + mlxsw_hwmon_module_temp_label_show; + mlxsw_hwmon_attr->dev_attr.attr.mode = 0444; + snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name), + "temp%u_label", num + 1); + break; default: WARN_ON(1); } @@ -233,7 +445,6 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) { char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0}; char mtmp_pl[MLXSW_REG_MTMP_LEN]; - u8 sensor_count; int i; int err; @@ -242,8 +453,8 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon) dev_err(mlxsw_hwmon->bus_info->dev, "Failed to get number of temp sensors\n"); return err; } - sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); - for (i = 0; i < sensor_count; i++) { + mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl); + for (i = 0; i < mlxsw_hwmon->sensor_count; i++) { mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true); err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl); @@ -280,10 +491,14 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) mlxsw_reg_mfcr_unpack(mfcr_pl, &freq, &tacho_active, &pwm_active); num = 0; for (type_index = 0; type_index < MLXSW_MFCR_TACHOS_MAX; type_index++) { - if (tacho_active & BIT(type_index)) + if (tacho_active & BIT(type_index)) { mlxsw_hwmon_attr_add(mlxsw_hwmon, MLXSW_HWMON_ATTR_TYPE_FAN_RPM, + type_index, num); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_FAN_FAULT, type_index, num++); + } } num = 0; for (type_index = 0; type_index < MLXSW_MFCR_PWMS_MAX; type_index++) { @@ -295,6 +510,53 @@ static int mlxsw_hwmon_fans_init(struct mlxsw_hwmon *mlxsw_hwmon) return 0; } +static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon) +{ + unsigned int module_count = mlxsw_core_max_ports(mlxsw_hwmon->core); + char pmlp_pl[MLXSW_REG_PMLP_LEN] = 
{0}; + int i, index; + u8 width; + int err; + + /* Add extra attributes for module temperature. Sensor index is + * assigned to sensor_count value, while all indexed before + * sensor_count are already utilized by the sensors connected through + * mtmp register by mlxsw_hwmon_temp_init(). + */ + index = mlxsw_hwmon->sensor_count; + for (i = 1; i < module_count; i++) { + mlxsw_reg_pmlp_pack(pmlp_pl, i); + err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(pmlp), + pmlp_pl); + if (err) { + dev_err(mlxsw_hwmon->bus_info->dev, "Failed to read module index %d\n", + i); + return err; + } + width = mlxsw_reg_pmlp_width_get(pmlp_pl); + if (!width) + continue; + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE, index, + index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_FAULT, + index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_CRIT, + index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG, + index, index); + mlxsw_hwmon_attr_add(mlxsw_hwmon, + MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL, + index, index); + index++; + } + + return 0; +} + int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct mlxsw_hwmon **p_hwmon) @@ -317,6 +579,10 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, if (err) goto err_fans_init; + err = mlxsw_hwmon_module_init(mlxsw_hwmon); + if (err) + goto err_temp_module_init; + mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; @@ -333,6 +599,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, return 0; err_hwmon_register: +err_temp_module_init: err_fans_init: err_temp_init: kfree(mlxsw_hwmon); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 61f897b40f82..0b85c7252f9e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -9,11 +9,20 @@ #include #include #include +#include #include "core.h" +#include "core_env.h" #define MLXSW_THERMAL_POLL_INT 1000 /* ms */ -#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */ +#define MLXSW_THERMAL_SLOW_POLL_INT 20000 /* ms */ +#define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */ +#define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */ +#define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */ +#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */ +#define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */ +#define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2) +#define MLXSW_THERMAL_ZONE_MAX_NAME 16 #define MLXSW_THERMAL_MAX_STATE 10 #define MLXSW_THERMAL_MAX_DUTY 255 /* Minimum and maximum fan allowed speed in percent: from 20% to 100%. Values @@ -26,9 +35,22 @@ #define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2) #define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */ +/* External cooling devices, allowed for binding to mlxsw thermal zones. 
*/ +static char * const mlxsw_thermal_external_allowed_cdev[] = { + "mlxreg_fan", +}; + +enum mlxsw_thermal_trips { + MLXSW_THERMAL_TEMP_TRIP_NORM, + MLXSW_THERMAL_TEMP_TRIP_HIGH, + MLXSW_THERMAL_TEMP_TRIP_HOT, + MLXSW_THERMAL_TEMP_TRIP_CRIT, +}; + struct mlxsw_thermal_trip { int type; int temp; + int hyst; int min_state; int max_state; }; @@ -36,32 +58,29 @@ struct mlxsw_thermal_trip { static const struct mlxsw_thermal_trip default_thermal_trips[] = { { /* In range - 0-40% PWM */ .type = THERMAL_TRIP_ACTIVE, - .temp = 75000, + .temp = MLXSW_THERMAL_ASIC_TEMP_NORM, + .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP, .min_state = 0, .max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, }, - { /* High - 40-100% PWM */ - .type = THERMAL_TRIP_ACTIVE, - .temp = 80000, - .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, - .max_state = MLXSW_THERMAL_MAX_STATE, - }, { - /* Very high - 100% PWM */ + /* In range - 40-100% PWM */ .type = THERMAL_TRIP_ACTIVE, - .temp = 85000, - .min_state = MLXSW_THERMAL_MAX_STATE, + .temp = MLXSW_THERMAL_ASIC_TEMP_HIGH, + .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP, + .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10, .max_state = MLXSW_THERMAL_MAX_STATE, }, { /* Warning */ .type = THERMAL_TRIP_HOT, - .temp = 105000, + .temp = MLXSW_THERMAL_ASIC_TEMP_HOT, + .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP, .min_state = MLXSW_THERMAL_MAX_STATE, .max_state = MLXSW_THERMAL_MAX_STATE, }, { /* Critical - soft poweroff */ .type = THERMAL_TRIP_CRITICAL, - .temp = MLXSW_THERMAL_MAX_TEMP, + .temp = MLXSW_THERMAL_ASIC_TEMP_CRIT, .min_state = MLXSW_THERMAL_MAX_STATE, .max_state = MLXSW_THERMAL_MAX_STATE, } @@ -72,14 +91,27 @@ static const struct mlxsw_thermal_trip default_thermal_trips[] = { /* Make sure all trips are writable */ #define MLXSW_THERMAL_TRIP_MASK (BIT(MLXSW_THERMAL_NUM_TRIPS) - 1) +struct mlxsw_thermal; + +struct mlxsw_thermal_module { + struct mlxsw_thermal *parent; + struct thermal_zone_device *tzdev; + struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; + enum thermal_device_mode mode; + int module; +}; + struct mlxsw_thermal { struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; struct thermal_zone_device *tzdev; + int polling_delay; struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1]; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; enum thermal_device_mode mode; + struct mlxsw_thermal_module *tz_module_arr; + unsigned int tz_module_num; }; static inline u8 mlxsw_state_to_duty(int state) @@ -103,9 +135,67 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, if (thermal->cdevs[i] == cdev) return i; + /* Allow mlxsw thermal zone binding to an external cooling device */ + for (i = 0; i < ARRAY_SIZE(mlxsw_thermal_external_allowed_cdev); i++) { + if (strnstr(cdev->type, mlxsw_thermal_external_allowed_cdev[i], + sizeof(cdev->type))) + return 0; + } + return -ENODEV; } +static void +mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz) +{ + tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = 0; +} + +static int +mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, + struct mlxsw_thermal_module *tz) +{ + int crit_temp, emerg_temp; + int err; + + err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + SFP_TEMP_HIGH_WARN, + &crit_temp); + if (err) + return err; + + err = mlxsw_env_module_temp_thresholds_get(core, 
tz->module, + SFP_TEMP_HIGH_ALARM, + &emerg_temp); + if (err) + return err; + + /* According to the system thermal requirements, the thermal zones are + * defined with four trip points. The critical and emergency + * temperature thresholds, provided by QSFP module are set as "active" + * and "hot" trip points, "normal" and "critical" trip points are + * derived from "active" and "hot" by subtracting or adding double + * hysteresis value. + */ + if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT) + tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp - + MLXSW_THERMAL_MODULE_TEMP_SHIFT; + else + tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp; + tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp; + if (emerg_temp > crit_temp) + tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp + + MLXSW_THERMAL_MODULE_TEMP_SHIFT; + else + tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp; + + return 0; +} + static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev, struct thermal_cooling_device *cdev) { @@ -172,7 +262,7 @@ static int mlxsw_thermal_set_mode(struct thermal_zone_device *tzdev, mutex_lock(&tzdev->lock); if (mode == THERMAL_DEVICE_ENABLED) - tzdev->polling_delay = MLXSW_THERMAL_POLL_INT; + tzdev->polling_delay = thermal->polling_delay; else tzdev->polling_delay = 0; @@ -237,13 +327,31 @@ static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev, struct mlxsw_thermal *thermal = tzdev->devdata; if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS || - temp > MLXSW_THERMAL_MAX_TEMP) + temp > MLXSW_THERMAL_ASIC_TEMP_CRIT) return -EINVAL; thermal->trips[trip].temp = temp; return 0; } +static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev, + int trip, int *p_hyst) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + *p_hyst = thermal->trips[trip].hyst; + return 0; +} + +static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev, + int trip, int hyst) +{ + struct mlxsw_thermal *thermal = tzdev->devdata; + + thermal->trips[trip].hyst = hyst; + return 0; +} + static struct thermal_zone_device_ops mlxsw_thermal_ops = { .bind = mlxsw_thermal_bind, .unbind = mlxsw_thermal_unbind, @@ -253,6 +361,206 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = { .get_trip_type = mlxsw_thermal_get_trip_type, .get_trip_temp = mlxsw_thermal_get_trip_temp, .set_trip_temp = mlxsw_thermal_set_trip_temp, + .get_trip_hyst = mlxsw_thermal_get_trip_hyst, + .set_trip_hyst = mlxsw_thermal_set_trip_hyst, +}; + +static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev, + struct thermal_cooling_device *cdev) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + int i, j, err; + + /* If the cooling device is one of ours bind it */ + if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) + return 0; + + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { + const struct mlxsw_thermal_trip *trip = &tz->trips[i]; + + err = thermal_zone_bind_cooling_device(tzdev, i, cdev, + trip->max_state, + trip->min_state, + THERMAL_WEIGHT_DEFAULT); + if (err < 0) + goto err_bind_cooling_device; + } + return 0; + +err_bind_cooling_device: + for (j = i - 1; j >= 0; j--) + thermal_zone_unbind_cooling_device(tzdev, j, cdev); + return err; +} + +static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev, + struct thermal_cooling_device *cdev) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = 
tz->parent; + int i; + int err; + + /* If the cooling device is one of ours unbind it */ + if (mlxsw_get_cooling_device_idx(thermal, cdev) < 0) + return 0; + + for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) { + err = thermal_zone_unbind_cooling_device(tzdev, i, cdev); + WARN_ON(err); + } + return err; +} + +static int mlxsw_thermal_module_mode_get(struct thermal_zone_device *tzdev, + enum thermal_device_mode *mode) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + *mode = tz->mode; + + return 0; +} + +static int mlxsw_thermal_module_mode_set(struct thermal_zone_device *tzdev, + enum thermal_device_mode mode) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + + mutex_lock(&tzdev->lock); + + if (mode == THERMAL_DEVICE_ENABLED) + tzdev->polling_delay = thermal->polling_delay; + else + tzdev->polling_delay = 0; + + mutex_unlock(&tzdev->lock); + + tz->mode = mode; + thermal_zone_device_update(tzdev, THERMAL_EVENT_UNSPECIFIED); + + return 0; +} + +static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, + int *p_temp) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + struct device *dev = thermal->bus_info->dev; + char mtbr_pl[MLXSW_REG_MTBR_LEN]; + u16 temp; + int err; + + /* Read module temperature. */ + mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + + tz->module, 1); + err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtbr), mtbr_pl); + if (err) + return err; + + mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL); + /* Update temperature. */ + switch (temp) { + case MLXSW_REG_MTBR_NO_CONN: /* fall-through */ + case MLXSW_REG_MTBR_NO_TEMP_SENS: /* fall-through */ + case MLXSW_REG_MTBR_INDEX_NA: /* fall-through */ + case MLXSW_REG_MTBR_BAD_SENS_INFO: + temp = 0; + break; + default: + temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + /* Reset all trip point. */ + mlxsw_thermal_module_trips_reset(tz); + /* Update trip points. 
*/ + err = mlxsw_thermal_module_trips_update(dev, thermal->core, + tz); + if (err) + return err; + break; + } + + *p_temp = (int) temp; + return 0; +} + +static int +mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip, + enum thermal_trip_type *p_type) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; + + *p_type = tz->trips[trip].type; + return 0; +} + +static int +mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev, + int trip, int *p_temp) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) + return -EINVAL; + + *p_temp = tz->trips[trip].temp; + return 0; +} + +static int +mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev, + int trip, int temp) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS || + temp > tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp) + return -EINVAL; + + tz->trips[trip].temp = temp; + return 0; +} + +static int +mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip, + int *p_hyst) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + *p_hyst = tz->trips[trip].hyst; + return 0; +} + +static int +mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip, + int hyst) +{ + struct mlxsw_thermal_module *tz = tzdev->devdata; + + tz->trips[trip].hyst = hyst; + return 0; +} + +static struct thermal_zone_params mlxsw_thermal_module_params = { + .governor_name = "user_space", +}; + +static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { + .bind = mlxsw_thermal_module_bind, + .unbind = mlxsw_thermal_module_unbind, + .get_mode = mlxsw_thermal_module_mode_get, + .set_mode = mlxsw_thermal_module_mode_set, + .get_temp = mlxsw_thermal_module_temp_get, + .get_trip_type = mlxsw_thermal_module_trip_type_get, + .get_trip_temp = mlxsw_thermal_module_trip_temp_get, + .set_trip_temp = mlxsw_thermal_module_trip_temp_set, + .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, + .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, }; static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev, @@ -355,6 +663,123 @@ static const struct thermal_cooling_device_ops mlxsw_cooling_ops = { .set_cur_state = mlxsw_thermal_set_cur_state, }; +static int +mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz) +{ + char tz_name[MLXSW_THERMAL_ZONE_MAX_NAME]; + int err; + + snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d", + module_tz->module + 1); + module_tz->tzdev = thermal_zone_device_register(tz_name, + MLXSW_THERMAL_NUM_TRIPS, + MLXSW_THERMAL_TRIP_MASK, + module_tz, + &mlxsw_thermal_module_ops, + &mlxsw_thermal_module_params, + 0, 0); + if (IS_ERR(module_tz->tzdev)) { + err = PTR_ERR(module_tz->tzdev); + return err; + } + + return 0; +} + +static void mlxsw_thermal_module_tz_fini(struct thermal_zone_device *tzdev) +{ + thermal_zone_device_unregister(tzdev); +} + +static int +mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core, + struct mlxsw_thermal *thermal, u8 local_port) +{ + struct mlxsw_thermal_module *module_tz; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + u8 width, module; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + + width = mlxsw_reg_pmlp_width_get(pmlp_pl); + if (!width) + return 0; + + module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + module_tz = 
&thermal->tz_module_arr[module]; + module_tz->module = module; + module_tz->parent = thermal; + memcpy(module_tz->trips, default_thermal_trips, + sizeof(thermal->trips)); + /* Initialize all trip point. */ + mlxsw_thermal_module_trips_reset(module_tz); + /* Update trip point according to the module data. */ + err = mlxsw_thermal_module_trips_update(dev, core, module_tz); + if (err) + return err; + + thermal->tz_module_num++; + + return 0; +} + +static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) +{ + if (module_tz && module_tz->tzdev) { + mlxsw_thermal_module_tz_fini(module_tz->tzdev); + module_tz->tzdev = NULL; + } +} + +static int +mlxsw_thermal_modules_init(struct device *dev, struct mlxsw_core *core, + struct mlxsw_thermal *thermal) +{ + unsigned int module_count = mlxsw_core_max_ports(core); + int i, err; + + thermal->tz_module_arr = kcalloc(module_count, + sizeof(*thermal->tz_module_arr), + GFP_KERNEL); + if (!thermal->tz_module_arr) + return -ENOMEM; + + for (i = 1; i < module_count; i++) { + err = mlxsw_thermal_module_init(dev, core, thermal, i); + if (err) + goto err_unreg_tz_module_arr; + } + + for (i = 0; i < thermal->tz_module_num; i++) { + err = mlxsw_thermal_module_tz_init(&thermal->tz_module_arr[i]); + if (err) + goto err_unreg_tz_module_arr; + } + + return 0; + +err_unreg_tz_module_arr: + for (i = module_count - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); + kfree(thermal->tz_module_arr); + return err; +} + +static void +mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal) +{ + unsigned int module_count = mlxsw_core_max_ports(thermal->core); + int i; + + for (i = module_count - 1; i >= 0; i--) + mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]); + kfree(thermal->tz_module_arr); +} + int mlxsw_thermal_init(struct mlxsw_core *core, const struct mlxsw_bus_info *bus_info, struct mlxsw_thermal **p_thermal) @@ -407,8 +832,9 @@ int mlxsw_thermal_init(struct mlxsw_core *core, if (pwm_active & BIT(i)) { struct thermal_cooling_device *cdev; - cdev = thermal_cooling_device_register("Fan", thermal, - &mlxsw_cooling_ops); + cdev = thermal_cooling_device_register("mlxsw_fan", + thermal, + &mlxsw_cooling_ops); if (IS_ERR(cdev)) { err = PTR_ERR(cdev); dev_err(dev, "Failed to register cooling device\n"); @@ -423,22 +849,36 @@ int mlxsw_thermal_init(struct mlxsw_core *core, thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL, i); + thermal->polling_delay = bus_info->low_frequency ? 
+ MLXSW_THERMAL_SLOW_POLL_INT : + MLXSW_THERMAL_POLL_INT; + thermal->tzdev = thermal_zone_device_register("mlxsw", MLXSW_THERMAL_NUM_TRIPS, MLXSW_THERMAL_TRIP_MASK, thermal, &mlxsw_thermal_ops, NULL, 0, - MLXSW_THERMAL_POLL_INT); + thermal->polling_delay); if (IS_ERR(thermal->tzdev)) { err = PTR_ERR(thermal->tzdev); dev_err(dev, "Failed to register thermal zone\n"); goto err_unreg_cdevs; } + err = mlxsw_thermal_modules_init(dev, core, thermal); + if (err) + goto err_unreg_tzdev; + thermal->mode = THERMAL_DEVICE_ENABLED; *p_thermal = thermal; return 0; + +err_unreg_tzdev: + if (thermal->tzdev) { + thermal_zone_device_unregister(thermal->tzdev); + thermal->tzdev = NULL; + } err_unreg_cdevs: for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) if (thermal->cdevs[i]) @@ -452,6 +892,7 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) { int i; + mlxsw_thermal_modules_fini(thermal); if (thermal->tzdev) { thermal_zone_device_unregister(thermal->tzdev); thermal->tzdev = NULL; diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c index 798bd5aca384..06aea1999518 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@ -14,14 +14,17 @@ #include "cmd.h" #include "core.h" #include "i2c.h" +#include "resources.h" #define MLXSW_I2C_CIR2_BASE 0x72000 #define MLXSW_I2C_CIR_STATUS_OFF 0x18 #define MLXSW_I2C_CIR2_OFF_STATUS (MLXSW_I2C_CIR2_BASE + \ MLXSW_I2C_CIR_STATUS_OFF) #define MLXSW_I2C_OPMOD_SHIFT 12 +#define MLXSW_I2C_EVENT_BIT_SHIFT 22 #define MLXSW_I2C_GO_BIT_SHIFT 23 #define MLXSW_I2C_CIR_CTRL_STATUS_SHIFT 24 +#define MLXSW_I2C_EVENT_BIT BIT(MLXSW_I2C_EVENT_BIT_SHIFT) #define MLXSW_I2C_GO_BIT BIT(MLXSW_I2C_GO_BIT_SHIFT) #define MLXSW_I2C_GO_OPMODE BIT(MLXSW_I2C_OPMOD_SHIFT) #define MLXSW_I2C_SET_IMM_CMD (MLXSW_I2C_GO_OPMODE | \ @@ -33,6 +36,9 @@ #define MLXSW_I2C_TLV_HDR_SIZE 0x10 #define MLXSW_I2C_ADDR_WIDTH 4 #define MLXSW_I2C_PUSH_CMD_SIZE (MLXSW_I2C_ADDR_WIDTH + 4) +#define MLXSW_I2C_SET_EVENT_CMD (MLXSW_I2C_EVENT_BIT) +#define MLXSW_I2C_PUSH_EVENT_CMD (MLXSW_I2C_GO_BIT | \ + MLXSW_I2C_SET_EVENT_CMD) #define MLXSW_I2C_READ_SEMA_SIZE 4 #define MLXSW_I2C_PREP_SIZE (MLXSW_I2C_ADDR_WIDTH + 28) #define MLXSW_I2C_MBOX_SIZE 20 @@ -44,6 +50,7 @@ #define MLXSW_I2C_BLK_MAX 32 #define MLXSW_I2C_RETRY 5 #define MLXSW_I2C_TIMEOUT_MSECS 5000 +#define MLXSW_I2C_MAX_DATA_SIZE 256 /** * struct mlxsw_i2c - device private data: @@ -167,7 +174,7 @@ static int mlxsw_i2c_wait_go_bit(struct i2c_client *client, return err > 0 ? 0 : err; } -/* Routine posts a command to ASIC though mail box. */ +/* Routine posts a command to ASIC through mail box. */ static int mlxsw_i2c_write_cmd(struct i2c_client *client, struct mlxsw_i2c *mlxsw_i2c, int immediate) @@ -213,6 +220,66 @@ static int mlxsw_i2c_write_cmd(struct i2c_client *client, return 0; } +/* Routine posts initialization command to ASIC through mail box. 
*/ +static int +mlxsw_i2c_write_init_cmd(struct i2c_client *client, + struct mlxsw_i2c *mlxsw_i2c, u16 opcode, u32 in_mod) +{ + __be32 push_cmd_buf[MLXSW_I2C_PUSH_CMD_SIZE / 4] = { + 0, cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD) + }; + __be32 prep_cmd_buf[MLXSW_I2C_PREP_SIZE / 4] = { + 0, 0, 0, 0, 0, 0, + cpu_to_be32(client->adapter->nr & 0xffff), + cpu_to_be32(MLXSW_I2C_SET_EVENT_CMD) + }; + struct i2c_msg push_cmd = + MLXSW_I2C_WRITE_MSG(client, push_cmd_buf, + MLXSW_I2C_PUSH_CMD_SIZE); + struct i2c_msg prep_cmd = + MLXSW_I2C_WRITE_MSG(client, prep_cmd_buf, MLXSW_I2C_PREP_SIZE); + u8 status; + int err; + + push_cmd_buf[1] = cpu_to_be32(MLXSW_I2C_PUSH_EVENT_CMD | opcode); + prep_cmd_buf[3] = cpu_to_be32(in_mod); + prep_cmd_buf[7] = cpu_to_be32(MLXSW_I2C_GO_BIT | opcode); + mlxsw_i2c_set_slave_addr((u8 *)prep_cmd_buf, + MLXSW_I2C_CIR2_BASE); + mlxsw_i2c_set_slave_addr((u8 *)push_cmd_buf, + MLXSW_I2C_CIR2_OFF_STATUS); + + /* Prepare Command Interface Register for transaction */ + err = i2c_transfer(client->adapter, &prep_cmd, 1); + if (err < 0) + return err; + else if (err != 1) + return -EIO; + + /* Write out Command Interface Register GO bit to push transaction */ + err = i2c_transfer(client->adapter, &push_cmd, 1); + if (err < 0) + return err; + else if (err != 1) + return -EIO; + + /* Wait until go bit is cleared. */ + err = mlxsw_i2c_wait_go_bit(client, mlxsw_i2c, &status); + if (err) { + dev_err(&client->dev, "HW semaphore is not released"); + return err; + } + + /* Validate transaction completion status. */ + if (status) { + dev_err(&client->dev, "Bad transaction completion status %x\n", + status); + return -EIO; + } + + return 0; +} + /* Routine obtains mail box offsets from ASIC register space. */ static int mlxsw_i2c_get_mbox(struct i2c_client *client, struct mlxsw_i2c *mlxsw_i2c) @@ -310,8 +377,8 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num, /* Routine executes I2C command. */ static int -mlxsw_i2c_cmd(struct device *dev, size_t in_mbox_size, u8 *in_mbox, - size_t out_mbox_size, u8 *out_mbox, u8 *status) +mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size, + u8 *in_mbox, size_t out_mbox_size, u8 *out_mbox, u8 *status) { struct i2c_client *client = to_i2c_client(dev); struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client); @@ -326,24 +393,40 @@ mlxsw_i2c_cmd(struct device *dev, size_t in_mbox_size, u8 *in_mbox, WARN_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32)); - reg_size = mlxsw_i2c_get_reg_size(in_mbox); - num = reg_size / MLXSW_I2C_BLK_MAX; - if (reg_size % MLXSW_I2C_BLK_MAX) - num++; + if (in_mbox) { + reg_size = mlxsw_i2c_get_reg_size(in_mbox); + num = reg_size / MLXSW_I2C_BLK_MAX; + if (reg_size % MLXSW_I2C_BLK_MAX) + num++; - if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) { - dev_err(&client->dev, "Could not acquire lock"); - return -EINVAL; - } + if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) { + dev_err(&client->dev, "Could not acquire lock"); + return -EINVAL; + } + + err = mlxsw_i2c_write(dev, reg_size, in_mbox, num, status); + if (err) + goto cmd_fail; + + /* No out mailbox is case of write transaction. */ + if (!out_mbox) { + mutex_unlock(&mlxsw_i2c->cmd.lock); + return 0; + } + } else { + /* No input mailbox is case of initialization query command. 
*/ + reg_size = MLXSW_I2C_MAX_DATA_SIZE; + num = reg_size / MLXSW_I2C_BLK_MAX; - err = mlxsw_i2c_write(dev, reg_size, in_mbox, num, status); - if (err) - goto cmd_fail; + if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) { + dev_err(&client->dev, "Could not acquire lock"); + return -EINVAL; + } - /* No out mailbox is case of write transaction. */ - if (!out_mbox) { - mutex_unlock(&mlxsw_i2c->cmd.lock); - return 0; + err = mlxsw_i2c_write_init_cmd(client, mlxsw_i2c, opcode, + in_mod); + if (err) + goto cmd_fail; } /* Send read transaction to get output mailbox content. */ @@ -395,8 +478,8 @@ static int mlxsw_i2c_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, { struct mlxsw_i2c *mlxsw_i2c = bus_priv; - return mlxsw_i2c_cmd(mlxsw_i2c->dev, in_mbox_size, in_mbox, - out_mbox_size, out_mbox, status); + return mlxsw_i2c_cmd(mlxsw_i2c->dev, opcode, in_mod, in_mbox_size, + in_mbox, out_mbox_size, out_mbox, status); } static bool mlxsw_i2c_skb_transmit_busy(void *bus_priv, @@ -414,13 +497,22 @@ static int mlxsw_i2c_skb_transmit(void *bus_priv, struct sk_buff *skb, static int mlxsw_i2c_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, - struct mlxsw_res *resources) + struct mlxsw_res *res) { struct mlxsw_i2c *mlxsw_i2c = bus_priv; + char *mbox; + int err; mlxsw_i2c->core = mlxsw_core; - return 0; + mbox = mlxsw_cmd_mbox_alloc(); + if (!mbox) + return -ENOMEM; + + err = mlxsw_core_resources_query(mlxsw_core, mbox, res); + + mlxsw_cmd_mbox_free(mbox); + return err; } static void mlxsw_i2c_fini(void *bus_priv) @@ -503,6 +595,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client, mlxsw_i2c->bus_info.device_kind = id->name; mlxsw_i2c->bus_info.device_name = client->name; mlxsw_i2c->bus_info.dev = &client->dev; + mlxsw_i2c->bus_info.low_frequency = true; mlxsw_i2c->dev = &client->dev; err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info, diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 5a6c4457fb55..68bee9572a1b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -1,6 +1,9 @@ // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 -/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ +/* Copyright (c) 2016-2019 Mellanox Technologies. 
All rights reserved */ +#include +#include +#include #include #include #include @@ -8,59 +11,377 @@ #include #include "core.h" +#include "core_env.h" #include "i2c.h" -static const char mlxsw_minimal_driver_name[] = "mlxsw_minimal"; +static const char mlxsw_m_driver_name[] = "mlxsw_minimal"; -static const struct mlxsw_config_profile mlxsw_minimal_config_profile; +struct mlxsw_m_port; -static struct mlxsw_driver mlxsw_minimal_driver = { - .kind = mlxsw_minimal_driver_name, - .priv_size = 1, - .profile = &mlxsw_minimal_config_profile, +struct mlxsw_m { + struct mlxsw_m_port **ports; + int *module_to_port; + struct mlxsw_core *core; + const struct mlxsw_bus_info *bus_info; + u8 base_mac[ETH_ALEN]; + u8 max_ports; }; -static const struct i2c_device_id mlxsw_minimal_i2c_id[] = { +struct mlxsw_m_port { + struct net_device *dev; + struct mlxsw_m *mlxsw_m; + u8 local_port; + u8 module; +}; + +static int mlxsw_m_port_dummy_open_stop(struct net_device *dev) +{ + return 0; +} + +static int +mlxsw_m_port_get_phys_port_name(struct net_device *dev, char *name, size_t len) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + u8 local_port = mlxsw_m_port->local_port; + + return mlxsw_core_port_get_phys_port_name(core, local_port, name, len); +} + +static int mlxsw_m_port_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev); + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + + ppid->id_len = sizeof(mlxsw_m->base_mac); + memcpy(&ppid->id, &mlxsw_m->base_mac, ppid->id_len); + + return 0; +} + +static const struct net_device_ops mlxsw_m_port_netdev_ops = { + .ndo_open = mlxsw_m_port_dummy_open_stop, + .ndo_stop = mlxsw_m_port_dummy_open_stop, + .ndo_get_phys_port_name = mlxsw_m_port_get_phys_port_name, + .ndo_get_port_parent_id = mlxsw_m_port_get_port_parent_id, +}; + +static int mlxsw_m_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_get_module_info(core, mlxsw_m_port->module, modinfo); +} + +static int +mlxsw_m_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_get_module_eeprom(netdev, core, mlxsw_m_port->module, + ee, data); +} + +static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { + .get_module_info = mlxsw_m_get_module_info, + .get_module_eeprom = mlxsw_m_get_module_eeprom, +}; + +static int +mlxsw_m_port_module_info_get(struct mlxsw_m *mlxsw_m, u8 local_port, + u8 *p_module, u8 *p_width) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); + + return 0; +} + +static int +mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port) +{ + struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m; + struct net_device *dev = mlxsw_m_port->dev; + char ppad_pl[MLXSW_REG_PPAD_LEN]; + int err; + + mlxsw_reg_ppad_pack(ppad_pl, false, 0); + err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl); + if (err) + return err; + mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr); + /* The last 
byte value in base mac address is guaranteed + * to be such it does not overflow when adding local_port + * value. + */ + dev->dev_addr[ETH_ALEN - 1] += mlxsw_m_port->module + 1; + return 0; +} + +static int +mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) +{ + struct mlxsw_m_port *mlxsw_m_port; + struct net_device *dev; + int err; + + err = mlxsw_core_port_init(mlxsw_m->core, local_port); + if (err) { + dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n", + local_port); + return err; + } + + dev = alloc_etherdev(sizeof(struct mlxsw_m_port)); + if (!dev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(dev, mlxsw_m->bus_info->dev); + mlxsw_m_port = netdev_priv(dev); + mlxsw_m_port->dev = dev; + mlxsw_m_port->mlxsw_m = mlxsw_m; + mlxsw_m_port->local_port = local_port; + mlxsw_m_port->module = module; + + dev->netdev_ops = &mlxsw_m_port_netdev_ops; + dev->ethtool_ops = &mlxsw_m_port_ethtool_ops; + + err = mlxsw_m_port_dev_addr_get(mlxsw_m_port); + if (err) { + dev_err(mlxsw_m->bus_info->dev, "Port %d: Unable to get port mac address\n", + mlxsw_m_port->local_port); + goto err_dev_addr_get; + } + + netif_carrier_off(dev); + mlxsw_m->ports[local_port] = mlxsw_m_port; + err = register_netdev(dev); + if (err) { + dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to register netdev\n", + mlxsw_m_port->local_port); + goto err_register_netdev; + } + + mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port, + mlxsw_m_port, dev, module + 1, false, 0); + + return 0; + +err_register_netdev: + mlxsw_m->ports[local_port] = NULL; + free_netdev(dev); +err_dev_addr_get: +err_alloc_etherdev: + mlxsw_core_port_fini(mlxsw_m->core, local_port); + return err; +} + +static void mlxsw_m_port_remove(struct mlxsw_m *mlxsw_m, u8 local_port) +{ + struct mlxsw_m_port *mlxsw_m_port = mlxsw_m->ports[local_port]; + + mlxsw_core_port_clear(mlxsw_m->core, local_port, mlxsw_m); + unregister_netdev(mlxsw_m_port->dev); /* This calls ndo_stop */ + mlxsw_m->ports[local_port] = NULL; + free_netdev(mlxsw_m_port->dev); + mlxsw_core_port_fini(mlxsw_m->core, local_port); +} + +static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port, + u8 *last_module) +{ + u8 module, width; + int err; + + /* Fill out to local port mapping array */ + err = mlxsw_m_port_module_info_get(mlxsw_m, local_port, &module, + &width); + if (err) + return err; + + if (!width) + return 0; + /* Skip, if port belongs to the cluster */ + if (module == *last_module) + return 0; + *last_module = module; + mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports; + + return 0; +} + +static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module) +{ + mlxsw_m->module_to_port[module] = -1; +} + +static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m) +{ + unsigned int max_ports = mlxsw_core_max_ports(mlxsw_m->core); + u8 last_module = max_ports; + int i; + int err; + + mlxsw_m->ports = kcalloc(max_ports, sizeof(*mlxsw_m->ports), + GFP_KERNEL); + if (!mlxsw_m->ports) + return -ENOMEM; + + mlxsw_m->module_to_port = kmalloc_array(max_ports, sizeof(int), + GFP_KERNEL); + if (!mlxsw_m->module_to_port) { + err = -ENOMEM; + goto err_module_to_port_alloc; + } + + /* Invalidate the entries of module to local port mapping array */ + for (i = 0; i < max_ports; i++) + mlxsw_m->module_to_port[i] = -1; + + /* Fill out module to local port mapping array */ + for (i = 1; i < max_ports; i++) { + err = mlxsw_m_port_module_map(mlxsw_m, i, &last_module); + if (err) + goto err_module_to_port_map; + 
} + + /* Create port objects for each valid entry */ + for (i = 0; i < mlxsw_m->max_ports; i++) { + if (mlxsw_m->module_to_port[i] > 0) { + err = mlxsw_m_port_create(mlxsw_m, + mlxsw_m->module_to_port[i], + i); + if (err) + goto err_module_to_port_create; + } + } + + return 0; + +err_module_to_port_create: + for (i--; i >= 0; i--) { + if (mlxsw_m->module_to_port[i] > 0) + mlxsw_m_port_remove(mlxsw_m, + mlxsw_m->module_to_port[i]); + } + i = max_ports; +err_module_to_port_map: + for (i--; i > 0; i--) + mlxsw_m_port_module_unmap(mlxsw_m, i); + kfree(mlxsw_m->module_to_port); +err_module_to_port_alloc: + kfree(mlxsw_m->ports); + return err; +} + +static void mlxsw_m_ports_remove(struct mlxsw_m *mlxsw_m) +{ + int i; + + for (i = 0; i < mlxsw_m->max_ports; i++) { + if (mlxsw_m->module_to_port[i] > 0) { + mlxsw_m_port_remove(mlxsw_m, + mlxsw_m->module_to_port[i]); + mlxsw_m_port_module_unmap(mlxsw_m, i); + } + } + + kfree(mlxsw_m->module_to_port); + kfree(mlxsw_m->ports); +} + +static int mlxsw_m_init(struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core); + int err; + + mlxsw_m->core = mlxsw_core; + mlxsw_m->bus_info = mlxsw_bus_info; + + err = mlxsw_m_ports_create(mlxsw_m); + if (err) { + dev_err(mlxsw_m->bus_info->dev, "Failed to create ports\n"); + return err; + } + + return 0; +} + +static void mlxsw_m_fini(struct mlxsw_core *mlxsw_core) +{ + struct mlxsw_m *mlxsw_m = mlxsw_core_driver_priv(mlxsw_core); + + mlxsw_m_ports_remove(mlxsw_m); +} + +static const struct mlxsw_config_profile mlxsw_m_config_profile; + +static struct mlxsw_driver mlxsw_m_driver = { + .kind = mlxsw_m_driver_name, + .priv_size = sizeof(struct mlxsw_m), + .init = mlxsw_m_init, + .fini = mlxsw_m_fini, + .profile = &mlxsw_m_config_profile, + .res_query_enabled = true, +}; + +static const struct i2c_device_id mlxsw_m_i2c_id[] = { { "mlxsw_minimal", 0}, { }, }; -static struct i2c_driver mlxsw_minimal_i2c_driver = { +static struct i2c_driver mlxsw_m_i2c_driver = { .driver.name = "mlxsw_minimal", .class = I2C_CLASS_HWMON, - .id_table = mlxsw_minimal_i2c_id, + .id_table = mlxsw_m_i2c_id, }; -static int __init mlxsw_minimal_module_init(void) +static int __init mlxsw_m_module_init(void) { int err; - err = mlxsw_core_driver_register(&mlxsw_minimal_driver); + err = mlxsw_core_driver_register(&mlxsw_m_driver); if (err) return err; - err = mlxsw_i2c_driver_register(&mlxsw_minimal_i2c_driver); + err = mlxsw_i2c_driver_register(&mlxsw_m_i2c_driver); if (err) goto err_i2c_driver_register; return 0; err_i2c_driver_register: - mlxsw_core_driver_unregister(&mlxsw_minimal_driver); + mlxsw_core_driver_unregister(&mlxsw_m_driver); return err; } -static void __exit mlxsw_minimal_module_exit(void) +static void __exit mlxsw_m_module_exit(void) { - mlxsw_i2c_driver_unregister(&mlxsw_minimal_i2c_driver); - mlxsw_core_driver_unregister(&mlxsw_minimal_driver); + mlxsw_i2c_driver_unregister(&mlxsw_m_i2c_driver); + mlxsw_core_driver_unregister(&mlxsw_m_driver); } -module_init(mlxsw_minimal_module_init); -module_exit(mlxsw_minimal_module_exit); +module_init(mlxsw_m_module_init); +module_exit(mlxsw_m_module_exit); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Vadim Pasternak "); MODULE_DESCRIPTION("Mellanox minimal driver"); -MODULE_DEVICE_TABLE(i2c, mlxsw_minimal_i2c_id); +MODULE_DEVICE_TABLE(i2c, mlxsw_m_i2c_id); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index a2321fe8d6a0..b40455f8293d 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1039,42 +1039,6 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); } -static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, - struct mlxsw_res *res) -{ - int index, i; - u64 data; - u16 id; - int err; - - if (!res) - return 0; - - mlxsw_cmd_mbox_zero(mbox); - - for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES; - index++) { - err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index); - if (err) - return err; - - for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) { - id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); - data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); - - if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID) - return 0; - - mlxsw_res_parse(res, id, data); - } - } - - /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get - * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. - */ - return -EIO; -} - static int mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci, const struct mlxsw_config_profile *profile, @@ -1459,7 +1423,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_boardinfo; - err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res); + err = mlxsw_core_resources_query(mlxsw_core, mbox, res); if (err) goto err_query_resources; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 9b48dffc9f63..eb4c5e8964cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2199,6 +2199,14 @@ MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8); */ MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16); +/* reg_pagt_multi + * Multi-ACL + * 0 - This ACL is the last ACL in the multi-ACL + * 1 - This ACL is part of a multi-ACL + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pagt, multi, 0x30, 31, 1, 0x04, 0x00, false); + /* reg_pagt_acl_id * ACL identifier * Access: RW @@ -2212,12 +2220,13 @@ static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id) } static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index, - u16 acl_id) + u16 acl_id, bool multi) { u8 size = mlxsw_reg_pagt_size_get(payload); if (index >= size) mlxsw_reg_pagt_size_set(payload, index + 1); + mlxsw_reg_pagt_multi_set(payload, index, multi); mlxsw_reg_pagt_acl_id_set(payload, index, acl_id); } @@ -3962,6 +3971,25 @@ enum { */ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4); +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M BIT(0) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII BIT(1) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII BIT(2) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R BIT(3) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G BIT(4) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G BIT(5) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR BIT(6) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 BIT(7) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12) + +/* reg_ptys_ext_eth_proto_cap + * Extended Ethernet port supported speeds and protocols. 
+ * Access: RO + */ +MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32); + #define MLXSW_REG_PTYS_ETH_SPEED_SGMII BIT(0) #define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX BIT(1) #define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2) @@ -4016,6 +4044,12 @@ MLXSW_ITEM32(reg, ptys, ib_link_width_cap, 0x10, 16, 16); */ MLXSW_ITEM32(reg, ptys, ib_proto_cap, 0x10, 0, 16); +/* reg_ptys_ext_eth_proto_admin + * Extended speed and protocol to set port to. + * Access: RW + */ +MLXSW_ITEM32(reg, ptys, ext_eth_proto_admin, 0x14, 0, 32); + /* reg_ptys_eth_proto_admin * Speed and protocol to set port to. * Access: RW @@ -4034,6 +4068,12 @@ MLXSW_ITEM32(reg, ptys, ib_link_width_admin, 0x1C, 16, 16); */ MLXSW_ITEM32(reg, ptys, ib_proto_admin, 0x1C, 0, 16); +/* reg_ptys_ext_eth_proto_oper + * The extended current speed and protocol configured for the port. + * Access: RO + */ +MLXSW_ITEM32(reg, ptys, ext_eth_proto_oper, 0x20, 0, 32); + /* reg_ptys_eth_proto_oper * The current speed and protocol configured for the port. * Access: RO @@ -4052,12 +4092,23 @@ MLXSW_ITEM32(reg, ptys, ib_link_width_oper, 0x28, 16, 16); */ MLXSW_ITEM32(reg, ptys, ib_proto_oper, 0x28, 0, 16); -/* reg_ptys_eth_proto_lp_advertise - * The protocols that were advertised by the link partner during - * autonegotiation. +enum mlxsw_reg_ptys_connector_type { + MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA, + MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER, +}; + +/* reg_ptys_connector_type + * Connector type indication. 
* Access: RO */ -MLXSW_ITEM32(reg, ptys, eth_proto_lp_advertise, 0x30, 0, 32); +MLXSW_ITEM32(reg, ptys, connector_type, 0x2C, 0, 4); static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, u32 proto_admin, bool autoneg) @@ -4069,17 +4120,46 @@ static inline void mlxsw_reg_ptys_eth_pack(char *payload, u8 local_port, mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg); } +static inline void mlxsw_reg_ptys_ext_eth_pack(char *payload, u8 local_port, + u32 proto_admin, bool autoneg) +{ + MLXSW_REG_ZERO(ptys, payload); + mlxsw_reg_ptys_local_port_set(payload, local_port); + mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH); + mlxsw_reg_ptys_ext_eth_proto_admin_set(payload, proto_admin); + mlxsw_reg_ptys_an_disable_admin_set(payload, !autoneg); +} + static inline void mlxsw_reg_ptys_eth_unpack(char *payload, u32 *p_eth_proto_cap, - u32 *p_eth_proto_adm, + u32 *p_eth_proto_admin, u32 *p_eth_proto_oper) { if (p_eth_proto_cap) - *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload); - if (p_eth_proto_adm) - *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload); + *p_eth_proto_cap = + mlxsw_reg_ptys_eth_proto_cap_get(payload); + if (p_eth_proto_admin) + *p_eth_proto_admin = + mlxsw_reg_ptys_eth_proto_admin_get(payload); + if (p_eth_proto_oper) + *p_eth_proto_oper = + mlxsw_reg_ptys_eth_proto_oper_get(payload); +} + +static inline void mlxsw_reg_ptys_ext_eth_unpack(char *payload, + u32 *p_eth_proto_cap, + u32 *p_eth_proto_admin, + u32 *p_eth_proto_oper) +{ + if (p_eth_proto_cap) + *p_eth_proto_cap = + mlxsw_reg_ptys_ext_eth_proto_cap_get(payload); + if (p_eth_proto_admin) + *p_eth_proto_admin = + mlxsw_reg_ptys_ext_eth_proto_admin_get(payload); if (p_eth_proto_oper) - *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload); + *p_eth_proto_oper = + mlxsw_reg_ptys_ext_eth_proto_oper_get(payload); } static inline void mlxsw_reg_ptys_ib_pack(char *payload, u8 local_port, @@ -5666,6 +5746,8 @@ enum mlxsw_reg_ritr_loopback_protocol { MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4, /* IPinIP IPv6 underlay Unicast */ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6, + /* IPinIP generic - used for Spectrum-2 underlay RIF */ + MLXSW_REG_RITR_LOOPBACK_GENERIC, }; /* reg_ritr_loopback_protocol @@ -5706,6 +5788,13 @@ MLXSW_ITEM32(reg, ritr, loopback_ipip_options, 0x10, 20, 4); */ MLXSW_ITEM32(reg, ritr, loopback_ipip_uvr, 0x10, 0, 16); +/* reg_ritr_loopback_ipip_underlay_rif + * Underlay ingress router interface. + * Reserved for Spectrum. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, loopback_ipip_underlay_rif, 0x14, 0, 16); + /* reg_ritr_loopback_ipip_usip* * Encapsulation Underlay source IP. 
* Access: RW @@ -5821,11 +5910,12 @@ static inline void mlxsw_reg_ritr_loopback_ipip_common_pack(char *payload, enum mlxsw_reg_ritr_loopback_ipip_type ipip_type, enum mlxsw_reg_ritr_loopback_ipip_options options, - u16 uvr_id, u32 gre_key) + u16 uvr_id, u16 underlay_rif, u32 gre_key) { mlxsw_reg_ritr_loopback_ipip_type_set(payload, ipip_type); mlxsw_reg_ritr_loopback_ipip_options_set(payload, options); mlxsw_reg_ritr_loopback_ipip_uvr_set(payload, uvr_id); + mlxsw_reg_ritr_loopback_ipip_underlay_rif_set(payload, underlay_rif); mlxsw_reg_ritr_loopback_ipip_gre_key_set(payload, gre_key); } @@ -5833,12 +5923,12 @@ static inline void mlxsw_reg_ritr_loopback_ipip4_pack(char *payload, enum mlxsw_reg_ritr_loopback_ipip_type ipip_type, enum mlxsw_reg_ritr_loopback_ipip_options options, - u16 uvr_id, u32 usip, u32 gre_key) + u16 uvr_id, u16 underlay_rif, u32 usip, u32 gre_key) { mlxsw_reg_ritr_loopback_protocol_set(payload, MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV4); mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options, - uvr_id, gre_key); + uvr_id, underlay_rif, gre_key); mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip); } @@ -7200,6 +7290,13 @@ MLXSW_ITEM32(reg, rtdp, type, 0x00, 28, 4); */ MLXSW_ITEM32(reg, rtdp, tunnel_index, 0x00, 0, 24); +/* reg_rtdp_egress_router_interface + * Underlay egress router interface. + * Valid range is from 0 to cap_max_router_interfaces - 1 + * Access: RW + */ +MLXSW_ITEM32(reg, rtdp, egress_router_interface, 0x40, 0, 16); + /* IPinIP */ /* reg_rtdp_ipip_irif @@ -7849,6 +7946,35 @@ static inline void mlxsw_reg_mfsl_unpack(char *payload, u8 tacho, *p_tach_max = mlxsw_reg_mfsl_tach_max_get(payload); } +/* FORE - Fan Out of Range Event Register + * -------------------------------------- + * This register reports the status of the controlled fans compared to the + * range defined by the MFSL register. + */ +#define MLXSW_REG_FORE_ID 0x9007 +#define MLXSW_REG_FORE_LEN 0x0C + +MLXSW_REG_DEFINE(fore, MLXSW_REG_FORE_ID, MLXSW_REG_FORE_LEN); + +/* fan_under_limit + * Fan speed is below the low limit defined in MFSL register. Each bit relates + * to a single tachometer and indicates the specific tachometer reading is + * below the threshold. + * Access: RO + */ +MLXSW_ITEM32(reg, fore, fan_under_limit, 0x00, 16, 10); + +static inline void mlxsw_reg_fore_unpack(char *payload, u8 tacho, + bool *fault) +{ + u16 limit; + + if (fault) { + limit = mlxsw_reg_fore_fan_under_limit_get(payload); + *fault = limit & BIT(tacho); + } +} + /* MTCAP - Management Temperature Capabilities * ------------------------------------------- * This register exposes the capabilities of the device and @@ -7975,6 +8101,80 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); } +/* MTBR - Management Temperature Bulk Register + * ------------------------------------------- + * This register is used for bulk temperature reading. 
+ */ +#define MLXSW_REG_MTBR_ID 0x900F +#define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */ +#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */ +#define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \ + MLXSW_REG_MTBR_REC_LEN * \ + MLXSW_REG_MTBR_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(mtbr, MLXSW_REG_MTBR_ID, MLXSW_REG_MTBR_LEN); + +/* reg_mtbr_base_sensor_index + * Base sensors index to access (0 - ASIC sensor, 1-63 - ambient sensors, + * 64-127 are mapped to the SFP+/QSFP modules sequentially). + * Access: Index + */ +MLXSW_ITEM32(reg, mtbr, base_sensor_index, 0x00, 0, 7); + +/* reg_mtbr_num_rec + * Request: Number of records to read + * Response: Number of records read + * See above description for more details. + * Range 1..255 + * Access: RW + */ +MLXSW_ITEM32(reg, mtbr, num_rec, 0x04, 0, 8); + +/* reg_mtbr_rec_max_temp + * The highest measured temperature from the sensor. + * When the bit mte is cleared, the field max_temperature is reserved. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, mtbr, rec_max_temp, MLXSW_REG_MTBR_BASE_LEN, 16, + 16, MLXSW_REG_MTBR_REC_LEN, 0x00, false); + +/* reg_mtbr_rec_temp + * Temperature reading from the sensor. Reading is in 0..125 Celsius + * degrees units. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16, + MLXSW_REG_MTBR_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_mtbr_pack(char *payload, u8 base_sensor_index, + u8 num_rec) +{ + MLXSW_REG_ZERO(mtbr, payload); + mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index); + mlxsw_reg_mtbr_num_rec_set(payload, num_rec); +} + +/* Error codes from temperatute reading */ +enum mlxsw_reg_mtbr_temp_status { + MLXSW_REG_MTBR_NO_CONN = 0x8000, + MLXSW_REG_MTBR_NO_TEMP_SENS = 0x8001, + MLXSW_REG_MTBR_INDEX_NA = 0x8002, + MLXSW_REG_MTBR_BAD_SENS_INFO = 0x8003, +}; + +/* Base index for reading modules temperature */ +#define MLXSW_REG_MTBR_BASE_MODULE_INDEX 64 + +static inline void mlxsw_reg_mtbr_temp_unpack(char *payload, int rec_ind, + u16 *p_temp, u16 *p_max_temp) +{ + if (p_temp) + *p_temp = mlxsw_reg_mtbr_rec_temp_get(payload, rec_ind); + if (p_max_temp) + *p_max_temp = mlxsw_reg_mtbr_rec_max_temp_get(payload, rec_ind); +} + /* MCIA - Management Cable Info Access * ----------------------------------- * MCIA register is used to access the SFP+ and QSFP connector's EPROM. 
@@ -8029,13 +8229,41 @@ MLXSW_ITEM32(reg, mcia, device_address, 0x04, 0, 16); */ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16); -#define MLXSW_SP_REG_MCIA_EEPROM_SIZE 48 +#define MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH 256 +#define MLXSW_REG_MCIA_EEPROM_SIZE 48 +#define MLXSW_REG_MCIA_I2C_ADDR_LOW 0x50 +#define MLXSW_REG_MCIA_I2C_ADDR_HIGH 0x51 +#define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0 +#define MLXSW_REG_MCIA_TH_ITEM_SIZE 2 +#define MLXSW_REG_MCIA_TH_PAGE_NUM 3 +#define MLXSW_REG_MCIA_PAGE0_LO 0 +#define MLXSW_REG_MCIA_TH_PAGE_OFF 0x80 + +enum mlxsw_reg_mcia_eeprom_module_info_rev_id { + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, +}; + +enum mlxsw_reg_mcia_eeprom_module_info_id { + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP = 0x03, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD = 0x18, +}; + +enum mlxsw_reg_mcia_eeprom_module_info { + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE, +}; /* reg_mcia_eeprom * Bytes to read/write. * Access: RW */ -MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_SP_REG_MCIA_EEPROM_SIZE); +MLXSW_ITEM_BUF(reg, mcia, eeprom, 0x10, MLXSW_REG_MCIA_EEPROM_SIZE); static inline void mlxsw_reg_mcia_pack(char *payload, u8 module, u8 lock, u8 page_number, u16 device_addr, @@ -9723,8 +9951,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mfsc), MLXSW_REG(mfsm), MLXSW_REG(mfsl), + MLXSW_REG(fore), MLXSW_REG(mtcap), MLXSW_REG(mtmp), + MLXSW_REG(mtbr), MLXSW_REG(mcia), MLXSW_REG(mpat), MLXSW_REG(mpar), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index b8b3a01c2a9e..773ef7fdb285 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -26,6 +26,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_BUFFER_SIZE, MLXSW_RES_ID_CELL_SIZE, + MLXSW_RES_ID_MAX_HEADROOM_SIZE, MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, MLXSW_RES_ID_ACL_MAX_TCAM_RULES, MLXSW_RES_ID_ACL_MAX_REGIONS, @@ -79,6 +80,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ + [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */ [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 32519c93df17..9eb63300c1d3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -32,6 +32,7 @@ #include "spectrum.h" #include "pci.h" #include "core.h" +#include "core_env.h" #include "reg.h" #include "port.h" #include "trap.h" @@ -852,8 +853,12 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; u16 delay = !!my_pfc ? 
my_pfc->delay : 0; char pbmc_pl[MLXSW_REG_PBMC_LEN]; + u32 taken_headroom_cells = 0; + u32 max_headroom_cells; int i, j, err; + max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp); + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); if (err) @@ -862,8 +867,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { bool configure = false; bool pfc = false; + u16 thres_cells; + u16 delay_cells; + u16 total_cells; bool lossy; - u16 thres; for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { if (prio_tc[j] == i) { @@ -877,10 +884,17 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, continue; lossy = !(pfc || pause_en); - thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); - delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, - pause_en); - mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, + pfc, pause_en); + total_cells = thres_cells + delay_cells; + + taken_headroom_cells += total_cells; + if (taken_headroom_cells > max_headroom_cells) + return -ENOBUFS; + + mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells, + thres_cells, lossy); } return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); @@ -1700,6 +1714,18 @@ static int mlxsw_sp_set_features(struct net_device *dev, mlxsw_sp_feature_hw_tc); } +static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + ppid->id_len = sizeof(mlxsw_sp->base_mac); + memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len); + + return 0; +} + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, @@ -1715,6 +1741,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, .ndo_set_features = mlxsw_sp_set_features, + .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id, }; static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, @@ -2103,7 +2130,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) int i; for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", mlxsw_sp_port_hw_prio_stats[i].str, prio); *p += ETH_GSTRING_LEN; } @@ -2114,7 +2141,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) int i; for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", mlxsw_sp_port_hw_tc_stats[i].str, tc); *p += ETH_GSTRING_LEN; } @@ -2310,13 +2337,13 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) } } -struct mlxsw_sp_port_link_mode { +struct mlxsw_sp1_port_link_mode { enum ethtool_link_mode_bit_indices mask_ethtool; u32 mask; u32 speed; }; -static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { +static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { { .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, @@ -2387,11 +2414,6 @@ static const struct mlxsw_sp_port_link_mode 
mlxsw_sp_port_link_mode[] = { .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, .speed = SPEED_25000, }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, - .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - .speed = SPEED_25000, - }, { .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, @@ -2449,11 +2471,12 @@ static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { }, }; -#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) +#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) static void -mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) +mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) { if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | @@ -2471,19 +2494,23 @@ mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto, ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); } -static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode) +static void +mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, + unsigned long *mode) { int i; - for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { - if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) - __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) + __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, mode); } } -static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) +static void +mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) { u32 speed = SPEED_UNKNOWN; u8 duplex = DUPLEX_UNKNOWN; @@ -2492,9 +2519,9 @@ static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, if (!carrier_ok) goto out; - for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { - if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { - speed = mlxsw_sp_port_link_mode[i].speed; + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) { + speed = mlxsw_sp1_port_link_mode[i].speed; duplex = DUPLEX_FULL; break; } @@ -2504,129 +2531,559 @@ out: cmd->base.duplex = duplex; } -static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) +static u32 +mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, + const struct ethtool_link_ksettings *cmd) { - if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | - MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | - MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | - MLXSW_REG_PTYS_ETH_SPEED_SGMII)) - return PORT_FIBRE; + u32 ptys_proto = 0; + int i; - if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | - MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | - MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) - return PORT_DA; + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, + cmd->link_modes.advertising)) + ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; + } + return ptys_proto; +} - if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | - MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | - MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | - MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) - return PORT_NONE; +static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) +{ 
+ u32 ptys_proto = 0; + int i; - return PORT_OTHER; + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (speed == mlxsw_sp1_port_link_mode[i].speed) + ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; + } + return ptys_proto; } static u32 -mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd) +mlxsw_sp1_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) { u32 ptys_proto = 0; int i; - for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { - if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool, - cmd->link_modes.advertising)) - ptys_proto |= mlxsw_sp_port_link_mode[i].mask; + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (mlxsw_sp1_port_link_mode[i].speed <= upper_speed) + ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; + } + return ptys_proto; +} + +static int +mlxsw_sp1_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u32 *base_speed) +{ + *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; + return 0; +} + +static void +mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, + u8 local_port, u32 proto_admin, bool autoneg) +{ + mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); +} + +static void +mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, + u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, + u32 *p_eth_proto_oper) +{ + mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, + p_eth_proto_oper); +} + +static const struct mlxsw_sp_port_type_speed_ops +mlxsw_sp1_port_type_speed_ops = { + .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, + .from_ptys_link = mlxsw_sp1_from_ptys_link, + .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, + .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, + .to_ptys_speed = mlxsw_sp1_to_ptys_speed, + .to_ptys_upper_speed = mlxsw_sp1_to_ptys_upper_speed, + .port_speed_base = mlxsw_sp1_port_speed_base, + .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, + .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, +}; + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_sgmii_100m[] = { + ETHTOOL_LINK_MODE_100baseT_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { + ETHTOOL_LINK_MODE_2500baseX_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_5gbase_r[] = { + ETHTOOL_LINK_MODE_5000baseT_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) + +static const enum 
ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) + +struct mlxsw_sp2_port_link_mode { + const enum ethtool_link_mode_bit_indices *mask_ethtool; + int m_ethtool_len; + u32 mask; + u32 speed; +}; + +static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, + .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, + .speed = SPEED_100, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, + .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, + .speed = 
SPEED_1000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, + .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, + .speed = SPEED_2500, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, + .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, + .speed = SPEED_5000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, + .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, + .speed = SPEED_10000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, + .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, + .speed = SPEED_40000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, + .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, + .speed = SPEED_25000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, + .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, + .speed = SPEED_50000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, + .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, + .speed = SPEED_50000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, + .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, + .speed = SPEED_100000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, + .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, + .speed = SPEED_100000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, + .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, + .speed = SPEED_200000, + }, +}; + +#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) + +static void +mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) +{ + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); +} + +static void +mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, + unsigned long *mode) +{ + int i; + + for (i = 0; i < link_mode->m_ethtool_len; i++) + __set_bit(link_mode->mask_ethtool[i], mode); +} + +static void +mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, + unsigned long *mode) +{ + int i; + + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) + mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], + mode); + } +} + +static void +mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) +{ + u32 speed = SPEED_UNKNOWN; + u8 duplex = DUPLEX_UNKNOWN; + int i; + + if (!carrier_ok) + goto out; + + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & 
mlxsw_sp2_port_link_mode[i].mask) { + speed = mlxsw_sp2_port_link_mode[i].speed; + duplex = DUPLEX_FULL; + break; + } + } +out: + cmd->base.speed = speed; + cmd->base.duplex = duplex; +} + +static bool +mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, + const unsigned long *mode) +{ + int cnt = 0; + int i; + + for (i = 0; i < link_mode->m_ethtool_len; i++) { + if (test_bit(link_mode->mask_ethtool[i], mode)) + cnt++; + } + + return cnt == link_mode->m_ethtool_len; +} + +static u32 +mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, + const struct ethtool_link_ksettings *cmd) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], + cmd->link_modes.advertising)) + ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; } return ptys_proto; } -static u32 mlxsw_sp_to_ptys_speed(u32 speed) +static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 speed) { u32 ptys_proto = 0; int i; - for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { - if (speed == mlxsw_sp_port_link_mode[i].speed) - ptys_proto |= mlxsw_sp_port_link_mode[i].mask; + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if (speed == mlxsw_sp2_port_link_mode[i].speed) + ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; } return ptys_proto; } -static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) +static u32 +mlxsw_sp2_to_ptys_upper_speed(struct mlxsw_sp *mlxsw_sp, u32 upper_speed) { u32 ptys_proto = 0; int i; - for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { - if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) - ptys_proto |= mlxsw_sp_port_link_mode[i].mask; + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if (mlxsw_sp2_port_link_mode[i].speed <= upper_speed) + ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; } return ptys_proto; } -static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap, - struct ethtool_link_ksettings *cmd) +static int +mlxsw_sp2_port_speed_base(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u32 *base_speed) +{ + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_cap; + int err; + + /* In Spectrum-2, the speed of 1x can change from port to port, so query + * it from firmware. 
+ */ + mlxsw_reg_ptys_ext_eth_pack(ptys_pl, local_port, 0, false); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); + if (err) + return err; + mlxsw_reg_ptys_ext_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); + + if (eth_proto_cap & + MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR) { + *base_speed = MLXSW_SP_PORT_BASE_SPEED_50G; + return 0; + } + + if (eth_proto_cap & + MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR) { + *base_speed = MLXSW_SP_PORT_BASE_SPEED_25G; + return 0; + } + + return -EIO; +} + +static void +mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, + u8 local_port, u32 proto_admin, + bool autoneg) +{ + mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); +} + +static void +mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, + u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, + u32 *p_eth_proto_oper) +{ + mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, + p_eth_proto_admin, p_eth_proto_oper); +} + +static const struct mlxsw_sp_port_type_speed_ops +mlxsw_sp2_port_type_speed_ops = { + .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, + .from_ptys_link = mlxsw_sp2_from_ptys_link, + .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, + .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, + .to_ptys_speed = mlxsw_sp2_to_ptys_speed, + .to_ptys_upper_speed = mlxsw_sp2_to_ptys_upper_speed, + .port_speed_base = mlxsw_sp2_port_speed_base, + .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, + .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, +}; + +static void +mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, + struct ethtool_link_ksettings *cmd) { + const struct mlxsw_sp_port_type_speed_ops *ops; + + ops = mlxsw_sp->port_type_speed_ops; + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); - mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd); - mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported); + ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); + ops->from_ptys_link(mlxsw_sp, eth_proto_cap, cmd->link_modes.supported); } -static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg, - struct ethtool_link_ksettings *cmd) +static void +mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, + u32 eth_proto_admin, bool autoneg, + struct ethtool_link_ksettings *cmd) { + const struct mlxsw_sp_port_type_speed_ops *ops; + + ops = mlxsw_sp->port_type_speed_ops; + if (!autoneg) return; ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); - mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising); + ops->from_ptys_link(mlxsw_sp, eth_proto_admin, + cmd->link_modes.advertising); } -static void -mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status, - struct ethtool_link_ksettings *cmd) +static u8 +mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) { - if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp) - return; - - ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg); - mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising); + switch (connector_type) { + case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: + return PORT_OTHER; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: + return PORT_NONE; + case 
MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: + return PORT_TP; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: + return PORT_AUI; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: + return PORT_BNC; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: + return PORT_MII; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: + return PORT_FIBRE; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: + return PORT_DA; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: + return PORT_OTHER; + default: + WARN_ON_ONCE(1); + return PORT_OTHER; + } } static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { - u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp; + u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + const struct mlxsw_sp_port_type_speed_ops *ops; char ptys_pl[MLXSW_REG_PTYS_LEN]; - u8 autoneg_status; + u8 connector_type; bool autoneg; int err; + ops = mlxsw_sp->port_type_speed_ops; + autoneg = mlxsw_sp_port->link.autoneg; - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); + ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, + 0, false); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; - mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, - &eth_proto_oper); + ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, + &eth_proto_admin, &eth_proto_oper); - mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd); + mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd); - mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd); - - eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl); - autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl); - mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd); + mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, + cmd); cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; - cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper); - mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, - cmd); + connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); + cmd->base.port = mlxsw_sp_port_connector_port(connector_type); + ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), + eth_proto_oper, cmd); return 0; } @@ -2637,21 +3094,25 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + const struct mlxsw_sp_port_type_speed_ops *ops; char ptys_pl[MLXSW_REG_PTYS_LEN]; u32 eth_proto_cap, eth_proto_new; bool autoneg; int err; - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0, false); + ops = mlxsw_sp->port_type_speed_ops; + + ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, + 0, false); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; - mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL); + ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL); autoneg = cmd->base.autoneg == AUTONEG_ENABLE; eth_proto_new = autoneg ? 
- mlxsw_sp_to_ptys_advert_link(cmd) : - mlxsw_sp_to_ptys_speed(cmd->base.speed); + ops->to_ptys_advert_link(mlxsw_sp, cmd) : + ops->to_ptys_speed(mlxsw_sp, cmd->base.speed); eth_proto_new = eth_proto_new & eth_proto_cap; if (!eth_proto_new) { @@ -2659,8 +3120,8 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, return -EINVAL; } - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, - eth_proto_new, autoneg); + ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, + eth_proto_new, autoneg); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); if (err) return err; @@ -2701,117 +3162,18 @@ out: return err; } -#define MLXSW_SP_I2C_ADDR_LOW 0x50 -#define MLXSW_SP_I2C_ADDR_HIGH 0x51 -#define MLXSW_SP_EEPROM_PAGE_LENGTH 256 - -static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port, - u16 offset, u16 size, void *data, - unsigned int *p_read_size) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE]; - char mcia_pl[MLXSW_REG_MCIA_LEN]; - u16 i2c_addr; - int status; - int err; - - size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE); - - if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH && - offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH) - /* Cross pages read, read until offset 256 in low page */ - size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset; - - i2c_addr = MLXSW_SP_I2C_ADDR_LOW; - if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) { - i2c_addr = MLXSW_SP_I2C_ADDR_HIGH; - offset -= MLXSW_SP_EEPROM_PAGE_LENGTH; - } - - mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module, - 0, 0, offset, size, i2c_addr); - - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl); - if (err) - return err; - - status = mlxsw_reg_mcia_status_get(mcia_pl); - if (status) - return -EIO; - - mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); - memcpy(data, eeprom_tmp, size); - *p_read_size = size; - - return 0; -} - -enum mlxsw_sp_eeprom_module_info_rev_id { - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01, - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03, -}; - -enum mlxsw_sp_eeprom_module_info_id { - MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03, - MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C, - MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D, - MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11, -}; - -enum mlxsw_sp_eeprom_module_info { - MLXSW_SP_EEPROM_MODULE_INFO_ID, - MLXSW_SP_EEPROM_MODULE_INFO_REV_ID, - MLXSW_SP_EEPROM_MODULE_INFO_SIZE, -}; - static int mlxsw_sp_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); - u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE]; - u8 module_rev_id, module_id; - unsigned int read_size; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0, - MLXSW_SP_EEPROM_MODULE_INFO_SIZE, - module_info, &read_size); - if (err) - return err; - - if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE) - return -EIO; - - module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID]; - module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID]; - - switch (module_id) { - case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP: - modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; - break; - case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS: - case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28: - if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 || - module_rev_id >= 
MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) { - modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; - } else { - modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; - } - break; - case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP: - modinfo->type = ETH_MODULE_SFF_8472; - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; - break; - default: - return -EINVAL; - } + err = mlxsw_env_get_module_info(mlxsw_sp->core, + mlxsw_sp_port->mapping.module, + modinfo); - return 0; + return err; } static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, @@ -2819,30 +3181,14 @@ static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, u8 *data) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); - int offset = ee->offset; - unsigned int read_size; - int i = 0; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - if (!ee->len) - return -EINVAL; - - memset(data, 0, ee->len); - - while (i < ee->len) { - err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset, - ee->len - i, data + i, - &read_size); - if (err) { - netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n"); - return err; - } - - i += read_size; - offset += read_size; - } + err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, + mlxsw_sp_port->mapping.module, ee, + data); - return 0; + return err; } static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { @@ -2865,13 +3211,24 @@ static int mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; + const struct mlxsw_sp_port_type_speed_ops *ops; char ptys_pl[MLXSW_REG_PTYS_LEN]; u32 eth_proto_admin; + u32 upper_speed; + u32 base_speed; + int err; + + ops = mlxsw_sp->port_type_speed_ops; - eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); - mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, - eth_proto_admin, mlxsw_sp_port->link.autoneg); + err = ops->port_speed_base(mlxsw_sp, mlxsw_sp_port->local_port, + &base_speed); + if (err) + return err; + upper_speed = base_speed * width; + + eth_proto_admin = ops->to_ptys_upper_speed(mlxsw_sp, upper_speed); + ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, + eth_proto_admin, mlxsw_sp_port->link.autoneg); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); } @@ -3207,7 +3564,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, } mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; - mlxsw_sp_port_switchdev_init(mlxsw_sp_port); mlxsw_sp->ports[local_port] = mlxsw_sp_port; err = register_netdev(dev); if (err) { @@ -3224,7 +3580,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, err_register_netdev: mlxsw_sp->ports[local_port] = NULL; - mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); err_port_vlan_create: err_port_pvid_set: @@ -3267,7 +3622,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ mlxsw_sp->ports[local_port] = NULL; - mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); mlxsw_sp_port_nve_fini(mlxsw_sp_port); mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); @@ -3744,8 +4098,8 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case 
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: - rate = 4 * 1024; - burst_size = 4; + rate = 1024; + burst_size = 7; break; default: continue; @@ -4094,6 +4448,9 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; + mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals; + mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -4110,6 +4467,9 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; + mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr; + mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals; + mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -4398,6 +4758,71 @@ static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) ARRAY_SIZE(mlxsw_sp_devlink_params)); } +static int +mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp); + return 0; +} + +static int +mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + + return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32); +} + +static const struct devlink_param mlxsw_sp2_devlink_params[] = { + DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, + "acl_region_rehash_interval", + DEVLINK_PARAM_TYPE_U32, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + mlxsw_sp_params_acl_region_rehash_intrvl_get, + mlxsw_sp_params_acl_region_rehash_intrvl_set, + NULL), +}; + +static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + union devlink_param_value value; + int err; + + err = mlxsw_sp_params_register(mlxsw_core); + if (err) + return err; + + err = devlink_params_register(devlink, mlxsw_sp2_devlink_params, + ARRAY_SIZE(mlxsw_sp2_devlink_params)); + if (err) + goto err_devlink_params_register; + + value.vu32 = 0; + devlink_param_driverinit_value_set(devlink, + MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL, + value); + return 0; + +err_devlink_params_register: + mlxsw_sp_params_unregister(mlxsw_core); + return err; +} + +static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core) +{ + devlink_params_unregister(priv_to_devlink(mlxsw_core), + mlxsw_sp2_devlink_params, + ARRAY_SIZE(mlxsw_sp2_devlink_params)); + mlxsw_sp_params_unregister(mlxsw_core); +} + static struct mlxsw_driver mlxsw_sp1_driver = { .kind = mlxsw_sp1_driver_name, .priv_size = sizeof(struct mlxsw_sp), @@ -4446,8 +4871,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp2_resources_register, - .params_register = mlxsw_sp_params_register, - .params_unregister = mlxsw_sp_params_unregister, + .params_register = mlxsw_sp2_params_register, + .params_unregister = 
mlxsw_sp2_params_unregister, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, .res_query_enabled = true, @@ -4691,9 +5116,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index); if (err) goto err_col_port_add; - err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id); - if (err) - goto err_col_port_enable; mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index, mlxsw_sp_port->local_port); @@ -4707,8 +5129,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, return 0; -err_col_port_enable: - mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); err_col_port_add: if (!lag->ref_count) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); @@ -4727,7 +5147,6 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0); - mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); /* Any VLANs configured on the port are no longer valid */ @@ -4772,21 +5191,56 @@ static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); } -static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port, - bool lag_tx_enabled) +static int +mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port) { - if (lag_tx_enabled) - return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, - mlxsw_sp_port->lag_id); - else - return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, - mlxsw_sp_port->lag_id); + int err; + + err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + return err; + + err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); + if (err) + goto err_dist_port_add; + + return 0; + +err_dist_port_add: + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id); + return err; +} + +static int +mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + return err; + + err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, + mlxsw_sp_port->lag_id); + if (err) + goto err_col_port_disable; + + return 0; + +err_col_port_disable: + mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id); + return err; } static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, struct netdev_lag_lower_state_info *info) { - return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); + if (info->tx_enabled) + return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port); + else + return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); } static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -5009,8 +5463,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); } else { - mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, - false); + mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port); mlxsw_sp_port_lag_leave(mlxsw_sp_port, upper_dev); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a1c32a81b011..da6278b0caa4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -33,7 +33,8 @@ #define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 -#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ 
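The fixed 25 Gb/s base speed becomes a per-ASIC value in the defines below (the naming suggests 25 Gb/s per lane for Spectrum-1 and 50 Gb/s per lane for Spectrum-2), and the maximum administratively enabled speed of a port is now derived as per-lane base speed times port width through the new port_type_speed_ops callbacks rather than from a compile-time constant. A minimal standalone sketch of that calculation, outside the driver and for illustration only:

#include <stdio.h>

#define BASE_SPEED_25G 25000 /* Mb/s per lane (Spectrum-1 style) */
#define BASE_SPEED_50G 50000 /* Mb/s per lane (presumably Spectrum-2) */

static unsigned int port_upper_speed(unsigned int base_speed, unsigned int width)
{
	/* e.g. a 4-lane port with a 25G base speed allows up to 100000 Mb/s */
	return base_speed * width;
}

int main(void)
{
	printf("x4 port, 25G lanes: %u Mb/s\n", port_upper_speed(BASE_SPEED_25G, 4));
	printf("x4 port, 50G lanes: %u Mb/s\n", port_upper_speed(BASE_SPEED_50G, 4));
	return 0;
}
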
+#define MLXSW_SP_PORT_BASE_SPEED_25G 25000 /* Mb/s */ +#define MLXSW_SP_PORT_BASE_SPEED_50G 50000 /* Mb/s */ #define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */ #define MLXSW_SP_KVD_GRANULARITY 128 @@ -75,6 +76,11 @@ enum mlxsw_sp_rif_type { MLXSW_SP_RIF_TYPE_MAX, }; +struct mlxsw_sp_rif_ops; + +extern const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[]; +extern const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[]; + enum mlxsw_sp_fid_type { MLXSW_SP_FID_TYPE_8021Q, MLXSW_SP_FID_TYPE_8021D, @@ -128,6 +134,8 @@ struct mlxsw_sp_kvdl_ops; struct mlxsw_sp_mr_tcam_ops; struct mlxsw_sp_acl_tcam_ops; struct mlxsw_sp_nve_ops; +struct mlxsw_sp_sb_vals; +struct mlxsw_sp_port_type_speed_ops; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -161,6 +169,9 @@ struct mlxsw_sp { const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; const struct mlxsw_sp_nve_ops **nve_ops_arr; + const struct mlxsw_sp_rif_ops **rif_ops_arr; + const struct mlxsw_sp_sb_vals *sb_vals; + const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; }; static inline struct mlxsw_sp_upper * @@ -250,6 +261,29 @@ struct mlxsw_sp_port { struct mlxsw_sp_acl_block *eg_acl_block; }; +struct mlxsw_sp_port_type_speed_ops { + void (*from_ptys_supported_port)(struct mlxsw_sp *mlxsw_sp, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd); + void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, + unsigned long *mode); + void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp, + bool carrier_ok, u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd); + u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, + const struct ethtool_link_ksettings *cmd); + u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 speed); + u32 (*to_ptys_upper_speed)(struct mlxsw_sp *mlxsw_sp, u32 upper_speed); + int (*port_speed_base)(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u32 *base_speed); + void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload, + u8 local_port, u32 proto_admin, bool autoneg); + void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload, + u32 *p_eth_proto_cap, + u32 *p_eth_proto_admin, + u32 *p_eth_proto_oper); +}; + static inline struct net_device * mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev) { @@ -365,12 +399,14 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, u32 *p_cur, u32 *p_max); u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells); u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes); +u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp); + +extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals; +extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals; /* spectrum_switchdev.c */ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp); -void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port); -void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, bool adding); void @@ -501,6 +537,9 @@ void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, const union mlxsw_sp_l3addr *ul_sip); int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id, u16 *vr_id); +int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, + u16 *ul_rif_index); +void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index); /* spectrum_kvdl.c */ enum 
mlxsw_sp_kvdl_entry_type { @@ -681,6 +720,8 @@ struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); +u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val); /* spectrum_acl_tcam.c */ struct mlxsw_sp_acl_tcam; @@ -695,10 +736,13 @@ struct mlxsw_sp_acl_tcam_ops { size_t region_priv_size; int (*region_init)(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *tcam_priv, - struct mlxsw_sp_acl_tcam_region *region); + struct mlxsw_sp_acl_tcam_region *region, + void *hints_priv); void (*region_fini)(struct mlxsw_sp *mlxsw_sp, void *region_priv); int (*region_associate)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region); + void * (*region_rehash_hints_get)(void *region_priv); + void (*region_rehash_hints_put)(void *hints_priv); size_t chunk_priv_size; void (*chunk_init)(void *region_priv, void *chunk_priv, unsigned int priority); @@ -712,8 +756,7 @@ struct mlxsw_sp_acl_tcam_ops { void *region_priv, void *chunk_priv, void *entry_priv); int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp, - void *region_priv, void *chunk_priv, - void *entry_priv, + void *region_priv, void *entry_priv, struct mlxsw_sp_acl_rule_info *rulei); int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *entry_priv, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index fe270c1a26a6..3a636f753607 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -112,7 +112,8 @@ mlxsw_sp1_acl_ctcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp1_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *tcam_priv, - struct mlxsw_sp_acl_tcam_region *_region) + struct mlxsw_sp_acl_tcam_region *_region, + void *hints_priv) { struct mlxsw_sp1_acl_tcam_region *region = region_priv; int err; @@ -194,8 +195,7 @@ static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp1_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, - void *region_priv, void *chunk_priv, - void *entry_priv, + void *region_priv, void *entry_priv, struct mlxsw_sp_acl_rule_info *rulei) { return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 234ab51916db..6c66a0f1b79e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -139,7 +139,8 @@ static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv) static int mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *tcam_priv, - struct mlxsw_sp_acl_tcam_region *_region) + struct mlxsw_sp_acl_tcam_region *_region, + void *hints_priv) { struct mlxsw_sp2_acl_tcam_region *region = region_priv; struct mlxsw_sp2_acl_tcam *tcam = tcam_priv; @@ -147,7 +148,8 @@ mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv, region->region = _region; return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam, - ®ion->aregion, _region, + ®ion->aregion, + _region, hints_priv, &mlxsw_sp2_acl_ctcam_region_ops); } @@ -166,6 +168,18 @@ mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp, return 
mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id); } +static void *mlxsw_sp2_acl_tcam_region_rehash_hints_get(void *region_priv) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + + return mlxsw_sp_acl_atcam_rehash_hints_get(®ion->aregion); +} + +static void mlxsw_sp2_acl_tcam_region_rehash_hints_put(void *hints_priv) +{ + mlxsw_sp_acl_atcam_rehash_hints_put(hints_priv); +} + static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv, unsigned int priority) { @@ -212,18 +226,15 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, - void *region_priv, void *chunk_priv, - void *entry_priv, + void *region_priv, void *entry_priv, struct mlxsw_sp_acl_rule_info *rulei) { struct mlxsw_sp2_acl_tcam_region *region = region_priv; - struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; entry->act_block = rulei->act_block; return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp, ®ion->aregion, - &chunk->achunk, &entry->aentry, rulei); } @@ -246,6 +257,8 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = { .region_init = mlxsw_sp2_acl_tcam_region_init, .region_fini = mlxsw_sp2_acl_tcam_region_fini, .region_associate = mlxsw_sp2_acl_tcam_region_associate, + .region_rehash_hints_get = mlxsw_sp2_acl_tcam_region_rehash_hints_get, + .region_rehash_hints_put = mlxsw_sp2_acl_tcam_region_rehash_hints_put, .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk), .chunk_init = mlxsw_sp2_acl_tcam_chunk_init, .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 695d33358988..a146a44634e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -588,7 +588,7 @@ int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp, { u8 ethertype; - if (action == TCA_VLAN_ACT_MODIFY) { + if (action == FLOW_ACTION_VLAN_MANGLE) { switch (proto) { case ETH_P_8021Q: ethertype = 0; @@ -640,7 +640,7 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_sp_acl_ruleset_ref_inc(ruleset); - rule = kzalloc(sizeof(*rule) + ops->rule_priv_size(mlxsw_sp), + rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL); if (!rule) { err = -ENOMEM; @@ -742,8 +742,7 @@ int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp, rulei = mlxsw_sp_acl_rule_rulei(rule); rulei->act_block = afa_block; - return ops->rule_action_replace(mlxsw_sp, ruleset->priv, rule->priv, - rule->rulei); + return ops->rule_action_replace(mlxsw_sp, rule->priv, rule->rulei); } struct mlxsw_sp_acl_rule * @@ -806,7 +805,7 @@ static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl) msecs_to_jiffies(interval)); } -static void mlxsw_sp_acl_rul_activity_update_work(struct work_struct *work) +static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work) { struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl, rule_activity_update.dw.work); @@ -885,7 +884,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) /* Create the delayed work for the rule activity_update */ INIT_DELAYED_WORK(&acl->rule_activity_update.dw, - mlxsw_sp_acl_rul_activity_update_work); + mlxsw_sp_acl_rule_activity_update_work); acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS; mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 
0); return 0; @@ -913,3 +912,19 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_afk_destroy(acl->afk); kfree(acl); } + +u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + + return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp, + &acl->tcam); +} + +int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val) +{ + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + + return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp, + &acl->tcam, val); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 80fb268d51a5..ded4cf658680 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -7,6 +7,8 @@ #include #include #include +#define CREATE_TRACE_POINTS +#include #include "reg.h" #include "core.h" @@ -316,6 +318,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region, + void *hints_priv, const struct mlxsw_sp_acl_ctcam_region_ops *ops) { int err; @@ -332,7 +335,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, err = aregion->ops->init(aregion); if (err) goto err_ops_init; - err = mlxsw_sp_acl_erp_region_init(aregion); + err = mlxsw_sp_acl_erp_region_init(aregion, hints_priv); if (err) goto err_erp_region_init; err = mlxsw_sp_acl_ctcam_region_init(mlxsw_sp, &aregion->cregion, @@ -390,8 +393,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, if (err) return err; - lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key, - erp_id); + lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id); if (IS_ERR(lkey_id)) return PTR_ERR(lkey_id); aentry->lkey_id = lkey_id; @@ -399,7 +401,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, priority, region->tcam_region_info, - aentry->ht_key.enc_key, erp_id, + aentry->enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -424,12 +426,11 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id; struct mlxsw_sp_acl_tcam_region *region = aregion->region; u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask); - char *enc_key = aentry->ht_key.enc_key; char ptce3_pl[MLXSW_REG_PTCE3_LEN]; mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, region->tcam_region_info, - enc_key, erp_id, + aentry->enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -458,7 +459,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE, priority, region->tcam_region_info, - aentry->ht_key.enc_key, erp_id, + aentry->enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -481,15 +482,15 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_afk_encode(afk, region->key_info, &rulei->values, - aentry->full_enc_key, mask); + aentry->ht_key.full_enc_key, mask); erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, 
false); if (IS_ERR(erp_mask)) return PTR_ERR(erp_mask); aentry->erp_mask = erp_mask; aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask); - memcpy(aentry->ht_key.enc_key, aentry->full_enc_key, - sizeof(aentry->ht_key.enc_key)); + memcpy(aentry->enc_key, aentry->ht_key.full_enc_key, + sizeof(aentry->enc_key)); /* Compute all needed delta information and clear the delta bits * from the encrypted key. @@ -498,8 +499,9 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta); aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta); aentry->delta_info.value = - mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key); - mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key); + mlxsw_sp_acl_erp_delta_value(delta, + aentry->ht_key.full_enc_key); + mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key); /* Add rule to the list of A-TCAM rules, assuming this * rule is intended to A-TCAM. In case this rule does @@ -579,6 +581,7 @@ int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, /* It is possible we failed to add the rule to the A-TCAM due to * exceeded number of masks. Try to spill into C-TCAM. */ + trace_mlxsw_sp_acl_atcam_entry_add_ctcam_spill(mlxsw_sp, aregion); err = mlxsw_sp_acl_ctcam_entry_add(mlxsw_sp, &aregion->cregion, &achunk->cchunk, &aentry->centry, rulei, true); @@ -603,7 +606,6 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_atcam_chunk *achunk, struct mlxsw_sp_acl_atcam_entry *aentry, struct mlxsw_sp_acl_rule_info *rulei) { @@ -612,7 +614,6 @@ mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp_acl_atcam_is_centry(aentry)) err = mlxsw_sp_acl_ctcam_entry_action_replace(mlxsw_sp, &aregion->cregion, - &achunk->cchunk, &aentry->centry, rulei); else @@ -634,3 +635,14 @@ void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, { mlxsw_sp_acl_erps_fini(mlxsw_sp, atcam); } + +void * +mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion) +{ + return mlxsw_sp_acl_erp_rehash_hints_get(aregion); +} + +void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv) +{ + mlxsw_sp_acl_erp_rehash_hints_put(hints_priv); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index 505b87846acc..3a2de13fcb68 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -5,11 +5,13 @@ #include #include #include +#include #include "spectrum.h" #include "spectrum_acl_tcam.h" struct mlxsw_sp_acl_bf { + struct mutex lock; /* Protects Bloom Filter updates. 
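The Bloom filter banks are shared between ACL regions, and entry add/del now serialises on this new mutex around the reference counting and the PEABFE register write instead of relying on outer locking. A simplified userspace analogue of the pattern, with the hardware write only hinted at in comments:

#include <pthread.h>

#define BF_NUM_ENTRIES 1024 /* arbitrary size for the sketch */

static pthread_mutex_t bf_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int bf_refcnt[BF_NUM_ENTRIES];

static void bf_entry_add(unsigned int index)
{
	pthread_mutex_lock(&bf_lock);
	if (bf_refcnt[index]++ == 0) {
		/* First reference: set the filter bit (PEABFE write in the driver). */
	}
	pthread_mutex_unlock(&bf_lock);
}

static void bf_entry_del(unsigned int index)
{
	pthread_mutex_lock(&bf_lock);
	if (bf_refcnt[index] && --bf_refcnt[index] == 0) {
		/* Last reference: clear the filter bit again. */
	}
	pthread_mutex_unlock(&bf_lock);
}
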
*/ unsigned int bank_size; refcount_t refcnt[0]; }; @@ -133,7 +135,7 @@ mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id, sizeof(erp_region_id)); memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET, - &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], + &aentry->enc_key[chunk_key_offsets[chunk_index]], MLXSW_BLOOM_CHUNK_KEY_BYTES); chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES; } @@ -172,26 +174,36 @@ mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp, u16 bf_index; int err; + mutex_lock(&bf->lock); + bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry); rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank, bf_index); - if (refcount_inc_not_zero(&bf->refcnt[rule_index])) - return 0; + if (refcount_inc_not_zero(&bf->refcnt[rule_index])) { + err = 0; + goto unlock; + } peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL); - if (!peabfe_pl) - return -ENOMEM; + if (!peabfe_pl) { + err = -ENOMEM; + goto unlock; + } mlxsw_reg_peabfe_pack(peabfe_pl); mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, erp_bank, bf_index); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl); kfree(peabfe_pl); if (err) - return err; + goto unlock; refcount_set(&bf->refcnt[rule_index], 1); - return 0; + err = 0; + +unlock: + mutex_unlock(&bf->lock); + return err; } void @@ -205,6 +217,8 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp, char *peabfe_pl; u16 bf_index; + mutex_lock(&bf->lock); + bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry); rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank, bf_index); @@ -212,13 +226,16 @@ mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp, if (refcount_dec_and_test(&bf->refcnt[rule_index])) { peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL); if (!peabfe_pl) - return; + goto unlock; mlxsw_reg_peabfe_pack(peabfe_pl); mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 0, erp_bank, bf_index); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl); kfree(peabfe_pl); } + +unlock: + mutex_unlock(&bf->lock); } struct mlxsw_sp_acl_bf * @@ -234,16 +251,19 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks) * is 2^ACL_MAX_BF_LOG */ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG); - bf = kzalloc(sizeof(*bf) + bf_bank_size * num_erp_banks * - sizeof(*bf->refcnt), GFP_KERNEL); + bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks), + GFP_KERNEL); if (!bf) return ERR_PTR(-ENOMEM); bf->bank_size = bf_bank_size; + mutex_init(&bf->lock); + return bf; } void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf) { + mutex_destroy(&bf->lock); kfree(bf); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index ac222833a5cf..05680a7e6c56 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -223,7 +223,6 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_region *cregion, - struct mlxsw_sp_acl_ctcam_chunk *cchunk, struct mlxsw_sp_acl_ctcam_entry *centry, struct mlxsw_sp_acl_rule_info *rulei) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 2941967e1cc5..c1a9cc9a3292 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -63,6 +64,7 @@ struct mlxsw_sp_acl_erp_table { unsigned int num_ctcam_erps; unsigned int num_deltas; struct objagg *objagg; + struct mutex objagg_lock; /* guards objagg manipulation */ }; struct mlxsw_sp_acl_erp_table_ops { @@ -1001,17 +1003,15 @@ struct mlxsw_sp_acl_erp_mask * mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion, const char *mask, bool ctcam) { + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; struct mlxsw_sp_acl_erp_key key; struct objagg_obj *objagg_obj; - /* eRPs are allocated from a shared resource, but currently all - * allocations are done under RTNL. - */ - ASSERT_RTNL(); - memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); key.ctcam = ctcam; - objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key); + mutex_lock(&erp_table->objagg_lock); + objagg_obj = objagg_obj_get(erp_table->objagg, &key); + mutex_unlock(&erp_table->objagg_lock); if (IS_ERR(objagg_obj)) return ERR_CAST(objagg_obj); return (struct mlxsw_sp_acl_erp_mask *) objagg_obj; @@ -1021,8 +1021,11 @@ void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_erp_mask *erp_mask) { struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; - objagg_obj_put(aregion->erp_table->objagg, objagg_obj); + mutex_lock(&erp_table->objagg_lock); + objagg_obj_put(erp_table->objagg, objagg_obj); + mutex_unlock(&erp_table->objagg_lock); } int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp, @@ -1034,7 +1037,6 @@ int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); unsigned int erp_bank; - ASSERT_RTNL(); if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) return 0; @@ -1200,6 +1202,32 @@ mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key, return 0; } +static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj, + const void *obj) +{ + const struct mlxsw_sp_acl_erp_key *parent_key = parent_obj; + const struct mlxsw_sp_acl_erp_key *key = obj; + u16 delta_start; + u8 delta_mask; + int err; + + err = mlxsw_sp_acl_erp_delta_fill(parent_key, key, + &delta_start, &delta_mask); + return err ? false : true; +} + +static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2) +{ + const struct mlxsw_sp_acl_erp_key *key1 = obj1; + const struct mlxsw_sp_acl_erp_key *key2 = obj2; + + /* For hints purposes, two objects are considered equal + * in case the masks are the same. Does not matter what + * the "ctcam" value is. 
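For the hints computation two eRP keys are treated as equal whenever their masks match, regardless of the ctcam flag, presumably so that an A-TCAM mask and its C-TCAM spill copy aggregate together. A self-contained sketch of such a comparator; the mask length is an arbitrary stand-in, not the real register size:

#include <stdbool.h>
#include <string.h>

#define ERP_MASK_LEN 16 /* stand-in for the flex-key blocks length */

struct erp_key {
	char mask[ERP_MASK_LEN];
	bool ctcam;
};

/* Compare by mask only; the ctcam flag is deliberately ignored. */
static int erp_hints_obj_cmp(const void *obj1, const void *obj2)
{
	const struct erp_key *key1 = obj1;
	const struct erp_key *key2 = obj2;

	return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
}
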
+ */ + return memcmp(key1->mask, key2->mask, sizeof(key1->mask)); +} + static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj, void *obj) { @@ -1254,12 +1282,17 @@ static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv) kfree(delta); } -static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj) +static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj, + unsigned int root_id) { struct mlxsw_sp_acl_atcam_region *aregion = priv; struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; struct mlxsw_sp_acl_erp_key *key = obj; + if (!key->ctcam && + root_id != OBJAGG_OBJ_ROOT_ID_INVALID && + root_id >= MLXSW_SP_ACL_ERP_MAX_PER_REGION) + return ERR_PTR(-ENOBUFS); return erp_table->ops->erp_create(erp_table, key); } @@ -1273,6 +1306,8 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv) static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { .obj_size = sizeof(struct mlxsw_sp_acl_erp_key), + .delta_check = mlxsw_sp_acl_erp_delta_check, + .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp, .delta_create = mlxsw_sp_acl_erp_delta_create, .delta_destroy = mlxsw_sp_acl_erp_delta_destroy, .root_create = mlxsw_sp_acl_erp_root_create, @@ -1280,7 +1315,8 @@ static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { }; static struct mlxsw_sp_acl_erp_table * -mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) +mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion, + struct objagg_hints *hints) { struct mlxsw_sp_acl_erp_table *erp_table; int err; @@ -1290,7 +1326,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) return ERR_PTR(-ENOMEM); erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops, - aregion); + hints, aregion); if (IS_ERR(erp_table->objagg)) { err = PTR_ERR(erp_table->objagg); goto err_objagg_create; @@ -1300,6 +1336,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) erp_table->ops = &erp_no_mask_ops; INIT_LIST_HEAD(&erp_table->atcam_erps_list); erp_table->aregion = aregion; + mutex_init(&erp_table->objagg_lock); return erp_table; @@ -1312,6 +1349,7 @@ static void mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table) { WARN_ON(!list_empty(&erp_table->atcam_erps_list)); + mutex_destroy(&erp_table->objagg_lock); objagg_destroy(erp_table->objagg); kfree(erp_table); } @@ -1337,12 +1375,93 @@ mlxsw_sp_acl_erp_region_param_init(struct mlxsw_sp_acl_atcam_region *aregion) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pererp), pererp_pl); } -int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion) +static int +mlxsw_sp_acl_erp_hints_check(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct objagg_hints *hints, bool *p_rehash_needed) +{ + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + const struct objagg_stats *ostats; + const struct objagg_stats *hstats; + int err; + + *p_rehash_needed = false; + + mutex_lock(&erp_table->objagg_lock); + ostats = objagg_stats_get(erp_table->objagg); + mutex_unlock(&erp_table->objagg_lock); + if (IS_ERR(ostats)) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP stats\n"); + return PTR_ERR(ostats); + } + + hstats = objagg_hints_stats_get(hints); + if (IS_ERR(hstats)) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get ERP hints stats\n"); + err = PTR_ERR(hstats); + goto err_hints_stats_get; + } + + /* Very basic criterion for now. 
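The rehash-needed test that follows compares the objagg statistics of the live eRP table with the statistics the freshly computed hints would give, and only asks for a rehash when the hinted layout needs fewer root entries. Reduced to its core:

#include <stdbool.h>

struct agg_stats {
	unsigned int root_count; /* roots (non-delta objects) in use */
};

/* Rehash pays off only when the recomputed layout needs fewer roots. */
static bool rehash_needed(const struct agg_stats *live,
			  const struct agg_stats *hinted)
{
	return hinted->root_count < live->root_count;
}
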
*/ + if (hstats->root_count < ostats->root_count) + *p_rehash_needed = true; + + err = 0; + + objagg_stats_put(hstats); +err_hints_stats_get: + objagg_stats_put(ostats); + return err; +} + +void * +mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion) +{ + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp; + struct objagg_hints *hints; + bool rehash_needed; + int err; + + mutex_lock(&erp_table->objagg_lock); + hints = objagg_hints_get(erp_table->objagg, + OBJAGG_OPT_ALGO_SIMPLE_GREEDY); + mutex_unlock(&erp_table->objagg_lock); + if (IS_ERR(hints)) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to create ERP hints\n"); + return ERR_CAST(hints); + } + err = mlxsw_sp_acl_erp_hints_check(mlxsw_sp, aregion, hints, + &rehash_needed); + if (err) + goto errout; + + if (!rehash_needed) { + err = -EAGAIN; + goto errout; + } + return hints; + +errout: + objagg_hints_put(hints); + return ERR_PTR(err); +} + +void mlxsw_sp_acl_erp_rehash_hints_put(void *hints_priv) +{ + struct objagg_hints *hints = hints_priv; + + objagg_hints_put(hints); +} + +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion, + void *hints_priv) { struct mlxsw_sp_acl_erp_table *erp_table; + struct objagg_hints *hints = hints_priv; int err; - erp_table = mlxsw_sp_acl_erp_table_create(aregion); + erp_table = mlxsw_sp_acl_erp_table_create(aregion, hints); if (IS_ERR(erp_table)) return PTR_ERR(erp_table); aregion->erp_table = erp_table; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index fe230acf92a9..8811f6513e36 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include "reg.h" #include "core.h" @@ -23,6 +25,10 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp) return ops->priv_size; } +#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */ +#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */ +#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */ + int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam) { @@ -33,6 +39,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, size_t alloc_size; int err; + mutex_init(&tcam->lock); + tcam->vregion_rehash_intrvl = + MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT; + INIT_LIST_HEAD(&tcam->vregion_list); + max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_REGIONS); max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS); @@ -76,6 +87,7 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + mutex_destroy(&tcam->lock); ops->fini(mlxsw_sp, tcam->priv); kfree(tcam->used_groups); kfree(tcam->used_regions); @@ -153,37 +165,100 @@ struct mlxsw_sp_acl_tcam_pattern { struct mlxsw_sp_acl_tcam_group { struct mlxsw_sp_acl_tcam *tcam; u16 id; + struct mutex lock; /* guards region list updates */ struct list_head region_list; unsigned int region_count; - struct rhashtable chunk_ht; - struct mlxsw_sp_acl_tcam_group_ops *ops; +}; + +struct mlxsw_sp_acl_tcam_vgroup { + struct mlxsw_sp_acl_tcam_group group; + struct list_head vregion_list; + struct rhashtable vchunk_ht; const struct mlxsw_sp_acl_tcam_pattern *patterns; unsigned int patterns_count; bool tmplt_elusage_set; 
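The vgroup/vregion/vchunk/ventry objects introduced here (see the structures that follow) form a virtual layer above the hardware group/region/chunk/entry objects, so a rule can be re-created in a second region during rehash while its virtual handle stays stable. A conceptual sketch of that indirection; entry_create() and entry_destroy() are hypothetical stand-ins for the per-region callbacks, not driver functions:

struct hw_region;
struct hw_entry;

/* hypothetical helpers standing in for the real per-region entry ops */
struct hw_entry *entry_create(struct hw_region *region /*, rule data */);
void entry_destroy(struct hw_entry *entry);

struct ventry {
	struct hw_entry *entry; /* instance currently programmed in hardware */
};

/* In this sketch the copy is programmed in the new region first and the
 * old instance retired afterwards, so the ventry never dangles.
 */
static int ventry_migrate(struct ventry *ventry, struct hw_region *new_region)
{
	struct hw_entry *new_entry = entry_create(new_region);

	if (!new_entry)
		return -1;
	entry_destroy(ventry->entry);
	ventry->entry = new_entry;
	return 0;
}
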
struct mlxsw_afk_element_usage tmplt_elusage; + bool vregion_rehash_enabled; }; -struct mlxsw_sp_acl_tcam_chunk { - struct list_head list; /* Member of a TCAM region */ - struct rhash_head ht_node; /* Member of a chunk HT */ - unsigned int priority; /* Priority within the region and group */ - struct mlxsw_sp_acl_tcam_group *group; +struct mlxsw_sp_acl_tcam_rehash_ctx { + void *hints_priv; + bool this_is_rollback; + struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being + * currently migrated. + */ + struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start + * migration from in + * a vchunk being + * currently migrated. + */ + struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop + * migration at + * a vchunk being + * currently migrated. + */ +}; + +struct mlxsw_sp_acl_tcam_vregion { + struct mutex lock; /* Protects consistency of region, region2 pointers + * and vchunk_list. + */ struct mlxsw_sp_acl_tcam_region *region; + struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */ + struct list_head list; /* Member of a TCAM group */ + struct list_head tlist; /* Member of a TCAM */ + struct list_head vchunk_list; /* List of vchunks under this vregion */ + struct mlxsw_afk_key_info *key_info; + struct mlxsw_sp_acl_tcam *tcam; + struct mlxsw_sp_acl_tcam_vgroup *vgroup; + struct { + struct delayed_work dw; + struct mlxsw_sp_acl_tcam_rehash_ctx ctx; + } rehash; + struct mlxsw_sp *mlxsw_sp; + bool failed_rollback; /* Indicates failed rollback during migration */ unsigned int ref_count; +}; + +struct mlxsw_sp_acl_tcam_vchunk; + +struct mlxsw_sp_acl_tcam_chunk { + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + struct mlxsw_sp_acl_tcam_region *region; unsigned long priv[0]; /* priv has to be always the last item */ }; +struct mlxsw_sp_acl_tcam_vchunk { + struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */ + struct list_head list; /* Member of a TCAM vregion */ + struct rhash_head ht_node; /* Member of a chunk HT */ + struct list_head ventry_list; + unsigned int priority; /* Priority within the vregion and group */ + struct mlxsw_sp_acl_tcam_vgroup *vgroup; + struct mlxsw_sp_acl_tcam_vregion *vregion; + unsigned int ref_count; +}; + struct mlxsw_sp_acl_tcam_entry { + struct mlxsw_sp_acl_tcam_ventry *ventry; struct mlxsw_sp_acl_tcam_chunk *chunk; unsigned long priv[0]; /* priv has to be always the last item */ }; -static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = { +struct mlxsw_sp_acl_tcam_ventry { + struct mlxsw_sp_acl_tcam_entry *entry; + struct list_head list; /* Member of a TCAM vchunk */ + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + struct mlxsw_sp_acl_rule_info *rulei; +}; + +static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = { .key_len = sizeof(unsigned int), - .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority), - .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node), + .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority), + .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node), .automatic_shrinking = true, }; @@ -195,55 +270,90 @@ static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp, int acl_index = 0; mlxsw_reg_pagt_pack(pagt_pl, group->id); - list_for_each_entry(region, &group->region_list, list) - mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id); + list_for_each_entry(region, &group->region_list, list) { + bool multi = false; + + /* Check if the next entry in the list has 
the same vregion. */ + if (region->list.next != &group->region_list && + list_next_entry(region, list)->vregion == region->vregion) + multi = true; + mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, + region->id, multi); + } mlxsw_reg_pagt_size_set(pagt_pl, acl_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl); } static int -mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam *tcam, - struct mlxsw_sp_acl_tcam_group *group, - const struct mlxsw_sp_acl_tcam_pattern *patterns, - unsigned int patterns_count, - struct mlxsw_afk_element_usage *tmplt_elusage) +mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_sp_acl_tcam_group *group) { int err; group->tcam = tcam; - group->patterns = patterns; - group->patterns_count = patterns_count; - if (tmplt_elusage) { - group->tmplt_elusage_set = true; - memcpy(&group->tmplt_elusage, tmplt_elusage, - sizeof(group->tmplt_elusage)); - } + mutex_init(&group->lock); INIT_LIST_HEAD(&group->region_list); + err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id); if (err) return err; - err = rhashtable_init(&group->chunk_ht, - &mlxsw_sp_acl_tcam_chunk_ht_params); + return 0; +} + +static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group) +{ + struct mlxsw_sp_acl_tcam *tcam = group->tcam; + + mutex_destroy(&group->lock); + mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + WARN_ON(!list_empty(&group->region_list)); +} + +static int +mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + struct mlxsw_sp_acl_tcam_vgroup *vgroup, + const struct mlxsw_sp_acl_tcam_pattern *patterns, + unsigned int patterns_count, + struct mlxsw_afk_element_usage *tmplt_elusage, + bool vregion_rehash_enabled) +{ + int err; + + vgroup->patterns = patterns; + vgroup->patterns_count = patterns_count; + vgroup->vregion_rehash_enabled = vregion_rehash_enabled; + + if (tmplt_elusage) { + vgroup->tmplt_elusage_set = true; + memcpy(&vgroup->tmplt_elusage, tmplt_elusage, + sizeof(vgroup->tmplt_elusage)); + } + INIT_LIST_HEAD(&vgroup->vregion_list); + + err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group); + if (err) + return err; + + err = rhashtable_init(&vgroup->vchunk_ht, + &mlxsw_sp_acl_tcam_vchunk_ht_params); if (err) goto err_rhashtable_init; return 0; err_rhashtable_init: - mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); + mlxsw_sp_acl_tcam_group_del(&vgroup->group); return err; } -static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group) +static void +mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup) { - struct mlxsw_sp_acl_tcam *tcam = group->tcam; - - rhashtable_destroy(&group->chunk_ht); - mlxsw_sp_acl_tcam_group_id_put(tcam, group->id); - WARN_ON(!list_empty(&group->region_list)); + rhashtable_destroy(&vgroup->vchunk_ht); + mlxsw_sp_acl_tcam_group_del(&vgroup->group); + WARN_ON(!list_empty(&vgroup->vregion_list)); } static int @@ -283,146 +393,194 @@ mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group) } static unsigned int -mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion) { - struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; - if (list_empty(®ion->chunk_list)) + if (list_empty(&vregion->vchunk_list)) return 0; - /* As a priority of a region, return priority of the first chunk */ - chunk = list_first_entry(®ion->chunk_list, typeof(*chunk), 
list); - return chunk->priority; + /* As a priority of a vregion, return priority of the first vchunk */ + vchunk = list_first_entry(&vregion->vchunk_list, + typeof(*vchunk), list); + return vchunk->priority; } static unsigned int -mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion) { - struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; - if (list_empty(®ion->chunk_list)) + if (list_empty(&vregion->vchunk_list)) return 0; - chunk = list_last_entry(®ion->chunk_list, typeof(*chunk), list); - return chunk->priority; + vchunk = list_last_entry(&vregion->vchunk_list, + typeof(*vchunk), list); + return vchunk->priority; } -static void -mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_region *region) +static int +mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region, + unsigned int priority, + struct mlxsw_sp_acl_tcam_region *next_region) { struct mlxsw_sp_acl_tcam_region *region2; struct list_head *pos; + int err; - /* Position the region inside the list according to priority */ - list_for_each(pos, &group->region_list) { - region2 = list_entry(pos, typeof(*region2), list); - if (mlxsw_sp_acl_tcam_region_prio(region2) > - mlxsw_sp_acl_tcam_region_prio(region)) - break; + mutex_lock(&group->lock); + if (group->region_count == group->tcam->max_group_size) { + err = -ENOBUFS; + goto err_region_count_check; + } + + if (next_region) { + /* If the next region is defined, place the new one + * before it. The next one is a sibling. + */ + pos = &next_region->list; + } else { + /* Position the region inside the list according to priority */ + list_for_each(pos, &group->region_list) { + region2 = list_entry(pos, typeof(*region2), list); + if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) > + priority) + break; + } } list_add_tail(®ion->list, pos); + region->group = group; + + err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + if (err) + goto err_group_update; + group->region_count++; + mutex_unlock(&group->lock); + return 0; + +err_group_update: + list_del(®ion->list); +err_region_count_check: + mutex_unlock(&group->lock); + return err; } static void -mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_region *region) { - group->region_count--; + struct mlxsw_sp_acl_tcam_group *group = region->group; + + mutex_lock(&group->lock); list_del(®ion->list); + group->region_count--; + mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + mutex_unlock(&group->lock); } static int -mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vgroup *vgroup, + struct mlxsw_sp_acl_tcam_vregion *vregion, + unsigned int priority) { + struct mlxsw_sp_acl_tcam_vregion *vregion2; + struct list_head *pos; int err; - if (group->region_count == group->tcam->max_group_size) - return -ENOBUFS; - - mlxsw_sp_acl_tcam_group_list_add(group, region); + /* Position the vregion inside the list according to priority */ + list_for_each(pos, &vgroup->vregion_list) { + vregion2 = list_entry(pos, typeof(*vregion2), list); + if 
(mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority) + break; + } + list_add_tail(&vregion->list, pos); - err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group, + vregion->region, + priority, NULL); if (err) - goto err_group_update; - region->group = group; + goto err_region_attach; return 0; -err_group_update: - mlxsw_sp_acl_tcam_group_list_del(group, region); - mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); +err_region_attach: + list_del(&vregion->list); return err; } static void -mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_region *region) +mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion) { - struct mlxsw_sp_acl_tcam_group *group = region->group; - - mlxsw_sp_acl_tcam_group_list_del(group, region); - mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group); + list_del(&vregion->list); + if (vregion->region2) + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, + vregion->region2); + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region); } -static struct mlxsw_sp_acl_tcam_region * -mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage, - bool *p_need_split) +static struct mlxsw_sp_acl_tcam_vregion * +mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage, + bool *p_need_split) { - struct mlxsw_sp_acl_tcam_region *region, *region2; + struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2; struct list_head *pos; bool issubset; - list_for_each(pos, &group->region_list) { - region = list_entry(pos, typeof(*region), list); + list_for_each(pos, &vgroup->vregion_list) { + vregion = list_entry(pos, typeof(*vregion), list); /* First, check if the requested priority does not rather belong - * under some of the next regions. + * under some of the next vregions. */ - if (pos->next != &group->region_list) { /* not last */ - region2 = list_entry(pos->next, typeof(*region2), list); - if (priority >= mlxsw_sp_acl_tcam_region_prio(region2)) + if (pos->next != &vgroup->vregion_list) { /* not last */ + vregion2 = list_entry(pos->next, typeof(*vregion2), + list); + if (priority >= + mlxsw_sp_acl_tcam_vregion_prio(vregion2)) continue; } - issubset = mlxsw_afk_key_info_subset(region->key_info, elusage); + issubset = mlxsw_afk_key_info_subset(vregion->key_info, + elusage); /* If requested element usage would not fit and the priority - * is lower than the currently inspected region we cannot - * use this region, so return NULL to indicate new region has + * is lower than the currently inspected vregion we cannot + * use this region, so return NULL to indicate new vregion has * to be created. */ if (!issubset && - priority < mlxsw_sp_acl_tcam_region_prio(region)) + priority < mlxsw_sp_acl_tcam_vregion_prio(vregion)) return NULL; /* If requested element usage would not fit and the priority - * is higher than the currently inspected region we cannot - * use this region. There is still some hope that the next - * region would be the fit. So let it be processed and + * is higher than the currently inspected vregion we cannot + * use this vregion. There is still some hope that the next + * vregion would be the fit. So let it be processed and * eventually break at the check right above this. 
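The vregion lookup walks the priority-sorted list and either returns a vregion that can host the new priority, flagging need_split when that vregion's key does not cover the requested elements, or returns NULL so a new vregion gets created. A condensed, self-contained version of the same walk, with element usage reduced to a bitmask:

#include <stdbool.h>
#include <stddef.h>

struct vregion {
	unsigned int first_prio;  /* priority of its first vchunk */
	unsigned int last_prio;   /* priority of its last vchunk */
	unsigned long key_usage;  /* key elements this vregion can match on */
	struct vregion *next;     /* list is sorted by first_prio */
};

static bool usage_subset(unsigned long need, unsigned long have)
{
	return (need & ~have) == 0;
}

/* Returns the vregion to place the priority in, or NULL when a new one
 * must be created; *p_need_split tells the caller the chosen vregion
 * cannot express all requested key elements.
 */
static struct vregion *vregion_find(struct vregion *head, unsigned int prio,
				    unsigned long need, bool *p_need_split)
{
	struct vregion *vregion;

	for (vregion = head; vregion; vregion = vregion->next) {
		bool issubset;

		if (vregion->next && prio >= vregion->next->first_prio)
			continue; /* belongs under one of the next vregions */
		issubset = usage_subset(need, vregion->key_usage);
		if (!issubset && prio < vregion->first_prio)
			return NULL; /* a brand new vregion is needed */
		if (!issubset && prio > vregion->last_prio)
			continue; /* maybe the next vregion fits */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* new vregion has to be created */
}
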
*/ if (!issubset && - priority > mlxsw_sp_acl_tcam_region_max_prio(region)) + priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion)) continue; - /* Indicate if the region needs to be split in order to add + /* Indicate if the vregion needs to be split in order to add * the requested priority. Split is needed when requested - * element usage won't fit into the found region. + * element usage won't fit into the found vregion. */ *p_need_split = !issubset; - return region; + return vregion; } - return NULL; /* New region has to be created. */ + return NULL; /* New vregion has to be created. */ } static void -mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_afk_element_usage *elusage, - struct mlxsw_afk_element_usage *out) +mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup, + struct mlxsw_afk_element_usage *elusage, + struct mlxsw_afk_element_usage *out) { const struct mlxsw_sp_acl_tcam_pattern *pattern; int i; @@ -430,14 +588,14 @@ mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group, /* In case the template is set, we don't have to look up the pattern * and just use the template. */ - if (group->tmplt_elusage_set) { - memcpy(out, &group->tmplt_elusage, sizeof(*out)); + if (vgroup->tmplt_elusage_set) { + memcpy(out, &vgroup->tmplt_elusage, sizeof(*out)); WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out)); return; } - for (i = 0; i < group->patterns_count; i++) { - pattern = &group->patterns[i]; + for (i = 0; i < vgroup->patterns_count; i++) { + pattern = &vgroup->patterns[i]; mlxsw_afk_element_usage_fill(out, pattern->elements, pattern->elements_count); if (mlxsw_afk_element_usage_subset(elusage, out)) @@ -511,24 +669,19 @@ mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_acl_tcam_region * mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, - struct mlxsw_afk_element_usage *elusage) + struct mlxsw_sp_acl_tcam_vregion *vregion, + void *hints_priv) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); struct mlxsw_sp_acl_tcam_region *region; int err; region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL); if (!region) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(®ion->chunk_list); region->mlxsw_sp = mlxsw_sp; - - region->key_info = mlxsw_afk_key_info_get(afk, elusage); - if (IS_ERR(region->key_info)) { - err = PTR_ERR(region->key_info); - goto err_key_info_get; - } + region->vregion = vregion; + region->key_info = vregion->key_info; err = mlxsw_sp_acl_tcam_region_id_get(tcam, ®ion->id); if (err) @@ -547,7 +700,8 @@ mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_tcam_region_enable; - err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region); + err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, + region, hints_priv); if (err) goto err_tcam_region_init; @@ -561,8 +715,6 @@ err_tcam_region_alloc: err_tcam_region_associate: mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); err_region_id_get: - mlxsw_afk_key_info_put(region->key_info); -err_key_info_get: kfree(region); return ERR_PTR(err); } @@ -576,220 +728,415 @@ mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, ops->region_fini(mlxsw_sp, region->priv); mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); - mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id); - 
mlxsw_afk_key_info_put(region->key_info); + mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, + region->id); kfree(region); } -static int -mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage, - struct mlxsw_sp_acl_tcam_chunk *chunk) +static void +mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion) { - struct mlxsw_sp_acl_tcam_region *region; - bool region_created = false; - bool need_split; - int err; + unsigned long interval = vregion->tcam->vregion_rehash_intrvl; + + if (!interval) + return; + mlxsw_core_schedule_dw(&vregion->rehash.dw, + msecs_to_jiffies(interval)); +} + +static void +mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion, + int *credits); - region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage, - &need_split); - if (region && need_split) { - /* According to priority, the chunk should belong to an - * existing region. However, this chunk needs elements - * that region does not contain. We need to split the existing - * region into two and create a new region for this chunk - * in between. This is not supported now. +static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work) +{ + struct mlxsw_sp_acl_tcam_vregion *vregion = + container_of(work, struct mlxsw_sp_acl_tcam_vregion, + rehash.dw.work); + int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS; + + mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits); + if (credits < 0) + /* Rehash gone out of credits so it was interrupted. + * Schedule the work as soon as possible to continue. */ - return -EOPNOTSUPP; - } - if (!region) { - struct mlxsw_afk_element_usage region_elusage; - - mlxsw_sp_acl_tcam_group_use_patterns(group, elusage, - ®ion_elusage); - region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam, - ®ion_elusage); - if (IS_ERR(region)) - return PTR_ERR(region); - region_created = true; + mlxsw_core_schedule_dw(&vregion->rehash.dw, 0); + else + mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion); +} + +static void +mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk) +{ + struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion; + + /* If a rule was added or deleted from vchunk which is currently + * under rehash migration, we have to reset the ventry pointers + * to make sure all rules are properly migrated. + */ + if (vregion->rehash.ctx.current_vchunk == vchunk) { + vregion->rehash.ctx.start_ventry = NULL; + vregion->rehash.ctx.stop_ventry = NULL; } +} - chunk->region = region; - list_add_tail(&chunk->list, ®ion->chunk_list); +static void +mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion) +{ + /* If a chunk was added or deleted from vregion we have to reset + * the current chunk pointer to make sure all chunks + * are properly migrated. 
+ */ + vregion->rehash.ctx.current_vchunk = NULL; +} - if (!region_created) - return 0; +static struct mlxsw_sp_acl_tcam_vregion * +mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vgroup *vgroup, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); + struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam; + struct mlxsw_sp_acl_tcam_vregion *vregion; + int err; + + vregion = kzalloc(sizeof(*vregion), GFP_KERNEL); + if (!vregion) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&vregion->vchunk_list); + mutex_init(&vregion->lock); + vregion->tcam = tcam; + vregion->mlxsw_sp = mlxsw_sp; + vregion->vgroup = vgroup; + vregion->ref_count = 1; + + vregion->key_info = mlxsw_afk_key_info_get(afk, elusage); + if (IS_ERR(vregion->key_info)) { + err = PTR_ERR(vregion->key_info); + goto err_key_info_get; + } - err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region); + vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam, + vregion, NULL); + if (IS_ERR(vregion->region)) { + err = PTR_ERR(vregion->region); + goto err_region_create; + } + + err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion, + priority); if (err) - goto err_group_region_attach; + goto err_vgroup_vregion_attach; + + if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) { + /* Create the delayed work for vregion periodic rehash */ + INIT_DELAYED_WORK(&vregion->rehash.dw, + mlxsw_sp_acl_tcam_vregion_rehash_work); + mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion); + mutex_lock(&tcam->lock); + list_add_tail(&vregion->tlist, &tcam->vregion_list); + mutex_unlock(&tcam->lock); + } - return 0; + return vregion; -err_group_region_attach: - mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); - return err; +err_vgroup_vregion_attach: + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region); +err_region_create: + mlxsw_afk_key_info_put(vregion->key_info); +err_key_info_get: + kfree(vregion); + return ERR_PTR(err); } static void -mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_chunk *chunk) +mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup; + struct mlxsw_sp_acl_tcam *tcam = vregion->tcam; + + if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) { + mutex_lock(&tcam->lock); + list_del(&vregion->tlist); + mutex_unlock(&tcam->lock); + cancel_delayed_work_sync(&vregion->rehash.dw); + } + mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion); + if (vregion->region2) + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region); + mlxsw_afk_key_info_put(vregion->key_info); + mutex_destroy(&vregion->lock); + kfree(vregion); +} + +u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + u32 vregion_rehash_intrvl; + + if (WARN_ON(!ops->region_rehash_hints_get)) + return 0; + vregion_rehash_intrvl = tcam->vregion_rehash_intrvl; + return vregion_rehash_intrvl; +} + +int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + u32 val) +{ + 
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + struct mlxsw_sp_acl_tcam_vregion *vregion; + + if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val) + return -EINVAL; + if (WARN_ON(!ops->region_rehash_hints_get)) + return -EOPNOTSUPP; + tcam->vregion_rehash_intrvl = val; + mutex_lock(&tcam->lock); + list_for_each_entry(vregion, &tcam->vregion_list, tlist) { + if (val) + mlxsw_core_schedule_dw(&vregion->rehash.dw, 0); + else + cancel_delayed_work_sync(&vregion->rehash.dw); + } + mutex_unlock(&tcam->lock); + return 0; +} + +static struct mlxsw_sp_acl_tcam_vregion * +mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vgroup *vgroup, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) { - struct mlxsw_sp_acl_tcam_region *region = chunk->region; + struct mlxsw_afk_element_usage vregion_elusage; + struct mlxsw_sp_acl_tcam_vregion *vregion; + bool need_split; - list_del(&chunk->list); - if (list_empty(®ion->chunk_list)) { - mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region); - mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region); + vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority, + elusage, &need_split); + if (vregion) { + if (need_split) { + /* According to priority, new vchunk should belong to + * an existing vregion. However, this vchunk needs + * elements that vregion does not contain. We need + * to split the existing vregion into two and create + * a new vregion for the new vchunk in between. + * This is not supported now. + */ + return ERR_PTR(-EOPNOTSUPP); + } + vregion->ref_count++; + return vregion; } + + mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage, + &vregion_elusage); + + return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority, + &vregion_elusage); +} + +static void +mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion) +{ + if (--vregion->ref_count) + return; + mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion); } static struct mlxsw_sp_acl_tcam_chunk * mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage) + struct mlxsw_sp_acl_tcam_vchunk *vchunk, + struct mlxsw_sp_acl_tcam_region *region) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; struct mlxsw_sp_acl_tcam_chunk *chunk; + + chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL); + if (!chunk) + return ERR_PTR(-ENOMEM); + chunk->vchunk = vchunk; + chunk->region = region; + + ops->chunk_init(region->priv, chunk->priv, vchunk->priority); + return chunk; +} + +static void +mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_chunk *chunk) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + ops->chunk_fini(chunk->priv); + kfree(chunk); +} + +static struct mlxsw_sp_acl_tcam_vchunk * +mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vgroup *vgroup, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) +{ + struct mlxsw_sp_acl_tcam_vregion *vregion; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; int err; if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO) return ERR_PTR(-EINVAL); - chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL); - if (!chunk) + vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL); + if (!vchunk) return ERR_PTR(-ENOMEM); - chunk->priority = priority; - chunk->group = group; - chunk->ref_count = 1; - - 
err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority, - elusage, chunk); - if (err) - goto err_chunk_assoc; + INIT_LIST_HEAD(&vchunk->ventry_list); + vchunk->priority = priority; + vchunk->vgroup = vgroup; + vchunk->ref_count = 1; + + vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup, + priority, elusage); + if (IS_ERR(vregion)) { + err = PTR_ERR(vregion); + goto err_vregion_get; + } - ops->chunk_init(chunk->region->priv, chunk->priv, priority); + vchunk->vregion = vregion; - err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node, - mlxsw_sp_acl_tcam_chunk_ht_params); + err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node, + mlxsw_sp_acl_tcam_vchunk_ht_params); if (err) goto err_rhashtable_insert; - return chunk; + mutex_lock(&vregion->lock); + vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, + vchunk->vregion->region); + if (IS_ERR(vchunk->chunk)) { + mutex_unlock(&vregion->lock); + err = PTR_ERR(vchunk->chunk); + goto err_chunk_create; + } + mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion); + list_add_tail(&vchunk->list, &vregion->vchunk_list); + mutex_unlock(&vregion->lock); + + return vchunk; + +err_chunk_create: + rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node, + mlxsw_sp_acl_tcam_vchunk_ht_params); err_rhashtable_insert: - ops->chunk_fini(chunk->priv); - mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); -err_chunk_assoc: - kfree(chunk); + mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion); +err_vregion_get: + kfree(vchunk); return ERR_PTR(err); } static void -mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_chunk *chunk) +mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk) { - const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_group *group = chunk->group; - - rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node, - mlxsw_sp_acl_tcam_chunk_ht_params); - ops->chunk_fini(chunk->priv); - mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk); - kfree(chunk); + struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion; + struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup; + + mutex_lock(&vregion->lock); + mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion); + list_del(&vchunk->list); + if (vchunk->chunk2) + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2); + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk); + mutex_unlock(&vregion->lock); + rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node, + mlxsw_sp_acl_tcam_vchunk_ht_params); + mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion); + kfree(vchunk); } -static struct mlxsw_sp_acl_tcam_chunk * -mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - unsigned int priority, - struct mlxsw_afk_element_usage *elusage) +static struct mlxsw_sp_acl_tcam_vchunk * +mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vgroup *vgroup, + unsigned int priority, + struct mlxsw_afk_element_usage *elusage) { - struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; - chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority, - mlxsw_sp_acl_tcam_chunk_ht_params); - if (chunk) { - if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info, + vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority, + mlxsw_sp_acl_tcam_vchunk_ht_params); + if (vchunk) { + if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info, 
elusage))) return ERR_PTR(-EINVAL); - chunk->ref_count++; - return chunk; + vchunk->ref_count++; + return vchunk; } - return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group, - priority, elusage); + return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup, + priority, elusage); } -static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_chunk *chunk) +static void +mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk) { - if (--chunk->ref_count) + if (--vchunk->ref_count) return; - mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk); -} - -static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp) -{ - const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - - return ops->entry_priv_size; + mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk); } -static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, - struct mlxsw_sp_acl_tcam_entry *entry, - struct mlxsw_sp_acl_rule_info *rulei) +static struct mlxsw_sp_acl_tcam_entry * +mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_tcam_chunk *chunk) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk *chunk; - struct mlxsw_sp_acl_tcam_region *region; + struct mlxsw_sp_acl_tcam_entry *entry; int err; - chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority, - &rulei->values.elusage); - if (IS_ERR(chunk)) - return PTR_ERR(chunk); - - region = chunk->region; + entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL); + if (!entry) + return ERR_PTR(-ENOMEM); + entry->ventry = ventry; + entry->chunk = chunk; - err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv, - entry->priv, rulei); + err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv, + entry->priv, ventry->rulei); if (err) goto err_entry_add; - entry->chunk = chunk; - return 0; + return entry; err_entry_add: - mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); - return err; + kfree(entry); + return ERR_PTR(err); } -static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_entry *entry) +static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_entry *entry) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; - struct mlxsw_sp_acl_tcam_region *region = chunk->region; - ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv); - mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk); + ops->entry_del(mlxsw_sp, entry->chunk->region->priv, + entry->chunk->priv, entry->priv); + kfree(entry); } static int mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_region *region, struct mlxsw_sp_acl_tcam_entry *entry, struct mlxsw_sp_acl_rule_info *rulei) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; - struct mlxsw_sp_acl_tcam_region *region = chunk->region; - return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv, + return ops->entry_action_replace(mlxsw_sp, region->priv, entry->priv, rulei); } @@ -799,13 +1146,377 @@ mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, bool *activity) { const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; - struct mlxsw_sp_acl_tcam_chunk 
*chunk = entry->chunk; - struct mlxsw_sp_acl_tcam_region *region = chunk->region; - return ops->entry_activity_get(mlxsw_sp, region->priv, + return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv, entry->priv, activity); } +static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vgroup *vgroup, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_vregion *vregion; + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + int err; + + vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority, + &rulei->values.elusage); + if (IS_ERR(vchunk)) + return PTR_ERR(vchunk); + + ventry->vchunk = vchunk; + ventry->rulei = rulei; + vregion = vchunk->vregion; + + mutex_lock(&vregion->lock); + ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, + vchunk->chunk); + if (IS_ERR(ventry->entry)) { + mutex_unlock(&vregion->lock); + err = PTR_ERR(ventry->entry); + goto err_entry_create; + } + + list_add_tail(&ventry->list, &vchunk->ventry_list); + mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk); + mutex_unlock(&vregion->lock); + + return 0; + +err_entry_create: + mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk); + return err; +} + +static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk; + struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion; + + mutex_lock(&vregion->lock); + mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk); + list_del(&ventry->list); + mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry); + mutex_unlock(&vregion->lock); + mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk); +} + +static int +mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk; + + return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, + vchunk->vregion->region, + ventry->entry, rulei); +} + +static int +mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + bool *activity) +{ + return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, + ventry->entry, activity); +} + +static int +mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_ventry *ventry, + struct mlxsw_sp_acl_tcam_chunk *chunk, + int *credits) +{ + struct mlxsw_sp_acl_tcam_entry *new_entry; + + /* First check if the entry is not already where we want it to be. 
*/ + if (ventry->entry->chunk == chunk) + return 0; + + if (--(*credits) < 0) + return 0; + + new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk); + if (IS_ERR(new_entry)) + return PTR_ERR(new_entry); + mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry); + ventry->entry = new_entry; + return 0; +} + +static int +mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk, + struct mlxsw_sp_acl_tcam_region *region, + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx) +{ + struct mlxsw_sp_acl_tcam_chunk *new_chunk; + + new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region); + if (IS_ERR(new_chunk)) { + if (ctx->this_is_rollback) + vchunk->vregion->failed_rollback = true; + return PTR_ERR(new_chunk); + } + vchunk->chunk2 = vchunk->chunk; + vchunk->chunk = new_chunk; + ctx->current_vchunk = vchunk; + ctx->start_ventry = NULL; + ctx->stop_ventry = NULL; + return 0; +} + +static void +mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk, + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx) +{ + mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2); + vchunk->chunk2 = NULL; + ctx->current_vchunk = NULL; +} + +static int +mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vchunk *vchunk, + struct mlxsw_sp_acl_tcam_region *region, + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx, + int *credits) +{ + struct mlxsw_sp_acl_tcam_ventry *ventry; + int err; + + if (vchunk->chunk->region != region) { + err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk, + region, ctx); + if (err) + return err; + } else if (!vchunk->chunk2) { + /* The chunk is already as it should be, nothing to do. */ + return 0; + } + + /* If the migration got interrupted, we have the ventry to start from + * stored in context. + */ + if (ctx->start_ventry) + ventry = ctx->start_ventry; + else + ventry = list_first_entry(&vchunk->ventry_list, + typeof(*ventry), list); + + list_for_each_entry_from(ventry, &vchunk->ventry_list, list) { + /* During rollback, once we reach the ventry that failed + * to migrate, we are done. + */ + if (ventry == ctx->stop_ventry) + break; + + err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry, + vchunk->chunk, credits); + if (err) { + if (ctx->this_is_rollback) + return err; + /* Swap the chunk and chunk2 pointers so the follow-up + * rollback call will see the original chunk pointer + * in vchunk->chunk. + */ + swap(vchunk->chunk, vchunk->chunk2); + /* The rollback has to be done from beginning of the + * chunk, that is why we have to null the start_ventry. + * However, we know where to stop the rollback, + * at the current ventry. + */ + ctx->start_ventry = NULL; + ctx->stop_ventry = ventry; + return err; + } else if (*credits < 0) { + /* We are out of credits, the rest of the ventries + * will be migrated later. Save the ventry + * which we ended with. + */ + ctx->start_ventry = ventry; + return 0; + } + } + + mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx); + return 0; +} + +static int +mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion, + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx, + int *credits) +{ + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + int err; + + /* If the migration got interrupted, we have the vchunk + * we are working on stored in context. 
+ */ + if (ctx->current_vchunk) + vchunk = ctx->current_vchunk; + else + vchunk = list_first_entry(&vregion->vchunk_list, + typeof(*vchunk), list); + + list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) { + err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk, + vregion->region, + ctx, credits); + if (err || *credits < 0) + return err; + } + return 0; +} + +static int +mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion, + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx, + int *credits) +{ + int err, err2; + + trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion); + mutex_lock(&vregion->lock); + err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, + ctx, credits); + if (err) { + /* In case migration was not successful, we need to swap + * so the original region pointer is assigned again + * to vregion->region. + */ + swap(vregion->region, vregion->region2); + ctx->current_vchunk = NULL; + ctx->this_is_rollback = true; + err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, + ctx, credits); + if (err2) + vregion->failed_rollback = true; + } + mutex_unlock(&vregion->lock); + trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion); + return err; +} + +static bool +mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx) +{ + return ctx->hints_priv; +} + +static int +mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion, + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion); + struct mlxsw_sp_acl_tcam_region *new_region; + void *hints_priv; + int err; + + trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion); + if (vregion->failed_rollback) + return -EBUSY; + + hints_priv = ops->region_rehash_hints_get(vregion->region->priv); + if (IS_ERR(hints_priv)) + return PTR_ERR(hints_priv); + + new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam, + vregion, hints_priv); + if (IS_ERR(new_region)) { + err = PTR_ERR(new_region); + goto err_region_create; + } + + /* vregion->region contains the pointer to the new region + * we are going to migrate to. 
+ */ + vregion->region2 = vregion->region; + vregion->region = new_region; + err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, + vregion->region2->group, + new_region, priority, + vregion->region2); + if (err) + goto err_group_region_attach; + + ctx->hints_priv = hints_priv; + ctx->this_is_rollback = false; + + return 0; + +err_group_region_attach: + vregion->region = vregion->region2; + vregion->region2 = NULL; + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region); +err_region_create: + ops->region_rehash_hints_put(hints_priv); + return err; +} + +static void +mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion, + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx) +{ + struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2; + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + + if (!vregion->failed_rollback) { + vregion->region2 = NULL; + mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region); + mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region); + } + ops->region_rehash_hints_put(ctx->hints_priv); + ctx->hints_priv = NULL; +} + +static void +mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_vregion *vregion, + int *credits) +{ + struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx; + int err; + + /* Check if the previous rehash work was interrupted + * which means we have to continue it now. + * If not, start a new rehash. + */ + if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) { + err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp, + vregion, ctx); + if (err) { + if (err != -EAGAIN) + dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n"); + return; + } + } + + err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion, + ctx, credits); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n"); + if (vregion->failed_rollback) { + trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp, + vregion); + dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n"); + } + } + + if (*credits >= 0) + mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx); +} + static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_SRC_SYS_PORT, MLXSW_AFK_ELEMENT_DMAC_32_47, @@ -856,20 +1567,11 @@ static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = { ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns) struct mlxsw_sp_acl_tcam_flower_ruleset { - struct mlxsw_sp_acl_tcam_group group; + struct mlxsw_sp_acl_tcam_vgroup vgroup; }; struct mlxsw_sp_acl_tcam_flower_rule { - struct mlxsw_sp_acl_tcam_entry entry; -}; - -struct mlxsw_sp_acl_tcam_mr_ruleset { - struct mlxsw_sp_acl_tcam_chunk *chunk; - struct mlxsw_sp_acl_tcam_group group; -}; - -struct mlxsw_sp_acl_tcam_mr_rule { - struct mlxsw_sp_acl_tcam_entry entry; + struct mlxsw_sp_acl_tcam_ventry ventry; }; static int @@ -880,10 +1582,10 @@ mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, - mlxsw_sp_acl_tcam_patterns, - MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, - tmplt_elusage); + return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup, + mlxsw_sp_acl_tcam_patterns, + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, + tmplt_elusage, true); } static void @@ -892,7 +1594,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp, { struct 
mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); + mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup); } static int @@ -903,7 +1605,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group, + return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group, mlxsw_sp_port, ingress); } @@ -915,7 +1617,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group, + mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group, mlxsw_sp_port, ingress); } @@ -924,13 +1626,7 @@ mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv) { struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; - return mlxsw_sp_acl_tcam_group_id(&ruleset->group); -} - -static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp) -{ - return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) + - mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); + return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group); } static int @@ -941,8 +1637,8 @@ mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, - &rule->entry, rulei); + return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup, + &rule->ventry, rulei); } static void @@ -950,12 +1646,11 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) { struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; - mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); + mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry); } static int mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp, - void *ruleset_priv, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei) { @@ -968,8 +1663,8 @@ mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry, - activity); + return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry, + activity); } static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { @@ -979,13 +1674,22 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id, - .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size, + .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, .rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace, .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get, }; +struct mlxsw_sp_acl_tcam_mr_ruleset { + struct mlxsw_sp_acl_tcam_vchunk *vchunk; + struct mlxsw_sp_acl_tcam_vgroup vgroup; +}; + +struct mlxsw_sp_acl_tcam_mr_rule { + struct mlxsw_sp_acl_tcam_ventry ventry; +}; + static int mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, @@ -995,10 +1699,10 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp, struct 
mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; int err; - err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, - mlxsw_sp_acl_tcam_patterns, - MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, - tmplt_elusage); + err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup, + mlxsw_sp_acl_tcam_patterns, + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, + tmplt_elusage, false); if (err) return err; @@ -1008,17 +1712,18 @@ mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp, * specific ACL Group ID which must exist in HW before multicast router * is initialized. */ - ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group, - 1, tmplt_elusage); - if (IS_ERR(ruleset->chunk)) { - err = PTR_ERR(ruleset->chunk); + ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, + &ruleset->vgroup, 1, + tmplt_elusage); + if (IS_ERR(ruleset->vchunk)) { + err = PTR_ERR(ruleset->vchunk); goto err_chunk_get; } return 0; err_chunk_get: - mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); + mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup); return err; } @@ -1027,8 +1732,8 @@ mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv) { struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; - mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk); - mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); + mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk); + mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup); } static int @@ -1053,13 +1758,7 @@ mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv) { struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; - return mlxsw_sp_acl_tcam_group_id(&ruleset->group); -} - -static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp) -{ - return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) + - mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); + return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group); } static int @@ -1070,8 +1769,8 @@ mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, - &rule->entry, rulei); + return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup, + &rule->ventry, rulei); } static void @@ -1079,19 +1778,18 @@ mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) { struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); + mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry); } static int mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp, - void *ruleset_priv, void *rule_priv, + void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei) { - struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group, - &rule->entry, rulei); + return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry, + rulei); } static int @@ -1100,8 +1798,8 @@ mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; - return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry, - activity); + return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry, + activity); } static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = { @@ -1111,7 +1809,7 @@ static const struct mlxsw_sp_acl_profile_ops 
mlxsw_sp_acl_tcam_mr_ops = { .ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind, .ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id, - .rule_priv_size = mlxsw_sp_acl_tcam_mr_rule_priv_size, + .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule), .rule_add = mlxsw_sp_acl_tcam_mr_rule_add, .rule_del = mlxsw_sp_acl_tcam_mr_rule_del, .rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 0f1a9dee63de..5965913565a5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -17,6 +17,9 @@ struct mlxsw_sp_acl_tcam { unsigned long *used_groups; /* bit array */ unsigned int max_groups; unsigned int max_group_size; + struct mutex lock; /* guards vregion list */ + struct list_head vregion_list; + u32 vregion_rehash_intrvl; /* ms */ unsigned long priv[0]; /* priv has to be always the last item */ }; @@ -26,6 +29,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam); void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam); +u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam); +int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + u32 val); int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, u32 *priority, bool fillup_priority); @@ -43,13 +51,12 @@ struct mlxsw_sp_acl_profile_ops { struct mlxsw_sp_port *mlxsw_sp_port, bool ingress); u16 (*ruleset_group_id)(void *ruleset_priv); - size_t (*rule_priv_size)(struct mlxsw_sp *mlxsw_sp); + size_t rule_priv_size; int (*rule_add)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei); void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); - int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp, - void *ruleset_priv, void *rule_priv, + int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei); int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, bool *activity); @@ -68,11 +75,12 @@ mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp, (MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN * BITS_PER_BYTE) struct mlxsw_sp_acl_tcam_group; +struct mlxsw_sp_acl_tcam_vregion; struct mlxsw_sp_acl_tcam_region { - struct list_head list; /* Member of a TCAM group */ - struct list_head chunk_list; /* List of chunks under this region */ + struct mlxsw_sp_acl_tcam_vregion *vregion; struct mlxsw_sp_acl_tcam_group *group; + struct list_head list; /* Member of a TCAM group */ enum mlxsw_reg_ptar_key_type key_type; u16 id; /* ACL ID and region ID - they are same */ char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN]; @@ -126,7 +134,6 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_entry *centry); int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_region *cregion, - struct mlxsw_sp_acl_ctcam_chunk *cchunk, struct mlxsw_sp_acl_ctcam_entry *centry, struct mlxsw_sp_acl_rule_info *rulei); static inline unsigned int @@ -163,9 +170,9 @@ struct mlxsw_sp_acl_atcam_region { }; struct mlxsw_sp_acl_atcam_entry_ht_key { - char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, - 
* minus delta bits. - */ + char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded + * key. + */ u8 erp_id; }; @@ -177,7 +184,9 @@ struct mlxsw_sp_acl_atcam_entry { struct rhash_head ht_node; struct list_head list; /* Member in entries_list */ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; - char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */ + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, + * minus delta bits. + */ struct { u16 start; u8 mask; @@ -207,6 +216,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_tcam_region *region, + void *hints_priv, const struct mlxsw_sp_acl_ctcam_region_ops *ops); void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, @@ -224,13 +234,15 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_entry *aentry); int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_atcam_chunk *achunk, struct mlxsw_sp_acl_atcam_entry *aentry, struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); +void * +mlxsw_sp_acl_atcam_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_atcam_rehash_hints_put(void *hints_priv); struct mlxsw_sp_acl_erp_delta; @@ -261,7 +273,11 @@ void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_erp_mask *erp_mask, struct mlxsw_sp_acl_atcam_entry *aentry); -int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion); +void * +mlxsw_sp_acl_erp_rehash_hints_get(struct mlxsw_sp_acl_atcam_region *aregion); +void mlxsw_sp_acl_erp_rehash_hints_put(void *hints_priv); +int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion, + void *hints_priv); void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 12c61e0cc570..9a79b5e11597 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -37,13 +37,19 @@ struct mlxsw_sp_sb_pm { struct mlxsw_cp_sb_occ occ; }; +struct mlxsw_sp_sb_mm { + u32 min_buff; + u32 max_buff; + u16 pool_index; +}; + struct mlxsw_sp_sb_pool_des { enum mlxsw_reg_sbxx_dir dir; u8 pool; }; /* Order ingress pools before egress pools. 
*/ -static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { +static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_INGRESS, 0}, {MLXSW_REG_SBXX_DIR_INGRESS, 1}, {MLXSW_REG_SBXX_DIR_INGRESS, 2}, @@ -55,7 +61,16 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { {MLXSW_REG_SBXX_DIR_EGRESS, 15}, }; -#define MLXSW_SP_SB_POOL_DESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pool_dess) +static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = { + {MLXSW_REG_SBXX_DIR_INGRESS, 0}, + {MLXSW_REG_SBXX_DIR_INGRESS, 1}, + {MLXSW_REG_SBXX_DIR_INGRESS, 2}, + {MLXSW_REG_SBXX_DIR_INGRESS, 3}, + {MLXSW_REG_SBXX_DIR_EGRESS, 0}, + {MLXSW_REG_SBXX_DIR_EGRESS, 1}, + {MLXSW_REG_SBXX_DIR_EGRESS, 2}, + {MLXSW_REG_SBXX_DIR_EGRESS, 3}, +}; #define MLXSW_SP_SB_ING_TC_COUNT 8 #define MLXSW_SP_SB_EG_TC_COUNT 16 @@ -63,16 +78,32 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp_sb_pool_dess[] = { struct mlxsw_sp_sb_port { struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT]; struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT]; - struct mlxsw_sp_sb_pm pms[MLXSW_SP_SB_POOL_DESS_LEN]; + struct mlxsw_sp_sb_pm *pms; }; struct mlxsw_sp_sb { - struct mlxsw_sp_sb_pr prs[MLXSW_SP_SB_POOL_DESS_LEN]; + struct mlxsw_sp_sb_pr *prs; struct mlxsw_sp_sb_port *ports; u32 cell_size; + u32 max_headroom_cells; u64 sb_size; }; +struct mlxsw_sp_sb_vals { + unsigned int pool_count; + const struct mlxsw_sp_sb_pool_des *pool_dess; + const struct mlxsw_sp_sb_pm *pms; + const struct mlxsw_sp_sb_pr *prs; + const struct mlxsw_sp_sb_mm *mms; + const struct mlxsw_sp_sb_cm *cms_ingress; + const struct mlxsw_sp_sb_cm *cms_egress; + const struct mlxsw_sp_sb_cm *cms_cpu; + unsigned int mms_count; + unsigned int cms_ingress_count; + unsigned int cms_egress_count; + unsigned int cms_cpu_count; +}; + u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells) { return mlxsw_sp->sb->cell_size * cells; @@ -83,6 +114,11 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes) return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size); } +u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_sp->sb->max_headroom_cells; +} + static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, u16 pool_index) { @@ -121,7 +157,7 @@ static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index, u32 size, bool infi_size) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpr_pl[MLXSW_REG_SBPR_LEN]; struct mlxsw_sp_sb_pr *pr; int err; @@ -145,7 +181,7 @@ static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool infi_max, u16 pool_index) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbcm_pl[MLXSW_REG_SBCM_LEN]; struct mlxsw_sp_sb_cm *cm; int err; @@ -174,7 +210,7 @@ static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, u16 pool_index, u32 min_buff, u32 max_buff) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; int err; @@ -195,7 +231,7 @@ static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char 
sbpm_pl[MLXSW_REG_SBPM_LEN]; mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, @@ -217,7 +253,7 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, u16 pool_index, struct list_head *bulk_list) { const struct mlxsw_sp_sb_pool_des *des = - &mlxsw_sp_sb_pool_dess[pool_index]; + &mlxsw_sp->sb_vals->pool_dess[pool_index]; char sbpm_pl[MLXSW_REG_SBPM_LEN]; struct mlxsw_sp_sb_pm *pm; @@ -230,24 +266,24 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, (unsigned long) pm); } -static const u16 mlxsw_sp_pbs[] = { - [0] = 2 * ETH_FRAME_LEN, - [9] = 2 * MLXSW_PORT_MAX_MTU, -}; - -#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) +/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */ +#define MLXSW_SP_PB_HEADROOM 25632 #define MLXSW_SP_PB_UNUSED 8 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) { + const u32 pbs[] = { + [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width, + [9] = 2 * MLXSW_PORT_MAX_MTU, + }; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pbmc_pl[MLXSW_REG_PBMC_LEN]; int i; mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2); - for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { - u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]); + for (i = 0; i < ARRAY_SIZE(pbs); i++) { + u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]); if (i == MLXSW_SP_PB_UNUSED) continue; @@ -280,50 +316,119 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); } +static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_sb_port *sb_port) +{ + struct mlxsw_sp_sb_pm *pms; + + pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms), + GFP_KERNEL); + if (!pms) + return -ENOMEM; + sb_port->pms = pms; + return 0; +} + +static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port) +{ + kfree(sb_port->pms); +} + static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp) { unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + struct mlxsw_sp_sb_pr *prs; + int i; + int err; mlxsw_sp->sb->ports = kcalloc(max_ports, sizeof(struct mlxsw_sp_sb_port), GFP_KERNEL); if (!mlxsw_sp->sb->ports) return -ENOMEM; + + prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs), + GFP_KERNEL); + if (!prs) { + err = -ENOMEM; + goto err_alloc_prs; + } + mlxsw_sp->sb->prs = prs; + + for (i = 0; i < max_ports; i++) { + err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]); + if (err) + goto err_sb_port_init; + } + return 0; + +err_sb_port_init: + for (i--; i >= 0; i--) + mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]); + kfree(mlxsw_sp->sb->prs); +err_alloc_prs: + kfree(mlxsw_sp->sb->ports); + return err; } static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp) { + int max_ports = mlxsw_core_max_ports(mlxsw_sp->core); + int i; + + for (i = max_ports - 1; i >= 0; i--) + mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]); + kfree(mlxsw_sp->sb->prs); kfree(mlxsw_sp->sb->ports); } -#define MLXSW_SP_SB_PR_INGRESS_SIZE 12440000 -#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000) -#define MLXSW_SP_SB_PR_EGRESS_SIZE 13232000 - #define MLXSW_SP_SB_PR(_mode, _size) \ { \ .mode = _mode, \ .size = _size, \ } -static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs[] = { +#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000 +#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000) +#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000 + +static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = { /* Ingress 
pools. */ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_SB_PR_INGRESS_SIZE), + MLXSW_SP1_SB_PR_INGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, - MLXSW_SP_SB_PR_INGRESS_MNG_SIZE), + MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE), /* Egress pools. */ - MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP1_SB_PR_EGRESS_SIZE), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI), }; -#define MLXSW_SP_SB_PRS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs) +#define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000 +#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000) +#define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000 + +static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = { + /* Ingress pools. */ + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_INGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE), + /* Egress pools. */ + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP2_SB_PR_EGRESS_SIZE), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), +}; static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_sb_pr *prs, @@ -357,7 +462,7 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, .pool_index = _pool, \ } -static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { +static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = { MLXSW_SP_SB_CM(10000, 8, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), @@ -370,9 +475,20 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { MLXSW_SP_SB_CM(20000, 1, 3), }; -#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) +static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = { + MLXSW_SP_SB_CM(0, 7, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ + MLXSW_SP_SB_CM(20000, 1, 3), +}; -static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { +static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = { MLXSW_SP_SB_CM(1500, 9, 4), MLXSW_SP_SB_CM(1500, 9, 4), MLXSW_SP_SB_CM(1500, 9, 4), @@ -392,7 +508,25 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { MLXSW_SP_SB_CM(1, 0xff, 4), }; -#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) +static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = { + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 
4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(0, 7, 4), + MLXSW_SP_SB_CM(1, 0xff, 4), +}; #define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4) @@ -431,9 +565,6 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, }; -#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ - ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) - static bool mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index) { @@ -447,6 +578,7 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, const struct mlxsw_sp_sb_cm *cms, size_t cms_len) { + const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals; int i; int err; @@ -458,7 +590,7 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; - if (WARN_ON(mlxsw_sp_sb_pool_dess[cm->pool_index].dir != dir)) + if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir)) continue; min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff); @@ -484,27 +616,28 @@ static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; int err; - err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + err = __mlxsw_sp_sb_cms_init(mlxsw_sp, mlxsw_sp_port->local_port, MLXSW_REG_SBXX_DIR_INGRESS, - mlxsw_sp_sb_cms_ingress, - MLXSW_SP_SB_CMS_INGRESS_LEN); + mlxsw_sp->sb_vals->cms_ingress, + mlxsw_sp->sb_vals->cms_ingress_count); if (err) return err; return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_port->local_port, MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_sb_cms_egress, - MLXSW_SP_SB_CMS_EGRESS_LEN); + mlxsw_sp->sb_vals->cms_egress, + mlxsw_sp->sb_vals->cms_egress_count); } static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) { return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS, - mlxsw_sp_cpu_port_sb_cms, - MLXSW_SP_CPU_PORT_SB_MCS_LEN); + mlxsw_sp->sb_vals->cms_cpu, + mlxsw_sp->sb_vals->cms_cpu_count); } #define MLXSW_SP_SB_PM(_min_buff, _max_buff) \ @@ -513,7 +646,7 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) .max_buff = _max_buff, \ } -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { +static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = { /* Ingress pools. */ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), @@ -527,7 +660,18 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { MLXSW_SP_SB_PM(10000, 90000), }; -#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) +static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = { + /* Ingress pools. */ + MLXSW_SP_SB_PM(0, 7), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), + /* Egress pools. 
*/ + MLXSW_SP_SB_PM(0, 7), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), + MLXSW_SP_SB_PM(0, 0), +}; static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) { @@ -535,8 +679,8 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) int i; int err; - for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { - const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp_sb_pms[i]; + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { + const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i]; u32 max_buff; u32 min_buff; @@ -552,12 +696,6 @@ static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -struct mlxsw_sp_sb_mm { - u32 min_buff; - u32 max_buff; - u16 pool_index; -}; - #define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ { \ .min_buff = _min_buff, \ @@ -583,21 +721,19 @@ static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { MLXSW_SP_SB_MM(0, 6, 4), }; -#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) - static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) { char sbmm_pl[MLXSW_REG_SBMM_LEN]; int i; int err; - for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) { + for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) { const struct mlxsw_sp_sb_pool_des *des; const struct mlxsw_sp_sb_mm *mc; u32 min_buff; - mc = &mlxsw_sp_sb_mms[i]; - des = &mlxsw_sp_sb_pool_dess[mc->pool_index]; + mc = &mlxsw_sp->sb_vals->mms[i]; + des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index]; /* All pools used by sb_mm's are initialized using dynamic * thresholds, therefore 'max_buff' isn't specified in cells. */ @@ -611,22 +747,55 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } -static void mlxsw_sp_pool_count(u16 *p_ingress_len, u16 *p_egress_len) +static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp, + u16 *p_ingress_len, u16 *p_egress_len) { int i; - for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; ++i) - if (mlxsw_sp_sb_pool_dess[i].dir == MLXSW_REG_SBXX_DIR_EGRESS) + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) + if (mlxsw_sp->sb_vals->pool_dess[i].dir == + MLXSW_REG_SBXX_DIR_EGRESS) goto out; WARN(1, "No egress pools\n"); out: *p_ingress_len = i; - *p_egress_len = MLXSW_SP_SB_POOL_DESS_LEN - i; + *p_egress_len = mlxsw_sp->sb_vals->pool_count - i; } +const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = { + .pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess), + .pool_dess = mlxsw_sp1_sb_pool_dess, + .pms = mlxsw_sp1_sb_pms, + .prs = mlxsw_sp1_sb_prs, + .mms = mlxsw_sp_sb_mms, + .cms_ingress = mlxsw_sp1_sb_cms_ingress, + .cms_egress = mlxsw_sp1_sb_cms_egress, + .cms_cpu = mlxsw_sp_cpu_port_sb_cms, + .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms), + .cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress), + .cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress), + .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms), +}; + +const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = { + .pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess), + .pool_dess = mlxsw_sp2_sb_pool_dess, + .pms = mlxsw_sp2_sb_pms, + .prs = mlxsw_sp2_sb_prs, + .mms = mlxsw_sp_sb_mms, + .cms_ingress = mlxsw_sp2_sb_cms_ingress, + .cms_egress = mlxsw_sp2_sb_cms_egress, + .cms_cpu = mlxsw_sp_cpu_port_sb_cms, + .mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms), + .cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress), + .cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress), + .cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms), +}; + int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { + u32 max_headroom_size; u16 ing_pool_count; u16 eg_pool_count; int err; @@ -637,18 +806,26 @@ int 
mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE)) return -EIO; + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE)) + return -EIO; + mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL); if (!mlxsw_sp->sb) return -ENOMEM; mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE); mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE); + max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_HEADROOM_SIZE); + /* Round down, because this limit must not be overstepped. */ + mlxsw_sp->sb->max_headroom_cells = max_headroom_size / + mlxsw_sp->sb->cell_size; err = mlxsw_sp_sb_ports_init(mlxsw_sp); if (err) goto err_sb_ports_init; - err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp_sb_prs, - MLXSW_SP_SB_PRS_LEN); + err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs, + mlxsw_sp->sb_vals->pool_count); if (err) goto err_sb_prs_init; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); @@ -657,7 +834,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) err = mlxsw_sp_sb_mms_init(mlxsw_sp); if (err) goto err_sb_mms_init; - mlxsw_sp_pool_count(&ing_pool_count, &eg_pool_count); + mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count); err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, mlxsw_sp->sb->sb_size, ing_pool_count, @@ -705,14 +882,16 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info) { - enum mlxsw_reg_sbxx_dir dir = mlxsw_sp_sb_pool_dess[pool_index].dir; struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + enum mlxsw_reg_sbxx_dir dir; struct mlxsw_sp_sb_pr *pr; + dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir; pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index); pool_info->pool_type = (enum devlink_sb_pool_type) dir; pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size); pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode; + pool_info->cell_size = mlxsw_sp->sb->cell_size; return 0; } @@ -833,7 +1012,7 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, u32 max_buff; int err; - if (dir != mlxsw_sp_sb_pool_dess[pool_index].dir) + if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) return -EINVAL; err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index, @@ -931,7 +1110,7 @@ next_batch: continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, &bulk_list); if (err) @@ -990,7 +1169,7 @@ next_batch: continue; mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); - for (i = 0; i < MLXSW_SP_SB_POOL_DESS_LEN; i++) { + for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, &bulk_list); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 41e607a14846..49933818c6f5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -220,7 +220,7 @@ start_again: for (; i < rif_count; i++) { struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i); - if (!rif) + if (!rif || !mlxsw_sp_rif_dev(rif)) continue; err = 
mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif, counters_enabled); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 9d9aa28684af..46baf3b44309 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -1188,8 +1188,7 @@ static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp, fid_family->mlxsw_sp = mlxsw_sp; INIT_LIST_HEAD(&fid_family->fids_list); - fid_family->fids_bitmap = kcalloc(BITS_TO_LONGS(nr_fids), - sizeof(unsigned long), GFP_KERNEL); + fid_family->fids_bitmap = bitmap_zalloc(nr_fids, GFP_KERNEL); if (!fid_family->fids_bitmap) { err = -ENOMEM; goto err_alloc_fids_bitmap; @@ -1206,7 +1205,7 @@ static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp, return 0; err_fid_flood_tables_init: - kfree(fid_family->fids_bitmap); + bitmap_free(fid_family->fids_bitmap); err_alloc_fids_bitmap: kfree(fid_family); return err; @@ -1217,7 +1216,7 @@ mlxsw_sp_fid_family_unregister(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid_family *fid_family) { mlxsw_sp->fid_core->fid_family_arr[fid_family->type] = NULL; - kfree(fid_family->fids_bitmap); + bitmap_free(fid_family->fids_bitmap); WARN_ON_ONCE(!list_empty(&fid_family->fids_list)); kfree(fid_family); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index ff072358d950..15f804453cd6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -17,13 +17,13 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_block *block, struct mlxsw_sp_acl_rule_info *rulei, - struct tcf_exts *exts, + struct flow_action *flow_action, struct netlink_ext_ack *extack) { - const struct tc_action *a; + const struct flow_action_entry *act; int err, i; - if (!tcf_exts_has_actions(exts)) + if (!flow_action_has_entries(flow_action)) return 0; /* Count action is inserted first */ @@ -31,27 +31,31 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (err) return err; - tcf_exts_for_each_action(i, a, exts) { - if (is_tcf_gact_ok(a)) { + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_ACCEPT: err = mlxsw_sp_acl_rulei_act_terminate(rulei); if (err) { NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action"); return err; } - } else if (is_tcf_gact_shot(a)) { + break; + case FLOW_ACTION_DROP: err = mlxsw_sp_acl_rulei_act_drop(rulei); if (err) { NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action"); return err; } - } else if (is_tcf_gact_trap(a)) { + break; + case FLOW_ACTION_TRAP: err = mlxsw_sp_acl_rulei_act_trap(rulei); if (err) { NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action"); return err; } - } else if (is_tcf_gact_goto_chain(a)) { - u32 chain_index = tcf_gact_goto_chain_index(a); + break; + case FLOW_ACTION_GOTO: { + u32 chain_index = act->chain_index; struct mlxsw_sp_acl_ruleset *ruleset; u16 group_id; @@ -67,7 +71,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action"); return err; } - } else if (is_tcf_mirred_egress_redirect(a)) { + } + break; + case FLOW_ACTION_REDIRECT: { struct net_device *out_dev; struct mlxsw_sp_fid *fid; u16 fid_index; @@ -79,29 +85,33 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, if (err) return err; - out_dev = tcf_mirred_dev(a); + out_dev = act->dev; 
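The hunk above converts mlxsw_sp_flower_parse_actions() from walking tcf_exts to walking the generic flow_action list. A minimal standalone sketch of that iteration pattern, using only the core flow_offload primitives visible in the patch (flow_action_has_entries(), flow_action_for_each(), the FLOW_ACTION_* ids); the helper name count_drop_actions() is illustrative and not part of the patch:

#include <net/flow_offload.h>

/* Count how many entries in a flow_action list are drop actions. */
static int count_drop_actions(const struct flow_action *flow_action)
{
	const struct flow_action_entry *act;
	int i, drops = 0;

	if (!flow_action_has_entries(flow_action))
		return 0;

	flow_action_for_each(i, act, flow_action) {
		if (act->id == FLOW_ACTION_DROP)
			drops++;
	}

	return drops;
}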
err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei, out_dev, extack); if (err) return err; - } else if (is_tcf_mirred_egress_mirror(a)) { - struct net_device *out_dev = tcf_mirred_dev(a); + } + break; + case FLOW_ACTION_MIRRED: { + struct net_device *out_dev = act->dev; err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei, block, out_dev, extack); if (err) return err; - } else if (is_tcf_vlan(a)) { - u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); - u32 action = tcf_vlan_action(a); - u8 prio = tcf_vlan_push_prio(a); - u16 vid = tcf_vlan_push_vid(a); + } + break; + case FLOW_ACTION_VLAN_MANGLE: { + u16 proto = be16_to_cpu(act->vlan.proto); + u8 prio = act->vlan.prio; + u16 vid = act->vlan.vid; return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, - action, vid, + act->id, vid, proto, prio, extack); - } else { + } + default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); return -EOPNOTSUPP; @@ -113,59 +123,49 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { - struct flow_dissector_key_ipv4_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - struct flow_dissector_key_ipv4_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(f->rule, &match); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, - (char *) &key->src, - (char *) &mask->src, 4); + (char *) &match.key->src, + (char *) &match.mask->src, 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, - (char *) &key->dst, - (char *) &mask->dst, 4); + (char *) &match.key->dst, + (char *) &match.mask->dst, 4); } static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { - struct flow_dissector_key_ipv6_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - struct flow_dissector_key_ipv6_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(f->rule, &match); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, - &key->src.s6_addr[0x0], - &mask->src.s6_addr[0x0], 4); + &match.key->src.s6_addr[0x0], + &match.mask->src.s6_addr[0x0], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95, - &key->src.s6_addr[0x4], - &mask->src.s6_addr[0x4], 4); + &match.key->src.s6_addr[0x4], + &match.mask->src.s6_addr[0x4], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63, - &key->src.s6_addr[0x8], - &mask->src.s6_addr[0x8], 4); + &match.key->src.s6_addr[0x8], + &match.mask->src.s6_addr[0x8], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, - &key->src.s6_addr[0xC], - &mask->src.s6_addr[0xC], 4); + &match.key->src.s6_addr[0xC], + &match.mask->src.s6_addr[0xC], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127, - &key->dst.s6_addr[0x0], - &mask->dst.s6_addr[0x0], 4); + &match.key->dst.s6_addr[0x0], + &match.mask->dst.s6_addr[0x0], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95, - &key->dst.s6_addr[0x4], - &mask->dst.s6_addr[0x4], 4); + &match.key->dst.s6_addr[0x4], + &match.mask->dst.s6_addr[0x4], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, 
MLXSW_AFK_ELEMENT_DST_IP_32_63, - &key->dst.s6_addr[0x8], - &mask->dst.s6_addr[0x8], 4); + &match.key->dst.s6_addr[0x8], + &match.mask->dst.s6_addr[0x8], 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, - &key->dst.s6_addr[0xC], - &mask->dst.s6_addr[0xC], 4); + &match.key->dst.s6_addr[0xC], + &match.mask->dst.s6_addr[0xC], 4); } static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, @@ -173,9 +173,10 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, struct tc_cls_flower_offload *f, u8 ip_proto) { - struct flow_dissector_key_ports *key, *mask; + const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_ports match; - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) return 0; if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { @@ -184,16 +185,13 @@ static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp, return -EINVAL; } - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); + flow_rule_match_ports(rule, &match); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT, - ntohs(key->dst), ntohs(mask->dst)); + ntohs(match.key->dst), + ntohs(match.mask->dst)); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT, - ntohs(key->src), ntohs(mask->src)); + ntohs(match.key->src), + ntohs(match.mask->src)); return 0; } @@ -202,9 +200,10 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, struct tc_cls_flower_offload *f, u8 ip_proto) { - struct flow_dissector_key_tcp *key, *mask; + const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_tcp match; - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) return 0; if (ip_proto != IPPROTO_TCP) { @@ -213,14 +212,11 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, return -EINVAL; } - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_TCP, - f->mask); + flow_rule_match_tcp(rule, &match); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS, - ntohs(key->flags), ntohs(mask->flags)); + ntohs(match.key->flags), + ntohs(match.mask->flags)); return 0; } @@ -229,9 +225,10 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, struct tc_cls_flower_offload *f, u16 n_proto) { - struct flow_dissector_key_ip *key, *mask; + const struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_match_ip match; - if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) return 0; if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) { @@ -240,20 +237,18 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, return -EINVAL; } - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IP, - f->mask); + flow_rule_match_ip(rule, &match); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_, - key->ttl, mask->ttl); + match.key->ttl, match.mask->ttl); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN, - key->tos & 0x3, mask->tos & 0x3); + match.key->tos & 0x3, + match.mask->tos & 0x3); 
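A worked example of the two bit operations applied to the tos byte here, using a hypothetical value that is not taken from the patch:

/* tos = 0xb9 = 1011 1001b (hypothetical)
 *   tos & 0x3 == 0x1  -> low two bits, passed as MLXSW_AFK_ELEMENT_IP_ECN
 *   tos >> 6  == 0x2  -> top two bits, passed as MLXSW_AFK_ELEMENT_IP_DSCP
 */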
mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP, - key->tos >> 6, mask->tos >> 6); + match.key->tos >> 6, + match.mask->tos >> 6); return 0; } @@ -263,13 +258,15 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; u16 n_proto_mask = 0; u16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | @@ -286,25 +283,19 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_rulei_priority(rulei, f->common.prio); - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - f->key); - addr_type = key->addr_type; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - struct flow_dissector_key_basic *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->mask); - n_proto_key = ntohs(key->n_proto); - n_proto_mask = ntohs(mask->n_proto); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + n_proto_key = ntohs(match.key->n_proto); + n_proto_mask = ntohs(match.mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; @@ -314,60 +305,53 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, MLXSW_AFK_ELEMENT_ETHERTYPE, n_proto_key, n_proto_mask); - ip_proto = key->ip_proto; + ip_proto = match.key->ip_proto; mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_PROTO, - key->ip_proto, mask->ip_proto); + match.key->ip_proto, + match.mask->ip_proto); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - struct flow_dissector_key_eth_addrs *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->key); - struct flow_dissector_key_eth_addrs *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + flow_rule_match_eth_addrs(rule, &match); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DMAC_32_47, - key->dst, mask->dst, 2); + match.key->dst, + match.mask->dst, 2); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DMAC_0_31, - key->dst + 2, mask->dst + 2, 4); + match.key->dst + 2, + match.mask->dst + 2, 4); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SMAC_32_47, - key->src, mask->src, 2); + match.key->src, + match.mask->src, 2); mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SMAC_0_31, - key->src + 2, mask->src + 2, 4); + match.key->src + 2, + match.mask->src + 2, 4); } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->key); - struct flow_dissector_key_vlan *mask = - 
skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_VLAN, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + flow_rule_match_vlan(rule, &match); if (mlxsw_sp_acl_block_is_egress_bound(block)) { NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress"); return -EOPNOTSUPP; } - if (mask->vlan_id != 0) + if (match.mask->vlan_id != 0) mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VID, - key->vlan_id, - mask->vlan_id); - if (mask->vlan_priority != 0) + match.key->vlan_id, + match.mask->vlan_id); + if (match.mask->vlan_priority != 0) mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_PCP, - key->vlan_priority, - mask->vlan_priority); + match.key->vlan_priority, + match.mask->vlan_priority); } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) @@ -387,7 +371,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; - return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts, + return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, + &f->rule->action, f->common.extack); } @@ -486,7 +471,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rule_get_stats; - tcf_exts_stats_update(f->exts, bytes, packets, lastuse); + flow_stats_update(&f->stats, bytes, packets, lastuse); mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 00db26c96bf5..6400cd644b7a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -145,6 +145,7 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); + u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); char rtdp_pl[MLXSW_REG_RTDP_LEN]; struct ip_tunnel_parm parms; unsigned int type_check; @@ -157,6 +158,7 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp, ikey = mlxsw_sp_ipip_parms4_ikey(parms); mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index); + mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id); type_check = has_ikey ? 
MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY : diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index fb1c48c698f2..1df164a4b06d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -267,8 +267,8 @@ mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nve_mc_record *mc_record; int err; - mc_record = kzalloc(sizeof(*mc_record) + num_max_entries * - sizeof(struct mlxsw_sp_nve_mc_entry), GFP_KERNEL); + mc_record = kzalloc(struct_size(mc_record, entries, num_max_entries), + GFP_KERNEL); if (!mc_record) return ERR_PTR(-ENOMEM); @@ -841,11 +841,9 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, nve->config = config; - err = ops->fdb_replay(params->dev, params->vni); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB"); + err = ops->fdb_replay(params->dev, params->vni, extack); + if (err) goto err_fdb_replay; - } return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 02937ea95bc3..0035640156a1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -28,6 +28,7 @@ struct mlxsw_sp_nve { unsigned int num_nve_tunnels; /* Protected by RTNL */ unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX]; u32 tunnel_index; + u16 ul_rif_index; /* Reserved for Spectrum */ }; struct mlxsw_sp_nve_ops { @@ -41,7 +42,8 @@ struct mlxsw_sp_nve_ops { int (*init)(struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_config *config); void (*fini)(struct mlxsw_sp_nve *nve); - int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni); + int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni, + struct netlink_ext_ack *extack); void (*fdb_clear_offload)(const struct net_device *nve_dev, __be32 vni); }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index 74e564c4ac19..93ccd9fc2266 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -7,6 +7,7 @@ #include #include "reg.h" +#include "spectrum.h" #include "spectrum_nve.h" /* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B) @@ -20,9 +21,9 @@ #define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_LEARN) -static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, - struct netlink_ext_ack *extack) +static bool mlxsw_sp_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, + const struct net_device *dev, + struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_config *cfg = &vxlan->cfg; @@ -112,13 +113,30 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); } +static void +mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, + const struct mlxsw_sp_nve_config *config) +{ + u8 udp_sport; + + mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, + config->ttl); + /* VxLAN driver's default UDP source port range is 32768 (0x8000) + * to 60999 (0xee47). 
Set the upper 8 bits of the UDP source port + * to a random number between 0x80 and 0xee + */ + get_random_bytes(&udp_sport, sizeof(udp_sport)); + udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80; + mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport); + mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4)); +} + static int mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nve_config *config) { char tngcr_pl[MLXSW_REG_TNGCR_LEN]; u16 ul_vr_id; - u8 udp_sport; int err; err = mlxsw_sp_router_tb_id_vr_id(mlxsw_sp, config->ul_tb_id, @@ -126,18 +144,9 @@ mlxsw_sp1_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, if (err) return err; - mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, true, - config->ttl); - /* VxLAN driver's default UDP source port range is 32768 (0x8000) - * to 60999 (0xee47). Set the upper 8 bits of the UDP source port - * to a random number between 0x80 and 0xee - */ - get_random_bytes(&udp_sport, sizeof(udp_sport)); - udp_sport = (udp_sport % (0xee - 0x80 + 1)) + 0x80; - mlxsw_reg_tngcr_nve_udp_sport_prefix_set(tngcr_pl, udp_sport); + mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config); mlxsw_reg_tngcr_learn_enable_set(tngcr_pl, config->learning_en); mlxsw_reg_tngcr_underlay_virtual_router_set(tngcr_pl, ul_vr_id); - mlxsw_reg_tngcr_usipv4_set(tngcr_pl, be32_to_cpu(config->ul_sip.addr4)); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); } @@ -212,11 +221,13 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve) } static int -mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni) +mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni, + struct netlink_ext_ack *extack) { if (WARN_ON(!netif_is_vxlan(nve_dev))) return -EINVAL; - return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier); + return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier, + extack); } static void @@ -229,7 +240,7 @@ mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni) const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .type = MLXSW_SP_NVE_TYPE_VXLAN, - .can_offload = mlxsw_sp1_nve_vxlan_can_offload, + .can_offload = mlxsw_sp_nve_vxlan_can_offload, .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp1_nve_vxlan_init, .fini = mlxsw_sp1_nve_vxlan_fini, @@ -237,26 +248,126 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, }; -static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, - const struct net_device *dev, - struct netlink_ext_ack *extack) +static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp, + bool learning_en) { - return false; + char tnpc_pl[MLXSW_REG_TNPC_LEN]; + + mlxsw_reg_tnpc_pack(tnpc_pl, MLXSW_REG_TNPC_TUNNEL_PORT_NVE, + learning_en); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnpc), tnpc_pl); +} + +static int +mlxsw_sp2_nve_vxlan_config_set(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_nve_config *config) +{ + char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + u16 ul_rif_index; + int err; + + err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, config->ul_tb_id, + &ul_rif_index); + if (err) + return err; + mlxsw_sp->nve->ul_rif_index = ul_rif_index; + + err = mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, config->learning_en); + if (err) + goto err_vxlan_learning_set; + + mlxsw_sp_nve_vxlan_config_prepare(tngcr_pl, config); + mlxsw_reg_tngcr_underlay_rif_set(tngcr_pl, ul_rif_index); + + err = 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); + if (err) + goto err_tngcr_write; + + return 0; + +err_tngcr_write: + mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); +err_vxlan_learning_set: + mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index); + return err; +} + +static void mlxsw_sp2_nve_vxlan_config_clear(struct mlxsw_sp *mlxsw_sp) +{ + char tngcr_pl[MLXSW_REG_TNGCR_LEN]; + + mlxsw_reg_tngcr_pack(tngcr_pl, MLXSW_REG_TNGCR_TYPE_VXLAN, false, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tngcr), tngcr_pl); + mlxsw_sp2_nve_vxlan_learning_set(mlxsw_sp, false); + mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->nve->ul_rif_index); +} + +static int mlxsw_sp2_nve_vxlan_rtdp_set(struct mlxsw_sp *mlxsw_sp, + unsigned int tunnel_index, + u16 ul_rif_index) +{ + char rtdp_pl[MLXSW_REG_RTDP_LEN]; + + mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_NVE, tunnel_index); + mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_index); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl); } static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_config *config) { - return -EOPNOTSUPP; + struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; + int err; + + err = mlxsw_sp_nve_parsing_set(mlxsw_sp, + MLXSW_SP_NVE_VXLAN_PARSING_DEPTH, + config->udp_dport); + if (err) + return err; + + err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config); + if (err) + goto err_config_set; + + err = mlxsw_sp2_nve_vxlan_rtdp_set(mlxsw_sp, nve->tunnel_index, + nve->ul_rif_index); + if (err) + goto err_rtdp_set; + + err = mlxsw_sp_router_nve_promote_decap(mlxsw_sp, config->ul_tb_id, + config->ul_proto, + &config->ul_sip, + nve->tunnel_index); + if (err) + goto err_promote_decap; + + return 0; + +err_promote_decap: +err_rtdp_set: + mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); +err_config_set: + mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, + config->udp_dport); + return err; } static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve) { + struct mlxsw_sp_nve_config *config = &nve->config; + struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; + + mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, + config->ul_proto, &config->ul_sip); + mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); + mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH, + config->udp_dport); } const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = { .type = MLXSW_SP_NVE_TYPE_VXLAN, - .can_offload = mlxsw_sp2_nve_vxlan_can_offload, + .can_offload = mlxsw_sp_nve_vxlan_can_offload, .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp2_nve_vxlan_init, .fini = mlxsw_sp2_nve_vxlan_fini, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 98e5ffd71b91..52fed8c7bf1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -80,7 +80,7 @@ struct mlxsw_sp_router { struct mlxsw_sp_rif { struct list_head nexthop_list; struct list_head neigh_list; - struct net_device *dev; + struct net_device *dev; /* NULL for underlay RIF */ struct mlxsw_sp_fid *fid; unsigned char addr[ETH_ALEN]; int mtu; @@ -120,6 +120,7 @@ struct mlxsw_sp_rif_ipip_lb { struct mlxsw_sp_rif common; struct mlxsw_sp_rif_ipip_lb_config lb_config; u16 ul_vr_id; /* Reserved for Spectrum-2. */ + u16 ul_rif_id; /* Reserved for Spectrum. 
*/ }; struct mlxsw_sp_rif_params_ipip_lb { @@ -363,6 +364,7 @@ enum mlxsw_sp_fib_entry_type { MLXSW_SP_FIB_ENTRY_TYPE_REMOTE, MLXSW_SP_FIB_ENTRY_TYPE_LOCAL, MLXSW_SP_FIB_ENTRY_TYPE_TRAP, + MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE, /* This is a special case of local delivery, where a packet should be * decapsulated on reception. Note that there is no corresponding ENCAP, @@ -440,6 +442,8 @@ struct mlxsw_sp_vr { struct mlxsw_sp_fib *fib4; struct mlxsw_sp_fib *fib6; struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX]; + struct mlxsw_sp_rif *ul_rif; + refcount_t ul_rif_refcnt; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@ -1437,8 +1441,8 @@ mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp, } static int -mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, - struct mlxsw_sp_vr *ul_vr, bool enable) +mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id, + u16 ul_rif_id, bool enable) { struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config; struct mlxsw_sp_rif *rif = &lb_rif->common; @@ -1453,7 +1457,7 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, rif->rif_index, rif->vr_id, rif->dev->mtu); mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt, MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET, - ul_vr->id, saddr4, lb_cf.okey); + ul_vr_id, ul_rif_id, saddr4, lb_cf.okey); break; case MLXSW_SP_L3_PROTO_IPV6: @@ -1468,14 +1472,13 @@ static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_ipip_entry *ipip_entry; struct mlxsw_sp_rif_ipip_lb *lb_rif; - struct mlxsw_sp_vr *ul_vr; int err = 0; ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); if (ipip_entry) { lb_rif = ipip_entry->ol_lb; - ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id]; - err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true); + err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id, + lb_rif->ul_rif_id, true); if (err) goto out; lb_rif->common.mtu = ol_dev->mtu; @@ -3811,13 +3814,11 @@ mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_nexthop *nh; struct fib_nh *fib_nh; - size_t alloc_size; int i; int err; - alloc_size = sizeof(*nh_grp) + - fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop); - nh_grp = kzalloc(alloc_size, GFP_KERNEL); + nh_grp = kzalloc(struct_size(nh_grp, nexthops, fi->fib_nhs), + GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); nh_grp->priv = fi; @@ -3926,6 +3927,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) return !!nh_group->adj_index_valid; case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: return !!nh_group->nh_rif; + case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP: return true; @@ -3961,6 +3963,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) int i; if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL || + fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE || fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP || fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) { nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; @@ -4002,7 +4005,8 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, common); - if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL || + fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) { 
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD; return; @@ -4170,6 +4174,19 @@ static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } +static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + enum mlxsw_reg_ralue_trap_action trap_action; + char ralue_pl[MLXSW_REG_RALUE_LEN]; + + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR; + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); + mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + static int mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, @@ -4209,6 +4226,8 @@ static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE: + return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP: return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, fib_entry, op); @@ -4277,8 +4296,10 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, case RTN_BROADCAST: fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; return 0; + case RTN_BLACKHOLE: + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; + return 0; case RTN_UNREACHABLE: /* fall through */ - case RTN_BLACKHOLE: /* fall through */ case RTN_PROHIBIT: /* Packets hitting these routes need to be trapped, but * can do so with a lower priority than packets directed @@ -5043,13 +5064,11 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_rt6 *mlxsw_sp_rt6; struct mlxsw_sp_nexthop *nh; - size_t alloc_size; int i = 0; int err; - alloc_size = sizeof(*nh_grp) + - fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop); - nh_grp = kzalloc(alloc_size, GFP_KERNEL); + nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6), + GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&nh_grp->fib_list); @@ -5227,6 +5246,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp, */ if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + else if (rt->fib6_type == RTN_BLACKHOLE) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE; else if (rt->fib6_flags & RTF_REJECT) fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt)) @@ -6121,7 +6142,7 @@ static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) mlxsw_reg_ritr_rif_pack(ritr_pl, rif); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); - if (WARN_ON_ONCE(err)) + if (err) return err; mlxsw_reg_ritr_enable_set(ritr_pl, false); @@ -6224,10 +6245,12 @@ static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index, INIT_LIST_HEAD(&rif->nexthop_list); INIT_LIST_HEAD(&rif->neigh_list); - ether_addr_copy(rif->addr, l3_dev->dev_addr); - rif->mtu = l3_dev->mtu; + if (l3_dev) { + ether_addr_copy(rif->addr, l3_dev->dev_addr); + rif->mtu = l3_dev->mtu; + rif->dev = l3_dev; + } rif->vr_id = vr_id; - rif->dev = l3_dev; rif->rif_index = rif_index; return rif; @@ -6251,7 +6274,19 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif) 
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) { - return lb_rif->ul_vr_id; + u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev); + struct mlxsw_sp_vr *ul_vr; + + ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL); + if (WARN_ON(IS_ERR(ul_vr))) + return 0; + + return ul_vr->id; +} + +u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) +{ + return lb_rif->ul_rif_id; } int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) @@ -6284,7 +6319,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, int i, err; type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); - ops = mlxsw_sp->router->rif_ops_arr[type]; + ops = mlxsw_sp->rif_ops_arr[type]; vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack); if (IS_ERR(vr)) @@ -6303,6 +6338,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, goto err_rif_alloc; } dev_hold(rif->dev); + mlxsw_sp->router->rifs[rif_index] = rif; rif->mlxsw_sp = mlxsw_sp; rif->ops = ops; @@ -6329,7 +6365,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, } mlxsw_sp_rif_counters_alloc(rif); - mlxsw_sp->router->rifs[rif_index] = rif; return rif; @@ -6341,6 +6376,7 @@ err_configure: if (fid) mlxsw_sp_fid_put(fid); err_fid_get: + mlxsw_sp->router->rifs[rif_index] = NULL; dev_put(rif->dev); kfree(rif); err_rif_alloc: @@ -6361,7 +6397,6 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); vr = &mlxsw_sp->router->vrs[rif->vr_id]; - mlxsw_sp->router->rifs[rif->rif_index] = NULL; mlxsw_sp_rif_counters_free(rif); for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) mlxsw_sp_mr_rif_del(vr->mr_table[i], rif); @@ -6369,6 +6404,7 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) if (fid) /* Loopback RIFs are not associated with a FID. 
*/ mlxsw_sp_fid_put(fid); + mlxsw_sp->router->rifs[rif->rif_index] = NULL; dev_put(rif->dev); kfree(rif); vr->rif_count--; @@ -6750,7 +6786,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { rif = mlxsw_sp->router->rifs[i]; - if (rif && rif->dev != dev && + if (rif && rif->dev && rif->dev != dev && !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, mlxsw_sp->mac_mask)) { NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix"); @@ -7294,7 +7330,8 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) info.addr = mac; info.vid = vid; - call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, + NULL); } static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = { @@ -7381,7 +7418,8 @@ static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) info.addr = mac; info.vid = 0; - call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info); + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info, + NULL); } static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { @@ -7422,7 +7460,7 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif, } static int -mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) +mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) { struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev); @@ -7434,11 +7472,12 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) if (IS_ERR(ul_vr)) return PTR_ERR(ul_vr); - err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, true); + err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true); if (err) goto err_loopback_op; lb_rif->ul_vr_id = ul_vr->id; + lb_rif->ul_rif_id = 0; ++ul_vr->rif_count; return 0; @@ -7447,32 +7486,213 @@ err_loopback_op: return err; } -static void mlxsw_sp_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif) +static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif) { struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; struct mlxsw_sp_vr *ul_vr; ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id]; - mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr, false); + mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false); --ul_vr->rif_count; mlxsw_sp_vr_put(mlxsw_sp, ul_vr); } -static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = { +static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = { + .type = MLXSW_SP_RIF_TYPE_IPIP_LB, + .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb), + .setup = mlxsw_sp_rif_ipip_lb_setup, + .configure = mlxsw_sp1_rif_ipip_lb_configure, + .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure, +}; + +const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = { + [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, + [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops, + [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, + [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops, +}; + +static int +mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable) +{ + struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp; + char ritr_pl[MLXSW_REG_RITR_LEN]; + + mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF, + ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU); + mlxsw_reg_ritr_loopback_protocol_set(ritr_pl, + MLXSW_REG_RITR_LOOPBACK_GENERIC); + + 
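The mlxsw_sp_ul_rif_get()/mlxsw_sp_ul_rif_put() pair added a little further down in this hunk shares one underlay RIF per virtual router through a refcount: the first get creates the RIF, later gets only bump the count, and the last put destroys it. A minimal generic sketch of that get/put pattern, assuming nothing mlxsw-specific (struct shared_obj and the slot pointer are illustrative only):

#include <linux/refcount.h>
#include <linux/slab.h>

struct shared_obj {
	refcount_t refcnt;
};

/* First get allocates the object; later gets only bump the refcount. */
static struct shared_obj *shared_obj_get(struct shared_obj **slot)
{
	if (*slot && refcount_inc_not_zero(&(*slot)->refcnt))
		return *slot;

	*slot = kzalloc(sizeof(**slot), GFP_KERNEL);
	if (*slot)
		refcount_set(&(*slot)->refcnt, 1);
	return *slot;
}

/* Last put frees the object and clears the slot. */
static void shared_obj_put(struct shared_obj **slot)
{
	if (!refcount_dec_and_test(&(*slot)->refcnt))
		return;

	kfree(*slot);
	*slot = NULL;
}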
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static struct mlxsw_sp_rif * +mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_rif *ul_rif; + u16 rif_index; + int err; + + err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces"); + return ERR_PTR(err); + } + + ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL); + if (!ul_rif) + return ERR_PTR(-ENOMEM); + + mlxsw_sp->router->rifs[rif_index] = ul_rif; + ul_rif->mlxsw_sp = mlxsw_sp; + err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true); + if (err) + goto ul_rif_op_err; + + return ul_rif; + +ul_rif_op_err: + mlxsw_sp->router->rifs[rif_index] = NULL; + kfree(ul_rif); + return ERR_PTR(err); +} + +static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif) +{ + struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp; + + mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false); + mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL; + kfree(ul_rif); +} + +static struct mlxsw_sp_rif * +mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_vr *vr; + int err; + + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + if (refcount_inc_not_zero(&vr->ul_rif_refcnt)) + return vr->ul_rif; + + vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack); + if (IS_ERR(vr->ul_rif)) { + err = PTR_ERR(vr->ul_rif); + goto err_ul_rif_create; + } + + vr->rif_count++; + refcount_set(&vr->ul_rif_refcnt, 1); + + return vr->ul_rif; + +err_ul_rif_create: + mlxsw_sp_vr_put(mlxsw_sp, vr); + return ERR_PTR(err); +} + +static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif) +{ + struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp; + struct mlxsw_sp_vr *vr; + + vr = &mlxsw_sp->router->vrs[ul_rif->vr_id]; + + if (!refcount_dec_and_test(&vr->ul_rif_refcnt)) + return; + + vr->rif_count--; + mlxsw_sp_ul_rif_destroy(ul_rif); + mlxsw_sp_vr_put(mlxsw_sp, vr); +} + +int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id, + u16 *ul_rif_index) +{ + struct mlxsw_sp_rif *ul_rif; + + ASSERT_RTNL(); + + ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL); + if (IS_ERR(ul_rif)) + return PTR_ERR(ul_rif); + *ul_rif_index = ul_rif->rif_index; + + return 0; +} + +void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index) +{ + struct mlxsw_sp_rif *ul_rif; + + ASSERT_RTNL(); + + ul_rif = mlxsw_sp->router->rifs[ul_rif_index]; + if (WARN_ON(!ul_rif)) + return; + + mlxsw_sp_ul_rif_put(ul_rif); +} + +static int +mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); + u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev); + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_rif *ul_rif; + int err; + + ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL); + if (IS_ERR(ul_rif)) + return PTR_ERR(ul_rif); + + err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true); + if (err) + goto err_loopback_op; + + lb_rif->ul_vr_id = 0; + lb_rif->ul_rif_id = ul_rif->rif_index; + + return 0; + +err_loopback_op: + mlxsw_sp_ul_rif_put(ul_rif); + return err; +} + +static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif); + struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; + struct mlxsw_sp_rif *ul_rif; + + ul_rif = 
mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id); + mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false); + mlxsw_sp_ul_rif_put(ul_rif); +} + +static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = { .type = MLXSW_SP_RIF_TYPE_IPIP_LB, .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb), .setup = mlxsw_sp_rif_ipip_lb_setup, - .configure = mlxsw_sp_rif_ipip_lb_configure, - .deconfigure = mlxsw_sp_rif_ipip_lb_deconfigure, + .configure = mlxsw_sp2_rif_ipip_lb_configure, + .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure, }; -static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = { +const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = { [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops, [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, - [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops, + [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops, }; static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp) @@ -7485,8 +7705,6 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp->router->rifs) return -ENOMEM; - mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr; - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 3dbafdeaab2b..cc1de91e8217 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -29,6 +29,7 @@ struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); +u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif); u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index ad5a9b9e1466..536c23c578c3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -305,7 +305,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, parms = mlxsw_sp_ipip_netdev_parms4(to_dev); ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp, - 0, 0, parms.link, tun->fwmark); + 0, 0, parms.link, tun->fwmark, 0); rt = ip_route_output_key(tun->net, &fl4); if (IS_ERR(rt)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index c772109b638d..f6ce386c3036 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -431,46 +431,6 @@ static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan) mlxsw_sp_bridge_vlan_destroy(bridge_vlan); } -static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge, - struct net_device *dev, - unsigned long *brport_flags) -{ - struct mlxsw_sp_bridge_port *bridge_port; - - bridge_port = mlxsw_sp_bridge_port_find(bridge, dev); - if (WARN_ON(!bridge_port)) - return; - - memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags)); -} - -static int mlxsw_sp_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct 
mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac); - memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac, - attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev, - &attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD | - BR_MCAST_FLOOD; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_vlan *bridge_vlan, @@ -620,6 +580,17 @@ err_port_bridge_vlan_learning_set: return err; } +static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port + *mlxsw_sp_port, + struct switchdev_trans *trans, + unsigned long brport_flags) +{ + if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, struct net_device *orig_dev, @@ -866,6 +837,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->orig_dev, attr->u.stp_state); break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port, + trans, + attr->u.brport_flags); + break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, attr->orig_dev, @@ -1962,11 +1938,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, return NULL; } -static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { - .switchdev_port_attr_get = mlxsw_sp_port_attr_get, - .switchdev_port_attr_set = mlxsw_sp_port_attr_set, -}; - static int mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, @@ -2027,6 +1998,7 @@ mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, return 0; if (mlxsw_sp_fid_vni_is_set(fid)) { + NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID"); err = -EINVAL; goto err_vni_exists; } @@ -2213,10 +2185,13 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, int err; fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); - if (!fid) + if (!fid) { + NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID"); return -EINVAL; + } if (mlxsw_sp_fid_vni_is_set(fid)) { + NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID"); err = -EINVAL; goto err_vni_exists; } @@ -2443,7 +2418,7 @@ static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev, ether_addr_copy(info.eth_addr, mac); info.vni = vni; info.offloaded = adding; - call_switchdev_notifiers(type, dev, &info.info); + call_switchdev_notifiers(type, dev, &info.info, NULL); } static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev, @@ -2468,7 +2443,7 @@ mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type, info.addr = mac; info.vid = vid; info.offloaded = offloaded; - call_switchdev_notifiers(type, dev, &info.info); + call_switchdev_notifiers(type, dev, &info.info, NULL); } static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, @@ -2819,7 +2794,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp, return; vxlan_fdb_info.offloaded = true; 
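Throughout the switchdev hunks in this file, call_switchdev_notifiers() gains a trailing struct netlink_ext_ack * argument; the mlxsw call sites, including the one immediately below, pass NULL. A sketch of the updated call shape (example_notify() and its arguments are illustrative, not part of the patch):

#include <net/switchdev.h>

/* Fire a switchdev notifier with the new four-argument signature;
 * NULL means no extended ack is available at this call site.
 */
static void example_notify(struct net_device *dev,
			   struct switchdev_notifier_info *info)
{
	call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, info, NULL);
}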
call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, - &vxlan_fdb_info.info); + &vxlan_fdb_info.info, NULL); mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, vxlan_fdb_info.eth_addr, fdb_info->vid, dev, true); @@ -2832,7 +2807,7 @@ mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp, false); vxlan_fdb_info.offloaded = false; call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, - &vxlan_fdb_info.info); + &vxlan_fdb_info.info, NULL); break; } } @@ -2977,7 +2952,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, } vxlan_fdb_info->offloaded = true; call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, - &vxlan_fdb_info->info); + &vxlan_fdb_info->info, NULL); mlxsw_sp_fid_put(fid); return; } @@ -2998,7 +2973,7 @@ mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp, goto err_fdb_tunnel_uc_op; vxlan_fdb_info->offloaded = true; call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev, - &vxlan_fdb_info->info); + &vxlan_fdb_info->info, NULL); mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, vxlan_fdb_info->eth_addr, vid, dev, true); @@ -3099,23 +3074,34 @@ mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work * struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev); struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info; struct vxlan_config *cfg = &vxlan->cfg; + struct netlink_ext_ack *extack; + extack = switchdev_notifier_info_to_extack(info); vxlan_fdb_info = container_of(info, struct switchdev_notifier_vxlan_fdb_info, info); - if (vxlan_fdb_info->remote_port != cfg->dst_port) + if (vxlan_fdb_info->remote_port != cfg->dst_port) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported"); return -EOPNOTSUPP; - if (vxlan_fdb_info->remote_vni != cfg->vni) - return -EOPNOTSUPP; - if (vxlan_fdb_info->vni != cfg->vni) + } + if (vxlan_fdb_info->remote_vni != cfg->vni || + vxlan_fdb_info->vni != cfg->vni) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported"); return -EOPNOTSUPP; - if (vxlan_fdb_info->remote_ifindex) + } + if (vxlan_fdb_info->remote_ifindex) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported"); return -EOPNOTSUPP; - if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) + } + if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported"); return -EOPNOTSUPP; - if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) + } + if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) { + NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported"); return -EOPNOTSUPP; + } switchdev_work->vxlan_fdb_info = *vxlan_fdb_info; @@ -3133,6 +3119,13 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, struct net_device *br_dev; int err; + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(dev, ptr, + mlxsw_sp_port_dev_check, + mlxsw_sp_port_attr_set); + return notifier_from_errno(err); + } + /* Tunnel devices are not our uppers, so check their master instead */ br_dev = netdev_master_upper_dev_get_rcu(dev); if (!br_dev) @@ -3220,8 +3213,10 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, * the lookup function to return 'vxlan_dev' */ if (flag_untagged && flag_pvid && - mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) + mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) { + NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI"); return 
-EINVAL; + } if (!netif_running(vxlan_dev)) return 0; @@ -3454,6 +3449,11 @@ static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused, mlxsw_sp_port_dev_check, mlxsw_sp_port_obj_del); return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + mlxsw_sp_port_dev_check, + mlxsw_sp_port_attr_set); + return notifier_from_errno(err); } return NOTIFY_DONE; @@ -3541,11 +3541,3 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->bridge); } -void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) -{ - mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; -} - -void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) -{ -} diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 2d4f213e154d..533fe6235b7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -11,7 +11,6 @@ #include #include #include -#include #include "pci.h" #include "core.h" @@ -390,6 +389,18 @@ static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name, name, len); } +static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); + struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; + + ppid->id_len = sizeof(mlxsw_sx->hw_id); + memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len); + + return 0; +} + static const struct net_device_ops mlxsw_sx_port_netdev_ops = { .ndo_open = mlxsw_sx_port_open, .ndo_stop = mlxsw_sx_port_stop, @@ -397,6 +408,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = { .ndo_change_mtu = mlxsw_sx_port_change_mtu, .ndo_get_stats64 = mlxsw_sx_port_get_stats64, .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name, + .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id, }; static void mlxsw_sx_port_get_drvinfo(struct net_device *dev, @@ -901,28 +913,6 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = { .set_link_ksettings = mlxsw_sx_port_set_link_ksettings, }; -static int mlxsw_sx_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev); - struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id); - memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len); - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = { - .switchdev_port_attr_get = mlxsw_sx_port_attr_get, -}; - static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx) { char spad_pl[MLXSW_REG_SPAD_LEN] = {0}; @@ -1034,7 +1024,6 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, dev->netdev_ops = &mlxsw_sx_port_netdev_ops; dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops; - dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops; err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port); if (err) { diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index b881f5d4a7f9..6006d47707cb 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -391,7 +391,7 @@ ks8695_tx_irq(int irq, void *dev_id) ksp->tx_buffers[buff_n].dma_ptr, ksp->tx_buffers[buff_n].length, DMA_TO_DEVICE); - 
dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb); + dev_consume_skb_irq(ksp->tx_buffers[buff_n].skb); ksp->tx_buffers[buff_n].skb = NULL; ksp->tx_ring_used--; } diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index f6ecfa778660..8f72587b5a2c 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -1681,5 +1681,5 @@ MODULE_DESCRIPTION(DRV_NAME " ethernet driver"); MODULE_AUTHOR("Claudio Lanconelli "); MODULE_LICENSE("GPL"); module_param_named(debug, debug.msg_enable, int, 0); -MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., ffff=all)"); +MODULE_PARM_DESC(debug, "Debug verbosity level in amount of bits set (0=none, ..., 31=all)"); MODULE_ALIAS("spi:" DRV_NAME); diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 07c1eb63415a..3a0b289d9771 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -14,61 +14,138 @@ #define EEPROM_INDICATOR_1 (0xA5) #define EEPROM_INDICATOR_2 (0xAA) #define EEPROM_MAC_OFFSET (0x01) -#define MAX_EEPROM_SIZE 512 +#define MAX_EEPROM_SIZE (512) +#define MAX_OTP_SIZE (1024) #define OTP_INDICATOR_1 (0xF3) #define OTP_INDICATOR_2 (0xF7) -static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset, - u32 length, u8 *data) +static int lan743x_otp_power_up(struct lan743x_adapter *adapter) +{ + u32 reg_value; + + reg_value = lan743x_csr_read(adapter, OTP_PWR_DN); + + if (reg_value & OTP_PWR_DN_PWRDN_N_) { + /* clear it and wait to be cleared */ + reg_value &= ~OTP_PWR_DN_PWRDN_N_; + lan743x_csr_write(adapter, OTP_PWR_DN, reg_value); + + usleep_range(100, 20000); + } + + return 0; +} + +static void lan743x_otp_power_down(struct lan743x_adapter *adapter) +{ + u32 reg_value; + + reg_value = lan743x_csr_read(adapter, OTP_PWR_DN); + if (!(reg_value & OTP_PWR_DN_PWRDN_N_)) { + /* set power down bit */ + reg_value |= OTP_PWR_DN_PWRDN_N_; + lan743x_csr_write(adapter, OTP_PWR_DN, reg_value); + } +} + +static void lan743x_otp_set_address(struct lan743x_adapter *adapter, + u32 address) +{ + lan743x_csr_write(adapter, OTP_ADDR_HIGH, (address >> 8) & 0x03); + lan743x_csr_write(adapter, OTP_ADDR_LOW, address & 0xFF); +} + +static void lan743x_otp_read_go(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); + lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_); +} + +static int lan743x_otp_wait_till_not_busy(struct lan743x_adapter *adapter) { unsigned long timeout; - u32 buf; + u32 reg_val; + + timeout = jiffies + HZ; + do { + if (time_after(jiffies, timeout)) { + netif_warn(adapter, drv, adapter->netdev, + "Timeout on OTP_STATUS completion\n"); + return -EIO; + } + udelay(1); + reg_val = lan743x_csr_read(adapter, OTP_STATUS); + } while (reg_val & OTP_STATUS_BUSY_); + + return 0; +} + +static int lan743x_otp_read(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + int ret; int i; - buf = lan743x_csr_read(adapter, OTP_PWR_DN); + if (offset + length > MAX_OTP_SIZE) + return -EINVAL; - if (buf & OTP_PWR_DN_PWRDN_N_) { - /* clear it and wait to be cleared */ - lan743x_csr_write(adapter, OTP_PWR_DN, 0); - - timeout = jiffies + HZ; - do { - udelay(1); - buf = lan743x_csr_read(adapter, OTP_PWR_DN); - if (time_after(jiffies, timeout)) { - netif_warn(adapter, drv, adapter->netdev, - "timeout on OTP_PWR_DN completion\n"); - return -EIO; - } - } while (buf & 
OTP_PWR_DN_PWRDN_N_); + ret = lan743x_otp_power_up(adapter); + if (ret < 0) + return ret; + + ret = lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + for (i = 0; i < length; i++) { + lan743x_otp_set_address(adapter, offset + i); + + lan743x_otp_read_go(adapter); + ret = lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + data[i] = lan743x_csr_read(adapter, OTP_READ_DATA); } + lan743x_otp_power_down(adapter); + + return 0; +} + +static int lan743x_otp_write(struct lan743x_adapter *adapter, u32 offset, + u32 length, u8 *data) +{ + int ret; + int i; + + if (offset + length > MAX_OTP_SIZE) + return -EINVAL; + + ret = lan743x_otp_power_up(adapter); + if (ret < 0) + return ret; + + ret = lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + /* set to BYTE program mode */ lan743x_csr_write(adapter, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); for (i = 0; i < length; i++) { - lan743x_csr_write(adapter, OTP_ADDR1, - ((offset + i) >> 8) & - OTP_ADDR1_15_11_MASK_); - lan743x_csr_write(adapter, OTP_ADDR2, - ((offset + i) & - OTP_ADDR2_10_3_MASK_)); + lan743x_otp_set_address(adapter, offset + i); + lan743x_csr_write(adapter, OTP_PRGM_DATA, data[i]); lan743x_csr_write(adapter, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); lan743x_csr_write(adapter, OTP_CMD_GO, OTP_CMD_GO_GO_); - timeout = jiffies + HZ; - do { - udelay(1); - buf = lan743x_csr_read(adapter, OTP_STATUS); - if (time_after(jiffies, timeout)) { - netif_warn(adapter, drv, adapter->netdev, - "Timeout on OTP_STATUS completion\n"); - return -EIO; - } - } while (buf & OTP_STATUS_BUSY_); + ret = lan743x_otp_wait_till_not_busy(adapter); + if (ret < 0) + return ret; } + lan743x_otp_power_down(adapter); + return 0; } @@ -120,6 +197,9 @@ static int lan743x_eeprom_read(struct lan743x_adapter *adapter, u32 val; int i; + if (offset + length > MAX_EEPROM_SIZE) + return -EINVAL; + retval = lan743x_eeprom_confirm_not_busy(adapter); if (retval) return retval; @@ -148,6 +228,9 @@ static int lan743x_eeprom_write(struct lan743x_adapter *adapter, u32 val; int i; + if (offset + length > MAX_EEPROM_SIZE) + return -EINVAL; + retval = lan743x_eeprom_confirm_not_busy(adapter); if (retval) return retval; @@ -207,6 +290,11 @@ static void lan743x_ethtool_set_msglevel(struct net_device *netdev, static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev) { + struct lan743x_adapter *adapter = netdev_priv(netdev); + + if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) + return MAX_OTP_SIZE; + return MAX_EEPROM_SIZE; } @@ -214,8 +302,14 @@ static int lan743x_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) + ret = lan743x_otp_read(adapter, ee->offset, ee->len, data); + else + ret = lan743x_eeprom_read(adapter, ee->offset, ee->len, data); - return lan743x_eeprom_read(adapter, ee->offset, ee->len, data); + return ret; } static int lan743x_ethtool_set_eeprom(struct net_device *netdev, @@ -224,17 +318,18 @@ static int lan743x_ethtool_set_eeprom(struct net_device *netdev, struct lan743x_adapter *adapter = netdev_priv(netdev); int ret = -EINVAL; - if (ee->magic == LAN743X_EEPROM_MAGIC) - ret = lan743x_eeprom_write(adapter, ee->offset, ee->len, - data); - /* Beware! OTP is One Time Programming ONLY! 
- * So do some strict condition check before messing up - */ - else if ((ee->magic == LAN743X_OTP_MAGIC) && - (ee->offset == 0) && - (ee->len == MAX_EEPROM_SIZE) && - (data[0] == OTP_INDICATOR_1)) - ret = lan743x_otp_write(adapter, ee->offset, ee->len, data); + if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP) { + /* Beware! OTP is One Time Programming ONLY! */ + if (ee->magic == LAN743X_OTP_MAGIC) { + ret = lan743x_otp_write(adapter, ee->offset, + ee->len, data); + } + } else { + if (ee->magic == LAN743X_EEPROM_MAGIC) { + ret = lan743x_eeprom_write(adapter, ee->offset, + ee->len, data); + } + } return ret; } @@ -360,6 +455,10 @@ static const u32 lan743x_set2_hw_cnt_addr[] = { STAT_TX_COUNTER_ROLLOVER_STATUS }; +static const char lan743x_priv_flags_strings[][ETH_GSTRING_LEN] = { + "OTP_ACCESS", +}; + static void lan743x_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { @@ -375,6 +474,10 @@ static void lan743x_ethtool_get_strings(struct net_device *netdev, lan743x_set2_hw_cnt_strings, sizeof(lan743x_set2_hw_cnt_strings)); break; + case ETH_SS_PRIV_FLAGS: + memcpy(data, lan743x_priv_flags_strings, + sizeof(lan743x_priv_flags_strings)); + break; } } @@ -399,6 +502,22 @@ static void lan743x_ethtool_get_ethtool_stats(struct net_device *netdev, } } +static u32 lan743x_ethtool_get_priv_flags(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return adapter->flags; +} + +static int lan743x_ethtool_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + adapter->flags = flags; + + return 0; +} + static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { @@ -411,6 +530,8 @@ static int lan743x_ethtool_get_sset_count(struct net_device *netdev, int sset) ret += ARRAY_SIZE(lan743x_set2_hw_cnt_strings); return ret; } + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(lan743x_priv_flags_strings); default: return -EOPNOTSUPP; } @@ -705,6 +826,8 @@ const struct ethtool_ops lan743x_ethtool_ops = { .set_eeprom = lan743x_ethtool_set_eeprom, .get_strings = lan743x_ethtool_get_strings, .get_ethtool_stats = lan743x_ethtool_get_ethtool_stats, + .get_priv_flags = lan743x_ethtool_get_priv_flags, + .set_priv_flags = lan743x_ethtool_set_priv_flags, .get_sset_count = lan743x_ethtool_get_sset_count, .get_rxnfc = lan743x_ethtool_get_rxnfc, .get_rxfh_key_size = lan743x_ethtool_get_rxfh_key_size, diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 310807ef328b..4d1b4a24907f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1400,7 +1400,8 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx, } static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, - unsigned int frame_length) + unsigned int frame_length, + int nr_frags) { /* called only from within lan743x_tx_xmit_frame. * assuming tx->ring_lock has already been acquired. 
@@ -1410,6 +1411,10 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, /* wrap up previous descriptor */ tx->frame_data0 |= TX_DESC_DATA0_EXT_; + if (nr_frags <= 0) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = tx->frame_data0; @@ -1514,8 +1519,11 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, u32 tx_tail_flags = 0; /* wrap up previous descriptor */ - tx->frame_data0 |= TX_DESC_DATA0_LS_; - tx->frame_data0 |= TX_DESC_DATA0_IOC_; + if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) == + TX_DESC_DATA0_DTYPE_DATA_) { + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; buffer_info = &tx->buffer_info[tx->frame_tail]; @@ -1600,7 +1608,7 @@ static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, } if (gso) - lan743x_tx_frame_add_lso(tx, frame_length); + lan743x_tx_frame_add_lso(tx, frame_length, nr_frags); if (nr_frags <= 0) goto finish; diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 2d6eea18973e..3b02eeae5f45 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -26,6 +26,8 @@ #define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF) #define HW_CFG (0x010) +#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0) +#define HW_CFG_EE_OTP_RELOAD_ BIT(4) #define HW_CFG_LRST_ BIT(1) #define PMT_CTL (0x014) @@ -453,17 +455,19 @@ #define OTP_PWR_DN (0x1000) #define OTP_PWR_DN_PWRDN_N_ BIT(0) -#define OTP_ADDR1 (0x1004) -#define OTP_ADDR1_15_11_MASK_ (0x1F) - -#define OTP_ADDR2 (0x1008) -#define OTP_ADDR2_10_3_MASK_ (0xFF) +#define OTP_ADDR_HIGH (0x1004) +#define OTP_ADDR_LOW (0x1008) #define OTP_PRGM_DATA (0x1010) #define OTP_PRGM_MODE (0x1014) #define OTP_PRGM_MODE_BYTE_ BIT(0) +#define OTP_READ_DATA (0x1018) + +#define OTP_FUNC_CMD (0x1020) +#define OTP_FUNC_CMD_READ_ BIT(0) + #define OTP_TST_CMD (0x1024) #define OTP_TST_CMD_PRGVRFY_ BIT(3) @@ -713,6 +717,9 @@ struct lan743x_adapter { struct lan743x_phy phy; struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS]; struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS]; + +#define LAN743X_ADAPTER_FLAG_OTP BIT(0) + u32 flags; }; #define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index b34055ac476f..e1651756bf9d 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -81,11 +81,13 @@ static void moxart_mac_free_memory(struct net_device *ndev) priv->rx_buf_size, DMA_FROM_DEVICE); if (priv->tx_desc_base) - dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, + dma_free_coherent(&priv->pdev->dev, + TX_REG_DESC_SIZE * TX_DESC_NUM, priv->tx_desc_base, priv->tx_base); if (priv->rx_desc_base) - dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, + dma_free_coherent(&priv->pdev->dev, + RX_REG_DESC_SIZE * RX_DESC_NUM, priv->rx_desc_base, priv->rx_base); kfree(priv->tx_buf_base); @@ -298,7 +300,7 @@ static void moxart_tx_finished(struct net_device *ndev) ndev->stats.tx_packets++; ndev->stats.tx_bytes += priv->tx_skb[tx_tail]->len; - dev_kfree_skb_irq(priv->tx_skb[tx_tail]); + dev_consume_skb_irq(priv->tx_skb[tx_tail]); priv->tx_skb[tx_tail] = NULL; tx_tail = TX_NEXT(tx_tail); @@ -476,6 +478,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv = netdev_priv(ndev); 
priv->ndev = ndev; + priv->pdev = pdev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ndev->base_addr = res->start; @@ -491,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_buf_size = TX_BUF_SIZE; priv->rx_buf_size = RX_BUF_SIZE; - priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * + priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); if (!priv->tx_desc_base) { @@ -499,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev) goto init_fail; } - priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * + priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); if (!priv->rx_desc_base) { diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index bee608b547d1..bf4c3029cd0c 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -292,6 +292,7 @@ #define LINK_STATUS 0x4 struct moxart_mac_priv_t { + struct platform_device *pdev; void __iomem *base; unsigned int reg_maccr; unsigned int reg_imr; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 215a45374d7b..a1d0d6e42533 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -721,7 +721,8 @@ static void ocelot_get_stats64(struct net_device *dev, static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 vid, u16 flags) + u16 vid, u16 flags, + struct netlink_ext_ack *extack) { struct ocelot_port *port = netdev_priv(dev); struct ocelot *ocelot = port->ocelot; @@ -915,6 +916,18 @@ static int ocelot_set_features(struct net_device *dev, return 0; } +static int ocelot_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct ocelot_port *ocelot_port = netdev_priv(dev); + struct ocelot *ocelot = ocelot_port->ocelot; + + ppid->id_len = sizeof(ocelot->base_mac); + memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len); + + return 0; +} + static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_open = ocelot_port_open, .ndo_stop = ocelot_port_stop, @@ -929,6 +942,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, .ndo_set_features = ocelot_set_features, + .ndo_get_port_parent_id = ocelot_get_port_parent_id, }; static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -1012,25 +1026,6 @@ static const struct ethtool_ops ocelot_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; -static int ocelot_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - struct ocelot_port *ocelot_port = netdev_priv(dev); - struct ocelot *ocelot = ocelot_port->ocelot; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(ocelot->base_mac); - memcpy(&attr->u.ppid.id, &ocelot->base_mac, - attr->u.ppid.id_len); - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - static int ocelot_port_attr_stp_state_set(struct ocelot_port *ocelot_port, struct switchdev_trans *trans, u8 state) @@ -1329,11 +1324,6 @@ static int ocelot_port_obj_del(struct net_device *dev, return ret; } -static const struct switchdev_ops ocelot_port_switchdev_ops = { - .switchdev_port_attr_get = ocelot_port_attr_get, - 
.switchdev_port_attr_set = ocelot_port_attr_set, -}; - static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port, struct net_device *bridge) { @@ -1588,6 +1578,28 @@ struct notifier_block ocelot_netdevice_nb __read_mostly = { }; EXPORT_SYMBOL(ocelot_netdevice_nb); +static int ocelot_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +struct notifier_block ocelot_switchdev_nb __read_mostly = { + .notifier_call = ocelot_switchdev_event, +}; +EXPORT_SYMBOL(ocelot_switchdev_nb); + static int ocelot_switchdev_blocking_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -1606,6 +1618,11 @@ static int ocelot_switchdev_blocking_event(struct notifier_block *unused, ocelot_netdevice_dev_check, ocelot_port_obj_del); return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_attr_set); + return notifier_from_errno(err); } return NOTIFY_DONE; @@ -1639,7 +1656,6 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, dev->netdev_ops = &ocelot_port_netdev_ops; dev->ethtool_ops = &ocelot_ethtool_ops; - dev->switchdev_ops = &ocelot_port_switchdev_ops; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 086775f7b52f..ba3b3380b4d0 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -499,6 +499,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, struct phy_device *phy); extern struct notifier_block ocelot_netdevice_nb; +extern struct notifier_block ocelot_switchdev_nb; extern struct notifier_block ocelot_switchdev_blocking_nb; #endif diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index ca3ea2fbfcd0..e7f90101d2e0 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -267,6 +267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct phy *serdes; void __iomem *regs; char res_name[8]; + int phy_mode; u32 port; if (of_property_read_u32(portnp, "reg", &port)) @@ -292,11 +293,11 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) return err; - err = of_get_phy_mode(portnp); - if (err < 0) + phy_mode = of_get_phy_mode(portnp); + if (phy_mode < 0) ocelot->ports[port]->phy_mode = PHY_INTERFACE_MODE_NA; else - ocelot->ports[port]->phy_mode = err; + ocelot->ports[port]->phy_mode = phy_mode; switch (ocelot->ports[port]->phy_mode) { case PHY_INTERFACE_MODE_NA: @@ -304,6 +305,13 @@ static int mscc_ocelot_probe(struct platform_device *pdev) case PHY_INTERFACE_MODE_SGMII: break; case PHY_INTERFACE_MODE_QSGMII: + /* Ensure clock signals and speed is set on all + * QSGMII links + */ + ocelot_port_writel(ocelot->ports[port], + DEV_CLOCK_CFG_LINK_SPEED + (OCELOT_SPEED_1000), + DEV_CLOCK_CFG); break; default: dev_err(ocelot->dev, @@ -329,6 +337,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) } register_netdevice_notifier(&ocelot_netdevice_nb); + register_switchdev_notifier(&ocelot_switchdev_nb); 
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); dev_info(&pdev->dev, "Ocelot switch probed\n"); @@ -345,6 +354,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev) ocelot_deinit(ocelot); unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); + unregister_switchdev_notifier(&ocelot_switchdev_nb); unregister_netdevice_notifier(&ocelot_netdevice_nb); return 0; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 19ce0e605096..e0340f778d8f 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -1408,7 +1408,7 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) if (skb) { ss->stats.tx_bytes += skb->len; ss->stats.tx_packets++; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); if (len) pci_unmap_single(pdev, dma_unmap_addr(&tx->info[idx], diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index b9a1a9f999ea..1a2634cbbb69 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -2173,7 +2173,7 @@ static void netdev_tx_done(struct net_device *dev) np->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); /* Free the original skb. */ - dev_kfree_skb_irq(np->tx_skbuff[entry]); + dev_consume_skb_irq(np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; } if (netif_queue_stopped(dev) && diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 958fced4dacf..9098ee7fe0d1 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1003,7 +1003,7 @@ static void do_tx_done(struct net_device *ndev) addr, len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); atomic_dec(&dev->nr_tx_skbs); } else pci_unmap_page(dev->pci_dev, @@ -1869,56 +1869,28 @@ static unsigned ns83820_mii_write_reg(struct ns83820 *dev, unsigned phy, unsigne static void ns83820_probe_phy(struct net_device *ndev) { struct ns83820 *dev = PRIV(ndev); - static int first; - int i; -#define MII_PHYIDR1 0x02 -#define MII_PHYIDR2 0x03 - -#if 0 - if (!first) { - unsigned tmp; - ns83820_mii_read_reg(dev, 1, 0x09); - ns83820_mii_write_reg(dev, 1, 0x10, 0x0d3e); - - tmp = ns83820_mii_read_reg(dev, 1, 0x00); - ns83820_mii_write_reg(dev, 1, 0x00, tmp | 0x8000); - udelay(1300); - ns83820_mii_read_reg(dev, 1, 0x09); - } -#endif - first = 1; - - for (i=1; i<2; i++) { - int j; - unsigned a, b; - a = ns83820_mii_read_reg(dev, i, MII_PHYIDR1); - b = ns83820_mii_read_reg(dev, i, MII_PHYIDR2); - - //printk("%s: phy %d: 0x%04x 0x%04x\n", - // ndev->name, i, a, b); - - for (j=0; j<0x16; j+=4) { - dprintk("%s: [0x%02x] %04x %04x %04x %04x\n", - ndev->name, j, - ns83820_mii_read_reg(dev, i, 0 + j), - ns83820_mii_read_reg(dev, i, 1 + j), - ns83820_mii_read_reg(dev, i, 2 + j), - ns83820_mii_read_reg(dev, i, 3 + j) - ); - } - } - { - unsigned a, b; - /* read firmware version: memory addr is 0x8402 and 0x8403 */ - ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); - ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); - a = ns83820_mii_read_reg(dev, 1, 0x1d); - - ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); - ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); - b = ns83820_mii_read_reg(dev, 1, 0x1d); - dprintk("version: 0x%04x 0x%04x\n", a, b); + int j; + unsigned a, b; + + for (j = 0; j < 0x16; j += 4) { + dprintk("%s: [0x%02x] %04x %04x %04x %04x\n", + ndev->name, j, + ns83820_mii_read_reg(dev, 1, 0 + j), + 
ns83820_mii_read_reg(dev, 1, 1 + j), + ns83820_mii_read_reg(dev, 1, 2 + j), + ns83820_mii_read_reg(dev, 1, 3 + j) + ); } + + /* read firmware version: memory addr is 0x8402 and 0x8403 */ + ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); + ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); + a = ns83820_mii_read_reg(dev, 1, 0x1d); + + ns83820_mii_write_reg(dev, 1, 0x16, 0x000d); + ns83820_mii_write_reg(dev, 1, 0x1e, 0x810e); + b = ns83820_mii_read_reg(dev, 1, 0x1d); + dprintk("version: 0x%04x 0x%04x\n", a, b); } #endif diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index c805dcbebd02..aaec00912ea0 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -328,7 +328,7 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) } /* We must free the original skb */ - dev_kfree_skb_irq(lp->tx_skb[entry]); + dev_consume_skb_irq(lp->tx_skb[entry]); lp->tx_skb[entry] = NULL; /* and unmap DMA buffer */ dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 82be90075695..feda9644289d 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -3055,7 +3055,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data) /* Updating the statistics block */ swstats->mem_freed += skb->truesize; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); get_info.offset++; if (get_info.offset == get_info.fifo_len + 1) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 5ae3fa82909f..b877acec5cde 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -114,7 +114,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo) /* free SKBs */ for (temp = completed; temp != skb_ptr; temp++) - dev_kfree_skb_irq(*temp); + dev_consume_skb_irq(*temp); } while (more); } @@ -2553,7 +2553,7 @@ static int vxge_add_isr(struct vxgedev *vdev) vxge_debug_init(VXGE_ERR, "%s: Defaulting to INTA", vdev->ndev->name); - goto INTA_MODE; + goto INTA_MODE; } msix_idx = (vdev->vpaths[0].handle->vpath->vp_id * diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index 66f15b05b65e..549898d5d450 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -19,7 +19,6 @@ config NFP tristate "Netronome(R) NFP4000/NFP6000 NIC driver" depends on PCI && PCI_MSI depends on VXLAN || VXLAN=n - depends on MAY_USE_DEVLINK ---help--- This driver supports the Netronome(R) NFP4000/NFP6000 based cards working as a advanced Ethernet NIC. 
It works with both diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e23ca90289f7..f272247d1708 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1266,7 +1266,7 @@ wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u64 imm = insn->imm; /* sign extend */ if (skip) { - meta->skip = true; + meta->flags |= FLAG_INSN_SKIP_NOOP; return 0; } @@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - enum alu_op alu_op, bool skip) + enum alu_op alu_op) { const struct bpf_insn *insn = &meta->insn; - if (skip) { - meta->skip = true; - return 0; - } - wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); @@ -1334,8 +1329,9 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op, insn->src_reg * 2, br_mask, insn->off); - wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, - insn->src_reg * 2 + 1, br_mask, insn->off); + if (is_mbpf_jmp64(meta)) + wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op, + insn->src_reg * 2 + 1, br_mask, insn->off); return 0; } @@ -1390,13 +1386,15 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) else emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg)); - tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); - if (!code->swap) - emit_alu(nfp_prog, reg_none(), - reg_a(reg + 1), carry_op, tmp_reg); - else - emit_alu(nfp_prog, reg_none(), - tmp_reg, carry_op, reg_a(reg + 1)); + if (is_mbpf_jmp64(meta)) { + tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); + if (!code->swap) + emit_alu(nfp_prog, reg_none(), + reg_a(reg + 1), carry_op, tmp_reg); + else + emit_alu(nfp_prog, reg_none(), + tmp_reg, carry_op, reg_a(reg + 1)); + } emit_br(nfp_prog, code->br_mask, insn->off, 0); @@ -1423,8 +1421,9 @@ static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) } emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg)); - emit_alu(nfp_prog, reg_none(), - reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); + if (is_mbpf_jmp64(meta)) + emit_alu(nfp_prog, reg_none(), + reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1)); emit_br(nfp_prog, code->br_mask, insn->off, 0); return 0; @@ -1963,6 +1962,9 @@ static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) */ static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) { + if (!shift_amt) + return 0; + if (shift_amt < 32) { emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, @@ -2075,6 +2077,9 @@ static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) */ static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) { + if (!shift_amt) + return 0; + if (shift_amt < 32) { emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, shift_amt); @@ -2176,6 +2181,9 @@ static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) */ static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) { + if (!shift_amt) + return 0; + if (shift_amt < 32) { emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF, shift_amt); @@ -2309,7 +2317,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct 
nfp_insn_meta *meta) static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR); } static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2319,7 +2327,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND); } static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2329,7 +2337,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR); } static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2339,7 +2347,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD); } static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2349,7 +2357,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); + return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB); } static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -2384,10 +2392,13 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) { - /* Set signedness bit (MSB of result). */ - emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0)); - emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst), - SHF_SC_R_SHF, shift_amt); + if (shift_amt) { + /* Set signedness bit (MSB of result). 
*/ + emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, + reg_imm(0)); + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, + reg_b(dst), SHF_SC_R_SHF, shift_amt); + } wrp_immed(nfp_prog, reg_both(dst + 1), 0); return 0; @@ -2425,18 +2436,75 @@ static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return __ashr_imm(nfp_prog, dst, insn->imm); } +static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + if (shift_amt) + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_SHF, shift_amt); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + return 0; +} + +static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; + + return __shr_imm(nfp_prog, dst, insn->imm); +} + +static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin_src; + umax = meta->umax_src; + if (umin == umax) + return __shr_imm(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_R_SHF); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + return 0; +} + +static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + if (shift_amt) + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE, + reg_b(dst), SHF_SC_L_SHF, shift_amt); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + return 0; +} + static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; - if (!insn->imm) - return 1; /* TODO: zero shift means indirect */ + return __shl_imm(nfp_prog, dst, insn->imm); +} - emit_shf(nfp_prog, reg_both(insn->dst_reg * 2), - reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2), - SHF_SC_L_SHF, insn->imm); - wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); +static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin_src; + umax = meta->umax_src; + if (umin == umax) + return __shl_imm(nfp_prog, dst, umin); + src = insn->src_reg * 2; + shl_reg64_lt32_low(nfp_prog, dst, src); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); return 0; } @@ -3048,6 +3116,19 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + swreg tmp_reg; + + tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog)); + emit_alu(nfp_prog, reg_none(), + reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); + emit_br(nfp_prog, BR_BEQ, insn->off, 0); + + return 0; +} + static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -3061,9 +3142,10 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) /* Upper word of the mask can only be 0 or ~0 from sign extension, * so either ignore it or OR the whole thing in. 
*/ - if (imm >> 32) + if (is_mbpf_jmp64(meta) && imm >> 32) { emit_alu(nfp_prog, reg_none(), reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); + } emit_br(nfp_prog, BR_BNE, insn->off, 0); return 0; @@ -3073,11 +3155,16 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ + bool is_jmp32 = is_mbpf_jmp32(meta); swreg tmp_reg; if (!imm) { - emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), - ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); + if (is_jmp32) + emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE, + reg_b(insn->dst_reg * 2)); + else + emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), + ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); emit_br(nfp_prog, BR_BNE, insn->off, 0); return 0; } @@ -3087,6 +3174,9 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg); emit_br(nfp_prog, BR_BNE, insn->off, 0); + if (is_jmp32) + return 0; + tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg); @@ -3101,10 +3191,13 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2), ALU_OP_XOR, reg_b(insn->src_reg * 2)); - emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1), - ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1)); - emit_alu(nfp_prog, reg_none(), - imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog)); + if (is_mbpf_jmp64(meta)) { + emit_alu(nfp_prog, imm_b(nfp_prog), + reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, + reg_b(insn->src_reg * 2 + 1)); + emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, + imm_b(nfp_prog)); + } emit_br(nfp_prog, BR_BEQ, insn->off, 0); return 0; @@ -3182,7 +3275,7 @@ bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL); } else { ret_tgt = nfp_prog_current_offset(nfp_prog) + 2; - emit_br(nfp_prog, BR_UNC, meta->n + 1 + meta->insn.imm, 1); + emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1); offset_br = nfp_prog_current_offset(nfp_prog); } wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL); @@ -3321,7 +3414,10 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_DIV | BPF_X] = div_reg, [BPF_ALU | BPF_DIV | BPF_K] = div_imm, [BPF_ALU | BPF_NEG] = neg_reg, + [BPF_ALU | BPF_LSH | BPF_X] = shl_reg, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, + [BPF_ALU | BPF_RSH | BPF_X] = shr_reg, + [BPF_ALU | BPF_RSH | BPF_K] = shr_imm, [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg, [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm, [BPF_ALU | BPF_END | BPF_X] = end_reg32, @@ -3369,6 +3465,28 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, + [BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm, + [BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm, + [BPF_JMP32 | BPF_JSGT | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSGE | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSLT | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSLE | BPF_K] =cmp_imm, + [BPF_JMP32 | BPF_JSET | BPF_K] =jset_imm, + [BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm, + [BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg, + [BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg, + [BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg, + [BPF_JMP32 
| BPF_JLT | BPF_X] = cmp_reg, + [BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg, + [BPF_JMP32 | BPF_JSGT | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSGE | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSLT | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSLE | BPF_X] =cmp_reg, + [BPF_JMP32 | BPF_JSET | BPF_X] =jset_reg, + [BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_CALL] = call, [BPF_JMP | BPF_EXIT] = jmp_exit, }; @@ -3395,9 +3513,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) int err; list_for_each_entry(meta, &nfp_prog->insns, l) { - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; - if (BPF_CLASS(meta->insn.code) != BPF_JMP) + if (!is_mbpf_jmp(meta)) continue; if (meta->insn.code == (BPF_JMP | BPF_EXIT) && !nfp_is_main_function(meta)) @@ -3439,7 +3557,7 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) jmp_dst = meta->jmp_dst; - if (jmp_dst->skip) { + if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) { pr_err("Branch landing on removed instruction!!\n"); return -ELOOP; } @@ -3689,7 +3807,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog) return nfp_prog->error; } - if (meta->skip) { + if (meta->flags & FLAG_INSN_SKIP_MASK) { nfp_prog->n_translated++; continue; } @@ -3737,10 +3855,10 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) /* Programs start with R6 = R1 but we ignore the skb pointer */ if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn.src_reg == 1 && insn.dst_reg == 6) - meta->skip = true; + meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; /* Return as soon as something doesn't match */ - if (!meta->skip) + if (!(meta->flags & FLAG_INSN_SKIP_MASK)) return; } } @@ -3755,19 +3873,17 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog) list_for_each_entry(meta, &nfp_prog->insns, l) { struct bpf_insn insn = meta->insn; - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; - if (BPF_CLASS(insn.code) != BPF_ALU && - BPF_CLASS(insn.code) != BPF_ALU64 && - BPF_CLASS(insn.code) != BPF_JMP) + if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta)) continue; if (BPF_SRC(insn.code) != BPF_K) continue; if (insn.imm >= 0) continue; - if (BPF_CLASS(insn.code) == BPF_JMP) { + if (is_mbpf_jmp(meta)) { switch (BPF_OP(insn.code)) { case BPF_JGE: case BPF_JSGE: @@ -3829,7 +3945,7 @@ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) if (meta2->flags & FLAG_INSN_IS_JUMP_DST) continue; - meta2->skip = true; + meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; } } @@ -3869,8 +3985,8 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog) meta3->flags & FLAG_INSN_IS_JUMP_DST) continue; - meta2->skip = true; - meta3->skip = true; + meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; + meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; } } @@ -4065,7 +4181,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) } head_ld_meta->paired_st = &head_st_meta->insn; - head_st_meta->skip = true; + head_st_meta->flags |= + FLAG_INSN_SKIP_PREC_DEPENDENT; } else { head_ld_meta->ldst_gather_len = 0; } @@ -4098,8 +4215,8 @@ static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog) head_ld_meta = meta1; head_st_meta = meta2; } else { - meta1->skip = true; - meta2->skip = true; + meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; + meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT; } head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld); @@ -4124,7 +4241,7 @@ static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog) if (meta->flags & FLAG_INSN_IS_JUMP_DST) cache_avail = false; - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; 
insn = &meta->insn; @@ -4210,7 +4327,7 @@ start_new: } list_for_each_entry(meta, &nfp_prog->insns, l) { - if (meta->skip) + if (meta->flags & FLAG_INSN_SKIP_MASK) continue; if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) { @@ -4246,7 +4363,8 @@ static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog) u32 id; nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) { - if (meta1->skip || meta2->skip) + if (meta1->flags & FLAG_INSN_SKIP_MASK || + meta2->flags & FLAG_INSN_SKIP_MASK) continue; if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) || @@ -4325,7 +4443,7 @@ int nfp_bpf_jit(struct nfp_prog *nfp_prog) return ret; } -void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) +void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta; @@ -4336,7 +4454,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) unsigned int dst_idx; bool pseudo_call; - if (BPF_CLASS(code) != BPF_JMP) + if (!is_mbpf_jmp(meta)) continue; if (BPF_OP(code) == BPF_EXIT) continue; @@ -4353,7 +4471,7 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt) else dst_idx = meta->n + 1 + meta->insn.off; - dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt); + dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx); if (pseudo_call) dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index dccae0319204..275de9f4c61c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app) app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf); } - bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops); + bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf); err = PTR_ERR_OR_ZERO(bpf->bpf_dev); if (err) goto err_free_neutral_maps; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 941277936475..b25a48218bcf 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -243,6 +243,16 @@ struct nfp_bpf_reg_state { #define FLAG_INSN_IS_JUMP_DST BIT(0) #define FLAG_INSN_IS_SUBPROG_START BIT(1) #define FLAG_INSN_PTR_CALLER_STACK_FRAME BIT(2) +/* Instruction is pointless, noop even on its own */ +#define FLAG_INSN_SKIP_NOOP BIT(3) +/* Instruction is optimized out based on preceding instructions */ +#define FLAG_INSN_SKIP_PREC_DEPENDENT BIT(4) +/* Instruction is optimized by the verifier */ +#define FLAG_INSN_SKIP_VERIFIER_OPT BIT(5) + +#define FLAG_INSN_SKIP_MASK (FLAG_INSN_SKIP_NOOP | \ + FLAG_INSN_SKIP_PREC_DEPENDENT | \ + FLAG_INSN_SKIP_VERIFIER_OPT) /** * struct nfp_insn_meta - BPF instruction wrapper @@ -271,7 +281,6 @@ struct nfp_bpf_reg_state { * @n: eBPF instruction number * @flags: eBPF instruction extra optimization flags * @subprog_idx: index of subprogram to which the instruction belongs - * @skip: skip this instruction (optimized out) * @double_cb: callback for second part of the instruction * @l: link on nfp_prog->insns list */ @@ -319,7 +328,6 @@ struct nfp_insn_meta { unsigned short n; unsigned short flags; unsigned short subprog_idx; - bool skip; instr_cb_t double_cb; struct list_head l; @@ -357,6 +365,21 @@ static inline bool is_mbpf_load(const struct nfp_insn_meta *meta) return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM); } +static inline bool is_mbpf_jmp32(const struct nfp_insn_meta *meta) +{ + return 
mbpf_class(meta) == BPF_JMP32; +} + +static inline bool is_mbpf_jmp64(const struct nfp_insn_meta *meta) +{ + return mbpf_class(meta) == BPF_JMP; +} + +static inline bool is_mbpf_jmp(const struct nfp_insn_meta *meta) +{ + return is_mbpf_jmp32(meta) || is_mbpf_jmp64(meta); +} + static inline bool is_mbpf_store(const struct nfp_insn_meta *meta) { return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM); @@ -407,6 +430,20 @@ static inline bool is_mbpf_div(const struct nfp_insn_meta *meta) return is_mbpf_alu(meta) && mbpf_op(meta) == BPF_DIV; } +static inline bool is_mbpf_cond_jump(const struct nfp_insn_meta *meta) +{ + u8 op; + + if (is_mbpf_jmp32(meta)) + return true; + + if (!is_mbpf_jmp64(meta)) + return false; + + op = mbpf_op(meta); + return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; +} + static inline bool is_mbpf_helper_call(const struct nfp_insn_meta *meta) { struct bpf_insn insn = meta->insn; @@ -457,6 +494,7 @@ struct nfp_bpf_subprog_info { * @subprog_cnt: number of sub-programs, including main function * @map_records: the map record pointers from bpf->maps_neutral * @subprog: pointer to an array of objects holding info about sub-programs + * @n_insns: number of instructions on @insns list * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -489,6 +527,7 @@ struct nfp_prog { struct nfp_bpf_neutral_map **map_records; struct nfp_bpf_subprog_info *subprog; + unsigned int n_insns; struct list_head insns; }; @@ -505,7 +544,7 @@ struct nfp_bpf_vnic { }; bool nfp_is_subprog_start(struct nfp_insn_meta *meta); -void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); +void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog); int nfp_bpf_jit(struct nfp_prog *prog); bool nfp_bpf_supported_opcode(u8 code); @@ -513,6 +552,10 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int nfp_bpf_finalize(struct bpf_verifier_env *env); +int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn); +int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); + extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops; struct netdev_bpf; @@ -526,7 +569,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, struct nfp_insn_meta * nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int insn_idx, unsigned int n_insns); + unsigned int insn_idx); void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index f0283854fade..15dce97650a5 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -163,8 +163,9 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, list_add_tail(&meta->l, &nfp_prog->insns); } + nfp_prog->n_insns = cnt; - nfp_bpf_jit_prepare(nfp_prog, cnt); + nfp_bpf_jit_prepare(nfp_prog); return 0; } @@ -184,8 +185,6 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) static int nfp_bpf_verifier_prep(struct bpf_prog *prog) { - struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev); - struct nfp_app *app = nn->app; struct nfp_prog *nfp_prog; int ret; @@ -196,7 +195,7 @@ static int nfp_bpf_verifier_prep(struct bpf_prog *prog) INIT_LIST_HEAD(&nfp_prog->insns); nfp_prog->type = prog->type; - nfp_prog->bpf = app->priv; + nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev); ret 
= nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len); if (ret) @@ -219,6 +218,10 @@ static int nfp_bpf_translate(struct bpf_prog *prog) unsigned int max_instr; int err; + /* We depend on dead code elimination succeeding */ + if (prog->aux->offload->opt_failed) + return -EINVAL; + max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); @@ -591,6 +594,8 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, const struct bpf_prog_offload_ops nfp_bpf_dev_ops = { .insn_hook = nfp_verify_insn, .finalize = nfp_bpf_finalize, + .replace_insn = nfp_bpf_opt_replace_insn, + .remove_insns = nfp_bpf_opt_remove_insns, .prepare = nfp_bpf_verifier_prep, .translate = nfp_bpf_translate, .destroy = nfp_bpf_destroy, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 337bb862ec1d..36f56eb4cbe2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -18,15 +18,15 @@ struct nfp_insn_meta * nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int insn_idx, unsigned int n_insns) + unsigned int insn_idx) { unsigned int forward, backward, i; backward = meta->n - insn_idx; forward = insn_idx - meta->n; - if (min(forward, backward) > n_insns - insn_idx - 1) { - backward = n_insns - insn_idx - 1; + if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) { + backward = nfp_prog->n_insns - insn_idx - 1; meta = nfp_prog_last_meta(nfp_prog); } if (min(forward, backward) > insn_idx && backward > insn_idx) { @@ -629,7 +629,7 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; struct nfp_insn_meta *meta = nfp_prog->verifier_meta; - meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len); + meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx); nfp_prog->verifier_meta = meta; if (!nfp_bpf_supported_opcode(meta->insn.code)) { @@ -690,8 +690,7 @@ nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env, return 0; } -static unsigned int -nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt) +static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog); unsigned int max_depth = 0, depth = 0, frame = 0; @@ -726,7 +725,7 @@ continue_subprog: /* Find the callee and start processing it. 
*/ meta = nfp_bpf_goto_meta(nfp_prog, meta, - meta->n + 1 + meta->insn.imm, cnt); + meta->n + 1 + meta->insn.imm); idx = meta->subprog_idx; frame++; goto process_subprog; @@ -778,8 +777,7 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env) nn = netdev_priv(env->prog->aux->offload->netdev); max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; - nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog, - env->prog->len); + nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog); if (nfp_prog->stack_size > max_stack) { pr_vlog(env, "stack too large: program %dB > FW stack %dB\n", nfp_prog->stack_size, max_stack); @@ -788,3 +786,61 @@ int nfp_bpf_finalize(struct bpf_verifier_env *env) return 0; } + +int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off, + struct bpf_insn *insn) +{ + struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + struct nfp_insn_meta *meta = nfp_prog->verifier_meta; + + meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx); + nfp_prog->verifier_meta = meta; + + /* conditional jump to jump conversion */ + if (is_mbpf_cond_jump(meta) && + insn->code == (BPF_JMP | BPF_JA | BPF_K)) { + unsigned int tgt_off; + + tgt_off = off + insn->off + 1; + + if (!insn->off) { + meta->jmp_dst = list_next_entry(meta, l); + meta->jump_neg_op = false; + } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) { + pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n", + off, meta->jmp_dst->n, + aux_data[tgt_off].orig_idx); + return -EINVAL; + } + return 0; + } + + pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n", + meta->insn.code, insn->code); + return -EINVAL; +} + +int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) +{ + struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + struct nfp_insn_meta *meta = nfp_prog->verifier_meta; + unsigned int i; + + meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx); + + for (i = 0; i < cnt; i++) { + if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns)) + return -EINVAL; + + /* doesn't count if it already has the flag */ + if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT) + i--; + + meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT; + meta = list_next_entry(meta, l); + } + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 8d54b36afee8..eeda4ed98333 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -3,7 +3,6 @@ #include #include -#include #include #include #include @@ -37,7 +36,7 @@ static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) static void nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, - const struct tc_action *action) + const struct flow_action_entry *act) { size_t act_size = sizeof(struct nfp_fl_push_vlan); u16 tmp_push_vlan_tci; @@ -45,17 +44,17 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN; push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; push_vlan->reserved = 0; - push_vlan->vlan_tpid = tcf_vlan_push_proto(action); + push_vlan->vlan_tpid = act->vlan.proto; tmp_push_vlan_tci = - FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) | - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) | + FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | + 
FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | NFP_FL_PUSH_VLAN_CFI; push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } static int -nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, +nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act, struct nfp_fl_payload *nfp_flow, int act_len) { size_t act_size = sizeof(struct nfp_fl_pre_lag); @@ -63,7 +62,7 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, struct net_device *out_dev; int err; - out_dev = tcf_mirred_dev(action); + out_dev = act->dev; if (!out_dev || !netif_is_lag_master(out_dev)) return 0; @@ -92,7 +91,8 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, static int nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, - const struct tc_action *action, struct nfp_fl_payload *nfp_flow, + const struct flow_action_entry *act, + struct nfp_fl_payload *nfp_flow, bool last, struct net_device *in_dev, enum nfp_flower_tun_type tun_type, int *tun_out_cnt) { @@ -104,7 +104,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT; output->head.len_lw = act_size >> NFP_FL_LW_SIZ; - out_dev = tcf_mirred_dev(action); + out_dev = act->dev; if (!out_dev) return -EOPNOTSUPP; @@ -137,7 +137,7 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, if (nfp_netdev_is_nfp_repr(in_dev)) { /* Confirm ingress and egress are on same device. */ - if (!switchdev_port_same_parent_id(in_dev, out_dev)) + if (!netdev_port_same_parent_id(in_dev, out_dev)) return -EOPNOTSUPP; } @@ -155,9 +155,9 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, static enum nfp_flower_tun_type nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app, - const struct tc_action *action) + const struct flow_action_entry *act) { - struct ip_tunnel_info *tun = tcf_tunnel_info(action); + const struct ip_tunnel_info *tun = act->tunnel; struct nfp_flower_priv *priv = app->priv; switch (tun->key.tp_dst) { @@ -195,9 +195,9 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) static int nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, - const struct tc_action *action) + const struct flow_action_entry *act) { - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); + struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel; int opt_len, opt_cnt, act_start, tot_push_len; u8 *src = ip_tunnel_info_opts(ip_tun); @@ -259,13 +259,13 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, static int nfp_fl_set_ipv4_udp_tun(struct nfp_app *app, struct nfp_fl_set_ipv4_udp_tun *set_tun, - const struct tc_action *action, + const struct flow_action_entry *act, struct nfp_fl_pre_tunnel *pre_tun, enum nfp_flower_tun_type tun_type, struct net_device *netdev) { size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); + const struct ip_tunnel_info *ip_tun = act->tunnel; struct nfp_flower_priv *priv = app->priv; u32 tmp_set_ip_tun_type_index = 0; /* Currently support one pre-tunnel so index is always 0. 
*/ @@ -345,7 +345,7 @@ static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask) } static int -nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_eth(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_eth *set_eth) { u32 exact, mask; @@ -353,8 +353,8 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, if (off + 4 > ETH_ALEN * 2) return -EOPNOTSUPP; - mask = ~tcf_pedit_mask(action, idx); - exact = tcf_pedit_val(action, idx); + mask = ~act->mangle.mask; + exact = act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -376,7 +376,7 @@ struct ipv4_ttl_word { }; static int -nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_ip4_addrs *set_ip_addr, struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos) { @@ -387,8 +387,8 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, __be32 exact, mask; /* We are expecting tcf_pedit to return a big endian value */ - mask = (__force __be32)~tcf_pedit_mask(action, idx); - exact = (__force __be32)tcf_pedit_val(action, idx); + mask = (__force __be32)~act->mangle.mask; + exact = (__force __be32)act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -505,7 +505,7 @@ nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask, } static int -nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_ipv6_addr *ip_dst, struct nfp_fl_set_ipv6_addr *ip_src, struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl) @@ -515,8 +515,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, u8 word; /* We are expecting tcf_pedit to return a big endian value */ - mask = (__force __be32)~tcf_pedit_mask(action, idx); - exact = (__force __be32)tcf_pedit_val(action, idx); + mask = (__force __be32)~act->mangle.mask; + exact = (__force __be32)act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -541,7 +541,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, } static int -nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, +nfp_fl_set_tport(const struct flow_action_entry *act, u32 off, struct nfp_fl_set_tport *set_tport, int opcode) { u32 exact, mask; @@ -549,8 +549,8 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, if (off) return -EOPNOTSUPP; - mask = ~tcf_pedit_mask(action, idx); - exact = tcf_pedit_val(action, idx); + mask = ~act->mangle.mask; + exact = act->mangle.val; if (exact & ~mask) return -EOPNOTSUPP; @@ -584,20 +584,22 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto) } static int -nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, +nfp_fl_pedit(const struct flow_action_entry *act, + struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len, u32 *csum_updated) { + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl; struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos; struct nfp_fl_set_ip4_addrs set_ip_addr; + enum flow_action_mangle_base htype; struct nfp_fl_set_tport set_tport; struct nfp_fl_set_eth set_eth; - enum pedit_header_type htype; - int idx, nkeys, err; size_t act_size = 0; - u32 offset, cmd; u8 ip_proto = 0; + u32 offset; + int err; memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl)); memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos)); @@ -606,50 +608,41 @@ nfp_fl_pedit(const 
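The nfp_fl_set_eth()/nfp_fl_set_ip4()/nfp_fl_set_ip6()/nfp_fl_set_tport() conversions above all follow the same recipe: a FLOW_ACTION_MANGLE entry carries one 32-bit mask/val pair, the driver inverts the mask to obtain the bits it must program, and rejects values that spill outside those bits. A standalone sketch of just that arithmetic (illustrative helper name, not driver code):

#include <stdint.h>
#include <stdio.h>

/* mangle_mask has 0 bits for the positions being rewritten; the driver wants
 * the inverse ("bits to set") and requires the value to stay inside them.
 */
static int mangle_to_exact_mask(uint32_t mangle_mask, uint32_t mangle_val,
				uint32_t *exact, uint32_t *set_mask)
{
	*set_mask = ~mangle_mask;	/* bits the action overwrites */
	*exact = mangle_val;

	if (*exact & ~*set_mask)	/* value touches preserved bits */
		return -1;		/* the driver returns -EOPNOTSUPP here */
	return 0;
}

int main(void)
{
	uint32_t exact, mask;

	/* Rewrite the low 16 bits to 0x1234, keep the high 16 bits. */
	if (!mangle_to_exact_mask(0xffff0000, 0x00001234, &exact, &mask))
		printf("exact=%#x mask=%#x\n", exact, mask);

	/* Value sets a bit the mask marks as preserved: rejected. */
	if (mangle_to_exact_mask(0xffff0000, 0x00011234, &exact, &mask))
		printf("rejected\n");
	return 0;
}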
struct tc_action *action, struct tc_cls_flower_offload *flow, memset(&set_ip_addr, 0, sizeof(set_ip_addr)); memset(&set_tport, 0, sizeof(set_tport)); memset(&set_eth, 0, sizeof(set_eth)); - nkeys = tcf_pedit_nkeys(action); - - for (idx = 0; idx < nkeys; idx++) { - cmd = tcf_pedit_cmd(action, idx); - htype = tcf_pedit_htype(action, idx); - offset = tcf_pedit_offset(action, idx); - if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) - return -EOPNOTSUPP; + htype = act->mangle.htype; + offset = act->mangle.offset; - switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: - err = nfp_fl_set_eth(action, idx, offset, &set_eth); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: - err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr, - &set_ip_ttl_tos); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: - err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst, - &set_ip6_src, &set_ip6_tc_hl_fl); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: - err = nfp_fl_set_tport(action, idx, offset, &set_tport, - NFP_FL_ACTION_OPCODE_SET_TCP); - break; - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: - err = nfp_fl_set_tport(action, idx, offset, &set_tport, - NFP_FL_ACTION_OPCODE_SET_UDP); - break; - default: - return -EOPNOTSUPP; - } - if (err) - return err; + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + err = nfp_fl_set_eth(act, offset, &set_eth); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + err = nfp_fl_set_ip4(act, offset, &set_ip_addr, + &set_ip_ttl_tos); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + err = nfp_fl_set_ip6(act, offset, &set_ip6_dst, + &set_ip6_src, &set_ip6_tc_hl_fl); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + err = nfp_fl_set_tport(act, offset, &set_tport, + NFP_FL_ACTION_OPCODE_SET_TCP); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + err = nfp_fl_set_tport(act, offset, &set_tport, + NFP_FL_ACTION_OPCODE_SET_UDP); + break; + default: + return -EOPNOTSUPP; } + if (err) + return err; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *basic; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->key); - ip_proto = basic->ip_proto; + flow_rule_match_basic(rule, &match); + ip_proto = match.key->ip_proto; } if (set_eth.head.len_lw) { @@ -733,7 +726,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, } static int -nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, +nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, bool last, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, @@ -753,7 +746,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, return -EOPNOTSUPP; output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type, + err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type, tun_out_cnt); if (err) return err; @@ -764,7 +757,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, /* nfp_fl_pre_lag returns -err or size of prelag action added. * This will be 0 if it is not egressing to a lag dev. 
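The rewritten nfp_fl_pedit() also shows the general shape of the new match API: flow_rule_match_key() tests whether a dissector key is present, and flow_rule_match_basic() and friends hand back a {key, mask} pair, replacing the paired skb_flow_dissector_target() calls on flow->key and flow->mask. A minimal kernel-context sketch (hypothetical function name; not driver code):

#include <net/flow_offload.h>

static u8 example_read_ip_proto(const struct flow_rule *rule)
{
	struct flow_match_basic match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		return 0;

	flow_rule_match_basic(rule, &match);

	/* Only trust key bits the classifier actually masked on. */
	return match.key->ip_proto & match.mask->ip_proto;
}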
*/ - prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len); + prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len); if (prelag_size < 0) return prelag_size; else if (prelag_size > 0 && (!last || *out_cnt)) @@ -778,7 +771,7 @@ nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, } static int -nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, +nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, @@ -791,23 +784,25 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, struct nfp_fl_pop_vlan *pop_v; int err; - if (is_tcf_gact_shot(a)) { + switch (act->id) { + case FLOW_ACTION_DROP: nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); - } else if (is_tcf_mirred_egress_redirect(a)) { - err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + break; + case FLOW_ACTION_REDIRECT: + err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, true, tun_type, tun_out_cnt, out_cnt, csum_updated); if (err) return err; - - } else if (is_tcf_mirred_egress_mirror(a)) { - err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, + break; + case FLOW_ACTION_MIRRED: + err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, false, tun_type, tun_out_cnt, out_cnt, csum_updated); if (err) return err; - - } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { + break; + case FLOW_ACTION_VLAN_POP: if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; @@ -816,19 +811,21 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, nfp_fl_pop_vlan(pop_v); *a_len += sizeof(struct nfp_fl_pop_vlan); - } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { + break; + case FLOW_ACTION_VLAN_PUSH: if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) return -EOPNOTSUPP; psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len]; nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); - nfp_fl_push_vlan(psh_v, a); + nfp_fl_push_vlan(psh_v, act); *a_len += sizeof(struct nfp_fl_push_vlan); - } else if (is_tcf_tunnel_set(a)) { - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); + break; + case FLOW_ACTION_TUNNEL_ENCAP: { + const struct ip_tunnel_info *ip_tun = act->tunnel; - *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a); + *tun_type = nfp_fl_get_tun_from_act_l4_port(app, act); if (*tun_type == NFP_FL_TUNNEL_NONE) return -EOPNOTSUPP; @@ -847,32 +844,36 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); *a_len += sizeof(struct nfp_fl_pre_tunnel); - err = nfp_fl_push_geneve_options(nfp_fl, a_len, a); + err = nfp_fl_push_geneve_options(nfp_fl, a_len, act); if (err) return err; set_tun = (void *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun, + err = nfp_fl_set_ipv4_udp_tun(app, set_tun, act, pre_tun, *tun_type, netdev); if (err) return err; *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun); - } else if (is_tcf_tunnel_release(a)) { + } + break; + case FLOW_ACTION_TUNNEL_DECAP: /* Tunnel decap is handled by default so accept action. 
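nfp_flower_loop_action() above is the same conversion applied to actions: every entry is a struct flow_action_entry identified by act->id, so a single switch replaces the chain of is_tcf_*() probes, and the iteration itself (used further down in nfp_flower_compile_action()) is the flow_action_for_each() macro. A minimal kernel-context sketch of that shape (hypothetical helper; the drop/output semantics are invented for the example):

#include <net/flow_offload.h>

static int example_count_outputs(const struct flow_action *action)
{
	const struct flow_action_entry *act;
	int i, outputs = 0;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			outputs++;		/* act->dev names the egress port */
			break;
		case FLOW_ACTION_DROP:
			return 0;		/* nothing leaves the device */
		default:
			return -EOPNOTSUPP;	/* unhandled action id */
		}
	}
	return outputs;
}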
*/ return 0; - } else if (is_tcf_pedit(a)) { - if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len], + case FLOW_ACTION_MANGLE: + if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len], a_len, csum_updated)) return -EOPNOTSUPP; - } else if (is_tcf_csum(a)) { + break; + case FLOW_ACTION_CSUM: /* csum action requests recalc of something we have not fixed */ - if (tcf_csum_update_flags(a) & ~*csum_updated) + if (act->csum_flags & ~*csum_updated) return -EOPNOTSUPP; /* If we will correctly fix the csum we can remove it from the * csum update list. Which will later be used to check support. */ - *csum_updated &= ~tcf_csum_update_flags(a); - } else { + *csum_updated &= ~act->csum_flags; + break; + default: /* Currently we do not handle any other actions. */ return -EOPNOTSUPP; } @@ -887,7 +888,7 @@ int nfp_flower_compile_action(struct nfp_app *app, { int act_len, act_cnt, err, tun_out_cnt, out_cnt, i; enum nfp_flower_tun_type tun_type; - const struct tc_action *a; + struct flow_action_entry *act; u32 csum_updated = 0; memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); @@ -898,8 +899,8 @@ int nfp_flower_compile_action(struct nfp_app *app, tun_out_cnt = 0; out_cnt = 0; - tcf_exts_for_each_action(i, a, flow->exts) { - err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len, + flow_action_for_each(i, act, &flow->rule->action) { + err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated); if (err) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 4c5eaf36d5bb..cf9e1118ee8f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -45,11 +45,9 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports) { struct nfp_flower_cmsg_mac_repr *msg; struct sk_buff *skb; - unsigned int size; - size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]); - skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR, - GFP_KERNEL); + skb = nfp_flower_cmsg_alloc(app, struct_size(msg, ports, num_ports), + NFP_FLOWER_CMSG_TYPE_MAC_REPR, GFP_KERNEL); if (!skb) return NULL; @@ -203,7 +201,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb) } atomic_inc(&priv->reify_replies); - wake_up_interruptible(&priv->reify_wait_queue); + wake_up(&priv->reify_wait_queue); } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 15f41cfef9f1..4fcaf11ed56e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -97,6 +97,9 @@ #define NFP_FLOWER_WORKQ_MAX_SKBS 30000 +/* Cmesg reply (empirical) timeout*/ +#define NFP_FL_REPLY_TIMEOUT msecs_to_jiffies(40) + #define nfp_flower_cmsg_warn(app, fmt, args...) 
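The cmsg.c hunk above also swaps the open-coded sizeof(*msg) + num_ports * sizeof(msg->ports[0]) for struct_size(), which saturates rather than wrapping on overflow, so a bogus count cannot turn into a short allocation. A kernel-context sketch with a made-up message layout (hypothetical names, not the driver's structures):

#include <linux/if_ether.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical message layout, only to demonstrate struct_size(). */
struct example_msg {
	u16 count;
	struct {
		u32 id;
		u8 mac[ETH_ALEN];
	} ports[];
};

static struct example_msg *example_msg_alloc(unsigned int num_ports)
{
	struct example_msg *msg;

	/* sizeof(*msg) + num_ports * sizeof(msg->ports[0]), overflow-safe. */
	msg = kzalloc(struct_size(msg, ports, num_ports), GFP_KERNEL);
	if (msg)
		msg->count = num_ports;
	return msg;
}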
\ do { \ if (net_ratelimit()) \ diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 5059110a1768..408089133599 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -32,6 +32,71 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app) return DEVLINK_ESWITCH_MODE_SWITCHDEV; } +static struct nfp_flower_non_repr_priv * +nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_flower_non_repr_priv *entry; + + ASSERT_RTNL(); + + list_for_each_entry(entry, &priv->non_repr_priv, list) + if (entry->netdev == netdev) + return entry; + + return NULL; +} + +void +__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv) +{ + non_repr_priv->ref_count++; +} + +struct nfp_flower_non_repr_priv * +nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_flower_non_repr_priv *entry; + + entry = nfp_flower_non_repr_priv_lookup(app, netdev); + if (entry) + goto inc_ref; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + entry->netdev = netdev; + list_add(&entry->list, &priv->non_repr_priv); + +inc_ref: + __nfp_flower_non_repr_priv_get(entry); + return entry; +} + +void +__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv) +{ + if (--non_repr_priv->ref_count) + return; + + list_del(&non_repr_priv->list); + kfree(non_repr_priv); +} + +void +nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_flower_non_repr_priv *entry; + + entry = nfp_flower_non_repr_priv_lookup(app, netdev); + if (!entry) + return; + + __nfp_flower_non_repr_priv_put(entry); +} + static enum nfp_repr_type nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port) { @@ -107,16 +172,14 @@ static int nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl) { struct nfp_flower_priv *priv = app->priv; - int err; if (!tot_repl) return 0; lockdep_assert_held(&app->pf->lock); - err = wait_event_interruptible_timeout(priv->reify_wait_queue, - atomic_read(replies) >= tot_repl, - msecs_to_jiffies(10)); - if (err <= 0) { + if (!wait_event_timeout(priv->reify_wait_queue, + atomic_read(replies) >= tot_repl, + NFP_FL_REPLY_TIMEOUT)) { nfp_warn(app->cpp, "Not all reprs responded to reify\n"); return -EIO; } @@ -223,6 +286,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; + repr_priv->nfp_repr = nfp_repr; /* For now we only support 1 PF */ WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); @@ -337,6 +401,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) nfp_repr = netdev_priv(repr); nfp_repr->app_priv = repr_priv; + repr_priv->nfp_repr = nfp_repr; port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr); if (IS_ERR(port)) { @@ -476,8 +541,8 @@ err_clear_nn: static int nfp_flower_init(struct nfp_app *app) { + u64 version, features, ctx_count, num_mems; const struct nfp_pf *pf = app->pf; - u64 version, features, ctx_count; struct nfp_flower_priv *app_priv; int err; @@ -502,6 +567,23 @@ static int nfp_flower_init(struct nfp_app *app) return err; } + num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT", + &err); + if (err) { + nfp_warn(app->cpp, + "FlowerNIC: unsupported host context memory: 
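Both the repr-reify wait and the MTU-ack wait in main.c above move from wait_event_interruptible_timeout() with a hard-coded 10 ms to plain wait_event_timeout() with the shared 40 ms NFP_FL_REPLY_TIMEOUT, so a pending signal can no longer abort the wait early and be misreported as a firmware failure. The resulting pattern, reduced to a hypothetical helper (wait_event_timeout() returns 0 only on a real timeout):

#include <linux/jiffies.h>
#include <linux/wait.h>

static int example_wait_for_fw_ack(wait_queue_head_t *wq, atomic_t *acks,
				   int wanted)
{
	/* 40 ms stands in for NFP_FL_REPLY_TIMEOUT from cmsg.h above. */
	if (!wait_event_timeout(*wq, atomic_read(acks) >= wanted,
				msecs_to_jiffies(40)))
		return -EIO;	/* firmware never answered */
	return 0;
}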
%d\n", + err); + err = 0; + num_mems = 1; + } + + if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) { + nfp_warn(app->cpp, + "FlowerNIC: invalid host context memory: %llu\n", + num_mems); + return -EINVAL; + } + ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT", &err); if (err) { @@ -522,6 +604,8 @@ static int nfp_flower_init(struct nfp_app *app) if (!app_priv) return -ENOMEM; + app_priv->total_mem_units = num_mems; + app_priv->active_mem_unit = 0; app_priv->stats_ring_size = roundup_pow_of_two(ctx_count); app->priv = app_priv; app_priv->app = app; @@ -533,7 +617,7 @@ static int nfp_flower_init(struct nfp_app *app) init_waitqueue_head(&app_priv->mtu_conf.wait_q); spin_lock_init(&app_priv->mtu_conf.lock); - err = nfp_flower_metadata_init(app, ctx_count); + err = nfp_flower_metadata_init(app, ctx_count, num_mems); if (err) goto err_free_app_priv; @@ -558,6 +642,7 @@ static int nfp_flower_init(struct nfp_app *app) } INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); + INIT_LIST_HEAD(&app_priv->non_repr_priv); return 0; @@ -601,7 +686,7 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, { struct nfp_flower_priv *app_priv = app->priv; struct nfp_repr *repr = netdev_priv(netdev); - int err, ack; + int err; /* Only need to config FW for physical port MTU change. */ if (repr->port->type != NFP_PORT_PHYS_PORT) @@ -628,11 +713,9 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, } /* Wait for fw to ack the change. */ - ack = wait_event_timeout(app_priv->mtu_conf.wait_q, - nfp_flower_check_ack(app_priv), - msecs_to_jiffies(10)); - - if (!ack) { + if (!wait_event_timeout(app_priv->mtu_conf.wait_q, + nfp_flower_check_ack(app_priv), + NFP_FL_REPLY_TIMEOUT)) { spin_lock_bh(&app_priv->mtu_conf.lock); app_priv->mtu_conf.requested_val = 0; spin_unlock_bh(&app_priv->mtu_conf.lock); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index b858bac47621..c0945a5fd1a4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -20,6 +20,9 @@ struct nfp_fl_pre_lag; struct net_device; struct nfp_app; +#define NFP_FL_STAT_ID_MU_NUM GENMASK(31, 22) +#define NFP_FL_STAT_ID_STAT GENMASK(21, 0) + #define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 @@ -53,6 +56,26 @@ struct nfp_fl_stats_id { u8 repeated_em_count; }; +/** + * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads + * @offloaded_macs: Hashtable of the offloaded MAC addresses + * @ipv4_off_list: List of IPv4 addresses to offload + * @neigh_off_list: List of neighbour offloads + * @ipv4_off_lock: Lock for the IPv4 address list + * @neigh_off_lock: Lock for the neighbour address list + * @mac_off_ids: IDA to manage id assignment for offloaded MACs + * @neigh_nb: Notifier to monitor neighbour state + */ +struct nfp_fl_tunnel_offloads { + struct rhashtable offloaded_macs; + struct list_head ipv4_off_list; + struct list_head neigh_off_list; + struct mutex ipv4_off_lock; + spinlock_t neigh_off_lock; + struct ida mac_off_ids; + struct notifier_block neigh_nb; +}; + /** * struct nfp_mtu_conf - manage MTU setting * @portnum: NFP port number of repr with requested MTU change @@ -113,23 +136,16 @@ struct nfp_fl_lag { * processing * @cmsg_skbs_low: List of lower priority skbs for control message * processing - * @nfp_mac_off_list: List of MAC addresses to offload - * @nfp_mac_index_list: List 
of unique 8-bit indexes for non NFP netdevs - * @nfp_ipv4_off_list: List of IPv4 addresses to offload - * @nfp_neigh_off_list: List of neighbour offloads - * @nfp_mac_off_lock: Lock for the MAC address list - * @nfp_mac_index_lock: Lock for the MAC index list - * @nfp_ipv4_off_lock: Lock for the IPv4 address list - * @nfp_neigh_off_lock: Lock for the neighbour address list - * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs - * @nfp_mac_off_count: Number of MACs in address list - * @nfp_tun_neigh_nb: Notifier to monitor neighbour state + * @tun: Tunnel offload data * @reify_replies: atomically stores the number of replies received * from firmware for repr reify * @reify_wait_queue: wait queue for repr reify response counting * @mtu_conf: Configuration of repr MTU value * @nfp_lag: Link aggregation data block * @indr_block_cb_priv: List of priv data passed to indirect block cbs + * @non_repr_priv: List of offloaded non-repr ports and their priv data + * @active_mem_unit: Current active memory unit for flower rules + * @total_mem_units: Total number of available memory units for flower rules */ struct nfp_flower_priv { struct nfp_app *app; @@ -147,30 +163,47 @@ struct nfp_flower_priv { struct work_struct cmsg_work; struct sk_buff_head cmsg_skbs_high; struct sk_buff_head cmsg_skbs_low; - struct list_head nfp_mac_off_list; - struct list_head nfp_mac_index_list; - struct list_head nfp_ipv4_off_list; - struct list_head nfp_neigh_off_list; - struct mutex nfp_mac_off_lock; - struct mutex nfp_mac_index_lock; - struct mutex nfp_ipv4_off_lock; - spinlock_t nfp_neigh_off_lock; - struct ida nfp_mac_off_ids; - int nfp_mac_off_count; - struct notifier_block nfp_tun_neigh_nb; + struct nfp_fl_tunnel_offloads tun; atomic_t reify_replies; wait_queue_head_t reify_wait_queue; struct nfp_mtu_conf mtu_conf; struct nfp_fl_lag nfp_lag; struct list_head indr_block_cb_priv; + struct list_head non_repr_priv; + unsigned int active_mem_unit; + unsigned int total_mem_units; }; /** * struct nfp_flower_repr_priv - Flower APP per-repr priv data + * @nfp_repr: Back pointer to nfp_repr * @lag_port_flags: Extended port flags to record lag state of repr + * @mac_offloaded: Flag indicating a MAC address is offloaded for repr + * @offloaded_mac_addr: MAC address that has been offloaded for repr + * @mac_list: List entry of reprs that share the same offloaded MAC */ struct nfp_flower_repr_priv { + struct nfp_repr *nfp_repr; unsigned long lag_port_flags; + bool mac_offloaded; + u8 offloaded_mac_addr[ETH_ALEN]; + struct list_head mac_list; +}; + +/** + * struct nfp_flower_non_repr_priv - Priv data for non-repr offloaded ports + * @list: List entry of offloaded reprs + * @netdev: Pointer to non-repr net_device + * @ref_count: Number of references held for this priv data + * @mac_offloaded: Flag indicating a MAC address is offloaded for device + * @offloaded_mac_addr: MAC address that has been offloaded for dev + */ +struct nfp_flower_non_repr_priv { + struct list_head list; + struct net_device *netdev; + int ref_count; + bool mac_offloaded; + u8 offloaded_mac_addr[ETH_ALEN]; }; struct nfp_fl_key_ls { @@ -217,7 +250,8 @@ struct nfp_fl_stats_frame { __be64 stats_cookie; }; -int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count); +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, + unsigned int host_ctx_split); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, @@ -252,7 +286,6 @@ void 
nfp_tunnel_config_stop(struct nfp_app *app); int nfp_tunnel_mac_event_handler(struct nfp_app *app, struct net_device *netdev, unsigned long event, void *ptr); -void nfp_tunnel_write_macs(struct nfp_app *app); void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); @@ -273,4 +306,12 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app, struct net_device *netdev, unsigned long event); +void +__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv); +struct nfp_flower_non_repr_priv * +nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev); +void +__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv); +void +nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index cdf75595f627..e03c8ef2c28c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -8,31 +8,41 @@ #include "main.h" static void -nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame, - struct tc_cls_flower_offload *flow, u8 key_type, - bool mask_version) +nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct tc_cls_flower_offload *flow, u8 key_type) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_vlan *flow_vlan; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); u16 tmp_tci; - memset(frame, 0, sizeof(struct nfp_flower_meta_tci)); + memset(ext, 0, sizeof(struct nfp_flower_meta_tci)); + memset(msk, 0, sizeof(struct nfp_flower_meta_tci)); + /* Populate the metadata frame. */ - frame->nfp_flow_key_layer = key_type; - frame->mask_id = ~0; + ext->nfp_flow_key_layer = key_type; + ext->mask_id = ~0; + + msk->nfp_flow_key_layer = key_type; + msk->mask_id = ~0; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - flow_vlan = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_VLAN, - target); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); /* Populate the tci field. */ - if (flow_vlan->vlan_id || flow_vlan->vlan_priority) { + if (match.key->vlan_id || match.key->vlan_priority) { tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - flow_vlan->vlan_priority) | + match.key->vlan_priority) | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - flow_vlan->vlan_id) | + match.key->vlan_id) | NFP_FLOWER_MASK_VLAN_CFI; - frame->tci = cpu_to_be16(tmp_tci); + ext->tci = cpu_to_be16(tmp_tci); + tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + match.mask->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + match.mask->vlan_id) | + NFP_FLOWER_MASK_VLAN_CFI; + msk->tci = cpu_to_be16(tmp_tci); } } } @@ -64,231 +74,249 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, } static void -nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; - struct flow_dissector_key_eth_addrs *addr; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + + memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); + memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); - memset(frame, 0, sizeof(struct nfp_flower_mac_mpls)); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { - addr = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ETH_ADDRS, - target); + flow_rule_match_eth_addrs(rule, &match); /* Populate mac frame. */ - ether_addr_copy(frame->mac_dst, &addr->dst[0]); - ether_addr_copy(frame->mac_src, &addr->src[0]); + ether_addr_copy(ext->mac_dst, &match.key->dst[0]); + ether_addr_copy(ext->mac_src, &match.key->src[0]); + ether_addr_copy(msk->mac_dst, &match.mask->dst[0]); + ether_addr_copy(msk->mac_src, &match.mask->src[0]); } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) { - struct flow_dissector_key_mpls *mpls; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { + struct flow_match_mpls match; u32 t_mpls; - mpls = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_MPLS, - target); - - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) | + flow_rule_match_mpls(rule, &match); + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.key->mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.key->mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.key->mpls_bos) | NFP_FLOWER_MASK_MPLS_Q; - - frame->mpls_lse = cpu_to_be32(t_mpls); - } else if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC)) { + ext->mpls_lse = cpu_to_be32(t_mpls); + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, match.mask->mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, match.mask->mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, match.mask->mpls_bos) | + NFP_FLOWER_MASK_MPLS_Q; + msk->mpls_lse = cpu_to_be32(t_mpls); + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q * bit, which indicates an mpls ether type but without any * mpls fields. */ - struct flow_dissector_key_basic *key_basic; - - key_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->key); - if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || - key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) - frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || + match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) { + ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + } } } static void -nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, + struct nfp_flower_tp_ports *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; - struct flow_dissector_key_ports *tp; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + + memset(ext, 0, sizeof(struct nfp_flower_tp_ports)); + memset(msk, 0, sizeof(struct nfp_flower_tp_ports)); - memset(frame, 0, sizeof(struct nfp_flower_tp_ports)); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - tp = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_PORTS, - target); - frame->port_src = tp->src; - frame->port_dst = tp->dst; + flow_rule_match_ports(rule, &match); + ext->port_src = match.key->src; + ext->port_dst = match.key->dst; + msk->port_src = match.mask->src; + msk->port_dst = match.mask->dst; } } static void -nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, + struct nfp_flower_ip_ext *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *basic; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - target); - frame->proto = basic->ip_proto; + flow_rule_match_basic(rule, &match); + ext->proto = match.key->ip_proto; + msk->proto = match.mask->ip_proto; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) { - struct flow_dissector_key_ip *flow_ip; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { + struct flow_match_ip match; - flow_ip = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IP, - target); - frame->tos = flow_ip->tos; - frame->ttl = flow_ip->ttl; + flow_rule_match_ip(rule, &match); + ext->tos = match.key->tos; + ext->ttl = match.key->ttl; + msk->tos = match.mask->tos; + msk->ttl = match.mask->ttl; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) { - struct flow_dissector_key_tcp *tcp; - u32 tcp_flags; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { + u16 tcp_flags, tcp_flags_mask; + struct flow_match_tcp match; - tcp = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_TCP, target); - tcp_flags = be16_to_cpu(tcp->flags); + flow_rule_match_tcp(rule, &match); + tcp_flags = be16_to_cpu(match.key->flags); + tcp_flags_mask = be16_to_cpu(match.mask->flags); if (tcp_flags & TCPHDR_FIN) - frame->flags |= NFP_FL_TCP_FLAG_FIN; + ext->flags |= NFP_FL_TCP_FLAG_FIN; + if (tcp_flags_mask & TCPHDR_FIN) + msk->flags |= NFP_FL_TCP_FLAG_FIN; + if (tcp_flags & TCPHDR_SYN) - frame->flags |= NFP_FL_TCP_FLAG_SYN; + ext->flags |= NFP_FL_TCP_FLAG_SYN; + if (tcp_flags_mask & TCPHDR_SYN) + msk->flags |= NFP_FL_TCP_FLAG_SYN; + if (tcp_flags & TCPHDR_RST) - frame->flags |= NFP_FL_TCP_FLAG_RST; + ext->flags |= NFP_FL_TCP_FLAG_RST; + if (tcp_flags_mask & TCPHDR_RST) + msk->flags |= NFP_FL_TCP_FLAG_RST; + if (tcp_flags & TCPHDR_PSH) - frame->flags |= NFP_FL_TCP_FLAG_PSH; + ext->flags |= NFP_FL_TCP_FLAG_PSH; + if (tcp_flags_mask & TCPHDR_PSH) + msk->flags |= NFP_FL_TCP_FLAG_PSH; + if (tcp_flags & TCPHDR_URG) - frame->flags |= NFP_FL_TCP_FLAG_URG; + ext->flags |= NFP_FL_TCP_FLAG_URG; + if (tcp_flags_mask & TCPHDR_URG) + msk->flags |= NFP_FL_TCP_FLAG_URG; } - if 
(dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key; - - key = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - target); - if (key->flags & FLOW_DIS_IS_FRAGMENT) - frame->flags |= NFP_FL_IP_FRAGMENTED; - if (key->flags & FLOW_DIS_FIRST_FRAG) - frame->flags |= NFP_FL_IP_FRAG_FIRST; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + if (match.key->flags & FLOW_DIS_IS_FRAGMENT) + ext->flags |= NFP_FL_IP_FRAGMENTED; + if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) + msk->flags |= NFP_FL_IP_FRAGMENTED; + if (match.key->flags & FLOW_DIS_FIRST_FRAG) + ext->flags |= NFP_FL_IP_FRAG_FIRST; + if (match.mask->flags & FLOW_DIS_FIRST_FRAG) + msk->flags |= NFP_FL_IP_FRAG_FIRST; } } static void -nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, + struct nfp_flower_ipv4 *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_ipv4_addrs *addr; - - memset(frame, 0, sizeof(struct nfp_flower_ipv4)); - - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - addr = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - target); - frame->ipv4_src = addr->src; - frame->ipv4_dst = addr->dst; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + struct flow_match_ipv4_addrs match; + + memset(ext, 0, sizeof(struct nfp_flower_ipv4)); + memset(msk, 0, sizeof(struct nfp_flower_ipv4)); + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + flow_rule_match_ipv4_addrs(rule, &match); + ext->ipv4_src = match.key->src; + ext->ipv4_dst = match.key->dst; + msk->ipv4_src = match.mask->src; + msk->ipv4_dst = match.mask->dst; } - nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version); + nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); } static void -nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, + struct nfp_flower_ipv6 *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; - struct flow_dissector_key_ipv6_addrs *addr; - - memset(frame, 0, sizeof(struct nfp_flower_ipv6)); - - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - addr = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - target); - frame->ipv6_src = addr->src; - frame->ipv6_dst = addr->dst; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + + memset(ext, 0, sizeof(struct nfp_flower_ipv6)); + memset(msk, 0, sizeof(struct nfp_flower_ipv6)); + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + ext->ipv6_src = match.key->src; + ext->ipv6_dst = match.key->dst; + msk->ipv6_src = match.mask->src; + msk->ipv6_dst = match.mask->dst; } - nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version); + nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, flow); } static int -nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_geneve_opt(void *ext, void *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? flow->mask : flow->key; - struct flow_dissector_key_enc_opts *opts; + struct flow_match_enc_opts match; - opts = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_OPTS, - target); - memcpy(key_buf, opts->data, opts->len); + flow_rule_match_enc_opts(flow->rule, &match); + memcpy(ext, match.key->data, match.key->len); + memcpy(msk, match.mask->data, match.mask->len); return 0; } static void -nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, - struct tc_cls_flower_offload *flow, - bool mask_version) +nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext, + struct nfp_flower_ipv4_udp_tun *msk, + struct tc_cls_flower_offload *flow) { - struct fl_flow_key *target = mask_version ? 
flow->mask : flow->key; - struct flow_dissector_key_ipv4_addrs *tun_ips; - struct flow_dissector_key_keyid *vni; - struct flow_dissector_key_ip *ip; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); - memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); + memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); + memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; u32 temp_vni; - vni = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - target); - temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET; - frame->tun_id = cpu_to_be32(temp_vni); + flow_rule_match_enc_keyid(rule, &match); + temp_vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET; + ext->tun_id = cpu_to_be32(temp_vni); + temp_vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET; + msk->tun_id = cpu_to_be32(temp_vni); } - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { - tun_ips = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - target); - frame->ip_src = tun_ips->src; - frame->ip_dst = tun_ips->dst; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_enc_ipv4_addrs(rule, &match); + ext->ip_src = match.key->src; + ext->ip_dst = match.key->dst; + msk->ip_src = match.mask->src; + msk->ip_dst = match.mask->dst; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { - ip = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IP, - target); - frame->tos = ip->tos; - frame->ttl = ip->ttl; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) { + struct flow_match_ip match; + + flow_rule_match_enc_ip(rule, &match); + ext->tos = match.key->tos; + ext->ttl = match.key->ttl; + msk->tos = match.mask->tos; + msk->ttl = match.mask->ttl; } } @@ -313,12 +341,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, ext = nfp_flow->unmasked_data; msk = nfp_flow->mask_data; - /* Populate Exact Metadata. */ nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext, - flow, key_ls->key_layer, false); - /* Populate Mask Metadata. */ - nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk, - flow, key_ls->key_layer, true); + (struct nfp_flower_meta_tci *)msk, + flow, key_ls->key_layer); ext += sizeof(struct nfp_flower_meta_tci); msk += sizeof(struct nfp_flower_meta_tci); @@ -348,45 +373,33 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_in_port); if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { - /* Populate Exact MAC Data. */ nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, - flow, false); - /* Populate Mask MAC Data. */ - nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk, - flow, true); + (struct nfp_flower_mac_mpls *)msk, + flow); ext += sizeof(struct nfp_flower_mac_mpls); msk += sizeof(struct nfp_flower_mac_mpls); } if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) { - /* Populate Exact TP Data. */ nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext, - flow, false); - /* Populate Mask TP Data. 
*/ - nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk, - flow, true); + (struct nfp_flower_tp_ports *)msk, + flow); ext += sizeof(struct nfp_flower_tp_ports); msk += sizeof(struct nfp_flower_tp_ports); } if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) { - /* Populate Exact IPv4 Data. */ nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext, - flow, false); - /* Populate Mask IPv4 Data. */ - nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk, - flow, true); + (struct nfp_flower_ipv4 *)msk, + flow); ext += sizeof(struct nfp_flower_ipv4); msk += sizeof(struct nfp_flower_ipv4); } if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) { - /* Populate Exact IPv4 Data. */ nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext, - flow, false); - /* Populate Mask IPv4 Data. */ - nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk, - flow, true); + (struct nfp_flower_ipv6 *)msk, + flow); ext += sizeof(struct nfp_flower_ipv6); msk += sizeof(struct nfp_flower_ipv6); } @@ -395,17 +408,11 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { __be32 tun_dst; - /* Populate Exact VXLAN Data. */ - nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false); - /* Populate Mask VXLAN Data. */ - nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true); + nfp_flower_compile_ipv4_udp_tun((void *)ext, (void *)msk, flow); tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst; ext += sizeof(struct nfp_flower_ipv4_udp_tun); msk += sizeof(struct nfp_flower_ipv4_udp_tun); - /* Configure tunnel end point MAC. */ - nfp_tunnel_write_macs(app); - /* Store the tunnel destination in the rule data. * This must be present and be an exact match. */ @@ -413,11 +420,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, nfp_tunnel_add_ipv4_off(app, tun_dst); if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { - err = nfp_flower_compile_geneve_opt(ext, flow, false); - if (err) - return err; - - err = nfp_flower_compile_geneve_opt(msk, flow, true); + err = nfp_flower_compile_geneve_opt(ext, msk, flow); if (err) return err; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 573a4400a26c..492837b852b6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include @@ -52,8 +53,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) freed_stats_id = priv->stats_ring_size; /* Check for unallocated entries first. 
*/ if (priv->stats_ids.init_unalloc > 0) { - *stats_context_id = priv->stats_ids.init_unalloc - 1; - priv->stats_ids.init_unalloc--; + if (priv->active_mem_unit == priv->total_mem_units) { + priv->stats_ids.init_unalloc--; + priv->active_mem_unit = 0; + } + + *stats_context_id = + FIELD_PREP(NFP_FL_STAT_ID_STAT, + priv->stats_ids.init_unalloc - 1) | + FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, + priv->active_mem_unit); + priv->active_mem_unit++; return 0; } @@ -381,10 +391,11 @@ const struct rhashtable_params nfp_flower_table_params = { .automatic_shrinking = true, }; -int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count) +int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count, + unsigned int host_num_mems) { struct nfp_flower_priv *priv = app->priv; - int err; + int err, stats_size; hash_init(priv->mask_table); @@ -417,10 +428,12 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count) if (!priv->stats_ids.free_list.buf) goto err_free_last_used; - priv->stats_ids.init_unalloc = host_ctx_count; + priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems); - priv->stats = kvmalloc_array(priv->stats_ring_size, - sizeof(struct nfp_fl_stats), GFP_KERNEL); + stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) | + FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1); + priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats), + GFP_KERNEL); if (!priv->stats) goto err_free_ring_buf; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 2cdbf29ecbe7..450d7296fd57 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -102,23 +102,22 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f) { - return dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS) || - dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS) || - dissector_uses_key(f->dissector, - FLOW_DISSECTOR_KEY_PORTS) || - dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP); + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f); + + return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); } static int -nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts, +nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts, u32 *key_layer_two, int *key_size) { - if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY) + if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY) return -EOPNOTSUPP; - if (enc_opts->len > 0) { + if (enc_opts->key->len > 0) { *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP; *key_size += sizeof(struct nfp_flower_geneve_options); } @@ -133,20 +132,21 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, struct tc_cls_flower_offload *flow, enum nfp_flower_tun_type *tun_type) { - struct flow_dissector_key_basic *mask_basic = NULL; - struct flow_dissector_key_basic *key_basic = NULL; + struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow); + struct flow_dissector *dissector = rule->match.dissector; + struct flow_match_basic basic = { NULL, NULL}; struct nfp_flower_priv *priv = app->priv; u32 key_layer_two; u8 key_layer; int key_size; int err; - if (flow->dissector->used_keys & 
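The unallocated-entry path above hands out the same per-unit index once for every memory unit before decrementing init_unalloc, so fresh contexts interleave across units. A small standalone demo of that ordering (plain C, constants chosen purely for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int init_unalloc = 3;		/* host_ctx_count / total units */
	unsigned int total_mem_units = 2;
	unsigned int active_mem_unit = 0;
	int i;

	for (i = 0; i < 6; i++) {
		if (active_mem_unit == total_mem_units) {
			init_unalloc--;
			active_mem_unit = 0;
		}
		printf("ctx: index=%u mem_unit=%u\n",
		       init_unalloc - 1, active_mem_unit);
		active_mem_unit++;
	}
	/* prints (2,0) (2,1) (1,0) (1,1) (0,0) (0,1) */
	return 0;
}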
~NFP_FLOWER_WHITELIST_DISSECTOR) + if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) return -EOPNOTSUPP; /* If any tun dissector is used then the required set must be used. */ - if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR && - (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) + if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR && + (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) return -EOPNOTSUPP; @@ -155,76 +155,52 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, key_size = sizeof(struct nfp_flower_meta_tci) + sizeof(struct nfp_flower_in_port); - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) || - dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) || + flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { key_layer |= NFP_FLOWER_LAYER_MAC; key_size += sizeof(struct nfp_flower_mac_mpls); } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) { - struct flow_dissector_key_vlan *flow_vlan; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan vlan; - flow_vlan = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_VLAN, - flow->mask); + flow_rule_match_vlan(rule, &vlan); if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) && - flow_vlan->vlan_priority) + vlan.key->vlan_priority) return -EOPNOTSUPP; } - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; - struct flow_dissector_key_ports *mask_enc_ports = NULL; - struct flow_dissector_key_enc_opts *enc_op = NULL; - struct flow_dissector_key_ports *enc_ports = NULL; - struct flow_dissector_key_control *mask_enc_ctl = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - flow->mask); - struct flow_dissector_key_control *enc_ctl = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_CONTROL, - flow->key); - - if (mask_enc_ctl->addr_type != 0xffff || - enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_match_enc_opts enc_op = { NULL, NULL }; + struct flow_match_ipv4_addrs ipv4_addrs; + struct flow_match_control enc_ctl; + struct flow_match_ports enc_ports; + + flow_rule_match_enc_control(rule, &enc_ctl); + + if (enc_ctl.mask->addr_type != 0xffff || + enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) return -EOPNOTSUPP; /* These fields are already verified as used. 
*/ - mask_ipv4 = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, - flow->mask); - if (mask_ipv4->dst != cpu_to_be32(~0)) + flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs); + if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) return -EOPNOTSUPP; - mask_enc_ports = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - flow->mask); - enc_ports = - skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - flow->key); - - if (mask_enc_ports->dst != cpu_to_be16(~0)) + flow_rule_match_enc_ports(rule, &enc_ports); + if (enc_ports.mask->dst != cpu_to_be16(~0)) return -EOPNOTSUPP; - if (dissector_uses_key(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_OPTS)) { - enc_op = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_ENC_OPTS, - flow->key); - } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) + flow_rule_match_enc_opts(rule, &enc_op); - switch (enc_ports->dst) { + switch (enc_ports.key->dst) { case htons(NFP_FL_VXLAN_PORT): *tun_type = NFP_FL_TUNNEL_VXLAN; key_layer |= NFP_FLOWER_LAYER_VXLAN; key_size += sizeof(struct nfp_flower_ipv4_udp_tun); - if (enc_op) + if (enc_op.key) return -EOPNOTSUPP; break; case htons(NFP_FL_GENEVE_PORT): @@ -236,11 +212,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, key_layer_two |= NFP_FLOWER_LAYER2_GENEVE; key_size += sizeof(struct nfp_flower_ipv4_udp_tun); - if (!enc_op) + if (!enc_op.key) break; if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) return -EOPNOTSUPP; - err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two, + err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two, &key_size); if (err) return err; @@ -254,19 +230,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, return -EOPNOTSUPP; } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - mask_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->mask); - - key_basic = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_BASIC, - flow->key); - } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) + flow_rule_match_basic(rule, &basic); - if (mask_basic && mask_basic->n_proto) { + if (basic.mask && basic.mask->n_proto) { /* Ethernet type is present in the key. */ - switch (key_basic->n_proto) { + switch (basic.key->n_proto) { case cpu_to_be16(ETH_P_IP): key_layer |= NFP_FLOWER_LAYER_IPV4; key_size += sizeof(struct nfp_flower_ipv4); @@ -305,9 +274,9 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } } - if (mask_basic && mask_basic->ip_proto) { + if (basic.mask && basic.mask->ip_proto) { /* Ethernet type is present in the key. */ - switch (key_basic->ip_proto) { + switch (basic.key->ip_proto) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_SCTP: @@ -324,14 +293,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) { - struct flow_dissector_key_tcp *tcp; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { + struct flow_match_tcp tcp; u32 tcp_flags; - tcp = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_TCP, - flow->key); - tcp_flags = be16_to_cpu(tcp->flags); + flow_rule_match_tcp(rule, &tcp); + tcp_flags = be16_to_cpu(tcp.key->flags); if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) return -EOPNOTSUPP; @@ -347,12 +314,12 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, * space, thus we need to ensure we include a IPv4/IPv6 key * layer if we have not done so already. 
*/ - if (!key_basic) + if (!basic.key) return -EOPNOTSUPP; if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && !(key_layer & NFP_FLOWER_LAYER_IPV6)) { - switch (key_basic->n_proto) { + switch (basic.key->n_proto) { case cpu_to_be16(ETH_P_IP): key_layer |= NFP_FLOWER_LAYER_IPV4; key_size += sizeof(struct nfp_flower_ipv4); @@ -369,14 +336,11 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key_ctl; - - key_ctl = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_CONTROL, - flow->key); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control ctl; - if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) + flow_rule_match_control(rule, &ctl); + if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) return -EOPNOTSUPP; } @@ -589,9 +553,8 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); spin_lock_bh(&priv->stats_lock); - tcf_exts_stats_update(flow->exts, priv->stats[ctx_id].bytes, - priv->stats[ctx_id].pkts, - priv->stats[ctx_id].used); + flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, priv->stats[ctx_id].used); priv->stats[ctx_id].pkts = 0; priv->stats[ctx_id].bytes = 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 2d9f26a725c2..4d78be4ec4e9 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -98,47 +98,51 @@ struct nfp_ipv4_addr_entry { struct list_head list; }; -/** - * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP - * @reserved: reserved for future use - * @count: number of MAC addresses in the message - * @addresses.index: index of MAC address in the lookup table - * @addresses.addr: interface MAC address - * @addresses: series of MACs to offload - */ -struct nfp_tun_mac_addr { - __be16 reserved; - __be16 count; - struct index_mac_addr { - __be16 index; - u8 addr[ETH_ALEN]; - } addresses[]; -}; +#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG 0x2 /** - * struct nfp_tun_mac_offload_entry - list of MACs to offload - * @index: index of MAC address for offloading + * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP + * @flags: MAC address offload options + * @count: number of MAC addresses in the message (should be 1) + * @index: index of MAC address in the lookup table * @addr: interface MAC address - * @list: list pointer */ -struct nfp_tun_mac_offload_entry { +struct nfp_tun_mac_addr_offload { + __be16 flags; + __be16 count; __be16 index; u8 addr[ETH_ALEN]; - struct list_head list; +}; + +enum nfp_flower_mac_offload_cmd { + NFP_TUNNEL_MAC_OFFLOAD_ADD = 0, + NFP_TUNNEL_MAC_OFFLOAD_DEL = 1, + NFP_TUNNEL_MAC_OFFLOAD_MOD = 2, }; #define NFP_MAX_MAC_INDEX 0xff /** - * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id - * @ifindex: netdev ifindex of the device - * @index: index of netdevs mac on NFP - * @list: list pointer + * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC + * @ht_node: Hashtable entry + * @addr: Offloaded MAC address + * @index: Offloaded index for given MAC address + * @ref_count: Number of devs using this MAC address + * @repr_list: List of reprs sharing this MAC address */ -struct nfp_tun_mac_non_nfp_idx { - int ifindex; - u8 index; - struct list_head list; 
+struct nfp_tun_offloaded_mac { + struct rhash_head ht_node; + u8 addr[ETH_ALEN]; + u16 index; + int ref_count; + struct list_head repr_list; +}; + +static const struct rhashtable_params offloaded_macs_params = { + .key_offset = offsetof(struct nfp_tun_offloaded_mac, addr), + .head_offset = offsetof(struct nfp_tun_offloaded_mac, ht_node), + .key_len = ETH_ALEN, + .automatic_shrinking = true, }; void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) @@ -205,15 +209,15 @@ static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr) struct nfp_ipv4_route_entry *entry; struct list_head *ptr, *storage; - spin_lock_bh(&priv->nfp_neigh_off_lock); - list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + spin_lock_bh(&priv->tun.neigh_off_lock); + list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); if (entry->ipv4_addr == ipv4_addr) { - spin_unlock_bh(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->tun.neigh_off_lock); return true; } } - spin_unlock_bh(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->tun.neigh_off_lock); return false; } @@ -223,24 +227,24 @@ static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr) struct nfp_ipv4_route_entry *entry; struct list_head *ptr, *storage; - spin_lock_bh(&priv->nfp_neigh_off_lock); - list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + spin_lock_bh(&priv->tun.neigh_off_lock); + list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); if (entry->ipv4_addr == ipv4_addr) { - spin_unlock_bh(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->tun.neigh_off_lock); return; } } entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { - spin_unlock_bh(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->tun.neigh_off_lock); nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n"); return; } entry->ipv4_addr = ipv4_addr; - list_add_tail(&entry->list, &priv->nfp_neigh_off_list); - spin_unlock_bh(&priv->nfp_neigh_off_lock); + list_add_tail(&entry->list, &priv->tun.neigh_off_list); + spin_unlock_bh(&priv->tun.neigh_off_lock); } static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) @@ -249,8 +253,8 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) struct nfp_ipv4_route_entry *entry; struct list_head *ptr, *storage; - spin_lock_bh(&priv->nfp_neigh_off_lock); - list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + spin_lock_bh(&priv->tun.neigh_off_lock); + list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); if (entry->ipv4_addr == ipv4_addr) { list_del(&entry->list); @@ -258,7 +262,7 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) break; } } - spin_unlock_bh(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->tun.neigh_off_lock); } static void @@ -326,7 +330,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, if (!nfp_netdev_is_nfp_repr(n->dev)) return NOTIFY_DONE; - app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb); + app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); app = app_priv->app; /* Only concerned with changes to routes already added to NFP. 
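
The new nfp_tun_offloaded_mac entries are tracked in an rhashtable keyed directly on the 6-byte MAC address. A minimal sketch of that life cycle, assuming illustrative entry, table and function names (only the parameter layout mirrors offloaded_macs_params above):

	#include <linux/rhashtable.h>
	#include <linux/etherdevice.h>
	#include <linux/slab.h>

	struct mac_entry {
		struct rhash_head ht_node;
		u8 addr[ETH_ALEN];
		int ref_count;
	};

	static const struct rhashtable_params mac_params = {
		.key_offset		= offsetof(struct mac_entry, addr),
		.head_offset		= offsetof(struct mac_entry, ht_node),
		.key_len		= ETH_ALEN,
		.automatic_shrinking	= true,
	};

	/* Sketch: take a reference on an existing entry or insert a new one. */
	static int mac_table_add(struct rhashtable *ht, const u8 *mac)
	{
		struct mac_entry *e;
		int err;

		e = rhashtable_lookup_fast(ht, mac, mac_params);
		if (e) {
			e->ref_count++;	/* already offloaded, share the entry */
			return 0;
		}

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;
		ether_addr_copy(e->addr, mac);
		e->ref_count = 1;

		err = rhashtable_insert_fast(ht, &e->ht_node, mac_params);
		if (err)
			kfree(e);
		return err;
	}
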
*/ @@ -401,11 +405,11 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app) int count; memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr)); - mutex_lock(&priv->nfp_ipv4_off_lock); + mutex_lock(&priv->tun.ipv4_off_lock); count = 0; - list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { if (count >= NFP_FL_IPV4_ADDRS_MAX) { - mutex_unlock(&priv->nfp_ipv4_off_lock); + mutex_unlock(&priv->tun.ipv4_off_lock); nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n"); return; } @@ -413,7 +417,7 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app) payload.ipv4_addr[count++] = entry->ipv4_addr; } payload.count = cpu_to_be32(count); - mutex_unlock(&priv->nfp_ipv4_off_lock); + mutex_unlock(&priv->tun.ipv4_off_lock); nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS, sizeof(struct nfp_tun_ipv4_addr), @@ -426,26 +430,26 @@ void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4) struct nfp_ipv4_addr_entry *entry; struct list_head *ptr, *storage; - mutex_lock(&priv->nfp_ipv4_off_lock); - list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + mutex_lock(&priv->tun.ipv4_off_lock); + list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); if (entry->ipv4_addr == ipv4) { entry->ref_count++; - mutex_unlock(&priv->nfp_ipv4_off_lock); + mutex_unlock(&priv->tun.ipv4_off_lock); return; } } entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { - mutex_unlock(&priv->nfp_ipv4_off_lock); + mutex_unlock(&priv->tun.ipv4_off_lock); nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n"); return; } entry->ipv4_addr = ipv4; entry->ref_count = 1; - list_add_tail(&entry->list, &priv->nfp_ipv4_off_list); - mutex_unlock(&priv->nfp_ipv4_off_lock); + list_add_tail(&entry->list, &priv->tun.ipv4_off_list); + mutex_unlock(&priv->tun.ipv4_off_lock); nfp_tun_write_ipv4_list(app); } @@ -456,8 +460,8 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4) struct nfp_ipv4_addr_entry *entry; struct list_head *ptr, *storage; - mutex_lock(&priv->nfp_ipv4_off_lock); - list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + mutex_lock(&priv->tun.ipv4_off_lock); + list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); if (entry->ipv4_addr == ipv4) { entry->ref_count--; @@ -468,191 +472,357 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4) break; } } - mutex_unlock(&priv->nfp_ipv4_off_lock); + mutex_unlock(&priv->tun.ipv4_off_lock); nfp_tun_write_ipv4_list(app); } -void nfp_tunnel_write_macs(struct nfp_app *app) +static int +__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del) { - struct nfp_flower_priv *priv = app->priv; - struct nfp_tun_mac_offload_entry *entry; - struct nfp_tun_mac_addr *payload; - struct list_head *ptr, *storage; - int mac_count, err, pay_size; + struct nfp_tun_mac_addr_offload payload; - mutex_lock(&priv->nfp_mac_off_lock); - if (!priv->nfp_mac_off_count) { - mutex_unlock(&priv->nfp_mac_off_lock); - return; - } + memset(&payload, 0, sizeof(payload)); - pay_size = sizeof(struct nfp_tun_mac_addr) + - sizeof(struct index_mac_addr) * priv->nfp_mac_off_count; + if (del) + payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG); - payload = kzalloc(pay_size, GFP_KERNEL); - if (!payload) { - mutex_unlock(&priv->nfp_mac_off_lock); - return; - } + /* FW supports multiple MACs per cmsg but restrict to single. 
*/ + payload.count = cpu_to_be16(1); + payload.index = cpu_to_be16(idx); + ether_addr_copy(payload.addr, mac); - payload->count = cpu_to_be16(priv->nfp_mac_off_count); + return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC, + sizeof(struct nfp_tun_mac_addr_offload), + &payload, GFP_KERNEL); +} - mac_count = 0; - list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { - entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, - list); - payload->addresses[mac_count].index = entry->index; - ether_addr_copy(payload->addresses[mac_count].addr, - entry->addr); - mac_count++; - } +static bool nfp_tunnel_port_is_phy_repr(int port) +{ + if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) == + NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) + return true; - err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC, - pay_size, payload, GFP_KERNEL); + return false; +} - kfree(payload); +static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port) +{ + return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT; +} - if (err) { - mutex_unlock(&priv->nfp_mac_off_lock); - /* Write failed so retain list for future retry. */ - return; - } +static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id) +{ + return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT; +} + +static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx) +{ + return nfp_mac_idx >> 8; +} + +static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx) +{ + return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT; +} + +static struct nfp_tun_offloaded_mac * +nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac) +{ + struct nfp_flower_priv *priv = app->priv; + + return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac, + offloaded_macs_params); +} + +static void +nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry, + struct net_device *netdev, bool mod) +{ + if (nfp_netdev_is_nfp_repr(netdev)) { + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *repr; - /* If list was successfully offloaded, flush it. */ - list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { - entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, - list); - list_del(&entry->list); - kfree(entry); + repr = netdev_priv(netdev); + repr_priv = repr->app_priv; + + /* If modifing MAC, remove repr from old list first. 
*/ + if (mod) + list_del(&repr_priv->mac_list); + + list_add_tail(&repr_priv->mac_list, &entry->repr_list); } - priv->nfp_mac_off_count = 0; - mutex_unlock(&priv->nfp_mac_off_lock); + entry->ref_count++; } -static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex) +static int +nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, + int port, bool mod) { struct nfp_flower_priv *priv = app->priv; - struct nfp_tun_mac_non_nfp_idx *entry; - struct list_head *ptr, *storage; - int idx; - - mutex_lock(&priv->nfp_mac_index_lock); - list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { - entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list); - if (entry->ifindex == ifindex) { - idx = entry->index; - mutex_unlock(&priv->nfp_mac_index_lock); - return idx; - } + int ida_idx = NFP_MAX_MAC_INDEX, err; + struct nfp_tun_offloaded_mac *entry; + u16 nfp_mac_idx = 0; + + entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); + if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod); + return 0; } - idx = ida_simple_get(&priv->nfp_mac_off_ids, 0, - NFP_MAX_MAC_INDEX, GFP_KERNEL); - if (idx < 0) { - mutex_unlock(&priv->nfp_mac_index_lock); - return idx; + /* Assign a global index if non-repr or MAC address is now shared. */ + if (entry || !port) { + ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, + NFP_MAX_MAC_INDEX, GFP_KERNEL); + if (ida_idx < 0) + return ida_idx; + + nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx); + } else { + nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port); } - entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { - mutex_unlock(&priv->nfp_mac_index_lock); - return -ENOMEM; + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto err_free_ida; + } + + ether_addr_copy(entry->addr, netdev->dev_addr); + INIT_LIST_HEAD(&entry->repr_list); + + if (rhashtable_insert_fast(&priv->tun.offloaded_macs, + &entry->ht_node, + offloaded_macs_params)) { + err = -ENOMEM; + goto err_free_entry; + } + } + + err = __nfp_tunnel_offload_mac(app, netdev->dev_addr, + nfp_mac_idx, false); + if (err) { + /* If not shared then free. */ + if (!entry->ref_count) + goto err_remove_hash; + goto err_free_ida; } - entry->ifindex = ifindex; - entry->index = idx; - list_add_tail(&entry->list, &priv->nfp_mac_index_list); - mutex_unlock(&priv->nfp_mac_index_lock); - return idx; + entry->index = nfp_mac_idx; + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod); + + return 0; + +err_remove_hash: + rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node, + offloaded_macs_params); +err_free_entry: + kfree(entry); +err_free_ida: + if (ida_idx != NFP_MAX_MAC_INDEX) + ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); + + return err; } -static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex) +static int +nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev, + u8 *mac, bool mod) { struct nfp_flower_priv *priv = app->priv; - struct nfp_tun_mac_non_nfp_idx *entry; - struct list_head *ptr, *storage; + struct nfp_flower_repr_priv *repr_priv; + struct nfp_tun_offloaded_mac *entry; + struct nfp_repr *repr; + int ida_idx; + + entry = nfp_tunnel_lookup_offloaded_macs(app, mac); + if (!entry) + return 0; + + entry->ref_count--; + /* If del is part of a mod then mac_list is still in use elsewheree. 
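
Shared MAC addresses that are not tied to a single physical port draw their identifier from an ida, as nfp_tunnel_add_shared_mac() does above with tun.mac_off_ids. A small sketch of that allocator pattern, with an illustrative range limit:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDA(example_ids);

	/* Sketch: allocate a unique id in [0, 255), release it when done. */
	static int example_use_id(void)
	{
		int id;

		id = ida_simple_get(&example_ids, 0, 255, GFP_KERNEL);
		if (id < 0)
			return id;	/* -ENOSPC once the range is exhausted */

		/* ... use id, e.g. as the high bits of an offload index ... */

		ida_simple_remove(&example_ids, id);
		return 0;
	}
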
*/ + if (nfp_netdev_is_nfp_repr(netdev) && !mod) { + repr = netdev_priv(netdev); + repr_priv = repr->app_priv; + list_del(&repr_priv->mac_list); + } - mutex_lock(&priv->nfp_mac_index_lock); - list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { - entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list); - if (entry->ifindex == ifindex) { - ida_simple_remove(&priv->nfp_mac_off_ids, - entry->index); - list_del(&entry->list); - kfree(entry); - break; + /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ + if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { + u16 nfp_mac_idx; + int port, err; + + repr_priv = list_first_entry(&entry->repr_list, + struct nfp_flower_repr_priv, + mac_list); + repr = repr_priv->nfp_repr; + port = nfp_repr_get_port_id(repr->netdev); + nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port); + err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false); + if (err) { + nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", + netdev_name(netdev)); + return 0; } + + ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); + ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); + entry->index = nfp_mac_idx; + return 0; } - mutex_unlock(&priv->nfp_mac_index_lock); -} -static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev, - struct nfp_app *app) -{ - struct nfp_flower_priv *priv = app->priv; - struct nfp_tun_mac_offload_entry *entry; - u16 nfp_mac_idx; - int port = 0; + if (entry->ref_count) + return 0; - /* Check if MAC should be offloaded. */ - if (!is_valid_ether_addr(netdev->dev_addr)) - return; + WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs, + &entry->ht_node, + offloaded_macs_params)); + /* If MAC has global ID then extract and free the ida entry. 
*/ + if (nfp_tunnel_is_mac_idx_global(entry->index)) { + ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); + ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); + } - if (nfp_netdev_is_nfp_repr(netdev)) + kfree(entry); + + return __nfp_tunnel_offload_mac(app, mac, 0, true); +} + +static int +nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev, + enum nfp_flower_mac_offload_cmd cmd) +{ + struct nfp_flower_non_repr_priv *nr_priv = NULL; + bool non_repr = false, *mac_offloaded; + u8 *off_mac = NULL; + int err, port = 0; + + if (nfp_netdev_is_nfp_repr(netdev)) { + struct nfp_flower_repr_priv *repr_priv; + struct nfp_repr *repr; + + repr = netdev_priv(netdev); + if (repr->app != app) + return 0; + + repr_priv = repr->app_priv; + mac_offloaded = &repr_priv->mac_offloaded; + off_mac = &repr_priv->offloaded_mac_addr[0]; port = nfp_repr_get_port_id(netdev); - else if (!nfp_fl_is_netdev_to_offload(netdev)) - return; + if (!nfp_tunnel_port_is_phy_repr(port)) + return 0; + } else if (nfp_fl_is_netdev_to_offload(netdev)) { + nr_priv = nfp_flower_non_repr_priv_get(app, netdev); + if (!nr_priv) + return -ENOMEM; + + mac_offloaded = &nr_priv->mac_offloaded; + off_mac = &nr_priv->offloaded_mac_addr[0]; + non_repr = true; + } else { + return 0; + } - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { - nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n"); - return; + if (!is_valid_ether_addr(netdev->dev_addr)) { + err = -EINVAL; + goto err_put_non_repr_priv; } - if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) == - NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) { - nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT; - } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) == - NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) { - port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port); - nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT; - } else { - /* Must assign our own unique 8-bit index. */ - int idx = nfp_tun_get_mac_idx(app, netdev->ifindex); + if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded) + cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD; - if (idx < 0) { - nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n"); - kfree(entry); - return; - } - nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT; + switch (cmd) { + case NFP_TUNNEL_MAC_OFFLOAD_ADD: + err = nfp_tunnel_add_shared_mac(app, netdev, port, false); + if (err) + goto err_put_non_repr_priv; + + if (non_repr) + __nfp_flower_non_repr_priv_get(nr_priv); + + *mac_offloaded = true; + ether_addr_copy(off_mac, netdev->dev_addr); + break; + case NFP_TUNNEL_MAC_OFFLOAD_DEL: + /* Only attempt delete if add was successful. */ + if (!*mac_offloaded) + break; + + if (non_repr) + __nfp_flower_non_repr_priv_put(nr_priv); + + *mac_offloaded = false; + + err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr, + false); + if (err) + goto err_put_non_repr_priv; + + break; + case NFP_TUNNEL_MAC_OFFLOAD_MOD: + /* Ignore if changing to the same address. */ + if (ether_addr_equal(netdev->dev_addr, off_mac)) + break; + + err = nfp_tunnel_add_shared_mac(app, netdev, port, true); + if (err) + goto err_put_non_repr_priv; + + /* Delete the previous MAC address. 
*/ + err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true); + if (err) + nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n", + netdev_name(netdev)); + + ether_addr_copy(off_mac, netdev->dev_addr); + break; + default: + err = -EINVAL; + goto err_put_non_repr_priv; } - entry->index = cpu_to_be16(nfp_mac_idx); - ether_addr_copy(entry->addr, netdev->dev_addr); + if (non_repr) + __nfp_flower_non_repr_priv_put(nr_priv); + + return 0; + +err_put_non_repr_priv: + if (non_repr) + __nfp_flower_non_repr_priv_put(nr_priv); - mutex_lock(&priv->nfp_mac_off_lock); - priv->nfp_mac_off_count++; - list_add_tail(&entry->list, &priv->nfp_mac_off_list); - mutex_unlock(&priv->nfp_mac_off_lock); + return err; } int nfp_tunnel_mac_event_handler(struct nfp_app *app, struct net_device *netdev, unsigned long event, void *ptr) { - if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) { - /* If non-nfp netdev then free its offload index. */ - if (nfp_fl_is_netdev_to_offload(netdev)) - nfp_tun_del_mac_idx(app, netdev->ifindex); - } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR || - event == NETDEV_REGISTER) { - nfp_tun_add_to_mac_offload_list(netdev, app); - - /* Force a list write to keep NFP up to date. */ - nfp_tunnel_write_macs(app); + int err; + + if (event == NETDEV_DOWN) { + err = nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_DEL); + if (err) + nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n", + netdev_name(netdev)); + } else if (event == NETDEV_UP) { + err = nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_ADD); + if (err) + nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", + netdev_name(netdev)); + } else if (event == NETDEV_CHANGEADDR) { + /* Only offload addr change if netdev is already up. */ + if (!(netdev->flags & IFF_UP)) + return NOTIFY_OK; + + err = nfp_tunnel_offload_mac(app, netdev, + NFP_TUNNEL_MAC_OFFLOAD_MOD); + if (err) + nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", + netdev_name(netdev)); } return NOTIFY_OK; } @@ -660,68 +830,62 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app, int nfp_tunnel_config_start(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; + int err; + + /* Initialise rhash for MAC offload tracking. */ + err = rhashtable_init(&priv->tun.offloaded_macs, + &offloaded_macs_params); + if (err) + return err; - /* Initialise priv data for MAC offloading. */ - priv->nfp_mac_off_count = 0; - mutex_init(&priv->nfp_mac_off_lock); - INIT_LIST_HEAD(&priv->nfp_mac_off_list); - mutex_init(&priv->nfp_mac_index_lock); - INIT_LIST_HEAD(&priv->nfp_mac_index_list); - ida_init(&priv->nfp_mac_off_ids); + ida_init(&priv->tun.mac_off_ids); /* Initialise priv data for IPv4 offloading. */ - mutex_init(&priv->nfp_ipv4_off_lock); - INIT_LIST_HEAD(&priv->nfp_ipv4_off_list); + mutex_init(&priv->tun.ipv4_off_lock); + INIT_LIST_HEAD(&priv->tun.ipv4_off_list); /* Initialise priv data for neighbour offloading. 
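
The reworked nfp_tunnel_mac_event_handler() above maps netdev lifecycle events onto offload actions: UP adds, DOWN deletes, and CHANGEADDR modifies only if the device is already up. A generic sketch of that dispatch shape, assuming a plain notifier callback rather than the driver's central handler:

	#include <linux/netdevice.h>

	/* Sketch: map netdev events onto offload add/del/mod actions. */
	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

		switch (event) {
		case NETDEV_UP:
			/* offload the (now valid) MAC address */
			break;
		case NETDEV_DOWN:
			/* remove the offloaded MAC address */
			break;
		case NETDEV_CHANGEADDR:
			if (!(netdev->flags & IFF_UP))
				break;	/* only track running devices */
			/* replace the previously offloaded MAC address */
			break;
		}
		return NOTIFY_OK;
	}
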
*/ - spin_lock_init(&priv->nfp_neigh_off_lock); - INIT_LIST_HEAD(&priv->nfp_neigh_off_list); - priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler; + spin_lock_init(&priv->tun.neigh_off_lock); + INIT_LIST_HEAD(&priv->tun.neigh_off_list); + priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler; + + err = register_netevent_notifier(&priv->tun.neigh_nb); + if (err) { + rhashtable_free_and_destroy(&priv->tun.offloaded_macs, + nfp_check_rhashtable_empty, NULL); + return err; + } - return register_netevent_notifier(&priv->nfp_tun_neigh_nb); + return 0; } void nfp_tunnel_config_stop(struct nfp_app *app) { - struct nfp_tun_mac_offload_entry *mac_entry; struct nfp_flower_priv *priv = app->priv; struct nfp_ipv4_route_entry *route_entry; - struct nfp_tun_mac_non_nfp_idx *mac_idx; struct nfp_ipv4_addr_entry *ip_entry; struct list_head *ptr, *storage; - unregister_netevent_notifier(&priv->nfp_tun_neigh_nb); - - /* Free any memory that may be occupied by MAC list. */ - list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { - mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, - list); - list_del(&mac_entry->list); - kfree(mac_entry); - } - - /* Free any memory that may be occupied by MAC index list. */ - list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { - mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, - list); - list_del(&mac_idx->list); - kfree(mac_idx); - } + unregister_netevent_notifier(&priv->tun.neigh_nb); - ida_destroy(&priv->nfp_mac_off_ids); + ida_destroy(&priv->tun.mac_off_ids); /* Free any memory that may be occupied by ipv4 list. */ - list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); list_del(&ip_entry->list); kfree(ip_entry); } /* Free any memory that may be occupied by the route list. */ - list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) { route_entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); list_del(&route_entry->list); kfree(route_entry); } + + /* Destroy rhash. Entries should be cleaned on netdev notifier unreg. 
*/ + rhashtable_free_and_destroy(&priv->tun.offloaded_macs, + nfp_check_rhashtable_empty, NULL); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index d578d856a009..f8d422713705 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -433,4 +433,6 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, struct nfp_net *nn, unsigned int id); +struct devlink *nfp_devlink_get_devlink(struct net_device *netdev); + #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 808647ec3573..e9eca99cf493 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -4,6 +4,7 @@ #include #include +#include "nfpcore/nfp.h" #include "nfpcore/nfp_nsp.h" #include "nfp_app.h" #include "nfp_main.h" @@ -171,6 +172,173 @@ static int nfp_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, return ret; } +static const struct nfp_devlink_versions_simple { + const char *key; + const char *hwinfo; +} nfp_devlink_versions_hwinfo[] = { + { DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, "assembly.partno", }, + { DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, "assembly.revision", }, + { DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE, "assembly.vendor", }, + { "board.model", /* code name */ "assembly.model", }, +}; + +static int +nfp_devlink_versions_get_hwinfo(struct nfp_pf *pf, struct devlink_info_req *req) +{ + unsigned int i; + int err; + + for (i = 0; i < ARRAY_SIZE(nfp_devlink_versions_hwinfo); i++) { + const struct nfp_devlink_versions_simple *info; + const char *val; + + info = &nfp_devlink_versions_hwinfo[i]; + + val = nfp_hwinfo_lookup(pf->hwinfo, info->hwinfo); + if (!val) + continue; + + err = devlink_info_version_fixed_put(req, info->key, val); + if (err) + return err; + } + + return 0; +} + +static const struct nfp_devlink_versions { + enum nfp_nsp_versions id; + const char *key; +} nfp_devlink_versions_nsp[] = { + { NFP_VERSIONS_BUNDLE, "fw.bundle_id", }, + { NFP_VERSIONS_BSP, DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, }, + { NFP_VERSIONS_CPLD, "fw.cpld", }, + { NFP_VERSIONS_APP, DEVLINK_INFO_VERSION_GENERIC_FW_APP, }, + { NFP_VERSIONS_UNDI, DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, }, + { NFP_VERSIONS_NCSI, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, }, + { NFP_VERSIONS_CFGR, "chip.init", }, +}; + +static int +nfp_devlink_versions_get_nsp(struct devlink_info_req *req, bool flash, + const u8 *buf, unsigned int size) +{ + unsigned int i; + int err; + + for (i = 0; i < ARRAY_SIZE(nfp_devlink_versions_nsp); i++) { + const struct nfp_devlink_versions *info; + const char *version; + + info = &nfp_devlink_versions_nsp[i]; + + version = nfp_nsp_versions_get(info->id, flash, buf, size); + if (IS_ERR(version)) { + if (PTR_ERR(version) == -ENOENT) + continue; + else + return PTR_ERR(version); + } + + if (flash) + err = devlink_info_version_stored_put(req, info->key, + version); + else + err = devlink_info_version_running_put(req, info->key, + version); + if (err) + return err; + } + + return 0; +} + +static int +nfp_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct nfp_pf *pf = devlink_priv(devlink); + const char *sn, *vendor, *part; + struct nfp_nsp *nsp; + char *buf = NULL; + int err; + + err = devlink_info_driver_name_put(req, "nfp"); + if 
(err) + return err; + + vendor = nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"); + part = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"); + sn = nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"); + if (vendor && part && sn) { + char *buf; + + buf = kmalloc(strlen(vendor) + strlen(part) + strlen(sn) + 1, + GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = '\0'; + strcat(buf, vendor); + strcat(buf, part); + strcat(buf, sn); + + err = devlink_info_serial_number_put(req, buf); + kfree(buf); + if (err) + return err; + } + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + NL_SET_ERR_MSG_MOD(extack, "can't access NSP"); + return PTR_ERR(nsp); + } + + if (nfp_nsp_has_versions(nsp)) { + buf = kzalloc(NFP_NSP_VERSION_BUFSZ, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_close_nsp; + } + + err = nfp_nsp_versions(nsp, buf, NFP_NSP_VERSION_BUFSZ); + if (err) + goto err_free_buf; + + err = nfp_devlink_versions_get_nsp(req, false, + buf, NFP_NSP_VERSION_BUFSZ); + if (err) + goto err_free_buf; + + err = nfp_devlink_versions_get_nsp(req, true, + buf, NFP_NSP_VERSION_BUFSZ); + if (err) + goto err_free_buf; + + kfree(buf); + } + + nfp_nsp_close(nsp); + + return nfp_devlink_versions_get_hwinfo(pf, req); + +err_free_buf: + kfree(buf); +err_close_nsp: + nfp_nsp_close(nsp); + return err; +} + +static int +nfp_devlink_flash_update(struct devlink *devlink, const char *path, + const char *component, struct netlink_ext_ack *extack) +{ + if (component) + return -EOPNOTSUPP; + return nfp_flash_update_common(devlink_priv(devlink), path, extack); +} + const struct devlink_ops nfp_devlink_ops = { .port_split = nfp_devlink_port_split, .port_unsplit = nfp_devlink_port_unsplit, @@ -178,6 +346,8 @@ const struct devlink_ops nfp_devlink_ops = { .sb_pool_set = nfp_devlink_sb_pool_set, .eswitch_mode_get = nfp_devlink_eswitch_mode_get, .eswitch_mode_set = nfp_devlink_eswitch_mode_set, + .info_get = nfp_devlink_info_get, + .flash_update = nfp_devlink_flash_update, }; int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) @@ -206,3 +376,14 @@ void nfp_devlink_port_unregister(struct nfp_port *port) { devlink_port_unregister(&port->dl_port); } + +struct devlink *nfp_devlink_get_devlink(struct net_device *netdev) +{ + struct nfp_app *app; + + app = nfp_app_from_netdev(netdev); + if (!app) + return NULL; + + return priv_to_devlink(app->pf); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 6c10e8d119e4..f4c8776e42b6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -300,6 +300,47 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) return nfp_pcie_sriov_enable(pdev, num_vfs); } +int nfp_flash_update_common(struct nfp_pf *pf, const char *path, + struct netlink_ext_ack *extack) +{ + struct device *dev = &pf->pdev->dev; + const struct firmware *fw; + struct nfp_nsp *nsp; + int err; + + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + err = PTR_ERR(nsp); + if (extack) + NL_SET_ERR_MSG_MOD(extack, "can't access NSP"); + else + dev_err(dev, "Failed to access the NSP: %d\n", err); + return err; + } + + err = request_firmware_direct(&fw, path, dev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "unable to read flash file from disk"); + goto exit_close_nsp; + } + + dev_info(dev, "Please be patient while writing flash image: %s\n", + path); + + err = nfp_nsp_write_flash(nsp, fw); + if (err < 0) + goto exit_release_fw; + dev_info(dev, "Finished writing flash 
image\n"); + err = 0; + +exit_release_fw: + release_firmware(fw); +exit_close_nsp: + nfp_nsp_close(nsp); + return err; +} + static const struct firmware * nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index a3613a2e0aa5..b7211f200d22 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -164,6 +164,8 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, void *out_data, u64 out_length); +int nfp_flash_update_common(struct nfp_pf *pf, const char *path, + struct netlink_ext_ack *extack); enum nfp_dump_diag { NFP_DUMP_NSP_DIAG = 0, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 7d2d4241498f..6d1b8816552e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -36,7 +36,6 @@ #include #include -#include #include #include "nfpcore/nfp_nsp.h" @@ -3531,6 +3530,8 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, .ndo_bpf = nfp_net_xdp, + .ndo_get_port_parent_id = nfp_port_get_port_parent_id, + .ndo_get_devlink = nfp_devlink_get_devlink, }; /** @@ -3815,8 +3816,6 @@ static void nfp_net_netdev_init(struct nfp_net *nn) netdev->netdev_ops = &nfp_net_netdev_ops; netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000); - SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); - /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = nn->max_mtu; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 166d7f71442e..372adea10e14 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -392,7 +392,7 @@ #define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0 #define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4 #define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8 -#define NFP_NET_CFG_MBOX_SIMPLE_LEN 0x12 +#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index cb9c512abc76..690b62718dbb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1234,57 +1234,6 @@ static int nfp_net_set_channels(struct net_device *netdev, return nfp_net_set_num_rings(nn, total_rx, total_tx); } -static int -nfp_net_flash_device(struct net_device *netdev, struct ethtool_flash *flash) -{ - const struct firmware *fw; - struct nfp_app *app; - struct nfp_nsp *nsp; - struct device *dev; - int err; - - if (flash->region != ETHTOOL_FLASH_ALL_REGIONS) - return -EOPNOTSUPP; - - app = nfp_app_from_netdev(netdev); - if (!app) - return -EOPNOTSUPP; - - dev = &app->pdev->dev; - - nsp = nfp_nsp_open(app->cpp); - if (IS_ERR(nsp)) { - err = PTR_ERR(nsp); - dev_err(dev, "Failed to access the NSP: %d\n", err); - return err; - } - - err = request_firmware_direct(&fw, flash->data, dev); - if (err) - goto exit_close_nsp; - - dev_info(dev, "Please be patient while writing flash image: 
%s\n", - flash->data); - dev_hold(netdev); - rtnl_unlock(); - - err = nfp_nsp_write_flash(nsp, fw); - if (err < 0) { - dev_err(dev, "Flash write failed: %d\n", err); - goto exit_rtnl_lock; - } - dev_info(dev, "Finished writing flash image\n"); - -exit_rtnl_lock: - rtnl_lock(); - dev_put(netdev); - release_firmware(fw); - -exit_close_nsp: - nfp_nsp_close(nsp); - return err; -} - static const struct ethtool_ops nfp_net_ethtool_ops = { .get_drvinfo = nfp_net_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1295,7 +1244,6 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .get_sset_count = nfp_net_get_sset_count, .get_rxnfc = nfp_net_get_rxnfc, .set_rxnfc = nfp_net_set_rxnfc, - .flash_device = nfp_net_flash_device, .get_rxfh_indir_size = nfp_net_get_rxfh_indir_size, .get_rxfh_key_size = nfp_net_get_rxfh_key_size, .get_rxfh = nfp_net_get_rxfh, @@ -1321,7 +1269,6 @@ const struct ethtool_ops nfp_port_ethtool_ops = { .get_strings = nfp_port_get_strings, .get_ethtool_stats = nfp_port_get_stats, .get_sset_count = nfp_port_get_sset_count, - .flash_device = nfp_net_flash_device, .set_dump = nfp_app_set_dump, .get_dump_flag = nfp_app_get_dump_flag, .get_dump_data = nfp_app_get_dump_data, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 69d7aebda09b..d2c803bb4e56 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -5,7 +5,6 @@ #include #include #include -#include #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nsp.h" @@ -273,6 +272,8 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_fix_features = nfp_repr_fix_features, .ndo_set_features = nfp_port_set_features, .ndo_set_mac_address = eth_mac_addr, + .ndo_get_port_parent_id = nfp_port_get_port_parent_id, + .ndo_get_devlink = nfp_devlink_get_devlink, }; void @@ -336,8 +337,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, netdev->max_mtu = pf_netdev->max_mtu; - SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); - /* Set features the lower device can support with representors */ if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR) netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 86bc149ca231..93c5bfc0510b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -3,7 +3,6 @@ #include #include -#include #include "nfpcore/nfp_cpp.h" #include "nfpcore/nfp_nsp.h" @@ -31,34 +30,22 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev) return NULL; } -static int -nfp_port_attr_get(struct net_device *netdev, struct switchdev_attr *attr) +int nfp_port_get_port_parent_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid) { struct nfp_port *port; + const u8 *serial; port = nfp_port_from_netdev(netdev); if (!port) return -EOPNOTSUPP; - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: { - const u8 *serial; - /* N.B: attr->u.ppid.id is binary data */ - attr->u.ppid.id_len = nfp_cpp_serial(port->app->cpp, &serial); - memcpy(&attr->u.ppid.id, serial, attr->u.ppid.id_len); - break; - } - default: - return -EOPNOTSUPP; - } + ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial); + memcpy(&ppid->id, serial, ppid->id_len); return 0; } -const struct switchdev_ops nfp_port_switchdev_ops = { - .switchdev_port_attr_get = nfp_port_attr_get, -}; - int nfp_port_setup_tc(struct net_device *netdev, enum 
tc_setup_type type, void *type_data) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index b2479a2a49e5..90ae053f5c07 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -7,6 +7,7 @@ #include struct net_device; +struct netdev_phys_item_id; struct nfp_app; struct nfp_pf; struct nfp_port; @@ -90,7 +91,6 @@ struct nfp_port { }; extern const struct ethtool_ops nfp_port_ethtool_ops; -extern const struct switchdev_ops nfp_port_switchdev_ops; __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...); @@ -106,6 +106,8 @@ int nfp_port_set_features(struct net_device *netdev, netdev_features_t features); struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); +int nfp_port_get_port_parent_id(struct net_device *netdev, + struct netdev_phys_item_id *ppid); struct nfp_port * nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id); struct nfp_eth_table_port *__nfp_port_get_eth_port(struct nfp_port *port); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c index 814360ed3a20..ea2e3f829aba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_shared_buf.c @@ -48,6 +48,7 @@ int nfp_shared_buf_pool_get(struct nfp_pf *pf, unsigned int sb, u16 pool_index, pool_info->pool_type = le32_to_cpu(get_data.pool_type); pool_info->threshold_type = le32_to_cpu(get_data.threshold_type); pool_info->size = le32_to_cpu(get_data.size) * unit_size; + pool_info->cell_size = unit_size; return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index ce1577bbbd2a..3a4e224a64b7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -7,11 +7,13 @@ * Jason McMullan */ +#include #include #include #include #include #include +#include #include #include @@ -36,6 +38,7 @@ #define NSP_COMMAND 0x08 #define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) #define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_DMA_BUF BIT_ULL(1) #define NSP_COMMAND_START BIT_ULL(0) /* CPP address to retrieve the data from */ @@ -48,8 +51,12 @@ #define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0) #define NSP_DFLT_BUFFER_CONFIG 0x20 +#define NSP_DFLT_BUFFER_DMA_CHUNK_ORDER GENMASK_ULL(63, 58) +#define NSP_DFLT_BUFFER_SIZE_4KB GENMASK_ULL(15, 8) #define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) +#define NFP_CAP_CMD_DMA_SG 0x28 + #define NSP_MAGIC 0xab10 #define NSP_MAJOR 0 #define NSP_MINOR 8 @@ -62,6 +69,16 @@ #define NFP_HWINFO_LOOKUP_SIZE GENMASK(11, 0) +#define NFP_VERSIONS_SIZE GENMASK(11, 0) +#define NFP_VERSIONS_CNT_OFF 0 +#define NFP_VERSIONS_BSP_OFF 2 +#define NFP_VERSIONS_CPLD_OFF 6 +#define NFP_VERSIONS_APP_OFF 10 +#define NFP_VERSIONS_BUNDLE_OFF 14 +#define NFP_VERSIONS_UNDI_OFF 18 +#define NFP_VERSIONS_NCSI_OFF 22 +#define NFP_VERSIONS_CFGR_OFF 26 + enum nfp_nsp_cmd { SPCODE_NOOP = 0, /* No operation */ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ @@ -77,6 +94,17 @@ enum nfp_nsp_cmd { SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. 
*/ + SPCODE_VERSIONS = 21, /* Report FW versions */ +}; + +struct nfp_nsp_dma_buf { + __le32 chunk_cnt; + __le32 reserved[3]; + struct { + __le32 size; + __le32 reserved; + __le64 addr; + } descs[]; }; static const struct { @@ -107,18 +135,18 @@ struct nfp_nsp { /** * struct nfp_nsp_command_arg - NFP command argument structure * @code: NFP SP Command Code + * @dma: @buf points to a host buffer, not NSP buffer * @timeout_sec:Timeout value to wait for completion in seconds * @option: NFP SP Command Argument - * @buff_cpp: NFP SP Buffer CPP Address info - * @buff_addr: NFP SP Buffer Host address + * @buf: NFP SP Buffer Address * @error_cb: Callback for interpreting option if error occurred */ struct nfp_nsp_command_arg { u16 code; + bool dma; unsigned int timeout_sec; u32 option; - u32 buff_cpp; - u64 buff_addr; + u64 buf; void (*error_cb)(struct nfp_nsp *state, u32 ret_val); }; @@ -332,22 +360,14 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) if (err) return err; - if (!FIELD_FIT(NSP_BUFFER_CPP, arg->buff_cpp >> 8) || - !FIELD_FIT(NSP_BUFFER_ADDRESS, arg->buff_addr)) { - nfp_err(cpp, "Host buffer out of reach %08x %016llx\n", - arg->buff_cpp, arg->buff_addr); - return -EINVAL; - } - - err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, - FIELD_PREP(NSP_BUFFER_CPP, arg->buff_cpp >> 8) | - FIELD_PREP(NSP_BUFFER_ADDRESS, arg->buff_addr)); + err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, arg->buf); if (err < 0) return err; err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | FIELD_PREP(NSP_COMMAND_CODE, arg->code) | + FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) | FIELD_PREP(NSP_COMMAND_START, 1)); if (err < 0) return err; @@ -399,36 +419,14 @@ static int nfp_nsp_command(struct nfp_nsp *state, u16 code) } static int -nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg) +nfp_nsp_command_buf_def(struct nfp_nsp *nsp, + struct nfp_nsp_command_buf_arg *arg) { struct nfp_cpp *cpp = nsp->cpp; - unsigned int max_size; u64 reg, cpp_buf; - int ret, err; + int err, ret; u32 cpp_id; - if (nsp->ver.minor < 13) { - nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n", - arg->arg.code, nsp->ver.major, nsp->ver.minor); - return -EOPNOTSUPP; - } - - err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), - nfp_resource_address(nsp->res) + - NSP_DFLT_BUFFER_CONFIG, - ®); - if (err < 0) - return err; - - max_size = max(arg->in_size, arg->out_size); - if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { - nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n", - arg->arg.code, - FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, - max_size); - return -EINVAL; - } - err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), nfp_resource_address(nsp->res) + NSP_DFLT_BUFFER, @@ -447,15 +445,21 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg) } /* Zero out remaining part of the buffer */ if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) { - memset(arg->out_buf, 0, arg->out_size - arg->in_size); err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size, arg->out_buf, arg->out_size - arg->in_size); if (err < 0) return err; } - arg->arg.buff_cpp = cpp_id; - arg->arg.buff_addr = cpp_buf; + if (!FIELD_FIT(NSP_BUFFER_CPP, cpp_id >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, cpp_buf)) { + nfp_err(cpp, "Buffer out of reach %08x %016llx\n", + cpp_id, cpp_buf); + return -EINVAL; + } + + arg->arg.buf = FIELD_PREP(NSP_BUFFER_CPP, cpp_id >> 8) | 
+ FIELD_PREP(NSP_BUFFER_ADDRESS, cpp_buf); ret = __nfp_nsp_command(nsp, &arg->arg); if (ret < 0) return ret; @@ -470,6 +474,210 @@ nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg) return ret; } +static int +nfp_nsp_command_buf_dma_sg(struct nfp_nsp *nsp, + struct nfp_nsp_command_buf_arg *arg, + unsigned int max_size, unsigned int chunk_order, + unsigned int dma_order) +{ + struct nfp_cpp *cpp = nsp->cpp; + struct nfp_nsp_dma_buf *desc; + struct { + dma_addr_t dma_addr; + unsigned long len; + void *chunk; + } *chunks; + size_t chunk_size, dma_size; + dma_addr_t dma_desc; + struct device *dev; + unsigned long off; + int i, ret, nseg; + size_t desc_sz; + + chunk_size = BIT_ULL(chunk_order); + dma_size = BIT_ULL(dma_order); + nseg = DIV_ROUND_UP(max_size, chunk_size); + + chunks = kzalloc(array_size(sizeof(*chunks), nseg), GFP_KERNEL); + if (!chunks) + return -ENOMEM; + + off = 0; + ret = -ENOMEM; + for (i = 0; i < nseg; i++) { + unsigned long coff; + + chunks[i].chunk = kmalloc(chunk_size, + GFP_KERNEL | __GFP_NOWARN); + if (!chunks[i].chunk) + goto exit_free_prev; + + chunks[i].len = min_t(u64, chunk_size, max_size - off); + + coff = 0; + if (arg->in_size > off) { + coff = min_t(u64, arg->in_size - off, chunk_size); + memcpy(chunks[i].chunk, arg->in_buf + off, coff); + } + memset(chunks[i].chunk + coff, 0, chunk_size - coff); + + off += chunks[i].len; + } + + dev = nfp_cpp_device(cpp)->parent; + + for (i = 0; i < nseg; i++) { + dma_addr_t addr; + + addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len, + DMA_BIDIRECTIONAL); + chunks[i].dma_addr = addr; + + ret = dma_mapping_error(dev, addr); + if (ret) + goto exit_unmap_prev; + + if (WARN_ONCE(round_down(addr, dma_size) != + round_down(addr + chunks[i].len - 1, dma_size), + "unaligned DMA address: %pad %lu %zd\n", + &addr, chunks[i].len, dma_size)) { + ret = -EFAULT; + i++; + goto exit_unmap_prev; + } + } + + desc_sz = struct_size(desc, descs, nseg); + desc = kmalloc(desc_sz, GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto exit_unmap_all; + } + + desc->chunk_cnt = cpu_to_le32(nseg); + for (i = 0; i < nseg; i++) { + desc->descs[i].size = cpu_to_le32(chunks[i].len); + desc->descs[i].addr = cpu_to_le64(chunks[i].dma_addr); + } + + dma_desc = dma_map_single(dev, desc, desc_sz, DMA_TO_DEVICE); + ret = dma_mapping_error(dev, dma_desc); + if (ret) + goto exit_free_desc; + + arg->arg.dma = true; + arg->arg.buf = dma_desc; + ret = __nfp_nsp_command(nsp, &arg->arg); + if (ret < 0) + goto exit_unmap_desc; + + i = 0; + off = 0; + while (off < arg->out_size) { + unsigned int len; + + len = min_t(u64, chunks[i].len, arg->out_size - off); + memcpy(arg->out_buf + off, chunks[i].chunk, len); + off += len; + i++; + } + +exit_unmap_desc: + dma_unmap_single(dev, dma_desc, desc_sz, DMA_TO_DEVICE); +exit_free_desc: + kfree(desc); +exit_unmap_all: + i = nseg; +exit_unmap_prev: + while (--i >= 0) + dma_unmap_single(dev, chunks[i].dma_addr, chunks[i].len, + DMA_BIDIRECTIONAL); + i = nseg; +exit_free_prev: + while (--i >= 0) + kfree(chunks[i].chunk); + kfree(chunks); + if (ret < 0) + nfp_err(cpp, "NSP: SG DMA failed for command 0x%04x: %d (sz:%d cord:%d)\n", + arg->arg.code, ret, max_size, chunk_order); + return ret; +} + +static int +nfp_nsp_command_buf_dma(struct nfp_nsp *nsp, + struct nfp_nsp_command_buf_arg *arg, + unsigned int max_size, unsigned int dma_order) +{ + unsigned int chunk_order, buf_order; + struct nfp_cpp *cpp = nsp->cpp; + bool sg_ok; + u64 reg; + int err; + + buf_order = 
order_base_2(roundup_pow_of_two(max_size)); + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + NFP_CAP_CMD_DMA_SG, + ®); + if (err < 0) + return err; + sg_ok = reg & BIT_ULL(arg->arg.code - 1); + + if (!sg_ok) { + if (buf_order > dma_order) { + nfp_err(cpp, "NSP: can't service non-SG DMA for command 0x%04x\n", + arg->arg.code); + return -ENOMEM; + } + chunk_order = buf_order; + } else { + chunk_order = min_t(unsigned int, dma_order, PAGE_SHIFT); + } + + return nfp_nsp_command_buf_dma_sg(nsp, arg, max_size, chunk_order, + dma_order); +} + +static int +nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg) +{ + unsigned int dma_order, def_size, max_size; + struct nfp_cpp *cpp = nsp->cpp; + u64 reg; + int err; + + if (nsp->ver.minor < 13) { + nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n", + arg->arg.code, nsp->ver.major, nsp->ver.minor); + return -EOPNOTSUPP; + } + + err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res), + nfp_resource_address(nsp->res) + + NSP_DFLT_BUFFER_CONFIG, + ®); + if (err < 0) + return err; + + /* Zero out undefined part of the out buffer */ + if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) + memset(arg->out_buf, 0, arg->out_size - arg->in_size); + + max_size = max(arg->in_size, arg->out_size); + def_size = FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M + + FIELD_GET(NSP_DFLT_BUFFER_SIZE_4KB, reg) * SZ_4K; + dma_order = FIELD_GET(NSP_DFLT_BUFFER_DMA_CHUNK_ORDER, reg); + if (def_size >= max_size) { + return nfp_nsp_command_buf_def(nsp, arg); + } else if (!dma_order) { + nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%u < %u)\n", + arg->arg.code, def_size, max_size); + return -EINVAL; + } + + return nfp_nsp_command_buf_dma(nsp, arg, max_size, dma_order); +} + int nfp_nsp_wait(struct nfp_nsp *state) { const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ; @@ -591,10 +799,7 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw) { .code = SPCODE_NSP_WRITE_FLASH, .option = fw->size, - /* The flash time is specified to take a maximum of 70s - * so we add an additional factor to this spec time. 
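
nfp_nsp_command_buf_dma_sg() above streams a large host buffer to the NSP by copying it into chunks, DMA-mapping each one and describing them in a scatter list. A stripped-down sketch of the map/verify/unmap pattern for a single chunk; the device pointer, length and function name are illustrative:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* Sketch: bounce one chunk through a DMA mapping and undo it. */
	static int example_map_chunk(struct device *dev, const void *src,
				     size_t len)
	{
		dma_addr_t addr;
		void *chunk;
		int err = 0;

		chunk = kmalloc(len, GFP_KERNEL);
		if (!chunk)
			return -ENOMEM;
		memcpy(chunk, src, len);

		addr = dma_map_single(dev, chunk, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, addr)) {
			err = -ENOMEM;
			goto out_free;
		}

		/* ... hand addr/len to the device, wait for completion ... */

		dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);
	out_free:
		kfree(chunk);
		return err;
	}
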
- */ - .timeout_sec = 2.5 * 70, + .timeout_sec = 900, }, .in_buf = fw->data, .in_size = fw->size, @@ -711,3 +916,52 @@ int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) return 0; } + +int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg versions = { + { + .code = SPCODE_VERSIONS, + .option = min_t(u32, size, NFP_VERSIONS_SIZE), + }, + .out_buf = buf, + .out_size = min_t(u32, size, NFP_VERSIONS_SIZE), + }; + + return nfp_nsp_command_buf(state, &versions); +} + +const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash, + const u8 *buf, unsigned int size) +{ + static const u32 id2off[] = { + [NFP_VERSIONS_BSP] = NFP_VERSIONS_BSP_OFF, + [NFP_VERSIONS_CPLD] = NFP_VERSIONS_CPLD_OFF, + [NFP_VERSIONS_APP] = NFP_VERSIONS_APP_OFF, + [NFP_VERSIONS_BUNDLE] = NFP_VERSIONS_BUNDLE_OFF, + [NFP_VERSIONS_UNDI] = NFP_VERSIONS_UNDI_OFF, + [NFP_VERSIONS_NCSI] = NFP_VERSIONS_NCSI_OFF, + [NFP_VERSIONS_CFGR] = NFP_VERSIONS_CFGR_OFF, + }; + unsigned int field, buf_field_cnt, buf_off; + + if (id >= ARRAY_SIZE(id2off) || !id2off[id]) + return ERR_PTR(-EINVAL); + + field = id * 2 + flash; + + buf_field_cnt = get_unaligned_le16(buf); + if (buf_field_cnt <= field) + return ERR_PTR(-ENOENT); + + buf_off = get_unaligned_le16(buf + id2off[id] + flash * 2); + if (!buf_off) + return ERR_PTR(-ENOENT); + + if (buf_off >= size) + return ERR_PTR(-EINVAL); + if (strnlen(&buf[buf_off], size - buf_off) == size - buf_off) + return ERR_PTR(-EINVAL); + + return (const char *)&buf[buf_off]; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index ff33ac54097a..bd9c358c646f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -38,12 +38,18 @@ static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state) return nfp_nsp_get_abi_ver_minor(state) > 24; } +static inline bool nfp_nsp_has_versions(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 27; +} + enum nfp_eth_interface { NFP_INTERFACE_NONE = 0, NFP_INTERFACE_SFP = 1, NFP_INTERFACE_SFPP = 10, NFP_INTERFACE_SFP28 = 28, NFP_INTERFACE_QSFP = 40, + NFP_INTERFACE_RJ45 = 45, NFP_INTERFACE_CXP = 100, NFP_INTERFACE_QSFP28 = 112, }; @@ -208,4 +214,19 @@ enum nfp_nsp_sensor_id { int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val); +#define NFP_NSP_VERSION_BUFSZ 1024 /* reasonable size, not in the ABI */ + +enum nfp_nsp_versions { + NFP_VERSIONS_BSP, + NFP_VERSIONS_CPLD, + NFP_VERSIONS_APP, + NFP_VERSIONS_BUNDLE, + NFP_VERSIONS_UNDI, + NFP_VERSIONS_NCSI, + NFP_VERSIONS_CFGR, +}; + +int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size); +const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash, + const u8 *buf, unsigned int size); #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 802c9224bb32..311a5be25acb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -206,6 +206,9 @@ nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry) if (entry->interface == NFP_INTERFACE_NONE) { entry->port_type = PORT_NONE; return; + } else if (entry->interface == NFP_INTERFACE_RJ45) { + entry->port_type = PORT_TP; + return; } if (entry->media == NFP_MEDIA_FIBRE) @@ -269,8 +272,7 @@ 
__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) goto err; } - table = kzalloc(sizeof(*table) + - sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL); + table = kzalloc(struct_size(table, ports, cnt), GFP_KERNEL); if (!table) goto err; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 1e408d1a9b5f..96f7a9818294 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -105,6 +105,12 @@ #define NIXGE_MAX_JUMBO_FRAME_SIZE \ (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) +enum nixge_version { + NIXGE_V2, + NIXGE_V3, + NIXGE_VERSION_COUNT +}; + struct nixge_hw_dma_bd { u32 next_lo; u32 next_hi; @@ -1225,11 +1231,60 @@ static void *nixge_get_nvmem_address(struct device *dev) return mac; } +/* Match table for of_platform binding */ +static const struct of_device_id nixge_dt_ids[] = { + { .compatible = "ni,xge-enet-2.00", .data = (void *)NIXGE_V2 }, + { .compatible = "ni,xge-enet-3.00", .data = (void *)NIXGE_V3 }, + {}, +}; +MODULE_DEVICE_TABLE(of, nixge_dt_ids); + +static int nixge_of_get_resources(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + enum nixge_version version; + struct resource *ctrlres; + struct resource *dmares; + struct net_device *ndev; + struct nixge_priv *priv; + + ndev = platform_get_drvdata(pdev); + priv = netdev_priv(ndev); + of_id = of_match_node(nixge_dt_ids, pdev->dev.of_node); + if (!of_id) + return -ENODEV; + + version = (enum nixge_version)of_id->data; + if (version <= NIXGE_V2) + dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); + else + dmares = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "dma"); + + priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares); + if (IS_ERR(priv->dma_regs)) { + netdev_err(ndev, "failed to map dma regs\n"); + return PTR_ERR(priv->dma_regs); + } + if (version <= NIXGE_V2) { + priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET; + } else { + ctrlres = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "ctrl"); + priv->ctrl_regs = devm_ioremap_resource(&pdev->dev, ctrlres); + } + if (IS_ERR(priv->ctrl_regs)) { + netdev_err(ndev, "failed to map ctrl regs\n"); + return PTR_ERR(priv->ctrl_regs); + } + return 0; +} + static int nixge_probe(struct platform_device *pdev) { + struct device_node *mn, *phy_node; struct nixge_priv *priv; struct net_device *ndev; - struct resource *dmares; const u8 *mac_addr; int err; @@ -1261,14 +1316,9 @@ static int nixge_probe(struct platform_device *pdev) priv->dev = &pdev->dev; netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT); - - dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares); - if (IS_ERR(priv->dma_regs)) { - netdev_err(ndev, "failed to map dma regs\n"); - return PTR_ERR(priv->dma_regs); - } - priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET; + err = nixge_of_get_resources(pdev); + if (err) + return err; __nixge_hw_set_mac_address(ndev); priv->tx_irq = platform_get_irq_byname(pdev, "tx"); @@ -1286,10 +1336,14 @@ static int nixge_probe(struct platform_device *pdev) priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; - err = nixge_mdio_setup(priv, pdev->dev.of_node); - if (err) { - netdev_err(ndev, "error registering mdio bus"); - goto free_netdev; + mn = of_get_child_by_name(pdev->dev.of_node, "mdio"); + if (mn) { + err = nixge_mdio_setup(priv, mn); + of_node_put(mn); + if (err) { + netdev_err(ndev, "error registering mdio bus"); + goto 
free_netdev; + } } priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); @@ -1299,23 +1353,33 @@ static int nixge_probe(struct platform_device *pdev) goto unregister_mdio; } - priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - if (!priv->phy_node) { - netdev_err(ndev, "not find \"phy-handle\" property\n"); - err = -EINVAL; - goto unregister_mdio; + phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) { + err = of_phy_register_fixed_link(pdev->dev.of_node); + if (err < 0) { + netdev_err(ndev, "broken fixed-link specification\n"); + goto unregister_mdio; + } + phy_node = of_node_get(pdev->dev.of_node); } + priv->phy_node = phy_node; err = register_netdev(priv->ndev); if (err) { netdev_err(ndev, "register_netdev() error (%i)\n", err); - goto unregister_mdio; + goto free_phy; } return 0; +free_phy: + if (of_phy_is_fixed_link(pdev->dev.of_node)) + of_phy_deregister_fixed_link(pdev->dev.of_node); + of_node_put(phy_node); + unregister_mdio: - mdiobus_unregister(priv->mii_bus); + if (priv->mii_bus) + mdiobus_unregister(priv->mii_bus); free_netdev: free_netdev(ndev); @@ -1330,20 +1394,18 @@ static int nixge_remove(struct platform_device *pdev) unregister_netdev(ndev); - mdiobus_unregister(priv->mii_bus); + if (of_phy_is_fixed_link(pdev->dev.of_node)) + of_phy_deregister_fixed_link(pdev->dev.of_node); + of_node_put(priv->phy_node); + + if (priv->mii_bus) + mdiobus_unregister(priv->mii_bus); free_netdev(ndev); return 0; } -/* Match table for of_platform binding */ -static const struct of_device_id nixge_dt_ids[] = { - { .compatible = "ni,xge-enet-2.00", }, - {}, -}; -MODULE_DEVICE_TABLE(of, nixge_dt_ids); - static struct platform_driver nixge_driver = { .probe = nixge_probe, .remove = nixge_remove, diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index c662c6f5bee3..67bf02b0763a 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -630,7 +630,7 @@ static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev) if (!(w90p910_send_frame(dev, skb->data, skb->len))) { ether->skb = skb; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); return 0; } return -EAGAIN; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 552d930e3940..528f6b4fd16a 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -27,7 +27,6 @@ #define DRV_VERSION "1.01" const char pch_driver_version[] = DRV_VERSION; -#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 /* Pci device ID */ #define PCH_GBE_MAR_ENTRIES 16 #define PCH_GBE_SHORT_PKT 64 #define DSC_INIT16 0xC000 @@ -37,11 +36,9 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_PCI_BAR 1 #define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */ -/* Macros for ML7223 */ -#define PCI_VENDOR_ID_ROHM 0x10db -#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 +#define PCI_DEVICE_ID_INTEL_IOH1_GBE 0x8802 -/* Macros for ML7831 */ +#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 #define PCI_DEVICE_ID_ROHM_ML7831_GBE 0x8802 #define PCH_GBE_TX_WEIGHT 64 diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index c9529c29a0a7..eee883a2aa8d 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1337,7 +1337,7 @@ static 
irqreturn_t hamachi_interrupt(int irq, void *dev_instance) leXX_to_cpu(hmp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); hmp->tx_skbuff[entry] = NULL; } hmp->tx_ring[entry].status_n_length = 0; diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 54224d1822e3..6f8d6584f809 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -925,7 +925,7 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) /* Free the original skb. */ pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); yp->tx_skbuff[entry] = NULL; } if (yp->tx_full && @@ -983,7 +983,7 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) pci_unmap_single(yp->pci_dev, yp->tx_ring[entry<<1].addr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); yp->tx_skbuff[entry] = 0; /* Mark status as empty. */ yp->tx_status[entry].tx_errs = 0; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index d21041554507..a5bf46310f60 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1716,6 +1716,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -ENODEV; goto out; } + dma_set_mask(&mac->dma_pdev->dev, DMA_BIT_MASK(64)); mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL); if (!mac->iob_pdev) { diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 24a90163775e..43a57ec296fd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -53,7 +53,7 @@ extern const struct qed_common_ops qed_common_ops_pass; #define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 33 +#define QED_MINOR_VERSION 37 #define QED_REVISION_VERSION 0 #define QED_ENGINEERING_VERSION 20 @@ -554,7 +554,6 @@ struct qed_hwfn { u8 dp_level; char name[NAME_SIZE]; - bool first_on_engine; bool hw_init_done; u8 num_funcs_on_engine; @@ -754,6 +753,7 @@ struct qed_dev { #define CHIP_BOND_ID_SHIFT 0 u8 num_engines; + u8 num_ports; u8 num_ports_in_engine; u8 num_funcs_in_port; @@ -805,6 +805,9 @@ struct qed_dev { u32 mcp_nvm_resp; + /* Recovery */ + bool recov_in_prog; + /* Linux specific here */ struct qede_dev *edev; struct pci_dev *pdev; @@ -890,7 +893,6 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_device_num_engines(struct qed_dev *cdev); -int qed_device_get_port_id(struct qed_dev *cdev); void qed_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); @@ -937,6 +939,10 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); writel((u32)val, (void __iomem *)((u8 __iomem *)\ (cdev->doorbells) + (db_addr))) +#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ + qed_device_num_ports((_p_hwfn)->cdev)) +int qed_device_num_ports(struct qed_dev *cdev); + /* Prototypes */ int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info); @@ -944,6 +950,7 @@ void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn); void qed_get_protocol_stats(struct 
qed_dev *cdev, enum qed_mcp_protocol_type type, union qed_mcp_protocol_stats *stats); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index c2ad405b2f50..e61d1d905415 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2129,17 +2129,18 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) rdma_tasks); /* no need for break since RoCE coexist with Ethernet */ } + /* fall through */ case QED_PCI_ETH: { struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; - if (!p_params->num_vf_cons) - p_params->num_vf_cons = - ETH_PF_PARAMS_VF_CONS_DEFAULT; - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons, - p_params->num_vf_cons); + if (!p_params->num_vf_cons) + p_params->num_vf_cons = + ETH_PF_PARAMS_VF_CONS_DEFAULT; + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, + p_params->num_vf_cons); p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; break; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 8f6551421945..9df8c4b3b54e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, /* get pq index according to PQ_FLAGS */ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, - u32 pq_flags) + unsigned long pq_flags) { struct qed_qm_info *qm_info = &p_hwfn->qm_info; /* Can't have multiple flags set here */ - if (bitmap_weight((unsigned long *)&pq_flags, + if (bitmap_weight(&pq_flags, sizeof(pq_flags) * BITS_PER_BYTE) > 1) { - DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); + DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); goto err; } if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { - DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); + DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); goto err; } @@ -1959,11 +1959,6 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); - /* Cleanup chip from previous driver if such remains exist */ - rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); - if (rc) - return rc; - /* Sanity check before the PF init sequence that uses DMAE */ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); if (rc) @@ -2007,17 +2002,15 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, return rc; } -static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 enable) +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable) { - u32 delay_idx = 0, val, set_val = enable ? 1 : 0; + u32 delay_idx = 0, val, set_val = b_enable ? 
1 : 0; - /* Change PF in PXP */ - qed_wr(p_hwfn, p_ptt, - PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); + /* Configure the PF's internal FID_enable for master transactions */ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); - /* wait until value is set - try for 1 second every 50us */ + /* Wait until value is set - try for 1 second every 50us */ for (delay_idx = 0; delay_idx < 20000; delay_idx++) { val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); @@ -2071,13 +2064,19 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, return 0; } +static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, + BIT(p_hwfn->abs_pf_id)); +} + int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; - int rc = 0, mfw_rc, i; + int rc = 0, i; u16 ether_type; if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { @@ -2092,7 +2091,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + p_hwfn = &cdev->hwfns[i]; /* If management didn't provide a default, set one of our own */ if (!p_hwfn->hw_info.mtu) { @@ -2105,9 +2104,6 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) continue; } - /* Enable DMAE in PXP */ - rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); - rc = qed_calc_hw_mode(p_hwfn); if (rc) return rc; @@ -2144,12 +2140,43 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) "Load request was sent. Load code: 0x%x\n", load_code); + /* Only relevant for recovery: + * Clear the indication after LOAD_REQ is responded by the MFW. + */ + cdev->recov_in_prog = false; + qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); - p_hwfn->first_on_engine = (load_code == - FW_MSG_CODE_DRV_LOAD_ENGINE); + /* Clean up chip from previous driver if such remains exist. + * This is not needed when the PF is the first one on the + * engine, since afterwards we are going to init the FW. + */ + if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { + rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->rel_pf_id, false); + if (rc) { + DP_NOTICE(p_hwfn, "Final cleanup failed\n"); + goto load_err; + } + } + + /* Log and clear previous pglue_b errors if such exist */ + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); + + /* Enable the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, + true); + if (rc) + goto load_err; + + /* Clear the pglue_b was_error indication. + * In E4 it must be done after the BME and the internal + * FID_enable for the PF are set, since VDMs may cause the + * indication to be set again. 
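/*
 * Editorial aside, not part of the patch above: qed_pglueb_set_pfid_enable()
 * writes the enable bit and then polls the register up to 20000 times at
 * 50 us intervals (about one second) until it reads back the expected value.
 * A userspace sketch of that bounded wait-for-value pattern, with a callback
 * standing in for the register read; names are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static bool poll_for_value(uint32_t (*read_reg)(void *ctx), void *ctx,
			   uint32_t want, unsigned int attempts,
			   unsigned int delay_us)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = (long)delay_us * 1000 };
	unsigned int i;

	for (i = 0; i < attempts; i++) {
		if (read_reg(ctx) == want)
			return true;
		nanosleep(&ts, NULL);	/* back off before the next read */
	}
	return false;			/* timed out, caller decides what to do */
}

static uint32_t stub_read(void *ctx)
{
	return *(uint32_t *)ctx;
}

int main(void)
{
	uint32_t val = 1;

	printf("%s\n", poll_for_value(stub_read, &val, 1, 20000, 50) ?
	       "value set" : "timed out");
	return 0;
}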
+ */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: @@ -2180,39 +2207,29 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) break; } - if (rc) + if (rc) { DP_NOTICE(p_hwfn, "init phase failed for loadcode 0x%x (rc %d)\n", - load_code, rc); + load_code, rc); + goto load_err; + } - /* ACK mfw regardless of success or failure of initialization */ - mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_LOAD_DONE, - 0, &load_code, ¶m); + rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); if (rc) return rc; - if (mfw_rc) { - DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n"); - return mfw_rc; - } - - /* Check if there is a DID mismatch between nvm-cfg/efuse */ - if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) - DP_NOTICE(p_hwfn, - "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "sending phony dcbx set command to trigger DCBx attention handling\n"); - mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, - DRV_MSG_CODE_SET_DCBX, - 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, - &load_code, ¶m); - if (mfw_rc) { + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &resp, ¶m); + if (rc) { DP_NOTICE(p_hwfn, "Failed to send DCBX attention request\n"); - return mfw_rc; + return rc; } p_hwfn->hw_init_done = true; @@ -2261,6 +2278,12 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) } return 0; + +load_err: + /* The MFW load lock should be released also when initialization fails. + */ + qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); + return rc; } #define QED_HW_STOP_RETRY_LIMIT (10) @@ -2273,6 +2296,9 @@ static void qed_hw_timers_stop(struct qed_dev *cdev, qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + if (cdev->recov_in_prog) + return; + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { if ((!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN)) && @@ -2335,12 +2361,14 @@ int qed_hw_stop(struct qed_dev *cdev) p_hwfn->hw_init_done = false; /* Send unload command to MCP */ - rc = qed_mcp_unload_req(p_hwfn, p_ptt); - if (rc) { - DP_NOTICE(p_hwfn, - "Failed sending a UNLOAD_REQ command. rc = %d.\n", - rc); - rc2 = -EINVAL; + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_req(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_REQ command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } } qed_slowpath_irq_sync(p_hwfn); @@ -2382,27 +2410,31 @@ int qed_hw_stop(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); - qed_mcp_unload_done(p_hwfn, p_ptt); - if (rc) { - DP_NOTICE(p_hwfn, - "Failed sending a UNLOAD_DONE command. rc = %d.\n", - rc); - rc2 = -EINVAL; + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_done(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_DONE command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } } } - if (IS_PF(cdev)) { + if (IS_PF(cdev) && !cdev->recov_in_prog) { p_hwfn = QED_LEADING_HWFN(cdev); p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; - /* Disable DMAE in PXP - in CMT, this should only be done for - * first hw-function, and only after all transactions have - * stopped for all active hw-functions. + /* Clear the PF's internal FID_enable in the PXP. 
+ * In CMT this should only be done for first hw-function, and + * only after all transactions have stopped for all active + * hw-functions. */ - rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false); + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); if (rc) { DP_NOTICE(p_hwfn, - "qed_change_pci_hwfn failed. rc = %d.\n", rc); + "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", + rc); rc2 = -EINVAL; } } @@ -2502,9 +2534,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); } - /* Clean Previous errors if such exist */ - qed_wr(p_hwfn, p_hwfn->p_main_ptt, - PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); + /* Clean previous pglue_b errors if such exist */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, @@ -3238,55 +3269,43 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); } -static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) -{ - u32 port_mode; - - port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB); - - if (port_mode < 3) { - p_hwfn->cdev->num_ports_in_engine = 1; - } else if (port_mode <= 5) { - p_hwfn->cdev->num_ports_in_engine = 2; - } else { - DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", - p_hwfn->cdev->num_ports_in_engine); - - /* Default num_ports_in_engine to something */ - p_hwfn->cdev->num_ports_in_engine = 1; - } -} - -static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port; - int i; - - p_hwfn->cdev->num_ports_in_engine = 0; + u32 addr, global_offsize, global_addr, port_mode; + struct qed_dev *cdev = p_hwfn->cdev; - for (i = 0; i < MAX_NUM_PORTS_K2; i++) { - port = qed_rd(p_hwfn, p_ptt, - CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); - if (port & 1) - p_hwfn->cdev->num_ports_in_engine++; + /* In CMT there is always only one port */ + if (cdev->num_hwfns > 1) { + cdev->num_ports_in_engine = 1; + cdev->num_ports = 1; + return; } - if (!p_hwfn->cdev->num_ports_in_engine) { - DP_NOTICE(p_hwfn, "All NIG ports are inactive\n"); - - /* Default num_ports_in_engine to something */ - p_hwfn->cdev->num_ports_in_engine = 1; + /* Determine the number of ports per engine */ + port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); + switch (port_mode) { + case 0x0: + cdev->num_ports_in_engine = 1; + break; + case 0x1: + cdev->num_ports_in_engine = 2; + break; + case 0x2: + cdev->num_ports_in_engine = 4; + break; + default: + DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode); + cdev->num_ports_in_engine = 1; /* Default to something */ + break; } -} -static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) -{ - if (QED_IS_BB(p_hwfn->cdev)) - qed_hw_info_port_num_bb(p_hwfn, p_ptt); - else - qed_hw_info_port_num_ah(p_hwfn, p_ptt); + /* Get the total number of ports of the device */ + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, max_ports); + cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr); } static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -3324,7 +3343,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, return rc; } - qed_hw_info_port_num(p_hwfn, p_ptt); + if (IS_LEAD_HWFN(p_hwfn)) + 
qed_hw_info_port_num(p_hwfn, p_ptt); qed_mcp_get_capabilities(p_hwfn, p_ptt); @@ -3440,6 +3460,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, void __iomem *p_doorbells, enum qed_pci_personality personality) { + struct qed_dev *cdev = p_hwfn->cdev; int rc = 0; /* Split PCI bars evenly between hwfns */ @@ -3492,7 +3513,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, /* Sending a mailbox to the MFW should be done after qed_get_hw_info() * is called as it sets the ports number in an engine. */ - if (IS_LEAD_HWFN(p_hwfn)) { + if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) { rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt); if (rc) DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n"); @@ -4728,23 +4749,9 @@ void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); } -int qed_device_num_engines(struct qed_dev *cdev) -{ - return QED_IS_BB(cdev) ? 2 : 1; -} - -static int qed_device_num_ports(struct qed_dev *cdev) -{ - /* in CMT always only one port */ - if (cdev->num_hwfns > 1) - return 1; - - return cdev->num_ports_in_engine * qed_device_num_engines(cdev); -} - -int qed_device_get_port_id(struct qed_dev *cdev) +int qed_device_num_ports(struct qed_dev *cdev) { - return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); + return cdev->num_ports; } void qed_set_fw_mac_addr(__le16 *fw_msb, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index acccd85170aa..e4b4e3b78e8a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -472,6 +472,18 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); +/** + * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * + * @param p_hwfn + * @param p_ptt + * @param b_enable - true/false + * + * @return int + */ +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable); + /** * @brief db_recovery_add - add doorbell information to the doorbell * recovery mechanism. diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b13cfb449d8f..37edaa847512 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12796,6 +12796,7 @@ struct public_drv_mb { #define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 /* get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 @@ -12827,7 +12828,7 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LLDP_DATA_UPDATED, MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_RESERVED4, + MFW_DRV_MSG_ERROR_RECOVERY, MFW_DRV_MSG_BW_UPDATE, MFW_DRV_MSG_S_TAG_UPDATE, MFW_DRV_MSG_GET_LAN_STATS, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 70504dcf4087..72ec1c6bdf70 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -703,6 +703,17 @@ static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, int qed_status = 0; u32 offset = 0; + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "Recovery is in progress. 
Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n", + src_addr, src_type, dst_addr, dst_type, + size_in_dwords); + + /* Let the flow complete w/o any error handling */ + return 0; + } + qed_dmae_opcode(p_hwfn, (src_type == QED_DMAE_ADDRESS_GRC), (dst_type == QED_DMAE_ADDRESS_GRC), diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 92340919d852..e23980e301b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -255,112 +255,114 @@ out: #define PGLUE_ATTENTION_ICPL_VALID (1 << 23) #define PGLUE_ATTENTION_ZLR_VALID (1 << 25) #define PGLUE_ATTENTION_ILT_VALID (1 << 23) -static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) + +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) { u32 tmp; - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_WR_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); if (tmp & PGLUE_ATTENTION_VALID) { u32 addr_lo, addr_hi, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - DP_INFO(p_hwfn, - "Illegal write by chip to [%08x:%08x] blocked.\n" - "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, - PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); + DP_NOTICE(p_hwfn, + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_RD_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); if (tmp & PGLUE_ATTENTION_RD_VALID) { u32 addr_lo, addr_hi, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS); - DP_INFO(p_hwfn, - "Illegal read by chip from [%08x:%08x] blocked.\n" - " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" - " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", - addr_hi, addr_lo, details, - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), - (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), - GET_FIELD(details, - PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, - tmp, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 - : 0, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, - GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 - : 0); + DP_NOTICE(p_hwfn, + "Illegal read by chip from [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); if (tmp & PGLUE_ATTENTION_ICPL_VALID) - DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp); + DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp); - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); if (tmp & PGLUE_ATTENTION_ZLR_VALID) { u32 addr_hi, addr_lo; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); - DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n", - tmp, addr_hi, addr_lo); + DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); } - tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); if (tmp & PGLUE_ATTENTION_ILT_VALID) { u32 addr_hi, addr_lo, details; - addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_lo = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); - addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + addr_hi = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); - details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + details = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS); - DP_INFO(p_hwfn, - "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", - details, tmp, addr_hi, addr_lo); + DP_NOTICE(p_hwfn, + "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); } /* Clear the indications */ - qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, - 
PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); return 0; } +static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) +{ + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); +} + #define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) #define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) #define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) @@ -540,7 +542,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { {"PGLUE misc_flr", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"PGLUE B RBC", ATTENTION_PAR_INT, - qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, + qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, {"PGLUE misc_mctp", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index d81a62ebd524..1f356ed4f761 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -431,4 +431,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); + if (!ether_addr_equal(ethh->h_dest, + p_hwfn->p_rdma_info->iwarp.mac_addr)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "Got unexpected mac %pM instead of %pM\n", + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); + return -EINVAL; + } + ether_addr_copy(remote_mac_addr, ethh->h_source); ether_addr_copy(local_mac_addr, ethh->h_dest); @@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; - u32 mpa_buff_size; + u32 buff_size; u16 n_ooo_bufs; int rc = 0; int i; @@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, memset(&data, 0, sizeof(data)); data.input.conn_type = QED_LL2_TYPE_IWARP; - data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; + data.input.mtu = params->max_mtu; data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ @@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, goto err; } + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, QED_IWARP_LL2_SYN_RX_SIZE, - QED_IWARP_MAX_SYN_PKT_SIZE, + buff_size, iwarp_info->ll2_syn_handle); if (rc) goto err; @@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; - mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, data.input.rx_num_desc, - mpa_buff_size, + buff_size, iwarp_info->ll2_mpa_handle); if (rc) goto err; @@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; - iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); if (!iwarp_info->mpa_intermediate_buf) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h 
b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); #define QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) -#define QED_IWARP_MAX_SYN_PKT_SIZE (128) #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) #define QED_IWARP_MAX_OOO (16) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 67c02ea93906..57641728df69 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, !!(accept_filter & QED_ACCEPT_BCAST)); @@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, return rc; } + if (p_params->update_ctl_frame_check) { + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; + } + /* Update mcast bins for VFs, PF doesn't use this functionality */ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); @@ -1889,6 +1898,7 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) : NULL; + bool b_get_port_stats; if (IS_PF(cdev)) { /* The main vport index is relative first */ @@ -1903,8 +1913,9 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, continue; } + b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn); __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, - IS_PF(cdev) ? true : false); + b_get_port_stats); out: if (IS_PF(cdev) && p_ptt) @@ -2207,7 +2218,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 num_queues = 0; /* Since the feature controls only queue-zones, - * make sure we have the contexts [rx, tx, xdp] to + * make sure we have the contexts [rx, xdp, tcs] to * match. 
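/*
 * Editorial aside, not part of the patch: the comment above notes that every
 * L2 queue needs matching contexts for rx, xdp and the tx traffic classes,
 * and the change continuing just below therefore divides the available
 * connection contexts by (2 + num_tc) before taking the minimum with the
 * queue-zone limit. A worked example of that budget; numbers are illustrative.
 */
#include <stdio.h>

static unsigned int queues_from_cids(unsigned int cids, unsigned int num_tc,
				     unsigned int l2_queues)
{
	unsigned int by_ctx = cids / (2 + num_tc);	/* rx + xdp + num_tc tx */

	return by_ctx < l2_queues ? by_ctx : l2_queues;
}

int main(void)
{
	/* 192 contexts with 4 traffic classes allow at most 32 queues. */
	printf("%u queues\n", queues_from_cids(192, 4, 64));
	return 0;
}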
*/ for_each_hwfn(cdev, i) { @@ -2217,7 +2228,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, u16 cids; cids = hwfn->pf_params.eth_pf_params.num_cons; - num_queues += min_t(u16, l2_queues, cids / 3); + cids /= (2 + info->num_tc); + num_queues += min_t(u16, l2_queues, cids); } /* queues might theoretically be >256, but interrupts' @@ -2688,7 +2700,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; - accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; @@ -2860,7 +2873,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) p_hwfn = p_cid->p_owner; rc = qed_get_queue_coalesce(p_hwfn, coal, handle); if (rc) - DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "Unable to read queue coalescing\n"); return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8d80f1095d17..7127d5aaac42 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -219,6 +219,9 @@ struct qed_sp_vport_update_params { struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; struct qed_sge_tpa_params *sge_tpa_params; + u8 update_ctl_frame_check; + u8 mac_chk_en; + u8 ethtype_chk_en; }; int qed_sp_vport_update(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index d9237c65a838..b5f419b71287 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -2451,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, { struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; + u8 flags = 0, nr_frags; int rc = -EINVAL, i; dma_addr_t mapping; u16 vlan = 0; - u8 flags = 0; if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); return -EINVAL; } - if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { + /* Cache number of fragments from SKB since SKB may be freed by + * the completion routine after calling qed_ll2_prepare_tx_packet() + */ + nr_frags = skb_shinfo(skb)->nr_frags; + + if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", - 1 + skb_shinfo(skb)->nr_frags); + 1 + nr_frags); return -EINVAL; } @@ -2485,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, } memset(&pkt, 0, sizeof(pkt)); - pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; + pkt.num_of_bds = 1 + nr_frags; pkt.vlan = vlan; pkt.bd_flags = flags; pkt.tx_dest = QED_LL2_TX_DEST_NW; @@ -2496,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) pkt.remove_stag = true; + /* qed_ll2_prepare_tx_packet() may actually send the packet if + * there are no fragments in the skb and subsequently the completion + * routine may run and free the SKB, so no dereferencing the SKB + * beyond this point unless skb has any fragments. 
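/*
 * Editorial aside, not part of the patch: the comment above is the point of
 * this hunk, copy whatever is still needed out of an object before handing it
 * to a routine whose completion path may free it. A minimal userspace sketch
 * of that ordering; the structure and names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct packet {
	size_t len;
	unsigned char *data;
};

/* Consumes the packet: after this returns the caller must not touch it. */
static void submit_and_free(struct packet *pkt)
{
	/* ... hand off to the completion path ... */
	free(pkt->data);
	free(pkt);
}

int main(void)
{
	struct packet *pkt = malloc(sizeof(*pkt));
	size_t cached_len;

	if (!pkt)
		return 1;
	pkt->len = 64;
	pkt->data = calloc(1, pkt->len);
	if (!pkt->data) {
		free(pkt);
		return 1;
	}

	cached_len = pkt->len;	/* cache before ownership is handed over */
	submit_and_free(pkt);	/* pkt must not be dereferenced past this call */

	printf("submitted %zu bytes\n", cached_len);
	return 0;
}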
+ */ rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, &pkt, 1); if (rc) goto err; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + for (i = 0; i < nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 6adf5bda9811..f164d4acebcb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -281,6 +281,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; + dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); + dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, @@ -359,6 +361,8 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, qed_init_dp(cdev, params->dp_module, params->dp_level); + cdev->recov_in_prog = params->recov_in_prog; + rc = qed_init_pci(cdev, pdev); if (rc) { DP_ERR(cdev, "init pci failed\n"); @@ -2203,6 +2207,15 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, return qed_mcp_get_nvm_image(hwfn, type, buf, len); } +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) +{ + struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; + void *cookie = p_hwfn->cdev->ops_cookie; + + if (ops && ops->schedule_recovery_handler) + ops->schedule_recovery_handler(cookie); +} + static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle) { @@ -2226,6 +2239,23 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } +static int qed_recovery_process(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + rc = qed_start_recovery_process(p_hwfn, p_ptt); + + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + static int qed_update_wol(struct qed_dev *cdev, bool enabled) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@ -2380,6 +2410,8 @@ const struct qed_common_ops qed_common_ops_pass = { .nvm_get_image = &qed_nvm_get_image, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, + .recovery_process = &qed_recovery_process, + .recovery_prolog = &qed_recovery_prolog, .update_drv_state = &qed_update_drv_state, .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index e7f18e34ff0d..cc27fd60d689 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1070,6 +1070,27 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, + ¶m); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a LOAD_DONE command, rc = %d\n", rc); + return rc; + } + + /* Check if there is a DID mismatch between nvm-cfg/efuse */ + if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) + DP_NOTICE(p_hwfn, + "warning: device configuration is not supported on this board type. 
The device may not function as expected.\n"); + + return 0; +} + int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params; @@ -1528,6 +1549,60 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return 0; } +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr); + path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn)); + + proc_kill_cnt = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, process_kill)) & + PROCESS_KILL_COUNTER_MASK; + + return proc_kill_cnt; +} + +static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 proc_kill_cnt; + + /* Prevent possible attentions/interrupts during the recovery handling + * and till its load phase, during which they will be re-enabled. + */ + qed_int_igu_disable_int(p_hwfn, p_ptt); + + DP_NOTICE(p_hwfn, "Received a process kill indication\n"); + + /* The following operations should be done once, and thus in CMT mode + * are carried out by only the first HW function. + */ + if (p_hwfn != QED_LEADING_HWFN(cdev)) + return; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Ignoring the indication since a recovery process is already in progress\n"); + return; + } + + cdev->recov_in_prog = true; + + proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt); + DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt); + + qed_schedule_recovery_handler(p_hwfn); +} + static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum MFW_DRV_MSG_TYPE type) @@ -1758,6 +1833,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_ERROR_RECOVERY: + qed_mcp_handle_process_kill(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_GET_LAN_STATS: case MFW_DRV_MSG_GET_FCOE_STATS: case MFW_DRV_MSG_GET_ISCSI_STATS: @@ -2303,6 +2381,43 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, return 0; } +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Avoid triggering a recovery since such a process is already in progress\n"); + return -EAGAIN; + } + + DP_NOTICE(p_hwfn, "Triggering a recovery process\n"); + qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); + + return 0; +} + +#define QED_RECOVERY_PROLOG_SLEEP_MS 100 + +int qed_recovery_prolog(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + int rc; + + /* Allow ongoing PCIe transactions to complete */ + msleep(QED_RECOVERY_PROLOG_SLEEP_MS); + + /* Clear the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); + if (rc) + DP_NOTICE(p_hwfn, + "qed_pglueb_set_pfid_enable() failed. 
rc = %d.\n", + rc); + + return rc; +} + static int qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num) @@ -3539,6 +3654,12 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, } } +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); +} + int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index eddf67798d6f..261c1a392e2c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -440,6 +440,38 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_drv_version *p_ver); +/** + * @brief Read the MFW process kill counter + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Trigger a recovery process + * + * @param p_hwfn + * @param p_ptt + * + * @return int + */ +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief A recovery handler must call this function as its first step. + * It is assumed that the handler is not run from an interrupt context. + * + * @param cdev + * @param p_ptt + * + * @return int + */ +int qed_recovery_prolog(struct qed_dev *cdev); + /** * @brief Notify MFW about the change in base device properties * @@ -659,10 +691,6 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); rel_pfid) #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) -#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ - ((_p_hwfn)->cdev->num_ports_in_engine * \ - qed_device_num_engines((_p_hwfn)->cdev))) - struct qed_mcp_info { /* List for mailbox commands which were sent and wait for a response */ struct list_head cmd_list; @@ -800,6 +828,16 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_params *p_params); +/** + * @brief Sends a LOAD_DONE message to the MFW + * + * @param p_hwfn + * @param p_ptt + * + * @return int - 0 - Operation was successful. + */ +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + /** * @brief Sends a UNLOAD_REQ message to the MFW * @@ -1106,6 +1144,16 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, enum qed_resc_lock resource, bool b_is_permanent); + +/** + * @brief - Return whether management firmware support smart AN + * + * @param p_hwfn + * + * @return bool - true if feature is supported. 
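/*
 * Editorial aside, not part of the patch: qed_start_recovery_process(), shown
 * earlier in this hunk, refuses to trigger a recovery while one is already
 * pending, while the MFW process-kill handler is what sets the in-progress
 * flag. A compact userspace sketch of that "trigger at most once" guard; for
 * illustration the flag is set by the trigger itself here.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct dev_state {
	bool recov_in_prog;
};

static int start_recovery(struct dev_state *st)
{
	if (st->recov_in_prog) {
		fprintf(stderr, "recovery already in progress, ignoring\n");
		return -EAGAIN;
	}
	st->recov_in_prog = true;
	/* ... kick off the actual recovery work here ... */
	return 0;
}

int main(void)
{
	struct dev_state st = { .recov_in_prog = false };

	printf("first:  %d\n", start_recovery(&st));	/* 0 */
	printf("second: %d\n", start_recovery(&st));	/* -EAGAIN */
	return 0;
}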
+ */ +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); + /** * @brief Learn of supported MFW features; To be done during early init * diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 5a90d69dc2f8..1302b308bd87 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -47,7 +47,7 @@ static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) { - switch (qed_device_get_port_id(p_hwfn->cdev)) { + switch (MFW_PORT(p_hwfn)) { case 0: return QED_RESC_LOCK_PTP_PORT0; case 1: diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 8939ed6e08b7..5ce825ca5f24 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -518,6 +518,8 @@ 0x180824UL #define MISC_REG_AEU_GENERAL_ATTN_0 \ 0x008400UL +#define MISC_REG_AEU_GENERAL_ATTN_35 \ + 0x00848cUL #define CAU_REG_SB_ADDR_MEMORY \ 0x1c8000UL #define CAU_REG_SB_VAR_MEMORY \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4179c9013fc6..96ab77ae6af5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); * @param p_hwfn */ void qed_consq_free(struct qed_hwfn *p_hwfn); +int qed_spq_pend_post(struct qed_hwfn *p_hwfn); /** * @file diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 888274fa208b..5a495fda9e9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) p_ent->ramrod.pf_update.update_mf_vlan_flag = true; p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + p_ent->ramrod.pf_update.mf_vlan |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); return qed_spq_post(p_hwfn, p_ent, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index eb88bbc6b193..79b311b86f66 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); + /* Attempt to post pending requests */ + spin_lock_bh(&p_hwfn->p_spq->lock); + rc = qed_spq_pend_post(p_hwfn); + spin_unlock_bh(&p_hwfn->p_spq->lock); + return rc; } @@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, return 0; } -static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) +int qed_spq_pend_post(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_ent = NULL; @@ -790,6 +795,17 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) SPQ_HIGH_PRI_RESERVE_DEFAULT); } +static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent, + u8 *fw_return_code) +{ + if (!fw_return_code) + return; + + if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE || + p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP) + *fw_return_code = RDMA_RETURN_OK; +} + /* Avoid overriding of SPQ entries when getting out-of-order completions, by * marking the completions in a bitmap and increasing the chain consumer only * for the first successive completed entries. 
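/*
 * Editorial aside, not part of the patch: the comment above describes how SPQ
 * completions may arrive out of order, so they are recorded in a bitmap and
 * the consumer index only advances over the leading run of completed entries.
 * A small userspace sketch of that bookkeeping; ring size and names are
 * illustrative.
 */
#include <stdio.h>

#define RING_SIZE 16u

static unsigned long completed_map;	/* bit i set: entry i has completed */
static unsigned int consumer;		/* ring consumer index */

static void mark_completed(unsigned int idx)
{
	completed_map |= 1ul << (idx % RING_SIZE);
}

/* Advance the consumer over every already-completed entry at the head. */
static void advance_consumer(void)
{
	while (completed_map & (1ul << (consumer % RING_SIZE))) {
		completed_map &= ~(1ul << (consumer % RING_SIZE));
		consumer++;
	}
}

int main(void)
{
	mark_completed(1);	/* entry 1 completes before entry 0 */
	advance_consumer();
	printf("consumer = %u\n", consumer);	/* still 0 */

	mark_completed(0);
	advance_consumer();
	printf("consumer = %u\n", consumer);	/* now 2 */
	return 0;
}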
@@ -825,6 +841,17 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, return -EINVAL; } + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n", + p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id); + + /* Let the flow complete w/o any error handling */ + qed_spq_recov_set_ret_code(p_ent, fw_return_code); + return 0; + } + /* Complete the entry */ rc = qed_spq_fill_entry(p_hwfn, p_ent); @@ -905,7 +932,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *tmp; struct qed_spq_entry *found = NULL; - int rc; if (!p_hwfn) return -EINVAL; @@ -963,12 +989,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, */ qed_spq_return_entry(p_hwfn, found); - /* Attempt to post pending requests */ - spin_lock_bh(&p_spq->lock); - rc = qed_spq_pend_post(p_hwfn); - spin_unlock_bh(&p_spq->lock); - - return rc; + return 0; } int qed_consq_alloc(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ca6290fa0f30..9faaa6df78ed 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, params.vport_id = vf->vport_id; params.max_buffers_per_cqe = start->max_buffers_per_cqe; params.mtu = vf->mtu; - params.check_mac = true; + + /* Non trusted VFs should enable control frame filtering */ + params.check_mac = !vf->p_vf_info.is_trusted_configured; rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); if (rc) { @@ -4447,6 +4449,13 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) pci_disable_sriov(cdev->pdev); + if (cdev->recov_in_prog) { + DP_VERBOSE(cdev, + QED_MSG_IOV, + "Skip SRIOV disable operations in the device since a recovery is in progress\n"); + goto out; + } + for_each_hwfn(cdev, i) { struct qed_hwfn *hwfn = &cdev->hwfns[i]; struct qed_ptt *ptt = qed_ptt_acquire(hwfn); @@ -4486,7 +4495,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) qed_ptt_release(hwfn, ptt); } - +out: qed_iov_set_vfs_to_disable(cdev, false); return 0; @@ -5130,6 +5139,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; + params.update_ctl_frame_check = 1; + params.mac_chk_en = !vf_info->is_trusted_configured; + if (vf_info->rx_accept_mode & mask) { flags->update_rx_mode_config = 1; flags->rx_accept_filter = vf_info->rx_accept_mode; @@ -5147,7 +5159,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) } if (flags->update_rx_mode_config || - flags->update_tx_mode_config) + flags->update_tx_mode_config || + params.update_ctl_frame_check) qed_sp_vport_update(hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index b6cccf44bf40..5dda547772c1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vf_pf_resc_request *p_resc; + u8 retry_cnt = VF_ACQUIRE_THRESH; bool resources_acquired = false; struct vfpf_acquire_tlv *req; int rc = 0, attempts = 0; @@ -314,6 +315,15 @@ static int 
qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) /* send acquire request */ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + /* Re-try acquire in case of vf-pf hw channel timeout */ + if (retry_cnt && rc == -EBUSY) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF retrying to acquire due to VPC timeout\n"); + retry_cnt--; + continue; + } + if (rc) goto exit; diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 613249d1e967..63a78162cfaf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -56,7 +56,7 @@ #include #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 33 +#define QEDE_MINOR_VERSION 37 #define QEDE_REVISION_VERSION 0 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ @@ -162,6 +162,7 @@ struct qede_rdma_dev { struct list_head entry; struct list_head rdma_event_list; struct workqueue_struct *rdma_wq; + bool exp_recovery; }; struct qede_ptp; @@ -264,6 +265,7 @@ struct qede_dev { enum QEDE_STATE { QEDE_STATE_CLOSED, QEDE_STATE_OPEN, + QEDE_STATE_RECOVERY, }; #define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo)) @@ -462,6 +464,7 @@ struct qede_fastpath { #define QEDE_CSUM_UNNECESSARY BIT(1) #define QEDE_TUNN_CSUM_UNNECESSARY BIT(2) +#define QEDE_SP_RECOVERY 0 #define QEDE_SP_RX_MODE 1 #ifdef CONFIG_RFS_ACCEL @@ -494,6 +497,9 @@ struct qede_reload_args { /* Datapath functions definition */ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback); netdev_features_t qede_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 16331c6c6fa7..b4c8949933f1 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -186,11 +186,13 @@ static const struct { enum { QEDE_PRI_FLAG_CMT, + QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */ QEDE_PRI_FLAG_LEN, }; static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Coupled-Function", + "SmartAN capable", }; enum qede_ethtool_tests { @@ -404,8 +406,15 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) static u32 qede_get_priv_flags(struct net_device *dev) { struct qede_dev *edev = netdev_priv(dev); + u32 flags = 0; - return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; + if (edev->dev_info.common.num_hwfns > 1) + flags |= BIT(QEDE_PRI_FLAG_CMT); + + if (edev->dev_info.common.smart_an) + flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT); + + return flags; } struct qede_link_mode_mapping { @@ -1654,8 +1663,11 @@ static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) /* Wait for loopback configuration to apply */ msleep_interruptible(500); - /* prepare the loopback packet */ - pkt_size = edev->ndev->mtu + ETH_HLEN; + /* Setting max packet size to 1.5K to avoid data being split over + * multiple BDs in cases where MTU > PAGE_SIZE. + */ + pkt_size = (((edev->ndev->mtu < ETH_DATA_LEN) ? 
+ edev->ndev->mtu : ETH_DATA_LEN) + ETH_HLEN); skb = netdev_alloc_skb(edev->ndev, pkt_size); if (!skb) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b16ce7d93caf..add922b93d2c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1665,198 +1665,6 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev, return 0; } -static int qede_flow_spec_to_tuple_ipv4_common(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - if ((fs->h_u.tcp_ip4_spec.ip4src & - fs->m_u.tcp_ip4_spec.ip4src) != fs->h_u.tcp_ip4_spec.ip4src) { - DP_INFO(edev, "Don't support IP-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.ip4dst & - fs->m_u.tcp_ip4_spec.ip4dst) != fs->h_u.tcp_ip4_spec.ip4dst) { - DP_INFO(edev, "Don't support IP-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.psrc & - fs->m_u.tcp_ip4_spec.psrc) != fs->h_u.tcp_ip4_spec.psrc) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip4_spec.pdst & - fs->m_u.tcp_ip4_spec.pdst) != fs->h_u.tcp_ip4_spec.pdst) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if (fs->h_u.tcp_ip4_spec.tos) { - DP_INFO(edev, "Don't support tos\n"); - return -EOPNOTSUPP; - } - - t->eth_proto = htons(ETH_P_IP); - t->src_ipv4 = fs->h_u.tcp_ip4_spec.ip4src; - t->dst_ipv4 = fs->h_u.tcp_ip4_spec.ip4dst; - t->src_port = fs->h_u.tcp_ip4_spec.psrc; - t->dst_port = fs->h_u.tcp_ip4_spec.pdst; - - return qede_set_v4_tuple_to_profile(edev, t); -} - -static int qede_flow_spec_to_tuple_tcpv4(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_TCP; - - if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_udpv4(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_UDP; - - if (qede_flow_spec_to_tuple_ipv4_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_ipv6_common(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - struct in6_addr zero_addr; - - memset(&zero_addr, 0, sizeof(zero_addr)); - - if ((fs->h_u.tcp_ip6_spec.psrc & - fs->m_u.tcp_ip6_spec.psrc) != fs->h_u.tcp_ip6_spec.psrc) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if ((fs->h_u.tcp_ip6_spec.pdst & - fs->m_u.tcp_ip6_spec.pdst) != fs->h_u.tcp_ip6_spec.pdst) { - DP_INFO(edev, "Don't support port-masks\n"); - return -EOPNOTSUPP; - } - - if (fs->h_u.tcp_ip6_spec.tclass) { - DP_INFO(edev, "Don't support tclass\n"); - return -EOPNOTSUPP; - } - - t->eth_proto = htons(ETH_P_IPV6); - memcpy(&t->src_ipv6, &fs->h_u.tcp_ip6_spec.ip6src, - sizeof(struct in6_addr)); - memcpy(&t->dst_ipv6, &fs->h_u.tcp_ip6_spec.ip6dst, - sizeof(struct in6_addr)); - t->src_port = fs->h_u.tcp_ip6_spec.psrc; - t->dst_port = fs->h_u.tcp_ip6_spec.pdst; - - return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); -} - -static int qede_flow_spec_to_tuple_tcpv6(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_TCP; - - if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple_udpv6(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct 
ethtool_rx_flow_spec *fs) -{ - t->ip_proto = IPPROTO_UDP; - - if (qede_flow_spec_to_tuple_ipv6_common(edev, t, fs)) - return -EINVAL; - - return 0; -} - -static int qede_flow_spec_to_tuple(struct qede_dev *edev, - struct qede_arfs_tuple *t, - struct ethtool_rx_flow_spec *fs) -{ - memset(t, 0, sizeof(*t)); - - if (qede_flow_spec_validate_unused(edev, fs)) - return -EOPNOTSUPP; - - switch ((fs->flow_type & ~FLOW_EXT)) { - case TCP_V4_FLOW: - return qede_flow_spec_to_tuple_tcpv4(edev, t, fs); - case UDP_V4_FLOW: - return qede_flow_spec_to_tuple_udpv4(edev, t, fs); - case TCP_V6_FLOW: - return qede_flow_spec_to_tuple_tcpv6(edev, t, fs); - case UDP_V6_FLOW: - return qede_flow_spec_to_tuple_udpv6(edev, t, fs); - default: - DP_VERBOSE(edev, NETIF_MSG_IFUP, - "Can't support flow of type %08x\n", fs->flow_type); - return -EOPNOTSUPP; - } - - return 0; -} - -static int qede_flow_spec_validate(struct qede_dev *edev, - struct ethtool_rx_flow_spec *fs, - struct qede_arfs_tuple *t) -{ - if (fs->location >= QEDE_RFS_MAX_FLTR) { - DP_INFO(edev, "Location out-of-bounds\n"); - return -EINVAL; - } - - /* Check location isn't already in use */ - if (test_bit(fs->location, edev->arfs->arfs_fltr_bmap)) { - DP_INFO(edev, "Location already in use\n"); - return -EINVAL; - } - - /* Check if the filtering-mode could support the filter */ - if (edev->arfs->filter_count && - edev->arfs->mode != t->mode) { - DP_INFO(edev, - "flow_spec would require filtering mode %08x, but %08x is configured\n", - t->mode, edev->arfs->filter_count); - return -EINVAL; - } - - /* If drop requested then no need to validate other data */ - if (fs->ring_cookie == RX_CLS_FLOW_DISC) - return 0; - - if (ethtool_get_flow_spec_ring_vf(fs->ring_cookie)) - return 0; - - if (fs->ring_cookie >= QEDE_RSS_COUNT(edev)) { - DP_INFO(edev, "Queue out-of-bounds\n"); - return -EINVAL; - } - - return 0; -} - /* Must be called while qede lock is held */ static struct qede_arfs_fltr_node * qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t) @@ -1896,72 +1704,6 @@ static void qede_flow_set_destination(struct qede_dev *edev, "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1); } -int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) -{ - struct ethtool_rx_flow_spec *fsp = &info->fs; - struct qede_arfs_fltr_node *n; - struct qede_arfs_tuple t; - int min_hlen, rc; - - __qede_lock(edev); - - if (!edev->arfs) { - rc = -EPERM; - goto unlock; - } - - /* Translate the flow specification into something fittign our DB */ - rc = qede_flow_spec_to_tuple(edev, &t, fsp); - if (rc) - goto unlock; - - /* Make sure location is valid and filter isn't already set */ - rc = qede_flow_spec_validate(edev, fsp, &t); - if (rc) - goto unlock; - - if (qede_flow_find_fltr(edev, &t)) { - rc = -EINVAL; - goto unlock; - } - - n = kzalloc(sizeof(*n), GFP_KERNEL); - if (!n) { - rc = -ENOMEM; - goto unlock; - } - - min_hlen = qede_flow_get_min_header_size(&t); - n->data = kzalloc(min_hlen, GFP_KERNEL); - if (!n->data) { - kfree(n); - rc = -ENOMEM; - goto unlock; - } - - n->sw_id = fsp->location; - set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); - n->buf_len = min_hlen; - - memcpy(&n->tuple, &t, sizeof(n->tuple)); - - qede_flow_set_destination(edev, n, fsp); - - /* Build a minimal header according to the flow */ - n->tuple.build_hdr(&n->tuple, n->data); - - rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); - if (rc) - goto unlock; - - qede_configure_arfs_fltr(edev, n, n->rxq_id, true); - rc = qede_poll_arfs_filter_config(edev, n); -unlock: - 
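[Illustration] The validation code being moved in this hunk is mostly slot bookkeeping: bound-check the requested filter location, make sure that slot is not already marked in a bitmap of active filters, then claim it. A minimal user-space sketch of that bookkeeping, under the assumption of a fixed-size bitmap (MAX_FLTR and the helper names are illustrative, not the driver's identifiers):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_FLTR 256			/* stands in for QEDE_RFS_MAX_FLTR */

static uint64_t fltr_bmap[MAX_FLTR / 64];

static bool slot_test(unsigned int loc)
{
	return fltr_bmap[loc / 64] & (1ULL << (loc % 64));
}

static void slot_set(unsigned int loc)
{
	fltr_bmap[loc / 64] |= 1ULL << (loc % 64);
}

/* Mirrors the shape of the location checks done before a rule is added:
 * out-of-bounds and already-in-use locations are rejected, otherwise the
 * slot is claimed.
 */
static int claim_filter_slot(unsigned int loc)
{
	if (loc >= MAX_FLTR)
		return -EINVAL;		/* "Location out-of-bounds" */
	if (slot_test(loc))
		return -EINVAL;		/* "Location already in use" */
	slot_set(loc);
	return 0;
}

int main(void)
{
	printf("first claim:  %d\n", claim_filter_slot(7));	/* 0 */
	printf("second claim: %d\n", claim_filter_slot(7));	/* -EINVAL */
	return 0;
}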
__qede_unlock(edev); - - return rc; -} - int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie) { struct qede_arfs_fltr_node *fltr = NULL; @@ -2004,190 +1746,172 @@ unlock: } static int qede_parse_actions(struct qede_dev *edev, - struct tcf_exts *exts) + struct flow_action *flow_action) { - int rc = -EINVAL, num_act = 0, i; - const struct tc_action *a; - bool is_drop = false; + const struct flow_action_entry *act; + int i; - if (!tcf_exts_has_actions(exts)) { - DP_NOTICE(edev, "No tc actions received\n"); - return rc; + if (!flow_action_has_entries(flow_action)) { + DP_NOTICE(edev, "No actions received\n"); + return -EINVAL; } - tcf_exts_for_each_action(i, a, exts) { - num_act++; + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: + break; + case FLOW_ACTION_QUEUE: + if (act->queue.vf) + break; - if (is_tcf_gact_shot(a)) - is_drop = true; + if (act->queue.index >= QEDE_RSS_COUNT(edev)) { + DP_INFO(edev, "Queue out-of-bounds\n"); + return -EINVAL; + } + break; + default: + return -EINVAL; + } } - if (num_act == 1 && is_drop) - return 0; - - return rc; + return 0; } static int -qede_tc_parse_ports(struct qede_dev *edev, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *t) -{ - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { - struct flow_dissector_key_ports *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_PORTS, - f->mask); - - if ((key->src && mask->src != U16_MAX) || - (key->dst && mask->dst != U16_MAX)) { +qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, + struct qede_arfs_tuple *t) +{ + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + if ((match.key->src && match.mask->src != U16_MAX) || + (match.key->dst && match.mask->dst != U16_MAX)) { DP_NOTICE(edev, "Do not support ports masks\n"); return -EINVAL; } - t->src_port = key->src; - t->dst_port = key->dst; + t->src_port = match.key->src; + t->dst_port = match.key->dst; } return 0; } static int -qede_tc_parse_v6_common(struct qede_dev *edev, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *t) +qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule, + struct qede_arfs_tuple *t) { struct in6_addr zero_addr, addr; memset(&zero_addr, 0, sizeof(addr)); memset(&addr, 0xff, sizeof(addr)); - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { - struct flow_dissector_key_ipv6_addrs *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV6_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + struct flow_match_ipv6_addrs match; - if ((memcmp(&key->src, &zero_addr, sizeof(addr)) && - memcmp(&mask->src, &addr, sizeof(addr))) || - (memcmp(&key->dst, &zero_addr, sizeof(addr)) && - memcmp(&mask->dst, &addr, sizeof(addr)))) { + flow_rule_match_ipv6_addrs(rule, &match); + if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) && + memcmp(&match.mask->src, &addr, sizeof(addr))) || + (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) && + memcmp(&match.mask->dst, &addr, sizeof(addr)))) { DP_NOTICE(edev, "Do not support IPv6 address prefix/mask\n"); return -EINVAL; } - memcpy(&t->src_ipv6, &key->src, sizeof(addr)); - memcpy(&t->dst_ipv6, &key->dst, 
sizeof(addr)); + memcpy(&t->src_ipv6, &match.key->src, sizeof(addr)); + memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr)); } - if (qede_tc_parse_ports(edev, f, t)) + if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v6_tuple_to_profile(edev, t, &zero_addr); } static int -qede_tc_parse_v4_common(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *t) { - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { - struct flow_dissector_key_ipv4_addrs *key, *mask; - - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->key); - mask = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_IPV4_ADDRS, - f->mask); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; - if ((key->src && mask->src != U32_MAX) || - (key->dst && mask->dst != U32_MAX)) { + flow_rule_match_ipv4_addrs(rule, &match); + if ((match.key->src && match.mask->src != U32_MAX) || + (match.key->dst && match.mask->dst != U32_MAX)) { DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n"); return -EINVAL; } - t->src_ipv4 = key->src; - t->dst_ipv4 = key->dst; + t->src_ipv4 = match.key->src; + t->dst_ipv4 = match.key->dst; } - if (qede_tc_parse_ports(edev, f, t)) + if (qede_flow_parse_ports(edev, rule, t)) return -EINVAL; return qede_set_v4_tuple_to_profile(edev, t); } static int -qede_tc_parse_tcp_v6(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_tc_parse_v6_common(edev, f, tuple); + return qede_flow_parse_v6_common(edev, rule, tuple); } static int -qede_tc_parse_tcp_v4(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_TCP; tuple->eth_proto = htons(ETH_P_IP); - return qede_tc_parse_v4_common(edev, f, tuple); + return qede_flow_parse_v4_common(edev, rule, tuple); } static int -qede_tc_parse_udp_v6(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IPV6); - return qede_tc_parse_v6_common(edev, f, tuple); + return qede_flow_parse_v6_common(edev, rule, tuple); } static int -qede_tc_parse_udp_v4(struct qede_dev *edev, - struct tc_cls_flower_offload *f, +qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule, struct qede_arfs_tuple *tuple) { tuple->ip_proto = IPPROTO_UDP; tuple->eth_proto = htons(ETH_P_IP); - return qede_tc_parse_v4_common(edev, f, tuple); + return qede_flow_parse_v4_common(edev, rule, tuple); } static int -qede_parse_flower_attr(struct qede_dev *edev, __be16 proto, - struct tc_cls_flower_offload *f, - struct qede_arfs_tuple *tuple) +qede_parse_flow_attr(struct qede_dev *edev, __be16 proto, + struct flow_rule *rule, struct qede_arfs_tuple *tuple) { + struct flow_dissector *dissector = rule->match.dissector; int rc = -EINVAL; u8 ip_proto = 0; memset(tuple, 0, sizeof(*tuple)); - if (f->dissector->used_keys & + if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 
BIT(FLOW_DISSECTOR_KEY_PORTS))) { DP_NOTICE(edev, "Unsupported key set:0x%x\n", - f->dissector->used_keys); + dissector->used_keys); return -EOPNOTSUPP; } @@ -2197,25 +1921,23 @@ qede_parse_flower_attr(struct qede_dev *edev, __be16 proto, return -EPROTONOSUPPORT; } - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { - struct flow_dissector_key_basic *key; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; - key = skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_BASIC, - f->key); - ip_proto = key->ip_proto; + flow_rule_match_basic(rule, &match); + ip_proto = match.key->ip_proto; } if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP)) - rc = qede_tc_parse_tcp_v4(edev, f, tuple); + rc = qede_flow_parse_tcp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6)) - rc = qede_tc_parse_tcp_v6(edev, f, tuple); + rc = qede_flow_parse_tcp_v6(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP)) - rc = qede_tc_parse_udp_v4(edev, f, tuple); + rc = qede_flow_parse_udp_v4(edev, rule, tuple); else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6)) - rc = qede_tc_parse_udp_v6(edev, f, tuple); + rc = qede_flow_parse_udp_v6(edev, rule, tuple); else - DP_NOTICE(edev, "Invalid tc protocol request\n"); + DP_NOTICE(edev, "Invalid protocol request\n"); return rc; } @@ -2235,7 +1957,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse flower attribute and prepare filter */ - if (qede_parse_flower_attr(edev, proto, f, &t)) + if (qede_parse_flow_attr(edev, proto, f->rule, &t)) goto unlock; /* Validate profile mode and number of filters */ @@ -2248,7 +1970,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse tc actions and get the vf_id */ - if (qede_parse_actions(edev, f->exts)) + if (qede_parse_actions(edev, &f->rule->action)) goto unlock; if (qede_flow_find_fltr(edev, &t)) { @@ -2290,3 +2012,141 @@ unlock: __qede_unlock(edev); return rc; } + +static int qede_flow_spec_validate(struct qede_dev *edev, + struct flow_action *flow_action, + struct qede_arfs_tuple *t, + __u32 location) +{ + if (location >= QEDE_RFS_MAX_FLTR) { + DP_INFO(edev, "Location out-of-bounds\n"); + return -EINVAL; + } + + /* Check location isn't already in use */ + if (test_bit(location, edev->arfs->arfs_fltr_bmap)) { + DP_INFO(edev, "Location already in use\n"); + return -EINVAL; + } + + /* Check if the filtering-mode could support the filter */ + if (edev->arfs->filter_count && + edev->arfs->mode != t->mode) { + DP_INFO(edev, + "flow_spec would require filtering mode %08x, but %08x is configured\n", + t->mode, edev->arfs->filter_count); + return -EINVAL; + } + + if (qede_parse_actions(edev, flow_action)) + return -EINVAL; + + return 0; +} + +static int qede_flow_spec_to_rule(struct qede_dev *edev, + struct qede_arfs_tuple *t, + struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_rx_flow_spec_input input = {}; + struct ethtool_rx_flow_rule *flow; + __be16 proto; + int err = 0; + + if (qede_flow_spec_validate_unused(edev, fs)) + return -EOPNOTSUPP; + + switch ((fs->flow_type & ~FLOW_EXT)) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + proto = htons(ETH_P_IP); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + proto = htons(ETH_P_IPV6); + break; + default: + DP_VERBOSE(edev, NETIF_MSG_IFUP, + "Can't support flow of type %08x\n", fs->flow_type); + return -EOPNOTSUPP; + } + + input.fs = fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) 
+ return PTR_ERR(flow); + + if (qede_parse_flow_attr(edev, proto, flow->rule, t)) { + err = -EINVAL; + goto err_out; + } + + /* Make sure location is valid and filter isn't already set */ + err = qede_flow_spec_validate(edev, &flow->rule->action, t, + fs->location); +err_out: + ethtool_rx_flow_rule_destroy(flow); + return err; + +} + +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *n; + struct qede_arfs_tuple t; + int min_hlen, rc; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + /* Translate the flow specification into something fittign our DB */ + rc = qede_flow_spec_to_rule(edev, &t, fsp); + if (rc) + goto unlock; + + if (qede_flow_find_fltr(edev, &t)) { + rc = -EINVAL; + goto unlock; + } + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + rc = -ENOMEM; + goto unlock; + } + + min_hlen = qede_flow_get_min_header_size(&t); + n->data = kzalloc(min_hlen, GFP_KERNEL); + if (!n->data) { + kfree(n); + rc = -ENOMEM; + goto unlock; + } + + n->sw_id = fsp->location; + set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); + n->buf_len = min_hlen; + + memcpy(&n->tuple, &t, sizeof(n->tuple)); + + qede_flow_set_destination(edev, n, fsp); + + /* Build a minimal header according to the flow */ + n->tuple.build_hdr(&n->tuple, n->data); + + rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); + if (rc) + goto unlock; + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + rc = qede_poll_arfs_filter_config(edev, n); +unlock: + __qede_unlock(edev); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index bdf816fe5a16..31b046e24565 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + struct qede_dev *edev = netdev_priv(dev); + int total_txq; + + total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; + + return QEDE_TSS_COUNT(edev) ? + fallback(dev, skb, NULL) % total_txq : 0; +} + /* 8B udp header + 8B base tunnel header + 32B option length */ #define QEDE_MAX_TUN_HDR_LEN 48 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5a74fcbdbc2b..02a97c659e29 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -133,23 +133,12 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void qede_remove(struct pci_dev *pdev); static void qede_shutdown(struct pci_dev *pdev); static void qede_link_update(void *dev, struct qed_link_output *link); +static void qede_schedule_recovery_handler(void *dev); +static void qede_recovery_handler(struct qede_dev *edev); static void qede_get_eth_tlv_data(void *edev, void *data); static void qede_get_generic_tlv_data(void *edev, struct qed_generic_tlvs *data); -/* The qede lock is used to protect driver state change and driver flows that - * are not reentrant. 
- */ -void __qede_lock(struct qede_dev *edev) -{ - mutex_lock(&edev->qede_lock); -} - -void __qede_unlock(struct qede_dev *edev) -{ - mutex_unlock(&edev->qede_lock); -} - #ifdef CONFIG_QED_SRIOV static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) @@ -231,6 +220,7 @@ static struct qed_eth_cb_ops qede_ll_ops = { .arfs_filter_op = qede_arfs_filter_op, #endif .link_update = qede_link_update, + .schedule_recovery_handler = qede_schedule_recovery_handler, .get_generic_tlv_data = qede_get_generic_tlv_data, .get_protocol_tlv_data = qede_get_eth_tlv_data, }, @@ -631,6 +621,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -666,6 +657,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -684,6 +676,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, .ndo_set_rx_mode = qede_set_rx_mode, .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -950,11 +943,57 @@ err: return -ENOMEM; } +/* The qede lock is used to protect driver state change and driver flows that + * are not reentrant. + */ +void __qede_lock(struct qede_dev *edev) +{ + mutex_lock(&edev->qede_lock); +} + +void __qede_unlock(struct qede_dev *edev) +{ + mutex_unlock(&edev->qede_lock); +} + +/* This version of the lock should be used when acquiring the RTNL lock is also + * needed in addition to the internal qede lock. + */ +void qede_lock(struct qede_dev *edev) +{ + rtnl_lock(); + __qede_lock(edev); +} + +void qede_unlock(struct qede_dev *edev) +{ + __qede_unlock(edev); + rtnl_unlock(); +} + static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); + /* The locking scheme depends on the specific flag: + * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to + * ensure that ongoing flows are ended and new ones are not started. + * In other cases - only the internal qede lock should be acquired. + */ + + if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) { +#ifdef CONFIG_QED_SRIOV + /* SRIOV must be disabled outside the lock to avoid a deadlock. + * The recovery of the active VFs is currently not supported. 
+ */ + qede_sriov_configure(edev->pdev, 0); +#endif + qede_lock(edev); + qede_recovery_handler(edev); + qede_unlock(edev); + } + __qede_lock(edev); if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) @@ -1031,6 +1070,7 @@ static void qede_log_probe(struct qede_dev *edev) enum qede_probe_mode { QEDE_PROBE_NORMAL, + QEDE_PROBE_RECOVERY, }; static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, @@ -1051,6 +1091,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, probe_params.dp_module = dp_module; probe_params.dp_level = dp_level; probe_params.is_vf = is_vf; + probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY); cdev = qed_ops->common->probe(pdev, &probe_params); if (!cdev) { rc = -ENODEV; @@ -1078,11 +1119,20 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (rc) goto err2; - edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, - dp_level); - if (!edev) { - rc = -ENOMEM; - goto err2; + if (mode != QEDE_PROBE_RECOVERY) { + edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module, + dp_level); + if (!edev) { + rc = -ENOMEM; + goto err2; + } + } else { + struct net_device *ndev = pci_get_drvdata(pdev); + + edev = netdev_priv(ndev); + edev->cdev = cdev; + memset(&edev->stats, 0, sizeof(edev->stats)); + memcpy(&edev->dev_info, &dev_info, sizeof(dev_info)); } if (is_vf) @@ -1090,28 +1140,31 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_init_ndev(edev); - rc = qede_rdma_dev_add(edev); + rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY)); if (rc) goto err3; - /* Prepare the lock prior to the registration of the netdev, - * as once it's registered we might reach flows requiring it - * [it's even possible to reach a flow needing it directly - * from there, although it's unlikely]. - */ - INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); - mutex_init(&edev->qede_lock); - rc = register_netdev(edev->ndev); - if (rc) { - DP_NOTICE(edev, "Cannot register net-device\n"); - goto err4; + if (mode != QEDE_PROBE_RECOVERY) { + /* Prepare the lock prior to the registration of the netdev, + * as once it's registered we might reach flows requiring it + * [it's even possible to reach a flow needing it directly + * from there, although it's unlikely]. 
+ */ + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); + mutex_init(&edev->qede_lock); + + rc = register_netdev(edev->ndev); + if (rc) { + DP_NOTICE(edev, "Cannot register net-device\n"); + goto err4; + } } edev->ops->common->set_name(cdev, edev->ndev->name); /* PTP not supported on VFs */ if (!is_vf) - qede_ptp_enable(edev, true); + qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL)); edev->ops->register_ops(cdev, &qede_ll_ops, edev); @@ -1126,7 +1179,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; err4: - qede_rdma_dev_remove(edev); + qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY)); err3: free_netdev(edev->ndev); err2: @@ -1162,6 +1215,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) enum qede_remove_mode { QEDE_REMOVE_NORMAL, + QEDE_REMOVE_RECOVERY, }; static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) @@ -1172,15 +1226,19 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) DP_INFO(edev, "Starting qede_remove\n"); - qede_rdma_dev_remove(edev); - unregister_netdev(ndev); - cancel_delayed_work_sync(&edev->sp_task); + qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY)); - qede_ptp_disable(edev); + if (mode != QEDE_REMOVE_RECOVERY) { + unregister_netdev(ndev); - edev->ops->common->set_power_state(cdev, PCI_D0); + cancel_delayed_work_sync(&edev->sp_task); - pci_set_drvdata(pdev, NULL); + edev->ops->common->set_power_state(cdev, PCI_D0); + + pci_set_drvdata(pdev, NULL); + } + + qede_ptp_disable(edev); /* Use global ops since we've freed edev */ qed_ops->common->slowpath_stop(cdev); @@ -1194,7 +1252,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) * [e.g., QED register callbacks] won't break anything when * accessing the netdevice. 
*/ - free_netdev(ndev); + if (mode != QEDE_REMOVE_RECOVERY) + free_netdev(ndev); dev_info(&pdev->dev, "Ending qede_remove successfully\n"); } @@ -1539,6 +1598,58 @@ static int qede_alloc_mem_load(struct qede_dev *edev) return 0; } +static void qede_empty_tx_queue(struct qede_dev *edev, + struct qede_tx_queue *txq) +{ + unsigned int pkts_compl = 0, bytes_compl = 0; + struct netdev_queue *netdev_txq; + int rc, len = 0; + + netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); + + while (qed_chain_get_cons_idx(&txq->tx_pbl) != + qed_chain_get_prod_idx(&txq->tx_pbl)) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", + txq->index, qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl)); + + rc = qede_free_tx_pkt(edev, txq, &len); + if (rc) { + DP_NOTICE(edev, + "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n", + txq->index, + qed_chain_get_cons_idx(&txq->tx_pbl), + qed_chain_get_prod_idx(&txq->tx_pbl)); + break; + } + + bytes_compl += len; + pkts_compl++; + txq->sw_tx_cons++; + } + + netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl); +} + +static void qede_empty_tx_queues(struct qede_dev *edev) +{ + int i; + + for_each_queue(i) + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + int cos; + + for_each_cos_in_txq(edev, cos) { + struct qede_fastpath *fp; + + fp = &edev->fp_array[i]; + qede_empty_tx_queue(edev, + &fp->txq[cos]); + } + } +} + /* This function inits fp content and resets the SB, RXQ and TXQ structures */ static void qede_init_fp(struct qede_dev *edev) { @@ -2053,6 +2164,7 @@ out: enum qede_unload_mode { QEDE_UNLOAD_NORMAL, + QEDE_UNLOAD_RECOVERY, }; static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, @@ -2068,7 +2180,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); - edev->state = QEDE_STATE_CLOSED; + if (mode != QEDE_UNLOAD_RECOVERY) + edev->state = QEDE_STATE_CLOSED; qede_rdma_dev_event_close(edev); @@ -2076,17 +2189,20 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, netif_tx_disable(edev->ndev); netif_carrier_off(edev->ndev); - /* Reset the link */ - memset(&link_params, 0, sizeof(link_params)); - link_params.link_up = false; - edev->ops->common->set_link(edev->cdev, &link_params); - rc = qede_stop_queues(edev); - if (rc) { - qede_sync_free_irqs(edev); - goto out; - } + if (mode != QEDE_UNLOAD_RECOVERY) { + /* Reset the link */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = false; + edev->ops->common->set_link(edev->cdev, &link_params); - DP_INFO(edev, "Stopped Queues\n"); + rc = qede_stop_queues(edev); + if (rc) { + qede_sync_free_irqs(edev); + goto out; + } + + DP_INFO(edev, "Stopped Queues\n"); + } qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); @@ -2102,18 +2218,26 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_napi_disable_remove(edev); + if (mode == QEDE_UNLOAD_RECOVERY) + qede_empty_tx_queues(edev); + qede_free_mem_load(edev); qede_free_fp_array(edev); out: if (!is_locked) __qede_unlock(edev); + + if (mode != QEDE_UNLOAD_RECOVERY) + DP_NOTICE(edev, "Link is down\n"); + DP_INFO(edev, "Ending qede unload\n"); } enum qede_load_mode { QEDE_LOAD_NORMAL, QEDE_LOAD_RELOAD, + QEDE_LOAD_RECOVERY, }; static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, @@ -2293,6 +2417,77 @@ static void qede_link_update(void 
*dev, struct qed_link_output *link) } } +static void qede_schedule_recovery_handler(void *dev) +{ + struct qede_dev *edev = dev; + + if (edev->state == QEDE_STATE_RECOVERY) { + DP_NOTICE(edev, + "Avoid scheduling a recovery handling since already in recovery state\n"); + return; + } + + set_bit(QEDE_SP_RECOVERY, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); + + DP_INFO(edev, "Scheduled a recovery handler\n"); +} + +static void qede_recovery_failed(struct qede_dev *edev) +{ + netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n"); + + netif_device_detach(edev->ndev); + + if (edev->cdev) + edev->ops->common->set_power_state(edev->cdev, PCI_D3hot); +} + +static void qede_recovery_handler(struct qede_dev *edev) +{ + u32 curr_state = edev->state; + int rc; + + DP_NOTICE(edev, "Starting a recovery process\n"); + + /* No need to acquire first the qede_lock since is done by qede_sp_task + * before calling this function. + */ + edev->state = QEDE_STATE_RECOVERY; + + edev->ops->common->recovery_prolog(edev->cdev); + + if (curr_state == QEDE_STATE_OPEN) + qede_unload(edev, QEDE_UNLOAD_RECOVERY, true); + + __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY); + + rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level, + IS_VF(edev), QEDE_PROBE_RECOVERY); + if (rc) { + edev->cdev = NULL; + goto err; + } + + if (curr_state == QEDE_STATE_OPEN) { + rc = qede_load(edev, QEDE_LOAD_RECOVERY, true); + if (rc) + goto err; + + qede_config_rx_mode(edev->ndev); + udp_tunnel_get_rx_info(edev->ndev); + } + + edev->state = curr_state; + + DP_NOTICE(edev, "Recovery handling is done\n"); + + return; + +err: + qede_recovery_failed(edev); +} + static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq) { struct netdev_queue *netdev_txq; diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 1900bf7e67d1..ffabc2d2f082 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -50,6 +50,8 @@ static void _qede_rdma_dev_add(struct qede_dev *edev) if (!qedr_drv) return; + /* Leftovers from previous error recovery */ + edev->rdma_info.exp_recovery = false; edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev, edev->ndev); } @@ -87,21 +89,26 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev) destroy_workqueue(edev->rdma_info.rdma_wq); } -int qede_rdma_dev_add(struct qede_dev *edev) +int qede_rdma_dev_add(struct qede_dev *edev, bool recovery) { - int rc = 0; + int rc; - if (qede_rdma_supported(edev)) { - rc = qede_rdma_create_wq(edev); - if (rc) - return rc; + if (!qede_rdma_supported(edev)) + return 0; - INIT_LIST_HEAD(&edev->rdma_info.entry); - mutex_lock(&qedr_dev_list_lock); - list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); - _qede_rdma_dev_add(edev); - mutex_unlock(&qedr_dev_list_lock); - } + /* Cannot start qedr while recovering since it wasn't fully stopped */ + if (recovery) + return 0; + + rc = qede_rdma_create_wq(edev); + if (rc) + return rc; + + INIT_LIST_HEAD(&edev->rdma_info.entry); + mutex_lock(&qedr_dev_list_lock); + list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); + _qede_rdma_dev_add(edev); + mutex_unlock(&qedr_dev_list_lock); return rc; } @@ -110,19 +117,30 @@ static void _qede_rdma_dev_remove(struct qede_dev *edev) { if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) qedr_drv->remove(edev->rdma_info.qedr_dev); - edev->rdma_info.qedr_dev = NULL; } -void qede_rdma_dev_remove(struct qede_dev *edev) 
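[Illustration] The recovery handler added above is essentially a fixed sequence: remember the current state, unload only if the device was open, tear down and re-probe in recovery mode, reload if it had been open, then restore the saved state; any failure falls through to a single error exit that leaves the device detached. A condensed, stand-alone sketch of just that control flow (the do_* step functions are placeholders, not the driver's API):

#include <stdio.h>

enum state { CLOSED, OPEN, RECOVERY };

/* Placeholder steps standing in for unload/remove/probe/load in recovery
 * mode; probe and load report failure like the driver helpers do.
 */
static void do_unload(void) { }
static void do_remove(void) { }
static int do_probe(void)   { return 0; }
static int do_load(void)    { return 0; }

static int recovery_handler(enum state *state)
{
	enum state curr = *state;

	*state = RECOVERY;

	if (curr == OPEN)
		do_unload();
	do_remove();

	if (do_probe())
		goto err;
	if (curr == OPEN && do_load())
		goto err;

	*state = curr;		/* recovery done, restore previous state */
	return 0;

err:
	fprintf(stderr, "recovery failed, device left detached\n");
	return -1;
}

int main(void)
{
	enum state s = OPEN;

	return recovery_handler(&s) ? 1 : 0;
}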
+void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery) { if (!qede_rdma_supported(edev)) return; - qede_rdma_destroy_wq(edev); - mutex_lock(&qedr_dev_list_lock); - _qede_rdma_dev_remove(edev); - list_del(&edev->rdma_info.entry); - mutex_unlock(&qedr_dev_list_lock); + /* Cannot remove qedr while recovering since it wasn't fully stopped */ + if (!recovery) { + qede_rdma_destroy_wq(edev); + mutex_lock(&qedr_dev_list_lock); + if (!edev->rdma_info.exp_recovery) + _qede_rdma_dev_remove(edev); + edev->rdma_info.qedr_dev = NULL; + list_del(&edev->rdma_info.entry); + mutex_unlock(&qedr_dev_list_lock); + } else { + if (!edev->rdma_info.exp_recovery) { + mutex_lock(&qedr_dev_list_lock); + _qede_rdma_dev_remove(edev); + mutex_unlock(&qedr_dev_list_lock); + } + edev->rdma_info.exp_recovery = true; + } } static void _qede_rdma_dev_open(struct qede_dev *edev) @@ -204,7 +222,8 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv) mutex_lock(&qedr_dev_list_lock); list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { - if (edev->rdma_info.qedr_dev) + /* If device has experienced recovery it was already removed */ + if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery) _qede_rdma_dev_remove(edev); } qedr_drv = NULL; @@ -284,6 +303,10 @@ static void qede_rdma_add_event(struct qede_dev *edev, { struct qede_rdma_event_work *event_node; + /* If a recovery was experienced avoid adding the event */ + if (edev->rdma_info.exp_recovery) + return; + if (!edev->rdma_info.qedr_dev) return; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 16d0479f6891..7a873002e626 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -396,7 +396,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *netdev, - const unsigned char *addr, u16 vid, u16 flags) + const unsigned char *addr, u16 vid, u16 flags, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err = 0; diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c index 5edbd532127d..a6886cc5654c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c @@ -249,8 +249,8 @@ static void ql_update_stats(struct ql_adapter *qdev) spin_lock(&qdev->stats_lock); if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { - netif_err(qdev, drv, qdev->ndev, - "Couldn't get xgmac sem.\n"); + netif_err(qdev, drv, qdev->ndev, + "Couldn't get xgmac sem.\n"); goto quit; } /* diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 059ba9429e51..096515c27263 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1159,10 +1159,10 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) map = lbq_desc->p.pg_chunk.map + lbq_desc->p.pg_chunk.offset; - dma_unmap_addr_set(lbq_desc, mapaddr, map); + dma_unmap_addr_set(lbq_desc, mapaddr, map); dma_unmap_len_set(lbq_desc, maplen, rx_ring->lbq_buf_size); - *lbq_desc->addr = cpu_to_le64(map); + *lbq_desc->addr = cpu_to_le64(map); pci_dma_sync_single_for_device(qdev->pdev, map, rx_ring->lbq_buf_size, diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 
8d790313ee3d..20d2400ad300 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1204,7 +1204,7 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) if (tpbuf->skb) { pkts_compl++; bytes_compl += tpbuf->skb->len; - dev_kfree_skb_irq(tpbuf->skb); + dev_consume_skb_irq(tpbuf->skb); tpbuf->skb = NULL; } diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 44f6e4873aad..4f910c4f67b0 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp) } bytes_compl += skb->len; pkts_compl++; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } cp->tx_skb[tx_tail] = NULL; diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index 69d752f0b621..55d01266e615 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -258,6 +258,7 @@ static const struct pci_device_id rtl8139_pci_tbl[] = { {0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, {0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, + {0x16ec, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, #ifdef CONFIG_SH_SECUREEDGE5410 /* Bogus 8139 silicon reports 8129 without external PROM :-( */ diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index 7e011c1c1e6e..cfb67b746595 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -454,14 +454,14 @@ static void hardware_init(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; - int i; + int i; /* Turn off the printer multiplexer on the 8012. */ for (i = 0; i < 8; i++) outb(mux_8012[i], ioaddr + PAR_DATA); write_reg_high(ioaddr, CMR1, CMR1h_RESET); - for (i = 0; i < 6; i++) + for (i = 0; i < 6; i++) write_reg_byte(ioaddr, PAR0 + i, dev->dev_addr[i]); write_reg_high(ioaddr, CMR2, lp->addr_mode); @@ -471,15 +471,15 @@ static void hardware_init(struct net_device *dev) (read_nibble(ioaddr, CMR2_h) >> 3) & 0x0f); } - write_reg(ioaddr, CMR2, CMR2_IRQOUT); - write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); + write_reg(ioaddr, CMR2, CMR2_IRQOUT); + write_reg_high(ioaddr, CMR1, CMR1h_RxENABLE | CMR1h_TxENABLE); /* Enable the interrupt line from the serial port. */ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL); /* Unmask the interesting interrupts. 
*/ - write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); - write_reg_high(ioaddr, IMR, ISRh_RxErr); + write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); + write_reg_high(ioaddr, IMR, ISRh_RxErr); lp->tx_unit_busy = 0; lp->pac_cnt_in_tx_buf = 0; @@ -610,10 +610,12 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) write_reg(ioaddr, CMR2, CMR2_NULL); write_reg(ioaddr, IMR, 0); - if (net_debug > 5) printk(KERN_DEBUG "%s: In interrupt ", dev->name); - while (--boguscount > 0) { + if (net_debug > 5) + printk(KERN_DEBUG "%s: In interrupt ", dev->name); + while (--boguscount > 0) { int status = read_nibble(ioaddr, ISR); - if (net_debug > 5) printk("loop status %02x..", status); + if (net_debug > 5) + printk("loop status %02x..", status); if (status & (ISR_RxOK<<3)) { handled = 1; @@ -640,7 +642,8 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) } while (--boguscount > 0); } else if (status & ((ISR_TxErr + ISR_TxOK)<<3)) { handled = 1; - if (net_debug > 6) printk("handling Tx done.."); + if (net_debug > 6) + printk("handling Tx done.."); /* Clear the Tx interrupt. We should check for too many failures and reinitialize the adapter. */ write_reg(ioaddr, ISR, ISR_TxErr + ISR_TxOK); @@ -680,7 +683,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) break; } else break; - } + } /* This following code fixes a rare (and very difficult to track down) problem where the adapter forgets its ethernet address. */ @@ -694,7 +697,7 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) } /* Tell the adapter that it can go back to using the output line as IRQ. */ - write_reg(ioaddr, CMR2, CMR2_IRQOUT); + write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Enable the physical interrupt line, which is sure to be low until.. */ outb(Ctrl_SelData + Ctrl_IRQEN, ioaddr + PAR_CONTROL); /* .. we enable the interrupt sources. */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index abb94c543aa2..c29dde064078 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -229,7 +229,6 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); -static int use_dac = -1; static struct { u32 msg_enable; } debug = { -1 }; @@ -639,6 +638,7 @@ struct rtl8169_private { void __iomem *mmio_addr; /* memory map physical address */ struct pci_dev *pci_dev; struct net_device *dev; + struct phy_device *phydev; struct napi_struct napi; u32 msg_enable; u16 mac_version; @@ -679,12 +679,12 @@ struct rtl8169_private { } wk; unsigned supports_gmii:1; - struct mii_bus *mii_bus; dma_addr_t counters_phys_addr; struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; u32 saved_wolopts; + const char *fw_name; struct rtl_fw { const struct firmware *fw; @@ -697,15 +697,12 @@ struct rtl8169_private { size_t size; } phy_action; } *rtl_fw; -#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) u32 ocp_base; }; MODULE_AUTHOR("Realtek and the Linux r8169 crew "); MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); -module_param(use_dac, int, 0); -MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); MODULE_SOFTDEP("pre: realtek"); @@ -745,6 +742,16 @@ static void rtl_unlock_work(struct rtl8169_private *tp) mutex_unlock(&tp->wk.mutex); } +static void rtl_lock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Lock); +} + +static void rtl_unlock_config_regs(struct rtl8169_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_Unlock); +} + static void rtl_tx_performance_tweak(struct rtl8169_private *tp, u16 force) { pcie_capability_clear_and_set_word(tp->pci_dev, PCI_EXP_DEVCTL, @@ -1278,11 +1285,6 @@ static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0; } -static u16 rtl_get_events(struct rtl8169_private *tp) -{ - return RTL_R16(tp, IntrStatus); -} - static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) { RTL_W16(tp, IntrStatus, bits); @@ -1313,7 +1315,7 @@ static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) static void rtl_link_chg_patch(struct rtl8169_private *tp) { struct net_device *dev = tp->dev; - struct phy_device *phydev = dev->phydev; + struct phy_device *phydev = tp->phydev; if (!netif_running(dev)) return; @@ -1369,41 +1371,6 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) -static u32 __rtl8169_get_wol(struct rtl8169_private *tp) -{ - u8 options; - u32 wolopts = 0; - - options = RTL_R8(tp, Config1); - if (!(options & PMEnable)) - return 0; - - options = RTL_R8(tp, Config3); - if (options & LinkUp) - wolopts |= WAKE_PHY; - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) - wolopts |= WAKE_MAGIC; - break; - default: - if (options & MagicPacket) - wolopts |= WAKE_MAGIC; - break; - } - - options = RTL_R8(tp, Config5); - if (options & UWF) - wolopts |= WAKE_UCAST; - if (options & BWF) - wolopts |= WAKE_BCAST; - if (options & MWF) - wolopts |= WAKE_MCAST; - - return wolopts; -} - static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); @@ -1431,7 +1398,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) }; u8 options; - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + rtl_unlock_config_regs(tp); switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34 ... 
RTL_GIGA_MAC_VER_38: @@ -1479,7 +1446,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; } - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + rtl_lock_config_regs(tp); device_set_wakeup_enable(tp_to_dev(tp), wolopts); } @@ -1508,11 +1475,6 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return 0; } -static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) -{ - return rtl_chip_infos[tp->mac_version].fw_name; -} - static void rtl8169_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1522,7 +1484,7 @@ static void rtl8169_get_drvinfo(struct net_device *dev, strlcpy(info->driver, MODULENAME, sizeof(info->driver)); strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); - if (!IS_ERR_OR_NULL(rtl_fw)) + if (rtl_fw) strlcpy(info->fw_version, rtl_fw->version, sizeof(info->fw_version)); } @@ -1987,6 +1949,196 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } +static int rtl_get_eee_supp(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int ret; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + phy_write(phydev, 0x1f, 0x0a5c); + ret = phy_read(phydev, 0x12); + phy_write(phydev, 0x1f, 0x0000); + break; + default: + ret = -EPROTONOSUPPORT; + break; + } + + return ret; +} + +static int rtl_get_eee_lpadv(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int ret; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + phy_write(phydev, 0x1f, 0x0a5d); + ret = phy_read(phydev, 0x11); + phy_write(phydev, 0x1f, 0x0000); + break; + default: + ret = -EPROTONOSUPPORT; + break; + } + + return ret; +} + +static int rtl_get_eee_adv(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + int ret; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + phy_write(phydev, 0x1f, 0x0a5d); + ret = phy_read(phydev, 0x10); + phy_write(phydev, 0x1f, 0x0000); + break; + default: + ret = -EPROTONOSUPPORT; + break; + } + + return ret; +} + +static int rtl_set_eee_adv(struct rtl8169_private *tp, int val) +{ + struct phy_device *phydev = tp->phydev; + int ret = 0; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + break; + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_51: + phy_write(phydev, 0x1f, 0x0a5d); + phy_write(phydev, 0x10, val); + phy_write(phydev, 0x1f, 0x0000); + break; + default: + ret = -EPROTONOSUPPORT; + break; + } + + return ret; +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = tp_to_dev(tp); + int ret; + + pm_runtime_get_noresume(d); + + if (!pm_runtime_active(d)) { + ret = -EOPNOTSUPP; + goto out; + } + + /* Get Supported EEE */ + ret = rtl_get_eee_supp(tp); + if (ret < 0) + goto out; + data->supported = mmd_eee_cap_to_ethtool_sup_t(ret); + + /* Get advertisement EEE */ + ret = rtl_get_eee_adv(tp); + if (ret < 0) + goto out; + data->advertised = mmd_eee_adv_to_ethtool_adv_t(ret); + data->eee_enabled = !!data->advertised; + + /* Get LP advertisement EEE */ + ret = rtl_get_eee_lpadv(tp); + if (ret < 0) + goto out; + data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(ret); + data->eee_active = !!(data->advertised & data->lp_advertised); +out: + pm_runtime_put_noidle(d); + return ret < 0 ? ret : 0; +} + +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = tp_to_dev(tp); + int old_adv, adv = 0, cap, ret; + + pm_runtime_get_noresume(d); + + if (!dev->phydev || !pm_runtime_active(d)) { + ret = -EOPNOTSUPP; + goto out; + } + + if (dev->phydev->autoneg == AUTONEG_DISABLE || + dev->phydev->duplex != DUPLEX_FULL) { + ret = -EPROTONOSUPPORT; + goto out; + } + + /* Get Supported EEE */ + ret = rtl_get_eee_supp(tp); + if (ret < 0) + goto out; + cap = ret; + + ret = rtl_get_eee_adv(tp); + if (ret < 0) + goto out; + old_adv = ret; + + if (data->eee_enabled) { + adv = !data->advertised ? cap : + ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap; + /* Mask prohibited EEE modes */ + adv &= ~dev->phydev->eee_broken_modes; + } + + if (old_adv != adv) { + ret = rtl_set_eee_adv(tp, adv); + if (ret < 0) + goto out; + + /* Restart autonegotiation so the new modes get sent to the + * link partner. + */ + ret = phy_restart_aneg(dev->phydev); + } + +out: + pm_runtime_put_noidle(d); + return ret < 0 ? 
ret : 0; +} + static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, @@ -2003,10 +2155,20 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_ethtool_stats = rtl8169_get_ethtool_stats, .get_ts_info = ethtool_op_get_ts_info, .nway_reset = phy_ethtool_nway_reset, + .get_eee = rtl8169_get_eee, + .set_eee = rtl8169_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; +static void rtl_enable_eee(struct rtl8169_private *tp) +{ + int supported = rtl_get_eee_supp(tp); + + if (supported > 0) + rtl_set_eee_adv(tp, supported); +} + static void rtl8169_get_mac_version(struct rtl8169_private *tp) { /* @@ -2199,7 +2361,7 @@ static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) if (fw->size % FW_OPCODE_SIZE) goto out; - strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + strlcpy(version, tp->fw_name, RTL_VER_SIZE); pa->code = (__le32 *)fw->data; pa->size = fw->size / FW_OPCODE_SIZE; @@ -2374,20 +2536,18 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) static void rtl_release_firmware(struct rtl8169_private *tp) { - if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + if (tp->rtl_fw) { release_firmware(tp->rtl_fw->fw); kfree(tp->rtl_fw); + tp->rtl_fw = NULL; } - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; } static void rtl_apply_firmware(struct rtl8169_private *tp) { - struct rtl_fw *rtl_fw = tp->rtl_fw; - /* TODO: release firmware once rtl_phy_write_fw signals failures. */ - if (!IS_ERR_OR_NULL(rtl_fw)) - rtl_phy_write_fw(tp, rtl_fw); + if (tp->rtl_fw) + rtl_phy_write_fw(tp, tp->rtl_fw); } static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) @@ -2398,6 +2558,33 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) rtl_apply_firmware(tp); } +static void rtl8168_config_eee_mac(struct rtl8169_private *tp) +{ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC); +} + +static void rtl8168f_config_eee_phy(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + phy_write(phydev, 0x1f, 0x0007); + phy_write(phydev, 0x1e, 0x0020); + phy_set_bits(phydev, 0x15, BIT(8)); + + phy_write(phydev, 0x1f, 0x0005); + phy_write(phydev, 0x05, 0x8b85); + phy_set_bits(phydev, 0x06, BIT(13)); + + phy_write(phydev, 0x1f, 0x0000); +} + +static void rtl8168g_config_eee_phy(struct rtl8169_private *tp) +{ + phy_write(tp->phydev, 0x1f, 0x0a43); + phy_set_bits(tp->phydev, 0x11, BIT(4)); + phy_write(tp->phydev, 0x1f, 0x0000); +} + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) { static const struct phy_reg phy_reg_init[] = { @@ -3165,22 +3352,8 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* EEE setting */ - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000); - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0020); - rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000); - rtl_writephy(tp, 0x1f, 0x0002); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0d, 0x0007); - rtl_writephy(tp, 0x0e, 0x003c); - rtl_writephy(tp, 0x0d, 0x4007); - rtl_writephy(tp, 0x0e, 0x0006); - rtl_writephy(tp, 0x0d, 0x0000); + rtl8168f_config_eee_phy(tp); + rtl_enable_eee(tp); /* Green feature */ 
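[Illustration] The new set_eee path reduces to a small mask computation: start from either the user-requested modes or the full supported set, intersect with what the PHY can do, strip modes known to be broken, and only write back (and restart autonegotiation) when the result differs from the current advertisement. A hedged stand-alone sketch of that computation, with made-up mode bits rather than the kernel's MDIO_EEE_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative EEE mode bits; not the kernel's definitions. */
#define EEE_100TX	(1u << 1)
#define EEE_1000T	(1u << 2)

struct eee_state {
	uint32_t supported;	/* what the PHY can do */
	uint32_t advertised;	/* what is currently advertised */
	uint32_t broken;	/* modes to keep masked out */
};

/* Returns true when the advertisement changed and autoneg must restart,
 * mirroring the old_adv != adv check in rtl8169_set_eee().
 */
static bool eee_update_adv(struct eee_state *st, uint32_t requested, bool enable)
{
	uint32_t adv = 0;

	if (enable) {
		adv = requested ? (requested & st->supported) : st->supported;
		adv &= ~st->broken;
	}

	if (adv == st->advertised)
		return false;

	st->advertised = adv;
	return true;
}

int main(void)
{
	struct eee_state st = {
		.supported  = EEE_100TX | EEE_1000T,
		.advertised = 0,
		.broken     = EEE_1000T,
	};

	/* Enabling EEE with no explicit request advertises all supported,
	 * non-broken modes -> only EEE_100TX here, so aneg must restart.
	 */
	printf("restart aneg: %d, adv = 0x%x\n",
	       eee_update_adv(&st, 0, true), (unsigned int)st.advertised);
	return 0;
}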
rtl_writephy(tp, 0x1f, 0x0003); @@ -3215,6 +3388,9 @@ static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x05, 0x8b86); rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); + + rtl8168f_config_eee_phy(tp); + rtl_enable_eee(tp); } static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) @@ -3348,22 +3524,6 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp) rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* eee setting */ - rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); - rtl_writephy(tp, 0x1f, 0x0005); - rtl_writephy(tp, 0x05, 0x8b85); - rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); - rtl_writephy(tp, 0x1f, 0x0004); - rtl_writephy(tp, 0x1f, 0x0007); - rtl_writephy(tp, 0x1e, 0x0020); - rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0d, 0x0007); - rtl_writephy(tp, 0x0e, 0x003c); - rtl_writephy(tp, 0x0d, 0x4007); - rtl_writephy(tp, 0x0e, 0x0000); - rtl_writephy(tp, 0x0d, 0x0000); - /* Green feature */ rtl_writephy(tp, 0x1f, 0x0003); rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); @@ -3371,6 +3531,30 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0000); } +static void rtl8168g_disable_aldps(struct rtl8169_private *tp) +{ + phy_write(tp->phydev, 0x1f, 0x0a43); + phy_clear_bits(tp->phydev, 0x10, BIT(2)); +} + +static void rtl8168g_phy_adjust_10m_aldps(struct rtl8169_private *tp) +{ + struct phy_device *phydev = tp->phydev; + + phy_write(phydev, 0x1f, 0x0bcc); + phy_clear_bits(phydev, 0x14, BIT(8)); + + phy_write(phydev, 0x1f, 0x0a44); + phy_set_bits(phydev, 0x11, BIT(7) | BIT(6)); + + phy_write(phydev, 0x1f, 0x0a43); + phy_write(phydev, 0x13, 0x8084); + phy_clear_bits(phydev, 0x14, BIT(14) | BIT(13)); + phy_set_bits(phydev, 0x10, BIT(12) | BIT(1) | BIT(0)); + + phy_write(phydev, 0x1f, 0x0000); +} + static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) { rtl_apply_firmware(tp); @@ -3397,14 +3581,7 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x1f, 0x0a44); rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8084); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); - rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl8168g_phy_adjust_10m_aldps(tp); /* EEE auto-fallback function */ rtl_writephy(tp, 0x1f, 0x0a4b); @@ -3429,17 +3606,16 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x14, 0x9065); rtl_writephy(tp, 0x14, 0x1065); - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168g_disable_aldps(tp); + rtl8168g_config_eee_phy(tp); + rtl_enable_eee(tp); } static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) { rtl_apply_firmware(tp); + rtl8168g_config_eee_phy(tp); + rtl_enable_eee(tp); } static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) @@ -3544,12 +3720,9 @@ static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); rtl_writephy(tp, 0x1f, 0x0000); - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - 
rtl_writephy(tp, 0x1f, 0x0000); + rtl8168g_disable_aldps(tp); + rtl8168g_config_eee_phy(tp); + rtl_enable_eee(tp); } static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) @@ -3617,12 +3790,9 @@ static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) rtl_w0w1_phy(tp, 0x11, 0x0000, 0x0080); rtl_writephy(tp, 0x1f, 0x0000); - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168g_disable_aldps(tp); + rtl8168g_config_eee_phy(tp); + rtl_enable_eee(tp); } static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) @@ -3632,16 +3802,7 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); rtl_writephy(tp, 0x1f, 0x0000); - /* patch 10M & ALDPS */ - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8084); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); - rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168g_phy_adjust_10m_aldps(tp); /* Enable EEE auto-fallback function */ rtl_writephy(tp, 0x1f, 0x0a4b); @@ -3659,26 +3820,14 @@ static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); rtl_writephy(tp, 0x1f, 0x0000); - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168g_disable_aldps(tp); + rtl8168g_config_eee_phy(tp); + rtl_enable_eee(tp); } static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) { - /* patch 10M & ALDPS */ - rtl_writephy(tp, 0x1f, 0x0bcc); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); - rtl_writephy(tp, 0x1f, 0x0a44); - rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); - rtl_writephy(tp, 0x1f, 0x0a43); - rtl_writephy(tp, 0x13, 0x8084); - rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); - rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168g_phy_adjust_10m_aldps(tp); /* Enable UC LPF tune function */ rtl_writephy(tp, 0x1f, 0x0a43); @@ -3750,12 +3899,9 @@ static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) rtl_writephy(tp, 0x14, 0x1065); rtl_writephy(tp, 0x1f, 0x0000); - /* Check ALDPS bit, disable it if enabled */ - rtl_writephy(tp, 0x1f, 0x0a43); - if (rtl_readphy(tp, 0x10) & 0x0004) - rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); - - rtl_writephy(tp, 0x1f, 0x0000); + rtl8168g_disable_aldps(tp); + rtl8168g_config_eee_phy(tp); + rtl_enable_eee(tp); } static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) @@ -3994,24 +4140,24 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) } /* We may have called phy_speed_down before */ - phy_speed_up(dev->phydev); + phy_speed_up(tp->phydev); - genphy_soft_reset(dev->phydev); + genphy_soft_reset(tp->phydev); /* It was reported that several chips end up with 10MBit/Half on a * 1GBit link after resuming from S3. For whatever reason the PHY on * these chips doesn't properly start a renegotiation when soft-reset. * Explicitly requesting a renegotiation fixes this. 
*/ - if (dev->phydev->autoneg == AUTONEG_ENABLE) - phy_restart_aneg(dev->phydev); + if (tp->phydev->autoneg == AUTONEG_ENABLE) + phy_restart_aneg(tp->phydev); } static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) { rtl_lock_work(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + rtl_unlock_config_regs(tp); RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); RTL_R32(tp, MAC4); @@ -4022,7 +4168,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) if (tp->mac_version == RTL_GIGA_MAC_VER_34) rtl_rar_exgmac_set(tp, addr); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + rtl_lock_config_regs(tp); rtl_unlock_work(tp); } @@ -4049,10 +4195,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { + struct rtl8169_private *tp = netdev_priv(dev); + if (!netif_running(dev)) return -ENODEV; - return phy_mii_ioctl(dev->phydev, ifr, cmd); + return phy_mii_ioctl(tp->phydev, ifr, cmd); } static void rtl_init_mdio_ops(struct rtl8169_private *tp) @@ -4099,22 +4247,6 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) } } -static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) -{ - struct phy_device *phydev; - - if (!__rtl8169_get_wol(tp)) - return false; - - /* phydev may not be attached to netdevice */ - phydev = mdiobus_get_phy(tp->mii_bus, 0); - - phy_speed_down(phydev, false); - rtl_wol_suspend_quirk(tp); - - return true; -} - static void r8168_pll_power_down(struct rtl8169_private *tp) { if (r8168_check_dash(tp)) @@ -4124,8 +4256,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) tp->mac_version == RTL_GIGA_MAC_VER_33) rtl_ephy_write(tp, 0x19, 0xff64); - if (rtl_wol_pll_power_down(tp)) + if (device_may_wakeup(tp_to_dev(tp))) { + phy_speed_down(tp->phydev, false); + rtl_wol_suspend_quirk(tp); return; + } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25 ... 
RTL_GIGA_MAC_VER_33: @@ -4178,7 +4313,7 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) break; } - phy_resume(tp->dev->phydev); + phy_resume(tp->phydev); /* give MAC/PHY some time to resume */ msleep(20); } @@ -4234,18 +4369,18 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) { if (tp->jumbo_ops.enable) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + rtl_unlock_config_regs(tp); tp->jumbo_ops.enable(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + rtl_lock_config_regs(tp); } } static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) { if (tp->jumbo_ops.disable) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + rtl_unlock_config_regs(tp); tp->jumbo_ops.disable(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + rtl_lock_config_regs(tp); } } @@ -4378,21 +4513,20 @@ static void rtl_hw_reset(struct rtl8169_private *tp) rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); } -static void rtl_request_uncached_firmware(struct rtl8169_private *tp) +static void rtl_request_firmware(struct rtl8169_private *tp) { struct rtl_fw *rtl_fw; - const char *name; int rc = -ENOMEM; - name = rtl_lookup_firmware_name(tp); - if (!name) - goto out_no_firmware; + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); if (!rtl_fw) goto err_warn; - rc = request_firmware(&rtl_fw->fw, name, tp_to_dev(tp)); + rc = request_firmware(&rtl_fw->fw, tp->fw_name, tp_to_dev(tp)); if (rc < 0) goto err_free; @@ -4401,7 +4535,7 @@ static void rtl_request_uncached_firmware(struct rtl8169_private *tp) goto err_release_firmware; tp->rtl_fw = rtl_fw; -out: + return; err_release_firmware: @@ -4410,16 +4544,7 @@ err_free: kfree(rtl_fw); err_warn: netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", - name, rc); -out_no_firmware: - tp->rtl_fw = NULL; - goto out; -} - -static void rtl_request_firmware(struct rtl8169_private *tp) -{ - if (IS_ERR(tp->rtl_fw)) - rtl_request_uncached_firmware(tp); + tp->fw_name, rc); } static void rtl_rx_close(struct rtl8169_private *tp) @@ -4566,13 +4691,13 @@ static void rtl_set_rx_mode(struct net_device *dev) static void rtl_hw_start(struct rtl8169_private *tp) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + rtl_unlock_config_regs(tp); tp->hw_start(tp); rtl_set_rx_max_size(tp); rtl_set_rx_tx_desc_registers(tp); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + rtl_lock_config_regs(tp); /* Initially a 10 us delay. Turned it into a PCI commit. 
- FR */ RTL_R8(tp, IntrMask); @@ -4696,18 +4821,10 @@ static void rtl_enable_clock_request(struct rtl8169_private *tp) PCI_EXP_LNKCTL_CLKREQ_EN); } -static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) +static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) { - u8 data; - - data = RTL_R8(tp, Config3); - - if (enable) - data |= Rdy_to_L23; - else - data &= ~Rdy_to_L23; - - RTL_W8(tp, Config3, data); + /* work around an issue when PCI reset occurs during L2/L3 state */ + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); } static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) @@ -4965,6 +5082,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl8168_config_eee_mac(tp); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); @@ -4997,6 +5116,8 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp) RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN); RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN); RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en); + + rtl8168_config_eee_mac(tp); } static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) @@ -5028,7 +5149,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) }; rtl_hw_start_8168f(tp); - rtl_pcie_state_l2l3_enable(tp, false); + rtl_pcie_state_l2l3_disable(tp); rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); @@ -5059,10 +5180,12 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl8168_config_eee_mac(tp); + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); - rtl_pcie_state_l2l3_enable(tp, false); + rtl_pcie_state_l2l3_disable(tp); } static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) @@ -5161,6 +5284,8 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl8168_config_eee_mac(tp); + RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN); @@ -5168,7 +5293,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); - rtl_pcie_state_l2l3_enable(tp, false); + rtl_pcie_state_l2l3_disable(tp); rtl_writephy(tp, 0x1f, 0x0c42); rg_saw_cnt = (rtl_readphy(tp, 0x13) & 0x3fff); @@ -5241,11 +5366,13 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) /* Adjust EEE LED frequency */ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07); + rtl8168_config_eee_mac(tp); + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN); - rtl_pcie_state_l2l3_enable(tp, false); + rtl_pcie_state_l2l3_disable(tp); } static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) @@ -5516,7 +5643,7 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); - rtl_pcie_state_l2l3_enable(tp, false); + rtl_pcie_state_l2l3_disable(tp); } static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) @@ -5551,7 +5678,7 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp) rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); rtl_w0w1_eri(tp, 
0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); - rtl_pcie_state_l2l3_enable(tp, false); + rtl_pcie_state_l2l3_disable(tp); } static void rtl_hw_start_8106(struct rtl8169_private *tp) @@ -5565,7 +5692,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); - rtl_pcie_state_l2l3_enable(tp, false); + rtl_pcie_state_l2l3_disable(tp); rtl_hw_aspm_clkreq_enable(tp, true); } @@ -5665,11 +5792,6 @@ static inline void rtl8169_mark_to_asic(struct RxDesc *desc) desc->opts1 = cpu_to_le32(DescOwn | eor | R8169_RX_BUF_SIZE); } -static inline void *rtl8169_align(void *data) -{ - return (void *)ALIGN((long)data, 16); -} - static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, struct RxDesc *desc) { @@ -5682,15 +5804,13 @@ static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, if (!data) return NULL; - if (rtl8169_align(data) != data) { - kfree(data); - data = kmalloc_node(R8169_RX_BUF_SIZE + 15, GFP_KERNEL, node); - if (!data) - return NULL; + /* Memory should be properly aligned, but better check. */ + if (!IS_ALIGNED((unsigned long)data, 8)) { + netdev_err_once(tp->dev, "RX buffer not 8-byte-aligned\n"); + goto err_out; } - mapping = dma_map_single(d, rtl8169_align(data), R8169_RX_BUF_SIZE, - DMA_FROM_DEVICE); + mapping = dma_map_single(d, data, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { if (net_ratelimit()) netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); @@ -6072,7 +6192,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct device *d = tp_to_dev(tp); dma_addr_t mapping; u32 opts[2], len; - bool stop_queue; int frags; if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { @@ -6114,6 +6233,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ @@ -6126,14 +6247,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); - if (unlikely(stop_queue)) - netif_stop_queue(dev); - - if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) - RTL_W8(tp, TxPoll, NPQ); + RTL_W8(tp, TxPoll, NPQ); - if (unlikely(stop_queue)) { + if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); /* Sync with rtl_tx: * - publish queue status and cur_tx ring index (write barrier) * - refresh dirty_tx ring index (read barrier). 
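The rtl8169_start_xmit hunk above adds byte queue limits (BQL) accounting via netdev_sent_queue(), writes the TxPoll doorbell unconditionally, and moves the stop-queue decision behind an smp_wmb() so the completion path cannot miss the ring update. A minimal sketch of this generic BQL stop/wake pattern follows; the my_ring structure, MY_RING_SIZE and the my_* helpers are illustrative placeholders only and are not part of the r8169 driver.

/*
 * Sketch of the BQL stop/wake pattern; placeholder names, not r8169 code.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_RING_SIZE	256

struct my_ring {
	unsigned int cur_tx;	/* producer index, advanced in xmit */
	unsigned int dirty_tx;	/* consumer index, advanced on completion */
};

static bool my_slots_avail(struct my_ring *r, unsigned int needed)
{
	unsigned int queued = READ_ONCE(r->cur_tx) - READ_ONCE(r->dirty_tx);

	return MY_RING_SIZE - queued > needed;
}

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_ring *r = netdev_priv(dev);

	/* ...map skb and fill a TX descriptor here... */
	netdev_sent_queue(dev, skb->len);	/* BQL: bytes handed to the NIC */
	r->cur_tx++;

	/* ...ring the doorbell here... */

	if (!my_slots_avail(r, MAX_SKB_FRAGS)) {
		/* Publish cur_tx before the stopped state becomes visible. */
		smp_wmb();
		netif_stop_queue(dev);
		/* Re-read dirty_tx in case a completion raced with us. */
		smp_mb();
		if (my_slots_avail(r, MAX_SKB_FRAGS))
			netif_start_queue(dev);
	}
	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, unsigned int pkts,
			   unsigned int bytes)
{
	struct my_ring *r = netdev_priv(dev);

	r->dirty_tx += pkts;
	netdev_completed_queue(dev, pkts, bytes);	/* BQL: completed work */

	/* Pair with the barriers in my_start_xmit before checking the queue. */
	smp_mb();
	if (netif_queue_stopped(dev) && my_slots_avail(r, MAX_SKB_FRAGS))
		netif_wake_queue(dev);
}

With netdev_sent_queue() and netdev_completed_queue() paired like this, BQL can bound the bytes queued to the hardware, and the barriers keep the stop/wake decision consistent between the transmit and completion paths.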
@@ -6193,16 +6314,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); - /* The infamous DAC f*ckup only happens at boot time */ - if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { - netif_info(tp, intr, dev, "disabling PCI DAC\n"); - tp->cp_cmd &= ~PCIDAC; - RTL_W16(tp, CPlusCmd, tp->cp_cmd); - dev->features &= ~NETIF_F_HIGHDMA; - } - - rtl8169_hw_reset(tp); - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } @@ -6298,7 +6409,6 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data, struct sk_buff *skb; struct device *d = tp_to_dev(tp); - data = rtl8169_align(data); dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); prefetch(data); skb = napi_alloc_skb(&tp->napi, pkt_size); @@ -6409,7 +6519,7 @@ release_descriptor: static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct rtl8169_private *tp = dev_instance; - u16 status = rtl_get_events(tp); + u16 status = RTL_R16(tp, IntrStatus); u16 irq_mask = RTL_R16(tp, IntrMask); if (status == 0xffff || !(status & irq_mask)) @@ -6420,8 +6530,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) goto out; } - if (status & LinkChg && tp->dev->phydev) - phy_mac_interrupt(tp->dev->phydev); + if (status & LinkChg) + phy_mac_interrupt(tp->phydev); if (unlikely(status & RxFIFOOver && tp->mac_version == RTL_GIGA_MAC_VER_11)) { @@ -6512,12 +6622,12 @@ static void r8169_phylink_handler(struct net_device *ndev) } if (net_ratelimit()) - phy_print_status(ndev->phydev); + phy_print_status(tp->phydev); } static int r8169_phy_connect(struct rtl8169_private *tp) { - struct phy_device *phydev = mdiobus_get_phy(tp->mii_bus, 0); + struct phy_device *phydev = tp->phydev; phy_interface_t phy_mode; int ret; @@ -6544,7 +6654,7 @@ static void rtl8169_down(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - phy_stop(dev->phydev); + phy_stop(tp->phydev); napi_disable(&tp->napi); netif_stop_queue(dev); @@ -6586,7 +6696,7 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); - phy_disconnect(dev->phydev); + phy_disconnect(tp->phydev); pci_free_irq(pdev, 0, tp); @@ -6637,10 +6747,6 @@ static int rtl_open(struct net_device *dev) if (retval < 0) goto err_free_rx_1; - INIT_WORK(&tp->wk.work, rtl_task); - - smp_mb(); - rtl_request_firmware(tp); retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp, @@ -6667,7 +6773,7 @@ static int rtl_open(struct net_device *dev) if (!rtl8169_init_counter_offsets(tp)) netif_warn(tp, hw, dev, "counter reset/update failed\n"); - phy_start(dev->phydev); + phy_start(tp->phydev); netif_start_queue(dev); rtl_unlock_work(tp); @@ -6756,7 +6862,7 @@ static void rtl8169_net_suspend(struct net_device *dev) if (!netif_running(dev)) return; - phy_stop(dev->phydev); + phy_stop(tp->phydev); netif_device_detach(dev); rtl_lock_work(tp); @@ -6791,14 +6897,13 @@ static void __rtl8169_resume(struct net_device *dev) rtl_pll_power_up(tp); rtl8169_init_phy(dev, tp); - phy_start(tp->dev->phydev); + phy_start(tp->phydev); rtl_lock_work(tp); napi_enable(&tp->napi); set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); rtl_unlock_work(tp); - - rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } static int rtl8169_resume(struct device *device) @@ -6935,7 +7040,7 @@ static void rtl_remove_one(struct pci_dev *pdev) netif_napi_del(&tp->napi); unregister_netdev(dev); - mdiobus_unregister(tp->mii_bus); + 
mdiobus_unregister(tp->phydev->mdio.bus); rtl_release_firmware(tp); @@ -6995,9 +7100,9 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) unsigned int flags; if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - RTL_W8(tp, Cfg9346, Cfg9346_Unlock); + rtl_unlock_config_regs(tp); RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); - RTL_W8(tp, Cfg9346, Cfg9346_Lock); + rtl_lock_config_regs(tp); flags = PCI_IRQ_LEGACY; } else { flags = PCI_IRQ_ALL_TYPES; @@ -7006,6 +7111,30 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags); } +static void rtl_read_mac_address(struct rtl8169_private *tp, + u8 mac_addr[ETH_ALEN]) +{ + u32 value; + + /* Get MAC address */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: + value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + mac_addr[0] = (value >> 0) & 0xff; + mac_addr[1] = (value >> 8) & 0xff; + mac_addr[2] = (value >> 16) & 0xff; + mac_addr[3] = (value >> 24) & 0xff; + + value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + mac_addr[4] = (value >> 0) & 0xff; + mac_addr[5] = (value >> 8) & 0xff; + break; + default: + break; + } +} + DECLARE_RTL_COND(rtl_link_list_ready_cond) { return RTL_R8(tp, MCU) & LINK_LIST_RDY; @@ -7042,7 +7171,6 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, static int r8169_mdio_register(struct rtl8169_private *tp) { struct pci_dev *pdev = tp->pci_dev; - struct phy_device *phydev; struct mii_bus *new_bus; int ret; @@ -7064,16 +7192,14 @@ static int r8169_mdio_register(struct rtl8169_private *tp) if (ret) return ret; - phydev = mdiobus_get_phy(new_bus, 0); - if (!phydev) { + tp->phydev = mdiobus_get_phy(new_bus, 0); + if (!tp->phydev) { mdiobus_unregister(new_bus); return -ENODEV; } /* PHY will be woken up in rtl_open() */ - phy_suspend(phydev); - - tp->mii_bus = new_bus; + phy_suspend(tp->phydev); return 0; } @@ -7171,9 +7297,37 @@ static void rtl_disable_clk(void *data) clk_disable_unprepare(data); } +static int rtl_get_ether_clk(struct rtl8169_private *tp) +{ + struct device *d = tp_to_dev(tp); + struct clk *clk; + int rc; + + clk = devm_clk_get(d, "ether_clk"); + if (IS_ERR(clk)) { + rc = PTR_ERR(clk); + if (rc == -ENOENT) + /* clk-core allows NULL (for suspend / resume) */ + rc = 0; + else if (rc != -EPROBE_DEFER) + dev_err(d, "failed to get clk: %d\n", rc); + } else { + tp->clk = clk; + rc = clk_prepare_enable(clk); + if (rc) + dev_err(d, "failed to enable clk: %d\n", rc); + else + rc = devm_add_action_or_reset(d, rtl_disable_clk, clk); + } + + return rc; +} + static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + /* align to u16 for is_valid_ether_addr() */ + u8 mac_addr[ETH_ALEN] __aligned(2) = {}; struct rtl8169_private *tp; struct net_device *dev; int chipset, region, i; @@ -7192,30 +7346,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->supports_gmii = cfg->has_gmii; /* Get the *optional* external "ether_clk" used on some boards */ - tp->clk = devm_clk_get(&pdev->dev, "ether_clk"); - if (IS_ERR(tp->clk)) { - rc = PTR_ERR(tp->clk); - if (rc == -ENOENT) { - /* clk-core allows NULL (for suspend / resume) */ - tp->clk = NULL; - } else if (rc == -EPROBE_DEFER) { - return rc; - } else { - dev_err(&pdev->dev, "failed to get clk: %d\n", rc); - return rc; - } - } else { - rc = clk_prepare_enable(tp->clk); - if (rc) { - dev_err(&pdev->dev, "failed 
to enable clk: %d\n", rc); - return rc; - } - - rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk, - tp->clk); - if (rc) - return rc; - } + rc = rtl_get_ether_clk(tp); + if (rc) + return rc; /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pcim_enable_device(pdev); @@ -7260,13 +7393,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->cp_cmd = RTL_R16(tp, CPlusCmd); - if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 && - tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 && !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { - - /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ - if (!pci_is_pcie(pdev)) - tp->cp_cmd |= PCIDAC; dev->features |= NETIF_F_HIGHDMA; } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); @@ -7297,26 +7425,19 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } - tp->saved_wolopts = __rtl8169_get_wol(tp); - mutex_init(&tp->wk.mutex); + INIT_WORK(&tp->wk.work, rtl_task); u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); - /* Get MAC address */ - switch (tp->mac_version) { - u8 mac_addr[ETH_ALEN] __aligned(4); - case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38: - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51: - *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); - *(u16 *)&mac_addr[4] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + /* get MAC address */ + rc = eth_platform_get_mac_address(&pdev->dev, mac_addr); + if (rc) + rtl_read_mac_address(tp, mac_addr); + + if (is_valid_ether_addr(mac_addr)) + rtl_rar_set(tp, mac_addr); - if (is_valid_ether_addr(mac_addr)) - rtl_rar_set(tp, mac_addr); - break; - default: - break; - } for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); @@ -7365,7 +7486,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask; tp->coalesce_info = cfg->coalesce_info; - tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + tp->fw_name = rtl_chip_infos[chipset].fw_name; tp->counters = dmam_alloc_coherent (&pdev->dev, sizeof(*tp->counters), &tp->counters_phys_addr, @@ -7406,7 +7527,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_mdio_unregister: - mdiobus_unregister(tp->mii_bus); + mdiobus_unregister(tp->phydev->mdio.bus); return rc; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index d28c8f9ca55b..8154b38c08f7 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -458,7 +458,7 @@ static int ravb_dmac_init(struct net_device *ndev) RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR); /* Set FIFO size */ - ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC); + ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC); /* Timestamp enable */ ravb_write(ndev, TCCR_TFEN, TCCR); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index f27a0dc8c563..339b2eae2100 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -555,7 +555,7 @@ static int sh_eth_soft_reset_gether(struct net_device *ndev) sh_eth_write(ndev, 0, RDFFR); /* Reset HW CRC register */ - if (mdp->cd->hw_checksum) + if (mdp->cd->csmr) sh_eth_write(ndev, 0, CSMR); /* Select MII mode */ @@ -619,7 +619,8 @@ static struct sh_eth_cpu_data r7s72100_data = { .no_trimd 
= 1, .no_ade = 1, .xdfar_rw = 1, - .hw_checksum = 1, + .csmr = 1, + .rx_csum = 1, .tsu = 1, .no_tx_cntrs = 1, }; @@ -668,7 +669,8 @@ static struct sh_eth_cpu_data r8a7740_data = { .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, - .hw_checksum = 1, + .csmr = 1, + .rx_csum = 1, .tsu = 1, .select_mii = 1, .magic = 1, @@ -793,7 +795,8 @@ static struct sh_eth_cpu_data r8a77980_data = { .no_trimd = 1, .no_ade = 1, .xdfar_rw = 1, - .hw_checksum = 1, + .csmr = 1, + .rx_csum = 1, .select_mii = 1, .magic = 1, .cexcr = 1, @@ -1045,7 +1048,8 @@ static struct sh_eth_cpu_data sh7734_data = { .no_ade = 1, .xdfar_rw = 1, .tsu = 1, - .hw_checksum = 1, + .csmr = 1, + .rx_csum = 1, .select_mii = 1, .magic = 1, .cexcr = 1, @@ -1088,6 +1092,7 @@ static struct sh_eth_cpu_data sh7763_data = { .irq_flags = IRQF_SHARED, .magic = 1, .cexcr = 1, + .rx_csum = 1, .dual_port = 1, }; @@ -1532,8 +1537,9 @@ static int sh_eth_dev_init(struct net_device *ndev) mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); - /* PAUSE Prohibition */ + /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | + (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | ECMR_TE | ECMR_RE, ECMR); if (mdp->cd->set_rate) @@ -1592,6 +1598,19 @@ static void sh_eth_dev_exit(struct net_device *ndev) update_mac_address(ndev); } +static void sh_eth_rx_csum(struct sk_buff *skb) +{ + u8 *hw_csum; + + /* The hardware checksum is 2 bytes appended to packet data */ + if (unlikely(skb->len < sizeof(__sum16))) + return; + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); + skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + skb_trim(skb, skb->len - sizeof(__sum16)); +} + /* Packet receive function */ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) { @@ -1633,7 +1652,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. */ - if (mdp->cd->hw_checksum) + if (mdp->cd->csmr) desc_status >>= 16; skb = mdp->rx_skbuff[entry]; @@ -1666,6 +1685,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) DMA_FROM_DEVICE); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); + if (ndev->features & NETIF_F_RXCSUM) + sh_eth_rx_csum(skb); netif_receive_skb(skb); ndev->stats.rx_packets++; ndev->stats.rx_bytes += pkt_len; @@ -2173,7 +2194,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) add_reg(MAFCR); if (cd->rtrate) add_reg(RTRATE); - if (cd->hw_checksum) + if (cd->csmr) add_reg(CSMR); if (cd->select_mii) add_reg(RMII_MII); @@ -2921,6 +2942,39 @@ static void sh_eth_set_rx_mode(struct net_device *ndev) spin_unlock_irqrestore(&mdp->lock, flags); } +static void sh_eth_set_rx_csum(struct net_device *ndev, bool enable) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&mdp->lock, flags); + + /* Disable TX and RX */ + sh_eth_rcv_snd_disable(ndev); + + /* Modify RX Checksum setting */ + sh_eth_modify(ndev, ECMR, ECMR_RCSC, enable ? 
ECMR_RCSC : 0); + + /* Enable TX and RX */ + sh_eth_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&mdp->lock, flags); +} + +static int sh_eth_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum) + sh_eth_set_rx_csum(ndev, features & NETIF_F_RXCSUM); + + ndev->features = features; + + return 0; +} + static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) { if (!mdp->port) @@ -3102,6 +3156,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { .ndo_change_mtu = sh_eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = sh_eth_set_features, }; static const struct net_device_ops sh_eth_netdev_ops_tsu = { @@ -3117,6 +3172,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = { .ndo_change_mtu = sh_eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = sh_eth_set_features, }; #ifdef CONFIG_OF @@ -3245,6 +3301,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) ndev->max_mtu = 2000 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; + if (mdp->cd->rx_csum) { + ndev->features = NETIF_F_RXCSUM; + ndev->hw_features = NETIF_F_RXCSUM; + } + /* set function */ if (mdp->cd->tsu) ndev->netdev_ops = &sh_eth_netdev_ops_tsu; @@ -3294,7 +3355,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; } mdp->port = port; - ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Need to init only the first port of the two sharing a TSU */ if (port == 0) { diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 0c18650bbfe6..850726301e1c 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -499,7 +499,8 @@ struct sh_eth_cpu_data { unsigned no_ade:1; /* E-DMAC DOES NOT have ADE bit in EESR */ unsigned no_xdfar:1; /* E-DMAC DOES NOT have RDFAR/TDFAR */ unsigned xdfar_rw:1; /* E-DMAC has writeable RDFAR/TDFAR */ - unsigned hw_checksum:1; /* E-DMAC has CSMR */ + unsigned csmr:1; /* E-DMAC has CSMR */ + unsigned rx_csum:1; /* EtherC has ECMR.RCSC */ unsigned select_mii:1; /* EtherC has RMII_MII (MII select register) */ unsigned rmiimode:1; /* EtherC has RMIIMODE register */ unsigned rtrate:1; /* EtherC has RTRATE register */ diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 748fb12260a6..2b2e1c4f0dc3 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -109,8 +109,6 @@ struct rocker_world_ops { int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port, unsigned long brport_flags, struct switchdev_trans *trans); - int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port, - unsigned long *p_brport_flags); int (*port_attr_bridge_flags_support_get)(const struct rocker_port * rocker_port, unsigned long * diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 6213827e3956..c883aa89b7ca 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1566,45 +1566,57 @@ static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, } static int -rocker_world_port_attr_bridge_flags_set(struct rocker_port 
*rocker_port, - unsigned long brport_flags, - struct switchdev_trans *trans) +rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port * + rocker_port, + unsigned long * + p_brport_flags_support) { struct rocker_world_ops *wops = rocker_port->rocker->wops; - if (!wops->port_attr_bridge_flags_set) + if (!wops->port_attr_bridge_flags_support_get) return -EOPNOTSUPP; - - if (switchdev_trans_ph_prepare(trans)) - return 0; - - return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, - trans); + return wops->port_attr_bridge_flags_support_get(rocker_port, + p_brport_flags_support); } static int -rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, - unsigned long *p_brport_flags) +rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) { struct rocker_world_ops *wops = rocker_port->rocker->wops; + unsigned long brport_flags_s; + int err; - if (!wops->port_attr_bridge_flags_get) + if (!wops->port_attr_bridge_flags_set) return -EOPNOTSUPP; - return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); + + err = rocker_world_port_attr_bridge_flags_support_get(rocker_port, + &brport_flags_s); + if (err) + return err; + + if (brport_flags & ~brport_flags_s) + return -EINVAL; + + return 0; } static int -rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port * - rocker_port, - unsigned long * - p_brport_flags_support) +rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) { struct rocker_world_ops *wops = rocker_port->rocker->wops; - if (!wops->port_attr_bridge_flags_support_get) + if (!wops->port_attr_bridge_flags_set) return -EOPNOTSUPP; - return wops->port_attr_bridge_flags_support_get(rocker_port, - p_brport_flags_support); + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, + trans); } static int @@ -2026,6 +2038,18 @@ static void rocker_port_neigh_destroy(struct net_device *dev, err); } +static int rocker_port_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + const struct rocker *rocker = rocker_port->rocker; + + ppid->id_len = sizeof(rocker->hw.id); + memcpy(&ppid->id, &rocker->hw.id, ppid->id_len); + + return 0; +} + static const struct net_device_ops rocker_port_netdev_ops = { .ndo_open = rocker_port_open, .ndo_stop = rocker_port_stop, @@ -2035,39 +2059,13 @@ static const struct net_device_ops rocker_port_netdev_ops = { .ndo_get_phys_port_name = rocker_port_get_phys_port_name, .ndo_change_proto_down = rocker_port_change_proto_down, .ndo_neigh_destroy = rocker_port_neigh_destroy, + .ndo_get_port_parent_id = rocker_port_get_port_parent_id, }; /******************** * swdev interface ********************/ -static int rocker_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - const struct rocker *rocker = rocker_port->rocker; - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(rocker->hw.id); - memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = rocker_world_port_attr_bridge_flags_get(rocker_port, - &attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - 
err = rocker_world_port_attr_bridge_flags_support_get(rocker_port, - &attr->u.brport_flags_support); - break; - default: - return -EOPNOTSUPP; - } - - return err; -} - static int rocker_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -2081,6 +2079,11 @@ static int rocker_port_attr_set(struct net_device *dev, attr->u.stp_state, trans); break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port, + attr->u.brport_flags, + trans); + break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = rocker_world_port_attr_bridge_flags_set(rocker_port, attr->u.brport_flags, @@ -2139,11 +2142,6 @@ static int rocker_port_obj_del(struct net_device *dev, return err; } -static const struct switchdev_ops rocker_port_switchdev_ops = { - .switchdev_port_attr_get = rocker_port_attr_get, - .switchdev_port_attr_set = rocker_port_attr_set, -}; - struct rocker_fib_event_work { struct work_struct work; union { @@ -2597,7 +2595,6 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port_dev_addr_init(rocker_port); dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; - dev->switchdev_ops = &rocker_port_switchdev_ops; netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, NAPI_POLL_WEIGHT); netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, @@ -2708,6 +2705,19 @@ static bool rocker_port_dev_check(const struct net_device *dev) return dev->netdev_ops == &rocker_port_netdev_ops; } +static int +rocker_switchdev_port_attr_set_event(struct net_device *netdev, + struct switchdev_notifier_port_attr_info *port_attr_info) +{ + int err; + + err = rocker_port_attr_set(netdev, port_attr_info->attr, + port_attr_info->trans); + + port_attr_info->handled = true; + return notifier_from_errno(err); +} + struct rocker_switchdev_event_work { struct work_struct work; struct switchdev_notifier_fdb_info fdb_info; @@ -2725,7 +2735,7 @@ rocker_fdb_offload_notify(struct rocker_port *rocker_port, info.vid = recv_info->vid; info.offloaded = true; call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, - rocker_port->dev, &info.info); + rocker_port->dev, &info.info, NULL); } static void rocker_switchdev_event_work(struct work_struct *work) @@ -2777,6 +2787,9 @@ static int rocker_switchdev_event(struct notifier_block *unused, if (!rocker_port_dev_check(dev)) return NOTIFY_DONE; + if (event == SWITCHDEV_PORT_ATTR_SET) + return rocker_switchdev_port_attr_set_event(dev, ptr); + rocker_port = netdev_priv(dev); switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); if (WARN_ON(!switchdev_work)) @@ -2839,6 +2852,8 @@ static int rocker_switchdev_blocking_event(struct notifier_block *unused, case SWITCHDEV_PORT_OBJ_ADD: case SWITCHDEV_PORT_OBJ_DEL: return rocker_switchdev_port_obj_event(event, dev, ptr); + case SWITCHDEV_PORT_ATTR_SET: + return rocker_switchdev_port_attr_set_event(dev, ptr); } return NOTIFY_DONE; diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 6473cc68c2d5..fa296a7c255d 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1833,10 +1833,10 @@ static void ofdpa_port_fdb_learn_work(struct work_struct *work) rtnl_lock(); if (learned && removing) call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, - lw->ofdpa_port->dev, &info.info); + lw->ofdpa_port->dev, &info.info, NULL); else if (learned && !removing) 
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, - lw->ofdpa_port->dev, &info.info); + lw->ofdpa_port->dev, &info.info, NULL); rtnl_unlock(); kfree(work); @@ -2511,16 +2511,6 @@ static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port, return err; } -static int -ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, - unsigned long *p_brport_flags) -{ - const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; - - *p_brport_flags = ofdpa_port->brport_flags; - return 0; -} - static int ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port * rocker_port, @@ -2823,7 +2813,6 @@ struct rocker_world_ops rocker_ofdpa_ops = { .port_stop = ofdpa_port_stop, .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set, .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set, - .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get, .port_attr_bridge_flags_support_get = ofdpa_port_attr_bridge_flags_support_get, .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set, .port_obj_vlan_add = ofdpa_port_obj_vlan_add, diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 2f2bda68d861..e888b479c596 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6041,10 +6041,13 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, - /* MUM and SUC firmware share the same partition type */ { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, - { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } + { NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS, 0, 0, "sfc_dynamic_cfg_dflt" }, + { NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS, 0, 0, "sfc_exp_rom_cfg_dflt" }, + { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }, + { NVRAM_PARTITION_TYPE_BUNDLE, 0, 0, "sfc_bundle" }, + { NVRAM_PARTITION_TYPE_BUNDLE_METADATA, 0, 0, "sfc_bundle_metadata" }, }; #define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types) @@ -6074,8 +6077,15 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); if (rc) return rc; + if (protected && + (type != NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS && + type != NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS)) + /* Hide protected partitions that don't provide defaults. */ + return -ENODEV; + if (protected) - return -ENODEV; /* hide it */ + /* Protected partitions are read only. */ + erase_size = 0; /* If we've already exposed a partition of this type, hide this * duplicate. 
All operations on MTDs are keyed by the type anyway, @@ -6115,7 +6125,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, static int efx_ef10_mtd_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); - DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); + DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; struct efx_mcdi_mtd_partition *parts; size_t outlen, n_parts_total, i, n_parts; unsigned int type; diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3643015a55cf..bc655ffc9e02 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -915,7 +915,7 @@ rollback: void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) { - mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); + mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10)); } static bool efx_default_channel_want_txqs(struct efx_channel *channel) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index dfad93fca0a6..295ec1787b9f 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -2074,22 +2074,26 @@ fail: static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN); int rc; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); + MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS, + NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, + 1); BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), NULL, 0, NULL); + return rc; } static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, loff_t offset, u8 *buffer, size_t length) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN); + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)); size_t outlen; @@ -2098,6 +2102,8 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); + MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE, + MC_CMD_NVRAM_READ_IN_V2_DEFAULT); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); @@ -2147,15 +2153,51 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) { - MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN); - int rc; + MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN); + size_t outlen; + int rc, rc2; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); - - BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); + /* Always set this flag. 
Old firmware ignores it */ + MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS, + NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, + 1); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), - NULL, 0, NULL); + outbuf, sizeof(outbuf), &outlen); + if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) { + rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE); + if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) + netif_err(efx, drv, efx->net_dev, + "NVRAM update failed verification with code 0x%x\n", + rc2); + switch (rc2) { + case MC_CMD_NVRAM_VERIFY_RC_SUCCESS: + break; + case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED: + case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED: + rc = -EIO; + break; + case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT: + case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST: + rc = -EINVAL; + break; + case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES: + case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS: + case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH: + rc = -EPERM; + break; + default: + netif_err(efx, drv, efx->net_dev, + "Unknown response to NVRAM_UPDATE_FINISH\n"); + rc = -EIO; + } + } + return rc; } diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 3839eec783ea..20a5523bf9f3 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -6784,6 +6784,14 @@ * subset of the information stored in this partition. */ #define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00 +/* enum: Bundle image partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE 0x1e00 +/* enum: Bundle metadata partition that holds additional information related to + * a bundle update in TLV format + */ +#define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01 +/* enum: Bundle update non-volatile log output partition */ +#define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index 4ac30b6e5dab..0d03e0577d85 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -66,6 +66,9 @@ int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts, part->mtd.writesize = 1; + if (!(part->mtd.flags & MTD_NO_ERASE)) + part->mtd.flags |= MTD_WRITEABLE; + part->mtd.owner = THIS_MODULE; part->mtd.priv = efx; part->mtd.name = part->name; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 396ff01298cd..8702ab44d80b 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -360,8 +360,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) rc = efx_init_rx_buffers(rx_queue, atomic); if (unlikely(rc)) { /* Ensure that we don't leave the rx queue empty */ - if (rx_queue->added_count == rx_queue->removed_count) - efx_schedule_slow_fill(rx_queue); + efx_schedule_slow_fill(rx_queue); goto out; } } while ((space -= batch_size) >= batch_size); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 22eb059086f7..06c8f282263f 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -471,7 +471,7 @@ static int 
efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, if (IS_ERR(segments)) return PTR_ERR(segments); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); skb = segments; while (skb) { diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 3140999642ba..358e66b81926 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -666,7 +666,7 @@ static inline void ioc3_tx(struct net_device *dev) packets++; skb = ip->tx_skbs[o_entry]; bytes += skb->len; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); ip->tx_skbs[o_entry] = NULL; o_entry = (o_entry + 1) & 127; /* Next */ diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 0e1b7e960b98..f1271402ca21 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -68,6 +68,8 @@ module_param(timeout, int, 0); * packets in and out, so there is place for a packet */ struct meth_private { + struct platform_device *pdev; + /* in-memory copy of MAC Control register */ u64 mac_ctrl; @@ -211,8 +213,8 @@ static void meth_check_link(struct net_device *dev) static int meth_init_tx_ring(struct meth_private *priv) { /* Init TX ring */ - priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, - &priv->tx_ring_dma, GFP_ATOMIC); + priv->tx_ring = dma_alloc_coherent(&priv->pdev->dev, + TX_RING_BUFFER_SIZE, &priv->tx_ring_dma, GFP_ATOMIC); if (!priv->tx_ring) return -ENOMEM; @@ -236,7 +238,7 @@ static int meth_init_rx_ring(struct meth_private *priv) priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); /* I'll need to re-sync it after each RX */ priv->rx_ring_dmas[i] = - dma_map_single(NULL, priv->rx_ring[i], + dma_map_single(&priv->pdev->dev, priv->rx_ring[i], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); mace->eth.rx_fifo = priv->rx_ring_dmas[i]; } @@ -253,7 +255,7 @@ static void meth_free_tx_ring(struct meth_private *priv) dev_kfree_skb(priv->tx_skbs[i]); priv->tx_skbs[i] = NULL; } - dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring, + dma_free_coherent(&priv->pdev->dev, TX_RING_BUFFER_SIZE, priv->tx_ring, priv->tx_ring_dma); } @@ -263,7 +265,7 @@ static void meth_free_rx_ring(struct meth_private *priv) int i; for (i = 0; i < RX_RING_ENTRIES; i++) { - dma_unmap_single(NULL, priv->rx_ring_dmas[i], + dma_unmap_single(&priv->pdev->dev, priv->rx_ring_dmas[i], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); priv->rx_ring[i] = 0; priv->rx_ring_dmas[i] = 0; @@ -393,7 +395,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) fifo_rptr = (fifo_rptr - 1) & 0x0f; } while (priv->rx_write != fifo_rptr) { - dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write], + dma_unmap_single(&priv->pdev->dev, + priv->rx_ring_dmas[priv->rx_write], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); status = priv->rx_ring[priv->rx_write]->status.raw; #if MFE_DEBUG @@ -454,7 +457,8 @@ static void meth_rx(struct net_device* dev, unsigned long int_status) priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; priv->rx_ring[priv->rx_write]->status.raw = 0; priv->rx_ring_dmas[priv->rx_write] = - dma_map_single(NULL, priv->rx_ring[priv->rx_write], + dma_map_single(&priv->pdev->dev, + priv->rx_ring[priv->rx_write], METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; ADVANCE_RX_PTR(priv->rx_write); @@ -521,7 +525,7 @@ static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) DPRINTK("RPTR points us here, but packet not done?\n"); break; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); 
priv->tx_skbs[priv->tx_read] = NULL; priv->tx_ring[priv->tx_read].header.raw = 0; priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1); @@ -637,7 +641,7 @@ static void meth_tx_1page_prepare(struct meth_private *priv, } /* first page */ - catbuf = dma_map_single(NULL, buffer_data, buffer_len, + catbuf = dma_map_single(&priv->pdev->dev, buffer_data, buffer_len, DMA_TO_DEVICE); desc->data.cat_buf[0].form.start_addr = catbuf >> 3; desc->data.cat_buf[0].form.len = buffer_len - 1; @@ -663,12 +667,12 @@ static void meth_tx_2page_prepare(struct meth_private *priv, } /* first page */ - catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len, + catbuf1 = dma_map_single(&priv->pdev->dev, buffer1_data, buffer1_len, DMA_TO_DEVICE); desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3; desc->data.cat_buf[0].form.len = buffer1_len - 1; /* second page */ - catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len, + catbuf2 = dma_map_single(&priv->pdev->dev, buffer2_data, buffer2_len, DMA_TO_DEVICE); desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3; desc->data.cat_buf[1].form.len = buffer2_len - 1; @@ -840,6 +844,7 @@ static int meth_probe(struct platform_device *pdev) memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN); priv = netdev_priv(dev); + priv->pdev = pdev; spin_lock_init(&priv->meth_lock); SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 808cf9816673..5b351beb78cb 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -714,7 +714,7 @@ static void sis190_tx_interrupt(struct net_device *dev, sis190_unmap_tx_skb(tp->pci_dev, skb, txd); tp->Tx_skbuff[entry] = NULL; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } if (tp->dirty_tx != dirty_tx) { diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 4bb89f74742c..6073387511f8 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1927,7 +1927,7 @@ static void sis900_finish_xmit (struct net_device *net_dev) pci_unmap_single(sis_priv->pci_dev, sis_priv->tx_ring[entry].bufptr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); sis_priv->tx_skbuff[entry] = NULL; sis_priv->tx_ring[entry].bufptr = 0; sis_priv->tx_ring[entry].cmdsts = 0; diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 15c62c160953..be47d864f8b9 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) skb = ep->tx_skbuff[entry]; pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); ep->tx_skbuff[entry] = NULL; } diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 8355dfbb8ec3..b550e624500d 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1188,7 +1188,7 @@ smc911x_tx_dma_irq(void *data) DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); BUG_ON(skb == NULL); - dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); + dma_unmap_single(lp->dev, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); netif_trans_update(dev); dev_kfree_skb_irq(skb); lp->current_tx_skb = NULL; @@ -1219,7 +1219,7 @@ smc911x_rx_dma_irq(void *data) DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); - 
dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); + dma_unmap_single(lp->dev, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); BUG_ON(skb == NULL); lp->current_rx_skb = NULL; PRINT_PKT(skb->data, skb->len); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 6209cc1fb305..f194235153f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -105,6 +105,16 @@ config DWMAC_OXNAS This selects the Oxford Semiconductor OXNASSoC glue layer support for the stmmac device driver. This driver is used for OX820. +config DWMAC_QCOM_ETHQOS + tristate "Qualcomm ETHQOS support" + default ARCH_QCOM + depends on OF && (ARCH_QCOM || COMPILE_TEST) + help + Support for the Qualcomm ETHQOS core. + + This selects the Qualcomm ETHQOS glue layer support for the + stmmac device driver. + config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" default ARCH_ROCKCHIP diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index bf09701d2623..c529c21e9bdd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o +obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c new file mode 100644 index 000000000000..7ec895407d23 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018-19, Linaro Limited + +#include +#include +#include +#include +#include +#include "stmmac.h" +#include "stmmac_platform.h" + +#define RGMII_IO_MACRO_CONFIG 0x0 +#define SDCC_HC_REG_DLL_CONFIG 0x4 +#define SDCC_HC_REG_DDR_CONFIG 0xC +#define SDCC_HC_REG_DLL_CONFIG2 0x10 +#define SDC4_STATUS 0x14 +#define SDCC_USR_CTL 0x18 +#define RGMII_IO_MACRO_CONFIG2 0x1C +#define RGMII_IO_MACRO_DEBUG1 0x20 +#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28 + +/* RGMII_IO_MACRO_CONFIG fields */ +#define RGMII_CONFIG_FUNC_CLK_EN BIT(30) +#define RGMII_CONFIG_POS_NEG_DATA_SEL BIT(23) +#define RGMII_CONFIG_GPIO_CFG_RX_INT GENMASK(21, 20) +#define RGMII_CONFIG_GPIO_CFG_TX_INT GENMASK(19, 17) +#define RGMII_CONFIG_MAX_SPD_PRG_9 GENMASK(16, 8) +#define RGMII_CONFIG_MAX_SPD_PRG_2 GENMASK(7, 6) +#define RGMII_CONFIG_INTF_SEL GENMASK(5, 4) +#define RGMII_CONFIG_BYPASS_TX_ID_EN BIT(3) +#define RGMII_CONFIG_LOOPBACK_EN BIT(2) +#define RGMII_CONFIG_PROG_SWAP BIT(1) +#define RGMII_CONFIG_DDR_MODE BIT(0) + +/* SDCC_HC_REG_DLL_CONFIG fields */ +#define SDCC_DLL_CONFIG_DLL_RST BIT(30) +#define SDCC_DLL_CONFIG_PDN BIT(29) +#define SDCC_DLL_CONFIG_MCLK_FREQ GENMASK(26, 24) +#define SDCC_DLL_CONFIG_CDR_SELEXT GENMASK(23, 20) +#define SDCC_DLL_CONFIG_CDR_EXT_EN BIT(19) +#define SDCC_DLL_CONFIG_CK_OUT_EN BIT(18) +#define SDCC_DLL_CONFIG_CDR_EN BIT(17) +#define SDCC_DLL_CONFIG_DLL_EN BIT(16) +#define SDCC_DLL_MCLK_GATING_EN BIT(5) +#define SDCC_DLL_CDR_FINE_PHASE GENMASK(3, 2) + +/* SDCC_HC_REG_DDR_CONFIG fields */ +#define SDCC_DDR_CONFIG_PRG_DLY_EN BIT(31) +#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY GENMASK(26, 21) 
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE GENMASK(29, 27) +#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN BIT(30) +#define SDCC_DDR_CONFIG_PRG_RCLK_DLY GENMASK(8, 0) + +/* SDCC_HC_REG_DLL_CONFIG2 fields */ +#define SDCC_DLL_CONFIG2_DLL_CLOCK_DIS BIT(21) +#define SDCC_DLL_CONFIG2_MCLK_FREQ_CALC GENMASK(17, 10) +#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL GENMASK(3, 2) +#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW BIT(1) +#define SDCC_DLL_CONFIG2_DDR_CAL_EN BIT(0) + +/* SDC4_STATUS bits */ +#define SDC4_STATUS_DLL_LOCK BIT(7) + +/* RGMII_IO_MACRO_CONFIG2 fields */ +#define RGMII_CONFIG2_RSVD_CONFIG15 GENMASK(31, 17) +#define RGMII_CONFIG2_RGMII_CLK_SEL_CFG BIT(16) +#define RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN BIT(13) +#define RGMII_CONFIG2_CLK_DIVIDE_SEL BIT(12) +#define RGMII_CONFIG2_RX_PROG_SWAP BIT(7) +#define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6) +#define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5) + +struct ethqos_emac_por { + unsigned int offset; + unsigned int value; +}; + +struct qcom_ethqos { + struct platform_device *pdev; + void __iomem *rgmii_base; + + unsigned int rgmii_clk_rate; + struct clk *rgmii_clk; + unsigned int speed; + + const struct ethqos_emac_por *por; + unsigned int num_por; +}; + +static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset) +{ + return readl(ethqos->rgmii_base + offset); +} + +static void rgmii_writel(struct qcom_ethqos *ethqos, + int value, unsigned int offset) +{ + writel(value, ethqos->rgmii_base + offset); +} + +static void rgmii_updatel(struct qcom_ethqos *ethqos, + int mask, int val, unsigned int offset) +{ + unsigned int temp; + + temp = rgmii_readl(ethqos, offset); + temp = (temp & ~(mask)) | val; + rgmii_writel(ethqos, temp, offset); +} + +static void rgmii_dump(struct qcom_ethqos *ethqos) +{ + dev_dbg(&ethqos->pdev->dev, "Rgmii register dump\n"); + dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n", + rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG)); + dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG: %x\n", + rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG)); + dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DDR_CONFIG: %x\n", + rgmii_readl(ethqos, SDCC_HC_REG_DDR_CONFIG)); + dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG2: %x\n", + rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG2)); + dev_dbg(&ethqos->pdev->dev, "SDC4_STATUS: %x\n", + rgmii_readl(ethqos, SDC4_STATUS)); + dev_dbg(&ethqos->pdev->dev, "SDCC_USR_CTL: %x\n", + rgmii_readl(ethqos, SDCC_USR_CTL)); + dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG2: %x\n", + rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG2)); + dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_DEBUG1: %x\n", + rgmii_readl(ethqos, RGMII_IO_MACRO_DEBUG1)); + dev_dbg(&ethqos->pdev->dev, "EMAC_SYSTEM_LOW_POWER_DEBUG: %x\n", + rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG)); +} + +/* Clock rates */ +#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL) +#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL) +#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL) + +static void +ethqos_update_rgmii_clk(struct qcom_ethqos *ethqos, unsigned int speed) +{ + switch (speed) { + case SPEED_1000: + ethqos->rgmii_clk_rate = RGMII_1000_NOM_CLK_FREQ; + break; + + case SPEED_100: + ethqos->rgmii_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ; + break; + + case SPEED_10: + ethqos->rgmii_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ; + break; + } + + clk_set_rate(ethqos->rgmii_clk, ethqos->rgmii_clk_rate); +} + +static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos) +{ + rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN, +
RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG); +} + +static const struct ethqos_emac_por emac_v2_3_0_por[] = { + { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x00C01343 }, + { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C }, + { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 }, + { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 }, + { .offset = SDCC_USR_CTL, .value = 0x00010800 }, + { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 }, +}; + +static int ethqos_dll_configure(struct qcom_ethqos *ethqos) +{ + unsigned int val; + int retry = 1000; + + /* Set CDR_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN, + SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG); + + /* Set CDR_EXT_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN, + SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG); + + /* Clear CK_OUT_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN, + 0, SDCC_HC_REG_DLL_CONFIG); + + /* Set DLL_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN, + SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG); + + rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN, + 0, SDCC_HC_REG_DLL_CONFIG); + + rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE, + 0, SDCC_HC_REG_DLL_CONFIG); + + /* Wait for CK_OUT_EN clear */ + do { + val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG); + val &= SDCC_DLL_CONFIG_CK_OUT_EN; + if (!val) + break; + mdelay(1); + retry--; + } while (retry > 0); + if (!retry) + dev_err(&ethqos->pdev->dev, "Clear CK_OUT_EN timedout\n"); + + /* Set CK_OUT_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN, + SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG); + + /* Wait for CK_OUT_EN set */ + retry = 1000; + do { + val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG); + val &= SDCC_DLL_CONFIG_CK_OUT_EN; + if (val) + break; + mdelay(1); + retry--; + } while (retry > 0); + if (!retry) + dev_err(&ethqos->pdev->dev, "Set CK_OUT_EN timedout\n"); + + /* Set DDR_CAL_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN, + SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2); + + rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS, + 0, SDCC_HC_REG_DLL_CONFIG2); + + rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC, + 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2); + + rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL, + BIT(2), SDCC_HC_REG_DLL_CONFIG2); + + rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW, + SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW, + SDCC_HC_REG_DLL_CONFIG2); + + return 0; +} + +static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos) +{ + /* Disable loopback mode */ + rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN, + 0, RGMII_IO_MACRO_CONFIG2); + + /* Select RGMII, write 0 to interface select */ + rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL, + 0, RGMII_IO_MACRO_CONFIG); + + switch (ethqos->speed) { + case SPEED_1000: + rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE, + RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN, + 0, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL, + RGMII_CONFIG_POS_NEG_DATA_SEL, + RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP, + RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL, + 0, RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN, + RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN, + RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15, + 0, RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos,
RGMII_CONFIG2_RX_PROG_SWAP, + RGMII_CONFIG2_RX_PROG_SWAP, + RGMII_IO_MACRO_CONFIG2); + + /* Set PRG_RCLK_DLY to 57 for 1.8 ns delay */ + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY, + 57, SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN, + SDCC_DDR_CONFIG_PRG_DLY_EN, + SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, + RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG); + break; + + case SPEED_100: + rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE, + RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN, + RGMII_CONFIG_BYPASS_TX_ID_EN, + RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL, + 0, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP, + 0, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL, + 0, RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN, + RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN, + RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2, + BIT(6), RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15, + 0, RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP, + 0, RGMII_IO_MACRO_CONFIG2); + /* Write 0x5 to PRG_RCLK_DLY_CODE */ + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE, + (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY, + SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY, + SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN, + SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN, + SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, + RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG); + break; + + case SPEED_10: + rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE, + RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN, + RGMII_CONFIG_BYPASS_TX_ID_EN, + RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL, + 0, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP, + 0, RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL, + 0, RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN, + 0, RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9, + BIT(12) | GENMASK(9, 8), + RGMII_IO_MACRO_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15, + 0, RGMII_IO_MACRO_CONFIG2); + rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP, + 0, RGMII_IO_MACRO_CONFIG2); + /* Write 0x5 to PRG_RCLK_DLY_CODE */ + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE, + (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY, + SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY, + SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN, + SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN, + SDCC_HC_REG_DDR_CONFIG); + rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN, + RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG); + break; + default: + dev_err(&ethqos->pdev->dev, + "Invalid speed %d\n", ethqos->speed); + return -EINVAL; + } + + return 0; +} + +static int ethqos_configure(struct qcom_ethqos *ethqos) +{ + volatile unsigned int dll_lock; + unsigned int i, retry = 1000; + + /* Reset to POR values and enable clk */ + for (i = 0; i < ethqos->num_por; i++) + rgmii_writel(ethqos, ethqos->por[i].value, + ethqos->por[i].offset); +
ethqos_set_func_clk_en(ethqos); + + /* Initialize the DLL first */ + + /* Set DLL_RST */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, + SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG); + + /* Set PDN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, + SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG); + + /* Clear DLL_RST */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0, + SDCC_HC_REG_DLL_CONFIG); + + /* Clear PDN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0, + SDCC_HC_REG_DLL_CONFIG); + + if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) { + /* Set DLL_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN, + SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG); + + /* Set CK_OUT_EN */ + rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN, + SDCC_DLL_CONFIG_CK_OUT_EN, + SDCC_HC_REG_DLL_CONFIG); + + /* Set USR_CTL bit 26 with mask of 3 bits */ + rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), SDCC_USR_CTL); + + /* wait for DLL LOCK */ + do { + mdelay(1); + dll_lock = rgmii_readl(ethqos, SDC4_STATUS); + if (dll_lock & SDC4_STATUS_DLL_LOCK) + break; + } while (retry > 0); + if (!retry) + dev_err(&ethqos->pdev->dev, + "Timeout while waiting for DLL lock\n"); + } + + if (ethqos->speed == SPEED_1000) + ethqos_dll_configure(ethqos); + + ethqos_rgmii_macro_init(ethqos); + + return 0; +} + +static void ethqos_fix_mac_speed(void *priv, unsigned int speed) +{ + struct qcom_ethqos *ethqos = priv; + + ethqos->speed = speed; + ethqos_update_rgmii_clk(ethqos, speed); + ethqos_configure(ethqos); +} + +static int qcom_ethqos_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct qcom_ethqos *ethqos; + struct resource *res; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) { + dev_err(&pdev->dev, "dt configuration failed\n"); + return PTR_ERR(plat_dat); + } + + ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL); + if (!ethqos) { + ret = -ENOMEM; + goto err_mem; + } + + ethqos->pdev = pdev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii"); + ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ethqos->rgmii_base)) { + dev_err(&pdev->dev, "Can't get rgmii base\n"); + ret = PTR_ERR(ethqos->rgmii_base); + goto err_mem; + } + + ethqos->por = of_device_get_match_data(&pdev->dev); + + ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii"); + if (IS_ERR(ethqos->rgmii_clk)) { + ret = PTR_ERR(ethqos->rgmii_clk); + goto err_mem; + } + + ret = clk_prepare_enable(ethqos->rgmii_clk); + if (ret) + goto err_mem; + + ethqos->speed = SPEED_1000; + ethqos_update_rgmii_clk(ethqos, SPEED_1000); + ethqos_set_func_clk_en(ethqos); + + plat_dat->bsp_priv = ethqos; + plat_dat->fix_mac_speed = ethqos_fix_mac_speed; + plat_dat->has_gmac4 = 1; + plat_dat->pmt = 1; + plat_dat->tso_en = of_property_read_bool(np, "snps,tso"); + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_clk; + + rgmii_dump(ethqos); + + return ret; + +err_clk: + clk_disable_unprepare(ethqos->rgmii_clk); + +err_mem: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; +} + +static int qcom_ethqos_remove(struct platform_device *pdev) +{ + struct qcom_ethqos *ethqos; + int ret; + + ethqos = get_stmmac_bsp_priv(&pdev->dev); + if (!ethqos) + return -ENODEV; + + ret = stmmac_pltfr_remove(pdev); +
clk_disable_unprepare(ethqos->rgmii_clk); + + return ret; +} + +static const struct of_device_id qcom_ethqos_match[] = { + { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por}, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_ethqos_match); + +static struct platform_driver qcom_ethqos_driver = { + .probe = qcom_ethqos_probe, + .remove = qcom_ethqos_remove, + .driver = { + .name = "qcom-ethqos", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = of_match_ptr(qcom_ethqos_match), + }, +}; +module_platform_driver(qcom_ethqos_driver); + +MODULE_DESCRIPTION("Qualcomm ETHQOS driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 7b923362ee55..3b174eae77c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) } ret = phy_power_on(bsp_priv, true); - if (ret) + if (ret) { + gmac_clk_enable(bsp_priv, false); return ret; + } pm_runtime_enable(dev); pm_runtime_get_sync(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 7e2e79dedebf..062a600fa5a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -25,9 +25,24 @@ #define SYSCFG_MCU_ETH_MASK BIT(23) #define SYSCFG_MP1_ETH_MASK GENMASK(23, 16) +#define SYSCFG_PMCCLRR_OFFSET 0x40 #define SYSCFG_PMCR_ETH_CLK_SEL BIT(16) #define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17) + +/* Ethernet PHY interface selection in register SYSCFG Configuration + *------------------------------------------ + * src |BIT(23)| BIT(22)| BIT(21)|BIT(20)| + *------------------------------------------ + * MII | 0 | 0 | 0 | 1 | + *------------------------------------------ + * GMII | 0 | 0 | 0 | 0 | + *------------------------------------------ + * RGMII | 0 | 0 | 1 | n/a | + *------------------------------------------ + * RMII | 1 | 0 | 0 | n/a | + *------------------------------------------ + */ #define SYSCFG_PMCR_ETH_SEL_MII BIT(20) #define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21) #define SYSCFG_PMCR_ETH_SEL_RMII BIT(23) @@ -35,14 +50,54 @@ #define SYSCFG_MCU_ETH_SEL_MII 0 #define SYSCFG_MCU_ETH_SEL_RMII 1 +/* STM32MP1 register definitions + * + * Below table summarizes the clock requirement and clock sources for + * supported phy interface modes. 
+ * __________________________________________________________________________ + *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125Mhz from PHY| + *| | | 25MHz | 50MHz | | + * --------------------------------------------------------------------------- + *| MII | - | eth-ck | n/a | n/a | + *| | | | | | + * --------------------------------------------------------------------------- + *| GMII | - | eth-ck | n/a | n/a | + *| | | | | | + * --------------------------------------------------------------------------- + *| RGMII | - | eth-ck | n/a | eth-ck (no pin) | + *| | | | | st,eth-clk-sel | + * --------------------------------------------------------------------------- + *| RMII | - | eth-ck | eth-ck | n/a | + *| | | | st,eth-ref-clk-sel | | + * --------------------------------------------------------------------------- + * + * BIT(17) : set this bit in RMII mode when you have PHY without crystal 50MHz + * BIT(16) : set this bit in GMII/RGMII PHY when you do not want use 125Mhz + * from PHY + *----------------------------------------------------- + * src | BIT(17) | BIT(16) | + *----------------------------------------------------- + * MII | n/a | n/a | + *----------------------------------------------------- + * GMII | n/a | st,eth-clk-sel | + *----------------------------------------------------- + * RGMII | n/a | st,eth-clk-sel | + *----------------------------------------------------- + * RMII | st,eth-ref-clk-sel | n/a | + *----------------------------------------------------- + * + */ + struct stm32_dwmac { struct clk *clk_tx; struct clk *clk_rx; struct clk *clk_eth_ck; struct clk *clk_ethstp; struct clk *syscfg_clk; - bool int_phyclk; /* Clock from RCC to drive PHY */ - u32 mode_reg; /* MAC glue-logic mode register */ + int eth_clk_sel_reg; + int eth_ref_clk_sel_reg; + int irq_pwr_wakeup; + u32 mode_reg; /* MAC glue-logic mode register */ struct regmap *regmap; u32 speed; const struct stm32_ops *ops; @@ -102,7 +157,7 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) if (ret) return ret; - if (dwmac->int_phyclk) { + if (dwmac->clk_eth_ck) { ret = clk_prepare_enable(dwmac->clk_eth_ck); if (ret) { clk_disable_unprepare(dwmac->syscfg_clk); @@ -111,7 +166,7 @@ static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare) } } else { clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->int_phyclk) + if (dwmac->clk_eth_ck) clk_disable_unprepare(dwmac->clk_eth_ck); } return ret; @@ -121,7 +176,7 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; u32 reg = dwmac->mode_reg; - int val; + int val, ret; switch (plat_dat->interface) { case PHY_INTERFACE_MODE_MII: @@ -130,19 +185,22 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) break; case PHY_INTERFACE_MODE_GMII: val = SYSCFG_PMCR_ETH_SEL_GMII; - if (dwmac->int_phyclk) + if (dwmac->eth_clk_sel_reg) val |= SYSCFG_PMCR_ETH_CLK_SEL; pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n"); break; case PHY_INTERFACE_MODE_RMII: val = SYSCFG_PMCR_ETH_SEL_RMII; - if (dwmac->int_phyclk) + if (dwmac->eth_ref_clk_sel_reg) val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); break; case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: val = SYSCFG_PMCR_ETH_SEL_RGMII; - if (dwmac->int_phyclk) + if (dwmac->eth_clk_sel_reg) val |= SYSCFG_PMCR_ETH_CLK_SEL; pr_debug("SYSCFG init : 
PHY_INTERFACE_MODE_RGMII\n"); break; @@ -153,6 +211,11 @@ static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) return -EINVAL; } + /* Need to update PMCCLRR (clear register) */ + ret = regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET, + dwmac->ops->syscfg_eth_mask); + + /* Update PMCSETR (set register) */ return regmap_update_bits(dwmac->regmap, reg, dwmac->ops->syscfg_eth_mask, val); } @@ -180,7 +243,7 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) } return regmap_update_bits(dwmac->regmap, reg, - dwmac->ops->syscfg_eth_mask, val); + dwmac->ops->syscfg_eth_mask, val << 23); } static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac) @@ -232,24 +295,29 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, struct device *dev) { + struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; + int err = 0; - dwmac->int_phyclk = of_property_read_bool(np, "st,int-phyclk"); + /* Gigabit Ethernet 125MHz clock selection. */ + dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel"); - /* Check if internal clk from RCC selected */ - if (dwmac->int_phyclk) { - /* Get ETH_CLK clocks */ - dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck"); - if (IS_ERR(dwmac->clk_eth_ck)) { - dev_err(dev, "No ETH CK clock provided...\n"); - return PTR_ERR(dwmac->clk_eth_ck); - } + /* Ethernet 50Mhz RMII clock selection */ + dwmac->eth_ref_clk_sel_reg = + of_property_read_bool(np, "st,eth-ref-clk-sel"); + + /* Get ETH_CLK clocks */ + dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck"); + if (IS_ERR(dwmac->clk_eth_ck)) { + dev_warn(dev, "No phy clock provided...\n"); + dwmac->clk_eth_ck = NULL; } /* Clock used for low power mode */ dwmac->clk_ethstp = devm_clk_get(dev, "ethstp"); if (IS_ERR(dwmac->clk_ethstp)) { - dev_err(dev, "No ETH peripheral clock provided for CStop mode ...\n"); + dev_err(dev, + "No ETH peripheral clock provided for CStop mode ...\n"); return PTR_ERR(dwmac->clk_ethstp); } @@ -260,7 +328,26 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, return PTR_ERR(dwmac->syscfg_clk); } - return 0; + /* Get IRQ information early to have an ability to ask for deferred + * probe if needed before we went too far with resource allocation. 
+ */ + dwmac->irq_pwr_wakeup = platform_get_irq_byname(pdev, + "stm32_pwr_wakeup"); + if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) { + err = device_init_wakeup(&pdev->dev, true); + if (err) { + dev_err(&pdev->dev, "Failed to init wake up irq\n"); + return err; + } + err = dev_pm_set_dedicated_wake_irq(&pdev->dev, + dwmac->irq_pwr_wakeup); + if (err) { + dev_err(&pdev->dev, "Failed to set wake up irq\n"); + device_init_wakeup(&pdev->dev, false); + } + device_set_wakeup_enable(&pdev->dev, false); + } + return err; } static int stm32_dwmac_probe(struct platform_device *pdev) @@ -326,9 +413,15 @@ static int stm32_dwmac_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); int ret = stmmac_dvr_remove(&pdev->dev); + struct stm32_dwmac *dwmac = priv->plat->bsp_priv; stm32_dwmac_clk_disable(priv->plat->bsp_priv); + if (dwmac->irq_pwr_wakeup >= 0) { + dev_pm_clear_wake_irq(&pdev->dev); + device_init_wakeup(&pdev->dev, false); + } + return ret; } @@ -342,7 +435,7 @@ static int stm32mp1_suspend(struct stm32_dwmac *dwmac) clk_disable_unprepare(dwmac->clk_tx); clk_disable_unprepare(dwmac->syscfg_clk); - if (dwmac->int_phyclk) + if (dwmac->clk_eth_ck) clk_disable_unprepare(dwmac->clk_eth_ck); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 20299f6f65fc..7fbb6a4dbf51 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) static int dwmac4_rx_check_timestamp(void *desc) { struct dma_desc *p = (struct dma_desc *)desc; + unsigned int rdes0 = le32_to_cpu(p->des0); + unsigned int rdes1 = le32_to_cpu(p->des1); + unsigned int rdes3 = le32_to_cpu(p->des3); u32 own, ctxt; int ret = 1; - own = p->des3 & RDES3_OWN; - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) + own = rdes3 & RDES3_OWN; + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); if (likely(!own && ctxt)) { - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) /* Corrupted value */ ret = -EINVAL; else @@ -268,7 +271,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc, int ret = -EINVAL; /* Get the status from normal w/b descriptor */ - if (likely(p->des3 & TDES3_RS1V)) { + if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) { if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) { int i = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 49f5687879df..545cb9c47433 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -124,9 +124,9 @@ void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan) int dwmac4_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan) { - int ret = 0; - u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); + int ret = 0; /* ABNORMAL interrupts */ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { @@ -151,16 +151,11 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, if (likely(intr_status & DMA_CHAN_STATUS_NIS)) { x->normal_irq_n++; if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - u32 value; - - value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); - /* to schedule NAPI 
on real RIE event. */ - if (likely(value & DMA_CHAN_INTR_ENA_RIE)) { - x->rx_normal_irq_n++; - ret |= handle_rx; - } + x->rx_normal_irq_n++; + ret |= handle_rx; } - if (likely(intr_status & DMA_CHAN_STATUS_TI)) { + if (likely(intr_status & (DMA_CHAN_STATUS_TI | + DMA_CHAN_STATUS_TBU))) { x->tx_normal_irq_n++; ret |= handle_tx; } @@ -168,12 +163,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, x->rx_early_irq++; } - /* Clear the interrupt by writing a logic 1 to the chanX interrupt - * status [21-0] expect reserved bits [5-3] - */ - writel((intr_status & 0x3fffc7), - ioaddr + DMA_CHAN_STATUS(chan)); - + writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan)); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index d6bb953685fa..37d5e6fe7473 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -193,9 +193,10 @@ #define XGMAC_AIE BIT(14) #define XGMAC_RBUE BIT(7) #define XGMAC_RIE BIT(6) +#define XGMAC_TBUE BIT(2) #define XGMAC_TIE BIT(0) #define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \ - XGMAC_RIE | XGMAC_TIE) + XGMAC_RIE | XGMAC_TBUE | XGMAC_TIE) #define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x))) #define XGMAC_RWT GENMASK(7, 0) #define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x))) @@ -204,6 +205,7 @@ #define XGMAC_FBE BIT(12) #define XGMAC_RBU BIT(7) #define XGMAC_RI BIT(6) +#define XGMAC_TBU BIT(2) #define XGMAC_TPS BIT(1) #define XGMAC_TI BIT(0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index c5e25580a43f..2ba712b48a89 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -283,12 +283,10 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr, x->normal_irq_n++; if (likely(intr_status & XGMAC_RI)) { - if (likely(intr_en & XGMAC_RIE)) { - x->rx_normal_irq_n++; - ret |= handle_rx; - } + x->rx_normal_irq_n++; + ret |= handle_rx; } - if (likely(intr_status & XGMAC_TI)) { + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { x->tx_normal_irq_n++; ret |= handle_tx; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 63e1064b27a2..dd95d959c1ce 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -28,6 +28,7 @@ #include #include "common.h" #include +#include #include struct stmmac_resources { @@ -78,11 +79,10 @@ struct stmmac_rx_queue { }; struct stmmac_channel { - struct napi_struct napi ____cacheline_aligned_in_smp; + struct napi_struct rx_napi ____cacheline_aligned_in_smp; + struct napi_struct tx_napi ____cacheline_aligned_in_smp; struct stmmac_priv *priv_data; u32 index; - int has_rx; - int has_tx; }; struct stmmac_tc_entry { @@ -174,6 +174,7 @@ struct stmmac_priv { unsigned int mode; unsigned int chain_mode; int extend_desc; + struct hwtstamp_config tstamp_config; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_ops; unsigned int default_addend; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1f61c25d82b..3c749c327cbd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -696,33 +696,38 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, struct ethtool_eee *edata) { 
struct stmmac_priv *priv = netdev_priv(dev); + int ret; - priv->eee_enabled = edata->eee_enabled; - - if (!priv->eee_enabled) + if (!edata->eee_enabled) { stmmac_disable_eee_mode(priv); - else { + } else { /* We are asking for enabling the EEE but it is safe * to verify all by invoking the eee_init function. * In case of failure it will return an error. */ - priv->eee_enabled = stmmac_eee_init(priv); - if (!priv->eee_enabled) + edata->eee_enabled = stmmac_eee_init(priv); + if (!edata->eee_enabled) return -EOPNOTSUPP; - - /* Do not change tx_lpi_timer in case of failure */ - priv->tx_lpi_timer = edata->tx_lpi_timer; } - return phy_ethtool_set_eee(dev->phydev, edata); + ret = phy_ethtool_set_eee(dev->phydev, edata); + if (ret) + return ret; + + priv->eee_enabled = edata->eee_enabled; + priv->tx_lpi_timer = edata->tx_lpi_timer; + return 0; } static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (usec * (clk / 1000000)) / 256; } @@ -731,8 +736,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) { unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); - if (!clk) - return 0; + if (!clk) { + clk = priv->plat->clk_ref_rate; + if (!clk) + return 0; + } return (riwt * 256) / (clk / 1000000); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5afba69981cf..97c5e1aad88f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -155,7 +155,10 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; - napi_disable(&ch->napi); + if (queue < rx_queues_cnt) + napi_disable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_disable(&ch->tx_napi); } } @@ -173,7 +176,10 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; - napi_enable(&ch->napi); + if (queue < rx_queues_cnt) + napi_enable(&ch->rx_napi); + if (queue < tx_queues_cnt) + napi_enable(&ch->tx_napi); } } @@ -474,7 +480,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, struct sk_buff *skb) { struct skb_shared_hwtstamps shhwtstamp; - u64 ns; + u64 ns = 0; if (!priv->hwts_tx_en) return; @@ -513,7 +519,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, { struct skb_shared_hwtstamps *shhwtstamp = NULL; struct dma_desc *desc = p; - u64 ns; + u64 ns = 0; if (!priv->hwts_rx_en) return; @@ -534,7 +540,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, } /** - * stmmac_hwtstamp_ioctl - control hardware timestamping. + * stmmac_hwtstamp_set - control hardware timestamping. * @dev: device pointer. * @ifr: An IOCTL specific structure, that can contain a pointer to * a proprietary structure used to pass information to the driver. @@ -544,7 +550,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, * Return Value: * 0 on success and an appropriate -ve integer on failure. 
*/ -static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) +static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) { struct stmmac_priv *priv = netdev_priv(dev); struct hwtstamp_config config; @@ -558,8 +564,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) u32 snap_type_sel = 0; u32 ts_master_en = 0; u32 ts_event_en = 0; + u32 sec_inc = 0; u32 value = 0; - u32 sec_inc; bool xmac; xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; @@ -573,7 +579,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) } if (copy_from_user(&config, ifr->ifr_data, - sizeof(struct hwtstamp_config))) + sizeof(config))) return -EFAULT; netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", @@ -597,12 +603,13 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - /* take time stamp for all event messages */ - if (xmac) - snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; - else - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - + /* 'xmac' hardware can support Sync, Pdelay_Req and + * Pdelay_resp by setting bit14 and bits17/16 to 01 + * This leaves Delay_Req timestamps out. + * Enable all events *and* general purpose message + * timestamping + */ + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; break; @@ -633,10 +640,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - if (xmac) - snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; - else - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -669,12 +673,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* PTP v2/802.AS1 any layer, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; - /* take time stamp for all event messages */ - if (xmac) - snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; - else - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; - + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; ptp_over_ethernet = PTP_TCR_TSIPENA; @@ -765,8 +764,31 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) (u32)now.tv_sec, now.tv_nsec); } + memcpy(&priv->tstamp_config, &config, sizeof(config)); + return copy_to_user(ifr->ifr_data, &config, - sizeof(struct hwtstamp_config)) ? -EFAULT : 0; + sizeof(config)) ? -EFAULT : 0; +} + +/** + * stmmac_hwtstamp_get - read hardware timestamping. + * @dev: device pointer. + * @ifr: An IOCTL specific structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * Description: + * This function obtain the current hardware timestamping settings + as requested. + */ +static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct hwtstamp_config *config = &priv->tstamp_config; + + if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, config, + sizeof(*config)) ? 
-EFAULT : 0; } /** @@ -1939,6 +1961,10 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } + /* We still have pending packets, let's call for a new scheduling */ + if (tx_q->dirty_tx != tx_q->cur_tx) + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10)); + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); return count; @@ -2029,23 +2055,15 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, &priv->xstats, chan); struct stmmac_channel *ch = &priv->channel[chan]; - bool needs_work = false; - if ((status & handle_rx) && ch->has_rx) { - needs_work = true; - } else { - status &= ~handle_rx; - } - - if ((status & handle_tx) && ch->has_tx) { - needs_work = true; - } else { - status &= ~handle_tx; + if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + napi_schedule_irqoff(&ch->rx_napi); } - if (needs_work && napi_schedule_prep(&ch->napi)) { + if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { stmmac_disable_dma_irq(priv, priv->ioaddr, chan); - __napi_schedule(&ch->napi); + napi_schedule_irqoff(&ch->tx_napi); } return status; @@ -2241,8 +2259,14 @@ static void stmmac_tx_timer(struct timer_list *t) ch = &priv->channel[tx_q->queue_index]; - if (likely(napi_schedule_prep(&ch->napi))) - __napi_schedule(&ch->napi); + /* + * If NAPI is already running we can miss some events. Let's rearm + * the timer and try again. + */ + if (likely(napi_schedule_prep(&ch->tx_napi))) + __napi_schedule(&ch->tx_napi); + else + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10)); } /** @@ -3023,10 +3047,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q = &priv->tx_queue[queue]; + if (priv->tx_path_in_lpi_mode) + stmmac_disable_eee_mode(priv); + /* Manage oversized TCP frames for GMAC4 device */ if (skb_is_gso(skb) && priv->tso) { - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + /* + * There is no way to determine the number of TSO + * capable Queues. Let's use always the Queue 0 + * because if TSO is supported then at least this + * one will be capable. + */ + skb_set_queue_mapping(skb, 0); + return stmmac_tso_xmit(skb, dev); + } } if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { @@ -3041,9 +3077,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } - if (priv->tx_path_in_lpi_mode) - stmmac_disable_eee_mode(priv); - entry = tx_q->cur_tx; first_entry = entry; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -3489,7 +3522,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) else skb->ip_summed = CHECKSUM_UNNECESSARY; - napi_gro_receive(&ch->napi, skb); + napi_gro_receive(&ch->rx_napi, skb); priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; @@ -3504,40 +3537,45 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) return count; } -/** - * stmmac_poll - stmmac poll method (NAPI) - * @napi : pointer to the napi structure. - * @budget : maximum number of packets that the current CPU can receive from - * all interfaces. - * Description : - * To look at the incoming frames and clear the tx resources. 
- */ -static int stmmac_napi_poll(struct napi_struct *napi, int budget) +static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) { struct stmmac_channel *ch = - container_of(napi, struct stmmac_channel, napi); + container_of(napi, struct stmmac_channel, rx_napi); struct stmmac_priv *priv = ch->priv_data; - int work_done, rx_done = 0, tx_done = 0; u32 chan = ch->index; + int work_done; priv->xstats.napi_poll++; - if (ch->has_tx) - tx_done = stmmac_tx_clean(priv, budget, chan); - if (ch->has_rx) - rx_done = stmmac_rx(priv, budget, chan); + work_done = stmmac_rx(priv, budget, chan); + if (work_done < budget && napi_complete_done(napi, work_done)) + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + return work_done; +} - work_done = max(rx_done, tx_done); - work_done = min(work_done, budget); +static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) +{ + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, tx_napi); + struct stmmac_priv *priv = ch->priv_data; + struct stmmac_tx_queue *tx_q; + u32 chan = ch->index; + int work_done; - if (work_done < budget && napi_complete_done(napi, work_done)) { - int stat; + priv->xstats.napi_poll++; + + work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan); + work_done = min(work_done, budget); + if (work_done < budget && napi_complete_done(napi, work_done)) stmmac_enable_dma_irq(priv, priv->ioaddr, chan); - stat = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); - if (stat && napi_reschedule(napi)) - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + + /* Force transmission restart */ + tx_q = &priv->tx_queue[chan]; + if (tx_q->cur_tx != tx_q->dirty_tx) { + stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, + chan); } return work_done; @@ -3767,7 +3805,10 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) ret = phy_mii_ioctl(dev->phydev, rq, cmd); break; case SIOCSHWTSTAMP: - ret = stmmac_hwtstamp_ioctl(dev, rq); + ret = stmmac_hwtstamp_set(dev, rq); + break; + case SIOCGHWTSTAMP: + ret = stmmac_hwtstamp_get(dev, rq); break; default: break; @@ -4314,13 +4355,14 @@ int stmmac_dvr_probe(struct device *device, ch->priv_data = priv; ch->index = queue; - if (queue < priv->plat->rx_queues_to_use) - ch->has_rx = true; - if (queue < priv->plat->tx_queues_to_use) - ch->has_tx = true; - - netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, - NAPI_POLL_WEIGHT); + if (queue < priv->plat->rx_queues_to_use) { + netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx, + NAPI_POLL_WEIGHT); + } + if (queue < priv->plat->tx_queues_to_use) { + netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx, + NAPI_POLL_WEIGHT); + } } mutex_init(&priv->lock); @@ -4376,7 +4418,10 @@ error_mdio_register: for (queue = 0; queue < maxq; queue++) { struct stmmac_channel *ch = &priv->channel[queue]; - netif_napi_del(&ch->napi); + if (queue < priv->plat->rx_queues_to_use) + netif_napi_del(&ch->rx_napi); + if (queue < priv->plat->tx_queues_to_use) + netif_napi_del(&ch->tx_napi); } error_hw_init: destroy_workqueue(priv->wq); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 2b800ce1d5bf..3031f2bf15d6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -408,6 +408,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) /* Default to phy auto-detection */ 
plat->phy_addr = -1; + /* Get clk_csr from device tree */ + of_property_read_u32(np, "clk_csr", &plat->clk_csr); + /* "snps,phy-addr" is not a standard property. Mark it as deprecated * and warn of its use. Remove this when phy node support is added. */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 2293e21f789f..cc60b3fb0892 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -105,7 +105,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) struct stmmac_priv *priv = container_of(ptp, struct stmmac_priv, ptp_clock_ops); unsigned long flags; - u64 ns; + u64 ns = 0; spin_lock_irqsave(&priv->ptp_lock, flags); stmmac_get_systime(priv, priv->ptpaddr, &ns); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index ecccf895fd7e..e852821289cf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -59,9 +59,14 @@ #define PTP_TCR_TSEVNTENA BIT(14) /* Enable Snapshot for Messages Relevant to Master */ #define PTP_TCR_TSMSTRENA BIT(15) -/* Select PTP packets for Taking Snapshots */ +/* Select PTP packets for Taking Snapshots + * On gmac4 specifically: + * Enable SYNC, Pdelay_Req, Pdelay_Resp when TSEVNTENA is enabled. + * or + * Enable SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req, Pdelay_Resp, + * Pdelay_Resp_Follow_Up if TSEVNTENA is disabled + */ #define PTP_TCR_SNAPTYPSEL_1 BIT(16) -#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) /* Enable MAC address for PTP Frame Filtering */ #define PTP_TCR_TSENMACADDR BIT(18) diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 7ec4eb74fe21..6fc05c106afc 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) cp->net_stats[ring].tx_packets++; cp->net_stats[ring].tx_bytes += skb->len; spin_unlock(&cp->stat_lock[ring]); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } cp->tx_old[ring] = entry; diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index d84501441edd..6f99437a6962 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -7464,6 +7464,7 @@ static int niu_add_ethtool_tcam_entry(struct niu *np, class = CLASS_CODE_USER_PROG4; break; default: + class = CLASS_CODE_UNRECOG; break; } ret = tcam_user_ip_class_set(np, class, 0, diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 720b7ac77f3b..e9b757b03b56 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp) DTX(("skb(%p) ", skb)); bp->tx_skbs[elem] = NULL; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); elem = NEXT_TX(elem); } diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index b9221fc1674d..3e7631160384 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2760,7 +2760,7 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) void __iomem *p = pci_map_rom(pdev, &size); if (p) { - int found; + int found; found = readb(p) == 0x55 && readb(p + 1) == 0xaa && diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 
ff641cf30a4e..d007dfeba5c3 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp) this = &txbase[elem]; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); dev->stats.tx_packets++; } hp->tx_old = elem; diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dc966ddb6d81..b24c11187017 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) tx_level -= db->rptr->len; /* '-' koz len is negative */ /* now should come skb pointer - free it */ - dev_kfree_skb_irq(db->rptr->addr.skb); + dev_consume_skb_irq(db->rptr->addr.skb); bdx_tx_db_inc_rptr(db); } diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index bb126be1eb72..8b21b40a9fe5 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -49,10 +49,11 @@ config TI_DAVINCI_CPDMA will be called davinci_cpdma. This is recommended. config TI_CPSW_PHY_SEL - bool + bool "TI CPSW Phy mode Selection (DEPRECATED)" + default n ---help--- This driver supports configuring of the phy mode connected to - the CPSW. + the CPSW. DEPRECATED: use PHY_TI_GMII_SEL. config TI_CPSW_ALE tristate "TI CPSW ALE Support" @@ -64,7 +65,6 @@ config TI_CPSW depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST select TI_DAVINCI_CPDMA select TI_DAVINCI_MDIO - select TI_CPSW_PHY_SEL select TI_CPSW_ALE select MFD_SYSCON select REGMAP diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 810dfc7de1f9..e2d47b24a869 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue) netdev_dbg(dev, "sent 0x%p, len=%d\n", desc->skb, desc->skb->len); - dev_kfree_skb_irq(desc->skb); + dev_consume_skb_irq(desc->skb); desc->skb = NULL; if (__netif_subqueue_stopped(dev, queue)) netif_wake_subqueue(dev, queue); diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 396e1cd10667..fec275e2208d 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -78,7 +78,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv, case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; - }; + } mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6); mask |= BIT(slave + 4); @@ -133,7 +133,7 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv, case PHY_INTERFACE_MODE_MII: mode = AM33XX_GMII_SEL_MODE_MII; break; - }; + } switch (slave) { case 0: diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index cf111db3dc27..907e05fc22e4 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -21,7 +21,13 @@ ((mac)[2] << 16) | ((mac)[3] << 24)) #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8)) +#if IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave); +#else +static inline +void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) +{} +#endif int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr); #endif /* __CPSW_H__ */ diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 840820402cd0..57450b174fc4 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ 
b/drivers/net/ethernet/ti/davinci_emac.c @@ -2029,7 +2029,6 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { .resume = davinci_emac_resume, }; -#if IS_ENABLED(CONFIG_OF) static const struct emac_platform_data am3517_emac_data = { .version = EMAC_VERSION_2, .hw_ram_addr = 0x01e20000, @@ -2046,14 +2045,13 @@ static const struct of_device_id davinci_emac_of_match[] = { {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); -#endif /* davinci_emac_driver: EMAC platform driver structure */ static struct platform_driver davinci_emac_driver = { .driver = { .name = "davinci_emac", .pm = &davinci_emac_pm_ops, - .of_match_table = of_match_ptr(davinci_emac_of_match), + .of_match_table = davinci_emac_of_match, }, .probe = davinci_emac_probe, .remove = davinci_emac_remove, diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1f612268c998..d847f672a705 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device, const char *name; char node_name[32]; - if (of_property_read_string(node, "label", &name) < 0) { + if (of_property_read_string(child, "label", &name) < 0) { snprintf(node_name, sizeof(node_name), "%pOFn", child); name = node_name; } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 82412691ee66..27f6cf140845 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], le16_to_cpu(pktlen), DMA_TO_DEVICE); } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); tdinfo->skb = NULL; } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 15bb058db392..44efffbe7970 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -630,7 +630,7 @@ static void temac_start_xmit_done(struct net_device *ndev) dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len, DMA_TO_DEVICE); if (cur_p->app4) - dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); + dev_consume_skb_irq((struct sk_buff *)cur_p->app4); cur_p->app0 = 0; cur_p->app1 = 0; cur_p->app2 = 0; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 0789d8af7d72..ec7e7ec24ff9 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -595,7 +595,7 @@ static void axienet_start_xmit_done(struct net_device *ndev) (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), DMA_TO_DEVICE); if (cur_p->app4) - dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); + dev_consume_skb_irq((struct sk_buff *)cur_p->app4); /*cur_p->phys = 0;*/ cur_p->app0 = 0; cur_p->app1 = 0; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 639e3e99af46..b03a417d0073 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -581,7 +581,7 @@ static void xemaclite_tx_handler(struct net_device *dev) return; dev->stats.tx_bytes += lp->deferred_skb->len; - dev_kfree_skb_irq(lp->deferred_skb); + dev_consume_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); diff --git 
a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index aee55c03def0..ed6623a9801e 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -139,7 +139,7 @@ #ifdef __ARMEB__ typedef struct sk_buff buffer_t; #define free_buffer dev_kfree_skb -#define free_buffer_irq dev_kfree_skb_irq +#define free_buffer_irq dev_consume_skb_irq #else typedef void buffer_t; #define free_buffer kfree diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 38ac8ef41f5f..56b7791911bf 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp) bp->descr_block_virt->xmt_data[comp].long_1, p_xmt_drv_descr->p_skb->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); + dev_consume_skb_irq(p_xmt_drv_descr->p_skb); /* * Move to start of next packet by updating completion index diff --git a/drivers/net/fddi/skfp/pcmplc.c b/drivers/net/fddi/skfp/pcmplc.c index 6ef44c480bd5..f29f5a6a45ab 100644 --- a/drivers/net/fddi/skfp/pcmplc.c +++ b/drivers/net/fddi/skfp/pcmplc.c @@ -851,6 +851,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd) case ACTIONS(PC5_SIGNAL) : ACTIONS_DONE() ; + /* fall through */ case PC5_SIGNAL : if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT)) break ; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 58bbba8582b0..5583d993480d 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -692,15 +692,20 @@ out: static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); - bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6); bool metadata = geneve->collect_md; + bool ipv4, ipv6; int ret = 0; + ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata; + ipv4 = !ipv6 || metadata; #if IS_ENABLED(CONFIG_IPV6) - if (ipv6 || metadata) + if (ipv6) { ret = geneve_sock_add(geneve, true); + if (ret < 0 && ret != -EAFNOSUPPORT) + ipv4 = false; + } #endif - if (!ret && (!ipv6 || metadata)) + if (ipv4) ret = geneve_sock_add(geneve, false); if (ret < 0) geneve_sock_release(geneve); @@ -1512,9 +1517,13 @@ static void geneve_link_config(struct net_device *dev, } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { - struct rt6_info *rt = rt6_lookup(geneve->net, - &info->key.u.ipv6.dst, NULL, 0, - NULL, 0); + struct rt6_info *rt; + + if (!__in6_dev_get(dev)) + break; + + rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, + NULL, 0); if (rt && rt->dst.dev) ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 190f66c88479..ed0841630990 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -203,32 +203,6 @@ static inline void ser12_set_divisor(struct net_device *dev, */ } -/* --------------------------------------------------------------------- */ - -#if 0 -static inline unsigned int hweight16(unsigned int w) - __attribute__ ((unused)); -static inline unsigned int hweight8(unsigned int w) - __attribute__ ((unused)); - -static inline unsigned int hweight16(unsigned int w) -{ - unsigned short res = (w & 0x5555) + ((w >> 1) & 0x5555); - res = (res & 0x3333) + ((res >> 2) & 0x3333); - res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); - return (res & 0x00FF) + ((res >> 8) & 0x00FF); -} - -static inline unsigned int hweight8(unsigned int w) -{ - unsigned short res = (w & 0x55) + ((w >> 1) & 0x55); - res = (res & 0x33) + ((res >> 2) & 0x33); 
- return (res & 0x0F) + ((res >> 4) & 0x0F); -} -#endif - -/* --------------------------------------------------------------------- */ - static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timespec64 *ts, unsigned char curs) { int timediff; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 256adbd044f5..cf4897043e83 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -744,6 +744,14 @@ void netvsc_linkstatus_callback(struct net_device *net, schedule_delayed_work(&ndev_ctx->dwork, 0); } +static void netvsc_comp_ipcsum(struct sk_buff *skb) +{ + struct iphdr *iph = (struct iphdr *)skb->data; + + iph->check = 0; + iph->check = ip_fast_csum(iph, iph->ihl); +} + static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, struct netvsc_channel *nvchan) { @@ -770,9 +778,17 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, /* skb is already created with CHECKSUM_NONE */ skb_checksum_none_assert(skb); - /* - * In Linux, the IP checksum is always checked. - * Do L4 checksum offload if enabled and present. + /* Incoming packets may have IP header checksum verified by the host. + * They may not have IP header checksum computed after coalescing. + * We compute it here if the flags are set, because on Linux, the IP + * checksum is always checked. + */ + if (csum_info && csum_info->receive.ip_checksum_value_invalid && + csum_info->receive.ip_checksum_succeeded && + skb->protocol == htons(ETH_P_IP)) + netvsc_comp_ipcsum(skb); + + /* Do L4 checksum offload if enabled and present. */ if (csum_info && (net->features & NETIF_F_RXCSUM)) { if (csum_info->receive.tcp_checksum_succeeded || diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 44de81e5f140..c589f5ae75bb 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context) } break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): - /* rx is starting */ - dev_dbg(printdev(lp), "RX is starting\n"); - mcr20a_handle_rx(lp); + /* rx is starting */ + dev_dbg(printdev(lp), "RX is starting\n"); + mcr20a_handle_rx(lp); break; case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): if (lp->is_tx) { diff --git a/drivers/net/ipvlan/Makefile b/drivers/net/ipvlan/Makefile index 8a2c64dc9641..3ee95367a994 100644 --- a/drivers/net/ipvlan/Makefile +++ b/drivers/net/ipvlan/Makefile @@ -5,4 +5,5 @@ obj-$(CONFIG_IPVLAN) += ipvlan.o obj-$(CONFIG_IPVTAP) += ipvtap.o -ipvlan-objs := ipvlan_core.o ipvlan_main.o +ipvlan-objs-$(CONFIG_IPVLAN_L3S) += ipvlan_l3s.o +ipvlan-objs := ipvlan_core.o ipvlan_main.o $(ipvlan-objs-y) diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index adb826f55e60..b906d2f6bd04 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -165,10 +165,9 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, const void *iaddr, bool is_v6); bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); void ipvlan_ht_addr_del(struct ipvl_addr *addr); -struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb, - u16 proto); -unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state); +struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, + int addr_type, bool use_dest); +void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type); void 
ipvlan_count_rx(const struct ipvl_dev *ipvlan, unsigned int len, bool success, bool mcast); int ipvlan_link_new(struct net *src_net, struct net_device *dev, @@ -177,6 +176,36 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, void ipvlan_link_delete(struct net_device *dev, struct list_head *head); void ipvlan_link_setup(struct net_device *dev); int ipvlan_link_register(struct rtnl_link_ops *ops); +#ifdef CONFIG_IPVLAN_L3S +int ipvlan_l3s_register(struct ipvl_port *port); +void ipvlan_l3s_unregister(struct ipvl_port *port); +void ipvlan_migrate_l3s_hook(struct net *oldnet, struct net *newnet); +int ipvlan_l3s_init(void); +void ipvlan_l3s_cleanup(void); +#else +static inline int ipvlan_l3s_register(struct ipvl_port *port) +{ + return -ENOTSUPP; +} + +static inline void ipvlan_l3s_unregister(struct ipvl_port *port) +{ +} + +static inline void ipvlan_migrate_l3s_hook(struct net *oldnet, + struct net *newnet) +{ +} + +static inline int ipvlan_l3s_init(void) +{ + return 0; +} + +static inline void ipvlan_l3s_cleanup(void) +{ +} +#endif /* CONFIG_IPVLAN_L3S */ static inline bool netif_is_ipvlan_port(const struct net_device *dev) { diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 1a8132eb2a3e..e0f5bc82b10c 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -138,7 +138,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) return ret; } -static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type) +void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type) { void *lyr3h = NULL; @@ -355,9 +355,8 @@ out: return ret; } -static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, - void *lyr3h, int addr_type, - bool use_dest) +struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, + int addr_type, bool use_dest) { struct ipvl_addr *addr = NULL; @@ -647,7 +646,9 @@ int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) case IPVLAN_MODE_L2: return ipvlan_xmit_mode_l2(skb, dev); case IPVLAN_MODE_L3: +#ifdef CONFIG_IPVLAN_L3S case IPVLAN_MODE_L3S: +#endif return ipvlan_xmit_mode_l3(skb, dev); } @@ -743,8 +744,10 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) return ipvlan_handle_mode_l2(pskb, port); case IPVLAN_MODE_L3: return ipvlan_handle_mode_l3(pskb, port); +#ifdef CONFIG_IPVLAN_L3S case IPVLAN_MODE_L3S: return RX_HANDLER_PASS; +#endif } /* Should not reach here */ @@ -753,97 +756,3 @@ rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) kfree_skb(skb); return RX_HANDLER_CONSUMED; } - -static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb, - struct net_device *dev) -{ - struct ipvl_addr *addr = NULL; - struct ipvl_port *port; - void *lyr3h; - int addr_type; - - if (!dev || !netif_is_ipvlan_port(dev)) - goto out; - - port = ipvlan_port_get_rcu(dev); - if (!port || port->mode != IPVLAN_MODE_L3S) - goto out; - - lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); - if (!lyr3h) - goto out; - - addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); -out: - return addr; -} - -struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb, - u16 proto) -{ - struct ipvl_addr *addr; - struct net_device *sdev; - - addr = ipvlan_skb_to_addr(skb, dev); - if (!addr) - goto out; - - sdev = addr->master->dev; - switch (proto) { - case AF_INET: - { - int err; - struct iphdr *ip4h = ip_hdr(skb); - - err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr, - ip4h->tos, 
sdev); - if (unlikely(err)) - goto out; - break; - } -#if IS_ENABLED(CONFIG_IPV6) - case AF_INET6: - { - struct dst_entry *dst; - struct ipv6hdr *ip6h = ipv6_hdr(skb); - int flags = RT6_LOOKUP_F_HAS_SADDR; - struct flowi6 fl6 = { - .flowi6_iif = sdev->ifindex, - .daddr = ip6h->daddr, - .saddr = ip6h->saddr, - .flowlabel = ip6_flowinfo(ip6h), - .flowi6_mark = skb->mark, - .flowi6_proto = ip6h->nexthdr, - }; - - skb_dst_drop(skb); - dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, - skb, flags); - skb_dst_set(skb, dst); - break; - } -#endif - default: - break; - } - -out: - return skb; -} - -unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) -{ - struct ipvl_addr *addr; - unsigned int len; - - addr = ipvlan_skb_to_addr(skb, skb->dev); - if (!addr) - goto out; - - skb->dev = addr->master->dev; - len = skb->len + ETH_HLEN; - ipvlan_count_rx(addr->master, len, true, false); -out: - return NF_ACCEPT; -} diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c new file mode 100644 index 000000000000..d17480a911a3 --- /dev/null +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -0,0 +1,227 @@ +/* Copyright (c) 2014 Mahesh Bandewar + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include "ipvlan.h" + +static unsigned int ipvlan_netid __read_mostly; + +struct ipvlan_netns { + unsigned int ipvl_nf_hook_refcnt; +}; + +static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb, + struct net_device *dev) +{ + struct ipvl_addr *addr = NULL; + struct ipvl_port *port; + int addr_type; + void *lyr3h; + + if (!dev || !netif_is_ipvlan_port(dev)) + goto out; + + port = ipvlan_port_get_rcu(dev); + if (!port || port->mode != IPVLAN_MODE_L3S) + goto out; + + lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); + if (!lyr3h) + goto out; + + addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); +out: + return addr; +} + +static struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, + struct sk_buff *skb, u16 proto) +{ + struct ipvl_addr *addr; + struct net_device *sdev; + + addr = ipvlan_skb_to_addr(skb, dev); + if (!addr) + goto out; + + sdev = addr->master->dev; + switch (proto) { + case AF_INET: + { + struct iphdr *ip4h = ip_hdr(skb); + int err; + + err = ip_route_input_noref(skb, ip4h->daddr, ip4h->saddr, + ip4h->tos, sdev); + if (unlikely(err)) + goto out; + break; + } +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + { + struct dst_entry *dst; + struct ipv6hdr *ip6h = ipv6_hdr(skb); + int flags = RT6_LOOKUP_F_HAS_SADDR; + struct flowi6 fl6 = { + .flowi6_iif = sdev->ifindex, + .daddr = ip6h->daddr, + .saddr = ip6h->saddr, + .flowlabel = ip6_flowinfo(ip6h), + .flowi6_mark = skb->mark, + .flowi6_proto = ip6h->nexthdr, + }; + + skb_dst_drop(skb); + dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, + skb, flags); + skb_dst_set(skb, dst); + break; + } +#endif + default: + break; + } +out: + return skb; +} + +static const struct l3mdev_ops ipvl_l3mdev_ops = { + .l3mdev_l3_rcv = ipvlan_l3_rcv, +}; + +static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct ipvl_addr *addr; + unsigned int len; + + addr = ipvlan_skb_to_addr(skb, skb->dev); + if (!addr) + goto out; + + skb->dev = addr->master->dev; + len = skb->len + ETH_HLEN; + ipvlan_count_rx(addr->master, len, true, 
false); +out: + return NF_ACCEPT; +} + +static const struct nf_hook_ops ipvl_nfops[] = { + { + .hook = ipvlan_nf_input, + .pf = NFPROTO_IPV4, + .hooknum = NF_INET_LOCAL_IN, + .priority = INT_MAX, + }, +#if IS_ENABLED(CONFIG_IPV6) + { + .hook = ipvlan_nf_input, + .pf = NFPROTO_IPV6, + .hooknum = NF_INET_LOCAL_IN, + .priority = INT_MAX, + }, +#endif +}; + +static int ipvlan_register_nf_hook(struct net *net) +{ + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + int err = 0; + + if (!vnet->ipvl_nf_hook_refcnt) { + err = nf_register_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); + if (!err) + vnet->ipvl_nf_hook_refcnt = 1; + } else { + vnet->ipvl_nf_hook_refcnt++; + } + + return err; +} + +static void ipvlan_unregister_nf_hook(struct net *net) +{ + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + + if (WARN_ON(!vnet->ipvl_nf_hook_refcnt)) + return; + + vnet->ipvl_nf_hook_refcnt--; + if (!vnet->ipvl_nf_hook_refcnt) + nf_unregister_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); +} + +void ipvlan_migrate_l3s_hook(struct net *oldnet, struct net *newnet) +{ + struct ipvlan_netns *old_vnet; + + ASSERT_RTNL(); + + old_vnet = net_generic(oldnet, ipvlan_netid); + if (!old_vnet->ipvl_nf_hook_refcnt) + return; + + ipvlan_register_nf_hook(newnet); + ipvlan_unregister_nf_hook(oldnet); +} + +static void ipvlan_ns_exit(struct net *net) +{ + struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); + + if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) { + vnet->ipvl_nf_hook_refcnt = 0; + nf_unregister_net_hooks(net, ipvl_nfops, + ARRAY_SIZE(ipvl_nfops)); + } +} + +static struct pernet_operations ipvlan_net_ops = { + .id = &ipvlan_netid, + .size = sizeof(struct ipvlan_netns), + .exit = ipvlan_ns_exit, +}; + +int ipvlan_l3s_init(void) +{ + return register_pernet_subsys(&ipvlan_net_ops); +} + +void ipvlan_l3s_cleanup(void) +{ + unregister_pernet_subsys(&ipvlan_net_ops); +} + +int ipvlan_l3s_register(struct ipvl_port *port) +{ + struct net_device *dev = port->dev; + int ret; + + ASSERT_RTNL(); + + ret = ipvlan_register_nf_hook(read_pnet(&port->pnet)); + if (!ret) { + dev->l3mdev_ops = &ipvl_l3mdev_ops; + dev->priv_flags |= IFF_L3MDEV_RX_HANDLER; + } + + return ret; +} + +void ipvlan_l3s_unregister(struct ipvl_port *port) +{ + struct net_device *dev = port->dev; + + ASSERT_RTNL(); + + dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; + ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); + dev->l3mdev_ops = NULL; +} diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 19bdde60680c..bbeb1623e2d5 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -9,73 +9,10 @@ #include "ipvlan.h" -static unsigned int ipvlan_netid __read_mostly; - -struct ipvlan_netns { - unsigned int ipvl_nf_hook_refcnt; -}; - -static const struct nf_hook_ops ipvl_nfops[] = { - { - .hook = ipvlan_nf_input, - .pf = NFPROTO_IPV4, - .hooknum = NF_INET_LOCAL_IN, - .priority = INT_MAX, - }, -#if IS_ENABLED(CONFIG_IPV6) - { - .hook = ipvlan_nf_input, - .pf = NFPROTO_IPV6, - .hooknum = NF_INET_LOCAL_IN, - .priority = INT_MAX, - }, -#endif -}; - -static const struct l3mdev_ops ipvl_l3mdev_ops = { - .l3mdev_l3_rcv = ipvlan_l3_rcv, -}; - -static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) -{ - ipvlan->dev->mtu = dev->mtu; -} - -static int ipvlan_register_nf_hook(struct net *net) -{ - struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); - int err = 0; - - if (!vnet->ipvl_nf_hook_refcnt) { - err = nf_register_net_hooks(net, 
ipvl_nfops, - ARRAY_SIZE(ipvl_nfops)); - if (!err) - vnet->ipvl_nf_hook_refcnt = 1; - } else { - vnet->ipvl_nf_hook_refcnt++; - } - - return err; -} - -static void ipvlan_unregister_nf_hook(struct net *net) -{ - struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); - - if (WARN_ON(!vnet->ipvl_nf_hook_refcnt)) - return; - - vnet->ipvl_nf_hook_refcnt--; - if (!vnet->ipvl_nf_hook_refcnt) - nf_unregister_net_hooks(net, ipvl_nfops, - ARRAY_SIZE(ipvl_nfops)); -} - static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, struct netlink_ext_ack *extack) { struct ipvl_dev *ipvlan; - struct net_device *mdev = port->dev; unsigned int flags; int err; @@ -97,17 +34,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, } if (nval == IPVLAN_MODE_L3S) { /* New mode is L3S */ - err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); - if (!err) { - mdev->l3mdev_ops = &ipvl_l3mdev_ops; - mdev->priv_flags |= IFF_L3MDEV_MASTER; - } else + err = ipvlan_l3s_register(port); + if (err) goto fail; } else if (port->mode == IPVLAN_MODE_L3S) { /* Old mode was L3S */ - mdev->priv_flags &= ~IFF_L3MDEV_MASTER; - ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); - mdev->l3mdev_ops = NULL; + ipvlan_l3s_unregister(port); } port->mode = nval; } @@ -166,11 +98,8 @@ static void ipvlan_port_destroy(struct net_device *dev) struct ipvl_port *port = ipvlan_port_get_rtnl(dev); struct sk_buff *skb; - if (port->mode == IPVLAN_MODE_L3S) { - dev->priv_flags &= ~IFF_L3MDEV_MASTER; - ipvlan_unregister_nf_hook(dev_net(dev)); - dev->l3mdev_ops = NULL; - } + if (port->mode == IPVLAN_MODE_L3S) + ipvlan_l3s_unregister(port); netdev_rx_handler_unregister(dev); cancel_work_sync(&port->wq); while ((skb = __skb_dequeue(&port->backlog)) != NULL) { @@ -446,6 +375,11 @@ static const struct header_ops ipvlan_header_ops = { .cache_update = eth_header_cache_update, }; +static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) +{ + ipvlan->dev->mtu = dev->mtu; +} + static bool netif_is_ipvlan(const struct net_device *dev) { /* both ipvlan and ipvtap devices use the same netdev_ops */ @@ -499,6 +433,8 @@ static int ipvlan_nl_changelink(struct net_device *dev, if (!data) return 0; + if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; if (data[IFLA_IPVLAN_MODE]) { u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); @@ -601,6 +537,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, struct ipvl_dev *tmp = netdev_priv(phy_dev); phy_dev = tmp->phy_dev; + if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; } else if (!netif_is_ipvlan_port(phy_dev)) { /* Exit early if the underlying link is invalid or busy */ if (phy_dev->type != ARPHRD_ETHER || @@ -781,7 +719,6 @@ static int ipvlan_device_event(struct notifier_block *unused, case NETDEV_REGISTER: { struct net *oldnet, *newnet = dev_net(dev); - struct ipvlan_netns *old_vnet; oldnet = read_pnet(&port->pnet); if (net_eq(newnet, oldnet)) @@ -789,12 +726,7 @@ static int ipvlan_device_event(struct notifier_block *unused, write_pnet(&port->pnet, newnet); - old_vnet = net_generic(oldnet, ipvlan_netid); - if (!old_vnet->ipvl_nf_hook_refcnt) - break; - - ipvlan_register_nf_hook(newnet); - ipvlan_unregister_nf_hook(oldnet); + ipvlan_migrate_l3s_hook(oldnet, newnet); break; } case NETDEV_UNREGISTER: @@ -1068,23 +1000,6 @@ static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = { }; #endif -static void ipvlan_ns_exit(struct net *net) -{ - struct ipvlan_netns *vnet = 
net_generic(net, ipvlan_netid); - - if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) { - vnet->ipvl_nf_hook_refcnt = 0; - nf_unregister_net_hooks(net, ipvl_nfops, - ARRAY_SIZE(ipvl_nfops)); - } -} - -static struct pernet_operations ipvlan_net_ops = { - .id = &ipvlan_netid, - .size = sizeof(struct ipvlan_netns), - .exit = ipvlan_ns_exit, -}; - static int __init ipvlan_init_module(void) { int err; @@ -1099,13 +1014,13 @@ static int __init ipvlan_init_module(void) register_inetaddr_notifier(&ipvlan_addr4_notifier_block); register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block); - err = register_pernet_subsys(&ipvlan_net_ops); + err = ipvlan_l3s_init(); if (err < 0) goto error; err = ipvlan_link_register(&ipvlan_link_ops); if (err < 0) { - unregister_pernet_subsys(&ipvlan_net_ops); + ipvlan_l3s_cleanup(); goto error; } @@ -1126,7 +1041,7 @@ error: static void __exit ipvlan_cleanup_module(void) { rtnl_link_unregister(&ipvlan_link_ops); - unregister_pernet_subsys(&ipvlan_net_ops); + ipvlan_l3s_cleanup(); unregister_netdevice_notifier(&ipvlan_notifier_block); unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); unregister_inetaddr_validator_notifier( diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6d067176320f..0c0f105657d3 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -119,8 +119,6 @@ static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev) return rtnl_dereference(dev->rx_handler_data); } -#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT) - static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, const unsigned char *addr) { @@ -963,7 +961,8 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev, static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags) + u16 flags, + struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; @@ -1123,6 +1122,7 @@ static const struct net_device_ops macvlan_netdev_ops = { #endif .ndo_get_iflink = macvlan_dev_get_iflink, .ndo_features_check = passthru_features_check, + .ndo_change_proto_down = dev_change_proto_down_generic, }; void macvlan_common_setup(struct net_device *dev) @@ -1377,7 +1377,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (!tb[IFLA_ADDRESS]) eth_hw_addr_random(dev); - if (!macvlan_port_exists(lowerdev)) { + if (!netif_is_macvlan_port(lowerdev)) { err = macvlan_port_create(lowerdev); if (err < 0) return err; @@ -1637,7 +1637,7 @@ static int macvlan_device_event(struct notifier_block *unused, struct macvlan_port *port; LIST_HEAD(list_kill); - if (!macvlan_port_exists(dev)) + if (!netif_is_macvlan_port(dev)) return NOTIFY_DONE; port = macvlan_port_get_rtnl(dev); diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 172b271c8bd2..f92c43453ec6 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -248,7 +248,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) static int nsim_bpf_verifier_prep(struct bpf_prog *prog) { - struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev); + struct netdevsim *ns = bpf_offload_dev_priv(prog->aux->offload->offdev); if (!ns->bpf_bind_accept) return -EOPNOTSUPP; @@ -589,7 +589,8 @@ int nsim_bpf_init(struct netdevsim *ns) if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) return -ENOMEM; - ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops); + 
ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, + ns); err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev); if (err) return err; diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 8d8e2b3f263e..75a50b59cb8f 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -22,7 +22,6 @@ #include #include #include -#include #include "netdevsim.h" @@ -148,26 +147,16 @@ static struct device_type nsim_dev_type = { .release = nsim_dev_release, }; -static int -nsim_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) +static int nsim_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) { struct netdevsim *ns = netdev_priv(dev); - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(ns->sdev->switch_id); - memcpy(&attr->u.ppid.id, &ns->sdev->switch_id, - attr->u.ppid.id_len); - return 0; - default: - return -EOPNOTSUPP; - } + ppid->id_len = sizeof(ns->sdev->switch_id); + memcpy(&ppid->id, &ns->sdev->switch_id, ppid->id_len); + return 0; } -static const struct switchdev_ops nsim_switchdev_ops = { - .switchdev_port_attr_get = nsim_port_attr_get, -}; - static int nsim_init(struct net_device *dev) { char sdev_ddir_name[10], sdev_link_name[32]; @@ -214,7 +203,6 @@ static int nsim_init(struct net_device *dev) goto err_bpf_uninit; SET_NETDEV_DEV(dev, &ns->dev); - SWITCHDEV_SET_OPS(dev, &nsim_switchdev_ops); err = nsim_devlink_setup(ns); if (err) @@ -493,6 +481,7 @@ static const struct net_device_ops nsim_netdev_ops = { .ndo_setup_tc = nsim_setup_tc, .ndo_set_features = nsim_set_features, .ndo_bpf = nsim_bpf, + .ndo_get_port_parent_id = nsim_get_port_parent_id, }; static void nsim_setup(struct net_device *dev) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 3d187cd50eb0..071869db44cf 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -87,6 +87,18 @@ config MDIO_BUS_MUX_MMIOREG Currently, only 8/16/32 bits registers are supported. +config MDIO_BUS_MUX_MULTIPLEXER + tristate "MDIO bus multiplexer using kernel multiplexer subsystem" + depends on OF_MDIO + select MULTIPLEXER + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexer + that is controlled via the kernel multiplexer subsystem. The + bus multiplexer connects one of several child MDIO busses to + a parent bus. Child bus selection is under the control of + the kernel multiplexer subsystem. 
+ config MDIO_CAVIUM tristate diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 5805c0b7d60e..ece5dae67174 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o +obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o @@ -45,6 +46,10 @@ sfp-obj-$(CONFIG_SFP) += sfp-bus.o obj-y += $(sfp-obj-y) $(sfp-obj-m) obj-$(CONFIG_AMD_PHY) += amd.o +aquantia-objs += aquantia_main.o +ifdef CONFIG_HWMON +aquantia-objs += aquantia_hwmon.o +endif obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o obj-$(CONFIG_ASIX_PHY) += asix.o obj-$(CONFIG_AT803X_PHY) += at803x.o diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 9d0504f3e3b2..65b4b0960b1e 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -1,15 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Driver for AMD am79c PHYs * * Author: Heiko Schocher * * Copyright (c) 2011 DENX Software Engineering GmbH - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include #include diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c deleted file mode 100644 index beb3309bb0f0..000000000000 --- a/drivers/net/phy/aquantia.c +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Driver for Aquantia PHY - * - * Author: Shaohui Xie - * - * Copyright 2015 Freescale Semiconductor, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ - -#include -#include -#include -#include -#include -#include -#include - -#define PHY_ID_AQ1202 0x03a1b445 -#define PHY_ID_AQ2104 0x03a1b460 -#define PHY_ID_AQR105 0x03a1b4a2 -#define PHY_ID_AQR106 0x03a1b4d0 -#define PHY_ID_AQR107 0x03a1b4e0 -#define PHY_ID_AQR405 0x03a1b4b0 - -static int aquantia_config_aneg(struct phy_device *phydev) -{ - linkmode_copy(phydev->supported, phy_10gbit_features); - linkmode_copy(phydev->advertising, phydev->supported); - - return 0; -} - -static int aquantia_config_intr(struct phy_device *phydev) -{ - int err; - - if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001); - } else { - err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0); - if (err < 0) - return err; - - err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0); - } - - return err; -} - -static int aquantia_ack_interrupt(struct phy_device *phydev) -{ - int reg; - - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01); - return (reg < 0) ? 
reg : 0; -} - -static int aquantia_read_status(struct phy_device *phydev) -{ - int reg; - - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); - reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); - if (reg & MDIO_STAT1_LSTATUS) - phydev->link = 1; - else - phydev->link = 0; - - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); - mdelay(10); - reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); - - switch (reg) { - case 0x9: - phydev->speed = SPEED_2500; - break; - case 0x5: - phydev->speed = SPEED_1000; - break; - case 0x3: - phydev->speed = SPEED_100; - break; - case 0x7: - default: - phydev->speed = SPEED_10000; - break; - } - phydev->duplex = DUPLEX_FULL; - - return 0; -} - -static struct phy_driver aquantia_driver[] = { -{ - .phy_id = PHY_ID_AQ1202, - .phy_id_mask = 0xfffffff0, - .name = "Aquantia AQ1202", - .features = PHY_10GBIT_FULL_FEATURES, - .aneg_done = genphy_c45_aneg_done, - .config_aneg = aquantia_config_aneg, - .config_intr = aquantia_config_intr, - .ack_interrupt = aquantia_ack_interrupt, - .read_status = aquantia_read_status, -}, -{ - .phy_id = PHY_ID_AQ2104, - .phy_id_mask = 0xfffffff0, - .name = "Aquantia AQ2104", - .features = PHY_10GBIT_FULL_FEATURES, - .aneg_done = genphy_c45_aneg_done, - .config_aneg = aquantia_config_aneg, - .config_intr = aquantia_config_intr, - .ack_interrupt = aquantia_ack_interrupt, - .read_status = aquantia_read_status, -}, -{ - .phy_id = PHY_ID_AQR105, - .phy_id_mask = 0xfffffff0, - .name = "Aquantia AQR105", - .features = PHY_10GBIT_FULL_FEATURES, - .aneg_done = genphy_c45_aneg_done, - .config_aneg = aquantia_config_aneg, - .config_intr = aquantia_config_intr, - .ack_interrupt = aquantia_ack_interrupt, - .read_status = aquantia_read_status, -}, -{ - .phy_id = PHY_ID_AQR106, - .phy_id_mask = 0xfffffff0, - .name = "Aquantia AQR106", - .features = PHY_10GBIT_FULL_FEATURES, - .aneg_done = genphy_c45_aneg_done, - .config_aneg = aquantia_config_aneg, - .config_intr = aquantia_config_intr, - .ack_interrupt = aquantia_ack_interrupt, - .read_status = aquantia_read_status, -}, -{ - .phy_id = PHY_ID_AQR107, - .phy_id_mask = 0xfffffff0, - .name = "Aquantia AQR107", - .features = PHY_10GBIT_FULL_FEATURES, - .aneg_done = genphy_c45_aneg_done, - .config_aneg = aquantia_config_aneg, - .config_intr = aquantia_config_intr, - .ack_interrupt = aquantia_ack_interrupt, - .read_status = aquantia_read_status, -}, -{ - .phy_id = PHY_ID_AQR405, - .phy_id_mask = 0xfffffff0, - .name = "Aquantia AQR405", - .features = PHY_10GBIT_FULL_FEATURES, - .aneg_done = genphy_c45_aneg_done, - .config_aneg = aquantia_config_aneg, - .config_intr = aquantia_config_intr, - .ack_interrupt = aquantia_ack_interrupt, - .read_status = aquantia_read_status, -}, -}; - -module_phy_driver(aquantia_driver); - -static struct mdio_device_id __maybe_unused aquantia_tbl[] = { - { PHY_ID_AQ1202, 0xfffffff0 }, - { PHY_ID_AQ2104, 0xfffffff0 }, - { PHY_ID_AQR105, 0xfffffff0 }, - { PHY_ID_AQR106, 0xfffffff0 }, - { PHY_ID_AQR107, 0xfffffff0 }, - { PHY_ID_AQR405, 0xfffffff0 }, - { } -}; - -MODULE_DEVICE_TABLE(mdio, aquantia_tbl); - -MODULE_DESCRIPTION("Aquantia PHY driver"); -MODULE_AUTHOR("Shaohui Xie "); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/aquantia.h b/drivers/net/phy/aquantia.h new file mode 100644 index 000000000000..5a16caab7b2f --- /dev/null +++ b/drivers/net/phy/aquantia.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 + * HWMON driver for Aquantia PHY + * + * Author: Nikita Yushchenko + * Author: Andrew Lunn + * Author: Heiner Kallweit + */ + +#include +#include + +#if 
IS_REACHABLE(CONFIG_HWMON) +int aqr_hwmon_probe(struct phy_device *phydev); +#else +static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; } +#endif diff --git a/drivers/net/phy/aquantia_hwmon.c b/drivers/net/phy/aquantia_hwmon.c new file mode 100644 index 000000000000..19c4c280a6cd --- /dev/null +++ b/drivers/net/phy/aquantia_hwmon.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* HWMON driver for Aquantia PHY + * + * Author: Nikita Yushchenko + * Author: Andrew Lunn + * Author: Heiner Kallweit + */ + +#include +#include +#include +#include + +#include "aquantia.h" + +/* Vendor specific 1, MDIO_MMD_VEND2 */ +#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421 +#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422 +#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423 +#define VEND1_THERMAL_PROV_LOW_TEMP_WARN 0xc424 +#define VEND1_THERMAL_STAT1 0xc820 +#define VEND1_THERMAL_STAT2 0xc821 +#define VEND1_THERMAL_STAT2_VALID BIT(0) +#define VEND1_GENERAL_STAT1 0xc830 +#define VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL BIT(14) +#define VEND1_GENERAL_STAT1_LOW_TEMP_FAIL BIT(13) +#define VEND1_GENERAL_STAT1_HIGH_TEMP_WARN BIT(12) +#define VEND1_GENERAL_STAT1_LOW_TEMP_WARN BIT(11) + +#if IS_REACHABLE(CONFIG_HWMON) + +static umode_t aqr_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_lcrit_alarm: + case hwmon_temp_crit_alarm: + return 0444; + case hwmon_temp_min: + case hwmon_temp_max: + case hwmon_temp_lcrit: + case hwmon_temp_crit: + return 0644; + default: + return 0; + } +} + +static int aqr_hwmon_get(struct phy_device *phydev, int reg, long *value) +{ + int temp = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg); + + if (temp < 0) + return temp; + + /* 16 bit value is 2's complement with LSB = 1/256th degree Celsius */ + *value = (s16)temp * 1000 / 256; + + return 0; +} + +static int aqr_hwmon_set(struct phy_device *phydev, int reg, long value) +{ + int temp; + + if (value >= 128000 || value < -128000) + return -ERANGE; + + temp = value * 256 / 1000; + + /* temp is in s16 range and we're interested in lower 16 bits only */ + return phy_write_mmd(phydev, MDIO_MMD_VEND1, reg, (u16)temp); +} + +static int aqr_hwmon_test_bit(struct phy_device *phydev, int reg, int bit) +{ + int val = phy_read_mmd(phydev, MDIO_MMD_VEND1, reg); + + if (val < 0) + return val; + + return !!(val & bit); +} + +static int aqr_hwmon_status1(struct phy_device *phydev, int bit, long *value) +{ + int val = aqr_hwmon_test_bit(phydev, VEND1_GENERAL_STAT1, bit); + + if (val < 0) + return val; + + *value = val; + + return 0; +} + +static int aqr_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *value) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int reg; + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + switch (attr) { + case hwmon_temp_input: + reg = aqr_hwmon_test_bit(phydev, VEND1_THERMAL_STAT2, + VEND1_THERMAL_STAT2_VALID); + if (reg < 0) + return reg; + if (!reg) + return -EBUSY; + + return aqr_hwmon_get(phydev, VEND1_THERMAL_STAT1, value); + + case hwmon_temp_lcrit: + return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL, + value); + case hwmon_temp_min: + return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN, + value); + case hwmon_temp_max: + return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN, + value); + case hwmon_temp_crit: + 
return aqr_hwmon_get(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL, + value); + case hwmon_temp_lcrit_alarm: + return aqr_hwmon_status1(phydev, + VEND1_GENERAL_STAT1_LOW_TEMP_FAIL, + value); + case hwmon_temp_min_alarm: + return aqr_hwmon_status1(phydev, + VEND1_GENERAL_STAT1_LOW_TEMP_WARN, + value); + case hwmon_temp_max_alarm: + return aqr_hwmon_status1(phydev, + VEND1_GENERAL_STAT1_HIGH_TEMP_WARN, + value); + case hwmon_temp_crit_alarm: + return aqr_hwmon_status1(phydev, + VEND1_GENERAL_STAT1_HIGH_TEMP_FAIL, + value); + default: + return -EOPNOTSUPP; + } +} + +static int aqr_hwmon_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long value) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + + if (type != hwmon_temp) + return -EOPNOTSUPP; + + switch (attr) { + case hwmon_temp_lcrit: + return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_FAIL, + value); + case hwmon_temp_min: + return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_LOW_TEMP_WARN, + value); + case hwmon_temp_max: + return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_WARN, + value); + case hwmon_temp_crit: + return aqr_hwmon_set(phydev, VEND1_THERMAL_PROV_HIGH_TEMP_FAIL, + value); + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops aqr_hwmon_ops = { + .is_visible = aqr_hwmon_is_visible, + .read = aqr_hwmon_read, + .write = aqr_hwmon_write, +}; + +static u32 aqr_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0, +}; + +static const struct hwmon_channel_info aqr_hwmon_chip = { + .type = hwmon_chip, + .config = aqr_hwmon_chip_config, +}; + +static u32 aqr_hwmon_temp_config[] = { + HWMON_T_INPUT | + HWMON_T_MAX | HWMON_T_MIN | + HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | + HWMON_T_CRIT | HWMON_T_LCRIT | + HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM, + 0, +}; + +static const struct hwmon_channel_info aqr_hwmon_temp = { + .type = hwmon_temp, + .config = aqr_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *aqr_hwmon_info[] = { + &aqr_hwmon_chip, + &aqr_hwmon_temp, + NULL, +}; + +static const struct hwmon_chip_info aqr_hwmon_chip_info = { + .ops = &aqr_hwmon_ops, + .info = aqr_hwmon_info, +}; + +int aqr_hwmon_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct device *hwmon_dev; + char *hwmon_name; + int i, j; + + hwmon_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); + if (!hwmon_name) + return -ENOMEM; + + for (i = j = 0; hwmon_name[i]; i++) { + if (isalnum(hwmon_name[i])) { + if (i != j) + hwmon_name[j] = hwmon_name[i]; + j++; + } + } + hwmon_name[j] = '\0'; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name, + phydev, &aqr_hwmon_chip_info, NULL); + + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +#endif diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c new file mode 100644 index 000000000000..37218e5d7cc9 --- /dev/null +++ b/drivers/net/phy/aquantia_main.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Aquantia PHY + * + * Author: Shaohui Xie + * + * Copyright 2015 Freescale Semiconductor, Inc. 
+ */ + +#include +#include +#include +#include + +#include "aquantia.h" + +#define PHY_ID_AQ1202 0x03a1b445 +#define PHY_ID_AQ2104 0x03a1b460 +#define PHY_ID_AQR105 0x03a1b4a2 +#define PHY_ID_AQR106 0x03a1b4d0 +#define PHY_ID_AQR107 0x03a1b4e0 +#define PHY_ID_AQCS109 0x03a1b5c2 +#define PHY_ID_AQR405 0x03a1b4b0 + +#define MDIO_AN_VEND_PROV 0xc400 +#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15) +#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14) + +#define MDIO_AN_TX_VEND_STATUS1 0xc800 +#define MDIO_AN_TX_VEND_STATUS1_10BASET (0x0 << 1) +#define MDIO_AN_TX_VEND_STATUS1_100BASETX (0x1 << 1) +#define MDIO_AN_TX_VEND_STATUS1_1000BASET (0x2 << 1) +#define MDIO_AN_TX_VEND_STATUS1_10GBASET (0x3 << 1) +#define MDIO_AN_TX_VEND_STATUS1_2500BASET (0x4 << 1) +#define MDIO_AN_TX_VEND_STATUS1_5000BASET (0x5 << 1) +#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK (0x7 << 1) +#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX BIT(0) + +#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01 + +#define MDIO_AN_TX_VEND_INT_MASK2 0xd401 +#define MDIO_AN_TX_VEND_INT_MASK2_LINK BIT(0) + +#define MDIO_AN_RX_LP_STAT1 0xe820 +#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL BIT(15) +#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF BIT(14) + +/* Vendor specific 1, MDIO_MMD_VEND1 */ +#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00 +#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01 + +#define VEND1_GLOBAL_INT_STD_MASK 0xff00 +#define VEND1_GLOBAL_INT_STD_MASK_PMA1 BIT(15) +#define VEND1_GLOBAL_INT_STD_MASK_PMA2 BIT(14) +#define VEND1_GLOBAL_INT_STD_MASK_PCS1 BIT(13) +#define VEND1_GLOBAL_INT_STD_MASK_PCS2 BIT(12) +#define VEND1_GLOBAL_INT_STD_MASK_PCS3 BIT(11) +#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS1 BIT(10) +#define VEND1_GLOBAL_INT_STD_MASK_PHY_XS2 BIT(9) +#define VEND1_GLOBAL_INT_STD_MASK_AN1 BIT(8) +#define VEND1_GLOBAL_INT_STD_MASK_AN2 BIT(7) +#define VEND1_GLOBAL_INT_STD_MASK_GBE BIT(6) +#define VEND1_GLOBAL_INT_STD_MASK_ALL BIT(0) + +#define VEND1_GLOBAL_INT_VEND_MASK 0xff01 +#define VEND1_GLOBAL_INT_VEND_MASK_PMA BIT(15) +#define VEND1_GLOBAL_INT_VEND_MASK_PCS BIT(14) +#define VEND1_GLOBAL_INT_VEND_MASK_PHY_XS BIT(13) +#define VEND1_GLOBAL_INT_VEND_MASK_AN BIT(12) +#define VEND1_GLOBAL_INT_VEND_MASK_GBE BIT(11) +#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL1 BIT(2) +#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) +#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0) + +static int aqr_config_aneg(struct phy_device *phydev) +{ + bool changed = false; + u16 reg; + int ret; + + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_pma_setup_forced(phydev); + + ret = genphy_c45_an_config_aneg(phydev); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + /* Clause 45 has no standardized support for 1000BaseT, therefore + * use vendor registers for this mode. 
+ */ + reg = 0; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_1000BASET_FULL; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_1000BASET_HALF; + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, + MDIO_AN_VEND_PROV_1000BASET_HALF | + MDIO_AN_VEND_PROV_1000BASET_FULL, reg); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} + +static int aqr_config_intr(struct phy_device *phydev) +{ + int err; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { + err = phy_write_mmd(phydev, MDIO_MMD_AN, + MDIO_AN_TX_VEND_INT_MASK2, + MDIO_AN_TX_VEND_INT_MASK2_LINK); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_INT_STD_MASK, + VEND1_GLOBAL_INT_STD_MASK_ALL); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_INT_VEND_MASK, + VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 | + VEND1_GLOBAL_INT_VEND_MASK_AN); + } else { + err = phy_write_mmd(phydev, MDIO_MMD_AN, + MDIO_AN_TX_VEND_INT_MASK2, 0); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_INT_STD_MASK, 0); + if (err < 0) + return err; + + err = phy_write_mmd(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_INT_VEND_MASK, 0); + } + + return err; +} + +static int aqr_ack_interrupt(struct phy_device *phydev) +{ + int reg; + + reg = phy_read_mmd(phydev, MDIO_MMD_AN, + MDIO_AN_TX_VEND_INT_STATUS2); + return (reg < 0) ? reg : 0; +} + +static int aqr_read_status(struct phy_device *phydev) +{ + int val; + + if (phydev->autoneg == AUTONEG_ENABLE) { + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->lp_advertising, + val & MDIO_AN_RX_LP_STAT1_1000BASET_FULL); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->lp_advertising, + val & MDIO_AN_RX_LP_STAT1_1000BASET_HALF); + } + + return genphy_c45_read_status(phydev); +} + +static int aqcs109_config_init(struct phy_device *phydev) +{ + /* AQCS109 belongs to a chip family partially supporting 10G and 5G. + * PMA speed ability bits are the same for all members of the family, + * AQCS109 however supports speeds up to 2.5G only. 
+ */ + return phy_set_max_speed(phydev, SPEED_2500); +} + +static struct phy_driver aqr_driver[] = { +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQ1202), + .name = "Aquantia AQ1202", + .aneg_done = genphy_c45_aneg_done, + .get_features = genphy_c45_pma_read_abilities, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQ2104), + .name = "Aquantia AQ2104", + .aneg_done = genphy_c45_aneg_done, + .get_features = genphy_c45_pma_read_abilities, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQR105), + .name = "Aquantia AQR105", + .aneg_done = genphy_c45_aneg_done, + .get_features = genphy_c45_pma_read_abilities, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQR106), + .name = "Aquantia AQR106", + .aneg_done = genphy_c45_aneg_done, + .get_features = genphy_c45_pma_read_abilities, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQR107), + .name = "Aquantia AQR107", + .aneg_done = genphy_c45_aneg_done, + .get_features = genphy_c45_pma_read_abilities, + .probe = aqr_hwmon_probe, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQCS109), + .name = "Aquantia AQCS109", + .aneg_done = genphy_c45_aneg_done, + .get_features = genphy_c45_pma_read_abilities, + .probe = aqr_hwmon_probe, + .config_init = aqcs109_config_init, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQR405), + .name = "Aquantia AQR405", + .aneg_done = genphy_c45_aneg_done, + .get_features = genphy_c45_pma_read_abilities, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .ack_interrupt = aqr_ack_interrupt, + .read_status = aqr_read_status, +}, +}; + +module_phy_driver(aqr_driver); + +static struct mdio_device_id __maybe_unused aqr_tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_AQ1202) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQ2104) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQR105) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQR106) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQR107) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQR405) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, aqr_tbl); + +MODULE_DESCRIPTION("Aquantia PHY driver"); +MODULE_AUTHOR("Shaohui Xie "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index f9432d053a22..f3e96191eb6f 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -1,14 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/at803x.c * * Driver for Atheros 803x PHY * * Author: Matus Ujhelyi - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
*/ #include @@ -39,9 +35,6 @@ #define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C #define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B #define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A -#define AT803X_MMD_ACCESS_CONTROL 0x0D -#define AT803X_MMD_ACCESS_CONTROL_DATA 0x0E -#define AT803X_FUNC_DATA 0x4003 #define AT803X_REG_CHIP_CONFIG 0x1f #define AT803X_BT_BX_REG_SEL 0x8000 @@ -110,16 +103,28 @@ static int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg, return phy_write(phydev, AT803X_DEBUG_DATA, val); } -static inline int at803x_enable_rx_delay(struct phy_device *phydev) +static int at803x_enable_rx_delay(struct phy_device *phydev) { return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0, - AT803X_DEBUG_RX_CLK_DLY_EN); + AT803X_DEBUG_RX_CLK_DLY_EN); } -static inline int at803x_enable_tx_delay(struct phy_device *phydev) +static int at803x_enable_tx_delay(struct phy_device *phydev) { return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0, - AT803X_DEBUG_TX_CLK_DLY_EN); + AT803X_DEBUG_TX_CLK_DLY_EN); +} + +static int at803x_disable_rx_delay(struct phy_device *phydev) +{ + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, + AT803X_DEBUG_RX_CLK_DLY_EN, 0); +} + +static int at803x_disable_tx_delay(struct phy_device *phydev) +{ + return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, + AT803X_DEBUG_TX_CLK_DLY_EN, 0); } /* save relevant PHY registers to private copy */ @@ -168,16 +173,9 @@ static int at803x_set_wol(struct phy_device *phydev, if (!is_valid_ether_addr(mac)) return -EINVAL; - for (i = 0; i < 3; i++) { - phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, - AT803X_DEVICE_ADDR); - phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, - offsets[i]); - phy_write(phydev, AT803X_MMD_ACCESS_CONTROL, - AT803X_FUNC_DATA); - phy_write(phydev, AT803X_MMD_ACCESS_CONTROL_DATA, - mac[(i * 2) + 1] | (mac[(i * 2)] << 8)); - } + for (i = 0; i < 3; i++) + phy_write_mmd(phydev, AT803X_DEVICE_ADDR, offsets[i], + mac[(i * 2) + 1] | (mac[(i * 2)] << 8)); value = phy_read(phydev, AT803X_INTR_ENABLE); value |= AT803X_INTR_ENABLE_WOL; @@ -255,21 +253,42 @@ static int at803x_config_init(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID || - phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { + /* The RX and TX delay default is: + * after HW reset: RX delay enabled and TX delay disabled + * after SW reset: RX delay enabled, while TX delay retains the + * value before reset. 
+ * + * So let's first disable the RX and TX delays in PHY and enable + * them based on the mode selected (this also takes care of RGMII + * mode where we expect delays to be disabled) + */ + + ret = at803x_disable_rx_delay(phydev); + if (ret < 0) + return ret; + ret = at803x_disable_tx_delay(phydev); + if (ret < 0) + return ret; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { + /* If RGMII_ID or RGMII_RXID are specified enable RX delay, + * otherwise keep it disabled + */ ret = at803x_enable_rx_delay(phydev); if (ret < 0) return ret; } - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID || - phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { + /* If RGMII_ID or RGMII_TXID are specified enable TX delay, + * otherwise keep it disabled + */ ret = at803x_enable_tx_delay(phydev); - if (ret < 0) - return ret; } - return 0; + return ret; } static int at803x_ack_interrupt(struct phy_device *phydev) diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index e757b09f1889..ab8e12922bf9 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ /* Broadcom Cygnus SoC internal transceivers support. */ diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index e10e7b54ec4b..a75642051b8b 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015-2017 Broadcom - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include "bcm-phy-lib.h" diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index 81cceaa412fe..17faaefcfd60 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #ifndef _LINUX_BCM_PHY_LIB_H diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index a88dd14a25c0..44e6cff419a0 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Broadcom 63xx SOCs integrated PHYs - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #include "bcm-phy-lib.h" #include diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 712224cc442d..b8415f8fae14 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Broadcom BCM7xxx internal transceivers support. * * Copyright (C) 2014-2017 Broadcom - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #include diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index a271239748f2..f0c0eefe2202 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * * Copyright (C) 2011 - 2012 Cavium, Inc. */ @@ -221,4 +218,4 @@ static struct phy_driver bcm87xx_driver[] = { module_phy_driver(bcm87xx_driver); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index aa73c5cc5f86..9605d4fe540b 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/broadcom.c * @@ -7,11 +8,6 @@ * Copyright (c) 2006 Maciej W. Rozycki * * Inspired by code written by Amy Fong. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. */ #include "bcm-phy-lib.h" diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index fea61c81bda9..108ed24f8489 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/cicada.c * @@ -6,12 +7,6 @@ * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * */ #include #include diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c index 1a4d04afb7f0..856cdc36aacd 100644 --- a/drivers/net/phy/cortina.c +++ b/drivers/net/phy/cortina.c @@ -1,16 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2017 NXP * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * * CORTINA is a registered trademark of Cortina Systems, Inc. * */ @@ -89,10 +80,9 @@ static struct phy_driver cortina_driver[] = { .phy_id_mask = 0xffffffff, .name = "Cortina CS4340", .features = PHY_10GBIT_FEATURES, - .config_init = gen10g_config_init, .config_aneg = gen10g_config_aneg, .read_status = cortina_read_status, - .soft_reset = gen10g_no_soft_reset, + .soft_reset = genphy_no_soft_reset, .probe = cortina_probe, }, }; diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index 97162008f42b..bf39baa7f2c8 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/davicom.c * @@ -6,12 +7,6 @@ * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include #include diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 18b41bc345ab..2fe2ebaf62d1 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1,21 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Driver for the National Semiconductor DP83640 PHYTER * * Copyright (C) 2010 OMICRON electronics GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -898,14 +885,14 @@ static void decode_txts(struct dp83640_private *dp83640, struct phy_txts *phy_txts) { struct skb_shared_hwtstamps shhwtstamps; + struct dp83640_skb_info *skb_info; struct sk_buff *skb; - u64 ns; u8 overflow; + u64 ns; /* We must already have the skb that triggered this. 
*/ - +again: skb = skb_dequeue(&dp83640->tx_queue); - if (!skb) { pr_debug("have timestamp but tx_queue empty\n"); return; @@ -920,6 +907,11 @@ static void decode_txts(struct dp83640_private *dp83640, } return; } + skb_info = (struct dp83640_skb_info *)skb->cb; + if (time_after(jiffies, skb_info->tmo)) { + kfree_skb(skb); + goto again; + } ns = phy2txts(phy_txts); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); @@ -1472,6 +1464,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, static void dp83640_txtstamp(struct phy_device *phydev, struct sk_buff *skb, int type) { + struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; struct dp83640_private *dp83640 = phydev->priv; switch (dp83640->hwts_tx_en) { @@ -1484,6 +1477,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, /* fall through */ case HWTSTAMP_TX_ON: skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; skb_queue_tail(&dp83640->tx_queue, skb); break; diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 24c7f149f3e6..bbd8c22067f3 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for the Texas Instruments DP83822 PHY * * Copyright (C) 2017 Texas Instruments Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include @@ -338,4 +330,4 @@ MODULE_DEVICE_TABLE(mdio, dp83822_tbl); MODULE_DESCRIPTION("Texas Instruments DP83822 PHY driver"); MODULE_AUTHOR("Dan Murphy @@ -133,4 +125,4 @@ module_phy_driver(dp83848_driver); MODULE_DESCRIPTION("Texas Instruments DP83848 PHY driver"); MODULE_AUTHOR("Andrew F. Davis "); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index da6a67d47ce9..8448d01819ef 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -1,16 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for the Texas Instruments DP83867 PHY * * Copyright (C) 2015 Texas Instruments Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #include @@ -19,6 +11,7 @@ #include #include #include +#include #include @@ -135,17 +128,13 @@ static int dp83867_config_port_mirroring(struct phy_device *phydev) { struct dp83867_private *dp83867 = (struct dp83867_private *)phydev->priv; - u16 val; - - val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4); if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN) - val |= DP83867_CFG4_PORT_MIRROR_EN; + phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + DP83867_CFG4_PORT_MIRROR_EN); else - val &= ~DP83867_CFG4_PORT_MIRROR_EN; - - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val); - + phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + DP83867_CFG4_PORT_MIRROR_EN); return 0; } @@ -230,11 +219,9 @@ static int dp83867_config_init(struct phy_device *phydev) } /* RX_DV/RX_CTRL strapped in mode 1 or mode 2 workaround */ - if (dp83867->rxctrl_strap_quirk) { - val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4); - val &= ~BIT(7); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, val); - } + if (dp83867->rxctrl_strap_quirk) + phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + BIT(7)); if (phy_interface_is_rgmii(phydev)) { val = phy_read(phydev, MII_DP83867_PHYCTRL); @@ -283,17 +270,11 @@ static int dp83867_config_init(struct phy_device *phydev) phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL, delay); - if (dp83867->io_impedance >= 0) { - val = phy_read_mmd(phydev, DP83867_DEVADDR, - DP83867_IO_MUX_CFG); - - val &= ~DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL; - val |= dp83867->io_impedance & - DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL; - - phy_write_mmd(phydev, DP83867_DEVADDR, - DP83867_IO_MUX_CFG, val); - } + if (dp83867->io_impedance >= 0) + phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, + DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL, + dp83867->io_impedance & + DP83867_IO_MUX_CFG_IO_IMPEDANCE_CTRL); } /* Enable Interrupt output INT_OE in CFG3 register */ @@ -307,12 +288,11 @@ static int dp83867_config_init(struct phy_device *phydev) dp83867_config_port_mirroring(phydev); /* Clock output selection if muxing property is set */ - if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) { - val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG); - val &= ~DP83867_IO_MUX_CFG_CLK_O_SEL_MASK; - val |= (dp83867->clk_output_sel << DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT); - phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, val); - } + if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) + phy_modify_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, + DP83867_IO_MUX_CFG_CLK_O_SEL_MASK, + dp83867->clk_output_sel << + DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT); return 0; } @@ -325,6 +305,8 @@ static int dp83867_phy_reset(struct phy_device *phydev) if (err < 0) return err; + usleep_range(10, 20); + return dp83867_config_init(phydev); } @@ -357,4 +339,4 @@ MODULE_DEVICE_TABLE(mdio, dp83867_tbl); MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver"); MODULE_AUTHOR("Dan Murphy #include diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 72d43c88e6ff..1acd8bfdb3bc 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) * @@ -5,11 +6,6 @@ * Anton Vorontsov * * Copyright (c) 2006-2007 MontaVista Software, Inc. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ #include @@ -22,10 +18,11 @@ #include #include #include -#include +#include #include #include #include +#include #include "swphy.h" @@ -42,7 +39,7 @@ struct fixed_phy { bool no_carrier; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; - int link_gpio; + struct gpio_desc *link_gpiod; }; static struct platform_device *pdev; @@ -71,8 +68,8 @@ EXPORT_SYMBOL_GPL(fixed_phy_change_carrier); static void fixed_phy_update(struct fixed_phy *fp) { - if (!fp->no_carrier && gpio_is_valid(fp->link_gpio)) - fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio); + if (!fp->no_carrier && fp->link_gpiod) + fp->status.link = !!gpiod_get_value_cansleep(fp->link_gpiod); } static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) @@ -89,11 +86,11 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) s = read_seqcount_begin(&fp->seqcount); fp->status.link = !fp->no_carrier; /* Issue callback if user registered it. */ - if (fp->link_update) { + if (fp->link_update) fp->link_update(fp->phydev->attached_dev, &fp->status); - fixed_phy_update(fp); - } + /* Check the GPIO for change in status */ + fixed_phy_update(fp); state = fp->status; } while (read_seqcount_retry(&fp->seqcount, s)); @@ -137,9 +134,9 @@ int fixed_phy_set_link_update(struct phy_device *phydev, } EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); -int fixed_phy_add(unsigned int irq, int phy_addr, - struct fixed_phy_status *status, - int link_gpio) +static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr, + struct fixed_phy_status *status, + struct gpio_desc *gpiod) { int ret; struct fixed_mdio_bus *fmb = &platform_fmb; @@ -160,24 +157,19 @@ int fixed_phy_add(unsigned int irq, int phy_addr, fp->addr = phy_addr; fp->status = *status; - fp->link_gpio = link_gpio; - - if (gpio_is_valid(fp->link_gpio)) { - ret = gpio_request_one(fp->link_gpio, GPIOF_DIR_IN, - "fixed-link-gpio-link"); - if (ret) - goto err_regs; - } + fp->link_gpiod = gpiod; fixed_phy_update(fp); list_add_tail(&fp->node, &fmb->phys); return 0; +} -err_regs: - kfree(fp); - return ret; +int fixed_phy_add(unsigned int irq, int phy_addr, + struct fixed_phy_status *status) { + + return fixed_phy_add_gpiod(irq, phy_addr, status, NULL); } EXPORT_SYMBOL_GPL(fixed_phy_add); @@ -191,8 +183,8 @@ static void fixed_phy_del(int phy_addr) list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { if (fp->addr == phy_addr) { list_del(&fp->node); - if (gpio_is_valid(fp->link_gpio)) - gpio_free(fp->link_gpio); + if (fp->link_gpiod) + gpiod_put(fp->link_gpiod); kfree(fp); ida_simple_remove(&phy_fixed_ida, phy_addr); return; @@ -200,10 +192,48 @@ static void fixed_phy_del(int phy_addr) } } -struct phy_device *fixed_phy_register(unsigned int irq, - struct fixed_phy_status *status, - int link_gpio, - struct device_node *np) +#ifdef CONFIG_OF_GPIO +static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np) +{ + struct device_node *fixed_link_node; + struct gpio_desc *gpiod; + + if (!np) + return NULL; + + fixed_link_node = of_get_child_by_name(np, "fixed-link"); + if (!fixed_link_node) + return NULL; + + /* + * As the fixed link is just a device tree node without any + * Linux device associated with it, we simply have obtain + * the GPIO descriptor from the 
device tree like this. + */ + gpiod = gpiod_get_from_of_node(fixed_link_node, "link-gpios", 0, + GPIOD_IN, "mdio"); + of_node_put(fixed_link_node); + if (IS_ERR(gpiod)) { + if (PTR_ERR(gpiod) == -EPROBE_DEFER) + return gpiod; + pr_err("error getting GPIO for fixed link %pOF, proceed without\n", + fixed_link_node); + gpiod = NULL; + } + + return gpiod; +} +#else +static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np) +{ + return NULL; +} +#endif + +static struct phy_device *__fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np, + struct gpio_desc *gpiod) { struct fixed_mdio_bus *fmb = &platform_fmb; struct phy_device *phy; @@ -213,12 +243,19 @@ struct phy_device *fixed_phy_register(unsigned int irq, if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED) return ERR_PTR(-EPROBE_DEFER); + /* Check if we have a GPIO associated with this fixed phy */ + if (!gpiod) { + gpiod = fixed_phy_get_gpiod(np); + if (IS_ERR(gpiod)) + return ERR_CAST(gpiod); + } + /* Get the next available PHY address, up to PHY_MAX_ADDR */ phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL); if (phy_addr < 0) return ERR_PTR(phy_addr); - ret = fixed_phy_add(irq, phy_addr, status, link_gpio); + ret = fixed_phy_add_gpiod(irq, phy_addr, status, gpiod); if (ret < 0) { ida_simple_remove(&phy_fixed_ida, phy_addr); return ERR_PTR(ret); @@ -264,6 +301,8 @@ struct phy_device *fixed_phy_register(unsigned int irq, phy->supported); } + linkmode_copy(phy->advertising, phy->supported); + ret = phy_device_register(phy); if (ret) { phy_device_free(phy); @@ -274,8 +313,24 @@ struct phy_device *fixed_phy_register(unsigned int irq, return phy; } + +struct phy_device *fixed_phy_register(unsigned int irq, + struct fixed_phy_status *status, + struct device_node *np) +{ + return __fixed_phy_register(irq, status, np, NULL); +} EXPORT_SYMBOL_GPL(fixed_phy_register); +struct phy_device * +fixed_phy_register_with_gpiod(unsigned int irq, + struct fixed_phy_status *status, + struct gpio_desc *gpiod) +{ + return __fixed_phy_register(irq, status, NULL, gpiod); +} +EXPORT_SYMBOL_GPL(fixed_phy_register_with_gpiod); + void fixed_phy_unregister(struct phy_device *phy) { phy_device_remove(phy); diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 7d5938b87660..ebef8354bc81 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -1,13 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Driver for ICPlus PHYs * * Copyright (c) 2007 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include #include diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index fc0f5024a29e..02d9713318b6 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -1,16 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2012 Daniel Schwierzeck * Copyright (C) 2016 Hauke Mehrtens - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index c8bb29ae1a2a..a93d673baf35 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/lxt.c * @@ -6,12 +7,6 @@ * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include #include diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2e12f982534f..3ccba37bd6dd 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/marvell.c * @@ -8,12 +9,6 @@ * Copyright (c) 2004 Freescale Semiconductor, Inc. * * Copyright (c) 2013 Michael Stapelberg - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include #include @@ -847,7 +842,6 @@ static int m88e1510_config_init(struct phy_device *phydev) /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - /* Select page 18 */ err = marvell_set_page(phydev, 18); if (err < 0) @@ -870,21 +864,6 @@ static int m88e1510_config_init(struct phy_device *phydev) err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) return err; - - /* There appears to be a bug in the 88e1512 when used in - * SGMII to copper mode, where the AN advertisement register - * clears the pause bits each time a negotiation occurs. - * This means we can never be truely sure what was advertised, - * so disable Pause support. - */ - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->advertising); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->advertising); } return m88e1318_config_init(phydev); diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 82ab6ed3b74e..100b401b1f4a 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Marvell 10G 88x3310 PHY driver * @@ -26,6 +27,9 @@ #include #include +#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe +#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa) + enum { MV_PCS_BASE_T = 0x0000, MV_PCS_BASE_R = 0x1000, @@ -57,24 +61,6 @@ struct mv3310_priv { char *hwmon_name; }; -static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, - u16 mask, u16 bits) -{ - int old, val, ret; - - old = phy_read_mmd(phydev, devad, reg); - if (old < 0) - return old; - - val = (old & ~mask) | (bits & mask); - if (val == old) - return 0; - - ret = phy_write_mmd(phydev, devad, reg, val); - - return ret < 0 ? 
ret : 1; -} - #ifdef CONFIG_HWMON static umode_t mv3310_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, @@ -158,10 +144,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable) return ret; val = enable ? MV_V2_TEMP_CTRL_SAMPLE : MV_V2_TEMP_CTRL_DISABLE; - ret = mv3310_modify(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL, - MV_V2_TEMP_CTRL_MASK, val); - return ret < 0 ? ret : 0; + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP_CTRL, + MV_V2_TEMP_CTRL_MASK, val); } static void mv3310_hwmon_disable(void *data) @@ -249,95 +234,58 @@ static int mv3310_resume(struct phy_device *phydev) return mv3310_hwmon_config(phydev, true); } -static int mv3310_config_init(struct phy_device *phydev) +/* Some PHYs in the Alaska family such as the 88X3310 and the 88E2010 + * don't set bit 14 in PMA Extended Abilities (1.11), although they do + * support 2.5GBASET and 5GBASET. For these models, we can still read their + * 2.5G/5G extended abilities register (1.21). We detect these models based on + * the PMA device identifier, with a mask matching models known to have this + * issue + */ +static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; - int val; + if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_PMAPMD)) + return false; + /* Only some revisions of the 88X3310 family PMA seem to be impacted */ + return (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] & + MV_PHY_ALASKA_NBT_QUIRK_MASK) == MV_PHY_ALASKA_NBT_QUIRK_REV; +} + +static int mv3310_config_init(struct phy_device *phydev) +{ /* Check that the PHY interface type is compatible */ if (phydev->interface != PHY_INTERFACE_MODE_SGMII && + phydev->interface != PHY_INTERFACE_MODE_2500BASEX && phydev->interface != PHY_INTERFACE_MODE_XAUI && phydev->interface != PHY_INTERFACE_MODE_RXAUI && phydev->interface != PHY_INTERFACE_MODE_10GKR) return -ENODEV; - __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); - __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported); - - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); - if (val < 0) - return val; + return 0; +} - if (val & MDIO_AN_STAT1_ABLE) - __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported); - } +static int mv3310_get_features(struct phy_device *phydev) +{ + int ret, val; - val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT2); - if (val < 0) - return val; + ret = genphy_c45_pma_read_abilities(phydev); + if (ret) + return ret; - /* Ethtool does not support the WAN mode bits */ - if (val & (MDIO_PMA_STAT2_10GBSR | MDIO_PMA_STAT2_10GBLR | - MDIO_PMA_STAT2_10GBER | MDIO_PMA_STAT2_10GBLX4 | - MDIO_PMA_STAT2_10GBSW | MDIO_PMA_STAT2_10GBLW | - MDIO_PMA_STAT2_10GBEW)) - __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - if (val & MDIO_PMA_STAT2_10GBSR) - __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, supported); - if (val & MDIO_PMA_STAT2_10GBLR) - __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, supported); - if (val & MDIO_PMA_STAT2_10GBER) - __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, supported); - - if (val & MDIO_PMA_STAT2_EXTABLE) { - val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); + if (mv3310_has_pma_ngbaset_quirk(phydev)) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + MDIO_PMA_NG_EXTABLE); if (val < 0) return val; - if (val & (MDIO_PMA_EXTABLE_10GBT | MDIO_PMA_EXTABLE_1000BT | - MDIO_PMA_EXTABLE_100BTX | MDIO_PMA_EXTABLE_10BT)) - __set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported); - if (val & 
MDIO_PMA_EXTABLE_10GBLRM) - __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - if (val & (MDIO_PMA_EXTABLE_10GBKX4 | MDIO_PMA_EXTABLE_10GBKR | - MDIO_PMA_EXTABLE_1000BKX)) - __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, supported); - if (val & MDIO_PMA_EXTABLE_10GBLRM) - __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_10GBT) - __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_10GBKX4) - __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_10GBKR) - __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_1000BT) - __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_1000BKX) - __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - supported); - if (val & MDIO_PMA_EXTABLE_100BTX) { - __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - supported); - __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - supported); - } - if (val & MDIO_PMA_EXTABLE_10BT) { - __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, - supported); - __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, - supported); - } - } + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_NG_EXTABLE_2_5GBT); - linkmode_copy(phydev->supported, supported); - linkmode_and(phydev->advertising, phydev->advertising, - phydev->supported); + linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_NG_EXTABLE_5GBT); + } return 0; } @@ -351,52 +299,27 @@ static int mv3310_config_aneg(struct phy_device *phydev) /* We don't support manual MDI control */ phydev->mdix_ctrl = ETH_TP_MDI_AUTO; - if (phydev->autoneg == AUTONEG_DISABLE) { - ret = genphy_c45_pma_setup_forced(phydev); - if (ret < 0) - return ret; - - return genphy_c45_an_disable_aneg(phydev); - } - - linkmode_and(phydev->advertising, phydev->advertising, - phydev->supported); + if (phydev->autoneg == AUTONEG_DISABLE) + return genphy_c45_pma_setup_forced(phydev); - ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, - ADVERTISE_ALL | ADVERTISE_100BASE4 | - ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, - linkmode_adv_to_mii_adv_t(phydev->advertising)); + ret = genphy_c45_an_config_aneg(phydev); if (ret < 0) return ret; if (ret > 0) changed = true; + /* Clause 45 has no standardized support for 1000BaseT, therefore + * use vendor registers for this mode. 
+ */ reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); - ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000, - ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg); + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MV_AN_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg); if (ret < 0) return ret; if (ret > 0) changed = true; - /* 10G control register */ - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - phydev->advertising)) - reg = MDIO_AN_10GBT_CTRL_ADV10G; - else - reg = 0; - - ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, - MDIO_AN_10GBT_CTRL_ADV10G, reg); - if (ret < 0) - return ret; - if (ret > 0) - changed = true; - - if (changed) - ret = genphy_c45_restart_aneg(phydev); - - return ret; + return genphy_c45_check_and_restart_aneg(phydev, changed); } static int mv3310_aneg_done(struct phy_device *phydev) @@ -416,18 +339,29 @@ static int mv3310_aneg_done(struct phy_device *phydev) static void mv3310_update_interface(struct phy_device *phydev) { if ((phydev->interface == PHY_INTERFACE_MODE_SGMII || + phydev->interface == PHY_INTERFACE_MODE_2500BASEX || phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) { /* The PHY automatically switches its serdes interface (and - * active PHYXS instance) between Cisco SGMII and 10GBase-KR - * modes according to the speed. Florian suggests setting - * phydev->interface to communicate this to the MAC. Only do - * this if we are already in either SGMII or 10GBase-KR mode. + * active PHYXS instance) between Cisco SGMII, 10GBase-KR and + * 2500BaseX modes according to the speed. Florian suggests + * setting phydev->interface to communicate this to the MAC. + * Only do this if we are already in one of the above modes. */ - if (phydev->speed == SPEED_10000) + switch (phydev->speed) { + case SPEED_10000: phydev->interface = PHY_INTERFACE_MODE_10GKR; - else if (phydev->speed >= SPEED_10 && - phydev->speed < SPEED_10000) + break; + case SPEED_2500: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + case SPEED_1000: + case SPEED_100: + case SPEED_10: phydev->interface = PHY_INTERFACE_MODE_SGMII; + break; + default: + break; + } } } @@ -445,16 +379,8 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev) static int mv3310_read_status(struct phy_device *phydev) { - u32 mmd_mask = phydev->c45_ids.devices_in_package; int val; - /* The vendor devads do not report link status. Avoid the PHYXS - * instance as there are three, and its status depends on the MAC - * being appropriately configured for the negotiated speed. - */ - mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2) | - BIT(MDIO_MMD_PHYXS)); - phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; linkmode_zero(phydev->lp_advertising); @@ -470,12 +396,10 @@ static int mv3310_read_status(struct phy_device *phydev) if (val & MDIO_STAT1_LSTATUS) return mv3310_read_10gbr_status(phydev); - val = genphy_c45_read_link(phydev, mmd_mask); + val = genphy_c45_read_link(phydev); if (val < 0) return val; - phydev->link = val > 0 ? 
1 : 0; - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; @@ -531,11 +455,11 @@ static int mv3310_read_status(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { - .phy_id = 0x002b09aa, + .phy_id = MARVELL_PHY_ID_88X3310, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x3310", - .features = PHY_10GBIT_FEATURES, - .soft_reset = gen10g_no_soft_reset, + .get_features = mv3310_get_features, + .soft_reset = genphy_no_soft_reset, .config_init = mv3310_config_init, .probe = mv3310_probe, .suspend = mv3310_suspend, @@ -544,12 +468,25 @@ static struct phy_driver mv3310_drivers[] = { .aneg_done = mv3310_aneg_done, .read_status = mv3310_read_status, }, + { + .phy_id = MARVELL_PHY_ID_88E2110, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "mv88x2110", + .get_features = genphy_c45_pma_read_abilities, + .probe = mv3310_probe, + .soft_reset = genphy_no_soft_reset, + .config_init = mv3310_config_init, + .config_aneg = mv3310_config_aneg, + .aneg_done = mv3310_aneg_done, + .read_status = mv3310_read_status, + }, }; module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { 0x002b09aa, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK }, { }, }; MODULE_DEVICE_TABLE(mdio, mv3310_tbl); diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c index 46fe1ae919a3..7d0f388d8db8 100644 --- a/drivers/net/phy/mdio-bcm-iproc.c +++ b/drivers/net/phy/mdio-bcm-iproc.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index df75efa96a7d..8295bc7c8c20 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Broadcom UniMAC MDIO bus controller driver * * Copyright (C) 2014-2017 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. */ #include diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c index 15352f987bdf..5136275c8e73 100644 --- a/drivers/net/phy/mdio-bitbang.c +++ b/drivers/net/phy/mdio-bitbang.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Bitbanged MDIO support. * @@ -11,10 +12,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. 
*/ #include @@ -232,4 +229,4 @@ void free_mdio_bitbang(struct mii_bus *bus) } EXPORT_SYMBOL(free_mdio_bitbang); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index 863496fa5d13..d9b54c67ef9f 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -1,10 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * mdio-boardinfo - Collect pre-declarations for MDIO devices - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ #include diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/phy/mdio-cavium.c index 6df2fa755bb4..1afd6fc1a351 100644 --- a/drivers/net/phy/mdio-cavium.c +++ b/drivers/net/phy/mdio-cavium.c @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * * Copyright (C) 2009-2016 Cavium, Inc. */ @@ -150,4 +147,4 @@ EXPORT_SYMBOL(cavium_mdiobus_write); MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers"); MODULE_AUTHOR("David Daney"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h index 4bccd45d24e2..ed5f9bb5448d 100644 --- a/drivers/net/phy/mdio-cavium.h +++ b/drivers/net/phy/mdio-cavium.h @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * * Copyright (C) 2009-2016 Cavium, Inc. */ diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index ea9a0e339778..1b00235d7dc5 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * GPIO based MDIO bitbang driver. * Supports OpenFirmware. @@ -14,10 +15,6 @@ * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include @@ -216,5 +213,5 @@ module_platform_driver(mdio_gpio_driver); MODULE_ALIAS("platform:mdio-gpio"); MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Generic driver for MDIO bus emulation using GPIO"); diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c index 6d24fd13ca86..0dce67672548 100644 --- a/drivers/net/phy/mdio-i2c.c +++ b/drivers/net/phy/mdio-i2c.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * MDIO I2C bridge * * Copyright (C) 2015-2016 Russell King * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * * Network PHYs can appear on I2C buses when they are part of SFP module. 
* This driver exposes these PHYs to the networking PHY code, allowing * our PHY drivers access to these PHYs, and so allowing configuration diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h index 889ab57d7f3e..751dab281f57 100644 --- a/drivers/net/phy/mdio-i2c.h +++ b/drivers/net/phy/mdio-i2c.h @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * MDIO I2C bridge * * Copyright (C) 2015 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef MDIO_I2C_H #define MDIO_I2C_H diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/phy/mdio-moxart.c index 5bb56d126693..af3910fe8ec7 100644 --- a/drivers/net/phy/mdio-moxart.c +++ b/drivers/net/phy/mdio-moxart.c @@ -1,10 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* MOXA ART Ethernet (RTL8201CP) MDIO interface driver * * Copyright (C) 2013 Jonas Jensen - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include @@ -190,4 +187,4 @@ module_platform_driver(moxart_mdio_driver); MODULE_DESCRIPTION("MOXA ART MDIO interface driver"); MODULE_AUTHOR("Jonas Jensen "); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c index 696bdf1e4576..88d409e48c1f 100644 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2016 Broadcom - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation (the "GPL"). - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 (GPLv2) for more details. - * - * You should have received a copy of the GNU General Public License - * version 2 (GPLv2) along with this source code. */ #include #include diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index fe34576262bd..6c8960df43b0 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * * Copyright (C) 2011, 2012 Cavium, Inc. */ @@ -103,4 +100,4 @@ module_platform_driver(mdio_mux_gpio_driver); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR("David Daney"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 70f6115530af..d1a8780e24d8 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Simple memory-mapped device MDIO MUX driver * * Author: Timur Tabi * * Copyright 2012 Freescale Semiconductor, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. 
This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include diff --git a/drivers/net/phy/mdio-mux-multiplexer.c b/drivers/net/phy/mdio-mux-multiplexer.c new file mode 100644 index 000000000000..d6564381aa3e --- /dev/null +++ b/drivers/net/phy/mdio-mux-multiplexer.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* MDIO bus multiplexer using kernel multiplexer subsystem + * + * Copyright 2019 NXP + */ + +#include +#include +#include +#include + +struct mdio_mux_multiplexer_state { + struct mux_control *muxc; + bool do_deselect; + void *mux_handle; +}; + +/** + * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux + * layer when it thinks the mdio bus + * multiplexer needs to switch. + * @current_child: current value of the mux register. + * @desired_child: value of the 'reg' property of the target child MDIO node. + * @data: Private data used by this switch_fn passed to mdio_mux_init function + * via mdio_mux_init(.., .., .., .., data, ..). + * + * The first time this function is called, current_child == -1. + * If current_child == desired_child, then the mux is already set to the + * correct bus. + */ +static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child, + void *data) +{ + struct platform_device *pdev; + struct mdio_mux_multiplexer_state *s; + int ret = 0; + + pdev = (struct platform_device *)data; + s = platform_get_drvdata(pdev); + + if (!(current_child ^ desired_child)) + return 0; + + if (s->do_deselect) + ret = mux_control_deselect(s->muxc); + if (ret) { + dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n", + __func__, ret); + return ret; + } + + ret = mux_control_select(s->muxc, desired_child); + if (!ret) { + dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child, + desired_child); + s->do_deselect = true; + } else { + s->do_deselect = false; + } + + return ret; +} + +static int mdio_mux_multiplexer_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mdio_mux_multiplexer_state *s; + int ret = 0; + + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->muxc = devm_mux_control_get(dev, NULL); + if (IS_ERR(s->muxc)) { + ret = PTR_ERR(s->muxc); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to get mux: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, s); + + ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node, + mdio_mux_multiplexer_switch_fn, &s->mux_handle, + pdev, NULL); + + return ret; +} + +static int mdio_mux_multiplexer_remove(struct platform_device *pdev) +{ + struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev); + + mdio_mux_uninit(s->mux_handle); + + if (s->do_deselect) + mux_control_deselect(s->muxc); + + return 0; +} + +static const struct of_device_id mdio_mux_multiplexer_match[] = { + { .compatible = "mdio-mux-multiplexer", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match); + +static struct platform_driver mdio_mux_multiplexer_driver = { + .driver = { + .name = "mdio-mux-multiplexer", + .of_match_table = mdio_mux_multiplexer_match, + }, + .probe = mdio_mux_multiplexer_probe, + .remove = mdio_mux_multiplexer_remove, +}; + +module_platform_driver(mdio_mux_multiplexer_driver); + +MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem"); +MODULE_AUTHOR("Pankaj Bansal "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 0a86f1e4c02f..6a1d3540210b 100644 --- 
a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * * Copyright (C) 2011, 2012 Cavium, Inc. */ @@ -210,4 +207,4 @@ EXPORT_SYMBOL_GPL(mdio_mux_uninit); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("David Daney"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index ab6914f8bd50..8327382aa568 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * * Copyright (C) 2009-2015 Cavium, Inc. */ @@ -122,4 +119,4 @@ module_platform_driver(octeon_mdiobus_driver); MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver"); MODULE_AUTHOR("David Daney"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 6425ce04d3f9..20ffd8fb79ce 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Allwinner EMAC MDIO interface driver * @@ -6,10 +7,6 @@ * * Based on the Linux driver provided by Allwinner: * Copyright (C) 1997 Sten Wang - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. */ #include @@ -179,4 +176,4 @@ module_platform_driver(sun4i_mdio_driver); MODULE_DESCRIPTION("Allwinner EMAC MDIO interface driver"); MODULE_AUTHOR("Maxime Ripard "); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c index 1546f6398831..b6128ae7f14f 100644 --- a/drivers/net/phy/mdio-thunder.c +++ b/drivers/net/phy/mdio-thunder.c @@ -1,8 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * * Copyright (C) 2009-2016 Cavium, Inc. */ @@ -151,4 +148,4 @@ static struct pci_driver thunder_mdiobus_driver = { module_pci_driver(thunder_mdiobus_driver); MODULE_DESCRIPTION("Cavium ThunderX MDIO bus driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c index 07c6048200c6..717cc2a056e8 100644 --- a/drivers/net/phy/mdio-xgene.c +++ b/drivers/net/phy/mdio-xgene.c @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Applied Micro X-Gene SoC MDIO Driver * * Copyright (c) 2016, Applied Micro Circuits Corporation * Author: Iyappan Subramanian - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . */ #include diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h index 3c85f3e30baa..b1f5ccb4ad9c 100644 --- a/drivers/net/phy/mdio-xgene.h +++ b/drivers/net/phy/mdio-xgene.h @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Applied Micro X-Gene SoC MDIO Driver * * Copyright (c) 2016, Applied Micro Circuits Corporation * Author: Iyappan Subramanian - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . */ #ifndef __MDIO_XGENE_H__ diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 66b9cfe692fc..4be4cc09eb90 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -1,14 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0+ /* MDIO Bus interface * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -39,8 +34,6 @@ #include #include -#include - #define CREATE_TRACE_POINTS #include @@ -55,11 +48,12 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev) gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode, "reset-gpios", 0, GPIOD_OUT_LOW, "PHY reset"); - if (PTR_ERR(gpiod) == -ENOENT || - PTR_ERR(gpiod) == -ENOSYS) - gpiod = NULL; - else if (IS_ERR(gpiod)) - return PTR_ERR(gpiod); + if (IS_ERR(gpiod)) { + if (PTR_ERR(gpiod) == -ENOENT || PTR_ERR(gpiod) == -ENOSYS) + gpiod = NULL; + else + return PTR_ERR(gpiod); + } mdiodev->reset = gpiod; @@ -379,7 +373,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) err = device_register(&bus->dev); if (err) { pr_err("mii_bus %s failed to register\n", bus->id); - put_device(&bus->dev); return -EINVAL; } diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index c924700cf37b..887076292e50 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -1,12 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Framework for MDIO devices, other than PHYs. * * Copyright (c) 2016 Andrew Lunn - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 3ddaf9595697..a238388eb1a5 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -1,20 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Amlogic Meson GXL Internal PHY Driver * * Copyright (C) 2015 Amlogic, Inc. 
All rights reserved. * Copyright (C) 2016 BayLibre, SAS. All rights reserved. * Author: Neil Armstrong - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * */ #include #include diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index b1f959935f50..352da24f1f33 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/micrel.c * @@ -8,11 +9,6 @@ * Copyright (c) 2010-2013 Micrel, Inc. * Copyright (c) 2014 Johan Hovold * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * * Support : Micrel Phys: * Giga phys: ksz9021, ksz9031, ksz9131 * 100/10 Phys : ksz8001, ksz8721, ksz8737, ksz8041 @@ -344,6 +340,17 @@ static int ksz8041_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static int ksz8061_config_init(struct phy_device *phydev) +{ + int ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A); + if (ret) + return ret; + + return kszphy_config_init(phydev); +} + static int ksz9021_load_values_from_of(struct phy_device *phydev, const struct device_node *of_node, u16 reg, @@ -426,9 +433,6 @@ static int ksz9021_config_init(struct phy_device *phydev) return 0; } -#define MII_KSZ9031RN_MMD_CTRL_REG 0x0d -#define MII_KSZ9031RN_MMD_REGDATA_REG 0x0e -#define OP_DATA 1 #define KSZ9031_PS_TO_REG 60 /* Extended registers */ @@ -446,24 +450,6 @@ static int ksz9021_config_init(struct phy_device *phydev) #define MII_KSZ9031RN_EDPD 0x23 #define MII_KSZ9031RN_EDPD_ENABLE BIT(0) -static int ksz9031_extended_write(struct phy_device *phydev, - u8 mode, u32 dev_addr, u32 regnum, u16 val) -{ - phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr); - phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum); - phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr); - return phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, val); -} - -static int ksz9031_extended_read(struct phy_device *phydev, - u8 mode, u32 dev_addr, u32 regnum) -{ - phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, dev_addr); - phy_write(phydev, MII_KSZ9031RN_MMD_REGDATA_REG, regnum); - phy_write(phydev, MII_KSZ9031RN_MMD_CTRL_REG, (mode << 14) | dev_addr); - return phy_read(phydev, MII_KSZ9031RN_MMD_REGDATA_REG); -} - static int ksz9031_of_load_skew_values(struct phy_device *phydev, const struct device_node *of_node, u16 reg, size_t field_sz, @@ -484,7 +470,7 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev, return 0; if (matches < numfields) - newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg); + newval = phy_read_mmd(phydev, 2, reg); else newval = 0; @@ -498,7 +484,7 @@ static int ksz9031_of_load_skew_values(struct phy_device *phydev, << (field_sz * i)); } - return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval); + return phy_write_mmd(phydev, 2, reg, newval); } /* Center KSZ9031RNX FLP timing at 
16ms. */ @@ -506,13 +492,13 @@ static int ksz9031_center_flp_timing(struct phy_device *phydev) { int result; - result = ksz9031_extended_write(phydev, OP_DATA, 0, - MII_KSZ9031RN_FLP_BURST_TX_HI, 0x0006); + result = phy_write_mmd(phydev, 0, MII_KSZ9031RN_FLP_BURST_TX_HI, + 0x0006); if (result) return result; - result = ksz9031_extended_write(phydev, OP_DATA, 0, - MII_KSZ9031RN_FLP_BURST_TX_LO, 0x1A80); + result = phy_write_mmd(phydev, 0, MII_KSZ9031RN_FLP_BURST_TX_LO, + 0x1A80); if (result) return result; @@ -524,11 +510,11 @@ static int ksz9031_enable_edpd(struct phy_device *phydev) { int reg; - reg = ksz9031_extended_read(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD); + reg = phy_read_mmd(phydev, 0x1C, MII_KSZ9031RN_EDPD); if (reg < 0) return reg; - return ksz9031_extended_write(phydev, OP_DATA, 0x1C, MII_KSZ9031RN_EDPD, - reg | MII_KSZ9031RN_EDPD_ENABLE); + return phy_write_mmd(phydev, 0x1C, MII_KSZ9031RN_EDPD, + reg | MII_KSZ9031RN_EDPD_ENABLE); } static int ksz9031_config_init(struct phy_device *phydev) @@ -654,7 +640,7 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev, return 0; if (matches < numfields) - newval = ksz9031_extended_read(phydev, OP_DATA, 2, reg); + newval = phy_read_mmd(phydev, 2, reg); else newval = 0; @@ -668,7 +654,7 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev, << (field_sz * i)); } - return ksz9031_extended_write(phydev, OP_DATA, 2, reg, newval); + return phy_write_mmd(phydev, 2, reg, newval); } static int ksz9131_config_init(struct phy_device *phydev) @@ -1040,7 +1026,7 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8061", .phy_id_mask = MICREL_PHY_ID_MASK, .features = PHY_BASIC_FEATURES, - .config_init = kszphy_config_init, + .config_init = ksz8061_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, .suspend = genphy_suspend, diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 7557bebd5d7f..c6cbb3aa8ae0 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -1,18 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2015 Microchip Technology - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see . 
*/ #include #include diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 3949fe299b18..db50efb30df5 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) /* * Driver for Microsemi VSC85xx PHYs * diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 139bed2c8ab4..42282a86b680 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/national.c * @@ -7,12 +8,6 @@ * Maintainer: Giuseppe Cavallaro * * Copyright (c) 2008 STMicroelectronics Limited - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 03af927fa5ad..9e24d9569424 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -47,6 +47,16 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) /* Assume 1000base-T */ ctrl2 |= MDIO_PMA_CTRL2_1000BT; break; + case SPEED_2500: + ctrl1 |= MDIO_CTRL1_SPEED2_5G; + /* Assume 2.5Gbase-T */ + ctrl2 |= MDIO_PMA_CTRL2_2_5GBT; + break; + case SPEED_5000: + ctrl1 |= MDIO_CTRL1_SPEED5G; + /* Assume 5Gbase-T */ + ctrl2 |= MDIO_PMA_CTRL2_5GBT; + break; case SPEED_10000: ctrl1 |= MDIO_CTRL1_SPEED10G; /* Assume 10Gbase-T */ @@ -60,10 +70,59 @@ int genphy_c45_pma_setup_forced(struct phy_device *phydev) if (ret < 0) return ret; - return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2); + ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL2, ctrl2); + if (ret < 0) + return ret; + + return genphy_c45_an_disable_aneg(phydev); } EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); +/** + * genphy_c45_an_config_aneg - configure advertisement registers + * @phydev: target phy_device struct + * + * Configure advertisement registers based on modes set in phydev->advertising + * + * Returns negative errno code on failure, 0 if advertisement didn't change, + * or 1 if advertised modes changed. 
+ */ +int genphy_c45_an_config_aneg(struct phy_device *phydev) +{ + int changed, ret; + u32 adv; + + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); + + changed = genphy_config_eee_advert(phydev); + + adv = linkmode_adv_to_mii_adv_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, + ADVERTISE_ALL | ADVERTISE_100BASE4 | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, + adv); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising); + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV10G | + MDIO_AN_10GBT_CTRL_ADV5G | + MDIO_AN_10GBT_CTRL_ADV2_5G, adv); + if (ret < 0) + return ret; + if (ret > 0) + changed = 1; + + return changed; +} +EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg); + /** * genphy_c45_an_disable_aneg - disable auto-negotiation * @phydev: target phy_device struct @@ -75,15 +134,9 @@ EXPORT_SYMBOL_GPL(genphy_c45_pma_setup_forced); */ int genphy_c45_an_disable_aneg(struct phy_device *phydev) { - int val; - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); - if (val < 0) - return val; - - val &= ~(MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); - - return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val); + return phy_clear_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); } EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); @@ -97,17 +150,40 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_disable_aneg); */ int genphy_c45_restart_aneg(struct phy_device *phydev) { - int val; + return phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, + MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART); +} +EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); - val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); - if (val < 0) - return val; +/** + * genphy_c45_check_and_restart_aneg - Enable and restart auto-negotiation + * @phydev: target phy_device struct + * @restart: whether aneg restart is requested + * + * This assumes that the auto-negotiation MMD is present. + * + * Check, and restart auto-negotiation if needed. + */ +int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart) +{ + int ret = 0; + + if (!restart) { + /* Configure and restart aneg if it wasn't set before */ + ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); + if (ret < 0) + return ret; - val |= MDIO_AN_CTRL1_ENABLE | MDIO_AN_CTRL1_RESTART; + if (!(ret & MDIO_AN_CTRL1_ENABLE)) + restart = true; + } + + if (restart) + ret = genphy_c45_restart_aneg(phydev); - return phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, val); + return ret; } -EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg); +EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg); /** * genphy_c45_aneg_done - return auto-negotiation complete status @@ -131,25 +207,33 @@ EXPORT_SYMBOL_GPL(genphy_c45_aneg_done); /** * genphy_c45_read_link - read the overall link status from the MMDs * @phydev: target phy_device struct - * @mmd_mask: MMDs to read status from * * Read the link status from the specified MMDs, and if they all indicate - * that the link is up, return positive. If an error is encountered, + * that the link is up, set phydev->link to 1. If an error is encountered, * a negative errno will be returned, otherwise zero. 
*/ -int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask) +int genphy_c45_read_link(struct phy_device *phydev) { + u32 mmd_mask = MDIO_DEVS_PMAPMD; int val, devad; bool link = true; - while (mmd_mask) { + while (mmd_mask && link) { devad = __ffs(mmd_mask); mmd_mask &= ~BIT(devad); /* The link state is latched low so that momentary link - * drops can be detected. Do not double-read the status - * register if the link is down. + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops. */ + if (!phy_polling_mode(phydev)) { + val = phy_read_mmd(phydev, devad, MDIO_STAT1); + if (val < 0) + return val; + else if (val & MDIO_STAT1_LSTATUS) + continue; + } + val = phy_read_mmd(phydev, devad, MDIO_STAT1); if (val < 0) return val; @@ -158,7 +242,9 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask) link = false; } - return link; + phydev->link = link; + + return 0; } EXPORT_SYMBOL_GPL(genphy_c45_read_link); @@ -181,7 +267,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev) if (val < 0) return val; - mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val); + mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, val); phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0; phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0; @@ -190,9 +276,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev) if (val < 0) return val; - if (val & MDIO_AN_10GBT_STAT_LP10G) - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - phydev->lp_advertising); + mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, val); return 0; } @@ -220,6 +304,12 @@ int genphy_c45_read_pma(struct phy_device *phydev) case MDIO_PMA_CTRL1_SPEED1000: phydev->speed = SPEED_1000; break; + case MDIO_CTRL1_SPEED2_5G: + phydev->speed = SPEED_2500; + break; + case MDIO_CTRL1_SPEED5G: + phydev->speed = SPEED_5000; + break; case MDIO_CTRL1_SPEED10G: phydev->speed = SPEED_10000; break; @@ -267,75 +357,162 @@ int genphy_c45_read_mdix(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_mdix); -/* The gen10g_* functions are the old Clause 45 stub */ - -int gen10g_config_aneg(struct phy_device *phydev) +/** + * genphy_c45_pma_read_abilities - read supported link modes from PMA + * @phydev: target phy_device struct + * + * Read the supported link modes from the PMA Status 2 (1.8) register. If bit + * 1.8.9 is set, the list of supported modes is build using the values in the + * PMA Extended Abilities (1.11) register, indicating 1000BASET an 10G related + * modes. If bit 1.11.14 is set, then the list is also extended with the modes + * in the 2.5G/5G PMA Extended register (1.21), indicating if 2.5GBASET and + * 5GBASET are supported. 
+ */ +int genphy_c45_pma_read_abilities(struct phy_device *phydev) { - return 0; -} -EXPORT_SYMBOL_GPL(gen10g_config_aneg); + int val; -int gen10g_read_status(struct phy_device *phydev) -{ - u32 mmd_mask = phydev->c45_ids.devices_in_package; - int ret; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); + if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); + if (val < 0) + return val; - /* For now just lie and say it's 10G all the time */ - phydev->speed = SPEED_10000; - phydev->duplex = DUPLEX_FULL; + if (val & MDIO_AN_STAT1_ABLE) + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported); + } - /* Avoid reading the vendor MMDs */ - mmd_mask &= ~(BIT(MDIO_MMD_VEND1) | BIT(MDIO_MMD_VEND2)); + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_STAT2); + if (val < 0) + return val; - ret = genphy_c45_read_link(phydev, mmd_mask); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + phydev->supported, + val & MDIO_PMA_STAT2_10GBSR); - phydev->link = ret > 0 ? 1 : 0; + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + phydev->supported, + val & MDIO_PMA_STAT2_10GBLR); - return 0; -} -EXPORT_SYMBOL_GPL(gen10g_read_status); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + phydev->supported, + val & MDIO_PMA_STAT2_10GBER); + + if (val & MDIO_PMA_STAT2_EXTABLE) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_EXTABLE); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBLRM); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBT); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBKX4); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10GBKR); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_1000BT); + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_1000BKX); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_100BTX); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_100BTX); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10BT); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + phydev->supported, + val & MDIO_PMA_EXTABLE_10BT); + + if (val & MDIO_PMA_EXTABLE_NBT) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + MDIO_PMA_NG_EXTABLE); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_NG_EXTABLE_2_5GBT); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->supported, + val & MDIO_PMA_NG_EXTABLE_5GBT); + } + } -int gen10g_no_soft_reset(struct phy_device *phydev) -{ - /* Do nothing for now */ return 0; } -EXPORT_SYMBOL_GPL(gen10g_no_soft_reset); +EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities); -int gen10g_config_init(struct phy_device *phydev) +/** + * genphy_c45_read_status - read PHY status + * @phydev: target phy_device struct + * + * Reads status from PHY and sets phy_device members accordingly. 
+ */ +int genphy_c45_read_status(struct phy_device *phydev) { - /* Temporarily just say we support everything */ - linkmode_zero(phydev->supported); + int ret; - linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - phydev->supported); - linkmode_copy(phydev->advertising, phydev->supported); + ret = genphy_c45_read_link(phydev); + if (ret) + return ret; - return 0; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + + if (phydev->autoneg == AUTONEG_ENABLE) { + ret = genphy_c45_read_lpa(phydev); + if (ret) + return ret; + + phy_resolve_aneg_linkmode(phydev); + } else { + ret = genphy_c45_read_pma(phydev); + } + + return ret; } -EXPORT_SYMBOL_GPL(gen10g_config_init); +EXPORT_SYMBOL_GPL(genphy_c45_read_status); -int gen10g_suspend(struct phy_device *phydev) +/* The gen10g_* functions are the old Clause 45 stub */ + +int gen10g_config_aneg(struct phy_device *phydev) { return 0; } -EXPORT_SYMBOL_GPL(gen10g_suspend); +EXPORT_SYMBOL_GPL(gen10g_config_aneg); -int gen10g_resume(struct phy_device *phydev) +static int gen10g_read_status(struct phy_device *phydev) { - return 0; + /* For now just lie and say it's 10G all the time */ + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + + return genphy_c45_read_link(phydev); } -EXPORT_SYMBOL_GPL(gen10g_resume); struct phy_driver genphy_10g_driver = { .phy_id = 0xffffffff, .phy_id_mask = 0xffffffff, .name = "Generic 10G PHY", - .soft_reset = gen10g_no_soft_reset, - .config_init = gen10g_config_init, + .soft_reset = genphy_no_soft_reset, .features = PHY_10GBIT_FEATURES, .config_aneg = gen10g_config_aneg, .read_status = gen10g_read_status, - .suspend = gen10g_suspend, - .resume = gen10g_resume, }; diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 20fbd5eb56fd..5016cd5fd7c7 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Core PHY library, taken from phy.c - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
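For context, the net effect of the phy-c45.c hunks above is that a Clause 45 PHY driver can now be built almost entirely from the generic helpers: genphy_c45_pma_read_abilities() fills phydev->supported at probe time, genphy_c45_an_config_aneg() plus genphy_c45_check_and_restart_aneg() implement config_aneg, and genphy_c45_read_status() replaces the old gen10g stubs. Below is a minimal sketch of such a driver, not part of this patch; the PHY ID and driver name are hypothetical, and it assumes the matching phy.h additions from this series (the get_features hook and the new genphy_c45_* prototypes).

#include <linux/module.h>
#include <linux/phy.h>

static int example_c45_config_aneg(struct phy_device *phydev)
{
        int ret;

        if (phydev->autoneg == AUTONEG_DISABLE)
                return genphy_c45_pma_setup_forced(phydev);

        ret = genphy_c45_an_config_aneg(phydev);
        if (ret < 0)
                return ret;

        /* ret > 0 means the advertisement changed, so aneg must restart */
        return genphy_c45_check_and_restart_aneg(phydev, ret > 0);
}

static struct phy_driver example_c45_driver[] = { {
        .phy_id         = 0x00000000,           /* hypothetical PHY ID */
        .phy_id_mask    = 0xffffffff,
        .name           = "Example Clause 45 PHY",
        .get_features   = genphy_c45_pma_read_abilities,
        .config_aneg    = example_c45_config_aneg,
        .aneg_done      = genphy_c45_aneg_done,
        .read_status    = genphy_c45_read_status,
} };
module_phy_driver(example_c45_driver);

MODULE_LICENSE("GPL");

This mirrors how the generic 10G driver itself is reduced above: it keeps only genphy_no_soft_reset, gen10g_config_aneg and a read_status built on genphy_c45_read_link().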
*/ #include #include +#include const char *phy_speed_to_str(int speed) { @@ -342,6 +339,77 @@ size_t phy_speeds(unsigned int *speeds, size_t size, return count; } +static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) +{ + const struct phy_setting *p; + int i; + + for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { + if (p->speed > max_speed) + linkmode_clear_bit(p->bit, phydev->supported); + else + break; + } + + return 0; +} + +int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) +{ + int err; + + err = __set_phy_supported(phydev, max_speed); + if (err) + return err; + + linkmode_copy(phydev->advertising, phydev->supported); + + return 0; +} +EXPORT_SYMBOL(phy_set_max_speed); + +void of_set_phy_supported(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + u32 max_speed; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return; + + if (!node) + return; + + if (!of_property_read_u32(node, "max-speed", &max_speed)) + __set_phy_supported(phydev, max_speed); +} + +void of_set_phy_eee_broken(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + u32 broken = 0; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return; + + if (!node) + return; + + if (of_property_read_bool(node, "eee-broken-100tx")) + broken |= MDIO_EEE_100TX; + if (of_property_read_bool(node, "eee-broken-1000t")) + broken |= MDIO_EEE_1000T; + if (of_property_read_bool(node, "eee-broken-10gt")) + broken |= MDIO_EEE_10GT; + if (of_property_read_bool(node, "eee-broken-1000kx")) + broken |= MDIO_EEE_1000KX; + if (of_property_read_bool(node, "eee-broken-10gkx4")) + broken |= MDIO_EEE_10GKX4; + if (of_property_read_bool(node, "eee-broken-10gkr")) + broken |= MDIO_EEE_10GKR; + + phydev->eee_broken_modes = broken; +} + /** * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings * @phydev: The phy_device struct @@ -353,45 +421,16 @@ size_t phy_speeds(unsigned int *speeds, size_t size, void phy_resolve_aneg_linkmode(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(common); + int i; linkmode_and(common, phydev->lp_advertising, phydev->advertising); - if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) { - phydev->speed = SPEED_10000; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, - common)) { - phydev->speed = SPEED_5000; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, - common)) { - phydev->speed = SPEED_2500; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - common)) { - phydev->speed = SPEED_1000; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - common)) { - phydev->speed = SPEED_1000; - phydev->duplex = DUPLEX_HALF; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - common)) { - phydev->speed = SPEED_100; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - common)) { - phydev->speed = SPEED_100; - phydev->duplex = DUPLEX_HALF; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, - common)) { - phydev->speed = SPEED_10; - phydev->duplex = DUPLEX_FULL; - } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, - common)) { - phydev->speed = SPEED_10; - phydev->duplex = DUPLEX_HALF; - } + for (i = 0; i < ARRAY_SIZE(settings); i++) + if (test_bit(settings[i].bit, common)) { + phydev->speed = 
settings[i].speed; + phydev->duplex = settings[i].duplex; + break; + } if (phydev->duplex == DUPLEX_FULL) { phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, @@ -418,15 +457,15 @@ static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, } /** - * phy_read_mmd - Convenience function for reading a register + * __phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. * @phydev: The phy_device struct * @devad: The MMD to read from (0..31) * @regnum: The register on the MMD to read (0..65535) * - * Same rules as for phy_read(); + * Same rules as for __phy_read(); */ -int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) +int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) { int val; @@ -438,33 +477,52 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) } else if (phydev->is_c45) { u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); - val = mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr); + val = __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, addr); } else { struct mii_bus *bus = phydev->mdio.bus; int phy_addr = phydev->mdio.addr; - mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Read the content of the MMD's selected register */ val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA); - mutex_unlock(&bus->mdio_lock); } return val; } +EXPORT_SYMBOL(__phy_read_mmd); + +/** + * phy_read_mmd - Convenience function for reading a register + * from an MMD on a given PHY. + * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * + * Same rules as for phy_read(); + */ +int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_read_mmd(phydev, devad, regnum); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} EXPORT_SYMBOL(phy_read_mmd); /** - * phy_write_mmd - Convenience function for writing a register + * __phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. * @phydev: The phy_device struct * @devad: The MMD to read from * @regnum: The register on the MMD to read * @val: value to write to @regnum * - * Same rules as for phy_write(); + * Same rules as for __phy_write(); */ -int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) +int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) { int ret; @@ -476,27 +534,47 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) } else if (phydev->is_c45) { u32 addr = MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff); - ret = mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, - addr, val); + ret = __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, + addr, val); } else { struct mii_bus *bus = phydev->mdio.bus; int phy_addr = phydev->mdio.addr; - mutex_lock(&bus->mdio_lock); mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Write the data into MMD's selected register */ __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val); - mutex_unlock(&bus->mdio_lock); ret = 0; } return ret; } +EXPORT_SYMBOL(__phy_write_mmd); + +/** + * phy_write_mmd - Convenience function for writing a register + * on an MMD on a given PHY. 
+ * @phydev: The phy_device struct + * @devad: The MMD to read from + * @regnum: The register on the MMD to read + * @val: value to write to @regnum + * + * Same rules as for phy_write(); + */ +int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_write_mmd(phydev, devad, regnum, val); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} EXPORT_SYMBOL(phy_write_mmd); /** - * __phy_modify() - Convenience function for modifying a PHY register + * __phy_modify_changed() - Convenience function for modifying a PHY register * @phydev: a pointer to a &struct phy_device * @regnum: register number * @mask: bit mask of bits to clear @@ -504,16 +582,69 @@ EXPORT_SYMBOL(phy_write_mmd); * * Unlocked helper function which allows a PHY register to be modified as * new register value = (old register value & ~mask) | set + * + * Returns negative errno, 0 if there was no change, and 1 in case of change */ -int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, + u16 set) { - int ret; + int new, ret; ret = __phy_read(phydev, regnum); if (ret < 0) return ret; - ret = __phy_write(phydev, regnum, (ret & ~mask) | set); + new = (ret & ~mask) | set; + if (new == ret) + return 0; + + ret = __phy_write(phydev, regnum, new); + + return ret < 0 ? ret : 1; +} +EXPORT_SYMBOL_GPL(__phy_modify_changed); + +/** + * phy_modify_changed - Function for modifying a PHY register + * @phydev: the phy_device struct + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + * + * Returns negative errno, 0 if there was no change, and 1 in case of change + */ +int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify_changed(phydev, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify_changed); + +/** + * __phy_modify - Convenience function for modifying a PHY register + * @phydev: the phy_device struct + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret; + + ret = __phy_modify_changed(phydev, regnum, mask, set); return ret < 0 ? 
ret : 0; } @@ -542,6 +673,113 @@ int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) } EXPORT_SYMBOL_GPL(phy_modify); +/** + * __phy_modify_mmd_changed - Function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * Unlocked helper function which allows a MMD register to be modified as + * new register value = (old register value & ~mask) | set + * + * Returns negative errno, 0 if there was no change, and 1 in case of change + */ +int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int new, ret; + + ret = __phy_read_mmd(phydev, devad, regnum); + if (ret < 0) + return ret; + + new = (ret & ~mask) | set; + if (new == ret) + return 0; + + ret = __phy_write_mmd(phydev, devad, regnum, new); + + return ret < 0 ? ret : 1; +} +EXPORT_SYMBOL_GPL(__phy_modify_mmd_changed); + +/** + * phy_modify_mmd_changed - Function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + * + * Returns negative errno, 0 if there was no change, and 1 in case of change + */ +int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify_mmd_changed); + +/** + * __phy_modify_mmd - Convenience function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. + */ +int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int ret; + + ret = __phy_modify_mmd_changed(phydev, devad, regnum, mask, set); + + return ret < 0 ? ret : 0; +} +EXPORT_SYMBOL_GPL(__phy_modify_mmd); + +/** + * phy_modify_mmd - Convenience function for modifying a register on MMD + * @phydev: the phy_device struct + * @devad: the MMD containing register to modify + * @regnum: register number to modify + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write functions may wait for an interrupt + * to conclude the operation. 
+ */ +int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, + u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify_mmd(phydev, devad, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify_mmd); + static int __phy_read_page(struct phy_device *phydev) { return phydev->drv->read_page(phydev); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 189cd2048c3a..3745220c5c98 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Framework for configuring and reading PHY devices * Based on code in sungem_phy.c and gianfar_phy.c * @@ -5,16 +6,8 @@ * * Copyright (c) 2004 Freescale Semiconductor, Inc. * Copyright (c) 2006, 2007 Maciej W. Rozycki - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - #include #include #include @@ -36,8 +29,6 @@ #include #include -#include - #define PHY_STATE_STR(_state) \ case PHY_##_state: \ return __stringify(_state); \ @@ -51,7 +42,6 @@ static const char *phy_state_to_str(enum phy_state st) PHY_STATE_STR(RUNNING) PHY_STATE_STR(NOLINK) PHY_STATE_STR(FORCING) - PHY_STATE_STR(CHANGELINK) PHY_STATE_STR(HALTED) PHY_STATE_STR(RESUMING) } @@ -154,14 +144,10 @@ int phy_aneg_done(struct phy_device *phydev) { if (phydev->drv && phydev->drv->aneg_done) return phydev->drv->aneg_done(phydev); - - /* Avoid genphy_aneg_done() if the Clause 45 PHY does not - * implement Clause 22 registers - */ - if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) - return -EINVAL; - - return genphy_aneg_done(phydev); + else if (phydev->is_c45) + return genphy_c45_aneg_done(phydev); + else + return genphy_aneg_done(phydev); } EXPORT_SYMBOL(phy_aneg_done); @@ -553,7 +539,7 @@ int phy_start_aneg(struct phy_device *phydev) if (err < 0) goto out_unlock; - if (__phy_is_started(phydev)) { + if (phy_is_started(phydev)) { if (phydev->autoneg == AUTONEG_ENABLE) { err = phy_check_link_status(phydev); } else { @@ -709,7 +695,7 @@ void phy_stop_machine(struct phy_device *phydev) cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); - if (__phy_is_started(phydev)) + if (phy_is_started(phydev)) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); } @@ -762,9 +748,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) { struct phy_device *phydev = phy_dat; - if (!phy_is_started(phydev)) - return IRQ_NONE; /* It can't be ours. */ - if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) return IRQ_NONE; @@ -795,46 +778,27 @@ static int phy_enable_interrupts(struct phy_device *phydev) } /** - * phy_start_interrupts - request and enable interrupts for a PHY device + * phy_request_interrupt - request interrupt for a PHY device * @phydev: target phy_device struct * * Description: Request the interrupt for the given PHY. * If this fails, then we set irq to PHY_POLL. - * Otherwise, we enable the interrupts in the PHY. * This should only be called with a valid IRQ number. - * Returns 0 on success or < 0 on error. 
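As a usage sketch for the MMD helpers added in phy-core.c above: the *_changed variants return a negative errno, 0 when the register already held the requested value, or 1 when a write was actually performed, while phy_modify_mmd() (and the phy_set_bits_mmd()/phy_clear_bits_mmd() wrappers used by the Clause 45 code earlier in this patch) collapse that to 0 on success. The helper names below are illustrative and not from this patch; the register and bit constants are standard linux/mdio.h definitions.

#include <linux/mdio.h>
#include <linux/phy.h>

/* Drop 1000BASE-T from the EEE advertisement; returns 1 if the register
 * changed, 0 if the bit was already clear, negative errno on bus errors.
 */
static int example_mask_eee_1000t(struct phy_device *phydev)
{
        return phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
                                      MDIO_EEE_1000T, 0);
}

/* Ask the PCS to stop the xMII clock during LPI, as phy_init_eee() now
 * does via phy_set_bits_mmd() instead of an open-coded read/modify/write.
 */
static int example_enable_clkstop(struct phy_device *phydev)
{
        return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
                                MDIO_PCS_CTRL1_CLKSTOP_EN);
}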
*/ -int phy_start_interrupts(struct phy_device *phydev) +void phy_request_interrupt(struct phy_device *phydev) { - if (request_threaded_irq(phydev->irq, NULL, phy_interrupt, - IRQF_ONESHOT | IRQF_SHARED, - phydev_name(phydev), phydev) < 0) { - pr_warn("%s: Can't get IRQ %d (PHY)\n", - phydev->mdio.bus->name, phydev->irq); + int err; + + err = request_threaded_irq(phydev->irq, NULL, phy_interrupt, + IRQF_ONESHOT | IRQF_SHARED, + phydev_name(phydev), phydev); + if (err) { + phydev_warn(phydev, "Error %d requesting IRQ %d, falling back to polling\n", + err, phydev->irq); phydev->irq = PHY_POLL; - return 0; } - - return phy_enable_interrupts(phydev); } -EXPORT_SYMBOL(phy_start_interrupts); - -/** - * phy_stop_interrupts - disable interrupts from a PHY device - * @phydev: target phy_device struct - */ -int phy_stop_interrupts(struct phy_device *phydev) -{ - int err = phy_disable_interrupts(phydev); - - if (err) - phy_error(phydev); - - free_irq(phydev->irq, phydev); - - return err; -} -EXPORT_SYMBOL(phy_stop_interrupts); +EXPORT_SYMBOL(phy_request_interrupt); /** * phy_stop - Bring down the PHY link, and stop checking the status @@ -842,15 +806,14 @@ EXPORT_SYMBOL(phy_stop_interrupts); */ void phy_stop(struct phy_device *phydev) { - mutex_lock(&phydev->lock); - - if (!__phy_is_started(phydev)) { + if (!phy_is_started(phydev)) { WARN(1, "called from state %s\n", phy_state_to_str(phydev->state)); - mutex_unlock(&phydev->lock); return; } + mutex_lock(&phydev->lock); + if (phy_interrupt_is_valid(phydev)) phy_disable_interrupts(phydev); @@ -859,6 +822,7 @@ void phy_stop(struct phy_device *phydev) mutex_unlock(&phydev->lock); phy_state_machine(&phydev->state_queue.work); + phy_stop_machine(phydev); /* Cannot call flush_scheduled_work() here as desired because * of rtnl_lock(), but PHY_HALTED shall guarantee irq handler @@ -879,33 +843,34 @@ EXPORT_SYMBOL(phy_stop); */ void phy_start(struct phy_device *phydev) { - int err = 0; + int err; mutex_lock(&phydev->lock); - switch (phydev->state) { - case PHY_READY: - phydev->state = PHY_UP; - break; - case PHY_HALTED: - /* if phy was suspended, bring the physical link up again */ - __phy_resume(phydev); + if (phydev->state != PHY_READY && phydev->state != PHY_HALTED) { + WARN(1, "called from state %s\n", + phy_state_to_str(phydev->state)); + goto out; + } - /* make sure interrupts are re-enabled for the PHY */ - if (phy_interrupt_is_valid(phydev)) { - err = phy_enable_interrupts(phydev); - if (err < 0) - break; - } + /* if phy was suspended, bring the physical link up again */ + __phy_resume(phydev); - phydev->state = PHY_RESUMING; - break; - default: - break; + /* make sure interrupts are enabled for the PHY */ + if (phy_interrupt_is_valid(phydev)) { + err = phy_enable_interrupts(phydev); + if (err < 0) + goto out; } - mutex_unlock(&phydev->lock); - phy_trigger_machine(phydev); + if (phydev->state == PHY_READY) + phydev->state = PHY_UP; + else + phydev->state = PHY_RESUMING; + + phy_start_machine(phydev); +out: + mutex_unlock(&phydev->lock); } EXPORT_SYMBOL(phy_start); @@ -939,7 +904,6 @@ void phy_state_machine(struct work_struct *work) break; case PHY_NOLINK: case PHY_RUNNING: - case PHY_CHANGELINK: case PHY_RESUMING: err = phy_check_link_status(phydev); break; @@ -989,8 +953,10 @@ void phy_state_machine(struct work_struct *work) * state machine would be pointless and possibly error prone when * called from phy_disconnect() synchronously. 
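Taken together with the phy_device.c changes further down (phy_connect_direct() now calls phy_request_interrupt(), and phy_disconnect() stops the PHY and frees the IRQ), the driver-facing pattern becomes: connect, phy_start(), phy_stop(), disconnect, with no explicit phy_start_machine() or the removed phy_start_interrupts(). A rough sketch for a hypothetical MAC driver follows; the adjust_link handler and the OF node lookup are placeholders, not part of this patch.

#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void example_adjust_link(struct net_device *ndev)
{
        /* react to ndev->phydev->link / speed / duplex changes */
}

static int example_ndo_open(struct net_device *ndev)
{
        struct phy_device *phydev;

        phydev = of_phy_get_and_connect(ndev, ndev->dev.parent->of_node,
                                        example_adjust_link);
        if (!phydev)
                return -ENODEV;

        /* PHY_READY -> PHY_UP; starts polling, or uses the IRQ already
         * requested by phy_connect_direct() via phy_request_interrupt().
         */
        phy_start(phydev);
        return 0;
}

static int example_ndo_stop(struct net_device *ndev)
{
        phy_stop(ndev->phydev);
        phy_disconnect(ndev->phydev);
        return 0;
}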
*/ + mutex_lock(&phydev->lock); if (phy_polling_mode(phydev) && phy_is_started(phydev)) phy_queue_state_machine(phydev, PHY_STATE_TIME); + mutex_unlock(&phydev->lock); } /** @@ -1088,17 +1054,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (!phy_check_valid(phydev->speed, phydev->duplex, common)) goto eee_exit_err; - if (clk_stop_enable) { + if (clk_stop_enable) /* Configure the PHY to stop receiving xMII * clock while it is signaling LPI. */ - int val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1); - if (val < 0) - return val; - - val |= MDIO_PCS_CTRL1_CLKSTOP_EN; - phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, val); - } + phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, + MDIO_PCS_CTRL1_CLKSTOP_EN); return 0; /* EEE supported */ } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 46c86725a693..49fdd1ee798e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1,15 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Framework for finding and configuring PHYs. * Also contains generic PHY driver * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -35,9 +30,6 @@ #include #include #include -#include - -#include MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); @@ -560,12 +552,33 @@ static const struct device_type mdio_bus_phy_type = { .pm = MDIO_BUS_PHY_PM_OPS, }; +static int phy_request_driver_module(struct phy_device *dev, int phy_id) +{ + int ret; + + ret = request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, + MDIO_ID_ARGS(phy_id)); + /* We only check for failures in executing the usermode binary, + * not whether a PHY driver module exists for the PHY ID. + * Accept -ENOENT because this may occur in case no initramfs exists, + * then modprobe isn't available. 
+ */ + if (IS_ENABLED(CONFIG_MODULES) && ret < 0 && ret != -ENOENT) { + phydev_err(dev, "error %d loading PHY driver module for ID 0x%08x\n", + ret, phy_id); + return ret; + } + + return 0; +} + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids) { struct phy_device *dev; struct mdio_device *mdiodev; + int ret = 0; /* We allocate the device, and initialize the default values */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); @@ -622,15 +635,21 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, if (!(c45_ids->devices_in_package & (1 << i))) continue; - request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, - MDIO_ID_ARGS(c45_ids->device_ids[i])); + ret = phy_request_driver_module(dev, + c45_ids->device_ids[i]); + if (ret) + break; } } else { - request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, - MDIO_ID_ARGS(phy_id)); + ret = phy_request_driver_module(dev, phy_id); } - device_initialize(&mdiodev->dev); + if (!ret) { + device_initialize(&mdiodev->dev); + } else { + kfree(dev); + dev = ERR_PTR(ret); + } return dev; } @@ -656,13 +675,16 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - *devices_in_package = (phy_reg & 0xffff) << 16; + *devices_in_package = phy_reg << 16; reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - *devices_in_package |= (phy_reg & 0xffff); + *devices_in_package |= phy_reg; + + /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ + *devices_in_package &= ~BIT(0); return 0; } @@ -723,13 +745,13 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16; + c45_ids->device_ids[i] = phy_reg << 16; reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; - c45_ids->device_ids[i] |= (phy_reg & 0xffff); + c45_ids->device_ids[i] |= phy_reg; } *phy_id = 0; return 0; @@ -762,25 +784,18 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, /* Grab the bits from PHYIR1, and put them in the upper half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); if (phy_reg < 0) { - /* if there is no device, return without an error so scanning - * the bus works properly - */ - if (phy_reg == -EIO || phy_reg == -ENODEV) { - *phy_id = 0xffffffff; - return 0; - } - - return -EIO; + /* returning -ENODEV doesn't stop bus scanning */ + return (phy_reg == -EIO || phy_reg == -ENODEV) ? 
-ENODEV : -EIO; } - *phy_id = (phy_reg & 0xffff) << 16; + *phy_id = phy_reg << 16; /* Grab the bits from PHYIR2, and put them in the lower half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID2); if (phy_reg < 0) return -EIO; - *phy_id |= (phy_reg & 0xffff); + *phy_id |= phy_reg; return 0; } @@ -831,13 +846,13 @@ int phy_device_register(struct phy_device *phydev) /* Run all of the fixups for this PHY */ err = phy_scan_fixups(phydev); if (err) { - pr_err("PHY %d failed to initialize\n", phydev->mdio.addr); + phydev_err(phydev, "failed to initialize\n"); goto out; } err = device_add(&phydev->mdio.dev); if (err) { - pr_err("PHY %d failed to add\n", phydev->mdio.addr); + phydev_err(phydev, "failed to add\n"); goto out; } @@ -938,9 +953,8 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev, return rc; phy_prepare_link(phydev, handler); - phy_start_machine(phydev); - if (phydev->irq > 0) - phy_start_interrupts(phydev); + if (phy_interrupt_is_valid(phydev)) + phy_request_interrupt(phydev); return 0; } @@ -995,10 +1009,11 @@ EXPORT_SYMBOL(phy_connect); */ void phy_disconnect(struct phy_device *phydev) { - if (phydev->irq > 0) - phy_stop_interrupts(phydev); + if (phy_is_started(phydev)) + phy_stop(phydev); - phy_stop_machine(phydev); + if (phy_interrupt_is_valid(phydev)) + free_irq(phydev->irq, phydev); phydev->adjust_link = NULL; @@ -1053,7 +1068,7 @@ int phy_init_hw(struct phy_device *phydev) /* Deassert the reset signal */ phy_device_reset(phydev, 0); - if (!phydev->drv || !phydev->drv->config_init) + if (!phydev->drv) return 0; if (phydev->drv->soft_reset) @@ -1066,7 +1081,10 @@ int phy_init_hw(struct phy_device *phydev) if (ret < 0) return ret; - return phydev->drv->config_init(phydev); + if (phydev->drv->config_init) + ret = phydev->drv->config_init(phydev); + + return ret; } EXPORT_SYMBOL(phy_init_hw); @@ -1291,6 +1309,36 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, } EXPORT_SYMBOL(phy_attach); +static bool phy_driver_is_genphy_kind(struct phy_device *phydev, + struct device_driver *driver) +{ + struct device *d = &phydev->mdio.dev; + bool ret = false; + + if (!phydev->drv) + return ret; + + get_device(d); + ret = d->driver == driver; + put_device(d); + + return ret; +} + +bool phy_driver_is_genphy(struct phy_device *phydev) +{ + return phy_driver_is_genphy_kind(phydev, + &genphy_driver.mdiodrv.driver); +} +EXPORT_SYMBOL_GPL(phy_driver_is_genphy); + +bool phy_driver_is_genphy_10g(struct phy_device *phydev) +{ + return phy_driver_is_genphy_kind(phydev, + &genphy_10g_driver.mdiodrv.driver); +} +EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g); + /** * phy_detach - detach a PHY device from its network device * @phydev: target phy_device struct @@ -1322,8 +1370,8 @@ void phy_detach(struct phy_device *phydev) * from the generic driver so that there's a chance a * real driver could be loaded */ - if (phydev->mdio.dev.driver == &genphy_10g_driver.mdiodrv.driver || - phydev->mdio.dev.driver == &genphy_driver.mdiodrv.driver) + if (phy_driver_is_genphy(phydev) || + phy_driver_is_genphy_10g(phydev)) device_release_driver(&phydev->mdio.dev); /* @@ -1467,7 +1515,7 @@ EXPORT_SYMBOL(phy_reset_after_clk_enable); static int genphy_config_advert(struct phy_device *phydev) { u32 advertise; - int oldadv, adv, bmsr; + int bmsr, adv; int err, changed = 0; /* Only allow advertising what this PHY supports */ @@ -1480,22 +1528,14 @@ static int genphy_config_advert(struct phy_device *phydev) phydev->advertising); /* Setup standard advertisement */ - adv = 
phy_read(phydev, MII_ADVERTISE); - if (adv < 0) - return adv; - - oldadv = adv; - adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | - ADVERTISE_PAUSE_ASYM); - adv |= ethtool_adv_to_mii_adv_t(advertise); - - if (adv != oldadv) { - err = phy_write(phydev, MII_ADVERTISE, adv); - - if (err < 0) - return err; + err = phy_modify_changed(phydev, MII_ADVERTISE, + ADVERTISE_ALL | ADVERTISE_100BASE4 | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, + ethtool_adv_to_mii_adv_t(advertise)); + if (err < 0) + return err; + if (err > 0) changed = 1; - } bmsr = phy_read(phydev, MII_BMSR); if (bmsr < 0) @@ -1509,25 +1549,20 @@ static int genphy_config_advert(struct phy_device *phydev) return changed; /* Configure gigabit if it's supported */ - adv = phy_read(phydev, MII_CTRL1000); - if (adv < 0) - return adv; - - oldadv = adv; - adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - + adv = 0; if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, phydev->supported) || linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->supported)) - adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); - - if (adv != oldadv) - changed = 1; + adv = ethtool_adv_to_mii_ctrl1000_t(advertise); - err = phy_write(phydev, MII_CTRL1000, adv); + err = phy_modify_changed(phydev, MII_CTRL1000, + ADVERTISE_1000FULL | ADVERTISE_1000HALF, + adv); if (err < 0) return err; + if (err > 0) + changed = 1; return changed; } @@ -1540,34 +1575,20 @@ static int genphy_config_advert(struct phy_device *phydev) * efficent ethernet modes. Returns 0 if the PHY's advertisement hasn't * changed, and 1 if it has changed. */ -static int genphy_config_eee_advert(struct phy_device *phydev) +int genphy_config_eee_advert(struct phy_device *phydev) { - int broken = phydev->eee_broken_modes; - int old_adv, adv; + int err; /* Nothing to disable */ - if (!broken) + if (!phydev->eee_broken_modes) return 0; - /* If the following call fails, we assume that EEE is not - * supported by the phy. If we read 0, EEE is not advertised - * In both case, we don't need to continue - */ - adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); - if (adv <= 0) - return 0; - - old_adv = adv; - adv &= ~broken; - - /* Advertising remains unchanged with the broken mask */ - if (old_adv == adv) - return 0; - - phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); - - return 1; + err = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, + phydev->eee_broken_modes, 0); + /* If the call failed, we assume that EEE is not supported */ + return err < 0 ? 0 : err; } +EXPORT_SYMBOL(genphy_config_eee_advert); /** * genphy_setup_forced - configures/forces speed/duplex from @phydev @@ -1683,10 +1704,19 @@ int genphy_update_link(struct phy_device *phydev) { int status; - /* Do a fake read */ - status = phy_read(phydev, MII_BMSR); - if (status < 0) - return status; + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops. 
+ */ + if (!phy_polling_mode(phydev)) { + status = phy_read(phydev, MII_BMSR); + if (status < 0) { + return status; + } else if (status & BMSR_LSTATUS) { + phydev->link = 1; + return 0; + } + } /* Read link and autonegotiation status */ status = phy_read(phydev, MII_BMSR); @@ -1717,8 +1747,6 @@ int genphy_read_status(struct phy_device *phydev) int err; int lpa; int lpagb = 0; - int common_adv; - int common_adv_gb = 0; /* Update the link, but return if there was an error */ err = genphy_update_link(phydev); @@ -1750,7 +1778,6 @@ int genphy_read_status(struct phy_device *phydev) mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb); - common_adv_gb = lpagb & adv << 2; } lpa = phy_read(phydev, MII_LPA); @@ -1759,35 +1786,12 @@ int genphy_read_status(struct phy_device *phydev) mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); - adv = phy_read(phydev, MII_ADVERTISE); - if (adv < 0) - return adv; - - common_adv = lpa & adv; - - phydev->speed = SPEED_10; - phydev->duplex = DUPLEX_HALF; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; phydev->pause = 0; phydev->asym_pause = 0; - if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) { - phydev->speed = SPEED_1000; - - if (common_adv_gb & LPA_1000FULL) - phydev->duplex = DUPLEX_FULL; - } else if (common_adv & (LPA_100FULL | LPA_100HALF)) { - phydev->speed = SPEED_100; - - if (common_adv & LPA_100FULL) - phydev->duplex = DUPLEX_FULL; - } else - if (common_adv & LPA_10FULL) - phydev->duplex = DUPLEX_FULL; - - if (phydev->duplex == DUPLEX_FULL) { - phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; - phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; - } + phy_resolve_aneg_linkmode(phydev); } else { int bmcr = phy_read(phydev, MII_BMCR); @@ -1919,44 +1923,6 @@ int genphy_loopback(struct phy_device *phydev, bool enable) } EXPORT_SYMBOL(genphy_loopback); -static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) -{ - switch (max_speed) { - case SPEED_10: - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - phydev->supported); - /* fall through */ - case SPEED_100: - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - phydev->supported); - break; - case SPEED_1000: - break; - default: - return -ENOTSUPP; - } - - return 0; -} - -int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) -{ - int err; - - err = __set_phy_supported(phydev, max_speed); - if (err) - return err; - - linkmode_copy(phydev->advertising, phydev->supported); - - return 0; -} -EXPORT_SYMBOL(phy_set_max_speed); - /** * phy_remove_link_mode - Remove a supported link mode * @phydev: phy_device structure to remove link mode from @@ -2087,48 +2053,6 @@ bool phy_validate_pause(struct phy_device *phydev, } EXPORT_SYMBOL(phy_validate_pause); -static void of_set_phy_supported(struct phy_device *phydev) -{ - struct device_node *node = phydev->mdio.dev.of_node; - u32 max_speed; - - if (!IS_ENABLED(CONFIG_OF_MDIO)) - return; - - if (!node) - return; - - if (!of_property_read_u32(node, "max-speed", &max_speed)) - __set_phy_supported(phydev, max_speed); -} - -static void of_set_phy_eee_broken(struct phy_device *phydev) -{ - struct device_node *node = phydev->mdio.dev.of_node; - u32 broken = 0; - - if (!IS_ENABLED(CONFIG_OF_MDIO)) - return; - - if (!node) - return; - - if (of_property_read_bool(node, "eee-broken-100tx")) - broken |= MDIO_EEE_100TX; - if (of_property_read_bool(node, 
"eee-broken-1000t")) - broken |= MDIO_EEE_1000T; - if (of_property_read_bool(node, "eee-broken-10gt")) - broken |= MDIO_EEE_10GT; - if (of_property_read_bool(node, "eee-broken-1000kx")) - broken |= MDIO_EEE_1000KX; - if (of_property_read_bool(node, "eee-broken-10gkx4")) - broken |= MDIO_EEE_10GKX4; - if (of_property_read_bool(node, "eee-broken-10gkr")) - broken |= MDIO_EEE_10GKR; - - phydev->eee_broken_modes = broken; -} - static bool phy_drv_supports_irq(struct phy_driver *phydrv) { return phydrv->config_intr && phydrv->ack_interrupt; @@ -2162,11 +2086,30 @@ static int phy_probe(struct device *dev) mutex_lock(&phydev->lock); + if (phydev->drv->probe) { + /* Deassert the reset signal */ + phy_device_reset(phydev, 0); + + err = phydev->drv->probe(phydev); + if (err) { + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + goto out; + } + } + /* Start out supporting everything. Eventually, * a controller will attach, and may modify one * or both of these values */ - linkmode_copy(phydev->supported, phydrv->features); + if (phydrv->features) { + linkmode_copy(phydev->supported, phydrv->features); + } else { + err = phydrv->get_features(phydev); + if (err) + goto out; + } + of_set_phy_supported(phydev); linkmode_copy(phydev->advertising, phydev->supported); @@ -2186,20 +2129,8 @@ static int phy_probe(struct device *dev) * (e.g. hardware erratum) where the driver wants to set only one * of these bits. */ - if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) || - test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) { - linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->supported); - linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->supported); - if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features)) - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, - phydev->supported); - if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydrv->features)) - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, - phydev->supported); - } else { + if (!test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported) && + !test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported)) { linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, @@ -2209,17 +2140,7 @@ static int phy_probe(struct device *dev) /* Set the state to READY by default */ phydev->state = PHY_READY; - if (phydev->drv->probe) { - /* Deassert the reset signal */ - phy_device_reset(phydev, 0); - - err = phydev->drv->probe(phydev); - if (err) { - /* Assert the reset signal */ - phy_device_reset(phydev, 1); - } - } - +out: mutex_unlock(&phydev->lock); return err; @@ -2255,7 +2176,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) { int retval; - if (WARN_ON(!new_driver->features)) { + /* Either the features are hard coded, or dynamically + * determine. It cannot be both or neither + */ + if (WARN_ON((!new_driver->features && !new_driver->get_features) || + (new_driver->features && new_driver->get_features))) { pr_err("%s: Driver features are missing\n", new_driver->name); return -EINVAL; } @@ -2267,14 +2192,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.remove = phy_remove; new_driver->mdiodrv.driver.owner = owner; - /* The following works around an issue where the PHY driver doesn't bind - * to the device, resulting in the genphy driver being used instead of - * the dedicated driver. 
The root cause of the issue isn't known yet - * and seems to be in the base driver core. Once this is fixed we may - * remove this workaround. - */ - new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; - retval = driver_register(&new_driver->mdiodrv.driver); if (retval) { pr_err("%s: Error %d in registering driver\n", diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index 263385b75bba..b86a4b2116f8 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -1,15 +1,5 @@ -/* Copyright (C) 2016 National Instruments Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2016 National Instruments Corp. */ #include #include #include diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index e7becc7379d7..89750c7dfd6f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1,12 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * phylink models the MAC to optional PHY connection, supporting * technologies such as SFP cages where the PHY is hot-pluggable. * * Copyright (C) 2015 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include #include @@ -305,6 +302,13 @@ static void phylink_mac_config(struct phylink *pl, pl->ops->mac_config(pl->netdev, pl->link_an_mode, state); } +static void phylink_mac_config_up(struct phylink *pl, + const struct phylink_link_state *state) +{ + if (state->link) + phylink_mac_config(pl, state); +} + static void phylink_mac_an_restart(struct phylink *pl) { if (pl->link_config.an_enabled && @@ -320,6 +324,10 @@ static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state * linkmode_zero(state->lp_advertising); state->interface = pl->link_config.interface; state->an_enabled = pl->link_config.an_enabled; + state->speed = SPEED_UNKNOWN; + state->duplex = DUPLEX_UNKNOWN; + state->pause = MLO_PAUSE_NONE; + state->an_complete = 0; state->link = 1; return pl->ops->mac_link_state(ndev, state); @@ -404,12 +412,12 @@ static void phylink_resolve(struct work_struct *w) case MLO_AN_PHY: link_state = pl->phy_state; phylink_resolve_flow(pl, &link_state); - phylink_mac_config(pl, &link_state); + phylink_mac_config_up(pl, &link_state); break; case MLO_AN_FIXED: phylink_get_fixed_state(pl, &link_state); - phylink_mac_config(pl, &link_state); + phylink_mac_config_up(pl, &link_state); break; case MLO_AN_INBAND: @@ -474,6 +482,17 @@ static void phylink_run_resolve(struct phylink *pl) queue_work(system_power_efficient_wq, &pl->resolve); } +static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) +{ + unsigned long state = pl->phylink_disable_state; + + set_bit(bit, &pl->phylink_disable_state); + if (state == 0) { + queue_work(system_power_efficient_wq, &pl->resolve); + flush_work(&pl->resolve); + } +} + static void phylink_fixed_poll(struct timer_list *t) { struct phylink *pl = container_of(t, struct phylink, link_poll); @@ -679,9 +698,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising); - phy_start_machine(phy); - if (phy->irq > 0) - phy_start_interrupts(phy); + if (phy_interrupt_is_valid(phy)) + phy_request_interrupt(phy); return 0; } @@ -924,9 +942,7 @@ void phylink_stop(struct phylink *pl) if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) del_timer_sync(&pl->link_poll); - set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); - queue_work(system_power_efficient_wq, &pl->resolve); - flush_work(&pl->resolve); + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); } EXPORT_SYMBOL_GPL(phylink_stop); @@ -1268,6 +1284,24 @@ int phylink_get_eee_err(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_get_eee_err); +/** + * phylink_init_eee() - init and check the EEE features + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @clk_stop_enable: allow PHY to stop receive clock + * + * Must be called either with RTNL held or within mac_link_up() + */ +int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) +{ + int ret = -EOPNOTSUPP; + + if (pl->phydev) + ret = phy_init_eee(pl->phydev, clk_stop_enable); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_init_eee); + /** * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -1632,9 +1666,7 @@ static void phylink_sfp_link_down(void *upstream) ASSERT_RTNL(); - set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); - queue_work(system_power_efficient_wq, &pl->resolve); - flush_work(&pl->resolve); + phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); } 
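The new phylink_init_eee() wrapper gives MAC drivers an RTNL-safe way to run the phy_init_eee() check on the attached PHY without reaching into the phylink instance themselves. A sketch of how an ethtool set_eee handler might use it; struct example_priv and its fields are hypothetical and only stand in for whatever private state the driver already keeps.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>

struct example_priv {
        struct phylink *phylink;
        bool eee_clk_stop;      /* allow the PHY to stop the Rx clock in LPI */
};

static int example_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
{
        struct example_priv *priv = netdev_priv(ndev);
        int ret;

        if (eee->eee_enabled) {
                /* ethtool ops run under RTNL, as phylink_init_eee() requires */
                ret = phylink_init_eee(priv->phylink, priv->eee_clk_stop);
                if (ret)
                        return ret;
        }

        return phylink_ethtool_set_eee(priv->phylink, eee);
}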
static void phylink_sfp_link_up(void *upstream) @@ -1697,4 +1729,4 @@ void phylink_helper_basex_speed(struct phylink_link_state *state) } EXPORT_SYMBOL_GPL(phylink_helper_basex_speed); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index cfe2313dbefd..5486f6fb2ab2 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/qsemi.c * @@ -6,12 +7,6 @@ * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include #include diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index c6010fb1aa0f..10df52ccddfe 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/realtek.c * @@ -6,12 +7,6 @@ * Author: Johnson Leung * * Copyright (c) 2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include #include @@ -277,11 +272,27 @@ static struct phy_driver realtek_drvs[] = { .resume = genphy_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + PHY_ID_MATCH_EXACT(0x001cc800), + .name = "Generic Realtek PHY", + .features = PHY_GBIT_FEATURES, + .config_init = genphy_config_init, + .suspend = genphy_suspend, + .resume = genphy_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", .features = PHY_GBIT_FEATURES, .config_init = &rtl8366rb_config_init, + /* These interrupts are handled by the irq controller + * embedded inside the RTL8366RB, they get unmasked when the + * irq is requested and ACKed by reading the status register, + * which is done by the irqchip code. 
+ */ + .ack_interrupt = genphy_no_ack_interrupt, + .config_intr = genphy_no_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ad9db652874d..fef701bfad62 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus) return ret; } } + bus->socket_ops->attach(bus->sfp); if (bus->started) bus->socket_ops->start(bus->sfp); bus->netdev->sfp_bus = bus; @@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) if (bus->registered) { if (bus->started) bus->socket_ops->stop(bus->sfp); + bus->socket_ops->detach(bus->sfp); if (bus->phydev && ops && ops->disconnect_phy) ops->disconnect_phy(bus->upstream); } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index fd8bb998ae52..d4635c2178d1 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 #include #include #include @@ -184,6 +185,7 @@ struct sfp { struct gpio_desc *gpio[GPIO_MAX]; + bool attached; unsigned int state; struct delayed_work poll; struct delayed_work timeout; @@ -1475,7 +1477,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) */ switch (sfp->sm_mod_state) { default: - if (event == SFP_E_INSERT) { + if (event == SFP_E_INSERT && sfp->attached) { sfp_module_tx_disable(sfp); sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); } @@ -1607,6 +1609,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) mutex_unlock(&sfp->sm_mutex); } +static void sfp_attach(struct sfp *sfp) +{ + sfp->attached = true; + if (sfp->state & SFP_F_PRESENT) + sfp_sm_event(sfp, SFP_E_INSERT); +} + +static void sfp_detach(struct sfp *sfp) +{ + sfp->attached = false; + sfp_sm_event(sfp, SFP_E_REMOVE); +} + static void sfp_start(struct sfp *sfp) { sfp_sm_event(sfp, SFP_E_DEV_UP); @@ -1667,6 +1682,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, } static const struct sfp_socket_ops sfp_module_ops = { + .attach = sfp_attach, + .detach = sfp_detach, .start = sfp_start, .stop = sfp_stop, .module_info = sfp_module_info, @@ -1834,10 +1851,6 @@ static int sfp_probe(struct platform_device *pdev) dev_info(sfp->dev, "Host maximum power %u.%uW\n", sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); - sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); - if (!sfp->sfp_bus) - return -ENOMEM; - /* Get the initial state, and always signal TX disable, * since the network interface will not be up. 
*/ @@ -1848,10 +1861,6 @@ static int sfp_probe(struct platform_device *pdev) sfp->state |= SFP_F_RATE_SELECT; sfp_set_state(sfp, sfp->state); sfp_module_tx_disable(sfp); - rtnl_lock(); - if (sfp->state & SFP_F_PRESENT) - sfp_sm_event(sfp, SFP_E_INSERT); - rtnl_unlock(); for (i = 0; i < GPIO_MAX; i++) { if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) @@ -1884,6 +1893,10 @@ static int sfp_probe(struct platform_device *pdev) dev_warn(sfp->dev, "No tx_disable pin: SFP modules will always be emitting.\n"); + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); + if (!sfp->sfp_bus) + return -ENOMEM; + return 0; } diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 31b0acf337e2..64f54b0bbd8c 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h @@ -7,6 +7,8 @@ struct sfp; struct sfp_socket_ops { + void (*attach)(struct sfp *sfp); + void (*detach)(struct sfp *sfp); void (*start)(struct sfp *sfp); void (*stop)(struct sfp *sfp); int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index f9477ff55545..c94d3bfbc772 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/smsc.c * @@ -7,11 +8,6 @@ * * Copyright (c) 2006 Herbert Valerio Riedel * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@shawell.net * */ diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index f17b3441779b..92b64e254b44 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * SPI driver for Micrel/Kendin KS8995M and KSZ8864RMN ethernet switches * @@ -5,10 +6,6 @@ * * This file was based on: drivers/spi/at25.c * Copyright (C) 2006 David Brownell - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 33d733684f5b..5b6acf431f98 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * drivers/net/phy/ste10Xp.c * @@ -6,12 +7,6 @@ * Author: Giuseppe Cavallaro * * Copyright (c) 2008 STMicroelectronics Limited - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c index 34f58f2349e9..dad22481d9c1 100644 --- a/drivers/net/phy/swphy.c +++ b/drivers/net/phy/swphy.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Software PHY emulation * @@ -7,11 +8,6 @@ * Anton Vorontsov * * Copyright (c) 2006-2007 MontaVista Software, Inc. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ #include #include @@ -23,7 +19,6 @@ #define MII_REGS_NUM 29 struct swmii_regs { - u16 bmcr; u16 bmsr; u16 lpa; u16 lpagb; @@ -44,16 +39,13 @@ enum { */ static const struct swmii_regs speed[] = { [SWMII_SPEED_10] = { - .bmcr = BMCR_FULLDPLX, .lpa = LPA_10FULL | LPA_10HALF, }, [SWMII_SPEED_100] = { - .bmcr = BMCR_FULLDPLX | BMCR_SPEED100, .bmsr = BMSR_100FULL | BMSR_100HALF, .lpa = LPA_100FULL | LPA_100HALF, }, [SWMII_SPEED_1000] = { - .bmcr = BMCR_FULLDPLX | BMCR_SPEED1000, .bmsr = BMSR_ESTATEN, .lpagb = LPA_1000FULL | LPA_1000HALF, }, @@ -61,13 +53,11 @@ static const struct swmii_regs speed[] = { static const struct swmii_regs duplex[] = { [SWMII_DUPLEX_HALF] = { - .bmcr = ~BMCR_FULLDPLX, .bmsr = BMSR_ESTATEN | BMSR_100HALF, .lpa = LPA_10HALF | LPA_100HALF, .lpagb = LPA_1000HALF, }, [SWMII_DUPLEX_FULL] = { - .bmcr = ~0, .bmsr = BMSR_ESTATEN | BMSR_100FULL, .lpa = LPA_10FULL | LPA_100FULL, .lpagb = LPA_1000FULL, @@ -122,7 +112,6 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state) { int speed_index, duplex_index; u16 bmsr = BMSR_ANEGCAPABLE; - u16 bmcr = 0; u16 lpagb = 0; u16 lpa = 0; @@ -140,7 +129,6 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state) if (state->link) { bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; - bmcr |= speed[speed_index].bmcr & duplex[duplex_index].bmcr; lpa |= speed[speed_index].lpa & duplex[duplex_index].lpa; lpagb |= speed[speed_index].lpagb & duplex[duplex_index].lpagb; @@ -153,7 +141,7 @@ int swphy_read_reg(int reg, const struct fixed_phy_status *state) switch (reg) { case MII_BMCR: - return bmcr; + return BMCR_ANENABLE; case MII_BMSR: return bmsr; case MII_PHYSID1: diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c index 91247182bc52..beb054b931ee 100644 --- a/drivers/net/phy/teranetics.c +++ b/drivers/net/phy/teranetics.c @@ -1,13 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Teranetics PHY * * Author: Shaohui Xie * * Copyright 2015 Freescale Semiconductor, Inc. - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. */ #include @@ -81,9 +78,8 @@ static struct phy_driver teranetics_driver[] = { .phy_id_mask = 0xffffffff, .name = "Teranetics TN2020", .features = PHY_10GBIT_FEATURES, - .soft_reset = gen10g_no_soft_reset, + .soft_reset = genphy_no_soft_reset, .aneg_done = teranetics_aneg_done, - .config_init = gen10g_config_init, .config_aneg = gen10g_config_aneg, .read_status = teranetics_read_status, .match_phy_device = teranetics_match_phy_device, diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c index 1e4fc42e4629..219fc7cdc2b3 100644 --- a/drivers/net/phy/uPD60620.c +++ b/drivers/net/phy/uPD60620.c @@ -1,13 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Driver for the Renesas PHY uPD60620. * * Copyright (C) 2015 Softing Industrial Automation GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * */ #include diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 0646af458f6a..dc0dd87a6694 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -1,15 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Vitesse PHYs * * Author: Kriston Carson - * - * Copyright (c) 2005, 2009, 2011 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * */ #include diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 74a8782313cf..2d1449345959 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Xilinx GMII2RGMII Converter driver * * Copyright (C) 2016 Xilinx, Inc. @@ -8,16 +9,6 @@ * * Description: * This driver is developed for Xilinx GMII2RGMII Converter - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include #include @@ -44,7 +35,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) u16 val = 0; int err; - err = priv->phy_drv->read_status(phydev); + if (priv->phy_drv->read_status) + err = priv->phy_drv->read_status(phydev); + else + err = genphy_read_status(phydev); if (err < 0) return err; diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c index 7820fced33f6..941cfa8f1c2a 100644 --- a/drivers/net/sb1000.c +++ b/drivers/net/sb1000.c @@ -535,17 +535,20 @@ sb1000_activate(const int ioaddr[], const char* name) int status; ssleep(1); - if ((status = card_send_command(ioaddr, name, Command0, st))) + status = card_send_command(ioaddr, name, Command0, st); + if (status) return status; - if ((status = card_send_command(ioaddr, name, Command1, st))) + status = card_send_command(ioaddr, name, Command1, st); + if (status) return status; if (st[3] != 0xf1) { - if ((status = sb1000_start_get_set_command(ioaddr, name))) + status = sb1000_start_get_set_command(ioaddr, name); + if (status) return status; return -EIO; } udelay(1000); - return sb1000_start_get_set_command(ioaddr, name); + return sb1000_start_get_set_command(ioaddr, name); } /* get SB1000 firmware version */ diff --git a/drivers/net/tap.c b/drivers/net/tap.c index c0b52e48f0e6..2ea9b4976f4a 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -712,7 +712,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, goto err_kfree; } - skb_probe_transport_header(skb, ETH_HLEN); + skb_probe_transport_header(skb); /* Move network header to the right position for VLAN tagged packets */ if ((skb->protocol == htons(ETH_P_8021Q) || @@ -1187,7 +1187,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp) tap = rcu_dereference(q->tap); if (tap) { skb->dev = tap->dev; - skb_probe_transport_header(skb, ETH_HLEN); + skb_probe_transport_header(skb); dev_queue_xmit(skb); } else { kfree_skb(skb); diff --git a/drivers/net/team/team.c 
b/drivers/net/team/team.c index afd9d25d1992..6ed96fdfd96d 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -256,17 +255,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, } } -static bool __team_option_inst_tmp_find(const struct list_head *opts, - const struct team_option_inst *needle) -{ - struct team_option_inst *opt_inst; - - list_for_each_entry(opt_inst, opts, tmp_list) - if (opt_inst == needle) - return true; - return false; -} - static int __team_options_register(struct team *team, const struct team_option *option, size_t option_count) @@ -1267,7 +1255,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, list_add_tail_rcu(&port->list, &team->port_list); team_port_enable(team, port); __team_compute_features(team); - __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); + __team_port_change_port_added(port, !!netif_oper_up(port_dev)); __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); @@ -2460,7 +2448,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) int err = 0; int i; struct nlattr *nl_option; - LIST_HEAD(opt_inst_list); rtnl_lock(); @@ -2480,6 +2467,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; struct nlattr *attr; struct nlattr *attr_data; + LIST_HEAD(opt_inst_list); enum team_option_type opt_type; int opt_port_ifindex = 0; /* != 0 for per-port options */ u32 opt_array_index = 0; @@ -2584,23 +2572,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) if (err) goto team_put; opt_inst->changed = true; - - /* dumb/evil user-space can send us duplicate opt, - * keep only the last one - */ - if (__team_option_inst_tmp_find(&opt_inst_list, - opt_inst)) - continue; - list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { err = -ENOENT; goto team_put; } - } - err = team_nl_send_event_options_get(team, &opt_inst_list); + err = team_nl_send_event_options_get(team, &opt_inst_list); + if (err) + break; + } team_put: team_nl_team_put(team); @@ -2932,7 +2914,7 @@ static int team_device_event(struct notifier_block *unused, switch (event) { case NETDEV_UP: - if (netif_carrier_ok(dev)) + if (netif_oper_up(dev)) team_port_change_check(port, true); break; case NETDEV_DOWN: diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index a5ef97010eb3..5541e1c19936 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -325,6 +325,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) return 0; } +static void lb_bpf_func_free(struct team *team) +{ + struct lb_priv *lb_priv = get_lb_priv(team); + struct bpf_prog *fp; + + if (!lb_priv->ex->orig_fprog) + return; + + __fprog_destroy(lb_priv->ex->orig_fprog); + fp = rcu_dereference_protected(lb_priv->fp, + lockdep_is_held(&team->lock)); + bpf_prog_destroy(fp); +} + static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); @@ -639,6 +653,7 @@ static void lb_exit(struct team *team) team_options_unregister(team, lb_options, ARRAY_SIZE(lb_options)); + lb_bpf_func_free(team); cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw); free_percpu(lb_priv->pcpu_stats); kfree(lb_priv->ex); diff --git a/drivers/net/tun.c 
b/drivers/net/tun.c index 18656c4094b3..1d68921723dc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, if (rtnl_dereference(tun->xdp_prog)) sock_set_flag(&tfile->sk, SOCK_XDP); - tun_set_real_num_queues(tun); - /* device is allowed to go away first, so no need to hold extra * refcnt. */ @@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; + tun_set_real_num_queues(tun); out: return err; } @@ -1930,7 +1929,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } skb_reset_network_header(skb); - skb_probe_transport_header(skb, 0); + skb_probe_transport_header(skb); if (skb_xdp) { struct bpf_prog *xdp_prog; @@ -2168,9 +2167,9 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) } add_wait_queue(&tfile->wq.wait, &wait); - current->state = TASK_INTERRUPTIBLE; while (1) { + set_current_state(TASK_INTERRUPTIBLE); ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) break; @@ -2186,7 +2185,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) schedule(); } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&tfile->wq.wait, &wait); out: @@ -2483,7 +2482,7 @@ build: skb->protocol = eth_type_trans(skb, tun->dev); skb_reset_network_header(skb); - skb_probe_transport_header(skb, 0); + skb_probe_transport_header(skb); if (skb_xdp) { err = do_xdp_generic(xdp_prog, skb); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 78b16eb9e58c..63aaae487995 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -361,8 +361,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i else return -EINVAL; - dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size, - ifname, NET_NAME_UNKNOWN, usbpn_setup); + dev = alloc_netdev(struct_size(pnd, urbs, rxq_size), ifname, + NET_NAME_UNKNOWN, usbpn_setup); if (!dev) return -ENOMEM; diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index e96bc0c6140f..3d92ea6fcc02 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2051,8 +2051,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) phydev = phy_find_first(dev->mdiobus); if (!phydev) { netdev_dbg(dev->net, "PHY Not Found!! 
Registering Fixed PHY\n"); - phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, - NULL); + phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); if (IS_ERR(phydev)) { netdev_err(dev->net, "No PHY/fixed_PHY found\n"); return NULL; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index f4247b275e09..63e44e746ccc 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1011,6 +1011,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = pegasus->phy; + /* fall through */ case SIOCDEVPRIVATE + 1: read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); res = 0; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 735ad838e2ba..74bebbdb4b15 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -976,6 +976,13 @@ static const struct usb_device_id products[] = { 0xff), .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, }, + { /* Quectel EG12/EM12 */ + USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, + 0xff), + .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, + }, /* 3. Combined interface devices matching on interface number */ {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ @@ -1201,8 +1208,8 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ - {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ + {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ @@ -1343,17 +1350,20 @@ static bool quectel_ec20_detected(struct usb_interface *intf) return false; } -static bool quectel_ep06_diag_detected(struct usb_interface *intf) +static bool quectel_diag_detected(struct usb_interface *intf) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; + u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor); + u16 id_product = le16_to_cpu(dev->descriptor.idProduct); - if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c && - le16_to_cpu(dev->descriptor.idProduct) == 0x0306 && - intf_desc.bNumEndpoints == 2) - return true; + if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2) + return false; - return false; + if (id_product == 0x0306 || id_product == 0x0512) + return true; + else + return false; } static int qmi_wwan_probe(struct usb_interface *intf, @@ -1390,13 +1400,13 @@ static int qmi_wwan_probe(struct usb_interface *intf, return -ENODEV; } - /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so + /* Several Quectel modems supports dynamic interface configuration, so * we need to match on class/subclass/protocol. These values are * identical for the diagnostic- and QMI-interface, but bNumEndpoints is * different. Ignore the current interface if the number of endpoints * the number for the diag interface (two). 
*/ - if (quectel_ep06_diag_detected(intf)) + if (quectel_diag_detected(intf)) return -ENODEV; return usbnet_probe(intf, id); diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 60dd1ec1665f..86c8c64fbb0f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -557,6 +557,7 @@ enum spd_duplex { /* MAC PASSTHRU */ #define AD_MASK 0xfee0 #define BND_MASK 0x0004 +#define BD_MASK 0x0001 #define EFUSE 0xcfdb #define PASS_THRU_MASK 0x1 @@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) return -ENODEV; } } else { - /* test for RTL8153-BND */ + /* test for RTL8153-BND and RTL8153-BD */ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); - if ((ocp_data & BND_MASK) == 0) { + if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) { netif_dbg(tp, probe, tp->netdev, "Invalid variant for MAC pass through\n"); return -ENODEV; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 80373a9171dd..59dbdbb5feff 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -388,7 +388,6 @@ static void read_bulk_callback(struct urb *urb) unsigned pkt_len, res; struct sk_buff *skb; struct net_device *netdev; - u16 rx_stat; int status = urb->status; int result; unsigned long flags; @@ -424,7 +423,6 @@ static void read_bulk_callback(struct urb *urb) goto goon; res = urb->actual_length; - rx_stat = le16_to_cpu(*(__le16 *)(urb->transfer_buffer + res - 4)); pkt_len = res - 4; skb_put(dev->rx_skb, pkt_len); @@ -849,6 +847,7 @@ static int rtl8150_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCDEVPRIVATE: data[0] = dev->phy; + /* fall through */ case SIOCDEVPRIVATE + 1: read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]); break; diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 6ac232e52bf7..e04c8054c2cf 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -434,7 +434,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) usbnet_skb_return(dev, sr_skb); skb_pull(skb, len + SR_RX_OVERHEAD); - }; + } return 0; } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index f412ea1cef18..569e87a51a33 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -115,7 +115,8 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) p += sizeof(ethtool_stats_keys); for (i = 0; i < dev->real_num_rx_queues; i++) { for (j = 0; j < VETH_RQ_STATS_LEN; j++) { - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s", + snprintf(p, ETH_GSTRING_LEN, + "rx_queue_%u_%.11s", i, veth_rq_stats_desc[j].desc); p += ETH_GSTRING_LEN; } @@ -540,8 +541,10 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); + /* fall through */ case XDP_DROP: goto err_xdp; } @@ -661,8 +664,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb, goto xdp_xmit; default: bpf_warn_invalid_xdp_action(act); + /* fall through */ case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); + /* fall through */ case XDP_DROP: goto drop; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8fadd8eaf601..7eb38ea9ba56 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644); #define VIRTIO_XDP_TX BIT(0) #define VIRTIO_XDP_REDIR BIT(1) +#define VIRTIO_XDP_FLAG BIT(0) + /* RX packet 
size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled * at once, the weight is chosen so that the EWMA will be insensitive to short- @@ -252,6 +254,21 @@ struct padded_vnet_hdr { char padding[4]; }; +static bool is_xdp_frame(void *ptr) +{ + return (unsigned long)ptr & VIRTIO_XDP_FLAG; +} + +static void *xdp_to_ptr(struct xdp_frame *ptr) +{ + return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); +} + +static struct xdp_frame *ptr_to_xdp(void *ptr) +{ + return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); +} + /* Converting between virtqueue no. and kernel tx/rx queue no. * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq */ @@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, sg_init_one(sq->sg, xdpf->data, xdpf->len); - err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); + err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), + GFP_ATOMIC); if (unlikely(err)) return -ENOSPC; /* Caller handle free/refcnt */ @@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev, { struct virtnet_info *vi = netdev_priv(dev); struct receive_queue *rq = vi->rq; - struct xdp_frame *xdpf_sent; struct bpf_prog *xdp_prog; struct send_queue *sq; unsigned int len; + int packets = 0; + int bytes = 0; int drops = 0; int kicks = 0; int ret, err; + void *ptr; int i; - sq = virtnet_xdp_sq(vi); - - if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { - ret = -EINVAL; - drops = n; - goto out; - } - /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this * indicate XDP resources have been successfully allocated. */ xdp_prog = rcu_dereference(rq->xdp_prog); - if (!xdp_prog) { - ret = -ENXIO; + if (!xdp_prog) + return -ENXIO; + + sq = virtnet_xdp_sq(vi); + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { + ret = -EINVAL; drops = n; goto out; } /* Free up any pending old buffers before queueing new ones. 
*/ - while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) - xdp_return_frame(xdpf_sent); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(is_xdp_frame(ptr))) { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += frame->len; + xdp_return_frame(frame); + } else { + struct sk_buff *skb = ptr; + + bytes += skb->len; + napi_consume_skb(skb, false); + } + packets++; + } for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; @@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, } out: u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; sq->stats.xdp_tx += n; sq->stats.xdp_tx_drops += drops; sq->stats.kicks += kicks; @@ -1035,6 +1066,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, goto frame_err; } + skb_record_rx_queue(skb, vq2rxq(rq->vq)); skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); @@ -1332,18 +1364,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget, static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) { - struct sk_buff *skb; unsigned int len; unsigned int packets = 0; unsigned int bytes = 0; + void *ptr; - while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { - pr_debug("Sent skb %p\n", skb); + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(!is_xdp_frame(ptr))) { + struct sk_buff *skb = ptr; - bytes += skb->len; - packets++; + pr_debug("Sent skb %p\n", skb); + + bytes += skb->len; + napi_consume_skb(skb, in_napi); + } else { + struct xdp_frame *frame = ptr_to_xdp(ptr); - napi_consume_skb(skb, in_napi); + bytes += frame->len; + xdp_return_frame(frame); + } + packets++; } /* Avoid overhead when no packets have been processed @@ -1358,6 +1398,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) u64_stats_update_end(&sq->stats.syncp); } +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) +{ + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) + return false; + else if (q < vi->curr_queue_pairs) + return true; + else + return false; +} + static void virtnet_poll_cleantx(struct receive_queue *rq) { struct virtnet_info *vi = rq->vq->vdev->priv; @@ -1365,7 +1415,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) struct send_queue *sq = &vi->sq[index]; struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); - if (!sq->napi.weight) + if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) return; if (__netif_tx_trylock(txq)) { @@ -1442,8 +1492,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) { struct send_queue *sq = container_of(napi, struct send_queue, napi); struct virtnet_info *vi = sq->vq->vdev->priv; - struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); + unsigned int index = vq2txq(sq->vq); + struct netdev_queue *txq; + if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { + /* We don't need to enable cb for XDP */ + napi_complete_done(napi, 0); + return 0; + } + + txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); free_old_xmit_skbs(sq, true); __netif_tx_unlock(txq); @@ -2395,6 +2453,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return -ENOMEM; } + old_prog = rtnl_dereference(vi->rq[0].xdp_prog); + if (!prog && !old_prog) + return 0; + if (prog) { prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); if 
(IS_ERR(prog)) @@ -2402,36 +2464,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, } /* Make sure NAPI is not using any XDP TX queues for RX. */ - if (netif_running(dev)) - for (i = 0; i < vi->max_queue_pairs; i++) + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { napi_disable(&vi->rq[i].napi); + virtnet_napi_tx_disable(&vi->sq[i].napi); + } + } + + if (!prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0) + virtnet_restore_guest_offloads(vi); + } + synchronize_net(); + } - netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); err = _virtnet_set_queues(vi, curr_qp + xdp_qp); if (err) goto err; + netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); vi->xdp_queue_pairs = xdp_qp; - for (i = 0; i < vi->max_queue_pairs; i++) { - old_prog = rtnl_dereference(vi->rq[i].xdp_prog); - rcu_assign_pointer(vi->rq[i].xdp_prog, prog); - if (i == 0) { - if (!old_prog) + if (prog) { + for (i = 0; i < vi->max_queue_pairs; i++) { + rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0 && !old_prog) virtnet_clear_guest_offloads(vi); - if (!prog) - virtnet_restore_guest_offloads(vi); } + } + + for (i = 0; i < vi->max_queue_pairs; i++) { if (old_prog) bpf_prog_put(old_prog); - if (netif_running(dev)) + if (netif_running(dev)) { virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } } return 0; err: - for (i = 0; i < vi->max_queue_pairs; i++) - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + if (!prog) { + virtnet_clear_guest_offloads(vi); + for (i = 0; i < vi->max_queue_pairs; i++) + rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); + } + + if (netif_running(dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) { + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); + virtnet_napi_tx_enable(vi, vi->sq[i].vq, + &vi->sq[i].napi); + } + } if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; @@ -2613,16 +2701,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) put_page(vi->rq[i].alloc_frag.page); } -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) -{ - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) - return false; - else if (q < vi->curr_queue_pairs) - return true; - else - return false; -} - static void free_unused_bufs(struct virtnet_info *vi) { void *buf; @@ -2631,10 +2709,10 @@ static void free_unused_bufs(struct virtnet_info *vi) for (i = 0; i < vi->max_queue_pairs; i++) { struct virtqueue *vq = vi->sq[i].vq; while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { - if (!is_xdp_raw_buffer_queue(vi, i)) + if (!is_xdp_frame(buf)) dev_kfree_skb(buf); else - put_page(virt_to_head_page(buf)); + xdp_return_frame(ptr_to_xdp(buf)); } } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 95909e262ba4..7c1430ed0244 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev) /* default to no qdisc; user can add if desired */ dev->priv_flags |= IFF_NO_QUEUE; + + dev->min_mtu = 0; + dev->max_mtu = 0; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5209ee9aac47..077f1b9f2761 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -361,10 +361,11 @@ errout: static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, const struct vxlan_fdb *fdb, const struct vxlan_rdst *rd, + struct netlink_ext_ack *extack, struct 
switchdev_notifier_vxlan_fdb_info *fdb_info) { fdb_info->info.dev = vxlan->dev; - fdb_info->info.extack = NULL; + fdb_info->info.extack = extack; fdb_info->remote_ip = rd->remote_ip; fdb_info->remote_port = rd->remote_port; fdb_info->remote_vni = rd->remote_vni; @@ -375,41 +376,50 @@ static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER; } -static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, - struct vxlan_fdb *fdb, - struct vxlan_rdst *rd, - bool adding) +static int vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, + struct vxlan_fdb *fdb, + struct vxlan_rdst *rd, + bool adding, + struct netlink_ext_ack *extack) { struct switchdev_notifier_vxlan_fdb_info info; enum switchdev_notifier_type notifier_type; + int ret; if (WARN_ON(!rd)) - return; + return 0; notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE : SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE; - vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, &info); - call_switchdev_notifiers(notifier_type, vxlan->dev, - &info.info); + vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, NULL, &info); + ret = call_switchdev_notifiers(notifier_type, vxlan->dev, + &info.info, extack); + return notifier_to_errno(ret); } -static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, - struct vxlan_rdst *rd, int type, bool swdev_notify) +static int vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, + struct vxlan_rdst *rd, int type, bool swdev_notify, + struct netlink_ext_ack *extack) { + int err; + if (swdev_notify) { switch (type) { case RTM_NEWNEIGH: - vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, - true); + err = vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, + true, extack); + if (err) + return err; break; case RTM_DELNEIGH: vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, - false); + false, extack); break; } } __vxlan_fdb_notify(vxlan, fdb, rd, type); + return 0; } static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) @@ -423,7 +433,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) .remote_vni = cpu_to_be32(VXLAN_N_VID), }; - vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true); + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); } static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) @@ -435,7 +445,7 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) memcpy(f.eth_addr, eth_addr, ETH_ALEN); - vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true); + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true, NULL); } /* Hash Ethernet address */ @@ -545,7 +555,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, } rdst = first_remote_rcu(f); - vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, fdb_info); + vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, NULL, fdb_info); out: rcu_read_unlock(); @@ -556,19 +566,21 @@ EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc); static int vxlan_fdb_notify_one(struct notifier_block *nb, const struct vxlan_dev *vxlan, const struct vxlan_fdb *f, - const struct vxlan_rdst *rdst) + const struct vxlan_rdst *rdst, + struct netlink_ext_ack *extack) { struct switchdev_notifier_vxlan_fdb_info fdb_info; int rc; - vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, &fdb_info); + vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, extack, &fdb_info); rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE, &fdb_info); return 
notifier_to_errno(rc); } int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, - struct notifier_block *nb) + struct notifier_block *nb, + struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan; struct vxlan_rdst *rdst; @@ -586,7 +598,8 @@ int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, if (f->vni == vni) { list_for_each_entry(rdst, &f->remotes, list) { rc = vxlan_fdb_notify_one(nb, vxlan, - f, rdst); + f, rdst, + extack); if (rc) goto out; } @@ -625,7 +638,7 @@ EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload); /* Replace destination of unicast mac */ static int vxlan_fdb_replace(struct vxlan_fdb *f, union vxlan_addr *ip, __be16 port, __be32 vni, - __u32 ifindex) + __u32 ifindex, struct vxlan_rdst *oldrd) { struct vxlan_rdst *rd; @@ -637,6 +650,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, if (!rd) return 0; + *oldrd = *rd; dst_cache_reset(&rd->dst_cache); rd->remote_ip = *ip; rd->remote_port = port; @@ -826,92 +840,6 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, return 0; } -/* Add new entry to forwarding table -- assumes lock held */ -static int vxlan_fdb_update(struct vxlan_dev *vxlan, - const u8 *mac, union vxlan_addr *ip, - __u16 state, __u16 flags, - __be16 port, __be32 src_vni, __be32 vni, - __u32 ifindex, __u16 ndm_flags, - bool swdev_notify) -{ - __u16 fdb_flags = (ndm_flags & ~NTF_USE); - struct vxlan_rdst *rd = NULL; - struct vxlan_fdb *f; - int notify = 0; - int rc; - - f = __vxlan_find_mac(vxlan, mac, src_vni); - if (f) { - if (flags & NLM_F_EXCL) { - netdev_dbg(vxlan->dev, - "lost race to create %pM\n", mac); - return -EEXIST; - } - - /* Do not allow an externally learned entry to take over an - * entry added by the user. - */ - if (!(fdb_flags & NTF_EXT_LEARNED) || - !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { - if (f->state != state) { - f->state = state; - f->updated = jiffies; - notify = 1; - } - if (f->flags != fdb_flags) { - f->flags = fdb_flags; - f->updated = jiffies; - notify = 1; - } - } - - if ((flags & NLM_F_REPLACE)) { - /* Only change unicasts */ - if (!(is_multicast_ether_addr(f->eth_addr) || - is_zero_ether_addr(f->eth_addr))) { - notify |= vxlan_fdb_replace(f, ip, port, vni, - ifindex); - } else - return -EOPNOTSUPP; - } - if ((flags & NLM_F_APPEND) && - (is_multicast_ether_addr(f->eth_addr) || - is_zero_ether_addr(f->eth_addr))) { - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); - - if (rc < 0) - return rc; - notify |= rc; - } - - if (ndm_flags & NTF_USE) - f->used = jiffies; - } else { - if (!(flags & NLM_F_CREATE)) - return -ENOENT; - - /* Disallow replace to add a multicast entry */ - if ((flags & NLM_F_REPLACE) && - (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) - return -EOPNOTSUPP; - - netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); - rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, - vni, ifindex, fdb_flags, &f); - if (rc < 0) - return rc; - notify = 1; - } - - if (notify) { - if (rd == NULL) - rd = first_remote_rtnl(f); - vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify); - } - - return 0; -} - static void vxlan_fdb_free(struct rcu_head *head) { struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); @@ -929,14 +857,13 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, { struct vxlan_rdst *rd; - netdev_dbg(vxlan->dev, - "delete %pM\n", f->eth_addr); + netdev_dbg(vxlan->dev, "delete %pM\n", f->eth_addr); --vxlan->addrcnt; if (do_notify) list_for_each_entry(rd, &f->remotes, list) vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, - 
swdev_notify); + swdev_notify, NULL); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -950,11 +877,157 @@ static void vxlan_dst_free(struct rcu_head *head) kfree(rd); } +static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, + union vxlan_addr *ip, + __u16 state, __u16 flags, + __be16 port, __be32 vni, + __u32 ifindex, __u16 ndm_flags, + struct vxlan_fdb *f, + bool swdev_notify, + struct netlink_ext_ack *extack) +{ + __u16 fdb_flags = (ndm_flags & ~NTF_USE); + struct vxlan_rdst *rd = NULL; + struct vxlan_rdst oldrd; + int notify = 0; + int rc = 0; + int err; + + /* Do not allow an externally learned entry to take over an entry added + * by the user. + */ + if (!(fdb_flags & NTF_EXT_LEARNED) || + !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { + if (f->state != state) { + f->state = state; + f->updated = jiffies; + notify = 1; + } + if (f->flags != fdb_flags) { + f->flags = fdb_flags; + f->updated = jiffies; + notify = 1; + } + } + + if ((flags & NLM_F_REPLACE)) { + /* Only change unicasts */ + if (!(is_multicast_ether_addr(f->eth_addr) || + is_zero_ether_addr(f->eth_addr))) { + rc = vxlan_fdb_replace(f, ip, port, vni, + ifindex, &oldrd); + notify |= rc; + } else { + return -EOPNOTSUPP; + } + } + if ((flags & NLM_F_APPEND) && + (is_multicast_ether_addr(f->eth_addr) || + is_zero_ether_addr(f->eth_addr))) { + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + + if (rc < 0) + return rc; + notify |= rc; + } + + if (ndm_flags & NTF_USE) + f->used = jiffies; + + if (notify) { + if (rd == NULL) + rd = first_remote_rtnl(f); + + err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, + swdev_notify, extack); + if (err) + goto err_notify; + } + + return 0; + +err_notify: + if ((flags & NLM_F_REPLACE) && rc) + *rd = oldrd; + else if ((flags & NLM_F_APPEND) && rc) { + list_del_rcu(&rd->list); + call_rcu(&rd->rcu, vxlan_dst_free); + } + return err; +} + +static int vxlan_fdb_update_create(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __u16 flags, + __be16 port, __be32 src_vni, __be32 vni, + __u32 ifindex, __u16 ndm_flags, + bool swdev_notify, + struct netlink_ext_ack *extack) +{ + __u16 fdb_flags = (ndm_flags & ~NTF_USE); + struct vxlan_fdb *f; + int rc; + + /* Disallow replace to add a multicast entry */ + if ((flags & NLM_F_REPLACE) && + (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) + return -EOPNOTSUPP; + + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, + vni, ifindex, fdb_flags, &f); + if (rc < 0) + return rc; + + rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, + swdev_notify, extack); + if (rc) + goto err_notify; + + return 0; + +err_notify: + vxlan_fdb_destroy(vxlan, f, false, false); + return rc; +} + +/* Add new entry to forwarding table -- assumes lock held */ +static int vxlan_fdb_update(struct vxlan_dev *vxlan, + const u8 *mac, union vxlan_addr *ip, + __u16 state, __u16 flags, + __be16 port, __be32 src_vni, __be32 vni, + __u32 ifindex, __u16 ndm_flags, + bool swdev_notify, + struct netlink_ext_ack *extack) +{ + struct vxlan_fdb *f; + + f = __vxlan_find_mac(vxlan, mac, src_vni); + if (f) { + if (flags & NLM_F_EXCL) { + netdev_dbg(vxlan->dev, + "lost race to create %pM\n", mac); + return -EEXIST; + } + + return vxlan_fdb_update_existing(vxlan, ip, state, flags, port, + vni, ifindex, ndm_flags, f, + swdev_notify, extack); + } else { + if (!(flags & NLM_F_CREATE)) + return -ENOENT; + + return vxlan_fdb_update_create(vxlan, mac, ip, state, flags, + 
port, src_vni, vni, ifindex, + ndm_flags, swdev_notify, extack); + } +} + static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, struct vxlan_rdst *rd, bool swdev_notify) { list_del_rcu(&rd->list); - vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify); + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify, NULL); call_rcu(&rd->rcu, vxlan_dst_free); } @@ -1025,7 +1098,8 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, /* Add static entry (via netlink) */ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, - const unsigned char *addr, u16 vid, u16 flags) + const unsigned char *addr, u16 vid, u16 flags, + struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); /* struct net *net = dev_net(vxlan->dev); */ @@ -1055,7 +1129,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, port, src_vni, vni, ifindex, ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER, - true); + true, extack); spin_unlock_bh(&vxlan->hash_lock); return err; @@ -1223,7 +1297,7 @@ static bool vxlan_snoop(struct net_device *dev, rdst->remote_ip = *src_ip; f->updated = jiffies; - vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true); + vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); } else { /* learned new entry */ spin_lock(&vxlan->hash_lock); @@ -1236,7 +1310,7 @@ static bool vxlan_snoop(struct net_device *dev, vxlan->cfg.dst_port, vni, vxlan->default_dst.remote_vni, - ifindex, NTF_SELF, true); + ifindex, NTF_SELF, true, NULL); spin_unlock(&vxlan->hash_lock); } @@ -1657,6 +1731,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; } + rcu_read_lock(); + + if (unlikely(!(vxlan->dev->flags & IFF_UP))) { + rcu_read_unlock(); + atomic_long_inc(&vxlan->dev->rx_dropped); + goto drop; + } + stats = this_cpu_ptr(vxlan->dev->tstats); u64_stats_update_begin(&stats->syncp); stats->rx_packets++; @@ -1664,6 +1746,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) u64_stats_update_end(&stats->syncp); gro_cells_receive(&vxlan->gro_cells, skb); + + rcu_read_unlock(); + return 0; drop: @@ -2219,7 +2304,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; - struct net_device *dev = skb->dev; + struct net_device *dev; int len = skb->len; tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); @@ -2239,9 +2324,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, #endif } + rcu_read_lock(); + dev = skb->dev; + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + goto drop; + } + if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, - vni); + vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); u64_stats_update_begin(&tx_stats->syncp); tx_stats->tx_packets++; @@ -2254,8 +2345,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, rx_stats->rx_bytes += len; u64_stats_update_end(&rx_stats->syncp); } else { +drop: dev->stats.rx_dropped++; } + rcu_read_unlock(); } static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, @@ -2608,7 +2701,7 @@ static void vxlan_cleanup(struct timer_list *t) for (h = 0; h < FDB_HASH_SIZE; ++h) { struct hlist_node *p, *n; - spin_lock_bh(&vxlan->hash_lock); + 
spin_lock(&vxlan->hash_lock); hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) { struct vxlan_fdb *f = container_of(p, struct vxlan_fdb, hlist); @@ -2630,7 +2723,7 @@ static void vxlan_cleanup(struct timer_list *t) } else if (time_before(timeout, next_timer)) next_timer = timeout; } - spin_unlock_bh(&vxlan->hash_lock); + spin_unlock(&vxlan->hash_lock); } mod_timer(&vxlan->age_timer, next_timer); @@ -2685,6 +2778,8 @@ static void vxlan_uninit(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); + gro_cells_destroy(&vxlan->gro_cells); + vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni); free_percpu(dev->tstats); @@ -2841,6 +2936,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_fdb_dump = vxlan_fdb_dump, .ndo_fdb_get = vxlan_fdb_get, .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, + .ndo_change_proto_down = dev_change_proto_down_generic, }; static const struct net_device_ops vxlan_netdev_raw_ops = { @@ -3478,9 +3574,12 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, goto errout; /* notify default fdb entry */ - if (f) - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, - true); + if (f) { + err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), + RTM_NEWNEIGH, true, extack); + if (err) + goto errout; + } list_add(&vxlan->next, &vn->vxlan_list); return 0; @@ -3497,11 +3596,40 @@ errout: return err; } +/* Set/clear flags based on attribute */ +static int vxlan_nl2flag(struct vxlan_config *conf, struct nlattr *tb[], + int attrtype, unsigned long mask, bool changelink, + bool changelink_supported, + struct netlink_ext_ack *extack) +{ + unsigned long flags; + + if (!tb[attrtype]) + return 0; + + if (changelink && !changelink_supported) { + vxlan_flag_attr_error(attrtype, extack); + return -EOPNOTSUPP; + } + + if (vxlan_policy[attrtype].type == NLA_FLAG) + flags = conf->flags | mask; + else if (nla_get_u8(tb[attrtype])) + flags = conf->flags | mask; + else + flags = conf->flags & ~mask; + + conf->flags = flags; + + return 0; +} + static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], struct net_device *dev, struct vxlan_config *conf, - bool changelink) + bool changelink, struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); + int err = 0; memset(conf, 0, sizeof(*conf)); @@ -3512,40 +3640,54 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_VXLAN_ID]) { __be32 vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); - if (changelink && (vni != conf->vni)) + if (changelink && (vni != conf->vni)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], "Cannot change VNI"); return -EOPNOTSUPP; + } conf->vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); } if (data[IFLA_VXLAN_GROUP]) { - if (changelink && (conf->remote_ip.sa.sa_family != AF_INET)) + if (changelink && (conf->remote_ip.sa.sa_family != AF_INET)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP], "New group address family does not match old group"); return -EOPNOTSUPP; + } conf->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); conf->remote_ip.sa.sa_family = AF_INET; } else if (data[IFLA_VXLAN_GROUP6]) { - if (!IS_ENABLED(CONFIG_IPV6)) + if (!IS_ENABLED(CONFIG_IPV6)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; + } - if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6)) + if (changelink && (conf->remote_ip.sa.sa_family != AF_INET6)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_GROUP6], "New 
group address family does not match old group"); return -EOPNOTSUPP; + } conf->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]); conf->remote_ip.sa.sa_family = AF_INET6; } if (data[IFLA_VXLAN_LOCAL]) { - if (changelink && (conf->saddr.sa.sa_family != AF_INET)) + if (changelink && (conf->saddr.sa.sa_family != AF_INET)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL], "New local address family does not match old"); return -EOPNOTSUPP; + } conf->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]); conf->saddr.sa.sa_family = AF_INET; } else if (data[IFLA_VXLAN_LOCAL6]) { - if (!IS_ENABLED(CONFIG_IPV6)) + if (!IS_ENABLED(CONFIG_IPV6)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; + } - if (changelink && (conf->saddr.sa.sa_family != AF_INET6)) + if (changelink && (conf->saddr.sa.sa_family != AF_INET6)) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LOCAL6], "New local address family does not match old"); return -EOPNOTSUPP; + } /* TODO: respect scope id */ conf->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]); @@ -3562,9 +3704,12 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], conf->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); if (data[IFLA_VXLAN_TTL_INHERIT]) { - if (changelink) - return -EOPNOTSUPP; - conf->flags |= VXLAN_F_TTL_INHERIT; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_TTL_INHERIT, + VXLAN_F_TTL_INHERIT, changelink, false, + extack); + if (err) + return err; + } if (data[IFLA_VXLAN_LABEL]) @@ -3572,10 +3717,11 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], IPV6_FLOWLABEL_MASK; if (data[IFLA_VXLAN_LEARNING]) { - if (nla_get_u8(data[IFLA_VXLAN_LEARNING])) - conf->flags |= VXLAN_F_LEARN; - else - conf->flags &= ~VXLAN_F_LEARN; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_LEARNING, + VXLAN_F_LEARN, changelink, true, + extack); + if (err) + return err; } else if (!changelink) { /* default to learn on a new device */ conf->flags |= VXLAN_F_LEARN; @@ -3585,44 +3731,52 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); if (data[IFLA_VXLAN_PROXY]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_PROXY])) - conf->flags |= VXLAN_F_PROXY; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_PROXY, + VXLAN_F_PROXY, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_RSC]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_RSC])) - conf->flags |= VXLAN_F_RSC; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_RSC, + VXLAN_F_RSC, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_L2MISS]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_L2MISS])) - conf->flags |= VXLAN_F_L2MISS; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L2MISS, + VXLAN_F_L2MISS, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_L3MISS]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_L3MISS])) - conf->flags |= VXLAN_F_L3MISS; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_L3MISS, + VXLAN_F_L3MISS, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_LIMIT]) { - if (changelink) + if (changelink) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_LIMIT], + "Cannot change limit"); return -EOPNOTSUPP; + } conf->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); } if (data[IFLA_VXLAN_COLLECT_METADATA]) { 
- if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA])) - conf->flags |= VXLAN_F_COLLECT_METADATA; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_COLLECT_METADATA, + VXLAN_F_COLLECT_METADATA, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_PORT_RANGE]) { @@ -3632,72 +3786,92 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], conf->port_min = ntohs(p->low); conf->port_max = ntohs(p->high); } else { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE], + "Cannot change port range"); return -EOPNOTSUPP; } } if (data[IFLA_VXLAN_PORT]) { - if (changelink) + if (changelink) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT], + "Cannot change port"); return -EOPNOTSUPP; + } conf->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); } if (data[IFLA_VXLAN_UDP_CSUM]) { - if (changelink) + if (changelink) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_UDP_CSUM], + "Cannot change UDP_CSUM flag"); return -EOPNOTSUPP; + } if (!nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) conf->flags |= VXLAN_F_UDP_ZERO_CSUM_TX; } if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) - conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, + VXLAN_F_UDP_ZERO_CSUM6_TX, changelink, + false, extack); + if (err) + return err; } if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) - conf->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, + VXLAN_F_UDP_ZERO_CSUM6_RX, changelink, + false, extack); + if (err) + return err; } if (data[IFLA_VXLAN_REMCSUM_TX]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX])) - conf->flags |= VXLAN_F_REMCSUM_TX; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_TX, + VXLAN_F_REMCSUM_TX, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_REMCSUM_RX]) { - if (changelink) - return -EOPNOTSUPP; - if (nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX])) - conf->flags |= VXLAN_F_REMCSUM_RX; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_RX, + VXLAN_F_REMCSUM_RX, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_GBP]) { - if (changelink) - return -EOPNOTSUPP; - conf->flags |= VXLAN_F_GBP; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GBP, + VXLAN_F_GBP, changelink, false, extack); + if (err) + return err; } if (data[IFLA_VXLAN_GPE]) { - if (changelink) - return -EOPNOTSUPP; - conf->flags |= VXLAN_F_GPE; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_GPE, + VXLAN_F_GPE, changelink, false, + extack); + if (err) + return err; } if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) { - if (changelink) - return -EOPNOTSUPP; - conf->flags |= VXLAN_F_REMCSUM_NOPARTIAL; + err = vxlan_nl2flag(conf, data, IFLA_VXLAN_REMCSUM_NOPARTIAL, + VXLAN_F_REMCSUM_NOPARTIAL, changelink, + false, extack); + if (err) + return err; } if (tb[IFLA_MTU]) { - if (changelink) + if (changelink) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU], + "Cannot change mtu"); return -EOPNOTSUPP; + } conf->mtu = nla_get_u32(tb[IFLA_MTU]); } @@ -3714,7 +3888,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct vxlan_config conf; int err; - err = vxlan_nl2conf(tb, data, dev, &conf, false); + err = vxlan_nl2conf(tb, data, dev, &conf, false, extack); if (err) return err; @@ -3727,56 +3901,51 @@ static int 
vxlan_changelink(struct net_device *dev, struct nlattr *tb[], { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; - unsigned long old_age_interval; - struct vxlan_rdst old_dst; + struct net_device *lowerdev; struct vxlan_config conf; int err; - err = vxlan_nl2conf(tb, data, - dev, &conf, true); + err = vxlan_nl2conf(tb, data, dev, &conf, true, extack); if (err) return err; - old_age_interval = vxlan->cfg.age_interval; - memcpy(&old_dst, dst, sizeof(struct vxlan_rdst)); - - err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack); + err = vxlan_config_validate(vxlan->net, &conf, &lowerdev, + vxlan, extack); if (err) return err; - if (old_age_interval != vxlan->cfg.age_interval) - mod_timer(&vxlan->age_timer, jiffies); - /* handle default dst entry */ - if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) { + if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) { spin_lock_bh(&vxlan->hash_lock); - if (!vxlan_addr_any(&old_dst.remote_ip)) - __vxlan_fdb_delete(vxlan, all_zeros_mac, - old_dst.remote_ip, - vxlan->cfg.dst_port, - old_dst.remote_vni, - old_dst.remote_vni, - old_dst.remote_ifindex, - true); - - if (!vxlan_addr_any(&dst->remote_ip)) { + if (!vxlan_addr_any(&conf.remote_ip)) { err = vxlan_fdb_update(vxlan, all_zeros_mac, - &dst->remote_ip, + &conf.remote_ip, NUD_REACHABLE | NUD_PERMANENT, NLM_F_APPEND | NLM_F_CREATE, vxlan->cfg.dst_port, - dst->remote_vni, - dst->remote_vni, - dst->remote_ifindex, - NTF_SELF, true); + conf.vni, conf.vni, + conf.remote_ifindex, + NTF_SELF, true, extack); if (err) { spin_unlock_bh(&vxlan->hash_lock); return err; } } + if (!vxlan_addr_any(&dst->remote_ip)) + __vxlan_fdb_delete(vxlan, all_zeros_mac, + dst->remote_ip, + vxlan->cfg.dst_port, + dst->remote_vni, + dst->remote_vni, + dst->remote_ifindex, + true); spin_unlock_bh(&vxlan->hash_lock); } + if (conf.age_interval != vxlan->cfg.age_interval) + mod_timer(&vxlan->age_timer, jiffies); + + vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); return 0; } @@ -3786,7 +3955,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) vxlan_flush(vxlan, true); - gro_cells_destroy(&vxlan->gro_cells); list_del(&vxlan->next); unregister_netdevice_queue(dev, head); } @@ -4051,8 +4219,11 @@ vxlan_fdb_external_learn_add(struct net_device *dev, struct switchdev_notifier_vxlan_fdb_info *fdb_info) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct netlink_ext_ack *extack; int err; + extack = switchdev_notifier_info_to_extack(&fdb_info->info); + spin_lock_bh(&vxlan->hash_lock); err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip, NUD_REACHABLE, @@ -4062,7 +4233,7 @@ vxlan_fdb_external_learn_add(struct net_device *dev, fdb_info->remote_vni, fdb_info->remote_ifindex, NTF_USE | NTF_SELF | NTF_EXT_LEARNED, - false); + false, extack); spin_unlock_bh(&vxlan->hash_lock); return err; diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index f6b000ddcd15..55f76e422fa0 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -769,7 +769,7 @@ static int cosa_net_tx_done(struct channel_data *chan, int size) chan->netdev->stats.tx_aborted_errors++; return 1; } - dev_kfree_skb_irq(chan->tx_skb); + dev_consume_skb_irq(chan->tx_skb); chan->tx_skb = NULL; chan->netdev->stats.tx_packets++; chan->netdev->stats.tx_bytes += size; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index c0b0f525c87c..fa78d2b14136 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -456,16 +456,16 @@ 
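/*
 * Note: the WAN driver hunks that follow all make the same substitution:
 * buffers freed after a successful transmit now go through
 * dev_consume_skb_irq() instead of dev_kfree_skb_irq().  Both free the skb
 * from hard-IRQ context; the consume variant is simply not reported as a
 * drop by the kfree_skb tracepoint / drop monitor.  A minimal sketch of the
 * intended split (the helper name is illustrative):
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_tx_done(struct net_device *dev, struct sk_buff *skb,
                            bool ok)
{
        if (ok) {
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb->len;
                dev_consume_skb_irq(skb);   /* normal completion, not a drop */
        } else {
                dev->stats.tx_errors++;
                dev_kfree_skb_irq(skb);     /* counted as a drop for tracing */
        }
}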
static int state_check(u32 state, struct dscc4_dev_priv *dpriv, int ret = 0; if (debug > 1) { - if (SOURCE_ID(state) != dpriv->dev_id) { - printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n", - dev->name, msg, SOURCE_ID(state), state ); + if (SOURCE_ID(state) != dpriv->dev_id) { + printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n", + dev->name, msg, SOURCE_ID(state), state); ret = -1; - } - if (state & 0x0df80c00) { - printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n", - dev->name, msg, state); + } + if (state & 0x0df80c00) { + printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n", + dev->name, msg, state); ret = -1; - } + } } return ret; } @@ -1575,7 +1575,7 @@ try: dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); dpriv->tx_skbuff[cur] = NULL; ++dpriv->tx_dirty; } else { @@ -1760,25 +1760,25 @@ try: } else { /* SccEvt */ if (debug > 1) { //FIXME: verifier la presence de tous les evenements - static struct { - u32 mask; - const char *irq_name; - } evts[] = { - { 0x00008000, "TIN"}, - { 0x00000020, "RSC"}, - { 0x00000010, "PCE"}, - { 0x00000008, "PLLA"}, - { 0, NULL} - }, *evt; - - for (evt = evts; evt->irq_name; evt++) { - if (state & evt->mask) { + static struct { + u32 mask; + const char *irq_name; + } evts[] = { + { 0x00008000, "TIN"}, + { 0x00000020, "RSC"}, + { 0x00000010, "PCE"}, + { 0x00000008, "PLLA"}, + { 0, NULL} + }, *evt; + + for (evt = evts; evt->irq_name; evt++) { + if (state & evt->mask) { printk(KERN_DEBUG "%s: %s\n", - dev->name, evt->irq_name); - if (!(state &= ~evt->mask)) - goto try; + dev->name, evt->irq_name); + if (!(state &= ~evt->mask)) + goto try; + } } - } } else { if (!(state &= ~0x0000c03c)) goto try; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 66d889d54e58..a08f04c3f644 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) memset(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), 0, skb->len); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); priv->tx_skbuff[priv->skb_dirtytx] = NULL; priv->skb_dirtytx = diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c index 6a505c26a3e7..5c60dc60a8e6 100644 --- a/drivers/net/wan/ixp4xx_hss.c +++ b/drivers/net/wan/ixp4xx_hss.c @@ -246,7 +246,7 @@ #ifdef __ARMEB__ typedef struct sk_buff buffer_t; #define free_buffer dev_kfree_skb -#define free_buffer_irq dev_kfree_skb_irq +#define free_buffer_irq dev_consume_skb_irq #else typedef void buffer_t; #define free_buffer kfree diff --git a/drivers/net/wan/lmc/Makefile b/drivers/net/wan/lmc/Makefile index 609710d64eb5..247f60c401ef 100644 --- a/drivers/net/wan/lmc/Makefile +++ b/drivers/net/wan/lmc/Makefile @@ -14,4 +14,4 @@ lmc-objs := lmc_debug.o lmc_media.o lmc_main.o lmc_proto.o # -DDEBUG \ # -DLMC_PACKET_LOG -ccflags-y := -I. 
$(DBGDEF) +ccflags-y := $(DBGDEF) diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 4907453f17f5..22b065ff6d39 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1320,8 +1320,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ sc->lmc_device->stats.tx_packets++; } - // dev_kfree_skb(sc->lmc_txq[i]); - dev_kfree_skb_irq(sc->lmc_txq[i]); + dev_consume_skb_irq(sc->lmc_txq[i]); sc->lmc_txq[i] = NULL; badtx++; diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index 8e8c4c0e1b64..40c04ea1200a 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c @@ -761,7 +761,7 @@ send_complete( struct net_device *dev ) dev->stats.tx_packets++; dev->stats.tx_bytes += nl->tx_buf_p->len; #endif - dev_kfree_skb_irq( nl->tx_buf_p ); + dev_consume_skb_irq(nl->tx_buf_p); nl->tx_buf_p = NULL; diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index d573a57bc301..10d5333b7a88 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -185,7 +185,7 @@ static inline void wanxl_tx_intr(struct port *port) desc->stat = PACKET_EMPTY; /* Free descriptor */ pci_unmap_single(port->card->pdev, desc->address, skb->len, PCI_DMA_TODEVICE); - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); port->tx_in = (port->tx_in + 1) % TX_BUFFERS; } } @@ -565,7 +565,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, u32 plx_phy; /* PLX PCI base address */ u32 mem_phy; /* memory PCI base addr */ u8 __iomem *mem; /* memory virtual base addr */ - int i, ports, alloc_size; + int i, ports; #ifndef MODULE pr_info_once("%s\n", version); @@ -601,8 +601,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, default: ports = 4; } - alloc_size = sizeof(struct card) + ports * sizeof(struct port); - card = kzalloc(alloc_size, GFP_KERNEL); + card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL); if (card == NULL) { pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index deea41e96f01..0e56ab0aed9a 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -1536,7 +1536,7 @@ static void z8530_tx_done(struct z8530_channel *c) z8530_tx_begin(c); c->netdevice->stats.tx_packets++; c->netdevice->stats.tx_bytes += skb->len; - dev_kfree_skb_irq(skb); + dev_consume_skb_irq(skb); } /** diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 0b602951ff6b..d28b96d06919 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1260,8 +1260,8 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) goto error_msg_hdr_check; result = -EIO; num_pls = le16_to_cpu(msg_hdr->num_pls); - pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ - num_pls * sizeof(msg_hdr->pld[0]); + /* Check payload descriptor(s) */ + pl_itr = struct_size(msg_hdr, pld, num_pls); pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); if (pl_itr > skb_len) { /* got all the payload descriptors? */ dev_err(dev, "RX: HW BUG? message too short (%u bytes) for " diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c index f8eb66ef2944..73842a830645 100644 --- a/drivers/net/wimax/i2400m/usb.c +++ b/drivers/net/wimax/i2400m/usb.c @@ -210,6 +210,7 @@ retry: msleep(10); /* give the device some time */ goto retry; } + /* fall through */ case -EINVAL: /* while removing driver */ case -ENODEV: /* dev disconnect ... 
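/*
 * Note: the wanxl and i2400m hunks above replace open-coded
 * "sizeof(header) + n * sizeof(element)" arithmetic with struct_size() from
 * <linux/overflow.h>, which saturates to SIZE_MAX on overflow so the
 * allocation fails cleanly instead of being undersized.  A minimal sketch
 * with an illustrative structure (not one from these drivers):
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_port {
        void *priv;
};

struct example_card {
        int nports;
        struct example_port ports[];   /* flexible array member */
};

static struct example_card *example_alloc_card(int nports)
{
        struct example_card *card;

        /* sizeof(*card) + nports * sizeof(card->ports[0]), overflow-checked */
        card = kzalloc(struct_size(card, ports, nports), GFP_KERNEL);
        if (card)
                card->nports = nports;
        return card;
}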
*/ case -ENOENT: /* just ignore it */ diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 66326b949ab1..142c777b287f 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: ISC obj-$(CONFIG_ATH10K) += ath10k_core.o ath10k_core-y += mac.o \ debug.o \ diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 4cd69aca75e2..0bf726c55736 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved. * Copyright (c) 2015 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include #include @@ -661,7 +650,8 @@ static void ath10k_ahb_hif_stop(struct ath10k *ar) ath10k_pci_flush(ar); } -static int ath10k_ahb_hif_power_up(struct ath10k *ar) +static int ath10k_ahb_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { int ret; diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index d43e375215c8..cee11a3ae2a5 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. * Copyright (c) 2015 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _AHB_H_ diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c index 1750b182209b..95dc4be82e5c 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.c +++ b/drivers/net/wireless/ath/ath10k/bmi.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "bmi.h" diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h index 725c9afc63f2..ef3bdba43bed 100644 --- a/drivers/net/wireless/ath/ath10k/bmi.h +++ b/drivers/net/wireless/ath/ath10k/bmi.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _BMI_H_ diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c index 2a5668b4f6bc..24b983edb357 100644 --- a/drivers/net/wireless/ath/ath10k/ce.c +++ b/drivers/net/wireless/ath/ath10k/ce.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
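/*
 * Note: the ath10k hunks in this part of the patch replace the full ISC
 * permission and disclaimer text with one-line SPDX identifiers.  The
 * kernel's license-rules documentation puts the tag on the first line of
 * the file, C99 comment style in .c sources and C89 block-comment style in
 * headers; those are the two forms used throughout the conversions here:
 */
// SPDX-License-Identifier: ISC
/* SPDX-License-Identifier: ISC */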
*/ #include "hif.h" @@ -228,11 +217,31 @@ ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar, } static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar, - u32 ce_ctrl_addr, - unsigned int addr) + u32 ce_id, + u64 addr) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->sr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_src_ring_base_addr_hi) { + ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } +} + +static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) { + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + ath10k_ce_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->sr_base_addr, addr); + ar->hw_ce_regs->sr_base_addr_hi, addr_hi); } static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar, @@ -313,11 +322,36 @@ static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar, } static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar, - u32 ce_ctrl_addr, - u32 addr) + u32 ce_id, + u64 addr) { + struct ath10k_ce *ce = ath10k_ce_priv(ar); + struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id]; + u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id); + u32 addr_lo = lower_32_bits(addr); + ath10k_ce_write32(ar, ce_ctrl_addr + - ar->hw_ce_regs->dr_base_addr, addr); + ar->hw_ce_regs->dr_base_addr_lo, addr_lo); + + if (ce_state->ops->ce_set_dest_ring_base_addr_hi) { + ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr, + addr); + } +} + +static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr) +{ + u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK; + u32 reg_value; + + reg_value = ath10k_ce_read32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_hi); + reg_value &= ~CE_DESC_ADDR_HI_MASK; + reg_value |= addr_hi; + ath10k_ce_write32(ar, ce_ctrl_addr + + ar->hw_ce_regs->dr_base_addr_hi, reg_value); } static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar, @@ -500,14 +534,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state, write_index = CE_RING_IDX_INCR(nentries_mask, write_index); /* WORKAROUND */ - if (!(flags & CE_SEND_FLAG_GATHER)) { - if (ar->hw_params.shadow_reg_support) - ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, - write_index); - else - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, - write_index); - } + if (!(flags & CE_SEND_FLAG_GATHER)) + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); src_ring->write_index = write_index; exit: @@ -563,7 +591,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, addr = (__le32 *)&sdesc.addr; - flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK; + flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK; addr[0] = __cpu_to_le32(buffer); addr[1] = __cpu_to_le32(flags); if (flags & CE_SEND_FLAG_GATHER) @@ -581,8 +609,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state, /* Update Source Ring Write Index */ write_index = CE_RING_IDX_INCR(nentries_mask, write_index); - if (!(flags & CE_SEND_FLAG_GATHER)) - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index); + if (!(flags & CE_SEND_FLAG_GATHER)) { + if (ar->hw_params.shadow_reg_support) + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state, + write_index); + else + 
ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, + write_index); + } src_ring->write_index = write_index; exit: @@ -731,7 +765,7 @@ static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe, return -ENOSPC; desc->addr = __cpu_to_le64(paddr); - desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK); + desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK); desc->nbytes = 0; @@ -1032,8 +1066,8 @@ EXPORT_SYMBOL(ath10k_ce_revoke_recv_next); * Guts of ath10k_ce_completed_send_next. * The caller takes responsibility for any necessary locking. */ -int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, - void **per_transfer_contextp) +static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) { struct ath10k_ce_ring *src_ring = ce_state->src_ring; u32 ctrl_addr = ce_state->ctrl_addr; @@ -1084,6 +1118,66 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, return 0; } + +static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) +{ + struct ath10k_ce_ring *src_ring = ce_state->src_ring; + u32 ctrl_addr = ce_state->ctrl_addr; + struct ath10k *ar = ce_state->ar; + unsigned int nentries_mask = src_ring->nentries_mask; + unsigned int sw_index = src_ring->sw_index; + unsigned int read_index; + struct ce_desc_64 *desc; + + if (src_ring->hw_index == sw_index) { + /* + * The SW completion index has caught up with the cached + * version of the HW completion index. + * Update the cached HW completion index to see whether + * the SW has really caught up to the HW, or if the cached + * value of the HW index has become stale. + */ + + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + if (read_index == 0xffffffff) + return -ENODEV; + + read_index &= nentries_mask; + src_ring->hw_index = read_index; + } + + if (ar->hw_params.rri_on_ddr) + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); + else + read_index = src_ring->hw_index; + + if (read_index == sw_index) + return -EIO; + + if (per_transfer_contextp) + *per_transfer_contextp = + src_ring->per_transfer_context[sw_index]; + + /* sanity */ + src_ring->per_transfer_context[sw_index] = NULL; + desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, + sw_index); + desc->nbytes = 0; + + /* Update sw_index */ + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); + src_ring->sw_index = sw_index; + + return 0; +} + +int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp) +{ + return ce_state->ops->ce_completed_send_next_nolock(ce_state, + per_transfer_contextp); +} EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock); static void ath10k_ce_extract_desc_data(struct ath10k *ar, @@ -1346,7 +1440,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ath10k_ce_src_ring_write_index_get(ar, ctrl_addr); src_ring->write_index &= src_ring->nentries_mask; - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, + ath10k_ce_src_ring_base_addr_set(ar, ce_id, src_ring->base_addr_ce_space); ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries); ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max); @@ -1385,7 +1479,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr); dest_ring->write_index &= dest_ring->nentries_mask; - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, dest_ring->base_addr_ce_space); ath10k_ce_dest_ring_size_set(ar, 
ctrl_addr, nentries); ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0); @@ -1404,12 +1498,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar, u32 nentries) { src_ring->shadow_base_unaligned = kcalloc(nentries, - sizeof(struct ce_desc), + sizeof(struct ce_desc_64), GFP_KERNEL); if (!src_ring->shadow_base_unaligned) return -ENOMEM; - src_ring->shadow_base = (struct ce_desc *) + src_ring->shadow_base = (struct ce_desc_64 *) PTR_ALIGN(src_ring->shadow_base_unaligned, CE_DESC_RING_ALIGN); return 0; @@ -1461,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id, ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); if (ret) { dma_free_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + + (nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN), src_ring->base_addr_owner_space_unaligned, base_addr); @@ -1554,7 +1648,8 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id, */ dest_ring->base_addr_owner_space_unaligned = dma_alloc_coherent(ar->dev, - (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN), + (nentries * sizeof(struct ce_desc) + + CE_DESC_RING_ALIGN), &base_addr, GFP_KERNEL); if (!dest_ring->base_addr_owner_space_unaligned) { kfree(dest_ring); @@ -1660,7 +1755,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) { u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); + ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0); ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0); @@ -1670,7 +1765,7 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) { u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id); - ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); + ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0); ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0); } @@ -1802,6 +1897,9 @@ static const struct ath10k_ce_ops ce_ops = { .ce_extract_desc_data = ath10k_ce_extract_desc_data, .ce_free_pipe = _ath10k_ce_free_pipe, .ce_send_nolock = _ath10k_ce_send_nolock, + .ce_set_src_ring_base_addr_hi = NULL, + .ce_set_dest_ring_base_addr_hi = NULL, + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock, }; static const struct ath10k_ce_ops ce_64_ops = { @@ -1814,6 +1912,9 @@ static const struct ath10k_ce_ops ce_64_ops = { .ce_extract_desc_data = ath10k_ce_extract_desc_data_64, .ce_free_pipe = _ath10k_ce_free_pipe_64, .ce_send_nolock = _ath10k_ce_send_nolock_64, + .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi, + .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi, + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64, }; static void ath10k_ce_set_ops(struct ath10k *ar, @@ -1909,7 +2010,7 @@ void ath10k_ce_alloc_rri(struct ath10k *ar) lower_32_bits(ce->paddr_rri)); ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high, (upper_32_bits(ce->paddr_rri) & - CE_DESC_FLAGS_GET_MASK)); + CE_DESC_ADDR_HI_MASK)); for (i = 0; i < CE_COUNT; i++) { ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr; diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index ead9987c3259..a7478c240f78 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. 
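/*
 * Note: the ce.c changes above extend the copy-engine ring setup to targets
 * whose descriptor ring base address is wider than 32 bits: the DMA address
 * is split with lower_32_bits()/upper_32_bits() and written to a low/high
 * register pair, with the high word masked down to the few address bits the
 * hardware implements (CE_DESC_ADDR_HI_MASK).  A stripped-down sketch of
 * that idiom, assuming illustrative register pointers and mask:
 */
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_ADDR_HI_MASK    GENMASK(4, 0)  /* only 5 high bits wired up */

static void example_write_ring_base(void __iomem *lo_reg,
                                    void __iomem *hi_reg, dma_addr_t base)
{
        writel(lower_32_bits(base), lo_reg);
        writel(upper_32_bits(base) & EXAMPLE_ADDR_HI_MASK, hi_reg);
}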
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _CE_H_ @@ -39,8 +28,8 @@ struct ath10k_ce_pipe; #define CE_DESC_FLAGS_BYTE_SWAP (1 << 1) #define CE_WCN3990_DESC_FLAGS_GATHER BIT(31) -#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0) -#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0) +#define CE_DESC_ADDR_MASK GENMASK_ULL(34, 0) +#define CE_DESC_ADDR_HI_MASK GENMASK(4, 0) /* Following desc flags are used in QCA99X0 */ #define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2) @@ -104,7 +93,7 @@ struct ath10k_ce_ring { /* Host address space */ void *base_addr_owner_space_unaligned; /* CE address space */ - u32 base_addr_ce_space_unaligned; + dma_addr_t base_addr_ce_space_unaligned; /* * Actual start of descriptors. @@ -115,10 +104,10 @@ struct ath10k_ce_ring { void *base_addr_owner_space; /* CE address space */ - u32 base_addr_ce_space; + dma_addr_t base_addr_ce_space; char *shadow_base_unaligned; - struct ce_desc *shadow_base; + struct ce_desc_64 *shadow_base; /* keep last */ void *per_transfer_context[0]; @@ -334,6 +323,14 @@ struct ath10k_ce_ops { void *per_transfer_context, dma_addr_t buffer, u32 nbytes, u32 transfer_id, u32 flags); + void (*ce_set_src_ring_base_addr_hi)(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr); + void (*ce_set_dest_ring_base_addr_hi)(struct ath10k *ar, + u32 ce_ctrl_addr, + u64 addr); + int (*ce_completed_send_next_nolock)(struct ath10k_ce_pipe *ce_state, + void **per_transfer_contextp); }; static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 399b501f3c3c..835b8de92d55 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include @@ -548,7 +537,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { { .id = WCN3990_HW_1_0_DEV_VERSION, .dev_id = 0, - .bus = ATH10K_BUS_PCI, + .bus = ATH10K_BUS_SNOC, .name = "wcn3990 hw1.0", .continuous_frag_desc = true, .tx_chain_mask = 0x7, @@ -560,10 +549,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .sw_decrypt_mcast_mgmt = true, .hw_ops = &wcn3990_ops, .decap_align_bytes = 1, - .num_peers = TARGET_HL_10_TLV_NUM_PEERS, - .n_cipher_suites = 8, - .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT, - .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES, + .num_peers = TARGET_HL_TLV_NUM_PEERS, + .n_cipher_suites = 11, + .ast_skid_limit = TARGET_HL_TLV_AST_SKID_LIMIT, + .num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES, .target_64bit = true, .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, .per_ce_irq = true, @@ -648,11 +637,24 @@ static void ath10k_init_sdio(struct ath10k *ar) ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99); ath10k_bmi_read32(ar, hi_acs_flags, ¶m); - param |= (HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET | - HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET | - HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE); + /* Data transfer is not initiated, when reduced Tx completion + * is used for SDIO. disable it until fixed + */ + param &= ~HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET; + /* Alternate credit size of 1544 as used by SDIO firmware is + * not big enough for mac80211 / native wifi frames. disable it + */ + param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; + param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET; ath10k_bmi_write32(ar, hi_acs_flags, param); + + /* Explicitly set fwlog prints to zero as target may turn it on + * based on scratch registers. + */ + ath10k_bmi_read32(ar, hi_option_flag, ¶m); + param |= HI_OPTION_DISABLE_DBGLOG; + ath10k_bmi_write32(ar, hi_option_flag, param); } static int ath10k_init_configure_target(struct ath10k *ar) @@ -2309,10 +2311,14 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->max_num_stations = TARGET_TLV_NUM_STATIONS; ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS; ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS; - ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; + if (ar->hif.bus == ATH10K_BUS_SDIO) + ar->htt.max_num_pending_tx = + TARGET_TLV_NUM_MSDU_DESC_HL; + else + ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS; - ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | - WMI_STAT_PEER; + ar->fw_stats_req_mask = WMI_TLV_STAT_PDEV | WMI_TLV_STAT_VDEV | + WMI_TLV_STAT_PEER | WMI_TLV_STAT_PEER_EXTD; ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC; break; @@ -2556,6 +2562,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, goto err_hif_stop; } + status = ath10k_hif_swap_mailbox(ar); + if (status) { + ath10k_err(ar, "failed to swap mailbox: %d\n", status); + goto err_hif_stop; + } + if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { status = ath10k_htt_connect(&ar->htt); if (status) { @@ -2621,6 +2633,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, ar->wmi.svc_map)) val |= WMI_10_4_TX_DATA_ACK_RSSI; + if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) + val |= WMI_10_4_REPORT_AIRTIME; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, @@ -2649,6 +2664,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, goto err_hif_stop; } + status = 
ath10k_wmi_pdev_set_base_macaddr(ar, ar->mac_addr); + if (status && status != -EOPNOTSUPP) { + ath10k_err(ar, + "failed to set base mac address: %d\n", status); + goto err_hif_stop; + } + /* Some firmware revisions do not properly set up hardware rx filter * registers. * @@ -2763,7 +2785,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar) struct bmi_target_info target_info; int ret = 0; - ret = ath10k_hif_power_up(ar); + ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); if (ret) { ath10k_err(ar, "could not power on hif bus (%d)\n", ret); return ret; @@ -2977,8 +2999,8 @@ err: int ath10k_core_register(struct ath10k *ar, const struct ath10k_bus_params *bus_params) { - ar->chip_id = bus_params->chip_id; - ar->dev_type = bus_params->dev_type; + ar->bus_param = *bus_params; + queue_work(ar->workqueue, &ar->register_work); return 0; @@ -3098,9 +3120,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, mutex_init(&ar->conf_mutex); spin_lock_init(&ar->data_lock); - spin_lock_init(&ar->txqs_lock); - INIT_LIST_HEAD(&ar->txqs); INIT_LIST_HEAD(&ar->peers); init_waitqueue_head(&ar->peer_mapping_wq); init_waitqueue_head(&ar->htt.empty_tx_wq); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 46e9c8c97a4d..e08a17b01e03 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _CORE_H_ @@ -90,6 +79,9 @@ /* The magic used by QCA spec */ #define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_" +/* Default Airtime weight multipler (Tuned for multiclient performance) */ +#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4 + struct ath10k; static inline const char *ath10k_bus_str(enum ath10k_bus bus) @@ -116,6 +108,7 @@ enum ath10k_skb_flags { ATH10K_SKB_F_DELIVER_CAB = BIT(2), ATH10K_SKB_F_MGMT = BIT(3), ATH10K_SKB_F_QOS = BIT(4), + ATH10K_SKB_F_RAW_TX = BIT(5), }; struct ath10k_skb_cb { @@ -123,6 +116,7 @@ struct ath10k_skb_cb { u8 flags; u8 eid; u16 msdu_id; + u16 airtime_est; struct ieee80211_vif *vif; struct ieee80211_txq *txq; } __packed; @@ -195,7 +189,7 @@ struct ath10k_fw_stats_peer { u32 peer_rssi; u32 peer_tx_rate; u32 peer_rx_rate; /* 10x only */ - u32 rx_duration; + u64 rx_duration; }; struct ath10k_fw_extd_stats_peer { @@ -443,14 +437,14 @@ enum ath10k_amsdu_subfrm_num { }; struct ath10k_sta_tid_stats { - unsigned long int rx_pkt_from_fw; - unsigned long int rx_pkt_unchained; - unsigned long int rx_pkt_drop_chained; - unsigned long int rx_pkt_drop_filter; - unsigned long int rx_pkt_err[ATH10K_PKT_RX_ERR_MAX]; - unsigned long int rx_pkt_queued_for_mac; - unsigned long int rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX]; - unsigned long int rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX]; + unsigned long rx_pkt_from_fw; + unsigned long rx_pkt_unchained; + unsigned long rx_pkt_drop_chained; + unsigned long rx_pkt_drop_filter; + unsigned long rx_pkt_err[ATH10K_PKT_RX_ERR_MAX]; + unsigned long rx_pkt_queued_for_mac; + unsigned long rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX]; + unsigned long rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX]; }; enum ath10k_counter_type { @@ -495,6 +489,7 @@ struct ath10k_sta { u16 peer_id; struct rate_info txrate; struct ieee80211_tx_info tx_info; + u32 last_tx_bitrate; struct work_struct update_wk; u64 rx_duration; @@ -571,6 +566,7 @@ struct ath10k_vif { bool nohwcrypt; int num_legacy_stations; int txpower; + bool ftm_responder; struct wmi_wmm_params_all_arg wmm_params; struct work_struct ap_csa_work; struct delayed_work connection_loss_work; @@ -922,6 +918,7 @@ enum ath10k_dev_type { struct ath10k_bus_params { u32 chip_id; enum ath10k_dev_type dev_type; + bool link_can_suspend; }; struct ath10k { @@ -1068,10 +1065,7 @@ struct ath10k { /* protects shared structure data */ spinlock_t data_lock; - /* protects: ar->txqs, artxq->list */ - spinlock_t txqs_lock; - struct list_head txqs; struct list_head arvifs; struct list_head peers; struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS]; @@ -1182,6 +1176,7 @@ struct ath10k { u32 ampdu_reference; + const u8 *wmi_key_cipher; void *ce_priv; u32 sta_tid_stats_mask; @@ -1190,6 +1185,7 @@ struct ath10k { enum ath10k_radar_confirmation_state radar_conf_state; struct ath10k_radar_found_info last_radar_info; struct work_struct radar_confirmation_work; + struct ath10k_bus_params bus_param; /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index eadae2f9206b..33838d9c1cb6 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "coredump.h" @@ -1167,7 +1156,7 @@ static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); guid_copy(&dump_data->guid, &crash_data->guid); - dump_data->chip_id = cpu_to_le32(ar->chip_id); + dump_data->chip_id = cpu_to_le32(ar->bus_param.chip_id); dump_data->bus_type = cpu_to_le32(0); dump_data->target_version = cpu_to_le32(ar->target_version); dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h index 5dac653e1649..09de41922f97 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.h +++ b/drivers/net/wireless/ath/ath10k/coredump.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _COREDUMP_H_ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 02988fc378a1..32d967a31c65 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -58,7 +47,7 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar) ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x", ar->hw_params.name, ar->target_version, - ar->chip_id, + ar->bus_param.chip_id, ar->id.subsystem_vendor, ar->id.subsystem_device); ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", @@ -625,7 +614,7 @@ static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf, size_t len; char buf[50]; - len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id); + len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->bus_param.chip_id); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1263,6 +1252,9 @@ static int ath10k_debug_cal_data_fetch(struct ath10k *ar) if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) return -EINVAL; + if (ar->hw_params.cal_data_len == 0) + return -EOPNOTSUPP; + hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr)); diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 5cf16d690724..db78e855a80f 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _DEBUG_H_ @@ -213,12 +202,12 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void ath10k_sta_update_rx_duration(struct ath10k *ar, struct ath10k_fw_stats *stats); void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, - unsigned long int num_msdus, + unsigned long num_msdus, enum ath10k_pkt_rx_err err, - unsigned long int unchain_cnt, - unsigned long int drop_cnt, - unsigned long int drop_cnt_filter, - unsigned long int queued_msdus); + unsigned long unchain_cnt, + unsigned long drop_cnt, + unsigned long drop_cnt_filter, + unsigned long queued_msdus); void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid, struct htt_rx_indication_mpdu_range *ranges, @@ -232,12 +221,12 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar, static inline void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, - unsigned long int num_msdus, + unsigned long num_msdus, enum ath10k_pkt_rx_err err, - unsigned long int unchain_cnt, - unsigned long int drop_cnt, - unsigned long int drop_cnt_filter, - unsigned long int queued_msdus) + unsigned long unchain_cnt, + unsigned long drop_cnt, + unsigned long drop_cnt_filter, + unsigned long queued_msdus) { } diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 4778a455d81a..c704ae371c4d 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "core.h" @@ -87,12 +76,12 @@ out: } void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr, - unsigned long int num_msdus, + unsigned long num_msdus, enum ath10k_pkt_rx_err err, - unsigned long int unchain_cnt, - unsigned long int drop_cnt, - unsigned long int drop_cnt_filter, - unsigned long int queued_msdus) + unsigned long unchain_cnt, + unsigned long drop_cnt, + unsigned long drop_cnt_filter, + unsigned long queued_msdus) { struct ieee80211_sta *sta; struct ath10k_sta *arsta; @@ -696,11 +685,12 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file, " %llu ", stats->ht[j][i]); len += scnprintf(buf + len, size - len, "\n"); len += scnprintf(buf + len, size - len, - " BW %s (20,40,80,160 MHz)\n", str[j]); + " BW %s (20,5,10,40,80,160 MHz)\n", str[j]); len += scnprintf(buf + len, size - len, - " %llu %llu %llu %llu\n", + " %llu %llu %llu %llu %llu %llu\n", stats->bw[j][0], stats->bw[j][1], - stats->bw[j][2], stats->bw[j][3]); + stats->bw[j][2], stats->bw[j][3], + stats->bw[j][4], stats->bw[j][5]); len += scnprintf(buf + len, size - len, " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]); len += scnprintf(buf + len, size - len, diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 1a59ea0068c2..fe5417962f40 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _HIF_H_ @@ -59,6 +48,8 @@ struct ath10k_hif_ops { */ void (*stop)(struct ath10k *ar); + int (*swap_mailbox)(struct ath10k *ar); + int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id, u8 *ul_pipe, u8 *dl_pipe); @@ -81,7 +72,7 @@ struct ath10k_hif_ops { void (*write32)(struct ath10k *ar, u32 address, u32 value); /* Power up the device and enter BMI transfer mode for FW download */ - int (*power_up)(struct ath10k *ar); + int (*power_up)(struct ath10k *ar, enum ath10k_firmware_mode fw_mode); /* Power down the device and free up resources. 
stop() must be called * before this if start() was called earlier @@ -139,6 +130,13 @@ static inline void ath10k_hif_stop(struct ath10k *ar) return ar->hif.ops->stop(ar); } +static inline int ath10k_hif_swap_mailbox(struct ath10k *ar) +{ + if (ar->hif.ops->swap_mailbox) + return ar->hif.ops->swap_mailbox(ar); + return 0; +} + static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) @@ -165,9 +163,10 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar, return ar->hif.ops->get_free_queue_number(ar, pipe_id); } -static inline int ath10k_hif_power_up(struct ath10k *ar) +static inline int ath10k_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { - return ar->hif.ops->power_up(ar); + return ar->hif.ops->power_up(ar, fw_mode); } static inline void ath10k_hif_power_down(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c index 28daed5981a1..805a7f8a04f2 100644 --- a/drivers/net/wireless/ath/ath10k/htc.c +++ b/drivers/net/wireless/ath/ath10k/htc.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
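/*
 * Note: ath10k_hif_swap_mailbox() above is an optional HIF op; the inline
 * wrapper treats a missing callback as "nothing to do" and returns 0, so
 * only buses that need the mailbox swap implement it.  A sketch of the
 * bus-side half, with illustrative names only (the real bus implementation
 * is not shown here):
 */
#include "core.h"
#include "hif.h"

static int example_bus_hif_swap_mailbox(struct ath10k *ar)
{
        /* ask the firmware to switch to the alternate mailbox here */
        return 0;
}

static const struct ath10k_hif_ops example_bus_hif_ops = {
        .swap_mailbox = example_bus_hif_swap_mailbox,
        /* buses that do not need the hook leave .swap_mailbox NULL */
};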
*/ #include "core.h" @@ -53,7 +42,7 @@ static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc, { struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); - if (htc->ar->dev_type != ATH10K_DEV_TYPE_HL) + if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); skb_pull(skb, sizeof(struct ath10k_htc_hdr)); } @@ -88,7 +77,8 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep, hdr->eid = ep->eid; hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr)); hdr->flags = 0; - hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; + if (ep->tx_credit_flow_enabled) + hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE; spin_lock_bh(&ep->htc->tx_lock); hdr->seq_no = ep->seq_no++; @@ -138,7 +128,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, ath10k_htc_prepare_tx_skb(ep, skb); skb_cb->eid = eid; - if (ar->dev_type != ATH10K_DEV_TYPE_HL) { + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) { skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE); ret = dma_mapping_error(dev, skb_cb->paddr); @@ -161,7 +151,7 @@ int ath10k_htc_send(struct ath10k_htc *htc, return 0; err_unmap: - if (ar->dev_type != ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE); err_credits: if (ep->tx_credit_flow_enabled) { diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index 51fda6c23f69..f55d3caec61f 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _HTC_H_ @@ -51,7 +40,6 @@ struct ath10k; */ #define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8 -#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16 enum ath10k_htc_tx_flags { ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01, diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 21a67f82f037..d235ff3098e8 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -268,7 +257,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt) return status; } - status = ath10k_htt_h2t_aggr_cfg_msg(htt, + status = htt->tx_ops->htt_h2t_aggr_cfg_msg(htt, htt->max_num_ampdu, htt->max_num_amsdu); if (status) { diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index a76f7c9e2199..4cee5492abc8 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _HTT_H_ @@ -357,6 +346,13 @@ struct htt_aggr_conf { u8 max_num_amsdu_subframes; } __packed; +struct htt_aggr_conf_v2 { + u8 max_num_ampdu_subframes; + /* amsdu_subframes is limited by 0x1F mask */ + u8 max_num_amsdu_subframes; + u8 reserved; +} __packed; + #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32 struct htt_mgmt_tx_desc_qca99x0 { __le32 rate; @@ -564,6 +560,7 @@ struct htt_mgmt_tx_completion { #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) #define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5) #define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6) +#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7) #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 @@ -576,7 +573,14 @@ struct htt_mgmt_tx_completion { #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000 #define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24 -#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0) +#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0) +#define HTT_TX_CMPL_FLAG_PPID_PRESENT BIT(1) +#define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2) +#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3) + +#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3) +#define HTT_TX_DATA_APPEND_RETRIES BIT(0) +#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1) struct htt_rx_indication_hdr { u8 info0; /* %HTT_RX_INDICATION_INFO0_ */ @@ -852,6 +856,88 @@ enum htt_data_tx_flags { #define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF +struct htt_append_retries { + __le16 msdu_id; + u8 tx_retries; + u8 flag; +} __packed; + +struct htt_data_tx_completion_ext { + struct htt_append_retries a_retries; + __le32 t_stamp; + __le16 msdus_rssi[0]; +} __packed; + +/** + * @brief target -> host TX completion indication message definition + * + * @details + * The following diagram shows 
the format of the TX completion indication sent
+ * from the target to the host
+ *
+ * |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0|
+ * |-------------------------------------------------------------|
+ * header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type |
+ * |-------------------------------------------------------------|
+ * payload: | MSDU1 ID | MSDU0 ID |
+ * |-------------------------------------------------------------|
+ * : MSDU3 ID : MSDU2 ID :
+ * |-------------------------------------------------------------|
+ * | struct htt_tx_compl_ind_append_retries |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | struct htt_tx_compl_ind_append_tx_tstamp |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | MSDU1 ACK RSSI | MSDU0 ACK RSSI |
+ * |-------------------------------------------------------------|
+ * : MSDU3 ACK RSSI : MSDU2 ACK RSSI :
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * -msg_type
+ * Bits 7:0
+ * Purpose: identifies this as HTT TX completion indication
+ * -status
+ * Bits 10:8
+ * Purpose: the TX completion status of payload fragmentations descriptors
+ * Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
+ * -tid
+ * Bits 14:11
+ * Purpose: the tid associated with those fragmentation descriptors. It is
+ * valid or not, depending on the tid_invalid bit.
+ * Value: 0 to 15
+ * -tid_invalid
+ * Bits 15:15
+ * Purpose: this bit indicates whether the tid field is valid or not
+ * Value: 0 indicates valid, 1 indicates invalid
+ * -num
+ * Bits 23:16
+ * Purpose: the number of payload in this indication
+ * Value: 1 to 255
+ * -A0 = append
+ * Bits 24:24
+ * Purpose: append the struct htt_tx_compl_ind_append_retries which contains
+ * the number of tx retries for one MSDU at the end of this message
+ * Value: 0 indicates no appending, 1 indicates appending
+ * -A1 = append1
+ * Bits 25:25
+ * Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
+ * contains the timestamp info for each TX msdu id in payload.
+ * Value: 0 indicates no appending, 1 indicates appending
+ * -TP = MSDU tx power presence
+ * Bits 26:26
+ * Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
+ * for each MSDU referenced by the TX_COMPL_IND message.
+ * The order of the per-MSDU tx power reports matches the order
+ * of the MSDU IDs.
+ * Value: 0 indicates not appending, 1 indicates appending
+ * -A2 = append2
+ * Bits 27:27
+ * Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
+ * TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
+ * matches the order of the MSDU IDs.
+ * The ACK RSSI values are valid when status is COMPLETE_OK (and
+ * this append2 bit is set).
+ * Value: 0 indicates not appending, 1 indicates appending + */ + struct htt_data_tx_completion { union { u8 flags; @@ -866,6 +952,21 @@ struct htt_data_tx_completion { __le16 msdus[0]; /* variable length based on %num_msdus */ } __packed; +#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0) +#define HTT_TX_PPDU_DUR_INFO0_TID_MASK GENMASK(20, 16) + +struct htt_data_tx_ppdu_dur { + __le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */ + __le32 tx_duration; /* in usecs */ +} __packed; + +#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0) + +struct htt_data_tx_compl_ppdu_dur { + __le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */ + struct htt_data_tx_ppdu_dur ppdu_dur[0]; +} __packed; + struct htt_tx_compl_ind_base { u32 hdr; u16 payload[1/*or more*/]; @@ -1650,6 +1751,7 @@ struct htt_cmd { struct htt_stats_req stats_req; struct htt_oob_sync_req oob_sync_req; struct htt_aggr_conf aggr_conf; + struct htt_aggr_conf_v2 aggr_conf_v2; struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32; struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64; struct htt_tx_fetch_resp tx_fetch_resp; @@ -1716,14 +1818,14 @@ struct ath10k_htt_txbuf_32 { struct ath10k_htc_hdr htc_hdr; struct htt_cmd_hdr cmd_hdr; struct htt_data_tx_desc cmd_tx; -} __packed; +} __packed __aligned(4); struct ath10k_htt_txbuf_64 { struct htt_data_tx_desc_frag frags[2]; struct ath10k_htc_hdr htc_hdr; struct htt_cmd_hdr cmd_hdr; struct htt_data_tx_desc_64 cmd_tx; -} __packed; +} __packed __aligned(4); struct ath10k_htt { struct ath10k *ar; @@ -1890,6 +1992,9 @@ struct ath10k_htt_tx_ops { struct sk_buff *msdu); int (*htt_alloc_txbuff)(struct ath10k_htt *htt); void (*htt_free_txbuff)(struct ath10k_htt *htt); + int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu); }; static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index f42bac204ef8..a20ea270d519 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "core.h" @@ -265,7 +254,7 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) struct ath10k_htt *htt = &ar->htt; int ret; - if (ar->dev_type == ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) return 0; spin_lock_bh(&htt->rx_ring.lock); @@ -282,7 +271,7 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar) void ath10k_htt_rx_free(struct ath10k_htt *htt) { - if (htt->ar->dev_type == ATH10K_DEV_TYPE_HL) + if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) return; del_timer_sync(&htt->rx_ring.refill_retry_timer); @@ -760,7 +749,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt) size_t size; struct timer_list *timer = &htt->rx_ring.refill_retry_timer; - if (ar->dev_type == ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) return 0; htt->rx_confused = false; @@ -1905,7 +1894,7 @@ static void ath10k_htt_rx_h_enqueue(struct ath10k *ar, } static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, - unsigned long int *unchain_cnt) + unsigned long *unchain_cnt) { struct sk_buff *skb, *first; int space; @@ -1954,8 +1943,8 @@ static int ath10k_unchain_msdu(struct sk_buff_head *amsdu, static void ath10k_htt_rx_h_unchain(struct ath10k *ar, struct sk_buff_head *amsdu, - unsigned long int *drop_cnt, - unsigned long int *unchain_cnt) + unsigned long *drop_cnt, + unsigned long *unchain_cnt) { struct sk_buff *first; struct htt_rx_desc *rxd; @@ -2005,7 +1994,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, static void ath10k_htt_rx_h_filter(struct ath10k *ar, struct sk_buff_head *amsdu, struct ieee80211_rx_status *rx_status, - unsigned long int *drop_cnt) + unsigned long *drop_cnt) { if (skb_queue_empty(amsdu)) return; @@ -2025,10 +2014,10 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) struct ieee80211_rx_status *rx_status = &htt->rx_status; struct sk_buff_head amsdu; int ret; - unsigned long int drop_cnt = 0; - unsigned long int unchain_cnt = 0; - unsigned long int drop_cnt_filter = 0; - unsigned long int msdus_to_queue, num_msdus; + unsigned long drop_cnt = 0; + unsigned long unchain_cnt = 0; + unsigned long drop_cnt_filter = 0; + unsigned long msdus_to_queue, num_msdus; enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX; u8 first_hdr[RX_HTT_HDR_STATUS_LEN]; @@ -2130,9 +2119,15 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt, hdr = (struct ieee80211_hdr *)skb->data; rx_status = IEEE80211_SKB_RXCB(skb); rx_status->chains |= BIT(0); - rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + - rx->ppdu.combined_rssi; - rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; + if (rx->ppdu.combined_rssi == 0) { + /* SDIO firmware does not provide signal */ + rx_status->signal = 0; + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; + } else { + rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + + rx->ppdu.combined_rssi; + rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; + } spin_lock_bh(&ar->data_lock); ch = ar->scan_channel; @@ -2220,8 +2215,12 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS); __le16 msdu_id, *msdus; bool rssi_enabled = false; - u8 msdu_count = 0; - int i; + u8 msdu_count = 0, num_airtime_records, tid; + int i, htt_pad = 0; + struct htt_data_tx_compl_ppdu_dur *ppdu_info; + struct ath10k_peer *peer; + u16 ppdu_info_offset = 0, peer_id; + u32 tx_duration; switch (status) { case HTT_DATA_TX_STATUS_NO_ACK: @@ -2245,12 +2244,14 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, resp->data_tx_completion.num_msdus); msdu_count = 
resp->data_tx_completion.num_msdus; + msdus = resp->data_tx_completion.msdus; + rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp); - if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI) - rssi_enabled = true; + if (rssi_enabled) + htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, + resp); for (i = 0; i < msdu_count; i++) { - msdus = resp->data_tx_completion.msdus; msdu_id = msdus[i]; tx_done.msdu_id = __le16_to_cpu(msdu_id); @@ -2260,10 +2261,10 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, * last msdu id with 0xffff */ if (msdu_count & 0x01) { - msdu_id = msdus[msdu_count + i + 1]; + msdu_id = msdus[msdu_count + i + 1 + htt_pad]; tx_done.ack_rssi = __le16_to_cpu(msdu_id); } else { - msdu_id = msdus[msdu_count + i]; + msdu_id = msdus[msdu_count + i + htt_pad]; tx_done.ack_rssi = __le16_to_cpu(msdu_id); } } @@ -2282,6 +2283,50 @@ static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar, ath10k_txrx_tx_unref(htt, &tx_done); } } + + if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT)) + return; + + ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count; + + if (rssi_enabled) + ppdu_info_offset += ppdu_info_offset; + + if (resp->data_tx_completion.flags2 & + (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT)) + ppdu_info_offset += 2; + + ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset]; + num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK, + __le32_to_cpu(ppdu_info->info0)); + + for (i = 0; i < num_airtime_records; i++) { + struct htt_data_tx_ppdu_dur *ppdu_dur; + u32 info0; + + ppdu_dur = &ppdu_info->ppdu_dur[i]; + info0 = __le32_to_cpu(ppdu_dur->info0); + + peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK, + info0); + rcu_read_lock(); + spin_lock_bh(&ar->data_lock); + + peer = ath10k_peer_find_by_id(ar, peer_id); + if (!peer) { + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); + continue; + } + + tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0); + tx_duration = __le32_to_cpu(ppdu_dur->tx_duration); + + ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0); + + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); + } } static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp) @@ -2596,6 +2641,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) u8 tid; int ret; int i; + bool may_tx; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n"); @@ -2668,8 +2714,13 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) num_msdus = 0; num_bytes = 0; + ieee80211_txq_schedule_start(hw, txq->ac); + may_tx = ieee80211_txq_may_transmit(hw, txq); while (num_msdus < max_num_msdus && num_bytes < max_num_bytes) { + if (!may_tx) + break; + ret = ath10k_mac_tx_push_txq(hw, txq); if (ret < 0) break; @@ -2677,6 +2728,8 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb) num_msdus++; num_bytes += ret; } + ieee80211_return_txq(hw, txq); + ieee80211_txq_schedule_end(hw, txq->ac); record->num_msdus = cpu_to_le16(num_msdus); record->num_bytes = cpu_to_le32(num_bytes); @@ -2868,17 +2921,19 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, struct rate_info *txrate = &arsta->txrate; struct ath10k_htt_tx_stats *tx_stats; int idx, ht_idx, gi, mcs, bw, nss; + unsigned long flags; if (!arsta->tx_stats) return; tx_stats = arsta->tx_stats; - gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI); - ht_idx = txrate->mcs + txrate->nss * 8; - mcs 
= txrate->mcs; + flags = txrate->flags; + gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags); + mcs = ATH10K_HW_MCS_RATE(pstats->ratecode); bw = txrate->bw; nss = txrate->nss; - idx = mcs * 8 + 8 * 10 * nss; + ht_idx = mcs + (nss - 1) * 8; + idx = mcs * 8 + 8 * 10 * (nss - 1); idx += bw * 2 + gi; #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] @@ -2924,7 +2979,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, } STATS_OP_FMT(AMPDU).bw[0][bw] += pstats->succ_bytes + pstats->retry_bytes; - STATS_OP_FMT(AMPDU).nss[0][nss] += + STATS_OP_FMT(AMPDU).nss[0][nss - 1] += pstats->succ_bytes + pstats->retry_bytes; STATS_OP_FMT(AMPDU).gi[0][gi] += pstats->succ_bytes + pstats->retry_bytes; @@ -2932,7 +2987,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, pstats->succ_bytes + pstats->retry_bytes; STATS_OP_FMT(AMPDU).bw[1][bw] += pstats->succ_pkts + pstats->retry_pkts; - STATS_OP_FMT(AMPDU).nss[1][nss] += + STATS_OP_FMT(AMPDU).nss[1][nss - 1] += pstats->succ_pkts + pstats->retry_pkts; STATS_OP_FMT(AMPDU).gi[1][gi] += pstats->succ_pkts + pstats->retry_pkts; @@ -2944,27 +2999,27 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, } STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; - STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes; STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; - STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts; + STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts; STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; - STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes; STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; - STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts; + STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts; STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; - STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes; STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; - STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts; + STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts; STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; if (txrate->flags >= RATE_INFO_FLAGS_MCS) { @@ -2975,6 +3030,8 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; } + + tx_stats->tx_duration += pstats->duration; } static void @@ -3070,6 +3127,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, arsta->txrate.nss = txrate.nss; arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); + arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate); if (sgi) arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; @@ -3141,6 +3199,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts); p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts); p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts); + p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration); ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats); } @@ -3234,7 +3293,7 @@ bool 
ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IND: - if (ar->dev_type == ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb); @@ -3530,7 +3589,7 @@ void ath10k_htt_set_rx_ops(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; - if (ar->dev_type == ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) htt->rx_ops = &htt_rx_ops_hl; else if (ar->hw_params.target_64bit) htt->rx_ops = &htt_rx_ops_64; diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index ad05ab714c9b..d8e9cc0bb772 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -495,7 +484,7 @@ int ath10k_htt_tx_start(struct ath10k_htt *htt) if (htt->tx_mem_allocated) return 0; - if (ar->dev_type == ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) return 0; ret = ath10k_htt_tx_alloc_buf(htt); @@ -1035,6 +1024,53 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, return 0; } +static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt, + u8 max_subfrms_ampdu, + u8 max_subfrms_amsdu) +{ + struct ath10k *ar = htt->ar; + struct htt_aggr_conf_v2 *aggr_conf; + struct sk_buff *skb; + struct htt_cmd *cmd; + int len; + int ret; + + /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ + + if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) + return -EINVAL; + + if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) + return -EINVAL; + + len = sizeof(cmd->hdr); + len += sizeof(cmd->aggr_conf_v2); + + skb = ath10k_htc_alloc_skb(ar, len); + if (!skb) + return -ENOMEM; + + skb_put(skb, len); + cmd = (struct htt_cmd *)skb->data; + cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; + + aggr_conf = &cmd->aggr_conf_v2; + aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; + aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", + aggr_conf->max_num_amsdu_subframes, + aggr_conf->max_num_ampdu_subframes); + + ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); + if (ret) { + dev_kfree_skb_any(skb); + return ret; + } + + return 0; +} + int ath10k_htt_tx_fetch_resp(struct ath10k *ar, __le32 token, __le16 fetch_seq_num, @@ -1177,7 +1213,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) return 0; err_unmap_msdu: - if (ar->dev_type != ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); 
err_free_txdesc: dev_kfree_skb_any(txdesc); @@ -1498,7 +1534,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, u16 msdu_id, flags1 = 0; u16 freq = 0; dma_addr_t frags_paddr = 0; - u32 txbuf_paddr; + dma_addr_t txbuf_paddr; struct htt_msdu_ext_desc_64 *ext_desc = NULL; struct htt_msdu_ext_desc_64 *ext_desc_t = NULL; @@ -1692,6 +1728,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_32 = { .htt_tx = ath10k_htt_tx_32, .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32, .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg, }; static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { @@ -1702,6 +1739,7 @@ static const struct ath10k_htt_tx_ops htt_tx_ops_64 = { .htt_tx = ath10k_htt_tx_64, .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64, .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64, + .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2, }; static const struct ath10k_htt_tx_ops htt_tx_ops_hl = { @@ -1714,7 +1752,7 @@ void ath10k_htt_set_tx_ops(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; - if (ar->dev_type == ATH10K_DEV_TYPE_HL) + if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) htt->tx_ops = &htt_tx_ops_hl; else if (ar->hw_params.target_64bit) htt->tx_ops = &htt_tx_ops_64; diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 61ecf931ba4d..ad082b7d7643 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include @@ -318,9 +307,11 @@ static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = { }; const struct ath10k_hw_ce_regs wcn3990_ce_regs = { - .sr_base_addr = 0x00000000, + .sr_base_addr_lo = 0x00000000, + .sr_base_addr_hi = 0x00000004, .sr_size_addr = 0x00000008, - .dr_base_addr = 0x0000000c, + .dr_base_addr_lo = 0x0000000c, + .dr_base_addr_hi = 0x00000010, .dr_size_addr = 0x00000014, .misc_ie_addr = 0x00000034, .sr_wr_index_addr = 0x0000003c, @@ -464,9 +455,9 @@ static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = { }; const struct ath10k_hw_ce_regs qcax_ce_regs = { - .sr_base_addr = 0x00000000, + .sr_base_addr_lo = 0x00000000, .sr_size_addr = 0x00000004, - .dr_base_addr = 0x00000008, + .dr_base_addr_lo = 0x00000008, .dr_size_addr = 0x0000000c, .ce_cmd_addr = 0x00000018, .misc_ie_addr = 0x00000034, @@ -1109,6 +1100,32 @@ int ath10k_hw_diag_fast_download(struct ath10k *ar, return ret; } +static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp) +{ + return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI); +} + +static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp) +{ + return (resp->data_tx_completion.flags2 & + HTT_TX_DATA_RSSI_ENABLE_WCN3990); +} + +static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp) +{ + struct htt_data_tx_completion_ext extd; + int pad_bytes = 0; + + if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES) + pad_bytes += sizeof(extd.a_retries) / + sizeof(extd.msdus_rssi[0]); + + if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP) + pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]); + + return pad_bytes; +} + const struct ath10k_hw_ops qca988x_ops = { .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, }; @@ -1133,6 +1150,10 @@ const struct ath10k_hw_ops qca99x0_ops = { const struct ath10k_hw_ops qca6174_ops = { .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock, + .is_rssi_enable = ath10k_htt_tx_rssi_enable, }; -const struct ath10k_hw_ops wcn3990_ops = {}; +const struct ath10k_hw_ops wcn3990_ops = { + .tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad, + .is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990, +}; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index e50a8dc5b093..71314999aa24 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _HW_H_ @@ -353,9 +342,11 @@ struct ath10k_hw_ce_ctrl1_upd { }; struct ath10k_hw_ce_regs { - u32 sr_base_addr; + u32 sr_base_addr_lo; + u32 sr_base_addr_hi; u32 sr_size_addr; - u32 dr_base_addr; + u32 dr_base_addr_lo; + u32 dr_base_addr_hi; u32 dr_size_addr; u32 ce_cmd_addr; u32 misc_ie_addr; @@ -618,6 +609,8 @@ struct ath10k_hw_params { }; struct htt_rx_desc; +struct htt_resp; +struct htt_data_tx_completion_ext; /* Defines needed for Rx descriptor abstraction */ struct ath10k_hw_ops { @@ -625,6 +618,8 @@ struct ath10k_hw_ops { void (*set_coverage_class)(struct ath10k *ar, s16 value); int (*enable_pll_clk)(struct ath10k *ar); bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd); + int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt); + int (*is_rssi_enable)(struct htt_resp *resp); }; extern const struct ath10k_hw_ops qca988x_ops; @@ -652,6 +647,24 @@ ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, return false; } +static inline int +ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw, + struct htt_resp *htt) +{ + if (hw->hw_ops->tx_data_rssi_pad_bytes) + return hw->hw_ops->tx_data_rssi_pad_bytes(htt); + return 0; +} + +static inline int +ath10k_is_rssi_enable(struct ath10k_hw_params *hw, + struct htt_resp *resp) +{ + if (hw->hw_ops->is_rssi_enable) + return hw->hw_ops->is_rssi_enable(resp); + return 0; +} + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 @@ -734,13 +747,14 @@ ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, #define TARGET_TLV_NUM_TDLS_VDEVS 1 #define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2) #define TARGET_TLV_NUM_MSDU_DESC (1024 + 32) +#define TARGET_TLV_NUM_MSDU_DESC_HL 64 #define TARGET_TLV_NUM_WOW_PATTERNS 22 #define TARGET_TLV_MGMT_NUM_MSDU_DESC (50) /* Target specific defines for WMI-HL-1.0 firmware */ -#define TARGET_HL_10_TLV_NUM_PEERS 14 -#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6 -#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2 +#define TARGET_HL_TLV_NUM_PEERS 33 +#define TARGET_HL_TLV_AST_SKID_LIMIT 16 +#define TARGET_HL_TLV_NUM_WDS_ENTRIES 2 /* Diagnostic Window */ #define CE_DIAG_PIPE 7 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 49758490eaba..b73c23d4ce86 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
*/ #include "mac.h" @@ -250,24 +239,24 @@ static int ath10k_send_key(struct ath10k_vif *arvif, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - arg.key_cipher = WMI_CIPHER_AES_CCM; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; case WLAN_CIPHER_SUITE_TKIP: - arg.key_cipher = WMI_CIPHER_TKIP; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP]; arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: - arg.key_cipher = WMI_CIPHER_WEP; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP]; break; case WLAN_CIPHER_SUITE_CCMP_256: - arg.key_cipher = WMI_CIPHER_AES_CCM; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: - arg.key_cipher = WMI_CIPHER_AES_GCM; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM]; break; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: @@ -284,7 +273,7 @@ static int ath10k_send_key(struct ath10k_vif *arvif, key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; if (cmd == DISABLE_KEY) { - arg.key_cipher = WMI_CIPHER_NONE; + arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE]; arg.key_data = NULL; } @@ -3397,6 +3386,7 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar, struct sk_buff *skb) { const struct ieee80211_hdr *hdr = (void *)skb->data; + const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); __le16 fc = hdr->frame_control; if (!vif || vif->type == NL80211_IFTYPE_MONITOR) @@ -3438,7 +3428,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar, if (ieee80211_is_data_present(fc) && sta && sta->tdls) return ATH10K_HW_TXRX_ETHERNET; - if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) + if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) || + skb_cb->flags & ATH10K_SKB_F_RAW_TX) return ATH10K_HW_TXRX_RAW; return ATH10K_HW_TXRX_NATIVE_WIFI; @@ -3544,10 +3535,13 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, struct ieee80211_vif *vif, struct ieee80211_txq *txq, - struct sk_buff *skb) + struct sk_buff *skb, u16 airtime) { struct ieee80211_hdr *hdr = (void *)skb->data; struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); + const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool is_data = ieee80211_is_data(hdr->frame_control) || + ieee80211_is_data_qos(hdr->frame_control); cb->flags = 0; if (!ath10k_tx_h_use_hwcrypto(vif, skb)) @@ -3559,8 +3553,19 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, if (ieee80211_is_data_qos(hdr->frame_control)) cb->flags |= ATH10K_SKB_F_QOS; + /* Data frames encrypted in software will be posted to firmware + * with tx encap mode set to RAW. Ex: Multicast traffic generated + * for a specific VLAN group will always be encrypted in software. 
+ */ + if (is_data && ieee80211_has_protected(hdr->frame_control) && + !info->control.hw_key) { + cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; + cb->flags |= ATH10K_SKB_F_RAW_TX; + } + cb->vif = vif; cb->txq = txq; + cb->airtime_est = airtime; } bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) @@ -3667,6 +3672,7 @@ static int ath10k_mac_tx(struct ath10k *ar, { struct ieee80211_hw *hw = ar->hw; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); int ret; /* We should disable CCK RATE due to P2P */ @@ -3684,7 +3690,8 @@ static int ath10k_mac_tx(struct ath10k *ar, ath10k_tx_h_8023(skb); break; case ATH10K_HW_TXRX_RAW: - if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { + if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) && + !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) { WARN_ON_ONCE(1); ieee80211_free_txskb(hw, skb); return -ENOTSUPP; @@ -3863,7 +3870,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", ret); dma_unmap_single(ar->dev, paddr, skb->len, - DMA_FROM_DEVICE); + DMA_TO_DEVICE); ieee80211_free_txskb(ar->hw, skb); } } else { @@ -3890,7 +3897,6 @@ static void ath10k_mac_txq_init(struct ieee80211_txq *txq) static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) { - struct ath10k_txq *artxq; struct ath10k_skb_cb *cb; struct sk_buff *msdu; int msdu_id; @@ -3898,12 +3904,6 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) if (!txq) return; - artxq = (void *)txq->drv_priv; - spin_lock_bh(&ar->txqs_lock); - if (!list_empty(&artxq->list)) - list_del_init(&artxq->list); - spin_unlock_bh(&ar->txqs_lock); - spin_lock_bh(&ar->htt.tx_lock); idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { cb = ATH10K_SKB_CB(msdu); @@ -3943,7 +3943,6 @@ static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, struct ath10k_txq *artxq = (void *)txq->drv_priv; /* No need to get locks */ - if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) return true; @@ -3956,6 +3955,52 @@ static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, return false; } +/* Return estimated airtime in microsecond, which is calculated using last + * reported TX rate. This is just a rough estimation because host driver has no + * knowledge of the actual transmit rate, retries or aggregation. If actual + * airtime can be reported by firmware, then delta between estimated and actual + * airtime can be adjusted from deficit. + */ +#define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */ +#define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */ +static u16 ath10k_mac_update_airtime(struct ath10k *ar, + struct ieee80211_txq *txq, + struct sk_buff *skb) +{ + struct ath10k_sta *arsta; + u32 pktlen; + u16 airtime = 0; + + if (!txq || !txq->sta) + return airtime; + + if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) + return airtime; + + spin_lock_bh(&ar->data_lock); + arsta = (struct ath10k_sta *)txq->sta->drv_priv; + + pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */ + if (arsta->last_tx_bitrate) { + /* airtime in us, last_tx_bitrate in 100kbps */ + airtime = (pktlen * 8 * (1000 / 100)) + / arsta->last_tx_bitrate; + /* overhead for media access time and IFS */ + airtime += IEEE80211_ATF_OVERHEAD_IFS; + } else { + /* This is mostly for throttle excessive BC/MC frames, and the + * airtime/rate doesn't need be exact. 
Airtime of BC/MC frames + * in 2G get some discount, which helps prevent very low rate + * frames from being blocked for too long. + */ + airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */ + airtime += IEEE80211_ATF_OVERHEAD; + } + spin_unlock_bh(&ar->data_lock); + + return airtime; +} + int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { @@ -3971,6 +4016,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, size_t skb_len; bool is_mgmt, is_presp; int ret; + u16 airtime; spin_lock_bh(&ar->htt.tx_lock); ret = ath10k_htt_tx_inc_pending(htt); @@ -3988,7 +4034,8 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, return -ENOENT; } - ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); + airtime = ath10k_mac_update_airtime(ar, txq, skb); + ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); skb_len = skb->len; txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); @@ -4030,48 +4077,45 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, return skb_len; } -void ath10k_mac_tx_push_pending(struct ath10k *ar) +static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) { - struct ieee80211_hw *hw = ar->hw; struct ieee80211_txq *txq; - struct ath10k_txq *artxq; - struct ath10k_txq *last; - int ret; - int max; - - if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) - return; - - spin_lock_bh(&ar->txqs_lock); - rcu_read_lock(); - - last = list_last_entry(&ar->txqs, struct ath10k_txq, list); - while (!list_empty(&ar->txqs)) { - artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); - txq = container_of((void *)artxq, struct ieee80211_txq, - drv_priv); + int ret = 0; - /* Prevent aggressive sta/tid taking over tx queue */ - max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE; - ret = 0; - while (ath10k_mac_tx_can_push(hw, txq) && max--) { + ieee80211_txq_schedule_start(hw, ac); + while ((txq = ieee80211_next_txq(hw, ac))) { + while (ath10k_mac_tx_can_push(hw, txq)) { ret = ath10k_mac_tx_push_txq(hw, txq); if (ret < 0) break; } + ieee80211_return_txq(hw, txq); + ath10k_htt_tx_txq_update(hw, txq); + if (ret == -EBUSY) + break; + } + ieee80211_txq_schedule_end(hw, ac); - list_del_init(&artxq->list); - if (ret != -ENOENT) - list_add_tail(&artxq->list, &ar->txqs); + return ret; +} - ath10k_htt_tx_txq_update(hw, txq); +void ath10k_mac_tx_push_pending(struct ath10k *ar) +{ + struct ieee80211_hw *hw = ar->hw; + u32 ac; + + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) + return; + + if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) + return; - if (artxq == last || (ret < 0 && ret != -ENOENT)) + rcu_read_lock(); + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY) break; } - rcu_read_unlock(); - spin_unlock_bh(&ar->txqs_lock); } EXPORT_SYMBOL(ath10k_mac_tx_push_pending); @@ -4258,8 +4302,10 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw, bool is_mgmt; bool is_presp; int ret; + u16 airtime; - ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); + airtime = ath10k_mac_update_airtime(ar, txq, skb); + ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); @@ -4310,31 +4356,28 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct ath10k *ar = hw->priv; - struct ath10k_txq *artxq = (void *)txq->drv_priv; - struct ieee80211_txq *f_txq; - struct ath10k_txq *f_artxq; - int ret = 0; - int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE; + int ret; + u8 ac; - 
spin_lock_bh(&ar->txqs_lock); - if (list_empty(&artxq->list)) - list_add_tail(&artxq->list, &ar->txqs); + ath10k_htt_tx_txq_update(hw, txq); + if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) + return; - f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); - f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); - list_del_init(&f_artxq->list); + ac = txq->ac; + ieee80211_txq_schedule_start(hw, ac); + txq = ieee80211_next_txq(hw, ac); + if (!txq) + goto out; - while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { - ret = ath10k_mac_tx_push_txq(hw, f_txq); + while (ath10k_mac_tx_can_push(hw, txq)) { + ret = ath10k_mac_tx_push_txq(hw, txq); if (ret < 0) break; } - if (ret != -ENOENT) - list_add_tail(&f_artxq->list, &ar->txqs); - spin_unlock_bh(&ar->txqs_lock); - - ath10k_htt_tx_txq_update(hw, f_txq); + ieee80211_return_txq(hw, txq); ath10k_htt_tx_txq_update(hw, txq); +out: + ieee80211_txq_schedule_end(hw, ac); } /* Must not be called with conf_mutex held as workers can use that also. */ @@ -4549,7 +4592,8 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) ht_cap.cap |= stbc; } - if (ar->ht_cap_info & WMI_HT_CAP_LDPC) + if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info & + WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC))) ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) @@ -4704,7 +4748,7 @@ static int ath10k_start(struct ieee80211_hw *hw) goto err; } - ret = ath10k_hif_power_up(ar); + ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL); if (ret) { ath10k_err(ar, "Could not init hif: %d\n", ret); goto err_off; @@ -5341,6 +5385,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err_peer_delete; } + if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { + vdev_param = ar->wmi.vdev_param->rtt_responder_role; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->ftm_responder); + + /* It is harmless to not set FTM role. 
Do not warn */ + if (ret && ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n", + arvif->vdev_id, ret); + } + if (vif->type == NL80211_IFTYPE_MONITOR) { ar->monitor_arvif = arvif; ret = ath10k_monitor_recalc(ar); @@ -5615,6 +5670,20 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ether_addr_copy(arvif->bssid, info->bssid); + if (changed & BSS_CHANGED_FTM_RESPONDER && + arvif->ftm_responder != info->ftm_responder && + test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { + arvif->ftm_responder = info->ftm_responder; + + vdev_param = ar->wmi.vdev_param->rtt_responder_role; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + arvif->ftm_responder); + + ath10k_dbg(ar, ATH10K_DBG_MAC, + "mac vdev %d ftm_responder %d:ret %d\n", + arvif->vdev_id, arvif->ftm_responder, ret); + } + if (changed & BSS_CHANGED_BEACON_ENABLED) ath10k_control_beaconing(arvif, info); @@ -8617,6 +8686,15 @@ int ath10k_mac_register(struct ath10k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT); + if (ath10k_peer_stats_enabled(ar) || + test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); + + if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) + wiphy_ext_feature_set(ar->hw->wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); + /* * on LL hardware queues are managed entirely by the FW * so we only advertise to mac we can do the queues thing @@ -8726,12 +8804,19 @@ int ath10k_mac_register(struct ath10k *ar) wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER; + ret = ieee80211_register_hw(ar->hw); if (ret) { ath10k_err(ar, "failed to register ieee80211: %d\n", ret); goto err_dfs_detector_exit; } + if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) { + ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); + ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN); + } + if (!ath_is_world_regd(&ar->ath_common.regulatory)) { ret = regulatory_hint(ar->hw->wiphy, ar->ath_common.regulatory.alpha2); diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h index 570493d2d648..1fe84948b868 100644 --- a/drivers/net/wireless/ath/ath10k/mac.h +++ b/drivers/net/wireless/ath/ath10k/mac.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _MAC_H_ diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c index 7e621ee194e3..29c737b2f432 100644 --- a/drivers/net/wireless/ath/ath10k/p2p.c +++ b/drivers/net/wireless/ath/ath10k/p2p.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2015 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "core.h" diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h index 7be616e2e121..7d7f44809fbb 100644 --- a/drivers/net/wireless/ath/ath10k/p2p.h +++ b/drivers/net/wireless/ath/ath10k/p2p.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2015 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _P2P_H diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 39e0b1cc2a12..271f92c24d44 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include @@ -913,7 +902,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; @@ -924,8 +912,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; - spin_lock_bh(&ce->ce_lock); - + mutex_lock(&ar_pci->ce_diag_mutex); ce_diag = ar_pci->ce_diag; /* @@ -960,19 +947,17 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); - ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data); + ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); if (ret != 0) goto done; /* Request CE to send from Target(!) address to Host buffer */ - ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, - 0); + ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0); if (ret) goto done; i = 0; - while (ath10k_ce_completed_send_next_nolock(ce_diag, - NULL) != 0) { + while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; @@ -983,10 +968,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } i = 0; - while (ath10k_ce_completed_recv_next_nolock(ce_diag, - (void **)&buf, - &completed_nbytes) - != 0) { + while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, + &completed_nbytes) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; @@ -1019,7 +1002,7 @@ done: dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); - spin_unlock_bh(&ce->ce_lock); + mutex_unlock(&ar_pci->ce_diag_mutex); return ret; } @@ -1067,7 +1050,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); - struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; @@ -1076,8 +1058,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, dma_addr_t ce_data_base = 0; int i; - spin_lock_bh(&ce->ce_lock); - + mutex_lock(&ar_pci->ce_diag_mutex); ce_diag = ar_pci->ce_diag; /* @@ -1118,7 +1099,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, memcpy(data_buf, data, nbytes); /* Set up to receive directly into Target(!) address */ - ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address); + ret = ath10k_ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) goto done; @@ -1126,14 +1107,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. 
*/ - ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base, - nbytes, 0, 0); + ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0); if (ret != 0) goto done; i = 0; - while (ath10k_ce_completed_send_next_nolock(ce_diag, - NULL) != 0) { + while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; @@ -1144,10 +1123,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, } i = 0; - while (ath10k_ce_completed_recv_next_nolock(ce_diag, - (void **)&buf, - &completed_nbytes) - != 0) { + while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, + &completed_nbytes) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; @@ -1182,7 +1159,7 @@ done: ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); - spin_unlock_bh(&ce->ce_lock); + mutex_unlock(&ar_pci->ce_diag_mutex); return ret; } @@ -2283,7 +2260,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) return 1; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: - switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { + switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) { case QCA6174_HW_1_0_CHIP_ID_REV: case QCA6174_HW_1_1_CHIP_ID_REV: case QCA6174_HW_2_1_CHIP_ID_REV: @@ -2806,7 +2783,8 @@ static int ath10k_pci_chip_reset(struct ath10k *ar) return ar_pci->pci_hard_reset(ar); } -static int ath10k_pci_hif_power_up(struct ath10k *ar) +static int ath10k_pci_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; @@ -3462,6 +3440,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar) spin_lock_init(&ce->ce_lock); spin_lock_init(&ar_pci->ps_lock); + mutex_init(&ar_pci->ce_diag_mutex); timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); @@ -3553,7 +3532,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, case QCA9377_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9377; pci_ps = true; - pci_soft_reset = NULL; + pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca6174_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; break; @@ -3636,6 +3615,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, } bus_params.dev_type = ATH10K_DEV_TYPE_LL; + bus_params.link_can_suspend = true; bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); if (bus_params.chip_id == 0xffffffff) { ath10k_err(ar, "failed to get chip id\n"); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index e8d86331c539..3773c79f322f 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -1,24 +1,14 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _PCI_H_ #define _PCI_H_ #include +#include #include "hw.h" #include "ce.h" @@ -128,6 +118,8 @@ struct ath10k_pci { /* Copy Engine used for Diagnostic Accesses */ struct ath10k_ce_pipe *ce_diag; + /* For protecting ce_diag */ + struct mutex ce_diag_mutex; struct ath10k_ce ce; struct timer_list rx_post_retry; diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 37b3bd629f48..a7bc2c70d076 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h index 1efe1d22fc2f..e4aa20445666 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.h +++ b/drivers/net/wireless/ath/ath10k/qmi.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _ATH10K_QMI_H_ #define _ATH10K_QMI_H_ diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c index ba79c2e4aed6..1fe05c6218c3 100644 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -1763,14 +1752,239 @@ struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = { daemon_support_valid), }, { - .data_type = QMI_UNSIGNED_1_BYTE, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, - .elem_size = sizeof(u8), + .elem_size = sizeof(u32), .array_type = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_host_cap_req_msg_v01, daemon_support), }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + wake_msi_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + wake_msi), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01, + .elem_size = sizeof(u32), + .array_type = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct 
wlfw_host_cap_req_msg_v01, + m3_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .array_type = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .array_type = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode), + }, {} }; diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h index c5e3870b8871..bca1186e1560 100644 --- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h +++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
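[Editorial note on the expanded host-cap element table above: each new optional field is described by a QMI_OPT_FLAG element (the *_valid byte) followed by a value element that shares the same tlv_type, so the encoder only emits the TLV when the flag is set. A hypothetical population of the request; the real request construction lives in qmi.c and is not part of this hunk, and wake_msi_data is an assumed parameter.]

	/* Hypothetical helper showing how the optional host-cap TLVs are armed. */
	static void ath10k_qmi_fill_host_cap(struct wlfw_host_cap_req_msg_v01 *req,
					     u32 wake_msi_data)
	{
		req->daemon_support_valid = 1;
		req->daemon_support = 0;	/* now a u32 on the wire (TLV 0x10) */

		/* Optional TLV 0x11 is only encoded because _valid is set. */
		req->wake_msi_valid = 1;
		req->wake_msi = wake_msi_data;

		/* Fields left with *_valid == 0 (gpios, nm_modem, ...) are simply
		 * skipped by the QMI encoder, keeping the message compatible with
		 * older firmware. */
	}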
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef WCN3990_QMI_SVC_V01_H @@ -553,12 +542,38 @@ struct wlfw_mac_addr_resp_msg_v01 { #define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7 extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[]; +#define QMI_WLFW_MAX_NUM_GPIO_V01 32 struct wlfw_host_cap_req_msg_v01 { u8 daemon_support_valid; - u8 daemon_support; -}; - -#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4 + u32 daemon_support; + u8 wake_msi_valid; + u32 wake_msi; + u8 gpios_valid; + u32 gpios_len; + u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01]; + u8 nm_modem_valid; + u8 nm_modem; + u8 bdf_support_valid; + u8 bdf_support; + u8 bdf_cache_support_valid; + u8 bdf_cache_support; + u8 m3_support_valid; + u8 m3_support; + u8 m3_cache_support_valid; + u8 m3_cache_support; + u8 cal_filesys_support_valid; + u8 cal_filesys_support; + u8 cal_cache_support_valid; + u8 cal_cache_support; + u8 cal_done_valid; + u8 cal_done; + u8 mem_bucket_valid; + u32 mem_bucket; + u8 mem_cfg_mode_valid; + u8 mem_cfg_mode; +}; + +#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189 extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[]; struct wlfw_host_cap_resp_msg_v01 { diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index dfbfe674e11e..dec1582005b9 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _RX_DESC_H_ diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 983ecfef1d28..fae56c67766f 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2004-2011 Atheros Communications Inc. * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -1381,7 +1370,8 @@ static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar) return ret; } -static int ath10k_sdio_hif_power_up(struct ath10k *ar) +static int ath10k_sdio_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); struct sdio_func *func = ar_sdio->func; @@ -1392,6 +1382,12 @@ static int ath10k_sdio_hif_power_up(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n"); + ret = ath10k_sdio_config(ar); + if (ret) { + ath10k_err(ar, "failed to config sdio: %d\n", ret); + return ret; + } + sdio_claim_host(func); ret = sdio_enable_func(func); @@ -1429,11 +1425,19 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar) /* Disable the card */ sdio_claim_host(ar_sdio->func); + ret = sdio_disable_func(ar_sdio->func); - sdio_release_host(ar_sdio->func); + if (ret) { + ath10k_warn(ar, "unable to disable sdio function: %d\n", ret); + sdio_release_host(ar_sdio->func); + return; + } + ret = mmc_hw_reset(ar_sdio->func->card->host); if (ret) - ath10k_warn(ar, "unable to disable sdio function: %d\n", ret); + ath10k_warn(ar, "unable to reset sdio: %d\n", ret); + + sdio_release_host(ar_sdio->func); ar_sdio->is_disabled = true; } @@ -1615,12 +1619,33 @@ static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address, return 0; } +static int ath10k_sdio_hif_swap_mailbox(struct ath10k *ar) +{ + struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); + u32 addr, val; + int ret = 0; + + addr = host_interest_item_address(HI_ITEM(hi_acs_flags)); + + ret = ath10k_sdio_hif_diag_read32(ar, addr, &val); + if (ret) { + ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret); + return ret; + } + + if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) { + ath10k_dbg(ar, ATH10K_DBG_SDIO, + "sdio mailbox swap service enabled\n"); + ar_sdio->swap_mbox = true; + } + return 0; +} + /* HIF start/stop */ static int ath10k_sdio_hif_start(struct ath10k *ar) { struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar); - u32 addr, val; int ret; /* Sleep 20 ms before HIF interrupts are disabled. 
@@ -1654,20 +1679,6 @@ static int ath10k_sdio_hif_start(struct ath10k *ar) if (ret) ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret); - addr = host_interest_item_address(HI_ITEM(hi_acs_flags)); - - ret = ath10k_sdio_hif_diag_read32(ar, addr, &val); - if (ret) { - ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret); - return ret; - } - - if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) { - ath10k_dbg(ar, ATH10K_DBG_SDIO, - "sdio mailbox swap service enabled\n"); - ar_sdio->swap_mbox = true; - } - /* Enable sleep and then disable it again */ ret = ath10k_sdio_hif_set_mbox_sleep(ar, true); if (ret) @@ -1898,6 +1909,7 @@ static const struct ath10k_hif_ops ath10k_sdio_hif_ops = { .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg, .start = ath10k_sdio_hif_start, .stop = ath10k_sdio_hif_stop, + .swap_mailbox = ath10k_sdio_hif_swap_mailbox, .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe, .get_default_pipe = ath10k_sdio_hif_get_default_pipe, .send_complete_check = ath10k_sdio_hif_send_complete_check, @@ -2030,12 +2042,6 @@ static int ath10k_sdio_probe(struct sdio_func *func, ath10k_sdio_set_mbox_info(ar); - ret = ath10k_sdio_config(ar); - if (ret) { - ath10k_err(ar, "failed to config sdio: %d\n", ret); - goto err_free_wq; - } - bus_params.dev_type = ATH10K_DEV_TYPE_HL; /* TODO: don't know yet how to get chip_id with SDIO */ bus_params.chip_id = 0; @@ -2088,7 +2094,10 @@ static struct sdio_driver ath10k_sdio_driver = { .id_table = ath10k_sdio_devices, .probe = ath10k_sdio_probe, .remove = ath10k_sdio_remove, - .drv.pm = ATH10K_SDIO_PM_OPS, + .drv = { + .owner = THIS_MODULE, + .pm = ATH10K_SDIO_PM_OPS, + }, }; static int __init ath10k_sdio_init(void) diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h index 453eb6263143..b8c7ac0330bd 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.h +++ b/drivers/net/wireless/ath/ath10k/sdio.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2004-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _SDIO_H_ diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 54efe6be8f1d..873cb4ce419b 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
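[Editorial note on the mailbox-swap rework above: the hi_acs_flags probe moves out of ath10k_sdio_hif_start() into a dedicated .swap_mailbox HIF op, so the core layer decides when to negotiate the swap. A sketch of the bus-agnostic dispatch that is assumed to live in hif.h (not part of this diff); buses that do not provide the op, such as PCI, simply fall through.]

	static inline int ath10k_hif_swap_mailbox(struct ath10k *ar)
	{
		/* Optional op: only SDIO implements it in this series. */
		if (ar->hif.ops->swap_mailbox)
			return ar->hif.ops->swap_mailbox(ar);

		return 0;
	}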
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -30,6 +19,7 @@ #define ATH10K_SNOC_RX_POST_RETRY_MS 50 #define CE_POLL_PIPE 4 +#define ATH10K_SNOC_WAKE_IRQ 2 static char *const ce_name[] = { "WLAN_CE_0", @@ -66,7 +56,7 @@ static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); static const struct ath10k_snoc_drv_priv drv_priv = { .hw_rev = ATH10K_HW_WCN3990, - .dma_mask = DMA_BIT_MASK(37), + .dma_mask = DMA_BIT_MASK(35), .msa_size = 0x100000, }; @@ -875,13 +865,11 @@ static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe) { struct ath10k_ce_pipe *ce_pipe; struct ath10k_ce_ring *ce_ring; - struct ath10k_snoc *ar_snoc; struct sk_buff *skb; struct ath10k *ar; int i; ar = snoc_pipe->hif_ce_state; - ar_snoc = ath10k_snoc_priv(ar); ce_pipe = snoc_pipe->ce_hdl; ce_ring = ce_pipe->src_ring; @@ -958,7 +946,8 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar) return 0; } -static int ath10k_snoc_wlan_enable(struct ath10k *ar) +static int ath10k_snoc_wlan_enable(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX]; struct ath10k_qmi_wlan_enable_cfg cfg; @@ -992,7 +981,17 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar) cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *) &target_shadow_reg_cfg_map; - mode = QMI_WLFW_MISSION_V01; + switch (fw_mode) { + case ATH10K_FIRMWARE_MODE_NORMAL: + mode = QMI_WLFW_MISSION_V01; + break; + case ATH10K_FIRMWARE_MODE_UTF: + mode = QMI_WLFW_FTM_V01; + break; + default: + ath10k_err(ar, "invalid firmware mode %d\n", fw_mode); + return -EINVAL; + } return ath10k_qmi_wlan_enable(ar, &cfg, mode, NULL); @@ -1000,7 +999,16 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar) static void ath10k_snoc_wlan_disable(struct ath10k *ar) { - if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + + /* If both ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY + * flags are not set, it means that the driver has restarted + * due to a crash inject via debugfs. In this case, the driver + * needs to restart the firmware and hence send qmi wlan disable, + * during the driver restart sequence. 
+ */ + if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) || + !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) ath10k_qmi_wlan_disable(ar); } @@ -1012,14 +1020,15 @@ static void ath10k_snoc_hif_power_down(struct ath10k *ar) ath10k_ce_free_rri(ar); } -static int ath10k_snoc_hif_power_up(struct ath10k *ar) +static int ath10k_snoc_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { int ret; ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n", __func__, ar->state); - ret = ath10k_snoc_wlan_enable(ar); + ret = ath10k_snoc_wlan_enable(ar, fw_mode); if (ret) { ath10k_err(ar, "failed to enable wcn3990: %d\n", ret); return ret; @@ -1041,6 +1050,46 @@ err_wlan_enable: return ret; } +#ifdef CONFIG_PM +static int ath10k_snoc_hif_suspend(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret; + + if (!device_may_wakeup(ar->dev)) + return -EPERM; + + ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line); + if (ret) { + ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n"); + + return ret; +} + +static int ath10k_snoc_hif_resume(struct ath10k *ar) +{ + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + int ret; + + if (!device_may_wakeup(ar->dev)) + return -EPERM; + + ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line); + if (ret) { + ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret); + return ret; + } + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n"); + + return ret; +} +#endif + static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .read32 = ath10k_snoc_read32, .write32 = ath10k_snoc_write32, @@ -1054,6 +1103,10 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = { .send_complete_check = ath10k_snoc_hif_send_complete_check, .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number, .get_target_info = ath10k_snoc_hif_get_target_info, +#ifdef CONFIG_PM + .suspend = ath10k_snoc_hif_suspend, + .resume = ath10k_snoc_hif_resume, +#endif }; static const struct ath10k_bus_ops ath10k_snoc_bus_ops = { diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index 2b2f23cf7c5d..d62f53501fbb 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
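[Editorial note on the SNOC suspend/resume handlers above: they only arm the CE wake interrupt when the device has been marked wakeup-capable and otherwise bail out with -EPERM. A one-line sketch of the prerequisite, assumed to happen at probe time and not part of this hunk.]

	/* Assumed probe-time setup: without it device_may_wakeup() stays false
	 * and ath10k_snoc_hif_suspend() returns -EPERM. */
	device_init_wakeup(ar->dev, true);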
*/ #ifndef _SNOC_H_ @@ -90,7 +79,7 @@ struct ath10k_snoc { struct ath10k_vreg_info *vreg; struct ath10k_clk_info *clk; struct ath10k_qmi *qmi; - unsigned long int flags; + unsigned long flags; }; static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 653b6d013207..5db6bff5193b 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2013-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -494,6 +483,9 @@ static struct dentry *create_buf_file_handler(const char *filename, buf_file = debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); + if (IS_ERR(buf_file)) + return NULL; + *is_global = 1; return buf_file; } diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h index 13276f4dc12c..5f481f11c6e5 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.h +++ b/drivers/net/wireless/ath/ath10k/spectral.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2013-2015 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef SPECTRAL_H diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c index e7f57efadae1..4dddeee684b4 100644 --- a/drivers/net/wireless/ath/ath10k/swap.c +++ b/drivers/net/wireless/ath/ath10k/swap.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2015-2016 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file has implementation for code swap logic. With code swap feature, diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h index fa602f15fa93..25e0ad36ddb1 100644 --- a/drivers/net/wireless/ath/ath10k/swap.h +++ b/drivers/net/wireless/ath/ath10k/swap.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2015-2016 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _SWAP_H_ diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index b11a1c3d87b4..dff6c8ac9dba 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef __TARGADDRS_H__ diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c index c24ee616833c..6433ff10d80e 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.c +++ b/drivers/net/wireless/ath/ath10k/testmode.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "testmode.h" @@ -270,7 +259,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[]) ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n", ar->testmode.utf_mode_fw.fw_file.wmi_op_version); - ret = ath10k_hif_power_up(ar); + ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_UTF); if (ret) { ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret); ar->state = ATH10K_STATE_OFF; diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h index 9cdd150815db..6488fd514ae3 100644 --- a/drivers/net/wireless/ath/ath10k/testmode.h +++ b/drivers/net/wireless/ath/ath10k/testmode.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2014 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "core.h" diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h index 6514d1a14242..ee1cb27c1d60 100644 --- a/drivers/net/wireless/ath/ath10k/testmode_i.h +++ b/drivers/net/wireless/ath/ath10k/testmode_i.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2014,2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* "API" level of the ath10k testmode interface. Bump it after every diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c index fe35edcd3ec8..36c9a1364253 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.c +++ b/drivers/net/wireless/ath/ath10k/thermal.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. 
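[Editorial note on the testmode change above: with the extra firmware-mode argument, the UTF path can ask the bus for FTM firmware while normal bring-up keeps mission mode; on SNOC the mode is translated to the matching QMI_WLFW_*_V01 value in ath10k_snoc_wlan_enable(). A hedged sketch of the contrasting normal-mode call, assumed to mirror the existing ath10k_core_start() flow (the real call site is not shown in this series).]

	ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
	if (ret) {
		ath10k_err(ar, "could not power up hif: %d\n", ret);
		return ret;
	}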
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h index 65e2419543f9..5fdb020f4da3 100644 --- a/drivers/net/wireless/ath/ath10k/thermal.h +++ b/drivers/net/wireless/ath/ath10k/thermal.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _THERMAL_ #define _THERMAL_ diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c index 4a31e2c6fbd4..3ecdff17f64e 100644 --- a/drivers/net/wireless/ath/ath10k/trace.c +++ b/drivers/net/wireless/ath/ath10k/trace.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2012 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 7d2fac342150..ba977bbe6291 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 23606b6972d0..c5818d28f55a 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2016 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "core.h" @@ -95,7 +84,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, wake_up(&htt->empty_tx_wq); spin_unlock_bh(&htt->tx_lock); - if (ar->dev_type != ATH10K_DEV_TYPE_HL) + if (txq && txq->sta && skb_cb->airtime_est) + ieee80211_sta_register_airtime(txq->sta, txq->tid, + skb_cb->airtime_est, 0); + + if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); ath10k_report_offchan_tx(htt->ar, msdu); diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h index 2bf401e436d3..ecac441d83a7 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.h +++ b/drivers/net/wireless/ath/ath10k/txrx.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _TXRX_H_ #define _TXRX_H_ diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index f731d35ee76d..970cf69ac35f 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2007-2011 Atheros Communications Inc. * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include @@ -706,7 +695,8 @@ static void ath10k_usb_hif_send_complete_check(struct ath10k *ar, { } -static int ath10k_usb_hif_power_up(struct ath10k *ar) +static int ath10k_usb_hif_power_up(struct ath10k *ar, + enum ath10k_firmware_mode fw_mode) { return 0; } diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h index f60a3cc7d712..34d683e8fc18 100644 --- a/drivers/net/wireless/ath/ath10k/usb.h +++ b/drivers/net/wireless/ath/ath10k/usb.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2004-2011 Atheros Communications Inc. * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. * Copyright (c) 2016-2017 Erik Stromdahl - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _USB_H_ diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 04663076d27a..1491c25518bb 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
- * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _WMI_OPS_H_ @@ -33,6 +22,9 @@ struct wmi_ops { struct wmi_mgmt_rx_ev_arg *arg); int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb, struct wmi_tlv_mgmt_tx_compl_ev_arg *arg); + int (*pull_mgmt_tx_bundle_compl)( + struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg); int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb, struct wmi_ch_info_ev_arg *arg); int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb, @@ -66,6 +58,8 @@ struct wmi_ops { struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt); struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar); + struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar, + const u8 macaddr[ETH_ALEN]); struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, u16 ctl2g, u16 ctl5g, enum wmi_dfs_region dfs_reg); @@ -279,6 +273,16 @@ ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb, return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg); } +static inline int +ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg) +{ + if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl) + return -EOPNOTSUPP; + + return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg); +} + static inline int ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg) @@ -506,6 +510,22 @@ ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, ar->wmi.cmd->pdev_set_regdomain_cmdid); } +static inline int +ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN]) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_pdev_set_base_macaddr) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->pdev_set_base_macaddr_cmdid); +} + static inline int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt) { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 892bd8c30dd9..582fb11f648a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
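[Editorial note on the new wmi-ops wrappers above: they follow the driver's usual optional-command pattern, returning -EOPNOTSUPP when the attached WMI backend does not implement the generator, so callers can treat that as "not supported" rather than a failure. A hypothetical caller for the new base-MAC command; the actual call site is not part of this hunk.]

	/* Hypothetical: push the base MAC address to firmware where supported. */
	ret = ath10k_wmi_pdev_set_base_macaddr(ar, ar->mac_addr);
	if (ret && ret != -EOPNOTSUPP)
		ath10k_warn(ar, "failed to set base macaddr: %d\n", ret);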
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "core.h" #include "debug.h" @@ -24,6 +13,7 @@ #include "wmi-tlv.h" #include "p2p.h" #include "testmode.h" +#include /***************/ /* TLV helpers */ @@ -620,6 +610,9 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_TLV_MGMT_TX_COMPLETION_EVENTID: ath10k_wmi_event_mgmt_tx_compl(ar, skb); break; + case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID: + ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb); + break; default: ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id); break; @@ -681,11 +674,88 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb, arg->desc_id = ev->desc_id; arg->status = ev->status; arg->pdev_id = ev->pdev_id; + arg->ppdu_id = ev->ppdu_id; + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + arg->ack_rssi = ev->ack_rssi; kfree(tb); return 0; } +struct wmi_tlv_tx_bundle_compl_parse { + const __le32 *num_reports; + const __le32 *desc_ids; + const __le32 *status; + const __le32 *ppdu_ids; + const __le32 *ack_rssi; + bool desc_ids_done; + bool status_done; + bool ppdu_ids_done; + bool ack_rssi_done; +}; + +static int +ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data; + + switch (tag) { + case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT: + bundle_tx_compl->num_reports = ptr; + break; + case WMI_TLV_TAG_ARRAY_UINT32: + if (!bundle_tx_compl->desc_ids_done) { + bundle_tx_compl->desc_ids_done = true; + bundle_tx_compl->desc_ids = ptr; + } else if (!bundle_tx_compl->status_done) { + bundle_tx_compl->status_done = true; + bundle_tx_compl->status = ptr; + } else if (!bundle_tx_compl->ppdu_ids_done) { + bundle_tx_compl->ppdu_ids_done = true; + bundle_tx_compl->ppdu_ids = ptr; + } else if (!bundle_tx_compl->ack_rssi_done) { + bundle_tx_compl->ack_rssi_done = true; + bundle_tx_compl->ack_rssi = ptr; + } + break; + default: + break; + } + return 0; +} + +static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev( + struct ath10k *ar, struct sk_buff *skb, + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg) +{ + struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { }; + int ret; + + ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, + ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse, + &bundle_tx_compl); + if (ret) { + ath10k_warn(ar, "failed to parse tlv: %d\n", ret); + return ret; + } + + if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids || + !bundle_tx_compl.status) + return -EPROTO; + + arg->num_reports = *bundle_tx_compl.num_reports; + arg->desc_ids = bundle_tx_compl.desc_ids; + arg->status = bundle_tx_compl.status; + arg->ppdu_ids = bundle_tx_compl.ppdu_ids; + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + arg->ack_rssi = bundle_tx_compl.ack_rssi; + + return 0; +} + static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_mgmt_rx_ev_arg *arg) @@ -1227,6 +1297,7 @@ static int 
ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, { const void **tb; const struct wmi_tlv_stats_ev *ev; + u32 num_peer_stats_extd; const void *data; u32 num_pdev_stats; u32 num_vdev_stats; @@ -1234,6 +1305,7 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, u32 num_bcnflt_stats; u32 num_chan_stats; size_t data_len; + u32 stats_id; int ret; int i; @@ -1258,11 +1330,13 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, num_peer_stats = __le32_to_cpu(ev->num_peer_stats); num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); num_chan_stats = __le32_to_cpu(ev->num_chan_stats); + stats_id = __le32_to_cpu(ev->stats_id); + num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd); ath10k_dbg(ar, ATH10K_DBG_WMI, - "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n", + "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n", num_pdev_stats, num_vdev_stats, num_peer_stats, - num_bcnflt_stats, num_chan_stats); + num_bcnflt_stats, num_chan_stats, num_peer_stats_extd); for (i = 0; i < num_pdev_stats; i++) { const struct wmi_pdev_stats *src; @@ -1327,6 +1401,28 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_pull_peer_stats(&src->old, dst); dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); + + if (stats_id & WMI_TLV_STAT_PEER_EXTD) { + const struct wmi_tlv_peer_stats_extd *extd; + unsigned long rx_duration_high; + + extd = data + sizeof(*src) * (num_peer_stats - i - 1) + + sizeof(*extd) * i; + + dst->rx_duration = __le32_to_cpu(extd->rx_duration); + rx_duration_high = __le32_to_cpu + (extd->rx_duration_high); + + if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT, + &rx_duration_high)) { + rx_duration_high = + FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK, + rx_duration_high); + dst->rx_duration |= (u64)rx_duration_high << + WMI_TLV_PEER_RX_DURATION_SHIFT; + } + } + list_add_tail(&dst->list, &stats->peers); } @@ -1514,21 +1610,55 @@ ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id, cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n", + param_id, param_value); return skb; } +static void +ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks) +{ + struct host_memory_chunk *chunk; + struct wmi_tlv *tlv; + int i; + __le16 tlv_len, tlv_tag; + + tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK); + tlv_len = __cpu_to_le16(sizeof(*chunk)); + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { + tlv = host_mem_chunks; + tlv->tag = tlv_tag; + tlv->len = tlv_len; + chunk = (void *)tlv->value; + + chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); + chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); + chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n", + i, + ar->wmi.mem_chunks[i].len, + (unsigned long long)ar->wmi.mem_chunks[i].paddr, + ar->wmi.mem_chunks[i].req_id); + + host_mem_chunks += sizeof(*tlv); + host_mem_chunks += sizeof(*chunk); + } +} + static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) { struct sk_buff *skb; struct wmi_tlv *tlv; struct wmi_tlv_init_cmd *cmd; struct wmi_tlv_resource_config *cfg; - struct wmi_host_mem_chunks *chunks; + void *chunks; size_t len, chunks_len; void *ptr; - chunks_len = ar->wmi.num_mem_chunks * sizeof(struct 
host_memory_chunk); + chunks_len = ar->wmi.num_mem_chunks * + (sizeof(struct host_memory_chunk) + sizeof(*tlv)); len = (sizeof(*tlv) + sizeof(*cmd)) + (sizeof(*tlv) + sizeof(*cfg)) + (sizeof(*tlv) + chunks_len); @@ -1611,7 +1741,7 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0); cfg->vow_config = __cpu_to_le32(0); cfg->gtk_offload_max_vdev = __cpu_to_le32(2); - cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC); + cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx); cfg->max_frag_entries = __cpu_to_le32(2); cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS); cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20); @@ -1626,9 +1756,12 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) cfg->num_ocb_vdevs = __cpu_to_le32(0); cfg->num_ocb_channels = __cpu_to_le32(0); cfg->num_ocb_schedules = __cpu_to_le32(0); - cfg->host_capab = __cpu_to_le32(0); + cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL); - ath10k_wmi_put_host_mem_chunks(ar, chunks); + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI); + + ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n"); return skb; @@ -1984,7 +2117,8 @@ ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id, cmd->param_id = __cpu_to_le32(param_id); cmd->param_value = __cpu_to_le32(param_value); - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n", + vdev_id, param_id, param_value); return skb; } @@ -1998,9 +2132,11 @@ ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar, size_t len; void *ptr; - if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL) + if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] && + arg->key_data) return ERR_PTR(-EINVAL); - if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL) + if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] && + !arg->key_data) return ERR_PTR(-EINVAL); len = sizeof(*tlv) + sizeof(*cmd) + @@ -2298,7 +2434,9 @@ ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, cmd->param_value = __cpu_to_le32(param_value); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n"); + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tlv vdev %d peer %pM set param %d value 0x%x\n", + vdev_id, peer_addr, param_id, param_value); return skb; } @@ -2692,7 +2830,9 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, arvif = (void *)cb->vif->drv_priv; vdev_id = arvif->vdev_id; - if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) && + (!(ieee80211_is_nullfunc(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control))))) return ERR_PTR(-EINVAL); len = sizeof(*cmd) + 2 * sizeof(*tlv); @@ -2700,10 +2840,8 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && - ieee80211_has_protected(hdr->frame_control)) { - len += IEEE80211_CCMP_MIC_LEN; + ieee80211_has_protected(hdr->frame_control)) buf_len += IEEE80211_CCMP_MIC_LEN; - } buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN); buf_len = round_up(buf_len, 
4); @@ -3259,6 +3397,8 @@ ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar) cmd = (void *)tlv->value; cmd->enable = __cpu_to_le32(1); + if (!ar->bus_param.link_can_suspend) + cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n"); return skb; @@ -4093,6 +4233,7 @@ static const struct wmi_ops wmi_tlv_ops = { .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev, + .pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev, .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev, .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev, diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index e07e9907e355..65e6aa520b06 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1,19 +1,8 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifndef _WMI_TLV_H #define _WMI_TLV_H @@ -25,6 +14,8 @@ #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0 #define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64 +#define WMI_RSRC_CFG_FLAG_TX_ACK_RSSI BIT(18) + enum wmi_tlv_grp_id { WMI_TLV_GRP_START = 0x3, WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START, @@ -321,6 +312,7 @@ enum wmi_tlv_event_id { WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID, WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID, WMI_TLV_MGMT_TX_COMPLETION_EVENTID, + WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID, WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG), WMI_TLV_TX_ADDBA_COMPLETE_EVENTID, WMI_TLV_BA_RSP_SSN_EVENTID, @@ -1394,6 +1386,25 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_AP_TWT = 153, WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154, WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155, + WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156, + WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157, + WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158, + WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159, + WMI_TLV_SERVICE_MOTION_DET = 160, + WMI_TLV_SERVICE_INFRA_MBSSID = 161, + WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162, + WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163, + WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164, + WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165, + WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166, + WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167, + WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168, + WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169, + WMI_TLV_SERVICE_ESP_SUPPORT = 170, + WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171, + WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172, + WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173, + WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174, WMI_TLV_MAX_EXT_SERVICE = 256, }; @@ -1567,6 +1578,8 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len) SVCMAP(WMI_TLV_SERVICE_THERM_THROT, WMI_SERVICE_THERM_THROT, WMI_TLV_MAX_SERVICE); + SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI, + WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE); } #undef SVCMAP @@ -1592,10 +1605,14 @@ struct chan_info_params { u32 mac_clk_mhz; }; +#define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL BIT(9) + struct wmi_tlv_mgmt_tx_compl_ev { __le32 desc_id; __le32 status; __le32 pdev_id; + __le32 ppdu_id; + __le32 ack_rssi; }; #define WMI_TLV_MGMT_RX_NUM_RSSI 4 @@ -1872,6 +1889,22 @@ struct wmi_tlv_req_stats_cmd { struct wmi_mac_addr peer_macaddr; } __packed; +#define WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT 31 +#define WMI_TLV_PEER_RX_DURATION_HIGH_MASK GENMASK(30, 0) +#define WMI_TLV_PEER_RX_DURATION_SHIFT 32 + +struct wmi_tlv_peer_stats_extd { + struct wmi_mac_addr peer_macaddr; + __le32 rx_duration; + __le32 peer_tx_bytes; + __le32 peer_rx_bytes; + __le32 last_tx_rate_code; + __le32 last_tx_power; + __le32 rx_mc_bc_cnt; + __le32 rx_duration_high; + __le32 reserved[2]; +} __packed; + struct wmi_tlv_vdev_stats { __le32 vdev_id; __le32 beacon_snr; @@ -1965,6 +1998,10 @@ struct wmi_tlv_stats_ev { __le32 num_peer_stats; __le32 num_bcnflt_stats; __le32 num_chan_stats; + __le32 num_mib_stats; + __le32 pdev_id; + __le32 num_bcn_stats; + __le32 num_peer_stats_extd; } __packed; struct wmi_tlv_p2p_noa_ev { @@ -1998,8 +2035,15 @@ struct wmi_tlv_set_quiet_cmd { __le32 enabled; } __packed; +enum wmi_tlv_wow_interface_cfg { + WOW_IFACE_PAUSE_ENABLED, + WOW_IFACE_PAUSE_DISABLED +}; + struct wmi_tlv_wow_enable_cmd { __le32 enable; + __le32 pause_iface_config; + __le32 flags; } __packed; struct wmi_tlv_wow_host_wakeup_ind { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c 
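The new wmi_tlv_peer_stats_extd structure splits a peer's accumulated RX airtime across two 32-bit words: rx_duration carries the low word and rx_duration_high carries up to 31 extra bits guarded by a valid bit, as laid out by the WMI_TLV_PEER_RX_DURATION_* macros added above. A minimal sketch of how a consumer could stitch the two halves together — illustrative only, not part of this patch; the helper name is an assumption and it relies on the declarations introduced here:

static u64 ath10k_tlv_peer_rx_duration(const struct wmi_tlv_peer_stats_extd *src)
{
	u32 hi = __le32_to_cpu(src->rx_duration_high);
	u64 dur = __le32_to_cpu(src->rx_duration);

	/* The high word only contributes when the firmware marks it valid. */
	if (hi & BIT(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT))
		dur |= (u64)(hi & WMI_TLV_PEER_RX_DURATION_HIGH_MASK) <<
		       WMI_TLV_PEER_RX_DURATION_SHIFT;

	return dur;
}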
index 8e236d158ca6..98a90e49d666 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1,19 +1,8 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */ #include @@ -827,6 +816,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = { .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, + .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED, }; /* 10.X WMI VDEV param map */ @@ -903,6 +893,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, + .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { @@ -978,6 +969,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, + .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { @@ -1056,6 +1048,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { .inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT, .dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT, .disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN, + .rtt_responder_role = WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE, }; static struct wmi_pdev_param_map wmi_pdev_param_map = { @@ -1606,6 +1599,30 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, }; +static const u8 wmi_key_cipher_suites[] = { + [WMI_CIPHER_NONE] = WMI_CIPHER_NONE, + [WMI_CIPHER_WEP] = WMI_CIPHER_WEP, + [WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP, + [WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB, + [WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM, + [WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI, + [WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP, + [WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC, + [WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM, +}; + +static const u8 wmi_tlv_key_cipher_suites[] = { + [WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE, + [WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP, + [WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP, + [WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB, + [WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM, + [WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI, + [WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP, + [WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC, + [WMI_CIPHER_AES_GCM] = 
WMI_TLV_CIPHER_AES_GCM, +}; + static const struct wmi_peer_flags_map wmi_peer_flags_map = { .auth = WMI_PEER_AUTH, .qos = WMI_PEER_QOS, @@ -2325,8 +2342,8 @@ static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar, return true; } -static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, - u32 status) +static int +wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param) { struct ath10k_mgmt_tx_pkt_addr *pkt_addr; struct ath10k_wmi *wmi = &ar->wmi; @@ -2336,30 +2353,34 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, spin_lock_bh(&ar->data_lock); - pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id); + pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id); if (!pkt_addr) { ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n", - desc_id); + param->desc_id); ret = -ENOENT; goto out; } msdu = pkt_addr->vaddr; dma_unmap_single(ar->dev, pkt_addr->paddr, - msdu->len, DMA_FROM_DEVICE); + msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); - if (status) + if (param->status) { info->flags &= ~IEEE80211_TX_STAT_ACK; - else + } else { info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR + + param->ack_rssi; + info->status.is_valid_ack_signal = true; + } ieee80211_tx_status_irqsafe(ar->hw, msdu); ret = 0; out: - idr_remove(&wmi->mgmt_pending_tx, desc_id); + idr_remove(&wmi->mgmt_pending_tx, param->desc_id); spin_unlock_bh(&ar->data_lock); return ret; } @@ -2367,6 +2388,7 @@ out: int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb) { struct wmi_tlv_mgmt_tx_compl_ev_arg arg; + struct mgmt_tx_compl_params param; int ret; ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg); @@ -2375,14 +2397,50 @@ int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb) return ret; } - wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id), - __le32_to_cpu(arg.status)); + memset(¶m, 0, sizeof(struct mgmt_tx_compl_params)); + param.desc_id = __le32_to_cpu(arg.desc_id); + param.status = __le32_to_cpu(arg.status); + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + param.ack_rssi = __le32_to_cpu(arg.ack_rssi); + + wmi_process_mgmt_tx_comp(ar, ¶m); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n"); return 0; } +int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb) +{ + struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg; + struct mgmt_tx_compl_params param; + u32 num_reports; + int i, ret; + + ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret); + return ret; + } + + num_reports = __le32_to_cpu(arg.num_reports); + + for (i = 0; i < num_reports; i++) { + memset(¶m, 0, sizeof(struct mgmt_tx_compl_params)); + param.desc_id = __le32_to_cpu(arg.desc_ids[i]); + param.status = __le32_to_cpu(arg.desc_ids[i]); + + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) + param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]); + wmi_process_mgmt_tx_comp(ar, ¶m); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n"); + + return 0; +} + int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb) { struct wmi_mgmt_rx_ev_arg arg = {}; @@ -6239,6 +6297,25 @@ int ath10k_wmi_connect(struct ath10k *ar) return 0; } +static struct sk_buff * +ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar, + const u8 macaddr[ETH_ALEN]) +{ + struct wmi_pdev_set_base_macaddr_cmd *cmd; + struct sk_buff *skb; + + 
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data; + ether_addr_copy(cmd->mac_addr.addr, macaddr); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev basemac %pM\n", macaddr); + return skb; +} + static struct sk_buff * ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g, u16 ctl2g, u16 ctl5g, @@ -8245,7 +8322,7 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer, "Peer TX rate", peer->peer_tx_rate); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "Peer RX rate", peer->peer_rx_rate); - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + len += scnprintf(buf + len, buf_len - len, "%30s %llu\n", "Peer RX duration", peer->rx_duration); len += scnprintf(buf + len, buf_len - len, "\n"); @@ -9048,6 +9125,7 @@ static const struct wmi_ops wmi_10_2_ops = { .gen_peer_create = ath10k_wmi_op_gen_peer_create, .gen_peer_delete = ath10k_wmi_op_gen_peer_delete, .gen_peer_flush = ath10k_wmi_op_gen_peer_flush, + .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr, .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param, .gen_set_psmode = ath10k_wmi_op_gen_set_psmode, .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps, @@ -9166,6 +9244,7 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume, + .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr, .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd, .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param, .gen_init = ath10k_wmi_10_4_op_gen_init, @@ -9229,6 +9308,7 @@ int ath10k_wmi_attach(struct ath10k *ar) ar->wmi.vdev_param = &wmi_10_4_vdev_param_map; ar->wmi.pdev_param = &wmi_10_4_pdev_param_map; ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; break; case ATH10K_FW_WMI_OP_VERSION_10_2_4: ar->wmi.cmd = &wmi_10_2_4_cmd_map; @@ -9236,6 +9316,7 @@ int ath10k_wmi_attach(struct ath10k *ar) ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map; ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map; ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; break; case ATH10K_FW_WMI_OP_VERSION_10_2: ar->wmi.cmd = &wmi_10_2_cmd_map; @@ -9243,6 +9324,7 @@ int ath10k_wmi_attach(struct ath10k *ar) ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = &wmi_10x_pdev_param_map; ar->wmi.peer_flags = &wmi_10_2_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; break; case ATH10K_FW_WMI_OP_VERSION_10_1: ar->wmi.cmd = &wmi_10x_cmd_map; @@ -9250,6 +9332,7 @@ int ath10k_wmi_attach(struct ath10k *ar) ar->wmi.vdev_param = &wmi_10x_vdev_param_map; ar->wmi.pdev_param = &wmi_10x_pdev_param_map; ar->wmi.peer_flags = &wmi_10x_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; break; case ATH10K_FW_WMI_OP_VERSION_MAIN: ar->wmi.cmd = &wmi_cmd_map; @@ -9257,9 +9340,11 @@ int ath10k_wmi_attach(struct ath10k *ar) ar->wmi.vdev_param = &wmi_vdev_param_map; ar->wmi.pdev_param = &wmi_pdev_param_map; ar->wmi.peer_flags = &wmi_peer_flags_map; + ar->wmi_key_cipher = wmi_key_cipher_suites; break; case ATH10K_FW_WMI_OP_VERSION_TLV: ath10k_wmi_tlv_attach(ar); + ar->wmi_key_cipher = wmi_tlv_key_cipher_suites; break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 2034ccc7cc72..e1c40bb69932 100644 --- 
a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1,26 +1,15 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _WMI_H_ #define _WMI_H_ #include -#include +#include /* * This file specifies the WMI interface for the Unified Software @@ -208,6 +197,11 @@ enum wmi_service { WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, WMI_SERVICE_THERM_THROT, + WMI_SERVICE_RTT_RESPONDER_ROLE, + WMI_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_SERVICE_REPORT_AIRTIME, + + /* Remember to add the new value to wmi_service_name()! */ /* keep last */ WMI_SERVICE_MAX, @@ -368,9 +362,14 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_HTT_ASSERT_TRIGGER_SUPPORT, WMI_10_4_SERVICE_VDEV_FILTER_NEIGHBOR_RX_PACKETS, WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + WMI_10_4_SERVICE_PEER_CHWIDTH_CHANGE, + WMI_10_4_SERVICE_RX_FILTER_OUT_COUNT, + WMI_10_4_SERVICE_RTT_RESPONDER_ROLE, + WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, + WMI_10_4_SERVICE_REPORT_AIRTIME, }; -static inline char *wmi_service_name(int service_id) +static inline char *wmi_service_name(enum wmi_service service_id) { #define SVCSTR(x) case x: return #x @@ -467,6 +466,7 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL); SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC); SVCSTR(WMI_SERVICE_VDEV_RX_FILTER); + SVCSTR(WMI_SERVICE_BTCOEX); SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION); SVCSTR(WMI_SERVICE_DBGLOG_WARN2); SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE); @@ -476,18 +476,29 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT); SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE); SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY); + SVCSTR(WMI_SERVICE_MGMT_TX_WMI); SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH); SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS); SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT); SVCSTR(WMI_SERVICE_TPC_STATS_FINAL); SVCSTR(WMI_SERVICE_RESET_CHIP); + SVCSTR(WMI_SERVICE_SPOOF_MAC_SUPPORT); SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI); SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT); - default: + SVCSTR(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT); + SVCSTR(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT); + SVCSTR(WMI_SERVICE_THERM_THROT); + SVCSTR(WMI_SERVICE_RTT_RESPONDER_ROLE); + SVCSTR(WMI_SERVICE_PER_PACKET_SW_ENCRYPT); + SVCSTR(WMI_SERVICE_REPORT_AIRTIME); + + case WMI_SERVICE_MAX: return NULL; } #undef SVCSTR + + return NULL; } #define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ @@ -579,6 +590,8 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, 
WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len); SVCMAP(WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT, WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, len); + SVCMAP(WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len); } static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, @@ -799,6 +812,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len); SVCMAP(WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_RTT_RESPONDER_ROLE, + WMI_SERVICE_RTT_RESPONDER_ROLE, len); + SVCMAP(WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len); + SVCMAP(WMI_10_4_SERVICE_REPORT_AIRTIME, + WMI_SERVICE_REPORT_AIRTIME, len); } #undef SVCMAP @@ -1986,7 +2005,7 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode) /* no default handler to allow compiler to check that the * enum is fully handled */ - }; + } return ""; } @@ -2075,6 +2094,8 @@ enum wmi_channel_change_cause { #define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */ #define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8 #define WMI_HT_CAP_HT40_SGI 0x0800 +#define WMI_HT_CAP_RX_LDPC 0x1000 /* LDPC RX support */ +#define WMI_HT_CAP_TX_LDPC 0x2000 /* LDPC TX support */ #define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \ WMI_HT_CAP_HT20_SGI | \ @@ -2972,6 +2993,8 @@ enum wmi_10_4_feature_mask { WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11), WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12), WMI_10_4_TX_DATA_ACK_RSSI = BIT(16), + WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT = BIT(17), + WMI_10_4_REPORT_AIRTIME = BIT(18), }; @@ -4083,6 +4106,10 @@ struct wmi_pdev_set_param_cmd { __le32 param_value; } __packed; +struct wmi_pdev_set_base_macaddr_cmd { + struct wmi_mac_addr mac_addr; +} __packed; + /* valid period is 1 ~ 60000ms, unit in millisecond */ #define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000 @@ -4507,6 +4534,13 @@ enum wmi_10_4_stats_id { WMI_10_4_STAT_VDEV_EXTD = BIT(4), }; +enum wmi_tlv_stats_id { + WMI_TLV_STAT_PDEV = BIT(0), + WMI_TLV_STAT_VDEV = BIT(1), + WMI_TLV_STAT_PEER = BIT(2), + WMI_TLV_STAT_PEER_EXTD = BIT(10), +}; + struct wlan_inst_rssi_args { __le16 cfg_retry_count; __le16 retry_count; @@ -4929,15 +4963,30 @@ struct wmi_key_seq_counter { __le32 key_seq_counter_h; } __packed; -#define WMI_CIPHER_NONE 0x0 /* clear key */ -#define WMI_CIPHER_WEP 0x1 -#define WMI_CIPHER_TKIP 0x2 -#define WMI_CIPHER_AES_OCB 0x3 -#define WMI_CIPHER_AES_CCM 0x4 -#define WMI_CIPHER_WAPI 0x5 -#define WMI_CIPHER_CKIP 0x6 -#define WMI_CIPHER_AES_CMAC 0x7 -#define WMI_CIPHER_AES_GCM 0x8 +enum wmi_cipher_suites { + WMI_CIPHER_NONE, + WMI_CIPHER_WEP, + WMI_CIPHER_TKIP, + WMI_CIPHER_AES_OCB, + WMI_CIPHER_AES_CCM, + WMI_CIPHER_WAPI, + WMI_CIPHER_CKIP, + WMI_CIPHER_AES_CMAC, + WMI_CIPHER_AES_GCM, +}; + +enum wmi_tlv_cipher_suites { + WMI_TLV_CIPHER_NONE, + WMI_TLV_CIPHER_WEP, + WMI_TLV_CIPHER_TKIP, + WMI_TLV_CIPHER_AES_OCB, + WMI_TLV_CIPHER_AES_CCM, + WMI_TLV_CIPHER_WAPI, + WMI_TLV_CIPHER_CKIP, + WMI_TLV_CIPHER_AES_CMAC, + WMI_TLV_CIPHER_ANY, + WMI_TLV_CIPHER_AES_GCM, +}; struct wmi_vdev_install_key_cmd { __le32 vdev_id; @@ -5003,12 +5052,13 @@ enum wmi_rate_preamble { #define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1) #define ATH10K_VHT_MCS_NUM 10 -#define ATH10K_BW_NUM 4 +#define ATH10K_BW_NUM 6 #define ATH10K_NSS_NUM 4 #define ATH10K_LEGACY_NUM 12 #define ATH10K_GI_NUM 2 #define ATH10K_HT_MCS_NUM 32 #define ATH10K_RATE_TABLE_NUM 320 
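With the raw WMI_CIPHER_* defines turned into enum indices, the wmi_key_cipher_suites and wmi_tlv_key_cipher_suites tables added to wmi.c (selected through ar->wmi_key_cipher in ath10k_wmi_attach()) translate the host-side index into whichever value the running firmware expects, since the TLV firmware numbers its ciphers differently (note WMI_TLV_CIPHER_ANY displacing AES_GCM). A small usage sketch, assuming the patch's tables are in scope; the helper itself is hypothetical:

/* Look up the firmware-specific cipher code for a host-side index.
 * ar->wmi_key_cipher points at wmi_key_cipher_suites for main/10.x
 * firmware and at wmi_tlv_key_cipher_suites for TLV firmware.
 */
static u32 ath10k_fw_key_cipher(struct ath10k *ar, enum wmi_cipher_suites idx)
{
	return ar->wmi_key_cipher[idx];
}

This is the same lookup the vdev install-key path above performs when it compares against ar->wmi_key_cipher[WMI_CIPHER_NONE] instead of the bare constant.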
+#define ATH10K_RATE_INFO_FLAGS_SGI_BIT 2 /* Value to disable fixed rate setting */ #define WMI_FIXED_RATE_NONE (0xff) @@ -5083,6 +5133,7 @@ struct wmi_vdev_param_map { u32 inc_tsf; u32 dec_tsf; u32 disable_4addr_src_lrn; + u32 rtt_responder_role; }; #define WMI_VDEV_PARAM_UNSUPPORTED 0 @@ -6682,10 +6733,27 @@ struct wmi_scan_ev_arg { __le32 vdev_id; }; +struct mgmt_tx_compl_params { + u32 desc_id; + u32 status; + u32 ppdu_id; + int ack_rssi; +}; + struct wmi_tlv_mgmt_tx_compl_ev_arg { __le32 desc_id; __le32 status; __le32 pdev_id; + __le32 ppdu_id; + __le32 ack_rssi; +}; + +struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg { + __le32 num_reports; + const __le32 *desc_ids; + const __le32 *status; + const __le32 *ppdu_ids; + const __le32 *ack_rssi; }; struct wmi_mgmt_rx_ev_arg { @@ -7244,6 +7312,7 @@ int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg); int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb); +int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb); void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb); int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb); diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 36d4245c308e..8c26adddd034 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: ISC /* * Copyright (c) 2015-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mac.h" @@ -77,7 +66,7 @@ static int ath10k_wow_cleanup(struct ath10k *ar) return 0; } -/** +/* * Convert a 802.3 format to a 802.11 format. * +------------+-----------+--------+----------------+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | @@ -88,9 +77,8 @@ static int ath10k_wow_cleanup(struct ath10k *ar) * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... 
| * +--+------------+----+-----------+---------------+-----------+ */ -static void ath10k_wow_convert_8023_to_80211 - (struct cfg80211_pkt_pattern *new, - const struct cfg80211_pkt_pattern *old) +static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new, + const struct cfg80211_pkt_pattern *old) { u8 hdr_8023_pattern[ETH_HLEN] = {}; u8 hdr_8023_bit_mask[ETH_HLEN] = {}; diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h index 6e810105b775..14ea4e1e925e 100644 --- a/drivers/net/wireless/ath/ath10k/wow.h +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -1,17 +1,6 @@ +/* SPDX-License-Identifier: ISC */ /* * Copyright (c) 2015,2017 Qualcomm Atheros, Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifndef _WOW_H_ #define _WOW_H_ diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c index 54132af70094..aa1c71a76ef7 100644 --- a/drivers/net/wireless/ath/ath6kl/init.c +++ b/drivers/net/wireless/ath/ath6kl/init.c @@ -1140,7 +1140,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name) len -= ie_len; data += ie_len; - }; + } ret = 0; out: diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 9d7ac1ab2d02..68854c45d0a4 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -776,10 +776,8 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi) cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR; cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS; - ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, + return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG); - - return 0; } int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index f019a20e5a1f..2b29bf4730f6 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3457,9 +3457,9 @@ static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size, if (!((pBase->txrxMask >> i) & 1)) continue; - len += snprintf(buf + len, size - len, "Chain %d\n", i); + len += scnprintf(buf + len, size - len, "Chain %d\n", i); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "Freq\t ref\tvolt\ttemp\tnf_cal\tnf_pow\trx_temp\n"); for (j = 0; j < cal_pier_nr; j++) { @@ -3471,10 +3471,10 @@ static u32 ar9003_dump_cal_data(struct ath_hw *ah, char *buf, u32 len, u32 size, freq = 4800 + eep->calFreqPier5G[j] * 5; } - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "%d\t", freq); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "%d\t%d\t%d\t%d\t%d\t%d\n", 
cal_pier->refPower, cal_pier->voltMeas, @@ -3505,12 +3505,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, len += scnprintf(buf + len, size - len, "Calibration data\n"); len = ar9003_dump_cal_data(ah, buf, len, size, true); - len += snprintf(buf + len, size - len, + len += scnprintf(buf + len, size - len, "%20s :\n", "5GHz modal Header"); len = ar9003_dump_modal_eeprom(buf, len, size, &eep->modalHeader5G); - len += snprintf(buf + len, size - len, "Calibration data\n"); + len += scnprintf(buf + len, size - len, "Calibration data\n"); len = ar9003_dump_cal_data(ah, buf, len, size, false); goto out; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 0fca44e91a71..a412b352182c 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -112,8 +112,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, #define ATH_TXFIFO_DEPTH 8 #define ATH_TX_ERROR 0x01 -#define ATH_AIRTIME_QUANTUM 300 /* usec */ - /* Stop tx traffic 1ms before the GO goes away */ #define ATH_P2P_PS_STOP_TIME 1000 @@ -246,10 +244,8 @@ struct ath_atx_tid { s8 bar_index; bool active; bool clear_ps_filter; - bool has_queued; }; -void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid); void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid); struct ath_node { @@ -263,12 +259,9 @@ struct ath_node { bool sleeping; bool no_ps_filter; - s64 airtime_deficit[IEEE80211_NUM_ACS]; - u32 airtime_rx_start; #ifdef CONFIG_ATH9K_STATION_STATISTICS struct ath_rx_rate_stats rx_rate_stats; - struct ath_airtime_stats airtime_stats; #endif u8 key_idx[4]; @@ -986,11 +979,6 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs); #define ATH9K_NUM_CHANCTX 2 /* supports 2 operating channels */ -#define AIRTIME_USE_TX BIT(0) -#define AIRTIME_USE_RX BIT(1) -#define AIRTIME_USE_NEW_QUEUES BIT(2) -#define AIRTIME_ACTIVE(flags) (!!(flags & (AIRTIME_USE_TX|AIRTIME_USE_RX))) - struct ath_softc { struct ieee80211_hw *hw; struct device *dev; @@ -1034,8 +1022,6 @@ struct ath_softc { short nbcnvifs; unsigned long ps_usecount; - u16 airtime_flags; /* AIRTIME_* */ - struct ath_rx rx; struct ath_tx tx; struct ath_beacon beacon; diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 6aa3ec024ffa..21191955a7c1 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -1039,6 +1039,9 @@ static struct dentry *create_buf_file_handler(const char *filename, buf_file = debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); + if (IS_ERR(buf_file)) + return NULL; + *is_global = 1; return buf_file; } diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 4399e9ad058f..26ea51a72156 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -148,7 +148,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf, { "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel }, { "CCK LEVEL", ah->ani.cckNoiseImmunityLevel }, { "SPUR UP", ah->stats.ast_ani_spurup }, - { "SPUR DOWN", ah->stats.ast_ani_spurup }, + { "SPUR DOWN", ah->stats.ast_ani_spurdown }, { "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon }, { "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff }, { "MRC-CCK ON", ah->stats.ast_ani_ccklow }, @@ -1443,9 +1443,6 @@ int ath9k_init_debug(struct ath_hw *ah) #endif 
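The ath9k hunks around this point strip out the driver's private airtime-fairness machinery (ATH_AIRTIME_QUANTUM, the per-node airtime_deficit, the AIRTIME_USE_* flags and the airtime debugfs files) because the work is handed over to mac80211: later in this patch the driver reports per-station airtime with ieee80211_sta_register_airtime() and pulls TX queues through mac80211's scheduler. A rough sketch of that scheduling pattern, as a reading aid only — the real user is the reworked ath_txq_schedule() further down, and the variable names here are illustrative:

/* mac80211-driven round-robin over one access category's TXQs */
ieee80211_txq_schedule_start(hw, ac);
while ((txq = ieee80211_next_txq(hw, ac))) {
	/* dequeue frames from txq with ieee80211_tx_dequeue(), hand
	 * them to the hardware queue, then give the TXQ back
	 */
	ieee80211_return_txq(hw, txq);
}
ieee80211_txq_schedule_end(hw, ac);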
debugfs_create_file("tpc", 0600, sc->debug.debugfs_phy, sc, &fops_tpc); - debugfs_create_u16("airtime_flags", 0600, - sc->debug.debugfs_phy, &sc->airtime_flags); - debugfs_create_file("nf_override", 0600, sc->debug.debugfs_phy, sc, &fops_nf_override); diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 79607db14387..33826aa13687 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -319,20 +319,12 @@ ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause) void ath_debug_rate_stats(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb); -void ath_debug_airtime(struct ath_softc *sc, - struct ath_node *an, - u32 rx, u32 tx); #else static inline void ath_debug_rate_stats(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb) { } -static inline void ath_debug_airtime(struct ath_softc *sc, - struct ath_node *an, - u32 rx, u32 tx) -{ -} #endif /* CONFIG_ATH9K_STATION_STATISTICS */ #endif /* DEBUG_H */ diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index e8fcd3e1c470..d95cabddce33 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -242,75 +242,6 @@ static const struct file_operations fops_node_recv = { .llseek = default_llseek, }; -void ath_debug_airtime(struct ath_softc *sc, - struct ath_node *an, - u32 rx, - u32 tx) -{ - struct ath_airtime_stats *astats = &an->airtime_stats; - - astats->rx_airtime += rx; - astats->tx_airtime += tx; -} - -static ssize_t read_airtime(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_node *an = file->private_data; - struct ath_airtime_stats *astats; - static const char *qname[4] = { - "VO", "VI", "BE", "BK" - }; - u32 len = 0, size = 256; - char *buf; - size_t retval; - int i; - - buf = kzalloc(size, GFP_KERNEL); - if (buf == NULL) - return -ENOMEM; - - astats = &an->airtime_stats; - - len += scnprintf(buf + len, size - len, "RX: %u us\n", astats->rx_airtime); - len += scnprintf(buf + len, size - len, "TX: %u us\n", astats->tx_airtime); - len += scnprintf(buf + len, size - len, "Deficit: "); - for (i = 0; i < 4; i++) - len += scnprintf(buf+len, size - len, "%s: %lld us ", qname[i], an->airtime_deficit[i]); - if (len < size) - buf[len++] = '\n'; - - retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); - kfree(buf); - - return retval; -} - -static ssize_t -write_airtime_reset_stub(struct file *file, const char __user *ubuf, - size_t count, loff_t *ppos) -{ - struct ath_node *an = file->private_data; - struct ath_airtime_stats *astats; - int i; - - astats = &an->airtime_stats; - astats->rx_airtime = 0; - astats->tx_airtime = 0; - for (i = 0; i < 4; i++) - an->airtime_deficit[i] = ATH_AIRTIME_QUANTUM; - return count; -} - -static const struct file_operations fops_airtime = { - .read = read_airtime, - .write = write_airtime_reset_stub, - .open = simple_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - - void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -320,5 +251,4 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw, debugfs_create_file("node_aggr", 0444, dir, an, &fops_node_aggr); debugfs_create_file("node_recv", 0444, dir, an, &fops_node_recv); - debugfs_create_file("airtime", 0644, dir, an, &fops_airtime); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c 
b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 799010ed04e0..4e8e80ac8341 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -574,12 +574,12 @@ void ath9k_tx_failed_tasklet(unsigned long data) { struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; - spin_lock_bh(&priv->tx.tx_lock); + spin_lock(&priv->tx.tx_lock); if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { - spin_unlock_bh(&priv->tx.tx_lock); + spin_unlock(&priv->tx.tx_lock); return; } - spin_unlock_bh(&priv->tx.tx_lock); + spin_unlock(&priv->tx.tx_lock); ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed); } diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index c070a9e51ebf..98141b699c88 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc) ret = ath9k_eeprom_request(sc, eeprom_name); if (ret) return ret; + + ah->ah_flags &= ~AH_USE_EEPROM; + ah->ah_flags |= AH_NO_EEP_SWAP; } mac = of_get_mac_address(np); if (mac) ether_addr_copy(common->macaddr, mac); - ah->ah_flags &= ~AH_USE_EEPROM; - ah->ah_flags |= AH_NO_EEP_SWAP; - return 0; } @@ -676,8 +676,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, /* Will be cleared in ath9k_start() */ set_bit(ATH_OP_INVALID, &common->op_flags); - sc->airtime_flags = (AIRTIME_USE_TX | AIRTIME_USE_RX | - AIRTIME_USE_NEW_QUEUES); sc->sc_ah = ah; sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET); @@ -1013,6 +1011,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) SET_IEEE80211_PERM_ADDR(hw, common->macaddr); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); } int ath9k_init_device(u16 devid, struct ath_softc *sc, diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 30d1bd832d90..4e97f7f3b2a3 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1006,9 +1006,6 @@ static void ath_rx_count_airtime(struct ath_softc *sc, struct ath_rx_status *rs, struct sk_buff *skb) { - struct ath_node *an; - struct ath_acq *acq; - struct ath_vif *avp; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); @@ -1019,7 +1016,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc, int phy; u16 len = rs->rs_datalen; u32 airtime = 0; - u8 tidno, acno; + u8 tidno; if (!ieee80211_is_data(hdr->frame_control)) return; @@ -1029,11 +1026,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc, sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL); if (!sta) goto exit; - an = (struct ath_node *) sta->drv_priv; - avp = (struct ath_vif *) an->vif->drv_priv; tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; - acno = TID_TO_WME_AC(tidno); - acq = &avp->chanctx->acq[acno]; rxs = IEEE80211_SKB_RXCB(skb); @@ -1054,14 +1047,7 @@ static void ath_rx_count_airtime(struct ath_softc *sc, len, rxs->rate_idx, is_sp); } - if (!!(sc->airtime_flags & AIRTIME_USE_RX)) { - spin_lock_bh(&acq->lock); - an->airtime_deficit[acno] -= airtime; - if (an->airtime_deficit[acno] <= 0) - __ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno)); - spin_unlock_bh(&acq->lock); - } - ath_debug_airtime(sc, an, airtime, 0); + ieee80211_sta_register_airtime(sta, tidno, 0, airtime); exit: rcu_read_unlock(); } diff 
--git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index f448d5716639..773d428ff1b0 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -113,44 +113,14 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq) ath_tx_status(hw, skb); } -void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid) -{ - struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; - struct ath_chanctx *ctx = avp->chanctx; - struct ath_acq *acq; - struct list_head *tid_list; - u8 acno = TID_TO_WME_AC(tid->tidno); - - if (!ctx || !list_empty(&tid->list)) - return; - - - acq = &ctx->acq[acno]; - if ((sc->airtime_flags & AIRTIME_USE_NEW_QUEUES) && - tid->an->airtime_deficit[acno] > 0) - tid_list = &acq->acq_new; - else - tid_list = &acq->acq_old; - - list_add_tail(&tid->list, tid_list); -} - void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid) { - struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv; - struct ath_chanctx *ctx = avp->chanctx; - struct ath_acq *acq; + struct ieee80211_txq *queue = + container_of((void *)tid, struct ieee80211_txq, drv_priv); - if (!ctx || !list_empty(&tid->list)) - return; - - acq = &ctx->acq[TID_TO_WME_AC(tid->tidno)]; - spin_lock_bh(&acq->lock); - __ath_tx_queue_tid(sc, tid); - spin_unlock_bh(&acq->lock); + ieee80211_schedule_txq(sc->hw, queue); } - void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue) { struct ath_softc *sc = hw->priv; @@ -163,11 +133,7 @@ void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue) tid->tidno); ath_txq_lock(sc, txq); - - tid->has_queued = true; - ath_tx_queue_tid(sc, tid); ath_txq_schedule(sc, txq); - ath_txq_unlock(sc, txq); } @@ -217,8 +183,8 @@ ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb) return ATH_AN_2_TID(an, tidno); } -static struct sk_buff * -ath_tid_pull(struct ath_atx_tid *tid) +static int +ath_tid_pull(struct ath_atx_tid *tid, struct sk_buff **skbuf) { struct ieee80211_txq *txq = container_of((void*)tid, struct ieee80211_txq, drv_priv); struct ath_softc *sc = tid->an->sc; @@ -229,20 +195,16 @@ ath_tid_pull(struct ath_atx_tid *tid) }; struct sk_buff *skb; struct ath_frame_info *fi; - int q; - - if (!tid->has_queued) - return NULL; + int q, ret; skb = ieee80211_tx_dequeue(hw, txq); - if (!skb) { - tid->has_queued = false; - return NULL; - } + if (!skb) + return -ENOENT; - if (ath_tx_prepare(hw, skb, &txctl)) { + ret = ath_tx_prepare(hw, skb, &txctl); + if (ret) { ieee80211_free_txskb(hw, skb); - return NULL; + return ret; } q = skb_get_queue_mapping(skb); @@ -252,24 +214,19 @@ ath_tid_pull(struct ath_atx_tid *tid) ++tid->txq->pending_frames; } - return skb; -} - - -static bool ath_tid_has_buffered(struct ath_atx_tid *tid) -{ - return !skb_queue_empty(&tid->retry_q) || tid->has_queued; + *skbuf = skb; + return 0; } -static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid) +static int ath_tid_dequeue(struct ath_atx_tid *tid, + struct sk_buff **skb) { - struct sk_buff *skb; + int ret = 0; + *skb = __skb_dequeue(&tid->retry_q); + if (!*skb) + ret = ath_tid_pull(tid, skb); - skb = __skb_dequeue(&tid->retry_q); - if (!skb) - skb = ath_tid_pull(tid); - - return skb; + return ret; } static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) @@ -365,11 +322,12 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, struct list_head bf_head; struct ath_tx_status ts; struct ath_frame_info *fi; + 
int ret; memset(&ts, 0, sizeof(ts)); INIT_LIST_HEAD(&bf_head); - while ((skb = ath_tid_dequeue(tid))) { + while ((ret = ath_tid_dequeue(tid, &skb)) == 0) { fi = get_frame_info(skb); bf = fi->bf; @@ -681,7 +639,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, skb_queue_splice_tail(&bf_pending, &tid->retry_q); if (!an->sleeping) { ath_tx_queue_tid(sc, tid); - if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) tid->clear_ps_filter = true; } @@ -708,11 +665,11 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); } -static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an, - struct ath_atx_tid *tid, struct ath_buf *bf, +static void ath_tx_count_airtime(struct ath_softc *sc, + struct ieee80211_sta *sta, + struct ath_buf *bf, struct ath_tx_status *ts) { - struct ath_txq *txq = tid->txq; u32 airtime = 0; int i; @@ -722,17 +679,7 @@ static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an, airtime += rate_dur * bf->rates[i].count; } - if (sc->airtime_flags & AIRTIME_USE_TX) { - int q = txq->mac80211_qnum; - struct ath_acq *acq = &sc->cur_chan->acq[q]; - - spin_lock_bh(&acq->lock); - an->airtime_deficit[q] -= airtime; - if (an->airtime_deficit[q] <= 0) - __ath_tx_queue_tid(sc, tid); - spin_unlock_bh(&acq->lock); - } - ath_debug_airtime(sc, an, 0, airtime); + ieee80211_sta_register_airtime(sta, ts->tid, airtime, 0); } static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, @@ -762,7 +709,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, if (sta) { struct ath_node *an = (struct ath_node *)sta->drv_priv; tid = ath_get_skb_tid(sc, an, bf->bf_mpdu); - ath_tx_count_airtime(sc, an, tid, bf, ts); + ath_tx_count_airtime(sc, sta, bf, ts); if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY)) tid->clear_ps_filter = true; } @@ -947,20 +894,21 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, return ndelim; } -static struct ath_buf * +static int ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid) + struct ath_atx_tid *tid, struct ath_buf **buf) { struct ieee80211_tx_info *tx_info; struct ath_frame_info *fi; - struct sk_buff *skb, *first_skb = NULL; struct ath_buf *bf; + struct sk_buff *skb, *first_skb = NULL; u16 seqno; + int ret; while (1) { - skb = ath_tid_dequeue(tid); - if (!skb) - break; + ret = ath_tid_dequeue(tid, &skb); + if (ret < 0) + return ret; fi = get_frame_info(skb); bf = fi->bf; @@ -992,7 +940,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) { bf->bf_state.bf_type = 0; - return bf; + break; } bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR; @@ -1011,7 +959,7 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, first_skb = skb; continue; } - break; + return -EINPROGRESS; } if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) { @@ -1028,10 +976,11 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq, if (bf_isampdu(bf)) ath_tx_addto_baw(sc, tid, bf); - return bf; + break; } - return NULL; + *buf = bf; + return 0; } static int @@ -1041,7 +990,7 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq, { #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) struct ath_buf *bf = bf_first, *bf_prev = NULL; - int nframes = 0, ndelim; + int nframes = 0, ndelim, ret; u16 aggr_limit = 0, al = 0, bpad = 0, 
al_delta, h_baw = tid->baw_size / 2; struct ieee80211_tx_info *tx_info; @@ -1093,7 +1042,9 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq, bf_prev = bf; - bf = ath_tx_get_tid_subframe(sc, txq, tid); + ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf); + if (ret < 0) + break; } goto finish; stop: @@ -1490,7 +1441,7 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf_first) { struct ath_buf *bf = bf_first, *bf_prev = NULL; - int nframes = 0; + int nframes = 0, ret; do { struct ieee80211_tx_info *tx_info; @@ -1504,8 +1455,8 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, if (nframes >= 2) break; - bf = ath_tx_get_tid_subframe(sc, txq, tid); - if (!bf) + ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf); + if (ret < 0) break; tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); @@ -1518,30 +1469,27 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq, } while (1); } -static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, - struct ath_atx_tid *tid) +static int ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, + struct ath_atx_tid *tid) { - struct ath_buf *bf; + struct ath_buf *bf = NULL; struct ieee80211_tx_info *tx_info; struct list_head bf_q; - int aggr_len = 0; + int aggr_len = 0, ret; bool aggr; - if (!ath_tid_has_buffered(tid)) - return false; - INIT_LIST_HEAD(&bf_q); - bf = ath_tx_get_tid_subframe(sc, txq, tid); - if (!bf) - return false; + ret = ath_tx_get_tid_subframe(sc, txq, tid, &bf); + if (ret < 0) + return ret; tx_info = IEEE80211_SKB_CB(bf->bf_mpdu); aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU); if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) || (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) { __skb_queue_tail(&tid->retry_q, bf->bf_mpdu); - return false; + return -EBUSY; } ath_set_rates(tid->an->vif, tid->an->sta, bf); @@ -1551,7 +1499,7 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_form_burst(sc, txq, tid, &bf_q, bf); if (list_empty(&bf_q)) - return false; + return -EAGAIN; if (tid->clear_ps_filter || tid->an->no_ps_filter) { tid->clear_ps_filter = false; @@ -1560,7 +1508,7 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_fill_desc(sc, bf, txq, aggr_len); ath_tx_txqaddbuf(sc, txq, &bf_q, false); - return true; + return 0; } int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, @@ -1623,28 +1571,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_atx_tid *tid; - struct ath_txq *txq; int tidno; ath_dbg(common, XMIT, "%s called\n", __func__); for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { tid = ath_node_to_tid(an, tidno); - txq = tid->txq; - - ath_txq_lock(sc, txq); - - if (list_empty(&tid->list)) { - ath_txq_unlock(sc, txq); - continue; - } if (!skb_queue_empty(&tid->retry_q)) ieee80211_sta_set_buffered(sta, tid->tidno, true); - list_del_init(&tid->list); - - ath_txq_unlock(sc, txq); } } @@ -1663,11 +1599,12 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ath_txq_lock(sc, txq); tid->clear_ps_filter = true; - if (ath_tid_has_buffered(tid)) { + if (!skb_queue_empty(&tid->retry_q)) { ath_tx_queue_tid(sc, tid); ath_txq_schedule(sc, txq); } ath_txq_unlock_complete(sc, txq); + } } @@ -1698,9 +1635,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, struct ath_txq *txq = sc->tx.uapsdq; struct ieee80211_tx_info *info; struct list_head bf_q; - struct ath_buf 
*bf_tail = NULL, *bf; + struct ath_buf *bf_tail = NULL, *bf = NULL; int sent = 0; - int i; + int i, ret; INIT_LIST_HEAD(&bf_q); for (i = 0; tids && nframes; i++, tids >>= 1) { @@ -1713,8 +1650,9 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, ath_txq_lock(sc, tid->txq); while (nframes > 0) { - bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid); - if (!bf) + ret = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, + tid, &bf); + if (ret < 0) break; ath9k_set_moredata(sc, bf, true); @@ -1980,11 +1918,11 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) */ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { + struct ieee80211_hw *hw = sc->hw; struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_txq *queue; struct ath_atx_tid *tid; - struct list_head *tid_list; - struct ath_acq *acq; - bool active = AIRTIME_ACTIVE(sc->airtime_flags); + int ret; if (txq->mac80211_qnum < 0) return; @@ -1992,58 +1930,26 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) return; + ieee80211_txq_schedule_start(hw, txq->mac80211_qnum); spin_lock_bh(&sc->chan_lock); rcu_read_lock(); - acq = &sc->cur_chan->acq[txq->mac80211_qnum]; if (sc->cur_chan->stopped) goto out; -begin: - tid_list = &acq->acq_new; - if (list_empty(tid_list)) { - tid_list = &acq->acq_old; - if (list_empty(tid_list)) - goto out; - } - tid = list_first_entry(tid_list, struct ath_atx_tid, list); - - if (active && tid->an->airtime_deficit[txq->mac80211_qnum] <= 0) { - spin_lock_bh(&acq->lock); - tid->an->airtime_deficit[txq->mac80211_qnum] += ATH_AIRTIME_QUANTUM; - list_move_tail(&tid->list, &acq->acq_old); - spin_unlock_bh(&acq->lock); - goto begin; - } - - if (!ath_tid_has_buffered(tid)) { - spin_lock_bh(&acq->lock); - if ((tid_list == &acq->acq_new) && !list_empty(&acq->acq_old)) - list_move_tail(&tid->list, &acq->acq_old); - else { - list_del_init(&tid->list); - } - spin_unlock_bh(&acq->lock); - goto begin; - } + while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { + tid = (struct ath_atx_tid *)queue->drv_priv; + ret = ath_tx_sched_aggr(sc, txq, tid); + ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); - /* - * If we succeed in scheduling something, immediately restart to make - * sure we keep the HW busy. 
- */ - if(ath_tx_sched_aggr(sc, txq, tid)) { - if (!active) { - spin_lock_bh(&acq->lock); - list_move_tail(&tid->list, &acq->acq_old); - spin_unlock_bh(&acq->lock); - } - goto begin; + ieee80211_return_txq(hw, queue); } out: rcu_read_unlock(); spin_unlock_bh(&sc->chan_lock); + ieee80211_txq_schedule_end(hw, txq->mac80211_qnum); } void ath_txq_schedule_all(struct ath_softc *sc) @@ -2646,6 +2552,9 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, } tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; + + /* we report airtime in ath_tx_count_airtime(), don't report twice */ + tx_info->status.tx_time = 0; } static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) @@ -2887,9 +2796,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) struct ath_atx_tid *tid; int tidno, acno; - for (acno = 0; acno < IEEE80211_NUM_ACS; acno++) - an->airtime_deficit[acno] = ATH_AIRTIME_QUANTUM; - for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) { tid = ath_node_to_tid(an, tidno); tid->an = an; @@ -2899,7 +2805,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->baw_head = tid->baw_tail = 0; tid->active = false; tid->clear_ps_filter = true; - tid->has_queued = false; __skb_queue_head_init(&tid->retry_q); INIT_LIST_HEAD(&tid->list); acno = TID_TO_WME_AC(tidno); diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index f7c2f19e81c1..8e154f6364a3 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -427,7 +427,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar, if (head->plcp[6] & 0x80) status->enc_flags |= RX_ENC_FLAG_SHORT_GI; - status->rate_idx = clamp(0, 75, head->plcp[3] & 0x7f); + status->rate_idx = clamp(head->plcp[3] & 0x7f, 0, 75); status->encoding = RX_ENC_HT; break; diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index d73e45e26547..75ddaefdd049 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -185,7 +185,9 @@ enum CountryCode { CTRY_UKRAINE = 804, CTRY_UNITED_KINGDOM = 826, CTRY_UNITED_STATES = 840, + CTRY_UNITED_STATES2 = 841, CTRY_UNITED_STATES_FCC49 = 842, + CTRY_UNITED_STATES3 = 843, CTRY_URUGUAY = 858, CTRY_UZBEKISTAN = 860, CTRY_VENEZUELA = 862, diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 4021e37a225a..c4bd26e65949 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -483,6 +483,8 @@ static struct country_code_to_enum_rd allCountries[] = { {CTRY_UAE, NULL1_WORLD, "AE"}, {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, {CTRY_UNITED_STATES, FCC3_FCCA, "US"}, + {CTRY_UNITED_STATES2, FCC3_FCCA, "US"}, + {CTRY_UNITED_STATES3, FCC3_FCCA, "US"}, /* This "PS" is for US public safety actually... to support this we * would need to assign new special alpha2 to CRDA db as with the world * regdomain and use another alpha2 */ diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 9b2f9f543952..a1e226652b4a 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -395,7 +395,7 @@ static int wil_find_cid_by_idx(struct wil6210_priv *wil, u8 mid, int idx) { int i; - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { if (wil->sta[i].status == wil_sta_unused) continue; if (wil->sta[i].mid != mid) @@ -1580,6 +1580,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, u8 *buf, *dpos; const u8 *spos; + if (!ies1) + ies1_len = 0; + + if (!ies2) + ies2_len = 0; + if (ies1_len == 0 && ies2_len == 0) { *merged_ies = NULL; *merged_len = 0; @@ -1589,17 +1595,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL); if (!buf) return -ENOMEM; - memcpy(buf, ies1, ies1_len); + if (ies1) + memcpy(buf, ies1, ies1_len); dpos = buf + ies1_len; spos = ies2; - while (spos + 1 < ies2 + ies2_len) { + while (spos && (spos + 1 < ies2 + ies2_len)) { /* IE tag at offset 0, length at offset 1 */ u16 ielen = 2 + spos[1]; if (spos + ielen > ies2 + ies2_len) break; if (spos[0] == WLAN_EID_VENDOR_SPECIFIC && - !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) { + (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len, + spos, ielen))) { memcpy(dpos, spos, ielen); dpos += ielen; } @@ -3007,7 +3015,7 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy, wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX, sector_type, WIL_CID_ALL); if (rc == -EINVAL) { - for (i = 0; i < WIL6210_MAX_CID; i++) { + for (i = 0; i < max_assoc_sta; i++) { if (wil->sta[i].mid != vif->mid) continue; rc = wil_rf_sector_wmi_set_selected( diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 835c902b84c1..7ad4e5328439 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -162,7 +162,7 @@ static int ring_show(struct seq_file *s, void *data) snprintf(name, sizeof(name), "tx_%2d", i); - if (cid < WIL6210_MAX_CID) + if (cid < max_assoc_sta) seq_printf(s, "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n", wil->sta[cid].addr, cid, tid, @@ -792,14 +792,14 @@ static ssize_t wil_write_back(struct file *file, const char __user *buf, "BACK: del_rx require at least 2 params\n"); return -EINVAL; } - if (p1 < 0 || p1 >= WIL6210_MAX_CID) { + if (p1 < 0 || p1 >= max_assoc_sta) { wil_err(wil, "BACK: invalid CID %d\n", p1); return -EINVAL; } if (rc < 4) p3 = WLAN_REASON_QSTA_LEAVE_QBSS; sta = &wil->sta[p1]; - wmi_delba_rx(wil, sta->mid, mk_cidxtid(p1, p2), p3); + wmi_delba_rx(wil, sta->mid, p1, p2, p3); } else { wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd); return -EINVAL; @@ -1243,7 +1243,7 @@ static int bf_show(struct seq_file *s, void *data) memset(&reply, 0, sizeof(reply)); - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { u32 status; cmd.cid = i; @@ -1340,7 +1340,7 @@ static int link_show(struct seq_file *s, void *data) if (!sinfo) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; struct wil6210_vif *vif; @@ -1542,7 +1542,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) struct wil6210_priv *wil = s->private; int i, tid, mcs; - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; u8 aid = 0; @@ -1651,7 +1651,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) struct wil6210_priv *wil = s->private; int i, bin; - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { struct wil_sta_info *p = &wil->sta[i]; char *status = "unknown"; u8 aid = 0; @@ -1740,7 +1740,7 @@ static ssize_t wil_tx_latency_write(struct file *file, const char __user *buf, size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS; wil->tx_latency_res = val; - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { struct wil_sta_info *sta = &wil->sta[i]; kfree(sta->tx_latency_bins); @@ -1825,7 +1825,7 @@ static void wil_link_stats_debugfs_show_vif(struct wil6210_vif *vif, } seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf); - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { if (wil->sta[i].status == wil_sta_unused) continue; if (wil->sta[i].mid != vif->mid) @@ -2386,6 +2386,7 @@ static const struct dbg_off dbg_statics[] = { {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32}, {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32}, + {"drop_if_ring_full", 0644, (ulong)&drop_if_ring_full, doff_u8}, {}, }; @@ -2439,7 +2440,7 @@ void wil6210_debugfs_remove(struct wil6210_priv *wil) wil->debug = NULL; kfree(wil->dbg_data.data_arr); - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) + for (i = 0; i < max_assoc_sta; i++) kfree(wil->sta[i].tx_latency_bins); /* free pmc memory without sending command to fw, as it will diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index 5d287a8e1b45..3f5bd177d55f 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ 
b/drivers/net/wireless/ath/wil6210/interrupt.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -575,10 +575,14 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie) } if (isr & BIT_DMA_EP_MISC_ICR_HALP) { - wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); - wil6210_mask_halp(wil); isr &= ~BIT_DMA_EP_MISC_ICR_HALP; - complete(&wil->halp.comp); + if (wil->halp.handle_icr) { + /* no need to handle HALP ICRs until next vote */ + wil->halp.handle_icr = false; + wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); + wil6210_mask_halp(wil); + complete(&wil->halp.comp); + } } wil->isr_misc = isr; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 5b7de00affe2..277abfdf3322 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -219,7 +219,7 @@ static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid) { int i; - for (i = 0; i < WIL6210_MAX_CID; i++) { + for (i = 0; i < max_assoc_sta; i++) { if (wil->sta[i].mid == mid && wil->sta[i].status == wil_sta_connected) return true; @@ -322,7 +322,7 @@ static void _wil6210_disconnect_complete(struct wil6210_vif *vif, wil_disconnect_cid_complete(vif, cid, reason_code); } else { /* all */ wil_dbg_misc(wil, "Disconnect complete all\n"); - for (cid = 0; cid < WIL6210_MAX_CID; cid++) + for (cid = 0; cid < max_assoc_sta; cid++) wil_disconnect_cid_complete(vif, cid, reason_code); } @@ -434,7 +434,7 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, wil_disconnect_cid(vif, cid, reason_code); } else { /* all */ wil_dbg_misc(wil, "Disconnect all\n"); - for (cid = 0; cid < WIL6210_MAX_CID; cid++) + for (cid = 0; cid < max_assoc_sta; cid++) wil_disconnect_cid(vif, cid, reason_code); } @@ -1895,7 +1895,7 @@ int wil_find_cid(struct wil6210_priv *wil, u8 mid, const u8 *mac) int i; int rc = -ENOENT; - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { + for (i = 0; i < max_assoc_sta; i++) { if (wil->sta[i].mid == mid && wil->sta[i].status != wil_sta_unused && ether_addr_equal(wil->sta[i].addr, mac)) { @@ -1919,11 +1919,14 @@ void wil_halp_vote(struct wil6210_priv *wil) if (++wil->halp.ref_cnt == 1) { reinit_completion(&wil->halp.comp); + /* mark to IRQ context to handle HALP ICR */ + wil->halp.handle_icr = true; wil6210_set_halp(wil); rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); if (!rc) { wil_err(wil, "HALP vote timed out\n"); /* Mask HALP as done in case the interrupt is raised */ + wil->halp.handle_icr = false; wil6210_mask_halp(wil); } else { wil_dbg_irq(wil, diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c index 983bd001b53b..32b14fc33a59 100644 --- a/drivers/net/wireless/ath/wil6210/rx_reorder.c +++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2014-2017 Qualcomm Atheros, 
Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -307,8 +307,8 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize) } /* Block Ack - Rx side (recipient) */ -int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, - u8 cidxtid, u8 dialog_token, __le16 ba_param_set, +int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, + u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { @@ -316,7 +316,6 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) u16 agg_timeout = le16_to_cpu(ba_timeout); u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl); struct wil_sta_info *sta; - u8 cid, tid; u16 agg_wsize = 0; /* bit 0: A-MSDU supported * bit 1: policy (should be 0 for us) @@ -335,10 +334,9 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) int rc = 0; might_sleep(); - parse_cidxtid(cidxtid, &cid, &tid); /* sanity checks */ - if (cid >= WIL6210_MAX_CID) { + if (cid >= max_assoc_sta) { wil_err(wil, "BACK: invalid CID %d\n", cid); rc = -EINVAL; goto out; diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h index 853abc3a73e4..36ebfcf9ef30 100644 --- a/drivers/net/wireless/ath/wil6210/trace.h +++ b/drivers/net/wireless/ath/wil6210/trace.h @@ -1,5 +1,6 @@ /* * Copyright (c) 2013-2016 Qualcomm Atheros, Inc. + * Copyright (c) 2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -181,7 +182,7 @@ TRACE_EVENT(wil6210_rx, __entry->seq = wil_rxdesc_seq(d); __entry->mcs = wil_rxdesc_mcs(d); ), - TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x" + TP_printk("index %d len %d mid %d cid (%%8) %d tid %d mcs %d seq 0x%03x" " type 0x%1x subtype 0x%1x", __entry->index, __entry->len, __entry->mid, __entry->cid, __entry->tid, __entry->mcs, __entry->seq, __entry->type, __entry->subtype) diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 3e1c831ab2fb..4ccfd1404458 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -30,11 +30,6 @@ #include "trace.h" #include "txrx_edma.h" -static bool rtap_include_phy_info; -module_param(rtap_include_phy_info, bool, 0444); -MODULE_PARM_DESC(rtap_include_phy_info, - " Include PHY info in the radiotap header, default - no"); - bool rx_align_2; module_param(rx_align_2, bool, 0444); MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no"); @@ -43,6 +38,9 @@ bool rx_large_buf; module_param(rx_large_buf, bool, 0444); MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no"); +/* Drop Tx packets in case Tx ring is full */ +bool drop_if_ring_full; + static inline uint wil_rx_snaplen(void) { return rx_align_2 ? 
6 : 0; @@ -332,87 +330,34 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil, u8 mcs_flags; u8 mcs_index; } __packed; - struct wil6210_rtap_vendor { - struct wil6210_rtap rtap; - /* vendor */ - u8 vendor_oui[3] __aligned(2); - u8 vendor_ns; - __le16 vendor_skip; - u8 vendor_data[0]; - } __packed; struct vring_rx_desc *d = wil_skb_rxdesc(skb); - struct wil6210_rtap_vendor *rtap_vendor; + struct wil6210_rtap *rtap; int rtap_len = sizeof(struct wil6210_rtap); - int phy_length = 0; /* phy info header size, bytes */ - static char phy_data[128]; struct ieee80211_channel *ch = wil->monitor_chandef.chan; - if (rtap_include_phy_info) { - rtap_len = sizeof(*rtap_vendor) + sizeof(*d); - /* calculate additional length */ - if (d->dma.status & RX_DMA_STATUS_PHY_INFO) { - /** - * PHY info starts from 8-byte boundary - * there are 8-byte lines, last line may be partially - * written (HW bug), thus FW configures for last line - * to be excessive. Driver skips this last line. - */ - int len = min_t(int, 8 + sizeof(phy_data), - wil_rxdesc_phy_length(d)); - - if (len > 8) { - void *p = skb_tail_pointer(skb); - void *pa = PTR_ALIGN(p, 8); - - if (skb_tailroom(skb) >= len + (pa - p)) { - phy_length = len - 8; - memcpy(phy_data, pa, phy_length); - } - } - } - rtap_len += phy_length; - } - if (skb_headroom(skb) < rtap_len && pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) { wil_err(wil, "Unable to expand headroom to %d\n", rtap_len); return; } - rtap_vendor = skb_push(skb, rtap_len); - memset(rtap_vendor, 0, rtap_len); + rtap = skb_push(skb, rtap_len); + memset(rtap, 0, rtap_len); - rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION; - rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len); - rtap_vendor->rtap.rthdr.it_present = cpu_to_le32( - (1 << IEEE80211_RADIOTAP_FLAGS) | + rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION; + rtap->rthdr.it_len = cpu_to_le16(rtap_len); + rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << IEEE80211_RADIOTAP_CHANNEL) | (1 << IEEE80211_RADIOTAP_MCS)); if (d->dma.status & RX_DMA_STATUS_ERROR) - rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS; - - rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320); - rtap_vendor->rtap.chnl_flags = cpu_to_le16(0); - - rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS; - rtap_vendor->rtap.mcs_flags = 0; - rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d); - - if (rtap_include_phy_info) { - rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 << - IEEE80211_RADIOTAP_VENDOR_NAMESPACE); - /* OUI for Wilocity 04:ce:14 */ - rtap_vendor->vendor_oui[0] = 0x04; - rtap_vendor->vendor_oui[1] = 0xce; - rtap_vendor->vendor_oui[2] = 0x14; - rtap_vendor->vendor_ns = 1; - /* Rx descriptor + PHY data */ - rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) + - phy_length); - memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d)); - memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data, - phy_length); - } + rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS; + + rtap->chnl_freq = cpu_to_le16(ch ? 
ch->center_freq : 58320); + rtap->chnl_flags = cpu_to_le16(0); + + rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS; + rtap->mcs_flags = 0; + rtap->mcs_index = wil_rxdesc_mcs(d); } static bool wil_is_rx_idle(struct wil6210_priv *wil) @@ -427,6 +372,76 @@ static bool wil_is_rx_idle(struct wil6210_priv *wil) return true; } +static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb) +{ + struct vring_rx_desc *d = wil_skb_rxdesc(skb); + int mid = wil_rxdesc_mid(d); + struct wil6210_vif *vif = wil->vifs[mid]; + /* cid from DMA descriptor is limited to 3 bits. + * In case of cid>=8, the value would be cid modulo 8 and we need to + * find real cid by locating the transmitter (ta) inside sta array + */ + int cid = wil_rxdesc_cid(d); + unsigned int snaplen = wil_rx_snaplen(); + struct ieee80211_hdr_3addr *hdr; + int i; + unsigned char *ta; + u8 ftype; + + /* in monitor mode there are no connections */ + if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR) + return cid; + + ftype = wil_rxdesc_ftype(d) << 2; + if (likely(ftype == IEEE80211_FTYPE_DATA)) { + if (unlikely(skb->len < ETH_HLEN + snaplen)) { + wil_err_ratelimited(wil, + "Short data frame, len = %d\n", + skb->len); + return -ENOENT; + } + ta = wil_skb_get_sa(skb); + } else { + if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) { + wil_err_ratelimited(wil, "Short frame, len = %d\n", + skb->len); + return -ENOENT; + } + hdr = (void *)skb->data; + ta = hdr->addr2; + } + + if (max_assoc_sta <= WIL6210_RX_DESC_MAX_CID) + return cid; + + /* assuming no concurrency between AP interfaces and STA interfaces. + * multista is used only in P2P_GO or AP mode. In other modes return + * cid from the rx descriptor + */ + if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO && + vif->wdev.iftype != NL80211_IFTYPE_AP) + return cid; + + /* For Rx packets cid from rx descriptor is limited to 3 bits (0..7), + * to find the real cid, compare transmitter address with the stored + * stations mac address in the driver sta array + */ + for (i = cid; i < max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) { + if (wil->sta[i].status != wil_sta_unused && + ether_addr_equal(wil->sta[i].addr, ta)) { + cid = i; + break; + } + } + if (i >= max_assoc_sta) { + wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n", + ta, vif->wdev.iftype, ftype, skb->len); + cid = -ENOENT; + } + + return cid; +} + /** * reap 1 frame from @swhead * @@ -452,7 +467,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, int i; struct wil_net_stats *stats; - BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); + BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb)); again: if (unlikely(wil_ring_is_empty(vring))) @@ -484,7 +499,6 @@ again: wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4, (const void *)d, sizeof(*d), false); - cid = wil_rxdesc_cid(d); mid = wil_rxdesc_mid(d); vif = wil->vifs[mid]; @@ -495,11 +509,9 @@ again: goto again; } ndev = vif_to_ndev(vif); - stats = &wil->sta[cid].stats; - if (unlikely(dmalen > sz)) { - wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); - stats->rx_large_frame++; + wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n", + dmalen); kfree_skb(skb); goto again; } @@ -510,6 +522,14 @@ again: wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, skb_headlen(skb), false); + cid = wil_rx_get_cid_by_skb(wil, skb); + if (cid == -ENOENT) { + kfree_skb(skb); + goto again; + } + wil_skb_set_cid(skb, (u8)cid); + stats = &wil->sta[cid].stats; 
+ stats->last_mcs_rx = wil_rxdesc_mcs(d); if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs)) stats->rx_per_mcs[stats->last_mcs_rx]++; @@ -556,13 +576,6 @@ again: goto again; } - if (unlikely(skb->len < ETH_HLEN + snaplen)) { - wil_err(wil, "Short frame, len = %d\n", skb->len); - stats->rx_short_frame++; - kfree_skb(skb); - goto again; - } - /* L4 IDENT is on when HW calculated checksum, check status * and in case of error drop the packet * higher stack layers will handle retransmission (if required) @@ -659,7 +672,7 @@ int reverse_memcmp(const void *cs, const void *ct, size_t count) static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) { struct vring_rx_desc *d = wil_skb_rxdesc(skb); - int cid = wil_rxdesc_cid(d); + int cid = wil_skb_get_cid(skb); int tid = wil_rxdesc_tid(d); int key_id = wil_rxdesc_key_id(d); int mc = wil_rxdesc_mcast(d); @@ -707,7 +720,7 @@ static void wil_get_netif_rx_params(struct sk_buff *skb, int *cid, { struct vring_rx_desc *d = wil_skb_rxdesc(skb); - *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ + *cid = wil_skb_get_cid(skb); *security = wil_rxdesc_security(d); } @@ -724,11 +737,11 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) unsigned int len = skb->len; int cid; int security; - struct ethhdr *eth = (void *)skb->data; + u8 *sa, *da = wil_skb_get_da(skb); /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication * is not suitable, need to look at data */ - int mcast = is_multicast_ether_addr(eth->h_dest); + int mcast = is_multicast_ether_addr(da); struct wil_net_stats *stats; struct sk_buff *xmit_skb = NULL; static const char * const gro_res_str[] = { @@ -759,7 +772,8 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) } if (wdev->iftype == NL80211_IFTYPE_STATION) { - if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) { + sa = wil_skb_get_sa(skb); + if (mcast && ether_addr_equal(sa, ndev->dev_addr)) { /* mcast packet looped back to us */ rc = GRO_DROP; dev_kfree_skb(skb); @@ -772,8 +786,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) */ xmit_skb = skb_copy(skb, GFP_ATOMIC); } else { - int xmit_cid = wil_find_cid(wil, vif->mid, - eth->h_dest); + int xmit_cid = wil_find_cid(wil, vif->mid, da); if (xmit_cid >= 0) { /* The destination station is associated to @@ -971,7 +984,6 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, .ring_size = cpu_to_le16(size), }, .ringid = id, - .cidxtid = mk_cidxtid(cid, tid), .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, .mac_ctrl = 0, .to_resolution = 0, @@ -991,6 +1003,14 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, struct wil_ring *vring = &wil->ring_tx[id]; struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; + if (cid >= WIL6210_RX_DESC_MAX_CID) { + cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID; + cmd.vring_cfg.cid = cid; + cmd.vring_cfg.tid = tid; + } else { + cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid); + } + wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", cmd.vring_cfg.tx_sw_ring.max_mpdu_size); lockdep_assert_held(&wil->mutex); @@ -1043,7 +1063,7 @@ static int wil_vring_init_tx(struct wil6210_vif *vif, int id, int size, txdata->enabled = 0; spin_unlock_bh(&txdata->lock); wil_vring_free(wil, vring); - wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; + wil->ring2cid_tid[id][0] = max_assoc_sta; wil->ring2cid_tid[id][1] = 0; out: @@ -1128,7 +1148,7 @@ fail: txdata->dot1x_open = false; txdata->enabled = 0; spin_unlock_bh(&txdata->lock); - 
wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; + wil->ring2cid_tid[ring_id][0] = max_assoc_sta; wil->ring2cid_tid[ring_id][1] = 0; return rc; } @@ -1175,7 +1195,7 @@ int wil_vring_init_bcast(struct wil6210_vif *vif, int id, int size) if (rc) goto out; - wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */ + wil->ring2cid_tid[id][0] = max_assoc_sta; /* CID */ wil->ring2cid_tid[id][1] = 0; /* TID */ cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); @@ -1217,12 +1237,13 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil, struct wil6210_vif *vif, struct sk_buff *skb) { - int i; - struct ethhdr *eth = (void *)skb->data; - int cid = wil_find_cid(wil, vif->mid, eth->h_dest); + int i, cid; + const u8 *da = wil_skb_get_da(skb); int min_ring_id = wil_get_min_tx_ring_id(wil); - if (cid < 0) + cid = wil_find_cid(wil, vif->mid, da); + + if (cid < 0 || cid >= max_assoc_sta) return NULL; /* TODO: fix for multiple TID */ @@ -1235,7 +1256,7 @@ static struct wil_ring *wil_find_tx_ucast(struct wil6210_priv *wil, struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", - eth->h_dest, i); + da, i); if (v->va && txdata->enabled) { return v; } else { @@ -1274,7 +1295,7 @@ static struct wil_ring *wil_find_tx_ring_sta(struct wil6210_priv *wil, continue; cid = wil->ring2cid_tid[i][0]; - if (cid >= WIL6210_MAX_CID) /* skip BCAST */ + if (cid >= max_assoc_sta) /* skip BCAST */ continue; if (!wil->ring_tx_data[i].dot1x_open && @@ -1326,10 +1347,10 @@ static struct wil_ring *wil_find_tx_bcast_1(struct wil6210_priv *wil, static void wil_set_da_for_vring(struct wil6210_priv *wil, struct sk_buff *skb, int vring_index) { - struct ethhdr *eth = (void *)skb->data; + u8 *da = wil_skb_get_da(skb); int cid = wil->ring2cid_tid[vring_index][0]; - ether_addr_copy(eth->h_dest, wil->sta[cid].addr); + ether_addr_copy(da, wil->sta[cid].addr); } static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, @@ -1340,8 +1361,7 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, struct sk_buff *skb2; int i; u8 cid; - struct ethhdr *eth = (void *)skb->data; - char *src = eth->h_source; + const u8 *src = wil_skb_get_sa(skb); struct wil_ring_tx_data *txdata, *txdata2; int min_ring_id = wil_get_min_tx_ring_id(wil); @@ -1353,7 +1373,7 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, continue; cid = wil->ring2cid_tid[i][0]; - if (cid >= WIL6210_MAX_CID) /* skip BCAST */ + if (cid >= max_assoc_sta) /* skip BCAST */ continue; if (!wil->ring_tx_data[i].dot1x_open && skb->protocol != cpu_to_be16(ETH_P_PAE)) @@ -1381,7 +1401,7 @@ found: if (!v2->va || txdata2->mid != vif->mid) continue; cid = wil->ring2cid_tid[i][0]; - if (cid >= WIL6210_MAX_CID) /* skip BCAST */ + if (cid >= max_assoc_sta) /* skip BCAST */ continue; if (!wil->ring_tx_data[i].dot1x_open && skb->protocol != cpu_to_be16(ETH_P_PAE)) @@ -2032,6 +2052,10 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil, wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d", check_stop, vif->mid, vif->net_queue_stopped); + if (ring && drop_if_ring_full) + /* no need to stop/wake net queues */ + return; + if (check_stop == vif->net_queue_stopped) /* net queues already in desired state */ return; @@ -2095,8 +2119,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct wil6210_vif *vif = ndev_to_vif(ndev); struct wil6210_priv *wil = vif_to_wil(vif); - struct ethhdr *eth = (void *)skb->data; - bool bcast = 
is_multicast_ether_addr(eth->h_dest); + const u8 *da = wil_skb_get_da(skb); + bool bcast = is_multicast_ether_addr(da); struct wil_ring *ring; static bool pr_once_fw; int rc; @@ -2143,7 +2167,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) ring = wil_find_tx_ucast(wil, vif, skb); } if (unlikely(!ring)) { - wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest); + wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da); goto drop; } /* set up vring entry */ @@ -2157,6 +2181,8 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) dev_kfree_skb_any(skb); return NETDEV_TX_OK; case -ENOMEM: + if (drop_if_ring_full) + goto drop; return NETDEV_TX_BUSY; default: break; /* goto drop; */ @@ -2228,7 +2254,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid) used_before_complete = wil_ring_used_tx(vring); - if (cid < WIL6210_MAX_CID) + if (cid < max_assoc_sta) stats = &wil->sta[cid].stats; while (!wil_ring_is_empty(vring)) { @@ -2337,7 +2363,7 @@ static void wil_get_reorder_params(struct wil6210_priv *wil, struct vring_rx_desc *d = wil_skb_rxdesc(skb); *tid = wil_rxdesc_tid(d); - *cid = wil_rxdesc_cid(d); + *cid = wil_skb_get_cid(skb); *mid = wil_rxdesc_mid(d); *seq = wil_rxdesc_seq(d); *mcast = wil_rxdesc_mcast(d); diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h index 9d83be481839..c0da1340c2d2 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.h +++ b/drivers/net/wireless/ath/wil6210/txrx.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -458,6 +458,18 @@ union wil_ring_desc { union wil_rx_desc rx; } __packed; +struct packet_rx_info { + u8 cid; +}; + +/* this struct will be stored in the skb cb buffer + * max length of the struct is limited to 48 bytes + */ +struct skb_rx_info { + struct vring_rx_desc rx_desc; + struct packet_rx_info rx_info; +}; + static inline int wil_rxdesc_tid(struct vring_rx_desc *d) { return WIL_GET_BITS(d->mac.d0, 0, 3); @@ -530,11 +542,6 @@ static inline int wil_rxdesc_mcast(struct vring_rx_desc *d) return WIL_GET_BITS(d->mac.d1, 13, 14); } -static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d) -{ - return WIL_GET_BITS(d->dma.d0, 16, 29); -} - static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb) { return (void *)skb->cb; @@ -560,11 +567,25 @@ static inline int wil_ring_is_full(struct wil_ring *ring) return wil_ring_next_tail(ring) == ring->swhead; } -static inline bool wil_need_txstat(struct sk_buff *skb) +static inline u8 *wil_skb_get_da(struct sk_buff *skb) +{ + struct ethhdr *eth = (void *)skb->data; + + return eth->h_dest; +} + +static inline u8 *wil_skb_get_sa(struct sk_buff *skb) { struct ethhdr *eth = (void *)skb->data; - return is_unicast_ether_addr(eth->h_dest) && skb->sk && + return eth->h_source; +} + +static inline bool wil_need_txstat(struct sk_buff *skb) +{ + const u8 *da = wil_skb_get_da(skb); + + return is_unicast_ether_addr(da) && skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS); } @@ -610,6 +631,20 @@ static inline bool wil_val_in_range(int val, int min, int max) return val >= min && val < max; } +static inline u8 wil_skb_get_cid(struct sk_buff *skb) +{ + struct skb_rx_info *skb_rx_info = (void 
*)skb->cb; + + return skb_rx_info->rx_info.cid; +} + +static inline void wil_skb_set_cid(struct sk_buff *skb, u8 cid) +{ + struct skb_rx_info *skb_rx_info = (void *)skb->cb; + + skb_rx_info->rx_info.cid = cid; +} + void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev); void wil_rx_reorder(struct wil6210_priv *wil, struct sk_buff *skb); void wil_rx_bar(struct wil6210_priv *wil, struct wil6210_vif *vif, diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 3380aaef456c..c38773878ae3 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -727,7 +727,7 @@ static int wil_ring_init_tx_edma(struct wil6210_vif *vif, int ring_id, txdata->enabled = 0; spin_unlock_bh(&txdata->lock); wil_ring_free_edma(wil, ring); - wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; + wil->ring2cid_tid[ring_id][0] = max_assoc_sta; wil->ring2cid_tid[ring_id][1] = 0; out: @@ -932,7 +932,7 @@ again: eop = wil_rx_status_get_eop(msg); cid = wil_rx_status_get_cid(msg); - if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) { + if (unlikely(!wil_val_in_range(cid, 0, max_assoc_sta))) { wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n", cid, sring->swhead); rxdata->skipping = true; @@ -1137,7 +1137,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil, /* Total number of completed descriptors in all descriptor rings */ int desc_cnt = 0; int cid; - struct wil_net_stats *stats = NULL; + struct wil_net_stats *stats; struct wil_tx_enhanced_desc *_d; unsigned int ring_id; unsigned int num_descs; @@ -1187,8 +1187,7 @@ int wil_tx_sring_handler(struct wil6210_priv *wil, ndev = vif_to_ndev(vif); cid = wil->ring2cid_tid[ring_id][0]; - if (cid < WIL6210_MAX_CID) - stats = &wil->sta[cid].stats; + stats = (cid < max_assoc_sta ? &wil->sta[cid].stats : NULL); wil_dbg_txrx(wil, "tx_status: completed desc_ring (%d), num_descs (%d)\n", diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 0f3be3ffc6a2..e1b1039b13ab 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -38,6 +38,8 @@ extern bool rx_large_buf; extern bool debug_fw; extern bool disable_ap_sme; extern bool ftm_mode; +extern bool drop_if_ring_full; +extern uint max_assoc_sta; struct wil6210_priv; struct wil6210_vif; @@ -89,7 +91,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL_RING_SIZE_ORDER_MIN (5) #define WIL_RING_SIZE_ORDER_MAX (15) #define WIL6210_MAX_TX_RINGS (24) /* HW limit */ -#define WIL6210_MAX_CID (8) /* HW limit */ +#define WIL6210_MAX_CID (20) /* max number of stations */ +#define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */ #define WIL6210_NAPI_BUDGET (16) /* arbitrary */ #define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */ #define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */ @@ -457,7 +460,7 @@ static inline void parse_cidxtid(u8 cidxtid, u8 *cid, u8 *tid) */ static inline bool wil_cid_valid(u8 cid) { - return cid < WIL6210_MAX_CID; + return (cid >= 0 && cid < max_assoc_sta); } struct wil6210_mbox_ring { @@ -791,6 +794,7 @@ struct wil_halp { struct mutex lock; /* protect halp ref_cnt */ unsigned int ref_cnt; struct completion comp; + u8 handle_icr; }; struct wil_blob_wrapper { @@ -1235,7 +1239,7 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason, int wmi_addba(struct wil6210_priv *wil, u8 mid, u8 ringid, u8 size, u16 timeout); int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason); -int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason); +int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason); int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u8 token, u16 status, bool amsdu, u16 agg_wsize, u16 timeout); @@ -1248,8 +1252,8 @@ int wmi_port_allocate(struct wil6210_priv *wil, u8 mid, const u8 *mac, enum nl80211_iftype iftype); int wmi_port_delete(struct wil6210_priv *wil, u8 mid); int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval); -int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, - u8 cidxtid, u8 dialog_token, __le16 ba_param_set, +int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, + u8 dialog_token, __le16 ba_param_set, __le16 ba_timeout, __le16 ba_seq_ctrl); int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 345f05969190..bda4a9712f91 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,8 +24,9 @@ #include "wmi.h" #include "trace.h" -static uint max_assoc_sta = WIL6210_MAX_CID; -module_param(max_assoc_sta, uint, 0644); +/* set the default max assoc sta to max supported by driver */ +uint max_assoc_sta = WIL6210_MAX_CID; +module_param(max_assoc_sta, uint, 0444); MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); int agg_wsize; /* = 0; */ @@ -770,6 +771,7 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len) struct wil6210_priv *wil = vif_to_wil(vif); struct wiphy *wiphy = wil_to_wiphy(wil); struct wmi_ready_event *evt = d; + u8 fw_max_assoc_sta; wil_info(wil, "FW ver. %s(SW %d); MAC %pM; %d MID's\n", wil->fw_version, le32_to_cpu(evt->sw_version), @@ -787,6 +789,25 @@ static void wmi_evt_ready(struct wil6210_vif *vif, int id, void *d, int len) evt->rfc_read_calib_result); wil->fw_calib_result = evt->rfc_read_calib_result; } + + fw_max_assoc_sta = WIL6210_RX_DESC_MAX_CID; + if (len > offsetof(struct wmi_ready_event, max_assoc_sta) && + evt->max_assoc_sta > 0) { + fw_max_assoc_sta = evt->max_assoc_sta; + wil_dbg_wmi(wil, "fw reported max assoc sta %d\n", + fw_max_assoc_sta); + + if (fw_max_assoc_sta > WIL6210_MAX_CID) { + wil_dbg_wmi(wil, + "fw max assoc sta %d exceeds max driver supported %d\n", + fw_max_assoc_sta, WIL6210_MAX_CID); + fw_max_assoc_sta = WIL6210_MAX_CID; + } + } + + max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta); + wil_dbg_wmi(wil, "setting max assoc sta to %d\n", max_assoc_sta); + wil_set_recovery_state(wil, fw_recovery_idle); set_bit(wil_status_fwready, wil->status); /* let the reset sequence continue */ @@ -952,7 +973,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) evt->assoc_req_len, evt->assoc_resp_len); return; } - if (evt->cid >= WIL6210_MAX_CID) { + if (evt->cid >= max_assoc_sta) { wil_err(wil, "Connect CID invalid : %d\n", evt->cid); return; } @@ -1271,9 +1292,16 @@ static void wmi_evt_addba_rx_req(struct wil6210_vif *vif, int id, void *d, int len) { struct wil6210_priv *wil = vif_to_wil(vif); + u8 cid, tid; struct wmi_rcp_addba_req_event *evt = d; - wil_addba_rx_request(wil, vif->mid, evt->cidxtid, evt->dialog_token, + if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) { + parse_cidxtid(evt->cidxtid, &cid, &tid); + } else { + cid = evt->cid; + tid = evt->tid; + } + wil_addba_rx_request(wil, vif->mid, cid, tid, evt->dialog_token, evt->ba_param_set, evt->ba_timeout, evt->ba_seq_ctrl); } @@ -1289,7 +1317,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) struct wil_tid_ampdu_rx *r; might_sleep(); - parse_cidxtid(evt->cidxtid, &cid, &tid); + + if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) { + parse_cidxtid(evt->cidxtid, &cid, &tid); + } else { + cid = evt->cid; + tid = evt->tid; + } wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n", vif->mid, cid, tid, evt->from_initiator ? 
"originator" : "recipient", @@ -1404,7 +1438,7 @@ static void wil_link_stats_store_basic(struct wil6210_vif *vif, u8 cid = basic->cid; struct wil_sta_info *sta; - if (cid < 0 || cid >= WIL6210_MAX_CID) { + if (cid < 0 || cid >= max_assoc_sta) { wil_err(wil, "invalid cid %d\n", cid); return; } @@ -1554,7 +1588,7 @@ static int wil_find_cid_ringid_sta(struct wil6210_priv *wil, continue; lcid = wil->ring2cid_tid[i][0]; - if (lcid >= WIL6210_MAX_CID) /* skip BCAST */ + if (lcid >= max_assoc_sta) /* skip BCAST */ continue; wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid); @@ -2120,10 +2154,9 @@ int wmi_pcp_start(struct wil6210_vif *vif, if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || (cmd.pcp_max_assoc_sta <= 0)) { - wil_info(wil, - "Requested connection limit %u, valid values are 1 - %d. Setting to %d\n", - max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID); - cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; + wil_err(wil, "unexpected max_assoc_sta %d\n", + cmd.pcp_max_assoc_sta); + return -EOPNOTSUPP; } if (disable_ap_sme && @@ -2516,7 +2549,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring) if (ch) cmd.sniffer_cfg.channel = ch->hw_value - 1; cmd.sniffer_cfg.phy_info_mode = - cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); + cpu_to_le32(WMI_SNIFFER_PHY_INFO_DISABLED); cmd.sniffer_cfg.phy_support = cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) ? WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS); @@ -2651,15 +2684,22 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason) return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd)); } -int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason) +int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason) { struct wmi_rcp_delba_cmd cmd = { - .cidxtid = cidxtid, .reason = cpu_to_le16(reason), }; - wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf, - (cidxtid >> 4) & 0xf, reason); + if (cid >= WIL6210_RX_DESC_MAX_CID) { + cmd.cidxtid = CIDXTID_EXTENDED_CID_TID; + cmd.cid = cid; + cmd.tid = tid; + } else { + cmd.cidxtid = mk_cidxtid(cid, tid); + } + + wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cid, + tid, reason); return wmi_send(wil, WMI_RCP_DELBA_CMDID, mid, &cmd, sizeof(cmd)); } @@ -2670,7 +2710,6 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, { int rc; struct wmi_rcp_addba_resp_cmd cmd = { - .cidxtid = mk_cidxtid(cid, tid), .dialog_token = token, .status_code = cpu_to_le16(status), /* bit 0: A-MSDU supported @@ -2689,6 +2728,14 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)}, }; + if (cid >= WIL6210_RX_DESC_MAX_CID) { + cmd.cidxtid = CIDXTID_EXTENDED_CID_TID; + cmd.cid = cid; + cmd.tid = tid; + } else { + cmd.cidxtid = mk_cidxtid(cid, tid); + } + wil_dbg_wmi(wil, "ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n", mid, cid, tid, agg_wsize, diff --git a/drivers/net/wireless/broadcom/b43/debugfs.c b/drivers/net/wireless/broadcom/b43/debugfs.c index 77046384dd80..976c8ec4e992 100644 --- a/drivers/net/wireless/broadcom/b43/debugfs.c +++ b/drivers/net/wireless/broadcom/b43/debugfs.c @@ -668,15 +668,13 @@ static void b43_remove_dynamic_debug(struct b43_wldev *dev) static void b43_add_dynamic_debug(struct b43_wldev *dev) { struct b43_dfsentry *e = dev->dfsentry; - struct dentry *d; -#define add_dyn_dbg(name, id, initstate) do { \ - e->dyn_debug[id] = (initstate); \ - d = debugfs_create_bool(name, 0600, e->subdir, \ - 
&(e->dyn_debug[id])); \ - if (!IS_ERR(d)) \ - e->dyn_debug_dentries[id] = d; \ - } while (0) +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + e->dyn_debug_dentries[id] = \ + debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + } while (0) add_dyn_dbg("debug_xmitpower", B43_DBG_XMITPOWER, false); add_dyn_dbg("debug_dmaoverflow", B43_DBG_DMAOVERFLOW, false); @@ -718,19 +716,6 @@ void b43_debugfs_add_device(struct b43_wldev *dev) snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); e->subdir = debugfs_create_dir(devdir, rootdir); - if (!e->subdir || IS_ERR(e->subdir)) { - if (e->subdir == ERR_PTR(-ENODEV)) { - b43dbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " - "enabled in kernel config\n"); - } else { - b43err(dev->wl, "debugfs: cannot create %s directory\n", - devdir); - } - dev->dfsentry = NULL; - kfree(log->log); - kfree(e); - return; - } e->mmio16read_next = 0xFFFF; /* invalid address */ e->mmio32read_next = 0xFFFF; /* invalid address */ @@ -741,13 +726,10 @@ void b43_debugfs_add_device(struct b43_wldev *dev) #define ADD_FILE(name, mode) \ do { \ - struct dentry *d; \ - d = debugfs_create_file(__stringify(name), \ + e->file_##name.dentry = \ + debugfs_create_file(__stringify(name), \ mode, e->subdir, dev, \ &fops_##name.fops); \ - e->file_##name.dentry = NULL; \ - if (!IS_ERR(d)) \ - e->file_##name.dentry = d; \ } while (0) @@ -818,8 +800,6 @@ void b43_debugfs_log_txstat(struct b43_wldev *dev, void b43_debugfs_init(void) { rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(rootdir)) - rootdir = NULL; } void b43_debugfs_exit(void) diff --git a/drivers/net/wireless/broadcom/b43legacy/debugfs.c b/drivers/net/wireless/broadcom/b43legacy/debugfs.c index 82ef56ed7ca1..8150adee3e34 100644 --- a/drivers/net/wireless/broadcom/b43legacy/debugfs.c +++ b/drivers/net/wireless/broadcom/b43legacy/debugfs.c @@ -361,15 +361,13 @@ static void b43legacy_remove_dynamic_debug(struct b43legacy_wldev *dev) static void b43legacy_add_dynamic_debug(struct b43legacy_wldev *dev) { struct b43legacy_dfsentry *e = dev->dfsentry; - struct dentry *d; -#define add_dyn_dbg(name, id, initstate) do { \ - e->dyn_debug[id] = (initstate); \ - d = debugfs_create_bool(name, 0600, e->subdir, \ - &(e->dyn_debug[id])); \ - if (!IS_ERR(d)) \ - e->dyn_debug_dentries[id] = d; \ - } while (0) +#define add_dyn_dbg(name, id, initstate) do { \ + e->dyn_debug[id] = (initstate); \ + e->dyn_debug_dentries[id] = \ + debugfs_create_bool(name, 0600, e->subdir, \ + &(e->dyn_debug[id])); \ + } while (0) add_dyn_dbg("debug_xmitpower", B43legacy_DBG_XMITPOWER, false); add_dyn_dbg("debug_dmaoverflow", B43legacy_DBG_DMAOVERFLOW, false); @@ -408,29 +406,14 @@ void b43legacy_debugfs_add_device(struct b43legacy_wldev *dev) snprintf(devdir, sizeof(devdir), "%s", wiphy_name(dev->wl->hw->wiphy)); e->subdir = debugfs_create_dir(devdir, rootdir); - if (!e->subdir || IS_ERR(e->subdir)) { - if (e->subdir == ERR_PTR(-ENODEV)) { - b43legacydbg(dev->wl, "DebugFS (CONFIG_DEBUG_FS) not " - "enabled in kernel config\n"); - } else { - b43legacyerr(dev->wl, "debugfs: cannot create %s directory\n", - devdir); - } - dev->dfsentry = NULL; - kfree(log->log); - kfree(e); - return; - } #define ADD_FILE(name, mode) \ do { \ - struct dentry *d; \ - d = debugfs_create_file(__stringify(name), \ + e->file_##name.dentry = \ + debugfs_create_file(__stringify(name), \ mode, e->subdir, dev, \ &fops_##name.fops); \ e->file_##name.dentry = NULL; \ - if (!IS_ERR(d)) \ - e->file_##name.dentry = d; 
\ } while (0) @@ -492,8 +475,6 @@ void b43legacy_debugfs_log_txstat(struct b43legacy_wldev *dev, void b43legacy_debugfs_init(void) { rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(rootdir)) - rootdir = NULL; } void b43legacy_debugfs_exit(void) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile index 22fd95a736a8..f7cf3e5f4849 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile @@ -16,8 +16,8 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ccflags-y += \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmfmac \ - -Idrivers/net/wireless/broadcom/brcm80211/include + -I $(srctree)/$(src) \ + -I $(srctree)/$(src)/../include obj-$(CONFIG_BRCMFMAC) += brcmfmac.o brcmfmac-objs += \ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c index 1068a2a4494c..73d3c1a0a7c9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c @@ -178,8 +178,8 @@ brcmf_proto_bcdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, *fwerr = 0; ret = brcmf_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, false); if (ret < 0) { - brcmf_err("brcmf_proto_bcdc_msg failed w/status %d\n", - ret); + bphy_err(drvr, "brcmf_proto_bcdc_msg failed w/status %d\n", + ret); goto done; } @@ -195,9 +195,9 @@ retry: if ((id < bcdc->reqid) && (++retries < RETRIES)) goto retry; if (id != bcdc->reqid) { - brcmf_err("%s: unexpected request id %d (expected %d)\n", - brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id, - bcdc->reqid); + bphy_err(drvr, "%s: unexpected request id %d (expected %d)\n", + brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id, + bcdc->reqid); ret = -EINVAL; goto done; } @@ -245,9 +245,9 @@ brcmf_proto_bcdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd, id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT; if (id != bcdc->reqid) { - brcmf_err("%s: unexpected request id %d (expected %d)\n", - brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id, - bcdc->reqid); + bphy_err(drvr, "%s: unexpected request id %d (expected %d)\n", + brcmf_ifname(brcmf_get_ifp(drvr, ifidx)), id, + bcdc->reqid); ret = -EINVAL; goto done; } @@ -312,8 +312,8 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws, } if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) != BCDC_PROTO_VER) { - brcmf_err("%s: non-BCDC packet received, flags 0x%x\n", - brcmf_ifname(tmp_if), h->flags); + bphy_err(drvr, "%s: non-BCDC packet received, flags 0x%x\n", + brcmf_ifname(tmp_if), h->flags); return -EBADE; } @@ -460,7 +460,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) /* ensure that the msg buf directly follows the cdc msg struct */ if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) { - brcmf_err("struct brcmf_proto_bcdc is not correctly defined\n"); + bphy_err(drvr, "struct brcmf_proto_bcdc is not correctly defined\n"); goto fail; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index d64bf233b12c..ec129864cc9c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -315,7 +315,7 @@ static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev, /* bail out as things are really fishy here */ WARN(1, "invalid sdio function number: %d\n", 
func->num); err = -ENOMEDIUM; - }; + } if (err == -ENOMEDIUM) brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index c4965184cdf3..3d441c5c745c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -90,6 +90,7 @@ struct brcmf_bus_ops { int (*get_memdump)(struct device *dev, void *data, size_t len); int (*get_fwname)(struct device *dev, const char *ext, unsigned char *fw_name); + void (*debugfs_create)(struct device *dev); }; @@ -235,6 +236,15 @@ int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext, return bus->ops->get_fwname(bus->dev, ext, fw_name); } +static inline +void brcmf_bus_debugfs_create(struct brcmf_bus *bus) +{ + if (!bus->ops->debugfs_create) + return; + + return bus->ops->debugfs_create(bus->dev); +} + /* * interface functions from common layer */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 35301237d435..e92f6351bd22 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -457,6 +457,7 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key, static int send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key) { + struct brcmf_pub *drvr = ifp->drvr; int err; struct brcmf_wsec_key_le key_le; @@ -468,7 +469,7 @@ send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key) sizeof(key_le)); if (err) - brcmf_err("wsec_key error (%d)\n", err); + bphy_err(drvr, "wsec_key error (%d)\n", err); return err; } @@ -508,6 +509,7 @@ static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr) static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_mbss_ssid_le mbss_ssid_le; int bsscfgidx; int err; @@ -524,7 +526,7 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) err = brcmf_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le, sizeof(mbss_ssid_le)); if (err < 0) - brcmf_err("setting ssid failed %d\n", err); + bphy_err(drvr, "setting ssid failed %d\n", err); return err; } @@ -542,6 +544,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_vif *vif; int err; @@ -567,7 +570,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { - brcmf_err("timeout occurred\n"); + bphy_err(drvr, "timeout occurred\n"); err = -EIO; goto fail; } @@ -575,7 +578,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, /* interface created in firmware */ ifp = vif->ifp; if (!ifp) { - brcmf_err("no if pointer provided\n"); + bphy_err(drvr, "no if pointer provided\n"); err = -ENOENT; goto fail; } @@ -583,7 +586,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, strncpy(ifp->ndev->name, name, sizeof(ifp->ndev->name) - 1); err = brcmf_net_attach(ifp, true); if (err) { - brcmf_err("Registering netdevice failed\n"); + bphy_err(drvr, "Registering netdevice failed\n"); free_netdev(ifp->ndev); goto fail; } @@ -614,13 +617,15 @@ static struct wireless_dev 
*brcmf_cfg80211_add_iface(struct wiphy *wiphy, enum nl80211_iftype type, struct vif_params *params) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct wireless_dev *wdev; int err; brcmf_dbg(TRACE, "enter: %s type %d\n", name, type); err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type); if (err) { - brcmf_err("iface validation failed: err=%d\n", err); + bphy_err(drvr, "iface validation failed: err=%d\n", err); return ERR_PTR(err); } switch (type) { @@ -645,8 +650,8 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, } if (IS_ERR(wdev)) - brcmf_err("add iface %s type %d failed: err=%d\n", - name, type, (int)PTR_ERR(wdev)); + bphy_err(drvr, "add iface %s type %d failed: err=%d\n", name, + type, (int)PTR_ERR(wdev)); else brcmf_cfg80211_update_proto_addr_mode(wdev); @@ -661,12 +666,13 @@ static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc) void brcmf_set_mpc(struct brcmf_if *ifp, int mpc) { + struct brcmf_pub *drvr = ifp->drvr; s32 err = 0; if (check_vif_up(ifp->vif)) { err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc); if (err) { - brcmf_err("fail to set mpc\n"); + bphy_err(drvr, "fail to set mpc\n"); return; } brcmf_dbg(INFO, "MPC : %d\n", mpc); @@ -677,6 +683,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, bool aborted, bool fw_abort) { + struct brcmf_pub *drvr = cfg->pub; struct brcmf_scan_params_le params_le; struct cfg80211_scan_request *scan_request; u64 reqid; @@ -711,7 +718,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN, ¶ms_le, sizeof(params_le)); if (err) - brcmf_err("Scan abort failed\n"); + bphy_err(drvr, "Scan abort failed\n"); } brcmf_scan_config_mpc(ifp, 1); @@ -756,6 +763,7 @@ static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = wdev->netdev; struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; int ret; int err; @@ -763,7 +771,7 @@ static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy, err = brcmf_fil_bsscfg_data_set(ifp, "interface_remove", NULL, 0); if (err) { - brcmf_err("interface_remove failed %d\n", err); + bphy_err(drvr, "interface_remove failed %d\n", err); goto err_unarm; } @@ -771,7 +779,7 @@ static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy, ret = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL, BRCMF_VIF_EVENT_TIMEOUT); if (!ret) { - brcmf_err("timeout occurred\n"); + bphy_err(drvr, "timeout occurred\n"); err = -EIO; goto err_unarm; } @@ -834,6 +842,7 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_vif *vif = ifp->vif; + struct brcmf_pub *drvr = cfg->pub; s32 infra = 0; s32 ap = 0; s32 err = 0; @@ -873,14 +882,14 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, } err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type); if (err) { - brcmf_err("iface validation failed: err=%d\n", err); + bphy_err(drvr, "iface validation failed: err=%d\n", err); return err; } switch (type) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_WDS: - brcmf_err("type (%d) : currently we do not support this type\n", - type); + bphy_err(drvr, "type (%d) : currently we do not support this type\n", + type); return -EOPNOTSUPP; case NL80211_IFTYPE_ADHOC: infra = 0; @@ -908,7 +917,7 @@ 
brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, } else { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra); if (err) { - brcmf_err("WLC_SET_INFRA error (%d)\n", err); + bphy_err(drvr, "WLC_SET_INFRA error (%d)\n", err); err = -EAGAIN; goto done; } @@ -999,6 +1008,7 @@ static s32 brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, struct cfg80211_scan_request *request) { + struct brcmf_pub *drvr = cfg->pub; s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE + offsetof(struct brcmf_escan_params_le, params_le); struct brcmf_escan_params_le *params; @@ -1030,7 +1040,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, if (err == -EBUSY) brcmf_dbg(INFO, "system busy : escan canceled\n"); else - brcmf_err("error (%d)\n", err); + bphy_err(drvr, "error (%d)\n", err); } kfree(params); @@ -1067,6 +1077,7 @@ static s32 brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_vif *vif; s32 err = 0; @@ -1076,21 +1087,22 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) return -EIO; if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { - brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status); + bphy_err(drvr, "Scanning already: status (%lu)\n", + cfg->scan_status); return -EAGAIN; } if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) { - brcmf_err("Scanning being aborted: status (%lu)\n", - cfg->scan_status); + bphy_err(drvr, "Scanning being aborted: status (%lu)\n", + cfg->scan_status); return -EAGAIN; } if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) { - brcmf_err("Scanning suppressed: status (%lu)\n", - cfg->scan_status); + bphy_err(drvr, "Scanning suppressed: status (%lu)\n", + cfg->scan_status); return -EAGAIN; } if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state)) { - brcmf_err("Connecting: status (%lu)\n", vif->sme_state); + bphy_err(drvr, "Connecting: status (%lu)\n", vif->sme_state); return -EAGAIN; } @@ -1124,7 +1136,7 @@ brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) return 0; scan_out: - brcmf_err("scan error (%d)\n", err); + bphy_err(drvr, "scan error (%d)\n", err); clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status); cfg->scan_request = NULL; return err; @@ -1132,36 +1144,41 @@ scan_out: static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold) { + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; s32 err = 0; - err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "rtsthresh", - rts_threshold); + err = brcmf_fil_iovar_int_set(ifp, "rtsthresh", rts_threshold); if (err) - brcmf_err("Error (%d)\n", err); + bphy_err(drvr, "Error (%d)\n", err); return err; } static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold) { + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; s32 err = 0; - err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "fragthresh", + err = brcmf_fil_iovar_int_set(ifp, "fragthresh", frag_threshold); if (err) - brcmf_err("Error (%d)\n", err); + bphy_err(drvr, "Error (%d)\n", err); return err; } static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l) { + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; s32 err = 0; u32 cmd = (l ? 
BRCMF_C_SET_LRL : BRCMF_C_SET_SRL); - err = brcmf_fil_cmd_int_set(netdev_priv(ndev), cmd, retry); + err = brcmf_fil_cmd_int_set(ifp, cmd, retry); if (err) { - brcmf_err("cmd (%d) , error (%d)\n", cmd, err); + bphy_err(drvr, "cmd (%d) , error (%d)\n", cmd, err); return err; } return err; @@ -1237,6 +1254,7 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e) static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_wsec_pmk_le pmk; int i, err; @@ -1250,8 +1268,8 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len) err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK, &pmk, sizeof(pmk)); if (err < 0) - brcmf_err("failed to change PSK in firmware (len=%u)\n", - pmk_len); + bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n", + pmk_len); return err; } @@ -1259,6 +1277,7 @@ static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len) static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy); + struct brcmf_pub *drvr = cfg->pub; s32 err = 0; brcmf_dbg(TRACE, "Enter\n"); @@ -1268,7 +1287,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) err = brcmf_fil_cmd_data_set(vif->ifp, BRCMF_C_DISASSOC, NULL, 0); if (err) { - brcmf_err("WLC_DISASSOC failed (%d)\n", err); + bphy_err(drvr, "WLC_DISASSOC failed (%d)\n", err); } if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) || (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) @@ -1292,6 +1311,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; + struct brcmf_pub *drvr = cfg->pub; struct brcmf_join_params join_params; size_t join_params_size = 0; s32 err = 0; @@ -1356,7 +1376,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec); if (err) { - brcmf_err("wsec failed (%d)\n", err); + bphy_err(drvr, "wsec failed (%d)\n", err); goto done; } @@ -1368,7 +1388,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, bcnprd); if (err) { - brcmf_err("WLC_SET_BCNPRD failed (%d)\n", err); + bphy_err(drvr, "WLC_SET_BCNPRD failed (%d)\n", err); goto done; } @@ -1413,7 +1433,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_CHANNEL, target_channel); if (err) { - brcmf_err("WLC_SET_CHANNEL failed (%d)\n", err); + bphy_err(drvr, "WLC_SET_CHANNEL failed (%d)\n", err); goto done; } } else @@ -1425,7 +1445,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, &join_params, join_params_size); if (err) { - brcmf_err("WLC_SET_SSID failed (%d)\n", err); + bphy_err(drvr, "WLC_SET_SSID failed (%d)\n", err); goto done; } @@ -1461,7 +1481,9 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) static s32 brcmf_set_wpa_version(struct net_device *ndev, struct cfg80211_connect_params *sme) { + struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_cfg80211_security *sec; s32 val = 0; s32 err = 0; @@ -1473,9 +1495,9 @@ static s32 
brcmf_set_wpa_version(struct net_device *ndev, else val = WPA_AUTH_DISABLED; brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val); - err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val); + err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", val); if (err) { - brcmf_err("set wpa_auth failed (%d)\n", err); + bphy_err(drvr, "set wpa_auth failed (%d)\n", err); return err; } sec = &profile->sec; @@ -1486,7 +1508,9 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev, static s32 brcmf_set_auth_type(struct net_device *ndev, struct cfg80211_connect_params *sme) { + struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_cfg80211_security *sec; s32 val = 0; s32 err = 0; @@ -1506,9 +1530,9 @@ static s32 brcmf_set_auth_type(struct net_device *ndev, break; } - err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val); + err = brcmf_fil_bsscfg_int_set(ifp, "auth", val); if (err) { - brcmf_err("set auth failed (%d)\n", err); + bphy_err(drvr, "set auth failed (%d)\n", err); return err; } sec = &profile->sec; @@ -1520,7 +1544,9 @@ static s32 brcmf_set_wsec_mode(struct net_device *ndev, struct cfg80211_connect_params *sme) { + struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_cfg80211_security *sec; s32 pval = 0; s32 gval = 0; @@ -1543,8 +1569,8 @@ brcmf_set_wsec_mode(struct net_device *ndev, pval = AES_ENABLED; break; default: - brcmf_err("invalid cipher pairwise (%d)\n", - sme->crypto.ciphers_pairwise[0]); + bphy_err(drvr, "invalid cipher pairwise (%d)\n", + sme->crypto.ciphers_pairwise[0]); return -EINVAL; } } @@ -1564,8 +1590,8 @@ brcmf_set_wsec_mode(struct net_device *ndev, gval = AES_ENABLED; break; default: - brcmf_err("invalid cipher group (%d)\n", - sme->crypto.cipher_group); + bphy_err(drvr, "invalid cipher group (%d)\n", + sme->crypto.cipher_group); return -EINVAL; } } @@ -1578,9 +1604,9 @@ brcmf_set_wsec_mode(struct net_device *ndev, pval = AES_ENABLED; wsec = pval | gval; - err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec); + err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec); if (err) { - brcmf_err("error (%d)\n", err); + bphy_err(drvr, "error (%d)\n", err); return err; } @@ -1596,6 +1622,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) { struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; + struct brcmf_pub *drvr = ifp->drvr; s32 val; s32 err; const struct brcmf_tlv *rsn_ie; @@ -1613,7 +1640,7 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val); if (err) { - brcmf_err("could not get wpa_auth (%d)\n", err); + bphy_err(drvr, "could not get wpa_auth (%d)\n", err); return err; } if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { @@ -1627,8 +1654,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) val = WPA_AUTH_PSK; break; default: - brcmf_err("invalid cipher group (%d)\n", - sme->crypto.cipher_group); + bphy_err(drvr, "invalid cipher group (%d)\n", + sme->crypto.cipher_group); return -EINVAL; } } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { @@ -1658,8 +1685,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) val = WPA2_AUTH_PSK | WPA2_AUTH_FT; break; default: - brcmf_err("invalid cipher group 
(%d)\n", - sme->crypto.cipher_group); + bphy_err(drvr, "invalid cipher group (%d)\n", + sme->crypto.cipher_group); return -EINVAL; } } @@ -1705,7 +1732,7 @@ skip_mfp_config: brcmf_dbg(CONN, "setting wpa_auth to %d\n", val); err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val); if (err) { - brcmf_err("could not set wpa_auth (%d)\n", err); + bphy_err(drvr, "could not set wpa_auth (%d)\n", err); return err; } @@ -1716,6 +1743,8 @@ static s32 brcmf_set_sharedkey(struct net_device *ndev, struct cfg80211_connect_params *sme) { + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); struct brcmf_cfg80211_security *sec; struct brcmf_wsec_key key; @@ -1742,7 +1771,7 @@ brcmf_set_sharedkey(struct net_device *ndev, key.len = (u32) sme->key_len; key.index = (u32) sme->key_idx; if (key.len > sizeof(key.data)) { - brcmf_err("Too long key length (%u)\n", key.len); + bphy_err(drvr, "Too long key length (%u)\n", key.len); return -EINVAL; } memcpy(key.data, sme->key, key.len); @@ -1755,24 +1784,24 @@ brcmf_set_sharedkey(struct net_device *ndev, key.algo = CRYPTO_ALGO_WEP128; break; default: - brcmf_err("Invalid algorithm (%d)\n", - sme->crypto.ciphers_pairwise[0]); + bphy_err(drvr, "Invalid algorithm (%d)\n", + sme->crypto.ciphers_pairwise[0]); return -EINVAL; } /* Set the new key/index */ brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n", key.len, key.index, key.algo); brcmf_dbg(CONN, "key \"%s\"\n", key.data); - err = send_key_to_dongle(netdev_priv(ndev), &key); + err = send_key_to_dongle(ifp, &key); if (err) return err; if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { brcmf_dbg(CONN, "set auth_type to shared key\n"); val = WL_AUTH_SHARED_KEY; /* shared key */ - err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val); + err = brcmf_fil_bsscfg_int_set(ifp, "auth", val); if (err) - brcmf_err("set auth failed (%d)\n", err); + bphy_err(drvr, "set auth failed (%d)\n", err); } return err; } @@ -1792,6 +1821,7 @@ enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp, static void brcmf_set_join_pref(struct brcmf_if *ifp, struct cfg80211_bss_selection *bss_select) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_join_pref_params join_pref_params[2]; enum nl80211_band band; int err, i = 0; @@ -1830,7 +1860,7 @@ static void brcmf_set_join_pref(struct brcmf_if *ifp, err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params, sizeof(join_pref_params)); if (err) - brcmf_err("Set join_pref error (%d)\n", err); + bphy_err(drvr, "Set join_pref error (%d)\n", err); } static s32 @@ -1841,6 +1871,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; struct ieee80211_channel *chan = sme->channel; + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_join_params join_params; size_t join_params_size; const struct brcmf_tlv *rsn_ie; @@ -1857,7 +1888,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, return -EIO; if (!sme->ssid) { - brcmf_err("Invalid ssid\n"); + bphy_err(drvr, "Invalid ssid\n"); return -EOPNOTSUPP; } @@ -1886,7 +1917,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); if (err) - brcmf_err("Set Assoc REQ IE Failed\n"); + bphy_err(drvr, "Set Assoc REQ IE Failed\n"); else brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc 
request\n"); @@ -1907,32 +1938,32 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_set_wpa_version(ndev, sme); if (err) { - brcmf_err("wl_set_wpa_version failed (%d)\n", err); + bphy_err(drvr, "wl_set_wpa_version failed (%d)\n", err); goto done; } sme->auth_type = brcmf_war_auth_type(ifp, sme->auth_type); err = brcmf_set_auth_type(ndev, sme); if (err) { - brcmf_err("wl_set_auth_type failed (%d)\n", err); + bphy_err(drvr, "wl_set_auth_type failed (%d)\n", err); goto done; } err = brcmf_set_wsec_mode(ndev, sme); if (err) { - brcmf_err("wl_set_set_cipher failed (%d)\n", err); + bphy_err(drvr, "wl_set_set_cipher failed (%d)\n", err); goto done; } err = brcmf_set_key_mgmt(ndev, sme); if (err) { - brcmf_err("wl_set_key_mgmt failed (%d)\n", err); + bphy_err(drvr, "wl_set_key_mgmt failed (%d)\n", err); goto done; } err = brcmf_set_sharedkey(ndev, sme); if (err) { - brcmf_err("brcmf_set_sharedkey failed (%d)\n", err); + bphy_err(drvr, "brcmf_set_sharedkey failed (%d)\n", err); goto done; } @@ -1949,7 +1980,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, /* enable firmware supplicant for this interface */ err = brcmf_fil_iovar_int_set(ifp, "sup_wpa", 1); if (err < 0) { - brcmf_err("failed to enable fw supplicant\n"); + bphy_err(drvr, "failed to enable fw supplicant\n"); goto done; } } @@ -2044,7 +2075,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, &join_params, join_params_size); if (err) - brcmf_err("BRCMF_C_SET_SSID failed (%d)\n", err); + bphy_err(drvr, "BRCMF_C_SET_SSID failed (%d)\n", err); done: if (err) @@ -2057,8 +2088,10 @@ static s32 brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, u16 reason_code) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; + struct brcmf_pub *drvr = cfg->pub; struct brcmf_scb_val_le scbval; s32 err = 0; @@ -2075,7 +2108,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_DISASSOC, &scbval, sizeof(scbval)); if (err) - brcmf_err("error (%d)\n", err); + bphy_err(drvr, "error (%d)\n", err); brcmf_dbg(TRACE, "Exit\n"); return err; @@ -2088,6 +2121,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = cfg_to_ndev(cfg); struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; s32 err; s32 disable; u32 qdbm = 127; @@ -2102,7 +2136,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, case NL80211_TX_POWER_LIMITED: case NL80211_TX_POWER_FIXED: if (mbm < 0) { - brcmf_err("TX_POWER_FIXED - dbm is negative\n"); + bphy_err(drvr, "TX_POWER_FIXED - dbm is negative\n"); err = -EINVAL; goto done; } @@ -2112,7 +2146,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, qdbm |= WL_TXPWR_OVERRIDE; break; default: - brcmf_err("Unsupported type %d\n", type); + bphy_err(drvr, "Unsupported type %d\n", type); err = -EINVAL; goto done; } @@ -2120,11 +2154,11 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, disable = WL_RADIO_SW_DISABLE << 16; err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_RADIO, disable); if (err) - brcmf_err("WLC_SET_RADIO error (%d)\n", err); + bphy_err(drvr, "WLC_SET_RADIO error (%d)\n", err); err = brcmf_fil_iovar_int_set(ifp, 
"qtxpower", qdbm); if (err) - brcmf_err("qtxpower error (%d)\n", err); + bphy_err(drvr, "qtxpower error (%d)\n", err); done: brcmf_dbg(TRACE, "Exit %d (qdbm)\n", qdbm & ~WL_TXPWR_OVERRIDE); @@ -2135,7 +2169,9 @@ static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, s32 *dbm) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_cfg80211_vif *vif = wdev_to_vif(wdev); + struct brcmf_pub *drvr = cfg->pub; s32 qdbm = 0; s32 err; @@ -2145,7 +2181,7 @@ brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, err = brcmf_fil_iovar_int_get(vif->ifp, "qtxpower", &qdbm); if (err) { - brcmf_err("error (%d)\n", err); + bphy_err(drvr, "error (%d)\n", err); goto done; } *dbm = (qdbm & ~WL_TXPWR_OVERRIDE) / 4; @@ -2160,6 +2196,7 @@ brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool unicast, bool multicast) { struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = ifp->drvr; u32 index; u32 wsec; s32 err = 0; @@ -2171,7 +2208,7 @@ brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec); if (err) { - brcmf_err("WLC_GET_WSEC error (%d)\n", err); + bphy_err(drvr, "WLC_GET_WSEC error (%d)\n", err); goto done; } @@ -2181,7 +2218,7 @@ brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_KEY_PRIMARY, index); if (err) - brcmf_err("error (%d)\n", err); + bphy_err(drvr, "error (%d)\n", err); } done: brcmf_dbg(TRACE, "Exit\n"); @@ -2230,7 +2267,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_wsec_key *key; s32 val; s32 wsec; @@ -2245,7 +2284,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) { /* we ignore this key index in this case */ - brcmf_err("invalid key index (%d)\n", key_idx); + bphy_err(drvr, "invalid key index (%d)\n", key_idx); return -EINVAL; } @@ -2254,7 +2293,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, mac_addr); if (params->key_len > sizeof(key->data)) { - brcmf_err("Too long key length (%u)\n", params->key_len); + bphy_err(drvr, "Too long key length (%u)\n", params->key_len); return -EINVAL; } @@ -2308,7 +2347,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n"); break; default: - brcmf_err("Invalid cipher (0x%x)\n", params->cipher); + bphy_err(drvr, "Invalid cipher (0x%x)\n", params->cipher); err = -EINVAL; goto done; } @@ -2319,13 +2358,13 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec); if (err) { - brcmf_err("get wsec error (%d)\n", err); + bphy_err(drvr, "get wsec error (%d)\n", err); goto done; } wsec |= val; err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec); if (err) { - brcmf_err("set wsec error (%d)\n", err); + bphy_err(drvr, "set wsec error (%d)\n", err); goto done; } @@ -2340,9 +2379,11 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, void (*callback)(void *cookie, struct key_params *params)) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct key_params params; struct brcmf_if *ifp = 
netdev_priv(ndev); struct brcmf_cfg80211_profile *profile = &ifp->vif->profile; + struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_security *sec; s32 wsec; s32 err = 0; @@ -2356,7 +2397,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec); if (err) { - brcmf_err("WLC_GET_WSEC error (%d)\n", err); + bphy_err(drvr, "WLC_GET_WSEC error (%d)\n", err); /* Ignore this error, may happen during DISASSOC */ err = -EAGAIN; goto done; @@ -2377,7 +2418,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); } else { - brcmf_err("Invalid algo (0x%x)\n", wsec); + bphy_err(drvr, "Invalid algo (0x%x)\n", wsec); err = -EINVAL; goto done; } @@ -2407,6 +2448,7 @@ brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, static void brcmf_cfg80211_reconfigure_wep(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; s32 err; u8 key_idx; struct brcmf_wsec_key *key; @@ -2423,18 +2465,18 @@ brcmf_cfg80211_reconfigure_wep(struct brcmf_if *ifp) err = send_key_to_dongle(ifp, key); if (err) { - brcmf_err("Setting WEP key failed (%d)\n", err); + bphy_err(drvr, "Setting WEP key failed (%d)\n", err); return; } err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec); if (err) { - brcmf_err("get wsec error (%d)\n", err); + bphy_err(drvr, "get wsec error (%d)\n", err); return; } wsec |= WEP_ENABLED; err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec); if (err) - brcmf_err("set wsec error (%d)\n", err); + bphy_err(drvr, "set wsec error (%d)\n", err); } static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si) @@ -2460,6 +2502,7 @@ static void brcmf_convert_sta_flags(u32 fw_sta_flags, struct station_info *si) static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) { + struct brcmf_pub *drvr = ifp->drvr; struct { __le32 len; struct brcmf_bss_info_le bss_le; @@ -2475,7 +2518,7 @@ static void brcmf_fill_bss_param(struct brcmf_if *ifp, struct station_info *si) err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX); if (err) { - brcmf_err("Failed to get bss info (%d)\n", err); + bphy_err(drvr, "Failed to get bss info (%d)\n", err); goto out_kfree; } si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); @@ -2497,6 +2540,7 @@ static s32 brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, struct station_info *sinfo) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_scb_val_le scbval; struct brcmf_pktcnt_le pktcnt; s32 err; @@ -2506,7 +2550,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, /* Get the current tx rate */ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate); if (err < 0) { - brcmf_err("BRCMF_C_GET_RATE error (%d)\n", err); + bphy_err(drvr, "BRCMF_C_GET_RATE error (%d)\n", err); return err; } sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); @@ -2516,7 +2560,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, &scbval, sizeof(scbval)); if (err) { - brcmf_err("BRCMF_C_GET_RSSI error (%d)\n", err); + bphy_err(drvr, "BRCMF_C_GET_RSSI error (%d)\n", err); return err; } rssi = le32_to_cpu(scbval.val); @@ -2526,7 +2570,7 @@ brcmf_cfg80211_get_station_ibss(struct brcmf_if *ifp, err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_GET_PKTCNTS, &pktcnt, sizeof(pktcnt)); if (err) { - brcmf_err("BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err); + bphy_err(drvr, 
"BRCMF_C_GET_GET_PKTCNTS error (%d)\n", err); return err; } sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | @@ -2545,7 +2589,9 @@ static s32 brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac, struct station_info *sinfo) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_scb_val_le scb_val; s32 err = 0; struct brcmf_sta_info_le sta_info_le; @@ -2574,7 +2620,7 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, &sta_info_le, sizeof(sta_info_le)); if (err < 0) { - brcmf_err("GET STA INFO failed, %d\n", err); + bphy_err(drvr, "GET STA INFO failed, %d\n", err); goto done; } } @@ -2643,7 +2689,8 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, &scb_val, sizeof(scb_val)); if (err) { - brcmf_err("Could not get rssi (%d)\n", err); + bphy_err(drvr, "Could not get rssi (%d)\n", + err); goto done; } else { rssi = le32_to_cpu(scb_val.val); @@ -2664,6 +2711,7 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev, { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; s32 err; brcmf_dbg(TRACE, "Enter, idx %d\n", idx); @@ -2674,8 +2722,8 @@ brcmf_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev, &cfg->assoclist, sizeof(cfg->assoclist)); if (err) { - brcmf_err("BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n", - err); + bphy_err(drvr, "BRCMF_C_GET_ASSOCLIST unsupported, err=%d\n", + err); cfg->assoclist.count = 0; return -EOPNOTSUPP; } @@ -2695,6 +2743,7 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, s32 err = 0; struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; brcmf_dbg(TRACE, "Enter\n"); @@ -2723,9 +2772,9 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm); if (err) { if (err == -ENODEV) - brcmf_err("net_device is not ready yet\n"); + bphy_err(drvr, "net_device is not ready yet\n"); else - brcmf_err("error (%d)\n", err); + bphy_err(drvr, "error (%d)\n", err); } done: brcmf_dbg(TRACE, "Exit\n"); @@ -2736,6 +2785,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, struct brcmf_bss_info_le *bi) { struct wiphy *wiphy = cfg_to_wiphy(cfg); + struct brcmf_pub *drvr = cfg->pub; struct cfg80211_bss *bss; enum nl80211_band band; struct brcmu_chan ch; @@ -2748,7 +2798,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, struct cfg80211_inform_bss bss_data = {}; if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { - brcmf_err("Bss info is larger than buffer. Discarding\n"); + bphy_err(drvr, "Bss info is larger than buffer. 
Discarding\n"); return 0; } @@ -2807,6 +2857,7 @@ next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss) static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg) { + struct brcmf_pub *drvr = cfg->pub; struct brcmf_scan_results *bss_list; struct brcmf_bss_info_le *bi = NULL; /* must be initialized */ s32 err = 0; @@ -2815,8 +2866,8 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg) bss_list = (struct brcmf_scan_results *)cfg->escan_info.escan_buf; if (bss_list->count != 0 && bss_list->version != BRCMF_BSS_INFO_VERSION) { - brcmf_err("Version %d != WL_BSS_INFO_VERSION\n", - bss_list->version); + bphy_err(drvr, "Version %d != WL_BSS_INFO_VERSION\n", + bss_list->version); return -EOPNOTSUPP; } brcmf_dbg(SCAN, "scanned AP count (%d)\n", bss_list->count); @@ -2833,6 +2884,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, const u8 *bssid) { struct wiphy *wiphy = cfg_to_wiphy(cfg); + struct brcmf_pub *drvr = cfg->pub; struct ieee80211_channel *notify_channel; struct brcmf_bss_info_le *bi = NULL; struct ieee80211_supported_band *band; @@ -2860,7 +2912,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, err = brcmf_fil_cmd_data_get(netdev_priv(ndev), BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX); if (err) { - brcmf_err("WLC_GET_BSS_INFO failed: %d\n", err); + bphy_err(drvr, "WLC_GET_BSS_INFO failed: %d\n", err); goto CleanUp; } @@ -2914,6 +2966,7 @@ CleanUp: static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp) { + struct brcmf_pub *drvr = cfg->pub; struct brcmf_bss_info_le *bi; const struct brcmf_tlv *tim; u16 beacon_interval; @@ -2930,7 +2983,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO, cfg->extra_buf, WL_EXTRA_BUF_MAX); if (err) { - brcmf_err("Could not get bss info %d\n", err); + bphy_err(drvr, "Could not get bss info %d\n", err); goto update_bss_info_out; } @@ -2955,7 +3008,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg, u32 var; err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var); if (err) { - brcmf_err("wl dtim_assoc failed (%d)\n", err); + bphy_err(drvr, "wl dtim_assoc failed (%d)\n", err); goto update_bss_info_out; } dtim_period = (u8)var; @@ -2993,9 +3046,10 @@ static void brcmf_escan_timeout(struct timer_list *t) { struct brcmf_cfg80211_info *cfg = from_timer(cfg, t, escan_timeout); + struct brcmf_pub *drvr = cfg->pub; if (cfg->int_escan_map || cfg->scan_request) { - brcmf_err("timer expired\n"); + bphy_err(drvr, "timer expired\n"); schedule_work(&cfg->escan_timeout_work); } } @@ -3043,7 +3097,8 @@ static s32 brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { - struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_cfg80211_info *cfg = drvr->config; s32 status; struct brcmf_escan_result_le *escan_result_le; u32 escan_buflen; @@ -3060,32 +3115,33 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, goto exit; if (!test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { - brcmf_err("scan not ready, bsscfgidx=%d\n", ifp->bsscfgidx); + bphy_err(drvr, "scan not ready, bsscfgidx=%d\n", + ifp->bsscfgidx); return -EPERM; } if (status == BRCMF_E_STATUS_PARTIAL) { brcmf_dbg(SCAN, "ESCAN Partial result\n"); if (e->datalen < sizeof(*escan_result_le)) { - brcmf_err("invalid event data length\n"); + bphy_err(drvr, "invalid event data length\n"); goto exit; } escan_result_le = 
(struct brcmf_escan_result_le *) data; if (!escan_result_le) { - brcmf_err("Invalid escan result (NULL pointer)\n"); + bphy_err(drvr, "Invalid escan result (NULL pointer)\n"); goto exit; } escan_buflen = le32_to_cpu(escan_result_le->buflen); if (escan_buflen > BRCMF_ESCAN_BUF_SIZE || escan_buflen > e->datalen || escan_buflen < sizeof(*escan_result_le)) { - brcmf_err("Invalid escan buffer length: %d\n", - escan_buflen); + bphy_err(drvr, "Invalid escan buffer length: %d\n", + escan_buflen); goto exit; } if (le16_to_cpu(escan_result_le->bss_count) != 1) { - brcmf_err("Invalid bss_count %d: ignoring\n", - escan_result_le->bss_count); + bphy_err(drvr, "Invalid bss_count %d: ignoring\n", + escan_result_le->bss_count); goto exit; } bss_info_le = &escan_result_le->bss_info_le; @@ -3100,8 +3156,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, bi_length = le32_to_cpu(bss_info_le->length); if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) { - brcmf_err("Ignoring invalid bss_info length: %d\n", - bi_length); + bphy_err(drvr, "Ignoring invalid bss_info length: %d\n", + bi_length); goto exit; } @@ -3109,7 +3165,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, BIT(NL80211_IFTYPE_ADHOC))) { if (le16_to_cpu(bss_info_le->capability) & WLAN_CAPABILITY_IBSS) { - brcmf_err("Ignoring IBSS result\n"); + bphy_err(drvr, "Ignoring IBSS result\n"); goto exit; } } @@ -3117,7 +3173,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, list = (struct brcmf_scan_results *) cfg->escan_info.escan_buf; if (bi_length > BRCMF_ESCAN_BUF_SIZE - list->buflen) { - brcmf_err("Buffer is too small: ignoring\n"); + bphy_err(drvr, "Buffer is too small: ignoring\n"); goto exit; } @@ -3276,7 +3332,8 @@ static s32 brcmf_notify_sched_scan_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { - struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_cfg80211_info *cfg = drvr->config; struct brcmf_pno_net_info_le *netinfo, *netinfo_start; struct cfg80211_scan_request *request = NULL; struct wiphy *wiphy = cfg_to_wiphy(cfg); @@ -3309,14 +3366,14 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE); brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count); if (!result_count) { - brcmf_err("FALSE PNO Event. (pfn_count == 0)\n"); + bphy_err(drvr, "FALSE PNO Event. 
(pfn_count == 0)\n"); goto out_err; } netinfo_start = brcmf_get_netinfo_array(pfn_result); datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result); if (datalen < result_count * sizeof(*netinfo)) { - brcmf_err("insufficient event data\n"); + bphy_err(drvr, "insufficient event data\n"); goto out_err; } @@ -3363,15 +3420,16 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_sched_scan_request *req) { - struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; brcmf_dbg(SCAN, "Enter: n_match_sets=%d n_ssids=%d\n", req->n_match_sets, req->n_ssids); if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) { - brcmf_err("Scanning suppressed: status=%lu\n", - cfg->scan_status); + bphy_err(drvr, "Scanning suppressed: status=%lu\n", + cfg->scan_status); return -EAGAIN; } @@ -3449,7 +3507,8 @@ static s32 brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { - struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_cfg80211_info *cfg = drvr->config; struct brcmf_pno_scanresults_le *pfn_result; struct brcmf_pno_net_info_le *netinfo; @@ -3468,12 +3527,14 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, } if (le32_to_cpu(pfn_result->count) < 1) { - brcmf_err("Invalid result count, expected 1 (%d)\n", - le32_to_cpu(pfn_result->count)); + bphy_err(drvr, "Invalid result count, expected 1 (%d)\n", + le32_to_cpu(pfn_result->count)); return -EINVAL; } netinfo = brcmf_get_netinfo_array(pfn_result); + if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN) + netinfo->SSID_len = IEEE80211_MAX_SSID_LEN; memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len); cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len; cfg->wowl.nd->n_channels = 1; @@ -3496,6 +3557,7 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_wowl_wakeind_le wake_ind_le; struct cfg80211_wowlan_wakeup wakeup_data; struct cfg80211_wowlan_wakeup *wakeup; @@ -3506,7 +3568,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) err = brcmf_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le, sizeof(wake_ind_le)); if (err) { - brcmf_err("Get wowl_wakeind failed, err = %d\n", err); + bphy_err(drvr, "Get wowl_wakeind failed, err = %d\n", err); return; } @@ -3547,7 +3609,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) cfg->wowl.nd_data_completed, BRCMF_ND_INFO_TIMEOUT); if (!timeout) - brcmf_err("No result for wowl net detect\n"); + bphy_err(drvr, "No result for wowl net detect\n"); else wakeup_data.net_detect = cfg->wowl.nd_info; } @@ -3736,6 +3798,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pmksa *pmk = &cfg->pmk_list.pmk[0]; + struct brcmf_pub *drvr = cfg->pub; s32 err; u32 npmk, i; @@ -3755,7 +3818,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, cfg->pmk_list.npmk = cpu_to_le32(npmk); } } else { - brcmf_err("Too many PMKSA entries cached %d\n", npmk); + bphy_err(drvr, "Too many PMKSA entries cached %d\n", npmk); 
return -EINVAL; } @@ -3778,6 +3841,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_pmksa *pmk = &cfg->pmk_list.pmk[0]; + struct brcmf_pub *drvr = cfg->pub; s32 err; u32 npmk, i; @@ -3801,7 +3865,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, memset(&pmk[i], 0, sizeof(*pmk)); cfg->pmk_list.npmk = cpu_to_le32(npmk - 1); } else { - brcmf_err("Cache entry not found\n"); + bphy_err(drvr, "Cache entry not found\n"); return -EINVAL; } @@ -3833,19 +3897,20 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; s32 err; s32 wpa_val; /* set auth */ err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0); if (err < 0) { - brcmf_err("auth error %d\n", err); + bphy_err(drvr, "auth error %d\n", err); return err; } /* set wsec */ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", 0); if (err < 0) { - brcmf_err("wsec error %d\n", err); + bphy_err(drvr, "wsec error %d\n", err); return err; } /* set upper-layer auth */ @@ -3855,7 +3920,7 @@ static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) wpa_val = WPA_AUTH_DISABLED; err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_val); if (err < 0) { - brcmf_err("wpa_auth error %d\n", err); + bphy_err(drvr, "wpa_auth error %d\n", err); return err; } @@ -3875,6 +3940,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, const struct brcmf_vs_tlv *wpa_ie, bool is_rsn_ie) { + struct brcmf_pub *drvr = ifp->drvr; u32 auth = 0; /* d11 open authentication */ u16 count; s32 err = 0; @@ -3905,13 +3971,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, /* check for multicast cipher suite */ if (offset + WPA_IE_MIN_OUI_LEN > len) { err = -EINVAL; - brcmf_err("no multicast cipher suite\n"); + bphy_err(drvr, "no multicast cipher suite\n"); goto exit; } if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { err = -EINVAL; - brcmf_err("ivalid OUI\n"); + bphy_err(drvr, "ivalid OUI\n"); goto exit; } offset += TLV_OUI_LEN; @@ -3933,7 +3999,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, break; default: err = -EINVAL; - brcmf_err("Invalid multi cast cipher info\n"); + bphy_err(drvr, "Invalid multi cast cipher info\n"); goto exit; } @@ -3944,13 +4010,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, /* Check for unicast suite(s) */ if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) { err = -EINVAL; - brcmf_err("no unicast cipher suite\n"); + bphy_err(drvr, "no unicast cipher suite\n"); goto exit; } for (i = 0; i < count; i++) { if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { err = -EINVAL; - brcmf_err("ivalid OUI\n"); + bphy_err(drvr, "ivalid OUI\n"); goto exit; } offset += TLV_OUI_LEN; @@ -3968,7 +4034,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, pval |= AES_ENABLED; break; default: - brcmf_err("Invalid unicast security info\n"); + bphy_err(drvr, "Invalid unicast security info\n"); } offset++; } @@ -3978,13 +4044,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, /* Check for auth key management suite(s) */ if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) { err = -EINVAL; - brcmf_err("no auth key mgmt suite\n"); + bphy_err(drvr, "no auth key mgmt suite\n"); goto exit; } for (i = 0; i < count; i++) { if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { err = -EINVAL; - brcmf_err("ivalid OUI\n"); + bphy_err(drvr, "ivalid OUI\n"); goto exit; } offset += TLV_OUI_LEN; @@ -4012,7 +4078,7 @@ 
brcmf_configure_wpaie(struct brcmf_if *ifp, wpa_auth |= WPA2_AUTH_1X_SHA256; break; default: - brcmf_err("Invalid key mgmt info\n"); + bphy_err(drvr, "Invalid key mgmt info\n"); } offset++; } @@ -4054,7 +4120,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable", wme_bss_disable); if (err < 0) { - brcmf_err("wme_bss_disable error %d\n", err); + bphy_err(drvr, "wme_bss_disable error %d\n", err); goto exit; } @@ -4068,7 +4134,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, &data[offset], WPA_IE_MIN_OUI_LEN); if (err < 0) { - brcmf_err("bip error %d\n", err); + bphy_err(drvr, "bip error %d\n", err); goto exit; } } @@ -4079,13 +4145,13 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, /* set auth */ err = brcmf_fil_bsscfg_int_set(ifp, "auth", auth); if (err < 0) { - brcmf_err("auth error %d\n", err); + bphy_err(drvr, "auth error %d\n", err); goto exit; } /* set wsec */ err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec); if (err < 0) { - brcmf_err("wsec error %d\n", err); + bphy_err(drvr, "wsec error %d\n", err); goto exit; } /* Configure MFP, this needs to go after wsec otherwise the wsec command @@ -4094,14 +4160,14 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) { err = brcmf_fil_bsscfg_int_set(ifp, "mfp", mfp); if (err < 0) { - brcmf_err("mfp error %d\n", err); + bphy_err(drvr, "mfp error %d\n", err); goto exit; } } /* set upper-layer auth */ err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth); if (err < 0) { - brcmf_err("wpa_auth error %d\n", err); + bphy_err(drvr, "wpa_auth error %d\n", err); goto exit; } @@ -4187,6 +4253,7 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd) s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, const u8 *vndr_ie_buf, u32 vndr_ie_len) { + struct brcmf_pub *drvr; struct brcmf_if *ifp; struct vif_saved_ie *saved_ie; s32 err = 0; @@ -4208,6 +4275,7 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, if (!vif) return -ENODEV; ifp = vif->ifp; + drvr = ifp->drvr; saved_ie = &vif->saved_ie; brcmf_dbg(TRACE, "bsscfgidx %d, pktflag : 0x%02X\n", ifp->bsscfgidx, @@ -4239,13 +4307,13 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, break; default: err = -EPERM; - brcmf_err("not suitable type\n"); + bphy_err(drvr, "not suitable type\n"); goto exit; } if (vndr_ie_len > mgmt_ie_buf_len) { err = -ENOMEM; - brcmf_err("extra IE size too big\n"); + bphy_err(drvr, "extra IE size too big\n"); goto exit; } @@ -4306,8 +4374,8 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, /* verify remained buf size before copy data */ if (remained_buf_len < (vndrie_info->vndrie.len + VNDR_IE_VSIE_OFFSET)) { - brcmf_err("no space in mgmt_ie_buf: len left %d", - remained_buf_len); + bphy_err(drvr, "no space in mgmt_ie_buf: len left %d", + remained_buf_len); break; } remained_buf_len -= (vndrie_info->ie_len + @@ -4338,7 +4406,7 @@ s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, err = brcmf_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf, total_ie_buf_len); if (err) - brcmf_err("vndr ie set error : %d\n", err); + bphy_err(drvr, "vndr ie set error : %d\n", err); } exit: @@ -4366,13 +4434,14 @@ static s32 brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif, struct cfg80211_beacon_data *beacon) { + struct brcmf_pub *drvr = vif->ifp->drvr; s32 err; /* Set Beacon IEs to FW */ err = brcmf_vif_set_mgmt_ie(vif, BRCMF_VNDR_IE_BEACON_FLAG, beacon->tail, 
beacon->tail_len); if (err) { - brcmf_err("Set Beacon IE Failed\n"); + bphy_err(drvr, "Set Beacon IE Failed\n"); return err; } brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n"); @@ -4382,7 +4451,7 @@ brcmf_config_ap_mgmt_ie(struct brcmf_cfg80211_vif *vif, beacon->proberesp_ies, beacon->proberesp_ies_len); if (err) - brcmf_err("Set Probe Resp IE Failed\n"); + bphy_err(drvr, "Set Probe Resp IE Failed\n"); else brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n"); @@ -4396,6 +4465,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, s32 ie_offset; struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; const struct brcmf_tlv *ssid_ie; const struct brcmf_tlv *country_ie; struct brcmf_ssid_le ssid_le; @@ -4491,7 +4561,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, is_11d); if (err < 0) { - brcmf_err("Regulatory Set Error, %d\n", err); + bphy_err(drvr, "Regulatory Set Error, %d\n", + err); goto exit; } } @@ -4499,8 +4570,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, settings->beacon_interval); if (err < 0) { - brcmf_err("Beacon Interval Set Error, %d\n", - err); + bphy_err(drvr, "Beacon Interval Set Error, %d\n", + err); goto exit; } } @@ -4508,7 +4579,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD, settings->dtim_period); if (err < 0) { - brcmf_err("DTIM Interval Set Error, %d\n", err); + bphy_err(drvr, "DTIM Interval Set Error, %d\n", + err); goto exit; } } @@ -4518,7 +4590,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, !brcmf_feat_is_enabled(ifp, BRCMF_FEAT_RSDB))) { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); if (err < 0) { - brcmf_err("BRCMF_C_DOWN error %d\n", err); + bphy_err(drvr, "BRCMF_C_DOWN error %d\n", + err); goto exit; } brcmf_fil_iovar_int_set(ifp, "apsta", 0); @@ -4526,7 +4599,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1); if (err < 0) { - brcmf_err("SET INFRA error %d\n", err); + bphy_err(drvr, "SET INFRA error %d\n", err); goto exit; } } else if (WARN_ON(supports_11d && (is_11d != ifp->vif->is_11d))) { @@ -4542,7 +4615,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1); if (err < 0) { - brcmf_err("setting AP mode failed %d\n", err); + bphy_err(drvr, "setting AP mode failed %d\n", + err); goto exit; } if (!mbss) { @@ -4551,14 +4625,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, */ err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); if (err < 0) { - brcmf_err("Set Channel failed: chspec=%d, %d\n", - chanspec, err); + bphy_err(drvr, "Set Channel failed: chspec=%d, %d\n", + chanspec, err); goto exit; } } err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); if (err < 0) { - brcmf_err("BRCMF_C_UP error (%d)\n", err); + bphy_err(drvr, "BRCMF_C_UP error (%d)\n", err); goto exit; } /* On DOWN the firmware removes the WEP keys, reconfigure @@ -4573,14 +4647,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, &join_params, sizeof(join_params)); if (err < 0) { - brcmf_err("SET SSID error (%d)\n", err); + bphy_err(drvr, "SET SSID error (%d)\n", err); 
goto exit; } if (settings->hidden_ssid) { err = brcmf_fil_iovar_int_set(ifp, "closednet", 1); if (err) { - brcmf_err("closednet error (%d)\n", err); + bphy_err(drvr, "closednet error (%d)\n", err); goto exit; } } @@ -4589,14 +4663,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } else if (dev_role == NL80211_IFTYPE_P2P_GO) { err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); if (err < 0) { - brcmf_err("Set Channel failed: chspec=%d, %d\n", - chanspec, err); + bphy_err(drvr, "Set Channel failed: chspec=%d, %d\n", + chanspec, err); goto exit; } err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le, sizeof(ssid_le)); if (err < 0) { - brcmf_err("setting ssid failed %d\n", err); + bphy_err(drvr, "setting ssid failed %d\n", err); goto exit; } bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx); @@ -4604,7 +4678,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable, sizeof(bss_enable)); if (err < 0) { - brcmf_err("bss_enable config failed %d\n", err); + bphy_err(drvr, "bss_enable config failed %d\n", err); goto exit; } @@ -4627,7 +4701,9 @@ exit: static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_pub *drvr = cfg->pub; s32 err; struct brcmf_fil_bss_enable_le bss_enable; struct brcmf_join_params join_params; @@ -4652,13 +4728,13 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, &join_params, sizeof(join_params)); if (err < 0) - brcmf_err("SET SSID error (%d)\n", err); + bphy_err(drvr, "SET SSID error (%d)\n", err); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); if (err < 0) - brcmf_err("BRCMF_C_DOWN error %d\n", err); + bphy_err(drvr, "BRCMF_C_DOWN error %d\n", err); err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); if (err < 0) - brcmf_err("setting AP mode failed %d\n", err); + bphy_err(drvr, "setting AP mode failed %d\n", err); if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) brcmf_fil_iovar_int_set(ifp, "mbss", 0); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, @@ -4666,7 +4742,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) /* Bring device back up so it can be used again */ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); if (err < 0) - brcmf_err("BRCMF_C_UP error %d\n", err); + bphy_err(drvr, "BRCMF_C_UP error %d\n", err); brcmf_vif_clear_mgmt_ies(ifp->vif); } else { @@ -4675,7 +4751,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) err = brcmf_fil_iovar_data_set(ifp, "bss", &bss_enable, sizeof(bss_enable)); if (err < 0) - brcmf_err("bss_enable config failed %d\n", err); + bphy_err(drvr, "bss_enable config failed %d\n", err); } brcmf_set_mpc(ifp, 1); brcmf_configure_arp_nd_offload(ifp, true); @@ -4704,6 +4780,7 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, struct station_del_parameters *params) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_scb_val_le scbval; struct brcmf_if *ifp = netdev_priv(ndev); s32 err; @@ -4723,7 +4800,8 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON, &scbval, sizeof(scbval)); if (err) - brcmf_err("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err); + bphy_err(drvr, 
"SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", + err); brcmf_dbg(TRACE, "Exit\n"); return err; @@ -4733,6 +4811,8 @@ static int brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev, const u8 *mac, struct station_parameters *params) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp = netdev_priv(ndev); s32 err; @@ -4753,7 +4833,7 @@ brcmf_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev, err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SCB_DEAUTHORIZE, (void *)mac, ETH_ALEN); if (err < 0) - brcmf_err("Setting SCB (de-)authorize failed, %d\n", err); + bphy_err(drvr, "Setting SCB (de-)authorize failed, %d\n", err); return err; } @@ -4783,6 +4863,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct ieee80211_channel *chan = params->chan; + struct brcmf_pub *drvr = cfg->pub; const u8 *buf = params->buf; size_t len = params->len; const struct ieee80211_mgmt *mgmt; @@ -4803,7 +4884,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, mgmt = (const struct ieee80211_mgmt *)buf; if (!ieee80211_is_mgmt(mgmt->frame_control)) { - brcmf_err("Driver only allows MGMT packet type\n"); + bphy_err(drvr, "Driver only allows MGMT packet type\n"); return -EPERM; } @@ -4834,13 +4915,13 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, GFP_KERNEL); } else if (ieee80211_is_action(mgmt->frame_control)) { if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) { - brcmf_err("invalid action frame length\n"); + bphy_err(drvr, "invalid action frame length\n"); err = -EINVAL; goto exit; } af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); if (af_params == NULL) { - brcmf_err("unable to allocate frame\n"); + bphy_err(drvr, "unable to allocate frame\n"); err = -ENOMEM; goto exit; } @@ -4891,6 +4972,7 @@ brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, u64 cookie) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_vif *vif; int err = 0; @@ -4898,7 +4980,7 @@ brcmf_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; if (vif == NULL) { - brcmf_err("No p2p device available for probe response\n"); + bphy_err(drvr, "No p2p device available for probe response\n"); err = -ENODEV; goto exit; } @@ -4913,6 +4995,7 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy, { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct net_device *ndev = wdev->netdev; + struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp; struct brcmu_chan ch; enum nl80211_band band = 0; @@ -4926,7 +5009,7 @@ static int brcmf_cfg80211_get_channel(struct wiphy *wiphy, err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec); if (err) { - brcmf_err("chanspec failed (%d)\n", err); + bphy_err(drvr, "chanspec failed (%d)\n", err); return err; } @@ -5048,6 +5131,8 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *ndev, const u8 *peer, enum nl80211_tdls_operation oper) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp; struct brcmf_tdls_iovar_le info; int ret = 0; @@ -5065,7 +5150,7 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint", &info, sizeof(info)); if (ret < 0) - brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret); + 
bphy_err(drvr, "tdls_endpoint iovar failed: ret=%d\n", ret); return ret; } @@ -5076,6 +5161,8 @@ brcmf_cfg80211_update_conn_params(struct wiphy *wiphy, struct cfg80211_connect_params *sme, u32 changed) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp; int err; @@ -5086,7 +5173,7 @@ brcmf_cfg80211_update_conn_params(struct wiphy *wiphy, err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG, sme->ie, sme->ie_len); if (err) - brcmf_err("Set Assoc REQ IE Failed\n"); + bphy_err(drvr, "Set Assoc REQ IE Failed\n"); else brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n"); @@ -5098,6 +5185,8 @@ static int brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_gtk_rekey_data *gtk) { + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_gtk_keyinfo_le gtk_le; int ret; @@ -5112,7 +5201,7 @@ brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev, ret = brcmf_fil_iovar_data_set(ifp, "gtk_key_info", &gtk_le, sizeof(gtk_le)); if (ret < 0) - brcmf_err("gtk_key_info iovar failed: ret=%d\n", ret); + bphy_err(drvr, "gtk_key_info iovar failed: ret=%d\n", ret); return ret; } @@ -5344,6 +5433,7 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg) static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp) { + struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_assoc_ielen_le *assoc_info; struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); u32 req_len; @@ -5355,7 +5445,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, err = brcmf_fil_iovar_data_get(ifp, "assoc_info", cfg->extra_buf, WL_ASSOC_INFO_MAX); if (err) { - brcmf_err("could not get assoc info (%d)\n", err); + bphy_err(drvr, "could not get assoc info (%d)\n", err); return err; } assoc_info = @@ -5367,7 +5457,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, cfg->extra_buf, WL_ASSOC_INFO_MAX); if (err) { - brcmf_err("could not get assoc req (%d)\n", err); + bphy_err(drvr, "could not get assoc req (%d)\n", err); return err; } conn_info->req_ie_len = req_len; @@ -5383,7 +5473,7 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg, cfg->extra_buf, WL_ASSOC_INFO_MAX); if (err) { - brcmf_err("could not get assoc resp (%d)\n", err); + bphy_err(drvr, "could not get assoc resp (%d)\n", err); return err; } conn_info->resp_ie_len = resp_len; @@ -5510,6 +5600,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { + struct brcmf_pub *drvr = cfg->pub; static int generation; u32 event = e->event_code; u32 reason = e->reason; @@ -5527,7 +5618,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, if (((event == BRCMF_E_ASSOC_IND) || (event == BRCMF_E_REASSOC_IND)) && (reason == BRCMF_E_STATUS_SUCCESS)) { if (!data) { - brcmf_err("No IEs present in ASSOC/REASSOC_IND"); + bphy_err(drvr, "No IEs present in ASSOC/REASSOC_IND\n"); return -EINVAL; } @@ -5819,6 +5910,7 @@ static void init_vif_event(struct brcmf_cfg80211_vif_event *event) static s32 brcmf_dongle_roam(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; s32 err; u32 bcn_timeout; __le32 roamtrigger[2]; @@ -5831,7 +5923,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp) bcn_timeout = BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON; err = brcmf_fil_iovar_int_set(ifp,
"bcn_timeout", bcn_timeout); if (err) { - brcmf_err("bcn_timeout error (%d)\n", err); + bphy_err(drvr, "bcn_timeout error (%d)\n", err); goto roam_setup_done; } @@ -5843,7 +5935,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp) err = brcmf_fil_iovar_int_set(ifp, "roam_off", ifp->drvr->settings->roamoff); if (err) { - brcmf_err("roam_off error (%d)\n", err); + bphy_err(drvr, "roam_off error (%d)\n", err); goto roam_setup_done; } @@ -5852,7 +5944,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp) err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER, (void *)roamtrigger, sizeof(roamtrigger)); if (err) { - brcmf_err("WLC_SET_ROAM_TRIGGER error (%d)\n", err); + bphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d)\n", err); goto roam_setup_done; } @@ -5861,7 +5953,7 @@ static s32 brcmf_dongle_roam(struct brcmf_if *ifp) err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA, (void *)roam_delta, sizeof(roam_delta)); if (err) { - brcmf_err("WLC_SET_ROAM_DELTA error (%d)\n", err); + bphy_err(drvr, "WLC_SET_ROAM_DELTA error (%d)\n", err); goto roam_setup_done; } @@ -5872,25 +5964,26 @@ roam_setup_done: static s32 brcmf_dongle_scantime(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; s32 err = 0; err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME, BRCMF_SCAN_CHANNEL_TIME); if (err) { - brcmf_err("Scan assoc time error (%d)\n", err); + bphy_err(drvr, "Scan assoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME, BRCMF_SCAN_UNASSOC_TIME); if (err) { - brcmf_err("Scan unassoc time error (%d)\n", err); + bphy_err(drvr, "Scan unassoc time error (%d)\n", err); goto dongle_scantime_out; } err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME, BRCMF_SCAN_PASSIVE_TIME); if (err) { - brcmf_err("Scan passive time error (%d)\n", err); + bphy_err(drvr, "Scan passive time error (%d)\n", err); goto dongle_scantime_out; } @@ -5922,10 +6015,11 @@ static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel, static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, u32 bw_cap[]) { - struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); + struct wiphy *wiphy = cfg_to_wiphy(cfg); + struct brcmf_pub *drvr = cfg->pub; + struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); struct ieee80211_supported_band *band; struct ieee80211_channel *channel; - struct wiphy *wiphy; struct brcmf_chanspec_list *list; struct brcmu_chan ch; int err; @@ -5944,11 +6038,10 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf, BRCMF_DCMD_MEDLEN); if (err) { - brcmf_err("get chanspecs error (%d)\n", err); + bphy_err(drvr, "get chanspecs error (%d)\n", err); goto fail_pbuf; } - wiphy = cfg_to_wiphy(cfg); band = wiphy->bands[NL80211_BAND_2GHZ]; if (band) for (i = 0; i < band->n_channels; i++) @@ -5968,7 +6061,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, } else if (ch.band == BRCMU_CHAN_BAND_5G) { band = wiphy->bands[NL80211_BAND_5GHZ]; } else { - brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec); + bphy_err(drvr, "Invalid channel Spec. 0x%x.\n", + ch.chspec); continue; } if (!band) @@ -5991,8 +6085,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, /* It seems firmware supports some channel we never * considered. Something new in IEEE standard? 
*/ - brcmf_err("Ignoring unexpected firmware channel %d\n", - ch.control_ch_num); + bphy_err(drvr, "Ignoring unexpected firmware channel %d\n", + ch.control_ch_num); continue; } @@ -6002,11 +6096,21 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. */ - if (ch.bw == BRCMU_CHAN_BW_80) { + switch (ch.bw) { + case BRCMU_CHAN_BW_160: + channel->flags &= ~IEEE80211_CHAN_NO_160MHZ; + break; + case BRCMU_CHAN_BW_80: channel->flags &= ~IEEE80211_CHAN_NO_80MHZ; - } else if (ch.bw == BRCMU_CHAN_BW_40) { + break; + case BRCMU_CHAN_BW_40: brcmf_update_bw40_channel_flag(channel, &ch); - } else { + break; + default: + wiphy_warn(wiphy, "Firmware reported unsupported bandwidth %d\n", + ch.bw); + /* fall through */ + case BRCMU_CHAN_BW_20: /* enable the channel and disable other bandwidths * for now as mentioned order assure they are enabled * for subsequent chanspecs. @@ -6038,7 +6142,8 @@ fail_pbuf: static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) { - struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); + struct brcmf_pub *drvr = cfg->pub; + struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); struct ieee80211_supported_band *band; struct brcmf_fil_bwcap_le band_bwcap; struct brcmf_chanspec_list *list; @@ -6084,7 +6189,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) err = brcmf_fil_iovar_data_get(ifp, "chanspecs", pbuf, BRCMF_DCMD_MEDLEN); if (err) { - brcmf_err("get chanspecs error (%d)\n", err); + bphy_err(drvr, "get chanspecs error (%d)\n", err); kfree(pbuf); return err; } @@ -6115,6 +6220,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) { + struct brcmf_pub *drvr = ifp->drvr; u32 band, mimo_bwcap; int err; @@ -6150,7 +6256,7 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[]) bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT; break; default: - brcmf_err("invalid mimo_bw_cap value\n"); + bphy_err(drvr, "invalid mimo_bw_cap value\n"); } } @@ -6225,8 +6331,9 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band, static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) { - struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); - struct wiphy *wiphy; + struct brcmf_pub *drvr = cfg->pub; + struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); + struct wiphy *wiphy = cfg_to_wiphy(cfg); u32 nmode = 0; u32 vhtmode = 0; u32 bw_cap[2] = { WLC_BW_20MHZ_BIT, WLC_BW_20MHZ_BIT }; @@ -6242,7 +6349,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) (void)brcmf_fil_iovar_int_get(ifp, "vhtmode", &vhtmode); err = brcmf_fil_iovar_int_get(ifp, "nmode", &nmode); if (err) { - brcmf_err("nmode error (%d)\n", err); + bphy_err(drvr, "nmode error (%d)\n", err); } else { brcmf_get_bwcap(ifp, bw_cap); } @@ -6252,7 +6359,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain); if (err) { - brcmf_err("rxchain error (%d)\n", err); + bphy_err(drvr, "rxchain error (%d)\n", err); nchain = 1; } else { for (nchain = 0; rxchain; nchain++) @@ -6262,7 +6369,7 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) err = brcmf_construct_chaninfo(cfg, bw_cap); if (err) { - brcmf_err("brcmf_construct_chaninfo failed (%d)\n", err); + bphy_err(drvr, "brcmf_construct_chaninfo failed (%d)\n", err); return err; } @@ -6274,7 +6381,6 @@ static int brcmf_setup_wiphybands(struct brcmf_cfg80211_info *cfg) 
&txbf_bfr_cap); } - wiphy = cfg_to_wiphy(cfg); for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) { band = wiphy->bands[i]; if (band == NULL) @@ -6470,12 +6576,13 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp) { #ifdef CONFIG_PM struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct brcmf_pub *drvr = cfg->pub; struct wiphy_wowlan_support *wowl; wowl = kmemdup(&brcmf_wowlan_support, sizeof(brcmf_wowlan_support), GFP_KERNEL); if (!wowl) { - brcmf_err("only support basic wowlan features\n"); + bphy_err(drvr, "only support basic wowlan features\n"); wiphy->wowlan = &brcmf_wowlan_support; return; } @@ -6572,7 +6679,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist, sizeof(bandlist)); if (err) { - brcmf_err("could not obtain band info: err=%d\n", err); + bphy_err(drvr, "could not obtain band info: err=%d\n", err); return err; } /* first entry in bandlist is number of bands */ @@ -6621,6 +6728,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) { + struct brcmf_pub *drvr = cfg->pub; struct net_device *ndev; struct wireless_dev *wdev; struct brcmf_if *ifp; @@ -6658,7 +6766,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_FAKEFRAG, 1); if (err) { - brcmf_err("failed to set frameburst mode\n"); + bphy_err(drvr, "failed to set frameburst mode\n"); goto default_conf_out; } @@ -6839,6 +6947,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = brcmf_get_ifp(cfg->pub, 0); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_fil_country_le ccreq; s32 err; int i; @@ -6850,8 +6959,8 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, /* ignore non-ISO3166 country codes */ for (i = 0; i < 2; i++) if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { - brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n", - req->alpha2[0], req->alpha2[1]); + bphy_err(drvr, "not an ISO3166 code (0x%02x 0x%02x)\n", + req->alpha2[0], req->alpha2[1]); return; } @@ -6860,7 +6969,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, err = brcmf_fil_iovar_data_get(ifp, "country", &ccreq, sizeof(ccreq)); if (err) { - brcmf_err("Country code iovar returned err = %d\n", err); + bphy_err(drvr, "Country code iovar returned err = %d\n", err); return; } @@ -6870,7 +6979,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, err = brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq)); if (err) { - brcmf_err("Firmware rejected country setting\n"); + bphy_err(drvr, "Firmware rejected country setting\n"); return; } brcmf_setup_wiphybands(cfg); @@ -6916,13 +7025,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, u16 *cap = NULL; if (!ndev) { - brcmf_err("ndev is invalid\n"); + bphy_err(drvr, "ndev is invalid\n"); return NULL; } cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); if (!cfg) { - brcmf_err("Could not allocate wiphy device\n"); + bphy_err(drvr, "Could not allocate wiphy device\n"); return NULL; } @@ -6943,7 +7052,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, err = wl_init_priv(cfg); if (err) { - brcmf_err("Failed to init iwm_priv (%d)\n", err); + bphy_err(drvr, "Failed to init iwm_priv (%d)\n", err); brcmf_free_vif(vif); goto wiphy_out; } @@ -6952,7 +7061,7 @@ struct 
brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, /* determine d11 io type before wiphy setup */ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION, &io_type); if (err) { - brcmf_err("Failed to get D11 version (%d)\n", err); + bphy_err(drvr, "Failed to get D11 version (%d)\n", err); goto priv_out; } cfg->d11inf.io_type = (u8)io_type; @@ -6986,13 +7095,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, #endif err = wiphy_register(wiphy); if (err < 0) { - brcmf_err("Could not register wiphy device (%d)\n", err); + bphy_err(drvr, "Could not register wiphy device (%d)\n", err); goto priv_out; } err = brcmf_setup_wiphybands(cfg); if (err) { - brcmf_err("Setting wiphy bands failed (%d)\n", err); + bphy_err(drvr, "Setting wiphy bands failed (%d)\n", err); goto wiphy_unreg_out; } @@ -7010,24 +7119,24 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, err = brcmf_fweh_activate_events(ifp); if (err) { - brcmf_err("FWEH activation failed (%d)\n", err); + bphy_err(drvr, "FWEH activation failed (%d)\n", err); goto wiphy_unreg_out; } err = brcmf_p2p_attach(cfg, p2pdev_forced); if (err) { - brcmf_err("P2P initialisation failed (%d)\n", err); + bphy_err(drvr, "P2P initialisation failed (%d)\n", err); goto wiphy_unreg_out; } err = brcmf_btcoex_attach(cfg); if (err) { - brcmf_err("BT-coex initialisation failed (%d)\n", err); + bphy_err(drvr, "BT-coex initialisation failed (%d)\n", err); brcmf_p2p_detach(&cfg->p2p); goto wiphy_unreg_out; } err = brcmf_pno_attach(cfg); if (err) { - brcmf_err("PNO initialisation failed (%d)\n", err); + bphy_err(drvr, "PNO initialisation failed (%d)\n", err); brcmf_btcoex_detach(cfg); brcmf_p2p_detach(&cfg->p2p); goto wiphy_unreg_out; @@ -7047,7 +7156,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, /* (re-) activate FWEH event handling */ err = brcmf_fweh_activate_events(ifp); if (err) { - brcmf_err("FWEH activation failed (%d)\n", err); + bphy_err(drvr, "FWEH activation failed (%d)\n", err); goto detach; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 1f1e95a15a17..96b8d5b3aeed 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -90,6 +90,7 @@ struct brcmf_mp_global_t brcmf_mp_global; void brcmf_c_set_joinpref_default(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_join_pref_params join_pref_params[2]; int err; @@ -106,7 +107,7 @@ void brcmf_c_set_joinpref_default(struct brcmf_if *ifp) err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params, sizeof(join_pref_params)); if (err) - brcmf_err("Set join_pref error (%d)\n", err); + bphy_err(drvr, "Set join_pref error (%d)\n", err); } static int brcmf_c_download(struct brcmf_if *ifp, u16 flag, @@ -129,7 +130,8 @@ static int brcmf_c_download(struct brcmf_if *ifp, u16 flag, static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) { - struct brcmf_bus *bus = ifp->drvr->bus_if; + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_bus *bus = drvr->bus_if; struct brcmf_dload_data_le *chunk_buf; const struct firmware *clm = NULL; u8 clm_name[BRCMF_FW_NAME_LEN]; @@ -145,11 +147,11 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) memset(clm_name, 0, sizeof(clm_name)); err = brcmf_bus_get_fwname(bus, ".clm_blob", clm_name); if (err) { - brcmf_err("get CLM blob file name failed (%d)\n", err); + bphy_err(drvr, "get CLM 
blob file name failed (%d)\n", err); return err; } - err = request_firmware(&clm, clm_name, bus->dev); + err = firmware_request_nowarn(&clm, clm_name, bus->dev); if (err) { brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n", err); @@ -182,12 +184,12 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp) } while ((datalen > 0) && (err == 0)); if (err) { - brcmf_err("clmload (%zu byte file) failed (%d); ", - clm->size, err); + bphy_err(drvr, "clmload (%zu byte file) failed (%d)\n", + clm->size, err); /* Retrieve clmload_status and print */ err = brcmf_fil_iovar_int_get(ifp, "clmload_status", &status); if (err) - brcmf_err("get clmload_status failed (%d)\n", err); + bphy_err(drvr, "get clmload_status failed (%d)\n", err); else brcmf_dbg(INFO, "clmload_status=%d\n", status); err = -EIO; @@ -201,6 +203,7 @@ done: int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; s8 eventmask[BRCMF_EVENTING_MASK_LEN]; u8 buf[BRCMF_DCMD_SMLEN]; struct brcmf_bus *bus; @@ -214,7 +217,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, sizeof(ifp->mac_addr)); if (err < 0) { - brcmf_err("Retrieving cur_etheraddr failed, %d\n", err); + bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err); goto done; } memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN); @@ -226,7 +229,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_REVINFO, &revinfo, sizeof(revinfo)); if (err < 0) { - brcmf_err("retrieving revision info failed, %d\n", err); + bphy_err(drvr, "retrieving revision info failed, %d\n", err); strlcpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname)); } else { ri->vendorid = le32_to_cpu(revinfo.vendorid); @@ -260,7 +263,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) /* Do any CLM downloading */ err = brcmf_c_process_clm_blob(ifp); if (err < 0) { - brcmf_err("download CLM blob file failed, %d\n", err); + bphy_err(drvr, "download CLM blob file failed, %d\n", err); goto done; } @@ -269,8 +272,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) strcpy(buf, "ver"); err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf)); if (err < 0) { - brcmf_err("Retrieving version information failed, %d\n", - err); + bphy_err(drvr, "Retrieving version information failed, %d\n", + err); goto done; } ptr = (char *)buf; @@ -304,7 +307,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) /* set mpc */ err = brcmf_fil_iovar_int_set(ifp, "mpc", 1); if (err) { - brcmf_err("failed setting mpc\n"); + bphy_err(drvr, "failed setting mpc\n"); goto done; } @@ -314,14 +317,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN); if (err) { - brcmf_err("Get event_msgs error (%d)\n", err); + bphy_err(drvr, "Get event_msgs error (%d)\n", err); goto done; } setbit(eventmask, BRCMF_E_IF); err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN); if (err) { - brcmf_err("Set event_msgs error (%d)\n", err); + bphy_err(drvr, "Set event_msgs error (%d)\n", err); goto done; } @@ -329,8 +332,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME, BRCMF_DEFAULT_SCAN_CHANNEL_TIME); if (err) { - brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n", - err); + bphy_err(drvr, "BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n", + err); goto done; } @@ -338,8 +341,8 @@ int 
brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME, BRCMF_DEFAULT_SCAN_UNASSOC_TIME); if (err) { - brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n", - err); + bphy_err(drvr, "BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n", + err); goto done; } @@ -350,7 +353,7 @@ done: } #ifndef CONFIG_BRCM_TRACING -void __brcmf_err(const char *func, const char *fmt, ...) +void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...) { struct va_format vaf; va_list args; @@ -359,7 +362,10 @@ void __brcmf_err(const char *func, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &args; - pr_err("%s: %pV", func, &vaf); + if (bus) + dev_err(bus->dev, "%s: %pV", func, &vaf); + else + pr_err("%s: %pV", func, &vaf); va_end(args); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 860a4372cb56..4fbe8791f674 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -43,6 +43,36 @@ #define BRCMF_BSSIDX_INVALID -1 +#define RXS_PBPRES BIT(2) + +#define D11_PHY_HDR_LEN 6 + +struct d11rxhdr_le { + __le16 RxFrameSize; + u16 PAD; + __le16 PhyRxStatus_0; + __le16 PhyRxStatus_1; + __le16 PhyRxStatus_2; + __le16 PhyRxStatus_3; + __le16 PhyRxStatus_4; + __le16 PhyRxStatus_5; + __le16 RxStatus1; + __le16 RxStatus2; + __le16 RxTSFTime; + __le16 RxChan; + u8 unknown[12]; +} __packed; + +struct wlc_d11rxhdr { + struct d11rxhdr_le rxhdr; + __le32 tsf_l; + s8 rssi; + s8 rxpwr0; + s8 rxpwr1; + s8 do_rssi_ma; + s8 rxpwr[4]; +} __packed; + char *brcmf_ifname(struct brcmf_if *ifp) { if (!ifp) @@ -60,7 +90,7 @@ struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx) s32 bsscfgidx; if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) { - brcmf_err("ifidx %d out of range\n", ifidx); + bphy_err(drvr, "ifidx %d out of range\n", ifidx); return NULL; } @@ -111,7 +141,9 @@ void brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable) static void _brcmf_set_multicast_list(struct work_struct *work) { - struct brcmf_if *ifp; + struct brcmf_if *ifp = container_of(work, struct brcmf_if, + multicast_work); + struct brcmf_pub *drvr = ifp->drvr; struct net_device *ndev; struct netdev_hw_addr *ha; u32 cmd_value, cnt; @@ -120,8 +152,6 @@ static void _brcmf_set_multicast_list(struct work_struct *work) u32 buflen; s32 err; - ifp = container_of(work, struct brcmf_if, multicast_work); - brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); ndev = ifp->ndev; @@ -151,7 +181,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work) err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen); if (err < 0) { - brcmf_err("Setting mcast_list failed, %d\n", err); + bphy_err(drvr, "Setting mcast_list failed, %d\n", err); cmd_value = cnt ? true : cmd_value; } @@ -164,25 +194,25 @@ static void _brcmf_set_multicast_list(struct work_struct *work) */ err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value); if (err < 0) - brcmf_err("Setting allmulti failed, %d\n", err); + bphy_err(drvr, "Setting allmulti failed, %d\n", err); /*Finally, pick up the PROMISC flag */ cmd_value = (ndev->flags & IFF_PROMISC) ? 
true : false; err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value); if (err < 0) - brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n", - err); + bphy_err(drvr, "Setting BRCMF_C_SET_PROMISC failed, %d\n", + err); brcmf_configure_arp_nd_offload(ifp, !cmd_value); } #if IS_ENABLED(CONFIG_IPV6) static void _brcmf_update_ndtable(struct work_struct *work) { - struct brcmf_if *ifp; + struct brcmf_if *ifp = container_of(work, struct brcmf_if, + ndoffload_work); + struct brcmf_pub *drvr = ifp->drvr; int i, ret; - ifp = container_of(work, struct brcmf_if, ndoffload_work); - /* clear the table in firmware */ ret = brcmf_fil_iovar_data_set(ifp, "nd_hostip_clear", NULL, 0); if (ret) { @@ -195,7 +225,7 @@ static void _brcmf_update_ndtable(struct work_struct *work) &ifp->ipv6_addr_tbl[i], sizeof(struct in6_addr)); if (ret) - brcmf_err("add nd ip err %d\n", ret); + bphy_err(drvr, "add nd ip err %d\n", ret); } } #else @@ -208,6 +238,7 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr) { struct brcmf_if *ifp = netdev_priv(ndev); struct sockaddr *sa = (struct sockaddr *)addr; + struct brcmf_pub *drvr = ifp->drvr; int err; brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); @@ -215,7 +246,7 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr) err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", sa->sa_data, ETH_ALEN); if (err < 0) { - brcmf_err("Setting cur_etheraddr failed, %d\n", err); + bphy_err(drvr, "Setting cur_etheraddr failed, %d\n", err); } else { brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data); memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN); @@ -275,7 +306,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, /* Can the device send data? */ if (drvr->bus_if->state != BRCMF_BUS_UP) { - brcmf_err("xmit rejected state=%d\n", drvr->bus_if->state); + bphy_err(drvr, "xmit rejected state=%d\n", drvr->bus_if->state); netif_stop_queue(ndev); dev_kfree_skb(skb); ret = -ENODEV; @@ -309,8 +340,8 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0, GFP_ATOMIC); if (ret < 0) { - brcmf_err("%s: failed to expand headroom\n", - brcmf_ifname(ifp)); + bphy_err(drvr, "%s: failed to expand headroom\n", + brcmf_ifname(ifp)); atomic_inc(&drvr->bus_if->stats.pktcow_failed); goto done; } @@ -409,6 +440,31 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb) { if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_RADIOTAP)) { /* Do nothing */ + } else if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MONITOR_FMT_HW_RX_HDR)) { + struct wlc_d11rxhdr *wlc_rxhdr = (struct wlc_d11rxhdr *)skb->data; + struct ieee80211_radiotap_header *radiotap; + unsigned int offset; + u16 RxStatus1; + + RxStatus1 = le16_to_cpu(wlc_rxhdr->rxhdr.RxStatus1); + + offset = sizeof(struct wlc_d11rxhdr); + /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU + * subframes + */ + if (RxStatus1 & RXS_PBPRES) + offset += 2; + offset += D11_PHY_HDR_LEN; + + skb_pull(skb, offset); + + /* TODO: use RX header to fill some radiotap data */ + radiotap = skb_push(skb, sizeof(*radiotap)); + memset(radiotap, 0, sizeof(*radiotap)); + radiotap->it_len = cpu_to_le16(sizeof(*radiotap)); + + /* TODO: 4 bytes with receive status? 
*/ + skb->len -= 4; } else { struct ieee80211_radiotap_header *radiotap; @@ -464,7 +520,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event) } else { /* Process special event packets */ if (handle_event) - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, + BCMILCP_SUBTYPE_VENDOR_LONG); brcmf_netif_rx(ifp, skb); } @@ -481,7 +538,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb) if (brcmf_rx_hdrpull(drvr, skb, &ifp)) return; - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, 0); brcmu_pkt_buf_free_skb(skb); } @@ -551,7 +608,7 @@ static int brcmf_netdev_open(struct net_device *ndev) /* If bus is not ready, can't continue */ if (bus_if->state != BRCMF_BUS_UP) { - brcmf_err("failed bus is not ready\n"); + bphy_err(drvr, "failed bus is not ready\n"); return -EAGAIN; } @@ -565,7 +622,7 @@ static int brcmf_netdev_open(struct net_device *ndev) ndev->features &= ~NETIF_F_IP_CSUM; if (brcmf_cfg80211_up(ndev)) { - brcmf_err("failed to bring up cfg80211\n"); + bphy_err(drvr, "failed to bring up cfg80211\n"); return -EIO; } @@ -610,7 +667,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) else err = register_netdev(ndev); if (err != 0) { - brcmf_err("couldn't register the net device\n"); + bphy_err(drvr, "couldn't register the net device\n"); goto fail; } @@ -687,6 +744,7 @@ static const struct net_device_ops brcmf_netdev_ops_p2p = { static int brcmf_net_p2p_attach(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; struct net_device *ndev; brcmf_dbg(TRACE, "Enter, bsscfgidx=%d mac=%pM\n", ifp->bsscfgidx, @@ -699,7 +757,7 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp) memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN); if (register_netdev(ndev) != 0) { - brcmf_err("couldn't register the p2p net device\n"); + bphy_err(drvr, "couldn't register the p2p net device\n"); goto fail; } @@ -728,8 +786,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, */ if (ifp) { if (ifidx) { - brcmf_err("ERROR: netdev:%s already exists\n", - ifp->ndev->name); + bphy_err(drvr, "ERROR: netdev:%s already exists\n", + ifp->ndev->name); netif_stop_queue(ifp->ndev); brcmf_net_detach(ifp->ndev, false); drvr->iflist[bsscfgidx] = NULL; @@ -787,7 +845,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, ifp = drvr->iflist[bsscfgidx]; drvr->iflist[bsscfgidx] = NULL; if (!ifp) { - brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx); + bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx); return; } brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, @@ -837,16 +895,17 @@ static int brcmf_psm_watchdog_notify(struct brcmf_if *ifp, const struct brcmf_event_msg *evtmsg, void *data) { + struct brcmf_pub *drvr = ifp->drvr; int err; brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); - brcmf_err("PSM's watchdog has fired!\n"); + bphy_err(drvr, "PSM's watchdog has fired!\n"); err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data, evtmsg->datalen); if (err) - brcmf_err("Failed to get memory dump, %d\n", err); + bphy_err(drvr, "Failed to get memory dump, %d\n", err); return err; } @@ -890,7 +949,7 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb, ret = brcmf_fil_iovar_data_get(ifp, "arp_hostip", addr_table, sizeof(addr_table)); if (ret) { - brcmf_err("fail to get arp ip table err:%d\n", ret); + bphy_err(drvr, "fail to get arp ip table err:%d\n", ret); return NOTIFY_OK; } @@ -907,7 +966,7 @@ static int 
brcmf_inetaddr_changed(struct notifier_block *nb, ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip", &ifa->ifa_address, sizeof(ifa->ifa_address)); if (ret) - brcmf_err("add arp ip err %d\n", ret); + bphy_err(drvr, "add arp ip err %d\n", ret); } break; case NETDEV_DOWN: @@ -919,8 +978,8 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb, ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip_clear", NULL, 0); if (ret) { - brcmf_err("fail to clear arp ip table err:%d\n", - ret); + bphy_err(drvr, "fail to clear arp ip table err:%d\n", + ret); return NOTIFY_OK; } for (i = 0; i < ARPOL_MAX_ENTRIES; i++) { @@ -930,8 +989,8 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb, &addr_table[i], sizeof(addr_table[i])); if (ret) - brcmf_err("add arp ip err %d\n", - ret); + bphy_err(drvr, "add arp ip err %d\n", + ret); } } break; @@ -1100,11 +1159,12 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops) brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read); brcmf_feat_debugfs_create(drvr); brcmf_proto_debugfs_create(drvr); + brcmf_bus_debugfs_create(bus_if); return 0; fail: - brcmf_err("failed: %d\n", ret); + bphy_err(drvr, "failed: %d\n", ret); if (drvr->config) { brcmf_cfg80211_detach(drvr->config); drvr->config = NULL; @@ -1156,7 +1216,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) /* Attach and link in the protocol */ ret = brcmf_proto_attach(drvr); if (ret != 0) { - brcmf_err("brcmf_prot_attach failed\n"); + bphy_err(drvr, "brcmf_prot_attach failed\n"); goto fail; } @@ -1169,7 +1229,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) ret = brcmf_bus_started(drvr, ops); if (ret != 0) { - brcmf_err("dongle is not responding: err=%d\n", ret); + bphy_err(drvr, "dongle is not responding: err=%d\n", ret); goto fail; } @@ -1269,6 +1329,7 @@ static int brcmf_get_pend_8021x_cnt(struct brcmf_if *ifp) int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; int err; err = wait_event_timeout(ifp->pend_8021x_wait, @@ -1276,7 +1337,7 @@ int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp) MAX_WAIT_FOR_8021X_TX); if (!err) - brcmf_err("Timed out waiting for no pending 802.1x packets\n"); + bphy_err(drvr, "Timed out waiting for no pending 802.1x packets\n"); return !err; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index dcf6e27cc16f..d8085ce579f4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -36,7 +36,7 @@ #define BRCMF_DCMD_MEDLEN 1536 #define BRCMF_DCMD_MAXLEN 8192 -/* IOCTL from host to device are limited in lenght. A device can only handle +/* IOCTL from host to device are limited in length. A device can only handle * ethernet frame size. This limitation is to be applied by protocol layer. 
*/ #define BRCMF_TX_IOCTL_MAX_MSG_SIZE (ETH_FRAME_LEN+ETH_FCS_LEN) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h index cfed0626bf5a..2998726b62c3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h @@ -45,17 +45,30 @@ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -__printf(2, 3) -void __brcmf_err(const char *func, const char *fmt, ...); +struct brcmf_bus; + +__printf(3, 4) +void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...); /* Macro for error messages. When debugging / tracing the driver all error * messages are important to us. */ +#ifndef brcmf_err #define brcmf_err(fmt, ...) \ do { \ if (IS_ENABLED(CONFIG_BRCMDBG) || \ IS_ENABLED(CONFIG_BRCM_TRACING) || \ net_ratelimit()) \ - __brcmf_err(__func__, fmt, ##__VA_ARGS__); \ + __brcmf_err(NULL, __func__, fmt, ##__VA_ARGS__);\ + } while (0) +#endif + +#define bphy_err(drvr, fmt, ...) \ + do { \ + if (IS_ENABLED(CONFIG_BRCMDBG) || \ + IS_ENABLED(CONFIG_BRCM_TRACING) || \ + net_ratelimit()) \ + wiphy_err((drvr)->wiphy, "%s: " fmt, __func__, \ + ##__VA_ARGS__); \ } while (0) #if defined(DEBUG) || defined(CONFIG_BRCM_TRACING) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c index 51d76ac45075..7535cb0d4ac0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c @@ -43,6 +43,10 @@ static const struct brcmf_dmi_data meegopad_t08_data = { BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08" }; +static const struct brcmf_dmi_data pov_tab_p1006w_data = { + BRCM_CC_43340_CHIP_ID, 2, "pov-tab-p1006w-data" +}; + static const struct dmi_system_id dmi_platform_data[] = { { /* Match for the GPDwin which unfortunately uses somewhat @@ -81,6 +85,17 @@ static const struct dmi_system_id dmi_platform_data[] = { }, .driver_data = (void *)&meegopad_t08_data, }, + { + /* Point of View TAB-P1006W-232 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"), + /* Note 105b is Foxcon's USB/PCI vendor id */ + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), + }, + .driver_data = (void *)&pov_tab_p1006w_data, + }, {} }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 4c5a3995dc35..acca719b3907 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -103,6 +103,10 @@ static const struct brcmf_feat_fwfeat brcmf_feat_fwfeat_map[] = { { "01-6cb8e269", BIT(BRCMF_FEAT_MONITOR) }, /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 52442afee990 */ { "01-c47a91a4", BIT(BRCMF_FEAT_MONITOR) }, + /* brcmfmac4366b-pcie.bin from linux-firmware.git commit 211de1679a68 */ + { "01-801fb449", BIT(BRCMF_FEAT_MONITOR_FMT_HW_RX_HDR) }, + /* brcmfmac4366c-pcie.bin from linux-firmware.git commit 211de1679a68 */ + { "01-d2cbb8fd", BIT(BRCMF_FEAT_MONITOR_FMT_HW_RX_HDR) }, }; static void brcmf_feat_firmware_overrides(struct brcmf_pub *drv) @@ -181,13 +185,14 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, #define MAX_CAPS_BUFFER_SIZE 768 static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; char caps[MAX_CAPS_BUFFER_SIZE]; 
enum brcmf_feat_id id; int i, err; err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); if (err) { - brcmf_err("could not get firmware cap (%d)\n", err); + bphy_err(drvr, "could not get firmware cap (%d)\n", err); return; } @@ -212,14 +217,15 @@ static void brcmf_feat_firmware_capabilities(struct brcmf_if *ifp) static int brcmf_feat_fwcap_debugfs_read(struct seq_file *seq, void *data) { struct brcmf_bus *bus_if = dev_get_drvdata(seq->private); - struct brcmf_if *ifp = brcmf_get_ifp(bus_if->drvr, 0); + struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); char caps[MAX_CAPS_BUFFER_SIZE + 1] = { }; char *tmp; int err; err = brcmf_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); if (err) { - brcmf_err("could not get firmware cap (%d)\n", err); + bphy_err(drvr, "could not get firmware cap (%d)\n", err); return err; } @@ -268,9 +274,15 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) BIT(BRCMF_FEAT_WOWL_GTK); } } - /* MBSS does not work for 43362 */ - if (drvr->bus_if->chip == BRCM_CC_43362_CHIP_ID) + /* MBSS does not work for all chips */ + switch (drvr->bus_if->chip) { + case BRCM_CC_4330_CHIP_ID: + case BRCM_CC_43362_CHIP_ID: ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_MBSS); + break; + default: + break; + } brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_RSDB, "rsdb_mode"); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_TDLS, "tdls_enable"); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MFP, "mfp"); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index 0b4974df353a..5e88a7f16ad2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -35,6 +35,7 @@ * FWSUP: Firmware supplicant. * MONITOR: firmware can pass monitor packets to host. * MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header + * MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -52,7 +53,8 @@ BRCMF_FEAT_DEF(GSCAN) \ BRCMF_FEAT_DEF(FWSUP) \ BRCMF_FEAT_DEF(MONITOR) \ - BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) + BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \ + BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) /* * Quirks: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 14b948917a1a..8209a42dea72 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -47,7 +47,7 @@ enum nvram_parser_state { * @state: current parser state. * @data: input buffer being parsed. * @nvram: output buffer with parse result. - * @nvram_len: lenght of parse result. + * @nvram_len: length of parse result. * @line: current line. * @column: current column in line. * @pos: byte offset in input buffer. 
@@ -719,8 +719,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, break; } + brcmf_chip_name(chip, chiprev, chipname, sizeof(chipname)); + if (i == table_size) { - brcmf_err("Unknown chipid %d [%d]\n", chip, chiprev); + brcmf_err("Unknown chip %s\n", chipname); return NULL; } @@ -729,8 +731,6 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, if (!fwreq) return NULL; - brcmf_chip_name(chip, chiprev, chipname, sizeof(chipname)); - brcmf_info("using %s for chip %s\n", mapping_table[i].fw_base, chipname); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index e7eaa57d11d9..63e98fd583ab 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -102,7 +102,8 @@ static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh, schedule_work(&fweh->event_work); } -static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp, +static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr, + struct brcmf_if *ifp, enum brcmf_fweh_event_code code, struct brcmf_event_msg *emsg, void *data) @@ -117,9 +118,9 @@ static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp, if (fweh->evt_handler[code]) err = fweh->evt_handler[code](ifp, emsg, data); else - brcmf_err("unhandled event %d ignored\n", code); + bphy_err(drvr, "unhandled event %d ignored\n", code); } else { - brcmf_err("no interface object\n"); + bphy_err(drvr, "no interface object\n"); } return err; } @@ -158,7 +159,7 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, return; } if (ifevent->ifidx >= BRCMF_MAX_IFS) { - brcmf_err("invalid interface index: %u\n", ifevent->ifidx); + bphy_err(drvr, "invalid interface index: %u\n", ifevent->ifidx); return; } @@ -181,7 +182,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, if (ifp && ifevent->action == BRCMF_E_IF_CHANGE) brcmf_proto_reset_if(drvr, ifp); - err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); + err = brcmf_fweh_call_event_handler(drvr, ifp, emsg->event_code, emsg, + data); if (ifp && ifevent->action == BRCMF_E_IF_DEL) { bool armed = brcmf_cfg80211_vif_event_armed(drvr->config); @@ -268,11 +270,11 @@ static void brcmf_fweh_event_worker(struct work_struct *work) ifp = drvr->iflist[0]; else ifp = drvr->iflist[emsg.bsscfgidx]; - err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg, - event->data); + err = brcmf_fweh_call_event_handler(drvr, ifp, event->code, + &emsg, event->data); if (err) { - brcmf_err("event handler failed (%d)\n", - event->code); + bphy_err(drvr, "event handler failed (%d)\n", + event->code); err = 0; } event_free: @@ -339,7 +341,7 @@ int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code, brcmf_fweh_handler_t handler) { if (drvr->fweh.evt_handler[code]) { - brcmf_err("event code %d already registered\n", code); + bphy_err(drvr, "event code %d already registered\n", code); return -ENOSPC; } drvr->fweh.evt_handler[code] = handler; @@ -369,6 +371,7 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr, */ int brcmf_fweh_activate_events(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; int i, err; s8 eventmask[BRCMF_EVENTING_MASK_LEN]; @@ -388,7 +391,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp) err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN); if (err) - brcmf_err("Set event_msgs error (%d)\n", err); + bphy_err(drvr, "Set event_msgs error (%d)\n", err); return err; } diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h index 816f80ea925b..7027243db17e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h @@ -211,7 +211,7 @@ enum brcmf_fweh_event_code { */ #define BRCM_OUI "\x00\x10\x18" #define BCMILCP_BCM_SUBTYPE_EVENT 1 - +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 /** * struct brcm_ethhdr - broadcom specific ether header. @@ -266,7 +266,7 @@ struct brcmf_event { * @status: status information. * @reason: reason code. * @auth_type: authentication type. - * @datalen: lenght of event data buffer. + * @datalen: length of event data buffer. * @addr: ether address. * @ifname: interface name. * @ifidx: interface index. @@ -334,10 +334,10 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing); static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, - struct sk_buff *skb) + struct sk_buff *skb, u16 stype) { struct brcmf_event *event_packet; - u16 usr_stype; + u16 subtype, usr_stype; /* only process events when protocol matches */ if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL)) @@ -346,8 +346,16 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, if ((skb->len + ETH_HLEN) < sizeof(*event_packet)) return; - /* check for BRCM oui match */ event_packet = (struct brcmf_event *)skb_mac_header(skb); + + /* check subtype if needed */ + if (unlikely(stype)) { + subtype = get_unaligned_be16(&event_packet->hdr.subtype); + if (subtype != stype) + return; + } + + /* check for BRCM oui match */ if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0], sizeof(event_packet->hdr.oui))) return; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c index 802d7cb73b80..8ea27489734e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c @@ -110,7 +110,7 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set) s32 err, fwerr; if (drvr->bus_if->state != BRCMF_BUS_UP) { - brcmf_err("bus is down. we have nothing to do.\n"); + bphy_err(drvr, "bus is down. 
we have nothing to do.\n"); return -EIO; } @@ -242,7 +242,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data, buflen, true); } else { err = -EPERM; - brcmf_err("Creating iovar failed\n"); + bphy_err(drvr, "Creating iovar failed\n"); } mutex_unlock(&drvr->proto_block); @@ -268,7 +268,7 @@ brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data, memcpy(data, drvr->proto_buf, len); } else { err = -EPERM; - brcmf_err("Creating iovar failed\n"); + bphy_err(drvr, "Creating iovar failed\n"); } brcmf_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len); @@ -366,7 +366,7 @@ brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, buflen, true); } else { err = -EPERM; - brcmf_err("Creating bsscfg failed\n"); + bphy_err(drvr, "Creating bsscfg failed\n"); } mutex_unlock(&drvr->proto_block); @@ -392,7 +392,7 @@ brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, memcpy(data, drvr->proto_buf, len); } else { err = -EPERM; - brcmf_err("Creating bsscfg failed\n"); + bphy_err(drvr, "Creating bsscfg failed\n"); } brcmf_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d\n", ifp->ifidx, ifp->bsscfgidx, name, len); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 02759ebd207c..abeb305492e0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1255,6 +1255,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws, enum brcmf_fws_skb_state state, int fifo, struct sk_buff *p) { + struct brcmf_pub *drvr = fws->drvr; int prec = 2 * fifo; u32 *qfull_stat = &fws->stats.delayq_full_error; struct brcmf_fws_mac_descriptor *entry; @@ -1267,7 +1268,7 @@ static int brcmf_fws_enq(struct brcmf_fws_info *fws, entry = brcmf_skbcb(p)->mac; if (entry == NULL) { - brcmf_err("no mac descriptor found for skb %p\n", p); + bphy_err(drvr, "no mac descriptor found for skb %p\n", p); return -ENOENT; } @@ -1457,6 +1458,7 @@ static int brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, u32 genbit, u16 seq, u8 compcnt) { + struct brcmf_pub *drvr = fws->drvr; u32 fifo; u8 cnt = 0; int ret; @@ -1481,14 +1483,14 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) fws->stats.txs_host_tossed += compcnt; else - brcmf_err("unexpected txstatus\n"); + bphy_err(drvr, "unexpected txstatus\n"); while (cnt < compcnt) { ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, remove_from_hanger); if (ret != 0) { - brcmf_err("no packet in hanger slot: hslot=%d\n", - hslot); + bphy_err(drvr, "no packet in hanger slot: hslot=%d\n", + hslot); goto cont; } @@ -1612,12 +1614,13 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { - struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_fws_info *fws = drvr_to_fws(drvr); int i; u8 *credits = data; if (e->datalen < BRCMF_FWS_FIFO_COUNT) { - brcmf_err("event payload too small (%d)\n", e->datalen); + bphy_err(drvr, "event payload too small (%d)\n", e->datalen); return -EINVAL; } @@ -1681,6 +1684,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi, void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) { + struct brcmf_pub *drvr = ifp->drvr; u8 *reorder_data; u8 flow_id, max_idx, cur_idx, exp_idx, end_idx; struct brcmf_ampdu_rx_reorder 
*rfi; @@ -1695,7 +1699,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) /* validate flags and flow id */ if (flags == 0xFF) { - brcmf_err("invalid flags...so ignore this packet\n"); + bphy_err(drvr, "invalid flags...so ignore this packet\n"); brcmf_netif_rx(ifp, pkt); return; } @@ -1732,7 +1736,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) flow_id, max_idx); rfi = kzalloc(buf_size, GFP_ATOMIC); if (rfi == NULL) { - brcmf_err("failed to alloc buffer\n"); + bphy_err(drvr, "failed to alloc buffer\n"); brcmf_netif_rx(ifp, pkt); return; } @@ -1996,6 +2000,7 @@ static u8 brcmf_fws_precommit_skb(struct brcmf_fws_info *fws, int fifo, static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, struct sk_buff *skb, int fifo) { + struct brcmf_pub *drvr = fws->drvr; struct brcmf_fws_mac_descriptor *entry; struct sk_buff *pktout; int qidx, hslot; @@ -2009,11 +2014,11 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, pktout = brcmu_pktq_penq_head(&entry->psq, qidx, skb); if (pktout == NULL) { - brcmf_err("%s queue %d full\n", entry->name, qidx); + bphy_err(drvr, "%s queue %d full\n", entry->name, qidx); rc = -ENOSPC; } } else { - brcmf_err("%s entry removed\n", entry->name); + bphy_err(drvr, "%s entry removed\n", entry->name); rc = -ENOENT; } @@ -2118,7 +2123,8 @@ static int brcmf_fws_assign_htod(struct brcmf_fws_info *fws, struct sk_buff *p, int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) { - struct brcmf_fws_info *fws = drvr_to_fws(ifp->drvr); + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_fws_info *fws = drvr_to_fws(drvr); struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb); struct ethhdr *eh = (struct ethhdr *)(skb->data); int fifo = BRCMF_FWS_FIFO_BCMC; @@ -2146,7 +2152,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) brcmf_fws_enq(fws, BRCMF_FWS_SKBSTATE_DELAYED, fifo, skb); brcmf_fws_schedule_deq(fws); } else { - brcmf_err("drop skb: no hanger slot\n"); + bphy_err(drvr, "drop skb: no hanger slot\n"); brcmf_txfinalize(ifp, skb, false); rc = -ENOMEM; } @@ -2365,7 +2371,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq"); if (fws->fws_wq == NULL) { - brcmf_err("workqueue creation failed\n"); + bphy_err(drvr, "workqueue creation failed\n"); rc = -EBADF; goto fail; } @@ -2381,13 +2387,13 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP, brcmf_fws_notify_credit_map); if (rc < 0) { - brcmf_err("register credit map handler failed\n"); + bphy_err(drvr, "register credit map handler failed\n"); goto fail; } rc = brcmf_fweh_register(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT, brcmf_fws_notify_bcmc_credit_support); if (rc < 0) { - brcmf_err("register bcmc credit handler failed\n"); + bphy_err(drvr, "register bcmc credit handler failed\n"); brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP); goto fail; } @@ -2399,7 +2405,7 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr) fws->fw_signals = true; ifp = brcmf_get_ifp(drvr, 0); if (brcmf_fil_iovar_int_set(ifp, "tlv", tlv)) { - brcmf_err("failed to set bdcv2 tlv signaling\n"); + bphy_err(drvr, "failed to set bdcv2 tlv signaling\n"); fws->fcmode = BRCMF_FWS_FCMODE_NONE; fws->fw_signals = false; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 4e8397a0cbc8..d3780eae7f19 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -134,6 +134,22 @@ struct msgbuf_completion_hdr { __le16 flow_ring_id; }; +/* Data struct for the MSGBUF_TYPE_GEN_STATUS */ +struct msgbuf_gen_status { + struct msgbuf_common_hdr msg; + struct msgbuf_completion_hdr compl_hdr; + __le16 write_idx; + __le32 rsvd0[3]; +}; + +/* Data struct for the MSGBUF_TYPE_RING_STATUS */ +struct msgbuf_ring_status { + struct msgbuf_common_hdr msg; + struct msgbuf_completion_hdr compl_hdr; + __le16 write_idx; + __le16 rsvd0[5]; +}; + struct msgbuf_rx_event { struct msgbuf_common_hdr msg; struct msgbuf_completion_hdr compl_hdr; @@ -431,7 +447,7 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx, brcmf_commonring_lock(commonring); ret_ptr = brcmf_commonring_reserve_for_write(commonring); if (!ret_ptr) { - brcmf_err("Failed to reserve space in commonring\n"); + bphy_err(drvr, "Failed to reserve space in commonring\n"); brcmf_commonring_unlock(commonring); return -ENOMEM; } @@ -495,7 +511,7 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf); if (!timeout) { - brcmf_err("Timeout on response for query command\n"); + bphy_err(drvr, "Timeout on response for query command\n"); return -EIO; } @@ -572,6 +588,7 @@ static u32 brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf, struct brcmf_msgbuf_work_item *work) { + struct brcmf_pub *drvr = msgbuf->drvr; struct msgbuf_tx_flowring_create_req *create; struct brcmf_commonring *commonring; void *ret_ptr; @@ -587,7 +604,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf, &msgbuf->flowring_dma_handle[flowid], GFP_KERNEL); if (!dma_buf) { - brcmf_err("dma_alloc_coherent failed\n"); + bphy_err(drvr, "dma_alloc_coherent failed\n"); brcmf_flowring_delete(msgbuf->flow, flowid); return BRCMF_FLOWRING_INVALID_ID; } @@ -600,7 +617,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf, brcmf_commonring_lock(commonring); ret_ptr = brcmf_commonring_reserve_for_write(commonring); if (!ret_ptr) { - brcmf_err("Failed to reserve space in commonring\n"); + bphy_err(drvr, "Failed to reserve space in commonring\n"); brcmf_commonring_unlock(commonring); brcmf_msgbuf_remove_flowring(msgbuf, flowid); return BRCMF_FLOWRING_INVALID_ID; @@ -627,7 +644,7 @@ brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf, err = brcmf_commonring_write_complete(commonring); brcmf_commonring_unlock(commonring); if (err) { - brcmf_err("Failed to write commonring\n"); + bphy_err(drvr, "Failed to write commonring\n"); brcmf_msgbuf_remove_flowring(msgbuf, flowid); return BRCMF_FLOWRING_INVALID_ID; } @@ -686,6 +703,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid) { struct brcmf_flowring *flow = msgbuf->flow; + struct brcmf_pub *drvr = msgbuf->drvr; struct brcmf_commonring *commonring; void *ret_ptr; u32 count; @@ -705,8 +723,8 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid) while (brcmf_flowring_qlen(flow, flowid)) { skb = brcmf_flowring_dequeue(flow, flowid); if (skb == NULL) { - brcmf_err("No SKB, but qlen %d\n", - brcmf_flowring_qlen(flow, flowid)); + bphy_err(drvr, "No SKB, but qlen %d\n", + brcmf_flowring_qlen(flow, flowid)); break; } skb_orphan(skb); @@ -714,7 +732,7 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid) msgbuf->tx_pktids, 
skb, ETH_HLEN, &physaddr, &pktid)) { brcmf_flowring_reinsert(flow, flowid, skb); - brcmf_err("No PKTID available !!\n"); + bphy_err(drvr, "No PKTID available !!\n"); break; } ret_ptr = brcmf_commonring_reserve_for_write(commonring); @@ -885,6 +903,7 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count) { + struct brcmf_pub *drvr = msgbuf->drvr; struct brcmf_commonring *commonring; void *ret_ptr; struct sk_buff *skb; @@ -912,7 +931,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count) skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE); if (skb == NULL) { - brcmf_err("Failed to alloc SKB\n"); + bphy_err(drvr, "Failed to alloc SKB\n"); brcmf_commonring_write_cancel(commonring, alloced - i); break; } @@ -922,7 +941,7 @@ static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count) msgbuf->rx_pktids, skb, 0, &physaddr, &pktid)) { dev_kfree_skb_any(skb); - brcmf_err("No PKTID available !!\n"); + bphy_err(drvr, "No PKTID available !!\n"); brcmf_commonring_write_cancel(commonring, alloced - i); break; } @@ -992,6 +1011,7 @@ static u32 brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf, u32 count) { + struct brcmf_pub *drvr = msgbuf->drvr; struct brcmf_commonring *commonring; void *ret_ptr; struct sk_buff *skb; @@ -1009,7 +1029,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf, count, &alloced); if (!ret_ptr) { - brcmf_err("Failed to reserve space in commonring\n"); + bphy_err(drvr, "Failed to reserve space in commonring\n"); brcmf_commonring_unlock(commonring); return 0; } @@ -1021,7 +1041,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf, skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE); if (skb == NULL) { - brcmf_err("Failed to alloc SKB\n"); + bphy_err(drvr, "Failed to alloc SKB\n"); brcmf_commonring_write_cancel(commonring, alloced - i); break; } @@ -1031,7 +1051,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf, msgbuf->rx_pktids, skb, 0, &physaddr, &pktid)) { dev_kfree_skb_any(skb); - brcmf_err("No PKTID available !!\n"); + bphy_err(drvr, "No PKTID available !!\n"); brcmf_commonring_write_cancel(commonring, alloced - i); break; } @@ -1083,6 +1103,7 @@ static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf) static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) { + struct brcmf_pub *drvr = msgbuf->drvr; struct msgbuf_rx_event *event; u32 idx; u16 buflen; @@ -1109,14 +1130,14 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf) ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx); if (!ifp || !ifp->ndev) { - brcmf_err("Received pkt for invalid ifidx %d\n", - event->msg.ifidx); + bphy_err(drvr, "Received pkt for invalid ifidx %d\n", + event->msg.ifidx); goto exit; } skb->protocol = eth_type_trans(skb, ifp->ndev); - brcmf_fweh_process_skb(ifp->drvr, skb); + brcmf_fweh_process_skb(ifp->drvr, skb, 0); exit: brcmu_pkt_buf_free_skb(skb); @@ -1126,6 +1147,7 @@ exit: static void brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) { + struct brcmf_pub *drvr = msgbuf->drvr; struct msgbuf_rx_complete *rx_complete; struct sk_buff *skb; u16 data_offset; @@ -1159,7 +1181,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) ifp = msgbuf->drvr->mon_if; if (!ifp) { - brcmf_err("Received unexpected monitor pkt\n"); + bphy_err(drvr, "Received 
unexpected monitor pkt\n"); brcmu_pkt_buf_free_skb(skb); return; } @@ -1170,8 +1192,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx); if (!ifp || !ifp->ndev) { - brcmf_err("Received pkt for invalid ifidx %d\n", - rx_complete->msg.ifidx); + bphy_err(drvr, "Received pkt for invalid ifidx %d\n", + rx_complete->msg.ifidx); brcmu_pkt_buf_free_skb(skb); return; } @@ -1180,11 +1202,39 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) brcmf_netif_rx(ifp, skb); } +static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf, + void *buf) +{ + struct msgbuf_gen_status *gen_status = buf; + struct brcmf_pub *drvr = msgbuf->drvr; + int err; + + err = le16_to_cpu(gen_status->compl_hdr.status); + if (err) + bphy_err(drvr, "Firmware reported general error: %d\n", err); +} + +static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf, + void *buf) +{ + struct msgbuf_ring_status *ring_status = buf; + struct brcmf_pub *drvr = msgbuf->drvr; + int err; + + err = le16_to_cpu(ring_status->compl_hdr.status); + if (err) { + int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id); + + bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring, + err); + } +} static void brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf, void *buf) { + struct brcmf_pub *drvr = msgbuf->drvr; struct msgbuf_flowring_create_resp *flowring_create_resp; u16 status; u16 flowid; @@ -1196,7 +1246,7 @@ brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf, status = le16_to_cpu(flowring_create_resp->compl_hdr.status); if (status) { - brcmf_err("Flowring creation failed, code %d\n", status); + bphy_err(drvr, "Flowring creation failed, code %d\n", status); brcmf_msgbuf_remove_flowring(msgbuf, flowid); return; } @@ -1213,6 +1263,7 @@ static void brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf, void *buf) { + struct brcmf_pub *drvr = msgbuf->drvr; struct msgbuf_flowring_delete_resp *flowring_delete_resp; u16 status; u16 flowid; @@ -1224,7 +1275,7 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf, status = le16_to_cpu(flowring_delete_resp->compl_hdr.status); if (status) { - brcmf_err("Flowring deletion failed, code %d\n", status); + bphy_err(drvr, "Flowring deletion failed, code %d\n", status); brcmf_flowring_delete(msgbuf->flow, flowid); return; } @@ -1237,10 +1288,19 @@ brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf, static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf) { + struct brcmf_pub *drvr = msgbuf->drvr; struct msgbuf_common_hdr *msg; msg = (struct msgbuf_common_hdr *)buf; switch (msg->msgtype) { + case MSGBUF_TYPE_GEN_STATUS: + brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n"); + brcmf_msgbuf_process_gen_status(msgbuf, buf); + break; + case MSGBUF_TYPE_RING_STATUS: + brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n"); + brcmf_msgbuf_process_ring_status(msgbuf, buf); + break; case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT: brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n"); brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf); @@ -1269,7 +1329,7 @@ static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf) brcmf_msgbuf_process_rx_complete(msgbuf, buf); break; default: - brcmf_err("Unsupported msgtype %d\n", msg->msgtype); + bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype); break; } } @@ -1352,7 +1412,7 @@ void 
brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid) brcmf_commonring_lock(commonring); ret_ptr = brcmf_commonring_reserve_for_write(commonring); if (!ret_ptr) { - brcmf_err("FW unaware, flowring will be removed !!\n"); + bphy_err(drvr, "FW unaware, flowring will be removed !!\n"); brcmf_commonring_unlock(commonring); brcmf_msgbuf_remove_flowring(msgbuf, flowid); return; @@ -1376,7 +1436,7 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid) err = brcmf_commonring_write_complete(commonring); brcmf_commonring_unlock(commonring); if (err) { - brcmf_err("Failed to submit RING_DELETE, flowring will be removed\n"); + bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n"); brcmf_msgbuf_remove_flowring(msgbuf, flowid); } } @@ -1451,8 +1511,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) if_msgbuf = drvr->bus_if->msgbuf; if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) { - brcmf_err("driver not configured for this many flowrings %d\n", - if_msgbuf->max_flowrings); + bphy_err(drvr, "driver not configured for this many flowrings %d\n", + if_msgbuf->max_flowrings); if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1; } @@ -1462,7 +1522,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow"); if (msgbuf->txflow_wq == NULL) { - brcmf_err("workqueue creation failed\n"); + bphy_err(drvr, "workqueue creation failed\n"); goto fail; } INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 456a1bf008b3..73a0e550f2b2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -434,6 +434,7 @@ static void brcmf_p2p_print_actframe(bool tx, void *frame, u32 frame_len) */ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) { + struct brcmf_pub *drvr = ifp->drvr; s32 ret = 0; brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1); @@ -450,7 +451,7 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) ret = brcmf_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac, ETH_ALEN); if (ret) - brcmf_err("failed to update device address ret %d\n", ret); + bphy_err(drvr, "failed to update device address ret %d\n", ret); return ret; } @@ -570,13 +571,14 @@ static s32 brcmf_p2p_deinit_discovery(struct brcmf_p2p_info *p2p) */ static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p) { + struct brcmf_pub *drvr = p2p->cfg->pub; struct brcmf_cfg80211_vif *vif; s32 ret = 0; brcmf_dbg(TRACE, "enter\n"); vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; if (!vif) { - brcmf_err("P2P config device not available\n"); + bphy_err(drvr, "P2P config device not available\n"); ret = -EPERM; goto exit; } @@ -590,13 +592,13 @@ static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p) vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; ret = brcmf_fil_iovar_int_set(vif->ifp, "p2p_disc", 1); if (ret < 0) { - brcmf_err("set p2p_disc error\n"); + bphy_err(drvr, "set p2p_disc error\n"); goto exit; } vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; ret = brcmf_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0); if (ret < 0) { - brcmf_err("unable to set WL_P2P_DISC_ST_SCAN\n"); + bphy_err(drvr, "unable to set WL_P2P_DISC_ST_SCAN\n"); goto exit; } @@ -608,7 +610,7 @@ static int brcmf_p2p_enable_discovery(struct brcmf_p2p_info *p2p) */ ret = 
brcmf_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED); if (ret < 0) { - brcmf_err("wsec error %d\n", ret); + bphy_err(drvr, "wsec error %d\n", ret); goto exit; } @@ -630,6 +632,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, u16 chanspecs[], s32 search_state, enum p2p_bss_type bss_type) { + struct brcmf_pub *drvr = p2p->cfg->pub; s32 ret = 0; s32 memsize = offsetof(struct brcmf_p2p_scan_le, eparams.params_le.channel_list); @@ -648,7 +651,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, vif = p2p->bss_idx[bss_type].vif; if (vif == NULL) { - brcmf_err("no vif for bss type %d\n", bss_type); + bphy_err(drvr, "no vif for bss type %d\n", bss_type); ret = -EINVAL; goto exit; } @@ -676,7 +679,7 @@ static s32 brcmf_p2p_escan(struct brcmf_p2p_info *p2p, u32 num_chans, BRCMF_P2P_WILDCARD_SSID_LEN); break; default: - brcmf_err(" invalid search state %d\n", search_state); + bphy_err(drvr, " invalid search state %d\n", search_state); ret = -EINVAL; goto exit; } @@ -760,6 +763,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, struct cfg80211_scan_request *request) { struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_pub *drvr = cfg->pub; s32 err = 0; s32 search_state = WL_P2P_DISC_ST_SCAN; struct brcmf_cfg80211_vif *vif; @@ -822,7 +826,7 @@ static s32 brcmf_p2p_run_escan(struct brcmf_cfg80211_info *cfg, } exit: if (err) - brcmf_err("error (%d)\n", err); + bphy_err(drvr, "error (%d)\n", err); return err; } @@ -917,19 +921,20 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy, static s32 brcmf_p2p_discover_listen(struct brcmf_p2p_info *p2p, u16 channel, u32 duration) { + struct brcmf_pub *drvr = p2p->cfg->pub; struct brcmf_cfg80211_vif *vif; struct brcmu_chan ch; s32 err = 0; vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; if (!vif) { - brcmf_err("Discovery is not set, so we have nothing to do\n"); + bphy_err(drvr, "Discovery is not set, so we have nothing to do\n"); err = -EPERM; goto exit; } if (test_bit(BRCMF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) { - brcmf_err("Previous LISTEN is not completed yet\n"); + bphy_err(drvr, "Previous LISTEN is not completed yet\n"); /* WAR: prevent cookie mismatch in wpa_supplicant return OK */ goto exit; } @@ -1046,6 +1051,7 @@ void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp) */ static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel) { + struct brcmf_pub *drvr = p2p->cfg->pub; s32 err; u32 channel_cnt; u16 *default_chan_list; @@ -1061,7 +1067,7 @@ static s32 brcmf_p2p_act_frm_search(struct brcmf_p2p_info *p2p, u16 channel) default_chan_list = kcalloc(channel_cnt, sizeof(*default_chan_list), GFP_KERNEL); if (default_chan_list == NULL) { - brcmf_err("channel list allocation failed\n"); + bphy_err(drvr, "channel list allocation failed\n"); err = -ENOMEM; goto exit; } @@ -1103,6 +1109,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work) struct brcmf_p2p_info *p2p = container_of(afx_hdl, struct brcmf_p2p_info, afx_hdl); + struct brcmf_pub *drvr = p2p->cfg->pub; s32 err; if (!afx_hdl->is_active) @@ -1116,7 +1123,7 @@ static void brcmf_p2p_afx_handler(struct work_struct *work) err = brcmf_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan); if (err) { - brcmf_err("ERROR occurred! value is (%d)\n", err); + bphy_err(drvr, "ERROR occurred! 
value is (%d)\n", err); if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status)) complete(&afx_hdl->act_frm_scan); @@ -1338,7 +1345,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data) { - struct brcmf_cfg80211_info *cfg = ifp->drvr->config; + struct brcmf_pub *drvr = ifp->drvr; + struct brcmf_cfg80211_info *cfg = drvr->config; struct brcmf_p2p_info *p2p = &cfg->p2p; struct afx_hdl *afx_hdl = &p2p->afx_hdl; struct wireless_dev *wdev; @@ -1409,7 +1417,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) + mgmt_frame_len, GFP_KERNEL); if (!mgmt_frame) { - brcmf_err("No memory available for action frame\n"); + bphy_err(drvr, "No memory available for action frame\n"); return -ENOMEM; } memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN); @@ -1492,6 +1500,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, struct brcmf_fil_af_params_le *af_params) { + struct brcmf_pub *drvr = p2p->cfg->pub; struct brcmf_cfg80211_vif *vif; s32 err = 0; s32 timeout = 0; @@ -1506,7 +1515,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params, sizeof(*af_params)); if (err) { - brcmf_err(" sending action frame has failed\n"); + bphy_err(drvr, " sending action frame has failed\n"); goto exit; } @@ -1556,6 +1565,7 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg, struct brcmf_config_af_params *config_af_params) { struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_pub *drvr = cfg->pub; struct brcmf_fil_action_frame_le *action_frame; struct brcmf_p2p_pub_act_frame *act_frm; s32 err = 0; @@ -1634,8 +1644,8 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg, config_af_params->extra_listen = false; break; default: - brcmf_err("Unknown p2p pub act frame subtype: %d\n", - act_frm->subtype); + bphy_err(drvr, "Unknown p2p pub act frame subtype: %d\n", + act_frm->subtype); err = -EINVAL; } return err; @@ -1657,6 +1667,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, struct brcmf_fil_action_frame_le *action_frame; struct brcmf_config_af_params config_af_params; struct afx_hdl *afx_hdl = &p2p->afx_hdl; + struct brcmf_pub *drvr = cfg->pub; u16 action_frame_len; bool ack = false; u8 category; @@ -1692,7 +1703,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, if (brcmf_p2p_pub_af_tx(cfg, af_params, &config_af_params)) { /* Just send unknown subtype frame with */ /* default parameters. */ - brcmf_err("P2P Public action frame, unknown subtype.\n"); + bphy_err(drvr, "P2P Public action frame, unknown subtype.\n"); } } else if (brcmf_p2p_is_gas_action(action_frame->data, action_frame_len)) { @@ -1714,7 +1725,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME); } else { - brcmf_err("Unknown action type: %d\n", action); + bphy_err(drvr, "Unknown action type: %d\n", action); goto exit; } } else if (brcmf_p2p_is_p2p_action(action_frame->data, @@ -1722,8 +1733,8 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, /* do not configure anything. 
it will be */ /* sent with a default configuration */ } else { - brcmf_err("Unknown Frame: category 0x%x, action 0x%x\n", - category, action); + bphy_err(drvr, "Unknown Frame: category 0x%x, action 0x%x\n", + category, action); return false; } @@ -1761,7 +1772,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, if (brcmf_p2p_af_searching_channel(p2p) == P2P_INVALID_CHANNEL) { - brcmf_err("Couldn't find peer's channel.\n"); + bphy_err(drvr, "Couldn't find peer's channel.\n"); goto exit; } @@ -1783,7 +1794,8 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, tx_retry++; } if (ack == false) { - brcmf_err("Failed to send Action Frame(retry %d)\n", tx_retry); + bphy_err(drvr, "Failed to send Action Frame(retry %d)\n", + tx_retry); clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); } @@ -1965,6 +1977,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, enum brcmf_fil_p2p_if_types if_type) { struct brcmf_p2p_info *p2p = &cfg->p2p; + struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_vif *vif; struct brcmf_fil_p2p_if_le if_request; s32 err; @@ -1974,13 +1987,13 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; if (!vif) { - brcmf_err("vif for P2PAPI_BSSCFG_PRIMARY does not exist\n"); + bphy_err(drvr, "vif for P2PAPI_BSSCFG_PRIMARY does not exist\n"); return -EPERM; } brcmf_notify_escan_complete(cfg, vif->ifp, true, true); vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; if (!vif) { - brcmf_err("vif for P2PAPI_BSSCFG_CONNECTION does not exist\n"); + bphy_err(drvr, "vif for P2PAPI_BSSCFG_CONNECTION does not exist\n"); return -EPERM; } brcmf_set_mpc(vif->ifp, 0); @@ -1998,7 +2011,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, err = brcmf_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request, sizeof(if_request)); if (err) { - brcmf_err("p2p_ifupd FAILED, err=%d\n", err); + bphy_err(drvr, "p2p_ifupd FAILED, err=%d\n", err); brcmf_cfg80211_arm_vif_event(cfg, NULL); return err; } @@ -2006,7 +2019,7 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { - brcmf_err("No BRCMF_E_IF_CHANGE event received\n"); + bphy_err(drvr, "No BRCMF_E_IF_CHANGE event received\n"); return -EIO; } @@ -2069,6 +2082,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, struct wiphy *wiphy, u8 *addr) { + struct brcmf_pub *drvr = p2p->cfg->pub; struct brcmf_cfg80211_vif *p2p_vif; struct brcmf_if *p2p_ifp; struct brcmf_if *pri_ifp; @@ -2080,7 +2094,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE); if (IS_ERR(p2p_vif)) { - brcmf_err("could not create discovery vif\n"); + bphy_err(drvr, "could not create discovery vif\n"); return (struct wireless_dev *)p2p_vif; } @@ -2088,7 +2102,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, /* firmware requires unique mac address for p2pdev interface */ if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) { - brcmf_err("discovery vif must be different from primary interface\n"); + bphy_err(drvr, "discovery vif must be different from primary interface\n"); return ERR_PTR(-EINVAL); } @@ -2101,7 +2115,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, /* Initialize P2P Discovery in the firmware */ err = brcmf_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); if (err < 0) { - brcmf_err("set p2p_disc 
error\n"); + bphy_err(drvr, "set p2p_disc error\n"); brcmf_fweh_p2pdev_setup(pri_ifp, false); brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL); goto fail; @@ -2113,7 +2127,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL); brcmf_fweh_p2pdev_setup(pri_ifp, false); if (!err) { - brcmf_err("timeout occurred\n"); + bphy_err(drvr, "timeout occurred\n"); err = -EIO; goto fail; } @@ -2127,7 +2141,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, /* verify bsscfg index for P2P discovery */ err = brcmf_fil_iovar_int_get(pri_ifp, "p2p_dev", &bsscfgidx); if (err < 0) { - brcmf_err("retrieving discover bsscfg index failed\n"); + bphy_err(drvr, "retrieving discover bsscfg index failed\n"); goto fail; } @@ -2161,6 +2175,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct brcmf_pub *drvr = cfg->pub; struct brcmf_cfg80211_vif *vif; enum brcmf_fil_p2p_if_types iftype; int err; @@ -2201,7 +2216,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { - brcmf_err("timeout occurred\n"); + bphy_err(drvr, "timeout occurred\n"); err = -EIO; goto fail; } @@ -2209,7 +2224,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, /* interface created in firmware */ ifp = vif->ifp; if (!ifp) { - brcmf_err("no if pointer provided\n"); + bphy_err(drvr, "no if pointer provided\n"); err = -ENOENT; goto fail; } @@ -2218,7 +2233,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, ifp->ndev->name_assign_type = name_assign_type; err = brcmf_net_attach(ifp, true); if (err) { - brcmf_err("Registering netdevice failed\n"); + bphy_err(drvr, "Registering netdevice failed\n"); free_netdev(ifp->ndev); goto fail; } @@ -2373,6 +2388,7 @@ void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev) */ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced) { + struct brcmf_pub *drvr = cfg->pub; struct brcmf_p2p_info *p2p; struct brcmf_if *pri_ifp; s32 err = 0; @@ -2387,7 +2403,7 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced) if (p2pdev_forced) { err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL); if (IS_ERR(err_ptr)) { - brcmf_err("P2P device creation failed.\n"); + bphy_err(drvr, "P2P device creation failed.\n"); err = PTR_ERR(err_ptr); } } else { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 0f69b3fa296e..58a6bc379358 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -30,6 +30,15 @@ #include #include +/* Custom brcmf_err() that takes bus arg and passes it further */ +#define brcmf_err(bus, fmt, ...) 
\ + do { \ + if (IS_ENABLED(CONFIG_BRCMDBG) || \ + IS_ENABLED(CONFIG_BRCM_TRACING) || \ + net_ratelimit()) \ + __brcmf_err(bus, __func__, fmt, ##__VA_ARGS__); \ + } while (0) + #include "debug.h" #include "bus.h" #include "commonring.h" @@ -531,6 +540,7 @@ static void brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid) { const struct pci_dev *pdev = devinfo->pdev; + struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev); struct brcmf_core *core; u32 bar0_win; @@ -548,7 +558,7 @@ brcmf_pcie_select_core(struct brcmf_pciedev_info *devinfo, u16 coreid) } } } else { - brcmf_err("Unsupported core selected %x\n", coreid); + brcmf_err(bus, "Unsupported core selected %x\n", coreid); } } @@ -848,9 +858,8 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg) static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) { - struct pci_dev *pdev; - - pdev = devinfo->pdev; + struct pci_dev *pdev = devinfo->pdev; + struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev); brcmf_pcie_intr_disable(devinfo); @@ -861,7 +870,7 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) brcmf_pcie_isr_thread, IRQF_SHARED, "brcmf_pcie_intr", devinfo)) { pci_disable_msi(pdev); - brcmf_err("Failed to request IRQ %d\n", pdev->irq); + brcmf_err(bus, "Failed to request IRQ %d\n", pdev->irq); return -EIO; } devinfo->irq_allocated = true; @@ -871,15 +880,14 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo) { - struct pci_dev *pdev; + struct pci_dev *pdev = devinfo->pdev; + struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev); u32 status; u32 count; if (!devinfo->irq_allocated) return; - pdev = devinfo->pdev; - brcmf_pcie_intr_disable(devinfo); free_irq(pdev->irq, devinfo); pci_disable_msi(pdev); @@ -891,7 +899,7 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo) count++; } if (devinfo->in_irq) - brcmf_err("Still in IRQ (processing) !!!\n"); + brcmf_err(bus, "Still in IRQ (processing) !!!\n"); status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status); @@ -1102,6 +1110,7 @@ static void brcmf_pcie_release_ringbuffers(struct brcmf_pciedev_info *devinfo) static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) { + struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); struct brcmf_pcie_ringbuf *ring; struct brcmf_pcie_ringbuf *rings; u32 d2h_w_idx_ptr; @@ -1254,7 +1263,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo) return 0; fail: - brcmf_err("Allocating ring buffers failed\n"); + brcmf_err(bus, "Allocating ring buffers failed\n"); brcmf_pcie_release_ringbuffers(devinfo); return -ENOMEM; } @@ -1277,6 +1286,7 @@ brcmf_pcie_release_scratchbuffers(struct brcmf_pciedev_info *devinfo) static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) { + struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); u64 address; u32 addr; @@ -1316,7 +1326,7 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo) return 0; fail: - brcmf_err("Allocating scratch buffers failed\n"); + brcmf_err(bus, "Allocating scratch buffers failed\n"); brcmf_pcie_release_scratchbuffers(devinfo); return -ENOMEM; } @@ -1437,6 +1447,7 @@ static int brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, u32 sharedram_addr) { + struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); struct 
brcmf_pcie_shared_info *shared; u32 addr; @@ -1448,7 +1459,8 @@ brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, brcmf_dbg(PCIE, "PCIe protocol version %d\n", shared->version); if ((shared->version > BRCMF_PCIE_MAX_SHARED_VERSION) || (shared->version < BRCMF_PCIE_MIN_SHARED_VERSION)) { - brcmf_err("Unsupported PCIE version %d\n", shared->version); + brcmf_err(bus, "Unsupported PCIE version %d\n", + shared->version); return -EINVAL; } @@ -1490,6 +1502,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, const struct firmware *fw, void *nvram, u32 nvram_len) { + struct brcmf_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); u32 sharedram_addr; u32 sharedram_addr_written; u32 loop_counter; @@ -1544,7 +1557,13 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, loop_counter--; } if (sharedram_addr == sharedram_addr_written) { - brcmf_err("FW failed to initialize\n"); + brcmf_err(bus, "FW failed to initialize\n"); + return -ENODEV; + } + if (sharedram_addr < devinfo->ci->rambase || + sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) { + brcmf_err(bus, "Invalid shared RAM address 0x%08x\n", + sharedram_addr); return -ENODEV; } brcmf_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr); @@ -1555,16 +1574,15 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) { - struct pci_dev *pdev; + struct pci_dev *pdev = devinfo->pdev; + struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev); int err; phys_addr_t bar0_addr, bar1_addr; ulong bar1_size; - pdev = devinfo->pdev; - err = pci_enable_device(pdev); if (err) { - brcmf_err("pci_enable_device failed err=%d\n", err); + brcmf_err(bus, "pci_enable_device failed err=%d\n", err); return err; } @@ -1577,7 +1595,7 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) /* read Bar-1 mapped memory range */ bar1_size = pci_resource_len(pdev, 2); if ((bar1_size == 0) || (bar1_addr == 0)) { - brcmf_err("BAR1 Not enabled, device size=%ld, addr=%#016llx\n", + brcmf_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n", bar1_size, (unsigned long long)bar1_addr); return -EINVAL; } @@ -1586,7 +1604,7 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size); if (!devinfo->regs || !devinfo->tcm) { - brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs, + brcmf_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs, devinfo->tcm); return -EINVAL; } @@ -1873,7 +1891,7 @@ fail_bus: kfree(bus->msgbuf); kfree(bus); fail: - brcmf_err("failed %x:%x\n", pdev->vendor, pdev->device); + brcmf_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device); brcmf_pcie_release_resource(devinfo); if (devinfo->ci) brcmf_chip_detach(devinfo->ci); @@ -1947,7 +1965,7 @@ static int brcmf_pcie_pm_enter_D3(struct device *dev) wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed, BRCMF_PCIE_MBDATA_TIMEOUT); if (!devinfo->mbdata_completed) { - brcmf_err("Timeout on response for entering D3 substate\n"); + brcmf_err(bus, "Timeout on response for entering D3 substate\n"); brcmf_bus_change_state(bus, BRCMF_BUS_UP); return -EIO; } @@ -1993,7 +2011,7 @@ cleanup: err = brcmf_pcie_probe(pdev, NULL); if (err) - brcmf_err("probe after resume failed, err=%d\n", err); + brcmf_err(bus, "probe after resume failed, err=%d\n", err); return err; } @@ -2064,7 +2082,8 @@ void brcmf_pcie_register(void) brcmf_dbg(PCIE, "Enter\n"); 
err = pci_register_driver(&brcmf_pciedrvr); if (err) - brcmf_err("PCIE driver registration failed, err=%d\n", err); + brcmf_err(NULL, "PCIE driver registration failed, err=%d\n", + err); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c index ffa243e2e2d0..0fb97f7dd5a2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c @@ -109,6 +109,7 @@ static int brcmf_pno_channel_config(struct brcmf_if *ifp, static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq, u32 mscan, u32 bestn) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_pno_param_le pfn_param; u16 flags; u32 pfnmem; @@ -132,13 +133,13 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq, /* set bestn in firmware */ err = brcmf_fil_iovar_int_set(ifp, "pfnmem", pfnmem); if (err < 0) { - brcmf_err("failed to set pfnmem\n"); + bphy_err(drvr, "failed to set pfnmem\n"); goto exit; } /* get max mscan which the firmware supports */ err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem); if (err < 0) { - brcmf_err("failed to get pfnmem\n"); + bphy_err(drvr, "failed to get pfnmem\n"); goto exit; } mscan = min_t(u32, mscan, pfnmem); @@ -152,7 +153,7 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq, err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param, sizeof(pfn_param)); if (err) - brcmf_err("pfn_set failed, err=%d\n", err); + bphy_err(drvr, "pfn_set failed, err=%d\n", err); exit: return err; @@ -160,6 +161,7 @@ exit: static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_pno_macaddr_le pfn_mac; u8 *mac_addr = NULL; u8 *mac_mask = NULL; @@ -194,7 +196,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi) err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac, sizeof(pfn_mac)); if (err) - brcmf_err("pfn_macaddr failed, err=%d\n", err); + bphy_err(drvr, "pfn_macaddr failed, err=%d\n", err); return err; } @@ -202,6 +204,7 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi) static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid, bool active) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_pno_net_param_le pfn; int err; @@ -218,12 +221,13 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid, brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active); err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn)); if (err < 0) - brcmf_err("adding failed: err=%d\n", err); + bphy_err(drvr, "adding failed: err=%d\n", err); return err; } static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_pno_bssid_le bssid_cfg; int err; @@ -234,7 +238,7 @@ static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid) err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg, sizeof(bssid_cfg)); if (err < 0) - brcmf_err("adding failed: err=%d\n", err); + bphy_err(drvr, "adding failed: err=%d\n", err); return err; } @@ -258,6 +262,7 @@ static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid, static int brcmf_pno_clean(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; int ret; /* Disable pfn */ @@ -267,7 +272,7 @@ static int brcmf_pno_clean(struct brcmf_if *ifp) ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0); } if (ret < 0) - brcmf_err("failed code 
%d\n", ret); + bphy_err(drvr, "failed code %d\n", ret); return ret; } @@ -392,6 +397,7 @@ static int brcmf_pno_config_networks(struct brcmf_if *ifp, static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp) { + struct brcmf_pub *drvr = ifp->drvr; struct brcmf_pno_info *pi; struct brcmf_gscan_config *gscan_cfg; struct brcmf_gscan_bucket_config *buckets; @@ -416,7 +422,7 @@ static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp) /* clean up everything */ err = brcmf_pno_clean(ifp); if (err < 0) { - brcmf_err("failed error=%d\n", err); + bphy_err(drvr, "failed error=%d\n", err); goto free_gscan; } @@ -496,6 +502,11 @@ int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid) brcmf_dbg(TRACE, "reqid=%llu\n", reqid); pi = ifp_to_pno(ifp); + + /* No PNO request */ + if (!pi->n_reqs) + return 0; + err = brcmf_pno_remove_request(pi, reqid); if (err) return err; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c index c5ff551ec659..024c643052bc 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c @@ -47,8 +47,8 @@ int brcmf_proto_attach(struct brcmf_pub *drvr) if (brcmf_proto_msgbuf_attach(drvr)) goto fail; } else { - brcmf_err("Unsupported proto type %d\n", - drvr->bus_if->proto_type); + bphy_err(drvr, "Unsupported proto type %d\n", + drvr->bus_if->proto_type); goto fail; } if (!proto->tx_queue_data || (proto->hdrpull == NULL) || @@ -56,7 +56,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr) (proto->configure_addr_mode == NULL) || (proto->delete_peer == NULL) || (proto->add_tdls_peer == NULL) || (proto->debugfs_create == NULL)) { - brcmf_err("Not all proto handlers have been installed\n"); + bphy_err(drvr, "Not all proto handlers have been installed\n"); goto fail; } return 0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 0cd5b8d970d7..4d104ab80fd8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -2999,21 +2999,35 @@ static int brcmf_sdio_trap_info(struct seq_file *seq, struct brcmf_sdio *bus, if (error < 0) return error; - seq_printf(seq, - "dongle trap info: type 0x%x @ epc 0x%08x\n" - " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n" - " lr 0x%08x pc 0x%08x offset 0x%x\n" - " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n" - " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n", - le32_to_cpu(tr.type), le32_to_cpu(tr.epc), - le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr), - le32_to_cpu(tr.r13), le32_to_cpu(tr.r14), - le32_to_cpu(tr.pc), sh->trap_addr, - le32_to_cpu(tr.r0), le32_to_cpu(tr.r1), - le32_to_cpu(tr.r2), le32_to_cpu(tr.r3), - le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), - le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); - + if (seq) + seq_printf(seq, + "dongle trap info: type 0x%x @ epc 0x%08x\n" + " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n" + " lr 0x%08x pc 0x%08x offset 0x%x\n" + " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n" + " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n", + le32_to_cpu(tr.type), le32_to_cpu(tr.epc), + le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr), + le32_to_cpu(tr.r13), le32_to_cpu(tr.r14), + le32_to_cpu(tr.pc), sh->trap_addr, + le32_to_cpu(tr.r0), le32_to_cpu(tr.r1), + le32_to_cpu(tr.r2), le32_to_cpu(tr.r3), + le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), + le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); + else + pr_debug("dongle trap info: type 0x%x @ epc 0x%08x\n" + " cpsr 0x%08x spsr 
0x%08x sp 0x%08x\n" + " lr 0x%08x pc 0x%08x offset 0x%x\n" + " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n" + " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n", + le32_to_cpu(tr.type), le32_to_cpu(tr.epc), + le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr), + le32_to_cpu(tr.r13), le32_to_cpu(tr.r14), + le32_to_cpu(tr.pc), sh->trap_addr, + le32_to_cpu(tr.r0), le32_to_cpu(tr.r1), + le32_to_cpu(tr.r2), le32_to_cpu(tr.r3), + le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), + le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); return 0; } @@ -3067,8 +3081,10 @@ static int brcmf_sdio_checkdied(struct brcmf_sdio *bus) else if (sh.flags & SDPCM_SHARED_ASSERT) brcmf_err("assertion in dongle\n"); - if (sh.flags & SDPCM_SHARED_TRAP) + if (sh.flags & SDPCM_SHARED_TRAP) { brcmf_err("firmware trap in dongle\n"); + brcmf_sdio_trap_info(NULL, bus, &sh); + } return 0; } @@ -3143,9 +3159,12 @@ static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data) return 0; } -static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus) +static void brcmf_sdio_debugfs_create(struct device *dev) { - struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_pub *drvr = bus_if->drvr; + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct brcmf_sdio *bus = sdiodev->bus; struct dentry *dentry = brcmf_debugfs_get_devdir(drvr); if (IS_ERR_OR_NULL(dentry)) @@ -3165,7 +3184,7 @@ static int brcmf_sdio_checkdied(struct brcmf_sdio *bus) return 0; } -static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus) +static void brcmf_sdio_debugfs_create(struct device *dev) { } #endif /* DEBUG */ @@ -3477,8 +3496,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev) if (bus->rxbuf) bus->rxblen = value; - brcmf_sdio_debugfs_create(bus); - /* the commands below use the terms tx and rx from * a device perspective, ie. bus:txglom affects the * bus transfers from device to host. @@ -4088,6 +4105,7 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .get_ramsize = brcmf_sdio_bus_get_ramsize, .get_memdump = brcmf_sdio_bus_get_memdump, .get_fwname = brcmf_sdio_get_fwname, + .debugfs_create = brcmf_sdio_debugfs_create }; #define BRCMF_SDIO_FW_CODE 0 @@ -4197,7 +4215,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, } else { /* Disable F2 again */ sdio_disable_func(sdiod->func2); - goto release; + goto checkdied; } if (brcmf_chip_sr_capable(bus->ci)) { @@ -4218,8 +4236,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, } /* If we didn't come up, turn off backplane clock */ - if (err != 0) + if (err != 0) { brcmf_sdio_clkctl(bus, CLK_NONE, false); + goto checkdied; + } sdio_release_host(sdiod->func1); @@ -4233,12 +4253,15 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, err = brcmf_attach(sdiod->dev, sdiod->settings); if (err != 0) { brcmf_err("brcmf_attach failed\n"); - goto fail; + sdio_claim_host(sdiod->func1); + goto checkdied; } /* ready */ return; +checkdied: + brcmf_sdio_checkdied(bus); release: sdio_release_host(sdiod->func1); fail: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c index fe6755944b7b..a5c271bff446 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.c @@ -14,14 +14,16 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +#include #include /* bug in tracepoint.h, it should include this */ #ifndef __CHECKER__ #define CREATE_TRACE_POINTS +#include "bus.h" #include "tracepoint.h" #include "debug.h" -void __brcmf_err(const char *func, const char *fmt, ...) +void __brcmf_err(struct brcmf_bus *bus, const char *func, const char *fmt, ...) { struct va_format vaf = { .fmt = fmt, @@ -30,7 +32,10 @@ void __brcmf_err(const char *func, const char *fmt, ...) va_start(args, fmt); vaf.va = &args; - pr_err("%s: %pV", func, &vaf); + if (bus) + dev_err(bus->dev, "%s: %pV", func, &vaf); + else + pr_err("%s: %pV", func, &vaf); trace_brcmf_err(func, &vaf); va_end(args); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index a4308c6e72d7..e9cbfd077710 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -508,7 +508,7 @@ static void brcmf_usb_rx_complete(struct urb *urb) skb = req->skb; req->skb = NULL; - /* zero lenght packets indicate usb "failure". Do not refill */ + /* zero length packets indicate usb "failure". Do not refill */ if (urb->status != 0 || !urb->actual_length) { brcmu_pkt_buf_free_skb(skb); brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req, NULL); @@ -575,7 +575,6 @@ static void brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state) { struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus; - int old_state; brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n", devinfo->bus_pub.state, state); @@ -583,7 +582,6 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state) if (devinfo->bus_pub.state == state) return; - old_state = devinfo->bus_pub.state; devinfo->bus_pub.state = state; /* update state of upper layer */ @@ -1550,6 +1548,10 @@ void brcmf_usb_exit(void) void brcmf_usb_register(void) { + int ret; + brcmf_dbg(USB, "Enter\n"); - usb_register(&brcmf_usbdrvr); + ret = usb_register(&brcmf_usbdrvr); + if (ret) + brcmf_err("usb_register failed %d\n", ret); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile index ed83f33aceb7..482d7737764d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile @@ -16,9 +16,9 @@ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
ccflags-y := \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmsmac/phy \ - -Idrivers/net/wireless/broadcom/brcm80211/include + -I $(srctree)/$(src) \ + -I $(srctree)/$(src)/phy \ + -I $(srctree)/$(src)/../include brcmsmac-y := \ mac80211_if.o \ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c index 3bd54f125776..6d776ef6ff54 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.c @@ -37,27 +37,18 @@ static struct dentry *root_folder; void brcms_debugfs_init(void) { root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(root_folder)) - root_folder = NULL; } void brcms_debugfs_exit(void) { - if (!root_folder) - return; - debugfs_remove_recursive(root_folder); root_folder = NULL; } -int brcms_debugfs_attach(struct brcms_pub *drvr) +void brcms_debugfs_attach(struct brcms_pub *drvr) { - if (!root_folder) - return -ENODEV; - drvr->dbgfs_dir = debugfs_create_dir( dev_name(&drvr->wlc->hw->d11core->dev), root_folder); - return PTR_ERR_OR_ZERO(drvr->dbgfs_dir); } void brcms_debugfs_detach(struct brcms_pub *drvr) @@ -195,7 +186,7 @@ static const struct file_operations brcms_debugfs_def_ops = { .llseek = seq_lseek }; -static int +static void brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn, int (*read_fn)(struct seq_file *seq, void *data)) { @@ -203,27 +194,18 @@ brcms_debugfs_add_entry(struct brcms_pub *drvr, const char *fn, struct dentry *dentry = drvr->dbgfs_dir; struct brcms_debugfs_entry *entry; - if (IS_ERR_OR_NULL(dentry)) - return -ENOENT; - entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL); if (!entry) - return -ENOMEM; + return; entry->read = read_fn; entry->drvr = drvr; - dentry = debugfs_create_file(fn, 0444, dentry, entry, - &brcms_debugfs_def_ops); - - return PTR_ERR_OR_ZERO(dentry); + debugfs_create_file(fn, 0444, dentry, entry, &brcms_debugfs_def_ops); } void brcms_debugfs_create_files(struct brcms_pub *drvr) { - if (IS_ERR_OR_NULL(drvr->dbgfs_dir)) - return; - brcms_debugfs_add_entry(drvr, "hardware", brcms_debugfs_hardware_read); brcms_debugfs_add_entry(drvr, "macstat", brcms_debugfs_macstat_read); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h index 822781cf15d4..56898e6d789d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/debug.h @@ -68,7 +68,7 @@ void __brcms_dbg(struct device *dev, u32 level, const char *func, struct brcms_pub; void brcms_debugfs_init(void); void brcms_debugfs_exit(void); -int brcms_debugfs_attach(struct brcms_pub *drvr); +void brcms_debugfs_attach(struct brcms_pub *drvr); void brcms_debugfs_detach(struct brcms_pub *drvr); struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr); void brcms_debugfs_create_files(struct brcms_pub *drvr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index e78a93a45741..c6e107f41948 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -1199,8 +1199,6 @@ wlc_lcnphy_rx_iq_est(struct brcms_phy *pi, { int wait_count = 0; bool result = true; - u8 phybw40; - phybw40 = CHSPEC_IS40(pi->radio_chanspec); mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5); 
@@ -3082,7 +3080,7 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) u8 bbmult; struct phytbl_info tab; s32 a1, b0, b1; - s32 tssi, pwr, maxtargetpwr, mintargetpwr; + s32 tssi, pwr, mintargetpwr; bool suspend; struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro); @@ -3119,7 +3117,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) b0 = pi->txpa_2g[0]; b1 = pi->txpa_2g[1]; a1 = pi->txpa_2g[2]; - maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1); mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1); tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; @@ -4212,7 +4209,7 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi) s8 index; struct phytbl_info tab; s32 a1, b0, b1; - s32 tssi, pwr, maxtargetpwr, mintargetpwr; + s32 tssi, pwr, mintargetpwr; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; pi->phy_lastcal = pi->sh->now; @@ -4249,7 +4246,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi) b0 = pi->txpa_2g[0]; b1 = pi->txpa_2g[1]; a1 = pi->txpa_2g[2]; - maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1); mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1); tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; @@ -4622,13 +4618,10 @@ static void wlc_lcnphy_radio_init(struct brcms_phy *pi) static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) { uint idx; - u8 phybw40; struct phytbl_info tab; const struct phytbl_info *tb; u32 val; - phybw40 = CHSPEC_IS40(pi->radio_chanspec); - for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++) wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]); @@ -4831,9 +4824,7 @@ static void wlc_lcnphy_baseband_init(struct brcms_phy *pi) void wlc_phy_init_lcnphy(struct brcms_phy *pi) { - u8 phybw40; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; - phybw40 = CHSPEC_IS40(pi->radio_chanspec); pi_lcn->lcnphy_cal_counter = 0; pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile index 256c91f9ac4b..bb02c6220a88 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile @@ -15,9 +15,7 @@ # OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -ccflags-y := \ - -Idrivers/net/wireless/broadcom/brcm80211/brcmutil \ - -Idrivers/net/wireless/broadcom/brcm80211/include +ccflags-y := -I $(srctree)/$(src)/../include obj-$(CONFIG_BRCMUTIL) += brcmutil.o brcmutil-objs = utils.o d11.o diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 57e3b6cca234..271977f7fbb0 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3756,10 +3756,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto out_remove_sysfs; - err = il_dbgfs_register(il, DRV_NAME); - if (err) - IL_ERR("failed to create debugfs files. 
Ignoring error: %d\n", - err); + il_dbgfs_register(il, DRV_NAME); /* Start monitoring the killswitch */ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 6b4488a178a7..94222ae464ae 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4988,10 +4988,7 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context) if (err) goto out_unbind; - err = il_dbgfs_register(il, DRV_NAME); - if (err) - IL_ERR("failed to create debugfs files. Ignoring error: %d\n", - err); + il_dbgfs_register(il, DRV_NAME); err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group); if (err) { diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index dc6a74a05983..b079c64ca014 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -2974,13 +2974,11 @@ il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len) #endif /* CONFIG_IWLEGACY_DEBUG */ #ifdef CONFIG_IWLEGACY_DEBUGFS -int il_dbgfs_register(struct il_priv *il, const char *name); +void il_dbgfs_register(struct il_priv *il, const char *name); void il_dbgfs_unregister(struct il_priv *il); #else -static inline int -il_dbgfs_register(struct il_priv *il, const char *name) +static inline void il_dbgfs_register(struct il_priv *il, const char *name) { - return 0; } static inline void diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c index d76073def677..fa211412e0ac 100644 --- a/drivers/net/wireless/intel/iwlegacy/debug.c +++ b/drivers/net/wireless/intel/iwlegacy/debug.c @@ -128,23 +128,12 @@ EXPORT_SYMBOL(il_update_stats); /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, il, \ - &il_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(#name, mode, parent, il, \ + &il_dbgfs_##name##_ops); \ } while (0) #define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ + debugfs_create_bool(#name, 0600, parent, ptr); \ } while (0) /* file operation */ @@ -1341,27 +1330,18 @@ DEBUGFS_WRITE_FILE_OPS(wd_timeout); * Create the debugfs files and directories * */ -int +void il_dbgfs_register(struct il_priv *il, const char *name) { struct dentry *phyd = il->hw->wiphy->debugfsdir; struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; dir_drv = debugfs_create_dir(name, phyd); - if (!dir_drv) - return -ENOMEM; - il->debugfs_dir = dir_drv; dir_data = debugfs_create_dir("data", dir_drv); - if (!dir_data) - goto err; dir_rf = debugfs_create_dir("rf", dir_drv); - if (!dir_rf) - goto err; dir_debug = debugfs_create_dir("debug", dir_drv); - if (!dir_debug) - goto err; DEBUGFS_ADD_FILE(nvm, dir_data, 0400); DEBUGFS_ADD_FILE(sram, dir_data, 0600); @@ -1399,12 +1379,6 @@ il_dbgfs_register(struct il_priv *il, const char *name) DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, &il->disable_chain_noise_cal); DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal); - return 0; - -err: - IL_ERR("Can't 
create the debugfs directory\n"); - il_dbgfs_unregister(il); - return -ENOMEM; } EXPORT_SYMBOL(il_dbgfs_register); diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 491ca3c8b43c..83d5bceea08f 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -1,6 +1,6 @@ config IWLWIFI tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " - depends on PCI && HAS_IOMEM + depends on PCI && HAS_IOMEM && CFG80211 select FW_LOADER ---help--- Select to build the driver supporting the: @@ -47,6 +47,7 @@ if IWLWIFI config IWLWIFI_LEDS bool depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI + depends on IWLMVM || IWLDVM select LEDS_TRIGGERS select MAC80211_LEDS default y diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 7e65073834b7..fdc56f821b5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * BSD LICENSE * * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018-2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -56,7 +56,7 @@ #include "iwl-config.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 43 +#define IWL_22000_UCODE_API_MAX 46 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 @@ -79,10 +79,15 @@ #define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-" #define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" #define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-" -#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" #define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" +#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" +#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" +#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" +#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-" +#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-" +#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" @@ -96,14 +101,26 @@ IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ - IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ + IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ + IWL_QNJ_B_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_CC_A_MODULE_FIRMWARE(api) \ + IWL_CC_A_FW_PRE __stringify(api) ".ucode" +#define 
IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(api) \ + IWL_22000_SO_A_JF_B_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(api) \ + IWL_22000_SO_A_HR_B_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_22000_SO_A_GF_A_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(api) \ + IWL_22000_TY_A_GF_A_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -164,6 +181,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .d3_debug_data_base_addr = 0x401000, \ .d3_debug_data_length = 60 * 1024 +#define IWL_DEVICE_AX200_COMMON \ + IWL_DEVICE_22000_COMMON, \ + .umac_prph_offset = 0x300000 + #define IWL_DEVICE_22500 \ IWL_DEVICE_22000_COMMON, \ .device_family = IWL_DEVICE_FAMILY_22000, \ @@ -176,6 +197,13 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .base_params = &iwl_22560_base_params, \ .csr = &iwl_csr_v2 +#define IWL_DEVICE_AX210 \ + IWL_DEVICE_AX200_COMMON, \ + .device_family = IWL_DEVICE_FAMILY_AX210, \ + .base_params = &iwl_22000_base_params, \ + .csr = &iwl_csr_v1, \ + .min_txq_size = 128 + const struct iwl_cfg iwl22000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, @@ -195,8 +223,8 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = { IWL_DEVICE_22500, }; -const struct iwl_cfg iwl22000_2ax_cfg_hr = { - .name = "Intel(R) Dual Band Wireless AX 22000", +const struct iwl_cfg iwl_ax101_cfg_qu_hr = { + .name = "Intel(R) Wi-Fi 6 AX101", .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* @@ -207,6 +235,45 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; +const struct iwl_cfg iwl22260_2ax_cfg = { + .name = "Intel(R) Wireless-AX 22260", + .fw_name_pre = IWL_CC_A_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .bisr_workaround = 1, +}; + +const struct iwl_cfg killer1650x_2ax_cfg = { + .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)", + .fw_name_pre = IWL_CC_A_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .bisr_workaround = 1, +}; + +const struct iwl_cfg killer1650w_2ax_cfg = { + .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)", + .fw_name_pre = IWL_CC_A_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .bisr_workaround = 1, +}; + /* * All JF radio modules are part of the 9000 series, but the MAC part * looks more like 22000. 
That's why this device is here, but called @@ -230,6 +297,24 @@ const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = { IWL_DEVICE_22500, }; +const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0 = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, + IWL_DEVICE_22500, +}; + +const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0 = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL_QNJ_B_JF_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, @@ -242,9 +327,9 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = { IWL_DEVICE_22500, }; -const struct iwl_cfg iwl22000_2ax_cfg_jf = { - .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, +const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { + .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)", + .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -254,9 +339,21 @@ const struct iwl_cfg iwl22000_2ax_cfg_jf = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; -const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { +const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { + .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)", + .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. 
+ */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_jf = { .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE, + .fw_name_pre = IWL_QU_B_JF_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -266,9 +363,9 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; -const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_HR_B_FW_PRE, + .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -278,9 +375,9 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; -const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_JF_B0_FW_PRE, + .fw_name_pre = IWL_22000_HR_B_FW_PRE, IWL_DEVICE_22500, /* * This device doesn't support receiving BlockAck with a large bitmap @@ -315,12 +412,41 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = { .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; +const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE, + IWL_DEVICE_AX210, +}; + +const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = { + .name = "Intel(R) Wi-Fi 6 AX201 160MHz", + .fw_name_pre = IWL_22000_SO_A_HR_B_FW_PRE, + IWL_DEVICE_AX210, +}; + +const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = { + .name = "Intel(R) Wi-Fi 7 AX211 160MHz", + .fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE, + IWL_DEVICE_AX210, +}; + +const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = { + .name = "Intel(R) Wi-Fi 7 AX210 160MHz", + .fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE, + IWL_DEVICE_AX210, +}; + MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_SO_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index f2114137c13f..3225b64eb845 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -57,7 +57,7 @@ #include "fw/file.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 43 +#define 
IWL9000_UCODE_API_MAX 46 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 @@ -73,21 +73,12 @@ #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" -#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" -#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-" -#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" -#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" -#define IWL9000A_MODULE_FIRMWARE(api) \ - IWL9000A_FW_PRE __stringify(api) ".ucode" -#define IWL9000B_MODULE_FIRMWARE(api) \ - IWL9000B_FW_PRE __stringify(api) ".ucode" -#define IWL9000RFB_MODULE_FIRMWARE(api) \ - IWL9000RFB_FW_PRE __stringify(api) ".ucode" -#define IWL9260A_MODULE_FIRMWARE(api) \ - IWL9260A_FW_PRE __stringify(api) ".ucode" -#define IWL9260B_MODULE_FIRMWARE(api) \ - IWL9260B_FW_PRE __stringify(api) ".ucode" +#define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" +#define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" +#define IWL9000_MODULE_FIRMWARE(api) \ + IWL9000_FW_PRE __stringify(api) ".ucode" +#define IWL9260_MODULE_FIRMWARE(api) \ + IWL9260_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, @@ -162,81 +153,87 @@ static const struct iwl_tt_params iwl9000_tt_params = { const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, + IWL_DEVICE_9000, +}; + +const struct iwl_cfg iwl9260_2ac_160_cfg = { + .name = "Intel(R) Wireless-AC 9260 160MHz", + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9260_killer_2ac_cfg = { .name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9270_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9270", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9460_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9460", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9460_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9460", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, }; const struct iwl_cfg iwl9461_2ac_cfg_soc = { - .name = "Intel(R) Dual Band Wireless AC 9461", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, - IWL_DEVICE_9000, - .integrated = true, - .soc_latency = 5000, + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9462_2ac_cfg_soc = { - .name = "Intel(R) Dual Band Wireless AC 9462", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - 
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, - IWL_DEVICE_9000, - .integrated = true, - .soc_latency = 5000, + .name = "Intel(R) Dual Band Wireless AC 9462", + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9560", - .fw_name_pre = IWL9260A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, + .fw_name_pre = IWL9260_FW_PRE, + IWL_DEVICE_9000, +}; + +const struct iwl_cfg iwl9560_2ac_160_cfg = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL9260_FW_PRE, IWL_DEVICE_9000, }; const struct iwl_cfg iwl9560_2ac_cfg_soc = { .name = "Intel(R) Dual Band Wireless AC 9560", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9560_2ac_160_cfg_soc = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -244,9 +241,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = { const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -254,9 +249,7 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -264,9 +257,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9460", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -275,9 +266,7 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9461", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -286,9 +275,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9462", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -297,9 +284,16 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { .name = "Intel(R) Dual Band Wireless AC 9560", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, + IWL_DEVICE_9000, + .integrated = true, + .soc_latency = 5000, + 
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK +}; + +const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk = { + .name = "Intel(R) Wireless-AC 9560 160MHz", + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -308,9 +302,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, @@ -319,17 +311,12 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)", - .fw_name_pre = IWL9000A_FW_PRE, - .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, - .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + .fw_name_pre = IWL9000_FW_PRE, IWL_DEVICE_9000, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK }; -MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile index 702d42b2d452..0486b17d7c41 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile @@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o -ccflags-y += -I$(src)/../ +ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h index 431e13c6ee35..254a5ce52456 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h @@ -439,13 +439,10 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state) } #ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir); +void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir); #else -static inline int iwl_dbgfs_register(struct iwl_priv *priv, - struct dentry *dbgfs_dir) -{ - return 0; -} +static inline void iwl_dbgfs_register(struct iwl_priv *priv, + struct dentry *dbgfs_dir) { } #endif /* CONFIG_IWLWIFI_DEBUGFS */ #ifdef CONFIG_IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c index 3d2e44a642de..d4b19673b06a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c @@ -3,6 +3,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -36,31 +37,8 @@ /* create and remove of files */ #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ - if (!debugfs_create_file(#name, mode, parent, priv, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_bool(#name, 0600, parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_x32(#name, 0600, parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ -} while (0) - -#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do { \ - struct dentry *__tmp; \ - __tmp = debugfs_create_u32(#name, mode, \ - parent, ptr); \ - if (IS_ERR(__tmp) || !__tmp) \ - goto err; \ + debugfs_create_file(#name, mode, parent, priv, \ + &iwl_dbgfs_##name##_ops); \ } while (0) /* file operation */ @@ -2238,7 +2216,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file, buf_size = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; - if (sscanf(buf, "%d", &event_log_flag) != 1) + if (sscanf(buf, "%u", &event_log_flag) != 1) return -EFAULT; if (event_log_flag == 1) iwl_dump_nic_event_log(priv, true, NULL); @@ -2347,21 +2325,15 @@ DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled); * Create the debugfs files and directories * */ -int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir) +void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir) { struct dentry *dir_data, *dir_rf, *dir_debug; priv->debugfs_dir = dbgfs_dir; dir_data = debugfs_create_dir("data", dbgfs_dir); - if (!dir_data) - goto err; dir_rf = debugfs_create_dir("rf", dbgfs_dir); - if (!dir_rf) - goto err; dir_debug = debugfs_create_dir("debug", dbgfs_dir); - if (!dir_debug) - goto err; DEBUGFS_ADD_FILE(nvm, dir_data, 0400); DEBUGFS_ADD_FILE(sram, dir_data, 0600); @@ -2421,13 +2393,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir) snprintf(buf, 100, "../../%pd2", dev_dir); - if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf)) - goto err; + debugfs_create_symlink("iwlwifi", mac80211_dir, buf); } - - return 0; - -err: - IWL_ERR(priv, "failed to create the dvm debugfs entries\n"); - return -ENOMEM; } diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 49b71dbf8490..54b759cec8b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
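A side note on the dvm/debugfs.c hunk above: it drops all error handling around the debugfs_create_*() calls and turns iwl_dbgfs_register() into a void function. The debugfs creation helpers are intended to be called without checking their result (failures are absorbed inside debugfs itself), so the old goto err / -ENOMEM path had nothing useful left to do. A minimal sketch of the resulting pattern, not taken from the patch and with invented example_* names:

#include <linux/debugfs.h>

static const struct file_operations example_nvm_fops;	/* placeholder fops */

/*
 * Registration in the style adopted above: no return value and no error
 * checks; if the directory could not be created, the file call below is
 * simply a no-op as far as the caller is concerned.
 */
static void example_dbgfs_register(void *drv_priv, struct dentry *root)
{
	struct dentry *dir_data = debugfs_create_dir("data", root);

	debugfs_create_file("nvm", 0400, dir_data, drv_priv,
			    &example_nvm_fops);
}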
@@ -710,24 +711,6 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return ret; } -static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - return false; - return true; -} - -static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) -{ - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - return false; - if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) - return true; - - /* disabled by default */ - return false; -} - static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) @@ -752,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, switch (action) { case IEEE80211_AMPDU_RX_START: - if (!iwl_enable_rx_ampdu(priv->cfg)) + if (!iwl_enable_rx_ampdu()) break; IWL_DEBUG_HT(priv, "start Rx\n"); ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); @@ -764,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, case IEEE80211_AMPDU_TX_START: if (!priv->trans->ops->txq_enable) break; - if (!iwl_enable_tx_ampdu(priv->cfg)) + if (!iwl_enable_tx_ampdu()) break; IWL_DEBUG_HT(priv, "start Tx\n"); ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index c219bca5cff4..7c68a86ed9e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1054,7 +1054,7 @@ static void iwl_bg_restart(struct work_struct *data) ieee80211_restart_hw(priv->hw); else IWL_ERR(priv, - "Cannot request restart before registrating with mac80211\n"); + "Cannot request restart before registering with mac80211\n"); } else { WARN_ON(1); } @@ -1509,13 +1509,10 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, if (iwlagn_mac_setup_register(priv, &fw->ucode_capa)) goto out_destroy_workqueue; - if (iwl_dbgfs_register(priv, dbgfs_dir)) - goto out_mac80211_unregister; + iwl_dbgfs_register(priv, dbgfs_dir); return op_mode; -out_mac80211_unregister: - iwlagn_mac_unregister(priv); out_destroy_workqueue: iwl_tt_exit(priv); iwl_cancel_deferred_work(priv); @@ -1881,7 +1878,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, return pos; } - if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log) + if (!(iwl_have_debug_level(IWL_DL_FW)) && !full_log) size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", @@ -1897,7 +1894,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, if (!*buf) return -ENOMEM; } - if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) { + if (iwl_have_debug_level(IWL_DL_FW) || full_log) { /* * if uCode has wrapped back to top of log, * start at the oldest entry, @@ -1927,7 +1924,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) unsigned int reload_msec; unsigned long reload_jiffies; - if (iwl_have_debug_level(IWL_DL_FW_ERRORS)) + if (iwl_have_debug_level(IWL_DL_FW)) iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS); /* uCode is no longer loaded. 
*/ @@ -1965,12 +1962,12 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand) if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { if (iwlwifi_mod_params.fw_restart) { - IWL_DEBUG_FW_ERRORS(priv, - "Restarting adapter due to uCode error.\n"); + IWL_DEBUG_FW(priv, + "Restarting adapter due to uCode error.\n"); queue_work(priv->workqueue, &priv->restart); } else - IWL_DEBUG_FW_ERRORS(priv, - "Detected FW error, but not restarting\n"); + IWL_DEBUG_FW(priv, + "Detected FW error, but not restarting\n"); } } diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c index 6f17a5e24e82..e224b23f0ba8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c @@ -2,6 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portionhelp of the ieee80211 subsystem header files. @@ -592,7 +593,7 @@ static int iwlagn_set_decrypted_flag(struct iwl_priv *priv, if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_KEY_TTAK) break; - + /* fall through */ case RX_RES_STATUS_SEC_TYPE_WEP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_ICV_MIC) { @@ -601,6 +602,7 @@ static int iwlagn_set_decrypted_flag(struct iwl_priv *priv, IWL_DEBUG_RX(priv, "Packet destroyed\n"); return -1; } + /* fall through */ case RX_RES_STATUS_SEC_TYPE_CCMP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_DECRYPT_OK) { @@ -729,7 +731,7 @@ static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; break; } - /* fall through if TTAK OK */ + /* fall through */ default: if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index 8d7aafb4d9e9..f190f7beb3a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -3,6 +3,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -418,7 +419,7 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time) limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; limit /= 2; dwell_time = min(limit, dwell_time); - /* fall through to limit further */ + /* fall through */ case 1: limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; limit /= n_active; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c index 4de2727ac63e..a156dcf5b7d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
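The rx.c and scan.c hunks above also normalize the switch fall-through annotations to a bare /* fall through */ comment, the form recognized by GCC's -Wimplicit-fallthrough, instead of prose such as "fall through if TTAK OK". An illustrative fragment, not taken from the patch, showing the convention:

/* Hypothetical switch using the annotation style adopted above. */
static int example_decrypt_check(unsigned int sec_type, unsigned int status)
{
	switch (sec_type) {
	case 1:				/* TKIP-like case */
		if (status & 0x1)	/* bad TTAK: stop here */
			break;
		/* fall through */
	case 2:				/* WEP-like case */
		if (status & 0x2)	/* bad ICV/MIC: drop the packet */
			return -1;
		/* fall through */
	default:
		return 0;
	}
	return 0;
}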
@@ -325,9 +326,9 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force) iwl_prepare_ct_kill_task(priv); tt->state = old_state; } - } else if (old_state == IWL_TI_CT_KILL && - tt->state != IWL_TI_CT_KILL) + } else if (old_state == IWL_TI_CT_KILL) { iwl_perform_ct_kill_task(priv, false); + } IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n", tt->state); IWL_DEBUG_TEMP(priv, "Power Index change to %u\n", diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 32d000cffe9f..405038ce98d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright (C) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright (C) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -205,3 +207,33 @@ out: return dflt_pwr_limit; } IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit); + +int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) +{ + union acpi_object *wifi_pkg, *data; + int ret; + + data = iwl_acpi_get_object(dev, ACPI_ECKV_METHOD); + if (IS_ERR(data)) + return PTR_ERR(data); + + wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE); + if (IS_ERR(wifi_pkg)) { + ret = PTR_ERR(wifi_pkg); + goto out_free; + } + + if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { + ret = -EINVAL; + goto out_free; + } + + *extl_clk = wifi_pkg->package.elements[1].integer.value; + + ret = 0; + +out_free: + kfree(data); + return ret; +} +IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 7492dfb6729b..f5704e16643f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -67,6 +67,7 @@ #define ACPI_WGDS_METHOD "WGDS" #define ACPI_WRDD_METHOD "WRDD" #define ACPI_SPLC_METHOD "SPLC" +#define ACPI_ECKV_METHOD "ECKV" #define ACPI_WIFI_DOMAIN (0x07) @@ -86,6 +87,7 @@ #define ACPI_WGDS_WIFI_DATA_SIZE 19 #define ACPI_WRDD_WIFI_DATA_SIZE 2 #define ACPI_SPLC_WIFI_DATA_SIZE 2 +#define ACPI_ECKV_WIFI_DATA_SIZE 2 #define ACPI_WGDS_NUM_BANDS 2 #define ACPI_WGDS_TABLE_SIZE 3 @@ -109,6 +111,17 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc); u64 iwl_acpi_get_pwr_limit(struct device *dev); +/* + * iwl_acpi_get_eckv - read external clock validation from ACPI, if available + * + * @dev: the struct device + * @extl_clk: output var (2 bytes) that will get the clk indication. + * + * This function tries to read the external clock indication + * from ACPI if available. 
+ */ +int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk); + #else /* CONFIG_ACPI */ static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method) @@ -133,5 +146,10 @@ static inline u64 iwl_acpi_get_pwr_limit(struct device *dev) return 0; } +static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk) +{ + return -ENOENT; +} + #endif /* CONFIG_ACPI */ #endif /* __iwl_fw_acpi__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index 08d3d8a190f6..df1bd0d2450e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -96,14 +96,7 @@ enum { #define IWL_ALIVE_FLG_RFKILL BIT(0) -struct iwl_lmac_alive { - __le32 ucode_major; - __le32 ucode_minor; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le32 timestamp; +struct iwl_lmac_debug_addrs { __le32 error_event_table_ptr; /* SRAM address for error log */ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ __le32 cpu_register_ptr; @@ -112,13 +105,28 @@ struct iwl_lmac_alive { __le32 scd_base_ptr; /* SRAM address for SCD */ __le32 st_fwrd_addr; /* pointer to Store and forward */ __le32 st_fwrd_size; +} __packed; /* UCODE_DEBUG_ADDRS_API_S_VER_2 */ + +struct iwl_lmac_alive { + __le32 ucode_major; + __le32 ucode_minor; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le32 timestamp; + struct iwl_lmac_debug_addrs dbg_ptrs; } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ +struct iwl_umac_debug_addrs { + __le32 error_info_addr; /* SRAM address for UMAC error log */ + __le32 dbg_print_buff_addr; +} __packed; /* UMAC_DEBUG_ADDRS_API_S_VER_1 */ + struct iwl_umac_alive { __le32 umac_major; /* UMAC version: major */ __le32 umac_minor; /* UMAC version: minor */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; + struct iwl_umac_debug_addrs dbg_ptrs; } __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ struct mvm_alive_resp_v3 { @@ -189,4 +197,24 @@ struct iwl_card_state_notif { __le32 flags; } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ +/** + * enum iwl_error_recovery_flags - flags for error recovery cmd + * @ERROR_RECOVERY_UPDATE_DB: update db from blob sent + * @ERROR_RECOVERY_END_OF_RECOVERY: end of recovery + */ +enum iwl_error_recovery_flags { + ERROR_RECOVERY_UPDATE_DB = BIT(0), + ERROR_RECOVERY_END_OF_RECOVERY = BIT(1), +}; + +/** + * struct iwl_fw_error_recovery_cmd - recovery cmd sent upon assert + * @flags: &enum iwl_error_recovery_flags + * @buf_size: db buffer size in bytes + */ +struct iwl_fw_error_recovery_cmd { + __le32 flags; + __le32 buf_size; +} __packed; /* ERROR_RECOVERY_CMD_HDR_API_S_VER_1 */ + #endif /* __iwl_fw_api_alive_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 8b4922bbe139..4d2274bcc0b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -77,7 +77,8 @@ * @DATA_PATH_GROUP: data path group, uses command IDs from * &enum iwl_data_path_subcmd_ids * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids - * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids + * @LOCATION_GROUP: location group, uses command IDs from + * &enum iwl_location_subcmd_ids * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from * &enum iwl_prot_offload_subcmd_ids * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command 
IDs from @@ -92,7 +93,7 @@ enum iwl_mvm_command_groups { PHY_OPS_GROUP = 0x4, DATA_PATH_GROUP = 0x5, NAN_GROUP = 0x7, - TOF_GROUP = 0x8, + LOCATION_GROUP = 0x8, PROT_OFFLOAD_GROUP = 0xb, REGULATORY_AND_NVM_GROUP = 0xc, DEBUG_GROUP = 0xf, @@ -352,16 +353,6 @@ enum iwl_legacy_cmds { */ PHY_DB_CMD = 0x6c, - /** - * @TOF_CMD: &struct iwl_tof_config_cmd - */ - TOF_CMD = 0x10, - - /** - * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd - */ - TOF_NOTIFICATION = 0x11, - /** * @POWER_TABLE_CMD: &struct iwl_device_power_cmd */ @@ -415,7 +406,11 @@ enum iwl_legacy_cmds { TX_ANT_CONFIGURATION_CMD = 0x98, /** - * @STATISTICS_CMD: &struct iwl_statistics_cmd + * @STATISTICS_CMD: + * one of &struct iwl_statistics_cmd, + * &struct iwl_notif_statistics_v11, + * &struct iwl_notif_statistics_v10, + * &struct iwl_notif_statistics */ STATISTICS_CMD = 0x9c, @@ -423,7 +418,7 @@ enum iwl_legacy_cmds { * @STATISTICS_NOTIFICATION: * one of &struct iwl_notif_statistics_v10, * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_cdb + * &struct iwl_notif_statistics */ STATISTICS_NOTIFICATION = 0x9d, @@ -648,6 +643,11 @@ enum iwl_system_subcmd_ids { * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd */ INIT_EXTENDED_CFG_CMD = 0x03, + + /** + * @FW_ERROR_RECOVERY_CMD: &struct iwl_fw_error_recovery_cmd + */ + FW_ERROR_RECOVERY_CMD = 0x7, }; #endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index 6fae02fa4cad..86ea0784e1a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -224,8 +224,18 @@ struct iwl_wowlan_pattern { #define IWL_WOWLAN_MAX_PATTERNS 20 +/** + * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns + */ struct iwl_wowlan_patterns_cmd { + /** + * @n_patterns: number of patterns + */ __le32 n_patterns; + + /** + * @patterns: the patterns, array length in @n_patterns + */ struct iwl_wowlan_pattern patterns[]; } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index fdc54a5dc9de..93c06e6c1ced 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -104,6 +104,12 @@ enum iwl_data_path_subcmd_ids { */ HE_AIR_SNIFFER_CONFIG_CMD = 0x13, + /** + * @CHEST_COLLECTOR_FILTER_CONFIG_CMD: Configure the CSI + * matrix collection, uses &struct iwl_channel_estimation_cfg + */ + CHEST_COLLECTOR_FILTER_CONFIG_CMD = 0x14, + /** * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data */ @@ -156,4 +162,53 @@ struct iwl_mu_group_mgmt_notif { __le32 user_position[4]; } __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ +enum iwl_channel_estimation_flags { + IWL_CHANNEL_ESTIMATION_ENABLE = BIT(0), + IWL_CHANNEL_ESTIMATION_TIMER = BIT(1), + IWL_CHANNEL_ESTIMATION_COUNTER = BIT(2), +}; + +/** + * struct iwl_channel_estimation_cfg - channel estimation reporting config + */ +struct iwl_channel_estimation_cfg { + /** + * @flags: flags, see &enum iwl_channel_estimation_flags + */ + __le32 flags; + /** + * @timer: if enabled via flags, automatically disable after this many + * microseconds + */ + __le32 timer; + /** + * @count: if enabled via flags, automatically disable after this many + * frames with channel estimation matrix were captured + */ + __le32 count; + /** + * @rate_n_flags_mask: only try to record the channel estimation matrix + * if the rate_n_flags value for the 
received frame (let's call + * that rx_rnf) matches the mask/value given here like this: + * (rx_rnf & rate_n_flags_mask) == rate_n_flags_val. + */ + __le32 rate_n_flags_mask; + /** + * @rate_n_flags_val: see @rate_n_flags_mask + */ + __le32 rate_n_flags_val; + /** + * @reserved: reserved (for alignment) + */ + __le32 reserved; + /** + * @frame_types: bitmap of frame types to capture, the received frame's + * subtype|type takes 6 bits in the frame and the corresponding bit + * in this field must be set to 1 to capture channel estimation for + * that frame type. Set to all-ones to enable capturing for all + * frame types. + */ + __le64 frame_types; +} __packed; /* CHEST_COLLECTOR_FILTER_CMD_API_S_VER_1 */ + #endif /* __iwl_fw_api_datapath_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index ab82b7a67967..33858787817b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -25,7 +25,7 @@ * * BSD LICENSE * - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -70,7 +70,7 @@ struct iwl_fw_ini_header { __le32 tlv_version; __le32 apply_point; u8 data[]; -} __packed; /* FW_INI_HEADER_TLV_S */ +} __packed; /* FW_DEBUG_TLV_HEADER_S */ /** * struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION) @@ -92,7 +92,7 @@ struct iwl_fw_ini_allocation_tlv { __le32 size; __le32 max_fragments; __le32 min_frag_size; -} __packed; /* FW_INI_BUFFER_ALLOCATION_TLV_S_VER_1 */ +} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */ /** * struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD) @@ -108,7 +108,7 @@ struct iwl_fw_ini_hcmd { u8 group; __le16 padding; u8 data[0]; -} __packed; /* FW_INI_HCMD_S */ +} __packed; /* FW_DEBUG_TLV_HCMD_DATA_S */ /** * struct iwl_fw_ini_hcmd_tlv @@ -118,7 +118,7 @@ struct iwl_fw_ini_hcmd { struct iwl_fw_ini_hcmd_tlv { struct iwl_fw_ini_header header; struct iwl_fw_ini_hcmd hcmd; -} __packed; /* FW_INI_HCMD_TLV_S_VER_1 */ +} __packed; /* FW_DEBUG_TLV_HCMD_S_VER_1 */ /* * struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW) @@ -129,20 +129,50 @@ struct iwl_fw_ini_hcmd_tlv { struct iwl_fw_ini_debug_flow_tlv { struct iwl_fw_ini_header header; __le32 debug_flow_cfg; -} __packed; /* FW_INI_DEBUG_FLOW_TLV_S_VER_1 */ +} __packed; /* FW_DEBUG_TLV_FLOW_TLV_S_VER_1 */ -#define IWL_FW_INI_MAX_REGION_ID 20 +#define IWL_FW_INI_MAX_REGION_ID 64 #define IWL_FW_INI_MAX_NAME 32 + +/** + * struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region + * @num_of_range: the amount of ranges in the region + * @range_data_size: size of the data to read per range, in bytes. + */ +struct iwl_fw_ini_region_cfg_internal { + __le32 num_of_ranges; + __le32 range_data_size; +} __packed; /* FW_DEBUG_TLV_REGION_NIC_INTERNAL_RANGES_S */ + +/** + * struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region + * @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region + * @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region. + * It is unused for tx. 
+ * @num_of_registers: number of prph registers in the region, each register is + * 4 bytes size. + * @header_only: none zero value indicates that this region does not include + * fifo data and includes only the given registers. + */ +struct iwl_fw_ini_region_cfg_fifos { + __le32 fid1; + __le32 fid2; + __le32 num_of_registers; + __le32 header_only; +} __packed; /* FW_DEBUG_TLV_REGION_FIFOS_S */ + /** * struct iwl_fw_ini_region_cfg * @region_id: ID of this dump configuration * @region_type: &enum iwl_fw_ini_region_type * @num_regions: amount of regions in the address array. - * @allocation_id: For DRAM type field substitutes for allocation_id. * @name_len: name length * @name: file name to use for this region - * @size: size of the data, in bytes.(unused for IWL_FW_INI_REGION_DRAM_BUFFER) - * @start_addr: array of addresses. (unused for IWL_FW_INI_REGION_DRAM_BUFFER) + * @internal: used in case the region uses internal memory. + * @allocation_id: For DRAM type field substitutes for allocation_id + * @fifos: used in case of fifos region. + * @offset: offset to use for each memory base address + * @start_addr: array of addresses. */ struct iwl_fw_ini_region_cfg { __le32 region_id; @@ -150,32 +180,38 @@ struct iwl_fw_ini_region_cfg { __le32 name_len; u8 name[IWL_FW_INI_MAX_NAME]; union { - __le32 num_regions; + struct iwl_fw_ini_region_cfg_internal internal; __le32 allocation_id; + struct iwl_fw_ini_region_cfg_fifos fifos; }; - __le32 size; + __le32 offset; __le32 start_addr[]; -} __packed; /* FW_INI_REGION_CONFIG_S */ +} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_S */ /** * struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG) * DUMP sections define IDs and triggers that use those IDs TLV * @header: header * @num_regions: how many different region section and IDs are coming next - * @iwl_fw_ini_dump dump_config: list of dump configurations + * @region_config: list of dump configurations */ struct iwl_fw_ini_region_tlv { struct iwl_fw_ini_header header; __le32 num_regions; struct iwl_fw_ini_region_cfg region_config[]; -} __packed; /* FW_INI_REGION_CFG_S */ +} __packed; /* FW_DEBUG_TLV_REGIONS_S_VER_1 */ /** * struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG) * Region sections define IDs and triggers that use those IDs TLV * * @trigger_id: enum &iwl_fw_ini_tigger_id - * @ignore_default: override FW TLV with binary TLV + * @override_trig: determines how apply trigger in case a trigger with the + * same id is already in use. Using the first 2 bytes: + * Byte 0: if 0, override trigger configuration, otherwise use the + * existing configuration. + * Byte 1: if 0, override trigger regions, otherwise append regions to + * existing trigger. 
* @dump_delay: delay from trigger fire to dump, in usec * @occurrences: max amount of times to be fired * @ignore_consec: ignore consecutive triggers, in usec @@ -187,7 +223,7 @@ struct iwl_fw_ini_region_tlv { */ struct iwl_fw_ini_trigger { __le32 trigger_id; - __le32 ignore_default; + __le32 override_trig; __le32 dump_delay; __le32 occurrences; __le32 ignore_consec; @@ -196,7 +232,7 @@ struct iwl_fw_ini_trigger { __le32 trigger_data; __le32 num_regions; __le32 data[]; -} __packed; /* FW_INI_TRIGGER_CONFIG_S */ +} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_S */ /** * struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG) @@ -210,20 +246,17 @@ struct iwl_fw_ini_trigger_tlv { struct iwl_fw_ini_header header; __le32 num_triggers; struct iwl_fw_ini_trigger trigger_config[]; -} __packed; /* FW_INI_TRIGGER_CFG_S */ +} __packed; /* FW_TLV_DEBUG_TRIGGERS_S_VER_1 */ /** * enum iwl_fw_ini_trigger_id * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert - * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert - * @IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR: FW error notification - * @IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING: FW warning notification - * @IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO: FW info notification - * @IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG: FW debug notification + * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang + * @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification + * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION: FW generic notification * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity - * @FW_DEBUG_TLV_TRIGGER_ID_HOST_DID_INITIATED_EVENT: undefined * @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency * threshold was crossed * @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed @@ -257,50 +290,53 @@ struct iwl_fw_ini_trigger_tlv { * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs */ enum iwl_fw_ini_trigger_id { + IWL_FW_TRIGGER_ID_INVALID = 0, + /* Errors triggers */ IWL_FW_TRIGGER_ID_FW_ASSERT = 1, - IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 2, - IWL_FW_TRIGGER_ID_FW_HW_ERROR = 3, - /* Generic triggers */ - IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR = 4, - IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING = 5, - IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO = 6, - IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG = 7, - /* User Trigger */ - IWL_FW_TRIGGER_ID_USER_TRIGGER = 8, + IWL_FW_TRIGGER_ID_FW_HW_ERROR = 2, + IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 3, + + /* FW triggers */ + IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4, + IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION = 5, + + /* User trigger */ + IWL_FW_TRIGGER_ID_USER_TRIGGER = 6, + /* Host triggers */ - IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 9, - IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT = 10, - IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 11, - IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 12, - IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 13, - IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 14, - IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 15, - IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 16, - IWL_FW_TRIGGER_ID_HOST_SCAN_START = 17, - IWL_FW_TRIGGER_ID_HOST_SCAN_SUBITTED = 18, - IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 19, - IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 20, - IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 21, - IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 22, - IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 23, - IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 24, - IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 25, - 
IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 26, - IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 27, - IWL_FW_TRIGGER_ID_HOST_D3_START = 28, - IWL_FW_TRIGGER_ID_HOST_D3_END = 29, - IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 30, - IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 31, - IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 32, - IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 33, - IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 34, - IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 35, - IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 36, - IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 37, - IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 38, + IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 7, + IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 8, + IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 9, + IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 10, + IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 11, + IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 12, + IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 13, + IWL_FW_TRIGGER_ID_HOST_SCAN_START = 14, + IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 15, + IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 16, + IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 17, + IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 18, + IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 19, + IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 20, + IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 21, + IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 22, + IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 23, + IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 24, + IWL_FW_TRIGGER_ID_HOST_D3_START = 25, + IWL_FW_TRIGGER_ID_HOST_D3_END = 26, + IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 27, + IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 28, + IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 29, + IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 30, + IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 31, + IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 32, + IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 33, + IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 34, + IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 35, + IWL_FW_TRIGGER_ID_NUM, -}; /* FW_INI_TRIGGER_ID_E_VER_1 */ +}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */ /** * enum iwl_fw_ini_apply_point @@ -320,7 +356,7 @@ enum iwl_fw_ini_apply_point { IWL_FW_INI_APPLY_MISSED_BEACONS, IWL_FW_INI_APPLY_SCAN_COMPLETE, IWL_FW_INI_APPLY_NUM, -}; /* FW_INI_APPLY_POINT_E_VER_1 */ +}; /* FW_DEBUG_TLV_APPLY_POINT_E_VER_1 */ /** * enum iwl_fw_ini_allocation_id @@ -340,7 +376,7 @@ enum iwl_fw_ini_allocation_id { IWL_FW_INI_ALLOCATION_ID_SDFX, IWL_FW_INI_ALLOCATION_ID_FW_DUMP, IWL_FW_INI_ALLOCATION_ID_USER_DEFINED, -}; /* FW_INI_ALLOCATION_ID_E_VER_1 */ +}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */ /** * enum iwl_fw_ini_buffer_location @@ -349,10 +385,10 @@ enum iwl_fw_ini_allocation_id { * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location */ enum iwl_fw_ini_buffer_location { - IWL_FW_INI_LOCATION_SRAM_INVALID, + IWL_FW_INI_LOCATION_INVALID, IWL_FW_INI_LOCATION_SRAM_PATH, IWL_FW_INI_LOCATION_DRAM_PATH, -}; /* FW_INI_BUFFER_LOCATION_E_VER_1 */ +}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */ /** * enum iwl_fw_ini_debug_flow @@ -364,7 +400,7 @@ enum iwl_fw_ini_debug_flow { IWL_FW_INI_DEBUG_INVALID, IWL_FW_INI_DEBUG_DBTR_FLOW, IWL_FW_INI_DEBUG_TB2DTF_FLOW, -}; /* FW_INI_DEBUG_FLOW_E_VER_1 */ +}; /* FW_DEBUG_TLV_FLOW_E_VER_1 */ /** * enum iwl_fw_ini_region_type @@ -396,6 +432,6 @@ enum iwl_fw_ini_region_type { IWL_FW_INI_REGION_PAGING, IWL_FW_INI_REGION_CSR, IWL_FW_INI_REGION_NUM -}; /* FW_INI_REGION_TYPE_E_VER_1*/ +}; /* 
FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */ #endif diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index dc1fa377087a..988584973aba 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -335,29 +335,11 @@ struct iwl_dbg_mem_access_rsp { __le32 data[]; } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ -#define CONT_REC_COMMAND_SIZE 80 -#define ENABLE_CONT_RECORDING 0x15 -#define DISABLE_CONT_RECORDING 0x16 +#define LDBG_CFG_COMMAND_SIZE 80 #define BUFFER_ALLOCATION 0x27 #define START_DEBUG_RECORDING 0x29 #define STOP_DEBUG_RECORDING 0x2A -/* - * struct iwl_continuous_record_mode - recording mode - */ -struct iwl_continuous_record_mode { - __le16 enable_recording; -} __packed; - -/* - * struct iwl_continuous_record_cmd - enable/disable continuous recording - */ -struct iwl_continuous_record_cmd { - struct iwl_continuous_record_mode record_mode; - u8 pad[CONT_REC_COMMAND_SIZE - - sizeof(struct iwl_continuous_record_mode)]; -} __packed; - /* maximum fragments to be allocated per target of allocationId */ #define IWL_BUFFER_LOCATION_MAX_FRAGS 2 @@ -385,4 +367,17 @@ struct iwl_buffer_allocation_cmd { struct iwl_fragment_data fragments[IWL_BUFFER_LOCATION_MAX_FRAGS]; } __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_1 */ +/** + * struct iwl_ldbg_config_cmd - LDBG config command + * @type: configuration type + * @pad: reserved space for type-dependent data + */ +struct iwl_ldbg_config_cmd { + __le32 type; + union { + u8 pad[LDBG_CFG_COMMAND_SIZE - sizeof(__le32)]; + struct iwl_buffer_allocation_cmd buffer_allocation; + }; /* LDBG_CFG_BODY_API_U_VER_2 (partially) */ +} __packed; /* LDBG_CFG_CMD_API_S_VER_2 */ + #endif /* __iwl_fw_api_debug_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h new file mode 100644 index 000000000000..5dddb21c1c4d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -0,0 +1,878 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2019 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2019 Intel Corporation + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_location_h__ +#define __iwl_fw_api_location_h__ + +/** + * enum iwl_location_subcmd_ids - location group command IDs + */ +enum iwl_location_subcmd_ids { + /** + * @TOF_RANGE_REQ_CMD: TOF ranging request, + * uses &struct iwl_tof_range_req_cmd + */ + TOF_RANGE_REQ_CMD = 0x0, + /** + * @TOF_CONFIG_CMD: TOF configuration, uses &struct iwl_tof_config_cmd + */ + TOF_CONFIG_CMD = 0x1, + /** + * @TOF_RANGE_ABORT_CMD: abort ongoing ranging, uses + * &struct iwl_tof_range_abort_cmd + */ + TOF_RANGE_ABORT_CMD = 0x2, + /** + * @TOF_RANGE_REQ_EXT_CMD: TOF extended ranging config, + * uses &struct iwl_tof_range_request_ext_cmd + */ + TOF_RANGE_REQ_EXT_CMD = 0x3, + /** + * @TOF_RESPONDER_CONFIG_CMD: FTM responder configuration, + * uses &struct iwl_tof_responder_config_cmd + */ + TOF_RESPONDER_CONFIG_CMD = 0x4, + /** + * @TOF_RESPONDER_DYN_CONFIG_CMD: FTM dynamic configuration, + * uses &struct iwl_tof_responder_dyn_config_cmd + */ + TOF_RESPONDER_DYN_CONFIG_CMD = 0x5, + /** + * @CSI_HEADER_NOTIFICATION: CSI header + */ + CSI_HEADER_NOTIFICATION = 0xFA, + /** + * @CSI_CHUNKS_NOTIFICATION: CSI chunk, + * uses &struct iwl_csi_chunk_notification + */ + CSI_CHUNKS_NOTIFICATION = 0xFB, + /** + * @TOF_LC_NOTIF: used for LCI/civic location, contains just + * the action frame + */ + TOF_LC_NOTIF = 0xFC, + /** + * @TOF_RESPONDER_STATS: FTM responder statistics notification, + * uses &struct iwl_ftm_responder_stats + */ + TOF_RESPONDER_STATS = 0xFD, + /** + * @TOF_MCSI_DEBUG_NOTIF: MCSI debug notification, uses + * &struct iwl_tof_mcsi_notif + */ + TOF_MCSI_DEBUG_NOTIF = 0xFE, + /** + * @TOF_RANGE_RESPONSE_NOTIF: ranging response, using + * &struct iwl_tof_range_rsp_ntfy + */ + TOF_RANGE_RESPONSE_NOTIF = 0xFF, +}; + +/** + * struct iwl_tof_config_cmd - ToF configuration + * @tof_disabled: indicates if ToF is disabled (or not) + * @one_sided_disabled: indicates if one-sided is disabled (or not) + * @is_debug_mode: 
indiciates if debug mode is active + * @is_buf_required: indicates if channel estimation buffer is required + */ +struct iwl_tof_config_cmd { + u8 tof_disabled; + u8 one_sided_disabled; + u8 is_debug_mode; + u8 is_buf_required; +} __packed; + +/** + * enum iwl_tof_bandwidth - values for iwl_tof_range_req_ap_entry.bandwidth + * @IWL_TOF_BW_20_LEGACY: 20 MHz non-HT + * @IWL_TOF_BW_20_HT: 20 MHz HT + * @IWL_TOF_BW_40: 40 MHz + * @IWL_TOF_BW_80: 80 MHz + * @IWL_TOF_BW_160: 160 MHz + */ +enum iwl_tof_bandwidth { + IWL_TOF_BW_20_LEGACY, + IWL_TOF_BW_20_HT, + IWL_TOF_BW_40, + IWL_TOF_BW_80, + IWL_TOF_BW_160, +}; /* LOCAT_BW_TYPE_E */ + +/* + * enum iwl_tof_algo_type - Algorithym type for range measurement request + */ +enum iwl_tof_algo_type { + IWL_TOF_ALGO_TYPE_MAX_LIKE = 0, + IWL_TOF_ALGO_TYPE_LINEAR_REG = 1, + IWL_TOF_ALGO_TYPE_FFT = 2, + + /* Keep last */ + IWL_TOF_ALGO_TYPE_INVALID, +}; /* ALGO_TYPE_E */ + +/* + * enum iwl_tof_mcsi_ntfy - Enable/Disable MCSI notifications + */ +enum iwl_tof_mcsi_enable { + IWL_TOF_MCSI_DISABLED = 0, + IWL_TOF_MCSI_ENABLED = 1, +}; /* MCSI_ENABLE_E */ + +/** + * enum iwl_tof_responder_cmd_valid_field - valid fields in the responder cfg + * @IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO: channel info is valid + * @IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET: ToA offset is valid + * @IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB: common calibration mode is valid + * @IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB: spefici calibration mode is + * valid + * @IWL_TOF_RESPONDER_CMD_VALID_BSSID: BSSID is valid + * @IWL_TOF_RESPONDER_CMD_VALID_TX_ANT: TX antenna is valid + * @IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE: algorithm type is valid + * @IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT: non-ASAP support is valid + * @IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT: statistics report + * support is valid + * @IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT: MCSI notification support + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT: fast algorithm support + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL: retry on algorithm failure + * is valid + * @IWL_TOF_RESPONDER_CMD_VALID_STA_ID: station ID is valid + */ +enum iwl_tof_responder_cmd_valid_field { + IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0), + IWL_TOF_RESPONDER_CMD_VALID_TOA_OFFSET = BIT(1), + IWL_TOF_RESPONDER_CMD_VALID_COMMON_CALIB = BIT(2), + IWL_TOF_RESPONDER_CMD_VALID_SPECIFIC_CALIB = BIT(3), + IWL_TOF_RESPONDER_CMD_VALID_BSSID = BIT(4), + IWL_TOF_RESPONDER_CMD_VALID_TX_ANT = BIT(5), + IWL_TOF_RESPONDER_CMD_VALID_ALGO_TYPE = BIT(6), + IWL_TOF_RESPONDER_CMD_VALID_NON_ASAP_SUPPORT = BIT(7), + IWL_TOF_RESPONDER_CMD_VALID_STATISTICS_REPORT_SUPPORT = BIT(8), + IWL_TOF_RESPONDER_CMD_VALID_MCSI_NOTIF_SUPPORT = BIT(9), + IWL_TOF_RESPONDER_CMD_VALID_FAST_ALGO_SUPPORT = BIT(10), + IWL_TOF_RESPONDER_CMD_VALID_RETRY_ON_ALGO_FAIL = BIT(11), + IWL_TOF_RESPONDER_CMD_VALID_STA_ID = BIT(12), +}; + +/** + * enum iwl_tof_responder_cfg_flags - responder configuration flags + * @IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT: non-ASAP support + * @IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS: report statistics + * @IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI: report MCSI + * @IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE: algorithm type + * @IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE: ToA offset mode + * @IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE: common calibration mode + * @IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE: specific calibration mode + * @IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT: fast algorithm support + * 
@IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL: retry on algorithm fail + * @IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT: TX antenna mask + */ +enum iwl_tof_responder_cfg_flags { + IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0), + IWL_TOF_RESPONDER_FLAGS_REPORT_STATISTICS = BIT(1), + IWL_TOF_RESPONDER_FLAGS_REPORT_MCSI = BIT(2), + IWL_TOF_RESPONDER_FLAGS_ALGO_TYPE = BIT(3) | BIT(4) | BIT(5), + IWL_TOF_RESPONDER_FLAGS_TOA_OFFSET_MODE = BIT(6), + IWL_TOF_RESPONDER_FLAGS_COMMON_CALIB_MODE = BIT(7), + IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8), + IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9), + IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10), + IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK, +}; + +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @bandwidth: current AP Bandwidth: &enum iwl_tof_bandwidth + * @rate: current AP rate + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @reserved1: reserved + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @reserved2: reserved + */ +struct iwl_tof_responder_config_cmd { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 bandwidth; + u8 rate; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 reserved1; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + __le16 reserved2; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_6 */ + +#define IWL_LCI_CIVIC_IE_MAX_SIZE 400 + +/** + * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings + * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer + * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer + * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if + * needed, 0-padding such that the next part is dword-aligned, then CIVIC + * data (if exists) follows, and then 0-padding again to complete a + * 4-multiple long buffer. + */ +struct iwl_tof_responder_dyn_config_cmd { + __le32 lci_len; + __le32 civic_len; + u8 lci_civic[]; +} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */ + +/** + * struct iwl_tof_range_request_ext_cmd - extended range req for WLS + * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF + * @reserved: reserved + * @min_delta_ftm: Minimal time between two consecutive measurements, + * in units of 100us. 
0 means no preference by station + * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended + * value be sent to the AP + * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended + * value to be sent to the AP + */ +struct iwl_tof_range_req_ext_cmd { + __le16 tsf_timer_offset_msec; + __le16 reserved; + u8 min_delta_ftm; + u8 ftm_format_and_bw20M; + u8 ftm_format_and_bw40M; + u8 ftm_format_and_bw80M; +} __packed; + +/** + * enum iwl_tof_location_query - values for query bitmap + * @IWL_TOF_LOC_LCI: query LCI + * @IWL_TOF_LOC_CIVIC: query civic + */ +enum iwl_tof_location_query { + IWL_TOF_LOC_LCI = 0x01, + IWL_TOF_LOC_CIVIC = 0x02, +}; + + /** + * struct iwl_tof_range_req_ap_entry_v2 - AP configuration parameters + * @channel_num: Current AP Channel + * @bandwidth: Current AP Bandwidth. One of iwl_tof_bandwidth. + * @tsf_delta_direction: TSF relatively to the subject AP + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @bssid: AP's BSSID + * @measure_type: Measurement type: 0 - two sided, 1 - One sided + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the + * number of measurement iterations (min 2^0 = 1, max 2^14) + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts = 0 + * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31); + * 1-sided: how many rts/cts pairs should be used per burst. + * @retries_per_sample: Max number of retries that the LMAC should send + * in case of no replies by the AP. + * @tsf_delta: TSF Delta in units of microseconds. + * The difference between the AP TSF and the device local clock. + * @location_req: Location Request Bit[0] LCI should be sent in the FTMR; + * Bit[1] Civic should be sent in the FTMR + * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) + * @enable_dyn_ack: Enable Dynamic ACK BW. + * 0: Initiator interact with regular AP; + * 1: Initiator interact with Responder machine: need to send the + * Initiator Acks with HT 40MHz / 80MHz, since the Responder should + * use it for its ch est measurement (this flag will be set when we + * configure the opposite machine to be Responder). + * @rssi: Last received value + * legal values: -128-0 (0x7f). above 0x0 indicating an invalid value. + * @algo_type: &enum iwl_tof_algo_type + * @notify_mcsi: &enum iwl_tof_mcsi_ntfy. + * @reserved: For alignment and future use + */ +struct iwl_tof_range_req_ap_entry_v2 { + u8 channel_num; + u8 bandwidth; + u8 tsf_delta_direction; + u8 ctrl_ch_position; + u8 bssid[ETH_ALEN]; + u8 measure_type; + u8 num_of_bursts; + __le16 burst_period; + u8 samples_per_burst; + u8 retries_per_sample; + __le32 tsf_delta; + u8 location_req; + u8 asap_mode; + u8 enable_dyn_ack; + s8 rssi; + u8 algo_type; + u8 notify_mcsi; + __le16 reserved; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_2 */ + +/** + * enum iwl_initiator_ap_flags - per responder FTM configuration flags + * @IWL_INITIATOR_AP_FLAGS_ASAP: Request for ASAP measurement. + * @IWL_INITIATOR_AP_FLAGS_LCI_REQUEST: Request for LCI information + * @IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST: Request for CIVIC information + * @IWL_INITIATOR_AP_FLAGS_DYN_ACK: Send HT/VHT ack for FTM frames. If not set, + * 20Mhz dup acks will be sent. 
+ * @IWL_INITIATOR_AP_FLAGS_ALGO_LR: Use LR algo type for rtt calculation. + * Default algo type is ML. + * @IWL_INITIATOR_AP_FLAGS_ALGO_FFT: Use FFT algo type for rtt calculation. + * Default algo type is ML. + * @IWL_INITIATOR_AP_FLAGS_MCSI_REPORT: Send the MCSI for each FTM frame to the + * driver. + */ +enum iwl_initiator_ap_flags { + IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), + IWL_INITIATOR_AP_FLAGS_LCI_REQUEST = BIT(2), + IWL_INITIATOR_AP_FLAGS_CIVIC_REQUEST = BIT(3), + IWL_INITIATOR_AP_FLAGS_DYN_ACK = BIT(4), + IWL_INITIATOR_AP_FLAGS_ALGO_LR = BIT(5), + IWL_INITIATOR_AP_FLAGS_ALGO_FFT = BIT(6), + IWL_INITIATOR_AP_FLAGS_MCSI_REPORT = BIT(8), +}; + +/** + * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. + * @channel_num: AP Channel number + * @bandwidth: AP bandwidth. One of iwl_tof_bandwidth. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @ftmr_max_retries: Max number of retries to send the FTMR in case of no + * reply from the AP. + * @bssid: AP's BSSID + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts_exp = 0 + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @reserved: For alignment and future use + * @tsf_delta: not in use + */ +struct iwl_tof_range_req_ap_entry { + __le32 initiator_ap_flags; + u8 channel_num; + u8 bandwidth; + u8 ctrl_ch_position; + u8 ftmr_max_retries; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + __le16 reserved; + __le32 tsf_delta; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_3 */ + +/** + * enum iwl_tof_response_mode + * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as + * possible (not supported for this release) + * @IWL_MVM_TOF_RESPONSE_TIMEOUT: report all AP measurements as a batch upon + * timeout expiration + * @IWL_MVM_TOF_RESPONSE_COMPLETE: report all AP measurements as a batch at the + * earlier of: measurements completion / timeout + * expiration. + */ +enum iwl_tof_response_mode { + IWL_MVM_TOF_RESPONSE_ASAP, + IWL_MVM_TOF_RESPONSE_TIMEOUT, + IWL_MVM_TOF_RESPONSE_COMPLETE, +}; + +/** + * enum iwl_tof_initiator_flags + * + * @IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED: disable fast algo, meaning run + * the algo on ant A+B, instead of only one of them. 
+ * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A: open RX antenna A for FTMs RX + * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B: open RX antenna B for FTMs RX + * @IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C: open RX antenna C for FTMs RX + * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A: use antenna A for TX ACKs during FTM + * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B: use antenna B for TX ACKs during FTM + * @IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C: use antenna C for TX ACKs during FTM + * @IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM: use random mac address for FTM + * @IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB: use the specific calib value from + * the range request command + * @IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB: use the common calib value from the + * range request command + * @IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT: support non-asap measurements + */ +enum iwl_tof_initiator_flags { + IWL_TOF_INITIATOR_FLAGS_FAST_ALGO_DISABLED = BIT(0), + IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_A = BIT(1), + IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_B = BIT(2), + IWL_TOF_INITIATOR_FLAGS_RX_CHAIN_SEL_C = BIT(3), + IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_A = BIT(4), + IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_B = BIT(5), + IWL_TOF_INITIATOR_FLAGS_TX_CHAIN_SEL_C = BIT(6), + IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM = BIT(7), + IWL_TOF_INITIATOR_FLAGS_SPECIFIC_CALIB = BIT(15), + IWL_TOF_INITIATOR_FLAGS_COMMON_CALIB = BIT(16), + IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20), +}; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ + +#define IWL_MVM_TOF_MAX_APS 5 +#define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5 + +/** + * struct iwl_tof_range_req_cmd_v5 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @initiator: 0- NW initiated, 1 - Client Initiated + * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, + * '1' - run ML-Algo for ToF only + * @req_timeout: Requested timeout of the response in units of 100ms. + * This is equivalent to the session time configured to the + * LMAC in Initiator Request + * @report_policy: Supported partially for this release: For current release - + * the range report will be uploaded as a batch when ready or + * when the session is done (successfully / partially). + * one of iwl_tof_response_mode. + * @reserved0: reserved + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), + * '1' Use MAC Address randomization according to the below + * @range_req_bssid: ranging request BSSID + * @macaddr_template: MAC address template to use for non-randomized bits + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC + * @ftm_rx_chains: Rx chain to open to receive Responder's FTMs (XVT) + * @ftm_tx_chains: Tx chain to send the ack to the Responder FTM (XVT) + * @common_calib: The common calib value to inject to this measurement calc + * @specific_calib: The specific calib value to inject to this measurement calc + * @ap: per-AP request data + */ +struct iwl_tof_range_req_cmd_v5 { + __le32 initiator_flags; + u8 request_id; + u8 initiator; + u8 one_sided_los_disable; + u8 req_timeout; + u8 report_policy; + u8 reserved0; + u8 num_of_ap; + u8 macaddr_random; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 ftm_rx_chains; + u8 ftm_tx_chains; + __le16 common_calib; + __le16 specific_calib; + struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; +/* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ + +/** + * struct iwl_tof_range_req_cmd - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @common_calib: The common calib value to inject to this measurement calc + * @specific_calib: The specific calib value to inject to this measurement calc + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v2. + */ +struct iwl_tof_range_req_cmd { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + __le16 common_calib; + __le16 specific_calib; + struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */ + +/* + * enum iwl_tof_range_request_status - status of the sent request + * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the + * request + * @IWL_TOF_RANGE_REQUEST_STATUS_BUSY - FW is busy with a previous request, the + * sent request will not be handled + */ +enum iwl_tof_range_request_status { + IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS, + IWL_TOF_RANGE_REQUEST_STATUS_BUSY, +}; + +/** + * enum iwl_tof_entry_status + * + * @IWL_TOF_ENTRY_SUCCESS: successful measurement. + * @IWL_TOF_ENTRY_GENERAL_FAILURE: General failure. + * @IWL_TOF_ENTRY_NO_RESPONSE: Responder didn't reply to the request. + * @IWL_TOF_ENTRY_REQUEST_REJECTED: Responder rejected the request. + * @IWL_TOF_ENTRY_NOT_SCHEDULED: Time event was scheduled but not called yet. + * @IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: Time event triggered but no + * measurement was completed. + * @IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE: No range due to inability to switch + * from the primary channel. + * @IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED: Device doesn't support FTM. + * @IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON: Request aborted due to unknown + * reason.
+ * @IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP: Failure due to invalid + * T1/T4. + * @IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE: Failure due to invalid FTM frame + * structure. + * @IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED: Request cannot be scheduled. + * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE: Responder cannot serve the + * initiator for some period, period supplied in @refusal_period. + * @IWL_TOF_ENTRY_BAD_REQUEST_ARGS: Bad request arguments. + * @IWL_TOF_ENTRY_WIFI_NOT_ENABLED: Wifi not enabled. + * @IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS: Responder override the original + * parameters within the current session. + */ +enum iwl_tof_entry_status { + IWL_TOF_ENTRY_SUCCESS = 0, + IWL_TOF_ENTRY_GENERAL_FAILURE = 1, + IWL_TOF_ENTRY_NO_RESPONSE = 2, + IWL_TOF_ENTRY_REQUEST_REJECTED = 3, + IWL_TOF_ENTRY_NOT_SCHEDULED = 4, + IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT = 5, + IWL_TOF_ENTRY_TARGET_DIFF_CH_CANNOT_CHANGE = 6, + IWL_TOF_ENTRY_RANGE_NOT_SUPPORTED = 7, + IWL_TOF_ENTRY_REQUEST_ABORT_UNKNOWN_REASON = 8, + IWL_TOF_ENTRY_LOCATION_INVALID_T1_T4_TIME_STAMP = 9, + IWL_TOF_ENTRY_11MC_PROTOCOL_FAILURE = 10, + IWL_TOF_ENTRY_REQUEST_CANNOT_SCHED = 11, + IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE = 12, + IWL_TOF_ENTRY_BAD_REQUEST_ARGS = 13, + IWL_TOF_ENTRY_WIFI_NOT_ENABLED = 14, + IWL_TOF_ENTRY_RESPONDER_OVERRIDE_PARAMS = 15, +}; /* LOCATION_RANGE_RSP_AP_ENTRY_NTFY_API_S_VER_2 */ + +/** + * struct iwl_tof_range_rsp_ap_entry_ntfy_v3 - AP parameters (response) + * @bssid: BSSID of the AP + * @measure_status: current APs measurement status, one of + * &enum iwl_tof_entry_status. + * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time that took for the last measurement for + * current AP [pSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [pSec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @reserved: reserved + * @refusal_period: refusal period in case of + * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] + * @range: Measured range [cm] + * @range_variance: Measured range variance [cm] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + * @t2t3_initiator: as calculated from the algo in the initiator + * @t1t4_responder: as calculated from the algo in the responder + * @common_calib: Calib val that was used in for this AP measurement + * @specific_calib: val that was used in for this AP measurement + * @papd_calib_output: The result of the tof papd calibration that was injected + * into the algorithm. + */ +struct iwl_tof_range_rsp_ap_entry_ntfy_v3 { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 reserved; + u8 refusal_period; + __le32 range; + __le32 range_variance; + __le32 timestamp; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; +} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */ + +/** + * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * @bssid: BSSID of the AP + * @measure_status: current APs measurement status, one of + * &enum iwl_tof_entry_status. 
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time that took for the last measurement for + * current AP [pSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [pSec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @last_burst: 1 if no more FTM sessions are scheduled for this responder + * @refusal_period: refusal period in case of + * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + * @start_tsf: measurement start time in TSF of the mac specified in the range + * request + * @rx_rate_n_flags: rate and flags of the last FTM frame received from this + * responder + * @tx_rate_n_flags: rate and flags of the last ack sent to this responder + * @t2t3_initiator: as calculated from the algo in the initiator + * @t1t4_responder: as calculated from the algo in the responder + * @common_calib: Calib val that was used in for this AP measurement + * @specific_calib: val that was used in for this AP measurement + * @papd_calib_output: The result of the tof papd calibration that was injected + * into the algorithm. + */ +struct iwl_tof_range_rsp_ap_entry_ntfy { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + u8 last_burst; + u8 refusal_period; + __le32 timestamp; + __le32 start_tsf; + __le32 rx_rate_n_flags; + __le32 tx_rate_n_flags; + __le32 t2t3_initiator; + __le32 t1t4_responder; + __le16 common_calib; + __le16 specific_calib; + __le32 papd_calib_output; +} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */ + +/** + * enum iwl_tof_response_status - tof response status + * + * @IWL_TOF_RESPONSE_SUCCESS: successful range. + * @IWL_TOF_RESPONSE_TIMEOUT: request aborted due to timeout expiration. + * partial result of ranges done so far is included in the response. + * @IWL_TOF_RESPONSE_ABORTED: Measurement aborted by command. + * @IWL_TOF_RESPONSE_FAILED: Measurement request command failed. + */ +enum iwl_tof_response_status { + IWL_TOF_RESPONSE_SUCCESS = 0, + IWL_TOF_RESPONSE_TIMEOUT = 1, + IWL_TOF_RESPONSE_ABORTED = 4, + IWL_TOF_RESPONSE_FAILED = 5, +}; /* LOCATION_RNG_RSP_STATUS */ + +/** + * struct iwl_tof_range_rsp_ntfy_v5 - ranging response notification + * @request_id: A Token ID of the corresponding Range request + * @request_status: status of current measurement session, one of + * &enum iwl_tof_response_status. + * @last_in_batch: report policy (when not all responses are uploaded at once) + * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @ap: per-AP data + */ +struct iwl_tof_range_rsp_ntfy_v5 { + u8 request_id; + u8 request_status; + u8 last_in_batch; + u8 num_of_aps; + struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */ + +/** + * struct iwl_tof_range_rsp_ntfy - ranging response notification + * @request_id: A Token ID of the corresponding Range request + * @num_of_aps: Number of APs results + * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved + * @ap: per-AP data + */ +struct iwl_tof_range_rsp_ntfy { + u8 request_id; + u8 num_of_aps; + u8 last_report; + u8 reserved; + struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */ + +#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) +/** + * struct iwl_tof_mcsi_notif - used for debug + * @token: token ID for the current session + * @role: '0' - initiator, '1' - responder + * @reserved: reserved + * @initiator_bssid: initiator machine + * @responder_bssid: responder machine + * @mcsi_buffer: debug data + */ +struct iwl_tof_mcsi_notif { + u8 token; + u8 role; + __le16 reserved; + u8 initiator_bssid[ETH_ALEN]; + u8 responder_bssid[ETH_ALEN]; + u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; +} __packed; + +/** + * struct iwl_tof_range_abort_cmd + * @request_id: corresponds to a range request + * @reserved: reserved + */ +struct iwl_tof_range_abort_cmd { + u8 request_id; + u8 reserved[3]; +} __packed; + +enum ftm_responder_stats_flags { + FTM_RESP_STAT_NON_ASAP_STARTED = BIT(0), + FTM_RESP_STAT_NON_ASAP_IN_WIN = BIT(1), + FTM_RESP_STAT_NON_ASAP_OUT_WIN = BIT(2), + FTM_RESP_STAT_TRIGGER_DUP = BIT(3), + FTM_RESP_STAT_DUP = BIT(4), + FTM_RESP_STAT_DUP_IN_WIN = BIT(5), + FTM_RESP_STAT_DUP_OUT_WIN = BIT(6), + FTM_RESP_STAT_SCHED_SUCCESS = BIT(7), + FTM_RESP_STAT_ASAP_REQ = BIT(8), + FTM_RESP_STAT_NON_ASAP_REQ = BIT(9), + FTM_RESP_STAT_ASAP_RESP = BIT(10), + FTM_RESP_STAT_NON_ASAP_RESP = BIT(11), + FTM_RESP_STAT_FAIL_INITIATOR_INACTIVE = BIT(12), + FTM_RESP_STAT_FAIL_INITIATOR_OUT_WIN = BIT(13), + FTM_RESP_STAT_FAIL_INITIATOR_RETRY_LIM = BIT(14), + FTM_RESP_STAT_FAIL_NEXT_SERVED = BIT(15), + FTM_RESP_STAT_FAIL_TRIGGER_ERR = BIT(16), + FTM_RESP_STAT_FAIL_GC = BIT(17), + FTM_RESP_STAT_SUCCESS = BIT(18), + FTM_RESP_STAT_INTEL_IE = BIT(19), + FTM_RESP_STAT_INITIATOR_ACTIVE = BIT(20), + FTM_RESP_STAT_MEASUREMENTS_AVAILABLE = BIT(21), + FTM_RESP_STAT_TRIGGER_UNKNOWN = BIT(22), + FTM_RESP_STAT_PROCESS_FAIL = BIT(23), + FTM_RESP_STAT_ACK = BIT(24), + FTM_RESP_STAT_NACK = BIT(25), + FTM_RESP_STAT_INVALID_INITIATOR_ID = BIT(26), + FTM_RESP_STAT_TIMER_MIN_DELTA = BIT(27), + FTM_RESP_STAT_INITIATOR_REMOVED = BIT(28), + FTM_RESP_STAT_INITIATOR_ADDED = BIT(29), + FTM_RESP_STAT_ERR_LIST_FULL = BIT(30), + FTM_RESP_STAT_INITIATOR_SCHED_NOW = BIT(31), +}; /* RESP_IND_E */ + +/** + * struct iwl_ftm_responder_stats - FTM responder statistics + * @addr: initiator address + * @success_ftm: number of successful ftm frames + * @ftm_per_burst: num of FTM frames that were received + * @flags: &enum ftm_responder_stats_flags + * @duration: actual duration of FTM + * @allocated_duration: time that was allocated for this FTM session + * @bw: FTM request bandwidth + * @rate: FTM request rate + * @reserved: for alignment and future use + */ +struct iwl_ftm_responder_stats { + u8 addr[ETH_ALEN]; + u8 success_ftm; + u8 ftm_per_burst; + __le32 flags; + __le32 duration; + __le32 allocated_duration; + u8 bw; + u8 rate; + __le16 reserved; +} __packed; /* TOF_RESPONDER_STATISTICS_NTFY_S_VER_2 */ + +#define IWL_CSI_CHUNK_CTL_NUM_MASK 0x3 +#define IWL_CSI_CHUNK_CTL_IDX_MASK 0xc + +struct iwl_csi_chunk_notification { + __le32 token; + __le16 seq; + __le16 ctl; + __le32 size; + u8 data[]; +} __packed; /* CSI_CHUNKS_HDR_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_location_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index ca49db786ed6..6b4d59daacd6 100644 ---
a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -73,6 +73,10 @@ enum iwl_mac_conf_subcmd_ids { * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd */ LOW_LATENCY_CMD = 0x3, + /** + * @CHANNEL_SWITCH_TIME_EVENT_CMD: &struct iwl_chan_switch_te_cmd + */ + CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4, /** * @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif */ @@ -135,6 +139,29 @@ struct iwl_channel_switch_noa_notif { __le32 id_and_color; } __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ +/** + * struct iwl_chan_switch_te_cmd - Channel Switch Time Event command + * + * @mac_id: MAC ID for channel switch + * @action: action to perform, one of FW_CTXT_ACTION_* + * @tsf: beacon tsf + * @cs_count: channel switch count from CSA/eCSA IE + * @cs_delayed_bcn_count: if set to N (!= 0) GO/AP can delay N beacon intervals + * at the new channel after the channel switch, otherwise (N == 0) expect + * beacon right after the channel switch. + * @cs_mode: 1 - quiet, 0 - otherwise + * @reserved: reserved for alignment purposes + */ +struct iwl_chan_switch_te_cmd { + __le32 mac_id; + __le32 action; + __le32 tsf; + u8 cs_count; + u8 cs_delayed_bcn_count; + u8 cs_mode; + u8 reserved; +} __packed; /* MAC_CHANNEL_SWITCH_TIME_EVENT_S_VER_2 */ + /** * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode' * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 7a3f7b7e6358..941c50477003 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -433,6 +433,28 @@ struct iwl_he_backoff_conf { __le16 mu_time; } __packed; /* AC_QOS_DOT11AX_API_S */ +/** + * enum iwl_he_pkt_ext_constellations - PPE constellation indices + * @IWL_HE_PKT_EXT_BPSK: BPSK + * @IWL_HE_PKT_EXT_QPSK: QPSK + * @IWL_HE_PKT_EXT_16QAM: 16-QAM + * @IWL_HE_PKT_EXT_64QAM: 64-QAM + * @IWL_HE_PKT_EXT_256QAM: 256-QAM + * @IWL_HE_PKT_EXT_1024QAM: 1024-QAM + * @IWL_HE_PKT_EXT_RESERVED: reserved value + * @IWL_HE_PKT_EXT_NONE: not defined + */ +enum iwl_he_pkt_ext_constellations { + IWL_HE_PKT_EXT_BPSK = 0, + IWL_HE_PKT_EXT_QPSK, + IWL_HE_PKT_EXT_16QAM, + IWL_HE_PKT_EXT_64QAM, + IWL_HE_PKT_EXT_256QAM, + IWL_HE_PKT_EXT_1024QAM, + IWL_HE_PKT_EXT_RESERVED, + IWL_HE_PKT_EXT_NONE, +}; + #define MAX_HE_SUPP_NSS 2 #define MAX_HE_CHANNEL_BW_INDX 4 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index 45f61c6af14e..b833b80ea3d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -95,17 +97,36 @@ #define PHY_VHT_CTRL_POS_4_ABOVE (0x7) /* + * struct iwl_fw_channel_info_v1 - channel information + * * @band: PHY_BAND_* * @channel: channel number * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* */ -struct iwl_fw_channel_info { +struct iwl_fw_channel_info_v1 { u8 band; u8 channel; u8 width; u8 ctrl_pos; -} __packed; +} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */ + +/* + * struct iwl_fw_channel_info - channel information + * + * @channel: channel number + * @band: PHY_BAND_* + * @width: PHY_[VHT|LEGACY]_CHANNEL_* + * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* + * @reserved: for future use and alignment + */ +struct iwl_fw_channel_info { + __le32 channel; + u8 band; + u8 width; + u8 ctrl_pos; + u8 reserved; +} __packed; /*CHANNEL_CONFIG_API_S_VER_2 */ #define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) #define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ @@ -133,6 +154,22 @@ struct iwl_fw_channel_info { #define NUM_PHY_CTX 3 /* TODO: complete missing documentation */ +/** + * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with + * various channel structures. + * + * @txchain_info: ??? + * @rxchain_info: ??? + * @acquisition_data: ??? + * @dsp_cfg_flags: set to 0 + */ +struct iwl_phy_context_cmd_tail { + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +} __packed; + /** * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) @@ -142,10 +179,7 @@ struct iwl_fw_channel_info { * other value means apply new params after X usecs * @tx_param_color: ??? * @ci: channel info - * @txchain_info: ??? - * @rxchain_info: ??? - * @acquisition_data: ??? - * @dsp_cfg_flags: set to 0 + * @tail: command tail */ struct iwl_phy_context_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ @@ -155,10 +189,7 @@ struct iwl_phy_context_cmd { __le32 apply_time; __le32 tx_param_color; struct iwl_fw_channel_info ci; - __le32 txchain_info; - __le32 rxchain_info; - __le32 acquisition_data; - __le32 dsp_cfg_flags; + struct iwl_phy_context_cmd_tail tail; } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ #endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 286a22da232d..01f003c6cff9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -200,9 +200,16 @@ struct iwl_powertable_cmd { * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: * '1' Allow to save power by turning off * receiver and transmitter. '0' - does not allow. + * @DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK: + * Device Retention indication, '1' indicates retention is enabled. + * @DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK: + * 32Khz external slow clock valid indication, '1' indicates clock is + * valid. */ enum iwl_device_power_flags { - DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), + DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), + DEVICE_POWER_FLAGS_ALLOW_MEM_RETENTION_MSK = BIT(1), + DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK = BIT(12), }; /** @@ -470,6 +477,13 @@ struct iwl_geo_tx_power_profiles_resp { * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed * for a longer period of time then this escape-timeout. Units: Beacons. * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. + * @bf_threshold_absolute_low: See below. + * @bf_threshold_absolute_high: Send Beacon to driver if Energy value calculated + * for this beacon crossed this absolute threshold. For the 'Increase' + * direction the bf_energy_absolute_low[i] is used. For the 'Decrease' + * direction the bf_energy_absolute_high[i] is used. Zero value means + * that this specific threshold is ignored for beacon filtering, and + * beacon will not be forced to be sent to driver due to this setting. */ struct iwl_beacon_filter_cmd { __le32 bf_energy_delta; @@ -483,7 +497,9 @@ struct iwl_beacon_filter_cmd { __le32 bf_escape_timer; __le32 ba_escape_timer; __le32 ba_enable_beacon_abort; -} __packed; + __le32 bf_threshold_absolute_low[2]; + __le32 bf_threshold_absolute_high[2]; +} __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */ /* Beacon filtering and beacon abort */ #define IWL_BF_ENERGY_DELTA_DEFAULT 5 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 0791a854fc8f..6e8224ce8906 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -209,8 +209,6 @@ enum iwl_rx_phy_flags { * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift - * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status - * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2 */ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_CRC_OK = BIT(0), @@ -238,8 +236,6 @@ enum iwl_mvm_rx_status { RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT, - RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000), - RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000), }; /* 9000 series API */ @@ -337,6 +333,8 @@ enum iwl_rx_mpdu_phy_info { IWL_RX_MPDU_PHY_AMPDU = BIT(5), IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), + /* short preamble is only for CCK, for non-CCK overridden by this */ + IWL_RX_MPDU_PHY_NCCK_ADDTL_NTFY = BIT(7), IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8), }; @@ -723,6 +721,9 @@ struct iwl_rx_mpdu_desc { #define RX_NO_DATA_FRAME_TIME_POS 0 #define RX_NO_DATA_FRAME_TIME_MSK (0xfffff <<
RX_NO_DATA_FRAME_TIME_POS) +#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000 +#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000 + /** * struct iwl_rx_no_data - RX no data descriptor * @info: 7:0 frame type, 15:8 RX error type @@ -743,7 +744,7 @@ struct iwl_rx_no_data { __le32 fr_time; __le32 rate; __le32 phy_info[2]; - __le32 rx_vec[3]; + __le32 rx_vec[2]; } __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 18741889ec30..890a939c463d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -434,7 +434,7 @@ struct iwl_periodic_scan_complete { /* The maximum of either of these cannot exceed 8, because we use an * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). */ -#define IWL_MVM_MAX_UMAC_SCANS 8 +#define IWL_MVM_MAX_UMAC_SCANS 4 #define IWL_MVM_MAX_LMAC_SCANS 1 enum scan_config_flags { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h index 53cb622aa9ab..318843138490 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,6 +30,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -363,14 +365,7 @@ struct mvm_statistics_general_v8 { u8 reserved[4 - (NUM_MAC_INDEX % 4)]; } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ -struct mvm_statistics_general_cdb_v9 { - struct mvm_statistics_general_common_v19 common; - __le32 beacon_counter[NUM_MAC_INDEX_CDB]; - u8 beacon_average_energy[NUM_MAC_INDEX_CDB]; - u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)]; -} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */ - -struct mvm_statistics_general_cdb { +struct mvm_statistics_general { struct mvm_statistics_general_common common; __le32 beacon_counter[MAC_INDEX_AUX]; u8 beacon_average_energy[MAC_INDEX_AUX]; @@ -435,11 +430,11 @@ struct iwl_notif_statistics_v11 { struct mvm_statistics_load_v1 load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_11 */ -struct iwl_notif_statistics_cdb { +struct iwl_notif_statistics { __le32 flag; struct mvm_statistics_rx rx; struct mvm_statistics_tx tx; - struct mvm_statistics_general_cdb general; + struct mvm_statistics_general general; struct mvm_statistics_load load_stats; } __packed; /* STATISTICS_NTFY_API_S_VER_13 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h index 7c6c2462d0e8..b089285ac466 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -110,6 +112,17 @@ struct iwl_tdls_channel_switch_frame { u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; } __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ +/** + * struct iwl_tdls_channel_switch_cmd_tail - tail of iwl_tdls_channel_switch_cmd + * + * @timing: timing related data for command + * @frame: channel-switch request/response template, depending to switch_type + */ +struct iwl_tdls_channel_switch_cmd_tail { + struct iwl_tdls_channel_switch_timing timing; + struct iwl_tdls_channel_switch_frame frame; +} __packed; + /** * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command * @@ -119,15 +132,13 @@ struct iwl_tdls_channel_switch_frame { * @switch_type: see &enum iwl_tdls_channel_switch_type * @peer_sta_id: station id of TDLS peer * @ci: channel we switch to - * @timing: timing related data for command - * @frame: channel-switch request/response template, depending to switch_type + * @tail: command tail */ struct iwl_tdls_channel_switch_cmd { u8 switch_type; __le32 peer_sta_id; struct iwl_fw_channel_info ci; - struct iwl_tdls_channel_switch_timing timing; - struct iwl_tdls_channel_switch_frame frame; + struct iwl_tdls_channel_switch_cmd_tail tail; } __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index f824bebceb06..4621ef93a2cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -317,6 +319,25 @@ struct iwl_time_event_notif { __le32 status; } __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ +/* + * struct iwl_hs20_roc_req_tail - tail of iwl_hs20_roc_req + * + * @node_addr: Our MAC Address + * @reserved: reserved for alignment + * @apply_time: GP2 value to start (should always be the current GP2 value) + * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max + * time by which start of the event is allowed to be postponed. + * @duration: event duration in TU To calculate event duration: + * timeEventDuration = min(duration, remainingQuota) + */ +struct iwl_hs20_roc_req_tail { + u8 node_addr[ETH_ALEN]; + __le16 reserved; + __le32 apply_time; + __le32 apply_time_max_delay; + __le32 duration; +} __packed; + /* * Aux ROC command * @@ -336,13 +357,6 @@ struct iwl_time_event_notif { * @sta_id_and_color: station id and color, resumed during "Remain On Channel" * activity. * @channel_info: channel info - * @node_addr: Our MAC Address - * @reserved: reserved for alignment - * @apply_time: GP2 value to start (should always be the current GP2 value) - * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max - * time by which start of the event is allowed to be postponed. 
- * @duration: event duration in TU To calculate event duration: - * timeEventDuration = min(duration, remainingQuota) */ struct iwl_hs20_roc_req { /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ @@ -351,11 +365,7 @@ struct iwl_hs20_roc_req { __le32 event_unique_id; __le32 sta_id_and_color; struct iwl_fw_channel_info channel_info; - u8 node_addr[ETH_ALEN]; - __le16 reserved; - __le32 apply_time; - __le32 apply_time_max_delay; - __le32 duration; + struct iwl_hs20_roc_req_tail tail; } __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ /* diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h deleted file mode 100644 index 7328a1606146..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h +++ /dev/null @@ -1,393 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#ifndef __iwl_fw_api_tof_h__ -#define __iwl_fw_api_tof_h__ - -/* ToF sub-group command IDs */ -enum iwl_mvm_tof_sub_grp_ids { - TOF_RANGE_REQ_CMD = 0x1, - TOF_CONFIG_CMD = 0x2, - TOF_RANGE_ABORT_CMD = 0x3, - TOF_RANGE_REQ_EXT_CMD = 0x4, - TOF_RESPONDER_CONFIG_CMD = 0x5, - TOF_NW_INITIATED_RES_SEND_CMD = 0x6, - TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, - TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, - TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, - TOF_RANGE_RESPONSE_NOTIF = 0xFE, - TOF_MCSI_DEBUG_NOTIF = 0xFB, -}; - -/** - * struct iwl_tof_config_cmd - ToF configuration - * @tof_disabled: 0 enabled, 1 - disabled - * @one_sided_disabled: 0 enabled, 1 - disabled - * @is_debug_mode: 1 debug mode, 0 - otherwise - * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise - */ -struct iwl_tof_config_cmd { - __le32 sub_grp_cmd_id; - u8 tof_disabled; - u8 one_sided_disabled; - u8 is_debug_mode; - u8 is_buf_required; -} __packed; - -/** - * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) - * @burst_period: future use: (currently hard coded in the LMAC) - * The interval between two sequential bursts. - * @min_delta_ftm: future use: (currently hard coded in the LMAC) - * The minimum delay between two sequential FTM Responses - * in the same burst. - * @burst_duration: future use: (currently hard coded in the LMAC) - * The total time for all FTMs handshake in the same burst. - * Affect the time events duration in the LMAC. - * @num_of_burst_exp: future use: (currently hard coded in the LMAC) - * The number of bursts for the current ToF request. Affect - * the number of events allocations in the current iteration. - * @get_ch_est: for xVT only, NA for driver - * @abort_responder: when set to '1' - Responder will terminate its activity - * (all other fields in the command are ignored) - * @recv_sta_req_params: 1 - Responder will ignore the other Responder's - * params and use the recomended Initiator params. 
- * 0 - otherwise - * @channel_num: current AP Channel - * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rate: current AP rate - * @ctrl_ch_position: coding of the control channel position relative to - * the center frequency: - * - * 40 MHz - * 0 below center, 1 above center - * - * 80 MHz - * bits [0..1] - * * 0 the near 20MHz to the center, - * * 1 the far 20MHz to the center - * bit[2] - * as above 40MHz - * @ftm_per_burst: FTMs per Burst - * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, - * '1' - we measure over the Initial FTM Response - * @asap_mode: ASAP / Non ASAP mode for the current WLS station - * @sta_id: index of the AP STA when in AP mode - * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF - * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug - * purposes, simulating station movement by adding various values - * to this field - * @bssid: Current AP BSSID - */ -struct iwl_tof_responder_config_cmd { - __le32 sub_grp_cmd_id; - __le16 burst_period; - u8 min_delta_ftm; - u8 burst_duration; - u8 num_of_burst_exp; - u8 get_ch_est; - u8 abort_responder; - u8 recv_sta_req_params; - u8 channel_num; - u8 bandwidth; - u8 rate; - u8 ctrl_ch_position; - u8 ftm_per_burst; - u8 ftm_resp_ts_avail; - u8 asap_mode; - u8 sta_id; - __le16 tsf_timer_offset_msecs; - __le16 toa_offset; - u8 bssid[ETH_ALEN]; -} __packed; - -/** - * struct iwl_tof_range_request_ext_cmd - extended range req for WLS - * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF - * @reserved: reserved - * @min_delta_ftm: Minimal time between two consecutive measurements, - * in units of 100us. 0 means no preference by station - * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended - * value be sent to the AP - * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended - * value to be sent to the AP - * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended - * value to be sent to the AP - */ -struct iwl_tof_range_req_ext_cmd { - __le32 sub_grp_cmd_id; - __le16 tsf_timer_offset_msec; - __le16 reserved; - u8 min_delta_ftm; - u8 ftm_format_and_bw20M; - u8 ftm_format_and_bw40M; - u8 ftm_format_and_bw80M; -} __packed; - -#define IWL_MVM_TOF_MAX_APS 21 - -/** - * struct iwl_tof_range_req_ap_entry - AP configuration parameters - * @channel_num: Current AP Channel - * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @tsf_delta_direction: TSF relatively to the subject AP - * @ctrl_ch_position: Coding of the control channel position relative to the - * center frequency. - * 40MHz 0 below center, 1 above center - * 80MHz bits [0..1]: 0 the near 20MHz to the center, - * 1 the far 20MHz to the center - * bit[2] as above 40MHz - * @bssid: AP's bss id - * @measure_type: Measurement type: 0 - two sided, 1 - One sided - * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the - * number of measurement iterations (min 2^0 = 1, max 2^14) - * @burst_period: Recommended value to be sent to the AP. Measurement - * periodicity In units of 100ms. ignored if num_of_bursts = 0 - * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31) - * 1-sided: how many rts/cts pairs should be used per burst. - * @retries_per_sample: Max number of retries that the LMAC should send - * in case of no replies by the AP. - * @tsf_delta: TSF Delta in units of microseconds. 
- * The difference between the AP TSF and the device local clock. - * @location_req: Location Request Bit[0] LCI should be sent in the FTMR - * Bit[1] Civic should be sent in the FTMR - * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) - * @enable_dyn_ack: Enable Dynamic ACK BW. - * 0 Initiator interact with regular AP - * 1 Initiator interact with Responder machine: need to send the - * Initiator Acks with HT 40MHz / 80MHz, since the Responder should - * use it for its ch est measurement (this flag will be set when we - * configure the opposite machine to be Responder). - * @rssi: Last received value - * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value. - */ -struct iwl_tof_range_req_ap_entry { - u8 channel_num; - u8 bandwidth; - u8 tsf_delta_direction; - u8 ctrl_ch_position; - u8 bssid[ETH_ALEN]; - u8 measure_type; - u8 num_of_bursts; - __le16 burst_period; - u8 samples_per_burst; - u8 retries_per_sample; - __le32 tsf_delta; - u8 location_req; - u8 asap_mode; - u8 enable_dyn_ack; - s8 rssi; -} __packed; - -/** - * enum iwl_tof_response_mode - * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as - * possible (not supported for this release) - * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon - * timeout expiration - * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the - * earlier of: measurements completion / timeout - * expiration. - */ -enum iwl_tof_response_mode { - IWL_MVM_TOF_RESPOSE_ASAP = 1, - IWL_MVM_TOF_RESPOSE_TIMEOUT, - IWL_MVM_TOF_RESPOSE_COMPLETE, -}; - -/** - * struct iwl_tof_range_req_cmd - start measurement cmd - * @request_id: A Token incremented per request. The same Token will be - * sent back in the range response - * @initiator: 0- NW initiated, 1 - Client Initiated - * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, - * '1' - run ML-Algo for ToF only - * @req_timeout: Requested timeout of the response in units of 100ms. - * This is equivalent to the session time configured to the - * LMAC in Initiator Request - * @report_policy: Supported partially for this release: For current release - - * the range report will be uploaded as a batch when ready or - * when the session is done (successfully / partially). - * one of iwl_tof_response_mode. - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), - * '1' Use MAC Address randomization according to the below - * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. - * Bits set to 1 shall be randomized by the UMAC - * @ap: per-AP request data - */ -struct iwl_tof_range_req_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 initiator; - u8 one_sided_los_disable; - u8 req_timeout; - u8 report_policy; - u8 los_det_disable; - u8 num_of_ap; - u8 macaddr_random; - u8 macaddr_template[ETH_ALEN]; - u8 macaddr_mask[ETH_ALEN]; - struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -/** - * struct iwl_tof_gen_resp_cmd - generic ToF response - */ -struct iwl_tof_gen_resp_cmd { - __le32 sub_grp_cmd_id; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) - * @bssid: BSSID of the AP - * @measure_status: current APs measurement status, one of - * &enum iwl_tof_entry_status. 
- * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rtt: The Round Trip Time that took for the last measurement for - * current AP [nSec] - * @rtt_variance: The Variance of the RTT values measured for current AP - * @rtt_spread: The Difference between the maximum and the minimum RTT - * values measured for current AP in the current session [nsec] - * @rssi: RSSI as uploaded in the Channel Estimation notification - * @rssi_spread: The Difference between the maximum and the minimum RSSI values - * measured for current AP in the current session - * @reserved: reserved - * @range: Measured range [cm] - * @range_variance: Measured range variance [cm] - * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was - * uploaded by the LMAC - */ -struct iwl_tof_range_rsp_ap_entry_ntfy { - u8 bssid[ETH_ALEN]; - u8 measure_status; - u8 measure_bw; - __le32 rtt; - __le32 rtt_variance; - __le32 rtt_spread; - s8 rssi; - u8 rssi_spread; - __le16 reserved; - __le32 range; - __le32 range_variance; - __le32 timestamp; -} __packed; - -/** - * struct iwl_tof_range_rsp_ntfy - - * @request_id: A Token ID of the corresponding Range request - * @request_status: status of current measurement session - * @last_in_batch: reprot policy (when not all responses are uploaded at once) - * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - * @ap: per-AP data - */ -struct iwl_tof_range_rsp_ntfy { - u8 request_id; - u8 request_status; - u8 last_in_batch; - u8 num_of_aps; - struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) -/** - * struct iwl_tof_mcsi_notif - used for debug - * @token: token ID for the current session - * @role: '0' - initiator, '1' - responder - * @reserved: reserved - * @initiator_bssid: initiator machine - * @responder_bssid: responder machine - * @mcsi_buffer: debug data - */ -struct iwl_tof_mcsi_notif { - u8 token; - u8 role; - __le16 reserved; - u8 initiator_bssid[ETH_ALEN]; - u8 responder_bssid[ETH_ALEN]; - u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; -} __packed; - -/** - * struct iwl_tof_neighbor_report_notif - * @bssid: BSSID of the AP which sent the report - * @request_token: same token as the corresponding request - * @status: - * @report_ie_len: the length of the response frame starting from the Element ID - * @data: the IEs - */ -struct iwl_tof_neighbor_report { - u8 bssid[ETH_ALEN]; - u8 request_token; - u8 status; - __le16 report_ie_len; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_abort_cmd - * @request_id: corresponds to a range request - * @reserved: reserved - */ -struct iwl_tof_range_abort_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 reserved[3]; -} __packed; - -#endif diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 358bdf051e83..8511e735c374 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -847,19 +847,33 @@ struct iwl_beacon_notif { } __packed; /** - * struct iwl_extended_beacon_notif - notifies about beacon transmission + * struct iwl_extended_beacon_notif_v5 - notifies about beacon transmission * @beacon_notify_hdr: tx response command associated with the beacon * @tsf: last beacon tsf * @ibss_mgr_status: whether IBSS is manager * @gp2: last beacon time in gp2 */ -struct iwl_extended_beacon_notif { +struct iwl_extended_beacon_notif_v5 { struct iwl_mvm_tx_resp beacon_notify_hdr; __le64 tsf; __le32 
ibss_mgr_status; __le32 gp2; } __packed; /* BEACON_NTFY_API_S_VER_5 */ +/** + * struct iwl_extended_beacon_notif - notifies about beacon transmission + * @status: the status of the Tx response of the beacon + * @tsf: last beacon tsf + * @ibss_mgr_status: whether IBSS is manager + * @gp2: last beacon time in gp2 + */ +struct iwl_extended_beacon_notif { + __le32 status; + __le64 tsf; + __le32 ibss_mgr_status; + __le32 gp2; +} __packed; /* BEACON_NTFY_API_S_VER_6_ */ + /** * enum iwl_dump_control - dump (flush) control flags * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 2a19b178c5e8..f119c49cd39c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -8,7 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -242,7 +242,8 @@ static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, cfg->lmac[0].rxfifo1_size, 0, 0); /* Pull RXF2 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, - RXF_DIFF_FROM_PREV, 1); + RXF_DIFF_FROM_PREV + + fwrt->trans->cfg->umac_prph_offset, 1); /* Pull LMAC2 RXF1 */ if (fwrt->smem_cfg.num_lmacs > 1) iwl_fwrt_dump_rxf(fwrt, dump_data, @@ -469,6 +470,93 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { { .start = 0x00a02400, .end = 0x00a02758 }, }; +static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = { + { .start = 0x00a00000, .end = 0x00a00000 }, + { .start = 0x00a0000c, .end = 0x00a00024 }, + { .start = 0x00a0002c, .end = 0x00a00034 }, + { .start = 0x00a0003c, .end = 0x00a0003c }, + { .start = 0x00a00410, .end = 0x00a00418 }, + { .start = 0x00a00420, .end = 0x00a00420 }, + { .start = 0x00a00428, .end = 0x00a00428 }, + { .start = 0x00a00430, .end = 0x00a0043c }, + { .start = 0x00a00444, .end = 0x00a00444 }, + { .start = 0x00a00840, .end = 0x00a00840 }, + { .start = 0x00a00850, .end = 0x00a00858 }, + { .start = 0x00a01004, .end = 0x00a01008 }, + { .start = 0x00a01010, .end = 0x00a01010 }, + { .start = 0x00a01018, .end = 0x00a01018 }, + { .start = 0x00a01024, .end = 0x00a01024 }, + { .start = 0x00a0102c, .end = 0x00a01034 }, + { .start = 0x00a0103c, .end = 0x00a01040 }, + { .start = 0x00a01048, .end = 0x00a01050 }, + { .start = 0x00a01058, .end = 0x00a01058 }, + { .start = 0x00a01060, .end = 0x00a01070 }, + { .start = 0x00a0108c, .end = 0x00a0108c }, + { .start = 0x00a01c20, .end = 0x00a01c28 }, + { .start = 0x00a01d10, .end = 0x00a01d10 }, + { .start = 0x00a01e28, .end = 0x00a01e2c }, + { .start = 0x00a01e60, .end = 0x00a01e60 }, + { .start = 0x00a01e80, .end = 0x00a01e80 }, + { .start = 0x00a01ea0, .end = 0x00a01ea0 }, + { .start = 0x00a02000, .end = 0x00a0201c }, + { .start = 0x00a02024, .end = 0x00a02024 }, + { .start = 0x00a02040, 
.end = 0x00a02048 }, + { .start = 0x00a020c0, .end = 0x00a020e0 }, + { .start = 0x00a02400, .end = 0x00a02404 }, + { .start = 0x00a0240c, .end = 0x00a02414 }, + { .start = 0x00a0241c, .end = 0x00a0243c }, + { .start = 0x00a02448, .end = 0x00a024bc }, + { .start = 0x00a024c4, .end = 0x00a024cc }, + { .start = 0x00a02508, .end = 0x00a02508 }, + { .start = 0x00a02510, .end = 0x00a02514 }, + { .start = 0x00a0251c, .end = 0x00a0251c }, + { .start = 0x00a0252c, .end = 0x00a0255c }, + { .start = 0x00a02564, .end = 0x00a025a0 }, + { .start = 0x00a025a8, .end = 0x00a025b4 }, + { .start = 0x00a025c0, .end = 0x00a025c0 }, + { .start = 0x00a025e8, .end = 0x00a025f4 }, + { .start = 0x00a02c08, .end = 0x00a02c18 }, + { .start = 0x00a02c2c, .end = 0x00a02c38 }, + { .start = 0x00a02c68, .end = 0x00a02c78 }, + { .start = 0x00a03000, .end = 0x00a03000 }, + { .start = 0x00a03010, .end = 0x00a03014 }, + { .start = 0x00a0301c, .end = 0x00a0302c }, + { .start = 0x00a03034, .end = 0x00a03038 }, + { .start = 0x00a03040, .end = 0x00a03044 }, + { .start = 0x00a03060, .end = 0x00a03068 }, + { .start = 0x00a03070, .end = 0x00a03070 }, + { .start = 0x00a0307c, .end = 0x00a03084 }, + { .start = 0x00a0308c, .end = 0x00a03090 }, + { .start = 0x00a03098, .end = 0x00a03098 }, + { .start = 0x00a030a0, .end = 0x00a030a0 }, + { .start = 0x00a030a8, .end = 0x00a030b4 }, + { .start = 0x00a030bc, .end = 0x00a030c0 }, + { .start = 0x00a030c8, .end = 0x00a030f4 }, + { .start = 0x00a03100, .end = 0x00a0312c }, + { .start = 0x00a03c00, .end = 0x00a03c5c }, + { .start = 0x00a04400, .end = 0x00a04454 }, + { .start = 0x00a04460, .end = 0x00a04474 }, + { .start = 0x00a044c0, .end = 0x00a044ec }, + { .start = 0x00a04500, .end = 0x00a04504 }, + { .start = 0x00a04510, .end = 0x00a04538 }, + { .start = 0x00a04540, .end = 0x00a04548 }, + { .start = 0x00a04560, .end = 0x00a04560 }, + { .start = 0x00a04570, .end = 0x00a0457c }, + { .start = 0x00a04590, .end = 0x00a04590 }, + { .start = 0x00a04598, .end = 0x00a04598 }, + { .start = 0x00a045c0, .end = 0x00a045f4 }, + { .start = 0x00a0c000, .end = 0x00a0c018 }, + { .start = 0x00a0c020, .end = 0x00a0c028 }, + { .start = 0x00a0c038, .end = 0x00a0c094 }, + { .start = 0x00a0c0c0, .end = 0x00a0c104 }, + { .start = 0x00a0c10c, .end = 0x00a0c118 }, + { .start = 0x00a0c150, .end = 0x00a0c174 }, + { .start = 0x00a0c17c, .end = 0x00a0c188 }, + { .start = 0x00a0c190, .end = 0x00a0c198 }, + { .start = 0x00a0c1a0, .end = 0x00a0c1a8 }, + { .start = 0x00a0c1b0, .end = 0x00a0c1b8 }, +}; + static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, u32 len_bytes, __le32 *data) { @@ -478,15 +566,20 @@ static void iwl_read_prph_block(struct iwl_trans *trans, u32 start, *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); } -static void iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, +static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, const struct iwl_prph_range *iwl_prph_dump_addr, - u32 range_len) + u32 range_len, void *ptr) { struct iwl_fw_error_dump_prph *prph; + struct iwl_trans *trans = fwrt->trans; + struct iwl_fw_error_dump_data **data = + (struct iwl_fw_error_dump_data **)ptr; unsigned long flags; u32 i; + if (!data) + return; + IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); if (!iwl_trans_grab_nic_access(trans, &flags)) @@ -552,37 +645,49 @@ static struct scatterlist *alloc_sgtable(int size) return table; } -static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt) +static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, + const struct iwl_prph_range 
*iwl_prph_dump_addr, + u32 range_len, void *ptr) { - u32 prph_len = 0; - int i; + u32 *prph_len = (u32 *)ptr; + int i, num_bytes_in_chunk; - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); - i++) { + if (!prph_len) + return; + + for (i = 0; i < range_len; i++) { /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_comm[i].end - - iwl_prph_dump_addr_comm[i].start + 4; + num_bytes_in_chunk = + iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; - prph_len += sizeof(struct iwl_fw_error_dump_data) + + *prph_len += sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_prph) + num_bytes_in_chunk; } +} + +static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr, + void (*handler)(struct iwl_fw_runtime *, + const struct iwl_prph_range *, + u32, void *)) +{ + u32 range_len; - if (fwrt->trans->cfg->mq_rx_supported) { - for (i = 0; i < - ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_9000[i].end - - iwl_prph_dump_addr_9000[i].start + 4; - - prph_len += sizeof(struct iwl_fw_error_dump_data) + - sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; + if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + /* TODO */ + } else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000); + handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr); + } else { + range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm); + handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr); + + if (fwrt->trans->cfg->mq_rx_supported) { + range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000); + handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr); } } - return prph_len; } static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, @@ -605,28 +710,6 @@ static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type); } -static void iwl_fw_dump_named_mem(struct iwl_fw_runtime *fwrt, - struct iwl_fw_error_dump_data **dump_data, - u32 len, u32 ofs, u8 *name, u8 name_len) -{ - struct iwl_fw_error_dump_named_mem *dump_mem; - - if (!len) - return; - - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); - dump_mem = (void *)(*dump_data)->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_NAMED_MEM); - dump_mem->offset = cpu_to_le32(ofs); - dump_mem->name_len = name_len; - memcpy(dump_mem->name, name, name_len); - iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); - *dump_data = iwl_fw_error_next_data(*dump_data); - - IWL_DEBUG_INFO(fwrt, "WRT memory dump. 
Type=%u\n", dump_mem->type); -} - #define ADD_LEN(len, item_len, const_len) \ do {size_t item = item_len; len += (!!item) * const_len + item; } \ while (0) @@ -646,6 +729,9 @@ static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt, ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); /* Count RXF1 sizes */ + if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) + mem_cfg->num_lmacs = MAX_NUM_LMAC; + for (i = 0; i < mem_cfg->num_lmacs; i++) ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); @@ -664,6 +750,9 @@ static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt, goto dump_internal_txf; /* Count TXF sizes */ + if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC)) + mem_cfg->num_lmacs = MAX_NUM_LMAC; + for (i = 0; i < mem_cfg->num_lmacs; i++) { int j; @@ -707,6 +796,9 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, DMA_BIDIRECTIONAL); memcpy(paging->data, page_address(pages), PAGING_BLOCK_SIZE); + dma_sync_single_for_device(fwrt->trans->dev, addr, + PAGING_BLOCK_SIZE, + DMA_BIDIRECTIONAL); (*data) = iwl_fw_error_next_data(*data); } } @@ -733,6 +825,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; + if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) + return NULL; img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; @@ -747,9 +841,9 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ - if (!fwrt->trans->cfg->gen2 && - iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) - prph_len += iwl_fw_get_prph_len(fwrt); + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) + iwl_fw_prph_handler(fwrt, &prph_len, + iwl_fw_get_prph_len); if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) @@ -828,7 +922,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, sizeof(dump_info->dev_human_readable) - 1); strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); - dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status); + dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; + dump_info->lmac_err_id[0] = + cpu_to_le32(fwrt->dump.lmac_err_id[0]); + if (fwrt->smem_cfg.num_lmacs > 1) + dump_info->lmac_err_id[1] = + cpu_to_le32(fwrt->dump.lmac_err_id[1]); + dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id); dump_data = iwl_fw_error_next_data(dump_data); } @@ -935,133 +1035,653 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, &dump_data); - if (prph_len) { - iwl_dump_prph(fwrt->trans, &dump_data, - iwl_prph_dump_addr_comm, - ARRAY_SIZE(iwl_prph_dump_addr_comm)); - - if (fwrt->trans->cfg->mq_rx_supported) - iwl_dump_prph(fwrt->trans, &dump_data, - iwl_prph_dump_addr_9000, - ARRAY_SIZE(iwl_prph_dump_addr_9000)); - } + if (prph_len) + iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); out: dump_file->file_len = cpu_to_le32(file_len); return dump_file; } -static void iwl_dump_prph_ini(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - struct iwl_fw_ini_region_cfg *reg) +static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) { - struct iwl_fw_error_dump_prph *prph; + struct iwl_fw_ini_error_dump_range *range = range_ptr; + __le32 *val = range->data; + u32 addr, prph_val, offset = 
le32_to_cpu(reg->offset); + int i; + + range->start_addr = reg->start_addr[idx]; + range->range_data_size = reg->internal.range_data_size; + for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) { + addr = le32_to_cpu(range->start_addr) + i; + prph_val = iwl_read_prph(fwrt->trans, addr + offset); + if (prph_val == 0x5a5a5a5a) + return -EBUSY; + *val++ = cpu_to_le32(prph_val); + } + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + +static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_error_dump_range *range = range_ptr; + __le32 *val = range->data; + u32 addr, offset = le32_to_cpu(reg->offset); + int i; + + range->start_addr = reg->start_addr[idx]; + range->range_data_size = reg->internal.range_data_size; + for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) { + addr = le32_to_cpu(range->start_addr) + i; + *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, + addr + offset)); + } + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + +static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_error_dump_range *range = range_ptr; + u32 addr = le32_to_cpu(range->start_addr); + u32 offset = le32_to_cpu(reg->offset); + + range->start_addr = reg->start_addr[idx]; + range->range_data_size = reg->internal.range_data_size; + iwl_trans_read_mem_bytes(fwrt->trans, addr + offset, range->data, + le32_to_cpu(reg->internal.range_data_size)); + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + +static int +iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_error_dump_range *range = range_ptr; + u32 page_size = fwrt->trans->init_dram.paging[idx].size; + + range->start_addr = cpu_to_le32(idx); + range->range_data_size = cpu_to_le32(page_size); + memcpy(range->data, fwrt->trans->init_dram.paging[idx].block, + page_size); + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + +static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) +{ + /* increase idx by 1 since the pages are from 1 to + * fwrt->num_of_paging_blk + 1 + */ + struct page *page = fwrt->fw_paging_db[++idx].fw_paging_block; + struct iwl_fw_ini_error_dump_range *range = range_ptr; + dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys; + u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size; + + range->start_addr = cpu_to_le32(idx); + range->range_data_size = cpu_to_le32(page_size); + dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size, + DMA_BIDIRECTIONAL); + memcpy(range->data, page_address(page), page_size); + dma_sync_single_for_device(fwrt->trans->dev, addr, page_size, + DMA_BIDIRECTIONAL); + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + +static int +iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, void *range_ptr, + int idx) +{ + struct iwl_fw_ini_error_dump_range *range = range_ptr; + u32 start_addr = iwl_read_umac_prph(fwrt->trans, + MON_BUFF_BASE_ADDR_VER2); + + if (start_addr == 0x5a5a5a5a) + return -EBUSY; + + range->start_addr = cpu_to_le32(start_addr); + range->range_data_size = cpu_to_le32(fwrt->trans->fw_mon[idx].size); + + memcpy(range->data, fwrt->trans->fw_mon[idx].block, + fwrt->trans->fw_mon[idx].size); 
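+	/* Like the other ->fill_range() handlers, return the size of the
+	 * range header plus the data just copied; a negative value makes
+	 * iwl_dump_ini_mem() abort dumping this region.
+	 */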
+ + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + +struct iwl_ini_txf_iter_data { + int fifo; + int lmac; + u32 fifo_size; + bool internal_txf; + bool init; +}; + +static bool iwl_ini_txf_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + struct iwl_ini_txf_iter_data *iter = fwrt->dump.fifo_iter; + struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; + int txf_num = cfg->num_txfifo_entries; + int int_txf_num = ARRAY_SIZE(cfg->internal_txfifo_size); + u32 lmac_bitmap = le32_to_cpu(reg->fifos.fid1); + + if (!iter) + return false; + + if (iter->init) { + if (le32_to_cpu(reg->offset) && + WARN_ONCE(cfg->num_lmacs == 1, + "Invalid lmac offset: 0x%x\n", + le32_to_cpu(reg->offset))) + return false; + + iter->init = false; + iter->internal_txf = false; + iter->fifo_size = 0; + iter->fifo = -1; + if (le32_to_cpu(reg->offset)) + iter->lmac = 1; + else + iter->lmac = 0; + } + + if (!iter->internal_txf) + for (iter->fifo++; iter->fifo < txf_num; iter->fifo++) { + iter->fifo_size = + cfg->lmac[iter->lmac].txfifo_size[iter->fifo]; + if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) + return true; + } + + iter->internal_txf = true; + + if (!fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) + return false; + + for (iter->fifo++; iter->fifo < int_txf_num + txf_num; iter->fifo++) { + iter->fifo_size = + cfg->internal_txfifo_size[iter->fifo - txf_num]; + if (iter->fifo_size && (lmac_bitmap & BIT(iter->fifo))) + return true; + } + + return false; +} + +static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) +{ + struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr; + struct iwl_ini_txf_iter_data *iter; + u32 offs = le32_to_cpu(reg->offset), addr; + u32 registers_size = + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); + __le32 *val = range->data; unsigned long flags; - u32 i, size = le32_to_cpu(reg->num_regions); + int i; - IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); + if (!iwl_ini_txf_iter(fwrt, reg)) + return -EIO; - if (!iwl_trans_grab_nic_access(trans, &flags)) + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) + return -EBUSY; + + iter = fwrt->dump.fifo_iter; + + range->fifo_num = cpu_to_le32(iter->fifo); + range->num_of_registers = reg->fifos.num_of_registers; + range->range_data_size = cpu_to_le32(iter->fifo_size + registers_size); + + iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo); + + /* read txf registers */ + for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) { + addr = le32_to_cpu(reg->start_addr[i]) + offs; + + *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + } + + if (reg->fifos.header_only) { + range->range_data_size = cpu_to_le32(registers_size); + goto out; + } + + /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ + iwl_write_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_ADDR + offs, + TXF_WR_PTR + offs); + + /* Dummy-read to advance the read pointer to the head */ + iwl_read_prph_no_grab(fwrt->trans, TXF_READ_MODIFY_DATA + offs); + + /* Read FIFO */ + addr = TXF_READ_MODIFY_DATA + offs; + for (i = 0; i < iter->fifo_size; i += sizeof(__le32)) + *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + +out: + iwl_trans_release_nic_access(fwrt->trans, &flags); + + return sizeof(*range) + le32_to_cpu(range->range_data_size); +} + +struct iwl_ini_rxf_data { + u32 fifo_num; + u32 size; + u32 offset; +}; + +static void iwl_ini_get_rxf_data(struct iwl_fw_runtime *fwrt, + 
struct iwl_fw_ini_region_cfg *reg, + struct iwl_ini_rxf_data *data) +{ + u32 fid1 = le32_to_cpu(reg->fifos.fid1); + u32 fid2 = le32_to_cpu(reg->fifos.fid2); + u32 fifo_idx; + + if (!data) return; - for (i = 0; i < size; i++) { - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); - (*data)->len = cpu_to_le32(le32_to_cpu(reg->size) + - sizeof(*prph)); - prph = (void *)(*data)->data; - prph->prph_start = reg->start_addr[i]; - prph->data[0] = cpu_to_le32(iwl_read_prph_no_grab(trans, - le32_to_cpu(prph->prph_start))); - *data = iwl_fw_error_next_data(*data); + memset(data, 0, sizeof(*data)); + + if (WARN_ON_ONCE((fid1 && fid2) || (!fid1 && !fid2))) + return; + + fifo_idx = ffs(fid1) - 1; + if (fid1 && !WARN_ON_ONCE((~BIT(fifo_idx) & fid1) || + fifo_idx >= MAX_NUM_LMAC)) { + data->size = fwrt->smem_cfg.lmac[fifo_idx].rxfifo1_size; + data->fifo_num = fifo_idx; + return; + } + + fifo_idx = ffs(fid2) - 1; + if (fid2 && !WARN_ON_ONCE(fifo_idx != 0)) { + data->size = fwrt->smem_cfg.rxfifo2_size; + data->offset = RXF_DIFF_FROM_PREV; + /* use bit 31 to distinguish between umac and lmac rxf while + * parsing the dump + */ + data->fifo_num = fifo_idx | IWL_RXF_UMAC_BIT; + return; } - iwl_trans_release_nic_access(trans, &flags); } -static void iwl_dump_csr_ini(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - struct iwl_fw_ini_region_cfg *reg) +static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *range_ptr, int idx) { - int i, num = le32_to_cpu(reg->num_regions); - u32 size = le32_to_cpu(reg->size); + struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr; + struct iwl_ini_rxf_data rxf_data; + u32 offs = le32_to_cpu(reg->offset), addr; + u32 registers_size = + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); + __le32 *val = range->data; + unsigned long flags; + int i; - IWL_DEBUG_INFO(trans, "WRT CSR dump\n"); + iwl_ini_get_rxf_data(fwrt, reg, &rxf_data); + if (!rxf_data.size) + return -EIO; - for (i = 0; i < num; i++) { - u32 add = le32_to_cpu(reg->start_addr[i]); - __le32 *val; - int j; + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) + return -EBUSY; - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); - (*data)->len = cpu_to_le32(size); - val = (void *)(*data)->data; + offs += rxf_data.offset; - for (j = 0; j < size; j += 4) - *val++ = cpu_to_le32(iwl_trans_read32(trans, j + add)); + range->fifo_num = cpu_to_le32(rxf_data.fifo_num); + range->num_of_registers = reg->fifos.num_of_registers; + range->range_data_size = cpu_to_le32(rxf_data.size + registers_size); - *data = iwl_fw_error_next_data(*data); + /* read rxf registers */ + for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) { + addr = le32_to_cpu(reg->start_addr[i]) + offs; + + *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + } + + if (reg->fifos.header_only) { + range->range_data_size = cpu_to_le32(registers_size); + goto out; + } + + /* Lock fence */ + iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1); + /* Set fence pointer to the same place like WR pointer */ + iwl_write_prph_no_grab(fwrt->trans, RXF_LD_WR2FENCE + offs, 0x1); + /* Set fence offset */ + iwl_write_prph_no_grab(fwrt->trans, RXF_LD_FENCE_OFFSET_ADDR + offs, + 0x0); + + /* Read FIFO */ + addr = RXF_FIFO_RD_FENCE_INC + offs; + for (i = 0; i < rxf_data.size; i += sizeof(__le32)) + *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr)); + +out: + iwl_trans_release_nic_access(fwrt->trans, &flags); + + return sizeof(*range) + 
le32_to_cpu(range->range_data_size); +} + +static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *data) +{ + struct iwl_fw_ini_error_dump *dump = data; + + return dump->ranges; +} + +static void +*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *data) +{ + struct iwl_fw_ini_monitor_dram_dump *mon_dump = (void *)data; + u32 write_ptr, cycle_cnt; + unsigned long flags; + + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) { + IWL_ERR(fwrt, "Failed to get DRAM monitor header\n"); + return NULL; + } + write_ptr = iwl_read_umac_prph_no_grab(fwrt->trans, + MON_BUFF_WRPTR_VER2); + cycle_cnt = iwl_read_umac_prph_no_grab(fwrt->trans, + MON_BUFF_CYCLE_CNT_VER2); + iwl_trans_release_nic_access(fwrt->trans, &flags); + + mon_dump->write_ptr = cpu_to_le32(write_ptr); + mon_dump->cycle_cnt = cpu_to_le32(cycle_cnt); + + return mon_dump->ranges; +} + +static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, + void *data) +{ + struct iwl_fw_ini_fifo_error_dump *dump = data; + + return dump->ranges; +} + +static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + return le32_to_cpu(reg->internal.num_of_ranges); +} + +static u32 iwl_dump_ini_paging_gen2_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + return fwrt->trans->init_dram.paging_cnt; +} + +static u32 iwl_dump_ini_paging_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + return fwrt->num_of_paging_blk; +} + +static u32 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + return 1; +} + +static u32 iwl_dump_ini_txf_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + struct iwl_ini_txf_iter_data iter = { .init = true }; + void *fifo_iter = fwrt->dump.fifo_iter; + u32 num_of_fifos = 0; + + fwrt->dump.fifo_iter = &iter; + while (iwl_ini_txf_iter(fwrt, reg)) + num_of_fifos++; + + fwrt->dump.fifo_iter = fifo_iter; + + return num_of_fifos; +} + +static u32 iwl_dump_ini_rxf_ranges(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + /* Each Rx fifo needs a different offset and therefore, it's + * region can contain only one fifo, i.e. 1 memory range. 
+ */ + return 1; +} + +static u32 iwl_dump_ini_mem_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + return sizeof(struct iwl_fw_ini_error_dump) + + iwl_dump_ini_mem_ranges(fwrt, reg) * + (sizeof(struct iwl_fw_ini_error_dump_range) + + le32_to_cpu(reg->internal.range_data_size)); +} + +static u32 iwl_dump_ini_paging_gen2_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + int i; + u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); + u32 size = sizeof(struct iwl_fw_ini_error_dump); + + for (i = 0; i < iwl_dump_ini_paging_gen2_ranges(fwrt, reg); i++) + size += range_header_len + + fwrt->trans->init_dram.paging[i].size; + + return size; +} + +static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + int i; + u32 range_header_len = sizeof(struct iwl_fw_ini_error_dump_range); + u32 size = sizeof(struct iwl_fw_ini_error_dump); + + for (i = 1; i <= iwl_dump_ini_paging_ranges(fwrt, reg); i++) + size += range_header_len + fwrt->fw_paging_db[i].fw_paging_size; + + return size; +} + +static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + u32 size = sizeof(struct iwl_fw_ini_monitor_dram_dump); + + if (fwrt->trans->num_blocks) + size += fwrt->trans->fw_mon[0].size; + + return size; +} + +static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + struct iwl_ini_txf_iter_data iter = { .init = true }; + void *fifo_iter = fwrt->dump.fifo_iter; + u32 size = 0; + u32 fifo_hdr = sizeof(struct iwl_fw_ini_fifo_error_dump_range) + + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); + + fwrt->dump.fifo_iter = &iter; + while (iwl_ini_txf_iter(fwrt, reg)) { + size += fifo_hdr; + if (!reg->fifos.header_only) + size += iter.fifo_size; + } + + if (size) + size += sizeof(struct iwl_fw_ini_fifo_error_dump); + + fwrt->dump.fifo_iter = fifo_iter; + + return size; +} + +static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg) +{ + struct iwl_ini_rxf_data rx_data; + u32 size = sizeof(struct iwl_fw_ini_fifo_error_dump) + + sizeof(struct iwl_fw_ini_fifo_error_dump_range) + + le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32); + + if (reg->fifos.header_only) + return size; + + iwl_ini_get_rxf_data(fwrt, reg, &rx_data); + size += rx_data.size; + + return size; +} + +/** + * struct iwl_dump_ini_mem_ops - ini memory dump operations + * @get_num_of_ranges: returns the number of memory ranges in the region. + * @get_size: returns the total size of the region. + * @fill_mem_hdr: fills region type specific headers and returns pointer to + * the first range or NULL if failed to fill headers. + * @fill_range: copies a given memory range into the dump. + * Returns the size of the range or negative error value otherwise. + */ +struct iwl_dump_ini_mem_ops { + u32 (*get_num_of_ranges)(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg); + u32 (*get_size)(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg); + void *(*fill_mem_hdr)(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, void *data); + int (*fill_range)(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_cfg *reg, void *range, + int idx); +}; + +/** + * iwl_dump_ini_mem - copy a memory region into the dump + * @fwrt: fw runtime struct. + * @data: dump memory data. + * @reg: region to copy to the dump. 
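+ * @type: region type, one of &enum iwl_fw_ini_region_type.
+ * @ops: &struct iwl_dump_ini_mem_ops callbacks used to size and fill the region.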
+ */ +static void +iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_region_type type, + struct iwl_fw_error_dump_data **data, + struct iwl_fw_ini_region_cfg *reg, + struct iwl_dump_ini_mem_ops *ops) +{ + struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data; + void *range; + u32 num_of_ranges, i; + + if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size || + !ops->fill_mem_hdr || !ops->fill_range)) + return; + + num_of_ranges = ops->get_num_of_ranges(fwrt, reg); + + (*data)->type = cpu_to_le32(type | INI_DUMP_BIT); + (*data)->len = cpu_to_le32(ops->get_size(fwrt, reg)); + + header->num_of_ranges = cpu_to_le32(num_of_ranges); + header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME, + le32_to_cpu(reg->name_len))); + memcpy(header->name, reg->name, le32_to_cpu(header->name_len)); + + range = ops->fill_mem_hdr(fwrt, reg, header); + if (!range) { + IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", + le32_to_cpu(reg->region_id), type); + return; } + + for (i = 0; i < num_of_ranges; i++) { + int range_size = ops->fill_range(fwrt, reg, range, i); + + if (range_size < 0) { + IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", + le32_to_cpu(reg->region_id), type); + return; + } + range = range + range_size; + } + *data = iwl_fw_error_next_data(*data); } static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trigger) { - int i, num, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data); + int i, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data); if (!trigger || !trigger->num_regions) return 0; - num = le32_to_cpu(trigger->num_regions); - for (i = 0; i < num; i++) { + for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) { u32 reg_id = le32_to_cpu(trigger->data[i]); struct iwl_fw_ini_region_cfg *reg; enum iwl_fw_ini_region_type type; - u32 num_entries; if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) continue; - reg = fwrt->dump.active_regs[reg_id].reg; + reg = fwrt->dump.active_regs[reg_id]; if (WARN(!reg, "Unassigned region %d\n", reg_id)) continue; type = le32_to_cpu(reg->region_type); - num_entries = le32_to_cpu(reg->num_regions); - switch (type) { case IWL_FW_INI_REGION_DEVICE_MEMORY: - size += hdr_len + - sizeof(struct iwl_fw_error_dump_named_mem) + - le32_to_cpu(reg->size); - break; case IWL_FW_INI_REGION_PERIPHERY_MAC: case IWL_FW_INI_REGION_PERIPHERY_PHY: case IWL_FW_INI_REGION_PERIPHERY_AUX: - size += num_entries * - (hdr_len + - sizeof(struct iwl_fw_error_dump_prph) + - sizeof(u32)); + case IWL_FW_INI_REGION_INTERNAL_BUFFER: + case IWL_FW_INI_REGION_CSR: + size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg); break; case IWL_FW_INI_REGION_TXF: - size += iwl_fw_txf_len(fwrt, &fwrt->smem_cfg); + size += hdr_len + iwl_dump_ini_txf_get_size(fwrt, reg); break; case IWL_FW_INI_REGION_RXF: - size += iwl_fw_rxf_len(fwrt, &fwrt->smem_cfg); + size += hdr_len + iwl_dump_ini_rxf_get_size(fwrt, reg); break; - case IWL_FW_INI_REGION_PAGING: - if (!iwl_fw_dbg_is_paging_enabled(fwrt)) - break; - size += fwrt->num_of_paging_blk * - (hdr_len + - sizeof(struct iwl_fw_error_dump_paging) + - PAGING_BLOCK_SIZE); - break; - case IWL_FW_INI_REGION_CSR: - size += num_entries * - (hdr_len + le32_to_cpu(reg->size)); + case IWL_FW_INI_REGION_PAGING: { + size += hdr_len; + if (iwl_fw_dbg_is_paging_enabled(fwrt)) { + size += iwl_dump_ini_paging_get_size(fwrt, reg); + } else { + size += iwl_dump_ini_paging_gen2_get_size(fwrt, + reg); + } break; + } case IWL_FW_INI_REGION_DRAM_BUFFER: - /* 
Transport takes care of DRAM dumping */ - case IWL_FW_INI_REGION_INTERNAL_BUFFER: + if (!fwrt->trans->num_blocks) + break; + size += hdr_len + + iwl_dump_ini_mon_dram_get_size(fwrt, reg); + break; case IWL_FW_INI_REGION_DRAM_IMR: /* Undefined yet */ default: @@ -1073,8 +1693,7 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trigger, - struct iwl_fw_error_dump_data **data, - u32 *dump_mask) + struct iwl_fw_error_dump_data **data) { int i, num = le32_to_cpu(trigger->num_regions); @@ -1082,11 +1701,12 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, u32 reg_id = le32_to_cpu(trigger->data[i]); enum iwl_fw_ini_region_type type; struct iwl_fw_ini_region_cfg *reg; + struct iwl_dump_ini_mem_ops ops; if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)) continue; - reg = fwrt->dump.active_regs[reg_id].reg; + reg = fwrt->dump.active_regs[reg_id]; /* Don't warn, get_trigger_len already warned */ if (!reg) continue; @@ -1094,39 +1714,75 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, type = le32_to_cpu(reg->region_type); switch (type) { case IWL_FW_INI_REGION_DEVICE_MEMORY: - if (WARN_ON(le32_to_cpu(reg->num_regions) > 1)) - continue; - iwl_fw_dump_named_mem(fwrt, data, - le32_to_cpu(reg->size), - le32_to_cpu(reg->start_addr[0]), - reg->name, - le32_to_cpu(reg->name_len)); + case IWL_FW_INI_REGION_INTERNAL_BUFFER: + ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; + ops.get_size = iwl_dump_ini_mem_get_size; + ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; + ops.fill_range = iwl_dump_ini_dev_mem_iter; + iwl_dump_ini_mem(fwrt, type, data, reg, &ops); break; case IWL_FW_INI_REGION_PERIPHERY_MAC: case IWL_FW_INI_REGION_PERIPHERY_PHY: case IWL_FW_INI_REGION_PERIPHERY_AUX: - iwl_dump_prph_ini(fwrt->trans, data, reg); + ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; + ops.get_size = iwl_dump_ini_mem_get_size; + ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; + ops.fill_range = iwl_dump_ini_prph_iter; + iwl_dump_ini_mem(fwrt, type, data, reg, &ops); break; case IWL_FW_INI_REGION_DRAM_BUFFER: - *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR; + ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges; + ops.get_size = iwl_dump_ini_mon_dram_get_size; + ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header; + ops.fill_range = iwl_dump_ini_mon_dram_iter; + iwl_dump_ini_mem(fwrt, type, data, reg, &ops); break; - case IWL_FW_INI_REGION_PAGING: - if (iwl_fw_dbg_is_paging_enabled(fwrt)) - iwl_dump_paging(fwrt, data); - else - *dump_mask |= IWL_FW_ERROR_DUMP_PAGING; + case IWL_FW_INI_REGION_PAGING: { + ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; + if (iwl_fw_dbg_is_paging_enabled(fwrt)) { + ops.get_num_of_ranges = + iwl_dump_ini_paging_ranges; + ops.get_size = iwl_dump_ini_paging_get_size; + ops.fill_range = iwl_dump_ini_paging_iter; + } else { + ops.get_num_of_ranges = + iwl_dump_ini_paging_gen2_ranges; + ops.get_size = + iwl_dump_ini_paging_gen2_get_size; + ops.fill_range = iwl_dump_ini_paging_gen2_iter; + } + + iwl_dump_ini_mem(fwrt, type, data, reg, &ops); break; - case IWL_FW_INI_REGION_TXF: - iwl_fw_dump_txf(fwrt, data); + } + case IWL_FW_INI_REGION_TXF: { + struct iwl_ini_txf_iter_data iter = { .init = true }; + void *fifo_iter = fwrt->dump.fifo_iter; + + fwrt->dump.fifo_iter = &iter; + ops.get_num_of_ranges = iwl_dump_ini_txf_ranges; + ops.get_size = iwl_dump_ini_txf_get_size; + ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header; + ops.fill_range = 
iwl_dump_ini_txf_iter; + iwl_dump_ini_mem(fwrt, type, data, reg, &ops); + fwrt->dump.fifo_iter = fifo_iter; break; + } case IWL_FW_INI_REGION_RXF: - iwl_fw_dump_rxf(fwrt, data); + ops.get_num_of_ranges = iwl_dump_ini_rxf_ranges; + ops.get_size = iwl_dump_ini_rxf_get_size; + ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header; + ops.fill_range = iwl_dump_ini_rxf_iter; + iwl_dump_ini_mem(fwrt, type, data, reg, &ops); break; case IWL_FW_INI_REGION_CSR: - iwl_dump_csr_ini(fwrt->trans, data, reg); + ops.get_num_of_ranges = iwl_dump_ini_mem_ranges; + ops.get_size = iwl_dump_ini_mem_get_size; + ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header; + ops.fill_range = iwl_dump_ini_csr_iter; + iwl_dump_ini_mem(fwrt, type, data, reg, &ops); break; case IWL_FW_INI_REGION_DRAM_IMR: - case IWL_FW_INI_REGION_INTERNAL_BUFFER: /* This is undefined yet */ default: break; @@ -1136,30 +1792,23 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, static struct iwl_fw_error_dump_file * _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, - struct iwl_fw_dump_ptrs *fw_error_dump, - u32 *dump_mask) + struct iwl_fw_dump_ptrs *fw_error_dump) { int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type); struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_file *dump_file; - struct iwl_fw_ini_trigger *trigger, *ext; + struct iwl_fw_ini_trigger *trigger; if (id == FW_DBG_TRIGGER_FW_ASSERT) id = IWL_FW_TRIGGER_ID_FW_ASSERT; - else if (id == FW_DBG_TRIGGER_USER) - id = IWL_FW_TRIGGER_ID_USER_TRIGGER; - else if (id < FW_DBG_TRIGGER_MAX) - return NULL; - if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) + if (!iwl_fw_ini_trigger_on(fwrt, id)) return NULL; - trigger = fwrt->dump.active_trigs[id].conf; - ext = fwrt->dump.active_trigs[id].conf_ext; + trigger = fwrt->dump.active_trigs[id].trig; size = sizeof(*dump_file); size += iwl_fw_ini_get_trigger_len(fwrt, trigger); - size += iwl_fw_ini_get_trigger_len(fwrt, ext); if (!size) return NULL; @@ -1174,11 +1823,7 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, dump_data = (void *)dump_file->data; dump_file->file_len = cpu_to_le32(size); - *dump_mask = 0; - if (trigger) - iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data, dump_mask); - if (ext) - iwl_fw_ini_dump_trigger(fwrt, ext, &dump_data, dump_mask); + iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data); return dump_file; } @@ -1204,8 +1849,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) goto out; if (fwrt->trans->ini_valid) - dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump, - &dump_mask); + dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump); else dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump); @@ -1217,7 +1861,10 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only) dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR; - fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask); + if (!fwrt->trans->ini_valid) + fw_error_dump->trans_ptr = + iwl_trans_dump_data(fwrt->trans, dump_mask); + file_len = le32_to_cpu(dump_file->file_len); fw_error_dump->fwrt_len = file_len; if (fw_error_dump->trans_ptr) { @@ -1258,61 +1905,12 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = { }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); -void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt) -{ - IWL_INFO(fwrt, "error dump due to fw assert\n"); - fwrt->dump.desc = &iwl_dump_desc_assert; - iwl_fw_error_dump(fwrt); -} -IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump); - -void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt) 
-{ - struct iwl_fw_dump_desc *iwl_dump_desc_no_alive = - kmalloc(sizeof(*iwl_dump_desc_no_alive), GFP_KERNEL); - - if (!iwl_dump_desc_no_alive) - return; - - iwl_dump_desc_no_alive->trig_desc.type = - cpu_to_le32(FW_DBG_TRIGGER_NO_ALIVE); - iwl_dump_desc_no_alive->len = 0; - - if (WARN_ON(fwrt->dump.desc)) - iwl_fw_free_dump_desc(fwrt); - - IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", - FW_DBG_TRIGGER_NO_ALIVE); - - fwrt->dump.desc = iwl_dump_desc_no_alive; - iwl_fw_error_dump(fwrt); - clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status); -} -IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump); - int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay) { - /* - * If the loading of the FW completed successfully, the next step is to - * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non - * zero, the FW was already loaded successully. If the state is "NO_FW" - * in such a case - exit, since FW may be dead. Otherwise, we - * can try to collect the data, since FW might just not be fully - * loaded (no "ALIVE" yet), and the debug data is accessible. - * - * Corner case: got the FW alive but crashed before getting the SMEM - * config. In such a case, due to HW access problems, we might - * collect garbage. - */ - if (fwrt->trans->state == IWL_TRANS_NO_FW && - fwrt->smem_cfg.num_lmacs) - return -EIO; - - if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status) || - test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &fwrt->status)) + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) return -EBUSY; if (WARN_ON(fwrt->dump.desc)) @@ -1324,12 +1922,39 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, fwrt->dump.desc = desc; fwrt->dump.monitor_only = monitor_only; - schedule_delayed_work(&fwrt->dump.wk, delay); + schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay)); return 0; } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); +int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig_type) +{ + int ret; + struct iwl_fw_dump_desc *iwl_dump_error_desc = + kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL); + + if (!iwl_dump_error_desc) + return -ENOMEM; + + iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type); + iwl_dump_error_desc->len = 0; + + ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); + if (ret) { + kfree(iwl_dump_error_desc); + } else { + set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); + + /* trigger nmi to halt the fw */ + iwl_force_nmi(fwrt->trans); + } + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect); + int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, @@ -1353,8 +1978,10 @@ int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, } trigger->occurrences = cpu_to_le16(occurrences); - delay = le16_to_cpu(trigger->trig_dis_ms); monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY; + + /* convert msec to usec */ + delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC; } desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); @@ -1374,6 +2001,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, u32 id, const char *str, size_t len) { struct iwl_fw_dump_desc *desc; + struct iwl_fw_ini_active_triggers *active; u32 occur, delay; if (!fwrt->trans->ini_valid) @@ -1382,15 +2010,17 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, if (id == FW_DBG_TRIGGER_USER) id = IWL_FW_TRIGGER_ID_USER_TRIGGER; - if (WARN_ON(!fwrt->dump.active_trigs[id].active)) + 
active = &fwrt->dump.active_trigs[id]; + + if (WARN_ON(!active->active)) return -EINVAL; - delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec); - occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences); + delay = le32_to_cpu(active->trig->dump_delay); + occur = le32_to_cpu(active->trig->occurrences); if (!occur) return 0; - if (le32_to_cpu(fwrt->dump.active_trigs[id].conf->force_restart)) { + if (le32_to_cpu(active->trig->force_restart)) { IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id); iwl_force_nmi(fwrt->trans); return 0; @@ -1400,8 +2030,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, if (!desc) return -ENOMEM; - occur--; - fwrt->dump.active_trigs[id].conf->occurrences = cpu_to_le32(occur); + active->trig->occurrences = cpu_to_le32(--occur); desc->len = len; desc->trig_desc.type = cpu_to_le32(id); @@ -1566,55 +2195,81 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); static void -iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, - struct iwl_fw_ini_allocation_tlv *alloc) +iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size) { struct iwl_trans *trans = fwrt->trans; - struct iwl_continuous_record_cmd cont_rec = {}; - struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0]; - struct iwl_host_cmd hcmd = { - .id = LDBG_CONFIG_CMD, - .flags = CMD_ASYNC, - .data[0] = &cont_rec, - .len[0] = sizeof(cont_rec), - }; void *virtual_addr = NULL; - u32 size = le32_to_cpu(alloc->size); dma_addr_t phys_addr; - cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION); - - if (!trans->num_blocks && - le32_to_cpu(alloc->buffer_location) != - IWL_FW_INI_LOCATION_DRAM_PATH) + if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon))) return; - virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size, - &phys_addr, GFP_KERNEL); + virtual_addr = + dma_alloc_coherent(fwrt->trans->dev, size, &phys_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO | + __GFP_COMP); /* TODO: alloc fragments if needed */ if (!virtual_addr) IWL_ERR(fwrt, "Failed to allocate debug memory\n"); - if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon))) - return; - trans->fw_mon[trans->num_blocks].block = virtual_addr; trans->fw_mon[trans->num_blocks].physical = phys_addr; trans->fw_mon[trans->num_blocks].size = size; trans->num_blocks++; IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size); +} + +static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_allocation_data *alloc, + enum iwl_fw_ini_apply_point pnt) +{ + struct iwl_trans *trans = fwrt->trans; + struct iwl_ldbg_config_cmd ldbg_cmd = { + .type = cpu_to_le32(BUFFER_ALLOCATION), + }; + struct iwl_buffer_allocation_cmd *cmd = &ldbg_cmd.buffer_allocation; + struct iwl_host_cmd hcmd = { + .id = LDBG_CONFIG_CMD, + .flags = CMD_ASYNC, + .data[0] = &ldbg_cmd, + .len[0] = sizeof(ldbg_cmd), + }; + int block_idx = trans->num_blocks; + u32 buf_location = le32_to_cpu(alloc->tlv.buffer_location); + + if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) { + if (!WARN(pnt != IWL_FW_INI_APPLY_EARLY, + "Invalid apply point %d for SMEM buffer allocation", + pnt)) + /* set sram monitor by enabling bit 7 */ + iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM); + return; + } + + if (buf_location != IWL_FW_INI_LOCATION_DRAM_PATH) + return; + + if (!alloc->is_alloc) { + iwl_fw_dbg_buffer_allocation(fwrt, + le32_to_cpu(alloc->tlv.size)); + if (block_idx == 
trans->num_blocks) + return; + alloc->is_alloc = 1; + } /* First block is assigned via registers / context info */ if (trans->num_blocks == 1) return; cmd->num_frags = cpu_to_le32(1); - cmd->fragments[0].address = cpu_to_le64(phys_addr); - cmd->fragments[0].size = alloc->size; - cmd->allocation_id = alloc->allocation_id; - cmd->buffer_location = alloc->buffer_location; + cmd->fragments[0].address = + cpu_to_le64(trans->fw_mon[block_idx].physical); + cmd->fragments[0].size = alloc->tlv.size; + cmd->allocation_id = alloc->tlv.allocation_id; + cmd->buffer_location = alloc->tlv.buffer_location; iwl_trans_send_cmd(trans, &hcmd); } @@ -1643,9 +2298,9 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt, int i, size = le32_to_cpu(tlv->num_regions); for (i = 0; i < size; i++) { - struct iwl_fw_ini_region_cfg *reg = iter; + struct iwl_fw_ini_region_cfg *reg = iter, **active; int id = le32_to_cpu(reg->region_id); - struct iwl_fw_ini_active_regs *active; + u32 type = le32_to_cpu(reg->region_type); if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), "Invalid region id %d for apply point %d\n", id, pnt)) @@ -1653,26 +2308,47 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt, active = &fwrt->dump.active_regs[id]; - if (ext && active->apply_point == pnt) - IWL_WARN(fwrt->trans, - "External region TLV overrides FW default %x\n", - id); + if (*active) + IWL_WARN(fwrt->trans, "region TLV %d override\n", id); IWL_DEBUG_FW(fwrt, "%s: apply point %d, activating region ID %d\n", __func__, pnt, id); - active->reg = reg; - active->apply_point = pnt; + *active = reg; - if (le32_to_cpu(reg->region_type) != - IWL_FW_INI_REGION_DRAM_BUFFER) - iter += le32_to_cpu(reg->num_regions) * sizeof(__le32); + if (type == IWL_FW_INI_REGION_TXF || + type == IWL_FW_INI_REGION_RXF) + iter += le32_to_cpu(reg->fifos.num_of_registers) * + sizeof(__le32); + else if (type != IWL_FW_INI_REGION_DRAM_BUFFER) + iter += le32_to_cpu(reg->internal.num_of_ranges) * + sizeof(__le32); iter += sizeof(*reg); } } +static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_active_triggers *active, + u32 id, int size) +{ + void *ptr; + + if (size <= active->size) + return 0; + + ptr = krealloc(active->trig, size, GFP_KERNEL); + if (!ptr) { + IWL_ERR(fwrt, "Failed to allocate memory for trigger %d\n", id); + return -ENOMEM; + } + active->trig = ptr; + active->size = size; + + return 0; +} + static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger_tlv *tlv, bool ext, @@ -1685,43 +2361,61 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt, struct iwl_fw_ini_trigger *trig = iter; struct iwl_fw_ini_active_triggers *active; int id = le32_to_cpu(trig->trigger_id); - u32 num; + u32 trig_regs_size = le32_to_cpu(trig->num_regions) * + sizeof(__le32); if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) break; active = &fwrt->dump.active_trigs[id]; - if (active->apply_point != apply_point) { - active->conf = NULL; - active->conf_ext = NULL; - } + if (!active->active) { + size_t trig_size = sizeof(*trig) + trig_regs_size; + + if (iwl_fw_dbg_trig_realloc(fwrt, active, id, + trig_size)) + goto next; - num = le32_to_cpu(trig->num_regions); + memcpy(active->trig, trig, trig_size); - if (ext && active->apply_point == apply_point) { - num += le32_to_cpu(active->conf->num_regions); - if (trig->ignore_default) { - active->conf_ext = active->conf; - active->conf = trig; + } else { + u32 conf_override = + !(le32_to_cpu(trig->override_trig) & 0xff); + u32 
region_override = + !(le32_to_cpu(trig->override_trig) & 0xff00); + u32 offset = 0; + u32 active_regs = + le32_to_cpu(active->trig->num_regions); + u32 new_regs = le32_to_cpu(trig->num_regions); + int mem_to_add = trig_regs_size; + + if (region_override) { + mem_to_add -= active_regs * sizeof(__le32); } else { - active->conf_ext = trig; + offset += active_regs; + new_regs += active_regs; } - } else { - active->conf = trig; + + if (iwl_fw_dbg_trig_realloc(fwrt, active, id, + active->size + mem_to_add)) + goto next; + + if (conf_override) + memcpy(active->trig, trig, sizeof(*trig)); + + memcpy(active->trig->data + offset, trig->data, + trig_regs_size); + active->trig->num_regions = cpu_to_le32(new_regs); } /* Since zero means infinity - just set to -1 */ - if (!le32_to_cpu(trig->occurrences)) - trig->occurrences = cpu_to_le32(-1); - if (!le32_to_cpu(trig->ignore_consec)) - trig->ignore_consec = cpu_to_le32(-1); + if (!le32_to_cpu(active->trig->occurrences)) + active->trig->occurrences = cpu_to_le32(-1); - iter += sizeof(*trig) + - le32_to_cpu(trig->num_regions) * sizeof(__le32); + active->active = true; +next: + iter += sizeof(*trig) + trig_regs_size; - active->active = num; - active->apply_point = apply_point; } } @@ -1738,9 +2432,13 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, u32 type = le32_to_cpu(tlv->type); switch (type) { - case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: - iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv); + case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: { + struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv; + + iwl_fw_dbg_buffer_apply(fwrt, ini_tlv, pnt); + iter += sizeof(buf_alloc->is_alloc); break; + } case IWL_UCODE_TLV_TYPE_HCMD: if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) { IWL_ERR(fwrt, @@ -1771,6 +2469,16 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_apply_point apply_point) { void *data = &fwrt->trans->apply_points[apply_point]; + int i; + + if (apply_point == IWL_FW_INI_APPLY_EARLY) { + for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++) + fwrt->dump.active_regs[i] = NULL; + + /* disable the triggers, used in recovery flow */ + for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) + fwrt->dump.active_trigs[i].active = false; + } _iwl_fw_dbg_apply_point(fwrt, data, apply_point, false); @@ -1778,3 +2486,27 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, _iwl_fw_dbg_apply_point(fwrt, data, apply_point, true); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); + +void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) +{ + /* if the wait event timeout elapses instead of wake up then + * the driver did not receive NMI interrupt and can not assume the FW + * is halted + */ + int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq, + !test_bit(STATUS_FW_WAIT_DUMP, + &fwrt->trans->status), + msecs_to_jiffies(2000)); + if (!ret) { + /* failed to receive NMI interrupt, assuming the FW is stuck */ + set_bit(STATUS_FW_ERROR, &fwrt->trans->status); + + clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); + } + + /* Assuming the op mode mutex is held at this point */ + iwl_fw_dbg_collect_sync(fwrt); + + iwl_trans_stop_device(fwrt->trans); +} +IWL_EXPORT_SYMBOL(iwl_fwrt_stop_device); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 6aabbdd72326..a199056234d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -8,7 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -102,13 +102,18 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) if (fwrt->dump.desc != &iwl_dump_desc_assert) kfree(fwrt->dump.desc); fwrt->dump.desc = NULL; - fwrt->dump.rt_status = 0; + fwrt->dump.lmac_err_id[0] = 0; + if (fwrt->smem_cfg.num_lmacs > 1) + fwrt->dump.lmac_err_id[1] = 0; + fwrt->dump.umac_err_id = 0; } void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, bool monitor_only, unsigned int delay); +int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig_type); int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, enum iwl_fw_dbg_trigger trig, const char *str, size_t len, @@ -157,9 +162,9 @@ iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, } static inline bool -iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_ms) +iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, u32 id, u32 dis_usec) { - unsigned long wind_jiff = msecs_to_jiffies(dis_ms); + unsigned long wind_jiff = usecs_to_jiffies(dis_usec); /* If this is the first event checked, jump to update start ts */ if (fwrt->dump.non_collect_ts_start[id] && @@ -176,11 +181,12 @@ iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, struct iwl_fw_dbg_trigger_tlv *trig) { + u32 usec = le16_to_cpu(trig->trig_dis_ms) * USEC_PER_MSEC; + if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) return false; - if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), - le16_to_cpu(trig->trig_dis_ms))) { + if (iwl_fw_dbg_no_trig_window(fwrt, le32_to_cpu(trig->id), usec)) { IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", trig->id); return false; @@ -217,23 +223,22 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, }) static inline bool -_iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, - const enum iwl_fw_dbg_trigger id) +iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_trigger_id id) { - struct iwl_fw_ini_active_triggers *trig = &fwrt->dump.active_trigs[id]; - u32 ms; + struct iwl_fw_ini_trigger *trig; + u32 usec; - if (!fwrt->trans->ini_valid) - return false; - if (!trig || !trig->active) + + if (!fwrt->trans->ini_valid || id >= IWL_FW_TRIGGER_ID_NUM || + !fwrt->dump.active_trigs[id].active) return false; - ms = le32_to_cpu(trig->conf->ignore_consec); - if (ms) - ms /= USEC_PER_MSEC; + trig = fwrt->dump.active_trigs[id].trig; + usec = le32_to_cpu(trig->ignore_consec); - if (iwl_fw_dbg_no_trig_window(fwrt, id, ms)) { + if (iwl_fw_dbg_no_trig_window(fwrt, id, usec)) { IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id); return false; } @@ -241,12 +246,6 @@ _iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, return true; } -#define iwl_fw_ini_trigger_on(fwrt, wdev, 
id) ({ \ - BUILD_BUG_ON(!__builtin_constant_p(id)); \ - BUILD_BUG_ON((id) >= IWL_FW_TRIGGER_ID_NUM); \ - _iwl_fw_ini_trigger_on((fwrt), (wdev), (id)); \ -}) - static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, @@ -266,20 +265,20 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, iwl_fw_dbg_get_trigger((fwrt)->fw,\ (trig))) -static int iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start) +static inline int +iwl_fw_dbg_start_stop_hcmd(struct iwl_fw_runtime *fwrt, bool start) { - struct iwl_continuous_record_cmd cont_rec = {}; + struct iwl_ldbg_config_cmd cmd = { + .type = start ? cpu_to_le32(START_DEBUG_RECORDING) : + cpu_to_le32(STOP_DEBUG_RECORDING), + }; struct iwl_host_cmd hcmd = { .id = LDBG_CONFIG_CMD, .flags = CMD_ASYNC, - .data[0] = &cont_rec, - .len[0] = sizeof(cont_rec), + .data[0] = &cmd, + .len[0] = sizeof(cmd), }; - cont_rec.record_mode.enable_recording = start ? - cpu_to_le16(START_DEBUG_RECORDING) : - cpu_to_le16(STOP_DEBUG_RECORDING); - return iwl_trans_send_cmd(fwrt->trans, &hcmd); } @@ -293,13 +292,13 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans, } if (params) { - params->in_sample = iwl_read_prph(trans, DBGC_IN_SAMPLE); - params->out_ctrl = iwl_read_prph(trans, DBGC_OUT_CTRL); + params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE); + params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL); } - iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); + iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0); udelay(100); - iwl_write_prph(trans, DBGC_OUT_CTRL, 0); + iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0); #ifdef CONFIG_IWLWIFI_DEBUGFS trans->dbg_rec_on = false; #endif @@ -327,9 +326,9 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans, iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1); } else { - iwl_write_prph(trans, DBGC_IN_SAMPLE, params->in_sample); + iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample); udelay(100); - iwl_write_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); + iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl); } } @@ -370,7 +369,9 @@ static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) { return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) && - fwrt->trans->cfg->d3_debug_data_length && + fwrt->trans->cfg->d3_debug_data_length && fwrt->ops && + fwrt->ops->d3_debug_enable && + fwrt->ops->d3_debug_enable(fwrt->ops_ctx) && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); } @@ -378,6 +379,7 @@ static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) { return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && !fwrt->trans->cfg->gen2 && + fwrt->cur_fw_img < IWL_UCODE_TYPE_MAX && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block; } @@ -430,10 +432,33 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CONFIG_IWLWIFI_DEBUGFS */ -void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt); -void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt); void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt); void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, enum iwl_fw_ini_apply_point apply_point); +void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt); + +static inline void iwl_fw_lmac1_set_alive_err_table(struct iwl_trans *trans, + u32 lmac_error_event_table) +{ + if (!(trans->error_event_table_tlv_status & + IWL_ERROR_EVENT_TABLE_LMAC1) || + 
WARN_ON(trans->lmac_error_event_table[0] != + lmac_error_event_table)) + trans->lmac_error_event_table[0] = lmac_error_event_table; +} + +static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans, + u32 umac_error_event_table) +{ + if (!(trans->error_event_table_tlv_status & + IWL_ERROR_EVENT_TABLE_UMAC) || + WARN_ON(trans->umac_error_event_table != + umac_error_event_table)) + trans->umac_error_event_table = umac_error_event_table; +} + +/* This bit is used to differentiate the legacy dump from the ini dump */ +#define INI_DUMP_BIT BIT(31) + #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 3e120dd47305..c1aa4360736b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -173,9 +173,8 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ _FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_fw_runtime) #define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \ - if (!debugfs_create_file(alias, mode, parent, fwrt, \ - &iwl_dbgfs_##name##_ops)) \ - goto err; \ + debugfs_create_file(alias, mode, parent, fwrt, \ + &iwl_dbgfs_##name##_ops); \ } while (0) #define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \ FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) @@ -321,14 +320,10 @@ out: FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512); -int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, +void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir) { INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk); FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200); FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200); - return 0; -err: - IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n"); - return -ENOMEM; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h index 88255035e8ef..fde40ff88451 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h @@ -63,14 +63,11 @@ #include "runtime.h" #ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, +void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, struct dentry *dbgfs_dir); #else -static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, - struct dentry *dbgfs_dir) -{ - return 0; -} +static inline void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, + struct dentry *dbgfs_dir) { } #endif /* CONFIG_IWLWIFI_DEBUGFS */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 65faecf552cd..9b5077bd46c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -8,7 +8,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -180,6 +180,8 @@ enum iwl_fw_error_dump_family { IWL_FW_ERROR_DUMP_FAMILY_8 = 8, }; +#define MAX_NUM_LMAC 2 + /** * struct iwl_fw_error_dump_info - info on the device / firmware * @device_family: the family of the device (7 / 8) @@ -187,7 +189,10 @@ enum iwl_fw_error_dump_family { * @fw_human_readable: human readable FW version * @dev_human_readable: name of the device * @bus_human_readable: name of the bus used - * @rt_status: the error_id/rt_status that that triggered the latest dump + * @num_of_lmacs: the number of lmacs + * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump + * if the dump collection was not initiated by an assert, the value is 0 + * @umac_err_id: the umac error_id/rt_status that triggered the latest dump * if the dump collection was not initiated by an assert, the value is 0 */ struct iwl_fw_error_dump_info { @@ -196,7 +201,9 @@ struct iwl_fw_error_dump_info { u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ]; u8 dev_human_readable[64]; u8 bus_human_readable[8]; - __le32 rt_status; + u8 num_of_lmacs; + __le32 umac_err_id; + __le32 lmac_err_id[MAX_NUM_LMAC]; } __packed; /** @@ -268,21 +275,67 @@ struct iwl_fw_error_dump_mem { }; /** - * struct iwl_fw_error_dump_named_mem - chunk of memory - * @type: &enum iwl_fw_error_dump_mem_type - * @offset: the offset from which the memory was read - * @name_len: name length - * @name: file name - * @data: the content of the memory + * struct iwl_fw_ini_error_dump_range - range of memory + * @start_addr: the start address of this range + * @range_data_size: the size of this range, in bytes + * @data: the actual memory */ -struct iwl_fw_error_dump_named_mem { - __le32 type; - __le32 offset; - u8 name_len; - u8 name[32]; - u8 data[]; +struct iwl_fw_ini_error_dump_range { + __le32 start_addr; + __le32 range_data_size; + __le32 data[]; +} __packed; + +/** + * struct iwl_fw_ini_error_dump_header - ini region dump header + * @num_of_ranges: number of ranges in this region + * @name_len: number of bytes allocated to the name string of this region + * @name: name of the region + */ +struct iwl_fw_ini_error_dump_header { + __le32 num_of_ranges; + __le32 name_len; + u8 name[IWL_FW_INI_MAX_NAME]; }; +/** + * struct iwl_fw_ini_error_dump - ini region dump + * @header: the header of this region + * @ranges: the memory ranges of this region + */ +struct iwl_fw_ini_error_dump { + struct iwl_fw_ini_error_dump_header header; + struct iwl_fw_ini_error_dump_range ranges[]; +} __packed; + +/* This bit is used to differentiate between lmac and umac rxf */ +#define IWL_RXF_UMAC_BIT BIT(31) + +/** + * struct iwl_fw_ini_fifo_error_dump_range - ini fifo range dump + * @fifo_num: the fifo num. 
In case of rxf and umac rxf, set BIT(31) to + * distinguish between lmac and umac + * @num_of_registers: num of registers to dump, dword size each + * @range_data_size: the size of the registers and fifo data + * @data: fifo data + */ +struct iwl_fw_ini_fifo_error_dump_range { + __le32 fifo_num; + __le32 num_of_registers; + __le32 range_data_size; + __le32 data[]; +} __packed; + +/** + * struct iwl_fw_ini_fifo_error_dump - ini fifo region dump + * @header: the header of this region + * @ranges: the memory ranges of this region + */ +struct iwl_fw_ini_fifo_error_dump { + struct iwl_fw_ini_error_dump_header header; + struct iwl_fw_ini_fifo_error_dump_range ranges[]; +} __packed; + /** * struct iwl_fw_error_dump_rb - content of an Receive Buffer * @index: the index of the Receive Buffer in the Rx queue @@ -297,6 +350,20 @@ struct iwl_fw_error_dump_rb { u8 data[]; }; +/** + * struct iwl_fw_ini_monitor_dram_dump - ini dram monitor dump + * @header - header of the region + * @write_ptr - write pointer position in the dram + * @cycle_cnt - cycles count + * @ranges - the memory ranges of this this region + */ +struct iwl_fw_ini_monitor_dram_dump { + struct iwl_fw_ini_error_dump_header header; + __le32 write_ptr; + __le32 cycle_cnt; + struct iwl_fw_ini_error_dump_range ranges[]; +} __packed; + /** * struct iwl_fw_error_dump_paging - content of the UMAC's image page * block on DRAM @@ -348,7 +415,9 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TDLS: trigger log collection upon TDLS related events. * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when * the firmware sends a tx reply. - * @FW_DBG_TRIGGER_NO_ALIVE: trigger log collection if alive flow fails + * @FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if alive flow timeouts + * @FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure + * in the driver. */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -366,7 +435,8 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, FW_DBG_TRIGGER_TX_STATUS, - FW_DBG_TRIGGER_NO_ALIVE, + FW_DBG_TRIGGER_ALIVE_TIMEOUT, + FW_DBG_TRIGGER_DRIVER, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 81f557c0b58d..641c95d03b15 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -9,6 +9,7 @@ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +33,7 @@ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -143,6 +145,9 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, IWL_UCODE_TLV_IML = 52, + IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54, + IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55, + IWL_UCODE_TLV_FW_RECOVERY_INFO = 57, IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1, IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2, IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3, @@ -263,6 +268,12 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_FRAG_EBS: This ucode supports fragmented EBS * @IWL_UCODE_TLV_API_REDUCE_TX_POWER: This ucode supports v5 of * the REDUCE_TX_POWER_CMD. + * @IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF: This ucode supports the short + * version of the beacon notification. + * @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of + * BEACON_FILTER_CONFIG_API_S_VER_4. + * @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of + * LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S. * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -287,6 +298,9 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2 = (__force iwl_ucode_tlv_api_t)42, IWL_UCODE_TLV_API_FRAG_EBS = (__force iwl_ucode_tlv_api_t)44, IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45, + IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46, + IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47, + IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ @@ -303,7 +317,6 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan. * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer - * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM) * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current * tx power value into TPC Report action frame and Link Measurement Report @@ -334,6 +347,9 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related * @IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2: firmware implements Coex Schema 2 + * IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command + * @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band + * (6 GHz). * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT @@ -357,19 +373,24 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger * command size (command version 4) that supports toggling ACK TX * power reduction. - * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * @IWL_UCODE_TLV_CAPA_D3_DEBUG: supports debug recording during D3 * @IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT: MCC response support 11ax * capability. 
+ * @IWL_UCODE_TLV_CAPA_CSI_REPORTING: firmware is capable of being configured + * to report the CSI information with (certain) RX frames + * @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both + * initiator and responder + * + * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ enum iwl_ucode_tlv_capa { + /* set 0 */ IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = (__force iwl_ucode_tlv_capa_t)0, IWL_UCODE_TLV_CAPA_LAR_SUPPORT = (__force iwl_ucode_tlv_capa_t)1, IWL_UCODE_TLV_CAPA_UMAC_SCAN = (__force iwl_ucode_tlv_capa_t)2, IWL_UCODE_TLV_CAPA_BEAMFORMER = (__force iwl_ucode_tlv_capa_t)3, - IWL_UCODE_TLV_CAPA_TOF_SUPPORT = (__force iwl_ucode_tlv_capa_t)5, IWL_UCODE_TLV_CAPA_TDLS_SUPPORT = (__force iwl_ucode_tlv_capa_t)6, IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = (__force iwl_ucode_tlv_capa_t)8, IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = (__force iwl_ucode_tlv_capa_t)9, @@ -387,6 +408,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)31, + + /* set 1 */ IWL_UCODE_TLV_CAPA_STA_PM_NOTIF = (__force iwl_ucode_tlv_capa_t)38, IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)39, IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40, @@ -394,6 +417,11 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43, IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2 = (__force iwl_ucode_tlv_capa_t)45, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46, + IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48, + IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47, + + /* set 2 */ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, @@ -412,6 +440,9 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_D3_DEBUG = (__force iwl_ucode_tlv_capa_t)87, IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT = (__force iwl_ucode_tlv_capa_t)88, IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT = (__force iwl_ucode_tlv_capa_t)89, + IWL_UCODE_TLV_CAPA_CSI_REPORTING = (__force iwl_ucode_tlv_capa_t)90, + + /* set 3 */ IWL_UCODE_TLV_CAPA_MLME_OFFLOAD = (__force iwl_ucode_tlv_capa_t)96, NUM_IWL_UCODE_TLV_CAPA diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 12333167ea23..f4c5a4d73206 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -105,6 +105,8 @@ struct iwl_ucode_capabilities { u32 n_scan_channels; u32 standard_phy_calibration_size; u32 flags; + u32 error_log_addr; + u32 error_log_size; unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)]; unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)]; }; @@ -222,28 +224,25 @@ struct iwl_fw_dbg { u32 dump_mask; }; +/** + * @tlv: the buffer allocation tlv + * @is_alloc: indicates if the buffer was already allocated + */ +struct iwl_fw_ini_allocation_data { + struct iwl_fw_ini_allocation_tlv tlv; + u32 is_alloc; +} __packed; + /** * struct iwl_fw_ini_active_triggers * @active: is this trigger active - * @apply_point: last apply point that updated this trigger - * @conf: active trigger - * 
@conf_ext: second trigger, contains extra regions to dump + * @size: allocated memory size of the trigger + * @trig: trigger */ struct iwl_fw_ini_active_triggers { bool active; - enum iwl_fw_ini_apply_point apply_point; - struct iwl_fw_ini_trigger *conf; - struct iwl_fw_ini_trigger *conf_ext; -}; - -/** - * struct iwl_fw_ini_active_regs - * @reg: active region from TLV - * @apply_point: apply point where it became active - */ -struct iwl_fw_ini_active_regs { - struct iwl_fw_ini_region_cfg *reg; - enum iwl_fw_ini_apply_point apply_point; + size_t size; + struct iwl_fw_ini_trigger *trig; }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 2efac307909e..7adf4e4e841a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -26,6 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -74,6 +76,7 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, fwrt->ops_ctx = ops_ctx; INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); + init_waitqueue_head(&fwrt->trans->fw_halt_waitq); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 4f7090f88cb0..a5fe1a8ca426 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright (C) 2018-2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -27,7 +27,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright (C) 2018-2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -73,6 +73,7 @@ struct iwl_fw_runtime_ops { void (*dump_end)(void *ctx); bool (*fw_running)(void *ctx); int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); + bool (*d3_debug_enable)(void *ctx); }; #define MAX_NUM_LMAC 2 @@ -90,7 +91,6 @@ struct iwl_fwrt_shared_mem_cfg { enum iwl_fw_runtime_status { IWL_FWRT_STATUS_DUMPING = 0, - IWL_FWRT_STATUS_WAIT_ALIVE, }; /** @@ -138,11 +138,13 @@ struct iwl_fw_runtime { u8 conf; /* ts of the beginning of a non-collect fw dbg data period */ - unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM - 1]; + unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM]; u32 *d3_debug_data; - struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID]; + struct iwl_fw_ini_region_cfg *active_regs[IWL_FW_INI_MAX_REGION_ID]; struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM]; - u32 rt_status; + u32 lmac_err_id[MAX_NUM_LMAC]; + u32 umac_err_id; + void *fifo_iter; } dump; #ifdef CONFIG_IWLWIFI_DEBUGFS struct { @@ -160,8 +162,20 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) { + int i; + kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data = NULL; + + for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) { + struct iwl_fw_ini_active_triggers *active = + &fwrt->dump.active_trigs[i]; + + active->active = false; + active->size = 0; + kfree(active->trig); + active->trig = NULL; + } } void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 91861a9cbe57..f5f87773667b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -7,7 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright (C) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -89,6 +89,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, IWL_DEVICE_FAMILY_22560, + IWL_DEVICE_FAMILY_AX210, }; /* @@ -335,10 +336,6 @@ struct iwl_csr_params { * @fw_name_pre: Firmware filename prefix. The api version and extension * (.ucode) will be added to filename before loading from disk. The * filename is constructed as fw_name_pre.ucode. - * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps - * (if supported) - * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf - * next step. Supported only in integrated solutions. * @ucode_api_max: Highest version of uCode API supported by driver. * @ucode_api_min: Lowest version of uCode API supported by driver. 
* @max_inst_size: The maximal length of the fw inst section (only DVM) @@ -383,6 +380,9 @@ struct iwl_csr_params { * @nvm_type: see &enum iwl_nvm_type * @d3_debug_data_base_addr: base address where D3 debug data is stored * @d3_debug_data_length: length of the D3 debug data + * @bisr_workaround: BISR hardware workaround (for 22260 series devices) + * @min_txq_size: minimum number of slots required in a TX queue + * @umac_prph_offset: offset to add to UMAC periphery address * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -392,8 +392,6 @@ struct iwl_cfg { /* params specific to an individual device within a device family */ const char *name; const char *fw_name_pre; - const char *fw_name_pre_b_or_c_step; - const char *fw_name_pre_rf_next_step; /* params not likely to change within a device family */ const struct iwl_base_params *base_params; /* params likely to change within a device family */ @@ -434,7 +432,8 @@ struct iwl_cfg { use_tfh:1, gen2:1, cdb:1, - dbgc_supported:1; + dbgc_supported:1, + bisr_workaround:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; @@ -449,37 +448,12 @@ struct iwl_cfg { u32 extra_phy_cfg_flags; u32 d3_debug_data_base_addr; u32 d3_debug_data_length; + u32 min_txq_size; + u32 umac_prph_offset; }; -static const struct iwl_csr_params iwl_csr_v1 = { - .flag_mac_clock_ready = 0, - .flag_val_mac_access_en = 0, - .flag_init_done = 2, - .flag_mac_access_req = 3, - .flag_sw_reset = 7, - .flag_master_dis = 8, - .flag_stop_master = 9, - .addr_sw_reset = (CSR_BASE + 0x020), - .mac_addr0_otp = 0x380, - .mac_addr1_otp = 0x384, - .mac_addr0_strap = 0x388, - .mac_addr1_strap = 0x38C -}; - -static const struct iwl_csr_params iwl_csr_v2 = { - .flag_init_done = 6, - .flag_mac_clock_ready = 20, - .flag_val_mac_access_en = 20, - .flag_mac_access_req = 21, - .flag_master_dis = 28, - .flag_stop_master = 29, - .flag_sw_reset = 31, - .addr_sw_reset = (CSR_BASE + 0x024), - .mac_addr0_otp = 0x30, - .mac_addr1_otp = 0x34, - .mac_addr0_strap = 0x38, - .mac_addr1_strap = 0x3C -}; +extern const struct iwl_csr_params iwl_csr_v1; +extern const struct iwl_csr_params iwl_csr_v2; /* * This list declares the config structures for all devices. 
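/*
 * Illustrative example, not from this patch; the names and values below
 * are placeholders.  A device config making use of the fields
 * documented above might look like:
 *
 *	const struct iwl_cfg iwl_example_ax210_cfg = {
 *		.name = "Example AX210 device",
 *		.device_family = IWL_DEVICE_FAMILY_AX210,
 *		.min_txq_size = 128,
 *		.umac_prph_offset = 0x300000,
 *		.bisr_workaround = 1,
 *	};
 *
 * Older configs simply leave the new fields at zero, so UMAC PRPH
 * accesses and TX queue sizing keep their previous behaviour.
 */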
@@ -551,38 +525,53 @@ extern const struct iwl_cfg iwl8275_2ac_cfg; extern const struct iwl_cfg iwl4165_2ac_cfg; extern const struct iwl_cfg iwl9160_2ac_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; +extern const struct iwl_cfg iwl9260_2ac_160_cfg; extern const struct iwl_cfg iwl9260_killer_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; +extern const struct iwl_cfg iwl9560_2ac_160_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg_soc; extern const struct iwl_cfg iwl9461_2ac_cfg_soc; extern const struct iwl_cfg iwl9462_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_2ac_160_cfg_soc; extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc; extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk; +extern const struct iwl_cfg iwl9560_2ac_160_cfg_shared_clk; extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk; extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; +extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; +extern const struct iwl_cfg iwl22260_2ax_cfg; +extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; +extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; +extern const struct iwl_cfg killer1650x_2ax_cfg; +extern const struct iwl_cfg killer1650w_2ax_cfg; extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0; +extern const struct iwl_cfg iwl9560_2ac_160_cfg_qu_b0_jf_b0; extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; -extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; +extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb; +extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0; +extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0; +extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0; +extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0; #endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index caa5806acd81..aea6d03e545a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -180,6 +180,7 @@ /* Bits for CSR_HW_IF_CONFIG_REG */ #define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003) #define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C) +#define CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM (0x00000080) #define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0) #define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) #define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) @@ -325,12 +326,16 @@ enum { 
#define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) #define CSR_HW_REV_TYPE_QNJ (0x0000360) +#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) #define CSR_HW_REV_TYPE_HR_CDB (0x0000340) +#define CSR_HW_REV_TYPE_SO (0x0000370) +#define CSR_HW_REV_TYPE_TY (0x0000420) /* RF_ID value */ #define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) +#define CSR_HW_RF_ID_TYPE_GF (0x0010D000) /* HW_RF CHIP ID */ #define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) @@ -592,6 +597,7 @@ enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), MSIX_HW_INT_CAUSES_REG_IPC = BIT(1), + MSIX_HW_INT_CAUSES_REG_IML = BIT(2), MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 43d815cb3ce9..5798f434f68f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -71,6 +71,7 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, u32 apply_point = le32_to_cpu(header->apply_point); int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv); + int offset_size = copy_size; if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM, "Invalid apply point id %d\n", apply_point)) @@ -81,17 +82,25 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, else data = &trans->apply_points[apply_point]; + /* add room for is_alloc field in &iwl_fw_ini_allocation_data struct */ + if (le32_to_cpu(tlv->type) == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) { + struct iwl_fw_ini_allocation_data *buf_alloc = + (void *)tlv->data; + + offset_size += sizeof(buf_alloc->is_alloc); + } + /* * Make sure we still have room to copy this TLV. Offset points to the * location the last copy ended. */ - if (WARN_ONCE(data->offset + copy_size > data->size, + if (WARN_ONCE(data->offset + offset_size > data->size, "Not enough memory for apply point %d\n", apply_point)) return; memcpy(data->data + data->offset, (void *)tlv, copy_size); - data->offset += copy_size; + data->offset += offset_size; } void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data, @@ -129,6 +138,16 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data, if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM)) continue; + /* add room for is_alloc field in &iwl_fw_ini_allocation_data + * struct + */ + if (tlv_type == IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION) { + struct iwl_fw_ini_allocation_data *buf_alloc = + (void *)tlv->data; + + size[apply] += sizeof(buf_alloc->is_alloc); + } + size[apply] += sizeof(*tlv) + tlv_len; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index a2af68a0d34b..655ff5694560 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright (C) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project. 
* @@ -159,7 +160,7 @@ do { \ /* 0x000F0000 - 0x00010000 */ #define IWL_DL_FW 0x00010000 #define IWL_DL_RF_KILL 0x00020000 -#define IWL_DL_FW_ERRORS 0x00040000 +#define IWL_DL_TPT 0x00040000 /* 0x00F00000 - 0x00100000 */ #define IWL_DL_RATE 0x00100000 #define IWL_DL_CALIB 0x00200000 @@ -193,7 +194,6 @@ do { \ #define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) #define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) #define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) -#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a) #define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) #define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) @@ -215,6 +215,7 @@ do { \ #define IWL_DEBUG_DEV_RADIO(p, f, a...) IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a) #define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) #define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) +#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a) #define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a) #define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index bf1be985f36b..689a65b11cc3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
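/*
 * Illustrative usage, not from this patch ("mvm" is a placeholder for
 * any object carrying a ->dev member): the throughput debug class
 * defined above is used like the other IWL_DEBUG_* helpers and is
 * enabled by setting bit 0x00040000 in the debug mask, e.g.
 *
 *	IWL_DEBUG_TPT(mvm, "AMPDU disabled, throughput %u below limit\n",
 *		      tpt);
 */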
* * Redistribution and use in source and binary forms, with or without @@ -75,6 +77,7 @@ #include "iwl-dbg-tlv.h" #include "iwl-config.h" #include "iwl-modparams.h" +#include "fw/api/alive.h" /****************************************************************************** * @@ -210,18 +213,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) { const struct iwl_cfg *cfg = drv->trans->cfg; char tag[8]; - const char *fw_pre_name; if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && - (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP || - CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP)) - fw_pre_name = cfg->fw_name_pre_b_or_c_step; - else if (drv->trans->cfg->integrated && - CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP && - cfg->fw_name_pre_rf_next_step) - fw_pre_name = cfg->fw_name_pre_rf_next_step; - else - fw_pre_name = cfg->fw_name_pre; + (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP && + CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) { + IWL_ERR(drv, + "Only HW steps B and C are currently supported (0x%0x)\n", + drv->trans->hw_rev); + return -EINVAL; + } if (first) { drv->fw_index = cfg->ucode_api_max; @@ -235,15 +235,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) IWL_ERR(drv, "no suitable firmware found!\n"); if (cfg->ucode_api_min == cfg->ucode_api_max) { - IWL_ERR(drv, "%s%d is required\n", fw_pre_name, + IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre, cfg->ucode_api_max); } else { IWL_ERR(drv, "minimum version required: %s%d\n", - fw_pre_name, - cfg->ucode_api_min); + cfg->fw_name_pre, cfg->ucode_api_min); IWL_ERR(drv, "maximum version supported: %s%d\n", - fw_pre_name, - cfg->ucode_api_max); + cfg->fw_name_pre, cfg->ucode_api_max); } IWL_ERR(drv, @@ -252,7 +250,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first) } snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode", - fw_pre_name, tag); + cfg->fw_name_pre, tag); IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n", drv->firmware_name); @@ -593,6 +591,8 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, return 0; } +#define FW_ADDR_CACHE_CONTROL 0xC0000000 + static int iwl_parse_tlv_firmware(struct iwl_drv *drv, const struct firmware *ucode_raw, struct iwl_firmware_pieces *pieces, @@ -1090,6 +1090,52 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -ENOMEM; break; } + case IWL_UCODE_TLV_FW_RECOVERY_INFO: { + struct { + __le32 buf_addr; + __le32 buf_size; + } *recov_info = (void *)tlv_data; + + if (tlv_len != sizeof(*recov_info)) + goto invalid_tlv_len; + capa->error_log_addr = + le32_to_cpu(recov_info->buf_addr); + capa->error_log_size = + le32_to_cpu(recov_info->buf_size); + } + break; + case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: { + struct iwl_umac_debug_addrs *dbg_ptrs = + (void *)tlv_data; + + if (tlv_len != sizeof(*dbg_ptrs)) + goto invalid_tlv_len; + if (drv->trans->cfg->device_family < + IWL_DEVICE_FAMILY_22000) + break; + drv->trans->umac_error_event_table = + le32_to_cpu(dbg_ptrs->error_info_addr) & + ~FW_ADDR_CACHE_CONTROL; + drv->trans->error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_UMAC; + break; + } + case IWL_UCODE_TLV_LMAC_DEBUG_ADDRS: { + struct iwl_lmac_debug_addrs *dbg_ptrs = + (void *)tlv_data; + + if (tlv_len != sizeof(*dbg_ptrs)) + goto invalid_tlv_len; + if (drv->trans->cfg->device_family < + IWL_DEVICE_FAMILY_22000) + break; + drv->trans->lmac_error_event_table[0] = + le32_to_cpu(dbg_ptrs->error_event_table_ptr) & + ~FW_ADDR_CACHE_CONTROL; + 
drv->trans->error_event_table_tlv_status |= + IWL_ERROR_EVENT_TABLE_LMAC1; + break; + } case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: case IWL_UCODE_TLV_TYPE_HCMD: case IWL_UCODE_TLV_TYPE_REGIONS: @@ -1207,11 +1253,6 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op) #ifdef CONFIG_IWLWIFI_DEBUGFS drv->dbgfs_op_mode = debugfs_create_dir(op->name, drv->dbgfs_drv); - if (!drv->dbgfs_op_mode) { - IWL_ERR(drv, - "failed to create opmode debugfs directory\n"); - return op_mode; - } dbgfs_dir = drv->dbgfs_op_mode; #endif @@ -1267,8 +1308,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; - /* dump all fw memory areas by default except d3 debug data */ - fw->dbg.dump_mask = 0xfffdffff; + /* dump all fw memory areas by default */ + fw->dbg.dump_mask = 0xffffffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) @@ -1574,20 +1615,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev), iwl_dbgfs_root); - if (!drv->dbgfs_drv) { - IWL_ERR(drv, "failed to create debugfs directory\n"); - ret = -ENOMEM; - goto err_free_tlv; - } - /* Create transport layer debugfs dir */ drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv); - - if (!drv->trans->dbgfs_dir) { - IWL_ERR(drv, "failed to create transport debugfs directory\n"); - ret = -ENOMEM; - goto err_free_dbgfs; - } #endif ret = iwl_request_firmware(drv, true); @@ -1600,9 +1629,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS -err_free_dbgfs: debugfs_remove_recursive(drv->dbgfs_drv); -err_free_tlv: iwl_fw_dbg_free(drv->trans); #endif kfree(drv); @@ -1713,9 +1740,6 @@ static int __init iwl_drv_init(void) #ifdef CONFIG_IWLWIFI_DEBUGFS /* Create the root of iwlwifi debugfs subsystem. 
*/ iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL); - - if (!iwl_dbgfs_root) - return -EFAULT; #endif return iwl_pci_register_driver(); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index 75940ac406b9..04338c3a6205 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -850,8 +850,7 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, if (WARN_ON(!cfg || !cfg->eeprom_params)) return NULL; - data = kzalloc(sizeof(*data) + - sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, + data = kzalloc(struct_size(data, channels, IWL_NUM_CHANNELS), GFP_KERNEL); if (!data) return NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c index a6db6a814257..82e87192119e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c @@ -193,34 +193,25 @@ static int iwl_init_otp_access(struct iwl_trans *trans) { int ret; - /* Enable 40MHz radio clock */ - iwl_write32(trans, CSR_GP_CNTRL, - iwl_read32(trans, CSR_GP_CNTRL) | - BIT(trans->cfg->csr->flag_init_done)); - - /* wait for clock to be ready */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_clock_ready), - BIT(trans->cfg->csr->flag_mac_clock_ready), - 25000); - if (ret < 0) { - IWL_ERR(trans, "Time out access OTP\n"); - } else { - iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - udelay(5); - iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - - /* - * CSR auto clock gate disable bit - - * this is only applicable for HW with OTP shadow RAM - */ - if (trans->cfg->base_params->shadow_ram_support) - iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - } - return ret; + ret = iwl_finish_nic_init(trans); + if (ret) + return ret; + + iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + /* + * CSR auto clock gate disable bit - + * this is only applicable for HW with OTP shadow RAM + */ + if (trans->cfg->base_params->shadow_ram_support) + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + + return 0; } static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 4f10914f6048..a704e25af810 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,9 +1,13 @@ /****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2016 Intel Deutschland GmbH - * - * Portions of this file are derived from the ipw3945 project. + * Copyright(C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -15,12 +19,45 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. 
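/*
 * Note on the debugfs changes above, not part of the patch: the removed
 * error handling follows the kernel convention that failures of
 * debugfs_create_dir()/debugfs_create_file() are not fatal and their
 * return values need not be checked, so creation is now simply
 *
 *	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
 *					    iwl_dbgfs_root);
 *
 * and the driver carries on regardless of the result.
 */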
* * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright (C) 2018 - 2019 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * *****************************************************************************/ #include #include @@ -33,6 +70,36 @@ #include "iwl-prph.h" #include "iwl-fh.h" +const struct iwl_csr_params iwl_csr_v1 = { + .flag_mac_clock_ready = 0, + .flag_val_mac_access_en = 0, + .flag_init_done = 2, + .flag_mac_access_req = 3, + .flag_sw_reset = 7, + .flag_master_dis = 8, + .flag_stop_master = 9, + .addr_sw_reset = CSR_BASE + 0x020, + .mac_addr0_otp = 0x380, + .mac_addr1_otp = 0x384, + .mac_addr0_strap = 0x388, + .mac_addr1_strap = 0x38C +}; + +const struct iwl_csr_params iwl_csr_v2 = { + .flag_init_done = 6, + .flag_mac_clock_ready = 20, + .flag_val_mac_access_en = 20, + .flag_mac_access_req = 21, + .flag_master_dis = 28, + .flag_stop_master = 29, + .flag_sw_reset = 31, + .addr_sw_reset = CSR_BASE + 0x024, + .mac_addr0_otp = 0x30, + .mac_addr1_otp = 0x34, + .mac_addr0_strap = 0x38, + .mac_addr1_strap = 0x3C +}; + void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) { trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val); @@ -240,9 +307,12 @@ void iwl_force_nmi(struct iwl_trans *trans) if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV); + else if (trans->cfg->device_family < IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, + UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK); else - iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER, - UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK); + iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, + UREG_DOORBELL_TO_ISR6_NMI_BIT); } IWL_EXPORT_SYMBOL(iwl_force_nmi); @@ -421,3 +491,43 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf) return 0; } + +int iwl_finish_nic_init(struct iwl_trans *trans) +{ + int err; + + if (trans->cfg->bisr_workaround) { + /* ensure the TOP FSM isn't still in previous reset */ + mdelay(2); + } + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_set_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_init_done)); + + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) + udelay(2); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. iwl_write_prph() + * and accesses to uCode SRAM. + */ + err = iwl_poll_bit(trans, CSR_GP_CNTRL, + BIT(trans->cfg->csr->flag_mac_clock_ready), + BIT(trans->cfg->csr->flag_mac_clock_ready), + 25000); + if (err < 0) + IWL_DEBUG_INFO(trans, "Failed to wake NIC\n"); + + if (trans->cfg->bisr_workaround) { + /* ensure BISR shift has finished */ + udelay(200); + } + + return err < 0 ? err : 0; +} +IWL_EXPORT_SYMBOL(iwl_finish_nic_init); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index 38085850a2d3..920e2146ea3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -1,8 +1,11 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. * - * Portions of this file are derived from the ipw3945 project. 
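/*
 * Illustrative note, not part of the patch: iwl_poll_bit() used by
 * iwl_finish_nic_init() above returns a negative errno on timeout and a
 * non-negative value otherwise, which is why the helper flattens its
 * result to 0 on success.  Callers such as iwl_init_otp_access() then
 * reduce the old open-coded "set init_done, poll clock ready" sequence
 * to:
 *
 *	ret = iwl_finish_nic_init(trans);
 *	if (ret)
 *		return ret;
 */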
+ * GPL LICENSE SUMMARY + * + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -14,14 +17,44 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright (C) 2018 - 2019 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ - #ifndef __iwl_io_h__ #define __iwl_io_h__ @@ -66,7 +99,48 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); void iwl_force_nmi(struct iwl_trans *trans); +int iwl_finish_nic_init(struct iwl_trans *trans); + /* Error handling */ int iwl_dump_fh(struct iwl_trans *trans, char **buf); +/* + * UMAC periphery address space changed from 0xA00000 to 0xD00000 starting from + * device family AX200. So peripheries used in families above and below AX200 + * should go through iwl_..._umac_..._prph. 
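 *
 * (Illustrative note, not part of the patch: configs that predate the
 * address-space move simply leave cfg->umac_prph_offset at 0, so the
 * wrappers below degenerate to the plain PRPH accessors, while newer
 * configs supply the offset that relocates 0xA00000-based addresses
 * into the 0xD00000-based window.  A typical call site then becomes
 *
 *	iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0);
 *
 * instead of the previous iwl_write_prph() call.)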
+ */ +static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs) +{ + return ofs + trans->cfg->umac_prph_offset; +} + +static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs) +{ + return iwl_read_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset); +} + +static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs) +{ + return iwl_read_prph(trans, ofs + trans->cfg->umac_prph_offset); +} + +static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs, + u32 val) +{ + iwl_write_prph_no_grab(trans, ofs + trans->cfg->umac_prph_offset, val); +} + +static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs, + u32 val) +{ + iwl_write_prph(trans, ofs + trans->cfg->umac_prph_offset, val); +} + +static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr, + u32 bits, u32 mask, int timeout) +{ + return iwl_poll_prph_bit(trans, addr + trans->cfg->umac_prph_offset, + bits, mask, timeout); +} + #endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 73b1c46f1158..0cae2ef9b9df 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -152,4 +152,22 @@ struct iwl_mod_params { bool enable_ini; }; +static inline bool iwl_enable_rx_ampdu(void) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) + return false; + return true; +} + +static inline bool iwl_enable_tx_ampdu(void) +{ + if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) + return false; + if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) + return true; + + /* enabled by default */ + return true; +} + #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index d9afedc3d1d9..87d6de7efdd2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -8,7 +8,7 @@ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -463,6 +463,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, } vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; + + vht_cap->vht_mcs.tx_highest |= + cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE); } static struct ieee80211_sband_iftype_data iwl_he_capa[] = { @@ -479,7 +482,6 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | - IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | @@ -490,7 +492,9 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { .mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | - IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, + IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | + IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS | + IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | @@ -498,18 +502,13 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = - IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | - IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | - IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | @@ -517,16 +516,8 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[5] = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | - IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK, + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, .phy_cap_info[6] = - IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | - IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | - IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | - IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | - IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | - IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | @@ -537,11 +528,12 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | - IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ, + IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | - IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, + 
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, }, /* * Set default Tx/Rx HE MCS NSS Support field. @@ -569,35 +561,32 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_RES, + IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | - IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .mac_cap_info[5] = + IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD, .phy_cap_info[2] = - IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | - IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US, .phy_cap_info[3] = - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM | IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, .phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | @@ -605,12 +594,8 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[5] = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | - IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK, + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, .phy_cap_info[6] = - IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | - IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | @@ -620,10 +605,11 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = { IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | - IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ, + IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996, .phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | - IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
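Editor's note (illustrative aside, not part of the recorded patch): the iwl-nvm-parse.c hunks that follow convert open-coded flexible-array allocations to struct_size() and fold a kzalloc()+memcpy() pair into kmemdup(). A minimal sketch of both patterns, using a hypothetical example_data structure rather than the driver's real types, looks like this:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>		/* kzalloc(), kmemdup() */
#include <linux/types.h>

/* Hypothetical structure with a trailing flexible array, standing in
 * for struct iwl_nvm_data and its channels[] member. */
struct example_data {
	u32 n_channels;
	u32 channels[];
};

static struct example_data *example_alloc(u32 n_channels)
{
	/* struct_size() computes sizeof(*data) plus n_channels trailing
	 * elements, replacing the open-coded
	 * sizeof(*data) + sizeof(elem) * n_channels arithmetic. */
	struct example_data *data =
		kzalloc(struct_size(data, channels, n_channels), GFP_KERNEL);

	if (data)
		data->n_channels = n_channels;
	return data;
}

static void *example_dup(const void *src, size_t len)
{
	/* kmemdup() allocates and copies in one call, replacing the
	 * kzalloc() + memcpy() pair used for the regdomain copy. */
	return kmemdup(src, len, GFP_KERNEL);
}

Both helpers behave like the code they replace; struct_size() additionally saturates to SIZE_MAX on multiplication overflow so the allocation fails cleanly instead of being undersized, which is the usual motivation for this conversion.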
@@ -947,15 +933,13 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *ch_section; if (cfg->nvm_type != IWL_NVM_EXT) - data = kzalloc(sizeof(*data) + - sizeof(struct ieee80211_channel) * - IWL_NVM_NUM_CHANNELS, - GFP_KERNEL); + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS), + GFP_KERNEL); else - data = kzalloc(sizeof(*data) + - sizeof(struct ieee80211_channel) * - IWL_NVM_NUM_CHANNELS_EXT, - GFP_KERNEL); + data = kzalloc(struct_size(data, channels, + IWL_NVM_NUM_CHANNELS_EXT), + GFP_KERNEL); if (!data) return NULL; @@ -1103,12 +1087,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS; - if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) - return ERR_PTR(-EINVAL); - if (WARN_ON(num_of_ch > max_num_ch)) num_of_ch = max_num_ch; + if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) + return ERR_PTR(-EINVAL); + IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", num_of_ch); @@ -1196,14 +1180,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, regd_to_copy = sizeof(struct ieee80211_regdomain) + valid_rules * sizeof(struct ieee80211_reg_rule); - copy_rd = kzalloc(regd_to_copy, GFP_KERNEL); + copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL); if (!copy_rd) { copy_rd = ERR_PTR(-ENOMEM); goto out; } - memcpy(copy_rd, regd, regd_to_copy); - out: kfree(regdb_ptrs); kfree(regd); @@ -1444,9 +1426,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, if (empty_otp) IWL_INFO(trans, "OTP is empty\n"); - nvm = kzalloc(sizeof(*nvm) + - sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, - GFP_KERNEL); + nvm = kzalloc(struct_size(nvm, channels, IWL_NUM_CHANNELS), GFP_KERNEL); if (!nvm) { ret = -ENOMEM; goto out; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 9d89b7d7f9fa..1af9f9e1ecd4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -8,7 +8,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -358,12 +358,12 @@ /* FW monitor */ #define MON_BUFF_SAMPLE_CTL (0xa03c00) -#define MON_BUFF_BASE_ADDR (0xa03c3c) +#define MON_BUFF_BASE_ADDR (0xa03c1c) #define MON_BUFF_END_ADDR (0xa03c40) #define MON_BUFF_WRPTR (0xa03c44) #define MON_BUFF_CYCLE_CNT (0xa03c48) /* FW monitor family 8000 and on */ -#define MON_BUFF_BASE_ADDR_VER2 (0xa03c3c) +#define MON_BUFF_BASE_ADDR_VER2 (0xa03c1c) #define MON_BUFF_END_ADDR_VER2 (0xa03c20) #define MON_BUFF_WRPTR_VER2 (0xa03c24) #define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28) @@ -433,4 +433,7 @@ enum { #define HPM_DEBUG 0xA03440 #define PERSISTENCE_BIT BIT(12) #define PREG_WFPM_ACCESS BIT(12) + +#define UREG_DOORBELL_TO_ISR6 0xA05C04 +#define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0) #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index a7009cd4232d..bbebbf3efd57 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -8,6 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -230,6 +232,12 @@ enum iwl_hcmd_dataflag { IWL_HCMD_DFL_DUP = BIT(1), }; +enum iwl_error_event_table_status { + IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0), + IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1), + IWL_ERROR_EVENT_TABLE_UMAC = BIT(2), +}; + /** * struct iwl_host_cmd - Host command to the uCode * @@ -330,6 +338,7 @@ enum iwl_d3_status { * are sent * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation + * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump */ enum iwl_trans_status { STATUS_SYNC_HCMD_ACTIVE, @@ -342,6 +351,7 @@ enum iwl_trans_status { STATUS_TRANS_GOING_IDLE, STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, + STATUS_FW_WAIT_DUMP, }; static inline int @@ -684,6 +694,9 @@ enum iwl_plat_pm_mode { */ #define IWL_TRANS_IDLE_TIMEOUT 2000 +/* Max time to wait for nmi interrupt */ +#define IWL_TRANS_NMI_TIMEOUT (HZ / 4) + /** * struct iwl_dram_data * @physical: page phy pointer @@ -696,6 +709,20 @@ struct iwl_dram_data { int size; }; +/** + * struct iwl_self_init_dram - dram data used by self init process + * @fw: lmac and umac dram data + * @fw_cnt: total number of items in array + * @paging: paging dram data + * @paging_cnt: total number of items in array + */ +struct iwl_self_init_dram { + struct iwl_dram_data *fw; + int fw_cnt; + struct iwl_dram_data *paging; + int paging_cnt; +}; + /** * struct iwl_trans - transport common data * @@ -738,6 +765,10 @@ struct iwl_dram_data { * mode is set during the initialization phase and is not * supposed to change during runtime. 
* @dbg_rec_on: true iff there is a fw debug recording currently active + * @lmac_error_event_table: addrs of lmacs error tables + * @umac_error_event_table: addr of umac error table + * @error_event_table_tlv_status: bitmap that indicates what error table + * pointers was recevied via TLV. use enum &iwl_error_event_table_status */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -790,12 +821,18 @@ struct iwl_trans { u8 dbg_n_dest_reg; int num_blocks; struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM]; + struct iwl_self_init_dram init_dram; enum iwl_plat_pm_mode system_pm_mode; enum iwl_plat_pm_mode runtime_pm_mode; bool suspending; bool dbg_rec_on; + u32 lmac_error_event_table[2]; + u32 umac_error_event_table; + unsigned int error_event_table_tlv_status; + wait_queue_head_t fw_halt_waitq; + /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ char trans_specific[0] __aligned(sizeof(void *)); @@ -1202,6 +1239,10 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans) /* prevent double restarts due to the same erroneous FW */ if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) iwl_op_mode_nic_error(trans->op_mode); + + if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status)) + wake_up(&trans->fw_halt_waitq); + } /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 9ffd21918b5a..dd268c4bd371 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -5,9 +5,9 @@ iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o rs-fw.o iwlmvm-y += power.o coex.o iwlmvm-y += tt.o offloading.o tdls.o +iwlmvm-y += ftm-responder.o ftm-initiator.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-y += tof.o iwlmvm-$(CONFIG_PM) += d3.o -ccflags-y += -I$(src)/../ +ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 730e37744dc0..3d2abbc5c76c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -241,7 +241,6 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, struct iwl_bt_coex_reduced_txp_update_cmd cmd = {}; struct iwl_mvm_sta *mvmsta; u32 value; - int ret; mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); if (!mvmsta) @@ -262,10 +261,8 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, cmd.reduced_txp = cpu_to_le32(value); mvmsta->bt_reduced_txpower = enable; - ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC, - sizeof(cmd), &cmd); - - return ret; + return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, + CMD_ASYNC, sizeof(cmd), &cmd); } struct iwl_bt_iterator_data { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index d96ada3c06fc..dff14f1ec55f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -8,6 +8,7 @@ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -63,6 +65,7 @@ #define __MVM_CONSTANTS_H #include +#include "fw-api.h" #define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM 20 @@ -114,6 +117,7 @@ #define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH 100 /* packets/10 seconds */ #define IWL_MVM_UAPSD_NONAGG_PERIOD 5000 /* msecs */ #define IWL_MVM_UAPSD_NOAGG_LIST_LEN IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM +#define IWL_MVM_NON_TRANSMITTING_AP 0 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 @@ -145,5 +149,8 @@ #define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ #define IWL_MVM_RS_TPC_TX_POWER_STEP 3 #define IWL_MVM_ENABLE_EBS 1 +#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE +#define IWL_MVM_FTM_INITIATOR_DYNACK true +#define IWL_MVM_D3_DEBUG false #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 01b5338201d6..808bc6f363d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1899,7 +1899,7 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 base = mvm->error_event_table[0]; + u32 base = mvm->trans->lmac_error_event_table[0]; struct error_table_start { /* cf. 
struct iwl_error_event_table */ u32 valid; @@ -2125,7 +2125,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) file->private_data = inode->i_private; - ieee80211_stop_queues(mvm->hw); synchronize_net(); mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; @@ -2140,10 +2139,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) rtnl_unlock(); if (err > 0) err = -EINVAL; - if (err) { - ieee80211_wake_queues(mvm->hw); + if (err) return err; - } + mvm->d3_test_active = true; mvm->keep_vif = NULL; return 0; @@ -2223,8 +2221,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); - ieee80211_wake_queues(mvm->hw); - return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 33b0af24a537..2453ceabf00d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -60,7 +60,6 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw/api/tof.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, @@ -523,753 +522,30 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = -EINVAL; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("tof_disabled=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.tof_disabled = value; - goto out; - } - - data = iwl_dbgfs_is_match("one_sided_disabled=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.one_sided_disabled = value; - goto out; - } - - data = iwl_dbgfs_is_match("is_debug_mode=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.is_debug_mode = value; - goto out; - } - - data = iwl_dbgfs_is_match("is_buf=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.tof_cfg.is_buf_required = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_tof_cfg=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - ret = iwl_mvm_tof_config_cmd(mvm); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_enable_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_config_cmd *cmd; - - cmd = &mvm->tof_data.tof_cfg; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n", - cmd->tof_disabled); - pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n", - cmd->one_sided_disabled); - pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n", - cmd->is_debug_mode); - pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n", - cmd->is_buf_required); - - 
mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("burst_period=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (!ret) - mvm->tof_data.responder_cfg.burst_period = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("min_delta_ftm=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.min_delta_ftm = value; - goto out; - } - - data = iwl_dbgfs_is_match("burst_duration=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.burst_duration = value; - goto out; - } - - data = iwl_dbgfs_is_match("num_of_burst_exp=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.num_of_burst_exp = value; - goto out; - } - - data = iwl_dbgfs_is_match("abort_responder=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.abort_responder = value; - goto out; - } - - data = iwl_dbgfs_is_match("get_ch_est=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.get_ch_est = value; - goto out; - } - - data = iwl_dbgfs_is_match("recv_sta_req_params=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.recv_sta_req_params = value; - goto out; - } - - data = iwl_dbgfs_is_match("channel_num=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.channel_num = value; - goto out; - } - - data = iwl_dbgfs_is_match("bandwidth=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.bandwidth = value; - goto out; - } - - data = iwl_dbgfs_is_match("rate=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.rate = value; - goto out; - } - - data = iwl_dbgfs_is_match("bssid=", buf); - if (data) { - u8 *mac = mvm->tof_data.responder_cfg.bssid; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - } - - data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.tsf_timer_offset_msecs = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("toa_offset=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.toa_offset = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("center_freq=", buf); - if (data) { - struct iwl_tof_responder_config_cmd *cmd = - &mvm->tof_data.responder_cfg; - - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - enum nl80211_band band = (cmd->channel_num <= 14) ? 
- NL80211_BAND_2GHZ : - NL80211_BAND_5GHZ; - struct ieee80211_channel chn = { - .band = band, - .center_freq = ieee80211_channel_to_frequency( - cmd->channel_num, band), - }; - struct cfg80211_chan_def chandef = { - .chan = &chn, - .center_freq1 = - ieee80211_channel_to_frequency(value, - band), - }; - - cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef); - } - goto out; - } - - data = iwl_dbgfs_is_match("ftm_per_burst=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.ftm_per_burst = value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value; - goto out; - } - - data = iwl_dbgfs_is_match("asap_mode=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.responder_cfg.asap_mode = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_responder_cfg=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - ret = iwl_mvm_tof_responder_cmd(mvm, vif); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_responder_config_cmd *cmd; - - cmd = &mvm->tof_data.responder_cfg; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n", - le16_to_cpu(cmd->burst_period)); - pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n", - cmd->burst_duration); - pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n", - cmd->bandwidth); - pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n", - cmd->channel_num); - pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n", - cmd->ctrl_ch_position); - pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n", - cmd->bssid); - pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n", - cmd->min_delta_ftm); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n", - cmd->num_of_burst_exp); - pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate); - pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n", - cmd->abort_responder); - pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n", - cmd->get_ch_est); - pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n", - cmd->recv_sta_req_params); - pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n", - cmd->ftm_per_burst); - pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n", - cmd->ftm_resp_ts_avail); - pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n", - cmd->asap_mode); - pos += scnprintf(buf + pos, bufsz - pos, - "tsf_timer_offset_msecs = %d\n", - le16_to_cpu(cmd->tsf_timer_offset_msecs)); - pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n", - le16_to_cpu(cmd->toa_offset)); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif, - char *buf, size_t count, - loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm 
*mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("request_id=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.request_id = value; - goto out; - } - - data = iwl_dbgfs_is_match("initiator=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.initiator = value; - goto out; - } - - data = iwl_dbgfs_is_match("one_sided_los_disable=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.one_sided_los_disable = value; - goto out; - } - - data = iwl_dbgfs_is_match("req_timeout=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.req_timeout = value; - goto out; - } - - data = iwl_dbgfs_is_match("report_policy=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.report_policy = value; - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_random=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.macaddr_random = value; - goto out; - } - - data = iwl_dbgfs_is_match("num_of_ap=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req.num_of_ap = value; - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_template=", buf); - if (data) { - u8 mac[ETH_ALEN]; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN); - goto out; - } - - data = iwl_dbgfs_is_match("macaddr_mask=", buf); - if (data) { - u8 mac[ETH_ALEN]; - - if (!mac_pton(data, mac)) { - ret = -EINVAL; - goto out; - } - memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN); - goto out; - } - - data = iwl_dbgfs_is_match("ap=", buf); - if (data) { - struct iwl_tof_range_req_ap_entry ap = {}; - int size = sizeof(struct iwl_tof_range_req_ap_entry); - u16 burst_period; - u8 *mac = ap.bssid; - unsigned int i; - - if (sscanf(data, "%u %hhd %hhd %hhd" - "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx" - "%hhd %hhd %hd" - "%hhd %hhd %d" - "%hhx %hhd %hhd %hhd", - &i, &ap.channel_num, &ap.bandwidth, - &ap.ctrl_ch_position, - mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5, - &ap.measure_type, &ap.num_of_bursts, - &burst_period, - &ap.samples_per_burst, &ap.retries_per_sample, - &ap.tsf_delta, &ap.location_req, &ap.asap_mode, - &ap.enable_dyn_ack, &ap.rssi) != 20) { - ret = -EINVAL; - goto out; - } - if (i >= IWL_MVM_TOF_MAX_APS) { - IWL_ERR(mvm, "Invalid AP index %d\n", i); - ret = -EINVAL; - goto out; - } - - ap.burst_period = cpu_to_le16(burst_period); - - memcpy(&mvm->tof_data.range_req.ap[i], &ap, size); - goto out; - } - - data = iwl_dbgfs_is_match("send_range_request=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) - ret = iwl_mvm_tof_range_request_cmd(mvm, vif); - goto out; - } - - ret = -EINVAL; -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[512]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_range_req_cmd *cmd; - int i; - - cmd = &mvm->tof_data.range_req; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, 
bufsz - pos, "request_id= %d\n", - cmd->request_id); - pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n", - cmd->initiator); - pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n", - cmd->one_sided_los_disable); - pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n", - cmd->req_timeout); - pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n", - cmd->report_policy); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n", - cmd->macaddr_random); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n", - cmd->macaddr_template); - pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n", - cmd->macaddr_mask); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n", - cmd->num_of_ap); - for (i = 0; i < cmd->num_of_ap; i++) { - struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i]; - - pos += scnprintf(buf + pos, bufsz - pos, - "ap %.2d: channel_num=%hhd bw=%hhd" - " control=%hhd bssid=%pM type=%hhd" - " num_of_bursts=%hhd burst_period=%hd ftm=%hhd" - " retries=%hhd tsf_delta=%d" - " tsf_delta_direction=%hhd location_req=0x%hhx " - " asap=%hhd enable=%hhd rssi=%hhd\n", - i, ap->channel_num, ap->bandwidth, - ap->ctrl_ch_position, ap->bssid, - ap->measure_type, ap->num_of_bursts, - ap->burst_period, ap->samples_per_burst, - ap->retries_per_sample, ap->tsf_delta, - ap->tsf_delta_direction, - ap->location_req, ap->asap_mode, - ap->enable_dyn_ack, ap->rssi); - } - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.tsf_timer_offset_msec = - cpu_to_le16(value); - goto out; - } - - data = iwl_dbgfs_is_match("min_delta_ftm=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.min_delta_ftm = value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw20M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw40M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.range_req_ext.ftm_format_and_bw80M = - value; - goto out; - } - - data = iwl_dbgfs_is_match("send_range_req_ext=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) - ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif); - goto out; - } - - ret = -EINVAL; -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - struct iwl_tof_range_req_ext_cmd *cmd; - 
- cmd = &mvm->tof_data.range_req_ext; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, - "tsf_timer_offset_msec = %hd\n", - cmd->tsf_timer_offset_msec); - pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n", - cmd->min_delta_ftm); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw20M = %hhd\n", - cmd->ftm_format_and_bw20M); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw40M = %hhd\n", - cmd->ftm_format_and_bw40M); - pos += scnprintf(buf + pos, bufsz - pos, - "ftm_format_and_bw80M = %hhd\n", - cmd->ftm_format_and_bw80M); - - mutex_unlock(&mvm->mutex); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif, - char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - u32 value; - int abort_id, ret = 0; - char *data; - - mutex_lock(&mvm->mutex); - - data = iwl_dbgfs_is_match("abort_id=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0) - mvm->tof_data.last_abort_id = value; - goto out; - } - - data = iwl_dbgfs_is_match("send_range_abort=", buf); - if (data) { - ret = kstrtou32(data, 10, &value); - if (ret == 0 && value) { - abort_id = mvm->tof_data.last_abort_id; - ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id); - goto out; - } - } - -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - -static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_vif *vif = file->private_data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - char buf[32]; - int pos = 0; - const size_t bufsz = sizeof(buf); - int last_abort_id; - - mutex_lock(&mvm->mutex); - last_abort_id = mvm->tof_data.last_abort_id; - mutex_unlock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n", - last_abort_id); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) +static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) { - struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; - char *buf; - int pos = 0; - const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256; - struct iwl_tof_range_rsp_ntfy *cmd; - int i, ret; + u8 value; + int ret; - buf = kzalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; + ret = kstrtou8(buf, 0, &value); + if (ret) + return ret; + if (value > 1) + return -EINVAL; mutex_lock(&mvm->mutex); - cmd = &mvm->tof_data.range_resp; - - pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n", - cmd->request_id); - pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n", - cmd->request_status); - pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n", - cmd->last_in_batch); - pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n", - cmd->num_of_aps); - for (i = 0; i < cmd->num_of_aps; i++) { - struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i]; - - pos += scnprintf(buf + pos, bufsz - pos, - "ap %.2d: bssid=%pM status=%hhd bw=%hhd" - " rtt=%d rtt_var=%d rtt_spread=%d" - " rssi=%hhd rssi_spread=%hhd" - " range=%d range_var=%d" - " time_stamp=%d\n", - i, ap->bssid, 
ap->measure_status, - ap->measure_bw, - ap->rtt, ap->rtt_variance, ap->rtt_spread, - ap->rssi, ap->rssi_spread, ap->range, - ap->range_variance, ap->timestamp); - } + iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); mutex_unlock(&mvm->mutex); - ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); - kfree(buf); - return ret; + return count; } -static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, - size_t count, loff_t *ppos) +static ssize_t +iwl_dbgfs_low_latency_force_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; @@ -1279,13 +555,24 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, ret = kstrtou8(buf, 0, &value); if (ret) return ret; - if (value > 1) + + if (value > NUM_LOW_LATENCY_FORCE) return -EINVAL; mutex_lock(&mvm->mutex); - iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS); + if (value == LOW_LATENCY_FORCE_UNSET) { + iwl_mvm_update_low_latency(mvm, vif, false, + LOW_LATENCY_DEBUGFS_FORCE); + iwl_mvm_update_low_latency(mvm, vif, false, + LOW_LATENCY_DEBUGFS_FORCE_ENABLE); + } else { + iwl_mvm_update_low_latency(mvm, vif, + value == LOW_LATENCY_FORCE_ON, + LOW_LATENCY_DEBUGFS_FORCE); + iwl_mvm_update_low_latency(mvm, vif, true, + LOW_LATENCY_DEBUGFS_FORCE_ENABLE); + } mutex_unlock(&mvm->mutex); - return count; } @@ -1295,15 +582,25 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file, { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[30] = {}; + char format[] = "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n" + "dbgfs_force_enable=%d\ndbgfs_force=%d\nactual=%d\n"; + + /* + * all values in format are boolean so the size of format is enough + * for holding the result string + */ + char buf[sizeof(format) + 1] = {}; int len; - len = scnprintf(buf, sizeof(buf) - 1, - "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n", + len = scnprintf(buf, sizeof(buf) - 1, format, !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS), !!(mvmvif->low_latency & LOW_LATENCY_VCMD), - !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE)); + !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE), + !!(mvmvif->low_latency & + LOW_LATENCY_DEBUGFS_FORCE_ENABLE), + !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE), + !!(mvmvif->low_latency_actual)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1456,14 +753,9 @@ MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt); MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256); MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10); +MVM_DEBUGFS_WRITE_FILE_OPS(low_latency_force, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); -MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); -MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); @@ -1497,6 +789,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(mac_params, 
mvmvif->dbgfs_dir, 0400); MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600); + MVM_DEBUGFS_ADD_FILE_VIF(low_latency_force, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); @@ -1506,24 +799,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvmvif == mvm->bf_allowed_vif) MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600); - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) && - !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) { - if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP) - MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params, - mvmvif->dbgfs_dir, 0600); - - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir, - 0600); - MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir, - 0400); - } - /* * Create symlink for convenience pointing to interface specific * debugfs entries for the driver. For example, under diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 52c361a6124c..776b24f54200 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -69,6 +69,7 @@ #include "sta.h" #include "iwl-io.h" #include "debugfs.h" +#include "iwl-modparams.h" #include "fw/error-dump.h" static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, @@ -1187,64 +1188,128 @@ out: return ret ?: count; } -static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) +static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len) { - struct iwl_mvm *mvm = file->private_data; - int conf; - char buf[8]; - const size_t bufsz = sizeof(buf); - int pos = 0; + struct ieee80211_vif *vif; + struct iwl_mvm_vif *mvmvif; + struct sk_buff *beacon; + struct ieee80211_tx_info *info; + struct iwl_mac_beacon_cmd beacon_cmd = {}; + u8 rate; + u16 flags; + int i; + + len /= 2; + + /* Element len should be represented by u8 */ + if (len >= U8_MAX) + return -EINVAL; + + if (!iwl_mvm_firmware_running(mvm)) + return -EIO; + + if (!iwl_mvm_has_new_tx_api(mvm) && + !fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) + return -EINVAL; + + rcu_read_lock(); + + for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) { + vif = iwl_mvm_rcu_dereference_vif_id(mvm, i, true); + if (!vif) + continue; + + if (vif->type == NL80211_IFTYPE_AP) + break; + } + + if (i == NUM_MAC_INDEX_DRIVER || !vif) + goto out_err; + + mvm->hw->extra_beacon_tailroom = len; + + beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL); + if (!beacon) + goto out_err; + + if (len && hex2bin(skb_put_zero(beacon, len), bin, len)) { + dev_kfree_skb(beacon); + goto out_err; + } + + mvm->beacon_inject_active = true; + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + info = IEEE80211_SKB_CB(beacon); + rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + flags = iwl_mvm_mac80211_idx_to_hwrate(rate); + + if (rate == IWL_FIRST_CCK_RATE) + flags |= IWL_MAC_BEACON_CCK; + + beacon_cmd.flags = cpu_to_le16(flags); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); mutex_lock(&mvm->mutex); - conf = mvm->fwrt.dump.conf; + iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); mutex_unlock(&mvm->mutex); - pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); + dev_kfree_skb(beacon); - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); + rcu_read_unlock(); + return 0; + +out_err: + rcu_read_unlock(); + return -EINVAL; } -/* - * Enable / Disable continuous recording. - * Cause the FW to start continuous recording, by sending the relevant hcmd. - * Enable: input of every integer larger than 0, ENABLE_CONT_RECORDING. - * Disable: for 0 as input, DISABLE_CONT_RECORDING. 
- */ -static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm, - char *buf, size_t count, - loff_t *ppos) +static ssize_t iwl_dbgfs_inject_beacon_ie_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) { - struct iwl_trans *trans = mvm->trans; - const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv; - struct iwl_continuous_record_cmd cont_rec = {}; - int ret, rec_mode; + int ret = _iwl_dbgfs_inject_beacon_ie(mvm, buf, count); - if (!iwl_mvm_firmware_running(mvm)) - return -EIO; - - if (!dest) - return -EOPNOTSUPP; + mvm->hw->extra_beacon_tailroom = 0; + return ret ?: count; +} - if (dest->monitor_mode != SMEM_MODE || - trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) - return -EOPNOTSUPP; +static ssize_t iwl_dbgfs_inject_beacon_ie_restore_write(struct iwl_mvm *mvm, + char *buf, + size_t count, + loff_t *ppos) +{ + int ret = _iwl_dbgfs_inject_beacon_ie(mvm, NULL, 0); - ret = kstrtoint(buf, 0, &rec_mode); - if (ret) - return ret; + mvm->hw->extra_beacon_tailroom = 0; + mvm->beacon_inject_active = false; + return ret ?: count; +} - cont_rec.record_mode.enable_recording = rec_mode ? - cpu_to_le16(ENABLE_CONT_RECORDING) : - cpu_to_le16(DISABLE_CONT_RECORDING); +static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + int conf; + char buf[8]; + const size_t bufsz = sizeof(buf); + int pos = 0; mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0, - sizeof(cont_rec), &cont_rec); + conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); - return ret ?: count; + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, @@ -1722,11 +1787,36 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +struct iwl_mvm_sniffer_apply { + struct iwl_mvm *mvm; + u8 *bssid; + u16 aid; +}; + +static bool iwl_mvm_sniffer_apply(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mvm_sniffer_apply *apply = data; + + apply->mvm->cur_aid = cpu_to_le16(apply->aid); + memcpy(apply->mvm->cur_bssid, apply->bssid, + sizeof(apply->mvm->cur_bssid)); + + return true; +} + static ssize_t iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { + struct iwl_notification_wait wait; struct iwl_he_monitor_cmd he_mon_cmd = {}; + struct iwl_mvm_sniffer_apply apply = { + .mvm = mvm, + }; + u16 wait_cmds[] = { + iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), + }; u32 aid; int ret; @@ -1742,15 +1832,53 @@ iwl_dbgfs_he_sniffer_params_write(struct iwl_mvm *mvm, char *buf, he_mon_cmd.aid = cpu_to_le16(aid); + apply.aid = aid; + apply.bssid = (void *)he_mon_cmd.bssid; + mutex_lock(&mvm->mutex); + + /* + * Use the notification waiter to get our function triggered + * in sequence with other RX. This ensures that frames we get + * on the RX queue _before_ the new configuration is applied + * still have mvm->cur_aid pointing to the old AID, and that + * frames on the RX queue _after_ the firmware processed the + * new configuration (and sent the response, synchronously) + * get mvm->cur_aid correctly set to the new AID. 
+ */ + iwl_init_notification_wait(&mvm->notif_wait, &wait, + wait_cmds, ARRAY_SIZE(wait_cmds), + iwl_mvm_sniffer_apply, &apply); + ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(HE_AIR_SNIFFER_CONFIG_CMD, DATA_PATH_GROUP, 0), 0, sizeof(he_mon_cmd), &he_mon_cmd); + + /* no need to really wait, we already did anyway */ + iwl_remove_notification(&mvm->notif_wait, &wait); + mutex_unlock(&mvm->mutex); return ret ?: count; } +static ssize_t +iwl_dbgfs_he_sniffer_params_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + u8 buf[32]; + int len; + + len = scnprintf(buf, sizeof(buf), + "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", + le16_to_cpu(mvm->cur_aid), mvm->cur_bssid[0], + mvm->cur_bssid[1], mvm->cur_bssid[2], mvm->cur_bssid[3], + mvm->cur_bssid[4], mvm->cur_bssid[5]); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + static ssize_t iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -1800,11 +1928,12 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); -MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512); +MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512); +MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids); @@ -1820,7 +1949,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8); MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile); #endif -MVM_DEBUGFS_WRITE_FILE_OPS(he_sniffer_params, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(he_sniffer_params, 32); static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -2004,13 +2133,14 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(inject_beacon_ie, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, mvm->debugfs_dir, 0200); #ifdef CONFIG_ACPI MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400); #endif - MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0200); + MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600); if (!debugfs_create_bool("enable_scan_iteration_notif", 0600, @@ -2071,6 +2201,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) if (!debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir, &mvm->nvm_phy_sku_blob)) goto err; + if (!debugfs_create_blob("nvm_reg", S_IRUSR, + mvm->debugfs_dir, &mvm->nvm_reg_blob)) + goto err; debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c new file mode 100644 index 000000000000..e9822a3ec373 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -0,0 +1,654 @@ 
+/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2019 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * Copyright (C) 2019 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include +#include +#include +#include "mvm.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "constants.h" + +struct iwl_mvm_loc_entry { + struct list_head list; + u8 addr[ETH_ALEN]; + u8 lci_len, civic_len; + u8 buf[]; +}; + +static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) +{ + struct iwl_mvm_loc_entry *e, *t; + + mvm->ftm_initiator.req = NULL; + mvm->ftm_initiator.req_wdev = NULL; + memset(mvm->ftm_initiator.responses, 0, + sizeof(mvm->ftm_initiator.responses)); + list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) { + list_del(&e->list); + kfree(e); + } +} + +void iwl_mvm_ftm_restart(struct iwl_mvm *mvm) +{ + struct cfg80211_pmsr_result result = { + .status = NL80211_PMSR_STATUS_FAILURE, + .final = 1, + .host_time = ktime_get_boot_ns(), + .type = NL80211_PMSR_TYPE_FTM, + }; + int i; + + lockdep_assert_held(&mvm->mutex); + + if (!mvm->ftm_initiator.req) + return; + + for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) { + memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr, + ETH_ALEN); + result.ftm.burst_index = mvm->ftm_initiator.responses[i]; + + cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, + mvm->ftm_initiator.req, + &result, GFP_KERNEL); + } + + cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, + mvm->ftm_initiator.req, GFP_KERNEL); + iwl_mvm_ftm_reset(mvm); +} + +static int +iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s) +{ + switch (s) { + case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS: + return 0; + case IWL_TOF_RANGE_REQUEST_STATUS_BUSY: + return -EBUSY; + default: + WARN_ON_ONCE(1); + return -EIO; + } +} + +static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_tof_range_req_cmd_v5 *cmd, + struct cfg80211_pmsr_request *req) +{ + int i; + + cmd->request_id = req->cookie; + cmd->num_of_ap = req->n_peers; + + /* use maximum for "no timeout" or bigger than what we can do */ + if (!req->timeout || req->timeout > 255 * 100) + cmd->req_timeout = 255; + else + cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100); + + /* + * We treat it always as random, since if not we'll + * have filled our local address there instead. + */ + cmd->macaddr_random = 1; + memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) + cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; + + if (vif->bss_conf.assoc) + memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); + else + eth_broadcast_addr(cmd->range_req_bssid); +} + +static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct iwl_tof_range_req_cmd *cmd, + struct cfg80211_pmsr_request *req) +{ + int i; + + cmd->initiator_flags = + cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM | + IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT); + cmd->request_id = req->cookie; + cmd->num_of_ap = req->n_peers; + + /* + * Use a large value for "no timeout". Don't use the maximum value + * because of fw limitations. 
+ */ + if (req->timeout) + cmd->req_timeout_ms = cpu_to_le32(req->timeout); + else + cmd->req_timeout_ms = cpu_to_le32(0xfffff); + + memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) + cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; + + if (vif->bss_conf.assoc) + memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); + else + eth_broadcast_addr(cmd->range_req_bssid); + + /* TODO: fill in tsf_mac_id if needed */ + cmd->tsf_mac_id = cpu_to_le32(0xff); +} + +static int iwl_mvm_ftm_target_chandef(struct iwl_mvm *mvm, + struct cfg80211_pmsr_request_peer *peer, + u8 *channel, u8 *bandwidth, + u8 *ctrl_ch_position) +{ + u32 freq = peer->chandef.chan->center_freq; + + *channel = ieee80211_frequency_to_channel(freq); + + switch (peer->chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + *bandwidth = IWL_TOF_BW_20_LEGACY; + break; + case NL80211_CHAN_WIDTH_20: + *bandwidth = IWL_TOF_BW_20_HT; + break; + case NL80211_CHAN_WIDTH_40: + *bandwidth = IWL_TOF_BW_40; + break; + case NL80211_CHAN_WIDTH_80: + *bandwidth = IWL_TOF_BW_80; + break; + default: + IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n", + peer->chandef.width); + return -EINVAL; + } + + *ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? + iwl_mvm_get_ctrl_pos(&peer->chandef) : 0; + + return 0; +} + +static int +iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v2 *target) +{ + int ret; + + ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num, + &target->bandwidth, + &target->ctrl_ch_position); + if (ret) + return ret; + + memcpy(target->bssid, peer->addr, ETH_ALEN); + target->burst_period = + cpu_to_le16(peer->ftm.burst_period); + target->samples_per_burst = peer->ftm.ftms_per_burst; + target->num_of_bursts = peer->ftm.num_bursts_exp; + target->measure_type = 0; /* regular two-sided FTM */ + target->retries_per_sample = peer->ftm.ftmr_retries; + target->asap_mode = peer->ftm.asap; + target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK; + + if (peer->ftm.request_lci) + target->location_req |= IWL_TOF_LOC_LCI; + if (peer->ftm.request_civicloc) + target->location_req |= IWL_TOF_LOC_CIVIC; + + target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO; + + return 0; +} + +#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \ + cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) + +static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry *target) +{ + int ret; + + ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num, + &target->bandwidth, + &target->ctrl_ch_position); + if (ret) + return ret; + + memcpy(target->bssid, peer->addr, ETH_ALEN); + target->burst_period = + cpu_to_le16(peer->ftm.burst_period); + target->samples_per_burst = peer->ftm.ftms_per_burst; + target->num_of_bursts = peer->ftm.num_bursts_exp; + target->ftmr_max_retries = peer->ftm.ftmr_retries; + target->initiator_ap_flags = cpu_to_le32(0); + + if (peer->ftm.asap) + FTM_PUT_FLAG(ASAP); + + if (peer->ftm.request_lci) + FTM_PUT_FLAG(LCI_REQUEST); + + if (peer->ftm.request_civicloc) + FTM_PUT_FLAG(CIVIC_REQUEST); + + if (IWL_MVM_FTM_INITIATOR_DYNACK) + FTM_PUT_FLAG(DYN_ACK); + + if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) + FTM_PUT_FLAG(ALGO_LR); + else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) + FTM_PUT_FLAG(ALGO_FFT); + + return 0; +} + +int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct 
cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd_v5 cmd_v5; + struct iwl_tof_range_req_cmd cmd; + bool new_api = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); + u8 num_of_ap; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0), + .dataflags[0] = IWL_HCMD_DFL_DUP, + }; + u32 status = 0; + int err, i; + + lockdep_assert_held(&mvm->mutex); + + if (mvm->ftm_initiator.req) + return -EBUSY; + + if (new_api) { + iwl_mvm_ftm_cmd(mvm, vif, &cmd, req); + hcmd.data[0] = &cmd; + hcmd.len[0] = sizeof(cmd); + num_of_ap = cmd.num_of_ap; + } else { + iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req); + hcmd.data[0] = &cmd_v5; + hcmd.len[0] = sizeof(cmd_v5); + num_of_ap = cmd_v5.num_of_ap; + } + + for (i = 0; i < num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + + if (new_api) + err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]); + else + err = iwl_mvm_ftm_put_target_v2(mvm, peer, + &cmd_v5.ap[i]); + + if (err) + return err; + } + + err = iwl_mvm_send_cmd_status(mvm, &hcmd, &status); + if (!err && status) { + IWL_ERR(mvm, "FTM range request command failure, status: %u\n", + status); + err = iwl_ftm_range_request_status_to_err(status); + } + + if (!err) { + mvm->ftm_initiator.req = req; + mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif); + } + + return err; +} + +void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_abort_cmd cmd = { + .request_id = req->cookie, + }; + + lockdep_assert_held(&mvm->mutex); + + if (req != mvm->ftm_initiator.req) + return; + + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RANGE_ABORT_CMD, + LOCATION_GROUP, 0), + 0, sizeof(cmd), &cmd)) + IWL_ERR(mvm, "failed to abort FTM process\n"); +} + +static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req, + const u8 *addr) +{ + int i; + + for (i = 0; i < req->n_peers; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + + if (ether_addr_equal_unaligned(peer->addr, addr)) + return i; + } + + return -ENOENT; +} + +static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts) +{ + u32 gp2_ts = le32_to_cpu(fw_gp2_ts); + u32 curr_gp2, diff; + u64 now_from_boot_ns; + + iwl_mvm_get_sync_time(mvm, &curr_gp2, &now_from_boot_ns); + + if (curr_gp2 >= gp2_ts) + diff = curr_gp2 - gp2_ts; + else + diff = curr_gp2 + (U32_MAX - gp2_ts + 1); + + return now_from_boot_ns - (u64)diff * 1000; +} + +static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm, + struct cfg80211_pmsr_result *res) +{ + struct iwl_mvm_loc_entry *entry; + + list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) { + if (!ether_addr_equal_unaligned(res->addr, entry->addr)) + continue; + + if (entry->lci_len) { + res->ftm.lci_len = entry->lci_len; + res->ftm.lci = entry->buf; + } + + if (entry->civic_len) { + res->ftm.civicloc_len = entry->civic_len; + res->ftm.civicloc = entry->buf + entry->lci_len; + } + + /* we found the entry we needed */ + break; + } +} + +static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id, + u8 num_of_aps) +{ + lockdep_assert_held(&mvm->mutex); + + if (request_id != (u8)mvm->ftm_initiator.req->cookie) { + IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n", + request_id, (u8)mvm->ftm_initiator.req->cookie); + return -EINVAL; + } + + if (num_of_aps > mvm->ftm_initiator.req->n_peers) { + IWL_ERR(mvm, "FTM range response invalid\n"); + return -EINVAL; + } + + return 0; +} + +static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 
index, + struct cfg80211_pmsr_result *res) +{ + s64 rtt_avg = res->ftm.rtt_avg * 100; + + do_div(rtt_avg, 6666); + + IWL_DEBUG_INFO(mvm, "entry %d\n", index); + IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); + IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr); + IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time); + IWL_DEBUG_INFO(mvm, "\tburst index: %hhu\n", res->ftm.burst_index); + IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); + IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg); + IWL_DEBUG_INFO(mvm, "\trssi spread: %hhu\n", res->ftm.rssi_spread); + IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg); + IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance); + IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread); + IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg); +} + +void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; + struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data; + int i; + bool new_api = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); + u8 num_of_aps, last_in_batch; + + lockdep_assert_held(&mvm->mutex); + + if (!mvm->ftm_initiator.req) { + IWL_ERR(mvm, "Got FTM response but have no request?\n"); + return; + } + + if (new_api) { + if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp->request_id, + fw_resp->num_of_aps)) + return; + + num_of_aps = fw_resp->num_of_aps; + last_in_batch = fw_resp->last_report; + } else { + if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id, + fw_resp_v5->num_of_aps)) + return; + + num_of_aps = fw_resp_v5->num_of_aps; + last_in_batch = fw_resp_v5->last_in_batch; + } + + IWL_DEBUG_INFO(mvm, "Range response received\n"); + IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n", + mvm->ftm_initiator.req->cookie, num_of_aps); + + for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { + struct cfg80211_pmsr_result result = {}; + struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap; + int peer_idx; + + if (new_api) { + fw_ap = &fw_resp->ap[i]; + result.final = fw_resp->ap[i].last_burst; + } else { + /* the first part is the same for old and new APIs */ + fw_ap = (void *)&fw_resp_v5->ap[i]; + /* + * FIXME: the firmware needs to report this, we don't + * even know the number of bursts the responder picked + * (if we asked it to) + */ + result.final = 0; + } + + peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req, + fw_ap->bssid); + if (peer_idx < 0) { + IWL_WARN(mvm, + "Unknown address (%pM, target #%d) in FTM response\n", + fw_ap->bssid, i); + continue; + } + + switch (fw_ap->measure_status) { + case IWL_TOF_ENTRY_SUCCESS: + result.status = NL80211_PMSR_STATUS_SUCCESS; + break; + case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: + result.status = NL80211_PMSR_STATUS_TIMEOUT; + break; + case IWL_TOF_ENTRY_NO_RESPONSE: + result.status = NL80211_PMSR_STATUS_FAILURE; + result.ftm.failure_reason = + NL80211_PMSR_FTM_FAILURE_NO_RESPONSE; + break; + case IWL_TOF_ENTRY_REQUEST_REJECTED: + result.status = NL80211_PMSR_STATUS_FAILURE; + result.ftm.failure_reason = + NL80211_PMSR_FTM_FAILURE_PEER_BUSY; + result.ftm.busy_retry_time = fw_ap->refusal_period; + break; + default: + result.status = NL80211_PMSR_STATUS_FAILURE; + result.ftm.failure_reason = + NL80211_PMSR_FTM_FAILURE_UNSPECIFIED; + break; + } + memcpy(result.addr, fw_ap->bssid, ETH_ALEN); + result.host_time = iwl_mvm_ftm_get_host_time(mvm, + 
fw_ap->timestamp); + result.type = NL80211_PMSR_TYPE_FTM; + result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx]; + mvm->ftm_initiator.responses[peer_idx]++; + result.ftm.rssi_avg = fw_ap->rssi; + result.ftm.rssi_avg_valid = 1; + result.ftm.rssi_spread = fw_ap->rssi_spread; + result.ftm.rssi_spread_valid = 1; + result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt); + result.ftm.rtt_avg_valid = 1; + result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance); + result.ftm.rtt_variance_valid = 1; + result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread); + result.ftm.rtt_spread_valid = 1; + + iwl_mvm_ftm_get_lci_civic(mvm, &result); + + cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev, + mvm->ftm_initiator.req, + &result, GFP_KERNEL); + + iwl_mvm_debug_range_resp(mvm, i, &result); + } + + if (last_in_batch) { + cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev, + mvm->ftm_initiator.req, + GFP_KERNEL); + iwl_mvm_ftm_reset(mvm); + } +} + +void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + const struct ieee80211_mgmt *mgmt = (void *)pkt->data; + size_t len = iwl_rx_packet_payload_len(pkt); + struct iwl_mvm_loc_entry *entry; + const u8 *ies, *lci, *civic, *msr_ie; + size_t ies_len, lci_len = 0, civic_len = 0; + size_t baselen = IEEE80211_MIN_ACTION_SIZE + + sizeof(mgmt->u.action.u.ftm); + static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI; + static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC; + + if (len <= baselen) + return; + + lockdep_assert_held(&mvm->mutex); + + ies = mgmt->u.action.u.ftm.variable; + ies_len = len - baselen; + + msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, + &rprt_type_lci, 1, 4); + if (msr_ie) { + lci = msr_ie + 2; + lci_len = msr_ie[1]; + } + + msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len, + &rprt_type_civic, 1, 4); + if (msr_ie) { + civic = msr_ie + 2; + civic_len = msr_ie[1]; + } + + entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL); + if (!entry) + return; + + memcpy(entry->addr, mgmt->bssid, ETH_ALEN); + + entry->lci_len = lci_len; + if (lci_len) + memcpy(entry->buf, lci, lci_len); + + entry->civic_len = civic_len; + if (civic_len) + memcpy(entry->buf + lci_len, civic, civic_len); + + list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c new file mode 100644 index 000000000000..1513b8b4062f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -0,0 +1,244 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include <net/cfg80211.h> +#include <linux/etherdevice.h> +#include "mvm.h" +#include "constants.h" + +static int +iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_chan_def *chandef) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_tof_responder_config_cmd cmd = { + .channel_num = chandef->chan->hw_value, + .cmd_valid_fields = + cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | + IWL_TOF_RESPONDER_CMD_VALID_BSSID | + IWL_TOF_RESPONDER_CMD_VALID_STA_ID), + .sta_id = mvmvif->bcast_sta.sta_id, + }; + + lockdep_assert_held(&mvm->mutex); + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + cmd.bandwidth = IWL_TOF_BW_20_LEGACY; + break; + case NL80211_CHAN_WIDTH_20: + cmd.bandwidth = IWL_TOF_BW_20_HT; + break; + case NL80211_CHAN_WIDTH_40: + cmd.bandwidth = IWL_TOF_BW_40; + cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); + break; + case NL80211_CHAN_WIDTH_80: + cmd.bandwidth = IWL_TOF_BW_80; + cmd.ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef); + break; + default: + WARN_ON(1); + return -EINVAL; + } + + memcpy(cmd.bssid, vif->addr, ETH_ALEN); + + return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD, + LOCATION_GROUP, 0), + 0, sizeof(cmd), &cmd); +} + +static int +iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_ftm_responder_params *params) +{ + struct iwl_tof_responder_dyn_config_cmd cmd = { + .lci_len = cpu_to_le32(params->lci_len + 2), + .civic_len = cpu_to_le32(params->civicloc_len + 2), + }; + u8 data[IWL_LCI_CIVIC_IE_MAX_SIZE] = {0}; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD, + LOCATION_GROUP, 0), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + .data[1] = &data, + /* .len[1] set later */ + /* may not be able to DMA from stack */ + .dataflags[1] = IWL_HCMD_DFL_DUP, + }; + u32 aligned_lci_len = ALIGN(params->lci_len + 2, 4); + u32 aligned_civicloc_len = ALIGN(params->civicloc_len + 2, 4); + u8 *pos = data; + + lockdep_assert_held(&mvm->mutex); + + if (aligned_lci_len + aligned_civicloc_len > sizeof(data)) { + IWL_ERR(mvm, "LCI/civicloc data too big (%zd + %zd)\n", + params->lci_len, params->civicloc_len); + return -ENOBUFS; + } + + pos[0] = WLAN_EID_MEASURE_REPORT; + pos[1] = params->lci_len; + memcpy(pos + 2, params->lci, params->lci_len); + + pos += aligned_lci_len; + pos[0] = WLAN_EID_MEASURE_REPORT; + pos[1] = params->civicloc_len; + memcpy(pos + 2, params->civicloc, params->civicloc_len); + + hcmd.len[1] = aligned_lci_len + aligned_civicloc_len; + + return iwl_mvm_send_cmd(mvm, &hcmd); +} + +int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_ftm_responder_params *params; + struct ieee80211_chanctx_conf ctx, *pctx; + u16 *phy_ctxt_id; + struct iwl_mvm_phy_ctxt *phy_ctxt; + int ret; + + params = vif->bss_conf.ftmr_params; + + lockdep_assert_held(&mvm->mutex); + + if (WARN_ON_ONCE(!vif->bss_conf.ftm_responder)) + return -EINVAL; + + if (vif->p2p || vif->type != NL80211_IFTYPE_AP || + !mvmvif->ap_ibss_active) { + IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); + return -EIO; + } + + rcu_read_lock(); + pctx = rcu_dereference(vif->chanctx_conf); + /* Copy the ctx to unlock the rcu and send the phy ctxt. We don't care + * about changes in the ctx after releasing the lock because the driver + * is still protected by the mutex.
*/ + ctx = *pctx; + phy_ctxt_id = (u16 *)pctx->drv_priv; + rcu_read_unlock(); + + phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx.def, + ctx.rx_chains_static, + ctx.rx_chains_dynamic); + if (ret) + return ret; + + ret = iwl_mvm_ftm_responder_cmd(mvm, vif, &ctx.def); + if (ret) + return ret; + + if (params) + ret = iwl_mvm_ftm_responder_dyn_cfg_cmd(mvm, vif, params); + + return ret; +} + +void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + if (!vif->bss_conf.ftm_responder) + return; + + iwl_mvm_ftm_start_responder(mvm, vif); +} + +void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_ftm_responder_stats *resp = (void *)pkt->data; + struct cfg80211_ftm_responder_stats *stats = &mvm->ftm_resp_stats; + u32 flags = le32_to_cpu(resp->flags); + + if (resp->success_ftm == resp->ftm_per_burst) + stats->success_num++; + else if (resp->success_ftm >= 2) + stats->partial_num++; + else + stats->failed_num++; + + if ((flags & FTM_RESP_STAT_ASAP_REQ) && + (flags & FTM_RESP_STAT_ASAP_RESP)) + stats->asap_num++; + + if (flags & FTM_RESP_STAT_NON_ASAP_RESP) + stats->non_asap_num++; + + stats->total_duration_ms += le32_to_cpu(resp->duration) / USEC_PER_MSEC; + + if (flags & FTM_RESP_STAT_TRIGGER_UNKNOWN) + stats->unknown_triggers_num++; + + if (flags & FTM_RESP_STAT_DUP) + stats->reschedule_requests_num++; + + if (flags & FTM_RESP_STAT_NON_ASAP_OUT_WIN) + stats->out_of_window_triggers_num++; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 143c7fcaea41..e3eb812e0248 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -89,7 +91,7 @@ #include "fw/api/sf.h" #include "fw/api/sta.h" #include "fw/api/stats.h" -#include "fw/api/tof.h" +#include "fw/api/location.h" #include "fw/api/tx.h" #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 0d6c313b6669..00a47f6f1d81 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -105,12 +105,12 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) int i; struct iwl_rss_config_cmd cmd = { .flags = cpu_to_le32(IWL_RSS_ENABLE), - .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | - IWL_RSS_HASH_TYPE_IPV4_UDP | - IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | - IWL_RSS_HASH_TYPE_IPV6_TCP | - IWL_RSS_HASH_TYPE_IPV6_UDP | - IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) | + BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) | + BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) | + BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) | + BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) | + BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD), }; if (mvm->trans->num_rx_queues == 1) @@ -127,13 +127,17 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) static int iwl_configure_rxq(struct iwl_mvm *mvm) { - int i, num_queues, size; + int i, num_queues, size, ret; struct iwl_rfh_queue_config *cmd; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; /* Do not configure default queue, it is configured via context info */ num_queues = mvm->trans->num_rx_queues - 1; - size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data); + size = struct_size(cmd, data, num_queues); cmd = kzalloc(size, GFP_KERNEL); if (!cmd) @@ -154,10 +158,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm) cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); } - return iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(DATA_PATH_GROUP, - RFH_QUEUE_CONFIG_CMD), - 0, size, cmd); + hcmd.data[0] = cmd; + hcmd.len[0] = size; + + ret = iwl_mvm_send_cmd(mvm, &hcmd); + + kfree(cmd); + + return ret; } static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) @@ -210,7 +218,7 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_lmac_alive *lmac1; struct iwl_lmac_alive *lmac2 = NULL; u16 status; - u32 umac_error_event_table; + u32 lmac_error_event_table, umac_error_event_table; if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) { palive = (void *)pkt->data; @@ -225,30 +233,35 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, status = le16_to_cpu(palive3->status); } - mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr); + lmac_error_event_table = + le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); + iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table); + if (lmac2) - mvm->error_event_table[1] = - le32_to_cpu(lmac2->error_event_table_ptr); - mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr); + mvm->trans->lmac_error_event_table[1] = + le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); - umac_error_event_table = le32_to_cpu(umac->error_info_addr); + umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr); if (!umac_error_event_table) { mvm->support_umac_log = false; } else if (umac_error_event_table >= mvm->trans->cfg->min_umac_error_event_table) { mvm->support_umac_log = true; - mvm->umac_error_event_table = umac_error_event_table; } else { IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", - mvm->umac_error_event_table, + umac_error_event_table, (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ? 
"Init" : "RT"); mvm->support_umac_log = false; } - alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr); + if (mvm->support_umac_log) + iwl_fw_umac_set_alive_err_table(mvm->trans, + umac_error_event_table); + + alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr); alive_data->valid = status == IWL_ALIVE_STATUS_OK; IWL_DEBUG_FW(mvm, @@ -293,13 +306,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { struct iwl_notification_wait alive_wait; - struct iwl_mvm_alive_data alive_data; + struct iwl_mvm_alive_data alive_data = {}; const struct fw_img *fw; - int ret, i; + int ret; enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; - set_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status); if (ucode_type == IWL_UCODE_REGULAR && iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && !(fw_has_capa(&mvm->fw->ucode_capa, @@ -332,11 +344,16 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, if (ret) { struct iwl_trans *trans = mvm->trans; + if (ret == -ETIMEDOUT) + iwl_fw_dbg_error_collect(&mvm->fwrt, + FW_DBG_TRIGGER_ALIVE_TIMEOUT); + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", - iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), - iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS)); + iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), + iwl_read_umac_prph(trans, + UMAG_SB_CPU_2_STATUS)); else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", @@ -373,14 +390,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap = BIT(IWL_MAX_TID_COUNT + 2); - for (i = 0; i < IEEE80211_MAX_QUEUES; i++) - atomic_set(&mvm->mac80211_queue_stop_count[i], 0); - set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_fw_set_dbg_rec_on(&mvm->fwrt); #endif - clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status); return 0; } @@ -406,13 +419,15 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) iwl_wait_init_complete, NULL); + iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY); + /* Will also start the device */ ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - iwl_fw_assert_error_dump(&mvm->fwrt); goto error; } + iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE); /* Send init config command to mark that we are sending NVM access * commands @@ -631,10 +646,10 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) } #ifdef CONFIG_ACPI -static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm, - union acpi_object *table, - struct iwl_mvm_sar_profile *profile, - bool enabled) +static inline int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm, + union acpi_object *table, + struct iwl_mvm_sar_profile *profile, + bool enabled) { int i; @@ -965,6 +980,57 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) } #endif /* CONFIG_ACPI */ +void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags) +{ + u32 error_log_size = mvm->fw->ucode_capa.error_log_size; + int ret; + u32 resp; + + struct iwl_fw_error_recovery_cmd recovery_cmd = { + .flags = cpu_to_le32(flags), + .buf_size = 0, + }; + struct iwl_host_cmd host_cmd = { + .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), + .flags = CMD_WANT_SKB, + .data = {&recovery_cmd, }, + .len = 
{sizeof(recovery_cmd), }, + }; + + /* no error log was defined in TLV */ + if (!error_log_size) + return; + + if (flags & ERROR_RECOVERY_UPDATE_DB) { + /* no buf was allocated while HW reset */ + if (!mvm->error_recovery_buf) + return; + + host_cmd.data[1] = mvm->error_recovery_buf; + host_cmd.len[1] = error_log_size; + host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; + recovery_cmd.buf_size = cpu_to_le32(error_log_size); + } + + ret = iwl_mvm_send_cmd(mvm, &host_cmd); + kfree(mvm->error_recovery_buf); + mvm->error_recovery_buf = NULL; + + if (ret) { + IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret); + return; + } + + /* skb respond is only relevant in ERROR_RECOVERY_UPDATE_DB */ + if (flags & ERROR_RECOVERY_UPDATE_DB) { + resp = le32_to_cpu(*(__le32 *)host_cmd.resp_pkt->data); + if (resp) + IWL_ERR(mvm, + "Failed to send recovery cmd blob was invalid %d\n", + resp); + } +} + static int iwl_mvm_sar_init(struct iwl_mvm *mvm) { int ret; @@ -1055,7 +1121,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - iwl_fw_assert_error_dump(&mvm->fwrt); + iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); goto error; } @@ -1201,6 +1267,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) + iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB); + + if (iwl_acpi_get_eckv(mvm->dev, &mvm->ext_clock_valid)) + IWL_DEBUG_INFO(mvm, "ECKV table doesn't exist in BIOS\n"); + ret = iwl_mvm_sar_init(mvm); if (ret == 0) { ret = iwl_mvm_sar_geo_init(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index 9bb1de1cad64..4348bb00e761 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -113,6 +115,7 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm) switch (mode) { case IWL_LED_BLINK: IWL_ERR(mvm, "Blink led mode not supported, used default\n"); + /* fall through */ case IWL_LED_DEFAULT: case IWL_LED_RF_STATE: mode = IWL_LED_RF_STATE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 7779951a9533..6a70dece447d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -97,11 +97,6 @@ struct iwl_mvm_mac_iface_iterator_data { bool found_vif; }; -struct iwl_mvm_hw_queues_iface_iterator_data { - struct ieee80211_vif *exclude_vif; - unsigned long used_hw_queues; -}; - static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) { @@ -208,61 +203,6 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac, data->preferred_tsf = NUM_TSF_IDS; } -/* - * Get the mask of the queues used by the vif - */ -u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif) -{ - u32 qmask = 0, ac; - - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) - return BIT(IWL_MVM_OFFCHANNEL_QUEUE); - - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE) - qmask |= BIT(vif->hw_queue[ac]); - } - - if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) - qmask |= BIT(vif->cab_queue); - - return qmask; -} - -static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; - - /* exclude the given vif */ - if (vif == data->exclude_vif) - return; - - data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); -} - -unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *exclude_vif) -{ - struct iwl_mvm_hw_queues_iface_iterator_data data = { - .exclude_vif = exclude_vif, - .used_hw_queues = - BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue) | - BIT(IWL_MVM_DQA_GCAST_QUEUE), - }; - - lockdep_assert_held(&mvm->mutex); - - /* mark all VIF used hw queues */ - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, - iwl_mvm_iface_hw_queues_iter, &data); - - return data.used_hw_queues; -} - static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) { @@ -360,8 +300,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_mac_iface_iterator, &data); - used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif); - /* * In the case we're getting here during resume, it's similar to * firmware restart, and with RESUME_ALL the iterator will find @@ -416,9 +354,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * the ones here - no real limit */ queue_limit = IEEE80211_MAX_QUEUES; - BUILD_BUG_ON(IEEE80211_MAX_QUEUES > - BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0])); /* * Find available queues, and allocate them to the ACs. When in @@ -446,9 +381,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * queue value (when queue is enabled). 
*/ mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - } else { - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; } mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA; @@ -462,8 +394,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) exit_fail: memset(mvmvif, 0, sizeof(struct iwl_mvm_vif)); - memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue)); - vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; return ret; } @@ -776,29 +706,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (vif->bss_conf.assoc && vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u8 sta_id = mvmvif->ap_sta_id; - + if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) { cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); - if (sta_id != IWL_MVM_INVALID_STA) { - struct ieee80211_sta *sta; - - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* - * TODO: we should check the ext cap IE but it is - * unclear why the spec requires two bits (one in HE - * cap IE, and one in the ext cap IE). In the meantime - * rely on the HE cap IE only. - */ - if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] & - IEEE80211_HE_MAC_CAP0_TWT_RES)) - ctxt_sta->data_policy |= - cpu_to_le32(TWT_SUPPORTED); - } + if (vif->bss_conf.twt_requester) + ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED); } @@ -881,8 +792,6 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); - /* Override the filter flags to accept only probe requests */ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); @@ -902,9 +811,9 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } -static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, - __le32 *tim_index, __le32 *tim_size, - u8 *beacon, u32 frame_size) +void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, + __le32 *tim_index, __le32 *tim_size, + u8 *beacon, u32 frame_size) { u32 tim_idx; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; @@ -944,8 +853,8 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) return ie - beacon; } -static u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, - struct ieee80211_vif *vif) +u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, + struct ieee80211_vif *vif) { u8 rate; @@ -995,9 +904,9 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, } -static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, - struct sk_buff *beacon, - void *data, int len) +int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, + struct sk_buff *beacon, + void *data, int len) { struct iwl_host_cmd cmd = { .id = BEACON_TEMPLATE_CMD, @@ -1100,13 +1009,16 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, sizeof(beacon_cmd)); } -static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon) +int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) { if (WARN_ON(!beacon)) return -EINVAL; + if (IWL_MVM_NON_TRANSMITTING_AP) + return 0; + if 
(!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); @@ -1132,6 +1044,11 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, if (!beacon) return -ENOMEM; +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvm->beacon_inject_active) + return -EBUSY; +#endif + ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon); dev_kfree_skb(beacon); return ret; @@ -1203,7 +1120,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue); + ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue); /* * Only set the beacon time when the MAC is being added, when we @@ -1420,7 +1337,7 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_extended_beacon_notif *beacon = (void *)pkt->data; - struct iwl_mvm_tx_resp *beacon_notify_hdr; + struct iwl_extended_beacon_notif_v5 *beacon_v5 = (void *)pkt->data; struct ieee80211_vif *csa_vif; struct ieee80211_vif *tx_blocked_vif; struct agg_tx_status *agg_status; @@ -1428,18 +1345,29 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); - beacon_notify_hdr = &beacon->beacon_notify_hdr; mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2); - mvm->ibss_manager = beacon->ibss_mgr_status != 0; - agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); - status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; - IWL_DEBUG_RX(mvm, - "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n", - status, beacon_notify_hdr->failure_frame, - le64_to_cpu(beacon->tsf), - mvm->ap_last_beacon_gp2, - le32_to_cpu(beacon_notify_hdr->initial_rate)); + if (!iwl_mvm_is_short_beacon_notif_supported(mvm)) { + struct iwl_mvm_tx_resp *beacon_notify_hdr = + &beacon_v5->beacon_notify_hdr; + + mvm->ibss_manager = beacon_v5->ibss_mgr_status != 0; + agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr); + status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK; + IWL_DEBUG_RX(mvm, + "beacon status %#x retries:%d tsf:0x%016llX gp2:0x%X rate:%d\n", + status, beacon_notify_hdr->failure_frame, + le64_to_cpu(beacon->tsf), + mvm->ap_last_beacon_gp2, + le32_to_cpu(beacon_notify_hdr->initial_rate)); + } else { + mvm->ibss_manager = beacon->ibss_mgr_status != 0; + status = le32_to_cpu(beacon->status) & TX_STATUS_MSK; + IWL_DEBUG_RX(mvm, + "beacon status %#x tsf:0x%016llX gp2:0x%X\n", + status, le64_to_cpu(beacon->tsf), + mvm->ap_last_beacon_gp2); + } csa_vif = rcu_dereference_protected(mvm->csa_vif, lockdep_is_held(&mvm->mutex)); @@ -1472,35 +1400,48 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, } } -static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) +void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { - struct iwl_missed_beacons_notif *missed_beacons = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacons_notif *mb = (void *)pkt->data; struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig; struct iwl_fw_dbg_trigger_tlv *trigger; u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx; u32 rx_missed_bcon, rx_missed_bcon_since_rx; + struct ieee80211_vif *vif; + u32 id = le32_to_cpu(mb->mac_id); - if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id)) - return; + IWL_DEBUG_INFO(mvm, + "missed bcn 
mac_id=%u, consecutive=%u (%u, %u, %u)\n", + le32_to_cpu(mb->mac_id), + le32_to_cpu(mb->consec_missed_beacons), + le32_to_cpu(mb->consec_missed_beacons_since_last_rx), + le32_to_cpu(mb->num_recvd_beacons), + le32_to_cpu(mb->num_expected_beacons)); + + rcu_read_lock(); + + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); + if (!vif) + goto out; - rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons); + rx_missed_bcon = le32_to_cpu(mb->consec_missed_beacons); rx_missed_bcon_since_rx = - le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx); + le32_to_cpu(mb->consec_missed_beacons_since_last_rx); /* * TODO: the threshold should be adjusted based on latency conditions, * and/or in case of a CS flow on one of the other AP vifs. */ - if (le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx) > - IWL_MVM_MISSED_BEACONS_THRESHOLD) + if (rx_missed_bcon > IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG) + iwl_mvm_connection_loss(mvm, vif, "missed beacons"); + else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) ieee80211_beacon_loss(vif); trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), FW_DBG_TRIGGER_MISSED_BEACONS); if (!trigger) - return; + goto out; bcon_trig = (void *)trigger->data; stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon); @@ -1512,28 +1453,11 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); -} - -void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_beacons_notif *mb = (void *)pkt->data; - - IWL_DEBUG_INFO(mvm, - "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n", - le32_to_cpu(mb->mac_id), - le32_to_cpu(mb->consec_missed_beacons), - le32_to_cpu(mb->consec_missed_beacons_since_last_rx), - le32_to_cpu(mb->num_recvd_beacons), - le32_to_cpu(mb->num_expected_beacons)); - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_beacon_loss_iterator, - mb); iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS); + +out: + rcu_read_unlock(); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, @@ -1575,16 +1499,29 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } -static void iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac, - struct ieee80211_vif *vif) +void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) { - struct iwl_probe_resp_data_notif *notif = _data; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; struct iwl_probe_resp_data *old_data, *new_data; + int len = iwl_rx_packet_payload_len(pkt); + u32 id = le32_to_cpu(notif->mac_id); + struct ieee80211_vif *vif; + struct iwl_mvm_vif *mvmvif; + + if (WARN_ON_ONCE(len < sizeof(*notif))) + return; + + IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", + notif->noa_active, notif->csa_counter); - if (mvmvif->id != (u16)le32_to_cpu(notif->mac_id)) + vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false); + if (!vif) return; + mvmvif = iwl_mvm_vif_from_mac80211(vif); + new_data = kzalloc(sizeof(*new_data), GFP_KERNEL); if (!new_data) return; @@ -1615,66 +1552,63 @@ static void 
iwl_mvm_probe_resp_data_iter(void *_data, u8 *mac, ieee80211_csa_set_counter(vif, notif->csa_counter); } -void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; - int len = iwl_rx_packet_payload_len(pkt); - - if (WARN_ON_ONCE(len < sizeof(*notif))) - return; - - IWL_DEBUG_INFO(mvm, "Probe response data notif: noa %d, csa %d\n", - notif->noa_active, notif->csa_counter); - - ieee80211_iterate_active_interfaces(mvm->hw, - IEEE80211_IFACE_ITER_ACTIVE, - iwl_mvm_probe_resp_data_iter, - notif); -} - void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data; - struct ieee80211_vif *csa_vif; + struct ieee80211_vif *csa_vif, *vif; struct iwl_mvm_vif *mvmvif; int len = iwl_rx_packet_payload_len(pkt); - u32 id_n_color; + u32 id_n_color, csa_id, mac_id; if (WARN_ON_ONCE(len < sizeof(*notif))) return; - rcu_read_lock(); - - csa_vif = rcu_dereference(mvm->csa_vif); - if (WARN_ON(!csa_vif || !csa_vif->csa_active)) - goto out_unlock; - id_n_color = le32_to_cpu(notif->id_and_color); + mac_id = id_n_color & FW_CTXT_ID_MSK; + + if (WARN_ON_ONCE(mac_id >= NUM_MAC_INDEX_DRIVER)) + return; - mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); - if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color, - "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)", - FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color)) - goto out_unlock; + rcu_read_lock(); + vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]); - IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n"); + switch (vif->type) { + case NL80211_IFTYPE_AP: + csa_vif = rcu_dereference(mvm->csa_vif); + if (WARN_ON(!csa_vif || !csa_vif->csa_active || + csa_vif != vif)) + goto out_unlock; - schedule_delayed_work(&mvm->cs_tx_unblock_dwork, - msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT * - csa_vif->bss_conf.beacon_int)); + mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); + csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); + if (WARN(csa_id != id_n_color, + "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)", + csa_id, id_n_color)) + goto out_unlock; - ieee80211_csa_finish(csa_vif); + IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n"); - rcu_read_unlock(); + schedule_delayed_work(&mvm->cs_tx_unblock_dwork, + msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT * + csa_vif->bss_conf.beacon_int)); - RCU_INIT_POINTER(mvm->csa_vif, NULL); + ieee80211_csa_finish(csa_vif); - return; + rcu_read_unlock(); + RCU_INIT_POINTER(mvm->csa_vif, NULL); + return; + case NL80211_IFTYPE_STATION: + iwl_mvm_csa_client_absent(mvm, vif); + ieee80211_chswitch_done(vif, true); + break; + default: + /* should never happen */ + WARN_ON_ONCE(1); + break; + } out_unlock: rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 97dc464379d2..3a92c09d4692 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -184,6 +184,29 @@ static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = { }; #endif +static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { + .max_peers = IWL_MVM_TOF_MAX_APS, + .report_ap_tsf = 1, + .randomize_mac_addr = 1, + + .ftm = { + .supported = 1, + 
.asap = 1, + .non_asap = 1, + .request_lci = 1, + .request_civicloc = 1, + .max_bursts_exponent = -1, /* all supported */ + .max_ftms_per_burst = 0, /* no limits */ + .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), + .preambles = BIT(NL80211_PREAMBLE_LEGACY) | + BIT(NL80211_PREAMBLE_HT) | + BIT(NL80211_PREAMBLE_VHT), + }, +}; + void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type) { if (!iwl_mvm_is_d0i3_supported(mvm)) @@ -395,6 +418,21 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) return ret; } +const static u8 he_if_types_ext_capa_sta[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, +}; + +const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = he_if_types_ext_capa_sta, + .extended_capabilities_mask = he_if_types_ext_capa_sta, + .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), + }, +}; + int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) { struct ieee80211_hw *hw = mvm->hw; @@ -405,12 +443,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; +#ifdef CONFIG_PM_SLEEP + bool unified = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); +#endif /* Tell mac80211 our characteristics */ ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SPECTRUM_MGMT); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); - ieee80211_hw_set(hw, QUEUE_CONTROL); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); @@ -424,6 +465,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); + ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); + ieee80211_hw_set(hw, STA_MMPDU_TXQ); + ieee80211_hw_set(hw, TX_AMSDU); + ieee80211_hw_set(hw, TX_FRAG_LIST); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); @@ -469,6 +514,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; + hw->max_tx_fragments = mvm->trans->max_skb_frags; BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); @@ -525,6 +571,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->n_cipher_suites++; } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); + hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; + } + ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); hw->wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | @@ -534,6 +587,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); hw->chanctx_data_size = sizeof(u16); + hw->txq_data_size = sizeof(struct iwl_mvm_txq); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@ -673,6 +727,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); } + if (mvm->nvm_data->sku_cap_11ax_enable && + !iwlwifi_mod_params.disable_11ax) { + hw->wiphy->iftype_ext_capab = 
he_iftypes_ext_capa; + hw->wiphy->num_iftype_ext_capab = + ARRAY_SIZE(he_iftypes_ext_capa); + } + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP @@ -682,7 +743,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->wowlan = &mvm->wowlan; } - if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec && + if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { @@ -735,15 +796,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS; } - ret = ieee80211_register_hw(mvm->hw); - if (ret) - iwl_mvm_leds_exit(mvm); - mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE; - if (mvm->cfg->vht_mu_mimo_supported) wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); + ret = ieee80211_register_hw(mvm->hw); + if (ret) { + iwl_mvm_leds_exit(mvm); + } + return ret; } @@ -776,7 +837,6 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm, goto out; __skb_queue_tail(&mvm->d0i3_tx, skb); - ieee80211_stop_queues(mvm->hw); /* trigger wakeup */ iwl_mvm_ref(mvm, IWL_MVM_REF_TX); @@ -796,13 +856,15 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_sta *sta = control->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; + bool offchannel = IEEE80211_SKB_CB(skb)->flags & + IEEE80211_TX_CTL_TX_OFFCHAN; if (iwl_mvm_is_radio_killed(mvm)) { IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); goto drop; } - if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (offchannel && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; @@ -815,8 +877,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, sta = NULL; /* If there is no sta, and it's not offchannel - send through AP */ - if (info->control.vif->type == NL80211_IFTYPE_STATION && - info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) { + if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && + !offchannel) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); @@ -844,22 +906,101 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, ieee80211_free_txskb(hw, skb); } -static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg) +void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) - return false; - return true; + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); + struct sk_buff *skb = NULL; + + /* + * No need for threads to be pending here, they can leave the first + * taker all the work. + * + * mvmtxq->tx_request logic: + * + * If 0, no one is currently TXing, set to 1 to indicate current thread + * will now start TX and other threads should quit. + * + * If 1, another thread is currently TXing, set to 2 to indicate to + * that thread that there was another request. Since that request may + * have raced with the check whether the queue is empty, the TXing + * thread should check the queue's status one more time before leaving. + * This check is done in order to not leave any TX hanging in the queue + * until the next TX invocation (which may not even happen). + * + * If 2, another thread is currently TXing, and it will already double + * check the queue, so do nothing. 
+ */ + if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) + return; + + rcu_read_lock(); + do { + while (likely(!mvmtxq->stopped && + (mvm->trans->system_pm_mode == + IWL_PLAT_PM_MODE_DISABLED))) { + skb = ieee80211_tx_dequeue(hw, txq); + + if (!skb) { + if (txq->sta) + IWL_DEBUG_TX(mvm, + "TXQ of sta %pM tid %d is now empty\n", + txq->sta->addr, + txq->tid); + break; + } + + if (!txq->sta) + iwl_mvm_tx_skb_non_sta(mvm, skb); + else + iwl_mvm_tx_skb(mvm, skb, txq->sta); + } + } while (atomic_dec_return(&mvmtxq->tx_request)); + rcu_read_unlock(); } -static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) +static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) { - if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) - return false; - if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG) - return true; + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); - /* enabled by default */ - return true; + /* + * Please note that racing is handled very carefully here: + * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is + * deleted afterwards. + * This means that if: + * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): + * queue is allocated and we can TX. + * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): + * a race, should defer the frame. + * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): + * need to allocate the queue and defer the frame. + * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): + * queue is already scheduled for allocation, no need to allocate, + * should defer the frame. + */ + + /* If the queue is allocated TX and return. */ + if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { + /* + * Check that list is empty to avoid a race where txq_id is + * already updated, but the queue allocation work wasn't + * finished + */ + if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) + return; + + iwl_mvm_mac_itxq_xmit(hw, txq); + return; + } + + /* The list is being deleted only after the queue is fully allocated. */ + if (!list_empty(&mvmtxq->list)) + return; + + list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); + schedule_work(&mvm->add_stream_wk); } #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ @@ -974,7 +1115,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, mvmvif = iwl_mvm_vif_from_mac80211(vif); cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); } - if (!iwl_enable_rx_ampdu(mvm->cfg)) { + if (!iwl_enable_rx_ampdu()) { ret = -EINVAL; break; } @@ -986,7 +1127,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, timeout); break; case IEEE80211_AMPDU_TX_START: - if (!iwl_enable_tx_ampdu(mvm->cfg)) { + if (!iwl_enable_tx_ampdu()) { ret = -EINVAL; break; } @@ -1066,6 +1207,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_stop_device(mvm); + mvm->cur_aid = 0; + mvm->scan_status = 0; mvm->ps_disabled = false; mvm->calibrating = false; @@ -1074,6 +1217,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_cleanup_roc_te(mvm); ieee80211_remain_on_channel_expired(mvm->hw); + iwl_mvm_ftm_restart(mvm); + /* * cleanup all interfaces, even inactive ones, as some might have * gone down during the HW restart @@ -1085,7 +1230,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); @@ -1188,6 +1332,8 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) /* allow transport/FW low power modes */ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); + /* * If we have TDLS peers, remove them. We don't know the last seqno/PN * of packets the FW sent out, so we must reconnect. @@ -1391,6 +1537,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, if (ret) goto out_unlock; + rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); + /* Counting number of interfaces is needed for legacy PM */ if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count++; @@ -1549,6 +1697,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI); } + if (vif->bss_conf.ftm_responder) + memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); + iwl_mvm_vif_dbgfs_clean(mvm, vif); /* @@ -1582,6 +1733,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); + RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); + if (vif->type == NL80211_IFTYPE_MONITOR) mvm->monitor_on = false; @@ -2076,6 +2229,46 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, } flags |= STA_CTXT_HE_PACKET_EXT; + } else if ((sta->he_cap.he_cap_elem.phy_cap_info[9] & + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) != + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED) { + int low_th = -1; + int high_th = -1; + + /* Take the PPE thresholds from the nominal padding info */ + switch (sta->he_cap.he_cap_elem.phy_cap_info[9] & + IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) { + case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US: + low_th = IWL_HE_PKT_EXT_BPSK; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_BPSK; + break; + } + + /* Set the PPE thresholds accordingly */ + if (low_th >= 0 && high_th >= 0) { + u8 ***pkt_ext_qam = + (void 
*)sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th; + + for (i = 0; i < MAX_HE_SUPP_NSS; i++) { + u8 bw; + + for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; + bw++) { + pkt_ext_qam[i][bw][0] = low_th; + pkt_ext_qam[i][bw][1] = high_th; + } + } + + flags |= STA_CTXT_HE_PACKET_EXT; + } } rcu_read_unlock(); @@ -2146,6 +2339,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); } + /* Update MU EDCA params */ + if (changes & BSS_CHANGED_QOS && mvmvif->associated && + bss_conf->assoc && vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + /* * If we're not associated yet, take the (new) BSSID before associating * so the firmware knows. If we're already associated, then use the old @@ -2211,7 +2410,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * If update fails - SF might be running in associated * mode while disassociated - which is forbidden. */ - WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), + ret = iwl_mvm_sf_update(mvm, vif, false); + WARN_ONCE(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status), "Failed to update SF upon disassociation\n"); /* @@ -2432,6 +2634,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); + iwl_mvm_ftm_restart_responder(mvm, vif); + goto out_unlock; out_quota_failed: @@ -2543,6 +2747,15 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } + + if (changes & BSS_CHANGED_FTM_RESPONDER) { + int ret = iwl_mvm_ftm_start_responder(mvm, vif); + + if (ret) + IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", + ret); + } + } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, @@ -2672,7 +2885,7 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, return; spin_lock_bh(&mvmsta->lock); - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) @@ -2861,32 +3074,6 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, peer_addr, action); } -static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta) -{ - struct iwl_mvm_tid_data *tid_data; - struct sk_buff *skb; - int i; - - spin_lock_bh(&mvm_sta->lock); - for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { - tid_data = &mvm_sta->tid_data[i]; - - while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - /* - * The first deferred frame should've stopped the MAC - * queues, so we should never get a second deferred - * frame for the RA/TID. 
- */ - iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue)); - ieee80211_free_txskb(mvm->hw, skb); - } - } - spin_unlock_bh(&mvm_sta->lock); -} - static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -2920,7 +3107,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, */ if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) { - iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); /* @@ -2967,6 +3153,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_SETUP); } + + sta->max_rc_amsdu_len = 1; } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { /* @@ -2979,11 +3167,15 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { if (vif->type == NL80211_IFTYPE_AP) { + vif->bss_conf.he_support = sta->he_cap.has_he; mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); + } else if (vif->type == NL80211_IFTYPE_STATION) { + vif->bss_conf.he_support = sta->he_cap.has_he; + iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, @@ -2991,6 +3183,24 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, ret = iwl_mvm_update_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTHORIZED) { + /* if wep is used, need to set the key for the station now */ + if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { + mvm_sta->wep_key = + kmemdup(mvmvif->ap_wep_key, + sizeof(*mvmvif->ap_wep_key) + + mvmvif->ap_wep_key->keylen, + GFP_KERNEL); + if (!mvm_sta->wep_key) { + ret = -ENOMEM; + goto out_unlock; + } + + ret = iwl_mvm_set_sta_key(mvm, vif, sta, + mvm_sta->wep_key, + STA_KEY_IDX_INVALID); + } else { + ret = 0; + } /* we don't support TDLS during DCM */ if (iwl_mvm_phy_ctx_count(mvm) > 1) @@ -3005,18 +3215,13 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, true); - - /* if wep is used, need to set the key for the station now */ - if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) - ret = iwl_mvm_set_sta_key(mvm, vif, sta, - mvmvif->ap_wep_key, - STA_KEY_IDX_INVALID); - else - ret = 0; } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { /* disable beacon filtering */ - WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0)); + ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); + WARN_ON(ret && + !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)); ret = 0; } else if (old_state == IEEE80211_STA_ASSOC && new_state == IEEE80211_STA_AUTH) { @@ -3036,6 +3241,22 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, NL80211_TDLS_DISABLE_LINK); } + + /* Remove STA key if this is an AP using WEP */ + if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { + int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta, + mvm_sta->wep_key); + + if (!ret) + ret = rm_ret; + kfree(mvm_sta->wep_key); + mvm_sta->wep_key = NULL; + } + + if (unlikely(ret && + test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status))) + ret = 0; } else { ret = -EIO; } @@ -3431,14 +3652,20 @@ static int 
iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), - /* Set the channel info data */ - .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ? - PHY_BAND_24 : PHY_BAND_5, - .channel_info.channel = channel->hw_value, - .channel_info.width = PHY_VHT_CHANNEL_MODE20, - /* Set the time and duration */ - .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)), - }; + }; + struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, + &aux_roc_req.channel_info); + u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); + + /* Set the channel info data */ + iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, + (channel->band == NL80211_BAND_2GHZ) ? + PHY_BAND_24 : PHY_BAND_5, + PHY_VHT_CHANNEL_MODE20, + 0); + + /* Set the time and duration */ + tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)); delay = AUX_ROC_MIN_DELAY; req_dur = MSEC_TO_TU(duration); @@ -3463,15 +3690,15 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, } } - aux_roc_req.duration = cpu_to_le32(req_dur); - aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); + tail->duration = cpu_to_le32(req_dur); + tail->apply_time_max_delay = cpu_to_le32(delay); IWL_DEBUG_TE(mvm, "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", channel->hw_value, req_dur, duration, delay, dtim_interval); /* Set the node address */ - memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); + memcpy(tail->node_addr, vif->addr, ETH_ALEN); lockdep_assert_held(&mvm->mutex); @@ -3502,7 +3729,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, ARRAY_SIZE(time_event_response), iwl_mvm_rx_aux_roc, te_data); - res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req), + res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, &aux_roc_req); if (res) { @@ -3673,11 +3900,43 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) return 0; } +struct iwl_mvm_ftm_responder_iter_data { + bool responder; + struct ieee80211_chanctx_conf *ctx; +}; + +static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_ftm_responder_iter_data *data = _data; + + if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && + vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) + data->responder = true; +} + +static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mvm_ftm_responder_iter_data data = { + .responder = false, + .ctx = ctx, + }; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_ftm_responder_chanctx_iter, + &data); + return data.responder; +} + static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, struct ieee80211_chanctx_conf *ctx) { u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt; + bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); + struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; int ret; lockdep_assert_held(&mvm->mutex); @@ -3690,7 +3949,7 @@ static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, goto out; } - ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, + ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); if (ret) { @@ -3745,6 +4004,8 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; + bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); + struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; if (WARN_ONCE((phy_ctxt->ref > 1) && (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | @@ -3759,17 +4020,17 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, /* we are only changing the min_width, may be a noop */ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { - if (phy_ctxt->width == ctx->min_def.width) + if (phy_ctxt->width == def->width) goto out_unlock; /* we are just toggling between 20_NOHT and 20 */ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && - ctx->min_def.width <= NL80211_CHAN_WIDTH_20) + def->width <= NL80211_CHAN_WIDTH_20) goto out_unlock; } iwl_mvm_bt_coex_vif_change(mvm); - iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, + iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, ctx->rx_chains_static, ctx->rx_chains_dynamic); @@ -3798,6 +4059,7 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, mvmvif->ap_ibss_active = true; break; } + /* fall through */ case NL80211_IFTYPE_ADHOC: /* * The AP binding flow is handled as part of the start_ap flow @@ -3850,25 +4112,30 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, } if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { - u32 duration = 3 * vif->bss_conf.beacon_int; + mvmvif->csa_bcn_pending = true; - /* iwl_mvm_protect_session() reads directly from the - * device (the system time), so make sure it is - * available. - */ - ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); - if (ret) - goto out_remove_binding; + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { + u32 duration = 3 * vif->bss_conf.beacon_int; - /* Protect the session to make sure we hear the first - * beacon on the new channel. - */ - mvmvif->csa_bcn_pending = true; - iwl_mvm_protect_session(mvm, vif, duration, duration, - vif->bss_conf.beacon_int / 2, - true); - iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); + /* iwl_mvm_protect_session() reads directly from the + * device (the system time), so make sure it is + * available. + */ + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); + if (ret) + goto out_remove_binding; + + /* Protect the session to make sure we hear the first + * beacon on the new channel. 
+ */ + iwl_mvm_protect_session(mvm, vif, duration, duration, + vif->bss_conf.beacon_int / 2, + true); + + iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); + } iwl_mvm_update_quotas(mvm, false, NULL); } @@ -3938,7 +4205,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, disabled_vif = vif; - iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) + iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); break; default: break; @@ -4189,6 +4458,27 @@ static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, "dummy channel switch op\n"); } +static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_chan_switch_te_cmd cmd = { + .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, + mvmvif->color)), + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .tsf = cpu_to_le32(chsw->timestamp), + .cs_count = chsw->count, + }; + + lockdep_assert_held(&mvm->mutex); + + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(MAC_CONF_GROUP, + CHANNEL_SWITCH_TIME_EVENT_CMD), + 0, sizeof(cmd), &cmd); +} + static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel_switch *chsw) @@ -4256,14 +4546,19 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); - iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, - apply_time); if (mvmvif->bf_data.bf_enabled) { ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); if (ret) goto out_unlock; } + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) + iwl_mvm_schedule_client_csa(mvm, vif, chsw); + else + iwl_mvm_schedule_csa_period(mvm, vif, + vif->bss_conf.beacon_int, + apply_time); break; default: break; @@ -4656,8 +4951,89 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) mutex_unlock(&mvm->mutex); } +static int +iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_ftm_responder_stats *stats) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (vif->p2p || vif->type != NL80211_IFTYPE_AP || + !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) + return -EINVAL; + + mutex_lock(&mvm->mutex); + *stats = mvm->ftm_resp_stats; + mutex_unlock(&mvm->mutex); + + stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | + BIT(NL80211_FTM_STATS_PARTIAL_NUM) | + BIT(NL80211_FTM_STATS_FAILED_NUM) | + BIT(NL80211_FTM_STATS_ASAP_NUM) | + BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | + BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | + BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | + BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | + BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); + + return 0; +} + +static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + int ret; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_ftm_start(mvm, vif, request); + mutex_unlock(&mvm->mutex); + + return ret; +} + +static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + mutex_lock(&mvm->mutex); + iwl_mvm_ftm_abort(mvm, request); + mutex_unlock(&mvm->mutex); +} + +static 
bool iwl_mvm_can_hw_csum(struct sk_buff *skb) +{ + u8 protocol = ip_hdr(skb)->protocol; + + if (!IS_ENABLED(CONFIG_INET)) + return false; + + return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; +} + +static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, + struct sk_buff *head, + struct sk_buff *skb) +{ + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + + /* For now don't aggregate IPv6 in AMSDU */ + if (skb->protocol != htons(ETH_P_IP)) + return false; + + if (!iwl_mvm_is_csum_supported(mvm)) + return true; + + return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); +} + const struct ieee80211_ops iwl_mvm_hw_ops = { .tx = iwl_mvm_mac_tx, + .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, .ampdu_action = iwl_mvm_mac_ampdu_action, .start = iwl_mvm_mac_start, .reconfig_complete = iwl_mvm_mac_reconfig_complete, @@ -4731,6 +5107,11 @@ const struct ieee80211_ops iwl_mvm_hw_ops = { #endif .get_survey = iwl_mvm_mac_get_survey, .sta_statistics = iwl_mvm_mac_sta_statistics, + .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, + .start_pmsr = iwl_mvm_start_pmsr, + .abort_pmsr = iwl_mvm_abort_pmsr, + + .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, #ifdef CONFIG_IWLWIFI_DEBUGFS .sta_add_debugfs = iwl_mvm_sta_add_debugfs, #endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 1aa690e081ff..bca6f6b536d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -83,7 +83,6 @@ #include "sta.h" #include "fw-api.h" #include "constants.h" -#include "tof.h" #include "fw/runtime.h" #include "fw/dbg.h" #include "fw/acpi.h" @@ -95,6 +94,8 @@ /* RSSI offset for WkP */ #define IWL_RSSI_OFFSET 50 #define IWL_MVM_MISSED_BEACONS_THRESHOLD 8 +#define IWL_MVM_MISSED_BEACONS_THRESHOLD_LONG 16 + /* A TimeUnit is 1024 microsecond */ #define MSEC_TO_TU(_msec) (_msec*1000/1024) @@ -298,18 +299,39 @@ enum iwl_bt_force_ant_mode { BT_FORCE_ANT_MAX, }; +/** + * struct iwl_mvm_low_latency_force - low latency force mode set by debugfs + * @LOW_LATENCY_FORCE_UNSET: unset force mode + * @LOW_LATENCY_FORCE_ON: for low latency on + * @LOW_LATENCY_FORCE_OFF: for low latency off + * @NUM_LOW_LATENCY_FORCE: max num of modes + */ +enum iwl_mvm_low_latency_force { + LOW_LATENCY_FORCE_UNSET, + LOW_LATENCY_FORCE_ON, + LOW_LATENCY_FORCE_OFF, + NUM_LOW_LATENCY_FORCE +}; + /** * struct iwl_mvm_low_latency_cause - low latency set causes * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command * @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap) +* @LOW_LATENCY_DEBUGFS_FORCE_ENABLE: indicate that force mode is enabled +* the actual set/unset is done with LOW_LATENCY_DEBUGFS_FORCE +* @LOW_LATENCY_DEBUGFS_FORCE: low latency force mode from debugfs +* set this with LOW_LATENCY_DEBUGFS_FORCE_ENABLE flag +* in low_latency. */ enum iwl_mvm_low_latency_cause { LOW_LATENCY_TRAFFIC = BIT(0), LOW_LATENCY_DEBUGFS = BIT(1), LOW_LATENCY_VCMD = BIT(2), LOW_LATENCY_VIF_TYPE = BIT(3), + LOW_LATENCY_DEBUGFS_FORCE_ENABLE = BIT(4), + LOW_LATENCY_DEBUGFS_FORCE = BIT(5), }; /** @@ -360,8 +382,10 @@ struct iwl_probe_resp_data { * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. 
- * @low_latency: indicates low latency is set, see - * enum &iwl_mvm_low_latency_cause for causes. + * @low_latency: bit flags for low latency + * see enum &iwl_mvm_low_latency_cause for causes. + * @low_latency_actual: boolean, indicates low latency is set, + * as a result from low_latency bit flags and takes force into account. * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following @@ -393,7 +417,8 @@ struct iwl_mvm_vif { bool ap_ibss_active; bool pm_enabled; bool monitor_active; - u8 low_latency; + u8 low_latency: 6; + u8 low_latency_actual: 1; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; @@ -778,6 +803,39 @@ struct iwl_mvm_geo_profile { u8 values[ACPI_GEO_TABLE_SIZE]; }; +struct iwl_mvm_txq { + struct list_head list; + u16 txq_id; + atomic_t tx_request; + bool stopped; +}; + +static inline struct iwl_mvm_txq * +iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq) +{ + return (void *)txq->drv_priv; +} + +static inline struct iwl_mvm_txq * +iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid) +{ + if (tid == IWL_MAX_TID_COUNT) + tid = IEEE80211_NUM_TIDS; + + return (void *)sta->txq[tid]->drv_priv; +} + +/** + * struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid + * + * @sta_id: sta id + * @txq_tid: txq tid + */ +struct iwl_mvm_tvqm_txq_info { + u8 sta_id; + u8 txq_tid; +}; + struct iwl_mvm_dqa_txq_info { u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ bool reserved; /* Is this the TXQ reserved for a STA */ @@ -821,9 +879,6 @@ struct iwl_mvm { bool hw_registered; bool calibrating; - u32 error_event_table[2]; - u32 log_event_table; - u32 umac_error_event_table; bool support_umac_log; u32 ampdu_ref; @@ -843,13 +898,13 @@ struct iwl_mvm { u64 on_time_scan; } radio_stats, accu_radio_stats; - u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; - - struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; + struct list_head add_stream_txqs; + union { + struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; + struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES]; + }; struct work_struct add_stream_wk; /* To add streams to queues */ - atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; - const char *nvm_file_name; struct iwl_nvm_data *nvm_data; /* NVM sections */ @@ -863,7 +918,6 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; - unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -924,6 +978,7 @@ struct iwl_mvm { u32 dbgfs_prph_reg_addr; bool disable_power_off; bool disable_power_off_d3; + bool beacon_inject_active; bool scan_iter_notif_enabled; @@ -932,6 +987,7 @@ struct iwl_mvm { struct debugfs_blob_wrapper nvm_calib_blob; struct debugfs_blob_wrapper nvm_prod_blob; struct debugfs_blob_wrapper nvm_phy_sku_blob; + struct debugfs_blob_wrapper nvm_reg_blob; struct iwl_mvm_frame_stats drv_rx_stats; spinlock_t drv_stats_lock; @@ -955,9 +1011,11 @@ struct iwl_mvm { u8 refs[IWL_MVM_REF_COUNT]; u8 vif_count; + struct ieee80211_vif __rcu *vif_id_to_mac[NUM_MAC_INDEX_DRIVER]; /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; + u8 *error_recovery_buf; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -1047,6 +1105,8 @@ struct iwl_mvm { /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease 
debugfs_create_* usage */ + /* Indicate if 32Khz external clock is valid */ + u32 ext_clock_valid; unsigned int max_amsdu_len; /* used for debugfs only */ struct ieee80211_vif __rcu *csa_vif; @@ -1090,7 +1150,14 @@ struct iwl_mvm { u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; - struct iwl_mvm_tof_data tof_data; + + struct cfg80211_ftm_responder_stats ftm_resp_stats; + struct { + struct cfg80211_pmsr_request *req; + struct wireless_dev *req_wdev; + struct list_head loc_list; + int responses[IWL_MVM_TOF_MAX_APS]; + } ftm_initiator; struct ieee80211_vif *nan_vif; #define IWL_MAX_BAID 32 @@ -1106,6 +1173,11 @@ struct iwl_mvm { /* does a monitor vif exist (only one can exist hence bool) */ bool monitor_on; + + /* sniffer data to include in radiotap */ + __le16 cur_aid; + u8 cur_bssid[ETH_ALEN]; + #ifdef CONFIG_ACPI struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM]; struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES]; @@ -1149,8 +1221,6 @@ enum iwl_mvm_status { enum iwl_mvm_init_status { IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0), IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1), - IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2), - IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3), }; static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) @@ -1207,6 +1277,19 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id) return iwl_mvm_sta_from_mac80211(sta); } +static inline struct ieee80211_vif * +iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) +{ + if (WARN_ON(vif_id >= ARRAY_SIZE(mvm->vif_id_to_mac))) + return NULL; + + if (rcu) + return rcu_dereference(mvm->vif_id_to_mac[vif_id]); + + return rcu_dereference_protected(mvm->vif_id_to_mac[vif_id], + lockdep_is_held(&mvm->mutex)); +} + static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) { return !iwlwifi_mod_params.d0i3_disable && @@ -1237,6 +1320,12 @@ static inline bool iwl_mvm_is_frag_ebs_supported(struct iwl_mvm *mvm) return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAG_EBS); } +static inline bool iwl_mvm_is_short_beacon_notif_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF); +} + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between @@ -1470,6 +1559,11 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, __le16 fc); +void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq); +unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + unsigned int tid); + #ifdef CONFIG_IWLWIFI_DEBUG const char *iwl_mvm_get_tx_fail_reason(u32 status); #else @@ -1567,6 +1661,7 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); @@ -1599,9 +1694,19 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, 
struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon); +int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, + struct sk_buff *beacon, + void *data, int len); +u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, + struct ieee80211_vif *vif); +void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, + __le32 *tim_index, __le32 *tim_size, + u8 *beacon, u32 frame_size); void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, @@ -1615,8 +1720,6 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, - struct ieee80211_vif *exclude_vif); void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, @@ -1870,17 +1973,43 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) * binding, so this has no real impact. For now, just return * the current desired low-latency state. */ - return mvmvif->low_latency; + return mvmvif->low_latency_actual; } static inline void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set, enum iwl_mvm_low_latency_cause cause) { + u8 new_state; + if (set) mvmvif->low_latency |= cause; else mvmvif->low_latency &= ~cause; + + /* + * if LOW_LATENCY_DEBUGFS_FORCE_ENABLE is enabled no changes are + * allowed to actual mode. + */ + if (mvmvif->low_latency & LOW_LATENCY_DEBUGFS_FORCE_ENABLE && + cause != LOW_LATENCY_DEBUGFS_FORCE_ENABLE) + return; + + if (cause == LOW_LATENCY_DEBUGFS_FORCE_ENABLE && set) + /* + * We enter force state + */ + new_state = !!(mvmvif->low_latency & + LOW_LATENCY_DEBUGFS_FORCE); + else + /* + * Check if any other one set low latency + */ + new_state = !!(mvmvif->low_latency & + ~(LOW_LATENCY_DEBUGFS_FORCE_ENABLE | + LOW_LATENCY_DEBUGFS_FORCE)); + + mvmvif->low_latency_actual = new_state; } /* Return a bitmask with all the hw supported queues, except for the @@ -1895,21 +2024,24 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { lockdep_assert_held(&mvm->mutex); - /* calling this function without using dump_start/end since at this - * point we already hold the op mode mutex + /* If IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set then we received + * an assert. Since we failed to bring the interface up, mac80211 + * will not attempt to reconfig the device, + * which handles the dump collection in assert flow, + * so trigger dump collection here. 
*/ - iwl_fw_dbg_collect_sync(&mvm->fwrt); + if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, + false, 0); + iwl_fw_cancel_timestamp(&mvm->fwrt); - iwl_free_fw_paging(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + iwl_fwrt_stop_device(&mvm->fwrt); + iwl_free_fw_paging(&mvm->fwrt); iwl_fw_dump_conf_clear(&mvm->fwrt); - iwl_trans_stop_device(mvm->trans); } -/* Stop/start all mac queues in a given bitmap */ -void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); -void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); - /* Re-configure the SCD for a queue that has already been configured */ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn); @@ -1949,6 +2081,23 @@ void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool added_vif); +/* FTM responder */ +int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, + struct ieee80211_vif *vif); +void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); + +/* FTM initiator */ +void iwl_mvm_ftm_restart(struct iwl_mvm *mvm); +void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request); +void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req); + /* TDLS */ /* @@ -2015,4 +2164,59 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw, struct dentry *dir); #endif +/* Channel info utils */ +static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS); +} + +static inline void *iwl_mvm_chan_info_cmd_tail(struct iwl_mvm *mvm, + struct iwl_fw_channel_info *ci) +{ + return (u8 *)ci + (iwl_mvm_has_ultra_hb_channel(mvm) ? + sizeof(struct iwl_fw_channel_info) : + sizeof(struct iwl_fw_channel_info_v1)); +} + +static inline size_t iwl_mvm_chan_info_padding(struct iwl_mvm *mvm) +{ + return iwl_mvm_has_ultra_hb_channel(mvm) ? 0 : + sizeof(struct iwl_fw_channel_info) - + sizeof(struct iwl_fw_channel_info_v1); +} + +static inline void iwl_mvm_set_chan_info(struct iwl_mvm *mvm, + struct iwl_fw_channel_info *ci, + u32 chan, u8 band, u8 width, + u8 ctrl_pos) +{ + if (iwl_mvm_has_ultra_hb_channel(mvm)) { + ci->channel = cpu_to_le32(chan); + ci->band = band; + ci->width = width; + ci->ctrl_pos = ctrl_pos; + } else { + struct iwl_fw_channel_info_v1 *ci_v1 = + (struct iwl_fw_channel_info_v1 *)ci; + + ci_v1->channel = chan; + ci_v1->band = band; + ci_v1->width = width; + ci_v1->ctrl_pos = ctrl_pos; + } +} + +static inline void +iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm, + struct iwl_fw_channel_info *ci, + struct cfg80211_chan_def *chandef) +{ + iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value, + (chandef->chan->band == NL80211_BAND_2GHZ ? 
+ PHY_BAND_24 : PHY_BAND_5), + iwl_mvm_get_channel_width(chandef), + iwl_mvm_get_ctrl_pos(chandef)); +} + #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 6fc5cc1f2b5b..7bdbd010ae6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -179,7 +179,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section, IWL_DEBUG_EEPROM(mvm->trans->dev, "NVM access command failed with status %d (device: %s)\n", ret, mvm->cfg->name); - ret = -EIO; + ret = -ENODATA; } goto exit; } @@ -380,8 +380,12 @@ int iwl_nvm_init(struct iwl_mvm *mvm) /* we override the constness for initial read */ ret = iwl_nvm_read_section(mvm, section, nvm_buffer, size_read); - if (ret < 0) + if (ret == -ENODATA) { + ret = 0; continue; + } + if (ret < 0) + break; size_read += ret; temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); if (!temp) { @@ -412,6 +416,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm) mvm->nvm_phy_sku_blob.data = temp; mvm->nvm_phy_sku_blob.size = ret; break; + case NVM_SECTION_TYPE_REGULATORY_SDP: + case NVM_SECTION_TYPE_REGULATORY: + mvm->nvm_reg_blob.data = temp; + mvm->nvm_reg_blob.size = ret; + break; default: if (section == mvm->cfg->nvm_hw_section_num) { mvm->nvm_hw_blob.data = temp; @@ -454,7 +463,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm) IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", mvm->nvm_data->nvm_version); - return 0; + return ret < 0 ? ret : 0; } struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 30c5127034a0..ba27dce4c2bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -82,7 +82,6 @@ #include "fw/api/scan.h" #include "time-event.h" #include "fw-api.h" -#include "fw/api/scan.h" #include "fw/acpi.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" @@ -301,8 +300,14 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, RX_HANDLER_SYNC), - RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, - RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(LOCATION_GROUP, TOF_RESPONDER_STATS, + iwl_mvm_ftm_responder_stats, RX_HANDLER_ASYNC_LOCKED), + + RX_HANDLER_GRP(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, + iwl_mvm_ftm_range_resp, RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(LOCATION_GROUP, TOF_LC_NOTIF, + iwl_mvm_ftm_lc_notif, RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF, iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, @@ -329,8 +334,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_REQ_UMAC), HCMD_NAME(SCAN_ABORT_UMAC), HCMD_NAME(SCAN_COMPLETE_UMAC), - HCMD_NAME(TOF_CMD), - HCMD_NAME(TOF_NOTIFICATION), HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), @@ -419,12 +422,14 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), + HCMD_NAME(FW_ERROR_RECOVERY_CMD), }; /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { + HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF), }; @@ -449,6 +454,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), + HCMD_NAME(TLC_MNG_CONFIG_CMD), + HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), @@ -461,6 +468,22 @@ static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { HCMD_NAME(MFU_ASSERT_DUMP_NTF), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_location_names[] = { + HCMD_NAME(TOF_RANGE_REQ_CMD), + HCMD_NAME(TOF_CONFIG_CMD), + HCMD_NAME(TOF_RANGE_ABORT_CMD), + HCMD_NAME(TOF_RANGE_REQ_EXT_CMD), + HCMD_NAME(TOF_RESPONDER_CONFIG_CMD), + HCMD_NAME(TOF_RESPONDER_DYN_CONFIG_CMD), + HCMD_NAME(TOF_LC_NOTIF), + HCMD_NAME(TOF_RESPONDER_STATS), + HCMD_NAME(TOF_MCSI_DEBUG_NOTIF), + HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), +}; + /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ @@ -483,6 +506,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), + [LOCATION_GROUP] = HCMD_ARR(iwl_mvm_location_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), @@ -577,11 +601,17 @@ static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) return ret; } +static bool iwl_mvm_d3_debug_enable(void *ctx) +{ + return IWL_MVM_D3_DEBUG; +} + static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, .fw_running = iwl_mvm_fwrt_fw_running, .send_hcmd = iwl_mvm_fwrt_send_hcmd, + .d3_debug_enable = iwl_mvm_d3_debug_enable, }; static struct iwl_op_mode * @@ -676,6 +706,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_LIST_HEAD(&mvm->aux_roc_te_list); INIT_LIST_HEAD(&mvm->async_handlers_list); spin_lock_init(&mvm->time_event_lock); + INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list); INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); @@ -685,6 +716,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); + INIT_LIST_HEAD(&mvm->add_stream_txqs); spin_lock_init(&mvm->d0i3_tx_lock); spin_lock_init(&mvm->refs_lock); @@ -736,6 +768,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.rx_buf_size = rb_size_default; } + BUILD_BUG_ON(sizeof(struct iwl_ldbg_config_cmd) != + LDBG_CFG_COMMAND_SIZE); + trans->wide_cmd_header = true; trans_cfg.bc_table_dword = mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560; @@ -799,8 +834,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mutex_lock(&mvm->mutex); iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); - if (test_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status)) 
- iwl_fw_alive_error_dump(&mvm->fwrt); + if (err) + iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); if (!iwlmvm_mod_params.init_dbg || !err) iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); @@ -842,8 +877,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (iwl_mvm_is_d0i3_supported(mvm)) iwl_trans_unref(mvm->trans); - iwl_mvm_tof_init(mvm); - iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); return op_mode; @@ -886,15 +919,15 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_thermal_exit(mvm); - if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) { - ieee80211_unregister_hw(mvm->hw); - mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE; - } + ieee80211_unregister_hw(mvm->hw); kfree(mvm->scan_cmd); kfree(mvm->mcast_filter_cmd); mvm->mcast_filter_cmd = NULL; + kfree(mvm->error_recovery_buf); + mvm->error_recovery_buf = NULL; + #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) kfree(mvm->d3_resume_sram); #endif @@ -909,8 +942,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) cancel_delayed_work_sync(&mvm->tcm.work); - iwl_mvm_tof_clean(mvm); - iwl_fw_runtime_free(&mvm->fwrt); mutex_destroy(&mvm->mutex); mutex_destroy(&mvm->d0i3_suspend_mutex); @@ -1079,24 +1110,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_common(mvm, rxb, pkt); } -void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq) -{ - int q; - - if (WARN_ON_ONCE(!mq)) - return; - - for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { - if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { - IWL_DEBUG_TX_QUEUES(mvm, - "mac80211 %d already stopped\n", q); - continue; - } - - ieee80211_stop_queue(mvm->hw, q); - } -} - static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, const struct iwl_device_cmd *cmd) { @@ -1109,38 +1122,81 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, iwl_trans_block_txq_ptrs(mvm->trans, false); } -static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +static int iwl_mvm_is_static_queue(struct iwl_mvm *mvm, int queue) { - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue]; - - iwl_mvm_stop_mac_queues(mvm, mq); + return queue == mvm->aux_queue || queue == mvm->probe_queue || + queue == mvm->p2p_dev_queue || queue == mvm->snif_queue; } -void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq) +static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode, + int hw_queue, bool start) { - int q; + struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); + struct ieee80211_sta *sta; + struct ieee80211_txq *txq; + struct iwl_mvm_txq *mvmtxq; + int i; + unsigned long tid_bitmap; + struct iwl_mvm_sta *mvmsta; + u8 sta_id; + + sta_id = iwl_mvm_has_new_tx_api(mvm) ? 
+ mvm->tvqm_info[hw_queue].sta_id : + mvm->queue_info[hw_queue].ra_sta_id; - if (WARN_ON_ONCE(!mq)) + if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) return; - for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { - if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { - IWL_DEBUG_TX_QUEUES(mvm, - "mac80211 %d still stopped\n", q); - continue; - } + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) + goto out; + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (iwl_mvm_is_static_queue(mvm, hw_queue)) { + if (!start) + ieee80211_stop_queues(mvm->hw); + else if (mvmsta->sta_state != IEEE80211_STA_NOTEXIST) + ieee80211_wake_queues(mvm->hw); + + goto out; + } + + if (iwl_mvm_has_new_tx_api(mvm)) { + int tid = mvm->tvqm_info[hw_queue].txq_tid; + + tid_bitmap = BIT(tid); + } else { + tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap; + } + + for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + int tid = i; + + if (tid == IWL_MAX_TID_COUNT) + tid = IEEE80211_NUM_TIDS; - ieee80211_wake_queue(mvm->hw, q); + txq = sta->txq[tid]; + mvmtxq = iwl_mvm_txq_from_mac80211(txq); + mvmtxq->stopped = !start; + + if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST) + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); } + +out: + rcu_read_unlock(); } -static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue]; + iwl_mvm_queue_state_change(op_mode, hw_queue, false); +} - iwl_mvm_start_mac_queues(mvm, mq); +static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +{ + iwl_mvm_queue_state_change(op_mode, hw_queue, true); } static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm) @@ -1261,12 +1317,29 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); + } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, + &mvm->status)) { + IWL_ERR(mvm, "HW restart already requested, but not started\n"); } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered && !test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { /* don't let the transport/FW power down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + if (mvm->fw->ucode_capa.error_log_size) { + u32 src_size = mvm->fw->ucode_capa.error_log_size; + u32 src_addr = mvm->fw->ucode_capa.error_log_addr; + u8 *recover_buf = kzalloc(src_size, GFP_ATOMIC); + + if (recover_buf) { + mvm->error_recovery_buf = recover_buf; + iwl_trans_read_mem_bytes(mvm->trans, + src_addr, + recover_buf, + src_size); + } + } + if (fw_error && mvm->fw_restart > 0) mvm->fw_restart--; set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index 7f5434b34d0d..86e40bae57e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -109,6 +109,7 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef) return PHY_VHT_CTRL_POS_4_ABOVE; default: WARN(1, "Invalid channel definition"); + /* fall through */ case 0: /* * The FW is expected to check the control channel position only @@ -143,14 +144,11 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, u8 chains_static, u8 chains_dynamic) { u8 
active_cnt, idle_cnt; + struct iwl_phy_context_cmd_tail *tail = + iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci); /* Set the channel info data */ - cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? - PHY_BAND_24 : PHY_BAND_5); - - cmd->ci.channel = chandef->chan->hw_value; - cmd->ci.width = iwl_mvm_get_channel_width(chandef); - cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); + iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef); /* Set rx the chains */ idle_cnt = chains_static; @@ -168,17 +166,17 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm, active_cnt = 2; } - cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << + tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS); - cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); - cmd->rxchain_info |= cpu_to_le32(active_cnt << + tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); + tail->rxchain_info |= cpu_to_le32(active_cnt << PHY_RX_CHAIN_MIMO_CNT_POS); #ifdef CONFIG_IWLWIFI_DEBUGFS if (unlikely(mvm->dbgfs_rx_phyinfo)) - cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); + tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo); #endif - cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); + tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm)); } /* @@ -195,6 +193,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, { struct iwl_phy_context_cmd cmd; int ret; + u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); /* Set the command header fields */ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time); @@ -203,9 +202,7 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm, iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef, chains_static, chains_dynamic); - ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, - sizeof(struct iwl_phy_context_cmd), - &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd); if (ret) IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 5a0a28fd762d..36f5fa1ee793 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -79,6 +81,8 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, struct iwl_beacon_filter_cmd *cmd, u32 flags) { + u16 len; + IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n", le32_to_cpu(cmd->ba_enable_beacon_abort)); IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n", @@ -101,9 +105,23 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm, le32_to_cpu(cmd->bf_temp_fast_filter)); IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n", le32_to_cpu(cmd->bf_temp_slow_filter)); + IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_low is: %d, %d\n", + le32_to_cpu(cmd->bf_threshold_absolute_low[0]), + le32_to_cpu(cmd->bf_threshold_absolute_low[1])); + + IWL_DEBUG_POWER(mvm, "bf_threshold_absolute_high is: %d, %d\n", + le32_to_cpu(cmd->bf_threshold_absolute_high[0]), + le32_to_cpu(cmd->bf_threshold_absolute_high[1])); + + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_BEACON_FILTER_V4)) + len = sizeof(struct iwl_beacon_filter_cmd); + else + len = offsetof(struct iwl_beacon_filter_cmd, + bf_threshold_absolute_low); return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags, - sizeof(struct iwl_beacon_filter_cmd), cmd); + len, cmd); } static @@ -526,6 +544,9 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm) cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #endif + if (mvm->ext_clock_valid) + cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_32K_CLK_VALID_MSK); + IWL_DEBUG_POWER(mvm, "Sending device power command with flags = 0x%X\n", cmd.flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index dabbc04853ac..a28283ff7295 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -149,14 +149,9 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm, if (he_cap && he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[3] & - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) { + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK)) flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; - if (he_cap->he_cap_elem.phy_cap_info[3] & - IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2) - flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK; - } - return flags; } @@ -320,12 +315,26 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) { u16 size = le32_to_cpu(notif->amsdu_size); + int i; if (WARN_ON(sta->max_amsdu_len < size)) goto out; mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); mvmsta->max_amsdu_len = size; + sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + if (mvmsta->amsdu_enabled & BIT(i)) + sta->max_tid_amsdu_len[i] = + iwl_mvm_max_amsdu_size(mvm, sta, i); + else + /* + * Not so elegant, but this will effectively + * prevent AMSDU on this TID + */ + sta->max_tid_amsdu_len[i] = 1; + } IWL_DEBUG_RATE(mvm, "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 089972280daa..e231a44d2423 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -3,7 +3,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -1643,8 +1643,26 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm, static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) { + struct ieee80211_sta_vht_cap *sta_vht_cap = &sta->vht_cap; + struct ieee80211_vht_cap vht_cap = { + .vht_cap_info = cpu_to_le32(sta_vht_cap->cap), + .supp_mcs = sta_vht_cap->vht_mcs, + }; + switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_160: + /* + * Don't use 160 MHz if VHT extended NSS support + * says we cannot use 2 streams, we don't want to + * deal with this. + * We only check MCS 0 - they will support that if + * we got here at all and we don't care which MCS, + * we want to determine a more global state. + */ + if (ieee80211_get_vht_max_nss(&vht_cap, + IEEE80211_VHT_CHANWIDTH_160MHZ, + 0, true) < sta->rx_nss) + return RATE_MCS_CHAN_WIDTH_80; return RATE_MCS_CHAN_WIDTH_160; case IEEE80211_STA_RX_BW_80: return RATE_MCS_CHAN_WIDTH_80; @@ -1744,6 +1762,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, enum rs_action scale_action) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + int i; /* * In case TLC offload is not active amsdu_enabled is either 0xFFFF @@ -1756,7 +1775,25 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, else mvmsta->amsdu_enabled = 0xFFFF; - mvmsta->max_amsdu_len = sta->max_amsdu_len; + if (mvmsta->vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + mvmsta->max_amsdu_len = sta->max_amsdu_len; + else + mvmsta->max_amsdu_len = min_t(int, sta->max_amsdu_len, 8500); + + sta->max_rc_amsdu_len = mvmsta->max_amsdu_len; + + for (i = 0; i < IWL_MAX_TID_COUNT; i++) { + if (mvmsta->amsdu_enabled) + sta->max_tid_amsdu_len[i] = + iwl_mvm_max_amsdu_size(mvm, sta, i); + else + /* + * Not so elegant, but this will effectively + * prevent AMSDU on this TID + */ + sta->max_tid_amsdu_len[i] = 1; + } } /* @@ -1777,7 +1814,7 @@ static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm, struct iwl_scale_tbl_info *tbl, enum rs_action scale_action) { - if (sta->bandwidth != IEEE80211_STA_RX_BW_80) + if (rs_bw_from_sta_bw(sta) != RATE_MCS_CHAN_WIDTH_80) return false; if (!is_vht_siso(&tbl->rate)) @@ -3332,12 +3369,12 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm, /* Building the rate table is non trivial. 
When we're in MIMO2/VHT/80Mhz/SGI * column the rate table should look like this: * - * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI - * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI - * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI - * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI - * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI - * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI + * rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI + * rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI + * rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI + * rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI + * rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI + * rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI @@ -4108,6 +4145,7 @@ static const struct rate_control_ops rs_mvm_ops_drv = { .add_sta_debugfs = rs_drv_add_sta_debugfs, .remove_sta_debugfs = rs_remove_sta_debugfs, #endif + .capa = RATE_CTRL_CAPA_VHT_EXT_NSS_BW, }; void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 6653a238f32e..fbd3014e8b82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -222,7 +222,7 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK)) return 0; *crypt_len = IEEE80211_TKIP_IV_LEN; - /* fall through if TTAK OK */ + /* fall through */ case RX_MPDU_RES_STATUS_SEC_WEP_ENC: if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK)) @@ -599,8 +599,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, * data copied into the "data" struct, but rather the data from * the notification directly. 
*/ - if (iwl_mvm_is_cdb_supported(mvm)) { - struct mvm_statistics_general_cdb *general = + if (iwl_mvm_has_new_rx_stats_api(mvm)) { + struct mvm_statistics_general *general = data->general; mvmvif->beacon_stats.num_beacons = @@ -723,7 +723,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, else expected_size = sizeof(struct iwl_notif_statistics_v10); } else { - expected_size = sizeof(struct iwl_notif_statistics_cdb); + expected_size = sizeof(struct iwl_notif_statistics); } if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size, @@ -753,7 +753,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, flags = stats->flag; } else { - struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; + struct iwl_notif_statistics *stats = (void *)&pkt->data; data.mac_id = stats->rx.general.mac_id; data.beacon_filter_average_energy = @@ -792,7 +792,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, bytes = (void *)&v11->load_stats.byte_count; air_time = (void *)&v11->load_stats.air_time; } else { - struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data; + struct iwl_notif_statistics *stats = (void *)&pkt->data; energy = (void *)&stats->load_stats.avg_energy; bytes = (void *)&stats->load_stats.byte_count; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 7bd8676508f5..1e03acf30762 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -66,11 +66,37 @@ #include "mvm.h" #include "fw-api.h" +static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + u8 *data = skb->data; + + /* Alignment concerns */ + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4); + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4); + BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4); + BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4); + + if (rx_status->flag & RX_FLAG_RADIOTAP_HE) + data += sizeof(struct ieee80211_radiotap_he); + if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) + data += sizeof(struct ieee80211_radiotap_he_mu); + if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG) + data += sizeof(struct ieee80211_radiotap_lsig); + if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) { + struct ieee80211_vendor_radiotap *radiotap = (void *)data; + + data += sizeof(*radiotap) + radiotap->len + radiotap->pad; + } + + return data; +} + static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); struct iwl_mvm_key_pn *ptk_pn; int res; @@ -192,27 +218,50 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, } } +static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_vendor_radiotap *radiotap; + const int size = sizeof(*radiotap) + sizeof(__le16); + + if (!mvm->cur_aid) + return; + + /* ensure alignment */ + BUILD_BUG_ON((size + 2) % 4); + + radiotap = skb_put(skb, size + 2); + radiotap->align = 1; + /* Intel OUI */ + radiotap->oui[0] = 0xf6; + radiotap->oui[1] = 0x54; + radiotap->oui[2] = 0x25; + /* radiotap sniffer config sub-namespace */ + radiotap->subns = 1; + radiotap->present = 0x1; + 
radiotap->len = size - sizeof(*radiotap); + radiotap->pad = 2; + + /* fill the data now */ + memcpy(radiotap->data, &mvm->cur_aid, sizeof(mvm->cur_aid)); + /* and clear the padding */ + memset(radiotap->data + sizeof(__le16), 0, radiotap->pad); + + rx_status->flag |= RX_FLAG_RADIOTAP_VENDOR_DATA; +} + /* iwl_mvm_pass_packet_to_mac80211 - passes the packet for mac80211 */ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, int queue, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + bool csi) { - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - - if (!(rx_status->flag & RX_FLAG_NO_PSDU) && - iwl_mvm_check_pn(mvm, skb, queue, sta)) { + if (iwl_mvm_check_pn(mvm, skb, queue, sta)) kfree_skb(skb); - } else { - unsigned int radiotap_len = 0; - - if (rx_status->flag & RX_FLAG_RADIOTAP_HE) - radiotap_len += sizeof(struct ieee80211_radiotap_he); - if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) - radiotap_len += sizeof(struct ieee80211_radiotap_he_mu); - __skb_push(skb, radiotap_len); + else ieee80211_rx_napi(mvm->hw, sta, skb, napi); - } } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, @@ -289,7 +338,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, stats->flag |= RX_FLAG_MMIC_ERROR; *crypt_len = IEEE80211_TKIP_IV_LEN; - /* fall through if TTAK OK */ + /* fall through */ case IWL_RX_MPDU_STATUS_SEC_WEP: if (!(status & IWL_RX_MPDU_STATUS_ICV_OK)) return -1; @@ -473,7 +522,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, while ((skb = __skb_dequeue(skb_list))) { iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, reorder_buf->queue, - sta); + sta, false); reorder_buf->num_stored--; } } @@ -642,7 +691,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb); struct iwl_mvm_sta *mvm_sta; struct iwl_mvm_baid_data *baid_data; struct iwl_mvm_reorder_buffer *buffer; @@ -666,6 +715,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, * This also covers the case of receiving a Block Ack Request * outside a BA session; we'll pass it to mac80211 and that * then sends a delBA action frame. + * This also covers pure monitor mode, in which case we won't + * have any BA sessions. */ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) return false; @@ -937,6 +988,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, * the TSF/timers are not be transmitted in HE-MU. 
*/ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; @@ -979,19 +1031,27 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); - if (he_mu) { #define CHECK_BW(bw) \ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \ + RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS) - CHECK_BW(20); - CHECK_BW(40); - CHECK_BW(80); - CHECK_BW(160); + CHECK_BW(20); + CHECK_BW(40); + CHECK_BW(80); + CHECK_BW(160); + + if (he_mu) he_mu->flags2 |= le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - } + else if (he_type == RATE_MCS_HE_TYPE_TRIG) + he->data6 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, + rate_n_flags), + IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, @@ -1014,16 +1074,16 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d2, IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); /* fall through */ @@ -1033,7 +1093,6 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, case IWL_RX_PHY_INFO_TYPE_HE_TB: /* HE common */ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | @@ -1053,9 +1112,6 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM), IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); - he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, - IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), - IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK), IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); @@ -1074,6 +1130,20 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, break; } + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_MU: + case IWL_RX_PHY_INFO_TYPE_HE_SU: + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN); + he->data4 |= 
le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); + break; + default: + /* nothing here */ + break; + } + switch (phy_data->info_type) { case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: he_mu->flags1 |= @@ -1142,30 +1212,22 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), }; - unsigned int radiotap_len = 0; he = skb_put_data(skb, &known, sizeof(known)); - radiotap_len += sizeof(known); rx_status->flag |= RX_FLAG_RADIOTAP_HE; if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU || phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) { he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); - radiotap_len += sizeof(mu_known); rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; } - /* temporarily hide the radiotap data */ - __skb_pull(skb, radiotap_len); - - if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_SU) { - /* report the AMPDU-EOF bit on single frames */ - if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { - rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) - rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; - } + /* report the AMPDU-EOF bit on single frames */ + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) @@ -1178,9 +1240,7 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; /* toggle is switched whenever new aggregation starts */ - if (toggle_bit != mvm->ampdu_toggle && - (he_type == RATE_MCS_HE_TYPE_MU || - he_type == RATE_MCS_HE_TYPE_SU)) { + if (toggle_bit != mvm->ampdu_toggle) { rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; @@ -1314,6 +1374,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, .d4 = desc->phy_data4, .info_type = IWL_RX_PHY_INFO_TYPE_NONE, }; + bool csi = false; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; @@ -1412,7 +1473,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } /* set the preamble flag if appropriate */ - if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) + if (rate_n_flags & RATE_MCS_CCK_MSK && + phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { @@ -1441,14 +1503,23 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; rx_status->flag |= RX_FLAG_AMPDU_DETAILS; - rx_status->ampdu_reference = mvm->ampdu_ref; - /* toggle is switched whenever new aggregation starts */ + /* + * Toggle is switched whenever new aggregation starts. Make + * sure ampdu_reference is never 0 so we can later use it to + * see if the frame was really part of an A-MPDU or not. 
+ */ if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; + if (mvm->ampdu_ref == 0) + mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; } + rx_status->ampdu_reference = mvm->ampdu_ref; } + if (unlikely(mvm->monitor_on)) + iwl_mvm_add_rtap_sniffer_config(mvm, skb); + rcu_read_lock(); if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { @@ -1602,7 +1673,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, + sta, csi); out: rcu_read_unlock(); } @@ -1705,15 +1777,24 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, } else if (rate_n_flags & RATE_MCS_VHT_MSK) { u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS; - rx_status->nss = - ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> - RATE_VHT_MCS_NSS_POS) + 1; rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; rx_status->encoding = RX_ENC_VHT; rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; - } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) { + /* + * take the nss from the rx_vec since the rate_n_flags has + * only 2 bits for the nss which gives a max of 4 ss but + * there may be up to 8 spatial streams + */ + rx_status->nss = + le32_get_bits(desc->rx_vec[0], + RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1; + } else if (rate_n_flags & RATE_MCS_HE_MSK) { + rx_status->nss = + le32_get_bits(desc->rx_vec[0], + RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1; + } else { int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); @@ -1726,7 +1807,7 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->rate_idx = rate; } - iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); + ieee80211_rx_napi(mvm->hw, sta, skb, napi); out: rcu_read_unlock(); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 86d598d5b68f..78694bc38e76 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1580,6 +1580,11 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) * scheduled scan before starting a normal scan. 
*/ + /* FW supports only a single periodic scan */ + if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) && + mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT)) + return -EBUSY; + if (iwl_mvm_num_scans(mvm) < mvm->max_scans) return 0; @@ -1616,10 +1621,10 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); - - /* fall through, something is wrong if no scan was - * running but we ran out of scans. + /* Something is wrong if no scan was running but we + * ran out of scans. */ + /* fall through */ default: WARN_ON(1); break; @@ -1976,9 +1981,8 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type) return ret; } - ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ); - - return ret; + return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, + 1 * HZ); } int iwl_mvm_scan_size(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index d1d76bb9a750..9da0dae78510 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -7,6 +7,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -64,7 +66,7 @@ struct iwl_mvm_active_iface_iterator_data { struct ieee80211_vif *ignore_vif; u8 sta_vif_ap_sta_id; enum iwl_sf_state sta_vif_state; - int num_active_macs; + u32 num_active_macs; }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index e28009832da0..498c315291cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -314,7 +314,6 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, struct iwl_mvm_sta *mvmsta; u32 status; u8 sta_id; - int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; @@ -349,31 +348,21 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, /* Notify FW of queue removal from the STA queues */ status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, - iwl_mvm_add_sta_cmd_size(mvm), - &cmd, &status); - - return ret; + return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), + &cmd, &status); } -static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u8 tid, u8 flags) +static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int queue, u8 tid, u8 flags) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; - bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE; int ret; - if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES)) - return -EINVAL; - if (iwl_mvm_has_new_tx_api(mvm)) { - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - iwl_trans_txq_free(mvm->trans, queue); return 0; @@ -384,36 +373,15 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); - /* - * If there is another TID with the same AC - don't remove the MAC queue - * from the mapping - */ - if (tid < IWL_MAX_TID_COUNT) { - unsigned long tid_bitmap = - mvm->queue_info[queue].tid_bitmap; - int ac = tid_to_mac80211_ac[tid]; - int i; - - for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) { - if (tid_to_mac80211_ac[i] == ac) - remove_mac_queue = false; - } - } - - if (remove_mac_queue) - mvm->hw_queue_to_mac80211[queue] &= - ~BIT(mac80211_queue); - cmd.action = mvm->queue_info[queue].tid_bitmap ? 
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE; if (cmd.action == SCD_CFG_DISABLE_QUEUE) mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE; IWL_DEBUG_TX_QUEUES(mvm, - "Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", + "Disabling TXQ #%d tids=0x%x\n", queue, - mvm->queue_info[queue].tid_bitmap, - mvm->hw_queue_to_mac80211[queue]); + mvm->queue_info[queue].tid_bitmap); /* If the queue is still enabled - nothing left to do in this func */ if (cmd.action == SCD_CFG_ENABLE_QUEUE) @@ -423,15 +391,19 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, cmd.tid = mvm->queue_info[queue].txq_tid; /* Make sure queue info is correct even though we overwrite it */ - WARN(mvm->queue_info[queue].tid_bitmap || - mvm->hw_queue_to_mac80211[queue], - "TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n", - queue, mvm->hw_queue_to_mac80211[queue], - mvm->queue_info[queue].tid_bitmap); + WARN(mvm->queue_info[queue].tid_bitmap, + "TXQ #%d info out-of-sync - tids=0x%x\n", + queue, mvm->queue_info[queue].tid_bitmap); /* If we are here - the queue is freed and we can zero out these vals */ mvm->queue_info[queue].tid_bitmap = 0; - mvm->hw_queue_to_mac80211[queue] = 0; + + if (sta) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + } /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; @@ -517,9 +489,14 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) spin_lock_bh(&mvmsta->lock); /* Unmap MAC queues and TIDs from this queue */ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); + if (mvmsta->tid_data[tid].state == IWL_AGG_ON) disable_agg_tids |= BIT(tid); mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; } mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ @@ -541,10 +518,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) } static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, + struct ieee80211_sta *old_sta, u8 new_sta_id) { struct iwl_mvm_sta *mvmsta; - u8 txq_curr_ac, sta_id, tid; + u8 sta_id, tid; unsigned long disable_agg_tids = 0; bool same_sta; int ret; @@ -554,7 +532,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; - txq_curr_ac = mvm->queue_info[queue].mac80211_ac; sta_id = mvm->queue_info[queue].ra_sta_id; tid = mvm->queue_info[queue].txq_tid; @@ -570,9 +547,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, false); - ret = iwl_mvm_disable_txq(mvm, queue, - mvmsta->vif->hw_queue[txq_curr_ac], - tid, 0); + ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0); if (ret) { IWL_ERR(mvm, "Failed to free inactive queue %d (ret=%d)\n", @@ -662,16 +637,15 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, * in such a case, otherwise - if no redirection required - it does nothing, * unless the %force param is true. 
*/ -static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, - int ac, int ssn, unsigned int wdg_timeout, - bool force) +static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid, + int ac, int ssn, unsigned int wdg_timeout, + bool force, struct iwl_mvm_txq *txq) { struct iwl_scd_txq_cfg_cmd cmd = { .scd_queue = queue, .action = SCD_CFG_DISABLE_QUEUE, }; bool shared_queue; - unsigned long mq; int ret; if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) @@ -695,14 +669,14 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; cmd.tid = mvm->queue_info[queue].txq_tid; - mq = mvm->hw_queue_to_mac80211[queue]; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); - /* Stop MAC queues and wait for this queue to empty */ - iwl_mvm_stop_mac_queues(mvm, mq); + /* Stop the queue and wait for it to empty */ + txq->stopped = true; + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue)); if (ret) { IWL_ERR(mvm, "Error draining queue %d before reconfig\n", @@ -743,8 +717,8 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); out: - /* Continue using the MAC queues */ - iwl_mvm_start_mac_queues(mvm, mq); + /* Continue using the queue */ + txq->stopped = false; return ret; } @@ -769,14 +743,15 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, return -ENOSPC; } -static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, +static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, u8 sta_id, u8 tid, unsigned int timeout) { int queue, size = IWL_DEFAULT_QUEUE_SIZE; if (tid == IWL_MAX_TID_COUNT) { tid = IWL_MGMT_TID; - size = IWL_MGMT_QUEUE_SIZE; + size = max_t(u32, IWL_MGMT_QUEUE_SIZE, + mvm->trans->cfg->min_txq_size); } queue = iwl_trans_txq_alloc(mvm->trans, cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE), @@ -792,10 +767,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n", queue, sta_id, tid); - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d (mac80211 map:0x%x)\n", - queue, mvm->hw_queue_to_mac80211[queue]); + IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue); return queue; } @@ -805,9 +777,10 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); - u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; lockdep_assert_held(&mvm->mutex); @@ -815,11 +788,14 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue for sta %d on tid %d\n", mvmsta->sta_id, tid); - queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid, - wdg_timeout); + queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout); if (queue < 0) return queue; + mvmtxq->txq_id = queue; + mvm->tvqm_info[queue].txq_tid = tid; + mvm->tvqm_info[queue].sta_id = mvmsta->sta_id; + IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue); spin_lock_bh(&mvmsta->lock); @@ -829,8 +805,9 @@ static int 
iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm, return 0; } -static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u8 sta_id, u8 tid) +static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + int queue, u8 sta_id, u8 tid) { bool enable_queue = true; @@ -845,14 +822,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, if (mvm->queue_info[queue].tid_bitmap) enable_queue = false; - if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) { - WARN(mac80211_queue >= - BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]), - "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n", - mac80211_queue, queue, sta_id, tid); - mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue); - } - mvm->queue_info[queue].tid_bitmap |= BIT(tid); mvm->queue_info[queue].ra_sta_id = sta_id; @@ -866,16 +835,22 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, mvm->queue_info[queue].txq_tid = tid; } + if (sta) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_tid(sta, tid); + + mvmtxq->txq_id = queue; + } + IWL_DEBUG_TX_QUEUES(mvm, - "Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n", - queue, mvm->queue_info[queue].tid_bitmap, - mvm->hw_queue_to_mac80211[queue]); + "Enabling TXQ #%d tids=0x%x\n", + queue, mvm->queue_info[queue].tid_bitmap); return enable_queue; } -static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, - int mac80211_queue, u16 ssn, +static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg, unsigned int wdg_timeout) { @@ -895,8 +870,7 @@ static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, return false; /* Send the enabling command if we need to */ - if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue, - cfg->sta_id, cfg->tid)) + if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid)) return false; inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, @@ -989,9 +963,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); - ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, - tid_to_mac80211_ac[tid], ssn, - wdg_timeout, true); + ret = iwl_mvm_redirect_queue(mvm, queue, tid, + tid_to_mac80211_ac[tid], ssn, + wdg_timeout, true, + iwl_mvm_txq_from_tid(sta, tid)); if (ret) { IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue); return; @@ -1068,11 +1043,9 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * Remove the ones that did. 
*/ for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; u16 tid_bitmap; mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE; - mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue); mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); tid_bitmap = mvm->queue_info[queue].tid_bitmap; @@ -1105,10 +1078,6 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, * sure all TIDs have existing corresponding mac queues enabled */ tid_bitmap = mvm->queue_info[queue].tid_bitmap; - for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { - mvm->hw_queue_to_mac80211[queue] |= - BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); - } /* If the queue is marked as shared - "unshare" it */ if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 && @@ -1136,6 +1105,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) unsigned long unshare_queues = 0; unsigned long changetid_queues = 0; int i, ret, free_queue = -ENOSPC; + struct ieee80211_sta *queue_owner = NULL; lockdep_assert_held(&mvm->mutex); @@ -1201,13 +1171,14 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) inactive_tid_bitmap, &unshare_queues, &changetid_queues); - if (ret >= 0 && free_queue < 0) + if (ret >= 0 && free_queue < 0) { + queue_owner = sta; free_queue = ret; + } /* only unlock sta lock - we still need the queue info lock */ spin_unlock_bh(&mvmsta->lock); } - rcu_read_unlock(); /* Reconfigure queues requiring reconfiguation */ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) @@ -1216,18 +1187,21 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) iwl_mvm_change_queue_tid(mvm, i); if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) { - ret = iwl_mvm_free_inactive_queue(mvm, free_queue, + ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner, alloc_for_sta); - if (ret) + if (ret) { + rcu_read_unlock(); return ret; + } } + rcu_read_unlock(); + return free_queue; } static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, u8 ac, int tid, - struct ieee80211_hdr *hdr) + struct ieee80211_sta *sta, u8 ac, int tid) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { @@ -1238,7 +1212,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, }; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); - u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; unsigned long disable_agg_tids = 0; enum iwl_mvm_agg_state queue_state; @@ -1257,12 +1230,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); spin_unlock_bh(&mvmsta->lock); - /* - * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one - * exists - */ - if (!ieee80211_is_data_qos(hdr->frame_control) || - ieee80211_is_qos_nullfunc(hdr->frame_control)) { + if (tid == IWL_MAX_TID_COUNT) { queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); @@ -1341,8 +1309,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } } - inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, - ssn, &cfg, wdg_timeout); + inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* * Mark queue as shared in transport if shared @@ -1384,8 +1351,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } } else { /* Redirect queue, if needed */ - ret = 
iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, - wdg_timeout, false); + ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn, + wdg_timeout, false, + iwl_mvm_txq_from_tid(sta, tid)); if (ret) goto out_err; } @@ -1393,7 +1361,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, return 0; out_err: - iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); + iwl_mvm_disable_txq(mvm, sta, queue, tid, 0); return ret; } @@ -1406,87 +1374,32 @@ static inline u8 iwl_mvm_tid_to_ac_queue(int tid) return tid_to_mac80211_ac[tid]; } -static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, int tid) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - struct sk_buff_head deferred_tx; - u8 mac_queue; - bool no_queue = false; /* Marks if there is a problem with the queue */ - u8 ac; - - lockdep_assert_held(&mvm->mutex); - - skb = skb_peek(&tid_data->deferred_tx_frames); - if (!skb) - return; - hdr = (void *)skb->data; - - ac = iwl_mvm_tid_to_ac_queue(tid); - mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; - - if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE && - iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { - IWL_ERR(mvm, - "Can't alloc TXQ for sta %d tid %d - dropping frame\n", - mvmsta->sta_id, tid); - - /* - * Mark queue as problematic so later the deferred traffic is - * freed, as we can do nothing with it - */ - no_queue = true; - } - - __skb_queue_head_init(&deferred_tx); - - /* Disable bottom-halves when entering TX path */ - local_bh_disable(); - spin_lock(&mvmsta->lock); - skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); - mvmsta->deferred_traffic_tid_map &= ~BIT(tid); - spin_unlock(&mvmsta->lock); - - while ((skb = __skb_dequeue(&deferred_tx))) - if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) - ieee80211_free_txskb(mvm->hw, skb); - local_bh_enable(); - - /* Wake queue */ - iwl_mvm_start_mac_queues(mvm, BIT(mac_queue)); -} - void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, add_stream_wk); - struct ieee80211_sta *sta; - struct iwl_mvm_sta *mvmsta; - unsigned long deferred_tid_traffic; - int sta_id, tid; mutex_lock(&mvm->mutex); iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); - /* Go over all stations with deferred traffic */ - for_each_set_bit(sta_id, mvm->sta_deferred_frames, - IWL_MVM_STATION_COUNT) { - clear_bit(sta_id, mvm->sta_deferred_frames); - sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - if (IS_ERR_OR_NULL(sta)) - continue; + while (!list_empty(&mvm->add_stream_txqs)) { + struct iwl_mvm_txq *mvmtxq; + struct ieee80211_txq *txq; + u8 tid; - mvmsta = iwl_mvm_sta_from_mac80211(sta); - deferred_tid_traffic = mvmsta->deferred_traffic_tid_map; + mvmtxq = list_first_entry(&mvm->add_stream_txqs, + struct iwl_mvm_txq, list); + + txq = container_of((void *)mvmtxq, struct ieee80211_txq, + drv_priv); + tid = txq->tid; + if (tid == IEEE80211_NUM_TIDS) + tid = IWL_MAX_TID_COUNT; - for_each_set_bit(tid, &deferred_tid_traffic, - IWL_MAX_TID_COUNT + 1) - iwl_mvm_tx_deferred_stream(mvm, sta, tid); + iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); + list_del_init(&mvmtxq->list); + iwl_mvm_mac_itxq_xmit(mvm->hw, txq); } mutex_unlock(&mvm->mutex); @@ -1542,10 +1455,11 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, * Note that re-enabling aggregations isn't done in this function. 
*/ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta) + struct ieee80211_sta *sta) { - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + unsigned int wdg = + iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false); int i; struct iwl_trans_txq_scd_cfg cfg = { .sta_id = mvm_sta->sta_id, @@ -1561,23 +1475,18 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i]; int txq_id = tid_data->txq_id; int ac; - u8 mac_queue; if (txq_id == IWL_MVM_INVALID_QUEUE) continue; - skb_queue_head_init(&tid_data->deferred_tx_frames); - ac = tid_to_mac80211_ac[i]; - mac_queue = mvm_sta->vif->hw_queue[ac]; if (iwl_mvm_has_new_tx_api(mvm)) { IWL_DEBUG_TX_QUEUES(mvm, "Re-mapping sta %d tid %d\n", mvm_sta->sta_id, i); - txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, - mvm_sta->sta_id, - i, wdg_timeout); + txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id, + i, wdg); tid_data->txq_id = txq_id; /* @@ -1600,8 +1509,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, "Re-mapping sta %d tid %d to queue %d\n", mvm_sta->sta_id, i, txq_id); - iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg, - wdg_timeout); + iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg); mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } @@ -1691,7 +1599,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (ret) goto err; - iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); + iwl_mvm_realloc_queues_after_restart(mvm, sta); sta_update = true; sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; goto update_fw; @@ -1724,9 +1632,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, * frames until the queue is allocated */ mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; - skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); } - mvm_sta->deferred_traffic_tid_map = 0; + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + INIT_LIST_HEAD(&mvmtxq->list); + atomic_set(&mvmtxq->tx_request, 0); + } + mvm_sta->agg_tids = 0; if (iwl_mvm_has_new_rx_api(mvm) && @@ -1861,9 +1777,9 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_sta *mvm_sta) + struct ieee80211_sta *sta) { - int ac; + struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); int i; lockdep_assert_held(&mvm->mutex); @@ -1872,11 +1788,17 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) continue; - ac = iwl_mvm_tid_to_ac_queue(i); - iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, - vif->hw_queue[ac], i, 0); + iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i, + 0); mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; } + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct iwl_mvm_txq *mvmtxq = + iwl_mvm_txq_from_mac80211(sta->txq[i]); + + mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; + } } int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, @@ -1938,7 +1860,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + iwl_mvm_disable_sta_queues(mvm, vif, sta); /* If there is a TXQ still marked as reserved - free it */ 
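The queue rework in this part of the patch stops tracking mac80211 queues in the driver and instead keeps per-TXQ state in the driver-private area of each ieee80211_txq; the add_stream worker then recovers the TXQ from that private area with container_of(). A stand-alone sketch of the drv_priv/container_of round trip, with invented struct names, is:

/*
 * Illustrative sketch only, not driver code.  The structs are stand-ins for
 * ieee80211_txq and iwl_mvm_txq; only the container_of() pattern itself
 * matches the patch.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_txq {
	int tid;
	char drv_priv[16];	/* driver-private area, like ieee80211_txq */
};

struct example_mvm_txq {
	int txq_id;		/* lives inside drv_priv */
};

int main(void)
{
	struct example_txq txq = { .tid = 3 };
	struct example_mvm_txq *mvmtxq = (void *)txq.drv_priv;
	struct example_txq *back;

	mvmtxq->txq_id = 7;
	/* go from the private data back to the embedding TXQ */
	back = container_of((void *)mvmtxq, struct example_txq, drv_priv);
	printf("tid=%d txq_id=%d\n", back->tid, mvmtxq->txq_id);
	return 0;
}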
if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { @@ -2044,7 +1966,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, if (iwl_mvm_has_new_tx_api(mvm)) { int tvqm_queue = - iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id, + iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT, wdg_timeout); *queue = tvqm_queue; @@ -2057,7 +1979,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, .frame_limit = IWL_FRAME_LIMIT, }; - iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout); + iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout); } } @@ -2135,8 +2057,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue, - IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) IWL_WARN(mvm, "Failed sending remove station\n"); @@ -2195,8 +2116,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) bsta->tfd_queue_msk |= BIT(queue); - iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, - &cfg, wdg_timeout); + iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); } if (vif->type == NL80211_IFTYPE_ADHOC) @@ -2215,8 +2135,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * to firmware so enable queue here - after the station was added */ if (iwl_mvm_has_new_tx_api(mvm)) { - queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0], - bsta->sta_id, + queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id, IWL_MAX_TID_COUNT, wdg_timeout); @@ -2254,7 +2173,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, return; } - iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0); + iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0); if (iwl_mvm_has_new_tx_api(mvm)) return; @@ -2377,10 +2296,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * Note that this is done here as we want to avoid making DQA * changes in mac80211 layer. */ - if (vif->type == NL80211_IFTYPE_ADHOC) { - vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; - mvmvif->cab_queue = vif->cab_queue; - } + if (vif->type == NL80211_IFTYPE_ADHOC) + mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; /* * While in previous FWs we had to exclude cab queue from TFD queue @@ -2388,9 +2305,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) */ if (!iwl_mvm_has_new_tx_api(mvm) && fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, - &cfg, timeout); - msta->tfd_queue_msk |= BIT(vif->cab_queue); + iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + timeout); + msta->tfd_queue_msk |= BIT(mvmvif->cab_queue); } ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, mvmvif->id, mvmvif->color); @@ -2407,24 +2324,25 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * tfd_queue_mask. 
*/ if (iwl_mvm_has_new_tx_api(mvm)) { - int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, - msta->sta_id, + int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id, 0, timeout); mvmvif->cab_queue = queue; } else if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, - &cfg, timeout); + iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, + timeout); if (mvmvif->ap_wep_key) { u8 key_offset = iwl_mvm_set_fw_key_idx(mvm); + __set_bit(key_offset, mvm->fw_key_table); + if (key_offset == STA_KEY_IDX_INVALID) return -ENOSPC; ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id, - mvmvif->ap_wep_key, 1, 0, NULL, 0, + mvmvif->ap_wep_key, true, 0, NULL, 0, key_offset, 0); if (ret) return ret; @@ -2433,6 +2351,59 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) return 0; } +static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, + struct ieee80211_key_conf *keyconf, + bool mcast) +{ + union { + struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; + struct iwl_mvm_add_sta_key_cmd cmd; + } u = {}; + bool new_api = fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TKIP_MIC_KEYS); + __le16 key_flags; + int ret, size; + u32 status; + + /* This is a valid situation for GTK removal */ + if (sta_id == IWL_MVM_INVALID_STA) + return 0; + + key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & + STA_KEY_FLG_KEYID_MSK); + key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); + key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); + + if (mcast) + key_flags |= cpu_to_le16(STA_KEY_MULTICAST); + + /* + * The fields assigned here are in the same location at the start + * of the command, so we can do this union trick. + */ + u.cmd.common.key_flags = key_flags; + u.cmd.common.key_offset = keyconf->hw_key_idx; + u.cmd.common.sta_id = sta_id; + + size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); + + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, + &status); + + switch (status) { + case ADD_STA_SUCCESS: + IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); + break; + default: + ret = -EIO; + IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); + break; + } + + return ret; +} + /* * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. 
@@ -2446,8 +2417,29 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); - iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, - 0, 0); + iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); + + if (mvmvif->ap_wep_key) { + int i; + + if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx, + mvm->fw_key_table)) { + IWL_ERR(mvm, "offset %d not used in fw key table.\n", + mvmvif->ap_wep_key->hw_key_idx); + return -ENOENT; + } + + /* track which key was deleted last */ + for (i = 0; i < STA_KEY_MAX_NUM; i++) { + if (mvm->fw_key_deleted[i] < U8_MAX) + mvm->fw_key_deleted[i]++; + } + mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0; + ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id, + mvmvif->ap_wep_key, true); + if (ret) + return ret; + } ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); if (ret) @@ -2781,7 +2773,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data; u16 normalized_ssn; - int txq_id; + u16 txq_id; int ret; if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) @@ -2823,17 +2815,24 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, */ txq_id = mvmsta->tid_data[tid].txq_id; if (txq_id == IWL_MVM_INVALID_QUEUE) { - txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, - IWL_MVM_DQA_MIN_DATA_QUEUE, - IWL_MVM_DQA_MAX_DATA_QUEUE); - if (txq_id < 0) { - ret = txq_id; + ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); + if (ret < 0) { IWL_ERR(mvm, "Failed to allocate agg queue\n"); goto out; } + txq_id = ret; + /* TXQ hasn't yet been enabled, so mark it only as reserved */ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; + } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) { + ret = -ENXIO; + IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n", + tid, IWL_MAX_HW_QUEUES - 1); + goto out; + } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; @@ -2976,8 +2975,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } if (alloc_queue) - iwl_mvm_enable_txq(mvm, queue, - vif->hw_queue[tid_to_mac80211_ac[tid]], ssn, + iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout); /* Send ADD_STA command to enable aggs only if the queue isn't shared */ @@ -3474,59 +3472,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, return ret; } -static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, - struct ieee80211_key_conf *keyconf, - bool mcast) -{ - union { - struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; - struct iwl_mvm_add_sta_key_cmd cmd; - } u = {}; - bool new_api = fw_has_api(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_API_TKIP_MIC_KEYS); - __le16 key_flags; - int ret, size; - u32 status; - - /* This is a valid situation for GTK removal */ - if (sta_id == IWL_MVM_INVALID_STA) - return 0; - - key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & - STA_KEY_FLG_KEYID_MSK); - key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); - key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); - - if (mcast) - key_flags |= cpu_to_le16(STA_KEY_MULTICAST); - - /* - * The fields assigned here are in the same location at the start - * of the command, so we can do this union trick. 
- */ - u.cmd.common.key_flags = key_flags; - u.cmd.common.key_offset = keyconf->hw_key_idx; - u.cmd.common.sta_id = sta_id; - - size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); - - status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, - &status); - - switch (status) { - case ADD_STA_SUCCESS: - IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); - break; - default: - ret = -EIO; - IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); - break; - } - - return ret; -} - int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index d52cd888f77d..79700c7310a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -297,7 +297,6 @@ enum iwl_mvm_agg_state { /** * struct iwl_mvm_tid_data - holds the states for each RA / TID - * @deferred_tx_frames: deferred TX frames for this RA/TID * @seq_number: the next WiFi sequence number to use * @next_reclaimed: the WiFi sequence number of the next packet to be acked. * This is basically (last acked packet++). @@ -318,7 +317,6 @@ enum iwl_mvm_agg_state { * tpt_meas_start */ struct iwl_mvm_tid_data { - struct sk_buff_head deferred_tx_frames; u16 seq_number; u16 next_reclaimed; /* The rest is Tx AGG related */ @@ -396,6 +394,7 @@ struct iwl_mvm_rxq_dup_data { * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data + * @wep_key: used in AP mode. Is a duplicate of the WEP key. * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID * @tx_ant: the index of the antenna to use for data tx to this station. Only * used during connection establishment (e.g. for the 4 way handshake @@ -427,7 +426,7 @@ struct iwl_mvm_sta { struct iwl_mvm_key_pn __rcu *ptk_pn[4]; struct iwl_mvm_rxq_dup_data *dup_data; - u16 deferred_traffic_tid_map; + struct ieee80211_key_conf *wep_key; u8 reserved_queue; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index e02f4eb20359..859aa5a4e6b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -399,6 +399,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, struct ieee80211_tx_info *info; struct ieee80211_hdr *hdr; struct iwl_tdls_channel_switch_cmd cmd = {0}; + struct iwl_tdls_channel_switch_cmd_tail *tail = + iwl_mvm_chan_info_cmd_tail(mvm, &cmd.ci); + u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm); int ret; lockdep_assert_held(&mvm->mutex); @@ -414,9 +417,9 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, } cmd.switch_type = type; - cmd.timing.frame_timestamp = cpu_to_le32(timestamp); - cmd.timing.switch_time = cpu_to_le32(switch_time); - cmd.timing.switch_timeout = cpu_to_le32(switch_timeout); + tail->timing.frame_timestamp = cpu_to_le32(timestamp); + tail->timing.switch_time = cpu_to_le32(switch_time); + tail->timing.switch_timeout = cpu_to_le32(switch_timeout); rcu_read_lock(); sta = ieee80211_find_sta(vif, peer); @@ -448,21 +451,16 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, } } - if (chandef) { - cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ? 
- PHY_BAND_24 : PHY_BAND_5); - cmd.ci.channel = chandef->chan->hw_value; - cmd.ci.width = iwl_mvm_get_channel_width(chandef); - cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef); - } + if (chandef) + iwl_mvm_set_chan_info_chandef(mvm, &cmd.ci, chandef); /* keep quota calculation simple for now - 50% of DTIM for TDLS */ - cmd.timing.max_offchan_duration = + tail->timing.max_offchan_duration = cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int) / 2); /* Switch time is the first element in the switch-timing IE. */ - cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); + tail->frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2); info = IEEE80211_SKB_CB(skb); hdr = (void *)skb->data; @@ -472,20 +470,19 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm, ret = -EINVAL; goto out; } - iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd); + iwl_mvm_set_tx_cmd_ccmp(info, &tail->frame.tx_cmd); } - iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info, + iwl_mvm_set_tx_cmd(mvm, skb, &tail->frame.tx_cmd, info, mvmsta->sta_id); - iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta, + iwl_mvm_set_tx_cmd_rate(mvm, &tail->frame.tx_cmd, info, sta, hdr->frame_control); rcu_read_unlock(); - memcpy(cmd.frame.data, skb->data, skb->len); + memcpy(tail->frame.data, skb->data, skb->len); - ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, - sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0, len, &cmd); if (ret) { IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index e1a6f4e22253..9693fa4cdc39 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -85,7 +85,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, { lockdep_assert_held(&mvm->time_event_lock); - if (!te_data->vif) + if (!te_data || !te_data->vif) return; list_del(&te_data->list); @@ -334,6 +334,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, switch (te_data->vif->type) { case NL80211_IFTYPE_P2P_DEVICE: ieee80211_remain_on_channel_expired(mvm->hw); + set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status); iwl_mvm_roc_finished(mvm); break; case NL80211_IFTYPE_STATION: @@ -686,6 +687,8 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data) { struct iwl_hs20_roc_req aux_cmd = {}; + u16 len = sizeof(aux_cmd) - iwl_mvm_chan_info_padding(mvm); + u32 uid; int ret; @@ -699,7 +702,7 @@ static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm, IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n", le32_to_cpu(aux_cmd.event_unique_id)); ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, - sizeof(aux_cmd), &aux_cmd); + len, &aux_cmd); if (WARN_ON(ret)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c deleted file mode 100644 index 01e0a999063b..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ /dev/null @@ -1,305 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. 
- * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#include "mvm.h" -#include "fw/api/tof.h" - -#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 - -void iwl_mvm_tof_init(struct iwl_mvm *mvm) -{ - struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return; - - memset(tof_data, 0, sizeof(*tof_data)); - - tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (IWL_MVM_TOF_IS_RESPONDER) { - tof_data->responder_cfg.sub_grp_cmd_id = - cpu_to_le32(TOF_RESPONDER_CONFIG_CMD); - tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA; - } -#endif - - tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD); - tof_data->range_req.req_timeout = 1; - tof_data->range_req.initiator = 1; - tof_data->range_req.report_policy = 3; - - tof_data->range_req_ext.sub_grp_cmd_id = - cpu_to_le32(TOF_RANGE_REQ_EXT_CMD); - - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE; -} - -void iwl_mvm_tof_clean(struct iwl_mvm *mvm) -{ - struct iwl_mvm_tof_data *tof_data = &mvm->tof_data; - - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_TOF_SUPPORT) || - !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE)) - return; - - memset(tof_data, 0, sizeof(*tof_data)); - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE; -} - -static void iwl_tof_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - bool *enabled = _data; - - /* non bss vif exists */ - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) - *enabled = false; -} - -int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm) -{ - struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg; - bool enabled; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_tof_iterator, &enabled); - if (!enabled) { - IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n"); - return -EINVAL; - } - - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(*cmd), cmd); -} - -int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id) -{ - struct iwl_tof_range_abort_cmd cmd = { - .sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD), - .request_id = id, - }; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (id != mvm->tof_data.active_range_request) { - IWL_ERR(mvm, "Invalid range request id %d (active %d)\n", - id, mvm->tof_data.active_range_request); - return -EINVAL; - } - - /* after abort is sent there's no active request anymore */ - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(cmd), &cmd); -} - -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - 
- if (vif->p2p || vif->type != NL80211_IFTYPE_AP || - !mvmvif->ap_ibss_active) { - IWL_ERR(mvm, "Cannot start responder, not in AP mode\n"); - return -EIO; - } - - cmd->sta_id = mvmvif->bcast_sta.sta_id; - memcpy(cmd->bssid, vif->addr, ETH_ALEN); - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(*cmd), cmd); -} -#endif - -int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0), - .len = { sizeof(mvm->tof_data.range_req), }, - /* no copy because of the command size */ - .dataflags = { IWL_HCMD_DFL_NOCOPY, }, - }; - - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { - IWL_ERR(mvm, "Cannot send range request, not STA mode\n"); - return -EIO; - } - - /* nesting of range requests is not supported in FW */ - if (mvm->tof_data.active_range_request != - IWL_MVM_TOF_RANGE_REQ_MAX_ID) { - IWL_ERR(mvm, "Cannot send range req, already active req %d\n", - mvm->tof_data.active_range_request); - return -EIO; - } - - mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id; - - cmd.data[0] = &mvm->tof_data.range_req; - return iwl_mvm_send_cmd(mvm, &cmd); -} - -int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) -{ - lockdep_assert_held(&mvm->mutex); - - if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT)) - return -EINVAL; - - if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { - IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n"); - return -EIO; - } - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(mvm->tof_data.range_req_ext), - &mvm->tof_data.range_req_ext); -} - -static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_range_rsp_ntfy *resp = (void *)data; - - if (resp->request_id != mvm->tof_data.active_range_request) { - IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n", - resp->request_id, mvm->tof_data.active_range_request); - return -EIO; - } - - memcpy(&mvm->tof_data.range_resp, resp, - sizeof(struct iwl_tof_range_rsp_ntfy)); - mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID; - - return 0; -} - -static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data; - - IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token); - return 0; -} - -static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data) -{ - struct iwl_tof_neighbor_report *report = - (struct iwl_tof_neighbor_report *)data; - - IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n", - report->bssid, report->request_token, report->status); - return 0; -} - -void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data; - - lockdep_assert_held(&mvm->mutex); - - switch (le32_to_cpu(resp->sub_grp_cmd_id)) { - case TOF_RANGE_RESPONSE_NOTIF: - iwl_mvm_tof_range_resp(mvm, resp->data); - break; - case TOF_MCSI_DEBUG_NOTIF: - iwl_mvm_tof_mcsi_notif(mvm, resp->data); - break; - case TOF_NEIGHBOR_REPORT_RSP_NOTIF: - iwl_mvm_tof_nb_report_notif(mvm, resp->data); - break; - default: - IWL_ERR(mvm, "Unknown sub-group command 0x%x\n", 
- resp->sub_grp_cmd_id); - break; - } -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h deleted file mode 100644 index 8138d0606c52..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h +++ /dev/null @@ -1,89 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ -#ifndef __tof_h__ -#define __tof_h__ - -#include "fw/api/tof.h" - -struct iwl_mvm_tof_data { - struct iwl_tof_config_cmd tof_cfg; - struct iwl_tof_range_req_cmd range_req; - struct iwl_tof_range_req_ext_cmd range_req_ext; -#ifdef CONFIG_IWLWIFI_DEBUGFS - struct iwl_tof_responder_config_cmd responder_cfg; -#endif - struct iwl_tof_range_rsp_ntfy range_resp; - u8 last_abort_id; - u16 active_range_request; -}; - -void iwl_mvm_tof_init(struct iwl_mvm *mvm); -void iwl_mvm_tof_clean(struct iwl_mvm *mvm); -int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm); -int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id); -int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); -int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -#ifdef CONFIG_IWLWIFI_DEBUGFS -int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm, - struct ieee80211_vif *vif); -#endif -#endif /* __tof_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 995fe2a6abbb..0c2aabc842f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -8,7 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -209,7 +209,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, u16 offload_assist = 0; u8 ac; - if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) || + (ieee80211_is_probe_resp(fc) && + !is_multicast_ether_addr(hdr->addr1))) tx_flags |= TX_CMD_FLG_ACK; else tx_flags &= ~TX_CMD_FLG_ACK; @@ -278,7 +280,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, } if (ieee80211_is_data(fc) && len > mvm->rts_threshold && - !is_multicast_ether_addr(ieee80211_get_DA(hdr))) + !is_multicast_ether_addr(hdr->addr1)) tx_flags |= TX_CMD_FLG_PROT_REQUIRE; if (fw_has_capa(&mvm->fw->ucode_capa, @@ -533,10 +535,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, /* * For data packets rate info comes from the fw. Only - * set rate/antenna during connection establishment. + * set rate/antenna during connection establishment or in case + * no station is given. 
*/ - if (sta && (!ieee80211_is_data(hdr->frame_control) || - mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) { + if (!sta || !ieee80211_is_data(hdr->frame_control) || + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) { flags |= IWL_TX_FLAGS_CMD_RATE; rate_n_flags = iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, @@ -602,11 +605,12 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, } static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, - struct ieee80211_tx_info *info, __le16 fc) + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr) { - struct iwl_mvm_vif *mvmvif; - - mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(info->control.vif); + __le16 fc = hdr->frame_control; switch (info->control.vif->type) { case NL80211_IFTYPE_AP: @@ -625,7 +629,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, (!ieee80211_is_bufferable_mmpdu(fc) || ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) return mvm->probe_queue; - if (info->hw_queue == info->control.vif->cab_queue) + + if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) && + is_multicast_ether_addr(hdr->addr1)) return mvmvif->cab_queue; WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, @@ -634,8 +640,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, case NL80211_IFTYPE_P2P_DEVICE: if (ieee80211_is_mgmt(fc)) return mvm->p2p_dev_queue; - if (info->hw_queue == info->control.vif->cab_queue) - return mvmvif->cab_queue; WARN_ON_ONCE(1); return mvm->p2p_dev_queue; @@ -713,18 +717,18 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; + bool offchannel = IEEE80211_SKB_CB(skb)->flags & + IEEE80211_TX_CTL_TX_OFFCHAN; int queue = -1; + if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) + return -1; + memcpy(&info, skb->cb, sizeof(info)); if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) return -1; - if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && - (!info.control.vif || - info.hw_queue != info.control.vif->cab_queue))) - return -1; - if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); @@ -737,14 +741,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) else sta_id = mvmvif->mcast_sta.sta_id; - queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, - hdr->frame_control); - + queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr); } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && - info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) { + offchannel) { /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets * that can be used in 2 different types of vifs, P2P & @@ -758,8 +760,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } } - if (queue < 0) + if (queue < 0) { + IWL_ERR(mvm, "No queue was found. 
Dropping TX\n"); return -1; + } if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); @@ -781,6 +785,35 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return 0; } +unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, unsigned int tid) +{ + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; + u8 ac = tid_to_mac80211_ac[tid]; + unsigned int txf; + int lmac = IWL_LMAC_24G_INDEX; + + if (iwl_mvm_is_cdb_supported(mvm) && + band == NL80211_BAND_5GHZ) + lmac = IWL_LMAC_5G_INDEX; + + /* For HE redirect to trigger based fifos */ + if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) + ac += 4; + + txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); + + /* + * Don't send an AMSDU that will be longer than the TXF. + * Add a security margin of 256 for the TX command + headers. + * We also want to have the start of the next packet inside the + * fifo to be able to send bursts. + */ + return min_t(unsigned int, mvmsta->max_amsdu_len, + mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); +} + #ifdef CONFIG_INET static int @@ -850,36 +883,6 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, return 0; } -static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm, - struct ieee80211_sta *sta, - unsigned int tid) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band; - u8 ac = tid_to_mac80211_ac[tid]; - unsigned int txf; - int lmac = IWL_LMAC_24G_INDEX; - - if (iwl_mvm_is_cdb_supported(mvm) && - band == NL80211_BAND_5GHZ) - lmac = IWL_LMAC_5G_INDEX; - - /* For HE redirect to trigger based fifos */ - if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm))) - ac += 4; - - txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); - - /* - * Don't send an AMSDU that will be longer than the TXF. - * Add a security margin of 256 for the TX command + headers. - * We also want to have the start of the next packet inside the - * fifo to be able to send bursts. - */ - return min_t(unsigned int, mvmsta->max_amsdu_len, - mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); -} - static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, @@ -1002,34 +1005,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, } #endif -static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvm_sta, u8 tid, - struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - u8 mac_queue = info->hw_queue; - struct sk_buff_head *deferred_tx_frames; - - lockdep_assert_held(&mvm_sta->lock); - - mvm_sta->deferred_traffic_tid_map |= BIT(tid); - set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames); - - deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames; - - skb_queue_tail(deferred_tx_frames, skb); - - /* - * The first deferred frame should've stopped the MAC queues, so we - * should never get a second deferred frame for the RA/TID. - * In case of GSO the first packet may have been split, so don't warn. 
- */ - if (skb_queue_len(deferred_tx_frames) == 1) { - iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue)); - schedule_work(&mvm->add_stream_wk); - } -} - /* Check if there are any timed-out TIDs on a given shared TXQ */ static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) { @@ -1054,7 +1029,12 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, int airtime) { int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; - struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + struct iwl_mvm_tcm_mac *mdata; + + if (mac >= NUM_MAC_INDEX_DRIVER) + return; + + mdata = &mvm->tcm.data[mac]; if (mvm->tcm.paused) return; @@ -1065,14 +1045,21 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm, mdata->tx.airtime += airtime; } -static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, - struct iwl_mvm_sta *mvmsta, int tid) +static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, int tid) { u32 ac = tid_to_mac80211_ac[tid]; int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK; - struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac]; + struct iwl_mvm_tcm_mac *mdata; + + if (mac >= NUM_MAC_INDEX_DRIVER) + return -EINVAL; + + mdata = &mvm->tcm.data[mac]; mdata->tx.pkts[ac]++; + + return 0; } /* @@ -1088,7 +1075,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; - u16 txq_id = info->hw_queue; + u16 txq_id; bool is_ampdu = false; int hdrlen; @@ -1096,6 +1083,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); + if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) + return -1; + if (WARN_ON_ONCE(!mvmsta)) return -1; @@ -1125,12 +1115,14 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, */ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) { tid = ieee80211_get_tid(hdr); - if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid)) goto drop_unlock_sta; is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU; - if (WARN_ON_ONCE(is_ampdu && - mvmsta->tid_data[tid].state != IWL_AGG_ON)) + if (WARN_ONCE(is_ampdu && + mvmsta->tid_data[tid].state != IWL_AGG_ON, + "Invalid internal agg state %d for TID %d", + mvmsta->tid_data[tid].state, tid)) goto drop_unlock_sta; seq_number = mvmsta->tid_data[tid].seq_number; @@ -1152,14 +1144,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); - /* Check if TXQ needs to be allocated or re-activated */ - if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) { - iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); - - /* - * The frame is now deferred, and the worker scheduled - * will re-allocate it, so we can free it for now. - */ + if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return 0; @@ -1199,7 +1184,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid); + if (iwl_mvm_tx_pkt_queued(mvm, mvmsta, + tid == IWL_MAX_TID_COUNT ? 
0 : tid)) + goto drop; return 0; @@ -1207,6 +1194,7 @@ drop_unlock_sta: iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); drop: + IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->sta_id, tid); return -1; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index d116c6ae18ff..4649327abb45 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -248,7 +248,7 @@ void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n", le16_to_cpu(err_resp->bad_cmd_seq_num), le32_to_cpu(err_resp->error_service)); - IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n", + IWL_ERR(mvm, "FW Error notification: timestamp 0x%016llX\n", le64_to_cpu(err_resp->timestamp)); } @@ -294,6 +294,7 @@ static const struct { { "SYSASSERT", 0x35 }, { "UCODE_VERSION_MISMATCH", 0x37 }, { "BAD_COMMAND", 0x38 }, + { "BAD_COMMAND", 0x39 }, { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C }, { "FATAL_ERROR", 0x3D }, { "NMI_TRM_HW_ERR", 0x46 }, @@ -456,12 +457,17 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) { struct iwl_trans *trans = mvm->trans; struct iwl_umac_error_event_table table; + u32 base = mvm->trans->umac_error_event_table; - if (!mvm->support_umac_log) + if (!mvm->support_umac_log && + !(mvm->trans->error_event_table_tlv_status & + IWL_ERROR_EVENT_TABLE_UMAC)) return; - iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table, - sizeof(table)); + iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + + if (table.valid) + mvm->fwrt.dump.umac_err_id = table.error_id; if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); @@ -486,11 +492,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref); } -static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) +static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num) { struct iwl_trans *trans = mvm->trans; struct iwl_error_event_table table; - u32 val; + u32 val, base = mvm->trans->lmac_error_event_table[lmac_num]; if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) @@ -519,29 +525,15 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) /* reset the device */ iwl_trans_sw_reset(trans); - /* set INIT_DONE flag */ - iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); - - /* and wait for clock stabilization */ - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) - udelay(2); - - err = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_clock_ready), - BIT(trans->cfg->csr->flag_mac_clock_ready), - 25000); - if (err < 0) { - IWL_DEBUG_INFO(trans, - "Failed to reset the card for the dump\n"); + err = iwl_finish_nic_init(trans); + if (err) return; - } } iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); if (table.valid) - mvm->fwrt.dump.rt_status = table.error_id; + mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id; if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); @@ -598,10 +590,10 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) return; } - iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]); + iwl_mvm_dump_lmac_error_log(mvm, 0); - if (mvm->error_event_table[1]) - iwl_mvm_dump_lmac_error_log(mvm, 
mvm->error_event_table[1]); + if (mvm->trans->lmac_error_event_table[1]) + iwl_mvm_dump_lmac_error_log(mvm, 1); iwl_mvm_dump_umac_error_log(mvm); } @@ -1133,19 +1125,14 @@ static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) "AP isn't using AMPDU with uAPSD enabled"); } -static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) +static void iwl_mvm_uapsd_agg_disconnect(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_mvm *mvm = mvmvif->mvm; - int *mac_id = data; if (vif->type != NL80211_IFTYPE_STATION) return; - if (mvmvif->id != *mac_id) - return; - if (!vif->bss_conf.assoc) return; @@ -1155,10 +1142,10 @@ static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac, !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) return; - if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected) + if (mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected) return; - mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true; + mvm->tcm.data[mvmvif->id].uapsd_nonagg_detect.detected = true; IWL_INFO(mvm, "detected AP should do aggregation but isn't, likely due to U-APSD\n"); schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ); @@ -1171,6 +1158,7 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes; u64 tpt; unsigned long rate; + struct ieee80211_vif *vif; rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate); @@ -1199,9 +1187,11 @@ static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm, return; } - ieee80211_iterate_active_interfaces_atomic( - mvm->hw, IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_uapsd_agg_disconnect_iter, &mac); + rcu_read_lock(); + vif = rcu_dereference(mvm->vif_id_to_mac[mac]); + if (vif) + iwl_mvm_uapsd_agg_disconnect(mvm, vif); + rcu_read_unlock(); } static void iwl_mvm_tcm_iterator(void *_data, u8 *mac, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index ceb3aa03d561..1e36459948db 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,7 +18,7 @@ * * BSD LICENSE * - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -66,6 +66,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, void *iml_img; u32 control_flags = 0; int ret; + int cmdq_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size); /* Allocate prph scratch */ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), @@ -151,7 +152,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = - cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS)); + cpu_to_le16(TFD_QUEUE_CB_SIZE(cmdq_size)); ctxt_info_gen3->mcr_size = cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE)); @@ -175,8 +176,13 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_write64(trans, CSR_IML_DATA_ADDR, trans_pcie->iml_dma_addr); iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); - iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); - iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); + + iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, + CSR_AUTO_FUNC_BOOT_ENA); + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1); + else + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 7f4aaa810ea1..9274e317cc77 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -59,8 +59,7 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + struct iwl_self_init_dram *dram = &trans->init_dram; int i; if (!dram->paging) { @@ -83,8 +82,7 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans, const struct fw_img *fw, struct iwl_context_info_dram *ctxt_dram) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + struct iwl_self_init_dram *dram = &trans->init_dram; int i, ret, lmac_cnt, umac_cnt, paging_cnt; if (WARN(dram->paging, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 353581ccc01e..2b94e4cef56c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -8,7 +8,7 @@ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016-2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * All rights reserved. 
* Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -513,10 +513,12 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ - {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0040, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -525,23 +527,26 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0040, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -550,26 +555,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)}, 
{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)}, @@ -593,24 +600,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, + 
{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -619,6 +630,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, @@ -628,17 +640,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -656,17 +668,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, 
iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)}, @@ -684,18 +696,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_160_cfg_shared_clk)}, + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_160_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)}, {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)}, - {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, @@ -704,24 +717,25 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)}, {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)}, 
{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -739,17 +753,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -758,6 +773,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)}, @@ -767,19 +783,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, 
iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -805,18 +821,19 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0044, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -825,6 +842,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0244, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)}, @@ -834,17 +852,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)}, 
{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)}, @@ -862,41 +880,98 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, - {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_160_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_160_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0030, iwl9560_2ac_cfg_qnj_jf_b0)}, /* 22000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0074, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0078, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x007C, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x0310, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x02F0, 0x4070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0074, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0078, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x007C, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x0310, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x06F0, 0x4070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0000, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0040, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0074, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0078, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x007C, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)}, + {IWL_PCI_DEVICE(0x2720, 0x1080, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x1651, 
killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x2720, 0x4070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0074, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x007C, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)}, {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)}, - {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x007C, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0x43F0, 0x4070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl_ax101_cfg_qu_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, + {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, + + {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)}, + {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)}, + + {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, + {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, + + {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)}, + {IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)}, + {IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)}, + {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_so_hr_a0)}, + {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_so_hr_a0)}, + {IWL_PCI_DEVICE(0x2725, 0x0A10, 
iwlax210_2ax_cfg_so_hr_a0)}, + {IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax210_2ax_cfg_so_hr_a0)}, #endif /* CONFIG_IWLMVM */ @@ -949,7 +1024,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (rf_id_chp == jf_chp_id) { if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) - cfg = &iwl22000_2ax_cfg_qnj_jf_b0; + cfg = &iwl9560_2ac_cfg_qnj_jf_b0; else cfg = &iwl22000_2ac_cfg_jf; } else if (rf_id_chp == hr_chp_id) { diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index d6fc6ce73e0a..bf8b61a476c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,12 +1,14 @@ /****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -18,12 +20,46 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
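Most of the device-table churn above is mechanical: each IWL_PCI_DEVICE() entry keys a (PCI device ID, PCI subsystem ID) pair to a configuration struct, and the probe path can still override that choice later from the RF ID, as the iwl9560_2ac_cfg_qnj_jf_b0 hunk just above shows. As a rough, self-contained illustration of that kind of table lookup (the struct names and config objects here are invented for the sketch, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct wifi_cfg {
	const char *name;
};

static const struct wifi_cfg cfg_2ac     = { "example 2ac config" };
static const struct wifi_cfg cfg_2ac_160 = { "example 2ac 160 MHz config" };

struct dev_entry {
	uint16_t device;      /* PCI device ID */
	uint16_t subdevice;   /* PCI subsystem device ID */
	const struct wifi_cfg *cfg;
};

/* Analogous to the IWL_PCI_DEVICE() table: (device, subdevice) -> cfg. */
static const struct dev_entry dev_table[] = {
	{ 0x3DF0, 0x0034, &cfg_2ac     },
	{ 0x3DF0, 0x0038, &cfg_2ac_160 },
	{ 0x3DF0, 0x003C, &cfg_2ac_160 },
};

static const struct wifi_cfg *lookup_cfg(uint16_t device, uint16_t subdevice)
{
	for (size_t i = 0; i < sizeof(dev_table) / sizeof(dev_table[0]); i++)
		if (dev_table[i].device == device &&
		    dev_table[i].subdevice == subdevice)
			return dev_table[i].cfg;
	return NULL;
}

int main(void)
{
	const struct wifi_cfg *cfg = lookup_cfg(0x3DF0, 0x0038);

	printf("matched: %s\n", cfg ? cfg->name : "none");
	return 0;
}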
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ #ifndef __iwl_trans_int_pcie_h__ #define __iwl_trans_int_pcie_h__ @@ -364,6 +400,8 @@ struct iwl_txq { u32 id; int low_mark; int high_mark; + + bool overflow_tx; }; static inline dma_addr_t @@ -417,20 +455,6 @@ enum iwl_image_response_code { IWL_IMAGE_RESP_FAIL = 2, }; -/** - * struct iwl_self_init_dram - dram data used by self init process - * @fw: lmac and umac dram data - * @fw_cnt: total number of items in array - * @paging: paging dram data - * @paging_cnt: total number of items in array - */ -struct iwl_self_init_dram { - struct iwl_dram_data *fw; - int fw_cnt; - struct iwl_dram_data *paging; - int paging_cnt; -}; - /** * struct cont_rec: continuous recording data structure * @prev_wr_ptr: the last address that was read in monitor_data @@ -502,6 +526,8 @@ struct cont_rec { * @fh_mask: current unmasked fh causes * @hw_mask: current unmasked hw causes * @in_rescan: true if we have triggered a device rescan + * @base_rb_stts: base virtual address of receive buffer status for all queues + * @base_rb_stts_dma: base physical address of receive buffer status */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -518,7 +544,6 @@ struct iwl_trans_pcie { dma_addr_t prph_info_dma_addr; dma_addr_t prph_scratch_dma_addr; dma_addr_t iml_dma_addr; - struct iwl_self_init_dram init_dram; struct iwl_trans *trans; struct net_device napi_dev; @@ -594,6 +619,9 @@ struct iwl_trans_pcie { cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES]; u16 tx_cmd_queue_size; bool in_rescan; + + void *base_rb_stts; + dma_addr_t base_rb_stts_dma; }; static inline struct iwl_trans_pcie * @@ -777,8 +805,7 @@ static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + struct iwl_self_init_dram *dram = &trans->init_dram; int i; if (!dram->fw) { @@ -1016,6 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); +void iwl_trans_sync_nmi(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); @@ -1029,8 +1057,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); -void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); - void iwl_pcie_rx_allocator_work(struct work_struct *data); /* common functions that are used by gen2 transport */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 9e850c25877b..8d4f0628622b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c 
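The new @base_rb_stts/@base_rb_stts_dma fields documented above pair with the rx.c hunks that follow: instead of one dma_alloc_coherent() per RX queue, a single status block is allocated for all queues (a hardware requirement, per the added comment) and each queue's rb_stts pointer and DMA address are carved out of that block at a fixed per-queue stride. A stripped-down, userspace-flavoured sketch of the carve-up, with plain allocation standing in for the coherent DMA allocation and invented names throughout:

#include <stdint.h>
#include <stdlib.h>

#define NUM_RX_QUEUES 4

struct rb_status {              /* stand-in for the per-queue status record */
	uint16_t closed_rb_num;
};

struct rxq {
	struct rb_status *rb_stts;  /* this queue's slice of the shared block */
	uintptr_t rb_stts_dma;      /* would be a dma_addr_t in the driver */
};

struct rx_state {
	void *base_rb_stts;         /* one allocation covering every queue */
	uintptr_t base_rb_stts_dma;
	struct rxq rxq[NUM_RX_QUEUES];
};

static int rx_alloc(struct rx_state *st)
{
	size_t stride = sizeof(struct rb_status);

	/* In the driver this is dma_alloc_coherent(); calloc is a stand-in. */
	st->base_rb_stts = calloc(NUM_RX_QUEUES, stride);
	if (!st->base_rb_stts)
		return -1;
	st->base_rb_stts_dma = (uintptr_t)st->base_rb_stts;

	for (int i = 0; i < NUM_RX_QUEUES; i++) {
		st->rxq[i].rb_stts =
			(struct rb_status *)((char *)st->base_rb_stts + i * stride);
		st->rxq[i].rb_stts_dma = st->base_rb_stts_dma + i * stride;
	}
	return 0;
}

static void rx_free(struct rx_state *st)
{
	free(st->base_rb_stts);     /* one free tears down every queue's slice */
	st->base_rb_stts = NULL;
}

int main(void)
{
	struct rx_state st = { 0 };

	if (rx_alloc(&st) == 0)
		rx_free(&st);
	return 0;
}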
@@ -1,12 +1,14 @@ /****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -18,12 +20,46 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * *****************************************************************************/ #include #include @@ -166,9 +202,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans) { if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { /* TODO: remove this for 22560 once fw does it */ - iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); - return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3, - RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); + iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); + return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3, + RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); } else if (trans->cfg->mq_rx_supported) { iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, @@ -211,7 +247,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, } rxq->write_actual = round_down(rxq->write, 8); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) iwl_write32(trans, HBUS_TARG_WRPTR, (rxq->write_actual | ((FIRST_RX_QUEUE + rxq->id) << 16))); @@ -256,6 +292,9 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans, bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); } + + IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n", + (u32)rxb->vid, rxq->id, rxq->write); } /* @@ -499,9 +538,9 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; struct list_head local_empty; - int pending = atomic_xchg(&rba->req_pending, 0); + int pending = atomic_read(&rba->req_pending); - IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); + IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending); /* If we were scheduled - there is at least one request */ spin_lock(&rba->lock); @@ -554,12 +593,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) i++; } + atomic_dec(&rba->req_pending); pending--; + if (!pending) { - pending = atomic_xchg(&rba->req_pending, 0); - IWL_DEBUG_RX(trans, - "Pending allocation requests = %d\n", - pending); + pending = atomic_read(&rba->req_pending); + if (pending) + IWL_DEBUG_TPT(trans, + "Got more pending allocation requests = %d\n", + pending); } spin_lock(&rba->lock); @@ -570,12 +612,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) spin_unlock(&rba->lock); atomic_inc(&rba->req_ready); + } spin_lock(&rba->lock); /* return unused rbds to the allocator empty list */ list_splice_tail(&local_empty, &rba->rbd_empty); spin_unlock(&rba->lock); + + IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__); } /* @@ -657,11 +702,6 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, rxq->bd_dma = 0; rxq->bd = NULL; - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - use_rx_td ? sizeof(__le16) : - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); rxq->rb_stts_dma = 0; rxq->rb_stts = NULL; @@ -698,6 +738,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, int free_size; bool use_rx_td = (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560); + size_t rb_stts_size = use_rx_td ? sizeof(__le16) : + sizeof(struct iwl_rb_status); spin_lock_init(&rxq->lock); if (trans->cfg->mq_rx_supported) @@ -725,12 +767,9 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, goto err; } - /* Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_alloc_coherent(dev, - use_rx_td ? 
sizeof(__le16) : sizeof(struct iwl_rb_status), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto err; + rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size; + rxq->rb_stts_dma = + trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size; if (!use_rx_td) return 0; @@ -760,7 +799,6 @@ err: iwl_pcie_free_rxq_dma(trans, rxq); } - kfree(trans_pcie->rxq); return -ENOMEM; } @@ -770,6 +808,9 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, ret; + size_t rb_stts_size = trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560 ? + sizeof(__le16) : sizeof(struct iwl_rb_status); if (WARN_ON(trans_pcie->rxq)) return -EINVAL; @@ -777,18 +818,46 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans) trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), GFP_KERNEL); if (!trans_pcie->rxq) - return -EINVAL; + return -ENOMEM; spin_lock_init(&rba->lock); + /* + * Allocate the driver's pointer to receive buffer status. + * Allocate for all queues continuously (HW requirement). + */ + trans_pcie->base_rb_stts = + dma_alloc_coherent(trans->dev, + rb_stts_size * trans->num_rx_queues, + &trans_pcie->base_rb_stts_dma, + GFP_KERNEL); + if (!trans_pcie->base_rb_stts) { + ret = -ENOMEM; + goto err; + } + for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + rxq->id = i; ret = iwl_pcie_alloc_rxq_dma(trans, rxq); if (ret) - return ret; + goto err; } return 0; + +err: + if (trans_pcie->base_rb_stts) { + dma_free_coherent(trans->dev, + rb_stts_size * trans->num_rx_queues, + trans_pcie->base_rb_stts, + trans_pcie->base_rb_stts_dma); + trans_pcie->base_rb_stts = NULL; + trans_pcie->base_rb_stts_dma = 0; + } + kfree(trans_pcie->rxq); + + return ret; } static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) @@ -860,30 +929,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } -void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable) -{ - if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000) - return; - - if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP) - return; - - if (!trans->cfg->integrated) - return; - - /* - * Turn on the chicken-bits that cause MAC wakeup for RX-related - * values. 
- * This costs some power, but needed for W/A 9000 integrated A-step - * bug where shadow registers are not in the retention list and their - * value is lost when NIC powers down - */ - iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, - CSR_MAC_SHADOW_REG_CTRL_RX_WAKE); - iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2, - CSR_MAC_SHADOW_REG_CTL2_RX_WAKE); -} - static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -971,8 +1016,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); - - iwl_pcie_enable_rx_wake(trans, true); } void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) @@ -1023,8 +1066,6 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans) for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - rxq->id = i; - spin_lock(&rxq->lock); /* * Set read write pointer to reflect that we have processed @@ -1111,6 +1152,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; + size_t rb_stts_size = trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560 ? + sizeof(__le16) : sizeof(struct iwl_rb_status); /* * if rxq is NULL, it means that nothing has been allocated, @@ -1125,6 +1169,15 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) iwl_pcie_free_rbs_pool(trans); + if (trans_pcie->base_rb_stts) { + dma_free_coherent(trans->dev, + rb_stts_size * trans->num_rx_queues, + trans_pcie->base_rb_stts, + trans_pcie->base_rb_stts_dma); + trans_pcie->base_rb_stts = NULL; + trans_pcie->base_rb_stts_dma = 0; + } + for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; @@ -1360,6 +1413,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, if (rxb->invalid) goto out_err; + IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid); + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE; @@ -1409,13 +1464,17 @@ restart: !emergency)) { iwl_pcie_rx_move_to_allocator(rxq, rba); emergency = true; + IWL_DEBUG_TPT(trans, + "RX path is in emergency. Pending allocations %d\n", + rb_pending_alloc); } + IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); + rxb = iwl_pcie_get_rxb(trans, rxq, i); if (!rxb) goto out; - IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i); i = (i + 1) & (rxq->queue_size - 1); @@ -1437,8 +1496,12 @@ restart: count++; if (count == 8) { count = 0; - if (rb_pending_alloc < rxq->queue_size / 3) + if (rb_pending_alloc < rxq->queue_size / 3) { + IWL_DEBUG_TPT(trans, + "RX path exited emergency. 
Pending allocations %d\n", + rb_pending_alloc); emergency = false; + } rxq->read = i; spin_unlock(&rxq->lock); @@ -2104,7 +2167,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 && + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 && inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) { /* Reflect IML transfer status */ int res = iwl_read32(trans, CSR_IML_RESP_ADDR); @@ -2123,6 +2186,17 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) isr_stats->wakeup++; } + if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) { + /* Reflect IML transfer status */ + int res = iwl_read32(trans, CSR_IML_RESP_ADDR); + + IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); + if (res == IWL_IMAGE_RESP_FAIL) { + isr_stats->sw++; + iwl_pcie_irq_handle_error(trans); + } + } + /* Chip got too hot and stopped itself */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { IWL_ERR(trans, "Microcode CT kill error detected.\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 77f3610e5ca9..9c203ca75de9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -92,26 +92,9 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans) iwl_pcie_apm_config(trans); - /* - * Set "initialization complete" bit to move adapter from - * D0U* --> D0A* (powered-up active) state. - */ - iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); - - /* - * Wait for clock stabilization; once stabilized, access to - * device-internal resources is supported, e.g. iwl_write_prph() - * and accesses to uCode SRAM. - */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_clock_ready), - BIT(trans->cfg->csr->flag_mac_clock_ready), - 25000); - if (ret < 0) { - IWL_DEBUG_INFO(trans, "Failed to init the card\n"); + ret = iwl_finish_nic_init(trans); + if (ret) return ret; - } set_bit(STATUS_DEVICE_ENABLED, &trans->status); @@ -188,7 +171,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) } iwl_pcie_ctxt_info_free_paging(trans); - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) iwl_pcie_ctxt_info_gen3_free(trans); else iwl_pcie_ctxt_info_free(trans); @@ -251,6 +234,7 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int queue_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size); /* TODO: most of the logic can be removed in A0 - but not in Z0 */ spin_lock(&trans_pcie->irq_lock); @@ -264,7 +248,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ - if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, TFD_CMD_SLOTS)) + if (iwl_pcie_gen2_tx_init(trans, trans_pcie->cmd_queue, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ @@ -349,7 +333,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; } - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); else ret = iwl_pcie_ctxt_info_init(trans, fw); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c 
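The rx.c hunks above add IWL_DEBUG_TPT messages around the RX path's "emergency" mode: the path enters it when pending buffer allocations pile up and leaves it only once they drop back below a third of the queue size, so the mode does not flap. Only the exit threshold (queue_size / 3) is visible in the hunk; the entry threshold below is invented for the sketch. A tiny standalone illustration of that hysteresis shape:

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 512

/*
 * Hysteresis: turn "emergency" on at a high-water mark and off again only
 * below a lower low-water mark.  The entry threshold here is assumed, the
 * exit threshold mirrors the queue_size / 3 check in the hunk above.
 */
static bool update_emergency(bool emergency, int pending)
{
	if (!emergency && pending >= QUEUE_SIZE / 2)
		return true;                    /* enter emergency mode */
	if (emergency && pending < QUEUE_SIZE / 3)
		return false;                   /* exit emergency mode */
	return emergency;                       /* otherwise unchanged */
}

int main(void)
{
	bool emergency = false;
	int samples[] = { 100, 300, 200, 180, 150, 100 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		emergency = update_emergency(emergency, samples[i]);
		printf("pending=%d emergency=%d\n", samples[i], emergency);
	}
	return 0;
}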
index f97aea5ffc44..fe8269d023de 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -8,7 +8,7 @@ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,7 +31,7 @@ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -364,26 +364,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans) if (trans->cfg->base_params->pll_cfg) iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); - /* - * Set "initialization complete" bit to move adapter from - * D0U* --> D0A* (powered-up active) state. - */ - iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); - - /* - * Wait for clock stabilization; once stabilized, access to - * device-internal resources is supported, e.g. iwl_write_prph() - * and accesses to uCode SRAM. - */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_clock_ready), - BIT(trans->cfg->csr->flag_mac_clock_ready), - 25000); - if (ret < 0) { - IWL_ERR(trans, "Failed to init the card\n"); + ret = iwl_finish_nic_init(trans); + if (ret) return ret; - } if (trans->cfg->host_interrupt_operation_mode) { /* @@ -453,23 +436,8 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) iwl_trans_pcie_sw_reset(trans); - /* - * Set "initialization complete" bit to move adapter from - * D0U* --> D0A* (powered-up active) state. - */ - iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); - - /* - * Wait for clock stabilization; once stabilized, access to - * device-internal resources is possible. 
- */ - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_clock_ready), - BIT(trans->cfg->csr->flag_mac_clock_ready), - 25000); - if (WARN_ON(ret < 0)) { - IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n"); + ret = iwl_finish_nic_init(trans); + if (WARN_ON(ret)) { /* Release XTAL ON request */ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON); @@ -928,13 +896,13 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans) if (!trans->num_blocks) return; - iwl_write_prph(trans, MON_BUFF_BASE_ADDR_VER2, - trans->fw_mon[0].physical >> - MON_BUFF_SHIFT_VER2); - iwl_write_prph(trans, MON_BUFF_END_ADDR_VER2, - (trans->fw_mon[0].physical + - trans->fw_mon[0].size - 256) >> - MON_BUFF_SHIFT_VER2); + iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, + trans->fw_mon[0].physical >> + MON_BUFF_SHIFT_VER2); + iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2, + (trans->fw_mon[0].physical + + trans->fw_mon[0].size - 256) >> + MON_BUFF_SHIFT_VER2); return; } @@ -1126,6 +1094,7 @@ static struct iwl_causes_list causes_list[] = { {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, + {MSIX_HW_INT_CAUSES_REG_IML, CSR_MSIX_HW_INT_MASK_AD, 0x12}, {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, @@ -1158,7 +1127,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; int i, arr_size = - (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ? + (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ? ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2); /* @@ -1168,7 +1137,7 @@ static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) */ for (i = 0; i < arr_size; i++) { struct iwl_causes_list *causes = - (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ? + (trans->cfg->device_family != IWL_DEVICE_FAMILY_22560) ? causes_list : causes_list_v2; iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); @@ -1214,8 +1183,8 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) if (!trans_pcie->msix_enabled) { if (trans->cfg->mq_rx_supported && test_bit(STATUS_DEVICE_ENABLED, &trans->status)) - iwl_write_prph(trans, UREG_CHICK, - UREG_CHICK_MSI_ENABLE); + iwl_write_umac_prph(trans, UREG_CHICK, + UREG_CHICK_MSI_ENABLE); return; } /* @@ -1224,7 +1193,7 @@ void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) * prph. 
*/ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) - iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); + iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); /* * Each cause from the causes list above and the RX causes is @@ -1530,8 +1499,6 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_clear_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_init_done)); - iwl_pcie_enable_rx_wake(trans, false); - if (reset) { /* * reset TX queues -- some of their registers reset during S3 @@ -1558,24 +1525,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } - iwl_pcie_enable_rx_wake(trans, true); - iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); - iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) - udelay(2); - - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_clock_ready), - BIT(trans->cfg->csr->flag_mac_clock_ready), - 25000); - if (ret < 0) { - IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); + ret = iwl_finish_nic_init(trans); + if (ret) return ret; - } /* * Reconfigure IVAR table in case of MSIX or reset ict table in @@ -1606,7 +1561,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, } IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", - iwl_read_prph(trans, WFPM_GP2)); + iwl_read_umac_prph(trans, WFPM_GP2)); val = iwl_read32(trans, CSR_RESET); if (val & CSR_RESET_REG_FLAG_NEVO_RESET) @@ -1755,15 +1710,18 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) return err; } - hpm = iwl_trans_read_prph(trans, HPM_DEBUG); + hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { - if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) & - PREG_WFPM_ACCESS) { + int wfpm_val = iwl_read_umac_prph_no_grab(trans, + PREG_PRPH_WPROT_0); + + if (wfpm_val & PREG_WFPM_ACCESS) { IWL_ERR(trans, "Error, can not clear persistence bit\n"); return -EPERM; } - iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT); + iwl_write_umac_prph_no_grab(trans, HPM_DEBUG, + hpm & ~PERSISTENCE_BIT); } iwl_trans_pcie_sw_reset(trans); @@ -1968,7 +1926,7 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk) struct iwl_trans_pcie_removal *removal = container_of(wk, struct iwl_trans_pcie_removal, work); struct pci_dev *pdev = removal->pdev; - char *prop[] = {"EVENT=INACCESSIBLE", NULL}; + static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; dev_err(&pdev->dev, "Device gone - attempting removal\n"); kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); @@ -2285,6 +2243,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; unsigned long now = jiffies; + bool overflow_tx; u8 wr_ptr; /* Make sure the NIC is still alive in the bus */ @@ -2296,18 +2255,37 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); txq = trans_pcie->txq[txq_idx]; + + spin_lock_bh(&txq->lock); + overflow_tx = txq->overflow_tx || + !skb_queue_empty(&txq->overflow_q); + spin_unlock_bh(&txq->lock); + wr_ptr = READ_ONCE(txq->write_ptr); - while (txq->read_ptr != READ_ONCE(txq->write_ptr) && + while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || + overflow_tx) && !time_after(jiffies, now + 
msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { u8 write_ptr = READ_ONCE(txq->write_ptr); - if (WARN_ONCE(wr_ptr != write_ptr, + /* + * If write pointer moved during the wait, warn only + * if the TX came from op mode. In case TX came from + * trans layer (overflow TX) don't warn. + */ + if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, "WR pointer moved while flushing %d -> %d\n", wr_ptr, write_ptr)) return -ETIMEDOUT; + wr_ptr = write_ptr; + usleep_range(1000, 2000); + + spin_lock_bh(&txq->lock); + overflow_tx = txq->overflow_tx || + !skb_queue_empty(&txq->overflow_q); + spin_unlock_bh(&txq->lock); } if (txq->read_ptr != txq->write_ptr) { @@ -2993,7 +2971,8 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); else - for (i = FH_MEM_LOWER_BOUND_GEN2; i < FH_MEM_UPPER_BOUND_GEN2; + for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); + i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); i += sizeof(u32)) *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, i)); @@ -3018,11 +2997,11 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, if (!iwl_trans_grab_nic_access(trans, &flags)) return 0; - iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); + iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); for (i = 0; i < buf_size_in_dwords; i++) - buffer[i] = iwl_read_prph_no_grab(trans, - MON_DMARB_RD_DATA_ADDR); - iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); + buffer[i] = iwl_read_umac_prph_no_grab(trans, + MON_DMARB_RD_DATA_ADDR); + iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); iwl_trans_release_nic_access(trans, &flags); @@ -3037,9 +3016,9 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, /* If there was a dest TLV - use the values from there */ if (trans->ini_valid) { - base = MON_BUFF_BASE_ADDR_VER2; - write_ptr = MON_BUFF_WRPTR_VER2; - wrap_cnt = MON_BUFF_CYCLE_CNT_VER2; + base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2); + write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2); + wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2); } else if (trans->dbg_dest_tlv) { write_ptr = le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); @@ -3118,7 +3097,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, return len; } -static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len) +static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) { if (trans->num_blocks) { *len += sizeof(struct iwl_fw_error_dump_data) + @@ -3173,8 +3152,7 @@ static struct iwl_trans_dump_data struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; - u32 len, num_rbs = 0; - u32 monitor_len; + u32 len, num_rbs = 0, monitor_len = 0; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && !trans->cfg->mq_rx_supported && @@ -3191,19 +3169,8 @@ static struct iwl_trans_dump_data cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE); /* FW monitor */ - monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); - - if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) { - dump_data = vzalloc(len); - if (!dump_data) - return NULL; - - data = (void *)dump_data->data; - len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); - dump_data->len = len; - - return dump_data; - } + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) + monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); /* CSR 
registers */ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) @@ -3213,8 +3180,8 @@ static struct iwl_trans_dump_data if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { if (trans->cfg->gen2) len += sizeof(*data) + - (FH_MEM_UPPER_BOUND_GEN2 - - FH_MEM_LOWER_BOUND_GEN2); + (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - + iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); else len += sizeof(*data) + (FH_MEM_UPPER_BOUND - @@ -3236,10 +3203,10 @@ static struct iwl_trans_dump_data /* Paged memory for gen2 HW */ if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) - for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) + for (i = 0; i < trans->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + - trans_pcie->init_dram.paging[i].size; + trans->init_dram.paging[i].size; dump_data = vzalloc(len); if (!dump_data) @@ -3291,20 +3258,16 @@ static struct iwl_trans_dump_data /* Paged memory for gen2 HW */ if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { - for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) { + for (i = 0; i < trans->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; - dma_addr_t addr = - trans_pcie->init_dram.paging[i].physical; - u32 page_len = trans_pcie->init_dram.paging[i].size; + u32 page_len = trans->init_dram.paging[i].size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); data->len = cpu_to_le32(sizeof(*paging) + page_len); paging = (void *)data->data; paging->index = cpu_to_le32(i); - dma_sync_single_for_cpu(trans->dev, addr, page_len, - DMA_BIDIRECTIONAL); memcpy(paging->data, - trans_pcie->init_dram.paging[i].block, page_len); + trans->init_dram.paging[i].block, page_len); data = iwl_fw_error_next_data(data); len += sizeof(*data) + sizeof(*paging) + page_len; @@ -3541,25 +3504,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * in-order to recognize C step driver should read chip version * id located at the AUX bus MISC address space. */ - iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_init_done)); - udelay(2); - - ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_clock_ready), - BIT(trans->cfg->csr->flag_mac_clock_ready), - 25000); - if (ret < 0) { - IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); + ret = iwl_finish_nic_init(trans); + if (ret) goto out_no_pci; - } if (iwl_trans_grab_nic_access(trans, &flags)) { u32 hw_step; - hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG); + hw_step = iwl_read_umac_prph_no_grab(trans, + WFPM_CTRL_REG); hw_step |= ENABLE_WFPM; - iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step); + iwl_write_umac_prph_no_grab(trans, WFPM_CTRL_REG, + hw_step); hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG); hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; if (hw_step == 0x3) @@ -3569,24 +3525,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } - /* - * 9000-series integrated A-step has a problem with suspend/resume - * and sometimes even causes the whole platform to get stuck. This - * workaround makes the hardware not go into the problematic state. 
- */ - if (trans->cfg->integrated && - trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 && - CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP) - iwl_set_bit(trans, CSR_HOST_CHICKEN, - CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME); + IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); #if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); - if (cfg == &iwl22000_2ax_cfg_hr) { + if (cfg == &iwlax210_2ax_cfg_so_hr_a0) { + if (trans->hw_rev == CSR_HW_REV_TYPE_TY) { + trans->cfg = &iwlax210_2ax_cfg_ty_gf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { + trans->cfg = &iwlax210_2ax_cfg_so_jf_a0; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) { + trans->cfg = &iwlax210_2ax_cfg_so_gf_a0; + } + } else if (cfg == &iwl_ax101_cfg_qu_hr) { if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { - trans->cfg = &iwl22000_2ax_cfg_hr; + trans->cfg = &iwl_ax101_cfg_qu_hr; } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { trans->cfg = &iwl22000_2ax_cfg_jf; @@ -3602,7 +3559,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_no_pci; } } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && + (trans->cfg != &iwl22260_2ax_cfg || + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); @@ -3677,3 +3636,28 @@ out_no_pci: iwl_trans_free(trans); return ERR_PTR(ret); } + +void iwl_trans_sync_nmi(struct iwl_trans *trans) +{ + unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; + + iwl_disable_interrupts(trans); + iwl_force_nmi(trans); + while (time_after(timeout, jiffies)) { + u32 inta_hw = iwl_read32(trans, + CSR_MSIX_HW_INT_CAUSES_AD); + + /* Error detected by uCode */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) { + /* Clear causes register */ + iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, + inta_hw & + MSIX_HW_INT_CAUSES_REG_SW_ERR); + break; + } + + mdelay(1); + } + iwl_enable_interrupts(trans); + iwl_trans_fw_error(trans); +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 156ca1b1f621..88530d9f4a54 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -20,7 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. 
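The new iwl_trans_sync_nmi() above follows the common "kick the hardware, then poll a cause register until it acknowledges or a deadline passes" shape: interrupts are masked, an NMI is forced, and the loop spins in roughly 1 ms steps until either MSIX_HW_INT_CAUSES_REG_SW_ERR is latched or IWL_TRANS_NMI_TIMEOUT expires. A self-contained sketch of that deadline-polling shape, using a monotonic clock in place of jiffies and a dummy condition in place of the real register read:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Dummy stand-in for reading the interrupt-cause register. */
static bool hw_acknowledged(int iteration)
{
	return iteration >= 3;   /* pretend the device answers on the 4th poll */
}

/* Poll until the condition holds or @timeout_ms elapses; 0 on success. */
static int poll_with_deadline(long timeout_ms)
{
	struct timespec start, now;
	int iteration = 0;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (hw_acknowledged(iteration++))
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms >= timeout_ms)
			return -1;               /* deadline passed, give up */

		/* ~1 ms pause per iteration, like the mdelay(1) in the loop above */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
}

int main(void)
{
	printf("poll result: %d\n", poll_with_deadline(100));
	return 0;
}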
* * Redistribution and use in source and binary forms, with or without @@ -214,7 +214,11 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); - struct iwl_tfh_tb *tb = &tfd->tbs[idx]; + struct iwl_tfh_tb *tb; + + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) + return -EINVAL; + tb = &tfd->tbs[idx]; /* Each TFD can point to a maximum max_tbs Tx buffers */ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) { @@ -408,7 +412,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, goto out_err; /* building the A-MSDU might have changed this data, memcpy it now */ - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); return tfd; out_err: @@ -469,7 +473,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); /* The first TB points to bi-directional DMA data */ - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); @@ -834,14 +838,14 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); - memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); + memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size); iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx), tb0_size); /* map first command fragment, if any remains */ if (copy_size > tb0_size) { phys_addr = dma_map_single(trans->dev, - ((u8 *)&out_cmd->hdr) + tb0_size, + (u8 *)out_cmd + tb0_size, copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { @@ -961,9 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans, cmd_str); ret = -ETIMEDOUT; - iwl_force_nmi(trans); - iwl_trans_fw_error(trans); - + iwl_trans_sync_nmi(trans); goto cancel; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index ee990a7a5411..9fbd37d23e85 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,12 +1,14 @@ /****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation - * - * Portions of this file are derived from the ipw3945 project, as well - * as portions of the ieee80211 subsystem header files. + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -18,12 +20,46 @@ * more details. * * The full GNU General Public License is included in this distribution in the - * file called LICENSE. + * file called COPYING. * * Contact Information: * Intel Linux Wireless * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * + * BSD LICENSE + * + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 - 2019 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * *****************************************************************************/ #include #include @@ -959,7 +995,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); - slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + if (cmd_queue) + slots_num = max_t(u32, TFD_CMD_SLOTS, + trans->cfg->min_txq_size); + else + slots_num = TFD_TX_CMD_SLOTS; trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); @@ -1008,7 +1048,11 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) txq_id++) { bool cmd_queue = (txq_id == trans_pcie->cmd_queue); - slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + if (cmd_queue) + slots_num = max_t(u32, TFD_CMD_SLOTS, + trans->cfg->min_txq_size); + else + slots_num = TFD_TX_CMD_SLOTS; ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], slots_num, cmd_queue); if (ret) { @@ -1137,6 +1181,15 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, __skb_queue_head_init(&overflow_skbs); skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); + /* + * We are going to transmit from the overflow queue. + * Remember this state so that wait_for_txq_empty will know we + * are adding more packets to the TFD queue. It cannot rely on + * the state of &txq->overflow_q, as we just emptied it, but + * haven't TXed the content yet. 
+ */ + txq->overflow_tx = true; + /* * This is tricky: we are in reclaim path which is non * re-entrant, so noone will try to take the access the @@ -1165,6 +1218,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_wake_queue(trans, txq); spin_lock_bh(&txq->lock); + txq->overflow_tx = false; } if (txq->read_ptr == txq->write_ptr) { @@ -1906,9 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans, iwl_get_cmd_string(trans, cmd->id)); ret = -ETIMEDOUT; - iwl_force_nmi(trans); - iwl_trans_fw_error(trans); - + iwl_trans_sync_nmi(trans); goto cancel; } @@ -2438,8 +2490,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* building the A-MSDU might have changed this data, so memcpy it now */ - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c index 709d9ab3e7bc..67b0c05afbdb 100644 --- a/drivers/net/wireless/intersil/orinoco/mic.c +++ b/drivers/net/wireless/intersil/orinoco/mic.c @@ -18,16 +18,16 @@ int orinoco_mic_init(struct orinoco_private *priv) { priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_mic)) { - printk(KERN_DEBUG "orinoco_mic_init: could not allocate " - "crypto API michael_mic\n"); + printk(KERN_DEBUG "%s: could not allocate " + "crypto API michael_mic\n", __func__); priv->tx_tfm_mic = NULL; return -ENOMEM; } priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_mic)) { - printk(KERN_DEBUG "orinoco_mic_init: could not allocate " - "crypto API michael_mic\n"); + printk(KERN_DEBUG "%s: could not allocate " + "crypto API michael_mic\n", __func__); priv->rx_tfm_mic = NULL; return -ENOMEM; } @@ -52,7 +52,7 @@ int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key, int err; if (tfm_michael == NULL) { - printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n"); + printk(KERN_WARNING "%s: tfm_michael == NULL\n", __func__); return -1; } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 320edcac4699..0838af04d681 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1273,10 +1273,12 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, * probably doesn't really matter. 
*/ if (ieee80211_is_beacon(hdr->frame_control) || - ieee80211_is_probe_resp(hdr->frame_control)) + ieee80211_is_probe_resp(hdr->frame_control)) { + rx_status.boottime_ns = ktime_get_boot_ns(); now = data->abs_bcn_ts; - else + } else { now = mac80211_hwsim_get_tsf_raw(); + } /* Copy skb to all enabled radios that are on the current frequency */ spin_lock(&hwsim_radio_lock); @@ -2799,6 +2801,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, TDLS_WIDER_BW); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); + ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | @@ -3554,7 +3557,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) goto out_err; } - genlmsg_reply(skb, info); + res = genlmsg_reply(skb, info); break; } diff --git a/drivers/net/wireless/marvell/libertas/debugfs.c b/drivers/net/wireless/marvell/libertas/debugfs.c index c83f44f9ddf1..fe14814af300 100644 --- a/drivers/net/wireless/marvell/libertas/debugfs.c +++ b/drivers/net/wireless/marvell/libertas/debugfs.c @@ -708,8 +708,6 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) goto exit; priv->debugfs_dir = debugfs_create_dir(dev->name, lbs_dir); - if (!priv->debugfs_dir) - goto exit; for (i=0; ievents_dir = debugfs_create_dir("subscribed_events", priv->debugfs_dir); - if (!priv->events_dir) - goto exit; for (i=0; iregs_dir = debugfs_create_dir("registers", priv->debugfs_dir); - if (!priv->regs_dir) - goto exit; for (i=0; idev.kobj), &boot_opts_group); + if (ret) + pr_err("failed to create boot_opts_group.\n"); + ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group); + if (ret) + pr_err("failed to create mesh_ie_group.\n"); } static void lbs_persist_config_remove(struct net_device *dev) diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c index 909ac3685010..130f578daafd 100644 --- a/drivers/net/wireless/marvell/libertas_tf/cmd.c +++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c @@ -256,7 +256,7 @@ static void lbtf_submit_command(struct lbtf_private *priv, command, le16_to_cpu(cmd->seqnum), cmdsize); lbtf_deb_hex(LBTF_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize); - ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize); + ret = priv->ops->hw_host_to_card(priv, MVMS_CMD, (u8 *)cmd, cmdsize); spin_unlock_irqrestore(&priv->driver_lock, flags); if (ret) { @@ -737,10 +737,9 @@ int lbtf_process_rx_command(struct lbtf_private *priv) respcmd = le16_to_cpu(resp->command); result = le16_to_cpu(resp->result); - if (net_ratelimit()) - pr_info("libertastf: cmd response 0x%04x, seq %d, size %d\n", - respcmd, le16_to_cpu(resp->seqnum), - le16_to_cpu(resp->size)); + lbtf_deb_cmd("libertastf: cmd response 0x%04x, seq %d, size %d\n", + respcmd, le16_to_cpu(resp->seqnum), + le16_to_cpu(resp->size)); if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { spin_unlock_irqrestore(&priv->driver_lock, flags); diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 789337ea676a..a4b9ede70705 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -42,14 +42,14 @@ MODULE_DEVICE_TABLE(usb, if_usb_table); static void if_usb_receive(struct urb *urb); static void if_usb_receive_fwload(struct urb *urb); -static int if_usb_prog_firmware(struct if_usb_card *cardp); +static int 
if_usb_prog_firmware(struct lbtf_private *priv); static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type, uint8_t *payload, uint16_t nb); static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, uint16_t nb, u8 data); static void if_usb_free(struct if_usb_card *cardp); static int if_usb_submit_rx_urb(struct if_usb_card *cardp); -static int if_usb_reset_device(struct if_usb_card *cardp); +static int if_usb_reset_device(struct lbtf_private *priv); /** * if_usb_wrike_bulk_callback - call back to handle URB status @@ -131,6 +131,12 @@ static void if_usb_fw_timeo(struct timer_list *t) lbtf_deb_leave(LBTF_DEB_USB); } +static const struct lbtf_ops if_usb_ops = { + .hw_host_to_card = if_usb_host_to_card, + .hw_prog_firmware = if_usb_prog_firmware, + .hw_reset_device = if_usb_reset_device, +}; + /** * if_usb_probe - sets the configuration values * @@ -216,17 +222,11 @@ static int if_usb_probe(struct usb_interface *intf, goto dealloc; } - priv = lbtf_add_card(cardp, &udev->dev); + cardp->boot2_version = udev->descriptor.bcdDevice; + priv = lbtf_add_card(cardp, &udev->dev, &if_usb_ops); if (!priv) goto dealloc; - cardp->priv = priv; - - priv->hw_host_to_card = if_usb_host_to_card; - priv->hw_prog_firmware = if_usb_prog_firmware; - priv->hw_reset_device = if_usb_reset_device; - cardp->boot2_version = udev->descriptor.bcdDevice; - usb_get_dev(udev); usb_set_intfdata(intf, cardp); @@ -251,7 +251,7 @@ static void if_usb_disconnect(struct usb_interface *intf) lbtf_deb_enter(LBTF_DEB_MAIN); - if_usb_reset_device(cardp); + if_usb_reset_device(priv); if (priv) lbtf_remove_card(priv); @@ -334,8 +334,9 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp) return 0; } -static int if_usb_reset_device(struct if_usb_card *cardp) +static int if_usb_reset_device(struct lbtf_private *priv) { + struct if_usb_card *cardp = priv->card; struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4; int ret; @@ -433,8 +434,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, skb_tail_pointer(skb), MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); - cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; - lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb); ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC); @@ -808,14 +807,17 @@ static int check_fwfile_format(const u8 *data, u32 totlen) } -static int if_usb_prog_firmware(struct if_usb_card *cardp) +static int if_usb_prog_firmware(struct lbtf_private *priv) { + struct if_usb_card *cardp = priv->card; int i = 0; static int reset_count = 10; int ret = 0; lbtf_deb_enter(LBTF_DEB_USB); + cardp->priv = priv; + kernel_param_lock(THIS_MODULE); ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev); if (ret < 0) { @@ -851,7 +853,7 @@ restart: if (cardp->bootcmdresp <= 0) { if (--reset_count >= 0) { - if_usb_reset_device(cardp); + if_usb_reset_device(priv); goto restart; } return -1; @@ -880,7 +882,7 @@ restart: if (!cardp->fwdnldover) { pr_info("failed to load fw, resetting device!\n"); if (--reset_count >= 0) { - if_usb_reset_device(cardp); + if_usb_reset_device(priv); goto restart; } @@ -889,8 +891,6 @@ restart: goto release_fw; } - cardp->priv->fw_ready = 1; - release_fw: release_firmware(cardp->fw); cardp->fw = NULL; diff --git a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h index ad77b92d0b41..3ed1fbe28798 100644 --- a/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h +++ b/drivers/net/wireless/marvell/libertas_tf/libertas_tf.h @@ 
-173,10 +173,19 @@ struct channel_range { struct if_usb_card; +struct lbtf_ops { + /** Hardware access */ + int (*hw_host_to_card)(struct lbtf_private *priv, u8 type, + u8 *payload, u16 nb); + int (*hw_prog_firmware)(struct lbtf_private *priv); + int (*hw_reset_device)(struct lbtf_private *priv); +}; + /** Private structure for the MV device */ struct lbtf_private { void *card; struct ieee80211_hw *hw; + const struct lbtf_ops *ops; /* Command response buffer */ u8 cmd_resp_buff[LBS_UPLD_SIZE]; @@ -188,11 +197,6 @@ struct lbtf_private { struct work_struct cmd_work; struct work_struct tx_work; - /** Hardware access */ - int (*hw_host_to_card) (struct lbtf_private *priv, u8 type, u8 *payload, u16 nb); - int (*hw_prog_firmware) (struct if_usb_card *cardp); - int (*hw_reset_device) (struct if_usb_card *cardp); - /** Wlan adapter data structure*/ /** STATUS variables */ @@ -250,7 +254,6 @@ struct lbtf_private { struct ieee80211_supported_band band; struct lbtf_offset_value offsetvalue; - u8 fw_ready; u8 surpriseremoved; struct sk_buff_head bc_ps_buf; @@ -486,7 +489,8 @@ void lbtf_cmd_response_rx(struct lbtf_private *priv); /* main.c */ struct chan_freq_power *lbtf_get_region_cfp_table(u8 region, int *cfp_no); -struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev); +struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev, + const struct lbtf_ops *ops); int lbtf_remove_card(struct lbtf_private *priv); int lbtf_start_card(struct lbtf_private *priv); int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb); diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 1d45da187b9b..5799e9886d83 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -118,11 +118,6 @@ static void lbtf_cmd_work(struct work_struct *work) priv->cmd_timed_out = 0; spin_unlock_irq(&priv->driver_lock); - if (!priv->fw_ready) { - lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready"); - return; - } - /* Execute the next command */ if (!priv->cur_cmd) lbtf_execute_next_command(priv); @@ -130,37 +125,6 @@ static void lbtf_cmd_work(struct work_struct *work) lbtf_deb_leave(LBTF_DEB_CMD); } -/** - * lbtf_setup_firmware: initialize firmware. - * - * @priv A pointer to struct lbtf_private structure - * - * Returns: 0 on success. - */ -static int lbtf_setup_firmware(struct lbtf_private *priv) -{ - int ret = -1; - - lbtf_deb_enter(LBTF_DEB_FW); - /* - * Read priv address from HW - */ - eth_broadcast_addr(priv->current_addr); - ret = lbtf_update_hw_spec(priv); - if (ret) { - ret = -1; - goto done; - } - - lbtf_set_mac_control(priv); - lbtf_set_radio_control(priv); - - ret = 0; -done: - lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret); - return ret; -} - /** * This function handles the timeout of command sending. * It will re-send the same command again. 
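/*
 * Illustrative sketch (editor's example, not part of the patch) of the
 * const-ops-table pattern the libertas_tf changes above introduce: the bus
 * driver defines one static const vtable and hands it to the core at
 * add-card time, replacing the individual hw_host_to_card/hw_prog_firmware/
 * hw_reset_device pointer assignments on the private struct.  All names
 * below (demo_*) are hypothetical, not driver symbols.
 */
#include <stddef.h>
#include <stdlib.h>

struct demo_priv;

struct demo_ops {
        int (*host_to_card)(struct demo_priv *priv, const void *buf, size_t len);
        int (*reset)(struct demo_priv *priv);
};

struct demo_priv {
        void *card;                  /* bus-specific state (USB card, SDIO func, ...) */
        const struct demo_ops *ops;  /* immutable, shared by every instance */
};

/* core side: store the vtable once and always call through it */
static struct demo_priv *demo_add_card(void *card, const struct demo_ops *ops)
{
        struct demo_priv *priv = calloc(1, sizeof(*priv));

        if (!priv)
                return NULL;
        priv->card = card;
        priv->ops = ops;
        return priv;
}

static int demo_tx(struct demo_priv *priv, const void *buf, size_t len)
{
        return priv->ops->host_to_card(priv, buf, len);
}

/* bus side: one static const table instead of several runtime assignments */
static int demo_usb_host_to_card(struct demo_priv *priv, const void *buf, size_t len)
{
        return 0;       /* a real implementation would queue an URB here */
}

static int demo_usb_reset(struct demo_priv *priv)
{
        return 0;
}

static const struct demo_ops demo_usb_ops = {
        .host_to_card = demo_usb_host_to_card,
        .reset        = demo_usb_reset,
};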
@@ -281,7 +245,7 @@ static void lbtf_tx_work(struct work_struct *work) BUG_ON(priv->tx_skb); spin_lock_irq(&priv->driver_lock); priv->tx_skb = skb; - err = priv->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len); + err = priv->ops->hw_host_to_card(priv, MVMS_DAT, skb->data, skb->len); spin_unlock_irq(&priv->driver_lock); if (err) { dev_kfree_skb_any(skb); @@ -294,38 +258,17 @@ static void lbtf_tx_work(struct work_struct *work) static int lbtf_op_start(struct ieee80211_hw *hw) { struct lbtf_private *priv = hw->priv; - void *card = priv->card; - int ret = -1; lbtf_deb_enter(LBTF_DEB_MACOPS); - if (!priv->fw_ready) - /* Upload firmware */ - if (priv->hw_prog_firmware(card)) - goto err_prog_firmware; - - /* poke the firmware */ priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; priv->radioon = RADIO_ON; priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; - ret = lbtf_setup_firmware(priv); - if (ret) - goto err_prog_firmware; - - if ((priv->fwrelease < LBTF_FW_VER_MIN) || - (priv->fwrelease > LBTF_FW_VER_MAX)) { - ret = -1; - goto err_prog_firmware; - } + lbtf_set_mac_control(priv); + lbtf_set_radio_control(priv); - printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n"); lbtf_deb_leave(LBTF_DEB_MACOPS); return 0; - -err_prog_firmware: - priv->hw_reset_device(card); - lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programming fw; ret=%d", ret); - return ret; } static void lbtf_op_stop(struct ieee80211_hw *hw) @@ -551,11 +494,15 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) struct ieee80211_rx_status stats; struct rxpd *prxpd; int need_padding; - unsigned int flags; struct ieee80211_hdr *hdr; lbtf_deb_enter(LBTF_DEB_RX); + if (priv->radioon != RADIO_ON) { + lbtf_deb_rx("rx before we turned on the radio"); + goto done; + } + prxpd = (struct rxpd *) skb->data; memset(&stats, 0, sizeof(stats)); @@ -563,7 +510,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) stats.flag |= RX_FLAG_FAILED_FCS_CRC; stats.freq = priv->cur_freq; stats.band = NL80211_BAND_2GHZ; - stats.signal = prxpd->snr; + stats.signal = prxpd->snr - prxpd->nf; priv->noise = prxpd->nf; /* Marvell rate index has a hole at value 4 */ if (prxpd->rx_rate > 4) @@ -572,7 +519,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) skb_pull(skb, sizeof(struct rxpd)); hdr = (struct ieee80211_hdr *)skb->data; - flags = le32_to_cpu(*(__le32 *)(skb->data + 4)); need_padding = ieee80211_is_data_qos(hdr->frame_control); need_padding ^= ieee80211_has_a4(hdr->frame_control); @@ -594,19 +540,21 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb) ieee80211_rx_irqsafe(priv->hw, skb); +done: lbtf_deb_leave(LBTF_DEB_RX); return 0; } EXPORT_SYMBOL_GPL(lbtf_rx); /** - * lbtf_add_card: Add and initialize the card, no fw upload yet. + * lbtf_add_card: Add and initialize the card. * * @card A pointer to card * * Returns: pointer to struct lbtf_priv. 
*/ -struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev) +struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev, + const struct lbtf_ops *ops) { struct ieee80211_hw *hw; struct lbtf_private *priv = NULL; @@ -623,10 +571,13 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev) priv->hw = hw; priv->card = card; + priv->ops = ops; priv->tx_skb = NULL; + priv->radioon = RADIO_OFF; hw->queues = 1; ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); + ieee80211_hw_set(hw, SIGNAL_DBM); hw->extra_tx_headroom = sizeof(struct txpd); memcpy(priv->channels, lbtf_channels, sizeof(lbtf_channels)); memcpy(priv->rates, lbtf_rates, sizeof(lbtf_rates)); @@ -646,9 +597,31 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev) INIT_WORK(&priv->cmd_work, lbtf_cmd_work); INIT_WORK(&priv->tx_work, lbtf_tx_work); + + if (priv->ops->hw_prog_firmware(priv)) { + lbtf_deb_usbd(dmdev, "Error programming the firmware\n"); + priv->ops->hw_reset_device(priv); + goto err_init_adapter; + } + + eth_broadcast_addr(priv->current_addr); + if (lbtf_update_hw_spec(priv)) + goto err_init_adapter; + + if (priv->fwrelease < LBTF_FW_VER_MIN || + priv->fwrelease > LBTF_FW_VER_MAX) { + goto err_init_adapter; + } + + /* The firmware seems to start with the radio enabled. Turn it + * off before an actual mac80211 start callback is invoked. + */ + lbtf_set_radio_control(priv); + if (ieee80211_register_hw(hw)) goto err_init_adapter; + dev_info(dmdev, "libertastf: Marvell WLAN 802.11 thinfirm adapter\n"); goto done; err_init_adapter: @@ -676,7 +649,7 @@ int lbtf_remove_card(struct lbtf_private *priv) ieee80211_unregister_hw(hw); ieee80211_free_hw(hw); - lbtf_deb_leave(LBTF_DEB_MAIN); + lbtf_deb_leave(LBTF_DEB_MAIN); return 0; } EXPORT_SYMBOL_GPL(lbtf_remove_card); diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig index 279167ddd293..524fd565cb2a 100644 --- a/drivers/net/wireless/marvell/mwifiex/Kconfig +++ b/drivers/net/wireless/marvell/mwifiex/Kconfig @@ -9,7 +9,7 @@ config MWIFIEX mwifiex. 
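# Editor's illustrative sketch, not part of the patch: the general shape of a
# bus-specific sub-driver entry such as MWIFIEX_SDIO below, whose prompt this
# series extends with SD8977 support.  The symbol and prompt here are
# hypothetical.
config DEMO_WIFI_SDIO
	tristate "Demo WLAN driver for an SDIO front-end"
	depends on DEMO_WIFI && MMC
	select FW_LOADER
	help
	  Bus glue for a hypothetical chipset family: it depends on the core
	  symbol plus the bus framework, and selects FW_LOADER so the firmware
	  image can be fetched with request_firmware() at probe time.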
config MWIFIEX_SDIO - tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997" + tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997" depends on MWIFIEX && MMC select FW_LOADER select WANT_DEV_COREDUMP diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 1467af22e394..c46f0a54a0c7 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -376,11 +376,20 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, struct mwifiex_power_cfg power_cfg; int dbm = MBM_TO_DBM(mbm); - if (type == NL80211_TX_POWER_FIXED) { + switch (type) { + case NL80211_TX_POWER_FIXED: power_cfg.is_power_auto = 0; + power_cfg.is_power_fixed = 1; power_cfg.power_level = dbm; - } else { + break; + case NL80211_TX_POWER_LIMITED: + power_cfg.is_power_auto = 0; + power_cfg.is_power_fixed = 0; + power_cfg.power_level = dbm; + break; + case NL80211_TX_POWER_AUTOMATIC: power_cfg.is_power_auto = 1; + break; } priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); @@ -4310,11 +4319,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->mgmt_stypes = mwifiex_mgmt_stypes; wiphy->max_remain_on_channel_duration = 5000; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_AP); + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) + wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz; if (adapter->config_bands & BAND_A) wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz; @@ -4374,11 +4385,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1; - wiphy->features |= NL80211_FEATURE_HT_IBSS | - NL80211_FEATURE_INACTIVITY_TIMER | + wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER | NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_NEED_OBSS_SCAN; + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info)) + wiphy->features |= NL80211_FEATURE_HT_IBSS; + if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info)) wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index cbe4493b3266..8ab114cf3467 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -922,9 +922,8 @@ mwifiex_reset_write(struct file *file, } #define MWIFIEX_DFS_ADD_FILE(name) do { \ - if (!debugfs_create_file(#name, 0644, priv->dfs_dev_dir, \ - priv, &mwifiex_dfs_##name##_fops)) \ - return; \ + debugfs_create_file(#name, 0644, priv->dfs_dev_dir, priv, \ + &mwifiex_dfs_##name##_fops); \ } while (0); #define MWIFIEX_DFS_FILE_OPS(name) \ diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 48e154e1865d..0dd592ea6e83 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -267,6 +267,7 @@ struct mwifiex_ds_encrypt_key { struct mwifiex_power_cfg { u32 is_power_auto; + u32 is_power_fixed; u32 power_level; }; diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index d49fbd58afa7..a85648342d15 100644 --- 
a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -489,6 +489,8 @@ static void mwifiex_sdio_coredump(struct device *dev) #define SDIO_DEVICE_ID_MARVELL_8887 (0x9135) /* Device ID for SD8801 */ #define SDIO_DEVICE_ID_MARVELL_8801 (0x9139) +/* Device ID for SD8977 */ +#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145) /* Device ID for SD8997 */ #define SDIO_DEVICE_ID_MARVELL_8997 (0x9141) @@ -507,6 +509,8 @@ static const struct sdio_device_id mwifiex_ids[] = { .driver_data = (unsigned long)&mwifiex_sdio_sd8887}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801), .driver_data = (unsigned long)&mwifiex_sdio_sd8801}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977), + .driver_data = (unsigned long)&mwifiex_sdio_sd8977}, {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997), .driver_data = (unsigned long)&mwifiex_sdio_sd8997}, {}, @@ -2726,4 +2730,5 @@ MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME); +MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME); MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME); diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index dccf7fd1aef3..912de2cde8d9 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -36,6 +36,7 @@ #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" +#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin" #define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin" #define BLOCK_MODE 1 @@ -371,6 +372,59 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = { 0x59, 0x5c, 0x5d}, }; +static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = { + .start_rd_port = 0, + .start_wr_port = 0, + .base_0_reg = 0xF8, + .base_1_reg = 0xF9, + .poll_reg = 0x5C, + .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK | + CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK, + .host_int_rsr_reg = 0x4, + .host_int_status_reg = 0x0C, + .host_int_mask_reg = 0x08, + .status_reg_0 = 0xE8, + .status_reg_1 = 0xE9, + .sdio_int_mask = 0xff, + .data_port_mask = 0xffffffff, + .io_port_0_reg = 0xE4, + .io_port_1_reg = 0xE5, + .io_port_2_reg = 0xE6, + .max_mp_regs = 196, + .rd_bitmap_l = 0x10, + .rd_bitmap_u = 0x11, + .rd_bitmap_1l = 0x12, + .rd_bitmap_1u = 0x13, + .wr_bitmap_l = 0x14, + .wr_bitmap_u = 0x15, + .wr_bitmap_1l = 0x16, + .wr_bitmap_1u = 0x17, + .rd_len_p0_l = 0x18, + .rd_len_p0_u = 0x19, + .card_misc_cfg_reg = 0xd8, + .card_cfg_2_1_reg = 0xd9, + .cmd_rd_len_0 = 0xc0, + .cmd_rd_len_1 = 0xc1, + .cmd_rd_len_2 = 0xc2, + .cmd_rd_len_3 = 0xc3, + .cmd_cfg_0 = 0xc4, + .cmd_cfg_1 = 0xc5, + .cmd_cfg_2 = 0xc6, + .cmd_cfg_3 = 0xc7, + .fw_dump_host_ready = 0xcc, + .fw_dump_ctrl = 0xf0, + .fw_dump_start = 0xf1, + .fw_dump_end = 0xf8, + .func1_dump_reg_start = 0x10, + .func1_dump_reg_end = 0x17, + .func1_scratch_reg = 0xe8, + .func1_spec_reg_num = 13, + .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, + 0x60, 0x61, 0x62, 0x64, + 0x65, 0x66, 0x68, 0x69, + 0x6a}, +}; + static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = { .start_rd_port = 0, .start_wr_port = 0, @@ -532,6 +586,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { .can_ext_scan = true, }; +static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = { + .firmware = 
SD8977_DEFAULT_FW_NAME, + .reg = &mwifiex_reg_sd8977, + .max_ports = 32, + .mp_agg_pkt_limit = 16, + .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, + .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, + .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, + .supports_sdio_new_mode = true, + .has_control_mask = false, + .can_dump_fw = true, + .fw_dump_enh = true, + .can_auto_tdls = false, + .can_ext_scan = true, +}; + static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { .firmware = SD8997_DEFAULT_FW_NAME, .reg = &mwifiex_reg_sd8997, diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index b454b5f85503..ebc0e41e5d3b 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -688,6 +688,9 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf; txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET); if (!power_cfg->is_power_auto) { + u16 dbm_min = power_cfg->is_power_fixed ? + dbm : priv->min_tx_power_level; + txp_cfg->mode = cpu_to_le32(1); pg_tlv = (struct mwifiex_types_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); @@ -702,7 +705,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x03; pg->modulation_class = MOD_CLASS_HR_DSSS; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class OFDM */ @@ -710,7 +713,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x07; pg->modulation_class = MOD_CLASS_OFDM; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class HTBW20 */ @@ -718,7 +721,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_20; pg++; @@ -727,7 +730,7 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv, pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; - pg->power_min = (s8) dbm; + pg->power_min = (s8) dbm_min; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_40; } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c index e86217a6b9ca..ca759d9c0253 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_event.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c @@ -300,7 +300,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) mwifiex_11h_handle_radar_detected(priv, adapter->event_skb); break; case EVENT_BT_COEX_WLAN_PARA_CHANGE: - dev_err(adapter->dev, "EVENT: BT coex wlan param update\n"); + mwifiex_dbg(adapter, EVENT, "event: BT coex wlan param update\n"); mwifiex_bt_coex_wlan_param_update_event(priv, adapter->event_skb); break; diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index c30d8f5bbf2a..dbe8c70a8f73 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -21,3 +21,4 @@ config MT76x02_USB source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig" source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig" +source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig" diff --git a/drivers/net/wireless/mediatek/mt76/Makefile 
b/drivers/net/wireless/mediatek/mt76/Makefile index 1a45cb30f39f..3fd1b64b4aa7 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -4,9 +4,10 @@ obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o mt76-y := \ - mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o tx.o agg-rx.o + mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \ + tx.o agg-rx.o mcu.o -mt76-usb-y := usb.o usb_trace.o usb_mcu.o +mt76-usb-y := usb.o usb_trace.o CFLAGS_trace.o := -I$(src) CFLAGS_usb_trace.o := -I$(src) @@ -21,3 +22,4 @@ mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o obj-$(CONFIG_MT76x0_COMMON) += mt76x0/ obj-$(CONFIG_MT76x2_COMMON) += mt76x2/ +obj-$(CONFIG_MT7603E) += mt7603/ diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index e2ba26378575..6eedc0ec7661 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -242,6 +242,30 @@ mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) iowrite32(q->head, &q->regs->cpu_idx); } +static int +mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, + struct sk_buff *skb, u32 tx_info) +{ + struct mt76_queue *q = &dev->q_tx[qid]; + struct mt76_queue_buf buf; + dma_addr_t addr; + + addr = dma_map_single(dev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, addr)) + return -ENOMEM; + + buf.addr = addr; + buf.len = skb->len; + + spin_lock_bh(&q->lock); + mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); + mt76_dma_kick_queue(dev, q); + spin_unlock_bh(&q->lock); + + return 0; +} + int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) @@ -300,7 +324,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, if (q->queued + (n + 1) / 2 >= q->ndesc - 1) goto unmap; - return dev->queue_ops->add_buf(dev, q, buf, n, tx_info, skb, t); + return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t); unmap: ret = -ENOMEM; @@ -318,7 +342,7 @@ free: EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb); static int -mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, bool napi) +mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q) { dma_addr_t addr; void *buf; @@ -392,7 +416,7 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid) mt76_dma_rx_cleanup(dev, q); mt76_dma_sync_idx(dev, q); - mt76_dma_rx_fill(dev, q, false); + mt76_dma_rx_fill(dev, q); } static void @@ -417,10 +441,9 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, static int mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) { + int len, data_len, done = 0; struct sk_buff *skb; unsigned char *data; - int len; - int done = 0; bool more; while (done < budget) { @@ -430,6 +453,19 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) if (!data) break; + if (q->rx_head) + data_len = q->buf_size; + else + data_len = SKB_WITH_OVERHEAD(q->buf_size); + + if (data_len < len + q->buf_offset) { + dev_kfree_skb(q->rx_head); + q->rx_head = NULL; + + skb_free_frag(data); + continue; + } + if (q->rx_head) { mt76_add_fragment(dev, q, data, len, more); continue; @@ -440,12 +476,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) skb_free_frag(data); continue; } - skb_reserve(skb, q->buf_offset); - if (skb->tail + len > skb->end) { - dev_kfree_skb(skb); - continue; 
- } if (q == &dev->q_rx[MT_RXQ_MCU]) { u32 *rxfce = (u32 *) skb->cb; @@ -463,7 +494,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) dev->drv->rx_skb(dev, q - dev->q_rx, skb); } - mt76_dma_rx_fill(dev, q, true); + mt76_dma_rx_fill(dev, q); return done; } @@ -504,7 +535,7 @@ mt76_dma_init(struct mt76_dev *dev) for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) { netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, 64); - mt76_dma_rx_fill(dev, &dev->q_rx[i], false); + mt76_dma_rx_fill(dev, &dev->q_rx[i]); skb_queue_head_init(&dev->rx_skb[i]); napi_enable(&dev->napi[i]); } @@ -515,17 +546,16 @@ mt76_dma_init(struct mt76_dev *dev) static const struct mt76_queue_ops mt76_dma_ops = { .init = mt76_dma_init, .alloc = mt76_dma_alloc_queue, - .add_buf = mt76_dma_add_buf, + .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw, .tx_queue_skb = mt76_dma_tx_queue_skb, .tx_cleanup = mt76_dma_tx_cleanup, .rx_reset = mt76_dma_rx_reset, .kick = mt76_dma_kick_queue, }; -int mt76_dma_attach(struct mt76_dev *dev) +void mt76_dma_attach(struct mt76_dev *dev) { dev->queue_ops = &mt76_dma_ops; - return 0; } EXPORT_SYMBOL_GPL(mt76_dma_attach); diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h index 357cc356342d..e3292df5e9b2 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.h +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -54,7 +54,7 @@ enum mt76_mcu_evt_type { EVT_EVENT_DFS_DETECT_RSP, }; -int mt76_dma_attach(struct mt76_dev *dev); +void mt76_dma_attach(struct mt76_dev *dev); void mt76_dma_cleanup(struct mt76_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 530e5593765c..a1529920d877 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -54,22 +54,30 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len) part = np->name; mtd = get_mtd_device_nm(part); - if (IS_ERR(mtd)) - return PTR_ERR(mtd); + if (IS_ERR(mtd)) { + ret = PTR_ERR(mtd); + goto out_put_node; + } - if (size <= sizeof(*list)) - return -EINVAL; + if (size <= sizeof(*list)) { + ret = -EINVAL; + goto out_put_node; + } offset = be32_to_cpup(list); ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data); put_mtd_device(mtd); if (ret) - return ret; + goto out_put_node; - if (retlen < len) - return -EINVAL; + if (retlen < len) { + ret = -EINVAL; + goto out_put_node; + } - return 0; +out_put_node: + of_node_put(np); + return ret; #else return -ENOENT; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 7b926dfa6b97..a033745adb2f 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -124,7 +124,7 @@ static void mt76_init_stream_cap(struct mt76_dev *dev, bool vht) { struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap; - int i, nstream = __sw_hweight8(dev->antenna_mask); + int i, nstream = hweight8(dev->antenna_mask); struct ieee80211_sta_vht_cap *vht_cap; u16 mcs_map = 0; @@ -269,7 +269,9 @@ mt76_check_sband(struct mt76_dev *dev, int band) } struct mt76_dev * -mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops) +mt76_alloc_device(struct device *pdev, unsigned int size, + const struct ieee80211_ops *ops, + const struct mt76_driver_ops *drv_ops) { struct ieee80211_hw *hw; struct mt76_dev *dev; @@ -280,6 +282,9 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops) dev = hw->priv; dev->hw = 
hw; + dev->dev = pdev; + dev->drv = drv_ops; + spin_lock_init(&dev->rx_lock); spin_lock_init(&dev->lock); spin_lock_init(&dev->cc_lock); @@ -328,6 +333,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -547,7 +553,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb) } static void -mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) +mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; @@ -566,6 +572,11 @@ mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb) sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv); + if (status->signal <= 0) + ewma_signal_add(&wcid->rssi, -status->signal); + + wcid->inactive_count = 0; + if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags)) return; @@ -625,7 +636,7 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, __skb_queue_head_init(&frames); while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) { - mt76_check_ps(dev, skb); + mt76_check_sta(dev, skb); mt76_rx_aggr_reorder(skb, &frames); } @@ -659,6 +670,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif, mt76_txq_init(dev, sta->txq[i]); } + ewma_signal_init(&wcid->rssi); rcu_assign_pointer(dev->wcid[wcid->idx], wcid); out: @@ -702,6 +714,11 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, new_state == IEEE80211_STA_NONE) return mt76_sta_add(dev, vif, sta); + if (old_state == IEEE80211_STA_AUTH && + new_state == IEEE80211_STA_ASSOC && + dev->drv->sta_assoc) + dev->drv->sta_assoc(dev, vif, sta); + if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_NOTEXIST) mt76_sta_remove(dev, vif, sta); @@ -709,3 +726,60 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return 0; } EXPORT_SYMBOL_GPL(mt76_sta_state); + +int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm) +{ + struct mt76_dev *dev = hw->priv; + int n_chains = hweight8(dev->antenna_mask); + + *dbm = dev->txpower_cur / 2; + + /* convert from per-chain power to combined + * output on 2x2 devices + */ + if (n_chains > 1) + *dbm += 3; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76_get_txpower); + +static void +__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + if (vif->csa_active && ieee80211_csa_is_complete(vif)) + ieee80211_csa_finish(vif); +} + +void mt76_csa_finish(struct mt76_dev *dev) +{ + if (!dev->csa_complete) + return; + + ieee80211_iterate_active_interfaces_atomic(dev->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + __mt76_csa_finish, dev); + + dev->csa_complete = 0; +} +EXPORT_SYMBOL_GPL(mt76_csa_finish); + +static void +__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt76_dev *dev = priv; + + if (!vif->csa_active) + return; + + dev->csa_complete |= ieee80211_csa_is_complete(vif); +} + +void mt76_csa_check(struct mt76_dev *dev) +{ + ieee80211_iterate_active_interfaces_atomic(dev->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + __mt76_csa_check, dev); +} +EXPORT_SYMBOL_GPL(mt76_csa_check); diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c new file mode 100644 index 000000000000..dbb57b593a87 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mcu.c @@ -0,0 +1,60 @@ +/* + * Copyright (C) 
2019 Lorenzo Bianconi + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "mt76.h" + +struct sk_buff * +mt76_mcu_msg_alloc(const void *data, int head_len, + int data_len, int tail_len) +{ + struct sk_buff *skb; + + skb = alloc_skb(head_len + data_len + tail_len, + GFP_KERNEL); + if (!skb) + return NULL; + + skb_reserve(skb, head_len); + if (data && data_len) + skb_put_data(skb, data, data_len); + + return skb; +} +EXPORT_SYMBOL_GPL(mt76_mcu_msg_alloc); + +/* mmio */ +struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev, + unsigned long expires) +{ + unsigned long timeout; + + if (!time_is_after_jiffies(expires)) + return NULL; + + timeout = expires - jiffies; + wait_event_timeout(dev->mmio.mcu.wait, + !skb_queue_empty(&dev->mmio.mcu.res_q), + timeout); + return skb_dequeue(&dev->mmio.mcu.res_q); +} +EXPORT_SYMBOL_GPL(mt76_mcu_get_response); + +void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb) +{ + skb_queue_tail(&dev->mmio.mcu.res_q, skb); + wake_up(&dev->mmio.mcu.wait); +} +EXPORT_SYMBOL_GPL(mt76_mcu_rx_event); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 5cd508a68609..5dfb0601f101 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include "util.h" @@ -86,6 +87,7 @@ struct mt76u_buf { struct mt76_dev *dev; struct urb *urb; size_t len; + void *buf; bool done; }; @@ -156,6 +158,9 @@ struct mt76_queue_ops { struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta); + int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid, + struct sk_buff *skb, u32 tx_info); + void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush, int *len, u32 *info, bool *more); @@ -174,6 +179,8 @@ enum mt76_wcid_flags { #define MT76_N_WCIDS 128 +DECLARE_EWMA(signal, 10, 8); + struct mt76_wcid { struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS]; @@ -181,6 +188,9 @@ struct mt76_wcid { unsigned long flags; + struct ewma_signal rssi; + int inactive_count; + u8 idx; u8 hw_key_idx; @@ -239,7 +249,9 @@ struct mt76_rx_tid { #define MT_TX_CB_TXS_FAILED BIT(2) #define MT_PACKET_ID_MASK GENMASK(7, 0) -#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK +#define MT_PACKET_ID_NO_ACK 0 +#define MT_PACKET_ID_NO_SKB 1 +#define MT_PACKET_ID_FIRST 2 #define MT_TX_STATUS_SKB_TIMEOUT HZ @@ -292,6 +304,9 @@ struct mt76_driver_ops { int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); + void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); }; @@ -368,11 +383,11 @@ struct mt76_usb { u16 out_max_packet; u8 in_ep[__MT_EP_IN_MAX]; u16 
in_max_packet; + bool sg_en; struct mt76u_mcu { struct mutex mutex; - struct completion cmpl; - struct mt76u_buf res; + u8 *data; u32 msg_seq; /* multiple reads */ @@ -421,6 +436,7 @@ struct mt76_dev { struct mt76_queue q_tx[__MT_TXQ_MAX]; struct mt76_queue q_rx[__MT_RXQ_MAX]; const struct mt76_queue_ops *queue_ops; + int tx_dma_idx[4]; wait_queue_head_t tx_wait; struct sk_buff_head status_list; @@ -454,6 +470,8 @@ struct mt76_dev { bool led_al; u8 led_pin; + u8 csa_complete; + u32 rxfilter; union { @@ -488,7 +506,7 @@ struct mt76_rx_status { u8 rate_idx; u8 nss; u8 band; - u8 signal; + s8 signal; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; }; @@ -551,7 +569,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev) #define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76)) #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) -#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__) +#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) @@ -571,8 +589,9 @@ mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c) return &msband->chan[idx]; } -struct mt76_dev *mt76_alloc_device(unsigned int size, - const struct ieee80211_ops *ops); +struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size, + const struct ieee80211_ops *ops, + const struct mt76_driver_ops *drv_ops); int mt76_register_device(struct mt76_dev *dev, bool vht, struct ieee80211_rate *rates, int n_rates); void mt76_unregister_device(struct mt76_dev *dev); @@ -677,6 +696,14 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); +int mt76_get_min_avg_rssi(struct mt76_dev *dev); + +int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + int *dbm); + +void mt76_csa_check(struct mt76_dev *dev); +void mt76_csa_finish(struct mt76_dev *dev); + /* internal */ void mt76_tx_free(struct mt76_dev *dev); struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev); @@ -703,14 +730,21 @@ static inline u8 q2ep(u8 qid) return qid + 1; } -static inline bool mt76u_check_sg(struct mt76_dev *dev) +static inline int +mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len, + int timeout) { struct usb_interface *intf = to_usb_interface(dev->dev); struct usb_device *udev = interface_to_usbdev(intf); + struct mt76_usb *usb = &dev->usb; + unsigned int pipe; + + if (actual_len) + pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]); + else + pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]); - return (udev->bus->sg_tablesize > 0 && - (udev->bus->no_sg_constraint || - udev->speed == USB_SPEED_WIRELESS)); + return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout); } int mt76u_vendor_request(struct mt76_dev *dev, u8 req, @@ -719,21 +753,17 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req, void mt76u_single_wr(struct mt76_dev *dev, const u8 req, const u16 offset, const u32 val); int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf); -void mt76u_deinit(struct mt76_dev *dev); -int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, 
- int nsgs, int len, int sglen, gfp_t gfp); -void mt76u_buf_free(struct mt76u_buf *buf); -int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, - struct mt76u_buf *buf, gfp_t gfp, - usb_complete_t complete_fn, void *context); int mt76u_submit_rx_buffers(struct mt76_dev *dev); int mt76u_alloc_queues(struct mt76_dev *dev); void mt76u_stop_queues(struct mt76_dev *dev); void mt76u_stop_stat_wk(struct mt76_dev *dev); void mt76u_queues_deinit(struct mt76_dev *dev); -void mt76u_mcu_complete_urb(struct urb *urb); -int mt76u_mcu_init_rx(struct mt76_dev *dev); -void mt76u_mcu_deinit(struct mt76_dev *dev); +struct sk_buff * +mt76_mcu_msg_alloc(const void *data, int head_len, + int data_len, int tail_len); +void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb); +struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev, + unsigned long expires); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig new file mode 100644 index 000000000000..087945c3d8f3 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/Kconfig @@ -0,0 +1,9 @@ +config MT7603E + tristate "MediaTek MT7603E (PCIe) and MT76x8 WLAN support" + select MT76_CORE + depends on MAC80211 + depends on PCI + help + This adds support for MT7603E wireless PCIe devices and the WLAN core on + MT7628/MT7688 SoC devices + diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/Makefile b/drivers/net/wireless/mediatek/mt76/mt7603/Makefile new file mode 100644 index 000000000000..d95a30421c62 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_MT7603E) += mt7603e.o + +mt7603e-y := \ + pci.o soc.o main.o init.o mcu.o \ + core.o dma.o mac.o eeprom.o \ + beacon.o debugfs.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c new file mode 100644 index 000000000000..afcd86f735b4 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: ISC */ + +#include "mt7603.h" + +struct beacon_bc_data { + struct mt7603_dev *dev; + struct sk_buff_head q; + struct sk_buff *tail[MT7603_MAX_INTERFACES]; + int count[MT7603_MAX_INTERFACES]; +}; + +static void +mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt7603_dev *dev = (struct mt7603_dev *)priv; + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + struct sk_buff *skb = NULL; + + if (!(dev->beacon_mask & BIT(mvif->idx))) + return; + + skb = ieee80211_beacon_get(mt76_hw(dev), vif); + if (!skb) + return; + + mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb, + &mvif->sta.wcid, NULL); + + spin_lock_bh(&dev->ps_lock); + mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | + FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) | + FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, + dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) | + FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | + FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8)); + + if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) + dev->beacon_check = MT7603_WATCHDOG_TIMEOUT; + + spin_unlock_bh(&dev->ps_lock); +} + +static void +mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct beacon_bc_data *data = priv; + struct mt7603_dev *dev = data->dev; + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + struct ieee80211_tx_info *info; + struct sk_buff *skb; + + if (!(dev->beacon_mask & BIT(mvif->idx))) + return; + + skb = 
ieee80211_get_buffered_bc(mt76_hw(dev), vif); + if (!skb) + return; + + info = IEEE80211_SKB_CB(skb); + info->control.vif = vif; + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + mt76_skb_set_moredata(skb, true); + __skb_queue_tail(&data->q, skb); + data->tail[mvif->idx] = skb; + data->count[mvif->idx]++; +} + +void mt7603_pre_tbtt_tasklet(unsigned long arg) +{ + struct mt7603_dev *dev = (struct mt7603_dev *)arg; + struct mt76_queue *q; + struct beacon_bc_data data = {}; + struct sk_buff *skb; + int i, nframes; + + data.dev = dev; + __skb_queue_head_init(&data.q); + + q = &dev->mt76.q_tx[MT_TXQ_BEACON]; + spin_lock_bh(&q->lock); + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7603_update_beacon_iter, dev); + mt76_queue_kick(dev, q); + spin_unlock_bh(&q->lock); + + /* Flush all previous CAB queue packets */ + mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); + + mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false); + + mt76_csa_check(&dev->mt76); + if (dev->mt76.csa_complete) + goto out; + + q = &dev->mt76.q_tx[MT_TXQ_CAB]; + do { + nframes = skb_queue_len(&data.q); + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7603_add_buffered_bc, &data); + } while (nframes != skb_queue_len(&data.q) && + skb_queue_len(&data.q) < 8); + + if (skb_queue_empty(&data.q)) + goto out; + + for (i = 0; i < ARRAY_SIZE(data.tail); i++) { + if (!data.tail[i]) + continue; + + mt76_skb_set_moredata(data.tail[i], false); + } + + spin_lock_bh(&q->lock); + while ((skb = __skb_dequeue(&data.q)) != NULL) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + + mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid, + NULL); + } + mt76_queue_kick(dev, q); + spin_unlock_bh(&q->lock); + + for (i = 0; i < ARRAY_SIZE(data.count); i++) + mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i), + data.count[i] << MT_WF_ARB_CAB_COUNT_B0_SHIFT(i)); + + mt76_wr(dev, MT_WF_ARB_CAB_START, + MT_WF_ARB_CAB_START_BSSn(0) | + (MT_WF_ARB_CAB_START_BSS0n(1) * + ((1 << (MT7603_MAX_INTERFACES - 1)) - 1))); + +out: + mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); + if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > + __sw_hweight8(dev->beacon_mask)) + dev->beacon_check++; +} + +void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval) +{ + u32 pre_tbtt = MT7603_PRE_TBTT_TIME / 64; + + if (idx >= 0) { + if (intval) + dev->beacon_mask |= BIT(idx); + else + dev->beacon_mask &= ~BIT(idx); + } + + if (!dev->beacon_mask || (!intval && idx < 0)) { + mt7603_irq_disable(dev, MT_INT_MAC_IRQ3); + mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK); + mt76_wr(dev, MT_HW_INT_MASK(3), 0); + return; + } + + dev->beacon_int = intval; + mt76_wr(dev, MT_TBTT, + FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE); + + mt76_wr(dev, MT_TBTT_TIMER_CFG, 0x99); /* start timer */ + + mt76_rmw_field(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK, + MT_BCNQ_OPMODE_AP); + mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCN_PRIO); + mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCAST_PRIO); + + mt76_wr(dev, MT_PRE_TBTT, pre_tbtt); + + mt76_set(dev, MT_HW_INT_MASK(3), + MT_HW_INT3_PRE_TBTT0 | MT_HW_INT3_TBTT0); + + mt76_set(dev, MT_WF_ARB_BCN_START, + MT_WF_ARB_BCN_START_BSSn(0) | + ((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1))); + mt7603_irq_enable(dev, MT_INT_MAC_IRQ3); + + if (dev->beacon_mask & ~BIT(0)) + mt76_set(dev, MT_LPON_SBTOR(0), 
MT_LPON_SBTOR_SUB_BSS_EN); + else + mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c new file mode 100644 index 000000000000..1086dcd376a0 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: ISC */ + +#include "mt7603.h" + +void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); + dev->mt76.mmio.irqmask &= ~clear; + dev->mt76.mmio.irqmask |= set; + mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); + spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); +} + +void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + + mt7603_irq_enable(dev, MT_INT_RX_DONE(q)); +} + +irqreturn_t mt7603_irq_handler(int irq, void *dev_instance) +{ + struct mt7603_dev *dev = dev_instance; + u32 intr; + + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) + return IRQ_NONE; + + intr &= dev->mt76.mmio.irqmask; + + if (intr & MT_INT_MAC_IRQ3) { + u32 hwintr = mt76_rr(dev, MT_HW_INT_STATUS(3)); + + mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr); + if (hwintr & MT_HW_INT3_PRE_TBTT0) + tasklet_schedule(&dev->pre_tbtt_tasklet); + + if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete) + mt76_csa_finish(&dev->mt76); + } + + if (intr & MT_INT_TX_DONE_ALL) { + mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL); + tasklet_schedule(&dev->tx_tasklet); + } + + if (intr & MT_INT_RX_DONE(0)) { + mt7603_irq_disable(dev, MT_INT_RX_DONE(0)); + napi_schedule(&dev->mt76.napi[0]); + } + + if (intr & MT_INT_RX_DONE(1)) { + mt7603_irq_disable(dev, MT_INT_RX_DONE(1)); + napi_schedule(&dev->mt76.napi[1]); + } + + return IRQ_HANDLED; +} + +u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr) +{ + u32 base = addr & GENMASK(31, 19); + u32 offset = addr & GENMASK(18, 0); + + dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base); + + return MT_PCIE_REMAP_BASE_2 + offset; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c new file mode 100644 index 000000000000..f8b3b6ab6297 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: ISC */ + +#include "mt7603.h" + +static int +mt7603_reset_read(struct seq_file *s, void *data) +{ + struct mt7603_dev *dev = dev_get_drvdata(s->private); + static const char * const reset_cause_str[] = { + [RESET_CAUSE_TX_HANG] = "TX hang", + [RESET_CAUSE_TX_BUSY] = "TX DMA busy stuck", + [RESET_CAUSE_RX_BUSY] = "RX DMA busy stuck", + [RESET_CAUSE_RX_PSE_BUSY] = "RX PSE busy stuck", + [RESET_CAUSE_BEACON_STUCK] = "Beacon stuck", + [RESET_CAUSE_MCU_HANG] = "MCU hang", + [RESET_CAUSE_RESET_FAILED] = "PSE reset failed", + }; + int i; + + for (i = 0; i < ARRAY_SIZE(reset_cause_str); i++) { + if (!reset_cause_str[i]) + continue; + + seq_printf(s, "%20s: %u\n", reset_cause_str[i], + dev->reset_cause[i]); + } + + return 0; +} + +static int +mt7603_radio_read(struct seq_file *s, void *data) +{ + struct mt7603_dev *dev = dev_get_drvdata(s->private); + + seq_printf(s, "Sensitivity: %d\n", dev->sensitivity); + seq_printf(s, "False CCA: ofdm=%d cck=%d\n", + dev->false_cca_ofdm, dev->false_cca_cck); + + return 0; +} + +void 
mt7603_init_debugfs(struct mt7603_dev *dev) +{ + struct dentry *dir; + + dir = mt76_register_debugfs(&dev->mt76); + if (!dir) + return; + + debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test); + debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir, + mt7603_reset_read); + debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, + mt7603_radio_read); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c new file mode 100644 index 000000000000..d69e82c66ab2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: ISC */ + +#include "mt7603.h" +#include "mac.h" +#include "../dma.h" + +static int +mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q, + int idx, int n_desc) +{ + int ret; + + q->hw_idx = idx; + q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE; + q->ndesc = n_desc; + + ret = mt76_queue_alloc(dev, q); + if (ret) + return ret; + + mt7603_irq_enable(dev, MT_INT_TX_DONE(idx)); + + return 0; +} + +static void +mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) +{ + __le32 *txd = (__le32 *)skb->data; + struct mt7603_sta *msta; + struct mt76_wcid *wcid; + int idx; + u32 val; + + if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr)) + goto free; + + val = le32_to_cpu(txd[1]); + idx = FIELD_GET(MT_TXD1_WLAN_IDX, val); + skb->priority = FIELD_GET(MT_TXD1_TID, val); + + if (idx >= MT7603_WTBL_STA - 1) + goto free; + + wcid = rcu_dereference(dev->mt76.wcid[idx]); + if (!wcid) + goto free; + + msta = container_of(wcid, struct mt7603_sta, wcid); + val = le32_to_cpu(txd[0]); + skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); + + spin_lock_bh(&dev->ps_lock); + __skb_queue_tail(&msta->psq, skb); + if (skb_queue_len(&msta->psq) >= 64) { + skb = __skb_dequeue(&msta->psq); + dev_kfree_skb(skb); + } + spin_unlock_bh(&dev->ps_lock); + return; + +free: + dev_kfree_skb(skb); +} + +void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + __le32 *rxd = (__le32 *)skb->data; + __le32 *end = (__le32 *)&skb->data[skb->len]; + enum rx_pkt_type type; + + type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); + + if (q == MT_RXQ_MCU) { + if (type == PKT_TYPE_RX_EVENT) + mt76_mcu_rx_event(&dev->mt76, skb); + else + mt7603_rx_loopback_skb(dev, skb); + return; + } + + switch (type) { + case PKT_TYPE_TXS: + for (rxd++; rxd + 5 <= end; rxd += 5) + mt7603_mac_add_txs(dev, rxd); + dev_kfree_skb(skb); + break; + case PKT_TYPE_RX_EVENT: + mt76_mcu_rx_event(&dev->mt76, skb); + return; + case PKT_TYPE_NORMAL: + if (mt7603_mac_fill_rx(dev, skb) == 0) { + mt76_rx(&dev->mt76, q, skb); + return; + } + /* fall through */ + default: + dev_kfree_skb(skb); + break; + } +} + +static int +mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q, + int idx, int n_desc, int bufsize) +{ + int ret; + + q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE; + q->ndesc = n_desc; + q->buf_size = bufsize; + + ret = mt76_queue_alloc(dev, q); + if (ret) + return ret; + + mt7603_irq_enable(dev, MT_INT_RX_DONE(idx)); + + return 0; +} + +static void +mt7603_tx_tasklet(unsigned long data) +{ + struct mt7603_dev *dev = (struct mt7603_dev *)data; + int i; + + dev->tx_dma_check = 0; + for (i = MT_TXQ_MCU; i >= 0; i--) + mt76_queue_tx_cleanup(dev, i, false); + + mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL); +} + +int 
mt7603_dma_init(struct mt7603_dev *dev) +{ + static const u8 wmm_queue_map[] = { + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 3, + }; + int ret; + int i; + + mt76_dma_attach(&dev->mt76); + + init_waitqueue_head(&dev->mt76.mmio.mcu.wait); + skb_queue_head_init(&dev->mt76.mmio.mcu.res_q); + + tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev); + + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN | + MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); + mt7603_pse_client_reset(dev); + + for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i], + wmm_queue_map[i], + MT_TX_RING_SIZE); + if (ret) + return ret; + } + + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], + MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); + if (ret) + return ret; + + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], + MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); + if (ret) + return ret; + + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON], + MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE); + if (ret) + return ret; + + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB], + MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE); + if (ret) + return ret; + + ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, + MT_MCU_RING_SIZE, MT_RX_BUF_SIZE); + if (ret) + return ret; + + ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0, + MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE); + if (ret) + return ret; + + mt76_wr(dev, MT_DELAY_INT_CFG, 0); + return mt76_init_queues(dev); +} + +void mt7603_dma_cleanup(struct mt7603_dev *dev) +{ + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + + tasklet_kill(&dev->tx_tasklet); + mt76_dma_cleanup(&dev->mt76); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c new file mode 100644 index 000000000000..8c120e4461b0 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: ISC */ + +#include "mt7603.h" +#include "eeprom.h" + +static int +mt7603_efuse_read(struct mt7603_dev *dev, u32 base, u16 addr, u8 *data) +{ + u32 val; + int i; + + val = mt76_rr(dev, base + MT_EFUSE_CTRL); + val &= ~(MT_EFUSE_CTRL_AIN | + MT_EFUSE_CTRL_MODE); + val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); + val |= MT_EFUSE_CTRL_KICK; + mt76_wr(dev, base + MT_EFUSE_CTRL, val); + + if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) + return -ETIMEDOUT; + + udelay(2); + + val = mt76_rr(dev, base + MT_EFUSE_CTRL); + if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT || + WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) { + memset(data, 0xff, 16); + return 0; + } + + for (i = 0; i < 4; i++) { + val = mt76_rr(dev, base + MT_EFUSE_RDATA(i)); + put_unaligned_le32(val, data + 4 * i); + } + + return 0; +} + +static int +mt7603_efuse_init(struct mt7603_dev *dev) +{ + u32 base = mt7603_reg_map(dev, MT_EFUSE_BASE); + int len = MT7603_EEPROM_SIZE; + void *buf; + int ret, i; + + if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY) + return 0; + + dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL); + dev->mt76.otp.size = len; + if (!dev->mt76.otp.data) + return -ENOMEM; + + buf = dev->mt76.otp.data; + for (i = 0; i + 16 <= len; i += 16) 
{ + ret = mt7603_efuse_read(dev, base, i, buf + i); + if (ret) + return ret; + } + + return 0; +} + +static bool +mt7603_has_cal_free_data(struct mt7603_dev *dev, u8 *efuse) +{ + if (!efuse[MT_EE_TEMP_SENSOR_CAL]) + return false; + + if (get_unaligned_le16(efuse + MT_EE_TX_POWER_0_START_2G) == 0) + return false; + + if (get_unaligned_le16(efuse + MT_EE_TX_POWER_1_START_2G) == 0) + return false; + + if (!efuse[MT_EE_CP_FT_VERSION]) + return false; + + if (!efuse[MT_EE_XTAL_FREQ_OFFSET]) + return false; + + if (!efuse[MT_EE_XTAL_WF_RFCAL]) + return false; + + return true; +} + +static void +mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse) +{ + static const u8 cal_free_bytes[] = { + MT_EE_TEMP_SENSOR_CAL, + MT_EE_CP_FT_VERSION, + MT_EE_XTAL_FREQ_OFFSET, + MT_EE_XTAL_WF_RFCAL, + /* Skip for MT7628 */ + MT_EE_TX_POWER_0_START_2G, + MT_EE_TX_POWER_0_START_2G + 1, + MT_EE_TX_POWER_1_START_2G, + MT_EE_TX_POWER_1_START_2G + 1, + }; + u8 *eeprom = dev->mt76.eeprom.data; + int n = ARRAY_SIZE(cal_free_bytes); + int i; + + if (!mt7603_has_cal_free_data(dev, efuse)) + return; + + if (is_mt7628(dev)) + n -= 4; + + for (i = 0; i < n; i++) { + int offset = cal_free_bytes[i]; + + eeprom[offset] = efuse[offset]; + } +} + +static int +mt7603_eeprom_load(struct mt7603_dev *dev) +{ + int ret; + + ret = mt76_eeprom_init(&dev->mt76, MT7603_EEPROM_SIZE); + if (ret < 0) + return ret; + + return mt7603_efuse_init(dev); +} + +static int mt7603_check_eeprom(struct mt76_dev *dev) +{ + u16 val = get_unaligned_le16(dev->eeprom.data); + + switch (val) { + case 0x7628: + case 0x7603: + return 0; + default: + return -EINVAL; + } +} + +int mt7603_eeprom_init(struct mt7603_dev *dev) +{ + int ret; + + ret = mt7603_eeprom_load(dev); + if (ret < 0) + return ret; + + if (dev->mt76.otp.data) { + if (mt7603_check_eeprom(&dev->mt76) == 0) + mt7603_apply_cal_free_data(dev, dev->mt76.otp.data); + else + memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, + MT7603_EEPROM_SIZE); + } + + dev->mt76.cap.has_2ghz = true; + memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR, + ETH_ALEN); + + mt76_eeprom_override(&dev->mt76); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h new file mode 100644 index 000000000000..f27b99b7e359 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: ISC */ + +#ifndef __MT7603_EEPROM_H +#define __MT7603_EEPROM_H + +#include "mt7603.h" + +enum mt7603_eeprom_field { + MT_EE_CHIP_ID = 0x000, + MT_EE_VERSION = 0x002, + MT_EE_MAC_ADDR = 0x004, + MT_EE_NIC_CONF_0 = 0x034, + MT_EE_NIC_CONF_1 = 0x036, + MT_EE_NIC_CONF_2 = 0x042, + + MT_EE_XTAL_TRIM_1 = 0x03a, + + MT_EE_RSSI_OFFSET_2G = 0x046, + MT_EE_WIFI_RF_SETTING = 0x048, + MT_EE_RSSI_OFFSET_5G = 0x04a, + + MT_EE_TX_POWER_DELTA_BW40 = 0x050, + MT_EE_TX_POWER_DELTA_BW80 = 0x052, + + MT_EE_TX_POWER_EXT_PA_5G = 0x054, + + MT_EE_TEMP_SENSOR_CAL = 0x055, + + MT_EE_TX_POWER_0_START_2G = 0x056, + MT_EE_TX_POWER_1_START_2G = 0x05c, + + /* used as byte arrays */ +#define MT_TX_POWER_GROUP_SIZE_5G 5 +#define MT_TX_POWER_GROUPS_5G 6 + MT_EE_TX_POWER_0_START_5G = 0x062, + + MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074, + MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076, + + MT_EE_TX_POWER_1_START_5G = 0x080, + + MT_EE_TX_POWER_CCK = 0x0a0, + MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2, + MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4, + MT_EE_TX_POWER_OFDM_2G_54M = 0x0a6, + MT_EE_TX_POWER_HT_BPSK_QPSK = 0x0a8, + MT_EE_TX_POWER_HT_16_64_QAM 
= 0x0aa, + MT_EE_TX_POWER_HT_64_QAM = 0x0ac, + + MT_EE_ELAN_RX_MODE_GAIN = 0x0c0, + MT_EE_ELAN_RX_MODE_NF = 0x0c1, + MT_EE_ELAN_RX_MODE_P1DB = 0x0c2, + + MT_EE_ELAN_BYPASS_MODE_GAIN = 0x0c3, + MT_EE_ELAN_BYPASS_MODE_NF = 0x0c4, + MT_EE_ELAN_BYPASS_MODE_P1DB = 0x0c5, + + MT_EE_STEP_NUM_NEG_6_7 = 0x0c6, + MT_EE_STEP_NUM_NEG_4_5 = 0x0c8, + MT_EE_STEP_NUM_NEG_2_3 = 0x0ca, + MT_EE_STEP_NUM_NEG_0_1 = 0x0cc, + + MT_EE_REF_STEP_24G = 0x0ce, + + MT_EE_STEP_NUM_PLUS_1_2 = 0x0d0, + MT_EE_STEP_NUM_PLUS_3_4 = 0x0d2, + MT_EE_STEP_NUM_PLUS_5_6 = 0x0d4, + MT_EE_STEP_NUM_PLUS_7 = 0x0d6, + + MT_EE_CP_FT_VERSION = 0x0f0, + + MT_EE_XTAL_FREQ_OFFSET = 0x0f4, + MT_EE_XTAL_TRIM_2_COMP = 0x0f5, + MT_EE_XTAL_TRIM_3_COMP = 0x0f6, + MT_EE_XTAL_WF_RFCAL = 0x0f7, + + __MT_EE_MAX +}; + +enum mt7603_eeprom_source { + MT_EE_SRC_PROM, + MT_EE_SRC_EFUSE, + MT_EE_SRC_FLASH, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c new file mode 100644 index 000000000000..15cc8f33b34d --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c @@ -0,0 +1,578 @@ +/* SPDX-License-Identifier: ISC */ + +#include +#include "mt7603.h" +#include "mac.h" +#include "eeprom.h" + +const struct mt76_driver_ops mt7603_drv_ops = { + .txwi_size = MT_TXD_SIZE, + .tx_prepare_skb = mt7603_tx_prepare_skb, + .tx_complete_skb = mt7603_tx_complete_skb, + .rx_skb = mt7603_queue_rx_skb, + .rx_poll_complete = mt7603_rx_poll_complete, + .sta_ps = mt7603_sta_ps, + .sta_add = mt7603_sta_add, + .sta_assoc = mt7603_sta_assoc, + .sta_remove = mt7603_sta_remove, + .update_survey = mt7603_update_channel, +}; + +static void +mt7603_set_tmac_template(struct mt7603_dev *dev) +{ + u32 desc[5] = { + [1] = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 0xf), + [3] = MT_TXD5_SW_POWER_MGMT + }; + u32 addr; + int i; + + addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR); + addr += MT_CLIENT_TMAC_INFO_TEMPLATE; + for (i = 0; i < ARRAY_SIZE(desc); i++) + mt76_wr(dev, addr + 4 * i, desc[i]); +} + +static void +mt7603_dma_sched_init(struct mt7603_dev *dev) +{ + int page_size = 128; + int page_count; + int max_len = 1792; + int max_amsdu_pages = 4096 / page_size; + int max_mcu_len = 4096; + int max_beacon_len = 512 * 4 + max_len; + int max_mcast_pages = 4 * max_len / page_size; + int reserved_count = 0; + int beacon_pages; + int mcu_pages; + int i; + + page_count = mt76_get_field(dev, MT_PSE_FC_P0, + MT_PSE_FC_P0_MAX_QUOTA); + beacon_pages = 4 * (max_beacon_len / page_size); + mcu_pages = max_mcu_len / page_size; + + mt76_wr(dev, MT_PSE_FRP, + FIELD_PREP(MT_PSE_FRP_P0, 7) | + FIELD_PREP(MT_PSE_FRP_P1, 6) | + FIELD_PREP(MT_PSE_FRP_P2_RQ2, 4)); + + mt76_wr(dev, MT_HIGH_PRIORITY_1, 0x55555553); + mt76_wr(dev, MT_HIGH_PRIORITY_2, 0x78555555); + + mt76_wr(dev, MT_QUEUE_PRIORITY_1, 0x2b1a096e); + mt76_wr(dev, MT_QUEUE_PRIORITY_2, 0x785f4d3c); + + mt76_wr(dev, MT_PRIORITY_MASK, 0xffffffff); + + mt76_wr(dev, MT_SCH_1, page_count | (2 << 28)); + mt76_wr(dev, MT_SCH_2, max_amsdu_pages); + + for (i = 0; i <= 4; i++) + mt76_wr(dev, MT_PAGE_COUNT(i), max_amsdu_pages); + reserved_count += 5 * max_amsdu_pages; + + mt76_wr(dev, MT_PAGE_COUNT(5), mcu_pages); + reserved_count += mcu_pages; + + mt76_wr(dev, MT_PAGE_COUNT(7), beacon_pages); + reserved_count += beacon_pages; + + mt76_wr(dev, MT_PAGE_COUNT(8), max_mcast_pages); + reserved_count += max_mcast_pages; + + if (is_mt7603(dev)) + reserved_count = 0; + + mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count); + + if (is_mt7603(dev) && mt76xx_rev(dev) >= 
MT7603_REV_E2) { + mt76_wr(dev, MT_GROUP_THRESH(0), + page_count - beacon_pages - mcu_pages); + mt76_wr(dev, MT_GROUP_THRESH(1), beacon_pages); + mt76_wr(dev, MT_BMAP_0, 0x0080ff5f); + mt76_wr(dev, MT_GROUP_THRESH(2), mcu_pages); + mt76_wr(dev, MT_BMAP_1, 0x00000020); + } else { + mt76_wr(dev, MT_GROUP_THRESH(0), page_count); + mt76_wr(dev, MT_BMAP_0, 0xffff); + } + + mt76_wr(dev, MT_SCH_4, 0); + + for (i = 0; i <= 15; i++) + mt76_wr(dev, MT_TXTIME_THRESH(i), 0xfffff); + + mt76_set(dev, MT_SCH_4, BIT(6)); +} + +static void +mt7603_phy_init(struct mt7603_dev *dev) +{ + int rx_chains = dev->mt76.antenna_mask; + int tx_chains = __sw_hweight8(rx_chains) - 1; + + mt76_rmw(dev, MT_WF_RMAC_RMCR, + (MT_WF_RMAC_RMCR_SMPS_MODE | + MT_WF_RMAC_RMCR_RX_STREAMS), + (FIELD_PREP(MT_WF_RMAC_RMCR_SMPS_MODE, 3) | + FIELD_PREP(MT_WF_RMAC_RMCR_RX_STREAMS, rx_chains))); + + mt76_rmw_field(dev, MT_TMAC_TCR, MT_TMAC_TCR_TX_STREAMS, + tx_chains); + + dev->agc0 = mt76_rr(dev, MT_AGC(0)); + dev->agc3 = mt76_rr(dev, MT_AGC(3)); +} + +static void +mt7603_mac_init(struct mt7603_dev *dev) +{ + u8 bc_addr[ETH_ALEN]; + u32 addr; + int i; + + mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_0, + (MT_AGG_SIZE_LIMIT(0) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | + (MT_AGG_SIZE_LIMIT(1) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | + (MT_AGG_SIZE_LIMIT(2) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | + (MT_AGG_SIZE_LIMIT(3) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT)); + + mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_1, + (MT_AGG_SIZE_LIMIT(4) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | + (MT_AGG_SIZE_LIMIT(5) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | + (MT_AGG_SIZE_LIMIT(6) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | + (MT_AGG_SIZE_LIMIT(7) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT)); + + mt76_wr(dev, MT_AGG_LIMIT, + FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) | + FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) | + FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) | + FIELD_PREP(MT_AGG_LIMIT_AC(3), 24)); + + mt76_wr(dev, MT_AGG_LIMIT_1, + FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) | + FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) | + FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) | + FIELD_PREP(MT_AGG_LIMIT_AC(3), 24)); + + mt76_wr(dev, MT_AGG_CONTROL, + FIELD_PREP(MT_AGG_CONTROL_BAR_RATE, 0x4b) | + FIELD_PREP(MT_AGG_CONTROL_CFEND_RATE, 0x69) | + MT_AGG_CONTROL_NO_BA_AR_RULE); + + mt76_wr(dev, MT_AGG_RETRY_CONTROL, + FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) | + FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15)); + + mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096); + + mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13)); + mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13)); + + mt76_clear(dev, MT_WF_RMAC_TMR_PA, BIT(31)); + + mt76_set(dev, MT_WF_RMACDR, MT_WF_RMACDR_MAXLEN_20BIT); + mt76_rmw(dev, MT_WF_RMAC_MAXMINLEN, 0xffffff, 0x19000); + + mt76_wr(dev, MT_WF_RFCR1, 0); + + mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE); + + mt7603_set_tmac_template(dev); + + /* Enable RX group to HIF */ + addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR); + mt76_set(dev, addr + MT_CLIENT_RXINF, MT_CLIENT_RXINF_RXSH_GROUPS); + + /* Enable RX group to MCU */ + mt76_set(dev, MT_DMA_DCR1, GENMASK(13, 11)); + + mt76_rmw_field(dev, MT_AGG_PCR_RTS, MT_AGG_PCR_RTS_PKT_THR, 3); + mt76_set(dev, MT_TMAC_PCR, MT_TMAC_PCR_SPE_EN); + + /* include preamble detection in CCA trigger signal */ + mt76_rmw_field(dev, MT_TXREQ, MT_TXREQ_CCA_SRC_SEL, 2); + + mt76_wr(dev, MT_RXREQ, 4); + + /* Configure all rx packets to HIF */ + mt76_wr(dev, MT_DMA_RCFR0, 0xc0000000); + + /* Configure MCU txs selection with aggregation */ + mt76_wr(dev, MT_DMA_TCFR0, + FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */ + 
MT_DMA_TCFR_TXS_AGGR_COUNT); + + /* Configure HIF txs selection with aggregation */ + mt76_wr(dev, MT_DMA_TCFR1, + FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */ + MT_DMA_TCFR_TXS_AGGR_COUNT | /* Maximum count */ + MT_DMA_TCFR_TXS_BIT_MAP); + + mt76_wr(dev, MT_MCU_PCIE_REMAP_1, MT_PSE_WTBL_2_PHYS_ADDR); + + for (i = 0; i < MT7603_WTBL_SIZE; i++) + mt7603_wtbl_clear(dev, i); + + eth_broadcast_addr(bc_addr); + mt7603_wtbl_init(dev, MT7603_WTBL_RESERVED, -1, bc_addr); + dev->global_sta.wcid.idx = MT7603_WTBL_RESERVED; + rcu_assign_pointer(dev->mt76.wcid[MT7603_WTBL_RESERVED], + &dev->global_sta.wcid); + + mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2); + mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2); + + mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7)); + mt76_wr(dev, MT_AGG_ARDCR, + FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), + max_t(int, 0, MT7603_RATE_RETRY - 2)) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7603_RATE_RETRY - 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7603_RATE_RETRY - 1) | + FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7603_RATE_RETRY - 1)); + + mt76_wr(dev, MT_AGG_ARCR, + (MT_AGG_ARCR_INIT_RATE1 | + FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) | + MT_AGG_ARCR_RATE_DOWN_RATIO_EN | + FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) | + FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4))); + + mt76_set(dev, MT_WTBL_RMVTCR, MT_WTBL_RMVTCR_RX_MV_MODE); + + mt76_clear(dev, MT_SEC_SCR, MT_SEC_SCR_MASK_ORDER); + mt76_clear(dev, MT_SEC_SCR, BIT(18)); + + /* Set secondary beacon time offsets */ + for (i = 0; i <= 4; i++) + mt76_rmw_field(dev, MT_LPON_SBTOR(i), MT_LPON_SBTOR_TIME_OFFSET, + (i + 1) * (20 + 4096)); +} + +static int +mt7603_init_hardware(struct mt7603_dev *dev) +{ + int i, ret; + + mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); + + ret = mt7603_eeprom_init(dev); + if (ret < 0) + return ret; + + ret = mt7603_dma_init(dev); + if (ret) + return ret; + + mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850); + mt7603_mac_dma_start(dev); + dev->rxfilter = mt76_rr(dev, MT_WF_RFCR); + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + + for (i = 0; i < MT7603_WTBL_SIZE; i++) { + mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE | + FIELD_PREP(MT_PSE_RTA_TAG_ID, i)); + mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000); + } + + ret = mt7603_mcu_init(dev); + if (ret) + return ret; + + mt7603_dma_sched_init(dev); + mt7603_mcu_set_eeprom(dev); + mt7603_phy_init(dev); + mt7603_mac_init(dev); + + return 0; +} + +#define CCK_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ + .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \ +} + +#define OFDM_RATE(_idx, _rate) { \ + .bitrate = _rate, \ + .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ +} + +static struct ieee80211_rate mt7603_rates[] = { + CCK_RATE(0, 10), + CCK_RATE(1, 20), + CCK_RATE(2, 55), + CCK_RATE(3, 110), + OFDM_RATE(11, 60), + OFDM_RATE(15, 90), + OFDM_RATE(10, 120), + OFDM_RATE(14, 180), + OFDM_RATE(9, 240), + OFDM_RATE(13, 360), + OFDM_RATE(8, 480), + OFDM_RATE(12, 540), +}; + +static const struct ieee80211_iface_limit if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC) + }, { + .max = MT7603_MAX_INTERFACES, + .types = 
BIT(NL80211_IFTYPE_STATION) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_AP) + }, +}; + +static const struct ieee80211_iface_combination if_comb[] = { + { + .limits = if_limits, + .n_limits = ARRAY_SIZE(if_limits), + .max_interfaces = 4, + .num_different_channels = 1, + .beacon_int_infra_match = true, + } +}; + +static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on, + u8 delay_off) +{ + struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev, + mt76); + u32 val, addr; + + val = MT_LED_STATUS_DURATION(0xffff) | + MT_LED_STATUS_OFF(delay_off) | + MT_LED_STATUS_ON(delay_on); + + addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin)); + mt76_wr(dev, addr, val); + addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin)); + mt76_wr(dev, addr, val); + + val = MT_LED_CTRL_REPLAY(mt76->led_pin) | + MT_LED_CTRL_KICK(mt76->led_pin); + if (mt76->led_al) + val |= MT_LED_CTRL_POLARITY(mt76->led_pin); + addr = mt7603_reg_map(dev, MT_LED_CTRL); + mt76_wr(dev, addr, val); +} + +static int mt7603_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev, + led_cdev); + u8 delta_on, delta_off; + + delta_off = max_t(u8, *delay_off / 10, 1); + delta_on = max_t(u8, *delay_on / 10, 1); + + mt7603_led_set_config(mt76, delta_on, delta_off); + return 0; +} + +static void mt7603_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev, + led_cdev); + + if (!brightness) + mt7603_led_set_config(mt76, 0, 0xff); + else + mt7603_led_set_config(mt76, 0xff, 0); +} + +static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr) +{ + if (addr < 0x100000) + return addr; + + return mt7603_reg_map(dev, addr); +} + +static u32 mt7603_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + u32 addr = __mt7603_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7603_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + u32 addr = __mt7603_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7603_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + u32 addr = __mt7603_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + +static void +mt7603_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt7603_dev *dev = hw->priv; + + dev->ed_monitor = request->dfs_region == NL80211_DFS_ETSI; +} + +static int +mt7603_txpower_signed(int val) +{ + bool sign = val & BIT(6); + + if (!(val & BIT(7))) + return 0; + + val &= GENMASK(5, 0); + if (!sign) + val = -val; + + return val; +} + +static void +mt7603_init_txpower(struct mt7603_dev *dev, + struct ieee80211_supported_band *sband) +{ + struct ieee80211_channel *chan; + u8 *eeprom = (u8 *)dev->mt76.eeprom.data; + int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7); + u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK]; + int max_offset, cur_offset; + int i; + + if (target_power & BIT(6)) + target_power = -(target_power & GENMASK(5, 0)); + + max_offset = 0; + for (i = 0; i < 14; i++) { + cur_offset = 
mt7603_txpower_signed(rate_power[i]); + max_offset = max(max_offset, cur_offset); + } + + target_power += max_offset; + + dev->tx_power_limit = target_power; + dev->mt76.txpower_cur = target_power; + + target_power = DIV_ROUND_UP(target_power, 2); + + /* add 3 dBm for 2SS devices (combined output) */ + if (dev->mt76.antenna_mask & BIT(1)) + target_power += 3; + + for (i = 0; i < sband->n_channels; i++) { + chan = &sband->channels[i]; + chan->max_power = target_power; + } +} + + +int mt7603_register_device(struct mt7603_dev *dev) +{ + struct mt76_bus_ops *bus_ops; + struct ieee80211_hw *hw = mt76_hw(dev); + struct wiphy *wiphy = hw->wiphy; + int ret; + + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + GFP_KERNEL); + if (!bus_ops) + return -ENOMEM; + + bus_ops->rr = mt7603_rr; + bus_ops->wr = mt7603_wr; + bus_ops->rmw = mt7603_rmw; + dev->mt76.bus = bus_ops; + + INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); + tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, + (unsigned long)dev); + + /* Check for 7688, which only has 1SS */ + dev->mt76.antenna_mask = 3; + if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4)) + dev->mt76.antenna_mask = 1; + + dev->slottime = 9; + + ret = mt7603_init_hardware(dev); + if (ret) + return ret; + + hw->queues = 4; + hw->max_rates = 3; + hw->max_report_rates = 7; + hw->max_rate_tries = 11; + + hw->sta_data_size = sizeof(struct mt7603_sta); + hw->vif_data_size = sizeof(struct mt7603_vif); + + wiphy->iface_combinations = if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); + + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt7603_led_set_blink; + } + + wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_ADHOC); + + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + + wiphy->reg_notifier = mt7603_regd_notifier; + + ret = mt76_register_device(&dev->mt76, true, mt7603_rates, + ARRAY_SIZE(mt7603_rates)); + if (ret) + return ret; + + mt7603_init_debugfs(dev); + mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband); + + return 0; +} + +void mt7603_unregister_device(struct mt7603_dev *dev) +{ + tasklet_disable(&dev->pre_tbtt_tasklet); + mt76_unregister_device(&dev->mt76); + mt7603_mcu_exit(dev); + mt7603_dma_cleanup(dev); + ieee80211_free_hw(mt76_hw(dev)); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c new file mode 100644 index 000000000000..0a0115861b51 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c @@ -0,0 +1,1749 @@ +/* SPDX-License-Identifier: ISC */ + +#include +#include +#include "mt7603.h" +#include "mac.h" + +#define MT_PSE_PAGE_SIZE 128 + +static u32 +mt7603_ac_queue_mask0(u32 mask) +{ + u32 ret = 0; + + ret |= GENMASK(3, 0) * !!(mask & BIT(0)); + ret |= GENMASK(8, 5) * !!(mask & BIT(1)); + ret |= GENMASK(13, 10) * !!(mask & BIT(2)); + ret |= GENMASK(19, 16) * !!(mask & BIT(3)); + return ret; +} + +static void +mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask) +{ + mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask)); +} + +static void +mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask) +{ + mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask)); +} 
+ +void mt7603_mac_set_timing(struct mt7603_dev *dev) +{ + u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48); + u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24); + int offset = 3 * dev->coverage_class; + u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) | + FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset); + int sifs; + u32 val; + + if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) + sifs = 16; + else + sifs = 10; + + mt76_set(dev, MT_ARB_SCR, + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); + udelay(1); + + mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset); + mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset); + mt76_wr(dev, MT_IFS, + FIELD_PREP(MT_IFS_EIFS, 360) | + FIELD_PREP(MT_IFS_RIFS, 2) | + FIELD_PREP(MT_IFS_SIFS, sifs) | + FIELD_PREP(MT_IFS_SLOT, dev->slottime)); + + if (dev->slottime < 20) + val = MT7603_CFEND_RATE_DEFAULT; + else + val = MT7603_CFEND_RATE_11B; + + mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val); + + mt76_clear(dev, MT_ARB_SCR, + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); +} + +static void +mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask) +{ + mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask); + + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); +} + +static u32 +mt7603_wtbl1_addr(int idx) +{ + return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE; +} + +static u32 +mt7603_wtbl2_addr(int idx) +{ + /* Mapped to WTBL2 */ + return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE; +} + +static u32 +mt7603_wtbl3_addr(int idx) +{ + u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE); + + return base + idx * MT_WTBL3_SIZE; +} + +static u32 +mt7603_wtbl4_addr(int idx) +{ + u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE); + + return base + idx * MT_WTBL4_SIZE; +} + +void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif, + const u8 *mac_addr) +{ + const void *_mac = mac_addr; + u32 addr = mt7603_wtbl1_addr(idx); + u32 w0 = 0, w1 = 0; + int i; + + if (_mac) { + w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI, + get_unaligned_le16(_mac + 4)); + w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO, + get_unaligned_le32(_mac)); + } + + if (vif < 0) + vif = 0; + else + w0 |= MT_WTBL1_W0_RX_CHECK_A1; + w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif); + + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + mt76_set(dev, addr + 0 * 4, w0); + mt76_set(dev, addr + 1 * 4, w1); + mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL); + + mt76_stop_tx_ac(dev, GENMASK(3, 0)); + addr = mt7603_wtbl2_addr(idx); + for (i = 0; i < MT_WTBL2_SIZE; i += 4) + mt76_wr(dev, addr + i, 0); + mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2); + mt76_start_tx_ac(dev, GENMASK(3, 0)); + + addr = mt7603_wtbl3_addr(idx); + for (i = 0; i < MT_WTBL3_SIZE; i += 4) + mt76_wr(dev, addr + i, 0); + + addr = mt7603_wtbl4_addr(idx); + for (i = 0; i < MT_WTBL4_SIZE; i += 4) + mt76_wr(dev, addr + i, 0); +} + +static void +mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled) +{ + u32 addr = mt7603_wtbl1_addr(idx); + u32 val = mt76_rr(dev, addr + 3 * 4); + + val &= ~MT_WTBL1_W3_SKIP_TX; + val |= enabled * MT_WTBL1_W3_SKIP_TX; + + mt76_wr(dev, addr + 3 * 4, val); +} + +void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort) +{ + int i, port, queue; + + if (abort) { + port = 3; /* PSE */ + queue = 8; /* free queue */ + } else { + port = 0; /* HIF */ + queue = 1; /* MCU queue */ + } + + mt7603_wtbl_set_skip_tx(dev, idx, true); + + mt76_wr(dev, 
MT_TX_ABORT, MT_TX_ABORT_EN | + FIELD_PREP(MT_TX_ABORT_WCID, idx)); + + for (i = 0; i < 4; i++) { + mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | + FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) | + FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) | + FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) | + FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue)); + + WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, + 0, 5000)); + } + + mt76_wr(dev, MT_TX_ABORT, 0); + + mt7603_wtbl_set_skip_tx(dev, idx, false); +} + +void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta, + bool enabled) +{ + u32 addr = mt7603_wtbl1_addr(sta->wcid.idx); + + if (sta->smps == enabled) + return; + + mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled); + sta->smps = enabled; +} + +void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta, + bool enabled) +{ + int idx = sta->wcid.idx; + u32 addr; + + spin_lock_bh(&dev->ps_lock); + + if (sta->ps == enabled) + goto out; + + mt76_wr(dev, MT_PSE_RTA, + FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) | + FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) | + FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) | + FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) | + MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY); + + mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000); + + if (enabled) + mt7603_filter_tx(dev, idx, false); + + addr = mt7603_wtbl1_addr(idx); + mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE); + mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE, + enabled * MT_WTBL1_W3_POWER_SAVE); + mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE); + sta->ps = enabled; + +out: + spin_unlock_bh(&dev->ps_lock); +} + +void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx) +{ + int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE; + int wtbl2_frame = idx / wtbl2_frame_size; + int wtbl2_entry = idx % wtbl2_frame_size; + + int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE; + int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE; + int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size; + int wtbl3_entry = (idx % wtbl3_frame_size) * 2; + + int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE; + int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE; + int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size; + int wtbl4_entry = idx % wtbl4_frame_size; + + u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE; + int i; + + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + mt76_wr(dev, addr + 0 * 4, + MT_WTBL1_W0_RX_CHECK_A1 | + MT_WTBL1_W0_RX_CHECK_A2 | + MT_WTBL1_W0_RX_VALID); + mt76_wr(dev, addr + 1 * 4, 0); + mt76_wr(dev, addr + 2 * 4, 0); + + mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE); + + mt76_wr(dev, addr + 3 * 4, + FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) | + FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) | + FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) | + MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM); + mt76_wr(dev, addr + 4 * 4, + FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) | + FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) | + FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry)); + + mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE); + + addr = mt7603_wtbl2_addr(idx); + + /* Clear BA information */ + mt76_wr(dev, addr + (15 * 4), 0); + + mt76_stop_tx_ac(dev, GENMASK(3, 0)); + for (i = 2; i <= 4; i++) + mt76_wr(dev, addr + (i * 4), 0); + mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2); + mt76_start_tx_ac(dev, GENMASK(3, 0)); + + mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR); + mt7603_wtbl_update(dev, idx, 
MT_WTBL_UPDATE_TX_COUNT_CLEAR); + mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); +} + +void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta) +{ + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + int idx = msta->wcid.idx; + u32 addr; + u32 val; + + addr = mt7603_wtbl1_addr(idx); + + val = mt76_rr(dev, addr + 2 * 4); + val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL; + val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) | + FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) | + MT_WTBL1_W2_TXS_BAF_REPORT; + + if (sta->ht_cap.cap) + val |= MT_WTBL1_W2_HT; + if (sta->vht_cap.cap) + val |= MT_WTBL1_W2_VHT; + + mt76_wr(dev, addr + 2 * 4, val); + + addr = mt7603_wtbl2_addr(idx); + val = mt76_rr(dev, addr + 9 * 4); + val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 | + MT_WTBL2_W9_SHORT_GI_80); + if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) + val |= MT_WTBL2_W9_SHORT_GI_20; + if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) + val |= MT_WTBL2_W9_SHORT_GI_40; + mt76_wr(dev, addr + 9 * 4, val); +} + +void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid) +{ + mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr)); + mt76_wr(dev, MT_BA_CONTROL_1, + (get_unaligned_le16(addr + 4) | + FIELD_PREP(MT_BA_CONTROL_1_TID, tid) | + MT_BA_CONTROL_1_RESET)); +} + +void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, + int ba_size) +{ + u32 addr = mt7603_wtbl2_addr(wcid); + u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) | + (MT_WTBL2_W15_BA_WIN_SIZE << + (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT)); + u32 tid_val; + int i; + + if (ba_size < 0) { + /* disable */ + mt76_clear(dev, addr + (15 * 4), tid_mask); + return; + } + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + mt7603_mac_stop(dev); + switch (tid) { + case 0: + mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn); + break; + case 1: + mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn); + break; + case 2: + mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO, + ssn); + mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI, + ssn >> 8); + break; + case 3: + mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn); + break; + case 4: + mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn); + break; + case 5: + mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO, + ssn); + mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI, + ssn >> 4); + break; + case 6: + mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn); + break; + case 7: + mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn); + break; + } + mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2); + mt7603_mac_start(dev); + + for (i = 7; i > 0; i--) { + if (ba_size >= MT_AGG_SIZE_LIMIT(i)) + break; + } + + tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) | + i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT); + + mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val); +} + +static int +mt7603_get_rate(struct mt7603_dev *dev, struct ieee80211_supported_band *sband, + int idx, bool cck) +{ + int offset = 0; + int len = sband->n_bitrates; + int i; + + if (cck) { + if (sband == &dev->mt76.sband_5g.sband) + return 0; + + idx &= ~BIT(2); /* short preamble */ + } else if (sband == &dev->mt76.sband_2g.sband) { + offset = 4; + } + + for (i = offset; i < len; i++) { + if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx) + return i; + } + + return 0; +} + +static 
struct mt76_wcid * +mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast) +{ + struct mt7603_sta *sta; + struct mt76_wcid *wcid; + + if (idx >= ARRAY_SIZE(dev->mt76.wcid)) + return NULL; + + wcid = rcu_dereference(dev->mt76.wcid[idx]); + if (unicast || !wcid) + return wcid; + + if (!wcid->sta) + return NULL; + + sta = container_of(wcid, struct mt7603_sta, wcid); + if (!sta->vif) + return NULL; + + return &sta->vif->sta.wcid; +} + +static void +mt7603_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + int hdr_len = ieee80211_get_hdrlen_from_skb(skb); + u8 *pn = status->iv; + u8 *hdr; + + __skb_push(skb, 8); + memmove(skb->data, skb->data + 8, hdr_len); + hdr = skb->data + hdr_len; + + hdr[0] = pn[5]; + hdr[1] = pn[4]; + hdr[2] = 0; + hdr[3] = 0x20 | (key_id << 6); + hdr[4] = pn[3]; + hdr[5] = pn[2]; + hdr[6] = pn[1]; + hdr[7] = pn[0]; + + status->flag &= ~RX_FLAG_IV_STRIPPED; +} + +int +mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb) +{ + struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; + struct ieee80211_supported_band *sband; + struct ieee80211_hdr *hdr; + __le32 *rxd = (__le32 *)skb->data; + u32 rxd0 = le32_to_cpu(rxd[0]); + u32 rxd1 = le32_to_cpu(rxd[1]); + u32 rxd2 = le32_to_cpu(rxd[2]); + bool unicast = rxd1 & MT_RXD1_NORMAL_U2M; + bool insert_ccmp_hdr = false; + bool remove_pad; + int idx; + int i; + + memset(status, 0, sizeof(*status)); + + i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1); + sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband; + i >>= 1; + + idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2); + status->wcid = mt7603_rx_get_wcid(dev, idx, unicast); + + status->band = sband->band; + if (i < sband->n_channels) + status->freq = sband->channels[i].center_freq; + + if (rxd2 & MT_RXD2_NORMAL_FCS_ERR) + status->flag |= RX_FLAG_FAILED_FCS_CRC; + + if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR) + status->flag |= RX_FLAG_MMIC_ERROR; + + if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 && + !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) { + status->flag |= RX_FLAG_DECRYPTED; + status->flag |= RX_FLAG_IV_STRIPPED; + status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED; + } + + remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET; + + if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR) + return -EINVAL; + + if (!sband->channels) + return -EINVAL; + + rxd += 4; + if (rxd0 & MT_RXD0_NORMAL_GROUP_4) { + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + if (rxd0 & MT_RXD0_NORMAL_GROUP_1) { + u8 *data = (u8 *)rxd; + + if (status->flag & RX_FLAG_DECRYPTED) { + status->iv[0] = data[5]; + status->iv[1] = data[4]; + status->iv[2] = data[3]; + status->iv[3] = data[2]; + status->iv[4] = data[1]; + status->iv[5] = data[0]; + + insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2); + } + + rxd += 4; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + if (rxd0 & MT_RXD0_NORMAL_GROUP_2) { + rxd += 2; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } + if (rxd0 & MT_RXD0_NORMAL_GROUP_3) { + u32 rxdg0 = le32_to_cpu(rxd[0]); + u32 rxdg3 = le32_to_cpu(rxd[3]); + bool cck = false; + + i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0); + switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) { + case MT_PHY_TYPE_CCK: + cck = true; + /* fall through */ + case MT_PHY_TYPE_OFDM: + i = mt7603_get_rate(dev, sband, i, cck); + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + status->encoding = RX_ENC_HT; + if (i > 15) + return -EINVAL; + break; + default: + 
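	/* added note (not in the original patch): MT7603/MT7628 are 2.4 GHz
	 * 802.11n devices, so any PHY mode other than CCK/OFDM/HT in the RX
	 * vector is rejected here.
	 */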
return -EINVAL; + } + + if (rxdg0 & MT_RXV1_HT_SHORT_GI) + status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (rxdg0 & MT_RXV1_HT_AD_CODE) + status->enc_flags |= RX_ENC_FLAG_LDPC; + + status->enc_flags |= RX_ENC_FLAG_STBC_MASK * + FIELD_GET(MT_RXV1_HT_STBC, rxdg0); + + status->rate_idx = i; + + status->chains = dev->mt76.antenna_mask; + status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) + + dev->rssi_offset[0]; + status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) + + dev->rssi_offset[1]; + + status->signal = status->chain_signal[0]; + if (status->chains & BIT(1)) + status->signal = max(status->signal, + status->chain_signal[1]); + + if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1) + status->bw = RATE_INFO_BW_40; + + rxd += 6; + if ((u8 *)rxd - skb->data >= skb->len) + return -EINVAL; + } else { + return -EINVAL; + } + + skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad); + + if (insert_ccmp_hdr) { + u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1); + + mt7603_insert_ccmp_hdr(skb, key_id); + } + + hdr = (struct ieee80211_hdr *)skb->data; + if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) + return 0; + + status->aggr = unicast && + !ieee80211_is_qos_nullfunc(hdr->frame_control); + status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; + status->seqno = hdr->seq_ctrl >> 4; + + return 0; +} + +static u16 +mt7603_mac_tx_rate_val(struct mt7603_dev *dev, + const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw) +{ + u8 phy, nss, rate_idx; + u16 rateval; + + *bw = 0; + if (rate->flags & IEEE80211_TX_RC_MCS) { + rate_idx = rate->idx; + nss = 1 + (rate->idx >> 3); + phy = MT_PHY_TYPE_HT; + if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) + phy = MT_PHY_TYPE_HT_GF; + if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) + *bw = 1; + } else { + const struct ieee80211_rate *r; + int band = dev->mt76.chandef.chan->band; + u16 val; + + nss = 1; + r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx]; + if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) + val = r->hw_value_short; + else + val = r->hw_value; + + phy = val >> 8; + rate_idx = val & 0xff; + } + + rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) | + FIELD_PREP(MT_TX_RATE_MODE, phy)); + + if (stbc && nss == 1) + rateval |= MT_TX_RATE_STBC; + + return rateval; +} + +void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates) +{ + int wcid = sta->wcid.idx; + u32 addr = mt7603_wtbl2_addr(wcid); + bool stbc = false; + int n_rates = sta->n_rates; + u8 bw, bw_prev, bw_idx = 0; + u16 val[4]; + u16 probe_val; + u32 w9 = mt76_rr(dev, addr + 9 * 4); + int i; + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return; + + for (i = n_rates; i < 4; i++) + rates[i] = rates[n_rates - 1]; + + w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 | + MT_WTBL2_W9_SHORT_GI_80; + + val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw); + bw_prev = bw; + + if (probe_rate) { + probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw); + if (bw) + bw_idx = 1; + else + bw_prev = 0; + } else { + probe_val = val[0]; + } + + w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw); + w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw); + + val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw); + if (bw_prev) { + bw_idx = 3; + bw_prev = bw; + } + + val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw); + if (bw_prev) { + bw_idx = 5; + bw_prev = bw; + } + + val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, 
&bw); + if (bw_prev) + bw_idx = 7; + + w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE, + bw_idx ? bw_idx - 1 : 7); + + mt76_wr(dev, MT_WTBL_RIUCR0, w9); + + mt76_wr(dev, MT_WTBL_RIUCR1, + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0])); + + mt76_wr(dev, MT_WTBL_RIUCR2, + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2])); + + mt76_wr(dev, MT_WTBL_RIUCR3, + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3])); + + mt76_wr(dev, MT_WTBL_UPDATE, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) | + MT_WTBL_UPDATE_RATE_UPDATE | + MT_WTBL_UPDATE_TX_COUNT_CLEAR); + + if (!sta->wcid.tx_rate_set) + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates; + sta->wcid.tx_rate_set = true; +} + +static enum mt7603_cipher_type +mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) +{ + memset(key_data, 0, 32); + if (!key) + return MT_CIPHER_NONE; + + if (key->keylen > 32) + return MT_CIPHER_NONE; + + memcpy(key_data, key->key, key->keylen); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + return MT_CIPHER_WEP40; + case WLAN_CIPHER_SUITE_WEP104: + return MT_CIPHER_WEP104; + case WLAN_CIPHER_SUITE_TKIP: + /* Rx/Tx MIC keys are swapped */ + memcpy(key_data + 16, key->key + 24, 8); + memcpy(key_data + 24, key->key + 16, 8); + return MT_CIPHER_TKIP; + case WLAN_CIPHER_SUITE_CCMP: + return MT_CIPHER_AES_CCMP; + default: + return MT_CIPHER_NONE; + } +} + +int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid, + struct ieee80211_key_conf *key) +{ + enum mt7603_cipher_type cipher; + u32 addr = mt7603_wtbl3_addr(wcid); + u8 key_data[32]; + int key_len = sizeof(key_data); + + cipher = mt7603_mac_get_key_info(key, key_data); + if (cipher == MT_CIPHER_NONE && key) + return -EOPNOTSUPP; + + if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) { + addr += key->keyidx * 16; + key_len = 16; + } + + mt76_wr_copy(dev, addr, key_data, key_len); + + addr = mt7603_wtbl1_addr(wcid); + mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher); + if (key) + mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx); + mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key); + + return 0; +} + +static int +mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + int pid, struct ieee80211_key_conf *key) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_rate *rate = &info->control.rates[0]; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_vif *vif = info->control.vif; + struct mt7603_vif *mvif; + int wlan_idx; + int hdr_len = ieee80211_get_hdrlen_from_skb(skb); + int tx_count = 8; + u8 frame_type, frame_subtype; + u16 fc = le16_to_cpu(hdr->frame_control); + u8 vif_idx = 0; + u32 val; + u8 bw; + + if (vif) { + mvif = (struct mt7603_vif *)vif->drv_priv; + vif_idx = mvif->idx; + if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON]) + vif_idx += 0x10; + } + + if (sta) { + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + + tx_count = msta->rate_count; + } + + if (wcid) + wlan_idx = wcid->idx; + else + wlan_idx = MT7603_WTBL_RESERVED; + + 
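	/* note added for clarity (not part of the original patch): the 802.11
	 * frame_control field keeps the frame type in bits 2-3 and the subtype
	 * in bits 4-7, so the shifts below yield the raw values expected by
	 * the TXD2 FRAME_TYPE and SUB_TYPE descriptor fields.
	 */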
frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2; + frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4; + + val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | + FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx); + txwi[0] = cpu_to_le32(val); + + val = MT_TXD1_LONG_FORMAT | + FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) | + FIELD_PREP(MT_TXD1_TID, + skb->priority & IEEE80211_QOS_CTL_TID_MASK) | + FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) | + FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) | + FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) | + FIELD_PREP(MT_TXD1_PROTECTED, !!key); + txwi[1] = cpu_to_le32(val); + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK); + + val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) | + FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) | + FIELD_PREP(MT_TXD2_MULTICAST, + is_multicast_ether_addr(hdr->addr1)); + txwi[2] = cpu_to_le32(val); + + if (!(info->flags & IEEE80211_TX_CTL_AMPDU)) + txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); + + txwi[4] = 0; + + val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT | + FIELD_PREP(MT_TXD5_PID, pid); + txwi[5] = cpu_to_le32(val); + + txwi[6] = 0; + + if (rate->idx >= 0 && rate->count && + !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { + bool stbc = info->flags & IEEE80211_TX_CTL_STBC; + u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw); + + txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE); + + val = MT_TXD6_FIXED_BW | + FIELD_PREP(MT_TXD6_BW, bw) | + FIELD_PREP(MT_TXD6_TX_RATE, rateval); + txwi[6] |= cpu_to_le32(val); + + if (rate->flags & IEEE80211_TX_RC_SHORT_GI) + txwi[6] |= cpu_to_le32(MT_TXD6_SGI); + + if (!(rate->flags & IEEE80211_TX_RC_MCS)) + txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE); + + tx_count = rate->count; + } + + /* use maximum tx count for beacons and buffered multicast */ + if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON]) + tx_count = 0x1f; + + val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | + FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl)); + txwi[3] = cpu_to_le32(val); + + if (key) { + u64 pn = atomic64_inc_return(&key->tx_pn); + + txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID); + txwi[4] = cpu_to_le32(pn & GENMASK(31, 0)); + txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32)); + } + + txwi[7] = 0; + + return 0; +} + +int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; + int pid; + + if (!wcid) + wcid = &dev->global_sta.wcid; + + if (sta) { + msta = (struct mt7603_sta *)sta->drv_priv; + + if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_CTL_CLEAR_PS_FILT)) || + (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) + mt7603_wtbl_set_ps(dev, msta, false); + } + + pid = mt76_tx_status_skb_add(mdev, wcid, skb); + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { + spin_lock_bh(&dev->mt76.lock); + msta->rate_probe = true; + mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0], + msta->rates); + spin_unlock_bh(&dev->mt76.lock); + } + + mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key); + + return 0; +} + +static bool +mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta, + struct ieee80211_tx_info *info, __le32 *txs_data) +{ + struct 
ieee80211_supported_band *sband; + int final_idx = 0; + u32 final_rate; + u32 final_rate_flags; + bool final_mpdu; + bool ack_timeout; + bool fixed_rate; + bool probe; + bool ampdu; + bool cck = false; + int count; + u32 txs; + u8 pid; + int idx; + int i; + + fixed_rate = info->status.rates[0].count; + probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); + + txs = le32_to_cpu(txs_data[4]); + final_mpdu = txs & MT_TXS4_ACKED_MPDU; + ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU); + pid = FIELD_GET(MT_TXS4_PID, txs); + count = FIELD_GET(MT_TXS4_TX_COUNT, txs); + + txs = le32_to_cpu(txs_data[0]); + final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs); + ack_timeout = txs & MT_TXS0_ACK_TIMEOUT; + + if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT)) + return false; + + if (txs & MT_TXS0_QUEUE_TIMEOUT) + return false; + + if (!ack_timeout) + info->flags |= IEEE80211_TX_STAT_ACK; + + info->status.ampdu_len = 1; + info->status.ampdu_ack_len = !!(info->flags & + IEEE80211_TX_STAT_ACK); + + if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU)) + info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; + + if (fixed_rate && !probe) { + info->status.rates[0].count = count; + goto out; + } + + for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) { + int cur_count = min_t(int, count, 2 * MT7603_RATE_RETRY); + + if (!i && probe) { + cur_count = 1; + } else { + info->status.rates[i] = sta->rates[idx]; + idx++; + } + + if (i && info->status.rates[i].idx < 0) { + info->status.rates[i - 1].count += count; + break; + } + + if (!count) { + info->status.rates[i].idx = -1; + break; + } + + info->status.rates[i].count = cur_count; + final_idx = i; + count -= cur_count; + } + +out: + final_rate_flags = info->status.rates[final_idx].flags; + + switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) { + case MT_PHY_TYPE_CCK: + cck = true; + /* fall through */ + case MT_PHY_TYPE_OFDM: + if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) + sband = &dev->mt76.sband_5g.sband; + else + sband = &dev->mt76.sband_2g.sband; + final_rate &= GENMASK(5, 0); + final_rate = mt7603_get_rate(dev, sband, final_rate, cck); + final_rate_flags = 0; + break; + case MT_PHY_TYPE_HT_GF: + case MT_PHY_TYPE_HT: + final_rate_flags |= IEEE80211_TX_RC_MCS; + final_rate &= GENMASK(5, 0); + if (i > 15) + return false; + break; + default: + return false; + } + + info->status.rates[final_idx].idx = final_rate; + info->status.rates[final_idx].flags = final_rate_flags; + + return true; +} + +static bool +mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid, + __le32 *txs_data) +{ + struct mt76_dev *mdev = &dev->mt76; + struct sk_buff_head list; + struct sk_buff *skb; + + if (pid < MT_PACKET_ID_FIRST) + return false; + + mt76_tx_status_lock(mdev, &list); + skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list); + if (skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { + spin_lock_bh(&dev->mt76.lock); + if (sta->rate_probe) { + mt7603_wtbl_set_rates(dev, sta, NULL, + sta->rates); + sta->rate_probe = false; + } + spin_unlock_bh(&dev->mt76.lock); + } + + if (!mt7603_fill_txs(dev, sta, info, txs_data)) { + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + } + + mt76_tx_status_skb_done(mdev, skb, &list); + } + mt76_tx_status_unlock(mdev, &list); + + return !!skb; +} + +void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data) +{ + struct ieee80211_tx_info info = {}; + struct ieee80211_sta *sta = NULL; + struct mt7603_sta *msta = 
NULL; + struct mt76_wcid *wcid; + __le32 *txs_data = data; + u32 txs; + u8 wcidx; + u8 pid; + + txs = le32_to_cpu(txs_data[4]); + pid = FIELD_GET(MT_TXS4_PID, txs); + txs = le32_to_cpu(txs_data[3]); + wcidx = FIELD_GET(MT_TXS3_WCID, txs); + + if (pid == MT_PACKET_ID_NO_ACK) + return; + + if (wcidx >= ARRAY_SIZE(dev->mt76.wcid)) + return; + + rcu_read_lock(); + + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + if (!wcid) + goto out; + + msta = container_of(wcid, struct mt7603_sta, wcid); + sta = wcid_to_sta(wcid); + + if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data)) + goto out; + + if (wcidx >= MT7603_WTBL_STA || !sta) + goto out; + + if (mt7603_fill_txs(dev, msta, &info, txs_data)) + ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info); + +out: + rcu_read_unlock(); +} + +void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct sk_buff *skb = e->skb; + + if (!e->txwi) { + dev_kfree_skb_any(skb); + return; + } + + if (q - dev->mt76.q_tx < 4) + dev->tx_hang_check = 0; + + mt76_tx_complete_skb(mdev, skb); +} + +static bool +wait_for_wpdma(struct mt7603_dev *dev) +{ + return mt76_poll(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, + 0, 1000); +} + +static void mt7603_pse_reset(struct mt7603_dev *dev) +{ + /* Clear previous reset result */ + if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) + mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S); + + /* Reset PSE */ + mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE); + + if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET, + MT_MCU_DEBUG_RESET_PSE_S, + MT_MCU_DEBUG_RESET_PSE_S, 500)) { + dev->reset_cause[RESET_CAUSE_RESET_FAILED]++; + mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE); + } else { + dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0; + mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES); + } + + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3) + dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0; +} + +void mt7603_mac_dma_start(struct mt7603_dev *dev) +{ + mt7603_mac_start(dev); + + wait_for_wpdma(dev); + usleep_range(50, 100); + + mt76_set(dev, MT_WPDMA_GLO_CFG, + (MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_RX_DMA_EN | + FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE)); + + mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL); +} + +void mt7603_mac_start(struct mt7603_dev *dev) +{ + mt76_clear(dev, MT_ARB_SCR, + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); + mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0); + mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START); +} + +void mt7603_mac_stop(struct mt7603_dev *dev) +{ + mt76_set(dev, MT_ARB_SCR, + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); + mt76_wr(dev, MT_WF_ARB_TX_START_0, 0); + mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START); +} + +void mt7603_pse_client_reset(struct mt7603_dev *dev) +{ + u32 addr; + + addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + + MT_CLIENT_RESET_TX); + + /* Clear previous reset state */ + mt76_clear(dev, addr, + MT_CLIENT_RESET_TX_R_E_1 | + MT_CLIENT_RESET_TX_R_E_2 | + MT_CLIENT_RESET_TX_R_E_1_S | + MT_CLIENT_RESET_TX_R_E_2_S); + + /* Start PSE client TX abort */ + mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1); + mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S, + MT_CLIENT_RESET_TX_R_E_1_S, 500); + + mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2); + mt76_set(dev, 
MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET); + + /* Wait for PSE client to clear TX FIFO */ + mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S, + MT_CLIENT_RESET_TX_R_E_2_S, 500); + + /* Clear PSE client TX abort state */ + mt76_clear(dev, addr, + MT_CLIENT_RESET_TX_R_E_1 | + MT_CLIENT_RESET_TX_R_E_2); +} + +static void mt7603_dma_sched_reset(struct mt7603_dev *dev) +{ + if (!is_mt7628(dev)) + return; + + mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET); + mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET); +} + +static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) +{ + int beacon_int = dev->beacon_int; + u32 mask = dev->mt76.mmio.irqmask; + int i; + + ieee80211_stop_queues(dev->mt76.hw); + set_bit(MT76_RESET, &dev->mt76.state); + + /* lock/unlock all queues to ensure that no tx is pending */ + mt76_txq_schedule_all(&dev->mt76); + + tasklet_disable(&dev->tx_tasklet); + tasklet_disable(&dev->pre_tbtt_tasklet); + napi_disable(&dev->mt76.napi[0]); + napi_disable(&dev->mt76.napi[1]); + + mutex_lock(&dev->mt76.mutex); + + mt7603_beacon_set_timer(dev, -1, 0); + + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] || + dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY || + dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK || + dev->cur_reset_cause == RESET_CAUSE_TX_HANG) + mt7603_pse_reset(dev); + + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED]) + goto skip_dma_reset; + + mt7603_mac_stop(dev); + + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); + usleep_range(1000, 2000); + + mt7603_irq_disable(dev, mask); + + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF); + + mt7603_pse_client_reset(dev); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) + mt76_queue_tx_cleanup(dev, i, true); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) + mt76_queue_rx_reset(dev, i); + + mt7603_dma_sched_reset(dev); + + mt7603_mac_dma_start(dev); + + mt7603_irq_enable(dev, mask); + +skip_dma_reset: + clear_bit(MT76_RESET, &dev->mt76.state); + mutex_unlock(&dev->mt76.mutex); + + tasklet_enable(&dev->tx_tasklet); + tasklet_schedule(&dev->tx_tasklet); + + tasklet_enable(&dev->pre_tbtt_tasklet); + mt7603_beacon_set_timer(dev, -1, beacon_int); + + napi_enable(&dev->mt76.napi[0]); + napi_schedule(&dev->mt76.napi[0]); + + napi_enable(&dev->mt76.napi[1]); + napi_schedule(&dev->mt76.napi[1]); + + ieee80211_wake_queues(dev->mt76.hw); + mt76_txq_schedule_all(&dev->mt76); +} + +static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index) +{ + u32 val; + + mt76_wr(dev, MT_WPDMA_DEBUG, + FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) | + MT_WPDMA_DEBUG_SEL); + + val = mt76_rr(dev, MT_WPDMA_DEBUG); + return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val); +} + +static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev) +{ + if (is_mt7628(dev)) + return mt7603_dma_debug(dev, 9) & BIT(9); + + return mt7603_dma_debug(dev, 2) & BIT(8); +} + +static bool mt7603_rx_dma_busy(struct mt7603_dev *dev) +{ + if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY)) + return false; + + return mt7603_rx_fifo_busy(dev); +} + +static bool mt7603_tx_dma_busy(struct mt7603_dev *dev) +{ + u32 val; + + if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY)) + return false; + + val = mt7603_dma_debug(dev, 9); + return (val & BIT(8)) && (val & 0xf) != 0xf; +} + +static bool mt7603_tx_hang(struct mt7603_dev *dev) +{ + struct mt76_queue *q; + u32 dma_idx, prev_dma_idx; + int i; + + for (i = 0; i < 4; i++) { + q = &dev->mt76.q_tx[i]; + + if (!q->queued) + 
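			/* added note (not in the original patch): rings with
			 * nothing queued cannot hang; a hang is only flagged
			 * below when dma_idx has not advanced since the last
			 * check while it still differs from cpu_idx.
			 */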
continue; + + prev_dma_idx = dev->tx_dma_idx[i]; + dma_idx = ioread32(&q->regs->dma_idx); + dev->tx_dma_idx[i] = dma_idx; + + if (dma_idx == prev_dma_idx && + dma_idx != ioread32(&q->regs->cpu_idx)) + break; + } + + return i < 4; +} + +static bool mt7603_rx_pse_busy(struct mt7603_dev *dev) +{ + u32 addr, val; + + if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES) + return true; + + if (mt7603_rx_fifo_busy(dev)) + return false; + + addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS); + mt76_wr(dev, addr, 3); + val = mt76_rr(dev, addr) >> 16; + + if (is_mt7628(dev) && (val & 0x4001) == 0x4001) + return true; + + return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001; +} + +static bool +mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter, + enum mt7603_reset_cause cause, + bool (*check)(struct mt7603_dev *dev)) +{ + if (dev->reset_test == cause + 1) { + dev->reset_test = 0; + goto trigger; + } + + if (check) { + if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) { + *counter = 0; + return false; + } + + (*counter)++; + } + + if (*counter < MT7603_WATCHDOG_TIMEOUT) + return false; +trigger: + dev->cur_reset_cause = cause; + dev->reset_cause[cause]++; + return true; +} + +void mt7603_update_channel(struct mt76_dev *mdev) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct mt76_channel_state *state; + ktime_t cur_time; + u32 busy; + + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) + return; + + state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan); + busy = mt76_rr(dev, MT_MIB_STAT_PSCCA); + + spin_lock_bh(&dev->mt76.cc_lock); + cur_time = ktime_get_boottime(); + state->cc_busy += busy; + state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time)); + dev->survey_time = cur_time; + spin_unlock_bh(&dev->mt76.cc_lock); +} + +void +mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val) +{ + u32 rxtd_6 = 0xd7c80000; + + if (val == dev->ed_strict_mode) + return; + + dev->ed_strict_mode = val; + + /* Ensure that ED/CCA does not trigger if disabled */ + if (!dev->ed_monitor) + rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34); + else + rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d); + + if (dev->ed_monitor && !dev->ed_strict_mode) + rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f); + else + rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10); + + mt76_wr(dev, MT_RXTD(6), rxtd_6); + + mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN, + dev->ed_monitor && !dev->ed_strict_mode); +} + +static void +mt7603_edcca_check(struct mt7603_dev *dev) +{ + u32 val = mt76_rr(dev, MT_AGC(41)); + ktime_t cur_time; + int rssi0, rssi1; + u32 active; + u32 ed_busy; + + if (!dev->ed_monitor) + return; + + rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val); + if (rssi0 > 128) + rssi0 -= 256; + + rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val); + if (rssi1 > 128) + rssi1 -= 256; + + if (max(rssi0, rssi1) >= -40 && + dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH) + dev->ed_strong_signal++; + else if (dev->ed_strong_signal > 0) + dev->ed_strong_signal--; + + cur_time = ktime_get_boottime(); + ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK; + + active = ktime_to_us(ktime_sub(cur_time, dev->ed_time)); + dev->ed_time = cur_time; + + if (!active) + return; + + if (100 * ed_busy / active > 90) { + if (dev->ed_trigger < 0) + dev->ed_trigger = 0; + dev->ed_trigger++; + } else { + if (dev->ed_trigger > 0) + dev->ed_trigger = 0; + dev->ed_trigger--; + } + + if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH || + dev->ed_strong_signal < 
MT7603_EDCCA_BLOCK_TH / 2) { + mt7603_edcca_set_strict(dev, true); + } else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) { + mt7603_edcca_set_strict(dev, false); + } + + if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH) + dev->ed_trigger = MT7603_EDCCA_BLOCK_TH; + else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) + dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH; +} + +void mt7603_cca_stats_reset(struct mt7603_dev *dev) +{ + mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET); + mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET); + mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN); +} + +static void +mt7603_adjust_sensitivity(struct mt7603_dev *dev) +{ + u32 agc0 = dev->agc0, agc3 = dev->agc3; + u32 adj; + + if (!dev->sensitivity || dev->sensitivity < -100) { + dev->sensitivity = 0; + } else if (dev->sensitivity <= -84) { + adj = 7 + (dev->sensitivity + 92) / 2; + + agc0 = 0x56f0076f; + agc0 |= adj << 12; + agc0 |= adj << 16; + agc3 = 0x81d0d5e3; + } else if (dev->sensitivity <= -72) { + adj = 7 + (dev->sensitivity + 80) / 2; + + agc0 = 0x6af0006f; + agc0 |= adj << 8; + agc0 |= adj << 12; + agc0 |= adj << 16; + + agc3 = 0x8181d5e3; + } else { + if (dev->sensitivity > -54) + dev->sensitivity = -54; + + adj = 7 + (dev->sensitivity + 80) / 2; + + agc0 = 0x7ff0000f; + agc0 |= adj << 4; + agc0 |= adj << 8; + agc0 |= adj << 12; + agc0 |= adj << 16; + + agc3 = 0x818181e3; + } + + mt76_wr(dev, MT_AGC(0), agc0); + mt76_wr(dev, MT_AGC1(0), agc0); + + mt76_wr(dev, MT_AGC(3), agc3); + mt76_wr(dev, MT_AGC1(3), agc3); +} + +static void +mt7603_false_cca_check(struct mt7603_dev *dev) +{ + int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm; + int false_cca; + int min_signal; + u32 val; + + val = mt76_rr(dev, MT_PHYCTRL_STAT_PD); + pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val); + pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val); + + val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY); + mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val); + mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val); + + dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm; + dev->false_cca_cck = pd_cck - mdrdy_cck; + + mt7603_cca_stats_reset(dev); + + min_signal = mt76_get_min_avg_rssi(&dev->mt76); + if (!min_signal) { + dev->sensitivity = 0; + dev->last_cca_adj = jiffies; + goto out; + } + + min_signal -= 15; + + false_cca = dev->false_cca_ofdm + dev->false_cca_cck; + if (false_cca > 600) { + if (!dev->sensitivity) + dev->sensitivity = -92; + else + dev->sensitivity += 2; + dev->last_cca_adj = jiffies; + } else if (false_cca < 100 || + time_after(jiffies, dev->last_cca_adj + 10 * HZ)) { + dev->last_cca_adj = jiffies; + if (!dev->sensitivity) + goto out; + + dev->sensitivity -= 2; + } + + if (dev->sensitivity && dev->sensitivity > min_signal) { + dev->sensitivity = min_signal; + dev->last_cca_adj = jiffies; + } + +out: + mt7603_adjust_sensitivity(dev); +} + +void mt7603_mac_work(struct work_struct *work) +{ + struct mt7603_dev *dev = container_of(work, struct mt7603_dev, + mac_work.work); + bool reset = false; + + mt76_tx_status_check(&dev->mt76, NULL, false); + + mutex_lock(&dev->mt76.mutex); + + dev->mac_work_count++; + mt7603_update_channel(&dev->mt76); + mt7603_edcca_check(dev); + + if (dev->mac_work_count == 10) + mt7603_false_cca_check(dev); + + if (mt7603_watchdog_check(dev, &dev->rx_pse_check, + RESET_CAUSE_RX_PSE_BUSY, + mt7603_rx_pse_busy) || + mt7603_watchdog_check(dev, &dev->beacon_check, + RESET_CAUSE_BEACON_STUCK, + NULL) || + mt7603_watchdog_check(dev, &dev->tx_hang_check, + RESET_CAUSE_TX_HANG, + mt7603_tx_hang) || + 
mt7603_watchdog_check(dev, &dev->tx_dma_check, + RESET_CAUSE_TX_BUSY, + mt7603_tx_dma_busy) || + mt7603_watchdog_check(dev, &dev->rx_dma_check, + RESET_CAUSE_RX_BUSY, + mt7603_rx_dma_busy) || + mt7603_watchdog_check(dev, &dev->mcu_hang, + RESET_CAUSE_MCU_HANG, + NULL) || + dev->reset_cause[RESET_CAUSE_RESET_FAILED]) { + dev->beacon_check = 0; + dev->tx_dma_check = 0; + dev->tx_hang_check = 0; + dev->rx_dma_check = 0; + dev->rx_pse_check = 0; + dev->mcu_hang = 0; + dev->rx_dma_idx = ~0; + memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx)); + reset = true; + dev->mac_work_count = 0; + } + + if (dev->mac_work_count >= 10) + dev->mac_work_count = 0; + + mutex_unlock(&dev->mt76.mutex); + + if (reset) + mt7603_mac_watchdog_reset(dev); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, + msecs_to_jiffies(MT7603_WATCHDOG_TIME)); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.h b/drivers/net/wireless/mediatek/mt76/mt7603/mac.h new file mode 100644 index 000000000000..17e34ecf2bfb --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: ISC */ + +#ifndef __MT7603_MAC_H +#define __MT7603_MAC_H + +#define MT_RXD0_LENGTH GENMASK(15, 0) +#define MT_RXD0_PKT_TYPE GENMASK(31, 29) + +#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16) +#define MT_RXD0_NORMAL_IP_SUM BIT(23) +#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24) +#define MT_RXD0_NORMAL_GROUP_1 BIT(25) +#define MT_RXD0_NORMAL_GROUP_2 BIT(26) +#define MT_RXD0_NORMAL_GROUP_3 BIT(27) +#define MT_RXD0_NORMAL_GROUP_4 BIT(28) + +enum rx_pkt_type { + PKT_TYPE_TXS = 0, + PKT_TYPE_TXRXV = 1, + PKT_TYPE_NORMAL = 2, + PKT_TYPE_RX_DUP_RFB = 3, + PKT_TYPE_RX_TMR = 4, + PKT_TYPE_RETRIEVE = 5, + PKT_TYPE_RX_EVENT = 7, +}; + +#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26) +#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24) +#define MT_RXD1_NORMAL_HDR_TRANS BIT(23) +#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22) +#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16) +#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8) +#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6) +#define MT_RXD1_NORMAL_BEACON_UC BIT(5) +#define MT_RXD1_NORMAL_BEACON_MC BIT(4) +#define MT_RXD1_NORMAL_BCAST BIT(3) +#define MT_RXD1_NORMAL_MCAST BIT(2) +#define MT_RXD1_NORMAL_U2M BIT(1) +#define MT_RXD1_NORMAL_HTC_VLD BIT(0) + +#define MT_RXD2_NORMAL_NON_AMPDU BIT(31) +#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30) +#define MT_RXD2_NORMAL_NDATA BIT(29) +#define MT_RXD2_NORMAL_NULL_FRAME BIT(28) +#define MT_RXD2_NORMAL_FRAG BIT(27) +#define MT_RXD2_NORMAL_UDF_VALID BIT(26) +#define MT_RXD2_NORMAL_LLC_MIS BIT(25) +#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24) +#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23) +#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22) +#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21) +#define MT_RXD2_NORMAL_ICV_ERR BIT(20) +#define MT_RXD2_NORMAL_CLM BIT(19) +#define MT_RXD2_NORMAL_CM BIT(18) +#define MT_RXD2_NORMAL_FCS_ERR BIT(17) +#define MT_RXD2_NORMAL_SW_BIT BIT(16) +#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12) +#define MT_RXD2_NORMAL_TID GENMASK(11, 8) +#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0) + +#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30) +#define MT_RXD3_NORMAL_PF_MODE BIT(29) +#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19) +#define MT_RXD3_NORMAL_WOL GENMASK(18, 14) +#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13) +#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11) +#define MT_RXD3_NORMAL_CLS BIT(10) +#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9) +#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8) +#define MT_RXD3_NORMAL_RXV_SEQ 
GENMASK(7, 0) + +#define MT_RXV1_VHTA1_B5_B4 GENMASK(31, 30) +#define MT_RXV1_VHTA2_B8_B1 GENMASK(29, 22) +#define MT_RXV1_HT_NO_SOUND BIT(21) +#define MT_RXV1_HT_SMOOTH BIT(20) +#define MT_RXV1_HT_SHORT_GI BIT(19) +#define MT_RXV1_HT_AGGR BIT(18) +#define MT_RXV1_VHTA1_B22 BIT(17) +#define MT_RXV1_FRAME_MODE GENMASK(16, 15) +#define MT_RXV1_TX_MODE GENMASK(14, 12) +#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10) +#define MT_RXV1_HT_AD_CODE BIT(9) +#define MT_RXV1_HT_STBC GENMASK(8, 7) +#define MT_RXV1_TX_RATE GENMASK(6, 0) + +#define MT_RXV2_VHTA1_B16_B6 GENMASK(31, 21) +#define MT_RXV2_LENGTH GENMASK(20, 0) + +#define MT_RXV3_F_AGC1_CAL_GAIN GENMASK(31, 29) +#define MT_RXV3_F_AGC1_EQ_CAL BIT(28) +#define MT_RXV3_RCPI1 GENMASK(27, 20) +#define MT_RXV3_F_AGC0_CAL_GAIN GENMASK(19, 17) +#define MT_RXV3_F_AGC0_EQ_CAL BIT(16) +#define MT_RXV3_RCPI0 GENMASK(15, 8) +#define MT_RXV3_SEL_ANT BIT(7) +#define MT_RXV3_ACI_DET_X BIT(6) +#define MT_RXV3_OFDM_FREQ_TRANS_DETECT BIT(5) +#define MT_RXV3_VHTA1_B21_B17 GENMASK(4, 0) + +#define MT_RXV4_F_AGC_CAL_GAIN GENMASK(31, 29) +#define MT_RXV4_F_AGC2_EQ_CAL BIT(28) +#define MT_RXV4_IB_RSSI1 GENMASK(27, 20) +#define MT_RXV4_F_AGC_LPF_GAIN_X GENMASK(19, 16) +#define MT_RXV4_WB_RSSI_X GENMASK(15, 8) +#define MT_RXV4_IB_RSSI0 GENMASK(7, 0) + +#define MT_RXV5_LTF_SNR0 GENMASK(31, 26) +#define MT_RXV5_LTF_PROC_TIME GENMASK(25, 19) +#define MT_RXV5_FOE GENMASK(18, 7) +#define MT_RXV5_C_AGC_SATE GENMASK(6, 4) +#define MT_RXV5_F_AGC_LNA_GAIN_0 GENMASK(3, 2) +#define MT_RXV5_F_AGC_LNA_GAIN_1 GENMASK(1, 0) + +#define MT_RXV6_C_AGC_STATE GENMASK(30, 28) +#define MT_RXV6_NS_TS_FIELD GENMASK(27, 25) +#define MT_RXV6_RX_VALID BIT(24) +#define MT_RXV6_NF2 GENMASK(23, 16) +#define MT_RXV6_NF1 GENMASK(15, 8) +#define MT_RXV6_NF0 GENMASK(7, 0) + +enum mt7603_tx_header_format { + MT_HDR_FORMAT_802_3, + MT_HDR_FORMAT_CMD, + MT_HDR_FORMAT_802_11, + MT_HDR_FORMAT_802_11_EXT, +}; + +#define MT_TXD_SIZE (8 * 4) + +#define MT_TXD0_P_IDX BIT(31) +#define MT_TXD0_Q_IDX GENMASK(30, 27) +#define MT_TXD0_UTXB BIT(26) +#define MT_TXD0_UNXV BIT(25) +#define MT_TXD0_UDP_TCP_SUM BIT(24) +#define MT_TXD0_IP_SUM BIT(23) +#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16) +#define MT_TXD0_TX_BYTES GENMASK(15, 0) + +#define MT_TXD1_OWN_MAC GENMASK(31, 26) +#define MT_TXD1_PROTECTED BIT(23) +#define MT_TXD1_TID GENMASK(22, 20) +#define MT_TXD1_NO_ACK BIT(19) +#define MT_TXD1_HDR_PAD GENMASK(18, 16) +#define MT_TXD1_LONG_FORMAT BIT(15) +#define MT_TXD1_HDR_FORMAT GENMASK(14, 13) +#define MT_TXD1_HDR_INFO GENMASK(12, 8) +#define MT_TXD1_WLAN_IDX GENMASK(7, 0) + +#define MT_TXD2_FIX_RATE BIT(31) +#define MT_TXD2_TIMING_MEASURE BIT(30) +#define MT_TXD2_BA_DISABLE BIT(29) +#define MT_TXD2_POWER_OFFSET GENMASK(28, 24) +#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16) +#define MT_TXD2_FRAG GENMASK(15, 14) +#define MT_TXD2_HTC_VLD BIT(13) +#define MT_TXD2_DURATION BIT(12) +#define MT_TXD2_BIP BIT(11) +#define MT_TXD2_MULTICAST BIT(10) +#define MT_TXD2_RTS BIT(9) +#define MT_TXD2_SOUNDING BIT(8) +#define MT_TXD2_NDPA BIT(7) +#define MT_TXD2_NDP BIT(6) +#define MT_TXD2_FRAME_TYPE GENMASK(5, 4) +#define MT_TXD2_SUB_TYPE GENMASK(3, 0) + +#define MT_TXD3_SN_VALID BIT(31) +#define MT_TXD3_PN_VALID BIT(30) +#define MT_TXD3_SEQ GENMASK(27, 16) +#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11) +#define MT_TXD3_TX_COUNT GENMASK(10, 6) + +#define MT_TXD4_PN_LOW GENMASK(31, 0) + +#define MT_TXD5_PN_HIGH GENMASK(31, 16) +#define MT_TXD5_SW_POWER_MGMT BIT(13) +#define MT_TXD5_BA_SEQ_CTRL BIT(12) +#define MT_TXD5_DA_SELECT BIT(11) 
+#define MT_TXD5_TX_STATUS_HOST BIT(10) +#define MT_TXD5_TX_STATUS_MCU BIT(9) +#define MT_TXD5_TX_STATUS_FMT BIT(8) +#define MT_TXD5_PID GENMASK(7, 0) + +#define MT_TXD6_SGI BIT(31) +#define MT_TXD6_LDPC BIT(30) +#define MT_TXD6_TX_RATE GENMASK(29, 18) +#define MT_TXD6_I_TXBF BIT(17) +#define MT_TXD6_E_TXBF BIT(16) +#define MT_TXD6_DYN_BW BIT(15) +#define MT_TXD6_ANT_PRI GENMASK(14, 12) +#define MT_TXD6_SPE_EN BIT(11) +#define MT_TXD6_FIXED_BW BIT(10) +#define MT_TXD6_BW GENMASK(9, 8) +#define MT_TXD6_ANT_ID GENMASK(7, 2) +#define MT_TXD6_FIXED_RATE BIT(0) + +#define MT_TX_RATE_STBC BIT(11) +#define MT_TX_RATE_NSS GENMASK(10, 9) +#define MT_TX_RATE_MODE GENMASK(8, 6) +#define MT_TX_RATE_IDX GENMASK(5, 0) + +#define MT_TXS0_ANTENNA GENMASK(31, 26) +#define MT_TXS0_TID GENMASK(25, 22) +#define MT_TXS0_BA_ERROR BIT(22) +#define MT_TXS0_PS_FLAG BIT(21) +#define MT_TXS0_TXOP_TIMEOUT BIT(20) +#define MT_TXS0_BIP_ERROR BIT(19) + +#define MT_TXS0_QUEUE_TIMEOUT BIT(18) +#define MT_TXS0_RTS_TIMEOUT BIT(17) +#define MT_TXS0_ACK_TIMEOUT BIT(16) +#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16) + +#define MT_TXS0_TX_STATUS_HOST BIT(15) +#define MT_TXS0_TX_STATUS_MCU BIT(14) +#define MT_TXS0_TXS_FORMAT BIT(13) +#define MT_TXS0_FIXED_RATE BIT(12) +#define MT_TXS0_TX_RATE GENMASK(11, 0) + +#define MT_TXS1_F0_TIMESTAMP GENMASK(31, 0) +#define MT_TXS1_F1_NOISE_2 GENMASK(23, 16) +#define MT_TXS1_F1_NOISE_1 GENMASK(15, 8) +#define MT_TXS1_F1_NOISE_0 GENMASK(7, 0) + +#define MT_TXS2_F0_FRONT_TIME GENMASK(24, 0) +#define MT_TXS2_F1_RCPI_2 GENMASK(23, 16) +#define MT_TXS2_F1_RCPI_1 GENMASK(15, 8) +#define MT_TXS2_F1_RCPI_0 GENMASK(7, 0) + +#define MT_TXS3_WCID GENMASK(31, 24) +#define MT_TXS3_RXV_SEQNO GENMASK(23, 16) +#define MT_TXS3_TX_DELAY GENMASK(15, 0) + +#define MT_TXS4_LAST_TX_RATE GENMASK(31, 29) +#define MT_TXS4_TX_COUNT GENMASK(28, 24) +#define MT_TXS4_AMPDU BIT(23) +#define MT_TXS4_ACKED_MPDU BIT(22) +#define MT_TXS4_PID GENMASK(21, 14) +#define MT_TXS4_BW GENMASK(13, 12) +#define MT_TXS4_F0_SEQNO GENMASK(11, 0) +#define MT_TXS4_F1_TSSI GENMASK(11, 0) + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c new file mode 100644 index 000000000000..b10775ed92e6 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -0,0 +1,709 @@ +/* SPDX-License-Identifier: ISC */ + +#include +#include +#include +#include +#include "mt7603.h" +#include "eeprom.h" + +static int +mt7603_start(struct ieee80211_hw *hw) +{ + struct mt7603_dev *dev = hw->priv; + + mt7603_mac_start(dev); + dev->survey_time = ktime_get_boottime(); + set_bit(MT76_STATE_RUNNING, &dev->mt76.state); + mt7603_mac_work(&dev->mac_work.work); + + return 0; +} + +static void +mt7603_stop(struct ieee80211_hw *hw) +{ + struct mt7603_dev *dev = hw->priv; + + clear_bit(MT76_STATE_RUNNING, &dev->mt76.state); + cancel_delayed_work_sync(&dev->mac_work); + mt7603_mac_stop(dev); +} + +static int +mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + struct mt7603_dev *dev = hw->priv; + struct mt76_txq *mtxq; + u8 bc_addr[ETH_ALEN]; + int idx; + int ret = 0; + + mutex_lock(&dev->mt76.mutex); + + mvif->idx = ffs(~dev->vif_mask) - 1; + if (mvif->idx >= MT7603_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } + + mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), + get_unaligned_le32(vif->addr)); + mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), + (get_unaligned_le16(vif->addr + 4) | + MT_MAC_ADDR1_VALID)); + + if 
(vif->type == NL80211_IFTYPE_AP) { + mt76_wr(dev, MT_BSSID0(mvif->idx), + get_unaligned_le32(vif->addr)); + mt76_wr(dev, MT_BSSID1(mvif->idx), + (get_unaligned_le16(vif->addr + 4) | + MT_BSSID1_VALID)); + } + + idx = MT7603_WTBL_RESERVED - 1 - mvif->idx; + dev->vif_mask |= BIT(mvif->idx); + mvif->sta.wcid.idx = idx; + mvif->sta.wcid.hw_key_idx = -1; + + eth_broadcast_addr(bc_addr); + mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr); + + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + mtxq->wcid = &mvif->sta.wcid; + mt76_txq_init(&dev->mt76, vif->txq); + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + +out: + mutex_unlock(&dev->mt76.mutex); + + return ret; +} + +static void +mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + struct mt7603_dev *dev = hw->priv; + int idx = mvif->sta.wcid.idx; + + mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0); + mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0); + mt76_wr(dev, MT_BSSID0(mvif->idx), 0); + mt76_wr(dev, MT_BSSID1(mvif->idx), 0); + mt7603_beacon_set_timer(dev, mvif->idx, 0); + + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); + mt76_txq_remove(&dev->mt76, vif->txq); + + mutex_lock(&dev->mt76.mutex); + dev->vif_mask &= ~BIT(mvif->idx); + mutex_unlock(&dev->mt76.mutex); +} + +static void +mt7603_init_edcca(struct mt7603_dev *dev) +{ + /* Set lower signal level to -65dBm */ + mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23); + + /* clear previous energy detect monitor results */ + mt76_rr(dev, MT_MIB_STAT_ED); + + if (dev->ed_monitor) + mt76_set(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME); + else + mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME); + + dev->ed_strict_mode = 0xff; + dev->ed_strong_signal = 0; + dev->ed_time = ktime_get_boottime(); + + mt7603_edcca_set_strict(dev, false); +} + +static int +mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def) +{ + u8 *rssi_data = (u8 *)dev->mt76.eeprom.data; + int idx, ret; + u8 bw = MT_BW_20; + bool failed = false; + + cancel_delayed_work_sync(&dev->mac_work); + + mutex_lock(&dev->mt76.mutex); + set_bit(MT76_RESET, &dev->mt76.state); + + mt76_set_channel(&dev->mt76); + mt7603_mac_stop(dev); + + if (def->width == NL80211_CHAN_WIDTH_40) + bw = MT_BW_40; + + dev->mt76.chandef = *def; + mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw); + ret = mt7603_mcu_set_channel(dev); + if (ret) { + failed = true; + goto out; + } + + if (def->chan->band == NL80211_BAND_5GHZ) { + idx = 1; + rssi_data += MT_EE_RSSI_OFFSET_5G; + } else { + idx = 0; + rssi_data += MT_EE_RSSI_OFFSET_2G; + } + + memcpy(dev->rssi_offset, rssi_data, sizeof(dev->rssi_offset)); + + idx |= (def->chan - + mt76_hw(dev)->wiphy->bands[def->chan->band]->channels) << 1; + mt76_wr(dev, MT_WF_RMAC_CH_FREQ, idx); + mt7603_mac_set_timing(dev); + mt7603_mac_start(dev); + + clear_bit(MT76_RESET, &dev->mt76.state); + + mt76_txq_schedule_all(&dev->mt76); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, + MT7603_WATCHDOG_TIME); + + /* reset channel stats */ + mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS); + mt76_set(dev, MT_MIB_CTL, + MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME); + mt76_rr(dev, MT_MIB_STAT_PSCCA); + mt7603_cca_stats_reset(dev); + + dev->survey_time = ktime_get_boottime(); + + mt7603_init_edcca(dev); + +out: + mutex_unlock(&dev->mt76.mutex); + + if (failed) + mt7603_mac_work(&dev->mac_work.work); + + return ret; +} + +static int +mt7603_config(struct ieee80211_hw *hw, u32 changed) +{ + struct mt7603_dev 
*dev = hw->priv; + int ret = 0; + + if (changed & (IEEE80211_CONF_CHANGE_CHANNEL | + IEEE80211_CONF_CHANGE_POWER)) + ret = mt7603_set_channel(dev, &hw->conf.chandef); + + if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + mutex_lock(&dev->mt76.mutex); + + if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) + dev->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; + else + dev->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC; + + mt76_wr(dev, MT_WF_RFCR, dev->rxfilter); + + mutex_unlock(&dev->mt76.mutex); + } + + return ret; +} + +static void +mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, + unsigned int *total_flags, u64 multicast) +{ + struct mt7603_dev *dev = hw->priv; + u32 flags = 0; + +#define MT76_FILTER(_flag, _hw) do { \ + flags |= *total_flags & FIF_##_flag; \ + dev->rxfilter &= ~(_hw); \ + dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + } while (0) + + dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS | + MT_WF_RFCR_DROP_OTHER_BEACON | + MT_WF_RFCR_DROP_FRAME_REPORT | + MT_WF_RFCR_DROP_PROBEREQ | + MT_WF_RFCR_DROP_MCAST_FILTERED | + MT_WF_RFCR_DROP_MCAST | + MT_WF_RFCR_DROP_BCAST | + MT_WF_RFCR_DROP_DUPLICATE | + MT_WF_RFCR_DROP_A2_BSSID | + MT_WF_RFCR_DROP_UNWANTED_CTL | + MT_WF_RFCR_DROP_STBC_MULTI); + + MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM | + MT_WF_RFCR_DROP_A3_MAC | + MT_WF_RFCR_DROP_A3_BSSID); + + MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL); + + MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS | + MT_WF_RFCR_DROP_RTS | + MT_WF_RFCR_DROP_CTL_RSV | + MT_WF_RFCR_DROP_NDPA); + + *total_flags = flags; + mt76_wr(dev, MT_WF_RFCR, dev->rxfilter); +} + +static void +mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed) +{ + struct mt7603_dev *dev = hw->priv; + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + + mutex_lock(&dev->mt76.mutex); + + if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) { + if (info->assoc || info->ibss_joined) { + mt76_wr(dev, MT_BSSID0(mvif->idx), + get_unaligned_le32(info->bssid)); + mt76_wr(dev, MT_BSSID1(mvif->idx), + (get_unaligned_le16(info->bssid + 4) | + MT_BSSID1_VALID)); + } else { + mt76_wr(dev, MT_BSSID0(mvif->idx), 0); + mt76_wr(dev, MT_BSSID1(mvif->idx), 0); + } + } + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 
9 : 20; + + if (slottime != dev->slottime) { + dev->slottime = slottime; + mt7603_mac_set_timing(dev); + } + } + + if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) { + int beacon_int = !!info->enable_beacon * info->beacon_int; + + tasklet_disable(&dev->pre_tbtt_tasklet); + mt7603_beacon_set_timer(dev, mvif->idx, beacon_int); + tasklet_enable(&dev->pre_tbtt_tasklet); + } + + mutex_unlock(&dev->mt76.mutex); +} + +int +mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + int idx; + int ret = 0; + + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7603_WTBL_STA - 1); + if (idx < 0) + return -ENOSPC; + + __skb_queue_head_init(&msta->psq); + msta->ps = ~0; + msta->smps = ~0; + msta->wcid.sta = 1; + msta->wcid.idx = idx; + mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr); + mt7603_wtbl_set_ps(dev, msta, false); + + if (vif->type == NL80211_IFTYPE_AP) + set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); + + return ret; +} + +void +mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + + mt7603_wtbl_update_cap(dev, sta); +} + +void +mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + + spin_lock_bh(&dev->ps_lock); + __skb_queue_purge(&msta->psq); + mt7603_filter_tx(dev, wcid->idx, true); + spin_unlock_bh(&dev->ps_lock); + + mt7603_wtbl_clear(dev, wcid->idx); +} + +static void +mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list) +{ + struct sk_buff *skb; + + while ((skb = __skb_dequeue(list)) != NULL) + mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb), + skb, 0); +} + +void +mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) +{ + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + struct sk_buff_head list; + + mt76_stop_tx_queues(&dev->mt76, sta, false); + mt7603_wtbl_set_ps(dev, msta, ps); + if (ps) + return; + + __skb_queue_head_init(&list); + + spin_lock_bh(&dev->ps_lock); + skb_queue_splice_tail_init(&msta->psq, &list); + spin_unlock_bh(&dev->ps_lock); + + mt7603_ps_tx_list(dev, &list); +} + +static void +mt7603_release_buffered_frames(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u16 tids, int nframes, + enum ieee80211_frame_release_type reason, + bool more_data) +{ + struct mt7603_dev *dev = hw->priv; + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + struct sk_buff_head list; + struct sk_buff *skb, *tmp; + + __skb_queue_head_init(&list); + + spin_lock_bh(&dev->ps_lock); + skb_queue_walk_safe(&msta->psq, skb, tmp) { + if (!nframes) + break; + + if (!(tids & BIT(skb->priority))) + continue; + + skb_set_queue_mapping(skb, MT_TXQ_PSD); + __skb_unlink(skb, &msta->psq); + __skb_queue_tail(&list, skb); + nframes--; + } + spin_unlock_bh(&dev->ps_lock); + + mt7603_ps_tx_list(dev, &list); + + if (nframes) + mt76_release_buffered_frames(hw, sta, tids, nframes, reason, + more_data); +} + +static int +mt7603_set_key(struct ieee80211_hw 
*hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt7603_dev *dev = hw->priv; + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; + struct mt7603_sta *msta = sta ? (struct mt7603_sta *)sta->drv_priv : + &mvif->sta; + struct mt76_wcid *wcid = &msta->wcid; + int idx = key->keyidx; + + /* fall back to sw encryption for unsupported ciphers */ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + case WLAN_CIPHER_SUITE_CCMP: + break; + default: + return -EOPNOTSUPP; + } + + /* + * The hardware does not support per-STA RX GTK, fall back + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + if (cmd == SET_KEY) { + key->hw_key_idx = wcid->idx; + wcid->hw_key_idx = idx; + } else { + if (idx == wcid->hw_key_idx) + wcid->hw_key_idx = -1; + + key = NULL; + } + mt76_wcid_key_setup(&dev->mt76, wcid, key); + + return mt7603_wtbl_set_key(dev, wcid->idx, key); +} + +static int +mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct mt7603_dev *dev = hw->priv; + u16 cw_min = (1 << 5) - 1; + u16 cw_max = (1 << 10) - 1; + u32 val; + + queue = dev->mt76.q_tx[queue].hw_idx; + + if (params->cw_min) + cw_min = params->cw_min; + if (params->cw_max) + cw_max = params->cw_max; + + mutex_lock(&dev->mt76.mutex); + mt7603_mac_stop(dev); + + val = mt76_rr(dev, MT_WMM_TXOP(queue)); + val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue)); + val |= params->txop << MT_WMM_TXOP_SHIFT(queue); + mt76_wr(dev, MT_WMM_TXOP(queue), val); + + val = mt76_rr(dev, MT_WMM_AIFSN); + val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue)); + val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue); + mt76_wr(dev, MT_WMM_AIFSN, val); + + val = mt76_rr(dev, MT_WMM_CWMIN); + val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue)); + val |= cw_min << MT_WMM_CWMIN_SHIFT(queue); + mt76_wr(dev, MT_WMM_CWMIN, val); + + val = mt76_rr(dev, MT_WMM_CWMAX(queue)); + val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue)); + val |= cw_max << MT_WMM_CWMAX_SHIFT(queue); + mt76_wr(dev, MT_WMM_CWMAX(queue), val); + + mt7603_mac_start(dev); + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void +mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac) +{ + struct mt7603_dev *dev = hw->priv; + + set_bit(MT76_SCANNING, &dev->mt76.state); + mt7603_beacon_set_timer(dev, -1, 0); +} + +static void +mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct mt7603_dev *dev = hw->priv; + + clear_bit(MT76_SCANNING, &dev->mt76.state); + mt7603_beacon_set_timer(dev, -1, dev->beacon_int); +} + +static void +mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ +} + +static int +mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + enum ieee80211_ampdu_mlme_action action = params->action; + struct mt7603_dev *dev = hw->priv; + struct ieee80211_sta *sta = params->sta; + struct ieee80211_txq *txq = sta->txq[params->tid]; + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + u16 tid = params->tid; + u16 *ssn = &params->ssn; + u8 ba_size = params->buf_size; + struct mt76_txq *mtxq; + + if (!txq) +
return -EINVAL; + + mtxq = (struct mt76_txq *)txq->drv_priv; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn, + params->buf_size); + mt7603_mac_rx_ba_reset(dev, sta->addr, tid); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); + break; + case IEEE80211_AMPDU_TX_START: + mtxq->agg_ssn = *ssn << 4; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + break; + } + + return 0; +} + +static void +mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7603_dev *dev = hw->priv; + struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; + struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates); + int i; + + spin_lock_bh(&dev->mt76.lock); + for (i = 0; i < ARRAY_SIZE(msta->rates); i++) { + msta->rates[i].idx = sta_rates->rate[i].idx; + msta->rates[i].count = sta_rates->rate[i].count; + msta->rates[i].flags = sta_rates->rate[i].flags; + + if (msta->rates[i].idx < 0 || !msta->rates[i].count) + break; + } + msta->n_rates = i; + mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates); + msta->rate_probe = false; + mt7603_wtbl_set_smps(dev, msta, + sta->smps_mode == IEEE80211_SMPS_DYNAMIC); + spin_unlock_bh(&dev->mt76.lock); +} + +static void +mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) +{ + struct mt7603_dev *dev = hw->priv; + + dev->coverage_class = coverage_class; + mt7603_mac_set_timing(dev); +} + +static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt7603_dev *dev = hw->priv; + struct mt76_wcid *wcid = &dev->global_sta.wcid; + + if (control->sta) { + struct mt7603_sta *msta; + + msta = (struct mt7603_sta *)control->sta->drv_priv; + wcid = &msta->wcid; + } else if (vif) { + struct mt7603_vif *mvif; + + mvif = (struct mt7603_vif *)vif->drv_priv; + wcid = &mvif->sta.wcid; + } + + mt76_tx(&dev->mt76, control->sta, wcid, skb); +} + +static int +mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) +{ + return 0; +} + +const struct ieee80211_ops mt7603_ops = { + .tx = mt7603_tx, + .start = mt7603_start, + .stop = mt7603_stop, + .add_interface = mt7603_add_interface, + .remove_interface = mt7603_remove_interface, + .config = mt7603_config, + .configure_filter = mt7603_configure_filter, + .bss_info_changed = mt7603_bss_info_changed, + .sta_state = mt76_sta_state, + .set_key = mt7603_set_key, + .conf_tx = mt7603_conf_tx, + .sw_scan_start = mt7603_sw_scan, + .sw_scan_complete = mt7603_sw_scan_complete, + .flush = mt7603_flush, + .ampdu_action = mt7603_ampdu_action, + .get_txpower = mt76_get_txpower, + .wake_tx_queue = mt76_wake_tx_queue, + .sta_rate_tbl_update = mt7603_sta_rate_tbl_update, + .release_buffered_frames = 
mt7603_release_buffered_frames, + .set_coverage_class = mt7603_set_coverage_class, + .set_tim = mt7603_set_tim, + .get_survey = mt76_get_survey, +}; + +MODULE_LICENSE("Dual BSD/GPL"); + +static int __init mt7603_init(void) +{ + int ret; + + ret = platform_driver_register(&mt76_wmac_driver); + if (ret) + return ret; + +#ifdef CONFIG_PCI + ret = pci_register_driver(&mt7603_pci_driver); + if (ret) + platform_driver_unregister(&mt76_wmac_driver); +#endif + return ret; +} + +static void __exit mt7603_exit(void) +{ +#ifdef CONFIG_PCI + pci_unregister_driver(&mt7603_pci_driver); +#endif + platform_driver_unregister(&mt76_wmac_driver); +} + +module_init(mt7603_init); +module_exit(mt7603_exit); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c new file mode 100644 index 000000000000..4b0713f1fd5e --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c @@ -0,0 +1,483 @@ +/* SPDX-License-Identifier: ISC */ + +#include +#include "mt7603.h" +#include "mcu.h" +#include "eeprom.h" + +#define MCU_SKB_RESERVE 8 + +struct mt7603_fw_trailer { + char fw_ver[10]; + char build_date[15]; + __le32 dl_len; +} __packed; + +static int +__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd, + int query, int *wait_seq) +{ + int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12; + struct mt76_dev *mdev = &dev->mt76; + struct mt7603_mcu_txd *txd; + u8 seq; + + if (!skb) + return -EINVAL; + + seq = ++mdev->mmio.mcu.msg_seq & 0xf; + if (!seq) + seq = ++mdev->mmio.mcu.msg_seq & 0xf; + + txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen); + memset(txd, 0, hdrlen); + + txd->len = cpu_to_le16(skb->len); + if (cmd == -MCU_CMD_FW_SCATTER) + txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE_FW); + else + txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE); + txd->pkt_type = MCU_PKT_ID; + txd->seq = seq; + + if (cmd < 0) { + txd->cid = -cmd; + } else { + txd->cid = MCU_CMD_EXT_CID; + txd->ext_cid = cmd; + if (query != MCU_Q_NA) + txd->ext_cid_ack = 1; + } + + txd->set_query = query; + + if (wait_seq) + *wait_seq = seq; + + return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0); +} + +static int +mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd, + int query) +{ + struct mt76_dev *mdev = &dev->mt76; + unsigned long expires = jiffies + 3 * HZ; + struct mt7603_mcu_rxd *rxd; + int ret, seq; + + mutex_lock(&mdev->mmio.mcu.mutex); + + ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq); + if (ret) + goto out; + + while (1) { + bool check_seq = false; + + skb = mt76_mcu_get_response(&dev->mt76, expires); + if (!skb) { + dev_err(mdev->dev, + "MCU message %d (seq %d) timed out\n", + cmd, seq); + dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT; + ret = -ETIMEDOUT; + break; + } + + rxd = (struct mt7603_mcu_rxd *)skb->data; + if (seq == rxd->seq) + check_seq = true; + + dev_kfree_skb(skb); + + if (check_seq) + break; + } + +out: + mutex_unlock(&mdev->mmio.mcu.mutex); + + return ret; +} + +static int +mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len) +{ + struct { + __le32 addr; + __le32 len; + __le32 mode; + } req = { + .addr = cpu_to_le32(addr), + .len = cpu_to_le32(len), + .mode = cpu_to_le32(BIT(31)), + }; + struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); + + return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ, + MCU_Q_NA); +} + +static int +mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len) +{ + struct sk_buff *skb; + int ret = 0; + + while (len > 0) { + int 
cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd), + len); + + skb = mt7603_mcu_msg_alloc(data, cur_len); + if (!skb) + return -ENOMEM; + + ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER, + MCU_Q_NA, NULL); + if (ret) + break; + + data += cur_len; + len -= cur_len; + } + + return ret; +} + +static int +mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr) +{ + struct { + __le32 override; + __le32 addr; + } req = { + .override = cpu_to_le32(addr ? 1 : 0), + .addr = cpu_to_le32(addr), + }; + struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); + + return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ, + MCU_Q_NA); +} + +static int +mt7603_mcu_restart(struct mt7603_dev *dev) +{ + struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0); + + return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ, + MCU_Q_NA); +} + +static int +mt7603_load_firmware(struct mt7603_dev *dev) +{ + const struct firmware *fw; + const struct mt7603_fw_trailer *hdr; + const char *firmware; + int dl_len; + u32 addr, val; + int ret; + + if (is_mt7628(dev)) { + if (mt76xx_rev(dev) == MT7628_REV_E1) + firmware = MT7628_FIRMWARE_E1; + else + firmware = MT7628_FIRMWARE_E2; + } else { + if (mt76xx_rev(dev) < MT7603_REV_E2) + firmware = MT7603_FIRMWARE_E1; + else + firmware = MT7603_FIRMWARE_E2; + } + + ret = request_firmware(&fw, firmware, dev->mt76.dev); + if (ret) + return ret; + + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { + dev_err(dev->mt76.dev, "Invalid firmware\n"); + ret = -EINVAL; + goto out; + } + + hdr = (const struct mt7603_fw_trailer *)(fw->data + fw->size - + sizeof(*hdr)); + + dev_info(dev->mt76.dev, "Firmware Version: %.10s\n", hdr->fw_ver); + dev_info(dev->mt76.dev, "Build Time: %.15s\n", hdr->build_date); + + addr = mt7603_reg_map(dev, 0x50012498); + mt76_wr(dev, addr, 0x5); + mt76_wr(dev, addr, 0x5); + udelay(1); + + /* switch to bypass mode */ + mt76_rmw(dev, MT_SCH_4, MT_SCH_4_FORCE_QID, + MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5)); + + val = mt76_rr(dev, MT_TOP_MISC2); + if (val & BIT(1)) { + dev_info(dev->mt76.dev, "Firmware already running...\n"); + goto running; + } + + if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(0) | BIT(1), BIT(0), 500)) { + dev_err(dev->mt76.dev, "Timeout waiting for ROM code to become ready\n"); + ret = -EIO; + goto out; + } + + dl_len = le32_to_cpu(hdr->dl_len) + 4; + ret = mt7603_mcu_init_download(dev, MCU_FIRMWARE_ADDRESS, dl_len); + if (ret) { + dev_err(dev->mt76.dev, "Download request failed\n"); + goto out; + } + + ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len); + if (ret) { + dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); + goto out; + } + + ret = mt7603_mcu_start_firmware(dev, MCU_FIRMWARE_ADDRESS); + if (ret) { + dev_err(dev->mt76.dev, "Failed to start firmware\n"); + goto out; + } + + if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(1), BIT(1), 500)) { + dev_err(dev->mt76.dev, "Timeout waiting for firmware to initialize\n"); + ret = -EIO; + goto out; + } + +running: + mt76_clear(dev, MT_SCH_4, MT_SCH_4_FORCE_QID | MT_SCH_4_BYPASS); + + mt76_set(dev, MT_SCH_4, BIT(8)); + mt76_clear(dev, MT_SCH_4, BIT(8)); + + dev->mcu_running = true; + dev_info(dev->mt76.dev, "firmware init done\n"); + +out: + release_firmware(fw); + + return ret; +} + +int mt7603_mcu_init(struct mt7603_dev *dev) +{ + mutex_init(&dev->mt76.mmio.mcu.mutex); + + return mt7603_load_firmware(dev); +} + +void mt7603_mcu_exit(struct mt7603_dev *dev) +{ + mt7603_mcu_restart(dev); + skb_queue_purge(&dev->mt76.mmio.mcu.res_q); +} + +int 
mt7603_mcu_set_eeprom(struct mt7603_dev *dev) +{ + static const u16 req_fields[] = { +#define WORD(_start) \ + _start, \ + _start + 1 +#define GROUP_2G(_start) \ + WORD(_start), \ + WORD(_start + 2), \ + WORD(_start + 4) + + MT_EE_NIC_CONF_0 + 1, + WORD(MT_EE_NIC_CONF_1), + MT_EE_WIFI_RF_SETTING, + MT_EE_TX_POWER_DELTA_BW40, + MT_EE_TX_POWER_DELTA_BW80 + 1, + MT_EE_TX_POWER_EXT_PA_5G, + MT_EE_TEMP_SENSOR_CAL, + GROUP_2G(MT_EE_TX_POWER_0_START_2G), + GROUP_2G(MT_EE_TX_POWER_1_START_2G), + WORD(MT_EE_TX_POWER_CCK), + WORD(MT_EE_TX_POWER_OFDM_2G_6M), + WORD(MT_EE_TX_POWER_OFDM_2G_24M), + WORD(MT_EE_TX_POWER_OFDM_2G_54M), + WORD(MT_EE_TX_POWER_HT_BPSK_QPSK), + WORD(MT_EE_TX_POWER_HT_16_64_QAM), + WORD(MT_EE_TX_POWER_HT_64_QAM), + MT_EE_ELAN_RX_MODE_GAIN, + MT_EE_ELAN_RX_MODE_NF, + MT_EE_ELAN_RX_MODE_P1DB, + MT_EE_ELAN_BYPASS_MODE_GAIN, + MT_EE_ELAN_BYPASS_MODE_NF, + MT_EE_ELAN_BYPASS_MODE_P1DB, + WORD(MT_EE_STEP_NUM_NEG_6_7), + WORD(MT_EE_STEP_NUM_NEG_4_5), + WORD(MT_EE_STEP_NUM_NEG_2_3), + WORD(MT_EE_STEP_NUM_NEG_0_1), + WORD(MT_EE_REF_STEP_24G), + WORD(MT_EE_STEP_NUM_PLUS_1_2), + WORD(MT_EE_STEP_NUM_PLUS_3_4), + WORD(MT_EE_STEP_NUM_PLUS_5_6), + MT_EE_STEP_NUM_PLUS_7, + MT_EE_XTAL_FREQ_OFFSET, + MT_EE_XTAL_TRIM_2_COMP, + MT_EE_XTAL_TRIM_3_COMP, + MT_EE_XTAL_WF_RFCAL, + + /* unknown fields below */ + WORD(0x24), + 0x34, + 0x39, + 0x3b, + WORD(0x42), + WORD(0x9e), + 0xf2, + WORD(0xf8), + 0xfa, + 0x12e, + WORD(0x130), WORD(0x132), WORD(0x134), WORD(0x136), + WORD(0x138), WORD(0x13a), WORD(0x13c), WORD(0x13e), + +#undef GROUP_2G +#undef WORD + + }; + struct req_data { + u16 addr; + u8 val; + u8 pad; + } __packed; + struct { + u8 buffer_mode; + u8 len; + u8 pad[2]; + } req_hdr = { + .buffer_mode = 1, + .len = ARRAY_SIZE(req_fields) - 1, + }; + struct sk_buff *skb; + struct req_data *data; + const int size = 0xff * sizeof(struct req_data); + u8 *eep = (u8 *)dev->mt76.eeprom.data; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size); + + skb = mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr)); + memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr)); + data = (struct req_data *)skb_put(skb, size); + memset(data, 0, size); + + for (i = 0; i < ARRAY_SIZE(req_fields); i++) { + data[i].addr = cpu_to_le16(req_fields[i]); + data[i].val = eep[req_fields[i]]; + data[i].pad = 0; + } + + return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE, + MCU_Q_SET); +} + +static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev) +{ + struct { + u8 center_channel; + u8 tssi; + u8 temp_comp; + u8 target_power[2]; + u8 rate_power_delta[14]; + u8 bw_power_delta; + u8 ch_power_delta[6]; + u8 temp_comp_power[17]; + u8 reserved; + } req = { + .center_channel = dev->mt76.chandef.chan->hw_value, +#define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n] + .tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1), + .temp_comp = EEP_VAL(MT_EE_NIC_CONF_1), + .target_power = { + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 2), + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 2) + }, + .bw_power_delta = EEP_VAL(MT_EE_TX_POWER_DELTA_BW40), + .ch_power_delta = { + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 3), + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 4), + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 5), + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 3), + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 4), + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 5) + }, +#undef EEP_VAL + }; + struct sk_buff *skb; + u8 *eep = (u8 *)dev->mt76.eeprom.data; + + memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK, + sizeof(req.rate_power_delta)); + + memcpy(req.temp_comp_power, eep + 
MT_EE_STEP_NUM_NEG_6_7, + sizeof(req.temp_comp_power)); + + skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); + return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL, + MCU_Q_SET); +} + +int mt7603_mcu_set_channel(struct mt7603_dev *dev) +{ + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; + struct ieee80211_hw *hw = mt76_hw(dev); + int n_chains = __sw_hweight8(dev->mt76.antenna_mask); + struct { + u8 control_chan; + u8 center_chan; + u8 bw; + u8 tx_streams; + u8 rx_streams; + u8 _res0[7]; + u8 txpower[21]; + u8 _res1[3]; + } req = { + .control_chan = chandef->chan->hw_value, + .center_chan = chandef->chan->hw_value, + .bw = MT_BW_20, + .tx_streams = n_chains, + .rx_streams = n_chains, + }; + struct sk_buff *skb; + s8 tx_power; + int ret; + int i; + + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) { + req.bw = MT_BW_40; + if (chandef->center_freq1 > chandef->chan->center_freq) + req.center_chan += 2; + else + req.center_chan -= 2; + } + + tx_power = hw->conf.power_level * 2; + if (dev->mt76.antenna_mask == 3) + tx_power -= 6; + tx_power = min(tx_power, dev->tx_power_limit); + + dev->mt76.txpower_cur = tx_power; + + for (i = 0; i < ARRAY_SIZE(req.txpower); i++) + req.txpower[i] = tx_power; + + skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); + ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH, + MCU_Q_SET); + if (ret) + return ret; + + return mt7603_mcu_set_tx_power(dev); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h new file mode 100644 index 000000000000..1bba369d5c8a --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: ISC */ + +#ifndef __MT7603_MCU_H +#define __MT7603_MCU_H + +struct mt7603_mcu_txd { + __le16 len; + __le16 pq_id; + + u8 cid; + u8 pkt_type; + u8 set_query; + u8 seq; + + u8 uc_d2b0_rev; + u8 ext_cid; + u8 uc_d2b2_rev; + u8 ext_cid_ack; + + u32 au4_d3_to_d7_rev[5]; +} __packed __aligned(4); + +struct mt7603_mcu_rxd { + __le16 len; + __le16 pkt_type_id; + + u8 eid; + u8 seq; + __le16 __rsv; + + u8 ext_eid; + u8 __rsv1[3]; +}; + +#define MCU_PKT_ID 0xa0 +#define MCU_PORT_QUEUE 0x8000 +#define MCU_PORT_QUEUE_FW 0xc000 + +#define MCU_FIRMWARE_ADDRESS 0x100000 + +enum { + MCU_Q_QUERY, + MCU_Q_SET, + MCU_Q_RESERVED, + MCU_Q_NA +}; + +enum { + MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01, + MCU_CMD_FW_START_REQ = 0x02, + MCU_CMD_INIT_ACCESS_REG = 0x3, + MCU_CMD_PATCH_START_REQ = 0x05, + MCU_CMD_PATCH_FINISH_REQ = 0x07, + MCU_CMD_PATCH_SEM_CONTROL = 0x10, + MCU_CMD_HIF_LOOPBACK = 0x20, + MCU_CMD_CH_PRIVILEGE = 0x20, + MCU_CMD_ACCESS_REG = 0xC2, + MCU_CMD_EXT_CID = 0xED, + MCU_CMD_FW_SCATTER = 0xEE, + MCU_CMD_RESTART_DL_REQ = 0xEF, +}; + +enum { + MCU_EXT_CMD_RF_REG_ACCESS = 0x02, + MCU_EXT_CMD_RF_TEST = 0x04, + MCU_EXT_CMD_RADIO_ON_OFF_CTRL = 0x05, + MCU_EXT_CMD_WIFI_RX_DISABLE = 0x06, + MCU_EXT_CMD_PM_STATE_CTRL = 0x07, + MCU_EXT_CMD_CHANNEL_SWITCH = 0x08, + MCU_EXT_CMD_NIC_CAPABILITY = 0x09, + MCU_EXT_CMD_PWR_SAVING = 0x0A, + MCU_EXT_CMD_MULTIPLE_REG_ACCESS = 0x0E, + MCU_EXT_CMD_AP_PWR_SAVING_CAPABILITY = 0xF, + MCU_EXT_CMD_SEC_ADDREMOVE_KEY = 0x10, + MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11, + MCU_EXT_CMD_FW_LOG_2_HOST = 0x13, + MCU_EXT_CMD_PS_RETRIEVE_START = 0x14, + MCU_EXT_CMD_LED_CTRL = 0x17, + MCU_EXT_CMD_PACKET_FILTER = 0x18, + MCU_EXT_CMD_PWR_MGT_BIT_WIFI = 0x1B, + MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21, + MCU_EXT_CMD_THERMAL_PROTECT = 0x23, + MCU_EXT_CMD_EDCA_SET = 0x27, + MCU_EXT_CMD_SLOT_TIME_SET = 0x28, + 
MCU_EXT_CMD_CONFIG_INTERNAL_SETTING = 0x29, + MCU_EXT_CMD_NOA_OFFLOAD_CTRL = 0x2B, + MCU_EXT_CMD_GET_THEMAL_SENSOR = 0x2C, + MCU_EXT_CMD_WAKEUP_OPTION = 0x2E, + MCU_EXT_CMD_AC_QUEUE_CONTROL = 0x31, + MCU_EXT_CMD_BCN_UPDATE = 0x33 +}; + +enum { + MCU_EXT_EVENT_CMD_RESULT = 0x0, + MCU_EXT_EVENT_RF_REG_ACCESS = 0x2, + MCU_EXT_EVENT_MULTI_CR_ACCESS = 0x0E, + MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13, + MCU_EXT_EVENT_BEACON_LOSS = 0x1A, + MCU_EXT_EVENT_THERMAL_PROTECT = 0x22, + MCU_EXT_EVENT_BCN_UPDATE = 0x31, +}; + +static inline struct sk_buff * +mt7603_mcu_msg_alloc(const void *data, int len) +{ + return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd), + len, 0); +} + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h new file mode 100644 index 000000000000..79f332429432 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: ISC */ + +#ifndef __MT7603_H +#define __MT7603_H + +#include +#include +#include "../mt76.h" +#include "regs.h" + +#define MT7603_MAX_INTERFACES 4 +#define MT7603_WTBL_SIZE 128 +#define MT7603_WTBL_RESERVED (MT7603_WTBL_SIZE - 1) +#define MT7603_WTBL_STA (MT7603_WTBL_RESERVED - MT7603_MAX_INTERFACES) + +#define MT7603_RATE_RETRY 2 + +#define MT7603_RX_RING_SIZE 128 + +#define MT7603_FIRMWARE_E1 "mt7603_e1.bin" +#define MT7603_FIRMWARE_E2 "mt7603_e2.bin" +#define MT7628_FIRMWARE_E1 "mt7628_e1.bin" +#define MT7628_FIRMWARE_E2 "mt7628_e2.bin" + +#define MT7603_EEPROM_SIZE 1024 + +#define MT_AGG_SIZE_LIMIT(_n) (((_n) + 1) * 4) + +#define MT7603_PRE_TBTT_TIME 5000 /* ms */ + +#define MT7603_WATCHDOG_TIME 100 /* ms */ +#define MT7603_WATCHDOG_TIMEOUT 10 /* number of checks */ + +#define MT7603_EDCCA_BLOCK_TH 10 + +#define MT7603_CFEND_RATE_DEFAULT 0x69 /* chip default (24M) */ +#define MT7603_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ + +struct mt7603_vif; +struct mt7603_sta; + +enum { + MT7603_REV_E1 = 0x00, + MT7603_REV_E2 = 0x10, + MT7628_REV_E1 = 0x8a00, +}; + +enum mt7603_bw { + MT_BW_20, + MT_BW_40, + MT_BW_80, +}; + +struct mt7603_sta { + struct mt76_wcid wcid; /* must be first */ + + struct mt7603_vif *vif; + + struct sk_buff_head psq; + + struct ieee80211_tx_rate rates[8]; + u8 rate_count; + u8 n_rates; + + u8 rate_probe; + u8 smps; + + u8 ps; +}; + +struct mt7603_vif { + struct mt7603_sta sta; /* must be first */ + + u8 idx; +}; + +enum mt7603_reset_cause { + RESET_CAUSE_TX_HANG, + RESET_CAUSE_TX_BUSY, + RESET_CAUSE_RX_BUSY, + RESET_CAUSE_BEACON_STUCK, + RESET_CAUSE_RX_PSE_BUSY, + RESET_CAUSE_MCU_HANG, + RESET_CAUSE_RESET_FAILED, + __RESET_CAUSE_MAX +}; + +struct mt7603_dev { + struct mt76_dev mt76; /* must be first */ + + const struct mt76_bus_ops *bus_ops; + + u32 rxfilter; + + u8 vif_mask; + + struct mt7603_sta global_sta; + + u32 agc0, agc3; + u32 false_cca_ofdm, false_cca_cck; + unsigned long last_cca_adj; + + u8 rssi_offset[3]; + + u8 slottime; + s16 coverage_class; + + s8 tx_power_limit; + + ktime_t survey_time; + ktime_t ed_time; + int beacon_int; + + struct mt76_queue q_rx; + + spinlock_t ps_lock; + + u8 mac_work_count; + + u8 mcu_running; + u8 ed_monitor; + + s8 ed_trigger; + u8 ed_strict_mode; + u8 ed_strong_signal; + + s8 sensitivity; + + u8 beacon_mask; + + u8 beacon_check; + u8 tx_hang_check; + u8 tx_dma_check; + u8 rx_dma_check; + u8 rx_pse_check; + u8 mcu_hang; + + enum mt7603_reset_cause cur_reset_cause; + + u16 tx_dma_idx[4]; + u16 rx_dma_idx; + + u32 reset_test; + + unsigned int reset_cause[__RESET_CAUSE_MAX]; + + 
struct delayed_work mac_work; + struct tasklet_struct tx_tasklet; + struct tasklet_struct pre_tbtt_tasklet; +}; + +extern const struct mt76_driver_ops mt7603_drv_ops; +extern const struct ieee80211_ops mt7603_ops; +extern struct pci_driver mt7603_pci_driver; +extern struct platform_driver mt76_wmac_driver; + +static inline bool is_mt7603(struct mt7603_dev *dev) +{ + return mt76xx_chip(dev) == 0x7603; +} + +static inline bool is_mt7628(struct mt7603_dev *dev) +{ + return mt76xx_chip(dev) == 0x7628; +} + +/* need offset to prevent conflict with ampdu_ack_len */ +#define MT_RATE_DRIVER_DATA_OFFSET 4 + +u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr); + +irqreturn_t mt7603_irq_handler(int irq, void *dev_instance); + +int mt7603_register_device(struct mt7603_dev *dev); +void mt7603_unregister_device(struct mt7603_dev *dev); +int mt7603_eeprom_init(struct mt7603_dev *dev); +int mt7603_dma_init(struct mt7603_dev *dev); +void mt7603_dma_cleanup(struct mt7603_dev *dev); +int mt7603_mcu_init(struct mt7603_dev *dev); +void mt7603_init_debugfs(struct mt7603_dev *dev); + +void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set); + +static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask) +{ + mt7603_set_irq_mask(dev, 0, mask); +} + +static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask) +{ + mt7603_set_irq_mask(dev, mask, 0); +} + +void mt7603_mac_dma_start(struct mt7603_dev *dev); +void mt7603_mac_start(struct mt7603_dev *dev); +void mt7603_mac_stop(struct mt7603_dev *dev); +void mt7603_mac_work(struct work_struct *work); +void mt7603_mac_set_timing(struct mt7603_dev *dev); +void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval); +int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); +void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); +void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); +void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, + int ba_size); + +void mt7603_pse_client_reset(struct mt7603_dev *dev); + +int mt7603_mcu_set_channel(struct mt7603_dev *dev); +int mt7603_mcu_set_eeprom(struct mt7603_dev *dev); +void mt7603_mcu_exit(struct mt7603_dev *dev); + +void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif, + const u8 *mac_addr); +void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx); +void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta); +void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta, + struct ieee80211_tx_rate *probe_rate, + struct ieee80211_tx_rate *rates); +int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid, + struct ieee80211_key_conf *key); +void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta, + bool enabled); +void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta, + bool enabled); +void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort); + +int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + struct sk_buff *skb, struct mt76_queue *q, + struct mt76_wcid *wcid, struct ieee80211_sta *sta, + u32 *tx_info); + +void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush); + +void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, + struct sk_buff *skb); +void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q); +void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps); +int mt7603_sta_add(struct mt76_dev *mdev, struct 
ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +void mt7603_pre_tbtt_tasklet(unsigned long arg); + +void mt7603_update_channel(struct mt76_dev *mdev); + +void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val); +void mt7603_cca_stats_reset(struct mt7603_dev *dev); + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c new file mode 100644 index 000000000000..4acdbf5d8968 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: ISC */ + +#include +#include +#include + +#include "mt7603.h" + +static const struct pci_device_id mt76pci_device_table[] = { + { PCI_DEVICE(0x14c3, 0x7603) }, + { }, +}; + +static int +mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mt7603_dev *dev; + struct mt76_dev *mdev; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) + return ret; + + pci_set_master(pdev); + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, + &mt7603_drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt7603_dev, mt76); + mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]); + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto error; + + ret = mt7603_register_device(dev); + if (ret) + goto error; + + return 0; +error: + ieee80211_free_hw(mt76_hw(dev)); + return ret; +} + +static void +mt76pci_remove(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + + mt7603_unregister_device(dev); +} + +MODULE_DEVICE_TABLE(pci, mt76pci_device_table); +MODULE_FIRMWARE(MT7603_FIRMWARE_E1); +MODULE_FIRMWARE(MT7603_FIRMWARE_E2); + +struct pci_driver mt7603_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = mt76pci_device_table, + .probe = mt76pci_probe, + .remove = mt76pci_remove, +}; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h new file mode 100644 index 000000000000..da6827ae6cee --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h @@ -0,0 +1,774 @@ +/* SPDX-License-Identifier: ISC */ + +#ifndef __MT7603_REGS_H +#define __MT7603_REGS_H + +#define MT_HW_REV 0x1000 +#define MT_HW_CHIPID 0x1008 +#define MT_TOP_MISC2 0x1134 + +#define MT_MCU_BASE 0x2000 +#define MT_MCU(ofs) (MT_MCU_BASE + (ofs)) + +#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500) +#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0) +#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18) + +#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504) +#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0) +#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19) + +#define MT_HIF_BASE 0x4000 +#define MT_HIF(ofs) (MT_HIF_BASE + (ofs)) + +#define MT_INT_SOURCE_CSR MT_HIF(0x200) +#define MT_INT_MASK_CSR MT_HIF(0x204) +#define MT_DELAY_INT_CFG MT_HIF(0x210) + +#define MT_INT_RX_DONE(_n) BIT(_n) +#define MT_INT_RX_DONE_ALL GENMASK(1, 
0) +#define MT_INT_TX_DONE_ALL GENMASK(19, 4) +#define MT_INT_TX_DONE(_n) BIT((_n) + 4) + +#define MT_INT_RX_COHERENT BIT(20) +#define MT_INT_TX_COHERENT BIT(21) +#define MT_INT_MAC_IRQ3 BIT(27) + +#define MT_INT_MCU_CMD BIT(30) + +#define MT_WPDMA_GLO_CFG MT_HIF(0x208) +#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) +#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) +#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) +#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8) +#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24) +#define MT_WPDMA_GLO_CFG_FORCE_TX_EOF BIT(25) +#define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30) +#define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MT_WPDMA_RST_IDX MT_HIF(0x20c) + +#define MT_WPDMA_DEBUG MT_HIF(0x244) +#define MT_WPDMA_DEBUG_VALUE GENMASK(17, 0) +#define MT_WPDMA_DEBUG_SEL BIT(27) +#define MT_WPDMA_DEBUG_IDX GENMASK(31, 28) + +#define MT_TX_RING_BASE MT_HIF(0x300) +#define MT_RX_RING_BASE MT_HIF(0x400) + +#define MT_TXTIME_THRESH_BASE MT_HIF(0x500) +#define MT_TXTIME_THRESH(n) (MT_TXTIME_THRESH_BASE + ((n) * 4)) + +#define MT_PAGE_COUNT_BASE MT_HIF(0x540) +#define MT_PAGE_COUNT(n) (MT_PAGE_COUNT_BASE + ((n) * 4)) + +#define MT_SCH_1 MT_HIF(0x588) +#define MT_SCH_2 MT_HIF(0x58c) +#define MT_SCH_3 MT_HIF(0x590) + +#define MT_SCH_4 MT_HIF(0x594) +#define MT_SCH_4_FORCE_QID GENMASK(4, 0) +#define MT_SCH_4_BYPASS BIT(5) +#define MT_SCH_4_RESET BIT(8) + +#define MT_GROUP_THRESH_BASE MT_HIF(0x598) +#define MT_GROUP_THRESH(n) (MT_GROUP_THRESH_BASE + ((n) * 4)) + +#define MT_QUEUE_PRIORITY_1 MT_HIF(0x580) +#define MT_QUEUE_PRIORITY_2 MT_HIF(0x584) + +#define MT_BMAP_0 MT_HIF(0x5b0) +#define MT_BMAP_1 MT_HIF(0x5b4) +#define MT_BMAP_2 MT_HIF(0x5b8) + +#define MT_HIGH_PRIORITY_1 MT_HIF(0x5bc) +#define MT_HIGH_PRIORITY_2 MT_HIF(0x5c0) + +#define MT_PRIORITY_MASK MT_HIF(0x5c4) + +#define MT_RSV_MAX_THRESH MT_HIF(0x5c8) + +#define MT_PSE_BASE 0x8000 +#define MT_PSE(ofs) (MT_PSE_BASE + (ofs)) + +#define MT_MCU_DEBUG_RESET MT_PSE(0x16c) +#define MT_MCU_DEBUG_RESET_PSE BIT(0) +#define MT_MCU_DEBUG_RESET_PSE_S BIT(1) +#define MT_MCU_DEBUG_RESET_QUEUES GENMASK(6, 2) + +#define MT_PSE_FC_P0 MT_PSE(0x120) +#define MT_PSE_FC_P0_MIN_RESERVE GENMASK(11, 0) +#define MT_PSE_FC_P0_MAX_QUOTA GENMASK(27, 16) + +#define MT_PSE_FRP MT_PSE(0x138) +#define MT_PSE_FRP_P0 GENMASK(2, 0) +#define MT_PSE_FRP_P1 GENMASK(5, 3) +#define MT_PSE_FRP_P2_RQ0 GENMASK(8, 6) +#define MT_PSE_FRP_P2_RQ1 GENMASK(11, 9) +#define MT_PSE_FRP_P2_RQ2 GENMASK(14, 12) + +#define MT_FC_RSV_COUNT_0 MT_PSE(0x13c) +#define MT_FC_RSV_COUNT_0_P0 GENMASK(11, 0) +#define MT_FC_RSV_COUNT_0_P1 GENMASK(27, 16) + +#define MT_FC_SP2_Q0Q1 MT_PSE(0x14c) +#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q0 GENMASK(11, 0) +#define MT_FC_SP2_Q0Q1_SRC_COUNT_Q1 GENMASK(27, 16) + +#define MT_PSE_FW_SHARED MT_PSE(0x17c) + +#define MT_PSE_RTA MT_PSE(0x194) +#define MT_PSE_RTA_QUEUE_ID GENMASK(4, 0) +#define MT_PSE_RTA_PORT_ID GENMASK(6, 5) +#define MT_PSE_RTA_REDIRECT_EN BIT(7) +#define MT_PSE_RTA_TAG_ID GENMASK(15, 8) +#define MT_PSE_RTA_WRITE BIT(16) +#define MT_PSE_RTA_BUSY BIT(31) + +#define MT_WF_PHY_BASE 0x10000 +#define MT_WF_PHY_OFFSET 0x1000 +#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) + +#define MT_AGC_BASE MT_WF_PHY(0x500) +#define MT_AGC(n) (MT_AGC_BASE + ((n) * 4)) + +#define MT_AGC1_BASE MT_WF_PHY(0x1500) +#define MT_AGC1(n) (MT_AGC1_BASE + ((n) * 4)) + +#define 
MT_AGC_41_RSSI_0 GENMASK(23, 16) +#define MT_AGC_41_RSSI_1 GENMASK(7, 0) + +#define MT_RXTD_BASE MT_WF_PHY(0x600) +#define MT_RXTD(n) (MT_RXTD_BASE + ((n) * 4)) + +#define MT_RXTD_6_ACI_TH GENMASK(4, 0) +#define MT_RXTD_6_CCAED_TH GENMASK(14, 8) + +#define MT_RXTD_8_LOWER_SIGNAL GENMASK(5, 0) + +#define MT_RXTD_13_ACI_TH_EN BIT(0) + +#define MT_WF_PHY_CR_TSSI_BASE MT_WF_PHY(0xd00) +#define MT_WF_PHY_CR_TSSI(phy, n) (MT_WF_PHY_CR_TSSI_BASE + \ + ((phy) * MT_WF_PHY_OFFSET) + \ + ((n) * 4)) + +#define MT_PHYCTRL_BASE MT_WF_PHY(0x4100) +#define MT_PHYCTRL(n) (MT_PHYCTRL_BASE + ((n) * 4)) + +#define MT_PHYCTRL_2_STATUS_RESET BIT(6) +#define MT_PHYCTRL_2_STATUS_EN BIT(7) + +#define MT_PHYCTRL_STAT_PD MT_PHYCTRL(3) +#define MT_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16) +#define MT_PHYCTRL_STAT_PD_CCK GENMASK(15, 0) + +#define MT_PHYCTRL_STAT_MDRDY MT_PHYCTRL(8) +#define MT_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16) +#define MT_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0) + +#define MT_WF_AGG_BASE 0x21200 +#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs)) + +#define MT_AGG_ARCR MT_WF_AGG(0x010) +#define MT_AGG_ARCR_INIT_RATE1 BIT(0) +#define MT_AGG_ARCR_FB_SGI_DISABLE BIT(1) +#define MT_AGG_ARCR_RATE8_DOWN_WRAP BIT(2) +#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8) +#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16) +#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19) +#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20) +#define MT_AGG_ARCR_SPE_DIS_TH GENMASK(27, 24) + +#define MT_AGG_ARUCR MT_WF_AGG(0x014) +#define MT_AGG_ARDCR MT_WF_AGG(0x018) +#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n)) +#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \ + MT_AGG_ARxCR_LIMIT_SHIFT(_n), \ + MT_AGG_ARxCR_LIMIT_SHIFT(_n)) + +#define MT_AGG_LIMIT MT_WF_AGG(0x040) +#define MT_AGG_LIMIT_1 MT_WF_AGG(0x044) +#define MT_AGG_LIMIT_AC(_n) GENMASK(((_n) + 1) * 8 - 1, (_n) * 8) + +#define MT_AGG_BA_SIZE_LIMIT_0 MT_WF_AGG(0x048) +#define MT_AGG_BA_SIZE_LIMIT_1 MT_WF_AGG(0x04c) +#define MT_AGG_BA_SIZE_LIMIT_SHIFT 8 + +#define MT_AGG_PCR MT_WF_AGG(0x050) +#define MT_AGG_PCR_MM BIT(16) +#define MT_AGG_PCR_GF BIT(17) +#define MT_AGG_PCR_BW40 BIT(18) +#define MT_AGG_PCR_RIFS BIT(19) +#define MT_AGG_PCR_BW80 BIT(20) +#define MT_AGG_PCR_BW160 BIT(21) +#define MT_AGG_PCR_ERP BIT(22) + +#define MT_AGG_PCR_RTS MT_WF_AGG(0x054) +#define MT_AGG_PCR_RTS_THR GENMASK(19, 0) +#define MT_AGG_PCR_RTS_PKT_THR GENMASK(31, 25) + +#define MT_AGG_CONTROL MT_WF_AGG(0x070) +#define MT_AGG_CONTROL_NO_BA_RULE BIT(0) +#define MT_AGG_CONTROL_NO_BA_AR_RULE BIT(1) +#define MT_AGG_CONTROL_CFEND_SPE_EN BIT(3) +#define MT_AGG_CONTROL_CFEND_RATE GENMASK(15, 4) +#define MT_AGG_CONTROL_BAR_SPE_EN BIT(19) +#define MT_AGG_CONTROL_BAR_RATE GENMASK(31, 20) + +#define MT_AGG_TMP MT_WF_AGG(0x0d8) + +#define MT_AGG_BWCR MT_WF_AGG(0x0ec) +#define MT_AGG_BWCR_BW GENMASK(3, 2) + +#define MT_AGG_RETRY_CONTROL MT_WF_AGG(0x0f4) +#define MT_AGG_RETRY_CONTROL_RTS_LIMIT GENMASK(11, 7) +#define MT_AGG_RETRY_CONTROL_BAR_LIMIT GENMASK(15, 12) + +#define MT_WF_DMA_BASE 0x21c00 +#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs)) + +#define MT_DMA_DCR0 MT_WF_DMA(0x000) +#define MT_DMA_DCR1 MT_WF_DMA(0x004) + +#define MT_DMA_FQCR0 MT_WF_DMA(0x008) +#define MT_DMA_FQCR0_TARGET_WCID GENMASK(7, 0) +#define MT_DMA_FQCR0_TARGET_BSS GENMASK(13, 8) +#define MT_DMA_FQCR0_TARGET_QID GENMASK(20, 16) +#define MT_DMA_FQCR0_DEST_PORT_ID GENMASK(23, 22) +#define MT_DMA_FQCR0_DEST_QUEUE_ID GENMASK(28, 24) +#define MT_DMA_FQCR0_MODE BIT(29) +#define MT_DMA_FQCR0_STATUS BIT(30) +#define MT_DMA_FQCR0_BUSY BIT(31) + +#define 
MT_DMA_RCFR0 MT_WF_DMA(0x070) +#define MT_DMA_VCFR0 MT_WF_DMA(0x07c) + +#define MT_DMA_TCFR0 MT_WF_DMA(0x080) +#define MT_DMA_TCFR1 MT_WF_DMA(0x084) +#define MT_DMA_TCFR_TXS_AGGR_TIMEOUT GENMASK(27, 16) +#define MT_DMA_TCFR_TXS_QUEUE BIT(14) +#define MT_DMA_TCFR_TXS_AGGR_COUNT GENMASK(12, 8) +#define MT_DMA_TCFR_TXS_BIT_MAP GENMASK(6, 0) + +#define MT_DMA_TMCFR0 MT_WF_DMA(0x088) + +#define MT_WF_ARB_BASE 0x21400 +#define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs)) + +#define MT_WMM_AIFSN MT_WF_ARB(0x020) +#define MT_WMM_AIFSN_MASK GENMASK(3, 0) +#define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) + +#define MT_WMM_CWMAX_BASE MT_WF_ARB(0x028) +#define MT_WMM_CWMAX(_n) (MT_WMM_CWMAX_BASE + (((_n) / 2) << 2)) +#define MT_WMM_CWMAX_SHIFT(_n) (((_n) & 1) * 16) +#define MT_WMM_CWMAX_MASK GENMASK(15, 0) + +#define MT_WMM_CWMIN MT_WF_ARB(0x040) +#define MT_WMM_CWMIN_MASK GENMASK(7, 0) +#define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 8) + +#define MT_WF_ARB_RQCR MT_WF_ARB(0x070) +#define MT_WF_ARB_RQCR_RX_START BIT(0) +#define MT_WF_ARB_RQCR_RXV_START BIT(4) +#define MT_WF_ARB_RQCR_RXV_R_EN BIT(7) +#define MT_WF_ARB_RQCR_RXV_T_EN BIT(8) + +#define MT_ARB_SCR MT_WF_ARB(0x080) +#define MT_ARB_SCR_BCNQ_OPMODE_MASK GENMASK(1, 0) +#define MT_ARB_SCR_BCNQ_OPMODE_SHIFT(n) ((n) * 2) +#define MT_ARB_SCR_TX_DISABLE BIT(8) +#define MT_ARB_SCR_RX_DISABLE BIT(9) +#define MT_ARB_SCR_BCNQ_EMPTY_SKIP BIT(28) +#define MT_ARB_SCR_TTTT_BTIM_PRIO BIT(29) +#define MT_ARB_SCR_TBTT_BCN_PRIO BIT(30) +#define MT_ARB_SCR_TBTT_BCAST_PRIO BIT(31) + +enum { + MT_BCNQ_OPMODE_STA = 0, + MT_BCNQ_OPMODE_AP = 1, + MT_BCNQ_OPMODE_ADHOC = 2, +}; + +#define MT_WF_ARB_TX_START_0 MT_WF_ARB(0x100) +#define MT_WF_ARB_TX_START_1 MT_WF_ARB(0x104) +#define MT_WF_ARB_TX_FLUSH_0 MT_WF_ARB(0x108) +#define MT_WF_ARB_TX_FLUSH_1 MT_WF_ARB(0x10c) +#define MT_WF_ARB_TX_STOP_0 MT_WF_ARB(0x110) +#define MT_WF_ARB_TX_STOP_1 MT_WF_ARB(0x114) + +#define MT_WF_ARB_BCN_START MT_WF_ARB(0x118) +#define MT_WF_ARB_BCN_START_BSSn(n) BIT(0 + (n)) +#define MT_WF_ARB_BCN_START_T_PRE_TTTT BIT(10) +#define MT_WF_ARB_BCN_START_T_TTTT BIT(11) +#define MT_WF_ARB_BCN_START_T_PRE_TBTT BIT(12) +#define MT_WF_ARB_BCN_START_T_TBTT BIT(13) +#define MT_WF_ARB_BCN_START_T_SLOT_IDLE BIT(14) +#define MT_WF_ARB_BCN_START_T_TX_START BIT(15) +#define MT_WF_ARB_BCN_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) + +#define MT_WF_ARB_BCN_FLUSH MT_WF_ARB(0x11c) +#define MT_WF_ARB_BCN_FLUSH_BSSn(n) BIT(0 + (n)) +#define MT_WF_ARB_BCN_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) + +#define MT_WF_ARB_CAB_START MT_WF_ARB(0x120) +#define MT_WF_ARB_CAB_START_BSSn(n) BIT(0 + (n)) +#define MT_WF_ARB_CAB_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) + +#define MT_WF_ARB_CAB_FLUSH MT_WF_ARB(0x124) +#define MT_WF_ARB_CAB_FLUSH_BSSn(n) BIT(0 + (n)) +#define MT_WF_ARB_CAB_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) + +#define MT_WF_ARB_CAB_COUNT(n) MT_WF_ARB(0x128 + (n) * 4) +#define MT_WF_ARB_CAB_COUNT_SHIFT 4 +#define MT_WF_ARB_CAB_COUNT_MASK GENMASK(3, 0) +#define MT_WF_ARB_CAB_COUNT_B0_REG(n) MT_WF_ARB_CAB_COUNT(((n) > 12 ? 2 : \ + ((n) > 4 ? 1 : 0))) +#define MT_WF_ARB_CAB_COUNT_B0_SHIFT(n) (((n) > 12 ? (n) - 12 : \ + ((n) > 4 ? (n) - 4 : \ + (n) ? 
(n) + 3 : 0)) * 4) + +#define MT_TX_ABORT MT_WF_ARB(0x134) +#define MT_TX_ABORT_EN BIT(0) +#define MT_TX_ABORT_WCID GENMASK(15, 8) + +#define MT_WF_TMAC_BASE 0x21600 +#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs)) + +#define MT_TMAC_TCR MT_WF_TMAC(0x000) +#define MT_TMAC_TCR_BLINK_SEL GENMASK(7, 6) +#define MT_TMAC_TCR_PRE_RTS_GUARD GENMASK(11, 8) +#define MT_TMAC_TCR_PRE_RTS_SEC_IDLE GENMASK(13, 12) +#define MT_TMAC_TCR_RTS_SIGTA BIT(14) +#define MT_TMAC_TCR_LDPC_OFS BIT(15) +#define MT_TMAC_TCR_TX_STREAMS GENMASK(17, 16) +#define MT_TMAC_TCR_SCH_IDLE_SEL GENMASK(19, 18) +#define MT_TMAC_TCR_SCH_DET_PER_IOD BIT(20) +#define MT_TMAC_TCR_DCH_DET_DISABLE BIT(21) +#define MT_TMAC_TCR_TX_RIFS BIT(22) +#define MT_TMAC_TCR_RX_RIFS_MODE BIT(23) +#define MT_TMAC_TCR_TXOP_TBTT_CTL BIT(24) +#define MT_TMAC_TCR_TBTT_TX_STOP_CTL BIT(25) +#define MT_TMAC_TCR_TXOP_BURST_STOP BIT(26) +#define MT_TMAC_TCR_RDG_RA_MODE BIT(27) +#define MT_TMAC_TCR_RDG_RESP BIT(29) +#define MT_TMAC_TCR_RDG_NO_PENDING BIT(30) +#define MT_TMAC_TCR_SMOOTHING BIT(31) + +#define MT_WMM_TXOP_BASE MT_WF_TMAC(0x010) +#define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + \ + ((((_n) / 2) ^ 0x1) << 2)) +#define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16) +#define MT_WMM_TXOP_MASK GENMASK(15, 0) + +#define MT_TIMEOUT_CCK MT_WF_TMAC(0x090) +#define MT_TIMEOUT_OFDM MT_WF_TMAC(0x094) +#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0) +#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16) + +#define MT_TXREQ MT_WF_TMAC(0x09c) +#define MT_TXREQ_CCA_SRC_SEL GENMASK(31, 30) + +#define MT_RXREQ MT_WF_TMAC(0x0a0) +#define MT_RXREQ_DELAY GENMASK(8, 0) + +#define MT_IFS MT_WF_TMAC(0x0a4) +#define MT_IFS_EIFS GENMASK(8, 0) +#define MT_IFS_RIFS GENMASK(14, 10) +#define MT_IFS_SIFS GENMASK(22, 16) +#define MT_IFS_SLOT GENMASK(30, 24) + +#define MT_TMAC_PCR MT_WF_TMAC(0x0b4) +#define MT_TMAC_PCR_RATE GENMASK(8, 0) +#define MT_TMAC_PCR_RATE_FIXED BIT(15) +#define MT_TMAC_PCR_ANT_ID GENMASK(21, 16) +#define MT_TMAC_PCR_ANT_ID_SEL BIT(22) +#define MT_TMAC_PCR_SPE_EN BIT(23) +#define MT_TMAC_PCR_ANT_PRI GENMASK(26, 24) +#define MT_TMAC_PCR_ANT_PRI_SEL GENMASK(27) + +#define MT_WF_RMAC_BASE 0x21800 +#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs)) + +#define MT_WF_RFCR MT_WF_RMAC(0x000) +#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0) +#define MT_WF_RFCR_DROP_FCSFAIL BIT(1) +#define MT_WF_RFCR_DROP_VERSION BIT(3) +#define MT_WF_RFCR_DROP_PROBEREQ BIT(4) +#define MT_WF_RFCR_DROP_MCAST BIT(5) +#define MT_WF_RFCR_DROP_BCAST BIT(6) +#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7) +#define MT_WF_RFCR_DROP_A3_MAC BIT(8) +#define MT_WF_RFCR_DROP_A3_BSSID BIT(9) +#define MT_WF_RFCR_DROP_A2_BSSID BIT(10) +#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11) +#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12) +#define MT_WF_RFCR_DROP_CTL_RSV BIT(13) +#define MT_WF_RFCR_DROP_CTS BIT(14) +#define MT_WF_RFCR_DROP_RTS BIT(15) +#define MT_WF_RFCR_DROP_DUPLICATE BIT(16) +#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17) +#define MT_WF_RFCR_DROP_OTHER_UC BIT(18) +#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19) +#define MT_WF_RFCR_DROP_NDPA BIT(20) +#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21) + +#define MT_BSSID0(idx) MT_WF_RMAC(0x004 + (idx) * 8) +#define MT_BSSID1(idx) MT_WF_RMAC(0x008 + (idx) * 8) +#define MT_BSSID1_VALID BIT(16) + +#define MT_MAC_ADDR0(idx) MT_WF_RMAC(0x024 + (idx) * 8) +#define MT_MAC_ADDR1(idx) MT_WF_RMAC(0x028 + (idx) * 8) +#define MT_MAC_ADDR1_ADDR GENMASK(15, 0) +#define MT_MAC_ADDR1_VALID BIT(16) + +#define MT_BA_CONTROL_0 MT_WF_RMAC(0x068) +#define MT_BA_CONTROL_1 MT_WF_RMAC(0x06c) +#define 
MT_BA_CONTROL_1_ADDR GENMASK(15, 0) +#define MT_BA_CONTROL_1_TID GENMASK(19, 16) +#define MT_BA_CONTROL_1_IGNORE_TID BIT(20) +#define MT_BA_CONTROL_1_IGNORE_ALL BIT(21) +#define MT_BA_CONTROL_1_RESET BIT(22) + +#define MT_WF_RMACDR MT_WF_RMAC(0x078) +#define MT_WF_RMACDR_TSF_PROBERSP_DIS BIT(0) +#define MT_WF_RMACDR_TSF_TIM BIT(4) +#define MT_WF_RMACDR_MBSSID_MASK GENMASK(25, 24) +#define MT_WF_RMACDR_CHECK_HTC_BY_RATE BIT(26) +#define MT_WF_RMACDR_MAXLEN_20BIT BIT(30) + +#define MT_WF_RMAC_RMCR MT_WF_RMAC(0x080) +#define MT_WF_RMAC_RMCR_SMPS_MODE GENMASK(21, 20) +#define MT_WF_RMAC_RMCR_RX_STREAMS GENMASK(24, 22) +#define MT_WF_RMAC_RMCR_SMPS_RTS BIT(25) + +#define MT_WF_RMAC_CH_FREQ MT_WF_RMAC(0x090) +#define MT_WF_RMAC_MAXMINLEN MT_WF_RMAC(0x098) +#define MT_WF_RFCR1 MT_WF_RMAC(0x0a4) +#define MT_WF_RMAC_TMR_PA MT_WF_RMAC(0x0e0) + +#define MT_WF_SEC_BASE 0x21a00 +#define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs)) + +#define MT_SEC_SCR MT_WF_SEC(0x004) +#define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0) + +#define MT_WTBL_OFF_BASE 0x23000 +#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n)) + +#define MT_WTBL_UPDATE MT_WTBL_OFF(0x000) +#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0) +#define MT_WTBL_UPDATE_WTBL2 BIT(11) +#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) +#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13) +#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14) +#define MT_WTBL_UPDATE_RX_COUNT_CLEAR BIT(15) +#define MT_WTBL_UPDATE_BUSY BIT(16) + +#define MT_WTBL_RMVTCR MT_WTBL_OFF(0x008) +#define MT_WTBL_RMVTCR_RX_MV_MODE BIT(23) + +#define MT_LPON_BASE 0x24000 +#define MT_LPON(n) (MT_LPON_BASE + (n)) + +#define MT_LPON_BTEIR MT_LPON(0x020) +#define MT_LPON_BTEIR_MBSS_MODE GENMASK(31, 29) + +#define MT_PRE_TBTT MT_LPON(0x030) +#define MT_PRE_TBTT_MASK GENMASK(7, 0) +#define MT_PRE_TBTT_SHIFT 8 + +#define MT_TBTT MT_LPON(0x034) +#define MT_TBTT_PERIOD GENMASK(15, 0) +#define MT_TBTT_DTIM_PERIOD GENMASK(23, 16) +#define MT_TBTT_TBTT_WAKE_PERIOD GENMASK(27, 24) +#define MT_TBTT_DTIM_WAKE_PERIOD GENMASK(30, 28) +#define MT_TBTT_CAL_ENABLE BIT(31) + +#define MT_TBTT_TIMER_CFG MT_LPON(0x05c) + +#define MT_LPON_SBTOR(n) MT_LPON(0x0a0) +#define MT_LPON_SBTOR_SUB_BSS_EN BIT(29) +#define MT_LPON_SBTOR_TIME_OFFSET GENMASK(19, 0) + +#define MT_INT_WAKEUP_BASE 0x24400 +#define MT_INT_WAKEUP(n) (MT_INT_WAKEUP_BASE + (n)) + +#define MT_HW_INT_STATUS(n) MT_INT_WAKEUP(0x3c + (n) * 8) +#define MT_HW_INT_MASK(n) MT_INT_WAKEUP(0x40 + (n) * 8) + +#define MT_HW_INT3_TBTT0 BIT(15) +#define MT_HW_INT3_PRE_TBTT0 BIT(31) + +#define MT_WTBL1_BASE 0x28000 + +#define MT_WTBL_ON_BASE (MT_WTBL1_BASE + 0x2000) +#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n)) + +#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x200) + +#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x204) +#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0) +#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12) +#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24) + +#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x208) +#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0) +#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4) +#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16) +#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28) + +#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x20c) +#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0) +#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8) +#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20) + +#define MT_MIB_BASE 0x2c000 +#define MT_MIB(_n) (MT_MIB_BASE + (_n)) + +#define MT_MIB_CTL MT_MIB(0x00) +#define MT_MIB_CTL_PSCCA_TIME GENMASK(13, 11) +#define MT_MIB_CTL_CCA_NAV_TX GENMASK(16, 14) +#define MT_MIB_CTL_ED_TIME GENMASK(30, 28) +#define 
MT_MIB_CTL_READ_CLR_DIS BIT(31) + +#define MT_MIB_STAT(_n) MT_MIB(0x08 + (_n) * 4) + +#define MT_MIB_STAT_CCA MT_MIB_STAT(9) +#define MT_MIB_STAT_CCA_MASK GENMASK(23, 0) + +#define MT_MIB_STAT_PSCCA MT_MIB_STAT(16) +#define MT_MIB_STAT_PSCCA_MASK GENMASK(23, 0) + +#define MT_MIB_STAT_ED MT_MIB_STAT(18) +#define MT_MIB_STAT_ED_MASK GENMASK(23, 0) + +#define MT_PCIE_REMAP_BASE_1 0x40000 +#define MT_PCIE_REMAP_BASE_2 0x80000 + +#define MT_TX_HW_QUEUE_MGMT 4 +#define MT_TX_HW_QUEUE_MCU 5 +#define MT_TX_HW_QUEUE_BCN 7 +#define MT_TX_HW_QUEUE_BMC 8 + +#define MT_LED_BASE_PHYS 0x80024000 +#define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n)) + +#define MT_LED_CTRL MT_LED_PHYS(0x00) + +#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) +#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) +#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) +#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n))) +#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n))) +#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) + +#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8)) +#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8)) +#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24) +#define MT_LED_STATUS_OFF(_v) (((_v) << \ + __ffs(MT_LED_STATUS_OFF_MASK)) & \ + MT_LED_STATUS_OFF_MASK) +#define MT_LED_STATUS_ON_MASK GENMASK(23, 16) +#define MT_LED_STATUS_ON(_v) (((_v) << \ + __ffs(MT_LED_STATUS_ON_MASK)) & \ + MT_LED_STATUS_ON_MASK) +#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 0) +#define MT_LED_STATUS_DURATION(_v) (((_v) << \ + __ffs(MT_LED_STATUS_DURATION_MASK)) &\ + MT_LED_STATUS_DURATION_MASK) + +#define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000 + +#define MT_CLIENT_TMAC_INFO_TEMPLATE 0x040 + +#define MT_CLIENT_STATUS 0x06c + +#define MT_CLIENT_RESET_TX 0x070 +#define MT_CLIENT_RESET_TX_R_E_1 BIT(16) +#define MT_CLIENT_RESET_TX_R_E_2 BIT(17) +#define MT_CLIENT_RESET_TX_R_E_1_S BIT(20) +#define MT_CLIENT_RESET_TX_R_E_2_S BIT(21) + +#define MT_EFUSE_BASE 0x81070000 + +#define MT_EFUSE_BASE_CTRL 0x000 +#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30) + +#define MT_EFUSE_CTRL 0x008 +#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) +#define MT_EFUSE_CTRL_MODE GENMASK(7, 6) +#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) +#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) +#define MT_EFUSE_CTRL_AIN GENMASK(25, 16) +#define MT_EFUSE_CTRL_VALID BIT(29) +#define MT_EFUSE_CTRL_KICK BIT(30) +#define MT_EFUSE_CTRL_SEL BIT(31) + +#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4)) +#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4)) + +#define MT_CLIENT_RXINF 0x068 +#define MT_CLIENT_RXINF_RXSH_GROUPS GENMASK(2, 0) + +#define MT_PSE_BASE_PHYS_ADDR 0xa0000000 + +#define MT_PSE_WTBL_2_PHYS_ADDR 0xa5000000 + +#define MT_WTBL1_SIZE (8 * 4) +#define MT_WTBL2_SIZE (16 * 4) +#define MT_WTBL3_OFFSET (MT7603_WTBL_SIZE * MT_WTBL2_SIZE) +#define MT_WTBL3_SIZE (16 * 4) +#define MT_WTBL4_OFFSET (MT7603_WTBL_SIZE * MT_WTBL3_SIZE + \ + MT_WTBL3_OFFSET) +#define MT_WTBL4_SIZE (8 * 4) + +#define MT_WTBL1_W0_ADDR_HI GENMASK(15, 0) +#define MT_WTBL1_W0_MUAR_IDX GENMASK(21, 16) +#define MT_WTBL1_W0_RX_CHECK_A1 BIT(22) +#define MT_WTBL1_W0_KEY_IDX GENMASK(24, 23) +#define MT_WTBL1_W0_RX_CHECK_KEY_IDX BIT(25) +#define MT_WTBL1_W0_RX_KEY_VALID BIT(26) +#define MT_WTBL1_W0_RX_IK_VALID BIT(27) +#define MT_WTBL1_W0_RX_VALID BIT(28) +#define MT_WTBL1_W0_RX_CHECK_A2 BIT(29) +#define MT_WTBL1_W0_RX_DATA_VALID BIT(30) +#define MT_WTBL1_W0_WRITE_BURST BIT(31) + +#define MT_WTBL1_W1_ADDR_LO GENMASK(31, 0) + +#define MT_WTBL1_W2_MPDU_DENSITY GENMASK(2, 0) +#define 
MT_WTBL1_W2_KEY_TYPE GENMASK(6, 3) +#define MT_WTBL1_W2_EVEN_PN BIT(7) +#define MT_WTBL1_W2_TO_DS BIT(8) +#define MT_WTBL1_W2_FROM_DS BIT(9) +#define MT_WTBL1_W2_HEADER_TRANS BIT(10) +#define MT_WTBL1_W2_AMPDU_FACTOR GENMASK(13, 11) +#define MT_WTBL1_W2_PWR_MGMT BIT(14) +#define MT_WTBL1_W2_RDG BIT(15) +#define MT_WTBL1_W2_RTS BIT(16) +#define MT_WTBL1_W2_CFACK BIT(17) +#define MT_WTBL1_W2_RDG_BA BIT(18) +#define MT_WTBL1_W2_SMPS BIT(19) +#define MT_WTBL1_W2_TXS_BAF_REPORT BIT(20) +#define MT_WTBL1_W2_DYN_BW BIT(21) +#define MT_WTBL1_W2_LDPC BIT(22) +#define MT_WTBL1_W2_ITXBF BIT(23) +#define MT_WTBL1_W2_ETXBF BIT(24) +#define MT_WTBL1_W2_TXOP_PS BIT(25) +#define MT_WTBL1_W2_MESH BIT(26) +#define MT_WTBL1_W2_QOS BIT(27) +#define MT_WTBL1_W2_HT BIT(28) +#define MT_WTBL1_W2_VHT BIT(29) +#define MT_WTBL1_W2_ADMISSION_CONTROL BIT(30) +#define MT_WTBL1_W2_GROUP_ID BIT(31) + +#define MT_WTBL1_W3_WTBL2_FRAME_ID GENMASK(10, 0) +#define MT_WTBL1_W3_WTBL2_ENTRY_ID GENMASK(15, 11) +#define MT_WTBL1_W3_WTBL4_FRAME_ID GENMASK(26, 16) +#define MT_WTBL1_W3_CHECK_PER BIT(27) +#define MT_WTBL1_W3_KEEP_I_PSM BIT(28) +#define MT_WTBL1_W3_I_PSM BIT(29) +#define MT_WTBL1_W3_POWER_SAVE BIT(30) +#define MT_WTBL1_W3_SKIP_TX BIT(31) + +#define MT_WTBL1_W4_WTBL3_FRAME_ID GENMASK(10, 0) +#define MT_WTBL1_W4_WTBL3_ENTRY_ID GENMASK(16, 11) +#define MT_WTBL1_W4_WTBL4_ENTRY_ID GENMASK(22, 17) +#define MT_WTBL1_W4_PARTIAL_AID GENMASK(31, 23) + +#define MT_WTBL2_W0_PN_LO GENMASK(31, 0) + +#define MT_WTBL2_W1_PN_HI GENMASK(15, 0) +#define MT_WTBL2_W1_NON_QOS_SEQNO GENMASK(27, 16) + +#define MT_WTBL2_W2_TID0_SN GENMASK(11, 0) +#define MT_WTBL2_W2_TID1_SN GENMASK(23, 12) +#define MT_WTBL2_W2_TID2_SN_LO GENMASK(31, 24) + +#define MT_WTBL2_W3_TID2_SN_HI GENMASK(3, 0) +#define MT_WTBL2_W3_TID3_SN GENMASK(15, 4) +#define MT_WTBL2_W3_TID4_SN GENMASK(27, 16) +#define MT_WTBL2_W3_TID5_SN_LO GENMASK(31, 28) + +#define MT_WTBL2_W4_TID5_SN_HI GENMASK(7, 0) +#define MT_WTBL2_W4_TID6_SN GENMASK(19, 8) +#define MT_WTBL2_W4_TID7_SN GENMASK(31, 20) + +#define MT_WTBL2_W5_TX_COUNT_RATE1 GENMASK(15, 0) +#define MT_WTBL2_W5_FAIL_COUNT_RATE1 GENAMSK(31, 16) + +#define MT_WTBL2_W6_TX_COUNT_RATE2 GENMASK(7, 0) +#define MT_WTBL2_W6_TX_COUNT_RATE3 GENMASK(15, 8) +#define MT_WTBL2_W6_TX_COUNT_RATE4 GENMASK(23, 16) +#define MT_WTBL2_W6_TX_COUNT_RATE5 GENMASK(31, 24) + +#define MT_WTBL2_W7_TX_COUNT_CUR_BW GENMASK(15, 0) +#define MT_WTBL2_W7_FAIL_COUNT_CUR_BW GENMASK(31, 16) + +#define MT_WTBL2_W8_TX_COUNT_OTHER_BW GENMASK(15, 0) +#define MT_WTBL2_W8_FAIL_COUNT_OTHER_BW GENMASK(31, 16) + +#define MT_WTBL2_W9_POWER_OFFSET GENMASK(4, 0) +#define MT_WTBL2_W9_SPATIAL_EXT BIT(5) +#define MT_WTBL2_W9_ANT_PRIORITY GENMASK(8, 6) +#define MT_WTBL2_W9_CC_BW_SEL GENMASK(10, 9) +#define MT_WTBL2_W9_CHANGE_BW_RATE GENMASK(13, 11) +#define MT_WTBL2_W9_BW_CAP GENMASK(15, 14) +#define MT_WTBL2_W9_SHORT_GI_20 BIT(16) +#define MT_WTBL2_W9_SHORT_GI_40 BIT(17) +#define MT_WTBL2_W9_SHORT_GI_80 BIT(18) +#define MT_WTBL2_W9_SHORT_GI_160 BIT(19) +#define MT_WTBL2_W9_MPDU_FAIL_COUNT GENMASK(25, 23) +#define MT_WTBL2_W9_MPDU_OK_COUNT GENMASK(28, 26) +#define MT_WTBL2_W9_RATE_IDX GENMASK(31, 29) + +#define MT_WTBL2_W10_RATE1 GENMASK(11, 0) +#define MT_WTBL2_W10_RATE2 GENMASK(23, 12) +#define MT_WTBL2_W10_RATE3_LO GENMASK(31, 24) + +#define MT_WTBL2_W11_RATE3_HI GENMASK(3, 0) +#define MT_WTBL2_W11_RATE4 GENMASK(15, 4) +#define MT_WTBL2_W11_RATE5 GENMASK(27, 16) +#define MT_WTBL2_W11_RATE6_LO GENMASK(31, 28) + +#define MT_WTBL2_W12_RATE6_HI GENMASK(7, 0) +#define 
MT_WTBL2_W12_RATE7 GENMASK(19, 8) +#define MT_WTBL2_W12_RATE8 GENMASK(31, 20) + +#define MT_WTBL2_W13_AVG_RCPI0 GENMASK(7, 0) +#define MT_WTBL2_W13_AVG_RCPI1 GENMASK(15, 8) +#define MT_WTBL2_W13_AVG_RCPI2 GENAMSK(23, 16) + +#define MT_WTBL2_W14_CC_NOISE_1S GENMASK(6, 0) +#define MT_WTBL2_W14_CC_NOISE_2S GENMASK(13, 7) +#define MT_WTBL2_W14_CC_NOISE_3S GENMASK(20, 14) +#define MT_WTBL2_W14_CHAN_EST_RMS GENMASK(24, 21) +#define MT_WTBL2_W14_CC_NOISE_SEL BIT(15) +#define MT_WTBL2_W14_ANT_SEL GENMASK(31, 26) + +#define MT_WTBL2_W15_BA_WIN_SIZE GENMASK(2, 0) +#define MT_WTBL2_W15_BA_WIN_SIZE_SHIFT 3 +#define MT_WTBL2_W15_BA_EN_TIDS GENMASK(31, 24) + +#define MT_WTBL1_OR (MT_WTBL1_BASE + 0x2300) +#define MT_WTBL1_OR_PSM_WRITE BIT(31) + +enum mt7603_cipher_type { + MT_CIPHER_NONE, + MT_CIPHER_WEP40, + MT_CIPHER_TKIP, + MT_CIPHER_TKIP_NO_MIC, + MT_CIPHER_AES_CCMP, + MT_CIPHER_WEP104, + MT_CIPHER_BIP_CMAC_128, + MT_CIPHER_WEP128, + MT_CIPHER_WAPI, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c new file mode 100644 index 000000000000..e13fea80d970 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: ISC */ + +#include +#include +#include + +#include "mt7603.h" + +static int +mt76_wmac_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct mt7603_dev *dev; + void __iomem *mem_base; + struct mt76_dev *mdev; + int irq; + int ret; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get device IRQ\n"); + return irq; + } + + mem_base = devm_ioremap_resource(&pdev->dev, res); + if (!mem_base) { + dev_err(&pdev->dev, "Failed to get memory resource\n"); + return -EINVAL; + } + + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, + &mt7603_drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt7603_dev, mt76); + mt76_mmio_init(mdev, mem_base); + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, dev); + if (ret) + goto error; + + ret = mt7603_register_device(dev); + if (ret) + goto error; + + return 0; +error: + ieee80211_free_hw(mt76_hw(dev)); + return ret; +} + +static int +mt76_wmac_remove(struct platform_device *pdev) +{ + struct mt76_dev *mdev = platform_get_drvdata(pdev); + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); + + mt7603_unregister_device(dev); + + return 0; +} + +static const struct of_device_id of_wmac_match[] = { + { .compatible = "mediatek,mt7628-wmac" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_wmac_match); +MODULE_FIRMWARE(MT7628_FIRMWARE_E1); +MODULE_FIRMWARE(MT7628_FIRMWARE_E2); + +struct platform_driver mt76_wmac_driver = { + .probe = mt76_wmac_probe, + .remove = mt76_wmac_remove, + .driver = { + .name = "mt76_wmac", + .of_match_table = of_wmac_match, + }, +}; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 497e762978cc..ab6dfc026acb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -152,11 +152,11 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev) return mt76x02_rate_power_val(val); } -void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) +void 
mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev, + struct ieee80211_channel *chan, + struct mt76_rate_power *t) { - struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_2ghz = chan->band == NL80211_BAND_2GHZ; - struct mt76_rate_power *t = &dev->mt76.rate_power; u16 val, addr; s8 delta; @@ -189,7 +189,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126; val = mt76x02_eeprom_get(dev, addr); t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val); - t->ht[6] = t->vht[6] = s6_to_s8(val >> 8); + t->ht[6] = t->ht[7] = t->vht[6] = t->vht[7] = s6_to_s8(val >> 8); /* ht-vht mcs 1ss 0, 1, 2, 3 stbc */ addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec; @@ -205,31 +205,31 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) /* vht mcs 8, 9 5GHz */ val = mt76x02_eeprom_get(dev, 0x132); - t->vht[7] = s6_to_s8(val); - t->vht[8] = s6_to_s8(val >> 8); + t->vht[8] = s6_to_s8(val); + t->vht[9] = s6_to_s8(val >> 8); delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev); mt76x02_add_rate_power_offset(t, delta); } -void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) +void mt76x0_get_power_info(struct mt76x02_dev *dev, + struct ieee80211_channel *chan, s8 *tp) { struct mt76x0_chan_map { u8 chan; u8 offset; } chan_map[] = { - { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, - { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, - { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, - { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, - { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, - { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, - { 167, 17 }, { 171, 18 }, { 173, 19 }, + { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 }, + { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 }, + { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 }, + { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 }, + { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 }, + { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 }, + { 167, 34 }, { 171, 36 }, { 175, 38 }, }; - struct ieee80211_channel *chan = dev->mt76.chandef.chan; u8 offset, addr; + int i, idx = 0; u16 data; - int i; if (mt76x0_tssi_enabled(dev)) { s8 target_power; @@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) else data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; - info[0] = target_power + mt76x0_get_delta(dev); - info[1] = 0; + *tp = target_power + mt76x0_get_delta(dev); return; } for (i = 0; i < ARRAY_SIZE(chan_map); i++) { - if (chan_map[i].chan <= chan->hw_value) { + if (chan->hw_value <= chan_map[i].chan) { + idx = (chan->hw_value == chan_map[i].chan); offset = chan_map[i].offset; break; } @@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) addr = MT_EE_TX_POWER_DELTA_BW80 + offset; } else { switch (chan->hw_value) { + case 42: + offset = 2; + break; case 58: offset = 8; break; case 106: offset = 14; break; - case 112: + case 122: offset = 20; break; case 155: @@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) } data = mt76x02_eeprom_get(dev, addr); - - info[0] = data; - if (!info[0] || info[0] > 0x3f) - info[0] = 5; - - info[1] = data >> 8; - if (!info[1] || info[1] > 0x3f) - info[1] = 5; + *tp = data >> (8 * idx); + if (*tp < 0 || *tp > 0x3f) + *tp = 5; } static int mt76x0_check_eeprom(struct mt76x02_dev *dev) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index 
ee9ade9f3c8b..7f73034a23b1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h @@ -25,8 +25,11 @@ struct mt76x02_dev; int mt76x0_eeprom_init(struct mt76x02_dev *dev); void mt76x0_read_rx_gain(struct mt76x02_dev *dev); -void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); -void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); +void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev, + struct ieee80211_channel *chan, + struct mt76_rate_power *t); +void mt76x0_get_power_info(struct mt76x02_dev *dev, + struct ieee80211_channel *chan, s8 *tp); static inline s8 s6_to_s8(u32 val) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index 87b575fe1c74..bcb72e019fd2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -18,6 +18,7 @@ #include "eeprom.h" #include "mcu.h" #include "initvals.h" +#include "../mt76x02_phy.h" static void mt76x0_vht_cap_mask(struct ieee80211_supported_band *sband) { @@ -186,6 +187,8 @@ void mt76x0_mac_stop(struct mt76x02_dev *dev) { int i = 200, ok = 0; + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + /* Page count on TxQ */ while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) || (mt76_rr(dev, 0x0a30) & 0x000000ff) || @@ -262,27 +265,24 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev) } EXPORT_SYMBOL_GPL(mt76x0_init_hardware); -struct mt76x02_dev * -mt76x0_alloc_device(struct device *pdev, - const struct mt76_driver_ops *drv_ops, - const struct ieee80211_ops *ops) +static void +mt76x0_init_txpower(struct mt76x02_dev *dev, + struct ieee80211_supported_band *sband) { - struct mt76x02_dev *dev; - struct mt76_dev *mdev; - - mdev = mt76_alloc_device(sizeof(*dev), ops); - if (!mdev) - return NULL; + struct ieee80211_channel *chan; + struct mt76_rate_power t; + s8 tp; + int i; - mdev->dev = pdev; - mdev->drv = drv_ops; + for (i = 0; i < sband->n_channels; i++) { + chan = &sband->channels[i]; - dev = container_of(mdev, struct mt76x02_dev, mt76); - mutex_init(&dev->phy_mutex); + mt76x0_get_tx_power_per_rate(dev, chan, &t); + mt76x0_get_power_info(dev, chan, &tp); - return dev; + chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2; + } } -EXPORT_SYMBOL_GPL(mt76x0_alloc_device); int mt76x0_register_device(struct mt76x02_dev *dev) { @@ -296,9 +296,14 @@ int mt76x0_register_device(struct mt76x02_dev *dev) if (ret) return ret; - /* overwrite unsupported features */ - if (dev->mt76.cap.has_5ghz) + if (dev->mt76.cap.has_5ghz) { + /* overwrite unsupported features */ mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband); + mt76x0_init_txpower(dev, &dev->mt76.sband_5g.sband); + } + + if (dev->mt76.cap.has_2ghz) + mt76x0_init_txpower(dev, &dev->mt76.sband_2g.sband); mt76x02_init_debugfs(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index a1657922758e..0290ba5869a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -88,6 +88,7 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { { MT_TX_PROT_CFG6, 0xe3f42004 }, { MT_TX_PROT_CFG7, 0xe3f42084 }, { MT_TX_PROT_CFG8, 0xe3f42104 }, + { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, }; static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index 
a803a9b6a4c5..fee16ab21edb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -34,6 +34,8 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) mt76_rr(dev, MT_CH_IDLE); mt76_rr(dev, MT_CH_BUSY); + mt76x02_edcca_init(dev, true); + if (mt76_is_mmio(dev)) { mt76x02_dfs_init_params(dev); tasklet_enable(&dev->pre_tbtt_tasklet); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index 46629f61673b..51fbd75dff31 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -50,10 +50,6 @@ static inline bool is_mt7630(struct mt76x02_dev *dev) } /* Init */ -struct mt76x02_dev * -mt76x0_alloc_device(struct device *pdev, - const struct mt76_driver_ops *drv_ops, - const struct ieee80211_ops *ops); int mt76x0_init_hardware(struct mt76x02_dev *dev); int mt76x0_register_device(struct mt76x02_dev *dev); void mt76x0_chip_onoff(struct mt76x02_dev *dev, bool enable, bool reset); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index d895b6f3dc44..f302162036d0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -30,7 +30,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw) mt76x02_mac_start(dev); mt76x0_phy_calibrate(dev, true); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, - MT_CALIBRATE_INTERVAL); + MT_MAC_WORK_INTERVAL); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); @@ -99,7 +99,7 @@ static const struct ieee80211_ops mt76x0e_ops = { .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .wake_tx_queue = mt76_wake_tx_queue, .get_survey = mt76_get_survey, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, .flush = mt76x0e_flush, .set_tim = mt76x0e_set_tim, .release_buffered_frames = mt76_release_buffered_frames, @@ -141,6 +141,15 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev) mt76_clear(dev, 0x110, BIT(9)); mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); + mt76_wr(dev, MT_CH_TIME_CFG, + MT_CH_TIME_CFG_TIMER_EN | + MT_CH_TIME_CFG_TX_AS_BUSY | + MT_CH_TIME_CFG_RX_AS_BUSY | + MT_CH_TIME_CFG_NAV_AS_BUSY | + MT_CH_TIME_CFG_EIFS_AS_BUSY | + MT_CH_CCA_RC_EN | + FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); + err = mt76x0_register_device(dev); if (err < 0) return err; @@ -165,6 +174,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) .sta_remove = mt76x02_sta_remove, }; struct mt76x02_dev *dev; + struct mt76_dev *mdev; int ret; ret = pcim_enable_device(pdev); @@ -181,16 +191,20 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) return ret; - dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops); - if (!dev) + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops, + &drv_ops); + if (!mdev) return -ENOMEM; - mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + dev = container_of(mdev, struct mt76x02_dev, mt76); + mutex_init(&dev->phy_mutex); + + mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]); - dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); - dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); + mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); + dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); - ret = devm_request_irq(dev->mt76.dev, pdev->irq, 
mt76x02_irq_handler, + ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) goto error; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 1eb1a802ed20..1fd22eb841c3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -845,17 +845,18 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev) void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) { struct mt76_rate_power *t = &dev->mt76.rate_power; - u8 info[2]; + s8 info; - mt76x0_get_tx_power_per_rate(dev); - mt76x0_get_power_info(dev, info); + mt76x0_get_tx_power_per_rate(dev, dev->mt76.chandef.chan, t); + mt76x0_get_power_info(dev, dev->mt76.chandef.chan, &info); - mt76x02_add_rate_power_offset(t, info[0]); + mt76x02_add_rate_power_offset(t, info); mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); - mt76x02_add_rate_power_offset(t, -info[0]); + mt76x02_add_rate_power_offset(t, -info); - mt76x02_phy_set_txpower(dev, info[0], info[1]); + dev->target_power = info; + mt76x02_phy_set_txpower(dev, info, info); } void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) @@ -1075,7 +1076,9 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev) u8 gain_delta; int low_gain; - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76); + if (!dev->cal.avg_rssi_all) + dev->cal.avg_rssi_all = -75; low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0e6b43bb4678..91718647da02 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -79,7 +79,6 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev) clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); mt76x0_chip_onoff(dev, false, false); mt76u_queues_deinit(&dev->mt76); - mt76u_mcu_deinit(&dev->mt76); } static void mt76x0u_mac_stop(struct mt76x02_dev *dev) @@ -118,7 +117,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw) mt76x0_phy_calibrate(dev, true); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, - MT_CALIBRATE_INTERVAL); + MT_MAC_WORK_INTERVAL); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); @@ -155,48 +154,54 @@ static const struct ieee80211_ops mt76x0u_ops = { .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .set_rts_threshold = mt76x02_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, }; -static int mt76x0u_register_device(struct mt76x02_dev *dev) +static int mt76x0u_init_hardware(struct mt76x02_dev *dev) { - struct ieee80211_hw *hw = dev->mt76.hw; int err; - err = mt76u_alloc_queues(&dev->mt76); - if (err < 0) - goto out_err; - - err = mt76u_mcu_init_rx(&dev->mt76); - if (err < 0) - goto out_err; - mt76x0_chip_onoff(dev, true, true); - if (!mt76x02_wait_for_mac(&dev->mt76)) { - err = -ETIMEDOUT; - goto out_err; - } + + if (!mt76x02_wait_for_mac(&dev->mt76)) + return -ETIMEDOUT; err = mt76x0u_mcu_init(dev); if (err < 0) - goto out_err; + return err; mt76x0_init_usb_dma(dev); err = mt76x0_init_hardware(dev); if (err < 0) - goto out_err; + 
return err; mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); + return 0; +} + +static int mt76x0u_register_device(struct mt76x02_dev *dev) +{ + struct ieee80211_hw *hw = dev->mt76.hw; + int err; + + err = mt76u_alloc_queues(&dev->mt76); + if (err < 0) + goto out_err; + + err = mt76x0u_init_hardware(dev); + if (err < 0) + goto out_err; + err = mt76x0_register_device(dev); if (err < 0) goto out_err; /* check hw sg support in order to enable AMSDU */ - if (mt76u_check_sg(&dev->mt76)) + if (dev->mt76.usb.sg_en) hw->max_tx_fragments = MT_SG_MAX_SIZE; else hw->max_tx_fragments = 1; @@ -223,14 +228,18 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, }; struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct mt76x02_dev *dev; + struct mt76_dev *mdev; u32 asic_rev, mac_rev; int ret; - dev = mt76x0_alloc_device(&usb_intf->dev, &drv_ops, - &mt76x0u_ops); - if (!dev) + mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, + &drv_ops); + if (!mdev) return -ENOMEM; + dev = container_of(mdev, struct mt76x02_dev, mt76); + mutex_init(&dev->phy_mutex); + /* Quirk for Archer T1U */ if (id->driver_info) dev->no_2ghz = true; @@ -240,27 +249,27 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, usb_set_intfdata(usb_intf, dev); - mt76x02u_init_mcu(&dev->mt76); - ret = mt76u_init(&dev->mt76, usb_intf); + mt76x02u_init_mcu(mdev); + ret = mt76u_init(mdev, usb_intf); if (ret) goto err; /* Disable the HW, otherwise MCU fail to initalize on hot reboot */ mt76x0_chip_onoff(dev, false, false); - if (!mt76x02_wait_for_mac(&dev->mt76)) { + if (!mt76x02_wait_for_mac(mdev)) { ret = -ETIMEDOUT; goto err; } asic_rev = mt76_rr(dev, MT_ASIC_VERSION); mac_rev = mt76_rr(dev, MT_MAC_CSR0); - dev_info(dev->mt76.dev, "ASIC revision: %08x MAC revision: %08x\n", + dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", asic_rev, mac_rev); /* Note: vendor driver skips this check for MT76X0U */ if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) - dev_warn(dev->mt76.dev, "Warning: eFUSE not present\n"); + dev_warn(mdev->dev, "Warning: eFUSE not present\n"); ret = mt76x0u_register_device(dev); if (ret < 0) @@ -272,7 +281,7 @@ err: usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); - ieee80211_free_hw(dev->mt76.hw); + ieee80211_free_hw(mdev->hw); return ret; } @@ -297,11 +306,11 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, pm_message_t state) { struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); - struct mt76_usb *usb = &dev->mt76.usb; mt76u_stop_queues(&dev->mt76); mt76x0u_mac_stop(dev); - usb_kill_urb(usb->mcu.res.urb); + clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); + mt76x0_chip_onoff(dev, false, false); return 0; } @@ -312,15 +321,6 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) struct mt76_usb *usb = &dev->mt76.usb; int ret; - reinit_completion(&usb->mcu.cmpl); - ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN, - MT_EP_IN_CMD_RESP, - &usb->mcu.res, GFP_KERNEL, - mt76u_mcu_complete_urb, - &usb->mcu.cmpl); - if (ret < 0) - goto err; - ret = mt76u_submit_rx_buffers(&dev->mt76); if (ret < 0) goto err; @@ -328,7 +328,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) tasklet_enable(&usb->rx_tasklet); tasklet_enable(&usb->tx_tasklet); - ret = mt76x0_init_hardware(dev); + ret = mt76x0u_init_hardware(dev); if (ret) goto err; diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c index 9d7585029df9..4a282761ca58 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c @@ -15,6 +15,7 @@ */ #include #include +#include #include "mt76x0.h" #include "mcu.h" @@ -139,12 +140,6 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev) FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20)); mt76x02u_mcu_fw_reset(dev); usleep_range(5000, 6000); -/* - mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | - MT_PBF_CFG_TX1Q_EN | - MT_PBF_CFG_TX2Q_EN | - MT_PBF_CFG_TX3Q_EN)); -*/ mt76_wr(dev, MT_FCE_PSE_CTRL, 1); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 6782665049dd..6915cce5def9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -15,8 +15,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#ifndef __MT76X02_UTIL_H -#define __MT76X02_UTIL_H +#ifndef __MT76x02_H +#define __MT76x02_H #include @@ -27,6 +27,10 @@ #include "mt76x02_dma.h" #define MT_CALIBRATE_INTERVAL HZ +#define MT_MAC_WORK_INTERVAL (HZ / 10) + +#define MT_WATCHDOG_TIME (HZ / 10) +#define MT_TX_HANG_TH 10 #define MT_MAX_CHAINS 2 struct mt76x02_rx_freq_cal { @@ -70,6 +74,8 @@ struct mt76x02_dev { struct mutex phy_mutex; + u16 vif_mask; + u8 txdone_seq; DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); @@ -79,6 +85,7 @@ struct mt76x02_dev { struct tasklet_struct pre_tbtt_tasklet; struct delayed_work cal_work; struct delayed_work mac_work; + struct delayed_work wdt_work; u32 aggr_stats[32]; @@ -89,6 +96,10 @@ struct mt76x02_dev { u8 tbtt_count; u16 beacon_int; + u32 tx_hang_reset; + u8 tx_hang_check; + u8 mcu_timeout; + struct mt76x02_calibration cal; s8 target_power; @@ -101,6 +112,13 @@ struct mt76x02_dev { u8 slottime; struct mt76x02_dfs_pattern_detector dfs_pd; + + /* edcca monitor */ + bool ed_tx_blocked; + bool ed_monitor; + u8 ed_trigger; + u8 ed_silent; + ktime_t ed_time; }; extern struct ieee80211_rate mt76x02_rates[12]; @@ -115,8 +133,7 @@ void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev); -void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, - unsigned int idx); + int mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void mt76x02_remove_interface(struct ieee80211_hw *hw, @@ -136,6 +153,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate); s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj); +void mt76x02_wdt_work(struct work_struct *work); void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); void mt76x02_set_tx_ackto(struct mt76x02_dev *dev); void mt76x02_set_coverage_class(struct ieee80211_hw *hw, @@ -158,8 +176,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac); void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -int mt76x02_get_txpower(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int *dbm); void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); void mt76x02_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -224,4 +240,4 @@ mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast) return 
&sta->vif->group_wcid; } -#endif +#endif /* __MT76x02_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index a9d52ba1e270..7580c5c986ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c @@ -133,5 +133,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev) read_txpower); debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); + + debugfs_create_u32("tx_hang_reset", 0400, dir, &dev->tx_hang_reset); } EXPORT_SYMBOL_GPL(mt76x02_init_debugfs); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index 054609c634a2..e4649103efd4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -881,12 +881,18 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev, { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; + mutex_lock(&dev->mt76.mutex); if (dfs_pd->region != region) { tasklet_disable(&dfs_pd->dfs_tasklet); + + dev->ed_monitor = region == NL80211_DFS_ETSI; + mt76x02_edcca_init(dev, true); + dfs_pd->region = region; mt76x02_dfs_init_params(dev); tasklet_enable(&dfs_pd->dfs_tasklet); } + mutex_unlock(&dev->mt76.mutex); } void mt76x02_regd_notifier(struct wiphy *wiphy, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index c08bf371e527..91ff6598eccf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -130,10 +130,8 @@ static __le16 mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, const struct ieee80211_tx_rate *rate, u8 *nss_val) { + u8 phy, rate_idx, nss, bw = 0; u16 rateval; - u8 phy, rate_idx; - u8 nss = 1; - u8 bw = 0; if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { rate_idx = rate->idx; @@ -164,7 +162,7 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, phy = val >> 8; rate_idx = val & 0xff; - bw = 0; + nss = 1; } rateval = FIELD_PREP(MT_RXWI_RATE_INDEX, rate_idx); @@ -293,6 +291,13 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, memset(txwi, 0, sizeof(*txwi)); + if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff && + ieee80211_has_protected(hdr->frame_control)) { + wcid = NULL; + ieee80211_get_tx_rates(info->control.vif, sta, skb, + info->control.rates, 1); + } + if (wcid) txwi->wcid = wcid->idx; else @@ -309,7 +314,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, ccmp_pn[6] = pn >> 32; ccmp_pn[7] = pn >> 40; txwi->iv = *((__le32 *)&ccmp_pn[0]); - txwi->eiv = *((__le32 *)&ccmp_pn[1]); + txwi->eiv = *((__le32 *)&ccmp_pn[4]); } spin_lock_bh(&dev->mt76.lock); @@ -435,7 +440,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, } if (wcid) { - if (stat->pktid) + if (stat->pktid >= MT_PACKET_ID_FIRST) status.skb = mt76_tx_status_skb_get(mdev, wcid, stat->pktid, &list); if (status.skb) @@ -478,7 +483,9 @@ out: } static int -mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) +mt76x02_mac_process_rate(struct mt76x02_dev *dev, + struct mt76_rx_status *status, + u16 rate) { u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); @@ -510,11 +517,15 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) status->encoding = RX_ENC_HT; status->rate_idx = idx; break; - case MT_PHY_TYPE_VHT: + case MT_PHY_TYPE_VHT: { + u8 n_rxstream = dev->mt76.chainmask & 0xf; + status->encoding = RX_ENC_VHT; 
status->rate_idx = FIELD_GET(MT_RATE_INDEX_VHT_IDX, idx); - status->nss = FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1; + status->nss = min_t(u8, n_rxstream, + FIELD_GET(MT_RATE_INDEX_VHT_NSS, idx) + 1); break; + } default: return -EINVAL; } @@ -544,8 +555,11 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) return 0; } -void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr) +void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr) { + static const u8 null_addr[ETH_ALEN] = {}; + int i; + ether_addr_copy(dev->mt76.macaddr, addr); if (!is_valid_ether_addr(dev->mt76.macaddr)) { @@ -559,6 +573,16 @@ void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr) mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->mt76.macaddr + 4) | FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); + + mt76_wr(dev, MT_MAC_BSSID_DW0, + get_unaligned_le32(dev->mt76.macaddr)); + mt76_wr(dev, MT_MAC_BSSID_DW1, + get_unaligned_le16(dev->mt76.macaddr + 4) | + FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 APs + 8 STAs */ + MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT); + + for (i = 0; i < 16; i++) + mt76x02_mac_set_bssid(dev, i, null_addr); } EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr); @@ -584,7 +608,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, u16 rate = le16_to_cpu(rxwi->rate); u16 tid_sn = le16_to_cpu(rxwi->tid_sn); bool unicast = rxwi->rxinfo & cpu_to_le32(MT_RXINFO_UNICAST); - int i, pad_len = 0, nstreams = dev->mt76.chainmask & 0xf; + int pad_len = 0, nstreams = dev->mt76.chainmask & 0xf; s8 signal; u8 pn_len; u8 wcid; @@ -644,12 +668,13 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, status->chains = BIT(0); signal = mt76x02_mac_get_rssi(dev, rxwi->rssi[0], 0); - for (i = 1; i < nstreams; i++) { - status->chains |= BIT(i); - status->chain_signal[i] = mt76x02_mac_get_rssi(dev, - rxwi->rssi[i], - i); - signal = max_t(s8, signal, status->chain_signal[i]); + status->chain_signal[0] = signal; + if (nstreams > 1) { + status->chains |= BIT(1); + status->chain_signal[1] = mt76x02_mac_get_rssi(dev, + rxwi->rssi[1], + 1); + signal = max_t(s8, signal, status->chain_signal[1]); } status->signal = signal; status->freq = dev->mt76.chandef.chan->center_freq; @@ -658,12 +683,7 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, status->tid = FIELD_GET(MT_RXWI_TID, tid_sn); status->seqno = FIELD_GET(MT_RXWI_SN, tid_sn); - if (sta) { - ewma_signal_add(&sta->rssi, status->signal); - sta->inactive_count = 0; - } - - return mt76x02_mac_process_rate(status, rate); + return mt76x02_mac_process_rate(dev, status, rate); } void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq) @@ -715,7 +735,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, } EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); -void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val) +void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val) { u32 data = 0; @@ -729,20 +749,89 @@ void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val) MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); mt76_rmw(dev, MT_OFDM_PROT_CFG, MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_MM20_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_MM40_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_GF20_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_GF40_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - 
mt76_rmw(dev, MT_TX_PROT_CFG6, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG7, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG8, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); +} + +void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot, + int ht_mode) +{ + int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; + bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + u32 prot[6]; + u32 vht_prot[3]; + int i; + u16 rts_thr; + + for (i = 0; i < ARRAY_SIZE(prot); i++) { + prot[i] = mt76_rr(dev, MT_CCK_PROT_CFG + i * 4); + prot[i] &= ~MT_PROT_CFG_CTRL; + if (i >= 2) + prot[i] &= ~MT_PROT_CFG_RATE; + } + + for (i = 0; i < ARRAY_SIZE(vht_prot); i++) { + vht_prot[i] = mt76_rr(dev, MT_TX_PROT_CFG6 + i * 4); + vht_prot[i] &= ~(MT_PROT_CFG_CTRL | MT_PROT_CFG_RATE); + } + + rts_thr = mt76_get_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH); + + if (rts_thr != 0xffff) + prot[0] |= MT_PROT_CTRL_RTS_CTS; + + if (legacy_prot) { + prot[1] |= MT_PROT_CTRL_CTS2SELF; + + prot[2] |= MT_PROT_RATE_CCK_11; + prot[3] |= MT_PROT_RATE_CCK_11; + prot[4] |= MT_PROT_RATE_CCK_11; + prot[5] |= MT_PROT_RATE_CCK_11; + + vht_prot[0] |= MT_PROT_RATE_CCK_11; + vht_prot[1] |= MT_PROT_RATE_CCK_11; + vht_prot[2] |= MT_PROT_RATE_CCK_11; + } else { + if (rts_thr != 0xffff) + prot[1] |= MT_PROT_CTRL_RTS_CTS; + + prot[2] |= MT_PROT_RATE_OFDM_24; + prot[3] |= MT_PROT_RATE_DUP_OFDM_24; + prot[4] |= MT_PROT_RATE_OFDM_24; + prot[5] |= MT_PROT_RATE_DUP_OFDM_24; + + vht_prot[0] |= MT_PROT_RATE_OFDM_24; + vht_prot[1] |= MT_PROT_RATE_DUP_OFDM_24; + vht_prot[2] |= MT_PROT_RATE_SGI_OFDM_24; + } + + switch (mode) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + prot[2] |= MT_PROT_CTRL_RTS_CTS; + prot[3] |= MT_PROT_CTRL_RTS_CTS; + prot[4] |= MT_PROT_CTRL_RTS_CTS; + prot[5] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[0] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[1] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[2] |= MT_PROT_CTRL_RTS_CTS; + break; + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + prot[3] |= MT_PROT_CTRL_RTS_CTS; + prot[5] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[1] |= MT_PROT_CTRL_RTS_CTS; + vht_prot[2] |= MT_PROT_CTRL_RTS_CTS; + break; + } + + if (non_gf) { + prot[4] |= MT_PROT_CTRL_RTS_CTS; + prot[5] |= MT_PROT_CTRL_RTS_CTS; + } + + for (i = 0; i < ARRAY_SIZE(prot); i++) + mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); + + for (i = 0; i < ARRAY_SIZE(vht_prot); i++) + mt76_wr(dev, MT_TX_PROT_CFG6 + i * 4, vht_prot[i]); } void mt76x02_update_channel(struct mt76_dev *mdev) @@ -774,8 +863,100 @@ static void mt76x02_check_mac_err(struct mt76x02_dev *dev) mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); udelay(10); - mt76_clear(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); +} + +static void +mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable) +{ + if (enable) { + u32 data; + + mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN); + /* enable pa-lna */ + data = mt76_rr(dev, MT_TX_PIN_CFG); + data |= MT_TX_PIN_CFG_TXANT | + MT_TX_PIN_CFG_RXANT | + MT_TX_PIN_RFTR_EN | + MT_TX_PIN_TRSW_EN; + mt76_wr(dev, MT_TX_PIN_CFG, data); + } else { + mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); + mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_EN); + /* disable pa-lna */ + mt76_clear(dev, MT_TX_PIN_CFG, 
MT_TX_PIN_CFG_TXANT); + mt76_clear(dev, MT_TX_PIN_CFG, MT_TX_PIN_CFG_RXANT); + } + dev->ed_tx_blocked = !enable; +} + +void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable) +{ + dev->ed_trigger = 0; + dev->ed_silent = 0; + + if (dev->ed_monitor && enable) { + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20; + + mt76_clear(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN); + mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0), + ed_th << 8 | ed_th); + mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN); + } else { + mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN); + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + if (is_mt76x2(dev)) { + mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070); + mt76_set(dev, MT_TXOP_HLDR_ET, + MT_TXOP_HLDR_TX40M_BLK_EN); + } else { + mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464); + mt76_clear(dev, MT_TXOP_HLDR_ET, + MT_TXOP_HLDR_TX40M_BLK_EN); + } + } + mt76x02_edcca_tx_enable(dev, true); + + /* clear previous CCA timer value */ + mt76_rr(dev, MT_ED_CCA_TIMER); + dev->ed_time = ktime_get_boottime(); +} +EXPORT_SYMBOL_GPL(mt76x02_edcca_init); + +#define MT_EDCCA_TH 92 +#define MT_EDCCA_BLOCK_TH 2 +static void mt76x02_edcca_check(struct mt76x02_dev *dev) +{ + ktime_t cur_time; + u32 active, val, busy; + + cur_time = ktime_get_boottime(); + val = mt76_rr(dev, MT_ED_CCA_TIMER); + + active = ktime_to_us(ktime_sub(cur_time, dev->ed_time)); + dev->ed_time = cur_time; + + busy = (val * 100) / active; + busy = min_t(u32, busy, 100); + + if (busy > MT_EDCCA_TH) { + dev->ed_trigger++; + dev->ed_silent = 0; + } else { + dev->ed_silent++; + dev->ed_trigger = 0; + } + + if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && + !dev->ed_tx_blocked) + mt76x02_edcca_tx_enable(dev, false); + else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && + dev->ed_tx_blocked) + mt76x02_edcca_tx_enable(dev, true); } void mt76x02_mac_work(struct work_struct *work) @@ -784,6 +965,8 @@ void mt76x02_mac_work(struct work_struct *work) mac_work.work); int i, idx; + mutex_lock(&dev->mt76.mutex); + mt76x02_update_channel(&dev->mt76); for (i = 0, idx = 0; i < 16; i++) { u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i)); @@ -792,14 +975,18 @@ void mt76x02_mac_work(struct work_struct *work) dev->aggr_stats[idx++] += val >> 16; } - /* XXX: check beacon stuck for ap mode */ if (!dev->beacon_mask) mt76x02_check_mac_err(dev); + if (dev->ed_monitor) + mt76x02_edcca_check(dev); + + mutex_unlock(&dev->mt76.mutex); + mt76_tx_status_check(&dev->mt76, NULL, false); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, - MT_CALIBRATE_INTERVAL); + MT_MAC_WORK_INTERVAL); } void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr) @@ -891,8 +1078,9 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, return 0; } -void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, - u8 vif_idx, bool val) +static void +__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, + bool val, struct sk_buff *skb) { u8 old_mask = dev->beacon_mask; bool en; @@ -900,6 +1088,8 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, if (val) { dev->beacon_mask |= BIT(vif_idx); + if (skb) + mt76x02_mac_set_beacon(dev, vif_idx, skb); } else { dev->beacon_mask &= ~BIT(vif_idx); mt76x02_mac_set_beacon(dev, vif_idx, NULL); @@ -910,14 +1100,37 @@ void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, en = dev->beacon_mask; - mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); reg = 
MT_BEACON_TIME_CFG_BEACON_TX | MT_BEACON_TIME_CFG_TBTT_EN | MT_BEACON_TIME_CFG_TIMER_EN; mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en); + if (mt76_is_usb(dev)) + return; + + mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); if (en) mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); else mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); } + +void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, + struct ieee80211_vif *vif, bool val) +{ + u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx; + struct sk_buff *skb = NULL; + + if (mt76_is_mmio(dev)) + tasklet_disable(&dev->pre_tbtt_tasklet); + else if (val) + skb = ieee80211_beacon_get(mt76_hw(dev), vif); + + if (!dev->beacon_mask) + dev->tbtt_count = 0; + + __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb); + + if (mt76_is_mmio(dev)) + tasklet_enable(&dev->pre_tbtt_tasklet); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index 4e597004c445..6b1f25d2f64c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -18,8 +18,6 @@ #ifndef __MT76X02_MAC_H #define __MT76X02_MAC_H -#include - struct mt76x02_dev; struct mt76x02_tx_status { @@ -41,8 +39,6 @@ struct mt76x02_vif { u8 idx; }; -DECLARE_EWMA(signal, 10, 8); - struct mt76x02_sta { struct mt76_wcid wcid; /* must be first */ @@ -50,8 +46,6 @@ struct mt76x02_sta { struct mt76x02_tx_status status; int n_frames; - struct ewma_signal rssi; - int inactive_count; }; #define MT_RXINFO_BA BIT(0) @@ -194,8 +188,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, struct mt76x02_tx_status *stat, u8 *update); int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, void *rxi); -void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val); -void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr); +void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, bool legacy_prot, + int ht_mode); +void mt76x02_mac_set_rts_thresh(struct mt76x02_dev *dev, u32 val); +void mt76x02_mac_setaddr(struct mt76x02_dev *dev, const u8 *addr); void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta, int len); @@ -208,6 +204,8 @@ void mt76x02_mac_work(struct work_struct *work); void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr); int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, struct sk_buff *skb); -void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, - bool val); +void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, + struct ieee80211_vif *vif, bool val); + +void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c index b7f4edb729e3..6501b853b65c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -21,70 +21,13 @@ #include "mt76x02_mcu.h" -static struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) -{ - struct sk_buff *skb; - - skb = alloc_skb(len, GFP_KERNEL); - if (!skb) - return NULL; - memcpy(skb_put(skb, len), data, len); - - return skb; -} - -static struct sk_buff * -mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires) -{ - unsigned long timeout; - - if (!time_is_after_jiffies(expires)) - return NULL; - - timeout = expires - 
jiffies; - wait_event_timeout(dev->mt76.mmio.mcu.wait, - !skb_queue_empty(&dev->mt76.mmio.mcu.res_q), - timeout); - return skb_dequeue(&dev->mt76.mmio.mcu.res_q); -} - -static int -mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid, - struct sk_buff *skb, int cmd, int seq) -{ - struct mt76_queue *q = &dev->mt76.q_tx[qid]; - struct mt76_queue_buf buf; - dma_addr_t addr; - u32 tx_info; - - tx_info = MT_MCU_MSG_TYPE_CMD | - FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | - FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | - FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | - FIELD_PREP(MT_MCU_MSG_LEN, skb->len); - - addr = dma_map_single(dev->mt76.dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(dev->mt76.dev, addr)) - return -ENOMEM; - - buf.addr = addr; - buf.len = skb->len; - - spin_lock_bh(&q->lock); - mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); - mt76_queue_kick(dev, q); - spin_unlock_bh(&q->lock); - - return 0; -} - int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, int len, bool wait_resp) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); unsigned long expires = jiffies + HZ; struct sk_buff *skb; + u32 tx_info; int ret; u8 seq; @@ -98,7 +41,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, if (!seq) seq = ++mdev->mmio.mcu.msg_seq & 0xf; - ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq); + tx_info = MT_MCU_MSG_TYPE_CMD | + FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) | + FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) | + FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | + FIELD_PREP(MT_MCU_MSG_LEN, skb->len); + + ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, tx_info); if (ret) goto out; @@ -106,12 +55,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, u32 *rxfce; bool check_seq = false; - skb = mt76x02_mcu_get_response(dev, expires); + skb = mt76_mcu_get_response(&dev->mt76, expires); if (!skb) { dev_err(mdev->dev, "MCU message %d (seq %d) timed out\n", cmd, seq); ret = -ETIMEDOUT; + dev->mcu_timeout = 1; break; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h index 7e4004120102..a7b0d3e5df1d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h @@ -96,6 +96,12 @@ struct mt76x02_patch_header { u8 pad[2]; }; +static inline struct sk_buff * +mt76x02_mcu_msg_alloc(const void *data, int len) +{ + return mt76_mcu_msg_alloc(data, 0, len, 0); +} + int mt76x02_mcu_cleanup(struct mt76x02_dev *dev); int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param); int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 66315410aebe..1229f19f2b02 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -79,24 +79,24 @@ mt76x02_resync_beacon_timer(struct mt76x02_dev *dev) * Beacon timer drifts by 1us every tick, the timer is configured * in 1/16 TU (64us) units. 
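 * As an illustrative example, assuming a 100 TU beacon interval: the
 * timer is programmed with 100 * 16 = 1600 of those units, and a 1us
 * drift per beacon tick adds up to roughly 64us, i.e. one unit, over a
 * 64-beacon cycle, hence the one-unit correction applied once per cycle
 * below.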
*/ - if (dev->tbtt_count < 62) + if (dev->tbtt_count < 63) return; - if (dev->tbtt_count >= 64) { - dev->tbtt_count = 0; - return; - } - /* * The updated beacon interval takes effect after two TBTT, because * at this point the original interval has already been loaded into * the next TBTT_TIMER value */ - if (dev->tbtt_count == 62) + if (dev->tbtt_count == 63) timer_val -= 1; mt76_rmw_field(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_INTVAL, timer_val); + + if (dev->tbtt_count >= 64) { + dev->tbtt_count = 0; + return; + } } static void mt76x02_pre_tbtt_tasklet(unsigned long arg) @@ -116,14 +116,20 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg) IEEE80211_IFACE_ITER_RESUME_ALL, mt76x02_update_beacon_iter, dev); + mt76_csa_check(&dev->mt76); + + if (dev->mt76.csa_complete) + return; + do { nframes = skb_queue_len(&data.q); ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), IEEE80211_IFACE_ITER_RESUME_ALL, mt76x02_add_buffered_bc, &data); - } while (nframes != skb_queue_len(&data.q)); + } while (nframes != skb_queue_len(&data.q) && + skb_queue_len(&data.q) < 8); - if (!nframes) + if (!skb_queue_len(&data.q)) return; for (i = 0; i < ARRAY_SIZE(data.tail); i++) { @@ -308,8 +314,12 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance) tasklet_schedule(&dev->pre_tbtt_tasklet); /* send buffered multicast frames now */ - if (intr & MT_INT_TBTT) - mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); + if (intr & MT_INT_TBTT) { + if (dev->mt76.csa_complete) + mt76_csa_finish(&dev->mt76); + else + mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]); + } if (intr & MT_INT_TX_STAT) { mt76x02_mac_poll_tx_status(dev, true); @@ -384,3 +394,137 @@ void mt76x02_mac_start(struct mt76x02_dev *dev) MT_INT_TX_STAT); } EXPORT_SYMBOL_GPL(mt76x02_mac_start); + +static bool mt76x02_tx_hang(struct mt76x02_dev *dev) +{ + u32 dma_idx, prev_dma_idx; + struct mt76_queue *q; + int i; + + for (i = 0; i < 4; i++) { + q = &dev->mt76.q_tx[i]; + + if (!q->queued) + continue; + + prev_dma_idx = dev->mt76.tx_dma_idx[i]; + dma_idx = ioread32(&q->regs->dma_idx); + dev->mt76.tx_dma_idx[i] = dma_idx; + + if (prev_dma_idx == dma_idx) + break; + } + + return i < 4; +} + +static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) +{ + u32 mask = dev->mt76.mmio.irqmask; + int i; + + ieee80211_stop_queues(dev->mt76.hw); + set_bit(MT76_RESET, &dev->mt76.state); + + tasklet_disable(&dev->pre_tbtt_tasklet); + tasklet_disable(&dev->tx_tasklet); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) + napi_disable(&dev->mt76.napi[i]); + + mutex_lock(&dev->mt76.mutex); + + if (dev->beacon_mask) + mt76_clear(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN); + + mt76x02_irq_disable(dev, mask); + + /* perform device reset */ + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + mt76_wr(dev, MT_MAC_SYS_CTRL, 0); + mt76_clear(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN); + usleep_range(5000, 10000); + mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff); + + /* let fw reset DMA */ + mt76_set(dev, 0x734, 0x3); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) + mt76_queue_tx_cleanup(dev, i, true); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) + mt76_queue_rx_reset(dev, i); + + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); + mt76_set(dev, MT_WPDMA_GLO_CFG, + MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN); + if (dev->ed_monitor) + mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + + if 
(dev->beacon_mask) + mt76_set(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN); + + mt76x02_irq_enable(dev, mask); + + mutex_unlock(&dev->mt76.mutex); + + clear_bit(MT76_RESET, &dev->mt76.state); + + tasklet_enable(&dev->tx_tasklet); + tasklet_schedule(&dev->tx_tasklet); + + tasklet_enable(&dev->pre_tbtt_tasklet); + + for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) { + napi_enable(&dev->mt76.napi[i]); + napi_schedule(&dev->mt76.napi[i]); + } + + ieee80211_wake_queues(dev->mt76.hw); + + mt76_txq_schedule_all(&dev->mt76); +} + +static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) +{ + if (mt76x02_tx_hang(dev)) { + if (++dev->tx_hang_check >= MT_TX_HANG_TH) + goto restart; + } else { + dev->tx_hang_check = 0; + } + + if (dev->mcu_timeout) + goto restart; + + return; + +restart: + mt76x02_watchdog_reset(dev); + + mutex_lock(&dev->mt76.mmio.mcu.mutex); + dev->mcu_timeout = 0; + mutex_unlock(&dev->mt76.mmio.mcu.mutex); + + dev->tx_hang_reset++; + dev->tx_hang_check = 0; + memset(dev->mt76.tx_dma_idx, 0xff, + sizeof(dev->mt76.tx_dma_idx)); +} + +void mt76x02_wdt_work(struct work_struct *work) +{ + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, + wdt_work.work); + + mt76x02_check_tx_hang(dev); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work, + MT_WATCHDOG_TIME); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index 977a8e7e26df..a020c757ba5c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -132,53 +132,6 @@ void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1) } EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower); -int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev) -{ - struct mt76x02_sta *sta; - struct mt76_wcid *wcid; - int i, j, min_rssi = 0; - s8 cur_rssi; - - local_bh_disable(); - rcu_read_lock(); - - for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) { - unsigned long mask = dev->mt76.wcid_mask[i]; - - if (!mask) - continue; - - for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { - if (!(mask & 1)) - continue; - - wcid = rcu_dereference(dev->mt76.wcid[j]); - if (!wcid) - continue; - - sta = container_of(wcid, struct mt76x02_sta, wcid); - spin_lock(&dev->mt76.rx_lock); - if (sta->inactive_count++ < 5) - cur_rssi = ewma_signal_read(&sta->rssi); - else - cur_rssi = 0; - spin_unlock(&dev->mt76.rx_lock); - - if (cur_rssi < min_rssi) - min_rssi = cur_rssi; - } - } - - rcu_read_unlock(); - local_bh_enable(); - - if (!min_rssi) - return -75; - - return min_rssi; -} -EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi); - void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl) { int core_val, agc_val; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h index 2b316cf7c70c..d2971db06f13 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.h @@ -51,7 +51,6 @@ void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit); int mt76x02_get_max_rate_power(struct mt76_rate_power *r); void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev); void mt76x02_phy_set_txdac(struct mt76x02_dev *dev); -int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev); void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl); void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper); diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h index f7de77d09d28..7401cb94fb72 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h @@ -230,6 +230,29 @@ #define MT_COM_REG2 0x0738 #define MT_COM_REG3 0x073C +#define MT_LED_CTRL 0x0770 +#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) +#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) +#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) +#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) + +#define MT_LED_TX_BLINK_0 0x0774 +#define MT_LED_TX_BLINK_1 0x0778 + +#define MT_LED_S0_BASE 0x077C +#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n)) +#define MT_LED_S1_BASE 0x0780 +#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n)) +#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24) +#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \ + MT_LED_STATUS_OFF_MASK) +#define MT_LED_STATUS_ON_MASK GENMASK(23, 16) +#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \ + MT_LED_STATUS_ON_MASK) +#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8) +#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \ + MT_LED_STATUS_DURATION_MASK) + #define MT_FCE_PSE_CTRL 0x0800 #define MT_FCE_PARAMETERS 0x0804 #define MT_FCE_CSO 0x0808 @@ -318,6 +341,7 @@ #define MT_CH_TIME_CFG_NAV_AS_BUSY BIT(3) #define MT_CH_TIME_CFG_EIFS_AS_BUSY BIT(4) #define MT_CH_TIME_CFG_MDRDY_CNT_EN BIT(5) +#define MT_CH_CCA_RC_EN BIT(6) #define MT_CH_TIME_CFG_CH_TIMER_CLR GENMASK(9, 8) #define MT_CH_TIME_CFG_MDRDY_CLR GENMASK(11, 10) @@ -378,6 +402,9 @@ #define MT_TX_PWR_CFG_4 0x1324 #define MT_TX_PIN_CFG 0x1328 #define MT_TX_PIN_CFG_TXANT GENMASK(3, 0) +#define MT_TX_PIN_CFG_RXANT GENMASK(11, 8) +#define MT_TX_PIN_RFTR_EN BIT(16) +#define MT_TX_PIN_TRSW_EN BIT(18) #define MT_TX_BAND_CFG 0x132c #define MT_TX_BAND_CFG_UPPER_40M BIT(0) @@ -398,6 +425,7 @@ #define MT_TXOP_CTRL_CFG 0x1340 #define MT_TXOP_TRUN_EN GENMASK(5, 0) #define MT_TXOP_EXT_CCA_DLY GENMASK(15, 8) +#define MT_TXOP_ED_CCA_EN BIT(20) #define MT_TX_RTS_CFG 0x1344 #define MT_TX_RTS_CFG_RETRY_LIMIT GENMASK(7, 0) @@ -409,6 +437,7 @@ #define MT_TX_RETRY_CFG 0x134c #define MT_TX_LINK_CFG 0x1350 +#define MT_TX_CFACK_EN BIT(12) #define MT_VHT_HT_FBK_CFG0 0x1354 #define MT_VHT_HT_FBK_CFG1 0x1358 #define MT_LG_FBK_CFG0 0x135c @@ -440,9 +469,10 @@ #define MT_PROT_TXOP_ALLOW_GF40 BIT(25) #define MT_PROT_RTS_THR_EN BIT(26) #define MT_PROT_RATE_CCK_11 0x0003 -#define MT_PROT_RATE_OFDM_6 0x4000 -#define MT_PROT_RATE_OFDM_24 0x4004 -#define MT_PROT_RATE_DUP_OFDM_24 0x4084 +#define MT_PROT_RATE_OFDM_6 0x2000 +#define MT_PROT_RATE_OFDM_24 0x2004 +#define MT_PROT_RATE_DUP_OFDM_24 0x2084 +#define MT_PROT_RATE_SGI_OFDM_24 0x2104 #define MT_PROT_TXOP_ALLOW_ALL GENMASK(25, 20) #define MT_PROT_TXOP_ALLOW_BW20 (MT_PROT_TXOP_ALLOW_ALL & \ ~MT_PROT_TXOP_ALLOW_MM40 & \ @@ -511,6 +541,7 @@ #define MT_RX_FILTR_CFG_CTRL_RSV BIT(16) #define MT_AUTO_RSP_CFG 0x1404 +#define MT_AUTO_RSP_EN BIT(0) #define MT_AUTO_RSP_PREAMB_SHORT BIT(4) #define MT_LEGACY_BASIC_RATE 0x1408 #define MT_HT_BASIC_RATE 0x140c @@ -532,6 +563,7 @@ #define MT_PN_PAD_MODE 0x150c #define MT_TXOP_HLDR_ET 0x1608 +#define MT_TXOP_HLDR_TX40M_BLK_EN BIT(1) #define MT_PROT_AUTO_TX_CFG 0x1648 #define MT_PROT_AUTO_TX_CFG_PROT_PADJ GENMASK(11, 8) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c index 4598cb2cc3ff..94f47248c59f 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c @@ -22,7 +22,6 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct mt76x02_dev *dev = hw->priv; struct ieee80211_vif *vif = info->control.vif; @@ -33,13 +32,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, msta = (struct mt76x02_sta *)control->sta->drv_priv; wcid = &msta->wcid; - /* sw encrypted frames */ - if (!info->control.hw_key && wcid->hw_key_idx != 0xff && - ieee80211_has_protected(hdr->frame_control)) - control->sta = NULL; - } - - if (vif && !control->sta) { + } else if (vif) { struct mt76x02_vif *mvif; mvif = (struct mt76x02_vif *)vif->drv_priv; @@ -58,8 +51,7 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, if (q == MT_RXQ_MCU) { /* this is used just by mmio code */ - skb_queue_tail(&mdev->mmio.mcu.res_q, skb); - wake_up(&mdev->mmio.mcu.wait); + mt76_mcu_rx_event(&dev->mt76, skb); return; } @@ -177,7 +169,7 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (ret < 0) return ret; - if (pid && pid != MT_PACKET_ID_NO_ACK) + if (pid >= MT_PACKET_ID_FIRST) qsel = MT_QSEL_MGMT; *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 81970cf777c0..43f07461c8d3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -49,7 +49,12 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; put_unaligned_le32(info, skb_push(skb, sizeof(info))); + /* Add zero pad of 4 - 7 bytes */ pad = round_up(skb->len, 4) + 4 - skb->len; + + /* First packet of an A-MSDU burst keeps track of the whole burst + * length, need to update the length of it and the last packet.
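+ * As a worked example, with skb->len == 1534 the pad computed above is
+ * round_up(1534, 4) + 4 - 1534 = 6 bytes of zeroes, appended to the tail
+ * of the last segment below.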
+ */ skb_walk_frags(skb, iter) { last = iter; if (!iter->next) { @@ -59,11 +64,10 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) } } - if (unlikely(pad)) { - if (skb_pad(last, pad)) - return -ENOMEM; - __skb_put(last, pad); - } + if (skb_pad(last, pad)) + return -ENOMEM; + __skb_put(last, pad); + return 0; } @@ -87,8 +91,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, pid = mt76_tx_status_skb_add(mdev, wcid, skb); txwi->pktid = pid; - if ((pid && pid != MT_PACKET_ID_NO_ACK) || - q2ep(q->hw_idx) == MT_EP_OUT_HCCA) + if (pid >= MT_PACKET_ID_FIRST || q2ep(q->hw_idx) == MT_EP_OUT_HCCA) qsel = MT_QSEL_MGMT; else qsel = MT_QSEL_EDCA; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index 6db789f90269..0cb8751321a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -28,21 +28,6 @@ #define MT_TX_CPU_FROM_FCE_CPU_DESC_IDX 0x09a8 -static struct sk_buff * -mt76x02u_mcu_msg_alloc(const void *data, int len) -{ - struct sk_buff *skb; - - skb = alloc_skb(MT_CMD_HDR_LEN + len + 8, GFP_KERNEL); - if (!skb) - return NULL; - - skb_reserve(skb, MT_CMD_HDR_LEN); - skb_put_data(skb, data, len); - - return skb; -} - static void mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len) { @@ -76,34 +61,21 @@ mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data, int len) static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq) { struct mt76_usb *usb = &dev->usb; - struct mt76u_buf *buf = &usb->mcu.res; - struct urb *urb = buf->urb; - int i, ret; + u8 *data = usb->mcu.data; + int i, len, ret; u32 rxfce; - u8 *data; for (i = 0; i < 5; i++) { - if (!wait_for_completion_timeout(&usb->mcu.cmpl, - msecs_to_jiffies(300))) + ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300); + if (ret == -ETIMEDOUT) continue; + if (ret) + goto out; - if (urb->status) - return -EIO; - - data = sg_virt(&urb->sg[0]); if (usb->mcu.rp) - mt76x02u_multiple_mcu_reads(dev, data + 4, - urb->actual_length - 8); + mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8); rxfce = get_unaligned_le32(data); - ret = mt76u_submit_buf(dev, USB_DIR_IN, - MT_EP_IN_CMD_RESP, - buf, GFP_KERNEL, - mt76u_mcu_complete_urb, - &usb->mcu.cmpl); - if (ret) - return ret; - if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) && FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE) return 0; @@ -112,27 +84,23 @@ static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq) FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce), seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce)); } - - dev_err(dev->dev, "error: %s timed out\n", __func__); - return -ETIMEDOUT; +out: + dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret); + return ret; } static int __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd, bool wait_resp) { - struct usb_interface *intf = to_usb_interface(dev->dev); - struct usb_device *udev = interface_to_usbdev(intf); struct mt76_usb *usb = &dev->usb; - unsigned int pipe; - int ret, sent; + int ret; u8 seq = 0; u32 info; if (test_bit(MT76_REMOVED, &dev->state)) return 0; - pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]); if (wait_resp) { seq = ++usb->mcu.msg_seq & 0xf; if (!seq) @@ -146,7 +114,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, if (ret) return ret; - ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500); + ret = mt76u_bulk_msg(dev, skb->data, skb->len, 
NULL, 500); if (ret) return ret; @@ -166,7 +134,7 @@ mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, struct sk_buff *skb; int err; - skb = mt76x02u_mcu_msg_alloc(data, len); + skb = mt76_mcu_msg_alloc(data, MT_CMD_HDR_LEN, len, 8); if (!skb) return -ENOMEM; @@ -268,14 +236,12 @@ void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev) EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset); static int -__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf, +__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data, const void *fw_data, int len, u32 dst_addr) { - u8 *data = sg_virt(&buf->urb->sg[0]); - DECLARE_COMPLETION_ONSTACK(cmpl); __le32 info; u32 val; - int err; + int err, data_len; info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) | FIELD_PREP(MT_MCU_MSG_LEN, len) | @@ -291,25 +257,12 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf, mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE, MT_FCE_DMA_LEN, len << 16); - buf->len = MT_CMD_HDR_LEN + len + sizeof(info); - err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT, - MT_EP_OUT_INBAND_CMD, - buf, GFP_KERNEL, - mt76u_mcu_complete_urb, &cmpl); - if (err < 0) - return err; - - if (!wait_for_completion_timeout(&cmpl, - msecs_to_jiffies(1000))) { - dev_err(dev->mt76.dev, "firmware upload timed out\n"); - usb_kill_urb(buf->urb); - return -ETIMEDOUT; - } + data_len = MT_CMD_HDR_LEN + len + sizeof(info); - if (mt76u_urb_error(buf->urb)) { - dev_err(dev->mt76.dev, "firmware upload failed: %d\n", - buf->urb->status); - return buf->urb->status; + err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000); + if (err) { + dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err); + return err; } val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); @@ -322,17 +275,16 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf, int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data, int data_len, u32 max_payload, u32 offset) { - int err, len, pos = 0, max_len = max_payload - 8; - struct mt76u_buf buf; + int len, err = 0, pos = 0, max_len = max_payload - 8; + u8 *buf; - err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload, - GFP_KERNEL); - if (err < 0) - return err; + buf = kmalloc(max_payload, GFP_KERNEL); + if (!buf) + return -ENOMEM; while (data_len > 0) { len = min_t(int, data_len, max_len); - err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos, + err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos, len, offset + pos); if (err < 0) break; @@ -341,7 +293,7 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data, pos += len; usleep_range(5000, 10000); } - mt76u_buf_free(&buf); + kfree(buf); return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index 38bd466cff16..a48c261b0c63 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -75,6 +75,58 @@ static const struct ieee80211_iface_combination mt76x02_if_comb[] = { } }; +static void +mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on, + u8 delay_off) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, + mt76); + u32 val; + + val = MT_LED_STATUS_DURATION(0xff) | + MT_LED_STATUS_OFF(delay_off) | + MT_LED_STATUS_ON(delay_on); + + mt76_wr(dev, MT_LED_S0(mdev->led_pin), val); + mt76_wr(dev, MT_LED_S1(mdev->led_pin), val); + + val = MT_LED_CTRL_REPLAY(mdev->led_pin) | + MT_LED_CTRL_KICK(mdev->led_pin); + if 
(mdev->led_al) + val |= MT_LED_CTRL_POLARITY(mdev->led_pin); + mt76_wr(dev, MT_LED_CTRL, val); +} + +static int +mt76x02_led_set_blink(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev, + led_cdev); + u8 delta_on, delta_off; + + delta_off = max_t(u8, *delay_off / 10, 1); + delta_on = max_t(u8, *delay_on / 10, 1); + + mt76x02_led_set_config(mdev, delta_on, delta_off); + + return 0; +} + +static void +mt76x02_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev, + led_cdev); + + if (!brightness) + mt76x02_led_set_config(mdev, 0, 0xff); + else + mt76x02_led_set_config(mdev, 0xff, 0); +} + void mt76x02_init_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); @@ -88,27 +140,37 @@ void mt76x02_init_device(struct mt76x02_dev *dev) hw->max_rate_tries = 1; hw->extra_tx_headroom = 2; + wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_ADHOC); + if (mt76_is_usb(dev)) { hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN; - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); } else { + INIT_DELAYED_WORK(&dev->wdt_work, mt76x02_wdt_work); + mt76x02_dfs_init_detector(dev); wiphy->reg_notifier = mt76x02_regd_notifier; wiphy->iface_combinations = mt76x02_if_comb; wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb); - wiphy->interface_modes = - BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_ADHOC); - - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP); + wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; + + /* init led callbacks */ + if (IS_ENABLED(CONFIG_MT76_LEDS)) { + dev->mt76.led_cdev.brightness_set = + mt76x02_led_set_brightness; + dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink; + } } + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + hw->sta_data_size = sizeof(struct mt76x02_sta); hw->vif_data_size = sizeof(struct mt76x02_vif); @@ -189,8 +251,6 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (vif->type == NL80211_IFTYPE_AP) set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); - ewma_signal_init(&msta->rssi); - return 0; } EXPORT_SYMBOL_GPL(mt76x02_sta_add); @@ -207,8 +267,9 @@ void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(mt76x02_sta_remove); -void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, - unsigned int idx) +static void +mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, + unsigned int idx) { struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; struct mt76_txq *mtxq; @@ -221,7 +282,6 @@ void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, mt76_txq_init(&dev->mt76, vif->txq); } -EXPORT_SYMBOL_GPL(mt76x02_vif_init); int mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) @@ -248,6 +308,15 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) if (vif->type == NL80211_IFTYPE_STATION) idx += 8; + if (dev->vif_mask & BIT(idx)) + return -EBUSY; + + /* Allow to change address in HW if we create first interface. 
*/ + if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr)) + mt76x02_mac_setaddr(dev, vif->addr); + + dev->vif_mask |= BIT(idx); + mt76x02_vif_init(dev, vif, idx); return 0; } @@ -257,8 +326,10 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt76x02_dev *dev = hw->priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; mt76_txq_remove(&dev->mt76, vif->txq); + dev->vif_mask &= ~BIT(mvif->idx); } EXPORT_SYMBOL_GPL(mt76x02_remove_interface); @@ -360,7 +431,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } else { if (idx == wcid->hw_key_idx) { wcid->hw_key_idx = -1; - wcid->sw_iv = true; + wcid->sw_iv = false; } key = NULL; @@ -463,7 +534,7 @@ int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val) return -EINVAL; mutex_lock(&dev->mt76.mutex); - mt76x02_mac_set_tx_protection(dev, val); + mt76x02_mac_set_rts_thresh(dev, val); mutex_unlock(&dev->mt76.mutex); return 0; @@ -546,24 +617,6 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, } EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete); -int mt76x02_get_txpower(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, int *dbm) -{ - struct mt76x02_dev *dev = hw->priv; - u8 nstreams = dev->mt76.chainmask & 0xf; - - *dbm = dev->mt76.txpower_cur / 2; - - /* convert from per-chain power to combined - * output on 2x2 devices - */ - if (nstreams > 1) - *dbm += 3; - - return 0; -} -EXPORT_SYMBOL_GPL(mt76x02_get_txpower); - void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) { @@ -614,29 +667,26 @@ static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev) void mt76x02_init_beacon_config(struct mt76x02_dev *dev) { - static const u8 null_addr[ETH_ALEN] = {}; int i; - mt76_wr(dev, MT_MAC_BSSID_DW0, - get_unaligned_le32(dev->mt76.macaddr)); - mt76_wr(dev, MT_MAC_BSSID_DW1, - get_unaligned_le16(dev->mt76.macaddr + 4) | - FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */ - MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT); - - /* Fire a pre-TBTT interrupt 8 ms before TBTT */ - mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, - 8 << 4); - mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, - MT_DFS_GP_INTERVAL); - mt76_wr(dev, MT_INT_TIMER_EN, 0); + if (mt76_is_mmio(dev)) { + /* Fire a pre-TBTT interrupt 8 ms before TBTT */ + mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, + 8 << 4); + mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, + MT_DFS_GP_INTERVAL); + mt76_wr(dev, MT_INT_TIMER_EN, 0); + } + mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_BEACON_TX)); + mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE); mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff); - for (i = 0; i < 8; i++) { - mt76x02_mac_set_bssid(dev, i, null_addr); + for (i = 0; i < 8; i++) mt76x02_mac_set_beacon(dev, i, NULL); - } + mt76x02_set_beacon_offsets(dev); } EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config); @@ -654,21 +704,20 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BSSID) mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid); - if (changed & BSS_CHANGED_BEACON_ENABLED) { - tasklet_disable(&dev->pre_tbtt_tasklet); - mt76x02_mac_set_beacon_enable(dev, mvif->idx, - info->enable_beacon); - tasklet_enable(&dev->pre_tbtt_tasklet); - } + if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) + mt76x02_mac_set_tx_protection(dev, info->use_cts_prot, + info->ht_operation_mode); 
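+ /* Example: for a BSS that reports
+ * IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED in ht_operation_mode with
+ * use_cts_prot set, the call above enables RTS/CTS plus the CCK 11M
+ * protection rate in the HT and VHT protection registers and CTS-to-self
+ * in the legacy OFDM one (see mt76x02_mac_set_tx_protection()).
+ */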
if (changed & BSS_CHANGED_BEACON_INT) { mt76_rmw_field(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_INTVAL, info->beacon_int << 4); dev->beacon_int = info->beacon_int; - dev->tbtt_count = 0; } + if (changed & BSS_CHANGED_BEACON_ENABLED) + mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon); + if (changed & BSS_CHANGED_ERP_PREAMBLE) mt76x02_mac_set_short_preamble(dev, info->use_short_preamble); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index 54a9b5fac787..f8534362e2c8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -143,6 +143,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev) { MT_VHT_HT_FBK_CFG1, 0xedcba980 }, { MT_PROT_AUTO_TX_CFG, 0x00830083 }, { MT_HT_CTRL_CFG, 0x000001ff }, + { MT_TX_LINK_CFG, 0x00001020 }, }; struct mt76_reg_pair prot_vals[] = { { MT_CCK_PROT_CFG, DEFAULT_PROT_CFG_CCK }, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c index e25905c91ee2..e99d4c9bd428 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.c @@ -23,6 +23,9 @@ void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force) u32 rts_cfg; int i; + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN); + mt76_wr(dev, MT_MAC_SYS_CTRL, 0); rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h index 4c8e20bce920..42ff221d7706 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h @@ -25,6 +25,12 @@ struct mt76x02_vif; int mt76x2_mac_start(struct mt76x02_dev *dev); void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force); -void mt76x2_mac_resume(struct mt76x02_dev *dev); + +static inline void mt76x2_mac_resume(struct mt76x02_dev *dev) +{ + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); +} #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h index acfa2b570c7c..40ef43926c06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h @@ -26,29 +26,6 @@ #define MT_MCU_PCIE_REMAP_BASE2 0x0744 #define MT_MCU_PCIE_REMAP_BASE3 0x0748 -#define MT_LED_CTRL 0x0770 -#define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) -#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) -#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) -#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) - -#define MT_LED_TX_BLINK_0 0x0774 -#define MT_LED_TX_BLINK_1 0x0778 - -#define MT_LED_S0_BASE 0x077C -#define MT_LED_S0(_n) (MT_LED_S0_BASE + 8 * (_n)) -#define MT_LED_S1_BASE 0x0780 -#define MT_LED_S1(_n) (MT_LED_S1_BASE + 8 * (_n)) -#define MT_LED_STATUS_OFF_MASK GENMASK(31, 24) -#define MT_LED_STATUS_OFF(_v) (((_v) << __ffs(MT_LED_STATUS_OFF_MASK)) & \ - MT_LED_STATUS_OFF_MASK) -#define MT_LED_STATUS_ON_MASK GENMASK(23, 16) -#define MT_LED_STATUS_ON(_v) (((_v) << __ffs(MT_LED_STATUS_ON_MASK)) & \ - MT_LED_STATUS_ON_MASK) -#define MT_LED_STATUS_DURATION_MASK GENMASK(15, 8) -#define MT_LED_STATUS_DURATION(_v) (((_v) << __ffs(MT_LED_STATUS_DURATION_MASK)) & \ - MT_LED_STATUS_DURATION_MASK) - #define MT_MCU_ROM_PATCH_OFFSET 0x80000 #define MT_MCU_ROM_PATCH_ADDR 0x90000 diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index b259e4b50f1e..6c619f1c65c9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -49,11 +49,9 @@ static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev) extern const struct ieee80211_ops mt76x2_ops; -struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev); int mt76x2_register_device(struct mt76x02_dev *dev); void mt76x2_phy_power_on(struct mt76x02_dev *dev); -int mt76x2_init_hardware(struct mt76x02_dev *dev); void mt76x2_stop_hardware(struct mt76x02_dev *dev); int mt76x2_eeprom_init(struct mt76x02_dev *dev); int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h index 0b0075411b34..76cb1f84eff5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h @@ -29,14 +29,12 @@ extern const struct ieee80211_ops mt76x2u_ops; -struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev); int mt76x2u_register_device(struct mt76x02_dev *dev); int mt76x2u_init_hardware(struct mt76x02_dev *dev); void mt76x2u_cleanup(struct mt76x02_dev *dev); void mt76x2u_stop_hw(struct mt76x02_dev *dev); int mt76x2u_mac_reset(struct mt76x02_dev *dev); -void mt76x2u_mac_resume(struct mt76x02_dev *dev); int mt76x2u_mac_start(struct mt76x02_dev *dev); int mt76x2u_mac_stop(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 92432fe97312..6274655e1f7e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -30,7 +30,19 @@ static const struct pci_device_id mt76pci_device_table[] = { static int mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + static const struct mt76_driver_ops drv_ops = { + .txwi_size = sizeof(struct mt76x02_txwi), + .update_survey = mt76x02_update_channel, + .tx_prepare_skb = mt76x02_tx_prepare_skb, + .tx_complete_skb = mt76x02_tx_complete_skb, + .rx_skb = mt76x02_queue_rx_skb, + .rx_poll_complete = mt76x02_rx_poll_complete, + .sta_ps = mt76x02_sta_ps, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, + }; struct mt76x02_dev *dev; + struct mt76_dev *mdev; int ret; ret = pcim_enable_device(pdev); @@ -47,17 +59,19 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) return ret; - dev = mt76x2_alloc_device(&pdev->dev); - if (!dev) + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x2_ops, + &drv_ops); + if (!mdev) return -ENOMEM; - mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]); + dev = container_of(mdev, struct mt76x02_dev, mt76); + mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]); mt76x2_reset_wlan(dev, false); - dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); - dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); + mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); + dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); - ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler, + ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) goto error; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 7f4ea2d00f42..984d9c4c2e1a 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -119,9 +119,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) mt76_wr(dev, MT_MCU_CLOCK_CTL, 0x1401); mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); - mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr)); - mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4)); - + mt76x02_mac_setaddr(dev, macaddr); mt76x02_init_beacon_config(dev); if (!hard) return 0; @@ -151,6 +149,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) MT_CH_TIME_CFG_RX_AS_BUSY | MT_CH_TIME_CFG_NAV_AS_BUSY | MT_CH_TIME_CFG_EIFS_AS_BUSY | + MT_CH_CCA_RC_EN | FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); mt76x02_set_tx_ackto(dev); @@ -174,13 +173,6 @@ int mt76x2_mac_start(struct mt76x02_dev *dev) return 0; } -void mt76x2_mac_resume(struct mt76x02_dev *dev) -{ - mt76_wr(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | - MT_MAC_SYS_CTRL_ENABLE_RX); -} - static void mt76x2_power_on_rf_patch(struct mt76x02_dev *dev) { @@ -260,7 +252,7 @@ mt76x2_power_on(struct mt76x02_dev *dev) mt76x2_power_on_rf(dev, 1); } -int mt76x2_init_hardware(struct mt76x02_dev *dev) +static int mt76x2_init_hardware(struct mt76x02_dev *dev) { int ret; @@ -300,6 +292,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev) { cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); + cancel_delayed_work_sync(&dev->wdt_work); mt76x02_mcu_set_radio_state(dev, false); mt76x2_mac_stop(dev, false); } @@ -313,81 +306,6 @@ void mt76x2_cleanup(struct mt76x02_dev *dev) mt76x02_mcu_cleanup(dev); } -struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev) -{ - static const struct mt76_driver_ops drv_ops = { - .txwi_size = sizeof(struct mt76x02_txwi), - .update_survey = mt76x02_update_channel, - .tx_prepare_skb = mt76x02_tx_prepare_skb, - .tx_complete_skb = mt76x02_tx_complete_skb, - .rx_skb = mt76x02_queue_rx_skb, - .rx_poll_complete = mt76x02_rx_poll_complete, - .sta_ps = mt76x02_sta_ps, - .sta_add = mt76x02_sta_add, - .sta_remove = mt76x02_sta_remove, - }; - struct mt76x02_dev *dev; - struct mt76_dev *mdev; - - mdev = mt76_alloc_device(sizeof(*dev), &mt76x2_ops); - if (!mdev) - return NULL; - - dev = container_of(mdev, struct mt76x02_dev, mt76); - mdev->dev = pdev; - mdev->drv = &drv_ops; - - return dev; -} - -static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on, - u8 delay_off) -{ - struct mt76x02_dev *dev = container_of(mt76, struct mt76x02_dev, - mt76); - u32 val; - - val = MT_LED_STATUS_DURATION(0xff) | - MT_LED_STATUS_OFF(delay_off) | - MT_LED_STATUS_ON(delay_on); - - mt76_wr(dev, MT_LED_S0(mt76->led_pin), val); - mt76_wr(dev, MT_LED_S1(mt76->led_pin), val); - - val = MT_LED_CTRL_REPLAY(mt76->led_pin) | - MT_LED_CTRL_KICK(mt76->led_pin); - if (mt76->led_al) - val |= MT_LED_CTRL_POLARITY(mt76->led_pin); - mt76_wr(dev, MT_LED_CTRL, val); -} - -static int mt76x2_led_set_blink(struct led_classdev *led_cdev, - unsigned long *delay_on, - unsigned long *delay_off) -{ - struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev, - led_cdev); - u8 delta_on, delta_off; - - delta_off = max_t(u8, *delay_off / 10, 1); - delta_on = max_t(u8, *delay_on / 10, 1); - - mt76x2_led_set_config(mt76, delta_on, delta_off); - return 0; -} - -static void mt76x2_led_set_brightness(struct led_classdev *led_cdev, - enum led_brightness brightness) -{ - struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev, - led_cdev); - - if (!brightness) - 
mt76x2_led_set_config(mt76, 0, 0xff); - else - mt76x2_led_set_config(mt76, 0xff, 0); -} - int mt76x2_register_device(struct mt76x02_dev *dev) { int ret; @@ -402,12 +320,6 @@ int mt76x2_register_device(struct mt76x02_dev *dev) mt76x02_config_mac_addr_list(dev); - /* init led callbacks */ - if (IS_ENABLED(CONFIG_MT76_LEDS)) { - dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; - dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; - } - ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, ARRAY_SIZE(mt76x02_rates)); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index b54a32397486..878ce92405ed 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -33,7 +33,9 @@ mt76x2_start(struct ieee80211_hw *hw) goto out; ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, - MT_CALIBRATE_INTERVAL); + MT_MAC_WORK_INTERVAL); + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work, + MT_WATCHDOG_TIME); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); @@ -189,7 +191,7 @@ const struct ieee80211_ops mt76x2_ops = { .sw_scan_complete = mt76x02_sw_scan_complete, .flush = mt76x2_flush, .ampdu_action = mt76x02_ampdu_action, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, .wake_tx_queue = mt76_wake_tx_queue, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .release_buffered_frames = mt76_release_buffered_frames, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c index da7cd40f56ff..cc1aebcb0696 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c @@ -74,6 +74,7 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped) mt76x2_mac_resume(dev); mt76x2_apply_gain_adj(dev); + mt76x02_edcca_init(dev, true); dev->cal.channel_cal_done = true; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index c9634a774705..1848e8ab2e21 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -241,7 +241,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev) t.offset1 = txp.chain[1].tssi_offset; mt76x2_mcu_tssi_comp(dev, &t); - if (t.pa_mode || dev->cal.dpd_cal_done) + if (t.pa_mode || dev->cal.dpd_cal_done || dev->ed_tx_blocked) return; usleep_range(10000, 20000); @@ -284,7 +284,9 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) int low_gain; u32 val; - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); + dev->cal.avg_rssi_all = mt76_get_min_avg_rssi(&dev->mt76); + if (!dev->cal.avg_rssi_all) + dev->cal.avg_rssi_all = -75; low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 4d1788eb3812..ddb6b2c48e01 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -36,24 +36,36 @@ static const struct usb_device_id mt76x2u_device_table[] = { static int mt76x2u_probe(struct usb_interface *intf, const struct usb_device_id *id) { + static const struct mt76_driver_ops drv_ops = { + .tx_prepare_skb = mt76x02u_tx_prepare_skb, + .tx_complete_skb = mt76x02u_tx_complete_skb, + 
.tx_status_data = mt76x02_tx_status_data, + .rx_skb = mt76x02_queue_rx_skb, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, + }; struct usb_device *udev = interface_to_usbdev(intf); struct mt76x02_dev *dev; + struct mt76_dev *mdev; int err; - dev = mt76x2u_alloc_device(&intf->dev); - if (!dev) + mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops, + &drv_ops); + if (!mdev) return -ENOMEM; + dev = container_of(mdev, struct mt76x02_dev, mt76); + udev = usb_get_dev(udev); usb_reset_device(udev); - mt76x02u_init_mcu(&dev->mt76); - err = mt76u_init(&dev->mt76, intf); + mt76x02u_init_mcu(mdev); + err = mt76u_init(mdev, intf); if (err < 0) goto err; - dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION); - dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev); + mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); + dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); err = mt76x2u_register_device(dev); if (err < 0) @@ -88,11 +100,9 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf, pm_message_t state) { struct mt76x02_dev *dev = usb_get_intfdata(intf); - struct mt76_usb *usb = &dev->mt76.usb; mt76u_stop_queues(&dev->mt76); mt76x2u_stop_hw(dev); - usb_kill_urb(usb->mcu.res.urb); return 0; } @@ -103,15 +113,6 @@ static int __maybe_unused mt76x2u_resume(struct usb_interface *intf) struct mt76_usb *usb = &dev->mt76.usb; int err; - reinit_completion(&usb->mcu.cmpl); - err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN, - MT_EP_IN_CMD_RESP, - &usb->mcu.res, GFP_KERNEL, - mt76u_mcu_complete_urb, - &usb->mcu.cmpl); - if (err < 0) - goto err; - err = mt76u_submit_rx_buffers(&dev->mt76); if (err < 0) goto err; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 0be3784f44fb..1da90e58d942 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -134,30 +134,6 @@ static int mt76x2u_init_eeprom(struct mt76x02_dev *dev) return 0; } -struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev) -{ - static const struct mt76_driver_ops drv_ops = { - .tx_prepare_skb = mt76x02u_tx_prepare_skb, - .tx_complete_skb = mt76x02u_tx_complete_skb, - .tx_status_data = mt76x02_tx_status_data, - .rx_skb = mt76x02_queue_rx_skb, - .sta_add = mt76x02_sta_add, - .sta_remove = mt76x02_sta_remove, - }; - struct mt76x02_dev *dev; - struct mt76_dev *mdev; - - mdev = mt76_alloc_device(sizeof(*dev), &mt76x2u_ops); - if (!mdev) - return NULL; - - dev = container_of(mdev, struct mt76x02_dev, mt76); - mdev->dev = pdev; - mdev->drv = &drv_ops; - - return dev; -} - int mt76x2u_init_hardware(struct mt76x02_dev *dev) { int i, k, err; @@ -207,11 +183,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev) mt76x02_mac_shared_key_setup(dev, i, k, NULL); } - mt76_clear(dev, MT_BEACON_TIME_CFG, - MT_BEACON_TIME_CFG_TIMER_EN | - MT_BEACON_TIME_CFG_SYNC_MODE | - MT_BEACON_TIME_CFG_TBTT_EN | - MT_BEACON_TIME_CFG_BEACON_TX); + mt76x02_init_beacon_config(dev); mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f); @@ -242,10 +214,6 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) if (err < 0) goto fail; - err = mt76u_mcu_init_rx(&dev->mt76); - if (err < 0) - goto fail; - err = mt76x2u_init_hardware(dev); if (err < 0) goto fail; @@ -256,7 +224,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) goto fail; /* check hw sg support in order to enable AMSDU */ - if (mt76u_check_sg(&dev->mt76)) + if (dev->mt76.usb.sg_en) 
hw->max_tx_fragments = MT_SG_MAX_SIZE; else hw->max_tx_fragments = 1; @@ -287,5 +255,4 @@ void mt76x2u_cleanup(struct mt76x02_dev *dev) mt76x02_mcu_set_radio_state(dev, false); mt76x2u_stop_hw(dev); mt76u_queues_deinit(&dev->mt76); - mt76u_mcu_deinit(&dev->mt76); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c index db2194a92e67..5e84b4535cb1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c @@ -143,8 +143,8 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev) rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); - mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20)); - mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1)); + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); + mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN); /* wait tx dma to stop */ for (i = 0; i < 2000; i++) { @@ -211,12 +211,3 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev) return 0; } - -void mt76x2u_mac_resume(struct mt76x02_dev *dev) -{ - mt76_wr(dev, MT_MAC_SYS_CTRL, - MT_MAC_SYS_CTRL_ENABLE_TX | - MT_MAC_SYS_CTRL_ENABLE_RX); - mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20)); - mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1)); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index 2b48cc51a30d..2ac78e4dc41a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -28,7 +28,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw) goto out; ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, - MT_CALIBRATE_INTERVAL); + MT_MAC_WORK_INTERVAL); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); out: @@ -46,19 +46,6 @@ static void mt76x2u_stop(struct ieee80211_hw *hw) mutex_unlock(&dev->mt76.mutex); } -static int mt76x2u_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mt76x02_dev *dev = hw->priv; - unsigned int idx = 8; - - if (!ether_addr_equal(dev->mt76.macaddr, vif->addr)) - mt76x02_mac_setaddr(dev, vif->addr); - - mt76x02_vif_init(dev, vif, idx); - return 0; -} - static int mt76x2u_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) @@ -70,13 +57,12 @@ mt76x2u_set_channel(struct mt76x02_dev *dev, mt76_set_channel(&dev->mt76); - mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20)); - mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1)); mt76x2_mac_stop(dev, false); err = mt76x2u_phy_set_channel(dev, chandef); - mt76x2u_mac_resume(dev); + mt76x2_mac_resume(dev); + mt76x02_edcca_init(dev, true); clear_bit(MT76_RESET, &dev->mt76.state); mt76_txq_schedule_all(&dev->mt76); @@ -125,7 +111,7 @@ const struct ieee80211_ops mt76x2u_ops = { .tx = mt76x02_tx, .start = mt76x2u_start, .stop = mt76x2u_stop, - .add_interface = mt76x2u_add_interface, + .add_interface = mt76x02_add_interface, .remove_interface = mt76x02_remove_interface, .sta_state = mt76_sta_state, .set_key = mt76x02_set_key, @@ -138,5 +124,5 @@ const struct ieee80211_ops mt76x2u_ops = { .sw_scan_start = mt76x02_sw_scan, .sw_scan_complete = mt76x02_sw_scan_complete, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, - .get_txpower = mt76x02_get_txpower, + .get_txpower = mt76_get_txpower, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c index 45a95ee3a415..152d41fe9ff5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c @@ -39,7 +39,7 @@ static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev) static void mt76x2u_mcu_enable_patch(struct mt76x02_dev *dev) { struct mt76_usb *usb = &dev->mt76.usb; - const u8 data[] = { + static const u8 data[] = { 0x6f, 0xfc, 0x08, 0x01, 0x20, 0x04, 0x00, 0x00, 0x00, 0x09, 0x00, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c index 11d414d86c68..07f67cb6854c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c @@ -43,8 +43,9 @@ mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped) mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0); if (!mac_stopped) - mt76x2u_mac_resume(dev); + mt76x2_mac_resume(dev); mt76x2_apply_gain_adj(dev); + mt76x02_edcca_init(dev, true); dev->cal.channel_cal_done = true; } diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 7b711058807d..5a349fe3e576 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -170,21 +170,22 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, int pid; if (!wcid) - return 0; + return MT_PACKET_ID_NO_ACK; if (info->flags & IEEE80211_TX_CTL_NO_ACK) return MT_PACKET_ID_NO_ACK; if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | IEEE80211_TX_CTL_RATE_CTRL_PROBE))) - return 0; + return MT_PACKET_ID_NO_SKB; spin_lock_bh(&dev->status_list.lock); memset(cb, 0, sizeof(*cb)); wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK; - if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK) - wcid->packet_id = 1; + if (wcid->packet_id == MT_PACKET_ID_NO_ACK || + wcid->packet_id == MT_PACKET_ID_NO_SKB) + wcid->packet_id = MT_PACKET_ID_FIRST; pid = wcid->packet_id; cb->wcid = wcid->idx; @@ -204,9 +205,6 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, { struct sk_buff *skb, *tmp; - if (pktid == MT_PACKET_ID_NO_ACK) - return NULL; - skb_queue_walk_safe(&dev->status_list, skb, tmp) { struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); @@ -216,7 +214,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, if (cb->pktid == pktid) return skb; - if (!pktid && + if (pktid >= 0 && !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT)) continue; @@ -330,7 +328,8 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta, info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE; if (last) - info->flags |= IEEE80211_TX_STATUS_EOSP; + info->flags |= IEEE80211_TX_STATUS_EOSP | + IEEE80211_TX_CTL_REQ_TX_STATUS; mt76_skb_set_moredata(skb, !last); dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta); @@ -394,6 +393,11 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq, bool probe; int idx; + if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) { + *empty = true; + return 0; + } + skb = mt76_txq_dequeue(dev, mtxq, false); if (!skb) { *empty = true; diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index b061263453d4..ae6ada370597 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -22,6 +22,10 @@ #define MT_VEND_REQ_MAX_RETRY 10 #define MT_VEND_REQ_TOUT_MS 300 +static bool disable_usb_sg; +module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644); +MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support"); + /* should be 
called with usb_ctrl_mtx locked */ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, u16 val, u16 offset, @@ -241,6 +245,16 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base, return mt76u_req_rd_rp(dev, base, data, n); } +static bool mt76u_check_sg(struct mt76_dev *dev) +{ + struct usb_interface *intf = to_usb_interface(dev->dev); + struct usb_device *udev = interface_to_usbdev(intf); + + return (!disable_usb_sg && udev->bus->sg_tablesize > 0 && + (udev->bus->no_sg_constraint || + udev->speed == USB_SPEED_WIRELESS)); +} + static int mt76u_set_endpoints(struct usb_interface *intf, struct mt76_usb *usb) @@ -309,42 +323,66 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf, return i ? : -ENOMEM; } -int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, - int nsgs, int len, int sglen, gfp_t gfp) +static int +mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q, + struct mt76u_buf *buf, int nsgs, gfp_t gfp) +{ + if (dev->usb.sg_en) { + return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size, + SKB_WITH_OVERHEAD(q->buf_size)); + } else { + buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp); + return buf->buf ? 0 : -ENOMEM; + } +} + +static int +mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf) { - buf->urb = usb_alloc_urb(0, gfp); + struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; + + buf->len = SKB_WITH_OVERHEAD(q->buf_size); + buf->dev = dev; + + buf->urb = usb_alloc_urb(0, GFP_KERNEL); if (!buf->urb) return -ENOMEM; - buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg), - gfp); - if (!buf->urb->sg) - return -ENOMEM; + if (dev->usb.sg_en) { + buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE, + sizeof(*buf->urb->sg), + GFP_KERNEL); + if (!buf->urb->sg) + return -ENOMEM; - sg_init_table(buf->urb->sg, nsgs); - buf->dev = dev; + sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE); + } - return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen); + return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL); } -EXPORT_SYMBOL_GPL(mt76u_buf_alloc); -void mt76u_buf_free(struct mt76u_buf *buf) +static void mt76u_buf_free(struct mt76u_buf *buf) { struct urb *urb = buf->urb; int i; for (i = 0; i < urb->num_sgs; i++) skb_free_frag(sg_virt(&urb->sg[i])); + + if (buf->buf) + skb_free_frag(buf->buf); + usb_free_urb(buf->urb); } -EXPORT_SYMBOL_GPL(mt76u_buf_free); -int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, - struct mt76u_buf *buf, gfp_t gfp, - usb_complete_t complete_fn, void *context) +static void +mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, + struct mt76u_buf *buf, usb_complete_t complete_fn, + void *context) { struct usb_interface *intf = to_usb_interface(dev->dev); struct usb_device *udev = interface_to_usbdev(intf); + u8 *data = buf->urb->num_sgs ? 
NULL : buf->buf; unsigned int pipe; if (dir == USB_DIR_IN) @@ -352,13 +390,21 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, else pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]); - usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len, + usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len, complete_fn, context); +} + +static int +mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, + struct mt76u_buf *buf, gfp_t gfp, + usb_complete_t complete_fn, void *context) +{ + mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn, + context); trace_submit_urb(dev, buf->urb); return usb_submit_urb(buf->urb, gfp); } -EXPORT_SYMBOL_GPL(mt76u_submit_buf); static inline struct mt76u_buf *mt76u_get_next_rx_entry(struct mt76_queue *q) @@ -393,10 +439,11 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len) } static int -mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb) +mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf) { struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; - u8 *data = sg_virt(&urb->sg[0]); + struct urb *urb = buf->urb; + u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf; int data_len, len, nsgs = 1; struct sk_buff *skb; @@ -407,21 +454,20 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb) if (len < 0) return 0; + data_len = urb->num_sgs ? urb->sg[0].length : buf->len; + data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN); + if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) + return 0; + skb = build_skb(data, q->buf_size); if (!skb) return 0; - data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN); skb_reserve(skb, MT_DMA_HDR_LEN); - if (skb->tail + data_len > skb->end) { - dev_kfree_skb(skb); - return 1; - } - __skb_put(skb, data_len); len -= data_len; - while (len > 0) { + while (len > 0 && nsgs < urb->num_sgs) { data_len = min_t(int, len, urb->sg[nsgs].length); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, sg_page(&urb->sg[nsgs]), @@ -449,7 +495,8 @@ static void mt76u_complete_rx(struct urb *urb) case -ENOENT: return; default: - dev_err(dev->dev, "rx urb failed: %d\n", urb->status); + dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", + urb->status); /* fall through */ case 0: break; @@ -470,8 +517,8 @@ static void mt76u_rx_tasklet(unsigned long data) { struct mt76_dev *dev = (struct mt76_dev *)data; struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; - int err, nsgs, buf_len = q->buf_size; struct mt76u_buf *buf; + int err, count; rcu_read_lock(); @@ -480,11 +527,10 @@ static void mt76u_rx_tasklet(unsigned long data) if (!buf) break; - nsgs = mt76u_process_rx_entry(dev, buf->urb); - if (nsgs > 0) { - err = mt76u_fill_rx_sg(dev, buf, nsgs, - buf_len, - SKB_WITH_OVERHEAD(buf_len)); + count = mt76u_process_rx_entry(dev, buf); + if (count > 0) { + err = mt76u_refill_rx(dev, q, buf, count, + GFP_ATOMIC); if (err < 0) break; } @@ -521,8 +567,13 @@ EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers); static int mt76u_alloc_rx(struct mt76_dev *dev) { + struct mt76_usb *usb = &dev->usb; struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; - int i, err, nsgs; + int i, err; + + usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL); + if (!usb->mcu.data) + return -ENOMEM; spin_lock_init(&q->rx_page_lock); spin_lock_init(&q->lock); @@ -532,23 +583,13 @@ static int mt76u_alloc_rx(struct mt76_dev *dev) if (!q->entry) return -ENOMEM; - if (mt76u_check_sg(dev)) { - q->buf_size = MT_RX_BUF_SIZE; - nsgs = MT_SG_MAX_SIZE; - } else { - q->buf_size = PAGE_SIZE; - nsgs = 1; - } - - for (i = 0; i < 
MT_NUM_RX_ENTRIES; i++) { - err = mt76u_buf_alloc(dev, &q->entry[i].ubuf, - nsgs, q->buf_size, - SKB_WITH_OVERHEAD(q->buf_size), - GFP_KERNEL); + q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE; + q->ndesc = MT_NUM_RX_ENTRIES; + for (i = 0; i < q->ndesc; i++) { + err = mt76u_buf_alloc(dev, &q->entry[i].ubuf); if (err < 0) return err; } - q->ndesc = MT_NUM_RX_ENTRIES; return mt76u_submit_rx_buffers(dev); } @@ -585,6 +626,7 @@ static void mt76u_stop_rx(struct mt76_dev *dev) static void mt76u_tx_tasklet(unsigned long data) { struct mt76_dev *dev = (struct mt76_dev *)data; + struct mt76_queue_entry entry; struct mt76u_buf *buf; struct mt76_queue *q; bool wake; @@ -599,17 +641,18 @@ static void mt76u_tx_tasklet(unsigned long data) if (!buf->done || !q->queued) break; - dev->drv->tx_complete_skb(dev, q, - &q->entry[q->head], - false); - if (q->entry[q->head].schedule) { q->entry[q->head].schedule = false; q->swq_queued--; } + entry = q->entry[q->head]; q->head = (q->head + 1) % q->ndesc; q->queued--; + + spin_unlock_bh(&q->lock); + dev->drv->tx_complete_skb(dev, q, &entry, false); + spin_lock_bh(&q->lock); } mt76_txq_schedule(dev, q); wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; @@ -667,21 +710,15 @@ static void mt76u_complete_tx(struct urb *urb) } static int -mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb) +mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb, + struct urb *urb) { - int nsgs = 1 + skb_shinfo(skb)->nr_frags; - struct sk_buff *iter; - - skb_walk_frags(skb, iter) - nsgs += 1 + skb_shinfo(iter)->nr_frags; - - memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE); - - nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs); - sg_init_marker(urb->sg, nsgs); - urb->num_sgs = nsgs; + if (!dev->usb.sg_en) + return 0; - return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len); + sg_init_table(urb->sg, MT_SG_MAX_SIZE); + urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len); + return urb->num_sgs; } static int @@ -689,12 +726,8 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta) { - struct usb_interface *intf = to_usb_interface(dev->dev); - struct usb_device *udev = interface_to_usbdev(intf); - u8 ep = q2ep(q->hw_idx); struct mt76u_buf *buf; u16 idx = q->tail; - unsigned int pipe; int err; if (q->queued == q->ndesc) @@ -706,15 +739,16 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, return err; buf = &q->entry[idx].ubuf; + buf->buf = skb->data; + buf->len = skb->len; buf->done = false; - err = mt76u_tx_build_sg(skb, buf->urb); + err = mt76u_tx_build_sg(dev, skb, buf->urb); if (err < 0) return err; - pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]); - usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len, - mt76u_complete_tx, buf); + mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx), + buf, mt76u_complete_tx, buf); q->tail = (q->tail + 1) % q->ndesc; q->entry[idx].skb = skb; @@ -749,10 +783,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) { struct mt76u_buf *buf; struct mt76_queue *q; - size_t size; int i, j; - size = MT_SG_MAX_SIZE * sizeof(struct scatterlist); for (i = 0; i < IEEE80211_NUM_ACS; i++) { q = &dev->q_tx[i]; spin_lock_init(&q->lock); @@ -774,9 +806,15 @@ static int mt76u_alloc_tx(struct mt76_dev *dev) if (!buf->urb) return -ENOMEM; - buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL); - if (!buf->urb->sg) - return -ENOMEM; + if (dev->usb.sg_en) { + size_t size = MT_SG_MAX_SIZE * + sizeof(struct scatterlist); + + buf->urb->sg = 
devm_kzalloc(dev->dev, size, + GFP_KERNEL); + if (!buf->urb->sg) + return -ENOMEM; + } } } return 0; @@ -838,16 +876,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev) err = mt76u_alloc_rx(dev); if (err < 0) - goto err; - - err = mt76u_alloc_tx(dev); - if (err < 0) - goto err; + return err; - return 0; -err: - mt76u_queues_deinit(dev); - return err; + return mt76u_alloc_tx(dev); } EXPORT_SYMBOL_GPL(mt76u_alloc_queues); @@ -875,13 +906,14 @@ int mt76u_init(struct mt76_dev *dev, INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data); skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]); - init_completion(&usb->mcu.cmpl); mutex_init(&usb->mcu.mutex); mutex_init(&usb->usb_ctrl_mtx); dev->bus = &mt76u_ops; dev->queue_ops = &usb_queue_ops; + usb->sg_en = mt76u_check_sg(dev); + return mt76u_set_endpoints(intf, usb); } EXPORT_SYMBOL_GPL(mt76u_init); diff --git a/drivers/net/wireless/mediatek/mt76/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/usb_mcu.c deleted file mode 100644 index 036be4163e69..000000000000 --- a/drivers/net/wireless/mediatek/mt76/usb_mcu.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2018 Lorenzo Bianconi - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "mt76.h" - -void mt76u_mcu_complete_urb(struct urb *urb) -{ - struct completion *cmpl = urb->context; - - complete(cmpl); -} -EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb); - -int mt76u_mcu_init_rx(struct mt76_dev *dev) -{ - struct mt76_usb *usb = &dev->usb; - int err; - - err = mt76u_buf_alloc(dev, &usb->mcu.res, 1, - MCU_RESP_URB_SIZE, MCU_RESP_URB_SIZE, - GFP_KERNEL); - if (err < 0) - return err; - - err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, - &usb->mcu.res, GFP_KERNEL, - mt76u_mcu_complete_urb, - &usb->mcu.cmpl); - if (err < 0) - mt76u_buf_free(&usb->mcu.res); - - return err; -} -EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx); - -void mt76u_mcu_deinit(struct mt76_dev *dev) -{ - struct mt76_usb *usb = &dev->usb; - - usb_kill_urb(usb->mcu.res.urb); - mt76u_buf_free(&usb->mcu.res); -} -EXPORT_SYMBOL_GPL(mt76u_mcu_deinit); diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index 0c35b8db58cd..69270c1a9091 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -75,4 +75,46 @@ int mt76_wcid_alloc(unsigned long *mask, int size) } EXPORT_SYMBOL_GPL(mt76_wcid_alloc); +int mt76_get_min_avg_rssi(struct mt76_dev *dev) +{ + struct mt76_wcid *wcid; + int i, j, min_rssi = 0; + s8 cur_rssi; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { + unsigned long mask = dev->wcid_mask[i]; + + if (!mask) + continue; + + for (j = i * BITS_PER_LONG; mask; j++, mask >>= 1) { + if (!(mask & 1)) + continue; + + wcid = rcu_dereference(dev->wcid[j]); + if (!wcid) + continue; + + spin_lock(&dev->rx_lock); + if (wcid->inactive_count++ < 5) + cur_rssi = -ewma_signal_read(&wcid->rssi); + else + cur_rssi = 0; + spin_unlock(&dev->rx_lock); + + if (cur_rssi < min_rssi) + min_rssi = cur_rssi; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + return min_rssi; +} +EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi); + MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 7f3e3983b781..f7edeffb2b19 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -124,9 +124,9 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len) u16 dma_len = get_unaligned_le16(data); if (data_len < min_seg_len || - WARN_ON(!dma_len) || - WARN_ON(dma_len + MT_DMA_HDRS > data_len) || - WARN_ON(dma_len & 0x3)) + WARN_ON_ONCE(!dma_len) || + WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) || + WARN_ON_ONCE(dma_len & 0x3)) return 0; return MT_DMA_HDRS + dma_len; diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h index 662d12703b69..57b503ae63f1 100644 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h @@ -17,7 +17,7 @@ struct mt7601u_dev; -#define MT7601U_EE_MAX_VER 0x0c +#define MT7601U_EE_MAX_VER 0x0d #define MT7601U_EEPROM_SIZE 256 #define MT7601U_DEFAULT_TX_POWER 6 diff --git a/drivers/net/wireless/quantenna/Makefile b/drivers/net/wireless/quantenna/Makefile index baebfbde119e..cea83d178d2e 100644 --- a/drivers/net/wireless/quantenna/Makefile +++ b/drivers/net/wireless/quantenna/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Copyright (c) 2015-2016 Quantenna Communications, Inc. # All rights reserved. 
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index 528ca7f5e070..14b569b6d1b5 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015 Quantenna Communications - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015 Quantenna Communications. All rights reserved. */ #ifndef QTNFMAC_BUS_H #define QTNFMAC_BUS_H @@ -135,7 +122,5 @@ static __always_inline void qtnf_bus_unlock(struct qtnf_bus *bus) int qtnf_core_attach(struct qtnf_bus *bus); void qtnf_core_detach(struct qtnf_bus *bus); -void qtnf_txflowblock(struct device *dev, bool state); -void qtnf_txcomplete(struct device *dev, struct sk_buff *txp, bool success); #endif /* QTNFMAC_BUS_H */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 51b33ec78fac..dcb0991432f4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2012-2012 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. 
*/ #include #include @@ -66,9 +53,11 @@ static const u32 qtnf_cipher_suites[] = { static const struct ieee80211_txrx_stypes qtnf_mgmt_stypes[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_STATION] = { - .tx = BIT(IEEE80211_STYPE_ACTION >> 4), + .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | - BIT(IEEE80211_STYPE_PROBE_REQ >> 4), + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4), }, [NL80211_IFTYPE_AP] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4), @@ -122,7 +111,8 @@ qtnf_change_virtual_intf(struct wiphy *wiphy, struct vif_params *params) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); - u8 *mac_addr; + u8 *mac_addr = NULL; + int use4addr = 0; int ret; ret = qtnf_validate_iface_combinations(wiphy, vif, type); @@ -132,14 +122,14 @@ qtnf_change_virtual_intf(struct wiphy *wiphy, return ret; } - if (params) + if (params) { mac_addr = params->macaddr; - else - mac_addr = NULL; + use4addr = params->use_4addr; + } qtnf_scan_done(vif->mac, true); - ret = qtnf_cmd_send_change_intf_type(vif, type, mac_addr); + ret = qtnf_cmd_send_change_intf_type(vif, type, use4addr, mac_addr); if (ret) { pr_err("VIF%u.%u: failed to change type to %d\n", vif->mac->macid, vif->vifid, type); @@ -190,6 +180,7 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, struct qtnf_wmac *mac; struct qtnf_vif *vif; u8 *mac_addr = NULL; + int use4addr = 0; int ret; mac = wiphy_priv(wiphy); @@ -225,10 +216,12 @@ static struct wireless_dev *qtnf_add_virtual_intf(struct wiphy *wiphy, return ERR_PTR(-ENOTSUPP); } - if (params) + if (params) { mac_addr = params->macaddr; + use4addr = params->use_4addr; + } - ret = qtnf_cmd_send_add_intf(vif, type, mac_addr); + ret = qtnf_cmd_send_add_intf(vif, type, use4addr, mac_addr); if (ret) { pr_err("VIF%u.%u: failed to add VIF %pM\n", mac->macid, vif->vifid, mac_addr); @@ -359,11 +352,6 @@ static int qtnf_set_wiphy_params(struct wiphy *wiphy, u32 changed) return -EFAULT; } - if (changed & (WIPHY_PARAM_RETRY_LONG | WIPHY_PARAM_RETRY_SHORT)) { - pr_err("MAC%u: can't modify retry params\n", mac->macid); - return -EOPNOTSUPP; - } - ret = qtnf_cmd_send_update_phy_params(mac, changed); if (ret) pr_err("MAC%u: failed to update PHY params\n", mac->macid); @@ -650,6 +638,12 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, if (vif->wdev.iftype != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; + if (sme->auth_type == NL80211_AUTHTYPE_SAE && + !(sme->flags & CONNECT_REQ_EXTERNAL_AUTH_SUPPORT)) { + pr_err("can not offload authentication to userspace\n"); + return -EOPNOTSUPP; + } + if (sme->bssid) ether_addr_copy(vif->bssid, sme->bssid); else @@ -666,6 +660,30 @@ out: return ret; } +static int +qtnf_external_auth(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_external_auth_params *auth) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + int ret; + + if (vif->wdev.iftype != NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (!ether_addr_equal(vif->bssid, auth->bssid)) + pr_warn("unexpected bssid: %pM", auth->bssid); + + ret = qtnf_cmd_send_external_auth(vif, auth); + if (ret) { + pr_err("VIF%u.%u: failed to report external auth\n", + vif->mac->macid, vif->vifid); + goto out; + } + +out: + return ret; +} + static int qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code) @@ -960,6 +978,7 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .set_default_mgmt_key = qtnf_set_default_mgmt_key, .scan = qtnf_scan, .connect = qtnf_connect, + .external_auth = 
qtnf_external_auth, .disconnect = qtnf_disconnect, .dump_survey = qtnf_dump_survey, .get_channel = qtnf_get_channel, @@ -1107,7 +1126,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | - WIPHY_FLAG_HAS_CHANNEL_SWITCH; + WIPHY_FLAG_HAS_CHANNEL_SWITCH | + WIPHY_FLAG_4ADDR_STATION; wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD) @@ -1138,6 +1158,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) if (!(hw_info->hw_capab & QLINK_HW_CAPAB_OBSS_SCAN)) wiphy->features |= NL80211_FEATURE_NEED_OBSS_SCAN; + if (hw_info->hw_capab & QLINK_HW_CAPAB_SAE) + wiphy->features |= NL80211_FEATURE_SAE; + #ifdef CONFIG_PM if (macinfo->wowlan) wiphy->wowlan = macinfo->wowlan; diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index b73425122a10..c374857283ac 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_CFG80211_H_ #define _QTN_FMAC_CFG80211_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 659e7649fe22..85a2a58f4c16 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1,17 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. 
*/ #include #include @@ -72,6 +60,8 @@ static int qtnf_cmd_resp_result_decode(enum qlink_cmd_result qcode) return -EADDRINUSE; case QLINK_CMD_RESULT_EADDRNOTAVAIL: return -EADDRNOTAVAIL; + case QLINK_CMD_RESULT_EBUSY: + return -EBUSY; default: return -EFAULT; } @@ -97,14 +87,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, vif_id = cmd->vifid; cmd->mhdr.len = cpu_to_le16(cmd_skb->len); - pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, - le16_to_cpu(cmd->cmd_id)); + pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id); if (bus->fw_state != QTNF_FW_STATE_ACTIVE && - le16_to_cpu(cmd->cmd_id) != QLINK_CMD_FW_INIT) { + cmd_id != QLINK_CMD_FW_INIT) { pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n", - mac_id, vif_id, le16_to_cpu(cmd->cmd_id), - bus->fw_state); + mac_id, vif_id, cmd_id, bus->fw_state); dev_kfree_skb(cmd_skb); return -ENODEV; } @@ -138,7 +126,7 @@ out: return qtnf_cmd_resp_result_decode(le16_to_cpu(resp->result)); pr_warn("VIF%u.%u: cmd 0x%.4X failed: %d\n", - mac_id, vif_id, le16_to_cpu(cmd->cmd_id), ret); + mac_id, vif_id, cmd_id, ret); return ret; } @@ -732,6 +720,7 @@ out: static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype, + int use4addr, u8 *mac_addr, enum qlink_cmd_type cmd_type) { @@ -749,6 +738,7 @@ static int qtnf_cmd_send_add_change_intf(struct qtnf_vif *vif, qtnf_bus_lock(vif->mac->bus); cmd = (struct qlink_cmd_manage_intf *)cmd_skb->data; + cmd->intf_info.use4addr = use4addr; switch (iftype) { case NL80211_IFTYPE_AP: @@ -784,17 +774,19 @@ out: return ret; } -int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, - enum nl80211_iftype iftype, u8 *mac_addr) +int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype, + int use4addr, u8 *mac_addr) { - return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr, + return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr, QLINK_CMD_ADD_INTF); } int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif, - enum nl80211_iftype iftype, u8 *mac_addr) + enum nl80211_iftype iftype, + int use4addr, + u8 *mac_addr) { - return qtnf_cmd_send_add_change_intf(vif, iftype, mac_addr, + return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr, QLINK_CMD_CHANGE_INTF); } @@ -914,9 +906,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) return -E2BIG; - hwinfo->rd = kzalloc(sizeof(*hwinfo->rd) - + sizeof(struct ieee80211_reg_rule) - * resp->n_reg_rules, GFP_KERNEL); + hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules, + resp->n_reg_rules), GFP_KERNEL); if (!hwinfo->rd) return -ENOMEM; @@ -1558,11 +1549,11 @@ static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac, switch (tlv_type) { case QTN_TLV_ID_FRAG_THRESH: phy_thr = (void *)tlv; - mac_info->frag_thr = (u32)le16_to_cpu(phy_thr->thr); + mac_info->frag_thr = le32_to_cpu(phy_thr->thr); break; case QTN_TLV_ID_RTS_THRESH: phy_thr = (void *)tlv; - mac_info->rts_thr = (u32)le16_to_cpu(phy_thr->thr); + mac_info->rts_thr = le32_to_cpu(phy_thr->thr); break; case QTN_TLV_ID_SRETRY_LIMIT: limit = (void *)tlv; @@ -1810,15 +1801,23 @@ int qtnf_cmd_send_update_phy_params(struct qtnf_wmac *mac, u32 changed) qtnf_bus_lock(mac->bus); if (changed & WIPHY_PARAM_FRAG_THRESHOLD) - qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_FRAG_THRESH, + qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_FRAG_THRESH, wiphy->frag_threshold); if (changed & WIPHY_PARAM_RTS_THRESHOLD) - qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_RTS_THRESH, + 
qtnf_cmd_skb_put_tlv_u32(cmd_skb, QTN_TLV_ID_RTS_THRESH, wiphy->rts_threshold); if (changed & WIPHY_PARAM_COVERAGE_CLASS) qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_COVERAGE_CLASS, wiphy->coverage_class); + if (changed & WIPHY_PARAM_RETRY_LONG) + qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_LRETRY_LIMIT, + wiphy->retry_long); + + if (changed & WIPHY_PARAM_RETRY_SHORT) + qtnf_cmd_skb_put_tlv_u8(cmd_skb, QTN_TLV_ID_SRETRY_LIMIT, + wiphy->retry_short); + ret = qtnf_cmd_send(mac->bus, cmd_skb); if (ret) goto out; @@ -2323,6 +2322,35 @@ out: return ret; } +int qtnf_cmd_send_external_auth(struct qtnf_vif *vif, + struct cfg80211_external_auth_params *auth) +{ + struct sk_buff *cmd_skb; + struct qlink_cmd_external_auth *cmd; + int ret; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, + QLINK_CMD_EXTERNAL_AUTH, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + cmd = (struct qlink_cmd_external_auth *)cmd_skb->data; + + ether_addr_copy(cmd->bssid, auth->bssid); + cmd->status = cpu_to_le16(auth->status); + + qtnf_bus_lock(vif->mac->bus); + ret = qtnf_cmd_send(vif->mac->bus, cmd_skb); + if (ret) + goto out; + +out: + qtnf_bus_unlock(vif->mac->bus); + + return ret; +} + int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code) { struct sk_buff *cmd_skb; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 1ac41156c192..64f0b9dc8a14 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -1,17 +1,5 @@ -/* - * Copyright (c) 2016 Quantenna Communications, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2016 Quantenna Communications. All rights reserved. 
*/ #ifndef QLINK_COMMANDS_H_ #define QLINK_COMMANDS_H_ @@ -26,9 +14,11 @@ void qtnf_cmd_send_deinit_fw(struct qtnf_bus *bus); int qtnf_cmd_get_hw_info(struct qtnf_bus *bus); int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac); int qtnf_cmd_send_add_intf(struct qtnf_vif *vif, enum nl80211_iftype iftype, - u8 *mac_addr); + int use4addr, u8 *mac_addr); int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif, - enum nl80211_iftype iftype, u8 *mac_addr); + enum nl80211_iftype iftype, + int use4addr, + u8 *mac_addr); int qtnf_cmd_send_del_intf(struct qtnf_vif *vif); int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, struct ieee80211_supported_band *band); @@ -61,6 +51,8 @@ int qtnf_cmd_send_del_sta(struct qtnf_vif *vif, int qtnf_cmd_send_scan(struct qtnf_wmac *mac); int qtnf_cmd_send_connect(struct qtnf_vif *vif, struct cfg80211_connect_params *sme); +int qtnf_cmd_send_external_auth(struct qtnf_vif *vif, + struct cfg80211_external_auth_params *auth); int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code); int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 5d18a4a917c9..ee1b75fda1dd 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include #include @@ -195,6 +182,7 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr) qtnf_scan_done(vif->mac, true); ret = qtnf_cmd_send_change_intf_type(vif, vif->wdev.iftype, + vif->wdev.use_4addr, sa->sa_data); if (ret) @@ -545,7 +533,8 @@ static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid) goto error; } - ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, vif->mac_addr); + ret = qtnf_cmd_send_add_intf(vif, vif->wdev.iftype, + vif->wdev.use_4addr, vif->mac_addr); if (ret) { pr_err("MAC%u: failed to add VIF\n", macid); goto error; diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 293055049caa..a31cff46e964 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_CORE_H_ #define _QTN_FMAC_CORE_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.c b/drivers/net/wireless/quantenna/qtnfmac/debug.c index 9f826b9ef5d9..598ece753a4b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/debug.c +++ b/drivers/net/wireless/quantenna/qtnfmac/debug.c @@ -1,32 +1,11 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include "debug.h" -#undef pr_fmt -#define pr_fmt(fmt) "qtnfmac dbg: %s: " fmt, __func__ - void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name) { bus->dbg_dir = debugfs_create_dir(name, NULL); - - if (IS_ERR_OR_NULL(bus->dbg_dir)) { - pr_warn("failed to create debugfs root dir\n"); - bus->dbg_dir = NULL; - } } void qtnf_debugfs_remove(struct qtnf_bus *bus) @@ -38,9 +17,5 @@ void qtnf_debugfs_remove(struct qtnf_bus *bus) void qtnf_debugfs_add_entry(struct qtnf_bus *bus, const char *name, int (*fn)(struct seq_file *seq, void *data)) { - struct dentry *entry; - - entry = debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn); - if (IS_ERR_OR_NULL(entry)) - pr_warn("failed to add entry (%s)\n", name); + debugfs_create_devm_seqfile(bus->dev, name, bus->dbg_dir, fn); } diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.h b/drivers/net/wireless/quantenna/qtnfmac/debug.h index d6dd12b5d434..61b45536b83a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/debug.h +++ b/drivers/net/wireless/quantenna/qtnfmac/debug.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_DEBUG_H_ #define _QTN_FMAC_DEBUG_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 8b542b431b75..6c1b886339ac 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include #include @@ -158,6 +145,19 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, const struct qlink_event_bss_join *join_info, u16 len) { + struct wiphy *wiphy = priv_to_wiphy(vif->mac); + enum ieee80211_statuscode status = le16_to_cpu(join_info->status); + struct cfg80211_chan_def chandef; + struct cfg80211_bss *bss = NULL; + u8 *ie = NULL; + size_t payload_len; + u16 tlv_type; + u16 tlv_value_len; + size_t tlv_full_len; + const struct qlink_tlv_hdr *tlv; + const u8 *rsp_ies = NULL; + size_t rsp_ies_len = 0; + if (unlikely(len < sizeof(*join_info))) { pr_err("VIF%u.%u: payload is too short (%u < %zu)\n", vif->mac->macid, vif->vifid, len, @@ -171,15 +171,131 @@ qtnf_event_handle_bss_join(struct qtnf_vif *vif, return -EPROTO; } - pr_debug("VIF%u.%u: BSSID:%pM\n", vif->mac->macid, vif->vifid, - join_info->bssid); + pr_debug("VIF%u.%u: BSSID:%pM status:%u\n", + vif->mac->macid, vif->vifid, join_info->bssid, status); + + if (status != WLAN_STATUS_SUCCESS) + goto done; + + qlink_chandef_q2cfg(wiphy, &join_info->chan, &chandef); + if (!cfg80211_chandef_valid(&chandef)) { + pr_warn("MAC%u.%u: bad channel freq=%u cf1=%u cf2=%u bw=%u\n", + vif->mac->macid, vif->vifid, + chandef.chan->center_freq, + chandef.center_freq1, + chandef.center_freq2, + chandef.width); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + + bss = cfg80211_get_bss(wiphy, chandef.chan, join_info->bssid, + NULL, 0, IEEE80211_BSS_TYPE_ESS, + IEEE80211_PRIVACY_ANY); + if (!bss) { + pr_warn("VIF%u.%u: add missing BSS:%pM chan:%u\n", + vif->mac->macid, vif->vifid, + join_info->bssid, chandef.chan->hw_value); + + if (!vif->wdev.ssid_len) { + pr_warn("VIF%u.%u: SSID unknown for BSS:%pM\n", + vif->mac->macid, vif->vifid, + join_info->bssid); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + + ie = kzalloc(2 + vif->wdev.ssid_len, GFP_KERNEL); + if (!ie) { + pr_warn("VIF%u.%u: IE alloc failed for BSS:%pM\n", + vif->mac->macid, vif->vifid, + join_info->bssid); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + + ie[0] = WLAN_EID_SSID; + ie[1] = vif->wdev.ssid_len; + memcpy(ie + 2, vif->wdev.ssid, vif->wdev.ssid_len); + + bss = cfg80211_inform_bss(wiphy, chandef.chan, + CFG80211_BSS_FTYPE_UNKNOWN, + join_info->bssid, 0, + WLAN_CAPABILITY_ESS, 100, + ie, 2 + vif->wdev.ssid_len, + 0, GFP_KERNEL); + if (!bss) { + pr_warn("VIF%u.%u: can't connect to unknown BSS: %pM\n", + vif->mac->macid, vif->vifid, + join_info->bssid); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + } + + payload_len = len - sizeof(*join_info); + tlv = (struct qlink_tlv_hdr *)join_info->ies; + + while (payload_len >= sizeof(struct qlink_tlv_hdr)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_value_len = le16_to_cpu(tlv->len); + tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr); + + if (payload_len < tlv_full_len) { + pr_warn("invalid %u TLV\n", tlv_type); + status = 
WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + + if (tlv_type == QTN_TLV_ID_IE_SET) { + const struct qlink_tlv_ie_set *ie_set; + unsigned int ie_len; + + if (payload_len < sizeof(*ie_set)) { + pr_warn("invalid IE_SET TLV\n"); + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + goto done; + } + + ie_set = (const struct qlink_tlv_ie_set *)tlv; + ie_len = tlv_value_len - + (sizeof(*ie_set) - sizeof(ie_set->hdr)); + + switch (ie_set->type) { + case QLINK_IE_SET_ASSOC_RESP: + if (ie_len) { + rsp_ies = ie_set->ie_data; + rsp_ies_len = ie_len; + } + break; + default: + pr_warn("unexpected IE type: %u\n", + ie_set->type); + break; + } + } - cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, NULL, - 0, le16_to_cpu(join_info->status), GFP_KERNEL); + payload_len -= tlv_full_len; + tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + } - if (le16_to_cpu(join_info->status) == WLAN_STATUS_SUCCESS) + if (payload_len) + pr_warn("VIF%u.%u: unexpected remaining payload: %zu\n", + vif->mac->macid, vif->vifid, payload_len); + +done: + cfg80211_connect_result(vif->netdev, join_info->bssid, NULL, 0, rsp_ies, + rsp_ies_len, status, GFP_KERNEL); + if (bss) { + if (!ether_addr_equal(vif->bssid, join_info->bssid)) + ether_addr_copy(vif->bssid, join_info->bssid); + cfg80211_put_bss(wiphy, bss); + } + + if (status == WLAN_STATUS_SUCCESS) netif_carrier_on(vif->netdev); + kfree(ie); return 0; } @@ -458,6 +574,43 @@ static int qtnf_event_handle_radar(struct qtnf_vif *vif, return 0; } +static int +qtnf_event_handle_external_auth(struct qtnf_vif *vif, + const struct qlink_event_external_auth *ev, + u16 len) +{ + struct cfg80211_external_auth_params auth = {0}; + struct wiphy *wiphy = priv_to_wiphy(vif->mac); + int ret; + + if (len < sizeof(*ev)) { + pr_err("MAC%u: payload is too short\n", vif->mac->macid); + return -EINVAL; + } + + if (!wiphy->registered || !vif->netdev) + return 0; + + if (ev->ssid_len) { + memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len); + auth.ssid.ssid_len = ev->ssid_len; + } + + auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite); + ether_addr_copy(auth.bssid, ev->bssid); + auth.action = ev->action; + + pr_info("%s: external auth bss=%pM action=%u akm=%u\n", + vif->netdev->name, auth.bssid, auth.action, + auth.key_mgmt_suite); + + ret = cfg80211_external_auth_request(vif->netdev, &auth, GFP_KERNEL); + if (ret) + pr_warn("failed to offload external auth request\n"); + + return ret; +} + static int qtnf_event_parse(struct qtnf_wmac *mac, const struct sk_buff *event_skb) { @@ -516,6 +669,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac, ret = qtnf_event_handle_radar(vif, (const void *)event, event_len); break; + case QLINK_EVENT_EXTERNAL_AUTH: + ret = qtnf_event_handle_external_auth(vif, (const void *)event, + event_len); + break; default: pr_warn("unknown event type: %x\n", event_id); break; diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.h b/drivers/net/wireless/quantenna/qtnfmac/event.h index ae759b602c2a..533ad99d045d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.h +++ b/drivers/net/wireless/quantenna/qtnfmac/event.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_EVENT_H_ #define _QTN_FMAC_EVENT_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c index 598edb814421..cbcda57105f3 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -559,6 +559,9 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data) if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts)) return IRQ_NONE; + if (!priv->msi_enabled) + qtnf_deassert_intx(ts); + priv->pcie_irq_count++; qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in); @@ -571,9 +574,6 @@ static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data) tasklet_hi_schedule(&priv->reclaim_tq); - if (!priv->msi_enabled) - qtnf_deassert_intx(ts); - return IRQ_HANDLED; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 8d62addea895..7798edcf7980 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -1,25 +1,12 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_QLINK_H_ #define _QTN_QLINK_H_ #include -#define QLINK_PROTO_VER 11 +#define QLINK_PROTO_VER 13 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -81,6 +68,7 @@ enum qlink_hw_capab { QLINK_HW_CAPAB_PWR_MGMT = BIT(4), QLINK_HW_CAPAB_OBSS_SCAN = BIT(5), QLINK_HW_CAPAB_SCAN_DWELL = BIT(6), + QLINK_HW_CAPAB_SAE = BIT(8), }; enum qlink_iface_type { @@ -105,7 +93,8 @@ struct qlink_intf_info { __le16 if_type; __le16 vlanid; u8 mac_addr[ETH_ALEN]; - u8 rsvd[2]; + u8 use4addr; + u8 rsvd[1]; } __packed; enum qlink_sta_flags { @@ -262,6 +251,7 @@ enum qlink_cmd_type { QLINK_CMD_DISCONNECT = 0x0061, QLINK_CMD_PM_SET = 0x0062, QLINK_CMD_WOWLAN_SET = 0x0063, + QLINK_CMD_EXTERNAL_AUTH = 0x0066, }; /** @@ -492,6 +482,20 @@ struct qlink_cmd_connect { u8 payload[0]; } __packed; +/** + * struct qlink_cmd_external_auth - data for QLINK_CMD_EXTERNAL_AUTH command + * + * @bssid: BSSID of the BSS to connect to + * @status: authentication status code + * @payload: variable portion of connection request. 
+ */ +struct qlink_cmd_external_auth { + struct qlink_cmd chdr; + u8 bssid[ETH_ALEN]; + __le16 status; + u8 payload[0]; +} __packed; + /** * struct qlink_cmd_disconnect - data for QLINK_CMD_DISCONNECT command * @@ -733,6 +737,7 @@ enum qlink_cmd_result { QLINK_CMD_RESULT_EALREADY, QLINK_CMD_RESULT_EADDRINUSE, QLINK_CMD_RESULT_EADDRNOTAVAIL, + QLINK_CMD_RESULT_EBUSY, }; /** @@ -936,6 +941,7 @@ enum qlink_event_type { QLINK_EVENT_BSS_LEAVE = 0x0027, QLINK_EVENT_FREQ_CHANGE = 0x0028, QLINK_EVENT_RADAR = 0x0029, + QLINK_EVENT_EXTERNAL_AUTH = 0x0030, }; /** @@ -986,13 +992,16 @@ struct qlink_event_sta_deauth { /** * struct qlink_event_bss_join - data for QLINK_EVENT_BSS_JOIN event * + * @chan: new operating channel definition * @bssid: BSSID of a BSS which interface tried to joined. * @status: status of joining attempt, see &enum ieee80211_statuscode. */ struct qlink_event_bss_join { struct qlink_event ehdr; + struct qlink_chandef chan; u8 bssid[ETH_ALEN]; __le16 status; + u8 ies[0]; } __packed; /** @@ -1108,6 +1117,24 @@ struct qlink_event_radar { u8 rsvd[3]; } __packed; +/** + * struct qlink_event_external_auth - data for QLINK_EVENT_EXTERNAL_AUTH event + * + * @ssid: SSID announced by BSS + * @ssid_len: SSID length + * @bssid: BSSID of the BSS to connect to + * @akm_suite: AKM suite for external authentication + * @action: action type/trigger for external authentication + */ +struct qlink_event_external_auth { + struct qlink_event ehdr; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + u8 bssid[ETH_ALEN]; + __le32 akm_suite; + u8 action; +} __packed; + /* QLINK TLVs (Type-Length Values) definitions */ @@ -1182,7 +1209,7 @@ struct qlink_iface_limit_record { struct qlink_tlv_frag_rts_thr { struct qlink_tlv_hdr hdr; - __le16 thr; + __le32 thr; } __packed; struct qlink_tlv_rlimit { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index aeeda81b09ea..72bfd17cb687 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -1,17 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 960d5d97492f..781ea7fe79f2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_QLINK_UTIL_H_ #define _QTN_FMAC_QLINK_UTIL_H_ @@ -69,6 +56,17 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb, memcpy(hdr->val, &tmp, sizeof(tmp)); } +static inline void qtnf_cmd_skb_put_tlv_u32(struct sk_buff *skb, + u16 tlv_id, u32 value) +{ + struct qlink_tlv_hdr *hdr = skb_put(skb, sizeof(*hdr) + sizeof(value)); + __le32 tmp = cpu_to_le32(value); + + hdr->type = cpu_to_le16(tlv_id); + hdr->len = cpu_to_le16(sizeof(value)); + memcpy(hdr->val, &tmp, sizeof(tmp)); +} + u16 qlink_iface_type_to_nl_mask(u16 qlink_type); u8 qlink_chan_width_mask_to_nl(u16 qlink_mask); void qlink_chandef_q2cfg(struct wiphy *wiphy, diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h index 40295a511224..82d879950b62 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_HW_IDS_H_ #define _QTN_HW_IDS_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c index 2ec334199c2b..ff678951d3b2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include #include diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h index c2a3702a9ee7..52cac5439b03 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_SHM_IPC_H_ #define _QTN_FMAC_SHM_IPC_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h index 95a5f89a8b1a..78be70df1218 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h +++ b/drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_SHM_IPC_DEFS_H_ #define _QTN_FMAC_SHM_IPC_DEFS_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.c b/drivers/net/wireless/quantenna/qtnfmac/trans.c index 345f34ec9750..95356e280e23 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/trans.c +++ b/drivers/net/wireless/quantenna/qtnfmac/trans.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include #include diff --git a/drivers/net/wireless/quantenna/qtnfmac/trans.h b/drivers/net/wireless/quantenna/qtnfmac/trans.h index 9a473e07af0f..c0b76f871b31 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/trans.h +++ b/drivers/net/wireless/quantenna/qtnfmac/trans.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #ifndef _QTN_FMAC_TRANS_H_ #define _QTN_FMAC_TRANS_H_ diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c index 3bc96b264769..cda6f5f3f38a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/util.c @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015-2016 Quantenna Communications, Inc. - * All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include "util.h" #include "qtn_hw_ids.h" diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h index b8744baac332..a14b7078a9c7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/util.h @@ -1,18 +1,5 @@ -/* - * Copyright (c) 2015 Quantenna Communications - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2015 Quantenna Communications. All rights reserved. 
*/ #ifndef QTNFMAC_UTIL_H #define QTNFMAC_UTIL_H diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 0e95555aec62..a03b5284a050 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -2966,6 +2966,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, struct channel_info *info) { u8 rfcsr; + int idx = rf->channel-1; rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); @@ -3003,60 +3004,56 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, rt2800_freq_cal_mode1(rt2x00dev); - if (rf->channel <= 14) { - int idx = rf->channel-1; - - if (rt2x00_has_cap_bt_coexist(rt2x00dev)) { - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { - /* r55/r59 value array of channel 1~14 */ - static const char r55_bt_rev[] = {0x83, 0x83, - 0x83, 0x73, 0x73, 0x63, 0x53, 0x53, - 0x53, 0x43, 0x43, 0x43, 0x43, 0x43}; - static const char r59_bt_rev[] = {0x0e, 0x0e, - 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07}; - - rt2800_rfcsr_write(rt2x00dev, 55, - r55_bt_rev[idx]); - rt2800_rfcsr_write(rt2x00dev, 59, - r59_bt_rev[idx]); - } else { - static const char r59_bt[] = {0x8b, 0x8b, 0x8b, - 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89, - 0x88, 0x88, 0x86, 0x85, 0x84}; - - rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]); - } + if (rt2x00_has_cap_bt_coexist(rt2x00dev)) { + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { + /* r55/r59 value array of channel 1~14 */ + static const char r55_bt_rev[] = {0x83, 0x83, + 0x83, 0x73, 0x73, 0x63, 0x53, 0x53, + 0x53, 0x43, 0x43, 0x43, 0x43, 0x43}; + static const char r59_bt_rev[] = {0x0e, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07}; + + rt2800_rfcsr_write(rt2x00dev, 55, + r55_bt_rev[idx]); + rt2800_rfcsr_write(rt2x00dev, 59, + r59_bt_rev[idx]); } else { - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { - static const char r55_nonbt_rev[] = {0x23, 0x23, - 0x23, 0x23, 0x13, 0x13, 0x03, 0x03, - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}; - static const char r59_nonbt_rev[] = {0x07, 0x07, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, 0x06, 0x05, 0x04, 0x04}; - - rt2800_rfcsr_write(rt2x00dev, 55, - r55_nonbt_rev[idx]); - rt2800_rfcsr_write(rt2x00dev, 59, - r59_nonbt_rev[idx]); - } else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392) || - rt2x00_rt(rt2x00dev, RT6352)) { - static const char r59_non_bt[] = {0x8f, 0x8f, - 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, - 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; - - rt2800_rfcsr_write(rt2x00dev, 59, - r59_non_bt[idx]); - } else if (rt2x00_rt(rt2x00dev, RT5350)) { - static const char r59_non_bt[] = {0x0b, 0x0b, - 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, - 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06}; - - rt2800_rfcsr_write(rt2x00dev, 59, - r59_non_bt[idx]); - } + static const char r59_bt[] = {0x8b, 0x8b, 0x8b, + 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89, + 0x88, 0x88, 0x86, 0x85, 0x84}; + + rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]); + } + } else { + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { + static const char r55_nonbt_rev[] = {0x23, 0x23, + 0x23, 0x23, 0x13, 0x13, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}; + static const char r59_nonbt_rev[] = {0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x06, 0x05, 0x04, 0x04}; + + rt2800_rfcsr_write(rt2x00dev, 55, + r55_nonbt_rev[idx]); + rt2800_rfcsr_write(rt2x00dev, 59, + r59_nonbt_rev[idx]); + } else if 
(rt2x00_rt(rt2x00dev, RT5390) || + rt2x00_rt(rt2x00dev, RT5392) || + rt2x00_rt(rt2x00dev, RT6352)) { + static const char r59_non_bt[] = {0x8f, 0x8f, + 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, + 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; + + rt2800_rfcsr_write(rt2x00dev, 59, + r59_non_bt[idx]); + } else if (rt2x00_rt(rt2x00dev, RT5350)) { + static const char r59_non_bt[] = {0x0b, 0x0b, + 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, + 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06}; + + rt2800_rfcsr_write(rt2x00dev, 59, + r59_non_bt[idx]); } } } @@ -3861,10 +3858,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, if (rt2x00_rt(rt2x00dev, RT3572)) rt2800_rfcsr_write(rt2x00dev, 8, 0); - if (rt2x00_rt(rt2x00dev, RT6352)) + if (rt2x00_rt(rt2x00dev, RT6352)) { tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG); - else + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1); + } else { tx_pin = 0; + } switch (rt2x00dev->default_ant.tx_chain_num) { case 3: @@ -3896,24 +3895,29 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, switch (rt2x00dev->default_ant.rx_chain_num) { case 3: /* Turn on tertiary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, + rf->channel > 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, + rf->channel <= 14); /* fall-through */ case 2: /* Turn on secondary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, + rf->channel > 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, + rf->channel <= 14); /* fall-through */ case 1: /* Turn on primary LNAs */ - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, + rf->channel > 14); + rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, + rf->channel <= 14); break; } rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1); - rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1); /* mt7620 */ rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); @@ -3985,13 +3989,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_bbp_write(rt2x00dev, 195, 141); rt2800_bbp_write(rt2x00dev, 196, reg); - /* AGC init */ - if (rt2x00_rt(rt2x00dev, RT6352)) - reg = 0x04; - else - reg = rf->channel <= 14 ? 0x1c : 0x24; - - reg += 2 * rt2x00dev->lna_gain; + /* AGC init. + * Despite the vendor driver using different values here for + * RT6352 chip, we use 0x1c for now. This may have to be changed + * once TSSI got implemented. + */ + reg = (rf->channel <= 14 ? 
0x1c : 0x24) + 2*rt2x00dev->lna_gain; rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); rt2800_iq_calibrate(rt2x00dev, rf->channel); @@ -5477,7 +5480,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); rt2800_register_write(rt2x00dev, MIMO_PS_CFG, 0x00000002); rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x00150F0F); - rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x06060606); + rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000); rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0); rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0); rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 61ba573e8bf1..05a2e8da412c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -656,36 +656,24 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) intf->driver_folder = debugfs_create_dir(intf->rt2x00dev->ops->name, rt2x00dev->hw->wiphy->debugfsdir); - if (IS_ERR(intf->driver_folder) || !intf->driver_folder) - goto exit; intf->driver_entry = rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); - if (IS_ERR(intf->driver_entry) || !intf->driver_entry) - goto exit; intf->chipset_entry = rt2x00debug_create_file_chipset("chipset", intf, &intf->chipset_blob); - if (IS_ERR(intf->chipset_entry) || !intf->chipset_entry) - goto exit; intf->dev_flags = debugfs_create_file("dev_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_dev_flags); - if (IS_ERR(intf->dev_flags) || !intf->dev_flags) - goto exit; intf->cap_flags = debugfs_create_file("cap_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_cap_flags); - if (IS_ERR(intf->cap_flags) || !intf->cap_flags) - goto exit; intf->register_folder = debugfs_create_dir("register", intf->driver_folder); - if (IS_ERR(intf->register_folder) || !intf->register_folder) - goto exit; #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ ({ \ @@ -695,9 +683,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) 0600, \ (__intf)->register_folder, \ &(__intf)->offset_##__name); \ - if (IS_ERR((__intf)->__name##_off_entry) || \ - !(__intf)->__name##_off_entry) \ - goto exit; \ \ (__intf)->__name##_val_entry = \ debugfs_create_file(__stringify(__name) "_value", \ @@ -705,9 +690,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) (__intf)->register_folder, \ (__intf), \ &rt2x00debug_fop_##__name); \ - if (IS_ERR((__intf)->__name##_val_entry) || \ - !(__intf)->__name##_val_entry) \ - goto exit; \ } \ }) @@ -721,15 +703,10 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) intf->queue_folder = debugfs_create_dir("queue", intf->driver_folder); - if (IS_ERR(intf->queue_folder) || !intf->queue_folder) - goto exit; intf->queue_frame_dump_entry = debugfs_create_file("dump", 0400, intf->queue_folder, intf, &rt2x00debug_fop_queue_dump); - if (IS_ERR(intf->queue_frame_dump_entry) - || !intf->queue_frame_dump_entry) - goto exit; skb_queue_head_init(&intf->frame_dump_skbqueue); init_waitqueue_head(&intf->frame_dump_waitqueue); @@ -747,10 +724,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) #endif return; - -exit: - rt2x00debug_deregister(rt2x00dev); - rt2x00_err(rt2x00dev, "Failed to register debug handler\n"); } void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c 
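/*
 * Editor's note: the rt2800lib.c hunks above stop driving both the A-band and
 * G-band LNA enables unconditionally; each rt2x00_set_field32() now takes a
 * condition on the tuned channel as its value, so only the LNAs matching the
 * current band are powered. TX_PIN_CFG_RFRX_EN moves into the RT6352-only
 * branch, the RT6352 special case for the AGC init value is dropped in favour
 * of the common 0x1c/0x24 + 2*lna_gain formula (per the new comment, until
 * TSSI support lands), and idx is hoisted out of the removed channel <= 14
 * guard, apparently because the RF5350/5390/5392 parts only tune 2.4 GHz
 * channels anyway. The per-chain gating could be captured by a small helper
 * along the lines of the sketch below; rt2800_enable_lna_pair() is an
 * invented name shown only to illustrate the intent.
 */
static void rt2800_enable_lna_pair(u32 *tx_pin, unsigned int chan,
                                   const struct rt2x00_field32 a_en,
                                   const struct rt2x00_field32 g_en)
{
        /* channels 1..14 are 2.4 GHz (G band); anything above is 5 GHz (A band) */
        rt2x00_set_field32(tx_pin, a_en, chan > 14);
        rt2x00_set_field32(tx_pin, g_en, chan <= 14);
}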
b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index 4c5de8fc8f12..52b9fc480f8b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -321,97 +321,12 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { - struct hw_key_entry key_entry; - struct rt2x00_field32 field; - u32 mask; - u32 reg; - - if (crypto->cmd == SET_KEY) { - /* - * rt2x00lib can't determine the correct free - * key_idx for shared keys. We have 1 register - * with key valid bits. The goal is simple, read - * the register, if that is full we have no slots - * left. - * Note that each BSS is allowed to have up to 4 - * shared keys, so put a mask over the allowed - * entries. - */ - mask = (0xf << crypto->bssidx); - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0); - reg &= mask; - - if (reg && reg == mask) - return -ENOSPC; - - key->hw_key_idx += reg ? ffz(reg) : 0; - - /* - * Upload key to hardware - */ - memcpy(key_entry.key, crypto->key, - sizeof(key_entry.key)); - memcpy(key_entry.tx_mic, crypto->tx_mic, - sizeof(key_entry.tx_mic)); - memcpy(key_entry.rx_mic, crypto->rx_mic, - sizeof(key_entry.rx_mic)); - - reg = SHARED_KEY_ENTRY(key->hw_key_idx); - rt2x00mmio_register_multiwrite(rt2x00dev, reg, - &key_entry, sizeof(key_entry)); - - /* - * The cipher types are stored over 2 registers. - * bssidx 0 and 1 keys are stored in SEC_CSR1 and - * bssidx 1 and 2 keys are stored in SEC_CSR5. - * Using the correct defines correctly will cause overhead, - * so just calculate the correct offset. - */ - if (key->hw_key_idx < 8) { - field.bit_offset = (3 * key->hw_key_idx); - field.bit_mask = 0x7 << field.bit_offset; - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR1); - rt2x00_set_field32(®, field, crypto->cipher); - rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, reg); - } else { - field.bit_offset = (3 * (key->hw_key_idx - 8)); - field.bit_mask = 0x7 << field.bit_offset; - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR5); - rt2x00_set_field32(®, field, crypto->cipher); - rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, reg); - } - - /* - * The driver does not support the IV/EIV generation - * in hardware. However it doesn't support the IV/EIV - * inside the ieee80211 frame either, but requires it - * to be provided separately for the descriptor. - * rt2x00lib will cut the IV/EIV data out of all frames - * given to us by mac80211, but we must tell mac80211 - * to generate the IV/EIV data. - */ - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; - } - /* - * SEC_CSR0 contains only single-bit fields to indicate - * a particular key is valid. Because using the FIELD32() - * defines directly will cause a lot of overhead, we use - * a calculation to determine the correct bit directly. + * Let the software handle the shared keys, + * since the hardware decryption does not work reliably, + * because the firmware does not know the key's keyidx. 
*/ - mask = 1 << key->hw_key_idx; - - reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR0); - if (crypto->cmd == SET_KEY) - reg |= mask; - else if (crypto->cmd == DISABLE_KEY) - reg &= ~mask; - rt2x00mmio_register_write(rt2x00dev, SEC_CSR0, reg); - - return 0; + return -EOPNOTSUPP; } static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev, diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 33ad87528d9a..44a943d18b84 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -959,7 +959,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx, if (proto == htons(ETH_P_AARP) || proto == htons(ETH_P_IPX)) { /* This is the selective translation table, only 2 entries */ writeb(0xf8, - &((struct snaphdr_t __iomem *)ptx->var)->org[3]); + &((struct snaphdr_t __iomem *)ptx->var)->org[2]); } /* Copy body of ethernet packet without ethernet header */ memcpy_toio((void __iomem *)&ptx->var + @@ -2211,7 +2211,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, untranslate(local, skb, total_len); } } else { /* sniffer mode, so just pass whole packet */ - }; + } /************************/ /* Now pick up the rest of the fragments if any */ diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile index 2966681efaef..5d6b06d3c02c 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile @@ -2,4 +2,4 @@ rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o obj-$(CONFIG_RTL8180) += rtl818x_pci.o -ccflags-y += -Idrivers/net/wireless/realtek/rtl818x +ccflags-y += -I $(srctree)/$(src)/.. diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index 225c1c8851cc..d5f65372356b 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -803,7 +803,7 @@ static void rtl8180_config_cardbus(struct ieee80211_hw *dev) rtl818x_iowrite16(priv, FEMR_SE, 0xffff); } else { reg16 = rtl818x_ioread16(priv, &priv->map->FEMR); - reg16 |= (1 << 15) | (1 << 14) | (1 << 4); + reg16 |= (1 << 15) | (1 << 14) | (1 << 4); rtl818x_iowrite16(priv, &priv->map->FEMR, reg16); } @@ -1723,8 +1723,8 @@ static int rtl8180_probe(struct pci_dev *pdev, { struct ieee80211_hw *dev; struct rtl8180_priv *priv; - unsigned long mem_addr, mem_len; - unsigned int io_addr, io_len; + unsigned long mem_len; + unsigned int io_len; int err; const char *chip_name, *rf_name = NULL; u32 reg; @@ -1743,9 +1743,7 @@ static int rtl8180_probe(struct pci_dev *pdev, goto err_disable_dev; } - io_addr = pci_resource_start(pdev, 0); io_len = pci_resource_len(pdev, 0); - mem_addr = pci_resource_start(pdev, 1); mem_len = pci_resource_len(pdev, 1); if (mem_len < sizeof(struct rtl818x_csr) || diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile index ff074912a095..95bac73ece7c 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile @@ -2,4 +2,4 @@ rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o obj-$(CONFIG_RTL8187) += rtl8187.o -ccflags-y += -Idrivers/net/wireless/realtek/rtl818x +ccflags-y += -I $(srctree)/$(src)/.. 
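/*
 * Editor's note: rt61pci now declines shared/group keys outright, and the
 * -EOPNOTSUPP return is what makes that safe: the error propagates up through
 * rt2x00mac_set_key() to mac80211, which treats it as "no hardware offload
 * for this key" and keeps the key in software instead of failing. Reduced to
 * a sketch at the mac80211 driver-op level (example_set_key() is an invented
 * name, not part of any driver):
 */
static int example_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                           struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta,
                           struct ieee80211_key_conf *key)
{
        /*
         * Group (shared) keys carry no PAIRWISE flag; declining them keeps
         * them in software, while pairwise keys could still be programmed
         * into hardware at this point.
         */
        if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                return -EOPNOTSUPP;

        return 0;       /* pretend the pairwise key was accepted by hardware */
}
/*
 * Two related small fixes above: in ray_cs.c, snaphdr_t.org[] holds the
 * 3-byte OUI, so the 0xf8 write belongs at index 2 (the last byte) rather
 * than index 3; and the rtl818x Makefiles switch to
 * "-I $(srctree)/$(src)/.." so the extra include path keeps working for
 * separate-output-directory (O=) builds.
 */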
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index ef9b502ce576..217d2a7a43c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "rc.h" @@ -452,6 +430,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); } else { u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; + get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1); SET_IEEE80211_PERM_ADDR(hw, rtlmac1); } @@ -481,7 +460,6 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw) (void *)rtl_fwevt_wq_callback); INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, (void *)rtl_c2hcmd_wq_callback); - } void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq) @@ -640,6 +618,7 @@ static void _rtl_query_shortgi(struct ieee80211_hw *hw, u8 rate_flag = info->control.rates[0].flags; u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0; u8 sgi_80 = 0, bw_80 = 0; + tcb_desc->use_shortgi = false; if (sta == NULL) @@ -1872,6 +1851,7 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw, return 0; } + int rtl_tx_agg_oper(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u16 tid) { @@ -2095,7 +2075,6 @@ void rtl_watchdog_wq_callback(void *data) * busytraffic we don't change channel */ if (mac->link_state >= MAC80211_LINKED) { - /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */ for (idx = 0; idx <= 2; idx++) { rtlpriv->link_info.num_rx_in4period[idx] = @@ -2172,8 +2151,6 @@ label_lps_done: ; } - rtlpriv->link_info.num_rx_inperiod = 0; - rtlpriv->link_info.num_tx_inperiod = 0; for (tid = 0; tid <= 7; tid++) rtlpriv->link_info.tidtx_inperiod[tid] = 0; @@ -2236,6 +2213,8 @@ label_lps_done: rtlpriv->btcoexist.btc_info.in_4way = false; } + rtlpriv->link_info.num_rx_inperiod = 0; + rtlpriv->link_info.num_tx_inperiod = 0; rtlpriv->link_info.bcn_rx_inperiod = 0; /* <6> scan list */ @@ -2255,6 +2234,7 @@ void rtl_watch_dog_timer_callback(struct timer_list *t) mod_timer(&rtlpriv->works.watchdog_timer, jiffies + MSECS(RTL_WATCH_DOG_TIME)); } + void rtl_fwevt_wq_callback(void *data) { struct rtl_works *rtlworks = @@ -2311,11 +2291,10 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; - u8 cmd_id, cmd_seq, cmd_len; + u8 cmd_id, cmd_len; u8 *cmd_buf = NULL; cmd_id = GET_C2H_CMD_ID(skb->data); 
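/*
 * Editor's note: when no valid address can be read from the efuse, the
 * base.c hunk above no longer hands every device the identical fallback MAC;
 * it keeps the Realtek OUI (00:e0:4c) and the fixed middle bytes but
 * randomizes the last octet, so two such NICs are less likely to collide.
 * A minimal sketch of that fallback, assuming it is only reached when the
 * efuse address is invalid:
 */
static void example_fallback_perm_addr(struct ieee80211_hw *hw)
{
        u8 addr[ETH_ALEN] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };

        /* randomize only the final byte; the vendor OUI stays intact */
        get_random_bytes(&addr[ETH_ALEN - 1], 1);
        SET_IEEE80211_PERM_ADDR(hw, addr);
}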
- cmd_seq = GET_C2H_SEQ(skb->data); cmd_len = skb->len - C2H_DATA_OFFSET; cmd_buf = GET_C2H_DATA_PTR(skb->data); @@ -2407,6 +2386,7 @@ void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t) rtlpriv->cfg->ops->dualmac_easy_concurrent(hw); } + /********************************************************* * * frame process functions diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index a7ae40eaa3cd..99565ad09cdc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_BASE_H__ #define __RTL_BASE_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h index 02dff4c3f664..30a25480752d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * Larry Finger - * - ******************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2007-2011 Realtek Corporation.*/ #ifndef __HALBT_PRECOMP_H__ #define __HALBT_PRECOMP_H__ @@ -49,8 +28,6 @@ #include "halbtc8821a2ant.h" #include "halbtc8821a1ant.h" -#define GetDefaultAdapter(padapter) padapter - #define BIT0 0x00000001 #define BIT1 0x00000002 #define BIT2 0x00000004 diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c index f22fec093f1d..3ebc7c93b1e9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c @@ -1,36 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ -/************************************************************** - * Description: - * - * This file is for RTL8192E Co-exist mechanism - * - * History - * 2012/11/15 Cosa first check in. - * - **************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2012 Realtek Corporation.*/ /************************************************************** * include files diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h index b8c95c7afc06..41ac0d5dcc9c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2012 Realtek Corporation.*/ + /***************************************************************** * The following is for 8192E 2Ant BT Co-exist definition *****************************************************************/ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c index 59553db020ef..5f5739904edf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2012 Realtek Corporation.*/ /*************************************************************** * Description: diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h index 934f27893c16..9d41e11388ad 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2012 Realtek Corporation.*/ + /********************************************************************** * The following is for 8723B 1ANT BT Co-exist definition **********************************************************************/ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c index 6597f7cb3411..9f7b9af5bdcd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2012 Realtek Corporation.*/ + /*************************************************************** * Description: * diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h index aa24da402fb9..08aad6ef4046 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2012 Realtek Corporation.*/ + #ifndef _HAL8723B_2_ANT #define _HAL8723B_2_ANT diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c index b5d65872db84..fa5b73f81c57 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2012 Realtek Corporation.*/ /************************************************************** * Description: diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h index a498ff5b1b9c..a63fb79a971a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2012 Realtek Corporation.*/ /*=========================================== * The following is for 8821A 1ANT BT Co-exist definition diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c index 01a9d303603b..e9e211fda264 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2012 Realtek Corporation.*/ /************************************************************ * Description: diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h index ce3e58c4e660..3df0ee8bd37b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2012 Realtek Corporation.*/ /*=========================================== * The following is for 8821A 2Ant BT Co-exist definition diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c index 951b8c1e0153..145d6f9c017a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c @@ -1,17 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2016-2017 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2016-2017 Realtek Corporation.*/ + #include "halbt_precomp.h" void ex_hal8822b_wifi_only_hw_config(struct wifi_only_cfg *wifionlycfg) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h index 6ec356542eea..5fc66cea74b2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h @@ -1,17 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2016-2017 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2016-2017 Realtek Corporation.*/ + #ifndef __INC_HAL8822BWIFIONLYHWCFG_H #define __INC_HAL8822BWIFIONLYHWCFG_H diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index d748aab66aa2..2ac0481b29ef 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - ******************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2007-2013 Realtek Corporation.*/ #include "halbt_precomp.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index 9eae87d19120..ee9aeddf1ebc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __HALBTC_OUT_SRC_H__ #define __HALBTC_OUT_SRC_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index cce4a37ea408..0e509c33e9e6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ + #include "../wifi.h" #include #include diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index 8c996055de71..bf2cf8505aca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL_BTC_H__ #define __RTL_BTC_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c index f7a7dcbf945e..bf0e0bb1f99b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/cam.c +++ b/drivers/net/wireless/realtek/rtlwifi/cam.c @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #include "wifi.h" #include "cam.h" #include diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h index e2e647d511c1..2461fa9afda0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/cam.h +++ b/drivers/net/wireless/realtek/rtlwifi/cam.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_CAM_H_ #define __RTL_CAM_H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 4bf7967590ca..f73e690bbe8e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "core.h" @@ -210,6 +188,7 @@ static void rtl_op_tx(struct ieee80211_hw *hw, struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_tcb_desc tcb_desc; + memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON)) @@ -368,12 +347,14 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw, mutex_unlock(&rtlpriv->locks.conf_mutex); } + static int rtl_op_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype new_type, bool p2p) { struct rtl_priv *rtlpriv = rtl_priv(hw); int ret; + rtl_op_remove_interface(hw, vif); vif->type = new_type; @@ -903,6 +884,7 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw, rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&mac->rx_conf)); } + static int rtl_op_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -955,6 +937,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_sta_info *sta_entry; + if (sta) { RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, "Remove sta addr is %pM\n", sta->addr); @@ -967,6 +950,7 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw, } return 0; } + static int _rtl_get_hal_qnum(u16 queue) { int qnum; @@ -1088,6 +1072,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, /*TODO: reference to enum ieee80211_bss_change */ if (changed & BSS_CHANGED_ASSOC) { u8 mstatus; + if (bss_conf->assoc) { struct ieee80211_sta *sta = NULL; u8 keep_alive = 10; @@ -1316,6 +1301,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, * set in sta_add, and will be NULL here */ if (vif->type == NL80211_IFTYPE_STATION) { struct rtl_sta_info *sta_entry; + sta_entry = (struct rtl_sta_info *)sta->drv_priv; sta_entry->wireless_mode = mac->mode; } @@ -1957,5 +1943,7 @@ void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igvalue) dm_digtable->bt30_cur_igi = 0x32; dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX; dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; + dm_digtable->pre_cck_fa_state = 0; + dm_digtable->cur_cck_fa_state = 0; } EXPORT_SYMBOL(rtl_dm_diginit); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.h b/drivers/net/wireless/realtek/rtlwifi/core.h index 782ac2fc4b28..7447ff456710 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.h +++ b/drivers/net/wireless/realtek/rtlwifi/core.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_CORE_H__ #define __RTL_CORE_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index d70385be9976..a051de16284d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "cam.h" @@ -463,12 +442,9 @@ static const struct file_operations file_ops_common_write = { #define RTL_DEBUGFS_ADD_CORE(name, mode, fopname) \ do { \ rtl_debug_priv_ ##name.rtlpriv = rtlpriv; \ - if (!debugfs_create_file(#name, mode, \ - parent, &rtl_debug_priv_ ##name, \ - &file_ops_ ##fopname)) \ - pr_err("Unable to initialize debugfs:%s/%s\n", \ - rtlpriv->dbg.debugfs_name, \ - #name); \ + debugfs_create_file(#name, mode, parent, \ + &rtl_debug_priv_ ##name, \ + &file_ops_ ##fopname); \ } while (0) #define RTL_DEBUGFS_ADD(name) \ @@ -486,11 +462,6 @@ void rtl_debug_add_one(struct ieee80211_hw *hw) rtlpriv->dbg.debugfs_dir = debugfs_create_dir(rtlpriv->dbg.debugfs_name, debugfs_topdir); - if (!rtlpriv->dbg.debugfs_dir) { - pr_err("Unable to init debugfs:/%s/%s\n", rtlpriv->cfg->name, - rtlpriv->dbg.debugfs_name); - return; - } parent = rtlpriv->dbg.debugfs_dir; diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index ad6834af618b..69f169d4d4ae 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
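/*
 * Editor's note: this debug.c hunk and the earlier rt2x00debug.c hunk follow
 * the same kernel-wide debugfs cleanup rule: callers are not expected to
 * check what debugfs_create_dir()/debugfs_create_file() return, because a
 * failing debugfs call only costs the debug entries and is never a reason to
 * fail or unwind driver initialization. Reduced to a sketch (the names below
 * are invented for illustration):
 */
static void example_register_debugfs(struct dentry *topdir, void *priv,
                                     const struct file_operations *fops)
{
        struct dentry *dir;

        dir = debugfs_create_dir("example", topdir);
        /* no IS_ERR()/NULL check: a missing debugfs entry is not fatal */
        debugfs_create_file("state", 0400, dir, priv, fops);
}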
- * - * Larry Finger - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_DEBUG_H__ #define __RTL_DEBUG_H__ @@ -157,7 +136,7 @@ enum dbgp_flag_e { FEEPROM = 11, FPWR = 12, FDM = 13, - FDBGCtrl = 14, + FDBGCTRL = 14, FC2H = 15, FBT = 16, FINIT = 17, diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 9729e51fce38..e68340dfd980 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * Tmis program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * Tmis program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * Tme full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #include "wifi.h" #include "efuse.h" #include "pci.h" @@ -386,20 +365,20 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 section_idx, i, Base; + u8 section_idx, i, base; u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used; bool wordchanged, result = true; for (section_idx = 0; section_idx < 16; section_idx++) { - Base = section_idx * 8; + base = section_idx * 8; wordchanged = false; for (i = 0; i < 8; i = i + 2) { - if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] != - rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) || - (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] != - rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i + - 1])) { + if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] != + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i] || + rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i + 1] != + rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i + + 1]) { words_need++; wordchanged = true; } @@ -495,6 +474,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) if (word_en != 0x0F) { u8 tmpdata[8]; + memcpy(tmpdata, &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base], 8); @@ -508,7 +488,6 @@ bool efuse_shadow_update(struct ieee80211_hw *hw) break; } } - } efuse_power_switch(hw, true, false); @@ -540,7 +519,7 @@ void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw) } EXPORT_SYMBOL(rtl_efuse_shadow_map_update); -void efuse_force_write_vendor_Id(struct ieee80211_hw *hw) +void efuse_force_write_vendor_id(struct ieee80211_hw *hw) { u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF }; @@ -683,6 +662,7 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data) static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse) { struct rtl_priv *rtlpriv = rtl_priv(hw); + efuse_power_switch(hw, false, true); 
read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse); efuse_power_switch(hw, false, false); @@ -833,6 +813,7 @@ static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr, if (0x0F != (badworden & 0x0F)) { u8 reorg_offset = offset; u8 reorg_worden = badworden; + efuse_pg_packet_write(hw, reorg_offset, reorg_worden, originaldata); @@ -922,6 +903,7 @@ static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr, if (0x0F != (badworden & 0x0F)) { u8 reorg_offset = tmp_pkt.offset; u8 reorg_worden = badworden; + efuse_pg_packet_write(hw, reorg_offset, reorg_worden, originaldata); @@ -978,7 +960,6 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw, while (continual && (efuse_addr < (EFUSE_MAX_SIZE - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) { - if (write_state == PG_STATE_HEADER) { dataempty = true; badworden = 0x0F; @@ -1132,40 +1113,39 @@ void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u8 tempval; - u16 tmpV16; + u16 tmpv16; if (pwrstate && (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)) { - if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE && rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE) { rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_ACCESS], 0x69); } else { - tmpV16 = + tmpv16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL]); - if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) { - tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V]; + if (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) { + tmpv16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V]; rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL], - tmpV16); + tmpv16); } } - tmpV16 = rtl_read_word(rtlpriv, + tmpv16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]); - if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) { - tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR]; + if (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) { + tmpv16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR]; rtl_write_word(rtlpriv, - rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16); + rtlpriv->cfg->maps[SYS_FUNC_EN], tmpv16); } - tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]); - if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) || - (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) { - tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] | + tmpv16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]); + if ((!(tmpv16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) || + (!(tmpv16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) { + tmpv16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] | rtlpriv->cfg->maps[EFUSE_ANA8M]); rtl_write_word(rtlpriv, - rtlpriv->cfg->maps[SYS_CLK], tmpV16); + rtlpriv->cfg->maps[SYS_CLK], tmpv16); } } @@ -1240,6 +1220,7 @@ static u16 efuse_get_current_size(struct ieee80211_hw *hw) static u8 efuse_calculate_word_cnts(u8 word_en) { u8 word_cnts = 0; + if (!(word_en & BIT(0))) word_cnts++; if (!(word_en & BIT(1))) diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h index dfa31c13fc7a..1ec59f439382 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_EFUSE_H_ #define __RTL_EFUSE_H_ @@ -107,7 +85,7 @@ void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, bool efuse_shadow_update(struct ieee80211_hw *hw); bool efuse_shadow_update_chk(struct ieee80211_hw *hw); void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); -void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); +void efuse_force_write_vendor_id(struct ieee80211_hw *hw); void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate); int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 5d1fda16fc8c..48ca52102cef 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h index 3fb56c845a61..866861626a0a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.h +++ b/drivers/net/wireless/realtek/rtlwifi/pci.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_PCI_H__ #define __RTL_PCI_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 479a4cfc245d..70f04c2f5b17 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "base.h" @@ -740,6 +718,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09}; u8 noa_num, index , i, noa_index = 0; bool find_p2p_ie = false , find_p2p_ps_ie = false; + pos = (u8 *)mgmt->u.beacon.variable; end = data + len; ie = NULL; diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h index 0df2b5203030..aaa2ed2bbe16 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.h +++ b/drivers/net/wireless/realtek/rtlwifi/ps.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __REALTEK_RTL_PCI_PS_H__ #define __REALTEK_RTL_PCI_PS_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h index 17ce0cb2c35c..db1765c6fef6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h +++ b/drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_PWRSEQCMD_H__ #define __RTL8723E_PWRSEQCMD_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 6c78c6dabbdf..cf8e42a01015 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "base.h" @@ -258,6 +236,7 @@ static void rtl_tx_status(void *ppriv, !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { if (ieee80211_is_data_qos(fc)) { u8 tid = rtl_get_tid(skb); + if (_rtl_tx_aggr_check(rtlpriv, sta_entry, tid)) { sta_entry->tids[tid].agg.agg_state = @@ -315,6 +294,7 @@ static void rtl_rate_free_sta(void *rtlpriv, struct ieee80211_sta *sta, void *priv_sta) { struct rtl_rate_priv *rate_priv = priv_sta; + kfree(rate_priv); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.h b/drivers/net/wireless/realtek/rtlwifi/rc.h index f29643d60d6b..1c0a17370093 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rc.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_RC_H__ #define __RTL_RC_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index 1bf3eb25c1da..6ccb5b93a595 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -1,32 +1,10 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "regd.h" -static struct country_code_to_enum_rd allCountries[] = { +static struct country_code_to_enum_rd all_countries[] = { {COUNTRY_CODE_FCC, "US"}, {COUNTRY_CODE_IC, "US"}, {COUNTRY_CODE_ETSI, "EC"}, @@ -63,7 +41,6 @@ static struct country_code_to_enum_rd allCountries[] = { NL80211_RRF_PASSIVE_SCAN | \ NL80211_RRF_NO_OFDM) - /* 5G chan 36 - chan 64*/ #define RTL819x_5GHZ_5150_5350 \ REG_RULE(5150-10, 5350+10, 80, 0, 30, 0) @@ -391,9 +368,9 @@ static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode) { int i; - for (i = 0; i < ARRAY_SIZE(allCountries); i++) { - if (allCountries[i].countrycode == countrycode) - return &allCountries[i]; + for (i = 0; i < ARRAY_SIZE(all_countries); i++) { + if (all_countries[i].countrycode == countrycode) + return &all_countries[i]; } return NULL; } diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.h b/drivers/net/wireless/realtek/rtlwifi/regd.h index f7f15bce35dd..3ba068511e34 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.h +++ b/drivers/net/wireless/realtek/rtlwifi/regd.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_REGD_H__ #define __RTL_REGD_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h index 45c866d3ca88..fa2e1b063f68 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92C_DEF_H__ #define __RTL92C_DEF_H__ @@ -123,9 +101,6 @@ #define IS_92C_SERIAL(version) \ ((IS_81XXC(version) && IS_2T2R(version)) ? true : false) -#define IS_81xxC_VENDOR_UMC_A_CUT(version) \ - (IS_81XXC(version) ? ((IS_CHIP_VENDOR_UMC(version)) ? \ - ((GET_CVID_CUT_VERSION(version)) ? false : true) : false) : false) #define IS_81XXC_VENDOR_UMC_B_CUT(version) \ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \ ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true \ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index e05af7d60830..85360353f557 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h index 50f26a9a97db..eb8090caeec2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL88E_DM_H__ #define __RTL88E_DM_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c index 63874512598b..203e7b574e84 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h index b884c30c7b37..39ddb7afea9d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92C__FW__H__ #define __RTL92C__FW__H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index cfc8762c55f4..454bab38b165 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h index 214cd2a32018..fd09b0712d17 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92CE_HW_H__ #define __RTL92CE_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c index df3e214460db..4ef6d5907521 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h index 4b325b75faaf..67d3dc389ba0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92CE_LED_H__ #define __RTL92CE_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index 14a256062614..96d8f25b120f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h index b29bd77210f4..8157ef419eeb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92C_PHY_H__ #define __RTL92C_PHY_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c index 02013df968a0..d69497bf5d19 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../pwrseqcmd.h" #include "pwrseq.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h index 8379a3e5198c..42e222c1795f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL8723E_PWRSEQ_H__ #define __RTL8723E_PWRSEQ_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h index 0c0d64aea651..0fc8db8916fa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92C_REG_H__ #define __RTL92C_REG_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c index 30798b12a363..0f401ad92c2e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h index 0eca030e3238..05e27b40b2a9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92C_RF_H__ #define __RTL92C_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 8c15ffd3568b..eab48fed61ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h index 22398c3753a6..1407151503f8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92CE_SW_H__ #define __RTL92CE_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c index 68bcb7fe6a65..a3c312c3ed9a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "table.h" u32 RTL8188EEPHY_REG_1TARRAY[] = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h index 403c4ddd236f..df6065602401 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92CE_TABLE__H_ #define __RTL92CE_TABLE__H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 14dcb0816bc0..106011a24827 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -415,7 +393,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, (GET_RX_DESC_FAGGR(pdesc) == 1)); if (status->packet_report_type == NORMAL_RX) status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate); @@ -442,7 +420,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw, if (status->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (status->rx_is40Mhzpacket) + if (status->rx_is40mhzpacket) rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index 127ba977206f..c29d9bfa5bd4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2013 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2013 Realtek Corporation.*/ #ifndef __RTL92CE_TRX_H__ #define __RTL92CE_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c index 0b5a06ffa482..f2908ee5f860 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include #include "dm_common.h" @@ -447,7 +425,6 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw) if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate || dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT || dm_digtable->cursta_cstate == DIG_STA_CONNECT) { - if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) { dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); @@ -526,7 +503,6 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) rtl92c_dm_cck_packet_detection_thresh(hw); dm_digtable->presta_cstate = dm_digtable->cursta_cstate; - } static void rtl92c_dm_dig(struct ieee80211_hw *hw) @@ -629,6 +605,7 @@ static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); + rtlpriv->dm.current_turbo_edca = false; rtlpriv->dm.is_any_nonbepkts = false; rtlpriv->dm.is_cur_rdlstate = false; @@ -682,7 +659,6 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && (!rtlpriv->dm.disable_framebursting))) { - cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; @@ -707,6 +683,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) } else { if (rtlpriv->dm.current_turbo_edca) { u8 tmp = AC0_BE; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, &tmp); rtlpriv->dm.current_turbo_edca = false; @@ -1657,7 +1634,6 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte) { struct rtl_priv *rtlpriv = rtl_priv(hw); - /* Only enable HW BT coexist when BT in "Busy" state. */ if (rtlpriv->mac80211.vendor == PEER_CISCO && rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h index 441604ff5858..c4ce9fc96163 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92COMMON_DM_H__ #define __RTL92COMMON_DM_H__ @@ -49,7 +27,7 @@ #define DM_DIG_FA_TH1 0x100 #define DM_DIG_FA_TH2 0x200 -#define RXPATHSELECTION_SS_TH_lOW 30 +#define RXPATHSELECTION_SS_TH_LOW 30 #define RXPATHSELECTION_DIFF_TH 18 #define DM_RATR_STA_INIT 0 @@ -60,7 +38,7 @@ #define CTS2SELF_THVAL 30 #define REGC38_TH 20 -#define WAIOTTHVal 25 +#define WAIOTTHVAL 25 #define TXHIGHPWRLEVEL_NORMAL 0 #define TXHIGHPWRLEVEL_LEVEL1 1 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c index f3bff66e85d0..18c76990a089 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -40,6 +18,7 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) { u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); + if (enable) value32 |= MCUFWDL_EN; else @@ -47,8 +26,8 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) { u8 tmp; - if (enable) { + if (enable) { tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04); @@ -59,7 +38,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); } else { - tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); @@ -79,27 +57,27 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size); is_version_b = IS_NORMAL_CHIP(version); if (is_version_b) { - u32 pageNums, remainsize; + u32 pagenums, remainsize; u32 page, offset; if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) rtl_fill_dummy(bufferptr, &size); - pageNums = size / FW_8192C_PAGE_SIZE; + pagenums = size / FW_8192C_PAGE_SIZE; remainsize = size % FW_8192C_PAGE_SIZE; - if (pageNums > 4) + if (pagenums > 4) pr_err("Page numbers should not greater then 4\n"); - for (page = 0; page < pageNums; page++) { + for (page = 0; page < pagenums; page++) { offset = page * FW_8192C_PAGE_SIZE; rtl_fw_page_write(hw, page, (bufferptr + offset), FW_8192C_PAGE_SIZE); } if (remainsize) { - offset = pageNums * FW_8192C_PAGE_SIZE; - page = pageNums; + offset = pagenums * FW_8192C_PAGE_SIZE; + page = pagenums; rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize); } @@ -118,7 +96,7 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) && - (!(value32 & FWDL_ChkSum_rpt))); + (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) { pr_err("chksum report fail! REG_MCUFWDL:0x%08x .\n", @@ -644,7 +622,6 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", u1rsvdpageloc, 3); - skb = dev_alloc_skb(totalpacketlen); skb_put_data(skb, &reserved_page_packet, totalpacketlen); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h index c5fa14bda387..888d9fc94bc2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
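The _rtl92c_write_fw() hunk above downloads the firmware image in fixed-size pages plus a trailing remainder, warning when more than four pages would be needed. A stripped-down sketch of that split, assuming a hypothetical write_page() callback in place of rtl_fw_page_write() and PAGE_SZ in place of FW_8192C_PAGE_SIZE (the real page size differs):

#include <linux/types.h>

#define PAGE_SZ 4096    /* stand-in for FW_8192C_PAGE_SIZE */

static void write_fw_paged(const u8 *buf, u32 size,
                           void (*write_page)(u32 page, const u8 *data, u32 len))
{
        u32 pagenums = size / PAGE_SZ;  /* number of full pages */
        u32 remain = size % PAGE_SZ;    /* bytes left after the last full page */
        u32 page;

        for (page = 0; page < pagenums; page++)
                write_page(page, buf + page * PAGE_SZ, PAGE_SZ);

        if (remain)                     /* partial final page, if any */
                write_page(pagenums, buf + pagenums * PAGE_SZ, remain);
}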
See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C__FW__COMMON__H__ #define __RTL92C__FW__COMMON__H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c index 889bd1301154..97ad21c3964f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c @@ -1,32 +1,9 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include - MODULE_AUTHOR("lizhaoming "); MODULE_AUTHOR("Realtek WlanFAE "); MODULE_AUTHOR("Georgia "); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 7c6e5d91439d..0efd19aa4fe5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../rtl8192ce/reg.h" @@ -239,7 +217,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile); -void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, +void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, u32 data) { @@ -393,7 +371,7 @@ void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, rtlphy->pwrgroup_cnt++; } } -EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset); +EXPORT_SYMBOL(_rtl92c_store_pwrindex_diffrate_offset); void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) { @@ -452,10 +430,10 @@ void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = RFPGA0_XB_LSSIPARAMETER; - rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER; rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; @@ -769,6 +747,7 @@ static void _rtl92c_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) { if (channel == 6 && rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { @@ -1120,19 +1099,19 @@ static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw, static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, bool is_patha_on, bool is2t) { - u32 pathOn; + u32 pathon; u32 i; - pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; + pathon = is_patha_on ? 
0x04db25a4 : 0x0b1b25a4; if (false == is2t) { - pathOn = 0x0bdb25a0; + pathon = 0x0bdb25a0; rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); } else { - rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn); + rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon); } for (i = 1; i < IQK_ADDA_REG_NUM; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn); + rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon); } static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw, @@ -1361,7 +1340,7 @@ static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, if (is_hal_stop(rtlhal)) { rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01); - rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); } if (is2t) { if (bmain) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h index d11261e05a2e..75afa6253ad0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_PHY_COMMON_H__ #define __RTL92C_PHY_COMMON_H__ @@ -44,9 +22,9 @@ #define LOOP_LIMIT 5 #define MAX_STALL_TIME 50 -#define AntennaDiversityValue 0x80 +#define ANTENNADIVERSITYVALUE 0x80 #define MAX_TXPWR_IDX_NMODE_92S 63 -#define Reset_Cnt_Limit 3 +#define RESET_CNT_LIMIT 3 #define IQK_ADDA_REG_NUM 16 #define IQK_MAC_REG_NUM 4 @@ -242,7 +220,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data); bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw); -void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, +void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, u32 data); bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h index d2005d7e9ad2..147bf2407f8f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_DEF_H__ #define __RTL92C_DEF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c index 2c8205e46be4..a3e2c8a60967 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h index 9761d0ca31b0..eab42c1bd470 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_DM_H__ #define __RTL92C_DM_H__ @@ -44,7 +22,7 @@ #define DM_DIG_FA_TH1 0x100 #define DM_DIG_FA_TH2 0x200 -#define RXPATHSELECTION_SS_TH_lOW 30 +#define RXPATHSELECTION_SS_TH_LOW 30 #define RXPATHSELECTION_DIFF_TH 18 #define DM_RATR_STA_INIT 0 @@ -55,7 +33,7 @@ #define CTS2SELF_THVAL 30 #define REGC38_TH 20 -#define WAIOTTHVal 25 +#define WAIOTTHVAL 25 #define TXHIGHPWRLEVEL_NORMAL 0 #define TXHIGHPWRLEVEL_LEVEL1 1 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 4a81e0ef4b8e..a52dd64d528d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" @@ -104,13 +82,13 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; case HW_VAR_FWLPS_RF_ON:{ - enum rf_pwrstate rfState; + enum rf_pwrstate rfstate; u32 val_rcr; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, - (u8 *) (&rfState)); - if (rfState == ERFOFF) { + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { *((bool *) (val)) = true; } else { val_rcr = rtl_read_dword(rtlpriv, REG_RCR); @@ -166,6 +144,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HW_VAR_BASIC_RATE:{ u16 rate_cfg = ((u16 *) val)[0]; u8 rate_index = 0; + rate_cfg &= 0x15f; rate_cfg |= 0x01; rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); @@ -219,6 +198,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) case HW_VAR_ACK_PREAMBLE:{ u8 reg_tmp; u8 short_preamble = (bool)*val; + reg_tmp = (mac->cur_40_prime_sc) << 5; if (short_preamble) reg_tmp |= 0x80; @@ -315,6 +295,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AC_PARAM:{ u8 e_aci = *(val); + rtl92c_dm_init_edca_turbo(hw); if (rtlpci->acm_method != EACMWAY2_SW) @@ -336,13 +317,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) if (acm) { switch (e_aci) { case AC0_BE: - acm_ctrl |= AcmHw_BeqEn; + acm_ctrl |= ACMHW_BEQEN; break; case AC2_VI: - acm_ctrl |= AcmHw_ViqEn; + acm_ctrl |= ACMHW_VIQEN; break; case AC3_VO: - acm_ctrl |= AcmHw_VoqEn; + acm_ctrl |= ACMHW_VOQEN; break; default: RT_TRACE(rtlpriv, COMP_ERR, 
DBG_WARNING, @@ -353,13 +334,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } else { switch (e_aci) { case AC0_BE: - acm_ctrl &= (~AcmHw_BeqEn); + acm_ctrl &= (~ACMHW_BEQEN); break; case AC2_VI: - acm_ctrl &= (~AcmHw_ViqEn); + acm_ctrl &= (~ACMHW_VIQEN); break; case AC3_VO: - acm_ctrl &= (~AcmHw_VoqEn); + acm_ctrl &= (~ACMHW_VOQEN); break; default: pr_err("switch case %#x not processed\n", @@ -478,6 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case HW_VAR_AID:{ u16 u2btmp; + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); u2btmp &= 0xC000; rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | @@ -584,23 +566,23 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned short i; u8 txpktbuf_bndy; - u8 maxPage; + u8 maxpage; bool status; #if LLT_CONFIG == 1 - maxPage = 255; + maxpage = 255; txpktbuf_bndy = 252; #elif LLT_CONFIG == 2 - maxPage = 127; + maxpage = 127; txpktbuf_bndy = 124; #elif LLT_CONFIG == 3 - maxPage = 255; + maxpage = 255; txpktbuf_bndy = 174; #elif LLT_CONFIG == 4 - maxPage = 255; + maxpage = 255; txpktbuf_bndy = 246; #elif LLT_CONFIG == 5 - maxPage = 255; + maxpage = 255; txpktbuf_bndy = 246; #endif @@ -639,13 +621,13 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw) if (true != status) return status; - for (i = txpktbuf_bndy; i < maxPage; i++) { + for (i = txpktbuf_bndy; i < maxpage; i++) { status = _rtl92ce_llt_write(hw, i, (i + 1)); if (true != status) return status; } - status = _rtl92ce_llt_write(hw, maxPage, txpktbuf_bndy); + status = _rtl92ce_llt_write(hw, maxpage, txpktbuf_bndy); if (true != status) return status; @@ -683,6 +665,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); if (rtlpriv->btcoexist.bt_coexistence) { u32 value32; + value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO); value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK); rtl_write_dword(rtlpriv, REG_APS_FSMCO, value32); @@ -908,11 +891,11 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw) return; } - sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable; + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; if (rtlpriv->sec.use_defaultkey) { - sec_reg_value |= SCR_TxUseDK; - sec_reg_value |= SCR_RxUseDK; + sec_reg_value |= SCR_TXUSEDK; + sec_reg_value |= SCR_RXUSEDK; } sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); @@ -1267,6 +1250,7 @@ int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl92c_dm_init_edca_turbo(hw); switch (aci) { case AC1_BK: @@ -2301,7 +2285,6 @@ void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw) rtlpriv->btcoexist.reg_bt_sco = 0; } - void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h index 6711ea1a75d9..fa1049d16cb7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
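The _rtl92ce_llt_table_init() change above only renames maxPage, but the loop it touches is worth spelling out: TX packet-buffer pages from txpktbuf_bndy up to maxpage are chained so each link-list entry points at the next page, and the final entry points back at the boundary, closing a ring. A hedged sketch of that ring setup, with a hypothetical llt_write() standing in for _rtl92ce_llt_write() (which returns false on a polling timeout):

#include <linux/types.h>

static bool llt_ring_init(u8 txpktbuf_bndy, u8 maxpage,
                          bool (*llt_write)(u32 addr, u32 data))
{
        u32 i;

        for (i = txpktbuf_bndy; i < maxpage; i++)       /* chain page i -> i + 1 */
                if (!llt_write(i, i + 1))
                        return false;

        /* close the ring: the last page points back at the boundary */
        return llt_write(maxpage, txpktbuf_bndy);
}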
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CE_HW_H__ #define __RTL92CE_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c index 7edf5af9046e..d6933d36ada2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h index f6edb9cd9b67..97ab1e00af5f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CE_LED_H__ #define __RTL92CE_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c index 7c6d7fc1ef9a..f6574f31fa3b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -104,7 +82,7 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE | - FEN_BB_GLB_RSTn | FEN_BBRSTB); + FEN_BB_GLB_RSTN | FEN_BBRSTB); rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0); rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23)); @@ -236,7 +214,7 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, for (i = 0; i < phy_regarray_pg_len; i = i + 3) { rtl_addr_delay(phy_regarray_table_pg[i]); - _rtl92c_store_pwrIndex_diffrate_offset(hw, + _rtl92c_store_pwrindex_diffrate_offset(hw, phy_regarray_table_pg[i], phy_regarray_table_pg[i + 1], phy_regarray_table_pg[i + 2]); @@ -464,13 +442,14 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, if ((ppsc->rfpwr_state == ERFOFF) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; - u32 InitializeCount = 0; + u32 initializecount = 0; + do { - InitializeCount++; + initializecount++; RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); - } while (!rtstatus && (InitializeCount < 10)); + } while (!rtstatus && (initializecount < 10)); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h index 93f3bc0197b4..7582a162bd11 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_PHY_H__ #define __RTL92C_PHY_H__ @@ -44,9 +22,9 @@ #define LOOP_LIMIT 5 #define MAX_STALL_TIME 50 -#define AntennaDiversityValue 0x80 +#define ANTENNADIVERSITYVALUE 0x80 #define MAX_TXPWR_IDX_NMODE_92S 63 -#define Reset_Cnt_Limit 3 +#define RESET_CNT_LIMIT 3 #define IQK_ADDA_REG_NUM 16 #define IQK_MAC_REG_NUM 4 @@ -122,7 +100,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data); -void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, +void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, u32 data); bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h index 9e3b58a5d2bb..431277ef1c98 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_REG_H__ #define __RTL92C_REG_H__ @@ -702,7 +680,7 @@ #define PWC_EV12V BIT(15) #define FEN_BBRSTB BIT(0) -#define FEN_BB_GLB_RSTn BIT(1) +#define FEN_BB_GLB_RSTN BIT(1) #define FEN_USBA BIT(2) #define FEN_UPLL BIT(3) #define FEN_USBD BIT(4) @@ -722,7 +700,7 @@ #define PFM_ALDN BIT(1) #define PFM_LDKP BIT(2) #define PFM_WOWL BIT(3) -#define EnPDN BIT(4) +#define ENPDN BIT(4) #define PDN_PL BIT(5) #define APFM_ONMAC BIT(8) #define APFM_OFF BIT(9) @@ -837,19 +815,19 @@ #define LDOE25_EN BIT(31) #define RSM_EN BIT(0) -#define Timer_EN BIT(4) +#define TIMER_EN BIT(4) #define TRSW0EN BIT(2) #define TRSW1EN BIT(3) #define EROM_EN BIT(4) -#define EnBT BIT(5) -#define EnUart BIT(8) -#define Uart_910 BIT(9) -#define EnPMAC BIT(10) +#define ENBT BIT(5) +#define ENUART BIT(8) +#define UART_910 BIT(9) +#define ENPMAC BIT(10) #define SIC_SWRST BIT(11) -#define EnSIC BIT(12) +#define ENSIC BIT(12) #define SIC_23 BIT(13) -#define EnHDP BIT(14) +#define ENHDP BIT(14) #define SIC_LBK BIT(15) #define LED0PL BIT(4) @@ -858,7 +836,7 @@ #define MCUFWDL_EN BIT(0) #define MCUFWDL_RDY BIT(1) -#define FWDL_ChkSum_rpt BIT(2) +#define FWDL_CHKSUM_RPT BIT(2) #define MACINI_RDY BIT(3) #define BBINI_RDY BIT(4) #define RFINI_RDY BIT(5) @@ -1076,13 +1054,13 @@ #define DIS_TSF_UDT0_NORMAL_CHIP BIT(4) #define DIS_TSF_UDT0_TEST_CHIP BIT(5) -#define AcmHw_HwEn BIT(0) -#define AcmHw_BeqEn BIT(1) -#define AcmHw_ViqEn BIT(2) -#define AcmHw_VoqEn BIT(3) -#define AcmHw_BeqStatus BIT(4) -#define AcmHw_ViqStatus BIT(5) -#define AcmHw_VoqStatus BIT(6) +#define ACMHW_HWEN BIT(0) +#define ACMHW_BEQEN BIT(1) +#define ACMHW_VIQEN BIT(2) +#define ACMHW_VOQEN BIT(3) +#define ACMHW_BEQSTATUS BIT(4) +#define ACMHW_VIQSTATUS BIT(5) +#define ACMHW_VOQSTATUS BIT(6) #define APSDOFF BIT(6) #define APSDOFF_STATUS BIT(7) @@ -1121,7 +1099,7 @@ #define BM_DATA_EN BIT(17) #define MFBEN BIT(22) #define LSIGEN BIT(23) -#define EnMBID BIT(24) +#define ENMBID BIT(24) #define APP_BASSN BIT(27) #define APP_PHYSTS BIT(28) #define APP_ICV BIT(29) @@ -1150,12 +1128,12 @@ #define RXERR_RPT_RST BIT(27) #define _RXERR_RPT_SEL(type) ((type) << 28) -#define SCR_TxUseDK BIT(0) -#define SCR_RxUseDK BIT(1) -#define SCR_TxEncEnable BIT(2) -#define SCR_RxDecEnable BIT(3) -#define SCR_SKByA2 BIT(4) -#define SCR_NoSKMC BIT(5) +#define SCR_TXUSEDK BIT(0) +#define SCR_RXUSEDK BIT(1) +#define SCR_TXENCENABLE BIT(2) +#define SCR_RXDECENABLE BIT(3) +#define SCR_SKBYA2 BIT(4) +#define SCR_NOSKMC BIT(5) #define SCR_TXBCUSEDK BIT(6) #define SCR_RXBCUSEDK BIT(7) @@ -1208,7 +1186,7 @@ #define RPMAC_CCKPLCPHEADER 0x144 #define RPMAC_CCKCRC16 0x148 #define RPMAC_OFDMRXCRC32OK 0x170 -#define RPMAC_OFDMRXCRC32Er 0x174 +#define RPMAC_OFDMRXCRC32ER 0x174 #define RPMAC_OFDMRXPARITYER 0x178 #define RPMAC_OFDMRXCRC8ER 0x17c #define RPMAC_CCKCRXRC16ER 0x180 @@ -1246,8 +1224,8 @@ #define RFPGA0_XAB_RFINTERFACESW 0x870 #define RFPGA0_XCD_RFINTERFACESW 0x874 -#define rFPGA0_XAB_RFPARAMETER 0x878 -#define rFPGA0_XCD_RFPARAMETER 0x87c +#define RFPGA0_XAB_RFPARAMETER 0x878 +#define RFPGA0_XCD_RFPARAMETER 0x87c #define RFPGA0_ANALOGPARAMETER1 0x880 #define RFPGA0_ANALOGPARAMETER2 0x884 @@ -1521,8 +1499,8 @@ #define BCCKTXCRC16 0xffff #define BCCKTXSTATUS 0x1 #define BOFDMTXSTATUS 0x2 -#define IS_BB_REG_OFFSET_92S(_Offset) \ - ((_Offset >= 0x800) && (_Offset <= 0xfff)) +#define 
IS_BB_REG_OFFSET_92S(_offset) \ + (((_offset) >= 0x800) && ((_offset) <= 0xfff)) #define BRFMOD 0x1 #define BJAPANMODE 0x2 @@ -1715,7 +1693,6 @@ #define BCCK_RF_EXTEND 0x20000000 #define BCCK_RXAGC_SATLEVEL 0x1f000000 #define BCCK_RXAGC_SATCOUNT 0xe0 -#define bCCKRxRFSettle 0x1f #define BCCK_FIXED_RXAGC 0x8000 #define BCCK_ANTENNA_POLARITY 0x2000 #define BCCK_TXFILTER_TYPE 0x0c00 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c index e68ed7f37c79..713859488744 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" @@ -150,18 +128,18 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 powerBase0, powerBase1; + u32 powerbase0, powerbase1; u8 legacy_pwrdiff, ht20_pwrdiff; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = ppowerlevel[i]; legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; - powerBase0 = powerlevel[i] + legacy_pwrdiff; + powerbase0 = powerlevel[i] + legacy_pwrdiff; - powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | - (powerBase0 << 8) | powerBase0; - *(ofdmbase + i) = powerBase0; + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [OFDM power base index rf(%c) = 0x%x]\n", i == 0 ? 
'A' : 'B', *(ofdmbase + i)); @@ -172,11 +150,11 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; powerlevel[i] += ht20_pwrdiff; } - powerBase1 = powerlevel[i]; - powerBase1 = (powerBase1 << 24) | - (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; + powerbase1 = powerlevel[i]; + powerbase1 = (powerbase1 << 24) | + (powerbase1 << 16) | (powerbase1 << 8) | powerbase1; - *(mcsbase + i) = powerBase1; + *(mcsbase + i) = powerbase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", @@ -186,37 +164,37 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, - u32 *powerBase0, - u32 *powerBase1, + u32 *powerbase0, + u32 *powerbase1, u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup = 0, pwr_diff_limit[4]; - u32 writeVal, customer_limit, rf; + u32 writeval, customer_limit, rf; for (rf = 0; rf < 2; rf++) { switch (rtlefuse->eeprom_regulatory) { case 0: chnlgroup = 0; - writeVal = rtlphy->mcs_offset[chnlgroup][index + + writeval = rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)] - + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, writeVal(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "RTK better performance, writeval(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; case 1: if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { - writeVal = ((index < 2) ? powerBase0[rf] : - powerBase1[rf]); + writeval = ((index < 2) ? powerbase0[rf] : + powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Realtek regulatory, 40MHz, writeVal(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "Realtek regulatory, 40MHz, writeval(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); } else { if (rtlphy->pwrgroup_cnt == 1) chnlgroup = 0; @@ -231,23 +209,23 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, chnlgroup++; } - writeVal = rtlphy->mcs_offset[chnlgroup] + writeval = rtlphy->mcs_offset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? - powerBase0[rf] : - powerBase1[rf]); + powerbase0[rf] : + powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); } break; case 2: - writeVal = - ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + writeval = + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Better regulatory, writeVal(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "Better regulatory, writeval(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; case 3: chnlgroup = 0; @@ -297,36 +275,36 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, "Customer's limit rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', customer_limit); - writeVal = customer_limit + - ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + writeval = customer_limit + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Customer, writeVal rf(%c)= 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "Customer, writeval rf(%c)= 0x%x\n", + rf == 0 ? 
'A' : 'B', writeval); break; default: chnlgroup = 0; - writeVal = rtlphy->mcs_offset[chnlgroup] + writeval = rtlphy->mcs_offset[chnlgroup] [index + (rf ? 8 : 0)] - + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, writeVal rf(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "RTK better performance, writeval rf(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; } if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) - writeVal = writeVal - 0x06060606; + writeval = writeval - 0x06060606; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT2) - writeVal = writeVal - 0x0c0c0c0c; - *(p_outwriteval + rf) = writeVal; + writeval = writeval - 0x0c0c0c0c; + *(p_outwriteval + rf) = writeval; } } static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, - u8 index, u32 *pValue) + u8 index, u32 *value) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -342,29 +320,29 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 }; u8 i, rf, pwr_val[4]; - u32 writeVal; + u32 writeval; u16 regoffset; for (rf = 0; rf < 2; rf++) { - writeVal = pValue[rf]; + writeval = value[rf]; for (i = 0; i < 4; i++) { - pwr_val[i] = (u8) ((writeVal & (0x7f << + pwr_val[i] = (u8)((writeval & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } - writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; else regoffset = regoffset_b[index]; - rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Set 0x%x = %08x\n", regoffset, writeVal); + "Set 0x%x = %08x\n", regoffset, writeval); if (((get_rf_type(rtlphy) == RF_2T2R) && (regoffset == RTXAGC_A_MCS15_MCS12 || @@ -373,7 +351,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, (regoffset == RTXAGC_A_MCS07_MCS04 || regoffset == RTXAGC_B_MCS07_MCS04))) { - writeVal = pwr_val[3]; + writeval = pwr_val[3]; if (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_A_MCS07_MCS04) regoffset = 0xc90; @@ -382,9 +360,9 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, regoffset = 0xc98; for (i = 0; i < 3; i++) { - writeVal = (writeVal > 6) ? (writeVal - 6) : 0; + writeval = (writeval > 6) ? 
(writeval - 6) : 0; rtl_write_byte(rtlpriv, (u32) (regoffset + i), - (u8) writeVal); + (u8)writeval); } } } @@ -393,20 +371,20 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel) { - u32 writeVal[2], powerBase0[2], powerBase1[2]; + u32 writeval[2], powerbase0[2], powerbase1[2]; u8 index; rtl92c_phy_get_power_base(hw, ppowerlevel, - channel, &powerBase0[0], &powerBase1[0]); + channel, &powerbase0[0], &powerbase1[0]); for (index = 0; index < 6; index++) { _rtl92c_get_txpower_writeval_by_regulatory(hw, channel, index, - &powerBase0[0], - &powerBase1[0], - &writeVal[0]); + &powerbase0[0], + &powerbase1[0], + &writeval[0]); - _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]); + _rtl92c_write_ofdm_power_reg(hw, index, &writeval[0]); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h index 22c5e6f51331..6fa70224d2d4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_RF_H__ #define __RTL92C_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index 71a6761d3648..a9c0111444bc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
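Two small idioms recur in the rf.c hunks above: rtl92c_phy_get_power_base() replicates one per-path power index into every byte of a 32-bit word (for example 0x2a becomes 0x2a2a2a2a, so a single register write covers four adjacent rates), and _rtl92c_write_ofdm_power_reg() later pulls the four 7-bit fields back out, clamps each to RF6052_MAX_TX_PWR, and repacks them. A compact sketch of both steps, with MAX_PWR as a stand-in for RF6052_MAX_TX_PWR (check the header for the real limit):

#include <linux/types.h>

#define MAX_PWR 0x3f    /* stand-in for RF6052_MAX_TX_PWR */

/* pack the same power index into all four bytes of a u32 */
static u32 replicate_power_index(u8 pwr)
{
        u32 v = pwr;

        return (v << 24) | (v << 16) | (v << 8) | v;
}

/* clamp each of the four packed power fields (low 7 bits per byte) */
static u32 clamp_power_word(u32 writeval)
{
        u8 val[4];
        int i;

        for (i = 0; i < 4; i++) {
                val[i] = (writeval >> (i * 8)) & 0x7f;
                if (val[i] > MAX_PWR)
                        val[i] = MAX_PWR;
        }
        return (val[3] << 24) | (val[2] << 16) | (val[1] << 8) | val[0];
}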
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h index 9a1c89cbbda1..f2d121a60159 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CE_SW_H__ #define __RTL92CE_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c index 98b06d48a2dd..58878db404ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c @@ -1,33 +1,8 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "table.h" - u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH] = { 0x024, 0x0011800f, 0x028, 0x00ffdb83, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h index 51e4e07396a6..473af27f80d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CE_TABLE__H_ #define __RTL92CE_TABLE__H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index d36e0060cc7a..18a0ab59631a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -58,6 +36,7 @@ static u8 _rtl92c_query_rxpwrpercentage(s8 antpower) static u8 _rtl92c_evm_db_to_percentage(s8 value) { s8 ret_val; + ret_val = value; if (ret_val >= 0) @@ -131,6 +110,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, if (is_cck_rate) { u8 report, cck_highpwr; + cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; if (ppsc->rfpwr_state == ERFON) @@ -142,6 +122,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, if (!cck_highpwr) { u8 cck_agc_rpt = cck_buf->cck_agc_rpt; + report = cck_buf->cck_agc_rpt & 0xc0; report = report >> 6; switch (report) { @@ -160,6 +141,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, } } else { u8 cck_agc_rpt = cck_buf->cck_agc_rpt; + report = p_drvinfo->cfosho[0] & 0x60; report = report >> 5; switch (report) { @@ -204,6 +186,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, /* (3) Get Signal Quality (EVM) */ if (packet_match_bssid) { u8 sq; + if (pstats->rx_pwdb_all > 40) sq = 100; else { @@ -340,6 +323,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; struct ieee80211_hdr *hdr; u32 phystatus = GET_RX_DESC_PHYST(pdesc); + stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * RX_DRV_INFO_SIZE_UNIT; @@ -354,7 +338,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) && (GET_RX_DESC_FAGGR(pdesc) == 1)); stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); - stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); stats->is_cck = RX_HAL_IS_CCK_RATE(pdesc->rxmcs); @@ -368,7 +352,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, if (stats->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (stats->rx_is40Mhzpacket) + if (stats->rx_is40mhzpacket) rx_status->bw = RATE_INFO_BW_40; if (stats->is_ht) @@ -519,6 +503,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; + SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); } @@ -755,6 +740,7 @@ bool rtl92ce_is_tx_desc_closed(struct ieee80211_hw *hw, void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) { struct rtl_priv *rtlpriv = rtl_priv(hw); + if (hw_queue == BEACON_QUEUE) { rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4)); } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h index 91f0bd6b752f..fb1d4444a52f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
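Beyond the rx_is40Mhzpacket rename, the rtl92ce_rx_query_desc() hunk above shows how decoded descriptor bits are mapped onto mac80211's rx status: a failed CRC sets RX_FLAG_FAILED_FCS_CRC and a 40 MHz packet sets the bandwidth to RATE_INFO_BW_40. A reduced sketch of that mapping; the rx_flags struct below only mirrors the fields used from the driver's struct rtl_stats, and the HT line reflects the usual mac80211 encoding field rather than code visible in this hunk:

#include <net/mac80211.h>
#include <linux/types.h>

struct rx_flags {
        bool crc;               /* frame failed the FCS check */
        bool rx_is40mhzpacket;  /* received on a 40 MHz channel */
        bool is_ht;             /* HT (802.11n) rate */
};

static void fill_rx_status(const struct rx_flags *stats,
                           struct ieee80211_rx_status *rx_status)
{
        if (stats->crc)
                rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;

        if (stats->rx_is40mhzpacket)
                rx_status->bw = RATE_INFO_BW_40;

        if (stats->is_ht)
                rx_status->encoding = RX_ENC_HT;
}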
See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CE_TRX_H__ #define __RTL92CE_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h index 316fe9990b6d..91e4427ab022 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../rtl8192ce/def.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c index 00fc0685317a..9d1167ff3b50 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h index ce71433792e3..2befc2f4e3fd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../rtl8192ce/dm.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 1e60f70481f5..56cc3bc30860 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" @@ -53,9 +31,9 @@ static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw) rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY; if (IS_HIGHT_PA(rtlefuse->board_type)) { rtlphy->hwparam_tables[PHY_REG_PG].length = - RTL8192CUPHY_REG_Array_PG_HPLength; + RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH; rtlphy->hwparam_tables[PHY_REG_PG].pdata = - RTL8192CUPHY_REG_Array_PG_HP; + RTL8192CUPHY_REG_ARRAY_PG_HP; } else { rtlphy->hwparam_tables[PHY_REG_PG].length = RTL8192CUPHY_REG_ARRAY_PGLENGTH; @@ -82,21 +60,21 @@ static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw) /* 1T */ if (IS_HIGHT_PA(rtlefuse->board_type)) { rtlphy->hwparam_tables[PHY_REG_1T].length = - RTL8192CUPHY_REG_1T_HPArrayLength; + RTL8192CUPHY_REG_1T_HPARRAYLENGTH; rtlphy->hwparam_tables[PHY_REG_1T].pdata = - RTL8192CUPHY_REG_1T_HPArray; + RTL8192CUPHY_REG_1T_HPARRAY; rtlphy->hwparam_tables[RADIOA_1T].length = - RTL8192CURadioA_1T_HPArrayLength; + RTL8192CURADIOA_1T_HPARRAYLENGTH; rtlphy->hwparam_tables[RADIOA_1T].pdata = - RTL8192CURadioA_1T_HPArray; + RTL8192CURADIOA_1T_HPARRAY; rtlphy->hwparam_tables[RADIOB_1T].length = RTL8192CURADIOB_1TARRAYLENGTH; rtlphy->hwparam_tables[RADIOB_1T].pdata = RTL8192CU_RADIOB_1TARRAY; rtlphy->hwparam_tables[AGCTAB_1T].length = - RTL8192CUAGCTAB_1T_HPArrayLength; + RTL8192CUAGCTAB_1T_HPARRAYLENGTH; rtlphy->hwparam_tables[AGCTAB_1T].pdata = - Rtl8192CUAGCTAB_1T_HPArray; + RTL8192CUAGCTAB_1T_HPARRAY; } else { rtlphy->hwparam_tables[PHY_REG_1T].length = RTL8192CUPHY_REG_1TARRAY_LENGTH; @@ -323,16 +301,16 @@ static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 boardType; + u8 boardtype; if (IS_NORMAL_CHIP(rtlhal->version)) { - boardType = ((contents[EEPROM_RF_OPT1]) & + boardtype = ((contents[EEPROM_RF_OPT1]) & BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/ } else { - boardType = contents[EEPROM_RF_OPT4]; - boardType &= BOARD_TYPE_TEST_MASK; + boardtype = contents[EEPROM_RF_OPT4]; + boardtype &= BOARD_TYPE_TEST_MASK; } - rtlefuse->board_type = boardType; + rtlefuse->board_type = boardtype; if (IS_HIGHT_PA(rtlefuse->board_type)) rtlefuse->external_pa = 1; pr_info("Board Type %x\n", rtlefuse->board_type); @@ -442,7 +420,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) u16 value16; u8 value8; /* polling autoload done. 
*/ - u32 pollingCount = 0; + u32 pollingcount = 0; do { if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) { @@ -450,7 +428,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) "Autoload Done!\n"); break; } - if (pollingCount++ > 100) { + if (pollingcount++ > 100) { pr_err("Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n"); return -ENODEV; } @@ -474,7 +452,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8); } /* auto enable WLAN */ - pollingCount = 0; + pollingcount = 0; value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO); value16 |= APFM_ONMAC; rtl_write_word(rtlpriv, REG_APS_FSMCO, value16); @@ -483,7 +461,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) pr_info("MAC auto ON okay!\n"); break; } - if (pollingCount++ > 1000) { + if (pollingcount++ > 1000) { pr_err("Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n"); return -ENODEV; } @@ -495,12 +473,12 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) value16 &= ~ISO_DIOR; rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16); /* Reconsider when to do this operation after asking HWSD. */ - pollingCount = 0; + pollingcount = 0; rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & ~BIT(6))); do { - pollingCount++; - } while ((pollingCount < 200) && + pollingcount++; + } while ((pollingcount < 200) && (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7))); /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */ value16 = rtl_read_word(rtlpriv, REG_CR); @@ -517,60 +495,60 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - bool isChipN = IS_NORMAL_CHIP(rtlhal->version); - u32 outEPNum = (u32)out_ep_num; - u32 numHQ = 0; - u32 numLQ = 0; - u32 numNQ = 0; - u32 numPubQ; + bool ischipn = IS_NORMAL_CHIP(rtlhal->version); + u32 outepnum = (u32)out_ep_num; + u32 numhq = 0; + u32 numlq = 0; + u32 numnq = 0; + u32 numpubq; u32 value32; u8 value8; - u32 txQPageNum, txQPageUnit, txQRemainPage; + u32 txqpagenum, txqpageunit, txqremaininpage; if (!wmm_enable) { - numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ : + numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ : CHIP_A_PAGE_NUM_PUBQ; - txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ; + txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq; - txQPageUnit = txQPageNum/outEPNum; - txQRemainPage = txQPageNum % outEPNum; + txqpageunit = txqpagenum / outepnum; + txqremaininpage = txqpagenum % outepnum; if (queue_sel & TX_SELE_HQ) - numHQ = txQPageUnit; + numhq = txqpageunit; if (queue_sel & TX_SELE_LQ) - numLQ = txQPageUnit; + numlq = txqpageunit; /* HIGH priority queue always present in the configuration of * 2 out-ep. Remainder pages have assigned to High queue */ - if ((outEPNum > 1) && (txQRemainPage)) - numHQ += txQRemainPage; + if (outepnum > 1 && txqremaininpage) + numhq += txqremaininpage; /* NOTE: This step done before writting REG_RQPN. */ - if (isChipN) { + if (ischipn) { if (queue_sel & TX_SELE_NQ) - numNQ = txQPageUnit; - value8 = (u8)_NPQ(numNQ); + numnq = txqpageunit; + value8 = (u8)_NPQ(numnq); rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8); } } else { /* for WMM ,number of out-ep must more than or equal to 2! */ - numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ : + numpubq = ischipn ? WMM_CHIP_B_PAGE_NUM_PUBQ : WMM_CHIP_A_PAGE_NUM_PUBQ; if (queue_sel & TX_SELE_HQ) { - numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ : + numhq = ischipn ? 
WMM_CHIP_B_PAGE_NUM_HPQ : WMM_CHIP_A_PAGE_NUM_HPQ; } if (queue_sel & TX_SELE_LQ) { - numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ : + numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ : WMM_CHIP_A_PAGE_NUM_LPQ; } /* NOTE: This step done before writting REG_RQPN. */ - if (isChipN) { + if (ischipn) { if (queue_sel & TX_SELE_NQ) - numNQ = WMM_CHIP_B_PAGE_NUM_NPQ; - value8 = (u8)_NPQ(numNQ); + numnq = WMM_CHIP_B_PAGE_NUM_NPQ; + value8 = (u8)_NPQ(numnq); rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8); } } /* TX DMA */ - value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN; + value32 = _HPQ(numhq) | _LPQ(numlq) | _PUBQ(numpubq) | LD_RQPN; rtl_write_dword(rtlpriv, REG_RQPN, value32); } @@ -597,20 +575,20 @@ static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable) rtl_write_byte(rtlpriv, REG_PBP, value8); } -static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ, - u16 bkQ, u16 viQ, u16 voQ, - u16 mgtQ, u16 hiQ) +static void _rtl92c_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq, + u16 bkq, u16 viq, u16 voq, + u16 mgtq, u16 hiq) { struct rtl_priv *rtlpriv = rtl_priv(hw); u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7); - value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) | - _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) | - _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ); + value16 |= _TXDMA_BEQ_MAP(beq) | _TXDMA_BKQ_MAP(bkq) | + _TXDMA_VIQ_MAP(viq) | _TXDMA_VOQ_MAP(voq) | + _TXDMA_MGQ_MAP(mgtq) | _TXDMA_HIQ_MAP(hiq); rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16); } -static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw, +static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw, bool wmm_enable, u8 queue_sel) { @@ -630,96 +608,96 @@ static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw, WARN_ON(1); /* Shall not reach here! 
*/ break; } - _rtl92c_init_chipN_reg_priority(hw, value, value, value, value, + _rtl92c_init_chipn_reg_priority(hw, value, value, value, value, value, value); pr_info("Tx queue select: 0x%02x\n", queue_sel); } -static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw, - bool wmm_enable, - u8 queue_sel) +static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw, + bool wmm_enable, + u8 queue_sel) { - u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ; - u16 uninitialized_var(valueHi); - u16 uninitialized_var(valueLow); + u16 beq, bkq, viq, voq, mgtq, hiq; + u16 uninitialized_var(valuehi); + u16 uninitialized_var(valuelow); switch (queue_sel) { case (TX_SELE_HQ | TX_SELE_LQ): - valueHi = QUEUE_HIGH; - valueLow = QUEUE_LOW; + valuehi = QUEUE_HIGH; + valuelow = QUEUE_LOW; break; case (TX_SELE_NQ | TX_SELE_LQ): - valueHi = QUEUE_NORMAL; - valueLow = QUEUE_LOW; + valuehi = QUEUE_NORMAL; + valuelow = QUEUE_LOW; break; case (TX_SELE_HQ | TX_SELE_NQ): - valueHi = QUEUE_HIGH; - valueLow = QUEUE_NORMAL; + valuehi = QUEUE_HIGH; + valuelow = QUEUE_NORMAL; break; default: WARN_ON(1); break; } if (!wmm_enable) { - beQ = valueLow; - bkQ = valueLow; - viQ = valueHi; - voQ = valueHi; - mgtQ = valueHi; - hiQ = valueHi; + beq = valuelow; + bkq = valuelow; + viq = valuehi; + voq = valuehi; + mgtq = valuehi; + hiq = valuehi; } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */ - beQ = valueHi; - bkQ = valueLow; - viQ = valueLow; - voQ = valueHi; - mgtQ = valueHi; - hiQ = valueHi; + beq = valuehi; + bkq = valuelow; + viq = valuelow; + voq = valuehi; + mgtq = valuehi; + hiq = valuehi; } - _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ); + _rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq); pr_info("Tx queue select: 0x%02x\n", queue_sel); } -static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw, +static void _rtl92cu_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw, bool wmm_enable, u8 queue_sel) { - u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ; + u16 beq, bkq, viq, voq, mgtq, hiq; if (!wmm_enable) { /* typical setting */ - beQ = QUEUE_LOW; - bkQ = QUEUE_LOW; - viQ = QUEUE_NORMAL; - voQ = QUEUE_HIGH; - mgtQ = QUEUE_HIGH; - hiQ = QUEUE_HIGH; + beq = QUEUE_LOW; + bkq = QUEUE_LOW; + viq = QUEUE_NORMAL; + voq = QUEUE_HIGH; + mgtq = QUEUE_HIGH; + hiq = QUEUE_HIGH; } else { /* for WMM */ - beQ = QUEUE_LOW; - bkQ = QUEUE_NORMAL; - viQ = QUEUE_NORMAL; - voQ = QUEUE_HIGH; - mgtQ = QUEUE_HIGH; - hiQ = QUEUE_HIGH; + beq = QUEUE_LOW; + bkq = QUEUE_NORMAL; + viq = QUEUE_NORMAL; + voq = QUEUE_HIGH; + mgtq = QUEUE_HIGH; + hiq = QUEUE_HIGH; } - _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ); + _rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq); pr_info("Tx queue select :0x%02x..\n", queue_sel); } -static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw, +static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw, bool wmm_enable, u8 out_ep_num, u8 queue_sel) { switch (out_ep_num) { case 1: - _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable, + _rtl92cu_init_chipn_one_out_ep_priority(hw, wmm_enable, queue_sel); break; case 2: - _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable, + _rtl92cu_init_chipn_two_out_ep_priority(hw, wmm_enable, queue_sel); break; case 3: - _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable, + _rtl92cu_init_chipn_three_out_ep_priority(hw, wmm_enable, queue_sel); break; default: @@ -728,7 +706,7 @@ static void _rtl92cu_init_chipN_queue_priority(struct 
ieee80211_hw *hw, } } -static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw, +static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw, bool wmm_enable, u8 out_ep_num, u8 queue_sel) @@ -769,11 +747,12 @@ static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, u8 queue_sel) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + if (IS_NORMAL_CHIP(rtlhal->version)) - _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num, + _rtl92cu_init_chipn_queue_priority(hw, wmm_enable, out_ep_num, queue_sel); else - _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num, + _rtl92cu_init_chipt_queue_priority(hw, wmm_enable, out_ep_num, queue_sel); } @@ -835,6 +814,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) u8 wmm_enable = false; /* TODO */ u8 out_ep_nums = rtlusb->out_ep_nums; u8 queue_sel = rtlusb->out_queue_sel; + err = _rtl92cu_init_power_on(hw); if (err) { @@ -889,10 +869,10 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw) "not open sw encryption\n"); return; } - sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable; + sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE; if (rtlpriv->sec.use_defaultkey) { - sec_reg_value |= SCR_TxUseDK; - sec_reg_value |= SCR_RxUseDK; + sec_reg_value |= SCR_TXUSEDK; + sec_reg_value |= SCR_RXUSEDK; } if (IS_NORMAL_CHIP(rtlhal->version)) sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); @@ -921,7 +901,7 @@ static void _rtl92cu_hw_configure(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val); } -static void _InitPABias(struct ieee80211_hw *hw) +static void _initpabias(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1017,14 +997,14 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) rtl92c_phy_lc_calibrate(hw); } _rtl92cu_hw_configure(hw); - _InitPABias(hw); + _initpabias(hw); rtl92c_dm_init(hw); exit: local_irq_restore(flags); return err; } -static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw) +static void disable_rfafeandresetbb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); /************************************** @@ -1034,20 +1014,21 @@ c. APSD_CTRL 0x600[7:0] = 0x40 d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine e. 
SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine ***************************************/ - u8 eRFPath = 0, value8 = 0; + u8 erfpath = 0, value8 = 0; + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); - rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0); + rtl_set_rfreg(hw, (enum radio_path)erfpath, 0x0, MASKBYTE0, 0x0); value8 |= APSDOFF; rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/ value8 = 0; - value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn); + value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTN); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/ - value8 &= (~FEN_BB_GLB_RSTn); + value8 &= (~FEN_BB_GLB_RSTN); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/ } -static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM) +static void _resetdigitalprocedure1(struct ieee80211_hw *hw, bool withouthwsm) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1098,7 +1079,7 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM) rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54); rtl_write_byte(rtlpriv, REG_MCUFWDL, 0); } - if (bWithoutHWSM) { + if (withouthwsm) { /***************************** Without HW auto state machine g.SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock @@ -1113,7 +1094,7 @@ static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM) } } -static void _ResetDigitalProcedure2(struct ieee80211_hw *hw) +static void _resetdigitalprocedure2(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); /***************************** @@ -1125,7 +1106,7 @@ m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82); } -static void _DisableGPIO(struct ieee80211_hw *hw) +static void _disablegpio(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); /*************************************** @@ -1155,13 +1136,13 @@ n. LEDCFG 0x4C[15:0] = 0x8080 rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080); } -static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM) +static void disable_analog(struct ieee80211_hw *hw, bool withouthwsm) { struct rtl_priv *rtlpriv = rtl_priv(hw); u16 value16 = 0; u8 value8 = 0; - if (bWithoutHWSM) { + if (withouthwsm) { /***************************** n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power @@ -1184,30 +1165,30 @@ i. 
APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E); } -static void _CardDisableHWSM(struct ieee80211_hw *hw) +static void carddisable_hwsm(struct ieee80211_hw *hw) { /* ==== RF Off Sequence ==== */ - _DisableRFAFEAndResetBB(hw); + disable_rfafeandresetbb(hw); /* ==== Reset digital sequence ====== */ - _ResetDigitalProcedure1(hw, false); + _resetdigitalprocedure1(hw, false); /* ==== Pull GPIO PIN to balance level and LED control ====== */ - _DisableGPIO(hw); + _disablegpio(hw); /* ==== Disable analog sequence === */ - _DisableAnalog(hw, false); + disable_analog(hw, false); } -static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw) +static void carddisablewithout_hwsm(struct ieee80211_hw *hw) { /*==== RF Off Sequence ==== */ - _DisableRFAFEAndResetBB(hw); + disable_rfafeandresetbb(hw); /* ==== Reset digital sequence ====== */ - _ResetDigitalProcedure1(hw, true); + _resetdigitalprocedure1(hw, true); /* ==== Pull GPIO PIN to balance level and LED control ====== */ - _DisableGPIO(hw); + _disablegpio(hw); /* ==== Reset digital sequence ====== */ - _ResetDigitalProcedure2(hw); + _resetdigitalprocedure2(hw); /* ==== Disable analog sequence === */ - _DisableAnalog(hw, true); + disable_analog(hw, true); } static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw, @@ -1226,6 +1207,7 @@ static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u8 tmp1byte = 0; + if (IS_NORMAL_CHIP(rtlhal->version)) { tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, @@ -1353,10 +1335,10 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw) _rtl92cu_set_media_status(hw, opmode); rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); - if (rtlusb->disableHWSM) - _CardDisableHWSM(hw); + if (rtlusb->disablehwsm) + carddisable_hwsm(hw); else - _CardDisableWithoutHWSM(hw); + carddisablewithout_hwsm(hw); /* after power off we should do iqk again */ rtlpriv->phy.iqk_initialized = false; @@ -1375,6 +1357,7 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) if (check_bssid) { u8 tmp; + if (IS_NORMAL_CHIP(rtlhal->version)) { reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); tmp = BIT(4); @@ -1387,6 +1370,7 @@ void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); } else { u8 tmp; + if (IS_NORMAL_CHIP(rtlhal->version)) { reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); tmp = BIT(4); @@ -1498,12 +1482,12 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; case HW_VAR_FWLPS_RF_ON:{ - enum rf_pwrstate rfState; + enum rf_pwrstate rfstate; u32 val_rcr; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, - (u8 *)(&rfState)); - if (rfState == ERFOFF) { + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { *((bool *) (val)) = true; } else { val_rcr = rtl_read_dword(rtlpriv, REG_RCR); @@ -1630,7 +1614,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) &e_aci); } else { u8 sifstime = 0; - u8 u1bAIFS; + u8 u1baifs; if (IS_WIRELESS_MODE_A(wirelessmode) || IS_WIRELESS_MODE_N_24G(wirelessmode) || @@ -1638,21 +1622,22 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) sifstime = 16; else sifstime = 10; - u1bAIFS = sifstime + (2 * val[0]); + u1baifs = sifstime + (2 * val[0]); rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM, - 
u1bAIFS); + u1baifs); rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM, - u1bAIFS); + u1baifs); rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM, - u1bAIFS); + u1baifs); rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM, - u1bAIFS); + u1baifs); } break; } case HW_VAR_ACK_PREAMBLE:{ u8 reg_tmp; u8 short_preamble = (bool)*val; + reg_tmp = 0; if (short_preamble) reg_tmp |= 0x80; @@ -1903,6 +1888,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; case HW_VAR_KEEP_ALIVE:{ u8 array[2]; + array[0] = 0xff; array[1] = *((u8 *)val); rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, @@ -1985,7 +1971,6 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz && curshortgi_20mhz))) { - ratr_value |= 0x10000000; tmp_ratr_value = (ratr_value >> 12); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h index ebd168400d45..5c48c3fd45e4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CU_HW_H__ #define __RTL92CU_HW_H__ @@ -36,14 +14,12 @@ #define TX_TOTAL_PAGE_NUMBER 0xF8 #define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1) - #define CHIP_B_PAGE_NUM_PUBQ 0xE7 /* For Test Chip Setting * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */ #define CHIP_A_PAGE_NUM_PUBQ 0x7E - /* For Chip A Setting */ #define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5 #define WMM_CHIP_A_TX_PAGE_BOUNDARY \ @@ -53,8 +29,6 @@ #define WMM_CHIP_A_PAGE_NUM_HPQ 0x29 #define WMM_CHIP_A_PAGE_NUM_LPQ 0x29 - - /* Note: For Chip B Setting ,modify later */ #define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5 #define WMM_CHIP_B_TX_PAGE_BOUNDARY \ @@ -71,14 +45,14 @@ /* should be renamed and moved to another file */ enum _BOARD_TYPE_8192CUSB { BOARD_USB_DONGLE = 0, /* USB dongle */ - BOARD_USB_High_PA = 1, /* USB dongle - high power PA */ + BOARD_USB_HIGH_PA = 1, /* USB dongle - high power PA */ BOARD_MINICARD = 2, /* Minicard */ BOARD_USB_SOLO = 3, /* USB solo-Slim module */ BOARD_USB_COMBO = 4, /* USB Combo-Slim module */ }; #define IS_HIGHT_PA(boardtype) \ - ((boardtype == BOARD_USB_High_PA) ? true : false) + ((boardtype == BOARD_USB_HIGH_PA) ? 
true : false) #define RTL92C_DRIVER_INFO_SIZE 4 void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c index 66d2784de67d..cc13a4a8f856 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c @@ -1,25 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../usb.h" @@ -34,7 +14,7 @@ static void _rtl92cu_init_led(struct ieee80211_hw *hw, pled->ledon = false; } -static void _rtl92cu_deInit_led(struct rtl_led *pled) +static void rtl92cu_deinit_led(struct rtl_led *pled) { } @@ -108,8 +88,8 @@ void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led0); - _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led1); + rtl92cu_deinit_led(&rtlpriv->ledctl.sw_led0); + rtl92cu_deinit_led(&rtlpriv->ledctl.sw_led1); } static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h index 551deb8afb6f..3fc1e7c8f78b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h @@ -1,25 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CU_LED_H__ #define __RTL92CU_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 5657b1e34ad0..b3ce8000d52d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * -****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -46,7 +24,6 @@ #define RX_EVM rx_evm_percentage #define RX_SIGQ rx_mimo_sig_qual - void rtl92c_read_chip_version(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -165,6 +142,7 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) } while (++count); return status; } + /** * rtl92c_init_LLT_table - Init LLT table * @io: io callback @@ -211,6 +189,7 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary) } return rst; } + void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr, bool is_group, u8 enc_algo, bool is_wepkey, bool clear_all) @@ -392,6 +371,7 @@ void rtl92c_set_qos(struct ieee80211_hw *hw, int aci) void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size) { struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size); } @@ -669,6 +649,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, pstats->RX_SIGQ[1] = -1; if (is_cck_rate) { u8 report, cck_highpwr; + cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; if (!in_powersavemode) cck_highpwr = rtlphy->cck_high_power; @@ -676,6 +657,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, cck_highpwr = false; if (!cck_highpwr) { u8 cck_agc_rpt = cck_buf->cck_agc_rpt; + report = cck_buf->cck_agc_rpt & 0xc0; report = report >> 6; switch (report) { @@ -694,6 +676,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, } } else { u8 cck_agc_rpt = cck_buf->cck_agc_rpt; + report = p_drvinfo->cfosho[0] & 0x60; report = report >> 5; switch (report) { @@ -716,6 +699,7 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, pstats->recvsignalpower = rx_pwr_all; if (packet_match_bssid) { u8 sq; + if (pstats->rx_pwdb_all > 40) sq = 100; else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h index 8573b7e257d9..dd76a05829d5 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_MAC_H__ #define __RTL92C_MAC_H__ @@ -40,7 +18,6 @@ void rtl92c_enable_interrupt(struct ieee80211_hw *hw); void rtl92c_disable_interrupt(struct ieee80211_hw *hw); void rtl92c_set_qos(struct ieee80211_hw *hw, int aci); - /*--------------------------------------------------------------- * Hardware init functions *---------------------------------------------------------------*/ @@ -152,6 +129,4 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw, * Card disable functions *---------------------------------------------------------------*/ - - #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c index f068dd5317a7..9cd028cb2239 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -125,7 +103,7 @@ bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb); rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD | - FEN_BB_GLB_RSTn | FEN_BBRSTB); + FEN_BB_GLB_RSTN | FEN_BBRSTB); regval32 = rtl_read_dword(rtlpriv, 0x87c); rtl_write_dword(rtlpriv, 0x87c, regval32 & (~BIT(31))); rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f); @@ -143,7 +121,7 @@ bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) u32 arraylength; u32 *ptrarray; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_ARRAY\n"); arraylength = rtlphy->hwparam_tables[MAC_REG].length ; ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CUMAC_2T_ARRAY\n"); @@ -181,7 +159,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, phy_regarray_table[i + 1]); udelay(1); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n", + "The phy_regarray_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n", phy_regarray_table[i], phy_regarray_table[i + 1]); } @@ -191,7 +169,7 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, agctab_array_table[i + 1]); udelay(1); RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n", + "The agctab_array_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n", agctab_array_table[i], agctab_array_table[i + 1]); } @@ -214,7 +192,7 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, if (configtype == BASEBAND_CONFIG_PHY_REG) { for (i = 0; i < phy_regarray_pg_len; i = i + 3) { rtl_addr_delay(phy_regarray_table_pg[i]); - _rtl92c_store_pwrIndex_diffrate_offset(hw, + _rtl92c_store_pwrindex_diffrate_offset(hw, phy_regarray_table_pg[i], phy_regarray_table_pg[i + 1], phy_regarray_table_pg[i + 2]); @@ -408,14 +386,14 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, if ((ppsc->rfpwr_state == ERFOFF) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; - u32 InitializeCount = 0; + u32 init_count = 0; do { - InitializeCount++; + init_count++; RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); - } while (!rtstatus && (InitializeCount < 10)); + } while (!rtstatus && (init_count < 10)); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h index a422c4db1a41..a3cc980c42fe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../rtl8192ce/phy.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h index 8185886daa8e..b4b6cde23341 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h @@ -1,26 +1,4 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../rtl8192ce/reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index cf551785eb08..f3a336e0ea98 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" @@ -148,17 +126,17 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 powerBase0, powerBase1; + u32 powerbase0, powerbase1; u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = ppowerlevel[i]; legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; - powerBase0 = powerlevel[i] + legacy_pwrdiff; - powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | - (powerBase0 << 8) | powerBase0; - *(ofdmbase + i) = powerBase0; + powerbase0 = powerlevel[i] + legacy_pwrdiff; + powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) | + (powerbase0 << 8) | powerbase0; + *(ofdmbase + i) = powerbase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [OFDM power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(ofdmbase + i)); @@ -168,10 +146,10 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; powerlevel[i] += ht20_pwrdiff; } - powerBase1 = powerlevel[i]; - powerBase1 = (powerBase1 << 24) | - (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; - *(mcsbase + i) = powerBase1; + powerbase1 = powerlevel[i]; + powerbase1 = (powerbase1 << 24) | + (powerbase1 << 16) | (powerbase1 << 8) | powerbase1; + *(mcsbase + i) = powerbase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(mcsbase + i)); @@ -180,26 +158,26 @@ static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, - u32 *powerBase0, - u32 *powerBase1, + u32 *powerbase0, + u32 *powerbase1, u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup = 0, pwr_diff_limit[4]; - u32 writeVal, customer_limit, rf; + u32 writeval, customer_limit, rf; for (rf = 0; rf < 2; rf++) { switch (rtlefuse->eeprom_regulatory) { case 0: chnlgroup = 0; - writeVal = rtlphy->mcs_offset + writeval = rtlphy->mcs_offset [chnlgroup][index + (rf ? 8 : 0)] - + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + + ((index < 2) ? powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance,writeVal(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "RTK better performance,writeval(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; case 1: if (rtlphy->pwrgroup_cnt == 1) @@ -217,20 +195,20 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, else chnlgroup += 4; } - writeVal = rtlphy->mcs_offset[chnlgroup][index + + writeval = rtlphy->mcs_offset[chnlgroup][index + (rf ? 8 : 0)] + - ((index < 2) ? powerBase0[rf] : - powerBase1[rf]); + ((index < 2) ? powerbase0[rf] : + powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; case 2: - writeVal = ((index < 2) ? powerBase0[rf] : - powerBase1[rf]); + writeval = ((index < 2) ? 
powerbase0[rf] : + powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Better regulatory,writeVal(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "Better regulatory,writeval(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; case 3: chnlgroup = 0; @@ -275,36 +253,36 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', customer_limit); - writeVal = customer_limit + ((index < 2) ? - powerBase0[rf] : powerBase1[rf]); + writeval = customer_limit + ((index < 2) ? + powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Customer, writeVal rf(%c)= 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "Customer, writeval rf(%c)= 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; default: chnlgroup = 0; - writeVal = rtlphy->mcs_offset[chnlgroup] + writeval = rtlphy->mcs_offset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? - powerBase0[rf] : powerBase1[rf]); + powerbase0[rf] : powerbase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "RTK better performance, writeValrf(%c) = 0x%x\n", - rf == 0 ? 'A' : 'B', writeVal); + "RTK better performance, writevalrf(%c) = 0x%x\n", + rf == 0 ? 'A' : 'B', writeval); break; } if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL1) - writeVal = 0x14141414; + writeval = 0x14141414; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL2) - writeVal = 0x00000000; + writeval = 0x00000000; if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) - writeVal = writeVal - 0x06060606; - *(p_outwriteval + rf) = writeVal; + writeval = writeval - 0x06060606; + *(p_outwriteval + rf) = writeval; } } static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, - u8 index, u32 *pValue) + u8 index, u32 *value) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -319,33 +297,33 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 }; u8 i, rf, pwr_val[4]; - u32 writeVal; + u32 writeval; u16 regoffset; for (rf = 0; rf < 2; rf++) { - writeVal = pValue[rf]; + writeval = value[rf]; for (i = 0; i < 4; i++) { - pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >> + pwr_val[i] = (u8)((writeval & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } - writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; else regoffset = regoffset_b[index]; - rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, - "Set 0x%x = %08x\n", regoffset, writeVal); + "Set 0x%x = %08x\n", regoffset, writeval); if (((get_rf_type(rtlphy) == RF_2T2R) && (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_B_MCS15_MCS12)) || ((get_rf_type(rtlphy) != RF_2T2R) && (regoffset == RTXAGC_A_MCS07_MCS04 || regoffset == RTXAGC_B_MCS07_MCS04))) { - writeVal = pwr_val[3]; + writeval = pwr_val[3]; if (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_A_MCS07_MCS04) regoffset = 0xc90; @@ -354,13 +332,13 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, regoffset = 0xc98; for (i = 0; i < 3; i++) { if (i != 2) - writeVal = (writeVal > 8) ? - (writeVal - 8) : 0; + writeval = (writeval > 8) ? + (writeval - 8) : 0; else - writeVal = (writeVal > 6) ? 
- (writeVal - 6) : 0; + writeval = (writeval > 6) ? + (writeval - 6) : 0; rtl_write_byte(rtlpriv, (u32)(regoffset + i), - (u8)writeVal); + (u8)writeval); } } } @@ -369,18 +347,18 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel) { - u32 writeVal[2], powerBase0[2], powerBase1[2]; + u32 writeval[2], powerbase0[2], powerbase1[2]; u8 index = 0; rtl92c_phy_get_power_base(hw, ppowerlevel, - channel, &powerBase0[0], &powerBase1[0]); + channel, &powerbase0[0], &powerbase1[0]); for (index = 0; index < 6; index++) { _rtl92c_get_txpower_writeval_by_regulatory(hw, channel, index, - &powerBase0[0], - &powerBase1[0], - &writeVal[0]); - _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]); + &powerbase0[0], + &powerbase1[0], + &writeval[0]); + _rtl92c_write_ofdm_power_reg(hw, index, &writeval[0]); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h index 07aec0b20cc9..2661e5f8f6da 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CU_RF_H__ #define __RTL92CU_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 43e021b49260..c1c34dca39d2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h index 4ea2cb225580..662440c4cdc9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CU_SW_H__ #define __RTL92CU_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c index b3ac981d88c6..addeac90ee0c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "table.h" @@ -1269,7 +1247,7 @@ u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = { 0xc78, 0x621f001e, }; -u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = { +u32 RTL8192CUPHY_REG_1T_HPARRAY[RTL8192CUPHY_REG_1T_HPARRAYLENGTH] = { 0x024, 0x0011800f, 0x028, 0x00ffdb83, 0x040, 0x000c0004, @@ -1461,7 +1439,7 @@ u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = { 0xf00, 0x00000300, }; -u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = { +u32 RTL8192CUPHY_REG_ARRAY_PG_HP[RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH] = { 0xe00, 0xffffffff, 0x06080808, 0xe04, 0xffffffff, 0x00040406, 0xe08, 0x0000ff00, 0x00000000, @@ -1576,7 +1554,7 @@ u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = { 0x868, 0xffffffff, 0x00000000, }; -u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = { +u32 RTL8192CURADIOA_1T_HPARRAY[RTL8192CURADIOA_1T_HPARRAYLENGTH] = { 0x000, 0x00030159, 0x001, 0x00031284, 0x002, 0x00098000, @@ -1720,7 +1698,7 @@ u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = { 0x000, 0x00030159, }; -u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = { +u32 RTL8192CUAGCTAB_1T_HPARRAY[RTL8192CUAGCTAB_1T_HPARRAYLENGTH] = { 0xc78, 0x7b000001, 0xc78, 0x7b010001, 0xc78, 0x7b020001, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h index 851bf53d246c..efc89f7db80f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CU_TABLE__H_ #define __RTL92CU_TABLE__H_ @@ -53,15 +31,15 @@ extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH]; #define RTL8192CUAGCTAB_1TARRAYLENGTH 320 extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH]; -#define RTL8192CUPHY_REG_1T_HPArrayLength 378 -extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength]; +#define RTL8192CUPHY_REG_1T_HPARRAYLENGTH 378 +extern u32 RTL8192CUPHY_REG_1T_HPARRAY[RTL8192CUPHY_REG_1T_HPARRAYLENGTH]; -#define RTL8192CUPHY_REG_Array_PG_HPLength 336 -extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength]; +#define RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH 336 +extern u32 RTL8192CUPHY_REG_ARRAY_PG_HP[RTL8192CUPHY_REG_ARRAY_PG_HPLENGTH]; -#define RTL8192CURadioA_1T_HPArrayLength 282 -extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength]; -#define RTL8192CUAGCTAB_1T_HPArrayLength 320 -extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength]; +#define RTL8192CURADIOA_1T_HPARRAYLENGTH 282 +extern u32 RTL8192CURADIOA_1T_HPARRAY[RTL8192CURADIOA_1T_HPARRAYLENGTH]; +#define RTL8192CUAGCTAB_1T_HPARRAYLENGTH 320 +extern u32 RTL8192CUAGCTAB_1T_HPARRAY[RTL8192CUAGCTAB_1T_HPARRAYLENGTH]; #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index 9ab56827124e..0020adc004a5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../usb.h" @@ -36,7 +14,7 @@ #include "trx.h" #include "../rtl8192c/fw_common.h" -static int _ConfigVerTOutEP(struct ieee80211_hw *hw) +static int configvertoutep(struct ieee80211_hw *hw) { u8 ep_cfg, txqsele; u8 ep_nums = 0; @@ -69,7 +47,7 @@ static int _ConfigVerTOutEP(struct ieee80211_hw *hw) return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL; } -static int _ConfigVerNOutEP(struct ieee80211_hw *hw) +static int configvernoutep(struct ieee80211_hw *hw) { u8 ep_cfg; u8 ep_nums = 0; @@ -98,7 +76,7 @@ static int _ConfigVerNOutEP(struct ieee80211_hw *hw) return (rtlusb->out_ep_nums == ep_nums) ? 
0 : -EINVAL; } -static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB, +static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8, bool bwificfg, struct rtl_ep_map *ep_map) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -126,10 +104,11 @@ static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB, } } -static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg, +static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg, struct rtl_ep_map *ep_map) { struct rtl_priv *rtlpriv = rtl_priv(hw); + if (bwificfg) { /* for WMM */ RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB 3EP Setting for WMM.....\n"); @@ -153,7 +132,7 @@ static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg, } } -static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map) +static void oneoutepmapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map) { ep_map->ep_mapping[RTL_TXQ_BE] = 2; ep_map->ep_mapping[RTL_TXQ_BK] = 2; @@ -163,30 +142,31 @@ static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map) ep_map->ep_mapping[RTL_TXQ_BCN] = 2; ep_map->ep_mapping[RTL_TXQ_HI] = 2; } + static int _out_ep_mapping(struct ieee80211_hw *hw) { int err = 0; - bool bIsChipN, bwificfg = false; + bool ischipn, bwificfg = false; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); struct rtl_ep_map *ep_map = &(rtlusb->ep_map); - bIsChipN = IS_NORMAL_CHIP(rtlhal->version); + ischipn = IS_NORMAL_CHIP(rtlhal->version); switch (rtlusb->out_ep_nums) { case 2: - _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map); + twooutepmapping(hw, ischipn, bwificfg, ep_map); break; case 3: /* Test chip doesn't support three out EPs. */ - if (!bIsChipN) { + if (!ischipn) { err = -EINVAL; goto err_out; } - _ThreeOutEpMapping(hw, bIsChipN, ep_map); + threeoutepmapping(hw, ischipn, ep_map); break; case 1: - _OneOutEpMapping(hw, ep_map); + oneoutepmapping(hw, ep_map); break; default: err = -EINVAL; @@ -196,15 +176,17 @@ err_out: return err; } + /* endpoint mapping */ int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); int error = 0; + if (likely(IS_NORMAL_CHIP(rtlhal->version))) - error = _ConfigVerNOutEP(hw); + error = configvernoutep(hw); else - error = _ConfigVerTOutEP(hw); + error = configvertoutep(hw); if (error) goto err_out; error = _out_ep_mapping(hw); @@ -320,7 +302,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, stats->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) && (GET_RX_DESC_FAGGR(pdesc) == 1)); stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); - stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); stats->is_ht = (bool)GET_RX_DESC_RX_HT(pdesc); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; @@ -387,7 +369,7 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1) && (GET_RX_DESC_FAGGR(rxdesc) == 1)); stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc); - stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc); + stats.rx_is40mhzpacket = (bool)GET_RX_DESC_BW(rxdesc); stats.is_ht = (bool)GET_RX_DESC_RX_HT(rxdesc); /* TODO: is center_freq changed when doing scan? */ /* TODO: Shall we add protection or just skip those two step? 
*/ @@ -464,6 +446,7 @@ static void _rtl_fill_usb_tx_desc(u8 *txdesc) SET_TX_DESC_LAST_SEG(txdesc, 1); SET_TX_DESC_FIRST_SEG(txdesc, 1); } + /** * For HW recovery information */ @@ -553,11 +536,13 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; + SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density); } rcu_read_unlock(); if (info->control.hw_key) { struct ieee80211_key_conf *keyconf = info->control.hw_key; + switch (keyconf->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: @@ -610,28 +595,28 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n"); } -void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, - u32 buffer_len, bool bIsPsPoll) +void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc, + u32 buffer_len, bool is_pspoll) { /* Clear all status */ - memset(pDesc, 0, RTL_TX_HEADER_SIZE); - SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */ - SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */ - SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */ - SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */ - SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */ + memset(pdesc, 0, RTL_TX_HEADER_SIZE); + SET_TX_DESC_FIRST_SEG(pdesc, 1); /* bFirstSeg; */ + SET_TX_DESC_LAST_SEG(pdesc, 1); /* bLastSeg; */ + SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */ + SET_TX_DESC_PKT_SIZE(pdesc, buffer_len); /* Buffer size + command hdr */ + SET_TX_DESC_QUEUE_SEL(pdesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */ /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error * vlaue by Hw. */ - if (bIsPsPoll) { - SET_TX_DESC_NAV_USE_HDR(pDesc, 1); + if (is_pspoll) { + SET_TX_DESC_NAV_USE_HDR(pdesc, 1); } else { - SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */ - SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. */ + SET_TX_DESC_HWSEQ_EN(pdesc, 1); /* Hw set sequence number */ + SET_TX_DESC_PKT_ID(pdesc, 0x100); /* set bit3 to 1. */ } - SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */ - SET_TX_DESC_OWN(pDesc, 1); - SET_TX_DESC_TX_RATE(pDesc, DESC_RATE1M); - _rtl_tx_desc_checksum(pDesc); + SET_TX_DESC_USE_RATE(pdesc, 1); /* use data rate which is set by Sw */ + SET_TX_DESC_OWN(pdesc, 1); + SET_TX_DESC_TX_RATE(pdesc, DESC_RATE1M); + _rtl_tx_desc_checksum(pdesc); } void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index 15a66c547287..ae2e8aa212de 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. 
- * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CU_TRX_H__ #define __RTL92CU_TRX_H__ @@ -220,7 +198,6 @@ struct rx_drv_info_92c { #define SET_TX_DESC_OWN(__txdesc, __value) \ SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value) - /* Dword 1 */ #define SET_TX_DESC_MACID(__txdesc, __value) \ SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value) @@ -377,7 +354,6 @@ struct rx_drv_info_92c { #define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \ SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value) - int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw); u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index); bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, @@ -397,8 +373,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 queue_index, struct rtl_tcb_desc *tcb_desc); -void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, - u32 buffer_len, bool bIsPsPoll); +void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc, + u32 buffer_len, bool ispspoll); void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, struct sk_buff *skb); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h index cb7b9b727e3a..fa33b05db052 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92D_DEF_H__ #define __RTL92D_DEF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c index ac6d554b67c8..7cc86bb387a1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" @@ -864,7 +842,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( else rf = 1; if (thermalvalue) { - ele_d = rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE, + ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { @@ -872,13 +850,13 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n", - ROFDM0_XATxIQIMBALANCE, + ROFDM0_XATXIQIMBALANCE, ele_d, ofdm_index_old[0]); break; } } if (is2t) { - ele_d = rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, + ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD) & MASKOFDM_D; for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { if (ele_d == @@ -887,7 +865,7 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n", - ROFDM0_XBTxIQIMBALANCE, ele_d, + ROFDM0_XBTXIQIMBALANCE, ele_d, ofdm_index_old[1]); break; } @@ -1059,11 +1037,11 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( * regC94, element B is always 0 */ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a; - rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; - rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS, + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32); value32 = ((val_x * ele_d) >> 7) & 0x01; @@ -1071,11 +1049,11 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( value32); } else { - rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, ofdmswing_table [(u8)ofdm_index[0]]); - rtl_set_bbreg(hw, ROFDM0_XCTxAFE, MASKH4BITS, + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00); @@ -1172,21 +1150,21 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter( ((ele_c & 0x3F) << 16) | ele_a; rtl_set_bbreg(hw, - ROFDM0_XBTxIQIMBALANCE, + ROFDM0_XBTXIQIMBALANCE, MASKDWORD, value32); value32 = (ele_c & 0x000003C0) >> 6; - rtl_set_bbreg(hw, ROFDM0_XDTxAFE, + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32); value32 = ((val_x * ele_d) >> 7) & 0x01; rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), value32); } else { rtl_set_bbreg(hw, - ROFDM0_XBTxIQIMBALANCE, + ROFDM0_XBTXIQIMBALANCE, MASKDWORD, ofdmswing_table [(u8) ofdm_index[1]]); - rtl_set_bbreg(hw, ROFDM0_XDTxAFE, + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0x00); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h index 
5d346ec366ce..939cc45bfebd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_DM_H__ #define __RTL92C_DM_H__ @@ -44,7 +22,7 @@ #define DM_DIG_FA_TH1 0x400 #define DM_DIG_FA_TH2 0x600 -#define RXPATHSELECTION_SS_TH_lOW 30 +#define RXPATHSELECTION_SS_TH_LOW 30 #define RXPATHSELECTION_DIFF_TH 18 #define DM_RATR_STA_INIT 0 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 75bfa9dfef4a..2064813f9381 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -97,7 +75,7 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw) do { value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) && - (!(value32 & FWDL_ChkSum_rpt))); + (!(value32 & FWDL_CHKSUM_RPT))); if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) { pr_err("chksum report fail! 
REG_MCUFWDL:0x%08x\n", value32); @@ -621,7 +599,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) struct sk_buff *skb = NULL; u32 totalpacketlen; bool rtstatus; - u8 u1RsvdPageLoc[3] = { 0 }; + u8 u1rsvdpageloc[3] = { 0 }; bool dlok = false; u8 *beacon; u8 *p_pspoll; @@ -640,7 +618,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000)); SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); - SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); /*-------------------------------------------------------- (3) null data ---------------------------------------------------------*/ @@ -648,7 +626,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid); SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); /*--------------------------------------------------------- (4) probe response ----------------------------------------------------------*/ @@ -656,14 +634,14 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid); SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr); SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG); + SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN; RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", &reserved_page_packet[0], totalpacketlen); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL", - u1RsvdPageLoc, 3); + u1rsvdpageloc, 3); skb = dev_alloc_skb(totalpacketlen); if (!skb) { dlok = false; @@ -678,9 +656,9 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "H2C_RSVDPAGE", u1RsvdPageLoc, 3); + "H2C_RSVDPAGE", u1rsvdpageloc, 3); rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE, - sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + sizeof(u1rsvdpageloc), u1rsvdpageloc); } else RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "Set RSVD page location to Fw FAIL!!!!!!\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h index 6b435236a28e..bd8ea6d66dff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. 
- * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92D__FW__H__ #define __RTL92D__FW__H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 80123fd97221..c7f29a9be50d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" @@ -124,12 +102,12 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; case HW_VAR_FWLPS_RF_ON:{ - enum rf_pwrstate rfState; + enum rf_pwrstate rfstate; u32 val_rcr; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, - (u8 *) (&rfState)); - if (rfState == ERFOFF) { + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { *((bool *) (val)) = true; } else { val_rcr = rtl_read_dword(rtlpriv, REG_RCR); @@ -280,23 +258,23 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AMPDU_FACTOR: { u8 factor_toset; - u32 regtoSet; + u32 regtoset; u8 *ptmp_byte = NULL; u8 index; if (rtlhal->macphymode == DUALMAC_DUALPHY) - regtoSet = 0xb9726641; + regtoset = 0xb9726641; else if (rtlhal->macphymode == DUALMAC_SINGLEPHY) - regtoSet = 0x66626641; + regtoset = 0x66626641; else - regtoSet = 0xb972a841; + regtoset = 0xb972a841; factor_toset = *val; if (factor_toset <= 3) { factor_toset = (1 << (factor_toset + 2)); if (factor_toset > 0xf) factor_toset = 0xf; for (index = 0; index < 4; index++) { - ptmp_byte = (u8 *) (®toSet) + index; + ptmp_byte = (u8 *)(®toset) + index; if ((*ptmp_byte & 0xf0) > (factor_toset << 4)) *ptmp_byte = (*ptmp_byte & 0x0f) @@ -305,7 +283,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) *ptmp_byte = (*ptmp_byte & 0xf0) | (factor_toset); } - rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoSet); + rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset); RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "Set HW_VAR_AMPDU_FACTOR: %#x\n", factor_toset); @@ -531,18 +509,18 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned short i; u8 txpktbuf_bndy; - u8 maxPage; + u8 maxpage; bool status; u32 value32; 
/* High+low page number */ u8 value8; /* normal page number */ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) { - maxPage = 255; + maxpage = 255; txpktbuf_bndy = 246; value8 = 0; value32 = 0x80bf0d29; } else { - maxPage = 127; + maxpage = 127; txpktbuf_bndy = 123; value8 = 0; value32 = 0x80750005; @@ -598,14 +576,14 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) /* This ring buffer is used as beacon buffer if we */ /* config this MAC as two MAC transfer. */ /* Otherwise used as local loopback buffer. */ - for (i = txpktbuf_bndy; i < maxPage; i++) { + for (i = txpktbuf_bndy; i < maxpage; i++) { status = _rtl92de_llt_write(hw, i, (i + 1)); if (true != status) return status; } /* Let last entry point to the start entry of ring buffer */ - status = _rtl92de_llt_write(hw, maxPage, txpktbuf_bndy); + status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy); if (true != status) return status; @@ -1415,13 +1393,13 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw, } static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo, - u8 *rom_content, bool autoLoadfail) + u8 *rom_content, bool autoloadfail) { u32 rfpath, eeaddr, group, offset1, offset2; u8 i; memset(pwrinfo, 0, sizeof(struct txpower_info)); - if (autoLoadfail) { + if (autoloadfail) { for (group = 0; group < CHANNEL_GROUP_MAX; group++) { for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { if (group < CHANNEL_GROUP_MAX_2G) { @@ -1563,7 +1541,7 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw, struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct txpower_info pwrinfo; u8 tempval[2], i, pwr, diff; - u32 ch, rfPath, group; + u32 ch, rfpath, group; _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail); if (!autoload_fail) { @@ -1643,25 +1621,25 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw, "Delta_IQK = 0x%x Delta_LCK = 0x%x\n", rtlefuse->delta_iqk, rtlefuse->delta_lck); - for (rfPath = 0; rfPath < RF6052_MAX_PATH; rfPath++) { + for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) { for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) { group = rtl92d_get_chnlgroup_fromarray((u8) ch); if (ch < CHANNEL_MAX_NUMBER_2G) - rtlefuse->txpwrlevel_cck[rfPath][ch] = - pwrinfo.cck_index[rfPath][group]; - rtlefuse->txpwrlevel_ht40_1s[rfPath][ch] = - pwrinfo.ht40_1sindex[rfPath][group]; - rtlefuse->txpwr_ht20diff[rfPath][ch] = - pwrinfo.ht20indexdiff[rfPath][group]; - rtlefuse->txpwr_legacyhtdiff[rfPath][ch] = - pwrinfo.ofdmindexdiff[rfPath][group]; - rtlefuse->pwrgroup_ht20[rfPath][ch] = - pwrinfo.ht20maxoffset[rfPath][group]; - rtlefuse->pwrgroup_ht40[rfPath][ch] = - pwrinfo.ht40maxoffset[rfPath][group]; - pwr = pwrinfo.ht40_1sindex[rfPath][group]; - diff = pwrinfo.ht40_2sindexdiff[rfPath][group]; - rtlefuse->txpwrlevel_ht40_2s[rfPath][ch] = + rtlefuse->txpwrlevel_cck[rfpath][ch] = + pwrinfo.cck_index[rfpath][group]; + rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] = + pwrinfo.ht40_1sindex[rfpath][group]; + rtlefuse->txpwr_ht20diff[rfpath][ch] = + pwrinfo.ht20indexdiff[rfpath][group]; + rtlefuse->txpwr_legacyhtdiff[rfpath][ch] = + pwrinfo.ofdmindexdiff[rfpath][group]; + rtlefuse->pwrgroup_ht20[rfpath][ch] = + pwrinfo.ht20maxoffset[rfpath][group]; + rtlefuse->pwrgroup_ht40[rfpath][ch] = + pwrinfo.ht40maxoffset[rfpath][group]; + pwr = pwrinfo.ht40_1sindex[rfpath][group]; + diff = pwrinfo.ht40_2sindexdiff[rfpath][group]; + rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] = (pwr > diff) ? 
(pwr - diff) : 0; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h index e6c702e69ecf..ea495216d394 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92DE_HW_H__ #define __RTL92DE_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c index 8851038c9eba..2b76a025deb8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h index 9874519704d3..7599c7e5ecc3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CE_LED_H__ #define __RTL92CE_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index de98d88199d6..0ae6371b6318 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -506,16 +484,16 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; /* Tx AFE control 1 */ - rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATxIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTxIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTxIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTxIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE; /* Tx AFE control 2 */ - rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATxAFE; - rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTxAFE; - rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTxAFE; - rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTxAFE; + rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; /* Tranceiver LSSI Readback SI mode */ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK; @@ -764,7 +742,7 @@ bool rtl92d_phy_bb_config(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_RF_CTRL, value | RF_EN | RF_RSTB | RF_SDMRSTB); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA | - FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB); + FEN_DIO_PCIE | FEN_BB_GLB_RSTN | FEN_BBRSTB); rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); if (!(IS_92D_SINGLEPHY(rtlpriv->rtlhal.version))) { regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0); @@ -1480,11 +1458,11 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw, u8 result = 0; u8 i; u8 retrycount = 2; - u32 TxOKBit = BIT(28), RxOKBit = BIT(27); + u32 TXOKBIT = BIT(28), RXOKBIT = BIT(27); if (rtlhal->interfaceindex == 1) { /* PHY1 */ - TxOKBit = BIT(31); - RxOKBit = BIT(30); + TXOKBIT = BIT(31); + RXOKBIT = BIT(30); } RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK!\n"); /* path-A IQK setting */ @@ -1526,7 +1504,7 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c); regea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD); RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4); - if (!(regeac & TxOKBit) && + if (!(regeac & TXOKBIT) && (((rege94 & 0x03FF0000) >> 16) != 0x142)) { result |= 0x01; } else { /* if Tx not OK, ignore Rx */ @@ -1536,7 +1514,7 @@ static u8 _rtl92d_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw, } /* if Tx is OK, check whether Rx is OK */ - if (!(regeac & RxOKBit) && + if (!(regeac & RXOKBIT) && (((regea4 & 0x03FF0000) >> 16) != 0x132)) { result |= 0x02; break; @@ -2165,7 +2143,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw, if (final_candidate == 0xFF) { return; } else if (iqk_ok) { - oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE, + oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) >> 22) & 0x3FF; /* OFDM0_D */ val_x = result[final_candidate][0]; if ((val_x & 0x00000200) != 0) @@ 
-2174,7 +2152,7 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x, tx0_a = 0x%x, oldval_0 0x%x\n", val_x, tx0_a, oldval_0); - rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x3FF, tx0_a); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), ((val_x * oldval_0 >> 7) & 0x1)); val_y = result[final_candidate][1]; @@ -2188,15 +2166,15 @@ static void _rtl92d_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw, RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%lx, tx0_c = 0x%lx\n", val_y, tx0_c); - rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, ((tx0_c & 0x3C0) >> 6)); - rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, 0x003F0000, + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, (tx0_c & 0x3F)); if (is2t) rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26), ((val_y * oldval_0 >> 7) & 0x1)); RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n", - rtl_get_bbreg(hw, ROFDM0_XATxIQIMBALANCE, + rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD)); if (txonly) { RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n"); @@ -2224,7 +2202,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw, if (final_candidate == 0xFF) { return; } else if (iqk_ok) { - oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, + oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD) >> 22) & 0x3FF; val_x = result[final_candidate][4]; if ((val_x & 0x00000200) != 0) @@ -2232,7 +2210,7 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw, tx1_a = (val_x * oldval_1) >> 8; RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x, tx1_a = 0x%x\n", val_x, tx1_a); - rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x3FF, tx1_a); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), ((val_x * oldval_1 >> 7) & 0x1)); val_y = result[final_candidate][5]; @@ -2243,9 +2221,9 @@ static void _rtl92d_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw, tx1_c = (val_y * oldval_1) >> 8; RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%lx, tx1_c = 0x%lx\n", val_y, tx1_c); - rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 0xF0000000, + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, ((tx1_c & 0x3C0) >> 6)); - rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, 0x003F0000, + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, (tx1_c & 0x3F)); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30), ((val_y * oldval_1 >> 7) & 0x1)); @@ -3086,13 +3064,13 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw, if ((ppsc->rfpwr_state == ERFOFF) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; - u32 InitializeCount = 0; + u32 initializecount = 0; do { - InitializeCount++; + initializecount++; RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); - } while (!rtstatus && (InitializeCount < 10)); + } while (!rtstatus && (initializecount < 10)); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); @@ -3387,9 +3365,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw) /* 5G LAN ON */ rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa); /* TX BB gain shift*1,Just for testchip,0xc80,0xc88 */ - rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD, + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, 0x40000100); - rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD, + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, 0x40000100); if (rtlhal->macphymode == DUALMAC_DUALPHY) { rtl_set_bbreg(hw, 
RFPGA0_XAB_RFINTERFACESW, @@ -3443,16 +3421,16 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw) rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0); /* TX BB gain shift,Just for testchip,0xc80,0xc88 */ if (rtlefuse->internal_pa_5g[0]) - rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD, + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, 0x2d4000b5); else - rtl_set_bbreg(hw, ROFDM0_XATxIQIMBALANCE, MASKDWORD, + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, 0x20000080); if (rtlefuse->internal_pa_5g[1]) - rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD, + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, 0x2d4000b5); else - rtl_set_bbreg(hw, ROFDM0_XBTxIQIMBALANCE, MASKDWORD, + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, 0x20000080); if (rtlhal->macphymode == DUALMAC_DUALPHY) { rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, @@ -3481,10 +3459,10 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw) /* update IQK related settings */ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100); rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100); - rtl_set_bbreg(hw, ROFDM0_XCTxAFE, 0xF0000000, 0x00); + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, 0x00); rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) | BIT(26) | BIT(24), 0x00); - rtl_set_bbreg(hw, ROFDM0_XDTxAFE, 0xF0000000, 0x00); + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, 0x00); rtl_set_bbreg(hw, 0xca0, 0xF0000000, 0x00); rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, 0x00); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h index 58b56b523dbe..8d07c783a023 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92D_PHY_H__ #define __RTL92D_PHY_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h index d4c4e76a9244..2783d7e7b227 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92D_REG_H__ #define __RTL92D_REG_H__ @@ -752,7 +730,7 @@ /* SYS_FUNC_EN */ #define FEN_BBRSTB BIT(0) -#define FEN_BB_GLB_RSTn BIT(1) +#define FEN_BB_GLB_RSTN BIT(1) #define FEN_USBA BIT(2) #define FEN_UPLL BIT(3) #define FEN_USBD BIT(4) @@ -773,7 +751,7 @@ #define PFM_ALDN BIT(1) #define PFM_LDKP BIT(2) #define PFM_WOWL BIT(3) -#define EnPDN BIT(4) +#define ENPDN BIT(4) #define PDN_PL BIT(5) #define APFM_ONMAC BIT(8) #define APFM_OFF BIT(9) @@ -910,7 +888,7 @@ /* MCUFWDL */ #define MCUFWDL_EN BIT(0) #define MCUFWDL_RDY BIT(1) -#define FWDL_ChkSum_rpt BIT(2) +#define FWDL_CHKSUM_RPT BIT(2) #define MACINI_RDY BIT(3) #define BBINI_RDY BIT(4) #define RFINI_RDY BIT(5) @@ -1033,7 +1011,7 @@ #define RFPGA0_XA_LSSIPARAMETER 0x840 #define RFPGA0_XB_LSSIPARAMETER 0x844 -#define RFPGA0_RFWAkEUPPARAMETER 0x850 +#define RFPGA0_RFWAKEUPPARAMETER 0x850 #define RFPGA0_RFSLEEPUPPARAMETER 0x854 #define RFPGA0_XAB_SWITCHCONTROL 0x858 @@ -1135,14 +1113,14 @@ #define ROFDM0_AGCRSSITABLE 0xc78 #define ROFDM0_HTSTFAGC 0xc7c -#define ROFDM0_XATxIQIMBALANCE 0xc80 -#define ROFDM0_XATxAFE 0xc84 -#define ROFDM0_XBTxIQIMBALANCE 0xc88 -#define ROFDM0_XBTxAFE 0xc8c -#define ROFDM0_XCTxIQIMBALANCE 0xc90 -#define ROFDM0_XCTxAFE 0xc94 -#define ROFDM0_XDTxIQIMBALANCE 0xc98 -#define ROFDM0_XDTxAFE 0xc9c +#define ROFDM0_XATXIQIMBALANCE 0xc80 +#define ROFDM0_XATXAFE 0xc84 +#define ROFDM0_XBTXIQIMBALANCE 0xc88 +#define ROFDM0_XBTXAFE 0xc8c +#define ROFDM0_XCTXIQIMBALANCE 0xc90 +#define ROFDM0_XCTXAFE 0xc94 +#define ROFDM0_XDTXIQIMBALANCE 0xc98 +#define ROFDM0_XDTXAFE 0xc9c #define ROFDM0_RXHPPARAMETER 0xce0 #define ROFDM0_TXPSEUDONOISEWGT 0xce4 @@ -1186,7 +1164,7 @@ #define ROFDM_AGCREPORT 0xdd0 #define ROFDM_RXSNR 0xdd4 #define ROFDM_RXEVMCSI 0xdd8 -#define ROFDM_SIGReport 0xddc +#define ROFDM_SIGREPORT 0xddc /* 8. PageE(0xE00) */ #define RTXAGC_A_RATE18_06 0xe00 @@ -1228,7 +1206,7 @@ #define RF_IPA 0x15 #define RF_POW_ABILITY 0x17 #define RF_MODE_AG 0x18 -#define rRfChannel 0x18 +#define rfchannel 0x18 #define RF_CHNLBW 0x18 #define RF_TOP 0x19 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c index 021d3c538ac2..915a36f7af5e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h index c650a8dcdb26..4e646cc9ebc0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92D_RF_H__ #define __RTL92D_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index d5ba2bace79b..99e5cd9a5c86 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h index fd7d036e9abc..19db4ce30e44 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92DE_SW_H__ #define __RTL92DE_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c index 4badb183cf35..9b35c65d91f2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c @@ -1,28 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - * Created on 2010/12/23, 6:38 - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h index 7fefc483ec28..2a45082f441f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h @@ -1,28 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - * Created on 2010/ 5/18, 1:41 - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92DE_TABLE__H_ #define __RTL92DE_TABLE__H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index d7b023cf7400..d162884a9e00 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -494,7 +472,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, stats->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) && (GET_RX_DESC_FAGGR(pdesc) == 1)); stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); - stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + stats->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); stats->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index f7f776539438..36820070fd76 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92DE_TRX_H__ #define __RTL92DE_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h index 9f7e7bb8610b..fe1b7cdab1d5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_DEF_H__ #define __RTL92E_DEF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c index faed6e2dedf6..648f9108ed4b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h index 107d5a488fa8..ec48e5671b5b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_DM_H__ #define __RTL92E_DM_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index 84a0d0eb72e1..7c5b54b71a92 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h index 6a2fbf20d579..cbfecea232a3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E__FW__H__ #define __RTL92E__FW__H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 3f2295fdb02d..53011c2a44f5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h index 3a63bec9b0cc..fc224392615f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_HW_H__ #define __RTL92E_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c index 96c64785108b..78202ad4036e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h index 8ef640a2ef7f..6d775e14846f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_LED_H__ #define __RTL92E_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 8b072ee8e0d5..222abc41669c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h index 49bd0e554c65..1a5dbc628379 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_PHY_H__ #define __RTL92E_PHY_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c index 1a701d007f0c..515c1c3d6b43 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "pwrseq.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h index c570801508cc..2ae8347f7ebb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_PWRSEQ_H__ #define __RTL92E_PWRSEQ_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h index 1eaa1fab550d..0164e006f43e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_REG_H__ #define __RTL92E_REG_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c index bc76a91da762..6b8ef680dc57 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h index 039c0133ad6b..d7b391599926 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_RF_H__ #define __RTL92E_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 9ea62599ecbb..b6ee7dae5943 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h index 21433d0332d0..36e29a2da0fd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_SW_H__ #define __RTL92E_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c index abcdd0670fd8..fb66f610bf5d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "table.h" u32 RTL8192EE_PHY_REG_ARRAY[] = { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h index bff9df88815d..0bc7ef58c3b9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_TABLE__H_ #define __RTL92E_TABLE__H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 14d6e3fc5767..09cf8180e4ff 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -393,7 +371,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw, if (status->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (status->rx_is40Mhzpacket) + if (status->rx_is40mhzpacket) rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index 45df3e79f490..a9e5e620a653 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL92E_TRX_H__ #define __RTL92E_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h index b5ba0554a0cd..bb6b60814762 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __REALTEK_92S_DEF_H__ #define __REALTEK_92S_DEF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c index 44f510a94b09..a6e4384ceea1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h index 3af07efed73a..b9c5a92c2bd0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __RTL_92S_DM_H__ #define __RTL_92S_DM_H__ @@ -79,7 +58,7 @@ enum dm_ratr_sta { #define DM_DIG_HIGH_PWR_THRESH_HIGH 75 #define DM_DIG_HIGH_PWR_THRESH_LOW 70 -#define DM_DIG_MIN_Netcore 0x12 +#define DM_DIG_MIN_NETCORE 0x12 void rtl92s_dm_watchdog(struct ieee80211_hw *hw); void rtl92s_dm_init(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c index e7b1d7c0f542..541b7881735e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -158,7 +136,6 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct sk_buff *skb; struct rtl_tcb_desc *tcb_desc; - unsigned char *seg_ptr; u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE; u16 frag_length, frag_offset = 0; u16 extra_descoffset = 0; @@ -188,9 +165,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw, if (!skb) return false; skb_reserve(skb, extra_descoffset); - seg_ptr = skb_put_data(skb, - code_virtual_address + frag_offset, - (u32)(frag_length - extra_descoffset)); + skb_put_data(skb, code_virtual_address + frag_offset, + (u32)(frag_length - extra_descoffset)); tcb_desc = (struct rtl_tcb_desc *)(skb->cb); tcb_desc->queue_index = TXCMD_QUEUE; @@ -569,14 +545,14 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd, return true; } -void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 Mode) +void rtl92s_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct h2c_set_pwrmode_parm pwrmode; u16 max_wakeup_period = 0; - pwrmode.mode = Mode; + pwrmode.mode = mode; pwrmode.flag_low_traffic_en = 0; pwrmode.flag_lpnav_en = 0; pwrmode.flag_rf_low_snr_en = 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h index 5827aa32cef0..99c6f7eefd85 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __REALTEK_FIRMWARE92S_H__ #define __REALTEK_FIRMWARE92S_H__ @@ -297,7 +276,7 @@ enum fw_h2c_cmd { H2C_JOINBSS_CMD, H2C_DISCONNECT_CMD, /*15*/ H2C_CREATEBSS_CMD, - H2C_SETOPMode_CMD, + H2C_SETOPMODE_CMD, H2C_SITESURVEY_CMD, H2C_SETAUTH_CMD, H2C_SETKEY_CMD, /*20*/ @@ -336,10 +315,10 @@ enum fw_h2c_cmd { /* The following macros are used for FW * CMD map and parameter updated. 
*/ -#define FW_CMD_IO_CLR(rtlpriv, _Bit) \ +#define FW_CMD_IO_CLR(rtlpriv, _bit) \ do { \ udelay(1000); \ - rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \ + rtlpriv->rtlhal.fwcmd_iomap &= (~_bit); \ } while (0) #define FW_CMD_IO_UPDATE(rtlpriv, _val) \ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 30dea7b9bc17..6d6e8994460d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" @@ -258,7 +236,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&( mac->ac[0].aifs)); u8 acm = p_aci_aifsn->f.acm; - u8 acm_ctrl = rtl_read_byte(rtlpriv, AcmHwCtrl); + u8 acm_ctrl = rtl_read_byte(rtlpriv, ACMHWCTRL); acm_ctrl = acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1); @@ -266,13 +244,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) if (acm) { switch (e_aci) { case AC0_BE: - acm_ctrl |= AcmHw_BeqEn; + acm_ctrl |= ACMHW_BEQEN; break; case AC2_VI: - acm_ctrl |= AcmHw_ViqEn; + acm_ctrl |= ACMHW_VIQEN; break; case AC3_VO: - acm_ctrl |= AcmHw_VoqEn; + acm_ctrl |= ACMHW_VOQEN; break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, @@ -283,13 +261,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } else { switch (e_aci) { case AC0_BE: - acm_ctrl &= (~AcmHw_BeqEn); + acm_ctrl &= (~ACMHW_BEQEN); break; case AC2_VI: - acm_ctrl &= (~AcmHw_ViqEn); + acm_ctrl &= (~ACMHW_VIQEN); break; case AC3_VO: - acm_ctrl &= (~AcmHw_VoqEn); + acm_ctrl &= (~ACMHW_VOQEN); break; default: pr_err("switch case %#x not processed\n", @@ -300,7 +278,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, "HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl); - rtl_write_byte(rtlpriv, AcmHwCtrl, acm_ctrl); + rtl_write_byte(rtlpriv, ACMHWCTRL, acm_ctrl); break; } case HW_VAR_RCR:{ @@ -869,7 +847,7 @@ static void _rtl92se_macconfig_after_fwdownload(struct ieee80211_hw *hw) /* 10. Power Save Control Register (Offset: 0x0260 - 0x02DF) */ /* 11. General Purpose Register (Offset: 0x02E0 - 0x02FF) */ /* 12. Host Interrupt Status Register (Offset: 0x0300 - 0x030F) */ - /* 13. Test Mode and Debug Control Register (Offset: 0x0310 - 0x034F) */ + /* 13. Test mode and Debug Control Register (Offset: 0x0310 - 0x034F) */ /* 14. Set driver info, we only accept PHY status now. 
*/ rtl_write_byte(rtlpriv, RXDRVINFO_SZ, 4); @@ -1641,7 +1619,7 @@ void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw, rtl92se_enable_interrupt(hw); } -static void _rtl8192se_get_IC_Inferiority(struct ieee80211_hw *hw) +static void _rtl8192se_get_ic_inferiority(struct ieee80211_hw *hw) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1704,7 +1682,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw) if (rtlefuse->autoload_failflag) return; - _rtl8192se_get_IC_Inferiority(hw); + _rtl8192se_get_ic_inferiority(hw); /* Read IC Version && Channel Plan */ /* VID, DID SE 0xA-D */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h index fa836ceefc8f..5edbc1ecd261 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __REALTEK_PCI92SE_HW_H__ #define __REALTEK_PCI92SE_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c index 33c307aca911..2d18bc1ee480 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h index 90e265d9ffc6..c9e481a8d943 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __REALTEK_PCI92SE_LED_H__ #define __REALTEK_PCI92SE_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index 86cb853f7169..d5c0eb462315 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -549,13 +527,13 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw, RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { bool rtstatus; - u32 InitializeCount = 0; + u32 initializecount = 0; do { - InitializeCount++; + initializecount++; RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, "IPS Set eRf nic enable\n"); rtstatus = rtl_ps_enable_nic(hw); - } while (!rtstatus && (InitializeCount < 10)); + } while (!rtstatus && (initializecount < 10)); RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); @@ -935,7 +913,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) if (!rtstatus) { pr_err("Write BB Reg Fail!!\n"); - goto phy_BB8190_Config_ParaFile_Fail; + goto phy_bb8190_config_parafile_fail; } /* 2. If EEPROM or EFUSE autoload OK, We must config by @@ -948,7 +926,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) } if (!rtstatus) { pr_err("_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n"); - goto phy_BB8190_Config_ParaFile_Fail; + goto phy_bb8190_config_parafile_fail; } /* 3. BB AGC table Initialization */ @@ -956,7 +934,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) if (!rtstatus) { pr_err("%s(): AGC Table Fail\n", __func__); - goto phy_BB8190_Config_ParaFile_Fail; + goto phy_bb8190_config_parafile_fail; } /* Check if the CCK HighPower is turned ON. */ @@ -964,7 +942,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw) rtlphy->cck_high_power = (bool)(rtl92s_phy_query_bb_reg(hw, RFPGA0_XA_HSSIPARAMETER2, 0x200)); -phy_BB8190_Config_ParaFile_Fail: +phy_bb8190_config_parafile_fail: return rtstatus; } @@ -1029,13 +1007,13 @@ bool rtl92s_phy_mac_config(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); u32 i; u32 arraylength; - u32 *ptraArray; + u32 *ptrarray; arraylength = MAC_2T_ARRAYLENGTH; - ptraArray = rtl8192semac_2t_array; + ptrarray = rtl8192semac_2t_array; for (i = 0; i < arraylength; i = i + 2) - rtl_write_byte(rtlpriv, ptraArray[i], (u8)ptraArray[i + 1]); + rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]); return true; } @@ -1128,7 +1106,7 @@ void rtl92s_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) } static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel, - u8 *cckpowerlevel, u8 *ofdmpowerLevel) + u8 *cckpowerlevel, u8 *ofdmpowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -1144,15 +1122,15 @@ static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel, /* 2. 
OFDM for 1T or 2T */ if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) { /* Read HT 40 OFDM TX power */ - ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_1s[0][index]; - ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_1s[1][index]; + ofdmpowerlevel[0] = rtlefuse->txpwrlevel_ht40_1s[0][index]; + ofdmpowerlevel[1] = rtlefuse->txpwrlevel_ht40_1s[1][index]; } else if (rtlphy->rf_type == RF_2T2R) { /* Read HT 40 OFDM TX power */ - ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index]; - ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index]; + ofdmpowerlevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index]; + ofdmpowerlevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index]; } else { - ofdmpowerLevel[0] = 0; - ofdmpowerLevel[1] = 0; + ofdmpowerlevel[0] = 0; + ofdmpowerlevel[1] = 0; } } @@ -1171,7 +1149,7 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); /* [0]:RF-A, [1]:RF-B */ - u8 cckpowerlevel[2], ofdmpowerLevel[2]; + u8 cckpowerlevel[2], ofdmpowerlevel[2]; if (!rtlefuse->txpwr_fromeprom) return; @@ -1183,18 +1161,18 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel) * 1. For CCK. * 2. For OFDM 1T or 2T */ _rtl92s_phy_get_txpower_index(hw, channel, &cckpowerlevel[0], - &ofdmpowerLevel[0]); + &ofdmpowerlevel[0]); RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Channel-%d, cckPowerLevel (A / B) = 0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n", channel, cckpowerlevel[0], cckpowerlevel[1], - ofdmpowerLevel[0], ofdmpowerLevel[1]); + ofdmpowerlevel[0], ofdmpowerlevel[1]); _rtl92s_phy_ccxpower_indexcheck(hw, channel, &cckpowerlevel[0], - &ofdmpowerLevel[0]); + &ofdmpowerlevel[0]); rtl92s_phy_rf6052_set_ccktxpower(hw, cckpowerlevel[0]); - rtl92s_phy_rf6052_set_ofdmtxpower(hw, &ofdmpowerLevel[0], channel); + rtl92s_phy_rf6052_set_ofdmtxpower(hw, &ofdmpowerlevel[0], channel); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h index 7a3b6b623872..b8b5f097b67b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __RTL92S_PHY_H__ #define __RTL92S_PHY_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h index 5d445c2afcf3..45f968e0e57c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __REALTEK_92S_REG_H__ #define __REALTEK_92S_REG_H__ @@ -190,7 +169,7 @@ #define BCNTCFG 0x01E0 #define CWRR 0x01E2 #define ACMAVG 0x01E4 -#define AcmHwCtrl 0x01E7 +#define ACMHWCTRL 0x01E7 #define VO_ADMTM 0x01E8 #define VI_ADMTM 0x01EC #define BE_ADMTM 0x01F0 @@ -256,7 +235,7 @@ #define INTA_MASK 0x0300 #define ISR 0x0308 -/* 13. Test Mode and Debug Control Registers */ +/* 13. 
Test mode and Debug Control Registers */ #define DBG_PORT_SWITCH 0x003A #define BIST 0x0310 #define DBS 0x0314 @@ -346,9 +325,9 @@ #define SYS_SWHW_SEL BIT(14) #define SYS_FWHW_SEL BIT(15) -#define CmdEEPROM_En BIT(5) -#define CmdEERPOMSEL BIT(4) -#define Cmd9346CR_9356SEL BIT(4) +#define CMDEEPROM_EN BIT(5) +#define CMDEERPOMSEL BIT(4) +#define CMD9346CR_9356SEL BIT(4) #define AFE_MBEN BIT(1) #define AFE_BGEN BIT(0) @@ -369,9 +348,9 @@ #define APLL_EN BIT(0) -#define AFR_CardBEn BIT(0) +#define AFR_CARDBEN BIT(0) #define AFR_CLKRUN_SEL BIT(1) -#define AFR_FuncRegEn BIT(2) +#define AFR_FUNCREGEN BIT(2) #define APSDOFF_STATUS BIT(15) #define APSDOFF BIT(14) @@ -387,13 +366,13 @@ #define HCI_RXDMA_EN BIT(3) #define HCI_TXDMA_EN BIT(2) -#define StopHCCA BIT(6) -#define StopHigh BIT(5) -#define StopMgt BIT(4) -#define StopVO BIT(3) -#define StopVI BIT(2) -#define StopBE BIT(1) -#define StopBK BIT(0) +#define STOPHCCA BIT(6) +#define STOPHIGH BIT(5) +#define STOPMGT BIT(4) +#define STOPVO BIT(3) +#define STOPVI BIT(2) +#define STOPBE BIT(1) +#define STOPBK BIT(0) #define LBK_NORMAL 0x00 #define LBK_MAC_LB (BIT(0) | BIT(1) | BIT(3)) @@ -405,7 +384,7 @@ #define TXDMAPRE2FULL BIT(23) #define DISCW BIT(20) #define TCRICV BIT(19) -#define CfendForm BIT(17) +#define cfendform BIT(17) #define TCRCRC BIT(16) #define FAKE_IMEM_EN BIT(15) #define TSFRST BIT(9) @@ -530,7 +509,7 @@ #define RRSR_MCS5 BIT(17) #define RRSR_MCS6 BIT(18) #define RRSR_MCS7 BIT(19) -#define BRSR_AckShortPmb BIT(23) +#define BRSR_ACKSHORTPMB BIT(23) #define RATR_1M 0x00000001 #define RATR_2M 0x00000002 @@ -581,13 +560,13 @@ #define AC_PARAM_ECW_MIN_OFFSET 8 #define AC_PARAM_AIFS_OFFSET 0 -#define AcmHw_HwEn BIT(0) -#define AcmHw_BeqEn BIT(1) -#define AcmHw_ViqEn BIT(2) -#define AcmHw_VoqEn BIT(3) -#define AcmHw_BeqStatus BIT(4) -#define AcmHw_ViqStatus BIT(5) -#define AcmHw_VoqStatus BIT(6) +#define ACMHW_HWEN BIT(0) +#define ACMHW_BEQEN BIT(1) +#define ACMHW_VIQEN BIT(2) +#define ACMHW_VOQEN BIT(3) +#define ACMHW_BEQSTATUS BIT(4) +#define ACMHW_VIQSTATUS BIT(5) +#define ACMHW_VOQSTATUS BIT(6) #define RETRY_LIMIT_SHORT_SHIFT 8 #define RETRY_LIMIT_LONG_SHIFT 0 @@ -845,7 +824,7 @@ #define TCR_SAT BIT(24) #define RCR_MXDMA_OFFSET 8 #define RCR_FIFO_OFFSET 13 -#define RCR_OnlyErlPkt BIT(31) +#define RCR_ONLYERLPKT BIT(31) #define CWR 0xDC #define RETRYCTR 0xDE diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c index ea5b8ec45ec9..a37855f57e76 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h index e9ba283d05ad..a5959a228a35 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __INC_RTL92S_RF_H #define __INC_RTL92S_RF_H diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index d55554b7fa9a..d1d84e7d47a4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h index af449d6714e6..a31efba0e6b5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.h @@ -1,25 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __REALTEK_PCI92SE_SW_H__ #define __REALTEK_PCI92SE_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c index 162578f05c85..776e28e99d53 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c @@ -1,28 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - * Created on 2010/ 5/18, 1:41 - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "table.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h index aa3c7687d226..4fd09f11f5bc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h @@ -1,20 +1,6 @@ -/****************************************************************************** - * Copyright(c) 2008 - 2012 Realtek Corporation. All rights reserved. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. 
- * - * Contact Information: - * wlanfae - * - * Larry Finger - * - ******************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __INC_HAL8192SE_FW_IMG_H #define __INC_HAL8192SE_FW_IMG_H diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index e1904c39f147..efb432c6d785 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -275,7 +253,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, stats->isfirst_ampdu = (bool) ((GET_RX_STATUS_DESC_PAGGR(pdesc) == 1) && (GET_RX_STATUS_DESC_FAGGR(pdesc) == 1)); stats->timestamp_low = GET_RX_STATUS_DESC_TSFL(pdesc); - stats->rx_is40Mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc); + stats->rx_is40mhzpacket = (bool)GET_RX_STATUS_DESC_BW(pdesc); stats->is_ht = (bool)GET_RX_STATUS_DESC_RX_HT(pdesc); stats->is_cck = SE_RX_HAL_IS_CCK_RATE(pdesc); @@ -288,7 +266,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, if (stats->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (stats->rx_is40Mhzpacket) + if (stats->rx_is40mhzpacket) rx_status->bw = RATE_INFO_BW_40; if (stats->is_ht) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h index 81a5445c04a3..90aa12fc6a7f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 
2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #ifndef __REALTEK_PCI92SE_TRX_H__ #define __REALTEK_PCI92SE_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h index 06c448c010fd..20a67dcc59cb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h @@ -1,26 +1,5 @@ -/****************************************************************************** - ** - ** Copyright(c) 2009-2012 Realtek Corporation. - ** - ** This program is free software; you can redistribute it and/or modify it - ** under the terms of version 2 of the GNU General Public License as - ** published by the Free Software Foundation. - ** - ** This program is distributed in the hope that it will be useful, but WITHOUT - ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - ** FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - ** more details. - ** - ** The full GNU General Public License is included in this distribution in the - ** file called LICENSE. - ** - ** Contact Information: - ** wlanfae - ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - ** Hsinchu 300, Taiwan. - ** Larry Finger - ** - ******************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_BTC_H__ #define __RTL8723E_BTC_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h index 847544817549..42958df6b5d4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_DEF_H__ #define __RTL8723E_DEF_H__ @@ -116,7 +94,7 @@ #define IS_VENDOR_8723A_B_CUT(version) ((IS_8723_SERIES(version))\ ? ((GET_CVID_CUT_VERSION(version) == \ B_CUT_VERSION) ? true : false) : false) -#define IS_81xxC_VENDOR_UMC_B_CUT(version) ((IS_CHIP_VENDOR_UMC(version))\ +#define IS_81XXC_VENDOR_UMC_B_CUT(version) ((IS_CHIP_VENDOR_UMC(version))\ ? ((GET_CVID_CUT_VERSION(version) == \ B_CUT_VERSION) ? 
true : false) : false) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 42a6fba90ba9..514891ea2c64 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" @@ -151,8 +129,14 @@ static u8 rtl8723e_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct dig_t *dm_digtable = &rtlpriv->dm_digtable; + struct rtl_mac *mac = rtl_mac(rtlpriv); long rssi_val_min = 0; + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION && + rtlpriv->link_info.bcn_rx_inperiod == 0) + return 0; + if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) && (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) { if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0) @@ -417,6 +401,8 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) } else { rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47); + dm_digtable->pre_cck_fa_state = 0; + dm_digtable->cur_cck_fa_state = 0; } dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state; @@ -665,7 +651,7 @@ void rtl8723e_dm_check_txpower_tracking(struct ieee80211_hw *hw) void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rate_adaptive *p_ra = &(rtlpriv->ra); + struct rate_adaptive *p_ra = &rtlpriv->ra; p_ra->ratr_state = DM_RATR_STA_INIT; p_ra->pre_ratr_state = DM_RATR_STA_INIT; @@ -677,6 +663,89 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) } +void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rate_adaptive *p_ra = &rtlpriv->ra; + u32 low_rssithresh_for_ra, high_rssithresh_for_ra; + struct ieee80211_sta *sta = NULL; + + if (is_hal_stop(rtlhal)) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + " driver is going to unload\n"); + return; + } + + if (!rtlpriv->dm.useramask) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + " driver does not control rate adaptive mask\n"); + return; + } + + if (mac->link_state == MAC80211_LINKED && + mac->opmode == NL80211_IFTYPE_STATION) { + switch (p_ra->pre_ratr_state) { + case DM_RATR_STA_HIGH: + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; + 
break; + case DM_RATR_STA_MIDDLE: + high_rssithresh_for_ra = 55; + low_rssithresh_for_ra = 20; + break; + case DM_RATR_STA_LOW: + high_rssithresh_for_ra = 60; + low_rssithresh_for_ra = 25; + break; + default: + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; + break; + } + + if (rtlpriv->link_info.bcn_rx_inperiod == 0) + switch (p_ra->pre_ratr_state) { + case DM_RATR_STA_HIGH: + default: + p_ra->ratr_state = DM_RATR_STA_MIDDLE; + break; + case DM_RATR_STA_MIDDLE: + case DM_RATR_STA_LOW: + p_ra->ratr_state = DM_RATR_STA_LOW; + break; + } + else if (rtlpriv->dm.undec_sm_pwdb > high_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_HIGH; + else if (rtlpriv->dm.undec_sm_pwdb > low_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_MIDDLE; + else + p_ra->ratr_state = DM_RATR_STA_LOW; + + if (p_ra->pre_ratr_state != p_ra->ratr_state) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI = %ld\n", + rtlpriv->dm.undec_sm_pwdb); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "RSSI_LEVEL = %d\n", p_ra->ratr_state); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + "PreState = %d, CurState = %d\n", + p_ra->pre_ratr_state, p_ra->ratr_state); + + rcu_read_lock(); + sta = rtl_find_sta(hw, mac->bssid); + if (sta) + rtlpriv->cfg->ops->update_rate_tbl(hw, sta, + p_ra->ratr_state, + true); + rcu_read_unlock(); + + p_ra->pre_ratr_state = p_ra->ratr_state; + } + } +} + void rtl8723e_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -826,7 +895,7 @@ void rtl8723e_dm_watchdog(struct ieee80211_hw *hw) rtl8723e_dm_dynamic_bb_powersaving(hw); rtl8723e_dm_dynamic_txpower(hw); rtl8723e_dm_check_txpower_tracking(hw); - /* rtl92c_dm_refresh_rate_adaptive_mask(hw); */ + rtl8723e_dm_refresh_rate_adaptive_mask(hw); rtl8723e_dm_bt_coexist(hw); rtl8723e_dm_check_edca_turbo(hw); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h index a113780af08a..bcad516f0d28 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_DM_H__ #define __RTL8723E_DM_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c index bf9859f74b6f..be451a6f7dbe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h index 2e668fcfc5c2..5c843736de8d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C__FW__H__ #define __RTL92C__FW__H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c index 5aac45d5a974..3ac31ec26517 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "hal_bt_coexist.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h index 45719fdcb067..0455a3712f3e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_HAL_BT_COEXIST_H__ #define __RTL8723E_HAL_BT_COEXIST_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index 788de88ab1ee..680198280f8f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #include "hal_btc.h" #include "../pci.h" #include "phy.h" @@ -1426,7 +1405,6 @@ static void rtl8723e_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw) static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 bt_retry_cnt; u8 bt_info_original; RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "[BTCoex] Get bt info by fw!!\n"); @@ -1438,7 +1416,6 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) "[BTCoex] c2h for bt_info not rcvd yet!!\n"); } - bt_retry_cnt = hal_coex_8723.bt_retry_cnt; bt_info_original = hal_coex_8723.c2h_bt_info_original; /* when bt inquiry or page scan, we have to set h2c 0x25 */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h index 756868897d8b..620677128f11 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_HAL_BTC_H__ #define __RTL8723E_HAL_BTC_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index f783e4a8083d..6bab162e1bb8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" @@ -698,7 +676,7 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw) /* HW Power on sequence */ if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, - PWR_INTF_PCI_MSK, Rtl8723_NIC_ENABLE_FLOW)) + PWR_INTF_PCI_MSK, RTL8723_NIC_ENABLE_FLOW)) return false; bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+2); @@ -988,7 +966,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw) if (IS_VENDOR_UMC_A_CUT(rtlhal->version)) { rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255); rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00); - } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { + } else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) { rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE); rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31); rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425); @@ -1284,7 +1262,7 @@ static void _rtl8723e_poweroff_adapter(struct ieee80211_hw *hw) /* Combo (PCIe + USB) Card and PCIe-MF Card */ /* 1. Run LPS WL RFOFF flow */ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, - PWR_INTF_PCI_MSK, Rtl8723_NIC_LPS_ENTER_FLOW); + PWR_INTF_PCI_MSK, RTL8723_NIC_LPS_ENTER_FLOW); /* 2. 0x1F[7:0] = 0 */ /* turn off RF */ @@ -1304,7 +1282,7 @@ static void _rtl8723e_poweroff_adapter(struct ieee80211_hw *hw) /* HW card disable configuration. */ rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, - PWR_INTF_PCI_MSK, Rtl8723_NIC_DISABLE_FLOW); + PWR_INTF_PCI_MSK, RTL8723_NIC_DISABLE_FLOW); /* Reset MCU IO Wrapper */ u1b_tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h index c76e453f4f43..cf55f45d0779 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_HW_H__ #define __RTL8723E_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c index d567b0df0e9f..5e503dbc463b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h index c22b19f542a6..9f85845d23cd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92CE_LED_H__ #define __RTL92CE_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index 3f3327878b51..54a3aec1dfa7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -905,7 +883,7 @@ static void _rtl8723e_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel) struct rtl_phy *rtlphy = &rtlpriv->phy; struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { + if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) { if (channel == 6 && rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h index b85f5c7c5c01..98bfe02f66d5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL92C_PHY_H__ #define __RTL92C_PHY_H__ @@ -48,7 +26,7 @@ #define MAX_STALL_TIME 50 #define ANTENNADIVERSITYVALUE 0x80 #define MAX_TXPWR_IDX_NMODE_92S 63 -#define Reset_Cnt_Limit 3 +#define reset_cnt_limit 3 #define IQK_ADDA_REG_NUM 16 #define IQK_MAC_REG_NUM 4 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c index 2f7f81af8a55..041e3113a500 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../pwrseqcmd.h" #include "pwrseq.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h index e6c3aac3e9fd..d9247a8f3039 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_PWRSEQ_H__ #define __RTL8723E_PWRSEQ_H__ @@ -327,14 +305,14 @@ extern struct wlan_pwr_cfg rtl8723A_leave_lps_flow [RTL8723A_TRANS_LPS_TO_ACT_STEPS + RTL8723A_TRANS_END_STEPS]; /* RTL8723 Power Configuration CMDs for PCIe interface */ -#define Rtl8723_NIC_PWR_ON_FLOW rtl8723A_power_on_flow -#define Rtl8723_NIC_RF_OFF_FLOW rtl8723A_radio_off_flow -#define Rtl8723_NIC_DISABLE_FLOW rtl8723A_card_disable_flow -#define Rtl8723_NIC_ENABLE_FLOW rtl8723A_card_enable_flow -#define Rtl8723_NIC_SUSPEND_FLOW rtl8723A_suspend_flow -#define Rtl8723_NIC_RESUME_FLOW rtl8723A_resume_flow -#define Rtl8723_NIC_PDN_FLOW rtl8723A_hwpdn_flow -#define Rtl8723_NIC_LPS_ENTER_FLOW rtl8723A_enter_lps_flow -#define Rtl8723_NIC_LPS_LEAVE_FLOW rtl8723A_leave_lps_flow +#define RTL8723_NIC_PWR_ON_FLOW rtl8723A_power_on_flow +#define RTL8723_NIC_RF_OFF_FLOW rtl8723A_radio_off_flow +#define RTL8723_NIC_DISABLE_FLOW rtl8723A_card_disable_flow +#define RTL8723_NIC_ENABLE_FLOW rtl8723A_card_enable_flow +#define RTL8723_NIC_SUSPEND_FLOW rtl8723A_suspend_flow +#define RTL8723_NIC_RESUME_FLOW rtl8723A_resume_flow +#define RTL8723_NIC_PDN_FLOW rtl8723A_hwpdn_flow +#define RTL8723_NIC_LPS_ENTER_FLOW rtl8723A_enter_lps_flow +#define RTL8723_NIC_LPS_LEAVE_FLOW rtl8723A_leave_lps_flow #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h index 30938cd9fce5..8696614c7cdd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_REG_H__ #define __RTL8723E_REG_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c index 89958b64b52d..9058527a7f94 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h index 7b44ebc0fac9..b445cfe65bf4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_RF_H__ #define __RTL8723E_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 07b82700d1de..4b370410c83c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" @@ -175,7 +153,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) return 1; } - if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) fw_name = "rtlwifi/rtl8723fw_B.bin"; rtlpriv->max_fw_size = 0x6000; @@ -266,8 +244,8 @@ static struct rtl_hal_ops rtl8723e_hal_ops = { static struct rtl_mod_params rtl8723e_mod_params = { .sw_crypto = false, .inactiveps = true, - .swctrl_lps = false, - .fwctrl_lps = true, + .swctrl_lps = true, + .fwctrl_lps = false, .aspm_support = 1, .debug_level = 0, .debug_mask = 0, @@ -395,8 +373,8 @@ module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n"); +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n"); MODULE_PARM_DESC(aspm, "Set to 1 to enable ASPM (default 1)\n"); MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h index 46478780d262..200ce39a0dd7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_SW_H__ #define __RTL8723E_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c index 1bbee0bfac23..d895694be023 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "table.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h index a044f3c456fa..4897dbc73b9f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_TABLE__H_ #define __RTL8723E_TABLE__H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index d461d0c9631f..90dc91b0d35b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -302,7 +280,7 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, status->isfirst_ampdu = (bool)((GET_RX_DESC_PAGGR(pdesc) == 1) && (GET_RX_DESC_FAGGR(pdesc) == 1)); status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40Mhzpacket = (bool)GET_RX_DESC_BW(pdesc); + status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); status->is_cck = RX_HAL_IS_CCK_RATE(status->rate); @@ -316,7 +294,7 @@ bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw, if (status->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (status->rx_is40Mhzpacket) + if (status->rx_is40mhzpacket) rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index d592b08d4ac8..4a19ea76b290 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL8723E_TRX_H__ #define __RTL8723E_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h index 5e5403d69220..f4886c868df8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_DEF_H__ #define __RTL8723BE_DEF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c index 47e87a21ae27..b13fd3c0c832 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" @@ -1017,12 +995,9 @@ static void rtl8723be_dm_check_edca_turbo(struct ieee80211_hw *hw) u32 edca_be = 0x5ea42b; u32 iot_peer = 0; bool b_is_cur_rdlstate; - bool b_last_is_cur_rdlstate = false; bool b_bias_on_rx = false; bool b_edca_turbo_on = false; - b_last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate; - cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h index f752a2cad63d..c4f36e951747 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h @@ -1,24 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_DM_H__ #define __RTL8723BE_DM_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index f2441fbb92f1..4d7fa27f55ca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h index 948d28646364..97ea77464139 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE__FW__H__ #define __RTL8723BE__FW__H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index b4f3f91b590e..979e5bfe5f45 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" @@ -319,12 +297,12 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; break; case HW_VAR_FWLPS_RF_ON:{ - enum rf_pwrstate rfState; + enum rf_pwrstate rfstate; u32 val_rcr; rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, - (u8 *)(&rfState)); - if (rfState == ERFOFF) { + (u8 *)(&rfstate)); + if (rfstate == ERFOFF) { *((bool *)(val)) = true; } else { val_rcr = rtl_read_dword(rtlpriv, REG_RCR); @@ -764,10 +742,10 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned short i; u8 txpktbuf_bndy; - u8 maxPage; + u8 maxpage; bool status; - maxPage = 255; + maxpage = 255; txpktbuf_bndy = 245; rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, @@ -792,13 +770,13 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw) if (!status) return status; - for (i = txpktbuf_bndy; i < maxPage; i++) { + for (i = txpktbuf_bndy; i < maxpage; i++) { status = _rtl8723be_llt_write(hw, i, (i + 1)); if (!status) return status; } - status = _rtl8723be_llt_write(hw, maxPage, txpktbuf_bndy); + status = _rtl8723be_llt_write(hw, maxpage, txpktbuf_bndy); if (!status) return status; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h index ae856a19e81a..eb797e9bb931 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_HW_H__ #define __RTL8723BE_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c index 4f7890d62c21..525f2c47da5b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h index c57de379ee8d..8ac59374b632 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_LED_H__ #define __RTL8723BE_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 1263b12db5dc..aa8a0950fcea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -332,7 +310,7 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_SetTxPowerByRateBase()\n", @@ -374,7 +352,7 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw, "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n", rate_section, path, txnum); break; - }; + } } else { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", @@ -694,7 +672,7 @@ static u8 _rtl8723be_get_rate_section_index(u32 regaddr) else if (regaddr >= 0xE20 && regaddr <= 0xE4C) index = (u8)((regaddr - 0xE20) / 4); break; - }; + } return index; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h index 9021d4745ab7..a59813ea7c72 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_PHY_H__ #define __RTL8723BE_PHY_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c index a1bb1f6116fb..95adac66676e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../pwrseqcmd.h" #include "pwrseq.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h index 3367cfbc9502..57eac4864184 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_PWRSEQ_H__ #define __RTL8723BE_PWRSEQ_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h index 95c4f8e206c7..28fd22b2a792 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_REG_H__ #define __RTL8723BE_REG_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c index 48491454b878..af72e489e31c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h index f423e157020f..81537a1a4fdf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_RF_H__ #define __RTL8723BE_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index c9f7b042d9c6..00e6254bf82b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h index a7b25e769950..6ecacf9fbfd7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_SW_H__ #define __RTL8723BE_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c index 160fee8333ae..5864be89d187 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include #include "table.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h index 1deaffe22251..cf0c8d58cea1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_TABLE__H_ #define __RTL8723BE_TABLE__H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 9f8dfb5af774..9ada9a06c6ea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -338,7 +316,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); status->isfirst_ampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1); status->timestamp_low = GET_RX_DESC_TSFL(pdesc); - status->rx_is40Mhzpacket = (bool)GET_RX_DESC_BW(pdesc); + status->rx_is40mhzpacket = (bool)GET_RX_DESC_BW(pdesc); status->bandwidth = (u8)GET_RX_DESC_BW(pdesc); status->macid = GET_RX_DESC_MACID(pdesc); status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc); @@ -372,7 +350,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw, if (status->crc) rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (status->rx_is40Mhzpacket) + if (status->rx_is40mhzpacket) rx_status->bw = RATE_INFO_BW_40; if (status->is_ht) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 609f7ad7f787..11e75a4e68bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __RTL8723BE_TRX_H__ #define __RTL8723BE_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c index 064340641913..46ab90332eb7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "dm_common.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h index 5c1b94ce2f86..6300be65aba0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __DM_COMMON_H__ #define __DM_COMMON_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c index 521039c5d4ce..18ce2856a91b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h index 77c25a976233..b527fcbbdf08 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __FW_COMMON_H__ #define __FW_COMMON_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c index 9014a94fac6a..f5a9ecbf7363 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c index 43d24e1ee5e6..aae14c68bf69 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #include "../wifi.h" #include "phy_common.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h index 83b891a9adb8..edf1c52f0ee2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2014 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2014 Realtek Corporation.*/ #ifndef __PHY_COMMON__ #define __PHY_COMMON__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h index 3fe3aaa5fe3c..827bc5f35d2a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_DEF_H__ #define __RTL8821AE_DEF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index 3be8c88971e2..49d05b631ba1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "../base.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h index 625a6bbb21fc..137ed735d80d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_DM_H__ #define __RTL8821AE_DM_H__ @@ -92,11 +70,11 @@ #define DM_REG_CCK_CCA_CNT_11N 0xA60 #define DM_REG_BB_PWR_SAV4_11N 0xA74 /*PAGE B */ -#define DM_REG_LNA_SWITCH_11N 0xB2C -#define DM_REG_PATH_SWITCH_11N 0xB30 -#define DM_REG_RSSI_CTRL_11N 0xB38 -#define DM_REG_CONFIG_ANTA_11N 0xB68 -#define DM_REG_RSSI_BT_11N 0xB9C +#define DM_REG_LNA_SWITCH_11N 0XB2C +#define DM_REG_PATH_SWITCH_11N 0XB30 +#define DM_REG_RSSI_CTRL_11N 0XB38 +#define DM_REG_CONFIG_ANTA_11N 0XB68 +#define DM_REG_RSSI_BT_11N 0XB9C /*PAGE C */ #define DM_REG_OFDM_FA_HOLDC_11N 0xC00 #define DM_REG_RX_PATH_11N 0xC04 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index d868a034659f..dc0eb692088f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -844,9 +822,9 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 3: qos null data */ - 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -877,9 +855,9 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 4: BT qos null data */ - 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -911,9 +889,9 @@ static u8 reserved_page_packet_8821[TOTAL_RESERVED_PKT_LEN_8821] = { 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 5~7 is for wowlan */ /* page 5: ARP resp */ - 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00, 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06, 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x02, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x00, 0x00, @@ -1015,7 +993,7 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { /* page 0: beacon */ 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x60, 0x00, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x20, 0x04, 0x00, 0x03, 0x32, 0x31, 0x35, 0x01, 0x08, 0x82, 0x84, 0x8B, 0x96, 0x0C, @@ -1078,8 +1056,8 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 1: ps-poll */ - 0xA4, 0x10, 0x09, 0xC0, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0xA4, 0x10, 0x09, 0xC0, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1143,9 +1121,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 2: null data */ - 0x48, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x48, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1208,9 +1186,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 3: Qos null data */ - 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1273,9 +1251,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 4: BT Qos null data */ - 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0xC8, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -1339,9 +1317,9 @@ static u8 reserved_page_packet_8812[TOTAL_RESERVED_PKT_LEN_8812] = { 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* page 5~7 is for wowlan */ /* page 5: ARP resp */ - 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0xB2, 0xA7, - 0xB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, - 0x84, 0xC9, 0xB2, 0xA7, 0xB3, 0x6E, 0x00, 0x00, + 0x08, 0x01, 0x00, 0x00, 0x84, 0xC9, 0XB2, 0xA7, + 0XB3, 0x6E, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, + 0x84, 0xC9, 0XB2, 0xA7, 0XB3, 0x6E, 0x00, 0x00, 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x08, 0x06, 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x02, 0x00, 0xE0, 0x4C, 0x02, 0x51, 0x02, 0x00, 0x00, @@ -1543,8 +1521,8 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, struct sk_buff *skb = NULL; u32 totalpacketlen; bool rtstatus; - u8 u1RsvdPageLoc[5] = { 0 }; - u8 u1RsvdPageLoc2[7] = { 0 }; + u8 u1rsvdpageloc[5] = { 0 }; + u8 u1rsvdpageloc2[7] = { 0 }; bool b_dlok = false; u8 *beacon; u8 *p_pspoll; @@ -1574,7 +1552,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); - SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); /*-------------------------------------------------------- * (3) null data @@ -1585,7 +1563,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(nullfunc, 
mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); /*--------------------------------------------------------- * (4) Qos null data @@ -1596,7 +1574,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr); SET_80211_HDR_ADDRESS3(qosnull, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOSNULL_PG); /*--------------------------------------------------------- * (5) BT Qos null data @@ -1607,7 +1585,7 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr); SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOSNULL_PG); if (!dl_whole_packets) { totalpacketlen = 512 * (BT_QOSNULL_PG + 1) - 40; @@ -1622,20 +1600,20 @@ void rtl8812ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(arpresp, mac->mac_addr); SET_80211_HDR_ADDRESS3(arpresp, mac->bssid); - SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG); + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1rsvdpageloc2, ARPRESP_PG); /*--------------------------------------------------------- * (7) Remote Wake Ctrl *---------------------------------------------------------- */ - SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2, + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1rsvdpageloc2, REMOTE_PG); /*--------------------------------------------------------- * (8) GTK Ext Memory *---------------------------------------------------------- */ - SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG); + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1rsvdpageloc2, GTKEXT_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN_8812 - 40; @@ -1654,14 +1632,14 @@ out: if (!b_dl_finished && b_dlok) { RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 5); + "H2C_RSVDPAGE:\n", u1rsvdpageloc, 5); rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE, - sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + sizeof(u1rsvdpageloc), u1rsvdpageloc); if (dl_whole_packets) { RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "wowlan H2C_RSVDPAGE:\n", u1RsvdPageLoc2, 7); + "wowlan H2C_RSVDPAGE:\n", u1rsvdpageloc2, 7); rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AOAC_RSVDPAGE, - sizeof(u1RsvdPageLoc2), u1RsvdPageLoc2); + sizeof(u1rsvdpageloc2), u1rsvdpageloc2); } } @@ -1678,8 +1656,8 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, struct sk_buff *skb = NULL; u32 totalpacketlen; bool rtstatus; - u8 u1RsvdPageLoc[5] = { 0 }; - u8 u1RsvdPageLoc2[7] = { 0 }; + u8 u1rsvdpageloc[5] = { 0 }; + u8 u1rsvdpageloc2[7] = { 0 }; bool b_dlok = false; u8 *beacon; u8 *p_pspoll; @@ -1709,7 +1687,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid); SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr); - SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG); + SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG); /*-------------------------------------------------------- * (3) null data @@ -1720,7 +1698,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr); SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid); - 
SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG); /*--------------------------------------------------------- * (4) Qos null data @@ -1731,7 +1709,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(qosnull, mac->mac_addr); SET_80211_HDR_ADDRESS3(qosnull, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1RsvdPageLoc, QOSNULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_QOS_NULL_DATA(u1rsvdpageloc, QOSNULL_PG); /*--------------------------------------------------------- * (5) Qos null data @@ -1742,7 +1720,7 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(btqosnull, mac->mac_addr); SET_80211_HDR_ADDRESS3(btqosnull, mac->bssid); - SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1RsvdPageLoc, BT_QOSNULL_PG); + SET_H2CCMD_RSVDPAGE_LOC_BT_QOS_NULL_DATA(u1rsvdpageloc, BT_QOSNULL_PG); if (!dl_whole_packets) { totalpacketlen = 256 * (BT_QOSNULL_PG + 1) - 40; @@ -1757,20 +1735,20 @@ void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, SET_80211_HDR_ADDRESS2(arpresp, mac->mac_addr); SET_80211_HDR_ADDRESS3(arpresp, mac->bssid); - SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1RsvdPageLoc2, ARPRESP_PG); + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_ARP_RSP(u1rsvdpageloc2, ARPRESP_PG); /*--------------------------------------------------------- * (7) Remote Wake Ctrl *---------------------------------------------------------- */ - SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1RsvdPageLoc2, + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_REMOTE_WAKE_CTRL_INFO(u1rsvdpageloc2, REMOTE_PG); /*--------------------------------------------------------- * (8) GTK Ext Memory *---------------------------------------------------------- */ - SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1RsvdPageLoc2, GTKEXT_PG); + SET_8821AE_H2CCMD_AOAC_RSVDPAGE_LOC_GTK_EXT_MEM(u1rsvdpageloc2, GTKEXT_PG); totalpacketlen = TOTAL_RESERVED_PKT_LEN_8821 - 40; @@ -1792,16 +1770,16 @@ out: RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Set RSVD page location to Fw.\n"); RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, - "H2C_RSVDPAGE:\n", u1RsvdPageLoc, 5); + "H2C_RSVDPAGE:\n", u1rsvdpageloc, 5); rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE, - sizeof(u1RsvdPageLoc), u1RsvdPageLoc); + sizeof(u1rsvdpageloc), u1rsvdpageloc); if (dl_whole_packets) { RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "wowlan H2C_RSVDPAGE:\n", - u1RsvdPageLoc2, 7); + u1rsvdpageloc2, 7); rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_AOAC_RSVDPAGE, - sizeof(u1RsvdPageLoc2), - u1RsvdPageLoc2); + sizeof(u1rsvdpageloc2), + u1rsvdpageloc2); } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index 99c902ff0b84..e11e496b7277 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -1,26 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE__FW__H__ #define __RTL8821AE__FW__H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index ba258318ee9f..198d419ebb9c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "../efuse.h" @@ -2606,50 +2584,50 @@ static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw, u8 *hwinfo) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 rfPath, eeAddr = EEPROM_TX_PWR_INX, group, TxCount = 0; + u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcount = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "hal_ReadPowerValueFromPROM8821ae(): hwinfo[0x%x]=0x%x\n", - (eeAddr+1), hwinfo[eeAddr+1]); - if (0xFF == hwinfo[eeAddr+1]) /*YJ,add,120316*/ + (eeaddr + 1), hwinfo[eeaddr + 1]); + if (hwinfo[eeaddr + 1] == 0xFF) /*YJ,add,120316*/ autoload_fail = true; if (autoload_fail) { RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "auto load fail : Use Default value!\n"); - for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) { + for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) { /*2.4G default value*/ for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { - pwrinfo24g->index_cck_base[rfPath][group] = 0x2D; - pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D; + pwrinfo24g->index_cck_base[rfpath][group] = 0x2D; + pwrinfo24g->index_bw40_base[rfpath][group] = 0x2D; } - for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { - if (TxCount == 0) { - pwrinfo24g->bw20_diff[rfPath][0] = 0x02; - pwrinfo24g->ofdm_diff[rfPath][0] = 0x04; + for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) { + if (txcount == 0) { + pwrinfo24g->bw20_diff[rfpath][0] = 0x02; + pwrinfo24g->ofdm_diff[rfpath][0] = 0x04; } else { - pwrinfo24g->bw20_diff[rfPath][TxCount] = 0xFE; - pwrinfo24g->bw40_diff[rfPath][TxCount] = 0xFE; - pwrinfo24g->cck_diff[rfPath][TxCount] = 0xFE; - pwrinfo24g->ofdm_diff[rfPath][TxCount] = 0xFE; + pwrinfo24g->bw20_diff[rfpath][txcount] = 0xFE; + pwrinfo24g->bw40_diff[rfpath][txcount] 
= 0xFE; + pwrinfo24g->cck_diff[rfpath][txcount] = 0xFE; + pwrinfo24g->ofdm_diff[rfpath][txcount] = 0xFE; } } /*5G default value*/ for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) - pwrinfo5g->index_bw40_base[rfPath][group] = 0x2A; - - for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { - if (TxCount == 0) { - pwrinfo5g->ofdm_diff[rfPath][0] = 0x04; - pwrinfo5g->bw20_diff[rfPath][0] = 0x00; - pwrinfo5g->bw80_diff[rfPath][0] = 0xFE; - pwrinfo5g->bw160_diff[rfPath][0] = 0xFE; + pwrinfo5g->index_bw40_base[rfpath][group] = 0x2A; + + for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) { + if (txcount == 0) { + pwrinfo5g->ofdm_diff[rfpath][0] = 0x04; + pwrinfo5g->bw20_diff[rfpath][0] = 0x00; + pwrinfo5g->bw80_diff[rfpath][0] = 0xFE; + pwrinfo5g->bw160_diff[rfpath][0] = 0xFE; } else { - pwrinfo5g->ofdm_diff[rfPath][0] = 0xFE; - pwrinfo5g->bw20_diff[rfPath][0] = 0xFE; - pwrinfo5g->bw40_diff[rfPath][0] = 0xFE; - pwrinfo5g->bw80_diff[rfPath][0] = 0xFE; - pwrinfo5g->bw160_diff[rfPath][0] = 0xFE; + pwrinfo5g->ofdm_diff[rfpath][0] = 0xFE; + pwrinfo5g->bw20_diff[rfpath][0] = 0xFE; + pwrinfo5g->bw40_diff[rfpath][0] = 0xFE; + pwrinfo5g->bw80_diff[rfpath][0] = 0xFE; + pwrinfo5g->bw160_diff[rfpath][0] = 0xFE; } } } @@ -2658,112 +2636,112 @@ static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw, rtl_priv(hw)->efuse.txpwr_fromeprom = true; - for (rfPath = 0 ; rfPath < MAX_RF_PATH ; rfPath++) { + for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) { /*2.4G default value*/ for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) { - pwrinfo24g->index_cck_base[rfPath][group] = hwinfo[eeAddr++]; - if (pwrinfo24g->index_cck_base[rfPath][group] == 0xFF) - pwrinfo24g->index_cck_base[rfPath][group] = 0x2D; + pwrinfo24g->index_cck_base[rfpath][group] = hwinfo[eeaddr++]; + if (pwrinfo24g->index_cck_base[rfpath][group] == 0xFF) + pwrinfo24g->index_cck_base[rfpath][group] = 0x2D; } for (group = 0 ; group < MAX_CHNL_GROUP_24G - 1; group++) { - pwrinfo24g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++]; - if (pwrinfo24g->index_bw40_base[rfPath][group] == 0xFF) - pwrinfo24g->index_bw40_base[rfPath][group] = 0x2D; + pwrinfo24g->index_bw40_base[rfpath][group] = hwinfo[eeaddr++]; + if (pwrinfo24g->index_bw40_base[rfpath][group] == 0xFF) + pwrinfo24g->index_bw40_base[rfpath][group] = 0x2D; } - for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { - if (TxCount == 0) { - pwrinfo24g->bw40_diff[rfPath][TxCount] = 0; + for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) { + if (txcount == 0) { + pwrinfo24g->bw40_diff[rfpath][txcount] = 0; /*bit sign number to 8 bit sign number*/ - pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; - if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) - pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0; + pwrinfo24g->bw20_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4; + if (pwrinfo24g->bw20_diff[rfpath][txcount] & BIT(3)) + pwrinfo24g->bw20_diff[rfpath][txcount] |= 0xF0; /*bit sign number to 8 bit sign number*/ - pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); - if (pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) - pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0; + pwrinfo24g->ofdm_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f); + if (pwrinfo24g->ofdm_diff[rfpath][txcount] & BIT(3)) + pwrinfo24g->ofdm_diff[rfpath][txcount] |= 0xF0; - pwrinfo24g->cck_diff[rfPath][TxCount] = 0; - eeAddr++; + pwrinfo24g->cck_diff[rfpath][txcount] = 0; + eeaddr++; } else { - pwrinfo24g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr]&0xf0) >> 4; - if 
(pwrinfo24g->bw40_diff[rfPath][TxCount] & BIT(3)) - pwrinfo24g->bw40_diff[rfPath][TxCount] |= 0xF0; + pwrinfo24g->bw40_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4; + if (pwrinfo24g->bw40_diff[rfpath][txcount] & BIT(3)) + pwrinfo24g->bw40_diff[rfpath][txcount] |= 0xF0; - pwrinfo24g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); - if (pwrinfo24g->bw20_diff[rfPath][TxCount] & BIT(3)) - pwrinfo24g->bw20_diff[rfPath][TxCount] |= 0xF0; + pwrinfo24g->bw20_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f); + if (pwrinfo24g->bw20_diff[rfpath][txcount] & BIT(3)) + pwrinfo24g->bw20_diff[rfpath][txcount] |= 0xF0; - eeAddr++; + eeaddr++; - pwrinfo24g->ofdm_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; - if (pwrinfo24g->ofdm_diff[rfPath][TxCount] & BIT(3)) - pwrinfo24g->ofdm_diff[rfPath][TxCount] |= 0xF0; + pwrinfo24g->ofdm_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4; + if (pwrinfo24g->ofdm_diff[rfpath][txcount] & BIT(3)) + pwrinfo24g->ofdm_diff[rfpath][txcount] |= 0xF0; - pwrinfo24g->cck_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); - if (pwrinfo24g->cck_diff[rfPath][TxCount] & BIT(3)) - pwrinfo24g->cck_diff[rfPath][TxCount] |= 0xF0; + pwrinfo24g->cck_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f); + if (pwrinfo24g->cck_diff[rfpath][txcount] & BIT(3)) + pwrinfo24g->cck_diff[rfpath][txcount] |= 0xF0; - eeAddr++; + eeaddr++; } } /*5G default value*/ for (group = 0 ; group < MAX_CHNL_GROUP_5G; group++) { - pwrinfo5g->index_bw40_base[rfPath][group] = hwinfo[eeAddr++]; - if (pwrinfo5g->index_bw40_base[rfPath][group] == 0xFF) - pwrinfo5g->index_bw40_base[rfPath][group] = 0xFE; + pwrinfo5g->index_bw40_base[rfpath][group] = hwinfo[eeaddr++]; + if (pwrinfo5g->index_bw40_base[rfpath][group] == 0xFF) + pwrinfo5g->index_bw40_base[rfpath][group] = 0xFE; } - for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { - if (TxCount == 0) { - pwrinfo5g->bw40_diff[rfPath][TxCount] = 0; + for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) { + if (txcount == 0) { + pwrinfo5g->bw40_diff[rfpath][txcount] = 0; - pwrinfo5g->bw20_diff[rfPath][0] = (hwinfo[eeAddr] & 0xf0) >> 4; - if (pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3)) - pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0; + pwrinfo5g->bw20_diff[rfpath][0] = (hwinfo[eeaddr] & 0xf0) >> 4; + if (pwrinfo5g->bw20_diff[rfpath][txcount] & BIT(3)) + pwrinfo5g->bw20_diff[rfpath][txcount] |= 0xF0; - pwrinfo5g->ofdm_diff[rfPath][0] = (hwinfo[eeAddr] & 0x0f); - if (pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3)) - pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0; + pwrinfo5g->ofdm_diff[rfpath][0] = (hwinfo[eeaddr] & 0x0f); + if (pwrinfo5g->ofdm_diff[rfpath][txcount] & BIT(3)) + pwrinfo5g->ofdm_diff[rfpath][txcount] |= 0xF0; - eeAddr++; + eeaddr++; } else { - pwrinfo5g->bw40_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; - if (pwrinfo5g->bw40_diff[rfPath][TxCount] & BIT(3)) - pwrinfo5g->bw40_diff[rfPath][TxCount] |= 0xF0; + pwrinfo5g->bw40_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4; + if (pwrinfo5g->bw40_diff[rfpath][txcount] & BIT(3)) + pwrinfo5g->bw40_diff[rfpath][txcount] |= 0xF0; - pwrinfo5g->bw20_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); - if (pwrinfo5g->bw20_diff[rfPath][TxCount] & BIT(3)) - pwrinfo5g->bw20_diff[rfPath][TxCount] |= 0xF0; + pwrinfo5g->bw20_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f); + if (pwrinfo5g->bw20_diff[rfpath][txcount] & BIT(3)) + pwrinfo5g->bw20_diff[rfpath][txcount] |= 0xF0; - eeAddr++; + eeaddr++; } } - pwrinfo5g->ofdm_diff[rfPath][1] = (hwinfo[eeAddr] & 0xf0) >> 4; - 
pwrinfo5g->ofdm_diff[rfPath][2] = (hwinfo[eeAddr] & 0x0f); + pwrinfo5g->ofdm_diff[rfpath][1] = (hwinfo[eeaddr] & 0xf0) >> 4; + pwrinfo5g->ofdm_diff[rfpath][2] = (hwinfo[eeaddr] & 0x0f); - eeAddr++; + eeaddr++; - pwrinfo5g->ofdm_diff[rfPath][3] = (hwinfo[eeAddr] & 0x0f); + pwrinfo5g->ofdm_diff[rfpath][3] = (hwinfo[eeaddr] & 0x0f); - eeAddr++; + eeaddr++; - for (TxCount = 1; TxCount < MAX_TX_COUNT; TxCount++) { - if (pwrinfo5g->ofdm_diff[rfPath][TxCount] & BIT(3)) - pwrinfo5g->ofdm_diff[rfPath][TxCount] |= 0xF0; + for (txcount = 1; txcount < MAX_TX_COUNT; txcount++) { + if (pwrinfo5g->ofdm_diff[rfpath][txcount] & BIT(3)) + pwrinfo5g->ofdm_diff[rfpath][txcount] |= 0xF0; } - for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) { - pwrinfo5g->bw80_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0xf0) >> 4; + for (txcount = 0; txcount < MAX_TX_COUNT; txcount++) { + pwrinfo5g->bw80_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0xf0) >> 4; /* 4bit sign number to 8 bit sign number */ - if (pwrinfo5g->bw80_diff[rfPath][TxCount] & BIT(3)) - pwrinfo5g->bw80_diff[rfPath][TxCount] |= 0xF0; + if (pwrinfo5g->bw80_diff[rfpath][txcount] & BIT(3)) + pwrinfo5g->bw80_diff[rfpath][txcount] |= 0xF0; /* 4bit sign number to 8 bit sign number */ - pwrinfo5g->bw160_diff[rfPath][TxCount] = (hwinfo[eeAddr] & 0x0f); - if (pwrinfo5g->bw160_diff[rfPath][TxCount] & BIT(3)) - pwrinfo5g->bw160_diff[rfPath][TxCount] |= 0xF0; + pwrinfo5g->bw160_diff[rfpath][txcount] = (hwinfo[eeaddr] & 0x0f); + if (pwrinfo5g->bw160_diff[rfpath][txcount] & BIT(3)) + pwrinfo5g->bw160_diff[rfpath][txcount] |= 0xF0; - eeAddr++; + eeaddr++; } } } @@ -2930,8 +2908,8 @@ static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, struct rtl_hal *rtlhal = rtl_hal(rtlpriv); if (!autoload_fail) { - rtlhal->pa_type_2g = hwinfo[0xBC]; - rtlhal->lna_type_2g = hwinfo[0xBD]; + rtlhal->pa_type_2g = hwinfo[0XBC]; + rtlhal->lna_type_2g = hwinfo[0XBD]; if (rtlhal->pa_type_2g == 0xFF && rtlhal->lna_type_2g == 0xFF) { rtlhal->pa_type_2g = 0; rtlhal->lna_type_2g = 0; @@ -2943,8 +2921,8 @@ static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, (rtlhal->lna_type_2g & BIT(3))) ? 
1 : 0; - rtlhal->pa_type_5g = hwinfo[0xBC]; - rtlhal->lna_type_5g = hwinfo[0xBF]; + rtlhal->pa_type_5g = hwinfo[0XBC]; + rtlhal->lna_type_5g = hwinfo[0XBF]; if (rtlhal->pa_type_5g == 0xFF && rtlhal->lna_type_5g == 0xFF) { rtlhal->pa_type_5g = 0; rtlhal->lna_type_5g = 0; @@ -2969,18 +2947,18 @@ static void _rtl8812ae_read_amplifier_type(struct ieee80211_hw *hw, u8 *hwinfo, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); - u8 ext_type_pa_2g_a = (hwinfo[0xBD] & BIT(2)) >> 2; /* 0xBD[2] */ - u8 ext_type_pa_2g_b = (hwinfo[0xBD] & BIT(6)) >> 6; /* 0xBD[6] */ - u8 ext_type_pa_5g_a = (hwinfo[0xBF] & BIT(2)) >> 2; /* 0xBF[2] */ - u8 ext_type_pa_5g_b = (hwinfo[0xBF] & BIT(6)) >> 6; /* 0xBF[6] */ - /* 0xBD[1:0] */ - u8 ext_type_lna_2g_a = (hwinfo[0xBD] & (BIT(1) | BIT(0))) >> 0; - /* 0xBD[5:4] */ - u8 ext_type_lna_2g_b = (hwinfo[0xBD] & (BIT(5) | BIT(4))) >> 4; - /* 0xBF[1:0] */ - u8 ext_type_lna_5g_a = (hwinfo[0xBF] & (BIT(1) | BIT(0))) >> 0; - /* 0xBF[5:4] */ - u8 ext_type_lna_5g_b = (hwinfo[0xBF] & (BIT(5) | BIT(4))) >> 4; + u8 ext_type_pa_2g_a = (hwinfo[0XBD] & BIT(2)) >> 2; /* 0XBD[2] */ + u8 ext_type_pa_2g_b = (hwinfo[0XBD] & BIT(6)) >> 6; /* 0XBD[6] */ + u8 ext_type_pa_5g_a = (hwinfo[0XBF] & BIT(2)) >> 2; /* 0XBF[2] */ + u8 ext_type_pa_5g_b = (hwinfo[0XBF] & BIT(6)) >> 6; /* 0XBF[6] */ + /* 0XBD[1:0] */ + u8 ext_type_lna_2g_a = (hwinfo[0XBD] & (BIT(1) | BIT(0))) >> 0; + /* 0XBD[5:4] */ + u8 ext_type_lna_2g_b = (hwinfo[0XBD] & (BIT(5) | BIT(4))) >> 4; + /* 0XBF[1:0] */ + u8 ext_type_lna_5g_a = (hwinfo[0XBF] & (BIT(1) | BIT(0))) >> 0; + /* 0XBF[5:4] */ + u8 ext_type_lna_5g_b = (hwinfo[0XBF] & (BIT(5) | BIT(4))) >> 4; _rtl8812ae_read_pa_type(hw, hwinfo, autoload_fail); @@ -3008,8 +2986,8 @@ static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, struct rtl_hal *rtlhal = rtl_hal(rtlpriv); if (!autoload_fail) { - rtlhal->pa_type_2g = hwinfo[0xBC]; - rtlhal->lna_type_2g = hwinfo[0xBD]; + rtlhal->pa_type_2g = hwinfo[0XBC]; + rtlhal->lna_type_2g = hwinfo[0XBD]; if (rtlhal->pa_type_2g == 0xFF && rtlhal->lna_type_2g == 0xFF) { rtlhal->pa_type_2g = 0; rtlhal->lna_type_2g = 0; @@ -3017,8 +2995,8 @@ static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, rtlhal->external_pa_2g = (rtlhal->pa_type_2g & BIT(5)) ? 1 : 0; rtlhal->external_lna_2g = (rtlhal->lna_type_2g & BIT(7)) ? 1 : 0; - rtlhal->pa_type_5g = hwinfo[0xBC]; - rtlhal->lna_type_5g = hwinfo[0xBF]; + rtlhal->pa_type_5g = hwinfo[0XBC]; + rtlhal->lna_type_5g = hwinfo[0XBF]; if (rtlhal->pa_type_5g == 0xFF && rtlhal->lna_type_5g == 0xFF) { rtlhal->pa_type_5g = 0; rtlhal->lna_type_5g = 0; @@ -4033,10 +4011,10 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw, rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL, RXPKT_BUF_SELECT); for (addr = 0; addr < WKFMCAM_ADDR_NUM; addr++) { /* Set Rx packet buffer offset. - * RxBufer pointer increases 1, + * RXBufer pointer increases 1, * we can access 8 bytes in Rx packet buffer. 
* CAM start offset (unit: 1 byte) = index*WKFMCAM_SIZE - * RxBufer addr = (CAM start offset + + * RXBufer addr = (CAM start offset + * per entry offset of a WKFM CAM)/8 * * index: The index of the wake up frame mask * * WKFMCAM_SIZE: the total size of one WKFM CAM diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h index e2ab783a2ad9..fb0fb3a501d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_HW_H__ #define __RTL8821AE_HW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c index 405c7541b386..dd7553e80ab1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h index 038e64e18ae8..249a37a8d9db 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_LED_H__ #define __RTL8821AE_LED_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index a75451c246fd..408af144098e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" @@ -475,7 +453,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, const s8 auto_temp = -1; RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, - "===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n", + "===> PHY_GetTXBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n", (int)swing_2g, (int)swing_5g, (int)rtlefuse->autoload_failflag); @@ -556,7 +534,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, swing_a = (swing & 0x3) >> 0; /* 0xC6/C7[1:0] */ swing_b = (swing & 0xC) >> 2; /* 0xC6/C7[3:2] */ RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, - "===> PHY_GetTxBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n", + "===> PHY_GetTXBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n", swing_a, swing_b); /* 3 Path-A */ @@ -614,7 +592,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, } RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, - "<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out); + "<=== PHY_GetTXBBSwing_8812A, out = 0x%X\n", out); return out; } @@ -1078,52 +1056,52 @@ static void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; - u16 rawValue = 0; + u16 rawvalue = 0; u8 base = 0, path = 0; for (path = RF90_PATH_A; path <= RF90_PATH_B; ++path) { - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][0] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, CCK, RF_1TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][2] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, OFDM, RF_1TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][4] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS0_MCS7, RF_1TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][6] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, HT_MCS8_MCS15, RF_2TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_1TX][8] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base); - rawValue = 
(u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][path][RF_2TX][11] >> 8) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_2_4G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][2] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, OFDM, RF_1TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][4] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS0_MCS7, RF_1TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][6] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, HT_MCS8_MCS15, RF_2TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_1TX][8] >> 24) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_1SSMCS0_1SSMCS9, RF_1TX, base); - rawValue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF; - base = (rawValue >> 4) * 10 + (rawValue & 0xF); + rawvalue = (u16)(rtlphy->tx_power_by_rate_offset[BAND_ON_5G][path][RF_2TX][11] >> 8) & 0xFF; + base = (rawvalue >> 4) * 10 + (rawvalue & 0xF); _rtl8821ae_phy_set_txpower_by_rate_base(hw, BAND_ON_5G, path, VHT_2SSMCS0_2SSMCS9, RF_2TX, base); } } @@ -1380,7 +1358,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211 } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfPath %d] %d)\n", + "TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfpath %d] %d)\n", regulation, bw, rate_section, channel, rtlphy->txpwr_limit_2_4g[regulation][bw] [rate_section][channel][rf_path], (temp_pwrlmt == 63) @@ -1445,7 +1423,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211 } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfPath %d] %d)\n", + "TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfpath %d] %d)\n", regulation, bw, rate_section, channel, rtlphy->txpwr_limit_5g[regulation] [bw][rate_section][channel][rf_path], @@ -1495,106 +1473,106 @@ static void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee8021 { struct rtl_priv *rtlpriv = 
rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; - u8 base = 0, rfPath = 0; + u8 base = 0, rfpath = 0; - for (rfPath = RF90_PATH_A; rfPath <= RF90_PATH_B; ++rfPath) { - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, CCK); + for (rfpath = RF90_PATH_A; rfpath <= RF90_PATH_B; ++rfpath) { + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, CCK); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][0], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][0], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, OFDM); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, OFDM); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][1], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][1], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][2], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][2], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, HT_MCS0_MCS7); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, HT_MCS0_MCS7); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][3], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][3], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][4], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][4], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, HT_MCS8_MCS15); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_2TX, HT_MCS8_MCS15); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][5], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][5], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][6], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][6], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_1TX, VHT_1SSMCS0_1SSMCS9); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][7], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][7], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][8], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][8], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][9], 0, 1, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_2_4G, rfpath, RF_2TX, VHT_2SSMCS0_2SSMCS9); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_1TX][9], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_1TX][9], 2, 3, base); 
_phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][10], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][10], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfPath][RF_2TX][11], + &rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][11], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, OFDM); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_1TX, OFDM); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][1], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][1], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][2], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][2], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, HT_MCS0_MCS7); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_1TX, HT_MCS0_MCS7); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][3], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][3], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][4], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][4], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, HT_MCS8_MCS15); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_2TX, HT_MCS8_MCS15); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][5], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][5], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][6], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][6], 0, 3, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_1TX, VHT_1SSMCS0_1SSMCS9); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_1TX, VHT_1SSMCS0_1SSMCS9); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][7], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][7], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][8], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][8], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][9], 0, 1, base); - base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfPath, RF_2TX, VHT_2SSMCS0_2SSMCS9); + base = _rtl8821ae_phy_get_txpower_by_rate_base(hw, BAND_ON_5G, rfpath, RF_2TX, VHT_2SSMCS0_2SSMCS9); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_1TX][9], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_1TX][9], 2, 3, base); _phy_convert_txpower_dbm_to_relative_value( - &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][10], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][10], 0, 3, base); _phy_convert_txpower_dbm_to_relative_value( - 
&rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfPath][RF_2TX][11], + &rtlphy->tx_power_by_rate_offset[BAND_ON_5G][rfpath][RF_2TX][11], 0, 3, base); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h index 1285e1adfe9d..35b7d0f70125 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_PHY_H__ #define __RTL8821AE_PHY_H__ @@ -59,9 +37,9 @@ #define LOOP_LIMIT 5 #define MAX_STALL_TIME 50 -#define AntennaDiversityValue 0x80 +#define ANTENNADIVERSITYVALUE 0x80 #define MAX_TXPWR_IDX_NMODE_92S 63 -#define Reset_Cnt_Limit 3 +#define RESET_CNT_LIMIT 3 #define IQK_ADDA_REG_NUM 16 #define IQK_MAC_REG_NUM 4 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c index 9ddf78a187dd..1e7b3c770ac2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../pwrseqcmd.h" #include "pwrseq.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h index 6dd575435c63..d6f3cbab4abc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_PWRSEQ_H__ #define __RTL8821AE_PWRSEQ_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h index db8bc8a2de61..7d833b72c7ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_REG_H__ #define __RTL8821AE_REG_H__ @@ -696,7 +674,7 @@ #define EEPROM_CHANNEL_PLAN_TELEC 0x8 #define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9 #define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA -#define EEPROM_CHANNEL_PLAN_NCC 0xB +#define EEPROM_CHANNEL_PLAN_NCC 0XB #define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80 #define EEPROM_CID_DEFAULT 0x0 @@ -718,10 +696,10 @@ #define EEPROM_TX_PWR_INX 0x10 -#define EEPROM_CHANNELPLAN 0xB8 -#define EEPROM_XTAL_8821AE 0xB9 -#define EEPROM_THERMAL_METER 0xBA -#define EEPROM_IQK_LCK_88E 0xBB +#define EEPROM_CHANNELPLAN 0XB8 +#define EEPROM_XTAL_8821AE 0XB9 +#define EEPROM_THERMAL_METER 0XBA +#define EEPROM_IQK_LCK_88E 0XBB #define EEPROM_RF_BOARD_OPTION 0xC1 #define EEPROM_RF_FEATURE_OPTION_88E 0xC2 @@ -1015,7 +993,7 @@ #define _LBMODE(x) (((x) & 0xF) << 24) #define MASK_LBMODE 0xF000000 #define LOOPBACK_NORMAL 0x0 -#define LOOPBACK_IMMEDIATELY 0xB +#define LOOPBACK_IMMEDIATELY 0XB #define LOOPBACK_MAC_DELAY 0x3 #define LOOPBACK_PHY 0x1 #define LOOPBACK_DMA 0x7 @@ -1430,7 +1408,7 @@ #define RCCK0_FACOUNTERUPPER 0xa58 #define RCCK0_CCA_CNT 0xa60 -/* PageB(0xB00) */ +/* PageB(0XB00) */ #define RPDP_ANTA 0xb00 #define RPDP_ANTA_4 0xb04 #define RPDP_ANTA_8 0xb08 @@ -1477,16 +1455,16 @@ #define RPM_RX3_ANTB 0xbf8 /*RSSI Dump*/ -#define RA_RSSI_DUMP 0xBF0 -#define RB_RSSI_DUMP 0xBF1 -#define RS1_RX_EVM_DUMP 0xBF4 -#define RS2_RX_EVM_DUMP 0xBF5 -#define RA_RX_SNR_DUMP 0xBF6 -#define RB_RX_SNR_DUMP 0xBF7 -#define RA_CFO_SHORT_DUMP 0xBF8 -#define RB_CFO_SHORT_DUMP 0xBFA -#define RA_CFO_LONG_DUMP 0xBEC -#define RB_CFO_LONG_DUMP 0xBEE +#define RA_RSSI_DUMP 0XBF0 +#define RB_RSSI_DUMP 0XBF1 +#define RS1_RX_EVM_DUMP 0XBF4 +#define RS2_RX_EVM_DUMP 0XBF5 +#define RA_RX_SNR_DUMP 0XBF6 +#define RB_RX_SNR_DUMP 0XBF7 +#define RA_CFO_SHORT_DUMP 0XBF8 +#define RB_CFO_SHORT_DUMP 0XBFA +#define RA_CFO_LONG_DUMP 0XBEC +#define RB_CFO_LONG_DUMP 0XBEE /*Page C*/ #define ROFDM0_LSTF 0xc00 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c index 95489f41f8a0..a6e56872e063 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "reg.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h index efd22bd0b139..6e3c8bfb2048 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_RF_H__ #define __RTL8821AE_RF_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 77f6401021c9..eec7c4ecf3ad 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "../core.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h index d001e7ce3052..9d7610f84b20 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_SW_H__ #define __RTL8821AE_SW_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index f87f9d03b9fa..85093b3e5373 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -1,29 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ + #include #include "table.h" u32 RTL8812AE_PHY_REG_ARRAY[] = { @@ -134,30 +111,30 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0xA7C, 0x225B0606, 0xA80, 0x218075B2, 0xA84, 0x001F8C80, - 0xB00, 0x03100000, - 0xB04, 0x0000B000, - 0xB08, 0xAE0201EB, - 0xB0C, 0x01003207, - 0xB10, 0x00009807, - 0xB14, 0x01000000, - 0xB18, 0x00000002, - 0xB1C, 0x00000002, - 0xB20, 0x0000001F, - 0xB24, 0x03020100, - 0xB28, 0x07060504, - 0xB2C, 0x0B0A0908, - 0xB30, 0x0F0E0D0C, - 0xB34, 0x13121110, - 0xB38, 0x17161514, - 0xB3C, 0x0000003A, - 0xB40, 0x00000000, - 0xB44, 0x00000000, - 0xB48, 0x13000032, - 0xB4C, 0x48080000, - 0xB50, 0x00000000, - 0xB54, 0x00000000, - 0xB58, 0x00000000, - 0xB5C, 0x00000000, + 0XB00, 0x03100000, + 0XB04, 0x0000B000, + 0XB08, 0xAE0201EB, + 0XB0C, 0x01003207, + 0XB10, 0x00009807, + 0XB14, 0x01000000, + 0XB18, 0x00000002, + 0XB1C, 0x00000002, + 0XB20, 0x0000001F, + 0XB24, 0x03020100, + 0XB28, 0x07060504, + 0XB2C, 0x0B0A0908, + 0XB30, 0x0F0E0D0C, + 0XB34, 0x13121110, + 0XB38, 0x17161514, + 0XB3C, 0x0000003A, + 0XB40, 0x00000000, + 0XB44, 0x00000000, + 0XB48, 0x13000032, + 0XB4C, 0x48080000, + 0XB50, 0x00000000, + 0XB54, 0x00000000, + 0XB58, 0x00000000, + 0XB5C, 0x00000000, 0xC00, 0x00000007, 0xC04, 0x00042020, 0xC08, 0x80410231, @@ -197,7 +174,7 @@ u32 RTL8812AE_PHY_REG_ARRAY[] = { 0xC68, 0x59791979, 0xA0000000, 0x00000000, 0xC68, 0x59799979, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0xC6C, 0x59795979, 0xC70, 0x19795979, 0xC74, 0x19795979, @@ -367,30 +344,30 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = { 0xA7C, 0x225B0606, 0xA80, 0x21805490, 0xA84, 0x001F0000, - 0xB00, 0x03100040, - 0xB04, 0x0000B000, - 0xB08, 0xAE0201EB, - 0xB0C, 0x01003207, - 0xB10, 0x00009807, - 0xB14, 0x01000000, - 0xB18, 0x00000002, - 0xB1C, 0x00000002, - 0xB20, 0x0000001F, - 0xB24, 0x03020100, - 0xB28, 0x07060504, - 0xB2C, 0x0B0A0908, - 0xB30, 0x0F0E0D0C, - 0xB34, 0x13121110, - 0xB38, 0x17161514, - 0xB3C, 0x0000003A, - 0xB40, 0x00000000, - 0xB44, 0x00000000, - 0xB48, 0x13000032, - 0xB4C, 0x48080000, - 0xB50, 0x00000000, - 0xB54, 0x00000000, - 0xB58, 0x00000000, - 0xB5C, 0x00000000, + 0XB00, 0x03100040, + 0XB04, 0x0000B000, + 0XB08, 0xAE0201EB, + 0XB0C, 0x01003207, + 0XB10, 0x00009807, + 0XB14, 0x01000000, + 0XB18, 0x00000002, + 0XB1C, 0x00000002, + 0XB20, 0x0000001F, + 0XB24, 0x03020100, + 0XB28, 0x07060504, + 0XB2C, 0x0B0A0908, + 0XB30, 0x0F0E0D0C, + 0XB34, 0x13121110, + 0XB38, 0x17161514, + 0XB3C, 0x0000003A, + 0XB40, 0x00000000, + 0XB44, 0x00000000, + 0XB48, 0x13000032, + 0XB4C, 0x48080000, + 0XB50, 0x00000000, + 0XB54, 0x00000000, + 0XB58, 0x00000000, + 0XB5C, 0x00000000, 0xC00, 0x00000007, 0xC04, 0x00042020, 0xC08, 0x80410231, @@ -521,12 +498,12 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x086, 0x00014B3A, 0xA0000000, 0x00000000, 0x086, 0x00014B38, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000004, 0x00000000, 0x40000000, 0x00000000, 0x08B, 0x00080180, 0xA0000000, 0x00000000, 0x08B, 0x00087180, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0B1, 0x0001FC1A, 0x0B3, 0x000F0810, 0x0B4, 0x0001A78D, @@ -557,7 +534,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x03B, 0x00018248, 0x03B, 0x00010240, 0x03B, 0x00008240, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000100, 0x80000002, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A4EE, @@ -583,7 +560,7 @@ u32 
RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x000024E7, 0x034, 0x0000146B, 0x034, 0x0000006D, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x000020A2, 0x0DF, 0x00000080, @@ -712,7 +689,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x000428C5, 0x034, 0x000418C2, 0x034, 0x000408C0, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002A0B2, 0x034, 0x000290AF, @@ -749,7 +726,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x000228C5, 0x034, 0x000218C2, 0x034, 0x000208C0, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A0B2, 0x034, 0x000090AF, @@ -786,7 +763,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x034, 0x000028C9, 0x034, 0x000018C6, 0x034, 0x000008C3, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, @@ -824,7 +801,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x035, 0x000401D8, 0x035, 0x000481D8, 0x035, 0x000501D8, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, @@ -871,7 +848,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x036, 0x000CCC35, 0x036, 0x000D4C35, 0x036, 0x000DCC35, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000008, 0x80000008, 0x00000000, 0x40000000, 0x00000000, @@ -886,7 +863,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x03C, 0x000002A8, 0x03C, 0x000005A2, 0x03C, 0x00000880, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000002, @@ -910,7 +887,7 @@ u32 RTL8812AE_RADIOA_ARRAY[] = { 0x063, 0x000114EB, 0x064, 0x000196AC, 0x065, 0x000911D7, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x008, 0x00008400, 0x01C, 0x000739D2, 0x0B4, 0x0001E78D, @@ -935,12 +912,12 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x086, 0x00014B3A, 0xA0000000, 0x00000000, 0x086, 0x00014B38, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000004, 0x00000000, 0x40000000, 0x00000000, 0x08B, 0x00080180, 0xA0000000, 0x00000000, 0x08B, 0x00087180, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x018, 0x00000006, 0x0EF, 0x00002000, 0x80000001, 0x00000000, 0x40000000, 0x00000000, @@ -967,7 +944,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x03B, 0x00018248, 0x03B, 0x00010240, 0x03B, 0x00008240, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000100, 0x80000002, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A4EE, @@ -993,7 +970,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x000024E7, 0x034, 0x0000146B, 0x034, 0x0000006D, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x000020A2, 0x0DF, 0x00000080, @@ -1122,7 +1099,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x000428C5, 0x034, 0x000418C2, 0x034, 0x000408C0, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0002A0B2, 0x034, 0x000290AF, @@ -1159,7 +1136,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x000228C5, 0x034, 0x000218C2, 0x034, 0x000208C0, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x034, 0x0000A0B2, 0x034, 0x000090AF, @@ -1196,7 +1173,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x034, 0x000028C9, 0x034, 0x000018C6, 0x034, 0x000008C3, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, @@ -1237,7 +1214,7 @@ u32 
RTL8812AE_RADIOB_ARRAY[] = { 0x035, 0x000481D8, 0x035, 0x000501D8, 0x0EF, 0x00000000, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000008, 0x00000000, 0x40000000, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000010, @@ -1283,7 +1260,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x036, 0x000CCC35, 0x036, 0x000D4C35, 0x036, 0x000DCC35, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x0EF, 0x00000008, 0x80000008, 0x00000000, 0x40000000, 0x00000000, @@ -1298,7 +1275,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x03C, 0x000002A8, 0x03C, 0x000005A2, 0x03C, 0x00000880, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x0EF, 0x00000000, 0x018, 0x0001712A, 0x0EF, 0x00000002, @@ -1327,7 +1304,7 @@ u32 RTL8812AE_RADIOB_ARRAY[] = { 0x063, 0x000114EB, 0x064, 0x000196AC, 0x065, 0x000911D7, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x008, 0x00008400, }; @@ -1933,7 +1910,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = { 0x011, 0x00000066, 0xA0000000, 0x00000000, 0x011, 0x0000005A, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x025, 0x0000000F, 0x072, 0x00000000, 0x420, 0x00000080, @@ -2337,7 +2314,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = { 0x81C, 0x417A0001, 0x81C, 0x417C0001, 0x81C, 0x417E0001, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0x80000004, 0x00000000, 0x40000000, 0x00000000, 0x81C, 0xFC800001, 0x81C, 0xFB820001, @@ -2468,7 +2445,7 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = { 0x81C, 0x01FA0001, 0x81C, 0x01FC0001, 0x81C, 0x01FE0001, - 0xB0000000, 0x00000000, + 0XB0000000, 0x00000000, 0xC50, 0x00000022, 0xC50, 0x00000020, 0xE50, 0x00000022, @@ -2478,24 +2455,24 @@ u32 RTL8812AE_AGC_TAB_ARRAY[] = { u32 RTL8812AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8812AE_AGC_TAB_ARRAY); u32 RTL8821AE_AGC_TAB_ARRAY[] = { - 0x81C, 0xBF000001, - 0x81C, 0xBF020001, - 0x81C, 0xBF040001, - 0x81C, 0xBF060001, - 0x81C, 0xBE080001, - 0x81C, 0xBD0A0001, - 0x81C, 0xBC0C0001, - 0x81C, 0xBA0E0001, - 0x81C, 0xB9100001, - 0x81C, 0xB8120001, - 0x81C, 0xB7140001, - 0x81C, 0xB6160001, - 0x81C, 0xB5180001, - 0x81C, 0xB41A0001, - 0x81C, 0xB31C0001, - 0x81C, 0xB21E0001, - 0x81C, 0xB1200001, - 0x81C, 0xB0220001, + 0x81C, 0XBF000001, + 0x81C, 0XBF020001, + 0x81C, 0XBF040001, + 0x81C, 0XBF060001, + 0x81C, 0XBE080001, + 0x81C, 0XBD0A0001, + 0x81C, 0XBC0C0001, + 0x81C, 0XBA0E0001, + 0x81C, 0XB9100001, + 0x81C, 0XB8120001, + 0x81C, 0XB7140001, + 0x81C, 0XB6160001, + 0x81C, 0XB5180001, + 0x81C, 0XB41A0001, + 0x81C, 0XB31C0001, + 0x81C, 0XB21E0001, + 0x81C, 0XB1200001, + 0x81C, 0XB0220001, 0x81C, 0xAF240001, 0x81C, 0xAE260001, 0x81C, 0xAD280001, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h index 36c2388b60bc..540159c25078 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h @@ -1,29 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. 
- * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Created on 2010/ 5/18, 1:41 - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_TABLE__H_ #define __RTL8821AE_TABLE__H_ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 0f2b7c619918..db5e628b17ed 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #include "../wifi.h" #include "../pci.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index 4ff0968dba81..a3feecad645d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2010 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2010 Realtek Corporation.*/ #ifndef __RTL8821AE_TRX_H__ #define __RTL8821AE_TRX_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c index 61700fa05570..504ca587a16a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/stats.c +++ b/drivers/net/wireless/realtek/rtlwifi/stats.c @@ -1,27 +1,6 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ + #include "wifi.h" #include "stats.h" #include diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h index bd0108f93182..581590729b0f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/stats.h +++ b/drivers/net/wireless/realtek/rtlwifi/stats.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_STATS_H__ #define __RTL_STATS_H__ diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 2ac5004d7a40..e24fda5e9087 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1,25 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. - * - *****************************************************************************/ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #include "wifi.h" #include "core.h" @@ -214,7 +194,7 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val) _usb_write_async(to_usb_device(dev), addr, val, 4); } -static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data, +static void _usb_writen_sync(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len) { struct device *dev = rtlpriv->io.dev; @@ -249,7 +229,7 @@ static void _rtl_usb_io_handler_init(struct device *dev, rtlpriv->io.read8_sync = _usb_read8_sync; rtlpriv->io.read16_sync = _usb_read16_sync; rtlpriv->io.read32_sync = _usb_read32_sync; - rtlpriv->io.writeN_sync = _usb_writeN_sync; + rtlpriv->io.writen_sync = _usb_writen_sync; } static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) @@ -287,6 +267,7 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw) for (i = 0; i < __RTL_TXQ_NUM; i++) { u32 ep_num = rtlusb->ep_map.ep_mapping[i]; + if (!ep_num) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Invalid endpoint map setting!\n"); @@ -351,6 +332,7 @@ static int _rtl_usb_init(struct ieee80211_hw *hw) rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0; for (epidx = 0; epidx < epnums; epidx++) { struct usb_endpoint_descriptor *pep_desc; + pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc; if (usb_endpoint_dir_in(pep_desc)) @@ -413,7 +395,7 @@ static void rtl_usb_init_sw(struct ieee80211_hw *hw) rtlusb->irq_mask[0] = 0xFFFFFFFF; /* HIMR_EX - turn all on */ rtlusb->irq_mask[1] = 0xFFFFFFFF; - rtlusb->disableHWSM = true; + rtlusb->disablehwsm = true; } static void _rtl_rx_completed(struct urb *urb); @@ -773,6 +755,7 @@ static int rtl_usb_start(struct ieee80211_hw *hw) return err; } + /** * * diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h index c91cec04bfaf..3bf85b23eec1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.h +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h @@ -1,25 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_USB_H__ #define __RTL_USB_H__ @@ -37,7 +17,6 @@ #define USB_HIGH_SPEED_BULK_SIZE 512 #define USB_FULL_SPEED_BULK_SIZE 64 - #define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */ #define RTL_USB_MAX_EP_NUM 6 /* max ep number */ #define RTL_USB_MAX_TX_URBS_NUM 8 @@ -73,11 +52,11 @@ static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb, u32 ep_num) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + info->rate_driver_data[0] = rtlusb; info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num; } - /* Add suspend/resume later */ enum rtl_usb_state { USB_STATE_STOP = 0, @@ -104,7 +83,7 @@ struct rtl_usb { /* Bcn control register setting */ u32 reg_bcn_ctrl_val; /* for 88/92cu card disable */ - u8 disableHWSM; + u8 disablehwsm; /*QOS & EDCA */ enum acm_method acm_method; /* irq . HIMR,HIMR_EX */ @@ -153,8 +132,6 @@ struct rtl_usb_priv { #define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv)) #define rtl_usbdev(usbpriv) (&((usbpriv)->dev)) - - int rtl_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, struct rtl_hal_cfg *rtl92cu_hal_cfg); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 87bc21bb5e8b..e32e9ffa3192 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1,27 +1,5 @@ -/****************************************************************************** - * - * Copyright(c) 2009-2012 Realtek Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in the - * file called LICENSE. - * - * Contact Information: - * wlanfae - * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, - * Hsinchu 300, Taiwan. 
- * - * Larry Finger - * - *****************************************************************************/ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2009-2012 Realtek Corporation.*/ #ifndef __RTL_WIFI_H__ #define __RTL_WIFI_H__ @@ -263,7 +241,7 @@ struct rtlwifi_firmware_header { u8 date; u8 hour; u8 minute; - __le16 ramcodeSize; + __le16 ramcodesize; __le16 rsvd2; __le32 svnindex; __le32 rsvd3; @@ -435,8 +413,8 @@ enum hw_variables { HW_VAR_MULTICAST_REG = 0x1, HW_VAR_BASIC_RATE = 0x2, HW_VAR_BSSID = 0x3, - HW_VAR_MEDIA_STATUS= 0x4, - HW_VAR_SECURITY_CONF= 0x5, + HW_VAR_MEDIA_STATUS = 0x4, + HW_VAR_SECURITY_CONF = 0x5, HW_VAR_BEACON_INTERVAL = 0x6, HW_VAR_ATIM_WINDOW = 0x7, HW_VAR_LISTEN_INTERVAL = 0x8, @@ -453,7 +431,7 @@ enum hw_variables { HW_VAR_ACK_PREAMBLE = 0x13, HW_VAR_CW_CONFIG = 0x14, HW_VAR_CW_VALUES = 0x15, - HW_VAR_RATE_FALLBACK_CONTROL= 0x16, + HW_VAR_RATE_FALLBACK_CONTROL = 0x16, HW_VAR_CONTENTION_WINDOW = 0x17, HW_VAR_RETRY_COUNT = 0x18, HW_VAR_TR_SWITCH = 0x19, @@ -465,11 +443,11 @@ enum hw_variables { HW_VAR_MCS_RATE_AVAILABLE = 0x1f, HW_VAR_AC_PARAM = 0x20, HW_VAR_ACM_CTRL = 0x21, - HW_VAR_DIS_Req_Qsize = 0x22, + HW_VAR_DIS_REQ_QSIZE = 0x22, HW_VAR_CCX_CHNL_LOAD = 0x23, HW_VAR_CCX_NOISE_HISTOGRAM = 0x24, HW_VAR_CCX_CLM_NHM = 0x25, - HW_VAR_TxOPLimit = 0x26, + HW_VAR_TXOPLIMIT = 0x26, HW_VAR_TURBO_MODE = 0x27, HW_VAR_RF_STATE = 0x28, HW_VAR_RF_OFF_BY_HW = 0x29, @@ -522,7 +500,7 @@ enum hw_variables { HW_VAR_BCN_VALID = 0x55, HW_VAR_FWLPS_RF_ON = 0x56, HW_VAR_DUAL_TSF_RST = 0x57, - HW_VAR_SWITCH_EPHY_WoWLAN = 0x58, + HW_VAR_SWITCH_EPHY_WOWLAN = 0x58, HW_VAR_INT_MIGRATION = 0x59, HW_VAR_INT_AC = 0x5a, HW_VAR_RF_TIMING = 0x5b, @@ -620,7 +598,8 @@ enum ht_channel_width { }; /* Ref: 802.11i sepc D10.0 7.3.2.25.1 -Cipher Suites Encryption Algorithms */ + * Cipher Suites Encryption Algorithms + */ enum rt_enc_alg { NO_ENCRYPTION = 0, WEP40_ENCRYPTION = 1, @@ -770,7 +749,8 @@ enum rtl_var_map { RTL_IMR_ROK, /*Receive DMA OK Interrupt */ RTL_IMR_HSISR_IND, /*HSISR Interrupt*/ RTL_IBSS_INT_MASKS, /*(RTL_IMR_BCNINT | RTL_IMR_TBDOK | - * RTL_IMR_TBDER) */ + * RTL_IMR_TBDER) + */ RTL_IMR_C2HCMD, /*fw interrupt*/ /*CCK Rates, TxHT = 0 */ @@ -814,8 +794,8 @@ enum _fw_ps_mode { FW_PS_UAPSD_MODE = 6, FW_PS_IBSS_MODE = 7, FW_PS_WWLAN_MODE = 8, - FW_PS_PM_Radio_Off = 9, - FW_PS_PM_Card_Disable = 10, + FW_PS_PM_RADIO_OFF = 9, + FW_PS_PM_CARD_DISABLE = 10, }; enum rt_psmode { @@ -849,8 +829,8 @@ enum rtl_led_pin { /*QoS related.*/ /*acm implementation method.*/ enum acm_method { - eAcmWay0_SwAndHw = 0, - eAcmWay1_HW = 1, + EACMWAY0_SWANDHW = 0, + EACMWAY1_HW = 1, EACMWAY2_SW = 2, }; @@ -867,8 +847,9 @@ enum band_type { BANDMAX }; -/*aci/aifsn Field. -Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/ +/* aci/aifsn Field. + * Ref: WMM spec 2.2.2: WME Parameter Element, p.12. + */ union aci_aifsn { u8 char_data; @@ -1084,7 +1065,8 @@ struct rtl_probe_rsp { __le16 beacon_interval; __le16 capability; /*SSID, supported rates, FH params, DS params, - CF params, IBSS params, TIM (if beacon), RSN */ + * CF params, IBSS params, TIM (if beacon), RSN + */ struct rtl_info_element info_element[0]; } __packed; @@ -1158,7 +1140,8 @@ struct wireless_stats { long rx_snr_db[4]; /*Correct smoothed ss in Dbm, only used - in driver to report real power now. */ + * in driver to report real power now. + */ long recv_signal_power; long signal_quality; long last_sigstrength_inpercent; @@ -1166,8 +1149,9 @@ struct wireless_stats { u32 rssi_calculate_cnt; u32 pwdb_all_cnt; - /*Transformed, in dbm. 
Beautified signal - strength for UI, not correct. */ + /* Transformed, in dbm. Beautified signal + * strength for UI, not correct. + */ long signal_strength; u8 rx_rssi_percentage[4]; @@ -1478,15 +1462,15 @@ struct rtl_io { /*PCI IO map */ unsigned long pci_base_addr; /*device I/O address */ - void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); - void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val); - void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val); - void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf, - u16 len); + void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val); + void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val); + void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val); + void (*writen_sync)(struct rtl_priv *rtlpriv, u32 addr, void *buf, + u16 len); - u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); - u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); - u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr); + u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr); + u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr); + u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr); }; @@ -1711,7 +1695,8 @@ struct rtl_hal { bool during_mac1init_radioa; bool reloadtxpowerindex; /* True if IMR or IQK have done - for 2.4G in scan progress */ + * for 2.4G in scan progress + */ bool load_imrandiqk_setting_for2g; bool disable_amsdu_8k; @@ -1750,12 +1735,14 @@ struct rtl_security { u32 hwsec_cam_bitmap; u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN]; /*local Key buffer, indx 0 is for - pairwise key 1-4 is for agoup key. */ + * pairwise key 1-4 is for agoup key. + */ u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN]; u8 key_len[KEY_BUF_SIZE]; /*The pointer of Pairwise Key, - it always points to KeyBuf[4] */ + * it always points to KeyBuf[4] + */ u8 *pairwise_key; }; @@ -1898,7 +1885,7 @@ struct rtl_dm { struct rtl_efuse { const struct rtl_efuse_ops *efuse_ops; - bool autoLoad_ok; + bool autoload_ok; bool bootfromefuse; u16 max_physical_size; @@ -2019,11 +2006,10 @@ struct rtl_ps_ctl { bool rfchange_inprogress; bool swrf_processing; bool hwradiooff; - /* - * just for PCIE ASPM + /* just for PCIE ASPM * If it supports ASPM, Offset[560h] = 0x40, * otherwise Offset[560h] = 0x00. - * */ + */ bool support_aspm; bool support_backdoor; @@ -2103,10 +2089,9 @@ struct rtl_stats { u8 nic_type; u16 length; u8 signalquality; /*in 0-100 index. */ - /* - * Real power in dBm for this packet, + /* Real power in dBm for this packet, * no beautification and aggregation. - * */ + */ s32 recvsignalpower; s8 rxpower; /*in dBm Translate from PWdB */ u8 signalstrength; /*in 0-100 index. 
*/ @@ -2125,7 +2110,7 @@ struct rtl_stats { u8 rx_bufshift; bool isampdu; bool isfirst_ampdu; - bool rx_is40Mhzpacket; + bool rx_is40mhzpacket; u8 rx_packet_bw; u32 rx_pwdb_all; u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ @@ -2158,7 +2143,6 @@ struct rtl_stats { u32 macid_valid_entry[2]; }; - struct rt_link_detect { /* count for roaming */ u32 bcn_rx_inperiod; @@ -2232,114 +2216,114 @@ struct rtl_int { }; struct rtl_hal_ops { - int (*init_sw_vars) (struct ieee80211_hw *hw); - void (*deinit_sw_vars) (struct ieee80211_hw *hw); + int (*init_sw_vars)(struct ieee80211_hw *hw); + void (*deinit_sw_vars)(struct ieee80211_hw *hw); void (*read_chip_version)(struct ieee80211_hw *hw); - void (*read_eeprom_info) (struct ieee80211_hw *hw); - void (*interrupt_recognized) (struct ieee80211_hw *hw, - struct rtl_int *intvec); - int (*hw_init) (struct ieee80211_hw *hw); - void (*hw_disable) (struct ieee80211_hw *hw); - void (*hw_suspend) (struct ieee80211_hw *hw); - void (*hw_resume) (struct ieee80211_hw *hw); - void (*enable_interrupt) (struct ieee80211_hw *hw); - void (*disable_interrupt) (struct ieee80211_hw *hw); - int (*set_network_type) (struct ieee80211_hw *hw, - enum nl80211_iftype type); + void (*read_eeprom_info)(struct ieee80211_hw *hw); + void (*interrupt_recognized)(struct ieee80211_hw *hw, + struct rtl_int *intvec); + int (*hw_init)(struct ieee80211_hw *hw); + void (*hw_disable)(struct ieee80211_hw *hw); + void (*hw_suspend)(struct ieee80211_hw *hw); + void (*hw_resume)(struct ieee80211_hw *hw); + void (*enable_interrupt)(struct ieee80211_hw *hw); + void (*disable_interrupt)(struct ieee80211_hw *hw); + int (*set_network_type)(struct ieee80211_hw *hw, + enum nl80211_iftype type); void (*set_chk_bssid)(struct ieee80211_hw *hw, - bool check_bssid); - void (*set_bw_mode) (struct ieee80211_hw *hw, - enum nl80211_channel_type ch_type); - u8(*switch_channel) (struct ieee80211_hw *hw); - void (*set_qos) (struct ieee80211_hw *hw, int aci); - void (*set_bcn_reg) (struct ieee80211_hw *hw); - void (*set_bcn_intv) (struct ieee80211_hw *hw); - void (*update_interrupt_mask) (struct ieee80211_hw *hw, - u32 add_msr, u32 rm_msr); - void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val); - void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val); - void (*update_rate_tbl) (struct ieee80211_hw *hw, - struct ieee80211_sta *sta, u8 rssi_leve, - bool update_bw); + bool check_bssid); + void (*set_bw_mode)(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); + u8 (*switch_channel)(struct ieee80211_hw *hw); + void (*set_qos)(struct ieee80211_hw *hw, int aci); + void (*set_bcn_reg)(struct ieee80211_hw *hw); + void (*set_bcn_intv)(struct ieee80211_hw *hw); + void (*update_interrupt_mask)(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); + void (*get_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val); + void (*set_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val); + void (*update_rate_tbl)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, u8 rssi_leve, + bool update_bw); void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc, u8 *desc, u8 queue_index, struct sk_buff *skb, dma_addr_t addr); - void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level); + void (*update_rate_mask)(struct ieee80211_hw *hw, u8 rssi_level); u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw, u8 queue_index); void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc, u8 queue_index); - void (*fill_tx_desc) (struct ieee80211_hw *hw, - struct ieee80211_hdr *hdr, 
u8 *pdesc_tx, - u8 *pbd_desc_tx, - struct ieee80211_tx_info *info, - struct ieee80211_sta *sta, - struct sk_buff *skb, u8 hw_queue, - struct rtl_tcb_desc *ptcb_desc); - void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 *pDesc, - u32 buffer_len, bool bIsPsPoll); - void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc, - bool firstseg, bool lastseg, - struct sk_buff *skb); + void (*fill_tx_desc)(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + u8 *pbd_desc_tx, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, + struct rtl_tcb_desc *ptcb_desc); + void (*fill_fake_txdesc)(struct ieee80211_hw *hw, u8 *pdesc, + u32 buffer_len, bool bsspspoll); + void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc, + bool firstseg, bool lastseg, + struct sk_buff *skb); void (*fill_tx_special_desc)(struct ieee80211_hw *hw, u8 *pdesc, u8 *pbd_desc, struct sk_buff *skb, u8 hw_queue); - bool (*query_rx_desc) (struct ieee80211_hw *hw, - struct rtl_stats *stats, - struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb); - void (*set_channel_access) (struct ieee80211_hw *hw); - bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid); - void (*dm_watchdog) (struct ieee80211_hw *hw); - void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation); - bool (*set_rf_power_state) (struct ieee80211_hw *hw, - enum rf_pwrstate rfpwr_state); - void (*led_control) (struct ieee80211_hw *hw, - enum led_ctl_mode ledaction); + bool (*query_rx_desc)(struct ieee80211_hw *hw, + struct rtl_stats *stats, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); + void (*set_channel_access)(struct ieee80211_hw *hw); + bool (*radio_onoff_checking)(struct ieee80211_hw *hw, u8 *valid); + void (*dm_watchdog)(struct ieee80211_hw *hw); + void (*scan_operation_backup)(struct ieee80211_hw *hw, u8 operation); + bool (*set_rf_power_state)(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); + void (*led_control)(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction); void (*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name, u8 *val); u64 (*get_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx, u8 desc_name); - bool (*is_tx_desc_closed) (struct ieee80211_hw *hw, - u8 hw_queue, u16 index); - void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue); - void (*enable_hw_sec) (struct ieee80211_hw *hw); - void (*set_key) (struct ieee80211_hw *hw, u32 key_index, - u8 *macaddr, bool is_group, u8 enc_algo, - bool is_wepkey, bool clear_all); - void (*init_sw_leds) (struct ieee80211_hw *hw); - void (*deinit_sw_leds) (struct ieee80211_hw *hw); - u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); - void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, - u32 data); - u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, - u32 regaddr, u32 bitmask); - void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, - u32 regaddr, u32 bitmask, u32 data); - void (*linked_set_reg) (struct ieee80211_hw *hw); - void (*chk_switch_dmdp) (struct ieee80211_hw *hw); - void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw); - void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw); - bool (*phy_rf6052_config) (struct ieee80211_hw *hw); - void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw, - u8 *powerlevel); - void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw, - u8 *ppowerlevel, u8 channel); - bool (*config_bb_with_headerfile) (struct 
ieee80211_hw *hw, - u8 configtype); - bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw, - u8 configtype); - void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t); - void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw); - void (*dm_dynamic_txpower) (struct ieee80211_hw *hw); - void (*c2h_command_handle) (struct ieee80211_hw *hw); - void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, - bool mstate); - void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); - void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, - u32 cmd_len, u8 *p_cmdbuffer); + bool (*is_tx_desc_closed)(struct ieee80211_hw *hw, + u8 hw_queue, u16 index); + void (*tx_polling)(struct ieee80211_hw *hw, u8 hw_queue); + void (*enable_hw_sec)(struct ieee80211_hw *hw); + void (*set_key)(struct ieee80211_hw *hw, u32 key_index, + u8 *macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); + void (*init_sw_leds)(struct ieee80211_hw *hw); + void (*deinit_sw_leds)(struct ieee80211_hw *hw); + u32 (*get_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); + void (*set_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, + u32 data); + u32 (*get_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask); + void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data); + void (*linked_set_reg)(struct ieee80211_hw *hw); + void (*chk_switch_dmdp)(struct ieee80211_hw *hw); + void (*dualmac_easy_concurrent)(struct ieee80211_hw *hw); + void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw); + bool (*phy_rf6052_config)(struct ieee80211_hw *hw); + void (*phy_rf6052_set_cck_txpower)(struct ieee80211_hw *hw, + u8 *powerlevel); + void (*phy_rf6052_set_ofdm_txpower)(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel); + bool (*config_bb_with_headerfile)(struct ieee80211_hw *hw, + u8 configtype); + bool (*config_bb_with_pgheaderfile)(struct ieee80211_hw *hw, + u8 configtype); + void (*phy_lc_calibrate)(struct ieee80211_hw *hw, bool is2t); + void (*phy_set_bw_mode_callback)(struct ieee80211_hw *hw); + void (*dm_dynamic_txpower)(struct ieee80211_hw *hw); + void (*c2h_command_handle)(struct ieee80211_hw *hw); + void (*bt_wifi_media_status_notify)(struct ieee80211_hw *hw, + bool mstate); + void (*bt_coex_off_before_lps)(struct ieee80211_hw *hw); + void (*fill_h2c_cmd)(struct ieee80211_hw *hw, u8 element_id, + u32 cmd_len, u8 *p_cmdbuffer); void (*set_default_port_id_cmd)(struct ieee80211_hw *hw); - bool (*get_btc_status) (void); + bool (*get_btc_status)(void); bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); void (*add_wowlan_pattern)(struct ieee80211_hw *hw, struct rtl_wow_pattern *rtl_pattern, @@ -2352,24 +2336,24 @@ struct rtl_hal_ops { struct rtl_intf_ops { /*com */ void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); - int (*adapter_start) (struct ieee80211_hw *hw); - void (*adapter_stop) (struct ieee80211_hw *hw); + int (*adapter_start)(struct ieee80211_hw *hw); + void (*adapter_stop)(struct ieee80211_hw *hw); bool (*check_buddy_priv)(struct ieee80211_hw *hw, struct rtl_priv **buddy_priv); - int (*adapter_tx) (struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - struct sk_buff *skb, - struct rtl_tcb_desc *ptcb_desc); + int (*adapter_tx)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb, + struct rtl_tcb_desc *ptcb_desc); void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop); - int (*reset_trx_ring) (struct ieee80211_hw *hw); - bool 
(*waitq_insert) (struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - struct sk_buff *skb); + int (*reset_trx_ring)(struct ieee80211_hw *hw); + bool (*waitq_insert)(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + struct sk_buff *skb); /*pci */ - void (*disable_aspm) (struct ieee80211_hw *hw); - void (*enable_aspm) (struct ieee80211_hw *hw); + void (*disable_aspm)(struct ieee80211_hw *hw); + void (*enable_aspm)(struct ieee80211_hw *hw); /*usb */ }; @@ -2447,7 +2431,8 @@ struct rtl_hal_cfg { enum rtl_spec_ver spec_ver; /*this map used for some registers or vars - defined int HAL but used in MAIN */ + * defined int HAL but used in MAIN + */ u32 maps[RTL_VAR_MAP_MAX]; }; @@ -2609,7 +2594,8 @@ struct dig_t { struct rtl_global_var { /* from this list we can get - * other adapter's rtl_priv */ + * other adapter's rtl_priv + */ struct list_head glb_priv_list; spinlock_t glb_list_lock; }; @@ -2688,30 +2674,30 @@ struct bt_coexist_info { }; struct rtl_btc_ops { - void (*btc_init_variables) (struct rtl_priv *rtlpriv); + void (*btc_init_variables)(struct rtl_priv *rtlpriv); void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv); void (*btc_deinit_variables)(struct rtl_priv *rtlpriv); - void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); + void (*btc_init_hal_vars)(struct rtl_priv *rtlpriv); void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); - void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); + void (*btc_init_hw_config)(struct rtl_priv *rtlpriv); void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv); - void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); + void (*btc_ips_notify)(struct rtl_priv *rtlpriv, u8 type); void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); - void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype); + void (*btc_scan_notify)(struct rtl_priv *rtlpriv, u8 scantype); void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv, u8 scantype); - void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action); - void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv, - enum rt_media_status mstatus); - void (*btc_periodical) (struct rtl_priv *rtlpriv); + void (*btc_connect_notify)(struct rtl_priv *rtlpriv, u8 action); + void (*btc_mediastatus_notify)(struct rtl_priv *rtlpriv, + enum rt_media_status mstatus); + void (*btc_periodical)(struct rtl_priv *rtlpriv); void (*btc_halt_notify)(struct rtl_priv *rtlpriv); - void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, - u8 *tmp_buf, u8 length); + void (*btc_btinfo_notify)(struct rtl_priv *rtlpriv, + u8 *tmp_buf, u8 length); void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); - bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); - bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); - bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); + bool (*btc_is_limited_dig)(struct rtl_priv *rtlpriv); + bool (*btc_is_disable_edca_turbo)(struct rtl_priv *rtlpriv); + bool (*btc_is_bt_disabled)(struct rtl_priv *rtlpriv); void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv, u8 pkt_type); void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type, @@ -2797,16 +2783,16 @@ struct rtl_priv { struct rtl_debug dbg; int max_fw_size; - /* - *hal_cfg : for diff cards - *intf_ops : for diff interrface usb/pcie + /* hal_cfg : for diff cards + * intf_ops : for diff interrface usb/pcie */ struct rtl_hal_cfg *cfg; const struct rtl_intf_ops *intf_ops; - /*this var will be set by set_bit, - and was used to indicate status of - interface or hardware */ + 
/* this var will be set by set_bit, + * and was used to indicate status of + * interface or hardware + */ unsigned long status; /* tables for dm */ @@ -2842,10 +2828,11 @@ struct rtl_priv { #ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; #endif - /*This must be the last item so - that it points to the data allocated - beyond this structure like: - rtl_pci_priv or rtl_usb_priv */ + /* This must be the last item so + * that it points to the data allocated + * beyond this structure like: + * rtl_pci_priv or rtl_usb_priv + */ u8 priv[0] __aligned(sizeof(void *)); }; @@ -2855,10 +2842,7 @@ struct rtl_priv { #define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse)) #define rtl_psc(rtlpriv) (&((rtlpriv)->psc)) - -/*************************************** - Bluetooth Co-existence Related -****************************************/ +/* Bluetooth Co-existence Related */ enum bt_ant_num { ANT_X2 = 0, @@ -2907,14 +2891,13 @@ enum bt_radio_shared { BT_RADIO_INDIVIDUAL = 1, }; - /**************************************** - mem access macro define start - Call endian free function when - 1. Read/write packet content. - 2. Before write integer to IO. - 3. After read integer from IO. -****************************************/ + * mem access macro define start + * Call endian free function when + * 1. Read/write packet content. + * 2. Before write integer to IO. + * 3. After read integer from IO. + ****************************************/ /* Convert little data endian to host ordering */ #define EF1BYTE(_val) \ ((u8)(_val)) @@ -2970,8 +2953,9 @@ enum bt_radio_shared { (EF1BYTE(*((u8 *)(__pstart)))) /*Description: -Translate subfield (continuous bits in little-endian) of 4-byte -value to host byte ordering.*/ + * Translate subfield (continuous bits in little-endian) of 4-byte + * value to host byte ordering. + */ #define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ ( \ (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \ @@ -3033,9 +3017,7 @@ value to host byte ordering.*/ #define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \ (__value) : (((__value + __aligment - 1) / __aligment) * __aligment)) -/**************************************** - mem access macro define end -****************************************/ +/* mem access macro define end */ #define byte(x, n) ((x >> (8 * n)) & 0xff) @@ -3170,7 +3152,7 @@ static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr, } static inline void rtl_set_bbreg_with_dwmask(struct ieee80211_hw *hw, - u32 regaddr, u32 data) + u32 regaddr, u32 data) { rtl_set_bbreg(hw, regaddr, 0xffffffff, data); } @@ -3241,9 +3223,10 @@ static inline struct ieee80211_sta *get_sta(struct ieee80211_hw *hw, } static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw, - u8 *mac_addr) + u8 *mac_addr) { struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + return ieee80211_find_sta(mac->vif, mac_addr); } diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index 8c6ca8e689e4..c71b41e45423 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -59,7 +59,7 @@ static int rsi_sdio_stats_read(struct seq_file *seq, void *data) } /** - * rsi_sdio_stats_open() - This funtion calls single open function of seq_file + * rsi_sdio_stats_open() - This function calls single open function of seq_file * to open file and read contents from it. * @inode: Pointer to the inode structure. * @file: Pointer to the file structure. 
@@ -93,7 +93,7 @@ static int rsi_version_read(struct seq_file *seq, void *data) } /** - * rsi_version_open() - This funtion calls single open function of seq_file to + * rsi_version_open() - This function calls single open function of seq_file to * open file and read contents from it. * @inode: Pointer to the inode structure. * @file: Pointer to the file structure. @@ -178,7 +178,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data) } /** - * rsi_stats_open() - This funtion calls single open function of seq_file to + * rsi_stats_open() - This function calls single open function of seq_file to * open file and read contents from it. * @inode: Pointer to the inode structure. * @file: Pointer to the file structure. @@ -207,7 +207,7 @@ static int rsi_debug_zone_read(struct seq_file *seq, void *data) } /** - * rsi_debug_read() - This funtion calls single open function of seq_file to + * rsi_debug_read() - This function calls single open function of seq_file to * open file and read contents from it. * @inode: Pointer to the inode structure. * @file: Pointer to the file structure. @@ -297,11 +297,6 @@ int rsi_init_dbgfs(struct rsi_hw *adapter) dev_dbgfs->subdir = debugfs_create_dir(devdir, NULL); - if (!dev_dbgfs->subdir) { - kfree(dev_dbgfs); - return -ENOMEM; - } - for (ii = 0; ii < adapter->num_debugfs_entries; ii++) { files = &dev_debugfs_files[ii]; dev_dbgfs->rsi_files[ii] = diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 182b06629371..1dbaab2a96b7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -100,6 +100,9 @@ int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) mgmt_desc->frame_type = TX_DOT11_MGMT; mgmt_desc->header_len = MIN_802_11_HDR_LEN; mgmt_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; + + if (ieee80211_is_probe_req(wh->frame_control)) + mgmt_desc->frame_info = cpu_to_le16(RSI_INSERT_SEQ_IN_FW); mgmt_desc->frame_info |= cpu_to_le16(RATE_INFO_ENABLE); if (is_broadcast_ether_addr(wh->addr1)) mgmt_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index e56fc83faf0e..831046e760f8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -229,6 +229,69 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band) /* sbands->ht_cap.mcs.rx_highest = 0x82; */ } +static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct cfg80211_scan_request *scan_req = &hw_req->req; + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &vif->bss_conf; + + rsi_dbg(INFO_ZONE, "***** Hardware scan start *****\n"); + common->mac_ops_resumed = false; + + if (common->fsm_state != FSM_MAC_INIT_DONE) + return -ENODEV; + + if ((common->wow_flags & RSI_WOW_ENABLED) || + scan_req->n_channels == 0) + return -EINVAL; + + /* Scan already in progress. 
So return */ + if (common->bgscan_en) + return -EBUSY; + + /* If STA is not connected, return with special value 1, in order + * to start sw_scan in mac80211 + */ + if (!bss->assoc) + return 1; + + mutex_lock(&common->mutex); + common->hwscan = scan_req; + if (!rsi_send_bgscan_params(common, RSI_START_BGSCAN)) { + if (!rsi_send_bgscan_probe_req(common, vif)) { + rsi_dbg(INFO_ZONE, "Background scan started...\n"); + common->bgscan_en = true; + } + } + mutex_unlock(&common->mutex); + + return 0; +} + +static void rsi_mac80211_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + struct cfg80211_scan_info info; + + rsi_dbg(INFO_ZONE, "***** Hardware scan stop *****\n"); + mutex_lock(&common->mutex); + + if (common->bgscan_en) { + if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN)) + common->bgscan_en = false; + info.aborted = false; + ieee80211_scan_completed(adapter->hw, &info); + rsi_dbg(INFO_ZONE, "Back ground scan cancelled\n"); + } + common->hwscan = NULL; + mutex_unlock(&common->mutex); +} + /** * rsi_mac80211_detach() - This function is used to de-initialize the * Mac80211 stack. @@ -308,6 +371,10 @@ static void rsi_mac80211_tx(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + struct ieee80211_hdr *wlh = (struct ieee80211_hdr *)skb->data; + + if (ieee80211_is_auth(wlh->frame_control)) + common->mac_ops_resumed = false; rsi_core_xmit(common, skb); } @@ -615,7 +682,8 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, } /* Power save parameters */ - if (changed & IEEE80211_CONF_CHANGE_PS) { + if ((changed & IEEE80211_CONF_CHANGE_PS) && + !common->mac_ops_resumed) { struct ieee80211_vif *vif, *sta_vif = NULL; unsigned long flags; int i, set_ps = 1; @@ -748,15 +816,15 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, adapter->ps_info.dtim_interval_duration = bss->dtim_period; adapter->ps_info.listen_interval = conf->listen_interval; - /* If U-APSD is updated, send ps parameters to firmware */ - if (bss->assoc) { - if (common->uapsd_bitmap) { - rsi_dbg(INFO_ZONE, "Configuring UAPSD\n"); - rsi_conf_uapsd(adapter, vif); + /* If U-APSD is updated, send ps parameters to firmware */ + if (bss->assoc) { + if (common->uapsd_bitmap) { + rsi_dbg(INFO_ZONE, "Configuring UAPSD\n"); + rsi_conf_uapsd(adapter, vif); + } + } else { + common->uapsd_bitmap = 0; } - } else { - common->uapsd_bitmap = 0; - } } if (changed & BSS_CHANGED_CQM) { @@ -1270,7 +1338,7 @@ static void rsi_fill_rx_status(struct ieee80211_hw *hw, } /** - * rsi_indicate_pkt_to_os() - This function sends recieved packet to mac80211. + * rsi_indicate_pkt_to_os() - This function sends received packet to mac80211. * @common: Pointer to the driver private structure. * @skb: Pointer to the socket buffer structure. * @@ -1833,6 +1901,10 @@ int rsi_config_wowlan(struct rsi_hw *adapter, struct cfg80211_wowlan *wowlan) return 0; } rsi_dbg(INFO_ZONE, "TRIGGERS %x\n", triggers); + + if (common->coex_mode > 1) + rsi_disable_ps(adapter, adapter->vifs[0]); + rsi_send_wowlan_request(common, triggers, 1); /** @@ -1876,8 +1948,13 @@ static int rsi_mac80211_resume(struct ieee80211_hw *hw) rsi_dbg(INFO_ZONE, "%s: mac80211 resume\n", __func__); - if (common->hibernate_resume) - return 0; + if (common->hibernate_resume) { + common->mac_ops_resumed = true; + /* Device need a complete restart of all MAC operations. + * returning 1 will serve this purpose. 
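The new hw_scan path above gates the request with a short series of checks before handing the scan to firmware, and both it and the resume handler lean on the special return value 1, which the patch comments describe as the way to make mac80211 fall back to its software scan. A stand-alone, illustrative model of that decision order (not driver code; the function and parameter names are made up for the sketch):

#include <errno.h>
#include <stdbool.h>

/* Illustrative only: mirrors the order of checks in rsi_mac80211_hw_scan_start(). */
static int hw_scan_precheck(bool fw_init_done, bool wowlan_enabled,
			    unsigned int n_channels, bool bgscan_running,
			    bool sta_associated)
{
	if (!fw_init_done)
		return -ENODEV;	/* firmware FSM not in MAC_INIT_DONE yet */
	if (wowlan_enabled || n_channels == 0)
		return -EINVAL;
	if (bgscan_running)
		return -EBUSY;	/* a firmware background scan is already running */
	if (!sta_associated)
		return 1;	/* tell mac80211 to run a software scan instead */
	return 0;		/* proceed: send bgscan params and the probe request */
}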
+ */ + return 1; + } mutex_lock(&common->mutex); rsi_send_wowlan_request(common, 0, 0); @@ -1917,6 +1994,8 @@ static const struct ieee80211_ops mac80211_ops = { .suspend = rsi_mac80211_suspend, .resume = rsi_mac80211_resume, #endif + .hw_scan = rsi_mac80211_hw_scan_start, + .cancel_hw_scan = rsi_mac80211_cancel_hw_scan, }; /** @@ -1999,6 +2078,9 @@ int rsi_mac80211_attach(struct rsi_common *common) common->max_stations = wiphy->max_ap_assoc_sta; rsi_dbg(ERR_ZONE, "Max Stations Allowed = %d\n", common->max_stations); hw->sta_data_size = sizeof(struct rsi_sta); + + wiphy->max_scan_ssids = RSI_MAX_SCAN_SSIDS; + wiphy->max_scan_ie_len = RSI_MAX_SCAN_IE_LEN; wiphy->flags = WIPHY_FLAG_REPORTS_OBSS; wiphy->flags |= WIPHY_FLAG_AP_UAPSD; wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 01d99ed985ee..29d83049c5f5 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -121,11 +121,8 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common, u32 pkt_len, u8 extended_desc) { - struct ieee80211_tx_info *info; struct sk_buff *skb = NULL; u8 payload_offset; - struct ieee80211_vif *vif; - struct ieee80211_hdr *wh; if (WARN(!pkt_len, "%s: Dummy pkt received", __func__)) return NULL; @@ -144,10 +141,7 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common, payload_offset = (extended_desc + FRAME_DESC_SZ); skb_put(skb, pkt_len); memcpy((skb->data), (buffer + payload_offset), skb->len); - wh = (struct ieee80211_hdr *)skb->data; - vif = rsi_get_vif(common->priv, wh->addr1); - info = IEEE80211_SKB_CB(skb); return skb; } @@ -328,6 +322,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode) } rsi_default_ps_params(adapter); + init_bgscan_params(common); spin_lock_init(&adapter->ps_lock); timer_setup(&common->roc_timer, rsi_roc_timeout, 0); init_completion(&common->wlan_init_completion); diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 1095df7d9573..844f2fac298f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -15,6 +15,7 @@ */ #include +#include #include "rsi_mgmt.h" #include "rsi_common.h" #include "rsi_ps.h" @@ -236,6 +237,18 @@ static void rsi_set_default_parameters(struct rsi_common *common) common->dtim_cnt = RSI_DTIM_COUNT; } +void init_bgscan_params(struct rsi_common *common) +{ + memset((u8 *)&common->bgscan, 0, sizeof(struct rsi_bgscan_params)); + common->bgscan.bgscan_threshold = RSI_DEF_BGSCAN_THRLD; + common->bgscan.roam_threshold = RSI_DEF_ROAM_THRLD; + common->bgscan.bgscan_periodicity = RSI_BGSCAN_PERIODICITY; + common->bgscan.num_bgscan_channels = 0; + common->bgscan.two_probe = 1; + common->bgscan.active_scan_duration = RSI_ACTIVE_SCAN_TIME; + common->bgscan.passive_scan_duration = RSI_PASSIVE_SCAN_TIME; +} + /** * rsi_set_contention_vals() - This function sets the contention values for the * backoff procedure. @@ -396,8 +409,8 @@ static int rsi_load_radio_caps(struct rsi_common *common) * rsi_mgmt_pkt_to_core() - This function is the entry point for Mgmt module. * @common: Pointer to the driver private structure. * @msg: Pointer to received packet. - * @msg_len: Length of the recieved packet. - * @type: Type of recieved packet. + * @msg_len: Length of the received packet. + * @type: Type of received packet. * * Return: 0 on success, -1 on failure. 
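Both the cancel handler shown earlier and the BG_SCAN_PROBE_REQ confirmation handled further down finish a hardware scan the same way: stop the firmware background scan, then report completion to mac80211 with ieee80211_scan_completed(). A small sketch of that completion step, with a hypothetical helper name:

#include <net/mac80211.h>

/* Report scan completion to mac80211; in this patch 'aborted' is false at
 * both call sites because the firmware scan either finished or was stopped
 * deliberately. */
static void demo_report_scan_done(struct ieee80211_hw *hw, bool aborted)
{
	struct cfg80211_scan_info info = {
		.aborted = aborted,
	};

	ieee80211_scan_completed(hw, &info);
}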
*/ @@ -1534,7 +1547,7 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable, } /** - * rsi_set_antenna() - This fuction send antenna configuration request + * rsi_set_antenna() - This function send antenna configuration request * to device * * @common: Pointer to the driver private structure. @@ -1628,6 +1641,111 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags, } #endif +int rsi_send_bgscan_params(struct rsi_common *common, int enable) +{ + struct rsi_bgscan_params *params = &common->bgscan; + struct cfg80211_scan_request *scan_req = common->hwscan; + struct rsi_bgscan_config *bgscan; + struct sk_buff *skb; + u16 frame_len = sizeof(*bgscan); + u8 i; + + rsi_dbg(MGMT_TX_ZONE, "%s: Sending bgscan params frame\n", __func__); + + skb = dev_alloc_skb(frame_len); + if (!skb) + return -ENOMEM; + memset(skb->data, 0, frame_len); + + bgscan = (struct rsi_bgscan_config *)skb->data; + rsi_set_len_qno(&bgscan->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + bgscan->desc_dword0.frame_type = BG_SCAN_PARAMS; + bgscan->bgscan_threshold = cpu_to_le16(params->bgscan_threshold); + bgscan->roam_threshold = cpu_to_le16(params->roam_threshold); + if (enable) + bgscan->bgscan_periodicity = + cpu_to_le16(params->bgscan_periodicity); + bgscan->active_scan_duration = + cpu_to_le16(params->active_scan_duration); + bgscan->passive_scan_duration = + cpu_to_le16(params->passive_scan_duration); + bgscan->two_probe = params->two_probe; + + bgscan->num_bgscan_channels = scan_req->n_channels; + for (i = 0; i < bgscan->num_bgscan_channels; i++) + bgscan->channels2scan[i] = + cpu_to_le16(scan_req->channels[i]->hw_value); + + skb_put(skb, frame_len); + + return rsi_send_internal_mgmt_frame(common, skb); +} + +/* This function sends the probe request to be used by firmware in + * background scan + */ +int rsi_send_bgscan_probe_req(struct rsi_common *common, + struct ieee80211_vif *vif) +{ + struct cfg80211_scan_request *scan_req = common->hwscan; + struct rsi_bgscan_probe *bgscan; + struct sk_buff *skb; + struct sk_buff *probereq_skb; + u16 frame_len = sizeof(*bgscan); + size_t ssid_len = 0; + u8 *ssid = NULL; + + rsi_dbg(MGMT_TX_ZONE, + "%s: Sending bgscan probe req frame\n", __func__); + + if (common->priv->sc_nvifs <= 0) + return -ENODEV; + + if (scan_req->n_ssids) { + ssid = scan_req->ssids[0].ssid; + ssid_len = scan_req->ssids[0].ssid_len; + } + + skb = dev_alloc_skb(frame_len + MAX_BGSCAN_PROBE_REQ_LEN); + if (!skb) + return -ENOMEM; + memset(skb->data, 0, frame_len + MAX_BGSCAN_PROBE_REQ_LEN); + + bgscan = (struct rsi_bgscan_probe *)skb->data; + bgscan->desc_dword0.frame_type = BG_SCAN_PROBE_REQ; + bgscan->flags = cpu_to_le16(HOST_BG_SCAN_TRIG); + if (common->band == NL80211_BAND_5GHZ) { + bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_6); + bgscan->def_chan = cpu_to_le16(40); + } else { + bgscan->mgmt_rate = cpu_to_le16(RSI_RATE_1); + bgscan->def_chan = cpu_to_le16(11); + } + bgscan->channel_scan_time = cpu_to_le16(RSI_CHANNEL_SCAN_TIME); + + probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid, + ssid_len, scan_req->ie_len); + if (!probereq_skb) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len); + + bgscan->probe_req_length = cpu_to_le16(probereq_skb->len); + + rsi_set_len_qno(&bgscan->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ + probereq_skb->len), + RSI_WIFI_MGMT_Q); + + skb_put(skb, frame_len + probereq_skb->len); + + dev_kfree_skb(probereq_skb); + + return 
rsi_send_internal_mgmt_frame(common, skb); +} + /** * rsi_handle_ta_confirm_type() - This function handles the confirm frames. * @common: Pointer to the driver private structure. @@ -1771,9 +1889,28 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, return 0; } break; + + case SCAN_REQUEST: + rsi_dbg(INFO_ZONE, "Set channel confirm\n"); + break; + case WAKEUP_SLEEP_REQUEST: rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n"); return rsi_handle_ps_confirm(adapter, msg); + + case BG_SCAN_PROBE_REQ: + rsi_dbg(INFO_ZONE, "BG scan complete event\n"); + if (common->bgscan_en) { + struct cfg80211_scan_info info; + + if (!rsi_send_bgscan_params(common, RSI_STOP_BGSCAN)) + common->bgscan_en = 0; + info.aborted = false; + ieee80211_scan_completed(adapter->hw, &info); + } + rsi_dbg(INFO_ZONE, "Background scan completed\n"); + break; + default: rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n", __func__); @@ -1822,7 +1959,7 @@ int rsi_handle_card_ready(struct rsi_common *common, u8 *msg) /** * rsi_mgmt_pkt_recv() - This function processes the management packets - * recieved from the hardware. + * received from the hardware. * @common: Pointer to the driver private structure. * @msg: Pointer to the received packet. * @@ -1870,6 +2007,35 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) return -1; rsi_send_beacon(common); break; + case WOWLAN_WAKEUP_REASON: + rsi_dbg(ERR_ZONE, "\n\nWakeup Type: %x\n", msg[15]); + switch (msg[15]) { + case RSI_UNICAST_MAGIC_PKT: + rsi_dbg(ERR_ZONE, + "*** Wakeup for Unicast magic packet ***\n"); + break; + case RSI_BROADCAST_MAGICPKT: + rsi_dbg(ERR_ZONE, + "*** Wakeup for Broadcast magic packet ***\n"); + break; + case RSI_EAPOL_PKT: + rsi_dbg(ERR_ZONE, + "*** Wakeup for GTK renewal ***\n"); + break; + case RSI_DISCONNECT_PKT: + rsi_dbg(ERR_ZONE, + "*** Wakeup for Disconnect ***\n"); + break; + case RSI_HW_BMISS_PKT: + rsi_dbg(ERR_ZONE, + "*** Wakeup for HW Beacon miss ***\n"); + break; + default: + rsi_dbg(ERR_ZONE, + "##### Un-intentional Wakeup #####\n"); + break; + } + break; case RX_DOT11_MGMT: return rsi_mgmt_pkt_to_core(common, msg, msg_len); default: diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 5733e440ecaf..3430d7a0899e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -138,7 +138,7 @@ static int rsi_issue_sdiocommand(struct sdio_func *func, } /** - * rsi_handle_interrupt() - This function is called upon the occurence + * rsi_handle_interrupt() - This function is called upon the occurrence * of an interrupt. * @function: Pointer to the sdio_func structure. * @@ -230,16 +230,19 @@ static void rsi_reset_card(struct sdio_func *pfunction) rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err); /* Issue CMD5, arg = 0 */ - err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0, - (MMC_RSP_R4 | MMC_CMD_BCR), &resp); - if (err) - rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); - card->ocr = resp; + if (!host->ocr_avail) { + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); + if (err) + rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", + __func__, err); + host->ocr_avail = resp; + } /* Issue CMD5, arg = ocr. 
Wait till card is ready */ for (i = 0; i < 100; i++) { err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, - card->ocr, + host->ocr_avail, (MMC_RSP_R4 | MMC_CMD_BCR), &resp); if (err) { rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", @@ -872,7 +875,7 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter, goto fail; } - rsi_dbg(INIT_ZONE, "%s: Setup card succesfully\n", __func__); + rsi_dbg(INIT_ZONE, "%s: Setup card successfully\n", __func__); status = rsi_init_sdio_slave_regs(adapter); if (status) { @@ -1129,6 +1132,12 @@ static void rsi_disconnect(struct sdio_func *pfunction) rsi_mac80211_detach(adapter); mdelay(10); + if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 && + adapter->priv->bt_adapter) { + rsi_bt_ops.detach(adapter->priv->bt_adapter); + adapter->priv->bt_adapter = NULL; + } + /* Reset Chip */ rsi_reset_chip(adapter); @@ -1305,6 +1314,12 @@ static int rsi_freeze(struct device *dev) rsi_dbg(ERR_ZONE, "##### Device can not wake up through WLAN\n"); + if (IS_ENABLED(CONFIG_RSI_COEX) && common->coex_mode > 1 && + common->bt_adapter) { + rsi_bt_ops.detach(common->bt_adapter); + common->bt_adapter = NULL; + } + ret = rsi_sdio_disable_interrupts(pfunction); if (sdev->write_fail) @@ -1352,6 +1367,12 @@ static void rsi_shutdown(struct device *dev) if (rsi_config_wowlan(adapter, wowlan)) rsi_dbg(ERR_ZONE, "Failed to configure WoWLAN\n"); + if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 && + adapter->priv->bt_adapter) { + rsi_bt_ops.detach(adapter->priv->bt_adapter); + adapter->priv->bt_adapter = NULL; + } + rsi_sdio_disable_interrupts(sdev->pfunction); if (sdev->write_fail) @@ -1375,7 +1396,7 @@ static int rsi_restore(struct device *dev) common->iface_down = true; adapter->sc_nvifs = 0; - ieee80211_restart_hw(adapter->hw); + adapter->ps_state = PS_NONE; common->wow_flags = 0; common->iface_down = false; diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index f360690396dd..ac0ef5ea6ffb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -252,7 +252,7 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, /** * rsi_rx_done_handler() - This function is called when a packet is received - * from USB stack. This is callback to recieve done. + * from USB stack. This is callback to receive done. * @urb: Received URB. * * Return: None. 
@@ -816,6 +816,13 @@ static void rsi_disconnect(struct usb_interface *pfunction) return; rsi_mac80211_detach(adapter); + + if (IS_ENABLED(CONFIG_RSI_COEX) && adapter->priv->coex_mode > 1 && + adapter->priv->bt_adapter) { + rsi_bt_ops.detach(adapter->priv->bt_adapter); + adapter->priv->bt_adapter = NULL; + } + rsi_reset_card(adapter); rsi_deinit_usb_interface(adapter); rsi_91x_deinit(adapter); diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index a084f224bb03..35d13f35e9b0 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -164,6 +164,24 @@ struct transmit_q_stats { u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 2]; }; +#define MAX_BGSCAN_CHANNELS_DUAL_BAND 38 +#define MAX_BGSCAN_PROBE_REQ_LEN 0x64 +#define RSI_DEF_BGSCAN_THRLD 0x0 +#define RSI_DEF_ROAM_THRLD 0xa +#define RSI_BGSCAN_PERIODICITY 0x1e +#define RSI_ACTIVE_SCAN_TIME 0x14 +#define RSI_PASSIVE_SCAN_TIME 0x46 +#define RSI_CHANNEL_SCAN_TIME 20 +struct rsi_bgscan_params { + u16 bgscan_threshold; + u16 roam_threshold; + u16 bgscan_periodicity; + u8 num_bgscan_channels; + u8 two_probe; + u16 active_scan_duration; + u16 passive_scan_duration; +}; + struct vif_priv { bool is_ht; bool sgi; @@ -289,6 +307,11 @@ struct rsi_common { bool eapol4_confirm; void *bt_adapter; + + struct cfg80211_scan_request *hwscan; + struct rsi_bgscan_params bgscan; + u8 bgscan_en; + u8 mac_ops_resumed; }; struct eepromrw_info { diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 359fbdf85739..ea83faa15c7e 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -228,6 +228,9 @@ #define RSI_MAX_TX_AGGR_FRMS 8 #define RSI_MAX_RX_AGGR_FRMS 8 +#define RSI_MAX_SCAN_SSIDS 16 +#define RSI_MAX_SCAN_IE_LEN 256 + enum opmode { RSI_OPMODE_UNSUPPORTED = -1, RSI_OPMODE_AP = 0, @@ -623,6 +626,34 @@ struct rsi_wowlan_req { u16 host_sleep_status; } __packed; +#define RSI_START_BGSCAN 1 +#define RSI_STOP_BGSCAN 0 +#define HOST_BG_SCAN_TRIG BIT(4) +struct rsi_bgscan_config { + struct rsi_cmd_desc_dword0 desc_dword0; + __le64 reserved; + __le32 reserved1; + __le16 bgscan_threshold; + __le16 roam_threshold; + __le16 bgscan_periodicity; + u8 num_bgscan_channels; + u8 two_probe; + __le16 active_scan_duration; + __le16 passive_scan_duration; + __le16 channels2scan[MAX_BGSCAN_CHANNELS_DUAL_BAND]; +} __packed; + +struct rsi_bgscan_probe { + struct rsi_cmd_desc_dword0 desc_dword0; + __le64 reserved; + __le32 reserved1; + __le16 mgmt_rate; + __le16 flags; + __le16 def_chan; + __le16 channel_scan_time; + __le16 probe_req_length; +} __packed; + static inline u32 rsi_get_queueno(u8 *addr, u16 offset) { return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12; @@ -694,4 +725,8 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags, #endif int rsi_send_ps_request(struct rsi_hw *adapter, bool enable, struct ieee80211_vif *vif); +void init_bgscan_params(struct rsi_common *common); +int rsi_send_bgscan_params(struct rsi_common *common, int enable); +int rsi_send_bgscan_probe_req(struct rsi_common *common, + struct ieee80211_vif *vif); #endif diff --git a/drivers/net/wireless/st/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c index 2231ba08bc1f..d94266d9d0b8 100644 --- a/drivers/net/wireless/st/cw1200/debug.c +++ b/drivers/net/wireless/st/cw1200/debug.c @@ -371,28 +371,14 @@ int cw1200_debug_init(struct cw1200_common *priv) d->debugfs_phy = debugfs_create_dir("cw1200", priv->hw->wiphy->debugfsdir); - if 
(!d->debugfs_phy) - goto err; - - if (!debugfs_create_file("status", 0400, d->debugfs_phy, - priv, &cw1200_status_fops)) - goto err; - - if (!debugfs_create_file("counters", 0400, d->debugfs_phy, - priv, &cw1200_counters_fops)) - goto err; - - if (!debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, - priv, &fops_wsm_dumps)) - goto err; + debugfs_create_file("status", 0400, d->debugfs_phy, priv, + &cw1200_status_fops); + debugfs_create_file("counters", 0400, d->debugfs_phy, priv, + &cw1200_counters_fops); + debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, priv, + &fops_wsm_dumps); return 0; - -err: - priv->debug = NULL; - debugfs_remove_recursive(d->debugfs_phy); - kfree(d); - return ret; } void cw1200_debug_release(struct cw1200_common *priv) diff --git a/drivers/net/wireless/st/cw1200/fwio.c b/drivers/net/wireless/st/cw1200/fwio.c index 30e7646d04af..b7881232499c 100644 --- a/drivers/net/wireless/st/cw1200/fwio.c +++ b/drivers/net/wireless/st/cw1200/fwio.c @@ -465,8 +465,8 @@ int cw1200_load_firmware(struct cw1200_common *priv) if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) { pr_err("Device is already in QUEUE mode!\n"); - ret = -EINVAL; - goto out; + ret = -EINVAL; + goto out; } switch (priv->hw_type) { diff --git a/drivers/net/wireless/st/cw1200/queue.c b/drivers/net/wireless/st/cw1200/queue.c index 7c31b63b8258..7895efefa95d 100644 --- a/drivers/net/wireless/st/cw1200/queue.c +++ b/drivers/net/wireless/st/cw1200/queue.c @@ -283,7 +283,6 @@ int cw1200_queue_put(struct cw1200_queue *queue, struct cw1200_txpriv *txpriv) { int ret = 0; - LIST_HEAD(gc_list); struct cw1200_queue_stats *stats = queue->stats; if (txpriv->link_id >= queue->stats->map_capacity) diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c index 0a9eac93dd01..71e9b91cf15b 100644 --- a/drivers/net/wireless/st/cw1200/scan.c +++ b/drivers/net/wireless/st/cw1200/scan.c @@ -84,8 +84,11 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0, req->ie_len); - if (!frame.skb) + if (!frame.skb) { + mutex_unlock(&priv->conf_mutex); + up(&priv->scan.lock); return -ENOMEM; + } if (req->ie_len) skb_put_data(frame.skb, req->ie, req->ie_len); diff --git a/drivers/net/wireless/ti/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c index 448da1f8c22f..c99b23aaa70e 100644 --- a/drivers/net/wireless/ti/wl1251/debugfs.c +++ b/drivers/net/wireless/ti/wl1251/debugfs.c @@ -54,11 +54,6 @@ static const struct file_operations name## _ops = { \ #define DEBUGFS_ADD(name, parent) \ wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \ wl, &name## _ops); \ - if (IS_ERR(wl->debugfs.name)) { \ - ret = PTR_ERR(wl->debugfs.name); \ - wl->debugfs.name = NULL; \ - goto out; \ - } #define DEBUGFS_DEL(name) \ do { \ @@ -354,10 +349,8 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl) DEBUGFS_DEL(excessive_retries); } -static int wl1251_debugfs_add_files(struct wl1251 *wl) +static void wl1251_debugfs_add_files(struct wl1251 *wl) { - int ret = 0; - DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); DEBUGFS_FWSTATS_ADD(rx, out_of_mem); @@ -453,12 +446,6 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl) DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir); DEBUGFS_ADD(retry_count, wl->debugfs.rootdir); DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir); - -out: - if (ret < 0) - wl1251_debugfs_delete_files(wl); - - return ret; } void wl1251_debugfs_reset(struct wl1251 *wl) @@ -471,56 +458,20 @@ void wl1251_debugfs_reset(struct 
wl1251 *wl) int wl1251_debugfs_init(struct wl1251 *wl) { - int ret; + wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), GFP_KERNEL); + if (!wl->stats.fw_stats) + return -ENOMEM; wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(wl->debugfs.rootdir)) { - ret = PTR_ERR(wl->debugfs.rootdir); - wl->debugfs.rootdir = NULL; - goto err; - } - wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics", wl->debugfs.rootdir); - if (IS_ERR(wl->debugfs.fw_statistics)) { - ret = PTR_ERR(wl->debugfs.fw_statistics); - wl->debugfs.fw_statistics = NULL; - goto err_root; - } - - wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), - GFP_KERNEL); - - if (!wl->stats.fw_stats) { - ret = -ENOMEM; - goto err_fw; - } - wl->stats.fw_stats_update = jiffies; - ret = wl1251_debugfs_add_files(wl); - - if (ret < 0) - goto err_file; + wl1251_debugfs_add_files(wl); return 0; - -err_file: - kfree(wl->stats.fw_stats); - wl->stats.fw_stats = NULL; - -err_fw: - debugfs_remove(wl->debugfs.fw_statistics); - wl->debugfs.fw_statistics = NULL; - -err_root: - debugfs_remove(wl->debugfs.rootdir); - wl->debugfs.rootdir = NULL; - -err: - return ret; } void wl1251_debugfs_exit(struct wl1251 *wl) diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c index 0521cbf858cf..6c3c04eca8fd 100644 --- a/drivers/net/wireless/ti/wl12xx/debugfs.c +++ b/drivers/net/wireless/ti/wl12xx/debugfs.c @@ -125,20 +125,10 @@ WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u"); int wl12xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { - int ret = 0; - struct dentry *entry, *stats, *moddir; + struct dentry *stats, *moddir; moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir); - if (!moddir || IS_ERR(moddir)) { - entry = moddir; - goto err; - } - stats = debugfs_create_dir("fw_stats", moddir); - if (!stats || IS_ERR(stats)) { - entry = stats; - goto err; - } DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow); @@ -232,12 +222,4 @@ int wl12xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); return 0; - -err: - if (IS_ERR(entry)) - ret = PTR_ERR(entry); - else - ret = -ENOMEM; - - return ret; } diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 597e934c4630..5f4ec997ca59 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -422,20 +422,10 @@ static const struct file_operations radar_debug_mode_ops = { int wl18xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { - int ret = 0; - struct dentry *entry, *stats, *moddir; + struct dentry *stats, *moddir; moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir); - if (!moddir || IS_ERR(moddir)) { - entry = moddir; - goto err; - } - stats = debugfs_create_dir("fw_stats", moddir); - if (!stats || IS_ERR(stats)) { - entry = stats; - goto err; - } DEBUGFS_ADD(clear_fw_stats, stats); @@ -590,12 +580,4 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(dynamic_fw_traces, moddir); return 0; - -err: - if (IS_ERR(entry)) - ret = PTR_ERR(entry); - else - ret = -ENOMEM; - - return ret; } diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 903968735a74..348be0aed97e 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -1427,7 +1427,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, 
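[Editor's note] The cw1200, wl1251, wl12xx, wl18xx and wlcore hunks above all apply the same kernel-wide debugfs cleanup: return values of debugfs_create_dir()/debugfs_create_file() are no longer checked, because a debugfs failure is not fatal to the driver and later debugfs calls tolerate an error-valued parent. A minimal sketch of the resulting idiom (toy names, not from this diff):

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static struct dentry *toy_dir;

	static int toy_status_show(struct seq_file *s, void *unused)
	{
		seq_puts(s, "ok\n");
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(toy_status);

	static void toy_debugfs_init(void)
	{
		/* no error checks: a failure only means the files are absent */
		toy_dir = debugfs_create_dir("toy", NULL);
		debugfs_create_file("status", 0400, toy_dir, NULL, &toy_status_fops);
	}

	static void toy_debugfs_exit(void)
	{
		debugfs_remove_recursive(toy_dir);
	}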
sizeof(*cmd), 0); if (ret < 0) { wl1271_warning("could not set keys"); - goto out; + goto out; } out: diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c index aeb74e74698e..68acd901d384 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -1301,11 +1301,10 @@ static const struct file_operations fw_logger_ops = { .llseek = default_llseek, }; -static int wl1271_debugfs_add_files(struct wl1271 *wl, - struct dentry *rootdir) +static void wl1271_debugfs_add_files(struct wl1271 *wl, + struct dentry *rootdir) { - int ret = 0; - struct dentry *entry, *streaming; + struct dentry *streaming; DEBUGFS_ADD(tx_queue_len, rootdir); DEBUGFS_ADD(retry_count, rootdir); @@ -1330,23 +1329,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(fw_logger, rootdir); streaming = debugfs_create_dir("rx_streaming", rootdir); - if (!streaming || IS_ERR(streaming)) - goto err; DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming); DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming); DEBUGFS_ADD_PREFIX(dev, mem, rootdir); - - return 0; - -err: - if (IS_ERR(entry)) - ret = PTR_ERR(entry); - else - ret = -ENOMEM; - - return ret; } void wl1271_debugfs_reset(struct wl1271 *wl) @@ -1367,11 +1354,6 @@ int wl1271_debugfs_init(struct wl1271 *wl) rootdir = debugfs_create_dir(KBUILD_MODNAME, wl->hw->wiphy->debugfsdir); - if (IS_ERR(rootdir)) { - ret = PTR_ERR(rootdir); - goto out; - } - wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL); if (!wl->stats.fw_stats) { ret = -ENOMEM; @@ -1380,9 +1362,7 @@ int wl1271_debugfs_init(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; - ret = wl1271_debugfs_add_files(wl, rootdir); - if (ret < 0) - goto out_exit; + wl1271_debugfs_add_files(wl, rootdir); ret = wlcore_debugfs_init(wl, rootdir); if (ret < 0) diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h index bf14676e6515..a4952c4f587e 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.h +++ b/drivers/net/wireless/ti/wlcore/debugfs.h @@ -53,19 +53,15 @@ static const struct file_operations name## _ops = { \ #define DEBUGFS_ADD(name, parent) \ do { \ - entry = debugfs_create_file(#name, 0400, parent, \ - wl, &name## _ops); \ - if (!entry || IS_ERR(entry)) \ - goto err; \ + debugfs_create_file(#name, 0400, parent, \ + wl, &name## _ops); \ } while (0) #define DEBUGFS_ADD_PREFIX(prefix, name, parent) \ do { \ - entry = debugfs_create_file(#name, 0400, parent, \ + debugfs_create_file(#name, 0400, parent, \ wl, &prefix## _## name## _ops); \ - if (!entry || IS_ERR(entry)) \ - goto err; \ } while (0) #define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \ diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 26b187336875..2e12de813a5b 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -1085,8 +1085,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) goto out; ret = wl12xx_fetch_firmware(wl, plt); - if (ret < 0) - goto out; + if (ret < 0) { + kfree(wl->fw_status); + kfree(wl->raw_fw_status); + kfree(wl->tx_res_if); + } out: return ret; diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index bd10165d7eec..4d4b07701149 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) } sdio_claim_host(func); + /* + 
* To guarantee that the SDIO card is power cycled, as required to make + * the FW programming to succeed, let's do a brute force HW reset. + */ + mmc_hw_reset(card->host); + sdio_enable_func(func); sdio_release_host(func); @@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) { struct sdio_func *func = dev_to_sdio_func(glue->dev); struct mmc_card *card = func->card; - int error; sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); /* Let runtime PM know the card is powered off */ - error = pm_runtime_put(&card->dev); - if (error < 0 && error != -EBUSY) { - dev_err(&card->dev, "%s failed: %i\n", __func__, error); - - return error; - } - + pm_runtime_put(&card->dev); return 0; } diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c index 3a93e4d9828b..606999f102eb 100644 --- a/drivers/net/wireless/virt_wifi.c +++ b/drivers/net/wireless/virt_wifi.c @@ -14,11 +14,6 @@ #include #include -#include -#include -#include -#include - static struct wiphy *common_wiphy; struct virt_wifi_wiphy_priv { @@ -365,7 +360,6 @@ static struct wiphy *virt_wifi_make_wiphy(void) wiphy->bands[NL80211_BAND_5GHZ] = &band_5ghz; wiphy->bands[NL80211_BAND_60GHZ] = NULL; - wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); priv = wiphy_priv(wiphy); @@ -429,13 +423,11 @@ static int virt_wifi_net_device_open(struct net_device *dev) static int virt_wifi_net_device_stop(struct net_device *dev) { struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev); - struct virt_wifi_wiphy_priv *w_priv; n_priv->is_up = false; if (!dev->ieee80211_ptr) return 0; - w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy); virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy); virt_wifi_cancel_connect(dev); diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index 0ccb021f1e78..10d580c3dea3 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -454,6 +454,8 @@ void xenvif_init_hash(struct xenvif *vif) if (xenvif_hash_cache_size == 0) return; + BUG_ON(vif->hash.cache.count); + spin_lock_init(&vif->hash.cache.lock); INIT_LIST_HEAD(&vif->hash.cache.list); } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 182d6770f102..6da12518e693 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -153,6 +153,13 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, { struct xenvif *vif = netdev_priv(dev); unsigned int size = vif->hash.size; + unsigned int num_queues; + + /* If queues are not set up internally - always return 0 + * as the packet going to be dropped anyway */ + num_queues = READ_ONCE(vif->num_queues); + if (num_queues < 1) + return 0; if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) return fallback(dev, skb, NULL) % dev->real_num_tx_queues; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 80aae3a32c2a..1d9940d4e8c7 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s skb_frag_size_set(&frags[i], len); } - /* Copied all the bits from the frag list -- free it. */ - skb_frag_list_init(skb); - xenvif_skb_zerocopy_prepare(queue, nskb); - kfree_skb(nskb); - /* Release all the original (foreign) frags. 
*/ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) skb_frag_unref(skb, f); @@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) xenvif_fill_frags(queue, skb); if (unlikely(skb_has_frag_list(skb))) { + struct sk_buff *nskb = skb_shinfo(skb)->frag_list; + xenvif_skb_zerocopy_prepare(queue, nskb); if (xenvif_handle_frag_list(queue, skb)) { if (net_ratelimit()) netdev_err(queue->vif->dev, @@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) kfree_skb(skb); continue; } + /* Copied all the bits from the frag list -- free it. */ + skb_frag_list_init(skb); + kfree_skb(nskb); } skb->dev = queue->vif->dev; @@ -1169,15 +1169,24 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) continue; } - skb_probe_transport_header(skb, 0); + skb_probe_transport_header(skb); /* If the packet is GSO then we will have just set up the * transport header offset in checksum_setup so it's now * straightforward to calculate gso_segs. */ if (skb_is_gso(skb)) { - int mss = skb_shinfo(skb)->gso_size; - int hdrlen = skb_transport_header(skb) - + int mss, hdrlen; + + /* GSO implies having the L4 header. */ + WARN_ON_ONCE(!skb_transport_header_was_set(skb)); + if (unlikely(!skb_transport_header_was_set(skb))) { + kfree_skb(skb); + continue; + } + + mss = skb_shinfo(skb)->gso_size; + hdrlen = skb_transport_header(skb) - skb_mac_header(skb) + tcp_hdrlen(skb); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 2625740bdc4a..330ddb64930f 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -655,7 +655,7 @@ static void frontend_changed(struct xenbus_device *dev, set_backend_state(be, XenbusStateClosed); if (xenbus_dev_is_online(dev)) break; - /* fall through if not online */ + /* fall through - if not online */ case XenbusStateUnknown: set_backend_state(be, XenbusStateClosed); device_unregister(&dev->dev); diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h index c49ff8970ce3..e071e28bca3f 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.h +++ b/drivers/ntb/hw/intel/ntb_hw_intel.h @@ -53,6 +53,7 @@ #include #include +#include /* PCI device IDs */ #define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF 0x3725 @@ -218,33 +219,4 @@ static inline int pdev_is_gen3(struct pci_dev *pdev) return 0; } -#ifndef ioread64 -#ifdef readq -#define ioread64 readq -#else -#define ioread64 _ioread64 -static inline u64 _ioread64(void __iomem *mmio) -{ - u64 low, high; - - low = ioread32(mmio); - high = ioread32(mmio + sizeof(u32)); - return low | (high << 32); -} -#endif -#endif - -#ifndef iowrite64 -#ifdef writeq -#define iowrite64 writeq -#else -#define iowrite64 _iowrite64 -static inline void _iowrite64(u64 val, void __iomem *mmio) -{ - iowrite32(val, mmio); - iowrite32(val >> 32, mmio + sizeof(u32)); -} -#endif -#endif - #endif diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index f1eaa3c4d46a..f2df2d39c65b 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -13,13 +13,14 @@ * */ -#include -#include +#include +#include #include #include -#include +#include #include #include +#include MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver"); MODULE_VERSION("0.1"); @@ -36,35 +37,6 @@ module_param(use_lut_mws, bool, 0644); MODULE_PARM_DESC(use_lut_mws, "Enable the use of the LUT based memory windows"); -#ifndef ioread64 -#ifdef readq -#define ioread64 readq -#else -#define ioread64 _ioread64 -static inline u64 
_ioread64(void __iomem *mmio) -{ - u64 low, high; - - low = ioread32(mmio); - high = ioread32(mmio + sizeof(u32)); - return low | (high << 32); -} -#endif -#endif - -#ifndef iowrite64 -#ifdef writeq -#define iowrite64 writeq -#else -#define iowrite64 _iowrite64 -static inline void _iowrite64(u64 val, void __iomem *mmio) -{ - iowrite32(val, mmio); - iowrite32(val >> 32, mmio + sizeof(u32)); -} -#endif -#endif - #define SWITCHTEC_NTB_MAGIC 0x45CC0001 #define MAX_MWS 128 diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index b123b0dcf274..4671776f5623 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -541,9 +541,9 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) static int btt_freelist_init(struct arena_info *arena) { - int old, new, ret; - u32 i, map_entry; - struct log_entry log_new, log_old; + int new, ret; + struct log_entry log_new; + u32 i, map_entry, log_oldmap, log_newmap; arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), GFP_KERNEL); @@ -551,24 +551,26 @@ static int btt_freelist_init(struct arena_info *arena) return -ENOMEM; for (i = 0; i < arena->nfree; i++) { - old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT); - if (old < 0) - return old; - new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT); if (new < 0) return new; + /* old and new map entries with any flags stripped out */ + log_oldmap = ent_lba(le32_to_cpu(log_new.old_map)); + log_newmap = ent_lba(le32_to_cpu(log_new.new_map)); + /* sub points to the next one to be overwritten */ arena->freelist[i].sub = 1 - new; arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); - arena->freelist[i].block = le32_to_cpu(log_new.old_map); + arena->freelist[i].block = log_oldmap; /* * FIXME: if error clearing fails during init, we want to make * the BTT read-only */ - if (ent_e_flag(log_new.old_map)) { + if (ent_e_flag(log_new.old_map) && + !ent_normal(log_new.old_map)) { + arena->freelist[i].has_err = 1; ret = arena_clear_freelist_error(arena, i); if (ret) dev_err_ratelimited(to_dev(arena), @@ -576,7 +578,7 @@ static int btt_freelist_init(struct arena_info *arena) } /* This implies a newly created or untouched flog entry */ - if (log_new.old_map == log_new.new_map) + if (log_oldmap == log_newmap) continue; /* Check if map recovery is needed */ @@ -584,8 +586,15 @@ static int btt_freelist_init(struct arena_info *arena) NULL, NULL, 0); if (ret) return ret; - if ((le32_to_cpu(log_new.new_map) != map_entry) && - (le32_to_cpu(log_new.old_map) == map_entry)) { + + /* + * The map_entry from btt_read_map is stripped of any flag bits, + * so use the stripped out versions from the log as well for + * testing whether recovery is needed. For restoration, use the + * 'raw' version of the log entries as that captured what we + * were going to write originally. + */ + if ((log_newmap != map_entry) && (log_oldmap == map_entry)) { /* * Last transaction wrote the flog, but wasn't able * to complete the map write. So fix up the map. 
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index db3cb6d4d0d4..ddff49c707b0 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h @@ -44,6 +44,8 @@ #define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK)) #define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK)) #define set_e_flag(ent) (ent |= MAP_ERR_MASK) +/* 'normal' is both e and z flags set */ +#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent)) enum btt_init_state { INIT_UNCHECKED = 0, diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index 795ad4ff35ca..b72a303176c7 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -159,11 +159,19 @@ static ssize_t size_show(struct device *dev, } static DEVICE_ATTR_RO(size); +static ssize_t log_zero_flags_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "Y\n"); +} +static DEVICE_ATTR_RO(log_zero_flags); + static struct attribute *nd_btt_attributes[] = { &dev_attr_sector_size.attr, &dev_attr_namespace.attr, &dev_attr_uuid.attr, &dev_attr_size.attr, + &dev_attr_log_zero_flags.attr, NULL, }; diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index dca5f7a805cb..7bbff0af29b2 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -534,11 +535,15 @@ void __nd_device_register(struct device *dev) set_dev_node(dev, to_nd_region(dev)->numa_node); dev->bus = &nvdimm_bus_type; - if (dev->parent) + if (dev->parent) { get_device(dev->parent); + if (dev_to_node(dev) == NUMA_NO_NODE) + set_dev_node(dev, dev_to_node(dev->parent)); + } get_device(dev); - async_schedule_domain(nd_async_device_register, dev, - &nd_async_domain); + + async_schedule_dev_domain(nd_async_device_register, dev, + &nd_async_domain); } void nd_device_register(struct device *dev) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index efe412a6b5b9..91b9abbf689c 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -11,6 +11,7 @@ * General Public License for more details. 
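[Editor's note] For the BTT freelist/log rework above: a 32-bit map or log entry carries the post-map block number in its low bits plus two flag bits, the error (E) and zero (Z) flags; both flags set is the "normal" state of an entry that has been written through the map (what the new ent_normal() macro tests), and ent_lba() strips the flags before entries are compared. A short sketch of how the reworked btt_freelist_init() consumes one log entry, using the btt.h macros shown above:

	u32 raw_old = le32_to_cpu(log_new.old_map);

	u32 lba     = ent_lba(raw_old);    /* flag bits masked off for comparisons */
	bool err    = ent_e_flag(raw_old); /* E flag */
	bool normal = ent_normal(raw_old); /* both E and Z set */

	/* only an E flag on a non-normal entry marks an error left to clear */
	if (err && !normal)
		arena->freelist[i].has_err = 1;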
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -25,6 +26,10 @@ static DEFINE_IDA(dimm_ida); +static bool noblk; +module_param(noblk, bool, 0444); +MODULE_PARM_DESC(noblk, "force disable BLK / local alias support"); + /* * Retrieve bus and dimm handle and return if this bus supports * get_config_data commands @@ -551,6 +556,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, nvdimm->dimm_id = dimm_id; nvdimm->provider_data = provider_data; + if (noblk) + flags |= 1 << NDD_NOBLK; nvdimm->flags = flags; nvdimm->cmd_mask = cmd_mask; nvdimm->num_flush = num_flush; diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index a11bf4e6b451..f3d753d3169c 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -392,6 +392,7 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) return 0; /* no label, nothing to reserve */ for_each_clear_bit_le(slot, free, nslot) { + struct nvdimm *nvdimm = to_nvdimm(ndd->dev); struct nd_namespace_label *nd_label; struct nd_region *nd_region = NULL; u8 label_uuid[NSLABEL_UUID_LEN]; @@ -406,6 +407,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd) memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN); flags = __le32_to_cpu(nd_label->flags); + if (test_bit(NDD_NOBLK, &nvdimm->flags)) + flags &= ~NSLABEL_FLAG_LOCAL; nd_label_gen_id(&label_id, label_uuid, flags); res = nvdimm_allocate_dpa(ndd, &label_id, __le64_to_cpu(nd_label->dpa), @@ -755,7 +758,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class, static int __pmem_label_update(struct nd_region *nd_region, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, - int pos) + int pos, unsigned long flags) { struct nd_namespace_common *ndns = &nspm->nsio.common; struct nd_interleave_set *nd_set = nd_region->nd_set; @@ -796,7 +799,7 @@ static int __pmem_label_update(struct nd_region *nd_region, memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN); if (nspm->alt_name) memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN); - nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_UPDATING); + nd_label->flags = __cpu_to_le32(flags); nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings); nd_label->position = __cpu_to_le16(pos); nd_label->isetcookie = __cpu_to_le64(cookie); @@ -1249,13 +1252,13 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid) int nd_pmem_namespace_label_update(struct nd_region *nd_region, struct nd_namespace_pmem *nspm, resource_size_t size) { - int i; + int i, rc; for (i = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct resource *res; - int rc, count = 0; + int count = 0; if (size == 0) { rc = del_labels(nd_mapping, nspm->uuid); @@ -1273,7 +1276,20 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region, if (rc < 0) return rc; - rc = __pmem_label_update(nd_region, nd_mapping, nspm, i); + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, + NSLABEL_FLAG_UPDATING); + if (rc) + return rc; + } + + if (size == 0) + return 0; + + /* Clear the UPDATING flag per UEFI 2.7 expectations */ + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + + rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0); if (rc) return rc; } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 4b077555ac70..7849bf1812c4 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ 
-138,6 +138,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid) bool pmem_should_map_pages(struct device *dev) { struct nd_region *nd_region = to_nd_region(dev->parent); + struct nd_namespace_common *ndns = to_ndns(dev); struct nd_namespace_io *nsio; if (!IS_ENABLED(CONFIG_ZONE_DEVICE)) @@ -149,6 +150,9 @@ bool pmem_should_map_pages(struct device *dev) if (is_nd_pfn(dev) || is_nd_btt(dev)) return false; + if (ndns->force_raw) + return false; + nsio = to_nd_namespace_io(dev); if (region_intersects(nsio->res.start, resource_size(&nsio->res), IORESOURCE_SYSTEM_RAM, @@ -1506,13 +1510,13 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf) if (dev->driver || ndns->claim) return -EBUSY; - if (strcmp(buf, "btt") == 0 || strcmp(buf, "btt\n") == 0) + if (sysfs_streq(buf, "btt")) ndns->claim_class = btt_claim_class(dev); - else if (strcmp(buf, "pfn") == 0 || strcmp(buf, "pfn\n") == 0) + else if (sysfs_streq(buf, "pfn")) ndns->claim_class = NVDIMM_CCLASS_PFN; - else if (strcmp(buf, "dax") == 0 || strcmp(buf, "dax\n") == 0) + else if (sysfs_streq(buf, "dax")) ndns->claim_class = NVDIMM_CCLASS_DAX; - else if (strcmp(buf, "") == 0 || strcmp(buf, "\n") == 0) + else if (sysfs_streq(buf, "")) ndns->claim_class = NVDIMM_CCLASS_NONE; else return -EINVAL; @@ -2492,6 +2496,12 @@ static int init_active_labels(struct nd_region *nd_region) if (!label_ent) break; label = nd_label_active(ndd, j); + if (test_bit(NDD_NOBLK, &nvdimm->flags)) { + u32 flags = __le32_to_cpu(label->flags); + + flags &= ~NSLABEL_FLAG_LOCAL; + label->flags = __cpu_to_le32(flags); + } label_ent->label = label; mutex_lock(&nd_mapping->lock); diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 0a701837dfc0..11b9821eba85 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -108,7 +108,6 @@ static struct platform_driver of_pmem_region_driver = { .remove = of_pmem_region_remove, .driver = { .name = "of_pmem", - .owner = THIS_MODULE, .of_match_table = of_pmem_region_match, }, }; diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 6f22272e8d80..d271bd731af7 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -580,6 +580,11 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) } EXPORT_SYMBOL(nd_pfn_probe); +static u32 info_block_reserve(void) +{ + return ALIGN(SZ_8K, PAGE_SIZE); +} + /* * We hotplug memory at section granularity, pad the reserved area from * the previous section base to the namespace base address. 
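[Editor's note] In the __holder_class_store() hunk above, each pair of strcmp(buf, "x") / strcmp(buf, "x\n") checks collapses into a single sysfs_streq() call: sysfs_streq() compares two strings while treating a trailing newline as equivalent to the terminating NUL, which is exactly what sysfs store callbacks see when userspace writes values with echo. A one-line sketch of the idiom:

	if (sysfs_streq(buf, "btt"))	/* matches both "btt" and "btt\n" */
		ndns->claim_class = btt_claim_class(dev);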
@@ -593,7 +598,7 @@ static unsigned long init_altmap_base(resource_size_t base) static unsigned long init_altmap_reserve(resource_size_t base) { - unsigned long reserve = PHYS_PFN(SZ_8K); + unsigned long reserve = info_block_reserve() >> PAGE_SHIFT; unsigned long base_pfn = PHYS_PFN(base); reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn); @@ -608,6 +613,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) u64 offset = le64_to_cpu(pfn_sb->dataoff); u32 start_pad = __le32_to_cpu(pfn_sb->start_pad); u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc); + u32 reserve = info_block_reserve(); struct nd_namespace_common *ndns = nd_pfn->ndns; struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); resource_size_t base = nsio->res.start + start_pad; @@ -621,7 +627,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) res->end -= end_trunc; if (nd_pfn->mode == PFN_MODE_RAM) { - if (offset < SZ_8K) + if (offset < reserve) return -EINVAL; nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); pgmap->altmap_valid = false; @@ -634,7 +640,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap) le64_to_cpu(nd_pfn->pfn_sb->npfns), nd_pfn->npfns); memcpy(altmap, &__altmap, sizeof(*altmap)); - altmap->free = PHYS_PFN(offset - SZ_8K); + altmap->free = PHYS_PFN(offset - reserve); altmap->alloc = 0; pgmap->altmap_valid = true; } else @@ -678,18 +684,17 @@ static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trun if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) == REGION_MIXED || !IS_ALIGNED(end, nd_pfn->align) - || nd_region_conflict(nd_region, start, size + adjust)) + || nd_region_conflict(nd_region, start, size)) *end_trunc = end - phys_pmem_align_down(nd_pfn, end); } static int nd_pfn_init(struct nd_pfn *nd_pfn) { - u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0; struct nd_namespace_common *ndns = nd_pfn->ndns; struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + u32 start_pad, end_trunc, reserve = info_block_reserve(); resource_size_t start, size; struct nd_region *nd_region; - u32 start_pad, end_trunc; struct nd_pfn_sb *pfn_sb; unsigned long npfns; phys_addr_t offset; @@ -734,7 +739,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) */ start = nsio->res.start + start_pad; size = resource_size(&nsio->res); - npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K) + npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - reserve) / PAGE_SIZE); if (nd_pfn->mode == PFN_MODE_PMEM) { /* @@ -742,11 +747,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) * when populating the vmemmap. This *should* be equal to * PMD_SIZE for most architectures. 
*/ - offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve, + offset = ALIGN(start + reserve + 64 * npfns, max(nd_pfn->align, PMD_SIZE)) - start; } else if (nd_pfn->mode == PFN_MODE_RAM) - offset = ALIGN(start + SZ_8K + dax_label_reserve, - nd_pfn->align) - start; + offset = ALIGN(start + reserve, nd_pfn->align) - start; else return -ENXIO; diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index e2818f94f292..3b58baa44b5c 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -1003,6 +1003,13 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, if (test_bit(NDD_UNARMED, &nvdimm->flags)) ro = 1; + + if (test_bit(NDD_NOBLK, &nvdimm->flags) + && dev_type == &nd_blk_device_type) { + dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n", + caller, dev_name(&nvdimm->dev), i); + return NULL; + } } if (dev_type == &nd_blk_device_type) { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 150e49723c15..07bf2bff3a76 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVM Express device driver * Copyright (c) 2011-2014, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #include @@ -151,11 +143,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); -static void nvme_delete_ctrl_work(struct work_struct *work) +static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) { - struct nvme_ctrl *ctrl = - container_of(work, struct nvme_ctrl, delete_work); - dev_info(ctrl->device, "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); @@ -167,6 +156,14 @@ static void nvme_delete_ctrl_work(struct work_struct *work) nvme_put_ctrl(ctrl); } +static void nvme_delete_ctrl_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = + container_of(work, struct nvme_ctrl, delete_work); + + nvme_do_delete_ctrl(ctrl); +} + int nvme_delete_ctrl(struct nvme_ctrl *ctrl) { if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) @@ -177,7 +174,7 @@ int nvme_delete_ctrl(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_delete_ctrl); -int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) +static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) { int ret = 0; @@ -186,13 +183,13 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) * can free the controller. 
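[Editor's note] A worked example for the nd_pfn reserve change above: the info-block reserve becomes ALIGN(SZ_8K, PAGE_SIZE) instead of a hard-coded SZ_8K. On a 4K-page system nothing changes (the reserve stays 8K, two pages), while on a 64K-page system (e.g. some arm64/powerpc configurations) the reserve grows to one full 64K page. Because init_altmap_reserve(), __nvdimm_setup_pfn() and nd_pfn_init() now all derive their numbers from the same info_block_reserve() helper, the offset, npfns and altmap->free calculations stay consistent with each other.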
*/ nvme_get_ctrl(ctrl); - ret = nvme_delete_ctrl(ctrl); + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) + ret = -EBUSY; if (!ret) - flush_work(&ctrl->delete_work); + nvme_do_delete_ctrl(ctrl); nvme_put_ctrl(ctrl); return ret; } -EXPORT_SYMBOL_GPL(nvme_delete_ctrl_sync); static inline bool nvme_ns_has_pi(struct nvme_ns *ns) { @@ -611,6 +608,22 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, return BLK_STS_OK; } +static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, + struct request *req, struct nvme_command *cmnd) +{ + if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) + return nvme_setup_discard(ns, req, cmnd); + + cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; + cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); + cmnd->write_zeroes.slba = + cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); + cmnd->write_zeroes.length = + cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + cmnd->write_zeroes.control = 0; + return BLK_STS_OK; +} + static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd) { @@ -705,7 +718,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, nvme_setup_flush(ns, cmd); break; case REQ_OP_WRITE_ZEROES: - /* currently only aliased to deallocate for a few ctrls: */ + ret = nvme_setup_write_zeroes(ns, req, cmd); + break; case REQ_OP_DISCARD: ret = nvme_setup_discard(ns, req, cmd); break; @@ -1253,6 +1267,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, * effects say only one namespace is affected. */ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { + mutex_lock(&ctrl->scan_lock); nvme_start_freeze(ctrl); nvme_wait_freeze(ctrl); } @@ -1281,8 +1296,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) */ if (effects & NVME_CMD_EFFECTS_LBCC) nvme_update_formats(ctrl); - if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { nvme_unfreeze(ctrl); + mutex_unlock(&ctrl->scan_lock); + } if (effects & NVME_CMD_EFFECTS_CCC) nvme_init_identify(ctrl); if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) @@ -1509,6 +1526,37 @@ static void nvme_config_discard(struct nvme_ns *ns) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); } +static inline void nvme_config_write_zeroes(struct nvme_ns *ns) +{ + u32 max_sectors; + unsigned short bs = 1 << ns->lba_shift; + + if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES)) + return; + /* + * Even though NVMe spec explicitly states that MDTS is not + * applicable to the write-zeroes:- "The restriction does not apply to + * commands that do not transfer data between the host and the + * controller (e.g., Write Uncorrectable ro Write Zeroes command).". + * In order to be more cautious use controller's max_hw_sectors value + * to configure the maximum sectors for the write-zeroes which is + * configured based on the controller's MDTS field in the + * nvme_init_identify() if available. 
+ */ + if (ns->ctrl->max_hw_sectors == UINT_MAX) + max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9; + else + max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9; + + blk_queue_max_write_zeroes_sectors(ns->queue, max_sectors); +} + +static inline void nvme_ns_config_oncs(struct nvme_ns *ns) +{ + nvme_config_discard(ns); + nvme_config_write_zeroes(ns); +} + static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, struct nvme_id_ns *id, struct nvme_ns_ids *ids) { @@ -1562,7 +1610,7 @@ static void nvme_update_disk_info(struct gendisk *disk, capacity = 0; set_capacity(disk, capacity); - nvme_config_discard(ns); + nvme_ns_config_oncs(ns); if (id->nsattr & (1 << 0)) set_disk_ro(disk, true); @@ -2277,6 +2325,9 @@ static struct attribute *nvme_subsys_attrs[] = { &subsys_attr_serial.attr, &subsys_attr_firmware_rev.attr, &subsys_attr_subsysnqn.attr, +#ifdef CONFIG_NVME_MULTIPATH + &subsys_attr_iopolicy.attr, +#endif NULL, }; @@ -2329,6 +2380,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); subsys->vendor_id = le16_to_cpu(id->vid); subsys->cmic = id->cmic; +#ifdef CONFIG_NVME_MULTIPATH + subsys->iopolicy = NVME_IOPOLICY_NUMA; +#endif subsys->dev.class = nvme_subsys_class; subsys->dev.release = nvme_release_subsystem; @@ -3160,21 +3214,23 @@ static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) return 0; } -static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) +static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns; struct gendisk *disk; struct nvme_id_ns *id; char disk_name[DISK_NAME_LEN]; - int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT; + int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); if (!ns) - return; + return -ENOMEM; ns->queue = blk_mq_init_queue(ctrl->tagset); - if (IS_ERR(ns->queue)) + if (IS_ERR(ns->queue)) { + ret = PTR_ERR(ns->queue); goto out_free_ns; + } blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) @@ -3190,20 +3246,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nvme_set_queue_limits(ctrl, ns->queue); id = nvme_identify_ns(ctrl, nsid); - if (!id) + if (!id) { + ret = -EIO; goto out_free_queue; + } - if (id->ncap == 0) + if (id->ncap == 0) { + ret = -EINVAL; goto out_free_id; + } - if (nvme_init_ns_head(ns, nsid, id)) + ret = nvme_init_ns_head(ns, nsid, id); + if (ret) goto out_free_id; nvme_setup_streams_ns(ctrl, ns); nvme_set_disk_name(disk_name, ns, ctrl, &flags); disk = alloc_disk_node(0, node); - if (!disk) + if (!disk) { + ret = -ENOMEM; goto out_unlink_ns; + } disk->fops = &nvme_fops; disk->private_data = ns; @@ -3215,7 +3278,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) __nvme_revalidate_disk(disk, id); if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { - if (nvme_nvm_register(ns, disk_name, node)) { + ret = nvme_nvm_register(ns, disk_name, node); + if (ret) { dev_warn(ctrl->device, "LightNVM init failure\n"); goto out_put_disk; } @@ -3233,7 +3297,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nvme_fault_inject_init(ns); kfree(id); - return; + return 0; out_put_disk: put_disk(ns->disk); out_unlink_ns: @@ -3246,6 +3310,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) blk_cleanup_queue(ns->queue); out_free_ns: kfree(ns); + return ret; } static void nvme_ns_remove(struct 
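[Editor's note] A quick worked example for the Write Zeroes limit above: when the controller reports no MDTS (max_hw_sectors == UINT_MAX), the fallback cap is USHRT_MAX + 1 = 65536 logical blocks, matching the command's 16-bit, 0's-based length field (note the cpu_to_le16() in nvme_setup_write_zeroes()). With 512-byte logical blocks that is 65536 * 512 / 512 = 65536 sectors, i.e. 32 MiB per command; with 4096-byte blocks it is 65536 * 4096 / 512 = 524288 sectors, i.e. 256 MiB. Otherwise the cap is derived from the controller's MDTS-based max_hw_sectors scaled by the namespace block size.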
nvme_ns *ns) @@ -3401,6 +3466,7 @@ static void nvme_scan_work(struct work_struct *work) if (nvme_identify_ctrl(ctrl, &id)) return; + mutex_lock(&ctrl->scan_lock); nn = le32_to_cpu(id->nn); if (ctrl->vs >= NVME_VS(1, 1, 0) && !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { @@ -3409,6 +3475,7 @@ static void nvme_scan_work(struct work_struct *work) } nvme_scan_ns_sequential(ctrl, nn); out_free_id: + mutex_unlock(&ctrl->scan_lock); kfree(id); down_write(&ctrl->namespaces_rwsem); list_sort(NULL, &ctrl->namespaces, ns_cmp); @@ -3591,8 +3658,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fw_act_work); - if (ctrl->ops->stop_ctrl) - ctrl->ops->stop_ctrl(ctrl); } EXPORT_SYMBOL_GPL(nvme_stop_ctrl); @@ -3652,6 +3717,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->state = NVME_CTRL_NEW; spin_lock_init(&ctrl->lock); + mutex_init(&ctrl->scan_lock); INIT_LIST_HEAD(&ctrl->namespaces); init_rwsem(&ctrl->namespaces_rwsem); ctrl->dev = dev; diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 3eb908c50e1a..d4cb826f58ff 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics common host code. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include @@ -430,6 +422,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue); * @qid: NVMe I/O queue number for the new I/O connection between * host and target (note qid == 0 is illegal as this is * the Admin queue, per NVMe standard). + * @poll: Whether or not to poll for the completion of the connect cmd. * * This function issues a fabrics-protocol connection * of a NVMe I/O queue (via NVMe Fabrics "Connect" command) diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 478343b73e38..3044d8b99a24 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -1,15 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * NVMe over Fabrics common host code. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #ifndef _NVME_FABRICS_H #define _NVME_FABRICS_H 1 diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c index 02632266ac06..4cfd2c9222d4 100644 --- a/drivers/nvme/host/fault_inject.c +++ b/drivers/nvme/host/fault_inject.c @@ -1,8 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * fault injection support for nvme. 
* * Copyright (c) 2018, Oracle and/or its affiliates - * */ #include diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 89accc76d71c..b29b12498a1a 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1,18 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful. - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, - * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO - * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. - * See the GNU General Public License for more details, a copy of which - * can be found in the file COPYING included with this package - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index b759c25c89c8..949e29e1d782 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -1,23 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * nvme-lightnvm.c - LightNVM NVMe device * * Copyright (C) 2014-2015 IT University of Copenhagen * Initial release: Matias Bjorling - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING. If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, - * USA. - * */ #include "nvme.h" diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index b9fff3b8ed1b..2839bb70badf 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -1,14 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2017-2018 Christoph Hellwig. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #include @@ -141,7 +133,10 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) test_bit(NVME_NS_ANA_PENDING, &ns->flags)) continue; - distance = node_distance(node, ns->ctrl->numa_node); + if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) + distance = node_distance(node, ns->ctrl->numa_node); + else + distance = LOCAL_DISTANCE; switch (ns->ana_state) { case NVME_ANA_OPTIMIZED: @@ -168,6 +163,47 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) return found; } +static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, + struct nvme_ns *ns) +{ + ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, + siblings); + if (ns) + return ns; + return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); +} + +static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, + int node, struct nvme_ns *old) +{ + struct nvme_ns *ns, *found, *fallback = NULL; + + if (list_is_singular(&head->list)) + return old; + + for (ns = nvme_next_ns(head, old); + ns != old; + ns = nvme_next_ns(head, ns)) { + if (ns->ctrl->state != NVME_CTRL_LIVE || + test_bit(NVME_NS_ANA_PENDING, &ns->flags)) + continue; + + if (ns->ana_state == NVME_ANA_OPTIMIZED) { + found = ns; + goto out; + } + if (ns->ana_state == NVME_ANA_NONOPTIMIZED) + fallback = ns; + } + + if (!fallback) + return NULL; + found = fallback; +out: + rcu_assign_pointer(head->current_path[node], found); + return found; +} + static inline bool nvme_path_is_optimized(struct nvme_ns *ns) { return ns->ctrl->state == NVME_CTRL_LIVE && @@ -180,6 +216,8 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) struct nvme_ns *ns; ns = srcu_dereference(head->current_path[node], &head->srcu); + if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR && ns) + ns = nvme_round_robin_path(head, node, ns); if (unlikely(!ns || !nvme_path_is_optimized(ns))) ns = __nvme_find_path(head, node); return ns; @@ -471,6 +509,44 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl) cancel_work_sync(&ctrl->ana_work); } +#define SUBSYS_ATTR_RW(_name, _mode, _show, _store) \ + struct device_attribute subsys_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static const char *nvme_iopolicy_names[] = { + [NVME_IOPOLICY_NUMA] = "numa", + [NVME_IOPOLICY_RR] = "round-robin", +}; + +static ssize_t nvme_subsys_iopolicy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + + return sprintf(buf, "%s\n", + nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]); +} + +static ssize_t nvme_subsys_iopolicy_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct nvme_subsystem *subsys = + container_of(dev, struct nvme_subsystem, dev); + int i; + + for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) { + if (sysfs_streq(buf, nvme_iopolicy_names[i])) { + WRITE_ONCE(subsys->iopolicy, i); + return count; + } + } + + return -EINVAL; +} +SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR, + nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store); + static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ab961bdeea89..b91f1838bbd5 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -1,14 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2011-2014, Intel Corporation. 
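As an illustrative aside to the multipath change above (not part of the patch itself): the new per-subsystem iopolicy attribute can be switched at runtime from userspace. A minimal sketch, assuming the usual /sys/class/nvme-subsystem/nvme-subsysN/ sysfs layout for subsystem devices; the helper name is made up:

	/* sketch: select the round-robin I/O policy for one subsystem */
	#include <stdio.h>

	static int set_iopolicy(const char *subsys, const char *policy)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/nvme-subsystem/%s/iopolicy", subsys);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%s\n", policy);
		return fclose(f);
	}

For example, set_iopolicy("nvme-subsys0", "round-robin"); reading the attribute back reports the active policy, which defaults to "numa" as set in nvme_init_subsystem().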
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #ifndef _NVME_H @@ -154,6 +146,7 @@ struct nvme_ctrl { enum nvme_ctrl_state state; bool identified; spinlock_t lock; + struct mutex scan_lock; const struct nvme_ctrl_ops *ops; struct request_queue *admin_q; struct request_queue *connect_q; @@ -251,6 +244,11 @@ struct nvme_ctrl { unsigned long discard_page_busy; }; +enum nvme_iopolicy { + NVME_IOPOLICY_NUMA, + NVME_IOPOLICY_RR, +}; + struct nvme_subsystem { int instance; struct device dev; @@ -270,6 +268,9 @@ struct nvme_subsystem { u8 cmic; u16 vendor_id; struct ida ns_ida; +#ifdef CONFIG_NVME_MULTIPATH + enum nvme_iopolicy iopolicy; +#endif }; /* @@ -363,7 +364,6 @@ struct nvme_ctrl_ops { void (*submit_async_event)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); - void (*stop_ctrl)(struct nvme_ctrl *ctrl); }; #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS @@ -458,7 +458,6 @@ void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); -int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, void *log, size_t size, u64 offset); @@ -491,6 +490,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; +extern struct device_attribute subsys_attr_iopolicy; #else static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9bc585415d9b..92bad1c810ac 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVM Express device driver * Copyright (c) 2011-2014, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #include @@ -157,6 +149,8 @@ static int queue_count_set(const char *val, const struct kernel_param *kp) int n = 0, ret; ret = kstrtoint(val, 10, &n); + if (ret) + return ret; if (n > num_possible_cpus()) n = num_possible_cpus(); @@ -2041,53 +2035,52 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) return ret; } -/* irq_queues covers admin queue */ -static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues) +/* + * nirqs is the number of interrupts available for write and read + * queues. The core already reserved an interrupt for the admin queue. 
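As an illustrative aside (not part of the patch), the split computed by nvme_calc_irq_sets() below works out as follows for a few sample values, written as (HCTX_TYPE_DEFAULT, HCTX_TYPE_READ) with write_queues being the existing module parameter:

	nrirqs = 0                    -> (1, 0), nr_sets = 1  (single shared vector)
	nrirqs = 1, any write_queues  -> (1, 0), nr_sets = 1
	nrirqs = 8, write_queues = 0  -> (8, 0), nr_sets = 1
	nrirqs = 8, write_queues = 6  -> (6, 2), nr_sets = 2
	nrirqs = 4, write_queues = 6  -> (3, 1), nr_sets = 2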
+ */ +static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) { - unsigned int this_w_queues = write_queues; - - WARN_ON(!irq_queues); - - /* - * Setup read/write queue split, assign admin queue one independent - * irq vector if irq_queues is > 1. - */ - if (irq_queues <= 2) { - dev->io_queues[HCTX_TYPE_DEFAULT] = 1; - dev->io_queues[HCTX_TYPE_READ] = 0; - return; - } + struct nvme_dev *dev = affd->priv; + unsigned int nr_read_queues; /* - * If 'write_queues' is set, ensure it leaves room for at least - * one read queue and one admin queue + * If there is no interrupt available for queues, ensure that + * the default queue is set to 1. The affinity set size is + * also set to one, but the irq core ignores it for this case. + * + * If only one interrupt is available or 'write_queues' == 0, combine + * write and read queues. + * + * If 'write_queues' > 0, ensure it leaves room for at least one read + * queue. */ - if (this_w_queues >= irq_queues) - this_w_queues = irq_queues - 2; - - /* - * If 'write_queues' is set to zero, reads and writes will share - * a queue set. - */ - if (!this_w_queues) { - dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1; - dev->io_queues[HCTX_TYPE_READ] = 0; + if (!nrirqs) { + nrirqs = 1; + nr_read_queues = 0; + } else if (nrirqs == 1 || !write_queues) { + nr_read_queues = 0; + } else if (write_queues >= nrirqs) { + nr_read_queues = 1; } else { - dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues; - dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1; + nr_read_queues = nrirqs - write_queues; } + + dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; + affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; + dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; + affd->set_size[HCTX_TYPE_READ] = nr_read_queues; + affd->nr_sets = nr_read_queues ? 2 : 1; } static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) { struct pci_dev *pdev = to_pci_dev(dev->dev); - int irq_sets[2]; struct irq_affinity affd = { - .pre_vectors = 1, - .nr_sets = ARRAY_SIZE(irq_sets), - .sets = irq_sets, + .pre_vectors = 1, + .calc_sets = nvme_calc_irq_sets, + .priv = dev, }; - int result = 0; unsigned int irq_queues, this_p_queues; /* @@ -2103,51 +2096,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) } dev->io_queues[HCTX_TYPE_POLL] = this_p_queues; - /* - * For irq sets, we have to ask for minvec == maxvec. This passes - * any reduction back to us, so we can adjust our queue counts and - * IRQ vector needs. - */ - do { - nvme_calc_io_queues(dev, irq_queues); - irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT]; - irq_sets[1] = dev->io_queues[HCTX_TYPE_READ]; - if (!irq_sets[1]) - affd.nr_sets = 1; - - /* - * If we got a failure and we're down to asking for just - * 1 + 1 queues, just ask for a single vector. We'll share - * that between the single IO queue and the admin queue. - * Otherwise, we assign one independent vector to admin queue. - */ - if (irq_queues > 1) - irq_queues = irq_sets[0] + irq_sets[1] + 1; - - result = pci_alloc_irq_vectors_affinity(pdev, irq_queues, - irq_queues, - PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); - - /* - * Need to reduce our vec counts. If we get ENOSPC, the - * platform should support mulitple vecs, we just need - * to decrease our ask. If we get EINVAL, the platform - * likely does not. Back down to ask for just one vector.
- */ - if (result == -ENOSPC) { - irq_queues--; - if (!irq_queues) - return result; - continue; - } else if (result == -EINVAL) { - irq_queues = 1; - continue; - } else if (result <= 0) - return -EIO; - break; - } while (1); + /* Initialize for the single interrupt case */ + dev->io_queues[HCTX_TYPE_DEFAULT] = 1; + dev->io_queues[HCTX_TYPE_READ] = 0; - return result; + return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, + PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); } static void nvme_disable_io_queues(struct nvme_dev *dev) @@ -2557,27 +2511,18 @@ static void nvme_reset_work(struct work_struct *work) if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) nvme_dev_disable(dev, false); - /* - * Introduce CONNECTING state from nvme-fc/rdma transports to mark the - * initializing procedure here. - */ - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { - dev_warn(dev->ctrl.device, - "failed to mark controller CONNECTING\n"); - goto out; - } - + mutex_lock(&dev->shutdown_lock); result = nvme_pci_enable(dev); if (result) - goto out; + goto out_unlock; result = nvme_pci_configure_admin_queue(dev); if (result) - goto out; + goto out_unlock; result = nvme_alloc_admin_tags(dev); if (result) - goto out; + goto out_unlock; /* * Limit the max command size to prevent iod->sg allocations going @@ -2585,6 +2530,17 @@ static void nvme_reset_work(struct work_struct *work) */ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; dev->ctrl.max_segments = NVME_MAX_SEGS; + mutex_unlock(&dev->shutdown_lock); + + /* + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the + * initializing procedure here. + */ + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { + dev_warn(dev->ctrl.device, + "failed to mark controller CONNECTING\n"); + goto out; + } result = nvme_init_identify(&dev->ctrl); if (result) @@ -2649,6 +2605,8 @@ static void nvme_reset_work(struct work_struct *work) nvme_start_ctrl(&dev->ctrl); return; + out_unlock: + mutex_unlock(&dev->shutdown_lock); out: nvme_remove_dead_ctrl(dev, result); } @@ -3020,6 +2978,7 @@ static struct pci_driver nvme_driver = { static int __init nvme_init(void) { + BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); return pci_register_driver(&nvme_driver); } diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 52abc3a6de12..11a5ecae78c8 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics RDMA host code. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include @@ -942,14 +934,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, } } -static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) -{ - struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); - - cancel_work_sync(&ctrl->err_work); - cancel_delayed_work_sync(&ctrl->reconnect_work); -} - static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); @@ -1158,7 +1142,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue, struct nvme_rdma_device *dev = queue->device; struct ib_device *ibdev = dev->dev; - if (!blk_rq_payload_bytes(rq)) + if (!blk_rq_nr_phys_segments(rq)) return; if (req->mr) { @@ -1281,7 +1265,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, c->common.flags |= NVME_CMD_SGL_METABUF; - if (!blk_rq_payload_bytes(rq)) + if (!blk_rq_nr_phys_segments(rq)) return nvme_rdma_set_sg_null(c); req->sg_table.sgl = req->first_sgl; @@ -1854,6 +1838,9 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { + cancel_work_sync(&ctrl->err_work); + cancel_delayed_work_sync(&ctrl->reconnect_work); + nvme_rdma_teardown_io_queues(ctrl, shutdown); if (shutdown) nvme_shutdown_ctrl(&ctrl->ctrl); @@ -1902,7 +1889,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, .get_address = nvmf_get_address, - .stop_ctrl = nvme_rdma_stop_ctrl, }; /* diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 5f0a00425242..208ee518af65 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1822,6 +1822,9 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) { + cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); + cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); + nvme_tcp_teardown_io_queues(ctrl, shutdown); if (shutdown) nvme_shutdown_ctrl(ctrl); @@ -1859,12 +1862,6 @@ out_fail: nvme_tcp_reconnect_or_remove(ctrl); } -static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) -{ - cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); - cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); -} - static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); @@ -2115,7 +2112,6 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { .submit_async_event = nvme_tcp_submit_async_event, .delete_ctrl = nvme_tcp_delete_ctrl, .get_address = nvmf_get_address, - .stop_ctrl = nvme_tcp_stop_ctrl, }; static bool diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c index 5566dda3237a..58456de78bb2 100644 --- a/drivers/nvme/host/trace.c +++ b/drivers/nvme/host/trace.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVM Express device driver tracepoints * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #include diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h index 3564120aa7b3..244d7c177e5a 100644 --- a/drivers/nvme/host/trace.h +++ b/drivers/nvme/host/trace.h @@ -1,15 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * NVM Express device driver tracepoints * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #undef TRACE_SYSTEM diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 11baeb14c388..76250181fee0 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVMe admin command implementation. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 618bbd006544..adb79545cdd7 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Configfs interface for the NVMe target. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 88d260f31835..d44ede147263 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Common code for the NVMe target. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index d2cb71a0b419..c872b47a88f3 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Discovery service for the NVMe over Fabrics target. * Copyright (C) 2016 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include @@ -331,7 +323,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) cmd->get_log_page.lid); req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; } case nvme_admin_identify: req->data_len = NVME_IDENTIFY_DATA_SIZE; diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 6cf1fd9eb32e..3a76ebc3d155 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVMe Fabrics command implementation. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index f98f5c5bea26..1e9654f04c60 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1,18 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful. - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, - * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO - * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. - * See the GNU General Public License for more details, a copy of which - * can be found in the file COPYING included with this package - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 291f4121f516..381b5a90c48b 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -1,17 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2016 Avago Technologies. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful. - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, - * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A - * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO - * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. - * See the GNU General Public License for more details, a copy of which - * can be found in the file COPYING included with this package */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index b6d030d3259f..71dfedbadc26 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVMe I/O command implementation. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 4aac1b4a8112..b9f623ab01f3 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics loopback device. * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 3e4719fdba85..51e49efd7849 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -1,14 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2015-2016 HGST, a Western Digital Company. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #ifndef _NVMET_H diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index a884e3a0e8af..ef893addf341 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -1,15 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * NVMe over Fabrics RDMA target. * Copyright (c) 2015-2016 HGST, a Western Digital Company. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 0a7a470ee859..530d570724c9 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -26,7 +26,7 @@ config NVMEM_IMX_IIM config NVMEM_IMX_OCOTP tristate "i.MX6 On-Chip OTP Controller support" - depends on SOC_IMX6 || COMPILE_TEST + depends on SOC_IMX6 || SOC_IMX7D || COMPILE_TEST depends on HAS_IOMEM help This is a driver for the On-Chip OTP Controller (OCOTP) available on @@ -192,4 +192,14 @@ config SC27XX_EFUSE This driver can also be built as a module. If so, the module will be called nvmem-sc27xx-efuse. +config NVMEM_ZYNQMP + bool "Xilinx ZYNQMP SoC nvmem firmware support" + depends on ARCH_ZYNQMP + help + This is a driver to access hardware related data like + soc revision, IDCODE... etc by using the firmware + interface. + + If sure, say yes. If unsure, say no. + endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 4e8c61628f1a..2ece8ffffdda 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -41,3 +41,5 @@ obj-$(CONFIG_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o obj-$(CONFIG_SC27XX_EFUSE) += nvmem-sc27xx-efuse.o nvmem-sc27xx-efuse-y := sc27xx-efuse.o +obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o +nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c index 4159b3f41d79..a8097511582a 100644 --- a/drivers/nvmem/bcm-ocotp.c +++ b/drivers/nvmem/bcm-ocotp.c @@ -11,13 +11,14 @@ * GNU General Public License for more details. 
*/ +#include #include #include #include #include #include #include -#include +#include #include /* @@ -78,9 +79,9 @@ static struct otpc_map otp_map_v2 = { }; struct otpc_priv { - struct device *dev; - void __iomem *base; - struct otpc_map *map; + struct device *dev; + void __iomem *base; + const struct otpc_map *map; struct nvmem_config *config; }; @@ -237,16 +238,22 @@ static struct nvmem_config bcm_otpc_nvmem_config = { }; static const struct of_device_id bcm_otpc_dt_ids[] = { - { .compatible = "brcm,ocotp" }, - { .compatible = "brcm,ocotp-v2" }, + { .compatible = "brcm,ocotp", .data = &otp_map }, + { .compatible = "brcm,ocotp-v2", .data = &otp_map_v2 }, { }, }; MODULE_DEVICE_TABLE(of, bcm_otpc_dt_ids); +static const struct acpi_device_id bcm_otpc_acpi_ids[] = { + { .id = "BRCM0700", .driver_data = (kernel_ulong_t)&otp_map }, + { .id = "BRCM0701", .driver_data = (kernel_ulong_t)&otp_map_v2 }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, bcm_otpc_acpi_ids); + static int bcm_otpc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *dn = dev->of_node; struct resource *res; struct otpc_priv *priv; struct nvmem_device *nvmem; @@ -257,14 +264,9 @@ static int bcm_otpc_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - if (of_device_is_compatible(dev->of_node, "brcm,ocotp")) - priv->map = &otp_map; - else if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) - priv->map = &otp_map_v2; - else { - dev_err(dev, "%s otpc config map not defined\n", __func__); - return -EINVAL; - } + priv->map = device_get_match_data(dev); + if (!priv->map) + return -ENODEV; /* Get OTP base address register. */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -281,7 +283,7 @@ static int bcm_otpc_probe(struct platform_device *pdev) reset_start_bit(priv->base); /* Read size of memory in words. */ - err = of_property_read_u32(dn, "brcm,ocotp-size", &num_words); + err = device_property_read_u32(dev, "brcm,ocotp-size", &num_words); if (err) { dev_err(dev, "size parameter not specified\n"); return -EINVAL; @@ -294,7 +296,7 @@ static int bcm_otpc_probe(struct platform_device *pdev) bcm_otpc_nvmem_config.dev = dev; bcm_otpc_nvmem_config.priv = priv; - if (of_device_is_compatible(dev->of_node, "brcm,ocotp-v2")) { + if (priv->map == &otp_map_v2) { bcm_otpc_nvmem_config.word_size = 8; bcm_otpc_nvmem_config.stride = 8; } @@ -315,6 +317,7 @@ static struct platform_driver bcm_otpc_driver = { .driver = { .name = "brcm-otpc", .of_match_table = bcm_otpc_dt_ids, + .acpi_match_table = ACPI_PTR(bcm_otpc_acpi_ids), }, }; module_platform_driver(bcm_otpc_driver); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index f7301bb4ef3b..f24008b66826 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -525,12 +525,14 @@ out: static struct nvmem_cell * nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id) { - struct nvmem_cell *cell = NULL; + struct nvmem_cell *iter, *cell = NULL; mutex_lock(&nvmem_mutex); - list_for_each_entry(cell, &nvmem->cells, node) { - if (strcmp(cell_id, cell->name) == 0) + list_for_each_entry(iter, &nvmem->cells, node) { + if (strcmp(cell_id, iter->name) == 0) { + cell = iter; break; + } } mutex_unlock(&nvmem_mutex); @@ -646,8 +648,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) config->name ? 
config->id : nvmem->id); } - nvmem->read_only = device_property_present(config->dev, "read-only") | - config->read_only; + nvmem->read_only = device_property_present(config->dev, "read-only") || + config->read_only || !nvmem->reg_write; if (config->root_only) nvmem->dev.groups = nvmem->read_only ? @@ -686,9 +688,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (rval) goto err_remove_cells; - rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); - if (rval) - goto err_remove_cells; + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); return nvmem; @@ -809,6 +809,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np, "could not increase module refcount for cell %s\n", nvmem_dev_name(nvmem)); + put_device(&nvmem->dev); return ERR_PTR(-EINVAL); } @@ -819,6 +820,7 @@ static struct nvmem_device *__nvmem_device_get(struct device_node *np, static void __nvmem_device_put(struct nvmem_device *nvmem) { + put_device(&nvmem->dev); module_put(nvmem->owner); kref_put(&nvmem->refcnt, nvmem_device_release); } @@ -837,13 +839,14 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id) { struct device_node *nvmem_np; - int index; + int index = 0; - index = of_property_match_string(np, "nvmem-names", id); + if (id) + index = of_property_match_string(np, "nvmem-names", id); nvmem_np = of_parse_phandle(np, "nvmem", index); if (!nvmem_np) - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOENT); return __nvmem_device_get(nvmem_np, NULL); } @@ -871,7 +874,7 @@ struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name) } - return nvmem_find(dev_name); + return __nvmem_device_get(NULL, dev_name); } EXPORT_SYMBOL_GPL(nvmem_device_get); @@ -972,7 +975,7 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id) if (IS_ERR(nvmem)) { /* Provider may not be registered yet. 
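An illustrative aside on the -EINVAL to -ENOENT change above: a consumer that treats an nvmem cell as optional can now distinguish "not described in the device tree" from a real failure. A minimal sketch under that assumption; the function and the "calibration" cell name are made up:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/nvmem-consumer.h>
	#include <linux/slab.h>

	/* illustrative consumer: read an optional calibration cell, keep
	 * the driver defaults when the cell is simply not wired up in DT */
	static int foo_read_cal(struct device *dev, u32 *cal)
	{
		struct nvmem_cell *cell;
		size_t len;
		u32 *val;

		cell = nvmem_cell_get(dev, "calibration");
		if (IS_ERR(cell)) {
			if (PTR_ERR(cell) == -ENOENT)
				return 0;	/* optional: no cell, no error */
			return PTR_ERR(cell);	/* includes -EPROBE_DEFER */
		}

		val = nvmem_cell_read(cell, &len);
		nvmem_cell_put(cell);
		if (IS_ERR(val))
			return PTR_ERR(val);
		if (len >= sizeof(*cal))
			*cal = *val;
		kfree(val);
		return 0;
	}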
*/ cell = ERR_CAST(nvmem); - goto out; + break; } cell = nvmem_find_cell_by_name(nvmem, @@ -980,12 +983,11 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id) if (!cell) { __nvmem_device_put(nvmem); cell = ERR_PTR(-ENOENT); - goto out; } + break; } } -out: mutex_unlock(&nvmem_lookup_mutex); return cell; } @@ -994,12 +996,14 @@ out: static struct nvmem_cell * nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np) { - struct nvmem_cell *cell = NULL; + struct nvmem_cell *iter, *cell = NULL; mutex_lock(&nvmem_mutex); - list_for_each_entry(cell, &nvmem->cells, node) { - if (np == cell->np) + list_for_each_entry(iter, &nvmem->cells, node) { + if (np == iter->np) { + cell = iter; break; + } } mutex_unlock(&nvmem_mutex); @@ -1031,7 +1035,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) cell_np = of_parse_phandle(np, "nvmem-cells", index); if (!cell_np) - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOENT); nvmem_np = of_get_next_parent(cell_np); if (!nvmem_np) diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index afb429a417fe..08a9b1ef8ae4 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -427,19 +427,32 @@ static const struct ocotp_params imx6ul_params = { .set_timing = imx_ocotp_set_imx6_timing, }; +static const struct ocotp_params imx6ull_params = { + .nregs = 64, + .bank_address_words = 0, + .set_timing = imx_ocotp_set_imx6_timing, +}; + static const struct ocotp_params imx7d_params = { .nregs = 64, .bank_address_words = 4, .set_timing = imx_ocotp_set_imx7_timing, }; +static const struct ocotp_params imx7ulp_params = { + .nregs = 256, + .bank_address_words = 0, +}; + static const struct of_device_id imx_ocotp_dt_ids[] = { { .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params }, { .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params }, { .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params }, { .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params }, + { .compatible = "fsl,imx6ull-ocotp", .data = &imx6ull_params }, { .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params }, { .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params }, + { .compatible = "fsl,imx7ulp-ocotp", .data = &imx7ulp_params }, { }, }; MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids); diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c index 33185d8d82cf..c6ee21018d80 100644 --- a/drivers/nvmem/sc27xx-efuse.c +++ b/drivers/nvmem/sc27xx-efuse.c @@ -106,10 +106,12 @@ static int sc27xx_efuse_poll_status(struct sc27xx_efuse *efuse, u32 bits) static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes) { struct sc27xx_efuse *efuse = context; - u32 buf; + u32 buf, blk_index = offset / SC27XX_EFUSE_BLOCK_WIDTH; + u32 blk_offset = (offset % SC27XX_EFUSE_BLOCK_WIDTH) * BITS_PER_BYTE; int ret; - if (offset > SC27XX_EFUSE_BLOCK_MAX || bytes > SC27XX_EFUSE_BLOCK_WIDTH) + if (blk_index > SC27XX_EFUSE_BLOCK_MAX || + bytes > SC27XX_EFUSE_BLOCK_WIDTH) return -EINVAL; ret = sc27xx_efuse_lock(efuse); @@ -133,7 +135,7 @@ static int sc27xx_efuse_read(void *context, u32 offset, void *val, size_t bytes) /* Set the block address to be read. 
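A quick worked example of the offset handling above (illustrative aside, assuming SC27XX_EFUSE_BLOCK_WIDTH is 2 bytes as in this driver): a read at offset = 5 with bytes = 1 now gives blk_index = 5 / 2 = 2 and blk_offset = (5 % 2) * 8 = 8, so block 2 is programmed into SC27XX_EFUSE_BLOCK_INDEX and the value read back is shifted right by 8 bits before the byte is copied to the caller. The old code wrote the raw byte offset into the block-index register and never shifted, which does not match the byte-addressed reads issued by the nvmem core.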
*/ ret = regmap_write(efuse->regmap, efuse->base + SC27XX_EFUSE_BLOCK_INDEX, - offset & SC27XX_EFUSE_BLOCK_MASK); + blk_index & SC27XX_EFUSE_BLOCK_MASK); if (ret) goto disable_efuse; @@ -171,8 +173,10 @@ disable_efuse: unlock_efuse: sc27xx_efuse_unlock(efuse); - if (!ret) + if (!ret) { + buf >>= blk_offset; memcpy(val, &buf, bytes); + } return ret; } diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c new file mode 100644 index 000000000000..490c8fcaec80 --- /dev/null +++ b/drivers/nvmem/zynqmp_nvmem.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2019 Xilinx, Inc. + */ + +#include +#include +#include +#include +#include + +#define SILICON_REVISION_MASK 0xF + +struct zynqmp_nvmem_data { + struct device *dev; + struct nvmem_device *nvmem; +}; + +static int zynqmp_nvmem_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + int ret; + int idcode, version; + struct zynqmp_nvmem_data *priv = context; + + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->get_chipid) + return -ENXIO; + + ret = eemi_ops->get_chipid(&idcode, &version); + if (ret < 0) + return ret; + + dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version); + *(int *)val = version & SILICON_REVISION_MASK; + + return 0; +} + +static struct nvmem_config econfig = { + .name = "zynqmp-nvmem", + .owner = THIS_MODULE, + .word_size = 1, + .size = 1, + .read_only = true, +}; + +static const struct of_device_id zynqmp_nvmem_match[] = { + { .compatible = "xlnx,zynqmp-nvmem-fw", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match); + +static int zynqmp_nvmem_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct zynqmp_nvmem_data *priv; + + priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + econfig.dev = dev; + econfig.reg_read = zynqmp_nvmem_read; + econfig.priv = priv; + + priv->nvmem = devm_nvmem_register(dev, &econfig); + + return PTR_ERR_OR_ZERO(priv->nvmem); +} + +static struct platform_driver zynqmp_nvmem_driver = { + .probe = zynqmp_nvmem_probe, + .driver = { + .name = "zynqmp-nvmem", + .of_match_table = zynqmp_nvmem_match, + }, +}; + +module_platform_driver(zynqmp_nvmem_driver); + +MODULE_AUTHOR("Michal Simek , Nava kishore Manne "); +MODULE_DESCRIPTION("ZynqMP NVMEM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index ad3fcad4d75b..37c2ccbefecd 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -43,6 +43,7 @@ config OF_FLATTREE config OF_EARLY_FLATTREE bool + select DMA_DECLARE_COHERENT if HAS_DMA select OF_FLATTREE config OF_PROMTREE @@ -81,10 +82,9 @@ config OF_MDIO OpenFirmware MDIO bus (Ethernet PHY) accessors config OF_RESERVED_MEM - depends on OF_EARLY_FLATTREE bool - help - Helpers to allow for reservation of memory regions + depends on OF_EARLY_FLATTREE + default y if DMA_DECLARE_COHERENT || DMA_CMA config OF_RESOLVE bool diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 9cc1461aac7d..4734223ab702 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1181,7 +1181,13 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { - return memblock_alloc(size, align); + void *ptr = memblock_alloc(size, align); + + if (!ptr) + panic("%s: Failed to allocate %llu bytes align=0x%llx\n", + __func__, size, align); + + return ptr; } bool __init 
early_init_dt_verify(void *params) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 5ad1342f5682..de6157357e26 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -463,7 +462,6 @@ int of_phy_register_fixed_link(struct device_node *np) struct device_node *fixed_link_node; u32 fixed_link_prop[5]; const char *managed; - int link_gpio = -1; if (of_property_read_string(np, "managed", &managed) == 0 && strcmp(managed, "in-band-status") == 0) { @@ -485,11 +483,7 @@ int of_phy_register_fixed_link(struct device_node *np) status.pause = of_property_read_bool(fixed_link_node, "pause"); status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause"); - link_gpio = of_get_named_gpio_flags(fixed_link_node, - "link-gpios", 0, NULL); of_node_put(fixed_link_node); - if (link_gpio == -EPROBE_DEFER) - return -EPROBE_DEFER; goto register_phy; } @@ -508,8 +502,7 @@ int of_phy_register_fixed_link(struct device_node *np) return -ENODEV; register_phy: - return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, link_gpio, - np)); + return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np)); } EXPORT_SYMBOL(of_phy_register_fixed_link); diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 1977ee0adcb1..6a36bc0b3d64 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -26,33 +26,23 @@ static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; static int reserved_mem_count; -int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, +static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap, phys_addr_t *res_base) { phys_addr_t base; - /* - * We use __memblock_alloc_base() because memblock_alloc_base() - * panic()s on allocation failure. - */ + end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end; align = !align ? SMP_CACHE_BYTES : align; - base = __memblock_alloc_base(size, align, end); + base = memblock_find_in_range(start, end, size, align); if (!base) return -ENOMEM; - /* - * Check if the allocated region fits in to start..end window - */ - if (base < start) { - memblock_free(base, size); - return -ENOMEM; - } - *res_base = base; if (nomap) return memblock_remove(base, size); - return 0; + + return memblock_reserve(base, size); } /** @@ -340,10 +330,6 @@ int of_reserved_mem_device_init_by_idx(struct device *dev, mutex_lock(&of_rmem_assigned_device_mutex); list_add(&rd->list, &of_rmem_assigned_device_list); mutex_unlock(&of_rmem_assigned_device_mutex); - /* ensure that dma_ops is set for virtual devices - * using reserved memory - */ - of_dma_configure(dev, np, true); dev_info(dev, "assigned reserved memory node %s\n", rmem->name); } else { diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 84427384654d..cccde756b510 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -1116,15 +1116,22 @@ static void update_node_properties(struct device_node *np, for (prop = np->properties; prop != NULL; prop = save_next) { save_next = prop->next; ret = of_add_property(dup, prop); - if (ret) + if (ret) { + if (ret == -EEXIST && !strcmp(prop->name, "name")) + continue; pr_err("unittest internal error: unable to add testdata property %pOF/%s", np, prop->name); + } } } /** * attach_node_and_children - attaches nodes - * and its children to live tree + * and its children to live tree. 
+ * CAUTION: misleading function name - if node @np already exists in + * the live tree then children of @np are *not* attached to the live + * tree. This works for the current test devicetree nodes because such + * nodes do not have child nodes. * * @np: Node to attach to live tree */ @@ -2234,7 +2241,13 @@ static struct device_node *overlay_base_root; static void * __init dt_alloc_memory(u64 size, u64 align) { - return memblock_alloc(size, align); + void *ptr = memblock_alloc(size, align); + + if (!ptr) + panic("%s: Failed to allocate %llu bytes align=0x%llx\n", + __func__, size, align); + + return ptr; } /* @@ -2514,6 +2527,10 @@ static int __init of_unittest(void) int res; /* adding data for unittest */ + + if (IS_ENABLED(CONFIG_UML)) + unittest_unflatten_overlay_base(); + res = unittest_data_add(); if (res) return res; diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 18f1639dbc4a..d7f97167cac3 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -130,6 +130,24 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); +/** + * dev_pm_opp_get_level() - Gets the level corresponding to an available opp + * @opp: opp for which level value has to be returned for + * + * Return: level read from device tree corresponding to the opp, else + * return 0. + */ +unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) +{ + if (IS_ERR_OR_NULL(opp) || !opp->available) { + pr_err("%s: Invalid parameters\n", __func__); + return 0; + } + + return opp->level; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_level); + /** * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not * @opp: opp for which turbo mode is being verified @@ -533,9 +551,8 @@ static int _set_opp_voltage(struct device *dev, struct regulator *reg, return ret; } -static inline int -_generic_set_opp_clk_only(struct device *dev, struct clk *clk, - unsigned long old_freq, unsigned long freq) +static inline int _generic_set_opp_clk_only(struct device *dev, struct clk *clk, + unsigned long freq) { int ret; @@ -572,7 +589,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, } /* Change frequency */ - ret = _generic_set_opp_clk_only(dev, opp_table->clk, old_freq, freq); + ret = _generic_set_opp_clk_only(dev, opp_table->clk, freq); if (ret) goto restore_voltage; @@ -586,7 +603,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table, return 0; restore_freq: - if (_generic_set_opp_clk_only(dev, opp_table->clk, freq, old_freq)) + if (_generic_set_opp_clk_only(dev, opp_table->clk, old_freq)) dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", __func__, old_freq); restore_voltage: @@ -759,7 +776,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) opp->supplies); } else { /* Only frequency scaling */ - ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); + ret = _generic_set_opp_clk_only(dev, clk, freq); } /* Scaling down? 
Configure required OPPs after frequency */ @@ -793,7 +810,6 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev, struct opp_table *opp_table) { struct opp_device *opp_dev; - int ret; opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL); if (!opp_dev) @@ -805,10 +821,7 @@ static struct opp_device *_add_opp_dev_unlocked(const struct device *dev, list_add(&opp_dev->node, &opp_table->dev_list); /* Create debugfs entries for the opp_table */ - ret = opp_debug_register(opp_dev, opp_table); - if (ret) - dev_err(dev, "%s: Failed to register opp debugfs (%d)\n", - __func__, ret); + opp_debug_register(opp_dev, opp_table); return opp_dev; } @@ -1229,10 +1242,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, new_opp->opp_table = opp_table; kref_init(&new_opp->kref); - ret = opp_debug_create_one(new_opp, opp_table); - if (ret) - dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n", - __func__, ret); + opp_debug_create_one(new_opp, opp_table); if (!_opp_supported_by_regulators(new_opp, opp_table)) { new_opp->available = false; diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index e6828e5f81b0..a1c57fe14de4 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -35,7 +35,7 @@ void opp_debug_remove_one(struct dev_pm_opp *opp) debugfs_remove_recursive(opp->dentry); } -static bool opp_debug_create_supplies(struct dev_pm_opp *opp, +static void opp_debug_create_supplies(struct dev_pm_opp *opp, struct opp_table *opp_table, struct dentry *pdentry) { @@ -50,30 +50,21 @@ static bool opp_debug_create_supplies(struct dev_pm_opp *opp, /* Create per-opp directory */ d = debugfs_create_dir(name, pdentry); - if (!d) - return false; + debugfs_create_ulong("u_volt_target", S_IRUGO, d, + &opp->supplies[i].u_volt); - if (!debugfs_create_ulong("u_volt_target", S_IRUGO, d, - &opp->supplies[i].u_volt)) - return false; + debugfs_create_ulong("u_volt_min", S_IRUGO, d, + &opp->supplies[i].u_volt_min); - if (!debugfs_create_ulong("u_volt_min", S_IRUGO, d, - &opp->supplies[i].u_volt_min)) - return false; + debugfs_create_ulong("u_volt_max", S_IRUGO, d, + &opp->supplies[i].u_volt_max); - if (!debugfs_create_ulong("u_volt_max", S_IRUGO, d, - &opp->supplies[i].u_volt_max)) - return false; - - if (!debugfs_create_ulong("u_amp", S_IRUGO, d, - &opp->supplies[i].u_amp)) - return false; + debugfs_create_ulong("u_amp", S_IRUGO, d, + &opp->supplies[i].u_amp); } - - return true; } -int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) +void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) { struct dentry *pdentry = opp_table->dentry; struct dentry *d; @@ -95,40 +86,23 @@ int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table) /* Create per-opp directory */ d = debugfs_create_dir(name, pdentry); - if (!d) - return -ENOMEM; - - if (!debugfs_create_bool("available", S_IRUGO, d, &opp->available)) - return -ENOMEM; - - if (!debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic)) - return -ENOMEM; - - if (!debugfs_create_bool("turbo", S_IRUGO, d, &opp->turbo)) - return -ENOMEM; - - if (!debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend)) - return -ENOMEM; - - if (!debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate)) - return -ENOMEM; - if (!debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate)) - return -ENOMEM; + debugfs_create_bool("available", S_IRUGO, d, &opp->available); + debugfs_create_bool("dynamic", S_IRUGO, d, &opp->dynamic); + debugfs_create_bool("turbo", S_IRUGO, d, 
&opp->turbo); + debugfs_create_bool("suspend", S_IRUGO, d, &opp->suspend); + debugfs_create_u32("performance_state", S_IRUGO, d, &opp->pstate); + debugfs_create_ulong("rate_hz", S_IRUGO, d, &opp->rate); + debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, + &opp->clock_latency_ns); - if (!opp_debug_create_supplies(opp, opp_table, d)) - return -ENOMEM; - - if (!debugfs_create_ulong("clock_latency_ns", S_IRUGO, d, - &opp->clock_latency_ns)) - return -ENOMEM; + opp_debug_create_supplies(opp, opp_table, d); opp->dentry = d; - return 0; } -static int opp_list_debug_create_dir(struct opp_device *opp_dev, - struct opp_table *opp_table) +static void opp_list_debug_create_dir(struct opp_device *opp_dev, + struct opp_table *opp_table) { const struct device *dev = opp_dev->dev; struct dentry *d; @@ -137,36 +111,21 @@ static int opp_list_debug_create_dir(struct opp_device *opp_dev, /* Create device specific directory */ d = debugfs_create_dir(opp_table->dentry_name, rootdir); - if (!d) { - dev_err(dev, "%s: Failed to create debugfs dir\n", __func__); - return -ENOMEM; - } opp_dev->dentry = d; opp_table->dentry = d; - - return 0; } -static int opp_list_debug_create_link(struct opp_device *opp_dev, - struct opp_table *opp_table) +static void opp_list_debug_create_link(struct opp_device *opp_dev, + struct opp_table *opp_table) { - const struct device *dev = opp_dev->dev; char name[NAME_MAX]; - struct dentry *d; opp_set_dev_name(opp_dev->dev, name); /* Create device specific directory link */ - d = debugfs_create_symlink(name, rootdir, opp_table->dentry_name); - if (!d) { - dev_err(dev, "%s: Failed to create link\n", __func__); - return -ENOMEM; - } - - opp_dev->dentry = d; - - return 0; + opp_dev->dentry = debugfs_create_symlink(name, rootdir, + opp_table->dentry_name); } /** @@ -177,20 +136,13 @@ static int opp_list_debug_create_link(struct opp_device *opp_dev, * Dynamically adds device specific directory in debugfs 'opp' directory. If the * device-opp is shared with other devices, then links will be created for all * devices except the first. - * - * Return: 0 on success, otherwise negative error. 
*/ -int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table) +void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table) { - if (!rootdir) { - pr_debug("%s: Uninitialized rootdir\n", __func__); - return -EINVAL; - } - if (opp_table->dentry) - return opp_list_debug_create_link(opp_dev, opp_table); - - return opp_list_debug_create_dir(opp_dev, opp_table); + opp_list_debug_create_link(opp_dev, opp_table); + else + opp_list_debug_create_dir(opp_dev, opp_table); } static void opp_migrate_dentry(struct opp_device *opp_dev, @@ -252,10 +204,6 @@ static int __init opp_debug_init(void) { /* Create /sys/kernel/debug/opp directory */ rootdir = debugfs_create_dir("opp", NULL); - if (!rootdir) { - pr_err("%s: Failed to create root directory\n", __func__); - return -ENOMEM; - } return 0; } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 06f0f632ec47..62504b18f198 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "opp.h" @@ -594,6 +595,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, new_opp->rate = (unsigned long)rate; } + of_property_read_u32(np, "opp-level", &new_opp->level); + /* Check if the OPP supports hardware's hierarchy of versions or not */ if (!_opp_is_supported(dev, opp_table, np)) { dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); @@ -1047,3 +1050,101 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) return of_node_get(opp->np); } EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); + +/* + * Callback function provided to the Energy Model framework upon registration. + * This computes the power estimated by @CPU at @kHz if it is the frequency + * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise + * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled + * frequency and @mW to the associated power. The power is estimated as + * P = C * V^2 * f with C being the CPU's capacitance and V and f respectively + * the voltage and frequency of the OPP. + * + * Returns -ENODEV if the CPU device cannot be found, -EINVAL if the power + * calculation failed because of missing parameters, 0 otherwise. + */ +static int __maybe_unused _get_cpu_power(unsigned long *mW, unsigned long *kHz, + int cpu) +{ + struct device *cpu_dev; + struct dev_pm_opp *opp; + struct device_node *np; + unsigned long mV, Hz; + u32 cap; + u64 tmp; + int ret; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return -ENODEV; + + np = of_node_get(cpu_dev->of_node); + if (!np) + return -EINVAL; + + ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); + of_node_put(np); + if (ret) + return -EINVAL; + + Hz = *kHz * 1000; + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz); + if (IS_ERR(opp)) + return -EINVAL; + + mV = dev_pm_opp_get_voltage(opp) / 1000; + dev_pm_opp_put(opp); + if (!mV) + return -EINVAL; + + tmp = (u64)cap * mV * mV * (Hz / 1000000); + do_div(tmp, 1000000000); + + *mW = (unsigned long)tmp; + *kHz = Hz / 1000; + + return 0; +} + +/** + * dev_pm_opp_of_register_em() - Attempt to register an Energy Model + * @cpus : CPUs for which an Energy Model has to be registered + * + * This checks whether the "dynamic-power-coefficient" devicetree property has + * been specified, and tries to register an Energy Model with it if it has. 
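To make the P = C * V^2 * f estimate above concrete (an illustrative aside with made-up numbers): for a CPU node carrying dynamic-power-coefficient = 105 and an OPP at 1.2 GHz / 900 mV, _get_cpu_power() computes tmp = 105 * 900 * 900 * 1200 = 102,060,000,000, divides by 10^9, and reports *mW = 102 and *kHz = 1,200,000, i.e. roughly 102 mW for that performance point.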
+ */ +void dev_pm_opp_of_register_em(struct cpumask *cpus) +{ + struct em_data_callback em_cb = EM_DATA_CB(_get_cpu_power); + int ret, nr_opp, cpu = cpumask_first(cpus); + struct device *cpu_dev; + struct device_node *np; + u32 cap; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return; + + nr_opp = dev_pm_opp_get_opp_count(cpu_dev); + if (nr_opp <= 0) + return; + + np = of_node_get(cpu_dev->of_node); + if (!np) + return; + + /* + * Register an EM only if the 'dynamic-power-coefficient' property is + * set in devicetree. It is assumed the voltage values are known if that + * property is set since it is useless otherwise. If voltages are not + * known, just let the EM registration fail with an error to alert the + * user about the inconsistent configuration. + */ + ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap); + of_node_put(np); + if (ret || !cap) + return; + + em_register_perf_domain(cpus, nr_opp, &em_cb); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em); diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h index e24d81497375..569b3525aa67 100644 --- a/drivers/opp/opp.h +++ b/drivers/opp/opp.h @@ -60,6 +60,7 @@ extern struct list_head opp_tables; * @suspend: true if suspend OPP * @pstate: Device's power domain's performance state. * @rate: Frequency in hertz + * @level: Performance level * @supplies: Power supplies voltage/current values * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's * frequency from any other OPP's frequency. @@ -80,6 +81,7 @@ struct dev_pm_opp { bool suspend; unsigned int pstate; unsigned long rate; + unsigned int level; struct dev_pm_opp_supply *supplies; @@ -236,18 +238,17 @@ static inline void _of_opp_free_required_opps(struct opp_table *opp_table, #ifdef CONFIG_DEBUG_FS void opp_debug_remove_one(struct dev_pm_opp *opp); -int opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table); -int opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table); +void opp_debug_create_one(struct dev_pm_opp *opp, struct opp_table *opp_table); +void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table); void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table); #else static inline void opp_debug_remove_one(struct dev_pm_opp *opp) {} -static inline int opp_debug_create_one(struct dev_pm_opp *opp, - struct opp_table *opp_table) -{ return 0; } -static inline int opp_debug_register(struct opp_device *opp_dev, - struct opp_table *opp_table) -{ return 0; } +static inline void opp_debug_create_one(struct dev_pm_opp *opp, + struct opp_table *opp_table) { } + +static inline void opp_debug_register(struct opp_device *opp_dev, + struct opp_table *opp_table) { } static inline void opp_debug_unregister(struct opp_device *opp_dev, struct opp_table *opp_table) diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 8d2fc84119c6..8937ba70d817 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -55,6 +55,8 @@ #include /* for register_module() */ #include +#include "iommu.h" + /* ** Choose "ccio" since that's what HP-UX calls it. 
** Make it easier for folks to migrate from one to the other :^) @@ -710,8 +712,8 @@ ccio_dma_supported(struct device *dev, u64 mask) return 0; } - /* only support 32-bit devices (ie PCI/GSC) */ - return (int)(mask == 0xffffffffUL); + /* only support 32-bit or better devices (ie PCI/GSC) */ + return (int)(mask >= 0xffffffffUL); } /** @@ -1517,6 +1519,7 @@ static int __init ccio_probe(struct parisc_device *dev) { int i; struct ioc *ioc, **ioc_p = &ioc_list; + struct pci_hba_data *hba; ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); if (ioc == NULL) { @@ -1543,11 +1546,13 @@ static int __init ccio_probe(struct parisc_device *dev) ccio_ioc_init(ioc); ccio_init_resources(ioc); hppa_dma_ops = &ccio_ops; - dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL); + hba = kzalloc(sizeof(*hba), GFP_KERNEL); /* if this fails, no I/O cards will work, so may as well bug */ - BUG_ON(dev->dev.platform_data == NULL); - HBA_DATA(dev->dev.platform_data)->iommu = ioc; + BUG_ON(hba == NULL); + + hba->iommu = ioc; + dev->dev.platform_data = hba; #ifdef CONFIG_PROC_FS if (ioc_count == 0) { diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index dfeea458a789..846b59d15999 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -59,6 +59,7 @@ #include #include "gsc.h" +#include "iommu.h" #undef DINO_DEBUG @@ -153,12 +154,10 @@ struct dino_device #endif }; -/* Looks nice and keeps the compiler happy */ -#define DINO_DEV(d) ({ \ - void *__pdata = d; \ - BUG_ON(!__pdata); \ - (struct dino_device *)__pdata; }) - +static inline struct dino_device *DINO_DEV(struct pci_hba_data *hba) +{ + return container_of(hba, struct dino_device, hba); +} /* * Dino Configuration Space Accessor Functions diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 9ff434f354bd..5657a1d3eb2b 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -45,6 +45,8 @@ #include #include +#include "iommu.h" + #if 0 #define EISA_DBG(msg, arg...) 
printk(KERN_DEBUG "eisa: " msg, ## arg) #else diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c index ebc7b617e5d0..3b3481c0d81d 100644 --- a/drivers/parisc/hppb.c +++ b/drivers/parisc/hppb.c @@ -23,6 +23,8 @@ #include #include +#include "iommu.h" + struct hppb_card { unsigned long hpa; struct resource mmio_region; diff --git a/drivers/parisc/iommu.h b/drivers/parisc/iommu.h new file mode 100644 index 000000000000..240059cd8185 --- /dev/null +++ b/drivers/parisc/iommu.h @@ -0,0 +1,55 @@ +#ifndef _IOMMU_H +#define _IOMMU_H 1 + +#include + +struct parisc_device; +struct ioc; + +static inline struct pci_hba_data *parisc_walk_tree(struct device *dev) +{ + struct device *otherdev; + + if (likely(dev->platform_data)) + return dev->platform_data; + + /* OK, just traverse the bus to find it */ + for (otherdev = dev->parent; + otherdev; + otherdev = otherdev->parent) { + if (otherdev->platform_data) { + dev->platform_data = otherdev->platform_data; + break; + } + } + + return dev->platform_data; +} + +static inline struct ioc *GET_IOC(struct device *dev) +{ + struct pci_hba_data *pdata = parisc_walk_tree(dev); + + if (!pdata) + return NULL; + return pdata->iommu; +} + +#ifdef CONFIG_IOMMU_CCIO +void *ccio_get_iommu(const struct parisc_device *dev); +int ccio_request_resource(const struct parisc_device *dev, + struct resource *res); +int ccio_allocate_resource(const struct parisc_device *dev, + struct resource *res, unsigned long size, + unsigned long min, unsigned long max, unsigned long align); +#else /* !CONFIG_IOMMU_CCIO */ +#define ccio_get_iommu(dev) NULL +#define ccio_request_resource(dev, res) insert_resource(&iomem_resource, res) +#define ccio_allocate_resource(dev, res, size, min, max, align) \ + allocate_resource(&iomem_resource, res, size, min, max, \ + align, NULL, NULL) +#endif /* !CONFIG_IOMMU_CCIO */ + +void *sba_get_iommu(struct parisc_device *dev); + +#endif /* _IOMMU_H */ diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 144c77dfe4b1..1be571c20062 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -126,21 +126,10 @@ ** o disable IRdT - call disable_irq(vector[line]->processor_irq) */ - -/* FIXME: determine which include files are really needed */ -#include -#include -#include #include -#include -#include -#include -#include /* get in-line asm for swab */ #include #include -#include -#include /* read/write functions */ #ifdef CONFIG_SUPERIO #include #endif @@ -168,12 +157,8 @@ #define DBG_IRT(x...) 
#endif -#ifdef CONFIG_64BIT -#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa)) -#else #define COMPARE_IRTE_ADDR(irte, hpa) \ - ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL)) -#endif + ((irte)->dest_iosapic_addr == F_EXTEND(hpa)) #define IOSAPIC_REG_SELECT 0x00 #define IOSAPIC_REG_WINDOW 0x10 diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c index 69bd98421eb1..d4701589bc8c 100644 --- a/drivers/parisc/lba_pci.c +++ b/drivers/parisc/lba_pci.c @@ -49,6 +49,8 @@ #include #include /* read/write stuff */ +#include "iommu.h" + #undef DEBUG_LBA /* general stuff */ #undef DEBUG_LBA_PORT /* debug I/O Port access */ #undef DEBUG_LBA_CFG /* debug Config Space Access (ie PCI Bus walk) */ @@ -109,12 +111,10 @@ static u32 lba_t32; #define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE) - -/* Looks nice and keeps the compiler happy */ -#define LBA_DEV(d) ({ \ - void *__pdata = d; \ - BUG_ON(!__pdata); \ - (struct lba_device *)__pdata; }) +static inline struct lba_device *LBA_DEV(struct pci_hba_data *hba) +{ + return container_of(hba, struct lba_device, hba); +} /* ** Only allow 8 subsidiary busses per LBA @@ -1275,7 +1275,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) r->flags = IORESOURCE_MEM; /* mmio_mask also clears Enable bit */ r->start &= mmio_mask; - r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start); + r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start); rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK); /* @@ -1321,7 +1321,7 @@ lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) r->flags = IORESOURCE_MEM; /* mmio_mask also clears Enable bit */ r->start &= mmio_mask; - r->start = PCI_HOST_ADDR(HBA_DATA(lba_dev), r->start); + r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start); rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK); r->end = r->start + ~rsize; } @@ -1562,7 +1562,7 @@ lba_driver_probe(struct parisc_device *dev) /* ------------ Second : initialize common stuff ---------- */ pci_bios = &lba_bios_ops; - pcibios_register_hba(HBA_DATA(lba_dev)); + pcibios_register_hba(&lba_dev->hba); spin_lock_init(&lba_dev->lba_lock); if (lba_hw_init(lba_dev)) @@ -1743,3 +1743,15 @@ static void quirk_diva_aux_disable(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, quirk_diva_aux_disable); + +static void quirk_tosca_aux_disable(struct pci_dev *dev) +{ + if (dev->subsystem_vendor != PCI_VENDOR_ID_HP || + dev->subsystem_device != 0x104a) + return; + + dev_info(&dev->dev, "Hiding Tosca secondary built-in AUX serial device"); + dev->device = 0; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA, + quirk_tosca_aux_disable); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 42172eb32235..afaf8e6aefe6 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -49,6 +49,8 @@ #include /* for is_pdc_pat() */ #include +#include "iommu.h" + #define MODULE_NAME "SBA" /* diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 5484a46dafda..56dd83a45e55 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -213,10 +213,12 @@ void parport_daisy_fini(struct parport *port) struct pardevice *parport_open(int devnum, const char *name) { struct daisydev *p = topology; + struct pardev_cb par_cb; struct parport *port; struct pardevice *dev; int daisy; + memset(&par_cb, 0, sizeof(par_cb)); spin_lock(&topology_lock); while (p && p->devnum != devnum) p = 
p->next; @@ -230,7 +232,7 @@ struct pardevice *parport_open(int devnum, const char *name) port = parport_get_port(p->port); spin_unlock(&topology_lock); - dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL); + dev = parport_register_dev_model(port, name, &par_cb, devnum); parport_put_port(port); if (!dev) return NULL; @@ -480,3 +482,31 @@ static int assign_addrs(struct parport *port) kfree(deviceid); return detected; } + +static int daisy_drv_probe(struct pardevice *par_dev) +{ + struct device_driver *drv = par_dev->dev.driver; + + if (strcmp(drv->name, "daisy_drv")) + return -ENODEV; + if (strcmp(par_dev->name, daisy_dev_name)) + return -ENODEV; + + return 0; +} + +static struct parport_driver daisy_driver = { + .name = "daisy_drv", + .probe = daisy_drv_probe, + .devmodel = true, +}; + +int daisy_drv_init(void) +{ + return parport_register_driver(&daisy_driver); +} + +void daisy_drv_exit(void) +{ + parport_unregister_driver(&daisy_driver); +} diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 9c8249f74479..6296dbb83d47 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p) { int i; for (i = 0; i < NR_SUPERIOS; i++) - if (superios[i].io != p->base) + if (superios[i].io == p->base) return &superios[i]; return NULL; } diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index e035174ba205..e5e6a463a941 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer, ssize_t parport_device_id (int devnum, char *buffer, size_t count) { ssize_t retval = -ENXIO; - struct pardevice *dev = parport_open (devnum, "Device ID probe"); + struct pardevice *dev = parport_open(devnum, daisy_dev_name); if (!dev) return -ENXIO; diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 5dc53d420ca8..0171b8dbcdcd 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -137,11 +137,19 @@ static struct bus_type parport_bus_type = { int parport_bus_init(void) { - return bus_register(&parport_bus_type); + int retval; + + retval = bus_register(&parport_bus_type); + if (retval) + return retval; + daisy_drv_init(); + + return 0; } void parport_bus_exit(void) { + daisy_drv_exit(); bus_unregister(&parport_bus_type); } diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 5b78f3b1b918..97c08146534a 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -142,6 +142,33 @@ int pci_ats_queue_depth(struct pci_dev *dev) } EXPORT_SYMBOL_GPL(pci_ats_queue_depth); +/** + * pci_ats_page_aligned - Return Page Aligned Request bit status. + * @pdev: the PCI device + * + * Returns 1, if the Untranslated Addresses generated by the device + * are always aligned or 0 otherwise. + * + * Per PCIe spec r4.0, sec 10.5.1.2, if the Page Aligned Request bit + * is set, it indicates the Untranslated Addresses generated by the + * device are always aligned to a 4096 byte boundary. 
+ */ +int pci_ats_page_aligned(struct pci_dev *pdev) +{ + u16 cap; + + if (!pdev->ats_cap) + return 0; + + pci_read_config_word(pdev, pdev->ats_cap + PCI_ATS_CAP, &cap); + + if (cap & PCI_ATS_CAP_PAGE_ALIGNED) + return 1; + + return 0; +} +EXPORT_SYMBOL_GPL(pci_ats_page_aligned); + #ifdef CONFIG_PCI_PRI /** * pci_enable_pri - Enable PRI capability @@ -368,6 +395,36 @@ int pci_pasid_features(struct pci_dev *pdev) } EXPORT_SYMBOL_GPL(pci_pasid_features); +/** + * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit + * status. + * @pdev: PCI device structure + * + * Returns 1 if PASID is required in PRG Response Message, 0 otherwise. + * + * Even though the PRG response PASID status is read from PRI Status + * Register, since this API will mainly be used by PASID users, this + * function is defined within #ifdef CONFIG_PCI_PASID instead of + * CONFIG_PCI_PRI. + */ +int pci_prg_resp_pasid_required(struct pci_dev *pdev) +{ + u16 status; + int pos; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); + if (!pos) + return 0; + + pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); + + if (status & PCI_PRI_STATUS_PASID) + return 1; + + return 0; +} +EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required); + #define PASID_NUMBER_SHIFT 8 #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) /** diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index 6671946dbf66..6012f3059acd 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -175,7 +175,7 @@ config PCIE_IPROC_MSI config PCIE_ALTERA bool "Altera PCIe controller" - depends on ARM || NIOS2 || COMPILE_TEST + depends on ARM || NIOS2 || ARM64 || COMPILE_TEST help Say Y here if you want to enable PCIe controller support on Altera FPGA. 
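[Editor's note, not part of the patch series] The two ATS/PASID helpers added above, pci_ats_page_aligned() and pci_prg_resp_pasid_required(), are intended to be consulted by IOMMU drivers before they enable ATS, PRI and PASID on an endpoint. Below is a minimal sketch of such a consumer. The function my_iommu_enable_dev_features(), the my_dev_state structure and the policy of refusing ATS on devices without the Page Aligned Request bit are all invented for illustration and assume an IOMMU that only tolerates 4 KB-aligned untranslated addresses; a real driver would also enable PRI/PASID and unwind on failure.

/*
 * Hypothetical IOMMU-driver helper showing one way the new interfaces
 * from drivers/pci/ats.c might be used.  Illustrative only.
 */
#include <linux/pci.h>
#include <linux/pci-ats.h>

struct my_dev_state {
	bool ats_enabled;
	bool needs_pasid_in_prg;	/* PRG responses must carry a PASID */
};

static int my_iommu_enable_dev_features(struct pci_dev *pdev,
					struct my_dev_state *state)
{
	int ret;

	/*
	 * Only enable ATS when the device guarantees 4 KB-aligned
	 * untranslated addresses; an IOMMU that tracks translations at
	 * page granularity cannot safely accept unaligned requests.
	 */
	if (!pci_ats_page_aligned(pdev))
		return -ENODEV;

	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;
	state->ats_enabled = true;

	/*
	 * Record whether Page Request Group responses have to include the
	 * originating PASID, so a later page-fault handler can build the
	 * response messages correctly.
	 */
	state->needs_pasid_in_prg = pci_prg_resp_pasid_required(pdev);

	return 0;
}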
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 548c58223868..6ea74b1c0d94 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -89,8 +89,8 @@ config PCI_EXYNOS select PCIE_DW_HOST config PCI_IMX6 - bool "Freescale i.MX6/7 PCIe controller" - depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST) + bool "Freescale i.MX6/7/8 PCIe controller" + depends on SOC_IMX6Q || SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST depends on PCI_MSI_IRQ_DOMAIN select PCIE_DW_HOST diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 7bcdcdf5024e..b5f3b83cc2b3 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -8,7 +8,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o -obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o +obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index a32d6dde7a57..ae84a69ae63a 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -81,6 +81,10 @@ #define MSI_REQ_GRANT BIT(0) #define MSI_VECTOR_SHIFT 7 +#define PCIE_1LANE_2LANE_SELECTION BIT(13) +#define PCIE_B1C0_MODE_SEL BIT(2) +#define PCIE_B0_B1_TSYNCEN BIT(0) + struct dra7xx_pcie { struct dw_pcie *pci; void __iomem *base; /* DT ti_conf */ @@ -93,6 +97,7 @@ struct dra7xx_pcie { struct dra7xx_pcie_of_data { enum dw_pcie_device_mode mode; + u32 b1co_mode_sel_mask; }; #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) @@ -389,9 +394,22 @@ static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } +static const struct pci_epc_features dra7xx_pcie_epc_features = { + .linkup_notifier = true, + .msi_capable = true, + .msix_capable = false, +}; + +static const struct pci_epc_features* +dra7xx_pcie_get_features(struct dw_pcie_ep *ep) +{ + return &dra7xx_pcie_epc_features; +} + static struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = dra7xx_pcie_ep_init, .raise_irq = dra7xx_pcie_raise_irq, + .get_features = dra7xx_pcie_get_features, }; static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, @@ -499,6 +517,10 @@ static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) int i; for (i = 0; i < phy_count; i++) { + ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE); + if (ret < 0) + goto err_phy; + ret = phy_init(dra7xx->phy[i]); if (ret < 0) goto err_phy; @@ -529,6 +551,26 @@ static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { .mode = DW_PCIE_EP_TYPE, }; +static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = { + .b1co_mode_sel_mask = BIT(2), + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = { + .b1co_mode_sel_mask = GENMASK(3, 2), + .mode = DW_PCIE_RC_TYPE, +}; + +static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = { + .b1co_mode_sel_mask = BIT(2), + .mode = DW_PCIE_EP_TYPE, +}; + +static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = { + .b1co_mode_sel_mask = GENMASK(3, 2), + .mode = DW_PCIE_EP_TYPE, +}; + static const struct of_device_id of_dra7xx_pcie_match[] = { { .compatible = "ti,dra7-pcie", @@ -538,6 +580,22 @@ static const struct 
of_device_id of_dra7xx_pcie_match[] = { .compatible = "ti,dra7-pcie-ep", .data = &dra7xx_pcie_ep_of_data, }, + { + .compatible = "ti,dra746-pcie-rc", + .data = &dra746_pcie_rc_of_data, + }, + { + .compatible = "ti,dra726-pcie-rc", + .data = &dra726_pcie_rc_of_data, + }, + { + .compatible = "ti,dra746-pcie-ep", + .data = &dra746_pcie_ep_of_data, + }, + { + .compatible = "ti,dra726-pcie-ep", + .data = &dra726_pcie_ep_of_data, + }, {}, }; @@ -583,6 +641,34 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev) return ret; } +static int dra7xx_pcie_configure_two_lane(struct device *dev, + u32 b1co_mode_sel_mask) +{ + struct device_node *np = dev->of_node; + struct regmap *pcie_syscon; + unsigned int pcie_reg; + u32 mask; + u32 val; + + pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); + if (IS_ERR(pcie_syscon)) { + dev_err(dev, "unable to get ti,syscon-lane-sel\n"); + return -EINVAL; + } + + if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, + &pcie_reg)) { + dev_err(dev, "couldn't get lane selection reg offset\n"); + return -EINVAL; + } + + mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; + val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; + regmap_update_bits(pcie_syscon, pcie_reg, mask, val); + + return 0; +} + static int __init dra7xx_pcie_probe(struct platform_device *pdev) { u32 reg; @@ -603,6 +689,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) const struct of_device_id *match; const struct dra7xx_pcie_of_data *data; enum dw_pcie_device_mode mode; + u32 b1co_mode_sel_mask; match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); if (!match) @@ -610,6 +697,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) data = (struct dra7xx_pcie_of_data *)match->data; mode = (enum dw_pcie_device_mode)data->mode; + b1co_mode_sel_mask = data->b1co_mode_sel_mask; dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); if (!dra7xx) @@ -665,6 +753,12 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx->pci = pci; dra7xx->phy_count = phy_count; + if (phy_count == 2) { + ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask); + if (ret < 0) + dra7xx->phy_count = 1; /* Fallback to x1 lane mode */ + } + ret = dra7xx_pcie_enable_phy(dra7xx); if (ret) { dev_err(dev, "failed to enable phy\n"); diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 52e47dac028f..3d627f94a166 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -8,6 +8,7 @@ * Author: Sean Cross */ +#include #include #include #include @@ -18,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +34,12 @@ #include "pcie-designware.h" +#define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) +#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) +#define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) +#define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) +#define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 + #define to_imx6_pcie(x) dev_get_drvdata((x)->dev) enum imx6_pcie_variants { @@ -39,6 +47,15 @@ enum imx6_pcie_variants { IMX6SX, IMX6QP, IMX7D, + IMX8MQ, +}; + +#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) +#define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) + +struct imx6_pcie_drvdata { + enum imx6_pcie_variants variant; + u32 flags; }; struct imx6_pcie { @@ -49,11 +66,12 @@ struct imx6_pcie { struct clk *pcie_phy; struct clk *pcie_inbound_axi; struct clk *pcie; + struct clk *pcie_aux; struct regmap *iomuxc_gpr; + u32 controller_id; struct 
reset_control *pciephy_reset; struct reset_control *apps_reset; struct reset_control *turnoff_reset; - enum imx6_pcie_variants variant; u32 tx_deemph_gen1; u32 tx_deemph_gen2_3p5db; u32 tx_deemph_gen2_6db; @@ -61,11 +79,13 @@ struct imx6_pcie { u32 tx_swing_low; int link_gen; struct regulator *vpcie; + void __iomem *phy_base; /* power domain for pcie */ struct device *pd_pcie; /* power domain for pcie phy */ struct device *pd_pcie_phy; + const struct imx6_pcie_drvdata *drvdata; }; /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ @@ -101,7 +121,6 @@ struct imx6_pcie { #define PCIE_PHY_STAT_ACK_LOC 16 #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C -#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) /* PHY registers (not memory-mapped) */ #define PCIE_PHY_ATEOVRD 0x10 @@ -117,6 +136,23 @@ struct imx6_pcie { #define PCIE_PHY_RX_ASIC_OUT 0x100D #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) +/* iMX7 PCIe PHY registers */ +#define PCIE_PHY_CMN_REG4 0x14 +/* These are probably the bits that *aren't* DCC_FB_EN */ +#define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29 + +#define PCIE_PHY_CMN_REG15 0x54 +#define PCIE_PHY_CMN_REG15_DLY_4 BIT(2) +#define PCIE_PHY_CMN_REG15_PLL_PD BIT(5) +#define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7) + +#define PCIE_PHY_CMN_REG24 0x90 +#define PCIE_PHY_CMN_REG24_RX_EQ BIT(6) +#define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3) + +#define PCIE_PHY_CMN_REG26 0x98 +#define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC + #define PHY_RX_OVRD_IN_LO 0x1005 #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) @@ -251,6 +287,9 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) { u32 tmp; + if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) + return; + pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN); @@ -264,6 +303,7 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); } +#ifdef CONFIG_ARM /* Added for PCI abort handling */ static int imx6q_pcie_abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -297,6 +337,7 @@ static int imx6q_pcie_abort_handler(unsigned long addr, return 1; } +#endif static int imx6_pcie_attach_pd(struct device *dev) { @@ -310,6 +351,9 @@ static int imx6_pcie_attach_pd(struct device *dev) imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); if (IS_ERR(imx6_pcie->pd_pcie)) return PTR_ERR(imx6_pcie->pd_pcie); + /* Do nothing when power domain missing */ + if (!imx6_pcie->pd_pcie) + return 0; link = device_link_add(dev, imx6_pcie->pd_pcie, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | @@ -323,13 +367,13 @@ static int imx6_pcie_attach_pd(struct device *dev) if (IS_ERR(imx6_pcie->pd_pcie_phy)) return PTR_ERR(imx6_pcie->pd_pcie_phy); - device_link_add(dev, imx6_pcie->pd_pcie_phy, + link = device_link_add(dev, imx6_pcie->pd_pcie_phy, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE); - if (IS_ERR(link)) { - dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); - return PTR_ERR(link); + if (!link) { + dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); + return -EINVAL; } return 0; @@ -339,8 +383,9 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) { struct device *dev = imx6_pcie->pci->dev; - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { case IMX7D: + case IMX8MQ: reset_control_assert(imx6_pcie->pciephy_reset); reset_control_assert(imx6_pcie->apps_reset); break; @@ -375,13 +420,20 @@ 
static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) } } +static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) +{ + WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ); + return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14; +} + static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; struct device *dev = pci->dev; + unsigned int offset; int ret = 0; - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { case IMX6SX: ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); if (ret) { @@ -409,6 +461,25 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) break; case IMX7D: break; + case IMX8MQ: + ret = clk_prepare_enable(imx6_pcie->pcie_aux); + if (ret) { + dev_err(dev, "unable to enable pcie_aux clock\n"); + break; + } + + offset = imx6_pcie_grp_offset(imx6_pcie); + /* + * Set the over ride low and enabled + * make sure that REF_CLK is turned on. + */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, + IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, + 0); + regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, + IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, + IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); + break; } return ret; @@ -484,9 +555,32 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) !imx6_pcie->gpio_active_high); } - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { + case IMX8MQ: + reset_control_deassert(imx6_pcie->pciephy_reset); + break; case IMX7D: reset_control_deassert(imx6_pcie->pciephy_reset); + + /* Workaround for ERR010728, failure of PCI-e PLL VCO to + * oscillate, especially when cold. This turns off "Duty-cycle + * Corrector" and other mysterious undocumented things. + */ + if (likely(imx6_pcie->phy_base)) { + /* De-assert DCC_FB_EN */ + writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, + imx6_pcie->phy_base + PCIE_PHY_CMN_REG4); + /* Assert RX_EQS and RX_EQS_SEL */ + writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL + | PCIE_PHY_CMN_REG24_RX_EQ, + imx6_pcie->phy_base + PCIE_PHY_CMN_REG24); + /* Assert ATT_MODE */ + writel(PCIE_PHY_CMN_REG26_ATT_MODE, + imx6_pcie->phy_base + PCIE_PHY_CMN_REG26); + } else { + dev_warn(dev, "Unable to apply ERR010728 workaround. 
DT missing fsl,imx7d-pcie-phy phandle ?\n"); + } + imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); break; case IMX6SX: @@ -520,9 +614,37 @@ err_pcie_phy: } } +static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) +{ + unsigned int mask, val; + + if (imx6_pcie->drvdata->variant == IMX8MQ && + imx6_pcie->controller_id == 1) { + mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE; + val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, + PCI_EXP_TYPE_ROOT_PORT); + } else { + mask = IMX6Q_GPR12_DEVICE_TYPE; + val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, + PCI_EXP_TYPE_ROOT_PORT); + } + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val); +} + static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) { - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { + case IMX8MQ: + /* + * TODO: Currently this code assumes external + * oscillator is being used + */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, + imx6_pcie_grp_offset(imx6_pcie), + IMX8MQ_GPR_PCIE_REF_USE_PAD, + IMX8MQ_GPR_PCIE_REF_USE_PAD); + break; case IMX7D: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); @@ -558,8 +680,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) break; } - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); + imx6_pcie_configure_type(imx6_pcie); } static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) @@ -568,6 +689,9 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) int mult, div; u32 val; + if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) + return 0; + switch (phy_rate) { case 125000000: /* @@ -644,7 +768,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { case IMX6Q: case IMX6SX: case IMX6QP: @@ -653,6 +777,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev) IMX6Q_GPR12_PCIE_CTL_2); break; case IMX7D: + case IMX8MQ: reset_control_deassert(imx6_pcie->apps_reset); break; } @@ -697,7 +822,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) tmp |= PORT_LOGIC_SPEED_CHANGE; dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); - if (imx6_pcie->variant != IMX7D) { + if (imx6_pcie->drvdata->flags & + IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) { /* * On i.MX7, DIRECT_SPEED_CHANGE behaves differently * from i.MX6 family when no link speed transition @@ -794,7 +920,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { case IMX6SX: case IMX6QP: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, @@ -820,7 +946,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) } /* Others poke directly at IOMUXC registers */ - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { case IMX6SX: regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6SX_GPR12_PCIE_PM_TURN_OFF, @@ -850,7 +976,7 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) clk_disable_unprepare(imx6_pcie->pcie_phy); clk_disable_unprepare(imx6_pcie->pcie_bus); - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { case IMX6SX: clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); break; @@ -859,6 +985,9 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); 
break; + case IMX8MQ: + clk_disable_unprepare(imx6_pcie->pcie_aux); + break; default: break; } @@ -866,8 +995,8 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie) { - return (imx6_pcie->variant == IMX7D || - imx6_pcie->variant == IMX6SX); + return (imx6_pcie->drvdata->variant == IMX7D || + imx6_pcie->drvdata->variant == IMX6SX); } static int imx6_pcie_suspend_noirq(struct device *dev) @@ -916,6 +1045,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct dw_pcie *pci; struct imx6_pcie *imx6_pcie; + struct device_node *np; struct resource *dbi_base; struct device_node *node = dev->of_node; int ret; @@ -933,8 +1063,24 @@ static int imx6_pcie_probe(struct platform_device *pdev) pci->ops = &dw_pcie_ops; imx6_pcie->pci = pci; - imx6_pcie->variant = - (enum imx6_pcie_variants)of_device_get_match_data(dev); + imx6_pcie->drvdata = of_device_get_match_data(dev); + + /* Find the PHY if one is defined, only imx7d uses it */ + np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); + if (np) { + struct resource res; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(dev, "Unable to map PCIe PHY\n"); + return ret; + } + imx6_pcie->phy_base = devm_ioremap_resource(dev, &res); + if (IS_ERR(imx6_pcie->phy_base)) { + dev_err(dev, "Unable to map PCIe PHY\n"); + return PTR_ERR(imx6_pcie->phy_base); + } + } dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); pci->dbi_base = devm_ioremap_resource(dev, dbi_base); @@ -978,7 +1124,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) return PTR_ERR(imx6_pcie->pcie); } - switch (imx6_pcie->variant) { + switch (imx6_pcie->drvdata->variant) { case IMX6SX: imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, "pcie_inbound_axi"); @@ -987,7 +1133,17 @@ static int imx6_pcie_probe(struct platform_device *pdev) return PTR_ERR(imx6_pcie->pcie_inbound_axi); } break; + case IMX8MQ: + imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); + if (IS_ERR(imx6_pcie->pcie_aux)) { + dev_err(dev, "pcie_aux clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_aux); + } + /* fall through */ case IMX7D: + if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) + imx6_pcie->controller_id = 1; + imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); if (IS_ERR(imx6_pcie->pciephy_reset)) { @@ -1084,11 +1240,36 @@ static void imx6_pcie_shutdown(struct platform_device *pdev) imx6_pcie_assert_core_reset(imx6_pcie); } +static const struct imx6_pcie_drvdata drvdata[] = { + [IMX6Q] = { + .variant = IMX6Q, + .flags = IMX6_PCIE_FLAG_IMX6_PHY | + IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, + }, + [IMX6SX] = { + .variant = IMX6SX, + .flags = IMX6_PCIE_FLAG_IMX6_PHY | + IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, + }, + [IMX6QP] = { + .variant = IMX6QP, + .flags = IMX6_PCIE_FLAG_IMX6_PHY | + IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, + }, + [IMX7D] = { + .variant = IMX7D, + }, + [IMX8MQ] = { + .variant = IMX8MQ, + }, +}; + static const struct of_device_id imx6_pcie_of_match[] = { - { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, - { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, - { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, - { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, }, + { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], }, + { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], }, + { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], }, + { .compatible = 
"fsl,imx7d-pcie", .data = &drvdata[IMX7D], }, + { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } , {}, }; @@ -1105,6 +1286,7 @@ static struct platform_driver imx6_pcie_driver = { static int __init imx6_pcie_init(void) { +#ifdef CONFIG_ARM /* * Since probe() can be deferred we need to make sure that * hook_fault_code is not called after __init memory is freed @@ -1114,6 +1296,7 @@ static int __init imx6_pcie_init(void) */ hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, "external abort on non-linefetch"); +#endif return platform_driver_register(&imx6_pcie_driver); } diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c new file mode 100644 index 000000000000..a42c9c3ae1cc --- /dev/null +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCIe controller EP driver for Freescale Layerscape SoCs + * + * Copyright (C) 2018 NXP Semiconductor. + * + * Author: Xiaowei Bao + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcie-designware.h" + +#define PCIE_DBI2_OFFSET 0x1000 /* DBI2 base address*/ + +struct ls_pcie_ep { + struct dw_pcie *pci; +}; + +#define to_ls_pcie_ep(x) dev_get_drvdata((x)->dev) + +static int ls_pcie_establish_link(struct dw_pcie *pci) +{ + return 0; +} + +static const struct dw_pcie_ops ls_pcie_ep_ops = { + .start_link = ls_pcie_establish_link, +}; + +static const struct of_device_id ls_pcie_ep_of_match[] = { + { .compatible = "fsl,ls-pcie-ep",}, + { }, +}; + +static const struct pci_epc_features ls_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, +}; + +static const struct pci_epc_features* +ls_pcie_ep_get_features(struct dw_pcie_ep *ep) +{ + return &ls_pcie_epc_features; +} + +static void ls_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); +} + +static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return dw_pcie_ep_raise_legacy_irq(ep, func_no); + case PCI_EPC_IRQ_MSI: + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + case PCI_EPC_IRQ_MSIX: + return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); + default: + dev_err(pci->dev, "UNKNOWN IRQ type\n"); + return -EINVAL; + } +} + +static struct dw_pcie_ep_ops pcie_ep_ops = { + .ep_init = ls_pcie_ep_init, + .raise_irq = ls_pcie_ep_raise_irq, + .get_features = ls_pcie_ep_get_features, +}; + +static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie, + struct platform_device *pdev) +{ + struct dw_pcie *pci = pcie->pci; + struct device *dev = pci->dev; + struct dw_pcie_ep *ep; + struct resource *res; + int ret; + + ep = &pci->ep; + ep->ops = &pcie_ep_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); + if (!res) + return -EINVAL; + + ep->phys_base = res->start; + ep->addr_size = resource_size(res); + + ret = dw_pcie_ep_init(ep); + if (ret) { + dev_err(dev, "failed to initialize endpoint\n"); + return ret; + } + + return 0; +} + +static int __init ls_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci; + struct ls_pcie_ep *pcie; + struct resource *dbi_base; + int ret; + + pcie = devm_kzalloc(dev, 
sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + if (!pci) + return -ENOMEM; + + dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET; + pci->dev = dev; + pci->ops = &ls_pcie_ep_ops; + pcie->pci = pci; + + platform_set_drvdata(pdev, pcie); + + ret = ls_add_pcie_ep(pcie, pdev); + + return ret; +} + +static struct platform_driver ls_pcie_ep_driver = { + .driver = { + .name = "layerscape-pcie-ep", + .of_match_table = ls_pcie_ep_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver_probe(ls_pcie_ep_driver, ls_pcie_ep_probe); diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index b171b6bc15c8..0c389a30ef5d 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -22,7 +22,6 @@ #include #include #include -#include #include "pcie-designware.h" @@ -30,7 +29,6 @@ struct armada8k_pcie { struct dw_pcie *pci; struct clk *clk; struct clk *clk_reg; - struct gpio_desc *reset_gpio; }; #define PCIE_VENDOR_REGS_OFFSET 0x8000 @@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp) struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct armada8k_pcie *pcie = to_armada8k_pcie(pci); - if (pcie->reset_gpio) { - /* assert and then deassert the reset signal */ - gpiod_set_value_cansleep(pcie->reset_gpio, 1); - msleep(100); - gpiod_set_value_cansleep(pcie->reset_gpio, 0); - } dw_pcie_setup_rc(pp); armada8k_pcie_establish_link(pcie); @@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) goto fail_clkreg; } - /* Get reset gpio signal and hold asserted (logically high) */ - pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", - GPIOD_OUT_HIGH); - if (IS_ERR(pcie->reset_gpio)) { - ret = PTR_ERR(pcie->reset_gpio); - goto fail_clkreg; - } - platform_set_drvdata(pdev, pcie); ret = armada8k_add_pcie_port(pcie, pdev); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index a543c45c7224..24f5a775ad34 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -355,6 +355,17 @@ static int dw_pcie_ep_start(struct pci_epc *epc) return pci->ops->start_link(pci); } +static const struct pci_epc_features* +dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + + if (!ep->ops->get_features) + return NULL; + + return ep->ops->get_features(ep); +} + static const struct pci_epc_ops epc_ops = { .write_header = dw_pcie_ep_write_header, .set_bar = dw_pcie_ep_set_bar, @@ -368,6 +379,7 @@ static const struct pci_epc_ops epc_ops = { .raise_irq = dw_pcie_ep_raise_irq, .start = dw_pcie_ep_start, .stop = dw_pcie_ep_stop, + .get_features = dw_pcie_ep_get_features, }; int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) @@ -465,8 +477,10 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, iounmap(msix_tbl); - if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) + if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) { + dev_dbg(pci->dev, "MSI-X entry ctrl set\n"); return -EPERM; + } ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, epc->mem->page_size); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c 
b/drivers/pci/controller/dwc/pcie-designware-host.c index 721d60a5d9e4..25087d3c9a82 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -120,9 +120,9 @@ static void dw_chained_msi_isr(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) +static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) { - struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct pcie_port *pp = irq_data_get_irq_chip_data(d); struct dw_pcie *pci = to_dw_pcie_from_pp(pp); u64 msi_target; @@ -135,61 +135,61 @@ static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) msg->address_hi = upper_32_bits(msi_target); if (pp->ops->get_msi_data) - msg->data = pp->ops->get_msi_data(pp, data->hwirq); + msg->data = pp->ops->get_msi_data(pp, d->hwirq); else - msg->data = data->hwirq; + msg->data = d->hwirq; dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", - (int)data->hwirq, msg->address_hi, msg->address_lo); + (int)d->hwirq, msg->address_hi, msg->address_lo); } -static int dw_pci_msi_set_affinity(struct irq_data *irq_data, +static int dw_pci_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) { return -EINVAL; } -static void dw_pci_bottom_mask(struct irq_data *data) +static void dw_pci_bottom_mask(struct irq_data *d) { - struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct pcie_port *pp = irq_data_get_irq_chip_data(d); unsigned int res, bit, ctrl; unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); if (pp->ops->msi_clear_irq) { - pp->ops->msi_clear_irq(pp, data->hwirq); + pp->ops->msi_clear_irq(pp, d->hwirq); } else { - ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; - bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; - pp->irq_status[ctrl] &= ~(1 << bit); + pp->irq_mask[ctrl] |= BIT(bit); dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, - ~pp->irq_status[ctrl]); + pp->irq_mask[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); } -static void dw_pci_bottom_unmask(struct irq_data *data) +static void dw_pci_bottom_unmask(struct irq_data *d) { - struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct pcie_port *pp = irq_data_get_irq_chip_data(d); unsigned int res, bit, ctrl; unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); if (pp->ops->msi_set_irq) { - pp->ops->msi_set_irq(pp, data->hwirq); + pp->ops->msi_set_irq(pp, d->hwirq); } else { - ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; + ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; - bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; + bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; - pp->irq_status[ctrl] |= 1 << bit; + pp->irq_mask[ctrl] &= ~BIT(bit); dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, - ~pp->irq_status[ctrl]); + pp->irq_mask[ctrl]); } raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -207,7 +207,7 @@ static void dw_pci_bottom_ack(struct irq_data *d) raw_spin_lock_irqsave(&pp->lock, flags); - dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit); + dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); if (pp->ops->msi_irq_ack) pp->ops->msi_irq_ack(d->hwirq, pp); @@ -255,13 +255,13 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, static void dw_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq, 
unsigned int nr_irqs) { - struct irq_data *data = irq_domain_get_irq_data(domain, virq); - struct pcie_port *pp = irq_data_get_irq_chip_data(data); + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct pcie_port *pp = irq_data_get_irq_chip_data(d); unsigned long flags; raw_spin_lock_irqsave(&pp->lock, flags); - bitmap_release_region(pp->msi_irq_in_use, data->hwirq, + bitmap_release_region(pp->msi_irq_in_use, d->hwirq, order_base_2(nr_irqs)); raw_spin_unlock_irqrestore(&pp->lock, flags); @@ -439,7 +439,7 @@ int dw_pcie_host_init(struct pcie_port *pp) if (ret) pci->num_viewport = 2; - if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) { /* * If a specific SoC driver needs to change the * default number of vectors, it needs to implement @@ -512,8 +512,9 @@ error: return ret; } -static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val) +static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val, + bool write) { int ret, type; u32 busdev, cfg_size; @@ -521,9 +522,6 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, void __iomem *va_cfg_base; struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - if (pp->ops->rd_other_conf) - return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); @@ -542,7 +540,11 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, type, cpu_addr, busdev, cfg_size); - ret = dw_pcie_read(va_cfg_base + where, size, val); + if (write) + ret = dw_pcie_write(va_cfg_base + where, size, *val); + else + ret = dw_pcie_read(va_cfg_base + where, size, val); + if (pci->num_viewport <= 2) dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, PCIE_ATU_TYPE_IO, pp->io_base, @@ -551,43 +553,26 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, return ret; } +static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + u32 devfn, int where, int size, u32 *val) +{ + if (pp->ops->rd_other_conf) + return pp->ops->rd_other_conf(pp, bus, devfn, where, + size, val); + + return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val, + false); +} + static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { - int ret, type; - u32 busdev, cfg_size; - u64 cpu_addr; - void __iomem *va_cfg_base; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - if (pp->ops->wr_other_conf) - return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); - - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | - PCIE_ATU_FUNC(PCI_FUNC(devfn)); + return pp->ops->wr_other_conf(pp, bus, devfn, where, + size, val); - if (bus->parent->number == pp->root_bus_nr) { - type = PCIE_ATU_TYPE_CFG0; - cpu_addr = pp->cfg0_base; - cfg_size = pp->cfg0_size; - va_cfg_base = pp->va_cfg0_base; - } else { - type = PCIE_ATU_TYPE_CFG1; - cpu_addr = pp->cfg1_base; - cfg_size = pp->cfg1_size; - va_cfg_base = pp->va_cfg1_base; - } - - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, - type, cpu_addr, - busdev, cfg_size); - ret = dw_pcie_write(va_cfg_base + where, size, val); - if (pci->num_viewport <= 2) - dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, - PCIE_ATU_TYPE_IO, pp->io_base, - pp->io_bus_addr, pp->io_size); - - 
return ret; + return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val, + true); } static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, @@ -665,13 +650,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp) /* Initialize IRQ Status array */ for (ctrl = 0; ctrl < num_ctrls; ctrl++) { + pp->irq_mask[ctrl] = ~0; dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), - 4, ~0); + 4, pp->irq_mask[ctrl]); dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * MSI_REG_CTRL_BLOCK_SIZE), 4, ~0); - pp->irq_status[ctrl] = 0; } /* Setup RC BARs */ diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index c12bf794d69c..932dbd0b34b6 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -13,11 +13,9 @@ #include #include #include -#include #include #include #include -#include #include #include @@ -70,14 +68,10 @@ static const struct dw_pcie_ops dw_pcie_ops = { static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - struct pci_epc *epc = ep->epc; enum pci_barno bar; for (bar = BAR_0; bar <= BAR_5; bar++) dw_pcie_ep_reset_bar(pci, bar); - - epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER; - epc->features |= EPC_FEATURE_MSIX_AVAILABLE; } static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, @@ -100,9 +94,22 @@ static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } +static const struct pci_epc_features dw_plat_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = true, +}; + +static const struct pci_epc_features* +dw_plat_pcie_get_features(struct dw_pcie_ep *ep) +{ + return &dw_plat_pcie_epc_features; +} + static struct dw_pcie_ep_ops pcie_ep_ops = { .ep_init = dw_plat_pcie_ep_init, .raise_irq = dw_plat_pcie_ep_raise_irq, + .get_features = dw_plat_pcie_get_features, }; static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie, diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 93ef8c31fb39..31f6331ca46f 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -22,7 +22,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val) { - if ((uintptr_t)addr & (size - 1)) { + if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } @@ -43,7 +43,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val) int dw_pcie_write(void __iomem *addr, int size, u32 val) { - if ((uintptr_t)addr & (size - 1)) + if (!IS_ALIGNED((uintptr_t)addr, size)) return PCIBIOS_BAD_REGISTER_NUMBER; if (size == 4) @@ -306,7 +306,7 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index, } dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); - dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); + dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE); } int dw_pcie_wait_for_link(struct dw_pcie *pci) diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 9943d8c68335..377f4c0b52da 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -11,6 +11,7 @@ #ifndef _PCIE_DESIGNWARE_H #define _PCIE_DESIGNWARE_H +#include #include #include #include @@ -30,23 +31,25 @@ /* Synopsys-specific PCIe configuration registers */ #define PCIE_PORT_LINK_CONTROL 
0x710 -#define PORT_LINK_MODE_MASK (0x3f << 16) -#define PORT_LINK_MODE_1_LANES (0x1 << 16) -#define PORT_LINK_MODE_2_LANES (0x3 << 16) -#define PORT_LINK_MODE_4_LANES (0x7 << 16) -#define PORT_LINK_MODE_8_LANES (0xf << 16) +#define PORT_LINK_MODE_MASK GENMASK(21, 16) +#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n) +#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1) +#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3) +#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7) +#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf) #define PCIE_PORT_DEBUG0 0x728 #define PORT_LOGIC_LTSSM_STATE_MASK 0x1f #define PORT_LOGIC_LTSSM_STATE_L0 0x11 #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C -#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) -#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) -#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) -#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) -#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) -#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) +#define PORT_LOGIC_SPEED_CHANGE BIT(17) +#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8) +#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n) +#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1) +#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2) +#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4) +#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8) #define PCIE_MSI_ADDR_LO 0x820 #define PCIE_MSI_ADDR_HI 0x824 @@ -55,30 +58,30 @@ #define PCIE_MSI_INTR0_STATUS 0x830 #define PCIE_ATU_VIEWPORT 0x900 -#define PCIE_ATU_REGION_INBOUND (0x1 << 31) -#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) -#define PCIE_ATU_REGION_INDEX2 (0x2 << 0) -#define PCIE_ATU_REGION_INDEX1 (0x1 << 0) -#define PCIE_ATU_REGION_INDEX0 (0x0 << 0) +#define PCIE_ATU_REGION_INBOUND BIT(31) +#define PCIE_ATU_REGION_OUTBOUND 0 +#define PCIE_ATU_REGION_INDEX2 0x2 +#define PCIE_ATU_REGION_INDEX1 0x1 +#define PCIE_ATU_REGION_INDEX0 0x0 #define PCIE_ATU_CR1 0x904 -#define PCIE_ATU_TYPE_MEM (0x0 << 0) -#define PCIE_ATU_TYPE_IO (0x2 << 0) -#define PCIE_ATU_TYPE_CFG0 (0x4 << 0) -#define PCIE_ATU_TYPE_CFG1 (0x5 << 0) +#define PCIE_ATU_TYPE_MEM 0x0 +#define PCIE_ATU_TYPE_IO 0x2 +#define PCIE_ATU_TYPE_CFG0 0x4 +#define PCIE_ATU_TYPE_CFG1 0x5 #define PCIE_ATU_CR2 0x908 -#define PCIE_ATU_ENABLE (0x1 << 31) -#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) +#define PCIE_ATU_ENABLE BIT(31) +#define PCIE_ATU_BAR_MODE_ENABLE BIT(30) #define PCIE_ATU_LOWER_BASE 0x90C #define PCIE_ATU_UPPER_BASE 0x910 #define PCIE_ATU_LIMIT 0x914 #define PCIE_ATU_LOWER_TARGET 0x918 -#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) -#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) -#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) +#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x) +#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x) +#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x) #define PCIE_ATU_UPPER_TARGET 0x91C #define PCIE_MISC_CONTROL_1_OFF 0x8BC -#define PCIE_DBI_RO_WR_EN (0x1 << 0) +#define PCIE_DBI_RO_WR_EN BIT(0) /* * iATU Unroll-specific register definitions @@ -105,7 +108,7 @@ ((region) << 9) #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ - (((region) << 9) | (0x1 << 8)) + (((region) << 9) | BIT(8)) #define MAX_MSI_IRQS 256 #define MAX_MSI_IRQS_PER_CTRL 32 @@ -177,7 +180,7 @@ struct pcie_port { struct irq_domain *msi_domain; dma_addr_t msi_data; u32 num_vectors; - u32 irq_status[MAX_MSI_CTRLS]; + u32 irq_mask[MAX_MSI_CTRLS]; raw_spinlock_t lock; DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); }; @@ -192,6 
+195,7 @@ struct dw_pcie_ep_ops { void (*ep_init)(struct dw_pcie_ep *ep); int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, u16 interrupt_num); + const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep); }; struct dw_pcie_ep { diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index d185ea5fe996..a7f703556790 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -1228,7 +1228,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) pcie->ops = of_device_get_match_data(dev); - pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); + pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); if (IS_ERR(pcie->reset)) { ret = PTR_ERR(pcie->reset); goto err_pm_runtime_put; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 750081c1cb48..eb58dfdaba1b 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -466,7 +466,7 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, } } -struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { +static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { .read_pcie = advk_pci_bridge_emul_pcie_conf_read, .write_pcie = advk_pci_bridge_emul_pcie_conf_write, }; @@ -499,7 +499,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie) bridge->data = pcie; bridge->ops = &advk_pci_bridge_emul_ops; - pci_bridge_emul_init(bridge); + pci_bridge_emul_init(bridge, 0); } diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 9ba4d12c179c..95441a35eceb 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -391,14 +391,6 @@ struct hv_interrupt_entry { u32 data; }; -#define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ - -struct hv_vp_set { - u64 format; /* 0 (HvGenericSetSparse4k) */ - u64 valid_banks; - u64 masks[HV_VP_SET_BANK_COUNT_MAX]; -}; - /* * flags for hv_device_interrupt_target.flags */ @@ -410,7 +402,7 @@ struct hv_device_interrupt_target { u32 flags; union { u64 vp_mask; - struct hv_vp_set vp_set; + struct hv_vpset vp_set; }; }; @@ -420,7 +412,7 @@ struct retarget_msi_interrupt { struct hv_interrupt_entry int_entry; u64 reserved2; struct hv_device_interrupt_target int_target; -} __packed; +} __packed __aligned(8); /* * Driver specific state. 
@@ -460,12 +452,16 @@ struct hv_pcibus_device { struct msi_controller msi_chip; struct irq_domain *irq_domain; - /* hypercall arg, must not cross page boundary */ - struct retarget_msi_interrupt retarget_msi_interrupt_params; - spinlock_t retarget_msi_interrupt_lock; struct workqueue_struct *wq; + + /* hypercall arg, must not cross page boundary */ + struct retarget_msi_interrupt retarget_msi_interrupt_params; + + /* + * Don't put anything here: retarget_msi_interrupt_params must be last + */ }; /* @@ -910,12 +906,12 @@ static void hv_irq_unmask(struct irq_data *data) struct retarget_msi_interrupt *params; struct hv_pcibus_device *hbus; struct cpumask *dest; + cpumask_var_t tmp; struct pci_bus *pbus; struct pci_dev *pdev; unsigned long flags; u32 var_size = 0; - int cpu_vmbus; - int cpu; + int cpu, nr_bank; u64 res; dest = irq_data_get_effective_affinity_mask(data); @@ -955,28 +951,27 @@ static void hv_irq_unmask(struct irq_data *data) */ params->int_target.flags |= HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; - params->int_target.vp_set.valid_banks = - (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; - /* - * var-sized hypercall, var-size starts after vp_mask (thus - * vp_set.format does not count, but vp_set.valid_banks does). - */ - var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; + if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) { + res = 1; + goto exit_unlock; + } - for_each_cpu_and(cpu, dest, cpu_online_mask) { - cpu_vmbus = hv_cpu_number_to_vp_number(cpu); + cpumask_and(tmp, dest, cpu_online_mask); + nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp); + free_cpumask_var(tmp); - if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { - dev_err(&hbus->hdev->device, - "too high CPU %d", cpu_vmbus); - res = 1; - goto exit_unlock; - } - - params->int_target.vp_set.masks[cpu_vmbus / 64] |= - (1ULL << (cpu_vmbus & 63)); + if (nr_bank <= 0) { + res = 1; + goto exit_unlock; } + + /* + * var-sized hypercall, var-size starts after vp_mask (thus + * vp_set.format does not count, but vp_set.valid_bank_mask + * does).
+ */ + var_size = 1 + nr_bank; } else { for_each_cpu_and(cpu, dest, cpu_online_mask) { params->int_target.vp_mask |= diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index fa0fc46edb0c..d3a0419e42f2 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -583,7 +583,7 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) bridge->data = port; bridge->ops = &mvebu_pci_bridge_emul_ops; - pci_bridge_emul_init(bridge); + pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR); } static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 7d05e51205b3..27edcebd1726 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -37,7 +38,12 @@ #define RP_LTSSM_MASK 0x1f #define LTSSM_L0 0xf -#define PCIE_CAP_OFFSET 0x80 +#define S10_RP_TX_CNTRL 0x2004 +#define S10_RP_RXCPL_REG 0x2008 +#define S10_RP_RXCPL_STATUS 0x200C +#define S10_RP_CFG_ADDR(pcie, reg) \ + (((pcie)->hip_base) + (reg) + (1 << 20)) + /* TLP configuration type 0 and 1 */ #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ @@ -49,18 +55,19 @@ #define RP_DEVFN 0 #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) #define TLP_CFGRD_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ - : TLP_FMTTYPE_CFGRD1) << 24) | \ - TLP_PAYLOAD_SIZE) + ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgrd0 \ + : pcie->pcie_data->cfgrd1) << 24) | \ + TLP_PAYLOAD_SIZE) #define TLP_CFGWR_DW0(pcie, bus) \ - ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGWR0 \ - : TLP_FMTTYPE_CFGWR1) << 24) | \ - TLP_PAYLOAD_SIZE) + ((((bus == pcie->root_bus_nr) ? 
pcie->pcie_data->cfgwr0 \ + : pcie->pcie_data->cfgwr1) << 24) | \ + TLP_PAYLOAD_SIZE) #define TLP_CFG_DW1(pcie, tag, be) \ - (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) + (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) #define TLP_CFG_DW2(bus, devfn, offset) \ (((bus) << 24) | ((devfn) << 16) | (offset)) #define TLP_COMP_STATUS(s) (((s) >> 13) & 7) +#define TLP_BYTE_COUNT(s) (((s) >> 0) & 0xfff) #define TLP_HDR_SIZE 3 #define TLP_LOOP 500 @@ -69,14 +76,47 @@ #define DWORD_MASK 3 +#define S10_TLP_FMTTYPE_CFGRD0 0x05 +#define S10_TLP_FMTTYPE_CFGRD1 0x04 +#define S10_TLP_FMTTYPE_CFGWR0 0x45 +#define S10_TLP_FMTTYPE_CFGWR1 0x44 + +enum altera_pcie_version { + ALTERA_PCIE_V1 = 0, + ALTERA_PCIE_V2, +}; + struct altera_pcie { struct platform_device *pdev; - void __iomem *cra_base; /* DT Cra */ + void __iomem *cra_base; + void __iomem *hip_base; int irq; u8 root_bus_nr; struct irq_domain *irq_domain; struct resource bus_range; struct list_head resources; + const struct altera_pcie_data *pcie_data; +}; + +struct altera_pcie_ops { + int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value); + void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers, + u32 data, bool align); + bool (*get_link_status)(struct altera_pcie *pcie); + int (*rp_read_cfg)(struct altera_pcie *pcie, int where, + int size, u32 *value); + int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno, + int where, int size, u32 value); +}; + +struct altera_pcie_data { + const struct altera_pcie_ops *ops; + enum altera_pcie_version version; + u32 cap_offset; /* PCIe capability structure register offset */ + u32 cfgrd0; + u32 cfgrd1; + u32 cfgwr0; + u32 cfgwr1; }; struct tlp_rp_regpair_t { @@ -101,6 +141,15 @@ static bool altera_pcie_link_up(struct altera_pcie *pcie) return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); } +static bool s10_altera_pcie_link_up(struct altera_pcie *pcie) +{ + void __iomem *addr = S10_RP_CFG_ADDR(pcie, + pcie->pcie_data->cap_offset + + PCI_EXP_LNKSTA); + + return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA); +} + /* * Altera PCIe port uses BAR0 of RC's configuration space as the translation * from PCI bus to native BUS. 
Entire DDR region is mapped into PCIe space @@ -128,12 +177,18 @@ static void tlp_write_tx(struct altera_pcie *pcie, cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); } +static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl) +{ + cra_writel(pcie, reg0, RP_TX_REG0); + cra_writel(pcie, ctrl, S10_RP_TX_CNTRL); +} + static bool altera_pcie_valid_device(struct altera_pcie *pcie, struct pci_bus *bus, int dev) { /* If there is no link, then there is no device */ if (bus->number != pcie->root_bus_nr) { - if (!altera_pcie_link_up(pcie)) + if (!pcie->pcie_data->ops->get_link_status(pcie)) return false; } @@ -183,6 +238,53 @@ static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) return PCIBIOS_DEVICE_NOT_FOUND; } +static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value) +{ + u32 ctrl; + u32 comp_status; + u32 dw[4]; + u32 count; + struct device *dev = &pcie->pdev->dev; + + for (count = 0; count < TLP_LOOP; count++) { + ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS); + if (ctrl & RP_RXCPL_SOP) { + /* Read first DW */ + dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG); + break; + } + + udelay(5); + } + + /* SOP detection failed, return error */ + if (count == TLP_LOOP) + return PCIBIOS_DEVICE_NOT_FOUND; + + count = 1; + + /* Poll for EOP */ + while (count < ARRAY_SIZE(dw)) { + ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS); + dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG); + if (ctrl & RP_RXCPL_EOP) { + comp_status = TLP_COMP_STATUS(dw[1]); + if (comp_status) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) && + count == 4) + *value = dw[3]; + + return PCIBIOS_SUCCESSFUL; + } + } + + dev_warn(dev, "Malformed TLP packet\n"); + + return PCIBIOS_DEVICE_NOT_FOUND; +} + static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, u32 data, bool align) { @@ -210,6 +312,15 @@ static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, tlp_write_tx(pcie, &tlp_rp_regdata); } +static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers, + u32 data, bool dummy) +{ + s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP); + s10_tlp_write_tx(pcie, headers[1], 0); + s10_tlp_write_tx(pcie, headers[2], 0); + s10_tlp_write_tx(pcie, data, RP_TX_EOP); +} + static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, int where, u8 byte_en, u32 *value) { @@ -219,9 +330,9 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); headers[2] = TLP_CFG_DW2(bus, devfn, where); - tlp_write_packet(pcie, headers, 0, false); + pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false); - return tlp_read_packet(pcie, value); + return pcie->pcie_data->ops->tlp_read_pkt(pcie, value); } static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, @@ -236,11 +347,13 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, /* check alignment to Qword */ if ((where & 0x7) == 0) - tlp_write_packet(pcie, headers, value, true); + pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, + value, true); else - tlp_write_packet(pcie, headers, value, false); + pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, + value, false); - ret = tlp_read_packet(pcie, NULL); + ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL); if (ret != PCIBIOS_SUCCESSFUL) return ret; @@ -254,6 +367,53 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, return PCIBIOS_SUCCESSFUL; } +static int s10_rp_read_cfg(struct altera_pcie 
*pcie, int where, + int size, u32 *value) +{ + void __iomem *addr = S10_RP_CFG_ADDR(pcie, where); + + switch (size) { + case 1: + *value = readb(addr); + break; + case 2: + *value = readw(addr); + break; + default: + *value = readl(addr); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno, + int where, int size, u32 value) +{ + void __iomem *addr = S10_RP_CFG_ADDR(pcie, where); + + switch (size) { + case 1: + writeb(value, addr); + break; + case 2: + writew(value, addr); + break; + default: + writel(value, addr); + break; + } + + /* + * Monitor changes to PCI_PRIMARY_BUS register on root port + * and update local copy of root bus number accordingly. + */ + if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS) + pcie->root_bus_nr = value & 0xff; + + return PCIBIOS_SUCCESSFUL; +} + static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int where, int size, u32 *value) @@ -262,6 +422,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, u32 data; u8 byte_en; + if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg) + return pcie->pcie_data->ops->rp_read_cfg(pcie, where, + size, value); + switch (size) { case 1: byte_en = 1 << (where & 3); @@ -302,6 +466,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, u32 shift = 8 * (where & 3); u8 byte_en; + if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg) + return pcie->pcie_data->ops->rp_write_cfg(pcie, busno, + where, size, value); + switch (size) { case 1: data32 = (value & 0xff) << shift; @@ -365,7 +533,8 @@ static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, int ret; ret = _altera_pcie_cfg_read(pcie, busno, devfn, - PCIE_CAP_OFFSET + offset, sizeof(*value), + pcie->pcie_data->cap_offset + offset, + sizeof(*value), &data); *value = data; return ret; @@ -375,7 +544,8 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, unsigned int devfn, int offset, u16 value) { return _altera_pcie_cfg_write(pcie, busno, devfn, - PCIE_CAP_OFFSET + offset, sizeof(value), + pcie->pcie_data->cap_offset + offset, + sizeof(value), value); } @@ -403,7 +573,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie) /* Wait for link is up */ start_jiffies = jiffies; for (;;) { - if (altera_pcie_link_up(pcie)) + if (pcie->pcie_data->ops->get_link_status(pcie)) break; if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { @@ -418,7 +588,7 @@ static void altera_pcie_retrain(struct altera_pcie *pcie) { u16 linkcap, linkstat, linkctl; - if (!altera_pcie_link_up(pcie)) + if (!pcie->pcie_data->ops->get_link_status(pcie)) return; /* @@ -540,12 +710,20 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) struct device *dev = &pcie->pdev->dev; struct platform_device *pdev = pcie->pdev; struct resource *cra; + struct resource *hip; cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); pcie->cra_base = devm_ioremap_resource(dev, cra); if (IS_ERR(pcie->cra_base)) return PTR_ERR(pcie->cra_base); + if (pcie->pcie_data->version == ALTERA_PCIE_V2) { + hip = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Hip"); + pcie->hip_base = devm_ioremap_resource(&pdev->dev, hip); + if (IS_ERR(pcie->hip_base)) + return PTR_ERR(pcie->hip_base); + } + /* setup IRQ */ pcie->irq = platform_get_irq(pdev, 0); if (pcie->irq < 0) { @@ -562,6 +740,48 @@ static void altera_pcie_host_init(struct altera_pcie *pcie) altera_pcie_retrain(pcie); } +static const struct 
altera_pcie_ops altera_pcie_ops_1_0 = { + .tlp_read_pkt = tlp_read_packet, + .tlp_write_pkt = tlp_write_packet, + .get_link_status = altera_pcie_link_up, +}; + +static const struct altera_pcie_ops altera_pcie_ops_2_0 = { + .tlp_read_pkt = s10_tlp_read_packet, + .tlp_write_pkt = s10_tlp_write_packet, + .get_link_status = s10_altera_pcie_link_up, + .rp_read_cfg = s10_rp_read_cfg, + .rp_write_cfg = s10_rp_write_cfg, +}; + +static const struct altera_pcie_data altera_pcie_1_0_data = { + .ops = &altera_pcie_ops_1_0, + .cap_offset = 0x80, + .version = ALTERA_PCIE_V1, + .cfgrd0 = TLP_FMTTYPE_CFGRD0, + .cfgrd1 = TLP_FMTTYPE_CFGRD1, + .cfgwr0 = TLP_FMTTYPE_CFGWR0, + .cfgwr1 = TLP_FMTTYPE_CFGWR1, +}; + +static const struct altera_pcie_data altera_pcie_2_0_data = { + .ops = &altera_pcie_ops_2_0, + .version = ALTERA_PCIE_V2, + .cap_offset = 0x70, + .cfgrd0 = S10_TLP_FMTTYPE_CFGRD0, + .cfgrd1 = S10_TLP_FMTTYPE_CFGRD1, + .cfgwr0 = S10_TLP_FMTTYPE_CFGWR0, + .cfgwr1 = S10_TLP_FMTTYPE_CFGWR1, +}; + +static const struct of_device_id altera_pcie_of_match[] = { + {.compatible = "altr,pcie-root-port-1.0", + .data = &altera_pcie_1_0_data }, + {.compatible = "altr,pcie-root-port-2.0", + .data = &altera_pcie_2_0_data }, + {}, +}; + static int altera_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -570,6 +790,7 @@ static int altera_pcie_probe(struct platform_device *pdev) struct pci_bus *child; struct pci_host_bridge *bridge; int ret; + const struct of_device_id *match; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); if (!bridge) @@ -578,6 +799,12 @@ static int altera_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; + match = of_match_device(altera_pcie_of_match, &pdev->dev); + if (!match) + return -ENODEV; + + pcie->pcie_data = match->data; + ret = altera_pcie_parse_dt(pcie); if (ret) { dev_err(dev, "Parsing DT failed\n"); @@ -628,11 +855,6 @@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } -static const struct of_device_id altera_pcie_of_match[] = { - { .compatible = "altr,pcie-root-port-1.0", }, - {}, -}; - static struct platform_driver altera_pcie_driver = { .probe = altera_pcie_probe, .driver = { diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c index c3a088910f48..def7820cb824 100644 --- a/drivers/pci/controller/pcie-cadence-ep.c +++ b/drivers/pci/controller/pcie-cadence-ep.c @@ -396,21 +396,21 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) cfg |= BIT(epf->func_no); cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); - /* - * The PCIe links are automatically established by the controller - * once for all at powerup: the software can neither start nor stop - * those links later at runtime. - * - * Then we only have to notify the EP core that our links are already - * established. However we don't call directly pci_epc_linkup() because - * we've already locked the epc->lock. 
- */ - list_for_each_entry(epf, &epc->pci_epf, list) - pci_epf_linkup(epf); - return 0; } +static const struct pci_epc_features cdns_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, +}; + +static const struct pci_epc_features* +cdns_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) +{ + return &cdns_pcie_epc_features; +} + static const struct pci_epc_ops cdns_pcie_epc_ops = { .write_header = cdns_pcie_ep_write_header, .set_bar = cdns_pcie_ep_set_bar, @@ -421,6 +421,7 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = { .get_msi = cdns_pcie_ep_get_msi, .raise_irq = cdns_pcie_ep_raise_irq, .start = cdns_pcie_ep_start, + .get_features = cdns_pcie_ep_get_features, }; static const struct of_device_id cdns_pcie_ep_of_match[] = { diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 55e471c18e8d..0b6c72804e03 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -90,6 +90,12 @@ #define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) #define PCIE_AXI_WINDOW0 0x448 #define WIN_ENABLE BIT(7) +/* + * Define PCIe to AHB window size as 2^33 to support max 8GB address space + * translate, support least 4GB DRAM size access from EP DMA(physical DRAM + * start from 0x40000000). + */ +#define PCIE2AHB_SIZE 0x21 /* PCIe V2 configuration transaction header */ #define PCIE_CFG_HEADER0 0x460 @@ -654,7 +660,6 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) struct resource *mem = &pcie->mem; const struct mtk_pcie_soc *soc = port->pcie->soc; u32 val; - size_t size; int err; /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ @@ -706,15 +711,15 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) mtk_pcie_enable_msi(port); /* Set AHB to PCIe translation windows */ - size = mem->end - mem->start; - val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); + val = lower_32_bits(mem->start) | + AHB2PCIE_SIZE(fls(resource_size(mem))); writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); val = upper_32_bits(mem->start); writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); /* Set PCIe to AXI translation memory space.*/ - val = fls(0xffffffff) | WIN_ENABLE; + val = PCIE2AHB_SIZE | WIN_ENABLE; writel(val, port->base + PCIE_AXI_WINDOW0); return 0; diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index b8163c56a142..a5d799e2dff2 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -499,12 +499,21 @@ static int rockchip_pcie_ep_start(struct pci_epc *epc) rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG); - list_for_each_entry(epf, &epc->pci_epf, list) - pci_epf_linkup(epf); - return 0; } +static const struct pci_epc_features rockchip_pcie_epc_features = { + .linkup_notifier = false, + .msi_capable = true, + .msix_capable = false, +}; + +static const struct pci_epc_features* +rockchip_pcie_ep_get_features(struct pci_epc *epc, u8 func_no) +{ + return &rockchip_pcie_epc_features; +} + static const struct pci_epc_ops rockchip_pcie_epc_ops = { .write_header = rockchip_pcie_ep_write_header, .set_bar = rockchip_pcie_ep_set_bar, @@ -515,6 +524,7 @@ static const struct pci_epc_ops rockchip_pcie_epc_ops = { .get_msi = rockchip_pcie_ep_get_msi, .raise_irq = rockchip_pcie_ep_raise_irq, .start = rockchip_pcie_ep_start, + .get_features = rockchip_pcie_ep_get_features, }; static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip, diff 
--git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 3890812cdf87..cf6816b55b5e 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -571,6 +571,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) LIST_HEAD(resources); resource_size_t offset[2] = {0}; resource_size_t membar2_offset = 0x2000, busn_start = 0; + struct pci_bus *child; /* * Shadow registers may exist in certain VMD device ids which allow @@ -698,7 +699,19 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) vmd_attach_resources(vmd); vmd_setup_dma_ops(vmd); dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); - pci_rescan_bus(vmd->bus); + + pci_scan_child_bus(vmd->bus); + pci_assign_unassigned_bus_resources(vmd->bus); + + /* + * VMD root buses are virtual and don't return true on pci_is_pcie() + * and will fail pcie_bus_configure_settings() early. It can instead be + * run on each of the real root ports. + */ + list_for_each_entry(child, &vmd->bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(vmd->bus); WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, "domain"), "Can't create symlink to domain\n"); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 3e86fa3c7da3..d0b91da49bf4 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -47,9 +47,8 @@ struct pci_epf_test { void *reg[6]; struct pci_epf *epf; enum pci_barno test_reg_bar; - bool linkup_notifier; - bool msix_available; struct delayed_work cmd_handler; + const struct pci_epc_features *epc_features; }; struct pci_epf_test_reg { @@ -71,11 +70,6 @@ static struct pci_epf_header test_header = { .interrupt_pin = PCI_INTERRUPT_INTA, }; -struct pci_epf_test_data { - enum pci_barno test_reg_bar; - bool linkup_notifier; -}; - static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; static int pci_epf_test_copy(struct pci_epf_test *epf_test) @@ -175,7 +169,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test) goto err_map_addr; } - memcpy(buf, src_addr, reg->size); + memcpy_fromio(buf, src_addr, reg->size); crc32 = crc32_le(~0, buf, reg->size); if (crc32 != reg->checksum) @@ -230,7 +224,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) get_random_bytes(buf, reg->size); reg->checksum = crc32_le(~0, buf, reg->size); - memcpy(dst_addr, buf, reg->size); + memcpy_toio(dst_addr, buf, reg->size); /* * wait 1ms inorder for the write to complete. Without this delay L3 @@ -402,13 +396,15 @@ static int pci_epf_test_set_bar(struct pci_epf *epf) struct device *dev = &epf->dev; struct pci_epf_test *epf_test = epf_get_drvdata(epf); enum pci_barno test_reg_bar = epf_test->test_reg_bar; + const struct pci_epc_features *epc_features; + + epc_features = epf_test->epc_features; for (bar = BAR_0; bar <= BAR_5; bar++) { epf_bar = &epf->bar[bar]; - epf_bar->flags |= upper_32_bits(epf_bar->size) ? 
- PCI_BASE_ADDRESS_MEM_TYPE_64 : - PCI_BASE_ADDRESS_MEM_TYPE_32; + if (!!(epc_features->reserved_bar & (1 << bar))) + continue; ret = pci_epc_set_bar(epc, epf->func_no, epf_bar); if (ret) { @@ -433,9 +429,13 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) { struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct device *dev = &epf->dev; + struct pci_epf_bar *epf_bar; void *base; int bar; enum pci_barno test_reg_bar = epf_test->test_reg_bar; + const struct pci_epc_features *epc_features; + + epc_features = epf_test->epc_features; base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), test_reg_bar); @@ -446,37 +446,69 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) epf_test->reg[test_reg_bar] = base; for (bar = BAR_0; bar <= BAR_5; bar++) { + epf_bar = &epf->bar[bar]; if (bar == test_reg_bar) continue; + + if (!!(epc_features->reserved_bar & (1 << bar))) + continue; + base = pci_epf_alloc_space(epf, bar_size[bar], bar); if (!base) dev_err(dev, "Failed to allocate space for BAR%d\n", bar); epf_test->reg[bar] = base; + if (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) + bar++; } return 0; } +static void pci_epf_configure_bar(struct pci_epf *epf, + const struct pci_epc_features *epc_features) +{ + struct pci_epf_bar *epf_bar; + bool bar_fixed_64bit; + int i; + + for (i = BAR_0; i <= BAR_5; i++) { + epf_bar = &epf->bar[i]; + bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i)); + if (bar_fixed_64bit) + epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + if (epc_features->bar_fixed_size[i]) + bar_size[i] = epc_features->bar_fixed_size[i]; + } +} + static int pci_epf_test_bind(struct pci_epf *epf) { int ret; struct pci_epf_test *epf_test = epf_get_drvdata(epf); struct pci_epf_header *header = epf->header; + const struct pci_epc_features *epc_features; + enum pci_barno test_reg_bar = BAR_0; struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; + bool linkup_notifier = false; + bool msix_capable = false; + bool msi_capable = true; if (WARN_ON_ONCE(!epc)) return -EINVAL; - if (epc->features & EPC_FEATURE_NO_LINKUP_NOTIFIER) - epf_test->linkup_notifier = false; - else - epf_test->linkup_notifier = true; - - epf_test->msix_available = epc->features & EPC_FEATURE_MSIX_AVAILABLE; + epc_features = pci_epc_get_features(epc, epf->func_no); + if (epc_features) { + linkup_notifier = epc_features->linkup_notifier; + msix_capable = epc_features->msix_capable; + msi_capable = epc_features->msi_capable; + test_reg_bar = pci_epc_get_first_free_bar(epc_features); + pci_epf_configure_bar(epf, epc_features); + } - epf_test->test_reg_bar = EPC_FEATURE_GET_BAR(epc->features); + epf_test->test_reg_bar = test_reg_bar; + epf_test->epc_features = epc_features; ret = pci_epc_write_header(epc, epf->func_no, header); if (ret) { @@ -492,13 +524,15 @@ static int pci_epf_test_bind(struct pci_epf *epf) if (ret) return ret; - ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts); - if (ret) { - dev_err(dev, "MSI configuration failed\n"); - return ret; + if (msi_capable) { + ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts); + if (ret) { + dev_err(dev, "MSI configuration failed\n"); + return ret; + } } - if (epf_test->msix_available) { + if (msix_capable) { ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts); if (ret) { dev_err(dev, "MSI-X configuration failed\n"); @@ -506,7 +540,7 @@ static int pci_epf_test_bind(struct pci_epf *epf) } } - if (!epf_test->linkup_notifier) + if (!linkup_notifier) queue_work(kpcitest_workqueue, 
&epf_test->cmd_handler.work); return 0; @@ -523,17 +557,6 @@ static int pci_epf_test_probe(struct pci_epf *epf) { struct pci_epf_test *epf_test; struct device *dev = &epf->dev; - const struct pci_epf_device_id *match; - struct pci_epf_test_data *data; - enum pci_barno test_reg_bar = BAR_0; - bool linkup_notifier = true; - - match = pci_epf_match_device(pci_epf_test_ids, epf); - data = (struct pci_epf_test_data *)match->driver_data; - if (data) { - test_reg_bar = data->test_reg_bar; - linkup_notifier = data->linkup_notifier; - } epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); if (!epf_test) @@ -541,8 +564,6 @@ static int pci_epf_test_probe(struct pci_epf *epf) epf->header = &test_header; epf_test->epf = epf; - epf_test->test_reg_bar = test_reg_bar; - epf_test->linkup_notifier = linkup_notifier; INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 094dcc3203b8..e4712a0f249c 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -83,6 +83,59 @@ err: } EXPORT_SYMBOL_GPL(pci_epc_get); +/** + * pci_epc_get_first_free_bar() - helper to get first unreserved BAR + * @epc_features: pci_epc_features structure that holds the reserved bar bitmap + * + * Invoke to get the first unreserved BAR that can be used for endpoint + * function. For any incorrect value in reserved_bar return '0'. + */ +unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features + *epc_features) +{ + int free_bar; + + if (!epc_features) + return 0; + + free_bar = ffz(epc_features->reserved_bar); + if (free_bar > 5) + return 0; + + return free_bar; +} +EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar); + +/** + * pci_epc_get_features() - get the features supported by EPC + * @epc: the features supported by *this* EPC device will be returned + * @func_no: the features supported by the EPC device specific to the + * endpoint function with func_no will be returned + * + * Invoke to get the features provided by the EPC which may be + * specific to an endpoint function. Returns pci_epc_features on success + * and NULL for any failures. + */ +const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, + u8 func_no) +{ + const struct pci_epc_features *epc_features; + unsigned long flags; + + if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions) + return NULL; + + if (!epc->ops->get_features) + return NULL; + + spin_lock_irqsave(&epc->lock, flags); + epc_features = epc->ops->get_features(epc, func_no); + spin_unlock_irqrestore(&epc->lock, flags); + + return epc_features; +} +EXPORT_SYMBOL_GPL(pci_epc_get_features); + /** * pci_epc_stop() - stop the PCI link * @epc: the link of the EPC device that has to be stopped diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 825fa24427a3..8bfdcd291196 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -131,7 +131,9 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) epf->bar[bar].phys_addr = phys_addr; epf->bar[bar].size = size; epf->bar[bar].barno = bar; - epf->bar[bar].flags = PCI_BASE_ADDRESS_SPACE_MEMORY; + epf->bar[bar].flags |= upper_32_bits(size) ? 
+ PCI_BASE_ADDRESS_MEM_TYPE_64 : + PCI_BASE_ADDRESS_MEM_TYPE_32; return space; } diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h index b89f850c3a4e..e90a4ebf6550 100644 --- a/drivers/pci/hotplug/ibmphp.h +++ b/drivers/pci/hotplug/ibmphp.h @@ -378,7 +378,6 @@ int ibmphp_add_pfmem_from_mem(struct resource_node *); struct bus_node *ibmphp_find_res_bus(u8); void ibmphp_print_test(void); /* for debugging purposes */ -void ibmphp_hpc_initvars(void); int ibmphp_hpc_readslot(struct slot *, u8, u8 *); int ibmphp_hpc_writeslot(struct slot *, u8); void ibmphp_lock_operations(void); diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 08a58e911fc2..17124254d897 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c @@ -1277,8 +1277,6 @@ static int __init ibmphp_init(void) ibmphp_debug = debug; - ibmphp_hpc_initvars(); - for (i = 0; i < 16; i++) irqs[i] = 0; diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index 752c384cbd4c..508a62a6b5f9 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c @@ -15,13 +15,13 @@ #include #include +#include #include #include #include #include #include #include -#include #include #include "ibmphp.h" @@ -88,10 +88,10 @@ static int to_debug = 0; //---------------------------------------------------------------------------- // global variables //---------------------------------------------------------------------------- -static struct mutex sem_hpcaccess; // lock access to HPC -static struct semaphore semOperations; // lock all operations and +static DEFINE_MUTEX(sem_hpcaccess); // lock access to HPC +static DEFINE_MUTEX(operations_mutex); // lock all operations and // access to data structures -static struct semaphore sem_exit; // make sure polling thread goes away +static DECLARE_COMPLETION(exit_complete); // make sure polling thread goes away static struct task_struct *ibmphp_poll_thread; //---------------------------------------------------------------------------- // local function prototypes @@ -109,23 +109,6 @@ static int hpc_wait_ctlr_notworking(int, struct controller *, void __iomem *, u8 //---------------------------------------------------------------------------- -/*---------------------------------------------------------------------- -* Name: ibmphp_hpc_initvars -* -* Action: initialize semaphores and variables -*---------------------------------------------------------------------*/ -void __init ibmphp_hpc_initvars(void) -{ - debug("%s - Entry\n", __func__); - - mutex_init(&sem_hpcaccess); - sema_init(&semOperations, 1); - sema_init(&sem_exit, 0); - to_debug = 0; - - debug("%s - Exit\n", __func__); -} - /*---------------------------------------------------------------------- * Name: i2c_ctrl_read * @@ -780,7 +763,7 @@ void free_hpc_access(void) *---------------------------------------------------------------------*/ void ibmphp_lock_operations(void) { - down(&semOperations); + mutex_lock(&operations_mutex); to_debug = 1; } @@ -790,7 +773,7 @@ void ibmphp_lock_operations(void) void ibmphp_unlock_operations(void) { debug("%s - Entry\n", __func__); - up(&semOperations); + mutex_unlock(&operations_mutex); to_debug = 0; debug("%s - Exit\n", __func__); } @@ -816,7 +799,7 @@ static int poll_hpc(void *data) while (!kthread_should_stop()) { /* try to get the lock to do some kind of hardware access */ - down(&semOperations); + mutex_lock(&operations_mutex); switch (poll_state) { case POLL_LATCH_REGISTER: @@ -871,13 
+854,13 @@ static int poll_hpc(void *data) break; case POLL_SLEEP: /* don't sleep with a lock on the hardware */ - up(&semOperations); + mutex_unlock(&operations_mutex); msleep(POLL_INTERVAL_SEC * 1000); if (kthread_should_stop()) goto out_sleep; - down(&semOperations); + mutex_lock(&operations_mutex); if (poll_count >= POLL_LATCH_CNT) { poll_count = 0; @@ -887,12 +870,12 @@ static int poll_hpc(void *data) break; } /* give up the hardware semaphore */ - up(&semOperations); + mutex_unlock(&operations_mutex); /* sleep for a short time just for good measure */ out_sleep: msleep(100); } - up(&sem_exit); + complete(&exit_complete); debug("%s - Exit\n", __func__); return 0; } @@ -1060,9 +1043,9 @@ void __exit ibmphp_hpc_stop_poll_thread(void) debug("after locking operations\n"); // wait for poll thread to exit - debug("before sem_exit down\n"); - down(&sem_exit); - debug("after sem_exit down\n"); + debug("before exit_complete down\n"); + wait_for_completion(&exit_complete); + debug("after exit_completion down\n"); // cleanup debug("before free_hpc_access\n"); @@ -1070,8 +1053,6 @@ void __exit ibmphp_hpc_stop_poll_thread(void) debug("after free_hpc_access\n"); ibmphp_unlock_operations(); debug("after unlock operations\n"); - up(&sem_exit); - debug("after sem exit up\n"); debug("%s - Exit\n", __func__); } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 7dd443aea5a5..6a2365cd794e 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -156,9 +156,9 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd, slot_ctrl |= (cmd & mask); ctrl->cmd_busy = 1; smp_mb(); + ctrl->slot_ctrl = slot_ctrl; pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); ctrl->cmd_started = jiffies; - ctrl->slot_ctrl = slot_ctrl; /* * Controllers with the Intel CF118 and similar errata advertise @@ -736,12 +736,25 @@ void pcie_clear_hotplug_events(struct controller *ctrl) void pcie_enable_interrupt(struct controller *ctrl) { - pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE); + u16 mask; + + mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; + pcie_write_cmd(ctrl, mask, mask); } void pcie_disable_interrupt(struct controller *ctrl) { - pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE); + u16 mask; + + /* + * Mask hot-plug interrupt to prevent it triggering immediately + * when the link goes inactive (we still get PME when any of the + * enabled events is detected). Same goes with Link Layer State + * changed event which generates PME immediately when the link goes + * inactive so mask it as well. + */ + mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; + pcie_write_cmd(ctrl, 0, mask); } /* @@ -920,3 +933,5 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401, + PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 4c0b47867258..73986825d221 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -532,7 +532,7 @@ error_attrs: } static struct msi_desc * -msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) +msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd) { struct irq_affinity_desc *masks = NULL; struct msi_desc *entry; @@ -597,7 +597,7 @@ static int msi_verify_entries(struct pci_dev *dev) * which could have been allocated. 
*/ static int msi_capability_init(struct pci_dev *dev, int nvec, - const struct irq_affinity *affd) + struct irq_affinity *affd) { struct msi_desc *entry; int ret; @@ -669,7 +669,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, struct msix_entry *entries, int nvec, - const struct irq_affinity *affd) + struct irq_affinity *affd) { struct irq_affinity_desc *curmsk, *masks = NULL; struct msi_desc *entry; @@ -736,7 +736,7 @@ static void msix_program_entries(struct pci_dev *dev, * requested MSI-X entries with allocated irqs or non-zero for otherwise. **/ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, - int nvec, const struct irq_affinity *affd) + int nvec, struct irq_affinity *affd) { int ret; u16 control; @@ -932,7 +932,7 @@ int pci_msix_vec_count(struct pci_dev *dev) EXPORT_SYMBOL(pci_msix_vec_count); static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, - int nvec, const struct irq_affinity *affd) + int nvec, struct irq_affinity *affd) { int nr_entries; int i, j; @@ -1018,7 +1018,7 @@ int pci_msi_enabled(void) EXPORT_SYMBOL(pci_msi_enabled); static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, - const struct irq_affinity *affd) + struct irq_affinity *affd) { int nvec; int rc; @@ -1035,13 +1035,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, if (maxvec < minvec) return -ERANGE; - /* - * If the caller is passing in sets, we can't support a range of - * vectors. The caller needs to handle that. - */ - if (affd && affd->nr_sets && minvec != maxvec) - return -EINVAL; - if (WARN_ON_ONCE(dev->msi_enabled)) return -EINVAL; @@ -1086,20 +1079,13 @@ EXPORT_SYMBOL(pci_enable_msi); static int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, - int maxvec, const struct irq_affinity *affd) + int maxvec, struct irq_affinity *affd) { int rc, nvec = maxvec; if (maxvec < minvec) return -ERANGE; - /* - * If the caller is passing in sets, we can't support a range of - * supported vectors. The caller needs to handle that. - */ - if (affd && affd->nr_sets && minvec != maxvec) - return -EINVAL; - if (WARN_ON_ONCE(dev->msix_enabled)) return -EINVAL; @@ -1165,9 +1151,9 @@ EXPORT_SYMBOL(pci_enable_msix_range); */ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags, - const struct irq_affinity *affd) + struct irq_affinity *affd) { - static const struct irq_affinity msi_default_affd; + struct irq_affinity msi_default_affd = {0}; int msix_vecs = -ENOSPC; int msi_vecs = -ENOSPC; @@ -1196,6 +1182,13 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, /* use legacy irq if allowed */ if (flags & PCI_IRQ_LEGACY) { if (min_vecs == 1 && dev->irq) { + /* + * Invoke the affinity spreading logic to ensure that + * the device driver can adjust queue configuration + * for the single interrupt case. + */ + if (affd) + irq_create_affinity_masks(1, affd); pci_intx(dev, 1); return 1; } diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 4c4217d0c3f1..3d32da15c215 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -113,7 +113,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent, * a fake root for all functions of a multi-function * device we go down them as well. 
*/ - if (!strcmp(node->name, "multifunc-device")) { + if (of_node_name_eq(node, "multifunc-device")) { for_each_child_of_node(node, node2) { if (__of_pci_pci_compare(node2, devfn)) { of_node_put(node); diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c index 129738362d90..83fb077d0b41 100644 --- a/drivers/pci/pci-bridge-emul.c +++ b/drivers/pci/pci-bridge-emul.c @@ -24,29 +24,6 @@ #define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END #define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2) -/* - * Initialize a pci_bridge_emul structure to represent a fake PCI - * bridge configuration space. The caller needs to have initialized - * the PCI configuration space with whatever values make sense - * (typically at least vendor, device, revision), the ->ops pointer, - * and optionally ->data and ->has_pcie. - */ -void pci_bridge_emul_init(struct pci_bridge_emul *bridge) -{ - bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16; - bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE; - bridge->conf.cache_line_size = 0x10; - bridge->conf.status = PCI_STATUS_CAP_LIST; - - if (bridge->has_pcie) { - bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START; - bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP; - /* Set PCIe v2, root port, slot support */ - bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | - PCI_EXP_FLAGS_SLOT; - } -} - struct pci_bridge_reg_behavior { /* Read-only bits */ u32 ro; @@ -283,6 +260,61 @@ const static struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = { }, }; +/* + * Initialize a pci_bridge_emul structure to represent a fake PCI + * bridge configuration space. The caller needs to have initialized + * the PCI configuration space with whatever values make sense + * (typically at least vendor, device, revision), the ->ops pointer, + * and optionally ->data and ->has_pcie. + */ +int pci_bridge_emul_init(struct pci_bridge_emul *bridge, + unsigned int flags) +{ + bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16; + bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE; + bridge->conf.cache_line_size = 0x10; + bridge->conf.status = PCI_STATUS_CAP_LIST; + bridge->pci_regs_behavior = kmemdup(pci_regs_behavior, + sizeof(pci_regs_behavior), + GFP_KERNEL); + if (!bridge->pci_regs_behavior) + return -ENOMEM; + + if (bridge->has_pcie) { + bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START; + bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP; + /* Set PCIe v2, root port, slot support */ + bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | + PCI_EXP_FLAGS_SLOT; + bridge->pcie_cap_regs_behavior = + kmemdup(pcie_cap_regs_behavior, + sizeof(pcie_cap_regs_behavior), + GFP_KERNEL); + if (!bridge->pcie_cap_regs_behavior) { + kfree(bridge->pci_regs_behavior); + return -ENOMEM; + } + } + + if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) { + bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].ro = ~0; + bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].rw = 0; + } + + return 0; +} + +/* + * Cleanup a pci_bridge_emul structure that was previously initilized + * using pci_bridge_emul_init(). + */ +void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge) +{ + if (bridge->has_pcie) + kfree(bridge->pcie_cap_regs_behavior); + kfree(bridge->pci_regs_behavior); +} + /* * Should be called by the PCI controller driver when reading the PCI * configuration space of the fake bridge. 
It will call back the @@ -312,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where, reg -= PCI_CAP_PCIE_START; read_op = bridge->ops->read_pcie; cfgspace = (u32 *) &bridge->pcie_conf; - behavior = pcie_cap_regs_behavior; + behavior = bridge->pcie_cap_regs_behavior; } else { read_op = bridge->ops->read_base; cfgspace = (u32 *) &bridge->conf; - behavior = pci_regs_behavior; + behavior = bridge->pci_regs_behavior; } if (read_op) @@ -383,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where, reg -= PCI_CAP_PCIE_START; write_op = bridge->ops->write_pcie; cfgspace = (u32 *) &bridge->pcie_conf; - behavior = pcie_cap_regs_behavior; + behavior = bridge->pcie_cap_regs_behavior; } else { write_op = bridge->ops->write_base; cfgspace = (u32 *) &bridge->conf; - behavior = pci_regs_behavior; + behavior = bridge->pci_regs_behavior; } /* Keep all bits, except the RW bits */ diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h index 9d510ccf738b..e65b1b79899d 100644 --- a/drivers/pci/pci-bridge-emul.h +++ b/drivers/pci/pci-bridge-emul.h @@ -107,15 +107,26 @@ struct pci_bridge_emul_ops { u32 old, u32 new, u32 mask); }; +struct pci_bridge_reg_behavior; + struct pci_bridge_emul { struct pci_bridge_emul_conf conf; struct pci_bridge_emul_pcie_conf pcie_conf; struct pci_bridge_emul_ops *ops; + struct pci_bridge_reg_behavior *pci_regs_behavior; + struct pci_bridge_reg_behavior *pcie_cap_regs_behavior; void *data; bool has_pcie; }; -void pci_bridge_emul_init(struct pci_bridge_emul *bridge); +enum { + PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR = BIT(0), +}; + +int pci_bridge_emul_init(struct pci_bridge_emul *bridge, + unsigned int flags); +void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge); + int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where, int size, u32 *value); int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where, diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 79b1610a8beb..71853befd435 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -100,7 +100,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf, { struct pci_driver *pdrv = to_pci_driver(driver); const struct pci_device_id *ids = pdrv->id_table; - __u32 vendor, device, subvendor = PCI_ANY_ID, + u32 vendor, device, subvendor = PCI_ANY_ID, subdevice = PCI_ANY_ID, class = 0, class_mask = 0; unsigned long driver_data = 0; int fields = 0; @@ -168,7 +168,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf, { struct pci_dynid *dynid, *n; struct pci_driver *pdrv = to_pci_driver(driver); - __u32 vendor, device, subvendor = PCI_ANY_ID, + u32 vendor, device, subvendor = PCI_ANY_ID, subdevice = PCI_ANY_ID, class = 0, class_mask = 0; int fields = 0; size_t retval = -ENODEV; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 9ecfe13157c0..25794c27c7a4 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -412,8 +412,7 @@ static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(msi_bus); -static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, - size_t count) +static ssize_t rescan_store(struct bus_type *bus, const char *buf, size_t count) { unsigned long val; struct pci_bus *b = NULL; @@ -429,7 +428,7 @@ static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf, } return count; } -static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, 
bus_rescan_store); +static BUS_ATTR_WO(rescan); static struct attribute *pci_bus_attrs[] = { &bus_attr_rescan.attr, diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c25acace7d91..7c1b362f599a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -861,7 +861,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) need_restore = true; - /* Fall-through: force to D0 */ + /* Fall-through - force to D0 */ default: pmcsr = 0; break; @@ -1233,7 +1233,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev) pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); } - static int pci_save_pcix_state(struct pci_dev *dev) { int pos; @@ -1270,6 +1269,45 @@ static void pci_restore_pcix_state(struct pci_dev *dev) pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); } +static void pci_save_ltr_state(struct pci_dev *dev) +{ + int ltr; + struct pci_cap_saved_state *save_state; + u16 *cap; + + if (!pci_is_pcie(dev)) + return; + + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); + if (!ltr) + return; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); + if (!save_state) { + pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); + return; + } + + cap = (u16 *)&save_state->cap.data[0]; + pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++); + pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++); +} + +static void pci_restore_ltr_state(struct pci_dev *dev) +{ + struct pci_cap_saved_state *save_state; + int ltr; + u16 *cap; + + save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); + ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); + if (!save_state || !ltr) + return; + + cap = (u16 *)&save_state->cap.data[0]; + pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++); + pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++); +} /** * pci_save_state - save the PCI configuration space of a device before suspending @@ -1291,6 +1329,7 @@ int pci_save_state(struct pci_dev *dev) if (i != 0) return i; + pci_save_ltr_state(dev); pci_save_dpc_state(dev); return pci_save_vc_state(dev); } @@ -1390,7 +1429,12 @@ void pci_restore_state(struct pci_dev *dev) if (!dev->state_saved) return; - /* PCI Express register must be restored first */ + /* + * Restore max latencies (in the LTR capability) before enabling + * LTR itself (in the PCIe capability). + */ + pci_restore_ltr_state(dev); + pci_restore_pcie_state(dev); pci_restore_pasid_state(dev); pci_restore_pri_state(dev); @@ -2260,7 +2304,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) case PCI_D2: if (pci_no_d1d2(dev)) break; - /* else: fall through */ + /* else, fall through */ default: target_state = state; } @@ -2501,6 +2545,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev) pm_runtime_put_sync(parent); } +static const struct dmi_system_id bridge_d3_blacklist[] = { +#ifdef CONFIG_X86 + { + /* + * Gigabyte X299 root port is not marked as hotplug capable + * which allows Linux to power manage it. However, this + * confuses the BIOS SMI handler so don't power manage root + * ports on that system. 
+ */ + .ident = "X299 DESIGNARE EX-CF", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), + }, + }, +#endif + { } +}; + /** * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check @@ -2546,6 +2609,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) if (bridge->is_hotplug_bridge) return false; + if (dmi_check_system(bridge_d3_blacklist)) + return false; + /* * It should be safe to put PCIe ports from 2015 or newer * to D3. @@ -2998,6 +3064,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) if (error) pci_err(dev, "unable to preallocate PCI-X save buffer\n"); + error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, + 2 * sizeof(u16)); + if (error) + pci_err(dev, "unable to allocate suspend buffer for LTR\n"); + pci_allocate_vc_save_buffers(dev); } @@ -5058,39 +5129,42 @@ unlock: return 0; } -/* Save and disable devices from the top of the tree down */ -static void pci_bus_save_and_disable(struct pci_bus *bus) +/* + * Save and disable devices from the top of the tree down while holding + * the @dev mutex lock for the entire tree. + */ +static void pci_bus_save_and_disable_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); pci_dev_save_and_disable(dev); - pci_dev_unlock(dev); if (dev->subordinate) - pci_bus_save_and_disable(dev->subordinate); + pci_bus_save_and_disable_locked(dev->subordinate); } } /* - * Restore devices from top of the tree down - parent bridges need to be - * restored before we can get to subordinate devices. + * Restore devices from top of the tree down while holding @dev mutex lock + * for the entire tree. Parent bridges need to be restored before we can + * get to subordinate devices. */ -static void pci_bus_restore(struct pci_bus *bus) +static void pci_bus_restore_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - pci_dev_lock(dev); pci_dev_restore(dev); - pci_dev_unlock(dev); if (dev->subordinate) - pci_bus_restore(dev->subordinate); + pci_bus_restore_locked(dev->subordinate); } } -/* Save and disable devices from the top of the tree down */ -static void pci_slot_save_and_disable(struct pci_slot *slot) +/* + * Save and disable devices from the top of the tree down while holding + * the @dev mutex lock for the entire tree. + */ +static void pci_slot_save_and_disable_locked(struct pci_slot *slot) { struct pci_dev *dev; @@ -5099,26 +5173,25 @@ static void pci_slot_save_and_disable(struct pci_slot *slot) continue; pci_dev_save_and_disable(dev); if (dev->subordinate) - pci_bus_save_and_disable(dev->subordinate); + pci_bus_save_and_disable_locked(dev->subordinate); } } /* - * Restore devices from top of the tree down - parent bridges need to be - * restored before we can get to subordinate devices. + * Restore devices from top of the tree down while holding @dev mutex lock + * for the entire tree. Parent bridges need to be restored before we can + * get to subordinate devices. 
*/ -static void pci_slot_restore(struct pci_slot *slot) +static void pci_slot_restore_locked(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; - pci_dev_lock(dev); pci_dev_restore(dev); - pci_dev_unlock(dev); if (dev->subordinate) - pci_bus_restore(dev->subordinate); + pci_bus_restore_locked(dev->subordinate); } } @@ -5177,17 +5250,15 @@ static int __pci_reset_slot(struct pci_slot *slot) if (rc) return rc; - pci_slot_save_and_disable(slot); - if (pci_slot_trylock(slot)) { + pci_slot_save_and_disable_locked(slot); might_sleep(); rc = pci_reset_hotplug_slot(slot->hotplug, 0); + pci_slot_restore_locked(slot); pci_slot_unlock(slot); } else rc = -EAGAIN; - pci_slot_restore(slot); - return rc; } @@ -5273,17 +5344,15 @@ static int __pci_reset_bus(struct pci_bus *bus) if (rc) return rc; - pci_bus_save_and_disable(bus); - if (pci_bus_trylock(bus)) { + pci_bus_save_and_disable_locked(bus); might_sleep(); rc = pci_bridge_secondary_bus_reset(bus->self); + pci_bus_restore_locked(bus); pci_bus_unlock(bus); } else rc = -EAGAIN; - pci_bus_restore(bus); - return rc; } @@ -6000,8 +6069,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) * to enable the kernel to reassign new resource * window later on. */ - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && - (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { r = &dev->resource[i]; if (!(r->flags & IORESOURCE_MEM)) @@ -6034,19 +6102,18 @@ static ssize_t pci_get_resource_alignment_param(char *buf, size_t size) return count; } -static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf) +static ssize_t resource_alignment_show(struct bus_type *bus, char *buf) { return pci_get_resource_alignment_param(buf, PAGE_SIZE); } -static ssize_t pci_resource_alignment_store(struct bus_type *bus, +static ssize_t resource_alignment_store(struct bus_type *bus, const char *buf, size_t count) { return pci_set_resource_alignment_param(buf, count); } -static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show, - pci_resource_alignment_store); +static BUS_ATTR_RW(resource_alignment); static int __init pci_resource_alignment_sysfs_init(void) { diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 44742b2e1126..5cbdbca904ac 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -6,10 +6,9 @@ config PCIEPORTBUS bool "PCI Express Port Bus support" depends on PCI help - This automatically enables PCI Express Port Bus support. Users can - choose Native Hot-Plug support, Advanced Error Reporting support, - Power Management Event support and Virtual Channel support to run - on PCI Express Ports (Root or Switch). + This enables PCI Express Port Bus support. Users can then enable + support for Native Hot-Plug, Advanced Error Reporting, Power + Management Events, and Downstream Port Containment. 
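[Editor's aside, not part of any patch above: the sysfs changes in drivers/pci/pci-sysfs.c and drivers/pci/pci.c rename the bus attribute callbacks so that the BUS_ATTR_WO()/BUS_ATTR_RW() helpers, which derive the show/store function names from the attribute name, can replace the open-coded BUS_ATTR() uses. A minimal sketch of the pattern follows, using a hypothetical "foo" attribute; only BUS_ATTR_RW() and the bus_type show/store signatures from <linux/device.h> are assumed.]

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t foo_show(struct bus_type *bus, char *buf)
{
	/* sysfs hands us a PAGE_SIZE buffer to fill with the current value */
	return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct bus_type *bus, const char *buf, size_t count)
{
	/* parse and apply the written value here; return the bytes consumed */
	return count;
}

/* Expands to 'struct bus_attribute bus_attr_foo' wired to foo_show()/foo_store() */
static BUS_ATTR_RW(foo);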
# # Include service Kconfig here diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index ab514083d5d4..f1d7bc1e5efa 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -3,6 +3,7 @@ # Makefile for PCI Express features and port driver pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o +pcieportdrv-y += bw_notification.o obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index fed29de783e0..f8fc2114ad39 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -117,7 +117,7 @@ bool pci_aer_available(void) static int ecrc_policy = ECRC_POLICY_DEFAULT; -static const char *ecrc_policy_str[] = { +static const char * const ecrc_policy_str[] = { [ECRC_POLICY_DEFAULT] = "bios", [ECRC_POLICY_OFF] = "off", [ECRC_POLICY_ON] = "on" @@ -203,11 +203,8 @@ void pcie_ecrc_get_policy(char *str) { int i; - for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++) - if (!strncmp(str, ecrc_policy_str[i], - strlen(ecrc_policy_str[i]))) - break; - if (i >= ARRAY_SIZE(ecrc_policy_str)) + i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str); + if (i < 0) return; ecrc_policy = i; diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c new file mode 100644 index 000000000000..d2eae3b7cc0f --- /dev/null +++ b/drivers/pci/pcie/bw_notification.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCI Express Link Bandwidth Notification services driver + * Author: Alexandru Gagniuc + * + * Copyright (C) 2019, Dell Inc + * + * The PCIe Link Bandwidth Notification provides a way to notify the + * operating system when the link width or data rate changes. This + * capability is required for all root ports and downstream ports + * supporting links wider than x1 and/or multiple link speeds. + * + * This service port driver hooks into the bandwidth notification interrupt + * and warns when links become degraded in operation. + */ + +#include "../pci.h" +#include "portdrv.h" + +static bool pcie_link_bandwidth_notification_supported(struct pci_dev *dev) +{ + int ret; + u32 lnk_cap; + + ret = pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnk_cap); + return (ret == PCIBIOS_SUCCESSFUL) && (lnk_cap & PCI_EXP_LNKCAP_LBNC); +} + +static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev) +{ + u16 lnk_ctl; + + pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); + lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); +} + +static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev) +{ + u16 lnk_ctl; + + pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); + lnk_ctl &= ~PCI_EXP_LNKCTL_LBMIE; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); +} + +static irqreturn_t pcie_bw_notification_handler(int irq, void *context) +{ + struct pcie_device *srv = context; + struct pci_dev *port = srv->port; + struct pci_dev *dev; + u16 link_status, events; + int ret; + + ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status); + events = link_status & PCI_EXP_LNKSTA_LBMS; + + if (ret != PCIBIOS_SUCCESSFUL || !events) + return IRQ_NONE; + + /* + * Print status from downstream devices, not this root port or + * downstream switch port. 
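+ * (After logging, the handler refreshes the cached bus speed via
+ * pcie_update_link_speed() and acknowledges the RW1C link bandwidth
+ * management status bit by writing the latched events back to the
+ * Link Status register.)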
+ */ + down_read(&pci_bus_sem); + list_for_each_entry(dev, &port->subordinate->devices, bus_list) + __pcie_print_link_status(dev, false); + up_read(&pci_bus_sem); + + pcie_update_link_speed(port->subordinate, link_status); + pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); + return IRQ_HANDLED; +} + +static int pcie_bandwidth_notification_probe(struct pcie_device *srv) +{ + int ret; + + /* Single-width or single-speed ports do not have to support this. */ + if (!pcie_link_bandwidth_notification_supported(srv->port)) + return -ENODEV; + + ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler, + IRQF_SHARED, "PCIe BW notif", srv); + if (ret) + return ret; + + pcie_enable_link_bandwidth_notification(srv->port); + + return 0; +} + +static void pcie_bandwidth_notification_remove(struct pcie_device *srv) +{ + pcie_disable_link_bandwidth_notification(srv->port); + free_irq(srv->irq, srv); +} + +static struct pcie_port_service_driver pcie_bandwidth_notification_driver = { + .name = "pcie_bw_notification", + .port_type = PCIE_ANY_PORT, + .service = PCIE_PORT_SERVICE_BWNOTIF, + .probe = pcie_bandwidth_notification_probe, + .remove = pcie_bandwidth_notification_remove, +}; + +int __init pcie_bandwidth_notification_init(void) +{ + return pcie_port_service_register(&pcie_bandwidth_notification_driver); +} diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index e435d12e61a0..7b77754a82de 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c @@ -202,6 +202,28 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status); } +static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev, + struct aer_err_info *info) +{ + int pos = dev->aer_cap; + u32 status, mask, sev; + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); + status &= ~mask; + if (!status) + return 0; + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev); + status &= sev; + if (status) + info->severity = AER_FATAL; + else + info->severity = AER_NONFATAL; + + return 1; +} + static irqreturn_t dpc_handler(int irq, void *context) { struct aer_err_info info; @@ -229,9 +251,12 @@ static irqreturn_t dpc_handler(int irq, void *context) /* show RP PIO error detail information */ if (dpc->rp_extensions && reason == 3 && ext_reason == 0) dpc_process_rp_pio_error(dpc); - else if (reason == 0 && aer_get_device_error_info(pdev, &info)) { + else if (reason == 0 && + dpc_get_aer_uncorrect_severity(pdev, &info) && + aer_get_device_error_info(pdev, &info)) { aer_print_error(pdev, &info); pci_cleanup_aer_uncorrect_error_status(pdev); + pci_aer_clear_fatal_status(pdev); } /* We configure DPC so it only triggers on ERR_FATAL */ diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 0dbcf429089f..54d593d10396 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -363,6 +363,16 @@ static bool pcie_pme_check_wakeup(struct pci_bus *bus) return false; } +static void pcie_pme_disable_interrupt(struct pci_dev *port, + struct pcie_pme_service_data *data) +{ + spin_lock_irq(&data->lock); + pcie_pme_interrupt_enable(port, false); + pcie_clear_root_pme_status(port); + data->noirq = true; + spin_unlock_irq(&data->lock); +} + /** * pcie_pme_suspend - Suspend PCIe PME service device. * @srv: PCIe service device to suspend. 
@@ -387,11 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) return 0; } - spin_lock_irq(&data->lock); - pcie_pme_interrupt_enable(port, false); - pcie_clear_root_pme_status(port); - data->noirq = true; - spin_unlock_irq(&data->lock); + pcie_pme_disable_interrupt(port, data); synchronize_irq(srv->irq); @@ -426,35 +432,13 @@ static int pcie_pme_resume(struct pcie_device *srv) * @srv - PCIe service device to remove. */ static void pcie_pme_remove(struct pcie_device *srv) -{ - pcie_pme_suspend(srv); - free_irq(srv->irq, srv); - kfree(get_service_data(srv)); -} - -static int pcie_pme_runtime_suspend(struct pcie_device *srv) -{ - struct pcie_pme_service_data *data = get_service_data(srv); - - spin_lock_irq(&data->lock); - pcie_pme_interrupt_enable(srv->port, false); - pcie_clear_root_pme_status(srv->port); - data->noirq = true; - spin_unlock_irq(&data->lock); - - return 0; -} - -static int pcie_pme_runtime_resume(struct pcie_device *srv) { struct pcie_pme_service_data *data = get_service_data(srv); - spin_lock_irq(&data->lock); - pcie_pme_interrupt_enable(srv->port, true); - data->noirq = false; - spin_unlock_irq(&data->lock); - - return 0; + pcie_pme_disable_interrupt(srv->port, data); + free_irq(srv->irq, srv); + cancel_work_sync(&data->work); + kfree(data); } static struct pcie_port_service_driver pcie_pme_driver = { @@ -464,8 +448,6 @@ static struct pcie_port_service_driver pcie_pme_driver = { .probe = pcie_pme_probe, .suspend = pcie_pme_suspend, - .runtime_suspend = pcie_pme_runtime_suspend, - .runtime_resume = pcie_pme_runtime_resume, .resume = pcie_pme_resume, .remove = pcie_pme_remove, }; diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index fbbf00b0992e..1d50dc58ac40 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -20,8 +20,10 @@ #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) #define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */ #define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) +#define PCIE_PORT_SERVICE_BWNOTIF_SHIFT 4 /* Bandwidth notification */ +#define PCIE_PORT_SERVICE_BWNOTIF (1 << PCIE_PORT_SERVICE_BWNOTIF_SHIFT) -#define PCIE_PORT_DEVICE_MAXSERVICES 4 +#define PCIE_PORT_DEVICE_MAXSERVICES 5 #ifdef CONFIG_PCIEAER int pcie_aer_init(void); @@ -47,6 +49,8 @@ int pcie_dpc_init(void); static inline int pcie_dpc_init(void) { return 0; } #endif +int pcie_bandwidth_notification_init(void); + /* Port Type */ #define PCIE_ANY_PORT (~0) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index f458ac9cb70c..7d04f9d087a6 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -99,7 +99,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask, */ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) { - int nr_entries, nvec; + int nr_entries, nvec, pcie_irq; u32 pme = 0, aer = 0, dpc = 0; /* Allocate the maximum possible number of MSI/MSI-X vectors */ @@ -135,10 +135,13 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) return nr_entries; } - /* PME and hotplug share an MSI/MSI-X vector */ - if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) { - irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, pme); - irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, pme); + /* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */ + if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | + PCIE_PORT_SERVICE_BWNOTIF)) { + pcie_irq = 
pci_irq_vector(dev, pme); + irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq; + irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq; + irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq; } if (mask & PCIE_PORT_SERVICE_AER) @@ -250,6 +253,10 @@ static int get_port_device_capability(struct pci_dev *dev) pci_aer_available() && services & PCIE_PORT_SERVICE_AER) services |= PCIE_PORT_SERVICE_DPC; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM || + pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + services |= PCIE_PORT_SERVICE_BWNOTIF; + return services; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 0acca3596807..0a87091a0800 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -182,10 +182,12 @@ static void pcie_portdrv_err_resume(struct pci_dev *dev) /* * LINUX Device Driver Model */ -static const struct pci_device_id port_pci_ids[] = { { +static const struct pci_device_id port_pci_ids[] = { /* handle any PCI-Express port */ - PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0), - }, { /* end: all zeroes */ } + { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) }, + /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ + { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) }, + { }, }; static const struct pci_error_handlers pcie_portdrv_err_handler = { @@ -238,6 +240,7 @@ static void __init pcie_init_services(void) pcie_pme_init(); pcie_dpc_init(); pcie_hp_init(); + pcie_bandwidth_notification_init(); } static int __init pcie_portdrv_init(void) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 257b9f6f2ebb..2ec0df04e0dc 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -121,13 +121,13 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask) * Get the lowest of them to find the decode size, and from that * the extent. */ - size = (size & ~(size-1)) - 1; + size = size & ~(size-1); /* * base == maxbase can be valid only if the BAR has already been * programmed with all 1s. */ - if (base == maxbase && ((base | size) & mask) != mask) + if (base == maxbase && ((base | (size - 1)) & mask) != mask) return 0; return size; @@ -278,7 +278,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, /* Above 32-bit boundary; try to reallocate */ res->flags |= IORESOURCE_UNSET; res->start = 0; - res->end = sz64; + res->end = sz64 - 1; pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", pos, (unsigned long long)l64); goto out; @@ -286,7 +286,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, } region.start = l64; - region.end = l64 + sz64; + region.end = l64 + sz64 - 1; pcibios_bus_to_resource(dev->bus, res, ®ion); pcibios_resource_to_bus(dev->bus, &inverted_region, res); @@ -348,6 +348,57 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) } } +static void pci_read_bridge_windows(struct pci_dev *bridge) +{ + u16 io; + u32 pmem, tmp; + + pci_read_config_word(bridge, PCI_IO_BASE, &io); + if (!io) { + pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); + pci_read_config_word(bridge, PCI_IO_BASE, &io); + pci_write_config_word(bridge, PCI_IO_BASE, 0x0); + } + if (io) + bridge->io_window = 1; + + /* + * DECchip 21050 pass 2 errata: the bridge may miss an address + * disconnect boundary by one PCI data phase. Workaround: do not + * use prefetching on this device. 
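+ * (The same check, and the rest of this window probing, previously lived
+ * in pci_bridge_check_ranges() in setup-bus.c; it now runs once at
+ * enumeration time and the results are cached in bridge->io_window,
+ * bridge->pref_window and bridge->pref_64_window.)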
+ */ + if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) + return; + + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + if (!pmem) { + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, + 0xffe0fff0); + pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); + pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); + } + if (!pmem) + return; + + bridge->pref_window = 1; + + if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { + + /* + * Bridge claims to have a 64-bit prefetchable memory + * window; verify that the upper bits are actually + * writable. + */ + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, + 0xffffffff); + pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); + pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); + if (tmp) + bridge->pref_64_window = 1; + } +} + static void pci_read_bridge_io(struct pci_bus *child) { struct pci_dev *dev = child->self; @@ -1728,9 +1779,6 @@ int pci_setup_device(struct pci_dev *dev) break; case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ - if (class != PCI_CLASS_BRIDGE_PCI) - goto bad; - /* * The PCI-to-PCI bridge spec requires that subtractive * decoding (i.e. transparent) bridge must have programming @@ -1739,6 +1787,7 @@ int pci_setup_device(struct pci_dev *dev) pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); + pci_read_bridge_windows(dev); set_pcie_hotplug_bridge(dev); pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); if (pos) { @@ -1856,8 +1905,6 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, hpp->latency_timer); pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); - if (hpp->enable_serr) - pci_bctl |= PCI_BRIDGE_CTL_SERR; if (hpp->enable_perr) pci_bctl |= PCI_BRIDGE_CTL_PARITY; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); @@ -2071,11 +2118,8 @@ static void pci_configure_ltr(struct pci_dev *dev) { #ifdef CONFIG_PCIEASPM struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); - u32 cap; struct pci_dev *bridge; - - if (!host->native_ltr) - return; + u32 cap, ctl; if (!pci_is_pcie(dev)) return; @@ -2084,22 +2128,35 @@ static void pci_configure_ltr(struct pci_dev *dev) if (!(cap & PCI_EXP_DEVCAP2_LTR)) return; - /* - * Software must not enable LTR in an Endpoint unless the Root - * Complex and all intermediate Switches indicate support for LTR. - * PCIe r3.1, sec 6.18. - */ - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - dev->ltr_path = 1; - else { + pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl); + if (ctl & PCI_EXP_DEVCTL2_LTR_EN) { + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { + dev->ltr_path = 1; + return; + } + bridge = pci_upstream_bridge(dev); if (bridge && bridge->ltr_path) dev->ltr_path = 1; + + return; } - if (dev->ltr_path) + if (!host->native_ltr) + return; + + /* + * Software must not enable LTR in an Endpoint unless the Root + * Complex and all intermediate Switches indicate support for LTR. + * PCIe r4.0, sec 6.18. 
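+ * (The DEVCTL2 read earlier in this function handles the case where
+ * firmware already enabled LTR: ltr_path is then recorded even when the
+ * OS lacks native LTR control, and LTR is only newly enabled here when
+ * host->native_ltr is set.)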
+ */ + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || + ((bridge = pci_upstream_bridge(dev)) && + bridge->ltr_path)) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN); + dev->ltr_path = 1; + } #endif } @@ -2129,6 +2186,24 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev) #endif } +static void pci_configure_serr(struct pci_dev *dev) +{ + u16 control; + + if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { + + /* + * A bridge will not forward ERR_ messages coming from an + * endpoint unless SERR# forwarding is enabled. + */ + pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control); + if (!(control & PCI_BRIDGE_CTL_SERR)) { + control |= PCI_BRIDGE_CTL_SERR; + pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control); + } + } +} + static void pci_configure_device(struct pci_dev *dev) { struct hotplug_params hpp; @@ -2139,6 +2214,7 @@ static void pci_configure_device(struct pci_dev *dev) pci_configure_relaxed_ordering(dev); pci_configure_ltr(dev); pci_configure_eetlp_prefix(dev); + pci_configure_serr(dev); memset(&hpp, 0, sizeof(hpp)); ret = pci_get_hp_params(dev, &hpp); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b0a413f3f7ca..a59ad09ce911 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev) break; } } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, - quirk_synopsys_haps); +DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_XHCI, 0, + quirk_synopsys_haps); /* * Let's make the southbridge information explicit instead of having to @@ -2138,7 +2139,7 @@ static void quirk_netmos(struct pci_dev *dev) if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) return; - /* else: fall through */ + /* else, fall through */ case PCI_DEVICE_ID_NETMOS_9735: case PCI_DEVICE_ID_NETMOS_9745: case PCI_DEVICE_ID_NETMOS_9845: @@ -4519,6 +4520,8 @@ static const struct pci_dev_acs_enabled { /* QCOM QDF2xxx root ports */ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, + /* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */ + { PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs }, /* Intel PCH root ports */ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ed960436df5e..ec44a0f3a7ac 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -735,58 +735,21 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) base/limit registers must be read-only and read as 0. */ static void pci_bridge_check_ranges(struct pci_bus *bus) { - u16 io; - u32 pmem; struct pci_dev *bridge = bus->self; - struct resource *b_res; + struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; - b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; b_res[1].flags |= IORESOURCE_MEM; - pci_read_config_word(bridge, PCI_IO_BASE, &io); - if (!io) { - pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); - pci_read_config_word(bridge, PCI_IO_BASE, &io); - pci_write_config_word(bridge, PCI_IO_BASE, 0x0); - } - if (io) + if (bridge->io_window) b_res[0].flags |= IORESOURCE_IO; - /* DECchip 21050 pass 2 errata: the bridge may miss an address - disconnect boundary by one PCI data phase. - Workaround: do not use prefetching on this device. 
*/ - if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) - return; - - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - if (!pmem) { - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, - 0xffe0fff0); - pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); - pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); - } - if (pmem) { + if (bridge->pref_window) { b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; - if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == - PCI_PREF_RANGE_TYPE_64) { + if (bridge->pref_64_window) { b_res[2].flags |= IORESOURCE_MEM_64; b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; } } - - /* double check if bridge does support 64 bit pref */ - if (b_res[2].flags & IORESOURCE_MEM_64) { - u32 mem_base_hi, tmp; - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, - &mem_base_hi); - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - 0xffffffff); - pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); - if (!tmp) - b_res[2].flags &= ~IORESOURCE_MEM_64; - pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, - mem_base_hi); - } } /* Helper function for sizing routines: find first available @@ -1223,12 +1186,12 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) if (!b) continue; - switch (dev->class >> 8) { - case PCI_CLASS_BRIDGE_CARDBUS: + switch (dev->hdr_type) { + case PCI_HEADER_TYPE_CARDBUS: pci_bus_size_cardbus(b, realloc_head); break; - case PCI_CLASS_BRIDGE_PCI: + case PCI_HEADER_TYPE_BRIDGE: default: __pci_bus_size_bridges(b, realloc_head); break; @@ -1239,12 +1202,12 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) if (pci_is_root_bus(bus)) return; - switch (bus->self->class >> 8) { - case PCI_CLASS_BRIDGE_CARDBUS: + switch (bus->self->hdr_type) { + case PCI_HEADER_TYPE_CARDBUS: /* don't size cardbuses yet. 
*/ break; - case PCI_CLASS_BRIDGE_PCI: + case PCI_HEADER_TYPE_BRIDGE: pci_bridge_check_ranges(bus); if (bus->self->is_hotplug_bridge) { additional_io_size = pci_hotplug_io_size; @@ -1393,13 +1356,13 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, __pci_bus_assign_resources(b, realloc_head, fail_head); - switch (dev->class >> 8) { - case PCI_CLASS_BRIDGE_PCI: + switch (dev->hdr_type) { + case PCI_HEADER_TYPE_BRIDGE: if (!pci_is_enabled(dev)) pci_setup_bridge(b); break; - case PCI_CLASS_BRIDGE_CARDBUS: + case PCI_HEADER_TYPE_CARDBUS: pci_setup_cardbus(b); break; diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c index 1bfeb160c5b1..bfd03e023308 100644 --- a/drivers/perf/arm-cci.c +++ b/drivers/perf/arm-cci.c @@ -1327,15 +1327,6 @@ static int cci_pmu_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EOPNOTSUPP; - /* We have no filtering of any kind */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - /* * Following the example set by other "uncore" PMUs, we accept any CPU * and rewrite its affinity dynamically rather than having perf core @@ -1433,6 +1424,7 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) .stop = cci_pmu_stop, .read = pmu_read, .attr_groups = pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; cci_pmu->plat_device = pdev; diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c index 7dd850e02f19..2ae76026e947 100644 --- a/drivers/perf/arm-ccn.c +++ b/drivers/perf/arm-ccn.c @@ -741,10 +741,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) return -EOPNOTSUPP; } - if (has_branch_stack(event) || event->attr.exclude_user || - event->attr.exclude_kernel || event->attr.exclude_hv || - event->attr.exclude_idle || event->attr.exclude_host || - event->attr.exclude_guest) { + if (has_branch_stack(event)) { dev_dbg(ccn->dev, "Can't exclude execution levels!\n"); return -EINVAL; } @@ -1290,6 +1287,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) .read = arm_ccn_pmu_event_read, .pmu_enable = arm_ccn_pmu_enable, .pmu_disable = arm_ccn_pmu_disable, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; /* No overflow interrupt? Have to use a timer instead. 
*/ diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c index 660cb8ac886a..5851de56bbd0 100644 --- a/drivers/perf/arm_dsu_pmu.c +++ b/drivers/perf/arm_dsu_pmu.c @@ -562,13 +562,7 @@ static int dsu_pmu_event_init(struct perf_event *event) return -EINVAL; } - if (has_branch_stack(event) || - event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) { + if (has_branch_stack(event)) { dev_dbg(dsu_pmu->pmu.dev, "Can't support filtering\n"); return -EINVAL; } @@ -735,6 +729,7 @@ static int dsu_pmu_device_probe(struct platform_device *pdev) .read = dsu_pmu_read, .attr_groups = dsu_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; rc = perf_pmu_register(&dsu_pmu->pmu, name, -1); diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index d0b7dd8fb184..eec75b97e7ea 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -356,13 +356,6 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) return ret; } -static int -event_requires_mode_exclusion(struct perf_event_attr *attr) -{ - return attr->exclude_idle || attr->exclude_user || - attr->exclude_kernel || attr->exclude_hv; -} - static int __hw_perf_event_init(struct perf_event *event) { @@ -393,9 +386,8 @@ __hw_perf_event_init(struct perf_event *event) /* * Check whether we need to exclude the counter from certain modes. */ - if ((!armpmu->set_event_filter || - armpmu->set_event_filter(hwc, &event->attr)) && - event_requires_mode_exclusion(&event->attr)) { + if (armpmu->set_event_filter && + armpmu->set_event_filter(hwc, &event->attr)) { pr_debug("ARM performance counters do not support " "mode exclusion\n"); return -EOPNOTSUPP; @@ -867,6 +859,9 @@ int armpmu_register(struct arm_pmu *pmu) if (ret) return ret; + if (!pmu->set_event_filter) + pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); if (ret) goto out_destroy; diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 8e46a9dad2fa..7cb766dafe85 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event) { } -static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages, - bool snapshot) +static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { - int i; + int i, cpu = event->cpu; struct page **pglist; struct arm_spe_pmu_buf *buf; diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 69372e2bc93c..0eba947c2ee9 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -396,6 +396,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev) .stop = hisi_uncore_pmu_stop, .read = hisi_uncore_pmu_read, .attr_groups = hisi_ddrc_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1); diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 443906e0aff3..2553a844ebf6 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -407,6 +407,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev) .stop = hisi_uncore_pmu_stop, .read = hisi_uncore_pmu_read, .attr_groups = hisi_hha_pmu_attr_groups, + .capabilities = 
PERF_PMU_CAP_NO_EXCLUDE, }; ret = perf_pmu_register(&hha_pmu->pmu, name, -1); diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 0bde5d919b2e..cf1cc34f402a 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -397,6 +397,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev) .stop = hisi_uncore_pmu_stop, .read = hisi_uncore_pmu_read, .attr_groups = hisi_l3c_pmu_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; ret = perf_pmu_register(&l3c_pmu->pmu, name, -1); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 9efd2413240c..f028cbc3443c 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -142,15 +142,6 @@ int hisi_uncore_pmu_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EOPNOTSUPP; - /* counters do not have these bits */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_host || - event->attr.exclude_guest || - event->attr.exclude_hv || - event->attr.exclude_idle) - return -EINVAL; - /* * The uncore counters not specific to any CPU, so cannot * support per-task diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 842135cf35a3..091b4d7d32c4 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -509,14 +509,6 @@ static int l2_cache_event_init(struct perf_event *event) return -EOPNOTSUPP; } - /* We cannot filter accurately so we just don't allow it. */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_hv || event->attr.exclude_idle) { - dev_dbg_ratelimited(&l2cache_pmu->pdev->dev, - "Can't exclude execution levels\n"); - return -EOPNOTSUPP; - } - if (((L2_EVT_GROUP(event->attr.config) > L2_EVT_GROUP_MAX) || ((event->attr.config & ~L2_EVT_MASK) != 0)) && (event->attr.config != L2CYCLE_CTR_RAW_CODE)) { @@ -982,6 +974,7 @@ static int l2_cache_pmu_probe(struct platform_device *pdev) .stop = l2_cache_event_stop, .read = l2_cache_event_read, .attr_groups = l2_cache_pmu_attr_grps, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; l2cache_pmu->num_counters = get_num_counters(); diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c index 2dc63d61f2ea..5d70646da8c7 100644 --- a/drivers/perf/qcom_l3_pmu.c +++ b/drivers/perf/qcom_l3_pmu.c @@ -494,13 +494,6 @@ static int qcom_l3_cache__event_init(struct perf_event *event) if (event->attr.type != event->pmu->type) return -ENOENT; - /* - * There are no per-counter mode filters in the PMU. - */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_hv || event->attr.exclude_idle) - return -EINVAL; - /* * Sampling not supported since these events are not core-attributable. 
*/ @@ -777,6 +770,7 @@ static int qcom_l3_cache_pmu_probe(struct platform_device *pdev) .read = qcom_l3_cache__event_read, .attr_groups = qcom_l3_cache_pmu_attr_grps, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; memrc = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c index c9a1701d3e54..43d76c85da56 100644 --- a/drivers/perf/thunderx2_pmu.c +++ b/drivers/perf/thunderx2_pmu.c @@ -424,15 +424,6 @@ static int tx2_uncore_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - /* We have no filtering of any kind */ - if (event->attr.exclude_user || - event->attr.exclude_kernel || - event->attr.exclude_hv || - event->attr.exclude_idle || - event->attr.exclude_host || - event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; @@ -572,6 +563,7 @@ static int tx2_uncore_pmu_register( .start = tx2_uncore_event_start, .stop = tx2_uncore_event_stop, .read = tx2_uncore_event_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL, diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 0dc9ff0f8894..27574e85f351 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -917,11 +917,6 @@ static int xgene_perf_event_init(struct perf_event *event) if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EINVAL; - /* SOC counters do not have usr/os/guest/host bits */ - if (event->attr.exclude_user || event->attr.exclude_kernel || - event->attr.exclude_host || event->attr.exclude_guest) - return -EINVAL; - if (event->cpu < 0) return -EINVAL; /* @@ -1057,7 +1052,6 @@ static void xgene_perf_start(struct perf_event *event, int flags) static void xgene_perf_stop(struct perf_event *event, int flags) { struct hw_perf_event *hw = &event->hw; - u64 config; if (hw->state & PERF_HES_UPTODATE) return; @@ -1069,7 +1063,6 @@ static void xgene_perf_stop(struct perf_event *event, int flags) if (hw->state & PERF_HES_UPTODATE) return; - config = hw->config; xgene_perf_read(event); hw->state |= PERF_HES_UPTODATE; } @@ -1136,6 +1129,7 @@ static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name) .start = xgene_perf_start, .stop = xgene_perf_stop, .read = xgene_perf_read, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, }; /* Hardware counter init */ diff --git a/drivers/phy/allwinner/Kconfig b/drivers/phy/allwinner/Kconfig index cdc1e745ba47..fb1204bcc454 100644 --- a/drivers/phy/allwinner/Kconfig +++ b/drivers/phy/allwinner/Kconfig @@ -17,6 +17,18 @@ config PHY_SUN4I_USB This driver controls the entire USB PHY block, both the USB OTG parts, as well as the 2 regular USB 2 host PHYs. +config PHY_SUN6I_MIPI_DPHY + tristate "Allwinner A31 MIPI D-PHY Support" + depends on ARCH_SUNXI && HAS_IOMEM && OF + depends on RESET_CONTROLLER + select GENERIC_PHY + select GENERIC_PHY_MIPI_DPHY + select REGMAP_MMIO + help + Choose this option if you have an Allwinner SoC with + MIPI-DSI support. If M is selected, the module will be + called sun6i_mipi_dphy. 
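As a rough illustration of how a display controller consumes such a PHY through the generic phy framework once it is exposed here (error handling omitted; dev, pixel_clk_hz and the "dphy" lookup name are assumptions, not taken from this patch):

        struct phy *dphy = devm_phy_get(dev, "dphy");
        union phy_configure_opts opts = { };

        /* derive default D-PHY timings from pixel clock, bpp and lane count */
        phy_mipi_dphy_get_default_config(pixel_clk_hz, 24, 4, &opts.mipi_dphy);

        phy_init(dphy);
        phy_configure(dphy, &opts);
        phy_power_on(dphy);
        /* ... display output runs ... */
        phy_power_off(dphy);
        phy_exit(dphy);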
+ config PHY_SUN9I_USB tristate "Allwinner sun9i SoC USB PHY driver" depends on ARCH_SUNXI && HAS_IOMEM && OF diff --git a/drivers/phy/allwinner/Makefile b/drivers/phy/allwinner/Makefile index 8605529c01a1..7d0053efbfaa 100644 --- a/drivers/phy/allwinner/Makefile +++ b/drivers/phy/allwinner/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_PHY_SUN4I_USB) += phy-sun4i-usb.o +obj-$(CONFIG_PHY_SUN6I_MIPI_DPHY) += phy-sun6i-mipi-dphy.o obj-$(CONFIG_PHY_SUN9I_USB) += phy-sun9i-usb.o diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c similarity index 70% rename from drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c rename to drivers/phy/allwinner/phy-sun6i-mipi-dphy.c index e4d19431fa0e..79c8af5c7c1d 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dphy.c +++ b/drivers/phy/allwinner/phy-sun6i-mipi-dphy.c @@ -8,11 +8,14 @@ #include #include +#include #include +#include #include #include -#include "sun6i_mipi_dsi.h" +#include +#include #define SUN6I_DPHY_GCTL_REG 0x00 #define SUN6I_DPHY_GCTL_LANE_NUM(n) ((((n) - 1) & 3) << 4) @@ -81,12 +84,46 @@ #define SUN6I_DPHY_DBG5_REG 0xf4 -int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes) +struct sun6i_dphy { + struct clk *bus_clk; + struct clk *mod_clk; + struct regmap *regs; + struct reset_control *reset; + + struct phy *phy; + struct phy_configure_opts_mipi_dphy config; +}; + +static int sun6i_dphy_init(struct phy *phy) { + struct sun6i_dphy *dphy = phy_get_drvdata(phy); + reset_control_deassert(dphy->reset); clk_prepare_enable(dphy->mod_clk); clk_set_rate_exclusive(dphy->mod_clk, 150000000); + return 0; +} + +static int sun6i_dphy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + struct sun6i_dphy *dphy = phy_get_drvdata(phy); + int ret; + + ret = phy_mipi_dphy_config_validate(&opts->mipi_dphy); + if (ret) + return ret; + + memcpy(&dphy->config, opts, sizeof(dphy->config)); + + return 0; +} + +static int sun6i_dphy_power_on(struct phy *phy) +{ + struct sun6i_dphy *dphy = phy_get_drvdata(phy); + u8 lanes_mask = GENMASK(dphy->config.lanes - 1, 0); + regmap_write(dphy->regs, SUN6I_DPHY_TX_CTL_REG, SUN6I_DPHY_TX_CTL_HS_TX_CLK_CONT); @@ -111,16 +148,9 @@ int sun6i_dphy_init(struct sun6i_dphy *dphy, unsigned int lanes) SUN6I_DPHY_TX_TIME4_HS_TX_ANA1(3)); regmap_write(dphy->regs, SUN6I_DPHY_GCTL_REG, - SUN6I_DPHY_GCTL_LANE_NUM(lanes) | + SUN6I_DPHY_GCTL_LANE_NUM(dphy->config.lanes) | SUN6I_DPHY_GCTL_EN); - return 0; -} - -int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes) -{ - u8 lanes_mask = GENMASK(lanes - 1, 0); - regmap_write(dphy->regs, SUN6I_DPHY_ANA0_REG, SUN6I_DPHY_ANA0_REG_PWS | SUN6I_DPHY_ANA0_REG_DMPC | @@ -181,16 +211,20 @@ int sun6i_dphy_power_on(struct sun6i_dphy *dphy, unsigned int lanes) return 0; } -int sun6i_dphy_power_off(struct sun6i_dphy *dphy) +static int sun6i_dphy_power_off(struct phy *phy) { + struct sun6i_dphy *dphy = phy_get_drvdata(phy); + regmap_update_bits(dphy->regs, SUN6I_DPHY_ANA1_REG, SUN6I_DPHY_ANA1_REG_VTTMODE, 0); return 0; } -int sun6i_dphy_exit(struct sun6i_dphy *dphy) +static int sun6i_dphy_exit(struct phy *phy) { + struct sun6i_dphy *dphy = phy_get_drvdata(phy); + clk_rate_exclusive_put(dphy->mod_clk); clk_disable_unprepare(dphy->mod_clk); reset_control_assert(dphy->reset); @@ -198,6 +232,15 @@ int sun6i_dphy_exit(struct sun6i_dphy *dphy) return 0; } + +static struct phy_ops sun6i_dphy_ops = { + .configure = sun6i_dphy_configure, + .power_on = sun6i_dphy_power_on, + .power_off = sun6i_dphy_power_off, + .init = sun6i_dphy_init, + .exit = 
sun6i_dphy_exit, +}; + static struct regmap_config sun6i_dphy_regmap_config = { .reg_bits = 32, .val_bits = 32, @@ -206,87 +249,70 @@ static struct regmap_config sun6i_dphy_regmap_config = { .name = "mipi-dphy", }; -static const struct of_device_id sun6i_dphy_of_table[] = { - { .compatible = "allwinner,sun6i-a31-mipi-dphy" }, - { } -}; - -int sun6i_dphy_probe(struct sun6i_dsi *dsi, struct device_node *node) +static int sun6i_dphy_probe(struct platform_device *pdev) { + struct phy_provider *phy_provider; struct sun6i_dphy *dphy; - struct resource res; + struct resource *res; void __iomem *regs; - int ret; - - if (!of_match_node(sun6i_dphy_of_table, node)) { - dev_err(dsi->dev, "Incompatible D-PHY\n"); - return -EINVAL; - } - dphy = devm_kzalloc(dsi->dev, sizeof(*dphy), GFP_KERNEL); + dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL); if (!dphy) return -ENOMEM; - ret = of_address_to_resource(node, 0, &res); - if (ret) { - dev_err(dsi->dev, "phy: Couldn't get our resources\n"); - return ret; - } - - regs = devm_ioremap_resource(dsi->dev, &res); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) { - dev_err(dsi->dev, "Couldn't map the DPHY encoder registers\n"); + dev_err(&pdev->dev, "Couldn't map the DPHY encoder registers\n"); return PTR_ERR(regs); } - dphy->regs = devm_regmap_init_mmio(dsi->dev, regs, - &sun6i_dphy_regmap_config); + dphy->regs = devm_regmap_init_mmio_clk(&pdev->dev, "bus", + regs, &sun6i_dphy_regmap_config); if (IS_ERR(dphy->regs)) { - dev_err(dsi->dev, "Couldn't create the DPHY encoder regmap\n"); + dev_err(&pdev->dev, "Couldn't create the DPHY encoder regmap\n"); return PTR_ERR(dphy->regs); } - dphy->reset = of_reset_control_get_shared(node, NULL); + dphy->reset = devm_reset_control_get_shared(&pdev->dev, NULL); if (IS_ERR(dphy->reset)) { - dev_err(dsi->dev, "Couldn't get our reset line\n"); + dev_err(&pdev->dev, "Couldn't get our reset line\n"); return PTR_ERR(dphy->reset); } - dphy->bus_clk = of_clk_get_by_name(node, "bus"); - if (IS_ERR(dphy->bus_clk)) { - dev_err(dsi->dev, "Couldn't get the DPHY bus clock\n"); - ret = PTR_ERR(dphy->bus_clk); - goto err_free_reset; - } - regmap_mmio_attach_clk(dphy->regs, dphy->bus_clk); - - dphy->mod_clk = of_clk_get_by_name(node, "mod"); + dphy->mod_clk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(dphy->mod_clk)) { - dev_err(dsi->dev, "Couldn't get the DPHY mod clock\n"); - ret = PTR_ERR(dphy->mod_clk); - goto err_free_bus; + dev_err(&pdev->dev, "Couldn't get the DPHY mod clock\n"); + return PTR_ERR(dphy->mod_clk); } - dsi->dphy = dphy; + dphy->phy = devm_phy_create(&pdev->dev, NULL, &sun6i_dphy_ops); + if (IS_ERR(dphy->phy)) { + dev_err(&pdev->dev, "failed to create PHY\n"); + return PTR_ERR(dphy->phy); + } - return 0; + phy_set_drvdata(dphy->phy, dphy); + phy_provider = devm_of_phy_provider_register(&pdev->dev, of_phy_simple_xlate); -err_free_bus: - regmap_mmio_detach_clk(dphy->regs); - clk_put(dphy->bus_clk); -err_free_reset: - reset_control_put(dphy->reset); - return ret; + return PTR_ERR_OR_ZERO(phy_provider); } -int sun6i_dphy_remove(struct sun6i_dsi *dsi) -{ - struct sun6i_dphy *dphy = dsi->dphy; - - regmap_mmio_detach_clk(dphy->regs); - clk_put(dphy->mod_clk); - clk_put(dphy->bus_clk); - reset_control_put(dphy->reset); +static const struct of_device_id sun6i_dphy_of_table[] = { + { .compatible = "allwinner,sun6i-a31-mipi-dphy" }, + { } +}; +MODULE_DEVICE_TABLE(of, sun6i_dphy_of_table); + +static struct platform_driver sun6i_dphy_platform_driver 
= { + .probe = sun6i_dphy_probe, + .driver = { + .name = "sun6i-mipi-dphy", + .of_match_table = sun6i_dphy_of_table, + }, +}; +module_platform_driver(sun6i_dphy_platform_driver); - return 0; -} +MODULE_AUTHOR("Maxime Ripard "); +MODULE_DESCRIPTION("Allwinner A31 MIPI D-PHY Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/broadcom/phy-bcm-sr-pcie.c b/drivers/phy/broadcom/phy-bcm-sr-pcie.c index c10e95f86de5..96a3af126a78 100644 --- a/drivers/phy/broadcom/phy-bcm-sr-pcie.c +++ b/drivers/phy/broadcom/phy-bcm-sr-pcie.c @@ -78,8 +78,8 @@ struct sr_pcie_phy_core { static const u8 pipemux_table[] = { /* PIPEMUX = 0, EP 1x16 */ 0x00, - /* PIPEMUX = 1, EP 2x8 */ - 0x00, + /* PIPEMUX = 1, EP 1x8 + RC 1x8, core 7 */ + 0x80, /* PIPEMUX = 2, EP 4x4 */ 0x00, /* PIPEMUX = 3, RC 2x8, cores 0, 7 */ diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig index 2b8c0851ff33..31f18b67dd7c 100644 --- a/drivers/phy/cadence/Kconfig +++ b/drivers/phy/cadence/Kconfig @@ -1,6 +1,7 @@ # # Phy drivers for Cadence PHYs # + config PHY_CADENCE_DP tristate "Cadence MHDP DisplayPort PHY driver" depends on OF @@ -9,9 +10,19 @@ config PHY_CADENCE_DP help Support for Cadence MHDP DisplayPort PHY. +config PHY_CADENCE_DPHY + tristate "Cadence D-PHY Support" + depends on HAS_IOMEM && OF + select GENERIC_PHY + select GENERIC_PHY_MIPI_DPHY + help + Choose this option if you have a Cadence D-PHY in your + system. If M is selected, the module will be called + cdns-dphy. + config PHY_CADENCE_SIERRA tristate "Cadence Sierra PHY Driver" depends on OF && HAS_IOMEM && RESET_CONTROLLER select GENERIC_PHY help - Enable this to support the Cadence Sierra PHY driver \ No newline at end of file + Enable this to support the Cadence Sierra PHY driver diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile index 412349af0492..2f9e3457b954 100644 --- a/drivers/phy/cadence/Makefile +++ b/drivers/phy/cadence/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o +obj-$(CONFIG_PHY_CADENCE_DPHY) += cdns-dphy.o obj-$(CONFIG_PHY_CADENCE_SIERRA) += phy-cadence-sierra.o diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c new file mode 100644 index 000000000000..90c4e9b5aac8 --- /dev/null +++ b/drivers/phy/cadence/cdns-dphy.c @@ -0,0 +1,391 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright: 2017-2018 Cadence Design Systems, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define REG_WAKEUP_TIME_NS 800 +#define DPHY_PLL_RATE_HZ 108000000 + +/* DPHY registers */ +#define DPHY_PMA_CMN(reg) (reg) +#define DPHY_PMA_LCLK(reg) (0x100 + (reg)) +#define DPHY_PMA_LDATA(lane, reg) (0x200 + ((lane) * 0x100) + (reg)) +#define DPHY_PMA_RCLK(reg) (0x600 + (reg)) +#define DPHY_PMA_RDATA(lane, reg) (0x700 + ((lane) * 0x100) + (reg)) +#define DPHY_PCS(reg) (0xb00 + (reg)) + +#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20) +#define DPHY_CMN_SSM_EN BIT(0) +#define DPHY_CMN_TX_MODE_EN BIT(9) + +#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40) +#define DPHY_CMN_PWM_DIV(x) ((x) << 20) +#define DPHY_CMN_PWM_LOW(x) ((x) << 10) +#define DPHY_CMN_PWM_HIGH(x) (x) + +#define DPHY_CMN_FBDIV DPHY_PMA_CMN(0x4c) +#define DPHY_CMN_FBDIV_VAL(low, high) (((high) << 11) | ((low) << 22)) +#define DPHY_CMN_FBDIV_FROM_REG (BIT(10) | BIT(21)) + +#define DPHY_CMN_OPIPDIV DPHY_PMA_CMN(0x50) +#define DPHY_CMN_IPDIV_FROM_REG BIT(0) +#define DPHY_CMN_IPDIV(x) ((x) << 1) +#define DPHY_CMN_OPDIV_FROM_REG BIT(6) +#define DPHY_CMN_OPDIV(x) ((x) << 7) + +#define DPHY_PSM_CFG DPHY_PCS(0x4) +#define DPHY_PSM_CFG_FROM_REG BIT(0) +#define DPHY_PSM_CLK_DIV(x) ((x) << 1) + +#define DSI_HBP_FRAME_OVERHEAD 12 +#define DSI_HSA_FRAME_OVERHEAD 14 +#define DSI_HFP_FRAME_OVERHEAD 6 +#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4 +#define DSI_BLANKING_FRAME_OVERHEAD 6 +#define DSI_NULL_FRAME_OVERHEAD 6 +#define DSI_EOT_PKT_SIZE 4 + +struct cdns_dphy_cfg { + u8 pll_ipdiv; + u8 pll_opdiv; + u16 pll_fbdiv; + unsigned int nlanes; +}; + +enum cdns_dphy_clk_lane_cfg { + DPHY_CLK_CFG_LEFT_DRIVES_ALL = 0, + DPHY_CLK_CFG_LEFT_DRIVES_RIGHT = 1, + DPHY_CLK_CFG_LEFT_DRIVES_LEFT = 2, + DPHY_CLK_CFG_RIGHT_DRIVES_ALL = 3, +}; + +struct cdns_dphy; +struct cdns_dphy_ops { + int (*probe)(struct cdns_dphy *dphy); + void (*remove)(struct cdns_dphy *dphy); + void (*set_psm_div)(struct cdns_dphy *dphy, u8 div); + void (*set_clk_lane_cfg)(struct cdns_dphy *dphy, + enum cdns_dphy_clk_lane_cfg cfg); + void (*set_pll_cfg)(struct cdns_dphy *dphy, + const struct cdns_dphy_cfg *cfg); + unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy); +}; + +struct cdns_dphy { + struct cdns_dphy_cfg cfg; + void __iomem *regs; + struct clk *psm_clk; + struct clk *pll_ref_clk; + const struct cdns_dphy_ops *ops; + struct phy *phy; +}; + +static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy, + struct cdns_dphy_cfg *cfg, + struct phy_configure_opts_mipi_dphy *opts, + unsigned int *dsi_hfp_ext) +{ + unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk); + u64 dlane_bps; + + memset(cfg, 0, sizeof(*cfg)); + + if (pll_ref_hz < 9600000 || pll_ref_hz >= 150000000) + return -EINVAL; + else if (pll_ref_hz < 19200000) + cfg->pll_ipdiv = 1; + else if (pll_ref_hz < 38400000) + cfg->pll_ipdiv = 2; + else if (pll_ref_hz < 76800000) + cfg->pll_ipdiv = 4; + else + cfg->pll_ipdiv = 8; + + dlane_bps = opts->hs_clk_rate; + + if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL) + return -EINVAL; + else if (dlane_bps >= 1250000000) + cfg->pll_opdiv = 1; + else if (dlane_bps >= 630000000) + cfg->pll_opdiv = 2; + else if (dlane_bps >= 320000000) + cfg->pll_opdiv = 4; + else if (dlane_bps >= 160000000) + cfg->pll_opdiv = 8; + + cfg->pll_fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv * + cfg->pll_ipdiv, + pll_ref_hz); + + return 0; +} + +static int cdns_dphy_setup_psm(struct cdns_dphy *dphy) +{ + unsigned long psm_clk_hz = clk_get_rate(dphy->psm_clk); + unsigned long 
psm_div; + + if (!psm_clk_hz || psm_clk_hz > 100000000) + return -EINVAL; + + psm_div = DIV_ROUND_CLOSEST(psm_clk_hz, 1000000); + if (dphy->ops->set_psm_div) + dphy->ops->set_psm_div(dphy, psm_div); + + return 0; +} + +static void cdns_dphy_set_clk_lane_cfg(struct cdns_dphy *dphy, + enum cdns_dphy_clk_lane_cfg cfg) +{ + if (dphy->ops->set_clk_lane_cfg) + dphy->ops->set_clk_lane_cfg(dphy, cfg); +} + +static void cdns_dphy_set_pll_cfg(struct cdns_dphy *dphy, + const struct cdns_dphy_cfg *cfg) +{ + if (dphy->ops->set_pll_cfg) + dphy->ops->set_pll_cfg(dphy, cfg); +} + +static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy) +{ + return dphy->ops->get_wakeup_time_ns(dphy); +} + +static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy) +{ + /* Default wakeup time is 800 ns (in a simulated environment). */ + return 800; +} + +static void cdns_dphy_ref_set_pll_cfg(struct cdns_dphy *dphy, + const struct cdns_dphy_cfg *cfg) +{ + u32 fbdiv_low, fbdiv_high; + + fbdiv_low = (cfg->pll_fbdiv / 4) - 2; + fbdiv_high = cfg->pll_fbdiv - fbdiv_low - 2; + + writel(DPHY_CMN_IPDIV_FROM_REG | DPHY_CMN_OPDIV_FROM_REG | + DPHY_CMN_IPDIV(cfg->pll_ipdiv) | + DPHY_CMN_OPDIV(cfg->pll_opdiv), + dphy->regs + DPHY_CMN_OPIPDIV); + writel(DPHY_CMN_FBDIV_FROM_REG | + DPHY_CMN_FBDIV_VAL(fbdiv_low, fbdiv_high), + dphy->regs + DPHY_CMN_FBDIV); + writel(DPHY_CMN_PWM_HIGH(6) | DPHY_CMN_PWM_LOW(0x101) | + DPHY_CMN_PWM_DIV(0x8), + dphy->regs + DPHY_CMN_PWM); +} + +static void cdns_dphy_ref_set_psm_div(struct cdns_dphy *dphy, u8 div) +{ + writel(DPHY_PSM_CFG_FROM_REG | DPHY_PSM_CLK_DIV(div), + dphy->regs + DPHY_PSM_CFG); +} + +/* + * This is the reference implementation of DPHY hooks. Specific integration of + * this IP may have to re-implement some of them depending on how they decided + * to wire things in the SoC. + */ +static const struct cdns_dphy_ops ref_dphy_ops = { + .get_wakeup_time_ns = cdns_dphy_ref_get_wakeup_time_ns, + .set_pll_cfg = cdns_dphy_ref_set_pll_cfg, + .set_psm_div = cdns_dphy_ref_set_psm_div, +}; + +static int cdns_dphy_config_from_opts(struct phy *phy, + struct phy_configure_opts_mipi_dphy *opts, + struct cdns_dphy_cfg *cfg) +{ + struct cdns_dphy *dphy = phy_get_drvdata(phy); + unsigned int dsi_hfp_ext = 0; + int ret; + + ret = phy_mipi_dphy_config_validate(opts); + if (ret) + return ret; + + ret = cdns_dsi_get_dphy_pll_cfg(dphy, cfg, + opts, &dsi_hfp_ext); + if (ret) + return ret; + + opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000; + + return 0; +} + +static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode, + union phy_configure_opts *opts) +{ + struct cdns_dphy_cfg cfg = { 0 }; + + if (mode != PHY_MODE_MIPI_DPHY) + return -EINVAL; + + return cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg); +} + +static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + struct cdns_dphy *dphy = phy_get_drvdata(phy); + struct cdns_dphy_cfg cfg = { 0 }; + int ret; + + ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg); + if (ret) + return ret; + + /* + * Configure the internal PSM clk divider so that the DPHY has a + * 1MHz clk (or something close). + */ + ret = cdns_dphy_setup_psm(dphy); + if (ret) + return ret; + + /* + * Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes + * and 8 data lanes, each clk lane can be attache different set of + * data lanes. The 2 groups are named 'left' and 'right', so here we + * just say that we want the 'left' clk lane to drive the 'left' data + * lanes. 
+ */ + cdns_dphy_set_clk_lane_cfg(dphy, DPHY_CLK_CFG_LEFT_DRIVES_LEFT); + + /* + * Configure the DPHY PLL that will be used to generate the TX byte + * clk. + */ + cdns_dphy_set_pll_cfg(dphy, &cfg); + + return 0; +} + +static int cdns_dphy_power_on(struct phy *phy) +{ + struct cdns_dphy *dphy = phy_get_drvdata(phy); + + clk_prepare_enable(dphy->psm_clk); + clk_prepare_enable(dphy->pll_ref_clk); + + /* Start TX state machine. */ + writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN, + dphy->regs + DPHY_CMN_SSM); + + return 0; +} + +static int cdns_dphy_power_off(struct phy *phy) +{ + struct cdns_dphy *dphy = phy_get_drvdata(phy); + + clk_disable_unprepare(dphy->pll_ref_clk); + clk_disable_unprepare(dphy->psm_clk); + + return 0; +} + +static const struct phy_ops cdns_dphy_ops = { + .configure = cdns_dphy_configure, + .validate = cdns_dphy_validate, + .power_on = cdns_dphy_power_on, + .power_off = cdns_dphy_power_off, +}; + +static int cdns_dphy_probe(struct platform_device *pdev) +{ + struct phy_provider *phy_provider; + struct cdns_dphy *dphy; + struct resource *res; + int ret; + + dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL); + if (!dphy) + return -ENOMEM; + dev_set_drvdata(&pdev->dev, dphy); + + dphy->ops = of_device_get_match_data(&pdev->dev); + if (!dphy->ops) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dphy->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dphy->regs)) + return PTR_ERR(dphy->regs); + + dphy->psm_clk = devm_clk_get(&pdev->dev, "psm"); + if (IS_ERR(dphy->psm_clk)) + return PTR_ERR(dphy->psm_clk); + + dphy->pll_ref_clk = devm_clk_get(&pdev->dev, "pll_ref"); + if (IS_ERR(dphy->pll_ref_clk)) + return PTR_ERR(dphy->pll_ref_clk); + + if (dphy->ops->probe) { + ret = dphy->ops->probe(dphy); + if (ret) + return ret; + } + + dphy->phy = devm_phy_create(&pdev->dev, NULL, &cdns_dphy_ops); + if (IS_ERR(dphy->phy)) { + dev_err(&pdev->dev, "failed to create PHY\n"); + if (dphy->ops->remove) + dphy->ops->remove(dphy); + return PTR_ERR(dphy->phy); + } + + phy_set_drvdata(dphy->phy, dphy); + phy_provider = devm_of_phy_provider_register(&pdev->dev, + of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static int cdns_dphy_remove(struct platform_device *pdev) +{ + struct cdns_dphy *dphy = dev_get_drvdata(&pdev->dev); + + if (dphy->ops->remove) + dphy->ops->remove(dphy); + + return 0; +} + +static const struct of_device_id cdns_dphy_of_match[] = { + { .compatible = "cdns,dphy", .data = &ref_dphy_ops }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, cdns_dphy_of_match); + +static struct platform_driver cdns_dphy_platform_driver = { + .probe = cdns_dphy_probe, + .remove = cdns_dphy_remove, + .driver = { + .name = "cdns-mipi-dphy", + .of_match_table = cdns_dphy_of_match, + }, +}; +module_platform_driver(cdns_dphy_platform_driver); + +MODULE_AUTHOR("Maxime Ripard "); +MODULE_DESCRIPTION("Cadence MIPI D-PHY Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig index f050bd4e97e0..832670b4952b 100644 --- a/drivers/phy/freescale/Kconfig +++ b/drivers/phy/freescale/Kconfig @@ -2,4 +2,4 @@ config PHY_FSL_IMX8MQ_USB tristate "Freescale i.MX8M USB3 PHY" depends on OF && HAS_IOMEM select GENERIC_PHY - default SOC_IMX8MQ + default ARCH_MXC && ARM64 diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig index 6fb4b56e4c14..9ba872325dad 100644 --- a/drivers/phy/marvell/Kconfig +++ b/drivers/phy/marvell/Kconfig @@ -21,6 +21,37 @@ config PHY_BERLIN_USB help Enable 
this to support the USB PHY on Marvell Berlin SoCs. +config PHY_MVEBU_A3700_COMPHY + tristate "Marvell A3700 comphy driver" + depends on ARCH_MVEBU || COMPILE_TEST + depends on OF + depends on HAVE_ARM_SMCCC + default y + select GENERIC_PHY + help + This driver allows to control the comphy, a hardware block providing + shared serdes PHYs on Marvell Armada 3700. Its serdes lanes can be + used by various controllers: Ethernet, SATA, USB3, PCIe. + +config PHY_MVEBU_A3700_UTMI + tristate "Marvell A3700 UTMI driver" + depends on ARCH_MVEBU || COMPILE_TEST + depends on OF + default y + select GENERIC_PHY + help + Enable this to support Marvell A3700 UTMI PHY driver. + +config PHY_MVEBU_A38X_COMPHY + tristate "Marvell Armada 38x comphy driver" + depends on ARCH_MVEBU || COMPILE_TEST + depends on OF + select GENERIC_PHY + help + This driver allows to control the comphy, an hardware block providing + shared serdes PHYs on Marvell Armada 38x. Its serdes lanes can be + used by various controllers (Ethernet, sata, usb, PCIe...). + config PHY_MVEBU_CP110_COMPHY tristate "Marvell CP110 comphy driver" depends on ARCH_MVEBU || COMPILE_TEST diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile index 3975b144f8ec..434eb9ca6cc3 100644 --- a/drivers/phy/marvell/Makefile +++ b/drivers/phy/marvell/Makefile @@ -2,6 +2,9 @@ obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o +obj-$(CONFIG_PHY_MVEBU_A3700_COMPHY) += phy-mvebu-a3700-comphy.o +obj-$(CONFIG_PHY_MVEBU_A3700_UTMI) += phy-mvebu-a3700-utmi.o +obj-$(CONFIG_PHY_MVEBU_A38X_COMPHY) += phy-armada38x-comphy.o obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o diff --git a/drivers/phy/marvell/phy-armada375-usb2.c b/drivers/phy/marvell/phy-armada375-usb2.c index 1a3db288c0a9..fa5dc9462d09 100644 --- a/drivers/phy/marvell/phy-armada375-usb2.c +++ b/drivers/phy/marvell/phy-armada375-usb2.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * USB cluster support for Armada 375 platform. * @@ -5,10 +6,6 @@ * * Gregory CLEMENT * - * This file is licensed under the terms of the GNU General Public - * License version 2 or later. This program is licensed "as is" - * without any warranty of any kind, whether express or implied. - * * Armada 375 comes with an USB2 host and device controller and an * USB3 controller. The USB cluster control register allows to manage * common features of both USB controllers. 
@@ -18,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -142,7 +138,6 @@ static const struct of_device_id of_usb_cluster_table[] = { { .compatible = "marvell,armada-375-usb-cluster", }, { /* end of list */ }, }; -MODULE_DEVICE_TABLE(of, of_usb_cluster_table); static struct platform_driver armada375_usb_phy_driver = { .probe = armada375_usb_phy_probe, @@ -151,8 +146,4 @@ static struct platform_driver armada375_usb_phy_driver = { .name = "armada-375-usb-cluster", } }; -module_platform_driver(armada375_usb_phy_driver); - -MODULE_DESCRIPTION("Armada 375 USB cluster driver"); -MODULE_AUTHOR("Gregory CLEMENT "); -MODULE_LICENSE("GPL"); +builtin_platform_driver(armada375_usb_phy_driver); diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c new file mode 100644 index 000000000000..3e00bc679d4e --- /dev/null +++ b/drivers/phy/marvell/phy-armada38x-comphy.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Russell King, Deep Blue Solutions Ltd. + * + * Partly derived from CP110 comphy driver by Antoine Tenart + * + */ +#include +#include +#include +#include +#include +#include + +#define MAX_A38X_COMPHY 6 +#define MAX_A38X_PORTS 3 + +#define COMPHY_CFG1 0x00 +#define COMPHY_CFG1_GEN_TX(x) ((x) << 26) +#define COMPHY_CFG1_GEN_TX_MSK COMPHY_CFG1_GEN_TX(15) +#define COMPHY_CFG1_GEN_RX(x) ((x) << 22) +#define COMPHY_CFG1_GEN_RX_MSK COMPHY_CFG1_GEN_RX(15) +#define GEN_SGMII_1_25GBPS 6 +#define GEN_SGMII_3_125GBPS 8 + +#define COMPHY_STAT1 0x18 +#define COMPHY_STAT1_PLL_RDY_TX BIT(3) +#define COMPHY_STAT1_PLL_RDY_RX BIT(2) + +#define COMPHY_SELECTOR 0xfc + +struct a38x_comphy; + +struct a38x_comphy_lane { + void __iomem *base; + struct a38x_comphy *priv; + unsigned int n; + + int port; +}; + +struct a38x_comphy { + void __iomem *base; + struct device *dev; + struct a38x_comphy_lane lane[MAX_A38X_COMPHY]; +}; + +static const u8 gbe_mux[MAX_A38X_COMPHY][MAX_A38X_PORTS] = { + { 0, 0, 0 }, + { 4, 5, 0 }, + { 0, 4, 0 }, + { 0, 0, 4 }, + { 0, 3, 0 }, + { 0, 0, 3 }, +}; + +static void a38x_comphy_set_reg(struct a38x_comphy_lane *lane, + unsigned int offset, u32 mask, u32 value) +{ + u32 val; + + val = readl_relaxed(lane->base + offset) & ~mask; + writel(val | value, lane->base + offset); +} + +static void a38x_comphy_set_speed(struct a38x_comphy_lane *lane, + unsigned int gen_tx, unsigned int gen_rx) +{ + a38x_comphy_set_reg(lane, COMPHY_CFG1, + COMPHY_CFG1_GEN_TX_MSK | COMPHY_CFG1_GEN_RX_MSK, + COMPHY_CFG1_GEN_TX(gen_tx) | + COMPHY_CFG1_GEN_RX(gen_rx)); +} + +static int a38x_comphy_poll(struct a38x_comphy_lane *lane, + unsigned int offset, u32 mask, u32 value) +{ + u32 val; + int ret; + + ret = readl_relaxed_poll_timeout_atomic(lane->base + offset, val, + (val & mask) == value, + 1000, 150000); + + if (ret) + dev_err(lane->priv->dev, + "comphy%u: timed out waiting for status\n", lane->n); + + return ret; +} + +/* + * We only support changing the speed for comphys configured for GBE. + * Since that is all we do, we only poll for PLL ready status. 
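+ *
+ * Concretely: the SGMII and 1000BASE-X submodes program GEN_SGMII_1_25GBPS
+ * and 2500BASE-X programs GEN_SGMII_3_125GBPS into both the TX and RX
+ * generation fields; set_mode() then only waits for the
+ * COMPHY_STAT1_PLL_RDY_TX/RX status bits.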
+ */ +static int a38x_comphy_set_mode(struct phy *phy, enum phy_mode mode, int sub) +{ + struct a38x_comphy_lane *lane = phy_get_drvdata(phy); + unsigned int gen; + + if (mode != PHY_MODE_ETHERNET) + return -EINVAL; + + switch (sub) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + gen = GEN_SGMII_1_25GBPS; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + gen = GEN_SGMII_3_125GBPS; + break; + + default: + return -EINVAL; + } + + a38x_comphy_set_speed(lane, gen, gen); + + return a38x_comphy_poll(lane, COMPHY_STAT1, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX, + COMPHY_STAT1_PLL_RDY_TX | + COMPHY_STAT1_PLL_RDY_RX); +} + +static const struct phy_ops a38x_comphy_ops = { + .set_mode = a38x_comphy_set_mode, + .owner = THIS_MODULE, +}; + +static struct phy *a38x_comphy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct a38x_comphy_lane *lane; + struct phy *phy; + u32 val; + + if (WARN_ON(args->args[0] >= MAX_A38X_PORTS)) + return ERR_PTR(-EINVAL); + + phy = of_phy_simple_xlate(dev, args); + if (IS_ERR(phy)) + return phy; + + lane = phy_get_drvdata(phy); + if (lane->port >= 0) + return ERR_PTR(-EBUSY); + + lane->port = args->args[0]; + + val = readl_relaxed(lane->priv->base + COMPHY_SELECTOR); + val = (val >> (4 * lane->n)) & 0xf; + + if (!gbe_mux[lane->n][lane->port] || + val != gbe_mux[lane->n][lane->port]) { + dev_warn(lane->priv->dev, + "comphy%u: not configured for GBE\n", lane->n); + phy = ERR_PTR(-EINVAL); + } + + return phy; +} + +static int a38x_comphy_probe(struct platform_device *pdev) +{ + struct phy_provider *provider; + struct device_node *child; + struct a38x_comphy *priv; + struct resource *res; + void __iomem *base; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + priv->dev = &pdev->dev; + priv->base = base; + + for_each_available_child_of_node(pdev->dev.of_node, child) { + struct phy *phy; + int ret; + u32 val; + + ret = of_property_read_u32(child, "reg", &val); + if (ret < 0) { + dev_err(&pdev->dev, "missing 'reg' property (%d)\n", + ret); + continue; + } + + if (val >= MAX_A38X_COMPHY || priv->lane[val].base) { + dev_err(&pdev->dev, "invalid 'reg' property\n"); + continue; + } + + phy = devm_phy_create(&pdev->dev, child, &a38x_comphy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + priv->lane[val].base = base + 0x28 * val; + priv->lane[val].priv = priv; + priv->lane[val].n = val; + priv->lane[val].port = -1; + phy_set_drvdata(phy, &priv->lane[val]); + } + + dev_set_drvdata(&pdev->dev, priv); + + provider = devm_of_phy_provider_register(&pdev->dev, a38x_comphy_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id a38x_comphy_of_match_table[] = { + { .compatible = "marvell,armada-380-comphy" }, + { }, +}; +MODULE_DEVICE_TABLE(of, a38x_comphy_of_match_table); + +static struct platform_driver a38x_comphy_driver = { + .probe = a38x_comphy_probe, + .driver = { + .name = "armada-38x-comphy", + .of_match_table = a38x_comphy_of_match_table, + }, +}; +module_platform_driver(a38x_comphy_driver); + +MODULE_AUTHOR("Russell King "); +MODULE_DESCRIPTION("Common PHY driver for Armada 38x SoCs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c new file mode 100644 index 000000000000..8812a104c233 --- /dev/null +++ 
b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell + * + * Authors: + * Evan Wang + * Miquèl Raynal + * + * Structure inspired from phy-mvebu-cp110-comphy.c written by Antoine Tenart. + * SMC call initial support done by Grzegorz Jaszczyk. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define MVEBU_A3700_COMPHY_LANES 3 +#define MVEBU_A3700_COMPHY_PORTS 2 + +/* COMPHY Fast SMC function identifiers */ +#define COMPHY_SIP_POWER_ON 0x82000001 +#define COMPHY_SIP_POWER_OFF 0x82000002 +#define COMPHY_SIP_PLL_LOCK 0x82000003 + +#define COMPHY_FW_MODE_SATA 0x1 +#define COMPHY_FW_MODE_SGMII 0x2 +#define COMPHY_FW_MODE_HS_SGMII 0x3 +#define COMPHY_FW_MODE_USB3H 0x4 +#define COMPHY_FW_MODE_USB3D 0x5 +#define COMPHY_FW_MODE_PCIE 0x6 +#define COMPHY_FW_MODE_RXAUI 0x7 +#define COMPHY_FW_MODE_XFI 0x8 +#define COMPHY_FW_MODE_SFI 0x9 +#define COMPHY_FW_MODE_USB3 0xa + +#define COMPHY_FW_SPEED_1_25G 0 /* SGMII 1G */ +#define COMPHY_FW_SPEED_2_5G 1 +#define COMPHY_FW_SPEED_3_125G 2 /* SGMII 2.5G */ +#define COMPHY_FW_SPEED_5G 3 +#define COMPHY_FW_SPEED_5_15625G 4 /* XFI 5G */ +#define COMPHY_FW_SPEED_6G 5 +#define COMPHY_FW_SPEED_10_3125G 6 /* XFI 10G */ +#define COMPHY_FW_SPEED_MAX 0x3F + +#define COMPHY_FW_MODE(mode) ((mode) << 12) +#define COMPHY_FW_NET(mode, idx, speed) (COMPHY_FW_MODE(mode) | \ + ((idx) << 8) | \ + ((speed) << 2)) +#define COMPHY_FW_PCIE(mode, idx, speed, width) (COMPHY_FW_NET(mode, idx, speed) | \ + ((width) << 18)) + +struct mvebu_a3700_comphy_conf { + unsigned int lane; + enum phy_mode mode; + int submode; + unsigned int port; + u32 fw_mode; +}; + +#define MVEBU_A3700_COMPHY_CONF(_lane, _mode, _smode, _port, _fw) \ + { \ + .lane = _lane, \ + .mode = _mode, \ + .submode = _smode, \ + .port = _port, \ + .fw_mode = _fw, \ + } + +#define MVEBU_A3700_COMPHY_CONF_GEN(_lane, _mode, _port, _fw) \ + MVEBU_A3700_COMPHY_CONF(_lane, _mode, PHY_INTERFACE_MODE_NA, _port, _fw) + +#define MVEBU_A3700_COMPHY_CONF_ETH(_lane, _smode, _port, _fw) \ + MVEBU_A3700_COMPHY_CONF(_lane, PHY_MODE_ETHERNET, _smode, _port, _fw) + +static const struct mvebu_a3700_comphy_conf mvebu_a3700_comphy_modes[] = { + /* lane 0 */ + MVEBU_A3700_COMPHY_CONF_GEN(0, PHY_MODE_USB_HOST_SS, 0, + COMPHY_FW_MODE_USB3H), + MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_SGMII, 1, + COMPHY_FW_MODE_SGMII), + MVEBU_A3700_COMPHY_CONF_ETH(0, PHY_INTERFACE_MODE_2500BASEX, 1, + COMPHY_FW_MODE_HS_SGMII), + /* lane 1 */ + MVEBU_A3700_COMPHY_CONF_GEN(1, PHY_MODE_PCIE, 0, + COMPHY_FW_MODE_PCIE), + MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_SGMII, 0, + COMPHY_FW_MODE_SGMII), + MVEBU_A3700_COMPHY_CONF_ETH(1, PHY_INTERFACE_MODE_2500BASEX, 0, + COMPHY_FW_MODE_HS_SGMII), + /* lane 2 */ + MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_SATA, 0, + COMPHY_FW_MODE_SATA), + MVEBU_A3700_COMPHY_CONF_GEN(2, PHY_MODE_USB_HOST_SS, 0, + COMPHY_FW_MODE_USB3H), +}; + +struct mvebu_a3700_comphy_lane { + struct device *dev; + unsigned int id; + enum phy_mode mode; + int submode; + int port; +}; + +static int mvebu_a3700_comphy_smc(unsigned long function, unsigned long lane, + unsigned long mode) +{ + struct arm_smccc_res res; + + arm_smccc_smc(function, lane, mode, 0, 0, 0, 0, 0, &res); + + return res.a0; +} + +static int mvebu_a3700_comphy_get_fw_mode(int lane, int port, + enum phy_mode mode, + int submode) +{ + int i, n = ARRAY_SIZE(mvebu_a3700_comphy_modes); + + /* Unused PHY mux value is 0x0 */ + if (mode == PHY_MODE_INVALID) + 
return -EINVAL; + + for (i = 0; i < n; i++) { + if (mvebu_a3700_comphy_modes[i].lane == lane && + mvebu_a3700_comphy_modes[i].port == port && + mvebu_a3700_comphy_modes[i].mode == mode && + mvebu_a3700_comphy_modes[i].submode == submode) + break; + } + + if (i == n) + return -EINVAL; + + return mvebu_a3700_comphy_modes[i].fw_mode; +} + +static int mvebu_a3700_comphy_set_mode(struct phy *phy, enum phy_mode mode, + int submode) +{ + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); + int fw_mode; + + if (submode == PHY_INTERFACE_MODE_1000BASEX) + submode = PHY_INTERFACE_MODE_SGMII; + + fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port, mode, + submode); + if (fw_mode < 0) { + dev_err(lane->dev, "invalid COMPHY mode\n"); + return fw_mode; + } + + /* Just remember the mode, ->power_on() will do the real setup */ + lane->mode = mode; + lane->submode = submode; + + return 0; +} + +static int mvebu_a3700_comphy_power_on(struct phy *phy) +{ + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); + u32 fw_param; + int fw_mode; + + fw_mode = mvebu_a3700_comphy_get_fw_mode(lane->id, lane->port, + lane->mode, lane->submode); + if (fw_mode < 0) { + dev_err(lane->dev, "invalid COMPHY mode\n"); + return fw_mode; + } + + switch (lane->mode) { + case PHY_MODE_USB_HOST_SS: + dev_dbg(lane->dev, "set lane %d to USB3 host mode\n", lane->id); + fw_param = COMPHY_FW_MODE(fw_mode); + break; + case PHY_MODE_SATA: + dev_dbg(lane->dev, "set lane %d to SATA mode\n", lane->id); + fw_param = COMPHY_FW_MODE(fw_mode); + break; + case PHY_MODE_ETHERNET: + switch (lane->submode) { + case PHY_INTERFACE_MODE_SGMII: + dev_dbg(lane->dev, "set lane %d to SGMII mode\n", + lane->id); + fw_param = COMPHY_FW_NET(fw_mode, lane->port, + COMPHY_FW_SPEED_1_25G); + break; + case PHY_INTERFACE_MODE_2500BASEX: + dev_dbg(lane->dev, "set lane %d to HS SGMII mode\n", + lane->id); + fw_param = COMPHY_FW_NET(fw_mode, lane->port, + COMPHY_FW_SPEED_3_125G); + break; + default: + dev_err(lane->dev, "unsupported PHY submode (%d)\n", + lane->submode); + return -ENOTSUPP; + } + break; + case PHY_MODE_PCIE: + dev_dbg(lane->dev, "set lane %d to PCIe mode\n", lane->id); + fw_param = COMPHY_FW_PCIE(fw_mode, lane->port, + COMPHY_FW_SPEED_5G, + phy->attrs.bus_width); + break; + default: + dev_err(lane->dev, "unsupported PHY mode (%d)\n", lane->mode); + return -ENOTSUPP; + } + + return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_ON, lane->id, fw_param); +} + +static int mvebu_a3700_comphy_power_off(struct phy *phy) +{ + struct mvebu_a3700_comphy_lane *lane = phy_get_drvdata(phy); + + return mvebu_a3700_comphy_smc(COMPHY_SIP_POWER_OFF, lane->id, 0); +} + +static const struct phy_ops mvebu_a3700_comphy_ops = { + .power_on = mvebu_a3700_comphy_power_on, + .power_off = mvebu_a3700_comphy_power_off, + .set_mode = mvebu_a3700_comphy_set_mode, + .owner = THIS_MODULE, +}; + +static struct phy *mvebu_a3700_comphy_xlate(struct device *dev, + struct of_phandle_args *args) +{ + struct mvebu_a3700_comphy_lane *lane; + struct phy *phy; + + if (WARN_ON(args->args[0] >= MVEBU_A3700_COMPHY_PORTS)) + return ERR_PTR(-EINVAL); + + phy = of_phy_simple_xlate(dev, args); + if (IS_ERR(phy)) + return phy; + + lane = phy_get_drvdata(phy); + lane->port = args->args[0]; + + return phy; +} + +static int mvebu_a3700_comphy_probe(struct platform_device *pdev) +{ + struct phy_provider *provider; + struct device_node *child; + + for_each_available_child_of_node(pdev->dev.of_node, child) { + struct mvebu_a3700_comphy_lane *lane; + struct phy *phy; + int ret; + u32 
lane_id; + + ret = of_property_read_u32(child, "reg", &lane_id); + if (ret < 0) { + dev_err(&pdev->dev, "missing 'reg' property (%d)\n", + ret); + continue; + } + + if (lane_id >= MVEBU_A3700_COMPHY_LANES) { + dev_err(&pdev->dev, "invalid 'reg' property\n"); + continue; + } + + lane = devm_kzalloc(&pdev->dev, sizeof(*lane), GFP_KERNEL); + if (!lane) + return -ENOMEM; + + phy = devm_phy_create(&pdev->dev, child, + &mvebu_a3700_comphy_ops); + if (IS_ERR(phy)) + return PTR_ERR(phy); + + lane->dev = &pdev->dev; + lane->mode = PHY_MODE_INVALID; + lane->submode = PHY_INTERFACE_MODE_NA; + lane->id = lane_id; + lane->port = -1; + phy_set_drvdata(phy, lane); + } + + provider = devm_of_phy_provider_register(&pdev->dev, + mvebu_a3700_comphy_xlate); + return PTR_ERR_OR_ZERO(provider); +} + +static const struct of_device_id mvebu_a3700_comphy_of_match_table[] = { + { .compatible = "marvell,comphy-a3700" }, + { }, +}; +MODULE_DEVICE_TABLE(of, mvebu_a3700_comphy_of_match_table); + +static struct platform_driver mvebu_a3700_comphy_driver = { + .probe = mvebu_a3700_comphy_probe, + .driver = { + .name = "mvebu-a3700-comphy", + .of_match_table = mvebu_a3700_comphy_of_match_table, + }, +}; +module_platform_driver(mvebu_a3700_comphy_driver); + +MODULE_AUTHOR("Miquèl Raynal "); +MODULE_DESCRIPTION("Common PHY driver for A3700"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c new file mode 100644 index 000000000000..94a29dea57af --- /dev/null +++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 Marvell + * + * Authors: + * Igal Liberman + * Miquèl Raynal + * + * Marvell A3700 UTMI PHY driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Armada 3700 UTMI PHY registers */ +#define USB2_PHY_PLL_CTRL_REG0 0x0 +#define PLL_REF_DIV_OFF 0 +#define PLL_REF_DIV_MASK GENMASK(6, 0) +#define PLL_REF_DIV_5 5 +#define PLL_FB_DIV_OFF 16 +#define PLL_FB_DIV_MASK GENMASK(24, 16) +#define PLL_FB_DIV_96 96 +#define PLL_SEL_LPFR_OFF 28 +#define PLL_SEL_LPFR_MASK GENMASK(29, 28) +#define PLL_READY BIT(31) +#define USB2_PHY_CAL_CTRL 0x8 +#define PHY_PLLCAL_DONE BIT(31) +#define PHY_IMPCAL_DONE BIT(23) +#define USB2_RX_CHAN_CTRL1 0x18 +#define USB2PHY_SQCAL_DONE BIT(31) +#define USB2_PHY_OTG_CTRL 0x34 +#define PHY_PU_OTG BIT(4) +#define USB2_PHY_CHRGR_DETECT 0x38 +#define PHY_CDP_EN BIT(2) +#define PHY_DCP_EN BIT(3) +#define PHY_PD_EN BIT(4) +#define PHY_PU_CHRG_DTC BIT(5) +#define PHY_CDP_DM_AUTO BIT(7) +#define PHY_ENSWITCH_DP BIT(12) +#define PHY_ENSWITCH_DM BIT(13) + +/* Armada 3700 USB miscellaneous registers */ +#define USB2_PHY_CTRL(usb32) (usb32 ? 0x20 : 0x4) +#define RB_USB2PHY_PU BIT(0) +#define USB2_DP_PULLDN_DEV_MODE BIT(5) +#define USB2_DM_PULLDN_DEV_MODE BIT(6) +#define RB_USB2PHY_SUSPM(usb32) (usb32 ? 
BIT(14) : BIT(7)) + +#define PLL_LOCK_DELAY_US 10000 +#define PLL_LOCK_TIMEOUT_US 1000000 + +/** + * struct mvebu_a3700_utmi_caps - PHY capabilities + * + * @usb32: Flag indicating which PHY is in use (impacts the register map): + * - The UTMI PHY wired to the USB3/USB2 controller (otg) + * - The UTMI PHY wired to the USB2 controller (host only) + * @ops: PHY operations + */ +struct mvebu_a3700_utmi_caps { + int usb32; + const struct phy_ops *ops; +}; + +/** + * struct mvebu_a3700_utmi - PHY driver data + * + * @regs: PHY registers + * @usb_mis: Regmap with USB miscellaneous registers including PHY ones + * @caps: PHY capabilities + * @phy: PHY handle + */ +struct mvebu_a3700_utmi { + void __iomem *regs; + struct regmap *usb_misc; + const struct mvebu_a3700_utmi_caps *caps; + struct phy *phy; +}; + +static int mvebu_a3700_utmi_phy_power_on(struct phy *phy) +{ + struct mvebu_a3700_utmi *utmi = phy_get_drvdata(phy); + struct device *dev = &phy->dev; + int usb32 = utmi->caps->usb32; + int ret = 0; + u32 reg; + + /* + * Setup PLL. 40MHz clock used to be the default, being 25MHz now. + * See "PLL Settings for Typical REFCLK" table. + */ + reg = readl(utmi->regs + USB2_PHY_PLL_CTRL_REG0); + reg &= ~(PLL_REF_DIV_MASK | PLL_FB_DIV_MASK | PLL_SEL_LPFR_MASK); + reg |= (PLL_REF_DIV_5 << PLL_REF_DIV_OFF) | + (PLL_FB_DIV_96 << PLL_FB_DIV_OFF); + writel(reg, utmi->regs + USB2_PHY_PLL_CTRL_REG0); + + /* Enable PHY pull up and disable USB2 suspend */ + regmap_update_bits(utmi->usb_misc, USB2_PHY_CTRL(usb32), + RB_USB2PHY_SUSPM(usb32) | RB_USB2PHY_PU, + RB_USB2PHY_SUSPM(usb32) | RB_USB2PHY_PU); + + if (usb32) { + /* Power up OTG module */ + reg = readl(utmi->regs + USB2_PHY_OTG_CTRL); + reg |= PHY_PU_OTG; + writel(reg, utmi->regs + USB2_PHY_OTG_CTRL); + + /* Disable PHY charger detection */ + reg = readl(utmi->regs + USB2_PHY_CHRGR_DETECT); + reg &= ~(PHY_CDP_EN | PHY_DCP_EN | PHY_PD_EN | PHY_PU_CHRG_DTC | + PHY_CDP_DM_AUTO | PHY_ENSWITCH_DP | PHY_ENSWITCH_DM); + writel(reg, utmi->regs + USB2_PHY_CHRGR_DETECT); + + /* Disable PHY DP/DM pull-down (used for device mode) */ + regmap_update_bits(utmi->usb_misc, USB2_PHY_CTRL(usb32), + USB2_DP_PULLDN_DEV_MODE | + USB2_DM_PULLDN_DEV_MODE, 0); + } + + /* Wait for PLL calibration */ + ret = readl_poll_timeout(utmi->regs + USB2_PHY_CAL_CTRL, reg, + reg & PHY_PLLCAL_DONE, + PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US); + if (ret) { + dev_err(dev, "Failed to end USB2 PLL calibration\n"); + return ret; + } + + /* Wait for impedance calibration */ + ret = readl_poll_timeout(utmi->regs + USB2_PHY_CAL_CTRL, reg, + reg & PHY_IMPCAL_DONE, + PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US); + if (ret) { + dev_err(dev, "Failed to end USB2 impedance calibration\n"); + return ret; + } + + /* Wait for squelch calibration */ + ret = readl_poll_timeout(utmi->regs + USB2_RX_CHAN_CTRL1, reg, + reg & USB2PHY_SQCAL_DONE, + PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US); + if (ret) { + dev_err(dev, "Failed to end USB2 unknown calibration\n"); + return ret; + } + + /* Wait for PLL to be locked */ + ret = readl_poll_timeout(utmi->regs + USB2_PHY_PLL_CTRL_REG0, reg, + reg & PLL_READY, + PLL_LOCK_DELAY_US, PLL_LOCK_TIMEOUT_US); + if (ret) + dev_err(dev, "Failed to lock USB2 PLL\n"); + + return ret; +} + +static int mvebu_a3700_utmi_phy_power_off(struct phy *phy) +{ + struct mvebu_a3700_utmi *utmi = phy_get_drvdata(phy); + int usb32 = utmi->caps->usb32; + u32 reg; + + /* Disable PHY pull-up and enable USB2 suspend */ + reg = readl(utmi->regs + USB2_PHY_CTRL(usb32)); + reg &= ~(RB_USB2PHY_PU | 
RB_USB2PHY_SUSPM(usb32)); + writel(reg, utmi->regs + USB2_PHY_CTRL(usb32)); + + /* Power down OTG module */ + if (usb32) { + reg = readl(utmi->regs + USB2_PHY_OTG_CTRL); + reg &= ~PHY_PU_OTG; + writel(reg, utmi->regs + USB2_PHY_OTG_CTRL); + } + + return 0; +} + +static const struct phy_ops mvebu_a3700_utmi_phy_ops = { + .power_on = mvebu_a3700_utmi_phy_power_on, + .power_off = mvebu_a3700_utmi_phy_power_off, + .owner = THIS_MODULE, +}; + +static const struct mvebu_a3700_utmi_caps mvebu_a3700_utmi_otg_phy_caps = { + .usb32 = true, + .ops = &mvebu_a3700_utmi_phy_ops, +}; + +static const struct mvebu_a3700_utmi_caps mvebu_a3700_utmi_host_phy_caps = { + .usb32 = false, + .ops = &mvebu_a3700_utmi_phy_ops, +}; + +static const struct of_device_id mvebu_a3700_utmi_of_match[] = { + { + .compatible = "marvell,a3700-utmi-otg-phy", + .data = &mvebu_a3700_utmi_otg_phy_caps, + }, + { + .compatible = "marvell,a3700-utmi-host-phy", + .data = &mvebu_a3700_utmi_host_phy_caps, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mvebu_a3700_utmi_of_match); + +static int mvebu_a3700_utmi_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mvebu_a3700_utmi *utmi; + struct phy_provider *provider; + struct resource *res; + + utmi = devm_kzalloc(dev, sizeof(*utmi), GFP_KERNEL); + if (!utmi) + return -ENOMEM; + + /* Get UTMI memory region */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Missing UTMI PHY memory resource\n"); + return -ENODEV; + } + + utmi->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(utmi->regs)) + return PTR_ERR(utmi->regs); + + /* Get miscellaneous Host/PHY region */ + utmi->usb_misc = syscon_regmap_lookup_by_phandle(dev->of_node, + "marvell,usb-misc-reg"); + if (IS_ERR(utmi->usb_misc)) { + dev_err(dev, + "Missing USB misc purpose system controller\n"); + return PTR_ERR(utmi->usb_misc); + } + + /* Retrieve PHY capabilities */ + utmi->caps = of_device_get_match_data(dev); + + /* Instantiate the PHY */ + utmi->phy = devm_phy_create(dev, NULL, utmi->caps->ops); + if (IS_ERR(utmi->phy)) { + dev_err(dev, "Failed to create the UTMI PHY\n"); + return PTR_ERR(utmi->phy); + } + + phy_set_drvdata(utmi->phy, utmi); + + /* Ensure the PHY is powered off */ + utmi->caps->ops->power_off(utmi->phy); + + provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(provider); +} + +static struct platform_driver mvebu_a3700_utmi_driver = { + .probe = mvebu_a3700_utmi_phy_probe, + .driver = { + .name = "mvebu-a3700-utmi-phy", + .owner = THIS_MODULE, + .of_match_table = mvebu_a3700_utmi_of_match, + }, +}; +module_platform_driver(mvebu_a3700_utmi_driver); + +MODULE_AUTHOR("Igal Liberman "); +MODULE_AUTHOR("Miquel Raynal "); +MODULE_DESCRIPTION("Marvell EBU A3700 UTMI PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c index 187cccde53b5..d98e0451f6a1 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c @@ -580,8 +580,6 @@ static struct phy *mvebu_comphy_xlate(struct device *dev, return phy; lane = phy_get_drvdata(phy); - if (lane->port >= 0) - return ERR_PTR(-EBUSY); lane->port = args->args[0]; return phy; diff --git a/drivers/phy/marvell/phy-mvebu-sata.c b/drivers/phy/marvell/phy-mvebu-sata.c index 768ce92e81ce..369fece2be7a 100644 --- a/drivers/phy/marvell/phy-mvebu-sata.c +++ b/drivers/phy/marvell/phy-mvebu-sata.c @@ -10,7 +10,7 @@ */ #include -#include +#include 
#include #include #include @@ -122,7 +122,6 @@ static const struct of_device_id phy_mvebu_sata_of_match[] = { { .compatible = "marvell,mvebu-sata-phy" }, { }, }; -MODULE_DEVICE_TABLE(of, phy_mvebu_sata_of_match); static struct platform_driver phy_mvebu_sata_driver = { .probe = phy_mvebu_sata_probe, @@ -131,8 +130,4 @@ static struct platform_driver phy_mvebu_sata_driver = { .of_match_table = phy_mvebu_sata_of_match, } }; -module_platform_driver(phy_mvebu_sata_driver); - -MODULE_AUTHOR("Andrew Lunn "); -MODULE_DESCRIPTION("Marvell MVEBU SATA PHY driver"); -MODULE_LICENSE("GPL v2"); +builtin_platform_driver(phy_mvebu_sata_driver); diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index 465fa1b91a5f..14e0551cd319 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -65,12 +65,12 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, */ cfg->hs_trail = max(4 * 8 * ui, 60000 + 4 * 4 * ui); - cfg->init = 100000000; + cfg->init = 100; cfg->lpx = 60000; cfg->ta_get = 5 * cfg->lpx; cfg->ta_go = 4 * cfg->lpx; cfg->ta_sure = 2 * cfg->lpx; - cfg->wakeup = 1000000000; + cfg->wakeup = 1000; cfg->hs_clk_rate = hs_clk_rate; cfg->lanes = lanes; @@ -143,7 +143,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg) if (cfg->hs_trail < max(8 * ui, 60000 + 4 * ui)) return -EINVAL; - if (cfg->init < 100000000) + if (cfg->init < 100) return -EINVAL; if (cfg->lpx < 50000) @@ -158,7 +158,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg) if (cfg->ta_sure < cfg->lpx || cfg->ta_sure > (2 * cfg->lpx)) return -EINVAL; - if (cfg->wakeup < 1000000000) + if (cfg->wakeup < 1000) return -EINVAL; return 0; diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index 19b05e824ee4..cb38f6e8614c 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -1112,14 +1112,4 @@ static int __init phy_core_init(void) return 0; } -module_init(phy_core_init); - -static void __exit phy_core_exit(void) -{ - class_destroy(phy_class); -} -module_exit(phy_core_exit); - -MODULE_DESCRIPTION("Generic PHY Framework"); -MODULE_AUTHOR("Kishon Vijay Abraham I "); -MODULE_LICENSE("GPL v2"); +device_initcall(phy_core_init); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index b4006818e1b6..08d6f6f7f039 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -687,6 +687,116 @@ static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = { QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02), }; +static const struct qmp_phy_init_tbl msm8998_usb3_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_INITVAL, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_MODE, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07), +}; + +static const struct qmp_phy_init_tbl msm8998_usb3_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x00), +}; + +static const struct qmp_phy_init_tbl msm8998_usb3_rx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x43), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_GAIN, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_ENABLES, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_VGA_CAL_CNTRL2, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05), +}; + +static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = { + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b), + 
QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x8a), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86), + QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13), +}; + + /* struct qmp_phy_cfg - per-PHY initialization config */ struct qmp_phy_cfg { /* phy-type - PCIE/UFS/USB */ @@ -1036,6 +1146,33 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = { .no_pcs_sw_reset = true, }; +static const struct qmp_phy_cfg msm8998_usb3phy_cfg = { + .type = PHY_TYPE_USB3, + .nlanes = 1, + + .serdes_tbl = msm8998_usb3_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(msm8998_usb3_serdes_tbl), + .tx_tbl = msm8998_usb3_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(msm8998_usb3_tx_tbl), + .rx_tbl = msm8998_usb3_rx_tbl, + .rx_tbl_num = ARRAY_SIZE(msm8998_usb3_rx_tbl), + .pcs_tbl = msm8998_usb3_pcs_tbl, + .pcs_tbl_num = ARRAY_SIZE(msm8998_usb3_pcs_tbl), + .clk_list = msm8996_phy_clk_l, + .num_clks = ARRAY_SIZE(msm8996_phy_clk_l), + .reset_list = msm8996_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .start_ctrl = SERDES_START | PCS_START, + .pwrdn_ctrl = SW_PWRDN, + .mask_pcs_ready = PHYSTATUS, + + .is_dual_lane_phy = true, +}; + static void qcom_qmp_phy_configure(void __iomem *base, const unsigned int *regs, const struct qmp_phy_init_tbl tbl[], @@ -1735,6 +1872,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { }, { .compatible = "qcom,msm8996-qmp-usb3-phy", .data = &msm8996_usb3phy_cfg, + }, { + .compatible = "qcom,msm8998-qmp-ufs-phy", + .data = &sdm845_ufsphy_cfg, }, { .compatible = "qcom,ipq8074-qmp-pcie-phy", .data = &ipq8074_pciephy_cfg, @@ -1747,6 +1887,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { }, { .compatible = "qcom,sdm845-qmp-ufs-phy", .data = &sdm845_ufsphy_cfg, + }, { + .compatible = "qcom,msm8998-qmp-usb3-phy", + .data = &msm8998_usb3phy_cfg, }, { }, }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index d201cc307151..a1b6cdee9a08 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -174,6 +174,7 @@ #define QSERDES_V3_COM_DIV_FRAC_START1_MODE1 0x0c4 #define QSERDES_V3_COM_DIV_FRAC_START2_MODE1 0x0c8 #define QSERDES_V3_COM_DIV_FRAC_START3_MODE1 0x0cc +#define 
QSERDES_V3_COM_INTEGLOOP_INITVAL 0x0d0 #define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0 0x0d8 #define QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0 0x0dc #define QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1 0x0e0 @@ -201,6 +202,7 @@ #define QSERDES_V3_COM_DEBUG_BUS2 0x170 #define QSERDES_V3_COM_DEBUG_BUS3 0x174 #define QSERDES_V3_COM_DEBUG_BUS_SEL 0x178 +#define QSERDES_V3_COM_CMN_MODE 0x184 /* Only for QMP V3 PHY - TX registers */ #define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX 0x044 @@ -211,6 +213,7 @@ #define QSERDES_V3_TX_RCV_DETECT_LVL_2 0x0a4 /* Only for QMP V3 PHY - RX registers */ +#define QSERDES_V3_RX_UCDR_FO_GAIN 0x008 #define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c #define QSERDES_V3_RX_UCDR_SO_GAIN 0x014 #define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024 @@ -219,6 +222,7 @@ #define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030 #define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034 #define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c +#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH 0x040 #define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044 #define QSERDES_V3_RX_RX_TERM_BW 0x07c #define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c index 9177989f22d1..8fd7ce139772 100644 --- a/drivers/phy/qualcomm/phy-qcom-qusb2.c +++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c @@ -152,6 +152,31 @@ static const struct qusb2_phy_init_tbl msm8996_init_tbl[] = { QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_PWR_CTRL, 0x00), }; +static const unsigned int msm8998_regs_layout[] = { + [QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8, + [QUSB2PHY_PLL_STATUS] = 0x1a0, + [QUSB2PHY_PORT_TUNE1] = 0x23c, + [QUSB2PHY_PORT_TUNE2] = 0x240, + [QUSB2PHY_PORT_TUNE3] = 0x244, + [QUSB2PHY_PORT_TUNE4] = 0x248, + [QUSB2PHY_PORT_TEST1] = 0x24c, + [QUSB2PHY_PORT_TEST2] = 0x250, + [QUSB2PHY_PORT_POWERDOWN] = 0x210, + [QUSB2PHY_INTR_CTRL] = 0x22c, +}; + +static const struct qusb2_phy_init_tbl msm8998_init_tbl[] = { + QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_ANALOG_CONTROLS_TWO, 0x13), + QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CLOCK_INVERTERS, 0x7c), + QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_CMODE, 0x80), + QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_LOCK_DELAY, 0x0a), + + QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE1, 0xa5), + QUSB2_PHY_INIT_CFG_L(QUSB2PHY_PORT_TUNE2, 0x09), + + QUSB2_PHY_INIT_CFG(QUSB2PHY_PLL_DIGITAL_TIMERS_TWO, 0x19), +}; + static const unsigned int sdm845_regs_layout[] = { [QUSB2PHY_PLL_CORE_INPUT_OVERRIDE] = 0xa8, [QUSB2PHY_PLL_STATUS] = 0x1a0, @@ -221,6 +246,18 @@ static const struct qusb2_phy_cfg msm8996_phy_cfg = { .autoresume_en = BIT(3), }; +static const struct qusb2_phy_cfg msm8998_phy_cfg = { + .tbl = msm8998_init_tbl, + .tbl_num = ARRAY_SIZE(msm8998_init_tbl), + .regs = msm8998_regs_layout, + + .disable_ctrl = POWER_DOWN, + .mask_core_ready = CORE_READY_STATUS, + .has_pll_override = true, + .autoresume_en = BIT(0), + .update_tune1_with_efuse = true, +}; + static const struct qusb2_phy_cfg sdm845_phy_cfg = { .tbl = sdm845_init_tbl, .tbl_num = ARRAY_SIZE(sdm845_init_tbl), @@ -733,6 +770,9 @@ static const struct of_device_id qusb2_phy_of_match_table[] = { { .compatible = "qcom,msm8996-qusb2-phy", .data = &msm8996_phy_cfg, + }, { + .compatible = "qcom,msm8998-qusb2-phy", + .data = &msm8998_phy_cfg, }, { .compatible = "qcom,sdm845-qusb2-phy", .data = &sdm845_phy_cfg, diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h index 681644e43248..f798fb64de94 100644 --- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h +++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h @@ -23,24 +23,7 @@ #include #include 
#include - -#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \ -({ \ - ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ - might_sleep_if(timeout_us); \ - for (;;) { \ - (val) = readl(addr); \ - if (cond) \ - break; \ - if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ - (val) = readl(addr); \ - break; \ - } \ - if (sleep_us) \ - usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \ - } \ - (cond) ? 0 : -ETIMEDOUT; \ -}) +#include #define UFS_QCOM_PHY_CAL_ENTRY(reg, val) \ { \ diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c index 91fba60267a0..ba07121c3eff 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c @@ -55,16 +55,16 @@ enum rockchip_usb2phy_host_state { }; /** - * Different states involved in USB charger detection. - * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection + * enum usb_chg_state - Different states involved in USB charger detection. + * @USB_CHG_STATE_UNDEFINED: USB charger is not connected or detection * process is not yet started. - * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact. - * USB_CHG_STATE_DCD_DONE Data pin contact is detected. - * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects + * @USB_CHG_STATE_WAIT_FOR_DCD: Waiting for Data pins contact. + * @USB_CHG_STATE_DCD_DONE: Data pin contact is detected. + * @USB_CHG_STATE_PRIMARY_DONE: Primary detection is completed (Detects * between SDP and DCP/CDP). - * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects - * between DCP and CDP). - * USB_CHG_STATE_DETECTED USB charger type is determined. + * @USB_CHG_STATE_SECONDARY_DONE: Secondary detection is completed (Detects + * between DCP and CDP). + * @USB_CHG_STATE_DETECTED: USB charger type is determined. */ enum usb_chg_state { USB_CHG_STATE_UNDEFINED = 0, @@ -94,7 +94,7 @@ struct usb2phy_reg { }; /** - * struct rockchip_chg_det_reg: usb charger detect registers + * struct rockchip_chg_det_reg - usb charger detect registers * @cp_det: charging port detected successfully. * @dcp_det: dedicated charging port detected successfully. * @dp_det: assert data pin connect successfully. @@ -120,7 +120,7 @@ struct rockchip_chg_det_reg { }; /** - * struct rockchip_usb2phy_port_cfg: usb-phy port configuration. + * struct rockchip_usb2phy_port_cfg - usb-phy port configuration. * @phy_sus: phy suspend register. * @bvalid_det_en: vbus valid rise detection enable register. * @bvalid_det_st: vbus valid rise detection status register. @@ -148,10 +148,11 @@ struct rockchip_usb2phy_port_cfg { }; /** - * struct rockchip_usb2phy_cfg: usb-phy configuration. + * struct rockchip_usb2phy_cfg - usb-phy configuration. * @reg: the address offset of grf for usb-phy config. * @num_ports: specify how many ports that the phy has. * @clkout_ctl: keep on/turn off output clk of phy. + * @port_cfgs: usb-phy port configurations. * @chg_det: charger detection registers. */ struct rockchip_usb2phy_cfg { @@ -163,12 +164,10 @@ struct rockchip_usb2phy_cfg { }; /** - * struct rockchip_usb2phy_port: usb-phy port data. + * struct rockchip_usb2phy_port - usb-phy port data. + * @phy: generic phy. * @port_id: flag for otg port or host port. * @suspended: phy suspended flag. - * @utmi_avalid: utmi avalid status usage flag. - * true - use avalid to get vbus status - * flase - use bvalid to get vbus status * @vbus_attached: otg device vbus status. 
* @bvalid_irq: IRQ number assigned for vbus valid rise detection. * @ls_irq: IRQ number assigned for linestate detection. @@ -178,7 +177,7 @@ struct rockchip_usb2phy_cfg { * @chg_work: charge detect work. * @otg_sm_work: OTG state machine work. * @sm_work: HOST state machine work. - * @phy_cfg: port register configuration, assigned by driver data. + * @port_cfg: port register configuration, assigned by driver data. * @event_nb: hold event notification callback. * @state: define OTG enumeration states before device reset. * @mode: the dr_mode of the controller. @@ -187,7 +186,6 @@ struct rockchip_usb2phy_port { struct phy *phy; unsigned int port_id; bool suspended; - bool utmi_avalid; bool vbus_attached; int bvalid_irq; int ls_irq; @@ -203,12 +201,13 @@ struct rockchip_usb2phy_port { }; /** - * struct rockchip_usb2phy: usb2.0 phy driver data. + * struct rockchip_usb2phy - usb2.0 phy driver data. + * @dev: pointer to device. * @grf: General Register Files regmap. * @usbgrf: USB General Register Files regmap. * @clk: clock struct of phy input clk. * @clk480m: clock struct of phy output clk. - * @clk_hw: clock struct of phy output clk management. + * @clk480m_hw: clock struct of phy output clk management. * @chg_state: states involved in USB charger detection. * @chg_type: USB charger types. * @dcd_retries: The retry count used to track Data contact @@ -542,12 +541,8 @@ static void rockchip_usb2phy_otg_sm_work(struct work_struct *work) unsigned long delay; bool vbus_attach, sch_work, notify_charger; - if (rport->utmi_avalid) - vbus_attach = property_enabled(rphy->grf, - &rport->port_cfg->utmi_avalid); - else - vbus_attach = property_enabled(rphy->grf, - &rport->port_cfg->utmi_bvalid); + vbus_attach = property_enabled(rphy->grf, + &rport->port_cfg->utmi_bvalid); sch_work = false; notify_charger = false; @@ -1021,9 +1016,6 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy, INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work); INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work); - rport->utmi_avalid = - of_property_read_bool(child_np, "rockchip,utmi-avalid"); - /* * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate * interrupts muxed together, so probe the otg-mux interrupt first, diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig index c4709ed7fb0e..103efc456a12 100644 --- a/drivers/phy/ti/Kconfig +++ b/drivers/phy/ti/Kconfig @@ -33,12 +33,11 @@ config OMAP_CONTROL_PHY config OMAP_USB2 tristate "OMAP USB2 PHY Driver" - depends on ARCH_OMAP2PLUS + depends on ARCH_OMAP2PLUS || ARCH_K3 depends on USB_SUPPORT select GENERIC_PHY select USB_PHY - select OMAP_CONTROL_PHY - depends on OMAP_OCP2SCP + select OMAP_CONTROL_PHY if ARCH_OMAP2PLUS help Enable this to support the transceiver that is part of SOC. This driver takes care of all the PHY functionality apart from comparator. @@ -50,7 +49,6 @@ config TI_PIPE3 depends on ARCH_OMAP2PLUS || COMPILE_TEST select GENERIC_PHY select OMAP_CONTROL_PHY - depends on OMAP_OCP2SCP help Enable this to support the PIPE3 PHY that is part of TI SOCs. This driver takes care of all the PHY functionality apart from comparator. 
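The phy-omap-usb2.c diff that follows converts clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare() and moves the clock lookups to the top of probe(), so that -EPROBE_DEFER from devm_clk_get() is returned before the PHY and provider are registered. A generic sketch of that probe pattern (driver, device and clock names here are illustrative, not the omap-usb2 code itself):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *wkupclk;

	/*
	 * Resolve the clock first: if its provider is not ready yet,
	 * return -EPROBE_DEFER before creating the PHY or registering
	 * a provider, so there is nothing to unwind.
	 */
	wkupclk = devm_clk_get(&pdev->dev, "wkupclk");
	if (IS_ERR(wkupclk)) {
		if (PTR_ERR(wkupclk) != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get wkupclk\n");
		return PTR_ERR(wkupclk);
	}

	/*
	 * The clock is left unprepared here; the PHY's power_on() op later
	 * calls clk_prepare_enable() and power_off() calls
	 * clk_disable_unprepare(), matching the conversion in the hunk below.
	 */

	/* ...devm_phy_create() / devm_of_phy_provider_register() go here... */

	return 0;
}
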
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index fe909fd8144f..e871f2983a0e 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -36,6 +36,10 @@ #define USB2PHY_DISCON_BYP_LATCH (1 << 31) #define USB2PHY_ANA_CONFIG1 0x4c +#define AM654_USB2_OTG_PD BIT(8) +#define AM654_USB2_VBUS_DET_EN BIT(5) +#define AM654_USB2_VBUSVALID_DET_EN BIT(4) + /** * omap_usb2_set_comparator - links the comparator present in the sytem with * this phy @@ -135,9 +139,9 @@ static int omap_usb_power_on(struct phy *x) static int omap_usb2_disable_clocks(struct omap_usb *phy) { - clk_disable(phy->wkupclk); + clk_disable_unprepare(phy->wkupclk); if (!IS_ERR(phy->optclk)) - clk_disable(phy->optclk); + clk_disable_unprepare(phy->optclk); return 0; } @@ -146,14 +150,14 @@ static int omap_usb2_enable_clocks(struct omap_usb *phy) { int ret; - ret = clk_enable(phy->wkupclk); + ret = clk_prepare_enable(phy->wkupclk); if (ret < 0) { dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); goto err0; } if (!IS_ERR(phy->optclk)) { - ret = clk_enable(phy->optclk); + ret = clk_prepare_enable(phy->optclk); if (ret < 0) { dev_err(phy->dev, "Failed to enable optclk %d\n", ret); goto err1; @@ -245,6 +249,15 @@ static const struct usb_phy_data am437x_usb2_data = { .power_off = AM437X_USB2_PHY_PD | AM437X_USB2_OTG_PD, }; +static const struct usb_phy_data am654_usb2_data = { + .label = "am654_usb2", + .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT, + .mask = AM654_USB2_OTG_PD | AM654_USB2_VBUS_DET_EN | + AM654_USB2_VBUSVALID_DET_EN, + .power_on = AM654_USB2_VBUS_DET_EN | AM654_USB2_VBUSVALID_DET_EN, + .power_off = AM654_USB2_OTG_PD, +}; + static const struct of_device_id omap_usb2_id_table[] = { { .compatible = "ti,omap-usb2", @@ -266,6 +279,10 @@ static const struct of_device_id omap_usb2_id_table[] = { .compatible = "ti,am437x-usb2", .data = &am437x_usb2_data, }, + { + .compatible = "ti,am654-usb2", + .data = &am654_usb2_data, + }, {}, }; MODULE_DEVICE_TABLE(of, omap_usb2_id_table); @@ -346,63 +363,72 @@ static int omap_usb2_probe(struct platform_device *pdev) } } - otg->set_host = omap_usb_set_host; - otg->set_peripheral = omap_usb_set_peripheral; - if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS) - otg->set_vbus = omap_usb_set_vbus; - if (phy_data->flags & OMAP_USB2_HAS_START_SRP) - otg->start_srp = omap_usb_start_srp; - otg->usb_phy = &phy->phy; - - platform_set_drvdata(pdev, phy); - pm_runtime_enable(phy->dev); - - generic_phy = devm_phy_create(phy->dev, NULL, &ops); - if (IS_ERR(generic_phy)) { - pm_runtime_disable(phy->dev); - return PTR_ERR(generic_phy); - } - - phy_set_drvdata(generic_phy, phy); - omap_usb_power_off(generic_phy); - - phy_provider = devm_of_phy_provider_register(phy->dev, - of_phy_simple_xlate); - if (IS_ERR(phy_provider)) { - pm_runtime_disable(phy->dev); - return PTR_ERR(phy_provider); - } phy->wkupclk = devm_clk_get(phy->dev, "wkupclk"); if (IS_ERR(phy->wkupclk)) { - dev_warn(&pdev->dev, "unable to get wkupclk, trying old name\n"); + if (PTR_ERR(phy->wkupclk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + dev_warn(&pdev->dev, "unable to get wkupclk %ld, trying old name\n", + PTR_ERR(phy->wkupclk)); phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); + if (IS_ERR(phy->wkupclk)) { - dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); - pm_runtime_disable(phy->dev); + if (PTR_ERR(phy->wkupclk) != -EPROBE_DEFER) + dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); return PTR_ERR(phy->wkupclk); } else { dev_warn(&pdev->dev, "found 
usb_phy_cm_clk32k, please fix DTS\n"); } } - clk_prepare(phy->wkupclk); phy->optclk = devm_clk_get(phy->dev, "refclk"); if (IS_ERR(phy->optclk)) { + if (PTR_ERR(phy->optclk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_dbg(&pdev->dev, "unable to get refclk, trying old name\n"); phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m"); + if (IS_ERR(phy->optclk)) { - dev_dbg(&pdev->dev, - "unable to get usb_otg_ss_refclk960m\n"); + if (PTR_ERR(phy->optclk) != -EPROBE_DEFER) { + dev_dbg(&pdev->dev, + "unable to get usb_otg_ss_refclk960m\n"); + } } else { dev_warn(&pdev->dev, "found usb_otg_ss_refclk960m, please fix DTS\n"); } } - if (!IS_ERR(phy->optclk)) - clk_prepare(phy->optclk); + otg->set_host = omap_usb_set_host; + otg->set_peripheral = omap_usb_set_peripheral; + if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS) + otg->set_vbus = omap_usb_set_vbus; + if (phy_data->flags & OMAP_USB2_HAS_START_SRP) + otg->start_srp = omap_usb_start_srp; + otg->usb_phy = &phy->phy; + + platform_set_drvdata(pdev, phy); + pm_runtime_enable(phy->dev); + + generic_phy = devm_phy_create(phy->dev, NULL, &ops); + if (IS_ERR(generic_phy)) { + pm_runtime_disable(phy->dev); + return PTR_ERR(generic_phy); + } + + phy_set_drvdata(generic_phy, phy); + omap_usb_power_off(generic_phy); + + phy_provider = devm_of_phy_provider_register(phy->dev, + of_phy_simple_xlate); + if (IS_ERR(phy_provider)) { + pm_runtime_disable(phy->dev); + return PTR_ERR(phy_provider); + } + usb_add_phy_dev(&phy->phy); @@ -413,9 +439,6 @@ static int omap_usb2_remove(struct platform_device *pdev) { struct omap_usb *phy = platform_get_drvdata(pdev); - clk_unprepare(phy->wkupclk); - if (!IS_ERR(phy->optclk)) - clk_unprepare(phy->optclk); usb_remove_phy(&phy->phy); pm_runtime_disable(phy->dev); diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index f180aa44a422..183d1ffe6a75 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -341,6 +341,7 @@ static const struct gpio_chip bcm2835_gpio_chip = { .get_direction = bcm2835_gpio_get_direction, .get = bcm2835_gpio_get, .set = bcm2835_gpio_set, + .set_config = gpiochip_generic_config, .base = -1, .ngpio = BCM2835_NUM_GPIOS, .can_sleep = false, @@ -960,7 +961,7 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev, break; default: - return -EINVAL; + return -ENOTSUPP; } /* switch param type */ } /* for each config */ @@ -969,6 +970,7 @@ static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev, } static const struct pinconf_ops bcm2835_pinconf_ops = { + .is_generic = true, .pin_config_get = bcm2835_pinconf_get, .pin_config_set = bcm2835_pinconf_set, }; diff --git a/drivers/pinctrl/berlin/pinctrl-as370.c b/drivers/pinctrl/berlin/pinctrl-as370.c index d2bb811fc5fa..44f8ccdbeeff 100644 --- a/drivers/pinctrl/berlin/pinctrl-as370.c +++ b/drivers/pinctrl/berlin/pinctrl-as370.c @@ -36,13 +36,13 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("I2S1_DO2", 0x0, 0x3, 0x0c, BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* CORE RSTB */ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO2 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM4 */ BERLIN_PINCTRL_FUNCTION(0x3, "gpio"), /* GPIO4 */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG4 */ BERLIN_PINCTRL_GROUP("I2S1_DO3", 0x0, 0x3, 0x0f, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */ BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"), + BERLIN_PINCTRL_FUNCTION(0x2, 
"pwm"), /* PWM5 */ BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */ @@ -61,24 +61,24 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("I2S2_DI0", 0x0, 0x3, 0x1b, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO9 */ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI0 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm2"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM2 */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG9 */ BERLIN_PINCTRL_GROUP("I2S2_DI1", 0x4, 0x3, 0x00, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO10 */ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI1 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm3"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM3 */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG10 */ BERLIN_PINCTRL_GROUP("I2S2_DI2", 0x4, 0x3, 0x03, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO11 */ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI2 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM6 */ BERLIN_PINCTRL_FUNCTION(0x3, "spdific"), /* SPDIFIC */ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG11 */ BERLIN_PINCTRL_GROUP("I2S2_DI3", 0x4, 0x3, 0x06, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO12 */ BERLIN_PINCTRL_FUNCTION(0x1, "i2s2"), /* DI3 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM7 */ BERLIN_PINCTRL_FUNCTION(0x3, "spdifia"), /* SPDIFIA */ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG12 */ @@ -98,14 +98,14 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("PDM_DI2", 0x4, 0x3, 0x12, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO16 */ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI2 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm4"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM4 */ BERLIN_PINCTRL_FUNCTION(0x3, "spdifid"), /* SPDIFID */ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG16 */ BERLIN_PINCTRL_GROUP("PDM_DI3", 0x4, 0x3, 0x15, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO17 */ BERLIN_PINCTRL_FUNCTION(0x1, "pdm"), /* DI3 */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm5"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */ BERLIN_PINCTRL_FUNCTION(0x3, "spdifi"), /* SPDIFI */ BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG17 */ @@ -139,11 +139,11 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_FUNCTION(0x1, "emmc")), /* DATA7 */ BERLIN_PINCTRL_GROUP("NAND_ALE", 0x8, 0x3, 0x12, BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* ALE */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM6 */ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO18 */ BERLIN_PINCTRL_GROUP("NAND_CLE", 0x8, 0x3, 0x15, BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* CLE */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM7 */ BERLIN_PINCTRL_FUNCTION(0x3, "gpio")), /* GPIO19 */ BERLIN_PINCTRL_GROUP("NAND_WEn", 0x8, 0x3, 0x18, BERLIN_PINCTRL_FUNCTION(0x0, "nand"), /* WEn */ @@ -169,12 +169,12 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("SPI1_SS1n", 0xc, 0x3, 0x0c, BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS1n */ BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO26 */ - BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")), + BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM2 
*/ BERLIN_PINCTRL_GROUP("SPI1_SS2n", 0xc, 0x3, 0x0f, BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* RXD */ BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS2n */ BERLIN_PINCTRL_FUNCTION(0x2, "gpio"), /* GPIO27 */ - BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")), + BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM3 */ BERLIN_PINCTRL_GROUP("SPI1_SS3n", 0xc, 0x3, 0x12, BERLIN_PINCTRL_FUNCTION(0x0, "uart0"), /* TXD */ BERLIN_PINCTRL_FUNCTION(0x1, "spi1"), /* SS3n */ @@ -182,11 +182,11 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("SPI1_SCLK", 0xc, 0x3, 0x15, BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SCLK */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO29 */ - BERLIN_PINCTRL_FUNCTION(0x3, "pwm4")), + BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM4 */ BERLIN_PINCTRL_GROUP("SPI1_SDO", 0xc, 0x3, 0x18, BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDO */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO30 */ - BERLIN_PINCTRL_FUNCTION(0x3, "pwm5")), + BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM5 */ BERLIN_PINCTRL_GROUP("SPI1_SDI", 0xc, 0x3, 0x1b, BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SDI */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio")), /* GPIO31 */ @@ -209,51 +209,51 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("TMS", 0x10, 0x3, 0x0f, BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TMS */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ - BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")), + BERLIN_PINCTRL_FUNCTION(0x4, "pwm")), /* PWM0 */ BERLIN_PINCTRL_GROUP("TDI", 0x10, 0x3, 0x12, BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDI */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO38 */ - BERLIN_PINCTRL_FUNCTION(0x4, "pwm1")), + BERLIN_PINCTRL_FUNCTION(0x4, "pwm")), /* PWM1 */ BERLIN_PINCTRL_GROUP("TDO", 0x10, 0x3, 0x15, BERLIN_PINCTRL_FUNCTION(0x0, "jtag"), /* TDO */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO39 */ - BERLIN_PINCTRL_FUNCTION(0x4, "pwm0")), + BERLIN_PINCTRL_FUNCTION(0x4, "pwm")), /* PWM0 */ BERLIN_PINCTRL_GROUP("PWM6", 0x10, 0x3, 0x18, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO40 */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm6")), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM6 */ BERLIN_PINCTRL_GROUP("PWM7", 0x10, 0x3, 0x1b, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO41 */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm7")), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM7 */ BERLIN_PINCTRL_GROUP("PWM0", 0x14, 0x3, 0x00, BERLIN_PINCTRL_FUNCTION(0x0, "por"), /* VDDCPUSOC RSTB */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm0"), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm"), /* PWM0 */ BERLIN_PINCTRL_FUNCTION(0x2, "gpio")), /* GPIO42 */ BERLIN_PINCTRL_GROUP("PWM1", 0x14, 0x3, 0x03, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO43 */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm1")), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM1 */ BERLIN_PINCTRL_GROUP("PWM2", 0x14, 0x3, 0x06, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO44 */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm2")), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM2 */ BERLIN_PINCTRL_GROUP("PWM3", 0x14, 0x3, 0x09, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO45 */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm3")), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM3 */ BERLIN_PINCTRL_GROUP("PWM4", 0x14, 0x3, 0x0c, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO46 */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm4")), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM4 */ BERLIN_PINCTRL_GROUP("PWM5", 0x14, 0x3, 0x0f, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO47 */ - BERLIN_PINCTRL_FUNCTION(0x1, "pwm5")), + BERLIN_PINCTRL_FUNCTION(0x1, "pwm")), /* PWM5 */ 
BERLIN_PINCTRL_GROUP("URT1_RTSn", 0x14, 0x3, 0x12, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO48 */ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* RTSn */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm6"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM6 */ BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SCL */ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG0 */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG18 */ BERLIN_PINCTRL_GROUP("URT1_CTSn", 0x14, 0x3, 0x15, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO49 */ BERLIN_PINCTRL_FUNCTION(0x1, "uart1"), /* CTSn */ - BERLIN_PINCTRL_FUNCTION(0x2, "pwm7"), + BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM7 */ BERLIN_PINCTRL_FUNCTION(0x3, "tw1a"), /* SDA */ BERLIN_PINCTRL_FUNCTION(0x4, "aio"), /* DBG1 */ BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG19 */ @@ -308,11 +308,11 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("SD0_CDn", 0x1c, 0x3, 0x00, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO62 */ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* CDn */ - BERLIN_PINCTRL_FUNCTION(0x3, "pwm2")), + BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM2 */ BERLIN_PINCTRL_GROUP("SD0_WP", 0x1c, 0x3, 0x03, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO63 */ BERLIN_PINCTRL_FUNCTION(0x1, "sd0"), /* WP */ - BERLIN_PINCTRL_FUNCTION(0x3, "pwm3")), + BERLIN_PINCTRL_FUNCTION(0x3, "pwm")), /* PWM3 */ }; static const struct berlin_pinctrl_desc as370_soc_pinctrl_data = { diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c index a5dda832024a..7c9694593f79 100644 --- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c +++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig index 72b869d888e2..0d8387851b87 100644 --- a/drivers/pinctrl/freescale/Kconfig +++ b/drivers/pinctrl/freescale/Kconfig @@ -122,6 +122,13 @@ config PINCTRL_IMX7ULP help Say Y here to enable the imx7ulp pinctrl driver +config PINCTRL_IMX8MM + bool "IMX8MM pinctrl driver" + depends on ARCH_MXC && ARM64 + select PINCTRL_IMX + help + Say Y here to enable the imx8mm pinctrl driver + config PINCTRL_IMX8MQ bool "IMX8MQ pinctrl driver" depends on ARCH_MXC && ARM64 @@ -129,9 +136,16 @@ config PINCTRL_IMX8MQ help Say Y here to enable the imx8mq pinctrl driver +config PINCTRL_IMX8QM + bool "IMX8QM pinctrl driver" + depends on IMX_SCU && ARCH_MXC && ARM64 + select PINCTRL_IMX_SCU + help + Say Y here to enable the imx8qm pinctrl driver + config PINCTRL_IMX8QXP bool "IMX8QXP pinctrl driver" - depends on ARCH_MXC && ARM64 + depends on IMX_SCU && ARCH_MXC && ARM64 select PINCTRL_IMX_SCU help Say Y here to enable the imx8qxp pinctrl driver diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile index 6ee398a3e406..02020a76bd9c 100644 --- a/drivers/pinctrl/freescale/Makefile +++ b/drivers/pinctrl/freescale/Makefile @@ -18,7 +18,9 @@ obj-$(CONFIG_PINCTRL_IMX6SX) += pinctrl-imx6sx.o obj-$(CONFIG_PINCTRL_IMX6UL) += pinctrl-imx6ul.o obj-$(CONFIG_PINCTRL_IMX7D) += pinctrl-imx7d.o obj-$(CONFIG_PINCTRL_IMX7ULP) += pinctrl-imx7ulp.o +obj-$(CONFIG_PINCTRL_IMX8MM) += pinctrl-imx8mm.o obj-$(CONFIG_PINCTRL_IMX8MQ) += pinctrl-imx8mq.o +obj-$(CONFIG_PINCTRL_IMX8QM) += pinctrl-imx8qm.o obj-$(CONFIG_PINCTRL_IMX8QXP) += pinctrl-imx8qxp.o obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mm.c 
b/drivers/pinctrl/freescale/pinctrl-imx8mm.c new file mode 100644 index 000000000000..6d1038af59f4 --- /dev/null +++ b/drivers/pinctrl/freescale/pinctrl-imx8mm.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2017-2018 NXP + */ + +#include +#include +#include +#include +#include + +#include "pinctrl-imx.h" + +enum imx8mm_pads { + MX8MM_PAD_RESERVE0 = 0, + MX8MM_PAD_RESERVE1 = 1, + MX8MM_PAD_RESERVE2 = 2, + MX8MM_PAD_RESERVE3 = 3, + MX8MM_PAD_RESERVE4 = 4, + MX8MM_PAD_RESERVE5 = 5, + MX8MM_PAD_RESERVE6 = 6, + MX8MM_PAD_RESERVE7 = 7, + MX8MM_PAD_RESERVE8 = 8, + MX8MM_PAD_RESERVE9 = 9, + MX8MM_IOMUXC_GPIO1_IO00 = 10, + MX8MM_IOMUXC_GPIO1_IO01 = 11, + MX8MM_IOMUXC_GPIO1_IO02 = 12, + MX8MM_IOMUXC_GPIO1_IO03 = 13, + MX8MM_IOMUXC_GPIO1_IO04 = 14, + MX8MM_IOMUXC_GPIO1_IO05 = 15, + MX8MM_IOMUXC_GPIO1_IO06 = 16, + MX8MM_IOMUXC_GPIO1_IO07 = 17, + MX8MM_IOMUXC_GPIO1_IO08 = 18, + MX8MM_IOMUXC_GPIO1_IO09 = 19, + MX8MM_IOMUXC_GPIO1_IO10 = 20, + MX8MM_IOMUXC_GPIO1_IO11 = 21, + MX8MM_IOMUXC_GPIO1_IO12 = 22, + MX8MM_IOMUXC_GPIO1_IO13 = 23, + MX8MM_IOMUXC_GPIO1_IO14 = 24, + MX8MM_IOMUXC_GPIO1_IO15 = 25, + MX8MM_IOMUXC_ENET_MDC = 26, + MX8MM_IOMUXC_ENET_MDIO = 27, + MX8MM_IOMUXC_ENET_TD3 = 28, + MX8MM_IOMUXC_ENET_TD2 = 29, + MX8MM_IOMUXC_ENET_TD1 = 30, + MX8MM_IOMUXC_ENET_TD0 = 31, + MX8MM_IOMUXC_ENET_TX_CTL = 32, + MX8MM_IOMUXC_ENET_TXC = 33, + MX8MM_IOMUXC_ENET_RX_CTL = 34, + MX8MM_IOMUXC_ENET_RXC = 35, + MX8MM_IOMUXC_ENET_RD0 = 36, + MX8MM_IOMUXC_ENET_RD1 = 37, + MX8MM_IOMUXC_ENET_RD2 = 38, + MX8MM_IOMUXC_ENET_RD3 = 39, + MX8MM_IOMUXC_SD1_CLK = 40, + MX8MM_IOMUXC_SD1_CMD = 41, + MX8MM_IOMUXC_SD1_DATA0 = 42, + MX8MM_IOMUXC_SD1_DATA1 = 43, + MX8MM_IOMUXC_SD1_DATA2 = 44, + MX8MM_IOMUXC_SD1_DATA3 = 45, + MX8MM_IOMUXC_SD1_DATA4 = 46, + MX8MM_IOMUXC_SD1_DATA5 = 47, + MX8MM_IOMUXC_SD1_DATA6 = 48, + MX8MM_IOMUXC_SD1_DATA7 = 49, + MX8MM_IOMUXC_SD1_RESET_B = 50, + MX8MM_IOMUXC_SD1_STROBE = 51, + MX8MM_IOMUXC_SD2_CD_B = 52, + MX8MM_IOMUXC_SD2_CLK = 53, + MX8MM_IOMUXC_SD2_CMD = 54, + MX8MM_IOMUXC_SD2_DATA0 = 55, + MX8MM_IOMUXC_SD2_DATA1 = 56, + MX8MM_IOMUXC_SD2_DATA2 = 57, + MX8MM_IOMUXC_SD2_DATA3 = 58, + MX8MM_IOMUXC_SD2_RESET_B = 59, + MX8MM_IOMUXC_SD2_WP = 60, + MX8MM_IOMUXC_NAND_ALE = 61, + MX8MM_IOMUXC_NAND_CE0 = 62, + MX8MM_IOMUXC_NAND_CE1 = 63, + MX8MM_IOMUXC_NAND_CE2 = 64, + MX8MM_IOMUXC_NAND_CE3 = 65, + MX8MM_IOMUXC_NAND_CLE = 66, + MX8MM_IOMUXC_NAND_DATA00 = 67, + MX8MM_IOMUXC_NAND_DATA01 = 68, + MX8MM_IOMUXC_NAND_DATA02 = 69, + MX8MM_IOMUXC_NAND_DATA03 = 70, + MX8MM_IOMUXC_NAND_DATA04 = 71, + MX8MM_IOMUXC_NAND_DATA05 = 72, + MX8MM_IOMUXC_NAND_DATA06 = 73, + MX8MM_IOMUXC_NAND_DATA07 = 74, + MX8MM_IOMUXC_NAND_DQS = 75, + MX8MM_IOMUXC_NAND_RE_B = 76, + MX8MM_IOMUXC_NAND_READY_B = 77, + MX8MM_IOMUXC_NAND_WE_B = 78, + MX8MM_IOMUXC_NAND_WP_B = 79, + MX8MM_IOMUXC_SAI5_RXFS = 80, + MX8MM_IOMUXC_SAI5_RXC = 81, + MX8MM_IOMUXC_SAI5_RXD0 = 82, + MX8MM_IOMUXC_SAI5_RXD1 = 83, + MX8MM_IOMUXC_SAI5_RXD2 = 84, + MX8MM_IOMUXC_SAI5_RXD3 = 85, + MX8MM_IOMUXC_SAI5_MCLK = 86, + MX8MM_IOMUXC_SAI1_RXFS = 87, + MX8MM_IOMUXC_SAI1_RXC = 88, + MX8MM_IOMUXC_SAI1_RXD0 = 89, + MX8MM_IOMUXC_SAI1_RXD1 = 90, + MX8MM_IOMUXC_SAI1_RXD2 = 91, + MX8MM_IOMUXC_SAI1_RXD3 = 92, + MX8MM_IOMUXC_SAI1_RXD4 = 93, + MX8MM_IOMUXC_SAI1_RXD5 = 94, + MX8MM_IOMUXC_SAI1_RXD6 = 95, + MX8MM_IOMUXC_SAI1_RXD7 = 96, + MX8MM_IOMUXC_SAI1_TXFS = 97, + MX8MM_IOMUXC_SAI1_TXC = 98, + MX8MM_IOMUXC_SAI1_TXD0 = 99, + MX8MM_IOMUXC_SAI1_TXD1 = 100, + MX8MM_IOMUXC_SAI1_TXD2 = 101, + MX8MM_IOMUXC_SAI1_TXD3 = 102, + MX8MM_IOMUXC_SAI1_TXD4 = 103, + 
MX8MM_IOMUXC_SAI1_TXD5 = 104, + MX8MM_IOMUXC_SAI1_TXD6 = 105, + MX8MM_IOMUXC_SAI1_TXD7 = 106, + MX8MM_IOMUXC_SAI1_MCLK = 107, + MX8MM_IOMUXC_SAI2_RXFS = 108, + MX8MM_IOMUXC_SAI2_RXC = 109, + MX8MM_IOMUXC_SAI2_RXD0 = 110, + MX8MM_IOMUXC_SAI2_TXFS = 111, + MX8MM_IOMUXC_SAI2_TXC = 112, + MX8MM_IOMUXC_SAI2_TXD0 = 113, + MX8MM_IOMUXC_SAI2_MCLK = 114, + MX8MM_IOMUXC_SAI3_RXFS = 115, + MX8MM_IOMUXC_SAI3_RXC = 116, + MX8MM_IOMUXC_SAI3_RXD = 117, + MX8MM_IOMUXC_SAI3_TXFS = 118, + MX8MM_IOMUXC_SAI3_TXC = 119, + MX8MM_IOMUXC_SAI3_TXD = 120, + MX8MM_IOMUXC_SAI3_MCLK = 121, + MX8MM_IOMUXC_SPDIF_TX = 122, + MX8MM_IOMUXC_SPDIF_RX = 123, + MX8MM_IOMUXC_SPDIF_EXT_CLK = 124, + MX8MM_IOMUXC_ECSPI1_SCLK = 125, + MX8MM_IOMUXC_ECSPI1_MOSI = 126, + MX8MM_IOMUXC_ECSPI1_MISO = 127, + MX8MM_IOMUXC_ECSPI1_SS0 = 128, + MX8MM_IOMUXC_ECSPI2_SCLK = 129, + MX8MM_IOMUXC_ECSPI2_MOSI = 130, + MX8MM_IOMUXC_ECSPI2_MISO = 131, + MX8MM_IOMUXC_ECSPI2_SS0 = 132, + MX8MM_IOMUXC_I2C1_SCL = 133, + MX8MM_IOMUXC_I2C1_SDA = 134, + MX8MM_IOMUXC_I2C2_SCL = 135, + MX8MM_IOMUXC_I2C2_SDA = 136, + MX8MM_IOMUXC_I2C3_SCL = 137, + MX8MM_IOMUXC_I2C3_SDA = 138, + MX8MM_IOMUXC_I2C4_SCL = 139, + MX8MM_IOMUXC_I2C4_SDA = 140, + MX8MM_IOMUXC_UART1_RXD = 141, + MX8MM_IOMUXC_UART1_TXD = 142, + MX8MM_IOMUXC_UART2_RXD = 143, + MX8MM_IOMUXC_UART2_TXD = 144, + MX8MM_IOMUXC_UART3_RXD = 145, + MX8MM_IOMUXC_UART3_TXD = 146, + MX8MM_IOMUXC_UART4_RXD = 147, + MX8MM_IOMUXC_UART4_TXD = 148, +}; + +/* Pad names for the pinmux subsystem */ +static const struct pinctrl_pin_desc imx8mm_pinctrl_pads[] = { + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE0), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE1), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE2), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE3), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE4), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE5), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE6), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE7), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE8), + IMX_PINCTRL_PIN(MX8MM_PAD_RESERVE9), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO00), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO01), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO02), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO03), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO04), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO05), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO06), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO07), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO08), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO09), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO10), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO11), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO12), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO13), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO14), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_GPIO1_IO15), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_MDC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_MDIO), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TD0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TX_CTL), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_TXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RX_CTL), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ENET_RD3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_CLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_CMD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA4), + 
IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA5), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA6), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_DATA7), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_RESET_B), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD1_STROBE), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_CD_B), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_CLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_CMD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_DATA3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_RESET_B), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SD2_WP), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_ALE), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CE3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_CLE), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA00), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA01), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA02), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA03), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA04), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA05), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA06), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DATA07), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_DQS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_RE_B), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_READY_B), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_WE_B), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_NAND_WP_B), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXFS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_RXD3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI5_MCLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXFS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD4), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD5), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD6), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_RXD7), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXFS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD1), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD2), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD3), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD4), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD5), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD6), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_TXD7), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI1_MCLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_RXFS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_RXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_RXD0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_TXFS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_TXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_TXD0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI2_MCLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_RXFS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_RXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_RXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_TXFS), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_TXC), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_TXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SAI3_MCLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SPDIF_TX), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SPDIF_RX), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_SPDIF_EXT_CLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_SCLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_MOSI), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_MISO), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI1_SS0), + 
IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_SCLK), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_MOSI), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_MISO), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_ECSPI2_SS0), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C1_SCL), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C1_SDA), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C2_SCL), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C2_SDA), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C3_SCL), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C3_SDA), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C4_SCL), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_I2C4_SDA), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART1_RXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART1_TXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART2_RXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART2_TXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART3_RXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART3_TXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART4_RXD), + IMX_PINCTRL_PIN(MX8MM_IOMUXC_UART4_TXD), +}; + +static const struct imx_pinctrl_soc_info imx8mm_pinctrl_info = { + .pins = imx8mm_pinctrl_pads, + .npins = ARRAY_SIZE(imx8mm_pinctrl_pads), + .gpr_compatible = "fsl,imx8mm-iomuxc-gpr", +}; + +static const struct of_device_id imx8mm_pinctrl_of_match[] = { + { .compatible = "fsl,imx8mm-iomuxc", .data = &imx8mm_pinctrl_info, }, + { /* sentinel */ } +}; + +static int imx8mm_pinctrl_probe(struct platform_device *pdev) +{ + return imx_pinctrl_probe(pdev, &imx8mm_pinctrl_info); +} + +static struct platform_driver imx8mm_pinctrl_driver = { + .driver = { + .name = "imx8mm-pinctrl", + .of_match_table = of_match_ptr(imx8mm_pinctrl_of_match), + .suppress_bind_attrs = true, + }, + .probe = imx8mm_pinctrl_probe, +}; + +static int __init imx8mm_pinctrl_init(void) +{ + return platform_driver_register(&imx8mm_pinctrl_driver); +} +arch_initcall(imx8mm_pinctrl_init); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qm.c b/drivers/pinctrl/freescale/pinctrl-imx8qm.c new file mode 100644 index 000000000000..0b6029b29731 --- /dev/null +++ b/drivers/pinctrl/freescale/pinctrl-imx8qm.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. 
+ * Copyright 2017~2018 NXP + * Dong Aisheng + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pinctrl-imx.h" + +static const struct pinctrl_pin_desc imx8qm_pinctrl_pads[] = { + IMX_PINCTRL_PIN(IMX8QM_SIM0_CLK), + IMX_PINCTRL_PIN(IMX8QM_SIM0_RST), + IMX_PINCTRL_PIN(IMX8QM_SIM0_IO), + IMX_PINCTRL_PIN(IMX8QM_SIM0_PD), + IMX_PINCTRL_PIN(IMX8QM_SIM0_POWER_EN), + IMX_PINCTRL_PIN(IMX8QM_SIM0_GPIO0_00), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_SIM), + IMX_PINCTRL_PIN(IMX8QM_M40_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_M40_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_M40_GPIO0_00), + IMX_PINCTRL_PIN(IMX8QM_M40_GPIO0_01), + IMX_PINCTRL_PIN(IMX8QM_M41_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_M41_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_M41_GPIO0_00), + IMX_PINCTRL_PIN(IMX8QM_M41_GPIO0_01), + IMX_PINCTRL_PIN(IMX8QM_GPT0_CLK), + IMX_PINCTRL_PIN(IMX8QM_GPT0_CAPTURE), + IMX_PINCTRL_PIN(IMX8QM_GPT0_COMPARE), + IMX_PINCTRL_PIN(IMX8QM_GPT1_CLK), + IMX_PINCTRL_PIN(IMX8QM_GPT1_CAPTURE), + IMX_PINCTRL_PIN(IMX8QM_GPT1_COMPARE), + IMX_PINCTRL_PIN(IMX8QM_UART0_RX), + IMX_PINCTRL_PIN(IMX8QM_UART0_TX), + IMX_PINCTRL_PIN(IMX8QM_UART0_RTS_B), + IMX_PINCTRL_PIN(IMX8QM_UART0_CTS_B), + IMX_PINCTRL_PIN(IMX8QM_UART1_TX), + IMX_PINCTRL_PIN(IMX8QM_UART1_RX), + IMX_PINCTRL_PIN(IMX8QM_UART1_RTS_B), + IMX_PINCTRL_PIN(IMX8QM_UART1_CTS_B), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLH), + IMX_PINCTRL_PIN(IMX8QM_SCU_PMIC_MEMC_ON), + IMX_PINCTRL_PIN(IMX8QM_SCU_WDOG_OUT), + IMX_PINCTRL_PIN(IMX8QM_PMIC_I2C_SDA), + IMX_PINCTRL_PIN(IMX8QM_PMIC_I2C_SCL), + IMX_PINCTRL_PIN(IMX8QM_PMIC_EARLY_WARNING), + IMX_PINCTRL_PIN(IMX8QM_PMIC_INT_B), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_00), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_01), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_02), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_03), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_04), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_05), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_06), + IMX_PINCTRL_PIN(IMX8QM_SCU_GPIO0_07), + IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE0), + IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE1), + IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE2), + IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE3), + IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE4), + IMX_PINCTRL_PIN(IMX8QM_SCU_BOOT_MODE5), + IMX_PINCTRL_PIN(IMX8QM_LVDS0_GPIO00), + IMX_PINCTRL_PIN(IMX8QM_LVDS0_GPIO01), + IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C1_SCL), + IMX_PINCTRL_PIN(IMX8QM_LVDS0_I2C1_SDA), + IMX_PINCTRL_PIN(IMX8QM_LVDS1_GPIO00), + IMX_PINCTRL_PIN(IMX8QM_LVDS1_GPIO01), + IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C1_SCL), + IMX_PINCTRL_PIN(IMX8QM_LVDS1_I2C1_SDA), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_LVDSGPIO), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_GPIO0_00), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI0_GPIO0_01), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_GPIO0_00), + IMX_PINCTRL_PIN(IMX8QM_MIPI_DSI1_GPIO0_01), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_MIPIDSIGPIO), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_MCLK_OUT), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_GPIO0_00), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI0_GPIO0_01), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_MCLK_OUT), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_GPIO0_00), + 
IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_GPIO0_01), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_I2C0_SCL), + IMX_PINCTRL_PIN(IMX8QM_MIPI_CSI1_I2C0_SDA), + IMX_PINCTRL_PIN(IMX8QM_HDMI_TX0_TS_SCL), + IMX_PINCTRL_PIN(IMX8QM_HDMI_TX0_TS_SDA), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_3V3_HDMIGPIO), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_FSR), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_FST), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_SCKR), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_SCKT), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX0), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX1), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX2_RX3), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX3_RX2), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX4_RX1), + IMX_PINCTRL_PIN(IMX8QM_ESAI1_TX5_RX0), + IMX_PINCTRL_PIN(IMX8QM_SPDIF0_RX), + IMX_PINCTRL_PIN(IMX8QM_SPDIF0_TX), + IMX_PINCTRL_PIN(IMX8QM_SPDIF0_EXT_CLK), + IMX_PINCTRL_PIN(IMX8QM_SPI3_SCK), + IMX_PINCTRL_PIN(IMX8QM_SPI3_SDO), + IMX_PINCTRL_PIN(IMX8QM_SPI3_SDI), + IMX_PINCTRL_PIN(IMX8QM_SPI3_CS0), + IMX_PINCTRL_PIN(IMX8QM_SPI3_CS1), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHB), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_FSR), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_FST), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_SCKR), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_SCKT), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX0), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX1), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX2_RX3), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX3_RX2), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX4_RX1), + IMX_PINCTRL_PIN(IMX8QM_ESAI0_TX5_RX0), + IMX_PINCTRL_PIN(IMX8QM_MCLK_IN0), + IMX_PINCTRL_PIN(IMX8QM_MCLK_OUT0), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHC), + IMX_PINCTRL_PIN(IMX8QM_SPI0_SCK), + IMX_PINCTRL_PIN(IMX8QM_SPI0_SDO), + IMX_PINCTRL_PIN(IMX8QM_SPI0_SDI), + IMX_PINCTRL_PIN(IMX8QM_SPI0_CS0), + IMX_PINCTRL_PIN(IMX8QM_SPI0_CS1), + IMX_PINCTRL_PIN(IMX8QM_SPI2_SCK), + IMX_PINCTRL_PIN(IMX8QM_SPI2_SDO), + IMX_PINCTRL_PIN(IMX8QM_SPI2_SDI), + IMX_PINCTRL_PIN(IMX8QM_SPI2_CS0), + IMX_PINCTRL_PIN(IMX8QM_SPI2_CS1), + IMX_PINCTRL_PIN(IMX8QM_SAI1_RXC), + IMX_PINCTRL_PIN(IMX8QM_SAI1_RXD), + IMX_PINCTRL_PIN(IMX8QM_SAI1_RXFS), + IMX_PINCTRL_PIN(IMX8QM_SAI1_TXC), + IMX_PINCTRL_PIN(IMX8QM_SAI1_TXD), + IMX_PINCTRL_PIN(IMX8QM_SAI1_TXFS), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIORHT), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN7), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN6), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN5), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN4), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN3), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN2), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN1), + IMX_PINCTRL_PIN(IMX8QM_ADC_IN0), + IMX_PINCTRL_PIN(IMX8QM_MLB_SIG), + IMX_PINCTRL_PIN(IMX8QM_MLB_CLK), + IMX_PINCTRL_PIN(IMX8QM_MLB_DATA), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOLHT), + IMX_PINCTRL_PIN(IMX8QM_FLEXCAN0_RX), + IMX_PINCTRL_PIN(IMX8QM_FLEXCAN0_TX), + IMX_PINCTRL_PIN(IMX8QM_FLEXCAN1_RX), + IMX_PINCTRL_PIN(IMX8QM_FLEXCAN1_TX), + IMX_PINCTRL_PIN(IMX8QM_FLEXCAN2_RX), + IMX_PINCTRL_PIN(IMX8QM_FLEXCAN2_TX), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOTHR), + IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC0), + IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC1), + IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC2), + IMX_PINCTRL_PIN(IMX8QM_USB_SS3_TC3), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_3V3_USB3IO), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_RESET_B), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_VSELECT), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_RESET_B), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_VSELECT), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_WP), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_CD_B), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSELSEP), + IMX_PINCTRL_PIN(IMX8QM_ENET0_MDIO), + IMX_PINCTRL_PIN(IMX8QM_ENET0_MDC), + IMX_PINCTRL_PIN(IMX8QM_ENET0_REFCLK_125M_25M), + 
IMX_PINCTRL_PIN(IMX8QM_ENET1_REFCLK_125M_25M), + IMX_PINCTRL_PIN(IMX8QM_ENET1_MDIO), + IMX_PINCTRL_PIN(IMX8QM_ENET1_MDC), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_GPIOCT), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_SS0_B), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_SS1_B), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_SCLK), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DQS), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA3), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA2), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA1), + IMX_PINCTRL_PIN(IMX8QM_QSPI1A_DATA0), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI1), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA0), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA1), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA2), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DATA3), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_DQS), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_SS0_B), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_SS1_B), + IMX_PINCTRL_PIN(IMX8QM_QSPI0A_SCLK), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_SCLK), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA0), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA1), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA2), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DATA3), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_DQS), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_SS0_B), + IMX_PINCTRL_PIN(IMX8QM_QSPI0B_SS1_B), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_QSPI0), + IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL0_CLKREQ_B), + IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL0_WAKE_B), + IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL0_PERST_B), + IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL1_CLKREQ_B), + IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL1_WAKE_B), + IMX_PINCTRL_PIN(IMX8QM_PCIE_CTRL1_PERST_B), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_PCIESEP), + IMX_PINCTRL_PIN(IMX8QM_USB_HSIC0_DATA), + IMX_PINCTRL_PIN(IMX8QM_USB_HSIC0_STROBE), + IMX_PINCTRL_PIN(IMX8QM_CALIBRATION_0_HSIC), + IMX_PINCTRL_PIN(IMX8QM_CALIBRATION_1_HSIC), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_CLK), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_CMD), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA0), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA1), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA2), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA3), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA4), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA5), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA6), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_DATA7), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_STROBE), + IMX_PINCTRL_PIN(IMX8QM_EMMC0_RESET_B), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_SD1FIX), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_CLK), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_CMD), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA0), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA1), + IMX_PINCTRL_PIN(IMX8QM_CTL_NAND_RE_P_N), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA2), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA3), + IMX_PINCTRL_PIN(IMX8QM_CTL_NAND_DQS_P_N), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA4), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA5), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA6), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_DATA7), + IMX_PINCTRL_PIN(IMX8QM_USDHC1_STROBE), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL2), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_CLK), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_CMD), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA0), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA1), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA2), + IMX_PINCTRL_PIN(IMX8QM_USDHC2_DATA3), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_VSEL3), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXC), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TX_CTL), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD0), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD1), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD2), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_TXD3), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXC), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RX_CTL), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD0), + 
IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD1), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD2), + IMX_PINCTRL_PIN(IMX8QM_ENET0_RGMII_RXD3), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXC), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TX_CTL), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD0), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD1), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD2), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_TXD3), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXC), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RX_CTL), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD0), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD1), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD2), + IMX_PINCTRL_PIN(IMX8QM_ENET1_RGMII_RXD3), + IMX_PINCTRL_PIN(IMX8QM_COMP_CTL_GPIO_1V8_3V3_ENET_ENETA), +}; + +static const struct imx_pinctrl_soc_info imx8qm_pinctrl_info = { + .pins = imx8qm_pinctrl_pads, + .npins = ARRAY_SIZE(imx8qm_pinctrl_pads), + .flags = IMX_USE_SCU, +}; + +static const struct of_device_id imx8qm_pinctrl_of_match[] = { + { .compatible = "fsl,imx8qm-iomuxc", }, + { /* sentinel */ } +}; + +static int imx8qm_pinctrl_probe(struct platform_device *pdev) +{ + int ret; + + ret = imx_pinctrl_sc_ipc_init(pdev); + if (ret) + return ret; + + return imx_pinctrl_probe(pdev, &imx8qm_pinctrl_info); +} + +static struct platform_driver imx8qm_pinctrl_driver = { + .driver = { + .name = "imx8qm-pinctrl", + .of_match_table = of_match_ptr(imx8qm_pinctrl_of_match), + .suppress_bind_attrs = true, + }, + .probe = imx8qm_pinctrl_probe, +}; + +static int __init imx8qm_pinctrl_init(void) +{ + return platform_driver_register(&imx8qm_pinctrl_driver); +} +arch_initcall(imx8qm_pinctrl_init); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 05044e323ea5..03ec7a5d9d0b 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "HP"), DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, { @@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), - DMI_MATCH(DMI_BOARD_VERSION, "1.0"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), }, }, {} diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 1817786ab6aa..a005cbccb4f7 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -45,12 +45,14 @@ config PINCTRL_MT2701 config PINCTRL_MT7623 bool "Mediatek MT7623 pin control with generic binding" depends on MACH_MT7623 || COMPILE_TEST + depends on OF default MACH_MT7623 select PINCTRL_MTK_MOORE config PINCTRL_MT7629 bool "Mediatek MT7629 pin control" depends on MACH_MT7629 || COMPILE_TEST + depends on OF default MACH_MT7629 select PINCTRL_MTK_MOORE @@ -92,6 +94,7 @@ config 
PINCTRL_MT6797 config PINCTRL_MT7622 bool "MediaTek MT7622 pin control" + depends on OF depends on ARM64 || COMPILE_TEST default ARM64 && ARCH_MEDIATEK select PINCTRL_MTK_MOORE diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c index 4a9e0d4c2bbc..b1c368455d30 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c @@ -290,7 +290,13 @@ static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n) return err; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, MTK_ENABLE); - if (err) + /* SMT is supposed to be supported by every real GPIO and doesn't + * support virtual GPIOs, so the extra condition err != -ENOTSUPP + * is just for adding EINT support to these virtual GPIOs. It should + * add an extra flag in the pin descriptor when more pins with + * distinctive characteristic come out. + */ + if (err && err != -ENOTSUPP) return err; return 0; diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index ea87d739f534..96a4a72708e4 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -31,6 +31,9 @@ * In some cases the register ranges for pull enable and pull * direction are the same and thus there are only 3 register ranges. * + * Since Meson G12A SoC, the ao register ranges for gpio, pull enable + * and pull direction are the same, so there are only 2 register ranges. + * * For the pull and GPIO configuration every bank uses a contiguous * set of bits in the register sets described above; the same register * can be shared by more banks with different offsets. @@ -488,21 +491,26 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc, return PTR_ERR(pc->reg_mux); } - pc->reg_pull = meson_map_resource(pc, gpio_np, "pull"); - if (IS_ERR(pc->reg_pull)) { - dev_err(pc->dev, "pull registers not found\n"); - return PTR_ERR(pc->reg_pull); + pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio"); + if (IS_ERR(pc->reg_gpio)) { + dev_err(pc->dev, "gpio registers not found\n"); + return PTR_ERR(pc->reg_gpio); } + pc->reg_pull = meson_map_resource(pc, gpio_np, "pull"); + /* Use gpio region if pull one is not present */ + if (IS_ERR(pc->reg_pull)) + pc->reg_pull = pc->reg_gpio; + pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable"); /* Use pull region if pull-enable one is not present */ if (IS_ERR(pc->reg_pullen)) pc->reg_pullen = pc->reg_pull; - pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio"); - if (IS_ERR(pc->reg_gpio)) { - dev_err(pc->dev, "gpio registers not found\n"); - return PTR_ERR(pc->reg_gpio); + pc->reg_ds = meson_map_resource(pc, gpio_np, "ds"); + if (IS_ERR(pc->reg_ds)) { + dev_dbg(pc->dev, "ds registers not found - skipping\n"); + pc->reg_ds = NULL; } return 0; diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h index eff61ea1c67e..5eaab925f427 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.h +++ b/drivers/pinctrl/meson/pinctrl-meson.h @@ -120,6 +120,7 @@ struct meson_pinctrl { struct regmap *reg_pullen; struct regmap *reg_pull; struct regmap *reg_gpio; + struct regmap *reg_ds; struct gpio_chip chip; struct device_node *of_node; }; diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index c69ca95b1ad5..7f76000cc12e 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c @@ -346,6 +346,8 @@ static const unsigned int eth_rx_dv_pins[] 
= { DIF_1_P }; static const unsigned int eth_rx_clk_pins[] = { DIF_1_N }; static const unsigned int eth_txd0_1_pins[] = { DIF_2_P }; static const unsigned int eth_txd1_1_pins[] = { DIF_2_N }; +static const unsigned int eth_rxd3_pins[] = { DIF_2_P }; +static const unsigned int eth_rxd2_pins[] = { DIF_2_N }; static const unsigned int eth_tx_en_pins[] = { DIF_3_P }; static const unsigned int eth_ref_clk_pins[] = { DIF_3_N }; static const unsigned int eth_mdc_pins[] = { DIF_4_P }; @@ -599,6 +601,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = { GROUP(eth_ref_clk, 6, 8), GROUP(eth_mdc, 6, 9), GROUP(eth_mdio_en, 6, 10), + GROUP(eth_rxd3, 7, 22), + GROUP(eth_rxd2, 7, 23), }; static struct meson_pmx_group meson8b_aobus_groups[] = { @@ -693,7 +697,7 @@ static const char * const sd_a_groups[] = { static const char * const sdxc_a_groups[] = { "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", - "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" + "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a" }; static const char * const pcm_a_groups[] = { @@ -748,7 +752,7 @@ static const char * const ethernet_groups[] = { "eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1", "eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv", "eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk", - "eth_txd2", "eth_txd3" + "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2" }; static const char * const i2c_a_groups[] = { diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index aa48b3f23c7f..6462d3ca7ceb 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -170,8 +170,8 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = { PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"), PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"), PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"), - PIN_GRP_GPIO("pmic1", 17, 1, BIT(7), "pmic"), - PIN_GRP_GPIO("pmic0", 16, 1, BIT(8), "pmic"), + PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"), + PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"), PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"), PIN_GRP_GPIO("i2c1", 0, 2, BIT(10), "i2c"), PIN_GRP_GPIO("spi_cs1", 17, 1, BIT(12), "spi"), @@ -195,8 +195,11 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = { PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"), PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"), PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"), - PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"), - PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"), + PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"), + PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"), + PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"), + PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"), + PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"), PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"), PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"), PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14), @@ -1104,8 +1107,8 @@ static int armada_3700_pinctrl_resume(struct device *dev) * to other IO drivers. 
*/ static const struct dev_pm_ops armada_3700_pinctrl_pm_ops = { - .suspend_late = armada_3700_pinctrl_suspend, - .resume_early = armada_3700_pinctrl_resume, + .suspend_noirq = armada_3700_pinctrl_suspend, + .resume_noirq = armada_3700_pinctrl_resume, }; #define PINCTRL_ARMADA_37XX_DEV_PM_OPS (&armada_3700_pinctrl_pm_ops) diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 4cc2c47f8778..ec02739bd21b 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1056,17 +1056,22 @@ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np, } if (of_property_read_u32(np, "gpio-bank", &id)) { dev_err(&pdev->dev, "populate: gpio-bank property not found\n"); + platform_device_put(gpio_pdev); return ERR_PTR(-EINVAL); } /* Already populated? */ nmk_chip = nmk_gpio_chips[id]; - if (nmk_chip) + if (nmk_chip) { + platform_device_put(gpio_pdev); return nmk_chip; + } nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL); - if (!nmk_chip) + if (!nmk_chip) { + platform_device_put(gpio_pdev); return ERR_PTR(-ENOMEM); + } nmk_chip->bank = id; chip = &nmk_chip->chip; @@ -1077,13 +1082,17 @@ static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np, res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(base)) + if (IS_ERR(base)) { + platform_device_put(gpio_pdev); return ERR_CAST(base); + } nmk_chip->addr = base; clk = clk_get(&gpio_pdev->dev, NULL); - if (IS_ERR(clk)) + if (IS_ERR(clk)) { + platform_device_put(gpio_pdev); return (void *) clk; + } clk_prepare(clk); nmk_chip->clk = clk; diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c index 2c7229380f08..2678603df14b 100644 --- a/drivers/pinctrl/pinconf.c +++ b/drivers/pinctrl/pinconf.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include @@ -369,225 +368,6 @@ static int pinconf_groups_show(struct seq_file *s, void *what) DEFINE_SHOW_ATTRIBUTE(pinconf_pins); DEFINE_SHOW_ATTRIBUTE(pinconf_groups); -#define MAX_NAME_LEN 15 - -struct dbg_cfg { - enum pinctrl_map_type map_type; - char dev_name[MAX_NAME_LEN + 1]; - char state_name[MAX_NAME_LEN + 1]; - char pin_name[MAX_NAME_LEN + 1]; -}; - -/* - * Goal is to keep this structure as global in order to simply read the - * pinconf-config file after a write to check config is as expected - */ -static struct dbg_cfg pinconf_dbg_conf; - -/** - * pinconf_dbg_config_print() - display the pinctrl config from the pinctrl - * map, of the dev/pin/state that was last written to pinconf-config file. 
- * @s: string filled in with config description - * @d: not used - */ -static int pinconf_dbg_config_print(struct seq_file *s, void *d) -{ - struct pinctrl_maps *maps_node; - const struct pinctrl_map *map; - const struct pinctrl_map *found = NULL; - struct pinctrl_dev *pctldev; - struct dbg_cfg *dbg = &pinconf_dbg_conf; - int i; - - mutex_lock(&pinctrl_maps_mutex); - - /* Parse the pinctrl map and look for the elected pin/state */ - for_each_maps(maps_node, i, map) { - if (map->type != dbg->map_type) - continue; - if (strcmp(map->dev_name, dbg->dev_name)) - continue; - if (strcmp(map->name, dbg->state_name)) - continue; - - if (!strcmp(map->data.configs.group_or_pin, dbg->pin_name)) { - /* We found the right pin */ - found = map; - break; - } - } - - if (!found) { - seq_printf(s, "No config found for dev/state/pin, expected:\n"); - seq_printf(s, "Searched dev:%s\n", dbg->dev_name); - seq_printf(s, "Searched state:%s\n", dbg->state_name); - seq_printf(s, "Searched pin:%s\n", dbg->pin_name); - seq_printf(s, "Use: modify config_pin "\ - " \n"); - goto exit; - } - - pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name); - seq_printf(s, "Dev %s has config of %s in state %s:\n", - dbg->dev_name, dbg->pin_name, dbg->state_name); - pinconf_show_config(s, pctldev, found->data.configs.configs, - found->data.configs.num_configs); - -exit: - mutex_unlock(&pinctrl_maps_mutex); - - return 0; -} - -/** - * pinconf_dbg_config_write() - modify the pinctrl config in the pinctrl - * map, of a dev/pin/state entry based on user entries to pinconf-config - * @user_buf: contains the modification request with expected format: - * modify - * modify is literal string, alternatives like add/delete not supported yet - * is the configuration to be changed. Supported configs are - * "config_pin" or "config_group", alternatives like config_mux are not - * supported yet. 
- * are values that should match the pinctrl-maps - * reflects the new config and is driver dependent - */ -static ssize_t pinconf_dbg_config_write(struct file *file, - const char __user *user_buf, size_t count, loff_t *ppos) -{ - struct pinctrl_maps *maps_node; - const struct pinctrl_map *map; - const struct pinctrl_map *found = NULL; - struct pinctrl_dev *pctldev; - const struct pinconf_ops *confops = NULL; - struct dbg_cfg *dbg = &pinconf_dbg_conf; - const struct pinctrl_map_configs *configs; - char config[MAX_NAME_LEN + 1]; - char buf[128]; - char *b = &buf[0]; - int buf_size; - char *token; - int i; - - /* Get userspace string and assure termination */ - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - buf[buf_size] = 0; - - /* - * need to parse entry and extract parameters: - * modify configs_pin devicename state pinname newvalue - */ - - /* Get arg: 'modify' */ - token = strsep(&b, " "); - if (!token) - return -EINVAL; - if (strcmp(token, "modify")) - return -EINVAL; - - /* - * Get arg type: "config_pin" and "config_group" - * types are supported so far - */ - token = strsep(&b, " "); - if (!token) - return -EINVAL; - if (!strcmp(token, "config_pin")) - dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN; - else if (!strcmp(token, "config_group")) - dbg->map_type = PIN_MAP_TYPE_CONFIGS_GROUP; - else - return -EINVAL; - - /* get arg 'device_name' */ - token = strsep(&b, " "); - if (!token) - return -EINVAL; - if (strlen(token) >= MAX_NAME_LEN) - return -EINVAL; - strncpy(dbg->dev_name, token, MAX_NAME_LEN); - - /* get arg 'state_name' */ - token = strsep(&b, " "); - if (!token) - return -EINVAL; - if (strlen(token) >= MAX_NAME_LEN) - return -EINVAL; - strncpy(dbg->state_name, token, MAX_NAME_LEN); - - /* get arg 'pin_name' */ - token = strsep(&b, " "); - if (!token) - return -EINVAL; - if (strlen(token) >= MAX_NAME_LEN) - return -EINVAL; - strncpy(dbg->pin_name, token, MAX_NAME_LEN); - - /* get new_value of config' */ - token = strsep(&b, " "); - if (!token) - return -EINVAL; - if (strlen(token) >= MAX_NAME_LEN) - return -EINVAL; - strncpy(config, token, MAX_NAME_LEN); - - mutex_lock(&pinctrl_maps_mutex); - - /* Parse the pinctrl map and look for the selected dev/state/pin */ - for_each_maps(maps_node, i, map) { - if (strcmp(map->dev_name, dbg->dev_name)) - continue; - if (map->type != dbg->map_type) - continue; - if (strcmp(map->name, dbg->state_name)) - continue; - - /* we found the right pin / state, so overwrite config */ - if (!strcmp(map->data.configs.group_or_pin, dbg->pin_name)) { - found = map; - break; - } - } - - if (!found) { - count = -EINVAL; - goto exit; - } - - pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name); - if (pctldev) - confops = pctldev->desc->confops; - - if (confops && confops->pin_config_dbg_parse_modify) { - configs = &found->data.configs; - for (i = 0; i < configs->num_configs; i++) { - confops->pin_config_dbg_parse_modify(pctldev, - config, - &configs->configs[i]); - } - } - -exit: - mutex_unlock(&pinctrl_maps_mutex); - - return count; -} - -static int pinconf_dbg_config_open(struct inode *inode, struct file *file) -{ - return single_open(file, pinconf_dbg_config_print, inode->i_private); -} - -static const struct file_operations pinconf_dbg_pinconfig_fops = { - .open = pinconf_dbg_config_open, - .write = pinconf_dbg_config_write, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; - void pinconf_init_device_debugfs(struct dentry *devroot, struct 
pinctrl_dev *pctldev) { @@ -595,8 +375,6 @@ void pinconf_init_device_debugfs(struct dentry *devroot, devroot, pctldev, &pinconf_pins_fops); debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO, devroot, pctldev, &pinconf_groups_fops); - debugfs_create_file("pinconf-config", (S_IRUGO | S_IWUSR | S_IWGRP), - devroot, pctldev, &pinconf_dbg_pinconfig_fops); } #endif diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 2a7d638978d8..6689995fa3aa 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -489,7 +489,7 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) /* * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the * debounce registers of any GPIO will block wake/interrupt status - * generation for *all* GPIOs for a lenght of time that depends on + * generation for *all* GPIOs for a length of time that depends on * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the * INTERRUPT_ENABLE bit will read as 0. * diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 3d49bbbcdbc7..cb7c432769a5 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -59,6 +59,9 @@ static int gpio_banks; #define OUTPUT (1 << 7) #define OUTPUT_VAL_SHIFT 8 #define OUTPUT_VAL (0x1 << OUTPUT_VAL_SHIFT) +#define SLEWRATE_SHIFT 9 +#define SLEWRATE_MASK 0x1 +#define SLEWRATE (SLEWRATE_MASK << SLEWRATE_SHIFT) #define DEBOUNCE (1 << 16) #define DEBOUNCE_VAL_SHIFT 17 #define DEBOUNCE_VAL (0x3fff << DEBOUNCE_VAL_SHIFT) @@ -72,10 +75,22 @@ static int gpio_banks; * DRIVE_STRENGTH_DEFAULT is just a placeholder to avoid changing the drive * strength when there is no dt config for it. */ -#define DRIVE_STRENGTH_DEFAULT (0 << DRIVE_STRENGTH_SHIFT) -#define DRIVE_STRENGTH_LOW (1 << DRIVE_STRENGTH_SHIFT) -#define DRIVE_STRENGTH_MED (2 << DRIVE_STRENGTH_SHIFT) -#define DRIVE_STRENGTH_HI (3 << DRIVE_STRENGTH_SHIFT) +enum drive_strength_bit { + DRIVE_STRENGTH_BIT_DEF, + DRIVE_STRENGTH_BIT_LOW, + DRIVE_STRENGTH_BIT_MED, + DRIVE_STRENGTH_BIT_HI, +}; + +#define DRIVE_STRENGTH_BIT_MSK(name) (DRIVE_STRENGTH_BIT_##name << \ + DRIVE_STRENGTH_SHIFT) + +enum slewrate_bit { + SLEWRATE_BIT_DIS, + SLEWRATE_BIT_ENA, +}; + +#define SLEWRATE_BIT_MSK(name) (SLEWRATE_BIT_##name << SLEWRATE_SHIFT) /** * struct at91_pmx_func - describes AT91 pinmux functions @@ -166,6 +181,8 @@ struct at91_pinctrl_mux_ops { unsigned (*get_drivestrength)(void __iomem *pio, unsigned pin); void (*set_drivestrength)(void __iomem *pio, unsigned pin, u32 strength); + unsigned (*get_slewrate)(void __iomem *pio, unsigned pin); + void (*set_slewrate)(void __iomem *pio, unsigned pin, u32 slewrate); /* irq */ int (*irq_type)(struct irq_data *d, unsigned type); }; @@ -551,7 +568,7 @@ static unsigned at91_mux_sama5d3_get_drivestrength(void __iomem *pio, /* SAMA5 strength is 1:1 with our defines, * except 0 is equivalent to low per datasheet */ if (!tmp) - tmp = DRIVE_STRENGTH_LOW; + tmp = DRIVE_STRENGTH_BIT_MSK(LOW); return tmp; } @@ -564,11 +581,32 @@ static unsigned at91_mux_sam9x5_get_drivestrength(void __iomem *pio, /* strength is inverse in SAM9x5s hardware with the pinctrl defines * hardware: 0 = hi, 1 = med, 2 = low, 3 = rsvd */ - tmp = DRIVE_STRENGTH_HI - tmp; + tmp = DRIVE_STRENGTH_BIT_MSK(HI) - tmp; return tmp; } +static unsigned at91_mux_sam9x60_get_drivestrength(void __iomem *pio, + unsigned pin) +{ + unsigned tmp = readl_relaxed(pio + SAM9X60_PIO_DRIVER1); + + if (tmp & BIT(pin)) + return DRIVE_STRENGTH_BIT_HI; + 
+ return DRIVE_STRENGTH_BIT_LOW; +} + +static unsigned at91_mux_sam9x60_get_slewrate(void __iomem *pio, unsigned pin) +{ + unsigned tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR); + + if ((tmp & BIT(pin))) + return SLEWRATE_BIT_ENA; + + return SLEWRATE_BIT_DIS; +} + static void set_drive_strength(void __iomem *reg, unsigned pin, u32 strength) { unsigned tmp = readl_relaxed(reg); @@ -600,12 +638,51 @@ static void at91_mux_sam9x5_set_drivestrength(void __iomem *pio, unsigned pin, /* strength is inverse on SAM9x5s with our defines * 0 = hi, 1 = med, 2 = low, 3 = rsvd */ - setting = DRIVE_STRENGTH_HI - setting; + setting = DRIVE_STRENGTH_BIT_MSK(HI) - setting; set_drive_strength(pio + at91sam9x5_get_drive_register(pin), pin, setting); } +static void at91_mux_sam9x60_set_drivestrength(void __iomem *pio, unsigned pin, + u32 setting) +{ + unsigned int tmp; + + if (setting <= DRIVE_STRENGTH_BIT_DEF || + setting == DRIVE_STRENGTH_BIT_MED || + setting > DRIVE_STRENGTH_BIT_HI) + return; + + tmp = readl_relaxed(pio + SAM9X60_PIO_DRIVER1); + + /* Strength is 0: low, 1: hi */ + if (setting == DRIVE_STRENGTH_BIT_LOW) + tmp &= ~BIT(pin); + else + tmp |= BIT(pin); + + writel_relaxed(tmp, pio + SAM9X60_PIO_DRIVER1); +} + +static void at91_mux_sam9x60_set_slewrate(void __iomem *pio, unsigned pin, + u32 setting) +{ + unsigned int tmp; + + if (setting < SLEWRATE_BIT_DIS || setting > SLEWRATE_BIT_ENA) + return; + + tmp = readl_relaxed(pio + SAM9X60_PIO_SLEWR); + + if (setting == SLEWRATE_BIT_DIS) + tmp &= ~BIT(pin); + else + tmp |= BIT(pin); + + writel_relaxed(tmp, pio + SAM9X60_PIO_SLEWR); +} + static struct at91_pinctrl_mux_ops at91rm9200_ops = { .get_periph = at91_mux_get_periph, .mux_A_periph = at91_mux_set_A_periph, @@ -634,6 +711,28 @@ static struct at91_pinctrl_mux_ops at91sam9x5_ops = { .irq_type = alt_gpio_irq_type, }; +static const struct at91_pinctrl_mux_ops sam9x60_ops = { + .get_periph = at91_mux_pio3_get_periph, + .mux_A_periph = at91_mux_pio3_set_A_periph, + .mux_B_periph = at91_mux_pio3_set_B_periph, + .mux_C_periph = at91_mux_pio3_set_C_periph, + .mux_D_periph = at91_mux_pio3_set_D_periph, + .get_deglitch = at91_mux_pio3_get_deglitch, + .set_deglitch = at91_mux_pio3_set_deglitch, + .get_debounce = at91_mux_pio3_get_debounce, + .set_debounce = at91_mux_pio3_set_debounce, + .get_pulldown = at91_mux_pio3_get_pulldown, + .set_pulldown = at91_mux_pio3_set_pulldown, + .get_schmitt_trig = at91_mux_pio3_get_schmitt_trig, + .disable_schmitt_trig = at91_mux_pio3_disable_schmitt_trig, + .get_drivestrength = at91_mux_sam9x60_get_drivestrength, + .set_drivestrength = at91_mux_sam9x60_set_drivestrength, + .get_slewrate = at91_mux_sam9x60_get_slewrate, + .set_slewrate = at91_mux_sam9x60_set_slewrate, + .irq_type = alt_gpio_irq_type, + +}; + static struct at91_pinctrl_mux_ops sama5d3_ops = { .get_periph = at91_mux_pio3_get_periph, .mux_A_periph = at91_mux_pio3_set_A_periph, @@ -893,6 +992,8 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev, if (info->ops->get_drivestrength) *config |= (info->ops->get_drivestrength(pio, pin) << DRIVE_STRENGTH_SHIFT); + if (info->ops->get_slewrate) + *config |= (info->ops->get_slewrate(pio, pin) << SLEWRATE_SHIFT); if (at91_mux_get_output(pio, pin, &out)) *config |= OUTPUT | (out << OUTPUT_VAL_SHIFT); @@ -944,6 +1045,9 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, info->ops->set_drivestrength(pio, pin, (config & DRIVE_STRENGTH) >> DRIVE_STRENGTH_SHIFT); + if (info->ops->set_slewrate) + info->ops->set_slewrate(pio, pin, + (config & SLEWRATE) >> 
SLEWRATE_SHIFT); } /* for each config */ @@ -959,11 +1063,11 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, } \ } while (0) -#define DBG_SHOW_FLAG_MASKED(mask,flag) do { \ +#define DBG_SHOW_FLAG_MASKED(mask, flag, name) do { \ if ((config & mask) == flag) { \ if (num_conf) \ seq_puts(s, "|"); \ - seq_puts(s, #flag); \ + seq_puts(s, #name); \ num_conf++; \ } \ } while (0) @@ -981,9 +1085,13 @@ static void at91_pinconf_dbg_show(struct pinctrl_dev *pctldev, DBG_SHOW_FLAG(PULL_DOWN); DBG_SHOW_FLAG(DIS_SCHMIT); DBG_SHOW_FLAG(DEGLITCH); - DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_LOW); - DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_MED); - DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_HI); + DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_BIT_MSK(LOW), + DRIVE_STRENGTH_LOW); + DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_BIT_MSK(MED), + DRIVE_STRENGTH_MED); + DBG_SHOW_FLAG_MASKED(DRIVE_STRENGTH, DRIVE_STRENGTH_BIT_MSK(HI), + DRIVE_STRENGTH_HI); + DBG_SHOW_FLAG(SLEWRATE); DBG_SHOW_FLAG(DEBOUNCE); if (config & DEBOUNCE) { val = config >> DEBOUNCE_VAL_SHIFT; @@ -1155,6 +1263,7 @@ static const struct of_device_id at91_pinctrl_of_match[] = { { .compatible = "atmel,sama5d3-pinctrl", .data = &sama5d3_ops }, { .compatible = "atmel,at91sam9x5-pinctrl", .data = &at91sam9x5_ops }, { .compatible = "atmel,at91rm9200-pinctrl", .data = &at91rm9200_ops }, + { .compatible = "microchip,sam9x60-pinctrl", .data = &sam9x60_ops }, { /* sentinel */ } }; @@ -1697,6 +1806,7 @@ static const struct gpio_chip at91_gpio_template = { static const struct of_device_id at91_gpio_of_match[] = { { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, }, { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops }, + { .compatible = "microchip,sam9x60-gpio", .data = &sam9x60_ops }, { /* sentinel */ } }; diff --git a/drivers/pinctrl/pinctrl-at91.h b/drivers/pinctrl/pinctrl-at91.h index 79b957f1dfa2..223620f14b05 100644 --- a/drivers/pinctrl/pinctrl-at91.h +++ b/drivers/pinctrl/pinctrl-at91.h @@ -69,4 +69,7 @@ #define AT91SAM9X5_PIO_DRIVER1 0x114 /*PIO Driver 1 register offset*/ #define AT91SAM9X5_PIO_DRIVER2 0x118 /*PIO Driver 2 register offset*/ +#define SAM9X60_PIO_SLEWR 0x110 /* PIO Slew Rate Control Register */ +#define SAM9X60_PIO_DRIVER1 0x118 /* PIO Driver 1 register offset */ + #endif diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index db6b48ea5f47..bc21ceb15d68 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -233,6 +233,19 @@ static int jz4725b_pwm_pwm2_pins[] = { 0x4c, }; static int jz4725b_pwm_pwm3_pins[] = { 0x4d, }; static int jz4725b_pwm_pwm4_pins[] = { 0x4e, }; static int jz4725b_pwm_pwm5_pins[] = { 0x4f, }; +static int jz4725b_lcd_8bit_pins[] = { + 0x72, 0x73, 0x74, + 0x60, 0x61, 0x62, 0x63, + 0x64, 0x65, 0x66, 0x67, +}; +static int jz4725b_lcd_16bit_pins[] = { + 0x68, 0x69, 0x6a, 0x6b, + 0x6c, 0x6d, 0x6e, 0x6f, +}; +static int jz4725b_lcd_18bit_pins[] = { 0x70, 0x71, }; +static int jz4725b_lcd_24bit_pins[] = { 0x76, 0x77, 0x78, 0x79, }; +static int jz4725b_lcd_special_pins[] = { 0x76, 0x77, 0x78, 0x79, }; +static int jz4725b_lcd_generic_pins[] = { 0x75, }; static int jz4725b_mmc0_1bit_funcs[] = { 1, 1, 1, }; static int jz4725b_mmc0_4bit_funcs[] = { 1, 0, 1, }; @@ -251,6 +264,12 @@ static int jz4725b_pwm_pwm2_funcs[] = { 0, }; static int jz4725b_pwm_pwm3_funcs[] = { 0, }; static int jz4725b_pwm_pwm4_funcs[] = { 0, }; static int jz4725b_pwm_pwm5_funcs[] = { 0, }; +static int 
jz4725b_lcd_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; +static int jz4725b_lcd_16bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, }; +static int jz4725b_lcd_18bit_funcs[] = { 0, 0, }; +static int jz4725b_lcd_24bit_funcs[] = { 1, 1, 1, 1, }; +static int jz4725b_lcd_special_funcs[] = { 0, 0, 0, 0, }; +static int jz4725b_lcd_generic_funcs[] = { 0, }; static const struct group_desc jz4725b_groups[] = { INGENIC_PIN_GROUP("mmc0-1bit", jz4725b_mmc0_1bit), @@ -270,6 +289,12 @@ static const struct group_desc jz4725b_groups[] = { INGENIC_PIN_GROUP("pwm3", jz4725b_pwm_pwm3), INGENIC_PIN_GROUP("pwm4", jz4725b_pwm_pwm4), INGENIC_PIN_GROUP("pwm5", jz4725b_pwm_pwm5), + INGENIC_PIN_GROUP("lcd-8bit", jz4725b_lcd_8bit), + INGENIC_PIN_GROUP("lcd-16bit", jz4725b_lcd_16bit), + INGENIC_PIN_GROUP("lcd-18bit", jz4725b_lcd_18bit), + INGENIC_PIN_GROUP("lcd-24bit", jz4725b_lcd_24bit), + INGENIC_PIN_GROUP("lcd-special", jz4725b_lcd_special), + INGENIC_PIN_GROUP("lcd-generic", jz4725b_lcd_generic), }; static const char *jz4725b_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", }; @@ -285,6 +310,10 @@ static const char *jz4725b_pwm2_groups[] = { "pwm2", }; static const char *jz4725b_pwm3_groups[] = { "pwm3", }; static const char *jz4725b_pwm4_groups[] = { "pwm4", }; static const char *jz4725b_pwm5_groups[] = { "pwm5", }; +static const char *jz4725b_lcd_groups[] = { + "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-24bit", + "lcd-special", "lcd-generic", +}; static const struct function_desc jz4725b_functions[] = { { "mmc0", jz4725b_mmc0_groups, ARRAY_SIZE(jz4725b_mmc0_groups), }, @@ -297,6 +326,7 @@ static const struct function_desc jz4725b_functions[] = { { "pwm3", jz4725b_pwm3_groups, ARRAY_SIZE(jz4725b_pwm3_groups), }, { "pwm4", jz4725b_pwm4_groups, ARRAY_SIZE(jz4725b_pwm4_groups), }, { "pwm5", jz4725b_pwm5_groups, ARRAY_SIZE(jz4725b_pwm5_groups), }, + { "lcd", jz4725b_lcd_groups, ARRAY_SIZE(jz4725b_lcd_groups), }, }; static const struct ingenic_chip_info jz4725b_chip_info = { @@ -321,47 +351,57 @@ static int jz4770_uart0_data_pins[] = { 0xa0, 0xa3, }; static int jz4770_uart0_hwflow_pins[] = { 0xa1, 0xa2, }; static int jz4770_uart1_data_pins[] = { 0x7a, 0x7c, }; static int jz4770_uart1_hwflow_pins[] = { 0x7b, 0x7d, }; -static int jz4770_uart2_data_pins[] = { 0x66, 0x67, }; -static int jz4770_uart2_hwflow_pins[] = { 0x65, 0x64, }; +static int jz4770_uart2_data_pins[] = { 0x5c, 0x5e, }; +static int jz4770_uart2_hwflow_pins[] = { 0x5d, 0x5f, }; static int jz4770_uart3_data_pins[] = { 0x6c, 0x85, }; static int jz4770_uart3_hwflow_pins[] = { 0x88, 0x89, }; -static int jz4770_uart4_data_pins[] = { 0x54, 0x4a, }; -static int jz4770_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, }; -static int jz4770_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, }; static int jz4770_mmc0_1bit_a_pins[] = { 0x12, 0x13, 0x14, }; -static int jz4770_mmc0_4bit_e_pins[] = { 0x95, 0x96, 0x97, }; +static int jz4770_mmc0_4bit_a_pins[] = { 0x15, 0x16, 0x17, }; static int jz4770_mmc0_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, }; -static int jz4770_mmc1_4bit_d_pins[] = { 0x75, 0x76, 0x77, }; +static int jz4770_mmc0_4bit_e_pins[] = { 0x95, 0x96, 0x97, }; +static int jz4770_mmc0_8bit_e_pins[] = { 0x98, 0x99, 0x9a, 0x9b, }; static int jz4770_mmc1_1bit_d_pins[] = { 0x78, 0x79, 0x74, }; -static int jz4770_mmc1_4bit_e_pins[] = { 0x95, 0x96, 0x97, }; +static int jz4770_mmc1_4bit_d_pins[] = { 0x75, 0x76, 0x77, }; static int jz4770_mmc1_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, }; -static int jz4770_nemc_data_pins[] = { +static int jz4770_mmc1_4bit_e_pins[] = { 0x95, 0x96, 0x97, }; 
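These Ingenic group tables follow a parallel-array convention: the Nth entry of a *_funcs[] array is the mux function selector for the Nth pin in the matching *_pins[] array, and INGENIC_PIN_GROUP("name", prefix) ties the two together. Below is a small standalone sketch of that pairing, using the mmc1-4bit-e values from this hunk and assuming the usual 32-pins-per-port numbering; the decode helper is illustrative only, not driver code.

	/* Illustrative pin/function pairing for the "mmc1-4bit-e" group. */
	#include <stdio.h>

	#define PINS_PER_PORT 32

	static const int pins[]  = { 0x95, 0x96, 0x97 }; /* jz4770_mmc1_4bit_e_pins  */
	static const int funcs[] = { 1, 1, 1 };          /* jz4770_mmc1_4bit_e_funcs */

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
			int port   = pins[i] / PINS_PER_PORT; /* 0x95 -> 4 (port E) */
			int offset = pins[i] % PINS_PER_PORT; /* 0x95 -> 21         */

			printf("P%c%d -> device function %d\n",
			       'A' + port, offset, funcs[i]);
		}
		return 0;
	}

So this group muxes PE21..PE23 to device function 1, consistent with the "-e" suffix in its name; the jz4725b LCD groups added earlier in this patch use the same convention.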
+static int jz4770_mmc1_8bit_e_pins[] = { 0x98, 0x99, 0x9a, 0x9b, }; +static int jz4770_mmc2_1bit_b_pins[] = { 0x3c, 0x3d, 0x34, }; +static int jz4770_mmc2_4bit_b_pins[] = { 0x35, 0x3e, 0x3f, }; +static int jz4770_mmc2_1bit_e_pins[] = { 0x9c, 0x9d, 0x94, }; +static int jz4770_mmc2_4bit_e_pins[] = { 0x95, 0x96, 0x97, }; +static int jz4770_mmc2_8bit_e_pins[] = { 0x98, 0x99, 0x9a, 0x9b, }; +static int jz4770_nemc_8bit_data_pins[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, }; +static int jz4770_nemc_16bit_data_pins[] = { + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, +}; static int jz4770_nemc_cle_ale_pins[] = { 0x20, 0x21, }; static int jz4770_nemc_addr_pins[] = { 0x22, 0x23, 0x24, 0x25, }; static int jz4770_nemc_rd_we_pins[] = { 0x10, 0x11, }; static int jz4770_nemc_frd_fwe_pins[] = { 0x12, 0x13, }; +static int jz4770_nemc_wait_pins[] = { 0x1b, }; static int jz4770_nemc_cs1_pins[] = { 0x15, }; static int jz4770_nemc_cs2_pins[] = { 0x16, }; static int jz4770_nemc_cs3_pins[] = { 0x17, }; static int jz4770_nemc_cs4_pins[] = { 0x18, }; static int jz4770_nemc_cs5_pins[] = { 0x19, }; static int jz4770_nemc_cs6_pins[] = { 0x1a, }; -static int jz4770_i2c0_pins[] = { 0x6e, 0x6f, }; -static int jz4770_i2c1_pins[] = { 0x8e, 0x8f, }; +static int jz4770_i2c0_pins[] = { 0x7e, 0x7f, }; +static int jz4770_i2c1_pins[] = { 0x9e, 0x9f, }; static int jz4770_i2c2_pins[] = { 0xb0, 0xb1, }; -static int jz4770_i2c3_pins[] = { 0x6a, 0x6b, }; -static int jz4770_i2c4_e_pins[] = { 0x8c, 0x8d, }; -static int jz4770_i2c4_f_pins[] = { 0xb9, 0xb8, }; -static int jz4770_cim_pins[] = { - 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, +static int jz4770_cim_8bit_pins[] = { + 0x26, 0x27, 0x28, 0x29, + 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, +}; +static int jz4770_cim_12bit_pins[] = { + 0x32, 0x33, 0xb0, 0xb1, }; -static int jz4770_lcd_32bit_pins[] = { +static int jz4770_lcd_24bit_pins[] = { 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, - 0x58, 0x59, 0x51, + 0x58, 0x59, 0x5a, 0x5b, }; static int jz4770_pwm_pwm0_pins[] = { 0x80, }; static int jz4770_pwm_pwm1_pins[] = { 0x81, }; @@ -371,30 +411,41 @@ static int jz4770_pwm_pwm4_pins[] = { 0x84, }; static int jz4770_pwm_pwm5_pins[] = { 0x85, }; static int jz4770_pwm_pwm6_pins[] = { 0x6a, }; static int jz4770_pwm_pwm7_pins[] = { 0x6b, }; +static int jz4770_mac_rmii_pins[] = { + 0xa9, 0xab, 0xaa, 0xac, 0xa5, 0xa4, 0xad, 0xae, 0xa6, 0xa8, +}; +static int jz4770_mac_mii_pins[] = { 0xa7, 0xaf, }; static int jz4770_uart0_data_funcs[] = { 0, 0, }; static int jz4770_uart0_hwflow_funcs[] = { 0, 0, }; static int jz4770_uart1_data_funcs[] = { 0, 0, }; static int jz4770_uart1_hwflow_funcs[] = { 0, 0, }; -static int jz4770_uart2_data_funcs[] = { 1, 1, }; -static int jz4770_uart2_hwflow_funcs[] = { 1, 1, }; +static int jz4770_uart2_data_funcs[] = { 0, 0, }; +static int jz4770_uart2_hwflow_funcs[] = { 0, 0, }; static int jz4770_uart3_data_funcs[] = { 0, 1, }; static int jz4770_uart3_hwflow_funcs[] = { 0, 0, }; -static int jz4770_uart4_data_funcs[] = { 2, 2, }; -static int jz4770_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, }; -static int jz4770_mmc0_4bit_a_funcs[] = { 1, 1, 1, }; static int jz4770_mmc0_1bit_a_funcs[] = { 1, 1, 0, }; -static int jz4770_mmc0_4bit_e_funcs[] = { 0, 0, 0, }; +static int jz4770_mmc0_4bit_a_funcs[] = { 1, 1, 1, }; static int jz4770_mmc0_1bit_e_funcs[] = { 0, 0, 0, }; -static int jz4770_mmc1_4bit_d_funcs[] = { 0, 0, 0, }; +static int 
jz4770_mmc0_4bit_e_funcs[] = { 0, 0, 0, }; +static int jz4770_mmc0_8bit_e_funcs[] = { 0, 0, 0, 0, }; static int jz4770_mmc1_1bit_d_funcs[] = { 0, 0, 0, }; -static int jz4770_mmc1_4bit_e_funcs[] = { 1, 1, 1, }; +static int jz4770_mmc1_4bit_d_funcs[] = { 0, 0, 0, }; static int jz4770_mmc1_1bit_e_funcs[] = { 1, 1, 1, }; -static int jz4770_nemc_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, }; +static int jz4770_mmc1_4bit_e_funcs[] = { 1, 1, 1, }; +static int jz4770_mmc1_8bit_e_funcs[] = { 1, 1, 1, 1, }; +static int jz4770_mmc2_1bit_b_funcs[] = { 0, 0, 0, }; +static int jz4770_mmc2_4bit_b_funcs[] = { 0, 0, 0, }; +static int jz4770_mmc2_1bit_e_funcs[] = { 2, 2, 2, }; +static int jz4770_mmc2_4bit_e_funcs[] = { 2, 2, 2, }; +static int jz4770_mmc2_8bit_e_funcs[] = { 2, 2, 2, 2, }; +static int jz4770_nemc_8bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, }; +static int jz4770_nemc_16bit_data_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, }; static int jz4770_nemc_cle_ale_funcs[] = { 0, 0, }; static int jz4770_nemc_addr_funcs[] = { 0, 0, 0, 0, }; static int jz4770_nemc_rd_we_funcs[] = { 0, 0, }; static int jz4770_nemc_frd_fwe_funcs[] = { 0, 0, }; +static int jz4770_nemc_wait_funcs[] = { 0, }; static int jz4770_nemc_cs1_funcs[] = { 0, }; static int jz4770_nemc_cs2_funcs[] = { 0, }; static int jz4770_nemc_cs3_funcs[] = { 0, }; @@ -404,14 +455,13 @@ static int jz4770_nemc_cs6_funcs[] = { 0, }; static int jz4770_i2c0_funcs[] = { 0, 0, }; static int jz4770_i2c1_funcs[] = { 0, 0, }; static int jz4770_i2c2_funcs[] = { 2, 2, }; -static int jz4770_i2c3_funcs[] = { 1, 1, }; -static int jz4770_i2c4_e_funcs[] = { 1, 1, }; -static int jz4770_i2c4_f_funcs[] = { 1, 1, }; -static int jz4770_cim_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; -static int jz4770_lcd_32bit_funcs[] = { +static int jz4770_cim_8bit_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; +static int jz4770_cim_12bit_funcs[] = { 0, 0, 0, 0, }; +static int jz4770_lcd_24bit_funcs[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, + 0, 0, 0, 0, }; static int jz4770_pwm_pwm0_funcs[] = { 0, }; static int jz4770_pwm_pwm1_funcs[] = { 0, }; @@ -421,6 +471,8 @@ static int jz4770_pwm_pwm4_funcs[] = { 0, }; static int jz4770_pwm_pwm5_funcs[] = { 0, }; static int jz4770_pwm_pwm6_funcs[] = { 0, }; static int jz4770_pwm_pwm7_funcs[] = { 0, }; +static int jz4770_mac_rmii_funcs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; +static int jz4770_mac_mii_funcs[] = { 0, 0, }; static const struct group_desc jz4770_groups[] = { INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data), @@ -431,21 +483,28 @@ static const struct group_desc jz4770_groups[] = { INGENIC_PIN_GROUP("uart2-hwflow", jz4770_uart2_hwflow), INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data), INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow), - INGENIC_PIN_GROUP("uart4-data", jz4770_uart4_data), - INGENIC_PIN_GROUP("mmc0-8bit-a", jz4770_mmc0_8bit_a), - INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a), INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a), - INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e), + INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a), INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e), - INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d), + INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e), + INGENIC_PIN_GROUP("mmc0-8bit-e", jz4770_mmc0_8bit_e), INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d), - INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e), + INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d), INGENIC_PIN_GROUP("mmc1-1bit-e", 
jz4770_mmc1_1bit_e), - INGENIC_PIN_GROUP("nemc-data", jz4770_nemc_data), + INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e), + INGENIC_PIN_GROUP("mmc1-8bit-e", jz4770_mmc1_8bit_e), + INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b), + INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b), + INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e), + INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e), + INGENIC_PIN_GROUP("mmc2-8bit-e", jz4770_mmc2_8bit_e), + INGENIC_PIN_GROUP("nemc-8bit-data", jz4770_nemc_8bit_data), + INGENIC_PIN_GROUP("nemc-16bit-data", jz4770_nemc_16bit_data), INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale), INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr), INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we), INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe), + INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait), INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1), INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2), INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3), @@ -455,11 +514,9 @@ static const struct group_desc jz4770_groups[] = { INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0), INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1), INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2), - INGENIC_PIN_GROUP("i2c3-data", jz4770_i2c3), - INGENIC_PIN_GROUP("i2c4-data-e", jz4770_i2c4_e), - INGENIC_PIN_GROUP("i2c4-data-f", jz4770_i2c4_f), - INGENIC_PIN_GROUP("cim-data", jz4770_cim), - INGENIC_PIN_GROUP("lcd-32bit", jz4770_lcd_32bit), + INGENIC_PIN_GROUP("cim-data-8bit", jz4770_cim_8bit), + INGENIC_PIN_GROUP("cim-data-12bit", jz4770_cim_12bit), + INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit), { "lcd-no-pins", }, INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0), INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1), @@ -469,32 +526,41 @@ static const struct group_desc jz4770_groups[] = { INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5), INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6), INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7), + INGENIC_PIN_GROUP("mac-rmii", jz4770_mac_rmii), + INGENIC_PIN_GROUP("mac-mii", jz4770_mac_mii), }; static const char *jz4770_uart0_groups[] = { "uart0-data", "uart0-hwflow", }; static const char *jz4770_uart1_groups[] = { "uart1-data", "uart1-hwflow", }; static const char *jz4770_uart2_groups[] = { "uart2-data", "uart2-hwflow", }; static const char *jz4770_uart3_groups[] = { "uart3-data", "uart3-hwflow", }; -static const char *jz4770_uart4_groups[] = { "uart4-data", }; static const char *jz4770_mmc0_groups[] = { - "mmc0-8bit-a", "mmc0-4bit-a", "mmc0-1bit-a", - "mmc0-1bit-e", "mmc0-4bit-e", + "mmc0-1bit-a", "mmc0-4bit-a", + "mmc0-1bit-e", "mmc0-4bit-e", "mmc0-8bit-e", }; static const char *jz4770_mmc1_groups[] = { - "mmc1-1bit-d", "mmc1-4bit-d", "mmc1-1bit-e", "mmc1-4bit-e", + "mmc1-1bit-d", "mmc1-4bit-d", + "mmc1-1bit-e", "mmc1-4bit-e", "mmc1-8bit-e", +}; +static const char *jz4770_mmc2_groups[] = { + "mmc2-1bit-b", "mmc2-4bit-b", + "mmc2-1bit-e", "mmc2-4bit-e", "mmc2-8bit-e", }; static const char *jz4770_nemc_groups[] = { - "nemc-data", "nemc-cle-ale", "nemc-addr", "nemc-rd-we", "nemc-frd-fwe", + "nemc-8bit-data", "nemc-16bit-data", "nemc-cle-ale", + "nemc-addr", "nemc-rd-we", "nemc-frd-fwe", "nemc-wait", }; static const char *jz4770_cs1_groups[] = { "nemc-cs1", }; +static const char *jz4770_cs2_groups[] = { "nemc-cs2", }; +static const char *jz4770_cs3_groups[] = { "nemc-cs3", }; +static const char *jz4770_cs4_groups[] = { "nemc-cs4", }; +static const char *jz4770_cs5_groups[] = { "nemc-cs5", }; static const char *jz4770_cs6_groups[] = { "nemc-cs6", }; static const char *jz4770_i2c0_groups[] 
= { "i2c0-data", }; static const char *jz4770_i2c1_groups[] = { "i2c1-data", }; static const char *jz4770_i2c2_groups[] = { "i2c2-data", }; -static const char *jz4770_i2c3_groups[] = { "i2c3-data", }; -static const char *jz4770_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", }; -static const char *jz4770_cim_groups[] = { "cim-data", }; -static const char *jz4770_lcd_groups[] = { "lcd-32bit", "lcd-no-pins", }; +static const char *jz4770_cim_groups[] = { "cim-data-8bit", "cim-data-12bit", }; +static const char *jz4770_lcd_groups[] = { "lcd-24bit", "lcd-no-pins", }; static const char *jz4770_pwm0_groups[] = { "pwm0", }; static const char *jz4770_pwm1_groups[] = { "pwm1", }; static const char *jz4770_pwm2_groups[] = { "pwm2", }; @@ -503,23 +569,26 @@ static const char *jz4770_pwm4_groups[] = { "pwm4", }; static const char *jz4770_pwm5_groups[] = { "pwm5", }; static const char *jz4770_pwm6_groups[] = { "pwm6", }; static const char *jz4770_pwm7_groups[] = { "pwm7", }; +static const char *jz4770_mac_groups[] = { "mac-rmii", "mac-mii", }; static const struct function_desc jz4770_functions[] = { { "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), }, { "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), }, { "uart2", jz4770_uart2_groups, ARRAY_SIZE(jz4770_uart2_groups), }, { "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), }, - { "uart4", jz4770_uart4_groups, ARRAY_SIZE(jz4770_uart4_groups), }, { "mmc0", jz4770_mmc0_groups, ARRAY_SIZE(jz4770_mmc0_groups), }, { "mmc1", jz4770_mmc1_groups, ARRAY_SIZE(jz4770_mmc1_groups), }, + { "mmc2", jz4770_mmc2_groups, ARRAY_SIZE(jz4770_mmc2_groups), }, { "nemc", jz4770_nemc_groups, ARRAY_SIZE(jz4770_nemc_groups), }, { "nemc-cs1", jz4770_cs1_groups, ARRAY_SIZE(jz4770_cs1_groups), }, + { "nemc-cs2", jz4770_cs2_groups, ARRAY_SIZE(jz4770_cs2_groups), }, + { "nemc-cs3", jz4770_cs3_groups, ARRAY_SIZE(jz4770_cs3_groups), }, + { "nemc-cs4", jz4770_cs4_groups, ARRAY_SIZE(jz4770_cs4_groups), }, + { "nemc-cs5", jz4770_cs5_groups, ARRAY_SIZE(jz4770_cs5_groups), }, { "nemc-cs6", jz4770_cs6_groups, ARRAY_SIZE(jz4770_cs6_groups), }, { "i2c0", jz4770_i2c0_groups, ARRAY_SIZE(jz4770_i2c0_groups), }, { "i2c1", jz4770_i2c1_groups, ARRAY_SIZE(jz4770_i2c1_groups), }, { "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), }, - { "i2c3", jz4770_i2c3_groups, ARRAY_SIZE(jz4770_i2c3_groups), }, - { "i2c4", jz4770_i2c4_groups, ARRAY_SIZE(jz4770_i2c4_groups), }, { "cim", jz4770_cim_groups, ARRAY_SIZE(jz4770_cim_groups), }, { "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), }, { "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), }, @@ -530,6 +599,7 @@ static const struct function_desc jz4770_functions[] = { { "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), }, { "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), }, { "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), }, + { "mac", jz4770_mac_groups, ARRAY_SIZE(jz4770_mac_groups), }, }; static const struct ingenic_chip_info jz4770_chip_info = { @@ -542,7 +612,140 @@ static const struct ingenic_chip_info jz4770_chip_info = { .pull_downs = jz4770_pull_downs, }; -static u32 gpio_ingenic_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg) +static int jz4780_uart2_data_pins[] = { 0x66, 0x67, }; +static int jz4780_uart2_hwflow_pins[] = { 0x65, 0x64, }; +static int jz4780_uart4_data_pins[] = { 0x54, 0x4a, }; +static int jz4780_mmc0_8bit_a_pins[] = { 0x04, 0x05, 0x06, 0x07, 0x18, }; +static int jz4780_i2c3_pins[] = { 0x6a, 0x6b, }; +static int jz4780_i2c4_e_pins[] = 
{ 0x8c, 0x8d, }; +static int jz4780_i2c4_f_pins[] = { 0xb9, 0xb8, }; + +static int jz4780_uart2_data_funcs[] = { 1, 1, }; +static int jz4780_uart2_hwflow_funcs[] = { 1, 1, }; +static int jz4780_uart4_data_funcs[] = { 2, 2, }; +static int jz4780_mmc0_8bit_a_funcs[] = { 1, 1, 1, 1, 1, }; +static int jz4780_i2c3_funcs[] = { 1, 1, }; +static int jz4780_i2c4_e_funcs[] = { 1, 1, }; +static int jz4780_i2c4_f_funcs[] = { 1, 1, }; + +static const struct group_desc jz4780_groups[] = { + INGENIC_PIN_GROUP("uart0-data", jz4770_uart0_data), + INGENIC_PIN_GROUP("uart0-hwflow", jz4770_uart0_hwflow), + INGENIC_PIN_GROUP("uart1-data", jz4770_uart1_data), + INGENIC_PIN_GROUP("uart1-hwflow", jz4770_uart1_hwflow), + INGENIC_PIN_GROUP("uart2-data", jz4780_uart2_data), + INGENIC_PIN_GROUP("uart2-hwflow", jz4780_uart2_hwflow), + INGENIC_PIN_GROUP("uart3-data", jz4770_uart3_data), + INGENIC_PIN_GROUP("uart3-hwflow", jz4770_uart3_hwflow), + INGENIC_PIN_GROUP("uart4-data", jz4780_uart4_data), + INGENIC_PIN_GROUP("mmc0-1bit-a", jz4770_mmc0_1bit_a), + INGENIC_PIN_GROUP("mmc0-4bit-a", jz4770_mmc0_4bit_a), + INGENIC_PIN_GROUP("mmc0-8bit-a", jz4780_mmc0_8bit_a), + INGENIC_PIN_GROUP("mmc0-1bit-e", jz4770_mmc0_1bit_e), + INGENIC_PIN_GROUP("mmc0-4bit-e", jz4770_mmc0_4bit_e), + INGENIC_PIN_GROUP("mmc1-1bit-d", jz4770_mmc1_1bit_d), + INGENIC_PIN_GROUP("mmc1-4bit-d", jz4770_mmc1_4bit_d), + INGENIC_PIN_GROUP("mmc1-1bit-e", jz4770_mmc1_1bit_e), + INGENIC_PIN_GROUP("mmc1-4bit-e", jz4770_mmc1_4bit_e), + INGENIC_PIN_GROUP("mmc2-1bit-b", jz4770_mmc2_1bit_b), + INGENIC_PIN_GROUP("mmc2-4bit-b", jz4770_mmc2_4bit_b), + INGENIC_PIN_GROUP("mmc2-1bit-e", jz4770_mmc2_1bit_e), + INGENIC_PIN_GROUP("mmc2-4bit-e", jz4770_mmc2_4bit_e), + INGENIC_PIN_GROUP("nemc-data", jz4770_nemc_8bit_data), + INGENIC_PIN_GROUP("nemc-cle-ale", jz4770_nemc_cle_ale), + INGENIC_PIN_GROUP("nemc-addr", jz4770_nemc_addr), + INGENIC_PIN_GROUP("nemc-rd-we", jz4770_nemc_rd_we), + INGENIC_PIN_GROUP("nemc-frd-fwe", jz4770_nemc_frd_fwe), + INGENIC_PIN_GROUP("nemc-wait", jz4770_nemc_wait), + INGENIC_PIN_GROUP("nemc-cs1", jz4770_nemc_cs1), + INGENIC_PIN_GROUP("nemc-cs2", jz4770_nemc_cs2), + INGENIC_PIN_GROUP("nemc-cs3", jz4770_nemc_cs3), + INGENIC_PIN_GROUP("nemc-cs4", jz4770_nemc_cs4), + INGENIC_PIN_GROUP("nemc-cs5", jz4770_nemc_cs5), + INGENIC_PIN_GROUP("nemc-cs6", jz4770_nemc_cs6), + INGENIC_PIN_GROUP("i2c0-data", jz4770_i2c0), + INGENIC_PIN_GROUP("i2c1-data", jz4770_i2c1), + INGENIC_PIN_GROUP("i2c2-data", jz4770_i2c2), + INGENIC_PIN_GROUP("i2c3-data", jz4780_i2c3), + INGENIC_PIN_GROUP("i2c4-data-e", jz4780_i2c4_e), + INGENIC_PIN_GROUP("i2c4-data-f", jz4780_i2c4_f), + INGENIC_PIN_GROUP("cim-data", jz4770_cim_8bit), + INGENIC_PIN_GROUP("lcd-24bit", jz4770_lcd_24bit), + { "lcd-no-pins", }, + INGENIC_PIN_GROUP("pwm0", jz4770_pwm_pwm0), + INGENIC_PIN_GROUP("pwm1", jz4770_pwm_pwm1), + INGENIC_PIN_GROUP("pwm2", jz4770_pwm_pwm2), + INGENIC_PIN_GROUP("pwm3", jz4770_pwm_pwm3), + INGENIC_PIN_GROUP("pwm4", jz4770_pwm_pwm4), + INGENIC_PIN_GROUP("pwm5", jz4770_pwm_pwm5), + INGENIC_PIN_GROUP("pwm6", jz4770_pwm_pwm6), + INGENIC_PIN_GROUP("pwm7", jz4770_pwm_pwm7), +}; + +static const char *jz4780_uart2_groups[] = { "uart2-data", "uart2-hwflow", }; +static const char *jz4780_uart4_groups[] = { "uart4-data", }; +static const char *jz4780_mmc0_groups[] = { + "mmc0-1bit-a", "mmc0-4bit-a", "mmc0-8bit-a", + "mmc0-1bit-e", "mmc0-4bit-e", +}; +static const char *jz4780_mmc1_groups[] = { + "mmc1-1bit-d", "mmc1-4bit-d", "mmc1-1bit-e", "mmc1-4bit-e", +}; +static const char *jz4780_mmc2_groups[] = { 
+ "mmc2-1bit-b", "mmc2-4bit-b", "mmc2-1bit-e", "mmc2-4bit-e", +}; +static const char *jz4780_nemc_groups[] = { + "nemc-data", "nemc-cle-ale", "nemc-addr", + "nemc-rd-we", "nemc-frd-fwe", "nemc-wait", +}; +static const char *jz4780_i2c3_groups[] = { "i2c3-data", }; +static const char *jz4780_i2c4_groups[] = { "i2c4-data-e", "i2c4-data-f", }; +static const char *jz4780_cim_groups[] = { "cim-data", }; + +static const struct function_desc jz4780_functions[] = { + { "uart0", jz4770_uart0_groups, ARRAY_SIZE(jz4770_uart0_groups), }, + { "uart1", jz4770_uart1_groups, ARRAY_SIZE(jz4770_uart1_groups), }, + { "uart2", jz4780_uart2_groups, ARRAY_SIZE(jz4780_uart2_groups), }, + { "uart3", jz4770_uart3_groups, ARRAY_SIZE(jz4770_uart3_groups), }, + { "uart4", jz4780_uart4_groups, ARRAY_SIZE(jz4780_uart4_groups), }, + { "mmc0", jz4780_mmc0_groups, ARRAY_SIZE(jz4780_mmc0_groups), }, + { "mmc1", jz4780_mmc1_groups, ARRAY_SIZE(jz4780_mmc1_groups), }, + { "mmc2", jz4780_mmc2_groups, ARRAY_SIZE(jz4780_mmc2_groups), }, + { "nemc", jz4780_nemc_groups, ARRAY_SIZE(jz4780_nemc_groups), }, + { "nemc-cs1", jz4770_cs1_groups, ARRAY_SIZE(jz4770_cs1_groups), }, + { "nemc-cs2", jz4770_cs2_groups, ARRAY_SIZE(jz4770_cs2_groups), }, + { "nemc-cs3", jz4770_cs3_groups, ARRAY_SIZE(jz4770_cs3_groups), }, + { "nemc-cs4", jz4770_cs4_groups, ARRAY_SIZE(jz4770_cs4_groups), }, + { "nemc-cs5", jz4770_cs5_groups, ARRAY_SIZE(jz4770_cs5_groups), }, + { "nemc-cs6", jz4770_cs6_groups, ARRAY_SIZE(jz4770_cs6_groups), }, + { "i2c0", jz4770_i2c0_groups, ARRAY_SIZE(jz4770_i2c0_groups), }, + { "i2c1", jz4770_i2c1_groups, ARRAY_SIZE(jz4770_i2c1_groups), }, + { "i2c2", jz4770_i2c2_groups, ARRAY_SIZE(jz4770_i2c2_groups), }, + { "i2c3", jz4780_i2c3_groups, ARRAY_SIZE(jz4780_i2c3_groups), }, + { "i2c4", jz4780_i2c4_groups, ARRAY_SIZE(jz4780_i2c4_groups), }, + { "cim", jz4780_cim_groups, ARRAY_SIZE(jz4780_cim_groups), }, + { "lcd", jz4770_lcd_groups, ARRAY_SIZE(jz4770_lcd_groups), }, + { "pwm0", jz4770_pwm0_groups, ARRAY_SIZE(jz4770_pwm0_groups), }, + { "pwm1", jz4770_pwm1_groups, ARRAY_SIZE(jz4770_pwm1_groups), }, + { "pwm2", jz4770_pwm2_groups, ARRAY_SIZE(jz4770_pwm2_groups), }, + { "pwm3", jz4770_pwm3_groups, ARRAY_SIZE(jz4770_pwm3_groups), }, + { "pwm4", jz4770_pwm4_groups, ARRAY_SIZE(jz4770_pwm4_groups), }, + { "pwm5", jz4770_pwm5_groups, ARRAY_SIZE(jz4770_pwm5_groups), }, + { "pwm6", jz4770_pwm6_groups, ARRAY_SIZE(jz4770_pwm6_groups), }, + { "pwm7", jz4770_pwm7_groups, ARRAY_SIZE(jz4770_pwm7_groups), }, +}; + +static const struct ingenic_chip_info jz4780_chip_info = { + .num_chips = 6, + .groups = jz4780_groups, + .num_groups = ARRAY_SIZE(jz4780_groups), + .functions = jz4780_functions, + .num_functions = ARRAY_SIZE(jz4780_functions), + .pull_ups = jz4770_pull_ups, + .pull_downs = jz4770_pull_downs, +}; + +static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg) { unsigned int val; @@ -551,7 +754,7 @@ static u32 gpio_ingenic_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg) return (u32) val; } -static void gpio_ingenic_set_bit(struct ingenic_gpio_chip *jzgc, +static void ingenic_gpio_set_bit(struct ingenic_gpio_chip *jzgc, u8 reg, u8 offset, bool set) { if (set) @@ -565,7 +768,7 @@ static void gpio_ingenic_set_bit(struct ingenic_gpio_chip *jzgc, static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc, u8 offset) { - unsigned int val = gpio_ingenic_read_reg(jzgc, GPIO_PIN); + unsigned int val = ingenic_gpio_read_reg(jzgc, GPIO_PIN); return !!(val & BIT(offset)); } @@ -574,9 +777,9 @@ static void 
ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc, u8 offset, int value) { if (jzgc->jzpc->version >= ID_JZ4770) - gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value); + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value); else - gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value); + ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value); } static void irq_set_type(struct ingenic_gpio_chip *jzgc, @@ -594,21 +797,21 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc, switch (type) { case IRQ_TYPE_EDGE_RISING: - gpio_ingenic_set_bit(jzgc, reg2, offset, true); - gpio_ingenic_set_bit(jzgc, reg1, offset, true); + ingenic_gpio_set_bit(jzgc, reg2, offset, true); + ingenic_gpio_set_bit(jzgc, reg1, offset, true); break; case IRQ_TYPE_EDGE_FALLING: - gpio_ingenic_set_bit(jzgc, reg2, offset, false); - gpio_ingenic_set_bit(jzgc, reg1, offset, true); + ingenic_gpio_set_bit(jzgc, reg2, offset, false); + ingenic_gpio_set_bit(jzgc, reg1, offset, true); break; case IRQ_TYPE_LEVEL_HIGH: - gpio_ingenic_set_bit(jzgc, reg2, offset, true); - gpio_ingenic_set_bit(jzgc, reg1, offset, false); + ingenic_gpio_set_bit(jzgc, reg2, offset, true); + ingenic_gpio_set_bit(jzgc, reg1, offset, false); break; case IRQ_TYPE_LEVEL_LOW: default: - gpio_ingenic_set_bit(jzgc, reg2, offset, false); - gpio_ingenic_set_bit(jzgc, reg1, offset, false); + ingenic_gpio_set_bit(jzgc, reg2, offset, false); + ingenic_gpio_set_bit(jzgc, reg1, offset, false); break; } } @@ -618,7 +821,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd) struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); - gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true); + ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, true); } static void ingenic_gpio_irq_unmask(struct irq_data *irqd) @@ -626,7 +829,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd) struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); - gpio_ingenic_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false); + ingenic_gpio_set_bit(jzgc, GPIO_MSK, irqd->hwirq, false); } static void ingenic_gpio_irq_enable(struct irq_data *irqd) @@ -636,9 +839,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd) int irq = irqd->hwirq; if (jzgc->jzpc->version >= ID_JZ4770) - gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, true); + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true); else - gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true); + ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true); ingenic_gpio_irq_unmask(irqd); } @@ -652,9 +855,9 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd) ingenic_gpio_irq_mask(irqd); if (jzgc->jzpc->version >= ID_JZ4770) - gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_INT, irq, false); + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false); else - gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false); + ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false); } static void ingenic_gpio_irq_ack(struct irq_data *irqd) @@ -677,9 +880,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd) } if (jzgc->jzpc->version >= ID_JZ4770) - gpio_ingenic_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false); + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false); else - gpio_ingenic_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true); + ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true); } static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) @@ -734,9 
+937,9 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc) chained_irq_enter(irq_chip, desc); if (jzgc->jzpc->version >= ID_JZ4770) - flag = gpio_ingenic_read_reg(jzgc, JZ4770_GPIO_FLAG); + flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG); else - flag = gpio_ingenic_read_reg(jzgc, JZ4740_GPIO_FLAG); + flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG); for_each_set_bit(i, &flag, 32) generic_handle_irq(irq_linear_revmap(gc->irq.domain, i)); @@ -1185,7 +1388,9 @@ static int __init ingenic_pinctrl_probe(struct platform_device *pdev) else jzpc->version = (enum jz_version)id->driver_data; - if (jzpc->version >= ID_JZ4770) + if (jzpc->version >= ID_JZ4780) + chip_info = &jz4780_chip_info; + else if (jzpc->version >= ID_JZ4770) chip_info = &jz4770_chip_info; else if (jzpc->version >= ID_JZ4725B) chip_info = &jz4725b_chip_info; diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index b03481ef99a1..5d7a8514def9 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -68,6 +68,7 @@ struct mcp23s08 { struct mutex lock; struct gpio_chip chip; + struct irq_chip irq_chip; struct regmap *regmap; struct device *dev; @@ -607,15 +608,6 @@ static void mcp23s08_irq_bus_unlock(struct irq_data *data) mutex_unlock(&mcp->lock); } -static struct irq_chip mcp23s08_irq_chip = { - .name = "gpio-mcp23xxx", - .irq_mask = mcp23s08_irq_mask, - .irq_unmask = mcp23s08_irq_unmask, - .irq_set_type = mcp23s08_irq_set_type, - .irq_bus_lock = mcp23s08_irq_bus_lock, - .irq_bus_sync_unlock = mcp23s08_irq_bus_unlock, -}; - static int mcp23s08_irq_setup(struct mcp23s08 *mcp) { struct gpio_chip *chip = &mcp->chip; @@ -645,7 +637,7 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp) int err; err = gpiochip_irqchip_add_nested(chip, - &mcp23s08_irq_chip, + &mcp->irq_chip, 0, handle_simple_irq, IRQ_TYPE_NONE); @@ -656,7 +648,7 @@ static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp) } gpiochip_set_nested_irqchip(chip, - &mcp23s08_irq_chip, + &mcp->irq_chip, mcp->irq); return 0; @@ -832,8 +824,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, break; case MCP_TYPE_S18: + one_regmap_config = + devm_kmemdup(dev, &mcp23x17_regmap, + sizeof(struct regmap_config), GFP_KERNEL); + if (!one_regmap_config) + return -ENOMEM; mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, - &mcp23x17_regmap); + one_regmap_config); mcp->reg_shift = 1; mcp->chip.ngpio = 16; mcp->chip.label = "mcp23s18"; @@ -1042,6 +1039,13 @@ static int mcp230xx_probe(struct i2c_client *client, return -ENOMEM; mcp->irq = client->irq; + mcp->irq_chip.name = dev_name(&client->dev); + mcp->irq_chip.irq_mask = mcp23s08_irq_mask; + mcp->irq_chip.irq_unmask = mcp23s08_irq_unmask; + mcp->irq_chip.irq_set_type = mcp23s08_irq_set_type; + mcp->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock; + mcp->irq_chip.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock; + status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr, id->driver_data, pdata->base, 0); if (status) @@ -1139,8 +1143,7 @@ static int mcp23s08_probe(struct spi_device *spi) return -ENODEV; data = devm_kzalloc(&spi->dev, - sizeof(*data) + chips * sizeof(struct mcp23s08), - GFP_KERNEL); + struct_size(data, chip, chips), GFP_KERNEL); if (!data) return -ENOMEM; @@ -1152,6 +1155,13 @@ static int mcp23s08_probe(struct spi_device *spi) chips--; data->mcp[addr] = &data->chip[chips]; data->mcp[addr]->irq = spi->irq; + data->mcp[addr]->irq_chip.name = dev_name(&spi->dev); + 
data->mcp[addr]->irq_chip.irq_mask = mcp23s08_irq_mask; + data->mcp[addr]->irq_chip.irq_unmask = mcp23s08_irq_unmask; + data->mcp[addr]->irq_chip.irq_set_type = mcp23s08_irq_set_type; + data->mcp[addr]->irq_chip.irq_bus_lock = mcp23s08_irq_bus_lock; + data->mcp[addr]->irq_chip.irq_bus_sync_unlock = + mcp23s08_irq_bus_unlock; status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 0x40 | (addr << 1), type, pdata->base, addr); diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 836e9f3eae4c..2e66ab72c10b 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -137,6 +137,7 @@ config PINCTRL_QCOM_SPMI_PMIC select PINMUX select PINCONF select GENERIC_PINCONF + select IRQ_DOMAIN_HIERARCHY help This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips, @@ -149,6 +150,7 @@ config PINCTRL_QCOM_SSBI_PMIC select PINMUX select PINCONF select GENERIC_PINCONF + select IRQ_DOMAIN_HIERARCHY help This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm GPIO and MPP blocks found in the Qualcomm PMIC's chips, diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c index 7aae52a09ff0..1c6ba978c69f 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs404.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c @@ -79,7 +79,7 @@ enum { .intr_cfg_reg = 0, \ .intr_status_reg = 0, \ .intr_target_reg = 0, \ - .tile = NORTH, \ + .tile = SOUTH, \ .mux_bit = -1, \ .pull_bit = pull, \ .drv_bit = drv, \ @@ -95,31 +95,6 @@ enum { .intr_detection_width = -1, \ } -#define UFS_RESET(pg_name, offset) \ - { \ - .name = #pg_name, \ - .pins = pg_name##_pins, \ - .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \ - .ctl_reg = offset, \ - .io_reg = offset + 0x4, \ - .intr_cfg_reg = 0, \ - .intr_status_reg = 0, \ - .intr_target_reg = 0, \ - .tile = NORTH, \ - .mux_bit = -1, \ - .pull_bit = 3, \ - .drv_bit = 0, \ - .oe_bit = -1, \ - .in_bit = -1, \ - .out_bit = 0, \ - .intr_enable_bit = -1, \ - .intr_status_bit = -1, \ - .intr_target_bit = -1, \ - .intr_raw_status_bit = -1, \ - .intr_polarity_bit = -1, \ - .intr_detection_bit = -1, \ - .intr_detection_width = -1, \ - } static const struct pinctrl_pin_desc qcs404_pins[] = { PINCTRL_PIN(0, "GPIO_0"), PINCTRL_PIN(1, "GPIO_1"), diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index 4458d44dfcf6..76e57ae2f6e8 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -12,6 +12,7 @@ */ #include +#include #include #include #include @@ -136,7 +137,6 @@ enum pmic_gpio_func_index { /** * struct pmic_gpio_pad - keep current GPIO settings * @base: Address base in SPMI device. - * @irq: IRQ number which this GPIO generate. * @is_enabled: Set to false when GPIO should be put in high Z state. * @out_value: Cached pin output value * @have_buffer: Set to true if GPIO output could be configured in push-pull, @@ -156,7 +156,6 @@ enum pmic_gpio_func_index { */ struct pmic_gpio_pad { u16 base; - int irq; bool is_enabled; bool out_value; bool have_buffer; @@ -179,6 +178,8 @@ struct pmic_gpio_state { struct regmap *map; struct pinctrl_dev *ctrl; struct gpio_chip chip; + struct fwnode_handle *fwnode; + struct irq_domain *domain; }; static const struct pinconf_generic_params pmic_gpio_bindings[] = { @@ -674,11 +675,11 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, else seq_printf(s, " %-4s", pad->output_enabled ? 
"out" : "in"); + seq_printf(s, " %-4s", pad->out_value ? "high" : "low"); seq_printf(s, " %-7s", pmic_gpio_functions[function]); seq_printf(s, " vin-%d", pad->power_source); seq_printf(s, " %-27s", biases[pad->pullup]); seq_printf(s, " %-10s", buffer_types[pad->buffer_type]); - seq_printf(s, " %-4s", pad->out_value ? "high" : "low"); seq_printf(s, " %-7s", strengths[pad->strength]); seq_printf(s, " atest-%d", pad->atest); seq_printf(s, " dtest-%d", pad->dtest_buffer); @@ -761,11 +762,18 @@ static int pmic_gpio_of_xlate(struct gpio_chip *chip, static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned pin) { struct pmic_gpio_state *state = gpiochip_get_data(chip); - struct pmic_gpio_pad *pad; + struct irq_fwspec fwspec; - pad = state->ctrl->desc->pins[pin].drv_data; + fwspec.fwnode = state->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = pin + PMIC_GPIO_PHYSICAL_OFFSET; + /* + * Set the type to a safe value temporarily. This will be overwritten + * later with the proper value by irq_set_type. + */ + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; - return pad->irq; + return irq_create_fwspec_mapping(&fwspec); } static void pmic_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) @@ -935,8 +943,79 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, return 0; } +static struct irq_chip pmic_gpio_irq_chip = { + .name = "spmi-gpio", + .irq_ack = irq_chip_ack_parent, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_type = irq_chip_set_type_parent, + .irq_set_wake = irq_chip_set_wake_parent, + .flags = IRQCHIP_MASK_ON_SUSPEND, +}; + +static int pmic_gpio_domain_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct pmic_gpio_state *state = container_of(domain->host_data, + struct pmic_gpio_state, + chip); + + if (fwspec->param_count != 2 || + fwspec->param[0] < 1 || fwspec->param[0] > state->chip.ngpio) + return -EINVAL; + + *hwirq = fwspec->param[0] - PMIC_GPIO_PHYSICAL_OFFSET; + *type = fwspec->param[1]; + + return 0; +} + +static int pmic_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct pmic_gpio_state *state = container_of(domain->host_data, + struct pmic_gpio_state, + chip); + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + irq_hw_number_t hwirq; + unsigned int type; + int ret, i; + + ret = pmic_gpio_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, + &pmic_gpio_irq_chip, state, + handle_level_irq, NULL, NULL); + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 4; + parent_fwspec.param[0] = 0; + parent_fwspec.param[1] = hwirq + 0xc0; + parent_fwspec.param[2] = 0; + parent_fwspec.param[3] = fwspec->param[1]; + + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, + &parent_fwspec); +} + +static const struct irq_domain_ops pmic_gpio_domain_ops = { + .activate = gpiochip_irq_domain_activate, + .alloc = pmic_gpio_domain_alloc, + .deactivate = gpiochip_irq_domain_deactivate, + .free = irq_domain_free_irqs_common, + .translate = pmic_gpio_domain_translate, +}; + static int pmic_gpio_probe(struct platform_device *pdev) { + struct irq_domain *parent_domain; + struct device_node *parent_node; struct device *dev = &pdev->dev; struct pinctrl_pin_desc *pindesc; struct pinctrl_desc *pctrldesc; @@ -951,13 +1030,7 @@ static int pmic_gpio_probe(struct 
platform_device *pdev) return ret; } - npins = platform_irq_count(pdev); - if (!npins) - return -EINVAL; - if (npins < 0) - return npins; - - BUG_ON(npins > ARRAY_SIZE(pmic_gpio_groups)); + npins = (uintptr_t) device_get_match_data(&pdev->dev); state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL); if (!state) @@ -999,10 +1072,6 @@ static int pmic_gpio_probe(struct platform_device *pdev) pindesc->number = i; pindesc->name = pmic_gpio_groups[i]; - pad->irq = platform_get_irq(pdev, i); - if (pad->irq < 0) - return pad->irq; - pad->base = reg + i * PMIC_GPIO_ADDRESS_RANGE; ret = pmic_gpio_populate(state, pad); @@ -1022,10 +1091,28 @@ static int pmic_gpio_probe(struct platform_device *pdev) if (IS_ERR(state->ctrl)) return PTR_ERR(state->ctrl); + parent_node = of_irq_find_parent(state->dev->of_node); + if (!parent_node) + return -ENXIO; + + parent_domain = irq_find_host(parent_node); + of_node_put(parent_node); + if (!parent_domain) + return -ENXIO; + + state->fwnode = of_node_to_fwnode(state->dev->of_node); + state->domain = irq_domain_create_hierarchy(parent_domain, 0, + state->chip.ngpio, + state->fwnode, + &pmic_gpio_domain_ops, + &state->chip); + if (!state->domain) + return -ENODEV; + ret = gpiochip_add_data(&state->chip, state); if (ret) { dev_err(state->dev, "can't add gpio chip\n"); - return ret; + goto err_chip_add_data; } /* @@ -1051,6 +1138,8 @@ static int pmic_gpio_probe(struct platform_device *pdev) err_range: gpiochip_remove(&state->chip); +err_chip_add_data: + irq_domain_remove(state->domain); return ret; } @@ -1059,17 +1148,21 @@ static int pmic_gpio_remove(struct platform_device *pdev) struct pmic_gpio_state *state = platform_get_drvdata(pdev); gpiochip_remove(&state->chip); + irq_domain_remove(state->domain); return 0; } static const struct of_device_id pmic_gpio_of_match[] = { - { .compatible = "qcom,pm8916-gpio" }, /* 4 GPIO's */ - { .compatible = "qcom,pm8941-gpio" }, /* 36 GPIO's */ - { .compatible = "qcom,pm8994-gpio" }, /* 22 GPIO's */ - { .compatible = "qcom,pmi8994-gpio" }, /* 10 GPIO's */ - { .compatible = "qcom,pma8084-gpio" }, /* 22 GPIO's */ - { .compatible = "qcom,pms405-gpio" }, /* 12 GPIO's, holes on 1 9 10 */ - { .compatible = "qcom,spmi-gpio" }, /* Generic */ + { .compatible = "qcom,pm8005-gpio", .data = (void *) 4 }, + { .compatible = "qcom,pm8916-gpio", .data = (void *) 4 }, + { .compatible = "qcom,pm8941-gpio", .data = (void *) 36 }, + { .compatible = "qcom,pm8994-gpio", .data = (void *) 22 }, + { .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 }, + { .compatible = "qcom,pm8998-gpio", .data = (void *) 26 }, + { .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 }, + { .compatible = "qcom,pma8084-gpio", .data = (void *) 22 }, + /* pms405 has 12 GPIOs with holes on 1, 9, and 10 */ + { .compatible = "qcom,pms405-gpio", .data = (void *) 12 }, { }, }; diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index ded7d765af2e..08dd62b5cebe 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -55,6 +55,8 @@ #define PM8XXX_MAX_GPIOS 44 +#define PM8XXX_GPIO_PHYSICAL_OFFSET 1 + /* custom pinconf parameters */ #define PM8XXX_QCOM_DRIVE_STRENGH (PIN_CONFIG_END + 1) #define PM8XXX_QCOM_PULL_UP_STRENGTH (PIN_CONFIG_END + 2) @@ -99,6 +101,9 @@ struct pm8xxx_gpio { struct pinctrl_desc desc; unsigned npins; + + struct fwnode_handle *fwnode; + struct irq_domain *domain; }; static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = { @@ -499,11 +504,12 @@ static 
int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset) if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) { ret = pin->output_value; - } else { + } else if (pin->irq >= 0) { ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state); if (!ret) ret = !!state; - } + } else + ret = -EINVAL; return ret; } @@ -533,16 +539,39 @@ static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip, if (flags) *flags = gpio_desc->args[1]; - return gpio_desc->args[0] - 1; + return gpio_desc->args[0] - PM8XXX_GPIO_PHYSICAL_OFFSET; } static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip); + struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data; + struct irq_fwspec fwspec; + int ret; + + fwspec.fwnode = pctrl->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = offset + PM8XXX_GPIO_PHYSICAL_OFFSET; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + + ret = irq_create_fwspec_mapping(&fwspec); + + /* + * Cache the IRQ since pm8xxx_gpio_get() needs this to get determine the + * line level. + */ + pin->irq = ret; + + return ret; +} + +static void pm8xxx_gpio_free(struct gpio_chip *chip, unsigned int offset) { struct pm8xxx_gpio *pctrl = gpiochip_get_data(chip); struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data; - return pin->irq; + pin->irq = -1; } #ifdef CONFIG_DEBUG_FS @@ -571,7 +600,7 @@ static void pm8xxx_gpio_dbg_show_one(struct seq_file *s, "no", "high", "medium", "low" }; - seq_printf(s, " gpio%-2d:", offset + 1); + seq_printf(s, " gpio%-2d:", offset + PM8XXX_GPIO_PHYSICAL_OFFSET); if (pin->disable) { seq_puts(s, " ---"); } else { @@ -603,6 +632,7 @@ static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #endif static const struct gpio_chip pm8xxx_gpio_template = { + .free = pm8xxx_gpio_free, .direction_input = pm8xxx_gpio_direction_input, .direction_output = pm8xxx_gpio_direction_output, .get = pm8xxx_gpio_get, @@ -664,13 +694,75 @@ static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl, return 0; } +static struct irq_chip pm8xxx_irq_chip = { + .name = "ssbi-gpio", + .irq_mask_ack = irq_chip_mask_ack_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_set_type = irq_chip_set_type_parent, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, +}; + +static int pm8xxx_domain_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct pm8xxx_gpio *pctrl = container_of(domain->host_data, + struct pm8xxx_gpio, chip); + + if (fwspec->param_count != 2 || fwspec->param[0] < 1 || + fwspec->param[0] > pctrl->chip.ngpio) + return -EINVAL; + + *hwirq = fwspec->param[0] - PM8XXX_GPIO_PHYSICAL_OFFSET; + *type = fwspec->param[1]; + + return 0; +} + +static int pm8xxx_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct pm8xxx_gpio *pctrl = container_of(domain->host_data, + struct pm8xxx_gpio, chip); + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + irq_hw_number_t hwirq; + unsigned int type; + int ret, i; + + ret = pm8xxx_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) + irq_domain_set_info(domain, virq + i, hwirq + i, + &pm8xxx_irq_chip, pctrl, handle_level_irq, + NULL, NULL); + + parent_fwspec.fwnode = domain->parent->fwnode; + parent_fwspec.param_count = 2; + parent_fwspec.param[0] = hwirq + 0xc0; + parent_fwspec.param[1] = fwspec->param[1]; + + return irq_domain_alloc_irqs_parent(domain, virq, 
nr_irqs, + &parent_fwspec); +} + +static const struct irq_domain_ops pm8xxx_domain_ops = { + .activate = gpiochip_irq_domain_activate, + .alloc = pm8xxx_domain_alloc, + .deactivate = gpiochip_irq_domain_deactivate, + .free = irq_domain_free_irqs_common, + .translate = pm8xxx_domain_translate, +}; + static const struct of_device_id pm8xxx_gpio_of_match[] = { - { .compatible = "qcom,pm8018-gpio" }, - { .compatible = "qcom,pm8038-gpio" }, - { .compatible = "qcom,pm8058-gpio" }, - { .compatible = "qcom,pm8917-gpio" }, - { .compatible = "qcom,pm8921-gpio" }, - { .compatible = "qcom,ssbi-gpio" }, + { .compatible = "qcom,pm8018-gpio", .data = (void *) 6 }, + { .compatible = "qcom,pm8038-gpio", .data = (void *) 12 }, + { .compatible = "qcom,pm8058-gpio", .data = (void *) 44 }, + { .compatible = "qcom,pm8917-gpio", .data = (void *) 38 }, + { .compatible = "qcom,pm8921-gpio", .data = (void *) 44 }, { }, }; MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match); @@ -678,22 +770,18 @@ MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match); static int pm8xxx_gpio_probe(struct platform_device *pdev) { struct pm8xxx_pin_data *pin_data; + struct irq_domain *parent_domain; + struct device_node *parent_node; struct pinctrl_pin_desc *pins; struct pm8xxx_gpio *pctrl; - int ret; - int i, npins; + int ret, i; pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); if (!pctrl) return -ENOMEM; pctrl->dev = &pdev->dev; - npins = platform_irq_count(pdev); - if (!npins) - return -EINVAL; - if (npins < 0) - return npins; - pctrl->npins = npins; + pctrl->npins = (uintptr_t) device_get_match_data(&pdev->dev); pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!pctrl->regmap) { @@ -720,12 +808,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) for (i = 0; i < pctrl->desc.npins; i++) { pin_data[i].reg = SSBI_REG_ADDR_GPIO(i); - pin_data[i].irq = platform_get_irq(pdev, i); - if (pin_data[i].irq < 0) { - dev_err(&pdev->dev, - "missing interrupts for pin %d\n", i); - return pin_data[i].irq; - } + pin_data[i].irq = -1; ret = pm8xxx_pin_populate(pctrl, &pin_data[i]); if (ret) @@ -756,10 +839,29 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) pctrl->chip.of_gpio_n_cells = 2; pctrl->chip.label = dev_name(pctrl->dev); pctrl->chip.ngpio = pctrl->npins; + + parent_node = of_irq_find_parent(pctrl->dev->of_node); + if (!parent_node) + return -ENXIO; + + parent_domain = irq_find_host(parent_node); + of_node_put(parent_node); + if (!parent_domain) + return -ENXIO; + + pctrl->fwnode = of_node_to_fwnode(pctrl->dev->of_node); + pctrl->domain = irq_domain_create_hierarchy(parent_domain, 0, + pctrl->chip.ngpio, + pctrl->fwnode, + &pm8xxx_domain_ops, + &pctrl->chip); + if (!pctrl->domain) + return -ENODEV; + ret = gpiochip_add_data(&pctrl->chip, pctrl); if (ret) { dev_err(&pdev->dev, "failed register gpiochip\n"); - return ret; + goto err_chip_add_data; } /* @@ -789,6 +891,8 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) unregister_gpiochip: gpiochip_remove(&pctrl->chip); +err_chip_add_data: + irq_domain_remove(pctrl->domain); return ret; } @@ -798,6 +902,7 @@ static int pm8xxx_gpio_remove(struct platform_device *pdev) struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev); gpiochip_remove(&pctrl->chip); + irq_domain_remove(pctrl->domain); return 0; } diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index f49ea3d92aa1..ebc27b06718c 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -325,13 +325,6 @@ 
err_domains: return ret; } -static u32 exynos_eint_wake_mask = 0xffffffff; - -u32 exynos_get_eint_wake_mask(void) -{ - return exynos_eint_wake_mask; -} - static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) { struct irq_chip *chip = irq_data_get_irq_chip(irqd); @@ -342,10 +335,9 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq); if (!on) - exynos_eint_wake_mask |= bit; + our_chip->eint_wake_mask_value |= bit; else - exynos_eint_wake_mask &= ~bit; - our_chip->eint_wake_mask_value = exynos_eint_wake_mask; + our_chip->eint_wake_mask_value &= ~bit; return 0; } diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c index dc271c3243df..310c6f3ee7cc 100644 --- a/drivers/pinctrl/sh-pfc/pfc-emev2.c +++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c @@ -1260,6 +1260,14 @@ static const char * const dtv_groups[] = { "dtv_b", }; +static const char * const err_rst_reqb_groups[] = { + "err_rst_reqb", +}; + +static const char * const ext_clki_groups[] = { + "ext_clki", +}; + static const char * const iic0_groups[] = { "iic0", }; @@ -1282,6 +1290,10 @@ static const char * const lcd_groups[] = { "yuv3", }; +static const char * const lowpwr_groups[] = { + "lowpwr", +}; + static const char * const ntsc_groups[] = { "ntsc_clk", "ntsc_data", @@ -1295,6 +1307,10 @@ static const char * const pwm1_groups[] = { "pwm1", }; +static const char * const ref_clko_groups[] = { + "ref_clko", +}; + static const char * const sd_groups[] = { "sd_cki", }; @@ -1388,13 +1404,17 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(cam), SH_PFC_FUNCTION(cf), SH_PFC_FUNCTION(dtv), + SH_PFC_FUNCTION(err_rst_reqb), + SH_PFC_FUNCTION(ext_clki), SH_PFC_FUNCTION(iic0), SH_PFC_FUNCTION(iic1), SH_PFC_FUNCTION(jtag), SH_PFC_FUNCTION(lcd), + SH_PFC_FUNCTION(lowpwr), SH_PFC_FUNCTION(ntsc), SH_PFC_FUNCTION(pwm0), SH_PFC_FUNCTION(pwm1), + SH_PFC_FUNCTION(ref_clko), SH_PFC_FUNCTION(sd), SH_PFC_FUNCTION(sdi0), SH_PFC_FUNCTION(sdi1), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c index 6bcdb4b5e69e..068b5e6334d1 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c @@ -1264,8 +1264,8 @@ static const struct sh_pfc_pin pinmux_pins[] = { /* Pins not associated with a GPIO port */ SH_PFC_PIN_NAMED(3, 20, C20), - SH_PFC_PIN_NAMED(20, 1, T1), - SH_PFC_PIN_NAMED(25, 2, Y2), + SH_PFC_PIN_NAMED(1, 20, A20), + SH_PFC_PIN_NAMED(2, 25, B25), }; /* - macro */ @@ -1400,7 +1400,7 @@ HSPI_PFC_DAT(hspi1_a, HSPI_CLK1_A, HSPI_CS1_A, HSPI_RX1_A, HSPI_TX1_A); HSPI_PFC_PIN(hspi1_b, RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 26), - PIN_NUMBER(20, 1), PIN_NUMBER(25, 2)); + PIN_NUMBER(1, 20), PIN_NUMBER(2, 25)); HSPI_PFC_DAT(hspi1_b, HSPI_CLK1_B, HSPI_CS1_B, HSPI_RX1_B, HSPI_TX1_B); diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c index ab7a35392cd8..a84229cb8cd4 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c @@ -10,7 +10,9 @@ #include #include +#include +#include "core.h" #include "sh_pfc.h" /* @@ -5691,7 +5693,22 @@ static int r8a7790_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc return 31 - (pin & 0x1f); } +static const struct soc_device_attribute r8a7790_tdsel[] = { + { .soc_id = "r8a7790", .revision = "ES1.0" }, + { /* sentinel */ } +}; + +static int r8a7790_pinmux_soc_init(struct sh_pfc *pfc) +{ + /* Initialize TDSEL on old 
revisions */ + if (soc_device_match(r8a7790_tdsel)) + sh_pfc_write(pfc, 0xe6060088, 0x00155554); + + return 0; +} + static const struct sh_pfc_soc_operations r8a7790_pinmux_ops = { + .init = r8a7790_pinmux_soc_init, .pin_to_pocctrl = r8a7790_pin_to_pocctrl, }; diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 2859231aaffc..d8b13d4e9bbf 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -4317,7 +4317,7 @@ static const unsigned int vin1_clk_pins[] = { static const unsigned int vin1_clk_mux[] = { VI1_CLK_MARK, }; -static const union vin_data vin1_b_data_pins = { +static const union vin_data vin1_data_b_pins = { .data24 = { /* B */ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(3, 1), @@ -4336,7 +4336,7 @@ static const union vin_data vin1_b_data_pins = { RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 20), }, }; -static const union vin_data vin1_b_data_mux = { +static const union vin_data vin1_data_b_mux = { .data24 = { /* B */ VI1_DATA0_B_MARK, VI1_DATA1_B_MARK, @@ -4355,7 +4355,7 @@ static const union vin_data vin1_b_data_mux = { VI1_R6_B_MARK, VI1_R7_B_MARK, }, }; -static const unsigned int vin1_b_data18_pins[] = { +static const unsigned int vin1_data18_b_pins[] = { /* B */ RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), @@ -4369,7 +4369,7 @@ static const unsigned int vin1_b_data18_pins[] = { RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 18), RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 20), }; -static const unsigned int vin1_b_data18_mux[] = { +static const unsigned int vin1_data18_b_mux[] = { /* B */ VI1_DATA2_B_MARK, VI1_DATA3_B_MARK, VI1_DATA4_B_MARK, VI1_DATA5_B_MARK, @@ -4383,30 +4383,30 @@ static const unsigned int vin1_b_data18_mux[] = { VI1_R4_B_MARK, VI1_R5_B_MARK, VI1_R6_B_MARK, VI1_R7_B_MARK, }; -static const unsigned int vin1_b_sync_pins[] = { +static const unsigned int vin1_sync_b_pins[] = { RCAR_GP_PIN(3, 17), /* HSYNC */ RCAR_GP_PIN(3, 18), /* VSYNC */ }; -static const unsigned int vin1_b_sync_mux[] = { +static const unsigned int vin1_sync_b_mux[] = { VI1_HSYNC_N_B_MARK, VI1_VSYNC_N_B_MARK, }; -static const unsigned int vin1_b_field_pins[] = { +static const unsigned int vin1_field_b_pins[] = { RCAR_GP_PIN(3, 20), }; -static const unsigned int vin1_b_field_mux[] = { +static const unsigned int vin1_field_b_mux[] = { VI1_FIELD_B_MARK, }; -static const unsigned int vin1_b_clkenb_pins[] = { +static const unsigned int vin1_clkenb_b_pins[] = { RCAR_GP_PIN(3, 19), }; -static const unsigned int vin1_b_clkenb_mux[] = { +static const unsigned int vin1_clkenb_b_mux[] = { VI1_CLKENB_B_MARK, }; -static const unsigned int vin1_b_clk_pins[] = { +static const unsigned int vin1_clk_b_pins[] = { RCAR_GP_PIN(3, 16), }; -static const unsigned int vin1_b_clk_mux[] = { +static const unsigned int vin1_clk_b_mux[] = { VI1_CLK_B_MARK, }; /* - VIN2 ----------------------------------------------------------------- */ @@ -4784,17 +4784,17 @@ static const struct { SH_PFC_PIN_GROUP(vin1_field), SH_PFC_PIN_GROUP(vin1_clkenb), SH_PFC_PIN_GROUP(vin1_clk), - VIN_DATA_PIN_GROUP(vin1_b_data, 24), - VIN_DATA_PIN_GROUP(vin1_b_data, 20), - SH_PFC_PIN_GROUP(vin1_b_data18), - VIN_DATA_PIN_GROUP(vin1_b_data, 16), - VIN_DATA_PIN_GROUP(vin1_b_data, 12), - VIN_DATA_PIN_GROUP(vin1_b_data, 10), - VIN_DATA_PIN_GROUP(vin1_b_data, 8), - SH_PFC_PIN_GROUP(vin1_b_sync), - SH_PFC_PIN_GROUP(vin1_b_field), - SH_PFC_PIN_GROUP(vin1_b_clkenb), - SH_PFC_PIN_GROUP(vin1_b_clk), + VIN_DATA_PIN_GROUP(vin1_data, 24, _b), + VIN_DATA_PIN_GROUP(vin1_data, 20, _b), + 
SH_PFC_PIN_GROUP(vin1_data18_b), + VIN_DATA_PIN_GROUP(vin1_data, 16, _b), + VIN_DATA_PIN_GROUP(vin1_data, 12, _b), + VIN_DATA_PIN_GROUP(vin1_data, 10, _b), + VIN_DATA_PIN_GROUP(vin1_data, 8, _b), + SH_PFC_PIN_GROUP(vin1_sync_b), + SH_PFC_PIN_GROUP(vin1_field_b), + SH_PFC_PIN_GROUP(vin1_clkenb_b), + SH_PFC_PIN_GROUP(vin1_clk_b), SH_PFC_PIN_GROUP(vin2_data8), SH_PFC_PIN_GROUP(vin2_sync), SH_PFC_PIN_GROUP(vin2_field), @@ -5236,7 +5236,7 @@ static const char * const scifb2_groups[] = { "scifb2_data_b", "scifb2_clk_b", "scifb2_ctrl_b", - "scifb0_data_c", + "scifb2_data_c", "scifb2_clk_c", "scifb2_data_d", }; @@ -5335,17 +5335,17 @@ static const char * const vin1_groups[] = { "vin1_field", "vin1_clkenb", "vin1_clk", - "vin1_b_data24", - "vin1_b_data20", - "vin1_b_data18", - "vin1_b_data16", - "vin1_b_data12", - "vin1_b_data10", - "vin1_b_data8", - "vin1_b_sync", - "vin1_b_field", - "vin1_b_clkenb", - "vin1_b_clk", + "vin1_data24_b", + "vin1_data20_b", + "vin1_data18_b", + "vin1_data16_b", + "vin1_data12_b", + "vin1_data10_b", + "vin1_data8_b", + "vin1_sync_b", + "vin1_field_b", + "vin1_clkenb_b", + "vin1_clk_b", }; static const char * const vin2_groups[] = { diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c index a623459b234e..d36da5652de6 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c @@ -1913,6 +1913,7 @@ static const char * const vin1_groups[] = { "vin1_data8", "vin1_data24_b", "vin1_data20_b", + "vin1_data18_b", "vin1_data16_b", "vin1_sync", "vin1_field", diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c index fcf1339c4058..958a5f714c93 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c @@ -8,6 +8,7 @@ */ #include +#include #include "core.h" #include "sh_pfc.h" @@ -5560,7 +5561,22 @@ static int r8a7794_pin_to_pocctrl(struct sh_pfc *pfc, unsigned int pin, u32 *poc return -EINVAL; } +static const struct soc_device_attribute r8a7794_tdsel[] = { + { .soc_id = "r8a7794", .revision = "ES1.0" }, + { /* sentinel */ } +}; + +static int r8a7794_pinmux_soc_init(struct sh_pfc *pfc) +{ + /* Initialize TDSEL on old revisions */ + if (soc_device_match(r8a7794_tdsel)) + sh_pfc_write(pfc, 0xe6060068, 0x55555500); + + return 0; +} + static const struct sh_pfc_soc_operations r8a7794_pinmux_ops = { + .init = r8a7794_pinmux_soc_init, .pin_to_pocctrl = r8a7794_pin_to_pocctrl, }; diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c index 01105bb83598..db9add1405c5 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c @@ -4098,67 +4098,29 @@ static const unsigned int vin4_clk_mux[] = { }; /* - VIN5 ------------------------------------------------------------------- */ -static const unsigned int vin5_data8_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), -}; -static const unsigned int vin5_data8_mux[] = { - VI5_DATA0_MARK, VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, -}; -static const unsigned int vin5_data10_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), - RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), -}; -static const unsigned int vin5_data10_mux[] = { - VI5_DATA0_MARK, 
VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, - VI5_DATA8_MARK, VI5_DATA9_MARK, -}; -static const unsigned int vin5_data12_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), - RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), - RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15), -}; -static const unsigned int vin5_data12_mux[] = { - VI5_DATA0_MARK, VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, - VI5_DATA8_MARK, VI5_DATA9_MARK, - VI5_DATA10_MARK, VI5_DATA11_MARK, -}; -static const unsigned int vin5_data16_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), - RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), - RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15), - RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5), - RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7), +static const union vin_data16 vin5_data_pins = { + .data16 = { + RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), + RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), + RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), + RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), + RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), + RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15), + RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5), + RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7), + }, }; -static const unsigned int vin5_data16_mux[] = { - VI5_DATA0_MARK, VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, - VI5_DATA8_MARK, VI5_DATA9_MARK, - VI5_DATA10_MARK, VI5_DATA11_MARK, - VI5_DATA12_MARK, VI5_DATA13_MARK, - VI5_DATA14_MARK, VI5_DATA15_MARK, +static const union vin_data16 vin5_data_mux = { + .data16 = { + VI5_DATA0_MARK, VI5_DATA1_MARK, + VI5_DATA2_MARK, VI5_DATA3_MARK, + VI5_DATA4_MARK, VI5_DATA5_MARK, + VI5_DATA6_MARK, VI5_DATA7_MARK, + VI5_DATA8_MARK, VI5_DATA9_MARK, + VI5_DATA10_MARK, VI5_DATA11_MARK, + VI5_DATA12_MARK, VI5_DATA13_MARK, + VI5_DATA14_MARK, VI5_DATA15_MARK, + }, }; static const unsigned int vin5_sync_pins[] = { /* HSYNC#, VSYNC# */ @@ -4530,10 +4492,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(vin4_field), SH_PFC_PIN_GROUP(vin4_clkenb), SH_PFC_PIN_GROUP(vin4_clk), - SH_PFC_PIN_GROUP(vin5_data8), - SH_PFC_PIN_GROUP(vin5_data10), - SH_PFC_PIN_GROUP(vin5_data12), - SH_PFC_PIN_GROUP(vin5_data16), + VIN_DATA_PIN_GROUP(vin5_data, 8), + VIN_DATA_PIN_GROUP(vin5_data, 10), + VIN_DATA_PIN_GROUP(vin5_data, 12), + VIN_DATA_PIN_GROUP(vin5_data, 16), SH_PFC_PIN_GROUP(vin5_sync), SH_PFC_PIN_GROUP(vin5_field), SH_PFC_PIN_GROUP(vin5_clkenb), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c index 4b98ad8d93d9..72348a4f2ece 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7796.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7796.c @@ -4070,67 +4070,29 @@ static const unsigned int vin4_clk_mux[] = { }; /* - VIN5 ------------------------------------------------------------------- */ -static const unsigned int vin5_data8_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), -}; -static const unsigned int vin5_data8_mux[] = { - VI5_DATA0_MARK, VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, -}; -static const 
unsigned int vin5_data10_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), - RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), -}; -static const unsigned int vin5_data10_mux[] = { - VI5_DATA0_MARK, VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, - VI5_DATA8_MARK, VI5_DATA9_MARK, -}; -static const unsigned int vin5_data12_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), - RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), - RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15), -}; -static const unsigned int vin5_data12_mux[] = { - VI5_DATA0_MARK, VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, - VI5_DATA8_MARK, VI5_DATA9_MARK, - VI5_DATA10_MARK, VI5_DATA11_MARK, -}; -static const unsigned int vin5_data16_pins[] = { - RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), - RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), - RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), - RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), - RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), - RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15), - RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5), - RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7), +static const union vin_data16 vin5_data_pins = { + .data16 = { + RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1), + RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3), + RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), + RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7), + RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13), + RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15), + RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5), + RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7), + }, }; -static const unsigned int vin5_data16_mux[] = { - VI5_DATA0_MARK, VI5_DATA1_MARK, - VI5_DATA2_MARK, VI5_DATA3_MARK, - VI5_DATA4_MARK, VI5_DATA5_MARK, - VI5_DATA6_MARK, VI5_DATA7_MARK, - VI5_DATA8_MARK, VI5_DATA9_MARK, - VI5_DATA10_MARK, VI5_DATA11_MARK, - VI5_DATA12_MARK, VI5_DATA13_MARK, - VI5_DATA14_MARK, VI5_DATA15_MARK, +static const union vin_data16 vin5_data_mux = { + .data16 = { + VI5_DATA0_MARK, VI5_DATA1_MARK, + VI5_DATA2_MARK, VI5_DATA3_MARK, + VI5_DATA4_MARK, VI5_DATA5_MARK, + VI5_DATA6_MARK, VI5_DATA7_MARK, + VI5_DATA8_MARK, VI5_DATA9_MARK, + VI5_DATA10_MARK, VI5_DATA11_MARK, + VI5_DATA12_MARK, VI5_DATA13_MARK, + VI5_DATA14_MARK, VI5_DATA15_MARK, + }, }; static const unsigned int vin5_sync_pins[] = { /* HSYNC#, VSYNC# */ @@ -4468,10 +4430,10 @@ static const struct { SH_PFC_PIN_GROUP(vin4_field), SH_PFC_PIN_GROUP(vin4_clkenb), SH_PFC_PIN_GROUP(vin4_clk), - SH_PFC_PIN_GROUP(vin5_data8), - SH_PFC_PIN_GROUP(vin5_data10), - SH_PFC_PIN_GROUP(vin5_data12), - SH_PFC_PIN_GROUP(vin5_data16), + VIN_DATA_PIN_GROUP(vin5_data, 8), + VIN_DATA_PIN_GROUP(vin5_data, 10), + VIN_DATA_PIN_GROUP(vin5_data, 12), + VIN_DATA_PIN_GROUP(vin5_data, 16), SH_PFC_PIN_GROUP(vin5_sync), SH_PFC_PIN_GROUP(vin5_field), SH_PFC_PIN_GROUP(vin5_clkenb), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c index a7c4882de09e..14c4b671cddf 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77965.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77965.c @@ -554,7 +554,7 @@ MOD_SEL0_4_3 MOD_SEL1_4 \ FM(AVB_RX_CTL) FM(AVB_RXC) FM(AVB_RD0) FM(AVB_RD1) FM(AVB_RD2) FM(AVB_RD3) \ FM(AVB_TXCREFCLK) FM(AVB_MDIO) \ FM(PRESETOUT) \ - FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN2) \ + FM(DU_DOTCLKIN0) FM(DU_DOTCLKIN1) FM(DU_DOTCLKIN3) \ FM(TMS) 
FM(TDO) FM(ASEBRK) FM(MLB_REF) FM(TDI) FM(TCK) FM(TRST) FM(EXTALR) enum { @@ -1566,7 +1566,7 @@ static const struct sh_pfc_pin pinmux_pins[] = { SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('E'), 5, QSPI1_MISO_IO1, CFG_FLAGS), SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 7, DU_DOTCLKIN0, CFG_FLAGS), SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('P'), 8, DU_DOTCLKIN1, CFG_FLAGS), - SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN2, CFG_FLAGS), + SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 8, DU_DOTCLKIN3, CFG_FLAGS), SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 26, TRST#, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN), SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 29, TDI, SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN), SH_PFC_PIN_NAMED_CFG(ROW_GROUP_A('R'), 30, TMS, CFG_FLAGS), @@ -1850,6 +1850,280 @@ static const unsigned int canfd1_data_mux[] = { CANFD1_TX_MARK, CANFD1_RX_MARK, }; +/* - DRIF0 --------------------------------------------------------------- */ +static const unsigned int drif0_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), +}; + +static const unsigned int drif0_ctrl_a_mux[] = { + RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK, +}; + +static const unsigned int drif0_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 10), +}; + +static const unsigned int drif0_data0_a_mux[] = { + RIF0_D0_A_MARK, +}; + +static const unsigned int drif0_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 7), +}; + +static const unsigned int drif0_data1_a_mux[] = { + RIF0_D1_A_MARK, +}; + +static const unsigned int drif0_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 4), +}; + +static const unsigned int drif0_ctrl_b_mux[] = { + RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK, +}; + +static const unsigned int drif0_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 1), +}; + +static const unsigned int drif0_data0_b_mux[] = { + RIF0_D0_B_MARK, +}; + +static const unsigned int drif0_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 2), +}; + +static const unsigned int drif0_data1_b_mux[] = { + RIF0_D1_B_MARK, +}; + +static const unsigned int drif0_ctrl_c_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 15), +}; + +static const unsigned int drif0_ctrl_c_mux[] = { + RIF0_CLK_C_MARK, RIF0_SYNC_C_MARK, +}; + +static const unsigned int drif0_data0_c_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 13), +}; + +static const unsigned int drif0_data0_c_mux[] = { + RIF0_D0_C_MARK, +}; + +static const unsigned int drif0_data1_c_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 14), +}; + +static const unsigned int drif0_data1_c_mux[] = { + RIF0_D1_C_MARK, +}; + +/* - DRIF1 --------------------------------------------------------------- */ +static const unsigned int drif1_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18), +}; + +static const unsigned int drif1_ctrl_a_mux[] = { + RIF1_CLK_A_MARK, RIF1_SYNC_A_MARK, +}; + +static const unsigned int drif1_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 19), +}; + +static const unsigned int drif1_data0_a_mux[] = { + RIF1_D0_A_MARK, +}; + +static const unsigned int drif1_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 20), +}; + +static const unsigned int drif1_data1_a_mux[] = { + RIF1_D1_A_MARK, +}; + +static const unsigned int drif1_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 3), +}; + +static const unsigned int drif1_ctrl_b_mux[] = { + RIF1_CLK_B_MARK, RIF1_SYNC_B_MARK, +}; + +static const unsigned int drif1_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 7), +}; + +static const unsigned int drif1_data0_b_mux[] = { + RIF1_D0_B_MARK, +}; + 
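Every group in these sh-pfc SoC tables follows the same convention: a <group>_pins[] array of GPIO numbers is paired with a <group>_mux[] array of the corresponding *_MARK function selectors, and the two are later bundled by name into pinmux_groups[]. The following is a minimal, simplified sketch of how the SH_PFC_PIN_GROUP() and VIN_DATA_PIN_GROUP() helpers used throughout this patch tie those arrays together; the struct layout and macro bodies approximate the definitions in drivers/pinctrl/sh-pfc/sh_pfc.h and are not a verbatim copy of them.

/* Simplified sketch -- the real definitions live in drivers/pinctrl/sh-pfc/sh_pfc.h */
struct sh_pfc_pin_group {
	const char *name;
	const unsigned int *pins;	/* GPIO numbers, e.g. RCAR_GP_PIN(6, 8) */
	const unsigned int *mux;	/* matching *_MARK function selectors */
	unsigned int nr_pins;
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))	/* normally from <linux/kernel.h> */

/* Pastes <n>_pins / <n>_mux together and stringifies the group name. */
#define SH_PFC_PIN_GROUP(n) {				\
	.name = #n,					\
	.pins = n##_pins,				\
	.mux = n##_mux,					\
	.nr_pins = ARRAY_SIZE(n##_pins),		\
}

/*
 * VIN groups share one pins/mux table via a union (vin_data12, vin_data16, ...)
 * so that e.g. vin5_data8 and vin5_data16 reuse the same arrays; the optional
 * variadic suffix selects alternate pin sets such as "_b".
 */
#define VIN_DATA_PIN_GROUP(n, s, ...) {				\
	.name = #n #s #__VA_ARGS__,				\
	.pins = n##__VA_ARGS__##_pins.data##s,			\
	.mux = n##__VA_ARGS__##_mux.data##s,			\
	.nr_pins = ARRAY_SIZE(n##__VA_ARGS__##_pins.data##s),	\
}

/*
 * Usage as in this patch: VIN_DATA_PIN_GROUP(vin5_data, 8) expands to a group
 * named "vin5_data8" whose pins come from the .data8 member of the union,
 * i.e. the first eight entries of vin5_data_pins.
 */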
+static const unsigned int drif1_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 8), +}; + +static const unsigned int drif1_data1_b_mux[] = { + RIF1_D1_B_MARK, +}; + +static const unsigned int drif1_ctrl_c_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11), +}; + +static const unsigned int drif1_ctrl_c_mux[] = { + RIF1_CLK_C_MARK, RIF1_SYNC_C_MARK, +}; + +static const unsigned int drif1_data0_c_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 6), +}; + +static const unsigned int drif1_data0_c_mux[] = { + RIF1_D0_C_MARK, +}; + +static const unsigned int drif1_data1_c_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 10), +}; + +static const unsigned int drif1_data1_c_mux[] = { + RIF1_D1_C_MARK, +}; + +/* - DRIF2 --------------------------------------------------------------- */ +static const unsigned int drif2_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 8), RCAR_GP_PIN(6, 9), +}; + +static const unsigned int drif2_ctrl_a_mux[] = { + RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK, +}; + +static const unsigned int drif2_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 7), +}; + +static const unsigned int drif2_data0_a_mux[] = { + RIF2_D0_A_MARK, +}; + +static const unsigned int drif2_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 10), +}; + +static const unsigned int drif2_data1_a_mux[] = { + RIF2_D1_A_MARK, +}; + +static const unsigned int drif2_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 26), RCAR_GP_PIN(6, 27), +}; + +static const unsigned int drif2_ctrl_b_mux[] = { + RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK, +}; + +static const unsigned int drif2_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 30), +}; + +static const unsigned int drif2_data0_b_mux[] = { + RIF2_D0_B_MARK, +}; + +static const unsigned int drif2_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 31), +}; + +static const unsigned int drif2_data1_b_mux[] = { + RIF2_D1_B_MARK, +}; + +/* - DRIF3 --------------------------------------------------------------- */ +static const unsigned int drif3_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 17), RCAR_GP_PIN(6, 18), +}; + +static const unsigned int drif3_ctrl_a_mux[] = { + RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK, +}; + +static const unsigned int drif3_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 19), +}; + +static const unsigned int drif3_data0_a_mux[] = { + RIF3_D0_A_MARK, +}; + +static const unsigned int drif3_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 20), +}; + +static const unsigned int drif3_data1_a_mux[] = { + RIF3_D1_A_MARK, +}; + +static const unsigned int drif3_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(6, 24), RCAR_GP_PIN(6, 25), +}; + +static const unsigned int drif3_ctrl_b_mux[] = { + RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK, +}; + +static const unsigned int drif3_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(6, 28), +}; + +static const unsigned int drif3_data0_b_mux[] = { + RIF3_D0_B_MARK, +}; + +static const unsigned int drif3_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(6, 29), +}; + +static const unsigned int drif3_data1_b_mux[] = { + RIF3_D1_B_MARK, +}; + /* - DU --------------------------------------------------------------------- */ static const unsigned int du_rgb666_pins[] = { /* R[7:2], G[7:2], B[7:2] */ @@ -3760,6 +4034,42 @@ static const unsigned int ssi9_ctrl_b_mux[] = { SSI_SCK9_B_MARK, SSI_WS9_B_MARK, }; +/* - TMU -------------------------------------------------------------------- */ +static const unsigned int tmu_tclk1_a_pins[] = { + /* TCLK */ + RCAR_GP_PIN(6, 23), +}; + +static const unsigned int tmu_tclk1_a_mux[] = { + TCLK1_A_MARK, +}; + +static const 
unsigned int tmu_tclk1_b_pins[] = { + /* TCLK */ + RCAR_GP_PIN(5, 19), +}; + +static const unsigned int tmu_tclk1_b_mux[] = { + TCLK1_B_MARK, +}; + +static const unsigned int tmu_tclk2_a_pins[] = { + /* TCLK */ + RCAR_GP_PIN(6, 19), +}; + +static const unsigned int tmu_tclk2_a_mux[] = { + TCLK2_A_MARK, +}; + +static const unsigned int tmu_tclk2_b_pins[] = { + /* TCLK */ + RCAR_GP_PIN(6, 28), +}; + +static const unsigned int tmu_tclk2_b_mux[] = { + TCLK2_B_MARK, +}; /* - USB0 ------------------------------------------------------------------- */ static const unsigned int usb0_pins[] = { @@ -4037,6 +4347,36 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(canfd0_data_a), SH_PFC_PIN_GROUP(canfd0_data_b), SH_PFC_PIN_GROUP(canfd1_data), + SH_PFC_PIN_GROUP(drif0_ctrl_a), + SH_PFC_PIN_GROUP(drif0_data0_a), + SH_PFC_PIN_GROUP(drif0_data1_a), + SH_PFC_PIN_GROUP(drif0_ctrl_b), + SH_PFC_PIN_GROUP(drif0_data0_b), + SH_PFC_PIN_GROUP(drif0_data1_b), + SH_PFC_PIN_GROUP(drif0_ctrl_c), + SH_PFC_PIN_GROUP(drif0_data0_c), + SH_PFC_PIN_GROUP(drif0_data1_c), + SH_PFC_PIN_GROUP(drif1_ctrl_a), + SH_PFC_PIN_GROUP(drif1_data0_a), + SH_PFC_PIN_GROUP(drif1_data1_a), + SH_PFC_PIN_GROUP(drif1_ctrl_b), + SH_PFC_PIN_GROUP(drif1_data0_b), + SH_PFC_PIN_GROUP(drif1_data1_b), + SH_PFC_PIN_GROUP(drif1_ctrl_c), + SH_PFC_PIN_GROUP(drif1_data0_c), + SH_PFC_PIN_GROUP(drif1_data1_c), + SH_PFC_PIN_GROUP(drif2_ctrl_a), + SH_PFC_PIN_GROUP(drif2_data0_a), + SH_PFC_PIN_GROUP(drif2_data1_a), + SH_PFC_PIN_GROUP(drif2_ctrl_b), + SH_PFC_PIN_GROUP(drif2_data0_b), + SH_PFC_PIN_GROUP(drif2_data1_b), + SH_PFC_PIN_GROUP(drif3_ctrl_a), + SH_PFC_PIN_GROUP(drif3_data0_a), + SH_PFC_PIN_GROUP(drif3_data1_a), + SH_PFC_PIN_GROUP(drif3_ctrl_b), + SH_PFC_PIN_GROUP(drif3_data0_b), + SH_PFC_PIN_GROUP(drif3_data1_b), SH_PFC_PIN_GROUP(du_rgb666), SH_PFC_PIN_GROUP(du_rgb888), SH_PFC_PIN_GROUP(du_clk_out_0), @@ -4280,6 +4620,10 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(ssi9_data_b), SH_PFC_PIN_GROUP(ssi9_ctrl_a), SH_PFC_PIN_GROUP(ssi9_ctrl_b), + SH_PFC_PIN_GROUP(tmu_tclk1_a), + SH_PFC_PIN_GROUP(tmu_tclk1_b), + SH_PFC_PIN_GROUP(tmu_tclk2_a), + SH_PFC_PIN_GROUP(tmu_tclk2_b), SH_PFC_PIN_GROUP(usb0), SH_PFC_PIN_GROUP(usb1), SH_PFC_PIN_GROUP(usb30), @@ -4367,6 +4711,48 @@ static const char * const canfd1_groups[] = { "canfd1_data", }; +static const char * const drif0_groups[] = { + "drif0_ctrl_a", + "drif0_data0_a", + "drif0_data1_a", + "drif0_ctrl_b", + "drif0_data0_b", + "drif0_data1_b", + "drif0_ctrl_c", + "drif0_data0_c", + "drif0_data1_c", +}; + +static const char * const drif1_groups[] = { + "drif1_ctrl_a", + "drif1_data0_a", + "drif1_data1_a", + "drif1_ctrl_b", + "drif1_data0_b", + "drif1_data1_b", + "drif1_ctrl_c", + "drif1_data0_c", + "drif1_data1_c", +}; + +static const char * const drif2_groups[] = { + "drif2_ctrl_a", + "drif2_data0_a", + "drif2_data1_a", + "drif2_ctrl_b", + "drif2_data0_b", + "drif2_data1_b", +}; + +static const char * const drif3_groups[] = { + "drif3_ctrl_a", + "drif3_data0_a", + "drif3_data1_a", + "drif3_ctrl_b", + "drif3_data0_b", + "drif3_data1_b", +}; + static const char * const du_groups[] = { "du_rgb666", "du_rgb888", @@ -4711,6 +5097,13 @@ static const char * const ssi_groups[] = { "ssi9_ctrl_b", }; +static const char * const tmu_groups[] = { + "tmu_tclk1_a", + "tmu_tclk1_b", + "tmu_tclk2_a", + "tmu_tclk2_b", +}; + static const char * const usb0_groups[] = { "usb0", }; @@ -4763,6 +5156,10 @@ static const struct sh_pfc_function pinmux_functions[] = { 
SH_PFC_FUNCTION(can_clk), SH_PFC_FUNCTION(canfd0), SH_PFC_FUNCTION(canfd1), + SH_PFC_FUNCTION(drif0), + SH_PFC_FUNCTION(drif1), + SH_PFC_FUNCTION(drif2), + SH_PFC_FUNCTION(drif3), SH_PFC_FUNCTION(du), SH_PFC_FUNCTION(hscif0), SH_PFC_FUNCTION(hscif1), @@ -4797,6 +5194,7 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(sdhi2), SH_PFC_FUNCTION(sdhi3), SH_PFC_FUNCTION(ssi), + SH_PFC_FUNCTION(tmu), SH_PFC_FUNCTION(usb0), SH_PFC_FUNCTION(usb1), SH_PFC_FUNCTION(usb30), @@ -5740,7 +6138,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = { [31] = PIN_A_NUMBER('P', 8), /* DU_DOTCLKIN1 */ } }, { PINMUX_BIAS_REG("PUEN3", 0xe606040c, "PUD3", 0xe606044c) { - [ 0] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN2 */ + [ 0] = PIN_A_NUMBER('R', 8), /* DU_DOTCLKIN3 */ [ 1] = PIN_NONE, [ 2] = PIN_A_NUMBER('D', 38), /* FSCLKST */ [ 3] = PIN_A_NUMBER('D', 39), /* EXTALR*/ diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c index 4a7ab84b366b..c5e67ba29f7c 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c @@ -1578,47 +1578,25 @@ static const unsigned int tmu_tclk2_b_mux[] = { }; /* - VIN0 ------------------------------------------------------------------- */ -static const unsigned int vin0_data8_pins[] = { - RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5), - RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7), - RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9), - RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11), -}; -static const unsigned int vin0_data8_mux[] = { - VI0_DATA0_MARK, VI0_DATA1_MARK, - VI0_DATA2_MARK, VI0_DATA3_MARK, - VI0_DATA4_MARK, VI0_DATA5_MARK, - VI0_DATA6_MARK, VI0_DATA7_MARK, +static const union vin_data12 vin0_data_pins = { + .data12 = { + RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5), + RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7), + RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9), + RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11), + RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13), + RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15), + }, }; -static const unsigned int vin0_data10_pins[] = { - RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5), - RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7), - RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9), - RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11), - RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13), -}; -static const unsigned int vin0_data10_mux[] = { - VI0_DATA0_MARK, VI0_DATA1_MARK, - VI0_DATA2_MARK, VI0_DATA3_MARK, - VI0_DATA4_MARK, VI0_DATA5_MARK, - VI0_DATA6_MARK, VI0_DATA7_MARK, - VI0_DATA8_MARK, VI0_DATA9_MARK, -}; -static const unsigned int vin0_data12_pins[] = { - RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5), - RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7), - RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 9), - RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11), - RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 13), - RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15), -}; -static const unsigned int vin0_data12_mux[] = { - VI0_DATA0_MARK, VI0_DATA1_MARK, - VI0_DATA2_MARK, VI0_DATA3_MARK, - VI0_DATA4_MARK, VI0_DATA5_MARK, - VI0_DATA6_MARK, VI0_DATA7_MARK, - VI0_DATA8_MARK, VI0_DATA9_MARK, - VI0_DATA10_MARK, VI0_DATA11_MARK, +static const union vin_data12 vin0_data_mux = { + .data12 = { + VI0_DATA0_MARK, VI0_DATA1_MARK, + VI0_DATA2_MARK, VI0_DATA3_MARK, + VI0_DATA4_MARK, VI0_DATA5_MARK, + VI0_DATA6_MARK, VI0_DATA7_MARK, + VI0_DATA8_MARK, VI0_DATA9_MARK, + VI0_DATA10_MARK, VI0_DATA11_MARK, + }, }; static const unsigned int vin0_sync_pins[] = { /* HSYNC#, VSYNC# */ @@ -1650,47 +1628,25 @@ static const unsigned int vin0_clk_mux[] = { }; /* - VIN1 ------------------------------------------------------------------- */ -static const unsigned int 
vin1_data8_pins[] = { - RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), - RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), - RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), - RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), -}; -static const unsigned int vin1_data8_mux[] = { - VI1_DATA0_MARK, VI1_DATA1_MARK, - VI1_DATA2_MARK, VI1_DATA3_MARK, - VI1_DATA4_MARK, VI1_DATA5_MARK, - VI1_DATA6_MARK, VI1_DATA7_MARK, -}; -static const unsigned int vin1_data10_pins[] = { - RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), - RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), - RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), - RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), - RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13), -}; -static const unsigned int vin1_data10_mux[] = { - VI1_DATA0_MARK, VI1_DATA1_MARK, - VI1_DATA2_MARK, VI1_DATA3_MARK, - VI1_DATA4_MARK, VI1_DATA5_MARK, - VI1_DATA6_MARK, VI1_DATA7_MARK, - VI1_DATA8_MARK, VI1_DATA9_MARK, -}; -static const unsigned int vin1_data12_pins[] = { - RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), - RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), - RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), - RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), - RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13), - RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), +static const union vin_data12 vin1_data_pins = { + .data12 = { + RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), + RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), + RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), + RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), + RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13), + RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), + }, }; -static const unsigned int vin1_data12_mux[] = { - VI1_DATA0_MARK, VI1_DATA1_MARK, - VI1_DATA2_MARK, VI1_DATA3_MARK, - VI1_DATA4_MARK, VI1_DATA5_MARK, - VI1_DATA6_MARK, VI1_DATA7_MARK, - VI1_DATA8_MARK, VI1_DATA9_MARK, - VI1_DATA10_MARK, VI1_DATA11_MARK, +static const union vin_data12 vin1_data_mux = { + .data12 = { + VI1_DATA0_MARK, VI1_DATA1_MARK, + VI1_DATA2_MARK, VI1_DATA3_MARK, + VI1_DATA4_MARK, VI1_DATA5_MARK, + VI1_DATA6_MARK, VI1_DATA7_MARK, + VI1_DATA8_MARK, VI1_DATA9_MARK, + VI1_DATA10_MARK, VI1_DATA11_MARK, + }, }; static const unsigned int vin1_sync_pins[] = { /* HSYNC#, VSYNC# */ @@ -1831,16 +1787,16 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(tmu_tclk1_b), SH_PFC_PIN_GROUP(tmu_tclk2_a), SH_PFC_PIN_GROUP(tmu_tclk2_b), - SH_PFC_PIN_GROUP(vin0_data8), - SH_PFC_PIN_GROUP(vin0_data10), - SH_PFC_PIN_GROUP(vin0_data12), + VIN_DATA_PIN_GROUP(vin0_data, 8), + VIN_DATA_PIN_GROUP(vin0_data, 10), + VIN_DATA_PIN_GROUP(vin0_data, 12), SH_PFC_PIN_GROUP(vin0_sync), SH_PFC_PIN_GROUP(vin0_field), SH_PFC_PIN_GROUP(vin0_clkenb), SH_PFC_PIN_GROUP(vin0_clk), - SH_PFC_PIN_GROUP(vin1_data8), - SH_PFC_PIN_GROUP(vin1_data10), - SH_PFC_PIN_GROUP(vin1_data12), + VIN_DATA_PIN_GROUP(vin1_data, 8), + VIN_DATA_PIN_GROUP(vin1_data, 10), + VIN_DATA_PIN_GROUP(vin1_data, 12), SH_PFC_PIN_GROUP(vin1_sync), SH_PFC_PIN_GROUP(vin1_field), SH_PFC_PIN_GROUP(vin1_clkenb), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c index 8bef24502f0c..b807b67ae143 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c @@ -1970,47 +1970,25 @@ static const unsigned int vin0_clk_mux[] = { }; /* - VIN1 ------------------------------------------------------------------- */ -static const unsigned int vin1_data8_pins[] = { - RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), - RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), - RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), - RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), -}; -static const unsigned int vin1_data8_mux[] = { - VI1_DATA0_MARK, VI1_DATA1_MARK, - 
VI1_DATA2_MARK, VI1_DATA3_MARK, - VI1_DATA4_MARK, VI1_DATA5_MARK, - VI1_DATA6_MARK, VI1_DATA7_MARK, -}; -static const unsigned int vin1_data10_pins[] = { - RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), - RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), - RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), - RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), - RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13), -}; -static const unsigned int vin1_data10_mux[] = { - VI1_DATA0_MARK, VI1_DATA1_MARK, - VI1_DATA2_MARK, VI1_DATA3_MARK, - VI1_DATA4_MARK, VI1_DATA5_MARK, - VI1_DATA6_MARK, VI1_DATA7_MARK, - VI1_DATA8_MARK, VI1_DATA9_MARK, -}; -static const unsigned int vin1_data12_pins[] = { - RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), - RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), - RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), - RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), - RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13), - RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), +static const union vin_data12 vin1_data_pins = { + .data12 = { + RCAR_GP_PIN(3, 4), RCAR_GP_PIN(3, 5), + RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7), + RCAR_GP_PIN(3, 8), RCAR_GP_PIN(3, 9), + RCAR_GP_PIN(3, 10), RCAR_GP_PIN(3, 11), + RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13), + RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15), + }, }; -static const unsigned int vin1_data12_mux[] = { - VI1_DATA0_MARK, VI1_DATA1_MARK, - VI1_DATA2_MARK, VI1_DATA3_MARK, - VI1_DATA4_MARK, VI1_DATA5_MARK, - VI1_DATA6_MARK, VI1_DATA7_MARK, - VI1_DATA8_MARK, VI1_DATA9_MARK, - VI1_DATA10_MARK, VI1_DATA11_MARK, +static const union vin_data12 vin1_data_mux = { + .data12 = { + VI1_DATA0_MARK, VI1_DATA1_MARK, + VI1_DATA2_MARK, VI1_DATA3_MARK, + VI1_DATA4_MARK, VI1_DATA5_MARK, + VI1_DATA6_MARK, VI1_DATA7_MARK, + VI1_DATA8_MARK, VI1_DATA9_MARK, + VI1_DATA10_MARK, VI1_DATA11_MARK, + }, }; static const unsigned int vin1_sync_pins[] = { /* VI1_VSYNC#, VI1_HSYNC# */ @@ -2182,9 +2160,9 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(vin0_field), SH_PFC_PIN_GROUP(vin0_clkenb), SH_PFC_PIN_GROUP(vin0_clk), - SH_PFC_PIN_GROUP(vin1_data8), - SH_PFC_PIN_GROUP(vin1_data10), - SH_PFC_PIN_GROUP(vin1_data12), + VIN_DATA_PIN_GROUP(vin1_data, 8), + VIN_DATA_PIN_GROUP(vin1_data, 10), + VIN_DATA_PIN_GROUP(vin1_data, 12), SH_PFC_PIN_GROUP(vin1_sync), SH_PFC_PIN_GROUP(vin1_field), SH_PFC_PIN_GROUP(vin1_clkenb), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c index e40908dc37e0..151640c30e9d 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c @@ -30,7 +30,16 @@ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ PORT_GP_CFG_20(5, fn, sfx, CFG_FLAGS), \ - PORT_GP_CFG_18(6, fn, sfx, CFG_FLAGS) + PORT_GP_CFG_9(6, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 9, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \ + PORT_GP_CFG_1(6, 10, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 11, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 12, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 13, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 14, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 15, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 16, fn, sfx, CFG_FLAGS), \ + PORT_GP_CFG_1(6, 17, fn, sfx, CFG_FLAGS) /* * F_() : just information * FM() : macro for FN_xxx / xxx_MARK @@ -391,29 +400,33 @@ FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28 +/* 
The bit numbering in MOD_SEL fields is reversed */ +#define REV4(f0, f1, f2, f3) f0 f2 f1 f3 +#define REV8(f0, f1, f2, f3, f4, f5, f6, f7) f0 f4 f2 f6 f1 f5 f3 f7 + /* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ -#define MOD_SEL0_30_29 FM(SEL_ADGB_0) FM(SEL_ADGB_1) FM(SEL_ADGB_2) F_(0, 0) +#define MOD_SEL0_30_29 REV4(FM(SEL_ADGB_0), FM(SEL_ADGB_1), FM(SEL_ADGB_2), F_(0, 0)) #define MOD_SEL0_28 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1) -#define MOD_SEL0_27_26 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) F_(0, 0) +#define MOD_SEL0_27_26 REV4(FM(SEL_FM_0), FM(SEL_FM_1), FM(SEL_FM_2), F_(0, 0)) #define MOD_SEL0_25 FM(SEL_FSO_0) FM(SEL_FSO_1) #define MOD_SEL0_24 FM(SEL_HSCIF0_0) FM(SEL_HSCIF0_1) #define MOD_SEL0_23 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1) #define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1) -#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3) -#define MOD_SEL0_19_18_17 FM(SEL_I2C2_0) FM(SEL_I2C2_1) FM(SEL_I2C2_2) FM(SEL_I2C2_3) FM(SEL_I2C2_4) F_(0, 0) F_(0, 0) F_(0, 0) +#define MOD_SEL0_21_20 REV4(FM(SEL_I2C1_0), FM(SEL_I2C1_1), FM(SEL_I2C1_2), FM(SEL_I2C1_3)) +#define MOD_SEL0_19_18_17 REV8(FM(SEL_I2C2_0), FM(SEL_I2C2_1), FM(SEL_I2C2_2), FM(SEL_I2C2_3), FM(SEL_I2C2_4), F_(0, 0), F_(0, 0), F_(0, 0)) #define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1) #define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1) #define MOD_SEL0_14 FM(SEL_PWM1_0) FM(SEL_PWM1_1) -#define MOD_SEL0_13_12 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0) -#define MOD_SEL0_11_10 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0) +#define MOD_SEL0_13_12 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0)) +#define MOD_SEL0_11_10 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0)) #define MOD_SEL0_9 FM(SEL_PWM4_0) FM(SEL_PWM4_1) #define MOD_SEL0_8 FM(SEL_PWM5_0) FM(SEL_PWM5_1) #define MOD_SEL0_7 FM(SEL_PWM6_0) FM(SEL_PWM6_1) -#define MOD_SEL0_6_5 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1) FM(SEL_REMOCON_2) F_(0, 0) +#define MOD_SEL0_6_5 REV4(FM(SEL_REMOCON_0), FM(SEL_REMOCON_1), FM(SEL_REMOCON_2), F_(0, 0)) #define MOD_SEL0_4 FM(SEL_SCIF_0) FM(SEL_SCIF_1) #define MOD_SEL0_3 FM(SEL_SCIF0_0) FM(SEL_SCIF0_1) #define MOD_SEL0_2 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1) -#define MOD_SEL0_1_0 FM(SEL_SPEED_PULSE_IF_0) FM(SEL_SPEED_PULSE_IF_1) FM(SEL_SPEED_PULSE_IF_2) F_(0, 0) +#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0)) /* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */ #define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1) @@ -422,18 +435,18 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM #define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1) #define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1) #define MOD_SEL1_25 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1) -#define MOD_SEL1_24_23_22 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3) FM(SEL_HSCIF3_4) F_(0, 0) F_(0, 0) F_(0, 0) -#define MOD_SEL1_21_20_19 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1) FM(SEL_HSCIF4_2) FM(SEL_HSCIF4_3) FM(SEL_HSCIF4_4) F_(0, 0) F_(0, 0) F_(0, 0) +#define MOD_SEL1_24_23_22 REV8(FM(SEL_HSCIF3_0), FM(SEL_HSCIF3_1), FM(SEL_HSCIF3_2), FM(SEL_HSCIF3_3), FM(SEL_HSCIF3_4), F_(0, 0), F_(0, 0), F_(0, 0)) +#define MOD_SEL1_21_20_19 REV8(FM(SEL_HSCIF4_0), FM(SEL_HSCIF4_1), FM(SEL_HSCIF4_2), FM(SEL_HSCIF4_3), FM(SEL_HSCIF4_4), F_(0, 0), F_(0, 0), F_(0, 0)) #define MOD_SEL1_18 FM(SEL_I2C6_0) FM(SEL_I2C6_1) #define MOD_SEL1_17 FM(SEL_I2C7_0) FM(SEL_I2C7_1) #define 
MOD_SEL1_16 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1) #define MOD_SEL1_15 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1) -#define MOD_SEL1_14_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1) FM(SEL_SCIF3_2) F_(0, 0) -#define MOD_SEL1_12_11 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0) -#define MOD_SEL1_10_9 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1) FM(SEL_SCIF5_2) F_(0, 0) +#define MOD_SEL1_14_13 REV4(FM(SEL_SCIF3_0), FM(SEL_SCIF3_1), FM(SEL_SCIF3_2), F_(0, 0)) +#define MOD_SEL1_12_11 REV4(FM(SEL_SCIF4_0), FM(SEL_SCIF4_1), FM(SEL_SCIF4_2), F_(0, 0)) +#define MOD_SEL1_10_9 REV4(FM(SEL_SCIF5_0), FM(SEL_SCIF5_1), FM(SEL_SCIF5_2), F_(0, 0)) #define MOD_SEL1_8 FM(SEL_VIN4_0) FM(SEL_VIN4_1) #define MOD_SEL1_7 FM(SEL_VIN5_0) FM(SEL_VIN5_1) -#define MOD_SEL1_6_5 FM(SEL_ADGC_0) FM(SEL_ADGC_1) FM(SEL_ADGC_2) F_(0, 0) +#define MOD_SEL1_6_5 REV4(FM(SEL_ADGC_0), FM(SEL_ADGC_1), FM(SEL_ADGC_2), F_(0, 0)) #define MOD_SEL1_4 FM(SEL_SSI9_0) FM(SEL_SSI9_1) #define PINMUX_MOD_SELS \ @@ -1060,7 +1073,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP11_11_8, RIF1_SYNC), PINMUX_IPSR_GPSR(IP11_11_8, TS_SCK1), - PINMUX_IPSR_GPSR(IP11_15_12, TX0_A), + PINMUX_IPSR_MSEL(IP11_15_12, TX0_A, SEL_SCIF0_0), PINMUX_IPSR_GPSR(IP11_15_12, HTX1_A), PINMUX_IPSR_MSEL(IP11_15_12, SSI_WS2_A, SEL_SSI2_0), PINMUX_IPSR_GPSR(IP11_15_12, RIF1_D0), @@ -1099,7 +1112,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP12_3_0, SSI_WS9_B, SEL_SSI9_1), PINMUX_IPSR_GPSR(IP12_3_0, AUDIO_CLKOUT3_B), - PINMUX_IPSR_GPSR(IP12_7_4, SCK2_A), + PINMUX_IPSR_MSEL(IP12_7_4, SCK2_A, SEL_SCIF2_0), PINMUX_IPSR_MSEL(IP12_7_4, HSCK0_A, SEL_HSCIF0_0), PINMUX_IPSR_MSEL(IP12_7_4, AUDIO_CLKB_A, SEL_ADGB_0), PINMUX_IPSR_GPSR(IP12_7_4, CTS1_N), @@ -1107,14 +1120,14 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP12_7_4, REMOCON_A, SEL_REMOCON_0), PINMUX_IPSR_MSEL(IP12_7_4, SCIF_CLK_B, SEL_SCIF_1), - PINMUX_IPSR_GPSR(IP12_11_8, TX2_A), + PINMUX_IPSR_MSEL(IP12_11_8, TX2_A, SEL_SCIF2_0), PINMUX_IPSR_MSEL(IP12_11_8, HRX0_A, SEL_HSCIF0_0), PINMUX_IPSR_GPSR(IP12_11_8, AUDIO_CLKOUT2_A), PINMUX_IPSR_MSEL(IP12_11_8, SCL1_A, SEL_I2C1_0), PINMUX_IPSR_MSEL(IP12_11_8, FSO_CFE_0_N_A, SEL_FSO_0), PINMUX_IPSR_GPSR(IP12_11_8, TS_SDEN1), - PINMUX_IPSR_GPSR(IP12_15_12, RX2_A), + PINMUX_IPSR_MSEL(IP12_15_12, RX2_A, SEL_SCIF2_0), PINMUX_IPSR_GPSR(IP12_15_12, HTX0_A), PINMUX_IPSR_GPSR(IP12_15_12, AUDIO_CLKOUT3_A), PINMUX_IPSR_MSEL(IP12_15_12, SDA1_A, SEL_I2C1_0), @@ -1126,11 +1139,11 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_GPSR(IP12_23_20, MSIOF0_RXD), PINMUX_IPSR_GPSR(IP12_23_20, SSI_WS78), - PINMUX_IPSR_GPSR(IP12_23_20, TX2_B), + PINMUX_IPSR_MSEL(IP12_23_20, TX2_B, SEL_SCIF2_1), PINMUX_IPSR_GPSR(IP12_27_24, MSIOF0_TXD), PINMUX_IPSR_GPSR(IP12_27_24, SSI_SDATA7), - PINMUX_IPSR_GPSR(IP12_27_24, RX2_B), + PINMUX_IPSR_MSEL(IP12_27_24, RX2_B, SEL_SCIF2_1), PINMUX_IPSR_GPSR(IP12_31_28, MSIOF0_SYNC), PINMUX_IPSR_GPSR(IP12_31_28, AUDIO_CLKOUT_B), @@ -1170,7 +1183,7 @@ static const u16 pinmux_data[] = { PINMUX_IPSR_MSEL(IP13_19_16, SIM0_D_A, SEL_SIMCARD_0), PINMUX_IPSR_GPSR(IP13_23_20, MLB_DAT), - PINMUX_IPSR_GPSR(IP13_23_20, TX0_B), + PINMUX_IPSR_MSEL(IP13_23_20, TX0_B, SEL_SCIF0_1), PINMUX_IPSR_MSEL(IP13_23_20, RIF0_SYNC_A, SEL_DRIF0_0), PINMUX_IPSR_GPSR(IP13_23_20, SIM0_CLK_A), @@ -1586,6 +1599,199 @@ static const unsigned int canfd1_data_mux[] = { CANFD1_TX_MARK, CANFD1_RX_MARK, }; +/* - DRIF0 --------------------------------------------------------------- */ +static const unsigned int drif0_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 19), +}; + +static 
const unsigned int drif0_ctrl_a_mux[] = { + RIF0_CLK_A_MARK, RIF0_SYNC_A_MARK, +}; + +static const unsigned int drif0_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 17), +}; + +static const unsigned int drif0_data0_a_mux[] = { + RIF0_D0_A_MARK, +}; + +static const unsigned int drif0_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 18), +}; + +static const unsigned int drif0_data1_a_mux[] = { + RIF0_D1_A_MARK, +}; + +static const unsigned int drif0_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 15), +}; + +static const unsigned int drif0_ctrl_b_mux[] = { + RIF0_CLK_B_MARK, RIF0_SYNC_B_MARK, +}; + +static const unsigned int drif0_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(3, 13), +}; + +static const unsigned int drif0_data0_b_mux[] = { + RIF0_D0_B_MARK, +}; + +static const unsigned int drif0_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(3, 14), +}; + +static const unsigned int drif0_data1_b_mux[] = { + RIF0_D1_B_MARK, +}; + +/* - DRIF1 --------------------------------------------------------------- */ +static const unsigned int drif1_ctrl_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 1), +}; + +static const unsigned int drif1_ctrl_mux[] = { + RIF1_CLK_MARK, RIF1_SYNC_MARK, +}; + +static const unsigned int drif1_data0_pins[] = { + /* D0 */ + RCAR_GP_PIN(5, 2), +}; + +static const unsigned int drif1_data0_mux[] = { + RIF1_D0_MARK, +}; + +static const unsigned int drif1_data1_pins[] = { + /* D1 */ + RCAR_GP_PIN(5, 3), +}; + +static const unsigned int drif1_data1_mux[] = { + RIF1_D1_MARK, +}; + +/* - DRIF2 --------------------------------------------------------------- */ +static const unsigned int drif2_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7), +}; + +static const unsigned int drif2_ctrl_a_mux[] = { + RIF2_CLK_A_MARK, RIF2_SYNC_A_MARK, +}; + +static const unsigned int drif2_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(2, 8), +}; + +static const unsigned int drif2_data0_a_mux[] = { + RIF2_D0_A_MARK, +}; + +static const unsigned int drif2_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(2, 9), +}; + +static const unsigned int drif2_data1_a_mux[] = { + RIF2_D1_A_MARK, +}; + +static const unsigned int drif2_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5), +}; + +static const unsigned int drif2_ctrl_b_mux[] = { + RIF2_CLK_B_MARK, RIF2_SYNC_B_MARK, +}; + +static const unsigned int drif2_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(1, 6), +}; + +static const unsigned int drif2_data0_b_mux[] = { + RIF2_D0_B_MARK, +}; + +static const unsigned int drif2_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(1, 7), +}; + +static const unsigned int drif2_data1_b_mux[] = { + RIF2_D1_B_MARK, +}; + +/* - DRIF3 --------------------------------------------------------------- */ +static const unsigned int drif3_ctrl_a_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11), +}; + +static const unsigned int drif3_ctrl_a_mux[] = { + RIF3_CLK_A_MARK, RIF3_SYNC_A_MARK, +}; + +static const unsigned int drif3_data0_a_pins[] = { + /* D0 */ + RCAR_GP_PIN(2, 12), +}; + +static const unsigned int drif3_data0_a_mux[] = { + RIF3_D0_A_MARK, +}; + +static const unsigned int drif3_data1_a_pins[] = { + /* D1 */ + RCAR_GP_PIN(2, 13), +}; + +static const unsigned int drif3_data1_a_mux[] = { + RIF3_D1_A_MARK, +}; + +static const unsigned int drif3_ctrl_b_pins[] = { + /* CLK, SYNC */ + RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9), +}; + +static const unsigned int drif3_ctrl_b_mux[] = { + RIF3_CLK_B_MARK, RIF3_SYNC_B_MARK, +}; + +static const 
unsigned int drif3_data0_b_pins[] = { + /* D0 */ + RCAR_GP_PIN(0, 10), +}; + +static const unsigned int drif3_data0_b_mux[] = { + RIF3_D0_B_MARK, +}; + +static const unsigned int drif3_data1_b_pins[] = { + /* D1 */ + RCAR_GP_PIN(0, 11), +}; + +static const unsigned int drif3_data1_b_mux[] = { + RIF3_D1_B_MARK, +}; + /* - DU --------------------------------------------------------------------- */ static const unsigned int du_rgb666_pins[] = { /* R[7:2], G[7:2], B[7:2] */ @@ -3243,6 +3449,43 @@ static const unsigned int ssi9_ctrl_b_mux[] = { SSI_SCK9_B_MARK, SSI_WS9_B_MARK, }; +/* - TMU -------------------------------------------------------------------- */ +static const unsigned int tmu_tclk1_a_pins[] = { + /* TCLK */ + RCAR_GP_PIN(3, 12), +}; + +static const unsigned int tmu_tclk1_a_mux[] = { + TCLK1_A_MARK, +}; + +static const unsigned int tmu_tclk1_b_pins[] = { + /* TCLK */ + RCAR_GP_PIN(5, 17), +}; + +static const unsigned int tmu_tclk1_b_mux[] = { + TCLK1_B_MARK, +}; + +static const unsigned int tmu_tclk2_a_pins[] = { + /* TCLK */ + RCAR_GP_PIN(3, 13), +}; + +static const unsigned int tmu_tclk2_a_mux[] = { + TCLK2_A_MARK, +}; + +static const unsigned int tmu_tclk2_b_pins[] = { + /* TCLK */ + RCAR_GP_PIN(5, 18), +}; + +static const unsigned int tmu_tclk2_b_mux[] = { + TCLK2_B_MARK, +}; + /* - USB0 ------------------------------------------------------------------- */ static const unsigned int usb0_a_pins[] = { /* PWEN, OVC */ @@ -3523,8 +3766,8 @@ static const unsigned int vin5_clk_b_mux[] = { }; static const struct { - struct sh_pfc_pin_group common[241]; - struct sh_pfc_pin_group automotive[2]; + struct sh_pfc_pin_group common[245]; + struct sh_pfc_pin_group automotive[23]; } pinmux_groups = { .common = { SH_PFC_PIN_GROUP(audio_clk_a), @@ -3735,6 +3978,10 @@ static const struct { SH_PFC_PIN_GROUP(ssi9_data), SH_PFC_PIN_GROUP(ssi9_ctrl_a), SH_PFC_PIN_GROUP(ssi9_ctrl_b), + SH_PFC_PIN_GROUP(tmu_tclk1_a), + SH_PFC_PIN_GROUP(tmu_tclk1_b), + SH_PFC_PIN_GROUP(tmu_tclk2_a), + SH_PFC_PIN_GROUP(tmu_tclk2_b), SH_PFC_PIN_GROUP(usb0_a), SH_PFC_PIN_GROUP(usb0_b), SH_PFC_PIN_GROUP(usb0_id), @@ -3772,6 +4019,27 @@ static const struct { .automotive = { SH_PFC_PIN_GROUP(canfd0_data), SH_PFC_PIN_GROUP(canfd1_data), + SH_PFC_PIN_GROUP(drif0_ctrl_a), + SH_PFC_PIN_GROUP(drif0_data0_a), + SH_PFC_PIN_GROUP(drif0_data1_a), + SH_PFC_PIN_GROUP(drif0_ctrl_b), + SH_PFC_PIN_GROUP(drif0_data0_b), + SH_PFC_PIN_GROUP(drif0_data1_b), + SH_PFC_PIN_GROUP(drif1_ctrl), + SH_PFC_PIN_GROUP(drif1_data0), + SH_PFC_PIN_GROUP(drif1_data1), + SH_PFC_PIN_GROUP(drif2_ctrl_a), + SH_PFC_PIN_GROUP(drif2_data0_a), + SH_PFC_PIN_GROUP(drif2_data1_a), + SH_PFC_PIN_GROUP(drif2_ctrl_b), + SH_PFC_PIN_GROUP(drif2_data0_b), + SH_PFC_PIN_GROUP(drif2_data1_b), + SH_PFC_PIN_GROUP(drif3_ctrl_a), + SH_PFC_PIN_GROUP(drif3_data0_a), + SH_PFC_PIN_GROUP(drif3_data1_a), + SH_PFC_PIN_GROUP(drif3_ctrl_b), + SH_PFC_PIN_GROUP(drif3_data0_b), + SH_PFC_PIN_GROUP(drif3_data1_b), } }; @@ -3826,6 +4094,39 @@ static const char * const canfd1_groups[] = { "canfd1_data", }; +static const char * const drif0_groups[] = { + "drif0_ctrl_a", + "drif0_data0_a", + "drif0_data1_a", + "drif0_ctrl_b", + "drif0_data0_b", + "drif0_data1_b", +}; + +static const char * const drif1_groups[] = { + "drif1_ctrl", + "drif1_data0", + "drif1_data1", +}; + +static const char * const drif2_groups[] = { + "drif2_ctrl_a", + "drif2_data0_a", + "drif2_data1_a", + "drif2_ctrl_b", + "drif2_data0_b", + "drif2_data1_b", +}; + +static const char * const drif3_groups[] = { + "drif3_ctrl_a", + 
"drif3_data0_a", + "drif3_data1_a", + "drif3_ctrl_b", + "drif3_data0_b", + "drif3_data1_b", +}; + static const char * const du_groups[] = { "du_rgb666", "du_rgb888", @@ -4111,6 +4412,13 @@ static const char * const ssi_groups[] = { "ssi9_ctrl_b", }; +static const char * const tmu_groups[] = { + "tmu_tclk1_a", + "tmu_tclk1_b", + "tmu_tclk2_a", + "tmu_tclk2_b", +}; + static const char * const usb0_groups[] = { "usb0_a", "usb0_b", @@ -4157,8 +4465,8 @@ static const char * const vin5_groups[] = { }; static const struct { - struct sh_pfc_function common[44]; - struct sh_pfc_function automotive[2]; + struct sh_pfc_function common[45]; + struct sh_pfc_function automotive[6]; } pinmux_functions = { .common = { SH_PFC_FUNCTION(audio_clk), @@ -4201,6 +4509,7 @@ static const struct { SH_PFC_FUNCTION(sdhi1), SH_PFC_FUNCTION(sdhi3), SH_PFC_FUNCTION(ssi), + SH_PFC_FUNCTION(tmu), SH_PFC_FUNCTION(usb0), SH_PFC_FUNCTION(usb30), SH_PFC_FUNCTION(vin4), @@ -4209,6 +4518,10 @@ static const struct { .automotive = { SH_PFC_FUNCTION(canfd0), SH_PFC_FUNCTION(canfd1), + SH_PFC_FUNCTION(drif0), + SH_PFC_FUNCTION(drif1), + SH_PFC_FUNCTION(drif2), + SH_PFC_FUNCTION(drif3), } }; @@ -4914,17 +5227,6 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = { { /* sentinel */ }, }; -static bool pin_has_pud(unsigned int pin) -{ - /* Some pins are pull-up only */ - switch (pin) { - case RCAR_GP_PIN(6, 9): /* USB30_OVC */ - return false; - } - - return true; -} - static unsigned int r8a77990_pinmux_get_bias(struct sh_pfc *pfc, unsigned int pin) { @@ -4937,7 +5239,7 @@ static unsigned int r8a77990_pinmux_get_bias(struct sh_pfc *pfc, if (!(sh_pfc_read(pfc, reg->puen) & BIT(bit))) return PIN_CONFIG_BIAS_DISABLE; - else if (!pin_has_pud(pin) || (sh_pfc_read(pfc, reg->pud) & BIT(bit))) + else if (sh_pfc_read(pfc, reg->pud) & BIT(bit)) return PIN_CONFIG_BIAS_PULL_UP; else return PIN_CONFIG_BIAS_PULL_DOWN; @@ -4958,13 +5260,11 @@ static void r8a77990_pinmux_set_bias(struct sh_pfc *pfc, unsigned int pin, if (bias != PIN_CONFIG_BIAS_DISABLE) enable |= BIT(bit); - if (pin_has_pud(pin)) { - updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit); - if (bias == PIN_CONFIG_BIAS_PULL_UP) - updown |= BIT(bit); + updown = sh_pfc_read(pfc, reg->pud) & ~BIT(bit); + if (bias == PIN_CONFIG_BIAS_PULL_UP) + updown |= BIT(bit); - sh_pfc_write(pfc, reg->pud, updown); - } + sh_pfc_write(pfc, reg->pud, updown); sh_pfc_write(pfc, reg->puen, enable); } diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c index 84d78db381e3..9e377e3b9cb3 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c @@ -381,6 +381,9 @@ FM(IP12_23_20) IP12_23_20 \ FM(IP12_27_24) IP12_27_24 \ FM(IP12_31_28) IP12_31_28 \ +/* The bit numbering in MOD_SEL fields is reversed */ +#define REV4(f0, f1, f2, f3) f0 f2 f1 f3 + /* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ #define MOD_SEL0_30 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1) #define MOD_SEL0_29 FM(SEL_I2C3_0) FM(SEL_I2C3_1) @@ -388,10 +391,10 @@ FM(IP12_31_28) IP12_31_28 \ #define MOD_SEL0_27 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1) #define MOD_SEL0_26 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) #define MOD_SEL0_25 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) -#define MOD_SEL0_24_23 FM(SEL_PWM0_0) FM(SEL_PWM0_1) FM(SEL_PWM0_2) F_(0, 0) -#define MOD_SEL0_22_21 FM(SEL_PWM1_0) FM(SEL_PWM1_1) FM(SEL_PWM1_2) F_(0, 0) -#define MOD_SEL0_20_19 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0) -#define MOD_SEL0_18_17 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0) +#define MOD_SEL0_24_23 
REV4(FM(SEL_PWM0_0), FM(SEL_PWM0_1), FM(SEL_PWM0_2), F_(0, 0)) +#define MOD_SEL0_22_21 REV4(FM(SEL_PWM1_0), FM(SEL_PWM1_1), FM(SEL_PWM1_2), F_(0, 0)) +#define MOD_SEL0_20_19 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0)) +#define MOD_SEL0_18_17 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0)) #define MOD_SEL0_15 FM(SEL_IRQ_0_0) FM(SEL_IRQ_0_1) #define MOD_SEL0_14 FM(SEL_IRQ_1_0) FM(SEL_IRQ_1_1) #define MOD_SEL0_13 FM(SEL_IRQ_2_0) FM(SEL_IRQ_2_1) diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c index 085770e66895..ef3da8bf1d87 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c @@ -3354,7 +3354,8 @@ static const char * const fsic_groups[] = { "fsic_sclk_out", "fsic_data_in", "fsic_data_out", - "fsic_spdif", + "fsic_spdif_0", + "fsic_spdif_1", }; static const char * const fsid_groups[] = { diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c index 274d5ff87078..c97d2ba7677c 100644 --- a/drivers/pinctrl/sh-pfc/pinctrl.c +++ b/drivers/pinctrl/sh-pfc/pinctrl.c @@ -347,6 +347,8 @@ static int sh_pfc_func_set_mux(struct pinctrl_dev *pctldev, unsigned selector, unsigned int i; int ret = 0; + dev_dbg(pctldev->dev, "Configuring pin group %s\n", grp->name); + spin_lock_irqsave(&pfc->lock, flags); for (i = 0; i < grp->nr_pins; ++i) { diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 46d477ff5109..56016cb76769 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -126,7 +126,8 @@ struct pinmux_cfg_reg { * one for each possible combination of the register field bit values. */ #define PINMUX_CFG_REG(name, r, r_width, f_width) \ - .reg = r, .reg_width = r_width, .field_width = f_width, \ + .reg = r, .reg_width = r_width, \ + .field_width = f_width + BUILD_BUG_ON_ZERO(r_width % f_width), \ .enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)]) /* diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c index 4ba171827428..8a0eee044264 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c +++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c @@ -6007,8 +6007,8 @@ static int atlas7_gpio_probe(struct platform_device *pdev) } /* retrieve gpio descriptor data */ - a7gc = devm_kzalloc(&pdev->dev, sizeof(*a7gc) + - sizeof(struct atlas7_gpio_bank) * nbank, GFP_KERNEL); + a7gc = devm_kzalloc(&pdev->dev, struct_size(a7gc, banks, nbank), + GFP_KERNEL); if (!a7gc) return -ENOMEM; diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index 2e42d738b589..2b3bd1a41f21 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c @@ -782,7 +782,7 @@ static void sirfsoc_gpio_set_pulldown(struct sirfsoc_gpio_chip *sgpio, static int sirfsoc_gpio_probe(struct device_node *np) { int i, err = 0; - static struct sirfsoc_gpio_chip *sgpio; + struct sirfsoc_gpio_chip *sgpio; struct sirfsoc_gpio_bank *bank; void __iomem *regs; struct platform_device *pdev; diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 813eccbb9aaf..0b9ff5aa6bb5 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -414,7 +414,7 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, unsigned int num_configs; bool has_config = 0; unsigned reserve = 0; - int num_pins, num_funcs, maps_per_pin, i, err; + int num_pins, num_funcs, maps_per_pin, i, err = 0; pctl = 
pinctrl_dev_get_drvdata(pctldev); @@ -441,41 +441,45 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, if (has_config && num_pins >= 1) maps_per_pin++; - if (!num_pins || !maps_per_pin) - return -EINVAL; + if (!num_pins || !maps_per_pin) { + err = -EINVAL; + goto exit; + } reserve = num_pins * maps_per_pin; err = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, reserve); if (err) - return err; + goto exit; for (i = 0; i < num_pins; i++) { err = of_property_read_u32_index(node, "pinmux", i, &pinfunc); if (err) - return err; + goto exit; pin = STM32_GET_PIN_NO(pinfunc); func = STM32_GET_PIN_FUNC(pinfunc); if (!stm32_pctrl_is_function_valid(pctl, pin, func)) { dev_err(pctl->dev, "invalid function.\n"); - return -EINVAL; + err = -EINVAL; + goto exit; } grp = stm32_pctrl_find_group_by_pin(pctl, pin); if (!grp) { dev_err(pctl->dev, "unable to match pin %d to group\n", pin); - return -EINVAL; + err = -EINVAL; + goto exit; } err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map, reserved_maps, num_maps); if (err) - return err; + goto exit; if (has_config) { err = pinctrl_utils_add_map_configs(pctldev, map, @@ -483,11 +487,13 @@ static int stm32_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev, configs, num_configs, PIN_MAP_TYPE_CONFIGS_GROUP); if (err) - return err; + goto exit; } } - return 0; +exit: + kfree(configs); + return err; } static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev, @@ -577,8 +583,8 @@ static int stm32_pmx_get_func_groups(struct pinctrl_dev *pctldev, return 0; } -static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank, - int pin, u32 mode, u32 alt) +static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank, + int pin, u32 mode, u32 alt) { struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); u32 val; @@ -614,6 +620,8 @@ static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); clk_disable(bank->clk); + + return err; } void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode, @@ -670,9 +678,7 @@ static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev, mode = stm32_gpio_get_mode(function); alt = stm32_gpio_get_alt(function); - stm32_pmx_set_mode(bank, pin, mode, alt); - - return 0; + return stm32_pmx_set_mode(bank, pin, mode, alt); } static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, @@ -682,9 +688,7 @@ static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, struct stm32_gpio_bank *bank = gpiochip_get_data(range->gc); int pin = stm32_gpio_pin(gpio); - stm32_pmx_set_mode(bank, pin, !input, 0); - - return 0; + return stm32_pmx_set_mode(bank, pin, !input, 0); } static const struct pinmux_ops stm32_pmx_ops = { @@ -698,8 +702,8 @@ static const struct pinmux_ops stm32_pmx_ops = { /* Pinconf functions */ -static void stm32_pconf_set_driving(struct stm32_gpio_bank *bank, - unsigned offset, u32 drive) +static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank, + unsigned offset, u32 drive) { struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); unsigned long flags; @@ -728,6 +732,8 @@ static void stm32_pconf_set_driving(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); clk_disable(bank->clk); + + return err; } static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank, @@ -748,8 +754,8 @@ static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank, return (val >> offset); } -static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank, - 
unsigned offset, u32 speed) +static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank, + unsigned offset, u32 speed) { struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); unsigned long flags; @@ -778,6 +784,8 @@ static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); clk_disable(bank->clk); + + return err; } static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank, @@ -798,8 +806,8 @@ static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank, return (val >> (offset * 2)); } -static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank, - unsigned offset, u32 bias) +static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank, + unsigned offset, u32 bias) { struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); unsigned long flags; @@ -828,6 +836,8 @@ static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); clk_disable(bank->clk); + + return err; } static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank, @@ -890,22 +900,22 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev, switch (param) { case PIN_CONFIG_DRIVE_PUSH_PULL: - stm32_pconf_set_driving(bank, offset, 0); + ret = stm32_pconf_set_driving(bank, offset, 0); break; case PIN_CONFIG_DRIVE_OPEN_DRAIN: - stm32_pconf_set_driving(bank, offset, 1); + ret = stm32_pconf_set_driving(bank, offset, 1); break; case PIN_CONFIG_SLEW_RATE: - stm32_pconf_set_speed(bank, offset, arg); + ret = stm32_pconf_set_speed(bank, offset, arg); break; case PIN_CONFIG_BIAS_DISABLE: - stm32_pconf_set_bias(bank, offset, 0); + ret = stm32_pconf_set_bias(bank, offset, 0); break; case PIN_CONFIG_BIAS_PULL_UP: - stm32_pconf_set_bias(bank, offset, 1); + ret = stm32_pconf_set_bias(bank, offset, 1); break; case PIN_CONFIG_BIAS_PULL_DOWN: - stm32_pconf_set_bias(bank, offset, 2); + ret = stm32_pconf_set_bias(bank, offset, 2); break; case PIN_CONFIG_OUTPUT: __stm32_gpio_set(bank, offset, arg); diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c index aa8b58125568..ef4268cc6227 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c @@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 }; static const struct sunxi_pinctrl_desc h6_pinctrl_data = { .pins = h6_pins, .npins = ARRAY_SIZE(h6_pins), - .irq_banks = 3, + .irq_banks = 4, .irq_bank_map = h6_irq_bank_map, .irq_read_needs_mux = true, }; diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c index c63086c98335..e05dd9a5551d 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80-r.c @@ -153,6 +153,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_r_pinctrl_data = { .pin_base = PL_BASE, .irq_banks = 2, .disable_strict_mode = true, + .has_io_bias_cfg = true, }; static int sun9i_a80_r_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c index 5553c0eb0f41..da37d594a13d 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c @@ -722,6 +722,7 @@ static const struct sunxi_pinctrl_desc sun9i_a80_pinctrl_data = { .npins = ARRAY_SIZE(sun9i_a80_pins), .irq_banks = 5, .disable_strict_mode = true, + .has_io_bias_cfg = true, }; static int sun9i_a80_pinctrl_probe(struct platform_device *pdev) diff --git 
a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 5d9184d18c16..8dd25caea2cf 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -603,6 +603,45 @@ static const struct pinconf_ops sunxi_pconf_ops = { .pin_config_group_set = sunxi_pconf_group_set, }; +static int sunxi_pinctrl_set_io_bias_cfg(struct sunxi_pinctrl *pctl, + unsigned pin, + struct regulator *supply) +{ + u32 val, reg; + int uV; + + if (!pctl->desc->has_io_bias_cfg) + return 0; + + uV = regulator_get_voltage(supply); + if (uV < 0) + return uV; + + /* Might be dummy regulator with no voltage set */ + if (uV == 0) + return 0; + + /* Configured value must be equal or greater to actual voltage */ + if (uV <= 1800000) + val = 0x0; /* 1.8V */ + else if (uV <= 2500000) + val = 0x6; /* 2.5V */ + else if (uV <= 2800000) + val = 0x9; /* 2.8V */ + else if (uV <= 3000000) + val = 0xA; /* 3.0V */ + else + val = 0xD; /* 3.3V */ + + pin -= pctl->desc->pin_base; + + reg = readl(pctl->membase + sunxi_grp_config_reg(pin)); + reg &= ~IO_BIAS_MASK; + writel(reg | val, pctl->membase + sunxi_grp_config_reg(pin)); + + return 0; +} + static int sunxi_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); @@ -698,26 +737,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); unsigned short bank = offset / PINS_PER_BANK; - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; - struct regulator *reg; + unsigned short bank_offset = bank - pctl->desc->pin_base / + PINS_PER_BANK; + struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; + struct regulator *reg = s_reg->regulator; + char supply[16]; int ret; - reg = s_reg->regulator; - if (!reg) { - char supply[16]; - - snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); - reg = regulator_get(pctl->dev, supply); - if (IS_ERR(reg)) { - dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", - 'A' + bank); - return PTR_ERR(reg); - } - - s_reg->regulator = reg; - refcount_set(&s_reg->refcount, 1); - } else { + if (reg) { refcount_inc(&s_reg->refcount); + return 0; + } + + snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); + reg = regulator_get(pctl->dev, supply); + if (IS_ERR(reg)) { + dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", + 'A' + bank); + return PTR_ERR(reg); } ret = regulator_enable(reg); @@ -727,13 +764,15 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) goto out; } + sunxi_pinctrl_set_io_bias_cfg(pctl, offset, reg); + + s_reg->regulator = reg; + refcount_set(&s_reg->refcount, 1); + return 0; out: - if (refcount_dec_and_test(&s_reg->refcount)) { - regulator_put(s_reg->regulator); - s_reg->regulator = NULL; - } + regulator_put(s_reg->regulator); return ret; } @@ -742,7 +781,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset) { struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); unsigned short bank = offset / PINS_PER_BANK; - struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; + unsigned short bank_offset = bank - pctl->desc->pin_base / + PINS_PER_BANK; + struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; if (!refcount_dec_and_test(&s_reg->refcount)) return 0; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index e340d2a24b44..ee15ab067b5f 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h 
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -79,6 +79,10 @@ #define IRQ_LEVEL_LOW 0x03 #define IRQ_EDGE_BOTH 0x04 +#define GRP_CFG_REG 0x300 + +#define IO_BIAS_MASK GENMASK(3, 0) + #define SUN4I_FUNC_INPUT 0 #define SUN4I_FUNC_IRQ 6 @@ -113,6 +117,7 @@ struct sunxi_pinctrl_desc { const unsigned int *irq_bank_map; bool irq_read_needs_mux; bool disable_strict_mode; + bool has_io_bias_cfg; }; struct sunxi_pinctrl_function { @@ -136,7 +141,7 @@ struct sunxi_pinctrl { struct gpio_chip *chip; const struct sunxi_pinctrl_desc *desc; struct device *dev; - struct sunxi_pinctrl_regulator regulators[12]; + struct sunxi_pinctrl_regulator regulators[9]; struct irq_domain *domain; struct sunxi_pinctrl_function *functions; unsigned nfunctions; @@ -338,6 +343,13 @@ static inline u32 sunxi_irq_status_offset(u16 irq) return irq_num * IRQ_STATUS_IRQ_BITS; } +static inline u32 sunxi_grp_config_reg(u16 pin) +{ + u8 bank = pin / PINS_PER_BANK; + + return GRP_CFG_REG + bank * 0x4; +} + int sunxi_pinctrl_init_with_variant(struct platform_device *pdev, const struct sunxi_pinctrl_desc *desc, unsigned long variant); diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index a4bc506a01a3..e5e7f1f22813 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -263,9 +263,9 @@ static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod, reg_val |= reg->unlock_val << __ffs(reg->lock_mask); r = regmap_update_bits(iod->regmap, cfg->offset, reg_mask, reg_val); - dev_info(dev, "Set reg 0x%x Delay(a: %d g: %d), Elements(C=%d F=%d)0x%x\n", - cfg->offset, cfg->a_delay, cfg->g_delay, c_elements, - f_elements, reg_val); + dev_dbg(dev, "Set reg 0x%x Delay(a: %d g: %d), Elements(C=%d F=%d)0x%x\n", + cfg->offset, cfg->a_delay, cfg->g_delay, c_elements, + f_elements, reg_val); return r; } @@ -923,7 +923,6 @@ static struct platform_driver ti_iodelay_driver = { .probe = ti_iodelay_probe, .remove = ti_iodelay_remove, .driver = { - .owner = THIS_MODULE, .name = DRIVER_NAME, .of_match_table = ti_iodelay_of_match, }, diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index 16b1615958aa..9186d81a51cc 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -49,9 +49,6 @@ config CHROMEOS_TBMC To compile this driver as a module, choose M here: the module will be called chromeos_tbmc. -config CROS_EC_CTL - tristate - config CROS_EC_I2C tristate "ChromeOS Embedded Controller (I2C)" depends on MFD_CROS_EC && I2C @@ -111,4 +108,50 @@ config CROS_KBD_LED_BACKLIGHT To compile this driver as a module, choose M here: the module will be called cros_kbd_led_backlight. +config CROS_EC_LIGHTBAR + tristate "Chromebook Pixel's lightbar support" + depends on MFD_CROS_EC_CHARDEV + default MFD_CROS_EC_CHARDEV + help + This option exposes the Chromebook Pixel's lightbar to + userspace. + + To compile this driver as a module, choose M here: the + module will be called cros_ec_lightbar. + +config CROS_EC_VBC + tristate "ChromeOS EC vboot context support" + depends on MFD_CROS_EC_CHARDEV && OF + default MFD_CROS_EC_CHARDEV + help + This option exposes the ChromeOS EC vboot context nvram to + userspace. + + To compile this driver as a module, choose M here: the + module will be called cros_ec_vbc. 
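Each of the Kconfig entries added above carves one piece out of the old monolithic cros_ec_ctl module and turns it into a standalone driver whose default tracks MFD_CROS_EC_CHARDEV, so existing configurations keep the same features enabled. As a rough illustration of what a standalone module looks like in code terms, each of these drivers ends up registering platform_driver boilerplate along the following lines; the probe/remove names match the cros_ec_debugfs.c hunk further below, but the registration itself is outside this excerpt and the pm_ops and resume names are assumptions, not quoted from the patch.

/* Hedged sketch of the per-module registration; not taken verbatim from this patch. */
static const struct dev_pm_ops cros_ec_debugfs_pm_ops = {
	/* cros_ec_debugfs_resume is assumed here; only the suspend hook is visible below */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_debugfs_suspend,
				     cros_ec_debugfs_resume)
};

static struct platform_driver cros_ec_debugfs_driver = {
	.driver = {
		.name = DRV_NAME,	/* "cros-ec-debugfs" */
		.pm = &cros_ec_debugfs_pm_ops,
	},
	.probe = cros_ec_debugfs_probe,
	.remove = cros_ec_debugfs_remove,
};

module_platform_driver(cros_ec_debugfs_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Debug logs for ChromeOS EC");
MODULE_ALIAS("platform:" DRV_NAME);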
+ +config CROS_EC_DEBUGFS + tristate "Export ChromeOS EC internals in DebugFS" + depends on MFD_CROS_EC_CHARDEV && DEBUG_FS + default MFD_CROS_EC_CHARDEV + help + This option exposes the ChromeOS EC device internals to + userspace. + + To compile this driver as a module, choose M here: the + module will be called cros_ec_debugfs. + +config CROS_EC_SYSFS + tristate "ChromeOS EC control and information through sysfs" + depends on MFD_CROS_EC_CHARDEV && SYSFS + default MFD_CROS_EC_CHARDEV + help + This option exposes some sysfs attributes to control and get + information from ChromeOS EC. + + To compile this driver as a module, choose M here: the + module will be called cros_ec_sysfs. + +source "drivers/platform/chrome/wilco_ec/Kconfig" + endif # CHROMEOS_PLATFORMS diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index cd591bf872bb..1e2f0029b597 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -3,9 +3,6 @@ obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o obj-$(CONFIG_CHROMEOS_TBMC) += chromeos_tbmc.o -cros_ec_ctl-objs := cros_ec_sysfs.o cros_ec_lightbar.o \ - cros_ec_vbc.o cros_ec_debugfs.o -obj-$(CONFIG_CROS_EC_CTL) += cros_ec_ctl.o obj-$(CONFIG_CROS_EC_I2C) += cros_ec_i2c.o obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o @@ -13,3 +10,9 @@ cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o +obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o +obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o +obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o +obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o + +obj-$(CONFIG_WILCO_EC) += wilco_ec/ diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c index b0693fdec8c6..d13770785fb5 100644 --- a/drivers/platform/chrome/chromeos_pstore.c +++ b/drivers/platform/chrome/chromeos_pstore.c @@ -1,12 +1,7 @@ -/* - * chromeos_pstore.c - Driver to instantiate Chromebook ramoops device - * - * Copyright (C) 2013 Google, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, version 2 of the License. - */ +// SPDX-License-Identifier: GPL-2.0 +// Driver to instantiate Chromebook ramoops device. +// +// Copyright (C) 2013 Google, Inc. #include #include @@ -138,5 +133,5 @@ static void __exit chromeos_pstore_exit(void) module_init(chromeos_pstore_init); module_exit(chromeos_pstore_exit); -MODULE_DESCRIPTION("Chrome OS pstore module"); -MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ChromeOS pstore module"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index c62ee8e610a0..900c7073c46f 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -1,21 +1,7 @@ -/* - * cros_ec_debugfs - debug logs for Chrome OS EC - * - * Copyright 2015 Google, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ +// SPDX-License-Identifier: GPL-2.0+ +// Debug logs for the ChromeOS EC +// +// Copyright (C) 2015 Google, Inc. #include #include @@ -23,12 +9,16 @@ #include #include #include +#include #include +#include #include #include #include #include +#define DRV_NAME "cros-ec-debugfs" + #define LOG_SHIFT 14 #define LOG_SIZE (1 << LOG_SHIFT) #define LOG_POLL_SEC 10 @@ -423,8 +413,9 @@ static int cros_ec_create_pdinfo(struct cros_ec_debugfs *debug_info) return 0; } -int cros_ec_debugfs_init(struct cros_ec_dev *ec) +static int cros_ec_debugfs_probe(struct platform_device *pd) { + struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent); struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev); const char *name = ec_platform->ec_name; struct cros_ec_debugfs *debug_info; @@ -453,40 +444,57 @@ int cros_ec_debugfs_init(struct cros_ec_dev *ec) ec->debug_info = debug_info; + dev_set_drvdata(&pd->dev, ec); + return 0; remove_debugfs: debugfs_remove_recursive(debug_info->dir); return ret; } -EXPORT_SYMBOL(cros_ec_debugfs_init); -void cros_ec_debugfs_remove(struct cros_ec_dev *ec) +static int cros_ec_debugfs_remove(struct platform_device *pd) { - if (!ec->debug_info) - return; + struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent); debugfs_remove_recursive(ec->debug_info->dir); cros_ec_cleanup_console_log(ec->debug_info); + + return 0; } -EXPORT_SYMBOL(cros_ec_debugfs_remove); -void cros_ec_debugfs_suspend(struct cros_ec_dev *ec) +static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev) { - /* - * cros_ec_debugfs_init() failures are non-fatal; it's also possible - * that we initted things but decided that console log wasn't supported. - * We'll use the same set of checks that cros_ec_debugfs_remove() + - * cros_ec_cleanup_console_log() end up using to handle those cases. 
- */ - if (ec->debug_info && ec->debug_info->log_buffer.buf) - cancel_delayed_work_sync(&ec->debug_info->log_poll_work); + struct cros_ec_dev *ec = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&ec->debug_info->log_poll_work); + + return 0; } -EXPORT_SYMBOL(cros_ec_debugfs_suspend); -void cros_ec_debugfs_resume(struct cros_ec_dev *ec) +static int __maybe_unused cros_ec_debugfs_resume(struct device *dev) { - if (ec->debug_info && ec->debug_info->log_buffer.buf) - schedule_delayed_work(&ec->debug_info->log_poll_work, 0); + struct cros_ec_dev *ec = dev_get_drvdata(dev); + + schedule_delayed_work(&ec->debug_info->log_poll_work, 0); + + return 0; } -EXPORT_SYMBOL(cros_ec_debugfs_resume); + +static SIMPLE_DEV_PM_OPS(cros_ec_debugfs_pm_ops, + cros_ec_debugfs_suspend, cros_ec_debugfs_resume); + +static struct platform_driver cros_ec_debugfs_driver = { + .driver = { + .name = DRV_NAME, + .pm = &cros_ec_debugfs_pm_ops, + }, + .probe = cros_ec_debugfs_probe, + .remove = cros_ec_debugfs_remove, +}; + +module_platform_driver(cros_ec_debugfs_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Debug logs for ChromeOS EC"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/chrome/cros_ec_i2c.c b/drivers/platform/chrome/cros_ec_i2c.c index ef9b4763356f..61d75395f86d 100644 --- a/drivers/platform/chrome/cros_ec_i2c.c +++ b/drivers/platform/chrome/cros_ec_i2c.c @@ -1,17 +1,7 @@ -/* - * ChromeOS EC multi-function device (I2C) - * - * Copyright (C) 2012 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0 +// I2C interface for ChromeOS Embedded Controller +// +// Copyright (C) 2012 Google, Inc #include #include @@ -317,15 +307,6 @@ static int cros_ec_i2c_probe(struct i2c_client *client, return 0; } -static int cros_ec_i2c_remove(struct i2c_client *client) -{ - struct cros_ec_device *ec_dev = i2c_get_clientdata(client); - - cros_ec_remove(ec_dev); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int cros_ec_i2c_suspend(struct device *dev) { @@ -376,11 +357,10 @@ static struct i2c_driver cros_ec_driver = { .pm = &cros_ec_i2c_pm_ops, }, .probe = cros_ec_i2c_probe, - .remove = cros_ec_i2c_remove, .id_table = cros_ec_i2c_id, }; module_i2c_driver(cros_ec_driver); -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("ChromeOS EC multi function device"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("I2C interface for ChromeOS Embedded Controller"); diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c index 68193bb53383..d30a6650b0b5 100644 --- a/drivers/platform/chrome/cros_ec_lightbar.c +++ b/drivers/platform/chrome/cros_ec_lightbar.c @@ -1,23 +1,7 @@ -/* - * cros_ec_lightbar - expose the Chromebook Pixel lightbar to userspace - * - * Copyright (C) 2014 Google, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#define pr_fmt(fmt) "cros_ec_lightbar: " fmt +// SPDX-License-Identifier: GPL-2.0+ +// Expose the Chromebook Pixel lightbar to userspace +// +// Copyright (C) 2014 Google, Inc. #include #include @@ -33,6 +17,8 @@ #include #include +#define DRV_NAME "cros-ec-lightbar" + /* Rate-limit the lightbar interface to prevent DoS. */ static unsigned long lb_interval_jiffies = 50 * HZ / 1000; @@ -41,7 +27,6 @@ static unsigned long lb_interval_jiffies = 50 * HZ / 1000; * If this is true, we won't do anything during suspend/resume. */ static bool userspace_control; -static struct cros_ec_dev *ec_with_lightbar; static ssize_t interval_msec_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -373,15 +358,12 @@ error: return ret; } -int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable) +static int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable) { struct ec_params_lightbar *param; struct cros_ec_command *msg; int ret; - if (ec != ec_with_lightbar) - return 0; - msg = alloc_lightbar_cmd_msg(ec); if (!msg) return -ENOMEM; @@ -408,25 +390,6 @@ error: return ret; } -EXPORT_SYMBOL(lb_manual_suspend_ctrl); - -int lb_suspend(struct cros_ec_dev *ec) -{ - if (userspace_control || ec != ec_with_lightbar) - return 0; - - return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND); -} -EXPORT_SYMBOL(lb_suspend); - -int lb_resume(struct cros_ec_dev *ec) -{ - if (userspace_control || ec != ec_with_lightbar) - return 0; - - return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME); -} -EXPORT_SYMBOL(lb_resume); static ssize_t sequence_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -584,36 +547,91 @@ static struct attribute *__lb_cmds_attrs[] = { NULL, }; -bool ec_has_lightbar(struct cros_ec_dev *ec) +struct attribute_group cros_ec_lightbar_attr_group = { + .name = "lightbar", + .attrs = __lb_cmds_attrs, +}; + +static int cros_ec_lightbar_probe(struct platform_device *pd) +{ + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); + struct cros_ec_platform *pdata = dev_get_platdata(ec_dev->dev); + struct device *dev = &pd->dev; + int ret; + + /* + * Only instantiate the lightbar if the EC name is 'cros_ec'. Other EC + * devices like 'cros_pd' doesn't have a lightbar. + */ + if (strcmp(pdata->ec_name, CROS_EC_DEV_NAME) != 0) + return -ENODEV; + + /* + * Ask then for the lightbar version, if it's 0 then the 'cros_ec' + * doesn't have a lightbar. + */ + if (!get_lightbar_version(ec_dev, NULL, NULL)) + return -ENODEV; + + /* Take control of the lightbar from the EC. */ + lb_manual_suspend_ctrl(ec_dev, 1); + + ret = sysfs_create_group(&ec_dev->class_dev.kobj, + &cros_ec_lightbar_attr_group); + if (ret < 0) + dev_err(dev, "failed to create %s attributes. err=%d\n", + cros_ec_lightbar_attr_group.name, ret); + + return ret; +} + +static int cros_ec_lightbar_remove(struct platform_device *pd) { - return !!get_lightbar_version(ec, NULL, NULL); + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); + + sysfs_remove_group(&ec_dev->class_dev.kobj, + &cros_ec_lightbar_attr_group); + + /* Let the EC take over the lightbar again. 
*/ + lb_manual_suspend_ctrl(ec_dev, 0); + + return 0; } -static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj, - struct attribute *a, int n) +static int __maybe_unused cros_ec_lightbar_resume(struct device *dev) { - struct device *dev = container_of(kobj, struct device, kobj); - struct cros_ec_dev *ec = to_cros_ec_dev(dev); - struct platform_device *pdev = to_platform_device(ec->dev); - struct cros_ec_platform *pdata = pdev->dev.platform_data; - int is_cros_ec; + struct cros_ec_dev *ec_dev = dev_get_drvdata(dev); - is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME); + if (userspace_control) + return 0; + + return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_RESUME); +} - if (is_cros_ec != 0) +static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev) +{ + struct cros_ec_dev *ec_dev = dev_get_drvdata(dev); + + if (userspace_control) return 0; - /* Only instantiate this stuff if the EC has a lightbar */ - if (ec_has_lightbar(ec)) { - ec_with_lightbar = ec; - return a->mode; - } - return 0; + return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_SUSPEND); } -struct attribute_group cros_ec_lightbar_attr_group = { - .name = "lightbar", - .attrs = __lb_cmds_attrs, - .is_visible = cros_ec_lightbar_attrs_are_visible, +static SIMPLE_DEV_PM_OPS(cros_ec_lightbar_pm_ops, + cros_ec_lightbar_suspend, cros_ec_lightbar_resume); + +static struct platform_driver cros_ec_lightbar_driver = { + .driver = { + .name = DRV_NAME, + .pm = &cros_ec_lightbar_pm_ops, + }, + .probe = cros_ec_lightbar_probe, + .remove = cros_ec_lightbar_remove, }; -EXPORT_SYMBOL(cros_ec_lightbar_attr_group); + +module_platform_driver(cros_ec_lightbar_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Expose the Chromebook Pixel's lightbar to userspace"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index e1b75775cd4a..c9c240fbe7c6 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -1,25 +1,15 @@ -/* - * cros_ec_lpc - LPC access to the Chrome OS Embedded Controller - * - * Copyright (C) 2012-2015 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver uses the Chrome OS EC byte-level message-based protocol for - * communicating the keyboard state (which keys are pressed) from a keyboard EC - * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, - * but everything else (including deghosting) is done here. The main - * motivation for this is to keep the EC firmware as simple as possible, since - * it cannot be easily upgraded and EC flash/IRAM space is relatively - * expensive. - */ +// SPDX-License-Identifier: GPL-2.0 +// LPC interface for ChromeOS Embedded Controller +// +// Copyright (C) 2012-2015 Google, Inc +// +// This driver uses the ChromeOS EC byte-level message-based protocol for +// communicating the keyboard state (which keys are pressed) from a keyboard EC +// to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, +// but everything else (including deghosting) is done here. 
The main +// motivation for this is to keep the EC firmware as simple as possible, since +// it cannot be easily upgraded and EC flash/IRAM space is relatively +// expensive. #include #include @@ -327,7 +317,6 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) static int cros_ec_lpc_remove(struct platform_device *pdev) { - struct cros_ec_device *ec_dev; struct acpi_device *adev; adev = ACPI_COMPANION(&pdev->dev); @@ -335,9 +324,6 @@ static int cros_ec_lpc_remove(struct platform_device *pdev) acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY, cros_ec_lpc_acpi_notify); - ec_dev = platform_get_drvdata(pdev); - cros_ec_remove(ec_dev); - return 0; } diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c index c4edfa83e493..d8890bafb55d 100644 --- a/drivers/platform/chrome/cros_ec_lpc_mec.c +++ b/drivers/platform/chrome/cros_ec_lpc_mec.c @@ -1,29 +1,10 @@ -/* - * cros_ec_lpc_mec - LPC variant I/O for Microchip EC - * - * Copyright (C) 2016 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver uses the Chrome OS EC byte-level message-based protocol for - * communicating the keyboard state (which keys are pressed) from a keyboard EC - * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, - * but everything else (including deghosting) is done here. The main - * motivation for this is to keep the EC firmware as simple as possible, since - * it cannot be easily upgraded and EC flash/IRAM space is relatively - * expensive. - */ +// SPDX-License-Identifier: GPL-2.0 +// LPC variant I/O for Microchip EC +// +// Copyright (C) 2016 Google, Inc #include #include -#include #include #include @@ -34,6 +15,7 @@ * EC mutex because memmap data may be accessed without it being held. */ static struct mutex io_mutex; +static u16 mec_emi_base, mec_emi_end; /* * cros_ec_lpc_mec_emi_write_address @@ -46,10 +28,37 @@ static struct mutex io_mutex; static void cros_ec_lpc_mec_emi_write_address(u16 addr, enum cros_ec_lpc_mec_emi_access_mode access_type) { - /* Address relative to start of EMI range */ - addr -= MEC_EMI_RANGE_START; - outb((addr & 0xfc) | access_type, MEC_EMI_EC_ADDRESS_B0); - outb((addr >> 8) & 0x7f, MEC_EMI_EC_ADDRESS_B1); + outb((addr & 0xfc) | access_type, MEC_EMI_EC_ADDRESS_B0(mec_emi_base)); + outb((addr >> 8) & 0x7f, MEC_EMI_EC_ADDRESS_B1(mec_emi_base)); +} + +/** + * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range. 
+ * + * @offset: Address offset + * @length: Number of bytes to check + * + * Return: 1 if in range, 0 if not, and -EINVAL on failure + * such as the mec range not being initialized + */ +int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length) +{ + if (length == 0) + return -EINVAL; + + if (WARN_ON(mec_emi_base == 0 || mec_emi_end == 0)) + return -EINVAL; + + if (offset >= mec_emi_base && offset < mec_emi_end) { + if (WARN_ON(offset + length - 1 >= mec_emi_end)) + return -EINVAL; + return 1; + } + + if (WARN_ON(offset + length > mec_emi_base && offset < mec_emi_end)) + return -EINVAL; + + return 0; } /* @@ -71,6 +80,11 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, u8 sum = 0; enum cros_ec_lpc_mec_emi_access_mode access, new_access; + /* Return checksum of 0 if window is not initialized */ + WARN_ON(mec_emi_base == 0 || mec_emi_end == 0); + if (mec_emi_base == 0 || mec_emi_end == 0) + return 0; + /* * Long access cannot be used on misaligned data since reading B0 loads * the data register and writing B3 flushes. @@ -86,9 +100,9 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, cros_ec_lpc_mec_emi_write_address(offset, access); /* Skip bytes in case of misaligned offset */ - io_addr = MEC_EMI_EC_DATA_B0 + (offset & 0x3); + io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base) + (offset & 0x3); while (i < length) { - while (io_addr <= MEC_EMI_EC_DATA_B3) { + while (io_addr <= MEC_EMI_EC_DATA_B3(mec_emi_base)) { if (io_type == MEC_IO_READ) buf[i] = inb(io_addr++); else @@ -118,7 +132,7 @@ u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, } /* Access [B0, B3] on each loop pass */ - io_addr = MEC_EMI_EC_DATA_B0; + io_addr = MEC_EMI_EC_DATA_B0(mec_emi_base); } done: @@ -128,9 +142,11 @@ done: } EXPORT_SYMBOL(cros_ec_lpc_io_bytes_mec); -void cros_ec_lpc_mec_init(void) +void cros_ec_lpc_mec_init(unsigned int base, unsigned int end) { mutex_init(&io_mutex); + mec_emi_base = base; + mec_emi_end = end; } EXPORT_SYMBOL(cros_ec_lpc_mec_init); diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.h b/drivers/platform/chrome/cros_ec_lpc_mec.h index 105068c0e919..aa1018f6b0f2 100644 --- a/drivers/platform/chrome/cros_ec_lpc_mec.h +++ b/drivers/platform/chrome/cros_ec_lpc_mec.h @@ -1,31 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * cros_ec_lpc_mec - LPC variant I/O for Microchip EC + * LPC variant I/O for Microchip EC * * Copyright (C) 2016 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver uses the Chrome OS EC byte-level message-based protocol for - * communicating the keyboard state (which keys are pressed) from a keyboard EC - * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, - * but everything else (including deghosting) is done here. The main - * motivation for this is to keep the EC firmware as simple as possible, since - * it cannot be easily upgraded and EC flash/IRAM space is relatively - * expensive. 
*/ #ifndef __CROS_EC_LPC_MEC_H #define __CROS_EC_LPC_MEC_H -#include - enum cros_ec_lpc_mec_emi_access_mode { /* 8-bit access */ ACCESS_TYPE_BYTE = 0x0, @@ -45,27 +27,23 @@ enum cros_ec_lpc_mec_io_type { MEC_IO_WRITE, }; -/* Access IO ranges 0x800 thru 0x9ff using EMI interface instead of LPC */ -#define MEC_EMI_RANGE_START EC_HOST_CMD_REGION0 -#define MEC_EMI_RANGE_END (EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE) - /* EMI registers are relative to base */ -#define MEC_EMI_BASE 0x800 -#define MEC_EMI_HOST_TO_EC (MEC_EMI_BASE + 0) -#define MEC_EMI_EC_TO_HOST (MEC_EMI_BASE + 1) -#define MEC_EMI_EC_ADDRESS_B0 (MEC_EMI_BASE + 2) -#define MEC_EMI_EC_ADDRESS_B1 (MEC_EMI_BASE + 3) -#define MEC_EMI_EC_DATA_B0 (MEC_EMI_BASE + 4) -#define MEC_EMI_EC_DATA_B1 (MEC_EMI_BASE + 5) -#define MEC_EMI_EC_DATA_B2 (MEC_EMI_BASE + 6) -#define MEC_EMI_EC_DATA_B3 (MEC_EMI_BASE + 7) +#define MEC_EMI_HOST_TO_EC(MEC_EMI_BASE) ((MEC_EMI_BASE) + 0) +#define MEC_EMI_EC_TO_HOST(MEC_EMI_BASE) ((MEC_EMI_BASE) + 1) +#define MEC_EMI_EC_ADDRESS_B0(MEC_EMI_BASE) ((MEC_EMI_BASE) + 2) +#define MEC_EMI_EC_ADDRESS_B1(MEC_EMI_BASE) ((MEC_EMI_BASE) + 3) +#define MEC_EMI_EC_DATA_B0(MEC_EMI_BASE) ((MEC_EMI_BASE) + 4) +#define MEC_EMI_EC_DATA_B1(MEC_EMI_BASE) ((MEC_EMI_BASE) + 5) +#define MEC_EMI_EC_DATA_B2(MEC_EMI_BASE) ((MEC_EMI_BASE) + 6) +#define MEC_EMI_EC_DATA_B3(MEC_EMI_BASE) ((MEC_EMI_BASE) + 7) -/* - * cros_ec_lpc_mec_init +/** + * cros_ec_lpc_mec_init() - Initialize MEC I/O. * - * Initialize MEC I/O. + * @base: MEC EMI Base address + * @end: MEC EMI End address */ -void cros_ec_lpc_mec_init(void); +void cros_ec_lpc_mec_init(unsigned int base, unsigned int end); /* * cros_ec_lpc_mec_destroy @@ -74,6 +52,17 @@ void cros_ec_lpc_mec_init(void); */ void cros_ec_lpc_mec_destroy(void); +/** + * cros_ec_lpc_mec_in_range() - Determine if addresses are in MEC EMI range. + * + * @offset: Address offset + * @length: Number of bytes to check + * + * Return: 1 if in range, 0 if not, and -EINVAL on failure + * such as the mec range not being initialized + */ +int cros_ec_lpc_mec_in_range(unsigned int offset, unsigned int length); + /** * cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port * diff --git a/drivers/platform/chrome/cros_ec_lpc_reg.c b/drivers/platform/chrome/cros_ec_lpc_reg.c index fc23d535c404..0f5cd0ac8b49 100644 --- a/drivers/platform/chrome/cros_ec_lpc_reg.c +++ b/drivers/platform/chrome/cros_ec_lpc_reg.c @@ -1,25 +1,7 @@ -/* - * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller - * - * Copyright (C) 2016 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver uses the Chrome OS EC byte-level message-based protocol for - * communicating the keyboard state (which keys are pressed) from a keyboard EC - * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, - * but everything else (including deghosting) is done here. The main - * motivation for this is to keep the EC firmware as simple as possible, since - * it cannot be easily upgraded and EC flash/IRAM space is relatively - * expensive. 
- */ +// SPDX-License-Identifier: GPL-2.0 +// LPC interface for ChromeOS Embedded Controller +// +// Copyright (C) 2016 Google, Inc #include #include @@ -59,51 +41,36 @@ static u8 lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg) u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest) { - if (length == 0) - return 0; - - /* Access desired range through EMI interface */ - if (offset >= MEC_EMI_RANGE_START && offset <= MEC_EMI_RANGE_END) { - /* Ensure we don't straddle EMI region */ - if (WARN_ON(offset + length - 1 > MEC_EMI_RANGE_END)) - return 0; + int in_range = cros_ec_lpc_mec_in_range(offset, length); - return cros_ec_lpc_io_bytes_mec(MEC_IO_READ, offset, length, - dest); - } - - if (WARN_ON(offset + length > MEC_EMI_RANGE_START && - offset < MEC_EMI_RANGE_START)) + if (in_range < 0) return 0; - return lpc_read_bytes(offset, length, dest); + return in_range ? + cros_ec_lpc_io_bytes_mec(MEC_IO_READ, + offset - EC_HOST_CMD_REGION0, + length, dest) : + lpc_read_bytes(offset, length, dest); } u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg) { - if (length == 0) - return 0; - - /* Access desired range through EMI interface */ - if (offset >= MEC_EMI_RANGE_START && offset <= MEC_EMI_RANGE_END) { - /* Ensure we don't straddle EMI region */ - if (WARN_ON(offset + length - 1 > MEC_EMI_RANGE_END)) - return 0; + int in_range = cros_ec_lpc_mec_in_range(offset, length); - return cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, offset, length, - msg); - } - - if (WARN_ON(offset + length > MEC_EMI_RANGE_START && - offset < MEC_EMI_RANGE_START)) + if (in_range < 0) return 0; - return lpc_write_bytes(offset, length, msg); + return in_range ? + cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, + offset - EC_HOST_CMD_REGION0, + length, msg) : + lpc_write_bytes(offset, length, msg); } void cros_ec_lpc_reg_init(void) { - cros_ec_lpc_mec_init(); + cros_ec_lpc_mec_init(EC_HOST_CMD_REGION0, + EC_LPC_ADDR_MEMMAP + EC_MEMMAP_SIZE); } void cros_ec_lpc_reg_destroy(void) diff --git a/drivers/platform/chrome/cros_ec_lpc_reg.h b/drivers/platform/chrome/cros_ec_lpc_reg.h index 1c12c38b306a..416fd2572182 100644 --- a/drivers/platform/chrome/cros_ec_lpc_reg.h +++ b/drivers/platform/chrome/cros_ec_lpc_reg.h @@ -1,24 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller + * LPC interface for ChromeOS Embedded Controller * * Copyright (C) 2016 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * This driver uses the Chrome OS EC byte-level message-based protocol for - * communicating the keyboard state (which keys are pressed) from a keyboard EC - * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, - * but everything else (including deghosting) is done here. The main - * motivation for this is to keep the EC firmware as simple as possible, since - * it cannot be easily upgraded and EC flash/IRAM space is relatively - * expensive. 
*/ #ifndef __CROS_EC_LPC_REG_H diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index cc7baf0ecb3c..97a068dff192 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -1,18 +1,7 @@ -/* - * ChromeOS EC communication protocol helper functions - * - * Copyright (C) 2015 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ +// SPDX-License-Identifier: GPL-2.0 +// ChromeOS EC communication protocol helper functions +// +// Copyright (C) 2015 Google, Inc #include #include diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index 2060d1483043..ffc38f9d4829 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -1,17 +1,7 @@ -/* - * ChromeOS EC multi-function device (SPI) - * - * Copyright (C) 2012 Google, Inc - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0 +// SPI interface for ChromeOS Embedded Controller +// +// Copyright (C) 2012 Google, Inc #include #include @@ -685,16 +675,6 @@ static int cros_ec_spi_probe(struct spi_device *spi) return 0; } -static int cros_ec_spi_remove(struct spi_device *spi) -{ - struct cros_ec_device *ec_dev; - - ec_dev = spi_get_drvdata(spi); - cros_ec_remove(ec_dev); - - return 0; -} - #ifdef CONFIG_PM_SLEEP static int cros_ec_spi_suspend(struct device *dev) { @@ -733,11 +713,10 @@ static struct spi_driver cros_ec_driver_spi = { .pm = &cros_ec_spi_pm_ops, }, .probe = cros_ec_spi_probe, - .remove = cros_ec_spi_remove, .id_table = cros_ec_spi_id, }; module_spi_driver(cros_ec_driver_spi); MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("ChromeOS EC multi function device (SPI)"); +MODULE_DESCRIPTION("SPI interface for ChromeOS Embedded Controller"); diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c index f34a50121064..fe0b7614ae1b 100644 --- a/drivers/platform/chrome/cros_ec_sysfs.c +++ b/drivers/platform/chrome/cros_ec_sysfs.c @@ -1,23 +1,7 @@ -/* - * cros_ec_sysfs - expose the Chrome OS EC through sysfs - * - * Copyright (C) 2014 Google, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#define pr_fmt(fmt) "cros_ec_sysfs: " fmt +// SPDX-License-Identifier: GPL-2.0+ +// Expose the ChromeOS EC through sysfs +// +// Copyright (C) 2014 Google, Inc. #include #include @@ -34,6 +18,8 @@ #include #include +#define DRV_NAME "cros-ec-sysfs" + /* Accessor functions */ static ssize_t reboot_show(struct device *dev, @@ -353,7 +339,39 @@ struct attribute_group cros_ec_attr_group = { .attrs = __ec_attrs, .is_visible = cros_ec_ctrl_visible, }; -EXPORT_SYMBOL(cros_ec_attr_group); + +static int cros_ec_sysfs_probe(struct platform_device *pd) +{ + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); + struct device *dev = &pd->dev; + int ret; + + ret = sysfs_create_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group); + if (ret < 0) + dev_err(dev, "failed to create attributes. err=%d\n", ret); + + return ret; +} + +static int cros_ec_sysfs_remove(struct platform_device *pd) +{ + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); + + sysfs_remove_group(&ec_dev->class_dev.kobj, &cros_ec_attr_group); + + return 0; +} + +static struct platform_driver cros_ec_sysfs_driver = { + .driver = { + .name = DRV_NAME, + }, + .probe = cros_ec_sysfs_probe, + .remove = cros_ec_sysfs_remove, +}; + +module_platform_driver(cros_ec_sysfs_driver); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("ChromeOS EC control driver"); +MODULE_DESCRIPTION("Expose the ChromeOS EC through sysfs"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c index 5356f26bc022..8392a1ec33a7 100644 --- a/drivers/platform/chrome/cros_ec_vbc.c +++ b/drivers/platform/chrome/cros_ec_vbc.c @@ -1,29 +1,18 @@ -/* - * cros_ec_vbc - Expose the vboot context nvram to userspace - * - * Copyright (C) 2015 Collabora Ltd. - * - * based on vendor driver, - * - * Copyright (C) 2012 The Chromium OS Authors - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Expose the vboot context nvram to userspace +// +// Copyright (C) 2012 Google, Inc. +// Copyright (C) 2015 Collabora Ltd. 
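The sysfs, lightbar, vbc and debugfs conversions above all share the same shape: a small platform driver whose probe looks up the parent cros_ec_dev and registers a sysfs group on its class device. A minimal sketch of that shared pattern, using a hypothetical "cros-ec-example" group (names are illustrative, not from this series):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
#include <linux/mfd/cros_ec.h>

static struct attribute *cros_ec_example_attrs[] = { NULL };

static const struct attribute_group cros_ec_example_attr_group = {
	.name = "example",
	.attrs = cros_ec_example_attrs,
};

static int cros_ec_example_probe(struct platform_device *pd)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);
	int ret;

	ret = sysfs_create_group(&ec_dev->class_dev.kobj,
				 &cros_ec_example_attr_group);
	if (ret < 0)
		dev_err(&pd->dev, "failed to create attributes. err=%d\n", ret);

	return ret;
}

static int cros_ec_example_remove(struct platform_device *pd)
{
	struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent);

	sysfs_remove_group(&ec_dev->class_dev.kobj,
			   &cros_ec_example_attr_group);
	return 0;
}

static struct platform_driver cros_ec_example_driver = {
	.driver = { .name = "cros-ec-example" },
	.probe = cros_ec_example_probe,
	.remove = cros_ec_example_remove,
};
module_platform_driver(cros_ec_example_driver);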
#include #include #include #include +#include #include +#define DRV_NAME "cros-ec-vbc" + static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj, struct bin_attribute *att, char *buf, loff_t pos, size_t count) @@ -105,21 +94,6 @@ static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj, return data_sz; } -static umode_t cros_ec_vbc_is_visible(struct kobject *kobj, - struct bin_attribute *a, int n) -{ - struct device *dev = container_of(kobj, struct device, kobj); - struct cros_ec_dev *ec = to_cros_ec_dev(dev); - struct device_node *np = ec->ec_dev->dev->of_node; - - if (IS_ENABLED(CONFIG_OF) && np) { - if (of_property_read_bool(np, "google,has-vbc-nvram")) - return a->attr.mode; - } - - return 0; -} - static BIN_ATTR_RW(vboot_context, 16); static struct bin_attribute *cros_ec_vbc_bin_attrs[] = { @@ -130,6 +104,43 @@ static struct bin_attribute *cros_ec_vbc_bin_attrs[] = { struct attribute_group cros_ec_vbc_attr_group = { .name = "vbc", .bin_attrs = cros_ec_vbc_bin_attrs, - .is_bin_visible = cros_ec_vbc_is_visible, }; -EXPORT_SYMBOL(cros_ec_vbc_attr_group); + +static int cros_ec_vbc_probe(struct platform_device *pd) +{ + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); + struct device *dev = &pd->dev; + int ret; + + ret = sysfs_create_group(&ec_dev->class_dev.kobj, + &cros_ec_vbc_attr_group); + if (ret < 0) + dev_err(dev, "failed to create %s attributes. err=%d\n", + cros_ec_vbc_attr_group.name, ret); + + return ret; +} + +static int cros_ec_vbc_remove(struct platform_device *pd) +{ + struct cros_ec_dev *ec_dev = dev_get_drvdata(pd->dev.parent); + + sysfs_remove_group(&ec_dev->class_dev.kobj, + &cros_ec_vbc_attr_group); + + return 0; +} + +static struct platform_driver cros_ec_vbc_driver = { + .driver = { + .name = DRV_NAME, + }, + .probe = cros_ec_vbc_probe, + .remove = cros_ec_vbc_remove, +}; + +module_platform_driver(cros_ec_vbc_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Expose the vboot context nvram to userspace"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c index ca3e4da852b4..aa409f0201fb 100644 --- a/drivers/platform/chrome/cros_kbd_led_backlight.c +++ b/drivers/platform/chrome/cros_kbd_led_backlight.c @@ -1,18 +1,7 @@ -/* - * Keyboard backlight LED driver for Chrome OS. - * - * Copyright (C) 2012 Google, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +// SPDX-License-Identifier: GPL-2.0+ +// Keyboard backlight LED driver for ChromeOS +// +// Copyright (C) 2012 Google, Inc. #include #include diff --git a/drivers/platform/chrome/wilco_ec/Kconfig b/drivers/platform/chrome/wilco_ec/Kconfig new file mode 100644 index 000000000000..e09e4cebe9b4 --- /dev/null +++ b/drivers/platform/chrome/wilco_ec/Kconfig @@ -0,0 +1,20 @@ +config WILCO_EC + tristate "ChromeOS Wilco Embedded Controller" + depends on ACPI && X86 && CROS_EC_LPC && CROS_EC_LPC_MEC + help + If you say Y here, you get support for talking to the ChromeOS + Wilco EC over an eSPI bus. 
This uses a simple byte-level protocol + with a checksum. + + To compile this driver as a module, choose M here: the + module will be called wilco_ec. + +config WILCO_EC_DEBUGFS + tristate "Enable raw access to EC via debugfs" + depends on WILCO_EC + help + If you say Y here, you get support for sending raw commands to + the Wilco EC via debugfs. These commands do not do any byte + manipulation and allow for testing arbitrary commands. This + interface is intended for debug only and will not be present + on production devices. diff --git a/drivers/platform/chrome/wilco_ec/Makefile b/drivers/platform/chrome/wilco_ec/Makefile new file mode 100644 index 000000000000..063e7fb4ea17 --- /dev/null +++ b/drivers/platform/chrome/wilco_ec/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 + +wilco_ec-objs := core.o mailbox.o +obj-$(CONFIG_WILCO_EC) += wilco_ec.o +wilco_ec_debugfs-objs := debugfs.o +obj-$(CONFIG_WILCO_EC_DEBUGFS) += wilco_ec_debugfs.o diff --git a/drivers/platform/chrome/wilco_ec/core.c b/drivers/platform/chrome/wilco_ec/core.c new file mode 100644 index 000000000000..05e1e2be1c91 --- /dev/null +++ b/drivers/platform/chrome/wilco_ec/core.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Core driver for Wilco Embedded Controller + * + * Copyright 2018 Google LLC + * + * This is the entry point for the drivers that control the Wilco EC. + * This driver is responsible for several tasks: + * - Initialize the register interface that is used by wilco_ec_mailbox() + * - Create a platform device which is picked up by the debugfs driver + * - Create a platform device which is picked up by the RTC driver + */ + +#include +#include +#include +#include +#include +#include + +#include "../cros_ec_lpc_mec.h" + +#define DRV_NAME "wilco-ec" + +static struct resource *wilco_get_resource(struct platform_device *pdev, + int index) +{ + struct device *dev = &pdev->dev; + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_IO, index); + if (!res) { + dev_dbg(dev, "Couldn't find IO resource %d\n", index); + return res; + } + + return devm_request_region(dev, res->start, resource_size(res), + dev_name(dev)); +} + +static int wilco_ec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct wilco_ec_device *ec; + int ret; + + ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL); + if (!ec) + return -ENOMEM; + + platform_set_drvdata(pdev, ec); + ec->dev = dev; + mutex_init(&ec->mailbox_lock); + + /* Largest data buffer size requirement is extended data response */ + ec->data_size = sizeof(struct wilco_ec_response) + + EC_MAILBOX_DATA_SIZE_EXTENDED; + ec->data_buffer = devm_kzalloc(dev, ec->data_size, GFP_KERNEL); + if (!ec->data_buffer) + return -ENOMEM; + + /* Prepare access to IO regions provided by ACPI */ + ec->io_data = wilco_get_resource(pdev, 0); /* Host Data */ + ec->io_command = wilco_get_resource(pdev, 1); /* Host Command */ + ec->io_packet = wilco_get_resource(pdev, 2); /* MEC EMI */ + if (!ec->io_data || !ec->io_command || !ec->io_packet) + return -ENODEV; + + /* Initialize cros_ec register interface for communication */ + cros_ec_lpc_mec_init(ec->io_packet->start, + ec->io_packet->start + EC_MAILBOX_DATA_SIZE); + + /* + * Register a child device that will be found by the debugfs driver. + * Ignore failure. + */ + ec->debugfs_pdev = platform_device_register_data(dev, + "wilco-ec-debugfs", + PLATFORM_DEVID_AUTO, + NULL, 0); + + /* Register a child device that will be found by the RTC driver. 
*/ + ec->rtc_pdev = platform_device_register_data(dev, "rtc-wilco-ec", + PLATFORM_DEVID_AUTO, + NULL, 0); + if (IS_ERR(ec->rtc_pdev)) { + dev_err(dev, "Failed to create RTC platform device\n"); + ret = PTR_ERR(ec->rtc_pdev); + goto unregister_debugfs; + } + + return 0; + +unregister_debugfs: + if (ec->debugfs_pdev) + platform_device_unregister(ec->debugfs_pdev); + cros_ec_lpc_mec_destroy(); + return ret; +} + +static int wilco_ec_remove(struct platform_device *pdev) +{ + struct wilco_ec_device *ec = platform_get_drvdata(pdev); + + platform_device_unregister(ec->rtc_pdev); + if (ec->debugfs_pdev) + platform_device_unregister(ec->debugfs_pdev); + + /* Teardown cros_ec interface */ + cros_ec_lpc_mec_destroy(); + + return 0; +} + +static const struct acpi_device_id wilco_ec_acpi_device_ids[] = { + { "GOOG000C", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, wilco_ec_acpi_device_ids); + +static struct platform_driver wilco_ec_driver = { + .driver = { + .name = DRV_NAME, + .acpi_match_table = wilco_ec_acpi_device_ids, + }, + .probe = wilco_ec_probe, + .remove = wilco_ec_remove, +}; + +module_platform_driver(wilco_ec_driver); + +MODULE_AUTHOR("Nick Crews "); +MODULE_AUTHOR("Duncan Laurie "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("ChromeOS Wilco Embedded Controller driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/chrome/wilco_ec/debugfs.c b/drivers/platform/chrome/wilco_ec/debugfs.c new file mode 100644 index 000000000000..c090db2cd5be --- /dev/null +++ b/drivers/platform/chrome/wilco_ec/debugfs.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * debugfs attributes for Wilco EC + * + * Copyright 2019 Google LLC + * + * There is only one attribute used for debugging, called raw. + * You can write a hexadecimal sentence to raw, and that series of bytes + * will be sent to the EC. Then, you can read the bytes of response + * by reading from raw. + * + * For writing: + * Bytes 0-1 indicate the message type: + * 00 F0 = Execute Legacy Command + * 00 F2 = Read/Write NVRAM Property + * Byte 2 provides the command code + * Bytes 3+ consist of the data passed in the request + * + * When referencing the EC interface spec, byte 2 corresponds to MBOX[0], + * byte 3 corresponds to MBOX[1], etc. + * + * At least three bytes are required, for the msg type and command, + * with additional bytes optional for additional data. + * + * Example: + * // Request EC info type 3 (EC firmware build date) + * $ echo 00 f0 38 00 03 00 > raw + * // View the result. The decoded ASCII result "12/21/18" is + * // included after the raw hex. + * $ cat raw + * 00 31 32 2f 32 31 2f 31 38 00 38 00 01 00 2f 00 .12/21/18.8... + */ + +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "wilco-ec-debugfs" + +/* The 256 raw bytes will take up more space when represented as a hex string */ +#define FORMATTED_BUFFER_SIZE (EC_MAILBOX_DATA_SIZE_EXTENDED * 4) + +struct wilco_ec_debugfs { + struct wilco_ec_device *ec; + struct dentry *dir; + size_t response_size; + u8 raw_data[EC_MAILBOX_DATA_SIZE_EXTENDED]; + u8 formatted_data[FORMATTED_BUFFER_SIZE]; +}; +static struct wilco_ec_debugfs *debug_info; + +/** + * parse_hex_sentence() - Convert a ascii hex representation into byte array. + * @in: Input buffer of ascii. + * @isize: Length of input buffer. + * @out: Output buffer. + * @osize: Length of output buffer, e.g. max number of bytes to parse. + * + * An valid input is a series of ascii hexadecimal numbers, separated by spaces. 
+ * An example valid input is + * " 00 f2 0 000076 6 0 ff" + * + * If an individual "word" within the hex sentence is longer than MAX_WORD_SIZE, + * then the sentence is illegal, and parsing will fail. + * + * Return: Number of bytes parsed, or negative error code on failure. + */ +static int parse_hex_sentence(const char *in, int isize, u8 *out, int osize) +{ + int n_parsed = 0; + int word_start = 0; + int word_end; + int word_len; + /* Temp buffer for holding a "word" of chars that represents one byte */ + #define MAX_WORD_SIZE 16 + char tmp[MAX_WORD_SIZE + 1]; + u8 byte; + + while (word_start < isize && n_parsed < osize) { + /* Find the start of the next word */ + while (word_start < isize && isspace(in[word_start])) + word_start++; + /* reached the end of the input before next word? */ + if (word_start >= isize) + break; + + /* Find the end of this word */ + word_end = word_start; + while (word_end < isize && !isspace(in[word_end])) + word_end++; + + /* Copy to a tmp NULL terminated string */ + word_len = word_end - word_start; + if (word_len > MAX_WORD_SIZE) + return -EINVAL; + memcpy(tmp, in + word_start, word_len); + tmp[word_len] = '\0'; + + /* + * Convert from hex string, place in output. If fails to parse, + * just return -EINVAL because specific error code is only + * relevant for this one word, returning it would be confusing. + */ + if (kstrtou8(tmp, 16, &byte)) + return -EINVAL; + out[n_parsed++] = byte; + + word_start = word_end; + } + return n_parsed; +} + +/* The message type takes up two bytes*/ +#define TYPE_AND_DATA_SIZE ((EC_MAILBOX_DATA_SIZE) + 2) + +static ssize_t raw_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char *buf = debug_info->formatted_data; + struct wilco_ec_message msg; + u8 request_data[TYPE_AND_DATA_SIZE]; + ssize_t kcount; + int ret; + + if (count > FORMATTED_BUFFER_SIZE) + return -EINVAL; + + kcount = simple_write_to_buffer(buf, FORMATTED_BUFFER_SIZE, ppos, + user_buf, count); + if (kcount < 0) + return kcount; + + ret = parse_hex_sentence(buf, kcount, request_data, TYPE_AND_DATA_SIZE); + if (ret < 0) + return ret; + /* Need at least two bytes for message type and one for command */ + if (ret < 3) + return -EINVAL; + + /* Clear response data buffer */ + memset(debug_info->raw_data, '\0', EC_MAILBOX_DATA_SIZE_EXTENDED); + + msg.type = request_data[0] << 8 | request_data[1]; + msg.flags = WILCO_EC_FLAG_RAW; + msg.command = request_data[2]; + msg.request_data = ret > 3 ? 
request_data + 3 : 0; + msg.request_size = ret - 3; + msg.response_data = debug_info->raw_data; + msg.response_size = EC_MAILBOX_DATA_SIZE; + + /* Telemetry commands use extended response data */ + if (msg.type == WILCO_EC_MSG_TELEMETRY_LONG) { + msg.flags |= WILCO_EC_FLAG_EXTENDED_DATA; + msg.response_size = EC_MAILBOX_DATA_SIZE_EXTENDED; + } + + ret = wilco_ec_mailbox(debug_info->ec, &msg); + if (ret < 0) + return ret; + debug_info->response_size = ret; + + return count; +} + +static ssize_t raw_read(struct file *file, char __user *user_buf, size_t count, + loff_t *ppos) +{ + int fmt_len = 0; + + if (debug_info->response_size) { + fmt_len = hex_dump_to_buffer(debug_info->raw_data, + debug_info->response_size, + 16, 1, debug_info->formatted_data, + FORMATTED_BUFFER_SIZE, true); + /* Only return response the first time it is read */ + debug_info->response_size = 0; + } + + return simple_read_from_buffer(user_buf, count, ppos, + debug_info->formatted_data, fmt_len); +} + +static const struct file_operations fops_raw = { + .owner = THIS_MODULE, + .read = raw_read, + .write = raw_write, + .llseek = no_llseek, +}; + +/** + * wilco_ec_debugfs_probe() - Create the debugfs node + * @pdev: The platform device, probably created in core.c + * + * Try to create a debugfs node. If it fails, then we don't want to change + * behavior at all, this is for debugging after all. Just fail silently. + * + * Return: 0 always. + */ +static int wilco_ec_debugfs_probe(struct platform_device *pdev) +{ + struct wilco_ec_device *ec = dev_get_drvdata(pdev->dev.parent); + + debug_info = devm_kzalloc(&pdev->dev, sizeof(*debug_info), GFP_KERNEL); + if (!debug_info) + return 0; + debug_info->ec = ec; + debug_info->dir = debugfs_create_dir("wilco_ec", NULL); + if (!debug_info->dir) + return 0; + debugfs_create_file("raw", 0644, debug_info->dir, NULL, &fops_raw); + + return 0; +} + +static int wilco_ec_debugfs_remove(struct platform_device *pdev) +{ + debugfs_remove_recursive(debug_info->dir); + + return 0; +} + +static struct platform_driver wilco_ec_debugfs_driver = { + .driver = { + .name = DRV_NAME, + }, + .probe = wilco_ec_debugfs_probe, + .remove = wilco_ec_debugfs_remove, +}; + +module_platform_driver(wilco_ec_debugfs_driver); + +MODULE_ALIAS("platform:" DRV_NAME); +MODULE_AUTHOR("Nick Crews "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Wilco EC debugfs driver"); diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c new file mode 100644 index 000000000000..f6ff29a11f1a --- /dev/null +++ b/drivers/platform/chrome/wilco_ec/mailbox.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Mailbox interface for Wilco Embedded Controller + * + * Copyright 2018 Google LLC + * + * The Wilco EC is similar to a typical ChromeOS embedded controller. + * It uses the same MEC based low-level communication and a similar + * protocol, but with some important differences. The EC firmware does + * not support the same mailbox commands so it is not registered as a + * cros_ec device type. + * + * Most messages follow a standard format, but there are some exceptions + * and an interface is provided to do direct/raw transactions that do not + * make assumptions about byte placement. 
+ */ + +#include +#include +#include +#include +#include + +#include "../cros_ec_lpc_mec.h" + +/* Version of mailbox interface */ +#define EC_MAILBOX_VERSION 0 + +/* Command to start mailbox transaction */ +#define EC_MAILBOX_START_COMMAND 0xda + +/* Version of EC protocol */ +#define EC_MAILBOX_PROTO_VERSION 3 + +/* Number of header bytes to be counted as data bytes */ +#define EC_MAILBOX_DATA_EXTRA 2 + +/* Maximum timeout */ +#define EC_MAILBOX_TIMEOUT HZ + +/* EC response flags */ +#define EC_CMDR_DATA BIT(0) /* Data ready for host to read */ +#define EC_CMDR_PENDING BIT(1) /* Write pending to EC */ +#define EC_CMDR_BUSY BIT(2) /* EC is busy processing a command */ +#define EC_CMDR_CMD BIT(3) /* Last host write was a command */ + +/** + * wilco_ec_response_timed_out() - Wait for EC response. + * @ec: EC device. + * + * Return: true if EC timed out, false if EC did not time out. + */ +static bool wilco_ec_response_timed_out(struct wilco_ec_device *ec) +{ + unsigned long timeout = jiffies + EC_MAILBOX_TIMEOUT; + + do { + if (!(inb(ec->io_command->start) & + (EC_CMDR_PENDING | EC_CMDR_BUSY))) + return false; + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + return true; +} + +/** + * wilco_ec_checksum() - Compute 8-bit checksum over data range. + * @data: Data to checksum. + * @size: Number of bytes to checksum. + * + * Return: 8-bit checksum of provided data. + */ +static u8 wilco_ec_checksum(const void *data, size_t size) +{ + u8 *data_bytes = (u8 *)data; + u8 checksum = 0; + size_t i; + + for (i = 0; i < size; i++) + checksum += data_bytes[i]; + + return checksum; +} + +/** + * wilco_ec_prepare() - Prepare the request structure for the EC. + * @msg: EC message with request information. + * @rq: EC request structure to fill. + */ +static void wilco_ec_prepare(struct wilco_ec_message *msg, + struct wilco_ec_request *rq) +{ + memset(rq, 0, sizeof(*rq)); + + /* Handle messages without trimming bytes from the request */ + if (msg->request_size && msg->flags & WILCO_EC_FLAG_RAW_REQUEST) { + rq->reserved_raw = *(u8 *)msg->request_data; + msg->request_size--; + memmove(msg->request_data, msg->request_data + 1, + msg->request_size); + } + + /* Fill in request packet */ + rq->struct_version = EC_MAILBOX_PROTO_VERSION; + rq->mailbox_id = msg->type; + rq->mailbox_version = EC_MAILBOX_VERSION; + rq->data_size = msg->request_size + EC_MAILBOX_DATA_EXTRA; + rq->command = msg->command; + + /* Checksum header and data */ + rq->checksum = wilco_ec_checksum(rq, sizeof(*rq)); + rq->checksum += wilco_ec_checksum(msg->request_data, msg->request_size); + rq->checksum = -rq->checksum; +} + +/** + * wilco_ec_transfer() - Perform actual data transfer. + * @ec: EC device. + * @msg: EC message data for request and response. + * @rq: Filled in request structure + * + * Context: ec->mailbox_lock should be held while using this function. + * Return: number of bytes received or negative error code on failure. 
+ */ +static int wilco_ec_transfer(struct wilco_ec_device *ec, + struct wilco_ec_message *msg, + struct wilco_ec_request *rq) +{ + struct wilco_ec_response *rs; + u8 checksum; + u8 flag; + size_t size; + + /* Write request header, then data */ + cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, 0, sizeof(*rq), (u8 *)rq); + cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, sizeof(*rq), msg->request_size, + msg->request_data); + + /* Start the command */ + outb(EC_MAILBOX_START_COMMAND, ec->io_command->start); + + /* For some commands (eg shutdown) the EC will not respond, that's OK */ + if (msg->flags & WILCO_EC_FLAG_NO_RESPONSE) { + dev_dbg(ec->dev, "EC does not respond to this command\n"); + return 0; + } + + /* Wait for it to complete */ + if (wilco_ec_response_timed_out(ec)) { + dev_dbg(ec->dev, "response timed out\n"); + return -ETIMEDOUT; + } + + /* Check result */ + flag = inb(ec->io_data->start); + if (flag) { + dev_dbg(ec->dev, "bad response: 0x%02x\n", flag); + return -EIO; + } + + if (msg->flags & WILCO_EC_FLAG_EXTENDED_DATA) + size = EC_MAILBOX_DATA_SIZE_EXTENDED; + else + size = EC_MAILBOX_DATA_SIZE; + + /* Read back response */ + rs = ec->data_buffer; + checksum = cros_ec_lpc_io_bytes_mec(MEC_IO_READ, 0, + sizeof(*rs) + size, (u8 *)rs); + if (checksum) { + dev_dbg(ec->dev, "bad packet checksum 0x%02x\n", rs->checksum); + return -EBADMSG; + } + + /* Check that the EC reported success */ + msg->result = rs->result; + if (msg->result) { + dev_dbg(ec->dev, "bad response: 0x%02x\n", msg->result); + return -EBADMSG; + } + + /* Check the returned data size, skipping the header */ + if (rs->data_size != size) { + dev_dbg(ec->dev, "unexpected packet size (%u != %zu)", + rs->data_size, size); + return -EMSGSIZE; + } + + /* Skip 1 response data byte unless specified */ + size = (msg->flags & WILCO_EC_FLAG_RAW_RESPONSE) ? 0 : 1; + if ((ssize_t) rs->data_size - size < msg->response_size) { + dev_dbg(ec->dev, "response data too short (%zd < %zu)", + (ssize_t) rs->data_size - size, msg->response_size); + return -EMSGSIZE; + } + + /* Ignore response data bytes as requested */ + memcpy(msg->response_data, rs->data + size, msg->response_size); + + /* Return actual amount of data received */ + return msg->response_size; +} + +/** + * wilco_ec_mailbox() - Send EC request and receive EC response. + * @ec: EC device. + * @msg: EC message data for request and response. + * + * On entry msg->type, msg->flags, msg->command, msg->request_size, + * msg->response_size, and msg->request_data should all be filled in. + * + * On exit msg->result and msg->response_data will be filled. + * + * Return: number of bytes received or negative error code on failure. 
+ */ +int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg) +{ + struct wilco_ec_request *rq; + int ret; + + dev_dbg(ec->dev, "cmd=%02x type=%04x flags=%02x rslen=%zu rqlen=%zu\n", + msg->command, msg->type, msg->flags, msg->response_size, + msg->request_size); + + /* Prepare request packet */ + rq = ec->data_buffer; + wilco_ec_prepare(msg, rq); + + mutex_lock(&ec->mailbox_lock); + ret = wilco_ec_transfer(ec, msg, rq); + mutex_unlock(&ec->mailbox_lock); + + return ret; + +} +EXPORT_SYMBOL_GPL(wilco_ec_mailbox); diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig index 479031aa4f88..74fdfa68d1f2 100644 --- a/drivers/platform/goldfish/Kconfig +++ b/drivers/platform/goldfish/Kconfig @@ -2,7 +2,7 @@ menuconfig GOLDFISH bool "Platform support for Goldfish virtual devices" depends on X86_32 || X86_64 || ARM || ARM64 || MIPS depends on HAS_IOMEM - ---help--- + help Say Y here to get to see options for the Goldfish virtual platform. This option alone does not add any kernel code. @@ -12,7 +12,7 @@ if GOLDFISH config GOLDFISH_PIPE tristate "Goldfish virtual device for QEMU pipes" - ---help--- + help This is a virtual device to drive the QEMU pipe interface used by the Goldfish Android Virtual Device. diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index b6d44550d98c..687ce6817d0d 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -248,7 +248,8 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv, struct mlxreg_core_item *item) { struct mlxreg_core_data *data; - u32 asserted, regval, bit; + unsigned long asserted; + u32 regval, bit; int ret; /* @@ -281,7 +282,7 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv, asserted = item->cache ^ regval; item->cache = regval; - for_each_set_bit(bit, (unsigned long *)&asserted, 8) { + for_each_set_bit(bit, &asserted, 8) { data = item->data + bit; if (regval & BIT(bit)) { if (item->inversed) @@ -495,7 +496,9 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) { struct mlxreg_core_hotplug_platform_data *pdata; struct mlxreg_core_item *item; - int i, ret; + struct mlxreg_core_data *data; + u32 regval; + int i, j, ret; pdata = dev_get_platdata(&priv->pdev->dev); item = pdata->items; @@ -507,6 +510,25 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) if (ret) goto out; + /* + * Verify if hardware configuration requires to disable + * interrupt capability for some of components. + */ + data = item->data; + for (j = 0; j < item->count; j++, data++) { + /* Verify if the attribute has capability register. */ + if (data->capability) { + /* Read capability register. */ + ret = regmap_read(priv->regmap, + data->capability, ®val); + if (ret) + goto out; + + if (!(regval & data->bit)) + item->mask &= ~BIT(j); + } + } + /* Set group initial status as mask and unmask group event. 
*/ if (item->inversed) { item->cache = item->mask; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 5e2109c54c7c..a1ed13183559 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -905,6 +905,7 @@ config TOSHIBA_WMI config ACPI_CMPC tristate "CMPC Laptop Extras" depends on ACPI && INPUT + depends on BACKLIGHT_LCD_SUPPORT depends on RFKILL || RFKILL=n select BACKLIGHT_CLASS_DEVICE help @@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL config SAMSUNG_Q10 tristate "Samsung Q10 Extras" depends on ACPI + depends on BACKLIGHT_LCD_SUPPORT select BACKLIGHT_CLASS_DEVICE ---help--- This driver provides support for backlight control on Samsung Q10 @@ -1301,6 +1303,20 @@ config HUAWEI_WMI To compile this driver as a module, choose M here: the module will be called huawei-wmi. +config PCENGINES_APU2 + tristate "PC Engines APUv2/3 front button and LEDs driver" + depends on INPUT && INPUT_KEYBOARD + depends on LEDS_CLASS + select GPIO_AMD_FCH + select KEYBOARD_GPIO_POLLED + select LEDS_GPIO + help + This driver provides support for the front button and LEDs on + PC Engines APUv2/APUv3 board. + + To compile this driver as a module, choose M here: the module + will be called pcengines-apuv2. + endif # X86_PLATFORM_DEVICES config PMC_ATOM diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index ce8da260c223..86cb76677bc8 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -96,3 +96,4 @@ obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o +obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 37b5de541270..ee1fa93708ec 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2265,12 +2265,12 @@ static int asus_wmi_probe(struct platform_device *pdev) int ret; if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { - pr_warn("Management GUID not found\n"); + pr_warn("ASUS Management GUID not found\n"); return -ENODEV; } if (wdrv->event_guid && !wmi_has_guid(wdrv->event_guid)) { - pr_warn("Event GUID not found\n"); + pr_warn("ASUS Event GUID not found\n"); return -ENODEV; } @@ -2320,11 +2320,6 @@ EXPORT_SYMBOL_GPL(asus_wmi_unregister_driver); static int __init asus_wmi_init(void) { - if (!wmi_has_guid(ASUS_WMI_MGMT_GUID)) { - pr_info("Asus Management GUID not found\n"); - return -ENODEV; - } - pr_info("ASUS WMI generic driver loaded\n"); return 0; } diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c index cf2229ece9ff..c3ed3c8c17b9 100644 --- a/drivers/platform/x86/dell-smbios-wmi.c +++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -277,4 +277,4 @@ void exit_dell_smbios_wmi(void) wmi_driver_unregister(&dell_smbios_wmi_driver); } -MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); +MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table); diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c index 072821aa47fc..14ab250b7d5a 100644 --- a/drivers/platform/x86/dell-wmi-descriptor.c +++ b/drivers/platform/x86/dell-wmi-descriptor.c @@ -207,7 +207,7 @@ static struct wmi_driver dell_wmi_descriptor_driver = { module_wmi_driver(dell_wmi_descriptor_driver); -MODULE_ALIAS("wmi:" DELL_WMI_DESCRIPTOR_GUID); +MODULE_DEVICE_TABLE(wmi, dell_wmi_descriptor_id_table); 
MODULE_AUTHOR("Mario Limonciello "); MODULE_DESCRIPTION("Dell WMI descriptor driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 16c7f3d9a335..d118bb73fcae 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -50,8 +50,6 @@ MODULE_LICENSE("GPL"); static bool wmi_requires_smbios_request; -MODULE_ALIAS("wmi:"DELL_EVENT_GUID); - struct dell_wmi_priv { struct input_dev *input_dev; u32 interface_version; @@ -267,6 +265,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = { /* Fn-lock switched to multimedia keys */ { KE_IGNORE, 0x1, { KEY_RESERVED } }, + /* Keyboard backlight change notification */ + { KE_IGNORE, 0x3f, { KEY_RESERVED } }, + /* Mic mute */ { KE_KEY, 0x150, { KEY_MICMUTE } }, @@ -738,3 +739,5 @@ static void __exit dell_wmi_exit(void) wmi_driver_unregister(&dell_wmi_driver); } module_exit(dell_wmi_exit); + +MODULE_DEVICE_TABLE(wmi, dell_wmi_id_table); diff --git a/drivers/platform/x86/dell_rbu.c b/drivers/platform/x86/dell_rbu.c index ccefa84f7305..031c68903583 100644 --- a/drivers/platform/x86/dell_rbu.c +++ b/drivers/platform/x86/dell_rbu.c @@ -59,7 +59,6 @@ static struct _rbu_data { unsigned long image_update_buffer_size; unsigned long bios_image_size; int image_update_ordernum; - int dma_alloc; spinlock_t lock; unsigned long packet_read_count; unsigned long num_packets; @@ -89,7 +88,6 @@ static struct packet_data packet_data_head; static struct platform_device *rbu_device; static int context; -static dma_addr_t dell_rbu_dmaaddr; static void init_packet_head(void) { @@ -380,12 +378,8 @@ static void img_update_free(void) */ memset(rbu_data.image_update_buffer, 0, rbu_data.image_update_buffer_size); - if (rbu_data.dma_alloc == 1) - dma_free_coherent(NULL, rbu_data.bios_image_size, - rbu_data.image_update_buffer, dell_rbu_dmaaddr); - else - free_pages((unsigned long) rbu_data.image_update_buffer, - rbu_data.image_update_ordernum); + free_pages((unsigned long) rbu_data.image_update_buffer, + rbu_data.image_update_ordernum); /* * Re-initialize the rbu_data variables after a free @@ -394,7 +388,6 @@ static void img_update_free(void) rbu_data.image_update_buffer = NULL; rbu_data.image_update_buffer_size = 0; rbu_data.bios_image_size = 0; - rbu_data.dma_alloc = 0; } /* @@ -410,10 +403,8 @@ static void img_update_free(void) static int img_update_realloc(unsigned long size) { unsigned char *image_update_buffer = NULL; - unsigned long rc; unsigned long img_buf_phys_addr; int ordernum; - int dma_alloc = 0; /* * check if the buffer of sufficient size has been @@ -444,36 +435,23 @@ static int img_update_realloc(unsigned long size) ordernum = get_order(size); image_update_buffer = - (unsigned char *) __get_free_pages(GFP_KERNEL, ordernum); - - img_buf_phys_addr = - (unsigned long) virt_to_phys(image_update_buffer); - - if (img_buf_phys_addr > BIOS_SCAN_LIMIT) { - free_pages((unsigned long) image_update_buffer, ordernum); - ordernum = -1; - image_update_buffer = dma_alloc_coherent(NULL, size, - &dell_rbu_dmaaddr, GFP_KERNEL); - dma_alloc = 1; - } - + (unsigned char *)__get_free_pages(GFP_DMA32, ordernum); spin_lock(&rbu_data.lock); - - if (image_update_buffer != NULL) { - rbu_data.image_update_buffer = image_update_buffer; - rbu_data.image_update_buffer_size = size; - rbu_data.bios_image_size = - rbu_data.image_update_buffer_size; - rbu_data.image_update_ordernum = ordernum; - rbu_data.dma_alloc = dma_alloc; - rc = 0; - } else { + if (!image_update_buffer) { pr_debug("Not enough memory 
for image update:" "size = %ld\n", size); - rc = -ENOMEM; + return -ENOMEM; } - return rc; + img_buf_phys_addr = (unsigned long)virt_to_phys(image_update_buffer); + if (WARN_ON_ONCE(img_buf_phys_addr > BIOS_SCAN_LIMIT)) + return -EINVAL; /* can't happen per definition */ + + rbu_data.image_update_buffer = image_update_buffer; + rbu_data.image_update_buffer_size = size; + rbu_data.bios_image_size = rbu_data.image_update_buffer_size; + rbu_data.image_update_ordernum = ordernum; + return 0; } static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count) diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c index 59872f87b741..52fcac5b393a 100644 --- a/drivers/platform/x86/huawei-wmi.c +++ b/drivers/platform/x86/huawei-wmi.c @@ -201,8 +201,7 @@ static struct wmi_driver huawei_wmi_driver = { module_wmi_driver(huawei_wmi_driver); -MODULE_ALIAS("wmi:"WMI0_EVENT_GUID); -MODULE_ALIAS("wmi:"AMW0_EVENT_GUID); +MODULE_DEVICE_TABLE(wmi, huawei_wmi_id_table); MODULE_AUTHOR("Ayman Bagabas "); MODULE_DESCRIPTION("Huawei WMI hotkeys"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c index 3d893e0ac250..197d8a192721 100644 --- a/drivers/platform/x86/i2c-multi-instantiate.c +++ b/drivers/platform/x86/i2c-multi-instantiate.c @@ -159,6 +159,14 @@ static const struct i2c_inst_data bsg1160_data[] = { {} }; +static const struct i2c_inst_data bsg2150_data[] = { + { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 }, + { "bmc150_magn" }, + /* The resources describe a 3th client, but it is not really there. */ + { "bsg2150_dummy_dev" }, + {} +}; + static const struct i2c_inst_data int3515_data[] = { { "tps6598x", IRQ_RESOURCE_APIC, 0 }, { "tps6598x", IRQ_RESOURCE_APIC, 1 }, @@ -173,6 +181,7 @@ static const struct i2c_inst_data int3515_data[] = { */ static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = { { "BSG1160", (unsigned long)bsg1160_data }, + { "BSG2150", (unsigned long)bsg2150_data }, { "INT3515", (unsigned long)int3515_data }, { } }; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 1589dffab9fa..c53ae86b59c7 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -989,7 +989,7 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { .ident = "Lenovo RESCUER R720-15IKBN", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_BOARD_NAME, "80WW"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"), }, }, { @@ -1090,6 +1090,27 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ISK"), }, }, + { + .ident = "Lenovo ideapad 330-15ICH", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 330-15ICH"), + }, + }, + { + .ident = "Lenovo ideapad 530S-14ARR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 530S-14ARR"), + }, + }, + { + .ident = "Lenovo ideapad S130-14IGM", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad S130-14IGM"), + }, + }, { .ident = "Lenovo ideapad Y700-14ISK", .matches = { @@ -1153,6 +1174,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"), }, }, + { + .ident = "Lenovo Legion Y530-15ICH-1060", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, 
"Lenovo Legion Y530-15ICH-1060"), + }, + }, { .ident = "Lenovo Legion Y720-15IKB", .matches = { @@ -1244,6 +1272,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"), }, }, + { + .ident = "Lenovo YOGA C930-13IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA C930-13IKB"), + }, + }, { .ident = "Lenovo Zhaoyang E42-80", .matches = { diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index e28bcf61b126..bc0d55a59015 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -363,7 +363,7 @@ wakeup: * the 5-button array, but still send notifies with power button * event code to this device object on power button actions. * - * Report the power button press; catch and ignore the button release. + * Report the power button press and release. */ if (!priv->array) { if (event == 0xce) { @@ -372,8 +372,11 @@ wakeup: return; } - if (event == 0xcf) + if (event == 0xcf) { + input_report_key(priv->input_dev, KEY_POWER, 0); + input_sync(priv->input_dev); return; + } } /* 0xC0 is for HID events, other values are for 5 button array */ diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c index 9ded8e2af312..4dfa61434a76 100644 --- a/drivers/platform/x86/intel-wmi-thunderbolt.c +++ b/drivers/platform/x86/intel-wmi-thunderbolt.c @@ -88,7 +88,7 @@ static struct wmi_driver intel_wmi_thunderbolt_driver = { module_wmi_driver(intel_wmi_thunderbolt_driver); -MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID); +MODULE_DEVICE_TABLE(wmi, intel_wmi_thunderbolt_id_table); MODULE_AUTHOR("Mario Limonciello "); MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c index 02bc74608cf3..6fa3cced6f8e 100644 --- a/drivers/platform/x86/intel_cht_int33fe.c +++ b/drivers/platform/x86/intel_cht_int33fe.c @@ -32,7 +32,7 @@ struct cht_int33fe_data { struct i2c_client *fusb302; struct i2c_client *pi3usb30532; /* Contain a list-head must be per device */ - struct device_connection connections[5]; + struct device_connection connections[4]; }; /* @@ -174,16 +174,13 @@ static int cht_int33fe_probe(struct platform_device *pdev) data->connections[0].endpoint[0] = "port0"; data->connections[0].endpoint[1] = "i2c-pi3usb30532"; - data->connections[0].id = "typec-switch"; + data->connections[0].id = "orientation-switch"; data->connections[1].endpoint[0] = "port0"; data->connections[1].endpoint[1] = "i2c-pi3usb30532"; - data->connections[1].id = "typec-mux"; - data->connections[2].endpoint[0] = "port0"; - data->connections[2].endpoint[1] = "i2c-pi3usb30532"; - data->connections[2].id = "idff01m01"; - data->connections[3].endpoint[0] = "i2c-fusb302"; - data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch"; - data->connections[3].id = "usb-role-switch"; + data->connections[1].id = "mode-switch"; + data->connections[2].endpoint[0] = "i2c-fusb302"; + data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch"; + data->connections[2].id = "usb-role-switch"; device_connections_add(data->connections); diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c index 4b8f7305fc8a..1694a9aec77c 100644 --- a/drivers/platform/x86/intel_int0002_vgpio.c +++ b/drivers/platform/x86/intel_int0002_vgpio.c @@ -51,11 +51,14 @@ #define 
GPE0A_STS_PORT 0x420 #define GPE0A_EN_PORT 0x428 -#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } +#define BAYTRAIL 0x01 +#define CHERRYTRAIL 0x02 + +#define ICPU(model, data) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, data } static const struct x86_cpu_id int0002_cpu_ids[] = { - ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */ - ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */ + ICPU(INTEL_FAM6_ATOM_SILVERMONT, BAYTRAIL), /* Valleyview, Bay Trail */ + ICPU(INTEL_FAM6_ATOM_AIRMONT, CHERRYTRAIL), /* Braswell, Cherry Trail */ {} }; @@ -135,7 +138,7 @@ static irqreturn_t int0002_irq(int irq, void *data) return IRQ_HANDLED; } -static struct irq_chip int0002_irqchip = { +static struct irq_chip int0002_byt_irqchip = { .name = DRV_NAME, .irq_ack = int0002_irq_ack, .irq_mask = int0002_irq_mask, @@ -143,10 +146,22 @@ static struct irq_chip int0002_irqchip = { .irq_set_wake = int0002_irq_set_wake, }; +static struct irq_chip int0002_cht_irqchip = { + .name = DRV_NAME, + .irq_ack = int0002_irq_ack, + .irq_mask = int0002_irq_mask, + .irq_unmask = int0002_irq_unmask, + /* + * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI + * and we don't want to mess with the ACPI SCI irq settings. + */ +}; + static int int0002_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct x86_cpu_id *cpu_id; + struct irq_chip *irq_chip; struct gpio_chip *chip; int irq, ret; @@ -195,14 +210,19 @@ static int int0002_probe(struct platform_device *pdev) return ret; } - ret = gpiochip_irqchip_add(chip, &int0002_irqchip, 0, handle_edge_irq, + if (cpu_id->driver_data == BAYTRAIL) + irq_chip = &int0002_byt_irqchip; + else + irq_chip = &int0002_cht_irqchip; + + ret = gpiochip_irqchip_add(chip, irq_chip, 0, handle_edge_irq, IRQ_TYPE_NONE); if (ret) { dev_err(dev, "Error adding irqchip: %d\n", ret); return ret; } - gpiochip_set_chained_irqchip(chip, &int0002_irqchip, irq, NULL); + gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL); return 0; } diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 22dbf115782e..f2c621b55f49 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -22,14 +23,24 @@ #include #include +#include #include "intel_pmc_core.h" -#define ICPU(model, data) \ - { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (kernel_ulong_t)data } - static struct pmc_dev pmc; +/* PKGC MSRs are common across Intel Core SoCs */ +static const struct pmc_bit_map msr_map[] = { + {"Package C2", MSR_PKG_C2_RESIDENCY}, + {"Package C3", MSR_PKG_C3_RESIDENCY}, + {"Package C6", MSR_PKG_C6_RESIDENCY}, + {"Package C7", MSR_PKG_C7_RESIDENCY}, + {"Package C8", MSR_PKG_C8_RESIDENCY}, + {"Package C9", MSR_PKG_C9_RESIDENCY}, + {"Package C10", MSR_PKG_C10_RESIDENCY}, + {} +}; + static const struct pmc_bit_map spt_pll_map[] = { {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0}, {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1}, @@ -108,6 +119,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = { {"SATA", SPT_PMC_LTR_SATA}, {"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE}, {"XHCI", SPT_PMC_LTR_XHCI}, + {"Reserved", SPT_PMC_LTR_RESERVED}, {"ME", SPT_PMC_LTR_ME}, /* EVA is Enterprise Value Add, doesn't really exist on PCH */ {"EVA", SPT_PMC_LTR_EVA}, @@ -131,6 +143,7 @@ static const struct pmc_reg_map spt_reg_map = { .mphy_sts = spt_mphy_map, .pll_sts = spt_pll_map, .ltr_show_sts = spt_ltr_show_map, + 
.msr_sts = msr_map, .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET, .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET, .regmap_length = SPT_PMC_MMIO_REG_LEN, @@ -139,6 +152,7 @@ static const struct pmc_reg_map spt_reg_map = { .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET, .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT, .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED, + .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET, }; /* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */ @@ -168,25 +182,26 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"SDX", BIT(4)}, {"SPE", BIT(5)}, {"Fuse", BIT(6)}, - {"Res_23", BIT(7)}, + /* Reserved for Cannonlake but valid for Icelake */ + {"SBR8", BIT(7)}, {"CSME_FSC", BIT(0)}, {"USB3_OTG", BIT(1)}, {"EXI", BIT(2)}, {"CSE", BIT(3)}, - {"csme_kvm", BIT(4)}, - {"csme_pmt", BIT(5)}, - {"csme_clink", BIT(6)}, - {"csme_ptio", BIT(7)}, - - {"csme_usbr", BIT(0)}, - {"csme_susram", BIT(1)}, - {"csme_smt1", BIT(2)}, + {"CSME_KVM", BIT(4)}, + {"CSME_PMT", BIT(5)}, + {"CSME_CLINK", BIT(6)}, + {"CSME_PTIO", BIT(7)}, + + {"CSME_USBR", BIT(0)}, + {"CSME_SUSRAM", BIT(1)}, + {"CSME_SMT1", BIT(2)}, {"CSME_SMT4", BIT(3)}, - {"csme_sms2", BIT(4)}, - {"csme_sms1", BIT(5)}, - {"csme_rtc", BIT(6)}, - {"csme_psf", BIT(7)}, + {"CSME_SMS2", BIT(4)}, + {"CSME_SMS1", BIT(5)}, + {"CSME_RTC", BIT(6)}, + {"CSME_PSF", BIT(7)}, {"SBR0", BIT(0)}, {"SBR1", BIT(1)}, @@ -203,7 +218,7 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"CNVI", BIT(3)}, {"UFS0", BIT(4)}, {"EMMC", BIT(5)}, - {"Res_6", BIT(6)}, + {"SPF", BIT(6)}, {"SBR6", BIT(7)}, {"SBR7", BIT(0)}, @@ -211,6 +226,20 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"HDA_PGD4", BIT(2)}, {"HDA_PGD5", BIT(3)}, {"HDA_PGD6", BIT(4)}, + /* Reserved for Cannonlake but valid for Icelake */ + {"PSF6", BIT(5)}, + {"PSF7", BIT(6)}, + {"PSF8", BIT(7)}, + + /* Icelake generation onwards only */ + {"RES_65", BIT(0)}, + {"RES_66", BIT(1)}, + {"RES_67", BIT(2)}, + {"TAM", BIT(3)}, + {"GBETSN", BIT(4)}, + {"TBTLSX", BIT(5)}, + {"RES_71", BIT(6)}, + {"RES_72", BIT(7)}, {} }; @@ -276,6 +305,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { {"SATA", CNP_PMC_LTR_SATA}, {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE}, {"XHCI", CNP_PMC_LTR_XHCI}, + {"Reserved", CNP_PMC_LTR_RESERVED}, {"ME", CNP_PMC_LTR_ME}, /* EVA is Enterprise Value Add, doesn't really exist on PCH */ {"EVA", CNP_PMC_LTR_EVA}, @@ -291,6 +321,8 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { {"ISH", CNP_PMC_LTR_ISH}, {"UFSX2", CNP_PMC_LTR_UFSX2}, {"EMMC", CNP_PMC_LTR_EMMC}, + /* Reserved for Cannonlake but valid for Icelake */ + {"WIGIG", ICL_PMC_LTR_WIGIG}, /* Below two cannot be used for LTR_IGNORE */ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT}, {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT}, @@ -302,6 +334,7 @@ static const struct pmc_reg_map cnp_reg_map = { .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, .regmap_length = CNP_PMC_MMIO_REG_LEN, @@ -312,6 +345,22 @@ static const struct pmc_reg_map cnp_reg_map = { .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED, }; +static const struct pmc_reg_map icl_reg_map = { + .pfear_sts = cnp_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slps0_dbg_maps = cnp_slps0_dbg_maps, + .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, + .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + 
.regmap_length = CNP_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, +}; + static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) { return readb(pmcdev->regbase + offset); @@ -328,9 +377,9 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int writel(val, pmcdev->regbase + reg_offset); } -static inline u32 pmc_core_adjust_slp_s0_step(u32 value) +static inline u64 pmc_core_adjust_slp_s0_step(u32 value) { - return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP; + return (u64)value * SPT_PMC_SLP_S0_RES_COUNTER_STEP; } static int pmc_core_dev_state_get(void *data, u64 *val) @@ -380,7 +429,8 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused) index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); - for (index = 0; map[index].name; index++) + for (index = 0; map[index].name && + index < pmcdev->map->ppfear_buckets * 8; index++) pmc_core_display_map(s, index, pf_regs[index / 8], map); return 0; @@ -677,6 +727,25 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused) } DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr); +static int pmc_core_pkgc_show(struct seq_file *s, void *unused) +{ + struct pmc_dev *pmcdev = s->private; + const struct pmc_bit_map *map = pmcdev->map->msr_sts; + u64 pcstate_count; + int index; + + for (index = 0; map[index].name ; index++) { + if (rdmsrl_safe(map[index].bit_mask, &pcstate_count)) + continue; + + seq_printf(s, "%-8s : 0x%llx\n", map[index].name, + pcstate_count); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc); + static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) { debugfs_remove_recursive(pmcdev->dbgfs_dir); @@ -701,7 +770,10 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev) debugfs_create_file("ltr_ignore", 0644, dir, pmcdev, &pmc_core_ltr_ignore_ops); - debugfs_create_file("ltr_show", 0644, dir, pmcdev, &pmc_core_ltr_fops); + debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops); + + debugfs_create_file("package_cstate_show", 0444, dir, pmcdev, + &pmc_core_pkgc_fops); if (pmcdev->map->pll_sts) debugfs_create_file("pll_status", 0444, dir, pmcdev, @@ -735,11 +807,12 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev) #endif /* CONFIG_DEBUG_FS */ static const struct x86_cpu_id intel_pmc_core_ids[] = { - ICPU(INTEL_FAM6_SKYLAKE_MOBILE, &spt_reg_map), - ICPU(INTEL_FAM6_SKYLAKE_DESKTOP, &spt_reg_map), - ICPU(INTEL_FAM6_KABYLAKE_MOBILE, &spt_reg_map), - ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, &spt_reg_map), - ICPU(INTEL_FAM6_CANNONLAKE_MOBILE, &cnp_reg_map), + INTEL_CPU_FAM6(SKYLAKE_MOBILE, spt_reg_map), + INTEL_CPU_FAM6(SKYLAKE_DESKTOP, spt_reg_map), + INTEL_CPU_FAM6(KABYLAKE_MOBILE, spt_reg_map), + INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map), + INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map), + INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map), {} }; @@ -750,6 +823,37 @@ static const struct pci_device_id pmc_pci_ids[] = { { 0, }, }; +/* + * This quirk can be used on those platforms where + * the platform BIOS enforces 24Mhx Crystal to shutdown + * before PMC can assert SLP_S0#. 
+ */ +int quirk_xtal_ignore(const struct dmi_system_id *id) +{ + struct pmc_dev *pmcdev = &pmc; + u32 value; + + value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset); + /* 24MHz Crystal Shutdown Qualification Disable */ + value |= SPT_PMC_VRIC1_XTALSDQDIS; + /* Low Voltage Mode Enable */ + value &= ~SPT_PMC_VRIC1_SLPS0LVEN; + pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value); + return 0; +} + +static const struct dmi_system_id pmc_core_dmi_table[] = { + { + .callback = quirk_xtal_ignore, + .ident = "HP Elite x2 1013 G3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"), + }, + }, + {} +}; + static int __init pmc_core_probe(void) { struct pmc_dev *pmcdev = &pmc; @@ -768,7 +872,7 @@ static int __init pmc_core_probe(void) * Sunrisepoint PCH regmap can't be used. Use Cannonlake PCH regmap * in this case. */ - if (!pci_dev_present(pmc_pci_ids)) + if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids)) pmcdev->map = &cnp_reg_map; if (lpit_read_residency_count_address(&slp_s0_addr)) @@ -791,6 +895,7 @@ static int __init pmc_core_probe(void) return err; } + dmi_check_system(pmc_core_dmi_table); pr_info(" initialized\n"); return 0; } diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index 89554cba5758..88d9c0653a5f 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -25,6 +25,7 @@ #define SPT_PMC_MTPMC_OFFSET 0x20 #define SPT_PMC_MFPMC_OFFSET 0x38 #define SPT_PMC_LTR_IGNORE_OFFSET 0x30C +#define SPT_PMC_VRIC1_OFFSET 0x31c #define SPT_PMC_MPHY_CORE_STS_0 0x1143 #define SPT_PMC_MPHY_CORE_STS_1 0x1142 #define SPT_PMC_MPHY_COM_STS_0 0x1155 @@ -32,7 +33,7 @@ #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64 #define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1) #define MTPMC_MASK 0xffff0000 -#define PPFEAR_MAX_NUM_ENTRIES 5 +#define PPFEAR_MAX_NUM_ENTRIES 12 #define SPT_PPFEAR_NUM_ENTRIES 5 #define SPT_PMC_READ_DISABLE_BIT 0x16 #define SPT_PMC_MSG_FULL_STS_BIT 0x18 @@ -46,6 +47,7 @@ #define SPT_PMC_LTR_SATA 0x368 #define SPT_PMC_LTR_GBE 0x36C #define SPT_PMC_LTR_XHCI 0x370 +#define SPT_PMC_LTR_RESERVED 0x374 #define SPT_PMC_LTR_ME 0x378 #define SPT_PMC_LTR_EVA 0x37C #define SPT_PMC_LTR_SPC 0x380 @@ -135,6 +137,9 @@ enum ppfear_regs { #define SPT_PMC_BIT_MPHY_CMN_LANE2 BIT(2) #define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3) +#define SPT_PMC_VRIC1_SLPS0LVEN BIT(13) +#define SPT_PMC_VRIC1_XTALSDQDIS BIT(22) + /* Cannonlake Power Management Controller register offsets */ #define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4 #define CNP_PMC_PM_CFG_OFFSET 0x1818 @@ -156,6 +161,7 @@ enum ppfear_regs { #define CNP_PMC_LTR_SATA 0x1B68 #define CNP_PMC_LTR_GBE 0x1B6C #define CNP_PMC_LTR_XHCI 0x1B70 +#define CNP_PMC_LTR_RESERVED 0x1B74 #define CNP_PMC_LTR_ME 0x1B78 #define CNP_PMC_LTR_EVA 0x1B7C #define CNP_PMC_LTR_SPC 0x1B80 @@ -176,6 +182,10 @@ enum ppfear_regs { #define LTR_REQ_SNOOP BIT(15) #define LTR_REQ_NONSNOOP BIT(31) +#define ICL_PPFEAR_NUM_ENTRIES 9 +#define ICL_NUM_IP_IGN_ALLOWED 20 +#define ICL_PMC_LTR_WIGIG 0x1BFC + struct pmc_bit_map { const char *name; u32 bit_mask; @@ -208,6 +218,7 @@ struct pmc_reg_map { const struct pmc_bit_map *pll_sts; const struct pmc_bit_map **slps0_dbg_maps; const struct pmc_bit_map *ltr_show_sts; + const struct pmc_bit_map *msr_sts; const u32 slp_s0_offset; const u32 ltr_ignore_offset; const int regmap_length; @@ -217,6 +228,7 @@ struct pmc_reg_map { const int pm_read_disable_bit; const u32 slps0_dbg_offset; const u32 
ltr_ignore_max; + const u32 pm_vric1_offset; }; /** diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index df3fcd36776a..48fa7573e29b 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -25,6 +25,7 @@ #define MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET 0x00 #define MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET 0x01 #define MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET 0x02 +#define MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET 0x03 #define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d #define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e #define MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET 0x1f @@ -33,6 +34,7 @@ #define MLXPLAT_CPLD_LPC_REG_LED3_OFFSET 0x22 #define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23 #define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24 +#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a #define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30 #define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31 #define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32 @@ -67,6 +69,9 @@ #define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee #define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef #define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0 +#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5 +#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6 +#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7 #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda @@ -584,36 +589,48 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_fan_items_data[] = { .label = "fan1", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(0), .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "fan2", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(1), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(1), .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "fan3", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(2), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(2), .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "fan4", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(3), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(3), .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "fan5", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(4), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(4), .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, { .label = "fan6", .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, .mask = BIT(5), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(5), .hpdev.nr = MLXPLAT_CPLD_NR_NONE, }, }; @@ -816,61 +833,90 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_led_data[] = { .label = "fan1:green", .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(0), }, { .label = "fan1:orange", .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(0), }, { .label = "fan2:green", .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(1), }, { .label = "fan2:orange", .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(1), }, { .label = "fan3:green", .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, .mask = 
MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(2), }, { .label = "fan3:orange", .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(2), }, { .label = "fan4:green", .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(3), }, { .label = "fan4:orange", .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(3), }, { .label = "fan5:green", .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET, .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(4), }, { .label = "fan5:orange", .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET, .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(4), }, { .label = "fan6:green", .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET, .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(5), }, { .label = "fan6:orange", .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET, .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET, + .bit = BIT(5), + }, + { + .label = "uid:blue", + .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, }, }; @@ -1099,6 +1145,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .bit = GENMASK(7, 0), .mode = 0444, }, + { + .label = "cpld4_version", + .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, { .label = "reset_long_pb", .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET, @@ -1184,6 +1236,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .bit = 1, .mode = 0444, }, + { + .label = "fan_dir", + .reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION, + .bit = GENMASK(7, 0), + .mode = 0444, + }, }; static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = { @@ -1201,61 +1259,85 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = { .label = "tacho1", .reg = MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = BIT(0), }, { .label = "tacho2", .reg = MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = BIT(1), }, { .label = "tacho3", .reg = MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = BIT(2), }, { .label = "tacho4", .reg = MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = BIT(3), }, { .label = "tacho5", .reg = MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = BIT(4), }, { .label = "tacho6", .reg = MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = BIT(5), }, { .label = "tacho7", .reg = MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = BIT(6), }, { .label = "tacho8", .reg = MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET, + .bit = 
BIT(7), }, { .label = "tacho9", .reg = MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET, + .bit = BIT(0), }, { .label = "tacho10", .reg = MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET, + .bit = BIT(1), }, { .label = "tacho11", .reg = MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET, + .bit = BIT(2), }, { .label = "tacho12", .reg = MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET, .mask = GENMASK(7, 0), + .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET, + .bit = BIT(3), }, }; @@ -1299,6 +1381,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET: case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET: case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET: case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET: case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET: case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET: @@ -1307,6 +1390,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1341,6 +1425,9 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET: case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: return true; } return false; @@ -1352,6 +1439,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET: case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET: case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET: case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET: case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET: case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET: @@ -1360,6 +1448,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET: @@ -1392,6 +1481,9 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET: case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: return true; } return false; @@ -1613,6 +1705,13 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "MSN34"), }, }, + { + .callback = mlxplat_dmi_qmb7xx_matched, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), + DMI_MATCH(DMI_PRODUCT_NAME, "MSN38"), + }, + }, { .callback = mlxplat_dmi_default_matched, .matches = { @@ -1643,6 +1742,12 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { DMI_MATCH(DMI_BOARD_NAME, "VMOD0005"), }, }, + { + .callback 
= mlxplat_dmi_qmb7xx_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0007"), + }, + }, { } }; diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c new file mode 100644 index 000000000000..c1ca931e1fab --- /dev/null +++ b/drivers/platform/x86/pcengines-apuv2.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * PC-Engines APUv2/APUv3 board platform driver + * for gpio buttons and LEDs + * + * Copyright (C) 2018 metux IT consult + * Author: Enrico Weigelt + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * NOTE: this driver only supports APUv2/3 - not APUv1, as this one + * has completely different register layouts + */ + +/* register mappings */ +#define APU2_GPIO_REG_LED1 AMD_FCH_GPIO_REG_GPIO57 +#define APU2_GPIO_REG_LED2 AMD_FCH_GPIO_REG_GPIO58 +#define APU2_GPIO_REG_LED3 AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 +#define APU2_GPIO_REG_MODESW AMD_FCH_GPIO_REG_GPIO32_GE1 +#define APU2_GPIO_REG_SIMSWAP AMD_FCH_GPIO_REG_GPIO33_GE2 + +/* order in which the gpio lines are defined in the register list */ +#define APU2_GPIO_LINE_LED1 0 +#define APU2_GPIO_LINE_LED2 1 +#define APU2_GPIO_LINE_LED3 2 +#define APU2_GPIO_LINE_MODESW 3 +#define APU2_GPIO_LINE_SIMSWAP 4 + +/* gpio device */ + +static int apu2_gpio_regs[] = { + [APU2_GPIO_LINE_LED1] = APU2_GPIO_REG_LED1, + [APU2_GPIO_LINE_LED2] = APU2_GPIO_REG_LED2, + [APU2_GPIO_LINE_LED3] = APU2_GPIO_REG_LED3, + [APU2_GPIO_LINE_MODESW] = APU2_GPIO_REG_MODESW, + [APU2_GPIO_LINE_SIMSWAP] = APU2_GPIO_REG_SIMSWAP, +}; + +static const char * const apu2_gpio_names[] = { + [APU2_GPIO_LINE_LED1] = "front-led1", + [APU2_GPIO_LINE_LED2] = "front-led2", + [APU2_GPIO_LINE_LED3] = "front-led3", + [APU2_GPIO_LINE_MODESW] = "front-button", + [APU2_GPIO_LINE_SIMSWAP] = "simswap", +}; + +static const struct amd_fch_gpio_pdata board_apu2 = { + .gpio_num = ARRAY_SIZE(apu2_gpio_regs), + .gpio_reg = apu2_gpio_regs, + .gpio_names = apu2_gpio_names, +}; + +/* gpio leds device */ + +static const struct gpio_led apu2_leds[] = { + { .name = "apu:green:1" }, + { .name = "apu:green:2" }, + { .name = "apu:green:3" } +}; + +static const struct gpio_led_platform_data apu2_leds_pdata = { + .num_leds = ARRAY_SIZE(apu2_leds), + .leds = apu2_leds, +}; + +struct gpiod_lookup_table gpios_led_table = { + .dev_id = "leds-gpio", + .table = { + GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED1, + NULL, 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED2, + NULL, 1, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3, + NULL, 2, GPIO_ACTIVE_LOW), + } +}; + +/* gpio keyboard device */ + +static struct gpio_keys_button apu2_keys_buttons[] = { + { + .code = KEY_SETUP, + .active_low = 1, + .desc = "front button", + .type = EV_KEY, + .debounce_interval = 10, + .value = 1, + }, +}; + +static const struct gpio_keys_platform_data apu2_keys_pdata = { + .buttons = apu2_keys_buttons, + .nbuttons = ARRAY_SIZE(apu2_keys_buttons), + .poll_interval = 100, + .rep = 0, + .name = "apu2-keys", +}; + +struct gpiod_lookup_table gpios_key_table = { + .dev_id = "gpio-keys-polled", + .table = { + GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW, + NULL, 0, GPIO_ACTIVE_LOW), + } +}; + +/* board setup */ + +/* note: matching works on string prefix, so "apu2" must come before "apu" */ +static const struct dmi_system_id apu_gpio_dmi_table[] __initconst = { + + /* APU2 w/ 
legacy bios < 4.0.8 */ + { + .ident = "apu2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "APU2") + }, + .driver_data = (void *)&board_apu2, + }, + /* APU2 w/ legacy bios >= 4.0.8 */ + { + .ident = "apu2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "apu2") + }, + .driver_data = (void *)&board_apu2, + }, + /* APU2 w/ maainline bios */ + { + .ident = "apu2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu2") + }, + .driver_data = (void *)&board_apu2, + }, + + /* APU3 w/ legacy bios < 4.0.8 */ + { + .ident = "apu3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "APU3") + }, + .driver_data = (void *)&board_apu2, + }, + /* APU3 w/ legacy bios >= 4.0.8 */ + { + .ident = "apu3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "apu3") + }, + .driver_data = (void *)&board_apu2, + }, + /* APU3 w/ mainline bios */ + { + .ident = "apu3", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PC Engines"), + DMI_MATCH(DMI_BOARD_NAME, "PC Engines apu3") + }, + .driver_data = (void *)&board_apu2, + }, + {} +}; + +static struct platform_device *apu_gpio_pdev; +static struct platform_device *apu_leds_pdev; +static struct platform_device *apu_keys_pdev; + +static struct platform_device * __init apu_create_pdev( + const char *name, + const void *pdata, + size_t sz) +{ + struct platform_device *pdev; + + pdev = platform_device_register_resndata(NULL, + name, + PLATFORM_DEVID_NONE, + NULL, + 0, + pdata, + sz); + + if (IS_ERR(pdev)) + pr_err("failed registering %s: %ld\n", name, PTR_ERR(pdev)); + + return pdev; +} + +static int __init apu_board_init(void) +{ + const struct dmi_system_id *id; + + id = dmi_first_match(apu_gpio_dmi_table); + if (!id) { + pr_err("failed to detect apu board via dmi\n"); + return -ENODEV; + } + + gpiod_add_lookup_table(&gpios_led_table); + gpiod_add_lookup_table(&gpios_key_table); + + apu_gpio_pdev = apu_create_pdev( + AMD_FCH_GPIO_DRIVER_NAME, + id->driver_data, + sizeof(struct amd_fch_gpio_pdata)); + + apu_leds_pdev = apu_create_pdev( + "leds-gpio", + &apu2_leds_pdata, + sizeof(apu2_leds_pdata)); + + apu_keys_pdev = apu_create_pdev( + "gpio-keys-polled", + &apu2_keys_pdata, + sizeof(apu2_keys_pdata)); + + return 0; +} + +static void __exit apu_board_exit(void) +{ + gpiod_remove_lookup_table(&gpios_led_table); + gpiod_remove_lookup_table(&gpios_key_table); + + platform_device_unregister(apu_keys_pdev); + platform_device_unregister(apu_leds_pdev); + platform_device_unregister(apu_gpio_pdev); +} + +module_init(apu_board_init); +module_exit(apu_board_exit); + +MODULE_AUTHOR("Enrico Weigelt, metux IT consult "); +MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table); +MODULE_ALIAS("platform:pcengines-apuv2"); +MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME); +MODULE_SOFTDEP("pre: platform:leds-gpio"); +MODULE_SOFTDEP("pre: platform:gpio_keys_polled"); diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index b205b037fd61..4bfbfa3f78e6 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -4392,7 +4392,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context) list_add(&interrupt->list, &dev->interrupts); interrupt->irq.triggering = p->triggering; interrupt->irq.polarity = p->polarity; - 
interrupt->irq.sharable = p->sharable; + interrupt->irq.shareable = p->shareable; interrupt->irq.interrupt_count = 1; interrupt->irq.interrupts[0] = p->interrupts[i]; } @@ -4546,7 +4546,7 @@ static int sony_pic_enable(struct acpi_device *device, memcpy(&resource->res3.data.irq, &irq->irq, sizeof(struct acpi_resource_irq)); /* we requested a shared irq */ - resource->res3.data.irq.sharable = ACPI_SHARED; + resource->res3.data.irq.shareable = ACPI_SHARED; resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG; resource->res4.length = sizeof(struct acpi_resource); @@ -4565,7 +4565,7 @@ static int sony_pic_enable(struct acpi_device *device, memcpy(&resource->res2.data.irq, &irq->irq, sizeof(struct acpi_resource_irq)); /* we requested a shared irq */ - resource->res2.data.irq.sharable = ACPI_SHARED; + resource->res2.data.irq.shareable = ACPI_SHARED; resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG; resource->res3.length = sizeof(struct acpi_resource); @@ -4779,7 +4779,7 @@ static int sony_pic_add(struct acpi_device *device) irq->irq.interrupts[0], irq->irq.triggering, irq->irq.polarity, - irq->irq.sharable); + irq->irq.shareable); spic_dev.cur_irq = irq; break; } diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 8c5d47c0aea6..2d56ff7c8230 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -41,6 +41,20 @@ static const struct ts_dmi_data chuwi_hi8_data = { .properties = chuwi_hi8_props, }; +static const struct property_entry chuwi_hi8_air_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-chuwi-hi8-air.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data chuwi_hi8_air_data = { + .acpi_name = "MSSL1680:00", + .properties = chuwi_hi8_air_props, +}; + static const struct property_entry chuwi_hi8_pro_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 6), PROPERTY_ENTRY_U32("touchscreen-min-y", 3), @@ -58,6 +72,25 @@ static const struct ts_dmi_data chuwi_hi8_pro_data = { .properties = chuwi_hi8_pro_props, }; +static const struct property_entry chuwi_hi10_air_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1981), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1271), + PROPERTY_ENTRY_U32("touchscreen-min-x", 99), + PROPERTY_ENTRY_U32("touchscreen-min-y", 9), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 5), + PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 4), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-air.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data chuwi_hi10_air_data = { + .acpi_name = "MSSL1680:00", + .properties = chuwi_hi10_air_props, +}; + static const struct property_entry chuwi_vi8_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 4), PROPERTY_ENTRY_U32("touchscreen-min-y", 6), @@ -369,6 +402,24 @@ static const struct ts_dmi_data pov_mobii_wintab_p800w_v21_data = { .properties = pov_mobii_wintab_p800w_v21_props, }; +static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 3), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1520), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + 
PROPERTY_ENTRY_STRING("firmware-name", + "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data pov_mobii_wintab_p1006w_v10_data = { + .acpi_name = "MSSL1680:00", + .properties = pov_mobii_wintab_p1006w_v10_props, +}; + static const struct property_entry teclast_x3_plus_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), @@ -497,6 +548,15 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_BIOS_VERSION, "H1D_S806_206"), }, }, + { + /* Chuwi Hi8 Air (CWI543) */ + .driver_data = (void *)&chuwi_hi8_air_data, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + DMI_MATCH(DMI_PRODUCT_NAME, "Hi8 Air"), + }, + }, { /* Chuwi Hi8 Pro (CWI513) */ .driver_data = (void *)&chuwi_hi8_pro_data, @@ -505,6 +565,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"), }, }, + { + /* Chuwi Hi10 Air */ + .driver_data = (void *)&chuwi_hi10_air_data, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), + DMI_MATCH(DMI_PRODUCT_SKU, "P1W6_C109D_B"), + }, + }, { /* Chuwi Vi8 (CWI506) */ .driver_data = (void *)&chuwi_vi8_data, @@ -706,6 +774,17 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"), }, }, + { + /* Point of View mobii wintab p1006w (v1.0) */ + .driver_data = (void *)&pov_mobii_wintab_p1006w_v10_data, + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "BayTrail"), + /* Note 105b is Foxcon's USB/PCI vendor id */ + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "105B"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), + }, + }, { /* Teclast X3 Plus */ .driver_data = (void *)&teclast_x3_plus_data, diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c index c4530ba715e8..8751a13134be 100644 --- a/drivers/platform/x86/wmi-bmof.c +++ b/drivers/platform/x86/wmi-bmof.c @@ -119,7 +119,7 @@ static struct wmi_driver wmi_bmof_driver = { module_wmi_driver(wmi_bmof_driver); -MODULE_ALIAS("wmi:" WMI_BMOF_GUID); +MODULE_DEVICE_TABLE(wmi, wmi_bmof_id_table); MODULE_AUTHOR("Andrew Lutomirski "); MODULE_DESCRIPTION("WMI embedded Binary MOF driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index bea35be68706..7b26b6ccf1a0 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -768,7 +768,10 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver) struct wmi_block *wblock = dev_to_wblock(dev); const struct wmi_device_id *id = wmi_driver->id_table; - while (id->guid_string) { + if (id == NULL) + return 0; + + while (*id->guid_string) { uuid_le driver_guid; if (WARN_ON(uuid_le_to_bin(id->guid_string, &driver_guid))) diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 43d8ed577e70..c79417ca1b3c 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -215,7 +215,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, if (i >= 0) { flags = acpi_dev_irq_flags(gpio->triggering, gpio->polarity, - gpio->sharable); + gpio->shareable); } else { flags = IORESOURCE_DISABLED; } @@ -324,7 +324,7 @@ static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev, if (p->interrupts[i]) __set_bit(p->interrupts[i], map.bits); - flags = 
acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable); + flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable); pnp_register_irq_resource(dev, option_flags, &map, flags); } @@ -348,7 +348,7 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev, } } - flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable); + flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->shareable); pnp_register_irq_resource(dev, option_flags, &map, flags); } @@ -681,7 +681,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev, decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable); irq->triggering = triggering; irq->polarity = polarity; - irq->sharable = shareable; + irq->shareable = shareable; irq->interrupt_count = 1; irq->interrupts[0] = p->start; @@ -689,7 +689,7 @@ static void pnpacpi_encode_irq(struct pnp_dev *dev, (int) p->start, triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge", polarity == ACPI_ACTIVE_LOW ? "low" : "high", - irq->sharable == ACPI_SHARED ? "shared" : "exclusive", + irq->shareable == ACPI_SHARED ? "shared" : "exclusive", irq->descriptor_length); } @@ -711,14 +711,14 @@ static void pnpacpi_encode_ext_irq(struct pnp_dev *dev, extended_irq->producer_consumer = ACPI_CONSUMER; extended_irq->triggering = triggering; extended_irq->polarity = polarity; - extended_irq->sharable = shareable; + extended_irq->shareable = shareable; extended_irq->interrupt_count = 1; extended_irq->interrupts[0] = p->start; pnp_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start, triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge", polarity == ACPI_ACTIVE_LOW ? "low" : "high", - extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive"); + extended_irq->shareable == ACPI_SHARED ? "shared" : "exclusive"); } static void pnpacpi_encode_dma(struct pnp_dev *dev, diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index f44a9ffcc2ab..44ca983a49a1 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -44,6 +44,9 @@ enum reset_type { RESET_TYPE_WATCHDOG = 2, RESET_TYPE_SOFTWARE = 3, RESET_TYPE_USER = 4, + RESET_TYPE_CPU_FAIL = 6, + RESET_TYPE_XTAL_FAIL = 7, + RESET_TYPE_ULP2 = 8, }; static void __iomem *at91_ramc_base[2], *at91_rstc_base; @@ -164,6 +167,15 @@ static void __init at91_reset_status(struct platform_device *pdev) case RESET_TYPE_USER: reason = "user reset"; break; + case RESET_TYPE_CPU_FAIL: + reason = "CPU clock failure detection"; + break; + case RESET_TYPE_XTAL_FAIL: + reason = "32.768 kHz crystal failure detection"; + break; + case RESET_TYPE_ULP2: + reason = "ULP2 reset"; + break; default: reason = "unknown reset"; break; @@ -183,6 +195,7 @@ static const struct of_device_id at91_reset_of_match[] = { { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart }, { .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart }, { .compatible = "atmel,samx7-rstc", .data = samx7_restart }, + { .compatible = "microchip,sam9x60-rstc", .data = samx7_restart }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, at91_reset_of_match); diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index 084c8ba9749d..9ff2461820d8 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -307,22 +307,12 @@ static int fuel_gauge_debug_show(struct seq_file *s, void *data) return 0; } -static int debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, fuel_gauge_debug_show, 
inode->i_private); -} - -static const struct file_operations fg_debug_fops = { - .open = debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(fuel_gauge_debug); static void fuel_gauge_create_debugfs(struct axp288_fg_info *info) { info->debug_file = debugfs_create_file("fuelgauge", 0666, NULL, - info, &fg_debug_fops); + info, &fuel_gauge_debug_fops); } static void fuel_gauge_remove_debugfs(struct axp288_fg_info *info) diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c index 3f6fb49c956c..66991e6f75d9 100644 --- a/drivers/power/supply/bq25890_charger.c +++ b/drivers/power/supply/bq25890_charger.c @@ -436,7 +436,7 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: - val->intval = bq25890_tables[TBL_ICHG].rt.max; + val->intval = bq25890_find_val(bq->init_data.ichg, TBL_ICHG); break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: @@ -454,7 +454,7 @@ static int bq25890_power_supply_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: - val->intval = bq25890_tables[TBL_VREG].rt.max; + val->intval = bq25890_find_val(bq->init_data.vreg, TBL_VREG); break; case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT: diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 6dbbe95844a3..29b3a4056865 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1555,27 +1555,14 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags) return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF); } -/* - * Read flag register. - * Return < 0 if something fails. - */ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) { - int flags; - bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; - - flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); - if (flags < 0) { - dev_err(di->dev, "error reading flag register:%d\n", flags); - return flags; - } - /* Unlikely but important to return first */ - if (unlikely(bq27xxx_battery_overtemp(di, flags))) + if (unlikely(bq27xxx_battery_overtemp(di, di->cache.flags))) return POWER_SUPPLY_HEALTH_OVERHEAT; - if (unlikely(bq27xxx_battery_undertemp(di, flags))) + if (unlikely(bq27xxx_battery_undertemp(di, di->cache.flags))) return POWER_SUPPLY_HEALTH_COLD; - if (unlikely(bq27xxx_battery_dead(di, flags))) + if (unlikely(bq27xxx_battery_dead(di, di->cache.flags))) return POWER_SUPPLY_HEALTH_DEAD; return POWER_SUPPLY_HEALTH_GOOD; @@ -1612,6 +1599,7 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di) cache.capacity = bq27xxx_battery_read_soc(di); if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) cache.energy = bq27xxx_battery_read_energy(di); + di->cache.flags = cache.flags; cache.health = bq27xxx_battery_read_health(di); } if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c index 38be91f21cc4..2e8db5e6de0b 100644 --- a/drivers/power/supply/charger-manager.c +++ b/drivers/power/supply/charger-manager.c @@ -4,7 +4,7 @@ * * This driver enables to monitor battery health and control charger * during suspend-to-mem. - * Charger manager depends on other devices. register this later than + * Charger manager depends on other devices. Register this later than * the depending devices. 
* * This program is free software; you can redistribute it and/or modify @@ -29,7 +29,7 @@ #include /* - * Default termperature threshold for charging. + * Default temperature threshold for charging. * Every temperature units are in tenth of centigrade. */ #define CM_DEFAULT_RECHARGE_TEMP_DIFF 50 @@ -356,7 +356,7 @@ static bool is_polling_required(struct charger_manager *cm) * Note that Charger Manager keeps the charger enabled regardless whether * the charger is charging or not (because battery is full or no external * power source exists) except when CM needs to disable chargers forcibly - * bacause of emergency causes; when the battery is overheated or too cold. + * because of emergency causes; when the battery is overheated or too cold. */ static int try_charger_enable(struct charger_manager *cm, bool enable) { @@ -643,7 +643,7 @@ static int cm_check_thermal_status(struct charger_manager *cm) if (ret) { /* FIXME: * No information of battery temperature might - * occur hazadous result. We have to handle it + * occur hazardous result. We have to handle it * depending on battery type. */ dev_err(cm->dev, "Failed to get battery temperature\n"); @@ -693,7 +693,7 @@ static bool _cm_monitor(struct charger_manager *cm) uevent_notify(cm, default_event_names[temp_alrt]); /* - * Check whole charging duration and discharing duration + * Check whole charging duration and discharging duration * after full-batt. */ } else if (!cm->emergency_stop && check_charging_duration(cm)) { @@ -866,7 +866,7 @@ static void battout_handler(struct charger_manager *cm) } /** - * misc_event_handler - Handler for other evnets + * misc_event_handler - Handler for other events * @cm: the Charger Manager representing the battery. * @type: the Charger Manager representing the battery. */ @@ -1218,7 +1218,7 @@ static int charger_extcon_init(struct charger_manager *cm, } /** - * charger_manager_register_extcon - Register extcon device to recevie state + * charger_manager_register_extcon - Register extcon device to receive state * of charger cable. * @cm: the Charger Manager representing the battery. * @@ -1538,7 +1538,7 @@ static struct charger_desc *of_cm_parse_desc(struct device *dev) of_property_read_u32(np, "cm-discharging-max", &desc->discharging_max_duration_ms); - /* battery charger regualtors */ + /* battery charger regulators */ desc->num_charger_regulators = of_get_child_count(np); if (desc->num_charger_regulators) { struct charger_regulator *chg_regs; @@ -1801,7 +1801,7 @@ static int charger_manager_probe(struct platform_device *pdev) /* * Charger-manager have to check the charging state right after - * tialization of charger-manager and then update current charging + * initialization of charger-manager and then update current charging * state. 
*/ cm_monitor(); diff --git a/drivers/power/supply/cpcap-charger.c b/drivers/power/supply/cpcap-charger.c index c843eaff8ad0..c3ed7b476676 100644 --- a/drivers/power/supply/cpcap-charger.c +++ b/drivers/power/supply/cpcap-charger.c @@ -458,6 +458,7 @@ static void cpcap_usb_detect(struct work_struct *work) goto out_err; } + power_supply_changed(ddata->usb); return; out_err: diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c index 019c58493e3d..04b0fe7d7d62 100644 --- a/drivers/power/supply/ds2782_battery.c +++ b/drivers/power/supply/ds2782_battery.c @@ -319,17 +319,17 @@ static void ds278x_power_supply_init(struct power_supply_desc *battery) static int ds278x_battery_remove(struct i2c_client *client) { struct ds278x_info *info = i2c_get_clientdata(client); + int id = info->id; power_supply_unregister(info->battery); + cancel_delayed_work_sync(&info->bat_work); kfree(info->battery_desc.name); + kfree(info); mutex_lock(&battery_lock); - idr_remove(&battery_id, info->id); + idr_remove(&battery_id, id); mutex_unlock(&battery_lock); - cancel_delayed_work(&info->bat_work); - - kfree(info); return 0; } diff --git a/drivers/power/supply/goldfish_battery.c b/drivers/power/supply/goldfish_battery.c index f5c525e4482a..ad969d9fc981 100644 --- a/drivers/power/supply/goldfish_battery.c +++ b/drivers/power/supply/goldfish_battery.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL /* * Power supply driver for the goldfish emulator * @@ -5,15 +6,6 @@ * Copyright (C) 2012 Intel, Inc. * Copyright (C) 2013 Intel, Inc. * Author: Mike Lockwood - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include @@ -40,27 +32,30 @@ struct goldfish_battery_data { #define GOLDFISH_BATTERY_WRITE(data, addr, x) \ (writel(x, data->reg_base + addr)) -/* - * Temporary variable used between goldfish_battery_probe() and - * goldfish_battery_open(). 
- */ -static struct goldfish_battery_data *battery_data; - enum { /* status register */ - BATTERY_INT_STATUS = 0x00, + BATTERY_INT_STATUS = 0x00, /* set this to enable IRQ */ - BATTERY_INT_ENABLE = 0x04, - - BATTERY_AC_ONLINE = 0x08, - BATTERY_STATUS = 0x0C, - BATTERY_HEALTH = 0x10, - BATTERY_PRESENT = 0x14, - BATTERY_CAPACITY = 0x18, + BATTERY_INT_ENABLE = 0x04, + + BATTERY_AC_ONLINE = 0x08, + BATTERY_STATUS = 0x0C, + BATTERY_HEALTH = 0x10, + BATTERY_PRESENT = 0x14, + BATTERY_CAPACITY = 0x18, + BATTERY_VOLTAGE = 0x1C, + BATTERY_TEMP = 0x20, + BATTERY_CHARGE_COUNTER = 0x24, + BATTERY_VOLTAGE_MAX = 0x28, + BATTERY_CURRENT_MAX = 0x2C, + BATTERY_CURRENT_NOW = 0x30, + BATTERY_CURRENT_AVG = 0x34, + BATTERY_CHARGE_FULL_UAH = 0x38, + BATTERY_CYCLE_COUNT = 0x40, BATTERY_STATUS_CHANGED = 1U << 0, AC_STATUS_CHANGED = 1U << 1, - BATTERY_INT_MASK = BATTERY_STATUS_CHANGED | AC_STATUS_CHANGED, + BATTERY_INT_MASK = BATTERY_STATUS_CHANGED | AC_STATUS_CHANGED, }; @@ -75,6 +70,12 @@ static int goldfish_ac_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_ONLINE: val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_AC_ONLINE); break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX: + val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_VOLTAGE_MAX); + break; + case POWER_SUPPLY_PROP_CURRENT_MAX: + val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CURRENT_MAX); + break; default: ret = -EINVAL; break; @@ -105,6 +106,29 @@ static int goldfish_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CAPACITY: val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CAPACITY); break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_VOLTAGE); + break; + case POWER_SUPPLY_PROP_TEMP: + val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_TEMP); + break; + case POWER_SUPPLY_PROP_CHARGE_COUNTER: + val->intval = GOLDFISH_BATTERY_READ(data, + BATTERY_CHARGE_COUNTER); + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: + val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CURRENT_NOW); + break; + case POWER_SUPPLY_PROP_CURRENT_AVG: + val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CURRENT_AVG); + break; + case POWER_SUPPLY_PROP_CHARGE_FULL: + val->intval = GOLDFISH_BATTERY_READ(data, + BATTERY_CHARGE_FULL_UAH); + break; + case POWER_SUPPLY_PROP_CYCLE_COUNT: + val->intval = GOLDFISH_BATTERY_READ(data, BATTERY_CYCLE_COUNT); + break; default: ret = -EINVAL; break; @@ -119,10 +143,19 @@ static enum power_supply_property goldfish_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_CHARGE_COUNTER, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CYCLE_COUNT, }; static enum power_supply_property goldfish_ac_props[] = { POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_MAX, + POWER_SUPPLY_PROP_CURRENT_MAX, }; static irqreturn_t goldfish_battery_interrupt(int irq, void *dev_id) @@ -193,8 +226,9 @@ static int goldfish_battery_probe(struct platform_device *pdev) return -ENODEV; } - ret = devm_request_irq(&pdev->dev, data->irq, goldfish_battery_interrupt, - IRQF_SHARED, pdev->name, data); + ret = devm_request_irq(&pdev->dev, data->irq, + goldfish_battery_interrupt, + IRQF_SHARED, pdev->name, data); if (ret) return ret; @@ -212,7 +246,6 @@ static int goldfish_battery_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, data); - battery_data = data; GOLDFISH_BATTERY_WRITE(data, 
BATTERY_INT_ENABLE, BATTERY_INT_MASK); return 0; @@ -224,7 +257,6 @@ static int goldfish_battery_remove(struct platform_device *pdev) power_supply_unregister(data->battery); power_supply_unregister(data->ac); - battery_data = NULL; return 0; } diff --git a/drivers/power/supply/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c index 95af5f305838..a63cb5dcfa08 100644 --- a/drivers/power/supply/isp1704_charger.c +++ b/drivers/power/supply/isp1704_charger.c @@ -30,13 +30,12 @@ #include #include #include -#include +#include #include #include #include #include -#include /* Vendor specific Power Control register */ #define ISP1704_PWR_CTRL 0x3d @@ -60,6 +59,7 @@ struct isp1704_charger { struct device *dev; struct power_supply *psy; struct power_supply_desc psy_desc; + struct gpio_desc *enable_gpio; struct usb_phy *phy; struct notifier_block nb; struct work_struct work; @@ -81,18 +81,9 @@ static inline int isp1704_write(struct isp1704_charger *isp, u32 reg, u32 val) return usb_phy_io_write(isp->phy, val, reg); } -/* - * Disable/enable the power from the isp1704 if a function for it - * has been provided with platform data. - */ static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on) { - struct isp1704_charger_data *board = isp->dev->platform_data; - - if (board && board->set_power) - board->set_power(on); - else if (board) - gpio_set_value(board->enable_gpio, on); + gpiod_set_value(isp->enable_gpio, on); } /* @@ -405,46 +396,19 @@ static int isp1704_charger_probe(struct platform_device *pdev) int ret = -ENODEV; struct power_supply_config psy_cfg = {}; - struct isp1704_charger_data *pdata = dev_get_platdata(&pdev->dev); - struct device_node *np = pdev->dev.of_node; - - if (np) { - int gpio = of_get_named_gpio(np, "nxp,enable-gpio", 0); - - if (gpio < 0) { - dev_err(&pdev->dev, "missing DT GPIO nxp,enable-gpio\n"); - return gpio; - } - - pdata = devm_kzalloc(&pdev->dev, - sizeof(struct isp1704_charger_data), GFP_KERNEL); - if (!pdata) { - ret = -ENOMEM; - goto fail0; - } - pdata->enable_gpio = gpio; - - dev_info(&pdev->dev, "init gpio %d\n", pdata->enable_gpio); - - ret = devm_gpio_request_one(&pdev->dev, pdata->enable_gpio, - GPIOF_OUT_INIT_HIGH, "isp1704_reset"); - if (ret) { - dev_err(&pdev->dev, "gpio request failed\n"); - goto fail0; - } - } - - if (!pdata) { - dev_err(&pdev->dev, "missing platform data!\n"); - return -ENODEV; - } - - isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL); if (!isp) return -ENOMEM; - if (np) + isp->enable_gpio = devm_gpiod_get(&pdev->dev, "nxp,enable", + GPIOD_OUT_HIGH); + if (IS_ERR(isp->enable_gpio)) { + ret = PTR_ERR(isp->enable_gpio); + dev_err(&pdev->dev, "Could not get reset gpio: %d\n", ret); + return ret; + } + + if (pdev->dev.of_node) isp->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0); else isp->phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2); diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c index 2a8d75e5e930..581c6bd23388 100644 --- a/drivers/power/supply/max17042_battery.c +++ b/drivers/power/supply/max17042_battery.c @@ -995,6 +995,13 @@ static const struct power_supply_desc max17042_no_current_sense_psy_desc = { .num_properties = ARRAY_SIZE(max17042_battery_props) - 2, }; +static void max17042_stop_work(void *data) +{ + struct max17042_chip *chip = data; + + cancel_work_sync(&chip->work); +} + static int max17042_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1101,6 +1108,9 @@ static int max17042_probe(struct i2c_client *client, 
regmap_read(chip->regmap, MAX17042_STATUS, &val); if (val & STATUS_POR_BIT) { INIT_WORK(&chip->work, max17042_init_worker); + ret = devm_add_action(&client->dev, max17042_stop_work, chip); + if (ret) + return ret; schedule_work(&chip->work); } else { chip->init_complete = 1; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 569790ea6917..c917a8b43b2b 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -156,8 +156,6 @@ static void power_supply_deferred_register_work(struct work_struct *work) } #ifdef CONFIG_OF -#include - static int __power_supply_populate_supplied_from(struct device *dev, void *data) { @@ -575,6 +573,7 @@ int power_supply_get_battery_info(struct power_supply *psy, info->energy_full_design_uwh = -EINVAL; info->charge_full_design_uah = -EINVAL; info->voltage_min_design_uv = -EINVAL; + info->voltage_max_design_uv = -EINVAL; info->precharge_current_ua = -EINVAL; info->charge_term_current_ua = -EINVAL; info->constant_charge_current_max_ua = -EINVAL; @@ -615,6 +614,8 @@ int power_supply_get_battery_info(struct power_supply *psy, &info->charge_full_design_uah); of_property_read_u32(battery_np, "voltage-min-design-microvolt", &info->voltage_min_design_uv); + of_property_read_u32(battery_np, "voltage-max-design-microvolt", + &info->voltage_max_design_uv); of_property_read_u32(battery_np, "precharge-current-microamp", &info->precharge_current_ua); of_property_read_u32(battery_np, "charge-term-current-microamp", diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index 76da1895b782..24895cc3b41e 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -72,6 +72,7 @@ * @lock: protect the structure * @gpiod: GPIO for battery detection * @channel: IIO channel to get battery temperature + * @charge_chan: IIO channel to get charge voltage * @internal_resist: the battery internal resistance in mOhm * @total_cap: the total capacity of the battery in mAh * @init_cap: the initial capacity of the battery in mAh @@ -92,6 +93,7 @@ struct sc27xx_fgu_data { struct mutex lock; struct gpio_desc *gpiod; struct iio_channel *channel; + struct iio_channel *charge_chan; bool bat_present; int internal_resist; int total_cap; @@ -169,10 +171,37 @@ static int sc27xx_fgu_save_boot_mode(struct sc27xx_fgu_data *data, if (ret) return ret; + /* + * Since the user area registers are put on power always-on region, + * then these registers changing time will be a little long. Thus + * here we should delay 200us to wait until values are updated + * successfully according to the datasheet. + */ + udelay(200); + + ret = regmap_update_bits(data->regmap, + data->base + SC27XX_FGU_USER_AREA_SET, + SC27XX_FGU_MODE_AREA_MASK, + boot_mode << SC27XX_FGU_MODE_AREA_SHIFT); + if (ret) + return ret; + + /* + * Since the user area registers are put on power always-on region, + * then these registers changing time will be a little long. Thus + * here we should delay 200us to wait until values are updated + * successfully according to the datasheet. + */ + udelay(200); + + /* + * According to the datasheet, we should set the USER_AREA_CLEAR to 0 to + * make the user area data available, otherwise we can not save the user + * area data. 
+ */ return regmap_update_bits(data->regmap, - data->base + SC27XX_FGU_USER_AREA_SET, - SC27XX_FGU_MODE_AREA_MASK, - boot_mode << SC27XX_FGU_MODE_AREA_SHIFT); + data->base + SC27XX_FGU_USER_AREA_CLEAR, + SC27XX_FGU_MODE_AREA_MASK, 0); } static int sc27xx_fgu_save_last_cap(struct sc27xx_fgu_data *data, int cap) @@ -186,9 +215,36 @@ static int sc27xx_fgu_save_last_cap(struct sc27xx_fgu_data *data, int cap) if (ret) return ret; + /* + * Since the user area registers are put on power always-on region, + * then these registers changing time will be a little long. Thus + * here we should delay 200us to wait until values are updated + * successfully according to the datasheet. + */ + udelay(200); + + ret = regmap_update_bits(data->regmap, + data->base + SC27XX_FGU_USER_AREA_SET, + SC27XX_FGU_CAP_AREA_MASK, cap); + if (ret) + return ret; + + /* + * Since the user area registers are put on power always-on region, + * then these registers changing time will be a little long. Thus + * here we should delay 200us to wait until values are updated + * successfully according to the datasheet. + */ + udelay(200); + + /* + * According to the datasheet, we should set the USER_AREA_CLEAR to 0 to + * make the user area data available, otherwise we can not save the user + * area data. + */ return regmap_update_bits(data->regmap, - data->base + SC27XX_FGU_USER_AREA_SET, - SC27XX_FGU_CAP_AREA_MASK, cap); + data->base + SC27XX_FGU_USER_AREA_CLEAR, + SC27XX_FGU_CAP_AREA_MASK, 0); } static int sc27xx_fgu_read_last_cap(struct sc27xx_fgu_data *data, int *cap) @@ -391,6 +447,18 @@ static int sc27xx_fgu_get_vbat_ocv(struct sc27xx_fgu_data *data, int *val) return 0; } +static int sc27xx_fgu_get_charge_vol(struct sc27xx_fgu_data *data, int *val) +{ + int ret, vol; + + ret = iio_read_channel_processed(data->charge_chan, &vol); + if (ret < 0) + return ret; + + *val = vol * 1000; + return 0; +} + static int sc27xx_fgu_get_temp(struct sc27xx_fgu_data *data, int *temp) { return iio_read_channel_processed(data->channel, temp); @@ -502,6 +570,14 @@ static int sc27xx_fgu_get_property(struct power_supply *psy, val->intval = value; break; + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + ret = sc27xx_fgu_get_charge_vol(data, &value); + if (ret) + goto error; + + val->intval = value; + break; + case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_CURRENT_AVG: ret = sc27xx_fgu_get_current(data, &value); @@ -567,6 +643,7 @@ static enum power_supply_property sc27xx_fgu_props[] = { POWER_SUPPLY_PROP_VOLTAGE_OCV, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, }; static const struct power_supply_desc sc27xx_fgu_desc = { @@ -708,7 +785,7 @@ static int sc27xx_fgu_cap_to_clbcnt(struct sc27xx_fgu_data *data, int capacity) * Convert current capacity (mAh) to coulomb counter according to the * formula: 1 mAh =3.6 coulomb. 
*/ - return DIV_ROUND_CLOSEST(cur_cap * 36, 10); + return DIV_ROUND_CLOSEST(cur_cap * 36 * data->cur_1000ma_adc, 10); } static int sc27xx_fgu_calibration(struct sc27xx_fgu_data *data) @@ -907,6 +984,12 @@ static int sc27xx_fgu_probe(struct platform_device *pdev) return PTR_ERR(data->channel); } + data->charge_chan = devm_iio_channel_get(&pdev->dev, "charge-vol"); + if (IS_ERR(data->charge_chan)) { + dev_err(&pdev->dev, "failed to get charge IIO channel\n"); + return PTR_ERR(data->charge_chan); + } + data->gpiod = devm_gpiod_get(&pdev->dev, "bat-detect", GPIOD_IN); if (IS_ERR(data->gpiod)) { dev_err(&pdev->dev, "failed to get battery detection GPIO\n"); diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index 0e202d4273fb..4299873a1118 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -809,7 +809,9 @@ static int twl4030_bci_get_property(struct power_supply *psy, is_charging = state & TWL4030_MSTATEC_AC; if (!is_charging) { u8 s; - twl4030_bci_read(TWL4030_BCIMDEN, &s); + ret = twl4030_bci_read(TWL4030_BCIMDEN, &s); + if (ret < 0) + return ret; if (psy->desc->type == POWER_SUPPLY_TYPE_USB) is_charging = s & 1; else diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index 6cdb2c14eee4..4347f15165f8 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -1156,6 +1156,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(KABYLAKE_MOBILE, rapl_defaults_core), INTEL_CPU_FAM6(KABYLAKE_DESKTOP, rapl_defaults_core), INTEL_CPU_FAM6(CANNONLAKE_MOBILE, rapl_defaults_core), + INTEL_CPU_FAM6(ICELAKE_MOBILE, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt), INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht), @@ -1164,6 +1165,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core), INTEL_CPU_FAM6(ATOM_GOLDMONT_X, rapl_defaults_core), + INTEL_CPU_FAM6(ATOM_TREMONT_X, rapl_defaults_core), INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server), INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server), diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index d137c480db46..7fe18636915a 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -43,7 +43,7 @@ config PTP_1588_CLOCK_DTE config PTP_1588_CLOCK_QORIQ tristate "Freescale QorIQ 1588 timer as PTP clock" - depends on GIANFAR || FSL_DPAA_ETH + depends on GIANFAR || FSL_DPAA_ETH || FSL_ENETC || FSL_ENETC_VF depends on PTP_1588_CLOCK default y help @@ -53,7 +53,7 @@ config PTP_1588_CLOCK_QORIQ packets using the SO_TIMESTAMPING API. To compile this driver as a module, choose M here: the module - will be called ptp_qoriq. + will be called ptp-qoriq. 
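The PTP_1588_CLOCK_QORIQ help text above points consumers at the SO_TIMESTAMPING socket API. As a rough sketch only (not taken from this patch, and assuming the network interface has already been switched into hardware-timestamping mode, for example through the SIOCSHWTSTAMP ioctl), a userspace program would typically request hardware timestamps like this:

    /* Request hardware TX/RX timestamps, reported in raw hardware time. */
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static int enable_hw_timestamping(int sock)
    {
            int flags = SOF_TIMESTAMPING_TX_HARDWARE |
                        SOF_TIMESTAMPING_RX_HARDWARE |
                        SOF_TIMESTAMPING_RAW_HARDWARE;

            return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
                              &flags, sizeof(flags));
    }

The timestamps themselves are then delivered as SCM_TIMESTAMPING control messages, on the socket's error queue for transmitted packets and alongside received packets for RX.
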
config PTP_1588_CLOCK_IXP46X tristate "Intel IXP46x as PTP clock" diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index 19efa9cfa950..677d1d178a3e 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -9,4 +9,6 @@ obj-$(CONFIG_PTP_1588_CLOCK_DTE) += ptp_dte.o obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o obj-$(CONFIG_PTP_1588_CLOCK_PCH) += ptp_pch.o obj-$(CONFIG_PTP_1588_CLOCK_KVM) += ptp_kvm.o -obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp_qoriq.o +obj-$(CONFIG_PTP_1588_CLOCK_QORIQ) += ptp-qoriq.o +ptp-qoriq-y += ptp_qoriq.o +ptp-qoriq-$(CONFIG_DEBUG_FS) += ptp_qoriq_debugfs.o diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 48f3594a7458..79bd102c9bbc 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -124,7 +124,7 @@ static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp) return err; } -static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx) +static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); struct ptp_clock_info *ops; diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c index fdd49c26bbcc..53775362aac6 100644 --- a/drivers/ptp/ptp_qoriq.c +++ b/drivers/ptp/ptp_qoriq.c @@ -22,7 +22,6 @@ #include #include -#include #include #include #include @@ -37,152 +36,188 @@ * Register access functions */ -/* Caller must hold qoriq_ptp->lock. */ -static u64 tmr_cnt_read(struct qoriq_ptp *qoriq_ptp) +/* Caller must hold ptp_qoriq->lock. */ +static u64 tmr_cnt_read(struct ptp_qoriq *ptp_qoriq) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u64 ns; u32 lo, hi; - lo = qoriq_read(®s->ctrl_regs->tmr_cnt_l); - hi = qoriq_read(®s->ctrl_regs->tmr_cnt_h); + lo = ptp_qoriq->read(®s->ctrl_regs->tmr_cnt_l); + hi = ptp_qoriq->read(®s->ctrl_regs->tmr_cnt_h); ns = ((u64) hi) << 32; ns |= lo; return ns; } -/* Caller must hold qoriq_ptp->lock. */ -static void tmr_cnt_write(struct qoriq_ptp *qoriq_ptp, u64 ns) +/* Caller must hold ptp_qoriq->lock. */ +static void tmr_cnt_write(struct ptp_qoriq *ptp_qoriq, u64 ns) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u32 hi = ns >> 32; u32 lo = ns & 0xffffffff; - qoriq_write(®s->ctrl_regs->tmr_cnt_l, lo); - qoriq_write(®s->ctrl_regs->tmr_cnt_h, hi); + ptp_qoriq->write(®s->ctrl_regs->tmr_cnt_l, lo); + ptp_qoriq->write(®s->ctrl_regs->tmr_cnt_h, hi); } -/* Caller must hold qoriq_ptp->lock. */ -static void set_alarm(struct qoriq_ptp *qoriq_ptp) +/* Caller must hold ptp_qoriq->lock. */ +static void set_alarm(struct ptp_qoriq *ptp_qoriq) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; u64 ns; u32 lo, hi; - ns = tmr_cnt_read(qoriq_ptp) + 1500000000ULL; + ns = tmr_cnt_read(ptp_qoriq) + 1500000000ULL; ns = div_u64(ns, 1000000000UL) * 1000000000ULL; - ns -= qoriq_ptp->tclk_period; + ns -= ptp_qoriq->tclk_period; hi = ns >> 32; lo = ns & 0xffffffff; - qoriq_write(®s->alarm_regs->tmr_alarm1_l, lo); - qoriq_write(®s->alarm_regs->tmr_alarm1_h, hi); + ptp_qoriq->write(®s->alarm_regs->tmr_alarm1_l, lo); + ptp_qoriq->write(®s->alarm_regs->tmr_alarm1_h, hi); } -/* Caller must hold qoriq_ptp->lock. */ -static void set_fipers(struct qoriq_ptp *qoriq_ptp) +/* Caller must hold ptp_qoriq->lock. 
*/ +static void set_fipers(struct ptp_qoriq *ptp_qoriq) { - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; - set_alarm(qoriq_ptp); - qoriq_write(®s->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); - qoriq_write(®s->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); + set_alarm(ptp_qoriq); + ptp_qoriq->write(®s->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1); + ptp_qoriq->write(®s->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2); +} + +static int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, + bool update_event) +{ + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; + struct ptp_clock_event event; + void __iomem *reg_etts_l; + void __iomem *reg_etts_h; + u32 valid, stat, lo, hi; + + switch (index) { + case 0: + valid = ETS1_VLD; + reg_etts_l = ®s->etts_regs->tmr_etts1_l; + reg_etts_h = ®s->etts_regs->tmr_etts1_h; + break; + case 1: + valid = ETS2_VLD; + reg_etts_l = ®s->etts_regs->tmr_etts2_l; + reg_etts_h = ®s->etts_regs->tmr_etts2_h; + break; + default: + return -EINVAL; + } + + event.type = PTP_CLOCK_EXTTS; + event.index = index; + + do { + lo = ptp_qoriq->read(reg_etts_l); + hi = ptp_qoriq->read(reg_etts_h); + + if (update_event) { + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(ptp_qoriq->clock, &event); + } + + stat = ptp_qoriq->read(®s->ctrl_regs->tmr_stat); + } while (ptp_qoriq->extts_fifo_support && (stat & valid)); + + return 0; } /* * Interrupt service routine */ -static irqreturn_t isr(int irq, void *priv) +irqreturn_t ptp_qoriq_isr(int irq, void *priv) { - struct qoriq_ptp *qoriq_ptp = priv; - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = priv; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; struct ptp_clock_event event; u64 ns; - u32 ack = 0, lo, hi, mask, val; + u32 ack = 0, lo, hi, mask, val, irqs; + + spin_lock(&ptp_qoriq->lock); + + val = ptp_qoriq->read(®s->ctrl_regs->tmr_tevent); + mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask); + + spin_unlock(&ptp_qoriq->lock); - val = qoriq_read(®s->ctrl_regs->tmr_tevent); + irqs = val & mask; - if (val & ETS1) { + if (irqs & ETS1) { ack |= ETS1; - hi = qoriq_read(®s->etts_regs->tmr_etts1_h); - lo = qoriq_read(®s->etts_regs->tmr_etts1_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 0; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(qoriq_ptp->clock, &event); + extts_clean_up(ptp_qoriq, 0, true); } - if (val & ETS2) { + if (irqs & ETS2) { ack |= ETS2; - hi = qoriq_read(®s->etts_regs->tmr_etts2_h); - lo = qoriq_read(®s->etts_regs->tmr_etts2_l); - event.type = PTP_CLOCK_EXTTS; - event.index = 1; - event.timestamp = ((u64) hi) << 32; - event.timestamp |= lo; - ptp_clock_event(qoriq_ptp->clock, &event); + extts_clean_up(ptp_qoriq, 1, true); } - if (val & ALM2) { + if (irqs & ALM2) { ack |= ALM2; - if (qoriq_ptp->alarm_value) { + if (ptp_qoriq->alarm_value) { event.type = PTP_CLOCK_ALARM; event.index = 0; - event.timestamp = qoriq_ptp->alarm_value; - ptp_clock_event(qoriq_ptp->clock, &event); + event.timestamp = ptp_qoriq->alarm_value; + ptp_clock_event(ptp_qoriq->clock, &event); } - if (qoriq_ptp->alarm_interval) { - ns = qoriq_ptp->alarm_value + qoriq_ptp->alarm_interval; + if (ptp_qoriq->alarm_interval) { + ns = ptp_qoriq->alarm_value + ptp_qoriq->alarm_interval; hi = ns >> 32; lo = ns & 0xffffffff; - spin_lock(&qoriq_ptp->lock); - qoriq_write(®s->alarm_regs->tmr_alarm2_l, lo); - qoriq_write(®s->alarm_regs->tmr_alarm2_h, hi); - 
spin_unlock(&qoriq_ptp->lock); - qoriq_ptp->alarm_value = ns; + ptp_qoriq->write(®s->alarm_regs->tmr_alarm2_l, lo); + ptp_qoriq->write(®s->alarm_regs->tmr_alarm2_h, hi); + ptp_qoriq->alarm_value = ns; } else { - qoriq_write(®s->ctrl_regs->tmr_tevent, ALM2); - spin_lock(&qoriq_ptp->lock); - mask = qoriq_read(®s->ctrl_regs->tmr_temask); + spin_lock(&ptp_qoriq->lock); + mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask); mask &= ~ALM2EN; - qoriq_write(®s->ctrl_regs->tmr_temask, mask); - spin_unlock(&qoriq_ptp->lock); - qoriq_ptp->alarm_value = 0; - qoriq_ptp->alarm_interval = 0; + ptp_qoriq->write(®s->ctrl_regs->tmr_temask, mask); + spin_unlock(&ptp_qoriq->lock); + ptp_qoriq->alarm_value = 0; + ptp_qoriq->alarm_interval = 0; } } - if (val & PP1) { + if (irqs & PP1) { ack |= PP1; event.type = PTP_CLOCK_PPS; - ptp_clock_event(qoriq_ptp->clock, &event); + ptp_clock_event(ptp_qoriq->clock, &event); } if (ack) { - qoriq_write(®s->ctrl_regs->tmr_tevent, ack); + ptp_qoriq->write(®s->ctrl_regs->tmr_tevent, ack); return IRQ_HANDLED; } else return IRQ_NONE; } +EXPORT_SYMBOL_GPL(ptp_qoriq_isr); /* * PTP clock operations */ -static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { u64 adj, diff; u32 tmr_add; int neg_adj = 0; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; if (scaled_ppm < 0) { neg_adj = 1; scaled_ppm = -scaled_ppm; } - tmr_add = qoriq_ptp->tmr_add; + tmr_add = ptp_qoriq->tmr_add; adj = tmr_add; /* calculate diff as adj*(scaled_ppm/65536)/1000000 @@ -194,73 +229,76 @@ static int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; - qoriq_write(®s->ctrl_regs->tmr_add, tmr_add); + ptp_qoriq->write(®s->ctrl_regs->tmr_add, tmr_add); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_adjfine); -static int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta) +int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta) { s64 now; unsigned long flags; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); - spin_lock_irqsave(&qoriq_ptp->lock, flags); + spin_lock_irqsave(&ptp_qoriq->lock, flags); - now = tmr_cnt_read(qoriq_ptp); + now = tmr_cnt_read(ptp_qoriq); now += delta; - tmr_cnt_write(qoriq_ptp, now); - set_fipers(qoriq_ptp); + tmr_cnt_write(ptp_qoriq, now); + set_fipers(ptp_qoriq); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_adjtime); -static int ptp_qoriq_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) +int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; unsigned long flags; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); - spin_lock_irqsave(&qoriq_ptp->lock, flags); + spin_lock_irqsave(&ptp_qoriq->lock, flags); - ns = tmr_cnt_read(qoriq_ptp); + ns = tmr_cnt_read(ptp_qoriq); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); *ts = ns_to_timespec64(ns); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_gettime); -static int ptp_qoriq_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) +int ptp_qoriq_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) { u64 ns; unsigned long flags; - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); ns = timespec64_to_ns(ts); - spin_lock_irqsave(&qoriq_ptp->lock, flags); + spin_lock_irqsave(&ptp_qoriq->lock, flags); - tmr_cnt_write(qoriq_ptp, ns); - set_fipers(qoriq_ptp); + tmr_cnt_write(ptp_qoriq, ns); + set_fipers(ptp_qoriq); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_settime); -static int ptp_qoriq_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) +int ptp_qoriq_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) { - struct qoriq_ptp *qoriq_ptp = container_of(ptp, struct qoriq_ptp, caps); - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; + struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps); + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; unsigned long flags; - u32 bit, mask; + u32 bit, mask = 0; switch (rq->type) { case PTP_CLK_REQ_EXTTS: @@ -274,33 +312,34 @@ static int ptp_qoriq_enable(struct ptp_clock_info *ptp, default: return -EINVAL; } - spin_lock_irqsave(&qoriq_ptp->lock, flags); - mask = qoriq_read(®s->ctrl_regs->tmr_temask); - if (on) - mask |= bit; - else - mask &= ~bit; - qoriq_write(®s->ctrl_regs->tmr_temask, mask); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); - return 0; - case PTP_CLK_REQ_PPS: - spin_lock_irqsave(&qoriq_ptp->lock, flags); - mask = qoriq_read(®s->ctrl_regs->tmr_temask); if (on) - mask |= PP1EN; - else - mask &= ~PP1EN; - qoriq_write(®s->ctrl_regs->tmr_temask, mask); - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); - return 0; + 
extts_clean_up(ptp_qoriq, rq->extts.index, false); - default: break; + case PTP_CLK_REQ_PPS: + bit = PP1EN; + break; + default: + return -EOPNOTSUPP; + } + + spin_lock_irqsave(&ptp_qoriq->lock, flags); + + mask = ptp_qoriq->read(®s->ctrl_regs->tmr_temask); + if (on) { + mask |= bit; + ptp_qoriq->write(®s->ctrl_regs->tmr_tevent, bit); + } else { + mask &= ~bit; } - return -EOPNOTSUPP; + ptp_qoriq->write(®s->ctrl_regs->tmr_temask, mask); + + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); + return 0; } +EXPORT_SYMBOL_GPL(ptp_qoriq_enable); static const struct ptp_clock_info ptp_qoriq_caps = { .owner = THIS_MODULE, @@ -319,7 +358,7 @@ static const struct ptp_clock_info ptp_qoriq_caps = { }; /** - * qoriq_ptp_nominal_freq - calculate nominal frequency according to + * ptp_qoriq_nominal_freq - calculate nominal frequency according to * reference clock frequency * * @clk_src: reference clock frequency @@ -330,7 +369,7 @@ static const struct ptp_clock_info ptp_qoriq_caps = { * * Return the nominal frequency */ -static u32 qoriq_ptp_nominal_freq(u32 clk_src) +static u32 ptp_qoriq_nominal_freq(u32 clk_src) { u32 remainder = 0; @@ -350,9 +389,9 @@ static u32 qoriq_ptp_nominal_freq(u32 clk_src) } /** - * qoriq_ptp_auto_config - calculate a set of default configurations + * ptp_qoriq_auto_config - calculate a set of default configurations * - * @qoriq_ptp: pointer to qoriq_ptp + * @ptp_qoriq: pointer to ptp_qoriq * @node: pointer to device_node * * If below dts properties are not provided, this function will be @@ -366,7 +405,7 @@ static u32 qoriq_ptp_nominal_freq(u32 clk_src) * * Return 0 if success */ -static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, +static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq, struct device_node *node) { struct clk *clk; @@ -376,7 +415,7 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, u32 remainder = 0; u32 clk_src = 0; - qoriq_ptp->cksel = DEFAULT_CKSEL; + ptp_qoriq->cksel = DEFAULT_CKSEL; clk = of_clk_get(node, 0); if (!IS_ERR(clk)) { @@ -389,12 +428,12 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, return -EINVAL; } - nominal_freq = qoriq_ptp_nominal_freq(clk_src); + nominal_freq = ptp_qoriq_nominal_freq(clk_src); if (!nominal_freq) return -EINVAL; - qoriq_ptp->tclk_period = 1000000000UL / nominal_freq; - qoriq_ptp->tmr_prsc = DEFAULT_TMR_PRSC; + ptp_qoriq->tclk_period = 1000000000UL / nominal_freq; + ptp_qoriq->tmr_prsc = DEFAULT_TMR_PRSC; /* Calculate initial frequency compensation value for TMR_ADD register. 
* freq_comp = ceil(2^32 / freq_ratio) @@ -405,164 +444,193 @@ static int qoriq_ptp_auto_config(struct qoriq_ptp *qoriq_ptp, if (remainder) freq_comp++; - qoriq_ptp->tmr_add = freq_comp; - qoriq_ptp->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - qoriq_ptp->tclk_period; - qoriq_ptp->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - qoriq_ptp->tclk_period; + ptp_qoriq->tmr_add = freq_comp; + ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period; + ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period; /* max_adj = 1000000000 * (freq_ratio - 1.0) - 1 * freq_ratio = reference_clock_freq / nominal_freq */ max_adj = 1000000000ULL * (clk_src - nominal_freq); max_adj = div_u64(max_adj, nominal_freq) - 1; - qoriq_ptp->caps.max_adj = max_adj; + ptp_qoriq->caps.max_adj = max_adj; return 0; } -static int qoriq_ptp_probe(struct platform_device *dev) +int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base, + const struct ptp_clock_info *caps) { - struct device_node *node = dev->dev.of_node; - struct qoriq_ptp *qoriq_ptp; - struct qoriq_ptp_registers *regs; + struct device_node *node = ptp_qoriq->dev->of_node; + struct ptp_qoriq_registers *regs; struct timespec64 now; - int err = -ENOMEM; - u32 tmr_ctrl; unsigned long flags; - void __iomem *base; - - qoriq_ptp = kzalloc(sizeof(*qoriq_ptp), GFP_KERNEL); - if (!qoriq_ptp) - goto no_memory; + u32 tmr_ctrl; - err = -EINVAL; + ptp_qoriq->base = base; + ptp_qoriq->caps = *caps; - qoriq_ptp->caps = ptp_qoriq_caps; + if (of_property_read_u32(node, "fsl,cksel", &ptp_qoriq->cksel)) + ptp_qoriq->cksel = DEFAULT_CKSEL; - if (of_property_read_u32(node, "fsl,cksel", &qoriq_ptp->cksel)) - qoriq_ptp->cksel = DEFAULT_CKSEL; + if (of_property_read_bool(node, "fsl,extts-fifo")) + ptp_qoriq->extts_fifo_support = true; + else + ptp_qoriq->extts_fifo_support = false; if (of_property_read_u32(node, - "fsl,tclk-period", &qoriq_ptp->tclk_period) || + "fsl,tclk-period", &ptp_qoriq->tclk_period) || of_property_read_u32(node, - "fsl,tmr-prsc", &qoriq_ptp->tmr_prsc) || + "fsl,tmr-prsc", &ptp_qoriq->tmr_prsc) || of_property_read_u32(node, - "fsl,tmr-add", &qoriq_ptp->tmr_add) || + "fsl,tmr-add", &ptp_qoriq->tmr_add) || of_property_read_u32(node, - "fsl,tmr-fiper1", &qoriq_ptp->tmr_fiper1) || + "fsl,tmr-fiper1", &ptp_qoriq->tmr_fiper1) || of_property_read_u32(node, - "fsl,tmr-fiper2", &qoriq_ptp->tmr_fiper2) || + "fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) || of_property_read_u32(node, - "fsl,max-adj", &qoriq_ptp->caps.max_adj)) { + "fsl,max-adj", &ptp_qoriq->caps.max_adj)) { pr_warn("device tree node missing required elements, try automatic configuration\n"); - if (qoriq_ptp_auto_config(qoriq_ptp, node)) - goto no_config; + if (ptp_qoriq_auto_config(ptp_qoriq, node)) + return -ENODEV; } - err = -ENODEV; + if (of_property_read_bool(node, "little-endian")) { + ptp_qoriq->read = qoriq_read_le; + ptp_qoriq->write = qoriq_write_le; + } else { + ptp_qoriq->read = qoriq_read_be; + ptp_qoriq->write = qoriq_write_be; + } + + /* The eTSEC uses differnt memory map with DPAA/ENETC */ + if (of_device_is_compatible(node, "fsl,etsec-ptp")) { + ptp_qoriq->regs.ctrl_regs = base + ETSEC_CTRL_REGS_OFFSET; + ptp_qoriq->regs.alarm_regs = base + ETSEC_ALARM_REGS_OFFSET; + ptp_qoriq->regs.fiper_regs = base + ETSEC_FIPER_REGS_OFFSET; + ptp_qoriq->regs.etts_regs = base + ETSEC_ETTS_REGS_OFFSET; + } else { + ptp_qoriq->regs.ctrl_regs = base + CTRL_REGS_OFFSET; + ptp_qoriq->regs.alarm_regs = base + ALARM_REGS_OFFSET; + ptp_qoriq->regs.fiper_regs = base + FIPER_REGS_OFFSET; + 
ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET; + } + + ktime_get_real_ts64(&now); + ptp_qoriq_settime(&ptp_qoriq->caps, &now); + + tmr_ctrl = + (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | + (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT; + + spin_lock_init(&ptp_qoriq->lock); + spin_lock_irqsave(&ptp_qoriq->lock, flags); + + regs = &ptp_qoriq->regs; + ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl); + ptp_qoriq->write(®s->ctrl_regs->tmr_add, ptp_qoriq->tmr_add); + ptp_qoriq->write(®s->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc); + ptp_qoriq->write(®s->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1); + ptp_qoriq->write(®s->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2); + set_alarm(ptp_qoriq); + ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl, + tmr_ctrl|FIPERST|RTPE|TE|FRD); + + spin_unlock_irqrestore(&ptp_qoriq->lock, flags); + + ptp_qoriq->clock = ptp_clock_register(&ptp_qoriq->caps, ptp_qoriq->dev); + if (IS_ERR(ptp_qoriq->clock)) + return PTR_ERR(ptp_qoriq->clock); + + ptp_qoriq->phc_index = ptp_clock_index(ptp_qoriq->clock); + ptp_qoriq_create_debugfs(ptp_qoriq); + return 0; +} +EXPORT_SYMBOL_GPL(ptp_qoriq_init); - qoriq_ptp->irq = platform_get_irq(dev, 0); +void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq) +{ + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; + + ptp_qoriq->write(®s->ctrl_regs->tmr_temask, 0); + ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl, 0); + + ptp_qoriq_remove_debugfs(ptp_qoriq); + ptp_clock_unregister(ptp_qoriq->clock); + iounmap(ptp_qoriq->base); + free_irq(ptp_qoriq->irq, ptp_qoriq); +} +EXPORT_SYMBOL_GPL(ptp_qoriq_free); + +static int ptp_qoriq_probe(struct platform_device *dev) +{ + struct ptp_qoriq *ptp_qoriq; + int err = -ENOMEM; + void __iomem *base; + + ptp_qoriq = kzalloc(sizeof(*ptp_qoriq), GFP_KERNEL); + if (!ptp_qoriq) + goto no_memory; - if (qoriq_ptp->irq < 0) { + ptp_qoriq->dev = &dev->dev; + + err = -ENODEV; + + ptp_qoriq->irq = platform_get_irq(dev, 0); + if (ptp_qoriq->irq < 0) { pr_err("irq not in device tree\n"); goto no_node; } - if (request_irq(qoriq_ptp->irq, isr, IRQF_SHARED, DRIVER, qoriq_ptp)) { + if (request_irq(ptp_qoriq->irq, ptp_qoriq_isr, IRQF_SHARED, + DRIVER, ptp_qoriq)) { pr_err("request_irq failed\n"); goto no_node; } - qoriq_ptp->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!qoriq_ptp->rsrc) { + ptp_qoriq->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!ptp_qoriq->rsrc) { pr_err("no resource\n"); goto no_resource; } - if (request_resource(&iomem_resource, qoriq_ptp->rsrc)) { + if (request_resource(&iomem_resource, ptp_qoriq->rsrc)) { pr_err("resource busy\n"); goto no_resource; } - spin_lock_init(&qoriq_ptp->lock); - - base = ioremap(qoriq_ptp->rsrc->start, - resource_size(qoriq_ptp->rsrc)); + base = ioremap(ptp_qoriq->rsrc->start, + resource_size(ptp_qoriq->rsrc)); if (!base) { pr_err("ioremap ptp registers failed\n"); goto no_ioremap; } - qoriq_ptp->base = base; - - if (of_device_is_compatible(node, "fsl,fman-ptp-timer")) { - qoriq_ptp->regs.ctrl_regs = base + FMAN_CTRL_REGS_OFFSET; - qoriq_ptp->regs.alarm_regs = base + FMAN_ALARM_REGS_OFFSET; - qoriq_ptp->regs.fiper_regs = base + FMAN_FIPER_REGS_OFFSET; - qoriq_ptp->regs.etts_regs = base + FMAN_ETTS_REGS_OFFSET; - } else { - qoriq_ptp->regs.ctrl_regs = base + CTRL_REGS_OFFSET; - qoriq_ptp->regs.alarm_regs = base + ALARM_REGS_OFFSET; - qoriq_ptp->regs.fiper_regs = base + FIPER_REGS_OFFSET; - qoriq_ptp->regs.etts_regs = base + ETTS_REGS_OFFSET; - } - - ktime_get_real_ts64(&now); - ptp_qoriq_settime(&qoriq_ptp->caps, &now); - - 
tmr_ctrl = - (qoriq_ptp->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | - (qoriq_ptp->cksel & CKSEL_MASK) << CKSEL_SHIFT; - - spin_lock_irqsave(&qoriq_ptp->lock, flags); - - regs = &qoriq_ptp->regs; - qoriq_write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl); - qoriq_write(®s->ctrl_regs->tmr_add, qoriq_ptp->tmr_add); - qoriq_write(®s->ctrl_regs->tmr_prsc, qoriq_ptp->tmr_prsc); - qoriq_write(®s->fiper_regs->tmr_fiper1, qoriq_ptp->tmr_fiper1); - qoriq_write(®s->fiper_regs->tmr_fiper2, qoriq_ptp->tmr_fiper2); - set_alarm(qoriq_ptp); - qoriq_write(®s->ctrl_regs->tmr_ctrl, tmr_ctrl|FIPERST|RTPE|TE|FRD); - - spin_unlock_irqrestore(&qoriq_ptp->lock, flags); - - qoriq_ptp->clock = ptp_clock_register(&qoriq_ptp->caps, &dev->dev); - if (IS_ERR(qoriq_ptp->clock)) { - err = PTR_ERR(qoriq_ptp->clock); + err = ptp_qoriq_init(ptp_qoriq, base, &ptp_qoriq_caps); + if (err) goto no_clock; - } - qoriq_ptp->phc_index = ptp_clock_index(qoriq_ptp->clock); - - platform_set_drvdata(dev, qoriq_ptp); + platform_set_drvdata(dev, ptp_qoriq); return 0; no_clock: - iounmap(qoriq_ptp->base); + iounmap(ptp_qoriq->base); no_ioremap: - release_resource(qoriq_ptp->rsrc); + release_resource(ptp_qoriq->rsrc); no_resource: - free_irq(qoriq_ptp->irq, qoriq_ptp); -no_config: + free_irq(ptp_qoriq->irq, ptp_qoriq); no_node: - kfree(qoriq_ptp); + kfree(ptp_qoriq); no_memory: return err; } -static int qoriq_ptp_remove(struct platform_device *dev) +static int ptp_qoriq_remove(struct platform_device *dev) { - struct qoriq_ptp *qoriq_ptp = platform_get_drvdata(dev); - struct qoriq_ptp_registers *regs = &qoriq_ptp->regs; - - qoriq_write(®s->ctrl_regs->tmr_temask, 0); - qoriq_write(®s->ctrl_regs->tmr_ctrl, 0); - - ptp_clock_unregister(qoriq_ptp->clock); - iounmap(qoriq_ptp->base); - release_resource(qoriq_ptp->rsrc); - free_irq(qoriq_ptp->irq, qoriq_ptp); - kfree(qoriq_ptp); + struct ptp_qoriq *ptp_qoriq = platform_get_drvdata(dev); + ptp_qoriq_free(ptp_qoriq); + release_resource(ptp_qoriq->rsrc); + kfree(ptp_qoriq); return 0; } @@ -573,16 +641,16 @@ static const struct of_device_id match_table[] = { }; MODULE_DEVICE_TABLE(of, match_table); -static struct platform_driver qoriq_ptp_driver = { +static struct platform_driver ptp_qoriq_driver = { .driver = { .name = "ptp_qoriq", .of_match_table = match_table, }, - .probe = qoriq_ptp_probe, - .remove = qoriq_ptp_remove, + .probe = ptp_qoriq_probe, + .remove = ptp_qoriq_remove, }; -module_platform_driver(qoriq_ptp_driver); +module_platform_driver(ptp_qoriq_driver); MODULE_AUTHOR("Richard Cochran "); MODULE_DESCRIPTION("PTP clock for Freescale QorIQ 1588 timer"); diff --git a/drivers/ptp/ptp_qoriq_debugfs.c b/drivers/ptp/ptp_qoriq_debugfs.c new file mode 100644 index 000000000000..e8dddcedf288 --- /dev/null +++ b/drivers/ptp/ptp_qoriq_debugfs.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright 2019 NXP + */ +#include +#include +#include + +static int ptp_qoriq_fiper1_lpbk_get(void *data, u64 *val) +{ + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; + u32 ctrl; + + ctrl = ptp_qoriq->read(®s->ctrl_regs->tmr_ctrl); + *val = ctrl & PP1L ? 
1 : 0; + + return 0; +} + +static int ptp_qoriq_fiper1_lpbk_set(void *data, u64 val) +{ + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; + u32 ctrl; + + ctrl = ptp_qoriq->read(®s->ctrl_regs->tmr_ctrl); + if (val == 0) + ctrl &= ~PP1L; + else + ctrl |= PP1L; + + ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl, ctrl); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper1_fops, ptp_qoriq_fiper1_lpbk_get, + ptp_qoriq_fiper1_lpbk_set, "%llu\n"); + +static int ptp_qoriq_fiper2_lpbk_get(void *data, u64 *val) +{ + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; + u32 ctrl; + + ctrl = ptp_qoriq->read(®s->ctrl_regs->tmr_ctrl); + *val = ctrl & PP2L ? 1 : 0; + + return 0; +} + +static int ptp_qoriq_fiper2_lpbk_set(void *data, u64 val) +{ + struct ptp_qoriq *ptp_qoriq = data; + struct ptp_qoriq_registers *regs = &ptp_qoriq->regs; + u32 ctrl; + + ctrl = ptp_qoriq->read(®s->ctrl_regs->tmr_ctrl); + if (val == 0) + ctrl &= ~PP2L; + else + ctrl |= PP2L; + + ptp_qoriq->write(®s->ctrl_regs->tmr_ctrl, ctrl); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(ptp_qoriq_fiper2_fops, ptp_qoriq_fiper2_lpbk_get, + ptp_qoriq_fiper2_lpbk_set, "%llu\n"); + +void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq) +{ + struct dentry *root; + + root = debugfs_create_dir(dev_name(ptp_qoriq->dev), NULL); + if (IS_ERR(root)) + return; + if (!root) + goto err_root; + + ptp_qoriq->debugfs_root = root; + + if (!debugfs_create_file_unsafe("fiper1-loopback", 0600, root, + ptp_qoriq, &ptp_qoriq_fiper1_fops)) + goto err_node; + if (!debugfs_create_file_unsafe("fiper2-loopback", 0600, root, + ptp_qoriq, &ptp_qoriq_fiper2_fops)) + goto err_node; + return; + +err_node: + debugfs_remove_recursive(root); + ptp_qoriq->debugfs_root = NULL; +err_root: + dev_err(ptp_qoriq->dev, "failed to initialize debugfs\n"); +} + +void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq) +{ + debugfs_remove_recursive(ptp_qoriq->debugfs_root); + ptp_qoriq->debugfs_root = NULL; +} diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index a8f47df0655a..54f8238aac0d 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -192,14 +192,23 @@ config PWM_IMG To compile this driver as a module, choose M here: the module will be called pwm-img -config PWM_IMX - tristate "i.MX PWM support" +config PWM_IMX1 + tristate "i.MX1 PWM support" depends on ARCH_MXC help - Generic PWM framework driver for i.MX. + Generic PWM framework driver for i.MX1 and i.MX21 To compile this driver as a module, choose M here: the module - will be called pwm-imx. + will be called pwm-imx1. + +config PWM_IMX27 + tristate "i.MX27 PWM support" + depends on ARCH_MXC + help + Generic PWM framework driver for i.MX27 and later i.MX SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-imx27. 
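Both new i.MX drivers plug into the generic PWM framework named in the help text above. As a loose illustration of how a consumer drives such a chip through the atomic API (a sketch assuming a hypothetical device with a "pwms" reference in its device tree node, not code from this series):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pwm.h>

    /* Configure the first PWM bound to @dev: 1 ms period, 25% duty cycle. */
    static int example_pwm_setup(struct device *dev)
    {
            struct pwm_device *pwm;
            struct pwm_state state;

            pwm = devm_pwm_get(dev, NULL);
            if (IS_ERR(pwm))
                    return PTR_ERR(pwm);

            pwm_init_state(pwm, &state);
            state.period = 1000000;      /* nanoseconds */
            state.duty_cycle = 250000;   /* nanoseconds */
            state.enabled = true;

            return pwm_apply_state(pwm, &state);
    }

pwm_apply_state() is also what the drivers/pwm/core.c hunk further down touches: it now compares the individual pwm_state members instead of memcmp()ing the whole structure.
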
config PWM_JZ4740 tristate "Ingenic JZ47xx PWM support" diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 9c676a0dadf5..448825e892bc 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -17,7 +17,8 @@ obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o obj-$(CONFIG_PWM_IMG) += pwm-img.o -obj-$(CONFIG_PWM_IMX) += pwm-imx.o +obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o +obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 1581f6ab1b1f..3149204567f3 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -472,7 +472,10 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state) state->duty_cycle > state->period) return -EINVAL; - if (!memcmp(state, &pwm->state, sizeof(*state))) + if (state->period == pwm->state.period && + state->duty_cycle == pwm->state.duty_cycle && + state->polarity == pwm->state.polarity && + state->enabled == pwm->state.enabled) return 0; if (pwm->chip->ops->apply) { @@ -1033,10 +1036,7 @@ static int pwm_seq_show(struct seq_file *s, void *v) dev_name(chip->dev), chip->npwm, (chip->npwm != 1) ? "s" : ""); - if (chip->ops->dbg_show) - chip->ops->dbg_show(chip, s); - else - pwm_dbg_show(chip, s); + pwm_dbg_show(chip, s); return 0; } diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c index 530d7dc5f1b5..a9fd6f0d408c 100644 --- a/drivers/pwm/pwm-atmel.c +++ b/drivers/pwm/pwm-atmel.c @@ -48,16 +48,6 @@ #define PWMV2_CPRD 0x0C #define PWMV2_CPRDUPD 0x10 -/* - * Max value for duty and period - * - * Although the duty and period register is 32 bit, - * however only the LSB 16 bits are significant. 
- */ -#define PWM_MAX_DTY 0xFFFF -#define PWM_MAX_PRD 0xFFFF -#define PRD_MAX_PRES 10 - struct atmel_pwm_registers { u8 period; u8 period_upd; @@ -65,11 +55,21 @@ struct atmel_pwm_registers { u8 duty_upd; }; +struct atmel_pwm_config { + u32 max_period; + u32 max_pres; +}; + +struct atmel_pwm_data { + struct atmel_pwm_registers regs; + struct atmel_pwm_config cfg; +}; + struct atmel_pwm_chip { struct pwm_chip chip; struct clk *clk; void __iomem *base; - const struct atmel_pwm_registers *regs; + const struct atmel_pwm_data *data; unsigned int updated_pwms; /* ISR is cleared when read, ensure only one thread does that */ @@ -121,10 +121,10 @@ static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip, cycles *= clk_get_rate(atmel_pwm->clk); do_div(cycles, NSEC_PER_SEC); - for (*pres = 0; cycles > PWM_MAX_PRD; cycles >>= 1) + for (*pres = 0; cycles > atmel_pwm->data->cfg.max_period; cycles >>= 1) (*pres)++; - if (*pres > PRD_MAX_PRES) { + if (*pres > atmel_pwm->data->cfg.max_pres) { dev_err(chip->dev, "pres exceeds the maximum value\n"); return -EINVAL; } @@ -150,15 +150,15 @@ static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm, struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); u32 val; - if (atmel_pwm->regs->duty_upd == - atmel_pwm->regs->period_upd) { + if (atmel_pwm->data->regs.duty_upd == + atmel_pwm->data->regs.period_upd) { val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); val &= ~PWM_CMR_UPD_CDTY; atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); } atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, - atmel_pwm->regs->duty_upd, cdty); + atmel_pwm->data->regs.duty_upd, cdty); } static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip, @@ -168,9 +168,9 @@ static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip, struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, - atmel_pwm->regs->duty, cdty); + atmel_pwm->data->regs.duty, cdty); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, - atmel_pwm->regs->period, cprd); + atmel_pwm->data->regs.period, cprd); } static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, @@ -225,7 +225,7 @@ static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, cstate.polarity == state->polarity && cstate.period == state->period) { cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, - atmel_pwm->regs->period); + atmel_pwm->data->regs.period); atmel_pwm_calculate_cdty(state, cprd, &cdty); atmel_pwm_update_cdty(chip, pwm, cdty); return 0; @@ -277,27 +277,55 @@ static const struct pwm_ops atmel_pwm_ops = { .owner = THIS_MODULE, }; -static const struct atmel_pwm_registers atmel_pwm_regs_v1 = { - .period = PWMV1_CPRD, - .period_upd = PWMV1_CUPD, - .duty = PWMV1_CDTY, - .duty_upd = PWMV1_CUPD, +static const struct atmel_pwm_data atmel_sam9rl_pwm_data = { + .regs = { + .period = PWMV1_CPRD, + .period_upd = PWMV1_CUPD, + .duty = PWMV1_CDTY, + .duty_upd = PWMV1_CUPD, + }, + .cfg = { + /* 16 bits to keep period and duty. */ + .max_period = 0xffff, + .max_pres = 10, + }, +}; + +static const struct atmel_pwm_data atmel_sama5_pwm_data = { + .regs = { + .period = PWMV2_CPRD, + .period_upd = PWMV2_CPRDUPD, + .duty = PWMV2_CDTY, + .duty_upd = PWMV2_CDTYUPD, + }, + .cfg = { + /* 16 bits to keep period and duty. 
*/ + .max_period = 0xffff, + .max_pres = 10, + }, }; -static const struct atmel_pwm_registers atmel_pwm_regs_v2 = { - .period = PWMV2_CPRD, - .period_upd = PWMV2_CPRDUPD, - .duty = PWMV2_CDTY, - .duty_upd = PWMV2_CDTYUPD, +static const struct atmel_pwm_data mchp_sam9x60_pwm_data = { + .regs = { + .period = PWMV1_CPRD, + .period_upd = PWMV1_CUPD, + .duty = PWMV1_CDTY, + .duty_upd = PWMV1_CUPD, + }, + .cfg = { + /* 32 bits to keep period and duty. */ + .max_period = 0xffffffff, + .max_pres = 10, + }, }; static const struct platform_device_id atmel_pwm_devtypes[] = { { .name = "at91sam9rl-pwm", - .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v1, + .driver_data = (kernel_ulong_t)&atmel_sam9rl_pwm_data, }, { .name = "sama5d3-pwm", - .driver_data = (kernel_ulong_t)&atmel_pwm_regs_v2, + .driver_data = (kernel_ulong_t)&atmel_sama5_pwm_data, }, { /* sentinel */ }, @@ -307,20 +335,23 @@ MODULE_DEVICE_TABLE(platform, atmel_pwm_devtypes); static const struct of_device_id atmel_pwm_dt_ids[] = { { .compatible = "atmel,at91sam9rl-pwm", - .data = &atmel_pwm_regs_v1, + .data = &atmel_sam9rl_pwm_data, }, { .compatible = "atmel,sama5d3-pwm", - .data = &atmel_pwm_regs_v2, + .data = &atmel_sama5_pwm_data, }, { .compatible = "atmel,sama5d2-pwm", - .data = &atmel_pwm_regs_v2, + .data = &atmel_sama5_pwm_data, + }, { + .compatible = "microchip,sam9x60-pwm", + .data = &mchp_sam9x60_pwm_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids); -static inline const struct atmel_pwm_registers * +static inline const struct atmel_pwm_data * atmel_pwm_get_driver_data(struct platform_device *pdev) { const struct platform_device_id *id; @@ -330,18 +361,18 @@ atmel_pwm_get_driver_data(struct platform_device *pdev) id = platform_get_device_id(pdev); - return (struct atmel_pwm_registers *)id->driver_data; + return (struct atmel_pwm_data *)id->driver_data; } static int atmel_pwm_probe(struct platform_device *pdev) { - const struct atmel_pwm_registers *regs; + const struct atmel_pwm_data *data; struct atmel_pwm_chip *atmel_pwm; struct resource *res; int ret; - regs = atmel_pwm_get_driver_data(pdev); - if (!regs) + data = atmel_pwm_get_driver_data(pdev); + if (!data) return -ENODEV; atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL); @@ -373,7 +404,7 @@ static int atmel_pwm_probe(struct platform_device *pdev) atmel_pwm->chip.base = -1; atmel_pwm->chip.npwm = 4; - atmel_pwm->regs = regs; + atmel_pwm->data = data; atmel_pwm->updated_pwms = 0; mutex_init(&atmel_pwm->isr_lock); diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c index 09a95aeb3a70..81da91df2529 100644 --- a/drivers/pwm/pwm-bcm-kona.c +++ b/drivers/pwm/pwm-bcm-kona.c @@ -45,25 +45,25 @@ * high or low depending on its state at that exact instant. 
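The new microchip,sam9x60-pwm data above differs from the sam9rl/sama5 entries mainly in cfg.max_period (a 32-bit rather than 16-bit counter). As a rough feel for what that buys before any prescaling kicks in, the sketch below computes the longest representable period at an assumed 66 MHz peripheral clock; the clock rate is an illustration, not a value from the driver:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Longest period (in ns) the counter can hold without a prescaler. */
static uint64_t max_period_ns(uint64_t max_counter, uint64_t clk_rate)
{
    return max_counter * NSEC_PER_SEC / clk_rate;
}

int main(void)
{
    uint64_t clk = 66000000;    /* assumed 66 MHz clock */

    printf("16-bit counter: %llu ns max\n",
           (unsigned long long)max_period_ns(0xffff, clk));
    printf("32-bit counter: %llu ns max\n",
           (unsigned long long)max_period_ns(0xffffffffULL, clk));
    return 0;
}

Roughly 1 ms versus about 65 s at that clock, which is why the wider counter gets its own cfg entry rather than reusing the 0xffff limit.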
*/ -#define PWM_CONTROL_OFFSET (0x00000000) +#define PWM_CONTROL_OFFSET 0x00000000 #define PWM_CONTROL_SMOOTH_SHIFT(chan) (24 + (chan)) #define PWM_CONTROL_TYPE_SHIFT(chan) (16 + (chan)) #define PWM_CONTROL_POLARITY_SHIFT(chan) (8 + (chan)) #define PWM_CONTROL_TRIGGER_SHIFT(chan) (chan) -#define PRESCALE_OFFSET (0x00000004) +#define PRESCALE_OFFSET 0x00000004 #define PRESCALE_SHIFT(chan) ((chan) << 2) #define PRESCALE_MASK(chan) (0x7 << PRESCALE_SHIFT(chan)) -#define PRESCALE_MIN (0x00000000) -#define PRESCALE_MAX (0x00000007) +#define PRESCALE_MIN 0x00000000 +#define PRESCALE_MAX 0x00000007 #define PERIOD_COUNT_OFFSET(chan) (0x00000008 + ((chan) << 3)) -#define PERIOD_COUNT_MIN (0x00000002) -#define PERIOD_COUNT_MAX (0x00ffffff) +#define PERIOD_COUNT_MIN 0x00000002 +#define PERIOD_COUNT_MAX 0x00ffffff #define DUTY_CYCLE_HIGH_OFFSET(chan) (0x0000000c + ((chan) << 3)) -#define DUTY_CYCLE_HIGH_MIN (0x00000000) -#define DUTY_CYCLE_HIGH_MAX (0x00ffffff) +#define DUTY_CYCLE_HIGH_MIN 0x00000000 +#define DUTY_CYCLE_HIGH_MAX 0x00ffffff struct kona_pwmc { struct pwm_chip chip; diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c index 27c107e78d59..a0b09603d13d 100644 --- a/drivers/pwm/pwm-hibvt.c +++ b/drivers/pwm/pwm-hibvt.c @@ -49,15 +49,30 @@ struct hibvt_pwm_chip { struct clk *clk; void __iomem *base; struct reset_control *rstc; + const struct hibvt_pwm_soc *soc; }; struct hibvt_pwm_soc { u32 num_pwms; + bool quirk_force_enable; }; -static const struct hibvt_pwm_soc pwm_soc[2] = { - { .num_pwms = 4 }, - { .num_pwms = 8 }, +static const struct hibvt_pwm_soc hi3516cv300_soc_info = { + .num_pwms = 4, +}; + +static const struct hibvt_pwm_soc hi3519v100_soc_info = { + .num_pwms = 8, +}; + +static const struct hibvt_pwm_soc hi3559v100_shub_soc_info = { + .num_pwms = 8, + .quirk_force_enable = true, +}; + +static const struct hibvt_pwm_soc hi3559v100_soc_info = { + .num_pwms = 2, + .quirk_force_enable = true, }; static inline struct hibvt_pwm_chip *to_hibvt_pwm_chip(struct pwm_chip *chip) @@ -148,13 +163,23 @@ static void hibvt_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { + struct hibvt_pwm_chip *hi_pwm_chip = to_hibvt_pwm_chip(chip); + if (state->polarity != pwm->state.polarity) hibvt_pwm_set_polarity(chip, pwm, state->polarity); if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) + state->duty_cycle != pwm->state.duty_cycle) { hibvt_pwm_config(chip, pwm, state->duty_cycle, state->period); + /* + * Some implementations require the PWM to be enabled twice + * each time the duty cycle is refreshed. 
+ */ + if (hi_pwm_chip->soc->quirk_force_enable && state->enabled) + hibvt_pwm_enable(chip, pwm); + } + if (state->enabled != pwm->state.enabled) { if (state->enabled) hibvt_pwm_enable(chip, pwm); @@ -198,6 +223,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev) pwm_chip->chip.npwm = soc->num_pwms; pwm_chip->chip.of_xlate = of_pwm_xlate_with_flags; pwm_chip->chip.of_pwm_n_cells = 3; + pwm_chip->soc = soc; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pwm_chip->base = devm_ioremap_resource(&pdev->dev, res); @@ -250,8 +276,14 @@ static int hibvt_pwm_remove(struct platform_device *pdev) } static const struct of_device_id hibvt_pwm_of_match[] = { - { .compatible = "hisilicon,hi3516cv300-pwm", .data = &pwm_soc[0] }, - { .compatible = "hisilicon,hi3519v100-pwm", .data = &pwm_soc[1] }, + { .compatible = "hisilicon,hi3516cv300-pwm", + .data = &hi3516cv300_soc_info }, + { .compatible = "hisilicon,hi3519v100-pwm", + .data = &hi3519v100_soc_info }, + { .compatible = "hisilicon,hi3559v100-shub-pwm", + .data = &hi3559v100_shub_soc_info }, + { .compatible = "hisilicon,hi3559v100-pwm", + .data = &hi3559v100_soc_info }, { } }; MODULE_DEVICE_TABLE(of, hibvt_pwm_of_match); diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c new file mode 100644 index 000000000000..f8b2c2e001a7 --- /dev/null +++ b/drivers/pwm/pwm-imx1.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * simple driver for PWM (Pulse Width Modulator) controller + * + * Derived from pxa PWM driver by eric miao + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MX1_PWMC 0x00 /* PWM Control Register */ +#define MX1_PWMS 0x04 /* PWM Sample Register */ +#define MX1_PWMP 0x08 /* PWM Period Register */ + +#define MX1_PWMC_EN BIT(4) + +struct pwm_imx1_chip { + struct clk *clk_ipg; + struct clk *clk_per; + void __iomem *mmio_base; + struct pwm_chip chip; +}; + +#define to_pwm_imx1_chip(chip) container_of(chip, struct pwm_imx1_chip, chip) + +static int pwm_imx1_clk_prepare_enable(struct pwm_chip *chip) +{ + struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); + int ret; + + ret = clk_prepare_enable(imx->clk_ipg); + if (ret) + return ret; + + ret = clk_prepare_enable(imx->clk_per); + if (ret) { + clk_disable_unprepare(imx->clk_ipg); + return ret; + } + + return 0; +} + +static void pwm_imx1_clk_disable_unprepare(struct pwm_chip *chip) +{ + struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); + + clk_disable_unprepare(imx->clk_per); + clk_disable_unprepare(imx->clk_ipg); +} + +static int pwm_imx1_config(struct pwm_chip *chip, + struct pwm_device *pwm, int duty_ns, int period_ns) +{ + struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); + u32 max, p; + + /* + * The PWM subsystem allows for exact frequencies. However, + * I cannot connect a scope on my device to the PWM line and + * thus cannot provide the program the PWM controller + * exactly. Instead, I'm relying on the fact that the + * Bootloader (u-boot or WinCE+haret) has programmed the PWM + * function group already. So I'll just modify the PWM sample + * register to follow the ratio of duty_ns vs. period_ns + * accordingly. + * + * This is good enough for programming the brightness of + * the LCD backlight. + * + * The real implementation would divide PERCLK[0] first by + * both the prescaler (/1 .. /128) and then by CLKSEL + * (/2 .. /16). 
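The pwm_imx1_config() body that the new file continues with below only rescales the sample register against whatever period value the bootloader left in MX1_PWMP, as its comment explains. A standalone sketch of that ratio calculation; the register value in main() is a made-up example, and a 64-bit intermediate is used here to sidestep overflow:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the duty ratio math: returns the value written to MX1_PWMS. */
static uint32_t imx1_sample_value(uint32_t max, uint32_t duty_ns,
                                  uint32_t period_ns)
{
    uint32_t p = (uint64_t)max * duty_ns / period_ns;

    /* The driver programs max - p into the sample register. */
    return max - p;
}

int main(void)
{
    uint32_t max = 1000;        /* pretend bootloader-programmed period */

    /* 25% duty on a 1 ms period. */
    printf("PWMS = %u\n",
           (unsigned int)imx1_sample_value(max, 250000, 1000000));
    return 0;
}

For a 25% request the sketch writes 750, i.e. the duty is expressed as a fraction of the inherited period rather than recomputed from the clock.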
+ */ + max = readl(imx->mmio_base + MX1_PWMP); + p = max * duty_ns / period_ns; + + writel(max - p, imx->mmio_base + MX1_PWMS); + + return 0; +} + +static int pwm_imx1_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); + u32 value; + int ret; + + ret = pwm_imx1_clk_prepare_enable(chip); + if (ret < 0) + return ret; + + value = readl(imx->mmio_base + MX1_PWMC); + value |= MX1_PWMC_EN; + writel(value, imx->mmio_base + MX1_PWMC); + + return 0; +} + +static void pwm_imx1_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct pwm_imx1_chip *imx = to_pwm_imx1_chip(chip); + u32 value; + + value = readl(imx->mmio_base + MX1_PWMC); + value &= ~MX1_PWMC_EN; + writel(value, imx->mmio_base + MX1_PWMC); + + pwm_imx1_clk_disable_unprepare(chip); +} + +static const struct pwm_ops pwm_imx1_ops = { + .enable = pwm_imx1_enable, + .disable = pwm_imx1_disable, + .config = pwm_imx1_config, + .owner = THIS_MODULE, +}; + +static const struct of_device_id pwm_imx1_dt_ids[] = { + { .compatible = "fsl,imx1-pwm", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids); + +static int pwm_imx1_probe(struct platform_device *pdev) +{ + struct pwm_imx1_chip *imx; + struct resource *r; + + imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); + if (!imx) + return -ENOMEM; + + platform_set_drvdata(pdev, imx); + + imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(imx->clk_ipg)) { + dev_err(&pdev->dev, "getting ipg clock failed with %ld\n", + PTR_ERR(imx->clk_ipg)); + return PTR_ERR(imx->clk_ipg); + } + + imx->clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(imx->clk_per)) { + int ret = PTR_ERR(imx->clk_per); + + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to get peripheral clock: %d\n", + ret); + + return ret; + } + + imx->chip.ops = &pwm_imx1_ops; + imx->chip.dev = &pdev->dev; + imx->chip.base = -1; + imx->chip.npwm = 1; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(imx->mmio_base)) + return PTR_ERR(imx->mmio_base); + + return pwmchip_add(&imx->chip); +} + +static int pwm_imx1_remove(struct platform_device *pdev) +{ + struct pwm_imx1_chip *imx = platform_get_drvdata(pdev); + + pwm_imx1_clk_disable_unprepare(&imx->chip); + + return pwmchip_remove(&imx->chip); +} + +static struct platform_driver pwm_imx1_driver = { + .driver = { + .name = "pwm-imx1", + .of_match_table = pwm_imx1_dt_ids, + }, + .probe = pwm_imx1_probe, + .remove = pwm_imx1_remove, +}; +module_platform_driver(pwm_imx1_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Sascha Hauer "); diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx27.c similarity index 59% rename from drivers/pwm/pwm-imx.c rename to drivers/pwm/pwm-imx27.c index 55a3a363d5be..806130654211 100644 --- a/drivers/pwm/pwm-imx.c +++ b/drivers/pwm/pwm-imx27.c @@ -19,16 +19,6 @@ #include #include -/* i.MX1 and i.MX21 share the same PWM function block: */ - -#define MX1_PWMC 0x00 /* PWM Control Register */ -#define MX1_PWMS 0x04 /* PWM Sample Register */ -#define MX1_PWMP 0x08 /* PWM Period Register */ - -#define MX1_PWMC_EN BIT(4) - -/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */ - #define MX3_PWMCR 0x00 /* PWM Control Register */ #define MX3_PWMSR 0x04 /* PWM Status Register */ #define MX3_PWMSAR 0x0C /* PWM Sample Register */ @@ -86,21 +76,18 @@ /* PWMPR register value of 0xffff has the same effect as 0xfffe */ #define MX3_PWMPR_MAX 0xfffe -struct imx_chip { +struct 
pwm_imx27_chip { struct clk *clk_ipg; - struct clk *clk_per; - void __iomem *mmio_base; - struct pwm_chip chip; }; -#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip) +#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip) -static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip) +static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip) { - struct imx_chip *imx = to_imx_chip(chip); + struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); int ret; ret = clk_prepare_enable(imx->clk_ipg); @@ -116,35 +103,32 @@ static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip) return 0; } -static void imx_pwm_clk_disable_unprepare(struct pwm_chip *chip) +static void pwm_imx27_clk_disable_unprepare(struct pwm_chip *chip) { - struct imx_chip *imx = to_imx_chip(chip); + struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); clk_disable_unprepare(imx->clk_per); clk_disable_unprepare(imx->clk_ipg); } -static void imx_pwm_get_state(struct pwm_chip *chip, - struct pwm_device *pwm, struct pwm_state *state) +static void pwm_imx27_get_state(struct pwm_chip *chip, + struct pwm_device *pwm, struct pwm_state *state) { - struct imx_chip *imx = to_imx_chip(chip); - u32 period, prescaler, pwm_clk, ret, val; + struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); + u32 period, prescaler, pwm_clk, val; u64 tmp; + int ret; - ret = imx_pwm_clk_prepare_enable(chip); + ret = pwm_imx27_clk_prepare_enable(chip); if (ret < 0) return; val = readl(imx->mmio_base + MX3_PWMCR); - if (val & MX3_PWMCR_EN) { + if (val & MX3_PWMCR_EN) state->enabled = true; - ret = imx_pwm_clk_prepare_enable(chip); - if (ret) - return; - } else { + else state->enabled = false; - } switch (FIELD_GET(MX3_PWMCR_POUTC, val)) { case MX3_PWMCR_POUTC_NORMAL: @@ -176,70 +160,13 @@ static void imx_pwm_get_state(struct pwm_chip *chip, state->duty_cycle = 0; } - imx_pwm_clk_disable_unprepare(chip); -} - -static int imx_pwm_config_v1(struct pwm_chip *chip, - struct pwm_device *pwm, int duty_ns, int period_ns) -{ - struct imx_chip *imx = to_imx_chip(chip); - - /* - * The PWM subsystem allows for exact frequencies. However, - * I cannot connect a scope on my device to the PWM line and - * thus cannot provide the program the PWM controller - * exactly. Instead, I'm relying on the fact that the - * Bootloader (u-boot or WinCE+haret) has programmed the PWM - * function group already. So I'll just modify the PWM sample - * register to follow the ratio of duty_ns vs. period_ns - * accordingly. - * - * This is good enough for programming the brightness of - * the LCD backlight. - * - * The real implementation would divide PERCLK[0] first by - * both the prescaler (/1 .. /128) and then by CLKSEL - * (/2 .. /16). 
- */ - u32 max = readl(imx->mmio_base + MX1_PWMP); - u32 p = max * duty_ns / period_ns; - writel(max - p, imx->mmio_base + MX1_PWMS); - - return 0; -} - -static int imx_pwm_enable_v1(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct imx_chip *imx = to_imx_chip(chip); - u32 val; - int ret; - - ret = imx_pwm_clk_prepare_enable(chip); - if (ret < 0) - return ret; - - val = readl(imx->mmio_base + MX1_PWMC); - val |= MX1_PWMC_EN; - writel(val, imx->mmio_base + MX1_PWMC); - - return 0; -} - -static void imx_pwm_disable_v1(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct imx_chip *imx = to_imx_chip(chip); - u32 val; - - val = readl(imx->mmio_base + MX1_PWMC); - val &= ~MX1_PWMC_EN; - writel(val, imx->mmio_base + MX1_PWMC); - - imx_pwm_clk_disable_unprepare(chip); + if (!state->enabled) + pwm_imx27_clk_disable_unprepare(chip); } -static void imx_pwm_sw_reset(struct pwm_chip *chip) +static void pwm_imx27_sw_reset(struct pwm_chip *chip) { - struct imx_chip *imx = to_imx_chip(chip); + struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); struct device *dev = chip->dev; int wait_count = 0; u32 cr; @@ -255,10 +182,10 @@ static void imx_pwm_sw_reset(struct pwm_chip *chip) dev_warn(dev, "software reset timeout\n"); } -static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip, - struct pwm_device *pwm) +static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip, + struct pwm_device *pwm) { - struct imx_chip *imx = to_imx_chip(chip); + struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); struct device *dev = chip->dev; unsigned int period_ms; int fifoav; @@ -277,11 +204,11 @@ static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip, } } -static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, - struct pwm_state *state) +static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { unsigned long period_cycles, duty_cycles, prescale; - struct imx_chip *imx = to_imx_chip(chip); + struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); struct pwm_state cstate; unsigned long long c; int ret; @@ -318,13 +245,13 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, * enabled. 
*/ if (cstate.enabled) { - imx_pwm_wait_fifo_slot(chip, pwm); + pwm_imx27_wait_fifo_slot(chip, pwm); } else { - ret = imx_pwm_clk_prepare_enable(chip); + ret = pwm_imx27_clk_prepare_enable(chip); if (ret) return ret; - imx_pwm_sw_reset(chip); + pwm_imx27_sw_reset(chip); } writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); @@ -343,64 +270,35 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm, } else if (cstate.enabled) { writel(0, imx->mmio_base + MX3_PWMCR); - imx_pwm_clk_disable_unprepare(chip); + pwm_imx27_clk_disable_unprepare(chip); } return 0; } -static const struct pwm_ops imx_pwm_ops_v1 = { - .enable = imx_pwm_enable_v1, - .disable = imx_pwm_disable_v1, - .config = imx_pwm_config_v1, +static const struct pwm_ops pwm_imx27_ops = { + .apply = pwm_imx27_apply, + .get_state = pwm_imx27_get_state, .owner = THIS_MODULE, }; -static const struct pwm_ops imx_pwm_ops_v2 = { - .apply = imx_pwm_apply_v2, - .get_state = imx_pwm_get_state, - .owner = THIS_MODULE, -}; - -struct imx_pwm_data { - bool polarity_supported; - const struct pwm_ops *ops; -}; - -static struct imx_pwm_data imx_pwm_data_v1 = { - .ops = &imx_pwm_ops_v1, -}; - -static struct imx_pwm_data imx_pwm_data_v2 = { - .polarity_supported = true, - .ops = &imx_pwm_ops_v2, -}; - -static const struct of_device_id imx_pwm_dt_ids[] = { - { .compatible = "fsl,imx1-pwm", .data = &imx_pwm_data_v1, }, - { .compatible = "fsl,imx27-pwm", .data = &imx_pwm_data_v2, }, +static const struct of_device_id pwm_imx27_dt_ids[] = { + { .compatible = "fsl,imx27-pwm", }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, imx_pwm_dt_ids); +MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids); -static int imx_pwm_probe(struct platform_device *pdev) +static int pwm_imx27_probe(struct platform_device *pdev) { - const struct of_device_id *of_id = - of_match_device(imx_pwm_dt_ids, &pdev->dev); - const struct imx_pwm_data *data; - struct imx_chip *imx; + struct pwm_imx27_chip *imx; struct resource *r; - int ret = 0; - - if (!of_id) - return -ENODEV; - - data = of_id->data; imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); if (imx == NULL) return -ENOMEM; + platform_set_drvdata(pdev, imx); + imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(imx->clk_ipg)) { dev_err(&pdev->dev, "getting ipg clock failed with %ld\n", @@ -410,57 +308,51 @@ static int imx_pwm_probe(struct platform_device *pdev) imx->clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(imx->clk_per)) { - dev_err(&pdev->dev, "getting per clock failed with %ld\n", - PTR_ERR(imx->clk_per)); - return PTR_ERR(imx->clk_per); + int ret = PTR_ERR(imx->clk_per); + + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to get peripheral clock: %d\n", + ret); + + return ret; } - imx->chip.ops = data->ops; + imx->chip.ops = &pwm_imx27_ops; imx->chip.dev = &pdev->dev; imx->chip.base = -1; imx->chip.npwm = 1; - if (data->polarity_supported) { - dev_dbg(&pdev->dev, "PWM supports output inversion\n"); - imx->chip.of_xlate = of_pwm_xlate_with_flags; - imx->chip.of_pwm_n_cells = 3; - } + imx->chip.of_xlate = of_pwm_xlate_with_flags; + imx->chip.of_pwm_n_cells = 3; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(imx->mmio_base)) return PTR_ERR(imx->mmio_base); - ret = pwmchip_add(&imx->chip); - if (ret < 0) - return ret; - - platform_set_drvdata(pdev, imx); - return 0; + return pwmchip_add(&imx->chip); } -static int imx_pwm_remove(struct platform_device *pdev) +static int pwm_imx27_remove(struct platform_device *pdev) { 
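Both the new pwm-imx1.c probe and the reworked pwm-imx27.c probe above switch the "per" clock lookup to stay quiet on -EPROBE_DEFER and only complain about real failures. A minimal userspace sketch of that pattern, with a hypothetical get_per_clock() standing in for devm_clk_get():

#include <stdio.h>

#define EPROBE_DEFER 517        /* kernel-internal errno value, for illustration */

/* Hypothetical resource getter: returns 0 or a negative errno. */
static int get_per_clock(int simulate_error)
{
    return -simulate_error;
}

static int probe(int simulate_error)
{
    int ret = get_per_clock(simulate_error);

    if (ret < 0) {
        /* Deferral is routine; don't log it as an error. */
        if (ret != -EPROBE_DEFER)
            fprintf(stderr, "failed to get peripheral clock: %d\n", ret);
        return ret;
    }
    return 0;
}

int main(void)
{
    probe(EPROBE_DEFER);        /* silent: probe will simply be retried later */
    probe(22);                  /* a genuine failure (-EINVAL) is reported */
    return 0;
}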
- struct imx_chip *imx; + struct pwm_imx27_chip *imx; imx = platform_get_drvdata(pdev); - if (imx == NULL) - return -ENODEV; - imx_pwm_clk_disable_unprepare(&imx->chip); + pwm_imx27_clk_disable_unprepare(&imx->chip); return pwmchip_remove(&imx->chip); } static struct platform_driver imx_pwm_driver = { - .driver = { - .name = "imx-pwm", - .of_match_table = imx_pwm_dt_ids, + .driver = { + .name = "pwm-imx27", + .of_match_table = pwm_imx27_dt_ids, }, - .probe = imx_pwm_probe, - .remove = imx_pwm_remove, + .probe = pwm_imx27_probe, + .remove = pwm_imx27_remove, }; - module_platform_driver(imx_pwm_driver); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c index 893940d45f0d..15803c71fe80 100644 --- a/drivers/pwm/pwm-mtk-disp.c +++ b/drivers/pwm/pwm-mtk-disp.c @@ -277,10 +277,21 @@ static const struct mtk_pwm_data mt8173_pwm_data = { .commit_mask = 0x1, }; +static const struct mtk_pwm_data mt8183_pwm_data = { + .enable_mask = BIT(0), + .con0 = 0x18, + .con0_sel = 0x0, + .con1 = 0x1c, + .has_commit = false, + .bls_debug = 0x80, + .bls_debug_mask = 0x3, +}; + static const struct of_device_id mtk_disp_pwm_of_match[] = { { .compatible = "mediatek,mt2701-disp-pwm", .data = &mt2701_pwm_data}, { .compatible = "mediatek,mt6595-disp-pwm", .data = &mt8173_pwm_data}, { .compatible = "mediatek,mt8173-disp-pwm", .data = &mt8173_pwm_data}, + { .compatible = "mediatek,mt8183-disp-pwm", .data = &mt8183_pwm_data}, { } }; MODULE_DEVICE_TABLE(of, mtk_disp_pwm_of_match); diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index a41812fc6f95..cfe7dd1b448e 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include #include #include @@ -68,19 +70,15 @@ static void rcar_pwm_update(struct rcar_pwm_chip *rp, u32 mask, u32 data, static int rcar_pwm_get_clock_division(struct rcar_pwm_chip *rp, int period_ns) { unsigned long clk_rate = clk_get_rate(rp->clk); - unsigned long long max; /* max cycle / nanoseconds */ - unsigned int div; + u64 div, tmp; if (clk_rate == 0) return -EINVAL; - for (div = 0; div <= RCAR_PWM_MAX_DIVISION; div++) { - max = (unsigned long long)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE * - (1 << div); - do_div(max, clk_rate); - if (period_ns <= max) - break; - } + div = (u64)NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE; + tmp = (u64)period_ns * clk_rate + div - 1; + tmp = div64_u64(tmp, div); + div = ilog2(tmp - 1) + 1; return (div <= RCAR_PWM_MAX_DIVISION) ? div : -ERANGE; } @@ -139,39 +137,8 @@ static void rcar_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) pm_runtime_put(chip->dev); } -static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static int rcar_pwm_enable(struct rcar_pwm_chip *rp) { - struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip); - int div, ret; - - div = rcar_pwm_get_clock_division(rp, period_ns); - if (div < 0) - return div; - - /* - * Let the core driver set pwm->period if disabled and duty_ns == 0. 
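The rcar_pwm_get_clock_division() rewrite above replaces the linear search over divisors with a closed form: the required scale factor is ceil(period_ns * clk_rate / (NSEC_PER_SEC * RCAR_PWM_MAX_CYCLE)), and the divider is its ceiling log2. A userspace sketch of that arithmetic; the max-cycle constant, division limit and clock rate used in main() are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* ceil(log2(x)) for x >= 1; returns 0 for x == 1. */
static unsigned int ceil_log2(uint64_t x)
{
    unsigned int bits = 0;

    while ((1ULL << bits) < x)
        bits++;
    return bits;
}

static int clock_division(uint64_t period_ns, uint64_t clk_rate,
                          uint64_t max_cycle, unsigned int max_division)
{
    uint64_t div = NSEC_PER_SEC * max_cycle;
    uint64_t tmp = (period_ns * clk_rate + div - 1) / div;   /* ceiling */
    unsigned int d = ceil_log2(tmp);

    return d <= max_division ? (int)d : -1;
}

int main(void)
{
    /* 20 ms period, assumed 100 MHz clock, assumed 1023-cycle counter. */
    printf("division = %d\n",
           clock_division(20000000, 100000000, 1023, 24));
    return 0;
}

For this input the closed form yields 11, the same answer the old loop reaches after eleven iterations; edge behaviour for very small scale factors can differ slightly because the old loop truncated its intermediate division.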
- * But, this driver should prevent to set the new duty_ns if current - * duty_cycle is not set - */ - if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle) - return 0; - - rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR); - - ret = rcar_pwm_set_counter(rp, div, duty_ns, period_ns); - if (!ret) - rcar_pwm_set_clock_control(rp, div); - - /* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */ - rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR); - - return ret; -} - -static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip); u32 value; /* Don't enable the PWM device if CYC0 or PH0 is 0 */ @@ -185,19 +152,51 @@ static int rcar_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) return 0; } -static void rcar_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +static void rcar_pwm_disable(struct rcar_pwm_chip *rp) +{ + rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR); +} + +static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) { struct rcar_pwm_chip *rp = to_rcar_pwm_chip(chip); + struct pwm_state cur_state; + int div, ret; - rcar_pwm_update(rp, RCAR_PWMCR_EN0, 0, RCAR_PWMCR); + /* This HW/driver only supports normal polarity */ + pwm_get_state(pwm, &cur_state); + if (state->polarity != PWM_POLARITY_NORMAL) + return -ENOTSUPP; + + if (!state->enabled) { + rcar_pwm_disable(rp); + return 0; + } + + div = rcar_pwm_get_clock_division(rp, state->period); + if (div < 0) + return div; + + rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR); + + ret = rcar_pwm_set_counter(rp, div, state->duty_cycle, state->period); + if (!ret) + rcar_pwm_set_clock_control(rp, div); + + /* The SYNC should be set to 0 even if rcar_pwm_set_counter failed */ + rcar_pwm_update(rp, RCAR_PWMCR_SYNC, 0, RCAR_PWMCR); + + if (!ret && state->enabled) + ret = rcar_pwm_enable(rp); + + return ret; } static const struct pwm_ops rcar_pwm_ops = { .request = rcar_pwm_request, .free = rcar_pwm_free, - .config = rcar_pwm_config, - .enable = rcar_pwm_enable, - .disable = rcar_pwm_disable, + .apply = rcar_pwm_apply, .owner = THIS_MODULE, }; @@ -279,18 +278,16 @@ static int rcar_pwm_suspend(struct device *dev) static int rcar_pwm_resume(struct device *dev) { struct pwm_device *pwm = rcar_pwm_dev_to_pwm_dev(dev); + struct pwm_state state; if (!test_bit(PWMF_REQUESTED, &pwm->flags)) return 0; pm_runtime_get_sync(dev); - rcar_pwm_config(pwm->chip, pwm, pwm->state.duty_cycle, - pwm->state.period); - if (pwm_is_enabled(pwm)) - rcar_pwm_enable(pwm->chip, pwm); + pwm_get_state(pwm, &state); - return 0; + return rcar_pwm_apply(pwm->chip, pwm, &state); } #endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(rcar_pwm_pm_ops, rcar_pwm_suspend, rcar_pwm_resume); diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index cbe467ff1aba..1e1f42e210a0 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -2149,6 +2149,7 @@ static void mport_release_mapping(struct kref *ref) switch (map->dir) { case MAP_INBOUND: rio_unmap_inb_region(mport, map->phys_addr); + /* fall through */ case MAP_DMA: dma_free_coherent(mport->dev.parent, map->size, map->virt_addr, map->phys_addr); diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 73e4b407f162..ad5e303dda05 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c @@ -290,8 +290,7 @@ const struct 
attribute_group *rio_dev_groups[] = { NULL, }; -static ssize_t bus_scan_store(struct bus_type *bus, const char *buf, - size_t count) +static ssize_t scan_store(struct bus_type *bus, const char *buf, size_t count) { long val; int rc; @@ -314,7 +313,7 @@ exit: return rc; } -static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store); +static BUS_ATTR_WO(scan); static struct attribute *rio_bus_attrs[] = { &bus_attr_scan.attr, diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c index bad0e0ea4f30..cf45829585cb 100644 --- a/drivers/rapidio/rio_cm.c +++ b/drivers/rapidio/rio_cm.c @@ -1215,7 +1215,9 @@ static int riocm_ch_listen(u16 ch_id) riocm_debug(CHOP, "(ch_%d)", ch_id); ch = riocm_get_channel(ch_id); - if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) + if (!ch) + return -EINVAL; + if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN)) ret = -EINVAL; riocm_put_channel(ch); return ret; diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c index 3f38907320dc..95540ea8dd9d 100644 --- a/drivers/ras/ras.c +++ b/drivers/ras/ras.c @@ -14,7 +14,7 @@ #define TRACE_INCLUDE_PATH ../../include/ras #include -void log_non_standard_event(const uuid_le *sec_type, const uuid_le *fru_id, +void log_non_standard_event(const guid_t *sec_type, const guid_t *fru_id, const char *fru_text, const u8 sev, const u8 *err, const u32 len) { diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 28f55248eb90..753a6a1b30c3 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -22,12 +21,7 @@ struct pm8607_regulator_info { struct regulator_desc desc; - struct pm860x_chip *chip; - struct regulator_dev *regulator; - struct i2c_client *i2c; - struct i2c_client *i2c_8606; - unsigned int *vol_table; unsigned int *vol_suspend; int slope_double; @@ -210,13 +204,15 @@ static const unsigned int LDO14_suspend_table[] = { static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index) { struct pm8607_regulator_info *info = rdev_get_drvdata(rdev); - int ret = -EINVAL; + int ret; + + ret = regulator_list_voltage_table(rdev, index); + if (ret < 0) + return ret; + + if (info->slope_double) + ret <<= 1; - if (info->vol_table && (index < rdev->desc->n_voltages)) { - ret = info->vol_table[index]; - if (info->slope_double) - ret <<= 1; - } return ret; } @@ -257,6 +253,7 @@ static const struct regulator_ops pm8606_preg_ops = { .type = REGULATOR_VOLTAGE, \ .id = PM8607_ID_##vreg, \ .owner = THIS_MODULE, \ + .volt_table = vreg##_table, \ .n_voltages = ARRAY_SIZE(vreg##_table), \ .vsel_reg = PM8607_##vreg, \ .vsel_mask = ARRAY_SIZE(vreg##_table) - 1, \ @@ -266,7 +263,6 @@ static const struct regulator_ops pm8606_preg_ops = { .enable_mask = 1 << (ebit), \ }, \ .slope_double = (0), \ - .vol_table = (unsigned int *)&vreg##_table, \ .vol_suspend = (unsigned int *)&vreg##_suspend_table, \ } @@ -278,6 +274,7 @@ static const struct regulator_ops pm8606_preg_ops = { .type = REGULATOR_VOLTAGE, \ .id = PM8607_ID_LDO##_id, \ .owner = THIS_MODULE, \ + .volt_table = LDO##_id##_table, \ .n_voltages = ARRAY_SIZE(LDO##_id##_table), \ .vsel_reg = PM8607_##vreg, \ .vsel_mask = (ARRAY_SIZE(LDO##_id##_table) - 1) << (shift), \ @@ -285,7 +282,6 @@ static const struct regulator_ops pm8606_preg_ops = { .enable_mask = 1 << (ebit), \ }, \ .slope_double = (0), \ - .vol_table = (unsigned int *)&LDO##_id##_table, \ .vol_suspend = (unsigned int *)&LDO##_id##_suspend_table, \ } @@ -349,6 +345,7 @@ 
static int pm8607_regulator_probe(struct platform_device *pdev) struct pm8607_regulator_info *info = NULL; struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev); struct regulator_config config = { }; + struct regulator_dev *rdev; struct resource *res; int i; @@ -371,13 +368,9 @@ static int pm8607_regulator_probe(struct platform_device *pdev) /* i is used to check regulator ID */ i = -1; } - info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; - info->i2c_8606 = (chip->id == CHIP_PM8607) ? chip->companion : - chip->client; - info->chip = chip; /* check DVC ramp slope double */ - if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double) + if ((i == PM8607_ID_BUCK3) && chip->buck3_double) info->slope_double = 1; config.dev = &pdev->dev; @@ -392,12 +385,11 @@ static int pm8607_regulator_probe(struct platform_device *pdev) else config.regmap = chip->regmap_companion; - info->regulator = devm_regulator_register(&pdev->dev, &info->desc, - &config); - if (IS_ERR(info->regulator)) { + rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); + if (IS_ERR(rdev)) { dev_err(&pdev->dev, "failed to register regulator %s\n", info->desc.name); - return PTR_ERR(info->regulator); + return PTR_ERR(rdev); } platform_set_drvdata(pdev, info); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index ee60a222f5eb..b7f249ee5e68 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -180,6 +180,17 @@ config REGULATOR_BCM590XX BCM590xx PMUs. This will enable support for the software controllable LDO/Switching regulators. +config REGULATOR_BD70528 + tristate "ROHM BD70528 Power Regulator" + depends on MFD_ROHM_BD70528 + help + This driver supports voltage regulators on ROHM BD70528 PMIC. + This will enable support for the software controllable buck + and LDO regulators. + + This driver can also be built as a module. If so, the module + will be called bd70528-regulator. + config REGULATOR_BD718XX tristate "ROHM BD71837 Power Regulator" depends on MFD_ROHM_BD718XX @@ -457,6 +468,14 @@ config REGULATOR_MAX77620 chip to control Step-Down DC-DC and LDOs. Say Y here to enable the regulator driver. +config REGULATOR_MAX77650 + tristate "Maxim MAX77650/77651 regulator support" + depends on MFD_MAX77650 + help + Regulator driver for MAX77650/77651 PMIC from Maxim + Semiconductor. This device has a SIMO with three independent + power rails and an LDO. + config REGULATOR_MAX8649 tristate "Maxim 8649 voltage regulator" depends on I2C @@ -484,7 +503,7 @@ config REGULATOR_MAX8925 tristate "Maxim MAX8925 Power Management IC" depends on MFD_MAX8925 help - Say y here to support the voltage regulaltor of Maxim MAX8925 PMIC. + Say y here to support the voltage regulator of Maxim MAX8925 PMIC. config REGULATOR_MAX8952 tristate "Maxim MAX8952 Power Management IC" @@ -501,7 +520,7 @@ config REGULATOR_MAX8973 select REGMAP_I2C help The MAXIM MAX8973 high-efficiency. three phase, DC-DC step-down - switching regulator delievers up to 9A of output current. Each + switching regulator delivers up to 9A of output current. Each phase operates at a 2MHz fixed frequency with a 120 deg shift from the adjacent phase, allowing the use of small magnetic component. 
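The pm8607_list_voltage() change above leans on the core's regulator_list_voltage_table() helper and then applies the BUCK3 slope-double correction on top. A standalone sketch of the same lookup-then-double shape; the table contents are invented for illustration and are not the real 88PM8607 voltage tables:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int vol_table[] = {
    725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
};

/* Returns microvolts for a selector, or -1 for an out-of-range index. */
static int list_voltage(unsigned int index, int slope_double)
{
    int ret;

    if (index >= ARRAY_SIZE(vol_table))
        return -1;

    ret = vol_table[index];
    if (slope_double)
        ret <<= 1;      /* DVC ramp slope doubled: report twice the table value */
    return ret;
}

int main(void)
{
    printf("sel 3: %d uV\n", list_voltage(3, 0));
    printf("sel 3, slope doubled: %d uV\n", list_voltage(3, 1));
    return 0;
}

The driver-private vol_table copy becomes redundant once the core does the bounds check and table lookup, which is what the hunk removes.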
@@ -646,7 +665,7 @@ config REGULATOR_PCF50633 tristate "NXP PCF50633 regulator driver" depends on MFD_PCF50633 help - Say Y here to support the voltage regulators and convertors + Say Y here to support the voltage regulators and converters on PCF50633 config REGULATOR_PFUZE100 @@ -924,7 +943,7 @@ config REGULATOR_TPS65132 select REGMAP_I2C help This driver supports TPS65132 single inductor - dual output - power supply specifcally designed for display panels. + power supply specifically designed for display panels. config REGULATOR_TPS65217 tristate "TI TPS65217 Power regulators" diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index b12e1c9b2118..1169f8a27d91 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o +obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o @@ -60,6 +61,7 @@ obj-$(CONFIG_REGULATOR_LTC3676) += ltc3676.o obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o +obj-$(CONFIG_REGULATOR_MAX77650) += max77650-regulator.o obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c index 21e20483bd91..e0239cf3f56d 100644 --- a/drivers/regulator/act8865-regulator.c +++ b/drivers/regulator/act8865-regulator.c @@ -131,7 +131,7 @@ * ACT8865 voltage number */ #define ACT8865_VOLTAGE_NUM 64 -#define ACT8600_SUDCDC_VOLTAGE_NUM 255 +#define ACT8600_SUDCDC_VOLTAGE_NUM 256 struct act8865 { struct regmap *regmap; @@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = { REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0), REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000), REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000), - REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000), + REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000), + REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0), }; static struct regulator_ops act8865_ops = { diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c index 603db77723b6..caa61d306a69 100644 --- a/drivers/regulator/act8945a-regulator.c +++ b/drivers/regulator/act8945a-regulator.c @@ -87,7 +87,8 @@ static const struct regulator_linear_range act8945a_voltage_ranges[] = { static int act8945a_set_suspend_state(struct regulator_dev *rdev, bool enable) { struct regmap *regmap = rdev->regmap; - int id = rdev->desc->id, reg, val; + int id = rdev_get_id(rdev); + int reg, val; switch (id) { case ACT8945A_ID_DCDC1: @@ -159,7 +160,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev); struct regmap *regmap = rdev->regmap; - int id = rdev->desc->id; + int id = rdev_get_id(rdev); int reg, ret, val = 0; switch (id) { @@ -190,11 +191,11 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) switch (mode) { case REGULATOR_MODE_STANDBY: - if (rdev->desc->id > ACT8945A_ID_DCDC3) + if (id > ACT8945A_ID_DCDC3) 
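The corrected ACT8600 SUDCDC ranges above cover all 256 selectors (hence the n_voltages bump from 255 to 256) and pin the last eight codes at 41.4 V. A userspace sketch of the selector-to-voltage mapping those REGULATOR_LINEAR_RANGE() entries describe:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct linear_range {
    unsigned int min_uV;
    unsigned int min_sel;
    unsigned int max_sel;
    unsigned int step_uV;
};

/* Mirrors the corrected act8600 SUDCDC ranges from the hunk above. */
static const struct linear_range sudcdc_ranges[] = {
    {  3000000,   0,  63,      0 },
    {  3000000,  64, 159, 100000 },
    { 12600000, 160, 191, 200000 },
    { 19000000, 192, 247, 400000 },
    { 41400000, 248, 255,      0 },
};

static long range_to_uV(unsigned int sel)
{
    for (unsigned int i = 0; i < ARRAY_SIZE(sudcdc_ranges); i++) {
        const struct linear_range *r = &sudcdc_ranges[i];

        if (sel >= r->min_sel && sel <= r->max_sel)
            return r->min_uV + (long)(sel - r->min_sel) * r->step_uV;
    }
    return -1;
}

int main(void)
{
    printf("sel 191 -> %ld uV\n", range_to_uV(191));   /* 18.8 V */
    printf("sel 192 -> %ld uV\n", range_to_uV(192));   /* 19.0 V */
    printf("sel 255 -> %ld uV\n", range_to_uV(255));   /* 41.4 V */
    return 0;
}

The old table started the top range at selector 191, overlapping the previous range and mis-scaling everything above it; with the split ranges each selector resolves to exactly one voltage.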
val = BIT(5); break; case REGULATOR_MODE_NORMAL: - if (rdev->desc->id <= ACT8945A_ID_DCDC3) + if (id <= ACT8945A_ID_DCDC3) val = BIT(5); break; default: @@ -213,7 +214,7 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode) static unsigned int act8945a_get_mode(struct regulator_dev *rdev) { struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev); - int id = rdev->desc->id; + int id = rdev_get_id(rdev); if (id < ACT8945A_ID_DCDC1 || id >= ACT8945A_ID_MAX) return -EINVAL; diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c index b9a93049e41e..bf3ab405eed1 100644 --- a/drivers/regulator/arizona-ldo1.c +++ b/drivers/regulator/arizona-ldo1.c @@ -40,35 +40,10 @@ struct arizona_ldo1 { struct gpio_desc *ena_gpiod; }; -static int arizona_ldo1_hc_list_voltage(struct regulator_dev *rdev, - unsigned int selector) -{ - if (selector >= rdev->desc->n_voltages) - return -EINVAL; - - if (selector == rdev->desc->n_voltages - 1) - return 1800000; - else - return rdev->desc->min_uV + (rdev->desc->uV_step * selector); -} - -static int arizona_ldo1_hc_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - int sel; - - sel = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); - if (sel >= rdev->desc->n_voltages) - sel = rdev->desc->n_voltages - 1; - - return sel; -} - static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) { - struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev); - struct regmap *regmap = ldo->regmap; + struct regmap *regmap = rdev_get_regmap(rdev); unsigned int val; int ret; @@ -85,16 +60,12 @@ static int arizona_ldo1_hc_set_voltage_sel(struct regulator_dev *rdev, if (val) return 0; - val = sel << ARIZONA_LDO1_VSEL_SHIFT; - - return regmap_update_bits(regmap, ARIZONA_LDO1_CONTROL_1, - ARIZONA_LDO1_VSEL_MASK, val); + return regulator_set_voltage_sel_regmap(rdev, sel); } static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev) { - struct arizona_ldo1 *ldo = rdev_get_drvdata(rdev); - struct regmap *regmap = ldo->regmap; + struct regmap *regmap = rdev_get_regmap(rdev); unsigned int val; int ret; @@ -105,32 +76,35 @@ static int arizona_ldo1_hc_get_voltage_sel(struct regulator_dev *rdev) if (val & ARIZONA_LDO1_HI_PWR) return rdev->desc->n_voltages - 1; - ret = regmap_read(regmap, ARIZONA_LDO1_CONTROL_1, &val); - if (ret != 0) - return ret; - - return (val & ARIZONA_LDO1_VSEL_MASK) >> ARIZONA_LDO1_VSEL_SHIFT; + return regulator_get_voltage_sel_regmap(rdev); } static const struct regulator_ops arizona_ldo1_hc_ops = { - .list_voltage = arizona_ldo1_hc_list_voltage, - .map_voltage = arizona_ldo1_hc_map_voltage, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .get_voltage_sel = arizona_ldo1_hc_get_voltage_sel, .set_voltage_sel = arizona_ldo1_hc_set_voltage_sel, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, }; +static const struct regulator_linear_range arizona_ldo1_hc_ranges[] = { + REGULATOR_LINEAR_RANGE(900000, 0, 0x6, 50000), + REGULATOR_LINEAR_RANGE(1800000, 0x7, 0x7, 0), +}; + static const struct regulator_desc arizona_ldo1_hc = { .name = "LDO1", .supply_name = "LDOVDD", .type = REGULATOR_VOLTAGE, .ops = &arizona_ldo1_hc_ops, + .vsel_reg = ARIZONA_LDO1_CONTROL_1, + .vsel_mask = ARIZONA_LDO1_VSEL_MASK, .bypass_reg = ARIZONA_LDO1_CONTROL_1, .bypass_mask = ARIZONA_LDO1_BYPASS, - .min_uV = 900000, - .uV_step = 50000, + .linear_ranges = arizona_ldo1_hc_ranges, + .n_linear_ranges = 
ARRAY_SIZE(arizona_ldo1_hc_ranges), .n_voltages = 8, .enable_time = 1500, .ramp_delay = 24000, diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c index 66337e12719b..e5fed289b52d 100644 --- a/drivers/regulator/as3722-regulator.c +++ b/drivers/regulator/as3722-regulator.c @@ -886,7 +886,7 @@ static int as3722_regulator_probe(struct platform_device *pdev) as3722_regs->desc[id].min_uV = 410000; } else { as3722_regs->desc[id].n_voltages = - AS3722_SD0_VSEL_MAX + 1, + AS3722_SD0_VSEL_MAX + 1; as3722_regs->desc[id].min_uV = 610000; } as3722_regs->desc[id].uV_step = 10000; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 48af859fd053..fba8f58ab769 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -367,13 +367,12 @@ static const int axp209_dcdc2_ldo3_slew_rates[] = { static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) { struct axp20x_dev *axp20x = rdev_get_drvdata(rdev); - const struct regulator_desc *desc = rdev->desc; + const struct regulator_desc *desc; u8 reg, mask, enable, cfg = 0xff; const int *slew_rates; int rate_count = 0; - if (!rdev) - return -EINVAL; + desc = rdev->desc; switch (axp20x->variant) { case AXP209_ID: @@ -436,11 +435,13 @@ static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp) static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev) { struct axp20x_dev *axp20x = rdev_get_drvdata(rdev); - const struct regulator_desc *desc = rdev->desc; + const struct regulator_desc *desc; if (!rdev) return -EINVAL; + desc = rdev->desc; + switch (axp20x->variant) { case AXP209_ID: if ((desc->id == AXP20X_LDO3) && @@ -573,7 +574,7 @@ static const struct regulator_desc axp22x_regulators[] = { AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK), AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, - AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT, + AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK, AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK), AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK, @@ -719,7 +720,7 @@ static const struct regulator_desc axp803_regulators[] = { AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK), AXP_DESC(AXP803, ALDO2, "aldo2", "aldoin", 700, 3300, 100, - AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT, + AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK), AXP_DESC(AXP803, ALDO3, "aldo3", "aldoin", 700, 3300, 100, AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK, @@ -729,7 +730,7 @@ static const struct regulator_desc axp803_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK), AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES, - AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT, + AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK), AXP_DESC(AXP803, DLDO3, "dldo3", "dldoin", 700, 3300, 100, AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK, @@ -744,7 +745,7 @@ static const struct regulator_desc axp803_regulators[] = { AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP803, ELDO3, "eldo3", "eldoin", 700, 1900, 50, - AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT, + AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), AXP_DESC(AXP803, FLDO1, "fldo1", "fldoin", 700, 
1450, 50, AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK, @@ -791,7 +792,7 @@ static const struct regulator_desc axp806_regulators[] = { AXP806_DCDCA_V_CTRL, AXP806_DCDCA_V_CTRL_MASK, AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCA_MASK), AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50, - AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL, + AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL_MASK, AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCB_MASK), AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc", axp806_dcdca_ranges, AXP806_DCDCA_NUM_VOLTAGES, @@ -817,7 +818,7 @@ static const struct regulator_desc axp806_regulators[] = { AXP806_BLDO1_V_CTRL, AXP806_BLDO1_V_CTRL_MASK, AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO1_MASK), AXP_DESC(AXP806, BLDO2, "bldo2", "bldoin", 700, 1900, 100, - AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL, + AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL_MASK, AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO2_MASK), AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100, AXP806_BLDO3_V_CTRL, AXP806_BLDO3_V_CTRL_MASK, @@ -952,7 +953,7 @@ static const struct regulator_desc axp813_regulators[] = { AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK), AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100, - AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT, + AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK), AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100, AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK, @@ -962,7 +963,7 @@ static const struct regulator_desc axp813_regulators[] = { AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK), AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES, - AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT, + AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK), AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100, AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK, @@ -977,7 +978,7 @@ static const struct regulator_desc axp813_regulators[] = { AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK), AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50, - AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT, + AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK, AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK), /* to do / check ... 
*/ AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50, diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c index 92d6d7b10cf7..e49c0a7d5dd5 100644 --- a/drivers/regulator/bcm590xx-regulator.c +++ b/drivers/regulator/bcm590xx-regulator.c @@ -242,8 +242,12 @@ static int bcm590xx_get_enable_register(int id) case BCM590XX_REG_SDSR2: reg = BCM590XX_SDSR2PMCTRL1; break; + case BCM590XX_REG_VSR: + reg = BCM590XX_VSRPMCTRL1; + break; case BCM590XX_REG_VBUS: reg = BCM590XX_OTG_CTRL; + break; } diff --git a/drivers/regulator/bd70528-regulator.c b/drivers/regulator/bd70528-regulator.c new file mode 100644 index 000000000000..30e3ed430a8a --- /dev/null +++ b/drivers/regulator/bd70528-regulator.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018 ROHM Semiconductors +// bd70528-regulator.c ROHM BD70528MWV regulator driver + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BUCK_RAMPRATE_250MV 0 +#define BUCK_RAMPRATE_125MV 1 +#define BUCK_RAMP_MAX 250 + +static const struct regulator_linear_range bd70528_buck1_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 600000), + REGULATOR_LINEAR_RANGE(2750000, 0x2, 0xf, 50000), +}; +static const struct regulator_linear_range bd70528_buck2_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x1, 300000), + REGULATOR_LINEAR_RANGE(1550000, 0x2, 0xd, 50000), + REGULATOR_LINEAR_RANGE(3000000, 0xe, 0xf, 300000), +}; +static const struct regulator_linear_range bd70528_buck3_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0xd, 50000), + REGULATOR_LINEAR_RANGE(1800000, 0xe, 0xf, 0), +}; + +/* All LDOs have same voltage ranges */ +static const struct regulator_linear_range bd70528_ldo_volts[] = { + REGULATOR_LINEAR_RANGE(1650000, 0x0, 0x07, 50000), + REGULATOR_LINEAR_RANGE(2100000, 0x8, 0x0f, 100000), + REGULATOR_LINEAR_RANGE(2850000, 0x10, 0x19, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x19, 0x1f, 0), +}; + +/* Also both LEDs support same voltages */ +static const unsigned int led_volts[] = { + 20000, 30000 +}; + +static int bd70528_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + if (ramp_delay > 0 && ramp_delay <= BUCK_RAMP_MAX) { + unsigned int ramp_value = BUCK_RAMPRATE_250MV; + + if (ramp_delay <= 125) + ramp_value = BUCK_RAMPRATE_125MV; + + return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, + BD70528_MASK_BUCK_RAMP, + ramp_value << BD70528_SIFT_BUCK_RAMP); + } + dev_err(&rdev->dev, "%s: ramp_delay: %d not supported\n", + rdev->desc->name, ramp_delay); + return -EINVAL; +} + +static int bd70528_led_set_voltage_sel(struct regulator_dev *rdev, + unsigned int sel) +{ + int ret; + + ret = regulator_is_enabled_regmap(rdev); + if (ret < 0) + return ret; + + if (ret == 0) + return regulator_set_voltage_sel_regmap(rdev, sel); + + dev_err(&rdev->dev, + "LED voltage change not allowed when led is enabled\n"); + + return -EBUSY; +} + +static const struct regulator_ops bd70528_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = bd70528_set_ramp_delay, +}; + +static const struct regulator_ops bd70528_ldo_ops = { + .enable = regulator_enable_regmap, + 
.disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = bd70528_set_ramp_delay, +}; + +static const struct regulator_ops bd70528_led_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .set_voltage_sel = bd70528_led_set_voltage_sel, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static const struct regulator_desc bd70528_desc[] = { + { + .name = "buck1", + .of_match = of_match_ptr("BUCK1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_BUCK1, + .ops = &bd70528_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_buck1_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_buck1_volts), + .n_voltages = BD70528_BUCK_VOLTS, + .enable_reg = BD70528_REG_BUCK1_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_BUCK1_VOLT, + .vsel_mask = BD70528_MASK_BUCK_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_BUCK2, + .ops = &bd70528_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_buck2_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_buck2_volts), + .n_voltages = BD70528_BUCK_VOLTS, + .enable_reg = BD70528_REG_BUCK2_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_BUCK2_VOLT, + .vsel_mask = BD70528_MASK_BUCK_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "buck3", + .of_match = of_match_ptr("BUCK3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_BUCK3, + .ops = &bd70528_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_buck3_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_buck3_volts), + .n_voltages = BD70528_BUCK_VOLTS, + .enable_reg = BD70528_REG_BUCK3_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_BUCK3_VOLT, + .vsel_mask = BD70528_MASK_BUCK_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LDO1, + .ops = &bd70528_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts), + .n_voltages = BD70528_LDO_VOLTS, + .enable_reg = BD70528_REG_LDO1_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_LDO1_VOLT, + .vsel_mask = BD70528_MASK_LDO_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LDO2, + .ops = &bd70528_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts), + .n_voltages = BD70528_LDO_VOLTS, + .enable_reg = BD70528_REG_LDO2_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_LDO2_VOLT, + .vsel_mask = BD70528_MASK_LDO_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo3", + .of_match = of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LDO3, + .ops = &bd70528_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd70528_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd70528_ldo_volts), + .n_voltages = BD70528_LDO_VOLTS, + .enable_reg = 
BD70528_REG_LDO3_EN, + .enable_mask = BD70528_MASK_RUN_EN, + .vsel_reg = BD70528_REG_LDO3_VOLT, + .vsel_mask = BD70528_MASK_LDO_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo_led1", + .of_match = of_match_ptr("LDO_LED1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LED1, + .ops = &bd70528_led_ops, + .type = REGULATOR_VOLTAGE, + .volt_table = &led_volts[0], + .n_voltages = ARRAY_SIZE(led_volts), + .enable_reg = BD70528_REG_LED_EN, + .enable_mask = BD70528_MASK_LED1_EN, + .vsel_reg = BD70528_REG_LED_VOLT, + .vsel_mask = BD70528_MASK_LED1_VOLT, + .owner = THIS_MODULE, + }, + { + .name = "ldo_led2", + .of_match = of_match_ptr("LDO_LED2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD70528_LED2, + .ops = &bd70528_led_ops, + .type = REGULATOR_VOLTAGE, + .volt_table = &led_volts[0], + .n_voltages = ARRAY_SIZE(led_volts), + .enable_reg = BD70528_REG_LED_EN, + .enable_mask = BD70528_MASK_LED2_EN, + .vsel_reg = BD70528_REG_LED_VOLT, + .vsel_mask = BD70528_MASK_LED2_VOLT, + .owner = THIS_MODULE, + }, + +}; + +static int bd70528_probe(struct platform_device *pdev) +{ + struct rohm_regmap_dev *bd70528; + int i; + struct regulator_config config = { + .dev = pdev->dev.parent, + }; + + bd70528 = dev_get_drvdata(pdev->dev.parent); + if (!bd70528) { + dev_err(&pdev->dev, "No MFD driver data\n"); + return -EINVAL; + } + + config.regmap = bd70528->regmap; + + for (i = 0; i < ARRAY_SIZE(bd70528_desc); i++) { + struct regulator_dev *rdev; + + rdev = devm_regulator_register(&pdev->dev, &bd70528_desc[i], + &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "failed to register %s regulator\n", + bd70528_desc[i].name); + return PTR_ERR(rdev); + } + } + return 0; +} + +static struct platform_driver bd70528_regulator = { + .driver = { + .name = "bd70528-pmic" + }, + .probe = bd70528_probe, +}; + +module_platform_driver(bd70528_regulator); + +MODULE_AUTHOR("Matti Vaittinen "); +MODULE_DESCRIPTION("BD70528 voltage regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index b8dcdc21dc22..b2191be49670 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -79,7 +79,7 @@ static int bd718xx_set_voltage_sel_pickable_restricted( return regulator_set_voltage_sel_pickable_regmap(rdev, sel); } -static struct regulator_ops bd718xx_pickable_range_ldo_ops = { +static const struct regulator_ops bd718xx_pickable_range_ldo_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -88,7 +88,7 @@ static struct regulator_ops bd718xx_pickable_range_ldo_ops = { .get_voltage_sel = regulator_get_voltage_sel_pickable_regmap, }; -static struct regulator_ops bd718xx_pickable_range_buck_ops = { +static const struct regulator_ops bd718xx_pickable_range_buck_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -98,7 +98,7 @@ static struct regulator_ops bd718xx_pickable_range_buck_ops = { .set_voltage_time_sel = regulator_set_voltage_time_sel, }; -static struct regulator_ops bd718xx_ldo_regulator_ops = { +static const struct regulator_ops bd718xx_ldo_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -107,7 +107,7 @@ static struct regulator_ops bd718xx_ldo_regulator_ops = { .get_voltage_sel = regulator_get_voltage_sel_regmap, }; -static struct regulator_ops 
bd718xx_ldo_regulator_nolinear_ops = { +static const struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -116,7 +116,7 @@ static struct regulator_ops bd718xx_ldo_regulator_nolinear_ops = { .get_voltage_sel = regulator_get_voltage_sel_regmap, }; -static struct regulator_ops bd718xx_buck_regulator_ops = { +static const struct regulator_ops bd718xx_buck_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -126,7 +126,7 @@ static struct regulator_ops bd718xx_buck_regulator_ops = { .set_voltage_time_sel = regulator_set_voltage_time_sel, }; -static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = { +static const struct regulator_ops bd718xx_buck_regulator_nolinear_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -137,7 +137,7 @@ static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = { .set_voltage_time_sel = regulator_set_voltage_time_sel, }; -static struct regulator_ops bd718xx_dvs_buck_regulator_ops = { +static const struct regulator_ops bd718xx_dvs_buck_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -350,6 +350,135 @@ static const struct reg_init bd71837_ldo6_inits[] = { }, }; +#define NUM_DVS_BUCKS 4 + +struct of_dvs_setting { + const char *prop; + unsigned int reg; +}; + +static int set_dvs_levels(const struct of_dvs_setting *dvs, + struct device_node *np, + const struct regulator_desc *desc, + struct regmap *regmap) +{ + int ret, i; + unsigned int uv; + + ret = of_property_read_u32(np, dvs->prop, &uv); + if (ret) { + if (ret != -EINVAL) + return ret; + return 0; + } + + for (i = 0; i < desc->n_voltages; i++) { + ret = regulator_desc_list_voltage_linear_range(desc, i); + if (ret < 0) + continue; + if (ret == uv) { + i <<= ffs(desc->vsel_mask) - 1; + ret = regmap_update_bits(regmap, dvs->reg, + DVS_BUCK_RUN_MASK, i); + break; + } + } + return ret; +} + +static int buck4_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = "rohm,dvs-run-voltage", + .reg = BD71837_REG_BUCK4_VOLT_RUN, + }, + }; + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} +static int buck3_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = "rohm,dvs-run-voltage", + .reg = BD71837_REG_BUCK3_VOLT_RUN, + }, + }; + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} + +static int buck2_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = "rohm,dvs-run-voltage", + .reg = BD718XX_REG_BUCK2_VOLT_RUN, + }, + { + .prop = "rohm,dvs-idle-voltage", + .reg = BD718XX_REG_BUCK2_VOLT_IDLE, + }, + }; + + + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} + +static int buck1_set_hw_dvs_levels(struct 
device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + const struct of_dvs_setting dvs[] = { + { + .prop = "rohm,dvs-run-voltage", + .reg = BD718XX_REG_BUCK1_VOLT_RUN, + }, + { + .prop = "rohm,dvs-idle-voltage", + .reg = BD718XX_REG_BUCK1_VOLT_IDLE, + }, + { + .prop = "rohm,dvs-suspend-voltage", + .reg = BD718XX_REG_BUCK1_VOLT_SUSP, + }, + }; + + for (i = 0; i < ARRAY_SIZE(dvs); i++) { + ret = set_dvs_levels(&dvs[i], np, desc, cfg->regmap); + if (ret) + break; + } + return ret; +} + static const struct bd718xx_regulator_data bd71847_regulators[] = { { .desc = { @@ -368,6 +497,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .enable_reg = BD718XX_REG_BUCK1_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck1_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK1_CTRL, @@ -391,6 +521,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = { .enable_reg = BD718XX_REG_BUCK2_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck2_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK2_CTRL, @@ -662,6 +793,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD718XX_REG_BUCK1_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck1_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK1_CTRL, @@ -685,6 +817,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD718XX_REG_BUCK2_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck2_set_hw_dvs_levels, }, .init = { .reg = BD718XX_REG_BUCK2_CTRL, @@ -708,6 +841,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD71837_REG_BUCK3_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck3_set_hw_dvs_levels, }, .init = { .reg = BD71837_REG_BUCK3_CTRL, @@ -731,6 +865,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { .enable_reg = BD71837_REG_BUCK4_CTRL, .enable_mask = BD718XX_BUCK_EN, .owner = THIS_MODULE, + .of_parse_cb = buck4_set_hw_dvs_levels, }, .init = { .reg = BD71837_REG_BUCK4_CTRL, @@ -1029,6 +1164,7 @@ static int bd718xx_probe(struct platform_device *pdev) }; int i, j, err; + bool use_snvs; mfd = dev_get_drvdata(pdev->dev.parent); if (!mfd) { @@ -1055,27 +1191,28 @@ static int bd718xx_probe(struct platform_device *pdev) BD718XX_REG_REGLOCK); } - /* At poweroff transition PMIC HW disables EN bit for regulators but - * leaves SEL bit untouched. So if state transition from POWEROFF - * is done to SNVS - then all power rails controlled by SW (having - * SEL bit set) stay disabled as EN is cleared. This may result boot - * failure if any crucial systems are powered by these rails. 
- * + use_snvs = of_property_read_bool(pdev->dev.parent->of_node, + "rohm,reset-snvs-powered"); + + /* * Change the next stage from poweroff to be READY instead of SNVS * for all reset types because OTP loading at READY will clear SEL * bit allowing HW defaults for power rails to be used */ - err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1, - BD718XX_ON_REQ_POWEROFF_MASK | - BD718XX_SWRESET_POWEROFF_MASK | - BD718XX_WDOG_POWEROFF_MASK | - BD718XX_KEY_L_POWEROFF_MASK, - BD718XX_POWOFF_TO_RDY); - if (err) { - dev_err(&pdev->dev, "Failed to change reset target\n"); - goto err; - } else { - dev_dbg(&pdev->dev, "Changed all resets from SVNS to READY\n"); + if (!use_snvs) { + err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1, + BD718XX_ON_REQ_POWEROFF_MASK | + BD718XX_SWRESET_POWEROFF_MASK | + BD718XX_WDOG_POWEROFF_MASK | + BD718XX_KEY_L_POWEROFF_MASK, + BD718XX_POWOFF_TO_RDY); + if (err) { + dev_err(&pdev->dev, "Failed to change reset target\n"); + goto err; + } else { + dev_dbg(&pdev->dev, + "Changed all resets from SVNS to READY\n"); + } } for (i = 0; i < pmic_regulators[mfd->chip_type].r_amount; i++) { @@ -1098,19 +1235,33 @@ static int bd718xx_probe(struct platform_device *pdev) err = PTR_ERR(rdev); goto err; } - /* Regulator register gets the regulator constraints and + + /* + * Regulator register gets the regulator constraints and * applies them (set_machine_constraints). This should have * turned the control register(s) to correct values and we * can now switch the control from PMIC state machine to the * register interface + * + * At poweroff transition PMIC HW disables EN bit for + * regulators but leaves SEL bit untouched. So if state + * transition from POWEROFF is done to SNVS - then all power + * rails controlled by SW (having SEL bit set) stay disabled + * as EN is cleared. This will result boot failure if any + * crucial systems are powered by these rails. 
We don't + * enable SW control for crucial regulators if snvs state is + * used */ - err = regmap_update_bits(mfd->regmap, r->init.reg, - r->init.mask, r->init.val); - if (err) { - dev_err(&pdev->dev, - "Failed to write BUCK/LDO SEL bit for (%s)\n", - desc->name); - goto err; + if (!use_snvs || !rdev->constraints->always_on || + !rdev->constraints->boot_on) { + err = regmap_update_bits(mfd->regmap, r->init.reg, + r->init.mask, r->init.val); + if (err) { + dev_err(&pdev->dev, + "Failed to take control for (%s)\n", + desc->name); + goto err; + } } for (j = 0; j < r->additional_init_amnt; j++) { err = regmap_update_bits(mfd->regmap, diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c index e12dd1f750f3..e690c2ce5b3c 100644 --- a/drivers/regulator/bd9571mwv-regulator.c +++ b/drivers/regulator/bd9571mwv-regulator.c @@ -100,7 +100,7 @@ static int bd9571mwv_reg_set_voltage_sel_regmap(struct regulator_dev *rdev, } /* Operations permitted on AVS voltage regulator */ -static struct regulator_ops avs_ops = { +static const struct regulator_ops avs_ops = { .set_voltage_sel = bd9571mwv_avs_set_voltage_sel_regmap, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = bd9571mwv_avs_get_voltage_sel_regmap, @@ -108,7 +108,7 @@ static struct regulator_ops avs_ops = { }; /* Operations permitted on voltage regulators */ -static struct regulator_ops reg_ops = { +static const struct regulator_ops reg_ops = { .set_voltage_sel = bd9571mwv_reg_set_voltage_sel_regmap, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -116,13 +116,13 @@ static struct regulator_ops reg_ops = { }; /* Operations permitted on voltage monitors */ -static struct regulator_ops vid_ops = { +static const struct regulator_ops vid_ops = { .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, }; -static struct regulator_desc regulators[] = { +static const struct regulator_desc regulators[] = { BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f, 0x80, 600000, 10000, 0x3c), BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf, diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b9d7b45c7295..68473d0cc57e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -82,7 +81,6 @@ struct regulator_enable_gpio { struct gpio_desc *gpiod; u32 enable_count; /* a number of enabled shared GPIO */ u32 request_count; /* a number of requested shared GPIO */ - unsigned int ena_gpio_invert:1; }; /* @@ -145,14 +143,6 @@ static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops) return false; } -static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev) -{ - if (rdev && rdev->supply) - return rdev->supply->rdev; - - return NULL; -} - /** * regulator_lock_nested - lock a single regulator * @rdev: regulator source @@ -326,7 +316,7 @@ err_unlock: * @rdev: regulator source * @ww_ctx: w/w mutex acquire context * - * Unlock all regulators related with rdev by coupling or suppling. + * Unlock all regulators related with rdev by coupling or supplying. 
*/ static void regulator_unlock_dependent(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) @@ -341,7 +331,7 @@ static void regulator_unlock_dependent(struct regulator_dev *rdev, * @ww_ctx: w/w mutex acquire context * * This function as a wrapper on regulator_lock_recursive(), which locks - * all regulators related with rdev by coupling or suppling. + * all regulators related with rdev by coupling or supplying. */ static void regulator_lock_dependent(struct regulator_dev *rdev, struct ww_acquire_ctx *ww_ctx) @@ -924,14 +914,14 @@ static int drms_uA_update(struct regulator_dev *rdev) int current_uA = 0, output_uV, input_uV, err; unsigned int mode; - lockdep_assert_held_once(&rdev->mutex.base); - /* * first check to see if we can set modes at all, otherwise just * tell the consumer everything is OK. */ - if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS)) + if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS)) { + rdev_dbg(rdev, "DRMS operation not allowed\n"); return 0; + } if (!rdev->desc->ops->get_optimum_mode && !rdev->desc->ops->set_load) @@ -1003,7 +993,7 @@ static int suspend_set_state(struct regulator_dev *rdev, if (rstate == NULL) return 0; - /* If we have no suspend mode configration don't set anything; + /* If we have no suspend mode configuration don't set anything; * only warn if the driver implements set_suspend_voltage or * set_suspend_mode callback. */ @@ -1131,7 +1121,7 @@ static int machine_constraints_voltage(struct regulator_dev *rdev, int current_uV = _regulator_get_voltage(rdev); if (current_uV == -ENOTRECOVERABLE) { - /* This regulator can't be read and must be initted */ + /* This regulator can't be read and must be initialized */ rdev_info(rdev, "Setting %d-%duV\n", rdev->constraints->min_uV, rdev->constraints->max_uV); @@ -1349,7 +1339,9 @@ static int set_machine_constraints(struct regulator_dev *rdev, * We'll only apply the initial system load if an * initial mode wasn't specified. */ + regulator_lock(rdev); drms_uA_update(rdev); + regulator_unlock(rdev); } if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable) @@ -1780,7 +1772,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) struct device *dev = rdev->dev.parent; int ret; - /* No supply to resovle? */ + /* No supply to resolve? 
*/ if (!rdev->supply_name) return 0; @@ -2058,15 +2050,7 @@ static void _regulator_put(struct regulator *regulator) debugfs_remove_recursive(regulator->debugfs); if (regulator->dev) { - int count = 0; - struct regulator *r; - - list_for_each_entry(r, &rdev->consumer_list, list) - if (r->dev == regulator->dev) - count++; - - if (count == 1) - device_link_remove(regulator->dev, &rdev->dev); + device_link_remove(regulator->dev, &rdev->dev); /* remove any sysfs entries */ sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name); @@ -2237,38 +2221,21 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev, { struct regulator_enable_gpio *pin; struct gpio_desc *gpiod; - int ret; - if (config->ena_gpiod) - gpiod = config->ena_gpiod; - else - gpiod = gpio_to_desc(config->ena_gpio); + gpiod = config->ena_gpiod; list_for_each_entry(pin, ®ulator_ena_gpio_list, list) { if (pin->gpiod == gpiod) { - rdev_dbg(rdev, "GPIO %d is already used\n", - config->ena_gpio); + rdev_dbg(rdev, "GPIO is already used\n"); goto update_ena_gpio_to_rdev; } } - if (!config->ena_gpiod) { - ret = gpio_request_one(config->ena_gpio, - GPIOF_DIR_OUT | config->ena_gpio_flags, - rdev_get_name(rdev)); - if (ret) - return ret; - } - pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL); - if (pin == NULL) { - if (!config->ena_gpiod) - gpio_free(config->ena_gpio); + if (pin == NULL) return -ENOMEM; - } pin->gpiod = gpiod; - pin->ena_gpio_invert = config->ena_gpio_invert; list_add(&pin->list, ®ulator_ena_gpio_list); update_ena_gpio_to_rdev: @@ -2289,7 +2256,6 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) if (pin->gpiod == rdev->ena_pin->gpiod) { if (pin->request_count <= 1) { pin->request_count = 0; - gpiod_put(pin->gpiod); list_del(&pin->list); kfree(pin); rdev->ena_pin = NULL; @@ -2319,8 +2285,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) if (enable) { /* Enable GPIO at initial use */ if (pin->enable_count == 0) - gpiod_set_value_cansleep(pin->gpiod, - !pin->ena_gpio_invert); + gpiod_set_value_cansleep(pin->gpiod, 1); pin->enable_count++; } else { @@ -2331,8 +2296,7 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) /* Disable GPIO if not used */ if (pin->enable_count <= 1) { - gpiod_set_value_cansleep(pin->gpiod, - pin->ena_gpio_invert); + gpiod_set_value_cansleep(pin->gpiod, 0); pin->enable_count = 0; } } @@ -2409,7 +2373,7 @@ static int _regulator_do_enable(struct regulator_dev *rdev) * timer wrapping. * in case of multiple timer wrapping, either it can be * detected by out-of-range remaining, or it cannot be - * detected and we gets a panelty of + * detected and we get a penalty of * _regulator_enable_delay(). */ remaining = intended - start_jiffy; @@ -2809,7 +2773,7 @@ static void regulator_disable_work(struct work_struct *work) /** * regulator_disable_deferred - disable regulator output with delay * @regulator: regulator source - * @ms: miliseconds until the regulator is disabled + * @ms: milliseconds until the regulator is disabled * * Execute regulator_disable() on the regulator after a delay. This * is intended for use with devices that require some time to quiesce. @@ -4943,7 +4907,7 @@ regulator_register(const struct regulator_desc *regulator_desc, * device tree until we have handled it over to the core. 
If the * config that was passed in to this function DOES NOT contain * a descriptor, and the config after this call DOES contain - * a descriptor, we definately got one from parsing the device + * a descriptor, we definitely got one from parsing the device * tree. */ if (!cfg->ena_gpiod && config->ena_gpiod) @@ -4975,15 +4939,13 @@ regulator_register(const struct regulator_desc *regulator_desc, goto clean; } - if (config->ena_gpiod || - ((config->ena_gpio || config->ena_gpio_initialized) && - gpio_is_valid(config->ena_gpio))) { + if (config->ena_gpiod) { mutex_lock(®ulator_list_mutex); ret = regulator_ena_gpio_request(rdev, config); mutex_unlock(®ulator_list_mutex); if (ret != 0) { - rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", - config->ena_gpio, ret); + rdev_err(rdev, "Failed to request enable GPIO: %d\n", + ret); goto clean; } /* The regulator core took over the GPIO descriptor */ @@ -5251,6 +5213,12 @@ struct device *rdev_get_dev(struct regulator_dev *rdev) } EXPORT_SYMBOL_GPL(rdev_get_dev); +struct regmap *rdev_get_regmap(struct regulator_dev *rdev) +{ + return rdev->regmap; +} +EXPORT_SYMBOL_GPL(rdev_get_regmap); + void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data) { return reg_init_data->driver_data; diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c index 2131457937b7..e7dab5c4d1d1 100644 --- a/drivers/regulator/cpcap-regulator.c +++ b/drivers/regulator/cpcap-regulator.c @@ -100,12 +100,11 @@ struct cpcap_regulator { struct regulator_desc rdesc; const u16 assign_reg; const u16 assign_mask; - const u16 vsel_shift; }; #define CPCAP_REG(_ID, reg, assignment_reg, assignment_mask, val_tbl, \ - mode_mask, volt_mask, volt_shft, \ - mode_val, off_val, volt_trans_time) { \ + mode_mask, volt_mask, mode_val, off_val, \ + volt_trans_time) { \ .rdesc = { \ .name = #_ID, \ .of_match = of_match_ptr(#_ID), \ @@ -127,7 +126,6 @@ struct cpcap_regulator { }, \ .assign_reg = (assignment_reg), \ .assign_mask = (assignment_mask), \ - .vsel_shift = (volt_shft), \ } struct cpcap_ddata { @@ -336,155 +334,155 @@ static const unsigned int vaudio_val_tbl[] = { 0, 2775000, }; * SW1 to SW4 and SW6 seems to be unused for mapphone. Note that VSIM and * VSIMCARD have a shared resource assignment bit. 
*/ -static struct cpcap_regulator omap4_regulators[] = { +static const struct cpcap_regulator omap4_regulators[] = { CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW1_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW2_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW3_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW4_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW5_SEL, sw5_val_tbl, - 0x28, 0, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0), + 0x28, 0, 0x20 | CPCAP_REG_OFF_MODE_SEC, 0, 0), CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW6_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VCAM_SEL, vcam_val_tbl, - 0x87, 0x30, 4, 0x3, 0, 420), + 0x87, 0x30, 0x3, 0, 420), CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VCSI_SEL, vcsi_val_tbl, - 0x47, 0x10, 4, 0x43, 0x41, 350), + 0x47, 0x10, 0x43, 0x41, 350), CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VDAC_SEL, vdac_val_tbl, - 0x87, 0x30, 4, 0x3, 0, 420), + 0x87, 0x30, 0x3, 0, 420), CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VDIG_SEL, vdig_val_tbl, - 0x87, 0x30, 4, 0x82, 0, 420), + 0x87, 0x30, 0x82, 0, 420), CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl, - 0x80, 0xf, 0, 0x80, 0, 420), + 0x80, 0xf, 0x80, 0, 420), CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl, - 0x17, 0, 0, 0, 0x12, 0), + 0x17, 0, 0, 0x12, 0), CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl, - 0x87, 0x38, 3, 0x82, 0, 420), + 0x87, 0x38, 0x82, 0, 420), CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VPLL_SEL, vpll_val_tbl, - 0x43, 0x18, 3, 0x2, 0, 420), + 0x43, 0x18, 0x2, 0, 420), CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF1_SEL, vrf1_val_tbl, - 0xac, 0x2, 1, 0x4, 0, 10), + 0xac, 0x2, 0x4, 0, 10), CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF2_SEL, vrf2_val_tbl, - 0x23, 0x8, 3, 0, 0, 10), + 0x23, 0x8, 0, 0, 10), CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl, - 0x23, 0x8, 3, 0, 0, 420), + 0x23, 0x8, 0, 0, 420), CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl, - 0x47, 0x10, 4, 0, 0, 420), + 0x47, 0x10, 0, 0, 420), CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl, - 0x20c, 0xc0, 6, 0x20c, 0, 420), + 0x20c, 0xc0, 0x20c, 0, 420), CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsim_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 420), + 0x23, 0x8, 0x3, 0, 420), CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsimcard_val_tbl, - 0x1e80, 0x8, 3, 0x1e00, 0, 420), + 0x1e80, 0x8, 0x1e00, 0, 420), CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VVIB_SEL, vvib_val_tbl, - 0x1, 0xc, 2, 0x1, 0, 500), + 0x1, 0xc, 0x1, 0, 500), CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VUSB_SEL, vusb_val_tbl, - 0x11c, 0x40, 6, 0xc, 0, 0), + 0x11c, 0x40, 0xc, 0, 0), CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4, CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl, - 0x16, 0x1, 0, 0x4, 0, 0), + 0x16, 0x1, 0x4, 0, 0), { /* 
sentinel */ }, }; -static struct cpcap_regulator xoom_regulators[] = { +static const struct cpcap_regulator xoom_regulators[] = { CPCAP_REG(SW1, CPCAP_REG_S1C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW1_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW2, CPCAP_REG_S2C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW2_SEL, sw2_sw4_val_tbl, - 0xf00, 0x7f, 0, 0x800, 0, 120), + 0xf00, 0x7f, 0x800, 0, 120), CPCAP_REG(SW3, CPCAP_REG_S3C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW3_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(SW4, CPCAP_REG_S4C1, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW4_SEL, sw2_sw4_val_tbl, - 0xf00, 0x7f, 0, 0x900, 0, 100), + 0xf00, 0x7f, 0x900, 0, 100), CPCAP_REG(SW5, CPCAP_REG_S5C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW5_SEL, sw5_val_tbl, - 0x2a, 0, 0, 0x22, 0, 0), + 0x2a, 0, 0x22, 0, 0), CPCAP_REG(SW6, CPCAP_REG_S6C, CPCAP_REG_ASSIGN2, CPCAP_BIT_SW6_SEL, unknown_val_tbl, - 0, 0, 0, 0, 0, 0), + 0, 0, 0, 0, 0), CPCAP_REG(VCAM, CPCAP_REG_VCAMC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VCAM_SEL, vcam_val_tbl, - 0x87, 0x30, 4, 0x7, 0, 420), + 0x87, 0x30, 0x7, 0, 420), CPCAP_REG(VCSI, CPCAP_REG_VCSIC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VCSI_SEL, vcsi_val_tbl, - 0x47, 0x10, 4, 0x7, 0, 350), + 0x47, 0x10, 0x7, 0, 350), CPCAP_REG(VDAC, CPCAP_REG_VDACC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VDAC_SEL, vdac_val_tbl, - 0x87, 0x30, 4, 0x3, 0, 420), + 0x87, 0x30, 0x3, 0, 420), CPCAP_REG(VDIG, CPCAP_REG_VDIGC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VDIG_SEL, vdig_val_tbl, - 0x87, 0x30, 4, 0x5, 0, 420), + 0x87, 0x30, 0x5, 0, 420), CPCAP_REG(VFUSE, CPCAP_REG_VFUSEC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VFUSE_SEL, vfuse_val_tbl, - 0x80, 0xf, 0, 0x80, 0, 420), + 0x80, 0xf, 0x80, 0, 420), CPCAP_REG(VHVIO, CPCAP_REG_VHVIOC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VHVIO_SEL, vhvio_val_tbl, - 0x17, 0, 0, 0x2, 0, 0), + 0x17, 0, 0x2, 0, 0), CPCAP_REG(VSDIO, CPCAP_REG_VSDIOC, CPCAP_REG_ASSIGN2, CPCAP_BIT_VSDIO_SEL, vsdio_val_tbl, - 0x87, 0x38, 3, 0x2, 0, 420), + 0x87, 0x38, 0x2, 0, 420), CPCAP_REG(VPLL, CPCAP_REG_VPLLC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VPLL_SEL, vpll_val_tbl, - 0x43, 0x18, 3, 0x1, 0, 420), + 0x43, 0x18, 0x1, 0, 420), CPCAP_REG(VRF1, CPCAP_REG_VRF1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF1_SEL, vrf1_val_tbl, - 0xac, 0x2, 1, 0xc, 0, 10), + 0xac, 0x2, 0xc, 0, 10), CPCAP_REG(VRF2, CPCAP_REG_VRF2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRF2_SEL, vrf2_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 10), + 0x23, 0x8, 0x3, 0, 10), CPCAP_REG(VRFREF, CPCAP_REG_VRFREFC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VRFREF_SEL, vrfref_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 420), + 0x23, 0x8, 0x3, 0, 420), CPCAP_REG(VWLAN1, CPCAP_REG_VWLAN1C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN1_SEL, vwlan1_val_tbl, - 0x47, 0x10, 4, 0x5, 0, 420), + 0x47, 0x10, 0x5, 0, 420), CPCAP_REG(VWLAN2, CPCAP_REG_VWLAN2C, CPCAP_REG_ASSIGN3, CPCAP_BIT_VWLAN2_SEL, vwlan2_val_tbl, - 0x20c, 0xc0, 6, 0x8, 0, 420), + 0x20c, 0xc0, 0x8, 0, 420), CPCAP_REG(VSIM, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsim_val_tbl, - 0x23, 0x8, 3, 0x3, 0, 420), + 0x23, 0x8, 0x3, 0, 420), CPCAP_REG(VSIMCARD, CPCAP_REG_VSIMC, CPCAP_REG_ASSIGN3, 0xffff, vsimcard_val_tbl, - 0x1e80, 0x8, 3, 0x1e00, 0, 420), + 0x1e80, 0x8, 0x1e00, 0, 420), CPCAP_REG(VVIB, CPCAP_REG_VVIBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VVIB_SEL, vvib_val_tbl, - 0x1, 0xc, 2, 0, 0x1, 500), + 0x1, 0xc, 0, 0x1, 500), CPCAP_REG(VUSB, CPCAP_REG_VUSBC, CPCAP_REG_ASSIGN3, CPCAP_BIT_VUSB_SEL, vusb_val_tbl, - 0x11c, 0x40, 6, 0xc, 0, 0), + 0x11c, 0x40, 0xc, 0, 0), CPCAP_REG(VAUDIO, CPCAP_REG_VAUDIOC, CPCAP_REG_ASSIGN4, CPCAP_BIT_VAUDIO_SEL, vaudio_val_tbl, - 0x16, 0x1, 0, 0x4, 0, 0), + 0x16, 0x1, 0x4, 0, 0), { 
/* sentinel */ }, }; diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c index 207cb3859dcc..cefa3558236d 100644 --- a/drivers/regulator/da9052-regulator.c +++ b/drivers/regulator/da9052-regulator.c @@ -290,10 +290,10 @@ static const struct regulator_ops da9052_ldo_ops = { .disable = regulator_disable_regmap, }; -#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \ +#define DA9052_LDO(_id, _name, step, min, max, sbits, ebits, abits) \ {\ .reg_desc = {\ - .name = #_id,\ + .name = #_name,\ .ops = &da9052_ldo_ops,\ .type = REGULATOR_VOLTAGE,\ .id = DA9052_ID_##_id,\ @@ -310,10 +310,10 @@ static const struct regulator_ops da9052_ldo_ops = { .activate_bit = (abits),\ } -#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \ +#define DA9052_DCDC(_id, _name, step, min, max, sbits, ebits, abits) \ {\ .reg_desc = {\ - .name = #_id,\ + .name = #_name,\ .ops = &da9052_dcdc_ops,\ .type = REGULATOR_VOLTAGE,\ .id = DA9052_ID_##_id,\ @@ -331,37 +331,37 @@ static const struct regulator_ops da9052_ldo_ops = { } static struct da9052_regulator_info da9052_regulator_info[] = { - DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), - DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), - DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), - DA9052_DCDC(BUCK4, 50, 1800, 3600, 5, 6, 0), - DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0), - DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), - DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), - DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0), - DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0), - DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0), + DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), + DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), + DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), + DA9052_DCDC(BUCK4, buck4, 50, 1800, 3600, 5, 6, 0), + DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0), + DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), + DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), + DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0), + DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0), + DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0), }; static struct da9052_regulator_info da9053_regulator_info[] = { - DA9052_DCDC(BUCK1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO), - DA9052_DCDC(BUCK2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), - DA9052_DCDC(BUCK3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), - DA9052_DCDC(BUCK4, 25, 950, 2525, 6, 6, 0), - DA9052_LDO(LDO1, 50, 600, 1800, 5, 6, 0), - DA9052_LDO(LDO2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), - DA9052_LDO(LDO3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), - DA9052_LDO(LDO4, 25, 1725, 3300, 6, 6, 0), - DA9052_LDO(LDO5, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO6, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO7, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO8, 50, 1200, 3600, 6, 6, 0), - DA9052_LDO(LDO9, 50, 1250, 3650, 6, 6, 0), - DA9052_LDO(LDO10, 50, 1200, 3600, 6, 6, 0), + DA9052_DCDC(BUCK1, buck1, 25, 500, 2075, 6, 6, 
DA9052_SUPPLY_VBCOREGO), + DA9052_DCDC(BUCK2, buck2, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO), + DA9052_DCDC(BUCK3, buck3, 25, 950, 2525, 6, 6, DA9052_SUPPLY_VBMEMGO), + DA9052_DCDC(BUCK4, buck4, 25, 950, 2525, 6, 6, 0), + DA9052_LDO(LDO1, ldo1, 50, 600, 1800, 5, 6, 0), + DA9052_LDO(LDO2, ldo2, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO), + DA9052_LDO(LDO3, ldo3, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO), + DA9052_LDO(LDO4, ldo4, 25, 1725, 3300, 6, 6, 0), + DA9052_LDO(LDO5, ldo5, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO6, ldo6, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO7, ldo7, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO8, ldo8, 50, 1200, 3600, 6, 6, 0), + DA9052_LDO(LDO9, ldo9, 50, 1250, 3650, 6, 6, 0), + DA9052_LDO(LDO10, ldo10, 50, 1200, 3600, 6, 6, 0), }; static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id, diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c index 588c3d2445cf..3c6fac793658 100644 --- a/drivers/regulator/da9055-regulator.c +++ b/drivers/regulator/da9055-regulator.c @@ -48,7 +48,9 @@ #define DA9055_ID_LDO6 7 /* DA9055 BUCK current limit */ -static const int da9055_current_limits[] = { 500000, 600000, 700000, 800000 }; +static const unsigned int da9055_current_limits[] = { + 500000, 600000, 700000, 800000 +}; struct da9055_conf_reg { int reg; @@ -169,39 +171,6 @@ static int da9055_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) val << volt.sl_shift); } -static int da9055_buck_get_current_limit(struct regulator_dev *rdev) -{ - struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; - int ret; - - ret = da9055_reg_read(regulator->da9055, DA9055_REG_BUCK_LIM); - if (ret < 0) - return ret; - - ret &= info->mode.mask; - return da9055_current_limits[ret >> info->mode.shift]; -} - -static int da9055_buck_set_current_limit(struct regulator_dev *rdev, int min_uA, - int max_uA) -{ - struct da9055_regulator *regulator = rdev_get_drvdata(rdev); - struct da9055_regulator_info *info = regulator->info; - int i; - - for (i = ARRAY_SIZE(da9055_current_limits) - 1; i >= 0; i--) { - if ((min_uA <= da9055_current_limits[i]) && - (da9055_current_limits[i] <= max_uA)) - return da9055_reg_update(regulator->da9055, - DA9055_REG_BUCK_LIM, - info->mode.mask, - i << info->mode.shift); - } - - return -EINVAL; -} - static int da9055_regulator_get_voltage_sel(struct regulator_dev *rdev) { struct da9055_regulator *regulator = rdev_get_drvdata(rdev); @@ -329,8 +298,8 @@ static const struct regulator_ops da9055_buck_ops = { .get_mode = da9055_buck_get_mode, .set_mode = da9055_buck_set_mode, - .get_current_limit = da9055_buck_get_current_limit, - .set_current_limit = da9055_buck_set_current_limit, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, .get_voltage_sel = da9055_regulator_get_voltage_sel, .set_voltage_sel = da9055_regulator_set_voltage_sel, @@ -407,6 +376,10 @@ static const struct regulator_ops da9055_ldo_ops = { .uV_step = (step) * 1000,\ .linear_min_sel = (voffset),\ .owner = THIS_MODULE,\ + .curr_table = da9055_current_limits,\ + .n_current_limits = ARRAY_SIZE(da9055_current_limits),\ + .csel_reg = DA9055_REG_BUCK_LIM,\ + .csel_mask = (mbits),\ },\ .conf = {\ .reg = DA9055_REG_BCORE_CONT + DA9055_ID_##_id, \ @@ -457,7 +430,6 @@ static int da9055_gpio_init(struct da9055_regulator *regulator, int gpio_mux = pdata->gpio_ren[id]; config->ena_gpiod = pdata->ena_gpiods[id]; - 
config->ena_gpio_invert = 1; /* * GPI pin is muxed with regulator to control the diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c index 34a70d9dc450..b064d8a19d4c 100644 --- a/drivers/regulator/da9062-regulator.c +++ b/drivers/regulator/da9062-regulator.c @@ -126,7 +126,7 @@ static int da9062_set_current_limit(struct regulator_dev *rdev, const struct da9062_regulator_info *rinfo = regl->info; int n, tval; - for (n = 0; n < rinfo->n_current_limits; n++) { + for (n = rinfo->n_current_limits - 1; n >= 0; n--) { tval = rinfo->current_limits[n]; if (tval >= min_ua && tval <= max_ua) return regmap_field_write(regl->ilimit, n); @@ -992,7 +992,6 @@ static int da9062_regulator_probe(struct platform_device *pdev) struct regulator_config config = { }; const struct da9062_regulator_info *rinfo; int irq, n, ret; - size_t size; int max_regulators; switch (chip->chip_type) { @@ -1010,9 +1009,8 @@ static int da9062_regulator_probe(struct platform_device *pdev) } /* Allocate memory required by usable regulators */ - size = sizeof(struct da9062_regulators) + - max_regulators * sizeof(struct da9062_regulator); - regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + regulators = devm_kzalloc(&pdev->dev, struct_size(regulators, regulator, + max_regulators), GFP_KERNEL); if (!regulators) return -ENOMEM; @@ -1029,31 +1027,50 @@ static int da9062_regulator_probe(struct platform_device *pdev) regl->desc.type = REGULATOR_VOLTAGE; regl->desc.owner = THIS_MODULE; - if (regl->info->mode.reg) + if (regl->info->mode.reg) { regl->mode = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->mode); - if (regl->info->suspend.reg) + if (IS_ERR(regl->mode)) + return PTR_ERR(regl->mode); + } + + if (regl->info->suspend.reg) { regl->suspend = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->suspend); - if (regl->info->sleep.reg) + if (IS_ERR(regl->suspend)) + return PTR_ERR(regl->suspend); + } + + if (regl->info->sleep.reg) { regl->sleep = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->sleep); - if (regl->info->suspend_sleep.reg) + if (IS_ERR(regl->sleep)) + return PTR_ERR(regl->sleep); + } + + if (regl->info->suspend_sleep.reg) { regl->suspend_sleep = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->suspend_sleep); - if (regl->info->ilimit.reg) + if (IS_ERR(regl->suspend_sleep)) + return PTR_ERR(regl->suspend_sleep); + } + + if (regl->info->ilimit.reg) { regl->ilimit = devm_regmap_field_alloc( &pdev->dev, chip->regmap, regl->info->ilimit); + if (IS_ERR(regl->ilimit)) + return PTR_ERR(regl->ilimit); + } /* Register regulator */ memset(&config, 0, sizeof(config)); diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c index 8cbcd2a3eb20..2b0c7a85306a 100644 --- a/drivers/regulator/da9063-regulator.c +++ b/drivers/regulator/da9063-regulator.c @@ -167,7 +167,7 @@ static int da9063_set_current_limit(struct regulator_dev *rdev, const struct da9063_regulator_info *rinfo = regl->info; int n, tval; - for (n = 0; n < rinfo->n_current_limits; n++) { + for (n = rinfo->n_current_limits - 1; n >= 0; n--) { tval = rinfo->current_limits[n]; if (tval >= min_uA && tval <= max_uA) return regmap_field_write(regl->ilimit, n); @@ -739,7 +739,6 @@ static int da9063_regulator_probe(struct platform_device *pdev) struct regulator_config config; bool bcores_merged, bmem_bio_merged; int id, irq, n, n_regulators, ret, val; - size_t size; regl_pdata = da9063_pdata ? 
da9063_pdata->regulators_pdata : NULL; @@ -784,9 +783,8 @@ static int da9063_regulator_probe(struct platform_device *pdev) n_regulators--; /* remove BMEM_BIO_MERGED */ /* Allocate memory required by usable regulators */ - size = sizeof(struct da9063_regulators) + - n_regulators * sizeof(struct da9063_regulator); - regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + regulators = devm_kzalloc(&pdev->dev, struct_size(regulators, + regulator, n_regulators), GFP_KERNEL); if (!regulators) return -ENOMEM; @@ -835,21 +833,40 @@ static int da9063_regulator_probe(struct platform_device *pdev) regl->desc.type = REGULATOR_VOLTAGE; regl->desc.owner = THIS_MODULE; - if (regl->info->mode.reg) + if (regl->info->mode.reg) { regl->mode = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->mode); - if (regl->info->suspend.reg) + if (IS_ERR(regl->mode)) + return PTR_ERR(regl->mode); + } + + if (regl->info->suspend.reg) { regl->suspend = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend); - if (regl->info->sleep.reg) + if (IS_ERR(regl->suspend)) + return PTR_ERR(regl->suspend); + } + + if (regl->info->sleep.reg) { regl->sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->sleep); - if (regl->info->suspend_sleep.reg) + if (IS_ERR(regl->sleep)) + return PTR_ERR(regl->sleep); + } + + if (regl->info->suspend_sleep.reg) { regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->suspend_sleep); - if (regl->info->ilimit.reg) + if (IS_ERR(regl->suspend_sleep)) + return PTR_ERR(regl->suspend_sleep); + } + + if (regl->info->ilimit.reg) { regl->ilimit = devm_regmap_field_alloc(&pdev->dev, da9063->regmap, regl->info->ilimit); + if (IS_ERR(regl->ilimit)) + return PTR_ERR(regl->ilimit); + } /* Register regulator */ memset(&config, 0, sizeof(config)); diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index 84dba64ed11e..528303771723 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -41,10 +41,6 @@ static const struct regmap_config da9210_regmap_config = { .val_bits = 8, }; -static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA, - int max_uA); -static int da9210_get_current_limit(struct regulator_dev *rdev); - static const struct regulator_ops da9210_buck_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, @@ -52,8 +48,8 @@ static const struct regulator_ops da9210_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = da9210_set_current_limit, - .get_current_limit = da9210_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; /* Default limits measured in millivolts and milliamps */ @@ -62,7 +58,7 @@ static const struct regulator_ops da9210_buck_ops = { #define DA9210_STEP_MV 10 /* Current limits for buck (uA) indices corresponds with register values */ -static const int da9210_buck_limits[] = { +static const unsigned int da9210_buck_limits[] = { 1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000 }; @@ -80,47 +76,12 @@ static const struct regulator_desc da9210_reg = { .enable_reg = DA9210_REG_BUCK_CONT, .enable_mask = DA9210_BUCK_EN, .owner = THIS_MODULE, + .curr_table = da9210_buck_limits, + 
.n_current_limits = ARRAY_SIZE(da9210_buck_limits), + .csel_reg = DA9210_REG_BUCK_ILIM, + .csel_mask = DA9210_BUCK_ILIM_MASK, }; -static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA, - int max_uA) -{ - struct da9210 *chip = rdev_get_drvdata(rdev); - unsigned int sel; - int i; - - /* search for closest to maximum */ - for (i = ARRAY_SIZE(da9210_buck_limits)-1; i >= 0; i--) { - if (min_uA <= da9210_buck_limits[i] && - max_uA >= da9210_buck_limits[i]) { - sel = i; - sel = sel << DA9210_BUCK_ILIM_SHIFT; - return regmap_update_bits(chip->regmap, - DA9210_REG_BUCK_ILIM, - DA9210_BUCK_ILIM_MASK, sel); - } - } - - return -EINVAL; -} - -static int da9210_get_current_limit(struct regulator_dev *rdev) -{ - struct da9210 *chip = rdev_get_drvdata(rdev); - unsigned int data; - unsigned int sel; - int ret; - - ret = regmap_read(chip->regmap, DA9210_REG_BUCK_ILIM, &data); - if (ret < 0) - return ret; - - /* select one of 16 values: 0000 (1600mA) to 1111 (4600mA) */ - sel = (data & DA9210_BUCK_ILIM_MASK) >> DA9210_BUCK_ILIM_SHIFT; - - return da9210_buck_limits[sel]; -} - static irqreturn_t da9210_irq_handler(int irq, void *data) { struct da9210 *chip = data; diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index a3bc8037153e..771a06d1900d 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -40,7 +40,6 @@ /* VSEL bit definitions */ #define VSEL_BUCK_EN (1 << 7) #define VSEL_MODE (1 << 6) -#define VSEL_NSEL_MASK 0x3F /* Chip ID and Verison */ #define DIE_ID 0x0F /* ID1 */ #define DIE_REV 0x0F /* ID2 */ @@ -49,14 +48,26 @@ #define CTL_SLEW_MASK (0x7 << 4) #define CTL_SLEW_SHIFT 4 #define CTL_RESET (1 << 2) +#define CTL_MODE_VSEL0_MODE BIT(0) +#define CTL_MODE_VSEL1_MODE BIT(1) #define FAN53555_NVOLTAGES 64 /* Numbers of voltages */ +#define FAN53526_NVOLTAGES 128 enum fan53555_vendor { - FAN53555_VENDOR_FAIRCHILD = 0, + FAN53526_VENDOR_FAIRCHILD = 0, + FAN53555_VENDOR_FAIRCHILD, FAN53555_VENDOR_SILERGY, }; +enum { + FAN53526_CHIP_ID_01 = 1, +}; + +enum { + FAN53526_CHIP_REV_08 = 8, +}; + /* IC Type */ enum { FAN53555_CHIP_ID_00 = 0, @@ -94,8 +105,12 @@ struct fan53555_device_info { /* Voltage range and step(linear) */ unsigned int vsel_min; unsigned int vsel_step; + unsigned int vsel_count; /* Voltage slew rate limiting */ unsigned int slew_rate; + /* Mode */ + unsigned int mode_reg; + unsigned int mode_mask; /* Sleep voltage cache */ unsigned int sleep_vol_cache; }; @@ -111,7 +126,7 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV) if (ret < 0) return ret; ret = regmap_update_bits(di->regmap, di->sleep_reg, - VSEL_NSEL_MASK, ret); + di->desc.vsel_mask, ret); if (ret < 0) return ret; /* Cache the sleep voltage setting. 
@@ -143,11 +158,11 @@ static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode) switch (mode) { case REGULATOR_MODE_FAST: - regmap_update_bits(di->regmap, di->vol_reg, - VSEL_MODE, VSEL_MODE); + regmap_update_bits(di->regmap, di->mode_reg, + di->mode_mask, di->mode_mask); break; case REGULATOR_MODE_NORMAL: - regmap_update_bits(di->regmap, di->vol_reg, VSEL_MODE, 0); + regmap_update_bits(di->regmap, di->vol_reg, di->mode_mask, 0); break; default: return -EINVAL; @@ -161,10 +176,10 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev) unsigned int val; int ret = 0; - ret = regmap_read(di->regmap, di->vol_reg, &val); + ret = regmap_read(di->regmap, di->mode_reg, &val); if (ret < 0) return ret; - if (val & VSEL_MODE) + if (val & di->mode_mask) return REGULATOR_MODE_FAST; else return REGULATOR_MODE_NORMAL; @@ -219,6 +234,34 @@ static const struct regulator_ops fan53555_regulator_ops = { .set_suspend_disable = fan53555_set_suspend_disable, }; +static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di) +{ + /* Init voltage range and step */ + switch (di->chip_id) { + case FAN53526_CHIP_ID_01: + switch (di->chip_rev) { + case FAN53526_CHIP_REV_08: + di->vsel_min = 600000; + di->vsel_step = 6250; + break; + default: + dev_err(di->dev, + "Chip ID %d with rev %d not supported!\n", + di->chip_id, di->chip_rev); + return -EINVAL; + } + break; + default: + dev_err(di->dev, + "Chip ID %d not supported!\n", di->chip_id); + return -EINVAL; + } + + di->vsel_count = FAN53526_NVOLTAGES; + + return 0; +} + static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) { /* Init voltage range and step */ @@ -257,6 +300,8 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) return -EINVAL; } + di->vsel_count = FAN53555_NVOLTAGES; + return 0; } @@ -274,6 +319,8 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di) return -EINVAL; } + di->vsel_count = FAN53555_NVOLTAGES; + return 0; } @@ -302,7 +349,35 @@ static int fan53555_device_setup(struct fan53555_device_info *di, return -EINVAL; } + /* Setup mode control register */ + switch (di->vendor) { + case FAN53526_VENDOR_FAIRCHILD: + di->mode_reg = FAN53555_CONTROL; + + switch (pdata->sleep_vsel_id) { + case FAN53555_VSEL_ID_0: + di->mode_mask = CTL_MODE_VSEL1_MODE; + break; + case FAN53555_VSEL_ID_1: + di->mode_mask = CTL_MODE_VSEL0_MODE; + break; + } + break; + case FAN53555_VENDOR_FAIRCHILD: + case FAN53555_VENDOR_SILERGY: + di->mode_reg = di->vol_reg; + di->mode_mask = VSEL_MODE; + break; + default: + dev_err(di->dev, "vendor %d not supported!\n", di->vendor); + return -EINVAL; + } + + /* Setup voltage range */ switch (di->vendor) { + case FAN53526_VENDOR_FAIRCHILD: + ret = fan53526_voltages_setup_fairchild(di); + break; case FAN53555_VENDOR_FAIRCHILD: ret = fan53555_voltages_setup_fairchild(di); break; @@ -326,13 +401,13 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, rdesc->supply_name = "vin"; rdesc->ops = &fan53555_regulator_ops; rdesc->type = REGULATOR_VOLTAGE; - rdesc->n_voltages = FAN53555_NVOLTAGES; + rdesc->n_voltages = di->vsel_count; rdesc->enable_reg = di->vol_reg; rdesc->enable_mask = VSEL_BUCK_EN; rdesc->min_uV = di->vsel_min; rdesc->uV_step = di->vsel_step; rdesc->vsel_reg = di->vol_reg; - rdesc->vsel_mask = VSEL_NSEL_MASK; + rdesc->vsel_mask = di->vsel_count - 1; rdesc->owner = THIS_MODULE; di->rdev = devm_regulator_register(di->dev, &di->desc, config); @@ -368,6 +443,9 @@ static struct 
fan53555_platform_data *fan53555_parse_dt(struct device *dev, static const struct of_device_id fan53555_dt_ids[] = { { + .compatible = "fcs,fan53526", + .data = (void *)FAN53526_VENDOR_FAIRCHILD, + }, { .compatible = "fcs,fan53555", .data = (void *)FAN53555_VENDOR_FAIRCHILD }, { @@ -412,11 +490,13 @@ static int fan53555_regulator_probe(struct i2c_client *client, } else { /* if no ramp constraint set, get the pdata ramp_delay */ if (!di->regulator->constraints.ramp_delay) { - int slew_idx = (pdata->slew_rate & 0x7) - ? pdata->slew_rate : 0; + if (pdata->slew_rate >= ARRAY_SIZE(slew_rates)) { + dev_err(&client->dev, "Invalid slew_rate\n"); + return -EINVAL; + } di->regulator->constraints.ramp_delay - = slew_rates[slew_idx]; + = slew_rates[pdata->slew_rate]; } di->vendor = id->driver_data; @@ -467,6 +547,9 @@ static int fan53555_regulator_probe(struct i2c_client *client, static const struct i2c_device_id fan53555_id[] = { { + .name = "fan53526", + .driver_data = FAN53526_VENDOR_FAIRCHILD + }, { .name = "fan53555", .driver_data = FAN53555_VENDOR_FAIRCHILD }, { diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index 9abdb9130766..b5afc9db2c61 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -79,15 +79,6 @@ of_get_fixed_voltage_config(struct device *dev, of_property_read_u32(np, "startup-delay-us", &config->startup_delay); - /* - * FIXME: we pulled active low/high and open drain handling into - * gpiolib so it will be handled there. Delete this in the second - * step when we also remove the custom inversion handling for all - * legacy boardfiles. - */ - config->enable_high = 1; - config->gpio_is_open_drain = 0; - if (of_find_property(np, "vin-supply", NULL)) config->input_supply = "vin"; @@ -151,24 +142,14 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) drvdata->desc.fixed_uV = config->microvolts; - cfg.ena_gpio_invert = !config->enable_high; - if (config->enabled_at_boot) { - if (config->enable_high) - gflags = GPIOD_OUT_HIGH; - else - gflags = GPIOD_OUT_LOW; - } else { - if (config->enable_high) - gflags = GPIOD_OUT_LOW; - else - gflags = GPIOD_OUT_HIGH; - } - if (config->gpio_is_open_drain) { - if (gflags == GPIOD_OUT_HIGH) - gflags = GPIOD_OUT_HIGH_OPEN_DRAIN; - else - gflags = GPIOD_OUT_LOW_OPEN_DRAIN; - } + /* + * The signal will be inverted by the GPIO core if flagged so in the + * decriptor. 
+ */ + if (config->enabled_at_boot) + gflags = GPIOD_OUT_HIGH; + else + gflags = GPIOD_OUT_LOW; /* * Some fixed regulators share the enable line between two diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index b2f5ec4f658a..6157001df0a4 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -30,16 +30,15 @@ #include #include #include -#include +#include #include #include -#include struct gpio_regulator_data { struct regulator_desc desc; struct regulator_dev *dev; - struct gpio *gpios; + struct gpio_desc **gpiods; int nr_gpios; struct gpio_regulator_state *states; @@ -82,7 +81,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev, for (ptr = 0; ptr < data->nr_gpios; ptr++) { state = (target & (1 << ptr)) >> ptr; - gpio_set_value_cansleep(data->gpios[ptr].gpio, state); + gpiod_set_value_cansleep(data->gpiods[ptr], state); } data->state = target; @@ -119,7 +118,7 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev, for (ptr = 0; ptr < data->nr_gpios; ptr++) { state = (target & (1 << ptr)) >> ptr; - gpio_set_value_cansleep(data->gpios[ptr].gpio, state); + gpiod_set_value_cansleep(data->gpiods[ptr], state); } data->state = target; @@ -138,7 +137,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np, { struct gpio_regulator_config *config; const char *regtype; - int proplen, gpio, i; + int proplen, i; + int ngpios; int ret; config = devm_kzalloc(dev, @@ -153,59 +153,36 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np, config->supply_name = config->init_data->constraints.name; - if (of_property_read_bool(np, "enable-active-high")) - config->enable_high = true; - if (of_property_read_bool(np, "enable-at-boot")) config->enabled_at_boot = true; of_property_read_u32(np, "startup-delay-us", &config->startup_delay); - config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0); - if (config->enable_gpio < 0 && config->enable_gpio != -ENOENT) - return ERR_PTR(config->enable_gpio); - - /* Fetch GPIOs. - optional property*/ - ret = of_gpio_count(np); - if ((ret < 0) && (ret != -ENOENT)) - return ERR_PTR(ret); - - if (ret > 0) { - config->nr_gpios = ret; - config->gpios = devm_kcalloc(dev, - config->nr_gpios, sizeof(struct gpio), - GFP_KERNEL); - if (!config->gpios) + /* Fetch GPIO init levels */ + ngpios = gpiod_count(dev, NULL); + if (ngpios > 0) { + config->gflags = devm_kzalloc(dev, + sizeof(enum gpiod_flags) + * ngpios, + GFP_KERNEL); + if (!config->gflags) return ERR_PTR(-ENOMEM); - proplen = of_property_count_u32_elems(np, "gpios-states"); - /* optional property */ - if (proplen < 0) - proplen = 0; + for (i = 0; i < ngpios; i++) { + u32 val; - if (proplen > 0 && proplen != config->nr_gpios) { - dev_warn(dev, "gpios <-> gpios-states mismatch\n"); - proplen = 0; - } + ret = of_property_read_u32_index(np, "gpios-states", i, + &val); - for (i = 0; i < config->nr_gpios; i++) { - gpio = of_get_named_gpio(np, "gpios", i); - if (gpio < 0) { - if (gpio != -ENOENT) - return ERR_PTR(gpio); - break; - } - config->gpios[i].gpio = gpio; - config->gpios[i].label = config->supply_name; - if (proplen > 0) { - of_property_read_u32_index(np, "gpios-states", - i, &ret); - if (ret) - config->gpios[i].flags = - GPIOF_OUT_INIT_HIGH; - } + /* Default to high per specification */ + if (ret) + config->gflags[i] = GPIOD_OUT_HIGH; + else + config->gflags[i] = + val ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; } } + config->ngpios = ngpios; /* Fetch states. 
*/ proplen = of_property_count_u32_elems(np, "states"); @@ -251,59 +228,56 @@ static struct regulator_ops gpio_regulator_current_ops = { static int gpio_regulator_probe(struct platform_device *pdev) { - struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev); - struct device_node *np = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct gpio_regulator_config *config = dev_get_platdata(dev); + struct device_node *np = dev->of_node; struct gpio_regulator_data *drvdata; struct regulator_config cfg = { }; - int ptr, ret, state; + enum gpiod_flags gflags; + int ptr, ret, state, i; - drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data), + drvdata = devm_kzalloc(dev, sizeof(struct gpio_regulator_data), GFP_KERNEL); if (drvdata == NULL) return -ENOMEM; if (np) { - config = of_get_gpio_regulator_config(&pdev->dev, np, + config = of_get_gpio_regulator_config(dev, np, &drvdata->desc); if (IS_ERR(config)) return PTR_ERR(config); } - drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL); + drvdata->desc.name = devm_kstrdup(dev, config->supply_name, GFP_KERNEL); if (drvdata->desc.name == NULL) { - dev_err(&pdev->dev, "Failed to allocate supply name\n"); + dev_err(dev, "Failed to allocate supply name\n"); return -ENOMEM; } - if (config->nr_gpios != 0) { - drvdata->gpios = kmemdup(config->gpios, - config->nr_gpios * sizeof(struct gpio), - GFP_KERNEL); - if (drvdata->gpios == NULL) { - dev_err(&pdev->dev, "Failed to allocate gpio data\n"); - ret = -ENOMEM; - goto err_name; - } - - drvdata->nr_gpios = config->nr_gpios; - ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "Could not obtain regulator setting GPIOs: %d\n", - ret); - goto err_memgpio; - } + drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *), + GFP_KERNEL); + if (!drvdata->gpiods) + return -ENOMEM; + for (i = 0; i < config->ngpios; i++) { + drvdata->gpiods[i] = devm_gpiod_get_index(dev, + NULL, + i, + config->gflags[i]); + if (IS_ERR(drvdata->gpiods[i])) + return PTR_ERR(drvdata->gpiods[i]); + /* This is good to know */ + gpiod_set_consumer_name(drvdata->gpiods[i], drvdata->desc.name); } + drvdata->nr_gpios = config->ngpios; - drvdata->states = kmemdup(config->states, - config->nr_states * - sizeof(struct gpio_regulator_state), - GFP_KERNEL); + drvdata->states = devm_kmemdup(dev, + config->states, + config->nr_states * + sizeof(struct gpio_regulator_state), + GFP_KERNEL); if (drvdata->states == NULL) { - dev_err(&pdev->dev, "Failed to allocate state data\n"); - ret = -ENOMEM; - goto err_stategpio; + dev_err(dev, "Failed to allocate state data\n"); + return -ENOMEM; } drvdata->nr_states = config->nr_states; @@ -322,61 +296,46 @@ static int gpio_regulator_probe(struct platform_device *pdev) drvdata->desc.ops = &gpio_regulator_current_ops; break; default: - dev_err(&pdev->dev, "No regulator type set\n"); - ret = -EINVAL; - goto err_memstate; + dev_err(dev, "No regulator type set\n"); + return -EINVAL; } /* build initial state from gpio init data. 
*/ state = 0; for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) { - if (config->gpios[ptr].flags & GPIOF_OUT_INIT_HIGH) + if (config->gflags[ptr] == GPIOD_OUT_HIGH) state |= (1 << ptr); } drvdata->state = state; - cfg.dev = &pdev->dev; + cfg.dev = dev; cfg.init_data = config->init_data; cfg.driver_data = drvdata; cfg.of_node = np; - if (gpio_is_valid(config->enable_gpio)) { - cfg.ena_gpio = config->enable_gpio; - cfg.ena_gpio_initialized = true; - } - cfg.ena_gpio_invert = !config->enable_high; - if (config->enabled_at_boot) { - if (config->enable_high) - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - else - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - } else { - if (config->enable_high) - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW; - else - cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH; - } + /* + * The signal will be inverted by the GPIO core if flagged so in the + * decriptor. + */ + if (config->enabled_at_boot) + gflags = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE; + else + gflags = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE; + + cfg.ena_gpiod = gpiod_get_optional(dev, "enable", gflags); + if (IS_ERR(cfg.ena_gpiod)) + return PTR_ERR(cfg.ena_gpiod); drvdata->dev = regulator_register(&drvdata->desc, &cfg); if (IS_ERR(drvdata->dev)) { ret = PTR_ERR(drvdata->dev); - dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret); - goto err_memstate; + dev_err(dev, "Failed to register regulator: %d\n", ret); + return ret; } platform_set_drvdata(pdev, drvdata); return 0; - -err_memstate: - kfree(drvdata->states); -err_stategpio: - gpio_free_array(drvdata->gpios, drvdata->nr_gpios); -err_memgpio: - kfree(drvdata->gpios); -err_name: - kfree(drvdata->desc.name); - return ret; } static int gpio_regulator_remove(struct platform_device *pdev) @@ -385,13 +344,6 @@ static int gpio_regulator_remove(struct platform_device *pdev) regulator_unregister(drvdata->dev); - gpio_free_array(drvdata->gpios, drvdata->nr_gpios); - - kfree(drvdata->states); - kfree(drvdata->gpios); - - kfree(drvdata->desc.name); - return 0; } diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index 5686a1335bd3..32d3f0499e2d 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -594,28 +594,30 @@ int regulator_list_voltage_pickable_linear_range(struct regulator_dev *rdev, EXPORT_SYMBOL_GPL(regulator_list_voltage_pickable_linear_range); /** - * regulator_list_voltage_linear_range - List voltages for linear ranges + * regulator_desc_list_voltage_linear_range - List voltages for linear ranges * - * @rdev: Regulator device + * @desc: Regulator desc for regulator which volatges are to be listed * @selector: Selector to convert into a voltage * * Regulators with a series of simple linear mappings between voltages - * and selectors can set linear_ranges in the regulator descriptor and - * then use this function as their list_voltage() operation, + * and selectors who have set linear_ranges in the regulator descriptor + * can use this function prior regulator registration to list voltages. + * This is useful when voltages need to be listed during device-tree + * parsing. 
*/ -int regulator_list_voltage_linear_range(struct regulator_dev *rdev, - unsigned int selector) +int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, + unsigned int selector) { const struct regulator_linear_range *range; int i; - if (!rdev->desc->n_linear_ranges) { - BUG_ON(!rdev->desc->n_linear_ranges); + if (!desc->n_linear_ranges) { + BUG_ON(!desc->n_linear_ranges); return -EINVAL; } - for (i = 0; i < rdev->desc->n_linear_ranges; i++) { - range = &rdev->desc->linear_ranges[i]; + for (i = 0; i < desc->n_linear_ranges; i++) { + range = &desc->linear_ranges[i]; if (!(selector >= range->min_sel && selector <= range->max_sel)) @@ -628,6 +630,23 @@ int regulator_list_voltage_linear_range(struct regulator_dev *rdev, return -EINVAL; } +EXPORT_SYMBOL_GPL(regulator_desc_list_voltage_linear_range); + +/** + * regulator_list_voltage_linear_range - List voltages for linear ranges + * + * @rdev: Regulator device + * @selector: Selector to convert into a voltage + * + * Regulators with a series of simple linear mappings between voltages + * and selectors can set linear_ranges in the regulator descriptor and + * then use this function as their list_voltage() operation, + */ +int regulator_list_voltage_linear_range(struct regulator_dev *rdev, + unsigned int selector) +{ + return regulator_desc_list_voltage_linear_range(rdev->desc, selector); +} EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range); /** @@ -761,3 +780,89 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, rdev->desc->active_discharge_mask, val); } EXPORT_SYMBOL_GPL(regulator_set_active_discharge_regmap); + +/** + * regulator_set_current_limit_regmap - set_current_limit for regmap users + * + * @rdev: regulator to operate on + * @min_uA: Lower bound for current limit + * @max_uA: Upper bound for current limit + * + * Regulators that use regmap for their register I/O can set curr_table, + * csel_reg and csel_mask fields in their descriptor and then use this + * as their set_current_limit operation, saving some code. + */ +int regulator_set_current_limit_regmap(struct regulator_dev *rdev, + int min_uA, int max_uA) +{ + unsigned int n_currents = rdev->desc->n_current_limits; + int i, sel = -1; + + if (n_currents == 0) + return -EINVAL; + + if (rdev->desc->curr_table) { + const unsigned int *curr_table = rdev->desc->curr_table; + bool ascend = curr_table[n_currents - 1] > curr_table[0]; + + /* search for closest to maximum */ + if (ascend) { + for (i = n_currents - 1; i >= 0; i--) { + if (min_uA <= curr_table[i] && + curr_table[i] <= max_uA) { + sel = i; + break; + } + } + } else { + for (i = 0; i < n_currents; i++) { + if (min_uA <= curr_table[i] && + curr_table[i] <= max_uA) { + sel = i; + break; + } + } + } + } + + if (sel < 0) + return -EINVAL; + + sel <<= ffs(rdev->desc->csel_mask) - 1; + + return regmap_update_bits(rdev->regmap, rdev->desc->csel_reg, + rdev->desc->csel_mask, sel); +} +EXPORT_SYMBOL_GPL(regulator_set_current_limit_regmap); + +/** + * regulator_get_current_limit_regmap - get_current_limit for regmap users + * + * @rdev: regulator to operate on + * + * Regulators that use regmap for their register I/O can set the + * csel_reg and csel_mask fields in their descriptor and then use this + * as their get_current_limit operation, saving some code. 
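+ *
+ * A minimal, illustrative sketch of a driver wiring up both regmap
+ * current-limit helpers (the FOO_* register macro names and the current
+ * table values are hypothetical; the regulator_desc fields and ops shown
+ * are the ones these helpers rely on):
+ *
+ *	static const unsigned int foo_curr_table[] = {
+ *		500000, 1000000, 1500000, 2000000,
+ *	};
+ *
+ *	static const struct regulator_ops foo_ops = {
+ *		.set_current_limit = regulator_set_current_limit_regmap,
+ *		.get_current_limit = regulator_get_current_limit_regmap,
+ *	};
+ *
+ *	static const struct regulator_desc foo_desc = {
+ *		.ops = &foo_ops,
+ *		.curr_table = foo_curr_table,
+ *		.n_current_limits = ARRAY_SIZE(foo_curr_table),
+ *		.csel_reg = FOO_REG_BUCK_ILIM,
+ *		.csel_mask = FOO_BUCK_ILIM_MASK,
+ *	};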
+ */ +int regulator_get_current_limit_regmap(struct regulator_dev *rdev) +{ + unsigned int val; + int ret; + + ret = regmap_read(rdev->regmap, rdev->desc->csel_reg, &val); + if (ret != 0) + return ret; + + val &= rdev->desc->csel_mask; + val >>= ffs(rdev->desc->csel_mask) - 1; + + if (rdev->desc->curr_table) { + if (val >= rdev->desc->n_current_limits) + return -EINVAL; + + return rdev->desc->curr_table[val]; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(regulator_get_current_limit_regmap); diff --git a/drivers/regulator/hi655x-regulator.c b/drivers/regulator/hi655x-regulator.c index 36ae54b53814..bba24a6fdb1e 100644 --- a/drivers/regulator/hi655x-regulator.c +++ b/drivers/regulator/hi655x-regulator.c @@ -28,7 +28,6 @@ struct hi655x_regulator { unsigned int disable_reg; unsigned int status_reg; - unsigned int ctrl_regs; unsigned int ctrl_mask; struct regulator_desc rdesc; }; diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c index 4abd8e9c81e5..6f28bba81d13 100644 --- a/drivers/regulator/isl6271a-regulator.c +++ b/drivers/regulator/isl6271a-regulator.c @@ -31,7 +31,6 @@ /* PMIC details */ struct isl_pmic { struct i2c_client *client; - struct regulator_dev *rdev[3]; struct mutex mtx; }; @@ -66,14 +65,14 @@ static int isl6271a_set_voltage_sel(struct regulator_dev *dev, return err; } -static struct regulator_ops isl_core_ops = { +static const struct regulator_ops isl_core_ops = { .get_voltage_sel = isl6271a_get_voltage_sel, .set_voltage_sel = isl6271a_set_voltage_sel, .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, }; -static struct regulator_ops isl_fixed_ops = { +static const struct regulator_ops isl_fixed_ops = { .list_voltage = regulator_list_voltage_linear, }; @@ -109,6 +108,7 @@ static const struct regulator_desc isl_rd[] = { static int isl6271a_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { + struct regulator_dev *rdev; struct regulator_config config = { }; struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct isl_pmic *pmic; @@ -133,11 +133,10 @@ static int isl6271a_probe(struct i2c_client *i2c, config.init_data = NULL; config.driver_data = pmic; - pmic->rdev[i] = devm_regulator_register(&i2c->dev, &isl_rd[i], - &config); - if (IS_ERR(pmic->rdev[i])) { + rdev = devm_regulator_register(&i2c->dev, &isl_rd[i], &config); + if (IS_ERR(rdev)) { dev_err(&i2c->dev, "failed to register %s\n", id->name); - return PTR_ERR(pmic->rdev[i]); + return PTR_ERR(rdev); } } diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c index 8c0e8419c43f..c876e161052a 100644 --- a/drivers/regulator/lm363x-regulator.c +++ b/drivers/regulator/lm363x-regulator.c @@ -258,6 +258,9 @@ static int lm363x_regulator_probe(struct platform_device *pdev) * Register update is required if the pin is used. 
*/ gpiod = lm363x_regulator_of_get_enable_gpio(dev, id); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + if (gpiod) { cfg.ena_gpiod = gpiod; @@ -265,8 +268,7 @@ static int lm363x_regulator_probe(struct platform_device *pdev) LM3632_EXT_EN_MASK, LM3632_EXT_EN_MASK); if (ret) { - if (gpiod) - gpiod_put(gpiod); + gpiod_put(gpiod); dev_err(dev, "External pin err: %d\n", ret); return ret; } diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c index 5a89e6d4b9a6..ff97cc50f2eb 100644 --- a/drivers/regulator/lochnagar-regulator.c +++ b/drivers/regulator/lochnagar-regulator.c @@ -194,7 +194,7 @@ static const struct regulator_desc lochnagar_regulators[] = { .name = "VDDCORE", .supply_name = "SYSVDD", .type = REGULATOR_VOLTAGE, - .n_voltages = 57, + .n_voltages = 66, .ops = &lochnagar_vddcore_ops, .id = LOCHNAGAR_VDDCORE, @@ -226,14 +226,15 @@ static const struct of_device_id lochnagar_of_match[] = { }, { .compatible = "cirrus,lochnagar2-mic2vdd", - .data = &lochnagar_regulators[LOCHNAGAR_MIC1VDD], + .data = &lochnagar_regulators[LOCHNAGAR_MIC2VDD], }, { .compatible = "cirrus,lochnagar2-vddcore", .data = &lochnagar_regulators[LOCHNAGAR_VDDCORE], }, - {}, + {} }; +MODULE_DEVICE_TABLE(of, lochnagar_of_match); static int lochnagar_regulator_probe(struct platform_device *pdev) { diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 204b5c5270e0..9e45112658ba 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -159,7 +159,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev, selector << LDO_VOL_CONTR_SHIFT(ldo)); } -static struct regulator_ops lp3971_ldo_ops = { +static const struct regulator_ops lp3971_ldo_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3971_ldo_is_enabled, @@ -233,7 +233,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev, 0 << BUCK_VOL_CHANGE_SHIFT(buck)); } -static struct regulator_ops lp3971_dcdc_ops = { +static const struct regulator_ops lp3971_dcdc_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3971_dcdc_is_enabled, diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c index ff0c275f902e..fb098198b688 100644 --- a/drivers/regulator/lp3972.c +++ b/drivers/regulator/lp3972.c @@ -305,7 +305,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev, return ret; } -static struct regulator_ops lp3972_ldo_ops = { +static const struct regulator_ops lp3972_ldo_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3972_ldo_is_enabled, @@ -386,7 +386,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev, LP3972_VOL_CHANGE_FLAG_MASK, 0); } -static struct regulator_ops lp3972_dcdc_ops = { +static const struct regulator_ops lp3972_dcdc_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .is_enabled = lp3972_dcdc_is_enabled, diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c index 38992112fd6e..ca95257ce252 100644 --- a/drivers/regulator/lp872x.c +++ b/drivers/regulator/lp872x.c @@ -353,64 +353,6 @@ static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev) return val & LP872X_VOUT_M; } -static int lp8725_buck_set_current_limit(struct regulator_dev *rdev, - int min_uA, int max_uA) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = 
rdev_get_id(rdev); - int i; - u8 addr; - - switch (buck) { - case LP8725_ID_BUCK1: - addr = LP8725_BUCK1_VOUT2; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK2_VOUT2; - break; - default: - return -EINVAL; - } - - for (i = ARRAY_SIZE(lp8725_buck_uA) - 1; i >= 0; i--) { - if (lp8725_buck_uA[i] >= min_uA && - lp8725_buck_uA[i] <= max_uA) - return lp872x_update_bits(lp, addr, - LP8725_BUCK_CL_M, - i << LP8725_BUCK_CL_S); - } - - return -EINVAL; -} - -static int lp8725_buck_get_current_limit(struct regulator_dev *rdev) -{ - struct lp872x *lp = rdev_get_drvdata(rdev); - enum lp872x_regulator_id buck = rdev_get_id(rdev); - u8 addr, val; - int ret; - - switch (buck) { - case LP8725_ID_BUCK1: - addr = LP8725_BUCK1_VOUT2; - break; - case LP8725_ID_BUCK2: - addr = LP8725_BUCK2_VOUT2; - break; - default: - return -EINVAL; - } - - ret = lp872x_read_byte(lp, addr, &val); - if (ret) - return ret; - - val = (val & LP8725_BUCK_CL_M) >> LP8725_BUCK_CL_S; - - return (val < ARRAY_SIZE(lp8725_buck_uA)) ? - lp8725_buck_uA[val] : -EINVAL; -} - static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct lp872x *lp = rdev_get_drvdata(rdev); @@ -478,7 +420,7 @@ static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev) return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; } -static struct regulator_ops lp872x_ldo_ops = { +static const struct regulator_ops lp872x_ldo_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = regulator_set_voltage_sel_regmap, @@ -489,7 +431,7 @@ static struct regulator_ops lp872x_ldo_ops = { .enable_time = lp872x_regulator_enable_time, }; -static struct regulator_ops lp8720_buck_ops = { +static const struct regulator_ops lp8720_buck_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = lp872x_buck_set_voltage_sel, @@ -502,7 +444,7 @@ static struct regulator_ops lp8720_buck_ops = { .get_mode = lp872x_buck_get_mode, }; -static struct regulator_ops lp8725_buck_ops = { +static const struct regulator_ops lp8725_buck_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = lp872x_buck_set_voltage_sel, @@ -513,11 +455,11 @@ static struct regulator_ops lp8725_buck_ops = { .enable_time = lp872x_regulator_enable_time, .set_mode = lp872x_buck_set_mode, .get_mode = lp872x_buck_get_mode, - .set_current_limit = lp8725_buck_set_current_limit, - .get_current_limit = lp8725_buck_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; -static struct regulator_desc lp8720_regulator_desc[] = { +static const struct regulator_desc lp8720_regulator_desc[] = { { .name = "ldo1", .of_match = of_match_ptr("ldo1"), @@ -602,7 +544,7 @@ static struct regulator_desc lp8720_regulator_desc[] = { }, }; -static struct regulator_desc lp8725_regulator_desc[] = { +static const struct regulator_desc lp8725_regulator_desc[] = { { .name = "ldo1", .of_match = of_match_ptr("ldo1"), @@ -712,6 +654,10 @@ static struct regulator_desc lp8725_regulator_desc[] = { .owner = THIS_MODULE, .enable_reg = LP872X_GENERAL_CFG, .enable_mask = LP8725_BUCK1_EN_M, + .curr_table = lp8725_buck_uA, + .n_current_limits = ARRAY_SIZE(lp8725_buck_uA), + .csel_reg = LP8725_BUCK1_VOUT2, + .csel_mask = LP8725_BUCK_CL_M, }, { .name = "buck2", @@ -724,6 +670,10 @@ static struct regulator_desc lp8725_regulator_desc[] = { .owner = 
THIS_MODULE, .enable_reg = LP872X_GENERAL_CFG, .enable_mask = LP8725_BUCK2_EN_M, + .curr_table = lp8725_buck_uA, + .n_current_limits = ARRAY_SIZE(lp8725_buck_uA), + .csel_reg = LP8725_BUCK2_VOUT2, + .csel_mask = LP8725_BUCK_CL_M, }, }; @@ -820,7 +770,7 @@ static struct regulator_init_data static int lp872x_regulator_register(struct lp872x *lp) { - struct regulator_desc *desc; + const struct regulator_desc *desc; struct regulator_config cfg = { }; struct regulator_dev *rdev; int i; diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c index 70e3df653381..b55de293ca7a 100644 --- a/drivers/regulator/lp873x-regulator.c +++ b/drivers/regulator/lp873x-regulator.c @@ -39,6 +39,10 @@ .ramp_delay = _delay, \ .linear_ranges = _lr, \ .n_linear_ranges = ARRAY_SIZE(_lr), \ + .curr_table = lp873x_buck_uA, \ + .n_current_limits = ARRAY_SIZE(lp873x_buck_uA), \ + .csel_reg = (_cr), \ + .csel_mask = LP873X_BUCK0_CTRL_2_BUCK0_ILIM,\ }, \ .ctrl2_reg = _cr, \ } @@ -61,7 +65,7 @@ static const struct regulator_linear_range ldo0_ldo1_ranges[] = { REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000), }; -static unsigned int lp873x_buck_ramp_delay[] = { +static const unsigned int lp873x_buck_ramp_delay[] = { 30000, 15000, 10000, 7500, 3800, 1900, 940, 470 }; @@ -108,45 +112,8 @@ static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev, return 0; } -static int lp873x_buck_set_current_limit(struct regulator_dev *rdev, - int min_uA, int max_uA) -{ - int id = rdev_get_id(rdev); - struct lp873x *lp873 = rdev_get_drvdata(rdev); - int i; - - for (i = ARRAY_SIZE(lp873x_buck_uA) - 1; i >= 0; i--) { - if (lp873x_buck_uA[i] >= min_uA && - lp873x_buck_uA[i] <= max_uA) - return regmap_update_bits(lp873->regmap, - regulators[id].ctrl2_reg, - LP873X_BUCK0_CTRL_2_BUCK0_ILIM, - i << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM)); - } - - return -EINVAL; -} - -static int lp873x_buck_get_current_limit(struct regulator_dev *rdev) -{ - int id = rdev_get_id(rdev); - struct lp873x *lp873 = rdev_get_drvdata(rdev); - int ret; - unsigned int val; - - ret = regmap_read(lp873->regmap, regulators[id].ctrl2_reg, &val); - if (ret) - return ret; - - val = (val & LP873X_BUCK0_CTRL_2_BUCK0_ILIM) >> - __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM); - - return (val < ARRAY_SIZE(lp873x_buck_uA)) ? 
- lp873x_buck_uA[val] : -EINVAL; -} - /* Operations permitted on BUCK0, BUCK1 */ -static struct regulator_ops lp873x_buck01_ops = { +static const struct regulator_ops lp873x_buck01_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, @@ -156,12 +123,12 @@ static struct regulator_ops lp873x_buck01_ops = { .map_voltage = regulator_map_voltage_linear_range, .set_voltage_time_sel = regulator_set_voltage_time_sel, .set_ramp_delay = lp873x_buck_set_ramp_delay, - .set_current_limit = lp873x_buck_set_current_limit, - .get_current_limit = lp873x_buck_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; /* Operations permitted on LDO0 and LDO1 */ -static struct regulator_ops lp873x_ldo01_ops = { +static const struct regulator_ops lp873x_ldo01_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c index 244822bb63cd..14fd38807134 100644 --- a/drivers/regulator/lp8755.c +++ b/drivers/regulator/lp8755.c @@ -315,7 +315,7 @@ out_i2c_error: .vsel_mask = LP8755_BUCK_VOUT_M,\ } -static struct regulator_desc lp8755_regulators[] = { +static const struct regulator_desc lp8755_regulators[] = { lp8755_buck_desc(0), lp8755_buck_desc(1), lp8755_buck_desc(2), @@ -386,7 +386,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data) if (ret < 0) goto err_i2c; - /* send OCP event to all regualtor devices */ + /* send OCP event to all regulator devices */ if ((flag1 & 0x01) && (pchip->irqmask & 0x01)) for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++) if (pchip->rdev[icnt] != NULL) @@ -394,7 +394,7 @@ static irqreturn_t lp8755_irq_handler(int irq, void *data) LP8755_EVENT_OCP, NULL); - /* send OVP event to all regualtor devices */ + /* send OVP event to all regulator devices */ if ((flag1 & 0x02) && (pchip->irqmask & 0x02)) for (icnt = 0; icnt < LP8755_BUCK_MAX; icnt++) if (pchip->rdev[icnt] != NULL) diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c index c192357d1dea..4ed41731a5b1 100644 --- a/drivers/regulator/lp87565-regulator.c +++ b/drivers/regulator/lp87565-regulator.c @@ -51,7 +51,7 @@ static const struct regulator_linear_range buck0_1_2_3_ranges[] = { REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000), }; -static unsigned int lp87565_buck_ramp_delay[] = { +static const unsigned int lp87565_buck_ramp_delay[] = { 30000, 15000, 10000, 7500, 3800, 1900, 940, 470 }; @@ -140,7 +140,7 @@ static int lp87565_buck_get_current_limit(struct regulator_dev *rdev) } /* Operations permitted on BUCK0, BUCK1 */ -static struct regulator_ops lp87565_buck_ops = { +static const struct regulator_ops lp87565_buck_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c index ec46290b647e..a7d30550bb5f 100644 --- a/drivers/regulator/lp8788-buck.c +++ b/drivers/regulator/lp8788-buck.c @@ -95,12 +95,10 @@ struct lp8788_buck { void *dvs; }; -/* BUCK 1 ~ 4 voltage table */ -static const int lp8788_buck_vtbl[] = { - 500000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, - 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, - 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, - 1950000, 2000000, +/* BUCK 1 ~ 4 
voltage ranges */ +static const struct regulator_linear_range buck_volt_ranges[] = { + REGULATOR_LINEAR_RANGE(500000, 0, 0, 0), + REGULATOR_LINEAR_RANGE(800000, 1, 25, 50000), }; static void lp8788_buck1_set_dvs(struct lp8788_buck *buck) @@ -345,8 +343,8 @@ static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev) } static const struct regulator_ops lp8788_buck12_ops = { - .list_voltage = regulator_list_voltage_table, - .map_voltage = regulator_map_voltage_ascend, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = lp8788_buck12_set_voltage_sel, .get_voltage_sel = lp8788_buck12_get_voltage_sel, .enable = regulator_enable_regmap, @@ -358,8 +356,8 @@ static const struct regulator_ops lp8788_buck12_ops = { }; static const struct regulator_ops lp8788_buck34_ops = { - .list_voltage = regulator_list_voltage_table, - .map_voltage = regulator_map_voltage_ascend, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .enable = regulator_enable_regmap, @@ -370,13 +368,14 @@ static const struct regulator_ops lp8788_buck34_ops = { .get_mode = lp8788_buck_get_mode, }; -static struct regulator_desc lp8788_buck_desc[] = { +static const struct regulator_desc lp8788_buck_desc[] = { { .name = "buck1", .id = BUCK1, .ops = &lp8788_buck12_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_BUCK, @@ -386,8 +385,9 @@ static struct regulator_desc lp8788_buck_desc[] = { .name = "buck2", .id = BUCK2, .ops = &lp8788_buck12_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_BUCK, @@ -397,8 +397,9 @@ static struct regulator_desc lp8788_buck_desc[] = { .name = "buck3", .id = BUCK3, .ops = &lp8788_buck34_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .vsel_reg = LP8788_BUCK3_VOUT, @@ -410,8 +411,9 @@ static struct regulator_desc lp8788_buck_desc[] = { .name = "buck4", .id = BUCK4, .ops = &lp8788_buck34_ops, - .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl), - .volt_table = lp8788_buck_vtbl, + .n_voltages = 26, + .linear_ranges = buck_volt_ranges, + .n_linear_ranges = ARRAY_SIZE(buck_volt_ranges), .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .vsel_reg = LP8788_BUCK4_VOUT, diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c index 2ee22e7ea675..a2ef146e6b3a 100644 --- a/drivers/regulator/lp8788-ldo.c +++ b/drivers/regulator/lp8788-ldo.c @@ -186,7 +186,7 @@ static const struct regulator_ops lp8788_ldo_voltage_fixed_ops = { .enable_time = lp8788_ldo_enable_time, }; -static struct regulator_desc lp8788_dldo_desc[] = { +static const struct regulator_desc lp8788_dldo_desc[] = { { .name = "dldo1", .id = DLDO1, @@ -343,7 +343,7 @@ static struct regulator_desc lp8788_dldo_desc[] = { }, }; -static struct regulator_desc lp8788_aldo_desc[] = { +static const 
struct regulator_desc lp8788_aldo_desc[] = { { .name = "aldo1", .id = ALDO1, diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c index 71fd0f2a4b76..e6d66e492b85 100644 --- a/drivers/regulator/ltc3676.c +++ b/drivers/regulator/ltc3676.c @@ -241,61 +241,10 @@ static struct regulator_desc ltc3676_regulators[LTC3676_NUM_REGULATORS] = { LTC3676_FIXED_REG(LDO4, ldo4, LDOB, 2), }; -static bool ltc3676_writeable_reg(struct device *dev, unsigned int reg) +static bool ltc3676_readable_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { - case LTC3676_IRQSTAT: - case LTC3676_BUCK1: - case LTC3676_BUCK2: - case LTC3676_BUCK3: - case LTC3676_BUCK4: - case LTC3676_LDOA: - case LTC3676_LDOB: - case LTC3676_SQD1: - case LTC3676_SQD2: - case LTC3676_CNTRL: - case LTC3676_DVB1A: - case LTC3676_DVB1B: - case LTC3676_DVB2A: - case LTC3676_DVB2B: - case LTC3676_DVB3A: - case LTC3676_DVB3B: - case LTC3676_DVB4A: - case LTC3676_DVB4B: - case LTC3676_MSKIRQ: - case LTC3676_MSKPG: - case LTC3676_USER: - case LTC3676_HRST: - case LTC3676_CLIRQ: - return true; - } - return false; -} - -static bool ltc3676_readable_reg(struct device *dev, unsigned int reg) -{ - switch (reg) { - case LTC3676_IRQSTAT: - case LTC3676_BUCK1: - case LTC3676_BUCK2: - case LTC3676_BUCK3: - case LTC3676_BUCK4: - case LTC3676_LDOA: - case LTC3676_LDOB: - case LTC3676_SQD1: - case LTC3676_SQD2: - case LTC3676_CNTRL: - case LTC3676_DVB1A: - case LTC3676_DVB1B: - case LTC3676_DVB2A: - case LTC3676_DVB2B: - case LTC3676_DVB3A: - case LTC3676_DVB3B: - case LTC3676_DVB4A: - case LTC3676_DVB4B: - case LTC3676_MSKIRQ: - case LTC3676_MSKPG: - case LTC3676_USER: + case LTC3676_BUCK1 ... LTC3676_IRQSTAT: case LTC3676_HRST: case LTC3676_CLIRQ: return true; @@ -306,9 +255,7 @@ static bool ltc3676_readable_reg(struct device *dev, unsigned int reg) static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { - case LTC3676_IRQSTAT: - case LTC3676_PGSTATL: - case LTC3676_PGSTATRT: + case LTC3676_IRQSTAT ... 
LTC3676_PGSTATRT: return true; } return false; @@ -317,8 +264,8 @@ static bool ltc3676_volatile_reg(struct device *dev, unsigned int reg) static const struct regmap_config ltc3676_regmap_config = { .reg_bits = 8, .val_bits = 8, - .writeable_reg = ltc3676_writeable_reg, - .readable_reg = ltc3676_readable_reg, + .writeable_reg = ltc3676_readable_writeable_reg, + .readable_reg = ltc3676_readable_writeable_reg, .volatile_reg = ltc3676_volatile_reg, .max_register = LTC3676_CLIRQ, .use_single_read = true, @@ -442,5 +389,5 @@ static struct i2c_driver ltc3676_driver = { module_i2c_driver(ltc3676_driver); MODULE_AUTHOR("Tim Harvey "); -MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC1376"); +MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3676"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c index bc7f4751bf9c..85a88a9e4d42 100644 --- a/drivers/regulator/max14577-regulator.c +++ b/drivers/regulator/max14577-regulator.c @@ -324,4 +324,3 @@ module_exit(max14577_regulator_exit); MODULE_AUTHOR("Krzysztof Kozlowski "); MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:max14577-regulator"); diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c index b94e3a721721..1607ac673e44 100644 --- a/drivers/regulator/max77620-regulator.c +++ b/drivers/regulator/max77620-regulator.c @@ -1,7 +1,7 @@ /* * Maxim MAX77620 Regulator driver * - * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. * * Author: Mallikarjun Kasoju * Laxman Dewangan @@ -690,6 +690,7 @@ static const struct regulator_ops max77620_regulator_ops = { .active_discharge_mask = MAX77620_SD_CFG1_ADE_MASK, \ .active_discharge_reg = MAX77620_REG_##_id##_CFG, \ .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ }, \ } @@ -721,6 +722,7 @@ static const struct regulator_ops max77620_regulator_ops = { .active_discharge_mask = MAX77620_LDO_CFG2_ADE_MASK, \ .active_discharge_reg = MAX77620_REG_##_id##_CFG2, \ .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ }, \ } @@ -803,6 +805,14 @@ static int max77620_regulator_probe(struct platform_device *pdev) rdesc = &rinfo[id].desc; pmic->rinfo[id] = &max77620_regs_info[id]; pmic->enable_power_mode[id] = MAX77620_POWER_MODE_NORMAL; + pmic->reg_pdata[id].active_fps_src = -1; + pmic->reg_pdata[id].active_fps_pd_slot = -1; + pmic->reg_pdata[id].active_fps_pu_slot = -1; + pmic->reg_pdata[id].suspend_fps_src = -1; + pmic->reg_pdata[id].suspend_fps_pd_slot = -1; + pmic->reg_pdata[id].suspend_fps_pu_slot = -1; + pmic->reg_pdata[id].power_ok = -1; + pmic->reg_pdata[id].ramp_rate_setting = -1; ret = max77620_read_slew_rate(pmic, id); if (ret < 0) diff --git a/drivers/regulator/max77650-regulator.c b/drivers/regulator/max77650-regulator.c new file mode 100644 index 000000000000..31ebf34b01ec --- /dev/null +++ b/drivers/regulator/max77650-regulator.c @@ -0,0 +1,498 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2018 BayLibre SAS +// Author: Bartosz Golaszewski +// +// Regulator driver for MAXIM 77650/77651 charger/power-supply. 
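+//
+// Each supply on this PMIC is controlled through a register pair: regA
+// carries the voltage selector (and, for the SBB bucks, the current-limit
+// selector), while regB carries the enable control and active-discharge
+// bits. The ops below manipulate those fields through the parent MFD's
+// regmap.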
+ +#include +#include +#include +#include +#include +#include + +#define MAX77650_REGULATOR_EN_CTRL_MASK GENMASK(3, 0) +#define MAX77650_REGULATOR_EN_CTRL_BITS(_reg) \ + ((_reg) & MAX77650_REGULATOR_EN_CTRL_MASK) +#define MAX77650_REGULATOR_ENABLED GENMASK(2, 1) +#define MAX77650_REGULATOR_DISABLED BIT(2) + +#define MAX77650_REGULATOR_V_LDO_MASK GENMASK(6, 0) +#define MAX77650_REGULATOR_V_SBB_MASK GENMASK(5, 0) + +#define MAX77650_REGULATOR_AD_MASK BIT(3) +#define MAX77650_REGULATOR_AD_DISABLED 0x00 +#define MAX77650_REGULATOR_AD_ENABLED BIT(3) + +#define MAX77650_REGULATOR_CURR_LIM_MASK GENMASK(7, 6) + +enum { + MAX77650_REGULATOR_ID_LDO = 0, + MAX77650_REGULATOR_ID_SBB0, + MAX77650_REGULATOR_ID_SBB1, + MAX77650_REGULATOR_ID_SBB2, + MAX77650_REGULATOR_NUM_REGULATORS, +}; + +struct max77650_regulator_desc { + struct regulator_desc desc; + unsigned int regA; + unsigned int regB; +}; + +static const u32 max77651_sbb1_regulator_volt_table[] = { + 2400000, 3200000, 4000000, 4800000, + 2450000, 3250000, 4050000, 4850000, + 2500000, 3300000, 4100000, 4900000, + 2550000, 3350000, 4150000, 4950000, + 2600000, 3400000, 4200000, 5000000, + 2650000, 3450000, 4250000, 5050000, + 2700000, 3500000, 4300000, 5100000, + 2750000, 3550000, 4350000, 5150000, + 2800000, 3600000, 4400000, 5200000, + 2850000, 3650000, 4450000, 5250000, + 2900000, 3700000, 4500000, 0, + 2950000, 3750000, 4550000, 0, + 3000000, 3800000, 4600000, 0, + 3050000, 3850000, 4650000, 0, + 3100000, 3900000, 4700000, 0, + 3150000, 3950000, 4750000, 0, +}; + +#define MAX77651_REGULATOR_SBB1_SEL_DEC(_val) \ + (((_val & 0x3c) >> 2) | ((_val & 0x03) << 4)) +#define MAX77651_REGULATOR_SBB1_SEL_ENC(_val) \ + (((_val & 0x30) >> 4) | ((_val & 0x0f) << 2)) + +#define MAX77650_REGULATOR_SBB1_SEL_DECR(_val) \ + do { \ + _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \ + _val--; \ + _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \ + } while (0) + +#define MAX77650_REGULATOR_SBB1_SEL_INCR(_val) \ + do { \ + _val = MAX77651_REGULATOR_SBB1_SEL_DEC(_val); \ + _val++; \ + _val = MAX77651_REGULATOR_SBB1_SEL_ENC(_val); \ + } while (0) + +static const unsigned int max77650_current_limit_table[] = { + 1000000, 866000, 707000, 500000, +}; + +static int max77650_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct max77650_regulator_desc *rdesc; + struct regmap *map; + int val, rv, en; + + rdesc = rdev_get_drvdata(rdev); + map = rdev_get_regmap(rdev); + + rv = regmap_read(map, rdesc->regB, &val); + if (rv) + return rv; + + en = MAX77650_REGULATOR_EN_CTRL_BITS(val); + + return en != MAX77650_REGULATOR_DISABLED; +} + +static int max77650_regulator_enable(struct regulator_dev *rdev) +{ + struct max77650_regulator_desc *rdesc; + struct regmap *map; + + rdesc = rdev_get_drvdata(rdev); + map = rdev_get_regmap(rdev); + + return regmap_update_bits(map, rdesc->regB, + MAX77650_REGULATOR_EN_CTRL_MASK, + MAX77650_REGULATOR_ENABLED); +} + +static int max77650_regulator_disable(struct regulator_dev *rdev) +{ + struct max77650_regulator_desc *rdesc; + struct regmap *map; + + rdesc = rdev_get_drvdata(rdev); + map = rdev_get_regmap(rdev); + + return regmap_update_bits(map, rdesc->regB, + MAX77650_REGULATOR_EN_CTRL_MASK, + MAX77650_REGULATOR_DISABLED); +} + +static int max77650_regulator_set_voltage_sel(struct regulator_dev *rdev, + unsigned int sel) +{ + int rv = 0, curr, diff; + bool ascending; + + /* + * If the regulator is disabled, we can program the desired + * voltage right away. 
+ */ + if (!max77650_regulator_is_enabled(rdev)) + return regulator_set_voltage_sel_regmap(rdev, sel); + + /* + * Otherwise we need to manually ramp the output voltage up/down + * one step at a time. + */ + + curr = regulator_get_voltage_sel_regmap(rdev); + if (curr < 0) + return curr; + + diff = curr - sel; + if (diff == 0) + return 0; /* Already there. */ + else if (diff > 0) + ascending = false; + else + ascending = true; + + /* + * Make sure we'll get to the right voltage and break the loop even if + * the selector equals 0. + */ + for (ascending ? curr++ : curr--;; ascending ? curr++ : curr--) { + rv = regulator_set_voltage_sel_regmap(rdev, curr); + if (rv) + return rv; + + if (curr == sel) + break; + } + + return 0; +} + +/* + * Special case: non-linear voltage table for max77651 SBB1 - software + * must ensure the voltage is ramped in 50mV increments. + */ +static int max77651_regulator_sbb1_set_voltage_sel(struct regulator_dev *rdev, + unsigned int sel) +{ + int rv = 0, curr, vcurr, vdest, vdiff; + + /* + * If the regulator is disabled, we can program the desired + * voltage right away. + */ + if (!max77650_regulator_is_enabled(rdev)) + return regulator_set_voltage_sel_regmap(rdev, sel); + + curr = regulator_get_voltage_sel_regmap(rdev); + if (curr < 0) + return curr; + + if (curr == sel) + return 0; /* Already there. */ + + vcurr = max77651_sbb1_regulator_volt_table[curr]; + vdest = max77651_sbb1_regulator_volt_table[sel]; + vdiff = vcurr - vdest; + + for (;;) { + if (vdiff > 0) + MAX77650_REGULATOR_SBB1_SEL_DECR(curr); + else + MAX77650_REGULATOR_SBB1_SEL_INCR(curr); + + rv = regulator_set_voltage_sel_regmap(rdev, curr); + if (rv) + return rv; + + if (curr == sel) + break; + }; + + return 0; +} + +static const struct regulator_ops max77650_regulator_LDO_ops = { + .is_enabled = max77650_regulator_is_enabled, + .enable = max77650_regulator_enable, + .disable = max77650_regulator_disable, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = max77650_regulator_set_voltage_sel, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static const struct regulator_ops max77650_regulator_SBB_ops = { + .is_enabled = max77650_regulator_is_enabled, + .enable = max77650_regulator_enable, + .disable = max77650_regulator_disable, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = max77650_regulator_set_voltage_sel, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +/* Special case for max77651 SBB1 - non-linear voltage mapping. 
*/ +static const struct regulator_ops max77651_SBB1_regulator_ops = { + .is_enabled = max77650_regulator_is_enabled, + .enable = max77650_regulator_enable, + .disable = max77650_regulator_disable, + .list_voltage = regulator_list_voltage_table, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = max77651_regulator_sbb1_set_voltage_sel, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static struct max77650_regulator_desc max77650_LDO_desc = { + .desc = { + .name = "ldo", + .of_match = of_match_ptr("ldo"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-ldo", + .id = MAX77650_REGULATOR_ID_LDO, + .ops = &max77650_regulator_LDO_ops, + .min_uV = 1350000, + .uV_step = 12500, + .n_voltages = 128, + .vsel_mask = MAX77650_REGULATOR_V_LDO_MASK, + .vsel_reg = MAX77650_REG_CNFG_LDO_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_LDO_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .regA = MAX77650_REG_CNFG_LDO_A, + .regB = MAX77650_REG_CNFG_LDO_B, +}; + +static struct max77650_regulator_desc max77650_SBB0_desc = { + .desc = { + .name = "sbb0", + .of_match = of_match_ptr("sbb0"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb0", + .id = MAX77650_REGULATOR_ID_SBB0, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 800000, + .uV_step = 25000, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB0_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB0_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB0_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB0_A, + .regB = MAX77650_REG_CNFG_SBB0_B, +}; + +static struct max77650_regulator_desc max77650_SBB1_desc = { + .desc = { + .name = "sbb1", + .of_match = of_match_ptr("sbb1"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb1", + .id = MAX77650_REGULATOR_ID_SBB1, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 800000, + .uV_step = 12500, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB1_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB1_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB1_A, + .regB = MAX77650_REG_CNFG_SBB1_B, +}; + +static struct max77650_regulator_desc max77651_SBB1_desc = { + .desc = { + .name = "sbb1", + .of_match = of_match_ptr("sbb1"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = 
"in-sbb1", + .id = MAX77650_REGULATOR_ID_SBB1, + .ops = &max77651_SBB1_regulator_ops, + .volt_table = max77651_sbb1_regulator_volt_table, + .n_voltages = ARRAY_SIZE(max77651_sbb1_regulator_volt_table), + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB1_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB1_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB1_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB1_A, + .regB = MAX77650_REG_CNFG_SBB1_B, +}; + +static struct max77650_regulator_desc max77650_SBB2_desc = { + .desc = { + .name = "sbb2", + .of_match = of_match_ptr("sbb2"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb0", + .id = MAX77650_REGULATOR_ID_SBB2, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 800000, + .uV_step = 50000, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB2_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB2_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB2_A, + .regB = MAX77650_REG_CNFG_SBB2_B, +}; + +static struct max77650_regulator_desc max77651_SBB2_desc = { + .desc = { + .name = "sbb2", + .of_match = of_match_ptr("sbb2"), + .regulators_node = of_match_ptr("regulators"), + .supply_name = "in-sbb0", + .id = MAX77650_REGULATOR_ID_SBB2, + .ops = &max77650_regulator_SBB_ops, + .min_uV = 2400000, + .uV_step = 50000, + .n_voltages = 64, + .vsel_mask = MAX77650_REGULATOR_V_SBB_MASK, + .vsel_reg = MAX77650_REG_CNFG_SBB2_A, + .active_discharge_off = MAX77650_REGULATOR_AD_DISABLED, + .active_discharge_on = MAX77650_REGULATOR_AD_ENABLED, + .active_discharge_mask = MAX77650_REGULATOR_AD_MASK, + .active_discharge_reg = MAX77650_REG_CNFG_SBB2_B, + .enable_time = 100, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .csel_reg = MAX77650_REG_CNFG_SBB2_A, + .csel_mask = MAX77650_REGULATOR_CURR_LIM_MASK, + .curr_table = max77650_current_limit_table, + .n_current_limits = ARRAY_SIZE(max77650_current_limit_table), + }, + .regA = MAX77650_REG_CNFG_SBB2_A, + .regB = MAX77650_REG_CNFG_SBB2_B, +}; + +static int max77650_regulator_probe(struct platform_device *pdev) +{ + struct max77650_regulator_desc **rdescs; + struct max77650_regulator_desc *rdesc; + struct regulator_config config = { }; + struct device *dev, *parent; + struct regulator_dev *rdev; + struct regmap *map; + unsigned int val; + int i, rv; + + dev = &pdev->dev; + parent = dev->parent; + + if (!dev->of_node) + dev->of_node = parent->of_node; + + rdescs = devm_kcalloc(dev, MAX77650_REGULATOR_NUM_REGULATORS, + sizeof(*rdescs), GFP_KERNEL); + if (!rdescs) + return -ENOMEM; + + map = dev_get_regmap(parent, NULL); + if (!map) + return -ENODEV; + + rv = regmap_read(map, MAX77650_REG_CID, &val); + if (rv) + 
return rv; + + rdescs[MAX77650_REGULATOR_ID_LDO] = &max77650_LDO_desc; + rdescs[MAX77650_REGULATOR_ID_SBB0] = &max77650_SBB0_desc; + + switch (MAX77650_CID_BITS(val)) { + case MAX77650_CID_77650A: + case MAX77650_CID_77650C: + rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77650_SBB1_desc; + rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77650_SBB2_desc; + break; + case MAX77650_CID_77651A: + case MAX77650_CID_77651B: + rdescs[MAX77650_REGULATOR_ID_SBB1] = &max77651_SBB1_desc; + rdescs[MAX77650_REGULATOR_ID_SBB2] = &max77651_SBB2_desc; + break; + default: + return -ENODEV; + } + + config.dev = parent; + + for (i = 0; i < MAX77650_REGULATOR_NUM_REGULATORS; i++) { + rdesc = rdescs[i]; + config.driver_data = rdesc; + + rdev = devm_regulator_register(dev, &rdesc->desc, &config); + if (IS_ERR(rdev)) + return PTR_ERR(rdev); + } + + return 0; +} + +static struct platform_driver max77650_regulator_driver = { + .driver = { + .name = "max77650-regulator", + }, + .probe = max77650_regulator_probe, +}; +module_platform_driver(max77650_regulator_driver); + +MODULE_DESCRIPTION("MAXIM 77650/77651 regulator driver"); +MODULE_AUTHOR("Bartosz Golaszewski "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c index c30cf5c9f2de..ea7b50397300 100644 --- a/drivers/regulator/max77802-regulator.c +++ b/drivers/regulator/max77802-regulator.c @@ -248,9 +248,9 @@ static int max77802_set_ramp_delay_2bit(struct regulator_dev *rdev, unsigned int ramp_value; if (id > MAX77802_BUCK4) { - dev_warn(&rdev->dev, - "%s: regulator: ramp delay not supported\n", - rdev->desc->name); + dev_warn(&rdev->dev, + "%s: regulator: ramp delay not supported\n", + rdev->desc->name); return -EINVAL; } ramp_value = max77802_find_ramp_value(rdev, ramp_table_77802_2bit, diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index 8fd1adc9c9a9..ab558b26cd7c 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c @@ -226,69 +226,69 @@ static const unsigned int mc13783_pwgtdrv_val[] = { 5500000, }; -static struct regulator_ops mc13783_gpo_regulator_ops; +static const struct regulator_ops mc13783_gpo_regulator_ops; -#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages) \ - MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \ +#define MC13783_DEFINE(prefix, name, node, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13783_REG_, name, node, reg, vsel_reg, voltages, \ mc13xxx_regulator_ops) -#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages) \ - MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \ +#define MC13783_FIXED_DEFINE(prefix, name, node, reg, voltages) \ + MC13xxx_FIXED_DEFINE(MC13783_REG_, name, node, reg, voltages, \ mc13xxx_fixed_regulator_ops) -#define MC13783_GPO_DEFINE(prefix, name, reg, voltages) \ - MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \ +#define MC13783_GPO_DEFINE(prefix, name, node, reg, voltages) \ + MC13xxx_GPO_DEFINE(MC13783_REG_, name, node, reg, voltages, \ mc13783_gpo_regulator_ops) -#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \ - MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages) -#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \ - MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages) +#define MC13783_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages) \ + MC13783_DEFINE(REG, _name, _node, _reg, _vsel_reg, _voltages) +#define MC13783_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages) \ + MC13783_DEFINE(REG, 
_name, _node, _reg, _vsel_reg, _voltages) static struct mc13xxx_regulator mc13783_regulators[] = { - MC13783_DEFINE_SW(SW1A, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val), - MC13783_DEFINE_SW(SW1B, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val), - MC13783_DEFINE_SW(SW2A, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val), - MC13783_DEFINE_SW(SW2B, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val), - MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val), - - MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val), - MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val), - MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_SW(SW1A, sw1a, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val), + MC13783_DEFINE_SW(SW1B, sw1b, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val), + MC13783_DEFINE_SW(SW2A, sw2a, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val), + MC13783_DEFINE_SW(SW2B, sw2b, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val), + MC13783_DEFINE_SW(SW3, sw3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val), + + MC13783_FIXED_DEFINE(REG, VAUDIO, vaudio, REGULATORMODE0, mc13783_vaudio_val), + MC13783_FIXED_DEFINE(REG, VIOHI, viohi, REGULATORMODE0, mc13783_viohi_val), + MC13783_DEFINE_REGU(VIOLO, violo, REGULATORMODE0, REGULATORSETTING0, mc13783_violo_val), - MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0, mc13783_vdig_val), - MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VGEN, vgen, REGULATORMODE0, REGULATORSETTING0, mc13783_vgen_val), - MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VRFDIG, vrfdig, REGULATORMODE0, REGULATORSETTING0, mc13783_vrfdig_val), - MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VRFREF, vrfref, REGULATORMODE0, REGULATORSETTING0, mc13783_vrfref_val), - MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, + MC13783_DEFINE_REGU(VRFCP, vrfcp, REGULATORMODE0, REGULATORSETTING0, mc13783_vrfcp_val), - MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, + MC13783_DEFINE_REGU(VSIM, vsim, REGULATORMODE1, REGULATORSETTING0, mc13783_vsim_val), - MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, + MC13783_DEFINE_REGU(VESIM, vesim, REGULATORMODE1, REGULATORSETTING0, mc13783_vesim_val), - MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, + MC13783_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0, mc13783_vcam_val), - MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val), - MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, + MC13783_FIXED_DEFINE(REG, VRFBG, vrfbg, REGULATORMODE1, mc13783_vrfbg_val), + MC13783_DEFINE_REGU(VVIB, vvib, REGULATORMODE1, REGULATORSETTING1, mc13783_vvib_val), - MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VRF1, vrf1, REGULATORMODE1, REGULATORSETTING1, mc13783_vrf_val), - MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VRF2, vrf2, REGULATORMODE1, REGULATORSETTING1, mc13783_vrf_val), - MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VMMC1, vmmc1, REGULATORMODE1, REGULATORSETTING1, mc13783_vmmc_val), - MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, + MC13783_DEFINE_REGU(VMMC2, vmmc2, REGULATORMODE1, REGULATORSETTING1, mc13783_vmmc_val), - MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val), - 
MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val), - MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val), - MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val), + MC13783_GPO_DEFINE(REG, GPO1, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO2, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO3, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, GPO4, gpo1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REG, PWGT1SPI, pwgt1spi, POWERMISC, mc13783_pwgtdrv_val), + MC13783_GPO_DEFINE(REG, PWGT2SPI, pwgt2spi, POWERMISC, mc13783_pwgtdrv_val), }; static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, @@ -380,7 +380,7 @@ static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev) return (val & mc13xxx_regulators[id].enable_bit) != 0; } -static struct regulator_ops mc13783_gpo_regulator_ops = { +static const struct regulator_ops mc13783_gpo_regulator_ops = { .enable = mc13783_gpo_regulator_enable, .disable = mc13783_gpo_regulator_disable, .is_enabled = mc13783_gpo_regulator_is_enabled, diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c index f3fba1cc1379..a731e826a037 100644 --- a/drivers/regulator/mc13892-regulator.c +++ b/drivers/regulator/mc13892-regulator.c @@ -242,61 +242,61 @@ static const unsigned int mc13892_pwgtdrv[] = { 5000000, }; -static struct regulator_ops mc13892_gpo_regulator_ops; -static struct regulator_ops mc13892_sw_regulator_ops; +static const struct regulator_ops mc13892_gpo_regulator_ops; +static const struct regulator_ops mc13892_sw_regulator_ops; -#define MC13892_FIXED_DEFINE(name, reg, voltages) \ - MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages, \ +#define MC13892_FIXED_DEFINE(name, node, reg, voltages) \ + MC13xxx_FIXED_DEFINE(MC13892_, name, node, reg, voltages, \ mc13xxx_fixed_regulator_ops) -#define MC13892_GPO_DEFINE(name, reg, voltages) \ - MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages, \ +#define MC13892_GPO_DEFINE(name, node, reg, voltages) \ + MC13xxx_GPO_DEFINE(MC13892_, name, node, reg, voltages, \ mc13892_gpo_regulator_ops) -#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages) \ - MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ +#define MC13892_SW_DEFINE(name, node, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \ mc13892_sw_regulator_ops) -#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages) \ - MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \ +#define MC13892_DEFINE_REGU(name, node, reg, vsel_reg, voltages) \ + MC13xxx_DEFINE(MC13892_, name, node, reg, vsel_reg, voltages, \ mc13xxx_regulator_ops) static struct mc13xxx_regulator mc13892_regulators[] = { - MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell), - MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1), - MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw), - MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw), - MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw), - MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst), - MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi), - MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VCOINCELL, vcoincell, POWERCTL0, POWERCTL0, mc13892_vcoincell), + MC13892_SW_DEFINE(SW1, sw1, SWITCHERS0, SWITCHERS0, mc13892_sw1), + MC13892_SW_DEFINE(SW2, sw2, SWITCHERS1, SWITCHERS1, mc13892_sw), 
+ MC13892_SW_DEFINE(SW3, sw3, SWITCHERS2, SWITCHERS2, mc13892_sw), + MC13892_SW_DEFINE(SW4, sw4, SWITCHERS3, SWITCHERS3, mc13892_sw), + MC13892_FIXED_DEFINE(SWBST, swbst, SWITCHERS5, mc13892_swbst), + MC13892_FIXED_DEFINE(VIOHI, viohi, REGULATORMODE0, mc13892_viohi), + MC13892_DEFINE_REGU(VPLL, vpll, REGULATORMODE0, REGULATORSETTING0, mc13892_vpll), - MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VDIG, vdig, REGULATORMODE0, REGULATORSETTING0, mc13892_vdig), - MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1, + MC13892_DEFINE_REGU(VSD, vsd, REGULATORMODE1, REGULATORSETTING1, mc13892_vsd), - MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VUSB2, vusb2, REGULATORMODE0, REGULATORSETTING0, mc13892_vusb2), - MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1, + MC13892_DEFINE_REGU(VVIDEO, vvideo, REGULATORMODE1, REGULATORSETTING1, mc13892_vvideo), - MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1, + MC13892_DEFINE_REGU(VAUDIO, vaudio, REGULATORMODE1, REGULATORSETTING1, mc13892_vaudio), - MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, + MC13892_DEFINE_REGU(VCAM, vcam, REGULATORMODE1, REGULATORSETTING0, mc13892_vcam), - MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VGEN1, vgen1, REGULATORMODE0, REGULATORSETTING0, mc13892_vgen1), - MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0, + MC13892_DEFINE_REGU(VGEN2, vgen2, REGULATORMODE0, REGULATORSETTING0, mc13892_vgen2), - MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0, + MC13892_DEFINE_REGU(VGEN3, vgen3, REGULATORMODE1, REGULATORSETTING0, mc13892_vgen3), - MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb), - MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo), - MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv), - MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv), + MC13892_FIXED_DEFINE(VUSB, vusb, USB1, mc13892_vusb), + MC13892_GPO_DEFINE(GPO1, gpo1, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO2, gpo2, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO3, gpo3, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(GPO4, gpo4, POWERMISC, mc13892_gpo), + MC13892_GPO_DEFINE(PWGT1SPI, pwgt1spi, POWERMISC, mc13892_pwgtdrv), + MC13892_GPO_DEFINE(PWGT2SPI, pwgt2spi, POWERMISC, mc13892_pwgtdrv), }; static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, @@ -387,7 +387,7 @@ static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev) } -static struct regulator_ops mc13892_gpo_regulator_ops = { +static const struct regulator_ops mc13892_gpo_regulator_ops = { .enable = mc13892_gpo_regulator_enable, .disable = mc13892_gpo_regulator_disable, .is_enabled = mc13892_gpo_regulator_is_enabled, @@ -479,7 +479,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, return ret; } -static struct regulator_ops mc13892_sw_regulator_ops = { +static const struct regulator_ops mc13892_sw_regulator_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .set_voltage_sel = mc13892_sw_regulator_set_voltage_sel, diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index 2243138d8a58..8ff19150ca92 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ 
b/drivers/regulator/mc13xxx-regulator-core.c @@ -99,7 +99,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) return rdev->desc->volt_table[val]; } -struct regulator_ops mc13xxx_regulator_ops = { +const struct regulator_ops mc13xxx_regulator_ops = { .enable = mc13xxx_regulator_enable, .disable = mc13xxx_regulator_disable, .is_enabled = mc13xxx_regulator_is_enabled, @@ -127,7 +127,7 @@ int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, } EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage); -struct regulator_ops mc13xxx_fixed_regulator_ops = { +const struct regulator_ops mc13xxx_fixed_regulator_ops = { .enable = mc13xxx_regulator_enable, .disable = mc13xxx_regulator_disable, .is_enabled = mc13xxx_regulator_is_enabled, diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h index 2ab9bfd93b4e..ba7eff1070bd 100644 --- a/drivers/regulator/mc13xxx.h +++ b/drivers/regulator/mc13xxx.h @@ -53,13 +53,13 @@ static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt( } #endif -extern struct regulator_ops mc13xxx_regulator_ops; -extern struct regulator_ops mc13xxx_fixed_regulator_ops; +extern const struct regulator_ops mc13xxx_regulator_ops; +extern const struct regulator_ops mc13xxx_fixed_regulator_ops; -#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \ +#define MC13xxx_DEFINE(prefix, _name, _node, _reg, _vsel_reg, _voltages, _ops) \ [prefix ## _name] = { \ .desc = { \ - .name = #_name, \ + .name = #_node, \ .n_voltages = ARRAY_SIZE(_voltages), \ .volt_table = _voltages, \ .ops = &_ops, \ @@ -74,10 +74,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\ } -#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \ +#define MC13xxx_FIXED_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \ [prefix ## _name] = { \ .desc = { \ - .name = #_name, \ + .name = #_node, \ .n_voltages = ARRAY_SIZE(_voltages), \ .volt_table = _voltages, \ .ops = &_ops, \ @@ -89,10 +89,10 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ } -#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \ +#define MC13xxx_GPO_DEFINE(prefix, _name, _node, _reg, _voltages, _ops) \ [prefix ## _name] = { \ .desc = { \ - .name = #_name, \ + .name = #_node, \ .n_voltages = ARRAY_SIZE(_voltages), \ .volt_table = _voltages, \ .ops = &_ops, \ @@ -104,9 +104,9 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops; .enable_bit = prefix ## _reg ## _ ## _name ## EN, \ } -#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \ - MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops) -#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops) \ - MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops) +#define MC13xxx_DEFINE_SW(_name, _node, _reg, _vsel_reg, _voltages, ops) \ + MC13xxx_DEFINE(SW, _name, _node, _reg, _vsel_reg, _voltages, ops) +#define MC13xxx_DEFINE_REGU(_name, _node, _reg, _vsel_reg, _voltages, ops) \ + MC13xxx_DEFINE(REGU, _name, _node, _reg, _vsel_reg, _voltages, ops) #endif diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c index 3479ae009b0b..3a8004abe044 100644 --- a/drivers/regulator/mcp16502.c +++ b/drivers/regulator/mcp16502.c @@ -17,6 +17,7 @@ #include #include #include +#include #define VDD_LOW_SEL 0x0D #define VDD_HIGH_SEL 0x3F @@ -546,7 +547,6 @@ static struct i2c_driver mcp16502_drv = { 
module_i2c_driver(mcp16502_drv); -MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MCP16502 PMIC driver"); MODULE_AUTHOR("Andrei Stefanescu andrei.stefanescu@microchip.com"); diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c index 0495716fd35f..01d69f43d2b0 100644 --- a/drivers/regulator/mt6311-regulator.c +++ b/drivers/regulator/mt6311-regulator.c @@ -38,13 +38,9 @@ static const struct regmap_config mt6311_regmap_config = { #define MT6311_MAX_UV 1393750 #define MT6311_STEP_UV 6250 -static const struct regulator_linear_range buck_volt_range[] = { - REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV), -}; - static const struct regulator_ops mt6311_buck_ops = { - .list_voltage = regulator_list_voltage_linear_range, - .map_voltage = regulator_map_voltage_linear_range, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_time_sel = regulator_set_voltage_time_sel, @@ -71,8 +67,6 @@ static const struct regulator_ops mt6311_ldo_ops = { .min_uV = MT6311_MIN_UV,\ .uV_step = MT6311_STEP_UV,\ .owner = THIS_MODULE,\ - .linear_ranges = buck_volt_range, \ - .n_linear_ranges = ARRAY_SIZE(buck_volt_range), \ .enable_reg = MT6311_VDVFS11_CON9,\ .enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\ .vsel_reg = MT6311_VDVFS11_CON12,\ diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index ffa5fc3724e4..7b6bf3536271 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -255,7 +255,7 @@ static void of_get_regulation_constraints(struct device_node *np, * @desc: regulator description * * Populates regulator_init_data structure by extracting data from device - * tree node, returns a pointer to the populated struture or NULL if memory + * tree node, returns a pointer to the populated structure or NULL if memory * alloc fails. */ struct regulator_init_data *of_get_regulator_init_data(struct device *dev, @@ -547,7 +547,7 @@ bool of_check_coupling_data(struct regulator_dev *rdev) NULL); if (c_n_phandles != n_phandles) { - dev_err(&rdev->dev, "number of couped reg phandles mismatch\n"); + dev_err(&rdev->dev, "number of coupled reg phandles mismatch\n"); ret = false; goto clean; } diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c index c2cc392a27d4..7fb9e8dd834e 100644 --- a/drivers/regulator/palmas-regulator.c +++ b/drivers/regulator/palmas-regulator.c @@ -382,7 +382,7 @@ static struct palmas_sleep_requestor_info tps65917_sleep_req_info[] = { EXTERNAL_REQUESTOR_TPS65917(LDO5, 2, 4), }; -static unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500}; +static const unsigned int palmas_smps_ramp_delay[4] = {0, 10000, 5000, 2500}; #define SMPS_CTRL_MODE_OFF 0x00 #define SMPS_CTRL_MODE_ON 0x01 diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c index a9446056435f..1600f9821891 100644 --- a/drivers/regulator/pv88060-regulator.c +++ b/drivers/regulator/pv88060-regulator.c @@ -53,10 +53,6 @@ enum { struct pv88060_regulator { struct regulator_desc desc; - /* Current limiting */ - unsigned n_current_limits; - const int *current_limits; - unsigned int limit_mask; unsigned int conf; /* buck configuration register */ }; @@ -75,7 +71,7 @@ static const struct regmap_config pv88060_regmap_config = { * Entry indexes corresponds to register values. 
*/ -static const int pv88060_buck1_limits[] = { +static const unsigned int pv88060_buck1_limits[] = { 1496000, 2393000, 3291000, 4189000 }; @@ -128,40 +124,6 @@ static int pv88060_buck_set_mode(struct regulator_dev *rdev, PV88060_BUCK_MODE_MASK, val); } -static int pv88060_set_current_limit(struct regulator_dev *rdev, int min, - int max) -{ - struct pv88060_regulator *info = rdev_get_drvdata(rdev); - int i; - - /* search for closest to maximum */ - for (i = info->n_current_limits; i >= 0; i--) { - if (min <= info->current_limits[i] - && max >= info->current_limits[i]) { - return regmap_update_bits(rdev->regmap, - info->conf, - info->limit_mask, - i << PV88060_BUCK_ILIM_SHIFT); - } - } - - return -EINVAL; -} - -static int pv88060_get_current_limit(struct regulator_dev *rdev) -{ - struct pv88060_regulator *info = rdev_get_drvdata(rdev); - unsigned int data; - int ret; - - ret = regmap_read(rdev->regmap, info->conf, &data); - if (ret < 0) - return ret; - - data = (data & info->limit_mask) >> PV88060_BUCK_ILIM_SHIFT; - return info->current_limits[data]; -} - static const struct regulator_ops pv88060_buck_ops = { .get_mode = pv88060_buck_get_mode, .set_mode = pv88060_buck_set_mode, @@ -171,8 +133,8 @@ static const struct regulator_ops pv88060_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = pv88060_set_current_limit, - .get_current_limit = pv88060_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; static const struct regulator_ops pv88060_ldo_ops = { @@ -184,6 +146,12 @@ static const struct regulator_ops pv88060_ldo_ops = { .list_voltage = regulator_list_voltage_linear, }; +static const struct regulator_ops pv88060_sw_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + #define PV88060_BUCK(chip, regl_name, min, step, max, limits_array) \ {\ .desc = {\ @@ -201,10 +169,11 @@ static const struct regulator_ops pv88060_ldo_ops = { .enable_mask = PV88060_BUCK_EN, \ .vsel_reg = PV88060_REG_##regl_name##_CONF0,\ .vsel_mask = PV88060_VBUCK_MASK,\ + .curr_table = limits_array,\ + .n_current_limits = ARRAY_SIZE(limits_array),\ + .csel_reg = PV88060_REG_##regl_name##_CONF1,\ + .csel_mask = PV88060_BUCK_ILIM_MASK,\ },\ - .current_limits = limits_array,\ - .n_current_limits = ARRAY_SIZE(limits_array),\ - .limit_mask = PV88060_BUCK_ILIM_MASK, \ .conf = PV88060_REG_##regl_name##_CONF1,\ } @@ -237,9 +206,8 @@ static const struct regulator_ops pv88060_ldo_ops = { .regulators_node = of_match_ptr("regulators"),\ .type = REGULATOR_VOLTAGE,\ .owner = THIS_MODULE,\ - .ops = &pv88060_ldo_ops,\ - .min_uV = max,\ - .uV_step = 0,\ + .ops = &pv88060_sw_ops,\ + .fixed_uV = max,\ .n_voltages = 1,\ .enable_reg = PV88060_REG_##regl_name##_CONF,\ .enable_mask = PV88060_SW_EN,\ diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c index 9a08cb2de501..bdddacdbeb99 100644 --- a/drivers/regulator/pv88080-regulator.c +++ b/drivers/regulator/pv88080-regulator.c @@ -45,12 +45,7 @@ enum pv88080_types { struct pv88080_regulator { struct regulator_desc desc; - /* Current limiting */ - unsigned int n_current_limits; - const int *current_limits; - unsigned int limit_mask; unsigned int mode_reg; - unsigned int limit_reg; unsigned int conf2; unsigned int conf5; }; @@ -102,11 +97,11 @@ static const 
struct regmap_config pv88080_regmap_config = { * Entry indexes corresponds to register values. */ -static const int pv88080_buck1_limits[] = { +static const unsigned int pv88080_buck1_limits[] = { 3230000, 5130000, 6960000, 8790000 }; -static const int pv88080_buck23_limits[] = { +static const unsigned int pv88080_buck23_limits[] = { 1496000, 2393000, 3291000, 4189000 }; @@ -272,40 +267,6 @@ static int pv88080_buck_set_mode(struct regulator_dev *rdev, PV88080_BUCK1_MODE_MASK, val); } -static int pv88080_set_current_limit(struct regulator_dev *rdev, int min, - int max) -{ - struct pv88080_regulator *info = rdev_get_drvdata(rdev); - int i; - - /* search for closest to maximum */ - for (i = info->n_current_limits; i >= 0; i--) { - if (min <= info->current_limits[i] - && max >= info->current_limits[i]) { - return regmap_update_bits(rdev->regmap, - info->limit_reg, - info->limit_mask, - i << PV88080_BUCK1_ILIM_SHIFT); - } - } - - return -EINVAL; -} - -static int pv88080_get_current_limit(struct regulator_dev *rdev) -{ - struct pv88080_regulator *info = rdev_get_drvdata(rdev); - unsigned int data; - int ret; - - ret = regmap_read(rdev->regmap, info->limit_reg, &data); - if (ret < 0) - return ret; - - data = (data & info->limit_mask) >> PV88080_BUCK1_ILIM_SHIFT; - return info->current_limits[data]; -} - static const struct regulator_ops pv88080_buck_ops = { .get_mode = pv88080_buck_get_mode, .set_mode = pv88080_buck_set_mode, @@ -315,8 +276,8 @@ static const struct regulator_ops pv88080_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = pv88080_set_current_limit, - .get_current_limit = pv88080_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; static const struct regulator_ops pv88080_hvbuck_ops = { @@ -341,9 +302,9 @@ static const struct regulator_ops pv88080_hvbuck_ops = { .min_uV = min, \ .uV_step = step, \ .n_voltages = ((max) - (min))/(step) + 1, \ + .curr_table = limits_array, \ + .n_current_limits = ARRAY_SIZE(limits_array), \ },\ - .current_limits = limits_array, \ - .n_current_limits = ARRAY_SIZE(limits_array), \ } #define PV88080_HVBUCK(chip, regl_name, min, step, max) \ @@ -521,9 +482,9 @@ static int pv88080_i2c_probe(struct i2c_client *i2c, if (init_data) config.init_data = &init_data[i]; - pv88080_regulator_info[i].limit_reg + pv88080_regulator_info[i].desc.csel_reg = regmap_config->buck_regmap[i].buck_limit_reg; - pv88080_regulator_info[i].limit_mask + pv88080_regulator_info[i].desc.csel_mask = regmap_config->buck_regmap[i].buck_limit_mask; pv88080_regulator_info[i].mode_reg = regmap_config->buck_regmap[i].buck_mode_reg; diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c index 7a0c15957bd0..6e97cc6df2ee 100644 --- a/drivers/regulator/pv88090-regulator.c +++ b/drivers/regulator/pv88090-regulator.c @@ -42,10 +42,6 @@ enum { struct pv88090_regulator { struct regulator_desc desc; - /* Current limiting */ - unsigned int n_current_limits; - const int *current_limits; - unsigned int limit_mask; unsigned int conf; unsigned int conf2; }; @@ -71,14 +67,14 @@ static const struct regmap_config pv88090_regmap_config = { * Entry indexes corresponds to register values. 
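 * (With the conversion below to regulator_set_current_limit_regmap() and
 *  regulator_get_current_limit_regmap(), the index of the chosen entry in
 *  this table, now referenced from desc->curr_table, is stored in
 *  desc->csel_reg under desc->csel_mask, which is why the driver-private
 *  current-limit bookkeeping can be dropped.)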
*/ -static const int pv88090_buck1_limits[] = { +static const unsigned int pv88090_buck1_limits[] = { 220000, 440000, 660000, 880000, 1100000, 1320000, 1540000, 1760000, 1980000, 2200000, 2420000, 2640000, 2860000, 3080000, 3300000, 3520000, 3740000, 3960000, 4180000, 4400000, 4620000, 4840000, 5060000, 5280000, 5500000, 5720000, 5940000, 6160000, 6380000, 6600000, 6820000, 7040000 }; -static const int pv88090_buck23_limits[] = { +static const unsigned int pv88090_buck23_limits[] = { 1496000, 2393000, 3291000, 4189000 }; @@ -150,40 +146,6 @@ static int pv88090_buck_set_mode(struct regulator_dev *rdev, PV88090_BUCK1_MODE_MASK, val); } -static int pv88090_set_current_limit(struct regulator_dev *rdev, int min, - int max) -{ - struct pv88090_regulator *info = rdev_get_drvdata(rdev); - int i; - - /* search for closest to maximum */ - for (i = info->n_current_limits; i >= 0; i--) { - if (min <= info->current_limits[i] - && max >= info->current_limits[i]) { - return regmap_update_bits(rdev->regmap, - info->conf, - info->limit_mask, - i << PV88090_BUCK1_ILIM_SHIFT); - } - } - - return -EINVAL; -} - -static int pv88090_get_current_limit(struct regulator_dev *rdev) -{ - struct pv88090_regulator *info = rdev_get_drvdata(rdev); - unsigned int data; - int ret; - - ret = regmap_read(rdev->regmap, info->conf, &data); - if (ret < 0) - return ret; - - data = (data & info->limit_mask) >> PV88090_BUCK1_ILIM_SHIFT; - return info->current_limits[data]; -} - static const struct regulator_ops pv88090_buck_ops = { .get_mode = pv88090_buck_get_mode, .set_mode = pv88090_buck_set_mode, @@ -193,8 +155,8 @@ static const struct regulator_ops pv88090_buck_ops = { .set_voltage_sel = regulator_set_voltage_sel_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .list_voltage = regulator_list_voltage_linear, - .set_current_limit = pv88090_set_current_limit, - .get_current_limit = pv88090_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, }; static const struct regulator_ops pv88090_ldo_ops = { @@ -223,10 +185,11 @@ static const struct regulator_ops pv88090_ldo_ops = { .enable_mask = PV88090_##regl_name##_EN, \ .vsel_reg = PV88090_REG_##regl_name##_CONF0, \ .vsel_mask = PV88090_V##regl_name##_MASK, \ + .curr_table = limits_array, \ + .n_current_limits = ARRAY_SIZE(limits_array), \ + .csel_reg = PV88090_REG_##regl_name##_CONF1, \ + .csel_mask = PV88090_##regl_name##_ILIM_MASK, \ },\ - .current_limits = limits_array, \ - .n_current_limits = ARRAY_SIZE(limits_array), \ - .limit_mask = PV88090_##regl_name##_ILIM_MASK, \ .conf = PV88090_REG_##regl_name##_CONF1, \ .conf2 = PV88090_REG_##regl_name##_CONF2, \ } diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c index a2fd140eff81..3f53f9134b32 100644 --- a/drivers/regulator/pwm-regulator.c +++ b/drivers/regulator/pwm-regulator.c @@ -40,9 +40,6 @@ struct pwm_regulator_data { /* regulator descriptor */ struct regulator_desc desc; - /* Regulator ops */ - struct regulator_ops ops; - int state; /* Enable GPIO */ @@ -231,7 +228,7 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev, return 0; } -static struct regulator_ops pwm_regulator_voltage_table_ops = { +static const struct regulator_ops pwm_regulator_voltage_table_ops = { .set_voltage_sel = pwm_regulator_set_voltage_sel, .get_voltage_sel = pwm_regulator_get_voltage_sel, .list_voltage = pwm_regulator_list_voltage, @@ -241,7 +238,7 @@ static struct regulator_ops pwm_regulator_voltage_table_ops = 
{ .is_enabled = pwm_regulator_is_enabled, }; -static struct regulator_ops pwm_regulator_voltage_continuous_ops = { +static const struct regulator_ops pwm_regulator_voltage_continuous_ops = { .get_voltage = pwm_regulator_get_voltage, .set_voltage = pwm_regulator_set_voltage, .enable = pwm_regulator_enable, @@ -249,7 +246,7 @@ static struct regulator_ops pwm_regulator_voltage_continuous_ops = { .is_enabled = pwm_regulator_is_enabled, }; -static struct regulator_desc pwm_regulator_desc = { +static const struct regulator_desc pwm_regulator_desc = { .name = "pwm-regulator", .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, @@ -287,9 +284,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev, drvdata->state = -EINVAL; drvdata->duty_cycle_table = duty_cycle_table; - memcpy(&drvdata->ops, &pwm_regulator_voltage_table_ops, - sizeof(drvdata->ops)); - drvdata->desc.ops = &drvdata->ops; + drvdata->desc.ops = &pwm_regulator_voltage_table_ops; drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table); return 0; @@ -301,9 +296,7 @@ static int pwm_regulator_init_continuous(struct platform_device *pdev, u32 dutycycle_range[2] = { 0, 100 }; u32 dutycycle_unit = 100; - memcpy(&drvdata->ops, &pwm_regulator_voltage_continuous_ops, - sizeof(drvdata->ops)); - drvdata->desc.ops = &drvdata->ops; + drvdata->desc.ops = &pwm_regulator_voltage_continuous_ops; drvdata->desc.continuous_voltage_range = true; of_property_read_u32_array(pdev->dev.of_node, diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index f5bca77d67c1..68bc23df4213 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -31,6 +31,11 @@ struct qcom_rpm_reg { int is_enabled; int uV; + u32 load; + + unsigned int enabled_updated:1; + unsigned int uv_updated:1; + unsigned int load_updated:1; }; struct rpm_regulator_req { @@ -43,30 +48,59 @@ struct rpm_regulator_req { #define RPM_KEY_UV 0x00007675 /* "uv" */ #define RPM_KEY_MA 0x0000616d /* "ma" */ -static int rpm_reg_write_active(struct qcom_rpm_reg *vreg, - struct rpm_regulator_req *req, - size_t size) +static int rpm_reg_write_active(struct qcom_rpm_reg *vreg) { - return qcom_rpm_smd_write(vreg->rpm, - QCOM_SMD_RPM_ACTIVE_STATE, - vreg->type, - vreg->id, - req, size); + struct rpm_regulator_req req[3]; + int reqlen = 0; + int ret; + + if (vreg->enabled_updated) { + req[reqlen].key = cpu_to_le32(RPM_KEY_SWEN); + req[reqlen].nbytes = cpu_to_le32(sizeof(u32)); + req[reqlen].value = cpu_to_le32(vreg->is_enabled); + reqlen++; + } + + if (vreg->uv_updated && vreg->is_enabled) { + req[reqlen].key = cpu_to_le32(RPM_KEY_UV); + req[reqlen].nbytes = cpu_to_le32(sizeof(u32)); + req[reqlen].value = cpu_to_le32(vreg->uV); + reqlen++; + } + + if (vreg->load_updated && vreg->is_enabled) { + req[reqlen].key = cpu_to_le32(RPM_KEY_MA); + req[reqlen].nbytes = cpu_to_le32(sizeof(u32)); + req[reqlen].value = cpu_to_le32(vreg->load / 1000); + reqlen++; + } + + if (!reqlen) + return 0; + + ret = qcom_rpm_smd_write(vreg->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + vreg->type, vreg->id, + req, sizeof(req[0]) * reqlen); + if (!ret) { + vreg->enabled_updated = 0; + vreg->uv_updated = 0; + vreg->load_updated = 0; + } + + return ret; } static int rpm_reg_enable(struct regulator_dev *rdev) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; int ret; - req.key = cpu_to_le32(RPM_KEY_SWEN); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = cpu_to_le32(1); + vreg->is_enabled = 1; + vreg->enabled_updated = 
1; - ret = rpm_reg_write_active(vreg, &req, sizeof(req)); - if (!ret) - vreg->is_enabled = 1; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->is_enabled = 0; return ret; } @@ -81,16 +115,14 @@ static int rpm_reg_is_enabled(struct regulator_dev *rdev) static int rpm_reg_disable(struct regulator_dev *rdev) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; int ret; - req.key = cpu_to_le32(RPM_KEY_SWEN); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = 0; + vreg->is_enabled = 0; + vreg->enabled_updated = 1; - ret = rpm_reg_write_active(vreg, &req, sizeof(req)); - if (!ret) - vreg->is_enabled = 0; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->is_enabled = 1; return ret; } @@ -108,16 +140,15 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev, unsigned *selector) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; - int ret = 0; + int ret; + int old_uV = vreg->uV; - req.key = cpu_to_le32(RPM_KEY_UV); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = cpu_to_le32(min_uV); + vreg->uV = min_uV; + vreg->uv_updated = 1; - ret = rpm_reg_write_active(vreg, &req, sizeof(req)); - if (!ret) - vreg->uV = min_uV; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->uV = old_uV; return ret; } @@ -125,13 +156,16 @@ static int rpm_reg_set_voltage(struct regulator_dev *rdev, static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA) { struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev); - struct rpm_regulator_req req; + u32 old_load = vreg->load; + int ret; - req.key = cpu_to_le32(RPM_KEY_MA); - req.nbytes = cpu_to_le32(sizeof(u32)); - req.value = cpu_to_le32(load_uA / 1000); + vreg->load = load_uA; + vreg->load_updated = 1; + ret = rpm_reg_write_active(vreg); + if (ret) + vreg->load = old_load; - return rpm_reg_write_active(vreg, &req, sizeof(req)); + return ret; } static const struct regulator_ops rpm_smps_ldo_ops = { diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 213b68743cc8..23713e16c286 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1,5 +1,5 @@ /* - * Regulator driver for Rockchip RK808/RK818 + * Regulator driver for Rockchip RK805/RK808/RK818 * * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * @@ -363,28 +363,28 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev) rdev->desc->enable_mask); } -static struct regulator_ops rk805_reg_ops = { - .list_voltage = regulator_list_voltage_linear, - .map_voltage = regulator_map_voltage_linear, - .get_voltage_sel = regulator_get_voltage_sel_regmap, - .set_voltage_sel = regulator_set_voltage_sel_regmap, - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .set_suspend_voltage = rk808_set_suspend_voltage, - .set_suspend_enable = rk805_set_suspend_enable, - .set_suspend_disable = rk805_set_suspend_disable, +static const struct regulator_ops rk805_reg_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_voltage = rk808_set_suspend_voltage, + .set_suspend_enable = rk805_set_suspend_enable, + .set_suspend_disable = rk805_set_suspend_disable, }; -static struct regulator_ops 
rk805_switch_ops = { - .enable = regulator_enable_regmap, - .disable = regulator_disable_regmap, - .is_enabled = regulator_is_enabled_regmap, - .set_suspend_enable = rk805_set_suspend_enable, - .set_suspend_disable = rk805_set_suspend_disable, +static const struct regulator_ops rk805_switch_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_suspend_enable = rk805_set_suspend_enable, + .set_suspend_disable = rk805_set_suspend_disable, }; -static struct regulator_ops rk808_buck1_2_ops = { +static const struct regulator_ops rk808_buck1_2_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap, @@ -399,7 +399,7 @@ static struct regulator_ops rk808_buck1_2_ops = { .set_suspend_disable = rk808_set_suspend_disable, }; -static struct regulator_ops rk808_reg_ops = { +static const struct regulator_ops rk808_reg_ops = { .list_voltage = regulator_list_voltage_linear, .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -412,7 +412,7 @@ static struct regulator_ops rk808_reg_ops = { .set_suspend_disable = rk808_set_suspend_disable, }; -static struct regulator_ops rk808_reg_ops_ranges = { +static const struct regulator_ops rk808_reg_ops_ranges = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -425,7 +425,7 @@ static struct regulator_ops rk808_reg_ops_ranges = { .set_suspend_disable = rk808_set_suspend_disable, }; -static struct regulator_ops rk808_switch_ops = { +static const struct regulator_ops rk808_switch_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -433,6 +433,12 @@ static struct regulator_ops rk808_switch_ops = { .set_suspend_disable = rk808_set_suspend_disable, }; +static const struct regulator_linear_range rk805_buck_1_2_voltage_ranges[] = { + REGULATOR_LINEAR_RANGE(712500, 0, 59, 12500), + REGULATOR_LINEAR_RANGE(1800000, 60, 62, 200000), + REGULATOR_LINEAR_RANGE(2300000, 63, 63, 0), +}; + static const struct regulator_desc rk805_reg[] = { { .name = "DCDC_REG1", @@ -440,11 +446,11 @@ static const struct regulator_desc rk805_reg[] = { .of_match = of_match_ptr("DCDC_REG1"), .regulators_node = of_match_ptr("regulators"), .id = RK805_ID_DCDC1, - .ops = &rk805_reg_ops, + .ops = &rk808_reg_ops_ranges, .type = REGULATOR_VOLTAGE, - .min_uV = 712500, - .uV_step = 12500, .n_voltages = 64, + .linear_ranges = rk805_buck_1_2_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges), .vsel_reg = RK805_BUCK1_ON_VSEL_REG, .vsel_mask = RK818_BUCK_VSEL_MASK, .enable_reg = RK805_DCDC_EN_REG, @@ -456,11 +462,11 @@ static const struct regulator_desc rk805_reg[] = { .of_match = of_match_ptr("DCDC_REG2"), .regulators_node = of_match_ptr("regulators"), .id = RK805_ID_DCDC2, - .ops = &rk805_reg_ops, + .ops = &rk808_reg_ops_ranges, .type = REGULATOR_VOLTAGE, - .min_uV = 712500, - .uV_step = 12500, .n_voltages = 64, + .linear_ranges = rk805_buck_1_2_voltage_ranges, + .n_linear_ranges = ARRAY_SIZE(rk805_buck_1_2_voltage_ranges), .vsel_reg = RK805_BUCK2_ON_VSEL_REG, .vsel_mask = RK818_BUCK_VSEL_MASK, .enable_reg = RK805_DCDC_EN_REG, @@ -796,7 +802,7 @@ static struct platform_driver rk808_regulator_driver = { module_platform_driver(rk808_regulator_driver); 
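/*
 * Illustrative sketch (not part of the patch above): the RK805 buck1/2
 * change replaces a flat min_uV/uV_step description with a
 * regulator_linear_range table so the generic linear-range helpers can
 * express the non-uniform top of the selector space (1.8/2.0/2.2 V in
 * 200 mV steps, then a fixed 2.3 V at selector 63). The standalone code
 * below only mimics how such a table maps a selector to a voltage; it is
 * not the kernel helper implementation.
 */
struct lin_range {
	unsigned int min_uV;	/* voltage of the first selector in the range */
	unsigned int min_sel;	/* first selector covered by this range */
	unsigned int max_sel;	/* last selector covered by this range */
	unsigned int step_uV;	/* increment per selector step (0 = fixed) */
};

static const struct lin_range rk805_buck12_sketch[] = {
	{  712500,  0, 59,  12500 },	/* 0.7125 V .. 1.45 V */
	{ 1800000, 60, 62, 200000 },	/* 1.8 V, 2.0 V, 2.2 V */
	{ 2300000, 63, 63,      0 },	/* fixed 2.3 V */
};

static int sketch_sel_to_uV(unsigned int sel)
{
	unsigned int i;

	for (i = 0; i < 3; i++) {
		const struct lin_range *r = &rk805_buck12_sketch[i];

		if (sel >= r->min_sel && sel <= r->max_sel)
			return r->min_uV + (sel - r->min_sel) * r->step_uV;
	}
	return -1;	/* the real helper returns -EINVAL here */
}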
-MODULE_DESCRIPTION("regulator driver for the RK808/RK818 series PMICs"); +MODULE_DESCRIPTION("regulator driver for the RK805/RK808/RK818 series PMICs"); MODULE_AUTHOR("Chris Zhong "); MODULE_AUTHOR("Zhang Qing "); MODULE_AUTHOR("Wadim Egorov "); diff --git a/drivers/regulator/rt5033-regulator.c b/drivers/regulator/rt5033-regulator.c index 96d2c18e051a..639cbadc044a 100644 --- a/drivers/regulator/rt5033-regulator.c +++ b/drivers/regulator/rt5033-regulator.c @@ -16,14 +16,14 @@ #include #include -static struct regulator_ops rt5033_safe_ldo_ops = { +static const struct regulator_ops rt5033_safe_ldo_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .list_voltage = regulator_list_voltage_linear, }; -static struct regulator_ops rt5033_buck_ops = { +static const struct regulator_ops rt5033_buck_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index 095d25f3d2ea..58a1fe583a6c 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -298,13 +298,13 @@ static const struct regulator_desc regulators[] = { regulator_desc_ldo(2, STEP_50_MV), regulator_desc_ldo(3, STEP_50_MV), regulator_desc_ldo(4, STEP_50_MV), - regulator_desc_ldo(5, STEP_50_MV), + regulator_desc_ldo(5, STEP_25_MV), regulator_desc_ldo(6, STEP_25_MV), regulator_desc_ldo(7, STEP_50_MV), regulator_desc_ldo(8, STEP_50_MV), regulator_desc_ldo(9, STEP_50_MV), regulator_desc_ldo(10, STEP_50_MV), - regulator_desc_ldo(11, STEP_25_MV), + regulator_desc_ldo(11, STEP_50_MV), regulator_desc_ldo(12, STEP_50_MV), regulator_desc_ldo(13, STEP_50_MV), regulator_desc_ldo(14, STEP_50_MV), @@ -315,11 +315,11 @@ static const struct regulator_desc regulators[] = { regulator_desc_ldo(19, STEP_50_MV), regulator_desc_ldo(20, STEP_50_MV), regulator_desc_ldo(21, STEP_50_MV), - regulator_desc_ldo(22, STEP_25_MV), - regulator_desc_ldo(23, STEP_25_MV), + regulator_desc_ldo(22, STEP_50_MV), + regulator_desc_ldo(23, STEP_50_MV), regulator_desc_ldo(24, STEP_50_MV), regulator_desc_ldo(25, STEP_50_MV), - regulator_desc_ldo(26, STEP_50_MV), + regulator_desc_ldo(26, STEP_25_MV), regulator_desc_buck1_4(1), regulator_desc_buck1_4(2), regulator_desc_buck1_4(3), diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index ee4a23ab0663..134c62db36c5 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -362,7 +362,7 @@ static const struct regulator_desc s2mps11_regulators[] = { regulator_desc_s2mps11_ldo(32, STEP_50_MV), regulator_desc_s2mps11_ldo(33, STEP_50_MV), regulator_desc_s2mps11_ldo(34, STEP_50_MV), - regulator_desc_s2mps11_ldo(35, STEP_50_MV), + regulator_desc_s2mps11_ldo(35, STEP_25_MV), regulator_desc_s2mps11_ldo(36, STEP_50_MV), regulator_desc_s2mps11_ldo(37, STEP_50_MV), regulator_desc_s2mps11_ldo(38, STEP_50_MV), @@ -372,8 +372,8 @@ static const struct regulator_desc s2mps11_regulators[] = { regulator_desc_s2mps11_buck1_4(4), regulator_desc_s2mps11_buck5, regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV), - regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV), - regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV), + regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV), + regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV), regulator_desc_s2mps11_buck9, regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV), }; diff --git 
a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index b581f01f3395..bb9d1a083299 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -115,7 +115,7 @@ static const struct sec_voltage_desc *reg_voltage_map[] = { [S5M8767_BUCK9] = &buck_voltage_val3, }; -static unsigned int s5m8767_opmode_reg[][4] = { +static const unsigned int s5m8767_opmode_reg[][4] = { /* {OFF, ON, LOWPOWER, SUSPEND} */ /* LDO1 ... LDO28 */ {0x0, 0x3, 0x2, 0x1}, /* LDO1 */ @@ -339,13 +339,9 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int new_sel) { struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev); - const struct sec_voltage_desc *desc; - int reg_id = rdev_get_id(rdev); - - desc = reg_voltage_map[reg_id]; if ((old_sel < new_sel) && s5m8767->ramp_delay) - return DIV_ROUND_UP(desc->step * (new_sel - old_sel), + return DIV_ROUND_UP(rdev->desc->uV_step * (new_sel - old_sel), s5m8767->ramp_delay * 1000); return 0; } diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c index e0a9c445ed67..ba2f24949dc9 100644 --- a/drivers/regulator/stm32-vrefbuf.c +++ b/drivers/regulator/stm32-vrefbuf.c @@ -15,6 +15,7 @@ #include #include #include +#include /* STM32 VREFBUF registers */ #define STM32_VREFBUF_CSR 0x00 @@ -25,9 +26,12 @@ #define STM32_HIZ BIT(1) #define STM32_ENVR BIT(0) +#define STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS 10 + struct stm32_vrefbuf { void __iomem *base; struct clk *clk; + struct device *dev; }; static const unsigned int stm32_vrefbuf_voltages[] = { @@ -38,9 +42,16 @@ static const unsigned int stm32_vrefbuf_voltages[] = { static int stm32_vrefbuf_enable(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; int ret; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); val = (val & ~STM32_HIZ) | STM32_ENVR; writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); @@ -59,45 +70,95 @@ static int stm32_vrefbuf_enable(struct regulator_dev *rdev) writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); } + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + return ret; } static int stm32_vrefbuf_disable(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; + int ret; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); val = (val & ~STM32_ENVR) | STM32_HIZ; writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + return 0; } static int stm32_vrefbuf_is_enabled(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + int ret; + + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + ret = readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR; - return readl_relaxed(priv->base + STM32_VREFBUF_CSR) & STM32_ENVR; + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + + return ret; } static int stm32_vrefbuf_set_voltage_sel(struct regulator_dev *rdev, unsigned sel) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; + int 
ret; + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); val = (val & ~STM32_VRS) | FIELD_PREP(STM32_VRS, sel); writel_relaxed(val, priv->base + STM32_VREFBUF_CSR); + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + return 0; } static int stm32_vrefbuf_get_voltage_sel(struct regulator_dev *rdev) { struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); - u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + u32 val; + int ret; - return FIELD_GET(STM32_VRS, val); + ret = pm_runtime_get_sync(priv->dev); + if (ret < 0) { + pm_runtime_put_noidle(priv->dev); + return ret; + } + + val = readl_relaxed(priv->base + STM32_VREFBUF_CSR); + ret = FIELD_GET(STM32_VRS, val); + + pm_runtime_mark_last_busy(priv->dev); + pm_runtime_put_autosuspend(priv->dev); + + return ret; } static const struct regulator_ops stm32_vrefbuf_volt_ops = { @@ -130,6 +191,7 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev) priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + priv->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(&pdev->dev, res); @@ -140,10 +202,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); + pm_runtime_get_noresume(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, + STM32_VREFBUF_AUTO_SUSPEND_DELAY_MS); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = clk_prepare_enable(priv->clk); if (ret) { dev_err(&pdev->dev, "clk prepare failed with error %d\n", ret); - return ret; + goto err_pm_stop; } config.dev = &pdev->dev; @@ -161,10 +230,17 @@ static int stm32_vrefbuf_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, rdev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; err_clk_dis: clk_disable_unprepare(priv->clk); +err_pm_stop: + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); return ret; } @@ -174,12 +250,42 @@ static int stm32_vrefbuf_remove(struct platform_device *pdev) struct regulator_dev *rdev = platform_get_drvdata(pdev); struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + pm_runtime_get_sync(&pdev->dev); regulator_unregister(rdev); clk_disable_unprepare(priv->clk); + pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); return 0; }; +static int __maybe_unused stm32_vrefbuf_runtime_suspend(struct device *dev) +{ + struct regulator_dev *rdev = dev_get_drvdata(dev); + struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int __maybe_unused stm32_vrefbuf_runtime_resume(struct device *dev) +{ + struct regulator_dev *rdev = dev_get_drvdata(dev); + struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev); + + return clk_prepare_enable(priv->clk); +} + +static const struct dev_pm_ops stm32_vrefbuf_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(stm32_vrefbuf_runtime_suspend, + stm32_vrefbuf_runtime_resume, + NULL) +}; + static const struct of_device_id stm32_vrefbuf_of_match[] = { { .compatible = "st,stm32-vrefbuf", }, {}, @@ -192,6 +298,7 @@ static struct platform_driver stm32_vrefbuf_driver = { .driver = { .name 
= "stm32-vrefbuf", .of_match_table = of_match_ptr(stm32_vrefbuf_of_match), + .pm = &stm32_vrefbuf_pm_ops, }, }; module_platform_driver(stm32_vrefbuf_driver); diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c index 16ba0297f709..f09061473613 100644 --- a/drivers/regulator/stpmic1_regulator.c +++ b/drivers/regulator/stpmic1_regulator.c @@ -12,8 +12,10 @@ #include #include +#include + /** - * stpmic1 regulator description + * stpmic1 regulator description: this structure is used as driver data * @desc: regulator framework description * @mask_reset_reg: mask reset register address * @mask_reset_mask: mask rank and mask reset register mask @@ -28,28 +30,9 @@ struct stpmic1_regulator_cfg { u8 icc_mask; }; -/** - * stpmic1 regulator data: this structure is used as driver data - * @regul_id: regulator id - * @reg_node: DT node of regulator (unused on non-DT platforms) - * @cfg: stpmic specific regulator description - * @mask_reset: mask_reset bit value - * @irq_curlim: current limit interrupt number - * @regmap: point to parent regmap structure - */ -struct stpmic1_regulator { - unsigned int regul_id; - struct device_node *reg_node; - struct stpmic1_regulator_cfg *cfg; - u8 mask_reset; - int irq_curlim; - struct regmap *regmap; -}; - static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode); static unsigned int stpmic1_get_mode(struct regulator_dev *rdev); static int stpmic1_set_icc(struct regulator_dev *rdev); -static int stpmic1_regulator_parse_dt(void *driver_data); static unsigned int stpmic1_map_mode(unsigned int mode); enum { @@ -72,15 +55,13 @@ enum { /* Enable time worst case is 5000mV/(2250uV/uS) */ #define PMIC_ENABLE_TIME_US 2200 -#define STPMIC1_BUCK_MODE_NORMAL 0 -#define STPMIC1_BUCK_MODE_LP BUCK_HPLP_ENABLE_MASK - -struct regulator_linear_range buck1_ranges[] = { - REGULATOR_LINEAR_RANGE(600000, 0, 30, 25000), - REGULATOR_LINEAR_RANGE(1350000, 31, 63, 0), +static const struct regulator_linear_range buck1_ranges[] = { + REGULATOR_LINEAR_RANGE(725000, 0, 4, 0), + REGULATOR_LINEAR_RANGE(725000, 5, 36, 25000), + REGULATOR_LINEAR_RANGE(1500000, 37, 63, 0), }; -struct regulator_linear_range buck2_ranges[] = { +static const struct regulator_linear_range buck2_ranges[] = { REGULATOR_LINEAR_RANGE(1000000, 0, 17, 0), REGULATOR_LINEAR_RANGE(1050000, 18, 19, 0), REGULATOR_LINEAR_RANGE(1100000, 20, 21, 0), @@ -94,7 +75,7 @@ struct regulator_linear_range buck2_ranges[] = { REGULATOR_LINEAR_RANGE(1500000, 36, 63, 0), }; -struct regulator_linear_range buck3_ranges[] = { +static const struct regulator_linear_range buck3_ranges[] = { REGULATOR_LINEAR_RANGE(1000000, 0, 19, 0), REGULATOR_LINEAR_RANGE(1100000, 20, 23, 0), REGULATOR_LINEAR_RANGE(1200000, 24, 27, 0), @@ -102,10 +83,9 @@ struct regulator_linear_range buck3_ranges[] = { REGULATOR_LINEAR_RANGE(1400000, 32, 35, 0), REGULATOR_LINEAR_RANGE(1500000, 36, 55, 100000), REGULATOR_LINEAR_RANGE(3400000, 56, 63, 0), - }; -struct regulator_linear_range buck4_ranges[] = { +static const struct regulator_linear_range buck4_ranges[] = { REGULATOR_LINEAR_RANGE(600000, 0, 27, 25000), REGULATOR_LINEAR_RANGE(1300000, 28, 29, 0), REGULATOR_LINEAR_RANGE(1350000, 30, 31, 0), @@ -113,24 +93,21 @@ struct regulator_linear_range buck4_ranges[] = { REGULATOR_LINEAR_RANGE(1450000, 34, 35, 0), REGULATOR_LINEAR_RANGE(1500000, 36, 60, 100000), REGULATOR_LINEAR_RANGE(3900000, 61, 63, 0), - }; -struct regulator_linear_range ldo1_ranges[] = { +static const struct regulator_linear_range ldo1_ranges[] = { 
REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0), - }; -struct regulator_linear_range ldo2_ranges[] = { +static const struct regulator_linear_range ldo2_ranges[] = { REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0), - }; -struct regulator_linear_range ldo3_ranges[] = { +static const struct regulator_linear_range ldo3_ranges[] = { REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 30, 0), @@ -138,18 +115,18 @@ struct regulator_linear_range ldo3_ranges[] = { REGULATOR_LINEAR_RANGE(500000, 31, 31, 0), }; -struct regulator_linear_range ldo5_ranges[] = { +static const struct regulator_linear_range ldo5_ranges[] = { REGULATOR_LINEAR_RANGE(1700000, 0, 7, 0), REGULATOR_LINEAR_RANGE(1700000, 8, 30, 100000), REGULATOR_LINEAR_RANGE(3900000, 31, 31, 0), }; -struct regulator_linear_range ldo6_ranges[] = { +static const struct regulator_linear_range ldo6_ranges[] = { REGULATOR_LINEAR_RANGE(900000, 0, 24, 100000), REGULATOR_LINEAR_RANGE(3300000, 25, 31, 0), }; -static struct regulator_ops stpmic1_ldo_ops = { +static const struct regulator_ops stpmic1_ldo_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .is_enabled = regulator_is_enabled_regmap, @@ -157,11 +134,10 @@ static struct regulator_ops stpmic1_ldo_ops = { .disable = regulator_disable_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_pull_down = regulator_set_pull_down_regmap, .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_ldo3_ops = { +static const struct regulator_ops stpmic1_ldo3_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_iterate, .is_enabled = regulator_is_enabled_regmap, @@ -169,21 +145,19 @@ static struct regulator_ops stpmic1_ldo3_ops = { .disable = regulator_disable_regmap, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, - .set_pull_down = regulator_set_pull_down_regmap, .get_bypass = regulator_get_bypass_regmap, .set_bypass = regulator_set_bypass_regmap, .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_ldo4_fixed_regul_ops = { +static const struct regulator_ops stpmic1_ldo4_fixed_regul_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, - .set_pull_down = regulator_set_pull_down_regmap, .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_buck_ops = { +static const struct regulator_ops stpmic1_buck_ops = { .list_voltage = regulator_list_voltage_linear_range, .map_voltage = regulator_map_voltage_linear_range, .is_enabled = regulator_is_enabled_regmap, @@ -197,20 +171,27 @@ static struct regulator_ops stpmic1_buck_ops = { .set_over_current_protection = stpmic1_set_icc, }; -static struct regulator_ops stpmic1_vref_ddr_ops = { +static const struct regulator_ops stpmic1_vref_ddr_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, - .set_pull_down = regulator_set_pull_down_regmap, }; -static struct regulator_ops stpmic1_switch_regul_ops = { +static const struct regulator_ops stpmic1_boost_regul_ops = { 
.is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .set_over_current_protection = stpmic1_set_icc, }; +static const struct regulator_ops stpmic1_switch_regul_ops = { + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .set_over_current_protection = stpmic1_set_icc, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + #define REG_LDO(ids, base) { \ .name = #ids, \ .id = STPMIC1_##ids, \ @@ -227,8 +208,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .enable_val = 1, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } @@ -252,8 +231,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .bypass_mask = LDO_BYPASS_MASK, \ .bypass_val_on = LDO_BYPASS_MASK, \ .bypass_val_off = 0, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } @@ -271,8 +248,6 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .enable_val = 1, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } @@ -312,12 +287,47 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .enable_val = 1, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ - .pull_down_reg = ids##_PULL_DOWN_REG, \ - .pull_down_mask = ids##_PULL_DOWN_MASK, \ .supply_name = #base, \ } -#define REG_SWITCH(ids, base, reg, mask, val) { \ +#define REG_BOOST(ids, base) { \ + .name = #ids, \ + .id = STPMIC1_##ids, \ + .n_voltages = 1, \ + .ops = &stpmic1_boost_regul_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 0, \ + .fixed_uV = 5000000, \ + .enable_reg = BST_SW_CR, \ + .enable_mask = BOOST_ENABLED, \ + .enable_val = BOOST_ENABLED, \ + .disable_val = 0, \ + .enable_time = PMIC_ENABLE_TIME_US, \ + .supply_name = #base, \ +} + +#define REG_VBUS_OTG(ids, base) { \ + .name = #ids, \ + .id = STPMIC1_##ids, \ + .n_voltages = 1, \ + .ops = &stpmic1_switch_regul_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = 0, \ + .fixed_uV = 5000000, \ + .enable_reg = BST_SW_CR, \ + .enable_mask = USBSW_OTG_SWITCH_ENABLED, \ + .enable_val = USBSW_OTG_SWITCH_ENABLED, \ + .disable_val = 0, \ + .enable_time = PMIC_ENABLE_TIME_US, \ + .supply_name = #base, \ + .active_discharge_reg = BST_SW_CR, \ + .active_discharge_mask = VBUS_OTG_DISCHARGE, \ + .active_discharge_on = VBUS_OTG_DISCHARGE, \ +} + +#define REG_SW_OUT(ids, base) { \ .name = #ids, \ .id = STPMIC1_##ids, \ .n_voltages = 1, \ @@ -326,15 +336,18 @@ static struct regulator_ops stpmic1_switch_regul_ops = { .owner = THIS_MODULE, \ .min_uV = 0, \ .fixed_uV = 5000000, \ - .enable_reg = (reg), \ - .enable_mask = (mask), \ - .enable_val = (val), \ + .enable_reg = BST_SW_CR, \ + .enable_mask = SWIN_SWOUT_ENABLED, \ + .enable_val = SWIN_SWOUT_ENABLED, \ .disable_val = 0, \ .enable_time = PMIC_ENABLE_TIME_US, \ .supply_name = #base, \ + .active_discharge_reg = BST_SW_CR, \ + .active_discharge_mask = SW_OUT_DISCHARGE, \ + .active_discharge_on = SW_OUT_DISCHARGE, \ } -struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = { +static const struct stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = { [STPMIC1_BUCK1] = { .desc = REG_BUCK(BUCK1, buck1), .icc_reg = BUCKS_ICCTO_CR, @@ -411,23 +424,17 @@ struct 
stpmic1_regulator_cfg stpmic1_regulator_cfgs[] = { .mask_reset_mask = BIT(6), }, [STPMIC1_BOOST] = { - .desc = REG_SWITCH(BOOST, boost, BST_SW_CR, - BOOST_ENABLED, - BOOST_ENABLED), + .desc = REG_BOOST(BOOST, boost), .icc_reg = BUCKS_ICCTO_CR, .icc_mask = BIT(6), }, [STPMIC1_VBUS_OTG] = { - .desc = REG_SWITCH(VBUS_OTG, pwr_sw1, BST_SW_CR, - USBSW_OTG_SWITCH_ENABLED, - USBSW_OTG_SWITCH_ENABLED), + .desc = REG_VBUS_OTG(VBUS_OTG, pwr_sw1), .icc_reg = BUCKS_ICCTO_CR, .icc_mask = BIT(4), }, [STPMIC1_SW_OUT] = { - .desc = REG_SWITCH(SW_OUT, pwr_sw2, BST_SW_CR, - SWIN_SWOUT_ENABLED, - SWIN_SWOUT_ENABLED), + .desc = REG_SW_OUT(SW_OUT, pwr_sw2), .icc_reg = BUCKS_ICCTO_CR, .icc_mask = BIT(5), }, @@ -448,8 +455,9 @@ static unsigned int stpmic1_map_mode(unsigned int mode) static unsigned int stpmic1_get_mode(struct regulator_dev *rdev) { int value; + struct regmap *regmap = rdev_get_regmap(rdev); - regmap_read(rdev->regmap, rdev->desc->enable_reg, &value); + regmap_read(regmap, rdev->desc->enable_reg, &value); if (value & STPMIC1_BUCK_MODE_LP) return REGULATOR_MODE_STANDBY; @@ -460,6 +468,7 @@ static unsigned int stpmic1_get_mode(struct regulator_dev *rdev) static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode) { int value; + struct regmap *regmap = rdev_get_regmap(rdev); switch (mode) { case REGULATOR_MODE_NORMAL: @@ -472,17 +481,18 @@ static int stpmic1_set_mode(struct regulator_dev *rdev, unsigned int mode) return -EINVAL; } - return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, + return regmap_update_bits(regmap, rdev->desc->enable_reg, STPMIC1_BUCK_MODE_LP, value); } static int stpmic1_set_icc(struct regulator_dev *rdev) { - struct stpmic1_regulator *regul = rdev_get_drvdata(rdev); + struct stpmic1_regulator_cfg *cfg = rdev_get_drvdata(rdev); + struct regmap *regmap = rdev_get_regmap(rdev); /* enable switch off in case of over current */ - return regmap_update_bits(regul->regmap, regul->cfg->icc_reg, - regul->cfg->icc_mask, regul->cfg->icc_mask); + return regmap_update_bits(regmap, cfg->icc_reg, cfg->icc_mask, + cfg->icc_mask); } static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data) @@ -501,46 +511,13 @@ static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static int stpmic1_regulator_init(struct platform_device *pdev, - struct regulator_dev *rdev) -{ - struct stpmic1_regulator *regul = rdev_get_drvdata(rdev); - int ret = 0; - - /* set mask reset */ - if (regul->mask_reset && regul->cfg->mask_reset_reg != 0) { - ret = regmap_update_bits(regul->regmap, - regul->cfg->mask_reset_reg, - regul->cfg->mask_reset_mask, - regul->cfg->mask_reset_mask); - if (ret) { - dev_err(&pdev->dev, "set mask reset failed\n"); - return ret; - } - } - - /* setup an irq handler for over-current detection */ - if (regul->irq_curlim > 0) { - ret = devm_request_threaded_irq(&pdev->dev, - regul->irq_curlim, NULL, - stpmic1_curlim_irq_handler, - IRQF_ONESHOT | IRQF_SHARED, - pdev->name, rdev); - if (ret) { - dev_err(&pdev->dev, "Request IRQ failed\n"); - return ret; - } - } - return 0; -} - #define MATCH(_name, _id) \ [STPMIC1_##_id] = { \ .name = #_name, \ .desc = &stpmic1_regulator_cfgs[STPMIC1_##_id].desc, \ } -static struct of_regulator_match stpmic1_regulators_matches[] = { +static struct of_regulator_match stpmic1_matches[] = { MATCH(buck1, BUCK1), MATCH(buck2, BUCK2), MATCH(buck3, BUCK3), @@ -557,94 +534,75 @@ static struct of_regulator_match stpmic1_regulators_matches[] = { MATCH(pwr_sw2, SW_OUT), }; -static int 
stpmic1_regulator_parse_dt(void *driver_data) -{ - struct stpmic1_regulator *regul = - (struct stpmic1_regulator *)driver_data; - - if (!regul) - return -EINVAL; - - if (of_get_property(regul->reg_node, "st,mask-reset", NULL)) - regul->mask_reset = 1; - - regul->irq_curlim = of_irq_get(regul->reg_node, 0); - - return 0; -} - -static struct -regulator_dev *stpmic1_regulator_register(struct platform_device *pdev, int id, - struct regulator_init_data *init_data, - struct stpmic1_regulator *regul) +static int stpmic1_regulator_register(struct platform_device *pdev, int id, + struct of_regulator_match *match, + const struct stpmic1_regulator_cfg *cfg) { struct stpmic1 *pmic_dev = dev_get_drvdata(pdev->dev.parent); struct regulator_dev *rdev; struct regulator_config config = {}; + int ret = 0; + int irq; config.dev = &pdev->dev; - config.init_data = init_data; - config.of_node = stpmic1_regulators_matches[id].of_node; + config.init_data = match->init_data; + config.of_node = match->of_node; config.regmap = pmic_dev->regmap; - config.driver_data = regul; - - regul->regul_id = id; - regul->reg_node = config.of_node; - regul->cfg = &stpmic1_regulator_cfgs[id]; - regul->regmap = pmic_dev->regmap; + config.driver_data = (void *)cfg; - rdev = devm_regulator_register(&pdev->dev, ®ul->cfg->desc, &config); + rdev = devm_regulator_register(&pdev->dev, &cfg->desc, &config); if (IS_ERR(rdev)) { dev_err(&pdev->dev, "failed to register %s regulator\n", - regul->cfg->desc.name); + cfg->desc.name); + return PTR_ERR(rdev); + } + + /* set mask reset */ + if (of_get_property(config.of_node, "st,mask-reset", NULL) && + cfg->mask_reset_reg != 0) { + ret = regmap_update_bits(pmic_dev->regmap, + cfg->mask_reset_reg, + cfg->mask_reset_mask, + cfg->mask_reset_mask); + if (ret) { + dev_err(&pdev->dev, "set mask reset failed\n"); + return ret; + } } - return rdev; + /* setup an irq handler for over-current detection */ + irq = of_irq_get(config.of_node, 0); + if (irq > 0) { + ret = devm_request_threaded_irq(&pdev->dev, + irq, NULL, + stpmic1_curlim_irq_handler, + IRQF_ONESHOT | IRQF_SHARED, + pdev->name, rdev); + if (ret) { + dev_err(&pdev->dev, "Request IRQ failed\n"); + return ret; + } + } + return 0; } static int stpmic1_regulator_probe(struct platform_device *pdev) { - struct regulator_dev *rdev; - struct stpmic1_regulator *regul; - struct regulator_init_data *init_data; - struct device_node *np; int i, ret; - np = pdev->dev.of_node; - - ret = of_regulator_match(&pdev->dev, np, - stpmic1_regulators_matches, - ARRAY_SIZE(stpmic1_regulators_matches)); + ret = of_regulator_match(&pdev->dev, pdev->dev.of_node, stpmic1_matches, + ARRAY_SIZE(stpmic1_matches)); if (ret < 0) { dev_err(&pdev->dev, "Error in PMIC regulator device tree node"); return ret; } - regul = devm_kzalloc(&pdev->dev, ARRAY_SIZE(stpmic1_regulator_cfgs) * - sizeof(struct stpmic1_regulator), - GFP_KERNEL); - if (!regul) - return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(stpmic1_regulator_cfgs); i++) { - /* Parse DT & find regulators to register */ - init_data = stpmic1_regulators_matches[i].init_data; - if (init_data) - init_data->regulator_init = &stpmic1_regulator_parse_dt; - - rdev = stpmic1_regulator_register(pdev, i, init_data, regul); - if (IS_ERR(rdev)) - return PTR_ERR(rdev); - - ret = stpmic1_regulator_init(pdev, rdev); - if (ret) { - dev_err(&pdev->dev, - "failed to initialize regulator %d\n", ret); + ret = stpmic1_regulator_register(pdev, i, &stpmic1_matches[i], + &stpmic1_regulator_cfgs[i]); + if (ret < 0) return ret; - } - - regul++; } 
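	/*
	 * Note on the rework above (descriptive only): the per-regulator state
	 * that struct stpmic1_regulator used to carry (mask_reset, irq_curlim
	 * and a cached regmap pointer) is now either parsed straight from the
	 * regulator's DT node inside stpmic1_regulator_register()
	 * ("st,mask-reset", of_irq_get()) or fetched on demand from the core
	 * via rdev_get_regmap() and rdev_get_drvdata(), so probe no longer
	 * allocates a driver-private array and simply walks
	 * stpmic1_regulator_cfgs[].
	 */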
dev_dbg(&pdev->dev, "stpmic1_regulator driver probed\n"); diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c index 6209beee1018..95708d34876b 100644 --- a/drivers/regulator/tps65218-regulator.c +++ b/drivers/regulator/tps65218-regulator.c @@ -188,7 +188,8 @@ static struct regulator_ops tps65218_ldo1_dcdc34_ops = { .set_suspend_disable = tps65218_pmic_set_suspend_disable, }; -static const int ls3_currents[] = { 100, 200, 500, 1000 }; +static const int ls3_currents[] = { 100000, 200000, 500000, 1000000 }; + static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev, int lim_uA) @@ -204,7 +205,8 @@ static int tps65218_pmic_set_input_current_lim(struct regulator_dev *dev, return -EINVAL; return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask, - index << 2, TPS65218_PROTECT_L1); + index << __builtin_ctz(dev->desc->csel_mask), + TPS65218_PROTECT_L1); } static int tps65218_pmic_set_current_limit(struct regulator_dev *dev, @@ -214,7 +216,7 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev, unsigned int num_currents = ARRAY_SIZE(ls3_currents); struct tps65218 *tps = rdev_get_drvdata(dev); - while (index < num_currents && ls3_currents[index] < max_uA) + while (index < num_currents && ls3_currents[index] <= max_uA) index++; index--; @@ -223,7 +225,8 @@ static int tps65218_pmic_set_current_limit(struct regulator_dev *dev, return -EINVAL; return tps65218_set_bits(tps, dev->desc->csel_reg, dev->desc->csel_mask, - index << 2, TPS65218_PROTECT_L1); + index << __builtin_ctz(dev->desc->csel_mask), + TPS65218_PROTECT_L1); } static int tps65218_pmic_get_current_limit(struct regulator_dev *dev) @@ -236,12 +239,13 @@ static int tps65218_pmic_get_current_limit(struct regulator_dev *dev) if (retval < 0) return retval; - index = (index & dev->desc->csel_mask) >> 2; + index = (index & dev->desc->csel_mask) >> + __builtin_ctz(dev->desc->csel_mask); return ls3_currents[index]; } -static struct regulator_ops tps65218_ls3_ops = { +static struct regulator_ops tps65218_ls23_ops = { .is_enabled = regulator_is_enabled_regmap, .enable = tps65218_pmic_enable, .disable = tps65218_pmic_disable, @@ -303,8 +307,13 @@ static const struct regulator_desc regulators[] = { TPS65218_ENABLE2_LDO1_EN, 0, 0, ldo1_dcdc3_ranges, 2, 0, 0, TPS65218_REG_SEQ6, TPS65218_SEQ6_LDO1_SEQ_MASK), + TPS65218_REGULATOR("LS2", "regulator-ls2", TPS65218_LS_2, + REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0, + TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS2_EN, + TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS2ILIM_MASK, + NULL, 0, 0, 0, 0, 0), TPS65218_REGULATOR("LS3", "regulator-ls3", TPS65218_LS_3, - REGULATOR_CURRENT, tps65218_ls3_ops, 0, 0, 0, + REGULATOR_CURRENT, tps65218_ls23_ops, 0, 0, 0, TPS65218_REG_ENABLE2, TPS65218_ENABLE2_LS3_EN, TPS65218_REG_CONFIG2, TPS65218_CONFIG2_LS3ILIM_MASK, NULL, 0, 0, 0, 0, 0), diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 884c7505ed91..402ea43c77d1 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -576,14 +576,9 @@ static int twlreg_probe(struct platform_device *pdev) struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; - const struct of_device_id *match; struct regulator_config config = { }; - match = of_match_device(twl_of_match, &pdev->dev); - if (!match) - return -ENODEV; - - template = match->data; + template = of_device_get_match_data(&pdev->dev); if (!template) return -ENODEV; diff --git 
a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c index 219cbd910dbf..15f19df6bc5d 100644 --- a/drivers/regulator/twl6030-regulator.c +++ b/drivers/regulator/twl6030-regulator.c @@ -31,9 +31,6 @@ struct twlreg_info { /* twl resource ID, for resource control state machine */ u8 id; - /* chip constraints on regulator behavior */ - u16 min_mV; - u8 flags; /* used by regulator core */ @@ -247,32 +244,11 @@ static int twl6030coresmps_get_voltage(struct regulator_dev *rdev) return -ENODEV; } -static struct regulator_ops twl6030coresmps_ops = { +static const struct regulator_ops twl6030coresmps_ops = { .set_voltage = twl6030coresmps_set_voltage, .get_voltage = twl6030coresmps_get_voltage, }; -static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - switch (sel) { - case 0: - return 0; - case 1 ... 24: - /* Linear mapping from 00000001 to 00011000: - * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001) - */ - return (info->min_mV + 100 * (sel - 1)) * 1000; - case 25 ... 30: - return -EINVAL; - case 31: - return 2750000; - default: - return -EINVAL; - } -} - static int twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { @@ -290,8 +266,8 @@ static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev) return vsel; } -static struct regulator_ops twl6030ldo_ops = { - .list_voltage = twl6030ldo_list_voltage, +static const struct regulator_ops twl6030ldo_ops = { + .list_voltage = regulator_list_voltage_linear_range, .set_voltage_sel = twl6030ldo_set_voltage_sel, .get_voltage_sel = twl6030ldo_get_voltage_sel, @@ -305,7 +281,7 @@ static struct regulator_ops twl6030ldo_ops = { .get_status = twl6030reg_get_status, }; -static struct regulator_ops twl6030fixed_ops = { +static const struct regulator_ops twl6030fixed_ops = { .list_voltage = regulator_list_voltage_linear, .enable = twl6030reg_enable, @@ -496,7 +472,7 @@ static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev) return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS); } -static struct regulator_ops twlsmps_ops = { +static const struct regulator_ops twlsmps_ops = { .list_voltage = twl6030smps_list_voltage, .map_voltage = twl6030smps_map_voltage, @@ -513,6 +489,11 @@ static struct regulator_ops twlsmps_ops = { }; /*----------------------------------------------------------------------*/ +static const struct regulator_linear_range twl6030ldo_linear_range[] = { + REGULATOR_LINEAR_RANGE(0, 0, 0, 0), + REGULATOR_LINEAR_RANGE(1000000, 1, 24, 100000), + REGULATOR_LINEAR_RANGE(2750000, 31, 31, 0), +}; #define TWL6030_ADJUSTABLE_SMPS(label) \ static const struct twlreg_info TWL6030_INFO_##label = { \ @@ -525,28 +506,30 @@ static const struct twlreg_info TWL6030_INFO_##label = { \ }, \ } -#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts) \ +#define TWL6030_ADJUSTABLE_LDO(label, offset) \ static const struct twlreg_info TWL6030_INFO_##label = { \ .base = offset, \ - .min_mV = min_mVolts, \ .desc = { \ .name = #label, \ .id = TWL6030_REG_##label, \ .n_voltages = 32, \ + .linear_ranges = twl6030ldo_linear_range, \ + .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \ .ops = &twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ }, \ } -#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts) \ +#define TWL6032_ADJUSTABLE_LDO(label, offset) \ static const struct twlreg_info TWL6032_INFO_##label = { \ .base = offset, \ - .min_mV = min_mVolts, \ .desc = { \ .name = 
#label, \ .id = TWL6032_REG_##label, \ .n_voltages = 32, \ + .linear_ranges = twl6030ldo_linear_range, \ + .n_linear_ranges = ARRAY_SIZE(twl6030ldo_linear_range), \ .ops = &twl6030ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ @@ -557,7 +540,6 @@ static const struct twlreg_info TWL6032_INFO_##label = { \ static const struct twlreg_info TWLFIXED_INFO_##label = { \ .base = offset, \ .id = 0, \ - .min_mV = mVolts, \ .desc = { \ .name = #label, \ .id = TWL6030##_REG_##label, \ @@ -574,7 +556,6 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \ #define TWL6032_ADJUSTABLE_SMPS(label, offset) \ static const struct twlreg_info TWLSMPS_INFO_##label = { \ .base = offset, \ - .min_mV = 600, \ .desc = { \ .name = #label, \ .id = TWL6032_REG_##label, \ @@ -592,22 +573,22 @@ static const struct twlreg_info TWLSMPS_INFO_##label = { \ TWL6030_ADJUSTABLE_SMPS(VDD1); TWL6030_ADJUSTABLE_SMPS(VDD2); TWL6030_ADJUSTABLE_SMPS(VDD3); -TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000); -TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000); -TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000); -TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000); -TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000); -TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000); +TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54); +TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58); +TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c); +TWL6030_ADJUSTABLE_LDO(VMMC, 0x68); +TWL6030_ADJUSTABLE_LDO(VPP, 0x6c); +TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74); /* 6025 are renamed compared to 6030 versions */ -TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000); -TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000); -TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000); -TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000); -TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000); -TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000); -TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000); -TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000); -TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000); +TWL6032_ADJUSTABLE_LDO(LDO2, 0x54); +TWL6032_ADJUSTABLE_LDO(LDO4, 0x58); +TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c); +TWL6032_ADJUSTABLE_LDO(LDO5, 0x68); +TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c); +TWL6032_ADJUSTABLE_LDO(LDO7, 0x74); +TWL6032_ADJUSTABLE_LDO(LDO6, 0x60); +TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64); +TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70); TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0); TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0); TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); @@ -687,14 +668,9 @@ static int twlreg_probe(struct platform_device *pdev) struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; - const struct of_device_id *match; struct regulator_config config = { }; - match = of_match_device(twl_of_match, &pdev->dev); - if (!match) - return -ENODEV; - - template = match->data; + template = of_device_get_match_data(&pdev->dev); if (!template) return -ENODEV; diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c index abf22acbd13e..9026d5a3e964 100644 --- a/drivers/regulator/uniphier-regulator.c +++ b/drivers/regulator/uniphier-regulator.c @@ -32,7 +32,7 @@ struct uniphier_regulator_priv { const struct uniphier_regulator_soc_data *data; }; -static struct regulator_ops uniphier_regulator_ops = { +static const struct regulator_ops uniphier_regulator_ops = { .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, .is_enabled = regulator_is_enabled_regmap, @@ -87,8 +87,10 @@ static int uniphier_regulator_probe(struct platform_device *pdev) } regmap = devm_regmap_init_mmio(dev, base, priv->data->regconf); - if (IS_ERR(regmap)) - return 
PTR_ERR(regmap); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto out_rst_assert; + } config.dev = dev; config.driver_data = priv; diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 5a5bc4bb08d2..12b422373580 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -205,33 +205,10 @@ static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data) * BUCKV specifics */ -static int wm831x_buckv_list_voltage(struct regulator_dev *rdev, - unsigned selector) -{ - if (selector <= 0x8) - return 600000; - if (selector <= WM831X_BUCKV_MAX_SELECTOR) - return 600000 + ((selector - 0x8) * 12500); - return -EINVAL; -} - -static int wm831x_buckv_map_voltage(struct regulator_dev *rdev, - int min_uV, int max_uV) -{ - u16 vsel; - - if (min_uV < 600000) - vsel = 0; - else if (min_uV <= 1800000) - vsel = DIV_ROUND_UP(min_uV - 600000, 12500) + 8; - else - return -EINVAL; - - if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV) - return -EINVAL; - - return vsel; -} +static const struct regulator_linear_range wm831x_buckv_ranges[] = { + REGULATOR_LINEAR_RANGE(600000, 0, 0x7, 0), + REGULATOR_LINEAR_RANGE(600000, 0x8, 0x68, 12500), +}; static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) { @@ -309,7 +286,7 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; int vsel; - vsel = wm831x_buckv_map_voltage(rdev, uV, uV); + vsel = regulator_map_voltage_linear_range(rdev, uV, uV); if (vsel < 0) return vsel; @@ -327,52 +304,18 @@ static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev) } /* Current limit options */ -static u16 wm831x_dcdc_ilim[] = { - 125, 250, 375, 500, 625, 750, 875, 1000 +static const unsigned int wm831x_dcdc_ilim[] = { + 125000, 250000, 375000, 500000, 625000, 750000, 875000, 1000000 }; -static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev, - int min_uA, int max_uA) -{ - struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); - struct wm831x *wm831x = dcdc->wm831x; - u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; - int i; - - for (i = ARRAY_SIZE(wm831x_dcdc_ilim) - 1; i >= 0; i--) { - if ((min_uA <= wm831x_dcdc_ilim[i]) && - (wm831x_dcdc_ilim[i] <= max_uA)) - return wm831x_set_bits(wm831x, reg, - WM831X_DC1_HC_THR_MASK, - i << WM831X_DC1_HC_THR_SHIFT); - } - - return -EINVAL; -} - -static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev) -{ - struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); - struct wm831x *wm831x = dcdc->wm831x; - u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; - int val; - - val = wm831x_reg_read(wm831x, reg); - if (val < 0) - return val; - - val = (val & WM831X_DC1_HC_THR_MASK) >> WM831X_DC1_HC_THR_SHIFT; - return wm831x_dcdc_ilim[val]; -} - static const struct regulator_ops wm831x_buckv_ops = { .set_voltage_sel = wm831x_buckv_set_voltage_sel, .get_voltage_sel = wm831x_buckv_get_voltage_sel, - .list_voltage = wm831x_buckv_list_voltage, - .map_voltage = wm831x_buckv_map_voltage, + .list_voltage = regulator_list_voltage_linear_range, + .map_voltage = regulator_map_voltage_linear_range, .set_suspend_voltage = wm831x_buckv_set_suspend_voltage, - .set_current_limit = wm831x_buckv_set_current_limit, - .get_current_limit = wm831x_buckv_get_current_limit, + .set_current_limit = regulator_set_current_limit_regmap, + .get_current_limit = regulator_get_current_limit_regmap, .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, @@ -492,10 +435,16 @@ static int wm831x_buckv_probe(struct 
platform_device *pdev) dcdc->desc.id = id; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1; + dcdc->desc.linear_ranges = wm831x_buckv_ranges; + dcdc->desc.n_linear_ranges = ARRAY_SIZE(wm831x_buckv_ranges); dcdc->desc.ops = &wm831x_buckv_ops; dcdc->desc.owner = THIS_MODULE; dcdc->desc.enable_reg = WM831X_DCDC_ENABLE; dcdc->desc.enable_mask = 1 << id; + dcdc->desc.csel_reg = dcdc->base + WM831X_DCDC_CONTROL_2; + dcdc->desc.csel_mask = WM831X_DC1_HC_THR_MASK; + dcdc->desc.n_current_limits = ARRAY_SIZE(wm831x_dcdc_ilim); + dcdc->desc.curr_table = wm831x_dcdc_ilim; ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); if (ret < 0) { diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 2e01bd833ffd..2c8c23db92fb 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -40,6 +40,14 @@ config RESET_BERLIN help This enables the reset controller driver for Marvell Berlin SoCs. +config RESET_BRCMSTB + tristate "Broadcom STB reset controller" + depends on ARCH_BRCMSTB || COMPILE_TEST + default ARCH_BRCMSTB + help + This enables the reset controller driver for Broadcom STB SoCs using + a SUN_TOP_CTRL_SW_INIT style controller. + config RESET_HSDK bool "Synopsys HSDK Reset Driver" depends on HAS_IOMEM @@ -48,9 +56,9 @@ config RESET_HSDK This enables the reset controller driver for HSDK board. config RESET_IMX7 - bool "i.MX7 Reset Driver" if COMPILE_TEST + bool "i.MX7/8 Reset Driver" if COMPILE_TEST depends on HAS_IOMEM - default SOC_IMX7D + default SOC_IMX7D || (ARM64 && ARCH_MXC) select MFD_SYSCON help This enables the reset controller driver for i.MX7 SoCs. diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index dc7874df78d9..61456b8f659c 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o obj-$(CONFIG_RESET_ATH79) += reset-ath79.o obj-$(CONFIG_RESET_AXS10X) += reset-axs10x.o obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o +obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o obj-$(CONFIG_RESET_IMX7) += reset-imx7.o obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o @@ -26,4 +27,5 @@ obj-$(CONFIG_RESET_TI_SYSCON) += reset-ti-syscon.o obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o +obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c new file mode 100644 index 000000000000..a608f445dad6 --- /dev/null +++ b/drivers/reset/reset-brcmstb.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Broadcom STB generic reset controller for SW_INIT style reset controller + * + * Author: Florian Fainelli + * Copyright (C) 2018 Broadcom + */ +#include +#include +#include +#include +#include +#include +#include +#include + +struct brcmstb_reset { + void __iomem *base; + struct reset_controller_dev rcdev; +}; + +#define SW_INIT_SET 0x00 +#define SW_INIT_CLEAR 0x04 +#define SW_INIT_STATUS 0x08 + +#define SW_INIT_BIT(id) BIT((id) & 0x1f) +#define SW_INIT_BANK(id) ((id) >> 5) + +/* A full bank contains extra registers that we are not utilizing but still + * qualify as a single bank. 
+ */ +#define SW_INIT_BANK_SIZE 0x18 + +static inline +struct brcmstb_reset *to_brcmstb(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct brcmstb_reset, rcdev); +} + +static int brcmstb_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE; + struct brcmstb_reset *priv = to_brcmstb(rcdev); + + writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_SET); + + return 0; +} + +static int brcmstb_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE; + struct brcmstb_reset *priv = to_brcmstb(rcdev); + + writel_relaxed(SW_INIT_BIT(id), priv->base + off + SW_INIT_CLEAR); + /* Maximum reset delay after de-asserting a line and seeing block + * operation is typically 14us for the worst case, build some slack + * here. + */ + usleep_range(100, 200); + + return 0; +} + +static int brcmstb_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + unsigned int off = SW_INIT_BANK(id) * SW_INIT_BANK_SIZE; + struct brcmstb_reset *priv = to_brcmstb(rcdev); + + return readl_relaxed(priv->base + off + SW_INIT_STATUS) & + SW_INIT_BIT(id); +} + +static const struct reset_control_ops brcmstb_reset_ops = { + .assert = brcmstb_reset_assert, + .deassert = brcmstb_reset_deassert, + .status = brcmstb_reset_status, +}; + +static int brcmstb_reset_probe(struct platform_device *pdev) +{ + struct device *kdev = &pdev->dev; + struct brcmstb_reset *priv; + struct resource *res; + + priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) || + !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) { + dev_err(kdev, "incorrect register range\n"); + return -EINVAL; + } + + priv->base = devm_ioremap_resource(kdev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + dev_set_drvdata(kdev, priv); + + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.nr_resets = DIV_ROUND_DOWN_ULL(resource_size(res), + SW_INIT_BANK_SIZE) * 32; + priv->rcdev.ops = &brcmstb_reset_ops; + priv->rcdev.of_node = kdev->of_node; + /* Use defaults: 1 cell and simple xlate function */ + + return devm_reset_controller_register(kdev, &priv->rcdev); +} + +static const struct of_device_id brcmstb_reset_of_match[] = { + { .compatible = "brcm,brcmstb-reset" }, + { /* sentinel */ } +}; + +static struct platform_driver brcmstb_reset_driver = { + .probe = brcmstb_reset_probe, + .driver = { + .name = "brcmstb-reset", + .of_match_table = brcmstb_reset_of_match, + }, +}; +module_platform_driver(brcmstb_reset_driver); + +MODULE_AUTHOR("Broadcom"); +MODULE_DESCRIPTION("Broadcom STB reset controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c index 77911fa8f31d..aed76e33a0a9 100644 --- a/drivers/reset/reset-imx7.c +++ b/drivers/reset/reset-imx7.c @@ -17,14 +17,27 @@ #include #include +#include #include #include #include #include +#include + +struct imx7_src_signal { + unsigned int offset, bit; +}; + +struct imx7_src_variant { + const struct imx7_src_signal *signals; + unsigned int signals_num; + struct reset_control_ops ops; +}; struct imx7_src { struct reset_controller_dev rcdev; struct regmap *regmap; + const struct imx7_src_signal *signals; }; enum imx7_src_registers { @@ -39,9 +52,14 @@ enum imx7_src_registers { SRC_DDRC_RCR = 0x1000, }; -struct imx7_src_signal { - unsigned int 
offset, bit; -}; +static int imx7_reset_update(struct imx7_src *imx7src, + unsigned long id, unsigned int value) +{ + const struct imx7_src_signal *signal = &imx7src->signals[id]; + + return regmap_update_bits(imx7src->regmap, + signal->offset, signal->bit, value); +} static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = { [IMX7_RESET_A7_CORE_POR_RESET0] = { SRC_A7RCR0, BIT(0) }, @@ -81,8 +99,8 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev, unsigned long id, bool assert) { struct imx7_src *imx7src = to_imx7_src(rcdev); - const struct imx7_src_signal *signal = &imx7_src_signals[id]; - unsigned int value = assert ? signal->bit : 0; + const unsigned int bit = imx7src->signals[id].bit; + unsigned int value = assert ? bit : 0; switch (id) { case IMX7_RESET_PCIEPHY: @@ -95,12 +113,11 @@ static int imx7_reset_set(struct reset_controller_dev *rcdev, break; case IMX7_RESET_PCIE_CTRL_APPS_EN: - value = (assert) ? 0 : signal->bit; + value = assert ? 0 : bit; break; } - return regmap_update_bits(imx7src->regmap, - signal->offset, signal->bit, value); + return imx7_reset_update(imx7src, id, value); } static int imx7_reset_assert(struct reset_controller_dev *rcdev, @@ -115,9 +132,133 @@ static int imx7_reset_deassert(struct reset_controller_dev *rcdev, return imx7_reset_set(rcdev, id, false); } -static const struct reset_control_ops imx7_reset_ops = { - .assert = imx7_reset_assert, - .deassert = imx7_reset_deassert, +static const struct imx7_src_variant variant_imx7 = { + .signals = imx7_src_signals, + .signals_num = ARRAY_SIZE(imx7_src_signals), + .ops = { + .assert = imx7_reset_assert, + .deassert = imx7_reset_deassert, + }, +}; + +enum imx8mq_src_registers { + SRC_A53RCR0 = 0x0004, + SRC_HDMI_RCR = 0x0030, + SRC_DISP_RCR = 0x0034, + SRC_GPU_RCR = 0x0040, + SRC_VPU_RCR = 0x0044, + SRC_PCIE2_RCR = 0x0048, + SRC_MIPIPHY1_RCR = 0x004c, + SRC_MIPIPHY2_RCR = 0x0050, + SRC_DDRC2_RCR = 0x1004, +}; + +static const struct imx7_src_signal imx8mq_src_signals[IMX8MQ_RESET_NUM] = { + [IMX8MQ_RESET_A53_CORE_POR_RESET0] = { SRC_A53RCR0, BIT(0) }, + [IMX8MQ_RESET_A53_CORE_POR_RESET1] = { SRC_A53RCR0, BIT(1) }, + [IMX8MQ_RESET_A53_CORE_POR_RESET2] = { SRC_A53RCR0, BIT(2) }, + [IMX8MQ_RESET_A53_CORE_POR_RESET3] = { SRC_A53RCR0, BIT(3) }, + [IMX8MQ_RESET_A53_CORE_RESET0] = { SRC_A53RCR0, BIT(4) }, + [IMX8MQ_RESET_A53_CORE_RESET1] = { SRC_A53RCR0, BIT(5) }, + [IMX8MQ_RESET_A53_CORE_RESET2] = { SRC_A53RCR0, BIT(6) }, + [IMX8MQ_RESET_A53_CORE_RESET3] = { SRC_A53RCR0, BIT(7) }, + [IMX8MQ_RESET_A53_DBG_RESET0] = { SRC_A53RCR0, BIT(8) }, + [IMX8MQ_RESET_A53_DBG_RESET1] = { SRC_A53RCR0, BIT(9) }, + [IMX8MQ_RESET_A53_DBG_RESET2] = { SRC_A53RCR0, BIT(10) }, + [IMX8MQ_RESET_A53_DBG_RESET3] = { SRC_A53RCR0, BIT(11) }, + [IMX8MQ_RESET_A53_ETM_RESET0] = { SRC_A53RCR0, BIT(12) }, + [IMX8MQ_RESET_A53_ETM_RESET1] = { SRC_A53RCR0, BIT(13) }, + [IMX8MQ_RESET_A53_ETM_RESET2] = { SRC_A53RCR0, BIT(14) }, + [IMX8MQ_RESET_A53_ETM_RESET3] = { SRC_A53RCR0, BIT(15) }, + [IMX8MQ_RESET_A53_SOC_DBG_RESET] = { SRC_A53RCR0, BIT(20) }, + [IMX8MQ_RESET_A53_L2RESET] = { SRC_A53RCR0, BIT(21) }, + [IMX8MQ_RESET_SW_NON_SCLR_M4C_RST] = { SRC_M4RCR, BIT(0) }, + [IMX8MQ_RESET_OTG1_PHY_RESET] = { SRC_USBOPHY1_RCR, BIT(0) }, + [IMX8MQ_RESET_OTG2_PHY_RESET] = { SRC_USBOPHY2_RCR, BIT(0) }, + [IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N] = { SRC_MIPIPHY_RCR, BIT(1) }, + [IMX8MQ_RESET_MIPI_DSI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(2) }, + [IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N] = { SRC_MIPIPHY_RCR, BIT(3) }, + [IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N] = 
{ SRC_MIPIPHY_RCR, BIT(4) }, + [IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N] = { SRC_MIPIPHY_RCR, BIT(5) }, + [IMX8MQ_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, + BIT(2) | BIT(1) }, + [IMX8MQ_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) }, + [IMX8MQ_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) }, + [IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) }, + [IMX8MQ_RESET_HDMI_PHY_APB_RESET] = { SRC_HDMI_RCR, BIT(0) }, + [IMX8MQ_RESET_DISP_RESET] = { SRC_DISP_RCR, BIT(0) }, + [IMX8MQ_RESET_GPU_RESET] = { SRC_GPU_RCR, BIT(0) }, + [IMX8MQ_RESET_VPU_RESET] = { SRC_VPU_RCR, BIT(0) }, + [IMX8MQ_RESET_PCIEPHY2] = { SRC_PCIE2_RCR, + BIT(2) | BIT(1) }, + [IMX8MQ_RESET_PCIEPHY2_PERST] = { SRC_PCIE2_RCR, BIT(3) }, + [IMX8MQ_RESET_PCIE2_CTRL_APPS_EN] = { SRC_PCIE2_RCR, BIT(6) }, + [IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF] = { SRC_PCIE2_RCR, BIT(11) }, + [IMX8MQ_RESET_MIPI_CSI1_CORE_RESET] = { SRC_MIPIPHY1_RCR, BIT(0) }, + [IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET] = { SRC_MIPIPHY1_RCR, BIT(1) }, + [IMX8MQ_RESET_MIPI_CSI1_ESC_RESET] = { SRC_MIPIPHY1_RCR, BIT(2) }, + [IMX8MQ_RESET_MIPI_CSI2_CORE_RESET] = { SRC_MIPIPHY2_RCR, BIT(0) }, + [IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET] = { SRC_MIPIPHY2_RCR, BIT(1) }, + [IMX8MQ_RESET_MIPI_CSI2_ESC_RESET] = { SRC_MIPIPHY2_RCR, BIT(2) }, + [IMX8MQ_RESET_DDRC1_PRST] = { SRC_DDRC_RCR, BIT(0) }, + [IMX8MQ_RESET_DDRC1_CORE_RESET] = { SRC_DDRC_RCR, BIT(1) }, + [IMX8MQ_RESET_DDRC1_PHY_RESET] = { SRC_DDRC_RCR, BIT(2) }, + [IMX8MQ_RESET_DDRC2_PHY_RESET] = { SRC_DDRC2_RCR, BIT(0) }, + [IMX8MQ_RESET_DDRC2_CORE_RESET] = { SRC_DDRC2_RCR, BIT(1) }, + [IMX8MQ_RESET_DDRC2_PRST] = { SRC_DDRC2_RCR, BIT(2) }, +}; + +static int imx8mq_reset_set(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct imx7_src *imx7src = to_imx7_src(rcdev); + const unsigned int bit = imx7src->signals[id].bit; + unsigned int value = assert ? bit : 0; + + switch (id) { + case IMX8MQ_RESET_PCIEPHY: + case IMX8MQ_RESET_PCIEPHY2: /* fallthrough */ + /* + * wait for more than 10us to release phy g_rst and + * btnrst + */ + if (!assert) + udelay(10); + break; + + case IMX8MQ_RESET_PCIE_CTRL_APPS_EN: + case IMX8MQ_RESET_PCIE2_CTRL_APPS_EN: /* fallthrough */ + case IMX8MQ_RESET_MIPI_DIS_PCLK_RESET_N: /* fallthrough */ + case IMX8MQ_RESET_MIPI_DIS_ESC_RESET_N: /* fallthrough */ + case IMX8MQ_RESET_MIPI_DIS_DPI_RESET_N: /* fallthrough */ + case IMX8MQ_RESET_MIPI_DSI_RESET_N: /* fallthrough */ + case IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N: /* fallthrough */ + value = assert ? 
0 : bit; + break; + } + + return imx7_reset_update(imx7src, id, value); +} + +static int imx8mq_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return imx8mq_reset_set(rcdev, id, true); +} + +static int imx8mq_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return imx8mq_reset_set(rcdev, id, false); +} + +static const struct imx7_src_variant variant_imx8mq = { + .signals = imx8mq_src_signals, + .signals_num = ARRAY_SIZE(imx8mq_src_signals), + .ops = { + .assert = imx8mq_reset_assert, + .deassert = imx8mq_reset_deassert, + }, }; static int imx7_reset_probe(struct platform_device *pdev) @@ -125,11 +266,13 @@ static int imx7_reset_probe(struct platform_device *pdev) struct imx7_src *imx7src; struct device *dev = &pdev->dev; struct regmap_config config = { .name = "src" }; + const struct imx7_src_variant *variant = of_device_get_match_data(dev); imx7src = devm_kzalloc(dev, sizeof(*imx7src), GFP_KERNEL); if (!imx7src) return -ENOMEM; + imx7src->signals = variant->signals; imx7src->regmap = syscon_node_to_regmap(dev->of_node); if (IS_ERR(imx7src->regmap)) { dev_err(dev, "Unable to get imx7-src regmap"); @@ -138,15 +281,16 @@ static int imx7_reset_probe(struct platform_device *pdev) regmap_attach_dev(dev, imx7src->regmap, &config); imx7src->rcdev.owner = THIS_MODULE; - imx7src->rcdev.nr_resets = IMX7_RESET_NUM; - imx7src->rcdev.ops = &imx7_reset_ops; + imx7src->rcdev.nr_resets = variant->signals_num; + imx7src->rcdev.ops = &variant->ops; imx7src->rcdev.of_node = dev->of_node; return devm_reset_controller_register(dev, &imx7src->rcdev); } static const struct of_device_id imx7_reset_dt_ids[] = { - { .compatible = "fsl,imx7d-src", }, + { .compatible = "fsl,imx7d-src", .data = &variant_imx7 }, + { .compatible = "fsl,imx8mq-src", .data = &variant_imx8mq }, { /* sentinel */ }, }; diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index 318cfc51c441..96953992c2bb 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -18,7 +19,6 @@ #include "reset-simple.h" #define SOCFPGA_NR_BANKS 8 -void __init socfpga_reset_init(void); static int a10_reset_init(struct device_node *np) { diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index db9a1a75523f..b06d724d8f21 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/reset/reset-zynqmp.c b/drivers/reset/reset-zynqmp.c new file mode 100644 index 000000000000..2ef1f13aa47b --- /dev/null +++ b/drivers/reset/reset-zynqmp.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2018 Xilinx, Inc. 
+ * + */ + +#include +#include +#include +#include +#include + +#define ZYNQMP_NR_RESETS (ZYNQMP_PM_RESET_END - ZYNQMP_PM_RESET_START) +#define ZYNQMP_RESET_ID ZYNQMP_PM_RESET_START + +struct zynqmp_reset_data { + struct reset_controller_dev rcdev; + const struct zynqmp_eemi_ops *eemi_ops; +}; + +static inline struct zynqmp_reset_data * +to_zynqmp_reset_data(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct zynqmp_reset_data, rcdev); +} + +static int zynqmp_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev); + + return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id, + PM_RESET_ACTION_ASSERT); +} + +static int zynqmp_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev); + + return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id, + PM_RESET_ACTION_RELEASE); +} + +static int zynqmp_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev); + int val, err; + + err = priv->eemi_ops->reset_get_status(ZYNQMP_RESET_ID + id, &val); + if (err) + return err; + + return val; +} + +static int zynqmp_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct zynqmp_reset_data *priv = to_zynqmp_reset_data(rcdev); + + return priv->eemi_ops->reset_assert(ZYNQMP_RESET_ID + id, + PM_RESET_ACTION_PULSE); +} + +static struct reset_control_ops zynqmp_reset_ops = { + .reset = zynqmp_reset_reset, + .assert = zynqmp_reset_assert, + .deassert = zynqmp_reset_deassert, + .status = zynqmp_reset_status, +}; + +static int zynqmp_reset_probe(struct platform_device *pdev) +{ + struct zynqmp_reset_data *priv; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + + priv->eemi_ops = zynqmp_pm_get_eemi_ops(); + if (!priv->eemi_ops) + return -ENXIO; + + priv->rcdev.ops = &zynqmp_reset_ops; + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.of_node = pdev->dev.of_node; + priv->rcdev.nr_resets = ZYNQMP_NR_RESETS; + + return devm_reset_controller_register(&pdev->dev, &priv->rcdev); +} + +static const struct of_device_id zynqmp_reset_dt_ids[] = { + { .compatible = "xlnx,zynqmp-reset", }, + { /* sentinel */ }, +}; + +static struct platform_driver zynqmp_reset_driver = { + .probe = zynqmp_reset_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = zynqmp_reset_dt_ids, + }, +}; + +static int __init zynqmp_reset_init(void) +{ + return platform_driver_register(&zynqmp_reset_driver); +} + +arch_initcall(zynqmp_reset_init); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 225b0b8516f3..a71734c41693 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -185,6 +185,16 @@ config RTC_DRV_ABB5ZES3 This driver can also be built as a module. If so, the module will be called rtc-ab-b5ze-s3. +config RTC_DRV_ABEOZ9 + select REGMAP_I2C + tristate "Abracon AB-RTCMC-32.768kHz-EOZ9" + help + If you say yes here you get support for the Abracon + AB-RTCMC-32.768kHz-EOA9 I2C RTC chip. + + This driver can also be built as a module. If so, the module + will be called rtc-ab-e0z9. + config RTC_DRV_ABX80X tristate "Abracon ABx80x" select WATCHDOG_CORE if WATCHDOG @@ -601,9 +611,10 @@ config RTC_DRV_RX8010 will be called rtc-rx8010. 
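For the reset-brcmstb driver added above, the id-to-register mapping is purely arithmetic: each 0x18-byte SW_INIT bank carries 32 reset lines, so the bits of the reset id above bit 4 select the bank and the low five bits select the bit within it (the assert path writes that bit to the bank's SET register, deassert to CLEAR). A minimal freestanding sketch of that mapping, assuming nothing beyond the macros shown in the driver; the sw_init_decode() helper name here is illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

#define SW_INIT_BANK_SIZE 0x18                    /* SET, CLEAR, STATUS plus unused registers */

/* Illustrative restatement of the driver's SW_INIT_BANK()/SW_INIT_BIT() macros. */
static void sw_init_decode(unsigned long id, uint32_t *bank_off, uint32_t *mask)
{
	*bank_off = (id >> 5) * SW_INIT_BANK_SIZE;  /* bank base: SET at +0x00, CLEAR at +0x04, STATUS at +0x08 */
	*mask = 1u << (id & 0x1f);                  /* bit within the 32-line bank */
}

int main(void)
{
	uint32_t off, mask;

	sw_init_decode(37, &off, &mask);            /* reset line 37 -> bank 1, bit 5 */
	printf("bank offset 0x%x, mask 0x%x\n", off, mask);
	return 0;
}

This also explains the driver's nr_resets computation: the number of full banks covered by the register resource times 32 lines per bank.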
config RTC_DRV_RX8581 - tristate "Epson RX-8581" + tristate "Epson RX-8571/RX-8581" help - If you say yes here you will get support for the Epson RX-8581. + If you say yes here you will get support for the Epson RX-8571/ + RX-8581. This driver can also be built as a module. If so the module will be called rtc-rx8581. @@ -626,6 +637,15 @@ config RTC_DRV_EM3027 This driver can also be built as a module. If so, the module will be called rtc-em3027. +config RTC_DRV_RV3028 + tristate "Micro Crystal RV3028" + help + If you say yes here you get support for the Micro Crystal + RV3028. + + This driver can also be built as a module. If so, the module + will be called rtc-rv3028. + config RTC_DRV_RV8803 tristate "Micro Crystal RV8803, Epson RX8900" help @@ -646,6 +666,15 @@ config RTC_DRV_S5M This driver can also be built as a module. If so, the module will be called rtc-s5m. +config RTC_DRV_SD3078 + tristate "ZXW Crystal SD3078" + help + If you say yes here you get support for the ZXW Crystal + SD3078 RTC chips. + + This driver can also be built as a module. If so, the module + will be called rtc-sd3078 + endif # I2C comment "SPI RTC drivers" @@ -1285,6 +1314,17 @@ config RTC_DRV_IMXDI This driver can also be built as a module, if so, the module will be called "rtc-imxdi". +config RTC_DRV_MESON + tristate "Amlogic Meson RTC" + depends on (ARM && ARCH_MESON) || COMPILE_TEST + select REGMAP_MMIO + help + Support for the RTC block on the Amlogic Meson6, Meson8, Meson8b + and Meson8m2 SoCs. + + This driver can also be built as a module, if so, the module + will be called "rtc-meson". + config RTC_DRV_OMAP tristate "TI OMAP Real Time Clock" depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST @@ -1508,6 +1548,16 @@ config RTC_DRV_ARMADA38X This driver can also be built as a module. If so, the module will be called armada38x-rtc. +config RTC_DRV_CADENCE + tristate "Cadence RTC driver" + depends on OF && HAS_IOMEM + help + If you say Y here you will get access to Cadence RTC IP + found on certain SOCs. + + To compile this driver as a module, choose M here: the + module will be called rtc-cadence. + config RTC_DRV_FTRTC010 tristate "Faraday Technology FTRTC010 RTC" depends on HAS_IOMEM @@ -1679,6 +1729,7 @@ config RTC_DRV_SNVS config RTC_DRV_IMX_SC depends on IMX_SCU + depends on HAVE_ARM_SMCCC tristate "NXP i.MX System Controller RTC support" help If you say yes here you get support for the NXP i.MX System @@ -1795,8 +1846,7 @@ comment "HID Sensor RTC drivers" config RTC_DRV_HID_SENSOR_TIME tristate "HID Sensor Time" depends on USB_HID - select IIO - select HID_SENSOR_HUB + depends on HID_SENSOR_HUB && IIO select HID_SENSOR_IIO_COMMON help Say yes here to build support for the HID Sensors of type Time. @@ -1814,4 +1864,15 @@ config RTC_DRV_GOLDFISH Goldfish is a code name for the virtual platform developed by Google for Android emulation. +config RTC_DRV_WILCO_EC + tristate "Wilco EC RTC" + depends on WILCO_EC + default m + help + If you say yes here, you get read/write support for the Real Time + Clock on the Wilco Embedded Controller (Wilco is a kind of Chromebook) + + This can also be built as a module. If so, the module will + be named "rtc_wilco_ec". 
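The new RTC drivers further down in this series (rtc-ab-eoz9, rtc-cadence) keep their time and alarm fields in packed BCD and convert with the kernel's bcd2bin()/bin2bcd() helpers from <linux/bcd.h>. As a freestanding sketch of that conversion, with local re-implementations carrying the same semantics as the kernel helpers (the main() and the sample values are purely illustrative):

#include <stdio.h>

/* Same semantics as the kernel's bcd2bin()/bin2bcd() from <linux/bcd.h>. */
static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;      /* low nibble is ones, high nibble is tens */
}

static unsigned char bin2bcd(unsigned int val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}

int main(void)
{
	/* A seconds register reading 0x47 means 47 seconds; 59 seconds encodes as 0x59. */
	printf("0x47 -> %u, 59 -> 0x%02x\n", bcd2bin(0x47), bin2bcd(59));
	return 0;
}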
+ endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index df022d820bee..fe3962496685 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o +obj-$(CONFIG_RTC_DRV_ABEOZ9) += rtc-ab-eoz9.o obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o obj-$(CONFIG_RTC_DRV_AC100) += rtc-ac100.o obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o @@ -39,6 +40,7 @@ obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o obj-$(CONFIG_RTC_DRV_BRCMSTB) += rtc-brcmstb-waketimer.o +obj-$(CONFIG_RTC_DRV_CADENCE) += rtc-cadence.o obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o obj-$(CONFIG_RTC_DRV_COH901331) += rtc-coh901331.o obj-$(CONFIG_RTC_DRV_CPCAP) += rtc-cpcap.o @@ -100,6 +102,7 @@ obj-$(CONFIG_RTC_DRV_MAX8997) += rtc-max8997.o obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o obj-$(CONFIG_RTC_DRV_MC13XXX) += rtc-mc13xxx.o obj-$(CONFIG_RTC_DRV_MCP795) += rtc-mcp795.o +obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o @@ -137,6 +140,7 @@ obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o obj-$(CONFIG_RTC_DRV_RTD119X) += rtc-rtd119x.o +obj-$(CONFIG_RTC_DRV_RV3028) += rtc-rv3028.o obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o obj-$(CONFIG_RTC_DRV_RV8803) += rtc-rv8803.o obj-$(CONFIG_RTC_DRV_RX4581) += rtc-rx4581.o @@ -149,6 +153,7 @@ obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o +obj-$(CONFIG_RTC_DRV_SD3078) += rtc-sd3078.o obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o @@ -172,6 +177,7 @@ obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o +obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c index 43d962a9c210..1d006ef4bb57 100644 --- a/drivers/rtc/dev.c +++ b/drivers/rtc/dev.c @@ -178,11 +178,6 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) remove_wait_queue(&rtc->irq_queue, &wait); if (ret == 0) { - /* Check for any data updates */ - if (rtc->ops->read_callback) - data = rtc->ops->read_callback(rtc->dev.parent, - data); - if (sizeof(int) != sizeof(long) && count == sizeof(unsigned int)) ret = put_user(data, (unsigned int __user *)buf) ?: diff --git a/drivers/rtc/lib.c b/drivers/rtc/lib.c index ef160da84220..9714cb3d1e29 100644 --- a/drivers/rtc/lib.c +++ b/drivers/rtc/lib.c @@ -100,7 +100,7 @@ int rtc_valid_tm(struct rtc_time *tm) if (tm->tm_year < 70 || ((unsigned)tm->tm_mon) >= 12 || tm->tm_mday < 1 - || tm->tm_mday > rtc_month_days(tm->tm_mon, tm->tm_year + 1900) + || tm->tm_mday > rtc_month_days(tm->tm_mon, ((unsigned)tm->tm_year + 1900)) || ((unsigned)tm->tm_hour) >= 24 || ((unsigned)tm->tm_min) >= 60 || ((unsigned)tm->tm_sec) >= 60) @@ -116,8 +116,8 @@ 
EXPORT_SYMBOL(rtc_valid_tm); */ time64_t rtc_tm_to_time64(struct rtc_time *tm) { - return mktime64(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); + return mktime64(((unsigned)tm->tm_year + 1900), tm->tm_mon + 1, + tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); } EXPORT_SYMBOL(rtc_tm_to_time64); diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c index cab293cb2bf0..1fc48ebd3cd0 100644 --- a/drivers/rtc/rtc-88pm80x.c +++ b/drivers/rtc/rtc-88pm80x.c @@ -114,12 +114,14 @@ static int pm80x_rtc_read_time(struct device *dev, struct rtc_time *tm) unsigned char buf[4]; unsigned long ticks, base, data; regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); - base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]); /* load 32-bit read-only counter */ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); @@ -137,7 +139,8 @@ static int pm80x_rtc_set_time(struct device *dev, struct rtc_time *tm) /* load 32-bit read-only counter */ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; base = ticks - data; dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); @@ -158,11 +161,13 @@ static int pm80x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) int ret; regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); - base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]); regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); @@ -185,12 +190,14 @@ static int pm80x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0); regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); - base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]); /* load 32-bit read-only counter */ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c index 01ffc0ef8033..d25282b4a7dd 100644 --- a/drivers/rtc/rtc-88pm860x.c +++ b/drivers/rtc/rtc-88pm860x.c @@ -115,11 +115,13 @@ static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm) pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, 
buf); dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); - base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; + base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) | + (buf[5] << 8) | buf[7]; /* load 32-bit read-only counter */ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); @@ -145,7 +147,8 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm) /* load 32-bit read-only counter */ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; base = ticks - data; dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); @@ -170,10 +173,12 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); - base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; + base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) | + (buf[5] << 8) | buf[7]; pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); @@ -198,11 +203,13 @@ static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); - base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; + base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) | + (buf[5] << 8) | buf[7]; /* load 32-bit read-only counter */ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); - data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; ticks = base + data; dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c new file mode 100644 index 000000000000..e4f6e0061ccf --- /dev/null +++ b/drivers/rtc/rtc-ab-eoz9.c @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Real Time Clock driver for AB-RTCMC-32.768kHz-EOZ9 chip. 
+ * Copyright (C) 2019 Orolia + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define ABEOZ9_REG_CTRL1 0x00 +#define ABEOZ9_REG_CTRL1_MASK GENMASK(7, 0) +#define ABEOZ9_REG_CTRL1_WE BIT(0) +#define ABEOZ9_REG_CTRL1_TE BIT(1) +#define ABEOZ9_REG_CTRL1_TAR BIT(2) +#define ABEOZ9_REG_CTRL1_EERE BIT(3) +#define ABEOZ9_REG_CTRL1_SRON BIT(4) +#define ABEOZ9_REG_CTRL1_TD0 BIT(5) +#define ABEOZ9_REG_CTRL1_TD1 BIT(6) +#define ABEOZ9_REG_CTRL1_CLKINT BIT(7) + +#define ABEOZ9_REG_CTRL_INT 0x01 +#define ABEOZ9_REG_CTRL_INT_AIE BIT(0) +#define ABEOZ9_REG_CTRL_INT_TIE BIT(1) +#define ABEOZ9_REG_CTRL_INT_V1IE BIT(2) +#define ABEOZ9_REG_CTRL_INT_V2IE BIT(3) +#define ABEOZ9_REG_CTRL_INT_SRIE BIT(4) + +#define ABEOZ9_REG_CTRL_INT_FLAG 0x02 +#define ABEOZ9_REG_CTRL_INT_FLAG_AF BIT(0) +#define ABEOZ9_REG_CTRL_INT_FLAG_TF BIT(1) +#define ABEOZ9_REG_CTRL_INT_FLAG_V1IF BIT(2) +#define ABEOZ9_REG_CTRL_INT_FLAG_V2IF BIT(3) +#define ABEOZ9_REG_CTRL_INT_FLAG_SRF BIT(4) + +#define ABEOZ9_REG_CTRL_STATUS 0x03 +#define ABEOZ9_REG_CTRL_STATUS_V1F BIT(2) +#define ABEOZ9_REG_CTRL_STATUS_V2F BIT(3) +#define ABEOZ9_REG_CTRL_STATUS_SR BIT(4) +#define ABEOZ9_REG_CTRL_STATUS_PON BIT(5) +#define ABEOZ9_REG_CTRL_STATUS_EEBUSY BIT(7) + +#define ABEOZ9_REG_SEC 0x08 +#define ABEOZ9_REG_MIN 0x09 +#define ABEOZ9_REG_HOURS 0x0A +#define ABEOZ9_HOURS_PM BIT(6) +#define ABEOZ9_REG_DAYS 0x0B +#define ABEOZ9_REG_WEEKDAYS 0x0C +#define ABEOZ9_REG_MONTHS 0x0D +#define ABEOZ9_REG_YEARS 0x0E + +#define ABEOZ9_SEC_LEN 7 + +#define ABEOZ9_REG_REG_TEMP 0x20 +#define ABEOZ953_TEMP_MAX 120 +#define ABEOZ953_TEMP_MIN -60 + +#define ABEOZ9_REG_EEPROM 0x30 +#define ABEOZ9_REG_EEPROM_MASK GENMASK(8, 0) +#define ABEOZ9_REG_EEPROM_THP BIT(0) +#define ABEOZ9_REG_EEPROM_THE BIT(1) +#define ABEOZ9_REG_EEPROM_FD0 BIT(2) +#define ABEOZ9_REG_EEPROM_FD1 BIT(3) +#define ABEOZ9_REG_EEPROM_R1K BIT(4) +#define ABEOZ9_REG_EEPROM_R5K BIT(5) +#define ABEOZ9_REG_EEPROM_R20K BIT(6) +#define ABEOZ9_REG_EEPROM_R80K BIT(7) + +struct abeoz9_rtc_data { + struct rtc_device *rtc; + struct regmap *regmap; + struct device *hwmon_dev; +}; + +static int abeoz9_check_validity(struct device *dev) +{ + struct abeoz9_rtc_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + int ret; + int val; + + ret = regmap_read(regmap, ABEOZ9_REG_CTRL_STATUS, &val); + if (ret < 0) { + dev_err(dev, + "unable to get CTRL_STATUS register (%d)\n", ret); + return ret; + } + + if (val & ABEOZ9_REG_CTRL_STATUS_PON) { + dev_warn(dev, "power-on reset detected, date is invalid\n"); + return -EINVAL; + } + + if (val & ABEOZ9_REG_CTRL_STATUS_V1F) { + dev_warn(dev, + "voltage drops below VLOW1 threshold, date is invalid\n"); + return -EINVAL; + } + + if ((val & ABEOZ9_REG_CTRL_STATUS_V2F)) { + dev_warn(dev, + "voltage drops below VLOW2 threshold, date is invalid\n"); + return -EINVAL; + } + + return 0; +} + +static int abeoz9_reset_validity(struct regmap *regmap) +{ + return regmap_update_bits(regmap, ABEOZ9_REG_CTRL_STATUS, + ABEOZ9_REG_CTRL_STATUS_V1F | + ABEOZ9_REG_CTRL_STATUS_V2F | + ABEOZ9_REG_CTRL_STATUS_PON, + 0); +} + +static int abeoz9_rtc_get_time(struct device *dev, struct rtc_time *tm) +{ + struct abeoz9_rtc_data *data = dev_get_drvdata(dev); + u8 regs[ABEOZ9_SEC_LEN]; + int ret; + + ret = abeoz9_check_validity(dev); + if (ret) + return ret; + + ret = regmap_bulk_read(data->regmap, ABEOZ9_REG_SEC, + regs, + sizeof(regs)); + if (ret) { + dev_err(dev, "reading RTC time failed (%d)\n", ret); + return ret; + } + + tm->tm_sec = 
bcd2bin(regs[ABEOZ9_REG_SEC - ABEOZ9_REG_SEC] & 0x7F); + tm->tm_min = bcd2bin(regs[ABEOZ9_REG_MIN - ABEOZ9_REG_SEC] & 0x7F); + + if (regs[ABEOZ9_REG_HOURS - ABEOZ9_REG_SEC] & ABEOZ9_HOURS_PM) { + tm->tm_hour = + bcd2bin(regs[ABEOZ9_REG_HOURS - ABEOZ9_REG_SEC] & 0x1f); + if (regs[ABEOZ9_REG_HOURS - ABEOZ9_REG_SEC] & ABEOZ9_HOURS_PM) + tm->tm_hour += 12; + } else { + tm->tm_hour = bcd2bin(regs[ABEOZ9_REG_HOURS - ABEOZ9_REG_SEC]); + } + + tm->tm_mday = bcd2bin(regs[ABEOZ9_REG_DAYS - ABEOZ9_REG_SEC]); + tm->tm_wday = bcd2bin(regs[ABEOZ9_REG_WEEKDAYS - ABEOZ9_REG_SEC]); + tm->tm_mon = bcd2bin(regs[ABEOZ9_REG_MONTHS - ABEOZ9_REG_SEC]) - 1; + tm->tm_year = bcd2bin(regs[ABEOZ9_REG_YEARS - ABEOZ9_REG_SEC]) + 100; + + return ret; +} + +static int abeoz9_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct abeoz9_rtc_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + u8 regs[ABEOZ9_SEC_LEN]; + int ret; + + regs[ABEOZ9_REG_SEC - ABEOZ9_REG_SEC] = bin2bcd(tm->tm_sec); + regs[ABEOZ9_REG_MIN - ABEOZ9_REG_SEC] = bin2bcd(tm->tm_min); + regs[ABEOZ9_REG_HOURS - ABEOZ9_REG_SEC] = bin2bcd(tm->tm_hour); + regs[ABEOZ9_REG_DAYS - ABEOZ9_REG_SEC] = bin2bcd(tm->tm_mday); + regs[ABEOZ9_REG_WEEKDAYS - ABEOZ9_REG_SEC] = bin2bcd(tm->tm_wday); + regs[ABEOZ9_REG_MONTHS - ABEOZ9_REG_SEC] = bin2bcd(tm->tm_mon + 1); + regs[ABEOZ9_REG_YEARS - ABEOZ9_REG_SEC] = bin2bcd(tm->tm_year - 100); + + ret = regmap_bulk_write(data->regmap, ABEOZ9_REG_SEC, + regs, + sizeof(regs)); + + if (ret) { + dev_err(dev, "set RTC time failed (%d)\n", ret); + return ret; + } + + return abeoz9_reset_validity(regmap); +} + +static int abeoz9_trickle_parse_dt(struct device_node *node) +{ + u32 ohms = 0; + + if (of_property_read_u32(node, "trickle-resistor-ohms", &ohms)) + return 0; + + switch (ohms) { + case 1000: + return ABEOZ9_REG_EEPROM_R1K; + case 5000: + return ABEOZ9_REG_EEPROM_R5K; + case 20000: + return ABEOZ9_REG_EEPROM_R20K; + case 80000: + return ABEOZ9_REG_EEPROM_R80K; + default: + return 0; + } +} + +static int abeoz9_rtc_setup(struct device *dev, struct device_node *node) +{ + struct abeoz9_rtc_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + int ret; + + /* Enable Self Recovery, Clock for Watch and EEPROM refresh functions */ + ret = regmap_update_bits(regmap, ABEOZ9_REG_CTRL1, + ABEOZ9_REG_CTRL1_MASK, + ABEOZ9_REG_CTRL1_WE | + ABEOZ9_REG_CTRL1_EERE | + ABEOZ9_REG_CTRL1_SRON); + if (ret < 0) { + dev_err(dev, "unable to set CTRL_1 register (%d)\n", ret); + return ret; + } + + ret = regmap_write(regmap, ABEOZ9_REG_CTRL_INT, 0); + if (ret < 0) { + dev_err(dev, + "unable to set control CTRL_INT register (%d)\n", + ret); + return ret; + } + + ret = regmap_write(regmap, ABEOZ9_REG_CTRL_INT_FLAG, 0); + if (ret < 0) { + dev_err(dev, + "unable to set control CTRL_INT_FLAG register (%d)\n", + ret); + return ret; + } + + ret = abeoz9_trickle_parse_dt(node); + + /* Enable built-in termometer */ + ret |= ABEOZ9_REG_EEPROM_THE; + + ret = regmap_update_bits(regmap, ABEOZ9_REG_EEPROM, + ABEOZ9_REG_EEPROM_MASK, + ret); + if (ret < 0) { + dev_err(dev, "unable to set EEPROM register (%d)\n", ret); + return ret; + } + + return ret; +} + +static const struct rtc_class_ops rtc_ops = { + .read_time = abeoz9_rtc_get_time, + .set_time = abeoz9_rtc_set_time, +}; + +static const struct regmap_config abeoz9_rtc_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +#if IS_REACHABLE(CONFIG_HWMON) + +static int abeoz9z3_temp_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, 
int channel, long *temp) +{ + struct abeoz9_rtc_data *data = dev_get_drvdata(dev); + struct regmap *regmap = data->regmap; + int ret; + unsigned int val; + + ret = regmap_read(regmap, ABEOZ9_REG_CTRL_STATUS, &val); + if (ret < 0) + return ret; + + if ((val & ABEOZ9_REG_CTRL_STATUS_V1F) || + (val & ABEOZ9_REG_CTRL_STATUS_V2F)) { + dev_err(dev, + "thermometer might be disabled due to low voltage\n"); + return -EINVAL; + } + + switch (attr) { + case hwmon_temp_input: + ret = regmap_read(regmap, ABEOZ9_REG_REG_TEMP, &val); + if (ret < 0) + return ret; + *temp = 1000 * (val + ABEOZ953_TEMP_MIN); + return 0; + case hwmon_temp_max: + *temp = 1000 * ABEOZ953_TEMP_MAX; + return 0; + case hwmon_temp_min: + *temp = 1000 * ABEOZ953_TEMP_MIN; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static umode_t abeoz9_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_max: + case hwmon_temp_min: + return 0444; + default: + return 0; + } +} + +static const u32 abeoz9_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0 +}; + +static const struct hwmon_channel_info abeoz9_chip = { + .type = hwmon_chip, + .config = abeoz9_chip_config, +}; + +static const u32 abeoz9_temp_config[] = { + HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN, + 0 +}; + +static const struct hwmon_channel_info abeoz9_temp = { + .type = hwmon_temp, + .config = abeoz9_temp_config, +}; + +static const struct hwmon_channel_info *abeoz9_info[] = { + &abeoz9_chip, + &abeoz9_temp, + NULL +}; + +static const struct hwmon_ops abeoz9_hwmon_ops = { + .is_visible = abeoz9_is_visible, + .read = abeoz9z3_temp_read, +}; + +static const struct hwmon_chip_info abeoz9_chip_info = { + .ops = &abeoz9_hwmon_ops, + .info = abeoz9_info, +}; + +static void abeoz9_hwmon_register(struct device *dev, + struct abeoz9_rtc_data *data) +{ + data->hwmon_dev = + devm_hwmon_device_register_with_info(dev, + "abeoz9", + data, + &abeoz9_chip_info, + NULL); + if (IS_ERR(data->hwmon_dev)) { + dev_warn(dev, "unable to register hwmon device %ld\n", + PTR_ERR(data->hwmon_dev)); + } +} + +#else + +static void abeoz9_hwmon_register(struct device *dev, + struct abeoz9_rtc_data *data) +{ +} + +#endif + +static int abeoz9_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct abeoz9_rtc_data *data = NULL; + struct device *dev = &client->dev; + struct regmap *regmap; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C | + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK)) { + ret = -ENODEV; + goto err; + } + + regmap = devm_regmap_init_i2c(client, &abeoz9_rtc_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(dev, "regmap allocation failed: %d\n", ret); + goto err; + } + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err; + } + + data->regmap = regmap; + dev_set_drvdata(dev, data); + + ret = abeoz9_rtc_setup(dev, client->dev.of_node); + if (ret) + goto err; + + data->rtc = devm_rtc_allocate_device(dev); + ret = PTR_ERR_OR_ZERO(data->rtc); + if (ret) + goto err; + + data->rtc->ops = &rtc_ops; + data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + data->rtc->range_max = RTC_TIMESTAMP_END_2099; + + ret = rtc_register_device(data->rtc); + if (ret) + goto err; + + abeoz9_hwmon_register(dev, data); + return 0; + +err: + dev_err(dev, "unable to register RTC device (%d)\n", ret); + return ret; +} + +#ifdef CONFIG_OF +static const struct of_device_id abeoz9_dt_match[] = { + { .compatible 
= "abracon,abeoz9" }, + { }, +}; +MODULE_DEVICE_TABLE(of, abeoz9_dt_match); +#endif + +static const struct i2c_device_id abeoz9_id[] = { + { "abeoz9", 0 }, + { } +}; + +static struct i2c_driver abeoz9_driver = { + .driver = { + .name = "rtc-ab-eoz9", + .of_match_table = of_match_ptr(abeoz9_dt_match), + }, + .probe = abeoz9_probe, + .id_table = abeoz9_id, +}; + +module_i2c_driver(abeoz9_driver); + +MODULE_AUTHOR("Artem Panfilov "); +MODULE_DESCRIPTION("Abracon AB-RTCMC-32.768kHz-EOZ9 RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c index 4d24f7288ad7..6ddcad642d1e 100644 --- a/drivers/rtc/rtc-abx80x.c +++ b/drivers/rtc/rtc-abx80x.c @@ -5,7 +5,7 @@ * Copyright 2014-2015 Macq S.A. * * Author: Philippe De Muyter - * Author: Alexandre Belloni + * Author: Alexandre Belloni * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -46,6 +46,9 @@ #define ABX8XX_CTRL_ARST BIT(2) #define ABX8XX_CTRL_12_24 BIT(6) +#define ABX8XX_REG_CTRL2 0x11 +#define ABX8XX_CTRL2_RSVD BIT(5) + #define ABX8XX_REG_IRQ 0x12 #define ABX8XX_IRQ_AIE BIT(2) #define ABX8XX_IRQ_IM_1_4 (0x3 << 5) @@ -78,6 +81,9 @@ #define ABX8XX_REG_ID0 0x28 +#define ABX8XX_REG_OUT_CTRL 0x30 +#define ABX8XX_OUT_CTRL_EXDS BIT(4) + #define ABX8XX_REG_TRICKLE 0x20 #define ABX8XX_TRICKLE_CHARGE_ENABLE 0xa0 #define ABX8XX_TRICKLE_STANDARD_DIODE 0x8 @@ -86,7 +92,7 @@ static u8 trickle_resistors[] = {0, 3, 6, 11}; enum abx80x_chip {AB0801, AB0803, AB0804, AB0805, - AB1801, AB1803, AB1804, AB1805, ABX80X}; + AB1801, AB1803, AB1804, AB1805, RV1805, ABX80X}; struct abx80x_cap { u16 pn; @@ -103,6 +109,7 @@ static struct abx80x_cap abx80x_caps[] = { [AB1803] = {.pn = 0x1803}, [AB1804] = {.pn = 0x1804, .has_tc = true, .has_wdog = true}, [AB1805] = {.pn = 0x1805, .has_tc = true, .has_wdog = true}, + [RV1805] = {.pn = 0x1805, .has_tc = true, .has_wdog = true}, [ABX80X] = {.pn = 0} }; @@ -723,6 +730,62 @@ static int abx80x_probe(struct i2c_client *client, return -EIO; } + /* Configure RV1805 specifics */ + if (part == RV1805) { + /* + * Avoid accidentally entering test mode. This can happen + * on the RV1805 in case the reserved bit 5 in control2 + * register is set. RV-1805-C3 datasheet indicates that + * the bit should be cleared in section 11h - Control2. + */ + data = i2c_smbus_read_byte_data(client, ABX8XX_REG_CTRL2); + if (data < 0) { + dev_err(&client->dev, + "Unable to read control2 register\n"); + return -EIO; + } + + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CTRL2, + data & ~ABX8XX_CTRL2_RSVD); + if (err < 0) { + dev_err(&client->dev, + "Unable to write control2 register\n"); + return -EIO; + } + + /* + * Avoid extra power leakage. The RV1805 uses smaller + * 10pin package and the EXTI input is not present. + * Disable it to avoid leakage. 
+ */ + data = i2c_smbus_read_byte_data(client, ABX8XX_REG_OUT_CTRL); + if (data < 0) { + dev_err(&client->dev, + "Unable to read output control register\n"); + return -EIO; + } + + /* + * Write the configuration key register to enable access to + * the config2 register + */ + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, + ABX8XX_CFG_KEY_MISC); + if (err < 0) { + dev_err(&client->dev, + "Unable to write configuration key\n"); + return -EIO; + } + + err = i2c_smbus_write_byte_data(client, ABX8XX_REG_OUT_CTRL, + data | ABX8XX_OUT_CTRL_EXDS); + if (err < 0) { + dev_err(&client->dev, + "Unable to write output control register\n"); + return -EIO; + } + } + /* part autodetection */ if (part == ABX80X) { for (i = 0; abx80x_caps[i].pn; i++) @@ -826,7 +889,7 @@ static const struct i2c_device_id abx80x_id[] = { { "ab1803", AB1803 }, { "ab1804", AB1804 }, { "ab1805", AB1805 }, - { "rv1805", AB1805 }, + { "rv1805", RV1805 }, { } }; MODULE_DEVICE_TABLE(i2c, abx80x_id); @@ -843,6 +906,6 @@ static struct i2c_driver abx80x_driver = { module_i2c_driver(abx80x_driver); MODULE_AUTHOR("Philippe De Muyter "); -MODULE_AUTHOR("Alexandre Belloni "); +MODULE_AUTHOR("Alexandre Belloni "); MODULE_DESCRIPTION("Abracon ABX80X RTC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c new file mode 100644 index 000000000000..3b7d643c8a63 --- /dev/null +++ b/drivers/rtc/rtc-cadence.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2019 Cadence + * + * Authors: + * Jan Kotas + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Registers */ +#define CDNS_RTC_CTLR 0x00 +#define CDNS_RTC_HMR 0x04 +#define CDNS_RTC_TIMR 0x08 +#define CDNS_RTC_CALR 0x0C +#define CDNS_RTC_TIMAR 0x10 +#define CDNS_RTC_CALAR 0x14 +#define CDNS_RTC_AENR 0x18 +#define CDNS_RTC_EFLR 0x1C +#define CDNS_RTC_IENR 0x20 +#define CDNS_RTC_IDISR 0x24 +#define CDNS_RTC_IMSKR 0x28 +#define CDNS_RTC_STSR 0x2C +#define CDNS_RTC_KRTCR 0x30 + +/* Control */ +#define CDNS_RTC_CTLR_TIME BIT(0) +#define CDNS_RTC_CTLR_CAL BIT(1) +#define CDNS_RTC_CTLR_TIME_CAL (CDNS_RTC_CTLR_TIME | CDNS_RTC_CTLR_CAL) + +/* Status */ +#define CDNS_RTC_STSR_VT BIT(0) +#define CDNS_RTC_STSR_VC BIT(1) +#define CDNS_RTC_STSR_VTA BIT(2) +#define CDNS_RTC_STSR_VCA BIT(3) +#define CDNS_RTC_STSR_VT_VC (CDNS_RTC_STSR_VT | CDNS_RTC_STSR_VC) +#define CDNS_RTC_STSR_VTA_VCA (CDNS_RTC_STSR_VTA | CDNS_RTC_STSR_VCA) + +/* Keep RTC */ +#define CDNS_RTC_KRTCR_KRTC BIT(0) + +/* Alarm, Event, Interrupt */ +#define CDNS_RTC_AEI_HOS BIT(0) +#define CDNS_RTC_AEI_SEC BIT(1) +#define CDNS_RTC_AEI_MIN BIT(2) +#define CDNS_RTC_AEI_HOUR BIT(3) +#define CDNS_RTC_AEI_DATE BIT(4) +#define CDNS_RTC_AEI_MNTH BIT(5) +#define CDNS_RTC_AEI_ALRM BIT(6) + +/* Time */ +#define CDNS_RTC_TIME_H GENMASK(7, 0) +#define CDNS_RTC_TIME_S GENMASK(14, 8) +#define CDNS_RTC_TIME_M GENMASK(22, 16) +#define CDNS_RTC_TIME_HR GENMASK(29, 24) +#define CDNS_RTC_TIME_PM BIT(30) +#define CDNS_RTC_TIME_CH BIT(31) + +/* Calendar */ +#define CDNS_RTC_CAL_DAY GENMASK(2, 0) +#define CDNS_RTC_CAL_M GENMASK(7, 3) +#define CDNS_RTC_CAL_D GENMASK(13, 8) +#define CDNS_RTC_CAL_Y GENMASK(23, 16) +#define CDNS_RTC_CAL_C GENMASK(29, 24) +#define CDNS_RTC_CAL_CH BIT(31) + +#define CDNS_RTC_MAX_REGS_TRIES 3 + +struct cdns_rtc { + struct rtc_device *rtc_dev; + struct clk *pclk; + struct clk *ref_clk; + void __iomem *regs; + int irq; +}; + +static void cdns_rtc_set_enabled(struct cdns_rtc *crtc, bool enabled) 
+{ + u32 reg = enabled ? 0x0 : CDNS_RTC_CTLR_TIME_CAL; + + writel(reg, crtc->regs + CDNS_RTC_CTLR); +} + +static bool cdns_rtc_get_enabled(struct cdns_rtc *crtc) +{ + return !(readl(crtc->regs + CDNS_RTC_CTLR) & CDNS_RTC_CTLR_TIME_CAL); +} + +static irqreturn_t cdns_rtc_irq_handler(int irq, void *id) +{ + struct device *dev = id; + struct cdns_rtc *crtc = dev_get_drvdata(dev); + + /* Reading the register clears it */ + if (!(readl(crtc->regs + CDNS_RTC_EFLR) & CDNS_RTC_AEI_ALRM)) + return IRQ_NONE; + + rtc_update_irq(crtc->rtc_dev, 1, RTC_IRQF | RTC_AF); + return IRQ_HANDLED; +} + +static u32 cdns_rtc_time2reg(struct rtc_time *tm) +{ + return FIELD_PREP(CDNS_RTC_TIME_S, bin2bcd(tm->tm_sec)) + | FIELD_PREP(CDNS_RTC_TIME_M, bin2bcd(tm->tm_min)) + | FIELD_PREP(CDNS_RTC_TIME_HR, bin2bcd(tm->tm_hour)); +} + +static void cdns_rtc_reg2time(u32 reg, struct rtc_time *tm) +{ + tm->tm_sec = bcd2bin(FIELD_GET(CDNS_RTC_TIME_S, reg)); + tm->tm_min = bcd2bin(FIELD_GET(CDNS_RTC_TIME_M, reg)); + tm->tm_hour = bcd2bin(FIELD_GET(CDNS_RTC_TIME_HR, reg)); +} + +static int cdns_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct cdns_rtc *crtc = dev_get_drvdata(dev); + u32 reg; + + /* If the RTC is disabled, assume the values are invalid */ + if (!cdns_rtc_get_enabled(crtc)) + return -EINVAL; + + cdns_rtc_set_enabled(crtc, false); + + reg = readl(crtc->regs + CDNS_RTC_TIMR); + cdns_rtc_reg2time(reg, tm); + + reg = readl(crtc->regs + CDNS_RTC_CALR); + tm->tm_mday = bcd2bin(FIELD_GET(CDNS_RTC_CAL_D, reg)); + tm->tm_mon = bcd2bin(FIELD_GET(CDNS_RTC_CAL_M, reg)) - 1; + tm->tm_year = bcd2bin(FIELD_GET(CDNS_RTC_CAL_Y, reg)) + + bcd2bin(FIELD_GET(CDNS_RTC_CAL_C, reg)) * 100 - 1900; + tm->tm_wday = bcd2bin(FIELD_GET(CDNS_RTC_CAL_DAY, reg)) - 1; + + cdns_rtc_set_enabled(crtc, true); + return 0; +} + +static int cdns_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct cdns_rtc *crtc = dev_get_drvdata(dev); + u32 timr, calr, stsr; + int ret = -EIO; + int year = tm->tm_year + 1900; + int tries; + + cdns_rtc_set_enabled(crtc, false); + + timr = cdns_rtc_time2reg(tm); + + calr = FIELD_PREP(CDNS_RTC_CAL_D, bin2bcd(tm->tm_mday)) + | FIELD_PREP(CDNS_RTC_CAL_M, bin2bcd(tm->tm_mon + 1)) + | FIELD_PREP(CDNS_RTC_CAL_Y, bin2bcd(year % 100)) + | FIELD_PREP(CDNS_RTC_CAL_C, bin2bcd(year / 100)) + | FIELD_PREP(CDNS_RTC_CAL_DAY, tm->tm_wday + 1); + + /* Update registers, check valid flags */ + for (tries = 0; tries < CDNS_RTC_MAX_REGS_TRIES; tries++) { + writel(timr, crtc->regs + CDNS_RTC_TIMR); + writel(calr, crtc->regs + CDNS_RTC_CALR); + stsr = readl(crtc->regs + CDNS_RTC_STSR); + + if ((stsr & CDNS_RTC_STSR_VT_VC) == CDNS_RTC_STSR_VT_VC) { + ret = 0; + break; + } + } + + cdns_rtc_set_enabled(crtc, true); + return ret; +} + +static int cdns_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct cdns_rtc *crtc = dev_get_drvdata(dev); + + if (enabled) { + writel((CDNS_RTC_AEI_SEC | CDNS_RTC_AEI_MIN | CDNS_RTC_AEI_HOUR + | CDNS_RTC_AEI_DATE | CDNS_RTC_AEI_MNTH), + crtc->regs + CDNS_RTC_AENR); + writel(CDNS_RTC_AEI_ALRM, crtc->regs + CDNS_RTC_IENR); + } else { + writel(0, crtc->regs + CDNS_RTC_AENR); + writel(CDNS_RTC_AEI_ALRM, crtc->regs + CDNS_RTC_IDISR); + } + + return 0; +} + +static int cdns_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct cdns_rtc *crtc = dev_get_drvdata(dev); + u32 reg; + + reg = readl(crtc->regs + CDNS_RTC_TIMAR); + cdns_rtc_reg2time(reg, &alarm->time); + + reg = readl(crtc->regs + CDNS_RTC_CALAR); + alarm->time.tm_mday = 
bcd2bin(FIELD_GET(CDNS_RTC_CAL_D, reg)); + alarm->time.tm_mon = bcd2bin(FIELD_GET(CDNS_RTC_CAL_M, reg)) - 1; + + return 0; +} + +static int cdns_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) +{ + struct cdns_rtc *crtc = dev_get_drvdata(dev); + int ret = -EIO; + int tries; + u32 timar, calar, stsr; + + cdns_rtc_alarm_irq_enable(dev, 0); + + timar = cdns_rtc_time2reg(&alarm->time); + calar = FIELD_PREP(CDNS_RTC_CAL_D, bin2bcd(alarm->time.tm_mday)) + | FIELD_PREP(CDNS_RTC_CAL_M, bin2bcd(alarm->time.tm_mon + 1)); + + /* Update registers, check valid alarm flags */ + for (tries = 0; tries < CDNS_RTC_MAX_REGS_TRIES; tries++) { + writel(timar, crtc->regs + CDNS_RTC_TIMAR); + writel(calar, crtc->regs + CDNS_RTC_CALAR); + stsr = readl(crtc->regs + CDNS_RTC_STSR); + + if ((stsr & CDNS_RTC_STSR_VTA_VCA) == CDNS_RTC_STSR_VTA_VCA) { + ret = 0; + break; + } + } + + if (!ret) + cdns_rtc_alarm_irq_enable(dev, alarm->enabled); + return ret; +} + +static const struct rtc_class_ops cdns_rtc_ops = { + .read_time = cdns_rtc_read_time, + .set_time = cdns_rtc_set_time, + .read_alarm = cdns_rtc_read_alarm, + .set_alarm = cdns_rtc_set_alarm, + .alarm_irq_enable = cdns_rtc_alarm_irq_enable, +}; + +static int cdns_rtc_probe(struct platform_device *pdev) +{ + struct cdns_rtc *crtc; + struct resource *res; + int ret; + unsigned long ref_clk_freq; + + crtc = devm_kzalloc(&pdev->dev, sizeof(*crtc), GFP_KERNEL); + if (!crtc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + crtc->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(crtc->regs)) + return PTR_ERR(crtc->regs); + + crtc->irq = platform_get_irq(pdev, 0); + if (crtc->irq < 0) + return -EINVAL; + + crtc->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(crtc->pclk)) { + ret = PTR_ERR(crtc->pclk); + dev_err(&pdev->dev, + "Failed to retrieve the peripheral clock, %d\n", ret); + return ret; + } + + crtc->ref_clk = devm_clk_get(&pdev->dev, "ref_clk"); + if (IS_ERR(crtc->ref_clk)) { + ret = PTR_ERR(crtc->ref_clk); + dev_err(&pdev->dev, + "Failed to retrieve the reference clock, %d\n", ret); + return ret; + } + + crtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(crtc->rtc_dev)) { + ret = PTR_ERR(crtc->rtc_dev); + dev_err(&pdev->dev, + "Failed to allocate the RTC device, %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, crtc); + + ret = clk_prepare_enable(crtc->pclk); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable the peripheral clock, %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(crtc->ref_clk); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable the reference clock, %d\n", ret); + goto err_disable_pclk; + } + + ref_clk_freq = clk_get_rate(crtc->ref_clk); + if ((ref_clk_freq != 1) && (ref_clk_freq != 100)) { + dev_err(&pdev->dev, + "Invalid reference clock frequency %lu Hz.\n", + ref_clk_freq); + ret = -EINVAL; + goto err_disable_ref_clk; + } + + ret = devm_request_irq(&pdev->dev, crtc->irq, + cdns_rtc_irq_handler, 0, + dev_name(&pdev->dev), &pdev->dev); + if (ret) { + dev_err(&pdev->dev, + "Failed to request interrupt for the device, %d\n", + ret); + goto err_disable_ref_clk; + } + + /* The RTC supports 01.01.1900 - 31.12.2999 */ + crtc->rtc_dev->range_min = mktime64(1900, 1, 1, 0, 0, 0); + crtc->rtc_dev->range_max = mktime64(2999, 12, 31, 23, 59, 59); + + crtc->rtc_dev->ops = &cdns_rtc_ops; + device_init_wakeup(&pdev->dev, true); + + /* Always use 24-hour mode and keep the RTC values */ + writel(0, crtc->regs + CDNS_RTC_HMR); + writel(CDNS_RTC_KRTCR_KRTC, 
crtc->regs + CDNS_RTC_KRTCR); + + ret = rtc_register_device(crtc->rtc_dev); + if (ret) { + dev_err(&pdev->dev, + "Failed to register the RTC device, %d\n", ret); + goto err_disable_wakeup; + } + + return 0; + +err_disable_wakeup: + device_init_wakeup(&pdev->dev, false); + +err_disable_ref_clk: + clk_disable_unprepare(crtc->ref_clk); + +err_disable_pclk: + clk_disable_unprepare(crtc->pclk); + + return ret; +} + +static int cdns_rtc_remove(struct platform_device *pdev) +{ + struct cdns_rtc *crtc = platform_get_drvdata(pdev); + + cdns_rtc_alarm_irq_enable(&pdev->dev, 0); + device_init_wakeup(&pdev->dev, 0); + + clk_disable_unprepare(crtc->pclk); + clk_disable_unprepare(crtc->ref_clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cdns_rtc_suspend(struct device *dev) +{ + struct cdns_rtc *crtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(crtc->irq); + + return 0; +} + +static int cdns_rtc_resume(struct device *dev) +{ + struct cdns_rtc *crtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(crtc->irq); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cdns_rtc_pm_ops, cdns_rtc_suspend, cdns_rtc_resume); + +static const struct of_device_id cdns_rtc_of_match[] = { + { .compatible = "cdns,rtc-r109v3" }, + { }, +}; +MODULE_DEVICE_TABLE(of, cdns_rtc_of_match); + +static struct platform_driver cdns_rtc_driver = { + .driver = { + .name = "cdns-rtc", + .of_match_table = cdns_rtc_of_match, + .pm = &cdns_rtc_pm_ops, + }, + .probe = cdns_rtc_probe, + .remove = cdns_rtc_remove, +}; +module_platform_driver(cdns_rtc_driver); + +MODULE_AUTHOR("Jan Kotas "); +MODULE_DESCRIPTION("Cadence RTC driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:cdns-rtc"); diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c index fc5cf5c44ae7..0b232c84f674 100644 --- a/drivers/rtc/rtc-coh901331.c +++ b/drivers/rtc/rtc-coh901331.c @@ -235,9 +235,13 @@ static int coh901331_suspend(struct device *dev) static int coh901331_resume(struct device *dev) { + int ret; struct coh901331_port *rtap = dev_get_drvdata(dev); - clk_prepare(rtap->clk); + ret = clk_prepare(rtap->clk); + if (ret) + return ret; + if (device_may_wakeup(dev)) { disable_irq_wake(rtap->irq); } else { diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 74b31dce484f..07530fe1da2a 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -114,6 +114,33 @@ enum ds_type { # define RX8025_BIT_VDET 0x40 # define RX8025_BIT_XST 0x20 +#define RX8130_REG_ALARM_MIN 0x17 +#define RX8130_REG_ALARM_HOUR 0x18 +#define RX8130_REG_ALARM_WEEK_OR_DAY 0x19 +#define RX8130_REG_EXTENSION 0x1c +#define RX8130_REG_EXTENSION_WADA BIT(3) +#define RX8130_REG_FLAG 0x1d +#define RX8130_REG_FLAG_VLF BIT(1) +#define RX8130_REG_FLAG_AF BIT(3) +#define RX8130_REG_CONTROL0 0x1e +#define RX8130_REG_CONTROL0_AIE BIT(3) + +#define MCP794XX_REG_CONTROL 0x07 +# define MCP794XX_BIT_ALM0_EN 0x10 +# define MCP794XX_BIT_ALM1_EN 0x20 +#define MCP794XX_REG_ALARM0_BASE 0x0a +#define MCP794XX_REG_ALARM0_CTRL 0x0d +#define MCP794XX_REG_ALARM1_BASE 0x11 +#define MCP794XX_REG_ALARM1_CTRL 0x14 +# define MCP794XX_BIT_ALMX_IF BIT(3) +# define MCP794XX_BIT_ALMX_C0 BIT(4) +# define MCP794XX_BIT_ALMX_C1 BIT(5) +# define MCP794XX_BIT_ALMX_C2 BIT(6) +# define MCP794XX_BIT_ALMX_POL BIT(7) +# define MCP794XX_MSK_ALMX_MATCH (MCP794XX_BIT_ALMX_C0 | \ + MCP794XX_BIT_ALMX_C1 | \ + MCP794XX_BIT_ALMX_C2) + #define M41TXX_REG_CONTROL 0x07 # define M41TXX_BIT_OUT BIT(7) # define M41TXX_BIT_FT BIT(6) @@ -158,313 
+185,45 @@ struct chip_desc { bool); }; -static int ds1307_get_time(struct device *dev, struct rtc_time *t); -static int ds1307_set_time(struct device *dev, struct rtc_time *t); -static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t); -static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t); -static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled); -static u8 do_trickle_setup_ds1339(struct ds1307 *, u32 ohms, bool diode); -static irqreturn_t rx8130_irq(int irq, void *dev_id); -static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t); -static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t); -static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled); -static irqreturn_t mcp794xx_irq(int irq, void *dev_id); -static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t); -static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t); -static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled); -static int m41txx_rtc_read_offset(struct device *dev, long *offset); -static int m41txx_rtc_set_offset(struct device *dev, long offset); +static const struct chip_desc chips[last_ds_type]; -static const struct rtc_class_ops rx8130_rtc_ops = { - .read_time = ds1307_get_time, - .set_time = ds1307_set_time, - .read_alarm = rx8130_read_alarm, - .set_alarm = rx8130_set_alarm, - .alarm_irq_enable = rx8130_alarm_irq_enable, -}; +static int ds1307_get_time(struct device *dev, struct rtc_time *t) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + int tmp, ret; + const struct chip_desc *chip = &chips[ds1307->type]; + u8 regs[7]; -static const struct rtc_class_ops mcp794xx_rtc_ops = { - .read_time = ds1307_get_time, - .set_time = ds1307_set_time, - .read_alarm = mcp794xx_read_alarm, - .set_alarm = mcp794xx_set_alarm, - .alarm_irq_enable = mcp794xx_alarm_irq_enable, -}; + if (ds1307->type == rx_8130) { + unsigned int regflag; + ret = regmap_read(ds1307->regmap, RX8130_REG_FLAG, ®flag); + if (ret) { + dev_err(dev, "%s error %d\n", "read", ret); + return ret; + } -static const struct rtc_class_ops m41txx_rtc_ops = { - .read_time = ds1307_get_time, - .set_time = ds1307_set_time, - .read_alarm = ds1337_read_alarm, - .set_alarm = ds1337_set_alarm, - .alarm_irq_enable = ds1307_alarm_irq_enable, - .read_offset = m41txx_rtc_read_offset, - .set_offset = m41txx_rtc_set_offset, -}; + if (regflag & RX8130_REG_FLAG_VLF) { + dev_warn_once(dev, "oscillator failed, set time!\n"); + return -EINVAL; + } + } -static const struct chip_desc chips[last_ds_type] = { - [ds_1307] = { - .nvram_offset = 8, - .nvram_size = 56, - }, - [ds_1308] = { - .nvram_offset = 8, - .nvram_size = 56, - }, - [ds_1337] = { - .alarm = 1, - .century_reg = DS1307_REG_MONTH, - .century_bit = DS1337_BIT_CENTURY, - }, - [ds_1338] = { - .nvram_offset = 8, - .nvram_size = 56, - }, - [ds_1339] = { - .alarm = 1, - .century_reg = DS1307_REG_MONTH, - .century_bit = DS1337_BIT_CENTURY, - .bbsqi_bit = DS1339_BIT_BBSQI, - .trickle_charger_reg = 0x10, - .do_trickle_setup = &do_trickle_setup_ds1339, - }, - [ds_1340] = { - .century_reg = DS1307_REG_HOUR, - .century_enable_bit = DS1340_BIT_CENTURY_EN, - .century_bit = DS1340_BIT_CENTURY, - .do_trickle_setup = &do_trickle_setup_ds1339, - .trickle_charger_reg = 0x08, - }, - [ds_1341] = { - .century_reg = DS1307_REG_MONTH, - .century_bit = DS1337_BIT_CENTURY, - }, - [ds_1388] = { - .offset = 1, - .trickle_charger_reg = 0x0a, - }, - [ds_3231] = { - .alarm = 1, - .century_reg = 
DS1307_REG_MONTH, - .century_bit = DS1337_BIT_CENTURY, - .bbsqi_bit = DS3231_BIT_BBSQW, - }, - [rx_8130] = { - .alarm = 1, - /* this is battery backed SRAM */ - .nvram_offset = 0x20, - .nvram_size = 4, /* 32bit (4 word x 8 bit) */ - .offset = 0x10, - .irq_handler = rx8130_irq, - .rtc_ops = &rx8130_rtc_ops, - }, - [m41t0] = { - .rtc_ops = &m41txx_rtc_ops, - }, - [m41t00] = { - .rtc_ops = &m41txx_rtc_ops, - }, - [m41t11] = { - /* this is battery backed SRAM */ - .nvram_offset = 8, - .nvram_size = 56, - .rtc_ops = &m41txx_rtc_ops, - }, - [mcp794xx] = { - .alarm = 1, - /* this is battery backed SRAM */ - .nvram_offset = 0x20, - .nvram_size = 0x40, - .irq_handler = mcp794xx_irq, - .rtc_ops = &mcp794xx_rtc_ops, - }, -}; + /* read the RTC date and time registers all at once */ + ret = regmap_bulk_read(ds1307->regmap, chip->offset, regs, + sizeof(regs)); + if (ret) { + dev_err(dev, "%s error %d\n", "read", ret); + return ret; + } -static const struct i2c_device_id ds1307_id[] = { - { "ds1307", ds_1307 }, - { "ds1308", ds_1308 }, - { "ds1337", ds_1337 }, - { "ds1338", ds_1338 }, - { "ds1339", ds_1339 }, - { "ds1388", ds_1388 }, - { "ds1340", ds_1340 }, - { "ds1341", ds_1341 }, - { "ds3231", ds_3231 }, - { "m41t0", m41t0 }, - { "m41t00", m41t00 }, - { "m41t11", m41t11 }, - { "mcp7940x", mcp794xx }, - { "mcp7941x", mcp794xx }, - { "pt7c4338", ds_1307 }, - { "rx8025", rx_8025 }, - { "isl12057", ds_1337 }, - { "rx8130", rx_8130 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, ds1307_id); + dev_dbg(dev, "%s: %7ph\n", "read", regs); -#ifdef CONFIG_OF -static const struct of_device_id ds1307_of_match[] = { - { - .compatible = "dallas,ds1307", - .data = (void *)ds_1307 - }, - { - .compatible = "dallas,ds1308", - .data = (void *)ds_1308 - }, - { - .compatible = "dallas,ds1337", - .data = (void *)ds_1337 - }, - { - .compatible = "dallas,ds1338", - .data = (void *)ds_1338 - }, - { - .compatible = "dallas,ds1339", - .data = (void *)ds_1339 - }, - { - .compatible = "dallas,ds1388", - .data = (void *)ds_1388 - }, - { - .compatible = "dallas,ds1340", - .data = (void *)ds_1340 - }, - { - .compatible = "dallas,ds1341", - .data = (void *)ds_1341 - }, - { - .compatible = "maxim,ds3231", - .data = (void *)ds_3231 - }, - { - .compatible = "st,m41t0", - .data = (void *)m41t0 - }, - { - .compatible = "st,m41t00", - .data = (void *)m41t00 - }, - { - .compatible = "st,m41t11", - .data = (void *)m41t11 - }, - { - .compatible = "microchip,mcp7940x", - .data = (void *)mcp794xx - }, - { - .compatible = "microchip,mcp7941x", - .data = (void *)mcp794xx - }, - { - .compatible = "pericom,pt7c4338", - .data = (void *)ds_1307 - }, - { - .compatible = "epson,rx8025", - .data = (void *)rx_8025 - }, - { - .compatible = "isil,isl12057", - .data = (void *)ds_1337 - }, - { - .compatible = "epson,rx8130", - .data = (void *)rx_8130 - }, - { } -}; -MODULE_DEVICE_TABLE(of, ds1307_of_match); -#endif - -#ifdef CONFIG_ACPI -static const struct acpi_device_id ds1307_acpi_ids[] = { - { .id = "DS1307", .driver_data = ds_1307 }, - { .id = "DS1308", .driver_data = ds_1308 }, - { .id = "DS1337", .driver_data = ds_1337 }, - { .id = "DS1338", .driver_data = ds_1338 }, - { .id = "DS1339", .driver_data = ds_1339 }, - { .id = "DS1388", .driver_data = ds_1388 }, - { .id = "DS1340", .driver_data = ds_1340 }, - { .id = "DS1341", .driver_data = ds_1341 }, - { .id = "DS3231", .driver_data = ds_3231 }, - { .id = "M41T0", .driver_data = m41t0 }, - { .id = "M41T00", .driver_data = m41t00 }, - { .id = "M41T11", .driver_data = m41t11 }, - { .id = "MCP7940X", .driver_data = 
mcp794xx }, - { .id = "MCP7941X", .driver_data = mcp794xx }, - { .id = "PT7C4338", .driver_data = ds_1307 }, - { .id = "RX8025", .driver_data = rx_8025 }, - { .id = "ISL12057", .driver_data = ds_1337 }, - { .id = "RX8130", .driver_data = rx_8130 }, - { } -}; -MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids); -#endif - -/* - * The ds1337 and ds1339 both have two alarms, but we only use the first - * one (with a "seconds" field). For ds1337 we expect nINTA is our alarm - * signal; ds1339 chips have only one alarm signal. - */ -static irqreturn_t ds1307_irq(int irq, void *dev_id) -{ - struct ds1307 *ds1307 = dev_id; - struct mutex *lock = &ds1307->rtc->ops_lock; - int stat, ret; - - mutex_lock(lock); - ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &stat); - if (ret) - goto out; - - if (stat & DS1337_BIT_A1I) { - stat &= ~DS1337_BIT_A1I; - regmap_write(ds1307->regmap, DS1337_REG_STATUS, stat); - - ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL, - DS1337_BIT_A1IE, 0); - if (ret) - goto out; - - rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); - } - -out: - mutex_unlock(lock); - - return IRQ_HANDLED; -} - -/*----------------------------------------------------------------------*/ - -static int ds1307_get_time(struct device *dev, struct rtc_time *t) -{ - struct ds1307 *ds1307 = dev_get_drvdata(dev); - int tmp, ret; - const struct chip_desc *chip = &chips[ds1307->type]; - u8 regs[7]; - - /* read the RTC date and time registers all at once */ - ret = regmap_bulk_read(ds1307->regmap, chip->offset, regs, - sizeof(regs)); - if (ret) { - dev_err(dev, "%s error %d\n", "read", ret); - return ret; - } - - dev_dbg(dev, "%s: %7ph\n", "read", regs); - - /* if oscillator fail bit is set, no data can be trusted */ - if (ds1307->type == m41t0 && - regs[DS1307_REG_MIN] & M41T0_BIT_OF) { - dev_warn_once(dev, "oscillator failed, set time!\n"); - return -EINVAL; - } + /* if oscillator fail bit is set, no data can be trusted */ + if (ds1307->type == m41t0 && + regs[DS1307_REG_MIN] & M41T0_BIT_OF) { + dev_warn_once(dev, "oscillator failed, set time!\n"); + return -EINVAL; + } t->tm_sec = bcd2bin(regs[DS1307_REG_SECS] & 0x7f); t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f); @@ -548,6 +307,17 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) dev_err(dev, "%s error %d\n", "write", result); return result; } + + if (ds1307->type == rx_8130) { + /* clear Voltage Loss Flag as data is available now */ + result = regmap_write(ds1307->regmap, RX8130_REG_FLAG, + ~(u8)RX8130_REG_FLAG_VLF); + if (result) { + dev_err(dev, "%s error %d\n", "write", result); + return result; + } + } + return 0; } @@ -666,29 +436,28 @@ static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled) enabled ? DS1337_BIT_A1IE : 0); } -static const struct rtc_class_ops ds13xx_rtc_ops = { - .read_time = ds1307_get_time, - .set_time = ds1307_set_time, - .read_alarm = ds1337_read_alarm, - .set_alarm = ds1337_set_alarm, - .alarm_irq_enable = ds1307_alarm_irq_enable, -}; - -/*----------------------------------------------------------------------*/ - -/* - * Alarm support for rx8130 devices. - */ +static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, u32 ohms, bool diode) +{ + u8 setup = (diode) ? 
DS1307_TRICKLE_CHARGER_DIODE : + DS1307_TRICKLE_CHARGER_NO_DIODE; -#define RX8130_REG_ALARM_MIN 0x07 -#define RX8130_REG_ALARM_HOUR 0x08 -#define RX8130_REG_ALARM_WEEK_OR_DAY 0x09 -#define RX8130_REG_EXTENSION 0x0c -#define RX8130_REG_EXTENSION_WADA BIT(3) -#define RX8130_REG_FLAG 0x0d -#define RX8130_REG_FLAG_AF BIT(3) -#define RX8130_REG_CONTROL0 0x0e -#define RX8130_REG_CONTROL0_AIE BIT(3) + switch (ohms) { + case 250: + setup |= DS1307_TRICKLE_CHARGER_250_OHM; + break; + case 2000: + setup |= DS1307_TRICKLE_CHARGER_2K_OHM; + break; + case 4000: + setup |= DS1307_TRICKLE_CHARGER_4K_OHM; + break; + default: + dev_warn(ds1307->dev, + "Unsupported ohm value %u in dt\n", ohms); + return 0; + } + return setup; +} static irqreturn_t rx8130_irq(int irq, void *dev_id) { @@ -785,8 +554,8 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t) if (ret < 0) return ret; - ctl[0] &= ~RX8130_REG_EXTENSION_WADA; - ctl[1] |= RX8130_REG_FLAG_AF; + ctl[0] &= RX8130_REG_EXTENSION_WADA; + ctl[1] &= ~RX8130_REG_FLAG_AF; ctl[2] &= ~RX8130_REG_CONTROL0_AIE; ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, @@ -809,8 +578,7 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t) ctl[2] |= RX8130_REG_CONTROL0_AIE; - return regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, - sizeof(ctl)); + return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, ctl[2]); } static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled) @@ -833,54 +601,459 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled) return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, reg); } -/*----------------------------------------------------------------------*/ +static irqreturn_t mcp794xx_irq(int irq, void *dev_id) +{ + struct ds1307 *ds1307 = dev_id; + struct mutex *lock = &ds1307->rtc->ops_lock; + int reg, ret; + + mutex_lock(lock); + + /* Check and clear alarm 0 interrupt flag. */ + ret = regmap_read(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, ®); + if (ret) + goto out; + if (!(reg & MCP794XX_BIT_ALMX_IF)) + goto out; + reg &= ~MCP794XX_BIT_ALMX_IF; + ret = regmap_write(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, reg); + if (ret) + goto out; + + /* Disable alarm 0. */ + ret = regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL, + MCP794XX_BIT_ALM0_EN, 0); + if (ret) + goto out; + + rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); + +out: + mutex_unlock(lock); + + return IRQ_HANDLED; +} + +static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + u8 regs[10]; + int ret; + + if (!test_bit(HAS_ALARM, &ds1307->flags)) + return -EINVAL; + + /* Read control and alarm 0 registers. */ + ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, + sizeof(regs)); + if (ret) + return ret; + + t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN); + + /* Report alarm 0 time assuming 24-hour and day-of-month modes. 
*/ + t->time.tm_sec = bcd2bin(regs[3] & 0x7f); + t->time.tm_min = bcd2bin(regs[4] & 0x7f); + t->time.tm_hour = bcd2bin(regs[5] & 0x3f); + t->time.tm_wday = bcd2bin(regs[6] & 0x7) - 1; + t->time.tm_mday = bcd2bin(regs[7] & 0x3f); + t->time.tm_mon = bcd2bin(regs[8] & 0x1f) - 1; + t->time.tm_year = -1; + t->time.tm_yday = -1; + t->time.tm_isdst = -1; + + dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " + "enabled=%d polarity=%d irq=%d match=%lu\n", __func__, + t->time.tm_sec, t->time.tm_min, t->time.tm_hour, + t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled, + !!(regs[6] & MCP794XX_BIT_ALMX_POL), + !!(regs[6] & MCP794XX_BIT_ALMX_IF), + (regs[6] & MCP794XX_MSK_ALMX_MATCH) >> 4); + + return 0; +} + +/* + * We may have a random RTC weekday, therefore calculate alarm weekday based + * on current weekday we read from the RTC timekeeping regs + */ +static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm) +{ + struct rtc_time tm_now; + int days_now, days_alarm, ret; + + ret = ds1307_get_time(dev, &tm_now); + if (ret) + return ret; + + days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60); + days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60); + + return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1; +} + +static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + unsigned char regs[10]; + int wday, ret; + + if (!test_bit(HAS_ALARM, &ds1307->flags)) + return -EINVAL; + + wday = mcp794xx_alm_weekday(dev, &t->time); + if (wday < 0) + return wday; + + dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " + "enabled=%d pending=%d\n", __func__, + t->time.tm_sec, t->time.tm_min, t->time.tm_hour, + t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, + t->enabled, t->pending); + + /* Read control and alarm 0 registers. */ + ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, + sizeof(regs)); + if (ret) + return ret; + + /* Set alarm 0, using 24-hour and day-of-month modes. */ + regs[3] = bin2bcd(t->time.tm_sec); + regs[4] = bin2bcd(t->time.tm_min); + regs[5] = bin2bcd(t->time.tm_hour); + regs[6] = wday; + regs[7] = bin2bcd(t->time.tm_mday); + regs[8] = bin2bcd(t->time.tm_mon + 1); + + /* Clear the alarm 0 interrupt flag. */ + regs[6] &= ~MCP794XX_BIT_ALMX_IF; + /* Set alarm match: second, minute, hour, day, date, month. */ + regs[6] |= MCP794XX_MSK_ALMX_MATCH; + /* Disable interrupt. We will not enable until completely programmed */ + regs[0] &= ~MCP794XX_BIT_ALM0_EN; + + ret = regmap_bulk_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs, + sizeof(regs)); + if (ret) + return ret; + + if (!t->enabled) + return 0; + regs[0] |= MCP794XX_BIT_ALM0_EN; + return regmap_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs[0]); +} + +static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + + if (!test_bit(HAS_ALARM, &ds1307->flags)) + return -EINVAL; + + return regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL, + MCP794XX_BIT_ALM0_EN, + enabled ? 
MCP794XX_BIT_ALM0_EN : 0); +} + +static int m41txx_rtc_read_offset(struct device *dev, long *offset) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + unsigned int ctrl_reg; + u8 val; + + regmap_read(ds1307->regmap, M41TXX_REG_CONTROL, &ctrl_reg); + + val = ctrl_reg & M41TXX_M_CALIBRATION; + + /* check if positive */ + if (ctrl_reg & M41TXX_BIT_CALIB_SIGN) + *offset = (val * M41TXX_POS_OFFSET_STEP_PPB); + else + *offset = -(val * M41TXX_NEG_OFFSET_STEP_PPB); + + return 0; +} + +static int m41txx_rtc_set_offset(struct device *dev, long offset) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + unsigned int ctrl_reg; + + if ((offset < M41TXX_MIN_OFFSET) || (offset > M41TXX_MAX_OFFSET)) + return -ERANGE; + + if (offset >= 0) { + ctrl_reg = DIV_ROUND_CLOSEST(offset, + M41TXX_POS_OFFSET_STEP_PPB); + ctrl_reg |= M41TXX_BIT_CALIB_SIGN; + } else { + ctrl_reg = DIV_ROUND_CLOSEST(abs(offset), + M41TXX_NEG_OFFSET_STEP_PPB); + } + + return regmap_update_bits(ds1307->regmap, M41TXX_REG_CONTROL, + M41TXX_M_CALIBRATION | M41TXX_BIT_CALIB_SIGN, + ctrl_reg); +} + +static const struct rtc_class_ops rx8130_rtc_ops = { + .read_time = ds1307_get_time, + .set_time = ds1307_set_time, + .read_alarm = rx8130_read_alarm, + .set_alarm = rx8130_set_alarm, + .alarm_irq_enable = rx8130_alarm_irq_enable, +}; + +static const struct rtc_class_ops mcp794xx_rtc_ops = { + .read_time = ds1307_get_time, + .set_time = ds1307_set_time, + .read_alarm = mcp794xx_read_alarm, + .set_alarm = mcp794xx_set_alarm, + .alarm_irq_enable = mcp794xx_alarm_irq_enable, +}; + +static const struct rtc_class_ops m41txx_rtc_ops = { + .read_time = ds1307_get_time, + .set_time = ds1307_set_time, + .read_alarm = ds1337_read_alarm, + .set_alarm = ds1337_set_alarm, + .alarm_irq_enable = ds1307_alarm_irq_enable, + .read_offset = m41txx_rtc_read_offset, + .set_offset = m41txx_rtc_set_offset, +}; + +static const struct chip_desc chips[last_ds_type] = { + [ds_1307] = { + .nvram_offset = 8, + .nvram_size = 56, + }, + [ds_1308] = { + .nvram_offset = 8, + .nvram_size = 56, + }, + [ds_1337] = { + .alarm = 1, + .century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, + }, + [ds_1338] = { + .nvram_offset = 8, + .nvram_size = 56, + }, + [ds_1339] = { + .alarm = 1, + .century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, + .bbsqi_bit = DS1339_BIT_BBSQI, + .trickle_charger_reg = 0x10, + .do_trickle_setup = &do_trickle_setup_ds1339, + }, + [ds_1340] = { + .century_reg = DS1307_REG_HOUR, + .century_enable_bit = DS1340_BIT_CENTURY_EN, + .century_bit = DS1340_BIT_CENTURY, + .do_trickle_setup = &do_trickle_setup_ds1339, + .trickle_charger_reg = 0x08, + }, + [ds_1341] = { + .century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, + }, + [ds_1388] = { + .offset = 1, + .trickle_charger_reg = 0x0a, + }, + [ds_3231] = { + .alarm = 1, + .century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, + .bbsqi_bit = DS3231_BIT_BBSQW, + }, + [rx_8130] = { + .alarm = 1, + /* this is battery backed SRAM */ + .nvram_offset = 0x20, + .nvram_size = 4, /* 32bit (4 word x 8 bit) */ + .offset = 0x10, + .irq_handler = rx8130_irq, + .rtc_ops = &rx8130_rtc_ops, + }, + [m41t0] = { + .rtc_ops = &m41txx_rtc_ops, + }, + [m41t00] = { + .rtc_ops = &m41txx_rtc_ops, + }, + [m41t11] = { + /* this is battery backed SRAM */ + .nvram_offset = 8, + .nvram_size = 56, + .rtc_ops = &m41txx_rtc_ops, + }, + [mcp794xx] = { + .alarm = 1, + /* this is battery backed SRAM */ + .nvram_offset = 0x20, + .nvram_size = 0x40, + .irq_handler = mcp794xx_irq, + 
.rtc_ops = &mcp794xx_rtc_ops, + }, +}; + +static const struct i2c_device_id ds1307_id[] = { + { "ds1307", ds_1307 }, + { "ds1308", ds_1308 }, + { "ds1337", ds_1337 }, + { "ds1338", ds_1338 }, + { "ds1339", ds_1339 }, + { "ds1388", ds_1388 }, + { "ds1340", ds_1340 }, + { "ds1341", ds_1341 }, + { "ds3231", ds_3231 }, + { "m41t0", m41t0 }, + { "m41t00", m41t00 }, + { "m41t11", m41t11 }, + { "mcp7940x", mcp794xx }, + { "mcp7941x", mcp794xx }, + { "pt7c4338", ds_1307 }, + { "rx8025", rx_8025 }, + { "isl12057", ds_1337 }, + { "rx8130", rx_8130 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ds1307_id); + +#ifdef CONFIG_OF +static const struct of_device_id ds1307_of_match[] = { + { + .compatible = "dallas,ds1307", + .data = (void *)ds_1307 + }, + { + .compatible = "dallas,ds1308", + .data = (void *)ds_1308 + }, + { + .compatible = "dallas,ds1337", + .data = (void *)ds_1337 + }, + { + .compatible = "dallas,ds1338", + .data = (void *)ds_1338 + }, + { + .compatible = "dallas,ds1339", + .data = (void *)ds_1339 + }, + { + .compatible = "dallas,ds1388", + .data = (void *)ds_1388 + }, + { + .compatible = "dallas,ds1340", + .data = (void *)ds_1340 + }, + { + .compatible = "dallas,ds1341", + .data = (void *)ds_1341 + }, + { + .compatible = "maxim,ds3231", + .data = (void *)ds_3231 + }, + { + .compatible = "st,m41t0", + .data = (void *)m41t0 + }, + { + .compatible = "st,m41t00", + .data = (void *)m41t00 + }, + { + .compatible = "st,m41t11", + .data = (void *)m41t11 + }, + { + .compatible = "microchip,mcp7940x", + .data = (void *)mcp794xx + }, + { + .compatible = "microchip,mcp7941x", + .data = (void *)mcp794xx + }, + { + .compatible = "pericom,pt7c4338", + .data = (void *)ds_1307 + }, + { + .compatible = "epson,rx8025", + .data = (void *)rx_8025 + }, + { + .compatible = "isil,isl12057", + .data = (void *)ds_1337 + }, + { + .compatible = "epson,rx8130", + .data = (void *)rx_8130 + }, + { } +}; +MODULE_DEVICE_TABLE(of, ds1307_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id ds1307_acpi_ids[] = { + { .id = "DS1307", .driver_data = ds_1307 }, + { .id = "DS1308", .driver_data = ds_1308 }, + { .id = "DS1337", .driver_data = ds_1337 }, + { .id = "DS1338", .driver_data = ds_1338 }, + { .id = "DS1339", .driver_data = ds_1339 }, + { .id = "DS1388", .driver_data = ds_1388 }, + { .id = "DS1340", .driver_data = ds_1340 }, + { .id = "DS1341", .driver_data = ds_1341 }, + { .id = "DS3231", .driver_data = ds_3231 }, + { .id = "M41T0", .driver_data = m41t0 }, + { .id = "M41T00", .driver_data = m41t00 }, + { .id = "M41T11", .driver_data = m41t11 }, + { .id = "MCP7940X", .driver_data = mcp794xx }, + { .id = "MCP7941X", .driver_data = mcp794xx }, + { .id = "PT7C4338", .driver_data = ds_1307 }, + { .id = "RX8025", .driver_data = rx_8025 }, + { .id = "ISL12057", .driver_data = ds_1337 }, + { .id = "RX8130", .driver_data = rx_8130 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids); +#endif /* - * Alarm support for mcp794xx devices. + * The ds1337 and ds1339 both have two alarms, but we only use the first + * one (with a "seconds" field). For ds1337 we expect nINTA is our alarm + * signal; ds1339 chips have only one alarm signal. 
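/*
 * Illustrative sketch (not part of the applied patch): rtc-ds1307, like most
 * drivers touched in this series, stores calendar values in BCD and converts
 * with bcd2bin()/bin2bcd() from <linux/bcd.h>.  Open-coded equivalents for
 * two-digit values, with hypothetical demo_* names:
 */
static inline unsigned int demo_bcd2bin(unsigned char bcd)
{
	/* low nibble is the ones digit, high nibble is the tens digit */
	return (bcd & 0x0f) + (bcd >> 4) * 10;
}

static inline unsigned char demo_bin2bcd(unsigned int bin)
{
	/* valid for 0..99, as used for seconds/minutes/hours/dates */
	return ((bin / 10) << 4) | (bin % 10);
}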
*/ - -#define MCP794XX_REG_CONTROL 0x07 -# define MCP794XX_BIT_ALM0_EN 0x10 -# define MCP794XX_BIT_ALM1_EN 0x20 -#define MCP794XX_REG_ALARM0_BASE 0x0a -#define MCP794XX_REG_ALARM0_CTRL 0x0d -#define MCP794XX_REG_ALARM1_BASE 0x11 -#define MCP794XX_REG_ALARM1_CTRL 0x14 -# define MCP794XX_BIT_ALMX_IF BIT(3) -# define MCP794XX_BIT_ALMX_C0 BIT(4) -# define MCP794XX_BIT_ALMX_C1 BIT(5) -# define MCP794XX_BIT_ALMX_C2 BIT(6) -# define MCP794XX_BIT_ALMX_POL BIT(7) -# define MCP794XX_MSK_ALMX_MATCH (MCP794XX_BIT_ALMX_C0 | \ - MCP794XX_BIT_ALMX_C1 | \ - MCP794XX_BIT_ALMX_C2) - -static irqreturn_t mcp794xx_irq(int irq, void *dev_id) +static irqreturn_t ds1307_irq(int irq, void *dev_id) { - struct ds1307 *ds1307 = dev_id; - struct mutex *lock = &ds1307->rtc->ops_lock; - int reg, ret; + struct ds1307 *ds1307 = dev_id; + struct mutex *lock = &ds1307->rtc->ops_lock; + int stat, ret; mutex_lock(lock); - - /* Check and clear alarm 0 interrupt flag. */ - ret = regmap_read(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, ®); - if (ret) - goto out; - if (!(reg & MCP794XX_BIT_ALMX_IF)) - goto out; - reg &= ~MCP794XX_BIT_ALMX_IF; - ret = regmap_write(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, reg); + ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &stat); if (ret) goto out; - /* Disable alarm 0. */ - ret = regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL, - MCP794XX_BIT_ALM0_EN, 0); - if (ret) - goto out; + if (stat & DS1337_BIT_A1I) { + stat &= ~DS1337_BIT_A1I; + regmap_write(ds1307->regmap, DS1337_REG_STATUS, stat); - rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); + ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL, + DS1337_BIT_A1IE, 0); + if (ret) + goto out; + + rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); + } out: mutex_unlock(lock); @@ -888,167 +1061,15 @@ out: return IRQ_HANDLED; } -static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) -{ - struct ds1307 *ds1307 = dev_get_drvdata(dev); - u8 regs[10]; - int ret; - - if (!test_bit(HAS_ALARM, &ds1307->flags)) - return -EINVAL; - - /* Read control and alarm 0 registers. */ - ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, - sizeof(regs)); - if (ret) - return ret; - - t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN); - - /* Report alarm 0 time assuming 24-hour and day-of-month modes. 
*/ - t->time.tm_sec = bcd2bin(regs[3] & 0x7f); - t->time.tm_min = bcd2bin(regs[4] & 0x7f); - t->time.tm_hour = bcd2bin(regs[5] & 0x3f); - t->time.tm_wday = bcd2bin(regs[6] & 0x7) - 1; - t->time.tm_mday = bcd2bin(regs[7] & 0x3f); - t->time.tm_mon = bcd2bin(regs[8] & 0x1f) - 1; - t->time.tm_year = -1; - t->time.tm_yday = -1; - t->time.tm_isdst = -1; - - dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " - "enabled=%d polarity=%d irq=%d match=%lu\n", __func__, - t->time.tm_sec, t->time.tm_min, t->time.tm_hour, - t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled, - !!(regs[6] & MCP794XX_BIT_ALMX_POL), - !!(regs[6] & MCP794XX_BIT_ALMX_IF), - (regs[6] & MCP794XX_MSK_ALMX_MATCH) >> 4); - - return 0; -} - -/* - * We may have a random RTC weekday, therefore calculate alarm weekday based - * on current weekday we read from the RTC timekeeping regs - */ -static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm) -{ - struct rtc_time tm_now; - int days_now, days_alarm, ret; - - ret = ds1307_get_time(dev, &tm_now); - if (ret) - return ret; - - days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60); - days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60); - - return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1; -} - -static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) -{ - struct ds1307 *ds1307 = dev_get_drvdata(dev); - unsigned char regs[10]; - int wday, ret; - - if (!test_bit(HAS_ALARM, &ds1307->flags)) - return -EINVAL; - - wday = mcp794xx_alm_weekday(dev, &t->time); - if (wday < 0) - return wday; - - dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " - "enabled=%d pending=%d\n", __func__, - t->time.tm_sec, t->time.tm_min, t->time.tm_hour, - t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, - t->enabled, t->pending); - - /* Read control and alarm 0 registers. */ - ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, - sizeof(regs)); - if (ret) - return ret; - - /* Set alarm 0, using 24-hour and day-of-month modes. */ - regs[3] = bin2bcd(t->time.tm_sec); - regs[4] = bin2bcd(t->time.tm_min); - regs[5] = bin2bcd(t->time.tm_hour); - regs[6] = wday; - regs[7] = bin2bcd(t->time.tm_mday); - regs[8] = bin2bcd(t->time.tm_mon + 1); - - /* Clear the alarm 0 interrupt flag. */ - regs[6] &= ~MCP794XX_BIT_ALMX_IF; - /* Set alarm match: second, minute, hour, day, date, month. */ - regs[6] |= MCP794XX_MSK_ALMX_MATCH; - /* Disable interrupt. We will not enable until completely programmed */ - regs[0] &= ~MCP794XX_BIT_ALM0_EN; - - ret = regmap_bulk_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs, - sizeof(regs)); - if (ret) - return ret; - - if (!t->enabled) - return 0; - regs[0] |= MCP794XX_BIT_ALM0_EN; - return regmap_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs[0]); -} - -static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled) -{ - struct ds1307 *ds1307 = dev_get_drvdata(dev); - - if (!test_bit(HAS_ALARM, &ds1307->flags)) - return -EINVAL; - - return regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL, - MCP794XX_BIT_ALM0_EN, - enabled ? 
MCP794XX_BIT_ALM0_EN : 0); -} - -static int m41txx_rtc_read_offset(struct device *dev, long *offset) -{ - struct ds1307 *ds1307 = dev_get_drvdata(dev); - unsigned int ctrl_reg; - u8 val; - - regmap_read(ds1307->regmap, M41TXX_REG_CONTROL, &ctrl_reg); - - val = ctrl_reg & M41TXX_M_CALIBRATION; - - /* check if positive */ - if (ctrl_reg & M41TXX_BIT_CALIB_SIGN) - *offset = (val * M41TXX_POS_OFFSET_STEP_PPB); - else - *offset = -(val * M41TXX_NEG_OFFSET_STEP_PPB); - - return 0; -} - -static int m41txx_rtc_set_offset(struct device *dev, long offset) -{ - struct ds1307 *ds1307 = dev_get_drvdata(dev); - unsigned int ctrl_reg; - - if ((offset < M41TXX_MIN_OFFSET) || (offset > M41TXX_MAX_OFFSET)) - return -ERANGE; - - if (offset >= 0) { - ctrl_reg = DIV_ROUND_CLOSEST(offset, - M41TXX_POS_OFFSET_STEP_PPB); - ctrl_reg |= M41TXX_BIT_CALIB_SIGN; - } else { - ctrl_reg = DIV_ROUND_CLOSEST(abs(offset), - M41TXX_NEG_OFFSET_STEP_PPB); - } +/*----------------------------------------------------------------------*/ - return regmap_update_bits(ds1307->regmap, M41TXX_REG_CONTROL, - M41TXX_M_CALIBRATION | M41TXX_BIT_CALIB_SIGN, - ctrl_reg); -} +static const struct rtc_class_ops ds13xx_rtc_ops = { + .read_time = ds1307_get_time, + .set_time = ds1307_set_time, + .read_alarm = ds1337_read_alarm, + .set_alarm = ds1337_set_alarm, + .alarm_irq_enable = ds1307_alarm_irq_enable, +}; static ssize_t frequency_test_store(struct device *dev, struct device_attribute *attr, @@ -1137,30 +1158,6 @@ static int ds1307_nvram_write(void *priv, unsigned int offset, void *val, /*----------------------------------------------------------------------*/ -static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, - u32 ohms, bool diode) -{ - u8 setup = (diode) ? DS1307_TRICKLE_CHARGER_DIODE : - DS1307_TRICKLE_CHARGER_NO_DIODE; - - switch (ohms) { - case 250: - setup |= DS1307_TRICKLE_CHARGER_250_OHM; - break; - case 2000: - setup |= DS1307_TRICKLE_CHARGER_2K_OHM; - break; - case 4000: - setup |= DS1307_TRICKLE_CHARGER_4K_OHM; - break; - default: - dev_warn(ds1307->dev, - "Unsupported ohm value %u in dt\n", ohms); - return 0; - } - return setup; -} - static u8 ds1307_trickle_init(struct ds1307 *ds1307, const struct chip_desc *chip) { diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c index 9caaccccaa57..b1ebca099b0d 100644 --- a/drivers/rtc/rtc-ds1672.c +++ b/drivers/rtc/rtc-ds1672.c @@ -58,7 +58,8 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm) "%s: raw read data - counters=%02x,%02x,%02x,%02x\n", __func__, buf[0], buf[1], buf[2], buf[3]); - time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; + time = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | + (buf[1] << 8) | buf[0]; rtc_time_to_tm(time, tm); diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c index e5ad527cb75e..d03f5d212eea 100644 --- a/drivers/rtc/rtc-hym8563.c +++ b/drivers/rtc/rtc-hym8563.c @@ -109,6 +109,8 @@ static int hym8563_rtc_read_time(struct device *dev, struct rtc_time *tm) } ret = i2c_smbus_read_i2c_block_data(client, HYM8563_SEC, 7, buf); + if (ret < 0) + return ret; tm->tm_sec = bcd2bin(buf[0] & HYM8563_SEC_MASK); tm->tm_min = bcd2bin(buf[1] & HYM8563_MIN_MASK); diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c index 7ff08544532a..19642bfd913a 100644 --- a/drivers/rtc/rtc-imx-sc.c +++ b/drivers/rtc/rtc-imx-sc.c @@ -3,6 +3,7 @@ * Copyright 2018 NXP. 
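/*
 * Illustrative sketch (not part of the applied patch): the rtc-ds1672 change
 * above widens buf[3] before shifting so that "buf[3] << 24" is no longer
 * evaluated in (potentially overflowing) signed int arithmetic.  A standalone
 * equivalent of the fixed counter assembly:
 */
static unsigned long demo_counter_from_le_bytes(const unsigned char buf[4])
{
	return ((unsigned long)buf[3] << 24) | ((unsigned long)buf[2] << 16) |
	       ((unsigned long)buf[1] << 8) | buf[0];
}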
*/ +#include #include #include #include @@ -12,6 +13,9 @@ #define IMX_SC_TIMER_FUNC_GET_RTC_SEC1970 9 #define IMX_SC_TIMER_FUNC_SET_RTC_TIME 6 +#define IMX_SIP_SRTC 0xC2000002 +#define IMX_SIP_SRTC_SET_TIME 0x0 + static struct imx_sc_ipc *rtc_ipc_handle; static struct rtc_device *imx_sc_rtc; @@ -37,13 +41,28 @@ static int imx_sc_rtc_read_time(struct device *dev, struct rtc_time *tm) return ret; } - rtc_time_to_tm(msg.time, tm); + rtc_time64_to_tm(msg.time, tm); return 0; } +static int imx_sc_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct arm_smccc_res res; + + /* pack 2 time parameters into 1 register, 16 bits for each */ + arm_smccc_smc(IMX_SIP_SRTC, IMX_SIP_SRTC_SET_TIME, + ((tm->tm_year + 1900) << 16) | (tm->tm_mon + 1), + (tm->tm_mday << 16) | tm->tm_hour, + (tm->tm_min << 16) | tm->tm_sec, + 0, 0, 0, &res); + + return res.a0; +} + static const struct rtc_class_ops imx_sc_rtc_ops = { .read_time = imx_sc_rtc_read_time, + .set_time = imx_sc_rtc_set_time, }; static int imx_sc_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c index 37ab3e1d25f5..471e395b20db 100644 --- a/drivers/rtc/rtc-isl1208.c +++ b/drivers/rtc/rtc-isl1208.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -73,10 +74,50 @@ static struct i2c_driver isl1208_driver; /* ISL1208 various variants */ -enum { +enum isl1208_id { TYPE_ISL1208 = 0, + TYPE_ISL1209, TYPE_ISL1218, TYPE_ISL1219, + ISL_LAST_ID +}; + +/* Chip capabilities table */ +static const struct isl1208_config { + const char name[8]; + unsigned int nvmem_length; + unsigned has_tamper:1; + unsigned has_timestamp:1; +} isl1208_configs[] = { + [TYPE_ISL1208] = { "isl1208", 2, false, false }, + [TYPE_ISL1209] = { "isl1209", 2, true, false }, + [TYPE_ISL1218] = { "isl1218", 8, false, false }, + [TYPE_ISL1219] = { "isl1219", 2, true, true }, +}; + +static const struct i2c_device_id isl1208_id[] = { + { "isl1208", TYPE_ISL1208 }, + { "isl1209", TYPE_ISL1209 }, + { "isl1218", TYPE_ISL1218 }, + { "isl1219", TYPE_ISL1219 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, isl1208_id); + +static const struct of_device_id isl1208_of_match[] = { + { .compatible = "isil,isl1208", .data = &isl1208_configs[TYPE_ISL1208] }, + { .compatible = "isil,isl1209", .data = &isl1208_configs[TYPE_ISL1209] }, + { .compatible = "isil,isl1218", .data = &isl1208_configs[TYPE_ISL1218] }, + { .compatible = "isil,isl1219", .data = &isl1208_configs[TYPE_ISL1219] }, + { } +}; +MODULE_DEVICE_TABLE(of, isl1208_of_match); + +/* Device state */ +struct isl1208_state { + struct nvmem_config nvmem_config; + struct rtc_device *rtc; + const struct isl1208_config *config; }; /* block read */ @@ -161,6 +202,7 @@ isl1208_i2c_get_atr(struct i2c_client *client) return atr; } +/* returns adjustment value + 100 */ static int isl1208_i2c_get_dtr(struct i2c_client *client) { @@ -171,7 +213,7 @@ isl1208_i2c_get_dtr(struct i2c_client *client) /* dtr encodes adjustments of {-60,-40,-20,0,20,40,60} ppm */ dtr = ((dtr & 0x3) * 20) * (dtr & (1 << 2) ? -1 : 1); - return dtr; + return dtr + 100; } static int @@ -248,8 +290,8 @@ isl1208_rtc_proc(struct device *dev, struct seq_file *seq) (sr & ISL1208_REG_SR_RTCF) ? 
"bad" : "okay"); dtr = isl1208_i2c_get_dtr(client); - if (dtr >= 0 - 1) - seq_printf(seq, "digital_trim\t: %d ppm\n", dtr); + if (dtr >= 0) + seq_printf(seq, "digital_trim\t: %d ppm\n", dtr - 100); atr = isl1208_i2c_get_atr(client); if (atr >= 0) @@ -556,7 +598,7 @@ isl1208_rtc_interrupt(int irq, void *data) { unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct i2c_client *client = data; - struct rtc_device *rtc = i2c_get_clientdata(client); + struct isl1208_state *isl1208 = i2c_get_clientdata(client); int handled = 0, sr, err; /* @@ -579,7 +621,7 @@ isl1208_rtc_interrupt(int irq, void *data) if (sr & ISL1208_REG_SR_ALM) { dev_dbg(&client->dev, "alarm!\n"); - rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); + rtc_update_irq(isl1208->rtc, 1, RTC_IRQF | RTC_AF); /* Clear the alarm */ sr &= ~ISL1208_REG_SR_ALM; @@ -596,11 +638,12 @@ isl1208_rtc_interrupt(int irq, void *data) return err; } - if (sr & ISL1208_REG_SR_EVT) { - sysfs_notify(&rtc->dev.kobj, NULL, - dev_attr_timestamp0.attr.name); + if (isl1208->config->has_tamper && (sr & ISL1208_REG_SR_EVT)) { dev_warn(&client->dev, "event detected"); handled = 1; + if (isl1208->config->has_timestamp) + sysfs_notify(&isl1208->rtc->dev.kobj, NULL, + dev_attr_timestamp0.attr.name); } return handled ? IRQ_HANDLED : IRQ_NONE; @@ -637,7 +680,7 @@ isl1208_sysfs_show_dtrim(struct device *dev, if (dtr < 0) return dtr; - return sprintf(buf, "%d ppm\n", dtr); + return sprintf(buf, "%d ppm\n", dtr - 100); } static DEVICE_ATTR(dtrim, S_IRUGO, isl1208_sysfs_show_dtrim, NULL); @@ -700,6 +743,46 @@ static const struct attribute_group isl1219_rtc_sysfs_files = { .attrs = isl1219_rtc_attrs, }; +static int isl1208_nvmem_read(void *priv, unsigned int off, void *buf, + size_t count) +{ + struct isl1208_state *isl1208 = priv; + struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent); + int ret; + + /* nvmem sanitizes offset/count for us, but count==0 is possible */ + if (!count) + return count; + ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1 + off, buf, + count); + return ret == 0 ? count : ret; +} + +static int isl1208_nvmem_write(void *priv, unsigned int off, void *buf, + size_t count) +{ + struct isl1208_state *isl1208 = priv; + struct i2c_client *client = to_i2c_client(isl1208->rtc->dev.parent); + int ret; + + /* nvmem sanitizes off/count for us, but count==0 is possible */ + if (!count) + return count; + ret = isl1208_i2c_set_regs(client, ISL1208_REG_USR1 + off, buf, + count); + + return ret == 0 ? 
count : ret; +} + +static const struct nvmem_config isl1208_nvmem_config = { + .name = "isl1208_nvram", + .word_size = 1, + .stride = 1, + /* .size from chip specific config */ + .reg_read = isl1208_nvmem_read, + .reg_write = isl1208_nvmem_write, +}; + static int isl1208_setup_irq(struct i2c_client *client, int irq) { int rc = devm_request_threaded_irq(&client->dev, irq, NULL, @@ -722,7 +805,7 @@ static int isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; - struct rtc_device *rtc; + struct isl1208_state *isl1208; int evdet_irq = -1; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) @@ -731,13 +814,33 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) if (isl1208_i2c_validate_client(client) < 0) return -ENODEV; - rtc = devm_rtc_allocate_device(&client->dev); - if (IS_ERR(rtc)) - return PTR_ERR(rtc); + /* Allocate driver state, point i2c client data to it */ + isl1208 = devm_kzalloc(&client->dev, sizeof(*isl1208), GFP_KERNEL); + if (!isl1208) + return -ENOMEM; + i2c_set_clientdata(client, isl1208); + + /* Determine which chip we have */ + if (client->dev.of_node) { + isl1208->config = of_device_get_match_data(&client->dev); + if (!isl1208->config) + return -ENODEV; + } else { + if (id->driver_data >= ISL_LAST_ID) + return -ENODEV; + isl1208->config = &isl1208_configs[id->driver_data]; + } + + isl1208->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(isl1208->rtc)) + return PTR_ERR(isl1208->rtc); - rtc->ops = &isl1208_rtc_ops; + isl1208->rtc->ops = &isl1208_rtc_ops; - i2c_set_clientdata(client, rtc); + /* Setup nvmem configuration in driver state struct */ + isl1208->nvmem_config = isl1208_nvmem_config; + isl1208->nvmem_config.size = isl1208->config->nvmem_length; + isl1208->nvmem_config.priv = isl1208; rc = isl1208_i2c_get_sr(client); if (rc < 0) { @@ -749,7 +852,7 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) dev_warn(&client->dev, "rtc power failure detected, " "please set clock.\n"); - if (id->driver_data == TYPE_ISL1219) { + if (isl1208->config->has_tamper) { struct device_node *np = client->dev.of_node; u32 evienb; @@ -770,13 +873,15 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) dev_err(&client->dev, "could not enable tamper detection\n"); return rc; } - rc = rtc_add_group(rtc, &isl1219_rtc_sysfs_files); + evdet_irq = of_irq_get_byname(np, "evdet"); + } + if (isl1208->config->has_timestamp) { + rc = rtc_add_group(isl1208->rtc, &isl1219_rtc_sysfs_files); if (rc) return rc; - evdet_irq = of_irq_get_byname(np, "evdet"); } - rc = rtc_add_group(rtc, &isl1208_rtc_sysfs_files); + rc = rtc_add_group(isl1208->rtc, &isl1208_rtc_sysfs_files); if (rc) return rc; @@ -790,24 +895,12 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id) if (rc) return rc; - return rtc_register_device(rtc); -} - -static const struct i2c_device_id isl1208_id[] = { - { "isl1208", TYPE_ISL1208 }, - { "isl1218", TYPE_ISL1218 }, - { "isl1219", TYPE_ISL1219 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, isl1208_id); + rc = rtc_nvmem_register(isl1208->rtc, &isl1208->nvmem_config); + if (rc) + return rc; -static const struct of_device_id isl1208_of_match[] = { - { .compatible = "isil,isl1208" }, - { .compatible = "isil,isl1218" }, - { .compatible = "isil,isl1219" }, - { } -}; -MODULE_DEVICE_TABLE(of, isl1208_of_match); + return rtc_register_device(isl1208->rtc); +} static struct i2c_driver isl1208_driver = { .driver = { diff --git a/drivers/rtc/rtc-mc146818-lib.c 
b/drivers/rtc/rtc-mc146818-lib.c index 2f1772a358ca..18a6f15e313d 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -82,7 +82,7 @@ unsigned int mc146818_get_time(struct rtc_time *time) time->tm_year += real_year - 72; #endif - if (century) + if (century > 20) time->tm_year += (century - 19) * 100; /* diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c new file mode 100644 index 000000000000..e08b981dfc21 --- /dev/null +++ b/drivers/rtc/rtc-meson.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RTC driver for the interal RTC block in the Amlogic Meson6, Meson8, + * Meson8b and Meson8m2 SoCs. + * + * The RTC is split in to two parts, the AHB front end and a simple serial + * connection to the actual registers. This driver manages both parts. + * + * Copyright (c) 2018 Martin Blumenstingl + * Copyright (c) 2015 Ben Dooks for Codethink Ltd + * Based on origin by Carlo Caione + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* registers accessed from cpu bus */ +#define RTC_ADDR0 0x00 + #define RTC_ADDR0_LINE_SCLK BIT(0) + #define RTC_ADDR0_LINE_SEN BIT(1) + #define RTC_ADDR0_LINE_SDI BIT(2) + #define RTC_ADDR0_START_SER BIT(17) + #define RTC_ADDR0_WAIT_SER BIT(22) + #define RTC_ADDR0_DATA GENMASK(31, 24) + +#define RTC_ADDR1 0x04 + #define RTC_ADDR1_SDO BIT(0) + #define RTC_ADDR1_S_READY BIT(1) + +#define RTC_ADDR2 0x08 +#define RTC_ADDR3 0x0c + +#define RTC_REG4 0x10 + #define RTC_REG4_STATIC_VALUE GENMASK(7, 0) + +/* rtc registers accessed via rtc-serial interface */ +#define RTC_COUNTER (0) +#define RTC_SEC_ADJ (2) +#define RTC_REGMEM_0 (4) +#define RTC_REGMEM_1 (5) +#define RTC_REGMEM_2 (6) +#define RTC_REGMEM_3 (7) + +#define RTC_ADDR_BITS (3) /* number of address bits to send */ +#define RTC_DATA_BITS (32) /* number of data bits to tx/rx */ + +#define MESON_STATIC_BIAS_CUR (0x5 << 1) +#define MESON_STATIC_VOLTAGE (0x3 << 11) +#define MESON_STATIC_DEFAULT (MESON_STATIC_BIAS_CUR | MESON_STATIC_VOLTAGE) + +struct meson_rtc { + struct rtc_device *rtc; /* rtc device we created */ + struct device *dev; /* device we bound from */ + struct reset_control *reset; /* reset source */ + struct regulator *vdd; /* voltage input */ + struct regmap *peripheral; /* peripheral registers */ + struct regmap *serial; /* serial registers */ +}; + +static const struct regmap_config meson_rtc_peripheral_regmap_config = { + .name = "peripheral-registers", + .reg_bits = 8, + .val_bits = 32, + .reg_stride = 4, + .max_register = RTC_REG4, + .fast_io = true, +}; + +/* RTC front-end serialiser controls */ + +static void meson_rtc_sclk_pulse(struct meson_rtc *rtc) +{ + udelay(5); + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SCLK, 0); + udelay(5); + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SCLK, + RTC_ADDR0_LINE_SCLK); +} + +static void meson_rtc_send_bit(struct meson_rtc *rtc, unsigned int bit) +{ + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SDI, + bit ? 
RTC_ADDR0_LINE_SDI : 0); + meson_rtc_sclk_pulse(rtc); +} + +static void meson_rtc_send_bits(struct meson_rtc *rtc, u32 data, + unsigned int nr) +{ + u32 bit = 1 << (nr - 1); + + while (bit) { + meson_rtc_send_bit(rtc, data & bit); + bit >>= 1; + } +} + +static void meson_rtc_set_dir(struct meson_rtc *rtc, u32 mode) +{ + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SEN, 0); + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SDI, 0); + meson_rtc_send_bit(rtc, mode); + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SDI, 0); +} + +static u32 meson_rtc_get_data(struct meson_rtc *rtc) +{ + u32 tmp, val = 0; + int bit; + + for (bit = 0; bit < RTC_DATA_BITS; bit++) { + meson_rtc_sclk_pulse(rtc); + val <<= 1; + + regmap_read(rtc->peripheral, RTC_ADDR1, &tmp); + val |= tmp & RTC_ADDR1_SDO; + } + + return val; +} + +static int meson_rtc_get_bus(struct meson_rtc *rtc) +{ + int ret, retries = 3; + u32 val; + + /* prepare bus for transfers, set all lines low */ + val = RTC_ADDR0_LINE_SDI | RTC_ADDR0_LINE_SEN | RTC_ADDR0_LINE_SCLK; + regmap_update_bits(rtc->peripheral, RTC_ADDR0, val, 0); + + for (retries = 0; retries < 3; retries++) { + /* wait for the bus to be ready */ + if (!regmap_read_poll_timeout(rtc->peripheral, RTC_ADDR1, val, + val & RTC_ADDR1_S_READY, 10, + 10000)) + return 0; + + dev_warn(rtc->dev, "failed to get bus, resetting RTC\n"); + + ret = reset_control_reset(rtc->reset); + if (ret) + return ret; + } + + dev_err(rtc->dev, "bus is not ready\n"); + return -ETIMEDOUT; +} + +static int meson_rtc_serial_bus_reg_read(void *context, unsigned int reg, + unsigned int *data) +{ + struct meson_rtc *rtc = context; + int ret; + + ret = meson_rtc_get_bus(rtc); + if (ret) + return ret; + + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SEN, + RTC_ADDR0_LINE_SEN); + meson_rtc_send_bits(rtc, reg, RTC_ADDR_BITS); + meson_rtc_set_dir(rtc, 0); + *data = meson_rtc_get_data(rtc); + + return 0; +} + +static int meson_rtc_serial_bus_reg_write(void *context, unsigned int reg, + unsigned int data) +{ + struct meson_rtc *rtc = context; + int ret; + + ret = meson_rtc_get_bus(rtc); + if (ret) + return ret; + + regmap_update_bits(rtc->peripheral, RTC_ADDR0, RTC_ADDR0_LINE_SEN, + RTC_ADDR0_LINE_SEN); + meson_rtc_send_bits(rtc, data, RTC_DATA_BITS); + meson_rtc_send_bits(rtc, reg, RTC_ADDR_BITS); + meson_rtc_set_dir(rtc, 1); + + return 0; +} + +static const struct regmap_bus meson_rtc_serial_bus = { + .reg_read = meson_rtc_serial_bus_reg_read, + .reg_write = meson_rtc_serial_bus_reg_write, +}; + +static const struct regmap_config meson_rtc_serial_regmap_config = { + .name = "serial-registers", + .reg_bits = 4, + .reg_stride = 1, + .val_bits = 32, + .max_register = RTC_REGMEM_3, + .fast_io = false, +}; + +static int meson_rtc_write_static(struct meson_rtc *rtc, u32 data) +{ + u32 tmp; + + regmap_write(rtc->peripheral, RTC_REG4, + FIELD_PREP(RTC_REG4_STATIC_VALUE, (data >> 8))); + + /* write the static value and start the auto serializer */ + tmp = FIELD_PREP(RTC_ADDR0_DATA, (data & 0xff)) | RTC_ADDR0_START_SER; + regmap_update_bits(rtc->peripheral, RTC_ADDR0, + RTC_ADDR0_DATA | RTC_ADDR0_START_SER, tmp); + + /* wait for the auto serializer to complete */ + return regmap_read_poll_timeout(rtc->peripheral, RTC_REG4, tmp, + !(tmp & RTC_ADDR0_WAIT_SER), 10, + 10000); +} + +/* RTC interface layer functions */ + +static int meson_rtc_gettime(struct device *dev, struct rtc_time *tm) +{ + struct meson_rtc *rtc = dev_get_drvdata(dev); + u32 time; + int ret; + + ret = 
regmap_read(rtc->serial, RTC_COUNTER, &time); + if (!ret) + rtc_time64_to_tm(time, tm); + + return ret; +} + +static int meson_rtc_settime(struct device *dev, struct rtc_time *tm) +{ + struct meson_rtc *rtc = dev_get_drvdata(dev); + + return regmap_write(rtc->serial, RTC_COUNTER, rtc_tm_to_time64(tm)); +} + +static const struct rtc_class_ops meson_rtc_ops = { + .read_time = meson_rtc_gettime, + .set_time = meson_rtc_settime, +}; + +/* NVMEM interface layer functions */ + +static int meson_rtc_regmem_read(void *context, unsigned int offset, + void *buf, size_t bytes) +{ + struct meson_rtc *rtc = context; + unsigned int read_offset, read_size; + + read_offset = RTC_REGMEM_0 + (offset / 4); + read_size = bytes / 4; + + return regmap_bulk_read(rtc->serial, read_offset, buf, read_size); +} + +static int meson_rtc_regmem_write(void *context, unsigned int offset, + void *buf, size_t bytes) +{ + struct meson_rtc *rtc = context; + unsigned int write_offset, write_size; + + write_offset = RTC_REGMEM_0 + (offset / 4); + write_size = bytes / 4; + + return regmap_bulk_write(rtc->serial, write_offset, buf, write_size); +} + +static int meson_rtc_probe(struct platform_device *pdev) +{ + struct nvmem_config meson_rtc_nvmem_config = { + .name = "meson-rtc-regmem", + .type = NVMEM_TYPE_BATTERY_BACKED, + .word_size = 4, + .stride = 4, + .size = 4 * 4, + .reg_read = meson_rtc_regmem_read, + .reg_write = meson_rtc_regmem_write, + }; + struct device *dev = &pdev->dev; + struct meson_rtc *rtc; + struct resource *res; + void __iomem *base; + int ret; + u32 tm; + + rtc = devm_kzalloc(dev, sizeof(struct meson_rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + rtc->rtc = devm_rtc_allocate_device(dev); + if (IS_ERR(rtc->rtc)) + return PTR_ERR(rtc->rtc); + + platform_set_drvdata(pdev, rtc); + + rtc->dev = dev; + + rtc->rtc->ops = &meson_rtc_ops; + rtc->rtc->range_max = U32_MAX; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + rtc->peripheral = devm_regmap_init_mmio(dev, base, + &meson_rtc_peripheral_regmap_config); + if (IS_ERR(rtc->peripheral)) { + dev_err(dev, "failed to create peripheral regmap\n"); + return PTR_ERR(rtc->peripheral); + } + + rtc->reset = devm_reset_control_get(dev, NULL); + if (IS_ERR(rtc->reset)) { + dev_err(dev, "missing reset line\n"); + return PTR_ERR(rtc->reset); + } + + rtc->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(rtc->vdd)) { + dev_err(dev, "failed to get the vdd-supply\n"); + return PTR_ERR(rtc->vdd); + } + + ret = regulator_enable(rtc->vdd); + if (ret) { + dev_err(dev, "failed to enable vdd-supply\n"); + return ret; + } + + ret = meson_rtc_write_static(rtc, MESON_STATIC_DEFAULT); + if (ret) { + dev_err(dev, "failed to set static values\n"); + goto out_disable_vdd; + } + + rtc->serial = devm_regmap_init(dev, &meson_rtc_serial_bus, rtc, + &meson_rtc_serial_regmap_config); + if (IS_ERR(rtc->serial)) { + dev_err(dev, "failed to create serial regmap\n"); + ret = PTR_ERR(rtc->serial); + goto out_disable_vdd; + } + + /* + * check if we can read RTC counter, if not then the RTC is probably + * not functional. If it isn't probably best to not bind. 
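/*
 * Illustrative sketch (not part of the applied patch): the Meson serial front
 * end shifts register addresses and data out MSB first, one SCLK pulse per
 * bit (see meson_rtc_send_bits() earlier in this file).  A standalone model
 * of that loop, with a hypothetical demo_send_bit() callback standing in for
 * the RTC_ADDR0 register pokes:
 */
static void demo_send_bits(unsigned int data, unsigned int nr_bits,
			   void (*demo_send_bit)(int bit))
{
	unsigned int mask = 1u << (nr_bits - 1);

	while (mask) {
		demo_send_bit(!!(data & mask));	/* most significant bit first */
		mask >>= 1;
	}
}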
+ */ + ret = regmap_read(rtc->serial, RTC_COUNTER, &tm); + if (ret) { + dev_err(dev, "cannot read RTC counter, RTC not functional\n"); + goto out_disable_vdd; + } + + meson_rtc_nvmem_config.priv = rtc; + ret = rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config); + if (ret) + goto out_disable_vdd; + + ret = rtc_register_device(rtc->rtc); + if (ret) + goto out_disable_vdd; + + return 0; + +out_disable_vdd: + regulator_disable(rtc->vdd); + return ret; +} + +static const struct of_device_id meson_rtc_dt_match[] = { + { .compatible = "amlogic,meson6-rtc", }, + { .compatible = "amlogic,meson8-rtc", }, + { .compatible = "amlogic,meson8b-rtc", }, + { .compatible = "amlogic,meson8m2-rtc", }, + { }, +}; +MODULE_DEVICE_TABLE(of, meson_rtc_dt_match); + +static struct platform_driver meson_rtc_driver = { + .probe = meson_rtc_probe, + .driver = { + .name = "meson-rtc", + .of_match_table = of_match_ptr(meson_rtc_dt_match), + }, +}; +module_platform_driver(meson_rtc_driver); + +MODULE_DESCRIPTION("Amlogic Meson RTC Driver"); +MODULE_AUTHOR("Ben Dooks "); +MODULE_AUTHOR("Martin Blumenstingl "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:meson-rtc"); diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 283c2335b01b..f6ce63c443a0 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -27,17 +27,11 @@ */ #define PCF85063_REG_CTRL1 0x00 /* status */ +#define PCF85063_REG_CTRL1_CAP_SEL BIT(0) #define PCF85063_REG_CTRL1_STOP BIT(5) -#define PCF85063_REG_CTRL2 0x01 #define PCF85063_REG_SC 0x04 /* datetime */ #define PCF85063_REG_SC_OS 0x80 -#define PCF85063_REG_MN 0x05 -#define PCF85063_REG_HR 0x06 -#define PCF85063_REG_DM 0x07 -#define PCF85063_REG_DW 0x08 -#define PCF85063_REG_MO 0x09 -#define PCF85063_REG_YR 0x0A static struct i2c_driver pcf85063_driver; @@ -180,6 +174,39 @@ static const struct rtc_class_ops pcf85063_rtc_ops = { .set_time = pcf85063_rtc_set_time }; +static int pcf85063_load_capacitance(struct i2c_client *client) +{ + u32 load; + int rc; + u8 reg; + + rc = i2c_smbus_read_byte_data(client, PCF85063_REG_CTRL1); + if (rc < 0) + return rc; + + reg = rc; + load = 7000; + of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads", + &load); + + switch (load) { + default: + dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. 
Assuming 7000", + load); + /* fall through */ + case 7000: + reg &= ~PCF85063_REG_CTRL1_CAP_SEL; + break; + case 12500: + reg |= PCF85063_REG_CTRL1_CAP_SEL; + break; + } + + rc = i2c_smbus_write_byte_data(client, PCF85063_REG_CTRL1, reg); + + return rc; +} + static int pcf85063_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -197,6 +224,11 @@ static int pcf85063_probe(struct i2c_client *client, return err; } + err = pcf85063_load_capacitance(client); + if (err < 0) + dev_warn(&client->dev, "failed to set xtal load capacitance: %d", + err); + rtc = devm_rtc_device_register(&client->dev, pcf85063_driver.driver.name, &pcf85063_rtc_ops, THIS_MODULE); diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 3fcd2cbafc84..b5c61a70b5df 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -97,8 +97,9 @@ static int pcf8523_voltage_low(struct i2c_client *client) return !!(value & REG_CONTROL3_BLF); } -static int pcf8523_select_capacitance(struct i2c_client *client, bool high) +static int pcf8523_load_capacitance(struct i2c_client *client) { + u32 load; u8 value; int err; @@ -106,14 +107,24 @@ static int pcf8523_select_capacitance(struct i2c_client *client, bool high) if (err < 0) return err; - if (!high) - value &= ~REG_CONTROL1_CAP_SEL; - else + load = 12500; + of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads", + &load); + + switch (load) { + default: + dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500", + load); + /* fall through */ + case 12500: value |= REG_CONTROL1_CAP_SEL; + break; + case 7000: + value &= ~REG_CONTROL1_CAP_SEL; + break; + } err = pcf8523_write(client, REG_CONTROL1, value); - if (err < 0) - return err; return err; } @@ -347,9 +358,10 @@ static int pcf8523_probe(struct i2c_client *client, if (!pcf) return -ENOMEM; - err = pcf8523_select_capacitance(client, true); + err = pcf8523_load_capacitance(client); if (err < 0) - return err; + dev_warn(&client->dev, "failed to set xtal load capacitance: %d", + err); err = pcf8523_set_pm(client, 0); if (err < 0) @@ -374,6 +386,7 @@ MODULE_DEVICE_TABLE(i2c, pcf8523_id); #ifdef CONFIG_OF static const struct of_device_id pcf8523_of_match[] = { { .compatible = "nxp,pcf8523" }, + { .compatible = "microcrystal,rv8523" }, { } }; MODULE_DEVICE_TABLE(of, pcf8523_of_match); diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c index d7ef0a6f8931..1c4de6e90da0 100644 --- a/drivers/rtc/rtc-pic32.c +++ b/drivers/rtc/rtc-pic32.c @@ -1,18 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * PIC32 RTC driver * * Joshua Henderson * Copyright (C) 2016 Microchip Technology Inc. All rights reserved. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
*/ #include #include @@ -180,22 +172,16 @@ static int pic32_rtc_settime(struct device *dev, struct rtc_time *tm) { struct pic32_rtc_dev *pdata = dev_get_drvdata(dev); void __iomem *base = pdata->reg_base; - int year = tm->tm_year - 100; dev_dbg(dev, "set time %ptR\n", tm); - if (year < 0 || year >= 100) { - dev_err(dev, "rtc only supports 100 years\n"); - return -EINVAL; - } - clk_enable(pdata->clk); writeb(bin2bcd(tm->tm_sec), base + PIC32_RTCSEC); writeb(bin2bcd(tm->tm_min), base + PIC32_RTCMIN); writeb(bin2bcd(tm->tm_hour), base + PIC32_RTCHOUR); writeb(bin2bcd(tm->tm_mday), base + PIC32_RTCDAY); writeb(bin2bcd(tm->tm_mon + 1), base + PIC32_RTCMON); - writeb(bin2bcd(year), base + PIC32_RTCYEAR); + writeb(bin2bcd(tm->tm_year - 100), base + PIC32_RTCYEAR); clk_disable(pdata->clk); return 0; @@ -348,13 +334,17 @@ static int pic32_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); - pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &pic32_rtcops, - THIS_MODULE); - if (IS_ERR(pdata->rtc)) { - ret = PTR_ERR(pdata->rtc); + pdata->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(pdata->rtc)) + return PTR_ERR(pdata->rtc); + + pdata->rtc->ops = &pic32_rtcops; + pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + pdata->rtc->range_max = RTC_TIMESTAMP_END_2099; + + ret = rtc_register_device(pdata->rtc); + if (ret) goto err_nortc; - } pdata->rtc->max_user_freq = 128; diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index 1074e3dbfc1d..cda020700744 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c @@ -213,7 +213,8 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) } } - secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24); + secs = value[0] | (value[1] << 8) | (value[2] << 16) | + ((unsigned long)value[3] << 24); rtc_time_to_tm(secs, tm); @@ -284,7 +285,8 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) return rc; } - secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24); + secs = value[0] | (value[1] << 8) | (value[2] << 16) | + ((unsigned long)value[3] << 24); rtc_time_to_tm(secs, &alarm->time); diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index c5038329058c..66a473a3c3fe 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c @@ -52,8 +52,10 @@ # define RS5C_CTRL1_CT4 (4 << 0) /* 1 Hz level irq */ #define RS5C_REG_CTRL2 15 # define RS5C372_CTRL2_24 (1 << 5) -# define R2025_CTRL2_XST (1 << 5) -# define RS5C_CTRL2_XSTP (1 << 4) /* only if !R2025S/D */ +# define RS5C_CTRL2_XSTP (1 << 4) /* only if !R2x2x */ +# define R2x2x_CTRL2_VDET (1 << 6) /* only if R2x2x */ +# define R2x2x_CTRL2_XSTP (1 << 5) /* only if R2x2x */ +# define R2x2x_CTRL2_PON (1 << 4) /* only if R2x2x */ # define RS5C_CTRL2_CTFG (1 << 2) # define RS5C_CTRL2_AAFG (1 << 1) /* or WAFG */ # define RS5C_CTRL2_BAFG (1 << 0) /* or DAFG */ @@ -212,10 +214,27 @@ static int rs5c372_rtc_read_time(struct device *dev, struct rtc_time *tm) struct i2c_client *client = to_i2c_client(dev); struct rs5c372 *rs5c = i2c_get_clientdata(client); int status = rs5c_get_regs(rs5c); + unsigned char ctrl2 = rs5c->regs[RS5C_REG_CTRL2]; if (status < 0) return status; + switch (rs5c->type) { + case rtc_r2025sd: + case rtc_r2221tl: + if ((rs5c->type == rtc_r2025sd && !(ctrl2 & R2x2x_CTRL2_XSTP)) || + (rs5c->type == rtc_r2221tl && (ctrl2 & R2x2x_CTRL2_XSTP))) { + dev_warn(&client->dev, "rtc oscillator interruption detected. 
Please reset the rtc clock.\n"); + return -EINVAL; + } + break; + default: + if (ctrl2 & RS5C_CTRL2_XSTP) { + dev_warn(&client->dev, "rtc oscillator interruption detected. Please reset the rtc clock.\n"); + return -EINVAL; + } + } + tm->tm_sec = bcd2bin(rs5c->regs[RS5C372_REG_SECS] & 0x7f); tm->tm_min = bcd2bin(rs5c->regs[RS5C372_REG_MINS] & 0x7f); tm->tm_hour = rs5c_reg2hr(rs5c, rs5c->regs[RS5C372_REG_HOURS]); @@ -243,6 +262,7 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm) struct i2c_client *client = to_i2c_client(dev); struct rs5c372 *rs5c = i2c_get_clientdata(client); unsigned char buf[7]; + unsigned char ctrl2; int addr; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d " @@ -261,7 +281,32 @@ static int rs5c372_rtc_set_time(struct device *dev, struct rtc_time *tm) buf[6] = bin2bcd(tm->tm_year - 100); if (i2c_smbus_write_i2c_block_data(client, addr, sizeof(buf), buf) < 0) { - dev_err(&client->dev, "%s: write error\n", __func__); + dev_dbg(&client->dev, "%s: write error in line %i\n", + __func__, __LINE__); + return -EIO; + } + + addr = RS5C_ADDR(RS5C_REG_CTRL2); + ctrl2 = i2c_smbus_read_byte_data(client, addr); + + /* clear rtc warning bits */ + switch (rs5c->type) { + case rtc_r2025sd: + case rtc_r2221tl: + ctrl2 &= ~(R2x2x_CTRL2_VDET | R2x2x_CTRL2_PON); + if (rs5c->type == rtc_r2025sd) + ctrl2 |= R2x2x_CTRL2_XSTP; + else + ctrl2 &= ~R2x2x_CTRL2_XSTP; + break; + default: + ctrl2 &= ~RS5C_CTRL2_XSTP; + break; + } + + if (i2c_smbus_write_byte_data(client, addr, ctrl2) < 0) { + dev_dbg(&client->dev, "%s: write error in line %i\n", + __func__, __LINE__); return -EIO; } @@ -519,20 +564,25 @@ static int rs5c_oscillator_setup(struct rs5c372 *rs5c372) unsigned char buf[2]; int addr, i, ret = 0; - if (rs5c372->type == rtc_r2025sd) { - if (rs5c372->regs[RS5C_REG_CTRL2] & R2025_CTRL2_XST) - return ret; - rs5c372->regs[RS5C_REG_CTRL2] |= R2025_CTRL2_XST; - } else { - if (!(rs5c372->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_XSTP)) - return ret; - rs5c372->regs[RS5C_REG_CTRL2] &= ~RS5C_CTRL2_XSTP; - } - addr = RS5C_ADDR(RS5C_REG_CTRL1); buf[0] = rs5c372->regs[RS5C_REG_CTRL1]; buf[1] = rs5c372->regs[RS5C_REG_CTRL2]; + switch (rs5c372->type) { + case rtc_r2025sd: + if (buf[1] & R2x2x_CTRL2_XSTP) + return ret; + break; + case rtc_r2221tl: + if (!(buf[1] & R2x2x_CTRL2_XSTP)) + return ret; + break; + default: + if (!(buf[1] & RS5C_CTRL2_XSTP)) + return ret; + break; + } + /* use 24hr mode */ switch (rs5c372->type) { case rtc_rs5c372a: diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c new file mode 100644 index 000000000000..06884ebb7a61 --- /dev/null +++ b/drivers/rtc/rtc-rv3028.c @@ -0,0 +1,732 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RTC driver for the Micro Crystal RV3028 + * + * Copyright (C) 2019 Micro Crystal SA + * + * Alexandre Belloni + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RV3028_SEC 0x00 +#define RV3028_MIN 0x01 +#define RV3028_HOUR 0x02 +#define RV3028_WDAY 0x03 +#define RV3028_DAY 0x04 +#define RV3028_MONTH 0x05 +#define RV3028_YEAR 0x06 +#define RV3028_ALARM_MIN 0x07 +#define RV3028_ALARM_HOUR 0x08 +#define RV3028_ALARM_DAY 0x09 +#define RV3028_STATUS 0x0E +#define RV3028_CTRL1 0x0F +#define RV3028_CTRL2 0x10 +#define RV3028_EVT_CTRL 0x13 +#define RV3028_TS_COUNT 0x14 +#define RV3028_TS_SEC 0x15 +#define RV3028_RAM1 0x1F +#define RV3028_EEPROM_ADDR 0x25 +#define RV3028_EEPROM_DATA 0x26 +#define RV3028_EEPROM_CMD 0x27 +#define RV3028_CLKOUT 0x35 +#define 
RV3028_OFFSET 0x36 +#define RV3028_BACKUP 0x37 + +#define RV3028_STATUS_PORF BIT(0) +#define RV3028_STATUS_EVF BIT(1) +#define RV3028_STATUS_AF BIT(2) +#define RV3028_STATUS_TF BIT(3) +#define RV3028_STATUS_UF BIT(4) +#define RV3028_STATUS_BSF BIT(5) +#define RV3028_STATUS_CLKF BIT(6) +#define RV3028_STATUS_EEBUSY BIT(7) + +#define RV3028_CTRL1_EERD BIT(3) +#define RV3028_CTRL1_WADA BIT(5) + +#define RV3028_CTRL2_RESET BIT(0) +#define RV3028_CTRL2_12_24 BIT(1) +#define RV3028_CTRL2_EIE BIT(2) +#define RV3028_CTRL2_AIE BIT(3) +#define RV3028_CTRL2_TIE BIT(4) +#define RV3028_CTRL2_UIE BIT(5) +#define RV3028_CTRL2_TSE BIT(7) + +#define RV3028_EVT_CTRL_TSR BIT(2) + +#define RV3028_EEPROM_CMD_WRITE 0x21 +#define RV3028_EEPROM_CMD_READ 0x22 + +#define RV3028_EEBUSY_POLL 10000 +#define RV3028_EEBUSY_TIMEOUT 100000 + +#define RV3028_BACKUP_TCE BIT(5) +#define RV3028_BACKUP_TCR_MASK GENMASK(1,0) + +#define OFFSET_STEP_PPT 953674 + +enum rv3028_type { + rv_3028, +}; + +struct rv3028_data { + struct regmap *regmap; + struct rtc_device *rtc; + enum rv3028_type type; +}; + +static u16 rv3028_trickle_resistors[] = {1000, 3000, 6000, 11000}; + +static ssize_t timestamp0_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev->parent); + + regmap_update_bits(rv3028->regmap, RV3028_EVT_CTRL, RV3028_EVT_CTRL_TSR, + RV3028_EVT_CTRL_TSR); + + return count; +}; + +static ssize_t timestamp0_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev->parent); + struct rtc_time tm; + int ret, count; + u8 date[6]; + + ret = regmap_read(rv3028->regmap, RV3028_TS_COUNT, &count); + if (ret) + return ret; + + if (!count) + return 0; + + ret = regmap_bulk_read(rv3028->regmap, RV3028_TS_SEC, date, + sizeof(date)); + if (ret) + return ret; + + tm.tm_sec = bcd2bin(date[0]); + tm.tm_min = bcd2bin(date[1]); + tm.tm_hour = bcd2bin(date[2]); + tm.tm_mday = bcd2bin(date[3]); + tm.tm_mon = bcd2bin(date[4]) - 1; + tm.tm_year = bcd2bin(date[5]) + 100; + + ret = rtc_valid_tm(&tm); + if (ret) + return ret; + + return sprintf(buf, "%llu\n", + (unsigned long long)rtc_tm_to_time64(&tm)); +}; + +static DEVICE_ATTR_RW(timestamp0); + +static ssize_t timestamp0_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev->parent); + int ret, count; + + ret = regmap_read(rv3028->regmap, RV3028_TS_COUNT, &count); + if (ret) + return ret; + + return sprintf(buf, "%u\n", count); +}; + +static DEVICE_ATTR_RO(timestamp0_count); + +static struct attribute *rv3028_attrs[] = { + &dev_attr_timestamp0.attr, + &dev_attr_timestamp0_count.attr, + NULL +}; + +static const struct attribute_group rv3028_attr_group = { + .attrs = rv3028_attrs, +}; + +static irqreturn_t rv3028_handle_irq(int irq, void *dev_id) +{ + struct rv3028_data *rv3028 = dev_id; + unsigned long events = 0; + u32 status = 0, ctrl = 0; + + if (regmap_read(rv3028->regmap, RV3028_STATUS, &status) < 0 || + status == 0) { + return IRQ_NONE; + } + + if (status & RV3028_STATUS_PORF) + dev_warn(&rv3028->rtc->dev, "Voltage low, data loss detected.\n"); + + if (status & RV3028_STATUS_TF) { + status |= RV3028_STATUS_TF; + ctrl |= RV3028_CTRL2_TIE; + events |= RTC_PF; + } + + if (status & RV3028_STATUS_AF) { + status |= RV3028_STATUS_AF; + ctrl |= RV3028_CTRL2_AIE; + events |= RTC_AF; + } + + if (status & RV3028_STATUS_UF) { + status |= RV3028_STATUS_UF; + ctrl |= 
RV3028_CTRL2_UIE; + events |= RTC_UF; + } + + if (events) { + rtc_update_irq(rv3028->rtc, 1, events); + regmap_update_bits(rv3028->regmap, RV3028_STATUS, status, 0); + regmap_update_bits(rv3028->regmap, RV3028_CTRL2, ctrl, 0); + } + + if (status & RV3028_STATUS_EVF) { + sysfs_notify(&rv3028->rtc->dev.kobj, NULL, + dev_attr_timestamp0.attr.name); + dev_warn(&rv3028->rtc->dev, "event detected"); + } + + return IRQ_HANDLED; +} + +static int rv3028_get_time(struct device *dev, struct rtc_time *tm) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + u8 date[7]; + int ret, status; + + ret = regmap_read(rv3028->regmap, RV3028_STATUS, &status); + if (ret < 0) + return ret; + + if (status & RV3028_STATUS_PORF) { + dev_warn(dev, "Voltage low, data is invalid.\n"); + return -EINVAL; + } + + ret = regmap_bulk_read(rv3028->regmap, RV3028_SEC, date, sizeof(date)); + if (ret) + return ret; + + tm->tm_sec = bcd2bin(date[RV3028_SEC] & 0x7f); + tm->tm_min = bcd2bin(date[RV3028_MIN] & 0x7f); + tm->tm_hour = bcd2bin(date[RV3028_HOUR] & 0x3f); + tm->tm_wday = ilog2(date[RV3028_WDAY] & 0x7f); + tm->tm_mday = bcd2bin(date[RV3028_DAY] & 0x3f); + tm->tm_mon = bcd2bin(date[RV3028_MONTH] & 0x1f) - 1; + tm->tm_year = bcd2bin(date[RV3028_YEAR]) + 100; + + return 0; +} + +static int rv3028_set_time(struct device *dev, struct rtc_time *tm) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + u8 date[7]; + int ret; + + date[RV3028_SEC] = bin2bcd(tm->tm_sec); + date[RV3028_MIN] = bin2bcd(tm->tm_min); + date[RV3028_HOUR] = bin2bcd(tm->tm_hour); + date[RV3028_WDAY] = 1 << (tm->tm_wday); + date[RV3028_DAY] = bin2bcd(tm->tm_mday); + date[RV3028_MONTH] = bin2bcd(tm->tm_mon + 1); + date[RV3028_YEAR] = bin2bcd(tm->tm_year - 100); + + /* + * Writing to the Seconds register has the same effect as setting RESET + * bit to 1 + */ + ret = regmap_bulk_write(rv3028->regmap, RV3028_SEC, date, + sizeof(date)); + if (ret) + return ret; + + ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS, + RV3028_STATUS_PORF, 0); + + return ret; +} + +static int rv3028_get_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + u8 alarmvals[3]; + int status, ctrl, ret; + + ret = regmap_bulk_read(rv3028->regmap, RV3028_ALARM_MIN, alarmvals, + sizeof(alarmvals)); + if (ret) + return ret; + + ret = regmap_read(rv3028->regmap, RV3028_STATUS, &status); + if (ret < 0) + return ret; + + ret = regmap_read(rv3028->regmap, RV3028_CTRL2, &ctrl); + if (ret < 0) + return ret; + + alrm->time.tm_sec = 0; + alrm->time.tm_min = bcd2bin(alarmvals[0] & 0x7f); + alrm->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f); + alrm->time.tm_mday = bcd2bin(alarmvals[2] & 0x3f); + + alrm->enabled = !!(ctrl & RV3028_CTRL2_AIE); + alrm->pending = (status & RV3028_STATUS_AF) && alrm->enabled; + + return 0; +} + +static int rv3028_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + u8 alarmvals[3]; + u8 ctrl = 0; + int ret; + + /* The alarm has no seconds, round up to nearest minute */ + if (alrm->time.tm_sec) { + time64_t alarm_time = rtc_tm_to_time64(&alrm->time); + + alarm_time += 60 - alrm->time.tm_sec; + rtc_time64_to_tm(alarm_time, &alrm->time); + } + + ret = regmap_update_bits(rv3028->regmap, RV3028_CTRL2, + RV3028_CTRL2_AIE | RV3028_CTRL2_UIE, 0); + if (ret) + return ret; + + alarmvals[0] = bin2bcd(alrm->time.tm_min); + alarmvals[1] = bin2bcd(alrm->time.tm_hour); + alarmvals[2] = bin2bcd(alrm->time.tm_mday); + + ret = regmap_update_bits(rv3028->regmap, 
RV3028_STATUS, + RV3028_STATUS_AF, 0); + if (ret) + return ret; + + ret = regmap_bulk_write(rv3028->regmap, RV3028_ALARM_MIN, alarmvals, + sizeof(alarmvals)); + if (ret) + return ret; + + if (alrm->enabled) { + if (rv3028->rtc->uie_rtctimer.enabled) + ctrl |= RV3028_CTRL2_UIE; + if (rv3028->rtc->aie_timer.enabled) + ctrl |= RV3028_CTRL2_AIE; + } + + ret = regmap_update_bits(rv3028->regmap, RV3028_CTRL2, + RV3028_CTRL2_UIE | RV3028_CTRL2_AIE, ctrl); + + return ret; +} + +static int rv3028_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + int ctrl = 0, ret; + + if (enabled) { + if (rv3028->rtc->uie_rtctimer.enabled) + ctrl |= RV3028_CTRL2_UIE; + if (rv3028->rtc->aie_timer.enabled) + ctrl |= RV3028_CTRL2_AIE; + } + + ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS, + RV3028_STATUS_AF | RV3028_STATUS_UF, 0); + if (ret) + return ret; + + ret = regmap_update_bits(rv3028->regmap, RV3028_CTRL2, + RV3028_CTRL2_UIE | RV3028_CTRL2_AIE, ctrl); + if (ret) + return ret; + + return 0; +} + +static int rv3028_read_offset(struct device *dev, long *offset) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + int ret, value, steps; + + ret = regmap_read(rv3028->regmap, RV3028_OFFSET, &value); + if (ret < 0) + return ret; + + steps = sign_extend32(value << 1, 8); + + ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &value); + if (ret < 0) + return ret; + + steps += value >> 7; + + *offset = DIV_ROUND_CLOSEST(steps * OFFSET_STEP_PPT, 1000); + + return 0; +} + +static int rv3028_set_offset(struct device *dev, long offset) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + int ret; + + offset = clamp(offset, -244141L, 243187L) * 1000; + offset = DIV_ROUND_CLOSEST(offset, OFFSET_STEP_PPT); + + ret = regmap_write(rv3028->regmap, RV3028_OFFSET, offset >> 1); + if (ret < 0) + return ret; + + return regmap_update_bits(rv3028->regmap, RV3028_BACKUP, BIT(7), + offset << 7); +} + +static int rv3028_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) +{ + struct rv3028_data *rv3028 = dev_get_drvdata(dev); + int status, ret = 0; + + switch (cmd) { + case RTC_VL_READ: + ret = regmap_read(rv3028->regmap, RV3028_STATUS, &status); + if (ret < 0) + return ret; + + if (status & RV3028_STATUS_PORF) + dev_warn(&rv3028->rtc->dev, "Voltage low, data loss detected.\n"); + + status &= RV3028_STATUS_PORF; + + if (copy_to_user((void __user *)arg, &status, sizeof(int))) + return -EFAULT; + + return 0; + + case RTC_VL_CLR: + ret = regmap_update_bits(rv3028->regmap, RV3028_STATUS, + RV3028_STATUS_PORF, 0); + + return ret; + + default: + return -ENOIOCTLCMD; + } +} + +static int rv3028_nvram_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + return regmap_bulk_write(priv, RV3028_RAM1 + offset, val, bytes); +} + +static int rv3028_nvram_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + return regmap_bulk_read(priv, RV3028_RAM1 + offset, val, bytes); +} + +static int rv3028_eeprom_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + u32 status, ctrl1; + int i, ret, err; + u8 *buf = val; + + ret = regmap_read(priv, RV3028_CTRL1, &ctrl1); + if (ret) + return ret; + + if (!(ctrl1 & RV3028_CTRL1_EERD)) { + ret = regmap_update_bits(priv, RV3028_CTRL1, + RV3028_CTRL1_EERD, RV3028_CTRL1_EERD); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status, + !(status & RV3028_STATUS_EEBUSY), + RV3028_EEBUSY_POLL, + RV3028_EEBUSY_TIMEOUT); + if (ret) + goto restore_eerd; + 
} + + for (i = 0; i < bytes; i++) { + ret = regmap_write(priv, RV3028_EEPROM_ADDR, offset + i); + if (ret) + goto restore_eerd; + + ret = regmap_write(priv, RV3028_EEPROM_DATA, buf[i]); + if (ret) + goto restore_eerd; + + ret = regmap_write(priv, RV3028_EEPROM_CMD, 0x0); + if (ret) + goto restore_eerd; + + ret = regmap_write(priv, RV3028_EEPROM_CMD, + RV3028_EEPROM_CMD_WRITE); + if (ret) + goto restore_eerd; + + usleep_range(RV3028_EEBUSY_POLL, RV3028_EEBUSY_TIMEOUT); + + ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status, + !(status & RV3028_STATUS_EEBUSY), + RV3028_EEBUSY_POLL, + RV3028_EEBUSY_TIMEOUT); + if (ret) + goto restore_eerd; + } + +restore_eerd: + if (!(ctrl1 & RV3028_CTRL1_EERD)) + { + err = regmap_update_bits(priv, RV3028_CTRL1, RV3028_CTRL1_EERD, + 0); + if (err && !ret) + ret = err; + } + + return ret; +} + +static int rv3028_eeprom_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + u32 status, ctrl1, data; + int i, ret, err; + u8 *buf = val; + + ret = regmap_read(priv, RV3028_CTRL1, &ctrl1); + if (ret) + return ret; + + if (!(ctrl1 & RV3028_CTRL1_EERD)) { + ret = regmap_update_bits(priv, RV3028_CTRL1, + RV3028_CTRL1_EERD, RV3028_CTRL1_EERD); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status, + !(status & RV3028_STATUS_EEBUSY), + RV3028_EEBUSY_POLL, + RV3028_EEBUSY_TIMEOUT); + if (ret) + goto restore_eerd; + } + + for (i = 0; i < bytes; i++) { + ret = regmap_write(priv, RV3028_EEPROM_ADDR, offset + i); + if (ret) + goto restore_eerd; + + ret = regmap_write(priv, RV3028_EEPROM_CMD, 0x0); + if (ret) + goto restore_eerd; + + ret = regmap_write(priv, RV3028_EEPROM_CMD, + RV3028_EEPROM_CMD_READ); + if (ret) + goto restore_eerd; + + ret = regmap_read_poll_timeout(priv, RV3028_STATUS, status, + !(status & RV3028_STATUS_EEBUSY), + RV3028_EEBUSY_POLL, + RV3028_EEBUSY_TIMEOUT); + if (ret) + goto restore_eerd; + + ret = regmap_read(priv, RV3028_EEPROM_DATA, &data); + if (ret) + goto restore_eerd; + buf[i] = data; + } + +restore_eerd: + if (!(ctrl1 & RV3028_CTRL1_EERD)) + { + err = regmap_update_bits(priv, RV3028_CTRL1, RV3028_CTRL1_EERD, + 0); + if (err && !ret) + ret = err; + } + + return ret; +} + +static struct rtc_class_ops rv3028_rtc_ops = { + .read_time = rv3028_get_time, + .set_time = rv3028_set_time, + .read_offset = rv3028_read_offset, + .set_offset = rv3028_set_offset, + .ioctl = rv3028_ioctl, +}; + +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x37, +}; + +static int rv3028_probe(struct i2c_client *client) +{ + struct rv3028_data *rv3028; + int ret, status; + u32 ohms; + struct nvmem_config nvmem_cfg = { + .name = "rv3028_nvram", + .word_size = 1, + .stride = 1, + .size = 2, + .type = NVMEM_TYPE_BATTERY_BACKED, + .reg_read = rv3028_nvram_read, + .reg_write = rv3028_nvram_write, + }; + struct nvmem_config eeprom_cfg = { + .name = "rv3028_eeprom", + .word_size = 1, + .stride = 1, + .size = 43, + .type = NVMEM_TYPE_EEPROM, + .reg_read = rv3028_eeprom_read, + .reg_write = rv3028_eeprom_write, + }; + + rv3028 = devm_kzalloc(&client->dev, sizeof(struct rv3028_data), + GFP_KERNEL); + if (!rv3028) + return -ENOMEM; + + rv3028->regmap = devm_regmap_init_i2c(client, &regmap_config); + + i2c_set_clientdata(client, rv3028); + + ret = regmap_read(rv3028->regmap, RV3028_STATUS, &status); + if (ret < 0) + return ret; + + if (status & RV3028_STATUS_PORF) + dev_warn(&client->dev, "Voltage low, data loss detected.\n"); + + if (status & RV3028_STATUS_AF) + 
dev_warn(&client->dev, "An alarm may have been missed.\n"); + + rv3028->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(rv3028->rtc)) { + return PTR_ERR(rv3028->rtc); + } + + if (client->irq > 0) { + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, rv3028_handle_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "rv3028", rv3028); + if (ret) { + dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n"); + client->irq = 0; + } else { + rv3028_rtc_ops.read_alarm = rv3028_get_alarm; + rv3028_rtc_ops.set_alarm = rv3028_set_alarm; + rv3028_rtc_ops.alarm_irq_enable = rv3028_alarm_irq_enable; + } + } + + ret = regmap_update_bits(rv3028->regmap, RV3028_CTRL1, + RV3028_CTRL1_WADA, RV3028_CTRL1_WADA); + if (ret) + return ret; + + /* setup timestamping */ + ret = regmap_update_bits(rv3028->regmap, RV3028_CTRL2, + RV3028_CTRL2_EIE | RV3028_CTRL2_TSE, + RV3028_CTRL2_EIE | RV3028_CTRL2_TSE); + if (ret) + return ret; + + /* setup trickle charger */ + if (!device_property_read_u32(&client->dev, "trickle-resistor-ohms", + &ohms)) { + int i; + + for (i = 0; i < ARRAY_SIZE(rv3028_trickle_resistors); i++) + if (ohms == rv3028_trickle_resistors[i]) + break; + + if (i < ARRAY_SIZE(rv3028_trickle_resistors)) { + ret = regmap_update_bits(rv3028->regmap, RV3028_BACKUP, + RV3028_BACKUP_TCE | + RV3028_BACKUP_TCR_MASK, + RV3028_BACKUP_TCE | i); + if (ret) + return ret; + } else { + dev_warn(&client->dev, "invalid trickle resistor value\n"); + } + } + + ret = rtc_add_group(rv3028->rtc, &rv3028_attr_group); + if (ret) + return ret; + + rv3028->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + rv3028->rtc->range_max = RTC_TIMESTAMP_END_2099; + rv3028->rtc->ops = &rv3028_rtc_ops; + ret = rtc_register_device(rv3028->rtc); + if (ret) + return ret; + + nvmem_cfg.priv = rv3028->regmap; + rtc_nvmem_register(rv3028->rtc, &nvmem_cfg); + eeprom_cfg.priv = rv3028->regmap; + rtc_nvmem_register(rv3028->rtc, &eeprom_cfg); + + rv3028->rtc->max_user_freq = 1; + + return 0; +} + +static const struct of_device_id rv3028_of_match[] = { + { .compatible = "microcrystal,rv3028", }, + { } +}; +MODULE_DEVICE_TABLE(of, rv3028_of_match); + +static struct i2c_driver rv3028_driver = { + .driver = { + .name = "rtc-rv3028", + .of_match_table = of_match_ptr(rv3028_of_match), + }, + .probe_new = rv3028_probe, +}; +module_i2c_driver(rv3028_driver); + +MODULE_AUTHOR("Alexandre Belloni "); +MODULE_DESCRIPTION("Micro Crystal RV3028 RTC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 450a0b831a2d..0b102c3cf5a4 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -1,13 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 /* * RTC driver for the Micro Crystal RV8803 * * Copyright (C) 2015 Micro Crystal SA - * - * Alexandre Belloni - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
+ * Alexandre Belloni * */ @@ -236,9 +232,6 @@ static int rv8803_set_time(struct device *dev, struct rtc_time *tm) u8 date[7]; int ctrl, flags, ret; - if ((tm->tm_year < 100) || (tm->tm_year > 199)) - return -EINVAL; - ctrl = rv8803_read_reg(rv8803->client, RV8803_CTRL); if (ctrl < 0) return ctrl; @@ -602,6 +595,8 @@ static int rv8803_probe(struct i2c_client *client, rv8803->rtc->ops = &rv8803_rtc_ops; rv8803->rtc->nvram_old_abi = true; + rv8803->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + rv8803->rtc->range_max = RTC_TIMESTAMP_END_2099; err = rtc_register_device(rv8803->rtc); if (err) return err; @@ -648,6 +643,6 @@ static struct i2c_driver rv8803_driver = { }; module_i2c_driver(rv8803_driver); -MODULE_AUTHOR("Alexandre Belloni "); +MODULE_AUTHOR("Alexandre Belloni "); MODULE_DESCRIPTION("Micro Crystal RV8803 RTC driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c index eac882169744..776e3a2b89e8 100644 --- a/drivers/rtc/rtc-rx8581.c +++ b/drivers/rtc/rtc-rx8581.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include #include @@ -51,11 +53,19 @@ #define RX8581_CTRL_STOP 0x02 /* STOP bit */ #define RX8581_CTRL_RESET 0x01 /* RESET bit */ +#define RX8571_USER_RAM 0x10 +#define RX8571_NVRAM_SIZE 0x10 + struct rx8581 { struct regmap *regmap; struct rtc_device *rtc; }; +struct rx85x1_config { + struct regmap_config regmap; + unsigned int num_nvram; +}; + /* * In the routines that deal directly with the rx8581 hardware, we use * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch. @@ -181,25 +191,103 @@ static const struct rtc_class_ops rx8581_rtc_ops = { .set_time = rx8581_rtc_set_time, }; -static int rx8581_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int rx8571_nvram_read(void *priv, unsigned int offset, void *val, + size_t bytes) { - struct rx8581 *rx8581; - static const struct regmap_config config = { + struct rx8581 *rx8581 = priv; + + return regmap_bulk_read(rx8581->regmap, RX8571_USER_RAM + offset, + val, bytes); +} + +static int rx8571_nvram_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct rx8581 *rx8581 = priv; + + return regmap_bulk_write(rx8581->regmap, RX8571_USER_RAM + offset, + val, bytes); +} + +static int rx85x1_nvram_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct rx8581 *rx8581 = priv; + unsigned int tmp_val; + int ret; + + ret = regmap_read(rx8581->regmap, RX8581_REG_RAM, &tmp_val); + (*(unsigned char *)val) = (unsigned char) tmp_val; + + return ret; +} + +static int rx85x1_nvram_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct rx8581 *rx8581 = priv; + unsigned char tmp_val; + + tmp_val = *((unsigned char *)val); + return regmap_write(rx8581->regmap, RX8581_REG_RAM, + (unsigned int)tmp_val); +} + +static const struct rx85x1_config rx8581_config = { + .regmap = { .reg_bits = 8, .val_bits = 8, .max_register = 0xf, + }, + .num_nvram = 1 +}; + +static const struct rx85x1_config rx8571_config = { + .regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x1f, + }, + .num_nvram = 2 +}; + +static int rx8581_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct rx8581 *rx8581; + const struct rx85x1_config *config = &rx8581_config; + const void *data = of_device_get_match_data(&client->dev); + static struct nvmem_config nvmem_cfg[] = { + { + .name = "rx85x1-", + .word_size = 1, + .stride = 1, + .size = 1, + .reg_read = rx85x1_nvram_read, + .reg_write = 
rx85x1_nvram_write, + }, { + .name = "rx8571-", + .word_size = 1, + .stride = 1, + .size = RX8571_NVRAM_SIZE, + .reg_read = rx8571_nvram_read, + .reg_write = rx8571_nvram_write, + }, }; + int ret, i; dev_dbg(&client->dev, "%s\n", __func__); + if (data) + config = data; + rx8581 = devm_kzalloc(&client->dev, sizeof(struct rx8581), GFP_KERNEL); if (!rx8581) return -ENOMEM; i2c_set_clientdata(client, rx8581); - rx8581->regmap = devm_regmap_init_i2c(client, &config); + rx8581->regmap = devm_regmap_init_i2c(client, &config->regmap); if (IS_ERR(rx8581->regmap)) return PTR_ERR(rx8581->regmap); @@ -213,7 +301,14 @@ static int rx8581_probe(struct i2c_client *client, rx8581->rtc->start_secs = 0; rx8581->rtc->set_start_time = true; - return rtc_register_device(rx8581->rtc); + ret = rtc_register_device(rx8581->rtc); + + for (i = 0; i < config->num_nvram; i++) { + nvmem_cfg[i].priv = rx8581; + rtc_nvmem_register(rx8581->rtc, &nvmem_cfg[i]); + } + + return ret; } static const struct i2c_device_id rx8581_id[] = { @@ -223,8 +318,9 @@ static const struct i2c_device_id rx8581_id[] = { MODULE_DEVICE_TABLE(i2c, rx8581_id); static const struct of_device_id rx8581_of_match[] = { - { .compatible = "epson,rx8581" }, - { } + { .compatible = "epson,rx8571", .data = &rx8571_config }, + { .compatible = "epson,rx8581", .data = &rx8581_config }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rx8581_of_match); @@ -240,5 +336,5 @@ static struct i2c_driver rx8581_driver = { module_i2c_driver(rx8581_driver); MODULE_AUTHOR("Martyn Welch "); -MODULE_DESCRIPTION("Epson RX-8581 RTC driver"); +MODULE_DESCRIPTION("Epson RX-8571/RX-8581 RTC driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 04c68178c42d..e81a2b22a5c3 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -39,7 +40,7 @@ struct s3c_rtc { void __iomem *base; struct clk *rtc_clk; struct clk *rtc_src_clk; - bool clk_disabled; + bool alarm_enabled; const struct s3c_rtc_data *data; @@ -47,7 +48,7 @@ struct s3c_rtc { int irq_tick; spinlock_t pie_lock; - spinlock_t alarm_clk_lock; + spinlock_t alarm_lock; int ticnt_save; int ticnt_en_save; @@ -70,44 +71,27 @@ struct s3c_rtc_data { static int s3c_rtc_enable_clk(struct s3c_rtc *info) { - unsigned long irq_flags; - int ret = 0; + int ret; - spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); + ret = clk_enable(info->rtc_clk); + if (ret) + return ret; - if (info->clk_disabled) { - ret = clk_enable(info->rtc_clk); - if (ret) - goto out; - - if (info->data->needs_src_clk) { - ret = clk_enable(info->rtc_src_clk); - if (ret) { - clk_disable(info->rtc_clk); - goto out; - } + if (info->data->needs_src_clk) { + ret = clk_enable(info->rtc_src_clk); + if (ret) { + clk_disable(info->rtc_clk); + return ret; } - info->clk_disabled = false; } - -out: - spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); - - return ret; + return 0; } static void s3c_rtc_disable_clk(struct s3c_rtc *info) { - unsigned long irq_flags; - - spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); - if (!info->clk_disabled) { - if (info->data->needs_src_clk) - clk_disable(info->rtc_src_clk); - clk_disable(info->rtc_clk); - info->clk_disabled = true; - } - spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); + if (info->data->needs_src_clk) + clk_disable(info->rtc_src_clk); + clk_disable(info->rtc_clk); } /* IRQ Handlers */ @@ -135,6 +119,7 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) static int 
s3c_rtc_setaie(struct device *dev, unsigned int enabled) { struct s3c_rtc *info = dev_get_drvdata(dev); + unsigned long flags; unsigned int tmp; int ret; @@ -151,17 +136,19 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) writeb(tmp, info->base + S3C2410_RTCALM); - s3c_rtc_disable_clk(info); + spin_lock_irqsave(&info->alarm_lock, flags); - if (enabled) { - ret = s3c_rtc_enable_clk(info); - if (ret) - return ret; - } else { + if (info->alarm_enabled && !enabled) s3c_rtc_disable_clk(info); - } + else if (!info->alarm_enabled && enabled) + ret = s3c_rtc_enable_clk(info); - return 0; + info->alarm_enabled = enabled; + spin_unlock_irqrestore(&info->alarm_lock, flags); + + s3c_rtc_disable_clk(info); + + return ret; } /* Set RTC frequency */ @@ -357,10 +344,10 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) writeb(alrm_en, info->base + S3C2410_RTCALM); - s3c_rtc_disable_clk(info); - s3c_rtc_setaie(dev, alrm->enabled); + s3c_rtc_disable_clk(info); + return 0; } @@ -456,16 +443,6 @@ static int s3c_rtc_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id s3c_rtc_dt_match[]; - -static const struct s3c_rtc_data *s3c_rtc_get_data(struct platform_device *pdev) -{ - const struct of_device_id *match; - - match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node); - return match->data; -} - static int s3c_rtc_probe(struct platform_device *pdev) { struct s3c_rtc *info = NULL; @@ -485,13 +462,13 @@ static int s3c_rtc_probe(struct platform_device *pdev) } info->dev = &pdev->dev; - info->data = s3c_rtc_get_data(pdev); + info->data = of_device_get_match_data(&pdev->dev); if (!info->data) { dev_err(&pdev->dev, "failed getting s3c_rtc_data\n"); return -EINVAL; } spin_lock_init(&info->pie_lock); - spin_lock_init(&info->alarm_clk_lock); + spin_lock_init(&info->alarm_lock); platform_set_drvdata(pdev, info); @@ -591,6 +568,8 @@ static int s3c_rtc_probe(struct platform_device *pdev) s3c_rtc_setfreq(info, 1); + s3c_rtc_disable_clk(info); + return 0; err_nortc: diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c new file mode 100644 index 000000000000..42cb90db7f94 --- /dev/null +++ b/drivers/rtc/rtc-sd3078.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Real Time Clock (RTC) Driver for sd3078 + * Copyright (C) 2018 Zoro Li + */ + +#include +#include +#include +#include +#include +#include + +#define SD3078_REG_SC 0x00 +#define SD3078_REG_MN 0x01 +#define SD3078_REG_HR 0x02 +#define SD3078_REG_DW 0x03 +#define SD3078_REG_DM 0x04 +#define SD3078_REG_MO 0x05 +#define SD3078_REG_YR 0x06 + +#define SD3078_REG_CTRL1 0x0f +#define SD3078_REG_CTRL2 0x10 +#define SD3078_REG_CTRL3 0x11 + +#define KEY_WRITE1 0x80 +#define KEY_WRITE2 0x04 +#define KEY_WRITE3 0x80 + +#define NUM_TIME_REGS (SD3078_REG_YR - SD3078_REG_SC + 1) + +/* + * The sd3078 has write protection + * and we can choose whether or not to use it. + * Write protection is turned off by default. + */ +#define WRITE_PROTECT_EN 0 + +struct sd3078 { + struct rtc_device *rtc; + struct regmap *regmap; +}; + +/* + * In order to prevent arbitrary modification of the time register, + * when modification of the register, + * the "write" bit needs to be written in a certain order. + * 1. set WRITE1 bit + * 2. set WRITE2 bit + * 3. 
set WRITE3 bit + */ +static void sd3078_enable_reg_write(struct sd3078 *sd3078) +{ + regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL2, + KEY_WRITE1, KEY_WRITE1); + regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1, + KEY_WRITE2, KEY_WRITE2); + regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1, + KEY_WRITE3, KEY_WRITE3); +} + +#if WRITE_PROTECT_EN +/* + * In order to prevent arbitrary modification of the time register, + * we should disable the write function. + * when disable write, + * the "write" bit needs to be clear in a certain order. + * 1. clear WRITE2 bit + * 2. clear WRITE3 bit + * 3. clear WRITE1 bit + */ +static void sd3078_disable_reg_write(struct sd3078 *sd3078) +{ + regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1, + KEY_WRITE2, 0); + regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL1, + KEY_WRITE3, 0); + regmap_update_bits(sd3078->regmap, SD3078_REG_CTRL2, + KEY_WRITE1, 0); +} +#endif + +static int sd3078_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char hour; + unsigned char rtc_data[NUM_TIME_REGS] = {0}; + struct i2c_client *client = to_i2c_client(dev); + struct sd3078 *sd3078 = i2c_get_clientdata(client); + int ret; + + ret = regmap_bulk_read(sd3078->regmap, SD3078_REG_SC, rtc_data, + NUM_TIME_REGS); + if (ret < 0) { + dev_err(dev, "reading from RTC failed with err:%d\n", ret); + return ret; + } + + tm->tm_sec = bcd2bin(rtc_data[SD3078_REG_SC] & 0x7F); + tm->tm_min = bcd2bin(rtc_data[SD3078_REG_MN] & 0x7F); + + /* + * The sd3078 supports 12/24 hour mode. + * When getting time, + * we need to convert the 12 hour mode to the 24 hour mode. + */ + hour = rtc_data[SD3078_REG_HR]; + if (hour & 0x80) /* 24H MODE */ + tm->tm_hour = bcd2bin(rtc_data[SD3078_REG_HR] & 0x3F); + else if (hour & 0x20) /* 12H MODE PM */ + tm->tm_hour = bcd2bin(rtc_data[SD3078_REG_HR] & 0x1F) + 12; + else /* 12H MODE AM */ + tm->tm_hour = bcd2bin(rtc_data[SD3078_REG_HR] & 0x1F); + + tm->tm_mday = bcd2bin(rtc_data[SD3078_REG_DM] & 0x3F); + tm->tm_wday = rtc_data[SD3078_REG_DW] & 0x07; + tm->tm_mon = bcd2bin(rtc_data[SD3078_REG_MO] & 0x1F) - 1; + tm->tm_year = bcd2bin(rtc_data[SD3078_REG_YR]) + 100; + + return 0; +} + +static int sd3078_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned char rtc_data[NUM_TIME_REGS]; + struct i2c_client *client = to_i2c_client(dev); + struct sd3078 *sd3078 = i2c_get_clientdata(client); + int ret; + + rtc_data[SD3078_REG_SC] = bin2bcd(tm->tm_sec); + rtc_data[SD3078_REG_MN] = bin2bcd(tm->tm_min); + rtc_data[SD3078_REG_HR] = bin2bcd(tm->tm_hour) | 0x80; + rtc_data[SD3078_REG_DM] = bin2bcd(tm->tm_mday); + rtc_data[SD3078_REG_DW] = tm->tm_wday & 0x07; + rtc_data[SD3078_REG_MO] = bin2bcd(tm->tm_mon) + 1; + rtc_data[SD3078_REG_YR] = bin2bcd(tm->tm_year - 100); + +#if WRITE_PROTECT_EN + sd3078_enable_reg_write(sd3078); +#endif + + ret = regmap_bulk_write(sd3078->regmap, SD3078_REG_SC, rtc_data, + NUM_TIME_REGS); + if (ret < 0) { + dev_err(dev, "writing to RTC failed with err:%d\n", ret); + return ret; + } + +#if WRITE_PROTECT_EN + sd3078_disable_reg_write(sd3078); +#endif + + return 0; +} + +static const struct rtc_class_ops sd3078_rtc_ops = { + .read_time = sd3078_rtc_read_time, + .set_time = sd3078_rtc_set_time, +}; + +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x11, +}; + +static int sd3078_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + int ret; + struct sd3078 *sd3078; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return 
-ENODEV; + + sd3078 = devm_kzalloc(&client->dev, sizeof(*sd3078), GFP_KERNEL); + if (!sd3078) + return -ENOMEM; + + sd3078->regmap = devm_regmap_init_i2c(client, &regmap_config); + if (IS_ERR(sd3078->regmap)) { + dev_err(&client->dev, "regmap allocation failed\n"); + return PTR_ERR(sd3078->regmap); + } + + i2c_set_clientdata(client, sd3078); + + sd3078->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(sd3078->rtc)) + return PTR_ERR(sd3078->rtc); + + sd3078->rtc->ops = &sd3078_rtc_ops; + sd3078->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + sd3078->rtc->range_max = RTC_TIMESTAMP_END_2099; + + ret = rtc_register_device(sd3078->rtc); + if (ret) { + dev_err(&client->dev, "failed to register rtc device\n"); + return ret; + } + + sd3078_enable_reg_write(sd3078); + + return 0; +} + +static const struct i2c_device_id sd3078_id[] = { + {"sd3078", 0}, + { } +}; +MODULE_DEVICE_TABLE(i2c, sd3078_id); + +static const struct of_device_id rtc_dt_match[] = { + { .compatible = "whwave,sd3078" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rtc_dt_match); + +static struct i2c_driver sd3078_driver = { + .driver = { + .name = "sd3078", + .of_match_table = of_match_ptr(rtc_dt_match), + }, + .probe = sd3078_probe, + .id_table = sd3078_id, +}; + +module_i2c_driver(sd3078_driver); + +MODULE_AUTHOR("Dianlong Li "); +MODULE_DESCRIPTION("SD3078 RTC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index b2483a749ac4..0b9eff19149b 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -239,6 +239,9 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id) u32 lpsr; u32 events = 0; + if (data->clk) + clk_enable(data->clk); + regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr); if (lpsr & SNVS_LPSR_LPTA) { @@ -253,6 +256,9 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id) /* clear interrupt status */ regmap_write(data->regmap, data->offset + SNVS_LPSR, lpsr); + if (data->clk) + clk_disable(data->clk); + return events ? IRQ_HANDLED : IRQ_NONE; } diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c index 61c110b2045f..2d24babc4057 100644 --- a/drivers/rtc/rtc-tx4939.c +++ b/drivers/rtc/rtc-tx4939.c @@ -1,11 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * TX4939 internal RTC driver * Based on RBTX49xx patch from CELF patch archive. * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. 
- * * (C) Copyright TOSHIBA CORPORATION 2005-2007 */ #include @@ -65,10 +62,11 @@ static int tx4939_rtc_cmd(struct tx4939_rtc_reg __iomem *rtcreg, int cmd) return 0; } -static int tx4939_rtc_set_mmss(struct device *dev, unsigned long secs) +static int tx4939_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct tx4939rtc_plat_data *pdata = get_tx4939rtc_plat_data(dev); struct tx4939_rtc_reg __iomem *rtcreg = pdata->rtcreg; + unsigned long secs = rtc_tm_to_time64(tm); int i, ret; unsigned char buf[6]; @@ -111,7 +109,7 @@ static int tx4939_rtc_read_time(struct device *dev, struct rtc_time *tm) spin_unlock_irq(&pdata->lock); sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; - rtc_time_to_tm(sec, tm); + rtc_time64_to_tm(sec, tm); return 0; } @@ -123,14 +121,7 @@ static int tx4939_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) unsigned long sec; unsigned char buf[6]; - if (alrm->time.tm_sec < 0 || - alrm->time.tm_min < 0 || - alrm->time.tm_hour < 0 || - alrm->time.tm_mday < 0 || - alrm->time.tm_mon < 0 || - alrm->time.tm_year < 0) - return -EINVAL; - rtc_tm_to_time(&alrm->time, &sec); + sec = rtc_tm_to_time64(&alrm->time); buf[0] = 0; buf[1] = 0; buf[2] = sec; @@ -173,7 +164,7 @@ static int tx4939_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) spin_unlock_irq(&pdata->lock); sec = ((unsigned long)buf[5] << 24) | (buf[4] << 16) | (buf[3] << 8) | buf[2]; - rtc_time_to_tm(sec, &alrm->time); + rtc_time64_to_tm(sec, &alrm->time); return rtc_valid_tm(&alrm->time); } @@ -210,7 +201,7 @@ static const struct rtc_class_ops tx4939_rtc_ops = { .read_time = tx4939_rtc_read_time, .read_alarm = tx4939_rtc_read_alarm, .set_alarm = tx4939_rtc_set_alarm, - .set_mmss = tx4939_rtc_set_mmss, + .set_time = tx4939_rtc_set_time, .alarm_irq_enable = tx4939_rtc_alarm_irq_enable, }; @@ -283,6 +274,7 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev) rtc->ops = &tx4939_rtc_ops; rtc->nvram_old_abi = true; + rtc->range_max = U32_MAX; pdata->rtc = rtc; @@ -315,5 +307,5 @@ module_platform_driver_probe(tx4939_rtc_driver, tx4939_rtc_probe); MODULE_AUTHOR("Atsushi Nemoto "); MODULE_DESCRIPTION("TX4939 internal RTC driver"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:tx4939rtc"); diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c new file mode 100644 index 000000000000..e62bda0cb53e --- /dev/null +++ b/drivers/rtc/rtc-wilco-ec.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RTC interface for Wilco Embedded Controller with R/W abilities + * + * Copyright 2018 Google LLC + * + * The corresponding platform device is typically registered in + * drivers/platform/chrome/wilco_ec/core.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define EC_COMMAND_CMOS 0x7c +#define EC_CMOS_TOD_WRITE 0x02 +#define EC_CMOS_TOD_READ 0x08 + +/** + * struct ec_rtc_read - Format of RTC returned by EC. + * @second: Second value (0..59) + * @minute: Minute value (0..59) + * @hour: Hour value (0..23) + * @day: Day value (1..31) + * @month: Month value (1..12) + * @year: Year value (full year % 100) + * @century: Century value (full year / 100) + * + * All values are presented in binary (not BCD). + */ +struct ec_rtc_read { + u8 second; + u8 minute; + u8 hour; + u8 day; + u8 month; + u8 year; + u8 century; +} __packed; + +/** + * struct ec_rtc_write - Format of RTC sent to the EC. 
+ * @param: EC_CMOS_TOD_WRITE + * @century: Century value (full year / 100) + * @year: Year value (full year % 100) + * @month: Month value (1..12) + * @day: Day value (1..31) + * @hour: Hour value (0..23) + * @minute: Minute value (0..59) + * @second: Second value (0..59) + * @weekday: Day of the week (0=Saturday) + * + * All values are presented in BCD. + */ +struct ec_rtc_write { + u8 param; + u8 century; + u8 year; + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 weekday; +} __packed; + +static int wilco_ec_rtc_read(struct device *dev, struct rtc_time *tm) +{ + struct wilco_ec_device *ec = dev_get_drvdata(dev->parent); + u8 param = EC_CMOS_TOD_READ; + struct ec_rtc_read rtc; + struct wilco_ec_message msg = { + .type = WILCO_EC_MSG_LEGACY, + .flags = WILCO_EC_FLAG_RAW_RESPONSE, + .command = EC_COMMAND_CMOS, + .request_data = &param, + .request_size = sizeof(param), + .response_data = &rtc, + .response_size = sizeof(rtc), + }; + int ret; + + ret = wilco_ec_mailbox(ec, &msg); + if (ret < 0) + return ret; + + tm->tm_sec = rtc.second; + tm->tm_min = rtc.minute; + tm->tm_hour = rtc.hour; + tm->tm_mday = rtc.day; + tm->tm_mon = rtc.month - 1; + tm->tm_year = rtc.year + (rtc.century * 100) - 1900; + tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); + + /* Don't compute day of week, we don't need it. */ + tm->tm_wday = -1; + + return 0; +} + +static int wilco_ec_rtc_write(struct device *dev, struct rtc_time *tm) +{ + struct wilco_ec_device *ec = dev_get_drvdata(dev->parent); + struct ec_rtc_write rtc; + struct wilco_ec_message msg = { + .type = WILCO_EC_MSG_LEGACY, + .flags = WILCO_EC_FLAG_RAW_RESPONSE, + .command = EC_COMMAND_CMOS, + .request_data = &rtc, + .request_size = sizeof(rtc), + }; + int year = tm->tm_year + 1900; + /* + * Convert from 0=Sunday to 0=Saturday for the EC + * We DO need to set weekday because the EC controls battery charging + * schedules that depend on the day of the week. + */ + int wday = tm->tm_wday == 6 ? 
0 : tm->tm_wday + 1; + int ret; + + rtc.param = EC_CMOS_TOD_WRITE; + rtc.century = bin2bcd(year / 100); + rtc.year = bin2bcd(year % 100); + rtc.month = bin2bcd(tm->tm_mon + 1); + rtc.day = bin2bcd(tm->tm_mday); + rtc.hour = bin2bcd(tm->tm_hour); + rtc.minute = bin2bcd(tm->tm_min); + rtc.second = bin2bcd(tm->tm_sec); + rtc.weekday = bin2bcd(wday); + + ret = wilco_ec_mailbox(ec, &msg); + if (ret < 0) + return ret; + + return 0; +} + +static const struct rtc_class_ops wilco_ec_rtc_ops = { + .read_time = wilco_ec_rtc_read, + .set_time = wilco_ec_rtc_write, +}; + +static int wilco_ec_rtc_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + rtc->ops = &wilco_ec_rtc_ops; + /* EC only supports this century */ + rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + rtc->range_max = RTC_TIMESTAMP_END_2099; + rtc->owner = THIS_MODULE; + + return rtc_register_device(rtc); +} + +static struct platform_driver wilco_ec_rtc_driver = { + .driver = { + .name = "rtc-wilco-ec", + }, + .probe = wilco_ec_rtc_probe, +}; + +module_platform_driver(wilco_ec_rtc_driver); + +MODULE_ALIAS("platform:rtc-wilco-ec"); +MODULE_AUTHOR("Nick Crews "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Wilco EC RTC driver"); diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c index c532bd13fbe5..bb950945ec7f 100644 --- a/drivers/rtc/rtc-zynqmp.c +++ b/drivers/rtc/rtc-zynqmp.c @@ -49,7 +49,6 @@ #define RTC_CALIB_DEF 0x198233 #define RTC_CALIB_MASK 0x1FFFFF -#define RTC_SEC_MAX_VAL 0xFFFFFFFF struct xlnx_rtc_dev { struct rtc_device *rtc; @@ -71,9 +70,6 @@ static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm) */ new_time = rtc_tm_to_time64(tm) + 1; - if (new_time > RTC_SEC_MAX_VAL) - return -EINVAL; - /* * Writing into calibration register will clear the Tick Counter and * force the next second to be signaled exactly in 1 second period @@ -154,9 +150,6 @@ static int xlnx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) alarm_time = rtc_tm_to_time64(&alrm->time); - if (alarm_time > RTC_SEC_MAX_VAL) - return -EINVAL; - writel((u32)alarm_time, (xrtcdev->reg_base + RTC_ALRM)); xlnx_rtc_alarm_irq_enable(dev, alrm->enabled); @@ -222,6 +215,13 @@ static int xlnx_rtc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, xrtcdev); + xrtcdev->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(xrtcdev->rtc)) + return PTR_ERR(xrtcdev->rtc); + + xrtcdev->rtc->ops = &xlnx_rtc_ops; + xrtcdev->rtc->range_max = U32_MAX; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); xrtcdev->reg_base = devm_ioremap_resource(&pdev->dev, res); @@ -263,9 +263,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); - xrtcdev->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &xlnx_rtc_ops, THIS_MODULE); - return PTR_ERR_OR_ZERO(xrtcdev->rtc); + return rtc_register_device(xrtcdev->rtc); } static int xlnx_rtc_remove(struct platform_device *pdev) diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 397af07e4d88..e03304fe25bb 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3965,13 +3965,11 @@ int dasd_generic_restore_device(struct ccw_device *cdev) EXPORT_SYMBOL_GPL(dasd_generic_restore_device); static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, - void *rdc_buffer, int rdc_buffer_size, int magic) { struct dasd_ccw_req *cqr; struct ccw1 *ccw; - unsigned long *idaw; cqr = 
dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, NULL); @@ -3986,16 +3984,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, ccw = cqr->cpaddr; ccw->cmd_code = CCW_CMD_RDC; - if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { - idaw = (unsigned long *) (cqr->data); - ccw->cda = (__u32)(addr_t) idaw; - ccw->flags = CCW_FLAG_IDA; - idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); - } else { - ccw->cda = (__u32)(addr_t) rdc_buffer; - ccw->flags = 0; - } - + ccw->cda = (__u32)(addr_t) cqr->data; + ccw->flags = 0; ccw->count = rdc_buffer_size; cqr->startdev = device; cqr->memdev = device; @@ -4013,12 +4003,13 @@ int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, int ret; struct dasd_ccw_req *cqr; - cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, - magic); + cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic); if (IS_ERR(cqr)) return PTR_ERR(cqr); ret = dasd_sleep_on(cqr); + if (ret == 0) + memcpy(rdc_buffer, cqr->data, rdc_buffer_size); dasd_sfree_request(cqr, cqr->memdev); return ret; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 4e7b55a14b1a..6e294b4d3635 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) usrparm.psf_data &= 0x7fffffffULL; usrparm.rssd_result &= 0x7fffffffULL; } + /* at least 2 bytes are accessed and should be allocated */ + if (usrparm.psf_data_len < 2) { + DBF_DEV_EVENT(DBF_WARNING, device, + "Symmetrix ioctl invalid data length %d", + usrparm.psf_data_len); + rc = -EINVAL; + goto out; + } /* alloc I/O data area */ psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index b3fcc24b1182..367e9d384d85 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h @@ -195,7 +195,9 @@ struct read_info_sccb { u16 hcpua; /* 120-121 */ u8 _pad_122[124 - 122]; /* 122-123 */ u32 hmfai; /* 124-127 */ - u8 _pad_128[4096 - 128]; /* 128-4095 */ + u8 _pad_128[134 - 128]; /* 128-133 */ + u8 byte_134; /* 134 */ + u8 _pad_135[4096 - 135]; /* 135-4095 */ } __packed __aligned(PAGE_SIZE); struct read_storage_sccb { diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c index e792cee3b51c..8332788681c4 100644 --- a/drivers/s390/char/sclp_early.c +++ b/drivers/s390/char/sclp_early.c @@ -44,6 +44,8 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb) S390_lowcore.machine_flags |= MACHINE_FLAG_ESOP; if (sccb->fac91 & 0x40) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_GUEST; + if (sccb->cpuoff > 134) + sclp.has_diag318 = !!(sccb->byte_134 & 0x80); sclp.rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; sclp.rzm = sccb->rnsize ? 
sccb->rnsize : sccb->rnsize2; sclp.rzm <<= 20; diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index e324d890a4f6..a59887fad13e 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -181,7 +181,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, } static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, - void **sbals_array, int i) + struct qdio_buffer **sbals_array, int i) { struct qdio_q *prev; int j; @@ -212,8 +212,8 @@ static void setup_queues(struct qdio_irq *irq_ptr, struct qdio_initialize *qdio_init) { struct qdio_q *q; - void **input_sbal_array = qdio_init->input_sbal_addr_array; - void **output_sbal_array = qdio_init->output_sbal_addr_array; + struct qdio_buffer **input_sbal_array = qdio_init->input_sbal_addr_array; + struct qdio_buffer **output_sbal_array = qdio_init->output_sbal_addr_array; struct qdio_outbuf_state *output_sbal_state_array = qdio_init->output_sbal_state_array; int i; diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 70a006ba4d05..384b3987eeb4 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -283,6 +283,33 @@ static long copy_ccw_from_iova(struct channel_program *cp, #define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC)) +/* + * is_cpa_within_range() + * + * @cpa: channel program address being questioned + * @head: address of the beginning of a CCW chain + * @len: number of CCWs within the chain + * + * Determine whether the address of a CCW (whether a new chain, + * or the target of a TIC) falls within a range (including the end points). + * + * Returns 1 if yes, 0 if no. + */ +static inline int is_cpa_within_range(u32 cpa, u32 head, int len) +{ + u32 tail = head + (len - 1) * sizeof(struct ccw1); + + return (head <= cpa && cpa <= tail); +} + +static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len) +{ + if (!ccw_is_tic(ccw)) + return 0; + + return is_cpa_within_range(ccw->cda, head, len); +} + static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len) { struct ccwchain *chain; @@ -392,7 +419,15 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) return -EOPNOTSUPP; } - if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) + /* + * We want to keep counting if the current CCW has the + * command-chaining flag enabled, or if it is a TIC CCW + * that loops back into the current chain. The latter + * is used for device orientation, where the CCW PRIOR to + * the TIC can either jump to the TIC or a CCW immediately + * after the TIC, depending on the results of its operation. 
+ */ + if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt)) break; ccw++; @@ -408,13 +443,11 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp) static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp) { struct ccwchain *chain; - u32 ccw_head, ccw_tail; + u32 ccw_head; list_for_each_entry(chain, &cp->ccwchain_list, next) { ccw_head = chain->ch_iova; - ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1); - - if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail)) + if (is_cpa_within_range(tic->cda, ccw_head, chain->ch_len)) return 1; } @@ -481,13 +514,11 @@ static int ccwchain_fetch_tic(struct ccwchain *chain, { struct ccw1 *ccw = chain->ch_ccw + idx; struct ccwchain *iter; - u32 ccw_head, ccw_tail; + u32 ccw_head; list_for_each_entry(iter, &cp->ccwchain_list, next) { ccw_head = iter->ch_iova; - ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1); - - if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) { + if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) { ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) + (ccw->cda - ccw_head)); return 0; @@ -829,7 +860,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) { struct ccwchain *chain; u32 cpa = scsw->cmd.cpa; - u32 ccw_head, ccw_tail; + u32 ccw_head; /* * LATER: @@ -839,9 +870,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw) */ list_for_each_entry(chain, &cp->ccwchain_list, next) { ccw_head = (u32)(u64)chain->ch_ccw; - ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1); - - if ((ccw_head <= cpa) && (cpa <= ccw_tail)) { + if (is_cpa_within_range(cpa, ccw_head, chain->ch_len)) { /* * (cpa - ccw_head) is the offset value of the host * physical ccw to its chain head. diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 48ea0004a56d..e15816ff1265 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr) static inline int ap_test_config_card_id(unsigned int id) { if (!ap_configuration) /* QCI not supported */ - return 1; + /* only ids 0...3F may be probed */ + return id < 0x40 ? 1 : 0; return ap_test_config(ap_configuration->apm, id); } @@ -1334,6 +1335,16 @@ static int __match_queue_device_with_qid(struct device *dev, void *data) return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data; } +/* + * Helper function to be used with bus_find_dev + * matches any queue device with given queue id + */ +static int __match_queue_device_with_queue_id(struct device *dev, void *data) +{ + return is_queue_dev(dev) + && AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long) data; +} + /* * Helper function for ap_scan_bus(). * Does the scan bus job for the given adapter id. 
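The __match_queue_device_with_queue_id() helper added above differs from the existing __match_queue_device_with_qid() in that it compares only the queue (domain) part of the APQN, so it matches a queue device on any adapter. A minimal sketch of how such a match callback is consumed through bus_find_device() follows; it mirrors the ap_scan_bus() call in the next hunk, and the wrapper name ap_find_queue_dev_by_queue_id is illustrative only, not part of the patch:

/*
 * Illustrative sketch (not part of the patch): look up any AP queue device
 * with a given queue (domain) id.  bus_find_device() walks ap_bus_type and
 * returns the first device for which the match callback returns nonzero,
 * with a reference held; the caller drops it with put_device(), as
 * ap_scan_bus() does in the hunk below.
 */
static struct device *ap_find_queue_dev_by_queue_id(int queue_id)
{
	return bus_find_device(&ap_bus_type, NULL, (void *)(long) queue_id,
			       __match_queue_device_with_queue_id);
}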
@@ -1434,8 +1445,13 @@ static void _ap_scan_bus_adapter(int id) borked = aq->state == AP_STATE_BORKED; spin_unlock_bh(&aq->lock); } - if (borked) /* Remove broken device */ + if (borked) { + /* Remove broken device */ + AP_DBF(DBF_DEBUG, + "removing broken queue=%02x.%04x\n", + id, dom); device_unregister(dev); + } put_device(dev); continue; } @@ -1505,7 +1521,7 @@ static void ap_scan_bus(struct work_struct *unused) struct device *dev = bus_find_device(&ap_bus_type, NULL, (void *)(long) ap_domain_index, - __match_queue_device_with_qid); + __match_queue_device_with_queue_id); if (dev) put_device(dev); else diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index bfc66e4a9de1..d0059eae5d94 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -91,7 +91,8 @@ enum ap_state { AP_STATE_WORKING, AP_STATE_QUEUE_FULL, AP_STATE_SUSPEND_WAIT, - AP_STATE_BORKED, + AP_STATE_UNBOUND, /* momentary not bound to a driver */ + AP_STATE_BORKED, /* broken */ NR_AP_STATES }; diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 576ac08777c5..ba261210c6da 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = { [AP_EVENT_POLL] = ap_sm_suspend_read, [AP_EVENT_TIMEOUT] = ap_sm_nop, }, + [AP_STATE_UNBOUND] = { + [AP_EVENT_POLL] = ap_sm_nop, + [AP_EVENT_TIMEOUT] = ap_sm_nop, + }, [AP_STATE_BORKED] = { [AP_EVENT_POLL] = ap_sm_nop, [AP_EVENT_TIMEOUT] = ap_sm_nop, @@ -725,6 +729,7 @@ static void __ap_flush_queue(struct ap_queue *aq) ap_msg->rc = -EAGAIN; ap_msg->receive(aq, ap_msg, NULL); } + aq->queue_count = 0; } void ap_flush_queue(struct ap_queue *aq) @@ -743,7 +748,7 @@ void ap_queue_remove(struct ap_queue *aq) /* reset with zero, also clears irq registration */ spin_lock_bh(&aq->lock); ap_zapq(aq->qid); - aq->state = AP_STATE_BORKED; + aq->state = AP_STATE_UNBOUND; spin_unlock_bh(&aq->lock); } EXPORT_SYMBOL(ap_queue_remove); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 2f92bbed4bf6..3e85d665c572 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -1079,7 +1079,7 @@ int pkey_verifykey(const struct pkey_seckey *seckey, rc = mkvp_cache_fetch(cardnr, domain, mkvp); if (rc) goto out; - if (t->mkvp == mkvp[1]) { + if (t->mkvp == mkvp[1] && t->mkvp != mkvp[0]) { DEBUG_DBG("%s secure key has old mkvp\n", __func__); if (pattributes) *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index 31c6c847eaca..e9824c35c34f 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -15,7 +15,6 @@ #include "vfio_ap_private.h" #define VFIO_AP_ROOT_NAME "vfio_ap" -#define VFIO_AP_DEV_TYPE_NAME "ap_matrix" #define VFIO_AP_DEV_NAME "matrix" MODULE_AUTHOR("IBM Corporation"); @@ -24,10 +23,6 @@ MODULE_LICENSE("GPL v2"); static struct ap_driver vfio_ap_drv; -static struct device_type vfio_ap_dev_type = { - .name = VFIO_AP_DEV_TYPE_NAME, -}; - struct ap_matrix_dev *matrix_dev; /* Only type 10 adapters (CEX4 and later) are supported @@ -62,6 +57,22 @@ static void vfio_ap_matrix_dev_release(struct device *dev) kfree(matrix_dev); } +static int matrix_bus_match(struct device *dev, struct device_driver *drv) +{ + return 1; +} + +static struct bus_type matrix_bus = { + .name = "matrix", + .match = &matrix_bus_match, +}; + +static struct device_driver matrix_driver = { + .name = "vfio_ap", + 
.bus = &matrix_bus, + .suppress_bind_attrs = true, +}; + static int vfio_ap_matrix_dev_create(void) { int ret; @@ -71,6 +82,10 @@ static int vfio_ap_matrix_dev_create(void) if (IS_ERR(root_device)) return PTR_ERR(root_device); + ret = bus_register(&matrix_bus); + if (ret) + goto bus_register_err; + matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL); if (!matrix_dev) { ret = -ENOMEM; @@ -87,30 +102,41 @@ static int vfio_ap_matrix_dev_create(void) mutex_init(&matrix_dev->lock); INIT_LIST_HEAD(&matrix_dev->mdev_list); - matrix_dev->device.type = &vfio_ap_dev_type; dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME); matrix_dev->device.parent = root_device; + matrix_dev->device.bus = &matrix_bus; matrix_dev->device.release = vfio_ap_matrix_dev_release; - matrix_dev->device.driver = &vfio_ap_drv.driver; + matrix_dev->vfio_ap_drv = &vfio_ap_drv; ret = device_register(&matrix_dev->device); if (ret) goto matrix_reg_err; + ret = driver_register(&matrix_driver); + if (ret) + goto matrix_drv_err; + return 0; +matrix_drv_err: + device_unregister(&matrix_dev->device); matrix_reg_err: put_device(&matrix_dev->device); matrix_alloc_err: + bus_unregister(&matrix_bus); +bus_register_err: root_device_unregister(root_device); - return ret; } static void vfio_ap_matrix_dev_destroy(void) { + struct device *root_device = matrix_dev->device.parent; + + driver_unregister(&matrix_driver); device_unregister(&matrix_dev->device); - root_device_unregister(matrix_dev->device.parent); + bus_unregister(&matrix_bus); + root_device_unregister(root_device); } static int __init vfio_ap_init(void) diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 272ef427dcc0..900b9cf20ca5 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -198,8 +198,8 @@ static int vfio_ap_verify_queue_reserved(unsigned long *apid, qres.apqi = apqi; qres.reserved = false; - ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres, - vfio_ap_has_queue); + ret = driver_for_each_device(&matrix_dev->vfio_ap_drv->driver, NULL, + &qres, vfio_ap_has_queue); if (ret) return ret; diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 5675492233c7..76b7f98e47e9 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -40,6 +40,7 @@ struct ap_matrix_dev { struct ap_config_info info; struct list_head mdev_list; struct mutex lock; + struct ap_driver *vfio_ap_drv; }; extern struct ap_matrix_dev *matrix_dev; diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index f2d6bbe57a6f..bc55ec316adb 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o obj-$(CONFIG_LCS) += lcs.o -qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o +qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o obj-$(CONFIG_QETH) += qeth.o qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o obj-$(CONFIG_QETH_L2) += qeth_l2.o diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index ed8e58f09054..3e132592c1fe 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -141,10 +141,13 @@ static int register_ieq(struct ism_dev *ism) static int unregister_sba(struct ism_dev *ism) { + int ret; + if (!ism->sba) return 0; - if (ism_cmd_simple(ism, ISM_UNREG_SBA)) + ret = ism_cmd_simple(ism, ISM_UNREG_SBA); + if (ret 
&& ret != ISM_ERROR) return -EIO; dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, @@ -158,10 +161,13 @@ static int unregister_sba(struct ism_dev *ism) static int unregister_ieq(struct ism_dev *ism) { + int ret; + if (!ism->ieq) return 0; - if (ism_cmd_simple(ism, ISM_UNREG_IEQ)) + ret = ism_cmd_simple(ism, ISM_UNREG_IEQ); + if (ret && ret != ISM_ERROR) return -EIO; dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, @@ -287,7 +293,7 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) cmd.request.dmb_tok = dmb->dmb_tok; ret = ism_cmd(ism, &cmd); - if (ret) + if (ret && ret != ISM_ERROR) goto out; ism_free_dmb(ism, dmb); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0ee026947f20..c851cf6e01c4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -18,10 +18,10 @@ #include #include #include -#include #include #include #include +#include #include #include @@ -34,6 +34,8 @@ #include #include +#include + #include "qeth_core_mpc.h" /** @@ -112,59 +114,6 @@ static inline u32 qeth_get_device_id(struct ccw_device *cdev) #define CCW_DEVID(cdev) (qeth_get_device_id(cdev)) #define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card))) -/** - * card stuff - */ -struct qeth_perf_stats { - unsigned int bufs_rec; - unsigned int bufs_sent; - unsigned int buf_elements_sent; - - unsigned int skbs_sent_pack; - unsigned int bufs_sent_pack; - - unsigned int sc_dp_p; - unsigned int sc_p_dp; - /* qdio_cq_handler: number of times called, time spent in */ - __u64 cq_start_time; - unsigned int cq_cnt; - unsigned int cq_time; - /* qdio_input_handler: number of times called, time spent in */ - __u64 inbound_start_time; - unsigned int inbound_cnt; - unsigned int inbound_time; - /* qeth_send_packet: number of times called, time spent in */ - __u64 outbound_start_time; - unsigned int outbound_cnt; - unsigned int outbound_time; - /* qdio_output_handler: number of times called, time spent in */ - __u64 outbound_handler_start_time; - unsigned int outbound_handler_cnt; - unsigned int outbound_handler_time; - /* number of calls to and time spent in do_QDIO for inbound queue */ - __u64 inbound_do_qdio_start_time; - unsigned int inbound_do_qdio_cnt; - unsigned int inbound_do_qdio_time; - /* number of calls to and time spent in do_QDIO for outbound queues */ - __u64 outbound_do_qdio_start_time; - unsigned int outbound_do_qdio_cnt; - unsigned int outbound_do_qdio_time; - unsigned int large_send_bytes; - unsigned int large_send_cnt; - unsigned int sg_skbs_sent; - /* initial values when measuring starts */ - unsigned long initial_rx_packets; - unsigned long initial_tx_packets; - /* inbound scatter gather data */ - unsigned int sg_skbs_rx; - unsigned int sg_frags_rx; - unsigned int sg_alloc_page_rx; - unsigned int tx_csum; - unsigned int tx_lin; - unsigned int tx_linfail; - unsigned int rx_csum; -}; - /* Routing stuff */ struct qeth_routing_info { enum qeth_routing_types type; @@ -494,10 +443,53 @@ enum qeth_out_q_states { QETH_OUT_Q_LOCKED_FLUSH, }; +#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val)) +#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1) + +#define QETH_TXQ_STAT_ADD(_q, _stat, _val) ((_q)->stats._stat += (_val)) +#define QETH_TXQ_STAT_INC(_q, _stat) QETH_TXQ_STAT_ADD(_q, _stat, 1) + +struct qeth_card_stats { + u64 rx_bufs; + u64 rx_skb_csum; + u64 rx_sg_skbs; + u64 rx_sg_frags; + u64 rx_sg_alloc_page; + + /* rtnl_link_stats64 */ + u64 rx_packets; + u64 rx_bytes; + u64 rx_errors; + u64 rx_dropped; + u64 
rx_multicast; + u64 tx_errors; +}; + +struct qeth_out_q_stats { + u64 bufs; + u64 bufs_pack; + u64 buf_elements; + u64 skbs_pack; + u64 skbs_sg; + u64 skbs_csum; + u64 skbs_tso; + u64 skbs_linearized; + u64 skbs_linearized_fail; + u64 tso_bytes; + u64 packing_mode_switch; + + /* rtnl_link_stats64 */ + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; + u64 tx_dropped; +}; + struct qeth_qdio_out_q { struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; struct qdio_outbuf_state *bufstates; /* convenience pointer */ + struct qeth_out_q_stats stats; int queue_no; struct qeth_card *card; atomic_t state; @@ -527,7 +519,7 @@ struct qeth_qdio_info { /* output */ int no_out_queues; - struct qeth_qdio_out_q **out_qs; + struct qeth_qdio_out_q *out_qs[QETH_MAX_QUEUES]; struct qdio_outbuf_state *out_bufstates; /* priority queueing */ @@ -559,8 +551,6 @@ enum qeth_card_states { CARD_STATE_DOWN, CARD_STATE_HARDSETUP, CARD_STATE_SOFTSETUP, - CARD_STATE_UP, - CARD_STATE_RECOVER, }; /** @@ -594,8 +584,8 @@ struct qeth_channel; struct qeth_cmd_buffer { enum qeth_cmd_buffer_state state; struct qeth_channel *channel; + struct qeth_reply *reply; unsigned char *data; - int rc; void (*callback)(struct qeth_card *card, struct qeth_channel *channel, struct qeth_cmd_buffer *iob); }; @@ -672,6 +662,7 @@ struct qeth_card_info { unsigned short chpid; __u16 func_level; char mcl_level[QETH_MCL_LENGTH + 1]; + u8 open_when_online:1; int guestlan; int mac_bits; enum qeth_card_types type; @@ -701,7 +692,6 @@ struct qeth_card_options { struct qeth_vnicc_info vnicc; /* VNICC options */ int fake_broadcast; enum qeth_discipline_id layer; - int performance_stats; int rx_sg_cb; enum qeth_ipa_isolation_modes isolation; enum qeth_ipa_isolation_modes prev_isolation; @@ -741,11 +731,6 @@ struct qeth_discipline { struct qeth_ipa_cmd *cmd); }; -struct qeth_vlan_vid { - struct list_head list; - unsigned short vid; -}; - enum qeth_addr_disposition { QETH_DISP_ADDR_DELETE = 0, QETH_DISP_ADDR_DO_NOTHING = 1, @@ -782,18 +767,16 @@ struct qeth_card { struct qeth_channel data; struct net_device *dev; - struct net_device_stats stats; - + struct qeth_card_stats stats; struct qeth_card_info info; struct qeth_token token; struct qeth_seqno seqno; struct qeth_card_options options; + struct workqueue_struct *event_wq; wait_queue_head_t wait_q; spinlock_t mclock; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct mutex vid_list_mutex; /* vid_list */ - struct list_head vid_list; DECLARE_HASHTABLE(mac_htable, 4); DECLARE_HASHTABLE(ip_htable, 4); DECLARE_HASHTABLE(ip_mc_htable, 4); @@ -802,13 +785,11 @@ struct qeth_card { unsigned long thread_start_mask; unsigned long thread_allowed_mask; unsigned long thread_running_mask; - struct task_struct *recovery_task; spinlock_t ip_lock; struct qeth_ipato ipato; struct list_head cmd_waiter_list; /* QDIO buffer handling */ struct qeth_qdio_info qdio; - struct qeth_perf_stats perf_stats; int read_or_write_problem; struct qeth_osn_info osn_info; struct qeth_discipline *discipline; @@ -825,6 +806,11 @@ struct qeth_card { struct work_struct close_dev_work; }; +static inline bool qeth_card_hw_is_reachable(struct qeth_card *card) +{ + return card->state == CARD_STATE_SOFTSETUP; +} + struct qeth_trap_id { __u16 lparnr; char vmname[8]; @@ -865,11 +851,6 @@ static inline int qeth_get_elements_for_range(addr_t start, addr_t end) return PFN_UP(end) - PFN_DOWN(start); } -static inline int qeth_get_micros(void) -{ - return (int) (get_tod_clock() >> 12); 
-} - static inline int qeth_get_ip_version(struct sk_buff *skb) { struct vlan_ethhdr *veth = vlan_eth_hdr(skb); @@ -894,8 +875,7 @@ static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb, if ((card->dev->features & NETIF_F_RXCSUM) && (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) { skb->ip_summed = CHECKSUM_UNNECESSARY; - if (card->options.performance_stats) - card->perf_stats.rx_csum++; + QETH_CARD_STAT_INC(card, rx_skb_csum); } else { skb->ip_summed = CHECKSUM_NONE; } @@ -957,14 +937,14 @@ static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, extern struct qeth_discipline qeth_l2_discipline; extern struct qeth_discipline qeth_l3_discipline; +extern const struct ethtool_ops qeth_ethtool_ops; +extern const struct ethtool_ops qeth_osn_ethtool_ops; extern const struct attribute_group *qeth_generic_attr_groups[]; extern const struct attribute_group *qeth_osn_attr_groups[]; extern const struct attribute_group qeth_device_attr_group; extern const struct attribute_group qeth_device_blkt_group; extern const struct device_type qeth_generic_devtype; -extern struct workqueue_struct *qeth_wq; -int qeth_card_hw_is_reachable(struct qeth_card *); const char *qeth_get_cardname_short(struct qeth_card *); int qeth_realloc_buffer_pool(struct qeth_card *, int); int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); @@ -976,11 +956,8 @@ extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; struct net_device *qeth_clone_netdev(struct net_device *orig); struct qeth_card *qeth_get_card_by_busid(char *bus_id); -void qeth_set_recovery_task(struct qeth_card *); -void qeth_clear_recovery_task(struct qeth_card *); void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); int qeth_threads_running(struct qeth_card *, unsigned long); -int qeth_wait_for_threads(struct qeth_card *, unsigned long); int qeth_do_run_thread(struct qeth_card *, unsigned long); void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); @@ -1004,38 +981,29 @@ void qeth_clear_working_pool_list(struct qeth_card *); void qeth_clear_cmd_buffers(struct qeth_channel *); void qeth_clear_qdio_buffers(struct qeth_card *); void qeth_setadp_promisc_mode(struct qeth_card *); -struct net_device_stats *qeth_get_stats(struct net_device *); int qeth_setadpparms_change_macaddr(struct qeth_card *); void qeth_tx_timeout(struct net_device *); void qeth_prepare_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *); void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); -void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob); +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + u16 cmd_length); struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); -int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, - int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), - void *reply_param); +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info); unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len, int elements_needed); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, 
int cmd); -int qeth_core_get_sset_count(struct net_device *, int); -void qeth_core_get_ethtool_stats(struct net_device *, - struct ethtool_stats *, u64 *); -void qeth_core_get_strings(struct net_device *, u32, u8 *); -void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); -int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd); int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); void qeth_trace_features(struct qeth_card *); -void qeth_close_dev(struct qeth_card *); int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long); struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs, @@ -1047,11 +1015,16 @@ netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); netdev_features_t qeth_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); +void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); +int qeth_open(struct net_device *dev); +int qeth_stop(struct net_device *dev); + int qeth_vm_request_mac(struct qeth_card *card); int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type, - void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, + void (*fill_header)(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len)); /* exports for OSN */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e63e03143ca7..197b0f5b63e7 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -74,35 +74,15 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); -struct workqueue_struct *qeth_wq; -EXPORT_SYMBOL_GPL(qeth_wq); - -int qeth_card_hw_is_reachable(struct qeth_card *card) -{ - return (card->state == CARD_STATE_SOFTSETUP) || - (card->state == CARD_STATE_UP); -} -EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable); - static void qeth_close_dev_handler(struct work_struct *work) { struct qeth_card *card; card = container_of(work, struct qeth_card, close_dev_work); QETH_CARD_TEXT(card, 2, "cldevhdl"); - rtnl_lock(); - dev_close(card->dev); - rtnl_unlock(); ccwgroup_set_offline(card->gdev); } -void qeth_close_dev(struct qeth_card *card) -{ - QETH_CARD_TEXT(card, 2, "cldevsubm"); - queue_work(qeth_wq, &card->close_dev_work); -} -EXPORT_SYMBOL_GPL(qeth_close_dev); - static const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { @@ -193,23 +173,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card) return "n/a"; } -void qeth_set_recovery_task(struct qeth_card *card) -{ - card->recovery_task = current; -} -EXPORT_SYMBOL_GPL(qeth_set_recovery_task); - -void qeth_clear_recovery_task(struct qeth_card *card) -{ - card->recovery_task = NULL; -} -EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); - -static bool qeth_is_recovery_task(const struct qeth_card *card) -{ - return card->recovery_task == current; -} - void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, int clear_start_mask) { 
@@ -236,15 +199,6 @@ int qeth_threads_running(struct qeth_card *card, unsigned long threads) } EXPORT_SYMBOL_GPL(qeth_threads_running); -int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) -{ - if (qeth_is_recovery_task(card)) - return 0; - return wait_event_interruptible(card->wait_q, - qeth_threads_running(card, threads) == 0); -} -EXPORT_SYMBOL_GPL(qeth_wait_for_threads); - void qeth_clear_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; @@ -292,8 +246,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) { QETH_CARD_TEXT(card, 2, "realcbp"); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) + if (card->state != CARD_STATE_DOWN) return -EPERM; /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ @@ -566,6 +519,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", rc, CARD_DEVID(card)); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); card->read_or_write_problem = 1; qeth_schedule_recovery(card); wake_up(&card->wait_q); @@ -592,6 +546,7 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) if (reply) { refcount_set(&reply->refcnt, 1); atomic_set(&reply->received, 0); + init_waitqueue_head(&reply->wait_q); } return reply; } @@ -607,6 +562,26 @@ static void qeth_put_reply(struct qeth_reply *reply) kfree(reply); } +static void qeth_enqueue_reply(struct qeth_card *card, struct qeth_reply *reply) +{ + spin_lock_irq(&card->lock); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irq(&card->lock); +} + +static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply) +{ + spin_lock_irq(&card->lock); + list_del(&reply->list); + spin_unlock_irq(&card->lock); +} + +static void qeth_notify_reply(struct qeth_reply *reply) +{ + atomic_inc(&reply->received); + wake_up(&reply->wait_q); +} + static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, struct qeth_card *card) { @@ -644,7 +619,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, dev_err(&card->gdev->dev, "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", QETH_CARD_IFNAME(card)); - qeth_close_dev(card); + schedule_work(&card->close_dev_work); } else { dev_warn(&card->gdev->dev, "The link for interface %s on CHPID 0x%X failed\n", @@ -683,19 +658,15 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, void qeth_clear_ipacmd_list(struct qeth_card *card) { - struct qeth_reply *reply, *r; + struct qeth_reply *reply; unsigned long flags; QETH_CARD_TEXT(card, 4, "clipalst"); spin_lock_irqsave(&card->lock, flags); - list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { - qeth_get_reply(reply); + list_for_each_entry(reply, &card->cmd_waiter_list, list) { reply->rc = -EIO; - atomic_inc(&reply->received); - list_del_init(&reply->list); - wake_up(&reply->wait_q); - qeth_put_reply(reply); + qeth_notify_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); } @@ -752,7 +723,10 @@ void qeth_release_buffer(struct qeth_channel *channel, spin_lock_irqsave(&channel->iob_lock, flags); iob->state = BUF_STATE_FREE; iob->callback = qeth_send_control_data_cb; - iob->rc = 0; + if (iob->reply) { + qeth_put_reply(iob->reply); + iob->reply = NULL; + } spin_unlock_irqrestore(&channel->iob_lock, flags); wake_up(&channel->wait_q); } @@ -765,6 +739,17 @@ static void 
qeth_release_buffer_cb(struct qeth_card *card, qeth_release_buffer(channel, iob); } +static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) +{ + struct qeth_reply *reply = iob->reply; + + if (reply) { + reply->rc = rc; + qeth_notify_reply(reply); + } + qeth_release_buffer(iob->channel, iob); +} + static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) { struct qeth_cmd_buffer *buffer = NULL; @@ -800,9 +785,9 @@ static void qeth_send_control_data_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob) { struct qeth_ipa_cmd *cmd = NULL; - struct qeth_reply *reply, *r; + struct qeth_reply *reply = NULL; + struct qeth_reply *r; unsigned long flags; - int keep_reply; int rc = 0; QETH_CARD_TEXT(card, 4, "sndctlcb"); @@ -834,44 +819,40 @@ static void qeth_send_control_data_cb(struct qeth_card *card, goto out; } + /* match against pending cmd requests */ spin_lock_irqsave(&card->lock, flags); - list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { - if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || - ((cmd) && (reply->seqno == cmd->hdr.seqno))) { + list_for_each_entry(r, &card->cmd_waiter_list, list) { + if ((r->seqno == QETH_IDX_COMMAND_SEQNO) || + (cmd && (r->seqno == cmd->hdr.seqno))) { + reply = r; + /* take the object outside the lock */ qeth_get_reply(reply); - list_del_init(&reply->list); - spin_unlock_irqrestore(&card->lock, flags); - keep_reply = 0; - if (reply->callback != NULL) { - if (cmd) { - reply->offset = (__u16)((char *)cmd - - (char *)iob->data); - keep_reply = reply->callback(card, - reply, - (unsigned long)cmd); - } else - keep_reply = reply->callback(card, - reply, - (unsigned long)iob); - } - if (cmd) - reply->rc = (u16) cmd->hdr.return_code; - else if (iob->rc) - reply->rc = iob->rc; - if (keep_reply) { - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, - &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); - } else { - atomic_inc(&reply->received); - wake_up(&reply->wait_q); - } - qeth_put_reply(reply); - goto out; + break; } } spin_unlock_irqrestore(&card->lock, flags); + + if (!reply) + goto out; + + if (!reply->callback) { + rc = 0; + } else { + if (cmd) { + reply->offset = (u16)((char *)cmd - (char *)iob->data); + rc = reply->callback(card, reply, (unsigned long)cmd); + } else { + rc = reply->callback(card, reply, (unsigned long)iob); + } + } + + if (rc <= 0) { + reply->rc = rc; + qeth_notify_reply(reply); + } + + qeth_put_reply(reply); + out: memcpy(&card->seqno.pdu_hdr_ack, QETH_PDU_HEADER_SEQ_NO(iob->data), @@ -1002,9 +983,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, return 0; } -static long qeth_check_irb_error(struct qeth_card *card, - struct ccw_device *cdev, unsigned long intparm, - struct irb *irb) +static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, + unsigned long intparm, struct irb *irb) { if (!IS_ERR(irb)) return 0; @@ -1015,7 +995,7 @@ static long qeth_check_irb_error(struct qeth_card *card, CCW_DEVID(cdev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); - break; + return -EIO; case -ETIMEDOUT: dev_warn(&cdev->dev, "A hardware operation timed out" " on the device\n"); @@ -1027,14 +1007,14 @@ static long qeth_check_irb_error(struct qeth_card *card, wake_up(&card->wait_q); } } - break; + return -ETIMEDOUT; default: QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", PTR_ERR(irb), CCW_DEVID(cdev)); QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT(card, 2, " rc???"); + return 
PTR_ERR(irb); } - return PTR_ERR(irb); } static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, @@ -1069,10 +1049,11 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, if (qeth_intparm_is_iob(intparm)) iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - if (qeth_check_irb_error(card, cdev, intparm, irb)) { + rc = qeth_check_irb_error(card, cdev, intparm, irb); + if (rc) { /* IO was terminated, free its resources. */ if (iob) - qeth_release_buffer(iob->channel, iob); + qeth_cancel_cmd(iob, rc); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); return; @@ -1127,6 +1108,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, rc = qeth_get_problem(card, cdev, irb); if (rc) { card->read_or_write_problem = 1; + if (iob) + qeth_cancel_cmd(iob, rc); qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1288,7 +1271,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) channel->iob[cnt].state = BUF_STATE_FREE; channel->iob[cnt].channel = channel; channel->iob[cnt].callback = qeth_send_control_data_cb; - channel->iob[cnt].rc = 0; } if (cnt < QETH_CMD_BUFFER_NO) { qeth_clean_channel(channel); @@ -1430,7 +1412,6 @@ static void qeth_setup_card(struct qeth_card *card) spin_lock_init(&card->thread_mask_lock); mutex_init(&card->conf_mutex); mutex_init(&card->discipline_mutex); - mutex_init(&card->vid_list_mutex); INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); INIT_LIST_HEAD(&card->cmd_waiter_list); init_waitqueue_head(&card->wait_q); @@ -1466,6 +1447,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) CARD_RDEV(card) = gdev->cdev[0]; CARD_WDEV(card) = gdev->cdev[1]; CARD_DDEV(card) = gdev->cdev[2]; + + card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); + if (!card->event_wq) + goto out_wq; if (qeth_setup_channel(&card->read, true)) goto out_ip; if (qeth_setup_channel(&card->write, true)) @@ -1481,6 +1466,8 @@ out_data: out_channel: qeth_clean_channel(&card->read); out_ip: + destroy_workqueue(card->event_wq); +out_wq: dev_set_drvdata(&gdev->dev, NULL); kfree(card); out: @@ -1809,6 +1796,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card, QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -1878,6 +1866,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card, rc); QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); wake_up(&card->wait_q); return rc; } @@ -2008,7 +1997,7 @@ void qeth_prepare_control_data(struct qeth_card *card, int len, card->seqno.pdu_hdr++; memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); - QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); + QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN)); } EXPORT_SYMBOL_GPL(qeth_prepare_control_data); @@ -2025,24 +2014,22 @@ EXPORT_SYMBOL_GPL(qeth_prepare_control_data); * for the IPA commands. * @reply_param: private pointer passed to the callback * - * Returns the value of the `return_code' field of the response - * block returned from the hardware, or other error indication. - * Value of zero indicates successful execution of the command. 
- * * Callback function gets called one or more times, with cb_cmd * pointing to the response returned by the hardware. Callback - * function must return non-zero if more reply blocks are expected, - * and zero if the last or only reply block is received. Callback - * function can get the value of the reply_param pointer from the + * function must return + * > 0 if more reply blocks are expected, + * 0 if the last or only reply block is received, and + * < 0 on error. + * Callback function can get the value of the reply_param pointer from the * field 'param' of the structure qeth_reply. */ -int qeth_send_control_data(struct qeth_card *card, int len, - struct qeth_cmd_buffer *iob, - int (*reply_cb)(struct qeth_card *cb_card, - struct qeth_reply *cb_reply, - unsigned long cb_cmd), - void *reply_param) +static int qeth_send_control_data(struct qeth_card *card, int len, + struct qeth_cmd_buffer *iob, + int (*reply_cb)(struct qeth_card *cb_card, + struct qeth_reply *cb_reply, + unsigned long cb_cmd), + void *reply_param) { struct qeth_channel *channel = iob->channel; int rc; @@ -2058,12 +2045,15 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply = qeth_alloc_reply(card); if (!reply) { + qeth_release_buffer(channel, iob); return -ENOMEM; } reply->callback = reply_cb; reply->param = reply_param; - init_waitqueue_head(&reply->wait_q); + /* pairs with qeth_release_buffer(): */ + qeth_get_reply(reply); + iob->reply = reply; while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ; @@ -2078,9 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, } qeth_prepare_control_data(card, len, iob); - spin_lock_irq(&card->lock); - list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irq(&card->lock); + qeth_enqueue_reply(card, reply); timeout = jiffies + event_timeout; @@ -2093,10 +2081,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", CARD_DEVID(card), rc); QETH_CARD_TEXT_(card, 2, " err%d", rc); - spin_lock_irq(&card->lock); - list_del_init(&reply->list); + qeth_dequeue_reply(card, reply); qeth_put_reply(reply); - spin_unlock_irq(&card->lock); qeth_release_buffer(channel, iob); atomic_set(&channel->irq_pending, 0); wake_up(&card->wait_q); @@ -2118,21 +2104,16 @@ int qeth_send_control_data(struct qeth_card *card, int len, } } + qeth_dequeue_reply(card, reply); rc = reply->rc; qeth_put_reply(reply); return rc; time_err: - reply->rc = -ETIME; - spin_lock_irq(&card->lock); - list_del_init(&reply->list); - spin_unlock_irq(&card->lock); - atomic_inc(&reply->received); - rc = reply->rc; + qeth_dequeue_reply(card, reply); qeth_put_reply(reply); - return rc; + return -ETIME; } -EXPORT_SYMBOL_GPL(qeth_send_control_data); static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -2337,9 +2318,8 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, QETH_DBF_TEXT(SETUP, 2, "olmlimit"); dev_err(&card->gdev->dev, "A connection could not be " "established because of an OLM limit\n"); - iob->rc = -EMLINK; + return -EMLINK; } - QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); return 0; } @@ -2389,11 +2369,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) return 0; } -static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) +static void qeth_free_output_queue(struct qeth_qdio_out_q *q) { if (!q) return; + qeth_clear_outq_buffers(q, 1); qdio_free_buffers(q->qdio_bufs, 
QDIO_MAX_BUFFERS_PER_Q); kfree(q); } @@ -2432,12 +2413,6 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card) goto out_freeinq; /* outbound */ - card->qdio.out_qs = - kcalloc(card->qdio.no_out_queues, - sizeof(struct qeth_qdio_out_q *), - GFP_KERNEL); - if (!card->qdio.out_qs) - goto out_freepool; for (i = 0; i < card->qdio.no_out_queues; ++i) { card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf(); if (!card->qdio.out_qs[i]) @@ -2468,12 +2443,9 @@ out_freeoutqbufs: } out_freeoutq: while (i > 0) { - qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); + qeth_free_output_queue(card->qdio.out_qs[--i]); + card->qdio.out_qs[i] = NULL; } - kfree(card->qdio.out_qs); - card->qdio.out_qs = NULL; -out_freepool: qeth_free_buffer_pool(card); out_freeinq: qeth_free_qdio_queue(card->qdio.in_q); @@ -2502,13 +2474,9 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) /* inbound buffer pool */ qeth_free_buffer_pool(card); /* free outbound qdio_qs */ - if (card->qdio.out_qs) { - for (i = 0; i < card->qdio.no_out_queues; ++i) { - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); - qeth_free_qdio_out_buf(card->qdio.out_qs[i]); - } - kfree(card->qdio.out_qs); - card->qdio.out_qs = NULL; + for (i = 0; i < card->qdio.no_out_queues; i++) { + qeth_free_output_queue(card->qdio.out_qs[i]); + card->qdio.out_qs[i] = NULL; } } @@ -2715,8 +2683,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( } else { free_page((unsigned long)entry->elements[i]); entry->elements[i] = page_address(page); - if (card->options.performance_stats) - card->perf_stats.sg_alloc_page_rx++; + QETH_CARD_STAT_INC(card, rx_sg_alloc_page); } } } @@ -2835,14 +2802,20 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, cmd->hdr.prot_version = prot; } -void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob) +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + u16 cmd_length) { + u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length; u8 prot_type = qeth_mpc_select_prot_type(card); memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); + memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); + memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); } EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); @@ -2853,7 +2826,7 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, iob = qeth_get_buffer(&card->write); if (iob) { - qeth_prepare_ipa_cmd(card, iob); + qeth_prepare_ipa_cmd(card, iob, sizeof(struct qeth_ipa_cmd)); qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot); } else { dev_warn(&card->gdev->dev, @@ -2866,6 +2839,14 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, } EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer); +static int qeth_send_ipa_cmd_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + return (cmd->hdr.return_code) ? 
-EIO : 0; +} + /** * qeth_send_ipa_cmd() - send an IPA command * @@ -2877,11 +2858,15 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, unsigned long), void *reply_param) { + u16 length; int rc; QETH_CARD_TEXT(card, 4, "sendipa"); - rc = qeth_send_control_data(card, IPA_CMD_LENGTH, - iob, reply_cb, reply_param); + + if (reply_cb == NULL) + reply_cb = qeth_send_ipa_cmd_cb; + memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); + rc = qeth_send_control_data(card, length, iob, reply_cb, reply_param); if (rc == -ETIME) { qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); @@ -2890,9 +2875,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); +static int qeth_send_startlan_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) + return -ENETDOWN; + + return (cmd->hdr.return_code) ? -EIO : 0; +} + static int qeth_send_startlan(struct qeth_card *card) { - int rc; struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "strtlan"); @@ -2900,8 +2895,7 @@ static int qeth_send_startlan(struct qeth_card *card) iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); if (!iob) return -ENOMEM; - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - return rc; + return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); } static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) @@ -2919,7 +2913,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "quyadpcb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = @@ -2974,19 +2968,18 @@ static int qeth_query_ipassists_cb(struct qeth_card *card, cmd = (struct qeth_ipa_cmd *) data; switch (cmd->hdr.return_code) { + case IPA_RC_SUCCESS: + break; case IPA_RC_NOTSUPP: case IPA_RC_L2_UNSUPPORTED_CMD: QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; - return 0; + return -EOPNOTSUPP; default: - if (cmd->hdr.return_code) { - QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", - CARD_DEVID(card), - cmd->hdr.return_code); - return 0; - } + QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", + CARD_DEVID(card), cmd->hdr.return_code); + return -EIO; } if (cmd->hdr.prot_version == QETH_PROT_IPV4) { @@ -3024,7 +3017,7 @@ static int qeth_query_switch_attributes_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "qswiatcb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; sw_info = (struct qeth_switch_info *)reply->param; attrs = &cmd->data.setadapterparms.data.query_switch_attributes; @@ -3056,15 +3049,15 @@ int qeth_query_switch_attributes(struct qeth_card *card, static int qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - __u16 rc; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u16 rc = cmd->hdr.return_code; - cmd = (struct qeth_ipa_cmd *)data; - rc = cmd->hdr.return_code; - if (rc) + if (rc) { QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); - else - card->info.diagass_support = cmd->data.diagass.ext; + return -EIO; + } + + card->info.diagass_support = cmd->data.diagass.ext; return 0; } @@ -3111,13 +3104,13 @@ static void 
qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) static int qeth_hw_trap_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - __u16 rc; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u16 rc = cmd->hdr.return_code; - cmd = (struct qeth_ipa_cmd *)data; - rc = cmd->hdr.return_code; - if (rc) + if (rc) { QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); + return -EIO; + } return 0; } @@ -3166,7 +3159,7 @@ static int qeth_check_qdio_errors(struct qeth_card *card, buf->element[14].sflags); QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); if ((buf->element[15].sflags) == 0x12) { - card->stats.rx_dropped++; + QETH_CARD_STAT_INC(card, rx_dropped); return 0; } else return 1; @@ -3230,17 +3223,8 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index) * 'index') un-requeued -> this buffer is the first buffer that * will be requeued the next time */ - if (card->options.performance_stats) { - card->perf_stats.inbound_do_qdio_cnt++; - card->perf_stats.inbound_do_qdio_start_time = - qeth_get_micros(); - } rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, queue->next_buf_to_init, count); - if (card->options.performance_stats) - card->perf_stats.inbound_do_qdio_time += - qeth_get_micros() - - card->perf_stats.inbound_do_qdio_start_time; if (rc) { QETH_CARD_TEXT(card, 2, "qinberr"); } @@ -3317,8 +3301,7 @@ static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) >= QETH_HIGH_WATERMARK_PACK){ /* switch non-PACKING -> PACKING */ QETH_CARD_TEXT(queue->card, 6, "np->pack"); - if (queue->card->options.performance_stats) - queue->card->perf_stats.sc_dp_p++; + QETH_TXQ_STAT_INC(queue, packing_mode_switch); queue->do_pack = 1; } } @@ -3337,8 +3320,7 @@ static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) <= QETH_LOW_WATERMARK_PACK) { /* switch PACKING -> non-PACKING */ QETH_CARD_TEXT(queue->card, 6, "pack->np"); - if (queue->card->options.performance_stats) - queue->card->perf_stats.sc_p_dp++; + QETH_TXQ_STAT_INC(queue, packing_mode_switch); queue->do_pack = 0; return qeth_prep_flush_pack_buffer(queue); } @@ -3392,25 +3374,16 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } + QETH_TXQ_STAT_ADD(queue, bufs, count); netif_trans_update(queue->card->dev); - if (queue->card->options.performance_stats) { - queue->card->perf_stats.outbound_do_qdio_cnt++; - queue->card->perf_stats.outbound_do_qdio_start_time = - qeth_get_micros(); - } qdio_flags = QDIO_FLAG_SYNC_OUTPUT; if (atomic_read(&queue->set_pci_flags_count)) qdio_flags |= QDIO_FLAG_PCI_OUT; atomic_add(count, &queue->used_buffers); - rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, queue->queue_no, index, count); - if (queue->card->options.performance_stats) - queue->card->perf_stats.outbound_do_qdio_time += - qeth_get_micros() - - queue->card->perf_stats.outbound_do_qdio_start_time; if (rc) { - queue->card->stats.tx_errors += count; + QETH_TXQ_STAT_ADD(queue, tx_errors, count); /* ignore temporary SIGA errors without busy condition */ if (rc == -ENOBUFS) return; @@ -3425,8 +3398,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, qeth_schedule_recovery(queue->card); return; } - if (queue->card->options.performance_stats) - queue->card->perf_stats.bufs_sent += count; } static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) @@ -3457,10 +3428,8 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) if (!flush_cnt && 
!atomic_read(&queue->set_pci_flags_count)) flush_cnt += qeth_prep_flush_pack_buffer(queue); - if (queue->card->options.performance_stats && - q_was_packing) - queue->card->perf_stats.bufs_sent_pack += - flush_cnt; + if (q_was_packing) + QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); if (flush_cnt) qeth_flush_buffers(queue, index, flush_cnt); atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); @@ -3490,8 +3459,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) goto out; } - if (card->state != CARD_STATE_DOWN && - card->state != CARD_STATE_RECOVER) { + if (card->state != CARD_STATE_DOWN) { rc = -1; goto out; } @@ -3515,7 +3483,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, int rc; if (!qeth_is_cq(card, queue)) - goto out; + return; QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); @@ -3524,12 +3492,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, if (qdio_err) { netif_stop_queue(card->dev); qeth_schedule_recovery(card); - goto out; - } - - if (card->options.performance_stats) { - card->perf_stats.cq_cnt++; - card->perf_stats.cq_start_time = qeth_get_micros(); + return; } for (i = first_element; i < first_element + count; ++i) { @@ -3557,16 +3520,6 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, } card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init + count) % QDIO_MAX_BUFFERS_PER_Q; - - netif_wake_queue(card->dev); - - if (card->options.performance_stats) { - int delta_t = qeth_get_micros(); - delta_t -= card->perf_stats.cq_start_time; - card->perf_stats.cq_time += delta_t; - } -out: - return; } static void qeth_qdio_input_handler(struct ccw_device *ccwdev, @@ -3602,11 +3555,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_schedule_recovery(card); return; } - if (card->options.performance_stats) { - card->perf_stats.outbound_handler_cnt++; - card->perf_stats.outbound_handler_start_time = - qeth_get_micros(); - } + for (i = first_element; i < (first_element + count); ++i) { int bidx = i % QDIO_MAX_BUFFERS_PER_Q; buffer = queue->bufs[bidx]; @@ -3652,9 +3601,6 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev, qeth_check_outbound_queue(queue); netif_wake_queue(queue->card->dev); - if (card->options.performance_stats) - card->perf_stats.outbound_handler_time += qeth_get_micros() - - card->perf_stats.outbound_handler_start_time; } /* We cannot use outbound queue 3 for unicast packets on HiperSockets */ @@ -3775,11 +3721,12 @@ EXPORT_SYMBOL_GPL(qeth_count_elements); * The number of needed buffer elements is returned in @elements. * Error to create the hdr is indicated by returning with < 0. */ -static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, - struct qeth_hdr **hdr, unsigned int hdr_len, - unsigned int proto_len, unsigned int *elements) +static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr **hdr, + unsigned int hdr_len, unsigned int proto_len, + unsigned int *elements) { - const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card); const unsigned int contiguous = proto_len ? 
proto_len : 1; unsigned int __elements; addr_t start, end; @@ -3818,15 +3765,12 @@ check_layout: } rc = skb_linearize(skb); - if (card->options.performance_stats) { - if (rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (rc) + if (rc) { + QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); return rc; + } + QETH_TXQ_STAT_INC(queue, skbs_linearized); /* Linearization changed the layout, re-evaluate: */ goto check_layout; } @@ -3928,7 +3872,6 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, { struct qdio_buffer *buffer = buf->buffer; bool is_first_elem = true; - int flush_cnt = 0; __skb_queue_tail(&buf->skb_list, skb); @@ -3949,24 +3892,21 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, if (!queue->do_pack) { QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); - /* set state to PRIMED -> will be flushed */ - atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); - flush_cnt = 1; } else { QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); - if (queue->card->options.performance_stats) - queue->card->perf_stats.skbs_sent_pack++; - if (buf->next_element_to_fill >= - QETH_MAX_BUFFER_ELEMENTS(queue->card)) { - /* - * packed buffer if full -> set state PRIMED - * -> will be flushed - */ - atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); - flush_cnt = 1; - } + + QETH_TXQ_STAT_INC(queue, skbs_pack); + /* If the buffer still has free elements, keep using it. */ + if (buf->next_element_to_fill < + QETH_MAX_BUFFER_ELEMENTS(queue->card)) + return 0; } - return flush_cnt; + + /* flush out the buffer */ + atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % + QDIO_MAX_BUFFERS_PER_Q; + return 1; } static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, @@ -3982,7 +3922,6 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, */ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) return -EBUSY; - queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q; qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); qeth_flush_buffers(queue, index, 1); return 0; @@ -4040,10 +3979,9 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } } - tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); - queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % - QDIO_MAX_BUFFERS_PER_Q; - flush_count += tmp; + + flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, + hd_len); if (flush_count) qeth_flush_buffers(queue, start_index, flush_count); else if (!atomic_read(&queue->set_pci_flags_count)) @@ -4071,8 +4009,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } out: /* at this point the queue is UNLOCKED again */ - if (queue->card->options.performance_stats && do_pack) - queue->card->perf_stats.bufs_sent_pack += flush_count; + if (do_pack) + QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); return rc; } @@ -4096,8 +4034,9 @@ static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int ipv, int cast_type, - void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, + void (*fill_header)(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len)) { unsigned int proto_len, hw_hdr_len; @@ -4122,7 +4061,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, if (rc) return rc; - push_len = 
qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, + push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, &elements); if (push_len < 0) return push_len; @@ -4132,7 +4071,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, data_offset = push_len + proto_len; } memset(hdr, 0, hw_hdr_len); - fill_header(card, hdr, skb, ipv, cast_type, frame_len); + fill_header(queue, hdr, skb, ipv, cast_type, frame_len); if (is_tso) qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, frame_len - proto_len, skb, proto_len); @@ -4149,14 +4088,12 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, } if (!rc) { - if (card->options.performance_stats) { - card->perf_stats.buf_elements_sent += elements; - if (is_sg) - card->perf_stats.sg_skbs_sent++; - if (is_tso) { - card->perf_stats.large_send_bytes += frame_len; - card->perf_stats.large_send_cnt++; - } + QETH_TXQ_STAT_ADD(queue, buf_elements, elements); + if (is_sg) + QETH_TXQ_STAT_INC(queue, skbs_sg); + if (is_tso) { + QETH_TXQ_STAT_INC(queue, skbs_tso); + QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len); } } else { if (!push_len) @@ -4183,7 +4120,7 @@ static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, setparms->data.mode = SET_PROMISC_MODE_OFF; } card->info.promisc_mode = setparms->data.mode; - return 0; + return (cmd->hdr.return_code) ? -EIO : 0; } void qeth_setadp_promisc_mode(struct qeth_card *card) @@ -4215,18 +4152,6 @@ void qeth_setadp_promisc_mode(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); -struct net_device_stats *qeth_get_stats(struct net_device *dev) -{ - struct qeth_card *card; - - card = dev->ml_priv; - - QETH_CARD_TEXT(card, 5, "getstat"); - - return &card->stats; -} -EXPORT_SYMBOL_GPL(qeth_get_stats); - static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -4235,12 +4160,15 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "chgmaccb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; adp_cmd = &cmd->data.setadapterparms; + if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) + return -EADDRNOTAVAIL; + if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) - return 0; + return -EADDRNOTAVAIL; ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); return 0; @@ -4279,7 +4207,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setaccb"); if (cmd->hdr.return_code) - return 0; + return -EIO; qeth_setadpparms_inspect_rc(cmd); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; @@ -4353,7 +4281,7 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, card->options.isolation = card->options.prev_isolation; break; } - return 0; + return (cmd->hdr.return_code) ? 
-EIO : 0; } static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, @@ -4417,7 +4345,7 @@ void qeth_tx_timeout(struct net_device *dev) card = dev->ml_priv; QETH_CARD_TEXT(card, 4, "txtimeo"); - card->stats.tx_errors++; + QETH_CARD_STAT_INC(card, tx_errors); qeth_schedule_recovery(card); } EXPORT_SYMBOL_GPL(qeth_tx_timeout); @@ -4487,27 +4415,6 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) return rc; } -static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, - struct qeth_cmd_buffer *iob, int len, - int (*reply_cb)(struct qeth_card *, struct qeth_reply *, - unsigned long), - void *reply_param) -{ - u16 s1, s2; - - QETH_CARD_TEXT(card, 4, "sendsnmp"); - - /* adjust PDU length fields in IPA_PDU_HEADER */ - s1 = (u32) IPA_PDU_HEADER_SIZE + len; - s2 = (u32) len; - memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); - memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); - return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, - reply_cb, reply_param); -} - static int qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long sdata) { @@ -4525,13 +4432,13 @@ static int qeth_snmp_command_cb(struct qeth_card *card, if (cmd->hdr.return_code) { QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); - return 0; + return -EIO; } if (cmd->data.setadapterparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); - return 0; + return -EIO; } data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); if (cmd->data.setadapterparms.hdr.seq_no == 1) { @@ -4546,9 +4453,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card, /* check if there is enough room in userspace */ if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { - QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); - cmd->hdr.return_code = IPA_RC_ENOMEM; - return 0; + QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); + return -ENOSPC; } QETH_CARD_TEXT_(card, 4, "snore%i", cmd->data.setadapterparms.hdr.used_total); @@ -4613,10 +4519,13 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) rc = -ENOMEM; goto out; } + + /* for large requests, fix-up the length fields: */ + qeth_prepare_ipa_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len); + cmd = __ipa_cmd(iob); memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); - rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, - qeth_snmp_command_cb, (void *)&qinfo); + rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); if (rc) QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", CARD_DEVID(card), rc); @@ -4640,16 +4549,14 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 3, "qoatcb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; - if (resdatalen > (priv->buffer_len - priv->response_len)) { - cmd->hdr.return_code = IPA_RC_FFFF; - return 0; - } + if (resdatalen > (priv->buffer_len - priv->response_len)) + return -ENOSPC; memcpy((priv->buffer + priv->response_len), resdata, resdatalen); @@ -4722,9 +4629,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) if (copy_to_user(udata, &oat_data, sizeof(struct qeth_query_oat_data))) rc = -EFAULT; - } else - if 
(rc == IPA_RC_FFFF) - rc = -EFAULT; + } out_free: vfree(priv.buffer); @@ -4741,7 +4646,7 @@ static int qeth_query_card_info_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "qcrdincb"); if (qeth_setadpparms_inspect_rc(cmd)) - return 0; + return -EIO; card_info = &cmd->data.setadapterparms.data.card_info; carrier_info->card_type = card_info->card_type; @@ -4750,8 +4655,8 @@ static int qeth_query_card_info_cb(struct qeth_card *card, return 0; } -static int qeth_query_card_info(struct qeth_card *card, - struct carrier_info *carrier_info) +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info) { struct qeth_cmd_buffer *iob; @@ -4979,8 +4884,8 @@ static int qeth_qdio_establish(struct qeth_card *card) init_data.output_handler = qeth_qdio_output_handler; init_data.queue_start_poll_array = queue_start_poll; init_data.int_parm = (unsigned long) card; - init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; - init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; + init_data.input_sbal_addr_array = in_sbal_ptrs; + init_data.output_sbal_addr_array = out_sbal_ptrs; init_data.output_sbal_state_array = card->qdio.out_bufstates; init_data.scan_threshold = (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32; @@ -5028,6 +4933,7 @@ static void qeth_core_free_card(struct qeth_card *card) qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); qeth_clean_channel(&card->data); + destroy_workqueue(card->event_wq); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); dev_set_drvdata(&card->gdev->dev, NULL); @@ -5142,25 +5048,16 @@ retriable: rc = qeth_send_startlan(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); - if (rc == IPA_RC_LAN_OFFLINE) { - dev_warn(&card->gdev->dev, - "The LAN is offline\n"); + if (rc == -ENETDOWN) { + dev_warn(&card->gdev->dev, "The LAN is offline\n"); *carrier_ok = false; } else { - rc = -ENODEV; goto out; } } else { *carrier_ok = true; } - if (qeth_netdev_is_registered(card->dev)) { - if (*carrier_ok) - netif_carrier_on(card->dev); - else - netif_carrier_off(card->dev); - } - card->options.ipa4.supported_funcs = 0; card->options.ipa6.supported_funcs = 0; card->options.adp.supported_funcs = 0; @@ -5306,7 +5203,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "unexeob"); QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); dev_kfree_skb_any(skb); - card->stats.rx_errors++; + QETH_CARD_STAT_INC(card, rx_errors); return NULL; } element++; @@ -5318,16 +5215,17 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, } *__element = element; *__offset = offset; - if (use_rx_sg && card->options.performance_stats) { - card->perf_stats.sg_skbs_rx++; - card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; + if (use_rx_sg) { + QETH_CARD_STAT_INC(card, rx_sg_skbs); + QETH_CARD_STAT_ADD(card, rx_sg_frags, + skb_shinfo(skb)->nr_frags); } return skb; no_mem: if (net_ratelimit()) { QETH_CARD_TEXT(card, 2, "noskbmem"); } - card->stats.rx_dropped++; + QETH_CARD_STAT_INC(card, rx_dropped); return NULL; } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); @@ -5340,11 +5238,6 @@ int qeth_poll(struct napi_struct *napi, int budget) int done; int new_budget = budget; - if (card->options.performance_stats) { - card->perf_stats.inbound_cnt++; - card->perf_stats.inbound_start_time = qeth_get_micros(); - } - while (1) { if (!card->rx.b_count) { card->rx.qdio_err = 0; @@ -5373,8 +5266,7 @@ int qeth_poll(struct napi_struct *napi, int budget) done = 1; if (done) { - if 
(card->options.performance_stats) - card->perf_stats.bufs_rec++; + QETH_CARD_STAT_INC(card, rx_bufs); qeth_put_buffer_pool_entry(card, buffer->pool_entry); qeth_queue_input_buffer(card, card->rx.b_index); @@ -5402,9 +5294,6 @@ int qeth_poll(struct napi_struct *napi, int budget) if (qdio_start_irq(card->data.ccwdev, 0)) napi_schedule(&card->napi); out: - if (card->options.performance_stats) - card->perf_stats.inbound_time += qeth_get_micros() - - card->perf_stats.inbound_start_time; return work_done; } EXPORT_SYMBOL_GPL(qeth_poll); @@ -5424,7 +5313,7 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card, struct qeth_ipa_caps *caps = reply->param; if (qeth_setassparms_inspect_rc(cmd)) - return 0; + return -EIO; caps->supported = cmd->data.setassparms.data.caps.supported; caps->enabled = cmd->data.setassparms.data.caps.enabled; @@ -5434,18 +5323,18 @@ static int qeth_setassparms_get_caps_cb(struct qeth_card *card, int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 4, "defadpcb"); - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) { - cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; - if (cmd->hdr.prot_version == QETH_PROT_IPV4) - card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; - if (cmd->hdr.prot_version == QETH_PROT_IPV6) - card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; - } + if (cmd->hdr.return_code) + return -EIO; + + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + if (cmd->hdr.prot_version == QETH_PROT_IPV6) + card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; return 0; } EXPORT_SYMBOL_GPL(qeth_setassparms_cb); @@ -5691,7 +5580,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card) SET_NETDEV_DEV(dev, &card->gdev->dev); netif_carrier_off(dev); - if (!IS_OSN(card)) { + if (IS_OSN(card)) { + dev->ethtool_ops = &qeth_osn_ethtool_ops; + } else { + dev->ethtool_ops = &qeth_ethtool_ops; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->hw_features |= NETIF_F_SG; dev->vlan_features |= NETIF_F_SG; @@ -5937,12 +5829,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!card) return -ENODEV; - if (!qeth_card_hw_is_reachable(card)) - return -ENODEV; - - if (card->info.type == QETH_CARD_TYPE_OSN) - return -EPERM; - switch (cmd) { case SIOC_QETH_ADP_SET_SNMP_CONTROL: rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); @@ -5982,454 +5868,91 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } EXPORT_SYMBOL_GPL(qeth_do_ioctl); -static struct { - const char str[ETH_GSTRING_LEN]; -} qeth_ethtool_stats_keys[] = { -/* 0 */{"rx skbs"}, - {"rx buffers"}, - {"tx skbs"}, - {"tx buffers"}, - {"tx skbs no packing"}, - {"tx buffers no packing"}, - {"tx skbs packing"}, - {"tx buffers packing"}, - {"tx sg skbs"}, - {"tx buffer elements"}, -/* 10 */{"rx sg skbs"}, - {"rx sg frags"}, - {"rx sg page allocs"}, - {"tx large kbytes"}, - {"tx large count"}, - {"tx pk state ch n->p"}, - {"tx pk state ch p->n"}, - {"tx pk watermark low"}, - {"tx pk watermark high"}, - {"queue 0 buffer usage"}, -/* 20 */{"queue 1 buffer usage"}, - {"queue 2 buffer usage"}, - {"queue 3 buffer usage"}, - {"rx poll time"}, - {"rx poll count"}, - {"rx do_QDIO time"}, - {"rx do_QDIO count"}, - {"tx handler time"}, - {"tx handler count"}, - {"tx 
time"}, -/* 30 */{"tx count"}, - {"tx do_QDIO time"}, - {"tx do_QDIO count"}, - {"tx csum"}, - {"tx lin"}, - {"tx linfail"}, - {"cq handler count"}, - {"cq handler time"}, - {"rx csum"} -}; - -int qeth_core_get_sset_count(struct net_device *dev, int stringset) -{ - switch (stringset) { - case ETH_SS_STATS: - return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(qeth_core_get_sset_count); - -void qeth_core_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct qeth_card *card = dev->ml_priv; - data[0] = card->stats.rx_packets - - card->perf_stats.initial_rx_packets; - data[1] = card->perf_stats.bufs_rec; - data[2] = card->stats.tx_packets - - card->perf_stats.initial_tx_packets; - data[3] = card->perf_stats.bufs_sent; - data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets - - card->perf_stats.skbs_sent_pack; - data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack; - data[6] = card->perf_stats.skbs_sent_pack; - data[7] = card->perf_stats.bufs_sent_pack; - data[8] = card->perf_stats.sg_skbs_sent; - data[9] = card->perf_stats.buf_elements_sent; - data[10] = card->perf_stats.sg_skbs_rx; - data[11] = card->perf_stats.sg_frags_rx; - data[12] = card->perf_stats.sg_alloc_page_rx; - data[13] = (card->perf_stats.large_send_bytes >> 10); - data[14] = card->perf_stats.large_send_cnt; - data[15] = card->perf_stats.sc_dp_p; - data[16] = card->perf_stats.sc_p_dp; - data[17] = QETH_LOW_WATERMARK_PACK; - data[18] = QETH_HIGH_WATERMARK_PACK; - data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers); - data[20] = (card->qdio.no_out_queues > 1) ? - atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0; - data[21] = (card->qdio.no_out_queues > 2) ? - atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0; - data[22] = (card->qdio.no_out_queues > 3) ? - atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0; - data[23] = card->perf_stats.inbound_time; - data[24] = card->perf_stats.inbound_cnt; - data[25] = card->perf_stats.inbound_do_qdio_time; - data[26] = card->perf_stats.inbound_do_qdio_cnt; - data[27] = card->perf_stats.outbound_handler_time; - data[28] = card->perf_stats.outbound_handler_cnt; - data[29] = card->perf_stats.outbound_time; - data[30] = card->perf_stats.outbound_cnt; - data[31] = card->perf_stats.outbound_do_qdio_time; - data[32] = card->perf_stats.outbound_do_qdio_cnt; - data[33] = card->perf_stats.tx_csum; - data[34] = card->perf_stats.tx_lin; - data[35] = card->perf_stats.tx_linfail; - data[36] = card->perf_stats.cq_cnt; - data[37] = card->perf_stats.cq_time; - data[38] = card->perf_stats.rx_csum; -} -EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); - -void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data) -{ - switch (stringset) { - case ETH_SS_STATS: - memcpy(data, &qeth_ethtool_stats_keys, - sizeof(qeth_ethtool_stats_keys)); - break; - default: - WARN_ON(1); - break; - } -} -EXPORT_SYMBOL_GPL(qeth_core_get_strings); - -void qeth_core_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) -{ - struct qeth_card *card = dev->ml_priv; - - strlcpy(info->driver, IS_LAYER2(card) ? 
"qeth_l2" : "qeth_l3", - sizeof(info->driver)); - strlcpy(info->version, "1.0", sizeof(info->version)); - strlcpy(info->fw_version, card->info.mcl_level, - sizeof(info->fw_version)); - snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", - CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); -} -EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); - -/* Helper function to fill 'advertising' and 'supported' which are the same. */ -/* Autoneg and full-duplex are supported and advertised unconditionally. */ -/* Always advertise and support all speeds up to specified, and only one */ -/* specified port type. */ -static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, - int maxspeed, int porttype) -{ - ethtool_link_ksettings_zero_link_mode(cmd, supported); - ethtool_link_ksettings_zero_link_mode(cmd, advertising); - ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); - - ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); - ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); - - switch (porttype) { - case PORT_TP: - ethtool_link_ksettings_add_link_mode(cmd, supported, TP); - ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); - break; - case PORT_FIBRE: - ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); - break; - default: - ethtool_link_ksettings_add_link_mode(cmd, supported, TP); - ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); - WARN_ON_ONCE(1); - } - - /* partially does fall through, to also select lower speeds */ - switch (maxspeed) { - case SPEED_25000: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 25000baseSR_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 25000baseSR_Full); - break; - case SPEED_10000: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10000baseT_Full); - case SPEED_1000: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 1000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 1000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 1000baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 1000baseT_Half); - case SPEED_100: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 100baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 100baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 100baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 100baseT_Half); - case SPEED_10: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Half); - /* end fallthrough */ - break; - default: - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, supported, - 10baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, - 10baseT_Half); - WARN_ON_ONCE(1); - } -} - -int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct qeth_card *card = netdev->ml_priv; - enum qeth_link_types link_type; - struct carrier_info carrier_info; - int rc; - - if ((card->info.type == QETH_CARD_TYPE_IQD) || 
(card->info.guestlan)) - link_type = QETH_LINK_TYPE_10GBIT_ETH; - else - link_type = card->info.link_type; - - cmd->base.duplex = DUPLEX_FULL; - cmd->base.autoneg = AUTONEG_ENABLE; - cmd->base.phy_address = 0; - cmd->base.mdio_support = 0; - cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; - cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; - - switch (link_type) { - case QETH_LINK_TYPE_FAST_ETH: - case QETH_LINK_TYPE_LANE_ETH100: - cmd->base.speed = SPEED_100; - cmd->base.port = PORT_TP; - break; - case QETH_LINK_TYPE_GBIT_ETH: - case QETH_LINK_TYPE_LANE_ETH1000: - cmd->base.speed = SPEED_1000; - cmd->base.port = PORT_FIBRE; - break; - case QETH_LINK_TYPE_10GBIT_ETH: - cmd->base.speed = SPEED_10000; - cmd->base.port = PORT_FIBRE; - break; - case QETH_LINK_TYPE_25GBIT_ETH: - cmd->base.speed = SPEED_25000; - cmd->base.port = PORT_FIBRE; - break; - default: - cmd->base.speed = SPEED_10; - cmd->base.port = PORT_TP; - } - qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); - - /* Check if we can obtain more accurate information. */ - /* If QUERY_CARD_INFO command is not supported or fails, */ - /* just return the heuristics that was filled above. */ - if (!qeth_card_hw_is_reachable(card)) - return -ENODEV; - rc = qeth_query_card_info(card, &carrier_info); - if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ - return 0; - if (rc) /* report error from the hardware operation */ - return rc; - /* on success, fill in the information got from the hardware */ - - netdev_dbg(netdev, - "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", - carrier_info.card_type, - carrier_info.port_mode, - carrier_info.port_speed); - - /* Update attributes for which we've obtained more authoritative */ - /* information, leave the rest the way they where filled above. */ - switch (carrier_info.card_type) { - case CARD_INFO_TYPE_1G_COPPER_A: - case CARD_INFO_TYPE_1G_COPPER_B: - cmd->base.port = PORT_TP; - qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); - break; - case CARD_INFO_TYPE_1G_FIBRE_A: - case CARD_INFO_TYPE_1G_FIBRE_B: - cmd->base.port = PORT_FIBRE; - qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); - break; - case CARD_INFO_TYPE_10G_FIBRE_A: - case CARD_INFO_TYPE_10G_FIBRE_B: - cmd->base.port = PORT_FIBRE; - qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); - break; - } - - switch (carrier_info.port_mode) { - case CARD_INFO_PORTM_FULLDUPLEX: - cmd->base.duplex = DUPLEX_FULL; - break; - case CARD_INFO_PORTM_HALFDUPLEX: - cmd->base.duplex = DUPLEX_HALF; - break; - } - - switch (carrier_info.port_speed) { - case CARD_INFO_PORTS_10M: - cmd->base.speed = SPEED_10; - break; - case CARD_INFO_PORTS_100M: - cmd->base.speed = SPEED_100; - break; - case CARD_INFO_PORTS_1G: - cmd->base.speed = SPEED_1000; - break; - case CARD_INFO_PORTS_10G: - cmd->base.speed = SPEED_10000; - break; - case CARD_INFO_PORTS_25G: - cmd->base.speed = SPEED_25000; - break; - } - - return 0; -} -EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings); - -/* Callback to handle checksum offload command reply from OSA card. - * Verify that required features have been enabled on the card. - * Return error in hdr->return_code as this value is checked by caller. - * - * Always returns zero to indicate no further messages from the OSA card. 
- */ -static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, - struct qeth_reply *reply, - unsigned long data) +static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - struct qeth_checksum_cmd *chksum_cb = - (struct qeth_checksum_cmd *)reply->param; + u32 *features = reply->param; - QETH_CARD_TEXT(card, 4, "chkdoccb"); if (qeth_setassparms_inspect_rc(cmd)) - return 0; + return -EIO; - memset(chksum_cb, 0, sizeof(*chksum_cb)); - if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { - chksum_cb->supported = - cmd->data.setassparms.data.chksum.supported; - QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported); - } - if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) { - chksum_cb->supported = - cmd->data.setassparms.data.chksum.supported; - chksum_cb->enabled = - cmd->data.setassparms.data.chksum.enabled; - QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported); - QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled); - } + *features = cmd->data.setassparms.data.flags_32bit; return 0; } -/* Send command to OSA card and check results. */ -static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, - enum qeth_ipa_funcs ipa_func, - __u16 cmd_code, long data, - struct qeth_checksum_cmd *chksum_cb, - enum qeth_prot_versions prot) +static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, + enum qeth_prot_versions prot) { - struct qeth_cmd_buffer *iob; - - QETH_CARD_TEXT(card, 4, "chkdocmd"); - iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, - sizeof(__u32), prot); - if (!iob) - return -ENOMEM; - - __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data; - return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb, - chksum_cb); + return qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, prot); } -static int qeth_send_checksum_on(struct qeth_card *card, int cstype, - enum qeth_prot_versions prot) +static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, + enum qeth_prot_versions prot) { u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; - struct qeth_checksum_cmd chksum_cb; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_caps caps; + u32 features; int rc; - if (prot == QETH_PROT_IPV4) + /* some L3 HW requires combined L3+L4 csum offload: */ + if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && + cstype == IPA_OUTBOUND_CHECKSUM) required_features |= QETH_IPA_CHECKSUM_IP_HDR; - rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, - &chksum_cb, prot); - if (!rc) { - if ((required_features & chksum_cb.supported) != - required_features) - rc = -EIO; - else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) && - cstype == IPA_INBOUND_CHECKSUM) - dev_warn(&card->gdev->dev, - "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", - QETH_CARD_IFNAME(card)); - } - if (rc) { - qeth_send_simple_setassparms_prot(card, cstype, - IPA_CMD_ASS_STOP, 0, prot); - dev_warn(&card->gdev->dev, - "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n", - prot, QETH_CARD_IFNAME(card)); + + iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, + prot); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); + if (rc) return rc; + + if ((required_features & features) != required_features) { + qeth_set_csum_off(card, cstype, prot); + 
return -EOPNOTSUPP; } - rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, - chksum_cb.supported, &chksum_cb, + + iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, 4, prot); - if (!rc) { - if ((required_features & chksum_cb.enabled) != - required_features) - rc = -EIO; + if (!iob) { + qeth_set_csum_off(card, cstype, prot); + return -ENOMEM; } + + if (features & QETH_IPA_CHECKSUM_LP2LP) + required_features |= QETH_IPA_CHECKSUM_LP2LP; + __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; + rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); if (rc) { - qeth_send_simple_setassparms_prot(card, cstype, - IPA_CMD_ASS_STOP, 0, prot); - dev_warn(&card->gdev->dev, - "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n", - prot, QETH_CARD_IFNAME(card)); + qeth_set_csum_off(card, cstype, prot); return rc; } + if (!qeth_ipa_caps_supported(&caps, required_features) || + !qeth_ipa_caps_enabled(&caps, required_features)) { + qeth_set_csum_off(card, cstype, prot); + return -EOPNOTSUPP; + } + dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); + if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) && + cstype == IPA_OUTBOUND_CHECKSUM) + dev_warn(&card->gdev->dev, + "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", + QETH_CARD_IFNAME(card)); return 0; } static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, enum qeth_prot_versions prot) { - int rc = (on) ? qeth_send_checksum_on(card, cstype, prot) - : qeth_send_simple_setassparms_prot(card, cstype, - IPA_CMD_ASS_STOP, 0, - prot); - return rc ? -EIO : 0; + return on ? qeth_set_csum_on(card, cstype, prot) : + qeth_set_csum_off(card, cstype, prot); } static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, @@ -6439,7 +5962,7 @@ static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, struct qeth_tso_start_data *tso_data = reply->param; if (qeth_setassparms_inspect_rc(cmd)) - return 0; + return -EIO; tso_data->mss = cmd->data.setassparms.data.tso.mss; tso_data->supported = cmd->data.setassparms.data.tso.supported; @@ -6505,10 +6028,7 @@ static int qeth_set_tso_on(struct qeth_card *card, static int qeth_set_ipa_tso(struct qeth_card *card, bool on, enum qeth_prot_versions prot) { - int rc = on ? qeth_set_tso_on(card, prot) : - qeth_set_tso_off(card, prot); - - return rc ? -EIO : 0; + return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); } static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) @@ -6534,8 +6054,6 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) return (rc_ipv6) ? rc_ipv6 : rc_ipv4; } -#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ - NETIF_F_IPV6_CSUM | NETIF_F_TSO6) /** * qeth_enable_hw_features() - (Re-)Enable HW functions for device features * @dev: a net_device @@ -6545,17 +6063,20 @@ void qeth_enable_hw_features(struct net_device *dev) struct qeth_card *card = dev->ml_priv; netdev_features_t features; - rtnl_lock(); features = dev->features; - /* force-off any feature that needs an IPA sequence. + /* force-off any feature that might need an IPA sequence. * netdev_update_features() will restart them. 
*/ - dev->features &= ~QETH_HW_FEATURES; + dev->features &= ~dev->hw_features; + /* toggle VLAN filter, so that VIDs are re-programmed: */ + if (IS_LAYER2(card) && IS_VM_NIC(card)) { + dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } netdev_update_features(dev); if (features != dev->features) dev_warn(&card->gdev->dev, "Device recovery failed to restore all offload features\n"); - rtnl_unlock(); } EXPORT_SYMBOL_GPL(qeth_enable_hw_features); @@ -6624,10 +6145,7 @@ netdev_features_t qeth_fix_features(struct net_device *dev, features &= ~NETIF_F_TSO; if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) features &= ~NETIF_F_TSO6; - /* if the card isn't up, remove features that require hw changes */ - if (card->state == CARD_STATE_DOWN || - card->state == CARD_STATE_RECOVER) - features &= ~QETH_HW_FEATURES; + QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); return features; } @@ -6659,18 +6177,70 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(qeth_features_check); +void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_qdio_out_q *queue; + unsigned int i; + + QETH_CARD_TEXT(card, 5, "getstat"); + + stats->rx_packets = card->stats.rx_packets; + stats->rx_bytes = card->stats.rx_bytes; + stats->rx_errors = card->stats.rx_errors; + stats->rx_dropped = card->stats.rx_dropped; + stats->multicast = card->stats.rx_multicast; + stats->tx_errors = card->stats.tx_errors; + + for (i = 0; i < card->qdio.no_out_queues; i++) { + queue = card->qdio.out_qs[i]; + + stats->tx_packets += queue->stats.tx_packets; + stats->tx_bytes += queue->stats.tx_bytes; + stats->tx_errors += queue->stats.tx_errors; + stats->tx_dropped += queue->stats.tx_dropped; + } +} +EXPORT_SYMBOL_GPL(qeth_get_stats64); + +int qeth_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 4, "qethopen"); + + if (qdio_stop_irq(CARD_DDEV(card), 0) < 0) + return -EIO; + + card->data.state = CH_STATE_UP; + netif_start_queue(dev); + + napi_enable(&card->napi); + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + return 0; +} +EXPORT_SYMBOL_GPL(qeth_open); + +int qeth_stop(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 4, "qethstop"); + netif_tx_disable(dev); + napi_disable(&card->napi); + return 0; +} +EXPORT_SYMBOL_GPL(qeth_stop); + static int __init qeth_core_init(void) { int rc; pr_info("loading core functions\n"); - qeth_wq = create_singlethread_workqueue("qeth_wq"); - if (!qeth_wq) { - rc = -ENOMEM; - goto out_err; - } - rc = qeth_register_dbf_views(); if (rc) goto dbf_err; @@ -6712,8 +6282,6 @@ slab_err: register_err: qeth_unregister_dbf_views(); dbf_err: - destroy_workqueue(qeth_wq); -out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; } @@ -6721,7 +6289,6 @@ out_err: static void __exit qeth_core_exit(void) { qeth_clear_dbf_list(); - destroy_workqueue(qeth_wq); ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); ccw_driver_unregister(&qeth_ccw_driver); kmem_cache_destroy(qeth_qdio_outbuf_cache); diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index 16fc51ad0514..e3f4866c158e 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -125,24 +125,13 @@ unsigned char DM_ACT[] = { unsigned char IPA_PDU_HEADER[] = { 0x00, 0xe0, 0x00, 0x00, 
0x77, 0x77, 0x77, 0x77, - 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, - (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256, - (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, - sizeof(struct qeth_ipa_cmd) / 256, - sizeof(struct qeth_ipa_cmd) % 256, - 0x00, - sizeof(struct qeth_ipa_cmd) / 256, - sizeof(struct qeth_ipa_cmd) % 256, - 0x05, - 0x77, 0x77, 0x77, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x05, 0x77, 0x77, 0x77, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x01, 0x00, - sizeof(struct qeth_ipa_cmd) / 256, - sizeof(struct qeth_ipa_cmd) % 256, - 0x00, 0x00, 0x00, 0x40, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, }; struct ipa_rc_msg { @@ -212,12 +201,10 @@ static const struct ipa_rc_msg qeth_ipa_rc_msg[] = { {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"}, {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"}, - {IPA_RC_ENOMEM, "Memory problem"}, + /* default for qeth_get_ipa_msg(): */ {IPA_RC_FFFF, "Unknown Error"} }; - - const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) { int x; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 1ab321926f64..f8c5d4a9be13 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -21,8 +21,6 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c) -#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) - #define QETH_SEQ_NO_LENGTH 4 #define QETH_MPC_TOKEN_LENGTH 4 #define QETH_MCL_LENGTH 4 @@ -81,7 +79,9 @@ enum qeth_card_types { #define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD) #define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD) +#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM) #define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN) +#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX) #define IS_VM_NIC(card) ((card)->info.guestlan) #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 @@ -230,7 +230,6 @@ enum qeth_ipa_return_codes { IPA_RC_LAN_OFFLINE = 0xe080, IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090, IPA_RC_INVALID_IP_VERSION2 = 0xf001, - IPA_RC_ENOMEM = 0xfffe, IPA_RC_FFFF = 0xffff }; /* for VNIC Characteristics */ @@ -418,12 +417,6 @@ enum qeth_ipa_checksum_bits { QETH_IPA_CHECKSUM_LP2LP = 0x0020 }; -/* IPA Assist checksum offload reply layout. 
*/ -struct qeth_checksum_cmd { - __u32 supported; - __u32 enabled; -} __packed; - enum qeth_ipa_large_send_caps { QETH_IPA_LARGE_SEND_TCP = 0x00000001, }; @@ -439,7 +432,6 @@ struct qeth_ipacmd_setassparms { union { __u32 flags_32bit; struct qeth_ipa_caps caps; - struct qeth_checksum_cmd chksum; struct qeth_arp_cache_entry arp_entry; struct qeth_arp_query_data query_arp; struct qeth_tso_start_data tso; @@ -833,15 +825,10 @@ enum qeth_ipa_arp_return_codes { extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); -#define QETH_SETASS_BASE_LEN (IPA_PDU_HEADER_SIZE + \ - sizeof(struct qeth_ipacmd_hdr) + \ - sizeof(struct qeth_ipacmd_setassparms_hdr)) #define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ sizeof(struct qeth_ipacmd_setadpparms_hdr)) #define QETH_SNMP_SETADP_CMDLENGTH 16 -#define QETH_ARP_DATA_SIZE 3968 -#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8) /* Helper functions */ #define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \ (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY)) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 30f61608fa22..56deeb6f7bc0 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -29,13 +29,11 @@ static ssize_t qeth_dev_state_show(struct device *dev, case CARD_STATE_HARDSETUP: return sprintf(buf, "HARDSETUP\n"); case CARD_STATE_SOFTSETUP: + if (card->dev->flags & IFF_UP) + return sprintf(buf, "UP (LAN %s)\n", + netif_carrier_ok(card->dev) ? "ONLINE" : + "OFFLINE"); return sprintf(buf, "SOFTSETUP\n"); - case CARD_STATE_UP: - return sprintf(buf, "UP (LAN %s)\n", - netif_carrier_ok(card->dev) ? "ONLINE" : - "OFFLINE"); - case CARD_STATE_RECOVER: - return sprintf(buf, "RECOVER\n"); default: return sprintf(buf, "UNKNOWN\n"); } @@ -126,8 +124,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { + if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; } @@ -202,8 +199,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { + if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; } @@ -285,8 +281,7 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { + if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; } @@ -316,7 +311,7 @@ static ssize_t qeth_dev_recover_store(struct device *dev, if (!card) return -EINVAL; - if (card->state != CARD_STATE_UP) + if (!qeth_card_hw_is_reachable(card)) return -EPERM; i = simple_strtoul(buf, &tmp, 16); @@ -336,35 +331,36 @@ static ssize_t qeth_dev_performance_stats_show(struct device *dev, if (!card) return -EINVAL; - return sprintf(buf, "%i\n", card->options.performance_stats ? 
1:0); + return sprintf(buf, "1\n"); } static ssize_t qeth_dev_performance_stats_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct qeth_card *card = dev_get_drvdata(dev); - char *tmp; - int i, rc = 0; + struct qeth_qdio_out_q *queue; + unsigned int i; + bool reset; + int rc; if (!card) return -EINVAL; - mutex_lock(&card->conf_mutex); - i = simple_strtoul(buf, &tmp, 16); - if ((i == 0) || (i == 1)) { - if (i == card->options.performance_stats) - goto out; - card->options.performance_stats = i; - if (i == 0) - memset(&card->perf_stats, 0, - sizeof(struct qeth_perf_stats)); - card->perf_stats.initial_rx_packets = card->stats.rx_packets; - card->perf_stats.initial_tx_packets = card->stats.tx_packets; - } else - rc = -EINVAL; -out: - mutex_unlock(&card->conf_mutex); - return rc ? rc : count; + rc = kstrtobool(buf, &reset); + if (rc) + return rc; + + if (reset) { + memset(&card->stats, 0, sizeof(card->stats)); + for (i = 0; i < card->qdio.no_out_queues; i++) { + queue = card->qdio.out_qs[i]; + if (!queue) + break; + memset(&queue->stats, 0, sizeof(queue->stats)); + } + } + + return count; } static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show, @@ -420,7 +416,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, goto out; } - card->info.mac_bits = 0; if (card->discipline) { /* start with a new, pristine netdevice: */ ndev = qeth_clone_netdev(card->dev); @@ -633,8 +628,7 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card, return -EINVAL; mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { + if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; } diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c new file mode 100644 index 000000000000..93a53fed4cf8 --- /dev/null +++ b/drivers/s390/net/qeth_ethtool.c @@ -0,0 +1,370 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2018 + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/ethtool.h> +#include "qeth_core.h" + + +#define QETH_TXQ_STAT(_name, _stat) { \ + .name = _name, \ + .offset = offsetof(struct qeth_out_q_stats, _stat) \ +} + +#define QETH_CARD_STAT(_name, _stat) { \ + .name = _name, \ + .offset = offsetof(struct qeth_card_stats, _stat) \ +} + +struct qeth_stats { + char name[ETH_GSTRING_LEN]; + unsigned int offset; +}; + +static const struct qeth_stats txq_stats[] = { + QETH_TXQ_STAT("IO buffers", bufs), + QETH_TXQ_STAT("IO buffer elements", buf_elements), + QETH_TXQ_STAT("packed IO buffers", bufs_pack), + QETH_TXQ_STAT("skbs", tx_packets), + QETH_TXQ_STAT("packed skbs", skbs_pack), + QETH_TXQ_STAT("SG skbs", skbs_sg), + QETH_TXQ_STAT("HW csum skbs", skbs_csum), + QETH_TXQ_STAT("TSO skbs", skbs_tso), + QETH_TXQ_STAT("linearized skbs", skbs_linearized), + QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail), + QETH_TXQ_STAT("TSO bytes", tso_bytes), + QETH_TXQ_STAT("Packing mode switches", packing_mode_switch), +}; + +static const struct qeth_stats card_stats[] = { + QETH_CARD_STAT("rx0 IO buffers", rx_bufs), + QETH_CARD_STAT("rx0 HW csum skbs", rx_skb_csum), + QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs), + QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags), + QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page), +}; + +#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats) +#define CARD_STATS_LEN ARRAY_SIZE(card_stats) + +static void qeth_add_stat_data(u64 **dst, void *src, + const struct qeth_stats stats[], + unsigned int size) +{ + unsigned int i; + char *stat; + + for (i = 0; i < size; i++) { + stat = (char *)src + stats[i].offset; + **dst = *(u64 *)stat; + (*dst)++; + } +} + +static void qeth_add_stat_strings(u8 **data, const char *prefix, + const struct qeth_stats stats[], + unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name); + *data += ETH_GSTRING_LEN; + } +} + +static int qeth_get_sset_count(struct net_device *dev, int stringset) +{ + struct qeth_card *card = dev->ml_priv; + + switch (stringset) { + case ETH_SS_STATS: + return CARD_STATS_LEN + + card->qdio.no_out_queues * TXQ_STATS_LEN; + default: + return -EINVAL; + } +} + +static void qeth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct qeth_card *card = dev->ml_priv; + unsigned int i; + + qeth_add_stat_data(&data, &card->stats, card_stats, CARD_STATS_LEN); + for (i = 0; i < card->qdio.no_out_queues; i++) + qeth_add_stat_data(&data, &card->qdio.out_qs[i]->stats, + txq_stats, TXQ_STATS_LEN); +} + +static void qeth_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct qeth_card *card = dev->ml_priv; + + param->rx_max_pending = QDIO_MAX_BUFFERS_PER_Q; + param->rx_mini_max_pending = 0; + param->rx_jumbo_max_pending = 0; + param->tx_max_pending = QDIO_MAX_BUFFERS_PER_Q; + + param->rx_pending = card->qdio.in_buf_pool.buf_count; + param->rx_mini_pending = 0; + param->rx_jumbo_pending = 0; + param->tx_pending = QDIO_MAX_BUFFERS_PER_Q; +} + +static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct qeth_card *card = dev->ml_priv; + char prefix[ETH_GSTRING_LEN] = ""; + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + qeth_add_stat_strings(&data, prefix, card_stats, + CARD_STATS_LEN); + for (i = 0; i < card->qdio.no_out_queues; i++) { + snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i); + qeth_add_stat_strings(&data,
prefix, txq_stats, + TXQ_STATS_LEN); + } + break; + default: + WARN_ON(1); + break; + } +} + +static void qeth_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct qeth_card *card = dev->ml_priv; + + strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3", + sizeof(info->driver)); + strlcpy(info->version, "1.0", sizeof(info->version)); + strlcpy(info->fw_version, card->info.mcl_level, + sizeof(info->fw_version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", + CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); +} + +/* Helper function to fill 'advertising' and 'supported' which are the same. */ +/* Autoneg and full-duplex are supported and advertised unconditionally. */ +/* Always advertise and support all speeds up to specified, and only one */ +/* specified port type. */ +static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, + int maxspeed, int porttype) +{ + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + + switch (porttype) { + case PORT_TP: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + break; + case PORT_FIBRE: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + WARN_ON_ONCE(1); + } + + /* partially does fall through, to also select lower speeds */ + switch (maxspeed) { + case SPEED_25000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + break; + case SPEED_10000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + /* fall through */ + case SPEED_1000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Half); + /* fall through */ + case SPEED_100: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); + /* fall through */ + case SPEED_10: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + WARN_ON_ONCE(1); + } +} + + +static int 
qeth_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct qeth_card *card = netdev->ml_priv; + enum qeth_link_types link_type; + struct carrier_info carrier_info; + int rc; + + if (IS_IQD(card) || IS_VM_NIC(card)) + link_type = QETH_LINK_TYPE_10GBIT_ETH; + else + link_type = card->info.link_type; + + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.phy_address = 0; + cmd->base.mdio_support = 0; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + + switch (link_type) { + case QETH_LINK_TYPE_FAST_ETH: + case QETH_LINK_TYPE_LANE_ETH100: + cmd->base.speed = SPEED_100; + cmd->base.port = PORT_TP; + break; + case QETH_LINK_TYPE_GBIT_ETH: + case QETH_LINK_TYPE_LANE_ETH1000: + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + break; + case QETH_LINK_TYPE_10GBIT_ETH: + cmd->base.speed = SPEED_10000; + cmd->base.port = PORT_FIBRE; + break; + case QETH_LINK_TYPE_25GBIT_ETH: + cmd->base.speed = SPEED_25000; + cmd->base.port = PORT_FIBRE; + break; + default: + cmd->base.speed = SPEED_10; + cmd->base.port = PORT_TP; + } + qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); + + /* Check if we can obtain more accurate information. */ + /* If QUERY_CARD_INFO command is not supported or fails, */ + /* just return the heuristics that was filled above. */ + rc = qeth_query_card_info(card, &carrier_info); + if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ + return 0; + if (rc) /* report error from the hardware operation */ + return rc; + /* on success, fill in the information got from the hardware */ + + netdev_dbg(netdev, + "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", + carrier_info.card_type, + carrier_info.port_mode, + carrier_info.port_speed); + + /* Update attributes for which we've obtained more authoritative */ + /* information, leave the rest the way they where filled above. 
*/ + switch (carrier_info.card_type) { + case CARD_INFO_TYPE_1G_COPPER_A: + case CARD_INFO_TYPE_1G_COPPER_B: + cmd->base.port = PORT_TP; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_1G_FIBRE_A: + case CARD_INFO_TYPE_1G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_10G_FIBRE_A: + case CARD_INFO_TYPE_10G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); + break; + } + + switch (carrier_info.port_mode) { + case CARD_INFO_PORTM_FULLDUPLEX: + cmd->base.duplex = DUPLEX_FULL; + break; + case CARD_INFO_PORTM_HALFDUPLEX: + cmd->base.duplex = DUPLEX_HALF; + break; + } + + switch (carrier_info.port_speed) { + case CARD_INFO_PORTS_10M: + cmd->base.speed = SPEED_10; + break; + case CARD_INFO_PORTS_100M: + cmd->base.speed = SPEED_100; + break; + case CARD_INFO_PORTS_1G: + cmd->base.speed = SPEED_1000; + break; + case CARD_INFO_PORTS_10G: + cmd->base.speed = SPEED_10000; + break; + case CARD_INFO_PORTS_25G: + cmd->base.speed = SPEED_25000; + break; + } + + return 0; +} + +const struct ethtool_ops qeth_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_ringparam = qeth_get_ringparam, + .get_strings = qeth_get_strings, + .get_ethtool_stats = qeth_get_ethtool_stats, + .get_sset_count = qeth_get_sset_count, + .get_drvinfo = qeth_get_drvinfo, + .get_link_ksettings = qeth_get_link_ksettings, +}; + +const struct ethtool_ops qeth_osn_ethtool_ops = { + .get_strings = qeth_get_strings, + .get_ethtool_stats = qeth_get_ethtool_stats, + .get_sset_count = qeth_get_sset_count, + .get_drvinfo = qeth_get_drvinfo, +}; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index f108d4b44605..8efb2e8ff8f4 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -25,7 +25,6 @@ #include "qeth_l2.h" static int qeth_l2_set_offline(struct ccwgroup_device *); -static int qeth_l2_stop(struct net_device *); static void qeth_bridgeport_query_support(struct qeth_card *card); static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); @@ -36,7 +35,7 @@ static void qeth_l2_vnicc_init(struct qeth_card *card); static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc, u32 *timeout); -static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) +static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode) { int rc; @@ -63,9 +62,6 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) case IPA_RC_L2_MAC_NOT_FOUND: rc = -ENOENT; break; - case -ENOMEM: - rc = -ENOMEM; - break; default: rc = -EIO; break; @@ -73,6 +69,15 @@ static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) return rc; } +static int qeth_l2_send_setdelmac_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code); +} + static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, enum qeth_ipa_cmds ipacmd) { @@ -86,8 +91,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, cmd = __ipa_cmd(iob); cmd->data.setdelmac.mac_length = ETH_ALEN; ether_addr_copy(cmd->data.setdelmac.mac, mac); - return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob, - NULL, NULL)); + return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL); } static int qeth_l2_send_setmac(struct 
qeth_card *card, __u8 *mac) @@ -98,8 +102,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); if (rc == 0) { dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - mac, card->dev->name); + "MAC address %pM successfully registered\n", mac); } else { switch (rc) { case -EEXIST: @@ -171,9 +174,9 @@ static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) return RTN_UNICAST; } -static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, - unsigned int data_len) +static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len) { struct vlan_ethhdr *veth = vlan_eth_hdr(skb); @@ -185,8 +188,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; if (skb->ip_summed == CHECKSUM_PARTIAL) { qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + QETH_TXQ_STAT_INC(queue, skbs_csum); } } @@ -207,7 +209,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, } } -static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode) +static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode) { if (retcode) QETH_CARD_TEXT_(card, 2, "err%04x", retcode); @@ -223,8 +225,6 @@ static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode) return -ENOENT; case IPA_RC_L2_VLAN_ID_NOT_ALLOWED: return -EPERM; - case -ENOMEM: - return -ENOMEM; default: return -EIO; } @@ -242,9 +242,8 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, cmd->data.setdelvlan.vlan_id, CARD_DEVID(card), cmd->hdr.return_code); QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); - QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); } - return 0; + return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code); } static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, @@ -259,101 +258,40 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, return -ENOMEM; cmd = __ipa_cmd(iob); cmd->data.setdelvlan.vlan_id = i; - return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob, - qeth_l2_send_setdelvlan_cb, NULL)); -} - -static void qeth_l2_process_vlans(struct qeth_card *card) -{ - struct qeth_vlan_vid *id; - - QETH_CARD_TEXT(card, 3, "L2prcvln"); - mutex_lock(&card->vid_list_mutex); - list_for_each_entry(id, &card->vid_list, list) { - qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN); - } - mutex_unlock(&card->vid_list_mutex); + return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL); } static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct qeth_card *card = dev->ml_priv; - struct qeth_vlan_vid *id; - int rc; QETH_CARD_TEXT_(card, 4, "aid:%d", vid); if (!vid) return 0; - if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_CARD_TEXT(card, 3, "aidREC"); - return 0; - } - id = kmalloc(sizeof(*id), GFP_KERNEL); - if (id) { - id->vid = vid; - rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); - if (rc) { - kfree(id); - return rc; - } - mutex_lock(&card->vid_list_mutex); - list_add_tail(&id->list, &card->vid_list); - mutex_unlock(&card->vid_list_mutex); - } else { - return -ENOMEM; - } - return 0; + + return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); } 
static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { - struct qeth_vlan_vid *id, *tmpid = NULL; struct qeth_card *card = dev->ml_priv; - int rc = 0; QETH_CARD_TEXT_(card, 4, "kid:%d", vid); - if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_CARD_TEXT(card, 3, "kidREC"); + if (!vid) return 0; - } - mutex_lock(&card->vid_list_mutex); - list_for_each_entry(id, &card->vid_list, list) { - if (id->vid == vid) { - list_del(&id->list); - tmpid = id; - break; - } - } - mutex_unlock(&card->vid_list_mutex); - if (tmpid) { - rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); - kfree(tmpid); - } - return rc; + + return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); } -static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) +static void qeth_l2_stop_card(struct qeth_card *card) { QETH_DBF_TEXT(SETUP , 2, "stopcard"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_set_allowed_threads(card, 0, 1); - if (card->read.state == CH_STATE_UP && - card->write.state == CH_STATE_UP && - (card->state == CARD_STATE_UP)) { - if (recovery_mode && - card->info.type != QETH_CARD_TYPE_OSN) { - qeth_l2_stop(card->dev); - } else { - rtnl_lock(); - dev_close(card->dev); - rtnl_unlock(); - } - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - card->state = CARD_STATE_SOFTSETUP; - } + if (card->state == CARD_STATE_SOFTSETUP) { qeth_l2_del_all_macs(card); qeth_clear_ipacmd_list(card); @@ -369,6 +307,9 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } + + flush_workqueue(card->event_wq); + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; } static int qeth_l2_process_inbound_buffer(struct qeth_card *card, @@ -416,8 +357,8 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, } work_done++; budget--; - card->stats.rx_packets++; - card->stats.rx_bytes += len; + QETH_CARD_STAT_INC(card, rx_packets); + QETH_CARD_STAT_ADD(card, rx_bytes, len); } return work_done; } @@ -441,7 +382,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) if (!IS_OSN(card)) { rc = qeth_setadpparms_change_macaddr(card); - if (!rc && is_valid_ether_addr(card->dev->dev_addr)) + if (!rc) goto out; QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n", CARD_DEVID(card), rc); @@ -460,6 +401,26 @@ out: return 0; } +static void qeth_l2_register_dev_addr(struct qeth_card *card) +{ + if (!is_valid_ether_addr(card->dev->dev_addr)) + qeth_l2_request_initial_mac(card); + + if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr)) + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; +} + +static int qeth_l2_validate_addr(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) + return eth_validate_addr(dev); + + QETH_CARD_TEXT(card, 4, "nomacadr"); + return -EPERM; +} + static int qeth_l2_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; @@ -469,9 +430,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) QETH_CARD_TEXT(card, 3, "setmac"); - if (card->info.type == QETH_CARD_TYPE_OSN || - card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) { + if (IS_OSM(card) || IS_OSX(card)) { QETH_CARD_TEXT(card, 3, "setmcTYP"); return -EOPNOTSUPP; } @@ -479,39 +438,22 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - 
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_CARD_TEXT(card, 3, "setmcREC"); - return -ERESTARTSYS; - } - - /* avoid racing against concurrent state change: */ - if (!mutex_trylock(&card->conf_mutex)) - return -EAGAIN; - - if (!qeth_card_hw_is_reachable(card)) { - ether_addr_copy(dev->dev_addr, addr->sa_data); - goto out_unlock; - } - /* don't register the same address twice */ if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - goto out_unlock; + return 0; /* add the new address, switch over, drop the old */ rc = qeth_l2_send_setmac(card, addr->sa_data); if (rc) - goto out_unlock; + return rc; ether_addr_copy(old_addr, dev->dev_addr); ether_addr_copy(dev->dev_addr, addr->sa_data); if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) qeth_l2_remove_mac(card, old_addr); card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - -out_unlock: - mutex_unlock(&card->conf_mutex); - return rc; + return 0; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -582,13 +524,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) int i; int rc; - if (card->info.type == QETH_CARD_TYPE_OSN) - return; - QETH_CARD_TEXT(card, 3, "setmulti"); - if (qeth_threads_running(card, QETH_RECOVER_THREAD) && - (card->state != CARD_STATE_UP)) - return; spin_lock_bh(&card->mclock); @@ -673,17 +609,8 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; - if (card->state != CARD_STATE_UP) { - card->stats.tx_carrier_errors++; - goto tx_drop; - } - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); - } netif_stop_queue(dev); if (IS_OSN(card)) @@ -693,81 +620,21 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, qeth_l2_fill_header); if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; + QETH_TXQ_STAT_INC(queue, tx_packets); + QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); netif_wake_queue(dev); return NETDEV_TX_OK; } else if (rc == -EBUSY) { return NETDEV_TX_BUSY; } /* else fall through */ -tx_drop: - card->stats.tx_dropped++; - card->stats.tx_errors++; + QETH_TXQ_STAT_INC(queue, tx_dropped); + QETH_TXQ_STAT_INC(queue, tx_errors); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } -static int __qeth_l2_open(struct net_device *dev) -{ - struct qeth_card *card = dev->ml_priv; - int rc = 0; - - QETH_CARD_TEXT(card, 4, "qethopen"); - if (card->state == CARD_STATE_UP) - return rc; - if (card->state != CARD_STATE_SOFTSETUP) - return -ENODEV; - - if ((card->info.type != QETH_CARD_TYPE_OSN) && - (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { - QETH_CARD_TEXT(card, 4, "nomacadr"); - return -EPERM; - } - card->data.state = CH_STATE_UP; - card->state = CARD_STATE_UP; - netif_start_queue(dev); - - if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { - napi_enable(&card->napi); - local_bh_disable(); - napi_schedule(&card->napi); - /* kick-start the NAPI softirq: */ - local_bh_enable(); - } else - rc = -EIO; - return rc; -} - -static int qeth_l2_open(struct net_device *dev) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT(card, 5, "qethope_"); - if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_CARD_TEXT(card, 3, "openREC"); - return 
-ERESTARTSYS; - } - return __qeth_l2_open(dev); -} - -static int qeth_l2_stop(struct net_device *dev) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT(card, 4, "qethstop"); - netif_tx_disable(dev); - if (card->state == CARD_STATE_UP) { - card->state = CARD_STATE_SOFTSETUP; - napi_disable(&card->napi); - } - return 0; -} - static const struct device_type qeth_l2_devtype = { .name = "qeth_layer2", .groups = qeth_l2_attr_groups, @@ -783,7 +650,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) if (rc) return rc; } - INIT_LIST_HEAD(&card->vid_list); + hash_init(card->mac_htable); card->info.hwtrap = 0; qeth_l2_vnicc_set_defaults(card); @@ -801,33 +668,19 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); + + cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); } -static const struct ethtool_ops qeth_l2_ethtool_ops = { - .get_link = ethtool_op_get_link, - .get_strings = qeth_core_get_strings, - .get_ethtool_stats = qeth_core_get_ethtool_stats, - .get_sset_count = qeth_core_get_sset_count, - .get_drvinfo = qeth_core_get_drvinfo, - .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, -}; - -static const struct ethtool_ops qeth_l2_osn_ops = { - .get_strings = qeth_core_get_strings, - .get_ethtool_stats = qeth_core_get_ethtool_stats, - .get_sset_count = qeth_core_get_sset_count, - .get_drvinfo = qeth_core_get_drvinfo, -}; - static const struct net_device_ops qeth_l2_netdev_ops = { - .ndo_open = qeth_l2_open, - .ndo_stop = qeth_l2_stop, - .ndo_get_stats = qeth_get_stats, + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l2_hard_start_xmit, .ndo_features_check = qeth_features_check, - .ndo_validate_addr = eth_validate_addr, + .ndo_validate_addr = qeth_l2_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, .ndo_do_ioctl = qeth_do_ioctl, .ndo_set_mac_address = qeth_l2_set_mac_address, @@ -838,27 +691,36 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_set_features = qeth_set_features }; +static const struct net_device_ops qeth_osn_netdev_ops = { + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, + .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = qeth_tx_timeout, +}; + static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok) { int rc; - if (qeth_netdev_is_registered(card->dev)) - return 0; - - card->dev->priv_flags |= IFF_UNICAST_FLT; - card->dev->netdev_ops = &qeth_l2_netdev_ops; - if (card->info.type == QETH_CARD_TYPE_OSN) { - card->dev->ethtool_ops = &qeth_l2_osn_ops; + if (IS_OSN(card)) { + card->dev->netdev_ops = &qeth_osn_netdev_ops; card->dev->flags |= IFF_NOARP; - } else { - card->dev->ethtool_ops = &qeth_l2_ethtool_ops; - card->dev->needed_headroom = sizeof(struct qeth_hdr); + goto add_napi; } - if (card->info.type == QETH_CARD_TYPE_OSM) + card->dev->needed_headroom = sizeof(struct qeth_hdr); + card->dev->netdev_ops = &qeth_l2_netdev_ops; + card->dev->priv_flags |= IFF_UNICAST_FLT; + + if (IS_OSM(card)) { card->dev->features |= NETIF_F_VLAN_CHALLENGED; - else + } else { + if (!IS_VM_NIC(card)) + card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { card->dev->features |= NETIF_F_SG; @@ -892,8 +754,7 @@ 
static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok) PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); } - if (!is_valid_ether_addr(card->dev->dev_addr)) - qeth_l2_request_initial_mac(card); +add_napi: netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); rc = register_netdev(card->dev); if (!rc && carrier_ok) @@ -924,11 +785,11 @@ static void qeth_l2_trace_features(struct qeth_card *card) sizeof(card->options.vnicc.sup_chars)); } -static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) +static int qeth_l2_set_online(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); + struct net_device *dev = card->dev; int rc = 0; - enum qeth_card_states recover_flag; bool carrier_ok; mutex_lock(&card->discipline_mutex); @@ -936,25 +797,12 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) QETH_DBF_TEXT(SETUP, 2, "setonlin"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); - recover_flag = card->state; rc = qeth_core_hardsetup_card(card, &carrier_ok); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); rc = -ENODEV; goto out_remove; } - qeth_bridgeport_query_support(card); - if (card->options.sbp.supported_funcs) - dev_info(&card->gdev->dev, - "The device represents a Bridge Capable Port\n"); - - rc = qeth_l2_setup_netdev(card, carrier_ok); - if (rc) - goto out_remove; - - if (card->info.type != QETH_CARD_TYPE_OSN && - !qeth_l2_send_setmac(card, card->dev->dev_addr)) - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { if (card->info.hwtrap && @@ -963,6 +811,13 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) } else card->info.hwtrap = 0; + qeth_bridgeport_query_support(card); + if (card->options.sbp.supported_funcs) + dev_info(&card->gdev->dev, + "The device represents a Bridge Capable Port\n"); + + qeth_l2_register_dev_addr(card); + /* for the rx_bcast characteristic, init VNICC after setmac */ qeth_l2_vnicc_init(card); @@ -984,11 +839,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - if (card->info.type != QETH_CARD_TYPE_OSN) - qeth_l2_process_vlans(card); - - netif_tx_disable(card->dev); - rc = qeth_init_qdio_queues(card); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); @@ -999,17 +849,25 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_set_allowed_threads(card, 0xffffffff, 0); - qeth_enable_hw_features(card->dev); - if (recover_flag == CARD_STATE_RECOVER) { - if (recovery_mode && - card->info.type != QETH_CARD_TYPE_OSN) { - __qeth_l2_open(card->dev); - qeth_l2_set_rx_mode(card->dev); - } else { - rtnl_lock(); - dev_open(card->dev, NULL); - rtnl_unlock(); + if (!qeth_netdev_is_registered(dev)) { + rc = qeth_l2_setup_netdev(card, carrier_ok); + if (rc) + goto out_remove; + } else { + rtnl_lock(); + if (carrier_ok) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + netif_device_attach(dev); + qeth_enable_hw_features(dev); + + if (card->info.open_when_online) { + card->info.open_when_online = 0; + dev_open(dev, NULL); } + rtnl_unlock(); } /* let user_space know that device is online */ kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); @@ -1018,44 +876,42 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) return 0; out_remove: - qeth_l2_stop_card(card, 0); + qeth_l2_stop_card(card); ccw_device_set_offline(CARD_DDEV(card)); 
ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); qdio_free(CARD_DDEV(card)); - if (recover_flag == CARD_STATE_RECOVER) - card->state = CARD_STATE_RECOVER; - else - card->state = CARD_STATE_DOWN; + card->state = CARD_STATE_DOWN; + mutex_unlock(&card->conf_mutex); mutex_unlock(&card->discipline_mutex); return rc; } -static int qeth_l2_set_online(struct ccwgroup_device *gdev) -{ - return __qeth_l2_set_online(gdev, 0); -} - static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) { struct qeth_card *card = dev_get_drvdata(&cgdev->dev); int rc = 0, rc2 = 0, rc3 = 0; - enum qeth_card_states recover_flag; mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); - netif_carrier_off(card->dev); - recover_flag = card->state; if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); card->info.hwtrap = 1; } - qeth_l2_stop_card(card, recovery_mode); + + rtnl_lock(); + card->info.open_when_online = card->dev->flags & IFF_UP; + dev_close(card->dev); + netif_device_detach(card->dev); + netif_carrier_off(card->dev); + rtnl_unlock(); + + qeth_l2_stop_card(card); rc = ccw_device_set_offline(CARD_DDEV(card)); rc2 = ccw_device_set_offline(CARD_WDEV(card)); rc3 = ccw_device_set_offline(CARD_RDEV(card)); @@ -1064,8 +920,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, if (rc) QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); qdio_free(CARD_DDEV(card)); - if (recover_flag == CARD_STATE_UP) - card->state = CARD_STATE_RECOVER; + /* let user_space know that device is offline */ kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); mutex_unlock(&card->conf_mutex); @@ -1090,18 +945,16 @@ static int qeth_l2_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); - qeth_set_recovery_task(card); __qeth_l2_set_offline(card->gdev, 1); - rc = __qeth_l2_set_online(card->gdev, 1); + rc = qeth_l2_set_online(card->gdev); if (!rc) dev_info(&card->gdev->dev, "Device successfully recovered!\n"); else { - qeth_close_dev(card); + ccwgroup_set_offline(card->gdev); dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); } - qeth_clear_recovery_task(card); qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; @@ -1122,37 +975,23 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - netif_device_detach(card->dev); qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); if (gdev->state == CCWGROUP_OFFLINE) return 0; - if (card->state == CARD_STATE_UP) { - if (card->info.hwtrap) - qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); - __qeth_l2_set_offline(card->gdev, 1); - } else - __qeth_l2_set_offline(card->gdev, 0); + + qeth_l2_set_offline(gdev); return 0; } static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - int rc = 0; + int rc; - if (card->state == CARD_STATE_RECOVER) { - rc = __qeth_l2_set_online(card->gdev, 1); - if (rc) { - rtnl_lock(); - dev_close(card->dev); - rtnl_unlock(); - } - } else - rc = __qeth_l2_set_online(card->gdev, 0); + rc = qeth_l2_set_online(gdev); qeth_set_allowed_threads(card, 0xffffffff, 0); - netif_device_attach(card->dev); if (rc) 
dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); @@ -1224,20 +1063,14 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, } static int qeth_osn_send_ipa_cmd(struct qeth_card *card, - struct qeth_cmd_buffer *iob, int data_len) + struct qeth_cmd_buffer *iob) { - u16 s1, s2; + u16 length; QETH_CARD_TEXT(card, 4, "osndipa"); - qeth_prepare_ipa_cmd(card, iob); - s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); - s2 = (u16)data_len; - memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); - memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); - memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); - return qeth_osn_send_control_data(card, s1, iob); + memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2); + return qeth_osn_send_control_data(card, length, iob); } int qeth_osn_assist(struct net_device *dev, void *data, int data_len) @@ -1254,8 +1087,9 @@ int qeth_osn_assist(struct net_device *dev, void *data, int data_len) if (!qeth_card_hw_is_reachable(card)) return -ENODEV; iob = qeth_wait_for_buffer(&card->write); + qeth_prepare_ipa_cmd(card, iob, (u16) data_len); memcpy(__ipa_cmd(iob), data, data_len); - return qeth_osn_send_ipa_cmd(card, iob, data_len); + return qeth_osn_send_ipa_cmd(card, iob); } EXPORT_SYMBOL(qeth_osn_assist); @@ -1434,7 +1268,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, data->card = card; memcpy(&data->qports, qports, sizeof(struct qeth_sbp_state_change) + extrasize); - queue_work(qeth_wq, &data->worker); + queue_work(card->event_wq, &data->worker); } struct qeth_bridge_host_data { @@ -1506,14 +1340,12 @@ static void qeth_bridge_host_event(struct qeth_card *card, data->card = card; memcpy(&data->hostevs, hostevs, sizeof(struct qeth_ipacmd_addr_change) + extrasize); - queue_work(qeth_wq, &data->worker); + queue_work(card->event_wq, &data->worker); } /* SETBRIDGEPORT support; sending commands */ struct _qeth_sbp_cbctl { - u16 ipa_rc; - u16 cmd_rc; union { u32 supported; struct { @@ -1523,23 +1355,21 @@ struct _qeth_sbp_cbctl { } data; }; -/** - * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes. - * @card: qeth_card structure pointer, for debug messages. - * @cbctl: state structure with hardware return codes. - * @setcmd: IPA command code - * - * Returns negative errno-compatible error indication or 0 on success. 
- */ static int qeth_bridgeport_makerc(struct qeth_card *card, - struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd) + struct qeth_ipa_cmd *cmd) { + struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp; + enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code; + u16 ipa_rc = cmd->hdr.return_code; + u16 sbp_rc = sbp->hdr.return_code; int rc; - int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD); - if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) || - (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc))) - switch (cbctl->cmd_rc) { + if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS) + return 0; + + if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) || + (!IS_IQD(card) && ipa_rc == sbp_rc)) { + switch (sbp_rc) { case IPA_RC_SUCCESS: rc = 0; break; @@ -1603,8 +1433,8 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, default: rc = -EIO; } - else - switch (cbctl->ipa_rc) { + } else { + switch (ipa_rc) { case IPA_RC_NOTSUPP: rc = -EOPNOTSUPP; break; @@ -1614,10 +1444,11 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, default: rc = -EIO; } + } if (rc) { - QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc); - QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc); + QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc); + QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc); } return rc; } @@ -1649,15 +1480,15 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + int rc; + QETH_CARD_TEXT(card, 2, "brqsupcb"); - cbctl->ipa_rc = cmd->hdr.return_code; - cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; - if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) { - cbctl->data.supported = - cmd->data.sbp.data.query_cmds_supp.supported_cmds; - } else { - cbctl->data.supported = 0; - } + rc = qeth_bridgeport_makerc(card, cmd); + if (rc) + return rc; + + cbctl->data.supported = + cmd->data.sbp.data.query_cmds_supp.supported_cmds; return 0; } @@ -1678,12 +1509,11 @@ static void qeth_bridgeport_query_support(struct qeth_card *card) sizeof(struct qeth_sbp_query_cmds_supp)); if (!iob) return; + if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, - (void *)&cbctl) || - qeth_bridgeport_makerc(card, &cbctl, - IPA_SBP_QUERY_COMMANDS_SUPPORTED)) { - /* non-zero makerc signifies failure, and produce messages */ + &cbctl)) { card->options.sbp.role = QETH_SBP_ROLE_NONE; + card->options.sbp.supported_funcs = 0; return; } card->options.sbp.supported_funcs = cbctl.data.supported; @@ -1695,16 +1525,16 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports; struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + int rc; QETH_CARD_TEXT(card, 2, "brqprtcb"); - cbctl->ipa_rc = cmd->hdr.return_code; - cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; - if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0)) - return 0; + rc = qeth_bridgeport_makerc(card, cmd); + if (rc) + return rc; + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { - cbctl->cmd_rc = 0xffff; QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); - return 0; + return -EINVAL; } /* first entry contains the state of the local port */ if (qports->num_entries > 0) { @@ -1729,7 +1559,6 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, int qeth_bridgeport_query_ports(struct qeth_card *card, enum qeth_sbp_roles 
*role, enum qeth_sbp_states *state) { - int rc = 0; struct qeth_cmd_buffer *iob; struct _qeth_sbp_cbctl cbctl = { .data = { @@ -1746,22 +1575,18 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); if (!iob) return -ENOMEM; - rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, - (void *)&cbctl); - if (rc < 0) - return rc; - return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); + + return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, + &cbctl); } static int qeth_bridgeport_set_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; - struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + QETH_CARD_TEXT(card, 2, "brsetrcb"); - cbctl->ipa_rc = cmd->hdr.return_code; - cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; - return 0; + return qeth_bridgeport_makerc(card, cmd); } /** @@ -1773,10 +1598,8 @@ static int qeth_bridgeport_set_cb(struct qeth_card *card, */ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) { - int rc = 0; int cmdlength; struct qeth_cmd_buffer *iob; - struct _qeth_sbp_cbctl cbctl; enum qeth_ipa_sbp_cmd setcmd; QETH_CARD_TEXT(card, 2, "brsetrol"); @@ -1801,11 +1624,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); if (!iob) return -ENOMEM; - rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, - (void *)&cbctl); - if (rc < 0) - return rc; - return qeth_bridgeport_makerc(card, &cbctl, setcmd); + + return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL); } /** @@ -1909,7 +1729,7 @@ static bool qeth_bridgeport_is_in_use(struct qeth_card *card) /* VNIC Characteristics support */ /* handle VNICC IPA command return codes; convert to error codes */ -static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc) +static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc) { int rc; @@ -1967,7 +1787,7 @@ static int qeth_l2_vnicc_request_cb(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "vniccrcb"); if (cmd->hdr.return_code) - return 0; + return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code); /* return results to caller */ card->options.vnicc.sup_chars = rep->hdr.sup; card->options.vnicc.cur_chars = rep->hdr.cur; @@ -1988,7 +1808,6 @@ static int qeth_l2_vnicc_request(struct qeth_card *card, struct qeth_ipacmd_vnicc *req; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; - int rc; QETH_CARD_TEXT(card, 2, "vniccreq"); @@ -2031,10 +1850,7 @@ static int qeth_l2_vnicc_request(struct qeth_card *card, } /* send request */ - rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, - (void *) cbctl); - - return qeth_l2_vnicc_makerc(card, rc); + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, cbctl); } /* VNICC query VNIC characteristics request */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 42a7cdc59b76..7e68d9d16859 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -40,7 +40,6 @@ static int qeth_l3_set_offline(struct ccwgroup_device *); -static int qeth_l3_stop(struct net_device *); static void qeth_l3_set_rx_mode(struct net_device *dev); static int qeth_l3_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *); @@ -254,8 +253,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) } else rc = 
qeth_l3_register_addr_entry(card, addr); - if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) || - (rc == IPA_RC_LAN_OFFLINE)) { + if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; if (addr->ref_counter < 1) { qeth_l3_deregister_addr_entry(card, addr); @@ -339,10 +337,28 @@ static void qeth_l3_recover_ip(struct qeth_card *card) } +static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { + case IPA_RC_SUCCESS: + return 0; + case IPA_RC_DUPLICATE_IP_ADDRESS: + return -EADDRINUSE; + case IPA_RC_MC_ADDR_NOT_FOUND: + return -ENOENT; + case IPA_RC_LAN_OFFLINE: + return -ENETDOWN; + default: + return -EIO; + } +} + static int qeth_l3_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd) { - int rc; struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@ -359,9 +375,7 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card, else memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4); - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - - return rc; + return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); } static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) @@ -423,7 +437,7 @@ static int qeth_l3_send_setdelip(struct qeth_card *card, cmd->data.setdelip4.flags = flags; } - return qeth_send_ipa_cmd(card, iob, NULL, NULL); + return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); } static int qeth_l3_send_setrouting(struct qeth_card *card, @@ -943,12 +957,13 @@ static int qeth_l3_start_ipassists(struct qeth_card *card) static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) - ether_addr_copy(card->dev->dev_addr, - cmd->data.create_destroy_addr.unique_id); + if (cmd->hdr.return_code) + return -EIO; + + ether_addr_copy(card->dev->dev_addr, + cmd->data.create_destroy_addr.unique_id); return 0; } @@ -976,19 +991,18 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) static int qeth_l3_get_unique_id_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) + if (cmd->hdr.return_code == 0) { card->info.unique_id = *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]); - else { - card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | - UNIQUE_ID_NOT_BY_CARD; - dev_warn(&card->gdev->dev, "The network adapter failed to " - "generate a unique ID\n"); + return 0; } - return 0; + + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; + dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n"); + return -EIO; } static int qeth_l3_get_unique_id(struct qeth_card *card) @@ -1071,7 +1085,7 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, cmd->data.diagass.action, CARD_DEVID(card)); } - return 0; + return rc ? 
-EIO : 0; } static int @@ -1281,10 +1295,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, QETH_CARD_TEXT_(card, 4, "kid:%d", vid); - if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_CARD_TEXT(card, 3, "kidREC"); - return 0; - } clear_bit(vid, card->active_vlans); qeth_l3_set_rx_mode(dev); return 0; @@ -1305,12 +1315,11 @@ static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); else ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); - - card->stats.multicast++; + QETH_CARD_STAT_INC(card, rx_multicast); break; case QETH_CAST_BROADCAST: ether_addr_copy(tg_addr, card->dev->broadcast); - card->stats.multicast++; + QETH_CARD_STAT_INC(card, rx_multicast); break; default: if (card->options.sniffer) @@ -1391,33 +1400,23 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card, } work_done++; budget--; - card->stats.rx_packets++; - card->stats.rx_bytes += len; + QETH_CARD_STAT_INC(card, rx_packets); + QETH_CARD_STAT_ADD(card, rx_bytes, len); } return work_done; } -static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) +static void qeth_l3_stop_card(struct qeth_card *card) { QETH_DBF_TEXT(SETUP, 2, "stopcard"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_set_allowed_threads(card, 0, 1); + if (card->options.sniffer && (card->info.promisc_mode == SET_PROMISC_MODE_ON)) qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); - if (card->read.state == CH_STATE_UP && - card->write.state == CH_STATE_UP && - (card->state == CARD_STATE_UP)) { - if (recovery_mode) - qeth_l3_stop(card->dev); - else { - rtnl_lock(); - dev_close(card->dev); - rtnl_unlock(); - } - card->state = CARD_STATE_SOFTSETUP; - } + if (card->state == CARD_STATE_SOFTSETUP) { qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); @@ -1433,6 +1432,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } + + flush_workqueue(card->event_wq); } /* @@ -1473,9 +1474,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) int i, rc; QETH_CARD_TEXT(card, 3, "setmulti"); - if (qeth_threads_running(card, QETH_RECOVER_THREAD) && - (card->state != CARD_STATE_UP)) - return; + if (!card->options.sniffer) { spin_lock_bh(&card->mclock); @@ -1486,14 +1485,14 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) switch (addr->disp_flag) { case QETH_DISP_ADDR_DELETE: rc = qeth_l3_deregister_addr_entry(card, addr); - if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) { + if (!rc || rc == -ENOENT) { hash_del(&addr->hnode); kfree(addr); } break; case QETH_DISP_ADDR_ADD: rc = qeth_l3_register_addr_entry(card, addr); - if (rc && rc != IPA_RC_LAN_OFFLINE) { + if (rc && rc != -ENETDOWN) { hash_del(&addr->hnode); kfree(addr); break; @@ -1514,7 +1513,7 @@ static void qeth_l3_set_rx_mode(struct net_device *dev) qeth_l3_handle_promisc_mode(card); } -static int qeth_l3_arp_makerc(int rc) +static int qeth_l3_arp_makerc(u16 rc) { switch (rc) { case IPA_RC_SUCCESS: @@ -1531,8 +1530,18 @@ static int qeth_l3_arp_makerc(int rc) } } +static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + qeth_setassparms_cb(card, reply, data); + return qeth_l3_arp_makerc(cmd->hdr.return_code); +} + static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) { + struct qeth_cmd_buffer *iob; int rc; QETH_CARD_TEXT(card, 3, "arpstnoe"); 
@@ -1547,13 +1556,19 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; } - rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_SET_NO_ENTRIES, - no_entries); + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_SET_NO_ENTRIES, 4, + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + + __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries; + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); if (rc) QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n", CARD_DEVID(card), rc); - return qeth_l3_arp_makerc(rc); + return rc; } static __u32 get_arp_entry_size(struct qeth_card *card, @@ -1604,7 +1619,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, struct qeth_ipa_cmd *cmd; struct qeth_arp_query_data *qdata; struct qeth_arp_query_info *qinfo; - int i; int e; int entrybytes_done; int stripped_bytes; @@ -1618,13 +1632,13 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, if (cmd->hdr.return_code) { QETH_CARD_TEXT(card, 4, "arpcberr"); QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); - return 0; + return qeth_l3_arp_makerc(cmd->hdr.return_code); } if (cmd->data.setassparms.hdr.return_code) { cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; QETH_CARD_TEXT(card, 4, "setaperr"); QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); - return 0; + return qeth_l3_arp_makerc(cmd->hdr.return_code); } qdata = &cmd->data.setassparms.data.query_arp; QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); @@ -1651,9 +1665,9 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, break; if ((qinfo->udata_len - qinfo->udata_offset) < esize) { - QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM); - cmd->hdr.return_code = IPA_RC_ENOMEM; - goto out_error; + QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC); + memset(qinfo->udata, 0, 4); + return -ENOSPC; } memcpy(qinfo->udata + qinfo->udata_offset, @@ -1676,10 +1690,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); QETH_CARD_TEXT_(card, 4, "rc%i", 0); return 0; -out_error: - i = 0; - memcpy(qinfo->udata, &i, 4); - return 0; } static int qeth_l3_query_arp_cache_info(struct qeth_card *card, @@ -1701,13 +1711,11 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card, return -ENOMEM; cmd = __ipa_cmd(iob); cmd->data.setassparms.data.query_arp.request_bits = 0x000F; - rc = qeth_send_control_data(card, - QETH_SETASS_BASE_LEN + QETH_ARP_CMD_LEN, - iob, qeth_l3_arp_query_cb, qinfo); + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo); if (rc) QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n", CARD_DEVID(card), rc); - return qeth_l3_arp_makerc(rc); + return rc; } static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) @@ -1789,16 +1797,16 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card, cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry; ether_addr_copy(cmd_entry->macaddr, entry->macaddr); memcpy(cmd_entry->ipaddr, entry->ipaddr, 4); - rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); if (rc) QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n", arp_cmd, CARD_DEVID(card), rc); - - return qeth_l3_arp_makerc(rc); + return rc; } static int qeth_l3_arp_flush_cache(struct qeth_card *card) 
{ + struct qeth_cmd_buffer *iob; int rc; QETH_CARD_TEXT(card, 3, "arpflush"); @@ -1813,12 +1821,18 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { return -EOPNOTSUPP; } - rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, - IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_FLUSH_CACHE, 0, + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); if (rc) QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n", CARD_DEVID(card), rc); - return qeth_l3_arp_makerc(rc); + return rc; } static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -1920,12 +1934,13 @@ static u8 qeth_l3_cast_type_to_flag(int cast_type) return QETH_CAST_UNICAST; } -static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int ipv, int cast_type, - unsigned int data_len) +static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, int cast_type, unsigned int data_len) { struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + struct qeth_card *card = queue->card; hdr->hdr.l3.length = data_len; @@ -1947,8 +1962,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, /* some HW requires combined L3+L4 csum offload: */ if (ipv == 4) hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; - if (card->options.performance_stats) - card->perf_stats.tx_csum++; + QETH_TXQ_STAT_INC(queue, skbs_csum); } } @@ -2049,6 +2063,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; int rc; + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + if (IS_IQD(card)) { if (card->options.sniffer) goto tx_drop; @@ -2058,20 +2074,9 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, goto tx_drop; } - if (card->state != CARD_STATE_UP) { - card->stats.tx_carrier_errors++; - goto tx_drop; - } - if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) goto tx_drop; - queue = qeth_get_tx_queue(card, skb, ipv, cast_type); - - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); - } netif_stop_queue(dev); if (ipv == 4 || IS_IQD(card)) @@ -2081,11 +2086,8 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_l3_fill_header); if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; + QETH_TXQ_STAT_INC(queue, tx_packets); + QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes); netif_wake_queue(dev); return NETDEV_TX_OK; } else if (rc == -EBUSY) { @@ -2093,72 +2095,13 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, } /* else fall through */ tx_drop: - card->stats.tx_dropped++; - card->stats.tx_errors++; + QETH_TXQ_STAT_INC(queue, tx_dropped); + QETH_TXQ_STAT_INC(queue, tx_errors); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } -static int __qeth_l3_open(struct net_device *dev) -{ - struct qeth_card *card = dev->ml_priv; - int rc = 0; - - QETH_CARD_TEXT(card, 4, "qethopen"); - if (card->state == CARD_STATE_UP) - return rc; - if (card->state != CARD_STATE_SOFTSETUP) - return -ENODEV; - card->data.state = CH_STATE_UP; - card->state 
= CARD_STATE_UP; - netif_start_queue(dev); - - if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { - napi_enable(&card->napi); - local_bh_disable(); - napi_schedule(&card->napi); - /* kick-start the NAPI softirq: */ - local_bh_enable(); - } else - rc = -EIO; - return rc; -} - -static int qeth_l3_open(struct net_device *dev) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT(card, 5, "qethope_"); - if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { - QETH_CARD_TEXT(card, 3, "openREC"); - return -ERESTARTSYS; - } - return __qeth_l3_open(dev); -} - -static int qeth_l3_stop(struct net_device *dev) -{ - struct qeth_card *card = dev->ml_priv; - - QETH_CARD_TEXT(card, 4, "qethstop"); - netif_tx_disable(dev); - if (card->state == CARD_STATE_UP) { - card->state = CARD_STATE_SOFTSETUP; - napi_disable(&card->napi); - } - return 0; -} - -static const struct ethtool_ops qeth_l3_ethtool_ops = { - .get_link = ethtool_op_get_link, - .get_strings = qeth_core_get_strings, - .get_ethtool_stats = qeth_core_get_ethtool_stats, - .get_sset_count = qeth_core_get_sset_count, - .get_drvinfo = qeth_core_get_drvinfo, - .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, -}; - /* * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting * NOARP on the netdevice is no option because it also turns off neighbor @@ -2193,9 +2136,9 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, } static const struct net_device_ops qeth_l3_netdev_ops = { - .ndo_open = qeth_l3_open, - .ndo_stop = qeth_l3_stop, - .ndo_get_stats = qeth_get_stats, + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l3_set_rx_mode, @@ -2208,9 +2151,9 @@ static const struct net_device_ops qeth_l3_netdev_ops = { }; static const struct net_device_ops qeth_l3_osa_netdev_ops = { - .ndo_open = qeth_l3_open, - .ndo_stop = qeth_l3_stop, - .ndo_get_stats = qeth_get_stats, + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, .ndo_start_xmit = qeth_l3_hard_start_xmit, .ndo_features_check = qeth_l3_osa_features_check, .ndo_validate_addr = eth_validate_addr, @@ -2229,9 +2172,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) unsigned int headroom; int rc; - if (qeth_netdev_is_registered(card->dev)) - return 0; - if (card->info.type == QETH_CARD_TYPE_OSD || card->info.type == QETH_CARD_TYPE_OSX) { if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || @@ -2283,7 +2223,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok) return -ENODEV; card->dev->needed_headroom = headroom; - card->dev->ethtool_ops = &qeth_l3_ethtool_ops; card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; @@ -2338,17 +2277,18 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); + cancel_work_sync(&card->close_dev_work); if (qeth_netdev_is_registered(card->dev)) unregister_netdev(card->dev); qeth_l3_clear_ip_htable(card, 0); qeth_l3_clear_ipato_list(card); } -static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) +static int qeth_l3_set_online(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); + struct net_device *dev = card->dev; int rc = 0; - enum qeth_card_states recover_flag; bool carrier_ok; 
mutex_lock(&card->discipline_mutex); @@ -2356,7 +2296,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) QETH_DBF_TEXT(SETUP, 2, "setonlin"); QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); - recover_flag = card->state; rc = qeth_core_hardsetup_card(card, &carrier_ok); if (rc) { QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); @@ -2364,10 +2303,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - rc = qeth_l3_setup_netdev(card, carrier_ok); - if (rc) - goto out_remove; - if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { if (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) @@ -2397,7 +2332,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) if (rc) QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); } - netif_tx_disable(card->dev); rc = qeth_init_qdio_queues(card); if (rc) { @@ -2410,14 +2344,23 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) qeth_set_allowed_threads(card, 0xffffffff, 0); qeth_l3_recover_ip(card); - qeth_enable_hw_features(card->dev); - if (recover_flag == CARD_STATE_RECOVER) { + if (!qeth_netdev_is_registered(dev)) { + rc = qeth_l3_setup_netdev(card, carrier_ok); + if (rc) + goto out_remove; + } else { rtnl_lock(); - if (recovery_mode) { - __qeth_l3_open(card->dev); - qeth_l3_set_rx_mode(card->dev); - } else { - dev_open(card->dev, NULL); + if (carrier_ok) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + netif_device_attach(dev); + qeth_enable_hw_features(dev); + + if (card->info.open_when_online) { + card->info.open_when_online = 0; + dev_open(dev, NULL); } rtnl_unlock(); } @@ -2428,45 +2371,43 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) mutex_unlock(&card->discipline_mutex); return 0; out_remove: - qeth_l3_stop_card(card, 0); + qeth_l3_stop_card(card); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); qdio_free(CARD_DDEV(card)); - if (recover_flag == CARD_STATE_RECOVER) - card->state = CARD_STATE_RECOVER; - else - card->state = CARD_STATE_DOWN; + card->state = CARD_STATE_DOWN; + mutex_unlock(&card->conf_mutex); mutex_unlock(&card->discipline_mutex); return rc; } -static int qeth_l3_set_online(struct ccwgroup_device *gdev) -{ - return __qeth_l3_set_online(gdev, 0); -} - static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) { struct qeth_card *card = dev_get_drvdata(&cgdev->dev); int rc = 0, rc2 = 0, rc3 = 0; - enum qeth_card_states recover_flag; mutex_lock(&card->discipline_mutex); mutex_lock(&card->conf_mutex); QETH_DBF_TEXT(SETUP, 3, "setoffl"); QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); - netif_carrier_off(card->dev); - recover_flag = card->state; if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); card->info.hwtrap = 1; } - qeth_l3_stop_card(card, recovery_mode); - if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) { + + rtnl_lock(); + card->info.open_when_online = card->dev->flags & IFF_UP; + dev_close(card->dev); + netif_device_detach(card->dev); + netif_carrier_off(card->dev); + rtnl_unlock(); + + qeth_l3_stop_card(card); + if (card->options.cq == QETH_CQ_ENABLED) { rtnl_lock(); call_netdevice_notifiers(NETDEV_REBOOT, card->dev); rtnl_unlock(); @@ -2479,8 +2420,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, if (rc) QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 
qdio_free(CARD_DDEV(card)); - if (recover_flag == CARD_STATE_UP) - card->state = CARD_STATE_RECOVER; + /* let user_space know that device is offline */ kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); mutex_unlock(&card->conf_mutex); @@ -2506,18 +2446,16 @@ static int qeth_l3_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); - qeth_set_recovery_task(card); __qeth_l3_set_offline(card->gdev, 1); - rc = __qeth_l3_set_online(card->gdev, 1); + rc = qeth_l3_set_online(card->gdev); if (!rc) dev_info(&card->gdev->dev, "Device successfully recovered!\n"); else { - qeth_close_dev(card); + ccwgroup_set_offline(card->gdev); dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); } - qeth_clear_recovery_task(card); qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); return 0; @@ -2527,37 +2465,23 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - netif_device_detach(card->dev); qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); if (gdev->state == CCWGROUP_OFFLINE) return 0; - if (card->state == CARD_STATE_UP) { - if (card->info.hwtrap) - qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); - __qeth_l3_set_offline(card->gdev, 1); - } else - __qeth_l3_set_offline(card->gdev, 0); + + qeth_l3_set_offline(gdev); return 0; } static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - int rc = 0; + int rc; - if (card->state == CARD_STATE_RECOVER) { - rc = __qeth_l3_set_online(card->gdev, 1); - if (rc) { - rtnl_lock(); - dev_close(card->dev); - rtnl_unlock(); - } - } else - rc = __qeth_l3_set_online(card->gdev, 0); + rc = qeth_l3_set_online(gdev); qeth_set_allowed_threads(card, 0xffffffff, 0); - netif_device_attach(card->dev); if (rc) dev_warn(&card->gdev->dev, "The qeth device driver " "failed to recover an error on the device\n"); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 45ac6d8705c6..cff518b0f904 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -167,8 +167,7 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { + if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; } @@ -213,8 +212,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, return -EPERM; mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { + if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; } @@ -280,8 +278,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, if (card->info.type != QETH_CARD_TYPE_IQD) return -EPERM; - if (card->state != CARD_STATE_DOWN && - card->state != CARD_STATE_RECOVER) + if (card->state != CARD_STATE_DOWN) return -EPERM; if (card->options.sniffer) return -EPERM; @@ -356,8 +353,7 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, return -EINVAL; mutex_lock(&card->conf_mutex); - if ((card->state != CARD_STATE_DOWN) && - (card->state != CARD_STATE_RECOVER)) { + if (card->state != CARD_STATE_DOWN) { rc = -EPERM; goto out; } diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 
9cf30d124b9e..e390f8c6d5f3 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) goto failed; /* report size limit per scatter-gather segment */ - adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 10c4e8e3fd59..661436a92f8e 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -294,8 +294,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, id->input_handler = zfcp_qdio_int_resp; id->output_handler = zfcp_qdio_int_req; id->int_parm = (unsigned long) qdio; - id->input_sbal_addr_array = (void **) (qdio->res_q); - id->output_sbal_addr_array = (void **) (qdio->req_q); + id->input_sbal_addr_array = qdio->res_q; + id->output_sbal_addr_array = qdio->req_q; id->scan_threshold = QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 00acc7144bbc..f4f6a07c5222 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = { .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, /* GCD, adjusted later */ + /* report size limit per scatter-gather segment */ + .max_segment_size = ZFCP_QDIO_SBALE_LEN, .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, .shost_attrs = zfcp_sysfs_shost_attrs, .sdev_attrs = zfcp_sysfs_sdev_attrs, diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index ae1d56da671d..74c328321889 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -272,6 +272,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev) { struct virtio_ccw_vq_info *info; + if (!vcdev->airq_info) + return; list_for_each_entry(info, &vcdev->virtqueues, node) drop_airq_indicator(info->vq, vcdev->airq_info); } @@ -413,7 +415,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); if (ret) return ret; - return vcdev->config_block->num; + return vcdev->config_block->num ?: -ENOENT; } static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) @@ -973,6 +975,13 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) kfree(ccw); } +static const char *virtio_ccw_bus_name(struct virtio_device *vdev) +{ + struct virtio_ccw_device *vcdev = to_vc_device(vdev); + + return dev_name(&vcdev->cdev->dev); +} + static const struct virtio_config_ops virtio_ccw_config_ops = { .get_features = virtio_ccw_get_features, .finalize_features = virtio_ccw_finalize_features, @@ -983,6 +992,7 @@ static const struct virtio_config_ops virtio_ccw_config_ops = { .reset = virtio_ccw_reset, .find_vqs = virtio_ccw_find_vqs, .del_vqs = virtio_ccw_del_vqs, + .bus_name = virtio_ccw_bus_name, }; diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index a3c20e3a8b7c..3337b1e80412 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2009,7 +2009,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) struct Scsi_Host *host = NULL; TW_Device_Extension *tw_dev; unsigned long mem_addr, mem_len; - int retval = -ENODEV; + int retval; retval = 
pci_enable_device(pdev); if (retval) { @@ -2020,8 +2020,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); retval = -ENODEV; goto out_disable_device; @@ -2240,8 +2242,10 @@ static int twa_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index cd096104bcec..dda6fa857709 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1573,8 +1573,10 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask"); retval = -ENODEV; goto out_disable_device; @@ -1805,8 +1807,10 @@ static int twl_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (retval) + retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (retval) { TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 128d658d472a..16957d7ac414 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, if(tpnt->sdev_attrs == NULL) tpnt->sdev_attrs = NCR_700_dev_attrs; - memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, + memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); if(memory == NULL) { printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index f38882f6f37d..d528018e6fa8 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -665,7 +665,7 @@ config SCSI_DMX3191D config SCSI_GDTH tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support" - depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API + depends on PCI && SCSI ---help--- Formerly called GDT SCSI Disk Array Controller Support. @@ -1196,8 +1196,6 @@ config SCSI_AM53C974 PCscsi/PCnet (Am53/79C974) solutions. This is a new implementation base on the generic esp_scsi driver. - Documentation can be found in . 
- Note that this driver does NOT support Tekram DC390W/U/F, which are based on NCR/Symbios chips. Use "NCR53C8XX SCSI support" for those. @@ -1369,14 +1367,14 @@ config ATARI_SCSI tristate "Atari native SCSI support" depends on ATARI && SCSI select SCSI_SPI_ATTRS - select NVRAM ---help--- If you have an Atari with built-in NCR5380 SCSI controller (TT, Falcon, ...) say Y to get it supported. Of course also, if you have a compatible SCSI controller (e.g. for Medusa). - To compile this driver as a module, choose M here: the - module will be called atari_scsi. + To compile this driver as a module, choose M here: the module will + be called atari_scsi. If you also enable NVRAM support, the SCSI + host's ID is taken from the setting in TT RTC NVRAM. This driver supports both styles of NCR integration into the system: the TT style (separate DMA), and the Falcon style (via @@ -1517,6 +1515,4 @@ source "drivers/scsi/pcmcia/Kconfig" source "drivers/scsi/device_handler/Kconfig" -source "drivers/scsi/osd/Kconfig" - endmenu diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index fcb41ae329c4..8826111fdf4a 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -150,7 +150,6 @@ obj-$(CONFIG_CHR_DEV_SG) += sg.o obj-$(CONFIG_CHR_DEV_SCH) += ch.o obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o -obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/ obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/ # This goes last, so that "real" scsi devices probe earlier diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile index 1bd9fd18f7f3..3893b95b140b 100644 --- a/drivers/scsi/aacraid/Makefile +++ b/drivers/scsi/aacraid/Makefile @@ -4,5 +4,3 @@ obj-$(CONFIG_SCSI_AACRAID) := aacraid.o aacraid-objs := linit.o aachba.o commctrl.o comminit.o commsup.o \ dpcsup.o rx.o sa.o rkt.o nark.o src.o - -ccflags-y := -Idrivers/scsi diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 75ab5ff6b78c..6085aa087a2f 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -3455,7 +3455,7 @@ static int delete_disk(struct aac_dev *dev, void __user *arg) } } -int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg) +int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) { switch (cmd) { case FSACTL_QUERY_DISK: diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 3291d1c16864..1df5171594b8 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -2706,12 +2706,12 @@ void aac_set_intx_mode(struct aac_dev *dev); int aac_get_config_status(struct aac_dev *dev, int commit_flag); int aac_get_containers(struct aac_dev *dev); int aac_scsi_cmd(struct scsi_cmnd *cmd); -int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg); +int aac_dev_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg); #ifndef shost_to_class #define shost_to_class(shost) &shost->shost_dev #endif ssize_t aac_get_serial_number(struct device *dev, char *buf); -int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg); +int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg); int aac_rx_init(struct aac_dev *dev); int aac_rkt_init(struct aac_dev *dev); int aac_nark_init(struct aac_dev *dev); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index e2899ff7913e..f0ff40332753 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -1060,7 +1060,7 @@ static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg) return retval; } 
-int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) +int aac_do_ioctl(struct aac_dev *dev, unsigned int cmd, void __user *arg) { int status; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index d5a6aa9676c8..e67e032936ef 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -1303,8 +1303,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) ADD : DELETE; break; } - case AifBuManagerEvent: - aac_handle_aif_bu(dev, aifcmd); + break; + case AifBuManagerEvent: + aac_handle_aif_bu(dev, aifcmd); break; } @@ -1376,18 +1377,19 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) container = 0; retry_next: - if (device_config_needed == NOTHING) - for (; container < dev->maximum_num_containers; ++container) { - if ((dev->fsa_dev[container].config_waiting_on == 0) && - (dev->fsa_dev[container].config_needed != NOTHING) && - time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) { - device_config_needed = - dev->fsa_dev[container].config_needed; - dev->fsa_dev[container].config_needed = NOTHING; - channel = CONTAINER_TO_CHANNEL(container); - id = CONTAINER_TO_ID(container); - lun = CONTAINER_TO_LUN(container); - break; + if (device_config_needed == NOTHING) { + for (; container < dev->maximum_num_containers; ++container) { + if ((dev->fsa_dev[container].config_waiting_on == 0) && + (dev->fsa_dev[container].config_needed != NOTHING) && + time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) { + device_config_needed = + dev->fsa_dev[container].config_needed; + dev->fsa_dev[container].config_needed = NOTHING; + channel = CONTAINER_TO_CHANNEL(container); + id = CONTAINER_TO_ID(container); + lun = CONTAINER_TO_LUN(container); + break; + } } } if (device_config_needed == NOTHING) diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 7e56a11836c1..a45f81ec80ce 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -616,7 +616,8 @@ static struct device_attribute *aac_dev_attrs[] = { NULL, }; -static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg) +static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; if (!capable(CAP_SYS_RAWIO)) @@ -852,8 +853,7 @@ static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info, address = (u64)fib->hw_error_pa; rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32)); - rst->error_ptr_lo = cpu_to_le32 - ((u32)(address & 0xffffffff)); + rst->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff)); rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE); fib->hbacmd_size = sizeof(*rst); @@ -1206,7 +1206,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long return ret; } -static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static int aac_compat_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; if (!capable(CAP_SYS_RAWIO)) diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 8377aec0649d..97bb9e9d201c 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -1157,7 +1157,7 @@ out: dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__, state_str[state], rc); -return rc; + return rc; } /** * aac_srcv_init - initialize an SRCv card diff --git 
a/drivers/scsi/aic7xxx/Makefile b/drivers/scsi/aic7xxx/Makefile index c15be2590d1c..e0188ecd85b2 100644 --- a/drivers/scsi/aic7xxx/Makefile +++ b/drivers/scsi/aic7xxx/Makefile @@ -34,7 +34,6 @@ aic79xx-y += aic79xx_osm.o \ aic79xx_proc.o \ aic79xx_osm_pci.o -ccflags-y += -Idrivers/scsi ifdef WARNINGS_BECOME_ERRORS ccflags-y += -Werror endif diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 9ee75c9a9aa1..7e5044bf05c0 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -2285,6 +2285,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) switch (scb->hscb->task_management) { case SIU_TASKMGMT_ABORT_TASK: tag = SCB_GET_TAG(scb); + /* fall through */ case SIU_TASKMGMT_ABORT_TASK_SET: case SIU_TASKMGMT_CLEAR_TASK_SET: lun = scb->hscb->lun; @@ -2295,6 +2296,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat) break; case SIU_TASKMGMT_LUN_RESET: lun = scb->hscb->lun; + /* fall through */ case SIU_TASKMGMT_TARGET_RESET: { struct ahd_devinfo devinfo; @@ -6550,8 +6552,8 @@ ahd_fini_scbdata(struct ahd_softc *ahd) kfree(sns_map); } ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); - /* FALLTHROUGH */ } + /* fall through */ case 6: { struct map_node *sg_map; @@ -6565,8 +6567,8 @@ ahd_fini_scbdata(struct ahd_softc *ahd) kfree(sg_map); } ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); - /* FALLTHROUGH */ } + /* fall through */ case 5: { struct map_node *hscb_map; @@ -7209,6 +7211,7 @@ ahd_init(struct ahd_softc *ahd) case FLX_CSTAT_OVER: case FLX_CSTAT_UNDER: warn_user++; + /* fall through */ case FLX_CSTAT_INVALID: case FLX_CSTAT_OKAY: if (warn_user == 0 && bootverbose == 0) @@ -8413,7 +8416,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, if ((scb->flags & SCB_ACTIVE) == 0) printk("Inactive SCB in Waiting List\n"); ahd_done_with_status(ahd, scb, status); - /* FALLTHROUGH */ + /* fall through */ case SEARCH_REMOVE: ahd_rem_wscb(ahd, scbid, prev, next, tid); *list_tail = prev; @@ -8422,6 +8425,7 @@ ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, break; case SEARCH_PRINT: printk("0x%x ", scbid); + /* fall through */ case SEARCH_COUNT: prev = scbid; break; @@ -9547,8 +9551,8 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) { fmt3_ins = &instr.format3; fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address); - /* FALLTHROUGH */ } + /* fall through */ case AIC_OP_OR: case AIC_OP_AND: case AIC_OP_XOR: @@ -9559,7 +9563,7 @@ ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts) fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; } fmt1_ins->parity = 0; - /* FALLTHROUGH */ + /* fall through */ case AIC_OP_ROL: { int i, count; diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index f83f79b07b50..bbdae67774f0 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c @@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev, return snprintf(buf, PAGE_SIZE, "%s\n", asd_dev_rev[asd_ha->revision_id]); } -static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); +static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); static ssize_t asd_show_dev_bios_build(struct device *dev, struct device_attribute *attr,char *buf) @@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) { int err; - err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); + err = device_create_file(&asd_ha->pcidev->dev, 
&dev_attr_aic_revision); if (err) return err; @@ -499,13 +499,13 @@ err_update_bios: err_biosb: device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); err_rev: - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); return err; } static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) { - device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); + device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); @@ -769,9 +769,11 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (err) goto Err_remove; - err = -ENODEV; - if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) { + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); + if (err) { + err = -ENODEV; asd_printk("no suitable DMA mask for %s\n", pci_name(dev)); goto Err_remove; } diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 9c397a2794d6..9220bcf8388f 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h @@ -49,7 +49,7 @@ struct device_attribute; #define ARCMSR_MAX_OUTSTANDING_CMD 1024 #define ARCMSR_DEFAULT_OUTSTANDING_CMD 128 #define ARCMSR_MIN_OUTSTANDING_CMD 32 -#define ARCMSR_DRIVER_VERSION "v1.40.00.09-20180709" +#define ARCMSR_DRIVER_VERSION "v1.40.00.10-20190116" #define ARCMSR_SCSI_INITIATOR_ID 255 #define ARCMSR_MAX_XFER_SECTORS 512 #define ARCMSR_MAX_XFER_SECTORS_B 4096 @@ -739,7 +739,7 @@ struct AdapterControlBlock #define ACB_ADAPTER_TYPE_C 0x00000002 /* hbc L IOP */ #define ACB_ADAPTER_TYPE_D 0x00000003 /* hbd M IOP */ #define ACB_ADAPTER_TYPE_E 0x00000004 /* hba L IOP */ - u32 roundup_ccbsize; + u32 ioqueue_size; struct pci_dev * pdev; struct Scsi_Host * host; unsigned long vir2phy_offset; @@ -747,6 +747,7 @@ struct AdapterControlBlock uint32_t outbound_int_enable; uint32_t cdb_phyaddr_hi32; uint32_t reg_mu_acc_handle0; + uint64_t cdb_phyadd_hipart; spinlock_t eh_lock; spinlock_t ccblist_lock; spinlock_t postq_lock; @@ -855,11 +856,11 @@ struct AdapterControlBlock ******************************************************************************* */ struct CommandControlBlock{ - /*x32:sizeof struct_CCB=(32+60)byte, x64:sizeof struct_CCB=(64+60)byte*/ + /*x32:sizeof struct_CCB=(64+60)byte, x64:sizeof struct_CCB=(64+60)byte*/ struct list_head list; /*x32: 8byte, x64: 16byte*/ struct scsi_cmnd *pcmd; /*8 bytes pointer of linux scsi command */ struct AdapterControlBlock *acb; /*x32: 4byte, x64: 8byte*/ - uint32_t cdb_phyaddr; /*x32: 4byte, x64: 4byte*/ + unsigned long cdb_phyaddr; /*x32: 4byte, x64: 8byte*/ uint32_t arc_cdb_size; /*x32:4byte,x64:4byte*/ uint16_t ccb_flags; /*x32: 2byte, x64: 2byte*/ #define CCB_FLAG_READ 0x0000 @@ -875,10 +876,10 @@ struct CommandControlBlock{ uint32_t smid; #if BITS_PER_LONG == 64 /* ======================512+64 bytes======================== */ - uint32_t reserved[4]; /*16 byte*/ + uint32_t reserved[3]; /*12 byte*/ #else /* ======================512+32 bytes======================== */ - // uint32_t reserved; /*4 byte*/ + uint32_t reserved[8]; /*32 byte*/ #endif /* ======================================================= */ struct ARCMSR_CDB arcmsr_cdb; diff --git 
a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 57c6fa388bf6..88053b15c363 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -91,6 +91,10 @@ static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN; module_param(cmd_per_lun, int, S_IRUGO); MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32"); +static int dma_mask_64 = 0; +module_param(dma_mask_64, int, S_IRUGO); +MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)"); + static int set_date_time = 0; module_param(set_date_time, int, S_IRUGO); MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable"); @@ -223,13 +227,13 @@ static struct pci_driver arcmsr_pci_driver = { **************************************************************************** */ -static void arcmsr_free_mu(struct AdapterControlBlock *acb) +static void arcmsr_free_io_queue(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: case ACB_ADAPTER_TYPE_D: case ACB_ADAPTER_TYPE_E: { - dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize, + dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size, acb->dma_coherent2, acb->dma_coherent_handle2); break; } @@ -576,6 +580,58 @@ static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) } } +static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_B *reg = acb->pmuB; + + if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) { + reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203); + reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203); + reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203); + reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203); + } else { + reg->drv2iop_doorbell= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL); + reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK); + reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL); + reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK); + } + reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER); + reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER); + reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER); +} + +static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock *acb) +{ + struct MessageUnit_D *reg = acb->pmuD; + + reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID); + reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION); + reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK); + reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET); + reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST); + reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS); + reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE); + reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0); + reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1); + reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0); + reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1); + reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL); + reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL); + reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE); + reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW); + 
reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH); + reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER); + reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW); + reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH); + reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER); + reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER); + reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE); + reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE); + reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER); + reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER); + reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); +} + static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) { bool rtn = true; @@ -585,88 +641,39 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_B: { - struct MessageUnit_B *reg; - acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32); - dma_coherent = dma_alloc_coherent(&pdev->dev, - acb->roundup_ccbsize, - &dma_coherent_handle, - GFP_KERNEL); + acb->ioqueue_size = roundup(sizeof(struct MessageUnit_B), 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent) { pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; } acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = dma_coherent; - reg = (struct MessageUnit_B *)dma_coherent; - acb->pmuB = reg; - if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) { - reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203); - reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203); - reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203); - reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203); - } else { - reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL); - reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK); - reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL); - reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK); - } - reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER); - reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER); - reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER); + acb->pmuB = (struct MessageUnit_B *)dma_coherent; + arcmsr_hbaB_assign_regAddr(acb); } break; case ACB_ADAPTER_TYPE_D: { - struct MessageUnit_D *reg; - - acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32); - dma_coherent = dma_alloc_coherent(&pdev->dev, - acb->roundup_ccbsize, - &dma_coherent_handle, - GFP_KERNEL); + acb->ioqueue_size = roundup(sizeof(struct MessageUnit_D), 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent) { pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; } acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = dma_coherent; - reg = (struct MessageUnit_D *)dma_coherent; - acb->pmuD = reg; - reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID); - reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION); - reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK); - 
reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET); - reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST); - reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS); - reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE); - reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0); - reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1); - reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0); - reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1); - reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL); - reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL); - reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE); - reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW); - reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH); - reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER); - reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW); - reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH); - reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER); - reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER); - reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE); - reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE); - reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER); - reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER); - reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER); + acb->pmuD = (struct MessageUnit_D *)dma_coherent; + arcmsr_hbaD_assign_regAddr(acb); } break; case ACB_ADAPTER_TYPE_E: { uint32_t completeQ_size; completeQ_size = sizeof(struct deliver_completeQ) * ARCMSR_MAX_HBE_DONEQUEUE + 128; - acb->roundup_ccbsize = roundup(completeQ_size, 32); - dma_coherent = dma_alloc_coherent(&pdev->dev, - acb->roundup_ccbsize, - &dma_coherent_handle, - GFP_KERNEL); + acb->ioqueue_size = roundup(completeQ_size, 32); + dma_coherent = dma_alloc_coherent(&pdev->dev, acb->ioqueue_size, + &dma_coherent_handle, GFP_KERNEL); if (!dma_coherent){ pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no); return false; @@ -674,7 +681,7 @@ static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb) acb->dma_coherent_handle2 = dma_coherent_handle; acb->dma_coherent2 = dma_coherent; acb->pCompletionQ = dma_coherent; - acb->completionQ_entry = acb->roundup_ccbsize / sizeof(struct deliver_completeQ); + acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); acb->doneq_index = 0; } break; @@ -691,11 +698,11 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) dma_addr_t dma_coherent_handle; struct CommandControlBlock *ccb_tmp; int i = 0, j = 0; - dma_addr_t cdb_phyaddr; + unsigned long cdb_phyaddr, next_ccb_phy; unsigned long roundup_ccbsize; unsigned long max_xfer_len; unsigned long max_sg_entrys; - uint32_t firm_config_version; + uint32_t firm_config_version, curr_phy_upper32; for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) @@ -712,6 +719,7 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) acb->host->sg_tablesize = max_sg_entrys; roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct 
SG64ENTRY), 32); acb->uncache_size = roundup_ccbsize * acb->maxFreeCCB; + acb->uncache_size += acb->ioqueue_size; dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); if(!dma_coherent){ printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); @@ -722,9 +730,10 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) memset(dma_coherent, 0, acb->uncache_size); acb->ccbsize = roundup_ccbsize; ccb_tmp = dma_coherent; + curr_phy_upper32 = upper_32_bits(dma_coherent_handle); acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; for(i = 0; i < acb->maxFreeCCB; i++){ - cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); + cdb_phyaddr = (unsigned long)dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: case ACB_ADAPTER_TYPE_B: @@ -740,10 +749,34 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) ccb_tmp->acb = acb; ccb_tmp->smid = (u32)i << 16; INIT_LIST_HEAD(&ccb_tmp->list); - list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); + next_ccb_phy = dma_coherent_handle + roundup_ccbsize; + if (upper_32_bits(next_ccb_phy) != curr_phy_upper32) { + acb->maxFreeCCB = i; + acb->host->can_queue = i; + break; + } + else + list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize); - dma_coherent_handle = dma_coherent_handle + roundup_ccbsize; + dma_coherent_handle = next_ccb_phy; } + acb->dma_coherent_handle2 = dma_coherent_handle; + acb->dma_coherent2 = ccb_tmp; + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: + acb->pmuB = (struct MessageUnit_B *)acb->dma_coherent2; + arcmsr_hbaB_assign_regAddr(acb); + break; + case ACB_ADAPTER_TYPE_D: + acb->pmuD = (struct MessageUnit_D *)acb->dma_coherent2; + arcmsr_hbaD_assign_regAddr(acb); + break; + case ACB_ADAPTER_TYPE_E: + acb->pCompletionQ = acb->dma_coherent2; + acb->completionQ_entry = acb->ioqueue_size / sizeof(struct deliver_completeQ); + acb->doneq_index = 0; + break; + } return 0; } @@ -894,6 +927,31 @@ static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb) add_timer(&pacb->refresh_timer); } +static int arcmsr_set_dma_mask(struct AdapterControlBlock *acb) +{ + struct pci_dev *pcidev = acb->pdev; + + if (IS_DMA64) { + if (((acb->adapter_type == ACB_ADAPTER_TYPE_A) && !dma_mask_64) || + dma_set_mask(&pcidev->dev, DMA_BIT_MASK(64))) + goto dma32; + if (dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(64)) || + dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64))) { + printk("arcmsr: set DMA 64 mask failed\n"); + return -ENXIO; + } + } else { +dma32: + if (dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pcidev->dev, DMA_BIT_MASK(32)) || + dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) { + printk("arcmsr: set DMA 32-bit mask failed\n"); + return -ENXIO; + } + } + return 0; +} + static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *host; @@ -908,22 +966,15 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) if(!host){ goto pci_disable_dev; } - error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if(error){ - error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if(error){ - printk(KERN_WARNING - "scsi%d: No suitable DMA mask available\n", - host->host_no); - goto scsi_host_release; - } - } 
init_waitqueue_head(&wait_q); bus = pdev->bus->number; dev_fun = pdev->devfn; acb = (struct AdapterControlBlock *) host->hostdata; memset(acb,0,sizeof(struct AdapterControlBlock)); acb->pdev = pdev; + acb->adapter_type = id->driver_data; + if (arcmsr_set_dma_mask(acb)) + goto scsi_host_release; acb->host = host; host->max_lun = ARCMSR_MAX_TARGETLUN; host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/ @@ -953,7 +1004,6 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) ACB_F_MESSAGE_WQBUFFER_READED); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; INIT_LIST_HEAD(&acb->ccb_free_list); - acb->adapter_type = id->driver_data; error = arcmsr_remap_pciregion(acb); if(!error){ goto pci_release_regs; @@ -965,9 +1015,10 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) if(!error){ goto free_hbb_mu; } + arcmsr_free_io_queue(acb); error = arcmsr_alloc_ccb_pool(acb); if(error){ - goto free_hbb_mu; + goto unmap_pci_region; } error = scsi_add_host(host, &pdev->dev); if(error){ @@ -995,8 +1046,9 @@ scsi_host_remove: scsi_remove_host(host); free_ccb_pool: arcmsr_free_ccb_pool(acb); + goto unmap_pci_region; free_hbb_mu: - arcmsr_free_mu(acb); + arcmsr_free_io_queue(acb); unmap_pci_region: arcmsr_unmap_pciregion(acb); pci_release_regs: @@ -1042,7 +1094,6 @@ static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state) static int arcmsr_resume(struct pci_dev *pdev) { - int error; struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; @@ -1054,24 +1105,30 @@ static int arcmsr_resume(struct pci_dev *pdev) pr_warn("%s: pci_enable_device error\n", __func__); return -ENODEV; } - error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (error) { - error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (error) { - pr_warn("scsi%d: No suitable DMA mask available\n", - host->host_no); - goto controller_unregister; - } - } + if (arcmsr_set_dma_mask(acb)) + goto controller_unregister; pci_set_master(pdev); if (arcmsr_request_irq(pdev, acb) == FAILED) goto controller_stop; - if (acb->adapter_type == ACB_ADAPTER_TYPE_E) { + switch (acb->adapter_type) { + case ACB_ADAPTER_TYPE_B: { + struct MessageUnit_B *reg = acb->pmuB; + uint32_t i; + for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { + reg->post_qbuffer[i] = 0; + reg->done_qbuffer[i] = 0; + } + reg->postq_index = 0; + reg->doneq_index = 0; + break; + } + case ACB_ADAPTER_TYPE_E: writel(0, &acb->pmuE->host_int_status); writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); acb->in_doorbell = 0; acb->out_doorbell = 0; acb->doneq_index = 0; + break; } arcmsr_iop_init(acb); arcmsr_init_get_devmap_timer(acb); @@ -1351,10 +1408,12 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i = 0; - uint32_t flag_ccb, ccb_cdb_phy; struct ARCMSR_CDB *pARCMSR_CDB; bool error; struct CommandControlBlock *pCCB; + unsigned long ccb_cdb_phy, cdb_phy_hipart; + switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { @@ -1366,7 +1425,10 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) && (i++ < acb->maxOutstanding)) { - pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ + ccb_cdb_phy = (flag_ccb << 5) & 
0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); @@ -1382,7 +1444,10 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) flag_ccb = reg->done_qbuffer[i]; if (flag_ccb != 0) { reg->done_qbuffer[i] = 0; - pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ + ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); @@ -1399,7 +1464,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) /*need to do*/ flag_ccb = readl(&reg->outbound_queueport_low); ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); - pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/ + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); @@ -1427,9 +1494,13 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; spin_unlock_irqrestore(&acb->doneq_lock, flags); + cdb_phy_hipart = pmu->done_qbuffer[doneq_index & + 0xFFF].addressHigh; addressLow = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (addressLow & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; pARCMSR_CDB = (struct ARCMSR_CDB *) (acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(pARCMSR_CDB, @@ -1506,7 +1577,6 @@ static void arcmsr_free_pcidev(struct AdapterControlBlock *acb) pdev = acb->pdev; arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); - arcmsr_free_mu(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); @@ -1564,7 +1634,6 @@ static void arcmsr_remove(struct pci_dev *pdev) } arcmsr_free_irq(pdev, acb); arcmsr_free_ccb_pool(acb); - arcmsr_free_mu(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); @@ -1749,12 +1818,8 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 
0x300 : ccb->arc_cdb_size; ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1); - if (acb->cdb_phyaddr_hi32) { - writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high); - writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); - } else { - writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); - } + writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high); + writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); } break; case ACB_ADAPTER_TYPE_D: { @@ -1767,8 +1832,8 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr spin_lock_irqsave(&acb->postq_lock, flags); postq_index = pmu->postq_index; pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]); - pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr); - pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr); + pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr); + pinbound_srb->addressLow = cdb_phyaddr; pinbound_srb->length = ccb->arc_cdb_size >> 2; arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr); toggle = postq_index & 0x4000; @@ -2304,8 +2369,13 @@ static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb) struct ARCMSR_CDB *pARCMSR_CDB; struct CommandControlBlock *pCCB; bool error; + unsigned long cdb_phy_addr; + while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) { - pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ + cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); @@ -2319,13 +2389,18 @@ static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb) struct ARCMSR_CDB *pARCMSR_CDB; struct CommandControlBlock *pCCB; bool error; + unsigned long cdb_phy_addr; + index = reg->doneq_index; while ((flag_ccb = reg->done_qbuffer[index]) != 0) { - reg->done_qbuffer[index] = 0; - pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ + cdb_phy_addr = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart; + pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr); pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; arcmsr_drain_donequeue(acb, pCCB, error); + reg->done_qbuffer[index] = 0; index++; index %= ARCMSR_MAX_HBB_POSTQUEUE; reg->doneq_index = index; @@ -2337,7 +2412,8 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) struct MessageUnit_C __iomem *phbcmu; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *ccb; - uint32_t flag_ccb, ccb_cdb_phy, throttling = 0; + uint32_t flag_ccb, throttling = 0; + unsigned long ccb_cdb_phy; int error; phbcmu = acb->pmuC; @@ -2347,6 +2423,8 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) != 0xFFFFFFFF) { ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, @@ -2367,12 +2445,12 @@ static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb) static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) { u32 outbound_write_pointer, doneq_index, index_stripped, toggle; - uint32_t addressLow, ccb_cdb_phy; + uint32_t addressLow; int error; struct MessageUnit_D *pmu; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *ccb; - unsigned long flags; + unsigned long flags, ccb_cdb_phy, cdb_phy_hipart; spin_lock_irqsave(&acb->doneq_lock, flags); pmu = acb->pmuD; @@ -2386,9 +2464,13 @@ static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb) pmu->doneq_index = index_stripped ? (index_stripped | toggle) : ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; + cdb_phy_hipart = pmu->done_qbuffer[doneq_index & + 0xFFF].addressHigh; addressLow = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (addressLow & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, @@ -3229,7 +3311,9 @@ static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb, uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0; int rtn; bool error; - polling_hba_ccb_retry: + unsigned long ccb_cdb_phy; + +polling_hba_ccb_retry: poll_count++; outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ @@ -3247,7 +3331,10 @@ static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb, goto polling_hba_ccb_retry; } } - arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); + ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done |= (ccb == poll_ccb) ? 
1 : 0; if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { @@ -3285,8 +3372,9 @@ static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0; int index, rtn; bool error; - polling_hbb_ccb_retry: + unsigned long ccb_cdb_phy; +polling_hbb_ccb_retry: poll_count++; /* clear doorbell interrupt */ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); @@ -3312,7 +3400,10 @@ static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb, index %= ARCMSR_MAX_HBB_POSTQUEUE; reg->doneq_index = index; /* check if command done with no error*/ - arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); + ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff; + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0; if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { @@ -3345,12 +3436,14 @@ static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { struct MessageUnit_C __iomem *reg = acb->pmuC; - uint32_t flag_ccb, ccb_cdb_phy; + uint32_t flag_ccb; struct ARCMSR_CDB *arcmsr_cdb; bool error; struct CommandControlBlock *pCCB; uint32_t poll_ccb_done = 0, poll_count = 0; int rtn; + unsigned long ccb_cdb_phy; + polling_hbc_ccb_retry: poll_count++; while (1) { @@ -3369,7 +3462,9 @@ polling_hbc_ccb_retry: } flag_ccb = readl(&reg->outbound_queueport_low); ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); - arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/ + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; + arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done |= (pCCB == poll_ccb) ? 
1 : 0; /* check ifcommand done with no error*/ @@ -3403,9 +3498,9 @@ static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { bool error; - uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy; + uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb; int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle; - unsigned long flags; + unsigned long flags, ccb_cdb_phy, cdb_phy_hipart; struct ARCMSR_CDB *arcmsr_cdb; struct CommandControlBlock *pCCB; struct MessageUnit_D *pmu = acb->pmuD; @@ -3437,8 +3532,12 @@ polling_hbaD_ccb_retry: ((toggle ^ 0x4000) + 1); doneq_index = pmu->doneq_index; spin_unlock_irqrestore(&acb->doneq_lock, flags); + cdb_phy_hipart = pmu->done_qbuffer[doneq_index & + 0xFFF].addressHigh; flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow; ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); + if (acb->cdb_phyadd_hipart) + ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart; arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, @@ -3680,6 +3779,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) cdb_phyaddr = lower_32_bits(dma_coherent_handle); cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle); acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; + acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32; /* *********************************************************************** ** if adapter type B, set window of "post command Q" */ @@ -3744,7 +3844,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) } break; case ACB_ADAPTER_TYPE_C: { - if (cdb_phyaddr_hi32 != 0) { struct MessageUnit_C __iomem *reg = acb->pmuC; printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", @@ -3759,7 +3858,6 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) return 1; } } - } break; case ACB_ADAPTER_TYPE_D: { uint32_t __iomem *rwbuffer; @@ -3793,7 +3891,7 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16); writel(cdb_phyaddr, &reg->msgcode_rwbuffer[5]); writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[6]); - writel(acb->roundup_ccbsize, &reg->msgcode_rwbuffer[7]); + writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE; writel(acb->out_doorbell, &reg->iobound_doorbell); diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index a503dc50c4f8..e809493d0d06 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -757,15 +757,17 @@ static int __init atari_scsi_probe(struct platform_device *pdev) if (setup_hostid >= 0) { atari_scsi_template.this_id = setup_hostid & 7; - } else { + } else if (IS_REACHABLE(CONFIG_NVRAM)) { /* Test if a host id is set in the NVRam */ - if (ATARIHW_PRESENT(TT_CLK) && nvram_check_checksum()) { - unsigned char b = nvram_read_byte(16); + if (ATARIHW_PRESENT(TT_CLK)) { + unsigned char b; + loff_t offset = 16; + ssize_t count = nvram_read(&b, 1, &offset); /* Arbitration enabled? 
(for TOS) * If yes, use configured host ID */ - if (b & 0x80) + if ((count == 1) && (b & 0x80)) atari_scsi_template.this_id = b & 7; } } diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 74e260027c7d..76e49d902609 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -3566,7 +3566,7 @@ static void be2iscsi_enable_msix(struct beiscsi_hba *phba) /* if eqid_count == 1 fall back to INTX */ if (enable_msix && nvec > 1) { - const struct irq_affinity desc = { .post_vectors = 1 }; + struct irq_affinity desc = { .post_vectors = 1 }; if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index b4f2c1d8742e..646f09f66443 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -6430,9 +6430,7 @@ bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport, switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); - /* - * !!! fall through !!! - */ + /* fall through */ case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: @@ -6458,9 +6456,7 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, switch (event) { case BFA_FCS_VPORT_SM_OFFLINE: bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE); - /* - * !!! fall through !!! - */ + /* fall through */ case BFA_FCS_VPORT_SM_RSP_OK: case BFA_FCS_VPORT_SM_RSP_ERROR: diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index de50349a39ce..1e400f2aaece 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -427,17 +427,13 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); - /* - * !! fall through !! - */ + /* fall through */ case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); bfa_fcxp_discard(rport->fcxp); - /* - * !! fall through !! - */ + /* fall through */ case RPSM_EVENT_FAILED: if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) { rport->plogi_retries++; @@ -868,9 +864,7 @@ bfa_fcs_rport_sm_adisc_online(struct bfa_fcs_rport_s *rport, * At least go offline when a PLOGI is received. */ bfa_fcxp_discard(rport->fcxp); - /* - * !!! fall through !!! - */ + /* fall through */ case RPSM_EVENT_FAILED: case RPSM_EVENT_ADDRESS_CHANGE: @@ -1056,6 +1050,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); + /* fall through */ case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); @@ -1144,9 +1139,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_plogiacc(rport, NULL); break; } - /* - * !! fall through !! 
- */ + /* fall through */ case RPSM_EVENT_ADDRESS_CHANGE: if (!bfa_fcs_lport_is_online(rport->port)) { @@ -1303,6 +1296,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); + /* fall through */ case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); @@ -1346,6 +1340,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: bfa_fcs_rport_send_logo_acc(rport); + /* fall through */ case RPSM_EVENT_PRLO_RCVD: if (rport->prlo == BFA_TRUE) bfa_fcs_rport_send_prlo_acc(rport); diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 9631877aba4f..79a55c3615be 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -978,9 +978,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_INITFAIL: bfa_iocpf_timer_stop(ioc); - /* - * !!! fall through !!! - */ + /* fall through */ case IOCPF_E_TIMEOUT: writel(1, ioc->ioc_regs.ioc_sem_reg); @@ -1056,9 +1054,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_FAIL: bfa_iocpf_timer_stop(ioc); - /* - * !!! fall through !!! - */ + /* fall through */ case IOCPF_E_TIMEOUT: bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); @@ -6007,6 +6003,7 @@ bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf, case BFA_DCONF_SM_IOCDISABLE: case BFA_DCONF_SM_FLASH_COMP: bfa_timer_stop(&dconf->timer); + /* fall through */ case BFA_DCONF_SM_TIMEOUT: bfa_sm_set_state(dconf, bfa_dconf_sm_uninit); bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 42a0caf6740d..88880a66a189 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -727,7 +727,7 @@ bfad_init_timer(struct bfad_s *bfad) int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) { - int rc = -ENODEV; + int rc = -ENODEV; if (pci_enable_device(pdev)) { printk(KERN_ERR "pci_enable_device fail %p\n", pdev); @@ -739,8 +739,12 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad) pci_set_master(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + + if (rc) { + rc = -ENODEV; printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev); goto out_release_region; } @@ -1534,6 +1538,7 @@ bfad_pci_slot_reset(struct pci_dev *pdev) { struct bfad_s *bfad = pci_get_drvdata(pdev); u8 byte; + int rc; dev_printk(KERN_ERR, &pdev->dev, "bfad_pci_slot_reset flags: 0x%x\n", bfad->bfad_flags); @@ -1561,8 +1566,11 @@ bfad_pci_slot_reset(struct pci_dev *pdev) pci_save_state(pdev); pci_set_master(pdev); - if (dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(32))) + rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, + DMA_BIT_MASK(32)); + if (rc) goto out_disable_device; if (restart_bfa(bfad) == -1) diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 349cfe7d055e..bfcd87c0dc74 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -460,11 +460,6 @@ bfad_debugfs_init(struct bfad_port_s *port) if (!bfa_debugfs_root) { bfa_debugfs_root = debugfs_create_dir("bfa", 
NULL); atomic_set(&bfa_debugfs_port_count, 0); - if (!bfa_debugfs_root) { - printk(KERN_WARNING - "BFA debugfs root dir creation failed\n"); - goto err; - } } /* Setup the pci_dev debugfs directory for the port */ @@ -472,12 +467,6 @@ bfad_debugfs_init(struct bfad_port_s *port) if (!port->port_debugfs_root) { port->port_debugfs_root = debugfs_create_dir(name, bfa_debugfs_root); - if (!port->port_debugfs_root) { - printk(KERN_WARNING - "bfa %s: debugfs root creation failed\n", - bfad->pci_name); - goto err; - } atomic_inc(&bfa_debugfs_port_count); @@ -489,16 +478,9 @@ bfad_debugfs_init(struct bfad_port_s *port) port->port_debugfs_root, port, file->fops); - if (!bfad->bfad_dentry_files[i]) { - printk(KERN_WARNING - "bfa %s: debugfs %s creation failed\n", - bfad->pci_name, file->name); - goto err; - } } } -err: return; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2e4e7159ebf9..a75e74ad1698 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -1438,7 +1438,7 @@ bind_err: static struct bnx2fc_interface * bnx2fc_interface_create(struct bnx2fc_hba *hba, struct net_device *netdev, - enum fip_state fip_mode) + enum fip_mode fip_mode) { struct fcoe_ctlr_device *ctlr_dev; struct bnx2fc_interface *interface; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 350257c13a5b..bc9f2a2365f4 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) return NULL; } + cmgr->hba = hba; cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), GFP_KERNEL); if (!cmgr->free_list) { @@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) goto mem_err; } - cmgr->hba = hba; cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); for (i = 0; i < arr_sz; i++) { @@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ mem_size = num_ios * sizeof(struct io_bdt *); - cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); if (!cmgr->io_bdt_pool) { printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); goto mem_err; diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 69c75426c5eb..c5fa5f3b00e9 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -577,7 +577,7 @@ static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } - return; + return; } /** diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c index 9bd2bd8dc2be..200e50089711 100644 --- a/drivers/scsi/csiostor/csio_attr.c +++ b/drivers/scsi/csiostor/csio_attr.c @@ -497,7 +497,6 @@ out: static int csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln) { - struct csio_lnode *pln; struct csio_mb *mbp; struct fw_fcoe_vnp_cmd *rsp; int ret = 0; @@ -514,8 +513,6 @@ csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln) goto out; } - pln = ln->pln; - csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, ln->fcf_flowid, ln->vnp_flowid, NULL); diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index cf629380a981..a6dd704d7f2d 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -167,14 +167,10 @@ csio_dfs_destroy(struct csio_hw *hw) * csio_dfs_init - 
Debug filesystem initialization for the module. * */ -static int +static void csio_dfs_init(void) { csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!csio_debugfs_root) - pr_warn("Could not create debugfs entry, continuing\n"); - - return 0; } /* @@ -210,8 +206,11 @@ csio_pci_init(struct pci_dev *pdev, int *bars) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rv) + rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rv) { + rv = -ENODEV; dev_err(&pdev->dev, "No suitable DMA available.\n"); goto err_release_regions; } diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index bc5547a62c00..462560b2855e 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1984,15 +1984,15 @@ inval_scmnd: /* FW successfully aborted the request */ if (host_byte(cmnd->result) == DID_REQUEUE) { csio_info(hw, - "Aborted SCSI command to (%d:%llu) serial#:0x%lx\n", + "Aborted SCSI command to (%d:%llu) tag %u\n", cmnd->device->id, cmnd->device->lun, - cmnd->serial_number); + cmnd->request->tag); return SUCCESS; } else { csio_info(hw, - "Failed to abort SCSI command, (%d:%llu) serial#:0x%lx\n", + "Failed to abort SCSI command, (%d:%llu) tag %u\n", cmnd->device->id, cmnd->device->lun, - cmnd->serial_number); + cmnd->request->tag); return FAILED; } } diff --git a/drivers/scsi/cxgbi/Makefile b/drivers/scsi/cxgbi/Makefile index a73781ac1800..f78c9cc460a2 100644 --- a/drivers/scsi/cxgbi/Makefile +++ b/drivers/scsi/cxgbi/Makefile @@ -1,4 +1,4 @@ -ccflags-y += -Idrivers/net/ethernet/chelsio/libcxgb +ccflags-y += -I $(srctree)/drivers/net/ethernet/chelsio/libcxgb obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libcxgbi.o cxgb3i/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libcxgbi.o cxgb4i/ diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index d26f50af00ea..d44914e5e415 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1210,7 +1210,8 @@ static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb) csk->skb_ulp_lhdr = skb; cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR); - if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) { + if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) && + (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) { pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n", csk->tid, cxgbi_skcb_tcp_seq(skb), csk->rcv_nxt); @@ -2134,8 +2135,7 @@ static void *t4_uld_add(const struct cxgb4_lld_info *lldi) cdev->itp = &cxgb4i_iscsi_transport; cdev->owner = THIS_MODULE; - cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0])) - << FW_VIID_PFN_S; + cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf); pr_info("cdev 0x%p,%s, pfvf %u.\n", cdev, lldi->ports[0]->name, cdev->pfvf); diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 245742557c03..006372b3fba2 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -1212,7 +1212,7 @@ scmd_get_params(struct scsi_cmnd *sc, struct scatterlist **sgl, unsigned int *sgcnt, unsigned int *dlen, unsigned int prot) { - struct scsi_data_buffer *sdb = prot ? scsi_prot(sc) : scsi_out(sc); + struct scsi_data_buffer *sdb = prot ? 
scsi_prot(sc) : &sc->sdb; *sgl = sdb->table.sgl; *sgcnt = sdb->table.nents; @@ -1428,8 +1428,7 @@ static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt) log_debug(1 << CXGBI_DBG_DDP, "cdev 0x%p, task 0x%p, release tag 0x%x.\n", cdev, task, tag); - if (sc && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) && + if (sc && sc->sc_data_direction == DMA_FROM_DEVICE && cxgbi_ppm_is_ddp_tag(ppm, tag)) { struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; @@ -1461,9 +1460,7 @@ static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt) u32 tag = 0; int err = -EINVAL; - if (sc && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) - ) { + if (sc && sc->sc_data_direction == DMA_FROM_DEVICE) { struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task); struct cxgbi_task_tag_info *ttinfo = &tdata->ttinfo; @@ -1897,7 +1894,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && (opcode == ISCSI_OP_SCSI_DATA_OUT || (opcode == ISCSI_OP_SCSI_CMD && - (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE)))) + sc->sc_data_direction == DMA_TO_DEVICE))) /* data could go into skb head */ headroom += min_t(unsigned int, SKB_MAX_HEAD(cdev->skb_tx_rsvd), @@ -1972,7 +1969,7 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, return 0; if (task->sc) { - struct scsi_data_buffer *sdb = scsi_out(task->sc); + struct scsi_data_buffer *sdb = &task->sc->sdb; struct scatterlist *sg = NULL; int err; diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index 8908a20065c8..4d90106fcb37 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -334,7 +334,8 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode); void cxlflash_list_init(void); void cxlflash_term_global_luns(void); void cxlflash_free_errpage(void); -int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg); +int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg); void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg); int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg); void cxlflash_term_local_luns(struct cxlflash_cfg *cfg); diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index bfa13e3b191c..7096810fd222 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3282,7 +3282,7 @@ static int cxlflash_chr_open(struct inode *inode, struct file *file) * * Return: A string identifying the decoded host ioctl. */ -static char *decode_hioctl(int cmd) +static char *decode_hioctl(unsigned int cmd) { switch (cmd) { case HT_CXLFLASH_LUN_PROVISION: @@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev, host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; cfg = shost_priv(host); + cfg->state = STATE_PROBING; cfg->host = host; rc = alloc_mem(cfg); if (rc) { @@ -3775,6 +3776,7 @@ out: return rc; out_remove: + cfg->state = STATE_PROBED; cxlflash_remove(pdev); goto out; } diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index acac6152f50b..1a94a469051e 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -1924,7 +1924,7 @@ out: * * Return: A string identifying the decoded ioctl. 
*/ -static char *decode_ioctl(int cmd) +static char *decode_ioctl(unsigned int cmd) { switch (cmd) { case DK_CXLFLASH_ATTACH: @@ -2051,7 +2051,7 @@ err1: * * Return: 0 on success, -errno on failure */ -static int ioctl_common(struct scsi_device *sdev, int cmd) +static int ioctl_common(struct scsi_device *sdev, unsigned int cmd) { struct cxlflash_cfg *cfg = shost_priv(sdev->host); struct device *dev = &cfg->dev->dev; @@ -2096,7 +2096,7 @@ out: * * Return: 0 on success, -errno on failure */ -int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +int cxlflash_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) { typedef int (*sioctl) (struct scsi_device *, void *); @@ -2179,8 +2179,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) } if (unlikely(copy_from_user(&buf, arg, size))) { - dev_err(dev, "%s: copy_from_user() fail " - "size=%lu cmd=%d (%s) arg=%p\n", + dev_err(dev, "%s: copy_from_user() fail size=%lu cmd=%u (%s) arg=%p\n", __func__, size, cmd, decode_ioctl(cmd), arg); rc = -EFAULT; goto cxlflash_ioctl_exit; @@ -2203,8 +2202,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) rc = do_ioctl(sdev, (void *)&buf); if (likely(!rc)) if (unlikely(copy_to_user(arg, &buf, size))) { - dev_err(dev, "%s: copy_to_user() fail " - "size=%lu cmd=%d (%s) arg=%p\n", + dev_err(dev, "%s: copy_to_user() fail size=%lu cmd=%u (%s) arg=%p\n", __func__, size, cmd, decode_ioctl(cmd), arg); rc = -EFAULT; } diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 70d1a18278af..abdc34affdf6 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -588,46 +588,6 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host) return 0; } -/* - * Turn a struct scsi_cmnd * into a unique 32 bit 'context'. - */ -static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd) -{ - return (u32)cmd->serial_number; -} - -/* - * Go from a u32 'context' to a struct scsi_cmnd * . - * This could probably be made more efficient. 
- */ -static struct scsi_cmnd * - adpt_cmd_from_context(adpt_hba * pHba, u32 context) -{ - struct scsi_cmnd * cmd; - struct scsi_device * d; - - if (context == 0) - return NULL; - - spin_unlock(pHba->host->host_lock); - shost_for_each_device(d, pHba->host) { - unsigned long flags; - spin_lock_irqsave(&d->list_lock, flags); - list_for_each_entry(cmd, &d->cmd_list, list) { - if (((u32)cmd->serial_number == context)) { - spin_unlock_irqrestore(&d->list_lock, flags); - scsi_device_put(d); - spin_lock(pHba->host->host_lock); - return cmd; - } - } - spin_unlock_irqrestore(&d->list_lock, flags); - } - spin_lock(pHba->host->host_lock); - - return NULL; -} - /* * Turn a pointer to ioctl reply data into an u32 'context' */ @@ -685,9 +645,6 @@ static int adpt_abort(struct scsi_cmnd * cmd) u32 msg[5]; int rcode; - if(cmd->serial_number == 0){ - return FAILED; - } pHba = (adpt_hba*) cmd->device->host->hostdata[0]; printk(KERN_INFO"%s: Trying to Abort\n",pHba->name); if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) { @@ -699,8 +656,9 @@ static int adpt_abort(struct scsi_cmnd * cmd) msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid; msg[2] = 0; - msg[3]= 0; - msg[4] = adpt_cmd_to_context(cmd); + msg[3]= 0; + /* Add 1 to avoid firmware treating it as invalid command */ + msg[4] = cmd->request->tag + 1; if (pHba->host) spin_lock_irq(pHba->host->host_lock); rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER); @@ -2198,20 +2156,27 @@ static irqreturn_t adpt_isr(int irq, void *dev_id) status = I2O_POST_WAIT_OK; } if(!(context & 0x40000000)) { - cmd = adpt_cmd_from_context(pHba, - readl(reply+12)); + /* + * The request tag is one less than the command tag + * as the firmware might treat a 0 tag as invalid + */ + cmd = scsi_host_find_tag(pHba->host, + readl(reply + 12) - 1); if(cmd != NULL) { printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); } } adpt_i2o_post_wait_complete(context, status); } else { // SCSI message - cmd = adpt_cmd_from_context (pHba, readl(reply+12)); + /* + * The request tag is one less than the command tag + * as the firmware might treat a 0 tag as invalid + */ + cmd = scsi_host_find_tag(pHba->host, + readl(reply + 12) - 1); if(cmd != NULL){ scsi_dma_unmap(cmd); - if(cmd->serial_number != 0) { // If not timedout - adpt_i2o_to_scsi(reply, cmd); - } + adpt_i2o_to_scsi(reply, cmd); } } writel(m, pHba->reply_port); @@ -2277,7 +2242,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d // I2O_CMD_SCSI_EXEC msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); msg[2] = 0; - msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */ + /* Add 1 to avoid firmware treating it as invalid command */ + msg[3] = cmd->request->tag + 1; // Our cards use the transaction context as the tag for queueing // Adaptec/DPT Private stuff msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); @@ -2693,9 +2659,6 @@ static void adpt_fail_posted_scbs(adpt_hba* pHba) unsigned long flags; spin_lock_irqsave(&d->list_lock, flags); list_for_each_entry(cmd, &d->cmd_list, list) { - if(cmd->serial_number == 0){ - continue; - } cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1); cmd->scsi_done(cmd); } diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index 858c3b33db78..7f43b95f4e94 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -965,8 +965,8 @@ struct esas2r_adapter { const char *esas2r_info(struct Scsi_Host *); 
int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, struct esas2r_sas_nvram *data); -int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg); -int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg); +int esas2r_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); u8 handle_hba_ioctl(struct esas2r_adapter *a, struct atto_ioctl *ioctl_hba); int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd); diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index 46b2c83ba21f..950cd92df2ff 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -1241,6 +1241,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, a->init_msg = ESAS2R_INIT_MSG_GET_INIT; break; } + /* fall through */ case ESAS2R_INIT_MSG_GET_INIT: if (msg == ESAS2R_INIT_MSG_GET_INIT) { @@ -1254,7 +1255,7 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a, esas2r_hdebug("FAILED"); } } - /* fall through */ + /* fall through */ default: rq->req_stat = RS_SUCCESS; diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index 34bcc8c04ff4..3d130523c288 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -1274,7 +1274,7 @@ int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, /* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */ -int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) +int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg) { struct atto_express_ioctl *ioctl = NULL; struct esas2r_adapter *a; @@ -1292,9 +1292,8 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl)); if (IS_ERR(ioctl)) { esas2r_log(ESAS2R_LOG_WARN, - "ioctl_handler access_ok failed for cmd %d, " - "address %p", cmd, - arg); + "ioctl_handler access_ok failed for cmd %u, address %p", + cmd, arg); return PTR_ERR(ioctl); } @@ -1493,7 +1492,7 @@ int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg) ioctl_done: if (err < 0) { - esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err, + esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err, cmd); switch (err) { @@ -1518,9 +1517,8 @@ ioctl_done: err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl)); if (err != 0) { esas2r_log(ESAS2R_LOG_WARN, - "ioctl_handler copy_to_user didn't copy " - "everything (err %d, cmd %d)", err, - cmd); + "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)", + err, cmd); kfree(ioctl); return -EFAULT; @@ -1531,7 +1529,7 @@ ioctl_done: return 0; } -int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg) +int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg) { return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg); } diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 64397d441bae..fdbda5c05aa0 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -623,7 +623,7 @@ static int esas2r_proc_major; long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { return esas2r_ioctl_handler(esas2r_proc_host->hostdata, - (int)cmd, (void __user *)arg); + cmd, (void __user *)arg); } static void __exit esas2r_exit(void) diff --git a/drivers/scsi/fcoe/fcoe.c 
b/drivers/scsi/fcoe/fcoe.c index cd19be3f3405..8ba8862d3292 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -389,7 +389,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, * Returns: pointer to a struct fcoe_interface or NULL on error */ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, - enum fip_state fip_mode) + enum fip_mode fip_mode) { struct fcoe_ctlr_device *ctlr_dev; struct fcoe_ctlr *ctlr; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 54da3166da8d..7dc4ffa24430 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -147,7 +147,7 @@ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip) * fcoe_ctlr_init() - Initialize the FCoE Controller instance * @fip: The FCoE controller to initialize */ -void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode) +void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode) { fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT); fip->mode = mode; @@ -454,7 +454,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip) mutex_unlock(&fip->ctlr_mutex); fc_linkup(fip->lp); } else if (fip->state == FIP_ST_LINK_WAIT) { - fcoe_ctlr_set_state(fip, fip->mode); + if (fip->mode == FIP_MODE_NON_FIP) + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP); + else + fcoe_ctlr_set_state(fip, FIP_ST_AUTO); switch (fip->mode) { default: LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode); diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index 5c8310bade61..c3dcbdc3aa64 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -671,8 +671,19 @@ static const struct device_type fcoe_fcf_device_type = { .release = fcoe_fcf_device_release, }; -static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store); -static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store); +static ssize_t ctlr_create_store(struct bus_type *bus, const char *buf, + size_t count) +{ + return fcoe_ctlr_create_store(bus, buf, count); +} +static BUS_ATTR_WO(ctlr_create); + +static ssize_t ctlr_destroy_store(struct bus_type *bus, const char *buf, + size_t count) +{ + return fcoe_ctlr_destroy_store(bus, buf, count); +} +static BUS_ATTR_WO(ctlr_destroy); static struct attribute *fcoe_bus_attrs[] = { &bus_attr_ctlr_create.attr, diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index f4909cd206d3..29fe3426f9f2 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -855,7 +855,6 @@ out_nodev: mutex_unlock(&ft_mutex); return rc; } -EXPORT_SYMBOL(fcoe_ctlr_destroy_store); /** * fcoe_transport_create() - Create a fcoe interface @@ -873,7 +872,7 @@ static int fcoe_transport_create(const char *buffer, int rc = -ENODEV; struct net_device *netdev = NULL; struct fcoe_transport *ft = NULL; - enum fip_state fip_mode = (enum fip_state)(long)kp->arg; + enum fip_mode fip_mode = (enum fip_mode)kp->arg; mutex_lock(&ft_mutex); diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index d094ba59ed15..477513dc23b7 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -39,7 +39,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.34" +#define DRV_VERSION "1.6.0.47" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -49,7 +49,7 @@ #define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */ #define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ #define FNIC_IO_LOCKS 64 /* IO 
locks: power of 2 */ -#define FNIC_DFLT_QUEUE_DEPTH 32 +#define FNIC_DFLT_QUEUE_DEPTH 256 #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ /* @@ -128,6 +128,7 @@ __fnic_set_state_flags(fnicp, st_flags, 1) extern unsigned int fnic_log_level; +extern unsigned int io_completions; #define FNIC_MAIN_LOGGING 0x01 #define FNIC_FCS_LOGGING 0x02 @@ -196,6 +197,7 @@ enum fnic_state { #define FNIC_WQ_MAX 1 #define FNIC_RQ_MAX 1 #define FNIC_CQ_MAX (FNIC_WQ_COPY_MAX + FNIC_WQ_MAX + FNIC_RQ_MAX) +#define FNIC_DFLT_IO_COMPLETIONS 256 struct mempool; diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index 139fffa3658a..21991c99db7c 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -54,23 +54,9 @@ int fnic_debugfs_init(void) { int rc = -1; fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); - if (!fnic_trace_debugfs_root) { - printk(KERN_DEBUG "Cannot create debugfs root\n"); - return rc; - } - - if (!fnic_trace_debugfs_root) { - printk(KERN_DEBUG - "fnic root directory doesn't exist in debugfs\n"); - return rc; - } fnic_stats_debugfs_root = debugfs_create_dir("statistics", fnic_trace_debugfs_root); - if (!fnic_stats_debugfs_root) { - printk(KERN_DEBUG "Cannot create Statistics directory\n"); - return rc; - } /* Allocate memory to structure */ fc_trc_flag = (struct fc_trace_flag_type *) @@ -356,39 +342,19 @@ static const struct file_operations fnic_trace_debugfs_fops = { * it will also create file trace_enable to control enable/disable of * trace logging into trace buffer. */ -int fnic_trace_debugfs_init(void) +void fnic_trace_debugfs_init(void) { - int rc = -1; - if (!fnic_trace_debugfs_root) { - printk(KERN_DEBUG - "FNIC Debugfs root directory doesn't exist\n"); - return rc; - } fnic_trace_enable = debugfs_create_file("tracing_enable", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fnic_trace), &fnic_trace_ctrl_fops); - if (!fnic_trace_enable) { - printk(KERN_DEBUG - "Cannot create trace_enable file under debugfs\n"); - return rc; - } - fnic_trace_debugfs_file = debugfs_create_file("trace", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fnic_trace), &fnic_trace_debugfs_fops); - - if (!fnic_trace_debugfs_file) { - printk(KERN_DEBUG - "Cannot create trace file under debugfs\n"); - return rc; - } - rc = 0; - return rc; } /* @@ -419,37 +385,20 @@ void fnic_trace_debugfs_terminate(void) * trace logging into trace buffer. 
*/ -int fnic_fc_trace_debugfs_init(void) +void fnic_fc_trace_debugfs_init(void) { - int rc = -1; - - if (!fnic_trace_debugfs_root) { - pr_err("fnic:Debugfs root directory doesn't exist\n"); - return rc; - } - fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fc_trace), &fnic_trace_ctrl_fops); - if (!fnic_fc_trace_enable) { - pr_err("fnic: Failed create fc_trace_enable file\n"); - return rc; - } - fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fc_clear), &fnic_trace_ctrl_fops); - if (!fnic_fc_trace_clear) { - pr_err("fnic: Failed to create fc_trace_enable file\n"); - return rc; - } - fnic_fc_rdata_trace_debugfs_file = debugfs_create_file("fc_trace_rdata", S_IFREG|S_IRUGO|S_IWUSR, @@ -457,24 +406,12 @@ int fnic_fc_trace_debugfs_init(void) &(fc_trc_flag->fc_normal_file), &fnic_trace_debugfs_fops); - if (!fnic_fc_rdata_trace_debugfs_file) { - pr_err("fnic: Failed create fc_rdata_trace file\n"); - return rc; - } - fnic_fc_trace_debugfs_file = debugfs_create_file("fc_trace", S_IFREG|S_IRUGO|S_IWUSR, fnic_trace_debugfs_root, &(fc_trc_flag->fc_row_file), &fnic_trace_debugfs_fops); - - if (!fnic_fc_trace_debugfs_file) { - pr_err("fnic: Failed to create fc_trace file\n"); - return rc; - } - rc = 0; - return rc; } /* @@ -757,45 +694,26 @@ static const struct file_operations fnic_reset_debugfs_fops = { * It will create file stats and reset_stats under statistics/host# directory * to log per fnic stats. */ -int fnic_stats_debugfs_init(struct fnic *fnic) +void fnic_stats_debugfs_init(struct fnic *fnic) { - int rc = -1; char name[16]; snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); - if (!fnic_stats_debugfs_root) { - printk(KERN_DEBUG "fnic_stats root doesn't exist\n"); - return rc; - } fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, fnic_stats_debugfs_root); - if (!fnic->fnic_stats_debugfs_host) { - printk(KERN_DEBUG "Cannot create host directory\n"); - return rc; - } fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_stats_debugfs_fops); - if (!fnic->fnic_stats_debugfs_file) { - printk(KERN_DEBUG "Cannot create host stats file\n"); - return rc; - } fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_reset_debugfs_fops); - if (!fnic->fnic_reset_debugfs_file) { - printk(KERN_DEBUG "Cannot create host stats file\n"); - return rc; - } - rc = 0; - return rc; } /* diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 844ef688fa91..911a5adc289c 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -65,11 +65,21 @@ void fnic_handle_link(struct work_struct *work) fnic->link_status = vnic_dev_link_status(fnic->vdev); fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, + vnic_dev_port_speed(fnic->vdev)); + shost_printk(KERN_INFO, fnic->lport->host, "Current vnic speed set to : %llu\n", + (u64)atomic64_read( + &fnic->fnic_stats.misc_stats.current_port_speed)); + switch (vnic_dev_port_speed(fnic->vdev)) { case DCEM_PORTSPEED_10G: fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; break; + case DCEM_PORTSPEED_20G: + fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; + 
fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; + break; case DCEM_PORTSPEED_25G: fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index e0bc659ed71f..1cb6a68c8e4e 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h @@ -70,9 +70,10 @@ enum fnic_port_speeds { DCEM_PORTSPEED_NONE = 0, DCEM_PORTSPEED_1G = 1000, DCEM_PORTSPEED_10G = 10000, + DCEM_PORTSPEED_20G = 20000, + DCEM_PORTSPEED_25G = 25000, DCEM_PORTSPEED_40G = 40000, DCEM_PORTSPEED_4x10G = 41000, - DCEM_PORTSPEED_25G = 25000, DCEM_PORTSPEED_100G = 100000, }; #endif /* _FNIC_IO_H_ */ diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 4e3a50202e8c..da4602b63495 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -51,7 +51,7 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data) } if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { - work_done += fnic_wq_copy_cmpl_handler(fnic, -1); + work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions); work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); @@ -72,7 +72,7 @@ static irqreturn_t fnic_isr_msi(int irq, void *data) fnic->fnic_stats.misc_stats.last_isr_time = jiffies; atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); - work_done += fnic_wq_copy_cmpl_handler(fnic, -1); + work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions); work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); @@ -125,7 +125,7 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) fnic->fnic_stats.misc_stats.last_isr_time = jiffies; atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); - wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); + wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], wq_copy_work_done, 1 /* unmask intr */, diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 5b3534b0deda..18584ab27c32 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -69,6 +69,11 @@ unsigned int fnic_log_level; module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); + +unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS; +module_param(io_completions, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time"); + unsigned int fnic_trace_max_pages = 16; module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " @@ -178,6 +183,9 @@ static void fnic_get_host_speed(struct Scsi_Host *shost) case DCEM_PORTSPEED_10G: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case DCEM_PORTSPEED_20G: + fc_host_speed(shost) = FC_PORTSPEED_20GBIT; + break; case DCEM_PORTSPEED_25G: fc_host_speed(shost) = FC_PORTSPEED_25GBIT; break; @@ -500,7 +508,7 @@ static int fnic_cleanup(struct fnic *fnic) } /* Clean up completed IOs and FCS frames */ - fnic_wq_copy_cmpl_handler(fnic, -1); + fnic_wq_copy_cmpl_handler(fnic, io_completions); fnic_wq_cmpl_handler(fnic, -1); fnic_rq_cmpl_handler(fnic, -1); @@ -578,12 +586,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) host->transportt = fnic_fc_transport; - err = fnic_stats_debugfs_init(fnic); - if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to 
initialize debugfs for stats\n"); - fnic_stats_debugfs_remove(fnic); - } + fnic_stats_debugfs_init(fnic); /* Setup PCI resources */ pci_set_drvdata(pdev, fnic); @@ -650,12 +653,20 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_iounmap; } + err = vnic_dev_cmd_init(fnic->vdev); + if (err) { + shost_printk(KERN_ERR, fnic->lport->host, + "vnic_dev_cmd_init() returns %d, aborting\n", + err); + goto err_out_vnic_unregister; + } + err = fnic_dev_wait(fnic->vdev, vnic_dev_open, - vnic_dev_open_done, 0); + vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "vNIC dev open failed, aborting.\n"); - goto err_out_vnic_unregister; + goto err_out_dev_cmd_deinit; } err = vnic_dev_init(fnic->vdev, 0); @@ -796,6 +807,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* allocate RQ buffers and post them to RQ*/ for (i = 0; i < fnic->rq_count; i++) { + vnic_rq_enable(&fnic->rq[i]); err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) { shost_printk(KERN_ERR, fnic->lport->host, @@ -870,15 +882,11 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Enable all queues */ for (i = 0; i < fnic->raw_wq_count; i++) vnic_wq_enable(&fnic->wq[i]); - for (i = 0; i < fnic->rq_count; i++) - vnic_rq_enable(&fnic->rq[i]); for (i = 0; i < fnic->wq_copy_count; i++) vnic_wq_copy_enable(&fnic->wq_copy[i]); fc_fabric_login(lp); - vnic_dev_enable(fnic->vdev); - err = fnic_request_intr(fnic); if (err) { shost_printk(KERN_ERR, fnic->lport->host, @@ -886,6 +894,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_free_exch_mgr; } + vnic_dev_enable(fnic->vdev); + for (i = 0; i < fnic->intr_count; i++) vnic_intr_unmask(&fnic->intr[i]); @@ -914,6 +924,7 @@ err_out_clear_intr: fnic_clear_intr_mode(fnic); err_out_dev_close: vnic_dev_close(fnic->vdev); +err_out_dev_cmd_deinit: err_out_vnic_unregister: vnic_dev_unregister(fnic->vdev); err_out_iounmap: diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index cafbcfb85bfa..80608b53897b 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -180,20 +180,19 @@ void __fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags, unsigned long clearbits) { - struct Scsi_Host *host = fnic->lport->host; - int sh_locked = spin_is_locked(host->host_lock); unsigned long flags = 0; + unsigned long host_lock_flags = 0; - if (!sh_locked) - spin_lock_irqsave(host->host_lock, flags); + spin_lock_irqsave(&fnic->fnic_lock, flags); + spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags); if (clearbits) fnic->state_flags &= ~st_flags; else fnic->state_flags |= st_flags; - if (!sh_locked) - spin_unlock_irqrestore(host->host_lock, flags); + spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); return; } @@ -1326,13 +1325,32 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) unsigned int wq_work_done = 0; unsigned int i, cq_index; unsigned int cur_work_done; + struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; + u64 start_jiffies = 0; + u64 end_jiffies = 0; + u64 delta_jiffies = 0; + u64 delta_ms = 0; for (i = 0; i < fnic->wq_copy_count; i++) { cq_index = i + fnic->raw_wq_count + fnic->rq_count; + + start_jiffies = jiffies; cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index], fnic_fcpio_cmpl_handler, copy_work_to_do); + end_jiffies = 
jiffies; + wq_work_done += cur_work_done; + delta_jiffies = end_jiffies - start_jiffies; + if (delta_jiffies > + (u64) atomic64_read(&misc_stats->max_isr_jiffies)) { + atomic64_set(&misc_stats->max_isr_jiffies, + delta_jiffies); + delta_ms = jiffies_to_msecs(delta_jiffies); + atomic64_set(&misc_stats->max_isr_time_ms, delta_ms); + atomic64_set(&misc_stats->corr_work_done, + cur_work_done); + } } return wq_work_done; } @@ -1397,8 +1415,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) cleanup_scsi_cmd: sc->result = DID_TRANSPORT_DISRUPTED << 16; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, - "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n", - __func__, (jiffies - start_time)); + "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", + __func__, sc->request->tag, sc, + (jiffies - start_time)); if (atomic64_read(&fnic->io_cmpl_skip)) atomic64_dec(&fnic->io_cmpl_skip); @@ -1407,6 +1426,11 @@ cleanup_scsi_cmd: /* Complete the command to SCSI */ if (sc->scsi_done) { + if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) + shost_printk(KERN_ERR, fnic->lport->host, + "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", + sc->request->tag, sc); + FNIC_TRACE(fnic_cleanup_io, sc->device->host->host_no, i, sc, jiffies_to_msecs(jiffies - start_time), diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h index 9daa6ada6fa0..086f729f3c46 100644 --- a/drivers/scsi/fnic/fnic_stats.h +++ b/drivers/scsi/fnic/fnic_stats.h @@ -97,6 +97,9 @@ struct vlan_stats { struct misc_stats { u64 last_isr_time; u64 last_ack_time; + atomic64_t max_isr_jiffies; + atomic64_t max_isr_time_ms; + atomic64_t corr_work_done; atomic64_t isr_count; atomic64_t max_cq_entries; atomic64_t ack_index_out_of_range; @@ -113,6 +116,7 @@ struct misc_stats { atomic64_t queue_fulls; atomic64_t rport_not_ready; atomic64_t frame_errors; + atomic64_t current_port_speed; }; struct fnic_stats { @@ -134,6 +138,6 @@ struct stats_debug_info { }; int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); -int fnic_stats_debugfs_init(struct fnic *); +void fnic_stats_debugfs_init(struct fnic *); void fnic_stats_debugfs_remove(struct fnic *); #endif /* _FNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index bf0fd2aeb92e..9621831e17ba 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -409,6 +409,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug, len += snprintf(debug->debug_buffer + len, buf_size - len, "Last ISR time: %llu (%8llu.%09lu)\n" "Last ACK time: %llu (%8llu.%09lu)\n" + "Max ISR jiffies: %llu\n" + "Max ISR time (ms) (0 denotes < 1 ms): %llu\n" + "Corr. 
work done: %llu\n" "Number of ISRs: %lld\n" "Maximum CQ Entries: %lld\n" "Number of ACK index out of range: %lld\n" @@ -428,6 +431,9 @@ int fnic_get_stats_data(struct stats_debug_info *debug, (s64)val1.tv_sec, val1.tv_nsec, (u64)stats->misc_stats.last_ack_time, (s64)val2.tv_sec, val2.tv_nsec, + (u64)atomic64_read(&stats->misc_stats.max_isr_jiffies), + (u64)atomic64_read(&stats->misc_stats.max_isr_time_ms), + (u64)atomic64_read(&stats->misc_stats.corr_work_done), (u64)atomic64_read(&stats->misc_stats.isr_count), (u64)atomic64_read(&stats->misc_stats.max_cq_entries), (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range), @@ -446,6 +452,11 @@ int fnic_get_stats_data(struct stats_debug_info *debug, (u64)atomic64_read(&stats->misc_stats.rport_not_ready), (u64)atomic64_read(&stats->misc_stats.frame_errors)); + len += snprintf(debug->debug_buffer + len, buf_size - len, + "Firmware reported port speed: %llu\n", + (u64)atomic64_read( + &stats->misc_stats.current_port_speed)); + return len; } @@ -503,15 +514,10 @@ int fnic_trace_buf_init(void) fnic_trace_entries.page_offset[i] = fnic_buf_head; fnic_buf_head += FNIC_ENTRY_SIZE_BYTES; } - err = fnic_trace_debugfs_init(); - if (err < 0) { - pr_err("fnic: Failed to initialize debugfs for tracing\n"); - goto err_fnic_trace_debugfs_init; - } + fnic_trace_debugfs_init(); pr_info("fnic: Successfully Initialized Trace Buffer\n"); return err; -err_fnic_trace_debugfs_init: - fnic_trace_free(); + err_fnic_trace_buf_init: return err; } @@ -596,16 +602,10 @@ int fnic_fc_trace_init(void) fc_trace_entries.page_offset[i] = fc_trace_buf_head; fc_trace_buf_head += FC_TRC_SIZE_BYTES; } - err = fnic_fc_trace_debugfs_init(); - if (err < 0) { - pr_err("fnic: Failed to initialize FC_CTLR tracing.\n"); - goto err_fnic_fc_ctlr_trace_debugfs_init; - } + fnic_fc_trace_debugfs_init(); pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n"); return err; -err_fnic_fc_ctlr_trace_debugfs_init: - fnic_fc_trace_free(); err_fnic_fc_ctlr_trace_buf_init: return err; } diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h index e375d0c2eaaf..8aa55c1e2025 100644 --- a/drivers/scsi/fnic/fnic_trace.h +++ b/drivers/scsi/fnic/fnic_trace.h @@ -111,7 +111,7 @@ int fnic_trace_buf_init(void); void fnic_trace_free(void); int fnic_debugfs_init(void); void fnic_debugfs_terminate(void); -int fnic_trace_debugfs_init(void); +void fnic_trace_debugfs_init(void); void fnic_trace_debugfs_terminate(void); /* Fnic FC CTLR Trace related function */ @@ -123,7 +123,7 @@ int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag); void copy_and_format_trace_data(struct fc_trace_hdr *tdata, fnic_dbgfs_t *fnic_dbgfs_prt, int *len, u8 rdata_flag); -int fnic_fc_trace_debugfs_init(void); +void fnic_fc_trace_debugfs_init(void); void fnic_fc_trace_debugfs_terminate(void); #endif diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 434447ea24b8..78af9cc2009b 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -27,6 +27,24 @@ #include "vnic_devcmd.h" #include "vnic_dev.h" #include "vnic_stats.h" +#include "vnic_wq.h" + +struct devcmd2_controller { + struct vnic_wq_ctrl *wq_ctrl; + struct vnic_dev_ring results_ring; + struct vnic_wq wq; + struct vnic_devcmd2 *cmd_ring; + struct devcmd2_result *result; + u16 next_result; + u16 result_size; + int color; +}; + +enum vnic_proxy_type { + PROXY_NONE, + PROXY_BY_BDF, + PROXY_BY_INDEX, +}; struct vnic_res { void __iomem *vaddr; @@ -48,6 +66,12 @@ struct vnic_dev { dma_addr_t 
stats_pa; struct vnic_devcmd_fw_info *fw_info; dma_addr_t fw_info_pa; + enum vnic_proxy_type proxy; + u32 proxy_index; + u64 args[VNIC_DEVCMD_NARGS]; + struct devcmd2_controller *devcmd2; + int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait); }; #define VNIC_MAX_RES_HDR_SIZE \ @@ -119,6 +143,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, } break; case RES_TYPE_INTR_PBA_LEGACY: + case RES_TYPE_DEVCMD2: case RES_TYPE_DEVCMD: len = count; break; @@ -229,8 +254,7 @@ void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring) } } -int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, - u64 *a0, u64 *a1, int wait) +int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct vnic_devcmd __iomem *devcmd = vdev->devcmd; int delay; @@ -244,6 +268,8 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, EBUSY, /* ERR_EBUSY */ }; int err; + u64 *a0 = &vdev->args[0]; + u64 *a1 = &vdev->args[1]; status = ioread32(&devcmd->status); if (status & STAT_BUSY) { @@ -290,6 +316,223 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -ETIMEDOUT; } +int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + int wait) +{ + struct devcmd2_controller *dc2c = vdev->devcmd2; + struct devcmd2_result *result; + u8 color; + unsigned int i; + int delay; + int err; + u32 fetch_index; + u32 posted; + u32 new_posted; + + posted = ioread32(&dc2c->wq_ctrl->posted_index); + fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); + + if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) { + /* Hardware surprise removal: return error */ + pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n", + pci_name(vdev->pdev), _CMD_N(cmd)); + pr_err("%s: fetch index: %u, posted index: %u\n", + pci_name(vdev->pdev), fetch_index, posted); + + return -ENODEV; + + } + + new_posted = (posted + 1) % DEVCMD2_RING_SIZE; + + if (new_posted == fetch_index) { + pr_err("%s: devcmd2 wq full while issuing cmd %d\n", + pci_name(vdev->pdev), _CMD_N(cmd)); + pr_err("%s: fetch index: %u, posted index: %u\n", + pci_name(vdev->pdev), fetch_index, posted); + return -EBUSY; + + } + dc2c->cmd_ring[posted].cmd = cmd; + dc2c->cmd_ring[posted].flags = 0; + + if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) + dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; + if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + dc2c->cmd_ring[posted].args[i] = vdev->args[i]; + + } + + /* Adding write memory barrier prevents compiler and/or CPU + * reordering, thus avoiding descriptor posting before + * descriptor is initialized. Otherwise, hardware can read + * stale descriptor fields. + */ + wmb(); + iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); + + if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) + return 0; + + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 
0 : 1; + } + + for (delay = 0; delay < wait; delay++) { + udelay(100); + if (result->color == color) { + if (result->error) { + err = -(int) result->error; + if (err != ERR_ECMDUNKNOWN || + cmd != CMD_CAPABILITY) + pr_err("%s:Error %d devcmd %d\n", + pci_name(vdev->pdev), + err, _CMD_N(cmd)); + return err; + } + if (_CMD_DIR(cmd) & _CMD_DIR_READ) { + rmb(); /* prevent reorder while reading result */ + for (i = 0; i < VNIC_DEVCMD_NARGS; i++) + vdev->args[i] = result->results[i]; + } + return 0; + } + } + + pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd)); + + return -ETIMEDOUT; +} + + +int vnic_dev_init_devcmd1(struct vnic_dev *vdev) +{ + vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); + if (!vdev->devcmd) + return -ENODEV; + + vdev->devcmd_rtn = &vnic_dev_cmd1; + return 0; +} + + +int vnic_dev_init_devcmd2(struct vnic_dev *vdev) +{ + int err; + unsigned int fetch_index; + + if (vdev->devcmd2) + return 0; + + vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC); + if (!vdev->devcmd2) + return -ENOMEM; + + vdev->devcmd2->color = 1; + vdev->devcmd2->result_size = DEVCMD2_RING_SIZE; + err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_free_devcmd2; + + fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); + if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ + pr_err("error in devcmd2 init"); + return -ENODEV; + } + + /* + * Don't change fetch_index ever and + * set posted_index same as fetch_index + * when setting up the WQ for devcmd2. + */ + vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, + fetch_index, 0, 0); + + vnic_wq_enable(&vdev->devcmd2->wq); + + err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, + DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); + if (err) + goto err_free_wq; + + vdev->devcmd2->result = + (struct devcmd2_result *) vdev->devcmd2->results_ring.descs; + vdev->devcmd2->cmd_ring = + (struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs; + vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; + vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr | + VNIC_PADDR_TARGET; + vdev->args[1] = DEVCMD2_RING_SIZE; + + err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000); + if (err) + goto err_free_desc_ring; + + vdev->devcmd_rtn = &vnic_dev_cmd2; + + return 0; + +err_free_desc_ring: + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); +err_free_wq: + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); +err_free_devcmd2: + kfree(vdev->devcmd2); + vdev->devcmd2 = NULL; + + return err; +} + + +void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev) +{ + vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); + vnic_wq_disable(&vdev->devcmd2->wq); + vnic_wq_free(&vdev->devcmd2->wq); + kfree(vdev->devcmd2); + vdev->devcmd2 = NULL; + vdev->devcmd_rtn = &vnic_dev_cmd1; +} + + +int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev, + enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait) +{ + int err; + + vdev->args[0] = *a0; + vdev->args[1] = *a1; + + err = (*vdev->devcmd_rtn)(vdev, cmd, wait); + + *a0 = vdev->args[0]; + *a1 = vdev->args[1]; + + return err; +} + + +int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, + u64 *a0, u64 *a1, int wait) +{ + memset(vdev->args, 0, sizeof(vdev->args)); + + switch (vdev->proxy) { + case PROXY_NONE: + default: + return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait); + } +} + + int vnic_dev_fw_info(struct vnic_dev *vdev, struct vnic_devcmd_fw_info **fw_info) { @@ -664,6 +907,8 
@@ void vnic_dev_unregister(struct vnic_dev *vdev) dma_free_coherent(&vdev->pdev->dev, sizeof(struct vnic_devcmd_fw_info), vdev->fw_info, vdev->fw_info_pa); + if (vdev->devcmd2) + vnic_dev_deinit_devcmd2(vdev); kfree(vdev); } } @@ -683,13 +928,26 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, if (vnic_dev_discover_res(vdev, bar)) goto err_out; - vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0); - if (!vdev->devcmd) - goto err_out; - return vdev; err_out: vnic_dev_unregister(vdev); return NULL; } + +int vnic_dev_cmd_init(struct vnic_dev *vdev) +{ + int err; + void *p; + + p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0); + if (p) { + pr_err("fnic: DEVCMD2 resource found!\n"); + err = vnic_dev_init_devcmd2(vdev); + } else { + pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n"); + err = vnic_dev_init_devcmd1(vdev); + } + + return err; +} diff --git a/drivers/scsi/fnic/vnic_dev.h b/drivers/scsi/fnic/vnic_dev.h index 40d4195f562b..ef5309a5df5d 100644 --- a/drivers/scsi/fnic/vnic_dev.h +++ b/drivers/scsi/fnic/vnic_dev.h @@ -36,6 +36,7 @@ #define vnic_dev_fw_info fnic_dev_fw_info #define vnic_dev_spec fnic_dev_spec #define vnic_dev_stats_clear fnic_dev_stats_clear +#define vnic_dev_cmd_init fnic_dev_cmd_init #define vnic_dev_stats_dump fnic_dev_stats_dump #define vnic_dev_hang_notify fnic_dev_hang_notify #define vnic_dev_packet_filter fnic_dev_packet_filter @@ -128,6 +129,7 @@ int vnic_dev_fw_info(struct vnic_dev *vdev, int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, void *value); int vnic_dev_stats_clear(struct vnic_dev *vdev); +int vnic_dev_cmd_init(struct vnic_dev *vdev); int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); int vnic_dev_hang_notify(struct vnic_dev *vdev); void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, diff --git a/drivers/scsi/fnic/vnic_devcmd.h b/drivers/scsi/fnic/vnic_devcmd.h index 3e2fcbda6aed..c5dde556dc7c 100644 --- a/drivers/scsi/fnic/vnic_devcmd.h +++ b/drivers/scsi/fnic/vnic_devcmd.h @@ -170,7 +170,8 @@ enum vnic_devcmd_cmd { /* variant of CMD_INIT, with provisioning info * (u64)a0=paddr of vnic_devcmd_provinfo - * (u32)a1=sizeof provision info */ + * (u32)a1=sizeof provision info + */ CMD_INIT_PROV_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 27), /* enable virtual link */ @@ -262,12 +263,132 @@ enum vnic_devcmd_cmd { * non-zero for resetting vlan to the default * out: (u16)a0=old default vlan */ - CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46) + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46), + + /* init_prov_info2: + * Variant of CMD_INIT_PROV_INFO, where it will not try to enable + * the vnic until CMD_ENABLE2 is issued. + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47), + + /* enable2: + * (u32)a0=0 ==> standby + * =CMD_ENABLE2_ACTIVE ==> active + */ + CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48), + + /* + * cmd_status: + * Returns the status of the specified command + * Input: + * a0 = command for which status is being queried. + * Possible values are: + * CMD_SOFT_RESET + * CMD_HANG_RESET + * CMD_OPEN + * CMD_INIT + * CMD_INIT_PROV_INFO + * CMD_DEINIT + * CMD_INIT_PROV_INFO2 + * CMD_ENABLE2 + * Output: + * if status == STAT_ERROR + * a0 = ERR_ENOTSUPPORTED - status for command in a0 is + * not supported + * if status == STAT_NONE + * a0 = status of the devcmd specified in a0 as follows. 
+ * ERR_SUCCESS - command in a0 completed successfully + * ERR_EINPROGRESS - command in a0 is still in progress + */ + CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49), + + /* + * Returns interrupt coalescing timer conversion factors. + * After calling this devcmd, ENIC driver can convert + * interrupt coalescing timer in usec into CPU cycles as follows: + * + * intr_timer_cycles = intr_timer_usec * multiplier / divisor + * + * Interrupt coalescing timer in usecs can be converted/obtained + * from CPU cycles as follows: + * + * intr_timer_usec = intr_timer_cycles * divisor / multiplier + * + * in: none + * out: (u32)a0 = multiplier + * (u32)a1 = divisor + * (u32)a2 = maximum timer value in usec + */ + CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50), + + /* + * ISCSI DUMP API: + * in: (u64)a0=paddr of the param or param itself + * (u32)a1=ISCSI_CMD_xxx + */ + CMD_ISCSI_DUMP_REQ = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 51), + + /* + * ISCSI DUMP STATUS API: + * in: (u32)a0=cmd tag + * in: (u32)a1=ISCSI_CMD_xxx + * out: (u32)a0=cmd status + */ + CMD_ISCSI_DUMP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 52), + + /* + * Subvnic migration from MQ <--> VF. + * Enable the LIF migration from MQ to VF and vice versa. MQ and VF + * indexes are statically bound at the time of initialization. + * Based on the + * direction of migration, the resources of either MQ or the VF shall + * be attached to the LIF. + * in: (u32)a0=Direction of Migration + * 0=> Migrate to VF + * 1=> Migrate to MQ + * (u32)a1=VF index (MQ index) + */ + CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53), + + /* + * Register / Deregister the notification block for MQ subvnics + * in: + * (u64)a0=paddr to notify (set paddr=0 to unset) + * (u32)a1 & 0x00000000ffffffff=sizeof(struct vnic_devcmd_notify) + * (u16)a1 & 0x0000ffff00000000=intr num (-1 for no intr) + * out: + * (u32)a1 = effective size + */ + CMD_SUBVNIC_NOTIFY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 54), + + /* + * Set the predefined mac address as default + * in: + * (u48)a0=mac addr + */ + CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55), + + /* Update the provisioning info of the given VIF + * (u64)a0=paddr of vnic_devcmd_provinfo + * (u32)a1=sizeof provision info + */ + CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56), + + /* + * Initialization for the devcmd2 interface. + * in: (u64) a0=host result buffer physical address + * in: (u16) a1=number of entries in result buffer + */ + CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57) }; /* flags for CMD_OPEN */ #define CMD_OPENF_OPROM 0x1 /* open coming from option rom */ +#define CMD_OPENF_RQ_ENABLE_THEN_POST 0x2 + /* flags for CMD_INIT */ #define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */ @@ -345,4 +466,39 @@ struct vnic_devcmd { u64 args[VNIC_DEVCMD_NARGS]; /* RW cmd args (little-endian) */ }; +/* + * Version 2 of the interface. + * + * Some things are carried over, notably the vnic_devcmd_cmd enum. 
+ */ + +/* + * Flags for vnic_devcmd2.flags + */ + +#define DEVCMD2_FNORESULT 0x1 /* Don't copy result to host */ + +#define VNIC_DEVCMD2_NARGS VNIC_DEVCMD_NARGS + +struct vnic_devcmd2 { + u16 pad; + u16 flags; + u32 cmd; /* same command #defines as original */ + u64 args[VNIC_DEVCMD2_NARGS]; +}; + +#define VNIC_DEVCMD2_NRESULTS VNIC_DEVCMD_NARGS +struct devcmd2_result { + u64 results[VNIC_DEVCMD2_NRESULTS]; + u32 pad; + u16 completed_index; /* into copy WQ */ + u8 error; /* same error codes as original */ + u8 color; /* 0 or 1 as with completion queues */ +}; + +#define DEVCMD2_RING_SIZE 32 +#define DEVCMD2_DESC_SIZE 128 + +#define DEVCMD2_RESULTS_SIZE_MAX ((1 << 16) - 1) + #endif /* _VNIC_DEVCMD_H_ */ diff --git a/drivers/scsi/fnic/vnic_resource.h b/drivers/scsi/fnic/vnic_resource.h index 2d842f79d41a..7c6163f73bd3 100644 --- a/drivers/scsi/fnic/vnic_resource.h +++ b/drivers/scsi/fnic/vnic_resource.h @@ -41,6 +41,13 @@ enum vnic_res_type { RES_TYPE_RSVD7, RES_TYPE_DEVCMD, /* Device command region */ RES_TYPE_PASS_THRU_PAGE, /* Pass-thru page */ + RES_TYPE_SUBVNIC, /* subvnic resource type */ + RES_TYPE_MQ_WQ, /* MQ Work queues */ + RES_TYPE_MQ_RQ, /* MQ Receive queues */ + RES_TYPE_MQ_CQ, /* MQ Completion queues */ + RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */ + RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */ + RES_TYPE_DEVCMD2, /* Device control region */ RES_TYPE_MAX, /* Count of resource types */ }; diff --git a/drivers/scsi/fnic/vnic_rq.c b/drivers/scsi/fnic/vnic_rq.c index fd2068f5ae16..6a35b1be0032 100644 --- a/drivers/scsi/fnic/vnic_rq.c +++ b/drivers/scsi/fnic/vnic_rq.c @@ -27,12 +27,9 @@ static int vnic_rq_alloc_bufs(struct vnic_rq *rq) { struct vnic_rq_buf *buf; - struct vnic_dev *vdev; unsigned int i, j, count = rq->ring.desc_count; unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count); - vdev = rq->vdev; - for (i = 0; i < blks; i++) { rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); if (!rq->bufs[i]) { @@ -171,7 +168,7 @@ void vnic_rq_clean(struct vnic_rq *rq, struct vnic_rq_buf *buf; u32 fetch_index; - BUG_ON(ioread32(&rq->ctrl->enable)); + WARN_ON(ioread32(&rq->ctrl->enable)); buf = rq->to_clean; diff --git a/drivers/scsi/fnic/vnic_wq.c b/drivers/scsi/fnic/vnic_wq.c index a414135460db..015af2cdabaf 100644 --- a/drivers/scsi/fnic/vnic_wq.c +++ b/drivers/scsi/fnic/vnic_wq.c @@ -24,15 +24,32 @@ #include "vnic_dev.h" #include "vnic_wq.h" + +int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int index, enum vnic_res_type res_type) +{ + wq->ctrl = vnic_dev_get_res(vdev, res_type, index); + + if (!wq->ctrl) + return -EINVAL; + + return 0; +} + + +int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); +} + + static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { struct vnic_wq_buf *buf; - struct vnic_dev *vdev; unsigned int i, j, count = wq->ring.desc_count; unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); - vdev = wq->vdev; - for (i = 0; i < blks; i++) { wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); if (!wq->bufs[i]) { @@ -111,6 +128,52 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, return 0; } + +int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size) +{ + int err; + + wq->index = 0; + wq->vdev = vdev; + + err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2); + if (err) { + pr_err("Failed to get 
devcmd2 resource\n"); + return err; + } + vnic_wq_disable(wq); + + err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size); + if (err) + return err; + return 0; +} + +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset) +{ + u64 paddr; + unsigned int count = wq->ring.desc_count; + + paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; + writeq(paddr, &wq->ctrl->ring_base); + iowrite32(count, &wq->ctrl->ring_size); + iowrite32(fetch_index, &wq->ctrl->fetch_index); + iowrite32(posted_index, &wq->ctrl->posted_index); + iowrite32(cq_index, &wq->ctrl->cq_index); + iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); + iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); + iowrite32(0, &wq->ctrl->error_status); + + wq->to_use = wq->to_clean = + &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES] + [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES]; +} + + void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) diff --git a/drivers/scsi/fnic/vnic_wq.h b/drivers/scsi/fnic/vnic_wq.h index 5cd094f79281..5d1e0a44d94a 100644 --- a/drivers/scsi/fnic/vnic_wq.h +++ b/drivers/scsi/fnic/vnic_wq.h @@ -33,6 +33,8 @@ #define vnic_wq_service fnic_wq_service #define vnic_wq_free fnic_wq_free #define vnic_wq_alloc fnic_wq_alloc +#define vnic_wq_devcmd2_alloc fnic_wq_devcmd2_alloc +#define vnic_wq_init_start fnic_wq_init_start #define vnic_wq_init fnic_wq_init #define vnic_wq_error_status fnic_wq_error_status #define vnic_wq_enable fnic_wq_enable @@ -163,6 +165,12 @@ static inline void vnic_wq_service(struct vnic_wq *wq, void vnic_wq_free(struct vnic_wq *wq); int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size); +int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, + unsigned int desc_count, unsigned int desc_size); +void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, + unsigned int fetch_index, unsigned int posted_index, + unsigned int error_interrupt_enable, + unsigned int error_interrupt_offset); void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset); diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 194c294f9b6c..e7f1dd4f3b66 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -1,6 +1,6 @@ /************************************************************************ * Linux driver for * - * ICP vortex GmbH: GDT ISA/EISA/PCI Disk Array Controllers * + * ICP vortex GmbH: GDT PCI Disk Array Controllers * * Intel Corporation: Storage RAID Controllers * * * * gdth.c * @@ -32,15 +32,10 @@ ************************************************************************/ /* All GDT Disk Array Controllers are fully supported by this driver. - * This includes the PCI/EISA/ISA SCSI Disk Array Controllers and the + * This includes the PCI SCSI Disk Array Controllers and the * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete * list of all controller types. * - * If you have one or more GDT3000/3020 EISA controllers with - * controller BIOS disabled, you have to set the IRQ values with the - * command line option "gdth=irq1,irq2,...", where the irq1,irq2,... are - * the IRQ values for the EISA controllers. 
- * * After the optional list of IRQ values, other possible * command line options are: * disable:Y disable driver @@ -61,14 +56,12 @@ * access a shared resource from several nodes, * appropriate controller firmware required * shared_access:N enable driver reserve/release protocol - * probe_eisa_isa:Y scan for EISA/ISA controllers - * probe_eisa_isa:N do not scan for EISA/ISA controllers * force_dma32:Y use only 32 bit DMA mode * force_dma32:N use 64 bit DMA mode, if supported * * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N, * max_ids:127,rescan:N,hdr_channel:0, - * shared_access:Y,probe_eisa_isa:N,force_dma32:N". + * shared_access:Y,force_dma32:N". * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y". * * When loading the gdth driver as a module, the same options are available. @@ -79,7 +72,7 @@ * * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0 * max_ids=127 rescan=0 hdr_channel=0 shared_access=0 - * probe_eisa_isa=0 force_dma32=0" + * force_dma32=0" * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1". */ @@ -96,10 +89,6 @@ * phase: unused */ - -/* interrupt coalescing */ -/* #define INT_COAL */ - /* statistics */ #define GDTH_STATISTICS @@ -122,10 +111,6 @@ #include #include #include - -#ifdef GDTH_RTC -#include -#endif #include #include @@ -192,79 +177,9 @@ static void gdth_scsi_done(struct scsi_cmnd *scp); #ifdef DEBUG_GDTH static u8 DebugState = DEBUG_GDTH; - -#ifdef __SERIAL__ -#define MAX_SERBUF 160 -static void ser_init(void); -static void ser_puts(char *str); -static void ser_putc(char c); -static int ser_printk(const char *fmt, ...); -static char strbuf[MAX_SERBUF+1]; -#ifdef __COM2__ -#define COM_BASE 0x2f8 -#else -#define COM_BASE 0x3f8 -#endif -static void ser_init() -{ - unsigned port=COM_BASE; - - outb(0x80,port+3); - outb(0,port+1); - /* 19200 Baud, if 9600: outb(12,port) */ - outb(6, port); - outb(3,port+3); - outb(0,port+1); - /* - ser_putc('I'); - ser_putc(' '); - */ -} - -static void ser_puts(char *str) -{ - char *ptr; - - ser_init(); - for (ptr=str;*ptr;++ptr) - ser_putc(*ptr); -} - -static void ser_putc(char c) -{ - unsigned port=COM_BASE; - - while ((inb(port+5) & 0x20)==0); - outb(c,port); - if (c==0x0a) - { - while ((inb(port+5) & 0x20)==0); - outb(0x0d,port); - } -} - -static int ser_printk(const char *fmt, ...) -{ - va_list args; - int i; - - va_start(args,fmt); - i = vsprintf(strbuf,fmt,args); - ser_puts(strbuf); - va_end(args); - return i; -} - -#define TRACE(a) {if (DebugState==1) {ser_printk a;}} -#define TRACE2(a) {if (DebugState==1 || DebugState==2) {ser_printk a;}} -#define TRACE3(a) {if (DebugState!=0) {ser_printk a;}} - -#else /* !__SERIAL__ */ #define TRACE(a) {if (DebugState==1) {printk a;}} #define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}} #define TRACE3(a) {if (DebugState!=0) {printk a;}} -#endif - #else /* !DEBUG */ #define TRACE(a) #define TRACE2(a) @@ -273,9 +188,6 @@ static int ser_printk(const char *fmt, ...) #ifdef GDTH_STATISTICS static u32 max_rq=0, max_index=0, max_sg=0; -#ifdef INT_COAL -static u32 max_int_coal=0; -#endif static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0; static struct timer_list gdth_timer; #endif @@ -286,12 +198,6 @@ static struct timer_list gdth_timer; #define BUS_L2P(a,b) ((b)>(a)->virt_bus ? 
(b-1):(b)) -#ifdef CONFIG_ISA -static u8 gdth_drq_tab[4] = {5,6,7,7}; /* DRQ table */ -#endif -#if defined(CONFIG_EISA) || defined(CONFIG_ISA) -static u8 gdth_irq_tab[6] = {0,10,11,12,14,0}; /* IRQ table */ -#endif static u8 gdth_polling; /* polling if TRUE */ static int gdth_ctr_count = 0; /* controller count */ static LIST_HEAD(gdth_instances); /* controller list */ @@ -325,10 +231,6 @@ static u8 gdth_direction_tab[0x100] = { }; /* LILO and modprobe/insmod parameters */ -/* IRQ list for GDT3000/3020 EISA controllers */ -static int irq[MAXHA] __initdata = -{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; /* disable driver flag */ static int disable __initdata = 0; /* reserve flag */ @@ -348,13 +250,10 @@ static int max_ids = MAXID; static int rescan = 0; /* shared access */ static int shared_access = 1; -/* enable support for EISA and ISA controllers */ -static int probe_eisa_isa = 0; /* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */ static int force_dma32 = 0; /* parameters for modprobe/insmod */ -module_param_hw_array(irq, int, irq, NULL, 0); module_param(disable, int, 0); module_param(reserve_mode, int, 0); module_param_array(reserve_list, int, NULL, 0); @@ -363,7 +262,6 @@ module_param(hdr_channel, int, 0); module_param(max_ids, int, 0); module_param(rescan, int, 0); module_param(shared_access, int, 0); -module_param(probe_eisa_isa, int, 0); module_param(force_dma32, int, 0); MODULE_AUTHOR("Achim Leubner"); MODULE_LICENSE("GPL"); @@ -515,45 +413,6 @@ static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs) } } -/* controller search and initialization functions */ -#ifdef CONFIG_EISA -static int __init gdth_search_eisa(u16 eisa_adr) -{ - u32 id; - - TRACE(("gdth_search_eisa() adr. %x\n",eisa_adr)); - id = inl(eisa_adr+ID0REG); - if (id == GDT3A_ID || id == GDT3B_ID) { /* GDT3000A or GDT3000B */ - if ((inb(eisa_adr+EISAREG) & 8) == 0) - return 0; /* not EISA configured */ - return 1; - } - if (id == GDT3_ID) /* GDT3000 */ - return 1; - - return 0; -} -#endif /* CONFIG_EISA */ - -#ifdef CONFIG_ISA -static int __init gdth_search_isa(u32 bios_adr) -{ - void __iomem *addr; - u32 id; - - TRACE(("gdth_search_isa() bios adr. %x\n",bios_adr)); - if ((addr = ioremap(bios_adr+BIOS_ID_OFFS, sizeof(u32))) != NULL) { - id = readl(addr); - iounmap(addr); - if (id == GDT2_ID) /* GDT2000 */ - return 1; - } - return 0; -} -#endif /* CONFIG_ISA */ - -#ifdef CONFIG_PCI - static bool gdth_search_vortex(u16 device) { if (device <= PCI_DEVICE_ID_VORTEX_GDT6555) @@ -656,204 +515,7 @@ static int gdth_pci_init_one(struct pci_dev *pdev, return 0; } -#endif /* CONFIG_PCI */ -#ifdef CONFIG_EISA -static int __init gdth_init_eisa(u16 eisa_adr,gdth_ha_str *ha) -{ - u32 retries,id; - u8 prot_ver,eisacf,i,irq_found; - - TRACE(("gdth_init_eisa() adr. 
%x\n",eisa_adr)); - - /* disable board interrupts, deinitialize services */ - outb(0xff,eisa_adr+EDOORREG); - outb(0x00,eisa_adr+EDENABREG); - outb(0x00,eisa_adr+EINTENABREG); - - outb(0xff,eisa_adr+LDOORREG); - retries = INIT_RETRIES; - gdth_delay(20); - while (inb(eisa_adr+EDOORREG) != 0xff) { - if (--retries == 0) { - printk("GDT-EISA: Initialization error (DEINIT failed)\n"); - return 0; - } - gdth_delay(1); - TRACE2(("wait for DEINIT: retries=%d\n",retries)); - } - prot_ver = inb(eisa_adr+MAILBOXREG); - outb(0xff,eisa_adr+EDOORREG); - if (prot_ver != PROTOCOL_VERSION) { - printk("GDT-EISA: Illegal protocol version\n"); - return 0; - } - ha->bmic = eisa_adr; - ha->brd_phys = (u32)eisa_adr >> 12; - - outl(0,eisa_adr+MAILBOXREG); - outl(0,eisa_adr+MAILBOXREG+4); - outl(0,eisa_adr+MAILBOXREG+8); - outl(0,eisa_adr+MAILBOXREG+12); - - /* detect IRQ */ - if ((id = inl(eisa_adr+ID0REG)) == GDT3_ID) { - ha->oem_id = OEM_ID_ICP; - ha->type = GDT_EISA; - ha->stype = id; - outl(1,eisa_adr+MAILBOXREG+8); - outb(0xfe,eisa_adr+LDOORREG); - retries = INIT_RETRIES; - gdth_delay(20); - while (inb(eisa_adr+EDOORREG) != 0xfe) { - if (--retries == 0) { - printk("GDT-EISA: Initialization error (get IRQ failed)\n"); - return 0; - } - gdth_delay(1); - } - ha->irq = inb(eisa_adr+MAILBOXREG); - outb(0xff,eisa_adr+EDOORREG); - TRACE2(("GDT3000/3020: IRQ=%d\n",ha->irq)); - /* check the result */ - if (ha->irq == 0) { - TRACE2(("Unknown IRQ, use IRQ table from cmd line !\n")); - for (i = 0, irq_found = FALSE; - i < MAXHA && irq[i] != 0xff; ++i) { - if (irq[i]==10 || irq[i]==11 || irq[i]==12 || irq[i]==14) { - irq_found = TRUE; - break; - } - } - if (irq_found) { - ha->irq = irq[i]; - irq[i] = 0; - printk("GDT-EISA: Can not detect controller IRQ,\n"); - printk("Use IRQ setting from command line (IRQ = %d)\n", - ha->irq); - } else { - printk("GDT-EISA: Initialization error (unknown IRQ), Enable\n"); - printk("the controller BIOS or use command line parameters\n"); - return 0; - } - } - } else { - eisacf = inb(eisa_adr+EISAREG) & 7; - if (eisacf > 4) /* level triggered */ - eisacf -= 4; - ha->irq = gdth_irq_tab[eisacf]; - ha->oem_id = OEM_ID_ICP; - ha->type = GDT_EISA; - ha->stype = id; - } - - ha->dma64_support = 0; - return 1; -} -#endif /* CONFIG_EISA */ - -#ifdef CONFIG_ISA -static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha) -{ - register gdt2_dpram_str __iomem *dp2_ptr; - int i; - u8 irq_drq,prot_ver; - u32 retries; - - TRACE(("gdth_init_isa() bios adr. 
%x\n",bios_adr)); - - ha->brd = ioremap(bios_adr, sizeof(gdt2_dpram_str)); - if (ha->brd == NULL) { - printk("GDT-ISA: Initialization error (DPMEM remap error)\n"); - return 0; - } - dp2_ptr = ha->brd; - writeb(1, &dp2_ptr->io.memlock); /* switch off write protection */ - /* reset interface area */ - memset_io(&dp2_ptr->u, 0, sizeof(dp2_ptr->u)); - if (readl(&dp2_ptr->u) != 0) { - printk("GDT-ISA: Initialization error (DPMEM write error)\n"); - iounmap(ha->brd); - return 0; - } - - /* disable board interrupts, read DRQ and IRQ */ - writeb(0xff, &dp2_ptr->io.irqdel); - writeb(0x00, &dp2_ptr->io.irqen); - writeb(0x00, &dp2_ptr->u.ic.S_Status); - writeb(0x00, &dp2_ptr->u.ic.Cmd_Index); - - irq_drq = readb(&dp2_ptr->io.rq); - for (i=0; i<3; ++i) { - if ((irq_drq & 1)==0) - break; - irq_drq >>= 1; - } - ha->drq = gdth_drq_tab[i]; - - irq_drq = readb(&dp2_ptr->io.rq) >> 3; - for (i=1; i<5; ++i) { - if ((irq_drq & 1)==0) - break; - irq_drq >>= 1; - } - ha->irq = gdth_irq_tab[i]; - - /* deinitialize services */ - writel(bios_adr, &dp2_ptr->u.ic.S_Info[0]); - writeb(0xff, &dp2_ptr->u.ic.S_Cmd_Indx); - writeb(0, &dp2_ptr->io.event); - retries = INIT_RETRIES; - gdth_delay(20); - while (readb(&dp2_ptr->u.ic.S_Status) != 0xff) { - if (--retries == 0) { - printk("GDT-ISA: Initialization error (DEINIT failed)\n"); - iounmap(ha->brd); - return 0; - } - gdth_delay(1); - } - prot_ver = (u8)readl(&dp2_ptr->u.ic.S_Info[0]); - writeb(0, &dp2_ptr->u.ic.Status); - writeb(0xff, &dp2_ptr->io.irqdel); - if (prot_ver != PROTOCOL_VERSION) { - printk("GDT-ISA: Illegal protocol version\n"); - iounmap(ha->brd); - return 0; - } - - ha->oem_id = OEM_ID_ICP; - ha->type = GDT_ISA; - ha->ic_all_size = sizeof(dp2_ptr->u); - ha->stype= GDT2_ID; - ha->brd_phys = bios_adr >> 4; - - /* special request to controller BIOS */ - writel(0x00, &dp2_ptr->u.ic.S_Info[0]); - writel(0x00, &dp2_ptr->u.ic.S_Info[1]); - writel(0x01, &dp2_ptr->u.ic.S_Info[2]); - writel(0x00, &dp2_ptr->u.ic.S_Info[3]); - writeb(0xfe, &dp2_ptr->u.ic.S_Cmd_Indx); - writeb(0, &dp2_ptr->io.event); - retries = INIT_RETRIES; - gdth_delay(20); - while (readb(&dp2_ptr->u.ic.S_Status) != 0xfe) { - if (--retries == 0) { - printk("GDT-ISA: Initialization error\n"); - iounmap(ha->brd); - return 0; - } - gdth_delay(1); - } - writeb(0, &dp2_ptr->u.ic.Status); - writeb(0xff, &dp2_ptr->io.irqdel); - - ha->dma64_support = 0; - return 1; -} -#endif /* CONFIG_ISA */ - -#ifdef CONFIG_PCI static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, gdth_ha_str *ha) { @@ -1228,30 +890,19 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr, return 1; } -#endif /* CONFIG_PCI */ /* controller protocol functions */ static void gdth_enable_int(gdth_ha_str *ha) { unsigned long flags; - gdt2_dpram_str __iomem *dp2_ptr; gdt6_dpram_str __iomem *dp6_ptr; gdt6m_dpram_str __iomem *dp6m_ptr; TRACE(("gdth_enable_int() hanum %d\n",ha->hanum)); spin_lock_irqsave(&ha->smp_lock, flags); - if (ha->type == GDT_EISA) { - outb(0xff, ha->bmic + EDOORREG); - outb(0xff, ha->bmic + EDENABREG); - outb(0x01, ha->bmic + EINTENABREG); - } else if (ha->type == GDT_ISA) { - dp2_ptr = ha->brd; - writeb(1, &dp2_ptr->io.irqdel); - writeb(0, &dp2_ptr->u.ic.Cmd_Index); - writeb(1, &dp2_ptr->io.irqen); - } else if (ha->type == GDT_PCI) { + if (ha->type == GDT_PCI) { dp6_ptr = ha->brd; writeb(1, &dp6_ptr->io.irqdel); writeb(0, &dp6_ptr->u.ic.Cmd_Index); @@ -1275,12 +926,7 @@ static u8 gdth_get_status(gdth_ha_str *ha) TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count)); - 
if (ha->type == GDT_EISA) - IStatus = inb((u16)ha->bmic + EDOORREG); - else if (ha->type == GDT_ISA) - IStatus = - readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index); - else if (ha->type == GDT_PCI) + if (ha->type == GDT_PCI) IStatus = readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index); else if (ha->type == GDT_PCINEW) @@ -1298,11 +944,7 @@ static int gdth_test_busy(gdth_ha_str *ha) TRACE(("gdth_test_busy() hanum %d\n", ha->hanum)); - if (ha->type == GDT_EISA) - gdtsema0 = (int)inb(ha->bmic + SEMA0REG); - else if (ha->type == GDT_ISA) - gdtsema0 = (int)readb(&((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0); - else if (ha->type == GDT_PCI) + if (ha->type == GDT_PCI) gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0); else if (ha->type == GDT_PCINEW) gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg)); @@ -1336,11 +978,7 @@ static void gdth_set_sema0(gdth_ha_str *ha) { TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum)); - if (ha->type == GDT_EISA) { - outb(1, ha->bmic + SEMA0REG); - } else if (ha->type == GDT_ISA) { - writeb(1, &((gdt2_dpram_str __iomem *)ha->brd)->u.ic.Sema0); - } else if (ha->type == GDT_PCI) { + if (ha->type == GDT_PCI) { writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0); } else if (ha->type == GDT_PCINEW) { outb(1, PTR2USHORT(&ha->plx->sema0_reg)); @@ -1356,7 +994,6 @@ static void gdth_copy_command(gdth_ha_str *ha) register gdt6m_dpram_str __iomem *dp6m_ptr; register gdt6c_dpram_str __iomem *dp6c_ptr; gdt6_dpram_str __iomem *dp6_ptr; - gdt2_dpram_str __iomem *dp2_ptr; u16 cp_count,dp_offset,cmd_no; TRACE(("gdth_copy_command() hanum %d\n", ha->hanum)); @@ -1367,8 +1004,6 @@ static void gdth_copy_command(gdth_ha_str *ha) cmd_ptr = ha->pccb; ++ha->cmd_cnt; - if (ha->type == GDT_EISA) - return; /* no DPMEM, no copy */ /* set cpcount dword aligned */ if (cp_count & 3) @@ -1377,14 +1012,7 @@ static void gdth_copy_command(gdth_ha_str *ha) ha->cmd_offs_dpmem += cp_count; /* set offset and service, copy command to DPMEM */ - if (ha->type == GDT_ISA) { - dp2_ptr = ha->brd; - writew(dp_offset + DPMEM_COMMAND_OFFSET, - &dp2_ptr->u.ic.comm_queue[cmd_no].offset); - writew((u16)cmd_ptr->Service, - &dp2_ptr->u.ic.comm_queue[cmd_no].serv_id); - memcpy_toio(&dp2_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); - } else if (ha->type == GDT_PCI) { + if (ha->type == GDT_PCI) { dp6_ptr = ha->brd; writew(dp_offset + DPMEM_COMMAND_OFFSET, &dp6_ptr->u.ic.comm_queue[cmd_no].offset); @@ -1430,13 +1058,7 @@ static void gdth_release_event(gdth_ha_str *ha) if (ha->pccb->OpCode == GDT_INIT) ha->pccb->Service |= 0x80; - if (ha->type == GDT_EISA) { - if (ha->pccb->OpCode == GDT_INIT) /* store DMA buffer */ - outl(ha->ccb_phys, ha->bmic + MAILBOXREG); - outb(ha->pccb->Service, ha->bmic + LDOORREG); - } else if (ha->type == GDT_ISA) { - writeb(0, &((gdt2_dpram_str __iomem *)ha->brd)->io.event); - } else if (ha->type == GDT_PCI) { + if (ha->type == GDT_PCI) { writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event); } else if (ha->type == GDT_PCINEW) { outb(1, PTR2USHORT(&ha->plx->ldoor_reg)); @@ -1560,15 +1182,7 @@ static int gdth_search_drives(gdth_ha_str *ha) gdth_arcdl_str *alst; gdth_alist_str *alst2; gdth_oem_str_ioctl *oemstr; -#ifdef INT_COAL - gdth_perf_modes *pmod; -#endif -#ifdef GDTH_RTC - u8 rtc[12]; - unsigned long flags; -#endif - TRACE(("gdth_search_drives() hanum %d\n", ha->hanum)); ok = 0; @@ -1588,29 +1202,6 @@ static int gdth_search_drives(gdth_ha_str *ha) } TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n")); -#ifdef 
GDTH_RTC - /* read realtime clock info, send to controller */ - /* 1. wait for the falling edge of update flag */ - spin_lock_irqsave(&rtc_lock, flags); - for (j = 0; j < 1000000; ++j) - if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) - break; - for (j = 0; j < 1000000; ++j) - if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) - break; - /* 2. read info */ - do { - for (j = 0; j < 12; ++j) - rtc[j] = CMOS_READ(j); - } while (rtc[0] != CMOS_READ(0)); - spin_unlock_irqrestore(&rtc_lock, flags); - TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(u32 *)&rtc[0], - *(u32 *)&rtc[4], *(u32 *)&rtc[8])); - /* 3. send to controller firmware */ - gdth_internal_cmd(ha, SCREENSERVICE, GDT_REALTIME, *(u32 *)&rtc[0], - *(u32 *)&rtc[4], *(u32 *)&rtc[8]); -#endif - /* unfreeze all IOs */ gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0); @@ -1633,35 +1224,6 @@ static int gdth_search_drives(gdth_ha_str *ha) cdev_cnt = (u16)ha->info; ha->fw_vers = ha->service; -#ifdef INT_COAL - if (ha->type == GDT_PCIMPR) { - /* set perf. modes */ - pmod = (gdth_perf_modes *)ha->pscratch; - pmod->version = 1; - pmod->st_mode = 1; /* enable one status buffer */ - *((u64 *)&pmod->st_buff_addr1) = ha->coal_stat_phys; - pmod->st_buff_indx1 = COALINDEX; - pmod->st_buff_addr2 = 0; - pmod->st_buff_u_addr2 = 0; - pmod->st_buff_indx2 = 0; - pmod->st_buff_size = sizeof(gdth_coal_status) * MAXOFFSETS; - pmod->cmd_mode = 0; // disable all cmd buffers - pmod->cmd_buff_addr1 = 0; - pmod->cmd_buff_u_addr1 = 0; - pmod->cmd_buff_indx1 = 0; - pmod->cmd_buff_addr2 = 0; - pmod->cmd_buff_u_addr2 = 0; - pmod->cmd_buff_indx2 = 0; - pmod->cmd_buff_size = 0; - pmod->reserved1 = 0; - pmod->reserved2 = 0; - if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, SET_PERF_MODES, - INVALID_CHANNEL,sizeof(gdth_perf_modes))) { - printk("GDT-HA %d: Interrupt coalescing activated\n", ha->hanum); - } - } -#endif - /* detect number of buses - try new IOCTL */ iocr = (gdth_raw_iochan_str *)ha->pscratch; iocr->hdr.version = 0xffffffff; @@ -2433,9 +1995,6 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n", scp->cmnd[0],scp->cmd_len,hdrive)); - if (ha->type==GDT_EISA && ha->cmd_cnt>0) - return 0; - mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE; /* test for READ_16, WRITE_16 if !mode64 ? --- not required, should not occur due to error return on @@ -2518,9 +2077,9 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, if (scsi_bufflen(scp)) { cmndinfo->dma_dir = (read_write == 1 ? 
- PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); - sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), - cmndinfo->dma_dir); + DMA_TO_DEVICE : DMA_FROM_DEVICE); + sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp), + scsi_sg_count(scp), cmndinfo->dma_dir); if (mode64) { struct scatterlist *sl; @@ -2528,12 +2087,6 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, cmdp->u.cache64.sg_canz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl); -#ifdef GDTH_DMA_STATISTICS - if (cmdp->u.cache64.sg_lst[i].sg_ptr > (u64)0xffffffff) - ha->dma64_cnt++; - else - ha->dma32_cnt++; -#endif cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl); } } else { @@ -2543,9 +2096,6 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, cmdp->u.cache.sg_canz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl); -#ifdef GDTH_DMA_STATISTICS - ha->dma32_cnt++; -#endif cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl); } } @@ -2603,8 +2153,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b) dma_addr_t sense_paddr; int cmd_index, sgcnt, mode64; u8 t,l; - struct page *page; - unsigned long offset; struct gdth_cmndinfo *cmndinfo; t = scp->device->id; @@ -2613,9 +2161,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b) TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n", scp->cmnd[0],b,t,l)); - if (ha->type==GDT_EISA && ha->cmd_cnt>0) - return 0; - mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE; cmdp->Service = SCSIRAWSERVICE; @@ -2649,10 +2194,8 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b) } } else { - page = virt_to_page(scp->sense_buffer); - offset = (unsigned long)scp->sense_buffer & ~PAGE_MASK; - sense_paddr = pci_map_page(ha->pdev,page,offset, - 16,PCI_DMA_FROMDEVICE); + sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16, + DMA_FROM_DEVICE); cmndinfo->sense_paddr = sense_paddr; cmdp->OpCode = GDT_WRITE; /* always */ @@ -2693,9 +2236,9 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b) } if (scsi_bufflen(scp)) { - cmndinfo->dma_dir = PCI_DMA_BIDIRECTIONAL; - sgcnt = pci_map_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), - cmndinfo->dma_dir); + cmndinfo->dma_dir = DMA_BIDIRECTIONAL; + sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp), + scsi_sg_count(scp), cmndinfo->dma_dir); if (mode64) { struct scatterlist *sl; @@ -2703,12 +2246,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b) cmdp->u.raw64.sg_ranz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl); -#ifdef GDTH_DMA_STATISTICS - if (cmdp->u.raw64.sg_lst[i].sg_ptr > (u64)0xffffffff) - ha->dma64_cnt++; - else - ha->dma32_cnt++; -#endif cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl); } } else { @@ -2718,9 +2255,6 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b) cmdp->u.raw.sg_ranz = sgcnt; scsi_for_each_sg(scp, sl, sgcnt, i) { cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl); -#ifdef GDTH_DMA_STATISTICS - ha->dma32_cnt++; -#endif cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl); } } @@ -2778,9 +2312,6 @@ static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp) cmdp= ha->pccb; TRACE2(("gdth_special_cmd(): ")); - if (ha->type==GDT_EISA && ha->cmd_cnt>0) - return 0; - *cmdp = *cmndinfo->internal_cmd_str; cmdp->RequestBuffer = scp; @@ -2959,18 +2490,11 @@ static 
irqreturn_t __gdth_interrupt(gdth_ha_str *ha, { gdt6m_dpram_str __iomem *dp6m_ptr = NULL; gdt6_dpram_str __iomem *dp6_ptr; - gdt2_dpram_str __iomem *dp2_ptr; struct scsi_cmnd *scp; int rval, i; u8 IStatus; u16 Service; unsigned long flags = 0; -#ifdef INT_COAL - int coalesced = FALSE; - int next = FALSE; - gdth_coal_status *pcs = NULL; - int act_int_coal = 0; -#endif TRACE(("gdth_interrupt() IRQ %d\n", ha->irq)); @@ -2997,53 +2521,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, ++act_ints; #endif -#ifdef INT_COAL - /* See if the fw is returning coalesced status */ - if (IStatus == COALINDEX) { - /* Coalesced status. Setup the initial status - buffer pointer and flags */ - pcs = ha->coal_stat; - coalesced = TRUE; - next = TRUE; - } - - do { - if (coalesced) { - /* For coalesced requests all status - information is found in the status buffer */ - IStatus = (u8)(pcs->status & 0xff); - } -#endif - - if (ha->type == GDT_EISA) { - if (IStatus & 0x80) { /* error flag */ - IStatus &= ~0x80; - ha->status = inw(ha->bmic + MAILBOXREG+8); - TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); - } else /* no error */ - ha->status = S_OK; - ha->info = inl(ha->bmic + MAILBOXREG+12); - ha->service = inw(ha->bmic + MAILBOXREG+10); - ha->info2 = inl(ha->bmic + MAILBOXREG+4); - - outb(0xff, ha->bmic + EDOORREG); /* acknowledge interrupt */ - outb(0x00, ha->bmic + SEMA1REG); /* reset status semaphore */ - } else if (ha->type == GDT_ISA) { - dp2_ptr = ha->brd; - if (IStatus & 0x80) { /* error flag */ - IStatus &= ~0x80; - ha->status = readw(&dp2_ptr->u.ic.Status); - TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); - } else /* no error */ - ha->status = S_OK; - ha->info = readl(&dp2_ptr->u.ic.Info[0]); - ha->service = readw(&dp2_ptr->u.ic.Service); - ha->info2 = readl(&dp2_ptr->u.ic.Info[1]); - - writeb(0xff, &dp2_ptr->io.irqdel); /* acknowledge interrupt */ - writeb(0, &dp2_ptr->u.ic.Cmd_Index);/* reset command index */ - writeb(0, &dp2_ptr->io.Sema1); /* reset status semaphore */ - } else if (ha->type == GDT_PCI) { + if (ha->type == GDT_PCI) { dp6_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; @@ -3075,28 +2553,15 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, dp6m_ptr = ha->brd; if (IStatus & 0x80) { /* error flag */ IStatus &= ~0x80; -#ifdef INT_COAL - if (coalesced) - ha->status = pcs->ext_status & 0xffff; - else -#endif - ha->status = readw(&dp6m_ptr->i960r.status); + ha->status = readw(&dp6m_ptr->i960r.status); TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status)); } else /* no error */ ha->status = S_OK; -#ifdef INT_COAL - /* get information */ - if (coalesced) { - ha->info = pcs->info0; - ha->info2 = pcs->info1; - ha->service = (pcs->ext_status >> 16) & 0xffff; - } else -#endif - { - ha->info = readl(&dp6m_ptr->i960r.info[0]); - ha->service = readw(&dp6m_ptr->i960r.service); - ha->info2 = readl(&dp6m_ptr->i960r.info[1]); - } + + ha->info = readl(&dp6m_ptr->i960r.info[0]); + ha->service = readw(&dp6m_ptr->i960r.service); + ha->info2 = readl(&dp6m_ptr->i960r.info[1]); + /* event string */ if (IStatus == ASYNCINDEX) { if (ha->service != SCREENSERVICE && @@ -3111,15 +2576,8 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, } } } -#ifdef INT_COAL - /* Make sure that non coalesced interrupts get cleared - before being handled by gdth_async_event/gdth_sync_event */ - if (!coalesced) -#endif - { - writeb(0xff, &dp6m_ptr->i960r.edoor_reg); - writeb(0, &dp6m_ptr->i960r.sema1_reg); - } + writeb(0xff, &dp6m_ptr->i960r.edoor_reg); + 
writeb(0, &dp6m_ptr->i960r.sema1_reg); } else { TRACE2(("gdth_interrupt() unknown controller type\n")); if (!gdth_polling) @@ -3182,31 +2640,6 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, gdth_scsi_done(scp); } -#ifdef INT_COAL - if (coalesced) { - /* go to the next status in the status buffer */ - ++pcs; -#ifdef GDTH_STATISTICS - ++act_int_coal; - if (act_int_coal > max_int_coal) { - max_int_coal = act_int_coal; - printk("GDT: max_int_coal = %d\n",(u16)max_int_coal); - } -#endif - /* see if there is another status */ - if (pcs->status == 0) - /* Stop the coalesce loop */ - next = FALSE; - } - } while (next); - - /* coalescing only for new GDT_PCIMPR controllers available */ - if (ha->type == GDT_PCIMPR && coalesced) { - writeb(0xff, &dp6m_ptr->i960r.edoor_reg); - writeb(0, &dp6m_ptr->i960r.sema1_reg); - } -#endif - gdth_next(ha); return IRQ_HANDLED; } @@ -3313,12 +2746,12 @@ static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, return 2; } if (scsi_bufflen(scp)) - pci_unmap_sg(ha->pdev, scsi_sglist(scp), scsi_sg_count(scp), + dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp), cmndinfo->dma_dir); if (cmndinfo->sense_paddr) - pci_unmap_page(ha->pdev, cmndinfo->sense_paddr, 16, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16, + DMA_FROM_DEVICE); if (ha->status == S_OK) { cmndinfo->status = S_OK; @@ -3610,12 +3043,7 @@ static int gdth_async_event(gdth_ha_str *ha) + sizeof(u64); ha->cmd_cnt = 0; gdth_copy_command(ha); - if (ha->type == GDT_EISA) - printk("[EISA slot %d] ",(u16)ha->brd_phys); - else if (ha->type == GDT_ISA) - printk("[DPMEM 0x%4X] ",(u16)ha->brd_phys); - else - printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8), + printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8), (u16)((ha->brd_phys>>3)&0x1f)); gdth_release_event(ha); } @@ -3756,23 +3184,12 @@ static inline void gdth_timer_init(void) static void __init internal_setup(char *str,int *ints) { - int i, argc; + int i; char *cur_str, *argv; TRACE2(("internal_setup() str %s ints[0] %d\n", str ? str:"NULL", ints ? 
ints[0]:0)); - /* read irq[] from ints[] */ - if (ints) { - argc = ints[0]; - if (argc > 0) { - if (argc > MAXHA) - argc = MAXHA; - for (i = 0; i < argc; ++i) - irq[i] = ints[i+1]; - } - } - /* analyse string */ argv = str; while (argv && (cur_str = strchr(argv, ':'))) { @@ -3799,8 +3216,6 @@ static void __init internal_setup(char *str,int *ints) rescan = val; else if (!strncmp(argv, "shared_access:", 14)) shared_access = val; - else if (!strncmp(argv, "probe_eisa_isa:", 15)) - probe_eisa_isa = val; else if (!strncmp(argv, "reserve_list:", 13)) { reserve_list[0] = val; for (i = 1; i < MAX_RES_ARGS; i++) { @@ -3847,18 +3262,7 @@ static const char *gdth_ctr_name(gdth_ha_str *ha) { TRACE2(("gdth_ctr_name()\n")); - if (ha->type == GDT_EISA) { - switch (ha->stype) { - case GDT3_ID: - return("GDT3000/3020"); - case GDT3A_ID: - return("GDT3000A/3020A/3050A"); - case GDT3B_ID: - return("GDT3000B/3010A"); - } - } else if (ha->type == GDT_ISA) { - return("GDT2000/2020"); - } else if (ha->type == GDT_PCI) { + if (ha->type == GDT_PCI) { switch (ha->pdev->device) { case PCI_DEVICE_ID_VORTEX_GDT60x0: return("GDT6000/6020/6050"); @@ -4155,131 +3559,147 @@ static int ioc_resetdrv(void __user *arg, char *cmnd) return 0; } -static int ioc_general(void __user *arg, char *cmnd) +static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen, + u64 paddr) { - gdth_ioctl_general gen; - char *buf = NULL; - u64 paddr; - gdth_ha_str *ha; - int rval; - - if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general))) - return -EFAULT; - ha = gdth_find_ha(gen.ionode); - if (!ha) - return -EFAULT; - - if (gen.data_len > INT_MAX) - return -EINVAL; - if (gen.sense_len > INT_MAX) - return -EINVAL; - if (gen.data_len + gen.sense_len > INT_MAX) - return -EINVAL; - - if (gen.data_len + gen.sense_len != 0) { - if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len, - FALSE, &paddr))) - return -EFAULT; - if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general), - gen.data_len + gen.sense_len)) { - gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); - return -EFAULT; - } + if (ha->cache_feat & GDT_64BIT) { + /* copy elements from 32-bit IOCTL structure */ + gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt; + gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo; + gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo; + + if (ha->cache_feat & SCATTER_GATHER) { + gen->command.u.cache64.DestAddr = (u64)-1; + gen->command.u.cache64.sg_canz = 1; + gen->command.u.cache64.sg_lst[0].sg_ptr = paddr; + gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len; + gen->command.u.cache64.sg_lst[1].sg_len = 0; + } else { + gen->command.u.cache64.DestAddr = paddr; + gen->command.u.cache64.sg_canz = 0; + } + } else { + if (ha->cache_feat & SCATTER_GATHER) { + gen->command.u.cache.DestAddr = 0xffffffff; + gen->command.u.cache.sg_canz = 1; + gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr; + gen->command.u.cache.sg_lst[0].sg_len = gen->data_len; + gen->command.u.cache.sg_lst[1].sg_len = 0; + } else { + gen->command.u.cache.DestAddr = paddr; + gen->command.u.cache.sg_canz = 0; + } + } +} - if (gen.command.OpCode == GDT_IOCTL) { - gen.command.u.ioctl.p_param = paddr; - } else if (gen.command.Service == CACHESERVICE) { - if (ha->cache_feat & GDT_64BIT) { - /* copy elements from 32-bit IOCTL structure */ - gen.command.u.cache64.BlockCnt = gen.command.u.cache.BlockCnt; - gen.command.u.cache64.BlockNo = gen.command.u.cache.BlockNo; - gen.command.u.cache64.DeviceNo = 
gen.command.u.cache.DeviceNo; - /* addresses */ - if (ha->cache_feat & SCATTER_GATHER) { - gen.command.u.cache64.DestAddr = (u64)-1; - gen.command.u.cache64.sg_canz = 1; - gen.command.u.cache64.sg_lst[0].sg_ptr = paddr; - gen.command.u.cache64.sg_lst[0].sg_len = gen.data_len; - gen.command.u.cache64.sg_lst[1].sg_len = 0; - } else { - gen.command.u.cache64.DestAddr = paddr; - gen.command.u.cache64.sg_canz = 0; - } - } else { - if (ha->cache_feat & SCATTER_GATHER) { - gen.command.u.cache.DestAddr = 0xffffffff; - gen.command.u.cache.sg_canz = 1; - gen.command.u.cache.sg_lst[0].sg_ptr = (u32)paddr; - gen.command.u.cache.sg_lst[0].sg_len = gen.data_len; - gen.command.u.cache.sg_lst[1].sg_len = 0; - } else { - gen.command.u.cache.DestAddr = paddr; - gen.command.u.cache.sg_canz = 0; - } - } - } else if (gen.command.Service == SCSIRAWSERVICE) { - if (ha->raw_feat & GDT_64BIT) { - /* copy elements from 32-bit IOCTL structure */ - char cmd[16]; - gen.command.u.raw64.sense_len = gen.command.u.raw.sense_len; - gen.command.u.raw64.bus = gen.command.u.raw.bus; - gen.command.u.raw64.lun = gen.command.u.raw.lun; - gen.command.u.raw64.target = gen.command.u.raw.target; - memcpy(cmd, gen.command.u.raw.cmd, 16); - memcpy(gen.command.u.raw64.cmd, cmd, 16); - gen.command.u.raw64.clen = gen.command.u.raw.clen; - gen.command.u.raw64.sdlen = gen.command.u.raw.sdlen; - gen.command.u.raw64.direction = gen.command.u.raw.direction; - /* addresses */ - if (ha->raw_feat & SCATTER_GATHER) { - gen.command.u.raw64.sdata = (u64)-1; - gen.command.u.raw64.sg_ranz = 1; - gen.command.u.raw64.sg_lst[0].sg_ptr = paddr; - gen.command.u.raw64.sg_lst[0].sg_len = gen.data_len; - gen.command.u.raw64.sg_lst[1].sg_len = 0; - } else { - gen.command.u.raw64.sdata = paddr; - gen.command.u.raw64.sg_ranz = 0; +static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen, + u64 paddr) +{ + if (ha->raw_feat & GDT_64BIT) { + /* copy elements from 32-bit IOCTL structure */ + char cmd[16]; + + gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len; + gen->command.u.raw64.bus = gen->command.u.raw.bus; + gen->command.u.raw64.lun = gen->command.u.raw.lun; + gen->command.u.raw64.target = gen->command.u.raw.target; + memcpy(cmd, gen->command.u.raw.cmd, 16); + memcpy(gen->command.u.raw64.cmd, cmd, 16); + gen->command.u.raw64.clen = gen->command.u.raw.clen; + gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen; + gen->command.u.raw64.direction = gen->command.u.raw.direction; + + /* addresses */ + if (ha->raw_feat & SCATTER_GATHER) { + gen->command.u.raw64.sdata = (u64)-1; + gen->command.u.raw64.sg_ranz = 1; + gen->command.u.raw64.sg_lst[0].sg_ptr = paddr; + gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len; + gen->command.u.raw64.sg_lst[1].sg_len = 0; + } else { + gen->command.u.raw64.sdata = paddr; + gen->command.u.raw64.sg_ranz = 0; } - gen.command.u.raw64.sense_data = paddr + gen.data_len; - } else { - if (ha->raw_feat & SCATTER_GATHER) { - gen.command.u.raw.sdata = 0xffffffff; - gen.command.u.raw.sg_ranz = 1; - gen.command.u.raw.sg_lst[0].sg_ptr = (u32)paddr; - gen.command.u.raw.sg_lst[0].sg_len = gen.data_len; - gen.command.u.raw.sg_lst[1].sg_len = 0; - } else { - gen.command.u.raw.sdata = paddr; - gen.command.u.raw.sg_ranz = 0; + + gen->command.u.raw64.sense_data = paddr + gen->data_len; + } else { + if (ha->raw_feat & SCATTER_GATHER) { + gen->command.u.raw.sdata = 0xffffffff; + gen->command.u.raw.sg_ranz = 1; + gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr; + gen->command.u.raw.sg_lst[0].sg_len = gen->data_len; + 
gen->command.u.raw.sg_lst[1].sg_len = 0; + } else { + gen->command.u.raw.sdata = paddr; + gen->command.u.raw.sg_ranz = 0; } - gen.command.u.raw.sense_data = (u32)paddr + gen.data_len; - } - } else { - gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); - return -EFAULT; - } - } - rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, &gen.info); - if (rval < 0) { - gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); - return rval; - } - gen.status = rval; + gen->command.u.raw.sense_data = (u32)paddr + gen->data_len; + } +} - if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, - gen.data_len + gen.sense_len)) { - gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); - return -EFAULT; - } - if (copy_to_user(arg, &gen, - sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) { - gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); - return -EFAULT; - } - gdth_ioctl_free(ha, gen.data_len+gen.sense_len, buf, paddr); - return 0; +static int ioc_general(void __user *arg, char *cmnd) +{ + gdth_ioctl_general gen; + gdth_ha_str *ha; + char *buf = NULL; + dma_addr_t paddr; + int rval; + + if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general))) + return -EFAULT; + ha = gdth_find_ha(gen.ionode); + if (!ha) + return -EFAULT; + + if (gen.data_len > INT_MAX) + return -EINVAL; + if (gen.sense_len > INT_MAX) + return -EINVAL; + if (gen.data_len + gen.sense_len > INT_MAX) + return -EINVAL; + + if (gen.data_len + gen.sense_len > 0) { + buf = dma_alloc_coherent(&ha->pdev->dev, + gen.data_len + gen.sense_len, &paddr, + GFP_KERNEL); + if (!buf) + return -EFAULT; + + rval = -EFAULT; + if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general), + gen.data_len + gen.sense_len)) + goto out_free_buf; + + if (gen.command.OpCode == GDT_IOCTL) + gen.command.u.ioctl.p_param = paddr; + else if (gen.command.Service == CACHESERVICE) + gdth_ioc_cacheservice(ha, &gen, paddr); + else if (gen.command.Service == SCSIRAWSERVICE) + gdth_ioc_scsiraw(ha, &gen, paddr); + else + goto out_free_buf; + } + + rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout, + &gen.info); + if (rval < 0) + goto out_free_buf; + gen.status = rval; + + rval = -EFAULT; + if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf, + gen.data_len + gen.sense_len)) + goto out_free_buf; + if (copy_to_user(arg, &gen, + sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str))) + goto out_free_buf; + + rval = 0; +out_free_buf: + dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len, buf, + paddr); + return rval; } static int ioc_hdrlist(void __user *arg, char *cmnd) @@ -4514,22 +3934,17 @@ static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) (NULL == (ha = gdth_find_ha(ctrt.ionode)))) return -EFAULT; - if (ha->type == GDT_ISA || ha->type == GDT_EISA) { - ctrt.type = (u8)((ha->stype>>20) - 0x10); + if (ha->type != GDT_PCIMPR) { + ctrt.type = (u8)((ha->stype<<4) + 6); } else { - if (ha->type != GDT_PCIMPR) { - ctrt.type = (u8)((ha->stype<<4) + 6); - } else { - ctrt.type = - (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe); - if (ha->stype >= 0x300) - ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device; - else - ctrt.ext_type = 0x6000 | ha->stype; - } - ctrt.device_id = ha->pdev->device; - ctrt.sub_device_id = ha->pdev->subsystem_device; + ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 
0xfd : 0xfe); + if (ha->stype >= 0x300) + ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device; + else + ctrt.ext_type = 0x6000 | ha->stype; } + ctrt.device_id = ha->pdev->device; + ctrt.sub_device_id = ha->pdev->subsystem_device; ctrt.info = ha->brd_phys; ctrt.oem_id = ha->oem_id; if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype))) @@ -4683,272 +4098,6 @@ static struct scsi_host_template gdth_template = { .no_write_same = 1, }; -#ifdef CONFIG_ISA -static int __init gdth_isa_probe_one(u32 isa_bios) -{ - struct Scsi_Host *shp; - gdth_ha_str *ha; - dma_addr_t scratch_dma_handle = 0; - int error, i; - - if (!gdth_search_isa(isa_bios)) - return -ENXIO; - - shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str)); - if (!shp) - return -ENOMEM; - ha = shost_priv(shp); - - error = -ENODEV; - if (!gdth_init_isa(isa_bios,ha)) - goto out_host_put; - - /* controller found and initialized */ - printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n", - isa_bios, ha->irq, ha->drq); - - error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha); - if (error) { - printk("GDT-ISA: Unable to allocate IRQ\n"); - goto out_host_put; - } - - error = request_dma(ha->drq, "gdth"); - if (error) { - printk("GDT-ISA: Unable to allocate DMA channel\n"); - goto out_free_irq; - } - - set_dma_mode(ha->drq,DMA_MODE_CASCADE); - enable_dma(ha->drq); - shp->unchecked_isa_dma = 1; - shp->irq = ha->irq; - shp->dma_channel = ha->drq; - - ha->hanum = gdth_ctr_count++; - ha->shost = shp; - - ha->pccb = &ha->cmdext; - ha->ccb_phys = 0L; - ha->pdev = NULL; - - error = -ENOMEM; - - ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH, - &scratch_dma_handle); - if (!ha->pscratch) - goto out_dec_counters; - ha->scratch_phys = scratch_dma_handle; - - ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str), - &scratch_dma_handle); - if (!ha->pmsg) - goto out_free_pscratch; - ha->msg_phys = scratch_dma_handle; - -#ifdef INT_COAL - ha->coal_stat = pci_alloc_consistent(ha->pdev, - sizeof(gdth_coal_status) * MAXOFFSETS, - &scratch_dma_handle); - if (!ha->coal_stat) - goto out_free_pmsg; - ha->coal_stat_phys = scratch_dma_handle; -#endif - - ha->scratch_busy = FALSE; - ha->req_first = NULL; - ha->tid_cnt = MAX_HDRIVES; - if (max_ids > 0 && max_ids < ha->tid_cnt) - ha->tid_cnt = max_ids; - for (i = 0; i < GDTH_MAXCMDS; ++i) - ha->cmd_tab[i].cmnd = UNUSED_CMND; - ha->scan_mode = rescan ? 
0x10 : 0; - - error = -ENODEV; - if (!gdth_search_drives(ha)) { - printk("GDT-ISA: Error during device scan\n"); - goto out_free_coal_stat; - } - - if (hdr_channel < 0 || hdr_channel > ha->bus_cnt) - hdr_channel = ha->bus_cnt; - ha->virt_bus = hdr_channel; - - if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) - shp->max_cmd_len = 16; - - shp->max_id = ha->tid_cnt; - shp->max_lun = MAXLUN; - shp->max_channel = ha->bus_cnt; - - spin_lock_init(&ha->smp_lock); - gdth_enable_int(ha); - - error = scsi_add_host(shp, NULL); - if (error) - goto out_free_coal_stat; - list_add_tail(&ha->list, &gdth_instances); - gdth_timer_init(); - - scsi_scan_host(shp); - - return 0; - - out_free_coal_stat: -#ifdef INT_COAL - pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, - ha->coal_stat, ha->coal_stat_phys); - out_free_pmsg: -#endif - pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), - ha->pmsg, ha->msg_phys); - out_free_pscratch: - pci_free_consistent(ha->pdev, GDTH_SCRATCH, - ha->pscratch, ha->scratch_phys); - out_dec_counters: - gdth_ctr_count--; - out_free_irq: - free_irq(ha->irq, ha); - out_host_put: - scsi_host_put(shp); - return error; -} -#endif /* CONFIG_ISA */ - -#ifdef CONFIG_EISA -static int __init gdth_eisa_probe_one(u16 eisa_slot) -{ - struct Scsi_Host *shp; - gdth_ha_str *ha; - dma_addr_t scratch_dma_handle = 0; - int error, i; - - if (!gdth_search_eisa(eisa_slot)) - return -ENXIO; - - shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str)); - if (!shp) - return -ENOMEM; - ha = shost_priv(shp); - - error = -ENODEV; - if (!gdth_init_eisa(eisa_slot,ha)) - goto out_host_put; - - /* controller found and initialized */ - printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n", - eisa_slot >> 12, ha->irq); - - error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha); - if (error) { - printk("GDT-EISA: Unable to allocate IRQ\n"); - goto out_host_put; - } - - shp->unchecked_isa_dma = 0; - shp->irq = ha->irq; - shp->dma_channel = 0xff; - - ha->hanum = gdth_ctr_count++; - ha->shost = shp; - - TRACE2(("EISA detect Bus 0: hanum %d\n", ha->hanum)); - - ha->pccb = &ha->cmdext; - ha->ccb_phys = 0L; - - error = -ENOMEM; - - ha->pdev = NULL; - ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH, - &scratch_dma_handle); - if (!ha->pscratch) - goto out_free_irq; - ha->scratch_phys = scratch_dma_handle; - - ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str), - &scratch_dma_handle); - if (!ha->pmsg) - goto out_free_pscratch; - ha->msg_phys = scratch_dma_handle; - -#ifdef INT_COAL - ha->coal_stat = pci_alloc_consistent(ha->pdev, - sizeof(gdth_coal_status) * MAXOFFSETS, - &scratch_dma_handle); - if (!ha->coal_stat) - goto out_free_pmsg; - ha->coal_stat_phys = scratch_dma_handle; -#endif - - ha->ccb_phys = pci_map_single(ha->pdev,ha->pccb, - sizeof(gdth_cmd_str), PCI_DMA_BIDIRECTIONAL); - if (!ha->ccb_phys) - goto out_free_coal_stat; - - ha->scratch_busy = FALSE; - ha->req_first = NULL; - ha->tid_cnt = MAX_HDRIVES; - if (max_ids > 0 && max_ids < ha->tid_cnt) - ha->tid_cnt = max_ids; - for (i = 0; i < GDTH_MAXCMDS; ++i) - ha->cmd_tab[i].cmnd = UNUSED_CMND; - ha->scan_mode = rescan ? 
0x10 : 0; - - if (!gdth_search_drives(ha)) { - printk("GDT-EISA: Error during device scan\n"); - error = -ENODEV; - goto out_free_ccb_phys; - } - - if (hdr_channel < 0 || hdr_channel > ha->bus_cnt) - hdr_channel = ha->bus_cnt; - ha->virt_bus = hdr_channel; - - if (ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) - shp->max_cmd_len = 16; - - shp->max_id = ha->tid_cnt; - shp->max_lun = MAXLUN; - shp->max_channel = ha->bus_cnt; - - spin_lock_init(&ha->smp_lock); - gdth_enable_int(ha); - - error = scsi_add_host(shp, NULL); - if (error) - goto out_free_ccb_phys; - list_add_tail(&ha->list, &gdth_instances); - gdth_timer_init(); - - scsi_scan_host(shp); - - return 0; - - out_free_ccb_phys: - pci_unmap_single(ha->pdev,ha->ccb_phys, sizeof(gdth_cmd_str), - PCI_DMA_BIDIRECTIONAL); - out_free_coal_stat: -#ifdef INT_COAL - pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, - ha->coal_stat, ha->coal_stat_phys); - out_free_pmsg: -#endif - pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), - ha->pmsg, ha->msg_phys); - out_free_pscratch: - pci_free_consistent(ha->pdev, GDTH_SCRATCH, - ha->pscratch, ha->scratch_phys); - out_free_irq: - free_irq(ha->irq, ha); - gdth_ctr_count--; - out_host_put: - scsi_host_put(shp); - return error; -} -#endif /* CONFIG_EISA */ - -#ifdef CONFIG_PCI static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) { struct Scsi_Host *shp; @@ -4993,27 +4142,18 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) error = -ENOMEM; - ha->pscratch = pci_alloc_consistent(ha->pdev, GDTH_SCRATCH, - &scratch_dma_handle); + ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH, + &scratch_dma_handle, GFP_KERNEL); if (!ha->pscratch) goto out_free_irq; ha->scratch_phys = scratch_dma_handle; - ha->pmsg = pci_alloc_consistent(ha->pdev, sizeof(gdth_msg_str), - &scratch_dma_handle); + ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str), + &scratch_dma_handle, GFP_KERNEL); if (!ha->pmsg) goto out_free_pscratch; ha->msg_phys = scratch_dma_handle; -#ifdef INT_COAL - ha->coal_stat = pci_alloc_consistent(ha->pdev, - sizeof(gdth_coal_status) * MAXOFFSETS, - &scratch_dma_handle); - if (!ha->coal_stat) - goto out_free_pmsg; - ha->coal_stat_phys = scratch_dma_handle; -#endif - ha->scratch_busy = FALSE; ha->req_first = NULL; ha->tid_cnt = pdev->device >= 0x200 ? 
MAXID : MAX_HDRIVES; @@ -5026,7 +4166,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) error = -ENODEV; if (!gdth_search_drives(ha)) { printk("GDT-PCI %d: Error during device scan\n", ha->hanum); - goto out_free_coal_stat; + goto out_free_pmsg; } if (hdr_channel < 0 || hdr_channel > ha->bus_cnt) @@ -5036,19 +4176,19 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) /* 64-bit DMA only supported from FW >= x.43 */ if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) || !ha->dma64_support) { - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_WARNING "GDT-PCI %d: " "Unable to set 32-bit DMA\n", ha->hanum); - goto out_free_coal_stat; + goto out_free_pmsg; } } else { shp->max_cmd_len = 16; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum); - } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_WARNING "GDT-PCI %d: " "Unable to set 64/32-bit DMA\n", ha->hanum); - goto out_free_coal_stat; + goto out_free_pmsg; } } @@ -5061,7 +4201,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) error = scsi_add_host(shp, &pdev->dev); if (error) - goto out_free_coal_stat; + goto out_free_pmsg; list_add_tail(&ha->list, &gdth_instances); pci_set_drvdata(ha->pdev, ha); @@ -5073,16 +4213,11 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) return 0; - out_free_coal_stat: -#ifdef INT_COAL - pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * MAXOFFSETS, - ha->coal_stat, ha->coal_stat_phys); out_free_pmsg: -#endif - pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), + dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str), ha->pmsg, ha->msg_phys); out_free_pscratch: - pci_free_consistent(ha->pdev, GDTH_SCRATCH, + dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH, ha->pscratch, ha->scratch_phys); out_free_irq: free_irq(ha->irq, ha); @@ -5091,7 +4226,6 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out) scsi_host_put(shp); return error; } -#endif /* CONFIG_PCI */ static void gdth_remove_one(gdth_ha_str *ha) { @@ -5111,24 +4245,15 @@ static void gdth_remove_one(gdth_ha_str *ha) if (shp->irq) free_irq(shp->irq,ha); -#ifdef CONFIG_ISA - if (shp->dma_channel != 0xff) - free_dma(shp->dma_channel); -#endif -#ifdef INT_COAL - if (ha->coal_stat) - pci_free_consistent(ha->pdev, sizeof(gdth_coal_status) * - MAXOFFSETS, ha->coal_stat, ha->coal_stat_phys); -#endif if (ha->pscratch) - pci_free_consistent(ha->pdev, GDTH_SCRATCH, + dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH, ha->pscratch, ha->scratch_phys); if (ha->pmsg) - pci_free_consistent(ha->pdev, sizeof(gdth_msg_str), + dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str), ha->pmsg, ha->msg_phys); if (ha->ccb_phys) - pci_unmap_single(ha->pdev,ha->ccb_phys, - sizeof(gdth_cmd_str),PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&ha->pdev->dev, ha->ccb_phys, + sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL); scsi_host_put(shp); } @@ -5167,26 +4292,6 @@ static int __init gdth_init(void) gdth_clear_events(); timer_setup(&gdth_timer, gdth_timeout, 0); - /* As default we do not probe for EISA or ISA controllers */ - if (probe_eisa_isa) { - /* scanning for controllers, at first: ISA controller */ -#ifdef CONFIG_ISA - u32 isa_bios; - for (isa_bios = 0xc8000UL; isa_bios <= 0xd8000UL; - isa_bios += 
0x8000UL) - gdth_isa_probe_one(isa_bios); -#endif -#ifdef CONFIG_EISA - { - u16 eisa_slot; - for (eisa_slot = 0x1000; eisa_slot <= 0x8000; - eisa_slot += 0x1000) - gdth_eisa_probe_one(eisa_slot); - } -#endif - } - -#ifdef CONFIG_PCI /* scanning for PCI controllers */ if (pci_register_driver(&gdth_pci_driver)) { gdth_ha_str *ha; @@ -5195,7 +4300,6 @@ static int __init gdth_init(void) gdth_remove_one(ha); return -ENODEV; } -#endif /* CONFIG_PCI */ TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count)); @@ -5216,9 +4320,7 @@ static void __exit gdth_exit(void) del_timer_sync(&gdth_timer); #endif -#ifdef CONFIG_PCI pci_unregister_driver(&gdth_pci_driver); -#endif list_for_each_entry(ha, &gdth_instances, list) gdth_remove_one(ha); diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index ee6ffcf388e8..5a13d406d40e 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h @@ -38,17 +38,9 @@ #define OEM_ID_INTEL 0x8000 /* controller classes */ -#define GDT_ISA 0x01 /* ISA controller */ -#define GDT_EISA 0x02 /* EISA controller */ #define GDT_PCI 0x03 /* PCI controller */ #define GDT_PCINEW 0x04 /* new PCI controller */ #define GDT_PCIMPR 0x05 /* PCI MPR controller */ -/* GDT_EISA, controller subtypes EISA */ -#define GDT3_ID 0x0130941c /* GDT3000/3020 */ -#define GDT3A_ID 0x0230941c /* GDT3000A/3020A/3050A */ -#define GDT3B_ID 0x0330941c /* GDT3000B/3010A */ -/* GDT_ISA */ -#define GDT2_ID 0x0120941c /* GDT2000/2020 */ #ifndef PCI_DEVICE_ID_VORTEX_GDT60x0 /* GDT_PCI */ @@ -281,17 +273,6 @@ #define GDTH_DATA_IN 0x01000000L /* data from target */ #define GDTH_DATA_OUT 0x00000000L /* data to target */ -/* BMIC registers (EISA controllers) */ -#define ID0REG 0x0c80 /* board ID */ -#define EINTENABREG 0x0c89 /* interrupt enable */ -#define SEMA0REG 0x0c8a /* command semaphore */ -#define SEMA1REG 0x0c8b /* status semaphore */ -#define LDOORREG 0x0c8d /* local doorbell */ -#define EDENABREG 0x0c8e /* EISA system doorbell enab. */ -#define EDOORREG 0x0c8f /* EISA system doorbell */ -#define MAILBOXREG 0x0c90 /* mailbox reg. (16 bytes) */ -#define EISAREG 0x0cc0 /* EISA configuration */ - /* other defines */ #define LINUX_OS 8 /* used for cache optim. */ #define SECS32 0x1f /* round capacity */ @@ -706,21 +687,11 @@ typedef struct { u8 fw_magic; /* contr. ID from firmware */ } __attribute__((packed)) gdt_pci_sram; -/* SRAM structure EISA controllers (but NOT GDT3000/3020) */ -typedef struct { - u8 os_used[16]; /* OS code per service */ - u16 need_deinit; /* switch betw. BIOS/driver */ - u8 switch_support; /* see need_deinit */ - u8 padding; -} __attribute__((packed)) gdt_eisa_sram; - - /* DPRAM ISA controllers */ typedef struct { union { struct { u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */ - u32 magic; /* controller (EISA) ID */ u16 need_deinit; /* switch betw. BIOS/driver */ u8 switch_support; /* see need_deinit */ u8 padding[9]; @@ -843,7 +814,6 @@ typedef struct { u16 cache_feat; /* feat. cache serv. (s/g,..)*/ u16 raw_feat; /* feat. raw service (s/g,..)*/ u16 screen_feat; /* feat. raw service (s/g,..)*/ - u16 bmic; /* BMIC address (EISA) */ void __iomem *brd; /* DPRAM address */ u32 brd_phys; /* slot number/BIOS address */ gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */ diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h index 4c91894ac244..ee4c9bf1022a 100644 --- a/drivers/scsi/gdth_ioctl.h +++ b/drivers/scsi/gdth_ioctl.h @@ -27,11 +27,7 @@ #define GDTH_MAXSG 32 /* max. s/g elements */ #define MAX_LDRIVES 255 /* max. log. 
drive count */ -#ifdef GDTH_IOCTL_PROC -#define MAX_HDRIVES 100 /* max. host drive count */ -#else #define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */ -#endif /* scatter/gather element */ typedef struct { @@ -178,91 +174,6 @@ typedef struct { gdth_evt_data event_data; } __attribute__((packed)) gdth_evt_str; - -#ifdef GDTH_IOCTL_PROC -/* IOCTL structure (write) */ -typedef struct { - u32 magic; /* IOCTL magic */ - u16 ioctl; /* IOCTL */ - u16 ionode; /* controller number */ - u16 service; /* controller service */ - u16 timeout; /* timeout */ - union { - struct { - u8 command[512]; /* controller command */ - u8 data[1]; /* add. data */ - } general; - struct { - u8 lock; /* lock/unlock */ - u8 drive_cnt; /* drive count */ - u16 drives[MAX_HDRIVES];/* drives */ - } lockdrv; - struct { - u8 lock; /* lock/unlock */ - u8 channel; /* channel */ - } lockchn; - struct { - int erase; /* erase event ? */ - int handle; - u8 evt[EVENT_SIZE]; /* event structure */ - } event; - struct { - u8 bus; /* SCSI bus */ - u8 target; /* target ID */ - u8 lun; /* LUN */ - u8 cmd_len; /* command length */ - u8 cmd[12]; /* SCSI command */ - } scsi; - struct { - u16 hdr_no; /* host drive number */ - u8 flag; /* old meth./add/remove */ - } rescan; - } iu; -} gdth_iowr_str; - -/* IOCTL structure (read) */ -typedef struct { - u32 size; /* buffer size */ - u32 status; /* IOCTL error code */ - union { - struct { - u8 data[1]; /* data */ - } general; - struct { - u16 version; /* driver version */ - } drvers; - struct { - u8 type; /* controller type */ - u16 info; /* slot etc. */ - u16 oem_id; /* OEM ID */ - u16 bios_ver; /* not used */ - u16 access; /* not used */ - u16 ext_type; /* extended type */ - u16 device_id; /* device ID */ - u16 sub_device_id; /* sub device ID */ - } ctrtype; - struct { - u8 version; /* OS version */ - u8 subversion; /* OS subversion */ - u16 revision; /* revision */ - } osvers; - struct { - u16 count; /* controller count */ - } ctrcnt; - struct { - int handle; - u8 evt[EVENT_SIZE]; /* event structure */ - } event; - struct { - u8 bus; /* SCSI bus, 0xff: invalid */ - u8 target; /* target ID */ - u8 lun; /* LUN */ - u8 cluster_type; /* cluster properties */ - } hdr_list[MAX_HDRIVES]; /* index is host drive number */ - } iu; -} gdth_iord_str; -#endif - /* GDTIOCTL_GENERAL */ typedef struct { u16 ionode; /* controller number */ diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 3a9751a80225..381d849726ac 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c @@ -31,7 +31,6 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, int i, found; gdth_cmd_str gdtcmd; gdth_cpar_str *pcpar; - u64 paddr; char cmnd[MAX_COMMAND_SIZE]; memset(cmnd, 0xff, 12); @@ -113,13 +112,23 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, } if (wb_mode) { - if (!gdth_ioctl_alloc(ha, sizeof(gdth_cpar_str), TRUE, &paddr)) - return(-EBUSY); + unsigned long flags; + + BUILD_BUG_ON(sizeof(gdth_cpar_str) > GDTH_SCRATCH); + + spin_lock_irqsave(&ha->smp_lock, flags); + if (ha->scratch_busy) { + spin_unlock_irqrestore(&ha->smp_lock, flags); + return -EBUSY; + } + ha->scratch_busy = TRUE; + spin_unlock_irqrestore(&ha->smp_lock, flags); + pcpar = (gdth_cpar_str *)ha->pscratch; memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) ); gdtcmd.Service = CACHESERVICE; gdtcmd.OpCode = GDT_IOCTL; - gdtcmd.u.ioctl.p_param = paddr; + gdtcmd.u.ioctl.p_param = ha->scratch_phys; gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str); gdtcmd.u.ioctl.subfunc = CACHE_CONFIG; 
gdtcmd.u.ioctl.channel = INVALID_CHANNEL; @@ -127,7 +136,10 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, gdth_execute(host, &gdtcmd, cmnd, 30, NULL); - gdth_ioctl_free(ha, GDTH_SCRATCH, ha->pscratch, paddr); + spin_lock_irqsave(&ha->smp_lock, flags); + ha->scratch_busy = FALSE; + spin_unlock_irqrestore(&ha->smp_lock, flags); + printk("Done.\n"); return(orig_length); } @@ -143,7 +155,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) int id, i, j, k, sec, flag; int no_mdrv = 0, drv_no, is_mirr; u32 cnt; - u64 paddr; + dma_addr_t paddr; int rc = -ENOMEM; gdth_cmd_str *gdtcmd; @@ -217,20 +229,14 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) " Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n", ha->binfo.ser_no, ha->binfo.memsize / 1024); -#ifdef GDTH_DMA_STATISTICS - /* controller statistics */ - seq_puts(m, "\nController Statistics:\n"); - seq_printf(m, - " 32-bit DMA buffer:\t%lu\t64-bit DMA buffer:\t%lu\n", - ha->dma32_cnt, ha->dma64_cnt); -#endif - if (ha->more_proc) { + size_t size = max_t(size_t, GDTH_SCRATCH, sizeof(gdth_hget_str)); + /* more information: 2. about physical devices */ seq_puts(m, "\nPhysical Devices:"); flag = FALSE; - buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); + buf = dma_alloc_coherent(&ha->pdev->dev, size, &paddr, GFP_KERNEL); if (!buf) goto stop_output; for (i = 0; i < ha->bus_cnt; ++i) { @@ -323,7 +329,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) } } } - gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); if (!flag) seq_puts(m, "\n --\n"); @@ -332,9 +337,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) seq_puts(m, "\nLogical Drives:"); flag = FALSE; - buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); - if (!buf) - goto stop_output; for (i = 0; i < MAX_LDRIVES; ++i) { if (!ha->hdr[i].is_logdrv) continue; @@ -408,8 +410,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) seq_printf(m, " To Array Drv.:\t%s\n", hrec); } - gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - + if (!flag) seq_puts(m, "\n --\n"); @@ -417,9 +418,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) seq_puts(m, "\nArray Drives:"); flag = FALSE; - buf = gdth_ioctl_alloc(ha, GDTH_SCRATCH, FALSE, &paddr); - if (!buf) - goto stop_output; for (i = 0; i < MAX_LDRIVES; ++i) { if (!(ha->hdr[i].is_arraydrv && ha->hdr[i].is_master)) continue; @@ -468,8 +466,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) hrec); } } - gdth_ioctl_free(ha, GDTH_SCRATCH, buf, paddr); - + if (!flag) seq_puts(m, "\n --\n"); @@ -477,9 +474,6 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) seq_puts(m, "\nHost Drives:"); flag = FALSE; - buf = gdth_ioctl_alloc(ha, sizeof(gdth_hget_str), FALSE, &paddr); - if (!buf) - goto stop_output; for (i = 0; i < MAX_LDRIVES; ++i) { if (!ha->hdr[i].is_logdrv || (ha->hdr[i].is_arraydrv && !ha->hdr[i].is_master)) @@ -510,7 +504,7 @@ int gdth_show_info(struct seq_file *m, struct Scsi_Host *host) } } } - gdth_ioctl_free(ha, sizeof(gdth_hget_str), buf, paddr); + dma_free_coherent(&ha->pdev->dev, size, buf, paddr); for (i = 0; i < MAX_HDRIVES; ++i) { if (!(ha->hdr[i].present)) @@ -563,65 +557,6 @@ free_fail: return rc; } -static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, - u64 *paddr) -{ - unsigned long flags; - char *ret_val; - - if (size == 0) - return NULL; - - spin_lock_irqsave(&ha->smp_lock, flags); - - if (!ha->scratch_busy && size <= GDTH_SCRATCH) { - ha->scratch_busy = TRUE; - ret_val = 
ha->pscratch; - *paddr = ha->scratch_phys; - } else if (scratch) { - ret_val = NULL; - } else { - dma_addr_t dma_addr; - - ret_val = pci_alloc_consistent(ha->pdev, size, &dma_addr); - *paddr = dma_addr; - } - - spin_unlock_irqrestore(&ha->smp_lock, flags); - return ret_val; -} - -static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr) -{ - unsigned long flags; - - if (buf == ha->pscratch) { - spin_lock_irqsave(&ha->smp_lock, flags); - ha->scratch_busy = FALSE; - spin_unlock_irqrestore(&ha->smp_lock, flags); - } else { - pci_free_consistent(ha->pdev, size, buf, paddr); - } -} - -#ifdef GDTH_IOCTL_PROC -static int gdth_ioctl_check_bin(gdth_ha_str *ha, u16 size) -{ - unsigned long flags; - int ret_val; - - spin_lock_irqsave(&ha->smp_lock, flags); - - ret_val = FALSE; - if (ha->scratch_busy) { - if (((gdth_iord_str *)ha->pscratch)->size == (u32)size) - ret_val = TRUE; - } - spin_unlock_irqrestore(&ha->smp_lock, flags); - return ret_val; -} -#endif - static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) { unsigned long flags; diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h index d7d0aa283695..4cc5377cb92e 100644 --- a/drivers/scsi/gdth_proc.h +++ b/drivers/scsi/gdth_proc.h @@ -12,9 +12,6 @@ int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd, static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer, int length, gdth_ha_str *ha); -static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch, - u64 *paddr); -static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, u64 paddr); static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id); #endif diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index af291947a54d..6c87bd34509a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -29,7 +30,7 @@ #define HISI_SAS_MAX_PHYS 9 #define HISI_SAS_MAX_QUEUES 32 -#define HISI_SAS_QUEUE_SLOTS 512 +#define HISI_SAS_QUEUE_SLOTS 4096 #define HISI_SAS_MAX_ITCT_ENTRIES 1024 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES #define HISI_SAS_RESET_BIT 0 @@ -40,20 +41,25 @@ #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) #define hisi_sas_status_buf_addr(buf) \ - (buf + offsetof(struct hisi_sas_slot_buf_table, status_buffer)) -#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr(slot->buf) + ((buf) + offsetof(struct hisi_sas_slot_buf_table, status_buffer)) +#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr((slot)->buf) #define hisi_sas_status_buf_addr_dma(slot) \ - hisi_sas_status_buf_addr(slot->buf_dma) + hisi_sas_status_buf_addr((slot)->buf_dma) #define hisi_sas_cmd_hdr_addr(buf) \ - (buf + offsetof(struct hisi_sas_slot_buf_table, command_header)) -#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr(slot->buf) -#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr(slot->buf_dma) + ((buf) + offsetof(struct hisi_sas_slot_buf_table, command_header)) +#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr((slot)->buf) +#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr((slot)->buf_dma) #define hisi_sas_sge_addr(buf) \ - (buf + offsetof(struct hisi_sas_slot_buf_table, sge_page)) -#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf) -#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma) + ((buf) + offsetof(struct hisi_sas_slot_buf_table, sge_page)) 
+#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr((slot)->buf) +#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr((slot)->buf_dma) + +#define hisi_sas_sge_dif_addr(buf) \ + ((buf) + offsetof(struct hisi_sas_slot_dif_buf_table, sge_dif_page)) +#define hisi_sas_sge_dif_addr_mem(slot) hisi_sas_sge_dif_addr((slot)->buf) +#define hisi_sas_sge_dif_addr_dma(slot) hisi_sas_sge_dif_addr((slot)->buf_dma) #define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024) #define HISI_SAS_MAX_SMP_RESP_SZ 1028 @@ -73,7 +79,13 @@ SHOST_DIF_TYPE2_PROTECTION | \ SHOST_DIF_TYPE3_PROTECTION) -#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK) +#define HISI_SAS_DIX_PROT_MASK (SHOST_DIX_TYPE1_PROTECTION | \ + SHOST_DIX_TYPE2_PROTECTION | \ + SHOST_DIX_TYPE3_PROTECTION) + +#define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK) + +#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20 struct hisi_hba; @@ -82,11 +94,6 @@ enum { PORT_TYPE_SATA = (1U << 0), }; -enum dev_status { - HISI_SAS_DEV_NORMAL, - HISI_SAS_DEV_EH, -}; - enum { HISI_SAS_INT_ABT_CMD = 0, HISI_SAS_INT_ABT_DEV = 1, @@ -145,6 +152,7 @@ struct hisi_sas_phy { struct asd_sas_phy sas_phy; struct sas_identify identify; struct completion *reset_completion; + struct timer_list timer; spinlock_t lock; u64 port_id; /* from hw */ u64 frame_rcvd_size; @@ -165,6 +173,7 @@ struct hisi_sas_port { struct hisi_sas_cq { struct hisi_hba *hisi_hba; + const struct cpumask *pci_irq_mask; struct tasklet_struct tasklet; int rd_point; int id; @@ -187,7 +196,7 @@ struct hisi_sas_device { enum sas_device_type dev_type; int device_id; int sata_idx; - u8 dev_status; + spinlock_t lock; /* For protecting slots */ }; struct hisi_sas_tmf_task { @@ -203,12 +212,14 @@ struct hisi_sas_slot { struct sas_task *task; struct hisi_sas_port *port; u64 n_elem; + u64 n_elem_dif; int dlvry_queue; int dlvry_queue_slot; int cmplt_queue; int cmplt_queue_slot; int abort; int ready; + int device_id; void *cmd_hdr; dma_addr_t cmd_hdr_dma; struct timer_list internal_abort_timer; @@ -220,6 +231,24 @@ struct hisi_sas_slot { u16 idx; }; +#define HISI_SAS_DEBUGFS_REG(x) {#x, x} + +struct hisi_sas_debugfs_reg_lu { + char *name; + int off; +}; + +struct hisi_sas_debugfs_reg { + const struct hisi_sas_debugfs_reg_lu *lu; + int count; + int base_off; + union { + u32 (*read_global_reg)(struct hisi_hba *hisi_hba, u32 off); + u32 (*read_port_reg)(struct hisi_hba *hisi_hba, int port, + u32 off); + }; +}; + struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, @@ -227,7 +256,7 @@ struct hisi_sas_hw { int (*slot_index_alloc)(struct hisi_hba *hisi_hba, struct domain_device *device); struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); - void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no); + void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no); int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq); void (*start_delivery)(struct hisi_sas_dq *dq); void (*prep_ssp)(struct hisi_hba *hisi_hba, @@ -259,11 +288,16 @@ struct hisi_sas_hw { u32 (*get_phys_state)(struct hisi_hba *hisi_hba); int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data); - void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, - int delay_ms, int timeout_ms); + int (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms); + void (*snapshot_prepare)(struct hisi_hba *hisi_hba); + void (*snapshot_restore)(struct hisi_hba *hisi_hba); int 
max_command_entries; int complete_hdr_size; struct scsi_host_template *sht; + + const struct hisi_sas_debugfs_reg *debugfs_reg_global; + const struct hisi_sas_debugfs_reg *debugfs_reg_port; }; struct hisi_hba { @@ -329,9 +363,25 @@ struct hisi_hba { const struct hisi_sas_hw *hw; /* Low level hw interface */ unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; struct work_struct rst_work; + struct work_struct debugfs_work; u32 phy_state; u32 intr_coal_ticks; /* Time of interrupt coalesce in us */ u32 intr_coal_count; /* Interrupt count to coalesce */ + + int cq_nvecs; + unsigned int *reply_map; + + /* debugfs memories */ + u32 *debugfs_global_reg; + u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS]; + void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES]; + struct hisi_sas_iost *debugfs_iost; + struct hisi_sas_itct *debugfs_itct; + + struct dentry *debugfs_dir; + struct dentry *debugfs_dump_dentry; + bool debugfs_snapshot; }; /* Generic HW DMA host memory structures */ @@ -430,6 +480,11 @@ struct hisi_sas_sge_page { struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT]; } __aligned(16); +#define HISI_SAS_SGE_DIF_PAGE_CNT SG_CHUNK_SIZE +struct hisi_sas_sge_dif_page { + struct hisi_sas_sge sge[HISI_SAS_SGE_DIF_PAGE_CNT]; +} __aligned(16); + struct hisi_sas_command_table_ssp { struct ssp_frame_hdr hdr; union { @@ -460,9 +515,18 @@ struct hisi_sas_slot_buf_table { struct hisi_sas_sge_page sge_page; }; +struct hisi_sas_slot_dif_buf_table { + struct hisi_sas_slot_buf_table slot_buf; + struct hisi_sas_sge_dif_page sge_dif_page; +}; + extern struct scsi_transport_template *hisi_sas_stt; + +extern bool hisi_sas_debugfs_enable; +extern struct dentry *hisi_sas_debugfs_dir; + extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); -extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost); +extern int hisi_sas_alloc(struct hisi_hba *hisi_hba); extern void hisi_sas_free(struct hisi_hba *hisi_hba); extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction); @@ -487,10 +551,14 @@ extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); extern void hisi_sas_rst_work_handler(struct work_struct *work); extern void hisi_sas_sync_rst_work_handler(struct work_struct *work); extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba); +extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no); extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, enum hisi_sas_phy_event event); extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba); extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max); extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba); extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba); +extern void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba); +extern void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba); +extern void hisi_sas_debugfs_work_handler(struct work_struct *work); #endif diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index eed7fc5b3389..13ca5a0bdf6b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -144,7 +144,7 @@ EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); */ u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max) { - u16 rate = 0; + u8 rate = 0; int i; max -= SAS_LINK_RATE_1_5_GBPS; @@ -241,8 +241,9 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) void hisi_sas_slot_task_free(struct hisi_hba 
*hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { - struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue]; unsigned long flags; + int device_id = slot->device_id; + struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; if (task) { struct device *dev = hisi_hba->dev; @@ -252,17 +253,24 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, task->lldd_task = NULL; - if (!sas_protocol_ata(task->task_proto)) + if (!sas_protocol_ata(task->task_proto)) { + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + if (slot->n_elem) dma_unmap_sg(dev, task->scatter, task->num_scatter, task->data_dir); + if (slot->n_elem_dif) + dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + task->data_dir); + } } - - spin_lock_irqsave(&dq->lock, flags); + spin_lock_irqsave(&sas_dev->lock, flags); list_del_init(&slot->entry); - spin_unlock_irqrestore(&dq->lock, flags); + spin_unlock_irqrestore(&sas_dev->lock, flags); memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); @@ -380,6 +388,59 @@ prep_out: return rc; } +static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba, + struct sas_task *task, int n_elem_dif) +{ + struct device *dev = hisi_hba->dev; + + if (n_elem_dif) { + struct sas_ssp_task *ssp_task = &task->ssp_task; + struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; + + dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + task->data_dir); + } +} + +static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba, + int *n_elem_dif, struct sas_task *task) +{ + struct device *dev = hisi_hba->dev; + struct sas_ssp_task *ssp_task; + struct scsi_cmnd *scsi_cmnd; + int rc; + + if (task->num_scatter) { + ssp_task = &task->ssp_task; + scsi_cmnd = ssp_task->cmd; + + if (scsi_prot_sg_count(scsi_cmnd)) { + *n_elem_dif = dma_map_sg(dev, + scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + task->data_dir); + + if (!*n_elem_dif) + return -ENOMEM; + + if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) { + dev_err(dev, "task prep: n_elem_dif(%d) too large\n", + *n_elem_dif); + rc = -EINVAL; + goto err_out_dif_dma_unmap; + } + } + } + + return 0; + +err_out_dif_dma_unmap: + dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), task->data_dir); + return rc; +} + static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq **dq_pointer, bool is_tmf, struct hisi_sas_tmf_task *tmf, @@ -394,7 +455,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct asd_sas_port *sas_port = device->port; struct device *dev = hisi_hba->dev; int dlvry_queue_slot, dlvry_queue, rc, slot_idx; - int n_elem = 0, n_elem_req = 0, n_elem_resp = 0; + int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0; struct hisi_sas_dq *dq; unsigned long flags; int wr_q_index; @@ -410,7 +471,14 @@ static int hisi_sas_task_prep(struct sas_task *task, return -ECOMM; } - *dq_pointer = dq = sas_dev->dq; + if (hisi_hba->reply_map) { + int cpu = raw_smp_processor_id(); + unsigned int dq_index = hisi_hba->reply_map[cpu]; + + *dq_pointer = dq = &hisi_hba->dq[dq_index]; + } else { + *dq_pointer = dq = sas_dev->dq; + } port = to_hisi_sas_port(sas_port); if (port && !port->port_attached) { @@ -427,6 +495,12 @@ static int hisi_sas_task_prep(struct sas_task *task, if (rc < 0) goto prep_out; + if (!sas_protocol_ata(task->task_proto)) { + rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task); + if (rc < 0) + goto err_out_dma_unmap; + } + if 
(hisi_hba->hw->slot_index_alloc) rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); else { @@ -445,7 +519,7 @@ static int hisi_sas_task_prep(struct sas_task *task, rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd); } if (rc < 0) - goto err_out_dma_unmap; + goto err_out_dif_dma_unmap; slot_idx = rc; slot = &hisi_hba->slot_info[slot_idx]; @@ -459,13 +533,17 @@ static int hisi_sas_task_prep(struct sas_task *task, } list_add_tail(&slot->delivery, &dq->list); - list_add_tail(&slot->entry, &sas_dev->list); spin_unlock_irqrestore(&dq->lock, flags); + spin_lock_irqsave(&sas_dev->lock, flags); + list_add_tail(&slot->entry, &sas_dev->list); + spin_unlock_irqrestore(&sas_dev->lock, flags); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; + slot->device_id = sas_dev->device_id; slot->n_elem = n_elem; + slot->n_elem_dif = n_elem_dif; slot->dlvry_queue = dlvry_queue; slot->dlvry_queue_slot = dlvry_queue_slot; cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; @@ -509,6 +587,9 @@ static int hisi_sas_task_prep(struct sas_task *task, err_out_tag: hisi_sas_slot_index_free(hisi_hba, slot_idx); +err_out_dif_dma_unmap: + if (!sas_protocol_ata(task->task_proto)) + hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); err_out_dma_unmap: hisi_sas_dma_unmap(hisi_hba, task, n_elem, n_elem_req, n_elem_resp); @@ -626,11 +707,11 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; - sas_dev->dev_status = HISI_SAS_DEV_NORMAL; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; sas_dev->dq = dq; + spin_lock_init(&sas_dev->lock); INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } @@ -778,7 +859,8 @@ static void hisi_sas_phyup_work(struct work_struct *work) struct asd_sas_phy *sas_phy = &phy->sas_phy; int phy_no = sas_phy->id; - hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */ + if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) + hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); hisi_sas_bytes_dmaed(hisi_hba, phy_no); } @@ -808,6 +890,30 @@ bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, } EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event); +static void hisi_sas_wait_phyup_timedout(struct timer_list *t) +{ + struct hisi_sas_phy *phy = from_timer(phy, t, timer); + struct hisi_hba *hisi_hba = phy->hisi_hba; + struct device *dev = hisi_hba->dev; + int phy_no = phy->sas_phy.id; + + dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); +} + +void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no) +{ + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct device *dev = hisi_hba->dev; + + if (!timer_pending(&phy->timer)) { + dev_dbg(dev, "phy%d OOB ready\n", phy_no); + phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ; + add_timer(&phy->timer); + } +} +EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready); + static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; @@ -836,6 +942,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); spin_lock_init(&phy->lock); + + timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); } static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) @@ -926,7 +1034,7 @@ static void hisi_sas_dev_gone(struct domain_device *device) if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { 
hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); + HISI_SAS_INT_ABT_DEV, 0); hisi_sas_dereg_device(hisi_hba, device); @@ -946,7 +1054,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) return hisi_sas_task_exec(task, gfp_flags, 0, NULL); } -static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, +static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *r) { struct sas_phy_linkrates _r; @@ -955,6 +1063,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, struct asd_sas_phy *sas_phy = &phy->sas_phy; enum sas_linkrate min, max; + if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) + return -EINVAL; + if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { max = sas_phy->phy->maximum_linkrate; min = r->minimum_linkrate; @@ -962,7 +1073,7 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, max = r->maximum_linkrate; min = sas_phy->phy->minimum_linkrate; } else - return; + return -EINVAL; _r.maximum_linkrate = max; _r.minimum_linkrate = min; @@ -974,6 +1085,8 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no, msleep(100); hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); hisi_hba->hw->phy_start(hisi_hba, phy_no); + + return 0; } static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, @@ -999,8 +1112,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_SET_LINK_RATE: - hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); - break; + return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata); case PHY_FUNC_GET_EVENTS: if (hisi_hba->hw->get_events) { hisi_hba->hw->get_events(hisi_hba, phy_no); @@ -1068,7 +1180,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, task->task_done = hisi_sas_task_done; task->slow_task->timer.function = hisi_sas_tmf_timedout; - task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; + task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ; add_timer(&task->slow_task->timer); res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf); @@ -1429,6 +1541,9 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) struct Scsi_Host *shost = hisi_hba->shost; int rc; + if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct) + queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); + if (!hisi_hba->hw->soft_reset) return -1; @@ -1491,7 +1606,6 @@ static int hisi_sas_abort_task(struct sas_task *task) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); - sas_dev->dev_status = HISI_SAS_DEV_EH; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd *cmnd = task->uldd_task; struct hisi_sas_slot *slot = task->lldd_task; @@ -1527,7 +1641,8 @@ static int hisi_sas_abort_task(struct sas_task *task) task->task_proto & SAS_PROTOCOL_STP) { if (task->dev->dev_type == SAS_SATA_DEV) { rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); + HISI_SAS_INT_ABT_DEV, + 0); if (rc < 0) { dev_err(dev, "abort task: internal abort failed\n"); goto out; @@ -1542,7 +1657,7 @@ static int hisi_sas_abort_task(struct sas_task *task) struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_CMD, tag); + HISI_SAS_INT_ABT_CMD, tag); if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && task->lldd_task) { /* @@ -1568,7 +1683,7 @@ 
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) int rc = TMF_RESP_FUNC_FAILED; rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); + HISI_SAS_INT_ABT_DEV, 0); if (rc < 0) { dev_err(dev, "abort task set: internal abort rc=%d\n", rc); return TMF_RESP_FUNC_FAILED; @@ -1586,8 +1701,8 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun) { - int rc = TMF_RESP_FUNC_FAILED; struct hisi_sas_tmf_task tmf_task; + int rc; tmf_task.tmf = TMF_CLEAR_ACA; rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); @@ -1635,17 +1750,12 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) static int hisi_sas_I_T_nexus_reset(struct domain_device *device) { - struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; - int rc = TMF_RESP_FUNC_FAILED; - - if (sas_dev->dev_status != HISI_SAS_DEV_EH) - return TMF_RESP_FUNC_FAILED; - sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + int rc; rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); + HISI_SAS_INT_ABT_DEV, 0); if (rc < 0) { dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); return TMF_RESP_FUNC_FAILED; @@ -1667,7 +1777,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; - sas_dev->dev_status = HISI_SAS_DEV_EH; if (dev_is_sata(device)) { struct sas_phy *phy; @@ -1691,7 +1800,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); + HISI_SAS_INT_ABT_DEV, 0); if (rc < 0) { dev_err(dev, "lu_reset: internal abort failed\n"); goto out; @@ -1777,7 +1886,7 @@ static int hisi_sas_query_task(struct sas_task *task) static int hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, struct sas_task *task, int abort_flag, - int task_tag) + int task_tag, struct hisi_sas_dq *dq) { struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -1786,7 +1895,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, struct hisi_sas_slot *slot; struct asd_sas_port *sas_port = device->port; struct hisi_sas_cmd_hdr *cmd_hdr_base; - struct hisi_sas_dq *dq = sas_dev->dq; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; unsigned long flags, flags_dq = 0; int wr_q_index; @@ -1816,10 +1924,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, } list_add_tail(&slot->delivery, &dq->list); spin_unlock_irqrestore(&dq->lock, flags_dq); + spin_lock_irqsave(&sas_dev->lock, flags); + list_add_tail(&slot->entry, &sas_dev->list); + spin_unlock_irqrestore(&sas_dev->lock, flags); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; + slot->device_id = sas_dev->device_id; slot->n_elem = n_elem; slot->dlvry_queue = dlvry_queue; slot->dlvry_queue_slot = dlvry_queue_slot; @@ -1843,7 +1955,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, WRITE_ONCE(slot->ready, 1); /* send abort command to the chip */ spin_lock_irqsave(&dq->lock, flags); - list_add_tail(&slot->entry, &sas_dev->list); hisi_hba->hw->start_delivery(dq); spin_unlock_irqrestore(&dq->lock, flags); @@ -1858,18 +1969,19 @@ err_out: } /** - * hisi_sas_internal_task_abort -- 
execute an internal + * _hisi_sas_internal_task_abort -- execute an internal * abort command for single IO command or a device * @hisi_hba: host controller struct * @device: domain device * @abort_flag: mode of operation, device or single IO * @tag: tag of IO to be aborted (only relevant to single * IO mode) + * @dq: delivery queue for this internal abort command */ static int -hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, - struct domain_device *device, - int abort_flag, int tag) +_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, + struct domain_device *device, int abort_flag, + int tag, struct hisi_sas_dq *dq) { struct sas_task *task; struct hisi_sas_device *sas_dev = device->lldd_dev; @@ -1893,11 +2005,11 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, task->task_proto = device->tproto; task->task_done = hisi_sas_task_done; task->slow_task->timer.function = hisi_sas_tmf_timedout; - task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ; + task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ; add_timer(&task->slow_task->timer); res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id, - task, abort_flag, tag); + task, abort_flag, tag, dq); if (res) { del_timer(&task->slow_task->timer); dev_err(dev, "internal task abort: executing internal task failed: %d\n", @@ -1923,6 +2035,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, slot->task = NULL; } dev_err(dev, "internal task abort: timeout and not done.\n"); + res = -EIO; goto exit; } else @@ -1953,6 +2066,46 @@ exit: return res; } +static int +hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, + struct domain_device *device, + int abort_flag, int tag) +{ + struct hisi_sas_slot *slot; + struct device *dev = hisi_hba->dev; + struct hisi_sas_dq *dq; + int i, rc; + + switch (abort_flag) { + case HISI_SAS_INT_ABT_CMD: + slot = &hisi_hba->slot_info[tag]; + dq = &hisi_hba->dq[slot->dlvry_queue]; + return _hisi_sas_internal_task_abort(hisi_hba, device, + abort_flag, tag, dq); + case HISI_SAS_INT_ABT_DEV: + for (i = 0; i < hisi_hba->cq_nvecs; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + const struct cpumask *mask = cq->pci_irq_mask; + + if (mask && !cpumask_intersects(cpu_online_mask, mask)) + continue; + dq = &hisi_hba->dq[i]; + rc = _hisi_sas_internal_task_abort(hisi_hba, device, + abort_flag, tag, + dq); + if (rc) + return rc; + } + break; + default: + dev_err(dev, "Unrecognised internal abort flag (%d)\n", + abort_flag); + return -EINVAL; + } + + return 0; +} + static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) { hisi_sas_port_notify_formed(sas_phy); @@ -2019,7 +2172,7 @@ void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) { int i; - for (i = 0; i < hisi_hba->queue_count; i++) { + for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; tasklet_kill(&cq->tasklet); @@ -2048,14 +2201,18 @@ static struct sas_domain_function_template hisi_sas_transport_ops = { void hisi_sas_init_mem(struct hisi_hba *hisi_hba) { - int i, s, max_command_entries = hisi_hba->hw->max_command_entries; + int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries; + struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; for (i = 0; i < hisi_hba->queue_count; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct hisi_sas_dq *dq = &hisi_hba->dq[i]; + struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; + + s = sizeof(struct hisi_sas_cmd_hdr); + for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) + 
memset(&cmd_hdr[j], 0, s); - s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; - memset(hisi_hba->cmd_hdr[i], 0, s); dq->wr_point = 0; s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; @@ -2072,12 +2229,13 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba) s = max_command_entries * sizeof(struct hisi_sas_breakpoint); memset(hisi_hba->breakpoint, 0, s); - s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); - memset(hisi_hba->sata_breakpoint, 0, s); + s = sizeof(struct hisi_sas_sata_breakpoint); + for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++) + memset(&sata_breakpoint[j], 0, s); } EXPORT_SYMBOL_GPL(hisi_sas_init_mem); -int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) +int hisi_sas_alloc(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries; @@ -2095,7 +2253,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; hisi_hba->devices[i].device_id = i; - hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL; } for (i = 0; i < hisi_hba->queue_count; i++) { @@ -2131,10 +2288,9 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, - GFP_KERNEL); + GFP_KERNEL | __GFP_ZERO); if (!hisi_hba->itct) goto err_out; - memset(hisi_hba->itct, 0, s); hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, sizeof(struct hisi_sas_slot), @@ -2144,19 +2300,24 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) /* roundup to avoid overly large block size */ max_command_entries_ru = roundup(max_command_entries, 64); - sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64); + if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) + sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table); + else + sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); + sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); s = lcm(max_command_entries_ru, sz_slot_buf_ru); blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; slots_per_blk = s / sz_slot_buf_ru; + for (i = 0; i < blk_cnt; i++) { - struct hisi_sas_slot_buf_table *buf; - dma_addr_t buf_dma; int slot_index = i * slots_per_blk; + dma_addr_t buf_dma; + void *buf; - buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL); + buf = dmam_alloc_coherent(dev, s, &buf_dma, + GFP_KERNEL | __GFP_ZERO); if (!buf) goto err_out; - memset(buf, 0, s); for (j = 0; j < slots_per_blk; j++, slot_index++) { struct hisi_sas_slot *slot; @@ -2166,8 +2327,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) slot->buf_dma = buf_dma; slot->idx = slot_index; - buf++; - buf_dma += sizeof(*buf); + buf += sz_slot_buf_ru; + buf_dma += sz_slot_buf_ru; } } @@ -2323,6 +2484,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, struct Scsi_Host *shost; struct hisi_hba *hisi_hba; struct device *dev = &pdev->dev; + int error; shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); if (!shost) { @@ -2343,8 +2505,11 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) { + error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); + if (error) + 
error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + if (error) { dev_err(dev, "No usable DMA addressing method\n"); goto err_out; } @@ -2361,7 +2526,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, goto err_out; } - if (hisi_sas_alloc(hisi_hba, shost)) { + if (hisi_sas_alloc(hisi_hba)) { hisi_sas_free(hisi_hba); goto err_out; } @@ -2457,6 +2622,555 @@ err_out_ha: } EXPORT_SYMBOL_GPL(hisi_sas_probe); +struct dentry *hisi_sas_debugfs_dir; + +static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba) +{ + int queue_entry_size = hisi_hba->hw->complete_hdr_size; + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) + memcpy(hisi_hba->debugfs_complete_hdr[i], + hisi_hba->complete_hdr[i], + HISI_SAS_QUEUE_SLOTS * queue_entry_size); +} + +static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba) +{ + int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; + int j; + + debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i]; + cmd_hdr = hisi_hba->cmd_hdr[i]; + + for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) + memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j], + queue_entry_size); + } +} + +static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba) +{ + const struct hisi_sas_debugfs_reg *port = + hisi_hba->hw->debugfs_reg_port; + int i, phy_cnt; + u32 offset; + u32 *databuf; + + for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { + databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt]; + for (i = 0; i < port->count; i++, databuf++) { + offset = port->base_off + 4 * i; + *databuf = port->read_port_reg(hisi_hba, phy_cnt, + offset); + } + } +} + +static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba) +{ + u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg; + const struct hisi_sas_debugfs_reg *global = + hisi_hba->hw->debugfs_reg_global; + int i; + + for (i = 0; i < global->count; i++, databuf++) + *databuf = global->read_global_reg(hisi_hba, 4 * i); +} + +static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba) +{ + void *databuf = hisi_hba->debugfs_itct; + struct hisi_sas_itct *itct; + int i; + + itct = hisi_hba->itct; + + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { + memcpy(databuf, itct, sizeof(struct hisi_sas_itct)); + databuf += sizeof(struct hisi_sas_itct); + } +} + +static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba) +{ + int max_command_entries = hisi_hba->hw->max_command_entries; + void *databuf = hisi_hba->debugfs_iost; + struct hisi_sas_iost *iost; + int i; + + iost = hisi_hba->iost; + + for (i = 0; i < max_command_entries; i++, iost++) { + memcpy(databuf, iost, sizeof(struct hisi_sas_iost)); + databuf += sizeof(struct hisi_sas_iost); + } +} + +static const char * +hisi_sas_debugfs_to_reg_name(int off, int base_off, + const struct hisi_sas_debugfs_reg_lu *lu) +{ + for (; lu->name; lu++) { + if (off == lu->off - base_off) + return lu->name; + } + + return NULL; +} + +static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr, + struct seq_file *s) +{ + const struct hisi_sas_debugfs_reg *reg = ptr; + int i; + + for (i = 0; i < reg->count; i++) { + int off = i * 4; + const char *name; + + name = hisi_sas_debugfs_to_reg_name(off, reg->base_off, + reg->lu); + + if (name) + seq_printf(s, "0x%08x 0x%08x %s\n", off, + regs_val[i], name); + else + seq_printf(s, "0x%08x 0x%08x\n", off, + regs_val[i]); + } +} + +static int 
hisi_sas_debugfs_global_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global; + + hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg, + reg_global, s); + + return 0; +} + +static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_global_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_global_fops = { + .open = hisi_sas_debugfs_global_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p) +{ + struct hisi_sas_phy *phy = s->private; + struct hisi_hba *hisi_hba = phy->hisi_hba; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port; + u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id]; + + hisi_sas_debugfs_print_reg(databuf, reg_port, s); + + return 0; +} + +static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_port_fops = { + .open = hisi_sas_debugfs_port_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_show_row_64(struct seq_file *s, int index, + int sz, __le64 *ptr) +{ + int i; + + /* completion header size not fixed per HW version */ + seq_printf(s, "index %04d:\n\t", index); + for (i = 1; i <= sz / 8; i++, ptr++) { + seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr)); + if (!(i % 2)) + seq_puts(s, "\n\t"); + } + + seq_puts(s, "\n"); + + return 0; +} + +static int hisi_sas_show_row_32(struct seq_file *s, int index, + int sz, __le32 *ptr) +{ + int i; + + /* completion header size not fixed per HW version */ + seq_printf(s, "index %04d:\n\t", index); + for (i = 1; i <= sz / 4; i++, ptr++) { + seq_printf(s, " 0x%08x", le32_to_cpu(*ptr)); + if (!(i % 4)) + seq_puts(s, "\n\t"); + } + seq_puts(s, "\n"); + + return 0; +} + +static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr) +{ + struct hisi_sas_cq *cq = cq_ptr; + struct hisi_hba *hisi_hba = cq->hisi_hba; + void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id]; + __le32 *complete_hdr = complete_queue + + (hisi_hba->hw->complete_hdr_size * slot); + + return hisi_sas_show_row_32(s, slot, + hisi_hba->hw->complete_hdr_size, + complete_hdr); +} + +static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p) +{ + struct hisi_sas_cq *cq = s->private; + int slot, ret; + + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { + ret = hisi_sas_cq_show_slot(s, slot, cq); + if (ret) + return ret; + } + return 0; +} + +static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_cq_fops = { + .open = hisi_sas_debugfs_cq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr) +{ + struct hisi_sas_dq *dq = dq_ptr; + struct hisi_hba *hisi_hba = dq->hisi_hba; + void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id]; + __le32 *cmd_hdr = cmd_queue + + sizeof(struct hisi_sas_cmd_hdr) * 
slot; + + return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), + cmd_hdr); +} + +static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p) +{ + int slot, ret; + + for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { + ret = hisi_sas_dq_show_slot(s, slot, s->private); + if (ret) + return ret; + } + return 0; +} + +static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_dq_fops = { + .open = hisi_sas_debugfs_dq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost; + int i, ret, max_command_entries = hisi_hba->hw->max_command_entries; + __le64 *iost = &debugfs_iost->qw0; + + for (i = 0; i < max_command_entries; i++, debugfs_iost++) { + ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), + iost); + if (ret) + return ret; + } + + return 0; +} + +static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_iost_fops = { + .open = hisi_sas_debugfs_iost_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p) +{ + int i, ret; + struct hisi_hba *hisi_hba = s->private; + struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct; + __le64 *itct = &debugfs_itct->qw0; + + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) { + ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), + itct); + if (ret) + return ret; + } + + return 0; +} + +static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_itct_fops = { + .open = hisi_sas_debugfs_itct_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) +{ + struct dentry *dump_dentry; + struct dentry *dentry; + char name[256]; + int p; + int c; + int d; + + /* Create dump dir inside device dir */ + dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir); + hisi_hba->debugfs_dump_dentry = dump_dentry; + + debugfs_create_file("global", 0400, dump_dentry, hisi_hba, + &hisi_sas_debugfs_global_fops); + + /* Create port dir and files */ + dentry = debugfs_create_dir("port", dump_dentry); + for (p = 0; p < hisi_hba->n_phy; p++) { + snprintf(name, 256, "%d", p); + + debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p], + &hisi_sas_debugfs_port_fops); + } + + /* Create CQ dir and files */ + dentry = debugfs_create_dir("cq", dump_dentry); + for (c = 0; c < hisi_hba->queue_count; c++) { + snprintf(name, 256, "%d", c); + + debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c], + &hisi_sas_debugfs_cq_fops); + } + + /* Create DQ dir and files */ + dentry = debugfs_create_dir("dq", dump_dentry); + for (d = 0; d < hisi_hba->queue_count; d++) { + snprintf(name, 256, "%d", d); + + debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d], + &hisi_sas_debugfs_dq_fops); + } + + debugfs_create_file("iost", 
0400, dump_dentry, hisi_hba, + &hisi_sas_debugfs_iost_fops); + + debugfs_create_file("itct", 0400, dump_dentry, hisi_hba, + &hisi_sas_debugfs_itct_fops); + + return; +} + +static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba) +{ + hisi_hba->hw->snapshot_prepare(hisi_hba); + + hisi_sas_debugfs_snapshot_global_reg(hisi_hba); + hisi_sas_debugfs_snapshot_port_reg(hisi_hba); + hisi_sas_debugfs_snapshot_cq_reg(hisi_hba); + hisi_sas_debugfs_snapshot_dq_reg(hisi_hba); + hisi_sas_debugfs_snapshot_itct_reg(hisi_hba); + hisi_sas_debugfs_snapshot_iost_reg(hisi_hba); + + hisi_sas_debugfs_create_files(hisi_hba); + + hisi_hba->hw->snapshot_restore(hisi_hba); +} + +static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hisi_hba *hisi_hba = file->f_inode->i_private; + char buf[8]; + + /* A bit racy, but don't care too much since it's only debugfs */ + if (hisi_hba->debugfs_snapshot) + return -EFAULT; + + if (count > 8) + return -EFAULT; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + + if (buf[0] != '1') + return -EFAULT; + + queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); + + return count; +} + +static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = { + .write = &hisi_sas_debugfs_trigger_dump_write, + .owner = THIS_MODULE, +}; + +void hisi_sas_debugfs_work_handler(struct work_struct *work) +{ + struct hisi_hba *hisi_hba = + container_of(work, struct hisi_hba, debugfs_work); + + if (hisi_hba->debugfs_snapshot) + return; + hisi_hba->debugfs_snapshot = true; + + hisi_sas_debugfs_snapshot_regs(hisi_hba); +} +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); + +void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) +{ + int max_command_entries = hisi_hba->hw->max_command_entries; + struct device *dev = hisi_hba->dev; + int p, i, c, d; + size_t sz; + + hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), + hisi_sas_debugfs_dir); + debugfs_create_file("trigger_dump", 0600, + hisi_hba->debugfs_dir, + hisi_hba, + &hisi_sas_debugfs_trigger_dump_fops); + + /* Alloc buffer for global */ + sz = hisi_hba->hw->debugfs_reg_global->count * 4; + hisi_hba->debugfs_global_reg = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_global_reg) + goto fail_global; + + /* Alloc buffer for port */ + sz = hisi_hba->hw->debugfs_reg_port->count * 4; + for (p = 0; p < hisi_hba->n_phy; p++) { + hisi_hba->debugfs_port_reg[p] = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_port_reg[p]) + goto fail_port; + } + + /* Alloc buffer for cq */ + sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + for (c = 0; c < hisi_hba->queue_count; c++) { + hisi_hba->debugfs_complete_hdr[c] = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_complete_hdr[c]) + goto fail_cq; + } + + /* Alloc buffer for dq */ + sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; + for (d = 0; d < hisi_hba->queue_count; d++) { + hisi_hba->debugfs_cmd_hdr[d] = + devm_kmalloc(dev, sz, GFP_KERNEL); + + if (!hisi_hba->debugfs_cmd_hdr[d]) + goto fail_iost_dq; + } + + /* Alloc buffer for iost */ + sz = max_command_entries * sizeof(struct hisi_sas_iost); + + hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_iost) + goto fail_iost_dq; + + /* Alloc buffer for itct */ + /* New memory allocation must be locate before itct */ + sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); + + hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, 
GFP_KERNEL); + if (!hisi_hba->debugfs_itct) + goto fail_itct; + + return; +fail_itct: + devm_kfree(dev, hisi_hba->debugfs_iost); +fail_iost_dq: + for (i = 0; i < d; i++) + devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]); +fail_cq: + for (i = 0; i < c; i++) + devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]); +fail_port: + for (i = 0; i < p; i++) + devm_kfree(dev, hisi_hba->debugfs_port_reg[i]); + devm_kfree(dev, hisi_hba->debugfs_global_reg); +fail_global: + debugfs_remove_recursive(hisi_hba->debugfs_dir); + dev_dbg(dev, "failed to init debugfs!\n"); +} +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); + +void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba) +{ + debugfs_remove_recursive(hisi_hba->debugfs_dir); +} +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit); + int hisi_sas_remove(struct platform_device *pdev) { struct sas_ha_struct *sha = platform_get_drvdata(pdev); @@ -2475,18 +3189,28 @@ int hisi_sas_remove(struct platform_device *pdev) } EXPORT_SYMBOL_GPL(hisi_sas_remove); +bool hisi_sas_debugfs_enable; +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); +module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); +MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)"); + static __init int hisi_sas_init(void) { hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); if (!hisi_sas_stt) return -ENOMEM; + if (hisi_sas_debugfs_enable) + hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); + return 0; } static __exit void hisi_sas_exit(void) { sas_release_transport(hisi_sas_stt); + + debugfs_remove(hisi_sas_debugfs_dir); } module_init(hisi_sas_init); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 28ab52a021cf..293807443480 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -835,7 +835,7 @@ static void phys_init_v1_hw(struct hisi_hba *hisi_hba) mod_timer(timer, jiffies + HZ); } -static void sl_notify_v1_hw(struct hisi_hba *hisi_hba, int phy_no) +static void sl_notify_ssp_v1_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; @@ -1749,6 +1749,8 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba) } } + hisi_hba->cq_nvecs = hisi_hba->queue_count; + return 0; } @@ -1826,7 +1828,7 @@ static struct scsi_host_template sht_v1_hw = { static const struct hisi_sas_hw hisi_sas_v1_hw = { .hw_init = hisi_sas_v1_init, .setup_itct = setup_itct_v1_hw, - .sl_notify = sl_notify_v1_hw, + .sl_notify_ssp = sl_notify_ssp_v1_hw, .clear_itct = clear_itct_v1_hw, .prep_smp = prep_smp_v1_hw, .prep_ssp = prep_ssp_v1_hw, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index c8ebff3ba559..e40cc6b3b67b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -868,12 +868,12 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) hisi_hba->devices[i].device_id = i; sas_dev = &hisi_hba->devices[i]; - sas_dev->dev_status = HISI_SAS_DEV_NORMAL; sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; sas_dev->sata_idx = sata_idx; sas_dev->dq = dq; + spin_lock_init(&sas_dev->lock); INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } @@ -1589,7 +1589,7 @@ static void phys_init_v2_hw(struct hisi_hba *hisi_hba) } } -static void sl_notify_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +static void sl_notify_ssp_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; @@ -2677,6 +2677,8 @@ static int phy_up_v2_hw(int 
phy_no, struct hisi_hba *hisi_hba) if (is_sata_phy_v2_hw(hisi_hba, phy_no)) goto end; + del_timer(&phy->timer); + if (phy_no == 8) { u32 port_state = hisi_sas_read32(hisi_hba, PORT_STATE); @@ -2756,6 +2758,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) struct hisi_sas_port *port = phy->port; struct device *dev = hisi_hba->dev; + del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -2944,6 +2947,9 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p) if (irq_value0 & CHL_INT0_SL_RX_BCST_ACK_MSK) phy_bcast_v2_hw(phy_no, hisi_hba); + if (irq_value0 & CHL_INT0_PHY_RDY_MSK) + hisi_sas_phy_oob_ready(hisi_hba, phy_no); + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, irq_value0 & (~CHL_INT0_HOTPLUG_TOUT_MSK) @@ -3227,6 +3233,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) unsigned long flags; int phy_no, offset; + del_timer(&phy->timer); + phy_no = sas_phy->id; initial_fis = &hisi_hba->initial_fis[phy_no]; fis = &initial_fis->fis; @@ -3393,6 +3401,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); } + hisi_hba->cq_nvecs = hisi_hba->queue_count; + return 0; free_cq_int_irqs: @@ -3542,8 +3552,8 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, return 0; } -static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, - int delay_ms, int timeout_ms) +static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; int entries, entries_old = 0, time; @@ -3557,7 +3567,12 @@ static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, msleep(delay_ms); } + if (time >= timeout_ms) + return -ETIMEDOUT; + dev_dbg(dev, "wait commands complete %dms\n", time); + + return 0; } static struct device_attribute *host_attrs_v2_hw[] = { @@ -3590,7 +3605,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .setup_itct = setup_itct_v2_hw, .slot_index_alloc = slot_index_alloc_quirk_v2_hw, .alloc_dev = alloc_dev_quirk_v2_hw, - .sl_notify = sl_notify_v2_hw, + .sl_notify_ssp = sl_notify_ssp_v2_hw, .get_wideport_bitmap = get_wideport_bitmap_v2_hw, .clear_itct = clear_itct_v2_hw, .free_device = free_device_v2_hw, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index c92b3822c408..9ec8848ec541 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -11,7 +11,7 @@ #include "hisi_sas.h" #define DRV_NAME "hisi_sas_v3_hw" -/* global registers need init*/ +/* global registers need init */ #define DLVRY_QUEUE_ENABLE 0x0 #define IOST_BASE_ADDR_LO 0x8 #define IOST_BASE_ADDR_HI 0xc @@ -186,6 +186,7 @@ #define CHL_INT0_MSK (PORT_BASE + 0x1c0) #define CHL_INT1_MSK (PORT_BASE + 0x1c4) #define CHL_INT2_MSK (PORT_BASE + 0x1c8) +#define SAS_EC_INT_COAL_TIME (PORT_BASE + 0x1cc) #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) #define SAS_RX_TRAIN_TIMER (PORT_BASE + 0x2a4) #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) @@ -205,6 +206,7 @@ #define ERR_CNT_DWS_LOST (PORT_BASE + 0x380) #define ERR_CNT_RESET_PROB (PORT_BASE + 0x384) #define ERR_CNT_INVLD_DW (PORT_BASE + 0x390) +#define ERR_CNT_CODE_ERR (PORT_BASE + 0x394) #define ERR_CNT_DISP_ERR (PORT_BASE + 0x398) #define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */ @@ -397,6 +399,11 @@ struct hisi_sas_err_record_v3 { #define USR_DATA_BLOCK_SZ_OFF 20 #define USR_DATA_BLOCK_SZ_MSK (0x3 
<< USR_DATA_BLOCK_SZ_OFF) #define T10_CHK_MSK_OFF 16 +#define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF) +#define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF) + +#define BASE_VECTORS_V3_HW 16 +#define MIN_AFFINE_VECTORS_V3_HW (BASE_VECTORS_V3_HW + 1) static bool hisi_sas_intr_conv; MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)"); @@ -406,6 +413,11 @@ static int prot_mask; module_param(prot_mask, int, 0); MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 "); +static bool auto_affine_msi_experimental; +module_param(auto_affine_msi_experimental, bool, 0444); +MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n" + "default is off"); + static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { void __iomem *regs = hisi_hba->regs + off; @@ -716,7 +728,7 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); - /* clear the itct table*/ + /* clear the itct table */ reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); @@ -868,7 +880,7 @@ static void phys_init_v3_hw(struct hisi_hba *hisi_hba) } } -static void sl_notify_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no) { u32 sl_control; @@ -967,19 +979,44 @@ static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba, hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot)); - hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); + hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF); +} + +static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_slot *slot, + struct hisi_sas_cmd_hdr *hdr, + struct scatterlist *scatter, + int n_elem) +{ + struct hisi_sas_sge_dif_page *sge_dif_page; + struct scatterlist *sg; + int i; + + sge_dif_page = hisi_sas_sge_dif_addr_mem(slot); + + for_each_sg(scatter, sg, n_elem, i) { + struct hisi_sas_sge *entry = &sge_dif_page->sge[i]; + + entry->addr = cpu_to_le64(sg_dma_address(sg)); + entry->page_ctrl_0 = 0; + entry->page_ctrl_1 = 0; + entry->data_len = cpu_to_le32(sg_dma_len(sg)); + entry->data_off = 0; + } + + hdr->dif_prd_table_addr = + cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot)); + + hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF); } static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd) { unsigned char prot_flags = scsi_cmnd->prot_flags; - if (prot_flags & SCSI_PROT_TRANSFER_PI) { - if (prot_flags & SCSI_PROT_REF_CHECK) - return 0xc << 16; - return 0xfc << 16; - } - return 0; + if (prot_flags & SCSI_PROT_REF_CHECK) + return T10_CHK_APP_TAG_MSK; + return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK; } static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd, @@ -990,15 +1027,33 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd, u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request); switch (prot_op) { + case SCSI_PROT_READ_INSERT: + prot->dw0 |= T10_INSRT_EN_MSK; + prot->lbrtgv = lbrt_chk_val; + break; case SCSI_PROT_READ_STRIP: prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK); prot->lbrtcv = lbrt_chk_val; prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); break; + case SCSI_PROT_READ_PASS: + prot->dw0 |= T10_CHK_EN_MSK; + prot->lbrtcv = lbrt_chk_val; + prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); + break; case SCSI_PROT_WRITE_INSERT: prot->dw0 |= T10_INSRT_EN_MSK; prot->lbrtgv = lbrt_chk_val; break; + case SCSI_PROT_WRITE_STRIP: + prot->dw0 |= (T10_RMV_EN_MSK | 
T10_CHK_EN_MSK); + prot->lbrtcv = lbrt_chk_val; + break; + case SCSI_PROT_WRITE_PASS: + prot->dw0 |= T10_CHK_EN_MSK; + prot->lbrtcv = lbrt_chk_val; + prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd); + break; default: WARN(1, "prot_op(0x%x) is not valid\n", prot_op); break; @@ -1033,8 +1088,8 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, struct sas_ssp_task *ssp_task = &task->ssp_task; struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; struct hisi_sas_tmf_task *tmf = slot->tmf; - unsigned char prot_op = scsi_get_prot_op(scsi_cmnd); int has_data = 0, priority = !!tmf; + unsigned char prot_op; u8 *buf_cmd; u32 dw1 = 0, dw2 = 0, len = 0; @@ -1049,6 +1104,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF; dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF; } else { + prot_op = scsi_get_prot_op(scsi_cmnd); dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF; switch (scsi_cmnd->sc_data_direction) { case DMA_TO_DEVICE: @@ -1074,9 +1130,15 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, hdr->dw2 = cpu_to_le32(dw2); hdr->transfer_tags = cpu_to_le32(slot->idx); - if (has_data) + if (has_data) { prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter, - slot->n_elem); + slot->n_elem); + + if (scsi_prot_sg_count(scsi_cmnd)) + prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr, + scsi_prot_sglist(scsi_cmnd), + slot->n_elem_dif); + } hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot)); hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot)); @@ -1117,18 +1179,19 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba, fill_prot_v3_hw(scsi_cmnd, &prot); memcpy(buf_cmd_prot, &prot, sizeof(struct hisi_sas_protect_iu_v3_hw)); - /* * For READ, we need length of info read to memory, while for * WRITE we need length of data written to the disk. 
*/ - if (prot_op == SCSI_PROT_WRITE_INSERT) { + if (prot_op == SCSI_PROT_WRITE_INSERT || + prot_op == SCSI_PROT_READ_INSERT || + prot_op == SCSI_PROT_WRITE_PASS || + prot_op == SCSI_PROT_READ_PASS) { unsigned int interval = scsi_prot_interval(scsi_cmnd); unsigned int ilog2_interval = ilog2(interval); len = (task->total_xfer_len >> ilog2_interval) * 8; } - } hdr->dw1 = cpu_to_le32(dw1); @@ -1288,6 +1351,7 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) struct device *dev = hisi_hba->dev; unsigned long flags; + del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA); @@ -1381,9 +1445,11 @@ end: static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; u32 phy_state, sl_ctrl, txid_auto; struct device *dev = hisi_hba->dev; + del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -1552,6 +1618,19 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value); } +static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0); + + if (irq_value0 & CHL_INT0_PHY_RDY_MSK) + hisi_sas_phy_oob_ready(hisi_hba, phy_no); + + hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, + irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK) + & (~CHL_INT0_SL_PHY_ENABLE_MSK) + & (~CHL_INT0_NOT_RDY_MSK)); +} + static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) { struct hisi_hba *hisi_hba = p; @@ -1562,8 +1641,8 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) & 0xeeeeeeee; while (irq_msk) { - u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, - CHL_INT0); + if (irq_msk & (2 << (phy_no * 4))) + handle_chl_int0_v3_hw(hisi_hba, phy_no); if (irq_msk & (4 << (phy_no * 4))) handle_chl_int1_v3_hw(hisi_hba, phy_no); @@ -1571,13 +1650,6 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) if (irq_msk & (8 << (phy_no * 4))) handle_chl_int2_v3_hw(hisi_hba, phy_no); - if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { - hisi_sas_phy_write32(hisi_hba, phy_no, - CHL_INT0, irq_value0 - & (~CHL_INT0_SL_RX_BCST_ACK_MSK) - & (~CHL_INT0_SL_PHY_ENABLE_MSK) - & (~CHL_INT0_NOT_RDY_MSK)); - } irq_msk &= ~(0xe << (phy_no * 4)); phy_no++; } @@ -1644,6 +1716,7 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) u32 irq_value, irq_msk; struct hisi_hba *hisi_hba = p; struct device *dev = hisi_hba->dev; + struct pci_dev *pdev = hisi_hba->pci_dev; int i; irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3); @@ -1675,6 +1748,17 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) error->msg, irq_value); queue_work(hisi_hba->wq, &hisi_hba->rst_work); } + + if (pdev->revision < 0x21) { + u32 reg_val; + + reg_val = hisi_sas_read32(hisi_hba, + AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, reg_val); + } } if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) { @@ -1959,21 +2043,68 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) return IRQ_HANDLED; } +static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs) +{ + const struct cpumask *mask; + int queue, cpu; + + for (queue = 0; queue < nvecs; queue++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[queue]; + + mask = 
pci_irq_get_affinity(hisi_hba->pci_dev, queue + + BASE_VECTORS_V3_HW); + if (!mask) + goto fallback; + cq->pci_irq_mask = mask; + for_each_cpu(cpu, mask) + hisi_hba->reply_map[cpu] = queue; + } + return; + +fallback: + for_each_possible_cpu(cpu) + hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count; + /* Don't clean all CQ masks */ +} + static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; struct pci_dev *pdev = hisi_hba->pci_dev; int vectors, rc; int i, k; - int max_msi = HISI_SAS_MSI_COUNT_V3_HW; - - vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, 1, - max_msi, PCI_IRQ_MSI); - if (vectors < max_msi) { - dev_err(dev, "could not allocate all msi (%d)\n", vectors); - return -ENOENT; + int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi; + + if (auto_affine_msi_experimental) { + struct irq_affinity desc = { + .pre_vectors = BASE_VECTORS_V3_HW, + }; + + min_msi = MIN_AFFINE_VECTORS_V3_HW; + + hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids, + sizeof(unsigned int), + GFP_KERNEL); + if (!hisi_hba->reply_map) + return -ENOMEM; + vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev, + min_msi, max_msi, + PCI_IRQ_MSI | + PCI_IRQ_AFFINITY, + &desc); + if (vectors < 0) + return -ENOENT; + setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW); + } else { + min_msi = max_msi; + vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi, + max_msi, PCI_IRQ_MSI); + if (vectors < 0) + return vectors; } + hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW; + rc = devm_request_irq(dev, pci_irq_vector(pdev, 1), int_phy_up_down_bcast_v3_hw, 0, DRV_NAME " phy", hisi_hba); @@ -2002,7 +2133,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) } /* Init tasklets for cq only */ - for (i = 0; i < hisi_hba->queue_count; i++) { + for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; struct tasklet_struct *t = &cq->tasklet; int nr = hisi_sas_intr_conv ? 
16 : 16 + i; @@ -2201,8 +2332,8 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, return 0; } -static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, - int delay_ms, int timeout_ms) +static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; int entries, entries_old = 0, time; @@ -2216,7 +2347,12 @@ static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, msleep(delay_ms); } + if (time >= timeout_ms) + return -ETIMEDOUT; + dev_dbg(dev, "wait commands complete %dms\n", time); + + return 0; } static ssize_t intr_conv_v3_hw_show(struct device *dev, @@ -2332,6 +2468,159 @@ static struct device_attribute *host_attrs_v3_hw[] = { NULL }; +static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(PHY_CFG), + HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE), + HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE), + HISI_SAS_DEBUGFS_REG(PHY_CTRL), + HISI_SAS_DEBUGFS_REG(SL_CFG), + HISI_SAS_DEBUGFS_REG(AIP_LIMIT), + HISI_SAS_DEBUGFS_REG(SL_CONTROL), + HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5), + HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6), + HISI_SAS_DEBUGFS_REG(TXID_AUTO), + HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0), + HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H), + HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER), + HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE), + HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER), + HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG), + HISI_SAS_DEBUGFS_REG(CHL_INT0), + HISI_SAS_DEBUGFS_REG(CHL_INT1), + HISI_SAS_DEBUGFS_REG(CHL_INT2), + HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK), + HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK), + HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK), + HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN), + HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER), + HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK), + HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK), + HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK), + HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS), + HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS), + HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME), + HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST), + HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB), + HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW), + HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR), + HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_port_reg = { + .lu = debugfs_port_reg_lu, + .count = 0x100, + .base_off = PORT_BASE, + .read_port_reg = hisi_sas_phy_read32, +}; + +static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE), + HISI_SAS_DEBUGFS_REG(PHY_CONTEXT), + HISI_SAS_DEBUGFS_REG(PHY_STATE), + HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA), + HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE), + HISI_SAS_DEBUGFS_REG(ITCT_CLR), + HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO), + HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI), + HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO), + HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI), + HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG), + HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL), + 
HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL), + HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME), + HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE), + HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME), + HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME), + HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME), + HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN), + HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME), + HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2), + HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT), + HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE), + HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS), + HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS), + HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO), + HISI_SAS_DEBUGFS_REG(INT_COAL_EN), + HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT), + HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME), + HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT), + HISI_SAS_DEBUGFS_REG(OQ_INT_SRC), + HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2), + HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3), + HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK), + HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK), + HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK), + HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR), + HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK), + HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN), + HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR), + HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR), + HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG), + HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR), + HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR), + HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG), + HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG), + HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1), + HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1), + HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_global_reg = { + .lu = debugfs_global_reg_lu, + .count = 0x800, + .read_global_reg = hisi_sas_read32, +}; + +static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + + set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); + + if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT) + dev_dbg(dev, "Wait commands complete timeout!\n"); + + hisi_sas_kill_tasklets(hisi_hba); +} + +static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) +{ + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, + (u32)((1ULL << hisi_hba->queue_count) - 1)); + + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); +} + static struct scsi_host_template sht_v3_hw = { .name = DRV_NAME, .module = THIS_MODULE, @@ -2344,6 +2633,7 @@ static struct scsi_host_template sht_v3_hw = { .bios_param = sas_bios_param, .this_id = -1, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, + .sg_prot_tablesize = HISI_SAS_SGE_PAGE_CNT, .max_sectors = SCSI_DEFAULT_MAX_SECTORS, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_target_reset_handler = sas_eh_target_reset_handler, @@ -2360,7 +2650,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .get_wideport_bitmap = get_wideport_bitmap_v3_hw, .complete_hdr_size = 
sizeof(struct hisi_sas_complete_v3_hdr), .clear_itct = clear_itct_v3_hw, - .sl_notify = sl_notify_v3_hw, + .sl_notify_ssp = sl_notify_ssp_v3_hw, .prep_ssp = prep_ssp_v3_hw, .prep_smp = prep_smp_v3_hw, .prep_stp = prep_ata_v3_hw, @@ -2380,6 +2670,10 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .get_events = phy_get_events_v3_hw, .write_gpio = write_gpio_v3_hw, .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, + .debugfs_reg_global = &debugfs_global_reg, + .debugfs_reg_port = &debugfs_port_reg, + .snapshot_prepare = debugfs_snapshot_prepare_v3_hw, + .snapshot_restore = debugfs_snapshot_restore_v3_hw, }; static struct Scsi_Host * @@ -2397,6 +2691,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) hisi_hba = shost_priv(shost); INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); + INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler); hisi_hba->hw = &hisi_sas_v3_hw; hisi_hba->pci_dev = pdev; hisi_hba->dev = dev; @@ -2414,7 +2709,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; - if (hisi_sas_alloc(hisi_hba, shost)) { + if (hisi_sas_alloc(hisi_hba)) { hisi_sas_free(hisi_hba); goto err_out; } @@ -2447,10 +2742,12 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) goto err_out_disable_device; - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (rc) + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (rc) { dev_err(dev, "No usable DMA addressing method\n"); - rc = -EIO; + rc = -ENODEV; goto err_out_regions; } @@ -2511,8 +2808,14 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n", prot_mask); scsi_host_set_prot(hisi_hba->shost, prot_mask); + if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) + scsi_host_set_guard(hisi_hba->shost, + SHOST_DIX_GUARD_CRC); } + if (hisi_sas_debugfs_enable) + hisi_sas_debugfs_init(hisi_hba); + rc = scsi_add_host(shost, dev); if (rc) goto err_out_ha; @@ -2549,7 +2852,7 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) free_irq(pci_irq_vector(pdev, 1), hisi_hba); free_irq(pci_irq_vector(pdev, 2), hisi_hba); free_irq(pci_irq_vector(pdev, 11), hisi_hba); - for (i = 0; i < hisi_hba->queue_count; i++) { + for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; int nr = hisi_sas_intr_conv ? 
16 : 16 + i; @@ -2565,6 +2868,8 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->core.shost; + hisi_sas_debugfs_exit(hisi_hba); + if (timer_pending(&hisi_hba->timer)) del_timer(&hisi_hba->timer); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index ff67ef5d5347..f044e7d10d63 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -251,10 +251,11 @@ static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg); +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg); #ifdef CONFIG_COMPAT -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, void __user *arg); #endif @@ -1327,7 +1328,7 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, dev_warn(&h->pdev->dev, "physical device with no LUN=0," " suspect firmware bug or unsupported hardware " "configuration.\n"); - return -1; + return -1; } lun_assigned: @@ -4110,7 +4111,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, "maximum logical LUNs (%d) exceeded. " "%d LUNs ignored.\n", HPSA_MAX_LUN, *nlogicals - HPSA_MAX_LUN); - *nlogicals = HPSA_MAX_LUN; + *nlogicals = HPSA_MAX_LUN; } if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { dev_warn(&h->pdev->dev, @@ -6127,7 +6128,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c) #ifdef CONFIG_COMPAT -static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, +static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd, void __user *arg) { IOCTL32_Command_struct __user *arg32 = @@ -6164,7 +6165,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, } static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, - int cmd, void __user *arg) + unsigned int cmd, void __user *arg) { BIG_IOCTL32_Command_struct __user *arg32 = (BIG_IOCTL32_Command_struct __user *) arg; @@ -6201,7 +6202,8 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, return err; } -static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) { switch (cmd) { case CCISS_GETPCIINFO: @@ -6521,7 +6523,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h, /* * ioctl */ -static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) { struct ctlr_info *h; void __user *argp = (void __user *)arg; diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 3eedfd4f8f57..251c084a6ff0 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -1292,6 +1292,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) dma_addr_t start_phy; void *start_virt; u32 offset, i, req_size; + int rc; dprintk("hptiop_probe(%p)\n", pcidev); @@ -1308,9 +1309,12 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id) /* Enable 64bit DMA if possible */ iop_ops = (struct hptiop_adapter_ops *)id->driver_data; - if (dma_set_mask(&pcidev->dev, - DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)) || - dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32))) { + rc = dma_set_mask(&pcidev->dev, + DMA_BIT_MASK(iop_ops->hw_dma_bit_mask)); + if (rc) + rc = 
dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32)); + + if (rc) { printk(KERN_ERR "hptiop: fail to set dma_mask\n"); goto disable_pci_device; } diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index cc9cae469c4b..7ca277e28d63 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -3788,11 +3788,6 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd) return 0; } -static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd) -{ - return 0; -} - static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl) { } @@ -4053,7 +4048,6 @@ static const struct target_core_fabric_ops ibmvscsis_ops = { .release_cmd = ibmvscsis_release_cmd, .sess_get_index = ibmvscsis_sess_get_index, .write_pending = ibmvscsis_write_pending, - .write_pending_status = ibmvscsis_write_pending_status, .set_default_node_attributes = ibmvscsis_set_default_node_attrs, .get_cmd_state = ibmvscsis_get_cmd_state, .queue_data_in = ibmvscsis_queue_data_in, diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index d1b4025a4503..6d053e220153 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6696,7 +6696,8 @@ err_nodev: * Return value: * 0 on success / other on failure **/ -static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { struct ipr_resource_entry *res; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index cae6368ebb98..ed3debce2819 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -518,7 +518,7 @@ static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task, if (!task->sc) iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count); else { - struct scsi_data_buffer *sdb = scsi_out(task->sc); + struct scsi_data_buffer *sdb = &task->sc->sdb; err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl, sdb->table.nents, offset, @@ -952,12 +952,6 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) return 0; } -static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev) -{ - blk_queue_flag_set(QUEUE_FLAG_BIDI, sdev->request_queue); - return 0; -} - static int iscsi_sw_tcp_slave_configure(struct scsi_device *sdev) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(sdev->host); @@ -985,7 +979,6 @@ static struct scsi_host_template iscsi_sw_tcp_sht = { .eh_device_reset_handler= iscsi_eh_device_reset, .eh_target_reset_handler = iscsi_eh_recover_target, .dma_boundary = PAGE_SIZE - 1, - .slave_alloc = iscsi_sw_tcp_slave_alloc, .slave_configure = iscsi_sw_tcp_slave_configure, .target_alloc = iscsi_target_alloc, .proc_name = "iscsi_tcp", diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index be83590ed955..ff943f477d6f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_frame_payload_op(fp) != ELS_LS_ACC) { FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } flp = fc_frame_payload_get(fp, sizeof(*flp)); if (!flp) { FC_LPORT_DBG(lport, "FLOGI bad response\n"); fc_lport_error(lport, fp); - goto err; + goto out; } mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " "lport->mfs:%hu\n", mfs, lport->mfs); fc_lport_error(lport, fp); - goto 
err; + goto out; } if (mfs <= lport->mfs) { diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 9192a1d9dec6..dfba4921b265 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref) struct fc_rport_priv *rdata; rdata = container_of(kref, struct fc_rport_priv, kref); - WARN_ON(!list_empty(&rdata->peers)); kfree_rcu(rdata, rcu); } EXPORT_SYMBOL(fc_rport_destroy); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b8d325ce8754..21309d5b456d 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -228,32 +228,6 @@ static int iscsi_prep_ecdb_ahs(struct iscsi_task *task) return 0; } -static int iscsi_prep_bidi_ahs(struct iscsi_task *task) -{ - struct scsi_cmnd *sc = task->sc; - struct iscsi_rlength_ahdr *rlen_ahdr; - int rc; - - rlen_ahdr = iscsi_next_hdr(task); - rc = iscsi_add_hdr(task, sizeof(*rlen_ahdr)); - if (rc) - return rc; - - rlen_ahdr->ahslength = - cpu_to_be16(sizeof(rlen_ahdr->read_length) + - sizeof(rlen_ahdr->reserved)); - rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH; - rlen_ahdr->reserved = 0; - rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length); - - ISCSI_DBG_SESSION(task->conn->session, - "bidi-in rlen_ahdr->read_length(%d) " - "rlen_ahdr->ahslength(%d)\n", - be32_to_cpu(rlen_ahdr->read_length), - be16_to_cpu(rlen_ahdr->ahslength)); - return 0; -} - /** * iscsi_check_tmf_restrictions - check if a task is affected by TMF * @task: iscsi task @@ -392,13 +366,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) memcpy(hdr->cdb, sc->cmnd, cmd_len); task->imm_count = 0; - if (scsi_bidi_cmnd(sc)) { - hdr->flags |= ISCSI_FLAG_CMD_READ; - rc = iscsi_prep_bidi_ahs(task); - if (rc) - return rc; - } - if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) task->protected = true; @@ -473,12 +440,10 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) conn->scsicmd_pdus_cnt++; ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " - "itt 0x%x len %d bidi_len %d cmdsn %d win %d]\n", - scsi_bidi_cmnd(sc) ? "bidirectional" : + "itt 0x%x len %d cmdsn %d win %d]\n", sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", conn->id, sc, sc->cmnd[0], task->itt, transfer_length, - scsi_bidi_cmnd(sc) ? 
scsi_in(sc)->length : 0, session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); return 0; @@ -647,12 +612,7 @@ static void fail_scsi_task(struct iscsi_task *task, int err) state = ISCSI_TASK_ABRT_TMF; sc->result = err << 16; - if (!scsi_bidi_cmnd(sc)) - scsi_set_resid(sc, scsi_bufflen(sc)); - else { - scsi_out(sc)->resid = scsi_out(sc)->length; - scsi_in(sc)->resid = scsi_in(sc)->length; - } + scsi_set_resid(sc, scsi_bufflen(sc)); /* regular RX path uses back_lock */ spin_lock_bh(&conn->session->back_lock); @@ -907,14 +867,7 @@ invalid_datalen: if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW | ISCSI_FLAG_CMD_BIDI_OVERFLOW)) { - int res_count = be32_to_cpu(rhdr->bi_residual_count); - - if (scsi_bidi_cmnd(sc) && res_count > 0 && - (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW || - res_count <= scsi_in(sc)->length)) - scsi_in(sc)->resid = res_count; - else - sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; + sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW | @@ -961,8 +914,8 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (res_count > 0 && (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW || - res_count <= scsi_in(sc)->length)) - scsi_in(sc)->resid = res_count; + res_count <= sc->sdb.length)) + scsi_set_resid(sc, res_count); else sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } @@ -1459,7 +1412,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) return -ENODATA; + spin_lock_bh(&conn->session->back_lock); + if (conn->task == NULL) { + spin_unlock_bh(&conn->session->back_lock); + return -ENODATA; + } __iscsi_get_task(task); + spin_unlock_bh(&conn->session->back_lock); spin_unlock_bh(&conn->session->frwd_lock); rc = conn->session->tt->xmit_task(task); spin_lock_bh(&conn->session->frwd_lock); @@ -1804,12 +1763,7 @@ fault: spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason); - if (!scsi_bidi_cmnd(sc)) - scsi_set_resid(sc, scsi_bufflen(sc)); - else { - scsi_out(sc)->resid = scsi_out(sc)->length; - scsi_in(sc)->resid = scsi_in(sc)->length; - } + scsi_set_resid(sc, scsi_bufflen(sc)); sc->scsi_done(sc); return 0; } diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 8a6b1b3f8277..9923e9e3b884 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -495,7 +495,7 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr; int datasn = be32_to_cpu(rhdr->datasn); - unsigned total_in_length = scsi_in(task->sc)->length; + unsigned total_in_length = task->sc->sdb.length; /* * lib iscsi will update this in the completion handling if there @@ -580,11 +580,11 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) data_length, session->max_burst); data_offset = be32_to_cpu(rhdr->data_offset); - if (data_offset + data_length > scsi_out(task->sc)->length) { + if (data_offset + data_length > task->sc->sdb.length) { iscsi_conn_printk(KERN_ERR, conn, "invalid R2T with data len %u at offset %u " "and total length %d\n", data_length, - data_offset, scsi_out(task->sc)->length); + data_offset, task->sc->sdb.length); return ISCSI_ERR_DATALEN; } @@ -696,7 +696,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) if (tcp_conn->in.datalen) { struct iscsi_tcp_task *tcp_task = 
task->dd_data; struct ahash_request *rx_hash = NULL; - struct scsi_data_buffer *sdb = scsi_in(task->sc); + struct scsi_data_buffer *sdb = &task->sc->sdb; /* * Setup copy of Data-In into the struct scsi_cmnd diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 17eb4185f29d..17b45a0c7bc3 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "sas_internal.h" @@ -614,7 +615,14 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id, } res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp,PC_RESP_SIZE); - + if (res) { + pr_err("ex %016llx phy%02d PHY control failed: %d\n", + SAS_ADDR(dev->sas_addr), phy_id, res); + } else if (pc_resp[2] != SMP_RESP_FUNC_ACC) { + pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n", + SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]); + res = pc_resp[2]; + } kfree(pc_resp); kfree(pc_req); return res; @@ -689,10 +697,10 @@ int sas_smp_get_phy_events(struct sas_phy *phy) if (res) goto out; - phy->invalid_dword_count = scsi_to_u32(&resp[12]); - phy->running_disparity_error_count = scsi_to_u32(&resp[16]); - phy->loss_of_dword_sync_count = scsi_to_u32(&resp[20]); - phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); + phy->invalid_dword_count = get_unaligned_be32(&resp[12]); + phy->running_disparity_error_count = get_unaligned_be32(&resp[16]); + phy->loss_of_dword_sync_count = get_unaligned_be32(&resp[20]); + phy->phy_reset_problem_count = get_unaligned_be32(&resp[24]); out: kfree(req); @@ -817,6 +825,26 @@ static struct domain_device *sas_ex_discover_end_dev( #ifdef CONFIG_SCSI_SAS_ATA if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { + if (child->linkrate > parent->min_linkrate) { + struct sas_phy_linkrates rates = { + .maximum_linkrate = parent->min_linkrate, + .minimum_linkrate = parent->min_linkrate, + }; + int ret; + + pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n", + SAS_ADDR(child->sas_addr), phy_id); + ret = sas_smp_phy_control(parent, phy_id, + PHY_FUNC_LINK_RESET, &rates); + if (ret) { + pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n", + SAS_ADDR(child->sas_addr), phy_id, ret); + goto out_free; + } + pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n", + SAS_ADDR(child->sas_addr), phy_id); + child->linkrate = child->min_linkrate; + } res = sas_get_ata_info(child, phy); if (res) goto out_free; @@ -828,6 +856,7 @@ static struct domain_device *sas_ex_discover_end_dev( rphy = sas_end_device_alloc(phy->port); if (!rphy) goto out_free; + rphy->identify.phy_identifier = phy_id; child->rphy = rphy; get_device(&rphy->dev); @@ -854,6 +883,7 @@ static struct domain_device *sas_ex_discover_end_dev( child->rphy = rphy; get_device(&rphy->dev); + rphy->identify.phy_identifier = phy_id; sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index c43a00a9d819..b775445892af 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -799,7 +799,7 @@ out: shost->host_failed, tries); } -int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg) { struct domain_device *dev = sdev_to_domain_dev(sdev); diff --git 
a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index ebdfe5b26937..41d849f283f6 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -84,8 +84,6 @@ struct lpfc_sli2_slim; #define LPFC_HB_MBOX_INTERVAL 5 /* Heart beat interval in seconds. */ #define LPFC_HB_MBOX_TIMEOUT 30 /* Heart beat timeout in seconds. */ -#define LPFC_LOOK_AHEAD_OFF 0 /* Look ahead logic is turned off */ - /* Error Attention event polling interval */ #define LPFC_ERATT_POLL_INTERVAL 5 /* EATT poll interval in seconds */ @@ -146,6 +144,7 @@ struct lpfc_nvmet_ctxbuf { struct lpfc_nvmet_rcv_ctx *context; struct lpfc_iocbq *iocbq; struct lpfc_sglq *sglq; + struct work_struct defer_work; }; struct lpfc_dma_pool { @@ -235,8 +234,6 @@ typedef struct lpfc_vpd { } sli3Feat; } lpfc_vpd_t; -struct lpfc_scsi_buf; - /* * lpfc stat counters @@ -466,6 +463,7 @@ struct lpfc_vport { uint32_t cfg_use_adisc; uint32_t cfg_discovery_threads; uint32_t cfg_log_verbose; + uint32_t cfg_enable_fc4_type; uint32_t cfg_max_luns; uint32_t cfg_enable_da_id; uint32_t cfg_max_scsicmpl_time; @@ -479,6 +477,7 @@ struct lpfc_vport { struct dentry *debug_disc_trc; struct dentry *debug_nodelist; struct dentry *debug_nvmestat; + struct dentry *debug_scsistat; struct dentry *debug_nvmektime; struct dentry *debug_cpucheck; struct dentry *vport_debugfs_root; @@ -596,6 +595,13 @@ struct lpfc_mbox_ext_buf_ctx { struct list_head ext_dmabuf_list; }; +struct lpfc_epd_pool { + /* Expedite pool */ + struct list_head list; + u32 count; + spinlock_t lock; /* lock for expedite pool */ +}; + struct lpfc_ras_fwlog { uint8_t *fwlog_buff; uint32_t fw_buffcount; /* Buffer size posted to FW */ @@ -617,20 +623,19 @@ struct lpfc_ras_fwlog { struct lpfc_hba { /* SCSI interface function jump table entries */ - int (*lpfc_new_scsi_buf) - (struct lpfc_vport *, int); - struct lpfc_scsi_buf * (*lpfc_get_scsi_buf) - (struct lpfc_hba *, struct lpfc_nodelist *); + struct lpfc_io_buf * (*lpfc_get_scsi_buf) + (struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd); int (*lpfc_scsi_prep_dma_buf) - (struct lpfc_hba *, struct lpfc_scsi_buf *); + (struct lpfc_hba *, struct lpfc_io_buf *); void (*lpfc_scsi_unprep_dma_buf) - (struct lpfc_hba *, struct lpfc_scsi_buf *); + (struct lpfc_hba *, struct lpfc_io_buf *); void (*lpfc_release_scsi_buf) - (struct lpfc_hba *, struct lpfc_scsi_buf *); + (struct lpfc_hba *, struct lpfc_io_buf *); void (*lpfc_rampdown_queue_depth) (struct lpfc_hba *); void (*lpfc_scsi_prep_cmnd) - (struct lpfc_vport *, struct lpfc_scsi_buf *, + (struct lpfc_vport *, struct lpfc_io_buf *, struct lpfc_nodelist *); /* IOCB interface function jump table entries */ @@ -673,13 +678,17 @@ struct lpfc_hba { (struct lpfc_hba *); int (*lpfc_bg_scsi_prep_dma_buf) - (struct lpfc_hba *, struct lpfc_scsi_buf *); + (struct lpfc_hba *, struct lpfc_io_buf *); /* Add new entries here */ + /* expedite pool */ + struct lpfc_epd_pool epd_pool; + /* SLI4 specific HBA data structure */ struct lpfc_sli4_hba sli4_hba; struct workqueue_struct *wq; + struct delayed_work 
eq_delay_work; struct lpfc_sli sli; uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */ @@ -713,7 +722,6 @@ struct lpfc_hba { #define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ -#define FCP_XRI_ABORT_EVENT 0x20 #define ELS_XRI_ABORT_EVENT 0x40 #define ASYNC_EVENT 0x80 #define LINK_DISABLED 0x100 /* Link disabled by user */ @@ -784,12 +792,12 @@ struct lpfc_hba { uint8_t nvmet_support; /* driver supports NVMET */ #define LPFC_NVMET_MAX_PORTS 32 uint8_t mds_diags_support; - uint32_t initial_imax; uint8_t bbcredit_support; uint8_t enab_exp_wqcq_pages; /* HBA Config Parameters */ uint32_t cfg_ack0; + uint32_t cfg_xri_rebalancing; uint32_t cfg_enable_npiv; uint32_t cfg_enable_rrq; uint32_t cfg_topology; @@ -811,12 +819,14 @@ struct lpfc_hba { uint32_t cfg_use_msi; uint32_t cfg_auto_imax; uint32_t cfg_fcp_imax; + uint32_t cfg_cq_poll_threshold; + uint32_t cfg_cq_max_proc_limit; uint32_t cfg_fcp_cpu_map; - uint32_t cfg_fcp_io_channel; + uint32_t cfg_hdw_queue; + uint32_t cfg_irq_chann; uint32_t cfg_suppress_rsp; uint32_t cfg_nvme_oas; uint32_t cfg_nvme_embed_cmd; - uint32_t cfg_nvme_io_channel; uint32_t cfg_nvmet_mrq_post; uint32_t cfg_nvmet_mrq; uint32_t cfg_enable_nvmet; @@ -852,6 +862,7 @@ struct lpfc_hba { uint32_t cfg_prot_guard; uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; + uint32_t cfg_enable_fc4_type; uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; uint32_t cfg_request_firmware_upgrade; @@ -872,15 +883,12 @@ struct lpfc_hba { uint32_t cfg_ras_fwlog_level; uint32_t cfg_ras_fwlog_buffsize; uint32_t cfg_ras_fwlog_func; - uint32_t cfg_enable_fc4_type; uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */ uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */ - uint32_t cfg_xri_split; #define LPFC_ENABLE_FCP 1 #define LPFC_ENABLE_NVME 2 #define LPFC_ENABLE_BOTH 3 uint32_t cfg_enable_pbde; - uint32_t io_channel_irqs; /* number of irqs for io channels */ struct nvmet_fc_target_port *targetport; lpfc_vpd_t vpd; /* vital product data */ @@ -952,14 +960,6 @@ struct lpfc_hba { struct timer_list eratt_poll; uint32_t eratt_poll_interval; - /* - * stat counters - */ - atomic_t fc4ScsiInputRequests; - atomic_t fc4ScsiOutputRequests; - atomic_t fc4ScsiControlRequests; - atomic_t fc4ScsiIoCmpls; - uint64_t bg_guard_err_cnt; uint64_t bg_apptag_err_cnt; uint64_t bg_reftag_err_cnt; @@ -970,13 +970,6 @@ struct lpfc_hba { struct list_head lpfc_scsi_buf_list_get; struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; - spinlock_t nvme_buf_list_get_lock; /* NVME buf alloc list lock */ - spinlock_t nvme_buf_list_put_lock; /* NVME buf free list lock */ - struct list_head lpfc_nvme_buf_list_get; - struct list_head lpfc_nvme_buf_list_put; - uint32_t total_nvme_bufs; - uint32_t get_nvme_bufs; - uint32_t put_nvme_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; struct list_head active_rrq_list; @@ -1033,6 +1026,7 @@ struct lpfc_hba { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct dentry *hba_debugfs_root; atomic_t debugfs_vport_count; + struct dentry *debug_multixri_pools; struct dentry *debug_hbqinfo; struct dentry *debug_dumpHostSlim; struct dentry *debug_dumpHBASlim; @@ -1050,6 +1044,10 @@ struct lpfc_hba { struct dentry *debug_nvmeio_trc; struct lpfc_debugfs_nvmeio_trc *nvmeio_trc; + struct dentry *debug_hdwqinfo; +#ifdef LPFC_HDWQ_LOCK_STAT + struct dentry *debug_lockstat; +#endif atomic_t nvmeio_trc_cnt; 
uint32_t nvmeio_trc_size; uint32_t nvmeio_trc_output_idx; @@ -1090,7 +1088,6 @@ struct lpfc_hba { uint8_t temp_sensor_support; /* Fields used for heart beat. */ - unsigned long last_eqdelay_time; unsigned long last_completion_time; unsigned long skipped_hb; struct timer_list hb_tmofunc; @@ -1164,16 +1161,12 @@ struct lpfc_hba { uint16_t sfp_warning; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS -#define LPFC_CHECK_CPU_CNT 32 - uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; - uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT]; uint16_t cpucheck_on; #define LPFC_CHECK_OFF 0 #define LPFC_CHECK_NVME_IO 1 #define LPFC_CHECK_NVMET_RCV 2 #define LPFC_CHECK_NVMET_IO 4 +#define LPFC_CHECK_SCSI_IO 8 uint16_t ktime_on; uint64_t ktime_data_samples; uint64_t ktime_status_samples; @@ -1297,3 +1290,23 @@ lpfc_phba_elsring(struct lpfc_hba *phba) } return &phba->sli.sli3_ring[LPFC_ELS_RING]; } + +/** + * lpfc_sli4_mod_hba_eq_delay - update EQ delay + * @phba: Pointer to HBA context object. + * @q: The Event Queue to update. + * @delay: The delay value (in us) to be written. + * + **/ +static inline void +lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq, + u32 delay) +{ + struct lpfc_register reg_data; + + reg_data.word0 = 0; + bf_set(lpfc_sliport_eqdelay_id, ®_data, eq->queue_id); + bf_set(lpfc_sliport_eqdelay_delay, ®_data, delay); + writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr); + eq->q_mode = delay; +} diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 4bae72cbf3f6..ce3e541434dc 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -64,9 +64,6 @@ #define LPFC_MIN_MRQ_POST 512 #define LPFC_MAX_MRQ_POST 2048 -#define LPFC_MAX_NVME_INFO_TMP_LEN 100 -#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n" - /* * Write key size should be multiple of 4. If write key is changed * make sure that library write key is also changed. 
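The lpfc_attr.c hunks that follow rework the FC4 I/O statistics: the driver-global atomic counters are dropped in favour of per-hardware-queue lpfc_fc4_ctrl_stat counters, lpfc_nvme_info_show() now sums them across phba->cfg_hdw_queue, and a new scsi_stat attribute (lpfc_scsi_stat_show()) reports the same totals for SCSI. The snippet below is only an illustrative standalone-C sketch of that accumulation pattern; struct fc4_ctrl_stat and report_totals() are simplified stand-ins, not the driver's actual definitions.

#include <stdio.h>

/* Simplified per-queue counters, modelled on the driver's per-hdwq stats. */
struct fc4_ctrl_stat {
	unsigned long long input_requests;   /* read commands issued */
	unsigned long long output_requests;  /* write commands issued */
	unsigned long long control_requests; /* non-data commands issued */
	unsigned long long io_cmpls;         /* completions observed */
};

/* Sum per-queue counters; outstanding I/O is everything issued minus completed. */
static void report_totals(const struct fc4_ctrl_stat *stat, int nr_queues)
{
	unsigned long long totin = 0, totout = 0;
	int i;

	for (i = 0; i < nr_queues; i++) {
		totin += stat[i].io_cmpls;
		totout += stat[i].input_requests + stat[i].output_requests +
			  stat[i].control_requests;
	}
	printf("Total Cmpl %llu Issue %llu OutIO %llu\n",
	       totin, totout, totout - totin);
}

Used per queue, this keeps the hot path free of shared atomics; the sysfs show routines only walk the array when read, which is the design choice the hunks below make.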
@@ -155,7 +152,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, struct lpfc_nvme_rport *rport; struct lpfc_nodelist *ndlp; struct nvme_fc_remote_port *nrport; - struct lpfc_nvme_ctrl_stat *cstat; + struct lpfc_fc4_ctrl_stat *cstat; uint64_t data1, data2, data3; uint64_t totin, totout, tot; char *statep; @@ -163,7 +160,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, int len = 0; char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0}; - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); return len; } @@ -334,11 +331,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, rcu_read_lock(); scnprintf(tmp, sizeof(tmp), - "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n", + "XRI Dist lpfc%d Total %d IO %d ELS %d\n", phba->brd_no, phba->sli4_hba.max_cfg_param.max_xri, - phba->sli4_hba.nvme_xri_max, - phba->sli4_hba.scsi_xri_max, + phba->sli4_hba.io_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; @@ -457,13 +453,13 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, totin = 0; totout = 0; - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { - cstat = &lport->cstat[i]; - tot = atomic_read(&cstat->fc4NvmeIoCmpls); + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; + tot = cstat->io_cmpls; totin += tot; - data1 = atomic_read(&cstat->fc4NvmeInputRequests); - data2 = atomic_read(&cstat->fc4NvmeOutputRequests); - data3 = atomic_read(&cstat->fc4NvmeControlRequests); + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; totout += (data1 + data2 + data3); } scnprintf(tmp, sizeof(tmp), @@ -509,6 +505,57 @@ buffer_done: return len; } +static ssize_t +lpfc_scsi_stat_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = shost_priv(shost); + struct lpfc_hba *phba = vport->phba; + int len; + struct lpfc_fc4_ctrl_stat *cstat; + u64 data1, data2, data3; + u64 tot, totin, totout; + int i; + char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; + + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || + (phba->sli_rev != LPFC_SLI_REV4)) + return 0; + + scnprintf(buf, PAGE_SIZE, "SCSI HDWQ Statistics\n"); + + totin = 0; + totout = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; + tot = cstat->io_cmpls; + totin += tot; + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; + totout += (data1 + data2 + data3); + + scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " + "IO %016llx ", i, data1, data2, data3); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", + tot, ((data1 + data2 + data3) - tot)); + if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) + goto buffer_done; + } + scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " + "OutIO %016llx\n", totin, totout, totout - totin); + strlcat(buf, tmp, PAGE_SIZE); + +buffer_done: + len = strnlen(buf, PAGE_SIZE); + + return len; +} + static ssize_t lpfc_bg_info_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2574,6 +2621,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ static DEVICE_ATTR(nvme_info, 
0444, lpfc_nvme_info_show, NULL); +static DEVICE_ATTR(scsi_stat, 0444, lpfc_scsi_stat_show, NULL); static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL); static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL); static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL); @@ -3724,28 +3772,12 @@ LPFC_ATTR_R(nvmet_mrq_post, * lpfc_enable_fc4_type: Defines what FC4 types are supported. * Supported Values: 1 - register just FCP * 3 - register both FCP and NVME - * Supported values are [1,3]. Default value is 1 + * Supported values are [1,3]. Default value is 3 */ -LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, +LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, "Enable FC4 Protocol support - FCP / NVME"); -/* - * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME - * This parameter is only used if: - * lpfc_enable_fc4_type is 3 - register both FCP and NVME and - * port is not configured for NVMET. - * - * ELS/CT always get 10% of XRIs, up to a maximum of 250 - * The remaining XRIs get split up based on lpfc_xri_split per port: - * - * Supported Values are in percentages - * the xri_split value is the percentage the SCSI port will get. The remaining - * percentage will go to NVME. - */ -LPFC_ATTR_R(xri_split, 50, 10, 90, - "Percentage of FCP XRI resources versus NVME"); - /* # lpfc_log_verbose: Only turn this flag on if you are willing to risk being # deluged with LOTS of information. @@ -4903,6 +4935,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; struct lpfc_hba *phba = vport->phba; + struct lpfc_eq_intr_info *eqi; + uint32_t usdelay; int val = 0, i; /* fcp_imax is only valid for SLI4 */ @@ -4923,12 +4957,27 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr, if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)) return -EINVAL; + phba->cfg_auto_imax = (val) ? 0 : 1; + if (phba->cfg_fcp_imax && !val) { + queue_delayed_work(phba->wq, &phba->eq_delay_work, + msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); + + for_each_present_cpu(i) { + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); + eqi->icnt = 0; + } + } + phba->cfg_fcp_imax = (uint32_t)val; - phba->initial_imax = phba->cfg_fcp_imax; - for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT) + if (phba->cfg_fcp_imax) + usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; + else + usdelay = 0; + + for (i = 0; i < phba->cfg_irq_chann; i += LPFC_MAX_EQ_DELAY_EQID_CNT) lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT, - val); + usdelay); return strlen(buf); } @@ -4982,15 +5031,119 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR_RW(lpfc_fcp_imax); +/** + * lpfc_cq_max_proc_limit_store + * + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: string with the cq max processing limit of cqes + * @count: unused variable. + * + * Description: + * If val is in a valid range, then set value on each cq + * + * Returns: + * The length of the buf: if successful + * -ERANGE: if val is not in the valid range + * -EINVAL: if bad value format or intended mode is not supported. 
+ **/ +static ssize_t +lpfc_cq_max_proc_limit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_queue *eq, *cq; + unsigned long val; + int i; + + /* cq_max_proc_limit is only valid for SLI4 */ + if (phba->sli_rev != LPFC_SLI_REV4) + return -EINVAL; + + /* Sanity check on user data */ + if (!isdigit(buf[0])) + return -EINVAL; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < LPFC_CQ_MIN_PROC_LIMIT || val > LPFC_CQ_MAX_PROC_LIMIT) + return -ERANGE; + + phba->cfg_cq_max_proc_limit = (uint32_t)val; + + /* set the values on the cq's */ + for (i = 0; i < phba->cfg_irq_chann; i++) { + eq = phba->sli4_hba.hdwq[i].hba_eq; + if (!eq) + continue; + + list_for_each_entry(cq, &eq->child_list, list) + cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, + cq->entry_count); + } + + return strlen(buf); +} + /* - * lpfc_auto_imax: Controls Auto-interrupt coalescing values support. - * 0 No auto_imax support - * 1 auto imax on - * Auto imax will change the value of fcp_imax on a per EQ basis, using - * the EQ Delay Multiplier, depending on the activity for that EQ. - * Value range [0,1]. Default value is 1. + * lpfc_cq_max_proc_limit: The maximum number CQE entries processed in an + * itteration of CQ processing. */ -LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax"); +static int lpfc_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; +module_param(lpfc_cq_max_proc_limit, int, 0644); +MODULE_PARM_DESC(lpfc_cq_max_proc_limit, + "Set the maximum number CQEs processed in an iteration of " + "CQ processing"); +lpfc_param_show(cq_max_proc_limit) + +/* + * lpfc_cq_poll_threshold: Set the threshold of CQE completions in a + * single handler call which should request a polled completion rather + * than re-enabling interrupts. + */ +LPFC_ATTR_RW(cq_poll_threshold, LPFC_CQ_DEF_THRESHOLD_TO_POLL, + LPFC_CQ_MIN_THRESHOLD_TO_POLL, + LPFC_CQ_MAX_THRESHOLD_TO_POLL, + "CQE Processing Threshold to enable Polling"); + +/** + * lpfc_cq_max_proc_limit_init - Set the initial cq max_proc_limit + * @phba: lpfc_hba pointer. + * @val: entry limit + * + * Description: + * If val is in a valid range, then initialize the adapter's maximum + * value. + * + * Returns: + * Always returns 0 for success, even if value not always set to + * requested value. If value out of range or not supported, will fall + * back to default. 
+ **/ +static int +lpfc_cq_max_proc_limit_init(struct lpfc_hba *phba, int val) +{ + phba->cfg_cq_max_proc_limit = LPFC_CQ_DEF_MAX_PROC_LIMIT; + + if (phba->sli_rev != LPFC_SLI_REV4) + return 0; + + if (val >= LPFC_CQ_MIN_PROC_LIMIT && val <= LPFC_CQ_MAX_PROC_LIMIT) { + phba->cfg_cq_max_proc_limit = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0371 "LPFC_DRIVER_NAME"_cq_max_proc_limit: " + "%d out of range, using default\n", + phba->cfg_cq_max_proc_limit); + + return 0; +} + +static DEVICE_ATTR_RW(lpfc_cq_max_proc_limit); /** * lpfc_state_show - Display current driver CPU affinity @@ -5023,50 +5176,70 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, case 1: len += snprintf(buf + len, PAGE_SIZE-len, "fcp_cpu_map: HBA centric mapping (%d): " - "%d online CPUs\n", - phba->cfg_fcp_cpu_map, - phba->sli4_hba.num_online_cpu); - break; - case 2: - len += snprintf(buf + len, PAGE_SIZE-len, - "fcp_cpu_map: Driver centric mapping (%d): " - "%d online CPUs\n", - phba->cfg_fcp_cpu_map, - phba->sli4_hba.num_online_cpu); + "%d of %d CPUs online from %d possible CPUs\n", + phba->cfg_fcp_cpu_map, num_online_cpus(), + num_present_cpus(), + phba->sli4_hba.num_possible_cpu); break; } - while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) { + while (phba->sli4_hba.curr_disp_cpu < + phba->sli4_hba.num_possible_cpu) { cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; - /* margin should fit in this and the truncated message */ - if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) - len += snprintf(buf + len, PAGE_SIZE-len, - "CPU %02d io_chan %02d " - "physid %d coreid %d\n", + if (!cpu_present(phba->sli4_hba.curr_disp_cpu)) + len += snprintf(buf + len, PAGE_SIZE - len, + "CPU %02d not present\n", + phba->sli4_hba.curr_disp_cpu); + else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) { + if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) + len += snprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d hdwq None " + "physid %d coreid %d ht %d\n", phba->sli4_hba.curr_disp_cpu, - cpup->channel_id, cpup->phys_id, - cpup->core_id); - else - len += snprintf(buf + len, PAGE_SIZE-len, - "CPU %02d io_chan %02d " - "physid %d coreid %d IRQ %d\n", + cpup->phys_id, + cpup->core_id, cpup->hyper); + else + len += snprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d EQ %04d hdwq %04d " + "physid %d coreid %d ht %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->eq, cpup->hdwq, cpup->phys_id, + cpup->core_id, cpup->hyper); + } else { + if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) + len += snprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d hdwq None " + "physid %d coreid %d ht %d IRQ %d\n", + phba->sli4_hba.curr_disp_cpu, + cpup->phys_id, + cpup->core_id, cpup->hyper, cpup->irq); + else + len += snprintf( + buf + len, PAGE_SIZE - len, + "CPU %02d EQ %04d hdwq %04d " + "physid %d coreid %d ht %d IRQ %d\n", phba->sli4_hba.curr_disp_cpu, - cpup->channel_id, cpup->phys_id, - cpup->core_id, cpup->irq); + cpup->eq, cpup->hdwq, cpup->phys_id, + cpup->core_id, cpup->hyper, cpup->irq); + } phba->sli4_hba.curr_disp_cpu++; /* display max number of CPUs keeping some margin */ if (phba->sli4_hba.curr_disp_cpu < - phba->sli4_hba.num_present_cpu && + phba->sli4_hba.num_possible_cpu && (len >= (PAGE_SIZE - 64))) { - len += snprintf(buf + len, PAGE_SIZE-len, "more...\n"); + len += snprintf(buf + len, + PAGE_SIZE - len, "more...\n"); break; } } - if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu) + if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_possible_cpu) 
phba->sli4_hba.curr_disp_cpu = 0; return len; @@ -5094,14 +5267,13 @@ lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, # lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors # for the HBA. # -# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2). +# Value range is [0 to 1]. Default value is LPFC_HBA_CPU_MAP (1). # 0 - Do not affinitze IRQ vectors # 1 - Affintize HBA vectors with respect to each HBA # (start with CPU0 for each HBA) -# 2 - Affintize HBA vectors with respect to the entire driver -# (round robin thru all CPUs across all HBAs) +# This also defines how Hardware Queues are mapped to specific CPUs. */ -static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; +static int lpfc_fcp_cpu_map = LPFC_HBA_CPU_MAP; module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(lpfc_fcp_cpu_map, "Defines how to map CPUs to IRQ vectors per HBA"); @@ -5135,7 +5307,7 @@ lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3326 lpfc_fcp_cpu_map: %d out of range, using " "default\n", val); - phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; + phba->cfg_fcp_cpu_map = LPFC_HBA_CPU_MAP; return 0; } @@ -5234,14 +5406,21 @@ static DEVICE_ATTR_RW(lpfc_max_scsicmpl_time); */ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); +/* +# lpfc_xri_rebalancing: enable or disable XRI rebalancing feature +# range is [0,1]. Default value is 1. +*/ +LPFC_ATTR_R(xri_rebalancing, 1, 0, 1, "Enable/Disable XRI rebalancing"); + /* * lpfc_io_sched: Determine scheduling algrithmn for issuing FCP cmds * range is [0,1]. Default value is 0. - * For [0], FCP commands are issued to Work Queues ina round robin fashion. + * For [0], FCP commands are issued to Work Queues based on upper layer + * hardware queue index. * For [1], FCP commands are issued to a Work Queue associated with the * current CPU. * - * LPFC_FCP_SCHED_ROUND_ROBIN == 0 + * LPFC_FCP_SCHED_BY_HDWQ == 0 * LPFC_FCP_SCHED_BY_CPU == 1 * * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu @@ -5249,11 +5428,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os * through WQs will be used. */ -LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN, - LPFC_FCP_SCHED_ROUND_ROBIN, +LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_BY_CPU, + LPFC_FCP_SCHED_BY_HDWQ, LPFC_FCP_SCHED_BY_CPU, "Determine scheduling algorithm for " - "issuing commands [0] - Round Robin, [1] - Current CPU"); + "issuing commands [0] - Hardware Queue, [1] - Current CPU"); /* * lpfc_ns_query: Determine algrithmn for NameServer queries after RSCN @@ -5415,41 +5594,39 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, "Embed NVME Command in WQE"); /* - * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver - * will advertise it supports to the SCSI layer. This also will map to - * the number of WQs the driver will create. + * lpfc_hdw_queue: Set the number of Hardware Queues the driver + * will advertise it supports to the NVME and SCSI layers. This also + * will map to the number of CQ/WQ pairs the driver will create. * - * 0 = Configure the number of io channels to the number of active CPUs. - * 1,32 = Manually specify how many io channels to use. + * The NVME Layer will try to create this many, plus 1 administrative + * hardware queue. The administrative queue will always map to WQ 0 + * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. * - * Value range is [0,32]. Default value is 4. 
+ * 0 = Configure the number of hdw queues to the number of active CPUs. + * 1,128 = Manually specify how many hdw queues to use. + * + * Value range is [0,128]. Default value is 0. */ -LPFC_ATTR_R(fcp_io_channel, - LPFC_FCP_IO_CHAN_DEF, - LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX, - "Set the number of FCP I/O channels"); +LPFC_ATTR_R(hdw_queue, + LPFC_HBA_HDWQ_DEF, + LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, + "Set the number of I/O Hardware Queues"); /* - * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver - * will advertise it supports to the NVME layer. This also will map to - * the number of WQs the driver will create. - * - * This module parameter is valid when lpfc_enable_fc4_type is set - * to support NVME. - * - * The NVME Layer will try to create this many, plus 1 administrative - * hardware queue. The administrative queue will always map to WQ 0 - * A hardware IO queue maps (qidx) to a specific driver WQ. + * lpfc_irq_chann: Set the number of IRQ vectors that are available + * for Hardware Queues to utilize. This also will map to the number + * of EQ / MSI-X vectors the driver will create. This should never be + * more than the number of Hardware Queues * - * 0 = Configure the number of io channels to the number of active CPUs. - * 1,32 = Manually specify how many io channels to use. + * 0 = Configure number of IRQ Channels to the number of active CPUs. + * 1,128 = Manually specify how many IRQ Channels to use. * - * Value range is [0,32]. Default value is 0. + * Value range is [0,128]. Default value is 0. */ -LPFC_ATTR_R(nvme_io_channel, - LPFC_NVME_IO_CHAN_DEF, - LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX, - "Set the number of NVME I/O channels"); +LPFC_ATTR_R(irq_chann, + LPFC_HBA_HDWQ_DEF, + LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, + "Set the number of I/O IRQ Channels"); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. @@ -5491,16 +5668,6 @@ LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); */ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); -/* -# lpfc_fcp_look_ahead: Look ahead for completions in FCP start routine -# 0 = disabled (default) -# 1 = enabled -# Value range is [0,1]. Default value is 0. -# -# This feature in under investigation and may be supported in the future. 
-*/ -unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; - /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the @@ -5677,6 +5844,7 @@ LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_nvme_info, + &dev_attr_scsi_stat, &dev_attr_bg_info, &dev_attr_bg_guard_err, &dev_attr_bg_apptag_err, @@ -5704,11 +5872,11 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, &dev_attr_lpfc_enable_fc4_type, - &dev_attr_lpfc_xri_split, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_first_burst_size, &dev_attr_lpfc_ack0, + &dev_attr_lpfc_xri_rebalancing, &dev_attr_lpfc_topology, &dev_attr_lpfc_scan_down, &dev_attr_lpfc_link_speed, @@ -5742,12 +5910,13 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_use_msi, &dev_attr_lpfc_nvme_oas, &dev_attr_lpfc_nvme_embed_cmd, - &dev_attr_lpfc_auto_imax, &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_cq_poll_threshold, + &dev_attr_lpfc_cq_max_proc_limit, &dev_attr_lpfc_fcp_cpu_map, - &dev_attr_lpfc_fcp_io_channel, + &dev_attr_lpfc_hdw_queue, + &dev_attr_lpfc_irq_chann, &dev_attr_lpfc_suppress_rsp, - &dev_attr_lpfc_nvme_io_channel, &dev_attr_lpfc_nvmet_mrq, &dev_attr_lpfc_nvmet_mrq_post, &dev_attr_lpfc_nvme_enable_fb, @@ -6775,6 +6944,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_multi_ring_rctl_init(phba, lpfc_multi_ring_rctl); lpfc_multi_ring_type_init(phba, lpfc_multi_ring_type); lpfc_ack0_init(phba, lpfc_ack0); + lpfc_xri_rebalancing_init(phba, lpfc_xri_rebalancing); lpfc_topology_init(phba, lpfc_topology); lpfc_link_speed_init(phba, lpfc_link_speed); lpfc_poll_tmo_init(phba, lpfc_poll_tmo); @@ -6787,8 +6957,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_nvme_oas_init(phba, lpfc_nvme_oas); lpfc_nvme_embed_cmd_init(phba, lpfc_nvme_embed_cmd); - lpfc_auto_imax_init(phba, lpfc_auto_imax); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_cq_poll_threshold_init(phba, lpfc_cq_poll_threshold); + lpfc_cq_max_proc_limit_init(phba, lpfc_cq_max_proc_limit); lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); @@ -6824,8 +6995,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) /* Initialize first burst. Target vs Initiator are different. */ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); - lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); - lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel); + lpfc_hdw_queue_init(phba, lpfc_hdw_queue); + lpfc_irq_chann_init(phba, lpfc_irq_chann); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); lpfc_enable_dpp_init(phba, lpfc_enable_dpp); @@ -6834,38 +7005,27 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->nvmet_support = 0; phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; phba->cfg_enable_bbcr = 0; + phba->cfg_xri_rebalancing = 0; } else { /* We MUST have FCP support */ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP; } - if (phba->cfg_auto_imax && !phba->cfg_fcp_imax) - phba->cfg_auto_imax = 0; - phba->initial_imax = phba->cfg_fcp_imax; + phba->cfg_auto_imax = (phba->cfg_fcp_imax) ? 
0 : 1; phba->cfg_enable_pbde = 0; /* A value of 0 means use the number of CPUs found in the system */ - if (phba->cfg_fcp_io_channel == 0) - phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; - if (phba->cfg_nvme_io_channel == 0) - phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu; - - if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) - phba->cfg_fcp_io_channel = 0; - - if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) - phba->cfg_nvme_io_channel = 0; - - if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) - phba->io_channel_irqs = phba->cfg_fcp_io_channel; - else - phba->io_channel_irqs = phba->cfg_nvme_io_channel; + if (phba->cfg_hdw_queue == 0) + phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; + if (phba->cfg_irq_chann == 0) + phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; + if (phba->cfg_irq_chann > phba->cfg_hdw_queue) + phba->cfg_irq_chann = phba->cfg_hdw_queue; phba->cfg_soft_wwnn = 0L; phba->cfg_soft_wwpn = 0L; - lpfc_xri_split_init(phba, lpfc_xri_split); lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); @@ -6903,16 +7063,16 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) { - if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu) - phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu; - - if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu) - phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; + if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) + phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; + if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) + phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; + if (phba->cfg_irq_chann > phba->cfg_hdw_queue) + phba->cfg_irq_chann = phba->cfg_hdw_queue; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && phba->nvmet_support) { phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP; - phba->cfg_fcp_io_channel = 0; lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6013 %s x%x fb_size x%x, fb_max x%x\n", @@ -6929,11 +7089,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) } if (!phba->cfg_nvmet_mrq) - phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + phba->cfg_nvmet_mrq = phba->cfg_irq_chann; /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ - if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) { - phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) { + phba->cfg_nvmet_mrq = phba->cfg_irq_chann; lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, "6018 Adjust lpfc_nvmet_mrq to %d\n", phba->cfg_nvmet_mrq); @@ -6947,11 +7107,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF; phba->cfg_nvmet_fb_size = 0; } - - if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) - phba->io_channel_irqs = phba->cfg_fcp_io_channel; - else - phba->io_channel_irqs = phba->cfg_nvme_io_channel; } /** diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 2dc564e59430..f2494d3b365c 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2947,7 +2947,7 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys); cmd->un.cont64[i].tus.f.bdeSize = ((struct lpfc_dmabufext *)mp[i])->size; - cmd->ulpBdeCount = ++i; + cmd->ulpBdeCount = ++i; if ((--num_bde > 0) && (i < 2)) continue; @@ -4682,7 +4682,7 @@ lpfc_bsg_issue_mbox(struct 
lpfc_hba *phba, struct bsg_job *job, * Don't allow mailbox commands to be sent when blocked or when in * the middle of discovery */ - if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { rc = -EAGAIN; goto job_done; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 39f3fa988732..e0b14d791b8c 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -199,11 +199,6 @@ void lpfc_reset_hba(struct lpfc_hba *); int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd, spinlock_t *slock); -int lpfc_fof_queue_create(struct lpfc_hba *); -int lpfc_fof_queue_setup(struct lpfc_hba *); -int lpfc_fof_queue_destroy(struct lpfc_hba *); -irqreturn_t lpfc_sli4_fof_intr_handler(int, void *); - int lpfc_sli_setup(struct lpfc_hba *); int lpfc_sli4_setup(struct lpfc_hba *phba); void lpfc_sli_queue_init(struct lpfc_hba *phba); @@ -320,8 +315,8 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); -int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum, - struct lpfc_iocbq *iocbq); +int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, + struct lpfc_iocbq *pwqe); struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri); struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq); @@ -445,7 +440,6 @@ extern spinlock_t _dump_buf_lock; extern int _dump_buf_done; extern spinlock_t pgcnt_lock; extern unsigned int pgcnt; -extern unsigned int lpfc_fcp_look_ahead; /* Interface exported by fabric iocb scheduler */ void lpfc_fabric_abort_nport(struct lpfc_nodelist *); @@ -520,8 +514,13 @@ int lpfc_sli4_read_config(struct lpfc_hba *); void lpfc_sli4_node_prep(struct lpfc_hba *); int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); -int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba); -int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba); +int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); +int lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf); +int lpfc_sli4_io_sgl_update(struct lpfc_hba *phba); +int lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, + struct list_head *blist, int xricnt); +int lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc); +void lpfc_io_free(struct lpfc_hba *phba); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); @@ -574,6 +573,21 @@ void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba); void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_wcqe_complete *abts_cmpl); +void lpfc_create_multixri_pools(struct lpfc_hba *phba); +void lpfc_create_destroy_pools(struct lpfc_hba *phba); +void 
lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid); +void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 cnt); +void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid); +void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid); +void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid); +#ifdef LPFC_MXP_STAT +void lpfc_snapshot_mxp(struct lpfc_hba *, u32); +#endif +struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, u32 hwqid, + int); +void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd, + struct lpfc_sli4_hdw_queue *qp); void lpfc_nvme_cmd_template(void); void lpfc_nvmet_cmd_template(void); extern int lpfc_enable_nvmet_cnt; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 552da8bf43e4..7290573110fe 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -1656,16 +1656,16 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID); /* Register FC4 FCP type if enabled. */ - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP) CtReq->un.rft.fcpReg = 1; /* Register NVME type if enabled. Defined LE and swapped. * rsvd[0] is used as word1 because of the hard-coded * word0 usage in the ct_request data structure. */ - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || + vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) CtReq->un.rft.rsvd[0] = cpu_to_be32(LPFC_FC4_TYPE_BITMASK); @@ -1732,8 +1732,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, * caller can specify NVME (type x28) as well. But only * these that FC4 type is supported. 
*/ - if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && + if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && (context == FC_TYPE_NVME)) { if ((vport == phba->pport) && phba->nvmet_support) { CtReq->un.rff.fbits = (FC4_FEATURE_TARGET | @@ -1744,8 +1744,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode, } CtReq->un.rff.type_code = context; - } else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && + } else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && (context == FC_TYPE_FCP)) CtReq->un.rff.type_code = context; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a58f0b3f03a9..1215eaa530db 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2015 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -378,6 +378,271 @@ skipit: return len; } +static int lpfc_debugfs_last_xripool; + +/** + * lpfc_debugfs_common_xri_data - Dump Hardware Queue info to a buffer + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the Hardware Queue info from the @phba to @buf up to + * @size number of bytes. A header that describes the current hdwq state will be + * dumped to @buf first and then info on each hdwq entry will be dumped to @buf + * until @size bytes have been dumped or all the hdwq info has been dumped. + * + * Notes: + * This routine will rotate through each configured Hardware Queue each + * time called. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
+ **/ +static int +lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_sli4_hdw_queue *qp; + int len = 0; + int i, out; + unsigned long iflag; + + for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80)) + break; + qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool]; + + len += snprintf(buf + len, size - len, "HdwQ %d Info ", i); + spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); + spin_lock(&qp->abts_nvme_buf_list_lock); + spin_lock(&qp->io_buf_list_get_lock); + spin_lock(&qp->io_buf_list_put_lock); + out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs + + qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs); + len += snprintf(buf + len, size - len, + "tot:%d get:%d put:%d mt:%d " + "ABTS scsi:%d nvme:%d Out:%d\n", + qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs, + qp->empty_io_bufs, qp->abts_scsi_io_bufs, + qp->abts_nvme_io_bufs, out); + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->io_buf_list_get_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); + spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); + + lpfc_debugfs_last_xripool++; + if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue) + lpfc_debugfs_last_xripool = 0; + } + + return len; +} + +/** + * lpfc_debugfs_multixripools_data - Display multi-XRI pools information + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine displays current multi-XRI pools information including XRI + * count in public, private and txcmplq. It also displays current high and + * low watermark. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
+ **/ +static int +lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size) +{ + u32 i; + u32 hwq_count; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_pbl_pool *pbl_pool; + u32 txcmplq_cnt; + char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0}; + + if (phba->sli_rev != LPFC_SLI_REV4) + return 0; + + if (!phba->sli4_hba.hdwq) + return 0; + + if (!phba->cfg_xri_rebalancing) { + i = lpfc_debugfs_commonxripools_data(phba, buf, size); + return i; + } + + /* + * Pbl: Current number of free XRIs in public pool + * Pvt: Current number of free XRIs in private pool + * Busy: Current number of outstanding XRIs + * HWM: Current high watermark + * pvt_empty: Incremented by 1 when IO submission fails (no xri) + * pbl_empty: Incremented by 1 when all pbl_pool are empty during + * IO submission + */ + scnprintf(tmp, sizeof(tmp), + "HWQ: Pbl Pvt Busy HWM | pvt_empty pbl_empty "); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); + +#ifdef LPFC_MXP_STAT + /* + * MAXH: Max high watermark seen so far + * above_lmt: Incremented by 1 if xri_owned > xri_limit during + * IO submission + * below_lmt: Incremented by 1 if xri_owned <= xri_limit during + * IO submission + * locPbl_hit: Incremented by 1 if successfully get a batch of XRI from + * local pbl_pool + * othPbl_hit: Incremented by 1 if successfully get a batch of XRI from + * other pbl_pool + */ + scnprintf(tmp, sizeof(tmp), + "MAXH above_lmt below_lmt locPbl_hit othPbl_hit"); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); + + /* + * sPbl: snapshot of Pbl 15 sec after stat gets cleared + * sPvt: snapshot of Pvt 15 sec after stat gets cleared + * sBusy: snapshot of Busy 15 sec after stat gets cleared + */ + scnprintf(tmp, sizeof(tmp), + " | sPbl sPvt sBusy"); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); +#endif + + scnprintf(tmp, sizeof(tmp), "\n"); + if (strlcat(buf, tmp, size) >= size) + return strnlen(buf, size); + + hwq_count = phba->cfg_hdw_queue; + for (i = 0; i < hwq_count; i++) { + qp = &phba->sli4_hba.hdwq[i]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + continue; + pbl_pool = &multixri_pool->pbl_pool; + pvt_pool = &multixri_pool->pvt_pool; + txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; + if (qp->nvme_wq) + txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; + + scnprintf(tmp, sizeof(tmp), + "%03d: %4d %4d %4d %4d | %10d %10d ", + i, pbl_pool->count, pvt_pool->count, + txcmplq_cnt, pvt_pool->high_watermark, + qp->empty_io_bufs, multixri_pool->pbl_empty_count); + if (strlcat(buf, tmp, size) >= size) + break; + +#ifdef LPFC_MXP_STAT + scnprintf(tmp, sizeof(tmp), + "%4d %10d %10d %10d %10d", + multixri_pool->stat_max_hwm, + multixri_pool->above_limit_count, + multixri_pool->below_limit_count, + multixri_pool->local_pbl_hit_count, + multixri_pool->other_pbl_hit_count); + if (strlcat(buf, tmp, size) >= size) + break; + + scnprintf(tmp, sizeof(tmp), + " | %4d %4d %5d", + multixri_pool->stat_pbl_count, + multixri_pool->stat_pvt_count, + multixri_pool->stat_busy_count); + if (strlcat(buf, tmp, size) >= size) + break; +#endif + + scnprintf(tmp, sizeof(tmp), "\n"); + if (strlcat(buf, tmp, size) >= size) + break; + } + return strnlen(buf, size); +} + + +#ifdef LPFC_HDWQ_LOCK_STAT +static int lpfc_debugfs_last_lock; + +/** + * lpfc_debugfs_lockstat_data - Dump Hardware Queue info to a buffer + * @phba: The HBA to gather host buffer info from. + * @buf: The buffer to dump log into. 
+ * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the Hardware Queue info from the @phba to @buf up to + * @size number of bytes. A header that describes the current hdwq state will be + * dumped to @buf first and then info on each hdwq entry will be dumped to @buf + * until @size bytes have been dumped or all the hdwq info has been dumped. + * + * Notes: + * This routine will rotate through each configured Hardware Queue each + * time called. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. + **/ +static int +lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size) +{ + struct lpfc_sli4_hdw_queue *qp; + int len = 0; + int i; + + if (phba->sli_rev != LPFC_SLI_REV4) + return 0; + + if (!phba->sli4_hba.hdwq) + return 0; + + for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (len > (LPFC_HDWQINFO_SIZE - 100)) + break; + qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock]; + + len += snprintf(buf + len, size - len, "HdwQ %03d Lock ", i); + if (phba->cfg_xri_rebalancing) { + len += snprintf(buf + len, size - len, + "get_pvt:%d mv_pvt:%d " + "mv2pub:%d mv2pvt:%d " + "put_pvt:%d put_pub:%d wq:%d\n", + qp->lock_conflict.alloc_pvt_pool, + qp->lock_conflict.mv_from_pvt_pool, + qp->lock_conflict.mv_to_pub_pool, + qp->lock_conflict.mv_to_pvt_pool, + qp->lock_conflict.free_pvt_pool, + qp->lock_conflict.free_pub_pool, + qp->lock_conflict.wq_access); + } else { + len += snprintf(buf + len, size - len, + "get:%d put:%d free:%d wq:%d\n", + qp->lock_conflict.alloc_xri_get, + qp->lock_conflict.alloc_xri_put, + qp->lock_conflict.free_xri, + qp->lock_conflict.wq_access); + } + + lpfc_debugfs_last_lock++; + if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue) + lpfc_debugfs_last_lock = 0; + } + + return len; +} +#endif + static int lpfc_debugfs_last_hba_slim_off; /** @@ -773,11 +1038,11 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) struct lpfc_nvmet_tgtport *tgtp; struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp; struct nvme_fc_local_port *localport; - struct lpfc_nvme_ctrl_stat *cstat; + struct lpfc_fc4_ctrl_stat *cstat; struct lpfc_nvme_lport *lport; uint64_t data1, data2, data3; uint64_t tot, totin, totout; - int cnt, i, maxch; + int cnt, i; int len = 0; if (phba->nvmet_support) { @@ -863,17 +1128,17 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) len += snprintf(buf + len, size - len, "\n"); cnt = 0; - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { cnt++; } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); if (cnt) { len += snprintf(buf + len, size - len, "ABORT: %d ctx entries\n", cnt); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { @@ -885,7 +1150,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) ctxp->oxid, ctxp->state, ctxp->flag); } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); } /* Calculate outstanding IOs */ @@ -901,7 +1166,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) phba->sli4_hba.nvmet_io_wait_total, tot); } else { - if 
(!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return len; localport = vport->localport; @@ -912,26 +1177,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) return len; len += snprintf(buf + len, size - len, - "\nNVME Lport Statistics\n"); + "\nNVME HDWQ Statistics\n"); len += snprintf(buf + len, size - len, "LS: Xmt %016x Cmpl %016x\n", atomic_read(&lport->fc4NvmeLsRequests), atomic_read(&lport->fc4NvmeLsCmpls)); - if (phba->cfg_nvme_io_channel < 32) - maxch = phba->cfg_nvme_io_channel; - else - maxch = 32; totin = 0; totout = 0; - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { - cstat = &lport->cstat[i]; - tot = atomic_read(&cstat->fc4NvmeIoCmpls); + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].nvme_cstat; + tot = cstat->io_cmpls; totin += tot; - data1 = atomic_read(&cstat->fc4NvmeInputRequests); - data2 = atomic_read(&cstat->fc4NvmeOutputRequests); - data3 = atomic_read(&cstat->fc4NvmeControlRequests); + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; totout += (data1 + data2 + data3); /* Limit to 32, debugfs display buffer limitation */ @@ -939,7 +1200,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) continue; len += snprintf(buf + len, PAGE_SIZE - len, - "FCP (%d): Rd %016llx Wr %016llx " + "HDWQ (%d): Rd %016llx Wr %016llx " "IO %016llx ", i, data1, data2, data3); len += snprintf(buf + len, PAGE_SIZE - len, @@ -979,6 +1240,66 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size) return len; } +/** + * lpfc_debugfs_scsistat_data - Dump target node list to a buffer + * @vport: The vport to gather target node info from. + * @buf: The buffer to dump log into. + * @size: The maximum amount of data to process. + * + * Description: + * This routine dumps the SCSI statistics associated with @vport + * + * Return Value: + * This routine returns the amount of bytes that were dumped into @buf and will + * not exceed @size. 
+ **/ +static int +lpfc_debugfs_scsistat_data(struct lpfc_vport *vport, char *buf, int size) +{ + int len; + struct lpfc_hba *phba = vport->phba; + struct lpfc_fc4_ctrl_stat *cstat; + u64 data1, data2, data3; + u64 tot, totin, totout; + int i; + char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0}; + + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) || + (phba->sli_rev != LPFC_SLI_REV4)) + return 0; + + scnprintf(buf, size, "SCSI HDWQ Statistics\n"); + + totin = 0; + totout = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + cstat = &phba->sli4_hba.hdwq[i].scsi_cstat; + tot = cstat->io_cmpls; + totin += tot; + data1 = cstat->input_requests; + data2 = cstat->output_requests; + data3 = cstat->control_requests; + totout += (data1 + data2 + data3); + + scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx " + "IO %016llx ", i, data1, data2, data3); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + + scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n", + tot, ((data1 + data2 + data3) - tot)); + if (strlcat(buf, tmp, size) >= size) + goto buffer_done; + } + scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx " + "OutIO %016llx\n", totin, totout, totout - totin); + strlcat(buf, tmp, size); + +buffer_done: + len = strnlen(buf, size); + + return len; +} /** * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer @@ -1299,62 +1620,73 @@ static int lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size) { struct lpfc_hba *phba = vport->phba; - int i; + struct lpfc_sli4_hdw_queue *qp; + int i, j, max_cnt; int len = 0; - uint32_t tot_xmt = 0; - uint32_t tot_rcv = 0; - uint32_t tot_cmpl = 0; - uint32_t tot_ccmpl = 0; + uint32_t tot_xmt; + uint32_t tot_rcv; + uint32_t tot_cmpl; - if (phba->nvmet_support == 0) { - /* NVME Initiator */ - len += snprintf(buf + len, PAGE_SIZE - len, - "CPUcheck %s\n", - (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? - "Enabled" : "Disabled")); - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - if (i >= LPFC_CHECK_CPU_CNT) - break; - len += snprintf(buf + len, PAGE_SIZE - len, - "%02d: xmit x%08x cmpl x%08x\n", - i, phba->cpucheck_xmt_io[i], - phba->cpucheck_cmpl_io[i]); - tot_xmt += phba->cpucheck_xmt_io[i]; - tot_cmpl += phba->cpucheck_cmpl_io[i]; - } + len += snprintf(buf + len, PAGE_SIZE - len, + "CPUcheck %s ", + (phba->cpucheck_on & LPFC_CHECK_NVME_IO ? + "Enabled" : "Disabled")); + if (phba->nvmet_support) { len += snprintf(buf + len, PAGE_SIZE - len, - "tot:xmit x%08x cmpl x%08x\n", - tot_xmt, tot_cmpl); - return len; + "%s\n", + (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? + "Rcv Enabled\n" : "Rcv Disabled\n")); + } else { + len += snprintf(buf + len, PAGE_SIZE - len, "\n"); } + max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ; + + for (i = 0; i < phba->cfg_hdw_queue; i++) { + qp = &phba->sli4_hba.hdwq[i]; + + tot_rcv = 0; + tot_xmt = 0; + tot_cmpl = 0; + for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { + tot_xmt += qp->cpucheck_xmt_io[j]; + tot_cmpl += qp->cpucheck_cmpl_io[j]; + if (phba->nvmet_support) + tot_rcv += qp->cpucheck_rcv_io[j]; + } + + /* Only display Hardware Qs with something */ + if (!tot_xmt && !tot_cmpl && !tot_rcv) + continue; - /* NVME Target */ - len += snprintf(buf + len, PAGE_SIZE - len, - "CPUcheck %s ", - (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ? - "IO Enabled - " : "IO Disabled - ")); - len += snprintf(buf + len, PAGE_SIZE - len, - "%s\n", - (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ? 
- "Rcv Enabled\n" : "Rcv Disabled\n")); - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - if (i >= LPFC_CHECK_CPU_CNT) - break; len += snprintf(buf + len, PAGE_SIZE - len, - "%02d: xmit x%08x ccmpl x%08x " - "cmpl x%08x rcv x%08x\n", - i, phba->cpucheck_xmt_io[i], - phba->cpucheck_ccmpl_io[i], - phba->cpucheck_cmpl_io[i], - phba->cpucheck_rcv_io[i]); - tot_xmt += phba->cpucheck_xmt_io[i]; - tot_rcv += phba->cpucheck_rcv_io[i]; - tot_cmpl += phba->cpucheck_cmpl_io[i]; - tot_ccmpl += phba->cpucheck_ccmpl_io[i]; + "HDWQ %03d: ", i); + for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { + /* Only display non-zero counters */ + if (!qp->cpucheck_xmt_io[j] && + !qp->cpucheck_cmpl_io[j] && + !qp->cpucheck_rcv_io[j]) + continue; + if (phba->nvmet_support) { + len += snprintf(buf + len, PAGE_SIZE - len, + "CPU %03d: %x/%x/%x ", j, + qp->cpucheck_rcv_io[j], + qp->cpucheck_xmt_io[j], + qp->cpucheck_cmpl_io[j]); + } else { + len += snprintf(buf + len, PAGE_SIZE - len, + "CPU %03d: %x/%x ", j, + qp->cpucheck_xmt_io[j], + qp->cpucheck_cmpl_io[j]); + } + } + len += snprintf(buf + len, PAGE_SIZE - len, + "Total: %x\n", tot_xmt); + if (len >= max_cnt) { + len += snprintf(buf + len, PAGE_SIZE - len, + "Truncated ...\n"); + return len; + } } - len += snprintf(buf + len, PAGE_SIZE - len, - "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n", - tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv); return len; } @@ -1501,7 +1833,7 @@ lpfc_debugfs_disc_trc_open(struct inode *inode, struct file *file) int rc = -ENOMEM; if (!lpfc_debugfs_max_disc_trc) { - rc = -ENOSPC; + rc = -ENOSPC; goto out; } @@ -1551,7 +1883,7 @@ lpfc_debugfs_slow_ring_trc_open(struct inode *inode, struct file *file) int rc = -ENOMEM; if (!lpfc_debugfs_max_slow_ring_trc) { - rc = -ENOSPC; + rc = -ENOSPC; goto out; } @@ -1619,6 +1951,135 @@ out: return rc; } +/** + * lpfc_debugfs_multixripools_open - Open the multixripool debugfs buffer + * @inode: The inode pointer that contains a hba pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the hba from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this hba, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_multixripools_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kzalloc(LPFC_DUMP_MULTIXRIPOOL_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_multixripools_data( + phba, debug->buffer, LPFC_DUMP_MULTIXRIPOOL_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +#ifdef LPFC_HDWQ_LOCK_STAT +/** + * lpfc_debugfs_lockstat_open - Open the lockstat debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. 
It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. + **/ +static int +lpfc_debugfs_lockstat_open(struct inode *inode, struct file *file) +{ + struct lpfc_hba *phba = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kmalloc(LPFC_HDWQINFO_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_lockstat_data(phba, debug->buffer, + LPFC_HBQINFO_SIZE); + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + struct lpfc_sli4_hdw_queue *qp; + char mybuf[64]; + char *pbuf; + int i; + + /* Protect copy from user */ + if (!access_ok(buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; + + if ((strncmp(pbuf, "reset", strlen("reset")) == 0) || + (strncmp(pbuf, "zero", strlen("zero")) == 0)) { + for (i = 0; i < phba->cfg_hdw_queue; i++) { + qp = &phba->sli4_hba.hdwq[i]; + qp->lock_conflict.alloc_xri_get = 0; + qp->lock_conflict.alloc_xri_put = 0; + qp->lock_conflict.free_xri = 0; + qp->lock_conflict.wq_access = 0; + qp->lock_conflict.alloc_pvt_pool = 0; + qp->lock_conflict.mv_from_pvt_pool = 0; + qp->lock_conflict.mv_to_pub_pool = 0; + qp->lock_conflict.mv_to_pvt_pool = 0; + qp->lock_conflict.free_pvt_pool = 0; + qp->lock_conflict.free_pub_pool = 0; + qp->lock_conflict.wq_access = 0; + } + } + return nbytes; +} +#endif + /** * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer * @inode: The inode pointer that contains a vport pointer. @@ -1994,20 +2455,89 @@ lpfc_debugfs_release(struct inode *inode, struct file *file) kfree(debug->buffer); kfree(debug); - return 0; -} + return 0; +} + +static int +lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + debug->buffer = NULL; + kfree(debug); + + return 0; +} + +/** + * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine clears multi-XRI pools statistics when buf contains "clear". + * + * Return Value: + * It returns the @nbytges passing in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. 
+ **/ +static ssize_t +lpfc_debugfs_multixripools_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + char mybuf[64]; + char *pbuf; + u32 i; + u32 hwq_count; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + + if (nbytes > 64) + nbytes = 64; + + /* Protect copy from user */ + if (!access_ok(buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); -static int -lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) -{ - struct lpfc_debug *debug = file->private_data; + if (copy_from_user(mybuf, buf, nbytes)) + return -EFAULT; + pbuf = &mybuf[0]; - debug->buffer = NULL; - kfree(debug); + if ((strncmp(pbuf, "clear", strlen("clear"))) == 0) { + hwq_count = phba->cfg_hdw_queue; + for (i = 0; i < hwq_count; i++) { + qp = &phba->sli4_hba.hdwq[i]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + continue; - return 0; -} + qp->empty_io_bufs = 0; + multixri_pool->pbl_empty_count = 0; +#ifdef LPFC_MXP_STAT + multixri_pool->above_limit_count = 0; + multixri_pool->below_limit_count = 0; + multixri_pool->stat_max_hwm = 0; + multixri_pool->local_pbl_hit_count = 0; + multixri_pool->other_pbl_hit_count = 0; + + multixri_pool->stat_pbl_count = 0; + multixri_pool->stat_pvt_count = 0; + multixri_pool->stat_busy_count = 0; + multixri_pool->stat_snapshot_taken = 0; +#endif + } + return strlen(pbuf); + } + return -EINVAL; +} static int lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file) @@ -2097,6 +2627,64 @@ lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf, return nbytes; } +static int +lpfc_debugfs_scsistat_open(struct inode *inode, struct file *file) +{ + struct lpfc_vport *vport = inode->i_private; + struct lpfc_debug *debug; + int rc = -ENOMEM; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + goto out; + + /* Round to page boundary */ + debug->buffer = kzalloc(LPFC_SCSISTAT_SIZE, GFP_KERNEL); + if (!debug->buffer) { + kfree(debug); + goto out; + } + + debug->len = lpfc_debugfs_scsistat_data(vport, debug->buffer, + LPFC_SCSISTAT_SIZE); + + debug->i_private = inode->i_private; + file->private_data = debug; + + rc = 0; +out: + return rc; +} + +static ssize_t +lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; + struct lpfc_hba *phba = vport->phba; + char mybuf[6] = {0}; + int i; + + /* Protect copy from user */ + if (!access_ok(buf, nbytes)) + return -EFAULT; + + if (copy_from_user(mybuf, buf, (nbytes >= sizeof(mybuf)) ? 
+ (sizeof(mybuf) - 1) : nbytes)) + return -EFAULT; + + if ((strncmp(&mybuf[0], "reset", strlen("reset")) == 0) || + (strncmp(&mybuf[0], "zero", strlen("zero")) == 0)) { + for (i = 0; i < phba->cfg_hdw_queue; i++) { + memset(&phba->sli4_hba.hdwq[i].scsi_cstat, 0, + sizeof(phba->sli4_hba.hdwq[i].scsi_cstat)); + } + } + + return nbytes; +} + static int lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file) { @@ -2348,7 +2936,7 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file) } debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer, - LPFC_NVMEKTIME_SIZE); + LPFC_CPUCHECK_SIZE); debug->i_private = inode->i_private; file->private_data = debug; @@ -2365,9 +2953,10 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, struct lpfc_debug *debug = file->private_data; struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private; struct lpfc_hba *phba = vport->phba; + struct lpfc_sli4_hdw_queue *qp; char mybuf[64]; char *pbuf; - int i; + int i, j; if (nbytes > 64) nbytes = 64; @@ -2379,11 +2968,21 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, pbuf = &mybuf[0]; if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) { + if (phba->nvmet_support) + phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; + else + phba->cpucheck_on |= (LPFC_CHECK_NVME_IO | + LPFC_CHECK_SCSI_IO); + return strlen(pbuf); + } else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) { if (phba->nvmet_support) phba->cpucheck_on |= LPFC_CHECK_NVMET_IO; else phba->cpucheck_on |= LPFC_CHECK_NVME_IO; return strlen(pbuf); + } else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) { + phba->cpucheck_on |= LPFC_CHECK_SCSI_IO; + return strlen(pbuf); } else if ((strncmp(pbuf, "rcv", sizeof("rcv") - 1) == 0)) { if (phba->nvmet_support) @@ -2397,13 +2996,14 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf, return strlen(pbuf); } else if ((strncmp(pbuf, "zero", sizeof("zero") - 1) == 0)) { - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - if (i >= LPFC_CHECK_CPU_CNT) - break; - phba->cpucheck_rcv_io[i] = 0; - phba->cpucheck_xmt_io[i] = 0; - phba->cpucheck_cmpl_io[i] = 0; - phba->cpucheck_ccmpl_io[i] = 0; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + qp = &phba->sli4_hba.hdwq[i]; + + for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) { + qp->cpucheck_rcv_io[j] = 0; + qp->cpucheck_xmt_io[j] = 0; + qp->cpucheck_cmpl_io[j] = 0; + } } return strlen(pbuf); } @@ -3166,10 +3766,10 @@ __lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype, (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tWQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " - "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", + "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, - qp->hba_index, qp->entry_repost); + qp->hba_index, qp->notify_interval); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; @@ -3182,21 +3782,23 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, struct lpfc_queue *qp; int qidx; - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { - qp = phba->sli4_hba.fcp_wq[qidx]; + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].fcp_wq; if (qp->assoc_qid != cq_id) continue; *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); if (*len >= max_cnt) return 1; } - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { - qp = phba->sli4_hba.nvme_wq[qidx]; - if 
(qp->assoc_qid != cq_id) - continue; - *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); - if (*len >= max_cnt) - return 1; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].nvme_wq; + if (qp->assoc_qid != cq_id) + continue; + *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); + if (*len >= max_cnt) + return 1; + } } return 0; } @@ -3217,10 +3819,10 @@ __lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\tCQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " - "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", + "HST-IDX[%04d], NTFI[%03d], PLMT[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, qp->host_index, - qp->hba_index, qp->entry_repost); + qp->notify_interval, qp->max_proc_limit); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); @@ -3243,15 +3845,15 @@ __lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp, qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tHQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " - "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", + "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n", qp->queue_id, qp->entry_count, qp->entry_size, - qp->host_index, qp->hba_index, qp->entry_repost); + qp->host_index, qp->hba_index, qp->notify_interval); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\t\tDQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " - "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]\n", + "HST-IDX[%04d], PRT-IDX[%04d], NTFI[%03d]\n", datqp->queue_id, datqp->entry_count, datqp->entry_size, datqp->host_index, - datqp->hba_index, datqp->entry_repost); + datqp->hba_index, datqp->notify_interval); return len; } @@ -3260,31 +3862,25 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, int *len, int max_cnt, int eqidx, int eq_id) { struct lpfc_queue *qp; - int qidx, rc; + int rc; - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { - qp = phba->sli4_hba.fcp_cq[qidx]; - if (qp->assoc_qid != eq_id) - continue; + qp = phba->sli4_hba.hdwq[eqidx].fcp_cq; - *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len); + *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len); - /* Reset max counter */ - qp->CQ_max_cqe = 0; + /* Reset max counter */ + qp->CQ_max_cqe = 0; - if (*len >= max_cnt) - return 1; + if (*len >= max_cnt) + return 1; - rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len, - max_cnt, qp->queue_id); - if (rc) - return 1; - } + rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len, + max_cnt, qp->queue_id); + if (rc) + return 1; - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { - qp = phba->sli4_hba.nvme_cq[qidx]; - if (qp->assoc_qid != eq_id) - continue; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + qp = phba->sli4_hba.hdwq[eqidx].nvme_cq; *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len); @@ -3295,7 +3891,7 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, return 1; rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len, - max_cnt, qp->queue_id); + max_cnt, qp->queue_id); if (rc) return 1; } @@ -3338,9 +3934,10 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype, (unsigned long long)qp->q_cnt_4, qp->q_mode); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], " - "HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]", + "HST-IDX[%04d], NTFI[%03d], PLMT[%03d], 
AFFIN[%03d]", qp->queue_id, qp->entry_count, qp->entry_size, - qp->host_index, qp->hba_index, qp->entry_repost); + qp->host_index, qp->notify_interval, + qp->max_proc_limit, qp->chann); len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n"); return len; @@ -3387,24 +3984,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, spin_lock_irq(&phba->hbalock); /* Fast-path event queue */ - if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) { + if (phba->sli4_hba.hdwq && phba->cfg_hdw_queue) { x = phba->lpfc_idiag_last_eq; - if (phba->cfg_fof && (x >= phba->io_channel_irqs)) { - phba->lpfc_idiag_last_eq = 0; - goto fof; - } phba->lpfc_idiag_last_eq++; - if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs) - if (phba->cfg_fof == 0) - phba->lpfc_idiag_last_eq = 0; + if (phba->lpfc_idiag_last_eq >= phba->cfg_hdw_queue) + phba->lpfc_idiag_last_eq = 0; len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, - "EQ %d out of %d HBA EQs\n", - x, phba->io_channel_irqs); + "HDWQ %d out of %d HBA HDWQs\n", + x, phba->cfg_hdw_queue); /* Fast-path EQ */ - qp = phba->sli4_hba.hba_eq[x]; + qp = phba->sli4_hba.hdwq[x].hba_eq; if (!qp) goto out; @@ -3479,35 +4071,6 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, goto out; } -fof: - if (phba->cfg_fof) { - /* FOF EQ */ - qp = phba->sli4_hba.fof_eq; - len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len); - - /* Reset max counter */ - if (qp) - qp->EQ_max_eqe = 0; - - if (len >= max_cnt) - goto too_big; - - /* OAS CQ */ - qp = phba->sli4_hba.oas_cq; - len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len); - /* Reset max counter */ - if (qp) - qp->CQ_max_cqe = 0; - if (len >= max_cnt) - goto too_big; - - /* OAS WQ */ - qp = phba->sli4_hba.oas_wq; - len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len); - if (len >= max_cnt) - goto too_big; - } - spin_unlock_irq(&phba->hbalock); return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); @@ -3725,9 +4288,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, switch (quetp) { case LPFC_IDIAG_EQ: /* HBA event queue */ - if (phba->sli4_hba.hba_eq) { - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { - qp = phba->sli4_hba.hba_eq[qidx]; + if (phba->sli4_hba.hdwq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].hba_eq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check(qp, @@ -3776,10 +4339,10 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, goto pass_check; } /* FCP complete queue */ - if (phba->sli4_hba.fcp_cq) { - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; + if (phba->sli4_hba.hdwq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - qp = phba->sli4_hba.fcp_cq[qidx]; + qp = phba->sli4_hba.hdwq[qidx].fcp_cq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( @@ -3792,23 +4355,20 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, } } /* NVME complete queue */ - if (phba->sli4_hba.nvme_cq) { + if (phba->sli4_hba.hdwq) { qidx = 0; do { - if (phba->sli4_hba.nvme_cq[qidx] && - phba->sli4_hba.nvme_cq[qidx]->queue_id == - queid) { + qp = phba->sli4_hba.hdwq[qidx].nvme_cq; + if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( - phba->sli4_hba.nvme_cq[qidx], - index, count); + qp, index, count); if (rc) goto error_out; - idiag.ptr_private = - phba->sli4_hba.nvme_cq[qidx]; + idiag.ptr_private = qp; goto pass_check; } - } 
while (++qidx < phba->cfg_nvme_io_channel); + } while (++qidx < phba->cfg_hdw_queue); } goto error_out; break; @@ -3849,11 +4409,11 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, idiag.ptr_private = phba->sli4_hba.nvmels_wq; goto pass_check; } - /* FCP work queue */ - if (phba->sli4_hba.fcp_wq) { - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; - qidx++) { - qp = phba->sli4_hba.fcp_wq[qidx]; + + if (phba->sli4_hba.hdwq) { + /* FCP/SCSI work queue */ + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].fcp_wq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( @@ -3864,12 +4424,9 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, goto pass_check; } } - } - /* NVME work queue */ - if (phba->sli4_hba.nvme_wq) { - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; - qidx++) { - qp = phba->sli4_hba.nvme_wq[qidx]; + /* NVME work queue */ + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + qp = phba->sli4_hba.hdwq[qidx].nvme_wq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( @@ -3882,26 +4439,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, } } - /* NVME work queues */ - if (phba->sli4_hba.nvme_wq) { - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; - qidx++) { - if (!phba->sli4_hba.nvme_wq[qidx]) - continue; - if (phba->sli4_hba.nvme_wq[qidx]->queue_id == - queid) { - /* Sanity check */ - rc = lpfc_idiag_que_param_check( - phba->sli4_hba.nvme_wq[qidx], - index, count); - if (rc) - goto error_out; - idiag.ptr_private = - phba->sli4_hba.nvme_wq[qidx]; - goto pass_check; - } - } - } goto error_out; break; case LPFC_IDIAG_RQ: @@ -4866,6 +5403,16 @@ static const struct file_operations lpfc_debugfs_op_nodelist = { .release = lpfc_debugfs_release, }; +#undef lpfc_debugfs_op_multixripools +static const struct file_operations lpfc_debugfs_op_multixripools = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_multixripools_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_multixripools_write, + .release = lpfc_debugfs_release, +}; + #undef lpfc_debugfs_op_hbqinfo static const struct file_operations lpfc_debugfs_op_hbqinfo = { .owner = THIS_MODULE, @@ -4875,6 +5422,18 @@ static const struct file_operations lpfc_debugfs_op_hbqinfo = { .release = lpfc_debugfs_release, }; +#ifdef LPFC_HDWQ_LOCK_STAT +#undef lpfc_debugfs_op_lockstat +static const struct file_operations lpfc_debugfs_op_lockstat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_lockstat_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_lockstat_write, + .release = lpfc_debugfs_release, +}; +#endif + #undef lpfc_debugfs_op_dumpHBASlim static const struct file_operations lpfc_debugfs_op_dumpHBASlim = { .owner = THIS_MODULE, @@ -4903,6 +5462,16 @@ static const struct file_operations lpfc_debugfs_op_nvmestat = { .release = lpfc_debugfs_release, }; +#undef lpfc_debugfs_op_scsistat +static const struct file_operations lpfc_debugfs_op_scsistat = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_scsistat_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .write = lpfc_debugfs_scsistat_write, + .release = lpfc_debugfs_release, +}; + #undef lpfc_debugfs_op_nvmektime static const struct file_operations lpfc_debugfs_op_nvmektime = { .owner = THIS_MODULE, @@ -5280,11 +5849,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) if (!lpfc_debugfs_root) { lpfc_debugfs_root = 
debugfs_create_dir("lpfc", NULL); atomic_set(&lpfc_debugfs_hba_count, 0); - if (!lpfc_debugfs_root) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0408 Cannot create debugfs root\n"); - goto debug_failed; - } } if (!lpfc_debugfs_start_time) lpfc_debugfs_start_time = jiffies; @@ -5295,25 +5859,42 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) pport_setup = true; phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); - if (!phba->hba_debugfs_root) { + atomic_inc(&lpfc_debugfs_hba_count); + atomic_set(&phba->debugfs_vport_count, 0); + + /* Multi-XRI pools */ + snprintf(name, sizeof(name), "multixripools"); + phba->debug_multixri_pools = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, + &lpfc_debugfs_op_multixripools); + if (!phba->debug_multixri_pools) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0412 Cannot create debugfs hba\n"); + "0527 Cannot create debugfs multixripools\n"); goto debug_failed; } - atomic_inc(&lpfc_debugfs_hba_count); - atomic_set(&phba->debugfs_vport_count, 0); /* Setup hbqinfo */ snprintf(name, sizeof(name), "hbqinfo"); phba->debug_hbqinfo = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_hbqinfo); - if (!phba->debug_hbqinfo) { + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_hbqinfo); + +#ifdef LPFC_HDWQ_LOCK_STAT + /* Setup lockstat */ + snprintf(name, sizeof(name), "lockstat"); + phba->debug_lockstat = + debugfs_create_file(name, S_IFREG | 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_lockstat); + if (!phba->debug_lockstat) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0411 Cannot create debugfs hbqinfo\n"); + "0913 Cant create debugfs lockstat\n"); goto debug_failed; } +#endif /* Setup dumpHBASlim */ if (phba->sli_rev < LPFC_SLI_REV4) { @@ -5323,12 +5904,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHBASlim); - if (!phba->debug_dumpHBASlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0413 Cannot create debugfs " - "dumpHBASlim\n"); - goto debug_failed; - } } else phba->debug_dumpHBASlim = NULL; @@ -5340,12 +5915,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpHostSlim); - if (!phba->debug_dumpHostSlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0414 Cannot create debugfs " - "dumpHostSlim\n"); - goto debug_failed; - } } else phba->debug_dumpHostSlim = NULL; @@ -5355,11 +5924,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpData); - if (!phba->debug_dumpData) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0800 Cannot create debugfs dumpData\n"); - goto debug_failed; - } /* Setup dumpDif */ snprintf(name, sizeof(name), "dumpDif"); @@ -5367,11 +5931,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dumpDif); - if (!phba->debug_dumpDif) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0801 Cannot create debugfs dumpDif\n"); - goto debug_failed; - } /* Setup DIF Error Injections */ snprintf(name, sizeof(name), "InjErrLBA"); @@ -5379,11 +5938,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if 
(!phba->debug_InjErrLBA) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0807 Cannot create debugfs InjErrLBA\n"); - goto debug_failed; - } phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; snprintf(name, sizeof(name), "InjErrNPortID"); @@ -5391,88 +5945,48 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_InjErrNPortID) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0809 Cannot create debugfs InjErrNPortID\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "InjErrWWPN"); phba->debug_InjErrWWPN = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_InjErrWWPN) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0810 Cannot create debugfs InjErrWWPN\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "writeGuardInjErr"); phba->debug_writeGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_writeGuard) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0802 Cannot create debugfs writeGuard\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "writeAppInjErr"); phba->debug_writeApp = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_writeApp) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0803 Cannot create debugfs writeApp\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "writeRefInjErr"); phba->debug_writeRef = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_writeRef) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0804 Cannot create debugfs writeRef\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "readGuardInjErr"); phba->debug_readGuard = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_readGuard) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0808 Cannot create debugfs readGuard\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "readAppInjErr"); phba->debug_readApp = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_readApp) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0805 Cannot create debugfs readApp\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "readRefInjErr"); phba->debug_readRef = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_dif_err); - if (!phba->debug_readRef) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0806 Cannot create debugfs readApp\n"); - goto debug_failed; - } /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { @@ -5496,12 +6010,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_slow_ring_trc); - if (!phba->debug_slow_ring_trc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0415 Cannot create debugfs " - "slow_ring_trace\n"); - goto debug_failed; - } if (!phba->slow_ring_trc) { phba->slow_ring_trc = kmalloc( (sizeof(struct lpfc_debugfs_trc) * @@ -5524,11 +6032,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) debugfs_create_file(name, 0644, phba->hba_debugfs_root, phba, &lpfc_debugfs_op_nvmeio_trc); - if 
(!phba->debug_nvmeio_trc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0574 No create debugfs nvmeio_trc\n"); - goto debug_failed; - } atomic_set(&phba->nvmeio_trc_cnt, 0); if (lpfc_debugfs_max_nvmeio_trc) { @@ -5576,11 +6079,6 @@ nvmeio_off: if (!vport->vport_debugfs_root) { vport->vport_debugfs_root = debugfs_create_dir(name, phba->hba_debugfs_root); - if (!vport->vport_debugfs_root) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0417 Can't create debugfs\n"); - goto debug_failed; - } atomic_inc(&phba->debugfs_vport_count); } @@ -5617,31 +6115,26 @@ nvmeio_off: debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_disc_trc); - if (!vport->debug_disc_trc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0419 Cannot create debugfs " - "discovery_trace\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "nodelist"); vport->debug_nodelist = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nodelist); - if (!vport->debug_nodelist) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2985 Can't create debugfs nodelist\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "nvmestat"); vport->debug_nvmestat = debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nvmestat); - if (!vport->debug_nvmestat) { + + snprintf(name, sizeof(name), "scsistat"); + vport->debug_scsistat = + debugfs_create_file(name, 0644, + vport->vport_debugfs_root, + vport, &lpfc_debugfs_op_scsistat); + if (!vport->debug_scsistat) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0811 Cannot create debugfs nvmestat\n"); + "0914 Cannot create debugfs scsistat\n"); goto debug_failed; } @@ -5650,22 +6143,12 @@ nvmeio_off: debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_nvmektime); - if (!vport->debug_nvmektime) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0815 Cannot create debugfs nvmektime\n"); - goto debug_failed; - } snprintf(name, sizeof(name), "cpucheck"); vport->debug_cpucheck = debugfs_create_file(name, 0644, vport->vport_debugfs_root, vport, &lpfc_debugfs_op_cpucheck); - if (!vport->debug_cpucheck) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0819 Cannot create debugfs cpucheck\n"); - goto debug_failed; - } /* * The following section is for additional directories/files for the @@ -5685,11 +6168,6 @@ nvmeio_off: if (!phba->idiag_root) { phba->idiag_root = debugfs_create_dir(name, phba->hba_debugfs_root); - if (!phba->idiag_root) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2922 Can't create idiag debugfs\n"); - goto debug_failed; - } /* Initialize iDiag data structure */ memset(&idiag, 0, sizeof(idiag)); } @@ -5700,11 +6178,6 @@ nvmeio_off: phba->idiag_pci_cfg = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); - if (!phba->idiag_pci_cfg) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2923 Can't create idiag debugfs\n"); - goto debug_failed; - } idiag.offset.last_rd = 0; } @@ -5714,11 +6187,6 @@ nvmeio_off: phba->idiag_bar_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_barAcc); - if (!phba->idiag_bar_acc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "3056 Can't create idiag debugfs\n"); - goto debug_failed; - } idiag.offset.last_rd = 0; } @@ -5728,11 +6196,6 @@ nvmeio_off: phba->idiag_que_info = debugfs_create_file(name, S_IFREG|S_IRUGO, phba->idiag_root, phba, &lpfc_idiag_op_queInfo); - if (!phba->idiag_que_info) 
{ - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2924 Can't create idiag debugfs\n"); - goto debug_failed; - } } /* iDiag access PCI function queue */ @@ -5741,11 +6204,6 @@ nvmeio_off: phba->idiag_que_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_queAcc); - if (!phba->idiag_que_acc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2926 Can't create idiag debugfs\n"); - goto debug_failed; - } } /* iDiag access PCI function doorbell registers */ @@ -5754,11 +6212,6 @@ nvmeio_off: phba->idiag_drb_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_drbAcc); - if (!phba->idiag_drb_acc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2927 Can't create idiag debugfs\n"); - goto debug_failed; - } } /* iDiag access PCI function control registers */ @@ -5767,11 +6220,6 @@ nvmeio_off: phba->idiag_ctl_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc); - if (!phba->idiag_ctl_acc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2981 Can't create idiag debugfs\n"); - goto debug_failed; - } } /* iDiag access mbox commands */ @@ -5780,11 +6228,6 @@ nvmeio_off: phba->idiag_mbx_acc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc); - if (!phba->idiag_mbx_acc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2980 Can't create idiag debugfs\n"); - goto debug_failed; - } } /* iDiag extents access commands */ @@ -5796,12 +6239,6 @@ nvmeio_off: S_IFREG|S_IRUGO|S_IWUSR, phba->idiag_root, phba, &lpfc_idiag_op_extAcc); - if (!phba->idiag_ext_acc) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "2986 Cant create " - "idiag debugfs\n"); - goto debug_failed; - } } } @@ -5839,6 +6276,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(vport->debug_nvmestat); /* nvmestat */ vport->debug_nvmestat = NULL; + debugfs_remove(vport->debug_scsistat); /* scsistat */ + vport->debug_scsistat = NULL; + debugfs_remove(vport->debug_nvmektime); /* nvmektime */ vport->debug_nvmektime = NULL; @@ -5853,9 +6293,16 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) if (atomic_read(&phba->debugfs_vport_count) == 0) { + debugfs_remove(phba->debug_multixri_pools); /* multixripools*/ + phba->debug_multixri_pools = NULL; + debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ phba->debug_hbqinfo = NULL; +#ifdef LPFC_HDWQ_LOCK_STAT + debugfs_remove(phba->debug_lockstat); /* lockstat */ + phba->debug_lockstat = NULL; +#endif debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */ phba->debug_dumpHBASlim = NULL; @@ -5988,11 +6435,13 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) lpfc_debug_dump_wq(phba, DUMP_ELS, 0); lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) lpfc_debug_dump_wq(phba, DUMP_FCP, idx); - for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) - lpfc_debug_dump_wq(phba, DUMP_NVME, idx); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) + lpfc_debug_dump_wq(phba, DUMP_NVME, idx); + } lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_dat_rq(phba); @@ -6003,15 +6452,17 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) lpfc_debug_dump_cq(phba, DUMP_ELS, 0); lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) lpfc_debug_dump_cq(phba, DUMP_FCP, idx); - for (idx = 0; 
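The debugfs hunks above drop every "if (!...)" check that used to follow debugfs_create_dir()/debugfs_create_file(), which matches the kernel's general guidance that debugfs return values need not be checked: a failed create only leaves the entry absent, and the debugfs_remove() calls in lpfc_debugfs_terminate() are safe on a NULL dentry. A minimal sketch of the resulting pattern (the names here are hypothetical, not part of the driver):

#include <linux/debugfs.h>

/* Hypothetical illustration: create entries without checking the result.
 * A failed create only leaves the file absent, and debugfs teardown is
 * safe to call on a NULL dentry.
 */
static struct dentry *example_dir;

static void example_debugfs_setup(void *priv,
                                  const struct file_operations *fops)
{
        example_dir = debugfs_create_dir("example", NULL);
        debugfs_create_file("nvmestat", 0644, example_dir, priv, fops);
        debugfs_create_file("scsistat", 0644, example_dir, priv, fops);
}

static void example_debugfs_teardown(void)
{
        debugfs_remove_recursive(example_dir);  /* NULL-safe */
        example_dir = NULL;
}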
idx < phba->cfg_nvme_io_channel; idx++) - lpfc_debug_dump_cq(phba, DUMP_NVME, idx); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) + lpfc_debug_dump_cq(phba, DUMP_NVME, idx); + } /* * Dump Event Queues (EQs) */ - for (idx = 0; idx < phba->io_channel_irqs; idx++) + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) lpfc_debug_dump_hba_eq(phba, idx); } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 30efc7bf91bd..93ab7dfb8ee0 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -50,6 +50,9 @@ #define LPFC_CPUCHECK_SIZE 8192 #define LPFC_NVMEIO_TRC_SIZE 8192 +/* scsistat output buffer size */ +#define LPFC_SCSISTAT_SIZE 8192 + #define LPFC_DEBUG_OUT_LINE_SZ 80 /* @@ -284,6 +287,9 @@ struct lpfc_idiag { #endif +/* multixripool output buffer size */ +#define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192 + enum { DUMP_FCP, DUMP_NVME, @@ -410,10 +416,10 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) char *qtypestr; if (qtype == DUMP_FCP) { - wq = phba->sli4_hba.fcp_wq[wqidx]; + wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; qtypestr = "FCP"; } else if (qtype == DUMP_NVME) { - wq = phba->sli4_hba.nvme_wq[wqidx]; + wq = phba->sli4_hba.hdwq[wqidx].nvme_wq; qtypestr = "NVME"; } else if (qtype == DUMP_MBX) { wq = phba->sli4_hba.mbx_wq; @@ -454,14 +460,15 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) int eqidx; /* fcp/nvme wq and cq are 1:1, thus same indexes */ + eq = NULL; if (qtype == DUMP_FCP) { - wq = phba->sli4_hba.fcp_wq[wqidx]; - cq = phba->sli4_hba.fcp_cq[wqidx]; + wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; + cq = phba->sli4_hba.hdwq[wqidx].fcp_cq; qtypestr = "FCP"; } else if (qtype == DUMP_NVME) { - wq = phba->sli4_hba.nvme_wq[wqidx]; - cq = phba->sli4_hba.nvme_cq[wqidx]; + wq = phba->sli4_hba.hdwq[wqidx].nvme_wq; + cq = phba->sli4_hba.hdwq[wqidx].nvme_cq; qtypestr = "NVME"; } else if (qtype == DUMP_MBX) { wq = phba->sli4_hba.mbx_wq; @@ -478,17 +485,17 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) } else return; - for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) { - if (cq->assoc_qid == phba->sli4_hba.hba_eq[eqidx]->queue_id) + for (eqidx = 0; eqidx < phba->cfg_hdw_queue; eqidx++) { + eq = phba->sli4_hba.hdwq[eqidx].hba_eq; + if (cq->assoc_qid == eq->queue_id) break; } - if (eqidx == phba->io_channel_irqs) { + if (eqidx == phba->cfg_hdw_queue) { pr_err("Couldn't find EQ for CQ. 
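Throughout these lpfc_debugfs.h hunks the separate fcp_wq[]/nvme_wq[], fcp_cq[]/nvme_cq[] and hba_eq[] arrays are replaced by a single hdwq[] array indexed by hardware queue number. A reduced, hypothetical view of that grouping, assuming the driver's struct lpfc_queue (the bundle and helper names below are illustrative only):

#include <linux/types.h>

/* Illustrative only: one per-hardware-queue bundle owns the EQ plus the
 * FCP and NVMe WQ/CQ pairs, so every dump path indexes a single array.
 */
struct example_hdw_queue {
        struct lpfc_queue *hba_eq;
        struct lpfc_queue *fcp_cq;
        struct lpfc_queue *fcp_wq;
        struct lpfc_queue *nvme_cq;
        struct lpfc_queue *nvme_wq;
};

static struct lpfc_queue *
example_get_wq(struct example_hdw_queue *hdwq, int idx, bool nvme)
{
        /* mirrors wq = phba->sli4_hba.hdwq[wqidx].fcp_wq / .nvme_wq above */
        return nvme ? hdwq[idx].nvme_wq : hdwq[idx].fcp_wq;
}

One visible consequence is that every loop bound in these hunks collapses to cfg_hdw_queue, since FCP and NVMe no longer have independent channel counts.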
Using EQ[0]\n"); eqidx = 0; + eq = phba->sli4_hba.hdwq[0].hba_eq; } - eq = phba->sli4_hba.hba_eq[eqidx]; - if (qtype == DUMP_FCP || qtype == DUMP_NVME) pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" "->EQ[Idx:%d|Qid:%d]:\n", @@ -516,7 +523,7 @@ lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx) { struct lpfc_queue *qp; - qp = phba->sli4_hba.hba_eq[qidx]; + qp = phba->sli4_hba.hdwq[qidx].hba_eq; pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id); @@ -564,21 +571,21 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) { int wq_idx; - for (wq_idx = 0; wq_idx < phba->cfg_fcp_io_channel; wq_idx++) - if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid) + for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++) + if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid) break; - if (wq_idx < phba->cfg_fcp_io_channel) { + if (wq_idx < phba->cfg_hdw_queue) { pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq); return; } - for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++) - if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid) + for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++) + if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid) break; - if (wq_idx < phba->cfg_nvme_io_channel) { + if (wq_idx < phba->cfg_hdw_queue) { pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq); return; } @@ -646,23 +653,23 @@ lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) { int cq_idx; - for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++) - if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid) + for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++) + if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid) break; - if (cq_idx < phba->cfg_fcp_io_channel) { + if (cq_idx < phba->cfg_hdw_queue) { pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq); return; } - for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++) - if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid) + for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++) + if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid) break; - if (cq_idx < phba->cfg_nvme_io_channel) { + if (cq_idx < phba->cfg_hdw_queue) { pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq); return; } @@ -697,13 +704,13 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid) { int eq_idx; - for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++) - if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid) + for (eq_idx = 0; eq_idx < phba->cfg_hdw_queue; eq_idx++) + if (phba->sli4_hba.hdwq[eq_idx].hba_eq->queue_id == qid) break; - if (eq_idx < phba->io_channel_irqs) { + if (eq_idx < phba->cfg_hdw_queue) { printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[eq_idx].hba_eq); return; } } diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b3a4789468c3..fc077cb87900 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel 
Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -2827,8 +2827,8 @@ out: !(vport->fc_flag & FC_PT2PT_PLOGI)) { phba->pport->fc_myDID = 0; - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { if (phba->nvmet_support) lpfc_nvmet_update_targetport(phba); else diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index b183b882d506..aa4961a2caf8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -638,8 +638,6 @@ lpfc_work_done(struct lpfc_hba *phba) if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { if (phba->hba_flag & HBA_RRQ_ACTIVE) lpfc_handle_rrq_active(phba); - if (phba->hba_flag & FCP_XRI_ABORT_EVENT) - lpfc_sli4_fcp_xri_abort_event_proc(phba); if (phba->hba_flag & ELS_XRI_ABORT_EVENT) lpfc_sli4_els_xri_abort_event_proc(phba); if (phba->hba_flag & ASYNC_EVENT) @@ -859,10 +857,9 @@ lpfc_port_link_failure(struct lpfc_vport *vport) void lpfc_linkdown_port(struct lpfc_vport *vport) { - struct lpfc_hba *phba = vport->phba; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0); @@ -925,8 +922,8 @@ lpfc_linkdown(struct lpfc_hba *phba) vports[i]->fc_myDID = 0; - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { if (phba->nvmet_support) lpfc_nvmet_update_targetport(phba); else @@ -1012,7 +1009,7 @@ lpfc_linkup_port(struct lpfc_vport *vport) (vport != phba->pport)) return; - if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0); @@ -3660,8 +3657,8 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_unlock_irq(shost->host_lock); vport->fc_myDID = 0; - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { if (phba->nvmet_support) lpfc_nvmet_update_targetport(phba); else @@ -3923,11 +3920,9 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) int lpfc_issue_gidft(struct lpfc_vport *vport) { - struct lpfc_hba *phba = vport->phba; - /* Good status, issue CT Request to NameServer */ - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - 
(phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) { if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) { /* Cannot issue NameServer FCP Query, so finish up * discovery @@ -3942,8 +3937,8 @@ lpfc_issue_gidft(struct lpfc_vport *vport) vport->gidft_inp++; } - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) { /* Cannot issue NameServer NVME Query, so finish up * discovery @@ -4059,12 +4054,12 @@ out: lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP); - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_NVME); @@ -4100,7 +4095,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct fc_rport_identifiers rport_ids; struct lpfc_hba *phba = vport->phba; - if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) return; /* Remote port has reappeared. Re-register w/ FC transport */ @@ -4175,9 +4170,8 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) { struct fc_rport *rport = ndlp->rport; struct lpfc_vport *vport = ndlp->vport; - struct lpfc_hba *phba = vport->phba; - if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME) + if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index c15b9b6fb840..ff875b833192 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
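The lpfc_els.c and lpfc_hbadisc.c hunks above repeat one test, now read from the vport rather than the hba: whether the enabled FC4 type includes NVMe or FCP. A hypothetical helper, assuming the driver's struct lpfc_vport and LPFC_ENABLE_* values, states the intent of that repeated check; it is not part of the patch:

#include <linux/types.h>

/* Illustrative only: the FC4 type is now evaluated per vport. */
static inline bool example_vport_wants_nvme(struct lpfc_vport *vport)
{
        return vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
               vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME;
}

static inline bool example_vport_wants_fcp(struct lpfc_vport *vport)
{
        return vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
               vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP;
}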
* @@ -194,7 +194,7 @@ struct lpfc_sli_intf { #define LPFC_ACT_INTR_CNT 4 /* Algrithmns for scheduling FCP commands to WQs */ -#define LPFC_FCP_SCHED_ROUND_ROBIN 0 +#define LPFC_FCP_SCHED_BY_HDWQ 0 #define LPFC_FCP_SCHED_BY_CPU 1 /* Algrithmns for NameServer Query after RSCN */ @@ -208,12 +208,18 @@ struct lpfc_sli_intf { /* Configuration of Interrupts / sec for entire HBA port */ #define LPFC_MIN_IMAX 5000 #define LPFC_MAX_IMAX 5000000 -#define LPFC_DEF_IMAX 150000 +#define LPFC_DEF_IMAX 0 + +#define LPFC_IMAX_THRESHOLD 1000 +#define LPFC_MAX_AUTO_EQ_DELAY 120 +#define LPFC_EQ_DELAY_STEP 15 +#define LPFC_EQD_ISR_TRIGGER 20000 +/* 1s intervals */ +#define LPFC_EQ_DELAY_MSECS 1000 #define LPFC_MIN_CPU_MAP 0 -#define LPFC_MAX_CPU_MAP 2 +#define LPFC_MAX_CPU_MAP 1 #define LPFC_HBA_CPU_MAP 1 -#define LPFC_DRIVER_CPU_MAP 2 /* Default */ /* PORT_CAPABILITIES constants. */ #define LPFC_MAX_SUPPORTED_PAGES 8 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index bede11e16349..3b5873f6751e 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -71,7 +72,6 @@ unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; /* Used when mapping IRQ vectors in a driver centric manner */ -uint16_t *lpfc_used_cpu; uint32_t lpfc_present_cpu; static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); @@ -93,6 +93,8 @@ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static void lpfc_sli4_disable_intr(struct lpfc_hba *); static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); static void lpfc_sli4_oas_verify(struct lpfc_hba *phba); +static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t); +static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -1037,14 +1039,14 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) static int lpfc_hba_down_post_s4(struct lpfc_hba *phba) { - struct lpfc_scsi_buf *psb, *psb_next; + struct lpfc_io_buf *psb, *psb_next; struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next; + struct lpfc_sli4_hdw_queue *qp; LIST_HEAD(aborts); LIST_HEAD(nvme_aborts); LIST_HEAD(nvmet_aborts); - unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; - int cnt; + int cnt, idx; lpfc_sli_hbqbuf_free_all(phba); @@ -1071,55 +1073,65 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) spin_unlock(&phba->sli4_hba.sgl_list_lock); - /* abts_scsi_buf_list_lock required because worker thread uses this + + /* abts_xxxx_buf_list_lock required because worker thread uses this * list. 
*/ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, - &aborts); - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); - } - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list, - &nvme_aborts); - list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, - &nvmet_aborts); - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); - } - - spin_unlock_irq(&phba->hbalock); + cnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; - list_for_each_entry_safe(psb, psb_next, &aborts, list) { - psb->pCmd = NULL; - psb->status = IOSTAT_SUCCESS; - } - spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); - spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + spin_lock(&qp->abts_scsi_buf_list_lock); + list_splice_init(&qp->lpfc_abts_scsi_buf_list, + &aborts); - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - cnt = 0; - list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) { + list_for_each_entry_safe(psb, psb_next, &aborts, list) { psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; cnt++; } - spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); - phba->put_nvme_bufs += cnt; - list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put); - spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); + spin_lock(&qp->io_buf_list_put_lock); + list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); + qp->put_io_bufs += qp->abts_scsi_io_bufs; + qp->abts_scsi_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->abts_scsi_buf_list_lock); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + spin_lock(&qp->abts_nvme_buf_list_lock); + list_splice_init(&qp->lpfc_abts_nvme_buf_list, + &nvme_aborts); + list_for_each_entry_safe(psb, psb_next, &nvme_aborts, + list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + cnt++; + } + spin_lock(&qp->io_buf_list_put_lock); + qp->put_io_bufs += qp->abts_nvme_io_bufs; + qp->abts_nvme_io_bufs = 0; + list_splice_init(&nvme_aborts, + &qp->lpfc_io_buf_list_put); + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); + + } + } + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list, + &nvmet_aborts); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) { ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP); lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); } } + spin_unlock_irq(&phba->hbalock); lpfc_sli4_free_sp_events(phba); - return 0; + return cnt; } /** @@ -1239,6 +1251,96 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) return; } +static void +lpfc_hb_eq_delay_work(struct work_struct *work) +{ + struct lpfc_hba *phba = container_of(to_delayed_work(work), + struct lpfc_hba, eq_delay_work); + struct lpfc_eq_intr_info *eqi, *eqi_new; + struct lpfc_queue *eq, *eq_next; + unsigned char *eqcnt = NULL; + uint32_t usdelay; + int i; + + if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING) + return; + + if (phba->link_state == LPFC_HBA_ERROR || + phba->pport->fc_flag & FC_OFFLINE_MODE) + goto requeue; + + eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char), + 
GFP_KERNEL); + if (!eqcnt) + goto requeue; + + for (i = 0; i < phba->cfg_irq_chann; i++) { + eq = phba->sli4_hba.hdwq[i].hba_eq; + if (eq && eqcnt[eq->last_cpu] < 2) + eqcnt[eq->last_cpu]++; + continue; + } + + for_each_present_cpu(i) { + if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2) + continue; + + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); + + usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * + LPFC_EQ_DELAY_STEP; + if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) + usdelay = LPFC_MAX_AUTO_EQ_DELAY; + + eqi->icnt = 0; + + list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { + if (eq->last_cpu != i) { + eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, + eq->last_cpu); + list_move_tail(&eq->cpu_list, &eqi_new->list); + continue; + } + if (usdelay != eq->q_mode) + lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1, + usdelay); + } + } + + kfree(eqcnt); + +requeue: + queue_delayed_work(phba->wq, &phba->eq_delay_work, + msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); +} + +/** + * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution + * @phba: pointer to lpfc hba data structure. + * + * For each heartbeat, this routine does some heuristic methods to adjust + * XRI distribution. The goal is to fully utilize free XRIs. + **/ +static void lpfc_hb_mxp_handler(struct lpfc_hba *phba) +{ + u32 i; + u32 hwq_count; + + hwq_count = phba->cfg_hdw_queue; + for (i = 0; i < hwq_count; i++) { + /* Adjust XRIs in private pool */ + lpfc_adjust_pvt_pool_count(phba, i); + + /* Adjust high watermark */ + lpfc_adjust_high_watermark(phba, i); + +#ifdef LPFC_MXP_STAT + /* Snapshot pbl, pvt and busy count */ + lpfc_snapshot_mxp(phba, i); +#endif + } +} + /** * lpfc_hb_timeout_handler - The HBA-timer timeout handler * @phba: pointer to lpfc hba data structure. @@ -1264,16 +1366,11 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) int retval, i; struct lpfc_sli *psli = &phba->sli; LIST_HEAD(completions); - struct lpfc_queue *qp; - unsigned long time_elapsed; - uint32_t tick_cqe, max_cqe, val; - uint64_t tot, data1, data2, data3; - struct lpfc_nvmet_tgtport *tgtp; - struct lpfc_register reg_data; - struct nvme_fc_local_port *localport; - struct lpfc_nvme_lport *lport; - struct lpfc_nvme_ctrl_stat *cstat; - void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr; + + if (phba->cfg_xri_rebalancing) { + /* Multi-XRI pools handler */ + lpfc_hb_mxp_handler(phba); + } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) @@ -1288,107 +1385,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) (phba->pport->fc_flag & FC_OFFLINE_MODE)) return; - if (phba->cfg_auto_imax) { - if (!phba->last_eqdelay_time) { - phba->last_eqdelay_time = jiffies; - goto skip_eqdelay; - } - time_elapsed = jiffies - phba->last_eqdelay_time; - phba->last_eqdelay_time = jiffies; - - tot = 0xffff; - /* Check outstanding IO count */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - if (phba->nvmet_support) { - tgtp = phba->targetport->private; - /* Calculate outstanding IOs */ - tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); - tot += atomic_read(&tgtp->xmt_fcp_release); - tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; - } else { - localport = phba->pport->localport; - if (!localport || !localport->private) - goto skip_eqdelay; - lport = (struct lpfc_nvme_lport *) - localport->private; - tot = 0; - for (i = 0; - i < phba->cfg_nvme_io_channel; i++) { - cstat = &lport->cstat[i]; - data1 = atomic_read( - &cstat->fc4NvmeInputRequests); - data2 = atomic_read( - &cstat->fc4NvmeOutputRequests); - data3 = atomic_read( - 
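lpfc_hb_eq_delay_work() above reschedules itself every LPFC_EQ_DELAY_MSECS and converts the interrupt count each CPU accumulated in that window into an EQ coalescing delay: one LPFC_EQ_DELAY_STEP microseconds per LPFC_IMAX_THRESHOLD interrupts, capped at LPFC_MAX_AUTO_EQ_DELAY. A standalone sketch of that arithmetic, using the constant values from the lpfc_hw4.h hunk earlier in this patch (the helper name is hypothetical):

#define EX_IMAX_THRESHOLD       1000    /* interrupts per delay step */
#define EX_EQ_DELAY_STEP        15      /* usec added per step */
#define EX_MAX_AUTO_EQ_DELAY    120     /* usec cap */

/* e.g. 4500 interrupts in the 1s window -> 4 steps -> 60 usec;
 * 20000 interrupts -> 20 steps -> capped at 120 usec.
 */
static unsigned int example_eq_delay_usec(unsigned int icnt)
{
        unsigned int usdelay = (icnt / EX_IMAX_THRESHOLD) * EX_EQ_DELAY_STEP;

        return usdelay > EX_MAX_AUTO_EQ_DELAY ? EX_MAX_AUTO_EQ_DELAY : usdelay;
}

The work item returns early unless cfg_auto_imax is set, so this adaptive delay only runs in that mode.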
&cstat->fc4NvmeControlRequests); - tot += (data1 + data2 + data3); - tot -= atomic_read( - &cstat->fc4NvmeIoCmpls); - } - } - } - - /* Interrupts per sec per EQ */ - val = phba->cfg_fcp_imax / phba->io_channel_irqs; - tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */ - - /* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */ - max_cqe = time_elapsed * tick_cqe; - - for (i = 0; i < phba->io_channel_irqs; i++) { - /* Fast-path EQ */ - qp = phba->sli4_hba.hba_eq[i]; - if (!qp) - continue; - - /* Use no EQ delay if we don't have many outstanding - * IOs, or if we are only processing 1 CQE/ISR or less. - * Otherwise, assume we can process up to lpfc_fcp_imax - * interrupts per HBA. - */ - if (tot < LPFC_NODELAY_MAX_IO || - qp->EQ_cqe_cnt <= max_cqe) - val = 0; - else - val = phba->cfg_fcp_imax; - - if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { - /* Use EQ Delay Register method */ - - /* Convert for EQ Delay register */ - if (val) { - /* First, interrupts per sec per EQ */ - val = phba->cfg_fcp_imax / - phba->io_channel_irqs; - - /* us delay between each interrupt */ - val = LPFC_SEC_TO_USEC / val; - } - if (val != qp->q_mode) { - reg_data.word0 = 0; - bf_set(lpfc_sliport_eqdelay_id, - ®_data, qp->queue_id); - bf_set(lpfc_sliport_eqdelay_delay, - ®_data, val); - writel(reg_data.word0, eqdreg); - } - } else { - /* Use mbox command method */ - if (val != qp->q_mode) - lpfc_modify_hba_eq_delay(phba, i, - 1, val); - } - - /* - * val is cfg_fcp_imax or 0 for mbox delay or us delay - * between interrupts for EQDR. - */ - qp->q_mode = val; - qp->EQ_cqe_cnt = 0; - } - } - -skip_eqdelay: spin_lock_irq(&phba->pport->work_port_lock); if (time_after(phba->last_completion_time + @@ -2943,7 +2939,9 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) void lpfc_stop_hba_timers(struct lpfc_hba *phba) { - lpfc_stop_vport_timers(phba->pport); + if (phba->pport) + lpfc_stop_vport_timers(phba->pport); + cancel_delayed_work_sync(&phba->eq_delay_work); del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->eratt_poll); @@ -3070,6 +3068,242 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) lpfc_destroy_vport_work_array(phba, vports); } +/** + * lpfc_create_expedite_pool - create expedite pool + * @phba: pointer to lpfc hba data structure. + * + * This routine moves a batch of XRIs from lpfc_io_buf_list_put of HWQ 0 + * to expedite pool. Mark them as expedite. + **/ +void lpfc_create_expedite_pool(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + struct lpfc_epd_pool *epd_pool; + unsigned long iflag; + + epd_pool = &phba->epd_pool; + qp = &phba->sli4_hba.hdwq[0]; + + spin_lock_init(&epd_pool->lock); + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + spin_lock(&epd_pool->lock); + INIT_LIST_HEAD(&epd_pool->list); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_put, list) { + list_move_tail(&lpfc_ncmd->list, &epd_pool->list); + lpfc_ncmd->expedite = true; + qp->put_io_bufs--; + epd_pool->count++; + if (epd_pool->count >= XRI_BATCH) + break; + } + spin_unlock(&epd_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); +} + +/** + * lpfc_destroy_expedite_pool - destroy expedite pool + * @phba: pointer to lpfc hba data structure. + * + * This routine returns XRIs from expedite pool to lpfc_io_buf_list_put + * of HWQ 0. Clear the mark. 
+ **/ +void lpfc_destroy_expedite_pool(struct lpfc_hba *phba) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + struct lpfc_epd_pool *epd_pool; + unsigned long iflag; + + epd_pool = &phba->epd_pool; + qp = &phba->sli4_hba.hdwq[0]; + + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + spin_lock(&epd_pool->lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &epd_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + lpfc_ncmd->flags = false; + qp->put_io_bufs++; + epd_pool->count--; + } + spin_unlock(&epd_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); +} + +/** + * lpfc_create_multixri_pools - create multi-XRI pools + * @phba: pointer to lpfc hba data structure. + * + * This routine initialize public, private per HWQ. Then, move XRIs from + * lpfc_io_buf_list_put to public pool. High and low watermark are also + * Initialized. + **/ +void lpfc_create_multixri_pools(struct lpfc_hba *phba) +{ + u32 i, j; + u32 hwq_count; + u32 count_per_hwq; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n", + phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu, + phba->sli4_hba.io_xri_cnt); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + lpfc_create_expedite_pool(phba); + + hwq_count = phba->cfg_hdw_queue; + count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count; + + for (i = 0; i < hwq_count; i++) { + multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL); + + if (!multixri_pool) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1238 Failed to allocate memory for " + "multixri_pool\n"); + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + lpfc_destroy_expedite_pool(phba); + + j = 0; + while (j < i) { + qp = &phba->sli4_hba.hdwq[j]; + kfree(qp->p_multixri_pool); + j++; + } + phba->cfg_xri_rebalancing = 0; + return; + } + + qp = &phba->sli4_hba.hdwq[i]; + qp->p_multixri_pool = multixri_pool; + + multixri_pool->xri_limit = count_per_hwq; + multixri_pool->rrb_next_hwqid = i; + + /* Deal with public free xri pool */ + pbl_pool = &multixri_pool->pbl_pool; + spin_lock_init(&pbl_pool->lock); + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + spin_lock(&pbl_pool->lock); + INIT_LIST_HEAD(&pbl_pool->list); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_put, list) { + list_move_tail(&lpfc_ncmd->list, &pbl_pool->list); + qp->put_io_bufs--; + pbl_pool->count++; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n", + pbl_pool->count, i); + spin_unlock(&pbl_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); + + /* Deal with private free xri pool */ + pvt_pool = &multixri_pool->pvt_pool; + pvt_pool->high_watermark = multixri_pool->xri_limit / 2; + pvt_pool->low_watermark = XRI_BATCH; + spin_lock_init(&pvt_pool->lock); + spin_lock_irqsave(&pvt_pool->lock, iflag); + INIT_LIST_HEAD(&pvt_pool->list); + pvt_pool->count = 0; + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + } +} + +/** + * lpfc_destroy_multixri_pools - destroy multi-XRI pools + * @phba: pointer to lpfc hba data structure. + * + * This routine returns XRIs from public/private to lpfc_io_buf_list_put. 
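lpfc_create_expedite_pool() and lpfc_create_multixri_pools() above split the common XRI buffers across hardware queues: each queue gets a public (pbl) pool seeded from its put list plus an initially empty private (pvt) pool whose high watermark starts at half the queue's XRI share and whose low watermark is one XRI_BATCH, while an expedite pool is carved out of hardware queue 0 when NVMe is enabled. A reduced, hypothetical sketch of the per-queue setup (structures and names are illustrative, not the driver's):

#include <linux/list.h>
#include <linux/types.h>

/* Illustrative only -- not the driver's actual structures. */
struct example_pool {
        struct list_head list;
        u32 count;
        u32 high_watermark;     /* pvt pool may grow to this many XRIs */
        u32 low_watermark;      /* refill the pvt pool below this */
};

struct example_multixri_pool {
        struct example_pool pbl_pool;   /* public pool, seeded at create time */
        struct example_pool pvt_pool;   /* private fast-path pool, starts empty */
        u32 xri_limit;                  /* this queue's share of the XRIs */
};

static void example_init_multixri(struct example_multixri_pool *p,
                                  u32 total_xri, u32 hwq_count, u32 xri_batch)
{
        p->xri_limit = total_xri / hwq_count;
        INIT_LIST_HEAD(&p->pbl_pool.list);
        INIT_LIST_HEAD(&p->pvt_pool.list);
        p->pbl_pool.count = 0;
        p->pvt_pool.count = 0;
        p->pvt_pool.high_watermark = p->xri_limit / 2;
        p->pvt_pool.low_watermark = xri_batch;
}

The heartbeat path earlier in this file (lpfc_hb_mxp_handler) then adjusts those watermarks and moves XRIs between the public and private pools as load shifts.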
+ **/ +void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) +{ + u32 i; + u32 hwq_count; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + lpfc_destroy_expedite_pool(phba); + + hwq_count = phba->cfg_hdw_queue; + + for (i = 0; i < hwq_count; i++) { + qp = &phba->sli4_hba.hdwq[i]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + continue; + + qp->p_multixri_pool = NULL; + + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag); + + /* Deal with public free xri pool */ + pbl_pool = &multixri_pool->pbl_pool; + spin_lock(&pbl_pool->lock); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n", + pbl_pool->count, i); + + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pbl_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + pbl_pool->count--; + } + + INIT_LIST_HEAD(&pbl_pool->list); + pbl_pool->count = 0; + + spin_unlock(&pbl_pool->lock); + + /* Deal with private free xri pool */ + pvt_pool = &multixri_pool->pvt_pool; + spin_lock(&pvt_pool->lock); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n", + pvt_pool->count, i); + + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pvt_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + pvt_pool->count--; + } + + INIT_LIST_HEAD(&pvt_pool->list); + pvt_pool->count = 0; + + spin_unlock(&pvt_pool->lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag); + + kfree(multixri_pool); + } +} + /** * lpfc_online - Initialize and bring a HBA online * @phba: pointer to lpfc hba data structure. @@ -3152,6 +3386,9 @@ lpfc_online(struct lpfc_hba *phba) } lpfc_destroy_vport_work_array(phba, vports); + if (phba->cfg_xri_rebalancing) + lpfc_create_multixri_pools(phba); + lpfc_unblock_mgmt_io(phba); return 0; } @@ -3310,6 +3547,9 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + + if (phba->cfg_xri_rebalancing) + lpfc_destroy_multixri_pools(phba); } /** @@ -3323,7 +3563,7 @@ lpfc_offline(struct lpfc_hba *phba) static void lpfc_scsi_free(struct lpfc_hba *phba) { - struct lpfc_scsi_buf *sb, *sb_next; + struct lpfc_io_buf *sb, *sb_next; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; @@ -3355,50 +3595,57 @@ lpfc_scsi_free(struct lpfc_hba *phba) spin_unlock(&phba->scsi_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } + /** - * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists + * lpfc_io_free - Free all the IO buffers and IOCBs from driver lists * @phba: pointer to lpfc hba data structure. * - * This routine is to free all the NVME buffers and IOCBs from the driver + * This routine is to free all the IO buffers and IOCBs from the driver * list back to kernel. It is called from lpfc_pci_remove_one to free * the internal resources before the device is removed from the system. 
**/ -static void -lpfc_nvme_free(struct lpfc_hba *phba) +void +lpfc_io_free(struct lpfc_hba *phba) { - struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; - - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) - return; + struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; + struct lpfc_sli4_hdw_queue *qp; + int idx; spin_lock_irq(&phba->hbalock); - /* Release all the lpfc_nvme_bufs maintained by this host. */ - spin_lock(&phba->nvme_buf_list_put_lock); - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_nvme_buf_list_put, list) { - list_del(&lpfc_ncmd->list); - phba->put_nvme_bufs--; - dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, - lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - phba->total_nvme_bufs--; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + /* Release all the lpfc_nvme_bufs maintained by this host. */ + spin_lock(&qp->io_buf_list_put_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_put, + list) { + list_del(&lpfc_ncmd->list); + qp->put_io_bufs--; + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + qp->total_io_bufs--; + } + spin_unlock(&qp->io_buf_list_put_lock); + + spin_lock(&qp->io_buf_list_get_lock); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &qp->lpfc_io_buf_list_get, + list) { + list_del(&lpfc_ncmd->list); + qp->get_io_bufs--; + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + qp->total_io_bufs--; + } + spin_unlock(&qp->io_buf_list_get_lock); } - spin_unlock(&phba->nvme_buf_list_put_lock); - spin_lock(&phba->nvme_buf_list_get_lock); - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_nvme_buf_list_get, list) { - list_del(&lpfc_ncmd->list); - phba->get_nvme_bufs--; - dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, - lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - phba->total_nvme_bufs--; - } - spin_unlock(&phba->nvme_buf_list_get_lock); spin_unlock_irq(&phba->hbalock); } + /** * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. @@ -3640,8 +3887,102 @@ out_free_mem: return rc; } +int +lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf) +{ + LIST_HEAD(blist); + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_cmd; + struct lpfc_io_buf *iobufp, *prev_iobufp; + int idx, cnt, xri, inserted; + + cnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_irq(&qp->io_buf_list_get_lock); + spin_lock(&qp->io_buf_list_put_lock); + + /* Take everything off the get and put lists */ + list_splice_init(&qp->lpfc_io_buf_list_get, &blist); + list_splice(&qp->lpfc_io_buf_list_put, &blist); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + cnt += qp->get_io_bufs + qp->put_io_bufs; + qp->get_io_bufs = 0; + qp->put_io_bufs = 0; + qp->total_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock_irq(&qp->io_buf_list_get_lock); + } + + /* + * Take IO buffers off blist and put on cbuf sorted by XRI. + * This is because POST_SGL takes a sequential range of XRIs + * to post to the firmware. 
+ */ + for (idx = 0; idx < cnt; idx++) { + list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list); + if (!lpfc_cmd) + return cnt; + if (idx == 0) { + list_add_tail(&lpfc_cmd->list, cbuf); + continue; + } + xri = lpfc_cmd->cur_iocbq.sli4_xritag; + inserted = 0; + prev_iobufp = NULL; + list_for_each_entry(iobufp, cbuf, list) { + if (xri < iobufp->cur_iocbq.sli4_xritag) { + if (prev_iobufp) + list_add(&lpfc_cmd->list, + &prev_iobufp->list); + else + list_add(&lpfc_cmd->list, cbuf); + inserted = 1; + break; + } + prev_iobufp = iobufp; + } + if (!inserted) + list_add_tail(&lpfc_cmd->list, cbuf); + } + return cnt; +} + +int +lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_cmd; + int idx, cnt; + + qp = phba->sli4_hba.hdwq; + cnt = 0; + while (!list_empty(cbuf)) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + list_remove_head(cbuf, lpfc_cmd, + struct lpfc_io_buf, list); + if (!lpfc_cmd) + return cnt; + cnt++; + qp = &phba->sli4_hba.hdwq[idx]; + lpfc_cmd->hdwq_no = idx; + lpfc_cmd->hdwq = qp; + lpfc_cmd->cur_iocbq.wqe_cmpl = NULL; + lpfc_cmd->cur_iocbq.iocb_cmpl = NULL; + spin_lock(&qp->io_buf_list_put_lock); + list_add_tail(&lpfc_cmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + qp->total_io_bufs++; + spin_unlock(&qp->io_buf_list_put_lock); + } + } + return cnt; +} + /** - * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping + * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping * @phba: pointer to lpfc hba data structure. * * This routine first calculates the sizes of the current els and allocated @@ -3653,94 +3994,192 @@ out_free_mem: * 0 - successful (for now, it always returns 0) **/ int -lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba) +lpfc_sli4_io_sgl_update(struct lpfc_hba *phba) { - struct lpfc_scsi_buf *psb, *psb_next; - uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt; - LIST_HEAD(scsi_sgl_list); - int rc; - - /* - * update on pci function's els xri-sgl list - */ - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - phba->total_scsi_bufs = 0; + struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; + uint16_t i, lxri, els_xri_cnt; + uint16_t io_xri_cnt, io_xri_max; + LIST_HEAD(io_sgl_list); + int rc, cnt; /* - * update on pci function's allocated scsi xri-sgl list + * update on pci function's allocated nvme xri-sgl list */ - /* maximum number of xris available for scsi buffers */ - phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri - - els_xri_cnt; - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) - return 0; + /* maximum number of xris available for nvme buffers */ + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.io_xri_max = io_xri_max; - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) - phba->sli4_hba.scsi_xri_max = /* Split them up */ - (phba->sli4_hba.scsi_xri_max * - phba->cfg_xri_split) / 100; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "6074 Current allocated XRI sgl count:%d, " + "maximum XRI count:%d\n", + phba->sli4_hba.io_xri_cnt, + phba->sli4_hba.io_xri_max); - spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock(&phba->scsi_buf_list_put_lock); - list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); - list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); - spin_unlock(&phba->scsi_buf_list_put_lock); - spin_unlock_irq(&phba->scsi_buf_list_get_lock); + cnt = lpfc_io_buf_flush(phba, &io_sgl_list); - 
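lpfc_io_buf_flush() above empties every hardware queue's get/put lists and rebuilds a single list in ascending XRI order, because POST_SGL posts a sequential range of XRIs to the firmware; lpfc_io_buf_replenish() then deals the buffers back out, one per hardware queue per pass. The ordered rebuild is a plain walk-and-insert-before-the-first-larger-XRI; a generic sketch (element type and names are hypothetical):

#include <linux/list.h>
#include <linux/types.h>

struct example_buf {
        struct list_head list;
        u16 xri;
};

/* Keep @sorted in ascending XRI order, mirroring the prev_iobufp walk
 * in lpfc_io_buf_flush() above (illustrative only).
 */
static void example_insert_sorted(struct list_head *sorted,
                                  struct example_buf *buf)
{
        struct example_buf *cur;

        list_for_each_entry(cur, sorted, list) {
                if (buf->xri < cur->xri) {
                        /* insert just before the first larger XRI */
                        list_add_tail(&buf->list, &cur->list);
                        return;
                }
        }
        list_add_tail(&buf->list, sorted);      /* largest seen so far */
}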
lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "6060 Current allocated SCSI xri-sgl count:%d, " - "maximum SCSI xri count:%d (split:%d)\n", - phba->sli4_hba.scsi_xri_cnt, - phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split); - - if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { - /* max scsi xri shrinked below the allocated scsi buffers */ - scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt - - phba->sli4_hba.scsi_xri_max; - /* release the extra allocated scsi buffers */ - for (i = 0; i < scsi_xri_cnt; i++) { - list_remove_head(&scsi_sgl_list, psb, - struct lpfc_scsi_buf, list); - if (psb) { + if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) { + /* max nvme xri shrunk below the allocated nvme buffers */ + io_xri_cnt = phba->sli4_hba.io_xri_cnt - + phba->sli4_hba.io_xri_max; + /* release the extra allocated nvme buffers */ + for (i = 0; i < io_xri_cnt; i++) { + list_remove_head(&io_sgl_list, lpfc_ncmd, + struct lpfc_io_buf, list); + if (lpfc_ncmd) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, - psb->data, psb->dma_handle); - kfree(psb); + lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); } } - spin_lock_irq(&phba->scsi_buf_list_get_lock); - phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; - spin_unlock_irq(&phba->scsi_buf_list_get_lock); + phba->sli4_hba.io_xri_cnt -= io_xri_cnt; } - /* update xris associated to remaining allocated scsi buffers */ - psb = NULL; - psb_next = NULL; - list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) { + /* update xris associated to remaining allocated nvme buffers */ + lpfc_ncmd = NULL; + lpfc_ncmd_next = NULL; + phba->sli4_hba.io_xri_cnt = cnt; + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &io_sgl_list, list) { lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2560 Failed to allocate xri for " - "scsi buffer\n"); + "6075 Failed to allocate xri for " + "nvme buffer\n"); rc = -ENOMEM; goto out_free_mem; } - psb->cur_iocbq.sli4_lxritag = lxri; - psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; + lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock(&phba->scsi_buf_list_put_lock); - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - spin_unlock(&phba->scsi_buf_list_put_lock); - spin_unlock_irq(&phba->scsi_buf_list_get_lock); + cnt = lpfc_io_buf_replenish(phba, &io_sgl_list); return 0; out_free_mem: - lpfc_scsi_free(phba); + lpfc_io_free(phba); return rc; } +/** + * lpfc_new_io_buf - IO buffer allocator for HBA with SLI4 IF spec + * @vport: The virtual port for which this call being executed. + * @num_to_allocate: The requested number of buffers to allocate. + * + * This routine allocates nvme buffers for device with SLI-4 interface spec, + * the nvme buffer contains all the necessary information needed to initiate + * an I/O. After allocating up to @num_to_allocate IO buffers and put + * them on a list, it post them to the port by using SGL block post. + * + * Return codes: + * int - number of IO buffers that were allocated and posted. + * 0 = failure, less than num_to_alloc is a partial failure. 
+ **/ +int +lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_iocbq *pwqeq; + uint16_t iotag, lxri = 0; + int bcnt, num_posted; + LIST_HEAD(prep_nblist); + LIST_HEAD(post_nblist); + LIST_HEAD(nvme_nblist); + + /* Sanity check to ensure our sizing is right for both SCSI and NVME */ + if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) { + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "6426 Common buffer size %ld exceeds %d\n", + sizeof(struct lpfc_io_buf), + LPFC_COMMON_IO_BUF_SZ); + return 0; + } + + phba->sli4_hba.io_xri_cnt = 0; + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); + if (!lpfc_ncmd) + break; + /* + * Get memory from the pci pool to map the virt space to + * pci bus space for an I/O. The DMA buffer includes the + * number of SGE's necessary to support the sg_tablesize. + */ + lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, + GFP_KERNEL, + &lpfc_ncmd->dma_handle); + if (!lpfc_ncmd->data) { + kfree(lpfc_ncmd); + break; + } + memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size); + + /* + * 4K Page alignment is CRITICAL to BlockGuard, double check + * to be sure. + */ + if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + (((unsigned long)(lpfc_ncmd->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "3369 Memory alignment err: addr=%lx\n", + (unsigned long)lpfc_ncmd->data); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + + lxri = lpfc_sli4_next_xritag(phba); + if (lxri == NO_XRI) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } + pwqeq = &lpfc_ncmd->cur_iocbq; + + /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, pwqeq); + if (iotag == 0) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6121 Failed to allocate IOTAG for" + " XRI:0x%x\n", lxri); + lpfc_sli4_free_xri(phba, lxri); + break; + } + pwqeq->sli4_lxritag = lxri; + pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; + pwqeq->context1 = lpfc_ncmd; + + /* Initialize local short-hand pointers. */ + lpfc_ncmd->dma_sgl = lpfc_ncmd->data; + lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle; + lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; + spin_lock_init(&lpfc_ncmd->buf_lock); + + /* add the nvme buffer to a post list */ + list_add_tail(&lpfc_ncmd->list, &post_nblist); + phba->sli4_hba.io_xri_cnt++; + } + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, + "6114 Allocate %d out of %d requested new NVME " + "buffers\n", bcnt, num_to_alloc); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) + num_posted = lpfc_sli4_post_io_sgl_list( + phba, &post_nblist, bcnt); + else + num_posted = 0; + + return num_posted; +} + static uint64_t lpfc_get_wwpn(struct lpfc_hba *phba) { @@ -3776,111 +4215,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba) return rol64(wwn, 32); } -/** - * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping - * @phba: pointer to lpfc hba data structure. - * - * This routine first calculates the sizes of the current els and allocated - * scsi sgl lists, and then goes through all sgls to updates the physical - * XRIs assigned due to port function reset. 
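lpfc_new_io_buf() above allocates each common IO buffer from the DMA pool and, when BlockGuard is enabled, rejects any buffer whose virtual address is not aligned to the 4K SLI4 page before assigning an XRI and an iotag. The alignment test is a simple mask; a standalone sketch (hypothetical name, assuming a 4K page as stated in the hunk's comment):

#define EX_SLI4_PAGE_SIZE       4096UL  /* assumed 4K page, per the comment above */

/* Illustrative only: an address ending in 0x000 passes,
 * one ending in 0x010 would be rejected.
 */
static int example_is_page_aligned(const void *addr)
{
        return ((unsigned long)addr & (EX_SLI4_PAGE_SIZE - 1)) == 0;
}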
During port initialization, the - * current els and allocated scsi sgl lists are 0s. - * - * Return codes - * 0 - successful (for now, it always returns 0) - **/ -int -lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba) -{ - struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL; - uint16_t i, lxri, els_xri_cnt; - uint16_t nvme_xri_cnt, nvme_xri_max; - LIST_HEAD(nvme_sgl_list); - int rc, cnt; - - phba->total_nvme_bufs = 0; - phba->get_nvme_bufs = 0; - phba->put_nvme_bufs = 0; - - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) - return 0; - /* - * update on pci function's allocated nvme xri-sgl list - */ - - /* maximum number of xris available for nvme buffers */ - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - phba->sli4_hba.nvme_xri_max = nvme_xri_max; - phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max; - - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "6074 Current allocated NVME xri-sgl count:%d, " - "maximum NVME xri count:%d\n", - phba->sli4_hba.nvme_xri_cnt, - phba->sli4_hba.nvme_xri_max); - - spin_lock_irq(&phba->nvme_buf_list_get_lock); - spin_lock(&phba->nvme_buf_list_put_lock); - list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list); - list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list); - cnt = phba->get_nvme_bufs + phba->put_nvme_bufs; - phba->get_nvme_bufs = 0; - phba->put_nvme_bufs = 0; - spin_unlock(&phba->nvme_buf_list_put_lock); - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - - if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) { - /* max nvme xri shrunk below the allocated nvme buffers */ - spin_lock_irq(&phba->nvme_buf_list_get_lock); - nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt - - phba->sli4_hba.nvme_xri_max; - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - /* release the extra allocated nvme buffers */ - for (i = 0; i < nvme_xri_cnt; i++) { - list_remove_head(&nvme_sgl_list, lpfc_ncmd, - struct lpfc_nvme_buf, list); - if (lpfc_ncmd) { - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - lpfc_ncmd->data, - lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - } - } - spin_lock_irq(&phba->nvme_buf_list_get_lock); - phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt; - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - } - - /* update xris associated to remaining allocated nvme buffers */ - lpfc_ncmd = NULL; - lpfc_ncmd_next = NULL; - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &nvme_sgl_list, list) { - lxri = lpfc_sli4_next_xritag(phba); - if (lxri == NO_XRI) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "6075 Failed to allocate xri for " - "nvme buffer\n"); - rc = -ENOMEM; - goto out_free_mem; - } - lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri; - lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; - } - spin_lock_irq(&phba->nvme_buf_list_get_lock); - spin_lock(&phba->nvme_buf_list_put_lock); - list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get); - phba->get_nvme_bufs = cnt; - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); - spin_unlock(&phba->nvme_buf_list_put_lock); - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - return 0; - -out_free_mem: - lpfc_nvme_free(phba); - return rc; -} - /** * lpfc_create_port - Create an FC port * @phba: pointer to lpfc hba data structure. 
@@ -3956,17 +4290,29 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport->fc_rscn_flush = 0; lpfc_get_vport_cfgparam(vport); + /* Adjust value in vport */ + vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type; + shost->unique_id = instance; shost->max_id = LPFC_MAX_TARGET; shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; - shost->nr_hw_queues = phba->cfg_fcp_io_channel; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) + shost->nr_hw_queues = phba->cfg_hdw_queue; + else + shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; + shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; shost->sg_tablesize = phba->cfg_scsi_seg_cnt; - } + } else + /* SLI-3 has a limited number of hardware queues (3), + * thus there is only one for FCP processing. + */ + shost->nr_hw_queues = 1; /* * Set initial can_queue value since 0 is no longer supported and @@ -4220,7 +4566,8 @@ lpfc_stop_port_s4(struct lpfc_hba *phba) { /* Reset some HBA SLI4 setup states */ lpfc_stop_hba_timers(phba); - phba->pport->work_port_events = 0; + if (phba->pport) + phba->pport->work_port_events = 0; phba->sli4_hba.intr_enable = 0; } @@ -5819,24 +6166,11 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) "NVME" : " "), (phba->nvmet_support ? "NVMET" : " ")); - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { - /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_get_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); - spin_lock_init(&phba->scsi_buf_list_put_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - } - - if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && - (phba->nvmet_support == 0)) { - /* Initialize the NVME buffer list used by driver for NVME IO */ - spin_lock_init(&phba->nvme_buf_list_get_lock); - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get); - phba->get_nvme_bufs = 0; - spin_lock_init(&phba->nvme_buf_list_put_lock); - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); - phba->put_nvme_bufs = 0; - } + /* Initialize the IO buffer list used by driver for SLI3 SCSI */ + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -5860,6 +6194,8 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) /* Heartbeat timer */ timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); + INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work); + return 0; } @@ -5877,7 +6213,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) static int lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) { - int rc; + int rc, entry_sz; /* * Initialize timers used by driver @@ -5922,6 +6258,11 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt; lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + if (phba->sli_rev == LPFC_SLI_REV4) + entry_sz = sizeof(struct sli4_sge); + else + entry_sz = sizeof(struct ulp_bde64); + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { /* @@ -5935,7 +6276,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + (LPFC_MAX_SG_SEG_CNT * entry_sz); if 
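The lpfc_create_port() hunk above now sizes shost->nr_hw_queues from the FCP scheduling mode: scheduling by hardware queue exposes cfg_hdw_queue queues to the block layer, otherwise one queue per present CPU, while SLI-3 always advertises a single queue. Condensed into a hypothetical helper (logic taken from the hunk; the helper itself is not in the patch):

/* Illustrative only, assuming the driver's struct lpfc_hba fields. */
static unsigned int example_nr_hw_queues(struct lpfc_hba *phba)
{
        if (phba->sli_rev != LPFC_SLI_REV4)
                return 1;       /* SLI-3: single FCP hardware queue */
        if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
                return phba->cfg_hdw_queue;
        return phba->sli4_hba.num_present_cpu;
}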
(phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; @@ -5950,7 +6291,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) */ phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + ((phba->cfg_sg_seg_cnt + 2) * entry_sz); /* Total BDEs in BPL for scsi_sg_list */ phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; @@ -6031,14 +6372,13 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; int longs; - int fof_vectors = 0; int extra; uint64_t wwn; u32 if_type; u32 if_fam; - phba->sli4_hba.num_online_cpu = num_online_cpus(); phba->sli4_hba.num_present_cpu = lpfc_present_cpu; + phba->sli4_hba.num_possible_cpu = num_possible_cpus(); phba->sli4_hba.curr_disp_cpu = 0; /* Get all the module params for configuring this host */ @@ -6200,8 +6540,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Initialize the Abort nvme buffer list used by driver */ - spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list); + spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list); } @@ -6337,6 +6676,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) " NVME_TARGET_FC infrastructure" " is not in kernel\n"); #endif + /* Not supported for NVMET */ + phba->cfg_xri_rebalancing = 0; break; } } @@ -6405,8 +6746,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* Verify OAS is supported */ lpfc_sli4_oas_verify(phba); - if (phba->cfg_fof) - fof_vectors = 1; /* Verify RAS support on adapter */ lpfc_sli4_ras_init(phba); @@ -6450,9 +6789,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_remove_rpi_hdrs; } - phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs, - sizeof(struct lpfc_hba_eq_hdl), - GFP_KERNEL); + phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann, + sizeof(struct lpfc_hba_eq_hdl), + GFP_KERNEL); if (!phba->sli4_hba.hba_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for " @@ -6461,7 +6800,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcf_rr_bmask; } - phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu, + phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(struct lpfc_vector_map_info), GFP_KERNEL); if (!phba->sli4_hba.cpu_map) { @@ -6471,21 +6810,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) rc = -ENOMEM; goto out_free_hba_eq_hdl; } - if (lpfc_used_cpu == NULL) { - lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t), - GFP_KERNEL); - if (!lpfc_used_cpu) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3335 Failed allocate memory for msi-x " - "interrupt vector mapping\n"); - kfree(phba->sli4_hba.cpu_map); - rc = -ENOMEM; - goto out_free_hba_eq_hdl; - } - for (i = 0; i < lpfc_present_cpu; i++) - lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY; - } + phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info); + if (!phba->sli4_hba.eq_info) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3321 Failed allocation for per_cpu stats\n"); + rc = -ENOMEM; + goto out_free_hba_cpu_map; + } /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. 
@@ -6505,6 +6837,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_hba_cpu_map: + kfree(phba->sli4_hba.cpu_map); out_free_hba_eq_hdl: kfree(phba->sli4_hba.hba_eq_hdl); out_free_fcf_rr_bmask: @@ -6534,10 +6868,12 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + free_percpu(phba->sli4_hba.eq_info); + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_possible_cpu = 0; phba->sli4_hba.num_present_cpu = 0; - phba->sli4_hba.num_online_cpu = 0; phba->sli4_hba.curr_disp_cpu = 0; /* Free memory allocated for fast-path work queue handles */ @@ -6875,11 +7211,8 @@ lpfc_init_sgl_list(struct lpfc_hba *phba) /* els xri-sgl book keeping */ phba->sli4_hba.els_xri_cnt = 0; - /* scsi xri-buffer book keeping */ - phba->sli4_hba.scsi_xri_cnt = 0; - /* nvme xri-buffer book keeping */ - phba->sli4_hba.nvme_xri_cnt = 0; + phba->sli4_hba.io_xri_cnt = 0; } /** @@ -7093,6 +7426,9 @@ lpfc_hba_alloc(struct pci_dev *pdev) static void lpfc_hba_free(struct lpfc_hba *phba) { + if (phba->sli_rev == LPFC_SLI_REV4) + kfree(phba->sli4_hba.hdwq); + /* Release the driver assigned board number */ idr_remove(&lpfc_hba_index, phba->brd_no); @@ -7128,10 +7464,6 @@ lpfc_create_shost(struct lpfc_hba *phba) phba->fc_arbtov = FF_DEF_ARBTOV; atomic_set(&phba->sdev_cnt, 0); - atomic_set(&phba->fc4ScsiInputRequests, 0); - atomic_set(&phba->fc4ScsiOutputRequests, 0); - atomic_set(&phba->fc4ScsiControlRequests, 0); - atomic_set(&phba->fc4ScsiIoCmpls, 0); vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); if (!vport) return -ENODEV; @@ -7361,15 +7693,18 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) unsigned long bar0map_len, bar2map_len; int i, hbq_count; void *ptr; - int error = -ENODEV; + int error; if (!pdev) - return error; + return -ENODEV; /* Set the device DMA mask size */ - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) return error; + error = -ENODEV; /* Get the bus address of Bar0 and Bar2 and the number of bytes * required by each mapping. @@ -7906,7 +8241,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) struct lpfc_rsrc_desc_fcfcoe *desc; char *pdesc_0; uint16_t forced_link_speed; - uint32_t if_type; + uint32_t if_type, qmin; int length, i, rc = 0, rc2; pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -8011,38 +8346,44 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.max_rq); /* - * Calculate NVME queue resources based on how - * many WQ/CQs are available. + * Calculate queue resources based on how + * many WQ/CQ/EQs are available. */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - length = phba->sli4_hba.max_cfg_param.max_wq; - if (phba->sli4_hba.max_cfg_param.max_cq < - phba->sli4_hba.max_cfg_param.max_wq) - length = phba->sli4_hba.max_cfg_param.max_cq; + qmin = phba->sli4_hba.max_cfg_param.max_wq; + if (phba->sli4_hba.max_cfg_param.max_cq < qmin) + qmin = phba->sli4_hba.max_cfg_param.max_cq; + if (phba->sli4_hba.max_cfg_param.max_eq < qmin) + qmin = phba->sli4_hba.max_cfg_param.max_eq; + /* + * Whats left after this can go toward NVME / FCP. + * The minus 4 accounts for ELS, NVME LS, MBOX + * plus one extra. 
When configured for + * NVMET, FCP io channel WQs are not created. + */ + qmin -= 4; - /* - * Whats left after this can go toward NVME. - * The minus 6 accounts for ELS, NVME LS, MBOX - * fof plus a couple extra. When configured for - * NVMET, FCP io channel WQs are not created. - */ - length -= 6; - if (!phba->nvmet_support) - length -= phba->cfg_fcp_io_channel; - - if (phba->cfg_nvme_io_channel > length) { - lpfc_printf_log( - phba, KERN_ERR, LOG_SLI, - "2005 Reducing NVME IO channel to %d: " - "WQ %d CQ %d NVMEIO %d FCPIO %d\n", - length, + /* If NVME is configured, double the number of CQ/WQs needed */ + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + !phba->nvmet_support) + qmin /= 2; + + /* Check to see if there is enough for NVME */ + if ((phba->cfg_irq_chann > qmin) || + (phba->cfg_hdw_queue > qmin)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2005 Reducing Queues: " + "WQ %d CQ %d EQ %d: min %d: " + "IRQ %d HDWQ %d\n", phba->sli4_hba.max_cfg_param.max_wq, phba->sli4_hba.max_cfg_param.max_cq, - phba->cfg_nvme_io_channel, - phba->cfg_fcp_io_channel); + phba->sli4_hba.max_cfg_param.max_eq, + qmin, phba->cfg_irq_chann, + phba->cfg_hdw_queue); - phba->cfg_nvme_io_channel = length; - } + if (phba->cfg_irq_chann > qmin) + phba->cfg_irq_chann = qmin; + if (phba->cfg_hdw_queue > qmin) + phba->cfg_hdw_queue = qmin; } } @@ -8254,53 +8595,22 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) static int lpfc_sli4_queue_verify(struct lpfc_hba *phba) { - int io_channel; - int fof_vectors = phba->cfg_fof ? 1 : 0; - /* * Sanity check for configured queue parameters against the run-time * device parameters */ - /* Sanity check on HBA EQ parameters */ - io_channel = phba->io_channel_irqs; - - if (phba->sli4_hba.num_online_cpu < io_channel) { - lpfc_printf_log(phba, - KERN_ERR, LOG_INIT, - "3188 Reducing IO channels to match number of " - "online CPUs: from %d to %d\n", - io_channel, phba->sli4_hba.num_online_cpu); - io_channel = phba->sli4_hba.num_online_cpu; - } - - if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2575 Reducing IO channels to match number of " - "available EQs: from %d to %d\n", - io_channel, - phba->sli4_hba.max_cfg_param.max_eq); - io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors; - } - - /* The actual number of FCP / NVME event queues adopted */ - if (io_channel != phba->io_channel_irqs) - phba->io_channel_irqs = io_channel; - if (phba->cfg_fcp_io_channel > io_channel) - phba->cfg_fcp_io_channel = io_channel; - if (phba->cfg_nvme_io_channel > io_channel) - phba->cfg_nvme_io_channel = io_channel; if (phba->nvmet_support) { - if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq) - phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel; + if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_irq_chann; } if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n", - phba->io_channel_irqs, phba->cfg_fcp_io_channel, - phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq); + "2574 IO channels: hdwQ %d IRQ %d MRQ: %d\n", + phba->cfg_hdw_queue, phba->cfg_irq_chann, + phba->cfg_nvmet_mrq); /* Get EQ depth from module parameter, fake the default for now */ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; @@ -8327,7 +8637,9 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) return 1; } qdesc->qe_valid = 1; - phba->sli4_hba.nvme_cq[wqidx] = qdesc; + qdesc->hdwq 
= wqidx; + qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); + phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc; qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT); @@ -8337,7 +8649,9 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) wqidx); return 1; } - phba->sli4_hba.nvme_wq[wqidx] = qdesc; + qdesc->hdwq = wqidx; + qdesc->chann = wqidx; + phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; } @@ -8365,7 +8679,9 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) return 1; } qdesc->qe_valid = 1; - phba->sli4_hba.fcp_cq[wqidx] = qdesc; + qdesc->hdwq = wqidx; + qdesc->chann = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); + phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; /* Create Fast Path FCP WQs */ if (phba->enab_exp_wqcq_pages) { @@ -8386,7 +8702,9 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) wqidx); return 1; } - phba->sli4_hba.fcp_wq[wqidx] = qdesc; + qdesc->hdwq = wqidx; + qdesc->chann = wqidx; + phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; } @@ -8409,16 +8727,14 @@ int lpfc_sli4_queue_create(struct lpfc_hba *phba) { struct lpfc_queue *qdesc; - int idx, io_channel; + int idx, eqidx; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_eq_intr_info *eqi; /* * Create HBA Record arrays. * Both NVME and FCP will share that same vectors / EQs */ - io_channel = phba->io_channel_irqs; - if (!io_channel) - return -ERANGE; - phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; @@ -8430,87 +8746,36 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; - phba->sli4_hba.hba_eq = kcalloc(io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.hba_eq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2576 Failed allocate memory for " - "fast-path EQ record array\n"); - goto out_error; - } - - if (phba->cfg_fcp_io_channel) { - phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq) { + if (!phba->sli4_hba.hdwq) { + phba->sli4_hba.hdwq = kcalloc( + phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue), + GFP_KERNEL); + if (!phba->sli4_hba.hdwq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2577 Failed allocate memory for " - "fast-path CQ record array\n"); + "6427 Failed allocate memory for " + "fast-path Hardware Queue array\n"); goto out_error; } - phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2578 Failed allocate memory for " - "fast-path FCP WQ record array\n"); - goto out_error; - } - /* - * Since the first EQ can have multiple CQs associated with it, - * this array is used to quickly see if we have a FCP fast-path - * CQ match. 
- */ - phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel, - sizeof(uint16_t), - GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq_map) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2545 Failed allocate memory for " - "fast-path CQ map\n"); - goto out_error; + /* Prepare hardware queues to take IO buffers */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + spin_lock_init(&qp->io_buf_list_get_lock); + spin_lock_init(&qp->io_buf_list_put_lock); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get); + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + qp->get_io_bufs = 0; + qp->put_io_bufs = 0; + qp->total_io_bufs = 0; + spin_lock_init(&qp->abts_scsi_buf_list_lock); + INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list); + qp->abts_scsi_io_bufs = 0; + spin_lock_init(&qp->abts_nvme_buf_list_lock); + INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list); + qp->abts_nvme_io_bufs = 0; } } - if (phba->cfg_nvme_io_channel) { - phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.nvme_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6077 Failed allocate memory for " - "fast-path CQ record array\n"); - goto out_error; - } - - phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel, - sizeof(struct lpfc_queue *), - GFP_KERNEL); - if (!phba->sli4_hba.nvme_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2581 Failed allocate memory for " - "fast-path NVME WQ record array\n"); - goto out_error; - } - - /* - * Since the first EQ can have multiple CQs associated with it, - * this array is used to quickly see if we have a NVME fast-path - * CQ match. - */ - phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel, - sizeof(uint16_t), - GFP_KERNEL); - if (!phba->sli4_hba.nvme_cq_map) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6078 Failed allocate memory for " - "fast-path CQ map\n"); - goto out_error; - } - + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { if (phba->nvmet_support) { phba->sli4_hba.nvmet_cqset = kcalloc( phba->cfg_nvmet_mrq, @@ -8548,8 +8813,19 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list); /* Create HBA Event Queues (EQs) */ - for (idx = 0; idx < io_channel; idx++) { - /* Create EQs */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + /* + * If there are more Hardware Queues than available + * CQs, multiple Hardware Queues may share a common EQ. 
+ */ + if (idx >= phba->cfg_irq_chann) { + /* Share an existing EQ */ + eqidx = lpfc_find_eq_handle(phba, idx); + phba->sli4_hba.hdwq[idx].hba_eq = + phba->sli4_hba.hdwq[eqidx].hba_eq; + continue; + } + /* Create an EQ */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.eq_esize, phba->sli4_hba.eq_ecount); @@ -8559,33 +8835,51 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } qdesc->qe_valid = 1; - phba->sli4_hba.hba_eq[idx] = qdesc; + qdesc->hdwq = idx; + + /* Save the CPU this EQ is affinitised to */ + eqidx = lpfc_find_eq_handle(phba, idx); + qdesc->chann = lpfc_find_cpu_handle(phba, eqidx, + LPFC_FIND_BY_EQ); + phba->sli4_hba.hdwq[idx].hba_eq = qdesc; + qdesc->last_cpu = qdesc->chann; + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu); + list_add(&qdesc->cpu_list, &eqi->list); } - /* FCP and NVME io channels are not required to be balanced */ - for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + /* Allocate SCSI SLI4 CQ/WQs */ + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { if (lpfc_alloc_fcp_wq_cq(phba, idx)) goto out_error; + } - for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++) - if (lpfc_alloc_nvme_wq_cq(phba, idx)) - goto out_error; + /* Allocate NVME SLI4 CQ/WQs */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + if (lpfc_alloc_nvme_wq_cq(phba, idx)) + goto out_error; + } - if (phba->nvmet_support) { - for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { - qdesc = lpfc_sli4_queue_alloc(phba, + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + qdesc = lpfc_sli4_queue_alloc( + phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3142 Failed allocate NVME " - "CQ Set (%d)\n", idx); - goto out_error; + if (!qdesc) { + lpfc_printf_log( + phba, KERN_ERR, LOG_INIT, + "3142 Failed allocate NVME " + "CQ Set (%d)\n", idx); + goto out_error; + } + qdesc->qe_valid = 1; + qdesc->hdwq = idx; + qdesc->chann = idx; + phba->sli4_hba.nvmet_cqset[idx] = qdesc; } - qdesc->qe_valid = 1; - phba->sli4_hba.nvmet_cqset[idx] = qdesc; } } @@ -8615,6 +8909,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } qdesc->qe_valid = 1; + qdesc->chann = 0; phba->sli4_hba.els_cq = qdesc; @@ -8632,6 +8927,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "0505 Failed allocate slow-path MQ\n"); goto out_error; } + qdesc->chann = 0; phba->sli4_hba.mbx_wq = qdesc; /* @@ -8647,6 +8943,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "0504 Failed allocate slow-path ELS WQ\n"); goto out_error; } + qdesc->chann = 0; phba->sli4_hba.els_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); @@ -8660,6 +8957,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "6079 Failed allocate NVME LS CQ\n"); goto out_error; } + qdesc->chann = 0; qdesc->qe_valid = 1; phba->sli4_hba.nvmels_cq = qdesc; @@ -8672,6 +8970,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "6080 Failed allocate NVME LS WQ\n"); goto out_error; } + qdesc->chann = 0; phba->sli4_hba.nvmels_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); } @@ -8702,7 +9001,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } phba->sli4_hba.dat_rq = qdesc; - if (phba->nvmet_support) { + if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && + phba->nvmet_support) { for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { /* Create NVMET Receive Queue for header */ qdesc = 
lpfc_sli4_queue_alloc(phba, @@ -8715,6 +9015,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "receive HRQ\n"); goto out_error; } + qdesc->hdwq = idx; phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc; /* Only needed for header of RQ pair */ @@ -8741,13 +9042,29 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "receive DRQ\n"); goto out_error; } + qdesc->hdwq = idx; phba->sli4_hba.nvmet_mrq_data[idx] = qdesc; } } - /* Create the Queues needed for Flash Optimized Fabric operations */ - if (phba->cfg_fof) - lpfc_fof_queue_create(phba); +#if defined(BUILD_NVME) + /* Clear NVME stats */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0, + sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); + } + } +#endif + + /* Clear SCSI stats */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0, + sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat)); + } + } + return 0; out_error: @@ -8780,11 +9097,25 @@ lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max) } static inline void -lpfc_sli4_release_queue_map(uint16_t **qmap) +lpfc_sli4_release_hdwq(struct lpfc_hba *phba) { - if (*qmap != NULL) { - kfree(*qmap); - *qmap = NULL; + struct lpfc_sli4_hdw_queue *hdwq; + uint32_t idx; + + hdwq = phba->sli4_hba.hdwq; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + if (idx < phba->cfg_irq_chann) + lpfc_sli4_queue_free(hdwq[idx].hba_eq); + hdwq[idx].hba_eq = NULL; + + lpfc_sli4_queue_free(hdwq[idx].fcp_cq); + lpfc_sli4_queue_free(hdwq[idx].nvme_cq); + lpfc_sli4_queue_free(hdwq[idx].fcp_wq); + lpfc_sli4_queue_free(hdwq[idx].nvme_wq); + hdwq[idx].fcp_cq = NULL; + hdwq[idx].nvme_cq = NULL; + hdwq[idx].fcp_wq = NULL; + hdwq[idx].nvme_wq = NULL; } } @@ -8803,33 +9134,9 @@ lpfc_sli4_release_queue_map(uint16_t **qmap) void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { - if (phba->cfg_fof) - lpfc_fof_queue_destroy(phba); - /* Release HBA eqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs); - - /* Release FCP cqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq, - phba->cfg_fcp_io_channel); - - /* Release FCP wqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq, - phba->cfg_fcp_io_channel); - - /* Release FCP CQ mapping array */ - lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map); - - /* Release NVME cqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq, - phba->cfg_nvme_io_channel); - - /* Release NVME wqs */ - lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq, - phba->cfg_nvme_io_channel); - - /* Release NVME CQ mapping array */ - lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map); + if (phba->sli4_hba.hdwq) + lpfc_sli4_release_hdwq(phba); if (phba->nvmet_support) { lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset, @@ -8910,10 +9217,9 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, qidx, (uint32_t)rc); return rc; } - cq->chann = qidx; if (qtype != LPFC_MBOX) { - /* Setup nvme_cq_map for fast lookup */ + /* Setup cq_map for fast lookup */ if (cq_map) *cq_map = cq->queue_id; @@ -8930,7 +9236,6 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, /* no need to tear down cq - caller will do so */ return rc; } - wq->chann = qidx; /* Bind this CQ/WQ to the NVME ring */ pring = wq->pring; @@ -8959,6 +9264,38 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq, return 0; } +/** + * lpfc_setup_cq_lookup - Setup the CQ lookup table + * 
@phba: pointer to lpfc hba data structure. + * + * This routine will populate the cq_lookup table by all + * available CQ queue_id's. + **/ +void +lpfc_setup_cq_lookup(struct lpfc_hba *phba) +{ + struct lpfc_queue *eq, *childq; + struct lpfc_sli4_hdw_queue *qp; + int qidx; + + qp = phba->sli4_hba.hdwq; + memset(phba->sli4_hba.cq_lookup, 0, + (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1))); + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { + eq = qp[qidx].hba_eq; + if (!eq) + continue; + list_for_each_entry(childq, &eq->child_list, list) { + if (childq->queue_id > phba->sli4_hba.cq_max) + continue; + if ((childq->subtype == LPFC_FCP) || + (childq->subtype == LPFC_NVME)) + phba->sli4_hba.cq_lookup[childq->queue_id] = + childq; + } + } +} + /** * lpfc_sli4_queue_setup - Set up all the SLI4 queues * @phba: pointer to lpfc hba data structure. @@ -8976,9 +9313,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) { uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; + struct lpfc_sli4_hdw_queue *qp; LPFC_MBOXQ_t *mboxq; int qidx; - uint32_t length, io_channel; + uint32_t length, usdelay; int rc = -ENOMEM; /* Check for dual-ULP support */ @@ -9029,25 +9367,25 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* * Set up HBA Event Queues (EQs) */ - io_channel = phba->io_channel_irqs; + qp = phba->sli4_hba.hdwq; /* Set up HBA event queue */ - if (io_channel && !phba->sli4_hba.hba_eq) { + if (!qp) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3147 Fast-path EQs not allocated\n"); rc = -ENOMEM; goto out_error; } - for (qidx = 0; qidx < io_channel; qidx++) { - if (!phba->sli4_hba.hba_eq[qidx]) { + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { + if (!qp[qidx].hba_eq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0522 Fast-path EQ (%d) not " "allocated\n", qidx); rc = -ENOMEM; goto out_destroy; } - rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx], - phba->cfg_fcp_imax); + rc = lpfc_eq_create(phba, qp[qidx].hba_eq, + phba->cfg_fcp_imax); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0523 Failed setup of fast-path EQ " @@ -9056,26 +9394,17 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2584 HBA EQ setup: queue[%d]-id=%d\n", - qidx, phba->sli4_hba.hba_eq[qidx]->queue_id); + "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx, + qp[qidx].hba_eq->queue_id); } - if (phba->cfg_nvme_io_channel) { - if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6084 Fast-path NVME %s array not allocated\n", - (phba->sli4_hba.nvme_cq) ? 
"CQ" : "WQ"); - rc = -ENOMEM; - goto out_destroy; - } - - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { rc = lpfc_create_wq_cq(phba, - phba->sli4_hba.hba_eq[ - qidx % io_channel], - phba->sli4_hba.nvme_cq[qidx], - phba->sli4_hba.nvme_wq[qidx], - &phba->sli4_hba.nvme_cq_map[qidx], + qp[qidx].hba_eq, + qp[qidx].nvme_cq, + qp[qidx].nvme_wq, + &phba->sli4_hba.hdwq[qidx].nvme_cq_map, qidx, LPFC_NVME); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9087,31 +9416,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) } } - if (phba->cfg_fcp_io_channel) { - /* Set up fast-path FCP Response Complete Queue */ - if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + rc = lpfc_create_wq_cq(phba, + qp[qidx].hba_eq, + qp[qidx].fcp_cq, + qp[qidx].fcp_wq, + &phba->sli4_hba.hdwq[qidx].fcp_cq_map, + qidx, LPFC_FCP); + if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3148 Fast-path FCP %s array not allocated\n", - phba->sli4_hba.fcp_cq ? "WQ" : "CQ"); - rc = -ENOMEM; - goto out_destroy; - } - - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) { - rc = lpfc_create_wq_cq(phba, - phba->sli4_hba.hba_eq[ - qidx % io_channel], - phba->sli4_hba.fcp_cq[qidx], - phba->sli4_hba.fcp_wq[qidx], - &phba->sli4_hba.fcp_cq_map[qidx], - qidx, LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0535 Failed to setup fastpath " "FCP WQ/CQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); - goto out_destroy; - } + goto out_destroy; } } @@ -9130,7 +9447,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) goto out_destroy; } - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, phba->sli4_hba.mbx_cq, phba->sli4_hba.mbx_wq, NULL, 0, LPFC_MBOX); @@ -9151,7 +9468,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) if (phba->cfg_nvmet_mrq > 1) { rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset, - phba->sli4_hba.hba_eq, + qp, LPFC_WCQ, LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9163,7 +9480,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) } else { /* Set up NVMET Receive Complete Queue */ rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0], - phba->sli4_hba.hba_eq[0], + qp[0].hba_eq, LPFC_WCQ, LPFC_NVMET); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9177,7 +9494,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) "6090 NVMET CQ setup: cq-id=%d, " "parent eq-id=%d\n", phba->sli4_hba.nvmet_cqset[0]->queue_id, - phba->sli4_hba.hba_eq[0]->queue_id); + qp[0].hba_eq->queue_id); } } @@ -9189,14 +9506,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) rc = -ENOMEM; goto out_destroy; } - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], - phba->sli4_hba.els_cq, - phba->sli4_hba.els_wq, - NULL, 0, LPFC_ELS); + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, + phba->sli4_hba.els_cq, + phba->sli4_hba.els_wq, + NULL, 0, LPFC_ELS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n", - (uint32_t)rc); + "0525 Failed setup of ELS WQ/CQ: rc = 0x%x\n", + (uint32_t)rc); goto out_destroy; } lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -9204,7 +9521,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.els_wq->queue_id, phba->sli4_hba.els_cq->queue_id); - if (phba->cfg_nvme_io_channel) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Set up NVME LS Complete Queue */ if (!phba->sli4_hba.nvmels_cq 
|| !phba->sli4_hba.nvmels_wq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -9213,14 +9530,14 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) rc = -ENOMEM; goto out_destroy; } - rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0], - phba->sli4_hba.nvmels_cq, - phba->sli4_hba.nvmels_wq, - NULL, 0, LPFC_NVME_LS); + rc = lpfc_create_wq_cq(phba, qp[0].hba_eq, + phba->sli4_hba.nvmels_cq, + phba->sli4_hba.nvmels_wq, + NULL, 0, LPFC_NVME_LS); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0529 Failed setup of NVVME LS WQ/CQ: " - "rc = 0x%x\n", (uint32_t)rc); + "0526 Failed setup of NVVME LS WQ/CQ: " + "rc = 0x%x\n", (uint32_t)rc); goto out_destroy; } @@ -9306,20 +9623,29 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) phba->sli4_hba.dat_rq->queue_id, phba->sli4_hba.els_cq->queue_id); - if (phba->cfg_fof) { - rc = lpfc_fof_queue_setup(phba); - if (rc) { + if (phba->cfg_fcp_imax) + usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax; + else + usdelay = 0; + + for (qidx = 0; qidx < phba->cfg_irq_chann; + qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) + lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, + usdelay); + + if (phba->sli4_hba.cq_max) { + kfree(phba->sli4_hba.cq_lookup); + phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1), + sizeof(struct lpfc_queue *), GFP_KERNEL); + if (!phba->sli4_hba.cq_lookup) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0549 Failed setup of FOF Queues: " - "rc = 0x%x\n", rc); + "0549 Failed setup of CQ Lookup table: " + "size 0x%x\n", phba->sli4_hba.cq_max); + rc = -ENOMEM; goto out_destroy; } + lpfc_setup_cq_lookup(phba); } - - for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT) - lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT, - phba->cfg_fcp_imax); - return 0; out_destroy: @@ -9343,12 +9669,9 @@ out_error: void lpfc_sli4_queue_unset(struct lpfc_hba *phba) { + struct lpfc_sli4_hdw_queue *qp; int qidx; - /* Unset the queues created for Flash Optimized Fabric operations */ - if (phba->cfg_fof) - lpfc_fof_queue_destroy(phba); - /* Unset mailbox command work queue */ if (phba->sli4_hba.mbx_wq) lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); @@ -9366,17 +9689,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); - /* Unset FCP work queue */ - if (phba->sli4_hba.fcp_wq) - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) - lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]); - - /* Unset NVME work queue */ - if (phba->sli4_hba.nvme_wq) { - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) - lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]); - } - /* Unset mailbox command complete queue */ if (phba->sli4_hba.mbx_cq) lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); @@ -9389,11 +9701,6 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) if (phba->sli4_hba.nvmels_cq) lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq); - /* Unset NVME response complete queue */ - if (phba->sli4_hba.nvme_cq) - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) - lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]); - if (phba->nvmet_support) { /* Unset NVMET MRQ queue */ if (phba->sli4_hba.nvmet_mrq_hdr) { @@ -9412,15 +9719,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) } } - /* Unset FCP response complete queue */ - if (phba->sli4_hba.fcp_cq) - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) - lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]); + /* Unset fast-path SLI4 queues */ + if (phba->sli4_hba.hdwq) { + for (qidx = 0; qidx < 
phba->cfg_hdw_queue; qidx++) { + qp = &phba->sli4_hba.hdwq[qidx]; + lpfc_wq_destroy(phba, qp->fcp_wq); + lpfc_wq_destroy(phba, qp->nvme_wq); + lpfc_cq_destroy(phba, qp->fcp_cq); + lpfc_cq_destroy(phba, qp->nvme_cq); + if (qidx < phba->cfg_irq_chann) + lpfc_eq_destroy(phba, qp->hba_eq); + } + } - /* Unset fast-path event queue */ - if (phba->sli4_hba.hba_eq) - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) - lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]); + kfree(phba->sli4_hba.cq_lookup); + phba->sli4_hba.cq_lookup = NULL; + phba->sli4_hba.cq_max = 0; } /** @@ -9742,11 +10056,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) uint32_t if_type; if (!pdev) - return error; + return -ENODEV; /* Set the device DMA mask size */ - if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) || - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (error) + error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (error) return error; /* @@ -9808,7 +10124,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) dev_printk(KERN_ERR, &pdev->dev, "ioremap failed for SLI4 PCI config " "registers.\n"); - goto out; + goto out; } lpfc_sli4_bar0_register_memmap(phba, if_type); } @@ -9913,13 +10229,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) case LPFC_SLI_INTF_IF_TYPE_0: case LPFC_SLI_INTF_IF_TYPE_2: phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr; - phba->sli4_hba.sli4_eq_release = lpfc_sli4_eq_release; - phba->sli4_hba.sli4_cq_release = lpfc_sli4_cq_release; + phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db; + phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db; break; case LPFC_SLI_INTF_IF_TYPE_6: phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr; - phba->sli4_hba.sli4_eq_release = lpfc_sli4_if6_eq_release; - phba->sli4_hba.sli4_cq_release = lpfc_sli4_if6_cq_release; + phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db; + phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db; break; default: break; @@ -10199,26 +10515,98 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) phba->sli.slistat.sli_intr = 0; } +/** + * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified EQ + * @phba: pointer to lpfc hba data structure. + * @id: EQ vector index or Hardware Queue index + * @match: LPFC_FIND_BY_EQ = match by EQ + * LPFC_FIND_BY_HDWQ = match by Hardware Queue + */ +static uint16_t +lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + /* Find the desired phys_id for the specified EQ */ + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + if ((match == LPFC_FIND_BY_EQ) && + (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && + (cpup->eq == id)) + return cpu; + if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id)) + return cpu; + } + return 0; +} + +/** + * lpfc_find_eq_handle - Find the EQ that corresponds to the specified + * Hardware Queue + * @phba: pointer to lpfc hba data structure. + * @hdwq: Hardware Queue index + */ +static uint16_t +lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + /* Find the desired phys_id for the specified EQ */ + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + if (cpup->hdwq == hdwq) + return cpup->eq; + } + return 0; +} + +#ifdef CONFIG_X86 +/** + * lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded + * @phba: pointer to lpfc hba data structure. 
+ * @cpu: CPU map index + * @phys_id: CPU package physical id + * @core_id: CPU core id + */ +static int +lpfc_find_hyper(struct lpfc_hba *phba, int cpu, + uint16_t phys_id, uint16_t core_id) +{ + struct lpfc_vector_map_info *cpup; + int idx; + + for_each_present_cpu(idx) { + cpup = &phba->sli4_hba.cpu_map[idx]; + /* Does the cpup match the one we are looking for */ + if ((cpup->phys_id == phys_id) && + (cpup->core_id == core_id) && + (cpu != idx)) + return 1; + } + return 0; +} +#endif + /** * lpfc_cpu_affinity_check - Check vector CPU affinity mappings * @phba: pointer to lpfc hba data structure. * @vectors: number of msix vectors allocated. * * The routine will figure out the CPU affinity assignment for every - * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated - * with a pointer to the CPU mask that defines ALL the CPUs this vector - * can be associated with. If the vector can be unquely associated with - * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu. + * MSI-X vector allocated for the HBA. * In addition, the CPU to IO channel mapping will be calculated * and the phba->sli4_hba.cpu_map array will reflect this. */ static void lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) { + int i, cpu, idx; + int max_phys_id, min_phys_id; + int max_core_id, min_core_id; struct lpfc_vector_map_info *cpup; - int index = 0; - int vec = 0; - int cpu; + const struct cpumask *maskp; #ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo; #endif @@ -10226,32 +10614,71 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) /* Init cpu_map array */ memset(phba->sli4_hba.cpu_map, 0xff, (sizeof(struct lpfc_vector_map_info) * - phba->sli4_hba.num_present_cpu)); + phba->sli4_hba.num_possible_cpu)); + + max_phys_id = 0; + min_phys_id = 0xffff; + max_core_id = 0; + min_core_id = 0xffff; /* Update CPU map with physical id and core id of each CPU */ - cpup = phba->sli4_hba.cpu_map; - for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; #ifdef CONFIG_X86 cpuinfo = &cpu_data(cpu); cpup->phys_id = cpuinfo->phys_proc_id; cpup->core_id = cpuinfo->cpu_core_id; + cpup->hyper = lpfc_find_hyper(phba, cpu, + cpup->phys_id, cpup->core_id); #else /* No distinction between CPUs for other platforms */ cpup->phys_id = 0; - cpup->core_id = 0; + cpup->core_id = cpu; + cpup->hyper = 0; #endif - cpup->channel_id = index; /* For now round robin */ - cpup->irq = pci_irq_vector(phba->pcidev, vec); - vec++; - if (vec >= vectors) - vec = 0; - index++; - if (index >= phba->cfg_fcp_io_channel) - index = 0; - cpup++; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU physid %d coreid %d\n", + cpup->phys_id, cpup->core_id); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + if (cpup->phys_id < min_phys_id) + min_phys_id = cpup->phys_id; + + if (cpup->core_id > max_core_id) + max_core_id = cpup->core_id; + if (cpup->core_id < min_core_id) + min_core_id = cpup->core_id; } -} + for_each_possible_cpu(i) { + struct lpfc_eq_intr_info *eqi = + per_cpu_ptr(phba->sli4_hba.eq_info, i); + + INIT_LIST_HEAD(&eqi->list); + eqi->icnt = 0; + } + + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + maskp = pci_irq_get_affinity(phba->pcidev, idx); + if (!maskp) + continue; + + for_each_cpu_and(cpu, maskp, cpu_present_mask) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + cpup->eq = idx; + cpup->hdwq = idx; + cpup->irq = pci_irq_vector(phba->pcidev, idx); + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3336 Set 
Affinity: CPU %d " + "hdwq %d irq %d\n", + cpu, cpup->hdwq, cpup->irq); + } + } + return; +} /** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device @@ -10271,12 +10698,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) char *name; /* Set up MSI-X multi-message vectors */ - vectors = phba->io_channel_irqs; - if (phba->cfg_fof) - vectors++; + vectors = phba->cfg_irq_chann; rc = pci_alloc_irq_vectors(phba->pcidev, - (phba->nvmet_support) ? 1 : 2, + 1, vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -10294,17 +10719,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) phba->sli4_hba.hba_eq_hdl[index].idx = index; phba->sli4_hba.hba_eq_hdl[index].phba = phba; - atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1); - if (phba->cfg_fof && (index == (vectors - 1))) - rc = request_irq(pci_irq_vector(phba->pcidev, index), - &lpfc_sli4_fof_intr_handler, 0, - name, - &phba->sli4_hba.hba_eq_hdl[index]); - else - rc = request_irq(pci_irq_vector(phba->pcidev, index), - &lpfc_sli4_hba_intr_handler, 0, - name, - &phba->sli4_hba.hba_eq_hdl[index]); + rc = request_irq(pci_irq_vector(phba->pcidev, index), + &lpfc_sli4_hba_intr_handler, 0, + name, + &phba->sli4_hba.hba_eq_hdl[index]); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " @@ -10313,24 +10731,16 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) } } - if (phba->cfg_fof) - vectors--; - - if (vectors != phba->io_channel_irqs) { + if (vectors != phba->cfg_irq_chann) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3238 Reducing IO channels to match number of " "MSI-X vectors, requested %d got %d\n", - phba->io_channel_irqs, vectors); - if (phba->cfg_fcp_io_channel > vectors) - phba->cfg_fcp_io_channel = vectors; - if (phba->cfg_nvme_io_channel > vectors) - phba->cfg_nvme_io_channel = vectors; - if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel) - phba->io_channel_irqs = phba->cfg_fcp_io_channel; - else - phba->io_channel_irqs = phba->cfg_nvme_io_channel; + phba->cfg_irq_chann, vectors); + if (phba->cfg_irq_chann > vectors) + phba->cfg_irq_chann = vectors; + if (phba->cfg_nvmet_mrq > vectors) + phba->cfg_nvmet_mrq = vectors; } - lpfc_cpu_affinity_check(phba, vectors); return rc; @@ -10385,15 +10795,11 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) return rc; } - for (index = 0; index < phba->io_channel_irqs; index++) { + for (index = 0; index < phba->cfg_irq_chann; index++) { phba->sli4_hba.hba_eq_hdl[index].idx = index; phba->sli4_hba.hba_eq_hdl[index].phba = phba; } - if (phba->cfg_fof) { - phba->sli4_hba.hba_eq_hdl[index].idx = index; - phba->sli4_hba.hba_eq_hdl[index].phba = phba; - } return 0; } @@ -10454,17 +10860,10 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) phba->intr_type = INTx; intr_mode = 0; - for (idx = 0; idx < phba->io_channel_irqs; idx++) { - eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; - eqhdl->idx = idx; - eqhdl->phba = phba; - atomic_set(&eqhdl->hba_eq_in_use, 1); - } - if (phba->cfg_fof) { + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; eqhdl->idx = idx; eqhdl->phba = phba; - atomic_set(&eqhdl->hba_eq_in_use, 1); } } } @@ -10488,13 +10887,13 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba) int index; /* Free up MSI-X multi-message vectors */ - for (index = 0; index < phba->io_channel_irqs; index++) - free_irq(pci_irq_vector(phba->pcidev, index), - &phba->sli4_hba.hba_eq_hdl[index]); - - if (phba->cfg_fof) + for (index = 0; index < phba->cfg_irq_chann; 
index++) { + irq_set_affinity_hint( + pci_irq_vector(phba->pcidev, index), + NULL); free_irq(pci_irq_vector(phba->pcidev, index), &phba->sli4_hba.hba_eq_hdl[index]); + } } else { free_irq(phba->pcidev->irq, phba); } @@ -10555,8 +10954,10 @@ lpfc_unset_hba(struct lpfc_hba *phba) static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { + struct lpfc_sli4_hdw_queue *qp; + int idx, ccnt, fcnt; int wait_time = 0; - int nvme_xri_cmpl = 1; + int io_xri_cmpl = 1; int nvmet_xri_cmpl = 1; int fcp_xri_cmpl = 1; int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); @@ -10571,17 +10972,32 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) lpfc_nvme_wait_for_io_drain(phba); - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) - fcp_xri_cmpl = - list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + ccnt = 0; + fcnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + fcp_xri_cmpl = list_empty( + &qp->lpfc_abts_scsi_buf_list); + if (!fcp_xri_cmpl) /* if list is NOT empty */ + fcnt++; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + io_xri_cmpl = list_empty( + &qp->lpfc_abts_nvme_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; + } + } + if (ccnt) + io_xri_cmpl = 0; + if (fcnt) + fcp_xri_cmpl = 0; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - nvme_xri_cmpl = - list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list); nvmet_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } - while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || + while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { if (!nvmet_xri_cmpl) @@ -10589,7 +11005,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) "6424 NVMET XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); - if (!nvme_xri_cmpl) + if (!io_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "6100 NVME XRI exchange busy " "wait time: %d seconds.\n", @@ -10610,17 +11026,31 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; } + + ccnt = 0; + fcnt = 0; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + fcp_xri_cmpl = list_empty( + &qp->lpfc_abts_scsi_buf_list); + if (!fcp_xri_cmpl) /* if list is NOT empty */ + fcnt++; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + io_xri_cmpl = list_empty( + &qp->lpfc_abts_nvme_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; + } + } + if (ccnt) + io_xri_cmpl = 0; + if (fcnt) + fcp_xri_cmpl = 0; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - nvme_xri_cmpl = list_empty( - &phba->sli4_hba.lpfc_abts_nvme_buf_list); nvmet_xri_cmpl = list_empty( &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) - fcp_xri_cmpl = list_empty( - &phba->sli4_hba.lpfc_abts_scsi_buf_list); - els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); @@ -10645,7 +11075,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) struct pci_dev *pdev = phba->pcidev; lpfc_stop_hba_timers(phba); - phba->sli4_hba.intr_enable = 0; + if (phba->pport) + phba->sli4_hba.intr_enable = 0; /* * Gracefully wait out the potential current outstanding asynchronous @@ -10864,8 +11295,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->nvme_support = 0; phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 
LPFC_NVMET_MRQ_OFF; - phba->cfg_nvme_io_channel = 0; - phba->io_channel_irqs = phba->cfg_fcp_io_channel; lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME, "6101 Disabling NVME support: " "Not supported by firmware: %d %d\n", @@ -11190,6 +11619,8 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev) * corresponding pools here. */ lpfc_scsi_free(phba); + lpfc_free_iocb_list(phba); + lpfc_mem_free_all(phba); dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), @@ -11815,28 +12246,11 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Get the default values for Model Name and Description */ lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); - /* Create SCSI host to the physical port */ - error = lpfc_create_shost(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1415 Failed to create scsi host.\n"); - goto out_unset_driver_resource; - } - - /* Configure sysfs attributes */ - vport = phba->pport; - error = lpfc_alloc_sysfs_attr(vport); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1416 Failed to allocate sysfs attr\n"); - goto out_destroy_shost; - } - - shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; /* Put device to a known state before enabling interrupt */ + phba->pport = NULL; lpfc_stop_port(phba); /* Configure and enable interrupt */ @@ -11845,18 +12259,34 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0426 Failed to enable interrupt.\n"); error = -ENODEV; - goto out_free_sysfs_attr; + goto out_unset_driver_resource; } /* Default to single EQ for non-MSI-X */ if (phba->intr_type != MSIX) { - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) - phba->cfg_fcp_io_channel = 1; + phba->cfg_irq_chann = 1; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - phba->cfg_nvme_io_channel = 1; if (phba->nvmet_support) phba->cfg_nvmet_mrq = 1; } - phba->io_channel_irqs = 1; + } + lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann); + + /* Create SCSI host to the physical port */ + error = lpfc_create_shost(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1415 Failed to create scsi host.\n"); + goto out_disable_intr; + } + vport = phba->pport; + shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */ + + /* Configure sysfs attributes */ + error = lpfc_alloc_sysfs_attr(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1416 Failed to allocate sysfs attr\n"); + goto out_destroy_shost; } /* Set up SLI-4 HBA */ @@ -11864,7 +12294,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1421 Failed to set up hba\n"); error = -ENODEV; - goto out_disable_intr; + goto out_free_sysfs_attr; } /* Log the current active interrupt mode */ @@ -11877,19 +12307,20 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* NVME support in FW earlier in the driver load corrects the * FC4 type making a check for nvme_support unnecessary. */ - if ((phba->nvmet_support == 0) && - (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { - /* Create NVME binding with nvme_fc_transport. This - * ensures the vport is initialized. If the localport - * create fails, it should not unload the driver to - * support field issues. 
- */ - error = lpfc_nvme_create_localport(vport); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6004 NVME registration failed, " - "error x%x\n", - error); + if (phba->nvmet_support == 0) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Create NVME binding with nvme_fc_transport. This + * ensures the vport is initialized. If the localport + * create fails, it should not unload the driver to + * support field issues. + */ + error = lpfc_nvme_create_localport(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "6004 NVME registration " + "failed, error x%x\n", + error); + } } } @@ -11905,12 +12336,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) return 0; -out_disable_intr: - lpfc_sli4_disable_intr(phba); out_free_sysfs_attr: lpfc_free_sysfs_attr(vport); out_destroy_shost: lpfc_destroy_shost(phba); +out_disable_intr: + lpfc_sli4_disable_intr(phba); out_unset_driver_resource: lpfc_unset_driver_resource_phase2(phba); out_unset_driver_resource_s4: @@ -11973,13 +12404,16 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) lpfc_nvmet_destroy_targetport(phba); lpfc_nvme_destroy_localport(vport); + /* De-allocate multi-XRI pools */ + if (phba->cfg_xri_rebalancing) + lpfc_destroy_multixri_pools(phba); + /* * Bring down the SLI Layer. This step disables all interrupts, * clears the rings, discards all mailbox commands, and resets * the HBA FCoE function. */ lpfc_debugfs_terminate(vport); - lpfc_sli4_hba_unset(phba); lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->port_list_lock); @@ -11989,9 +12423,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev) /* Perform scsi free before driver resource_unset since scsi * buffers are released to their corresponding pools here. */ - lpfc_scsi_free(phba); - lpfc_nvme_free(phba); + lpfc_io_free(phba); lpfc_free_iocb_list(phba); + lpfc_sli4_hba_unset(phba); lpfc_unset_driver_resource_phase2(phba); lpfc_sli4_driver_resource_unset(phba); @@ -12653,165 +13087,6 @@ lpfc_sli4_ras_init(struct lpfc_hba *phba) } } -/** - * lpfc_fof_queue_setup - Set up all the fof queues - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up all the fof queues for the FC HBA - * operation. - * - * Return codes - * 0 - successful - * -ENOMEM - No available memory - **/ -int -lpfc_fof_queue_setup(struct lpfc_hba *phba) -{ - struct lpfc_sli_ring *pring; - int rc; - - rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); - if (rc) - return -ENOMEM; - - if (phba->cfg_fof) { - - rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, - phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); - if (rc) - goto out_oas_cq; - - rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, - phba->sli4_hba.oas_cq, LPFC_FCP); - if (rc) - goto out_oas_wq; - - /* Bind this CQ/WQ to the NVME ring */ - pring = phba->sli4_hba.oas_wq->pring; - pring->sli.sli4.wqp = - (void *)phba->sli4_hba.oas_wq; - phba->sli4_hba.oas_cq->pring = pring; - } - - return 0; - -out_oas_wq: - lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); -out_oas_cq: - lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); - return rc; - -} - -/** - * lpfc_fof_queue_create - Create all the fof queues - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to allocate all the fof queues for the FC HBA - * operation. For each SLI4 queue type, the parameters such as queue entry - * count (queue depth) shall be taken from the module parameter. For now, - * we just use some constant number as place holder. 
- * - * Return codes - * 0 - successful - * -ENOMEM - No availble memory - * -EIO - The mailbox failed to complete successfully. - **/ -int -lpfc_fof_queue_create(struct lpfc_hba *phba) -{ - struct lpfc_queue *qdesc; - uint32_t wqesize; - - /* Create FOF EQ */ - qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.eq_esize, - phba->sli4_hba.eq_ecount); - if (!qdesc) - goto out_error; - - qdesc->qe_valid = 1; - phba->sli4_hba.fof_eq = qdesc; - - if (phba->cfg_fof) { - - /* Create OAS CQ */ - if (phba->enab_exp_wqcq_pages) - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_EXPANDED_PAGE_SIZE, - phba->sli4_hba.cq_esize, - LPFC_CQE_EXP_COUNT); - else - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) - goto out_error; - - qdesc->qe_valid = 1; - phba->sli4_hba.oas_cq = qdesc; - - /* Create OAS WQ */ - if (phba->enab_exp_wqcq_pages) { - wqesize = (phba->fcp_embed_io) ? - LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize; - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_EXPANDED_PAGE_SIZE, - wqesize, - LPFC_WQE_EXP_COUNT); - } else - qdesc = lpfc_sli4_queue_alloc(phba, - LPFC_DEFAULT_PAGE_SIZE, - phba->sli4_hba.wq_esize, - phba->sli4_hba.wq_ecount); - - if (!qdesc) - goto out_error; - - phba->sli4_hba.oas_wq = qdesc; - list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); - - } - return 0; - -out_error: - lpfc_fof_queue_destroy(phba); - return -ENOMEM; -} - -/** - * lpfc_fof_queue_destroy - Destroy all the fof queues - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to release all the SLI4 queues with the FC HBA - * operation. - * - * Return codes - * 0 - successful - **/ -int -lpfc_fof_queue_destroy(struct lpfc_hba *phba) -{ - /* Release FOF Event queue */ - if (phba->sli4_hba.fof_eq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); - phba->sli4_hba.fof_eq = NULL; - } - - /* Release OAS Completion queue */ - if (phba->sli4_hba.oas_cq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); - phba->sli4_hba.oas_cq = NULL; - } - - /* Release OAS Work queue */ - if (phba->sli4_hba.oas_wq != NULL) { - lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); - phba->sli4_hba.oas_wq = NULL; - } - return 0; -} MODULE_DEVICE_TABLE(pci, lpfc_id_table); @@ -12883,7 +13158,6 @@ lpfc_init(void) lpfc_nvmet_cmd_template(); /* Initialize in case vector mapping is needed */ - lpfc_used_cpu = NULL; lpfc_present_cpu = num_present_cpus(); error = pci_register_driver(&lpfc_driver); @@ -12922,7 +13196,6 @@ lpfc_exit(void) (1L << _dump_buf_dif_order), _dump_buf_dif); free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); } - kfree(lpfc_used_cpu); idr_destroy(&lpfc_hba_index); } diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 4d3b94317515..8abe933bad09 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -2095,8 +2095,8 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) if (phba->nvmet_support) { bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1); /* iaab/iaar NOT set for now */ - bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0); - bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0); + bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0); + bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0); } return; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 96bc3789a166..6172682a24ba 100644 --- 
a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -825,7 +825,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, "rport rolechg: role:x%x did:x%x flg:x%x", roles, ndlp->nlp_DID, ndlp->nlp_flag); - if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME) + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) fc_remote_port_rolechg(rport, roles); } } @@ -1789,8 +1789,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * is configured try it. */ ndlp->nlp_fc4_type |= NLP_FC4_FCP; - if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { + if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || + (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) { ndlp->nlp_fc4_type |= NLP_FC4_NVME; /* We need to update the localport also */ lpfc_nvme_update_localport(vport); @@ -1804,7 +1804,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, * should just issue PRLI for FCP. Otherwise issue * GFT_ID to determine if remote port supports NVME. */ - if (phba->cfg_enable_fc4_type != LPFC_ENABLE_FCP) { + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) { rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID, 0, ndlp->nlp_DID); return ndlp->nlp_state; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 8c9f79042228..55ab9d3ee4ba 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -56,12 +56,12 @@ /* NVME initiator-based functions */ -static struct lpfc_nvme_buf * +static struct lpfc_io_buf * lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - int expedite); + int idx, int expedite); static void -lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *); +lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *); static struct nvme_fc_port_template lpfc_nvme_template; @@ -239,7 +239,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, if (qidx) { str = "IO "; /* IO queue */ qhandle->index = ((qidx - 1) % - vport->phba->cfg_nvme_io_channel); + lpfc_nvme_template.max_hw_queues); } else { str = "ADM"; /* Admin queue */ qhandle->index = qidx; @@ -247,7 +247,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6073 Binding %s HdwQueue %d (cpu %d) to " - "io_channel %d qhandle %p\n", str, + "hdw_queue %d qhandle %p\n", str, qidx, qhandle->cpu_id, qhandle->index, qhandle); *handle = (void *)qhandle; return 0; @@ -529,7 +529,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n", genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID); - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe); + rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe); if (rc) { lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "6045 Issue GEN REQ WQE to NPORT x%x " @@ -761,7 +761,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, /* Fix up the existing sgls for NVME IO. */ static inline void lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, - struct lpfc_nvme_buf *lpfc_ncmd, + struct lpfc_io_buf *lpfc_ncmd, struct nvmefc_fcp_req *nCmd) { struct lpfc_hba *phba = vport->phba; @@ -784,7 +784,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, * rather than the virtual memory to ease the restore * operation. */ - sgl = lpfc_ncmd->nvme_sgl; + sgl = lpfc_ncmd->dma_sgl; sgl->sge_len = cpu_to_le32(nCmd->cmdlen); if (phba->cfg_nvme_embed_cmd) { sgl->addr_hi = 0; @@ -858,7 +858,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport, #ifdef CONFIG_SCSI_LPFC_DEBUG_FS static void lpfc_nvme_ktime(struct lpfc_hba *phba, - struct lpfc_nvme_buf *lpfc_ncmd) + struct lpfc_io_buf *lpfc_ncmd) { uint64_t seg1, seg2, seg3, seg4; uint64_t segsum; @@ -956,57 +956,54 @@ static void lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, struct lpfc_wcqe_complete *wcqe) { - struct lpfc_nvme_buf *lpfc_ncmd = - (struct lpfc_nvme_buf *)pwqeIn->context1; + struct lpfc_io_buf *lpfc_ncmd = + (struct lpfc_io_buf *)pwqeIn->context1; struct lpfc_vport *vport = pwqeIn->vport; struct nvmefc_fcp_req *nCmd; struct nvme_fc_ersp_iu *ep; struct nvme_fc_cmd_iu *cp; - struct lpfc_nvme_rport *rport; struct lpfc_nodelist *ndlp; struct lpfc_nvme_fcpreq_priv *freqpriv; struct lpfc_nvme_lport *lport; - struct lpfc_nvme_ctrl_stat *cstat; - unsigned long flags; - uint32_t code, status, idx; + uint32_t code, status, idx, cpu; uint16_t cid, sqhd, data; uint32_t *ptr; /* Sanity check on return of outstanding command */ - if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) { - if (!lpfc_ncmd) { - lpfc_printf_vlog(vport, KERN_ERR, - LOG_NODE | LOG_NVME_IOERR, - "6071 Null lpfc_ncmd pointer. No " - "release, skip completion\n"); - return; - } + if (!lpfc_ncmd) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_NODE | LOG_NVME_IOERR, + "6071 Null lpfc_ncmd pointer. 
No " + "release, skip completion\n"); + return; + } + + /* Guard against abort handler being called at same time */ + spin_lock(&lpfc_ncmd->buf_lock); + if (!lpfc_ncmd->nvmeCmd) { + spin_unlock(&lpfc_ncmd->buf_lock); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, "6066 Missing cmpl ptrs: lpfc_ncmd %p, " - "nvmeCmd %p nrport %p\n", - lpfc_ncmd, lpfc_ncmd->nvmeCmd, - lpfc_ncmd->nrport); + "nvmeCmd %p\n", + lpfc_ncmd, lpfc_ncmd->nvmeCmd); /* Release the lpfc_ncmd regardless of the missing elements. */ lpfc_release_nvme_buf(phba, lpfc_ncmd); return; } nCmd = lpfc_ncmd->nvmeCmd; - rport = lpfc_ncmd->nrport; status = bf_get(lpfc_wcqe_c_status, wcqe); + idx = lpfc_ncmd->cur_iocbq.hba_wqidx; + phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; + if (vport->localport) { lport = (struct lpfc_nvme_lport *)vport->localport->private; - if (lport) { - idx = lpfc_ncmd->cur_iocbq.hba_wqidx; - cstat = &lport->cstat[idx]; - atomic_inc(&cstat->fc4NvmeIoCmpls); - if (status) { - if (bf_get(lpfc_wcqe_c_xb, wcqe)) - atomic_inc(&lport->cmpl_fcp_xb); - atomic_inc(&lport->cmpl_fcp_err); - } + if (lport && status) { + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + atomic_inc(&lport->cmpl_fcp_xb); + atomic_inc(&lport->cmpl_fcp_err); } } @@ -1017,18 +1014,11 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, * Catch race where our node has transitioned, but the * transport is still transitioning. */ - ndlp = rport->ndlp; + ndlp = lpfc_ncmd->ndlp; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, - "6061 rport %p, DID x%06x node not ready.\n", - rport, rport->remoteport->port_id); - - ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id); - if (!ndlp) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, - "6062 Ignoring NVME cmpl. No ndlp\n"); - goto out_err; - } + lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, + "6062 Ignoring NVME cmpl. No ndlp\n"); + goto out_err; } code = bf_get(lpfc_wcqe_c_code, wcqe); @@ -1148,13 +1138,17 @@ out_err: lpfc_nvme_ktime(phba, lpfc_ncmd); } if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { - if (lpfc_ncmd->cpu != smp_processor_id()) - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, - "6701 CPU Check cmpl: " - "cpu %d expect %d\n", - smp_processor_id(), lpfc_ncmd->cpu); - if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++; + idx = lpfc_ncmd->cur_iocbq.hba_wqidx; + cpu = smp_processor_id(); + if (cpu < LPFC_CHECK_CPU_CNT) { + if (lpfc_ncmd->cpu != cpu) + lpfc_printf_vlog(vport, + KERN_INFO, LOG_NVME_IOERR, + "6701 CPU Check cmpl: " + "cpu %d expect %d\n", + cpu, lpfc_ncmd->cpu); + phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; + } } #endif @@ -1165,13 +1159,11 @@ out_err: if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { freqpriv = nCmd->private; freqpriv->nvme_buf = NULL; - nCmd->done(nCmd); lpfc_ncmd->nvmeCmd = NULL; - } - - spin_lock_irqsave(&phba->hbalock, flags); - lpfc_ncmd->nrport = NULL; - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock(&lpfc_ncmd->buf_lock); + nCmd->done(nCmd); + } else + spin_unlock(&lpfc_ncmd->buf_lock); /* Call release with XB=1 to queue the IO into the abort list. 
*/ lpfc_release_nvme_buf(phba, lpfc_ncmd); @@ -1196,9 +1188,9 @@ out_err: **/ static int lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, - struct lpfc_nvme_buf *lpfc_ncmd, + struct lpfc_io_buf *lpfc_ncmd, struct lpfc_nodelist *pnode, - struct lpfc_nvme_ctrl_stat *cstat) + struct lpfc_fc4_ctrl_stat *cstat) { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; @@ -1206,7 +1198,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t req_len; - if (!pnode || !NLP_CHK_NODE_ACT(pnode)) + if (!NLP_CHK_NODE_ACT(pnode)) return -EINVAL; /* @@ -1236,7 +1228,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, } else { wqe->fcp_iwrite.initial_xfer_len = 0; } - atomic_inc(&cstat->fc4NvmeOutputRequests); + cstat->output_requests++; } else { /* From the iread template, initialize words 7 - 11 */ memcpy(&wqe->words[7], @@ -1249,13 +1241,13 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ wqe->fcp_iread.rsrvd5 = 0; - atomic_inc(&cstat->fc4NvmeInputRequests); + cstat->input_requests++; } } else { /* From the icmnd template, initialize words 4 - 11 */ memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4], sizeof(uint32_t) * 8); - atomic_inc(&cstat->fc4NvmeControlRequests); + cstat->control_requests++; } /* * Finish initializing those WQE fields that are independent @@ -1302,12 +1294,12 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, **/ static int lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, - struct lpfc_nvme_buf *lpfc_ncmd) + struct lpfc_io_buf *lpfc_ncmd) { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; - struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl; + struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; struct scatterlist *data_sg; struct sli4_sge *first_data_sgl; struct ulp_bde64 *bde; @@ -1396,6 +1388,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, } } else { + lpfc_ncmd->seg_cnt = 0; + /* For this clause to be valid, the payload_length * and sg_cnt must zero. */ @@ -1435,13 +1429,13 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, { int ret = 0; int expedite = 0; - int idx; + int idx, cpu; struct lpfc_nvme_lport *lport; - struct lpfc_nvme_ctrl_stat *cstat; + struct lpfc_fc4_ctrl_stat *cstat; struct lpfc_vport *vport; struct lpfc_hba *phba; struct lpfc_nodelist *ndlp; - struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd; struct lpfc_nvme_rport *rport; struct lpfc_nvme_qhandle *lpfc_queue_info; struct lpfc_nvme_fcpreq_priv *freqpriv; @@ -1559,7 +1553,15 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, } } - lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite); + /* Lookup Hardware Queue index based on fcp_io_sched module parameter */ + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { + idx = lpfc_queue_info->index; + } else { + cpu = smp_processor_id(); + idx = phba->sli4_hba.cpu_map[cpu].hdwq; + } + + lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite); if (lpfc_ncmd == NULL) { atomic_inc(&lport->xmt_fcp_noxri); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, @@ -1586,9 +1588,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, */ freqpriv->nvme_buf = lpfc_ncmd; lpfc_ncmd->nvmeCmd = pnvme_fcreq; - lpfc_ncmd->nrport = rport; lpfc_ncmd->ndlp = ndlp; - lpfc_ncmd->start_time = jiffies; + lpfc_ncmd->qidx = lpfc_queue_info->qidx; /* * Issue the IO on the WQ indicated by index in the hw_queue_handle. 
@@ -1598,9 +1599,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, * index to use and that they have affinitized a CPU to this hardware * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ. */ - idx = lpfc_queue_info->index; lpfc_ncmd->cur_iocbq.hba_wqidx = idx; - cstat = &lport->cstat[idx]; + cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat; lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat); ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd); @@ -1618,7 +1618,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_queue_info->index, ndlp->nlp_DID); - ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); + ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq); if (ret) { atomic_inc(&lport->xmt_fcp_wqerr); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, @@ -1629,26 +1629,26 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, goto out_free_nvme_buf; } + if (phba->cfg_xri_rebalancing) + lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no); + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_ncmd->ts_cmd_start) lpfc_ncmd->ts_cmd_wqput = ktime_get_ns(); if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { - lpfc_ncmd->cpu = smp_processor_id(); - if (lpfc_ncmd->cpu != lpfc_queue_info->index) { - /* Check for admin queue */ - if (lpfc_queue_info->qidx) { + cpu = smp_processor_id(); + if (cpu < LPFC_CHECK_CPU_CNT) { + lpfc_ncmd->cpu = cpu; + if (idx != cpu) lpfc_printf_vlog(vport, - KERN_ERR, LOG_NVME_IOERR, + KERN_INFO, LOG_NVME_IOERR, "6702 CPU Check cmd: " "cpu %d wq %d\n", lpfc_ncmd->cpu, lpfc_queue_info->index); - } - lpfc_ncmd->cpu = lpfc_queue_info->index; + phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++; } - if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++; } #endif return 0; @@ -1656,11 +1656,11 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, out_free_nvme_buf: if (lpfc_ncmd->nvmeCmd->sg_cnt) { if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE) - atomic_dec(&cstat->fc4NvmeOutputRequests); + cstat->output_requests--; else - atomic_dec(&cstat->fc4NvmeInputRequests); + cstat->input_requests--; } else - atomic_dec(&cstat->fc4NvmeControlRequests); + cstat->control_requests--; lpfc_release_nvme_buf(phba, lpfc_ncmd); out_fail: return ret; @@ -1720,7 +1720,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct lpfc_nvme_lport *lport; struct lpfc_vport *vport; struct lpfc_hba *phba; - struct lpfc_nvme_buf *lpfc_nbuf; + struct lpfc_io_buf *lpfc_nbuf; struct lpfc_iocbq *abts_buf; struct lpfc_iocbq *nvmereq_wqe; struct lpfc_nvme_fcpreq_priv *freqpriv; @@ -1788,6 +1788,9 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, } nvmereq_wqe = &lpfc_nbuf->cur_iocbq; + /* Guard against IO completion being called at same time */ + spin_lock(&lpfc_nbuf->buf_lock); + /* * The lpfc_nbuf and the mapped nvme_fcreq in the driver's * state must match the nvme_fcreq passed by the nvme @@ -1796,24 +1799,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, * has not seen it yet. */ if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6143 NVME req mismatch: " "lpfc_nbuf %p nvmeCmd %p, " "pnvme_fcreq %p. Skipping Abort xri x%x\n", lpfc_nbuf, lpfc_nbuf->nvmeCmd, pnvme_fcreq, nvmereq_wqe->sli4_xritag); - return; + goto out_unlock; } /* Don't abort IOs no longer on the pending queue. 
*/ if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6142 NVME IO req %p not queued - skipping " "abort req xri x%x\n", pnvme_fcreq, nvmereq_wqe->sli4_xritag); - return; + goto out_unlock; } atomic_inc(&lport->xmt_fcp_abort); @@ -1823,24 +1824,22 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Outstanding abort is in progress */ if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6144 Outstanding NVME I/O Abort Request " "still pending on nvme_fcreq %p, " "lpfc_ncmd %p xri x%x\n", pnvme_fcreq, lpfc_nbuf, nvmereq_wqe->sli4_xritag); - return; + goto out_unlock; } abts_buf = __lpfc_sli_get_iocbq(phba); if (!abts_buf) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6136 No available abort wqes. Skipping " "Abts req for nvme_fcreq %p xri x%x\n", pnvme_fcreq, nvmereq_wqe->sli4_xritag); - return; + goto out_unlock; } /* Ready - mark outstanding as aborted by driver. */ @@ -1883,7 +1882,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx; abts_buf->vport = vport; abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; - ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf); + ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf); + spin_unlock(&lpfc_nbuf->buf_lock); spin_unlock_irqrestore(&phba->hbalock, flags); if (ret_val) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, @@ -1899,6 +1899,12 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, "ox_id x%x on reqtag x%x\n", nvmereq_wqe->sli4_xritag, abts_buf->iotag); + return; + +out_unlock: + spin_unlock(&lpfc_nbuf->buf_lock); + spin_unlock_irqrestore(&phba->hbalock, flags); + return; } /* Declare and initialization an instance of the FC NVME template. */ @@ -1928,454 +1934,63 @@ static struct nvme_fc_port_template lpfc_nvme_template = { }; /** - * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware - * @phba: pointer to lpfc hba data structure. - * @nblist: pointer to nvme buffer list. - * @count: number of scsi buffers on the list. - * - * This routine is invoked to post a block of @count scsi sgl pages from a - * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. - * No Lock is held. 
- * - **/ -static int -lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba, - struct list_head *nblist, - int count) -{ - struct lpfc_nvme_buf *lpfc_ncmd; - struct lpfc_mbx_post_uembed_sgl_page1 *sgl; - struct sgl_page_pairs *sgl_pg_pairs; - void *viraddr; - LPFC_MBOXQ_t *mbox; - uint32_t reqlen, alloclen, pg_pairs; - uint32_t mbox_tmo; - uint16_t xritag_start = 0; - int rc = 0; - uint32_t shdr_status, shdr_add_status; - dma_addr_t pdma_phys_bpl1; - union lpfc_sli4_cfg_shdr *shdr; - - /* Calculate the requested length of the dma memory */ - reqlen = count * sizeof(struct sgl_page_pairs) + - sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - if (reqlen > SLI4_PAGE_SIZE) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "6118 Block sgl registration required DMA " - "size (%d) great than a page\n", reqlen); - return -ENOMEM; - } - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6119 Failed to allocate mbox cmd memory\n"); - return -ENOMEM; - } - - /* Allocate DMA memory and set up the non-embedded mailbox command */ - alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, - LPFC_SLI4_MBX_NEMBED); - - if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6120 Allocated DMA memory size (%d) is " - "less than the requested DMA memory " - "size (%d)\n", alloclen, reqlen); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - - /* Get the first SGE entry from the non-embedded DMA memory */ - viraddr = mbox->sge_array->addr[0]; - - /* Set up the SGL pages in the non-embedded DMA pages */ - sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; - sgl_pg_pairs = &sgl->sgl_pg_pairs; - - pg_pairs = 0; - list_for_each_entry(lpfc_ncmd, nblist, list) { - /* Set up the sge entry */ - sgl_pg_pairs->sgl_pg0_addr_lo = - cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); - sgl_pg_pairs->sgl_pg0_addr_hi = - cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + - SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; - sgl_pg_pairs->sgl_pg1_addr_lo = - cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); - sgl_pg_pairs->sgl_pg1_addr_hi = - cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); - /* Keep the first xritag on the list */ - if (pg_pairs == 0) - xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; - sgl_pg_pairs++; - pg_pairs++; - } - bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); - /* Perform endian conversion if necessary */ - sgl->word0 = cpu_to_le32(sgl->word0); - - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (rc != MBX_TIMEOUT) - lpfc_sli4_mbox_cmd_free(phba, mbox); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "6125 POST_SGL_BLOCK mailbox command failed " - "status x%x add_status x%x mbx status x%x\n", - shdr_status, shdr_add_status, rc); - rc = -ENXIO; - } - return rc; -} - -/** - * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list - * @phba: pointer to lpfc hba data structure. 
- * @post_nblist: pointer to the nvme buffer list. - * - * This routine walks a list of nvme buffers that was passed in. It attempts - * to construct blocks of nvme buffer sgls which contains contiguous xris and - * uses the non-embedded SGL block post mailbox commands to post to the port. - * For single NVME buffer sgl with non-contiguous xri, if any, it shall use - * embedded SGL post mailbox command for posting. The @post_nblist passed in - * must be local list, thus no lock is needed when manipulate the list. - * - * Returns: 0 = failure, non-zero number of successfully posted buffers. - **/ -static int -lpfc_post_nvme_sgl_list(struct lpfc_hba *phba, - struct list_head *post_nblist, int sb_count) -{ - struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; - int status, sgl_size; - int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; - dma_addr_t pdma_phys_sgl1; - int last_xritag = NO_XRI; - int cur_xritag; - LIST_HEAD(prep_nblist); - LIST_HEAD(blck_nblist); - LIST_HEAD(nvme_nblist); - - /* sanity check */ - if (sb_count <= 0) - return -EINVAL; - - sgl_size = phba->cfg_sg_dma_buf_size; - - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { - list_del_init(&lpfc_ncmd->list); - block_cnt++; - if ((last_xritag != NO_XRI) && - (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { - /* a hole in xri block, form a sgl posting block */ - list_splice_init(&prep_nblist, &blck_nblist); - post_cnt = block_cnt - 1; - /* prepare list for next posting block */ - list_add_tail(&lpfc_ncmd->list, &prep_nblist); - block_cnt = 1; - } else { - /* prepare list for next posting block */ - list_add_tail(&lpfc_ncmd->list, &prep_nblist); - /* enough sgls for non-embed sgl mbox command */ - if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { - list_splice_init(&prep_nblist, &blck_nblist); - post_cnt = block_cnt; - block_cnt = 0; - } - } - num_posting++; - last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; - - /* end of repost sgl list condition for NVME buffers */ - if (num_posting == sb_count) { - if (post_cnt == 0) { - /* last sgl posting block */ - list_splice_init(&prep_nblist, &blck_nblist); - post_cnt = block_cnt; - } else if (block_cnt == 1) { - /* last single sgl with non-contiguous xri */ - if (sgl_size > SGL_PAGE_SIZE) - pdma_phys_sgl1 = - lpfc_ncmd->dma_phys_sgl + - SGL_PAGE_SIZE; - else - pdma_phys_sgl1 = 0; - cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; - status = lpfc_sli4_post_sgl(phba, - lpfc_ncmd->dma_phys_sgl, - pdma_phys_sgl1, cur_xritag); - if (status) { - /* failure, put on abort nvme list */ - lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; - } else { - /* success, put on NVME buffer list */ - lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; - lpfc_ncmd->status = IOSTAT_SUCCESS; - num_posted++; - } - /* success, put on NVME buffer sgl list */ - list_add_tail(&lpfc_ncmd->list, &nvme_nblist); - } - } - - /* continue until a nembed page worth of sgls */ - if (post_cnt == 0) - continue; - - /* post block of NVME buffer list sgls */ - status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist, - post_cnt); - - /* don't reset xirtag due to hole in xri block */ - if (block_cnt == 0) - last_xritag = NO_XRI; - - /* reset NVME buffer post count for next round of posting */ - post_cnt = 0; - - /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ - while (!list_empty(&blck_nblist)) { - list_remove_head(&blck_nblist, lpfc_ncmd, - struct lpfc_nvme_buf, list); - if (status) { - /* failure, put on abort nvme list */ - lpfc_ncmd->flags |= LPFC_SBUF_XBUSY; - } else { - /* success, put on NVME 
buffer list */ - lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; - lpfc_ncmd->status = IOSTAT_SUCCESS; - num_posted++; - } - list_add_tail(&lpfc_ncmd->list, &nvme_nblist); - } - } - /* Push NVME buffers with sgl posted to the available list */ - while (!list_empty(&nvme_nblist)) { - list_remove_head(&nvme_nblist, lpfc_ncmd, - struct lpfc_nvme_buf, list); - lpfc_release_nvme_buf(phba, lpfc_ncmd); - } - return num_posted; -} - -/** - * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls - * @phba: pointer to lpfc hba data structure. - * - * This routine walks the list of nvme buffers that have been allocated and - * repost them to the port by using SGL block post. This is needed after a - * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine - * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list - * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers. - * - * Returns: 0 = success, non-zero failure. - **/ -int -lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba) -{ - LIST_HEAD(post_nblist); - int num_posted, rc = 0; - - /* get all NVME buffers need to repost to a local list */ - spin_lock_irq(&phba->nvme_buf_list_get_lock); - spin_lock(&phba->nvme_buf_list_put_lock); - list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist); - list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist); - phba->get_nvme_bufs = 0; - phba->put_nvme_bufs = 0; - spin_unlock(&phba->nvme_buf_list_put_lock); - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - - /* post the list of nvme buffer sgls to port if available */ - if (!list_empty(&post_nblist)) { - num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist, - phba->sli4_hba.nvme_xri_cnt); - /* failed to post any nvme buffer, return error */ - if (num_posted == 0) - rc = -EIO; - } - return rc; -} - -/** - * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec - * @vport: The virtual port for which this call being executed. - * @num_to_allocate: The requested number of buffers to allocate. + * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA + * @phba: The HBA for which this call is being executed. * - * This routine allocates nvme buffers for device with SLI-4 interface spec, - * the nvme buffer contains all the necessary information needed to initiate - * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put - * them on a list, it post them to the port by using SGL block post. + * This routine removes a nvme buffer from head of @hdwq io_buf_list + * and returns to caller. * * Return codes: - * int - number of nvme buffers that were allocated and posted. - * 0 = failure, less than num_to_alloc is a partial failure. 
+ * NULL - Error + * Pointer to lpfc_nvme_buf - Success **/ -static int -lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc) +static struct lpfc_io_buf * +lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + int idx, int expedite) { - struct lpfc_hba *phba = vport->phba; - struct lpfc_nvme_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_sli4_hdw_queue *qp; + struct sli4_sge *sgl; struct lpfc_iocbq *pwqeq; union lpfc_wqe128 *wqe; - struct sli4_sge *sgl; - dma_addr_t pdma_phys_sgl; - uint16_t iotag, lxri = 0; - int bcnt, num_posted; - LIST_HEAD(prep_nblist); - LIST_HEAD(post_nblist); - LIST_HEAD(nvme_nblist); - - for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { - lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL); - if (!lpfc_ncmd) - break; - /* - * Get memory from the pci pool to map the virt space to - * pci bus space for an I/O. The DMA buffer includes the - * number of SGE's necessary to support the sg_tablesize. - */ - lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, - GFP_KERNEL, - &lpfc_ncmd->dma_handle); - if (!lpfc_ncmd->data) { - kfree(lpfc_ncmd); - break; - } - lxri = lpfc_sli4_next_xritag(phba); - if (lxri == NO_XRI) { - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - lpfc_ncmd->data, lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - break; - } + lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite); + + if (lpfc_ncmd) { pwqeq = &(lpfc_ncmd->cur_iocbq); wqe = &pwqeq->wqe; - /* Allocate iotag for lpfc_ncmd->cur_iocbq. */ - iotag = lpfc_sli_next_iotag(phba, pwqeq); - if (iotag == 0) { - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - lpfc_ncmd->data, lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6121 Failed to allocated IOTAG for" - " XRI:0x%x\n", lxri); - lpfc_sli4_free_xri(phba, lxri); - break; - } - pwqeq->sli4_lxritag = lxri; - pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri]; - pwqeq->iocb_flag |= LPFC_IO_NVME; - pwqeq->context1 = lpfc_ncmd; + /* Setup key fields in buffer that may have been changed + * if other protocols used this buffer. + */ + pwqeq->iocb_flag = LPFC_IO_NVME; pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl; - - /* Initialize local short-hand pointers. */ - lpfc_ncmd->nvme_sgl = lpfc_ncmd->data; - sgl = lpfc_ncmd->nvme_sgl; - pdma_phys_sgl = lpfc_ncmd->dma_handle; - lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl; + lpfc_ncmd->start_time = jiffies; + lpfc_ncmd->flags = 0; /* Rsp SGE will be filled in when we rcv an IO * from the NVME Layer to be sent. * The cmd is going to be embedded so we need a SKIP SGE. 
*/ + sgl = lpfc_ncmd->dma_sgl; bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); /* Fill in word 3 / sgl_len during cmd submission */ - lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd; - /* Initialize WQE */ memset(wqe, 0, sizeof(union lpfc_wqe)); - /* add the nvme buffer to a post list */ - list_add_tail(&lpfc_ncmd->list, &post_nblist); - spin_lock_irq(&phba->nvme_buf_list_get_lock); - phba->sli4_hba.nvme_xri_cnt++; - spin_unlock_irq(&phba->nvme_buf_list_get_lock); - } - lpfc_printf_log(phba, KERN_INFO, LOG_NVME, - "6114 Allocate %d out of %d requested new NVME " - "buffers\n", bcnt, num_to_alloc); - - /* post the list of nvme buffer sgls to port if available */ - if (!list_empty(&post_nblist)) - num_posted = lpfc_post_nvme_sgl_list(phba, - &post_nblist, bcnt); - else - num_posted = 0; - - return num_posted; -} - -static inline struct lpfc_nvme_buf * -lpfc_nvme_buf(struct lpfc_hba *phba) -{ - struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next; - - list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, - &phba->lpfc_nvme_buf_list_get, list) { - list_del_init(&lpfc_ncmd->list); - phba->get_nvme_bufs--; - return lpfc_ncmd; - } - return NULL; -} - -/** - * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA - * @phba: The HBA for which this call is being executed. - * - * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list - * and returns to caller. - * - * Return codes: - * NULL - Error - * Pointer to lpfc_nvme_buf - Success - **/ -static struct lpfc_nvme_buf * -lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - int expedite) -{ - struct lpfc_nvme_buf *lpfc_ncmd = NULL; - unsigned long iflag = 0; + if (lpfc_ndlp_check_qdepth(phba, ndlp)) { + atomic_inc(&ndlp->cmd_pending); + lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH; + } - spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag); - if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) - lpfc_ncmd = lpfc_nvme_buf(phba); - if (!lpfc_ncmd) { - spin_lock(&phba->nvme_buf_list_put_lock); - list_splice(&phba->lpfc_nvme_buf_list_put, - &phba->lpfc_nvme_buf_list_get); - phba->get_nvme_bufs += phba->put_nvme_bufs; - INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put); - phba->put_nvme_bufs = 0; - spin_unlock(&phba->nvme_buf_list_put_lock); - if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) - lpfc_ncmd = lpfc_nvme_buf(phba); + } else { + qp = &phba->sli4_hba.hdwq[idx]; + qp->empty_io_bufs++; } - spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); - if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) { - atomic_inc(&ndlp->cmd_pending); - lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH; - } return lpfc_ncmd; } @@ -2385,22 +2000,23 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, * @lpfc_ncmd: The nvme buffer which is being released. * * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba - * lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer + * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted. 
**/ static void -lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) +lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) { + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; - if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp) + if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp) atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); - lpfc_ncmd->nonsg_phys = 0; lpfc_ncmd->ndlp = NULL; - lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH; + lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; + qp = lpfc_ncmd->hdwq; if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6310 XB release deferred for " @@ -2408,20 +2024,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->cur_iocbq.iotag); - spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, - iflag); + spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag); list_add_tail(&lpfc_ncmd->list, - &phba->sli4_hba.lpfc_abts_nvme_buf_list); - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, - iflag); - } else { - lpfc_ncmd->nvmeCmd = NULL; - lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME; - spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag); - list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put); - phba->put_nvme_bufs++; - spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag); - } + &qp->lpfc_abts_nvme_buf_list); + qp->abts_nvme_io_bufs++; + spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag); + } else + lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); } /** @@ -2448,8 +2057,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) struct nvme_fc_port_info nfcp_info; struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; - struct lpfc_nvme_ctrl_stat *cstat; - int len, i; /* Initialize this localport instance. The vport wwn usage ensures * that NPIV is accounted for. @@ -2464,12 +2071,13 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) * allocate + 3, one for cmd, one for rsp and one for this alignment */ lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; - lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel; - cstat = kmalloc((sizeof(struct lpfc_nvme_ctrl_stat) * - phba->cfg_nvme_io_channel), GFP_KERNEL); - if (!cstat) - return -ENOMEM; + /* Advertise how many hw queues we support based on fcp_io_sched */ + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) + lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; + else + lpfc_nvme_template.max_hw_queues = + phba->sli4_hba.num_present_cpu; /* localport is allocated from the stack, but the registration * call allocates heap memory as well as the private area. 
@@ -2493,7 +2101,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) lport = (struct lpfc_nvme_lport *)localport->private; vport->localport = localport; lport->vport = vport; - lport->cstat = cstat; vport->nvmei_support = 1; atomic_set(&lport->xmt_fcp_noxri, 0); @@ -2510,25 +2117,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) atomic_set(&lport->cmpl_ls_err, 0); atomic_set(&lport->fc4NvmeLsRequests, 0); atomic_set(&lport->fc4NvmeLsCmpls, 0); - - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { - cstat = &lport->cstat[i]; - atomic_set(&cstat->fc4NvmeInputRequests, 0); - atomic_set(&cstat->fc4NvmeOutputRequests, 0); - atomic_set(&cstat->fc4NvmeControlRequests, 0); - atomic_set(&cstat->fc4NvmeIoCmpls, 0); - } - - /* Don't post more new bufs if repost already recovered - * the nvme sgls. - */ - if (phba->sli4_hba.nvme_xri_cnt == 0) { - len = lpfc_new_nvme_buf(vport, - phba->sli4_hba.nvme_xri_max); - vport->phba->total_nvme_bufs += len; - } - } else { - kfree(cstat); } return ret; @@ -2591,7 +2179,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) #if (IS_ENABLED(CONFIG_NVME_FC)) struct nvme_fc_local_port *localport; struct lpfc_nvme_lport *lport; - struct lpfc_nvme_ctrl_stat *cstat; int ret; DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp); @@ -2600,7 +2187,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) localport = vport->localport; lport = (struct lpfc_nvme_lport *)localport->private; - cstat = lport->cstat; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6011 Destroying NVME localport %p\n", @@ -2617,7 +2203,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) */ lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp); vport->localport = NULL; - kfree(cstat); /* Regardless of the unregister upcall response, clear * nvmei_support. 
All rports are unregistered and the @@ -2909,27 +2494,28 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) **/ void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri) + struct sli4_wcqe_xri_aborted *axri, int idx) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); - struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd; struct nvmefc_fcp_req *nvme_cmd = NULL; struct lpfc_nodelist *ndlp; + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return; + qp = &phba->sli4_hba.hdwq[idx]; spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&qp->abts_nvme_buf_list_lock); list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd, - &phba->sli4_hba.lpfc_abts_nvme_buf_list, - list) { + &qp->lpfc_abts_nvme_buf_list, list) { if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) { list_del_init(&lpfc_ncmd->list); + qp->abts_nvme_io_bufs--; lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; lpfc_ncmd->status = IOSTAT_SUCCESS; - spin_unlock( - &phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); ndlp = lpfc_ncmd->ndlp; @@ -2955,7 +2541,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, return; } } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&qp->abts_nvme_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, @@ -2979,14 +2565,16 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; u32 i, wait_cnt = 0; - if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq) + if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) return; /* Cycle through all NVME rings and make sure all outstanding * WQEs have been removed from the txcmplqs. */ - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { - pring = phba->sli4_hba.nvme_wq[i]->pring; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (!phba->sli4_hba.hdwq[i].nvme_wq) + continue; + pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; if (!pring) continue; diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index b234d0298994..593c48ff634e 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -30,6 +30,9 @@ #define LPFC_NVME_FB_SHIFT 9 #define LPFC_NVME_MAX_FB (1 << 20) /* 1M */ +#define LPFC_MAX_NVME_INFO_TMP_LEN 100 +#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n" + #define lpfc_ndlp_get_nrport(ndlp) \ ((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG)) \ ? NULL : ndlp->nrport) @@ -40,19 +43,11 @@ struct lpfc_nvme_qhandle { uint32_t cpu_id; /* current cpu id at time of create */ }; -struct lpfc_nvme_ctrl_stat { - atomic_t fc4NvmeInputRequests; - atomic_t fc4NvmeOutputRequests; - atomic_t fc4NvmeControlRequests; - atomic_t fc4NvmeIoCmpls; -}; - /* Declare nvme-based local and remote port definitions. 
*/ struct lpfc_nvme_lport { struct lpfc_vport *vport; struct completion *lport_unreg_cmp; /* Add stats counters here */ - struct lpfc_nvme_ctrl_stat *cstat; atomic_t fc4NvmeLsRequests; atomic_t fc4NvmeLsCmpls; atomic_t xmt_fcp_noxri; @@ -76,57 +71,6 @@ struct lpfc_nvme_rport { struct completion rport_unreg_done; }; -struct lpfc_nvme_buf { - struct list_head list; - struct nvmefc_fcp_req *nvmeCmd; - struct lpfc_nvme_rport *nrport; - struct lpfc_nodelist *ndlp; - - uint32_t timeout; - - uint16_t flags; /* TBD convert exch_busy to flags */ -#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ -#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ - uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ - uint16_t status; /* From IOCB Word 7- ulpStatus */ - uint16_t cpu; - uint16_t qidx; - uint16_t sqid; - uint32_t result; /* From IOCB Word 4. */ - - uint32_t seg_cnt; /* Number of scatter-gather segments returned by - * dma_map_sg. The driver needs this for calls - * to dma_unmap_sg. - */ - dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ - - /* - * data and dma_handle are the kernel virtual and bus address of the - * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter - * gather bde list that supports the sg_tablesize value. - */ - void *data; - dma_addr_t dma_handle; - - struct sli4_sge *nvme_sgl; - dma_addr_t dma_phys_sgl; - - /* cur_iocbq has phys of the dma-able buffer. - * Iotag is in here - */ - struct lpfc_iocbq cur_iocbq; - - wait_queue_head_t *waitq; - unsigned long start_time; -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - uint64_t ts_cmd_start; - uint64_t ts_last_cmd; - uint64_t ts_cmd_wqput; - uint64_t ts_isr_cmpl; - uint64_t ts_data_nvme; -#endif -}; - struct lpfc_nvme_fcpreq_priv { - struct lpfc_nvme_buf *nvme_buf; + struct lpfc_io_buf *nvme_buf; }; diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 95fee83090eb..361e2b103648 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channsel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -73,6 +73,9 @@ static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *, uint32_t, uint16_t); static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_nvmet_rcv_ctx *); +static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *); + +static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf); static union lpfc_wqe128 lpfc_tsend_cmd_template; static union lpfc_wqe128 lpfc_treceive_cmd_template; @@ -220,21 +223,19 @@ lpfc_nvmet_cmd_template(void) void lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp) { - unsigned long iflag; + lockdep_assert_held(&ctxp->ctxlock); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6313 NVMET Defer ctx release xri x%x flg x%x\n", ctxp->oxid, ctxp->flag); - spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); - if (ctxp->flag & LPFC_NVMET_CTX_RLS) { - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, - iflag); + if (ctxp->flag & LPFC_NVMET_CTX_RLS) return; - } + ctxp->flag |= LPFC_NVMET_CTX_RLS; + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list); - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); } /** @@ -325,7 +326,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) struct rqb_dmabuf *nvmebuf; struct lpfc_nvmet_ctx_info *infop; uint32_t *payload; - uint32_t size, oxid, sid, rc; + uint32_t size, oxid, sid; int cpu; unsigned long iflag; @@ -341,6 +342,20 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) "6411 NVMET free, already free IO x%x: %d %d\n", ctxp->oxid, ctxp->state, ctxp->entry_cnt); } + + if (ctxp->rqb_buffer) { + nvmebuf = ctxp->rqb_buffer; + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->rqb_buffer = NULL; + if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) { + ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + } else { + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ + } + } ctxp->state = LPFC_NVMET_STE_FREE; spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag); @@ -388,46 +403,30 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) } #endif atomic_inc(&tgtp->rcv_fcp_cmd_in); - /* - * The calling sequence should be: - * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done - * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. - * When we return from nvmet_fc_rcv_fcp_req, all relevant info - * the NVME command / FC header is stored. - * A buffer has already been reposted for this IO, so just free - * the nvmebuf. 
- */ - rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, - payload, size); - /* Process FCP command */ - if (rc == 0) { - ctxp->rqb_buffer = NULL; - atomic_inc(&tgtp->rcv_fcp_cmd_out); - nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); - return; - } + /* flag new work queued, replacement buffer has already + * been reposted + */ + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); - /* Processing of FCP command is deferred */ - if (rc == -EOVERFLOW) { - lpfc_nvmeio_data(phba, - "NVMET RCV BUSY: xri x%x sz %d " - "from %06x\n", - oxid, size, sid); - atomic_inc(&tgtp->rcv_fcp_cmd_out); - return; + if (!queue_work(phba->wq, &ctx_buf->defer_work)) { + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME, + "6181 Unable to queue deferred work " + "for oxid x%x. " + "FCP Drop IO [x%x x%x x%x]\n", + ctxp->oxid, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); } - atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", - ctxp->oxid, rc, - atomic_read(&tgtp->rcv_fcp_cmd_in), - atomic_read(&tgtp->rcv_fcp_cmd_out), - atomic_read(&tgtp->xmt_fcp_release)); - - lpfc_nvmet_defer_release(phba, ctxp); - lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); - nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); return; } spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag); @@ -744,16 +743,6 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, ktime_get_ns(); } } - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { - id = smp_processor_id(); - if (ctxp->cpu != id) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6703 CPU Check cmpl: " - "cpu %d expect %d\n", - id, ctxp->cpu); - if (ctxp->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_cmpl_io[id]++; - } #endif rsp->done(rsp); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -771,19 +760,22 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, ctxp->ts_isr_data = cmdwqe->isr_timestamp; ctxp->ts_data_nvme = ktime_get_ns(); } - if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { - id = smp_processor_id(); +#endif + rsp->done(rsp); + } +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { + id = smp_processor_id(); + if (id < LPFC_CHECK_CPU_CNT) { if (ctxp->cpu != id) - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, "6704 CPU Check cmdcmpl: " "cpu %d expect %d\n", id, ctxp->cpu); - if (ctxp->cpu < LPFC_CHECK_CPU_CNT) - phba->cpucheck_ccmpl_io[id]++; + phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++; } -#endif - rsp->done(rsp); } +#endif } static int @@ -852,7 +844,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport, lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n", ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen); - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq); + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); if (rc == WQE_SUCCESS) { /* * Okay to repost buffer here, but wait till cmpl @@ -908,18 +900,22 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, else ctxp->ts_nvme_data = ktime_get_ns(); } + + /* Setup the hdw 
queue if not already set */ + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid]; + if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) { int id = smp_processor_id(); - ctxp->cpu = id; - if (id < LPFC_CHECK_CPU_CNT) - phba->cpucheck_xmt_io[id]++; - if (rsp->hwqid != id) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6705 CPU Check OP: " - "cpu %d expect %d\n", - id, rsp->hwqid); - ctxp->cpu = rsp->hwqid; + if (id < LPFC_CHECK_CPU_CNT) { + if (rsp->hwqid != id) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6705 CPU Check OP: " + "cpu %d expect %d\n", + id, rsp->hwqid); + phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++; } + ctxp->cpu = id; /* Setup cpu for cmpl check */ } #endif @@ -954,7 +950,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, ctxp->oxid, rsp->op, rsp->rsplen); ctxp->flag |= LPFC_NVMET_IO_INP; - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); if (rc == WQE_SUCCESS) { #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (!ctxp->ts_cmd_nvme) @@ -973,7 +969,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, * WQE release CQE */ ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; - wq = phba->sli4_hba.nvme_wq[rsp->hwqid]; + wq = ctxp->hdwq->nvme_wq; pring = wq->pring; spin_lock_irqsave(&pring->ring_lock, iflags); list_add_tail(&nvmewqeq->list, &wq->wqfull_list); @@ -1024,6 +1020,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, if (phba->pport->load_flag & FC_UNLOADING) return; + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[0]; + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6103 NVMET Abort op: oxri x%x flg x%x ste %d\n", ctxp->oxid, ctxp->flag, ctxp->state); @@ -1034,7 +1033,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, atomic_inc(&lpfc_nvmep->xmt_fcp_abort); spin_lock_irqsave(&ctxp->ctxlock, flags); - ctxp->state = LPFC_NVMET_STE_ABORT; /* Since iaab/iaar are NOT set, we need to check * if the firmware is in process of aborting IO @@ -1046,13 +1044,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, ctxp->flag |= LPFC_NVMET_ABORT_OP; if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) { + spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); - wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx]; - spin_unlock_irqrestore(&ctxp->ctxlock, flags); + wq = ctxp->hdwq->nvme_wq; lpfc_nvmet_wqfull_flush(phba, wq, ctxp); return; } + spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* An state of LPFC_NVMET_STE_RCV means we have just received * the NVME command and have not started processing it. 
@@ -1064,7 +1063,6 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, else lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); - spin_unlock_irqrestore(&ctxp->ctxlock, flags); } static void @@ -1078,14 +1076,18 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport, unsigned long flags; bool aborting = false; - if (ctxp->state != LPFC_NVMET_STE_DONE && - ctxp->state != LPFC_NVMET_STE_ABORT) { + spin_lock_irqsave(&ctxp->ctxlock, flags); + if (ctxp->flag & LPFC_NVMET_XBUSY) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6027 NVMET release with XBUSY flag x%x" + " oxid x%x\n", + ctxp->flag, ctxp->oxid); + else if (ctxp->state != LPFC_NVMET_STE_DONE && + ctxp->state != LPFC_NVMET_STE_ABORT) lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, "6413 NVMET release bad state %d %d oxid x%x\n", ctxp->state, ctxp->entry_cnt, ctxp->oxid); - } - spin_lock_irqsave(&ctxp->ctxlock, flags); if ((ctxp->flag & LPFC_NVMET_ABORT_OP) || (ctxp->flag & LPFC_NVMET_XBUSY)) { aborting = true; @@ -1114,6 +1116,8 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req); struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; struct lpfc_hba *phba = ctxp->phba; + unsigned long iflag; + lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", ctxp->oxid, ctxp->size, smp_processor_id()); @@ -1132,6 +1136,9 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, /* Free the nvmebuf since a new buffer already replaced it */ nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); + spin_lock_irqsave(&ctxp->ctxlock, iflag); + ctxp->rqb_buffer = NULL; + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } static struct nvmet_fc_target_template lpfc_tgttemplate = { @@ -1163,9 +1170,9 @@ __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba, spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags); list_for_each_entry_safe(ctx_buf, next_ctx_buf, &infop->nvmet_ctx_list, list) { - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctx_buf->list); - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag); ctx_buf->sglq->state = SGL_FREED; @@ -1195,9 +1202,9 @@ lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba) /* Cycle the the entire CPU context list for every MRQ */ for (i = 0; i < phba->cfg_nvmet_mrq; i++) { - for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) { + for_each_present_cpu(j) { + infop = lpfc_get_ctx_list(phba, j, i); __lpfc_nvmet_clean_io_for_cpu(phba, infop); - infop++; /* next */ } } kfree(phba->sli4_hba.nvmet_ctx_info); @@ -1212,14 +1219,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) union lpfc_wqe128 *wqe; struct lpfc_nvmet_ctx_info *last_infop; struct lpfc_nvmet_ctx_info *infop; - int i, j, idx; + int i, j, idx, cpu; lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6403 Allocate NVMET resources for %d XRIs\n", phba->sli4_hba.nvmet_xri_cnt); phba->sli4_hba.nvmet_ctx_info = kcalloc( - phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq, + phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq, sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL); if (!phba->sli4_hba.nvmet_ctx_info) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -1247,13 +1254,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) * of the IO completion. Thus a context that was allocated for MRQ A * whose IO completed on CPU B will be freed to cpuB/mrqA. 
*/ - infop = phba->sli4_hba.nvmet_ctx_info; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for_each_possible_cpu(i) { for (j = 0; j < phba->cfg_nvmet_mrq; j++) { + infop = lpfc_get_ctx_list(phba, i, j); INIT_LIST_HEAD(&infop->nvmet_ctx_list); spin_lock_init(&infop->nvmet_ctx_list_lock); infop->nvmet_ctx_list_cnt = 0; - infop++; } } @@ -1263,8 +1269,10 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) * MRQ 1 cycling thru CPUs 0 - X, and so on. */ for (j = 0; j < phba->cfg_nvmet_mrq; j++) { - last_infop = lpfc_get_ctx_list(phba, 0, j); - for (i = phba->sli4_hba.num_present_cpu - 1; i >= 0; i--) { + last_infop = lpfc_get_ctx_list(phba, + cpumask_first(cpu_present_mask), + j); + for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) { infop = lpfc_get_ctx_list(phba, i, j); infop->nvmet_ctx_next_cpu = last_infop; last_infop = infop; @@ -1275,6 +1283,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) * received command on a per xri basis. */ idx = 0; + cpu = cpumask_first(cpu_present_mask); for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) { ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL); if (!ctx_buf) { @@ -1322,13 +1331,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) "6407 Ran out of NVMET XRIs\n"); return -ENOMEM; } + INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work); /* * Add ctx to MRQidx context list. Our initial assumption * is MRQidx will be associated with CPUidx. This association * can change on the fly. */ - infop = lpfc_get_ctx_list(phba, idx, idx); + infop = lpfc_get_ctx_list(phba, cpu, idx); spin_lock(&infop->nvmet_ctx_list_lock); list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list); infop->nvmet_ctx_list_cnt++; @@ -1336,11 +1346,18 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) /* Spread ctx structures evenly across all MRQs */ idx++; - if (idx >= phba->cfg_nvmet_mrq) + if (idx >= phba->cfg_nvmet_mrq) { idx = 0; + cpu = cpumask_first(cpu_present_mask); + continue; + } + cpu = cpumask_next(cpu, cpu_present_mask); + if (cpu == nr_cpu_ids) + cpu = cpumask_first(cpu_present_mask); + } - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for_each_present_cpu(i) { for (j = 0; j < phba->cfg_nvmet_mrq; j++) { infop = lpfc_get_ctx_list(phba, i, j); lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, @@ -1378,7 +1395,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) * allocate + 3, one for cmd, one for rsp and one for this alignment */ lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; - lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel; + lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue; lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP; #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) @@ -1503,13 +1520,14 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, } spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; + spin_lock(&ctxp->ctxlock); /* Check if we already received a free context call * and we have completed processing an abort situation. 
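The setup hunk above drops the pointer-arithmetic walk (infop++) in favour of per-(CPU, MRQ) lookups via lpfc_get_ctx_list(), and spreads the receive contexts round-robin across the CPUs that are actually present. A small sketch of that round-robin using the same cpumask helpers; the item type and the placement helper are hypothetical stand-ins:

    #include <linux/cpumask.h>

    /* Hypothetical helper: attach item @i to the per-CPU list for @cpu. */
    static void place_item_on_cpu(int i, int cpu)
    {
    }

    static void spread_items(int nitems)
    {
        int cpu = cpumask_first(cpu_present_mask);
        int i;

        for (i = 0; i < nitems; i++) {
            place_item_on_cpu(i, cpu);

            cpu = cpumask_next(cpu, cpu_present_mask);
            if (cpu >= nr_cpu_ids)      /* wrapped past the last present CPU */
                cpu = cpumask_first(cpu_present_mask);
        }
    }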
*/ @@ -1519,7 +1537,8 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, released = true; } ctxp->flag &= ~LPFC_NVMET_XBUSY; - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&ctxp->ctxlock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); rrq_empty = list_empty(&phba->active_rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -1543,14 +1562,13 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, lpfc_worker_wake_up(phba); return; } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); } int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, struct fc_frame_header *fc_hdr) - { #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_hba *phba = vport->phba; @@ -1562,14 +1580,14 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, xri = be16_to_cpu(fc_hdr->fh_ox_id); spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_for_each_entry_safe(ctxp, next_ctxp, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list, list) { if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); spin_lock_irqsave(&ctxp->ctxlock, iflag); @@ -1590,7 +1608,7 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; } - spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n", @@ -1658,6 +1676,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) struct lpfc_sli_ring *pring; struct lpfc_iocbq *nvmewqeq; + struct lpfc_nvmet_rcv_ctx *ctxp; unsigned long iflags; int rc; @@ -1671,7 +1690,8 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, list); spin_unlock_irqrestore(&pring->ring_lock, iflags); - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq); + ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2; + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq); spin_lock_irqsave(&pring->ring_lock, iflags); if (rc == -EBUSY) { /* WQ was full again, so put it back on the list */ @@ -1699,8 +1719,8 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) return; if (phba->targetport) { tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) { - wq = phba->sli4_hba.nvme_wq[qidx]; + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + wq = phba->sli4_hba.hdwq[qidx].nvme_wq; lpfc_nvmet_wqfull_flush(phba, wq, NULL); } tgtp->tport_unreg_cmp = &tport_unreg_cmp; @@ -1775,6 +1795,7 @@ dropit: ctxp->state = LPFC_NVMET_STE_LS_RCV; ctxp->entry_cnt = 1; ctxp->rqb_buffer = (void *)nvmebuf; + ctxp->hdwq = &phba->sli4_hba.hdwq[0]; lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n", oxid, size, sid); @@ -1814,6 +1835,86 @@ dropit: #endif } +static void +lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context; + struct lpfc_hba *phba = ctxp->phba; + struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer; + struct lpfc_nvmet_tgtport *tgtp; + uint32_t 
*payload; + uint32_t rc; + unsigned long iflags; + + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "6159 process_rcv_fcp_req, nvmebuf is NULL, " + "oxid: x%x flg: x%x state: x%x\n", + ctxp->oxid, ctxp->flag, ctxp->state); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); + return; + } + + payload = (uint32_t *)(nvmebuf->dbuf.virt); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + /* + * The calling sequence should be: + * nvmet_fc_rcv_fcp_req->lpfc_nvmet_xmt_fcp_op/cmp- req->done + * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. + * When we return from nvmet_fc_rcv_fcp_req, all relevant info + * the NVME command / FC header is stored. + * A buffer has already been reposted for this IO, so just free + * the nvmebuf. + */ + rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, + payload, ctxp->size); + /* Process FCP command */ + if (rc == 0) { + atomic_inc(&tgtp->rcv_fcp_cmd_out); + return; + } + + /* Processing of FCP command is deferred */ + if (rc == -EOVERFLOW) { + lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d " + "from %06x\n", + ctxp->oxid, ctxp->size, ctxp->sid); + atomic_inc(&tgtp->rcv_fcp_cmd_out); + atomic_inc(&tgtp->defer_fod); + return; + } + atomic_inc(&tgtp->rcv_fcp_cmd_drop); + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", + ctxp->oxid, rc, + atomic_read(&tgtp->rcv_fcp_cmd_in), + atomic_read(&tgtp->rcv_fcp_cmd_out), + atomic_read(&tgtp->xmt_fcp_release)); + lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", + ctxp->oxid, ctxp->size, ctxp->sid); + spin_lock_irqsave(&ctxp->ctxlock, iflags); + lpfc_nvmet_defer_release(phba, ctxp); + spin_unlock_irqrestore(&ctxp->ctxlock, iflags); + lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); +#endif +} + +static void +lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work) +{ +#if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) + struct lpfc_nvmet_ctxbuf *ctx_buf = + container_of(work, struct lpfc_nvmet_ctxbuf, defer_work); + + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); +#endif +} + static struct lpfc_nvmet_ctxbuf * lpfc_nvmet_replenish_context(struct lpfc_hba *phba, struct lpfc_nvmet_ctx_info *current_infop) @@ -1838,7 +1939,7 @@ lpfc_nvmet_replenish_context(struct lpfc_hba *phba, else get_infop = current_infop->nvmet_ctx_next_cpu; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) { if (get_infop == current_infop) { get_infop = get_infop->nvmet_ctx_next_cpu; continue; @@ -1896,12 +1997,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf; struct lpfc_nvmet_ctx_info *current_infop; uint32_t *payload; - uint32_t size, oxid, sid, rc, qno; + uint32_t size, oxid, sid, qno; unsigned long iflag; int current_cpu; -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - uint32_t id; -#endif if (!IS_ENABLED(CONFIG_NVME_TARGET_FC)) return; @@ -1910,11 +2008,9 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, if (!nvmebuf || !phba->targetport) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, "6157 NVMET FCP Drop IO\n"); - oxid = 0; - size = 0; - sid = 0; - ctxp = NULL; - goto dropit; + if (nvmebuf) + lpfc_rq_buf_free(phba, &nvmebuf->hbuf); + return; } /* @@ -1942,9 +2038,14 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if 
(phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) { - id = smp_processor_id(); - if (id < LPFC_CHECK_CPU_CNT) - phba->cpucheck_rcv_io[id]++; + if (current_cpu < LPFC_CHECK_CPU_CNT) { + if (idx != current_cpu) + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6703 CPU Check rcv: " + "cpu %d expect %d\n", + current_cpu, idx); + phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++; + } } #endif @@ -1995,6 +2096,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctxp->flag = 0; ctxp->ctxbuf = ctx_buf; ctxp->rqb_buffer = (void *)nvmebuf; + ctxp->hdwq = NULL; spin_lock_init(&ctxp->ctxlock); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -2015,67 +2117,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, #endif atomic_inc(&tgtp->rcv_fcp_cmd_in); - /* - * The calling sequence should be: - * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done - * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp. - * When we return from nvmet_fc_rcv_fcp_req, all relevant info in - * the NVME command / FC header is stored, so we are free to repost - * the buffer. - */ - rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req, - payload, size); - - /* Process FCP command */ - if (rc == 0) { - ctxp->rqb_buffer = NULL; - atomic_inc(&tgtp->rcv_fcp_cmd_out); - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ - return; - } - - /* Processing of FCP command is deferred */ - if (rc == -EOVERFLOW) { - /* - * Post a brand new DMA buffer to RQ and defer - * freeing rcv buffer till .defer_rcv callback - */ - qno = nvmebuf->idx; - lpfc_post_rq_buffer( - phba, phba->sli4_hba.nvmet_mrq_hdr[qno], - phba->sli4_hba.nvmet_mrq_data[qno], 1, qno); - - lpfc_nvmeio_data(phba, - "NVMET RCV BUSY: xri x%x sz %d from %06x\n", - oxid, size, sid); - atomic_inc(&tgtp->rcv_fcp_cmd_out); - atomic_inc(&tgtp->defer_fod); - return; - } - ctxp->rqb_buffer = nvmebuf; - - atomic_inc(&tgtp->rcv_fcp_cmd_drop); - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n", - ctxp->oxid, rc, - atomic_read(&tgtp->rcv_fcp_cmd_in), - atomic_read(&tgtp->rcv_fcp_cmd_out), - atomic_read(&tgtp->xmt_fcp_release)); -dropit: - lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n", - oxid, size, sid); - if (oxid) { - lpfc_nvmet_defer_release(phba, ctxp); - lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid); - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ - return; - } - - if (ctx_buf) - lpfc_nvmet_ctxbuf_post(phba, ctx_buf); - - if (nvmebuf) - lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */ + lpfc_nvmet_process_rcv_fcp_req(ctx_buf); } /** @@ -2660,15 +2702,17 @@ lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, if (ctxp->flag & LPFC_NVMET_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->state = LPFC_NVMET_STE_DONE; /* Check if we already received a free context call * and we have completed processing an abort situation. 
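The receive path above now funnels into the new lpfc_nvmet_process_rcv_fcp_req(), which the earlier setup hunk also wires to a work item (INIT_WORK on defer_work) so deferred commands can be handled later from workqueue context. A minimal sketch of that pattern, with stand-in names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct ctx_buf {
        struct work_struct defer_work;
        void *payload;
    };

    /* Stand-in for the real processing routine. */
    static void process_rcv_req(struct ctx_buf *cb)
    {
    }

    static void rqst_defer_work(struct work_struct *work)
    {
        /* recover the owning context buffer from the work item */
        struct ctx_buf *cb = container_of(work, struct ctx_buf, defer_work);

        process_rcv_req(cb);
    }

    static void ctx_buf_init(struct ctx_buf *cb)
    {
        INIT_WORK(&cb->defer_work, rqst_defer_work);    /* queued later with queue_work() */
    }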
*/ - spin_lock_irqsave(&ctxp->ctxlock, flags); if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && !(ctxp->flag & LPFC_NVMET_XBUSY)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVMET_ABORT_OP; @@ -2734,6 +2778,7 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, } tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; + spin_lock_irqsave(&ctxp->ctxlock, flags); if (ctxp->flag & LPFC_NVMET_ABORT_OP) atomic_inc(&tgtp->xmt_fcp_abort_cmpl); @@ -2748,10 +2793,11 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, * and we have completed processing an abort situation. */ ctxp->state = LPFC_NVMET_STE_DONE; - spin_lock_irqsave(&ctxp->ctxlock, flags); if ((ctxp->flag & LPFC_NVMET_CTX_RLS) && !(ctxp->flag & LPFC_NVMET_XBUSY)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVMET_ABORT_OP; @@ -2957,12 +3003,15 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE); /* No failure to an ABTS request. */ + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } /* Issue ABTS for this WQE based on iotag */ ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba); + spin_lock_irqsave(&ctxp->ctxlock, flags); if (!ctxp->abort_wqeq) { atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, @@ -2970,11 +3019,13 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, "xri: x%x\n", ctxp->oxid); /* No failure to an ABTS request. */ ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } abts_wqeq = ctxp->abort_wqeq; abts_wqe = &abts_wqeq->wqe; ctxp->state = LPFC_NVMET_STE_ABORT; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* Announce entry to new IO submit field. */ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, @@ -2995,7 +3046,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, "NVME Req now. 
hba_flag x%x oxid x%x\n", phba->hba_flag, ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } @@ -3008,7 +3061,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, "still pending on oxid x%x\n", ctxp->oxid); lpfc_sli_release_iocbq(phba, abts_wqeq); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); return 0; } @@ -3052,7 +3107,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, abts_wqeq->iocb_flag |= LPFC_IO_NVME; abts_wqeq->context2 = ctxp; abts_wqeq->vport = phba->pport; - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; + + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { atomic_inc(&tgtp->xmt_abort_sol); @@ -3060,7 +3118,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, } atomic_inc(&tgtp->xmt_abort_rsp_error); + spin_lock_irqsave(&ctxp->ctxlock, flags); ctxp->flag &= ~LPFC_NVMET_ABORT_OP; + spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_sli_release_iocbq(phba, abts_wqeq); lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6166 Failed ABORT issue_wqe with status x%x " @@ -3069,7 +3129,6 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, return 1; } - static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp, @@ -3078,6 +3137,7 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, struct lpfc_nvmet_tgtport *tgtp; struct lpfc_iocbq *abts_wqeq; unsigned long flags; + bool released = false; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -3104,7 +3164,10 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp; abts_wqeq->iocb_cmpl = NULL; abts_wqeq->iocb_flag |= LPFC_IO_NVMET; - rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq); + if (!ctxp->hdwq) + ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx]; + + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { return 0; @@ -3112,8 +3175,12 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, aerr: spin_lock_irqsave(&ctxp->ctxlock, flags); - if (ctxp->flag & LPFC_NVMET_CTX_RLS) + if (ctxp->flag & LPFC_NVMET_CTX_RLS) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); + released = true; + } ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS); spin_unlock_irqrestore(&ctxp->ctxlock, flags); @@ -3121,7 +3188,8 @@ aerr: lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, "6135 Failed to Issue ABTS for oxid x%x. 
Status x%x\n", ctxp->oxid, rc); - lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); + if (released) + lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); return 1; } @@ -3173,7 +3241,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; abts_wqeq->iocb_cmpl = 0; abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; - rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq); + rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); if (rc == WQE_SUCCESS) { atomic_inc(&tgtp->xmt_abort_unsol); diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 0ec1082ce7ef..368deea2bcf8 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -137,9 +137,11 @@ struct lpfc_nvmet_rcv_ctx { #define LPFC_NVMET_XBUSY 0x4 /* XB bit set on IO cmpl */ #define LPFC_NVMET_CTX_RLS 0x8 /* ctx free requested */ #define LPFC_NVMET_ABTS_RCV 0x10 /* ABTS received on exchange */ +#define LPFC_NVMET_CTX_REUSE_WQ 0x20 /* ctx reused via WQ */ #define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */ struct rqb_dmabuf *rqb_buffer; struct lpfc_nvmet_ctxbuf *ctxbuf; + struct lpfc_sli4_hdw_queue *hdwq; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint64_t ts_isr_cmd; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index b4f1a840b3b4..c98f264f1d83 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -83,9 +83,9 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) } static void -lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static void -lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); @@ -180,9 +180,9 @@ lpfc_cmd_guard_csum(struct scsi_cmnd *sc) **/ static void lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) + struct lpfc_io_buf *lpfc_cmd) { - struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; if (sgl) { sgl += 1; sgl->word2 = le32_to_cpu(sgl->word2); @@ -200,7 +200,7 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, * function updates the statistical data for the command completion. 
**/ static void -lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; @@ -389,12 +389,12 @@ static int lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) { struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *psb; + struct lpfc_io_buf *psb; struct ulp_bde64 *bpl; IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl; + dma_addr_t pdma_phys_sgl; uint16_t iotag; int bcnt, bpl_size; @@ -408,7 +408,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { - psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); + psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL); if (!psb) break; @@ -438,14 +438,14 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) psb->fcp_cmnd = psb->data; psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); - psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + + psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp); /* Initialize local short-hand pointers. */ - bpl = psb->fcp_bpl; + bpl = (struct ulp_bde64 *)psb->dma_sgl; pdma_phys_fcp_cmd = psb->dma_handle; pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); - pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + + pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp); /* @@ -496,9 +496,9 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); iocb->un.fcpi64.bdl.addrLow = - putPaddrLow(pdma_phys_bpl); + putPaddrLow(pdma_phys_sgl); iocb->un.fcpi64.bdl.addrHigh = - putPaddrHigh(pdma_phys_bpl); + putPaddrHigh(pdma_phys_sgl); iocb->ulpBdeCount = 1; iocb->ulpLe = 1; } @@ -506,6 +506,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) psb->status = IOSTAT_SUCCESS; /* Put it back into the SCSI buffer list */ psb->cur_iocbq.context1 = psb; + spin_lock_init(&psb->buf_lock); lpfc_release_scsi_buf_s3(phba, psb); } @@ -524,20 +525,27 @@ void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *psb, *next_psb; + struct lpfc_io_buf *psb, *next_psb; + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; + int idx; - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) + if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; + spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); - list_for_each_entry_safe(psb, next_psb, - &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { - if (psb->rdata && psb->rdata->pnode - && psb->rdata->pnode->vport == vport) - psb->rdata = NULL; + for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { + qp = &phba->sli4_hba.hdwq[idx]; + + spin_lock(&qp->abts_scsi_buf_list_lock); + list_for_each_entry_safe(psb, next_psb, + &qp->lpfc_abts_scsi_buf_list, list) { + if (psb->rdata && psb->rdata->pnode && + psb->rdata->pnode->vport == vport) + psb->rdata = NULL; + } + spin_unlock(&qp->abts_scsi_buf_list_lock); } - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); } @@ -551,11 +559,12 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) **/ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri) + struct 
sli4_wcqe_xri_aborted *axri, int idx) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); - struct lpfc_scsi_buf *psb, *next_psb; + struct lpfc_io_buf *psb, *next_psb; + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; struct lpfc_iocbq *iocbq; int i; @@ -565,16 +574,19 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; + + qp = &phba->sli4_hba.hdwq[idx]; spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_lock(&qp->abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, - &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + &qp->lpfc_abts_scsi_buf_list, list) { if (psb->cur_iocbq.sli4_xritag == xri) { list_del(&psb->list); + qp->abts_scsi_io_bufs--; psb->exch_busy = 0; psb->status = IOSTAT_SUCCESS; spin_unlock( - &phba->sli4_hba.abts_scsi_buf_list_lock); + &qp->abts_scsi_buf_list_lock); if (psb->rdata && psb->rdata->pnode) ndlp = psb->rdata->pnode; else @@ -593,7 +605,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, return; } } - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock(&qp->abts_scsi_buf_list_lock); for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; @@ -602,7 +614,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, continue; if (iocbq->sli4_xritag != xri) continue; - psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); + psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); psb->exch_busy = 0; spin_unlock_irqrestore(&phba->hbalock, iflag); if (!list_empty(&pring->txq)) @@ -613,359 +625,6 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, spin_unlock_irqrestore(&phba->hbalock, iflag); } -/** - * lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list - * @phba: pointer to lpfc hba data structure. - * @post_sblist: pointer to the scsi buffer list. - * - * This routine walks a list of scsi buffers that was passed in. It attempts - * to construct blocks of scsi buffer sgls which contains contiguous xris and - * uses the non-embedded SGL block post mailbox commands to post to the port. - * For single SCSI buffer sgl with non-contiguous xri, if any, it shall use - * embedded SGL post mailbox command for posting. The @post_sblist passed in - * must be local list, thus no lock is needed when manipulate the list. - * - * Returns: 0 = failure, non-zero number of successfully posted buffers. 
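The hunks above and below replace the single adapter-wide abts_scsi_buf_list and its lock with a list, lock and counter per hardware queue, so abort bookkeeping for one queue no longer contends with the others. A simplified sketch of the per-queue list; field names are stand-ins:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct hwq_abts {
        spinlock_t abts_buf_list_lock;      /* protects only this queue's list */
        struct list_head abts_buf_list;
        unsigned int abts_io_bufs;          /* per-queue count, no global lock needed */
    };

    static void queue_aborted_buf(struct hwq_abts *qp, struct list_head *buf)
    {
        unsigned long flags;

        spin_lock_irqsave(&qp->abts_buf_list_lock, flags);
        list_add_tail(buf, &qp->abts_buf_list);
        qp->abts_io_bufs++;
        spin_unlock_irqrestore(&qp->abts_buf_list_lock, flags);
    }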
- **/ -static int -lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, - struct list_head *post_sblist, int sb_count) -{ - struct lpfc_scsi_buf *psb, *psb_next; - int status, sgl_size; - int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; - dma_addr_t pdma_phys_bpl1; - int last_xritag = NO_XRI; - LIST_HEAD(prep_sblist); - LIST_HEAD(blck_sblist); - LIST_HEAD(scsi_sblist); - - /* sanity check */ - if (sb_count <= 0) - return -EINVAL; - - sgl_size = phba->cfg_sg_dma_buf_size - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); - - list_for_each_entry_safe(psb, psb_next, post_sblist, list) { - list_del_init(&psb->list); - block_cnt++; - if ((last_xritag != NO_XRI) && - (psb->cur_iocbq.sli4_xritag != last_xritag + 1)) { - /* a hole in xri block, form a sgl posting block */ - list_splice_init(&prep_sblist, &blck_sblist); - post_cnt = block_cnt - 1; - /* prepare list for next posting block */ - list_add_tail(&psb->list, &prep_sblist); - block_cnt = 1; - } else { - /* prepare list for next posting block */ - list_add_tail(&psb->list, &prep_sblist); - /* enough sgls for non-embed sgl mbox command */ - if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { - list_splice_init(&prep_sblist, &blck_sblist); - post_cnt = block_cnt; - block_cnt = 0; - } - } - num_posting++; - last_xritag = psb->cur_iocbq.sli4_xritag; - - /* end of repost sgl list condition for SCSI buffers */ - if (num_posting == sb_count) { - if (post_cnt == 0) { - /* last sgl posting block */ - list_splice_init(&prep_sblist, &blck_sblist); - post_cnt = block_cnt; - } else if (block_cnt == 1) { - /* last single sgl with non-contiguous xri */ - if (sgl_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = psb->dma_phys_bpl + - SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; - status = lpfc_sli4_post_sgl(phba, - psb->dma_phys_bpl, - pdma_phys_bpl1, - psb->cur_iocbq.sli4_xritag); - if (status) { - /* failure, put on abort scsi list */ - psb->exch_busy = 1; - } else { - /* success, put on SCSI buffer list */ - psb->exch_busy = 0; - psb->status = IOSTAT_SUCCESS; - num_posted++; - } - /* success, put on SCSI buffer sgl list */ - list_add_tail(&psb->list, &scsi_sblist); - } - } - - /* continue until a nembed page worth of sgls */ - if (post_cnt == 0) - continue; - - /* post block of SCSI buffer list sgls */ - status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist, - post_cnt); - - /* don't reset xirtag due to hole in xri block */ - if (block_cnt == 0) - last_xritag = NO_XRI; - - /* reset SCSI buffer post count for next round of posting */ - post_cnt = 0; - - /* put posted SCSI buffer-sgl posted on SCSI buffer sgl list */ - while (!list_empty(&blck_sblist)) { - list_remove_head(&blck_sblist, psb, - struct lpfc_scsi_buf, list); - if (status) { - /* failure, put on abort scsi list */ - psb->exch_busy = 1; - } else { - /* success, put on SCSI buffer list */ - psb->exch_busy = 0; - psb->status = IOSTAT_SUCCESS; - num_posted++; - } - list_add_tail(&psb->list, &scsi_sblist); - } - } - /* Push SCSI buffers with sgl posted to the availble list */ - while (!list_empty(&scsi_sblist)) { - list_remove_head(&scsi_sblist, psb, - struct lpfc_scsi_buf, list); - lpfc_release_scsi_buf_s4(phba, psb); - } - return num_posted; -} - -/** - * lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls - * @phba: pointer to lpfc hba data structure. - * - * This routine walks the list of scsi buffers that have been allocated and - * repost them to the port by using SGL block post. This is needed after a - * pci_function_reset/warm_start or start. 
The lpfc_hba_down_post_s4 routine - * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list - * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. - * - * Returns: 0 = success, non-zero failure. - **/ -int -lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) -{ - LIST_HEAD(post_sblist); - int num_posted, rc = 0; - - /* get all SCSI buffers need to repost to a local list */ - spin_lock_irq(&phba->scsi_buf_list_get_lock); - spin_lock(&phba->scsi_buf_list_put_lock); - list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); - list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); - spin_unlock(&phba->scsi_buf_list_put_lock); - spin_unlock_irq(&phba->scsi_buf_list_get_lock); - - /* post the list of scsi buffer sgls to port if available */ - if (!list_empty(&post_sblist)) { - num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist, - phba->sli4_hba.scsi_xri_cnt); - /* failed to post any scsi buffer, return error */ - if (num_posted == 0) - rc = -EIO; - } - return rc; -} - -/** - * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec - * @vport: The virtual port for which this call being executed. - * @num_to_allocate: The requested number of buffers to allocate. - * - * This routine allocates scsi buffers for device with SLI-4 interface spec, - * the scsi buffer contains all the necessary information needed to initiate - * a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and put - * them on a list, it post them to the port by using SGL block post. - * - * Return codes: - * int - number of scsi buffers that were allocated and posted. - * 0 = failure, less than num_to_alloc is a partial failure. - **/ -static int -lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) -{ - struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *psb; - struct sli4_sge *sgl; - IOCB_t *iocb; - dma_addr_t pdma_phys_fcp_cmd; - dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl; - uint16_t iotag, lxri = 0; - int bcnt, num_posted, sgl_size; - LIST_HEAD(prep_sblist); - LIST_HEAD(post_sblist); - LIST_HEAD(scsi_sblist); - - sgl_size = phba->cfg_sg_dma_buf_size - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); - - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", - num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, - (int)sizeof(struct fcp_cmnd), - (int)sizeof(struct fcp_rsp)); - - for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { - psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); - if (!psb) - break; - /* - * Get memory from the pci pool to map the virt space to - * pci bus space for an I/O. The DMA buffer includes space - * for the struct fcp_cmnd, struct fcp_rsp and the number - * of bde's necessary to support the sg_tablesize. - */ - psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool, - GFP_KERNEL, &psb->dma_handle); - if (!psb->data) { - kfree(psb); - break; - } - - /* - * 4K Page alignment is CRITICAL to BlockGuard, double check - * to be sure. 
- */ - if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && - (((unsigned long)(psb->data) & - (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3369 Memory alignment error " - "addr=%lx\n", - (unsigned long)psb->data); - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - psb->data, psb->dma_handle); - kfree(psb); - break; - } - - - lxri = lpfc_sli4_next_xritag(phba); - if (lxri == NO_XRI) { - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - psb->data, psb->dma_handle); - kfree(psb); - break; - } - - /* Allocate iotag for psb->cur_iocbq. */ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - psb->data, psb->dma_handle); - kfree(psb); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3368 Failed to allocate IOTAG for" - " XRI:0x%x\n", lxri); - lpfc_sli4_free_xri(phba, lxri); - break; - } - psb->cur_iocbq.sli4_lxritag = lxri; - psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; - psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; - psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + sgl_size); - psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + - sizeof(struct fcp_cmnd)); - - /* Initialize local short-hand pointers. */ - sgl = (struct sli4_sge *)psb->fcp_bpl; - pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); - pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); - - /* - * The first two bdes are the FCP_CMD and FCP_RSP. - * The balance are sg list bdes. Initialize the - * first two and leave the rest for queuecommand. - */ - sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); - sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); - sgl++; - - /* Setup the physical region for the FCP RSP */ - sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); - sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 1); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); - - /* - * Since the IOCB for the FCP I/O is built into this - * lpfc_scsi_buf, initialize it with all known data now. - */ - iocb = &psb->cur_iocbq.iocb; - iocb->un.fcpi64.bdl.ulpIoTag32 = 0; - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; - /* setting the BLP size to 2 * sizeof BDE may not be correct. - * We are setting the bpl to point to out sgl. An sgl's - * entries are 16 bytes, a bpl entries are 12 bytes. 
- */ - iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); - iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); - iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); - iocb->ulpBdeCount = 1; - iocb->ulpLe = 1; - iocb->ulpClass = CLASS3; - psb->cur_iocbq.context1 = psb; - psb->dma_phys_bpl = pdma_phys_bpl; - - /* add the scsi buffer to a post list */ - list_add_tail(&psb->list, &post_sblist); - spin_lock_irq(&phba->scsi_buf_list_get_lock); - phba->sli4_hba.scsi_xri_cnt++; - spin_unlock_irq(&phba->scsi_buf_list_get_lock); - } - lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP, - "3021 Allocate %d out of %d requested new SCSI " - "buffers\n", bcnt, num_to_alloc); - - /* post the list of scsi buffer sgls to port if available */ - if (!list_empty(&post_sblist)) - num_posted = lpfc_sli4_post_scsi_sgl_list(phba, - &post_sblist, bcnt); - else - num_posted = 0; - - return num_posted; -} - -/** - * lpfc_new_scsi_buf - Wrapper funciton for scsi buffer allocator - * @vport: The virtual port for which this call being executed. - * @num_to_allocate: The requested number of buffers to allocate. - * - * This routine wraps the actual SCSI buffer allocator function pointer from - * the lpfc_hba struct. - * - * Return codes: - * int - number of scsi buffers that were allocated. - * 0 = failure, less than num_to_alloc is a partial failure. - **/ -static inline int -lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) -{ - return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc); -} - /** * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA * @phba: The HBA for which this call is being executed. @@ -977,15 +636,16 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) * NULL - Error * Pointer to lpfc_scsi_buf - Success **/ -static struct lpfc_scsi_buf* -lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +static struct lpfc_io_buf * +lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd) { - struct lpfc_scsi_buf * lpfc_cmd = NULL; + struct lpfc_io_buf *lpfc_cmd = NULL; struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; unsigned long iflag = 0; spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); - list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, + list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf, list); if (!lpfc_cmd) { spin_lock(&phba->scsi_buf_list_put_lock); @@ -993,7 +653,7 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) &phba->lpfc_scsi_buf_list_get); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); list_remove_head(scsi_buf_list_get, lpfc_cmd, - struct lpfc_scsi_buf, list); + struct lpfc_io_buf, list); spin_unlock(&phba->scsi_buf_list_put_lock); } spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); @@ -1005,52 +665,107 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) return lpfc_cmd; } /** - * lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA + * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA * @phba: The HBA for which this call is being executed. * - * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list + * This routine removes a scsi buffer from head of @hdwq io_buf_list * and returns to caller. 
* * Return codes: * NULL - Error * Pointer to lpfc_scsi_buf - Success **/ -static struct lpfc_scsi_buf* -lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +static struct lpfc_io_buf * +lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd) { - struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; - unsigned long iflag = 0; - int found = 0; + struct lpfc_io_buf *lpfc_cmd; + struct lpfc_sli4_hdw_queue *qp; + struct sli4_sge *sgl; + IOCB_t *iocb; + dma_addr_t pdma_phys_fcp_rsp; + dma_addr_t pdma_phys_fcp_cmd; + uint32_t sgl_size, cpu, idx; + int tag; - spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); - list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, - &phba->lpfc_scsi_buf_list_get, list) { - if (lpfc_test_rrq_active(phba, ndlp, - lpfc_cmd->cur_iocbq.sli4_lxritag)) - continue; - list_del_init(&lpfc_cmd->list); - found = 1; - break; - } - if (!found) { - spin_lock(&phba->scsi_buf_list_put_lock); - list_splice(&phba->lpfc_scsi_buf_list_put, - &phba->lpfc_scsi_buf_list_get); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); - spin_unlock(&phba->scsi_buf_list_put_lock); - list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, - &phba->lpfc_scsi_buf_list_get, list) { - if (lpfc_test_rrq_active( - phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) - continue; - list_del_init(&lpfc_cmd->list); - found = 1; - break; - } + cpu = smp_processor_id(); + if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { + tag = blk_mq_unique_tag(cmnd->request); + idx = blk_mq_unique_tag_to_hwq(tag); + } else { + idx = phba->sli4_hba.cpu_map[cpu].hdwq; } - spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); - if (!found) + + lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx, + !phba->cfg_xri_rebalancing); + if (!lpfc_cmd) { + qp = &phba->sli4_hba.hdwq[idx]; + qp->empty_io_bufs++; return NULL; + } + + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + /* Setup key fields in buffer that may have been changed + * if other protocols used this buffer. + */ + lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP; + lpfc_cmd->prot_seg_cnt = 0; + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->timeout = 0; + lpfc_cmd->flags = 0; + lpfc_cmd->start_time = jiffies; + lpfc_cmd->waitq = NULL; + lpfc_cmd->cpu = cpu; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + lpfc_cmd->prot_data_type = 0; +#endif + lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size); + lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd + + sizeof(struct fcp_cmnd)); + + /* + * The first two SGEs are the FCP_CMD and FCP_RSP. + * The balance are sg list bdes. Initialize the + * first two and leave the rest for queuecommand. 
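The rewritten lpfc_get_scsi_buf_s4() above picks the hardware queue either from the block layer's tag (when I/O is scheduled by hardware queue) or from a per-CPU map. A short sketch of that selection; cpu_to_hwq is a hypothetical stand-in for the driver's cpu_map table:

    #include <linux/blk-mq.h>
    #include <linux/smp.h>
    #include <scsi/scsi_cmnd.h>

    static u16 pick_hwq(struct scsi_cmnd *cmnd, const u16 *cpu_to_hwq,
                        bool sched_by_hdwq)
    {
        if (cmnd && sched_by_hdwq) {
            u32 tag = blk_mq_unique_tag(cmnd->request);

            return blk_mq_unique_tag_to_hwq(tag);   /* block layer's hctx choice */
        }

        return cpu_to_hwq[smp_processor_id()];      /* CPU-affine fallback */
    }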
+ */ + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; + pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd)); + sgl++; + + /* Setup the physical region for the FCP RSP */ + pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp)); + + /* + * Since the IOCB for the FCP I/O is built into this + * lpfc_io_buf, initialize it with all known data now. + */ + iocb = &lpfc_cmd->cur_iocbq.iocb; + iocb->un.fcpi64.bdl.ulpIoTag32 = 0; + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; + /* setting the BLP size to 2 * sizeof BDE may not be correct. + * We are setting the bpl to point to out sgl. An sgl's + * entries are 16 bytes, a bpl entries are 12 bytes. + */ + iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); + iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); + iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); + iocb->ulpBdeCount = 1; + iocb->ulpLe = 1; + iocb->ulpClass = CLASS3; if (lpfc_ndlp_check_qdepth(phba, ndlp)) { atomic_inc(&ndlp->cmd_pending); @@ -1069,10 +784,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * NULL - Error * Pointer to lpfc_scsi_buf - Success **/ -static struct lpfc_scsi_buf* -lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) +static struct lpfc_io_buf* +lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, + struct scsi_cmnd *cmnd) { - return phba->lpfc_get_scsi_buf(phba, ndlp); + return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd); } /** @@ -1084,12 +800,11 @@ lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * lpfc_scsi_buf_list list. **/ static void -lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { unsigned long iflag = 0; psb->seg_cnt = 0; - psb->nonsg_phys = 0; psb->prot_seg_cnt = 0; spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); @@ -1104,34 +819,29 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released. * - * This routine releases @psb scsi buffer by adding it to tail of @phba - * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer + * This routine releases @psb scsi buffer by adding it to tail of @hdwq + * io_buf_list list. For SLI4 XRI's are tied to the scsi buffer * and cannot be reused for at least RA_TOV amount of time if it was * aborted. 
**/ static void -lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { + struct lpfc_sli4_hdw_queue *qp; unsigned long iflag = 0; psb->seg_cnt = 0; - psb->nonsg_phys = 0; psb->prot_seg_cnt = 0; + qp = psb->hdwq; if (psb->exch_busy) { - spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, - &phba->sli4_hba.lpfc_abts_scsi_buf_list); - spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list); + qp->abts_scsi_io_bufs++; + spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); } else { - psb->pCmd = NULL; - psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; - spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); - spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); } } @@ -1144,7 +854,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) * lpfc_scsi_buf_list list. **/ static void -lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) atomic_dec(&psb->ndlp->cmd_pending); @@ -1168,12 +878,12 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) * 0 - Success **/ static int -lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; - struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; + struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; @@ -1320,7 +1030,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) { struct scatterlist *sgpe; /* s/g prot entry */ - struct lpfc_scsi_buf *lpfc_cmd = NULL; + struct lpfc_io_buf *lpfc_cmd = NULL; struct scsi_dif_tuple *src = NULL; struct lpfc_nodelist *ndlp; struct lpfc_rport_data *rdata; @@ -1379,7 +1089,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (sgpe) { src = (struct scsi_dif_tuple *)sg_virt(sgpe); src += blockoff; - lpfc_cmd = (struct lpfc_scsi_buf *)sc->host_scribble; + lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble; } /* Should we change the Reference Tag */ @@ -2684,7 +2394,7 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) **/ static int lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) + struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *sc = lpfc_cmd->pCmd; int fcpdl; @@ -2724,11 +2434,11 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, **/ static int lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) + struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; - struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; + struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; 
uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; @@ -2904,7 +2614,7 @@ lpfc_bg_csum(uint8_t *data, int count) * what type of T10-DIF error occurred. */ static void -lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scatterlist *sgpe; /* s/g prot entry */ struct scatterlist *sgde; /* s/g data entry */ @@ -3089,8 +2799,8 @@ out: * -1 - Internal error (bad profile, ...etc) */ static int -lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, - struct lpfc_iocbq *pIocbOut) +lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, + struct lpfc_iocbq *pIocbOut) { struct scsi_cmnd *cmd = lpfc_cmd->pCmd; struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg; @@ -3256,12 +2966,12 @@ out: * 0 - Success **/ static int -lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; - struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; struct sli4_sge *first_data_sgl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; dma_addr_t physaddr; @@ -3388,6 +3098,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *) scsi_cmnd->device->hostdata)->priority; } + return 0; } @@ -3402,11 +3113,11 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) **/ static int lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) + struct lpfc_io_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; - struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); + struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl); IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; uint32_t num_sge = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; @@ -3578,7 +3289,7 @@ err: * 0 - Success **/ static inline int -lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } @@ -3597,7 +3308,7 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * 0 - Success **/ static inline int -lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) { return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } @@ -3614,7 +3325,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) **/ static void lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, - struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { + struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; uint32_t resp_info = fcprsp->rspStatus2; @@ -3706,7 +3417,7 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, * field of @lpfc_cmd for device with SLI-3 interface spec. 
**/ static void -lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb) { /* * There are only two special cases to consider. (1) the scsi command @@ -3725,7 +3436,7 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) /** * lpfc_handler_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. * @rsp_iocb: The response IOCB which contains FCP error. * * This routine is called to process response IOCB with status field @@ -3733,7 +3444,7 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) * based upon SCSI and FCP error. **/ static void -lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, +lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) { struct lpfc_hba *phba = vport->phba; @@ -3911,49 +3622,6 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); } -/** - * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution - * @phba: Pointer to HBA context object. - * - * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index - * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock - * held. - * If scsi-mq is enabled, get the default block layer mapping of software queues - * to hardware queues. This information is saved in request tag. - * - * Return: index into SLI4 fast-path FCP queue index. - **/ -int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) -{ - struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; - struct lpfc_vector_map_info *cpup; - int chann, cpu; - uint32_t tag; - uint16_t hwq; - - if (cmnd) { - tag = blk_mq_unique_tag(cmnd->request); - hwq = blk_mq_unique_tag_to_hwq(tag); - - return hwq; - } - - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU - && phba->cfg_fcp_io_channel > 1) { - cpu = smp_processor_id(); - if (cpu < phba->sli4_hba.num_present_cpu) { - cpup = phba->sli4_hba.cpu_map; - cpup += cpu; - return cpup->channel_id; - } - } - chann = atomic_add_return(1, &phba->fcp_qidx); - chann = chann % phba->cfg_fcp_io_channel; - return chann; -} - - /** * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine * @phba: The Hba for which this call is being executed. 
@@ -3968,8 +3636,8 @@ static void lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct lpfc_iocbq *pIocbOut) { - struct lpfc_scsi_buf *lpfc_cmd = - (struct lpfc_scsi_buf *) pIocbIn->context1; + struct lpfc_io_buf *lpfc_cmd = + (struct lpfc_io_buf *) pIocbIn->context1; struct lpfc_vport *vport = pIocbIn->vport; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *pnode = rdata->pnode; @@ -3977,14 +3645,35 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, unsigned long flags; struct lpfc_fast_path_event *fast_path_evt; struct Scsi_Host *shost; + int idx; uint32_t logit = LOG_FCP; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int cpu; +#endif - atomic_inc(&phba->fc4ScsiIoCmpls); + /* Guard against abort handler being called at same time */ + spin_lock(&lpfc_cmd->buf_lock); /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; - if (!cmd) + if (!cmd) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2621 IO completion: Not an active IO\n"); + spin_unlock(&lpfc_cmd->buf_lock); return; + } + + idx = lpfc_cmd->cur_iocbq.hba_wqidx; + if (phba->sli4_hba.hdwq) + phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { + cpu = smp_processor_id(); + if (cpu < LPFC_CHECK_CPU_CNT) + phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; + } +#endif shost = cmd->device->host; lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); @@ -4178,29 +3867,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); - /* If pCmd was set to NULL from abort path, do not call scsi_done */ - if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "5688 FCP cmd already NULL, sid: 0x%06x, " - "did: 0x%06x, oxid: 0x%04x\n", - vport->fc_myDID, - (pnode) ? pnode->nlp_DID : 0, - phba->sli_rev == LPFC_SLI_REV4 ? - lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff); - return; - } + lpfc_cmd->pCmd = NULL; + spin_unlock(&lpfc_cmd->buf_lock); /* The sdev is not guaranteed to be valid post scsi_done upcall. */ cmd->scsi_done(cmd); /* - * If there is a thread waiting for command completion + * If there is an abort thread waiting for command completion * wake up the thread. */ - spin_lock_irqsave(shost->host_lock, flags); - if (lpfc_cmd->waitq) - wake_up(lpfc_cmd->waitq); - spin_unlock_irqrestore(shost->host_lock, flags); + spin_lock(&lpfc_cmd->buf_lock); + if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) { + lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; + if (lpfc_cmd->waitq) + wake_up(lpfc_cmd->waitq); + lpfc_cmd->waitq = NULL; + } + spin_unlock(&lpfc_cmd->buf_lock); lpfc_release_scsi_buf(phba, lpfc_cmd); } @@ -4233,7 +3917,7 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) * to transfer for device with SLI3 interface spec. 
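The completion handler above now takes the buffer's buf_lock before looking at pCmd, so it cannot race the abort handler, and it only wakes a waiting abort thread after the command has been returned to the midlayer. A simplified sketch of that handshake, with stand-in types:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct io_buf {
        spinlock_t buf_lock;
        void *pCmd;                         /* cleared once the command is done */
        bool driver_aborted;
        wait_queue_head_t *waitq;           /* set by an abort thread, if any */
    };

    static void io_complete(struct io_buf *buf)
    {
        spin_lock(&buf->buf_lock);
        if (!buf->pCmd) {                   /* not an active command any more */
            spin_unlock(&buf->buf_lock);
            return;
        }
        buf->pCmd = NULL;
        spin_unlock(&buf->buf_lock);

        /* ... return the command to the SCSI midlayer here ... */

        spin_lock(&buf->buf_lock);
        if (buf->driver_aborted && buf->waitq) {
            wake_up(buf->waitq);            /* let the abort thread continue */
            buf->waitq = NULL;
        }
        spin_unlock(&buf->buf_lock);
    }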
**/ static void -lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd, struct lpfc_nodelist *pnode) { struct lpfc_hba *phba = vport->phba; @@ -4241,7 +3925,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq); + struct lpfc_sli4_hdw_queue *hdwq = NULL; int datadir = scsi_cmnd->sc_data_direction; + int idx; uint8_t *ptr; bool sli4; uint32_t fcpdl; @@ -4267,6 +3953,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, sli4 = (phba->sli_rev == LPFC_SLI_REV4); piocbq->iocb.un.fcpi.fcpi_XRdy = 0; + idx = lpfc_cmd->hdwq_no; + if (phba->sli4_hba.hdwq) + hdwq = &phba->sli4_hba.hdwq[idx]; /* * There are three possibilities here - use scatter-gather segment, use @@ -4288,19 +3977,22 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, vport->cfg_first_burst_size; } fcp_cmnd->fcpCntl3 = WRITE_DATA; - atomic_inc(&phba->fc4ScsiOutputRequests); + if (hdwq) + hdwq->scsi_cstat.output_requests++; } else { iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; fcp_cmnd->fcpCntl3 = READ_DATA; - atomic_inc(&phba->fc4ScsiInputRequests); + if (hdwq) + hdwq->scsi_cstat.input_requests++; } } else { iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR; iocb_cmd->un.fcpi.fcpi_parm = 0; iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = 0; - atomic_inc(&phba->fc4ScsiControlRequests); + if (hdwq) + hdwq->scsi_cstat.control_requests++; } if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) @@ -4328,7 +4020,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, /** * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. * @lun: Logical unit number. * @task_mgmt_cmd: SCSI task management command. 
* @@ -4341,7 +4033,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, **/ static int lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, - struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_io_buf *lpfc_cmd, uint64_t lun, uint8_t task_mgmt_cmd) { @@ -4413,14 +4105,12 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) switch (dev_grp) { case LPFC_PCI_DEV_LP: - phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3; break; case LPFC_PCI_DEV_OC: - phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4; phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; @@ -4452,8 +4142,8 @@ lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq, struct lpfc_iocbq *rspiocbq) { - struct lpfc_scsi_buf *lpfc_cmd = - (struct lpfc_scsi_buf *) cmdiocbq->context1; + struct lpfc_io_buf *lpfc_cmd = + (struct lpfc_io_buf *) cmdiocbq->context1; if (lpfc_cmd) lpfc_release_scsi_buf(phba, lpfc_cmd); return; @@ -4652,9 +4342,12 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata; struct lpfc_nodelist *ndlp; - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); - int err; + int err, idx; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + int cpu; +#endif rdata = lpfc_rport_data_from_scsi_device(cmnd->device); @@ -4718,7 +4411,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) } } - lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); + lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd); if (lpfc_cmd == NULL) { lpfc_rampdown_queue_depth(phba); @@ -4735,8 +4428,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) lpfc_cmd->pCmd = cmnd; lpfc_cmd->rdata = rdata; lpfc_cmd->ndlp = ndlp; - lpfc_cmd->timeout = 0; - lpfc_cmd->start_time = jiffies; cmnd->host_scribble = (unsigned char *)lpfc_cmd; if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { @@ -4771,6 +4462,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { + cpu = smp_processor_id(); + if (cpu < LPFC_CHECK_CPU_CNT) { + struct lpfc_sli4_hdw_queue *hdwq = + &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no]; + hdwq->cpucheck_xmt_io[cpu]++; + } + } +#endif err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) { @@ -4791,16 +4492,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) (uint32_t) (cmnd->request->timeout / 1000)); - switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { - case WRITE_DATA: - atomic_dec(&phba->fc4ScsiOutputRequests); - break; - case READ_DATA: - atomic_dec(&phba->fc4ScsiInputRequests); - break; - default: - atomic_dec(&phba->fc4ScsiControlRequests); - } goto out_host_busy_free_buf; } if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { @@ -4811,10 +4502,26 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) lpfc_poll_rearm_timer(phba); } + if (phba->cfg_xri_rebalancing) + lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no); + return 0; out_host_busy_free_buf: + idx = lpfc_cmd->hdwq_no; 
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); + if (phba->sli4_hba.hdwq) { + switch (lpfc_cmd->fcp_cmnd->fcpCntl3) { + case WRITE_DATA: + phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--; + break; + case READ_DATA: + phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--; + break; + default: + phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--; + } + } lpfc_release_scsi_buf(phba, lpfc_cmd); out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; @@ -4846,7 +4553,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; struct lpfc_iocbq *iocb; struct lpfc_iocbq *abtsiocb; - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; IOCB_t *cmd, *icmd; int ret = SUCCESS, status = 0; struct lpfc_sli_ring *pring_s4 = NULL; @@ -4858,65 +4565,59 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) if (status != 0 && status != SUCCESS) return status; + lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble; + if (!lpfc_cmd) + return ret; + spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3168 SCSI Layer abort requested I/O has been " "flushed by LLD.\n"); - return FAILED; + ret = FAILED; + goto out_unlock; } - lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble; - if (!lpfc_cmd || !lpfc_cmd->pCmd) { - spin_unlock_irqrestore(&phba->hbalock, flags); + /* Guard against IO completion being called at same time */ + spin_lock(&lpfc_cmd->buf_lock); + + if (!lpfc_cmd->pCmd) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "2873 SCSI Layer I/O Abort Request IO CMPL Status " "x%x ID %d LUN %llu\n", SUCCESS, cmnd->device->id, cmnd->device->lun); - return SUCCESS; + goto out_unlock_buf; } iocb = &lpfc_cmd->cur_iocbq; if (phba->sli_rev == LPFC_SLI_REV4) { - if (!(phba->cfg_fof) || - (!(iocb->iocb_flag & LPFC_IO_FOF))) { - pring_s4 = - phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring; - } else { - iocb->hba_wqidx = 0; - pring_s4 = phba->sli4_hba.oas_wq->pring; - } + pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring; if (!pring_s4) { ret = FAILED; - goto out_unlock; + goto out_unlock_buf; } spin_lock(&pring_s4->ring_lock); } /* the command is in process of being cancelled */ if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { - if (phba->sli_rev == LPFC_SLI_REV4) - spin_unlock(&pring_s4->ring_lock); - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3169 SCSI Layer abort requested I/O has been " "cancelled by LLD.\n"); - return FAILED; + ret = FAILED; + goto out_unlock_ring; } /* - * If pCmd field of the corresponding lpfc_scsi_buf structure + * If pCmd field of the corresponding lpfc_io_buf structure * points to a different SCSI command, then the driver has * already completed this command, but the midlayer did not * see the completion before the eh fired. Just return SUCCESS. 
*/ if (lpfc_cmd->pCmd != cmnd) { - if (phba->sli_rev == LPFC_SLI_REV4) - spin_unlock(&pring_s4->ring_lock); lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3170 SCSI Layer abort requested I/O has been " "completed by LLD.\n"); - goto out_unlock; + goto out_unlock_ring; } BUG_ON(iocb->context1 != lpfc_cmd); @@ -4927,6 +4628,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) "3389 SCSI Layer I/O Abort Request is pending\n"); if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring_s4->ring_lock); + spin_unlock(&lpfc_cmd->buf_lock); spin_unlock_irqrestore(&phba->hbalock, flags); goto wait_for_cmpl; } @@ -4934,9 +4636,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb = __lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { ret = FAILED; - if (phba->sli_rev == LPFC_SLI_REV4) - spin_unlock(&pring_s4->ring_lock); - goto out_unlock; + goto out_unlock_ring; } /* Indicate the IO is being aborted by the driver. */ @@ -4986,24 +4686,18 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) /* no longer need the lock after this point */ spin_unlock_irqrestore(&phba->hbalock, flags); - if (ret_val == IOCB_ERROR) { - if (phba->sli_rev == LPFC_SLI_REV4) - spin_lock_irqsave(&pring_s4->ring_lock, flags); - else - spin_lock_irqsave(&phba->hbalock, flags); /* Indicate the IO is not being aborted by the driver. */ iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; lpfc_cmd->waitq = NULL; - if (phba->sli_rev == LPFC_SLI_REV4) - spin_unlock_irqrestore(&pring_s4->ring_lock, flags); - else - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock(&lpfc_cmd->buf_lock); lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; } + spin_unlock(&lpfc_cmd->buf_lock); + if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_handle_fast_ring_event(phba, &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); @@ -5014,9 +4708,7 @@ wait_for_cmpl: (lpfc_cmd->pCmd != cmnd), msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); - spin_lock_irqsave(shost->host_lock, flags); - lpfc_cmd->waitq = NULL; - spin_unlock_irqrestore(shost->host_lock, flags); + spin_lock(&lpfc_cmd->buf_lock); if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; @@ -5027,8 +4719,14 @@ wait_for_cmpl: iocb->sli4_xritag, ret, cmnd->device->id, cmnd->device->lun); } + spin_unlock(&lpfc_cmd->buf_lock); goto out; +out_unlock_ring: + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); +out_unlock_buf: + spin_unlock(&lpfc_cmd->buf_lock); out_unlock: spin_unlock_irqrestore(&phba->hbalock, flags); out: @@ -5066,7 +4764,7 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) /** * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lpfc_cmd: Pointer to lpfc_io_buf data structure. 
* * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded * @@ -5075,7 +4773,7 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd) * 0x2002 - Success **/ static int -lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) { struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; uint32_t rsp_info; @@ -5150,7 +4848,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, uint8_t task_mgmt_cmd) { struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; struct lpfc_rport_data *rdata; @@ -5163,7 +4861,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, return FAILED; pnode = rdata->pnode; - lpfc_cmd = lpfc_get_scsi_buf(phba, pnode); + lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL); if (lpfc_cmd == NULL) return FAILED; lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; @@ -5671,6 +5369,12 @@ lpfc_slave_alloc(struct scsi_device *sdev) } sdev_cnt = atomic_inc_return(&phba->sdev_cnt); + /* For SLI4, all IO buffers are pre-allocated */ + if (phba->sli_rev == LPFC_SLI_REV4) + return 0; + + /* This code path is now ONLY for SLI3 adapters */ + /* * Populate the cmds_per_lun count scsi_bufs into this host's globally * available list of scsi buffers. Don't allocate more than the @@ -5702,7 +5406,7 @@ lpfc_slave_alloc(struct scsi_device *sdev) (phba->cfg_hba_queue_depth - total)); num_to_alloc = phba->cfg_hba_queue_depth - total; } - num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); + num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc); if (num_to_alloc != num_allocated) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "0708 Allocation request of %d " diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index b759b089432c..f76667b7da7b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -130,62 +130,6 @@ struct lpfc_scsicmd_bkt { uint32_t cmd_count; }; -struct lpfc_scsi_buf { - struct list_head list; - struct scsi_cmnd *pCmd; - struct lpfc_rport_data *rdata; - struct lpfc_nodelist *ndlp; - - uint32_t timeout; - - uint16_t flags; /* TBD convert exch_busy to flags */ -#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ -#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */ - uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ - uint16_t status; /* From IOCB Word 7- ulpStatus */ - uint32_t result; /* From IOCB Word 4. */ - - uint32_t seg_cnt; /* Number of scatter-gather segments returned by - * dma_map_sg. The driver needs this for calls - * to dma_unmap_sg. */ - uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */ - - dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */ - - /* - * data and dma_handle are the kernel virtual and bus address of the - * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter - * gather bde list that supports the sg_tablesize value. 
- */ - void *data; - dma_addr_t dma_handle; - - struct fcp_cmnd *fcp_cmnd; - struct fcp_rsp *fcp_rsp; - struct ulp_bde64 *fcp_bpl; - - dma_addr_t dma_phys_bpl; - - /* cur_iocbq has phys of the dma-able buffer. - * Iotag is in here - */ - struct lpfc_iocbq cur_iocbq; - uint16_t cpu; - - wait_queue_head_t *waitq; - unsigned long start_time; - -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - /* Used to restore any changes to protection data for error injection */ - void *prot_data_segment; - uint32_t prot_data; - uint32_t prot_data_type; -#define LPFC_INJERR_REFTAG 1 -#define LPFC_INJERR_APPTAG 2 -#define LPFC_INJERR_GUARD 3 -#endif -}; - #define LPFC_SCSI_DMA_EXT_SIZE 264 #define LPFC_BPL_SIZE 1024 #define MDAC_DIRECT_CMD 0x22 @@ -200,5 +144,6 @@ struct lpfc_scsi_buf { #define TXRDY_PAYLOAD_LEN 12 -int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd); +/* For sysfs/debugfs tmp string max len */ +#define LPFC_MAX_SCSI_INFO_TMP_LEN 79 + diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 2242e9b3ca12..d0817facdae3 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -78,12 +78,13 @@ static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, struct hbq_dmabuf *); static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf); -static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_cqe *); +static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, struct lpfc_cqe *cqe); static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, - struct lpfc_eqe *eqe, uint32_t qidx); + struct lpfc_queue *eq, + struct lpfc_eqe *eqe); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, @@ -160,7 +161,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) } q->WQ_posted++; /* set consumption flag every once in a while */ - if (!((q->host_index + 1) % q->entry_repost)) + if (!((q->host_index + 1) % q->notify_interval)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); else bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); @@ -325,29 +326,16 @@ lpfc_sli4_mq_release(struct lpfc_queue *q) static struct lpfc_eqe * lpfc_sli4_eq_get(struct lpfc_queue *q) { - struct lpfc_hba *phba; struct lpfc_eqe *eqe; - uint32_t idx; /* sanity check on queue memory */ if (unlikely(!q)) return NULL; - phba = q->phba; - eqe = q->qe[q->hba_index].eqe; + eqe = q->qe[q->host_index].eqe; /* If the next EQE is not valid then we are done */ if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) return NULL; - /* If the host has not yet processed the next entry then we are done */ - idx = ((q->hba_index + 1) % q->entry_count); - if (idx == q->host_index) - return NULL; - - q->hba_index = idx; - /* if the index wrapped around, toggle the valid bit */ - if 
(phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index) - q->qe_valid = (q->qe_valid) ? 0 : 1; - /* * insert barrier for instruction interlock : data from the hardware @@ -397,44 +385,25 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) } /** - * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ + * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state + * @phba: adapter with EQ * @q: The Event Queue that the host has completed processing for. + * @count: Number of elements that have been consumed * @arm: Indicates whether the host wants to arms this CQ. * - * This routine will mark all Event Queue Entries on @q, from the last - * known completed entry to the last entry that was processed, as completed - * by clearing the valid bit for each completion queue entry. Then it will - * notify the HBA, by ringing the doorbell, that the EQEs have been processed. - * The internal host index in the @q will be updated by this routine to indicate - * that the host has finished processing the entries. The @arm parameter - * indicates that the queue should be rearmed when ringing the doorbell. - * - * This function will return the number of EQEs that were popped. + * This routine will notify the HBA, by ringing the doorbell, that count + * number of EQEs have been processed. The @arm parameter indicates whether + * the queue should be rearmed when ringing the doorbell. **/ -uint32_t -lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) +void +lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) { - uint32_t released = 0; - struct lpfc_hba *phba; - struct lpfc_eqe *temp_eqe; struct lpfc_register doorbell; /* sanity check on queue memory */ - if (unlikely(!q)) - return 0; - phba = q->phba; - - /* while there are valid entries */ - while (q->hba_index != q->host_index) { - if (!phba->sli4_hba.pc_sli4_params.eqav) { - temp_eqe = q->qe[q->host_index].eqe; - bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); - } - released++; - q->host_index = ((q->host_index + 1) % q->entry_count); - } - if (unlikely(released == 0 && !arm)) - return 0; + if (unlikely(!q || (count == 0 && !arm))) + return; /* ring doorbell for number popped */ doorbell.word0 = 0; @@ -442,7 +411,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); } - bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); @@ -451,60 +420,112 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) /* PCI read to flush PCI pipeline on re-arming for INTx mode */ if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) readl(q->phba->sli4_hba.EQDBregaddr); - return released; } /** - * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ + * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state + * @phba: adapter with EQ * @q: The Event Queue that the host has completed processing for. + * @count: Number of elements that have been consumed * @arm: Indicates whether the host wants to arms this CQ. * - * This routine will mark all Event Queue Entries on @q, from the last - * known completed entry to the last entry that was processed, as completed - * by clearing the valid bit for each completion queue entry. 
Then it will - * notify the HBA, by ringing the doorbell, that the EQEs have been processed. - * The internal host index in the @q will be updated by this routine to indicate - * that the host has finished processing the entries. The @arm parameter - * indicates that the queue should be rearmed when ringing the doorbell. - * - * This function will return the number of EQEs that were popped. + * This routine will notify the HBA, by ringing the doorbell, that count + * number of EQEs have been processed. The @arm parameter indicates whether + * the queue should be rearmed when ringing the doorbell. **/ -uint32_t -lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm) +void +lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) { - uint32_t released = 0; - struct lpfc_hba *phba; - struct lpfc_eqe *temp_eqe; struct lpfc_register doorbell; /* sanity check on queue memory */ - if (unlikely(!q)) - return 0; - phba = q->phba; - - /* while there are valid entries */ - while (q->hba_index != q->host_index) { - if (!phba->sli4_hba.pc_sli4_params.eqav) { - temp_eqe = q->qe[q->host_index].eqe; - bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); - } - released++; - q->host_index = ((q->host_index + 1) % q->entry_count); - } - if (unlikely(released == 0 && !arm)) - return 0; + if (unlikely(!q || (count == 0 && !arm))) + return; /* ring doorbell for number popped */ doorbell.word0 = 0; if (arm) bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); - bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released); + bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); /* PCI read to flush PCI pipeline on re-arming for INTx mode */ if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) readl(q->phba->sli4_hba.EQDBregaddr); - return released; +} + +static void +__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, + struct lpfc_eqe *eqe) +{ + if (!phba->sli4_hba.pc_sli4_params.eqav) + bf_set_le32(lpfc_eqe_valid, eqe, 0); + + eq->host_index = ((eq->host_index + 1) % eq->entry_count); + + /* if the index wrapped around, toggle the valid bit */ + if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) + eq->qe_valid = (eq->qe_valid) ? 
0 : 1; +} + +static void +lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) +{ + struct lpfc_eqe *eqe; + uint32_t count = 0; + + /* walk all the EQ entries and drop on the floor */ + eqe = lpfc_sli4_eq_get(eq); + while (eqe) { + __lpfc_sli4_consume_eqe(phba, eq, eqe); + count++; + eqe = lpfc_sli4_eq_get(eq); + } + + /* Clear and re-arm the EQ */ + phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM); +} + +static int +lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) +{ + struct lpfc_eqe *eqe; + int count = 0, consumed = 0; + + if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) + goto rearm_and_exit; + + eqe = lpfc_sli4_eq_get(eq); + while (eqe) { + lpfc_sli4_hba_handle_eqe(phba, eq, eqe); + __lpfc_sli4_consume_eqe(phba, eq, eqe); + + consumed++; + if (!(++count % eq->max_proc_limit)) + break; + + if (!(count % eq->notify_interval)) { + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, + LPFC_QUEUE_NOARM); + consumed = 0; + } + + eqe = lpfc_sli4_eq_get(eq); + } + eq->EQ_processed += count; + + /* Track the max number of EQEs processed in 1 intr */ + if (count > eq->EQ_max_eqe) + eq->EQ_max_eqe = count; + + eq->queue_claimed = 0; + +rearm_and_exit: + /* Always clear and re-arm the EQ */ + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM); + + return count; } /** @@ -519,28 +540,16 @@ lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm) static struct lpfc_cqe * lpfc_sli4_cq_get(struct lpfc_queue *q) { - struct lpfc_hba *phba; struct lpfc_cqe *cqe; - uint32_t idx; /* sanity check on queue memory */ if (unlikely(!q)) return NULL; - phba = q->phba; - cqe = q->qe[q->hba_index].cqe; + cqe = q->qe[q->host_index].cqe; /* If the next CQE is not valid then we are done */ if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) return NULL; - /* If the host has not yet processed the next entry then we are done */ - idx = ((q->hba_index + 1) % q->entry_count); - if (idx == q->host_index) - return NULL; - - q->hba_index = idx; - /* if the index wrapped around, toggle the valid bit */ - if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index) - q->qe_valid = (q->qe_valid) ? 0 : 1; /* * insert barrier for instruction interlock : data from the hardware @@ -554,107 +563,81 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) return cqe; } +static void +__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + if (!phba->sli4_hba.pc_sli4_params.cqav) + bf_set_le32(lpfc_cqe_valid, cqe, 0); + + cq->host_index = ((cq->host_index + 1) % cq->entry_count); + + /* if the index wrapped around, toggle the valid bit */ + if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) + cq->qe_valid = (cq->qe_valid) ? 0 : 1; +} + /** - * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ + * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. + * @phba: the adapter with the CQ * @q: The Completion Queue that the host has completed processing for. + * @count: the number of elements that were consumed * @arm: Indicates whether the host wants to arms this CQ. * - * This routine will mark all Completion queue entries on @q, from the last - * known completed entry to the last entry that was processed, as completed - * by clearing the valid bit for each completion queue entry. Then it will - * notify the HBA, by ringing the doorbell, that the CQEs have been processed. - * The internal host index in the @q will be updated by this routine to indicate - * that the host has finished processing the entries. 
The @arm parameter - * indicates that the queue should be rearmed when ringing the doorbell. - * - * This function will return the number of CQEs that were released. + * This routine will notify the HBA, by ringing the doorbell, that the + * CQEs have been processed. The @arm parameter specifies whether the + * queue should be rearmed when ringing the doorbell. **/ -uint32_t -lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) +void +lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) { - uint32_t released = 0; - struct lpfc_hba *phba; - struct lpfc_cqe *temp_qe; struct lpfc_register doorbell; /* sanity check on queue memory */ - if (unlikely(!q)) - return 0; - phba = q->phba; - - /* while there are valid entries */ - while (q->hba_index != q->host_index) { - if (!phba->sli4_hba.pc_sli4_params.cqav) { - temp_qe = q->qe[q->host_index].cqe; - bf_set_le32(lpfc_cqe_valid, temp_qe, 0); - } - released++; - q->host_index = ((q->host_index + 1) % q->entry_count); - } - if (unlikely(released == 0 && !arm)) - return 0; + if (unlikely(!q || (count == 0 && !arm))) + return; /* ring doorbell for number popped */ doorbell.word0 = 0; if (arm) bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); - bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); - return released; } /** - * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ + * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state. + * @phba: the adapter with the CQ * @q: The Completion Queue that the host has completed processing for. + * @count: the number of elements that were consumed * @arm: Indicates whether the host wants to arms this CQ. * - * This routine will mark all Completion queue entries on @q, from the last - * known completed entry to the last entry that was processed, as completed - * by clearing the valid bit for each completion queue entry. Then it will - * notify the HBA, by ringing the doorbell, that the CQEs have been processed. - * The internal host index in the @q will be updated by this routine to indicate - * that the host has finished processing the entries. The @arm parameter - * indicates that the queue should be rearmed when ringing the doorbell. - * - * This function will return the number of CQEs that were released. + * This routine will notify the HBA, by ringing the doorbell, that the + * CQEs have been processed. The @arm parameter specifies whether the + * queue should be rearmed when ringing the doorbell. 
**/ -uint32_t -lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm) +void +lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm) { - uint32_t released = 0; - struct lpfc_hba *phba; - struct lpfc_cqe *temp_qe; struct lpfc_register doorbell; /* sanity check on queue memory */ - if (unlikely(!q)) - return 0; - phba = q->phba; - - /* while there are valid entries */ - while (q->hba_index != q->host_index) { - if (!phba->sli4_hba.pc_sli4_params.cqav) { - temp_qe = q->qe[q->host_index].cqe; - bf_set_le32(lpfc_cqe_valid, temp_qe, 0); - } - released++; - q->host_index = ((q->host_index + 1) % q->entry_count); - } - if (unlikely(released == 0 && !arm)) - return 0; + if (unlikely(!q || (count == 0 && !arm))) + return; /* ring doorbell for number popped */ doorbell.word0 = 0; if (arm) bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1); - bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released); + bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count); bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); - return released; } /** @@ -703,15 +686,15 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, hq->RQ_buf_posted++; /* Ring The Header Receive Queue Doorbell */ - if (!(hq->host_index % hq->entry_repost)) { + if (!(hq->host_index % hq->notify_interval)) { doorbell.word0 = 0; if (hq->db_format == LPFC_DB_RING_FORMAT) { bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, - hq->entry_repost); + hq->notify_interval); bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, - hq->entry_repost); + hq->notify_interval); bf_set(lpfc_rq_db_list_fm_index, &doorbell, hq->host_index); bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); @@ -1025,7 +1008,7 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, if (!ndlp->active_rrqs_xri_bitmap) return 0; if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) - return 1; + return 1; else return 0; } @@ -1133,14 +1116,14 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; struct lpfc_sglq *sglq = NULL; struct lpfc_sglq *start_sglq = NULL; - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; struct lpfc_nodelist *ndlp; int found = 0; lockdep_assert_held(&phba->hbalock); if (piocbq->iocb_flag & LPFC_IO_FCP) { - lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; + lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1; ndlp = lpfc_cmd->rdata->pnode; } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { @@ -1596,6 +1579,7 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, list_add_tail(&piocb->list, &pring->txcmplq); piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; + pring->txcmplq_cnt++; if ((unlikely(pring->ringno == LPFC_ELS_RING)) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && @@ -3008,6 +2992,7 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, /* remove from txcmpl queue list */ list_del_init(&cmd_iocb->list); cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + pring->txcmplq_cnt--; return cmd_iocb; } } @@ -3045,6 +3030,7 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, /* remove from txcmpl queue list */ list_del_init(&cmd_iocb->list); cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; + pring->txcmplq_cnt--; return cmd_iocb; } } @@ -3981,8 +3967,8 
@@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { - for (i = 0; i < phba->cfg_fcp_io_channel; i++) { - pring = phba->sli4_hba.fcp_wq[i]->pring; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; lpfc_sli_abort_iocb_ring(phba, pring); } } else { @@ -4006,12 +3992,13 @@ lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; uint32_t i; - if (phba->sli_rev < LPFC_SLI_REV4) + if ((phba->sli_rev < LPFC_SLI_REV4) || + !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return; /* Abort all IO on each NVME ring. */ - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { - pring = phba->sli4_hba.nvme_wq[i]->pring; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; lpfc_sli_abort_wqe_ring(phba, pring); } } @@ -4044,8 +4031,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { - for (i = 0; i < phba->cfg_fcp_io_channel; i++) { - pring = phba->sli4_hba.fcp_wq[i]->pring; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; spin_lock_irq(&pring->ring_lock); /* Retrieve everything on txq */ @@ -4110,7 +4097,8 @@ lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) uint32_t i; struct lpfc_iocbq *piocb, *next_iocb; - if (phba->sli_rev < LPFC_SLI_REV4) + if ((phba->sli_rev < LPFC_SLI_REV4) || + !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) return; /* Hint to other driver operations that a flush is in progress. */ @@ -4122,8 +4110,8 @@ lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) * a local driver reason code. This is a flush so no * abort exchange to FW. */ - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { - pring = phba->sli4_hba.nvme_wq[i]->pring; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; spin_lock_irq(&pring->ring_lock); list_for_each_entry_safe(piocb, next_iocb, @@ -5564,41 +5552,35 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) { int qidx; struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; + struct lpfc_sli4_hdw_queue *qp; - sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM); - sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM); + sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); + sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); if (sli4_hba->nvmels_cq) - sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq, - LPFC_QUEUE_REARM); - - if (sli4_hba->fcp_cq) - for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) - sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx], - LPFC_QUEUE_REARM); - - if (sli4_hba->nvme_cq) - for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) - sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx], - LPFC_QUEUE_REARM); + sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, + LPFC_QUEUE_REARM); - if (phba->cfg_fof) - sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM); + qp = sli4_hba->hdwq; + if (sli4_hba->hdwq) { + for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { + sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0, + LPFC_QUEUE_REARM); + sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0, + LPFC_QUEUE_REARM); + } - if (sli4_hba->hba_eq) - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) - sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx], - LPFC_QUEUE_REARM); + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) + 
sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq, + 0, LPFC_QUEUE_REARM); + } if (phba->nvmet_support) { for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { - sli4_hba->sli4_cq_release( - sli4_hba->nvmet_cqset[qidx], + sli4_hba->sli4_write_cq_db(phba, + sli4_hba->nvmet_cqset[qidx], 0, LPFC_QUEUE_REARM); } } - - if (phba->cfg_fof) - sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM); } /** @@ -6027,11 +6009,8 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) list_add_tail(&rsrc_blks->list, ext_blk_list); rsrc_start = rsrc_id; if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { - phba->sli4_hba.scsi_xri_start = rsrc_start + + phba->sli4_hba.io_xri_start = rsrc_start + lpfc_sli4_get_iocb_cnt(phba); - phba->sli4_hba.nvme_xri_start = - phba->sli4_hba.scsi_xri_start + - phba->sli4_hba.scsi_xri_max; } while (rsrc_id < (rsrc_start + rsrc_size)) { @@ -7056,6 +7035,38 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, return total_cnt; } +/** + * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls + * @phba: pointer to lpfc hba data structure. + * + * This routine walks the list of nvme buffers that have been allocated and + * repost them to the port by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list + * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. + * + * Returns: 0 = success, non-zero failure. + **/ +int +lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) +{ + LIST_HEAD(post_nblist); + int num_posted, rc = 0; + + /* get all NVME buffers need to repost to a local list */ + lpfc_io_buf_flush(phba, &post_nblist); + + /* post the list of nvme buffer sgls to port if available */ + if (!list_empty(&post_nblist)) { + num_posted = lpfc_sli4_post_io_sgl_list( + phba, &post_nblist, phba->sli4_hba.io_xri_cnt); + /* failed to post any nvme buffer, return error */ + if (num_posted == 0) + rc = -EIO; + } + return rc; +} + void lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { @@ -7144,7 +7155,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, int lpfc_sli4_hba_setup(struct lpfc_hba *phba) { - int rc, i, cnt; + int rc, i, cnt, len; LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; uint8_t *vpd; @@ -7517,24 +7528,26 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* We need 1 iocbq for every SGL, for IO processing */ cnt += phba->sli4_hba.nvmet_xri_cnt; } else { - /* update host scsi xri-sgl sizes and mappings */ - rc = lpfc_sli4_scsi_sgl_update(phba); + /* update host common xri-sgl sizes and mappings */ + rc = lpfc_sli4_io_sgl_update(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "6309 Failed to update scsi-sgl size " + "6082 Failed to update nvme-sgl size " "and mapping: %d\n", rc); goto out_destroy_queue; } - /* update host nvme xri-sgl sizes and mappings */ - rc = lpfc_sli4_nvme_sgl_update(phba); + /* register the allocated common sgl pool to the port */ + rc = lpfc_sli4_repost_io_sgl_list(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "6082 Failed to update nvme-sgl size " - "and mapping: %d\n", rc); + "6116 Error %d during nvme sgl post " + "operation\n", rc); + /* Some NVME buffers were moved to abort nvme list */ + /* A pci function reset will repost them */ + rc = -ENODEV; goto out_destroy_queue; } - cnt = phba->cfg_iocb_cnt * 1024; } @@ -7571,36 +7584,6 @@ lpfc_sli4_hba_setup(struct 
lpfc_hba *phba) } } - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { - /* register the allocated scsi sgl pool to the port */ - rc = lpfc_sli4_repost_scsi_sgl_list(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0383 Error %d during scsi sgl post " - "operation\n", rc); - /* Some Scsi buffers were moved to abort scsi list */ - /* A pci function reset will repost them */ - rc = -ENODEV; - goto out_destroy_queue; - } - } - - if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && - (phba->nvmet_support == 0)) { - - /* register the allocated nvme sgl pool to the port */ - rc = lpfc_repost_nvme_sgl_list(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "6116 Error %d during nvme sgl post " - "operation\n", rc); - /* Some NVME buffers were moved to abort nvme list */ - /* A pci function reset will repost them */ - rc = -ENODEV; - goto out_destroy_queue; - } - } - /* Post the rpi header region to the device. */ rc = lpfc_sli4_post_all_rpi_hdrs(phba); if (unlikely(rc)) { @@ -7650,6 +7633,25 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_sli_read_link_ste(phba); } + /* Don't post more new bufs if repost already recovered + * the nvme sgls. + */ + if (phba->nvmet_support == 0) { + if (phba->sli4_hba.io_xri_cnt == 0) { + len = lpfc_new_io_buf( + phba, phba->sli4_hba.io_xri_max); + if (len == 0) { + rc = -ENOMEM; + goto out_unset_queue; + } + + if (phba->cfg_xri_rebalancing) + lpfc_create_multixri_pools(phba); + } + } else { + phba->cfg_xri_rebalancing = 0; + } + /* Arm the CQs and then EQs on device */ lpfc_sli4_arm_cqeq_intr(phba); @@ -7678,6 +7680,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->hb_outstanding = 0; phba->last_completion_time = jiffies; + /* start eq_delay heartbeat */ + if (phba->cfg_auto_imax) + queue_delayed_work(phba->wq, &phba->eq_delay_work, + msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); + /* Start error attention (ERATT) polling timer */ mod_timer(&phba->eratt_poll, jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); @@ -7729,18 +7736,21 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, "3104 Adapter failed to issue " "DOWN_LINK mbox cmd, rc:x%x\n", rc); - goto out_unset_queue; + goto out_io_buff_free; } } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { /* don't perform init_link on SLI4 FC port loopback test */ if (!(phba->link_flag & LS_LOOPBACK_MODE)) { rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); if (rc) - goto out_unset_queue; + goto out_io_buff_free; } } mempool_free(mboxq, phba->mbox_mem_pool); return rc; +out_io_buff_free: + /* Free allocated IO Buffers */ + lpfc_io_free(phba); out_unset_queue: /* Unset all the queues set up in this routine when error out */ lpfc_sli4_queue_unset(phba); @@ -7846,7 +7856,6 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; uint32_t eqidx; struct lpfc_queue *fpeq = NULL; - struct lpfc_eqe *eqe; bool mbox_pending; if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) @@ -7854,11 +7863,11 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) /* Find the eq associated with the mcq */ - if (sli4_hba->hba_eq) - for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) - if (sli4_hba->hba_eq[eqidx]->queue_id == + if (sli4_hba->hdwq) + for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) + if (sli4_hba->hdwq[eqidx].hba_eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { - fpeq = sli4_hba->hba_eq[eqidx]; + fpeq 
= sli4_hba->hdwq[eqidx].hba_eq; break; } if (!fpeq) @@ -7880,14 +7889,11 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) */ if (mbox_pending) - while ((eqe = lpfc_sli4_eq_get(fpeq))) { - lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); - fpeq->EQ_processed++; - } - - /* Always clear and re-arm the EQ */ - - sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM); + /* process and rearm the EQ */ + lpfc_sli4_process_eq(phba, fpeq); + else + /* Always clear and re-arm the EQ */ + sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); return mbox_pending; @@ -8557,7 +8563,6 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); if (rc) goto exit; - /* * Initialize the bootstrap memory region to avoid stale data areas * in the mailbox post. Then copy the caller's mailbox contents to @@ -9476,7 +9481,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); if (phba->fcp_embed_io) { - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; struct sli4_sge *sgl; struct fcp_cmnd *fcp_cmnd; uint32_t *ptr; @@ -9484,7 +9489,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* 128 byte wqe support here */ lpfc_cmd = iocbq->context1; - sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Word 0-2 - FCP_CMND */ @@ -9540,7 +9545,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); if (phba->fcp_embed_io) { - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; struct sli4_sge *sgl; struct fcp_cmnd *fcp_cmnd; uint32_t *ptr; @@ -9548,7 +9553,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* 128 byte wqe support here */ lpfc_cmd = iocbq->context1; - sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Word 0-2 - FCP_CMND */ @@ -9597,7 +9602,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* Note, word 10 is already initialized to 0 */ if (phba->fcp_embed_io) { - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; struct sli4_sge *sgl; struct fcp_cmnd *fcp_cmnd; uint32_t *ptr; @@ -9605,7 +9610,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, /* 128 byte wqe support here */ lpfc_cmd = iocbq->context1; - sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; fcp_cmnd = lpfc_cmd->fcp_cmnd; /* Word 0-2 - FCP_CMND */ @@ -9864,10 +9869,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, /* Get the WQ */ if ((piocb->iocb_flag & LPFC_IO_FCP) || (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) - wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx]; - else - wq = phba->sli4_hba.oas_wq; + wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq; } else { wq = phba->sli4_hba.els_wq; } @@ -10001,29 +10003,20 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) struct lpfc_sli_ring * lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) { + struct lpfc_io_buf *lpfc_cmd; + if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { - if (!(phba->cfg_fof) || - (!(piocb->iocb_flag & LPFC_IO_FOF))) { - if (unlikely(!phba->sli4_hba.fcp_wq)) - return NULL; - /* - * for abort iocb hba_wqidx should already - * be setup based on what work 
queue we used. - */ - if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - piocb->hba_wqidx = - lpfc_sli4_scmd_to_wqidx_distr(phba, - piocb->context1); - piocb->hba_wqidx = piocb->hba_wqidx % - phba->cfg_fcp_io_channel; - } - return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; - } else { - if (unlikely(!phba->sli4_hba.oas_wq)) - return NULL; - piocb->hba_wqidx = 0; - return phba->sli4_hba.oas_wq->pring; + if (unlikely(!phba->sli4_hba.hdwq)) + return NULL; + /* + * for abort iocb hba_wqidx should already + * be setup based on what work queue we used. + */ + if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; + piocb->hba_wqidx = lpfc_cmd->hdwq_no; } + return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring; } else { if (unlikely(!phba->sli4_hba.els_wq)) return NULL; @@ -10049,12 +10042,9 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { - struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_sli_ring *pring; - struct lpfc_queue *fpeq; - struct lpfc_eqe *eqe; unsigned long iflags; - int rc, idx; + int rc; if (phba->sli_rev == LPFC_SLI_REV4) { pring = lpfc_sli4_calc_ring(phba, piocb); @@ -10064,34 +10054,6 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, spin_lock_irqsave(&pring->ring_lock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&pring->ring_lock, iflags); - - if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { - idx = piocb->hba_wqidx; - hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; - - if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { - - /* Get associated EQ with this index */ - fpeq = phba->sli4_hba.hba_eq[idx]; - - /* Turn off interrupts from this EQ */ - phba->sli4_hba.sli4_eq_clr_intr(fpeq); - - /* - * Process all the events on FCP EQ - */ - while ((eqe = lpfc_sli4_eq_get(fpeq))) { - lpfc_sli4_hba_handle_eqe(phba, - eqe, idx); - fpeq->EQ_processed++; - } - - /* Always clear and re-arm the EQ */ - phba->sli4_hba.sli4_eq_release(fpeq, - LPFC_QUEUE_REARM); - } - atomic_inc(&hba_eq_hdl->hba_eq_in_use); - } } else { /* For now, SLI2/3 will still use hbalock */ spin_lock_irqsave(&phba->hbalock, iflags); @@ -10506,19 +10468,11 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba) INIT_LIST_HEAD(&psli->mboxq); INIT_LIST_HEAD(&psli->mboxq_cmpl); /* Initialize list headers for txq and txcmplq as double linked lists */ - for (i = 0; i < phba->cfg_fcp_io_channel; i++) { - pring = phba->sli4_hba.fcp_wq[i]->pring; - pring->flag = 0; - pring->ringno = LPFC_FCP_RING; - INIT_LIST_HEAD(&pring->txq); - INIT_LIST_HEAD(&pring->txcmplq); - INIT_LIST_HEAD(&pring->iocb_continueq); - spin_lock_init(&pring->ring_lock); - } - for (i = 0; i < phba->cfg_nvme_io_channel; i++) { - pring = phba->sli4_hba.nvme_wq[i]->pring; + for (i = 0; i < phba->cfg_hdw_queue; i++) { + pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; pring->flag = 0; pring->ringno = LPFC_FCP_RING; + pring->txcmplq_cnt = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); @@ -10527,25 +10481,27 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba) pring = phba->sli4_hba.els_wq->pring; pring->flag = 0; pring->ringno = LPFC_ELS_RING; + pring->txcmplq_cnt = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); spin_lock_init(&pring->ring_lock); - if (phba->cfg_nvme_io_channel) { + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + for (i = 0; i < phba->cfg_hdw_queue; i++) 
{ + pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; + pring->flag = 0; + pring->ringno = LPFC_FCP_RING; + pring->txcmplq_cnt = 0; + INIT_LIST_HEAD(&pring->txq); + INIT_LIST_HEAD(&pring->txcmplq); + INIT_LIST_HEAD(&pring->iocb_continueq); + spin_lock_init(&pring->ring_lock); + } pring = phba->sli4_hba.nvmels_wq->pring; pring->flag = 0; pring->ringno = LPFC_ELS_RING; - INIT_LIST_HEAD(&pring->txq); - INIT_LIST_HEAD(&pring->txcmplq); - INIT_LIST_HEAD(&pring->iocb_continueq); - spin_lock_init(&pring->ring_lock); - } - - if (phba->cfg_fof) { - pring = phba->sli4_hba.oas_wq->pring; - pring->flag = 0; - pring->ringno = LPFC_FCP_RING; + pring->txcmplq_cnt = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); @@ -11327,6 +11283,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *abtsiocbp; union lpfc_wqe128 *abts_wqe; int retval; + int idx = cmdiocb->hba_wqidx; /* * There are certain command types we don't want to abort. And we @@ -11382,7 +11339,8 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, abtsiocbp->iocb_flag |= LPFC_IO_NVME; abtsiocbp->vport = vport; abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; - retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); + retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx], + abtsiocbp); if (retval) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, "6147 Failed abts issue_wqe with status x%x " @@ -11457,7 +11415,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd) { - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; int rc = 1; if (iocbq->vport != vport) @@ -11467,7 +11425,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) return rc; - lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); + lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); if (lpfc_cmd->pCmd == NULL) return rc; @@ -11694,14 +11652,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) { struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; struct lpfc_iocbq *abtsiocbq; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *iocbq; IOCB_t *icmd; int sum, i, ret_val; unsigned long iflags; - struct lpfc_sli_ring *pring_s4; + struct lpfc_sli_ring *pring_s4 = NULL; spin_lock_irqsave(&phba->hbalock, iflags); @@ -11719,17 +11677,46 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, cmd) != 0) continue; + /* Guard against IO completion being called at same time */ + lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); + spin_lock(&lpfc_cmd->buf_lock); + + if (!lpfc_cmd->pCmd) { + spin_unlock(&lpfc_cmd->buf_lock); + continue; + } + + if (phba->sli_rev == LPFC_SLI_REV4) { + pring_s4 = + phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring; + if (!pring_s4) { + spin_unlock(&lpfc_cmd->buf_lock); + continue; + } + /* Note: both hbalock and ring_lock must be set here */ + spin_lock(&pring_s4->ring_lock); + } + /* * If the iocbq is already being aborted, don't take a second * action, but do count it. 
*/ - if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) + if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || + !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); + spin_unlock(&lpfc_cmd->buf_lock); continue; + } /* issue ABTS for this IOCB based on iotag */ abtsiocbq = __lpfc_sli_get_iocbq(phba); - if (abtsiocbq == NULL) + if (!abtsiocbq) { + if (phba->sli_rev == LPFC_SLI_REV4) + spin_unlock(&pring_s4->ring_lock); + spin_unlock(&lpfc_cmd->buf_lock); continue; + } icmd = &iocbq->iocb; abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; @@ -11750,7 +11737,6 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, if (iocbq->iocb_flag & LPFC_IO_FOF) abtsiocbq->iocb_flag |= LPFC_IO_FOF; - lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); ndlp = lpfc_cmd->rdata->pnode; if (lpfc_is_link_up(phba) && @@ -11769,11 +11755,6 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; if (phba->sli_rev == LPFC_SLI_REV4) { - pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq); - if (!pring_s4) - continue; - /* Note: both hbalock and ring_lock must be set here */ - spin_lock(&pring_s4->ring_lock); ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, abtsiocbq, 0); spin_unlock(&pring_s4->ring_lock); @@ -11782,6 +11763,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, abtsiocbq, 0); } + spin_unlock(&lpfc_cmd->buf_lock); if (ret_val == IOCB_ERROR) __lpfc_sli_release_iocbq(phba, abtsiocbq); @@ -11816,7 +11798,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, { wait_queue_head_t *pdone_q; unsigned long iflags; - struct lpfc_scsi_buf *lpfc_cmd; + struct lpfc_io_buf *lpfc_cmd; spin_lock_irqsave(&phba->hbalock, iflags); if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { @@ -11845,7 +11827,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, /* Set the exchange busy flag for task management commands */ if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { - lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, + lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, cur_iocbq); lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; } @@ -12919,51 +12901,22 @@ lpfc_sli_intr_handler(int irq, void *dev_id) } /* lpfc_sli_intr_handler */ /** - * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event + * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event * @phba: pointer to lpfc hba data structure. * * This routine is invoked by the worker thread to process all the pending - * SLI4 FCP abort XRI events. + * SLI4 els abort xri events. 
**/ -void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) { struct lpfc_cq_event *cq_event; - /* First, declare the fcp xri abort event has been handled */ + /* First, declare the els xri abort event has been handled */ spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; + phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; spin_unlock_irq(&phba->hbalock); - /* Now, handle all the fcp xri abort events */ - while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { - /* Get the first event from the head of the event queue */ - spin_lock_irq(&phba->hbalock); - list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, - cq_event, struct lpfc_cq_event, list); - spin_unlock_irq(&phba->hbalock); - /* Notify aborted XRI for FCP work queue */ - lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); - /* Free the event processed back to the free pool */ - lpfc_sli4_cq_event_release(phba, cq_event); - } -} - -/** - * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked by the worker thread to process all the pending - * SLI4 els abort xri events. - **/ -void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) -{ - struct lpfc_cq_event *cq_event; - - /* First, declare the els xri abort event has been handled */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; - spin_unlock_irq(&phba->hbalock); - /* Now, handle all the els xri abort events */ - while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { + /* Now, handle all the els xri abort events */ + while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { /* Get the first event from the head of the event queue */ spin_lock_irq(&phba->hbalock); list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, @@ -13320,11 +13273,14 @@ out_no_mqe_complete: * Return: true if work posted to worker thread, otherwise false. 
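
/*
 * Illustrative sketch, not part of the patch: with the FCP variant removed,
 * the remaining ELS XRI-abort worker clears the "event pending" flag under
 * the lock, then repeatedly pops the head of the aborted-work list under the
 * lock and handles the entry with the lock dropped.  Plain C list and a
 * pthread mutex stand in for the driver's spinlock; names are illustrative.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cq_event { int xri; struct cq_event *next; };

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static struct cq_event *pending;
static bool abort_event_flag = true;

static void process_xri_abort_events(void)
{
        struct cq_event *evt;

        pthread_mutex_lock(&hbalock);
        abort_event_flag = false;               /* declare event handled */
        pthread_mutex_unlock(&hbalock);

        for (;;) {
                pthread_mutex_lock(&hbalock);
                evt = pending;                  /* remove the list head */
                if (evt)
                        pending = evt->next;
                pthread_mutex_unlock(&hbalock);
                if (!evt)
                        break;

                printf("aborted xri %d\n", evt->xri);   /* handle outside lock */
                free(evt);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct cq_event *evt = malloc(sizeof(*evt));
                evt->xri = 100 + i;
                evt->next = pending;
                pending = evt;
        }
        process_xri_abort_events();
        return 0;
}
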
**/ static bool -lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) +lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) { struct lpfc_mcqe mcqe; bool workposted; + cq->CQ_mbox++; + /* Copy the mailbox MCQE and convert endian order as needed */ lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); @@ -13443,17 +13399,8 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, switch (cq->subtype) { case LPFC_FCP: - cq_event = lpfc_cq_event_setup( - phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); - if (!cq_event) - return false; - spin_lock_irqsave(&phba->hbalock, iflags); - list_add_tail(&cq_event->list, - &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); - /* Set the fcp xri abort event flag */ - phba->hba_flag |= FCP_XRI_ABORT_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; + lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); + workposted = false; break; case LPFC_NVME_LS: /* NVME LS uses ELS resources */ case LPFC_ELS: @@ -13461,6 +13408,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); if (!cq_event) return false; + cq_event->hdwq = cq->hdwq; spin_lock_irqsave(&phba->hbalock, iflags); list_add_tail(&cq_event->list, &phba->sli4_hba.sp_els_xri_aborted_work_queue); @@ -13474,7 +13422,7 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, if (phba->nvmet_support) lpfc_sli4_nvmet_xri_aborted(phba, wcqe); else - lpfc_sli4_nvme_xri_aborted(phba, wcqe); + lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq); workposted = false; break; @@ -13592,7 +13540,7 @@ out: * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry * @phba: Pointer to HBA context object. * @cq: Pointer to the completion queue. - * @wcqe: Pointer to a completion queue entry. + * @cqe: Pointer to a completion queue entry. * * This routine process a slow-path work-queue or receive queue completion queue * entry. @@ -13684,7 +13632,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Save EQ associated with this CQ */ cq->assoc_qp = speq; - if (!queue_work(phba->wq, &cq->spwork)) + if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0390 Cannot schedule soft IRQ " "for CQ eqcqid=%d, cqid=%d on CPU %d\n", @@ -13692,60 +13640,129 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, } /** - * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry + * __lpfc_sli4_process_cq - Process elements of a CQ * @phba: Pointer to HBA context object. + * @cq: Pointer to CQ to be processed + * @handler: Routine to process each cqe + * @delay: Pointer to usdelay to set in case of rescheduling of the handler * - * This routine process a event queue entry from the slow-path event queue. - * It will check the MajorCode and MinorCode to determine this is for a - * completion event on a completion queue, if not, an error shall be logged - * and just return. Otherwise, it will get to the corresponding completion - * queue and process all the entries on that completion queue, rearm the - * completion queue, and then return. + * This routine processes completion queue entries in a CQ. While a valid + * queue element is found, the handler is called. During processing checks + * are made for periodic doorbell writes to let the hardware know of + * element consumption. + * + * If the max limit on cqes to process is hit, or there are no more valid + * entries, the loop stops. 
If we processed a sufficient number of elements, + * meaning there is sufficient load, rather than rearming and generating + * another interrupt, a cq rescheduling delay will be set. A delay of 0 + * indicates no rescheduling. * + * Returns True if work scheduled, False otherwise. **/ -static void -lpfc_sli4_sp_process_cq(struct work_struct *work) +static bool +__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, + bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_cqe *), unsigned long *delay) { - struct lpfc_queue *cq = - container_of(work, struct lpfc_queue, spwork); - struct lpfc_hba *phba = cq->phba; struct lpfc_cqe *cqe; bool workposted = false; - int ccount = 0; + int count = 0, consumed = 0; + bool arm = true; + + /* default - no reschedule */ + *delay = 0; + + if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) + goto rearm_and_exit; /* Process all the entries to the CQ */ + cqe = lpfc_sli4_cq_get(cq); + while (cqe) { +#if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME) + if (phba->ktime_on) + cq->isr_timestamp = ktime_get_ns(); + else + cq->isr_timestamp = 0; +#endif + workposted |= handler(phba, cq, cqe); + __lpfc_sli4_consume_cqe(phba, cq, cqe); + + consumed++; + if (!(++count % cq->max_proc_limit)) + break; + + if (!(count % cq->notify_interval)) { + phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, + LPFC_QUEUE_NOARM); + consumed = 0; + } + + cqe = lpfc_sli4_cq_get(cq); + } + if (count >= phba->cfg_cq_poll_threshold) { + *delay = 1; + arm = false; + } + + /* Track the max number of CQEs processed in 1 EQ */ + if (count > cq->CQ_max_cqe) + cq->CQ_max_cqe = count; + + cq->assoc_qp->EQ_cqe_cnt += count; + + /* Catch the no cq entry condition */ + if (unlikely(count == 0)) + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0369 No entry from completion queue " + "qid=%d\n", cq->queue_id); + + cq->queue_claimed = 0; + +rearm_and_exit: + phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, + arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); + + return workposted; +} + +/** + * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry + * @cq: pointer to CQ to process + * + * This routine calls the cq processing routine with a handler specific + * to the type of queue bound to it. + * + * The CQ routine returns two values: the first is the calling status, + * which indicates whether work was queued to the background discovery + * thread. If true, the routine should wakeup the discovery thread; + * the second is the delay parameter. If non-zero, rather than rearming + * the CQ and yet another interrupt, the CQ handler should be queued so + * that it is processed in a subsequent polling action. The value of + * the delay indicates when to reschedule it. 
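
/*
 * Illustrative sketch, not part of the patch: the shape of the new budgeted
 * CQ loop -- consume entries up to a per-invocation limit, acknowledge
 * consumed entries every notify_interval, and when the budget is exhausted
 * return a polling delay instead of re-arming for another interrupt.  The
 * cmpxchg() claim of the queue is omitted here, and the constants below are
 * stand-ins for cq->notify_interval, cq->max_proc_limit and
 * phba->cfg_cq_poll_threshold; nothing here is driver code.
 */
#include <stdio.h>

#define NOTIFY_INTERVAL  4      /* doorbell (ack) cadence */
#define MAX_PROC_LIMIT  16      /* per-invocation budget */
#define POLL_THRESHOLD  16      /* at/above this, reschedule instead of rearm */

struct cq_model { int pending; int acked; };

static void write_doorbell(struct cq_model *cq, int consumed, int arm)
{
        cq->acked += consumed;
        printf("doorbell: ack %d entries, arm=%d\n", consumed, arm);
}

/* Returns the reschedule delay; 0 means the CQ was re-armed. */
static unsigned long process_cq(struct cq_model *cq)
{
        int count = 0, consumed = 0, arm = 1;
        unsigned long delay = 0;

        while (cq->pending > 0) {
                cq->pending--;                  /* "handle" one entry */
                consumed++;
                if (!(++count % MAX_PROC_LIMIT))
                        break;                  /* budget exhausted */
                if (!(count % NOTIFY_INTERVAL)) {
                        write_doorbell(cq, consumed, 0);
                        consumed = 0;
                }
        }

        if (count >= POLL_THRESHOLD) {
                delay = 1;                      /* poll again shortly */
                arm = 0;                        /* skip the interrupt rearm */
        }
        write_doorbell(cq, consumed, arm);
        return delay;
}

int main(void)
{
        struct cq_model cq = { .pending = 40 };

        while (cq.pending)
                printf("delay=%lu, left=%d\n", process_cq(&cq), cq.pending);
        return 0;
}
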
+ **/ +static void +__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) +{ + struct lpfc_hba *phba = cq->phba; + unsigned long delay; + bool workposted = false; + + /* Process and rearm the CQ */ switch (cq->type) { case LPFC_MCQ: - while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); - if (!(++ccount % cq->entry_repost)) - break; - cq->CQ_mbox++; - } + workposted |= __lpfc_sli4_process_cq(phba, cq, + lpfc_sli4_sp_handle_mcqe, + &delay); break; case LPFC_WCQ: - while ((cqe = lpfc_sli4_cq_get(cq))) { - if (cq->subtype == LPFC_FCP || - cq->subtype == LPFC_NVME) { -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->ktime_on) - cq->isr_timestamp = ktime_get_ns(); - else - cq->isr_timestamp = 0; -#endif - workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, - cqe); - } else { - workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, - cqe); - } - if (!(++ccount % cq->entry_repost)) - break; - } - - /* Track the max number of CQEs processed in 1 EQ */ - if (ccount > cq->CQ_max_cqe) - cq->CQ_max_cqe = ccount; + if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME) + workposted |= __lpfc_sli4_process_cq(phba, cq, + lpfc_sli4_fp_handle_cqe, + &delay); + else + workposted |= __lpfc_sli4_process_cq(phba, cq, + lpfc_sli4_sp_handle_cqe, + &delay); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -13754,20 +13771,50 @@ lpfc_sli4_sp_process_cq(struct work_struct *work) return; } - /* Catch the no cq entry condition, log an error */ - if (unlikely(ccount == 0)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0371 No entry from the CQ: identifier " - "(x%x), type (%d)\n", cq->queue_id, cq->type); - - /* In any case, flash and re-arm the RCQ */ - phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM); + if (delay) { + if (!queue_delayed_work_on(cq->chann, phba->wq, + &cq->sched_spwork, delay)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0394 Cannot schedule soft IRQ " + "for cqid=%d on CPU %d\n", + cq->queue_id, cq->chann); + } /* wake up worker thread if there are works to be done */ if (workposted) lpfc_worker_wake_up(phba); } +/** + * lpfc_sli4_sp_process_cq - slow-path work handler when started by + * interrupt + * @work: pointer to work element + * + * translates from the work handler and calls the slow-path handler. + **/ +static void +lpfc_sli4_sp_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); + + __lpfc_sli4_sp_process_cq(cq); +} + +/** + * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer + * @work: pointer to work element + * + * translates from the work handler and calls the slow-path handler. + **/ +static void +lpfc_sli4_dly_sp_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(to_delayed_work(work), + struct lpfc_queue, sched_spwork); + + __lpfc_sli4_sp_process_cq(cq); +} + /** * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry * @phba: Pointer to HBA context object. @@ -13999,13 +14046,16 @@ out: /** * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry + * @phba: adapter with cq * @cq: Pointer to the completion queue. * @eqe: Pointer to fast-path completion queue entry. * * This routine process a fast-path work queue completion entry from fast-path * event queue for FCP command response completion. + * + * Return: true if work posted to worker thread, otherwise false. 
**/ -static int +static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_cqe *cqe) { @@ -14072,10 +14122,11 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, * completion queue, and then return. **/ static void -lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, - uint32_t qidx) +lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, + struct lpfc_eqe *eqe) { struct lpfc_queue *cq = NULL; + uint32_t qidx = eq->hdwq; uint16_t cqid, id; if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { @@ -14090,6 +14141,14 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Get the reference to the corresponding CQ */ cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + /* Use the fast lookup method first */ + if (cqid <= phba->sli4_hba.cq_max) { + cq = phba->sli4_hba.cq_lookup[cqid]; + if (cq) + goto work_cq; + } + + /* Next check for NVMET completion */ if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { id = phba->sli4_hba.nvmet_cqset[0]->queue_id; if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { @@ -14099,20 +14158,6 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, } } - if (phba->sli4_hba.nvme_cq_map && - (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { - /* Process NVME / NVMET command completion */ - cq = phba->sli4_hba.nvme_cq[qidx]; - goto process_cq; - } - - if (phba->sli4_hba.fcp_cq_map && - (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { - /* Process FCP command completion */ - cq = phba->sli4_hba.fcp_cq[qidx]; - goto process_cq; - } - if (phba->sli4_hba.nvmels_cq && (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { /* Process NVME unsol rcv */ @@ -14121,7 +14166,8 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Otherwise this is a Slow path event */ if (cq == NULL) { - lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); + lpfc_sli4_sp_handle_eqe(phba, eqe, + phba->sli4_hba.hdwq[qidx].hba_eq); return; } @@ -14134,10 +14180,8 @@ process_cq: return; } - /* Save EQ associated with this CQ */ - cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; - - if (!queue_work(phba->wq, &cq->irqwork)) +work_cq: + if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0363 Cannot schedule soft IRQ " "for CQ eqcqid=%d, cqid=%d on CPU %d\n", @@ -14145,219 +14189,73 @@ process_cq: } /** - * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry - * @phba: Pointer to HBA context object. - * @eqe: Pointer to fast-path event queue entry. + * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry + * @cq: Pointer to CQ to be processed * - * This routine process a event queue entry from the fast-path event queue. - * It will check the MajorCode and MinorCode to determine this is for a - * completion event on a completion queue, if not, an error shall be logged - * and just return. Otherwise, it will get to the corresponding completion - * queue and process all the entries on the completion queue, rearm the - * completion queue, and then return. + * This routine calls the cq processing routine with the handler for + * fast path CQEs. + * + * The CQ routine returns two values: the first is the calling status, + * which indicates whether work was queued to the background discovery + * thread. If true, the routine should wakeup the discovery thread; + * the second is the delay parameter. 
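
/*
 * Illustrative sketch, not part of the patch: the EQ handler now tries a
 * direct-index lookup table first -- an array indexed by completion-queue id,
 * bounded by the largest id created, with a NULL slot meaning "fall back to
 * the slower NVMET / slow-path search".  Structure and field names here are
 * invented for the demo, not driver symbols.
 */
#include <stdio.h>
#include <stdlib.h>

struct cq { unsigned int id; const char *name; };

struct cq_table {
        unsigned int cq_max;    /* highest id placed in the table */
        struct cq **lookup;     /* lookup[id] -> cq or NULL */
};

static struct cq *cq_find(struct cq_table *t, unsigned int cqid)
{
        if (cqid <= t->cq_max && t->lookup[cqid])
                return t->lookup[cqid];         /* O(1) fast path */
        return NULL;                            /* caller takes slow path */
}

int main(void)
{
        static struct cq fcp = { 42, "fcp cq" };
        struct cq_table t = { .cq_max = 64 };

        t.lookup = calloc(t.cq_max + 1, sizeof(*t.lookup));
        t.lookup[fcp.id] = &fcp;

        struct cq *hit = cq_find(&t, 42);
        printf("cqid 42 -> %s\n", hit ? hit->name : "slow path");
        printf("cqid 7  -> %s\n", cq_find(&t, 7) ? "hit" : "slow path");
        free(t.lookup);
        return 0;
}
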
If non-zero, rather than rearming + * the CQ and yet another interrupt, the CQ handler should be queued so + * that it is processed in a subsequent polling action. The value of + * the delay indicates when to reschedule it. **/ static void -lpfc_sli4_hba_process_cq(struct work_struct *work) +__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) { - struct lpfc_queue *cq = - container_of(work, struct lpfc_queue, irqwork); struct lpfc_hba *phba = cq->phba; - struct lpfc_cqe *cqe; + unsigned long delay; bool workposted = false; - int ccount = 0; - /* Process all the entries to the CQ */ - while ((cqe = lpfc_sli4_cq_get(cq))) { -#ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->ktime_on) - cq->isr_timestamp = ktime_get_ns(); - else - cq->isr_timestamp = 0; -#endif - workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); - if (!(++ccount % cq->entry_repost)) - break; - } - - /* Track the max number of CQEs processed in 1 EQ */ - if (ccount > cq->CQ_max_cqe) - cq->CQ_max_cqe = ccount; - cq->assoc_qp->EQ_cqe_cnt += ccount; - - /* Catch the no cq entry condition */ - if (unlikely(ccount == 0)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0369 No entry from fast-path completion " - "queue fcpcqid=%d\n", cq->queue_id); + /* process and rearm the CQ */ + workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, + &delay); - /* In any case, flash and re-arm the CQ */ - phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM); + if (delay) { + if (!queue_delayed_work_on(cq->chann, phba->wq, + &cq->sched_irqwork, delay)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0367 Cannot schedule soft IRQ " + "for cqid=%d on CPU %d\n", + cq->queue_id, cq->chann); + } /* wake up worker thread if there are works to be done */ if (workposted) lpfc_worker_wake_up(phba); } -static void -lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) -{ - struct lpfc_eqe *eqe; - - /* walk all the EQ entries and drop on the floor */ - while ((eqe = lpfc_sli4_eq_get(eq))) - ; - - /* Clear and re-arm the EQ */ - phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); -} - - /** - * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue - * entry - * @phba: Pointer to HBA context object. - * @eqe: Pointer to fast-path event queue entry. + * lpfc_sli4_hba_process_cq - fast-path work handler when started by + * interrupt + * @work: pointer to work element * - * This routine process a event queue entry from the Flash Optimized Fabric - * event queue. It will check the MajorCode and MinorCode to determine this - * is for a completion event on a completion queue, if not, an error shall be - * logged and just return. Otherwise, it will get to the corresponding - * completion queue and process all the entries on the completion queue, rearm - * the completion queue, and then return. + * translates from the work handler and calls the fast-path handler. 
**/ static void -lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) +lpfc_sli4_hba_process_cq(struct work_struct *work) { - struct lpfc_queue *cq; - uint16_t cqid; + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); - if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "9147 Not a valid completion " - "event: majorcode=x%x, minorcode=x%x\n", - bf_get_le32(lpfc_eqe_major_code, eqe), - bf_get_le32(lpfc_eqe_minor_code, eqe)); - return; - } - - /* Get the reference to the corresponding CQ */ - cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); - - /* Next check for OAS */ - cq = phba->sli4_hba.oas_cq; - if (unlikely(!cq)) { - if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "9148 OAS completion queue " - "does not exist\n"); - return; - } - - if (unlikely(cqid != cq->queue_id)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "9149 Miss-matched fast-path compl " - "queue id: eqcqid=%d, fcpcqid=%d\n", - cqid, cq->queue_id); - return; - } - - /* Save EQ associated with this CQ */ - cq->assoc_qp = phba->sli4_hba.fof_eq; - - /* CQ work will be processed on CPU affinitized to this IRQ */ - if (!queue_work(phba->wq, &cq->irqwork)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0367 Cannot schedule soft IRQ " - "for CQ eqcqid=%d, cqid=%d on CPU %d\n", - cqid, cq->queue_id, smp_processor_id()); + __lpfc_sli4_hba_process_cq(cq); } /** - * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device - * @irq: Interrupt number. - * @dev_id: The device context pointer. + * lpfc_sli4_hba_process_cq - fast-path work handler when started by timer + * @work: pointer to work element * - * This function is directly called from the PCI layer as an interrupt - * service routine when device with SLI-4 interface spec is enabled with - * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric - * IOCB ring event in the HBA. However, when the device is enabled with either - * MSI or Pin-IRQ interrupt mode, this function is called as part of the - * device-level interrupt handler. When the PCI slot is in error recovery - * or the HBA is undergoing initialization, the interrupt handler will not - * process the interrupt. The Flash Optimized Fabric ring event are handled in - * the intrrupt context. This function is called without any lock held. - * It gets the hbalock to access and update SLI data structures. Note that, - * the EQ to CQ are one-to-one map such that the EQ index is - * equal to that of CQ index. - * - * This function returns IRQ_HANDLED when interrupt is handled else it - * returns IRQ_NONE. + * translates from the work handler and calls the fast-path handler. 
**/ -irqreturn_t -lpfc_sli4_fof_intr_handler(int irq, void *dev_id) +static void +lpfc_sli4_dly_hba_process_cq(struct work_struct *work) { - struct lpfc_hba *phba; - struct lpfc_hba_eq_hdl *hba_eq_hdl; - struct lpfc_queue *eq; - struct lpfc_eqe *eqe; - unsigned long iflag; - int ecount = 0; + struct lpfc_queue *cq = container_of(to_delayed_work(work), + struct lpfc_queue, sched_irqwork); - /* Get the driver's phba structure from the dev_id */ - hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; - phba = hba_eq_hdl->phba; - - if (unlikely(!phba)) - return IRQ_NONE; - - /* Get to the EQ struct associated with this vector */ - eq = phba->sli4_hba.fof_eq; - if (unlikely(!eq)) - return IRQ_NONE; - - /* Check device state for handling interrupt */ - if (unlikely(lpfc_intr_state_check(phba))) { - /* Check again for link_state with lock held */ - spin_lock_irqsave(&phba->hbalock, iflag); - if (phba->link_state < LPFC_LINK_DOWN) - /* Flush, clear interrupt, and rearm the EQ */ - lpfc_sli4_eq_flush(phba, eq); - spin_unlock_irqrestore(&phba->hbalock, iflag); - return IRQ_NONE; - } - - /* - * Process all the event on FCP fast-path EQ - */ - while ((eqe = lpfc_sli4_eq_get(eq))) { - lpfc_sli4_fof_handle_eqe(phba, eqe); - if (!(++ecount % eq->entry_repost)) - break; - eq->EQ_processed++; - } - - /* Track the max number of EQEs processed in 1 intr */ - if (ecount > eq->EQ_max_eqe) - eq->EQ_max_eqe = ecount; - - - if (unlikely(ecount == 0)) { - eq->EQ_no_entry++; - - if (phba->intr_type == MSIX) - /* MSI-X treated interrupt served as no EQ share INT */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "9145 MSI-X interrupt with no EQE\n"); - else { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "9146 ISR interrupt with no EQE\n"); - /* Non MSI-X treated on interrupt as EQ share INT */ - return IRQ_NONE; - } - } - /* Always clear and re-arm the fast-path EQ */ - phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); - return IRQ_HANDLED; + __lpfc_sli4_hba_process_cq(cq); } /** @@ -14392,10 +14290,11 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) struct lpfc_hba *phba; struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *fpeq; - struct lpfc_eqe *eqe; unsigned long iflag; int ecount = 0; int hba_eqidx; + struct lpfc_eq_intr_info *eqi; + uint32_t icnt; /* Get the driver's phba structure from the dev_id */ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; @@ -14404,23 +14303,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) if (unlikely(!phba)) return IRQ_NONE; - if (unlikely(!phba->sli4_hba.hba_eq)) + if (unlikely(!phba->sli4_hba.hdwq)) return IRQ_NONE; /* Get to the EQ struct associated with this vector */ - fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; + fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq; if (unlikely(!fpeq)) return IRQ_NONE; - if (lpfc_fcp_look_ahead) { - if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) - phba->sli4_hba.sli4_eq_clr_intr(fpeq); - else { - atomic_inc(&hba_eq_hdl->hba_eq_in_use); - return IRQ_NONE; - } - } - /* Check device state for handling interrupt */ if (unlikely(lpfc_intr_state_check(phba))) { /* Check again for link_state with lock held */ @@ -14429,36 +14319,25 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) /* Flush, clear interrupt, and rearm the EQ */ lpfc_sli4_eq_flush(phba, fpeq); spin_unlock_irqrestore(&phba->hbalock, iflag); - if (lpfc_fcp_look_ahead) - atomic_inc(&hba_eq_hdl->hba_eq_in_use); return IRQ_NONE; } - /* - * Process all the event on FCP fast-path EQ - */ - while ((eqe = lpfc_sli4_eq_get(fpeq))) { - lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); - 
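
/*
 * Illustrative sketch, not part of the patch: the fast-path handler now bumps
 * a per-CPU interrupt counter and, once it crosses a trigger threshold,
 * pushes the EQ to its maximum coalescing delay.  The configuration gating in
 * the real code (single IRQ channel, cfg_auto_imax, EQ delay register
 * support) is omitted, and the constants are stand-ins for the driver's
 * LPFC_EQD_ISR_TRIGGER / LPFC_MAX_AUTO_EQ_DELAY values.
 */
#include <stdio.h>

#define NCPU            4
#define ISR_TRIGGER  1000       /* stand-in trigger threshold */
#define MAX_EQ_DELAY   16       /* stand-in maximum delay setting */

struct eq_model { unsigned int q_mode; int last_cpu; };

static unsigned int icnt[NCPU]; /* models the per-CPU interrupt counters */

static void fast_path_isr(struct eq_model *eq, int cpu)
{
        unsigned int n = ++icnt[cpu];

        eq->last_cpu = cpu;
        if (n > ISR_TRIGGER && eq->q_mode != MAX_EQ_DELAY) {
                eq->q_mode = MAX_EQ_DELAY;      /* throttle the interrupt rate */
                printf("cpu%d: %u interrupts, raising EQ delay to %u\n",
                       cpu, n, eq->q_mode);
        }
}

int main(void)
{
        struct eq_model eq = { 0, -1 };

        for (int i = 0; i < 1200; i++)
                fast_path_isr(&eq, 0);
        printf("final q_mode=%u last_cpu=%d\n", eq.q_mode, eq.last_cpu);
        return 0;
}
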
if (!(++ecount % fpeq->entry_repost)) - break; - fpeq->EQ_processed++; - } + eqi = phba->sli4_hba.eq_info; + icnt = this_cpu_inc_return(eqi->icnt); + fpeq->last_cpu = smp_processor_id(); - /* Track the max number of EQEs processed in 1 intr */ - if (ecount > fpeq->EQ_max_eqe) - fpeq->EQ_max_eqe = ecount; + if (icnt > LPFC_EQD_ISR_TRIGGER && + phba->cfg_irq_chann == 1 && + phba->cfg_auto_imax && + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); - /* Always clear and re-arm the fast-path EQ */ - phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM); + /* process and rearm the EQ */ + ecount = lpfc_sli4_process_eq(phba, fpeq); if (unlikely(ecount == 0)) { fpeq->EQ_no_entry++; - - if (lpfc_fcp_look_ahead) { - atomic_inc(&hba_eq_hdl->hba_eq_in_use); - return IRQ_NONE; - } - if (phba->intr_type == MSIX) /* MSI-X treated interrupt served as no EQ share INT */ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, @@ -14468,9 +14347,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) return IRQ_NONE; } - if (lpfc_fcp_look_ahead) - atomic_inc(&hba_eq_hdl->hba_eq_in_use); - return IRQ_HANDLED; } /* lpfc_sli4_fp_intr_handler */ @@ -14508,20 +14384,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) /* * Invoke fast-path host attention interrupt handling as appropriate. */ - for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { + for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, &phba->sli4_hba.hba_eq_hdl[qidx]); if (hba_irq_rc == IRQ_HANDLED) hba_handled |= true; } - if (phba->cfg_fof) { - hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, - &phba->sli4_hba.hba_eq_hdl[qidx]); - if (hba_irq_rc == IRQ_HANDLED) - hba_handled |= true; - } - return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; } /* lpfc_sli4_intr_handler */ @@ -14553,6 +14422,9 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue) kfree(queue->rqbp); } + if (!list_empty(&queue->cpu_list)) + list_del(&queue->cpu_list); + if (!list_empty(&queue->wq_list)) list_del(&queue->wq_list); @@ -14601,6 +14473,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, INIT_LIST_HEAD(&queue->wqfull_list); INIT_LIST_HEAD(&queue->page_list); INIT_LIST_HEAD(&queue->child_list); + INIT_LIST_HEAD(&queue->cpu_list); /* Set queue parameters now. If the system cannot provide memory * resources, the free routine needs to know what was allocated. @@ -14633,8 +14506,10 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, } INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); + INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq); + INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq); - /* entry_repost will be set during q creation */ + /* notify_interval will be set during q creation */ return queue; out_fail: @@ -14671,43 +14546,76 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) } /** - * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs - * @phba: HBA structure that indicates port to create a queue on. - * @startq: The starting FCP EQ to modify + * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs + * @phba: HBA structure that EQs are on. + * @startq: The starting EQ index to modify + * @numq: The number of EQs (consecutive indexes) to modify + * @usdelay: amount of delay * - * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 
- * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be - * updated in one mailbox command. + * This function revises the EQ delay on 1 or more EQs. The EQ delay + * is set either by writing to a register (if supported by the SLI Port) + * or by mailbox command. The mailbox command allows several EQs to be + * updated at once. * - * The @phba struct is used to send mailbox command to HBA. The @startq - * is used to get the starting FCP EQ to change. - * This function is asynchronous and will wait for the mailbox - * command to finish before continuing. + * The @phba struct is used to send a mailbox command to HBA. The @startq + * is used to get the starting EQ index to change. The @numq value is + * used to specify how many consecutive EQ indexes, starting at EQ index, + * are to be changed. This function is asynchronous and will wait for any + * mailbox commands to finish before returning. * - * On success this function will return a zero. If unable to allocate enough - * memory this function will return -ENOMEM. If the queue create mailbox command - * fails this function will return -ENXIO. + * On success this function will return a zero. If unable to allocate + * enough memory this function will return -ENOMEM. If a mailbox command + * fails this function will return -ENXIO. Note: on ENXIO, some EQs may + * have had their delay multipler changed. **/ -int +void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, - uint32_t numq, uint32_t imax) + uint32_t numq, uint32_t usdelay) { struct lpfc_mbx_modify_eq_delay *eq_delay; LPFC_MBOXQ_t *mbox; struct lpfc_queue *eq; - int cnt, rc, length, status = 0; + int cnt = 0, rc, length; uint32_t shdr_status, shdr_add_status; - uint32_t result, val; + uint32_t dmult; int qidx; union lpfc_sli4_cfg_shdr *shdr; - uint16_t dmult; - if (startq >= phba->io_channel_irqs) - return 0; + if (startq >= phba->cfg_irq_chann) + return; + + if (usdelay > 0xFFFF) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, + "6429 usdelay %d too large. Scaled down to " + "0xFFFF.\n", usdelay); + usdelay = 0xFFFF; + } + + /* set values by EQ_DELAY register if supported */ + if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { + for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { + eq = phba->sli4_hba.hdwq[qidx].hba_eq; + if (!eq) + continue; + + lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); + + if (++cnt >= numq) + break; + } + + return; + } + + /* Otherwise, set values by mailbox cmd */ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, + "6428 Failed allocating mailbox cmd buffer." 
+ " EQ delay was not set.\n"); + return; + } length = (sizeof(struct lpfc_mbx_modify_eq_delay) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, @@ -14716,45 +14624,22 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, eq_delay = &mbox->u.mqe.un.eq_delay; /* Calculate delay multiper from maximum interrupt per second */ - result = imax / phba->io_channel_irqs; - if (result > LPFC_DMULT_CONST || result == 0) - dmult = 0; - else - dmult = LPFC_DMULT_CONST/result - 1; + dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; + if (dmult) + dmult--; if (dmult > LPFC_DMULT_MAX) dmult = LPFC_DMULT_MAX; - cnt = 0; - for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { - eq = phba->sli4_hba.hba_eq[qidx]; + for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { + eq = phba->sli4_hba.hdwq[qidx].hba_eq; if (!eq) continue; - eq->q_mode = imax; + eq->q_mode = usdelay; eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; eq_delay->u.request.eq[cnt].phase = 0; eq_delay->u.request.eq[cnt].delay_multi = dmult; - cnt++; - - /* q_mode is only used for auto_imax */ - if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { - /* Use EQ Delay Register method for q_mode */ - /* Convert for EQ Delay register */ - val = phba->cfg_fcp_imax; - if (val) { - /* First, interrupts per sec per EQ */ - val = phba->cfg_fcp_imax / - phba->io_channel_irqs; - - /* us delay between each interrupt */ - val = LPFC_SEC_TO_USEC / val; - } - eq->q_mode = val; - } else { - eq->q_mode = imax; - } - - if (cnt >= numq) + if (++cnt >= numq) break; } eq_delay->u.request.num_eq = cnt; @@ -14772,10 +14657,9 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, "2512 MODIFY_EQ_DELAY mailbox failed with " "status x%x add_status x%x, mbx status x%x\n", shdr_status, shdr_add_status, rc); - status = -ENXIO; } mempool_free(mbox, phba->mbox_mem_pool); - return status; + return; } /** @@ -14900,8 +14784,8 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) if (eq->queue_id == 0xFFFF) status = -ENXIO; eq->host_index = 0; - eq->hba_index = 0; - eq->entry_repost = LPFC_EQ_REPOST; + eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; + eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; mempool_free(mbox, phba->mbox_mem_pool); return status; @@ -15039,10 +14923,13 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, cq->subtype = subtype; cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); cq->assoc_qid = eq->queue_id; + cq->assoc_qp = eq; cq->host_index = 0; - cq->hba_index = 0; - cq->entry_repost = LPFC_CQ_REPOST; + cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; + cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count); + if (cq->queue_id > phba->sli4_hba.cq_max) + phba->sli4_hba.cq_max = cq->queue_id; out: mempool_free(mbox, phba->mbox_mem_pool); return status; @@ -15052,7 +14939,7 @@ out: * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ * @phba: HBA structure that indicates port to create a queue on. * @cqp: The queue structure array to use to create the completion queues. - * @eqp: The event queue array to bind these completion queues to. + * @hdwq: The hardware queue array with the EQ to bind completion queues to. 
* * This function creates a set of completion queue, s to support MRQ * as detailed in @cqp, on a port, @@ -15072,7 +14959,8 @@ out: **/ int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, - struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) + struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, + uint32_t subtype) { struct lpfc_queue *cq; struct lpfc_queue *eq; @@ -15087,7 +14975,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, /* sanity check on queue memory */ numcq = phba->cfg_nvmet_mrq; - if (!cqp || !eqp || !numcq) + if (!cqp || !hdwq || !numcq) return -ENODEV; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -15114,7 +15002,7 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, for (idx = 0; idx < numcq; idx++) { cq = cqp[idx]; - eq = eqp[idx]; + eq = hdwq[idx].hba_eq; if (!cq || !eq) { status = -ENOMEM; goto out; @@ -15247,9 +15135,11 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, cq->type = type; cq->subtype = subtype; cq->assoc_qid = eq->queue_id; + cq->assoc_qp = eq; cq->host_index = 0; - cq->hba_index = 0; - cq->entry_repost = LPFC_CQ_REPOST; + cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; + cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, + cq->entry_count); cq->chann = idx; rc = 0; @@ -15287,6 +15177,8 @@ lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, for (idx = 0; idx < numcq; idx++) { cq = cqp[idx]; cq->queue_id = rc + idx; + if (cq->queue_id > phba->sli4_hba.cq_max) + phba->sli4_hba.cq_max = cq->queue_id; } out: @@ -15499,7 +15391,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, mq->subtype = subtype; mq->host_index = 0; mq->hba_index = 0; - mq->entry_repost = LPFC_MQ_REPOST; /* link the mq onto the parent cq child list */ list_add_tail(&mq->list, &cq->child_list); @@ -15765,7 +15656,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, wq->subtype = subtype; wq->host_index = 0; wq->hba_index = 0; - wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; + wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; /* link the wq onto the parent cq child list */ list_add_tail(&wq->list, &cq->child_list); @@ -15959,7 +15850,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; - hrq->entry_repost = LPFC_RQ_REPOST; + hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; /* now create the data queue */ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, @@ -16052,7 +15943,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; - drq->entry_repost = LPFC_RQ_REPOST; + drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; /* link the header and data RQs onto the parent cq child list */ list_add_tail(&hrq->list, &cq->child_list); @@ -16210,7 +16101,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; - hrq->entry_repost = LPFC_RQ_REPOST; + hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; drq->db_format = LPFC_DB_RING_FORMAT; drq->db_regaddr = phba->sli4_hba.RQDBregaddr; @@ -16219,7 +16110,7 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; - drq->entry_repost = LPFC_RQ_REPOST; + drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; list_add_tail(&hrq->list, &cq->child_list); list_add_tail(&drq->list, &cq->child_list); @@ -16279,6 +16170,7 @@ 
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) /* sanity check on queue memory */ if (!eq) return -ENODEV; + mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; @@ -16828,22 +16720,21 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, } /** - * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware + * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware * @phba: pointer to lpfc hba data structure. - * @sblist: pointer to scsi buffer list. + * @nblist: pointer to nvme buffer list. * @count: number of scsi buffers on the list. * * This routine is invoked to post a block of @count scsi sgl pages from a - * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. + * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. * No Lock is held. * **/ -int -lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, - struct list_head *sblist, - int count) +static int +lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, + int count) { - struct lpfc_scsi_buf *psb; + struct lpfc_io_buf *lpfc_ncmd; struct lpfc_mbx_post_uembed_sgl_page1 *sgl; struct sgl_page_pairs *sgl_pg_pairs; void *viraddr; @@ -16861,25 +16752,25 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); if (reqlen > SLI4_PAGE_SIZE) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0217 Block sgl registration required DMA " + "6118 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); return -ENOMEM; } mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0283 Failed to allocate mbox cmd memory\n"); + "6119 Failed to allocate mbox cmd memory\n"); return -ENOMEM; } /* Allocate DMA memory and set up the non-embedded mailbox command */ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, - LPFC_SLI4_MBX_NEMBED); + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, + reqlen, LPFC_SLI4_MBX_NEMBED); if (alloclen < reqlen) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2561 Allocated DMA memory size (%d) is " + "6120 Allocated DMA memory size (%d) is " "less than the requested DMA memory " "size (%d)\n", alloclen, reqlen); lpfc_sli4_mbox_cmd_free(phba, mbox); @@ -16889,55 +16780,184 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, /* Get the first SGE entry from the non-embedded DMA memory */ viraddr = mbox->sge_array->addr[0]; - /* Set up the SGL pages in the non-embedded DMA pages */ - sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; - sgl_pg_pairs = &sgl->sgl_pg_pairs; + /* Set up the SGL pages in the non-embedded DMA pages */ + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + pg_pairs = 0; + list_for_each_entry(lpfc_ncmd, nblist, list) { + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); + sgl_pg_pairs->sgl_pg1_addr_hi = + cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); + /* Keep the first xritag on the list */ + if (pg_pairs == 0) + xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; + sgl_pg_pairs++; + 
pg_pairs++; + } + bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + /* Perform endian conversion if necessary */ + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) { + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + } else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "6125 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list + * @phba: pointer to lpfc hba data structure. + * @post_nblist: pointer to the nvme buffer list. + * + * This routine walks a list of nvme buffers that was passed in. It attempts + * to construct blocks of nvme buffer sgls which contains contiguous xris and + * uses the non-embedded SGL block post mailbox commands to post to the port. + * For single NVME buffer sgl with non-contiguous xri, if any, it shall use + * embedded SGL post mailbox command for posting. The @post_nblist passed in + * must be local list, thus no lock is needed when manipulate the list. + * + * Returns: 0 = failure, non-zero number of successfully posted buffers. + **/ +int +lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, + struct list_head *post_nblist, int sb_count) +{ + struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; + int status, sgl_size; + int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; + dma_addr_t pdma_phys_sgl1; + int last_xritag = NO_XRI; + int cur_xritag; + LIST_HEAD(prep_nblist); + LIST_HEAD(blck_nblist); + LIST_HEAD(nvme_nblist); + + /* sanity check */ + if (sb_count <= 0) + return -EINVAL; + + sgl_size = phba->cfg_sg_dma_buf_size; + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { + list_del_init(&lpfc_ncmd->list); + block_cnt++; + if ((last_xritag != NO_XRI) && + (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { + /* a hole in xri block, form a sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt - 1; + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + block_cnt = 1; + } else { + /* prepare list for next posting block */ + list_add_tail(&lpfc_ncmd->list, &prep_nblist); + /* enough sgls for non-embed sgl mbox command */ + if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + block_cnt = 0; + } + } + num_posting++; + last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + + /* end of repost sgl list condition for NVME buffers */ + if (num_posting == sb_count) { + if (post_cnt == 0) { + /* last sgl posting block */ + list_splice_init(&prep_nblist, &blck_nblist); + post_cnt = block_cnt; + } else if (block_cnt == 1) { + /* last single sgl with non-contiguous xri */ + if (sgl_size > SGL_PAGE_SIZE) + pdma_phys_sgl1 = + lpfc_ncmd->dma_phys_sgl + + SGL_PAGE_SIZE; + else + pdma_phys_sgl1 = 0; + cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; + status = lpfc_sli4_post_sgl( + phba, lpfc_ncmd->dma_phys_sgl, + pdma_phys_sgl1, cur_xritag); + if 
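
/*
 * Illustrative sketch, not part of the patch: a simplified version of the
 * grouping rule lpfc_sli4_post_io_sgl_list applies -- buffers are batched
 * while their XRI tags stay consecutive, a batch is flushed when a hole
 * appears or when it reaches the non-embedded mailbox capacity, and a lone
 * buffer is posted with the single-SGL command.  BLOCK_CAP is a stand-in for
 * LPFC_NEMBED_MBOX_SGL_CNT and the list handling is reduced to an array walk.
 */
#include <stdio.h>

#define BLOCK_CAP 4     /* stand-in for the per-mailbox SGL capacity */

static void post_block(const int *xris, int count)
{
        printf("post block of %d: xri %d..%d\n", count, xris[0], xris[count - 1]);
}

static void post_sgl_list(const int *xris, int n)
{
        int start = 0;

        for (int i = 1; i <= n; i++) {
                int hole = (i == n) || (xris[i] != xris[i - 1] + 1);
                int full = (i - start) == BLOCK_CAP;

                if (!hole && !full)
                        continue;
                if (i - start == 1)
                        /* lone buffer: posted with the single-SGL command */
                        printf("post single: xri %d\n", xris[start]);
                else
                        post_block(&xris[start], i - start);
                start = i;
        }
}

int main(void)
{
        int xris[] = { 10, 11, 12, 13, 14, 20, 21, 30 };

        post_sgl_list(xris, sizeof(xris) / sizeof(xris[0]));
        return 0;
}
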
(status) { + /* Post error. Buffer unavailable. */ + lpfc_ncmd->flags |= + LPFC_SBUF_NOT_POSTED; + } else { + /* Post success. Bffer available. */ + lpfc_ncmd->flags &= + ~LPFC_SBUF_NOT_POSTED; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + /* success, put on NVME buffer sgl list */ + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } + } + + /* continue until a nembed page worth of sgls */ + if (post_cnt == 0) + continue; + + /* post block of NVME buffer list sgls */ + status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, + post_cnt); + + /* don't reset xirtag due to hole in xri block */ + if (block_cnt == 0) + last_xritag = NO_XRI; - pg_pairs = 0; - list_for_each_entry(psb, sblist, list) { - /* Set up the sge entry */ - sgl_pg_pairs->sgl_pg0_addr_lo = - cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); - sgl_pg_pairs->sgl_pg0_addr_hi = - cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; - sgl_pg_pairs->sgl_pg1_addr_lo = - cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); - sgl_pg_pairs->sgl_pg1_addr_hi = - cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); - /* Keep the first xritag on the list */ - if (pg_pairs == 0) - xritag_start = psb->cur_iocbq.sli4_xritag; - sgl_pg_pairs++; - pg_pairs++; - } - bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); - /* Perform endian conversion if necessary */ - sgl->word0 = cpu_to_le32(sgl->word0); + /* reset NVME buffer post count for next round of posting */ + post_cnt = 0; - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (rc != MBX_TIMEOUT) - lpfc_sli4_mbox_cmd_free(phba, mbox); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2564 POST_SGL_BLOCK mailbox command failed " - "status x%x add_status x%x mbx status x%x\n", - shdr_status, shdr_add_status, rc); - rc = -ENXIO; + /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ + while (!list_empty(&blck_nblist)) { + list_remove_head(&blck_nblist, lpfc_ncmd, + struct lpfc_io_buf, list); + if (status) { + /* Post error. Mark buffer unavailable. */ + lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; + } else { + /* Post success, Mark buffer available. */ + lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; + lpfc_ncmd->status = IOSTAT_SUCCESS; + num_posted++; + } + list_add_tail(&lpfc_ncmd->list, &nvme_nblist); + } } - return rc; + /* Push NVME buffers with sgl posted to the available list */ + lpfc_io_buf_replenish(phba, &nvme_nblist); + + return num_posted; } /** @@ -19500,7 +19520,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) if (phba->link_flag & LS_MDS_LOOPBACK) { /* MDS WQE are posted only to first WQ*/ - wq = phba->sli4_hba.fcp_wq[0]; + wq = phba->sli4_hba.hdwq[0].fcp_wq; if (unlikely(!wq)) return 0; pring = wq->pring; @@ -19708,7 +19728,7 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, * @pwqe: Pointer to command WQE. 
**/ int -lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, +lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, struct lpfc_iocbq *pwqe) { union lpfc_wqe128 *wqe = &pwqe->wqe; @@ -19722,7 +19742,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, /* NVME_LS and NVME_LS ABTS requests. */ if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { pring = phba->sli4_hba.nvmels_wq->pring; - spin_lock_irqsave(&pring->ring_lock, iflags); + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, + qp, wq_access); sglq = __lpfc_sli_get_els_sglq(phba, pwqe); if (!sglq) { spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -19750,12 +19771,13 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, /* NVME_FCREQ and NVME_ABTS requests */ if (pwqe->iocb_flag & LPFC_IO_NVME) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ - pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; + wq = qp->nvme_wq; + pring = wq->pring; - spin_lock_irqsave(&pring->ring_lock, iflags); - wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; - bf_set(wqe_cqid, &wqe->generic.wqe_com, - phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); + + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, + qp, wq_access); ret = lpfc_sli4_wq_put(wq, wqe); if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -19769,9 +19791,9 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, /* NVMET requests */ if (pwqe->iocb_flag & LPFC_IO_NVMET) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ - pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; + wq = qp->nvme_wq; + pring = wq->pring; - spin_lock_irqsave(&pring->ring_lock, iflags); ctxp = pwqe->context2; sglq = ctxp->ctxbuf->sglq; if (pwqe->sli4_xritag == NO_XRI) { @@ -19780,9 +19802,10 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, } bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, pwqe->sli4_xritag); - wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; - bf_set(wqe_cqid, &wqe->generic.wqe_com, - phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); + + lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, + qp, wq_access); ret = lpfc_sli4_wq_put(wq, wqe); if (ret) { spin_unlock_irqrestore(&pring->ring_lock, iflags); @@ -19794,3 +19817,647 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, } return WQE_ERROR; } + +#ifdef LPFC_MXP_STAT +/** + * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * The purpose of this routine is to take a snapshot of pbl, pvt and busy count + * 15 seconds after a test case is running. + * + * The user should call lpfc_debugfs_multixripools_write before running a test + * case to clear stat_snapshot_taken. Then the user starts a test case. During + * test case is running, stat_snapshot_taken is incremented by 1 every time when + * this routine is called from heartbeat timer. When stat_snapshot_taken is + * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 
+ **/ +void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_pbl_pool *pbl_pool; + u32 txcmplq_cnt; + + qp = &phba->sli4_hba.hdwq[hwqid]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + return; + + if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { + pvt_pool = &qp->p_multixri_pool->pvt_pool; + pbl_pool = &qp->p_multixri_pool->pbl_pool; + txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; + if (qp->nvme_wq) + txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; + + multixri_pool->stat_pbl_count = pbl_pool->count; + multixri_pool->stat_pvt_count = pvt_pool->count; + multixri_pool->stat_busy_count = txcmplq_cnt; + } + + multixri_pool->stat_snapshot_taken++; +} +#endif + +/** + * lpfc_adjust_pvt_pool_count - Adjust private pool count + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * This routine moves some XRIs from private to public pool when private pool + * is not busy. + **/ +void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_multixri_pool *multixri_pool; + u32 io_req_count; + u32 prev_io_req_count; + + multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; + if (!multixri_pool) + return; + io_req_count = multixri_pool->io_req_count; + prev_io_req_count = multixri_pool->prev_io_req_count; + + if (prev_io_req_count != io_req_count) { + /* Private pool is busy */ + multixri_pool->prev_io_req_count = io_req_count; + } else { + /* Private pool is not busy. + * Move XRIs from private to public pool. + */ + lpfc_move_xri_pvt_to_pbl(phba, hwqid); + } +} + +/** + * lpfc_adjust_high_watermark - Adjust high watermark + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * This routine sets high watermark as number of outstanding XRIs, + * but make sure the new value is between xri_limit/2 and xri_limit. + **/ +void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) +{ + u32 new_watermark; + u32 watermark_max; + u32 watermark_min; + u32 xri_limit; + u32 txcmplq_cnt; + u32 abts_io_bufs; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_sli4_hdw_queue *qp; + + qp = &phba->sli4_hba.hdwq[hwqid]; + multixri_pool = qp->p_multixri_pool; + if (!multixri_pool) + return; + xri_limit = multixri_pool->xri_limit; + + watermark_max = xri_limit; + watermark_min = xri_limit / 2; + + txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; + abts_io_bufs = qp->abts_scsi_io_bufs; + if (qp->nvme_wq) { + txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; + abts_io_bufs += qp->abts_nvme_io_bufs; + } + + new_watermark = txcmplq_cnt + abts_io_bufs; + new_watermark = min(watermark_max, new_watermark); + new_watermark = max(watermark_min, new_watermark); + multixri_pool->pvt_pool.high_watermark = new_watermark; + +#ifdef LPFC_MXP_STAT + multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, + new_watermark); +#endif +} + +/** + * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * + * This routine is called from hearbeat timer when pvt_pool is idle. + * All free XRIs are moved from private to public pool on hwqid with 2 steps. + * The first step moves (all - low_watermark) amount of XRIs. + * The second step moves the rest of XRIs. 
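
/*
 * Illustrative sketch, not part of the patch: the two heartbeat-time tuning
 * rules introduced above -- the private-pool high watermark tracks the number
 * of outstanding I/Os but is clamped to [xri_limit/2, xri_limit], and a
 * private pool whose request counter has not moved since the previous tick is
 * treated as idle.  Field names are illustrative, not driver symbols.
 */
#include <stdio.h>

struct pool_stats {
        unsigned int xri_limit;
        unsigned int io_req_count, prev_io_req_count;
        unsigned int high_watermark;
};

static unsigned int clamp_u32(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static void adjust_high_watermark(struct pool_stats *p, unsigned int outstanding)
{
        p->high_watermark = clamp_u32(outstanding, p->xri_limit / 2, p->xri_limit);
}

static int pvt_pool_idle(struct pool_stats *p)
{
        int idle = (p->io_req_count == p->prev_io_req_count);

        p->prev_io_req_count = p->io_req_count; /* re-arm for the next tick */
        return idle;
}

int main(void)
{
        struct pool_stats p = { .xri_limit = 512, .io_req_count = 7 };

        adjust_high_watermark(&p, 80);          /* below the floor */
        printf("hwm=%u\n", p.high_watermark);   /* 256 */
        adjust_high_watermark(&p, 700);         /* above the ceiling */
        printf("hwm=%u\n", p.high_watermark);   /* 512 */
        printf("idle=%d\n", pvt_pool_idle(&p)); /* 0: counter moved */
        printf("idle=%d\n", pvt_pool_idle(&p)); /* 1: no new requests */
        return 0;
}
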
+ **/ +void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct list_head tmp_list; + u32 tmp_count; + + qp = &phba->sli4_hba.hdwq[hwqid]; + pbl_pool = &qp->p_multixri_pool->pbl_pool; + pvt_pool = &qp->p_multixri_pool->pvt_pool; + tmp_count = 0; + + lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); + lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); + + if (pvt_pool->count > pvt_pool->low_watermark) { + /* Step 1: move (all - low_watermark) from pvt_pool + * to pbl_pool + */ + + /* Move low watermark of bufs from pvt_pool to tmp_list */ + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pvt_pool->list, list) { + list_move_tail(&lpfc_ncmd->list, &tmp_list); + tmp_count++; + if (tmp_count >= pvt_pool->low_watermark) + break; + } + + /* Move all bufs from pvt_pool to pbl_pool */ + list_splice_init(&pvt_pool->list, &pbl_pool->list); + + /* Move all bufs from tmp_list to pvt_pool */ + list_splice(&tmp_list, &pvt_pool->list); + + pbl_pool->count += (pvt_pool->count - tmp_count); + pvt_pool->count = tmp_count; + } else { + /* Step 2: move the rest from pvt_pool to pbl_pool */ + list_splice_init(&pvt_pool->list, &pbl_pool->list); + pbl_pool->count += pvt_pool->count; + pvt_pool->count = 0; + } + + spin_unlock(&pvt_pool->lock); + spin_unlock_irqrestore(&pbl_pool->lock, iflag); +} + +/** + * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool + * @phba: pointer to lpfc hba data structure + * @pbl_pool: specified public free XRI pool + * @pvt_pool: specified private free XRI pool + * @count: number of XRIs to move + * + * This routine tries to move some free common bufs from the specified pbl_pool + * to the specified pvt_pool. It might move less than count XRIs if there's not + * enough in public pool. + * + * Return: + * true - if XRIs are successfully moved from the specified pbl_pool to the + * specified pvt_pool + * false - if the specified pbl_pool is empty or locked by someone else + **/ +static bool +_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, + struct lpfc_pbl_pool *pbl_pool, + struct lpfc_pvt_pool *pvt_pool, u32 count) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + int ret; + + ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); + if (ret) { + if (pbl_pool->count) { + /* Move a batch of XRIs from public to private pool */ + lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); + list_for_each_entry_safe(lpfc_ncmd, + lpfc_ncmd_next, + &pbl_pool->list, + list) { + list_move_tail(&lpfc_ncmd->list, + &pvt_pool->list); + pvt_pool->count++; + pbl_pool->count--; + count--; + if (count == 0) + break; + } + + spin_unlock(&pvt_pool->lock); + spin_unlock_irqrestore(&pbl_pool->lock, iflag); + return true; + } + spin_unlock_irqrestore(&pbl_pool->lock, iflag); + } + + return false; +} + +/** + * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool + * @phba: pointer to lpfc hba data structure. + * @hwqid: belong to which HWQ. + * @count: number of XRIs to move + * + * This routine tries to find some free common bufs in one of public pools with + * Round Robin method. The search always starts from local hwqid, then the next + * HWQ which was found last time (rrb_next_hwqid). 
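
/*
 * Illustrative sketch, not part of the patch: the net effect of the idle-time
 * move from a private pool to the public pool -- when the private pool holds
 * more than its low watermark, everything above the watermark is published
 * and low_watermark buffers stay local; otherwise the remainder is published
 * too.  The driver's list splicing is modeled with plain counters.
 */
#include <stdio.h>

struct xri_pool { unsigned int count; };

static void move_pvt_to_pbl(struct xri_pool *pvt, struct xri_pool *pbl,
                            unsigned int low_watermark)
{
        unsigned int keep = pvt->count > low_watermark ? low_watermark : 0;
        unsigned int moved = pvt->count - keep;

        pbl->count += moved;    /* step 1 keeps the watermark, step 2 empties */
        pvt->count = keep;
        printf("moved %u XRIs to public, kept %u private\n", moved, keep);
}

int main(void)
{
        struct xri_pool pvt = { 48 }, pbl = { 0 };

        move_pvt_to_pbl(&pvt, &pbl, 16);        /* keeps 16, publishes 32 */
        move_pvt_to_pbl(&pvt, &pbl, 16);        /* publishes the remaining 16 */
        return 0;
}
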
Once a public pool is found, + * a batch of free common bufs are moved to private pool on hwqid. + * It might move less than count XRIs if there's not enough in public pool. + **/ +void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) +{ + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_multixri_pool *next_multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_sli4_hdw_queue *qp; + u32 next_hwqid; + u32 hwq_count; + int ret; + + qp = &phba->sli4_hba.hdwq[hwqid]; + multixri_pool = qp->p_multixri_pool; + pvt_pool = &multixri_pool->pvt_pool; + pbl_pool = &multixri_pool->pbl_pool; + + /* Check if local pbl_pool is available */ + ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); + if (ret) { +#ifdef LPFC_MXP_STAT + multixri_pool->local_pbl_hit_count++; +#endif + return; + } + + hwq_count = phba->cfg_hdw_queue; + + /* Get the next hwqid which was found last time */ + next_hwqid = multixri_pool->rrb_next_hwqid; + + do { + /* Go to next hwq */ + next_hwqid = (next_hwqid + 1) % hwq_count; + + next_multixri_pool = + phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; + pbl_pool = &next_multixri_pool->pbl_pool; + + /* Check if the public free xri pool is available */ + ret = _lpfc_move_xri_pbl_to_pvt( + phba, qp, pbl_pool, pvt_pool, count); + + /* Exit while-loop if success or all hwqid are checked */ + } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); + + /* Starting point for the next time */ + multixri_pool->rrb_next_hwqid = next_hwqid; + + if (!ret) { + /* stats: all public pools are empty*/ + multixri_pool->pbl_empty_count++; + } + +#ifdef LPFC_MXP_STAT + if (ret) { + if (next_hwqid == hwqid) + multixri_pool->local_pbl_hit_count++; + else + multixri_pool->other_pbl_hit_count++; + } +#endif +} + +/** + * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark + * @phba: pointer to lpfc hba data structure. + * @qp: belong to which HWQ. + * + * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than + * low watermark. + **/ +void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) +{ + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + + multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; + pvt_pool = &multixri_pool->pvt_pool; + + if (pvt_pool->count < pvt_pool->low_watermark) + lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); +} + +/** + * lpfc_release_io_buf - Return one IO buf back to free pool + * @phba: pointer to lpfc hba data structure. + * @lpfc_ncmd: IO buf to be returned. + * @qp: belong to which HWQ. + * + * This routine returns one IO buf back to free pool. If this is an urgent IO, + * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, + * the IO buf is returned to pbl_pool or pvt_pool based on watermark and + * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to + * lpfc_io_buf_list_put. 
+ **/ +void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, + struct lpfc_sli4_hdw_queue *qp) +{ + unsigned long iflag; + struct lpfc_pbl_pool *pbl_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_epd_pool *epd_pool; + u32 txcmplq_cnt; + u32 xri_owned; + u32 xri_limit; + u32 abts_io_bufs; + + /* MUST zero fields if buffer is reused by another protocol */ + lpfc_ncmd->nvmeCmd = NULL; + lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; + lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; + + if (phba->cfg_xri_rebalancing) { + if (lpfc_ncmd->expedite) { + /* Return to expedite pool */ + epd_pool = &phba->epd_pool; + spin_lock_irqsave(&epd_pool->lock, iflag); + list_add_tail(&lpfc_ncmd->list, &epd_pool->list); + epd_pool->count++; + spin_unlock_irqrestore(&epd_pool->lock, iflag); + return; + } + + /* Avoid invalid access if an IO sneaks in and is being rejected + * just _after_ xri pools are destroyed in lpfc_offline. + * Nothing much can be done at this point. + */ + if (!qp->p_multixri_pool) + return; + + pbl_pool = &qp->p_multixri_pool->pbl_pool; + pvt_pool = &qp->p_multixri_pool->pvt_pool; + + txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; + abts_io_bufs = qp->abts_scsi_io_bufs; + if (qp->nvme_wq) { + txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; + abts_io_bufs += qp->abts_nvme_io_bufs; + } + + xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; + xri_limit = qp->p_multixri_pool->xri_limit; + +#ifdef LPFC_MXP_STAT + if (xri_owned <= xri_limit) + qp->p_multixri_pool->below_limit_count++; + else + qp->p_multixri_pool->above_limit_count++; +#endif + + /* XRI goes to either public or private free xri pool + * based on watermark and xri_limit + */ + if ((pvt_pool->count < pvt_pool->low_watermark) || + (xri_owned < xri_limit && + pvt_pool->count < pvt_pool->high_watermark)) { + lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, + qp, free_pvt_pool); + list_add_tail(&lpfc_ncmd->list, + &pvt_pool->list); + pvt_pool->count++; + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + } else { + lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, + qp, free_pub_pool); + list_add_tail(&lpfc_ncmd->list, + &pbl_pool->list); + pbl_pool->count++; + spin_unlock_irqrestore(&pbl_pool->lock, iflag); + } + } else { + lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, + qp, free_xri); + list_add_tail(&lpfc_ncmd->list, + &qp->lpfc_io_buf_list_put); + qp->put_io_bufs++; + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, + iflag); + } +} + +/** + * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool + * @phba: pointer to lpfc hba data structure. + * @pvt_pool: pointer to private pool data structure. + * @ndlp: pointer to lpfc nodelist data structure. + * + * This routine tries to get one free IO buf from private pool. 
+ * + * Return: + * pointer to one free IO buf - if private pool is not empty + * NULL - if private pool is empty + **/ +static struct lpfc_io_buf * +lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *qp, + struct lpfc_pvt_pool *pvt_pool, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + + lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool); + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &pvt_pool->list, list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_ncmd->list); + pvt_pool->count--; + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + return lpfc_ncmd; + } + spin_unlock_irqrestore(&pvt_pool->lock, iflag); + + return NULL; +} + +/** + * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool + * @phba: pointer to lpfc hba data structure. + * + * This routine tries to get one free IO buf from expedite pool. + * + * Return: + * pointer to one free IO buf - if expedite pool is not empty + * NULL - if expedite pool is empty + **/ +static struct lpfc_io_buf * +lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba) +{ + struct lpfc_io_buf *lpfc_ncmd; + struct lpfc_io_buf *lpfc_ncmd_next; + unsigned long iflag; + struct lpfc_epd_pool *epd_pool; + + epd_pool = &phba->epd_pool; + lpfc_ncmd = NULL; + + spin_lock_irqsave(&epd_pool->lock, iflag); + if (epd_pool->count > 0) { + list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, + &epd_pool->list, list) { + list_del(&lpfc_ncmd->list); + epd_pool->count--; + break; + } + } + spin_unlock_irqrestore(&epd_pool->lock, iflag); + + return lpfc_ncmd; +} + +/** + * lpfc_get_io_buf_from_multixri_pools - Get one free IO bufs + * @phba: pointer to lpfc hba data structure. + * @ndlp: pointer to lpfc nodelist data structure. + * @hwqid: belong to which HWQ + * @expedite: 1 means this request is urgent. + * + * This routine will do the following actions and then return a pointer to + * one free IO buf. + * + * 1. If private free xri count is empty, move some XRIs from public to + * private pool. + * 2. Get one XRI from private free xri pool. + * 3. If we fail to get one from pvt_pool and this is an expedite request, + * get one free xri from expedite pool. + * + * Note: ndlp is only used on SCSI side for RRQ testing. + * The caller should pass NULL for ndlp on NVME side. + * + * Return: + * pointer to one free IO buf - if private pool is not empty + * NULL - if private pool is empty + **/ +static struct lpfc_io_buf * +lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, + int hwqid, int expedite) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_multixri_pool *multixri_pool; + struct lpfc_pvt_pool *pvt_pool; + struct lpfc_io_buf *lpfc_ncmd; + + qp = &phba->sli4_hba.hdwq[hwqid]; + lpfc_ncmd = NULL; + multixri_pool = qp->p_multixri_pool; + pvt_pool = &multixri_pool->pvt_pool; + multixri_pool->io_req_count++; + + /* If pvt_pool is empty, move some XRIs from public to private pool */ + if (pvt_pool->count == 0) + lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); + + /* Get one XRI from private free xri pool */ + lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp); + + if (lpfc_ncmd) { + lpfc_ncmd->hdwq = qp; + lpfc_ncmd->hdwq_no = hwqid; + } else if (expedite) { + /* If we fail to get one from pvt_pool and this is an expedite + * request, get one free xri from expedite pool. 
+ */ + lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba); + } + + return lpfc_ncmd; +} + +static inline struct lpfc_io_buf * +lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx) +{ + struct lpfc_sli4_hdw_queue *qp; + struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next; + + qp = &phba->sli4_hba.hdwq[idx]; + list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, + &qp->lpfc_io_buf_list_get, list) { + if (lpfc_test_rrq_active(phba, ndlp, + lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + + if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED) + continue; + + list_del_init(&lpfc_cmd->list); + qp->get_io_bufs--; + lpfc_cmd->hdwq = qp; + lpfc_cmd->hdwq_no = idx; + return lpfc_cmd; + } + return NULL; +} + +/** + * lpfc_get_io_buf - Get one IO buffer from free pool + * @phba: The HBA for which this call is being executed. + * @ndlp: pointer to lpfc nodelist data structure. + * @hwqid: belong to which HWQ + * @expedite: 1 means this request is urgent. + * + * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1, + * removes a IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes + * a IO buffer from head of @hdwq io_buf_list and returns to caller. + * + * Note: ndlp is only used on SCSI side for RRQ testing. + * The caller should pass NULL for ndlp on NVME side. + * + * Return codes: + * NULL - Error + * Pointer to lpfc_io_buf - Success + **/ +struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, + struct lpfc_nodelist *ndlp, + u32 hwqid, int expedite) +{ + struct lpfc_sli4_hdw_queue *qp; + unsigned long iflag; + struct lpfc_io_buf *lpfc_cmd; + + qp = &phba->sli4_hba.hdwq[hwqid]; + lpfc_cmd = NULL; + + if (phba->cfg_xri_rebalancing) + lpfc_cmd = lpfc_get_io_buf_from_multixri_pools( + phba, ndlp, hwqid, expedite); + else { + lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag, + qp, alloc_xri_get); + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite) + lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); + if (!lpfc_cmd) { + lpfc_qp_spin_lock(&qp->io_buf_list_put_lock, + qp, alloc_xri_put); + list_splice(&qp->lpfc_io_buf_list_put, + &qp->lpfc_io_buf_list_get); + qp->get_io_bufs += qp->put_io_bufs; + INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put); + qp->put_io_bufs = 0; + spin_unlock(&qp->io_buf_list_put_lock); + if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || + expedite) + lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid); + } + spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag); + } + + return lpfc_cmd; +} diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 7abb395bb64a..7a1a761efdd6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -20,6 +20,10 @@ * included with this package. 
* *******************************************************************/ +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) +#define CONFIG_SCSI_LPFC_DEBUG_FS +#endif + /* forward declaration for LPFC_IOCB_t's use */ struct lpfc_hba; struct lpfc_vport; @@ -33,6 +37,7 @@ typedef enum _lpfc_ctx_cmd { struct lpfc_cq_event { struct list_head list; + uint16_t hdwq; union { struct lpfc_mcqe mcqe_cmpl; struct lpfc_acqe_link acqe_link; @@ -351,3 +356,85 @@ struct lpfc_sli { #define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO 300 /* Timeout for other flash-based outstanding mbox command (Seconds) */ #define LPFC_MBOX_TMO_FLASH_CMD 300 + +struct lpfc_io_buf { + /* Common fields */ + struct list_head list; + void *data; + dma_addr_t dma_handle; + dma_addr_t dma_phys_sgl; + struct sli4_sge *dma_sgl; + struct lpfc_iocbq cur_iocbq; + struct lpfc_sli4_hdw_queue *hdwq; + uint16_t hdwq_no; + uint16_t cpu; + + struct lpfc_nodelist *ndlp; + uint32_t timeout; + uint16_t flags; /* TBD convert exch_busy to flags */ +#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ +#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ + /* External DIF device IO conversions */ +#define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */ +#define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */ +#define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */ + uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ + uint16_t status; /* From IOCB Word 7- ulpStatus */ + uint32_t result; /* From IOCB Word 4. */ + + uint32_t seg_cnt; /* Number of scatter-gather segments returned by + * dma_map_sg. The driver needs this for calls + * to dma_unmap_sg. + */ + unsigned long start_time; + spinlock_t buf_lock; /* lock used in case of simultaneous abort */ + bool expedite; /* this is an expedite io_buf */ + + union { + /* SCSI specific fields */ + struct { + struct scsi_cmnd *pCmd; + struct lpfc_rport_data *rdata; + uint32_t prot_seg_cnt; /* seg_cnt's counterpart for + * protection data + */ + + /* + * data and dma_handle are the kernel virtual and bus + * address of the dma-able buffer containing the + * fcp_cmd, fcp_rsp and a scatter gather bde list that + * supports the sg_tablesize value. + */ + struct fcp_cmnd *fcp_cmnd; + struct fcp_rsp *fcp_rsp; + + wait_queue_head_t *waitq; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + /* Used to restore any changes to protection data for + * error injection + */ + void *prot_data_segment; + uint32_t prot_data; + uint32_t prot_data_type; +#define LPFC_INJERR_REFTAG 1 +#define LPFC_INJERR_APPTAG 2 +#define LPFC_INJERR_GUARD 3 +#endif + }; + + /* NVME specific fields */ + struct { + struct nvmefc_fcp_req *nvmeCmd; + uint16_t qidx; + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + uint64_t ts_cmd_start; + uint64_t ts_last_cmd; + uint64_t ts_cmd_wqput; + uint64_t ts_isr_cmpl; + uint64_t ts_data_nvme; +#endif + }; + }; +}; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 6b2d2350e2c6..40c85091c805 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. 
* * EMULEX and SLI are trademarks of Emulex. * @@ -20,6 +20,10 @@ * included with this package. * *******************************************************************/ +#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS) +#define CONFIG_SCSI_LPFC_DEBUG_FS +#endif + #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 #define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 @@ -36,14 +40,12 @@ #define LPFC_NEMBED_MBOX_SGL_CNT 254 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ -#define LPFC_HBA_IO_CHAN_MIN 0 -#define LPFC_HBA_IO_CHAN_MAX 32 -#define LPFC_FCP_IO_CHAN_DEF 4 -#define LPFC_NVME_IO_CHAN_DEF 0 - -/* Number of channels used for Flash Optimized Fabric (FOF) operations */ +#define LPFC_HBA_HDWQ_MIN 0 +#define LPFC_HBA_HDWQ_MAX 128 +#define LPFC_HBA_HDWQ_DEF 0 -#define LPFC_FOF_IO_CHAN_NUM 1 +/* Common buffer size to accomidate SCSI and NVME IO buffers */ +#define LPFC_COMMON_IO_BUF_SZ 768 /* * Provide the default FCF Record attributes used by the driver @@ -152,28 +154,58 @@ struct lpfc_queue { struct list_head child_list; struct list_head page_list; struct list_head sgl_list; + struct list_head cpu_list; uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. */ - uint32_t entry_repost; /* Count of entries before doorbell is rung */ -#define LPFC_EQ_REPOST 8 -#define LPFC_MQ_REPOST 8 -#define LPFC_CQ_REPOST 64 -#define LPFC_RQ_REPOST 64 -#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */ + uint32_t notify_interval; /* Queue Notification Interval + * For chip->host queues (EQ, CQ, RQ): + * specifies the interval (number of + * entries) where the doorbell is rung to + * notify the chip of entry consumption. + * For host->chip queues (WQ): + * specifies the interval (number of + * entries) where consumption CQE is + * requested to indicate WQ entries + * consumed by the chip. + * Not used on an MQ. + */ +#define LPFC_EQ_NOTIFY_INTRVL 16 +#define LPFC_CQ_NOTIFY_INTRVL 16 +#define LPFC_WQ_NOTIFY_INTRVL 16 +#define LPFC_RQ_NOTIFY_INTRVL 16 + uint32_t max_proc_limit; /* Queue Processing Limit + * For chip->host queues (EQ, CQ): + * specifies the maximum number of + * entries to be consumed in one + * processing iteration sequence. Queue + * will be rearmed after each iteration. + * Not used on an MQ, RQ or WQ. 
+ */ +#define LPFC_EQ_MAX_PROC_LIMIT 256 +#define LPFC_CQ_MIN_PROC_LIMIT 64 +#define LPFC_CQ_MAX_PROC_LIMIT LPFC_CQE_EXP_COUNT // 4096 +#define LPFC_CQ_DEF_MAX_PROC_LIMIT LPFC_CQE_DEF_COUNT // 1024 +#define LPFC_CQ_MIN_THRESHOLD_TO_POLL 64 +#define LPFC_CQ_MAX_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT +#define LPFC_CQ_DEF_THRESHOLD_TO_POLL LPFC_CQ_DEF_MAX_PROC_LIMIT + uint32_t queue_claimed; /* indicates queue is being processed */ uint32_t queue_id; /* Queue ID assigned by the hardware */ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ uint32_t host_index; /* The host's index for putting or getting */ uint32_t hba_index; /* The last known hba index for get or put */ + uint32_t q_mode; struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */ struct lpfc_rqb *rqbp; /* ptr to RQ buffers */ - uint32_t q_mode; uint16_t page_count; /* Number of pages allocated for this queue */ uint16_t page_size; /* size of page allocated for this queue */ #define LPFC_EXPANDED_PAGE_SIZE 16384 #define LPFC_DEFAULT_PAGE_SIZE 4096 - uint16_t chann; /* IO channel this queue is associated with */ + uint16_t chann; /* Hardware Queue association WQ/CQ */ + /* CPU affinity for EQ */ +#define LPFC_FIND_BY_EQ 0 +#define LPFC_FIND_BY_HDWQ 1 uint8_t db_format; #define LPFC_DB_RING_FORMAT 0x01 #define LPFC_DB_LIST_FORMAT 0x02 @@ -212,10 +244,14 @@ struct lpfc_queue { #define RQ_buf_posted q_cnt_3 #define RQ_rcv_buf q_cnt_4 - struct work_struct irqwork; - struct work_struct spwork; + struct work_struct irqwork; + struct work_struct spwork; + struct delayed_work sched_irqwork; + struct delayed_work sched_spwork; uint64_t isr_timestamp; + uint16_t hdwq; + uint16_t last_cpu; /* most recent cpu */ uint8_t qe_valid; struct lpfc_queue *assoc_qp; union sli4_qe qe[1]; /* array to index entries (must be last) */ @@ -428,11 +464,6 @@ struct lpfc_hba_eq_hdl { uint32_t idx; char handler_name[LPFC_SLI4_HANDLER_NAME_SZ]; struct lpfc_hba *phba; - atomic_t hba_eq_in_use; - struct cpumask *cpumask; - /* CPU affinitsed to or 0xffffffff if multiple */ - uint32_t cpu; -#define LPFC_MULTI_CPU_AFFINITY 0xffffffff }; /*BB Credit recovery value*/ @@ -526,11 +557,165 @@ struct lpfc_vector_map_info { uint16_t phys_id; uint16_t core_id; uint16_t irq; - uint16_t channel_id; + uint16_t eq; + uint16_t hdwq; + uint16_t hyper; }; #define LPFC_VECTOR_MAP_EMPTY 0xffff +/* Multi-XRI pool */ +#define XRI_BATCH 8 + +struct lpfc_pbl_pool { + struct list_head list; + u32 count; + spinlock_t lock; /* lock for pbl_pool*/ +}; + +struct lpfc_pvt_pool { + u32 low_watermark; + u32 high_watermark; + + struct list_head list; + u32 count; + spinlock_t lock; /* lock for pvt_pool */ +}; + +struct lpfc_multixri_pool { + u32 xri_limit; + + /* Starting point when searching a pbl_pool with round-robin method */ + u32 rrb_next_hwqid; + + /* Used by lpfc_adjust_pvt_pool_count. + * io_req_count is incremented by 1 during IO submission. The heartbeat + * handler uses these two variables to determine if pvt_pool is idle or + * busy. 
+ */ + u32 prev_io_req_count; + u32 io_req_count; + + /* statistics */ + u32 pbl_empty_count; +#ifdef LPFC_MXP_STAT + u32 above_limit_count; + u32 below_limit_count; + u32 local_pbl_hit_count; + u32 other_pbl_hit_count; + u32 stat_max_hwm; + +#define LPFC_MXP_SNAPSHOT_TAKEN 3 /* snapshot is taken at 3rd heartbeats */ + u32 stat_pbl_count; + u32 stat_pvt_count; + u32 stat_busy_count; + u32 stat_snapshot_taken; +#endif + + /* TODO: Separate pvt_pool into get and put list */ + struct lpfc_pbl_pool pbl_pool; /* Public free XRI pool */ + struct lpfc_pvt_pool pvt_pool; /* Private free XRI pool */ +}; + +struct lpfc_fc4_ctrl_stat { + u32 input_requests; + u32 output_requests; + u32 control_requests; + u32 io_cmpls; +}; + +#ifdef LPFC_HDWQ_LOCK_STAT +struct lpfc_lock_stat { + uint32_t alloc_xri_get; + uint32_t alloc_xri_put; + uint32_t free_xri; + uint32_t wq_access; + uint32_t alloc_pvt_pool; + uint32_t mv_from_pvt_pool; + uint32_t mv_to_pub_pool; + uint32_t mv_to_pvt_pool; + uint32_t free_pub_pool; + uint32_t free_pvt_pool; +}; +#endif + +struct lpfc_eq_intr_info { + struct list_head list; + uint32_t icnt; +}; + /* SLI4 HBA data structure entries */ +struct lpfc_sli4_hdw_queue { + /* Pointers to the constructed SLI4 queues */ + struct lpfc_queue *hba_eq; /* Event queues for HBA */ + struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */ + struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */ + struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */ + struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */ + uint16_t fcp_cq_map; + uint16_t nvme_cq_map; + + /* Keep track of IO buffers for this hardware queue */ + spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */ + struct list_head lpfc_io_buf_list_get; + spinlock_t io_buf_list_put_lock; /* Common buf free list lock */ + struct list_head lpfc_io_buf_list_put; + spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ + struct list_head lpfc_abts_scsi_buf_list; + spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */ + struct list_head lpfc_abts_nvme_buf_list; + uint32_t total_io_bufs; + uint32_t get_io_bufs; + uint32_t put_io_bufs; + uint32_t empty_io_bufs; + uint32_t abts_scsi_io_bufs; + uint32_t abts_nvme_io_bufs; + + /* Multi-XRI pool per HWQ */ + struct lpfc_multixri_pool *p_multixri_pool; + + /* FC-4 Stats counters */ + struct lpfc_fc4_ctrl_stat nvme_cstat; + struct lpfc_fc4_ctrl_stat scsi_cstat; +#ifdef LPFC_HDWQ_LOCK_STAT + struct lpfc_lock_stat lock_conflict; +#endif + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +#define LPFC_CHECK_CPU_CNT 128 + uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; + uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; +#endif +}; + +#ifdef LPFC_HDWQ_LOCK_STAT +/* compile time trylock stats */ +#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \ + { \ + int only_once = 1; \ + while (spin_trylock_irqsave(lock, flag) == 0) { \ + if (only_once) { \ + only_once = 0; \ + qp->lock_conflict.lstat++; \ + } \ + } \ + } +#define lpfc_qp_spin_lock(lock, qp, lstat) \ + { \ + int only_once = 1; \ + while (spin_trylock(lock) == 0) { \ + if (only_once) { \ + only_once = 0; \ + qp->lock_conflict.lstat++; \ + } \ + } \ + } +#else +#define lpfc_qp_spin_lock_irqsave(lock, flag, qp, lstat) \ + spin_lock_irqsave(lock, flag) +#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock) +#endif + struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for * config space registers @@ -599,21 +784,19 @@ struct 
lpfc_sli4_hba { struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */ void (*sli4_eq_clr_intr)(struct lpfc_queue *q); - uint32_t (*sli4_eq_release)(struct lpfc_queue *q, bool arm); - uint32_t (*sli4_cq_release)(struct lpfc_queue *q, bool arm); + void (*sli4_write_eq_db)(struct lpfc_hba *phba, struct lpfc_queue *eq, + uint32_t count, bool arm); + void (*sli4_write_cq_db)(struct lpfc_hba *phba, struct lpfc_queue *cq, + uint32_t count, bool arm); /* Pointers to the constructed SLI4 queues */ - struct lpfc_queue **hba_eq; /* Event queues for HBA */ - struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */ - struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */ + struct lpfc_sli4_hdw_queue *hdwq; + struct list_head lpfc_wq_list; + + /* Pointers to the constructed SLI4 queues for NVMET */ struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */ struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */ struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */ - struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */ - struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */ - uint16_t *fcp_cq_map; - uint16_t *nvme_cq_map; - struct list_head lpfc_wq_list; struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ @@ -631,13 +814,7 @@ struct lpfc_sli4_hba { uint32_t ulp0_mode; /* ULP0 protocol mode */ uint32_t ulp1_mode; /* ULP1 protocol mode */ - struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */ - /* Optimized Access Storage specific queues/structures */ - - struct lpfc_queue *oas_cq; /* OAS completion queue */ - struct lpfc_queue *oas_wq; /* OAS Work queue */ - struct lpfc_sli_ring *oas_ring; uint64_t oas_next_lun; uint8_t oas_next_tgt_wwpn[8]; uint8_t oas_next_vpt_wwpn[8]; @@ -663,22 +840,22 @@ struct lpfc_sli4_hba { uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. 
*/ uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ uint16_t next_rpi; - uint16_t nvme_xri_max; - uint16_t nvme_xri_cnt; - uint16_t nvme_xri_start; - uint16_t scsi_xri_max; - uint16_t scsi_xri_cnt; - uint16_t scsi_xri_start; + uint16_t io_xri_max; + uint16_t io_xri_cnt; + uint16_t io_xri_start; uint16_t els_xri_cnt; uint16_t nvmet_xri_cnt; uint16_t nvmet_io_wait_cnt; uint16_t nvmet_io_wait_total; + uint16_t cq_max; + struct lpfc_queue **cq_lookup; struct list_head lpfc_els_sgl_list; struct list_head lpfc_abts_els_sgl_list; + spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ + struct list_head lpfc_abts_scsi_buf_list; struct list_head lpfc_nvmet_sgl_list; + spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */ struct list_head lpfc_abts_nvmet_ctx_list; - struct list_head lpfc_abts_scsi_buf_list; - struct list_head lpfc_abts_nvme_buf_list; struct list_head lpfc_nvmet_io_wait_list; struct lpfc_nvmet_ctx_info *nvmet_ctx_info; struct lpfc_sglq **lpfc_sglq_active_list; @@ -707,17 +884,16 @@ struct lpfc_sli4_hba { #define LPFC_SLI4_PPNAME_NON 0 #define LPFC_SLI4_PPNAME_GET 1 struct lpfc_iov iov; - spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */ - spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t sgl_list_lock; /* list of aborted els IOs */ spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */ uint32_t physical_port; /* CPU to vector mapping information */ struct lpfc_vector_map_info *cpu_map; - uint16_t num_online_cpu; + uint16_t num_possible_cpu; uint16_t num_present_cpu; uint16_t curr_disp_cpu; + struct lpfc_eq_intr_info __percpu *eq_info; uint32_t conf_trunk; #define lpfc_conf_trunk_port0_WORD conf_trunk #define lpfc_conf_trunk_port0_SHIFT 0 @@ -818,12 +994,12 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, uint32_t, uint32_t); void lpfc_sli4_queue_free(struct lpfc_queue *); int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t); -int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, - uint32_t numq, uint32_t imax); +void lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, + uint32_t numq, uint32_t usdelay); int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t, uint32_t); int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, - struct lpfc_queue **eqp, uint32_t type, + struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, uint32_t subtype); int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); @@ -843,12 +1019,10 @@ int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, int lpfc_sli4_queue_setup(struct lpfc_hba *); void lpfc_sli4_queue_unset(struct lpfc_hba *); int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); -int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); -int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba); +int lpfc_repost_io_sgl_list(struct lpfc_hba *phba); uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); void lpfc_sli4_free_xri(struct lpfc_hba *, int); int lpfc_sli4_post_async_mbox(struct lpfc_hba *); -int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); @@ -868,9 +1042,9 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, void 
lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, - struct sli4_wcqe_xri_aborted *); + struct sli4_wcqe_xri_aborted *, int); void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri); + struct sli4_wcqe_xri_aborted *axri, int idx); void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri); void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, @@ -884,11 +1058,15 @@ int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba); int lpfc_sli4_init_vpi(struct lpfc_vport *); inline void lpfc_sli4_eq_clr_intr(struct lpfc_queue *); -uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); -uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); +void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); +void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); inline void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q); -uint32_t lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm); -uint32_t lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm); +void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); +void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, + uint32_t count, bool arm); void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t); int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 3f4398ffb567..43fd693cf042 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.0.0.10" +#define LPFC_DRIVER_VERSION "12.2.0.0" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 102a011ff6d4..343bc71d4615 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -313,11 +313,11 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) goto error_out; } - /* NPIV is not supported if HBA has NVME enabled */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* NPIV is not supported if HBA has NVME Target enabled */ + if (phba->nvmet_support) { lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "3189 Create VPORT failed: " - "NPIV is not supported on NVME\n"); + "NPIV is not supported on NVME Target\n"); rc = VPORT_INVAL; goto error_out; } @@ -403,6 +403,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) /* Set the DFT_LUN_Q_DEPTH accordingly */ vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth; + /* Only the physical port can support NVME for now */ + vport->cfg_enable_fc4_type = LPFC_ENABLE_FCP; + *(struct lpfc_vport **)fc_vport->dd_data = vport; vport->fc_vport = fc_vport; @@ -415,22 +418,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) vport->fdmi_port_mask = phba->pport->fdmi_port_mask; } - if ((phba->nvmet_support == 0) && - ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || - (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) { - /* Create NVME binding with nvme_fc_transport. This - * ensures the vport is initialized. - */ - rc = lpfc_nvme_create_localport(vport); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6003 %s status x%x\n", - "NVME registration failed, ", - rc); - goto error_out; - } - } - /* * In SLI4, the vpi must be activated before it can be used * by the port. diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 16536c41f0c5..6fd57f7f0b1e 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,8 +33,8 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.707.50.00-rc1" -#define MEGASAS_RELDATE "December 18, 2018" +#define MEGASAS_VERSION "07.707.51.00-rc1" +#define MEGASAS_RELDATE "February 7, 2019" /* * Device IDs @@ -790,6 +790,38 @@ struct MR_LD_TARGETID_LIST { u8 targetId[MAX_LOGICAL_DRIVES_EXT]; }; +struct MR_HOST_DEVICE_LIST_ENTRY { + struct { + union { + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u8 reserved:7; + u8 is_sys_pd:1; +#else + u8 is_sys_pd:1; + u8 reserved:7; +#endif + } bits; + u8 byte; + } u; + } flags; + u8 scsi_type; + __le16 target_id; + u8 reserved[4]; + __le64 sas_addr[2]; +} __packed; + +struct MR_HOST_DEVICE_LIST { + __le32 size; + __le32 count; + __le32 reserved[2]; + struct MR_HOST_DEVICE_LIST_ENTRY host_device_list[1]; +} __packed; + +#define HOST_DEVICE_LIST_SZ (sizeof(struct MR_HOST_DEVICE_LIST) + \ + (sizeof(struct MR_HOST_DEVICE_LIST_ENTRY) * \ + (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT - 1))) + /* * SAS controller properties @@ -870,13 +902,17 @@ struct megasas_ctrl_prop { u8 viewSpace; struct { #if defined(__BIG_ENDIAN_BITFIELD) - u16 reserved2:11; + u16 reserved3:9; + u16 enable_fw_dev_list:1; + u16 reserved2:1; u16 enable_snap_dump:1; u16 reserved1:4; #else u16 reserved1:4; u16 enable_snap_dump:1; - u16 reserved2:11; + u16 reserved2:1; + u16 enable_fw_dev_list:1; + u16 reserved3:9; #endif } on_off_properties2; }; @@ -1685,7 +1721,8 @@ union megasas_sgl_frame { typedef union _MFI_CAPABILITIES { struct { #if defined(__BIG_ENDIAN_BITFIELD) - u32 reserved:17; + u32 reserved:16; + u32 support_fw_exposed_dev_list:1; u32 support_nvme_passthru:1; u32 support_64bit_mode:1; u32 support_pd_map_target_id:1; @@ -1717,7 +1754,8 @@ typedef union _MFI_CAPABILITIES { u32 support_pd_map_target_id:1; u32 support_64bit_mode:1; u32 
support_nvme_passthru:1; - u32 reserved:17; + u32 support_fw_exposed_dev_list:1; + u32 reserved:16; #endif } mfi_capabilities; __le32 reg; @@ -2202,6 +2240,9 @@ struct megasas_instance { struct MR_LD_TARGETID_LIST *ld_targetid_list_buf; dma_addr_t ld_targetid_list_buf_h; + struct MR_HOST_DEVICE_LIST *host_device_list_buf; + dma_addr_t host_device_list_buf_h; + struct MR_SNAPDUMP_PROPERTIES *snapdump_prop; dma_addr_t snapdump_prop_h; @@ -2337,6 +2378,7 @@ struct megasas_instance { u8 task_abort_tmo; u8 max_reset_tmo; u8 snapdump_wait_time; + u8 enable_fw_dev_list; }; struct MR_LD_VF_MAP { u32 size; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index fcbff83c0097..dace907744a5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4188,6 +4188,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance) if (megasas_create_frame_pool(instance)) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n"); megasas_free_cmds(instance); + return -ENOMEM; } return 0; @@ -4634,6 +4635,123 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) return ret; } +/** + * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET + * dcmd.mbox - reserved + * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure + * Desc: This DCMD will return the combined device list + * Status: MFI_STAT_OK - List returned successfully + * MFI_STAT_INVALID_CMD - Firmware support for the feature has been + * disabled + * @instance: Adapter soft state + * @is_probe: Driver probe check + * Return: 0 if DCMD succeeded + * non-zero if failed + */ +int +megasas_host_device_list_query(struct megasas_instance *instance, + bool is_probe) +{ + int ret, i, target_id; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct MR_HOST_DEVICE_LIST *ci; + u32 count; + dma_addr_t ci_h; + + ci = instance->host_device_list_buf; + ci_h = instance->host_device_list_buf_h; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + dev_warn(&instance->pdev->dev, + "%s: failed to get cmd\n", + __func__); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->mbox.b[0] = is_probe ? 
0 : 1; + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = MFI_STAT_INVALID_STATUS; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->pad_0 = 0; + dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ); + dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET); + + megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ); + + if (!instance->mask_interrupts) { + ret = megasas_issue_blocked_cmd(instance, cmd, + MFI_IO_TIMEOUT_SECS); + } else { + ret = megasas_issue_polled(instance, cmd); + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + } + + switch (ret) { + case DCMD_SUCCESS: + /* Fill the internal pd_list and ld_ids array based on + * targetIds returned by FW + */ + count = le32_to_cpu(ci->count); + + memset(instance->local_pd_list, 0, + MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); + memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); + for (i = 0; i < count; i++) { + target_id = le16_to_cpu(ci->host_device_list[i].target_id); + if (ci->host_device_list[i].flags.u.bits.is_sys_pd) { + instance->local_pd_list[target_id].tid = target_id; + instance->local_pd_list[target_id].driveType = + ci->host_device_list[i].scsi_type; + instance->local_pd_list[target_id].driveState = + MR_PD_STATE_SYSTEM; + } else { + instance->ld_ids[target_id] = target_id; + } + } + + memcpy(instance->pd_list, instance->local_pd_list, + sizeof(instance->pd_list)); + break; + + case DCMD_TIMEOUT: + switch (dcmd_timeout_ocr_possible(instance)) { + case INITIATE_OCR: + cmd->flags |= DRV_DCMD_SKIP_REFIRE; + megasas_reset_fusion(instance->host, + MFI_IO_TIMEOUT_OCR); + break; + case KILL_ADAPTER: + megaraid_sas_kill_hba(instance); + break; + case IGNORE_TIMEOUT: + dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n", + __func__, __LINE__); + break; + } + break; + case DCMD_FAILED: + dev_err(&instance->pdev->dev, + "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n", + __func__); + break; + } + + if (ret != DCMD_TIMEOUT) + megasas_return_cmd(instance, cmd); + + return ret; +} + /* * megasas_update_ext_vd_details : Update details w.r.t Extended VD * instance : Controller's instance @@ -4861,6 +4979,9 @@ megasas_get_ctrl_info(struct megasas_instance *instance) (ci->properties.on_off_properties2.enable_snap_dump ? MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0); + instance->enable_fw_dev_list = + ci->properties.on_off_properties2.enable_fw_dev_list; + dev_info(&instance->pdev->dev, "controller type\t: %s(%dMB)\n", instance->is_imr ? "iMR" : "MR", @@ -5319,6 +5440,40 @@ fallback: instance->reply_map[cpu] = cpu % instance->msix_vectors; } +/** + * megasas_get_device_list - Get the PD and LD device list from FW. + * @instance: Adapter soft state + * @return: Success or failure + * + * Issue DCMDs to Firmware to get the PD and LD list. + * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination + * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
+ */ +static +int megasas_get_device_list(struct megasas_instance *instance) +{ + memset(instance->pd_list, 0, + (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); + memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); + + if (instance->enable_fw_dev_list) { + if (megasas_host_device_list_query(instance, true)) + return FAILED; + } else { + if (megasas_get_pd_list(instance) < 0) { + dev_err(&instance->pdev->dev, "failed to get PD list\n"); + return FAILED; + } + + if (megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) { + dev_err(&instance->pdev->dev, "failed to get LD list\n"); + return FAILED; + } + } + + return SUCCESS; +} /** * megasas_init_fw - Initializes the FW * @instance: Adapter soft state @@ -5571,18 +5726,13 @@ static int megasas_init_fw(struct megasas_instance *instance) megasas_setup_jbod_map(instance); - /** for passthrough - * the following function will get the PD LIST. - */ - memset(instance->pd_list, 0, - (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); - if (megasas_get_pd_list(instance) < 0) { - dev_err(&instance->pdev->dev, "failed to get PD list\n"); + if (megasas_get_device_list(instance) != SUCCESS) { + dev_err(&instance->pdev->dev, + "%s: megasas_get_device_list failed\n", + __func__); goto fail_get_ld_pd_list; } - memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); - /* stream detection initialization */ if (instance->adapter_type >= VENTURA_SERIES) { fusion->stream_detect_by_ld = @@ -5612,10 +5762,6 @@ static int megasas_init_fw(struct megasas_instance *instance) } } - if (megasas_ld_list_query(instance, - MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) - goto fail_get_ld_pd_list; - /* * Compute the max allowed sectors per IO: The controller info has two * limits on max sectors. Driver should use the minimum of these two. @@ -6424,6 +6570,18 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) if (!instance->snapdump_prop) dev_err(&pdev->dev, "Failed to allocate snapdump properties buffer\n"); + + instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev, + HOST_DEVICE_LIST_SZ, + &instance->host_device_list_buf_h, + GFP_KERNEL); + + if (!instance->host_device_list_buf) { + dev_err(&pdev->dev, + "Failed to allocate targetid list buffer\n"); + return -ENOMEM; + } + } instance->pd_list_buf = @@ -6573,6 +6731,13 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance) sizeof(struct MR_SNAPDUMP_PROPERTIES), instance->snapdump_prop, instance->snapdump_prop_h); + + if (instance->host_device_list_buf) + dma_free_coherent(&pdev->dev, + HOST_DEVICE_LIST_SZ, + instance->host_device_list_buf, + instance->host_device_list_buf_h); + } /* @@ -6746,7 +6911,9 @@ static int megasas_probe_one(struct pci_dev *pdev, /* * Trigger SCSI to scan our drives */ - scsi_scan_host(host); + if (!instance->enable_fw_dev_list || + (instance->host_device_list_buf->count > 0)) + scsi_scan_host(host); /* * Initiate AEN (Asynchronous Event Notification) @@ -7866,6 +8033,139 @@ static inline void megasas_remove_scsi_device(struct scsi_device *sdev) scsi_device_put(sdev); } +/** + * megasas_update_device_list - Update the PD and LD device list from FW + * after an AEN event notification + * @instance: Adapter soft state + * @event_type: Indicates type of event (PD or LD event) + * + * @return: Success or failure + * + * Issue DCMDs to Firmware to update the internal device list in driver. + * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination + * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list. 
+ */ +static +int megasas_update_device_list(struct megasas_instance *instance, + int event_type) +{ + int dcmd_ret = DCMD_SUCCESS; + + if (instance->enable_fw_dev_list) { + dcmd_ret = megasas_host_device_list_query(instance, false); + if (dcmd_ret != DCMD_SUCCESS) + goto out; + } else { + if (event_type & SCAN_PD_CHANNEL) { + dcmd_ret = megasas_get_pd_list(instance); + + if (dcmd_ret != DCMD_SUCCESS) + goto out; + } + + if (event_type & SCAN_VD_CHANNEL) { + if (!instance->requestorId || + (instance->requestorId && + megasas_get_ld_vf_affiliation(instance, 0))) { + dcmd_ret = megasas_ld_list_query(instance, + MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); + if (dcmd_ret != DCMD_SUCCESS) + goto out; + } + } + } + +out: + return dcmd_ret; +} + +/** + * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer + * after an AEN event notification + * @instance: Adapter soft state + * @scan_type: Indicates type of devices (PD/LD) to add + * @return void + */ +static +void megasas_add_remove_devices(struct megasas_instance *instance, + int scan_type) +{ + int i, j; + u16 pd_index = 0; + u16 ld_index = 0; + u16 channel = 0, id = 0; + struct Scsi_Host *host; + struct scsi_device *sdev1; + struct MR_HOST_DEVICE_LIST *targetid_list = NULL; + struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL; + + host = instance->host; + + if (instance->enable_fw_dev_list) { + targetid_list = instance->host_device_list_buf; + for (i = 0; i < targetid_list->count; i++) { + targetid_entry = &targetid_list->host_device_list[i]; + if (targetid_entry->flags.u.bits.is_sys_pd) { + channel = le16_to_cpu(targetid_entry->target_id) / + MEGASAS_MAX_DEV_PER_CHANNEL; + id = le16_to_cpu(targetid_entry->target_id) % + MEGASAS_MAX_DEV_PER_CHANNEL; + } else { + channel = MEGASAS_MAX_PD_CHANNELS + + (le16_to_cpu(targetid_entry->target_id) / + MEGASAS_MAX_DEV_PER_CHANNEL); + id = le16_to_cpu(targetid_entry->target_id) % + MEGASAS_MAX_DEV_PER_CHANNEL; + } + sdev1 = scsi_device_lookup(host, channel, id, 0); + if (!sdev1) { + scsi_add_device(host, channel, id, 0); + } else { + scsi_device_put(sdev1); + } + } + } + + if (scan_type & SCAN_PD_CHANNEL) { + for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j; + sdev1 = scsi_device_lookup(host, i, j, 0); + if (instance->pd_list[pd_index].driveState == + MR_PD_STATE_SYSTEM) { + if (!sdev1) + scsi_add_device(host, i, j, 0); + else + scsi_device_put(sdev1); + } else { + if (sdev1) + megasas_remove_scsi_device(sdev1); + } + } + } + } + + if (scan_type & SCAN_VD_CHANNEL) { + for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { + for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { + ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; + sdev1 = scsi_device_lookup(host, + MEGASAS_MAX_PD_CHANNELS + i, j, 0); + if (instance->ld_ids[ld_index] != 0xff) { + if (!sdev1) + scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + else + scsi_device_put(sdev1); + } else { + if (sdev1) + megasas_remove_scsi_device(sdev1); + } + } + } + } + +} + static void megasas_aen_polling(struct work_struct *work) { @@ -7873,11 +8173,7 @@ megasas_aen_polling(struct work_struct *work) container_of(work, struct megasas_aen_event, hotplug_work.work); struct megasas_instance *instance = ev->instance; union megasas_evt_class_locale class_locale; - struct Scsi_Host *host; - struct scsi_device *sdev1; - u16 pd_index = 0; - u16 ld_index = 0; - int i, j, doscan = 0; + int event_type = 0; u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME; 
int error; u8 dcmd_ret = DCMD_SUCCESS; @@ -7896,7 +8192,6 @@ megasas_aen_polling(struct work_struct *work) mutex_lock(&instance->reset_mutex); instance->ev = NULL; - host = instance->host; if (instance->evt_detail) { megasas_decode_evt(instance); @@ -7904,40 +8199,20 @@ megasas_aen_polling(struct work_struct *work) case MR_EVT_PD_INSERTED: case MR_EVT_PD_REMOVED: - dcmd_ret = megasas_get_pd_list(instance); - if (dcmd_ret == DCMD_SUCCESS) - doscan = SCAN_PD_CHANNEL; + event_type = SCAN_PD_CHANNEL; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: case MR_EVT_LD_CREATED: - if (!instance->requestorId || - (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) - dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); - - if (dcmd_ret == DCMD_SUCCESS) - doscan = SCAN_VD_CHANNEL; - + event_type = SCAN_VD_CHANNEL; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: - dcmd_ret = megasas_get_pd_list(instance); - - if (dcmd_ret != DCMD_SUCCESS) - break; - - if (!instance->requestorId || - (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0))) - dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); - - if (dcmd_ret != DCMD_SUCCESS) - break; - - doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL; + event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL; dev_info(&instance->pdev->dev, "scanning for scsi%d...\n", instance->host->host_no); break; @@ -7953,7 +8228,7 @@ megasas_aen_polling(struct work_struct *work) } break; default: - doscan = 0; + event_type = 0; break; } } else { @@ -7963,44 +8238,13 @@ megasas_aen_polling(struct work_struct *work) return; } - mutex_unlock(&instance->reset_mutex); + if (event_type) + dcmd_ret = megasas_update_device_list(instance, event_type); - if (doscan & SCAN_PD_CHANNEL) { - for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { - pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; - sdev1 = scsi_device_lookup(host, i, j, 0); - if (instance->pd_list[pd_index].driveState == - MR_PD_STATE_SYSTEM) { - if (!sdev1) - scsi_add_device(host, i, j, 0); - else - scsi_device_put(sdev1); - } else { - if (sdev1) - megasas_remove_scsi_device(sdev1); - } - } - } - } + mutex_unlock(&instance->reset_mutex); - if (doscan & SCAN_VD_CHANNEL) { - for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { - for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { - ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); - if (instance->ld_ids[ld_index] != 0xff) { - if (!sdev1) - scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); - else - scsi_device_put(sdev1); - } else { - if (sdev1) - megasas_remove_scsi_device(sdev1); - } - } - } - } + if (event_type && dcmd_ret == DCMD_SUCCESS) + megasas_add_remove_devices(instance, event_type); if (dcmd_ret == DCMD_SUCCESS) seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 647f48a28f85..1d17128030cd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -937,11 +937,9 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, { int i; struct megasas_header *frame_hdr = &cmd->frame->hdr; - struct fusion_context *fusion; u32 msecs = seconds * 1000; - fusion = instance->ctrl_context; /* * Wait for cmd_status to change */ 
@@ -1074,6 +1072,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) drv_ops->mfi_capabilities.support_qd_throttling = 1; drv_ops->mfi_capabilities.support_pd_map_target_id = 1; drv_ops->mfi_capabilities.support_nvme_passthru = 1; + drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1; if (instance->consistent_mask_64bit) drv_ops->mfi_capabilities.support_64bit_mode = 1; @@ -1330,7 +1329,6 @@ megasas_sync_map_info(struct megasas_instance *instance) struct megasas_cmd *cmd; struct megasas_dcmd_frame *dcmd; u16 num_lds; - u32 size_sync_info; struct fusion_context *fusion; struct MR_LD_TARGET_SYNC *ci = NULL; struct MR_DRV_RAID_MAP_ALL *map; @@ -1359,8 +1357,6 @@ megasas_sync_map_info(struct megasas_instance *instance) dcmd = &cmd->frame->dcmd; - size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds; - memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); ci = (struct MR_LD_TARGET_SYNC *) @@ -1639,15 +1635,12 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance) u32 megasas_init_adapter_fusion(struct megasas_instance *instance) { - struct megasas_register_set __iomem *reg_set; struct fusion_context *fusion; u32 scratch_pad_1; int i = 0, count; fusion = instance->ctrl_context; - reg_set = instance->reg_set; - megasas_fusion_update_can_queue(instance, PROBE_CONTEXT); /* @@ -1926,7 +1919,6 @@ static bool megasas_is_prp_possible(struct megasas_instance *instance, struct scsi_cmnd *scmd, int sge_count) { - struct fusion_context *fusion; int i; u32 data_length = 0; struct scatterlist *sg_scmd; @@ -1935,7 +1927,6 @@ megasas_is_prp_possible(struct megasas_instance *instance, mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); - fusion = instance->ctrl_context; data_length = scsi_bufflen(scmd); sg_scmd = scsi_sglist(scmd); @@ -2048,12 +2039,9 @@ megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd, u32 first_prp_len; bool build_prp = false; int data_len = scsi_bufflen(scmd); - struct fusion_context *fusion; u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); - fusion = instance->ctrl_context; - build_prp = megasas_is_prp_possible(instance, scmd, sge_count); if (!build_prp) @@ -2621,7 +2609,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, u32 start_lba_lo, start_lba_hi, device_id, datalength = 0; u32 scsi_buff_len; struct MPI2_RAID_SCSI_IO_REQUEST *io_request; - union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; struct IO_REQUEST_INFO io_info; struct fusion_context *fusion; struct MR_DRV_RAID_MAP_ALL *local_map_ptr; @@ -2644,8 +2631,6 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, rctx->status = 0; rctx->ex_status = 0; - req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc; - start_lba_lo = 0; start_lba_hi = 0; fp_possible = false; @@ -3246,9 +3231,6 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, struct megasas_cmd_fusion *cmd, *r1_cmd = NULL; union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; u32 index; - struct fusion_context *fusion; - - fusion = instance->ctrl_context; if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) && instance->ldio_threshold && @@ -4401,14 +4383,11 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd) { struct megasas_instance *instance; u16 smid, devhandle; - struct fusion_context *fusion; int ret; struct MR_PRIV_DEVICE *mr_device_priv_data; mr_device_priv_data = scmd->device->hostdata; - instance = (struct megasas_instance *)scmd->device->host->hostdata; - fusion = 
instance->ctrl_context; scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd); scsi_print_command(scmd); @@ -4428,7 +4407,6 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd) goto out; } - if (!mr_device_priv_data->is_tm_capable) { ret = FAILED; goto out; @@ -4487,12 +4465,10 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd) struct megasas_instance *instance; int ret = FAILED; u16 devhandle; - struct fusion_context *fusion; struct MR_PRIV_DEVICE *mr_device_priv_data; mr_device_priv_data = scmd->device->hostdata; instance = (struct megasas_instance *)scmd->device->host->hostdata; - fusion = instance->ctrl_context; sdev_printk(KERN_INFO, scmd->device, "target reset called for scmd(%p)\n", scmd); @@ -4512,7 +4488,6 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd) goto out; } - if (!mr_device_priv_data->is_tm_capable) { ret = FAILED; goto out; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index ca73c50fe723..1481bf029490 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -724,6 +724,7 @@ struct MPI2_IOC_INIT_REQUEST { #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200 #define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200 #define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100 +#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600 struct MR_DEV_HANDLE_INFO { __le16 curDevHdl; diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 398fa6fde960..a2f4a55c51be 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -548,7 +548,8 @@ typedef struct _MPI2_CONFIG_REPLY { #define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086) #define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087) #define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) -#define MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP (0x02B0) +#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP (0x02B0) +#define MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1 (0x02B1) /*MPI v2.5 SAS products */ #define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096) diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 0a6cb8f0680c..e57774472e75 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -3563,6 +3563,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) ioc->pdev->subsystem_device); break; } + break; case MPI2_MFGPAGE_DEVID_SAS2308_2: switch (ioc->pdev->subsystem_device) { case MPT2SAS_INTEL_RS25GB008_SSDID: @@ -3598,6 +3599,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) ioc->pdev->subsystem_device); break; } + break; case MPI25_MFGPAGE_DEVID_SAS3008: switch (ioc->pdev->subsystem_device) { case MPT3SAS_INTEL_RMS3JC080_SSDID: @@ -3742,6 +3744,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) ioc->pdev->subsystem_device); break; } + break; case MPI2_MFGPAGE_DEVID_SAS2308_2: switch (ioc->pdev->subsystem_device) { case MPT2SAS_HP_2_4_INTERNAL_SSDID: @@ -3765,6 +3768,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) ioc->pdev->subsystem_device); break; } + break; default: ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n", ioc->pdev->subsystem_device); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 800351932cc3..19158cb59101 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -75,9 +75,9 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies " #define MPT3SAS_DESCRIPTION 
"LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "27.101.00.00" +#define MPT3SAS_DRIVER_VERSION "27.102.00.00" #define MPT3SAS_MAJOR_VERSION 27 -#define MPT3SAS_MINOR_VERSION 101 +#define MPT3SAS_MINOR_VERSION 102 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -193,6 +193,9 @@ struct mpt3sas_nvme_cmd { #define SAS2_PCI_DEVICE_B0_REVISION (0x01) #define SAS3_PCI_DEVICE_C0_REVISION (0x02) +/* Atlas PCIe Switch Management Port */ +#define MPI26_ATLAS_PCIe_SWITCH_DEVID (0x00B2) + /* * Intel HBA branding */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6be39dc27103..8bb5b8f9f4d2 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -10256,7 +10256,8 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) case MPI2_MFGPAGE_DEVID_SAS2308_1: case MPI2_MFGPAGE_DEVID_SAS2308_2: case MPI2_MFGPAGE_DEVID_SAS2308_3: - case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: return MPI2_VERSION; case MPI25_MFGPAGE_DEVID_SAS3004: case MPI25_MFGPAGE_DEVID_SAS3008: @@ -10282,6 +10283,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) case MPI26_MFGPAGE_DEVID_SAS3516_1: case MPI26_MFGPAGE_DEVID_SAS3416: case MPI26_MFGPAGE_DEVID_SAS3616: + case MPI26_ATLAS_PCIe_SWITCH_DEVID: case MPI26_MFGPAGE_DEVID_CFG_SEC_3916: case MPI26_MFGPAGE_DEVID_HARD_SEC_3916: case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: @@ -10343,7 +10345,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->is_warpdrive = 1; ioc->hide_ir_msg = 1; break; - case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP: + case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1: ioc->is_mcpu_endpoint = 1; break; default: @@ -10371,6 +10374,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) case MPI26_MFGPAGE_DEVID_SAS3516_1: case MPI26_MFGPAGE_DEVID_SAS3416: case MPI26_MFGPAGE_DEVID_SAS3616: + case MPI26_ATLAS_PCIe_SWITCH_DEVID: ioc->is_gen35_ioc = 1; break; case MPI26_MFGPAGE_DEVID_CFG_SEC_3816: @@ -10783,7 +10787,9 @@ static const struct pci_device_id mpt3sas_pci_table[] = { PCI_ANY_ID, PCI_ANY_ID }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, PCI_ANY_ID, PCI_ANY_ID }, - { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1, PCI_ANY_ID, PCI_ANY_ID }, /* SSS6200 */ { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, @@ -10849,6 +10855,10 @@ static const struct pci_device_id mpt3sas_pci_table[] = { { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916, PCI_ANY_ID, PCI_ANY_ID }, + /* Atlas PCIe Switch Management Port */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID, + PCI_ANY_ID, PCI_ANY_ID }, + /* Sea SI 0x00E5 Configurable Secure * 0x00E6 Hard Secure */ diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 36f64205ecfa..3df02691a092 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -718,8 +718,8 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd) mhba = (struct mvumi_hba *) scmd->device->host->hostdata; - scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n", - scmd->serial_number, scmd->cmnd[0], scmd->retries); + scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n", + scmd->request->tag, scmd->cmnd[0], scmd->retries); return 
mhba->instancet->reset_host(mhba); } @@ -2104,7 +2104,6 @@ static int mvumi_queue_command(struct Scsi_Host *shost, unsigned long irq_flags; spin_lock_irqsave(shost->host_lock, irq_flags); - scsi_cmd_get_serial(shost, scmd); mhba = (struct mvumi_hba *) shost->hostdata; scmd->result = 0; diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 00e3cbee55b8..da4d6e1106c4 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -2441,7 +2441,6 @@ static void nsp32_set_sync_entry(nsp32_hw_data *data, period = data->synct[entry].period_num; ackwidth = data->synct[entry].ackwidth; - offset = offset; sample_rate = data->synct[entry].sample_rate; target->syncreg = TO_SYNCREG(period, offset); diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild deleted file mode 100644 index 58cecd45b0f5..000000000000 --- a/drivers/scsi/osd/Kbuild +++ /dev/null @@ -1,20 +0,0 @@ -# -# Kbuild for the OSD modules -# -# Copyright (C) 2008 Panasas Inc. All rights reserved. -# -# Authors: -# Boaz Harrosh -# Benny Halevy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# - -# libosd.ko - osd-initiator library -libosd-y := osd_initiator.o -obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o - -# osd.ko - SCSI ULD and char-device -osd-y := osd_uld.o -obj-$(CONFIG_SCSI_OSD_ULD) += osd.o diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig deleted file mode 100644 index 347cc5e33749..000000000000 --- a/drivers/scsi/osd/Kconfig +++ /dev/null @@ -1,49 +0,0 @@ -# -# Kernel configuration file for the OSD scsi protocol -# -# Copyright (C) 2008 Panasas Inc. All rights reserved. -# -# Authors: -# Boaz Harrosh -# Benny Halevy -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public version 2 License as -# published by the Free Software Foundation -# -config SCSI_OSD_INITIATOR - tristate "OSD-Initiator library" - depends on SCSI - help - Enable the OSD-Initiator library (libosd.ko). - NOTE: You must also select CRYPTO_SHA1 + CRYPTO_HMAC and their - dependencies - -config SCSI_OSD_ULD - tristate "OSD Upper Level driver" - depends on SCSI_OSD_INITIATOR - help - Build a SCSI upper layer driver that exports /dev/osdX devices - to user-mode for testing and controlling OSD devices. It is also - needed by exofs, for mounting an OSD based file system. - -config SCSI_OSD_DPRINT_SENSE - int "(0-2) When sense is returned, DEBUG print all sense descriptors" - default 1 - depends on SCSI_OSD_INITIATOR - help - When a CHECK_CONDITION status is returned from a target, and a - sense-buffer is retrieved, turning this on will dump a full - sense-decoding message. Setting to 2 will also print recoverable - errors that might be regularly returned for some filesystem - operations. - -config SCSI_OSD_DEBUG - bool "Compile All OSD modules with lots of DEBUG prints" - default n - depends on SCSI_OSD_INITIATOR - help - OSD Code is populated with lots of OSD_DEBUG(..) printouts to - dmesg. Enable this if you found a bug and you want to help us - track the problem (see also MAINTAINERS). Setting this will also - force SCSI_OSD_DPRINT_SENSE=2. diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h deleted file mode 100644 index 26341261bb5c..000000000000 --- a/drivers/scsi/osd/osd_debug.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * osd_debug.h - Some kprintf macros - * - * Copyright (C) 2008 Panasas Inc. All rights reserved. 
- * - * Authors: - * Boaz Harrosh - * Benny Halevy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * - */ -#ifndef __OSD_DEBUG_H__ -#define __OSD_DEBUG_H__ - -#define OSD_ERR(fmt, a...) printk(KERN_ERR "osd: " fmt, ##a) -#define OSD_INFO(fmt, a...) printk(KERN_NOTICE "osd: " fmt, ##a) - -#ifdef CONFIG_SCSI_OSD_DEBUG -#define OSD_DEBUG(fmt, a...) \ - printk(KERN_NOTICE "osd @%s:%d: " fmt, __func__, __LINE__, ##a) -#else -#define OSD_DEBUG(fmt, a...) do {} while (0) -#endif - -/* u64 has problems with printk this will cast it to unsigned long long */ -#define _LLU(x) (unsigned long long)(x) - -#endif /* ndef __OSD_DEBUG_H__ */ diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c deleted file mode 100644 index 60cf7c5eb880..000000000000 --- a/drivers/scsi/osd/osd_initiator.c +++ /dev/null @@ -1,2076 +0,0 @@ -/* - * osd_initiator - Main body of the osd initiator library. - * - * Note: The file does not contain the advanced security functionality which - * is only needed by the security_manager's initiators. - * - * Copyright (C) 2008 Panasas Inc. All rights reserved. - * - * Authors: - * Boaz Harrosh - * Benny Halevy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the Panasas company nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include "osd_debug.h" - -#ifndef __unused -# define __unused __attribute__((unused)) -#endif - -enum { OSD_REQ_RETRIES = 1 }; - -MODULE_AUTHOR("Boaz Harrosh "); -MODULE_DESCRIPTION("open-osd initiator library libosd.ko"); -MODULE_LICENSE("GPL"); - -static inline void build_test(void) -{ - /* structures were not packed */ - BUILD_BUG_ON(sizeof(struct osd_capability) != OSD_CAP_LEN); - BUILD_BUG_ON(sizeof(struct osdv2_cdb) != OSD_TOTAL_CDB_LEN); - BUILD_BUG_ON(sizeof(struct osdv1_cdb) != OSDv1_TOTAL_CDB_LEN); -} - -static const char *_osd_ver_desc(struct osd_request *or) -{ - return osd_req_is_ver1(or) ? "OSD1" : "OSD2"; -} - -#define ATTR_DEF_RI(id, len) ATTR_DEF(OSD_APAGE_ROOT_INFORMATION, id, len) - -static int _osd_get_print_system_info(struct osd_dev *od, - void *caps, struct osd_dev_info *odi) -{ - struct osd_request *or; - struct osd_attr get_attrs[] = { - ATTR_DEF_RI(OSD_ATTR_RI_VENDOR_IDENTIFICATION, 8), - ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_IDENTIFICATION, 16), - ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_MODEL, 32), - ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_REVISION_LEVEL, 4), - ATTR_DEF_RI(OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER, 64 /*variable*/), - ATTR_DEF_RI(OSD_ATTR_RI_OSD_NAME, 64 /*variable*/), - ATTR_DEF_RI(OSD_ATTR_RI_TOTAL_CAPACITY, 8), - ATTR_DEF_RI(OSD_ATTR_RI_USED_CAPACITY, 8), - ATTR_DEF_RI(OSD_ATTR_RI_NUMBER_OF_PARTITIONS, 8), - ATTR_DEF_RI(OSD_ATTR_RI_CLOCK, 6), - /* IBM-OSD-SIM Has a bug with this one put it last */ - ATTR_DEF_RI(OSD_ATTR_RI_OSD_SYSTEM_ID, 20), - }; - void *iter = NULL, *pFirst; - int nelem = ARRAY_SIZE(get_attrs), a = 0; - int ret; - - or = osd_start_request(od); - if (!or) - return -ENOMEM; - - /* get attrs */ - osd_req_get_attributes(or, &osd_root_object); - osd_req_add_get_attr_list(or, get_attrs, ARRAY_SIZE(get_attrs)); - - ret = osd_finalize_request(or, 0, caps, NULL); - if (ret) - goto out; - - ret = osd_execute_request(or); - if (ret) { - OSD_ERR("Failed to detect %s => %d\n", _osd_ver_desc(or), ret); - goto out; - } - - osd_req_decode_get_attr_list(or, get_attrs, &nelem, &iter); - - OSD_INFO("Detected %s device\n", - _osd_ver_desc(or)); - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("VENDOR_IDENTIFICATION [%s]\n", - (char *)pFirst); - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n", - (char *)pFirst); - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_MODEL [%s]\n", - (char *)pFirst); - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n", - pFirst ? get_unaligned_be32(pFirst) : ~0U); - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", - (char *)pFirst); - - odi->osdname_len = get_attrs[a].len; - /* Avoid NULL for memcmp optimization 0-length is good enough */ - odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL); - if (!odi->osdname) { - ret = -ENOMEM; - goto out; - } - if (odi->osdname_len) - memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len); - OSD_INFO("OSD_NAME [%s]\n", odi->osdname); - a++; - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("TOTAL_CAPACITY [0x%llx]\n", - pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("USED_CAPACITY [0x%llx]\n", - pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); - - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n", - pFirst ? 
_LLU(get_unaligned_be64(pFirst)) : ~0ULL); - - if (a >= nelem) - goto out; - - /* FIXME: Where are the time utilities */ - pFirst = get_attrs[a++].val_ptr; - OSD_INFO("CLOCK [0x%6phN]\n", pFirst); - - if (a < nelem) { /* IBM-OSD-SIM bug, Might not have it */ - unsigned len = get_attrs[a].len; - char sid_dump[32*4 + 2]; /* 2nibbles+space+ASCII */ - - hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, - sid_dump, sizeof(sid_dump), true); - OSD_INFO("OSD_SYSTEM_ID(%d)\n" - " [%s]\n", len, sid_dump); - - if (unlikely(len > sizeof(odi->systemid))) { - OSD_ERR("OSD Target error: OSD_SYSTEM_ID too long(%d). " - "device identification might not work\n", len); - len = sizeof(odi->systemid); - } - odi->systemid_len = len; - memcpy(odi->systemid, get_attrs[a].val_ptr, len); - a++; - } -out: - osd_end_request(or); - return ret; -} - -int osd_auto_detect_ver(struct osd_dev *od, - void *caps, struct osd_dev_info *odi) -{ - int ret; - - /* Auto-detect the osd version */ - ret = _osd_get_print_system_info(od, caps, odi); - if (ret) { - osd_dev_set_ver(od, OSD_VER1); - OSD_DEBUG("converting to OSD1\n"); - ret = _osd_get_print_system_info(od, caps, odi); - } - - return ret; -} -EXPORT_SYMBOL(osd_auto_detect_ver); - -static unsigned _osd_req_cdb_len(struct osd_request *or) -{ - return osd_req_is_ver1(or) ? OSDv1_TOTAL_CDB_LEN : OSD_TOTAL_CDB_LEN; -} - -static unsigned _osd_req_alist_elem_size(struct osd_request *or, unsigned len) -{ - return osd_req_is_ver1(or) ? - osdv1_attr_list_elem_size(len) : - osdv2_attr_list_elem_size(len); -} - -static void _osd_req_alist_elem_encode(struct osd_request *or, - void *attr_last, const struct osd_attr *oa) -{ - if (osd_req_is_ver1(or)) { - struct osdv1_attributes_list_element *attr = attr_last; - - attr->attr_page = cpu_to_be32(oa->attr_page); - attr->attr_id = cpu_to_be32(oa->attr_id); - attr->attr_bytes = cpu_to_be16(oa->len); - memcpy(attr->attr_val, oa->val_ptr, oa->len); - } else { - struct osdv2_attributes_list_element *attr = attr_last; - - attr->attr_page = cpu_to_be32(oa->attr_page); - attr->attr_id = cpu_to_be32(oa->attr_id); - attr->attr_bytes = cpu_to_be16(oa->len); - memcpy(attr->attr_val, oa->val_ptr, oa->len); - } -} - -static int _osd_req_alist_elem_decode(struct osd_request *or, - void *cur_p, struct osd_attr *oa, unsigned max_bytes) -{ - unsigned inc; - if (osd_req_is_ver1(or)) { - struct osdv1_attributes_list_element *attr = cur_p; - - if (max_bytes < sizeof(*attr)) - return -1; - - oa->len = be16_to_cpu(attr->attr_bytes); - inc = _osd_req_alist_elem_size(or, oa->len); - if (inc > max_bytes) - return -1; - - oa->attr_page = be32_to_cpu(attr->attr_page); - oa->attr_id = be32_to_cpu(attr->attr_id); - - /* OSD1: On empty attributes we return a pointer to 2 bytes - * of zeros. This keeps similar behaviour with OSD2. - * (See below) - */ - oa->val_ptr = likely(oa->len) ? attr->attr_val : - (u8 *)&attr->attr_bytes; - } else { - struct osdv2_attributes_list_element *attr = cur_p; - - if (max_bytes < sizeof(*attr)) - return -1; - - oa->len = be16_to_cpu(attr->attr_bytes); - inc = _osd_req_alist_elem_size(or, oa->len); - if (inc > max_bytes) - return -1; - - oa->attr_page = be32_to_cpu(attr->attr_page); - oa->attr_id = be32_to_cpu(attr->attr_id); - - /* OSD2: For convenience, on empty attributes, we return 8 bytes - * of zeros here. This keeps the same behaviour with OSD2r04, - * and is nice with null terminating ASCII fields. - * oa->val_ptr == NULL marks the end-of-list, or error. - */ - oa->val_ptr = likely(oa->len) ? 
attr->attr_val : attr->reserved; - } - return inc; -} - -static unsigned _osd_req_alist_size(struct osd_request *or, void *list_head) -{ - return osd_req_is_ver1(or) ? - osdv1_list_size(list_head) : - osdv2_list_size(list_head); -} - -static unsigned _osd_req_sizeof_alist_header(struct osd_request *or) -{ - return osd_req_is_ver1(or) ? - sizeof(struct osdv1_attributes_list_header) : - sizeof(struct osdv2_attributes_list_header); -} - -static void _osd_req_set_alist_type(struct osd_request *or, - void *list, int list_type) -{ - if (osd_req_is_ver1(or)) { - struct osdv1_attributes_list_header *attr_list = list; - - memset(attr_list, 0, sizeof(*attr_list)); - attr_list->type = list_type; - } else { - struct osdv2_attributes_list_header *attr_list = list; - - memset(attr_list, 0, sizeof(*attr_list)); - attr_list->type = list_type; - } -} - -static bool _osd_req_is_alist_type(struct osd_request *or, - void *list, int list_type) -{ - if (!list) - return false; - - if (osd_req_is_ver1(or)) { - struct osdv1_attributes_list_header *attr_list = list; - - return attr_list->type == list_type; - } else { - struct osdv2_attributes_list_header *attr_list = list; - - return attr_list->type == list_type; - } -} - -/* This is for List-objects not Attributes-Lists */ -static void _osd_req_encode_olist(struct osd_request *or, - struct osd_obj_id_list *list) -{ - struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); - - if (osd_req_is_ver1(or)) { - cdbh->v1.list_identifier = list->list_identifier; - cdbh->v1.start_address = list->continuation_id; - } else { - cdbh->v2.list_identifier = list->list_identifier; - cdbh->v2.start_address = list->continuation_id; - } -} - -static osd_cdb_offset osd_req_encode_offset(struct osd_request *or, - u64 offset, unsigned *padding) -{ - return __osd_encode_offset(offset, padding, - osd_req_is_ver1(or) ? - OSDv1_OFFSET_MIN_SHIFT : OSD_OFFSET_MIN_SHIFT, - OSD_OFFSET_MAX_SHIFT); -} - -static struct osd_security_parameters * -_osd_req_sec_params(struct osd_request *or) -{ - struct osd_cdb *ocdb = &or->cdb; - - if (osd_req_is_ver1(or)) - return (struct osd_security_parameters *)&ocdb->v1.sec_params; - else - return (struct osd_security_parameters *)&ocdb->v2.sec_params; -} - -void osd_dev_init(struct osd_dev *osdd, struct scsi_device *scsi_device) -{ - memset(osdd, 0, sizeof(*osdd)); - osdd->scsi_device = scsi_device; - osdd->def_timeout = BLK_DEFAULT_SG_TIMEOUT; -#ifdef OSD_VER1_SUPPORT - osdd->version = OSD_VER2; -#endif - /* TODO: Allocate pools for osd_request attributes ... 
*/ -} -EXPORT_SYMBOL(osd_dev_init); - -void osd_dev_fini(struct osd_dev *osdd) -{ - /* TODO: De-allocate pools */ - - osdd->scsi_device = NULL; -} -EXPORT_SYMBOL(osd_dev_fini); - -static struct osd_request *_osd_request_alloc(gfp_t gfp) -{ - struct osd_request *or; - - /* TODO: Use mempool with one saved request */ - or = kzalloc(sizeof(*or), gfp); - return or; -} - -static void _osd_request_free(struct osd_request *or) -{ - kfree(or); -} - -struct osd_request *osd_start_request(struct osd_dev *dev) -{ - struct osd_request *or; - - or = _osd_request_alloc(GFP_KERNEL); - if (!or) - return NULL; - - or->osd_dev = dev; - or->timeout = dev->def_timeout; - or->retries = OSD_REQ_RETRIES; - - return or; -} -EXPORT_SYMBOL(osd_start_request); - -static void _osd_free_seg(struct osd_request *or __unused, - struct _osd_req_data_segment *seg) -{ - if (!seg->buff || !seg->alloc_size) - return; - - kfree(seg->buff); - seg->buff = NULL; - seg->alloc_size = 0; -} - -static void _put_request(struct request *rq) -{ - /* - * If osd_finalize_request() was called but the request was not - * executed through the block layer, then we must release BIOs. - * TODO: Keep error code in or->async_error. Need to audit all - * code paths. - */ - if (unlikely(rq->bio)) - blk_mq_end_request(rq, BLK_STS_IOERR); - else - blk_put_request(rq); -} - -void osd_end_request(struct osd_request *or) -{ - struct request *rq = or->request; - - if (rq) { - if (rq->next_rq) { - _put_request(rq->next_rq); - rq->next_rq = NULL; - } - - _put_request(rq); - } - - _osd_free_seg(or, &or->get_attr); - _osd_free_seg(or, &or->enc_get_attr); - _osd_free_seg(or, &or->set_attr); - _osd_free_seg(or, &or->cdb_cont); - - _osd_request_free(or); -} -EXPORT_SYMBOL(osd_end_request); - -static void _set_error_resid(struct osd_request *or, struct request *req, - blk_status_t error) -{ - or->async_error = error; - or->req_errors = scsi_req(req)->result; - or->sense_len = scsi_req(req)->sense_len; - if (or->sense_len) - memcpy(or->sense, scsi_req(req)->sense, or->sense_len); - if (or->out.req) - or->out.residual = scsi_req(or->out.req)->resid_len; - if (or->in.req) - or->in.residual = scsi_req(or->in.req)->resid_len; -} - -int osd_execute_request(struct osd_request *or) -{ - blk_execute_rq(or->request->q, NULL, or->request, 0); - - if (scsi_req(or->request)->result) { - _set_error_resid(or, or->request, BLK_STS_IOERR); - return -EIO; - } - - _set_error_resid(or, or->request, BLK_STS_OK); - return 0; -} -EXPORT_SYMBOL(osd_execute_request); - -static void osd_request_async_done(struct request *req, blk_status_t error) -{ - struct osd_request *or = req->end_io_data; - - _set_error_resid(or, req, error); - if (req->next_rq) { - blk_put_request(req->next_rq); - req->next_rq = NULL; - } - - blk_put_request(req); - or->request = NULL; - or->in.req = NULL; - or->out.req = NULL; - - if (or->async_done) - or->async_done(or, or->async_private); - else - osd_end_request(or); -} - -int osd_execute_request_async(struct osd_request *or, - osd_req_done_fn *done, void *private) -{ - or->request->end_io_data = or; - or->async_private = private; - or->async_done = done; - - blk_execute_rq_nowait(or->request->q, NULL, or->request, 0, - osd_request_async_done); - return 0; -} -EXPORT_SYMBOL(osd_execute_request_async); - -u8 sg_out_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT]; -u8 sg_in_pad_buffer[1 << OSDv1_OFFSET_MIN_SHIFT]; - -static int _osd_realloc_seg(struct osd_request *or, - struct _osd_req_data_segment *seg, unsigned max_bytes) -{ - void *buff; - - if (seg->alloc_size >= 
max_bytes) - return 0; - - buff = krealloc(seg->buff, max_bytes, GFP_KERNEL); - if (!buff) { - OSD_ERR("Failed to Realloc %d-bytes was-%d\n", max_bytes, - seg->alloc_size); - return -ENOMEM; - } - - memset(buff + seg->alloc_size, 0, max_bytes - seg->alloc_size); - seg->buff = buff; - seg->alloc_size = max_bytes; - return 0; -} - -static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes) -{ - OSD_DEBUG("total_bytes=%d\n", total_bytes); - return _osd_realloc_seg(or, &or->cdb_cont, total_bytes); -} - -static int _alloc_set_attr_list(struct osd_request *or, - const struct osd_attr *oa, unsigned nelem, unsigned add_bytes) -{ - unsigned total_bytes = add_bytes; - - for (; nelem; --nelem, ++oa) - total_bytes += _osd_req_alist_elem_size(or, oa->len); - - OSD_DEBUG("total_bytes=%d\n", total_bytes); - return _osd_realloc_seg(or, &or->set_attr, total_bytes); -} - -static int _alloc_get_attr_desc(struct osd_request *or, unsigned max_bytes) -{ - OSD_DEBUG("total_bytes=%d\n", max_bytes); - return _osd_realloc_seg(or, &or->enc_get_attr, max_bytes); -} - -static int _alloc_get_attr_list(struct osd_request *or) -{ - OSD_DEBUG("total_bytes=%d\n", or->get_attr.total_bytes); - return _osd_realloc_seg(or, &or->get_attr, or->get_attr.total_bytes); -} - -/* - * Common to all OSD commands - */ - -static void _osdv1_req_encode_common(struct osd_request *or, - __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len) -{ - struct osdv1_cdb *ocdb = &or->cdb.v1; - - /* - * For speed, the commands - * OSD_ACT_PERFORM_SCSI_COMMAND , V1 0x8F7E, V2 0x8F7C - * OSD_ACT_SCSI_TASK_MANAGEMENT , V1 0x8F7F, V2 0x8F7D - * are not supported here. Should pass zero and set after the call - */ - act &= cpu_to_be16(~0x0080); /* V1 action code */ - - OSD_DEBUG("OSDv1 execute opcode 0x%x\n", be16_to_cpu(act)); - - ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD; - ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH; - ocdb->h.varlen_cdb.service_action = act; - - ocdb->h.partition = cpu_to_be64(obj->partition); - ocdb->h.object = cpu_to_be64(obj->id); - ocdb->h.v1.length = cpu_to_be64(len); - ocdb->h.v1.start_address = cpu_to_be64(offset); -} - -static void _osdv2_req_encode_common(struct osd_request *or, - __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len) -{ - struct osdv2_cdb *ocdb = &or->cdb.v2; - - OSD_DEBUG("OSDv2 execute opcode 0x%x\n", be16_to_cpu(act)); - - ocdb->h.varlen_cdb.opcode = VARIABLE_LENGTH_CMD; - ocdb->h.varlen_cdb.additional_cdb_length = OSD_ADDITIONAL_CDB_LENGTH; - ocdb->h.varlen_cdb.service_action = act; - - ocdb->h.partition = cpu_to_be64(obj->partition); - ocdb->h.object = cpu_to_be64(obj->id); - ocdb->h.v2.length = cpu_to_be64(len); - ocdb->h.v2.start_address = cpu_to_be64(offset); -} - -static void _osd_req_encode_common(struct osd_request *or, - __be16 act, const struct osd_obj_id *obj, u64 offset, u64 len) -{ - if (osd_req_is_ver1(or)) - _osdv1_req_encode_common(or, act, obj, offset, len); - else - _osdv2_req_encode_common(or, act, obj, offset, len); -} - -/* - * Device commands - */ -/*TODO: void osd_req_set_master_seed_xchg(struct osd_request *, ...); */ -/*TODO: void osd_req_set_master_key(struct osd_request *, ...); */ - -void osd_req_format(struct osd_request *or, u64 tot_capacity) -{ - _osd_req_encode_common(or, OSD_ACT_FORMAT_OSD, &osd_root_object, 0, - tot_capacity); -} -EXPORT_SYMBOL(osd_req_format); - -int osd_req_list_dev_partitions(struct osd_request *or, - osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem) -{ - return 
osd_req_list_partition_objects(or, 0, initial_id, list, nelem); -} -EXPORT_SYMBOL(osd_req_list_dev_partitions); - -static void _osd_req_encode_flush(struct osd_request *or, - enum osd_options_flush_scope_values op) -{ - struct osd_cdb_head *ocdb = osd_cdb_head(&or->cdb); - - ocdb->command_specific_options = op; -} - -void osd_req_flush_obsd(struct osd_request *or, - enum osd_options_flush_scope_values op) -{ - _osd_req_encode_common(or, OSD_ACT_FLUSH_OSD, &osd_root_object, 0, 0); - _osd_req_encode_flush(or, op); -} -EXPORT_SYMBOL(osd_req_flush_obsd); - -/*TODO: void osd_req_perform_scsi_command(struct osd_request *, - const u8 *cdb, ...); */ -/*TODO: void osd_req_task_management(struct osd_request *, ...); */ - -/* - * Partition commands - */ -static void _osd_req_encode_partition(struct osd_request *or, - __be16 act, osd_id partition) -{ - struct osd_obj_id par = { - .partition = partition, - .id = 0, - }; - - _osd_req_encode_common(or, act, &par, 0, 0); -} - -void osd_req_create_partition(struct osd_request *or, osd_id partition) -{ - _osd_req_encode_partition(or, OSD_ACT_CREATE_PARTITION, partition); -} -EXPORT_SYMBOL(osd_req_create_partition); - -void osd_req_remove_partition(struct osd_request *or, osd_id partition) -{ - _osd_req_encode_partition(or, OSD_ACT_REMOVE_PARTITION, partition); -} -EXPORT_SYMBOL(osd_req_remove_partition); - -/*TODO: void osd_req_set_partition_key(struct osd_request *, - osd_id partition, u8 new_key_id[OSD_CRYPTO_KEYID_SIZE], - u8 seed[OSD_CRYPTO_SEED_SIZE]); */ - -static int _osd_req_list_objects(struct osd_request *or, - __be16 action, const struct osd_obj_id *obj, osd_id initial_id, - struct osd_obj_id_list *list, unsigned nelem) -{ - struct request_queue *q = osd_request_queue(or->osd_dev); - u64 len = nelem * sizeof(osd_id) + sizeof(*list); - struct bio *bio; - - _osd_req_encode_common(or, action, obj, (u64)initial_id, len); - - if (list->list_identifier) - _osd_req_encode_olist(or, list); - - WARN_ON(or->in.bio); - bio = bio_map_kern(q, list, len, GFP_KERNEL); - if (IS_ERR(bio)) { - OSD_ERR("!!! 
Failed to allocate list_objects BIO\n"); - return PTR_ERR(bio); - } - - bio_set_op_attrs(bio, REQ_OP_READ, 0); - or->in.bio = bio; - or->in.total_bytes = bio->bi_iter.bi_size; - return 0; -} - -int osd_req_list_partition_collections(struct osd_request *or, - osd_id partition, osd_id initial_id, struct osd_obj_id_list *list, - unsigned nelem) -{ - struct osd_obj_id par = { - .partition = partition, - .id = 0, - }; - - return osd_req_list_collection_objects(or, &par, initial_id, list, - nelem); -} -EXPORT_SYMBOL(osd_req_list_partition_collections); - -int osd_req_list_partition_objects(struct osd_request *or, - osd_id partition, osd_id initial_id, struct osd_obj_id_list *list, - unsigned nelem) -{ - struct osd_obj_id par = { - .partition = partition, - .id = 0, - }; - - return _osd_req_list_objects(or, OSD_ACT_LIST, &par, initial_id, list, - nelem); -} -EXPORT_SYMBOL(osd_req_list_partition_objects); - -void osd_req_flush_partition(struct osd_request *or, - osd_id partition, enum osd_options_flush_scope_values op) -{ - _osd_req_encode_partition(or, OSD_ACT_FLUSH_PARTITION, partition); - _osd_req_encode_flush(or, op); -} -EXPORT_SYMBOL(osd_req_flush_partition); - -/* - * Collection commands - */ -/*TODO: void osd_req_create_collection(struct osd_request *, - const struct osd_obj_id *); */ -/*TODO: void osd_req_remove_collection(struct osd_request *, - const struct osd_obj_id *); */ - -int osd_req_list_collection_objects(struct osd_request *or, - const struct osd_obj_id *obj, osd_id initial_id, - struct osd_obj_id_list *list, unsigned nelem) -{ - return _osd_req_list_objects(or, OSD_ACT_LIST_COLLECTION, obj, - initial_id, list, nelem); -} -EXPORT_SYMBOL(osd_req_list_collection_objects); - -/*TODO: void query(struct osd_request *, ...); V2 */ - -void osd_req_flush_collection(struct osd_request *or, - const struct osd_obj_id *obj, enum osd_options_flush_scope_values op) -{ - _osd_req_encode_common(or, OSD_ACT_FLUSH_PARTITION, obj, 0, 0); - _osd_req_encode_flush(or, op); -} -EXPORT_SYMBOL(osd_req_flush_collection); - -/*TODO: void get_member_attrs(struct osd_request *, ...); V2 */ -/*TODO: void set_member_attrs(struct osd_request *, ...); V2 */ - -/* - * Object commands - */ -void osd_req_create_object(struct osd_request *or, struct osd_obj_id *obj) -{ - _osd_req_encode_common(or, OSD_ACT_CREATE, obj, 0, 0); -} -EXPORT_SYMBOL(osd_req_create_object); - -void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *obj) -{ - _osd_req_encode_common(or, OSD_ACT_REMOVE, obj, 0, 0); -} -EXPORT_SYMBOL(osd_req_remove_object); - - -/*TODO: void osd_req_create_multi(struct osd_request *or, - struct osd_obj_id *first, struct osd_obj_id_list *list, unsigned nelem); -*/ - -void osd_req_write(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, - struct bio *bio, u64 len) -{ - _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); - WARN_ON(or->out.bio || or->out.total_bytes); - WARN_ON(!op_is_write(bio_op(bio))); - or->out.bio = bio; - or->out.total_bytes = len; -} -EXPORT_SYMBOL(osd_req_write); - -int osd_req_write_kern(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) -{ - struct request_queue *req_q = osd_request_queue(or->osd_dev); - struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - osd_req_write(or, obj, offset, bio, len); - return 0; -} -EXPORT_SYMBOL(osd_req_write_kern); - -/*TODO: void osd_req_append(struct osd_request *, - const 
struct osd_obj_id *, struct bio *data_out); */ -/*TODO: void osd_req_create_write(struct osd_request *, - const struct osd_obj_id *, struct bio *data_out, u64 offset); */ -/*TODO: void osd_req_clear(struct osd_request *, - const struct osd_obj_id *, u64 offset, u64 len); */ -/*TODO: void osd_req_punch(struct osd_request *, - const struct osd_obj_id *, u64 offset, u64 len); V2 */ - -void osd_req_flush_object(struct osd_request *or, - const struct osd_obj_id *obj, enum osd_options_flush_scope_values op, - /*V2*/ u64 offset, /*V2*/ u64 len) -{ - if (unlikely(osd_req_is_ver1(or) && (offset || len))) { - OSD_DEBUG("OSD Ver1 flush on specific range ignored\n"); - offset = 0; - len = 0; - } - - _osd_req_encode_common(or, OSD_ACT_FLUSH, obj, offset, len); - _osd_req_encode_flush(or, op); -} -EXPORT_SYMBOL(osd_req_flush_object); - -void osd_req_read(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, - struct bio *bio, u64 len) -{ - _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); - WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(op_is_write(bio_op(bio))); - or->in.bio = bio; - or->in.total_bytes = len; -} -EXPORT_SYMBOL(osd_req_read); - -int osd_req_read_kern(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) -{ - struct request_queue *req_q = osd_request_queue(or->osd_dev); - struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - osd_req_read(or, obj, offset, bio, len); - return 0; -} -EXPORT_SYMBOL(osd_req_read_kern); - -static int _add_sg_continuation_descriptor(struct osd_request *or, - const struct osd_sg_entry *sglist, unsigned numentries, u64 *len) -{ - struct osd_sg_continuation_descriptor *oscd; - u32 oscd_size; - unsigned i; - int ret; - - oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]); - - if (!or->cdb_cont.total_bytes) { - /* First time, jump over the header, we will write to: - * cdb_cont.buff + cdb_cont.total_bytes - */ - or->cdb_cont.total_bytes = - sizeof(struct osd_continuation_segment_header); - } - - ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size); - if (unlikely(ret)) - return ret; - - oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes; - oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST); - oscd->hdr.pad_length = 0; - oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd)); - - *len = 0; - /* copy the sg entries and convert to network byte order */ - for (i = 0; i < numentries; i++) { - oscd->entries[i].offset = cpu_to_be64(sglist[i].offset); - oscd->entries[i].len = cpu_to_be64(sglist[i].len); - *len += sglist[i].len; - } - - or->cdb_cont.total_bytes += oscd_size; - OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n", - or->cdb_cont.total_bytes, oscd_size, numentries); - return 0; -} - -static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) -{ - struct request_queue *req_q = osd_request_queue(or->osd_dev); - struct bio *bio; - struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); - struct osd_continuation_segment_header *cont_seg_hdr; - - if (!or->cdb_cont.total_bytes) - return 0; - - cont_seg_hdr = or->cdb_cont.buff; - cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2; - cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action; - - /* create a bio for continuation segment */ - bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes, - GFP_KERNEL); - if (IS_ERR(bio)) - return PTR_ERR(bio); - - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - - /* integrity check the 
continuation before the bio is linked - * with the other data segments since the continuation - * integrity is separate from the other data segments. - */ - osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key); - - cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes); - - /* we can't use _req_append_segment, because we need to link in the - * continuation bio to the head of the bio list - the - * continuation segment (if it exists) is always the first segment in - * the out data buffer. - */ - bio->bi_next = or->out.bio; - or->out.bio = bio; - or->out.total_bytes += or->cdb_cont.total_bytes; - - return 0; -} - -/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an - * @sglist that has the scatter gather entries. Scatter-gather enables a write - * of multiple none-contiguous areas of an object, in a single call. The extents - * may overlap and/or be in any order. The only constrain is that: - * total_bytes(sglist) >= total_bytes(bio) - */ -int osd_req_write_sg(struct osd_request *or, - const struct osd_obj_id *obj, struct bio *bio, - const struct osd_sg_entry *sglist, unsigned numentries) -{ - u64 len; - int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len); - - if (ret) - return ret; - osd_req_write(or, obj, 0, bio, len); - - return 0; -} -EXPORT_SYMBOL(osd_req_write_sg); - -/* osd_req_read_sg: Read multiple extents of an object into @bio - * See osd_req_write_sg - */ -int osd_req_read_sg(struct osd_request *or, - const struct osd_obj_id *obj, struct bio *bio, - const struct osd_sg_entry *sglist, unsigned numentries) -{ - u64 len; - u64 off; - int ret; - - if (numentries > 1) { - off = 0; - ret = _add_sg_continuation_descriptor(or, sglist, numentries, - &len); - if (ret) - return ret; - } else { - /* Optimize the case of single segment, read_sg is a - * bidi operation. - */ - len = sglist->len; - off = sglist->offset; - } - osd_req_read(or, obj, off, bio, len); - - return 0; -} -EXPORT_SYMBOL(osd_req_read_sg); - -/* SG-list write/read Kern API - * - * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array - * of sg_entries. @numentries indicates how many pointers and sg_entries there - * are. By requiring an array of buff pointers. This allows a caller to do a - * single write/read and scatter into multiple buffers. - * NOTE: Each buffer + len should not cross a page boundary. 
- */ -static struct bio *_create_sg_bios(struct osd_request *or, - void **buff, const struct osd_sg_entry *sglist, unsigned numentries) -{ - struct request_queue *q = osd_request_queue(or->osd_dev); - struct bio *bio; - unsigned i; - - bio = bio_kmalloc(GFP_KERNEL, numentries); - if (unlikely(!bio)) { - OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries); - return ERR_PTR(-ENOMEM); - } - - for (i = 0; i < numentries; i++) { - unsigned offset = offset_in_page(buff[i]); - struct page *page = virt_to_page(buff[i]); - unsigned len = sglist[i].len; - unsigned added_len; - - BUG_ON(offset + len > PAGE_SIZE); - added_len = bio_add_pc_page(q, bio, page, len, offset); - if (unlikely(len != added_len)) { - OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n", - len, added_len); - bio_put(bio); - return ERR_PTR(-ENOMEM); - } - } - - return bio; -} - -int osd_req_write_sg_kern(struct osd_request *or, - const struct osd_obj_id *obj, void **buff, - const struct osd_sg_entry *sglist, unsigned numentries) -{ - struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); - if (IS_ERR(bio)) - return PTR_ERR(bio); - - bio_set_op_attrs(bio, REQ_OP_WRITE, 0); - osd_req_write_sg(or, obj, bio, sglist, numentries); - - return 0; -} -EXPORT_SYMBOL(osd_req_write_sg_kern); - -int osd_req_read_sg_kern(struct osd_request *or, - const struct osd_obj_id *obj, void **buff, - const struct osd_sg_entry *sglist, unsigned numentries) -{ - struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); - if (IS_ERR(bio)) - return PTR_ERR(bio); - - osd_req_read_sg(or, obj, bio, sglist, numentries); - - return 0; -} -EXPORT_SYMBOL(osd_req_read_sg_kern); - - - -void osd_req_get_attributes(struct osd_request *or, - const struct osd_obj_id *obj) -{ - _osd_req_encode_common(or, OSD_ACT_GET_ATTRIBUTES, obj, 0, 0); -} -EXPORT_SYMBOL(osd_req_get_attributes); - -void osd_req_set_attributes(struct osd_request *or, - const struct osd_obj_id *obj) -{ - _osd_req_encode_common(or, OSD_ACT_SET_ATTRIBUTES, obj, 0, 0); -} -EXPORT_SYMBOL(osd_req_set_attributes); - -/* - * Attributes List-mode - */ - -int osd_req_add_set_attr_list(struct osd_request *or, - const struct osd_attr *oa, unsigned nelem) -{ - unsigned total_bytes = or->set_attr.total_bytes; - void *attr_last; - int ret; - - if (or->attributes_mode && - or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) { - WARN_ON(1); - return -EINVAL; - } - or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS; - - if (!total_bytes) { /* first-time: allocate and put list header */ - total_bytes = _osd_req_sizeof_alist_header(or); - ret = _alloc_set_attr_list(or, oa, nelem, total_bytes); - if (ret) - return ret; - _osd_req_set_alist_type(or, or->set_attr.buff, - OSD_ATTR_LIST_SET_RETRIEVE); - } - attr_last = or->set_attr.buff + total_bytes; - - for (; nelem; --nelem) { - unsigned elem_size = _osd_req_alist_elem_size(or, oa->len); - - total_bytes += elem_size; - if (unlikely(or->set_attr.alloc_size < total_bytes)) { - or->set_attr.total_bytes = total_bytes - elem_size; - ret = _alloc_set_attr_list(or, oa, nelem, total_bytes); - if (ret) - return ret; - attr_last = - or->set_attr.buff + or->set_attr.total_bytes; - } - - _osd_req_alist_elem_encode(or, attr_last, oa); - - attr_last += elem_size; - ++oa; - } - - or->set_attr.total_bytes = total_bytes; - return 0; -} -EXPORT_SYMBOL(osd_req_add_set_attr_list); - -static int _req_append_segment(struct osd_request *or, - unsigned padding, struct _osd_req_data_segment *seg, - struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) -{ - void 
*pad_buff; - int ret; - - if (padding) { - /* check if we can just add it to last buffer */ - if (last_seg && - (padding <= last_seg->alloc_size - last_seg->total_bytes)) - pad_buff = last_seg->buff + last_seg->total_bytes; - else - pad_buff = io->pad_buff; - - ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding, - GFP_KERNEL); - if (ret) - return ret; - io->total_bytes += padding; - } - - ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes, - GFP_KERNEL); - if (ret) - return ret; - - io->total_bytes += seg->total_bytes; - OSD_DEBUG("padding=%d buff=%p total_bytes=%d\n", padding, seg->buff, - seg->total_bytes); - return 0; -} - -static int _osd_req_finalize_set_attr_list(struct osd_request *or) -{ - struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); - unsigned padding; - int ret; - - if (!or->set_attr.total_bytes) { - cdbh->attrs_list.set_attr_offset = OSD_OFFSET_UNUSED; - return 0; - } - - cdbh->attrs_list.set_attr_bytes = cpu_to_be32(or->set_attr.total_bytes); - cdbh->attrs_list.set_attr_offset = - osd_req_encode_offset(or, or->out.total_bytes, &padding); - - ret = _req_append_segment(or, padding, &or->set_attr, - or->out.last_seg, &or->out); - if (ret) - return ret; - - or->out.last_seg = &or->set_attr; - return 0; -} - -int osd_req_add_get_attr_list(struct osd_request *or, - const struct osd_attr *oa, unsigned nelem) -{ - unsigned total_bytes = or->enc_get_attr.total_bytes; - void *attr_last; - int ret; - - if (or->attributes_mode && - or->attributes_mode != OSD_CDB_GET_SET_ATTR_LISTS) { - WARN_ON(1); - return -EINVAL; - } - or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS; - - /* first time calc data-in list header size */ - if (!or->get_attr.total_bytes) - or->get_attr.total_bytes = _osd_req_sizeof_alist_header(or); - - /* calc data-out info */ - if (!total_bytes) { /* first-time: allocate and put list header */ - unsigned max_bytes; - - total_bytes = _osd_req_sizeof_alist_header(or); - max_bytes = total_bytes + - nelem * sizeof(struct osd_attributes_list_attrid); - ret = _alloc_get_attr_desc(or, max_bytes); - if (ret) - return ret; - - _osd_req_set_alist_type(or, or->enc_get_attr.buff, - OSD_ATTR_LIST_GET); - } - attr_last = or->enc_get_attr.buff + total_bytes; - - for (; nelem; --nelem) { - struct osd_attributes_list_attrid *attrid; - const unsigned cur_size = sizeof(*attrid); - - total_bytes += cur_size; - if (unlikely(or->enc_get_attr.alloc_size < total_bytes)) { - or->enc_get_attr.total_bytes = total_bytes - cur_size; - ret = _alloc_get_attr_desc(or, - total_bytes + nelem * sizeof(*attrid)); - if (ret) - return ret; - attr_last = or->enc_get_attr.buff + - or->enc_get_attr.total_bytes; - } - - attrid = attr_last; - attrid->attr_page = cpu_to_be32(oa->attr_page); - attrid->attr_id = cpu_to_be32(oa->attr_id); - - attr_last += cur_size; - - /* calc data-in size */ - or->get_attr.total_bytes += - _osd_req_alist_elem_size(or, oa->len); - ++oa; - } - - or->enc_get_attr.total_bytes = total_bytes; - - OSD_DEBUG( - "get_attr.total_bytes=%u(%u) enc_get_attr.total_bytes=%u(%zu)\n", - or->get_attr.total_bytes, - or->get_attr.total_bytes - _osd_req_sizeof_alist_header(or), - or->enc_get_attr.total_bytes, - (or->enc_get_attr.total_bytes - _osd_req_sizeof_alist_header(or)) - / sizeof(struct osd_attributes_list_attrid)); - - return 0; -} -EXPORT_SYMBOL(osd_req_add_get_attr_list); - -static int _osd_req_finalize_get_attr_list(struct osd_request *or) -{ - struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); - unsigned out_padding; - unsigned in_padding; - int ret; - - 
if (!or->enc_get_attr.total_bytes) { - cdbh->attrs_list.get_attr_desc_offset = OSD_OFFSET_UNUSED; - cdbh->attrs_list.get_attr_offset = OSD_OFFSET_UNUSED; - return 0; - } - - ret = _alloc_get_attr_list(or); - if (ret) - return ret; - - /* The out-going buffer info update */ - OSD_DEBUG("out-going\n"); - cdbh->attrs_list.get_attr_desc_bytes = - cpu_to_be32(or->enc_get_attr.total_bytes); - - cdbh->attrs_list.get_attr_desc_offset = - osd_req_encode_offset(or, or->out.total_bytes, &out_padding); - - ret = _req_append_segment(or, out_padding, &or->enc_get_attr, - or->out.last_seg, &or->out); - if (ret) - return ret; - or->out.last_seg = &or->enc_get_attr; - - /* The incoming buffer info update */ - OSD_DEBUG("in-coming\n"); - cdbh->attrs_list.get_attr_alloc_length = - cpu_to_be32(or->get_attr.total_bytes); - - cdbh->attrs_list.get_attr_offset = - osd_req_encode_offset(or, or->in.total_bytes, &in_padding); - - ret = _req_append_segment(or, in_padding, &or->get_attr, NULL, - &or->in); - if (ret) - return ret; - or->in.last_seg = &or->get_attr; - - return 0; -} - -int osd_req_decode_get_attr_list(struct osd_request *or, - struct osd_attr *oa, int *nelem, void **iterator) -{ - unsigned cur_bytes, returned_bytes; - int n; - const unsigned sizeof_attr_list = _osd_req_sizeof_alist_header(or); - void *cur_p; - - if (!_osd_req_is_alist_type(or, or->get_attr.buff, - OSD_ATTR_LIST_SET_RETRIEVE)) { - oa->attr_page = 0; - oa->attr_id = 0; - oa->val_ptr = NULL; - oa->len = 0; - *iterator = NULL; - return 0; - } - - if (*iterator) { - BUG_ON((*iterator < or->get_attr.buff) || - (or->get_attr.buff + or->get_attr.alloc_size < *iterator)); - cur_p = *iterator; - cur_bytes = (*iterator - or->get_attr.buff) - sizeof_attr_list; - returned_bytes = or->get_attr.total_bytes; - } else { /* first time decode the list header */ - cur_bytes = sizeof_attr_list; - returned_bytes = _osd_req_alist_size(or, or->get_attr.buff) + - sizeof_attr_list; - - cur_p = or->get_attr.buff + sizeof_attr_list; - - if (returned_bytes > or->get_attr.alloc_size) { - OSD_DEBUG("target report: space was not big enough! " - "Allocate=%u Needed=%u\n", - or->get_attr.alloc_size, - returned_bytes + sizeof_attr_list); - - returned_bytes = - or->get_attr.alloc_size - sizeof_attr_list; - } - or->get_attr.total_bytes = returned_bytes; - } - - for (n = 0; (n < *nelem) && (cur_bytes < returned_bytes); ++n) { - int inc = _osd_req_alist_elem_decode(or, cur_p, oa, - returned_bytes - cur_bytes); - - if (inc < 0) { - OSD_ERR("BAD FOOD from target. list not valid!" - "c=%d r=%d n=%d\n", - cur_bytes, returned_bytes, n); - oa->val_ptr = NULL; - cur_bytes = returned_bytes; /* break the caller loop */ - break; - } - - cur_bytes += inc; - cur_p += inc; - ++oa; - } - - *iterator = (returned_bytes - cur_bytes) ? 
cur_p : NULL; - *nelem = n; - return returned_bytes - cur_bytes; -} -EXPORT_SYMBOL(osd_req_decode_get_attr_list); - -/* - * Attributes Page-mode - */ - -int osd_req_add_get_attr_page(struct osd_request *or, - u32 page_id, void *attar_page, unsigned max_page_len, - const struct osd_attr *set_one_attr) -{ - struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); - - if (or->attributes_mode && - or->attributes_mode != OSD_CDB_GET_ATTR_PAGE_SET_ONE) { - WARN_ON(1); - return -EINVAL; - } - or->attributes_mode = OSD_CDB_GET_ATTR_PAGE_SET_ONE; - - or->get_attr.buff = attar_page; - or->get_attr.total_bytes = max_page_len; - - cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id); - cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len); - - if (!set_one_attr || !set_one_attr->attr_page) - return 0; /* The set is optional */ - - or->set_attr.buff = set_one_attr->val_ptr; - or->set_attr.total_bytes = set_one_attr->len; - - cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page); - cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id); - cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len); - return 0; -} -EXPORT_SYMBOL(osd_req_add_get_attr_page); - -static int _osd_req_finalize_attr_page(struct osd_request *or) -{ - struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); - unsigned in_padding, out_padding; - int ret; - - /* returned page */ - cdbh->attrs_page.get_attr_offset = - osd_req_encode_offset(or, or->in.total_bytes, &in_padding); - - ret = _req_append_segment(or, in_padding, &or->get_attr, NULL, - &or->in); - if (ret) - return ret; - - if (or->set_attr.total_bytes == 0) - return 0; - - /* set one value */ - cdbh->attrs_page.set_attr_offset = - osd_req_encode_offset(or, or->out.total_bytes, &out_padding); - - ret = _req_append_segment(or, out_padding, &or->set_attr, NULL, - &or->out); - return ret; -} - -static inline void osd_sec_parms_set_out_offset(bool is_v1, - struct osd_security_parameters *sec_parms, osd_cdb_offset offset) -{ - if (is_v1) - sec_parms->v1.data_out_integrity_check_offset = offset; - else - sec_parms->v2.data_out_integrity_check_offset = offset; -} - -static inline void osd_sec_parms_set_in_offset(bool is_v1, - struct osd_security_parameters *sec_parms, osd_cdb_offset offset) -{ - if (is_v1) - sec_parms->v1.data_in_integrity_check_offset = offset; - else - sec_parms->v2.data_in_integrity_check_offset = offset; -} - -static int _osd_req_finalize_data_integrity(struct osd_request *or, - bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes, - const u8 *cap_key) -{ - struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); - int ret; - - if (!osd_is_sec_alldata(sec_parms)) - return 0; - - if (has_out) { - struct _osd_req_data_segment seg = { - .buff = &or->out_data_integ, - .total_bytes = sizeof(or->out_data_integ), - }; - unsigned pad; - - or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes); - or->out_data_integ.set_attributes_bytes = cpu_to_be64( - or->set_attr.total_bytes); - or->out_data_integ.get_attributes_bytes = cpu_to_be64( - or->enc_get_attr.total_bytes); - - osd_sec_parms_set_out_offset(osd_req_is_ver1(or), sec_parms, - osd_req_encode_offset(or, or->out.total_bytes, &pad)); - - ret = _req_append_segment(or, pad, &seg, or->out.last_seg, - &or->out); - if (ret) - return ret; - or->out.last_seg = NULL; - - /* they are now all chained to request sign them all together */ - osd_sec_sign_data(&or->out_data_integ, out_data_bio, - cap_key); - } - - if (has_in) { - struct _osd_req_data_segment 
seg = { - .buff = &or->in_data_integ, - .total_bytes = sizeof(or->in_data_integ), - }; - unsigned pad; - - osd_sec_parms_set_in_offset(osd_req_is_ver1(or), sec_parms, - osd_req_encode_offset(or, or->in.total_bytes, &pad)); - - ret = _req_append_segment(or, pad, &seg, or->in.last_seg, - &or->in); - if (ret) - return ret; - - or->in.last_seg = NULL; - } - - return 0; -} - -/* - * osd_finalize_request and helpers - */ -static struct request *_make_request(struct request_queue *q, bool has_write, - struct _osd_io_info *oii) -{ - struct request *req; - struct bio *bio = oii->bio; - int ret; - - req = blk_get_request(q, has_write ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, - 0); - if (IS_ERR(req)) - return req; - - for_each_bio(bio) { - struct bio *bounce_bio = bio; - - ret = blk_rq_append_bio(req, &bounce_bio); - if (ret) - return ERR_PTR(ret); - } - - return req; -} - -static int _init_blk_request(struct osd_request *or, - bool has_in, bool has_out) -{ - struct scsi_device *scsi_device = or->osd_dev->scsi_device; - struct request_queue *q = scsi_device->request_queue; - struct request *req; - int ret; - - req = _make_request(q, has_out, has_out ? &or->out : &or->in); - if (IS_ERR(req)) { - ret = PTR_ERR(req); - goto out; - } - - or->request = req; - req->rq_flags |= RQF_QUIET; - - req->timeout = or->timeout; - scsi_req(req)->retries = or->retries; - - if (has_out) { - or->out.req = req; - if (has_in) { - /* allocate bidi request */ - req = _make_request(q, false, &or->in); - if (IS_ERR(req)) { - OSD_DEBUG("blk_get_request for bidi failed\n"); - ret = PTR_ERR(req); - goto out; - } - or->in.req = or->request->next_rq = req; - } - } else if (has_in) - or->in.req = req; - - ret = 0; -out: - OSD_DEBUG("or=%p has_in=%d has_out=%d => %d, %p\n", - or, has_in, has_out, ret, or->request); - return ret; -} - -int osd_finalize_request(struct osd_request *or, - u8 options, const void *cap, const u8 *cap_key) -{ - struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); - bool has_in, has_out; - /* Save for data_integrity without the cdb_continuation */ - struct bio *out_data_bio = or->out.bio; - u64 out_data_bytes = or->out.total_bytes; - int ret; - - if (options & OSD_REQ_FUA) - cdbh->options |= OSD_CDB_FUA; - - if (options & OSD_REQ_DPO) - cdbh->options |= OSD_CDB_DPO; - - if (options & OSD_REQ_BYPASS_TIMESTAMPS) - cdbh->timestamp_control = OSD_CDB_BYPASS_TIMESTAMPS; - - osd_set_caps(&or->cdb, cap); - - has_in = or->in.bio || or->get_attr.total_bytes; - has_out = or->out.bio || or->cdb_cont.total_bytes || - or->set_attr.total_bytes || or->enc_get_attr.total_bytes; - - ret = _osd_req_finalize_cdb_cont(or, cap_key); - if (ret) { - OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n"); - return ret; - } - ret = _init_blk_request(or, has_in, has_out); - if (ret) { - OSD_DEBUG("_init_blk_request failed\n"); - return ret; - } - - or->out.pad_buff = sg_out_pad_buffer; - or->in.pad_buff = sg_in_pad_buffer; - - if (!or->attributes_mode) - or->attributes_mode = OSD_CDB_GET_SET_ATTR_LISTS; - cdbh->command_specific_options |= or->attributes_mode; - if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) { - ret = _osd_req_finalize_attr_page(or); - if (ret) { - OSD_DEBUG("_osd_req_finalize_attr_page failed\n"); - return ret; - } - } else { - /* TODO: I think that for the GET_ATTR command these 2 should - * be reversed to keep them in execution order (for embedded - * targets with low memory footprint) - */ - ret = _osd_req_finalize_set_attr_list(or); - if (ret) { - OSD_DEBUG("_osd_req_finalize_set_attr_list failed\n"); - return ret; 
- } - - ret = _osd_req_finalize_get_attr_list(or); - if (ret) { - OSD_DEBUG("_osd_req_finalize_get_attr_list failed\n"); - return ret; - } - } - - ret = _osd_req_finalize_data_integrity(or, has_in, has_out, - out_data_bio, out_data_bytes, - cap_key); - if (ret) - return ret; - - osd_sec_sign_cdb(&or->cdb, cap_key); - - scsi_req(or->request)->cmd = or->cdb.buff; - scsi_req(or->request)->cmd_len = _osd_req_cdb_len(or); - - return 0; -} -EXPORT_SYMBOL(osd_finalize_request); - -static bool _is_osd_security_code(int code) -{ - return (code == osd_security_audit_value_frozen) || - (code == osd_security_working_key_frozen) || - (code == osd_nonce_not_unique) || - (code == osd_nonce_timestamp_out_of_range) || - (code == osd_invalid_dataout_buffer_integrity_check_value); -} - -#define OSD_SENSE_PRINT1(fmt, a...) \ - do { \ - if (__cur_sense_need_output) \ - OSD_ERR(fmt, ##a); \ - } while (0) - -#define OSD_SENSE_PRINT2(fmt, a...) OSD_SENSE_PRINT1(" " fmt, ##a) - -int osd_req_decode_sense_full(struct osd_request *or, - struct osd_sense_info *osi, bool silent, - struct osd_obj_id *bad_obj_list __unused, int max_obj __unused, - struct osd_attr *bad_attr_list, int max_attr) -{ - int sense_len, original_sense_len; - struct osd_sense_info local_osi; - struct scsi_sense_descriptor_based *ssdb; - void *cur_descriptor; -#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 0) - const bool __cur_sense_need_output = false; -#else - bool __cur_sense_need_output = !silent; -#endif - int ret; - - if (likely(!or->req_errors)) - return 0; - - osi = osi ? : &local_osi; - memset(osi, 0, sizeof(*osi)); - - ssdb = (typeof(ssdb))or->sense; - sense_len = or->sense_len; - if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) { - OSD_ERR("Block-layer returned error(0x%x) but " - "sense_len(%u) || key(%d) is empty\n", - or->req_errors, sense_len, ssdb->sense_key); - goto analyze; - } - - if ((ssdb->response_code != 0x72) && (ssdb->response_code != 0x73)) { - OSD_ERR("Unrecognized scsi sense: rcode=%x length=%d\n", - ssdb->response_code, sense_len); - goto analyze; - } - - osi->key = ssdb->sense_key; - osi->additional_code = be16_to_cpu(ssdb->additional_sense_code); - original_sense_len = ssdb->additional_sense_length + 8; - -#if (CONFIG_SCSI_OSD_DPRINT_SENSE == 1) - if (__cur_sense_need_output) - __cur_sense_need_output = (osi->key > scsi_sk_recovered_error); -#endif - OSD_SENSE_PRINT1("Main Sense information key=0x%x length(%d, %d) " - "additional_code=0x%x async_error=%d errors=0x%x\n", - osi->key, original_sense_len, sense_len, - osi->additional_code, or->async_error, - or->req_errors); - - if (original_sense_len < sense_len) - sense_len = original_sense_len; - - cur_descriptor = ssdb->ssd; - sense_len -= sizeof(*ssdb); - while (sense_len > 0) { - struct scsi_sense_descriptor *ssd = cur_descriptor; - int cur_len = ssd->additional_length + 2; - - sense_len -= cur_len; - - if (sense_len < 0) - break; /* sense was truncated */ - - switch (ssd->descriptor_type) { - case scsi_sense_information: - case scsi_sense_command_specific_information: - { - struct scsi_sense_command_specific_data_descriptor - *sscd = cur_descriptor; - - osi->command_info = - get_unaligned_be64(&sscd->information) ; - OSD_SENSE_PRINT2( - "command_specific_information 0x%llx \n", - _LLU(osi->command_info)); - break; - } - case scsi_sense_key_specific: - { - struct scsi_sense_key_specific_data_descriptor - *ssks = cur_descriptor; - - osi->sense_info = get_unaligned_be16(&ssks->value); - OSD_SENSE_PRINT2( - "sense_key_specific_information %u" - "sksv_cd_bpv_bp 
(0x%x)\n", - osi->sense_info, ssks->sksv_cd_bpv_bp); - break; - } - case osd_sense_object_identification: - { /*FIXME: Keep first not last, Store in array*/ - struct osd_sense_identification_data_descriptor - *osidd = cur_descriptor; - - osi->not_initiated_command_functions = - le32_to_cpu(osidd->not_initiated_functions); - osi->completed_command_functions = - le32_to_cpu(osidd->completed_functions); - osi->obj.partition = be64_to_cpu(osidd->partition_id); - osi->obj.id = be64_to_cpu(osidd->object_id); - OSD_SENSE_PRINT2( - "object_identification pid=0x%llx oid=0x%llx\n", - _LLU(osi->obj.partition), _LLU(osi->obj.id)); - OSD_SENSE_PRINT2( - "not_initiated_bits(%x) " - "completed_command_bits(%x)\n", - osi->not_initiated_command_functions, - osi->completed_command_functions); - break; - } - case osd_sense_response_integrity_check: - { - struct osd_sense_response_integrity_check_descriptor - *d = cur_descriptor; - /* 2nibbles+space+ASCII */ - char dump[sizeof(d->integrity_check_value) * 4 + 2]; - - hex_dump_to_buffer(d->integrity_check_value, - sizeof(d->integrity_check_value), - 32, 1, dump, sizeof(dump), true); - OSD_SENSE_PRINT2("response_integrity [%s]\n", dump); - } - case osd_sense_attribute_identification: - { - struct osd_sense_attributes_data_descriptor - *osadd = cur_descriptor; - unsigned len = min(cur_len, sense_len); - struct osd_sense_attr *pattr = osadd->sense_attrs; - - while (len >= sizeof(*pattr)) { - u32 attr_page = be32_to_cpu(pattr->attr_page); - u32 attr_id = be32_to_cpu(pattr->attr_id); - - if (!osi->attr.attr_page) { - osi->attr.attr_page = attr_page; - osi->attr.attr_id = attr_id; - } - - if (bad_attr_list && max_attr) { - bad_attr_list->attr_page = attr_page; - bad_attr_list->attr_id = attr_id; - bad_attr_list++; - max_attr--; - } - - len -= sizeof(*pattr); - OSD_SENSE_PRINT2( - "osd_sense_attribute_identification" - "attr_page=0x%x attr_id=0x%x\n", - attr_page, attr_id); - } - } - /*These are not legal for OSD*/ - case scsi_sense_field_replaceable_unit: - OSD_SENSE_PRINT2("scsi_sense_field_replaceable_unit\n"); - break; - case scsi_sense_stream_commands: - OSD_SENSE_PRINT2("scsi_sense_stream_commands\n"); - break; - case scsi_sense_block_commands: - OSD_SENSE_PRINT2("scsi_sense_block_commands\n"); - break; - case scsi_sense_ata_return: - OSD_SENSE_PRINT2("scsi_sense_ata_return\n"); - break; - default: - if (ssd->descriptor_type <= scsi_sense_Reserved_last) - OSD_SENSE_PRINT2( - "scsi_sense Reserved descriptor (0x%x)", - ssd->descriptor_type); - else - OSD_SENSE_PRINT2( - "scsi_sense Vendor descriptor (0x%x)", - ssd->descriptor_type); - } - - cur_descriptor += cur_len; - } - -analyze: - if (!osi->key) { - /* scsi sense is Empty, the request was never issued to target - * linux return code might tell us what happened. 
- */ - if (or->async_error == BLK_STS_RESOURCE) - osi->osd_err_pri = OSD_ERR_PRI_RESOURCE; - else - osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE; - ret = or->async_error; - } else if (osi->key <= scsi_sk_recovered_error) { - osi->osd_err_pri = 0; - ret = 0; - } else if (osi->additional_code == scsi_invalid_field_in_cdb) { - if (osi->cdb_field_offset == OSD_CFO_STARTING_BYTE) { - osi->osd_err_pri = OSD_ERR_PRI_CLEAR_PAGES; - ret = -EFAULT; /* caller should recover from this */ - } else if (osi->cdb_field_offset == OSD_CFO_OBJECT_ID) { - osi->osd_err_pri = OSD_ERR_PRI_NOT_FOUND; - ret = -ENOENT; - } else if (osi->cdb_field_offset == OSD_CFO_PERMISSIONS) { - osi->osd_err_pri = OSD_ERR_PRI_NO_ACCESS; - ret = -EACCES; - } else { - osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED; - ret = -EINVAL; - } - } else if (osi->additional_code == osd_quota_error) { - osi->osd_err_pri = OSD_ERR_PRI_NO_SPACE; - ret = -ENOSPC; - } else if (_is_osd_security_code(osi->additional_code)) { - osi->osd_err_pri = OSD_ERR_PRI_BAD_CRED; - ret = -EINVAL; - } else { - osi->osd_err_pri = OSD_ERR_PRI_EIO; - ret = -EIO; - } - - if (!or->out.residual) - or->out.residual = or->out.total_bytes; - if (!or->in.residual) - or->in.residual = or->in.total_bytes; - - return ret; -} -EXPORT_SYMBOL(osd_req_decode_sense_full); - -/* - * Implementation of osd_sec.h API - * TODO: Move to a separate osd_sec.c file at a later stage. - */ - -enum { OSD_SEC_CAP_V1_ALL_CAPS = - OSD_SEC_CAP_APPEND | OSD_SEC_CAP_OBJ_MGMT | OSD_SEC_CAP_REMOVE | - OSD_SEC_CAP_CREATE | OSD_SEC_CAP_SET_ATTR | OSD_SEC_CAP_GET_ATTR | - OSD_SEC_CAP_WRITE | OSD_SEC_CAP_READ | OSD_SEC_CAP_POL_SEC | - OSD_SEC_CAP_GLOBAL | OSD_SEC_CAP_DEV_MGMT -}; - -enum { OSD_SEC_CAP_V2_ALL_CAPS = - OSD_SEC_CAP_V1_ALL_CAPS | OSD_SEC_CAP_QUERY | OSD_SEC_CAP_M_OBJECT -}; - -void osd_sec_init_nosec_doall_caps(void *caps, - const struct osd_obj_id *obj, bool is_collection, const bool is_v1) -{ - struct osd_capability *cap = caps; - u8 type; - u8 descriptor_type; - - if (likely(obj->id)) { - if (unlikely(is_collection)) { - type = OSD_SEC_OBJ_COLLECTION; - descriptor_type = is_v1 ? OSD_SEC_OBJ_DESC_OBJ : - OSD_SEC_OBJ_DESC_COL; - } else { - type = OSD_SEC_OBJ_USER; - descriptor_type = OSD_SEC_OBJ_DESC_OBJ; - } - WARN_ON(!obj->partition); - } else { - type = obj->partition ? OSD_SEC_OBJ_PARTITION : - OSD_SEC_OBJ_ROOT; - descriptor_type = OSD_SEC_OBJ_DESC_PAR; - } - - memset(cap, 0, sizeof(*cap)); - - cap->h.format = OSD_SEC_CAP_FORMAT_VER1; - cap->h.integrity_algorithm__key_version = 0; /* MAKE_BYTE(0, 0); */ - cap->h.security_method = OSD_SEC_NOSEC; -/* cap->expiration_time; - cap->AUDIT[30-10]; - cap->discriminator[42-30]; - cap->object_created_time; */ - cap->h.object_type = type; - osd_sec_set_caps(&cap->h, OSD_SEC_CAP_V1_ALL_CAPS); - cap->h.object_descriptor_type = descriptor_type; - cap->od.obj_desc.policy_access_tag = 0; - cap->od.obj_desc.allowed_partition_id = cpu_to_be64(obj->partition); - cap->od.obj_desc.allowed_object_id = cpu_to_be64(obj->id); -} -EXPORT_SYMBOL(osd_sec_init_nosec_doall_caps); - -/* FIXME: Extract version from caps pointer. 
- * Also Pete's target only supports caps from OSDv1 for now - */ -void osd_set_caps(struct osd_cdb *cdb, const void *caps) -{ - /* NOTE: They start at same address */ - memcpy(&cdb->v1.caps, caps, OSDv1_CAP_LEN); -} - -bool osd_is_sec_alldata(struct osd_security_parameters *sec_parms __unused) -{ - return false; -} - -void osd_sec_sign_cdb(struct osd_cdb *ocdb __unused, const u8 *cap_key __unused) -{ -} - -void osd_sec_sign_data(void *data_integ __unused, - struct bio *bio __unused, const u8 *cap_key __unused) -{ -} - -/* - * Declared in osd_protocol.h - * 4.12.5 Data-In and Data-Out buffer offsets - * byte offset = mantissa * (2^(exponent+8)) - * Returns the smallest allowed encoded offset that contains given @offset - * The actual encoded offset returned is @offset + *@padding. - */ -osd_cdb_offset __osd_encode_offset( - u64 offset, unsigned *padding, int min_shift, int max_shift) -{ - u64 try_offset = -1, mod, align; - osd_cdb_offset be32_offset; - int shift; - - *padding = 0; - if (!offset) - return 0; - - for (shift = min_shift; shift < max_shift; ++shift) { - try_offset = offset >> shift; - if (try_offset < (1 << OSD_OFFSET_MAX_BITS)) - break; - } - - BUG_ON(shift == max_shift); - - align = 1 << shift; - mod = offset & (align - 1); - if (mod) { - *padding = align - mod; - try_offset += 1; - } - - try_offset |= ((shift - 8) & 0xf) << 28; - be32_offset = cpu_to_be32((u32)try_offset); - - OSD_DEBUG("offset=%llu mantissa=%llu exp=%d encoded=%x pad=%d\n", - _LLU(offset), _LLU(try_offset & 0x0FFFFFFF), shift, - be32_offset, *padding); - return be32_offset; -} diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c deleted file mode 100644 index eaf36ccf58db..000000000000 --- a/drivers/scsi/osd/osd_uld.c +++ /dev/null @@ -1,571 +0,0 @@ -/* - * osd_uld.c - OSD Upper Layer Driver - * - * A Linux driver module that registers as a SCSI ULD and probes - * for OSD type SCSI devices. - * It's main function is to export osd devices to in-kernel users like - * osdfs and pNFS-objects-LD. It also provides one ioctl for running - * in Kernel tests. - * - * Copyright (C) 2008 Panasas Inc. All rights reserved. - * - * Authors: - * Boaz Harrosh - * Benny Halevy - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the Panasas company nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include "osd_debug.h" - -#ifndef TYPE_OSD -# define TYPE_OSD 0x11 -#endif - -#ifndef SCSI_OSD_MAJOR -# define SCSI_OSD_MAJOR 260 -#endif -#define SCSI_OSD_MAX_MINOR MINORMASK - -static const char osd_name[] = "osd"; -static const char *osd_version_string = "open-osd 0.2.1"; - -MODULE_AUTHOR("Boaz Harrosh "); -MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR); -MODULE_ALIAS_SCSI_DEVICE(TYPE_OSD); - -struct osd_uld_device { - int minor; - struct device class_dev; - struct cdev cdev; - struct osd_dev od; - struct osd_dev_info odi; - struct gendisk *disk; -}; - -struct osd_dev_handle { - struct osd_dev od; - struct file *file; - struct osd_uld_device *oud; -} ; - -static DEFINE_IDA(osd_minor_ida); - -/* - * scsi sysfs attribute operations - */ -static ssize_t osdname_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct osd_uld_device *ould = container_of(dev, struct osd_uld_device, - class_dev); - return sprintf(buf, "%s\n", ould->odi.osdname); -} -static DEVICE_ATTR_RO(osdname); - -static ssize_t systemid_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct osd_uld_device *ould = container_of(dev, struct osd_uld_device, - class_dev); - - memcpy(buf, ould->odi.systemid, ould->odi.systemid_len); - return ould->odi.systemid_len; -} -static DEVICE_ATTR_RO(systemid); - -static struct attribute *osd_uld_attrs[] = { - &dev_attr_osdname.attr, - &dev_attr_systemid.attr, - NULL, -}; -ATTRIBUTE_GROUPS(osd_uld); - -static struct class osd_uld_class = { - .owner = THIS_MODULE, - .name = "scsi_osd", - .dev_groups = osd_uld_groups, -}; - -/* - * Char Device operations - */ - -static int osd_uld_open(struct inode *inode, struct file *file) -{ - struct osd_uld_device *oud = container_of(inode->i_cdev, - struct osd_uld_device, cdev); - - get_device(&oud->class_dev); - /* cache osd_uld_device on file handle */ - file->private_data = oud; - OSD_DEBUG("osd_uld_open %p\n", oud); - return 0; -} - -static int osd_uld_release(struct inode *inode, struct file *file) -{ - struct osd_uld_device *oud = file->private_data; - - OSD_DEBUG("osd_uld_release %p\n", file->private_data); - file->private_data = NULL; - put_device(&oud->class_dev); - return 0; -} - -/* FIXME: Only one vector for now */ -unsigned g_test_ioctl; -do_test_fn *g_do_test; - -int osduld_register_test(unsigned ioctl, do_test_fn *do_test) -{ - if (g_test_ioctl) - return -EINVAL; - - g_test_ioctl = ioctl; - g_do_test = do_test; - return 0; -} -EXPORT_SYMBOL(osduld_register_test); - -void osduld_unregister_test(unsigned ioctl) -{ - if (ioctl == g_test_ioctl) { - g_test_ioctl = 0; - g_do_test = NULL; - } -} -EXPORT_SYMBOL(osduld_unregister_test); - -static do_test_fn *_find_ioctl(unsigned cmd) -{ - if (g_test_ioctl == cmd) - return g_do_test; - 
else - return NULL; -} - -static long osd_uld_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct osd_uld_device *oud = file->private_data; - int ret; - do_test_fn *do_test; - - do_test = _find_ioctl(cmd); - if (do_test) - ret = do_test(&oud->od, cmd, arg); - else { - OSD_ERR("Unknown ioctl %d: osd_uld_device=%p\n", cmd, oud); - ret = -ENOIOCTLCMD; - } - return ret; -} - -static const struct file_operations osd_fops = { - .owner = THIS_MODULE, - .open = osd_uld_open, - .release = osd_uld_release, - .unlocked_ioctl = osd_uld_ioctl, - .llseek = noop_llseek, -}; - -struct osd_dev *osduld_path_lookup(const char *name) -{ - struct osd_uld_device *oud; - struct osd_dev_handle *odh; - struct file *file; - int error; - - if (!name || !*name) { - OSD_ERR("Mount with !path || !*path\n"); - return ERR_PTR(-EINVAL); - } - - odh = kzalloc(sizeof(*odh), GFP_KERNEL); - if (unlikely(!odh)) - return ERR_PTR(-ENOMEM); - - file = filp_open(name, O_RDWR, 0); - if (IS_ERR(file)) { - error = PTR_ERR(file); - goto free_od; - } - - if (file->f_op != &osd_fops){ - error = -EINVAL; - goto close_file; - } - - oud = file->private_data; - - odh->od = oud->od; - odh->file = file; - odh->oud = oud; - - return &odh->od; - -close_file: - fput(file); -free_od: - kfree(odh); - return ERR_PTR(error); -} -EXPORT_SYMBOL(osduld_path_lookup); - -static inline bool _the_same_or_null(const u8 *a1, unsigned a1_len, - const u8 *a2, unsigned a2_len) -{ - if (!a2_len) /* User string is Empty means don't care */ - return true; - - if (a1_len != a2_len) - return false; - - return 0 == memcmp(a1, a2, a1_len); -} - -static int _match_odi(struct device *dev, const void *find_data) -{ - struct osd_uld_device *oud = container_of(dev, struct osd_uld_device, - class_dev); - const struct osd_dev_info *odi = find_data; - - if (_the_same_or_null(oud->odi.systemid, oud->odi.systemid_len, - odi->systemid, odi->systemid_len) && - _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len, - odi->osdname, odi->osdname_len)) { - OSD_DEBUG("found device sysid_len=%d osdname=%d\n", - odi->systemid_len, odi->osdname_len); - return 1; - } else { - return 0; - } -} - -/* osduld_info_lookup - Loop through all devices, return the requested osd_dev. - * - * if @odi->systemid_len and/or @odi->osdname_len are zero, they act as a don't - * care. .e.g if they're both zero /dev/osd0 is returned. - */ -struct osd_dev *osduld_info_lookup(const struct osd_dev_info *odi) -{ - struct device *dev = class_find_device(&osd_uld_class, NULL, odi, _match_odi); - if (likely(dev)) { - struct osd_dev_handle *odh = kzalloc(sizeof(*odh), GFP_KERNEL); - struct osd_uld_device *oud = container_of(dev, - struct osd_uld_device, class_dev); - - if (unlikely(!odh)) { - put_device(dev); - return ERR_PTR(-ENOMEM); - } - - odh->od = oud->od; - odh->oud = oud; - - return &odh->od; - } - - return ERR_PTR(-ENODEV); -} -EXPORT_SYMBOL(osduld_info_lookup); - -void osduld_put_device(struct osd_dev *od) -{ - if (od && !IS_ERR(od)) { - struct osd_dev_handle *odh = - container_of(od, struct osd_dev_handle, od); - struct osd_uld_device *oud = odh->oud; - - BUG_ON(od->scsi_device != oud->od.scsi_device); - - /* If scsi has released the device (logout), and exofs has last - * reference on oud it will be freed by above osd_uld_release - * within fput below. But this will oops in cdev_release which - * is called after the fops->release. 
A get_/put_ pair makes - * sure we have a cdev for the duration of fput - */ - if (odh->file) { - get_device(&oud->class_dev); - fput(odh->file); - } - put_device(&oud->class_dev); - kfree(odh); - } -} -EXPORT_SYMBOL(osduld_put_device); - -const struct osd_dev_info *osduld_device_info(struct osd_dev *od) -{ - struct osd_dev_handle *odh = - container_of(od, struct osd_dev_handle, od); - return &odh->oud->odi; -} -EXPORT_SYMBOL(osduld_device_info); - -bool osduld_device_same(struct osd_dev *od, const struct osd_dev_info *odi) -{ - struct osd_dev_handle *odh = - container_of(od, struct osd_dev_handle, od); - struct osd_uld_device *oud = odh->oud; - - return (oud->odi.systemid_len == odi->systemid_len) && - _the_same_or_null(oud->odi.systemid, oud->odi.systemid_len, - odi->systemid, odi->systemid_len) && - (oud->odi.osdname_len == odi->osdname_len) && - _the_same_or_null(oud->odi.osdname, oud->odi.osdname_len, - odi->osdname, odi->osdname_len); -} -EXPORT_SYMBOL(osduld_device_same); - -/* - * Scsi Device operations - */ - -static int __detect_osd(struct osd_uld_device *oud) -{ - struct scsi_device *scsi_device = oud->od.scsi_device; - struct scsi_sense_hdr sense_hdr; - char caps[OSD_CAP_LEN]; - int error; - - /* sending a test_unit_ready as first command seems to be needed - * by some targets - */ - OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n", - oud, scsi_device, scsi_device->request_queue); - error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, &sense_hdr); - if (error) - OSD_ERR("warning: scsi_test_unit_ready failed\n"); - - osd_sec_init_nosec_doall_caps(caps, &osd_root_object, false, true); - if (osd_auto_detect_ver(&oud->od, caps, &oud->odi)) - return -ENODEV; - - return 0; -} - -static void __remove(struct device *dev) -{ - struct osd_uld_device *oud = container_of(dev, struct osd_uld_device, - class_dev); - struct scsi_device *scsi_device = oud->od.scsi_device; - - kfree(oud->odi.osdname); - - osd_dev_fini(&oud->od); - scsi_device_put(scsi_device); - - OSD_INFO("osd_remove %s\n", - oud->disk ? 
oud->disk->disk_name : NULL); - - if (oud->disk) - put_disk(oud->disk); - - kfree(oud); -} - -static int osd_probe(struct device *dev) -{ - struct scsi_device *scsi_device = to_scsi_device(dev); - struct gendisk *disk; - struct osd_uld_device *oud; - int minor; - int error; - - if (scsi_device->type != TYPE_OSD) - return -ENODEV; - - minor = ida_alloc_max(&osd_minor_ida, SCSI_OSD_MAX_MINOR, GFP_KERNEL); - if (minor == -ENOSPC) - return -EBUSY; - if (minor < 0) - return -ENODEV; - - error = -ENOMEM; - oud = kzalloc(sizeof(*oud), GFP_KERNEL); - if (NULL == oud) - goto err_retract_minor; - - /* class device member */ - device_initialize(&oud->class_dev); - dev_set_drvdata(dev, oud); - oud->minor = minor; - oud->class_dev.devt = MKDEV(SCSI_OSD_MAJOR, oud->minor); - oud->class_dev.class = &osd_uld_class; - oud->class_dev.parent = dev; - oud->class_dev.release = __remove; - - /* hold one more reference to the scsi_device that will get released - * in __release, in case a logout is happening while fs is mounted - */ - if (scsi_device_get(scsi_device)) - goto err_retract_minor; - osd_dev_init(&oud->od, scsi_device); - - /* allocate a disk and set it up */ - /* FIXME: do we need this since sg has already done that */ - disk = alloc_disk(1); - if (!disk) { - OSD_ERR("alloc_disk failed\n"); - goto err_free_osd; - } - disk->major = SCSI_OSD_MAJOR; - disk->first_minor = oud->minor; - sprintf(disk->disk_name, "osd%d", oud->minor); - oud->disk = disk; - - /* Detect the OSD Version */ - error = __detect_osd(oud); - if (error) { - OSD_ERR("osd detection failed, non-compatible OSD device\n"); - goto err_free_osd; - } - - /* init the char-device for communication with user-mode */ - cdev_init(&oud->cdev, &osd_fops); - oud->cdev.owner = THIS_MODULE; - - error = dev_set_name(&oud->class_dev, "%s", disk->disk_name); - if (error) { - OSD_ERR("dev_set_name failed => %d\n", error); - goto err_free_osd; - } - - error = cdev_device_add(&oud->cdev, &oud->class_dev); - if (error) { - OSD_ERR("device_register failed => %d\n", error); - goto err_free_osd; - } - - OSD_INFO("osd_probe %s\n", disk->disk_name); - return 0; - -err_free_osd: - put_device(&oud->class_dev); -err_retract_minor: - ida_free(&osd_minor_ida, minor); - return error; -} - -static int osd_remove(struct device *dev) -{ - struct scsi_device *scsi_device = to_scsi_device(dev); - struct osd_uld_device *oud = dev_get_drvdata(dev); - - if (oud->od.scsi_device != scsi_device) { - OSD_ERR("Half cooked osd-device %p, || %p!=%p", - dev, oud->od.scsi_device, scsi_device); - } - - cdev_device_del(&oud->cdev, &oud->class_dev); - ida_free(&osd_minor_ida, oud->minor); - put_device(&oud->class_dev); - - return 0; -} - -/* - * Global driver and scsi registration - */ - -static struct scsi_driver osd_driver = { - .gendrv = { - .name = osd_name, - .owner = THIS_MODULE, - .probe = osd_probe, - .remove = osd_remove, - } -}; - -static int __init osd_uld_init(void) -{ - int err; - - err = class_register(&osd_uld_class); - if (err) { - OSD_ERR("Unable to register sysfs class => %d\n", err); - return err; - } - - err = register_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), - SCSI_OSD_MAX_MINOR, osd_name); - if (err) { - OSD_ERR("Unable to register major %d for osd ULD => %d\n", - SCSI_OSD_MAJOR, err); - goto err_out; - } - - err = scsi_register_driver(&osd_driver.gendrv); - if (err) { - OSD_ERR("scsi_register_driver failed => %d\n", err); - goto err_out_chrdev; - } - - OSD_INFO("LOADED %s\n", osd_version_string); - return 0; - -err_out_chrdev: - 
unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); -err_out: - class_unregister(&osd_uld_class); - return err; -} - -static void __exit osd_uld_exit(void) -{ - scsi_unregister_driver(&osd_driver.gendrv); - unregister_chrdev_region(MKDEV(SCSI_OSD_MAJOR, 0), SCSI_OSD_MAX_MINOR); - class_unregister(&osd_uld_class); - OSD_INFO("UNLOADED %s\n", osd_version_string); -} - -module_init(osd_uld_init); -module_exit(osd_uld_exit); diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 664c1238a87f..be3c73ebbfde 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -139,7 +139,7 @@ static int debugging = 1; #define OSST_TIMEOUT (200 * HZ) #define OSST_LONG_TIMEOUT (1800 * HZ) -#define TAPE_NR(x) (iminor(x) & ~(-1 << ST_MODE_SHIFT)) +#define TAPE_NR(x) (iminor(x) & ((1 << ST_MODE_SHIFT)-1)) #define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT) #define TAPE_REWIND(x) ((iminor(x) & 0x80) == 0) #define TAPE_IS_RAW(x) (TAPE_MODE(x) & (ST_NBR_MODES >> 1)) diff --git a/drivers/scsi/pcmcia/Makefile b/drivers/scsi/pcmcia/Makefile index faa87a4b2d2b..a5a24dd44e7e 100644 --- a/drivers/scsi/pcmcia/Makefile +++ b/drivers/scsi/pcmcia/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -Idrivers/scsi +ccflags-y := -I $(srctree)/drivers/scsi # 16-bit client drivers obj-$(CONFIG_PCMCIA_QLOGIC) += qlogic_cs.o diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 1bd6825a4f14..a81748e6e8fb 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -1134,7 +1134,8 @@ static irqreturn_t nspintr(int irq, void *dev_id) //*sync_neg = SYNC_NOT_YET; - if ((tmpSC->SCp.Message == MSG_COMMAND_COMPLETE)) { /* all command complete and return status */ + /* all command complete and return status */ + if (tmpSC->SCp.Message == MSG_COMMAND_COMPLETE) { tmpSC->result = (DID_OK << 16) | ((tmpSC->SCp.Message & 0xff) << 8) | ((tmpSC->SCp.Status & 0xff) << 0); diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index c29c162a494f..a32d8ee4666e 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -27,30 +27,19 @@ qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, const struct file_operations *fops) { char host_dirname[32]; - struct dentry *file_dentry = NULL; QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n"); /* create pf dir */ sprintf(host_dirname, "host%u", qedf->host_no); qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root); - if (!qedf->bdf_dentry) - return; /* create debugfs files */ while (dops) { if (!(dops->name)) break; - file_dentry = debugfs_create_file(dops->name, 0600, - qedf->bdf_dentry, qedf, - fops); - if (!file_dentry) { - QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, - "Debugfs entry %s creation failed\n", - dops->name); - debugfs_remove_recursive(qedf->bdf_dentry); - return; - } + debugfs_create_file(dops->name, 0600, qedf->bdf_dentry, qedf, + fops); dops++; fops++; } @@ -80,9 +69,6 @@ qedf_dbg_init(char *drv_name) /* create qed dir in root of debugfs. 
NULL means debugfs root */ qedf_dbg_root = debugfs_create_dir(drv_name, NULL); - if (!qedf_dbg_root) - QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs " - "failed\n"); } /** diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 6bbc38b1b465..6ca583bdde23 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1128,12 +1128,6 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, return; } - if (!sc_cmd->request->special) { - QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so " - "request not valid, sc_cmd=%p.\n", sc_cmd); - return; - } - if (!sc_cmd->request->q) { QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request " "is not valid, sc_cmd=%p.\n", sc_cmd); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 9bbc19fc190b..9f9431a4cc0e 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1418,7 +1418,7 @@ static struct libfc_function_template qedf_lport_template = { static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf) { - fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO); + fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO); qedf->ctlr.send = qedf_fip_send; qedf->ctlr.get_src_addr = qedf_get_src_mac; diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index fd914ca4149a..5667e4752e2e 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -23,27 +23,16 @@ qedi_dbg_host_init(struct qedi_dbg_ctx *qedi, const struct file_operations *fops) { char host_dirname[32]; - struct dentry *file_dentry = NULL; sprintf(host_dirname, "host%u", qedi->host_no); qedi->bdf_dentry = debugfs_create_dir(host_dirname, qedi_dbg_root); - if (!qedi->bdf_dentry) - return; while (dops) { if (!(dops->name)) break; - file_dentry = debugfs_create_file(dops->name, 0600, - qedi->bdf_dentry, qedi, - fops); - if (!file_dentry) { - QEDI_INFO(qedi, QEDI_LOG_DEBUGFS, - "Debugfs entry %s creation failed\n", - dops->name); - debugfs_remove_recursive(qedi->bdf_dentry); - return; - } + debugfs_create_file(dops->name, 0600, qedi->bdf_dentry, qedi, + fops); dops++; fops++; } @@ -60,8 +49,6 @@ void qedi_dbg_init(char *drv_name) { qedi_dbg_root = debugfs_create_dir(drv_name, NULL); - if (!qedi_dbg_root) - QEDI_INFO(NULL, QEDI_LOG_DEBUGFS, "Init of debugfs failed\n"); } void diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 25d763ae5d5a..e2a995a6e8e7 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -616,13 +616,6 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi, goto error; } - if (!sc_cmd->request->special) { - QEDI_WARN(&qedi->dbg_ctx, - "request->special is NULL so request not valid, sc_cmd=%p.\n", - sc_cmd); - goto error; - } - if (!sc_cmd->request->q) { QEDI_WARN(&qedi->dbg_ctx, "request->q is NULL so request is not valid, sc_cmd=%p.\n", diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index ac504a1ff0ff..2eb1ae721a7d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -543,6 +543,9 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, if (unlikely(pci_channel_offline(ha->pdev))) return 0; + if (qla2x00_chip_is_down(vha)) + return 0; + if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || !ha->isp_ops->write_nvram) return 0; @@ -1002,7 +1005,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) /* Scsi_Host attributes. 
*/ static ssize_t -qla2x00_drvr_version_show(struct device *dev, +qla2x00_driver_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); @@ -1632,6 +1635,92 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr, ha->max_speed_sup ? "32Gps" : "16Gps"); } +static ssize_t +qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); + ulong type, speed; + int oldspeed, rval; + int mode = QLA_SET_DATA_RATE_LR; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(vha->hw)) { + ql_log(ql_log_warn, vha, 0x70d8, + "Speed setting not supported \n"); + return -EINVAL; + } + + rval = kstrtol(buf, 10, &type); + speed = type; + if (type == 40 || type == 80 || type == 160 || + type == 320) { + ql_dbg(ql_dbg_user, vha, 0x70d9, + "Setting will be affected after a loss of sync\n"); + type = type/10; + mode = QLA_SET_DATA_RATE_NOLR; + } + + oldspeed = ha->set_data_rate; + + switch (type) { + case 0: + ha->set_data_rate = PORT_SPEED_AUTO; + break; + case 4: + ha->set_data_rate = PORT_SPEED_4GB; + break; + case 8: + ha->set_data_rate = PORT_SPEED_8GB; + break; + case 16: + ha->set_data_rate = PORT_SPEED_16GB; + break; + case 32: + ha->set_data_rate = PORT_SPEED_32GB; + break; + default: + ql_log(ql_log_warn, vha, 0x1199, + "Unrecognized speed setting:%lx. Setting Autoneg\n", + speed); + ha->set_data_rate = PORT_SPEED_AUTO; + } + + if (qla2x00_chip_is_down(vha) || (oldspeed == ha->set_data_rate)) + return -EINVAL; + + ql_log(ql_log_info, vha, 0x70da, + "Setting speed to %lx Gbps \n", type); + + rval = qla2x00_set_data_rate(vha, mode); + if (rval != QLA_SUCCESS) + return -EIO; + + return strlen(buf); +} + +static ssize_t +qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + ssize_t rval; + char *spd[7] = {"0", "0", "0", "4", "8", "16", "32"}; + + rval = qla2x00_get_data_rate(vha); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x70db, + "Unable to get port speed rval:%zd\n", rval); + return -EINVAL; + } + + ql_log(ql_log_info, vha, 0x70d6, + "port speed:%d\n", ha->link_data_rate); + + return scnprintf(buf, PAGE_SIZE, "%s\n", spd[ha->link_data_rate]); +} + /* ----- */ static ssize_t @@ -2059,7 +2148,21 @@ ql2xiniexchg_store(struct device *dev, struct device_attribute *attr, return strlen(buf); } -static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); +static ssize_t +qla2x00_dif_bundle_statistics_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + return scnprintf(buf, PAGE_SIZE, + "cross=%llu read=%llu write=%llu kalloc=%llu dma_alloc=%llu unusable=%u\n", + ha->dif_bundle_crossed_pages, ha->dif_bundle_reads, + ha->dif_bundle_writes, ha->dif_bundle_kallocs, + ha->dif_bundle_dma_allocs, ha->pool.unusable.count); +} + +static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL); @@ -2112,6 +2215,10 @@ static DEVICE_ATTR(zio_threshold, 0644, static DEVICE_ATTR_RW(qlini_mode); static 
DEVICE_ATTR_RW(ql2xexchoffld); static DEVICE_ATTR_RW(ql2xiniexchg); +static DEVICE_ATTR(dif_bundle_statistics, 0444, + qla2x00_dif_bundle_statistics_show, NULL); +static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show, + qla2x00_port_speed_store); struct device_attribute *qla2x00_host_attrs[] = { @@ -2150,6 +2257,8 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_min_link_speed, &dev_attr_max_speed_sup, &dev_attr_zio_threshold, + &dev_attr_dif_bundle_statistics, + &dev_attr_port_speed, NULL, /* reserve for qlini_mode */ NULL, /* reserve for ql2xiniexchg */ NULL, /* reserve for ql2xexchoffld */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index d1fc4958222a..3d46975a5e5c 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -314,6 +314,7 @@ struct srb_cmd { #define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */ #define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */ #define SRB_WAKEUP_ON_COMP BIT_6 +#define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */ /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) @@ -1892,6 +1893,13 @@ struct crc_context { /* List of DMA context transfers */ struct list_head dsd_list; + /* List of DIF Bundling context DMA address */ + struct list_head ldif_dsd_list; + u8 no_ldif_dsd; + + struct list_head ldif_dma_hndl_list; + u32 dif_bundl_len; + u8 no_dif_bundl; /* This structure should not exceed 512 bytes */ }; @@ -2359,7 +2367,9 @@ typedef struct fc_port { #define NVME_PRLI_SP_INITIATOR BIT_5 #define NVME_PRLI_SP_TARGET BIT_4 #define NVME_PRLI_SP_DISCOVERY BIT_3 +#define NVME_PRLI_SP_FIRST_BURST BIT_0 uint8_t nvme_flag; + uint32_t nvme_first_burst_size; #define NVME_FLAG_REGISTERED 4 #define NVME_FLAG_DELETING 2 #define NVME_FLAG_RESETTING 1 @@ -3688,12 +3698,14 @@ struct qla_hw_data { #define PORT_SPEED_UNKNOWN 0xFFFF #define PORT_SPEED_1GB 0x00 #define PORT_SPEED_2GB 0x01 +#define PORT_SPEED_AUTO 0x02 #define PORT_SPEED_4GB 0x03 #define PORT_SPEED_8GB 0x04 #define PORT_SPEED_16GB 0x05 #define PORT_SPEED_32GB 0x06 #define PORT_SPEED_10GB 0x13 uint16_t link_data_rate; /* F/W operating speed */ + uint16_t set_data_rate; /* Set by user */ uint8_t current_topology; uint8_t prev_topology; @@ -3958,6 +3970,10 @@ struct qla_hw_data { uint16_t fw_subminor_version; uint16_t fw_attributes; uint16_t fw_attributes_h; +#define FW_ATTR_H_NVME_FBURST BIT_1 +#define FW_ATTR_H_NVME BIT_10 +#define FW_ATTR_H_NVME_UPDATED BIT_14 + uint16_t fw_attributes_ext[2]; uint32_t fw_memory_size; uint32_t fw_transfer_size; @@ -4184,12 +4200,32 @@ struct qla_hw_data { uint16_t min_link_speed; uint16_t max_speed_sup; + /* DMA pool for the DIF bundling buffers */ + struct dma_pool *dif_bundl_pool; + #define DIF_BUNDLING_DMA_POOL_SIZE 1024 + struct { + struct { + struct list_head head; + uint count; + } good; + struct { + struct list_head head; + uint count; + } unusable; + } pool; + + unsigned long long dif_bundle_crossed_pages; + unsigned long long dif_bundle_reads; + unsigned long long dif_bundle_writes; + unsigned long long dif_bundle_kallocs; + unsigned long long dif_bundle_dma_allocs; + atomic_t nvme_active_aen_cnt; uint16_t nvme_last_rptd_aen; /* Last recorded aen count */ atomic_t zio_threshold; uint16_t last_zio_threshold; -#define DEFAULT_ZIO_THRESHOLD 64 +#define DEFAULT_ZIO_THRESHOLD 5 }; #define FW_ABILITY_MAX_SPEED_MASK 0xFUL @@ -4198,6 +4234,10 @@ struct qla_hw_data { #define FW_ABILITY_MAX_SPEED(ha) \ 
(ha->fw_ability_mask & FW_ABILITY_MAX_SPEED_MASK) +#define QLA_GET_DATA_RATE 0 +#define QLA_SET_DATA_RATE_NOLR 1 +#define QLA_SET_DATA_RATE_LR 2 /* Set speed and initiate LR */ + /* * Qlogic scsi host structure */ @@ -4229,6 +4269,7 @@ typedef struct scsi_qla_host { uint32_t qpairs_req_created:1; uint32_t qpairs_rsp_created:1; uint32_t nvme_enabled:1; + uint32_t nvme_first_burst:1; } flags; atomic_t loop_state; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 0b190082aa8d..ead17288e2a7 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -446,11 +446,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) atomic_set(&qla2x00_dfs_root_count, 0); qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); - if (!qla2x00_dfs_root) { - ql_log(ql_log_warn, vha, 0x00f7, - "Unable to create debugfs root directory.\n"); - goto out; - } create_dir: if (ha->dfs_dir) @@ -458,64 +453,28 @@ create_dir: mutex_init(&ha->fce_mutex); ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); - if (!ha->dfs_dir) { - ql_log(ql_log_warn, vha, 0x00f8, - "Unable to create debugfs ha directory.\n"); - goto out; - } atomic_inc(&qla2x00_dfs_root_count); create_nodes: ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count", S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops); - if (!ha->dfs_fw_resource_cnt) { - ql_log(ql_log_warn, vha, 0x00fd, - "Unable to create debugFS fw_resource_count node.\n"); - goto out; - } ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_counters_ops); - if (!ha->dfs_tgt_counters) { - ql_log(ql_log_warn, vha, 0xd301, - "Unable to create debugFS tgt_counters node.\n"); - goto out; - } ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); - if (!ha->tgt.dfs_tgt_port_database) { - ql_log(ql_log_warn, vha, 0xd03f, - "Unable to create debugFS tgt_port_database node.\n"); - goto out; - } ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); - if (!ha->dfs_fce) { - ql_log(ql_log_warn, vha, 0x00f9, - "Unable to create debugfs fce node.\n"); - goto out; - } ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess", S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops); - if (!ha->tgt.dfs_tgt_sess) { - ql_log(ql_log_warn, vha, 0xd040, - "Unable to create debugFS tgt_sess node.\n"); - goto out; - } - if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { + if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) ha->tgt.dfs_naqp = debugfs_create_file("naqp", 0400, ha->dfs_dir, vha, &dfs_naqp_ops); - if (!ha->tgt.dfs_naqp) { - ql_log(ql_log_warn, vha, 0xd011, - "Unable to create debugFS naqp node.\n"); - goto out; - } - } out: return 0; } diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 3673fcdb033a..4eefe69ca807 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -160,6 +160,7 @@ extern int ql2xautodetectsfp; extern int ql2xenablemsix; extern int qla2xuseresexchforels; extern int ql2xexlogins; +extern int ql2xdifbundlinginternalbuffers; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -269,8 +270,8 @@ extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t, struct req_que *); extern int qla2x00_start_scsi(srb_t *sp); extern int qla24xx_start_scsi(srb_t *sp); -int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *, - uint16_t, uint64_t, uint8_t); +int 
qla2x00_marker(struct scsi_qla_host *, struct qla_qpair *, + uint16_t, uint64_t, uint8_t); extern int qla2x00_start_sp(srb_t *); extern int qla24xx_dif_start_scsi(srb_t *); extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t); @@ -285,7 +286,7 @@ extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, uint32_t *, uint16_t, struct qla_tc_param *); extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, - uint32_t *, uint16_t, struct qla_tc_param *); + uint32_t *, uint16_t, struct qla_tgt_cmd *); extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, @@ -898,5 +899,6 @@ void qlt_update_host_map(struct scsi_qla_host *, port_id_t); void qlt_remove_target_resources(struct qla_hw_data *); void qlt_clr_qp_table(struct scsi_qla_host *vha); void qlt_set_mode(struct scsi_qla_host *); +int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index cbc3bc49d4d1..c6fdad12428e 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -657,15 +657,16 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_sns_sp_done; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x portid %06x.\n", + sp->name, sp->handle, d_id->b24); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2043, "RFT_ID issue IOCB failed (%d).\n", rval); goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s - hdl=%x portid %06x.\n", - sp->name, sp->handle, d_id->b24); return rval; done_free_sp: sp->free(sp); @@ -752,6 +753,10 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_sns_sp_done; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x portid %06x feature %x type %x.\n", + sp->name, sp->handle, d_id->b24, fc4feature, fc4type); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2047, @@ -759,9 +764,6 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s - hdl=%x portid %06x feature %x type %x.\n", - sp->name, sp->handle, d_id->b24, fc4feature, fc4type); return rval; done_free_sp: @@ -844,15 +846,16 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_sns_sp_done; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x portid %06x\n", + sp->name, sp->handle, d_id->b24); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x204d, "RNN_ID issue IOCB failed (%d).\n", rval); goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s - hdl=%x portid %06x\n", - sp->name, sp->handle, d_id->b24); return rval; @@ -957,15 +960,16 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; sp->done = qla2x00_async_sns_sp_done; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s - hdl=%x.\n", + sp->name, sp->handle); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2043, "RFT_ID issue IOCB 
failed (%d).\n", rval); goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s - hdl=%x.\n", - sp->name, sp->handle); return rval; @@ -3578,14 +3582,14 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->done = qla24xx_async_gffid_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0x2132, "Async-%s hdl=%x %8phC.\n", sp->name, sp->handle, fcport->port_name); + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; done_free_sp: sp->free(sp); @@ -4067,6 +4071,10 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->done = qla2x00_async_gpnft_gnnft_sp_done; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s hdl=%x FC4Type %x.\n", sp->name, + sp->handle, ct_req->req.gpn_ft.port_type); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { spin_lock_irqsave(&vha->work_lock, flags); @@ -4075,9 +4083,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s hdl=%x FC4Type %x.\n", sp->name, - sp->handle, ct_req->req.gpn_ft.port_type); return rval; done_free_sp: @@ -4158,7 +4163,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); - goto done_free_sp; + qla2x00_rel_sp(sp); + return rval; } sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; @@ -4177,7 +4183,13 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); - goto done_free_sp; + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + qla2x00_rel_sp(sp); + return rval; } sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz; @@ -4214,6 +4226,10 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->done = qla2x00_async_gpnft_gnnft_sp_done; + ql_dbg(ql_dbg_disc, vha, 0xffff, + "Async-%s hdl=%x FC4Type %x.\n", sp->name, + sp->handle, ct_req->req.gpn_ft.port_type); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { spin_lock_irqsave(&vha->work_lock, flags); @@ -4222,9 +4238,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s hdl=%x FC4Type %x.\n", sp->name, - sp->handle, ct_req->req.gpn_ft.port_type); return rval; done_free_sp: @@ -4345,13 +4358,14 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->done = qla2x00_async_gnnid_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", sp->name, fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; return rval; done_free_sp: @@ -4475,14 +4489,15 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->done = qla2x00_async_gfpnid_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", sp->name, fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24); + + rval = 
qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; done_free_sp: diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aeeb0144bd55..420045155ba0 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -366,14 +366,16 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); sp->done = qla2x00_async_prlo_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; done_free_sp: @@ -471,9 +473,11 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, { srb_t *sp; struct srb_iocb *lio; - int rval; + int rval = QLA_FUNCTION_FAILED; + + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + return rval; - rval = QLA_FUNCTION_FAILED; fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -644,11 +648,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, break; case DSC_LS_PORT_UNAVAIL: default: - if (fcport->loop_id != FC_NO_LOOP_ID) - qla2x00_clear_loop_id(fcport); - - fcport->loop_id = loop_id; - fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; + if (fcport->loop_id == FC_NO_LOOP_ID) { + qla2x00_find_new_loop_id(vha, fcport); + fcport->fw_login_state = + DSC_LS_PORT_UNAVAIL; + } + ql_dbg(ql_dbg_disc, vha, 0x20e5, + "%s %d %8phC\n", __func__, __LINE__, + fcport->port_name); qla24xx_fcport_handle_login(vha, fcport); break; } @@ -931,14 +938,14 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) sp->done = qla24xx_async_gnl_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0x20da, "Async-%s - OUT WWPN %8phC hndl %x\n", sp->name, fcport->port_name, sp->handle); + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; done_free_sp: @@ -1072,6 +1079,11 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) if (fcport->fc4f_nvme) lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; + ql_dbg(ql_dbg_disc, vha, 0x211b, + "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", + fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, + fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc"); + rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { fcport->flags |= FCF_LOGIN_NEEDED; @@ -1079,11 +1091,6 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) goto done_free_sp; } - ql_dbg(ql_dbg_disc, vha, 0x211b, - "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", - fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, - fcport->login_retry, fcport->fc4f_nvme ? 
"nvme" : "fc"); - return rval; done_free_sp: @@ -1471,29 +1478,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) return 0; } -static -void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea) -{ - fcport->rscn_gen++; - - ql_dbg(ql_dbg_disc, fcport->vha, 0x210c, - "%s %8phC DS %d LS %d\n", - __func__, fcport->port_name, fcport->disc_state, - fcport->fw_login_state); - - if (fcport->flags & FCF_ASYNC_SENT) - return; - - switch (fcport->disc_state) { - case DSC_DELETED: - case DSC_LOGIN_COMPLETE: - qla24xx_post_gpnid_work(fcport->vha, &ea->id); - break; - default: - break; - } -} - int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, u8 *port_name, u8 *node_name, void *pla, u8 fc4_type) { @@ -1560,8 +1544,6 @@ static void qla_handle_els_plogi_done(scsi_qla_host_t *vha, void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) { - fc_port_t *f, *tf; - uint32_t id = 0, mask, rid; fc_port_t *fcport; switch (ea->event) { @@ -1574,10 +1556,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) case FCME_RSCN: if (test_bit(UNLOADING, &vha->dpc_flags)) return; - switch (ea->id.b.rsvd_1) { - case RSCN_PORT_ADDR: -#define BIGSCAN 1 -#if defined BIGSCAN & BIGSCAN > 0 { unsigned long flags; fcport = qla2x00_find_fcport_by_nportid @@ -1596,59 +1574,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) } spin_unlock_irqrestore(&vha->work_lock, flags); } -#else - { - int rc; - fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); - if (!fcport) { - /* cable moved */ - rc = qla24xx_post_gpnid_work(vha, &ea->id); - if (rc) { - ql_log(ql_log_warn, vha, 0xd044, - "RSCN GPNID work failed %06x\n", - ea->id.b24); - } - } else { - ea->fcport = fcport; - fcport->scan_needed = 1; - qla24xx_handle_rscn_event(fcport, ea); - } - } -#endif - break; - case RSCN_AREA_ADDR: - case RSCN_DOM_ADDR: - if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) { - mask = 0xffff00; - ql_dbg(ql_dbg_async, vha, 0x5044, - "RSCN: Area 0x%06x was affected\n", - ea->id.b24); - } else { - mask = 0xff0000; - ql_dbg(ql_dbg_async, vha, 0x507a, - "RSCN: Domain 0x%06x was affected\n", - ea->id.b24); - } - - rid = ea->id.b24 & mask; - list_for_each_entry_safe(f, tf, &vha->vp_fcports, - list) { - id = f->d_id.b24 & mask; - if (rid == id) { - ea->fcport = f; - qla24xx_handle_rscn_event(f, ea); - } - } - break; - case RSCN_FAB_ADDR: - default: - ql_log(ql_log_warn, vha, 0xd045, - "RSCN: Fabric was affected. Addr format %d\n", - ea->id.b.rsvd_1); - qla2x00_mark_all_devices_lost(vha, 1); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - } break; case FCME_GNL_DONE: qla24xx_handle_gnl_done_event(vha, ea); @@ -1709,11 +1634,7 @@ void qla_rscn_replay(fc_port_t *fcport) ea.event = FCME_RSCN; ea.id = fcport->d_id; ea.id.b.rsvd_1 = RSCN_PORT_ADDR; -#if defined BIGSCAN & BIGSCAN > 0 qla2x00_fcport_event_handler(fcport->vha, &ea); -#else - qla24xx_post_gpnid_work(fcport->vha, &ea.id); -#endif } } @@ -1784,14 +1705,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, lun = (uint16_t)tm_iocb->u.tmf.lun; /* Issue Marker IOCB */ - qla2x00_marker(vha, vha->hw->req_q_map[0], - vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, + qla2x00_marker(vha, vha->hw->base_qpair, + fcport->loop_id, lun, flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); } done_free_sp: sp->free(sp); - sp->fcport->flags &= ~FCF_ASYNC_SENT; + fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } @@ -1829,7 +1750,7 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) int rval = QLA_FUNCTION_FAILED; sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, - GFP_KERNEL); + GFP_ATOMIC); if (!sp) goto done; @@ -1912,6 +1833,12 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; + ea->fcport->nvme_prli_service_param = ea->iop[0]; + if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST) + ea->fcport->nvme_first_burst_size = + (ea->iop[1] & 0xffff) * 512; + else + ea->fcport->nvme_first_burst_size = 0; qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; default: @@ -3955,8 +3882,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha) WRT_REG_DWORD(®->isp24.rsp_q_in, 0); WRT_REG_DWORD(®->isp24.rsp_q_out, 0); } + qlt_24xx_config_rings(vha); + /* If the user has configured the speed, set it here */ + if (ha->set_data_rate) { + ql_dbg(ql_dbg_init, vha, 0x00fd, + "Speed set by user : %s Gbps \n", + qla2x00_get_link_speed_str(ha, ha->set_data_rate)); + icb->firmware_options_3 = (ha->set_data_rate << 13); + } + /* PCI posting */ RD_REG_DWORD(&ioreg->hccr); } @@ -4755,6 +4691,16 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) if (!fcport) return NULL; + fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, + sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, + flags); + if (!fcport->ct_desc.ct_sns) { + ql_log(ql_log_warn, vha, 0xd049, + "Failed to allocate ct_sns request.\n"); + kfree(fcport); + return NULL; + } + /* Setup fcport template structure. */ fcport->vha = vha; fcport->port_type = FCT_UNKNOWN; @@ -4763,13 +4709,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) fcport->supported_classes = FC_COS_UNSPECIFIED; fcport->fp_speed = PORT_SPEED_UNKNOWN; - fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma, - flags); fcport->disc_state = DSC_DELETED; fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; fcport->deleted = QLA_SESS_DELETED; fcport->login_retry = vha->hw->login_retry_count; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; fcport->logout_on_delete = 1; if (!fcport->ct_desc.ct_sns) { @@ -4778,6 +4722,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) kfree(fcport); fcport = NULL; } + INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); @@ -5046,11 +4991,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if ((domain & 0xf0) == 0xf0) continue; - /* Bypass if not same domain and area of adapter. */ - if (area && domain && - (area != vha->d_id.b.area || domain != vha->d_id.b.domain)) - continue; - /* Bypass invalid local loop ID. */ if (loop_id > LAST_LOCAL_LOOP_ID) continue; @@ -6101,11 +6041,6 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; uint32_t wait_time; - struct req_que *req; - struct rsp_que *rsp; - - req = vha->req; - rsp = req->rsp; clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); if (vha->flags.online) { @@ -6118,8 +6053,8 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) * Issue a marker after FW becomes * ready. 
*/ - qla2x00_marker(vha, req, rsp, 0, 0, - MK_SYNC_ALL); + qla2x00_marker(vha, vha->hw->base_qpair, + 0, 0, MK_SYNC_ALL); vha->marker_needed = 0; } @@ -6857,8 +6792,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) { int status = 0; struct qla_hw_data *ha = vha->hw; - struct req_que *req = ha->req_q_map[0]; - struct rsp_que *rsp = ha->rsp_q_map[0]; /* If firmware needs to be loaded */ if (qla2x00_isp_firmware(vha)) { @@ -6878,7 +6811,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) status = qla2x00_fw_ready(vha); if (!status) { /* Issue a marker after FW becomes ready. */ - qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); + qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } @@ -7933,22 +7866,15 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - struct req_que *req; - struct rsp_que *rsp; if (!vha->vp_idx) return -EINVAL; rval = qla2x00_fw_ready(base_vha); - if (vha->qpair) - req = vha->qpair->req; - else - req = ha->req_q_map[0]; - rsp = req->rsp; if (rval == QLA_SUCCESS) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); - qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); + qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); } vha->flags.management_server_logged_in = 0; @@ -8340,8 +8266,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) { int status, rval; struct qla_hw_data *ha = vha->hw; - struct req_que *req = ha->req_q_map[0]; - struct rsp_que *rsp = ha->rsp_q_map[0]; struct scsi_qla_host *vp; unsigned long flags; @@ -8353,7 +8277,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) status = qla2x00_fw_ready(vha); if (!status) { /* Issue a marker after FW becomes ready. */ - qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); + qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); vha->flags.online = 1; set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 032635321ad6..63f8e3c19841 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -336,7 +336,7 @@ qla2x00_start_scsi(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { return (QLA_FUNCTION_FAILED); } @@ -490,8 +490,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) /** * qla2x00_marker() - Send a marker IOCB to the firmware. * @vha: HA context - * @req: request queue - * @rsp: response queue + * @qpair: queue pair pointer * @loop_id: loop ID * @lun: LUN * @type: marker modifier @@ -501,18 +500,16 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) * Returns non-zero if a failure occurred, else zero. 
*/ static int -__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, - struct rsp_que *rsp, uint16_t loop_id, - uint64_t lun, uint8_t type) +__qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, + uint16_t loop_id, uint64_t lun, uint8_t type) { mrk_entry_t *mrk; struct mrk_entry_24xx *mrk24 = NULL; - + struct req_que *req = qpair->req; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - req = ha->req_q_map[0]; - mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL); + mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL); if (mrk == NULL) { ql_log(ql_log_warn, base_vha, 0x3026, "Failed to allocate Marker IOCB.\n"); @@ -543,16 +540,15 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, } int -qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, - struct rsp_que *rsp, uint16_t loop_id, uint64_t lun, - uint8_t type) +qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, + uint16_t loop_id, uint64_t lun, uint8_t type) { int ret; unsigned long flags = 0; - spin_lock_irqsave(&vha->hw->hardware_lock, flags); - ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type); - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + ret = __qla2x00_marker(vha, qpair, loop_id, lun, type); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return (ret); } @@ -567,11 +563,11 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked) { if (ha_locked) { - if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, + if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; } else { - if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, + if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; } @@ -1098,88 +1094,300 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) + uint32_t *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) { - void *next_dsd; - uint8_t avail_dsds = 0; - uint32_t dsd_list_len; - struct dsd_dma *dsd_ptr; + struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd; struct scatterlist *sg, *sgl; - int i; - struct scsi_cmnd *cmd; - uint32_t *cur_dsd = dsd; - uint16_t used_dsds = tot_dsds; + struct crc_context *difctx = NULL; struct scsi_qla_host *vha; + uint dsd_list_len; + uint avail_dsds = 0; + uint used_dsds = tot_dsds; + bool dif_local_dma_alloc = false; + bool direction_to_device = false; + int i; if (sp) { - cmd = GET_CMD_SP(sp); + struct scsi_cmnd *cmd = GET_CMD_SP(sp); sgl = scsi_prot_sglist(cmd); vha = sp->vha; + difctx = sp->u.scmd.ctx; + direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, + "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", + __func__, cmd, difctx, sp); } else if (tc) { vha = tc->vha; sgl = tc->prot_sg; + difctx = tc->ctx; + direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE; } else { BUG(); return 1; } - ql_dbg(ql_dbg_tgt, vha, 0xe021, - "%s: enter\n", __func__); - - for_each_sg(sgl, sg, tot_dsds, i) { - dma_addr_t sle_dma; - - /* Allocate additional continuation packets? */ - if (avail_dsds == 0) { - avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? 
- QLA_DSDS_PER_IOCB : used_dsds; - dsd_list_len = (avail_dsds + 1) * 12; - used_dsds -= avail_dsds; - - /* allocate tracking DS */ - dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC); - if (!dsd_ptr) - return 1; - - /* allocate new list */ - dsd_ptr->dsd_addr = next_dsd = - dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, - &dsd_ptr->dsd_list_dma); - - if (!next_dsd) { - /* - * Need to cleanup only this dsd_ptr, rest - * will be done by sp_free_dma() - */ - kfree(dsd_ptr); - return 1; + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, + "%s: enter (write=%u)\n", __func__, direction_to_device); + + /* if initiator doing write or target doing read */ + if (direction_to_device) { + for_each_sg(sgl, sg, tot_dsds, i) { + dma_addr_t sle_phys = sg_phys(sg); + + /* If SGE addr + len flips bits in upper 32-bits */ + if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022, + "%s: page boundary crossing (phys=%llx len=%x)\n", + __func__, sle_phys, sg->length); + + if (difctx) { + ha->dif_bundle_crossed_pages++; + dif_local_dma_alloc = true; + } else { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, + vha, 0xe022, + "%s: difctx pointer is NULL\n", + __func__); + } + break; } + } + ha->dif_bundle_writes++; + } else { + ha->dif_bundle_reads++; + } + + if (ql2xdifbundlinginternalbuffers) + dif_local_dma_alloc = direction_to_device; + + if (dif_local_dma_alloc) { + u32 track_difbundl_buf = 0; + u32 ldma_sg_len = 0; + u8 ldma_needed = 1; + + difctx->no_dif_bundl = 0; + difctx->dif_bundl_len = 0; + + /* Track DSD buffers */ + INIT_LIST_HEAD(&difctx->ldif_dsd_list); + /* Track local DMA buffers */ + INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list); + + for_each_sg(sgl, sg, tot_dsds, i) { + u32 sglen = sg_dma_len(sg); + + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, + "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", + __func__, i, sg_phys(sg), sglen, ldma_sg_len, + difctx->dif_bundl_len, ldma_needed); + + while (sglen) { + u32 xfrlen = 0; + + if (ldma_needed) { + /* + * Allocate list item to store + * the DMA buffers + */ + dsd_ptr = kzalloc(sizeof(*dsd_ptr), + GFP_ATOMIC); + if (!dsd_ptr) { + ql_dbg(ql_dbg_tgt, vha, 0xe024, + "%s: failed alloc dsd_ptr\n", + __func__); + return 1; + } + ha->dif_bundle_kallocs++; + + /* allocate dma buffer */ + dsd_ptr->dsd_addr = dma_pool_alloc + (ha->dif_bundl_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + if (!dsd_ptr->dsd_addr) { + ql_dbg(ql_dbg_tgt, vha, 0xe024, + "%s: failed alloc ->dsd_ptr\n", + __func__); + /* + * need to cleanup only this + * dsd_ptr rest will be done + * by sp_free_dma() + */ + kfree(dsd_ptr); + ha->dif_bundle_kallocs--; + return 1; + } + ha->dif_bundle_dma_allocs++; + ldma_needed = 0; + difctx->no_dif_bundl++; + list_add_tail(&dsd_ptr->list, + &difctx->ldif_dma_hndl_list); + } + + /* xfrlen is min of dma pool size and sglen */ + xfrlen = (sglen > + (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ? 
+ DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len : + sglen; + + /* replace with local allocated dma buffer */ + sg_pcopy_to_buffer(sgl, sg_nents(sgl), + dsd_ptr->dsd_addr + ldma_sg_len, xfrlen, + difctx->dif_bundl_len); + difctx->dif_bundl_len += xfrlen; + sglen -= xfrlen; + ldma_sg_len += xfrlen; + if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE || + sg_is_last(sg)) { + ldma_needed = 1; + ldma_sg_len = 0; + } + } + } - if (sp) { - list_add_tail(&dsd_ptr->list, - &((struct crc_context *) - sp->u.scmd.ctx)->dsd_list); + track_difbundl_buf = used_dsds = difctx->no_dif_bundl; + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025, + "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n", + difctx->dif_bundl_len, difctx->no_dif_bundl, + track_difbundl_buf); - sp->flags |= SRB_CRC_CTX_DSD_VALID; - } else { - list_add_tail(&dsd_ptr->list, - &(tc->ctx->dsd_list)); - *tc->ctx_dsd_alloced = 1; + if (sp) + sp->flags |= SRB_DIF_BUNDL_DMA_VALID; + else + tc->prot_flags = DIF_BUNDL_DMA_VALID; + + list_for_each_entry_safe(dif_dsd, nxt_dsd, + &difctx->ldif_dma_hndl_list, list) { + u32 sglen = (difctx->dif_bundl_len > + DIF_BUNDLING_DMA_POOL_SIZE) ? + DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len; + + BUG_ON(track_difbundl_buf == 0); + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, + 0xe024, + "%s: adding continuation iocb's\n", + __func__); + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? + QLA_DSDS_PER_IOCB : used_dsds; + dsd_list_len = (avail_dsds + 1) * 12; + used_dsds -= avail_dsds; + + /* allocate tracking DS */ + dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); + if (!dsd_ptr) { + ql_dbg(ql_dbg_tgt, vha, 0xe026, + "%s: failed alloc dsd_ptr\n", + __func__); + return 1; + } + ha->dif_bundle_kallocs++; + + difctx->no_ldif_dsd++; + /* allocate new list */ + dsd_ptr->dsd_addr = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + if (!dsd_ptr->dsd_addr) { + ql_dbg(ql_dbg_tgt, vha, 0xe026, + "%s: failed alloc ->dsd_addr\n", + __func__); + /* + * need to cleanup only this dsd_ptr + * rest will be done by sp_free_dma() + */ + kfree(dsd_ptr); + ha->dif_bundle_kallocs--; + return 1; + } + ha->dif_bundle_dma_allocs++; + + if (sp) { + list_add_tail(&dsd_ptr->list, + &difctx->ldif_dsd_list); + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &difctx->ldif_dsd_list); + tc->ctx_dsd_alloced = 1; + } + + /* add new list to cmd iocb or last list */ + *cur_dsd++ = + cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); + *cur_dsd++ = + cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); + *cur_dsd++ = dsd_list_len; + cur_dsd = dsd_ptr->dsd_addr; } - - /* add new list to cmd iocb or last list */ - *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); - *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - *cur_dsd++ = dsd_list_len; - cur_dsd = (uint32_t *)next_dsd; + *cur_dsd++ = cpu_to_le32(LSD(dif_dsd->dsd_list_dma)); + *cur_dsd++ = cpu_to_le32(MSD(dif_dsd->dsd_list_dma)); + *cur_dsd++ = cpu_to_le32(sglen); + avail_dsds--; + difctx->dif_bundl_len -= sglen; + track_difbundl_buf--; } - sle_dma = sg_dma_address(sg); - - *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); - *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); - *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); - avail_dsds--; + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026, + "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__, + difctx->no_ldif_dsd, difctx->no_dif_bundl); + } else { + for_each_sg(sgl, sg, tot_dsds, i) { + dma_addr_t sle_dma; + + /* Allocate additional continuation 
packets? */ + if (avail_dsds == 0) { + avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ? + QLA_DSDS_PER_IOCB : used_dsds; + dsd_list_len = (avail_dsds + 1) * 12; + used_dsds -= avail_dsds; + + /* allocate tracking DS */ + dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC); + if (!dsd_ptr) { + ql_dbg(ql_dbg_tgt + ql_dbg_verbose, + vha, 0xe027, + "%s: failed alloc dsd_dma...\n", + __func__); + return 1; + } + + /* allocate new list */ + dsd_ptr->dsd_addr = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, + &dsd_ptr->dsd_list_dma); + if (!dsd_ptr->dsd_addr) { + /* need to cleanup only this dsd_ptr */ + /* rest will be done by sp_free_dma() */ + kfree(dsd_ptr); + return 1; + } + + if (sp) { + list_add_tail(&dsd_ptr->list, + &difctx->dsd_list); + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &difctx->dsd_list); + tc->ctx_dsd_alloced = 1; + } + + /* add new list to cmd iocb or last list */ + *cur_dsd++ = + cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); + *cur_dsd++ = + cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); + *cur_dsd++ = dsd_list_len; + cur_dsd = dsd_ptr->dsd_addr; + } + sle_dma = sg_dma_address(sg); + *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); + *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); + avail_dsds--; + } } /* Null termination */ *cur_dsd++ = 0; @@ -1187,7 +1395,6 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, *cur_dsd++ = 0; return 0; } - /** * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command * Type 6 IOCB types. @@ -1416,21 +1623,19 @@ qla24xx_start_scsi(srb_t *sp) uint16_t req_cnt; uint16_t tot_dsds; struct req_que *req = NULL; - struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; /* Setup device pointers. 
*/ req = vha->req; - rsp = req->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; vha->marker_needed = 0; @@ -1583,7 +1788,7 @@ qla24xx_dif_start_scsi(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return QLA_FUNCTION_FAILED; vha->marker_needed = 0; @@ -1754,7 +1959,6 @@ qla2xxx_start_scsi_mq(srb_t *sp) uint16_t req_cnt; uint16_t tot_dsds; struct req_que *req = NULL; - struct rsp_que *rsp = NULL; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; @@ -1764,7 +1968,6 @@ qla2xxx_start_scsi_mq(srb_t *sp) spin_lock_irqsave(&qpair->qp_lock, flags); /* Setup qpair pointers */ - rsp = qpair->rsp; req = qpair->req; /* So we know we haven't pci_map'ed anything yet */ @@ -1772,7 +1975,7 @@ qla2xxx_start_scsi_mq(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; @@ -1940,7 +2143,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != + if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; @@ -2208,8 +2411,11 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); - if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) + if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { logio->control_flags |= LCF_NVME_PRLI; + if (sp->vha->flags.nvme_first_burst) + logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST; + } logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; @@ -2991,8 +3197,8 @@ qla82xx_start_scsi(srb_t *sp) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, - rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { + if (qla2x00_marker(vha, ha->base_qpair, + 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x300c, "qla2x00_marker failed for cmd=%p.\n", cmd); return QLA_FUNCTION_FAILED; @@ -3434,23 +3640,22 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) int qla2x00_start_sp(srb_t *sp) { - int rval; + int rval = QLA_SUCCESS; scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qp = sp->qpair; void *pkt; unsigned long flags; - rval = QLA_FUNCTION_FAILED; spin_lock_irqsave(qp->qp_lock_ptr, flags); pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); if (!pkt) { + rval = EAGAIN; ql_log(ql_log_warn, vha, 0x700c, "qla2x00_alloc_iocbs failed.\n"); goto done; } - rval = QLA_SUCCESS; switch (sp->type) { case SRB_LOGIN_CMD: IS_FWI2_CAPABLE(ha) ? 
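To summarize the marker-IOCB rework running through the qla_init.c, qla_iocb.c and qla_mbx.c hunks above: qla2x00_marker() and __qla2x00_marker() now take a struct qla_qpair instead of separate request/response queue pointers, derive the request queue from qpair->req, allocate the IOCB via __qla2x00_alloc_iocbs(qpair, NULL), and serialize on qpair->qp_lock_ptr rather than the global hardware_lock. A minimal usage sketch using the driver's own identifiers as they appear in this patch (illustrative only, not a standalone compilable unit):

/* Old convention (removed above): callers fetched the queues by hand,
 * typically req = ha->req_q_map[0]; rsp = req->rsp; */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);

/* New convention: pass the queue pair; the helper reads qpair->req and
 * takes qpair->qp_lock_ptr internally. */
qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);

/* Submission paths that already hold the qpair lock (e.g.
 * qla2xxx_start_scsi_mq) call the unlocked variant directly. */
__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL);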
@@ -3646,8 +3851,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) /* Send marker if required */ if (vha->marker_needed != 0) { - if (qla2x00_marker(vha, req, - rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) + if (qla2x00_marker(vha, ha->base_qpair, + 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) return EXT_STATUS_MAILBOX; vha->marker_needed = 0; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 8507c43b918c..69bbea9239cc 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -834,7 +834,8 @@ skip_rio: * Restore for Physical Port only */ if (!vha->vp_idx) { - if (ha->flags.fawwpn_enabled) { + if (ha->flags.fawwpn_enabled && + (ha->current_topology == ISP_CFG_F)) { void *wwpn = ha->init_cb->port_name; memcpy(vha->port_name, wwpn, WWN_SIZE); fc_host_port_name(vha->host) = @@ -1714,6 +1715,15 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, vha->hw->exch_starvation = 0; data[0] = MBS_COMMAND_COMPLETE; + + if (sp->type == SRB_PRLI_CMD) { + lio->u.logio.iop[0] = + le32_to_cpu(logio->io_parameter[0]); + lio->u.logio.iop[1] = + le32_to_cpu(logio->io_parameter[1]); + goto logio_done; + } + if (sp->type != SRB_LOGIN_CMD) goto logio_done; @@ -2725,6 +2735,17 @@ check_scsi_status: cp->device->vendor); break; + case CS_DMA: + ql_log(ql_log_info, fcport->vha, 0x3022, + "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", + comp_status, scsi_status, res, vha->host_no, + cp->device->id, cp->device->lun, fcport->d_id.b24, + ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, + resid_len, fw_resid_len, sp, cp); + ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, + pkt, sizeof(*sts24)); + res = DID_ERROR << 16; + break; default: res = DID_ERROR << 16; break; @@ -3410,7 +3431,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) min_vecs++; } - if (USER_CTRL_IRQ(ha)) { + if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { /* user wants to control IRQ setting for target mode */ ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, ha->msix_count, PCI_IRQ_MSIX); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 191b6b7c8747..5400696e1f6b 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1109,7 +1109,12 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) * FW supports nvme and driver load parameter requested nvme. * BIT 26 of fw_attributes indicates NVMe support. */ - if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) { + if ((ha->fw_attributes_h & + (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) && + ql2xnvmeenable) { + if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST) + vha->flags.nvme_first_burst = 1; + vha->flags.nvme_enabled = 1; ql_log(ql_log_info, vha, 0xd302, "%s: FC-NVMe is Enabled (0x%x)\n", @@ -1508,16 +1513,12 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; scsi_qla_host_t *vha; - struct req_que *req; - struct rsp_que *rsp; vha = fcport->vha; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e, "Entered %s.\n", __func__); - req = vha->hw->req_q_map[0]; - rsp = req->rsp; mcp->mb[0] = MBC_ABORT_TARGET; mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { @@ -1540,7 +1541,7 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag) } /* Issue marker IOCB. 
*/ - rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0, + rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0, MK_SYNC_ID); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1040, @@ -1560,16 +1561,12 @@ qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; scsi_qla_host_t *vha; - struct req_que *req; - struct rsp_que *rsp; vha = fcport->vha; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042, "Entered %s.\n", __func__); - req = vha->hw->req_q_map[0]; - rsp = req->rsp; mcp->mb[0] = MBC_LUN_RESET; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) @@ -1589,7 +1586,7 @@ qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag) } /* Issue marker IOCB. */ - rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, + rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l, MK_SYNC_ID_LUN); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1044, @@ -2244,10 +2241,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) mcp->out_mb = MBX_2|MBX_1|MBX_0; } else if (IS_FWI2_CAPABLE(vha->hw)) { mcp->mb[0] = MBC_LIP_FULL_LOGIN; - if (N2N_TOPO(vha->hw)) - mcp->mb[1] = BIT_4; /* re-init */ - else - mcp->mb[1] = BIT_6; /* LIP */ + mcp->mb[1] = BIT_4; mcp->mb[2] = 0; mcp->mb[3] = vha->hw->loop_reset_delay; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; @@ -2757,7 +2751,7 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) "Entered %s.\n", __func__); mcp->mb[0] = MBC_LIP_FULL_LOGIN; - mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; + mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0; mcp->mb[2] = 0; mcp->mb[3] = 0; mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; @@ -3184,7 +3178,6 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, scsi_qla_host_t *vha; struct qla_hw_data *ha; struct req_que *req; - struct rsp_que *rsp; struct qla_qpair *qpair; vha = fcport->vha; @@ -3197,10 +3190,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, if (vha->vp_idx && vha->qpair) { /* NPIV port */ qpair = vha->qpair; - rsp = qpair->rsp; req = qpair->req; - } else { - rsp = req->rsp; } tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); @@ -3257,7 +3247,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, } /* Issue marker IOCB. */ - rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l, + rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l, type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID); if (rval2 != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1099, @@ -5248,6 +5238,66 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) return rval; } +/* Set the specified data rate */ +int +qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + uint16_t val; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106, + "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate, + mode); + + if (!IS_FWI2_CAPABLE(ha)) + return QLA_FUNCTION_FAILED; + + memset(mcp, 0, sizeof(*mcp)); + switch (ha->set_data_rate) { + case PORT_SPEED_AUTO: + case PORT_SPEED_4GB: + case PORT_SPEED_8GB: + case PORT_SPEED_16GB: + case PORT_SPEED_32GB: + val = ha->set_data_rate; + break; + default: + ql_log(ql_log_warn, vha, 0x1199, + "Unrecognized speed setting:%d. 
Setting Autoneg\n", + ha->set_data_rate); + val = ha->set_data_rate = PORT_SPEED_AUTO; + break; + } + + mcp->mb[0] = MBC_DATA_RATE; + mcp->mb[1] = mode; + mcp->mb[2] = val; + + mcp->out_mb = MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + mcp->in_mb |= MBX_4|MBX_3; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1107, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + if (mcp->mb[1] != 0x7) + ql_dbg(ql_dbg_mbx, vha, 0x1179, + "Speed set:0x%x\n", mcp->mb[1]); + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, + "Done %s.\n", __func__); + } + + return rval; +} + int qla2x00_get_data_rate(scsi_qla_host_t *vha) { @@ -5263,7 +5313,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_DATA_RATE; - mcp->mb[1] = 0; + mcp->mb[1] = QLA_GET_DATA_RATE; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) @@ -6268,8 +6318,6 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->d_id.b.rsvd_1 = 0; if (fcport->fc4f_nvme) { - fcport->nvme_prli_service_param = - pd->prli_nvme_svc_param_word_3; fcport->port_type = FCT_NVME; } else { /* If not target must be initiator or unknown type. */ diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 39d892bbd219..41c85da3ab32 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -185,6 +185,14 @@ static void qla_nvme_abort_work(struct work_struct *work) struct qla_hw_data *ha = fcport->vha->hw; int rval; + if (fcport) + ql_dbg(ql_dbg_io, fcport->vha, 0xffff, + "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n", + __func__, sp, sp->handle, fcport, fcport->deleted); + + if (!ha->flags.fw_started && (fcport && fcport->deleted)) + return; + rval = ha->isp_ops->abort_command(sp); ql_dbg(ql_dbg_io, fcport->vha, 0x212b, @@ -358,17 +366,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) /* No data transfer how do we check buffer len == 0?? 
*/ if (fd->io_dir == NVMEFC_FCP_READ) { - cmd_pkt->control_flags = - cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE); + cmd_pkt->control_flags = CF_READ_DATA; vha->qla_stats.input_bytes += fd->payload_length; vha->qla_stats.input_requests++; } else if (fd->io_dir == NVMEFC_FCP_WRITE) { - cmd_pkt->control_flags = - cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE); + cmd_pkt->control_flags = CF_WRITE_DATA; + if ((vha->flags.nvme_first_burst) && + (sp->fcport->nvme_prli_service_param & + NVME_PRLI_SP_FIRST_BURST)) { + if ((fd->payload_length <= + sp->fcport->nvme_first_burst_size) || + (sp->fcport->nvme_first_burst_size == 0)) + cmd_pkt->control_flags |= + CF_NVME_FIRST_BURST_ENABLE; + } vha->qla_stats.output_bytes += fd->payload_length; vha->qla_stats.output_requests++; } else if (fd->io_dir == 0) { - cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE); + cmd_pkt->control_flags = 0; } /* Set NPORT-ID */ @@ -600,6 +615,7 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work) struct fc_port *fcport = container_of(work, struct fc_port, nvme_del_work); struct qla_nvme_rport *qla_rport, *trport; + scsi_qla_host_t *base_vha; if (!IS_ENABLED(CONFIG_NVME_FC)) return; @@ -607,6 +623,15 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work) ql_log(ql_log_warn, NULL, 0x2112, "%s: unregister remoteport on %p\n",__func__, fcport); + base_vha = pci_get_drvdata(fcport->vha->hw->pdev); + if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2114, + "%s: Notify FC-NVMe transport, set devloss=0\n", + __func__); + + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); + } + list_for_each_entry_safe(qla_rport, trport, &fcport->vha->nvme_rport_list, list) { if (qla_rport->fcport == fcport) { @@ -623,23 +648,11 @@ static void qla_nvme_unregister_remote_port(struct work_struct *work) void qla_nvme_delete(struct scsi_qla_host *vha) { - struct qla_nvme_rport *qla_rport, *trport; - fc_port_t *fcport; int nv_ret; if (!IS_ENABLED(CONFIG_NVME_FC)) return; - list_for_each_entry_safe(qla_rport, trport, - &vha->nvme_rport_list, list) { - fcport = qla_rport->fcport; - - ql_log(ql_log_info, fcport->vha, 0x2114, "%s: fcport=%p\n", - __func__, fcport); - - nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); - } - if (vha->nvme_local_port) { init_completion(&vha->nvme_del_done); ql_log(ql_log_info, vha, 0x2116, diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h index 4941d107fb1c..da8dad5ad693 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.h +++ b/drivers/scsi/qla2xxx/qla_nvme.h @@ -57,7 +57,7 @@ struct cmd_nvme { uint64_t rsvd; uint16_t control_flags; /* Control Flags */ -#define CF_NVME_ENABLE BIT_9 +#define CF_NVME_FIRST_BURST_ENABLE BIT_11 #define CF_DIF_SEG_DESCR_ENABLE BIT_3 #define CF_DATA_SEG_DESCR_ENABLE BIT_2 #define CF_READ_DATA BIT_1 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c6ef83d0d99b..677f82fdf56f 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -285,6 +285,27 @@ MODULE_PARM_DESC(qla2xuseresexchforels, "Reserve 1/2 of emergency exchanges for ELS.\n" " 0 (default): disabled"); +int ql2xprotmask; +module_param(ql2xprotmask, int, 0644); +MODULE_PARM_DESC(ql2xprotmask, + "Override DIF/DIX protection capabilities mask\n" + "Default is 0 which sets protection mask based on " + "capabilities reported by HBA firmware.\n"); + +int ql2xprotguard; +module_param(ql2xprotguard, int, 0644); +MODULE_PARM_DESC(ql2xprotguard, "Override choice of DIX 
checksum\n" + " 0 -- Let HBA firmware decide\n" + " 1 -- Force T10 CRC\n" + " 2 -- Force IP checksum\n"); + +int ql2xdifbundlinginternalbuffers; +module_param(ql2xdifbundlinginternalbuffers, int, 0644); +MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers, + "Force using internal buffers for DIF information\n" + "0 (Default). Based on check.\n" + "1 Force using internal buffers\n"); + /* * SCSI host template entry points */ @@ -804,7 +825,44 @@ qla2xxx_qpair_sp_free_dma(void *ptr) ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt; ha->gbl_dsd_avail += ctx1->dsd_use_cnt; mempool_free(ctx1, ha->ctx_mempool); + sp->flags &= ~SRB_FCP_CMND_DMA_VALID; + } + if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { + struct crc_context *difctx = sp->u.scmd.ctx; + struct dsd_dma *dif_dsd, *nxt_dsd; + + list_for_each_entry_safe(dif_dsd, nxt_dsd, + &difctx->ldif_dma_hndl_list, list) { + list_del(&dif_dsd->list); + dma_pool_free(ha->dif_bundl_pool, dif_dsd->dsd_addr, + dif_dsd->dsd_list_dma); + kfree(dif_dsd); + difctx->no_dif_bundl--; + } + + list_for_each_entry_safe(dif_dsd, nxt_dsd, + &difctx->ldif_dsd_list, list) { + list_del(&dif_dsd->list); + dma_pool_free(ha->dl_dma_pool, dif_dsd->dsd_addr, + dif_dsd->dsd_list_dma); + kfree(dif_dsd); + difctx->no_ldif_dsd--; + } + + if (difctx->no_ldif_dsd) { + ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, + "%s: difctx->no_ldif_dsd=%x\n", + __func__, difctx->no_ldif_dsd); + } + + if (difctx->no_dif_bundl) { + ql_dbg(ql_dbg_tgt+ql_dbg_verbose, sp->vha, 0xe022, + "%s: difctx->no_dif_bundl=%x\n", + __func__, difctx->no_dif_bundl); + } + sp->flags &= ~SRB_DIF_BUNDL_DMA_VALID; } + end: CMD_SP(cmd) = NULL; qla2xxx_rel_qpair_sp(sp->qpair, sp); @@ -3342,13 +3400,16 @@ skip_dpc: "Registering for DIF/DIX type 1 and 3 protection.\n"); if (ql2xenabledif == 1) prot = SHOST_DIX_TYPE0_PROTECTION; - scsi_host_set_prot(host, - prot | SHOST_DIF_TYPE1_PROTECTION - | SHOST_DIF_TYPE2_PROTECTION - | SHOST_DIF_TYPE3_PROTECTION - | SHOST_DIX_TYPE1_PROTECTION - | SHOST_DIX_TYPE2_PROTECTION - | SHOST_DIX_TYPE3_PROTECTION); + if (ql2xprotmask) + scsi_host_set_prot(host, ql2xprotmask); + else + scsi_host_set_prot(host, + prot | SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION + | SHOST_DIX_TYPE1_PROTECTION + | SHOST_DIX_TYPE2_PROTECTION + | SHOST_DIX_TYPE3_PROTECTION); guard = SHOST_DIX_GUARD_CRC; @@ -3356,7 +3417,10 @@ skip_dpc: (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) guard |= SHOST_DIX_GUARD_IP; - scsi_host_set_guard(host, guard); + if (ql2xprotguard) + scsi_host_set_guard(host, ql2xprotguard); + else + scsi_host_set_guard(host, guard); } else base_vha->flags.difdix_supported = 0; } @@ -3997,9 +4061,86 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, "Failed to allocate memory for fcp_cmnd_dma_pool.\n"); goto fail_dl_dma_pool; } + + if (ql2xenabledif) { + u64 bufsize = DIF_BUNDLING_DMA_POOL_SIZE; + struct dsd_dma *dsd, *nxt; + uint i; + /* Creata a DMA pool of buffers for DIF bundling */ + ha->dif_bundl_pool = dma_pool_create(name, + &ha->pdev->dev, DIF_BUNDLING_DMA_POOL_SIZE, 8, 0); + if (!ha->dif_bundl_pool) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, + "%s: failed create dif_bundl_pool\n", + __func__); + goto fail_dif_bundl_dma_pool; + } + + INIT_LIST_HEAD(&ha->pool.good.head); + INIT_LIST_HEAD(&ha->pool.unusable.head); + ha->pool.good.count = 0; + ha->pool.unusable.count = 0; + for (i = 0; i < 128; i++) { + dsd = kzalloc(sizeof(*dsd), GFP_ATOMIC); + if (!dsd) { + ql_dbg_pci(ql_dbg_init, ha->pdev, + 0xe0ee, "%s: failed 
alloc dsd\n", + __func__); + return 1; + } + ha->dif_bundle_kallocs++; + + dsd->dsd_addr = dma_pool_alloc( + ha->dif_bundl_pool, GFP_ATOMIC, + &dsd->dsd_list_dma); + if (!dsd->dsd_addr) { + ql_dbg_pci(ql_dbg_init, ha->pdev, + 0xe0ee, + "%s: failed alloc ->dsd_addr\n", + __func__); + kfree(dsd); + ha->dif_bundle_kallocs--; + continue; + } + ha->dif_bundle_dma_allocs++; + + /* + * if DMA buffer crosses 4G boundary, + * put it on bad list + */ + if (MSD(dsd->dsd_list_dma) ^ + MSD(dsd->dsd_list_dma + bufsize)) { + list_add_tail(&dsd->list, + &ha->pool.unusable.head); + ha->pool.unusable.count++; + } else { + list_add_tail(&dsd->list, + &ha->pool.good.head); + ha->pool.good.count++; + } + } + + /* return the good ones back to the pool */ + list_for_each_entry_safe(dsd, nxt, + &ha->pool.good.head, list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, + dsd->dsd_addr, dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + } + + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0024, + "%s: dif dma pool (good=%u unusable=%u)\n", + __func__, ha->pool.good.count, + ha->pool.unusable.count); + } + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, - "dl_dma_pool=%p fcp_cmnd_dma_pool=%p.\n", - ha->dl_dma_pool, ha->fcp_cmnd_dma_pool); + "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", + ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, + ha->dif_bundl_pool); } /* Allocate memory for SNS commands */ @@ -4164,6 +4305,24 @@ fail_free_ms_iocb: dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), ha->sns_cmd, ha->sns_cmd_dma); fail_dma_pool: + if (ql2xenabledif) { + struct dsd_dma *dsd, *nxt; + + list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, + list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, + dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + ha->pool.unusable.count--; + } + dma_pool_destroy(ha->dif_bundl_pool); + ha->dif_bundl_pool = NULL; + } + +fail_dif_bundl_dma_pool: if (IS_QLA82XX(ha) || ql2xenabledif) { dma_pool_destroy(ha->fcp_cmnd_dma_pool); ha->fcp_cmnd_dma_pool = NULL; @@ -4544,6 +4703,32 @@ qla2x00_mem_free(struct qla_hw_data *ha) mempool_destroy(ha->ctx_mempool); + if (ql2xenabledif) { + struct dsd_dma *dsd, *nxt; + + list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, + list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, + dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + ha->pool.unusable.count--; + } + list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) { + list_del(&dsd->list); + dma_pool_free(ha->dif_bundl_pool, dsd->dsd_addr, + dsd->dsd_list_dma); + ha->dif_bundle_dma_allocs--; + kfree(dsd); + ha->dif_bundle_kallocs--; + } + } + + if (ha->dif_bundl_pool) + dma_pool_destroy(ha->dif_bundl_pool); + qlt_mem_free(ha); if (ha->init_cb) @@ -5019,14 +5204,14 @@ qla2x00_do_work(struct scsi_qla_host *vha) struct qla_work_evt *e, *tmp; unsigned long flags; LIST_HEAD(work); + int rc; spin_lock_irqsave(&vha->work_lock, flags); list_splice_init(&vha->work_list, &work); spin_unlock_irqrestore(&vha->work_lock, flags); list_for_each_entry_safe(e, tmp, &work, list) { - list_del_init(&e->list); - + rc = QLA_SUCCESS; switch (e->type) { case QLA_EVT_AEN: fc_host_post_event(vha->host, fc_get_event_number(), @@ -5040,7 +5225,7 @@ qla2x00_do_work(struct scsi_qla_host *vha) e->u.logio.data); break; case QLA_EVT_ASYNC_LOGOUT: - qla2x00_async_logout(vha, e->u.logio.fcport); + rc = 
qla2x00_async_logout(vha, e->u.logio.fcport); break; case QLA_EVT_ASYNC_LOGOUT_DONE: qla2x00_async_logout_done(vha, e->u.logio.fcport, @@ -5085,7 +5270,7 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla24xx_do_nack_work(vha, e); break; case QLA_EVT_ASYNC_PRLO: - qla2x00_async_prlo(vha, e->u.logio.fcport); + rc = qla2x00_async_prlo(vha, e->u.logio.fcport); break; case QLA_EVT_ASYNC_PRLO_DONE: qla2x00_async_prlo_done(vha, e->u.logio.fcport, @@ -5118,6 +5303,15 @@ qla2x00_do_work(struct scsi_qla_host *vha) e->u.fcport.fcport, false); break; } + + if (rc == EAGAIN) { + /* put 'work' at head of 'vha->work_list' */ + spin_lock_irqsave(&vha->work_lock, flags); + list_splice(&work, &vha->work_list); + spin_unlock_irqrestore(&vha->work_lock, flags); + break; + } + list_del_init(&e->list); if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); @@ -6930,13 +7124,64 @@ qla2xxx_pci_resume(struct pci_dev *pdev) ha->flags.eeh_busy = 0; } +static void +qla_pci_reset_prepare(struct pci_dev *pdev) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = base_vha->hw; + struct qla_qpair *qpair; + + ql_log(ql_log_warn, base_vha, 0xffff, + "%s.\n", __func__); + + /* + * PCI FLR/function reset is about to reset the + * slot. Stop the chip to stop all DMA access. + * It is assumed that pci_reset_done will be called + * after FLR to resume Chip operation. + */ + ha->flags.eeh_busy = 1; + mutex_lock(&ha->mq_lock); + list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) + qpair->online = 0; + mutex_unlock(&ha->mq_lock); + + set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + qla2x00_abort_isp_cleanup(base_vha); + qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); +} + +static void +qla_pci_reset_done(struct pci_dev *pdev) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); + struct qla_hw_data *ha = base_vha->hw; + struct qla_qpair *qpair; + + ql_log(ql_log_warn, base_vha, 0xffff, + "%s.\n", __func__); + + /* + * FLR just completed by PCI layer. 
Resume adapter + */ + ha->flags.eeh_busy = 0; + mutex_lock(&ha->mq_lock); + list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) + qpair->online = 1; + mutex_unlock(&ha->mq_lock); + + base_vha->flags.online = 1; + ha->isp_ops->abort_isp(base_vha); + clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); +} + static int qla2xxx_map_queues(struct Scsi_Host *shost) { int rc; scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; struct blk_mq_queue_map *qmap = &shost->tag_set.map[0]; - if (USER_CTRL_IRQ(vha->hw)) + if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) rc = blk_mq_map_queues(qmap); else rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); @@ -6948,6 +7193,8 @@ static const struct pci_error_handlers qla2xxx_err_handler = { .mmio_enabled = qla2xxx_pci_mmio_enabled, .slot_reset = qla2xxx_pci_slot_reset, .resume = qla2xxx_pci_resume, + .reset_prepare = qla_pci_reset_prepare, + .reset_done = qla_pci_reset_done, }; static struct pci_device_id qla2xxx_pci_tbl[] = { diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 510337eac106..582d1663f971 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -660,14 +660,14 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, sp->u.iocb_cmd.u.nack.ntfy = ntfy; sp->done = qla2x00_async_nack_sp_done; - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - ql_dbg(ql_dbg_disc, vha, 0x20f4, "Async-%s %8phC hndl %x %s\n", sp->name, fcport->port_name, sp->handle, c); + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) + goto done_free_sp; + return rval; done_free_sp: @@ -684,6 +684,9 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) switch (e->u.nack.type) { case SRB_NACK_PRLI: + t = e->u.nack.fcport; + flush_work(&t->del_work); + flush_work(&t->free_work); mutex_lock(&vha->vha_tgt.tgt_mutex); t = qlt_create_sess(vha, e->u.nack.fcport, 0); mutex_unlock(&vha->vha_tgt.tgt_mutex); @@ -3230,7 +3233,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, - prm->prot_seg_cnt, &tc)) + prm->prot_seg_cnt, cmd)) goto crc_queuing_error; } return QLA_SUCCESS; @@ -3257,13 +3260,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, unsigned long flags = 0; int res; - if (cmd->sess && cmd->sess->deleted) { + if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || + (cmd->sess && cmd->sess->deleted)) { cmd->state = QLA_TGT_STATE_PROCESSED; - if (cmd->sess->logout_completed) - /* no need to terminate. FW already freed exchange. 
*/ - qlt_abort_cmd_on_host_reset(cmd->vha, cmd); - else - qlt_send_term_exchange(qpair, cmd, &cmd->atio, 0, 0); + qlt_abort_cmd_on_host_reset(cmd->vha, cmd); return 0; } @@ -6343,7 +6343,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, struct atio_from_isp *a = &prm->tm_iocb2; struct scsi_qla_host *vha = tgt->vha; struct qla_hw_data *ha = vha->hw; - struct fc_port *sess = NULL; + struct fc_port *sess; unsigned long flags; uint8_t *s_id = NULL; /* to hide compiler warnings */ int rc; @@ -6369,7 +6369,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt, goto out_term2; } else { if (sess->deleted) { - sess = NULL; goto out_term2; } @@ -6377,7 +6376,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt, ql_dbg(ql_dbg_tgt_tmr, vha, 0xf020, "%s: kref_get fail %8phC\n", __func__, sess->port_name); - sess = NULL; goto out_term2; } } @@ -6396,8 +6394,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt, return; out_term2: - if (sess) - ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); out_term: qlt_send_term_exchange(ha->base_qpair, NULL, &prm->tm_iocb2, 1, 0); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 577e1786a3f1..f3de75000a08 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -928,6 +928,8 @@ struct qla_tgt_cmd { uint64_t lba; uint16_t a_guard, e_guard, a_app_tag, e_app_tag; uint32_t a_ref_tag, e_ref_tag; +#define DIF_BUNDL_DMA_VALID 1 + uint16_t prot_flags; uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 0ccd06f11f12..9e52500caff0 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -221,7 +221,13 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; } -static int +static inline struct qla27xx_fwdt_entry * +qla27xx_next_entry(struct qla27xx_fwdt_entry *ent) +{ + return (void *)ent + ent->hdr.size; +} + +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -229,10 +235,10 @@ qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha, "%s: nop [%lx]\n", __func__, *len); qla27xx_skip_entry(ent, buf); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -241,10 +247,10 @@ qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha, qla27xx_skip_entry(ent, buf); /* terminate */ - return true; + return NULL; } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -255,10 +261,10 @@ qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha, qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset, ent->t256.reg_count, ent->t256.reg_width, buf, len); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -269,10 +275,10 @@ qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha, qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf); qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * 
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -284,10 +290,10 @@ qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha, qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset, ent->t258.reg_count, ent->t258.reg_width, buf, len); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -299,10 +305,10 @@ qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha, qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf); qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -313,10 +319,10 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, qla27xx_insert32(ent->t260.pci_offset, buf, len); qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -326,10 +332,10 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, "%s: wrpci [%lx]\n", __func__, *len); qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -362,6 +368,11 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, ent->t262.start_addr = start; ent->t262.end_addr = end; } + } else if (ent->t262.ram_area == T262_RAM_AREA_MISC) { + if (buf) { + ent->t262.start_addr = start; + ent->t262.end_addr = end; + } } else { ql_dbg(ql_dbg_misc, vha, 0xd022, "%s: unknown area %x\n", __func__, ent->t262.ram_area); @@ -384,10 +395,10 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, } *len += dwords * sizeof(uint32_t); done: - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -450,10 +461,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, qla27xx_skip_entry(ent, buf); } - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -478,10 +489,10 @@ qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha, qla27xx_skip_entry(ent, buf); } - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -492,10 +503,10 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, if (buf) qla24xx_pause_risc(reg, vha->hw); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -504,10 +515,10 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, if (buf) qla24xx_soft_reset(vha->hw); - return false; + return qla27xx_next_entry(ent); } -static int +static struct 
qla27xx_fwdt_entry * qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -517,10 +528,10 @@ qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha, "%s: dis intr [%lx]\n", __func__, *len); qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -587,10 +598,10 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, break; } - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -604,10 +615,10 @@ qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha, if (buf) ent->t269.scratch_size = 5 * sizeof(uint32_t); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -625,10 +636,10 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, addr += sizeof(uint32_t); } - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -642,10 +653,10 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, qla27xx_write_reg(reg, 0xc4, data, buf); qla27xx_write_reg(reg, 0xc0, addr, buf); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -662,10 +673,10 @@ qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha, } *len += dwords * sizeof(uint32_t); - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -685,10 +696,10 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, addr += sizeof(uint32_t); } - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -746,10 +757,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, qla27xx_skip_entry(ent, buf); } - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { @@ -763,7 +774,7 @@ qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha, qla27xx_skip_entry(ent, buf); goto done; } - if (offset + ent->t275.length > ent->hdr.entry_size) { + if (offset + ent->t275.length > ent->hdr.size) { ql_dbg(ql_dbg_misc, vha, 0xd030, "%s: buffer overflow\n", __func__); qla27xx_skip_entry(ent, buf); @@ -772,59 +783,103 @@ qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha, qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len); done: - return false; + return qla27xx_next_entry(ent); } -static int +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint type = vha->hw->pdev->device >> 4 & 0xf; + uint func = vha->hw->port_no & 0x3; + + ql_dbg(ql_dbg_misc + 
ql_dbg_verbose, vha, 0xd214, + "%s: cond [%lx]\n", __func__, *len); + + if (type != ent->t276.cond1 || func != ent->t276.cond2) { + ent = qla27xx_next_entry(ent); + qla27xx_skip_entry(ent, buf); + } + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215, + "%s: rdpep [%lx]\n", __func__, *len); + qla27xx_insert32(ent->t277.wr_cmd_data, buf, len); + qla27xx_write_reg(reg, ent->t277.cmd_addr, ent->t277.wr_cmd_data, buf); + qla27xx_read_reg(reg, ent->t277.data_addr, buf, len); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * +qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216, + "%s: wrpep [%lx]\n", __func__, *len); + qla27xx_write_reg(reg, ent->t278.data_addr, ent->t278.wr_data, buf); + qla27xx_write_reg(reg, ent->t278.cmd_addr, ent->t278.wr_cmd_data, buf); + + return qla27xx_next_entry(ent); +} + +static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ql_dbg(ql_dbg_misc, vha, 0xd2ff, - "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len); + "%s: type %x [%lx]\n", __func__, ent->hdr.type, *len); qla27xx_skip_entry(ent, buf); - return false; + return qla27xx_next_entry(ent); } -struct qla27xx_fwdt_entry_call { +static struct { uint type; - int (*call)( - struct scsi_qla_host *, - struct qla27xx_fwdt_entry *, - void *, - ulong *); -}; - -static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = { - { ENTRY_TYPE_NOP , qla27xx_fwdt_entry_t0 } , - { ENTRY_TYPE_TMP_END , qla27xx_fwdt_entry_t255 } , - { ENTRY_TYPE_RD_IOB_T1 , qla27xx_fwdt_entry_t256 } , - { ENTRY_TYPE_WR_IOB_T1 , qla27xx_fwdt_entry_t257 } , - { ENTRY_TYPE_RD_IOB_T2 , qla27xx_fwdt_entry_t258 } , - { ENTRY_TYPE_WR_IOB_T2 , qla27xx_fwdt_entry_t259 } , - { ENTRY_TYPE_RD_PCI , qla27xx_fwdt_entry_t260 } , - { ENTRY_TYPE_WR_PCI , qla27xx_fwdt_entry_t261 } , - { ENTRY_TYPE_RD_RAM , qla27xx_fwdt_entry_t262 } , - { ENTRY_TYPE_GET_QUEUE , qla27xx_fwdt_entry_t263 } , - { ENTRY_TYPE_GET_FCE , qla27xx_fwdt_entry_t264 } , - { ENTRY_TYPE_PSE_RISC , qla27xx_fwdt_entry_t265 } , - { ENTRY_TYPE_RST_RISC , qla27xx_fwdt_entry_t266 } , - { ENTRY_TYPE_DIS_INTR , qla27xx_fwdt_entry_t267 } , - { ENTRY_TYPE_GET_HBUF , qla27xx_fwdt_entry_t268 } , - { ENTRY_TYPE_SCRATCH , qla27xx_fwdt_entry_t269 } , - { ENTRY_TYPE_RDREMREG , qla27xx_fwdt_entry_t270 } , - { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } , - { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } , - { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } , - { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } , - { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } , - { -1 , qla27xx_fwdt_entry_other } + typeof(qla27xx_fwdt_entry_other)(*call); +} qla27xx_fwdt_entry_call[] = { + { ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 }, + { ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 }, + { ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 }, + { ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 }, + { ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 }, + { ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 }, + { ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 }, + { ENTRY_TYPE_WR_PCI, 
qla27xx_fwdt_entry_t261 }, + { ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 }, + { ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 }, + { ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 }, + { ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 }, + { ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 }, + { ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 }, + { ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 }, + { ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 }, + { ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 }, + { ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 }, + { ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 }, + { ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 }, + { ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 }, + { ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 }, + { ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 }, + { ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 }, + { ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 }, + { -1, qla27xx_fwdt_entry_other } }; -static inline int (*qla27xx_find_entry(uint type)) - (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *) +static inline +typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type)) { - struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list; + typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call; while (list->type < type) list++; @@ -834,14 +889,6 @@ static inline int (*qla27xx_find_entry(uint type)) return qla27xx_fwdt_entry_other; } -static inline void * -qla27xx_next_entry(void *p) -{ - struct qla27xx_fwdt_entry *ent = p; - - return p + ent->hdr.entry_size; -} - static void qla27xx_walk_template(struct scsi_qla_host *vha, struct qla27xx_fwdt_template *tmp, void *buf, ulong *len) @@ -852,18 +899,16 @@ qla27xx_walk_template(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd01a, "%s: entry count %lx\n", __func__, count); while (count--) { - if (buf && *len >= vha->hw->fw_dump_len) + ent = qla27xx_find_entry(ent->hdr.type)(vha, ent, buf, len); + if (!ent) break; - if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len)) - break; - ent = qla27xx_next_entry(ent); } if (count) ql_dbg(ql_dbg_misc, vha, 0xd018, "%s: entry residual count (%lx)\n", __func__, count); - if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END) + if (ent) ql_dbg(ql_dbg_misc, vha, 0xd019, "%s: missing end entry (%lx)\n", __func__, count); diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index 141c1c5e73f4..5c2c2a8a19c4 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -54,6 +54,9 @@ struct __packed qla27xx_fwdt_template { #define ENTRY_TYPE_PCICFG 273 #define ENTRY_TYPE_GET_SHADOW 274 #define ENTRY_TYPE_WRITE_BUF 275 +#define ENTRY_TYPE_CONDITIONAL 276 +#define ENTRY_TYPE_RDPEPREG 277 +#define ENTRY_TYPE_WRPEPREG 278 #define CAPTURE_FLAG_PHYS_ONLY BIT_0 #define CAPTURE_FLAG_PHYS_VIRT BIT_1 @@ -62,8 +65,8 @@ struct __packed qla27xx_fwdt_template { struct __packed qla27xx_fwdt_entry { struct __packed { - uint32_t entry_type; - uint32_t entry_size; + uint32_t type; + uint32_t size; uint32_t reserved_1; uint8_t capture_flags; @@ -199,6 +202,24 @@ struct __packed qla27xx_fwdt_entry { uint32_t length; uint8_t buffer[]; } t275; + + struct __packed { + uint32_t cond1; + uint32_t cond2; + } t276; + + struct __packed { + uint32_t cmd_addr; + uint32_t wr_cmd_data; + uint32_t data_addr; + } t277; + + struct __packed { + uint32_t cmd_addr; + uint32_t wr_cmd_data; + uint32_t data_addr; + uint32_t wr_data; + } t278; }; }; @@ -206,6 +227,7 @@ struct __packed qla27xx_fwdt_entry { 
#define T262_RAM_AREA_EXTERNAL_RAM 2 #define T262_RAM_AREA_SHARED_RAM 3 #define T262_RAM_AREA_DDR_RAM 4 +#define T262_RAM_AREA_MISC 5 #define T263_QUEUE_TYPE_REQ 1 #define T263_QUEUE_TYPE_RSP 2 diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index ca7945cb959b..0690dac24081 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.00.00.12-k" +#define QLA2XXX_VERSION "10.00.00.14-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 0 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 283e6b80abb5..8a3075d17c63 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -420,26 +420,6 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) return qlt_rdy_to_xfer(cmd); } -static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) -{ - unsigned long flags; - /* - * Check for WRITE_PENDING status to determine if we need to wait for - * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data(). - */ - spin_lock_irqsave(&se_cmd->t_state_lock, flags); - if (se_cmd->t_state == TRANSPORT_WRITE_PENDING || - se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); - wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, - 50); - return 0; - } - spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); - - return 0; -} - static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) { return; @@ -537,15 +517,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) cmd->qpair->tgt_counters.qla_core_ret_ctio++; if (!cmd->write_data_transferred) { - /* - * Check if se_cmd has already been aborted via LUN_RESET, and - * waiting upon completion in tcm_qla2xxx_write_pending_status() - */ - if (cmd->se_cmd.transport_state & CMD_T_ABORTED) { - complete(&cmd->se_cmd.t_transport_stop_comp); - return; - } - switch (cmd->dif_err_code) { case DIF_ERR_GRD: cmd->se_cmd.pi_err = @@ -1902,7 +1873,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = { .sess_get_index = tcm_qla2xxx_sess_get_index, .sess_get_initiator_sid = NULL, .write_pending = tcm_qla2xxx_write_pending, - .write_pending_status = tcm_qla2xxx_write_pending_status, .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, .get_cmd_state = tcm_qla2xxx_get_cmd_state, .queue_data_in = tcm_qla2xxx_queue_data_in, @@ -1943,7 +1913,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { .sess_get_index = tcm_qla2xxx_sess_get_index, .sess_get_initiator_sid = NULL, .write_pending = tcm_qla2xxx_write_pending, - .write_pending_status = tcm_qla2xxx_write_pending_status, .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, .get_cmd_state = tcm_qla2xxx_get_cmd_state, .queue_data_in = tcm_qla2xxx_queue_data_in, diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index a77bfb224248..16a18d5d856f 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -2875,7 +2875,7 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess, chap_tbl.secret_len); } } - /* allow fall-through */ + /* fall through */ default: return iscsi_session_get_param(cls_sess, param, buf); } diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index e35ce762d454..0e22512bd3e4 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1314,8 +1314,7 
@@ static int qpti_sbus_probe(struct platform_device *op) qpti->qhost = host; qpti->op = op; qpti->qpti_id = nqptis; - strcpy(qpti->prom_name, op->dev.of_node->name); - qpti->is_pti = strcmp(qpti->prom_name, "QLGC,isp"); + qpti->is_pti = !of_node_name_eq(op->dev.of_node, "QLGC,isp"); if (qpti_map_regs(qpti) < 0) goto fail_unlink; diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h index 884ad72ade57..2b6374e08a7d 100644 --- a/drivers/scsi/qlogicpti.h +++ b/drivers/scsi/qlogicpti.h @@ -364,7 +364,6 @@ struct qlogicpti { int qpti_id; int scsi_id; int prom_node; - char prom_name[64]; int irq; char differential, ultra, clock; unsigned char bursts; @@ -379,7 +378,7 @@ struct qlogicpti { #define SREG_IMASK 0x0c /* Interrupt level */ #define SREG_SPMASK 0x03 /* Mask for switch pack */ unsigned char swsreg; - unsigned int + unsigned int gotirq : 1, /* this instance got an irq */ is_pti : 1; /* Non-zero if this is a PTI board. */ }; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 7675ff0ca2ea..99a7b9f520ae 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -174,22 +174,6 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) } #endif -/** - * scsi_cmd_get_serial - Assign a serial number to a command - * @host: the scsi host - * @cmd: command to assign serial number to - * - * Description: a serial number identifies a request for error recovery - * and debugging purposes. Protected by the Host_Lock of host. - */ -void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) -{ - cmd->serial_number = host->cmd_serial_number++; - if (cmd->serial_number == 0) - cmd->serial_number = host->cmd_serial_number++; -} -EXPORT_SYMBOL(scsi_cmd_get_serial); - /** * scsi_finish_command - cleanup and pass command back to upper layer * @cmd: the command diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 661512bec3ac..2740a90501a0 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -62,7 +62,7 @@ /* make sure inq_product_rev string corresponds to this version */ #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ -static const char *sdebug_version_date = "20180128"; +static const char *sdebug_version_date = "20190125"; #define MY_NAME "scsi_debug" @@ -76,6 +76,7 @@ static const char *sdebug_version_date = "20180128"; #define LBA_OUT_OF_RANGE 0x21 #define INVALID_FIELD_IN_CDB 0x24 #define INVALID_FIELD_IN_PARAM_LIST 0x26 +#define WRITE_PROTECTED 0x27 #define UA_RESET_ASC 0x29 #define UA_CHANGED_ASC 0x2a #define TARGET_CHANGED_ASC 0x3f @@ -351,12 +352,11 @@ enum sdeb_opcode_index { SDEB_I_ATA_PT = 22, /* 12, 16 */ SDEB_I_SEND_DIAG = 23, SDEB_I_UNMAP = 24, - SDEB_I_XDWRITEREAD = 25, /* 10 only */ - SDEB_I_WRITE_BUFFER = 26, - SDEB_I_WRITE_SAME = 27, /* 10, 16 */ - SDEB_I_SYNC_CACHE = 28, /* 10, 16 */ - SDEB_I_COMP_WRITE = 29, - SDEB_I_LAST_ELEMENT = 30, /* keep this last (previous + 1) */ + SDEB_I_WRITE_BUFFER = 25, + SDEB_I_WRITE_SAME = 26, /* 10, 16 */ + SDEB_I_SYNC_CACHE = 27, /* 10, 16 */ + SDEB_I_COMP_WRITE = 28, + SDEB_I_LAST_ELEMENT = 29, /* keep this last (previous + 1) */ }; @@ -377,7 +377,7 @@ static const unsigned char opcode_ind_arr[256] = { /* 0x40; 0x40->0x5f: 10 byte cdbs */ 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0, - 0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, + 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE, SDEB_I_RELEASE, 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0, /* 0x60; 0x60->0x7d are reserved, 0x7e is 
"extended cdb" */ @@ -430,7 +430,6 @@ static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *); -static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *); static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *); @@ -600,9 +599,6 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */ {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* 25 */ - {0, 0x53, 0, F_D_IN | F_D_OUT | FF_MEDIA_IO, resp_xdwriteread_10, - NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, - 0, 0, 0, 0, 0, 0} }, /* XDWRITEREAD(10) */ {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL, {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} }, /* WRITE_BUFFER */ @@ -618,7 +614,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */ -/* 30 */ +/* 29 */ {0xff, 0, 0, 0, NULL, NULL, /* terminating element */ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, }; @@ -673,6 +669,7 @@ static bool sdebug_verbose; static bool have_dif_prot; static bool write_since_sync; static bool sdebug_statistics = DEF_STATISTICS; +static bool sdebug_wp; static unsigned int sdebug_store_sectors; static sector_t sdebug_capacity; /* in sectors */ @@ -735,7 +732,7 @@ static inline bool scsi_debug_lbp(void) (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); } -static void *fake_store(unsigned long long lba) +static void *lba2fake_store(unsigned long long lba) { lba = do_div(lba, sdebug_store_sectors); @@ -836,7 +833,8 @@ static void mk_sense_invalid_opcode(struct scsi_cmnd *scp) mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0); } -static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg) +static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd, + void __user *arg) { if (sdebug_verbose) { if (0x1261 == cmd) @@ -1010,16 +1008,16 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, int arr_len) { int act_len; - struct scsi_data_buffer *sdb = scsi_in(scp); + struct scsi_data_buffer *sdb = &scp->sdb; if (!sdb->length) return 0; - if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) + if (scp->sc_data_direction != DMA_FROM_DEVICE) return DID_ERROR << 16; act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents, arr, arr_len); - sdb->resid = scsi_bufflen(scp) - act_len; + scsi_set_resid(scp, scsi_bufflen(scp) - act_len); return 0; } @@ -1033,20 +1031,21 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, int arr_len, unsigned int off_dst) { int act_len, n; - struct scsi_data_buffer *sdb = scsi_in(scp); + struct scsi_data_buffer *sdb = &scp->sdb; off_t skip = off_dst; if (sdb->length <= off_dst) return 0; - if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE)) + if (scp->sc_data_direction != DMA_FROM_DEVICE) return DID_ERROR << 16; act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents, arr, arr_len, skip); 
pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n", - __func__, off_dst, scsi_bufflen(scp), act_len, sdb->resid); + __func__, off_dst, scsi_bufflen(scp), act_len, + scsi_get_resid(scp)); n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len); - sdb->resid = min(sdb->resid, n); + scsi_set_resid(scp, min(scsi_get_resid(scp), n)); return 0; } @@ -1058,7 +1057,7 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, { if (!scsi_bufflen(scp)) return 0; - if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE)) + if (scp->sc_data_direction != DMA_TO_DEVICE) return -1; return scsi_sg_copy_to_buffer(scp, arr, arr_len); @@ -2146,9 +2145,11 @@ static int resp_mode_sense(struct scsi_cmnd *scp, target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) + (devip->target * 1000) - 3; /* for disks set DPOFUA bit and clear write protect (WP) bit */ - if (is_disk) + if (is_disk) { dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */ - else + if (sdebug_wp) + dev_spec |= 0x80; + } else dev_spec = 0x0; if (msense_6) { arr[2] = dev_spec; @@ -2331,6 +2332,10 @@ static int resp_mode_select(struct scsi_cmnd *scp, if (ctrl_m_pg[1] == arr[off + 1]) { memcpy(ctrl_m_pg + 2, arr + off + 2, sizeof(ctrl_m_pg) - 2); + if (ctrl_m_pg[4] & 0x8) + sdebug_wp = true; + else + sdebug_wp = false; sdebug_dsense = !!(ctrl_m_pg[2] & 0x4); goto set_mode_changed_ua; } @@ -2455,8 +2460,8 @@ static int resp_log_sense(struct scsi_cmnd *scp, min(len, SDEBUG_MAX_INQ_ARR_SZ)); } -static int check_device_access_params(struct scsi_cmnd *scp, - unsigned long long lba, unsigned int num) +static inline int check_device_access_params(struct scsi_cmnd *scp, + unsigned long long lba, unsigned int num, bool write) { if (lba + num > sdebug_capacity) { mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); @@ -2468,6 +2473,10 @@ static int check_device_access_params(struct scsi_cmnd *scp, mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return check_condition_result; } + if (write && unlikely(sdebug_wp)) { + mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2); + return check_condition_result; + } return 0; } @@ -2477,21 +2486,19 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba, { int ret; u64 block, rest = 0; - struct scsi_data_buffer *sdb; + struct scsi_data_buffer *sdb = &scmd->sdb; enum dma_data_direction dir; if (do_write) { - sdb = scsi_out(scmd); dir = DMA_TO_DEVICE; write_since_sync = true; } else { - sdb = scsi_in(scmd); dir = DMA_FROM_DEVICE; } if (!sdb->length) return 0; - if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir)) + if (scmd->sc_data_direction != dir) return -1; block = do_div(lba, sdebug_store_sectors); @@ -2514,8 +2521,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba, return ret; } -/* If fake_store(lba,num) compares equal to arr(num), then copy top half of - * arr into fake_store(lba,num) and return true. If comparison fails then +/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of + * arr into lba2fake_store(lba,num) and return true. If comparison fails then * return false. 
*/ static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) { @@ -2643,7 +2650,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, if (sdt->app_tag == cpu_to_be16(0xffff)) continue; - ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); + ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba); if (ret) { dif_errors++; return ret; @@ -2728,18 +2735,9 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) } else sqcp = NULL; - /* inline check_device_access_params() */ - if (unlikely(lba + num > sdebug_capacity)) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); - return check_condition_result; - } - /* transfer length excessive (tie in to block limits VPD page) */ - if (unlikely(num > sdebug_store_sectors)) { - /* needs work to find which cdb byte 'num' comes from */ - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); - return check_condition_result; - } - + ret = check_device_access_params(scp, lba, num, false); + if (ret) + return ret; if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) && (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) && ((lba + num) > sdebug_medium_error_start))) { @@ -2774,7 +2772,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (unlikely(ret == -1)) return DID_ERROR << 16; - scsi_in(scp)->resid = scsi_bufflen(scp) - ret; + scsi_set_resid(scp, scsi_bufflen(scp) - ret); if (unlikely(sqcp)) { if (sqcp->inj_recovered) { @@ -3031,19 +3029,9 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) sdev_printk(KERN_ERR, scp->device, "Unprotected WR " "to DIF device\n"); } - - /* inline check_device_access_params() */ - if (unlikely(lba + num > sdebug_capacity)) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); - return check_condition_result; - } - /* transfer length excessive (tie in to block limits VPD page) */ - if (unlikely(num > sdebug_store_sectors)) { - /* needs work to find which cdb byte 'num' comes from */ - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); - return check_condition_result; - } - + ret = check_device_access_params(scp, lba, num, true); + if (ret) + return ret; write_lock_irqsave(&atomic_rw, iflags); /* DIX + T10 DIF */ @@ -3182,7 +3170,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, my_name, __func__, k, lba, num, sg_off); if (num == 0) continue; - ret = check_device_access_params(scp, lba, num); + ret = check_device_access_params(scp, lba, num, true); if (ret) goto err_out_unlock; num_by = num * lb_size; @@ -3261,12 +3249,14 @@ err_out: static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba, bool unmap, bool ndob) { + int ret; unsigned long iflags; unsigned long long i; - int ret; - u64 lba_off; + u32 lb_size = sdebug_sector_size; + u64 block, lbaa; + u8 *fs1p; - ret = check_device_access_params(scp, lba, num); + ret = check_device_access_params(scp, lba, num, true); if (ret) return ret; @@ -3276,31 +3266,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, unmap_region(lba, num); goto out; } - - lba_off = lba * sdebug_sector_size; + lbaa = lba; + block = do_div(lbaa, sdebug_store_sectors); /* if ndob then zero 1 logical block, else fetch 1 logical block */ + fs1p = fake_storep + (block * lb_size); if (ndob) { - memset(fake_storep + lba_off, 0, sdebug_sector_size); + memset(fs1p, 0, lb_size); ret = 0; } else - ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, - sdebug_sector_size); + 
ret = fetch_to_dev_buffer(scp, fs1p, lb_size); if (-1 == ret) { write_unlock_irqrestore(&atomic_rw, iflags); return DID_ERROR << 16; - } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) + } else if (sdebug_verbose && !ndob && (ret < lb_size)) sdev_printk(KERN_INFO, scp->device, "%s: %s: lb size=%u, IO sent=%d bytes\n", - my_name, "write same", - sdebug_sector_size, ret); + my_name, "write same", lb_size, ret); /* Copy first sector to remaining blocks */ - for (i = 1 ; i < num ; i++) - memcpy(fake_storep + ((lba + i) * sdebug_sector_size), - fake_storep + lba_off, - sdebug_sector_size); - + for (i = 1 ; i < num ; i++) { + lbaa = lba + i; + block = do_div(lbaa, sdebug_store_sectors); + memmove(fake_storep + (block * lb_size), fs1p, lb_size); + } if (scsi_debug_lbp()) map_region(lba, num); out: @@ -3439,18 +3428,9 @@ static int resp_comp_write(struct scsi_cmnd *scp, (cmd[1] & 0xe0) == 0) sdev_printk(KERN_ERR, scp->device, "Unprotected WR " "to DIF device\n"); - - /* inline check_device_access_params() */ - if (lba + num > sdebug_capacity) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0); - return check_condition_result; - } - /* transfer length excessive (tie in to block limits VPD page) */ - if (num > sdebug_store_sectors) { - /* needs work to find which cdb byte 'num' comes from */ - mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); - return check_condition_result; - } + ret = check_device_access_params(scp, lba, num, false); + if (ret) + return ret; dnum = 2 * num; arr = kcalloc(lb_size, dnum, GFP_ATOMIC); if (NULL == arr) { @@ -3533,7 +3513,7 @@ static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) unsigned long long lba = get_unaligned_be64(&desc[i].lba); unsigned int num = get_unaligned_be32(&desc[i].blocks); - ret = check_device_access_params(scp, lba, num); + ret = check_device_access_params(scp, lba, num, true); if (ret) goto out; @@ -3566,7 +3546,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp, if (alloc_len < 24) return 0; - ret = check_device_access_params(scp, lba, 1); + ret = check_device_access_params(scp, lba, 1, false); if (ret) return ret; @@ -3718,68 +3698,6 @@ static int resp_report_luns(struct scsi_cmnd *scp, return res; } -static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba, - unsigned int num, struct sdebug_dev_info *devip) -{ - int j; - unsigned char *kaddr, *buf; - unsigned int offset; - struct scsi_data_buffer *sdb = scsi_in(scp); - struct sg_mapping_iter miter; - - /* better not to use temporary buffer. 
*/ - buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC); - if (!buf) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, - INSUFF_RES_ASCQ); - return check_condition_result; - } - - scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp)); - - offset = 0; - sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents, - SG_MITER_ATOMIC | SG_MITER_TO_SG); - - while (sg_miter_next(&miter)) { - kaddr = miter.addr; - for (j = 0; j < miter.length; j++) - *(kaddr + j) ^= *(buf + offset + j); - - offset += miter.length; - } - sg_miter_stop(&miter); - kfree(buf); - - return 0; -} - -static int resp_xdwriteread_10(struct scsi_cmnd *scp, - struct sdebug_dev_info *devip) -{ - u8 *cmd = scp->cmnd; - u64 lba; - u32 num; - int errsts; - - if (!scsi_bidi_cmnd(scp)) { - mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, - INSUFF_RES_ASCQ); - return check_condition_result; - } - errsts = resp_read_dt0(scp, devip); - if (errsts) - return errsts; - if (!(cmd[1] & 0x4)) { /* DISABLE_WRITE is not set */ - errsts = resp_write_dt0(scp, devip); - if (errsts) - return errsts; - } - lba = get_unaligned_be32(cmd + 2); - num = get_unaligned_be16(cmd + 7); - return resp_xdwriteread(scp, lba, num, devip); -} - static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd) { u32 tag = blk_mq_unique_tag(cmnd->request); @@ -3953,7 +3871,6 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp) if (sdebug_verbose) pr_info("slave_alloc <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); - blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue); return 0; } @@ -4553,6 +4470,7 @@ module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR); module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO); module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int, S_IRUGO | S_IWUSR); +module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR); module_param_named(write_same_length, sdebug_write_same_length, int, S_IRUGO | S_IWUSR); @@ -4612,6 +4530,7 @@ MODULE_PARM_DESC(uuid_ctl, "1->use uuid for lu name, 0->don't, 2->all use same (def=0)"); MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)"); MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); +MODULE_PARM_DESC(wp, "Write Protect (def=0)"); MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); #define SDEBUG_INFO_LEN 256 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 16eef068e9e9..1b8378f36139 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -965,7 +965,6 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, ses->cmnd = scmd->cmnd; ses->data_direction = scmd->sc_data_direction; ses->sdb = scmd->sdb; - ses->next_rq = scmd->request->next_rq; ses->result = scmd->result; ses->underflow = scmd->underflow; ses->prot_op = scmd->prot_op; @@ -976,7 +975,6 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, scmd->cmnd = ses->eh_cmnd; memset(scmd->cmnd, 0, BLK_MAX_CDB); memset(&scmd->sdb, 0, sizeof(scmd->sdb)); - scmd->request->next_rq = NULL; scmd->result = 0; if (sense_bytes) { @@ -1029,7 +1027,6 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) scmd->cmnd = ses->cmnd; scmd->sc_data_direction = ses->data_direction; scmd->sdb = ses->sdb; - scmd->request->next_rq = ses->next_rq; scmd->result = ses->result; scmd->underflow = ses->underflow; scmd->prot_op = ses->prot_op; diff --git a/drivers/scsi/scsi_lib.c 
b/drivers/scsi/scsi_lib.c index 6d65ac584eba..20189675677a 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -316,7 +316,6 @@ EXPORT_SYMBOL(__scsi_execute); */ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) { - cmd->serial_number = 0; scsi_set_resid(cmd, 0); memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (cmd->cmd_len == 0) @@ -556,15 +555,8 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd) static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) { - struct scsi_data_buffer *sdb; - if (cmd->sdb.table.nents) sg_free_table_chained(&cmd->sdb.table, true); - if (cmd->request->next_rq) { - sdb = cmd->request->next_rq->special; - if (sdb) - sg_free_table_chained(&sdb->table, true); - } if (scsi_prot_sg_count(cmd)) sg_free_table_chained(&cmd->prot_sdb->table, true); } @@ -578,7 +570,7 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) /* Returns false when no more bytes to process, true if there are more */ static bool scsi_end_request(struct request *req, blk_status_t error, - unsigned int bytes, unsigned int bidi_bytes) + unsigned int bytes) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); struct scsi_device *sdev = cmd->device; @@ -587,11 +579,6 @@ static bool scsi_end_request(struct request *req, blk_status_t error, if (blk_update_request(req, error, bytes)) return true; - /* Bidi request must be completed as a whole */ - if (unlikely(bidi_bytes) && - blk_update_request(req->next_rq, error, bidi_bytes)) - return true; - if (blk_queue_add_random(q)) add_disk_randomness(req->rq_disk); @@ -655,6 +642,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) set_host_byte(cmd, DID_OK); return BLK_STS_TARGET; case DID_NEXUS_FAILURE: + set_host_byte(cmd, DID_OK); return BLK_STS_NEXUS; case DID_ALLOC_FAILURE: set_host_byte(cmd, DID_OK); @@ -816,7 +804,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) scsi_print_command(cmd); } } - if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0)) + if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req))) return; /*FALLTHRU*/ case ACTION_REPREP: @@ -950,30 +938,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * scsi_result_to_blk_status may have reset the host_byte */ scsi_req(req)->result = cmd->result; - scsi_req(req)->resid_len = scsi_get_resid(cmd); - - if (unlikely(scsi_bidi_cmnd(cmd))) { - /* - * Bidi commands Must be complete as a whole, - * both sides at once. - */ - scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid; - if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req), - blk_rq_bytes(req->next_rq))) - WARN_ONCE(true, - "Bidi command with remaining bytes"); - return; - } - } - - /* no bidi support yet, other than in pass-through */ - if (unlikely(blk_bidi_rq(req))) { - WARN_ONCE(true, "Only support bidi command in passthrough"); - scmd_printk(KERN_ERR, cmd, "Killing bidi command\n"); - if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req), - blk_rq_bytes(req->next_rq))) - WARN_ONCE(true, "Bidi command with remaining bytes"); - return; } /* @@ -990,13 +954,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) * to retry code. Fast path should return in this block. */ if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) { - if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0))) + if (likely(!scsi_end_request(req, blk_stat, good_bytes))) return; /* no bytes remaining */ } /* Kill remainder if no retries. 
*/ if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) { - if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0)) + if (scsi_end_request(req, blk_stat, blk_rq_bytes(req))) WARN_ONCE(true, "Bytes remaining after failed, no-retry command"); return; @@ -1058,12 +1022,6 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd) if (ret) return ret; - if (blk_bidi_rq(rq)) { - ret = scsi_init_sgtable(rq->next_rq, rq->next_rq->special); - if (ret) - goto out_free_sgtables; - } - if (blk_integrity_rq(rq)) { struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; int ivecs, count; @@ -1607,10 +1565,7 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) scsi_init_command(sdev, cmd); - req->special = cmd; - cmd->request = req; - cmd->tag = req->tag; cmd->prot_op = SCSI_PROT_NORMAL; @@ -1624,17 +1579,6 @@ static blk_status_t scsi_mq_prep_fn(struct request *req) (struct scatterlist *)(cmd->prot_sdb + 1); } - if (blk_bidi_rq(req)) { - struct request *next_rq = req->next_rq; - struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq); - - memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer)); - bidi_sdb->table.sgl = - (struct scatterlist *)(bidi_sdb + 1); - - next_rq->special = bidi_sdb; - } - blk_mq_start_request(req); return scsi_setup_cmnd(sdev, req); @@ -1712,13 +1656,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, if (!scsi_host_queue_ready(q, shost, sdev)) goto out_dec_target_busy; - clear_bit(SCMD_STATE_COMPLETE, &cmd->state); if (!(req->rq_flags & RQF_DONTPREP)) { ret = scsi_mq_prep_fn(req); if (ret != BLK_STS_OK) goto out_dec_host_busy; req->rq_flags |= RQF_DONTPREP; } else { + clear_bit(SCMD_STATE_COMPLETE, &cmd->state); blk_mq_start_request(req); } @@ -1899,7 +1843,7 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) shost->tag_set.queue_depth = shost->can_queue; shost->tag_set.cmd_size = cmd_size; shost->tag_set.numa_node = NUMA_NO_NODE; - shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; shost->tag_set.flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); shost->tag_set.driver_data = shost; @@ -2597,7 +2541,6 @@ void scsi_device_resume(struct scsi_device *sdev) * device deleted during suspend) */ mutex_lock(&sdev->state_mutex); - WARN_ON_ONCE(!sdev->quiesced_by); sdev->quiesced_by = NULL; blk_clear_pm_only(sdev->request_queue); if (sdev->sdev_state == SDEV_QUIESCE) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index dd0d516f65e2..53380e07b40e 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -220,7 +220,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, - GFP_ATOMIC); + GFP_KERNEL); if (!sdev) goto out; @@ -788,7 +788,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, */ sdev->inquiry = kmemdup(inq_result, max_t(size_t, sdev->inquiry_len, 36), - GFP_ATOMIC); + GFP_KERNEL); if (sdev->inquiry == NULL) return SCSI_SCAN_NO_RESPONSE; @@ -1079,7 +1079,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, if (!sdev) goto out; - result = kmalloc(result_len, GFP_ATOMIC | + result = kmalloc(result_len, GFP_KERNEL | ((shost->unchecked_isa_dma) ? 
__GFP_DMA : 0)); if (!result) goto out_free_sdev; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 692b46937e52..60f1a81d2034 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -213,7 +213,6 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) to_sas_host_attrs(shost)->q = q; } - blk_queue_flag_set(QUEUE_FLAG_BIDI, q); return 0; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b2da8a00ec33..251db30d0882 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -665,6 +665,68 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, } #endif /* CONFIG_BLK_SED_OPAL */ +/* + * Look up the DIX operation based on whether the command is read or + * write and whether dix and dif are enabled. + */ +static unsigned int sd_prot_op(bool write, bool dix, bool dif) +{ + /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */ + static const unsigned int ops[] = { /* wrt dix dif */ + SCSI_PROT_NORMAL, /* 0 0 0 */ + SCSI_PROT_READ_STRIP, /* 0 0 1 */ + SCSI_PROT_READ_INSERT, /* 0 1 0 */ + SCSI_PROT_READ_PASS, /* 0 1 1 */ + SCSI_PROT_NORMAL, /* 1 0 0 */ + SCSI_PROT_WRITE_INSERT, /* 1 0 1 */ + SCSI_PROT_WRITE_STRIP, /* 1 1 0 */ + SCSI_PROT_WRITE_PASS, /* 1 1 1 */ + }; + + return ops[write << 2 | dix << 1 | dif]; +} + +/* + * Returns a mask of the protection flags that are valid for a given DIX + * operation. + */ +static unsigned int sd_prot_flag_mask(unsigned int prot_op) +{ + static const unsigned int flag_mask[] = { + [SCSI_PROT_NORMAL] = 0, + + [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT, + + [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + + [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + + [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_REF_INCREMENT, + + [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + + [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI | + SCSI_PROT_GUARD_CHECK | + SCSI_PROT_REF_CHECK | + SCSI_PROT_REF_INCREMENT | + SCSI_PROT_IP_CHECKSUM, + }; + + return flag_mask[prot_op]; +} + static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, unsigned int dix, unsigned int dif) { @@ -761,8 +823,8 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) { struct scsi_device *sdp = cmd->device; struct request *rq = cmd->request; - u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); - u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); unsigned int data_len = 24; char *buf; @@ -781,13 +843,12 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) buf = page_address(rq->special_vec.bv_page); put_unaligned_be16(6 + 16, &buf[0]); put_unaligned_be16(16, &buf[2]); - put_unaligned_be64(sector, &buf[8]); - put_unaligned_be32(nr_sectors, &buf[16]); + put_unaligned_be64(lba, &buf[8]); + put_unaligned_be32(nr_blocks, &buf[16]); cmd->allowed = SD_MAX_RETRIES; cmd->transfersize = data_len; rq->timeout = SD_TIMEOUT; - scsi_req(rq)->resid_len = data_len; return scsi_init_io(cmd); } @@ -797,8 +858,8 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, { struct scsi_device *sdp = 
cmd->device; struct request *rq = cmd->request; - u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); - u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); u32 data_len = sdp->sector_size; rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); @@ -813,13 +874,12 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, cmd->cmnd[0] = WRITE_SAME_16; if (unmap) cmd->cmnd[1] = 0x8; /* UNMAP */ - put_unaligned_be64(sector, &cmd->cmnd[2]); - put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); cmd->allowed = SD_MAX_RETRIES; cmd->transfersize = data_len; rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; - scsi_req(rq)->resid_len = data_len; return scsi_init_io(cmd); } @@ -829,8 +889,8 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, { struct scsi_device *sdp = cmd->device; struct request *rq = cmd->request; - u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); - u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); u32 data_len = sdp->sector_size; rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC); @@ -845,13 +905,12 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, cmd->cmnd[0] = WRITE_SAME; if (unmap) cmd->cmnd[1] = 0x8; /* UNMAP */ - put_unaligned_be32(sector, &cmd->cmnd[2]); - put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); + put_unaligned_be32(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); cmd->allowed = SD_MAX_RETRIES; cmd->transfersize = data_len; rq->timeout = unmap ? 
SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT; - scsi_req(rq)->resid_len = data_len; return scsi_init_io(cmd); } @@ -861,8 +920,8 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) struct request *rq = cmd->request; struct scsi_device *sdp = cmd->device; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); - u64 sector = blk_rq_pos(rq) >> (ilog2(sdp->sector_size) - 9); - u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); if (!(rq->cmd_flags & REQ_NOUNMAP)) { switch (sdkp->zeroing_mode) { @@ -876,7 +935,7 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) if (sdp->no_write_same) return BLK_STS_TARGET; - if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) + if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) return sd_setup_write_same16_cmnd(cmd, false); return sd_setup_write_same10_cmnd(cmd, false); @@ -957,9 +1016,8 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) struct scsi_device *sdp = cmd->device; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); struct bio *bio = rq->bio; - sector_t sector = blk_rq_pos(rq); - unsigned int nr_sectors = blk_rq_sectors(rq); - unsigned int nr_bytes = blk_rq_bytes(rq); + u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); + u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); blk_status_t ret; if (sdkp->device->no_write_same) @@ -967,21 +1025,18 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); - sector >>= ilog2(sdp->sector_size) - 9; - nr_sectors >>= ilog2(sdp->sector_size) - 9; - rq->timeout = SD_WRITE_SAME_TIMEOUT; - if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) { + if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) { cmd->cmd_len = 16; cmd->cmnd[0] = WRITE_SAME_16; - put_unaligned_be64(sector, &cmd->cmnd[2]); - put_unaligned_be32(nr_sectors, &cmd->cmnd[10]); + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); } else { cmd->cmd_len = 10; cmd->cmnd[0] = WRITE_SAME; - put_unaligned_be32(sector, &cmd->cmnd[2]); - put_unaligned_be16(nr_sectors, &cmd->cmnd[7]); + put_unaligned_be32(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); } cmd->transfersize = sdp->sector_size; @@ -999,7 +1054,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) */ rq->__data_len = sdp->sector_size; ret = scsi_init_io(cmd); - rq->__data_len = nr_bytes; + rq->__data_len = blk_rq_bytes(rq); return ret; } @@ -1020,224 +1075,186 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) return BLK_STS_OK; } -static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) +static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags) { - struct request *rq = SCpnt->request; - struct scsi_device *sdp = SCpnt->device; - struct gendisk *disk = rq->rq_disk; - struct scsi_disk *sdkp = scsi_disk(disk); - sector_t block = blk_rq_pos(rq); + cmd->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC); + if (unlikely(cmd->cmnd == NULL)) + return BLK_STS_RESOURCE; + + cmd->cmd_len = SD_EXT_CDB_SIZE; + memset(cmd->cmnd, 0, cmd->cmd_len); + + cmd->cmnd[0] = VARIABLE_LENGTH_CMD; + cmd->cmnd[7] = 0x18; /* Additional CDB len */ + cmd->cmnd[9] = write ? 
WRITE_32 : READ_32; + cmd->cmnd[10] = flags; + put_unaligned_be64(lba, &cmd->cmnd[12]); + put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */ + put_unaligned_be32(nr_blocks, &cmd->cmnd[28]); + + return BLK_STS_OK; +} + +static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags) +{ + cmd->cmd_len = 16; + cmd->cmnd[0] = write ? WRITE_16 : READ_16; + cmd->cmnd[1] = flags; + cmd->cmnd[14] = 0; + cmd->cmnd[15] = 0; + put_unaligned_be64(lba, &cmd->cmnd[2]); + put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); + + return BLK_STS_OK; +} + +static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags) +{ + cmd->cmd_len = 10; + cmd->cmnd[0] = write ? WRITE_10 : READ_10; + cmd->cmnd[1] = flags; + cmd->cmnd[6] = 0; + cmd->cmnd[9] = 0; + put_unaligned_be32(lba, &cmd->cmnd[2]); + put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); + + return BLK_STS_OK; +} + +static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, + sector_t lba, unsigned int nr_blocks, + unsigned char flags) +{ + /* Avoid that 0 blocks gets translated into 256 blocks. */ + if (WARN_ON_ONCE(nr_blocks == 0)) + return BLK_STS_IOERR; + + if (unlikely(flags & 0x8)) { + /* + * This happens only if this drive failed 10byte rw + * command with ILLEGAL_REQUEST during operation and + * thus turned off use_10_for_rw. + */ + scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n"); + return BLK_STS_IOERR; + } + + cmd->cmd_len = 6; + cmd->cmnd[0] = write ? WRITE_6 : READ_6; + cmd->cmnd[1] = (lba >> 16) & 0x1f; + cmd->cmnd[2] = (lba >> 8) & 0xff; + cmd->cmnd[3] = lba & 0xff; + cmd->cmnd[4] = nr_blocks; + cmd->cmnd[5] = 0; + + return BLK_STS_OK; +} + +static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) +{ + struct request *rq = cmd->request; + struct scsi_device *sdp = cmd->device; + struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); + sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); sector_t threshold; - unsigned int this_count = blk_rq_sectors(rq); - unsigned int dif, dix; - unsigned char protect; + unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); + bool dif, dix; + unsigned int mask = logical_to_sectors(sdp, 1) - 1; + bool write = rq_data_dir(rq) == WRITE; + unsigned char protect, fua; blk_status_t ret; - ret = scsi_init_io(SCpnt); + ret = scsi_init_io(cmd); if (ret != BLK_STS_OK) return ret; - WARN_ON_ONCE(SCpnt != rq->special); - SCSI_LOG_HLQUEUE(1, - scmd_printk(KERN_INFO, SCpnt, - "%s: block=%llu, count=%d\n", - __func__, (unsigned long long)block, this_count)); - - if (!sdp || !scsi_device_online(sdp) || - block + blk_rq_sectors(rq) > get_capacity(disk)) { - SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, - "Finishing %u sectors\n", - blk_rq_sectors(rq))); - SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, - "Retry with 0x%p\n", SCpnt)); + if (!scsi_device_online(sdp) || sdp->changed) { + scmd_printk(KERN_ERR, cmd, "device offline or changed\n"); return BLK_STS_IOERR; } - if (sdp->changed) { - /* - * quietly refuse to do anything to a changed disc until - * the changed bit has been reset - */ - /* printk("SCSI disk has been changed or is not present. 
Prohibiting further I/O.\n"); */ + if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) { + scmd_printk(KERN_ERR, cmd, "access beyond end of device\n"); + return BLK_STS_IOERR; + } + + if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) { + scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n"); return BLK_STS_IOERR; } /* - * Some SD card readers can't handle multi-sector accesses which touch - * the last one or two hardware sectors. Split accesses as needed. + * Some SD card readers can't handle accesses which touch the + * last one or two logical blocks. Split accesses as needed. */ - threshold = get_capacity(disk) - SD_LAST_BUGGY_SECTORS * - (sdp->sector_size / 512); + threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS; - if (unlikely(sdp->last_sector_bug && block + this_count > threshold)) { - if (block < threshold) { + if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) { + if (lba < threshold) { /* Access up to the threshold but not beyond */ - this_count = threshold - block; + nr_blocks = threshold - lba; } else { - /* Access only a single hardware sector */ - this_count = sdp->sector_size / 512; + /* Access only a single logical block */ + nr_blocks = 1; } } - SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, "block=%llu\n", - (unsigned long long)block)); + fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0; + dix = scsi_prot_sg_count(cmd); + dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); - /* - * If we have a 1K hardware sectorsize, prevent access to single - * 512 byte sectors. In theory we could handle this - in fact - * the scsi cdrom driver must be able to handle this because - * we typically use 1K blocksizes, and cdroms typically have - * 2K hardware sectorsizes. Of course, things are simpler - * with the cdrom, since it is read-only. For performance - * reasons, the filesystems should be able to handle this - * and not force the scsi disk driver to use bounce buffers - * for this. - */ - if (sdp->sector_size == 1024) { - if ((block & 1) || (blk_rq_sectors(rq) & 1)) { - scmd_printk(KERN_ERR, SCpnt, - "Bad block number requested\n"); - return BLK_STS_IOERR; - } - block = block >> 1; - this_count = this_count >> 1; - } - if (sdp->sector_size == 2048) { - if ((block & 3) || (blk_rq_sectors(rq) & 3)) { - scmd_printk(KERN_ERR, SCpnt, - "Bad block number requested\n"); - return BLK_STS_IOERR; - } - block = block >> 2; - this_count = this_count >> 2; - } - if (sdp->sector_size == 4096) { - if ((block & 7) || (blk_rq_sectors(rq) & 7)) { - scmd_printk(KERN_ERR, SCpnt, - "Bad block number requested\n"); - return BLK_STS_IOERR; - } - block = block >> 3; - this_count = this_count >> 3; - } - if (rq_data_dir(rq) == WRITE) { - SCpnt->cmnd[0] = WRITE_6; - - if (blk_integrity_rq(rq)) - t10_pi_prepare(SCpnt->request, sdkp->protection_type); - - } else if (rq_data_dir(rq) == READ) { - SCpnt->cmnd[0] = READ_6; - } else { - scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq)); - return BLK_STS_IOERR; - } - - SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, - "%s %d/%u 512 byte blocks.\n", - (rq_data_dir(rq) == WRITE) ? 
- "writing" : "reading", this_count, - blk_rq_sectors(rq))); - - dix = scsi_prot_sg_count(SCpnt); - dif = scsi_host_dif_capable(SCpnt->device->host, sdkp->protection_type); + if (write && dix) + t10_pi_prepare(cmd->request, sdkp->protection_type); if (dif || dix) - protect = sd_setup_protect_cmnd(SCpnt, dix, dif); + protect = sd_setup_protect_cmnd(cmd, dix, dif); else protect = 0; if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { - SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC); - - if (unlikely(!SCpnt->cmnd)) - return BLK_STS_RESOURCE; - - SCpnt->cmd_len = SD_EXT_CDB_SIZE; - memset(SCpnt->cmnd, 0, SCpnt->cmd_len); - SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD; - SCpnt->cmnd[7] = 0x18; - SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32; - SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); - - /* LBA */ - SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; - SCpnt->cmnd[13] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; - SCpnt->cmnd[14] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; - SCpnt->cmnd[15] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0; - SCpnt->cmnd[16] = (unsigned char) (block >> 24) & 0xff; - SCpnt->cmnd[17] = (unsigned char) (block >> 16) & 0xff; - SCpnt->cmnd[18] = (unsigned char) (block >> 8) & 0xff; - SCpnt->cmnd[19] = (unsigned char) block & 0xff; - - /* Expected Indirect LBA */ - SCpnt->cmnd[20] = (unsigned char) (block >> 24) & 0xff; - SCpnt->cmnd[21] = (unsigned char) (block >> 16) & 0xff; - SCpnt->cmnd[22] = (unsigned char) (block >> 8) & 0xff; - SCpnt->cmnd[23] = (unsigned char) block & 0xff; - - /* Transfer length */ - SCpnt->cmnd[28] = (unsigned char) (this_count >> 24) & 0xff; - SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff; - SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff; - SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; - } else if (sdp->use_16_for_rw || (this_count > 0xffff)) { - SCpnt->cmnd[0] += READ_16 - READ_6; - SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); - SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; - SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; - SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; - SCpnt->cmnd[5] = sizeof(block) > 4 ? (unsigned char) (block >> 32) & 0xff : 0; - SCpnt->cmnd[6] = (unsigned char) (block >> 24) & 0xff; - SCpnt->cmnd[7] = (unsigned char) (block >> 16) & 0xff; - SCpnt->cmnd[8] = (unsigned char) (block >> 8) & 0xff; - SCpnt->cmnd[9] = (unsigned char) block & 0xff; - SCpnt->cmnd[10] = (unsigned char) (this_count >> 24) & 0xff; - SCpnt->cmnd[11] = (unsigned char) (this_count >> 16) & 0xff; - SCpnt->cmnd[12] = (unsigned char) (this_count >> 8) & 0xff; - SCpnt->cmnd[13] = (unsigned char) this_count & 0xff; - SCpnt->cmnd[14] = SCpnt->cmnd[15] = 0; - } else if ((this_count > 0xff) || (block > 0x1fffff) || - scsi_device_protection(SCpnt->device) || - SCpnt->device->use_10_for_rw) { - SCpnt->cmnd[0] += READ_10 - READ_6; - SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 
0x8 : 0); - SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; - SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; - SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; - SCpnt->cmnd[5] = (unsigned char) block & 0xff; - SCpnt->cmnd[6] = SCpnt->cmnd[9] = 0; - SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; - SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; + ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, + protect | fua); + } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { + ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, + protect | fua); + } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || + sdp->use_10_for_rw || protect) { + ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, + protect | fua); } else { - if (unlikely(rq->cmd_flags & REQ_FUA)) { - /* - * This happens only if this drive failed - * 10byte rw command with ILLEGAL_REQUEST - * during operation and thus turned off - * use_10_for_rw. - */ - scmd_printk(KERN_ERR, SCpnt, - "FUA write on READ/WRITE(6) drive\n"); - return BLK_STS_IOERR; - } - - SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f); - SCpnt->cmnd[2] = (unsigned char) ((block >> 8) & 0xff); - SCpnt->cmnd[3] = (unsigned char) block & 0xff; - SCpnt->cmnd[4] = (unsigned char) this_count; - SCpnt->cmnd[5] = 0; + ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks, + protect | fua); } - SCpnt->sdb.length = this_count * sdp->sector_size; + + if (unlikely(ret != BLK_STS_OK)) + return ret; /* * We shouldn't disconnect in the middle of a sector, so with a dumb * host adapter, it's safe to assume that we can at least transfer * this many bytes between each connect / disconnect. */ - SCpnt->transfersize = sdp->sector_size; - SCpnt->underflow = this_count << 9; - SCpnt->allowed = SD_MAX_RETRIES; + cmd->transfersize = sdp->sector_size; + cmd->underflow = nr_blocks << 9; + cmd->allowed = SD_MAX_RETRIES; + cmd->sdb.length = nr_blocks * sdp->sector_size; + + SCSI_LOG_HLQUEUE(1, + scmd_printk(KERN_INFO, cmd, + "%s: block=%llu, count=%d\n", __func__, + (unsigned long long)blk_rq_pos(rq), + blk_rq_sectors(rq))); + SCSI_LOG_HLQUEUE(2, + scmd_printk(KERN_INFO, cmd, + "%s %d/%u 512 byte blocks.\n", + write ? 
"writing" : "reading", nr_blocks, + blk_rq_sectors(rq))); /* * This indicates that the command is ready from our end to be @@ -2549,25 +2566,25 @@ sd_print_capacity(struct scsi_disk *sdkp, int sector_size = sdkp->device->sector_size; char cap_str_2[10], cap_str_10[10]; + if (!sdkp->first_scan && old_capacity == sdkp->capacity) + return; + string_get_size(sdkp->capacity, sector_size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); string_get_size(sdkp->capacity, sector_size, - STRING_UNITS_10, cap_str_10, - sizeof(cap_str_10)); + STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); - if (sdkp->first_scan || old_capacity != sdkp->capacity) { - sd_printk(KERN_NOTICE, sdkp, - "%llu %d-byte logical blocks: (%s/%s)\n", - (unsigned long long)sdkp->capacity, - sector_size, cap_str_10, cap_str_2); + sd_printk(KERN_NOTICE, sdkp, + "%llu %d-byte logical blocks: (%s/%s)\n", + (unsigned long long)sdkp->capacity, + sector_size, cap_str_10, cap_str_2); - if (sdkp->physical_block_size != sector_size) - sd_printk(KERN_NOTICE, sdkp, - "%u-byte physical blocks\n", - sdkp->physical_block_size); + if (sdkp->physical_block_size != sector_size) + sd_printk(KERN_NOTICE, sdkp, + "%u-byte physical blocks\n", + sdkp->physical_block_size); - sd_zbc_print_zones(sdkp); - } + sd_zbc_print_zones(sdkp); } /* called with buffer of length 512 */ @@ -2951,9 +2968,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) if (rot == 1) { blk_queue_flag_set(QUEUE_FLAG_NONROT, q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); - } else { - blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); - blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { @@ -3050,6 +3064,55 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) sdkp->security = 1; } +/* + * Determine the device's preferred I/O size for reads and writes + * unless the reported value is unreasonably small, large, not a + * multiple of the physical block size, or simply garbage. + */ +static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp, + unsigned int dev_max) +{ + struct scsi_device *sdp = sdkp->device; + unsigned int opt_xfer_bytes = + logical_to_bytes(sdp, sdkp->opt_xfer_blocks); + + if (sdkp->opt_xfer_blocks > dev_max) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u logical blocks " \ + "> dev_max (%u logical blocks)\n", + sdkp->opt_xfer_blocks, dev_max); + return false; + } + + if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u logical blocks " \ + "> sd driver limit (%u logical blocks)\n", + sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS); + return false; + } + + if (opt_xfer_bytes < PAGE_SIZE) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u bytes < " \ + "PAGE_SIZE (%u bytes)\n", + opt_xfer_bytes, (unsigned int)PAGE_SIZE); + return false; + } + + if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) { + sd_first_printk(KERN_WARNING, sdkp, + "Optimal transfer size %u bytes not a " \ + "multiple of physical block size (%u bytes)\n", + opt_xfer_bytes, sdkp->physical_block_size); + return false; + } + + sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n", + opt_xfer_bytes); + return true; +} + /** * sd_revalidate_disk - called the first time a new disk is seen, * performs disk spin up, read_capacity, etc. @@ -3090,6 +3153,15 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); + /* + * set the default to rotational. 
All non-rotational devices + * support the block characteristics VPD page, which will + * cause this to be updated correctly and any device which + * doesn't support it should be treated as rotational. + */ + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); + if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); @@ -3119,15 +3191,7 @@ static int sd_revalidate_disk(struct gendisk *disk) dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); - /* - * Determine the device's preferred I/O size for reads and writes - * unless the reported value is unreasonably small, large, or - * garbage. - */ - if (sdkp->opt_xfer_blocks && - sdkp->opt_xfer_blocks <= dev_max && - sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && - logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) { + if (sd_validate_opt_xfer_size(sdkp, dev_max)) { q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); } else diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 7f43e6839bce..5796ace76225 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -132,7 +132,7 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk) #define sd_first_printk(prefix, sdsk, fmt, a...) \ do { \ - if ((sdkp)->first_scan) \ + if ((sdsk)->first_scan) \ sd_printk(prefix, sdsk, fmt, ##a); \ } while (0) @@ -188,68 +188,6 @@ static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sec return sector >> (ilog2(sdev->sector_size) - 9); } -/* - * Look up the DIX operation based on whether the command is read or - * write and whether dix and dif are enabled. - */ -static inline unsigned int sd_prot_op(bool write, bool dix, bool dif) -{ - /* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */ - const unsigned int ops[] = { /* wrt dix dif */ - SCSI_PROT_NORMAL, /* 0 0 0 */ - SCSI_PROT_READ_STRIP, /* 0 0 1 */ - SCSI_PROT_READ_INSERT, /* 0 1 0 */ - SCSI_PROT_READ_PASS, /* 0 1 1 */ - SCSI_PROT_NORMAL, /* 1 0 0 */ - SCSI_PROT_WRITE_INSERT, /* 1 0 1 */ - SCSI_PROT_WRITE_STRIP, /* 1 1 0 */ - SCSI_PROT_WRITE_PASS, /* 1 1 1 */ - }; - - return ops[write << 2 | dix << 1 | dif]; -} - -/* - * Returns a mask of the protection flags that are valid for a given DIX - * operation. 
- */ -static inline unsigned int sd_prot_flag_mask(unsigned int prot_op) -{ - const unsigned int flag_mask[] = { - [SCSI_PROT_NORMAL] = 0, - - [SCSI_PROT_READ_STRIP] = SCSI_PROT_TRANSFER_PI | - SCSI_PROT_GUARD_CHECK | - SCSI_PROT_REF_CHECK | - SCSI_PROT_REF_INCREMENT, - - [SCSI_PROT_READ_INSERT] = SCSI_PROT_REF_INCREMENT | - SCSI_PROT_IP_CHECKSUM, - - [SCSI_PROT_READ_PASS] = SCSI_PROT_TRANSFER_PI | - SCSI_PROT_GUARD_CHECK | - SCSI_PROT_REF_CHECK | - SCSI_PROT_REF_INCREMENT | - SCSI_PROT_IP_CHECKSUM, - - [SCSI_PROT_WRITE_INSERT] = SCSI_PROT_TRANSFER_PI | - SCSI_PROT_REF_INCREMENT, - - [SCSI_PROT_WRITE_STRIP] = SCSI_PROT_GUARD_CHECK | - SCSI_PROT_REF_CHECK | - SCSI_PROT_REF_INCREMENT | - SCSI_PROT_IP_CHECKSUM, - - [SCSI_PROT_WRITE_PASS] = SCSI_PROT_TRANSFER_PI | - SCSI_PROT_GUARD_CHECK | - SCSI_PROT_REF_CHECK | - SCSI_PROT_REF_INCREMENT | - SCSI_PROT_IP_CHECKSUM, - }; - - return flag_mask[prot_op]; -} - #ifdef CONFIG_BLK_DEV_INTEGRITY extern void sd_dif_config_host(struct scsi_disk *); diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 83365b29a4d8..a340af797a85 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, return -EOPNOTSUPP; /* - * Get a reply buffer for the number of requested zones plus a header. - * For ATA, buffers must be aligned to 512B. + * Get a reply buffer for the number of requested zones plus a header, + * without exceeding the device maximum command size. For ATA disks, + * buffers must be aligned to 512B. */ - buflen = roundup((nrz + 1) * 64, 512); + buflen = min(queue_max_hw_sectors(disk->queue) << 9, + roundup((nrz + 1) * 64, 512)); buf = kmalloc(buflen, gfp_mask); if (!buf) return -ENOMEM; @@ -462,12 +464,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) sdkp->device->use_10_for_rw = 0; /* - * If something changed, revalidate the disk zone bitmaps once we have - * the capacity, that is on the second revalidate execution during disk - * scan and always during normal revalidate. + * Revalidate the disk zone bitmaps once the block device capacity is + * set on the second revalidate execution during disk scan and if + * something changed when executing a normal revalidate. */ - if (sdkp->first_scan) + if (sdkp->first_scan) { + sdkp->zone_blocks = zone_blocks; + sdkp->nr_zones = nr_zones; return 0; + } + if (sdkp->zone_blocks != zone_blocks || sdkp->nr_zones != nr_zones || disk->queue->nr_zones != nr_zones) { diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile index e6b779930230..a03a6edb0060 100644 --- a/drivers/scsi/smartpqi/Makefile +++ b/drivers/scsi/smartpqi/Makefile @@ -1,3 +1,2 @@ -ccflags-y += -I. 
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index f564af8949e8..5d9ccbab7581 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -6043,7 +6043,8 @@ out: return rc; } -static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, + void __user *arg) { int rc; struct pqi_ctrl_info *ctrl_info; diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c index 0abe17c1a73b..2b349365592f 100644 --- a/drivers/scsi/snic/snic_debugfs.c +++ b/drivers/scsi/snic/snic_debugfs.c @@ -30,33 +30,13 @@ * fnic directory and statistics directory for trace buffer and * stats logging */ - -int -snic_debugfs_init(void) +void snic_debugfs_init(void) { - int rc = -1; - struct dentry *de = NULL; - - de = debugfs_create_dir("snic", NULL); - if (!de) { - SNIC_DBG("Cannot create debugfs root\n"); - - return rc; - } - snic_glob->trc_root = de; - - de = debugfs_create_dir("statistics", snic_glob->trc_root); - if (!de) { - SNIC_DBG("Cannot create Statistics directory\n"); + snic_glob->trc_root = debugfs_create_dir("snic", NULL); - return rc; - } - snic_glob->stats_root = de; - - rc = 0; - - return rc; -} /* end of snic_debugfs_init */ + snic_glob->stats_root = debugfs_create_dir("statistics", + snic_glob->trc_root); +} /* * snic_debugfs_term - Tear down debugfs intrastructure @@ -391,56 +371,23 @@ static const struct file_operations snic_reset_stats_fops = { * It will create file stats and reset_stats under statistics/host# directory * to log per snic stats */ -int -snic_stats_debugfs_init(struct snic *snic) +void snic_stats_debugfs_init(struct snic *snic) { - int rc = -1; char name[16]; - struct dentry *de = NULL; snprintf(name, sizeof(name), "host%d", snic->shost->host_no); - if (!snic_glob->stats_root) { - SNIC_DBG("snic_stats root doesn't exist\n"); - - return rc; - } - - de = debugfs_create_dir(name, snic_glob->stats_root); - if (!de) { - SNIC_DBG("Cannot create host directory\n"); - - return rc; - } - snic->stats_host = de; - - de = debugfs_create_file("stats", - S_IFREG|S_IRUGO, - snic->stats_host, - snic, - &snic_stats_fops); - if (!de) { - SNIC_DBG("Cannot create host's stats file\n"); - - return rc; - } - snic->stats_file = de; - - de = debugfs_create_file("reset_stats", - S_IFREG|S_IRUGO|S_IWUSR, - snic->stats_host, - snic, - &snic_reset_stats_fops); - if (!de) { - SNIC_DBG("Cannot create host's reset_stats file\n"); + snic->stats_host = debugfs_create_dir(name, snic_glob->stats_root); - return rc; - } - snic->reset_stats_file = de; - rc = 0; + snic->stats_file = debugfs_create_file("stats", S_IFREG|S_IRUGO, + snic->stats_host, snic, + &snic_stats_fops); - return rc; -} /* end of snic_stats_debugfs_init */ + snic->reset_stats_file = debugfs_create_file("reset_stats", + S_IFREG|S_IRUGO|S_IWUSR, + snic->stats_host, snic, + &snic_reset_stats_fops); +} /* * snic_stats_debugfs_remove - Tear down debugfs infrastructure of stats @@ -517,46 +464,18 @@ static const struct file_operations snic_trc_fops = { * snic_trc_debugfs_init : creates trace/tracing_enable files for trace * under debugfs */ -int -snic_trc_debugfs_init(void) +void snic_trc_debugfs_init(void) { - struct dentry *de = NULL; - int ret = -1; - - if (!snic_glob->trc_root) { - SNIC_ERR("Debugfs root directory for snic doesn't exist.\n"); - - return ret; - 
} - - de = debugfs_create_bool("tracing_enable", - S_IFREG | S_IRUGO | S_IWUSR, - snic_glob->trc_root, - &snic_glob->trc.enable); - - if (!de) { - SNIC_ERR("Can't create trace_enable file.\n"); - - return ret; - } - snic_glob->trc.trc_enable = de; - - de = debugfs_create_file("trace", - S_IFREG | S_IRUGO | S_IWUSR, - snic_glob->trc_root, - NULL, - &snic_trc_fops); - - if (!de) { - SNIC_ERR("Cannot create trace file.\n"); - - return ret; - } - snic_glob->trc.trc_file = de; - ret = 0; - - return ret; -} /* end of snic_trc_debugfs_init */ + snic_glob->trc.trc_enable = debugfs_create_bool("tracing_enable", + S_IFREG | S_IRUGO | S_IWUSR, + snic_glob->trc_root, + &snic_glob->trc.enable); + + snic_glob->trc.trc_file = debugfs_create_file("trace", + S_IFREG | S_IRUGO | S_IWUSR, + snic_glob->trc_root, NULL, + &snic_trc_fops); +} /* * snic_trc_debugfs_term : cleans up the files created for trace under debugfs diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c index 5e824fd6047a..14f4ce665e58 100644 --- a/drivers/scsi/snic/snic_main.c +++ b/drivers/scsi/snic/snic_main.c @@ -397,12 +397,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); #ifdef CONFIG_SCSI_SNIC_DEBUG_FS /* Per snic debugfs init */ - ret = snic_stats_debugfs_init(snic); - if (ret) { - SNIC_HOST_ERR(snic->shost, - "Failed to initialize debugfs stats\n"); - snic_stats_debugfs_remove(snic); - } + snic_stats_debugfs_init(snic); #endif /* Setup PCI Resources */ @@ -850,12 +845,7 @@ snic_global_data_init(void) #ifdef CONFIG_SCSI_SNIC_DEBUG_FS /* Debugfs related Initialization */ /* Create debugfs entries for snic */ - ret = snic_debugfs_init(); - if (ret < 0) { - SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n"); - snic_debugfs_term(); - /* continue even if it fails */ - } + snic_debugfs_init(); /* Trace related Initialization */ /* Allocate memory for trace buffer */ diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h index fd1066b1cad5..faf0cb601954 100644 --- a/drivers/scsi/snic/snic_stats.h +++ b/drivers/scsi/snic/snic_stats.h @@ -99,7 +99,7 @@ struct snic_stats { atomic64_t io_cmpl_skip; }; -int snic_stats_debugfs_init(struct snic *); +void snic_stats_debugfs_init(struct snic *); void snic_stats_debugfs_remove(struct snic *); /* Auxillary function to update active IO counter */ diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c index 458eaba24c78..f23fe2f88438 100644 --- a/drivers/scsi/snic/snic_trc.c +++ b/drivers/scsi/snic/snic_trc.c @@ -138,12 +138,7 @@ snic_trc_init(void) trc->buf = (struct snic_trc_data *) tbuf; spin_lock_init(&trc->lock); - ret = snic_trc_debugfs_init(); - if (ret) { - SNIC_ERR("Failed to create Debugfs Files.\n"); - - goto error; - } + snic_trc_debugfs_init(); trc->max_idx = (tbuf_sz / SNIC_TRC_ENTRY_SZ); trc->rd_idx = trc->wr_idx = 0; @@ -152,11 +147,6 @@ snic_trc_init(void) tbuf_sz / PAGE_SIZE); ret = 0; - return ret; - -error: - snic_trc_free(); - return ret; } /* end of snic_trc_init */ diff --git a/drivers/scsi/snic/snic_trc.h b/drivers/scsi/snic/snic_trc.h index b37f8867bfde..87dcc7457d15 100644 --- a/drivers/scsi/snic/snic_trc.h +++ b/drivers/scsi/snic/snic_trc.h @@ -53,12 +53,12 @@ struct snic_trc { int snic_trc_init(void); void snic_trc_free(void); -int snic_trc_debugfs_init(void); +void snic_trc_debugfs_init(void); void snic_trc_debugfs_term(void); struct snic_trc_data *snic_get_trc_buf(void); int snic_get_trc_data(char *buf, int buf_sz); -int 
snic_debugfs_init(void); +void snic_debugfs_init(void); void snic_debugfs_term(void); static inline void diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 38ddbbfe5f3c..039c27c2d7b3 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -394,7 +394,6 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) ret = scsi_init_io(SCpnt); if (ret != BLK_STS_OK) goto out; - WARN_ON_ONCE(SCpnt != rq->special); cd = scsi_cd(rq->rq_disk); /* from here on until we're complete, any goto out diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 7ff22d3f03e3..19c022e66d63 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -169,7 +169,7 @@ static int debugging = DEBUG; /* Remove mode bits and auto-rewind bit (7) */ #define TAPE_NR(x) ( ((iminor(x) & ~255) >> (ST_NBR_MODE_BITS + 1)) | \ - (iminor(x) & ~(-1 << ST_MODE_SHIFT)) ) + (iminor(x) & ((1 << ST_MODE_SHIFT)-1))) #define TAPE_MODE(x) ((iminor(x) & ST_MODE_MASK) >> ST_MODE_SHIFT) /* Construct the minor number from the device (d), mode (m), and non-rewind (n) data */ @@ -337,12 +337,14 @@ static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s) switch (sense[0] & 0x7f) { case 0x71: s->deferred = 1; + /* fall through */ case 0x70: s->fixed_format = 1; s->flags = sense[2] & 0xe0; break; case 0x73: s->deferred = 1; + /* fall through */ case 0x72: s->fixed_format = 0; ucp = scsi_sense_desc_find(sense, SCSI_SENSE_BUFFERSIZE, 4); @@ -2721,6 +2723,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon switch (cmd_in) { case MTFSFM: chg_eof = 0; /* Changed from the FSF after this */ + /* fall through */ case MTFSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ @@ -2735,6 +2738,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon break; case MTBSFM: chg_eof = 0; /* Changed from the FSF after this */ + /* fall through */ case MTBSF: cmd[0] = SPACE; cmd[1] = 0x01; /* Space FileMarks */ diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 2ddbb26d9c26..6db37cf306b0 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -99,7 +99,6 @@ config SCSI_UFS_DWC_TC_PLATFORM config SCSI_UFS_QCOM tristate "QCOM specific hooks to UFS controller platform driver" depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM - select PHY_QCOM_UFS help This selects the QCOM specific additions to UFSHCD platform driver. 
UFS host on QCOM needs some vendor specific configuration before diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index 452e19f8fb47..f2d3df357a97 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -66,7 +66,7 @@ static int ufs_hisi_check_hibern8(struct ufs_hba *hba) return err; } -static void ufs_hi3660_clk_init(struct ufs_hba *hba) +static void ufs_hisi_clk_init(struct ufs_hba *hba) { struct ufs_hisi_host *host = ufshcd_get_variant(hba); @@ -80,7 +80,7 @@ static void ufs_hi3660_clk_init(struct ufs_hba *hba) ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); } -static void ufs_hi3660_soc_init(struct ufs_hba *hba) +static void ufs_hisi_soc_init(struct ufs_hba *hba) { struct ufs_hisi_host *host = ufshcd_get_variant(hba); u32 reg; @@ -139,6 +139,7 @@ static void ufs_hi3660_soc_init(struct ufs_hba *hba) static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba) { + struct ufs_hisi_host *host = ufshcd_get_variant(hba); int err; uint32_t value; uint32_t reg; @@ -153,6 +154,14 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba) ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D); /* MPHY CBOVRCTRL3 */ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1); + + if (host->caps & UFS_HISI_CAP_PHY10nm) { + /* MPHY CBOVRCTRL4 */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98); + /* MPHY CBOVRCTRL5 */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1); + } + /* Unipro VS_MphyCfgUpdt */ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); /* MPHY RXOVRCTRL4 rx0 */ @@ -173,10 +182,21 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba) ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1); ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); - /* Tactive RX */ - ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7); - /* Tactive RX */ - ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7); + if (host->caps & UFS_HISI_CAP_PHY10nm) { + /* RX_Hibern8Time_Capability*/ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA); + /* RX_Hibern8Time_Capability*/ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA); + /* RX_Min_ActivateTime */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA); + /* RX_Min_ActivateTime*/ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA); + } else { + /* Tactive RX */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7); + /* Tactive RX */ + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7); + } /* Gear3 Synclength */ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F); @@ -208,7 +228,8 @@ static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba) if (err) dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n"); - ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV); + if (!(host->caps & UFS_HISI_CAP_PHY10nm)) + ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV); /* disable auto H8 */ reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); @@ -253,7 +274,7 @@ static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba) return 0; } -static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba, +static int ufs_hisi_link_startup_notify(struct ufs_hba *hba, enum ufs_notify_change_status status) { int err = 0; @@ -391,6 +412,28 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param) static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) { + struct ufs_hisi_host *host = ufshcd_get_variant(hba); + + if (host->caps & UFS_HISI_CAP_PHY10nm) { + /* + * Boston platform need 
to set SaveConfigTime to 0x13, + * and change sync length to maximum value + */ + /* VS_DebugSaveConfigTime */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13); + /* g1 sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f); + /* g2 sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f); + /* g3 sync length */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f); + /* PA_Hibern8Time */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA); + /* PA_Tactivate */ + ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA); + ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01); + } + if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) { pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n"); /* VS_DebugSaveConfigTime */ @@ -429,7 +472,7 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767); } -static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba, +static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) @@ -567,25 +610,72 @@ static int ufs_hi3660_init(struct ufs_hba *hba) return ret; } - ufs_hi3660_clk_init(hba); + ufs_hisi_clk_init(hba); + + ufs_hisi_soc_init(hba); + + return 0; +} + +static int ufs_hi3670_init(struct ufs_hba *hba) +{ + int ret = 0; + struct device *dev = hba->dev; + struct ufs_hisi_host *host; + + ret = ufs_hisi_init_common(hba); + if (ret) { + dev_err(dev, "%s: ufs common init fail\n", __func__); + return ret; + } + + ufs_hisi_clk_init(hba); + + ufs_hisi_soc_init(hba); - ufs_hi3660_soc_init(hba); + /* Add cap for 10nm PHY variant on HI3670 SoC */ + host = ufshcd_get_variant(hba); + host->caps |= UFS_HISI_CAP_PHY10nm; return 0; } -static struct ufs_hba_variant_ops ufs_hba_hisi_vops = { +static struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { .name = "hi3660", .init = ufs_hi3660_init, - .link_startup_notify = ufs_hi3660_link_startup_notify, - .pwr_change_notify = ufs_hi3660_pwr_change_notify, + .link_startup_notify = ufs_hisi_link_startup_notify, + .pwr_change_notify = ufs_hisi_pwr_change_notify, .suspend = ufs_hisi_suspend, .resume = ufs_hisi_resume, }; +static struct ufs_hba_variant_ops ufs_hba_hi3670_vops = { + .name = "hi3670", + .init = ufs_hi3670_init, + .link_startup_notify = ufs_hisi_link_startup_notify, + .pwr_change_notify = ufs_hisi_pwr_change_notify, + .suspend = ufs_hisi_suspend, + .resume = ufs_hisi_resume, +}; + +static const struct of_device_id ufs_hisi_of_match[] = { + { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops }, + { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops }, + {}, +}; + +MODULE_DEVICE_TABLE(of, ufs_hisi_of_match); + static int ufs_hisi_probe(struct platform_device *pdev) { - return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops); + const struct of_device_id *of_id; + struct ufs_hba_variant_ops *vops; + struct device *dev = &pdev->dev; + + of_id = of_match_node(ufs_hisi_of_match, dev->of_node); + vops = (struct ufs_hba_variant_ops *)of_id->data; + + return ufshcd_pltfrm_init(pdev, vops); } static int ufs_hisi_remove(struct platform_device *pdev) @@ -596,13 +686,6 @@ static int ufs_hisi_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id ufs_hisi_of_match[] = { - { .compatible = "hisilicon,hi3660-ufs" }, - {}, -}; - -MODULE_DEVICE_TABLE(of, ufs_hisi_of_match); - static const struct dev_pm_ops ufs_hisi_pm_ops = { .suspend = 
ufshcd_pltfrm_suspend, .resume = ufshcd_pltfrm_resume, diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h index 3df9cd7acc29..667dfe39b57e 100644 --- a/drivers/scsi/ufs/ufs-hisi.h +++ b/drivers/scsi/ufs/ufs-hisi.h @@ -91,6 +91,9 @@ enum { #define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B #define UFS_HISI_LIMIT_DESIRED_MODE FAST +#define UFS_HISI_CAP_RESERVED BIT(0) +#define UFS_HISI_CAP_PHY10nm BIT(1) + struct ufs_hisi_host { struct ufs_hba *hba; void __iomem *ufs_sys_ctrl; @@ -112,4 +115,5 @@ struct ufs_hisi_host { ufs_sys_ctrl_writel((host), \ ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \ (reg)) + #endif /* UFS_HISI_H_ */ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 6d176815e6ce..21e4ccb5ba6e 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -514,7 +514,6 @@ struct ufs_vreg { struct regulator *reg; const char *name; bool enabled; - bool unused; int min_uV; int max_uV; int min_uA; diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index 775bb4e5e36e..869e71f861d6 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -27,15 +27,11 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len, static int ufs_bsg_verify_query_size(struct ufs_hba *hba, unsigned int request_len, - unsigned int reply_len, - int desc_len, enum query_opcode desc_op) + unsigned int reply_len) { int min_req_len = sizeof(struct ufs_bsg_request); int min_rsp_len = sizeof(struct ufs_bsg_reply); - if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) - min_req_len += desc_len; - if (min_req_len > request_len || min_rsp_len > reply_len) { dev_err(hba->dev, "not enough space assigned\n"); return -EINVAL; @@ -44,21 +40,16 @@ static int ufs_bsg_verify_query_size(struct ufs_hba *hba, return 0; } -static int ufs_bsg_verify_query_params(struct ufs_hba *hba, - struct ufs_bsg_request *bsg_request, - unsigned int request_len, - unsigned int reply_len, - uint8_t *desc_buff, int *desc_len, - enum query_opcode desc_op) +static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job, + uint8_t **desc_buff, int *desc_len, + enum query_opcode desc_op) { + struct ufs_bsg_request *bsg_request = job->request; struct utp_upiu_query *qr; + u8 *descp; - if (desc_op == UPIU_QUERY_OPCODE_READ_DESC) { - dev_err(hba->dev, "unsupported opcode %d\n", desc_op); - return -ENOTSUPP; - } - - if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) + if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC && + desc_op != UPIU_QUERY_OPCODE_READ_DESC) goto out; qr = &bsg_request->upiu_req.qr; @@ -67,11 +58,21 @@ static int ufs_bsg_verify_query_params(struct ufs_hba *hba, return -EINVAL; } - if (ufs_bsg_verify_query_size(hba, request_len, reply_len, *desc_len, - desc_op)) + if (*desc_len > job->request_payload.payload_len) { + dev_err(hba->dev, "Illegal desc size\n"); return -EINVAL; + } - desc_buff = (uint8_t *)(bsg_request + 1); + descp = kzalloc(*desc_len, GFP_KERNEL); + if (!descp) + return -ENOMEM; + + if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, descp, + *desc_len); + + *desc_buff = descp; out: return 0; @@ -91,7 +92,7 @@ static int ufs_bsg_request(struct bsg_job *job) enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP; int ret; - ret = ufs_bsg_verify_query_size(hba, req_len, reply_len, 0, desc_op); + ret = ufs_bsg_verify_query_size(hba, req_len, reply_len); if (ret) goto out; @@ -101,9 +102,8 @@ static int ufs_bsg_request(struct bsg_job *job) switch (msgcode) { case 
UPIU_TRANSACTION_QUERY_REQ: desc_op = bsg_request->upiu_req.qr.opcode; - ret = ufs_bsg_verify_query_params(hba, bsg_request, req_len, - reply_len, desc_buff, - &desc_len, desc_op); + ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, + &desc_len, desc_op); if (ret) goto out; @@ -135,11 +135,20 @@ static int ufs_bsg_request(struct bsg_job *job) break; } + if (!desc_buff) + goto out; + + if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + desc_buff, desc_len); + + kfree(desc_buff); + out: bsg_reply->result = ret; - job->reply_len = sizeof(struct ufs_bsg_reply) + - bsg_reply->reply_payload_rcv_len; - + job->reply_len = sizeof(struct ufs_bsg_reply); bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len); return ret; diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 5d2dfdb41a6f..a9bbd34d6b16 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -44,21 +44,6 @@ struct ufs_dev_fix { .quirk = (_quirk), \ } -/* - * If UFS device is having issue in processing LCC (Line Control - * Command) coming from UFS host controller then enable this quirk. - * When this quirk is enabled, host controller driver should disable - * the LCC transmission on UFS host controller (by clearing - * TX_LCC_ENABLE attribute of host to 0). - */ -#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0) - -/* - * Some UFS devices don't need VCCQ rail for device operations. Enabling this - * quirk for such devices will make sure that VCCQ rail is not voted. - */ -#define UFS_DEVICE_NO_VCCQ (1 << 1) - /* * Some vendor's UFS device sends back to back NACs for the DL data frames * causing the host controller to raise the DFES error status. Sometimes @@ -84,13 +69,6 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2) -/* - * Some UFS devices may not work properly after resume if the link was kept - * in off state during suspend. Enabling this quirk will not allow the - * link to be kept in off state during suspend. - */ -#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3) - /* * Few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as * 600us which may not be enough for reliable hibern8 exit hardware sequence @@ -100,13 +78,6 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4) -/* - * Some UFS memory devices may have really low read/write throughput in - * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is - * never enabled for such devices. - */ -#define UFS_DEVICE_NO_FASTAUTO (1 << 5) - /* * It seems some UFS devices may keep drawing more than sleep current * (atleast for 500us) from UFS rails (especially from VCCQ rail). 
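The quirk comments above describe per-device workarounds; broadly, the UFS core walks a fixup table and ORs the quirk bits of every matching vendor/model entry into hba->dev_quirks (the driver's actual table, ufs_fixups[], appears in the ufshcd.c hunk below). A minimal stand-alone sketch of that lookup-table pattern, using hypothetical quirk names and vendor/model strings rather than the driver's real identifiers:

#include <stdio.h>
#include <string.h>

/* Hypothetical quirk bits and table entries, mirroring the ufs_dev_fix idea. */
#define QUIRK_DELAY_BEFORE_LPM	(1 << 0)
#define QUIRK_PA_TACTIVATE	(1 << 1)

struct dev_fix {
	const char *vendor;	/* NULL matches any vendor */
	const char *model;	/* NULL matches any model  */
	unsigned int quirk;
};

static const struct dev_fix fixups[] = {
	{ "VENDOR_A", NULL,       QUIRK_DELAY_BEFORE_LPM },
	{ "VENDOR_B", "MODEL_X1", QUIRK_PA_TACTIVATE },
	{ }			/* terminator */
};

static unsigned int lookup_quirks(const char *vendor, const char *model)
{
	const struct dev_fix *f;
	unsigned int quirks = 0;

	for (f = fixups; f->vendor || f->model; f++) {
		if ((!f->vendor || !strcmp(f->vendor, vendor)) &&
		    (!f->model || !strcmp(f->model, model)))
			quirks |= f->quirk;	/* accumulate every matching quirk */
	}
	return quirks;
}

int main(void)
{
	/* Prints "quirks = 0x2": only the VENDOR_B/MODEL_X1 entry matches. */
	printf("quirks = 0x%x\n", lookup_quirks("VENDOR_B", "MODEL_X1"));
	return 0;
}

This is illustrative only; the real driver matches on numeric vendor IDs and model strings and stores the result in hba->dev_quirks, as shown in the ufshcd.c changes that follow.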
diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c index 5fd16c72207f..977b21871a5d 100644 --- a/drivers/scsi/ufs/ufshcd-dwc.c +++ b/drivers/scsi/ufs/ufshcd-dwc.c @@ -50,7 +50,7 @@ static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val) /** * ufshcd_dwc_link_is_up() * Check if link is up - * @hba: private structure poitner + * @hba: private structure pointer * * Returns 0 on success, non-zero value on failure */ @@ -110,7 +110,7 @@ static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) /** * ufshcd_dwc_link_startup_notify() * UFS Host DWC specific link startup sequence - * @hba: private structure poitner + * @hba: private structure pointer * @status: Callback notify status * * Returns 0 on success, non-zero value on failure diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 2ddf24466a62..e040f9dd9ff3 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -219,11 +219,8 @@ static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS), - UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, - UFS_DEVICE_NO_FASTAUTO), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE), UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL, @@ -232,7 +229,6 @@ static struct ufs_dev_fix ufs_fixups[] = { UFS_DEVICE_QUIRK_PA_TACTIVATE), UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG", UFS_DEVICE_QUIRK_PA_TACTIVATE), - UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/, @@ -251,7 +247,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba); static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); -static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused); static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); @@ -399,15 +394,20 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, struct ufs_uic_err_reg_hist *err_hist, char *err_name) { int i; + bool found = false; for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) { - int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH; + int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH; if (err_hist->reg[p] == 0) continue; dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i, err_hist->reg[p], ktime_to_us(err_hist->tstamp[p])); + found = true; } + + if (!found) + dev_err(hba->dev, "No record of %s uic errors\n", err_name); } static void ufshcd_print_host_regs(struct ufs_hba *hba) @@ -5775,6 +5775,20 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, /* just copy the upiu response as it is */ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); + if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) { + u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu); + u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) & + MASK_QUERY_DATA_SEG_LEN; + + if (*buff_len >= resp_len) { + memcpy(desc_buff, descp, resp_len); + *buff_len = resp_len; + } else { + dev_warn(hba->dev, "rsp size is bigger than buffer"); + *buff_len = 0; 
+ err = -EINVAL; + } + } ufshcd_put_dev_cmd_tag(hba, tag); wake_up(&hba->dev_cmd.tag_wq); @@ -5810,11 +5824,6 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, int ocs_value; u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; - if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) { - err = -ENOTSUPP; - goto out; - } - switch (msgcode) { case UPIU_TRANSACTION_NOP_OUT: cmd_type = DEV_CMD_TYPE_NOP; @@ -5855,7 +5864,6 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, break; } -out: return err; } @@ -6825,11 +6833,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufs_fixup_device_setup(hba, &card); ufshcd_tune_unipro_params(hba); - ret = ufshcd_set_vccq_rail_unused(hba, - (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false); - if (ret) - goto out; - /* UFS device is also active now */ ufshcd_set_ufs_dev_active(hba); ufshcd_force_reset_auto_bkops(hba); @@ -7013,24 +7016,13 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { - if (!vreg) - return 0; - else if (vreg->unused) - return 0; - else - return ufshcd_config_vreg_load(hba->dev, vreg, - UFS_VREG_LPM_LOAD_UA); + return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); } static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { - if (!vreg) - return 0; - else if (vreg->unused) - return 0; - else - return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } static int ufshcd_config_vreg(struct device *dev, @@ -7068,9 +7060,7 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; - if (!vreg) - goto out; - else if (vreg->enabled || vreg->unused) + if (!vreg || vreg->enabled) goto out; ret = ufshcd_config_vreg(dev, vreg, true); @@ -7090,9 +7080,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) { int ret = 0; - if (!vreg) - goto out; - else if (!vreg->enabled || vreg->unused) + if (!vreg || !vreg->enabled) goto out; ret = regulator_disable(vreg->reg); @@ -7198,36 +7186,6 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba) return 0; } -static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused) -{ - int ret = 0; - struct ufs_vreg_info *info = &hba->vreg_info; - - if (!info) - goto out; - else if (!info->vccq) - goto out; - - if (unused) { - /* shut off the rail here */ - ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false); - /* - * Mark this rail as no longer used, so it doesn't get enabled - * later by mistake - */ - if (!ret) - info->vccq->unused = true; - } else { - /* - * rail should have been already enabled hence just make sure - * that unused flag is cleared. 
- */ - info->vccq->unused = false; - } -out: - return ret; -} - static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk) { diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 772b976e4ee4..1a6f150cd2d8 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -100,16 +100,8 @@ static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) { - if (!resid) - return; - - if (!scsi_bidi_cmnd(sc)) { + if (resid) scsi_set_resid(sc, resid); - return; - } - - scsi_in(sc)->resid = min(resid, scsi_in(sc)->length); - scsi_out(sc)->resid = resid - scsi_in(sc)->resid; } /** @@ -403,9 +395,9 @@ static int virtscsi_add_cmd(struct virtqueue *vq, if (sc && sc->sc_data_direction != DMA_NONE) { if (sc->sc_data_direction != DMA_FROM_DEVICE) - out = &scsi_out(sc)->table; + out = &sc->sdb.table; if (sc->sc_data_direction != DMA_TO_DEVICE) - in = &scsi_in(sc)->table; + in = &sc->sdb.table; } /* Request header. */ diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c index 55eda5863a6b..b2f07d2043eb 100644 --- a/drivers/slimbus/core.c +++ b/drivers/slimbus/core.c @@ -21,7 +21,9 @@ static const struct slim_device_id *slim_match(const struct slim_device_id *id, { while (id->manf_id != 0 || id->prod_code != 0) { if (id->manf_id == sbdev->e_addr.manf_id && - id->prod_code == sbdev->e_addr.prod_code) + id->prod_code == sbdev->e_addr.prod_code && + id->dev_index == sbdev->e_addr.dev_index && + id->instance == sbdev->e_addr.instance) return id; id++; } @@ -40,6 +42,23 @@ static int slim_device_match(struct device *dev, struct device_driver *drv) return !!slim_match(sbdrv->id_table, sbdev); } +static void slim_device_update_status(struct slim_device *sbdev, + enum slim_device_status status) +{ + struct slim_driver *sbdrv; + + if (sbdev->status == status) + return; + + sbdev->status = status; + if (!sbdev->dev.driver) + return; + + sbdrv = to_slim_driver(sbdev->dev.driver); + if (sbdrv->device_status) + sbdrv->device_status(sbdev, sbdev->status); +} + static int slim_device_probe(struct device *dev) { struct slim_device *sbdev = to_slim_device(dev); @@ -53,8 +72,7 @@ static int slim_device_probe(struct device *dev) /* try getting the logical address after probe */ ret = slim_get_logical_addr(sbdev); if (!ret) { - if (sbdrv->device_status) - sbdrv->device_status(sbdev, sbdev->status); + slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP); } else { dev_err(&sbdev->dev, "Failed to get logical address\n"); ret = -EPROBE_DEFER; @@ -256,6 +274,7 @@ int slim_register_controller(struct slim_controller *ctrl) mutex_init(&ctrl->lock); mutex_init(&ctrl->sched.m_reconf); init_completion(&ctrl->sched.pause_comp); + spin_lock_init(&ctrl->txn_lock); dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n", ctrl->name, ctrl->dev); @@ -295,23 +314,6 @@ int slim_unregister_controller(struct slim_controller *ctrl) } EXPORT_SYMBOL_GPL(slim_unregister_controller); -static void slim_device_update_status(struct slim_device *sbdev, - enum slim_device_status status) -{ - struct slim_driver *sbdrv; - - if (sbdev->status == status) - return; - - sbdev->status = status; - if (!sbdev->dev.driver) - return; - - sbdrv = to_slim_driver(sbdev->dev.driver); - if (sbdrv->device_status) - sbdrv->device_status(sbdev, sbdev->status); -} - /** * slim_report_absent() - Controller calls this function when a device * reports absent, OR when the device cannot be communicated with @@ -464,6 +466,7 @@ static int 
slim_device_alloc_laddr(struct slim_device *sbdev, sbdev->laddr = laddr; sbdev->is_laddr_valid = true; + mutex_unlock(&ctrl->lock); slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP); @@ -471,6 +474,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev, laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code, sbdev->e_addr.dev_index, sbdev->e_addr.instance); + return 0; + err: mutex_unlock(&ctrl->lock); return ret; diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c index fce33ca76bb6..be95a37c3fec 100644 --- a/drivers/soc/amlogic/meson-canvas.c +++ b/drivers/soc/amlogic/meson-canvas.c @@ -51,16 +51,30 @@ struct meson_canvas *meson_canvas_get(struct device *dev) { struct device_node *canvas_node; struct platform_device *canvas_pdev; + struct meson_canvas *canvas; canvas_node = of_parse_phandle(dev->of_node, "amlogic,canvas", 0); if (!canvas_node) return ERR_PTR(-ENODEV); canvas_pdev = of_find_device_by_node(canvas_node); - if (!canvas_pdev) + if (!canvas_pdev) { + of_node_put(canvas_node); return ERR_PTR(-EPROBE_DEFER); + } + + of_node_put(canvas_node); + + /* + * If priv is NULL, it's probably because the canvas hasn't + * properly initialized. Bail out with -EINVAL because, in the + * current state, this driver probe cannot return -EPROBE_DEFER + */ + canvas = dev_get_drvdata(&canvas_pdev->dev); + if (!canvas) + return ERR_PTR(-EINVAL); - return dev_get_drvdata(&canvas_pdev->dev); + return canvas; } EXPORT_SYMBOL_GPL(meson_canvas_get); diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c index daea191a66fa..19d4cbc93a17 100644 --- a/drivers/soc/amlogic/meson-clk-measure.c +++ b/drivers/soc/amlogic/meson-clk-measure.c @@ -165,6 +165,194 @@ static struct meson_msr_id clk_msr_gx[CLK_MSR_MAX] = { CLK_MSR_ID(82, "ge2d"), }; +static struct meson_msr_id clk_msr_axg[CLK_MSR_MAX] = { + CLK_MSR_ID(0, "ring_osc_out_ee_0"), + CLK_MSR_ID(1, "ring_osc_out_ee_1"), + CLK_MSR_ID(2, "ring_osc_out_ee_2"), + CLK_MSR_ID(3, "a53_ring_osc"), + CLK_MSR_ID(4, "gp0_pll"), + CLK_MSR_ID(5, "gp1_pll"), + CLK_MSR_ID(7, "clk81"), + CLK_MSR_ID(9, "encl"), + CLK_MSR_ID(17, "sys_pll_div16"), + CLK_MSR_ID(18, "sys_cpu_div16"), + CLK_MSR_ID(20, "rtc_osc_out"), + CLK_MSR_ID(23, "mmc_clk"), + CLK_MSR_ID(28, "sar_adc"), + CLK_MSR_ID(31, "mpll_test_out"), + CLK_MSR_ID(40, "mod_eth_tx_clk"), + CLK_MSR_ID(41, "mod_eth_rx_clk_rmii"), + CLK_MSR_ID(42, "mp0_out"), + CLK_MSR_ID(43, "fclk_div5"), + CLK_MSR_ID(44, "pwm_b"), + CLK_MSR_ID(45, "pwm_a"), + CLK_MSR_ID(46, "vpu"), + CLK_MSR_ID(47, "ddr_dpll_pt"), + CLK_MSR_ID(48, "mp1_out"), + CLK_MSR_ID(49, "mp2_out"), + CLK_MSR_ID(50, "mp3_out"), + CLK_MSR_ID(51, "sd_emmm_c"), + CLK_MSR_ID(52, "sd_emmc_b"), + CLK_MSR_ID(61, "gpio_msr"), + CLK_MSR_ID(66, "audio_slv_lrclk_c"), + CLK_MSR_ID(67, "audio_slv_lrclk_b"), + CLK_MSR_ID(68, "audio_slv_lrclk_a"), + CLK_MSR_ID(69, "audio_slv_sclk_c"), + CLK_MSR_ID(70, "audio_slv_sclk_b"), + CLK_MSR_ID(71, "audio_slv_sclk_a"), + CLK_MSR_ID(72, "pwm_d"), + CLK_MSR_ID(73, "pwm_c"), + CLK_MSR_ID(74, "wifi_beacon"), + CLK_MSR_ID(75, "tdmin_lb_lrcl"), + CLK_MSR_ID(76, "tdmin_lb_sclk"), + CLK_MSR_ID(77, "rng_ring_osc_0"), + CLK_MSR_ID(78, "rng_ring_osc_1"), + CLK_MSR_ID(79, "rng_ring_osc_2"), + CLK_MSR_ID(80, "rng_ring_osc_3"), + CLK_MSR_ID(81, "vapb"), + CLK_MSR_ID(82, "ge2d"), + CLK_MSR_ID(84, "audio_resample"), + CLK_MSR_ID(85, "audio_pdm_sys"), + CLK_MSR_ID(86, "audio_spdifout"), + CLK_MSR_ID(87, "audio_spdifin"), + CLK_MSR_ID(88, "audio_lrclk_f"), + CLK_MSR_ID(89, 
"audio_lrclk_e"), + CLK_MSR_ID(90, "audio_lrclk_d"), + CLK_MSR_ID(91, "audio_lrclk_c"), + CLK_MSR_ID(92, "audio_lrclk_b"), + CLK_MSR_ID(93, "audio_lrclk_a"), + CLK_MSR_ID(94, "audio_sclk_f"), + CLK_MSR_ID(95, "audio_sclk_e"), + CLK_MSR_ID(96, "audio_sclk_d"), + CLK_MSR_ID(97, "audio_sclk_c"), + CLK_MSR_ID(98, "audio_sclk_b"), + CLK_MSR_ID(99, "audio_sclk_a"), + CLK_MSR_ID(100, "audio_mclk_f"), + CLK_MSR_ID(101, "audio_mclk_e"), + CLK_MSR_ID(102, "audio_mclk_d"), + CLK_MSR_ID(103, "audio_mclk_c"), + CLK_MSR_ID(104, "audio_mclk_b"), + CLK_MSR_ID(105, "audio_mclk_a"), + CLK_MSR_ID(106, "pcie_refclk_n"), + CLK_MSR_ID(107, "pcie_refclk_p"), + CLK_MSR_ID(108, "audio_locker_out"), + CLK_MSR_ID(109, "audio_locker_in"), +}; + +static struct meson_msr_id clk_msr_g12a[CLK_MSR_MAX] = { + CLK_MSR_ID(0, "ring_osc_out_ee_0"), + CLK_MSR_ID(1, "ring_osc_out_ee_1"), + CLK_MSR_ID(2, "ring_osc_out_ee_2"), + CLK_MSR_ID(3, "sys_cpu_ring_osc"), + CLK_MSR_ID(4, "gp0_pll"), + CLK_MSR_ID(6, "enci"), + CLK_MSR_ID(7, "clk81"), + CLK_MSR_ID(8, "encp"), + CLK_MSR_ID(9, "encl"), + CLK_MSR_ID(10, "vdac"), + CLK_MSR_ID(11, "eth_tx"), + CLK_MSR_ID(12, "hifi_pll"), + CLK_MSR_ID(13, "mod_tcon"), + CLK_MSR_ID(14, "fec_0"), + CLK_MSR_ID(15, "fec_1"), + CLK_MSR_ID(16, "fec_2"), + CLK_MSR_ID(17, "sys_pll_div16"), + CLK_MSR_ID(18, "sys_cpu_div16"), + CLK_MSR_ID(19, "lcd_an_ph2"), + CLK_MSR_ID(20, "rtc_osc_out"), + CLK_MSR_ID(21, "lcd_an_ph3"), + CLK_MSR_ID(22, "eth_phy_ref"), + CLK_MSR_ID(23, "mpll_50m"), + CLK_MSR_ID(24, "eth_125m"), + CLK_MSR_ID(25, "eth_rmii"), + CLK_MSR_ID(26, "sc_int"), + CLK_MSR_ID(27, "in_mac"), + CLK_MSR_ID(28, "sar_adc"), + CLK_MSR_ID(29, "pcie_inp"), + CLK_MSR_ID(30, "pcie_inn"), + CLK_MSR_ID(31, "mpll_test_out"), + CLK_MSR_ID(32, "vdec"), + CLK_MSR_ID(33, "sys_cpu_ring_osc_1"), + CLK_MSR_ID(34, "eth_mpll_50m"), + CLK_MSR_ID(35, "mali"), + CLK_MSR_ID(36, "hdmi_tx_pixel"), + CLK_MSR_ID(37, "cdac"), + CLK_MSR_ID(38, "vdin_meas"), + CLK_MSR_ID(39, "bt656"), + CLK_MSR_ID(41, "eth_rx_or_rmii"), + CLK_MSR_ID(42, "mp0_out"), + CLK_MSR_ID(43, "fclk_div5"), + CLK_MSR_ID(44, "pwm_b"), + CLK_MSR_ID(45, "pwm_a"), + CLK_MSR_ID(46, "vpu"), + CLK_MSR_ID(47, "ddr_dpll_pt"), + CLK_MSR_ID(48, "mp1_out"), + CLK_MSR_ID(49, "mp2_out"), + CLK_MSR_ID(50, "mp3_out"), + CLK_MSR_ID(51, "sd_emmc_c"), + CLK_MSR_ID(52, "sd_emmc_b"), + CLK_MSR_ID(53, "sd_emmc_a"), + CLK_MSR_ID(54, "vpu_clkc"), + CLK_MSR_ID(55, "vid_pll_div_out"), + CLK_MSR_ID(56, "wave420l_a"), + CLK_MSR_ID(57, "wave420l_c"), + CLK_MSR_ID(58, "wave420l_b"), + CLK_MSR_ID(59, "hcodec"), + CLK_MSR_ID(61, "gpio_msr"), + CLK_MSR_ID(62, "hevcb"), + CLK_MSR_ID(63, "dsi_meas"), + CLK_MSR_ID(64, "spicc_1"), + CLK_MSR_ID(65, "spicc_0"), + CLK_MSR_ID(66, "vid_lock"), + CLK_MSR_ID(67, "dsi_phy"), + CLK_MSR_ID(68, "hdcp22_esm"), + CLK_MSR_ID(69, "hdcp22_skp"), + CLK_MSR_ID(70, "pwm_f"), + CLK_MSR_ID(71, "pwm_e"), + CLK_MSR_ID(72, "pwm_d"), + CLK_MSR_ID(73, "pwm_c"), + CLK_MSR_ID(75, "hevcf"), + CLK_MSR_ID(77, "rng_ring_osc_0"), + CLK_MSR_ID(78, "rng_ring_osc_1"), + CLK_MSR_ID(79, "rng_ring_osc_2"), + CLK_MSR_ID(80, "rng_ring_osc_3"), + CLK_MSR_ID(81, "vapb"), + CLK_MSR_ID(82, "ge2d"), + CLK_MSR_ID(83, "co_rx"), + CLK_MSR_ID(84, "co_tx"), + CLK_MSR_ID(89, "hdmi_todig"), + CLK_MSR_ID(90, "hdmitx_sys"), + CLK_MSR_ID(94, "eth_phy_rx"), + CLK_MSR_ID(95, "eth_phy_pll"), + CLK_MSR_ID(96, "vpu_b"), + CLK_MSR_ID(97, "cpu_b_tmp"), + CLK_MSR_ID(98, "ts"), + CLK_MSR_ID(99, "ring_osc_out_ee_3"), + CLK_MSR_ID(100, "ring_osc_out_ee_4"), + CLK_MSR_ID(101, "ring_osc_out_ee_5"), + CLK_MSR_ID(102, 
"ring_osc_out_ee_6"), + CLK_MSR_ID(103, "ring_osc_out_ee_7"), + CLK_MSR_ID(104, "ring_osc_out_ee_8"), + CLK_MSR_ID(105, "ring_osc_out_ee_9"), + CLK_MSR_ID(106, "ephy_test"), + CLK_MSR_ID(107, "au_dac_g128x"), + CLK_MSR_ID(108, "audio_locker_out"), + CLK_MSR_ID(109, "audio_locker_in"), + CLK_MSR_ID(110, "audio_tdmout_c_sclk"), + CLK_MSR_ID(111, "audio_tdmout_b_sclk"), + CLK_MSR_ID(112, "audio_tdmout_a_sclk"), + CLK_MSR_ID(113, "audio_tdmin_lb_sclk"), + CLK_MSR_ID(114, "audio_tdmin_c_sclk"), + CLK_MSR_ID(115, "audio_tdmin_b_sclk"), + CLK_MSR_ID(116, "audio_tdmin_a_sclk"), + CLK_MSR_ID(117, "audio_resample"), + CLK_MSR_ID(118, "audio_pdm_sys"), + CLK_MSR_ID(119, "audio_spdifout_b"), + CLK_MSR_ID(120, "audio_spdifout"), + CLK_MSR_ID(121, "audio_spdifin"), + CLK_MSR_ID(122, "audio_pdm_dclk"), +}; + static int meson_measure_id(struct meson_msr_id *clk_msr_id, unsigned int duration) { @@ -337,6 +525,14 @@ static const struct of_device_id meson_msr_match_table[] = { .compatible = "amlogic,meson8b-clk-measure", .data = (void *)clk_msr_m8, }, + { + .compatible = "amlogic,meson-axg-clk-measure", + .data = (void *)clk_msr_axg, + }, + { + .compatible = "amlogic,meson-g12a-clk-measure", + .data = (void *)clk_msr_g12a, + }, { /* sentinel */ } }; diff --git a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig index 055a845ed979..03fa91fbe2da 100644 --- a/drivers/soc/bcm/Kconfig +++ b/drivers/soc/bcm/Kconfig @@ -1,5 +1,17 @@ menu "Broadcom SoC drivers" +config BCM2835_POWER + bool "BCM2835 power domain driver" + depends on ARCH_BCM2835 || (COMPILE_TEST && OF) + default y if ARCH_BCM2835 + select PM_GENERIC_DOMAINS if PM + select RESET_CONTROLLER + help + This enables support for the BCM2835 power domains and reset + controller. Any usage of power domains by the Raspberry Pi + firmware means that Linux usage of the same power domain + must be accessed using the RASPBERRYPI_POWER driver + config RASPBERRYPI_POWER bool "Raspberry Pi power domain driver" depends on ARCH_BCM2835 || (COMPILE_TEST && OF) diff --git a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile index dc4fced72d21..c81df4b2403c 100644 --- a/drivers/soc/bcm/Makefile +++ b/drivers/soc/bcm/Makefile @@ -1,2 +1,3 @@ +obj-$(CONFIG_BCM2835_POWER) += bcm2835-power.o obj-$(CONFIG_RASPBERRYPI_POWER) += raspberrypi-power.o obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/ diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c new file mode 100644 index 000000000000..9351349cf0a9 --- /dev/null +++ b/drivers/soc/bcm/bcm2835-power.c @@ -0,0 +1,661 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Power domain driver for Broadcom BCM2835 + * + * Copyright (C) 2018 Broadcom + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PM_GNRIC 0x00 +#define PM_AUDIO 0x04 +#define PM_STATUS 0x18 +#define PM_RSTC 0x1c +#define PM_RSTS 0x20 +#define PM_WDOG 0x24 +#define PM_PADS0 0x28 +#define PM_PADS2 0x2c +#define PM_PADS3 0x30 +#define PM_PADS4 0x34 +#define PM_PADS5 0x38 +#define PM_PADS6 0x3c +#define PM_CAM0 0x44 +#define PM_CAM0_LDOHPEN BIT(2) +#define PM_CAM0_LDOLPEN BIT(1) +#define PM_CAM0_CTRLEN BIT(0) + +#define PM_CAM1 0x48 +#define PM_CAM1_LDOHPEN BIT(2) +#define PM_CAM1_LDOLPEN BIT(1) +#define PM_CAM1_CTRLEN BIT(0) + +#define PM_CCP2TX 0x4c +#define PM_CCP2TX_LDOEN BIT(1) +#define PM_CCP2TX_CTRLEN BIT(0) + +#define PM_DSI0 0x50 +#define PM_DSI0_LDOHPEN BIT(2) +#define PM_DSI0_LDOLPEN BIT(1) +#define PM_DSI0_CTRLEN BIT(0) + +#define PM_DSI1 0x54 +#define PM_DSI1_LDOHPEN BIT(2) 
+#define PM_DSI1_LDOLPEN BIT(1) +#define PM_DSI1_CTRLEN BIT(0) + +#define PM_HDMI 0x58 +#define PM_HDMI_RSTDR BIT(19) +#define PM_HDMI_LDOPD BIT(1) +#define PM_HDMI_CTRLEN BIT(0) + +#define PM_USB 0x5c +/* The power gates must be enabled with this bit before enabling the LDO in the + * USB block. + */ +#define PM_USB_CTRLEN BIT(0) + +#define PM_PXLDO 0x60 +#define PM_PXBG 0x64 +#define PM_DFT 0x68 +#define PM_SMPS 0x6c +#define PM_XOSC 0x70 +#define PM_SPAREW 0x74 +#define PM_SPARER 0x78 +#define PM_AVS_RSTDR 0x7c +#define PM_AVS_STAT 0x80 +#define PM_AVS_EVENT 0x84 +#define PM_AVS_INTEN 0x88 +#define PM_DUMMY 0xfc + +#define PM_IMAGE 0x108 +#define PM_GRAFX 0x10c +#define PM_PROC 0x110 +#define PM_ENAB BIT(12) +#define PM_ISPRSTN BIT(8) +#define PM_H264RSTN BIT(7) +#define PM_PERIRSTN BIT(6) +#define PM_V3DRSTN BIT(6) +#define PM_ISFUNC BIT(5) +#define PM_MRDONE BIT(4) +#define PM_MEMREP BIT(3) +#define PM_ISPOW BIT(2) +#define PM_POWOK BIT(1) +#define PM_POWUP BIT(0) +#define PM_INRUSH_SHIFT 13 +#define PM_INRUSH_3_5_MA 0 +#define PM_INRUSH_5_MA 1 +#define PM_INRUSH_10_MA 2 +#define PM_INRUSH_20_MA 3 +#define PM_INRUSH_MASK (3 << PM_INRUSH_SHIFT) + +#define PM_PASSWORD 0x5a000000 + +#define PM_WDOG_TIME_SET 0x000fffff +#define PM_RSTC_WRCFG_CLR 0xffffffcf +#define PM_RSTS_HADWRH_SET 0x00000040 +#define PM_RSTC_WRCFG_SET 0x00000030 +#define PM_RSTC_WRCFG_FULL_RESET 0x00000020 +#define PM_RSTC_RESET 0x00000102 + +#define PM_READ(reg) readl(power->base + (reg)) +#define PM_WRITE(reg, val) writel(PM_PASSWORD | (val), power->base + (reg)) + +#define ASB_BRDG_VERSION 0x00 +#define ASB_CPR_CTRL 0x04 + +#define ASB_V3D_S_CTRL 0x08 +#define ASB_V3D_M_CTRL 0x0c +#define ASB_ISP_S_CTRL 0x10 +#define ASB_ISP_M_CTRL 0x14 +#define ASB_H264_S_CTRL 0x18 +#define ASB_H264_M_CTRL 0x1c + +#define ASB_REQ_STOP BIT(0) +#define ASB_ACK BIT(1) +#define ASB_EMPTY BIT(2) +#define ASB_FULL BIT(3) + +#define ASB_AXI_BRDG_ID 0x20 + +#define ASB_READ(reg) readl(power->asb + (reg)) +#define ASB_WRITE(reg, val) writel(PM_PASSWORD | (val), power->asb + (reg)) + +struct bcm2835_power_domain { + struct generic_pm_domain base; + struct bcm2835_power *power; + u32 domain; + struct clk *clk; +}; + +struct bcm2835_power { + struct device *dev; + /* PM registers. */ + void __iomem *base; + /* AXI Async bridge registers. */ + void __iomem *asb; + + struct genpd_onecell_data pd_xlate; + struct bcm2835_power_domain domains[BCM2835_POWER_DOMAIN_COUNT]; + struct reset_controller_dev reset; +}; + +static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) +{ + u64 start = ktime_get_ns(); + + /* Enable the module's async AXI bridges. */ + ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); + while (ASB_READ(reg) & ASB_ACK) { + cpu_relax(); + if (ktime_get_ns() - start >= 1000) + return -ETIMEDOUT; + } + + return 0; +} + +static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) +{ + u64 start = ktime_get_ns(); + + /* Enable the module's async AXI bridges. */ + ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); + while (!(ASB_READ(reg) & ASB_ACK)) { + cpu_relax(); + if (ktime_get_ns() - start >= 1000) + return -ETIMEDOUT; + } + + return 0; +} + +static int bcm2835_power_power_off(struct bcm2835_power_domain *pd, u32 pm_reg) +{ + struct bcm2835_power *power = pd->power; + + /* Enable functional isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISFUNC); + + /* Enable electrical isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW); + + /* Open the power switches. 
*/ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_POWUP); + + return 0; +} + +static int bcm2835_power_power_on(struct bcm2835_power_domain *pd, u32 pm_reg) +{ + struct bcm2835_power *power = pd->power; + struct device *dev = power->dev; + u64 start; + int ret; + int inrush; + bool powok; + + /* If it was already powered on by the fw, leave it that way. */ + if (PM_READ(pm_reg) & PM_POWUP) + return 0; + + /* Enable power. Allowing too much current at once may result + * in POWOK never getting set, so start low and ramp it up as + * necessary to succeed. + */ + powok = false; + for (inrush = PM_INRUSH_3_5_MA; inrush <= PM_INRUSH_20_MA; inrush++) { + PM_WRITE(pm_reg, + (PM_READ(pm_reg) & ~PM_INRUSH_MASK) | + (inrush << PM_INRUSH_SHIFT) | + PM_POWUP); + + start = ktime_get_ns(); + while (!(powok = !!(PM_READ(pm_reg) & PM_POWOK))) { + cpu_relax(); + if (ktime_get_ns() - start >= 3000) + break; + } + } + if (!powok) { + dev_err(dev, "Timeout waiting for %s power OK\n", + pd->base.name); + ret = -ETIMEDOUT; + goto err_disable_powup; + } + + /* Disable electrical isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISPOW); + + /* Repair memory */ + PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_MEMREP); + start = ktime_get_ns(); + while (!(PM_READ(pm_reg) & PM_MRDONE)) { + cpu_relax(); + if (ktime_get_ns() - start >= 1000) { + dev_err(dev, "Timeout waiting for %s memory repair\n", + pd->base.name); + ret = -ETIMEDOUT; + goto err_disable_ispow; + } + } + + /* Disable functional isolation */ + PM_WRITE(pm_reg, PM_READ(pm_reg) | PM_ISFUNC); + + return 0; + +err_disable_ispow: + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~PM_ISPOW); +err_disable_powup: + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~(PM_POWUP | PM_INRUSH_MASK)); + return ret; +} + +static int bcm2835_asb_power_on(struct bcm2835_power_domain *pd, + u32 pm_reg, + u32 asb_m_reg, + u32 asb_s_reg, + u32 reset_flags) +{ + struct bcm2835_power *power = pd->power; + int ret; + + ret = clk_prepare_enable(pd->clk); + if (ret) { + dev_err(power->dev, "Failed to enable clock for %s\n", + pd->base.name); + return ret; + } + + /* Wait 32 clocks for reset to propagate, 1 us will be enough */ + udelay(1); + + clk_disable_unprepare(pd->clk); + + /* Deassert the resets. 
*/ + PM_WRITE(pm_reg, PM_READ(pm_reg) | reset_flags); + + ret = clk_prepare_enable(pd->clk); + if (ret) { + dev_err(power->dev, "Failed to enable clock for %s\n", + pd->base.name); + goto err_enable_resets; + } + + ret = bcm2835_asb_enable(power, asb_m_reg); + if (ret) { + dev_err(power->dev, "Failed to enable ASB master for %s\n", + pd->base.name); + goto err_disable_clk; + } + ret = bcm2835_asb_enable(power, asb_s_reg); + if (ret) { + dev_err(power->dev, "Failed to enable ASB slave for %s\n", + pd->base.name); + goto err_disable_asb_master; + } + + return 0; + +err_disable_asb_master: + bcm2835_asb_disable(power, asb_m_reg); +err_disable_clk: + clk_disable_unprepare(pd->clk); +err_enable_resets: + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags); + return ret; +} + +static int bcm2835_asb_power_off(struct bcm2835_power_domain *pd, + u32 pm_reg, + u32 asb_m_reg, + u32 asb_s_reg, + u32 reset_flags) +{ + struct bcm2835_power *power = pd->power; + int ret; + + ret = bcm2835_asb_disable(power, asb_s_reg); + if (ret) { + dev_warn(power->dev, "Failed to disable ASB slave for %s\n", + pd->base.name); + return ret; + } + ret = bcm2835_asb_disable(power, asb_m_reg); + if (ret) { + dev_warn(power->dev, "Failed to disable ASB master for %s\n", + pd->base.name); + bcm2835_asb_enable(power, asb_s_reg); + return ret; + } + + clk_disable_unprepare(pd->clk); + + /* Assert the resets. */ + PM_WRITE(pm_reg, PM_READ(pm_reg) & ~reset_flags); + + return 0; +} + +static int bcm2835_power_pd_power_on(struct generic_pm_domain *domain) +{ + struct bcm2835_power_domain *pd = + container_of(domain, struct bcm2835_power_domain, base); + struct bcm2835_power *power = pd->power; + + switch (pd->domain) { + case BCM2835_POWER_DOMAIN_GRAFX: + return bcm2835_power_power_on(pd, PM_GRAFX); + + case BCM2835_POWER_DOMAIN_GRAFX_V3D: + return bcm2835_asb_power_on(pd, PM_GRAFX, + ASB_V3D_M_CTRL, ASB_V3D_S_CTRL, + PM_V3DRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE: + return bcm2835_power_power_on(pd, PM_IMAGE); + + case BCM2835_POWER_DOMAIN_IMAGE_PERI: + return bcm2835_asb_power_on(pd, PM_IMAGE, + 0, 0, + PM_PERIRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_ISP: + return bcm2835_asb_power_on(pd, PM_IMAGE, + ASB_ISP_M_CTRL, ASB_ISP_S_CTRL, + PM_ISPRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_H264: + return bcm2835_asb_power_on(pd, PM_IMAGE, + ASB_H264_M_CTRL, ASB_H264_S_CTRL, + PM_H264RSTN); + + case BCM2835_POWER_DOMAIN_USB: + PM_WRITE(PM_USB, PM_USB_CTRLEN); + return 0; + + case BCM2835_POWER_DOMAIN_DSI0: + PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN); + PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN | PM_DSI0_LDOHPEN); + return 0; + + case BCM2835_POWER_DOMAIN_DSI1: + PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN); + PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN | PM_DSI1_LDOHPEN); + return 0; + + case BCM2835_POWER_DOMAIN_CCP2TX: + PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN); + PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN | PM_CCP2TX_LDOEN); + return 0; + + case BCM2835_POWER_DOMAIN_HDMI: + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_RSTDR); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_CTRLEN); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_LDOPD); + usleep_range(100, 200); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_RSTDR); + return 0; + + default: + dev_err(power->dev, "Invalid domain %d\n", pd->domain); + return -EINVAL; + } +} + +static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain) +{ + struct bcm2835_power_domain *pd = + container_of(domain, struct bcm2835_power_domain, base); + struct bcm2835_power *power = pd->power; + + switch (pd->domain) { + case 
BCM2835_POWER_DOMAIN_GRAFX: + return bcm2835_power_power_off(pd, PM_GRAFX); + + case BCM2835_POWER_DOMAIN_GRAFX_V3D: + return bcm2835_asb_power_off(pd, PM_GRAFX, + ASB_V3D_M_CTRL, ASB_V3D_S_CTRL, + PM_V3DRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE: + return bcm2835_power_power_off(pd, PM_IMAGE); + + case BCM2835_POWER_DOMAIN_IMAGE_PERI: + return bcm2835_asb_power_off(pd, PM_IMAGE, + 0, 0, + PM_PERIRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_ISP: + return bcm2835_asb_power_off(pd, PM_IMAGE, + ASB_ISP_M_CTRL, ASB_ISP_S_CTRL, + PM_ISPRSTN); + + case BCM2835_POWER_DOMAIN_IMAGE_H264: + return bcm2835_asb_power_off(pd, PM_IMAGE, + ASB_H264_M_CTRL, ASB_H264_S_CTRL, + PM_H264RSTN); + + case BCM2835_POWER_DOMAIN_USB: + PM_WRITE(PM_USB, 0); + return 0; + + case BCM2835_POWER_DOMAIN_DSI0: + PM_WRITE(PM_DSI0, PM_DSI0_CTRLEN); + PM_WRITE(PM_DSI0, 0); + return 0; + + case BCM2835_POWER_DOMAIN_DSI1: + PM_WRITE(PM_DSI1, PM_DSI1_CTRLEN); + PM_WRITE(PM_DSI1, 0); + return 0; + + case BCM2835_POWER_DOMAIN_CCP2TX: + PM_WRITE(PM_CCP2TX, PM_CCP2TX_CTRLEN); + PM_WRITE(PM_CCP2TX, 0); + return 0; + + case BCM2835_POWER_DOMAIN_HDMI: + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) | PM_HDMI_LDOPD); + PM_WRITE(PM_HDMI, PM_READ(PM_HDMI) & ~PM_HDMI_CTRLEN); + return 0; + + default: + dev_err(power->dev, "Invalid domain %d\n", pd->domain); + return -EINVAL; + } +} + +static void +bcm2835_init_power_domain(struct bcm2835_power *power, + int pd_xlate_index, const char *name) +{ + struct device *dev = power->dev; + struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; + + dom->clk = devm_clk_get(dev->parent, name); + + dom->base.name = name; + dom->base.power_on = bcm2835_power_pd_power_on; + dom->base.power_off = bcm2835_power_pd_power_off; + + dom->domain = pd_xlate_index; + dom->power = power; + + /* XXX: on/off at boot? */ + pm_genpd_init(&dom->base, NULL, true); + + power->pd_xlate.domains[pd_xlate_index] = &dom->base; +} + +/** bcm2835_reset_reset - Resets a block that has a reset line in the + * PM block. + * + * The consumer of the reset controller must have the power domain up + * -- there's no reset ability with the power domain down. To reset + * the sub-block, we just disable its access to memory through the + * ASB, reset, and re-enable. 
+ */ +static int bcm2835_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power, + reset); + struct bcm2835_power_domain *pd; + int ret; + + switch (id) { + case BCM2835_RESET_V3D: + pd = &power->domains[BCM2835_POWER_DOMAIN_GRAFX_V3D]; + break; + case BCM2835_RESET_H264: + pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_H264]; + break; + case BCM2835_RESET_ISP: + pd = &power->domains[BCM2835_POWER_DOMAIN_IMAGE_ISP]; + break; + default: + dev_err(power->dev, "Bad reset id %ld\n", id); + return -EINVAL; + } + + ret = bcm2835_power_pd_power_off(&pd->base); + if (ret) + return ret; + + return bcm2835_power_pd_power_on(&pd->base); +} + +static int bcm2835_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct bcm2835_power *power = container_of(rcdev, struct bcm2835_power, + reset); + + switch (id) { + case BCM2835_RESET_V3D: + return !PM_READ(PM_GRAFX & PM_V3DRSTN); + case BCM2835_RESET_H264: + return !PM_READ(PM_IMAGE & PM_H264RSTN); + case BCM2835_RESET_ISP: + return !PM_READ(PM_IMAGE & PM_ISPRSTN); + default: + return -EINVAL; + } +} + +static const struct reset_control_ops bcm2835_reset_ops = { + .reset = bcm2835_reset_reset, + .status = bcm2835_reset_status, +}; + +static const char *const power_domain_names[] = { + [BCM2835_POWER_DOMAIN_GRAFX] = "grafx", + [BCM2835_POWER_DOMAIN_GRAFX_V3D] = "v3d", + + [BCM2835_POWER_DOMAIN_IMAGE] = "image", + [BCM2835_POWER_DOMAIN_IMAGE_PERI] = "peri_image", + [BCM2835_POWER_DOMAIN_IMAGE_H264] = "h264", + [BCM2835_POWER_DOMAIN_IMAGE_ISP] = "isp", + + [BCM2835_POWER_DOMAIN_USB] = "usb", + [BCM2835_POWER_DOMAIN_DSI0] = "dsi0", + [BCM2835_POWER_DOMAIN_DSI1] = "dsi1", + [BCM2835_POWER_DOMAIN_CAM0] = "cam0", + [BCM2835_POWER_DOMAIN_CAM1] = "cam1", + [BCM2835_POWER_DOMAIN_CCP2TX] = "ccp2tx", + [BCM2835_POWER_DOMAIN_HDMI] = "hdmi", +}; + +static int bcm2835_power_probe(struct platform_device *pdev) +{ + struct bcm2835_pm *pm = dev_get_drvdata(pdev->dev.parent); + struct device *dev = &pdev->dev; + struct bcm2835_power *power; + static const struct { + int parent, child; + } domain_deps[] = { + { BCM2835_POWER_DOMAIN_GRAFX, BCM2835_POWER_DOMAIN_GRAFX_V3D }, + { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_PERI }, + { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_H264 }, + { BCM2835_POWER_DOMAIN_IMAGE, BCM2835_POWER_DOMAIN_IMAGE_ISP }, + { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_USB }, + { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, + { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, + }; + int ret, i; + u32 id; + + power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); + if (!power) + return -ENOMEM; + platform_set_drvdata(pdev, power); + + power->dev = dev; + power->base = pm->base; + power->asb = pm->asb; + + id = ASB_READ(ASB_AXI_BRDG_ID); + if (id != 0x62726467 /* "BRDG" */) { + dev_err(dev, "ASB register ID returned 0x%08x\n", id); + return -ENODEV; + } + + power->pd_xlate.domains = devm_kcalloc(dev, + ARRAY_SIZE(power_domain_names), + sizeof(*power->pd_xlate.domains), + GFP_KERNEL); + if (!power->pd_xlate.domains) + return -ENOMEM; + + power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); + + for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) + bcm2835_init_power_domain(power, i, power_domain_names[i]); + + for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { + pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, + &power->domains[domain_deps[i].child].base); 
+ } + + power->reset.owner = THIS_MODULE; + power->reset.nr_resets = BCM2835_RESET_COUNT; + power->reset.ops = &bcm2835_reset_ops; + power->reset.of_node = dev->parent->of_node; + + ret = devm_reset_controller_register(dev, &power->reset); + if (ret) + return ret; + + of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); + + dev_info(dev, "Broadcom BCM2835 power domains driver"); + return 0; +} + +static int bcm2835_power_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver bcm2835_power_driver = { + .probe = bcm2835_power_probe, + .remove = bcm2835_power_remove, + .driver = { + .name = "bcm2835-power", + }, +}; +module_platform_driver(bcm2835_power_driver); + +MODULE_AUTHOR("Eric Anholt "); +MODULE_DESCRIPTION("Driver for Broadcom BCM2835 PM power domains and reset"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig index 8f80e8bbf29e..61f8e1433d0a 100644 --- a/drivers/soc/fsl/Kconfig +++ b/drivers/soc/fsl/Kconfig @@ -22,6 +22,7 @@ config FSL_GUTS config FSL_MC_DPIO tristate "QorIQ DPAA2 DPIO driver" depends on FSL_MC_BUS + select SOC_BUS help Driver for the DPAA2 DPIO object. A DPIO provides queue and buffer management facilities for software to interact with diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h index ab8f82ee7ee5..e13fd3ac1939 100644 --- a/drivers/soc/fsl/dpio/dpio-cmd.h +++ b/drivers/soc/fsl/dpio/dpio-cmd.h @@ -25,6 +25,8 @@ #define DPIO_CMDID_ENABLE DPIO_CMD(0x002) #define DPIO_CMDID_DISABLE DPIO_CMD(0x003) #define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004) +#define DPIO_CMDID_RESET DPIO_CMD(0x005) +#define DPIO_CMDID_SET_STASHING_DEST DPIO_CMD(0x120) struct dpio_cmd_open { __le32 dpio_id; @@ -46,4 +48,8 @@ struct dpio_rsp_get_attr { __le32 qbman_version; }; +struct dpio_stashing_dest { + u8 sdest; +}; + #endif /* _FSL_DPIO_CMD_H */ diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c index e58fcc9096e8..c0cdc8946031 100644 --- a/drivers/soc/fsl/dpio/dpio-driver.c +++ b/drivers/soc/fsl/dpio/dpio-driver.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,48 @@ struct dpio_priv { struct dpaa2_io *io; }; +static cpumask_var_t cpus_unused_mask; + +static const struct soc_device_attribute ls1088a_soc[] = { + {.family = "QorIQ LS1088A"}, + { /* sentinel */ } +}; + +static const struct soc_device_attribute ls2080a_soc[] = { + {.family = "QorIQ LS2080A"}, + { /* sentinel */ } +}; + +static const struct soc_device_attribute ls2088a_soc[] = { + {.family = "QorIQ LS2088A"}, + { /* sentinel */ } +}; + +static const struct soc_device_attribute lx2160a_soc[] = { + {.family = "QorIQ LX2160A"}, + { /* sentinel */ } +}; + +static int dpaa2_dpio_get_cluster_sdest(struct fsl_mc_device *dpio_dev, int cpu) +{ + int cluster_base, cluster_size; + + if (soc_device_match(ls1088a_soc)) { + cluster_base = 2; + cluster_size = 4; + } else if (soc_device_match(ls2080a_soc) || + soc_device_match(ls2088a_soc) || + soc_device_match(lx2160a_soc)) { + cluster_base = 0; + cluster_size = 2; + } else { + dev_err(&dpio_dev->dev, "unknown SoC version\n"); + return -1; + } + + return cluster_base + cpu / cluster_size; +} + static irqreturn_t dpio_irq_handler(int irq_num, void *arg) { struct device *dev = (struct device *)arg; @@ -86,7 +129,8 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) struct dpio_priv *priv; int err = -ENOMEM; struct device *dev = &dpio_dev->dev; - static int next_cpu = -1; + int possible_next_cpu; + 
int sdest; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -108,6 +152,12 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) goto err_open; } + err = dpio_reset(dpio_dev->mc_io, 0, dpio_dev->mc_handle); + if (err) { + dev_err(dev, "dpio_reset() failed\n"); + goto err_reset; + } + err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle, &dpio_attrs); if (err) { @@ -128,17 +178,24 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) desc.dpio_id = dpio_dev->obj_desc.id; /* get the cpu to use for the affinity hint */ - if (next_cpu == -1) - next_cpu = cpumask_first(cpu_online_mask); - else - next_cpu = cpumask_next(next_cpu, cpu_online_mask); - - if (!cpu_possible(next_cpu)) { + possible_next_cpu = cpumask_first(cpus_unused_mask); + if (possible_next_cpu >= nr_cpu_ids) { dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n"); err = -ERANGE; goto err_allocate_irqs; } - desc.cpu = next_cpu; + desc.cpu = possible_next_cpu; + cpumask_clear_cpu(possible_next_cpu, cpus_unused_mask); + + sdest = dpaa2_dpio_get_cluster_sdest(dpio_dev, desc.cpu); + if (sdest >= 0) { + err = dpio_set_stashing_destination(dpio_dev->mc_io, 0, + dpio_dev->mc_handle, + sdest); + if (err) + dev_err(dev, "dpio_set_stashing_destination failed for cpu%d\n", + desc.cpu); + } /* * Set the CENA regs to be the cache inhibited area of the portal to @@ -171,7 +228,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) if (err) goto err_register_dpio_irq; - priv->io = dpaa2_io_create(&desc); + priv->io = dpaa2_io_create(&desc, dev); if (!priv->io) { dev_err(dev, "dpaa2_io_create failed\n"); err = -ENOMEM; @@ -182,7 +239,6 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev) dev_dbg(dev, " receives_notifications = %d\n", desc.receives_notifications); dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle); - fsl_mc_portal_free(dpio_dev->mc_io); return 0; @@ -193,6 +249,7 @@ err_register_dpio_irq: err_allocate_irqs: dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle); err_get_attr: +err_reset: dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle); err_open: fsl_mc_portal_free(dpio_dev->mc_io); @@ -211,20 +268,17 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev) { struct device *dev; struct dpio_priv *priv; - int err; + int err = 0, cpu; dev = &dpio_dev->dev; priv = dev_get_drvdata(dev); + cpu = dpaa2_io_get_cpu(priv->io); dpaa2_io_down(priv->io); dpio_teardown_irqs(dpio_dev); - err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io); - if (err) { - dev_err(dev, "MC portal allocation failed\n"); - goto err_mcportal; - } + cpumask_set_cpu(cpu, cpus_unused_mask); err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id, &dpio_dev->mc_handle); @@ -243,7 +297,7 @@ static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev) err_open: fsl_mc_portal_free(dpio_dev->mc_io); -err_mcportal: + return err; } @@ -267,11 +321,16 @@ static struct fsl_mc_driver dpaa2_dpio_driver = { static int dpio_driver_init(void) { + if (!zalloc_cpumask_var(&cpus_unused_mask, GFP_KERNEL)) + return -ENOMEM; + cpumask_copy(cpus_unused_mask, cpu_online_mask); + return fsl_mc_driver_register(&dpaa2_dpio_driver); } static void dpio_driver_exit(void) { + free_cpumask_var(cpus_unused_mask); fsl_mc_driver_unregister(&dpaa2_dpio_driver); } module_init(dpio_driver_init); diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c index ec0837ff039a..b9539ef2c3cd 100644 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c @@ 
-27,6 +27,7 @@ struct dpaa2_io { /* protect notifications list */ spinlock_t lock_notifications; struct list_head notifications; + struct device *dev; }; struct dpaa2_io_store { @@ -98,13 +99,15 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_select); /** * dpaa2_io_create() - create a dpaa2_io object. * @desc: the dpaa2_io descriptor + * @dev: the actual DPIO device * * Activates a "struct dpaa2_io" corresponding to the given config of an actual * DPIO object. * * Return a valid dpaa2_io object for success, or NULL for failure. */ -struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) +struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc, + struct device *dev) { struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL); @@ -146,6 +149,8 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) dpio_by_cpu[desc->cpu] = obj; spin_unlock(&dpio_list_lock); + obj->dev = dev; + return obj; } @@ -160,6 +165,11 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) */ void dpaa2_io_down(struct dpaa2_io *d) { + spin_lock(&dpio_list_lock); + dpio_by_cpu[d->dpio_desc.cpu] = NULL; + list_del(&d->node); + spin_unlock(&dpio_list_lock); + kfree(d); } @@ -209,11 +219,25 @@ done: return IRQ_HANDLED; } +/** + * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object + * + * @d: the given DPIO object. + * + * Return the cpu associated with the DPIO object + */ +int dpaa2_io_get_cpu(struct dpaa2_io *d) +{ + return d->dpio_desc.cpu; +} +EXPORT_SYMBOL(dpaa2_io_get_cpu); + /** * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN * notifications on the given DPIO service. * @d: the given DPIO service. * @ctx: the notification context. + * @dev: the device that requests the register * * The caller should make the MC command to attach a DPAA2 object to * a DPIO after this function completes successfully. In that way: @@ -228,14 +252,20 @@ done: * Return 0 for success, or -ENODEV for failure. */ int dpaa2_io_service_register(struct dpaa2_io *d, - struct dpaa2_io_notification_ctx *ctx) + struct dpaa2_io_notification_ctx *ctx, + struct device *dev) { + struct device_link *link; unsigned long irqflags; d = service_select_by_cpu(d, ctx->desired_cpu); if (!d) return -ENODEV; + link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER); + if (!link) + return -EINVAL; + ctx->dpio_id = d->dpio_desc.dpio_id; ctx->qman64 = (u64)(uintptr_t)ctx; ctx->dpio_private = d; @@ -256,12 +286,14 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_register); * dpaa2_io_service_deregister - The opposite of 'register'. * @service: the given DPIO service. * @ctx: the notification context. + * @dev: the device that requests to be deregistered * * This function should be called only after sending the MC command to * to detach the notification-producing device from the DPIO. */ void dpaa2_io_service_deregister(struct dpaa2_io *service, - struct dpaa2_io_notification_ctx *ctx) + struct dpaa2_io_notification_ctx *ctx, + struct device *dev) { struct dpaa2_io *d = ctx->dpio_private; unsigned long irqflags; @@ -272,6 +304,9 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service, spin_lock_irqsave(&d->lock_notifications, irqflags); list_del(&ctx->node); spin_unlock_irqrestore(&d->lock_notifications, irqflags); + + if (dev) + device_link_remove(dev, d->dev); } EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister); @@ -438,7 +473,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd); * Return 0 for success, and negative error code for failure. 
*/ int dpaa2_io_service_release(struct dpaa2_io *d, - u32 bpid, + u16 bpid, const u64 *buffers, unsigned int num_buffers) { @@ -467,7 +502,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_release); * Eg. if the buffer pool is empty, this will return zero. */ int dpaa2_io_service_acquire(struct dpaa2_io *d, - u32 bpid, + u16 bpid, u64 *buffers, unsigned int num_buffers) { @@ -595,6 +630,7 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) ret = NULL; } else { + prefetch(&s->vaddr[s->idx]); *is_last = 0; } diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c index ff37c80e11a0..af74c597a675 100644 --- a/drivers/soc/fsl/dpio/dpio.c +++ b/drivers/soc/fsl/dpio/dpio.c @@ -166,6 +166,22 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io, return 0; } +int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 sdest) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpio_stashing_dest *dpio_cmd; + + cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, + cmd_flags, token); + dpio_cmd = (struct dpio_stashing_dest *)cmd.params; + dpio_cmd->sdest = sdest; + + return mc_send_command(mc_io, &cmd); +} + /** * dpio_get_api_version - Get Data Path I/O API version * @mc_io: Pointer to MC portal's DPIO object @@ -196,3 +212,26 @@ int dpio_get_api_version(struct fsl_mc_io *mc_io, return 0; } + +/** + * dpio_reset() - Reset the DPIO, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPIO object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpio_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h index 49194c8e45f1..da06f7258098 100644 --- a/drivers/soc/fsl/dpio/dpio.h +++ b/drivers/soc/fsl/dpio/dpio.h @@ -75,9 +75,18 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io, u16 token, struct dpio_attr *attr); +int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 dest); + int dpio_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 *major_ver, u16 *minor_ver); +int dpio_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + #endif /* __FSL_DPIO_H */ diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c index 0bddb85c0ae5..d02013556a1b 100644 --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@ -169,9 +169,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) 3, /* RPM: Valid bit mode, RCR in array mode */ 2, /* DCM: Discrete consumption ack mode */ 3, /* EPM: Valid bit mode, EQCR in array mode */ - 0, /* mem stashing drop enable == FALSE */ + 1, /* mem stashing drop enable == TRUE */ 1, /* mem stashing priority == TRUE */ - 0, /* mem stashing enable == FALSE */ + 1, /* mem stashing enable == TRUE */ 1, /* dequeue stashing priority == TRUE */ 0, /* dequeue stashing enable == FALSE */ 0); /* EQCR_CI stashing priority == FALSE */ @@ -180,6 +180,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); if (!reg) { pr_err("qbman: the portal is not enabled!\n"); + 
kfree(p); return NULL; } diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c index 302e0c8d69d9..63f6df86f9e5 100644 --- a/drivers/soc/fsl/guts.c +++ b/drivers/soc/fsl/guts.c @@ -32,6 +32,7 @@ struct fsl_soc_die_attr { static struct guts *guts; static struct soc_device_attribute soc_dev_attr; static struct soc_device *soc_dev; +static struct device_node *root; /* SoC die attribute definition for QorIQ platform */ @@ -114,7 +115,7 @@ static const struct fsl_soc_die_attr *fsl_soc_die_match( return NULL; } -u32 fsl_guts_get_svr(void) +static u32 fsl_guts_get_svr(void) { u32 svr = 0; @@ -128,11 +129,10 @@ u32 fsl_guts_get_svr(void) return svr; } -EXPORT_SYMBOL(fsl_guts_get_svr); static int fsl_guts_probe(struct platform_device *pdev) { - struct device_node *root, *np = pdev->dev.of_node; + struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct resource *res; const struct fsl_soc_die_attr *soc_die; @@ -155,9 +155,8 @@ static int fsl_guts_probe(struct platform_device *pdev) root = of_find_node_by_path("/"); if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); - of_node_put(root); if (machine) - soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + soc_dev_attr.machine = machine; svr = fsl_guts_get_svr(); soc_die = fsl_soc_die_match(svr, fsl_soc_die); @@ -192,6 +191,7 @@ static int fsl_guts_probe(struct platform_device *pdev) static int fsl_guts_remove(struct platform_device *dev) { soc_device_unregister(soc_dev); + of_node_put(root); return 0; } diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 52c153cd795a..636f83f781f5 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c @@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work); static irqreturn_t portal_isr(int irq, void *ptr) { struct qman_portal *p = ptr; - - u32 clear = QM_DQAVAIL_MASK | p->irq_sources; u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; + u32 clear = 0; if (unlikely(!is)) return IRQ_NONE; /* DQRR-handling if it's interrupt-driven */ - if (is & QM_PIRQ_DQRI) + if (is & QM_PIRQ_DQRI) { __poll_portal_fast(p, QMAN_POLL_LIMIT); + clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; + } /* Handling of anything else that's interrupt-driven */ - clear |= __poll_portal_slow(p, is); + clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; qm_out(&p->p, QM_REG_ISR, clear); return IRQ_HANDLED; } diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index 2112d18dbb7b..d80f899d22f9 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -2,7 +2,7 @@ menu "i.MX SoC drivers" config IMX_GPCV2_PM_DOMAINS bool "i.MX GPCv2 PM domains" - depends on SOC_IMX7D || SOC_IMX8MQ || (COMPILE_TEST && OF) + depends on ARCH_MXC || (COMPILE_TEST && OF) depends on PM select PM_GENERIC_DOMAINS default y if SOC_IMX7D diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 8b4f48a2ca57..176f473127b6 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -8,6 +8,7 @@ * Copyright 2015-2017 Pengutronix, Lucas Stach */ +#include #include #include #include @@ -65,6 +66,12 @@ #define GPC_M4_PU_PDN_FLG 0x1bc +#define GPC_PU_PWRHSK 0x1fc + +#define IMX8M_GPU_HSK_PWRDNREQN BIT(6) +#define IMX8M_VPU_HSK_PWRDNREQN BIT(5) +#define IMX8M_DISP_HSK_PWRDNREQN BIT(4) + /* * The PGC offset values in Reference Manual * (Rev. 
1, 01/2018 and the older ones) GPC chapter's @@ -92,16 +99,21 @@ #define GPC_PGC_CTRL_PCR BIT(0) +#define GPC_CLK_MAX 6 + struct imx_pgc_domain { struct generic_pm_domain genpd; struct regmap *regmap; struct regulator *regulator; + struct clk *clk[GPC_CLK_MAX]; + int num_clks; unsigned int pgc; const struct { u32 pxx; u32 map; + u32 hsk; } bits; const int voltage; @@ -125,7 +137,7 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, const bool enable_power_control = !on; const bool has_regulator = !IS_ERR(domain->regulator); unsigned long deadline; - int ret = 0; + int i, ret = 0; regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING, domain->bits.map, domain->bits.map); @@ -138,10 +150,18 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, } } + /* Enable reset clocks for all devices in the domain */ + for (i = 0; i < domain->num_clks; i++) + clk_prepare_enable(domain->clk[i]); + if (enable_power_control) regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc), GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR); + if (domain->bits.hsk) + regmap_update_bits(domain->regmap, GPC_PU_PWRHSK, + domain->bits.hsk, on ? domain->bits.hsk : 0); + regmap_update_bits(domain->regmap, offset, domain->bits.pxx, domain->bits.pxx); @@ -179,6 +199,10 @@ static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd, regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc), GPC_PGC_CTRL_PCR, 0); + /* Disable reset clocks for all devices in the domain */ + for (i = 0; i < domain->num_clks; i++) + clk_disable_unprepare(domain->clk[i]); + if (has_regulator && !on) { int err; @@ -328,6 +352,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { .bits = { .pxx = IMX8M_GPU_SW_Pxx_REQ, .map = IMX8M_GPU_A53_DOMAIN, + .hsk = IMX8M_GPU_HSK_PWRDNREQN, }, .pgc = IMX8M_PGC_GPU, }, @@ -339,6 +364,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { .bits = { .pxx = IMX8M_VPU_SW_Pxx_REQ, .map = IMX8M_VPU_A53_DOMAIN, + .hsk = IMX8M_VPU_HSK_PWRDNREQN, }, .pgc = IMX8M_PGC_VPU, }, @@ -350,6 +376,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { .bits = { .pxx = IMX8M_DISP_SW_Pxx_REQ, .map = IMX8M_DISP_A53_DOMAIN, + .hsk = IMX8M_DISP_HSK_PWRDNREQN, }, .pgc = IMX8M_PGC_DISP, }, @@ -390,7 +417,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = { static const struct regmap_range imx8m_yes_ranges[] = { regmap_reg_range(GPC_LPCR_A_CORE_BSC, - GPC_M4_PU_PDN_FLG), + GPC_PU_PWRHSK), regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_MIPI), GPC_PGC_SR(IMX8M_PGC_MIPI)), regmap_reg_range(GPC_PGC_CTRL(IMX8M_PGC_PCIE1), @@ -426,6 +453,41 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = { .reg_access_table = &imx8m_access_table, }; +static int imx_pgc_get_clocks(struct imx_pgc_domain *domain) +{ + int i, ret; + + for (i = 0; ; i++) { + struct clk *clk = of_clk_get(domain->dev->of_node, i); + if (IS_ERR(clk)) + break; + if (i >= GPC_CLK_MAX) { + dev_err(domain->dev, "more than %d clocks\n", + GPC_CLK_MAX); + ret = -EINVAL; + goto clk_err; + } + domain->clk[i] = clk; + } + domain->num_clks = i; + + return 0; + +clk_err: + while (i--) + clk_put(domain->clk[i]); + + return ret; +} + +static void imx_pgc_put_clocks(struct imx_pgc_domain *domain) +{ + int i; + + for (i = domain->num_clks - 1; i >= 0; i--) + clk_put(domain->clk[i]); +} + static int imx_pgc_domain_probe(struct platform_device *pdev) { struct imx_pgc_domain *domain = pdev->dev.platform_data; @@ -445,9 +507,17 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) domain->voltage, 
domain->voltage); } + ret = imx_pgc_get_clocks(domain); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(domain->dev, "Failed to get domain's clocks\n"); + return ret; + } + ret = pm_genpd_init(&domain->genpd, NULL, true); if (ret) { dev_err(domain->dev, "Failed to init power domain\n"); + imx_pgc_put_clocks(domain); return ret; } @@ -456,6 +526,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) if (ret) { dev_err(domain->dev, "Failed to add genpd provider\n"); pm_genpd_remove(&domain->genpd); + imx_pgc_put_clocks(domain); } return ret; @@ -467,6 +538,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev) of_genpd_del_provider(domain->dev->of_node); pm_genpd_remove(&domain->genpd); + imx_pgc_put_clocks(domain); return 0; } diff --git a/drivers/soc/lantiq/Makefile b/drivers/soc/lantiq/Makefile index be9e866d53e5..35aa86bd1023 100644 --- a/drivers/soc/lantiq/Makefile +++ b/drivers/soc/lantiq/Makefile @@ -1,2 +1 @@ obj-y += fpi-bus.o -obj-$(CONFIG_XRX200_PHY_FW) += gphy.o diff --git a/drivers/soc/lantiq/gphy.c b/drivers/soc/lantiq/gphy.c deleted file mode 100644 index feeb17cebc25..000000000000 --- a/drivers/soc/lantiq/gphy.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * Copyright (C) 2012 John Crispin - * Copyright (C) 2016 Martin Blumenstingl - * Copyright (C) 2017 Hauke Mehrtens - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#define XRX200_GPHY_FW_ALIGN (16 * 1024) - -struct xway_gphy_priv { - struct clk *gphy_clk_gate; - struct reset_control *gphy_reset; - struct reset_control *gphy_reset2; - void __iomem *membase; - char *fw_name; -}; - -struct xway_gphy_match_data { - char *fe_firmware_name; - char *ge_firmware_name; -}; - -static const struct xway_gphy_match_data xrx200a1x_gphy_data = { - .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", - .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", -}; - -static const struct xway_gphy_match_data xrx200a2x_gphy_data = { - .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", - .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", -}; - -static const struct xway_gphy_match_data xrx300_gphy_data = { - .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", - .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", -}; - -static const struct of_device_id xway_gphy_match[] = { - { .compatible = "lantiq,xrx200a1x-gphy", .data = &xrx200a1x_gphy_data }, - { .compatible = "lantiq,xrx200a2x-gphy", .data = &xrx200a2x_gphy_data }, - { .compatible = "lantiq,xrx300-gphy", .data = &xrx300_gphy_data }, - { .compatible = "lantiq,xrx330-gphy", .data = &xrx300_gphy_data }, - {}, -}; -MODULE_DEVICE_TABLE(of, xway_gphy_match); - -static int xway_gphy_load(struct device *dev, struct xway_gphy_priv *priv, - dma_addr_t *dev_addr) -{ - const struct firmware *fw; - void *fw_addr; - dma_addr_t dma_addr; - size_t size; - int ret; - - ret = request_firmware(&fw, priv->fw_name, dev); - if (ret) { - dev_err(dev, "failed to load firmware: %s, error: %i\n", - priv->fw_name, ret); - return ret; - } - - /* - * GPHY cores need the firmware code in a persistent and contiguous - * memory area with a 16 kB boundary aligned start address. 
- */ - size = fw->size + XRX200_GPHY_FW_ALIGN; - - fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); - if (fw_addr) { - fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); - *dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); - memcpy(fw_addr, fw->data, fw->size); - } else { - dev_err(dev, "failed to alloc firmware memory\n"); - ret = -ENOMEM; - } - - release_firmware(fw); - - return ret; -} - -static int xway_gphy_of_probe(struct platform_device *pdev, - struct xway_gphy_priv *priv) -{ - struct device *dev = &pdev->dev; - const struct xway_gphy_match_data *gphy_fw_name_cfg; - u32 gphy_mode; - int ret; - struct resource *res_gphy; - - gphy_fw_name_cfg = of_device_get_match_data(dev); - - priv->gphy_clk_gate = devm_clk_get(dev, NULL); - if (IS_ERR(priv->gphy_clk_gate)) { - dev_err(dev, "Failed to lookup gate clock\n"); - return PTR_ERR(priv->gphy_clk_gate); - } - - res_gphy = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->membase = devm_ioremap_resource(dev, res_gphy); - if (IS_ERR(priv->membase)) - return PTR_ERR(priv->membase); - - priv->gphy_reset = devm_reset_control_get(dev, "gphy"); - if (IS_ERR(priv->gphy_reset)) { - if (PTR_ERR(priv->gphy_reset) != -EPROBE_DEFER) - dev_err(dev, "Failed to lookup gphy reset\n"); - return PTR_ERR(priv->gphy_reset); - } - - priv->gphy_reset2 = devm_reset_control_get_optional(dev, "gphy2"); - if (IS_ERR(priv->gphy_reset2)) - return PTR_ERR(priv->gphy_reset2); - - ret = device_property_read_u32(dev, "lantiq,gphy-mode", &gphy_mode); - /* Default to GE mode */ - if (ret) - gphy_mode = GPHY_MODE_GE; - - switch (gphy_mode) { - case GPHY_MODE_FE: - priv->fw_name = gphy_fw_name_cfg->fe_firmware_name; - break; - case GPHY_MODE_GE: - priv->fw_name = gphy_fw_name_cfg->ge_firmware_name; - break; - default: - dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); - return -EINVAL; - } - - return 0; -} - -static int xway_gphy_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct xway_gphy_priv *priv; - dma_addr_t fw_addr = 0; - int ret; - - priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - ret = xway_gphy_of_probe(pdev, priv); - if (ret) - return ret; - - ret = clk_prepare_enable(priv->gphy_clk_gate); - if (ret) - return ret; - - ret = xway_gphy_load(dev, priv, &fw_addr); - if (ret) { - clk_disable_unprepare(priv->gphy_clk_gate); - return ret; - } - - reset_control_assert(priv->gphy_reset); - reset_control_assert(priv->gphy_reset2); - - iowrite32be(fw_addr, priv->membase); - - reset_control_deassert(priv->gphy_reset); - reset_control_deassert(priv->gphy_reset2); - - platform_set_drvdata(pdev, priv); - - return ret; -} - -static int xway_gphy_remove(struct platform_device *pdev) -{ - struct xway_gphy_priv *priv = platform_get_drvdata(pdev); - - iowrite32be(0, priv->membase); - - clk_disable_unprepare(priv->gphy_clk_gate); - - return 0; -} - -static struct platform_driver xway_gphy_driver = { - .probe = xway_gphy_probe, - .remove = xway_gphy_remove, - .driver = { - .name = "xway-rcu-gphy", - .of_match_table = xway_gphy_match, - }, -}; - -module_platform_driver(xway_gphy_driver); - -MODULE_FIRMWARE("lantiq/xrx300_phy11g_a21.bin"); -MODULE_FIRMWARE("lantiq/xrx300_phy22f_a21.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy11g_a14.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy11g_a22.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy22f_a14.bin"); -MODULE_FIRMWARE("lantiq/xrx200_phy22f_a22.bin"); -MODULE_AUTHOR("Martin Blumenstingl "); -MODULE_DESCRIPTION("Lantiq XWAY GPHY Firmware Loader"); 
-MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index fcbf8a2e4080..1ee298f6bf17 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -98,6 +98,24 @@ config QCOM_RPMH of hardware components aggregate requests for these resources and help apply the aggregated state on the resource. +config QCOM_RPMHPD + bool "Qualcomm RPMh Power domain driver" + depends on QCOM_RPMH && QCOM_COMMAND_DB + help + QCOM RPMh Power domain driver to support power-domains with + performance states. The driver communicates a performance state + value to RPMh which then translates it into corresponding voltage + for the voltage rail. + +config QCOM_RPMPD + bool "Qualcomm RPM Power domain driver" + depends on QCOM_SMD_RPM=y + help + QCOM RPM Power domain driver to support power-domains with + performance states. The driver communicates a performance state + value to RPM which then translates it into corresponding voltage + for the voltage rail. + config QCOM_SMEM tristate "Qualcomm Shared Memory Manager (SMEM)" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index f25b54cd6cf8..ffe519b0cb66 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -21,3 +21,5 @@ obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o obj-$(CONFIG_QCOM_APR) += apr.o obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o +obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o +obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c index 2e1e4f0a5db8..86600d97c36d 100644 --- a/drivers/soc/qcom/llcc-sdm845.c +++ b/drivers/soc/qcom/llcc-sdm845.c @@ -71,6 +71,11 @@ static struct llcc_slice_config sdm845_data[] = { SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0), }; +static int sdm845_qcom_llcc_remove(struct platform_device *pdev) +{ + return qcom_llcc_remove(pdev); +} + static int sdm845_qcom_llcc_probe(struct platform_device *pdev) { return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data)); @@ -87,6 +92,7 @@ static struct platform_driver sdm845_qcom_llcc_driver = { .of_match_table = sdm845_qcom_llcc_of_match, }, .probe = sdm845_qcom_llcc_probe, + .remove = sdm845_qcom_llcc_remove, }; module_platform_driver(sdm845_qcom_llcc_driver); diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c index 80667f7be52c..9090ea12eaf3 100644 --- a/drivers/soc/qcom/llcc-slice.c +++ b/drivers/soc/qcom/llcc-slice.c @@ -46,7 +46,7 @@ #define BANK_OFFSET_STRIDE 0x80000 -static struct llcc_drv_data *drv_data; +static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER; static const struct regmap_config llcc_regmap_config = { .reg_bits = 32, @@ -68,6 +68,9 @@ struct llcc_slice_desc *llcc_slice_getd(u32 uid) struct llcc_slice_desc *desc; u32 sz, count; + if (IS_ERR(drv_data)) + return ERR_CAST(drv_data); + cfg = drv_data->cfg; sz = drv_data->cfg_size; @@ -108,6 +111,9 @@ static int llcc_update_act_ctrl(u32 sid, u32 slice_status; int ret; + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid); status_reg = LLCC_TRP_STATUSn(sid); @@ -143,6 +149,9 @@ int llcc_slice_activate(struct llcc_slice_desc *desc) int ret; u32 act_ctrl_val; + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + if (IS_ERR_OR_NULL(desc)) return -EINVAL; @@ -180,6 +189,9 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc) u32 act_ctrl_val; int ret; + if (IS_ERR(drv_data)) + return PTR_ERR(drv_data); + if 
(IS_ERR_OR_NULL(desc)) return -EINVAL; @@ -289,46 +301,62 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev) return ret; } +int qcom_llcc_remove(struct platform_device *pdev) +{ + /* Set the global pointer to a error code to avoid referencing it */ + drv_data = ERR_PTR(-ENODEV); + return 0; +} +EXPORT_SYMBOL_GPL(qcom_llcc_remove); + +static struct regmap *qcom_llcc_init_mmio(struct platform_device *pdev, + const char *name) +{ + struct resource *res; + void __iomem *base; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (!res) + return ERR_PTR(-ENODEV); + + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return ERR_CAST(base); + + return devm_regmap_init_mmio(&pdev->dev, base, &llcc_regmap_config); +} + int qcom_llcc_probe(struct platform_device *pdev, const struct llcc_slice_config *llcc_cfg, u32 sz) { u32 num_banks; struct device *dev = &pdev->dev; - struct resource *llcc_banks_res, *llcc_bcast_res; - void __iomem *llcc_banks_base, *llcc_bcast_base; int ret, i; struct platform_device *llcc_edac; drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL); - if (!drv_data) - return -ENOMEM; - - llcc_banks_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "llcc_base"); - llcc_banks_base = devm_ioremap_resource(&pdev->dev, llcc_banks_res); - if (IS_ERR(llcc_banks_base)) - return PTR_ERR(llcc_banks_base); - - drv_data->regmap = devm_regmap_init_mmio(dev, llcc_banks_base, - &llcc_regmap_config); - if (IS_ERR(drv_data->regmap)) - return PTR_ERR(drv_data->regmap); - - llcc_bcast_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "llcc_broadcast_base"); - llcc_bcast_base = devm_ioremap_resource(&pdev->dev, llcc_bcast_res); - if (IS_ERR(llcc_bcast_base)) - return PTR_ERR(llcc_bcast_base); - - drv_data->bcast_regmap = devm_regmap_init_mmio(dev, llcc_bcast_base, - &llcc_regmap_config); - if (IS_ERR(drv_data->bcast_regmap)) - return PTR_ERR(drv_data->bcast_regmap); + if (!drv_data) { + ret = -ENOMEM; + goto err; + } + + drv_data->regmap = qcom_llcc_init_mmio(pdev, "llcc_base"); + if (IS_ERR(drv_data->regmap)) { + ret = PTR_ERR(drv_data->regmap); + goto err; + } + + drv_data->bcast_regmap = + qcom_llcc_init_mmio(pdev, "llcc_broadcast_base"); + if (IS_ERR(drv_data->bcast_regmap)) { + ret = PTR_ERR(drv_data->bcast_regmap); + goto err; + } ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0, &num_banks); if (ret) - return ret; + goto err; num_banks &= LLCC_LB_CNT_MASK; num_banks >>= LLCC_LB_CNT_SHIFT; @@ -340,8 +368,10 @@ int qcom_llcc_probe(struct platform_device *pdev, drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32), GFP_KERNEL); - if (!drv_data->offsets) - return -ENOMEM; + if (!drv_data->offsets) { + ret = -ENOMEM; + goto err; + } for (i = 0; i < num_banks; i++) drv_data->offsets[i] = i * BANK_OFFSET_STRIDE; @@ -349,8 +379,10 @@ int qcom_llcc_probe(struct platform_device *pdev, drv_data->bitmap = devm_kcalloc(dev, BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long), GFP_KERNEL); - if (!drv_data->bitmap) - return -ENOMEM; + if (!drv_data->bitmap) { + ret = -ENOMEM; + goto err; + } drv_data->cfg = llcc_cfg; drv_data->cfg_size = sz; @@ -359,7 +391,7 @@ int qcom_llcc_probe(struct platform_device *pdev, ret = qcom_llcc_cfg_program(pdev); if (ret) - return ret; + goto err; drv_data->ecc_irq = platform_get_irq(pdev, 0); if (drv_data->ecc_irq >= 0) { @@ -370,6 +402,9 @@ int qcom_llcc_probe(struct platform_device *pdev, dev_err(dev, "Failed to register llcc edac driver\n"); } + return 0; +err: + drv_data = 
ERR_PTR(-ENODEV); return ret; } EXPORT_SYMBOL_GPL(qcom_llcc_probe); diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c index 09c669e70d63..038abc377fdb 100644 --- a/drivers/soc/qcom/qcom_gsbi.c +++ b/drivers/soc/qcom/qcom_gsbi.c @@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev) struct resource *res; void __iomem *base; struct gsbi_info *gsbi; - int i; + int i, ret; u32 mask, gsbi_num; const struct crci_config *config = NULL; @@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gsbi); - return of_platform_populate(node, NULL, NULL, &pdev->dev); + ret = of_platform_populate(node, NULL, NULL, &pdev->dev); + if (ret) + clk_disable_unprepare(gsbi->hclk); + return ret; } static int gsbi_remove(struct platform_device *pdev) diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index 97bb5989aa21..7200d762a951 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -45,9 +45,9 @@ static ssize_t qcom_rmtfs_mem_show(struct device *dev, struct device_attribute *attr, char *buf); -static DEVICE_ATTR(phys_addr, 0400, qcom_rmtfs_mem_show, NULL); -static DEVICE_ATTR(size, 0400, qcom_rmtfs_mem_show, NULL); -static DEVICE_ATTR(client_id, 0400, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(phys_addr, 0444, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(size, 0444, qcom_rmtfs_mem_show, NULL); +static DEVICE_ATTR(client_id, 0444, qcom_rmtfs_mem_show, NULL); static ssize_t qcom_rmtfs_mem_show(struct device *dev, struct device_attribute *attr, @@ -132,6 +132,11 @@ static int qcom_rmtfs_mem_release(struct inode *inode, struct file *filp) return 0; } +static struct class rmtfs_class = { + .owner = THIS_MODULE, + .name = "rmtfs", +}; + static const struct file_operations qcom_rmtfs_mem_fops = { .owner = THIS_MODULE, .open = qcom_rmtfs_mem_open, @@ -199,6 +204,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev) dev_set_name(&rmtfs_mem->dev, "qcom_rmtfs_mem%d", client_id); rmtfs_mem->dev.id = client_id; + rmtfs_mem->dev.class = &rmtfs_class; rmtfs_mem->dev.devt = MKDEV(MAJOR(qcom_rmtfs_mem_major), client_id); ret = cdev_device_add(&rmtfs_mem->cdev, &rmtfs_mem->dev); @@ -277,32 +283,42 @@ static struct platform_driver qcom_rmtfs_mem_driver = { }, }; -static int qcom_rmtfs_mem_init(void) +static int __init qcom_rmtfs_mem_init(void) { int ret; + ret = class_register(&rmtfs_class); + if (ret) + return ret; + ret = alloc_chrdev_region(&qcom_rmtfs_mem_major, 0, QCOM_RMTFS_MEM_DEV_MAX, "qcom_rmtfs_mem"); if (ret < 0) { pr_err("qcom_rmtfs_mem: failed to allocate char dev region\n"); - return ret; + goto unregister_class; } ret = platform_driver_register(&qcom_rmtfs_mem_driver); if (ret < 0) { pr_err("qcom_rmtfs_mem: failed to register rmtfs_mem driver\n"); - unregister_chrdev_region(qcom_rmtfs_mem_major, - QCOM_RMTFS_MEM_DEV_MAX); + goto unregister_chrdev; } + return 0; + +unregister_chrdev: + unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX); +unregister_class: + class_unregister(&rmtfs_class); return ret; } module_init(qcom_rmtfs_mem_init); -static void qcom_rmtfs_mem_exit(void) +static void __exit qcom_rmtfs_mem_exit(void) { platform_driver_unregister(&qcom_rmtfs_mem_driver); unregister_chrdev_region(qcom_rmtfs_mem_major, QCOM_RMTFS_MEM_DEV_MAX); + class_unregister(&rmtfs_class); } module_exit(qcom_rmtfs_mem_exit); diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index c7beb6841289..035091fd44b8 100644 --- a/drivers/soc/qcom/rpmh.c +++ 
b/drivers/soc/qcom/rpmh.c @@ -80,6 +80,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r) struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request, msg); struct completion *compl = rpm_msg->completion; + bool free = rpm_msg->needs_free; rpm_msg->err = r; @@ -94,7 +95,7 @@ void rpmh_tx_done(const struct tcs_request *msg, int r) complete(compl); exit: - if (rpm_msg->needs_free) + if (free) kfree(rpm_msg); } @@ -192,9 +193,8 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state, WARN_ON(irqs_disabled()); ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg); } else { - ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), - &rpm_msg->msg); /* Clean up our call by spoofing tx_done */ + ret = 0; rpmh_tx_done(&rpm_msg->msg, ret); } @@ -348,11 +348,12 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, { struct batch_cache_req *req; struct rpmh_request *rpm_msgs; - DECLARE_COMPLETION_ONSTACK(compl); + struct completion *compls; struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); unsigned long time_left; int count = 0; - int ret, i, j; + int ret, i; + void *ptr; if (!cmd || !n) return -EINVAL; @@ -362,10 +363,15 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, if (!count) return -EINVAL; - req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]), + ptr = kzalloc(sizeof(*req) + + count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)), GFP_ATOMIC); - if (!req) + if (!ptr) return -ENOMEM; + + req = ptr; + compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs); + req->count = count; rpm_msgs = req->rpm_msgs; @@ -380,25 +386,26 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, } for (i = 0; i < count; i++) { - rpm_msgs[i].completion = &compl; + struct completion *compl = &compls[i]; + + init_completion(compl); + rpm_msgs[i].completion = compl; ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg); if (ret) { pr_err("Error(%d) sending RPMH message addr=%#x\n", ret, rpm_msgs[i].msg.cmds[0].addr); - for (j = i; j < count; j++) - rpmh_tx_done(&rpm_msgs[j].msg, ret); break; } } time_left = RPMH_TIMEOUT_MS; - for (i = 0; i < count; i++) { - time_left = wait_for_completion_timeout(&compl, time_left); + while (i--) { + time_left = wait_for_completion_timeout(&compls[i], time_left); if (!time_left) { /* * Better hope they never finish because they'll signal - * the completion on our stack and that's bad once - * we've returned from the function. + * the completion that we're going to free once + * we've returned from this function. */ WARN_ON(1); ret = -ETIMEDOUT; @@ -407,7 +414,7 @@ int rpmh_write_batch(const struct device *dev, enum rpmh_state state, } exit: - kfree(req); + kfree(ptr); return ret; } diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c new file mode 100644 index 000000000000..5741ec3fa814 --- /dev/null +++ b/drivers/soc/qcom/rpmhpd.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2018, The Linux Foundation. 
All rights reserved.*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd) + +#define RPMH_ARC_MAX_LEVELS 16 + +/** + * struct rpmhpd - top level RPMh power domain resource data structure + * @dev: rpmh power domain controller device + * @pd: generic_pm_domain corrresponding to the power domain + * @peer: A peer power domain in case Active only Voting is + * supported + * @active_only: True if it represents an Active only peer + * @level: An array of level (vlvl) to corner (hlvl) mappings + * derived from cmd-db + * @level_count: Number of levels supported by the power domain. max + * being 16 (0 - 15) + * @enabled: true if the power domain is enabled + * @res_name: Resource name used for cmd-db lookup + * @addr: Resource address as looped up using resource name from + * cmd-db + */ +struct rpmhpd { + struct device *dev; + struct generic_pm_domain pd; + struct generic_pm_domain *parent; + struct rpmhpd *peer; + const bool active_only; + unsigned int corner; + unsigned int active_corner; + u32 level[RPMH_ARC_MAX_LEVELS]; + size_t level_count; + bool enabled; + const char *res_name; + u32 addr; +}; + +struct rpmhpd_desc { + struct rpmhpd **rpmhpds; + size_t num_pds; +}; + +static DEFINE_MUTEX(rpmhpd_lock); + +/* SDM845 RPMH powerdomains */ + +static struct rpmhpd sdm845_ebi = { + .pd = { .name = "ebi", }, + .res_name = "ebi.lvl", +}; + +static struct rpmhpd sdm845_lmx = { + .pd = { .name = "lmx", }, + .res_name = "lmx.lvl", +}; + +static struct rpmhpd sdm845_lcx = { + .pd = { .name = "lcx", }, + .res_name = "lcx.lvl", +}; + +static struct rpmhpd sdm845_gfx = { + .pd = { .name = "gfx", }, + .res_name = "gfx.lvl", +}; + +static struct rpmhpd sdm845_mss = { + .pd = { .name = "mss", }, + .res_name = "mss.lvl", +}; + +static struct rpmhpd sdm845_mx_ao; +static struct rpmhpd sdm845_mx = { + .pd = { .name = "mx", }, + .peer = &sdm845_mx_ao, + .res_name = "mx.lvl", +}; + +static struct rpmhpd sdm845_mx_ao = { + .pd = { .name = "mx_ao", }, + .peer = &sdm845_mx, + .res_name = "mx.lvl", +}; + +static struct rpmhpd sdm845_cx_ao; +static struct rpmhpd sdm845_cx = { + .pd = { .name = "cx", }, + .peer = &sdm845_cx_ao, + .parent = &sdm845_mx.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd sdm845_cx_ao = { + .pd = { .name = "cx_ao", }, + .peer = &sdm845_cx, + .parent = &sdm845_mx_ao.pd, + .res_name = "cx.lvl", +}; + +static struct rpmhpd *sdm845_rpmhpds[] = { + [SDM845_EBI] = &sdm845_ebi, + [SDM845_MX] = &sdm845_mx, + [SDM845_MX_AO] = &sdm845_mx_ao, + [SDM845_CX] = &sdm845_cx, + [SDM845_CX_AO] = &sdm845_cx_ao, + [SDM845_LMX] = &sdm845_lmx, + [SDM845_LCX] = &sdm845_lcx, + [SDM845_GFX] = &sdm845_gfx, + [SDM845_MSS] = &sdm845_mss, +}; + +static const struct rpmhpd_desc sdm845_desc = { + .rpmhpds = sdm845_rpmhpds, + .num_pds = ARRAY_SIZE(sdm845_rpmhpds), +}; + +static const struct of_device_id rpmhpd_match_table[] = { + { .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc }, + { } +}; + +static int rpmhpd_send_corner(struct rpmhpd *pd, int state, + unsigned int corner, bool sync) +{ + struct tcs_cmd cmd = { + .addr = pd->addr, + .data = corner, + }; + + /* + * Wait for an ack only when we are increasing the + * perf state of the power domain + */ + if (sync) + return rpmh_write(pd->dev, state, &cmd, 1); + else + return rpmh_write_async(pd->dev, state, &cmd, 1); +} + +static void to_active_sleep(struct rpmhpd *pd, unsigned int corner, + unsigned 
int *active, unsigned int *sleep) +{ + *active = corner; + + if (pd->active_only) + *sleep = 0; + else + *sleep = *active; +} + +/* + * This function is used to aggregate the votes across the active only + * resources and its peers. The aggregated votes are sent to RPMh as + * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes + * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh + * on system sleep). + * We send ACTIVE_ONLY votes for resources without any peers. For others, + * which have an active only peer, all 3 votes are sent. + */ +static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner) +{ + int ret; + struct rpmhpd *peer = pd->peer; + unsigned int active_corner, sleep_corner; + unsigned int this_active_corner = 0, this_sleep_corner = 0; + unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + + to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner); + + if (peer && peer->enabled) + to_active_sleep(peer, peer->corner, &peer_active_corner, + &peer_sleep_corner); + + active_corner = max(this_active_corner, peer_active_corner); + + ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner, + active_corner > pd->active_corner); + if (ret) + return ret; + + pd->active_corner = active_corner; + + if (peer) { + peer->active_corner = active_corner; + + ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE, + active_corner, false); + if (ret) + return ret; + + sleep_corner = max(this_sleep_corner, peer_sleep_corner); + + return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner, + false); + } + + return ret; +} + +static int rpmhpd_power_on(struct generic_pm_domain *domain) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret = 0; + + mutex_lock(&rpmhpd_lock); + + if (pd->corner) + ret = rpmhpd_aggregate_corner(pd, pd->corner); + + if (!ret) + pd->enabled = true; + + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static int rpmhpd_power_off(struct generic_pm_domain *domain) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret = 0; + + mutex_lock(&rpmhpd_lock); + + ret = rpmhpd_aggregate_corner(pd, pd->level[0]); + + if (!ret) + pd->enabled = false; + + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static int rpmhpd_set_performance_state(struct generic_pm_domain *domain, + unsigned int level) +{ + struct rpmhpd *pd = domain_to_rpmhpd(domain); + int ret = 0, i; + + mutex_lock(&rpmhpd_lock); + + for (i = 0; i < pd->level_count; i++) + if (level <= pd->level[i]) + break; + + /* + * If the level requested is more than that supported by the + * max corner, just set it to max anyway. + */ + if (i == pd->level_count) + i--; + + if (pd->enabled) { + ret = rpmhpd_aggregate_corner(pd, i); + if (ret) + goto out; + } + + pd->corner = i; +out: + mutex_unlock(&rpmhpd_lock); + + return ret; +} + +static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) +{ + int i; + const u16 *buf; + + buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + /* 2 bytes used for each command DB aux data entry */ + rpmhpd->level_count >>= 1; + + if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS) + return -EINVAL; + + for (i = 0; i < rpmhpd->level_count; i++) { + rpmhpd->level[i] = buf[i]; + + /* + * The AUX data may be zero padded. These 0 valued entries at + * the end of the map must be ignored. 
+ */ + if (i > 0 && rpmhpd->level[i] == 0) { + rpmhpd->level_count = i; + break; + } + pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i, + rpmhpd->level[i]); + } + + return 0; +} + +static int rpmhpd_probe(struct platform_device *pdev) +{ + int i, ret; + size_t num_pds; + struct device *dev = &pdev->dev; + struct genpd_onecell_data *data; + struct rpmhpd **rpmhpds; + const struct rpmhpd_desc *desc; + + desc = of_device_get_match_data(dev); + if (!desc) + return -EINVAL; + + rpmhpds = desc->rpmhpds; + num_pds = desc->num_pds; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains), + GFP_KERNEL); + if (!data->domains) + return -ENOMEM; + + data->num_domains = num_pds; + + for (i = 0; i < num_pds; i++) { + if (!rpmhpds[i]) { + dev_warn(dev, "rpmhpds[%d] is empty\n", i); + continue; + } + + rpmhpds[i]->dev = dev; + rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name); + if (!rpmhpds[i]->addr) { + dev_err(dev, "Could not find RPMh address for resource %s\n", + rpmhpds[i]->res_name); + return -ENODEV; + } + + ret = cmd_db_read_slave_id(rpmhpds[i]->res_name); + if (ret != CMD_DB_HW_ARC) { + dev_err(dev, "RPMh slave ID mismatch\n"); + return -EINVAL; + } + + ret = rpmhpd_update_level_mapping(rpmhpds[i]); + if (ret) + return ret; + + rpmhpds[i]->pd.power_off = rpmhpd_power_off; + rpmhpds[i]->pd.power_on = rpmhpd_power_on; + rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state; + rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state; + pm_genpd_init(&rpmhpds[i]->pd, NULL, true); + + data->domains[i] = &rpmhpds[i]->pd; + } + + /* Add subdomains */ + for (i = 0; i < num_pds; i++) { + if (!rpmhpds[i]) + continue; + if (rpmhpds[i]->parent) + pm_genpd_add_subdomain(rpmhpds[i]->parent, + &rpmhpds[i]->pd); + } + + return of_genpd_add_provider_onecell(pdev->dev.of_node, data); +} + +static struct platform_driver rpmhpd_driver = { + .driver = { + .name = "qcom-rpmhpd", + .of_match_table = rpmhpd_match_table, + .suppress_bind_attrs = true, + }, + .probe = rpmhpd_probe, +}; + +static int __init rpmhpd_init(void) +{ + return platform_driver_register(&rpmhpd_driver); +} +core_initcall(rpmhpd_init); diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c new file mode 100644 index 000000000000..005326050c23 --- /dev/null +++ b/drivers/soc/qcom/rpmpd.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd) + +/* Resource types */ +#define RPMPD_SMPA 0x61706d73 +#define RPMPD_LDOA 0x616f646c + +/* Operation Keys */ +#define KEY_CORNER 0x6e726f63 /* corn */ +#define KEY_ENABLE 0x6e657773 /* swen */ +#define KEY_FLOOR_CORNER 0x636676 /* vfc */ + +#define MAX_RPMPD_STATE 6 + +#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \ + static struct rpmpd _platform##_##_active; \ + static struct rpmpd _platform##_##_name = { \ + .pd = { .name = #_name, }, \ + .peer = &_platform##_##_active, \ + .res_type = RPMPD_SMPA, \ + .res_id = r_id, \ + .key = KEY_CORNER, \ + }; \ + static struct rpmpd _platform##_##_active = { \ + .pd = { .name = #_active, }, \ + .peer = &_platform##_##_name, \ + .active_only = true, \ + .res_type = RPMPD_SMPA, \ + .res_id = r_id, \ + .key = KEY_CORNER, \ + } + +#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \ + static struct rpmpd _platform##_##_name = { \ + .pd = { .name = #_name, }, \ + .res_type = RPMPD_LDOA, \ + .res_id = r_id, \ + .key = KEY_CORNER, \ + } + +#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \ + static struct rpmpd _platform##_##_name = { \ + .pd = { .name = #_name, }, \ + .res_type = r_type, \ + .res_id = r_id, \ + .key = KEY_FLOOR_CORNER, \ + } + +#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \ + DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA) + +#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \ + DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA) + +struct rpmpd_req { + __le32 key; + __le32 nbytes; + __le32 value; +}; + +struct rpmpd { + struct generic_pm_domain pd; + struct rpmpd *peer; + const bool active_only; + unsigned int corner; + bool enabled; + const char *res_name; + const int res_type; + const int res_id; + struct qcom_smd_rpm *rpm; + __le32 key; +}; + +struct rpmpd_desc { + struct rpmpd **rpmpds; + size_t num_pds; +}; + +static DEFINE_MUTEX(rpmpd_lock); + +/* msm8996 RPM Power domains */ +DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1); +DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2); +DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26); + +DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1); +DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26); + +static struct rpmpd *msm8996_rpmpds[] = { + [MSM8996_VDDCX] = &msm8996_vddcx, + [MSM8996_VDDCX_AO] = &msm8996_vddcx_ao, + [MSM8996_VDDCX_VFC] = &msm8996_vddcx_vfc, + [MSM8996_VDDMX] = &msm8996_vddmx, + [MSM8996_VDDMX_AO] = &msm8996_vddmx_ao, + [MSM8996_VDDSSCX] = &msm8996_vddsscx, + [MSM8996_VDDSSCX_VFC] = &msm8996_vddsscx_vfc, +}; + +static const struct rpmpd_desc msm8996_desc = { + .rpmpds = msm8996_rpmpds, + .num_pds = ARRAY_SIZE(msm8996_rpmpds), +}; + +static const struct of_device_id rpmpd_match_table[] = { + { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc }, + { } +}; + +static int rpmpd_send_enable(struct rpmpd *pd, bool enable) +{ + struct rpmpd_req req = { + .key = KEY_ENABLE, + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(enable), + }; + + return qcom_rpm_smd_write(pd->rpm, QCOM_SMD_RPM_ACTIVE_STATE, + pd->res_type, pd->res_id, &req, sizeof(req)); +} + +static int rpmpd_send_corner(struct rpmpd *pd, int state, unsigned int corner) +{ + struct rpmpd_req req = { + .key = pd->key, + .nbytes = cpu_to_le32(sizeof(u32)), + .value = cpu_to_le32(corner), + }; + + return qcom_rpm_smd_write(pd->rpm, state, pd->res_type, pd->res_id, + 
&req, sizeof(req)); +}; + +static void to_active_sleep(struct rpmpd *pd, unsigned int corner, + unsigned int *active, unsigned int *sleep) +{ + *active = corner; + + if (pd->active_only) + *sleep = 0; + else + *sleep = *active; +} + +static int rpmpd_aggregate_corner(struct rpmpd *pd) +{ + int ret; + struct rpmpd *peer = pd->peer; + unsigned int active_corner, sleep_corner; + unsigned int this_active_corner = 0, this_sleep_corner = 0; + unsigned int peer_active_corner = 0, peer_sleep_corner = 0; + + to_active_sleep(pd, pd->corner, &this_active_corner, &this_sleep_corner); + + if (peer && peer->enabled) + to_active_sleep(peer, peer->corner, &peer_active_corner, + &peer_sleep_corner); + + active_corner = max(this_active_corner, peer_active_corner); + + ret = rpmpd_send_corner(pd, QCOM_SMD_RPM_ACTIVE_STATE, active_corner); + if (ret) + return ret; + + sleep_corner = max(this_sleep_corner, peer_sleep_corner); + + return rpmpd_send_corner(pd, QCOM_SMD_RPM_SLEEP_STATE, sleep_corner); +} + +static int rpmpd_power_on(struct generic_pm_domain *domain) +{ + int ret; + struct rpmpd *pd = domain_to_rpmpd(domain); + + mutex_lock(&rpmpd_lock); + + ret = rpmpd_send_enable(pd, true); + if (ret) + goto out; + + pd->enabled = true; + + if (pd->corner) + ret = rpmpd_aggregate_corner(pd); + +out: + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static int rpmpd_power_off(struct generic_pm_domain *domain) +{ + int ret; + struct rpmpd *pd = domain_to_rpmpd(domain); + + mutex_lock(&rpmpd_lock); + + ret = rpmpd_send_enable(pd, false); + if (!ret) + pd->enabled = false; + + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static int rpmpd_set_performance(struct generic_pm_domain *domain, + unsigned int state) +{ + int ret = 0; + struct rpmpd *pd = domain_to_rpmpd(domain); + + if (state > MAX_RPMPD_STATE) + goto out; + + mutex_lock(&rpmpd_lock); + + pd->corner = state; + + if (!pd->enabled && pd->key != KEY_FLOOR_CORNER) + goto out; + + ret = rpmpd_aggregate_corner(pd); + +out: + mutex_unlock(&rpmpd_lock); + + return ret; +} + +static unsigned int rpmpd_get_performance(struct generic_pm_domain *genpd, + struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_level(opp); +} + +static int rpmpd_probe(struct platform_device *pdev) +{ + int i; + size_t num; + struct genpd_onecell_data *data; + struct qcom_smd_rpm *rpm; + struct rpmpd **rpmpds; + const struct rpmpd_desc *desc; + + rpm = dev_get_drvdata(pdev->dev.parent); + if (!rpm) { + dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n"); + return -ENODEV; + } + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + rpmpds = desc->rpmpds; + num = desc->num_pds; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->domains = devm_kcalloc(&pdev->dev, num, sizeof(*data->domains), + GFP_KERNEL); + data->num_domains = num; + + for (i = 0; i < num; i++) { + if (!rpmpds[i]) { + dev_warn(&pdev->dev, "rpmpds[] with empty entry at index=%d\n", + i); + continue; + } + + rpmpds[i]->rpm = rpm; + rpmpds[i]->pd.power_off = rpmpd_power_off; + rpmpds[i]->pd.power_on = rpmpd_power_on; + rpmpds[i]->pd.set_performance_state = rpmpd_set_performance; + rpmpds[i]->pd.opp_to_performance_state = rpmpd_get_performance; + pm_genpd_init(&rpmpds[i]->pd, NULL, true); + + data->domains[i] = &rpmpds[i]->pd; + } + + return of_genpd_add_provider_onecell(pdev->dev.of_node, data); +} + +static struct platform_driver rpmpd_driver = { + .driver = { + .name = "qcom-rpmpd", + .of_match_table = rpmpd_match_table, + 
.suppress_bind_attrs = true, + }, + .probe = rpmpd_probe, +}; + +static int __init rpmpd_init(void) +{ + return platform_driver_register(&rpmpd_driver); +} +core_initcall(rpmpd_init); diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index b8e63724a49d..9956bb2c63f2 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c @@ -227,6 +227,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = { { .compatible = "qcom,rpm-msm8974" }, { .compatible = "qcom,rpm-msm8996" }, { .compatible = "qcom,rpm-msm8998" }, + { .compatible = "qcom,rpm-sdm660" }, { .compatible = "qcom,rpm-qcs404" }, {} }; diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index fe4481676da6..a0b03443d8c1 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -76,6 +76,7 @@ config ARCH_TEGRA_210_SOC select PINCTRL_TEGRA210 select SOC_TEGRA_FLOWCTRL select SOC_TEGRA_PMC + select TEGRA_TIMER help Enable support for the NVIDIA Tegra210 SoC. Also known as Tegra X1, the Tegra210 has four Cortex-A57 cores paired with four Cortex-A53 diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index a33ee8ef8b6b..51625703399e 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fuse->phys = res->start; fuse->base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(fuse->base)) - return PTR_ERR(fuse->base); + if (IS_ERR(fuse->base)) { + err = PTR_ERR(fuse->base); + fuse->base = base; + return err; + } fuse->clk = devm_clk_get(&pdev->dev, "fuse"); if (IS_ERR(fuse->clk)) { dev_err(&pdev->dev, "failed to get FUSE clock: %ld", PTR_ERR(fuse->clk)); + fuse->base = base; return PTR_ERR(fuse->clk); } @@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev) if (fuse->soc->probe) { err = fuse->soc->probe(fuse); - if (err < 0) + if (err < 0) { + fuse->base = base; return err; + } } if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size, diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c index 5373f4c16b54..8ed35d9851f8 100644 --- a/drivers/soc/tegra/fuse/speedo-tegra210.c +++ b/drivers/soc/tegra/fuse/speedo-tegra210.c @@ -131,7 +131,7 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info) soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0); soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1); - soc_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2); + soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2); cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4; soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4; diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 7ea3280279ff..0df258518693 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -20,7 +20,7 @@ #define pr_fmt(fmt) "tegra-pmc: " fmt -#include +#include #include #include #include @@ -30,16 +30,17 @@ #include #include #include -#include #include -#include +#include +#include #include #include +#include #include #include -#include -#include #include +#include +#include #include #include #include @@ -145,6 +146,11 @@ #define WAKE_AOWAKE_CTRL 0x4f4 #define WAKE_AOWAKE_CTRL_INTR_POLARITY BIT(0) +/* for secure PMC */ +#define TEGRA_SMC_PMC 0xc2fffe00 +#define TEGRA_SMC_PMC_READ 0xaa +#define TEGRA_SMC_PMC_WRITE 0xbb + struct tegra_powergate { struct generic_pm_domain genpd; struct 
tegra_pmc *pmc; @@ -216,6 +222,7 @@ struct tegra_pmc_soc { bool has_gpu_clamps; bool needs_mbist_war; bool has_impl_33v_pwr; + bool maybe_tz_only; const struct tegra_io_pad_soc *io_pads; unsigned int num_io_pads; @@ -273,8 +280,12 @@ static const char * const tegra30_reset_sources[] = { * struct tegra_pmc - NVIDIA Tegra PMC * @dev: pointer to PMC device structure * @base: pointer to I/O remapped register region + * @wake: pointer to I/O remapped region for WAKE registers + * @aotag: pointer to I/O remapped region for AOTAG registers + * @scratch: pointer to I/O remapped region for scratch registers * @clk: pointer to pclk clock * @soc: pointer to SoC data structure + * @tz_only: flag specifying if the PMC can only be accessed via TrustZone * @debugfs: pointer to debugfs entry * @rate: currently configured rate of pclk * @suspend_mode: lowest suspend mode available @@ -291,6 +302,9 @@ static const char * const tegra30_reset_sources[] = { * @lp0_vec_size: size of the LP0 warm boot code * @powergates_available: Bitmap of available power gates * @powergates_lock: mutex for power gate register access + * @pctl_dev: pin controller exposed by the PMC + * @domain: IRQ domain provided by the PMC + * @irq: chip implementation for the IRQ domain */ struct tegra_pmc { struct device *dev; @@ -302,6 +316,7 @@ struct tegra_pmc { struct dentry *debugfs; const struct tegra_pmc_soc *soc; + bool tz_only; unsigned long rate; @@ -338,30 +353,85 @@ to_powergate(struct generic_pm_domain *domain) return container_of(domain, struct tegra_powergate, genpd); } -static u32 tegra_pmc_readl(unsigned long offset) +static u32 tegra_pmc_readl(struct tegra_pmc *pmc, unsigned long offset) { + struct arm_smccc_res res; + + if (pmc->tz_only) { + arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_READ, offset, 0, 0, + 0, 0, 0, &res); + if (res.a0) { + if (pmc->dev) + dev_warn(pmc->dev, "%s(): SMC failed: %lu\n", + __func__, res.a0); + else + pr_warn("%s(): SMC failed: %lu\n", __func__, + res.a0); + } + + return res.a1; + } + return readl(pmc->base + offset); } -static void tegra_pmc_writel(u32 value, unsigned long offset) +static void tegra_pmc_writel(struct tegra_pmc *pmc, u32 value, + unsigned long offset) { - writel(value, pmc->base + offset); + struct arm_smccc_res res; + + if (pmc->tz_only) { + arm_smccc_smc(TEGRA_SMC_PMC, TEGRA_SMC_PMC_WRITE, offset, + value, 0, 0, 0, 0, &res); + if (res.a0) { + if (pmc->dev) + dev_warn(pmc->dev, "%s(): SMC failed: %lu\n", + __func__, res.a0); + else + pr_warn("%s(): SMC failed: %lu\n", __func__, + res.a0); + } + } else { + writel(value, pmc->base + offset); + } } +static u32 tegra_pmc_scratch_readl(struct tegra_pmc *pmc, unsigned long offset) +{ + if (pmc->tz_only) + return tegra_pmc_readl(pmc, offset); + + return readl(pmc->scratch + offset); +} + +static void tegra_pmc_scratch_writel(struct tegra_pmc *pmc, u32 value, + unsigned long offset) +{ + if (pmc->tz_only) + tegra_pmc_writel(pmc, value, offset); + else + writel(value, pmc->scratch + offset); +} + +/* + * TODO Figure out a way to call this with the struct tegra_pmc * passed in. + * This currently doesn't work because readx_poll_timeout() can only operate + * on functions that take a single argument. 
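 * (For reference, readx_poll_timeout(op, addr, val, cond, sleep_us,
 *  timeout_us) from <linux/iopoll.h> repeatedly evaluates, roughly,
 *
 *      (val) = op(addr);
 *
 *  until "cond" holds or the timeout expires, so "op" must be callable
 *  with the single "addr" argument. tegra_powergate_state() therefore
 *  keeps using the file-scope "pmc" pointer instead of taking one.)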
+ */ static inline bool tegra_powergate_state(int id) { if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps) - return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0; + return (tegra_pmc_readl(pmc, GPU_RG_CNTRL) & 0x1) == 0; else - return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0; + return (tegra_pmc_readl(pmc, PWRGATE_STATUS) & BIT(id)) != 0; } -static inline bool tegra_powergate_is_valid(int id) +static inline bool tegra_powergate_is_valid(struct tegra_pmc *pmc, int id) { return (pmc->soc && pmc->soc->powergates[id]); } -static inline bool tegra_powergate_is_available(int id) +static inline bool tegra_powergate_is_available(struct tegra_pmc *pmc, int id) { return test_bit(id, pmc->powergates_available); } @@ -374,7 +444,7 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name) return -EINVAL; for (i = 0; i < pmc->soc->num_powergates; i++) { - if (!tegra_powergate_is_valid(i)) + if (!tegra_powergate_is_valid(pmc, i)) continue; if (!strcmp(name, pmc->soc->powergates[i])) @@ -386,10 +456,12 @@ static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name) /** * tegra_powergate_set() - set the state of a partition + * @pmc: power management controller * @id: partition ID * @new_state: new state of the partition */ -static int tegra_powergate_set(unsigned int id, bool new_state) +static int tegra_powergate_set(struct tegra_pmc *pmc, unsigned int id, + bool new_state) { bool status; int err; @@ -404,7 +476,7 @@ static int tegra_powergate_set(unsigned int id, bool new_state) return 0; } - tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); + tegra_pmc_writel(pmc, PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE); err = readx_poll_timeout(tegra_powergate_state, id, status, status == new_state, 10, 100000); @@ -414,7 +486,8 @@ static int tegra_powergate_set(unsigned int id, bool new_state) return err; } -static int __tegra_powergate_remove_clamping(unsigned int id) +static int __tegra_powergate_remove_clamping(struct tegra_pmc *pmc, + unsigned int id) { u32 mask; @@ -426,7 +499,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id) */ if (id == TEGRA_POWERGATE_3D) { if (pmc->soc->has_gpu_clamps) { - tegra_pmc_writel(0, GPU_RG_CNTRL); + tegra_pmc_writel(pmc, 0, GPU_RG_CNTRL); goto out; } } @@ -442,7 +515,7 @@ static int __tegra_powergate_remove_clamping(unsigned int id) else mask = (1 << id); - tegra_pmc_writel(mask, REMOVE_CLAMPING); + tegra_pmc_writel(pmc, mask, REMOVE_CLAMPING); out: mutex_unlock(&pmc->powergates_lock); @@ -494,7 +567,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, usleep_range(10, 20); - err = tegra_powergate_set(pg->id, true); + err = tegra_powergate_set(pg->pmc, pg->id, true); if (err < 0) return err; @@ -506,7 +579,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg, usleep_range(10, 20); - err = __tegra_powergate_remove_clamping(pg->id); + err = __tegra_powergate_remove_clamping(pg->pmc, pg->id); if (err) goto disable_clks; @@ -533,7 +606,7 @@ disable_clks: usleep_range(10, 20); powergate_off: - tegra_powergate_set(pg->id, false); + tegra_powergate_set(pg->pmc, pg->id, false); return err; } @@ -558,7 +631,7 @@ static int tegra_powergate_power_down(struct tegra_powergate *pg) usleep_range(10, 20); - err = tegra_powergate_set(pg->id, false); + err = tegra_powergate_set(pg->pmc, pg->id, false); if (err) goto assert_resets; @@ -579,12 +652,13 @@ disable_clks: static int tegra_genpd_power_on(struct generic_pm_domain *domain) { struct tegra_powergate *pg = to_powergate(domain); + struct 
device *dev = pg->pmc->dev; int err; err = tegra_powergate_power_up(pg, true); if (err) - pr_err("failed to turn on PM domain %s: %d\n", pg->genpd.name, - err); + dev_err(dev, "failed to turn on PM domain %s: %d\n", + pg->genpd.name, err); return err; } @@ -592,12 +666,13 @@ static int tegra_genpd_power_on(struct generic_pm_domain *domain) static int tegra_genpd_power_off(struct generic_pm_domain *domain) { struct tegra_powergate *pg = to_powergate(domain); + struct device *dev = pg->pmc->dev; int err; err = tegra_powergate_power_down(pg); if (err) - pr_err("failed to turn off PM domain %s: %d\n", - pg->genpd.name, err); + dev_err(dev, "failed to turn off PM domain %s: %d\n", + pg->genpd.name, err); return err; } @@ -608,10 +683,10 @@ static int tegra_genpd_power_off(struct generic_pm_domain *domain) */ int tegra_powergate_power_on(unsigned int id) { - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; - return tegra_powergate_set(id, true); + return tegra_powergate_set(pmc, id, true); } /** @@ -620,20 +695,21 @@ int tegra_powergate_power_on(unsigned int id) */ int tegra_powergate_power_off(unsigned int id) { - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; - return tegra_powergate_set(id, false); + return tegra_powergate_set(pmc, id, false); } EXPORT_SYMBOL(tegra_powergate_power_off); /** * tegra_powergate_is_powered() - check if partition is powered + * @pmc: power management controller * @id: partition ID */ -int tegra_powergate_is_powered(unsigned int id) +static int tegra_powergate_is_powered(struct tegra_pmc *pmc, unsigned int id) { - if (!tegra_powergate_is_valid(id)) + if (!tegra_powergate_is_valid(pmc, id)) return -EINVAL; return tegra_powergate_state(id); @@ -645,10 +721,10 @@ int tegra_powergate_is_powered(unsigned int id) */ int tegra_powergate_remove_clamping(unsigned int id) { - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; - return __tegra_powergate_remove_clamping(id); + return __tegra_powergate_remove_clamping(pmc, id); } EXPORT_SYMBOL(tegra_powergate_remove_clamping); @@ -666,7 +742,7 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, struct tegra_powergate *pg; int err; - if (!tegra_powergate_is_available(id)) + if (!tegra_powergate_is_available(pmc, id)) return -EINVAL; pg = kzalloc(sizeof(*pg), GFP_KERNEL); @@ -681,7 +757,8 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, err = tegra_powergate_power_up(pg, false); if (err) - pr_err("failed to turn on partition %d: %d\n", id, err); + dev_err(pmc->dev, "failed to turn on partition %d: %d\n", id, + err); kfree(pg); @@ -691,12 +768,14 @@ EXPORT_SYMBOL(tegra_powergate_sequence_power_up); /** * tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID + * @pmc: power management controller * @cpuid: CPU partition ID * * Returns the partition ID corresponding to the CPU partition ID or a * negative error code on failure. 
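 *
 * (The mapping is a simple lookup in the per-SoC cpu_powergates[] table;
 *  SoCs that leave .cpu_powergates set to NULL, such as Tegra186 and
 *  Tegra194 further down in this patch, fail every lookup.)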
*/ -static int tegra_get_cpu_powergate_id(unsigned int cpuid) +static int tegra_get_cpu_powergate_id(struct tegra_pmc *pmc, + unsigned int cpuid) { if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates) return pmc->soc->cpu_powergates[cpuid]; @@ -712,11 +791,11 @@ bool tegra_pmc_cpu_is_powered(unsigned int cpuid) { int id; - id = tegra_get_cpu_powergate_id(cpuid); + id = tegra_get_cpu_powergate_id(pmc, cpuid); if (id < 0) return false; - return tegra_powergate_is_powered(id); + return tegra_powergate_is_powered(pmc, id); } /** @@ -727,11 +806,11 @@ int tegra_pmc_cpu_power_on(unsigned int cpuid) { int id; - id = tegra_get_cpu_powergate_id(cpuid); + id = tegra_get_cpu_powergate_id(pmc, cpuid); if (id < 0) return id; - return tegra_powergate_set(id, true); + return tegra_powergate_set(pmc, id, true); } /** @@ -742,7 +821,7 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid) { int id; - id = tegra_get_cpu_powergate_id(cpuid); + id = tegra_get_cpu_powergate_id(pmc, cpuid); if (id < 0) return id; @@ -755,7 +834,7 @@ static int tegra_pmc_restart_notify(struct notifier_block *this, const char *cmd = data; u32 value; - value = readl(pmc->scratch + pmc->soc->regs->scratch0); + value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0); value &= ~PMC_SCRATCH0_MODE_MASK; if (cmd) { @@ -769,12 +848,12 @@ static int tegra_pmc_restart_notify(struct notifier_block *this, value |= PMC_SCRATCH0_MODE_RCM; } - writel(value, pmc->scratch + pmc->soc->regs->scratch0); + tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0); /* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */ - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value |= PMC_CNTRL_MAIN_RST; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); return NOTIFY_DONE; } @@ -793,7 +872,7 @@ static int powergate_show(struct seq_file *s, void *data) seq_printf(s, "------------------\n"); for (i = 0; i < pmc->soc->num_powergates; i++) { - status = tegra_powergate_is_powered(i); + status = tegra_powergate_is_powered(pmc, i); if (status < 0) continue; @@ -855,12 +934,13 @@ err: static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, struct device_node *np, bool off) { + struct device *dev = pg->pmc->dev; int err; pg->reset = of_reset_control_array_get_exclusive(np); if (IS_ERR(pg->reset)) { err = PTR_ERR(pg->reset); - pr_err("failed to get device resets: %d\n", err); + dev_err(dev, "failed to get device resets: %d\n", err); return err; } @@ -877,6 +957,7 @@ static int tegra_powergate_of_get_resets(struct tegra_powergate *pg, static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) { + struct device *dev = pmc->dev; struct tegra_powergate *pg; int id, err; bool off; @@ -887,7 +968,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) id = tegra_powergate_lookup(pmc, np->name); if (id < 0) { - pr_err("powergate lookup failed for %pOFn: %d\n", np, id); + dev_err(dev, "powergate lookup failed for %pOFn: %d\n", np, id); goto free_mem; } @@ -903,17 +984,17 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) pg->genpd.power_on = tegra_genpd_power_on; pg->pmc = pmc; - off = !tegra_powergate_is_powered(pg->id); + off = !tegra_powergate_is_powered(pmc, pg->id); err = tegra_powergate_of_get_clks(pg, np); if (err < 0) { - pr_err("failed to get clocks for %pOFn: %d\n", np, err); + dev_err(dev, "failed to get clocks for %pOFn: %d\n", np, err); goto set_available; } err = 
tegra_powergate_of_get_resets(pg, np, off); if (err < 0) { - pr_err("failed to get resets for %pOFn: %d\n", np, err); + dev_err(dev, "failed to get resets for %pOFn: %d\n", np, err); goto remove_clks; } @@ -926,19 +1007,19 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np) err = pm_genpd_init(&pg->genpd, NULL, off); if (err < 0) { - pr_err("failed to initialise PM domain %pOFn: %d\n", np, + dev_err(dev, "failed to initialise PM domain %pOFn: %d\n", np, err); goto remove_resets; } err = of_genpd_add_provider_simple(np, &pg->genpd); if (err < 0) { - pr_err("failed to add PM domain provider for %pOFn: %d\n", - np, err); + dev_err(dev, "failed to add PM domain provider for %pOFn: %d\n", + np, err); goto remove_genpd; } - pr_debug("added PM domain %s\n", pg->genpd.name); + dev_dbg(dev, "added PM domain %s\n", pg->genpd.name); return; @@ -994,7 +1075,8 @@ tegra_io_pad_find(struct tegra_pmc *pmc, enum tegra_io_pad id) return NULL; } -static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id, +static int tegra_io_pad_get_dpd_register_bit(struct tegra_pmc *pmc, + enum tegra_io_pad id, unsigned long *request, unsigned long *status, u32 *mask) @@ -1003,7 +1085,7 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id, pad = tegra_io_pad_find(pmc, id); if (!pad) { - pr_err("invalid I/O pad ID %u\n", id); + dev_err(pmc->dev, "invalid I/O pad ID %u\n", id); return -ENOENT; } @@ -1023,43 +1105,44 @@ static int tegra_io_pad_get_dpd_register_bit(enum tegra_io_pad id, return 0; } -static int tegra_io_pad_prepare(enum tegra_io_pad id, unsigned long *request, - unsigned long *status, u32 *mask) +static int tegra_io_pad_prepare(struct tegra_pmc *pmc, enum tegra_io_pad id, + unsigned long *request, unsigned long *status, + u32 *mask) { unsigned long rate, value; int err; - err = tegra_io_pad_get_dpd_register_bit(id, request, status, mask); + err = tegra_io_pad_get_dpd_register_bit(pmc, id, request, status, mask); if (err) return err; if (pmc->clk) { rate = clk_get_rate(pmc->clk); if (!rate) { - pr_err("failed to get clock rate\n"); + dev_err(pmc->dev, "failed to get clock rate\n"); return -ENODEV; } - tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE); + tegra_pmc_writel(pmc, DPD_SAMPLE_ENABLE, DPD_SAMPLE); /* must be at least 200 ns, in APB (PCLK) clock cycles */ value = DIV_ROUND_UP(1000000000, rate); value = DIV_ROUND_UP(200, value); - tegra_pmc_writel(value, SEL_DPD_TIM); + tegra_pmc_writel(pmc, value, SEL_DPD_TIM); } return 0; } -static int tegra_io_pad_poll(unsigned long offset, u32 mask, - u32 val, unsigned long timeout) +static int tegra_io_pad_poll(struct tegra_pmc *pmc, unsigned long offset, + u32 mask, u32 val, unsigned long timeout) { u32 value; timeout = jiffies + msecs_to_jiffies(timeout); while (time_after(timeout, jiffies)) { - value = tegra_pmc_readl(offset); + value = tegra_pmc_readl(pmc, offset); if ((value & mask) == val) return 0; @@ -1069,10 +1152,10 @@ static int tegra_io_pad_poll(unsigned long offset, u32 mask, return -ETIMEDOUT; } -static void tegra_io_pad_unprepare(void) +static void tegra_io_pad_unprepare(struct tegra_pmc *pmc) { if (pmc->clk) - tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE); + tegra_pmc_writel(pmc, DPD_SAMPLE_DISABLE, DPD_SAMPLE); } /** @@ -1089,21 +1172,21 @@ int tegra_io_pad_power_enable(enum tegra_io_pad id) mutex_lock(&pmc->powergates_lock); - err = tegra_io_pad_prepare(id, &request, &status, &mask); + err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask); if (err < 0) { - pr_err("failed to prepare I/O 
pad: %d\n", err); + dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err); goto unlock; } - tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | mask, request); + tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_OFF | mask, request); - err = tegra_io_pad_poll(status, mask, 0, 250); + err = tegra_io_pad_poll(pmc, status, mask, 0, 250); if (err < 0) { - pr_err("failed to enable I/O pad: %d\n", err); + dev_err(pmc->dev, "failed to enable I/O pad: %d\n", err); goto unlock; } - tegra_io_pad_unprepare(); + tegra_io_pad_unprepare(pmc); unlock: mutex_unlock(&pmc->powergates_lock); @@ -1125,21 +1208,21 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id) mutex_lock(&pmc->powergates_lock); - err = tegra_io_pad_prepare(id, &request, &status, &mask); + err = tegra_io_pad_prepare(pmc, id, &request, &status, &mask); if (err < 0) { - pr_err("failed to prepare I/O pad: %d\n", err); + dev_err(pmc->dev, "failed to prepare I/O pad: %d\n", err); goto unlock; } - tegra_pmc_writel(IO_DPD_REQ_CODE_ON | mask, request); + tegra_pmc_writel(pmc, IO_DPD_REQ_CODE_ON | mask, request); - err = tegra_io_pad_poll(status, mask, mask, 250); + err = tegra_io_pad_poll(pmc, status, mask, mask, 250); if (err < 0) { - pr_err("failed to disable I/O pad: %d\n", err); + dev_err(pmc->dev, "failed to disable I/O pad: %d\n", err); goto unlock; } - tegra_io_pad_unprepare(); + tegra_io_pad_unprepare(pmc); unlock: mutex_unlock(&pmc->powergates_lock); @@ -1147,22 +1230,24 @@ unlock: } EXPORT_SYMBOL(tegra_io_pad_power_disable); -static int tegra_io_pad_is_powered(enum tegra_io_pad id) +static int tegra_io_pad_is_powered(struct tegra_pmc *pmc, enum tegra_io_pad id) { unsigned long request, status; u32 mask, value; int err; - err = tegra_io_pad_get_dpd_register_bit(id, &request, &status, &mask); + err = tegra_io_pad_get_dpd_register_bit(pmc, id, &request, &status, + &mask); if (err) return err; - value = tegra_pmc_readl(status); + value = tegra_pmc_readl(pmc, status); return !(value & mask); } -static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage) +static int tegra_io_pad_set_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id, + int voltage) { const struct tegra_io_pad_soc *pad; u32 value; @@ -1177,29 +1262,29 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage) mutex_lock(&pmc->powergates_lock); if (pmc->soc->has_impl_33v_pwr) { - value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR); + value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR); if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8) value &= ~BIT(pad->voltage); else value |= BIT(pad->voltage); - tegra_pmc_writel(value, PMC_IMPL_E_33V_PWR); + tegra_pmc_writel(pmc, value, PMC_IMPL_E_33V_PWR); } else { /* write-enable PMC_PWR_DET_VALUE[pad->voltage] */ - value = tegra_pmc_readl(PMC_PWR_DET); + value = tegra_pmc_readl(pmc, PMC_PWR_DET); value |= BIT(pad->voltage); - tegra_pmc_writel(value, PMC_PWR_DET); + tegra_pmc_writel(pmc, value, PMC_PWR_DET); /* update I/O voltage */ - value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE); if (voltage == TEGRA_IO_PAD_VOLTAGE_1V8) value &= ~BIT(pad->voltage); else value |= BIT(pad->voltage); - tegra_pmc_writel(value, PMC_PWR_DET_VALUE); + tegra_pmc_writel(pmc, value, PMC_PWR_DET_VALUE); } mutex_unlock(&pmc->powergates_lock); @@ -1209,7 +1294,7 @@ static int tegra_io_pad_set_voltage(enum tegra_io_pad id, int voltage) return 0; } -static int tegra_io_pad_get_voltage(enum tegra_io_pad id) +static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id) { const struct tegra_io_pad_soc *pad; u32 value; 
@@ -1222,9 +1307,9 @@ static int tegra_io_pad_get_voltage(enum tegra_io_pad id) return -ENOTSUPP; if (pmc->soc->has_impl_33v_pwr) - value = tegra_pmc_readl(PMC_IMPL_E_33V_PWR); + value = tegra_pmc_readl(pmc, PMC_IMPL_E_33V_PWR); else - value = tegra_pmc_readl(PMC_PWR_DET_VALUE); + value = tegra_pmc_readl(pmc, PMC_PWR_DET_VALUE); if ((value & BIT(pad->voltage)) == 0) return TEGRA_IO_PAD_VOLTAGE_1V8; @@ -1296,21 +1381,21 @@ void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode) ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1; do_div(ticks, USEC_PER_SEC); - tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER); + tegra_pmc_writel(pmc, ticks, PMC_CPUPWRGOOD_TIMER); ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1; do_div(ticks, USEC_PER_SEC); - tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER); + tegra_pmc_writel(pmc, ticks, PMC_CPUPWROFF_TIMER); wmb(); pmc->rate = rate; } - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value &= ~PMC_CNTRL_SIDE_EFFECT_LP0; value |= PMC_CNTRL_CPU_PWRREQ_OE; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); } #endif @@ -1432,13 +1517,13 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc) if (of_property_read_u32(np, "nvidia,pinmux-id", &pinmux)) pinmux = 0; - value = tegra_pmc_readl(PMC_SENSOR_CTRL); + value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL); value |= PMC_SENSOR_CTRL_SCRATCH_WRITE; - tegra_pmc_writel(value, PMC_SENSOR_CTRL); + tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL); value = (reg_data << PMC_SCRATCH54_DATA_SHIFT) | (reg_addr << PMC_SCRATCH54_ADDR_SHIFT); - tegra_pmc_writel(value, PMC_SCRATCH54); + tegra_pmc_writel(pmc, value, PMC_SCRATCH54); value = PMC_SCRATCH55_RESET_TEGRA; value |= ctrl_id << PMC_SCRATCH55_CNTRL_ID_SHIFT; @@ -1456,11 +1541,11 @@ static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc) value |= checksum << PMC_SCRATCH55_CHECKSUM_SHIFT; - tegra_pmc_writel(value, PMC_SCRATCH55); + tegra_pmc_writel(pmc, value, PMC_SCRATCH55); - value = tegra_pmc_readl(PMC_SENSOR_CTRL); + value = tegra_pmc_readl(pmc, PMC_SENSOR_CTRL); value |= PMC_SENSOR_CTRL_ENABLE_RST; - tegra_pmc_writel(value, PMC_SENSOR_CTRL); + tegra_pmc_writel(pmc, value, PMC_SENSOR_CTRL); dev_info(pmc->dev, "emergency thermal reset enabled\n"); @@ -1470,12 +1555,16 @@ out: static int tegra_io_pad_pinctrl_get_groups_count(struct pinctrl_dev *pctl_dev) { + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + return pmc->soc->num_io_pads; } -static const char *tegra_io_pad_pinctrl_get_group_name( - struct pinctrl_dev *pctl, unsigned int group) +static const char *tegra_io_pad_pinctrl_get_group_name(struct pinctrl_dev *pctl, + unsigned int group) { + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl); + return pmc->soc->io_pads[group].name; } @@ -1484,8 +1573,11 @@ static int tegra_io_pad_pinctrl_get_group_pins(struct pinctrl_dev *pctl_dev, const unsigned int **pins, unsigned int *num_pins) { + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + *pins = &pmc->soc->io_pads[group].id; *num_pins = 1; + return 0; } @@ -1500,27 +1592,33 @@ static const struct pinctrl_ops tegra_io_pad_pinctrl_ops = { static int tegra_io_pad_pinconf_get(struct pinctrl_dev *pctl_dev, unsigned int pin, unsigned long *config) { - const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin); enum pin_config_param param = pinconf_to_config_param(*config); + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + const struct tegra_io_pad_soc *pad; int ret; u32 arg; + pad = tegra_io_pad_find(pmc, 
pin); if (!pad) return -EINVAL; switch (param) { case PIN_CONFIG_POWER_SOURCE: - ret = tegra_io_pad_get_voltage(pad->id); + ret = tegra_io_pad_get_voltage(pmc, pad->id); if (ret < 0) return ret; + arg = ret; break; + case PIN_CONFIG_LOW_POWER_MODE: - ret = tegra_io_pad_is_powered(pad->id); + ret = tegra_io_pad_is_powered(pmc, pad->id); if (ret < 0) return ret; + arg = !ret; break; + default: return -EINVAL; } @@ -1534,12 +1632,14 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev, unsigned int pin, unsigned long *configs, unsigned int num_configs) { - const struct tegra_io_pad_soc *pad = tegra_io_pad_find(pmc, pin); + struct tegra_pmc *pmc = pinctrl_dev_get_drvdata(pctl_dev); + const struct tegra_io_pad_soc *pad; enum pin_config_param param; unsigned int i; int err; u32 arg; + pad = tegra_io_pad_find(pmc, pin); if (!pad) return -EINVAL; @@ -1560,7 +1660,7 @@ static int tegra_io_pad_pinconf_set(struct pinctrl_dev *pctl_dev, if (arg != TEGRA_IO_PAD_VOLTAGE_1V8 && arg != TEGRA_IO_PAD_VOLTAGE_3V3) return -EINVAL; - err = tegra_io_pad_set_voltage(pad->id, arg); + err = tegra_io_pad_set_voltage(pmc, pad->id, arg); if (err) return err; break; @@ -1585,7 +1685,7 @@ static struct pinctrl_desc tegra_pmc_pctl_desc = { static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc) { - int err = 0; + int err; if (!pmc->soc->num_pin_descs) return 0; @@ -1598,18 +1698,20 @@ static int tegra_pmc_pinctrl_init(struct tegra_pmc *pmc) pmc); if (IS_ERR(pmc->pctl_dev)) { err = PTR_ERR(pmc->pctl_dev); - dev_err(pmc->dev, "unable to register pinctrl, %d\n", err); + dev_err(pmc->dev, "failed to register pin controller: %d\n", + err); + return err; } - return err; + return 0; } static ssize_t reset_reason_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { u32 value, rst_src; - value = tegra_pmc_readl(pmc->soc->regs->rst_status); + value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status); rst_src = (value & pmc->soc->regs->rst_source_mask) >> pmc->soc->regs->rst_source_shift; @@ -1619,11 +1721,11 @@ static ssize_t reset_reason_show(struct device *dev, static DEVICE_ATTR_RO(reset_reason); static ssize_t reset_level_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { u32 value, rst_lvl; - value = tegra_pmc_readl(pmc->soc->regs->rst_status); + value = tegra_pmc_readl(pmc, pmc->soc->regs->rst_status); rst_lvl = (value & pmc->soc->regs->rst_level_mask) >> pmc->soc->regs->rst_level_shift; @@ -1641,16 +1743,16 @@ static void tegra_pmc_reset_sysfs_init(struct tegra_pmc *pmc) err = device_create_file(dev, &dev_attr_reset_reason); if (err < 0) dev_warn(dev, - "failed to create attr \"reset_reason\": %d\n", - err); + "failed to create attr \"reset_reason\": %d\n", + err); } if (pmc->soc->reset_levels) { err = device_create_file(dev, &dev_attr_reset_level); if (err < 0) dev_warn(dev, - "failed to create attr \"reset_level\": %d\n", - err); + "failed to create attr \"reset_level\": %d\n", + err); } } @@ -1920,6 +2022,8 @@ static int tegra_pmc_probe(struct platform_device *pdev) pmc->base = base; mutex_unlock(&pmc->powergates_lock); + platform_set_drvdata(pdev, pmc); + return 0; cleanup_restart_handler: @@ -1932,14 +2036,18 @@ cleanup_debugfs: #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ARM) static int tegra_pmc_suspend(struct device *dev) { - tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41); + struct tegra_pmc *pmc = dev_get_drvdata(dev); + + tegra_pmc_writel(pmc, 
virt_to_phys(tegra_resume), PMC_SCRATCH41); return 0; } static int tegra_pmc_resume(struct device *dev) { - tegra_pmc_writel(0x0, PMC_SCRATCH41); + struct tegra_pmc *pmc = dev_get_drvdata(dev); + + tegra_pmc_writel(pmc, 0x0, PMC_SCRATCH41); return 0; } @@ -1976,11 +2084,11 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc) u32 value; /* Always enable CPU power request */ - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value |= PMC_CNTRL_CPU_PWRREQ_OE; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); if (pmc->sysclkreq_high) value &= ~PMC_CNTRL_SYSCLK_POLARITY; @@ -1988,12 +2096,12 @@ static void tegra20_pmc_init(struct tegra_pmc *pmc) value |= PMC_CNTRL_SYSCLK_POLARITY; /* configure the output polarity while the request is tristated */ - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); /* now enable the request */ - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); value |= PMC_CNTRL_SYSCLK_OE; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); } static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc, @@ -2002,14 +2110,14 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc, { u32 value; - value = tegra_pmc_readl(PMC_CNTRL); + value = tegra_pmc_readl(pmc, PMC_CNTRL); if (invert) value |= PMC_CNTRL_INTR_POLARITY; else value &= ~PMC_CNTRL_INTR_POLARITY; - tegra_pmc_writel(value, PMC_CNTRL); + tegra_pmc_writel(pmc, value, PMC_CNTRL); } static const struct tegra_pmc_soc tegra20_pmc_soc = { @@ -2019,6 +2127,9 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = { .cpu_powergates = NULL, .has_tsense_reset = false, .has_gpu_clamps = false, + .needs_mbist_war = false, + .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = 0, .io_pads = NULL, .num_pin_descs = 0, @@ -2063,7 +2174,9 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { .cpu_powergates = tegra30_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = false, + .needs_mbist_war = false, .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = 0, .io_pads = NULL, .num_pin_descs = 0, @@ -2112,7 +2225,9 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = { .cpu_powergates = tegra114_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = false, + .needs_mbist_war = false, .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = 0, .io_pads = NULL, .num_pin_descs = 0, @@ -2221,7 +2336,9 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .cpu_powergates = tegra124_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, + .needs_mbist_war = false, .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = ARRAY_SIZE(tegra124_io_pads), .io_pads = tegra124_io_pads, .num_pin_descs = ARRAY_SIZE(tegra124_pin_descs), @@ -2325,8 +2442,9 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .cpu_powergates = tegra210_cpu_powergates, .has_tsense_reset = true, .has_gpu_clamps = true, - .has_impl_33v_pwr = false, .needs_mbist_war = true, + .has_impl_33v_pwr = false, + .maybe_tz_only = true, .num_io_pads = ARRAY_SIZE(tegra210_io_pads), .io_pads = tegra210_io_pads, .num_pin_descs = ARRAY_SIZE(tegra210_pin_descs), @@ -2413,7 +2531,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, index = of_property_match_string(np, "reg-names", "wake"); if (index < 0) { - pr_err("failed to find PMC wake 
registers\n"); + dev_err(pmc->dev, "failed to find PMC wake registers\n"); return; } @@ -2421,7 +2539,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, wake = ioremap_nocache(regs.start, resource_size(®s)); if (!wake) { - pr_err("failed to map PMC wake registers\n"); + dev_err(pmc->dev, "failed to map PMC wake registers\n"); return; } @@ -2438,7 +2556,7 @@ static void tegra186_pmc_setup_irq_polarity(struct tegra_pmc *pmc, } static const struct tegra_wake_event tegra186_wake_events[] = { - TEGRA_WAKE_GPIO("power", 29, 1, TEGRA_AON_GPIO(FF, 0)), + TEGRA_WAKE_GPIO("power", 29, 1, TEGRA186_AON_GPIO(FF, 0)), TEGRA_WAKE_IRQ("rtc", 73, 10), }; @@ -2449,7 +2567,9 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { .cpu_powergates = NULL, .has_tsense_reset = false, .has_gpu_clamps = false, + .needs_mbist_war = false, .has_impl_33v_pwr = true, + .maybe_tz_only = false, .num_io_pads = ARRAY_SIZE(tegra186_io_pads), .io_pads = tegra186_io_pads, .num_pin_descs = ARRAY_SIZE(tegra186_pin_descs), @@ -2527,6 +2647,9 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = { .cpu_powergates = NULL, .has_tsense_reset = false, .has_gpu_clamps = false, + .needs_mbist_war = false, + .has_impl_33v_pwr = false, + .maybe_tz_only = false, .num_io_pads = ARRAY_SIZE(tegra194_io_pads), .io_pads = tegra194_io_pads, .regs = &tegra186_pmc_regs, @@ -2561,6 +2684,32 @@ static struct platform_driver tegra_pmc_driver = { }; builtin_platform_driver(tegra_pmc_driver); +static bool __init tegra_pmc_detect_tz_only(struct tegra_pmc *pmc) +{ + u32 value, saved; + + saved = readl(pmc->base + pmc->soc->regs->scratch0); + value = saved ^ 0xffffffff; + + if (value == 0xffffffff) + value = 0xdeadbeef; + + /* write pattern and read it back */ + writel(value, pmc->base + pmc->soc->regs->scratch0); + value = readl(pmc->base + pmc->soc->regs->scratch0); + + /* if we read all-zeroes, access is restricted to TZ only */ + if (value == 0) { + pr_info("access to PMC is restricted to TZ\n"); + return true; + } + + /* restore original value */ + writel(saved, pmc->base + pmc->soc->regs->scratch0); + + return false; +} + /* * Early initialization to allow access to registers in the very early boot * process. @@ -2623,6 +2772,9 @@ static int __init tegra_pmc_early_init(void) if (np) { pmc->soc = match->data; + if (pmc->soc->maybe_tz_only) + pmc->tz_only = tegra_pmc_detect_tz_only(pmc); + tegra_powergate_init(pmc, np); /* diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c index e05ab16d9a9e..6285cd8efb21 100644 --- a/drivers/soc/ti/knav_dma.c +++ b/drivers/soc/ti/knav_dma.c @@ -598,7 +598,7 @@ static int pktdma_init_chan(struct knav_dma_device *dma, INIT_LIST_HEAD(&chan->list); chan->dma = dma; - chan->direction = DMA_NONE; + chan->direction = DMA_TRANS_NONE; atomic_set(&chan->ref_count, 0); spin_lock_init(&chan->lock); diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig index 687c8f3cd955..01e76b58dd78 100644 --- a/drivers/soc/xilinx/Kconfig +++ b/drivers/soc/xilinx/Kconfig @@ -17,4 +17,24 @@ config XILINX_VCU To compile this driver as a module, choose M here: the module will be called xlnx_vcu. +config ZYNQMP_POWER + bool "Enable Xilinx Zynq MPSoC Power Management driver" + depends on PM && ARCH_ZYNQMP + default y + help + Say yes to enable power management support for ZyqnMP SoC. + This driver uses firmware driver as an interface for power + management request to firmware. It registers isr to handle + power management callbacks from firmware. + If in doubt, say N. 
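This option and ZYNQMP_PM_DOMAINS below both build on the EEMI ops exported by the ZynqMP firmware driver. The calling pattern used throughout the two new files further down is roughly the following sketch; example_request() is a hypothetical name, and it assumes the xlnx-zynqmp firmware header is included:

	/* Minimal sketch of the EEMI ops idiom used by both new drivers. */
	static int example_request(u32 node_id)
	{
		const struct zynqmp_eemi_ops *ops = zynqmp_pm_get_eemi_ops();

		if (!ops || !ops->request_node)
			return -ENXIO;	/* firmware interface not available */

		return ops->request_node(node_id, 0, 0,
					 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
	}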
+ +config ZYNQMP_PM_DOMAINS + bool "Enable Zynq MPSoC generic PM domains" + default y + depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE + select PM_GENERIC_DOMAINS + help + Say yes to enable device power management through PM domains + If in doubt, say N. + endmenu diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile index dee8fd51e303..f66bfea5de17 100644 --- a/drivers/soc/xilinx/Makefile +++ b/drivers/soc/xilinx/Makefile @@ -1,2 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_XILINX_VCU) += xlnx_vcu.o +obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o +obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c new file mode 100644 index 000000000000..354d256e6e00 --- /dev/null +++ b/drivers/soc/xilinx/zynqmp_pm_domains.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ZynqMP Generic PM domain support + * + * Copyright (C) 2015-2018 Xilinx, Inc. + * + * Davorin Mista + * Jolly Shah + * Rajan Vaja + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define ZYNQMP_NUM_DOMAINS (100) +/* Flag stating if PM nodes mapped to the PM domain has been requested */ +#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0) + +/** + * struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain + * @gpd: Generic power domain + * @node_id: PM node ID corresponding to device inside PM domain + * @flags: ZynqMP PM domain flags + */ +struct zynqmp_pm_domain { + struct generic_pm_domain gpd; + u32 node_id; + u8 flags; +}; + +/** + * zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source + * path + * @dev: Device to check for wakeup source path + * @not_used: Data member (not required) + * + * This function is checks device's child hierarchy and checks if any device is + * set as wakeup source. + * + * Return: 1 if device is in wakeup source path else 0 + */ +static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used) +{ + int may_wakeup; + + may_wakeup = device_may_wakeup(dev); + if (may_wakeup) + return may_wakeup; + + return device_for_each_child(dev, NULL, + zynqmp_gpd_is_active_wakeup_path); +} + +/** + * zynqmp_gpd_power_on() - Power on PM domain + * @domain: Generic PM domain + * + * This function is called before devices inside a PM domain are resumed, to + * power on PM domain. + * + * Return: 0 on success, error code otherwise + */ +static int zynqmp_gpd_power_on(struct generic_pm_domain *domain) +{ + int ret; + struct zynqmp_pm_domain *pd; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->set_requirement) + return -ENXIO; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + ret = eemi_ops->set_requirement(pd->node_id, + ZYNQMP_PM_CAPABILITY_ACCESS, + ZYNQMP_PM_MAX_QOS, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + if (ret) { + pr_err("%s() %s set requirement for node %d failed: %d\n", + __func__, domain->name, pd->node_id, ret); + return ret; + } + + pr_debug("%s() Powered on %s domain\n", __func__, domain->name); + return 0; +} + +/** + * zynqmp_gpd_power_off() - Power off PM domain + * @domain: Generic PM domain + * + * This function is called after devices inside a PM domain are suspended, to + * power off PM domain. 
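 * (Before dropping the node's requirement to zero, the implementation
 *  below walks the domain's device list; if any device, or one of its
 *  children, is flagged as a wakeup source, it requests
 *  ZYNQMP_PM_CAPABILITY_WAKEUP instead so the node can still wake the
 *  system.)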
+ * + * Return: 0 on success, error code otherwise + */ +static int zynqmp_gpd_power_off(struct generic_pm_domain *domain) +{ + int ret; + struct pm_domain_data *pdd, *tmp; + struct zynqmp_pm_domain *pd; + u32 capabilities = 0; + bool may_wakeup; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->set_requirement) + return -ENXIO; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + + /* If domain is already released there is nothing to be done */ + if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) { + pr_debug("%s() %s domain is already released\n", + __func__, domain->name); + return 0; + } + + list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) { + /* If device is in wakeup path, set capability to WAKEUP */ + may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL); + if (may_wakeup) { + dev_dbg(pdd->dev, "device is in wakeup path in %s\n", + domain->name); + capabilities = ZYNQMP_PM_CAPABILITY_WAKEUP; + break; + } + } + + ret = eemi_ops->set_requirement(pd->node_id, capabilities, 0, + ZYNQMP_PM_REQUEST_ACK_NO); + /** + * If powering down of any node inside this domain fails, + * report and return the error + */ + if (ret) { + pr_err("%s() %s set requirement for node %d failed: %d\n", + __func__, domain->name, pd->node_id, ret); + return ret; + } + + pr_debug("%s() Powered off %s domain\n", __func__, domain->name); + return 0; +} + +/** + * zynqmp_gpd_attach_dev() - Attach device to the PM domain + * @domain: Generic PM domain + * @dev: Device to attach + * + * Return: 0 on success, error code otherwise + */ +static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + int ret; + struct zynqmp_pm_domain *pd; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->request_node) + return -ENXIO; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + + /* If this is not the first device to attach there is nothing to do */ + if (domain->device_count) + return 0; + + ret = eemi_ops->request_node(pd->node_id, 0, 0, + ZYNQMP_PM_REQUEST_ACK_BLOCKING); + /* If requesting a node fails print and return the error */ + if (ret) { + pr_err("%s() %s request failed for node %d: %d\n", + __func__, domain->name, pd->node_id, ret); + return ret; + } + + pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED; + + pr_debug("%s() %s attached to %s domain\n", __func__, + dev_name(dev), domain->name); + return 0; +} + +/** + * zynqmp_gpd_detach_dev() - Detach device from the PM domain + * @domain: Generic PM domain + * @dev: Device to detach + */ +static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain, + struct device *dev) +{ + int ret; + struct zynqmp_pm_domain *pd; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->release_node) + return; + + pd = container_of(domain, struct zynqmp_pm_domain, gpd); + + /* If this is not the last device to detach there is nothing to do */ + if (domain->device_count) + return; + + ret = eemi_ops->release_node(pd->node_id); + /* If releasing a node fails print the error and return */ + if (ret) { + pr_err("%s() %s release failed for node %d: %d\n", + __func__, domain->name, pd->node_id, ret); + return; + } + + pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED; + + pr_debug("%s() %s detached from %s domain\n", __func__, + dev_name(dev), domain->name); +} + +static struct generic_pm_domain *zynqmp_gpd_xlate + (struct of_phandle_args *genpdspec, void *data) +{ + struct 
genpd_onecell_data *genpd_data = data; + unsigned int i, idx = genpdspec->args[0]; + struct zynqmp_pm_domain *pd; + + pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd); + + if (genpdspec->args_count != 1) + return ERR_PTR(-EINVAL); + + /* Check for existing pm domains */ + for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) { + if (pd[i].node_id == idx) + goto done; + } + + /** + * Add index in empty node_id of power domain list as no existing + * power domain found for current index. + */ + for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++) { + if (pd[i].node_id == 0) { + pd[i].node_id = idx; + break; + } + } + +done: + if (!genpd_data->domains[i] || i == ZYNQMP_NUM_DOMAINS) + return ERR_PTR(-ENOENT); + + return genpd_data->domains[i]; +} + +static int zynqmp_gpd_probe(struct platform_device *pdev) +{ + int i; + struct genpd_onecell_data *zynqmp_pd_data; + struct generic_pm_domain **domains; + struct zynqmp_pm_domain *pd; + struct device *dev = &pdev->dev; + + pd = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*pd), GFP_KERNEL); + if (!pd) + return -ENOMEM; + + zynqmp_pd_data = devm_kzalloc(dev, sizeof(*zynqmp_pd_data), GFP_KERNEL); + if (!zynqmp_pd_data) + return -ENOMEM; + + zynqmp_pd_data->xlate = zynqmp_gpd_xlate; + + domains = devm_kcalloc(dev, ZYNQMP_NUM_DOMAINS, sizeof(*domains), + GFP_KERNEL); + if (!domains) + return -ENOMEM; + + for (i = 0; i < ZYNQMP_NUM_DOMAINS; i++, pd++) { + pd->node_id = 0; + pd->gpd.name = kasprintf(GFP_KERNEL, "domain%d", i); + pd->gpd.power_off = zynqmp_gpd_power_off; + pd->gpd.power_on = zynqmp_gpd_power_on; + pd->gpd.attach_dev = zynqmp_gpd_attach_dev; + pd->gpd.detach_dev = zynqmp_gpd_detach_dev; + + domains[i] = &pd->gpd; + + /* Mark all PM domains as initially powered off */ + pm_genpd_init(&pd->gpd, NULL, true); + } + + zynqmp_pd_data->domains = domains; + zynqmp_pd_data->num_domains = ZYNQMP_NUM_DOMAINS; + of_genpd_add_provider_onecell(dev->parent->of_node, zynqmp_pd_data); + + return 0; +} + +static int zynqmp_gpd_remove(struct platform_device *pdev) +{ + of_genpd_del_provider(pdev->dev.parent->of_node); + + return 0; +} + +static struct platform_driver zynqmp_power_domain_driver = { + .driver = { + .name = "zynqmp_power_controller", + }, + .probe = zynqmp_gpd_probe, + .remove = zynqmp_gpd_remove, +}; +module_platform_driver(zynqmp_power_domain_driver); + +MODULE_ALIAS("platform:zynqmp_power_controller"); diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c new file mode 100644 index 000000000000..771cb59b9d22 --- /dev/null +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xilinx Zynq MPSoC Power Management + * + * Copyright (C) 2014-2018 Xilinx, Inc. 
+ * + * Davorin Mista + * Jolly Shah + * Rajan Vaja + */ + +#include +#include +#include +#include +#include + +#include + +enum pm_suspend_mode { + PM_SUSPEND_MODE_FIRST = 0, + PM_SUSPEND_MODE_STD = PM_SUSPEND_MODE_FIRST, + PM_SUSPEND_MODE_POWER_OFF, +}; + +#define PM_SUSPEND_MODE_FIRST PM_SUSPEND_MODE_STD + +static const char *const suspend_modes[] = { + [PM_SUSPEND_MODE_STD] = "standard", + [PM_SUSPEND_MODE_POWER_OFF] = "power-off", +}; + +static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD; + +enum pm_api_cb_id { + PM_INIT_SUSPEND_CB = 30, + PM_ACKNOWLEDGE_CB, + PM_NOTIFY_CB, +}; + +static void zynqmp_pm_get_callback_data(u32 *buf) +{ + zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); +} + +static irqreturn_t zynqmp_pm_isr(int irq, void *data) +{ + u32 payload[CB_PAYLOAD_SIZE]; + + zynqmp_pm_get_callback_data(payload); + + /* First element is callback API ID, others are callback arguments */ + if (payload[0] == PM_INIT_SUSPEND_CB) { + switch (payload[1]) { + case SUSPEND_SYSTEM_SHUTDOWN: + orderly_poweroff(true); + break; + case SUSPEND_POWER_REQUEST: + pm_suspend(PM_SUSPEND_MEM); + break; + default: + pr_err("%s Unsupported InitSuspendCb reason " + "code %d\n", __func__, payload[1]); + } + } + + return IRQ_HANDLED; +} + +static ssize_t suspend_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + char *s = buf; + int md; + + for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++) + if (suspend_modes[md]) { + if (md == suspend_mode) + s += sprintf(s, "[%s] ", suspend_modes[md]); + else + s += sprintf(s, "%s ", suspend_modes[md]); + } + + /* Convert last space to newline */ + if (s != buf) + *(s - 1) = '\n'; + return (s - buf); +} + +static ssize_t suspend_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int md, ret = -EINVAL; + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->set_suspend_mode) + return ret; + + for (md = PM_SUSPEND_MODE_FIRST; md < ARRAY_SIZE(suspend_modes); md++) + if (suspend_modes[md] && + sysfs_streq(suspend_modes[md], buf)) { + ret = 0; + break; + } + + if (!ret && md != suspend_mode) { + ret = eemi_ops->set_suspend_mode(md); + if (likely(!ret)) + suspend_mode = md; + } + + return ret ? 
ret : count; +} + +static DEVICE_ATTR_RW(suspend_mode); + +static int zynqmp_pm_probe(struct platform_device *pdev) +{ + int ret, irq; + u32 pm_api_version; + + const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops(); + + if (!eemi_ops || !eemi_ops->get_api_version || !eemi_ops->init_finalize) + return -ENXIO; + + eemi_ops->init_finalize(); + eemi_ops->get_api_version(&pm_api_version); + + /* Check PM API version number */ + if (pm_api_version < ZYNQMP_PM_VERSION) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return -ENXIO; + + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, zynqmp_pm_isr, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&pdev->dev), &pdev->dev); + if (ret) { + dev_err(&pdev->dev, "devm_request_threaded_irq '%d' failed " + "with %d\n", irq, ret); + return ret; + } + + ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); + if (ret) { + dev_err(&pdev->dev, "unable to create sysfs interface\n"); + return ret; + } + + return 0; +} + +static int zynqmp_pm_remove(struct platform_device *pdev) +{ + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr); + + return 0; +} + +static const struct of_device_id pm_of_match[] = { + { .compatible = "xlnx,zynqmp-power", }, + { /* end of table */ }, +}; +MODULE_DEVICE_TABLE(of, pm_of_match); + +static struct platform_driver zynqmp_pm_platform_driver = { + .probe = zynqmp_pm_probe, + .remove = zynqmp_pm_remove, + .driver = { + .name = "zynqmp_power", + .of_match_table = pm_of_match, + }, +}; +module_platform_driver(zynqmp_pm_platform_driver); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 9f89cb134549..f761655e2a36 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -63,7 +63,7 @@ config SPI_ALTERA config SPI_ATH79 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" - depends on ATH79 && GPIOLIB + depends on ATH79 || COMPILE_TEST select SPI_BITBANG help This enables support for the SPI controller present on the @@ -268,6 +268,27 @@ config SPI_FSL_LPSPI help This enables Freescale i.MX LPSPI controllers in master mode. +config SPI_FSL_QUADSPI + tristate "Freescale QSPI controller" + depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST + depends on HAS_IOMEM + help + This enables support for the Quad SPI controller in master mode. + Up to four flash chips can be connected on two buses with two + chipselects each. + This controller does not support generic SPI messages. It only + supports the high-level SPI memory interface. + +config SPI_NXP_FLEXSPI + tristate "NXP Flex SPI controller" + depends on ARCH_LAYERSCAPE || HAS_IOMEM + help + This enables support for the Flex SPI controller in master mode. + Up to four slave devices can be connected on two buses with two + chipselects each. + This controller does not support generic SPI messages and only + supports the high-level SPI memory interface. + config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" depends on GPIOLIB || COMPILE_TEST @@ -296,8 +317,7 @@ config SPI_IMX depends on ARCH_MXC || COMPILE_TEST select SPI_BITBANG help - This enables using the Freescale i.MX SPI controllers in master - mode. + This enables support for the Freescale i.MX SPI controllers. config SPI_JCORE tristate "J-Core SPI Master" @@ -372,7 +392,7 @@ config SPI_FSL_DSPI depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST help This enables support for the Freescale DSPI controller in master - mode. VF610 platform uses the controller. + mode. 
VF610, LS1021A and ColdFire platforms uses the controller. config SPI_FSL_ESPI tristate "Freescale eSPI controller" @@ -631,6 +651,12 @@ config SPI_SH_HSPI help SPI driver for SuperH HSPI blocks. +config SPI_SIFIVE + tristate "SiFive SPI controller" + depends on HAS_IOMEM + help + This exposes the SPI controller IP from SiFive. + config SPI_SIRF tristate "CSR SiRFprimaII SPI controller" depends on SIRF_DMA @@ -665,7 +691,7 @@ config SPI_STM32 tristate "STMicroelectronics STM32 SPI controller" depends on ARCH_STM32 || COMPILE_TEST help - SPI driver for STMicroelectonics STM32 SoCs. + SPI driver for STMicroelectronics STM32 SoCs. STM32 SPI controller supports DMA and PIO modes. When DMA is not available, the driver automatically falls back to diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index f29627040dfb..d8fc03c9faa2 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o +obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o @@ -63,6 +64,7 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o obj-$(CONFIG_SPI_MXS) += spi-mxs.o obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o +obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o @@ -93,6 +95,7 @@ obj-$(CONFIG_SPI_SH) += spi-sh.o obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o +obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o obj-$(CONFIG_SPI_SIRF) += spi-sirf.o obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o obj-$(CONFIG_SPI_SPRD) += spi-sprd.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index ddc712410812..fffc21cd5f79 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Driver for Atmel QSPI Controller * @@ -7,31 +8,19 @@ * Author: Cyrille Pitchen * Author: Piotr Bugalski * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - * * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. 
*/ -#include #include -#include -#include #include #include #include -#include - #include +#include +#include +#include +#include +#include #include /* QSPI register offsets */ @@ -47,7 +36,9 @@ #define QSPI_IAR 0x0030 /* Instruction Address Register */ #define QSPI_ICR 0x0034 /* Instruction Code Register */ +#define QSPI_WICR 0x0034 /* Write Instruction Code Register */ #define QSPI_IFR 0x0038 /* Instruction Frame Register */ +#define QSPI_RICR 0x003C /* Read Instruction Code Register */ #define QSPI_SMR 0x0040 /* Scrambling Mode Register */ #define QSPI_SKR 0x0044 /* Scrambling Key Register */ @@ -100,7 +91,7 @@ #define QSPI_SCR_DLYBS_MASK GENMASK(23, 16) #define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK) -/* Bitfields in QSPI_ICR (Instruction Code Register) */ +/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */ #define QSPI_ICR_INST_MASK GENMASK(7, 0) #define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK) #define QSPI_ICR_OPT_MASK GENMASK(23, 16) @@ -125,14 +116,12 @@ #define QSPI_IFR_OPTL_4BIT (2 << 8) #define QSPI_IFR_OPTL_8BIT (3 << 8) #define QSPI_IFR_ADDRL BIT(10) -#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12) -#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12) -#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13) +#define QSPI_IFR_TFRTYP_MEM BIT(12) +#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13) #define QSPI_IFR_CRM BIT(14) #define QSPI_IFR_NBDUM_MASK GENMASK(20, 16) #define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK) +#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */ /* Bitfields in QSPI_SMR (Scrambling Mode Register) */ #define QSPI_SMR_SCREN BIT(0) @@ -148,24 +137,31 @@ #define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8) #define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC) +struct atmel_qspi_caps { + bool has_qspick; + bool has_ricr; +}; struct atmel_qspi { void __iomem *regs; void __iomem *mem; - struct clk *clk; + struct clk *pclk; + struct clk *qspick; struct platform_device *pdev; + const struct atmel_qspi_caps *caps; u32 pending; + u32 mr; struct completion cmd_completion; }; -struct qspi_mode { +struct atmel_qspi_mode { u8 cmd_buswidth; u8 addr_buswidth; u8 data_buswidth; u32 config; }; -static const struct qspi_mode sama5d2_qspi_modes[] = { +static const struct atmel_qspi_mode atmel_qspi_modes[] = { { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI }, { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT }, { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT }, @@ -175,19 +171,8 @@ static const struct qspi_mode sama5d2_qspi_modes[] = { { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, }; -/* Register access functions */ -static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg) -{ - return readl_relaxed(aq->regs + reg); -} - -static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value) -{ - writel_relaxed(value, aq->regs + reg); -} - -static inline bool is_compatible(const struct spi_mem_op *op, - const struct qspi_mode *mode) +static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op, + const struct atmel_qspi_mode *mode) { if (op->cmd.buswidth != mode->cmd_buswidth) return false; @@ -201,21 +186,21 @@ static inline bool is_compatible(const struct spi_mem_op *op, return true; } -static int find_mode(const struct spi_mem_op *op) +static int atmel_qspi_find_mode(const struct spi_mem_op *op) { u32 i; - for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++) - if (is_compatible(op, &sama5d2_qspi_modes[i])) + for (i = 0; i < 
ARRAY_SIZE(atmel_qspi_modes); i++) + if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i])) return i; - return -1; + return -ENOTSUPP; } static bool atmel_qspi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) { - if (find_mode(op) < 0) + if (atmel_qspi_find_mode(op) < 0) return false; /* special case not supported by hardware */ @@ -226,29 +211,37 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem, return true; } -static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +static int atmel_qspi_set_cfg(struct atmel_qspi *aq, + const struct spi_mem_op *op, u32 *offset) { - struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); - int mode; + u32 iar, icr, ifr; u32 dummy_cycles = 0; - u32 iar, icr, ifr, sr; - int err = 0; + int mode; iar = 0; icr = QSPI_ICR_INST(op->cmd.opcode); ifr = QSPI_IFR_INSTEN; - qspi_writel(aq, QSPI_MR, QSPI_MR_SMM); - - mode = find_mode(op); + mode = atmel_qspi_find_mode(op); if (mode < 0) - return -ENOTSUPP; - - ifr |= sama5d2_qspi_modes[mode].config; + return mode; + ifr |= atmel_qspi_modes[mode].config; if (op->dummy.buswidth && op->dummy.nbytes) dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth; + /* + * The controller allows 24 and 32-bit addressing while NAND-flash + * requires 16-bit long. Handling 8-bit long addresses is done using + * the option field. For the 16-bit addresses, the workaround depends + * of the number of requested dummy bits. If there are 8 or more dummy + * cycles, the address is shifted and sent with the first dummy byte. + * Otherwise opcode is disabled and the first byte of the address + * contains the command opcode (works only if the opcode and address + * use the same buswidth). The limitation is when the 16-bit address is + * used without enough dummy cycles and the opcode is using a different + * buswidth than the address. + */ if (op->addr.buswidth) { switch (op->addr.nbytes) { case 0: @@ -282,6 +275,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) } } + /* offset of the data access in the QSPI memory space */ + *offset = iar; + /* Set number of dummy cycles */ if (dummy_cycles) ifr |= QSPI_IFR_NBDUM(dummy_cycles); @@ -290,49 +286,82 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) if (op->data.nbytes) ifr |= QSPI_IFR_DATAEN; - if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) - ifr |= QSPI_IFR_TFRTYP_TRSFR_READ; - else - ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE; + /* + * If the QSPI controller is set in regular SPI mode, set it in + * Serial Memory Mode (SMM). 
+ */ + if (aq->mr != QSPI_MR_SMM) { + writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + aq->mr = QSPI_MR_SMM; + } /* Clear pending interrupts */ - (void)qspi_readl(aq, QSPI_SR); + (void)readl_relaxed(aq->regs + QSPI_SR); + + if (aq->caps->has_ricr) { + if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN) + ifr |= QSPI_IFR_APBTFRTYP_READ; - /* Set QSPI Instruction Frame registers */ - qspi_writel(aq, QSPI_IAR, iar); - qspi_writel(aq, QSPI_ICR, icr); - qspi_writel(aq, QSPI_IFR, ifr); + /* Set QSPI Instruction Frame registers */ + writel_relaxed(iar, aq->regs + QSPI_IAR); + if (op->data.dir == SPI_MEM_DATA_IN) + writel_relaxed(icr, aq->regs + QSPI_RICR); + else + writel_relaxed(icr, aq->regs + QSPI_WICR); + writel_relaxed(ifr, aq->regs + QSPI_IFR); + } else { + if (op->data.dir == SPI_MEM_DATA_OUT) + ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR; + + /* Set QSPI Instruction Frame registers */ + writel_relaxed(iar, aq->regs + QSPI_IAR); + writel_relaxed(icr, aq->regs + QSPI_ICR); + writel_relaxed(ifr, aq->regs + QSPI_IFR); + } + + return 0; +} + +static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); + u32 sr, offset; + int err; + + err = atmel_qspi_set_cfg(aq, op, &offset); + if (err) + return err; /* Skip to the final steps if there is no data */ if (op->data.nbytes) { /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ - (void)qspi_readl(aq, QSPI_IFR); + (void)readl_relaxed(aq->regs + QSPI_IFR); /* Send/Receive data */ if (op->data.dir == SPI_MEM_DATA_IN) - _memcpy_fromio(op->data.buf.in, - aq->mem + iar, op->data.nbytes); + _memcpy_fromio(op->data.buf.in, aq->mem + offset, + op->data.nbytes); else - _memcpy_toio(aq->mem + iar, - op->data.buf.out, op->data.nbytes); + _memcpy_toio(aq->mem + offset, op->data.buf.out, + op->data.nbytes); /* Release the chip-select */ - qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER); + writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR); } /* Poll INSTRuction End status */ - sr = qspi_readl(aq, QSPI_SR); + sr = readl_relaxed(aq->regs + QSPI_SR); if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) return err; /* Wait for INSTRuction End interrupt */ reinit_completion(&aq->cmd_completion); aq->pending = sr & QSPI_SR_CMD_COMPLETED; - qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED); + writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER); if (!wait_for_completion_timeout(&aq->cmd_completion, msecs_to_jiffies(1000))) err = -ETIMEDOUT; - qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED); + writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR); return err; } @@ -361,7 +390,7 @@ static int atmel_qspi_setup(struct spi_device *spi) if (!spi->max_speed_hz) return -EINVAL; - src_rate = clk_get_rate(aq->clk); + src_rate = clk_get_rate(aq->pclk); if (!src_rate) return -EINVAL; @@ -371,7 +400,7 @@ static int atmel_qspi_setup(struct spi_device *spi) scbr--; scr = QSPI_SCR_SCBR(scbr); - qspi_writel(aq, QSPI_SCR, scr); + writel_relaxed(scr, aq->regs + QSPI_SCR); return 0; } @@ -379,21 +408,25 @@ static int atmel_qspi_setup(struct spi_device *spi) static int atmel_qspi_init(struct atmel_qspi *aq) { /* Reset the QSPI controller */ - qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST); + writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR); + + /* Set the QSPI controller by default in Serial Memory Mode */ + writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR); + aq->mr = QSPI_MR_SMM; /* Enable the QSPI controller */ - qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN); + 
writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR); return 0; } static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) { - struct atmel_qspi *aq = (struct atmel_qspi *)dev_id; + struct atmel_qspi *aq = dev_id; u32 status, mask, pending; - status = qspi_readl(aq, QSPI_SR); - mask = qspi_readl(aq, QSPI_IMR); + status = readl_relaxed(aq->regs + QSPI_SR); + mask = readl_relaxed(aq->regs + QSPI_IMR); pending = status & mask; if (!pending) @@ -449,44 +482,74 @@ static int atmel_qspi_probe(struct platform_device *pdev) } /* Get the peripheral clock */ - aq->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(aq->clk)) { + aq->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(aq->pclk)) + aq->pclk = devm_clk_get(&pdev->dev, NULL); + + if (IS_ERR(aq->pclk)) { dev_err(&pdev->dev, "missing peripheral clock\n"); - err = PTR_ERR(aq->clk); + err = PTR_ERR(aq->pclk); goto exit; } /* Enable the peripheral clock */ - err = clk_prepare_enable(aq->clk); + err = clk_prepare_enable(aq->pclk); if (err) { dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); goto exit; } + aq->caps = of_device_get_match_data(&pdev->dev); + if (!aq->caps) { + dev_err(&pdev->dev, "Could not retrieve QSPI caps\n"); + err = -EINVAL; + goto exit; + } + + if (aq->caps->has_qspick) { + /* Get the QSPI system clock */ + aq->qspick = devm_clk_get(&pdev->dev, "qspick"); + if (IS_ERR(aq->qspick)) { + dev_err(&pdev->dev, "missing system clock\n"); + err = PTR_ERR(aq->qspick); + goto disable_pclk; + } + + /* Enable the QSPI system clock */ + err = clk_prepare_enable(aq->qspick); + if (err) { + dev_err(&pdev->dev, + "failed to enable the QSPI system clock\n"); + goto disable_pclk; + } + } + /* Request the IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "missing IRQ\n"); err = irq; - goto disable_clk; + goto disable_qspick; } err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, 0, dev_name(&pdev->dev), aq); if (err) - goto disable_clk; + goto disable_qspick; err = atmel_qspi_init(aq); if (err) - goto disable_clk; + goto disable_qspick; err = spi_register_controller(ctrl); if (err) - goto disable_clk; + goto disable_qspick; return 0; -disable_clk: - clk_disable_unprepare(aq->clk); +disable_qspick: + clk_disable_unprepare(aq->qspick); +disable_pclk: + clk_disable_unprepare(aq->pclk); exit: spi_controller_put(ctrl); @@ -499,8 +562,9 @@ static int atmel_qspi_remove(struct platform_device *pdev) struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); spi_unregister_controller(ctrl); - qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS); - clk_disable_unprepare(aq->clk); + writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR); + clk_disable_unprepare(aq->qspick); + clk_disable_unprepare(aq->pclk); return 0; } @@ -508,7 +572,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev) { struct atmel_qspi *aq = dev_get_drvdata(dev); - clk_disable_unprepare(aq->clk); + clk_disable_unprepare(aq->qspick); + clk_disable_unprepare(aq->pclk); return 0; } @@ -517,7 +582,8 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) { struct atmel_qspi *aq = dev_get_drvdata(dev); - clk_prepare_enable(aq->clk); + clk_prepare_enable(aq->pclk); + clk_prepare_enable(aq->qspick); return atmel_qspi_init(aq); } @@ -525,8 +591,22 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, atmel_qspi_resume); +static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {}; + +static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps 
= { + .has_qspick = true, + .has_ricr = true, +}; + static const struct of_device_id atmel_qspi_dt_ids[] = { - { .compatible = "atmel,sama5d2-qspi" }, + { + .compatible = "atmel,sama5d2-qspi", + .data = &atmel_sama5d2_qspi_caps, + }, + { + .compatible = "microchip,sam9x60-qspi", + .data = &atmel_sam9x60_qspi_caps, + }, { /* sentinel */ } }; diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 3f6b657394de..847f354ebef1 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -21,18 +21,26 @@ #include #include #include -#include #include #include - -#include -#include +#include #define DRV_NAME "ath79-spi" #define ATH79_SPI_RRW_DELAY_FACTOR 12000 #define MHZ (1000 * 1000) +#define AR71XX_SPI_REG_FS 0x00 /* Function Select */ +#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */ +#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */ +#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */ + +#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */ + +#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */ +#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */ +#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n)) + struct ath79_spi { struct spi_bitbang bitbang; u32 ioc_base; @@ -67,31 +75,14 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active) { struct ath79_spi *sp = ath79_spidev_to_sp(spi); int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active; + u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - if (is_active) { - /* set initial clock polarity */ - if (spi->mode & SPI_CPOL) - sp->ioc_base |= AR71XX_SPI_IOC_CLK; - else - sp->ioc_base &= ~AR71XX_SPI_IOC_CLK; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - if (gpio_is_valid(spi->cs_gpio)) { - /* SPI is normally active-low */ - gpio_set_value_cansleep(spi->cs_gpio, cs_high); - } else { - u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - - if (cs_high) - sp->ioc_base |= cs_bit; - else - sp->ioc_base &= ~cs_bit; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } + if (cs_high) + sp->ioc_base |= cs_bit; + else + sp->ioc_base &= ~cs_bit; + ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); } static void ath79_spi_enable(struct ath79_spi *sp) @@ -103,6 +94,9 @@ static void ath79_spi_enable(struct ath79_spi *sp) sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL); sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC); + /* clear clk and mosi in the base state */ + sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK); + /* TODO: setup speed? 
*/ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); } @@ -115,66 +109,6 @@ static void ath79_spi_disable(struct ath79_spi *sp) ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); } -static int ath79_spi_setup_cs(struct spi_device *spi) -{ - struct ath79_spi *sp = ath79_spidev_to_sp(spi); - int status; - - status = 0; - if (gpio_is_valid(spi->cs_gpio)) { - unsigned long flags; - - flags = GPIOF_DIR_OUT; - if (spi->mode & SPI_CS_HIGH) - flags |= GPIOF_INIT_LOW; - else - flags |= GPIOF_INIT_HIGH; - - status = gpio_request_one(spi->cs_gpio, flags, - dev_name(&spi->dev)); - } else { - u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select); - - if (spi->mode & SPI_CS_HIGH) - sp->ioc_base &= ~cs_bit; - else - sp->ioc_base |= cs_bit; - - ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); - } - - return status; -} - -static void ath79_spi_cleanup_cs(struct spi_device *spi) -{ - if (gpio_is_valid(spi->cs_gpio)) - gpio_free(spi->cs_gpio); -} - -static int ath79_spi_setup(struct spi_device *spi) -{ - int status = 0; - - if (!spi->controller_state) { - status = ath79_spi_setup_cs(spi); - if (status) - return status; - } - - status = spi_bitbang_setup(spi); - if (status && !spi->controller_state) - ath79_spi_cleanup_cs(spi); - - return status; -} - -static void ath79_spi_cleanup(struct spi_device *spi) -{ - ath79_spi_cleanup_cs(spi); - spi_bitbang_cleanup(spi); -} - static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs, u32 word, u8 bits, unsigned flags) { @@ -225,9 +159,10 @@ static int ath79_spi_probe(struct platform_device *pdev) pdata = dev_get_platdata(&pdev->dev); + master->use_gpio_descriptors = true; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->setup = ath79_spi_setup; - master->cleanup = ath79_spi_cleanup; + master->setup = spi_bitbang_setup; + master->cleanup = spi_bitbang_cleanup; if (pdata) { master->bus_num = pdata->bus_num; master->num_chipselect = pdata->num_chipselect; @@ -236,7 +171,6 @@ static int ath79_spi_probe(struct platform_device *pdev) sp->bitbang.master = master; sp->bitbang.chipselect = ath79_spi_chipselect; sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; - sp->bitbang.setup_transfer = spi_bitbang_setup_transfer; sp->bitbang.flags = SPI_CS_HIGH; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 74fddcd3282b..4954f0ab1606 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -23,8 +23,7 @@ #include #include -#include -#include +#include #include #include @@ -312,7 +311,7 @@ struct atmel_spi { /* Controller-specific per-slave state */ struct atmel_spi_device { - unsigned int npcs_pin; + struct gpio_desc *npcs_pin; u32 csr; }; @@ -355,7 +354,6 @@ static bool atmel_spi_is_v2(struct atmel_spi *as) static void cs_activate(struct atmel_spi *as, struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; u32 mr; if (atmel_spi_is_v2(as)) { @@ -379,7 +377,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) mr = spi_readl(as, MR); if (as->use_cs_gpios) - gpio_set_value(asd->npcs_pin, active); + gpiod_set_value(asd->npcs_pin, 1); } else { u32 cpol = (spi->mode & SPI_CPOL) ? 
SPI_BIT(CPOL) : 0; int i; @@ -396,19 +394,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) mr = spi_readl(as, MR); mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); if (as->use_cs_gpios && spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, active); + gpiod_set_value(asd->npcs_pin, 1); spi_writel(as, MR, mr); } - dev_dbg(&spi->dev, "activate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (high)" : "", - mr); + dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr); } static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; - unsigned active = spi->mode & SPI_CS_HIGH; u32 mr; /* only deactivate *this* device; sometimes transfers to @@ -420,14 +415,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) spi_writel(as, MR, mr); } - dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n", - asd->npcs_pin, active ? " (low)" : "", - mr); + dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr); if (!as->use_cs_gpios) spi_writel(as, CR, SPI_BIT(LASTXFER)); else if (atmel_spi_is_v2(as) || spi->chip_select != 0) - gpio_set_value(asd->npcs_pin, !active); + gpiod_set_value(asd->npcs_pin, 0); } static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock) @@ -1188,7 +1181,6 @@ static int atmel_spi_setup(struct spi_device *spi) struct atmel_spi_device *asd; u32 csr; unsigned int bits = spi->bits_per_word; - unsigned int npcs_pin; as = spi_master_get_devdata(spi->master); @@ -1209,21 +1201,14 @@ static int atmel_spi_setup(struct spi_device *spi) csr |= SPI_BIT(CSAAT); /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. - * - * DLYBCT would add delays between words, slowing down transfers. - * It could potentially be useful to cope with DMA bottlenecks, but - * in those cases it's probably best to just use a lower bitrate. */ csr |= SPI_BF(DLYBS, 0); - csr |= SPI_BF(DLYBCT, 0); - - /* chipselect must have been muxed as GPIO (e.g. in board setup) */ - npcs_pin = (unsigned long)spi->controller_data; - if (!as->use_cs_gpios) - npcs_pin = spi->chip_select; - else if (gpio_is_valid(spi->cs_gpio)) - npcs_pin = spi->cs_gpio; + /* DLYBCT adds delays between words. This is useful for slow devices + * that need a bit of time to setup the next transfer. + */ + csr |= SPI_BF(DLYBCT, + (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5); asd = spi->controller_state; if (!asd) { @@ -1231,11 +1216,21 @@ static int atmel_spi_setup(struct spi_device *spi) if (!asd) return -ENOMEM; - if (as->use_cs_gpios) - gpio_direction_output(npcs_pin, - !(spi->mode & SPI_CS_HIGH)); + /* + * If use_cs_gpios is true this means that we have "cs-gpios" + * defined in the device tree node so we should have + * gotten the GPIO lines from the device tree inside the + * SPI core. Warn if this is not the case but continue since + * CS GPIOs are after all optional. 
+ */ + if (as->use_cs_gpios) { + if (!spi->cs_gpiod) { + dev_err(&spi->dev, + "host claims to use CS GPIOs but no CS found in DT by the SPI core\n"); + } + asd->npcs_pin = spi->cs_gpiod; + } - asd->npcs_pin = npcs_pin; spi->controller_state = asd; } @@ -1473,41 +1468,6 @@ static void atmel_get_caps(struct atmel_spi *as) as->caps.has_pdc_support = version < 0x212; } -/*-------------------------------------------------------------------------*/ -static int atmel_spi_gpio_cs(struct platform_device *pdev) -{ - struct spi_master *master = platform_get_drvdata(pdev); - struct atmel_spi *as = spi_master_get_devdata(master); - struct device_node *np = master->dev.of_node; - int i; - int ret = 0; - int nb = 0; - - if (!as->use_cs_gpios) - return 0; - - if (!np) - return 0; - - nb = of_gpio_named_count(np, "cs-gpios"); - for (i = 0; i < nb; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if (cs_gpio == -EPROBE_DEFER) - return cs_gpio; - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - return ret; - } - } - - return 0; -} - static void atmel_spi_init(struct atmel_spi *as) { spi_writel(as, CR, SPI_BIT(SWRST)); @@ -1560,6 +1520,7 @@ static int atmel_spi_probe(struct platform_device *pdev) goto out_free; /* the spi->mode bits understood by this driver: */ + master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); master->dev.of_node = pdev->dev.of_node; @@ -1592,6 +1553,11 @@ static int atmel_spi_probe(struct platform_device *pdev) atmel_get_caps(as); + /* + * If there are chip selects in the device tree, those will be + * discovered by the SPI core when registering the SPI master + * and assigned to each SPI device. + */ as->use_cs_gpios = true; if (atmel_spi_is_v2(as) && pdev->dev.of_node && @@ -1600,10 +1566,6 @@ static int atmel_spi_probe(struct platform_device *pdev) master->num_chipselect = 4; } - ret = atmel_spi_gpio_cs(pdev); - if (ret) - goto out_unmap_regs; - as->use_dma = false; as->use_pdc = false; if (as->caps.has_dma_support) { diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c index 671e374e1b01..f7e054848ca5 100644 --- a/drivers/spi/spi-bcm2835aux.c +++ b/drivers/spi/spi-bcm2835aux.c @@ -456,7 +456,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) } bs->clk = devm_clk_get(&pdev->dev, NULL); - if ((!bs->clk) || (IS_ERR(bs->clk))) { + if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_master_put; diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index f29176000b8d..dd9a8c54a693 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -213,19 +213,6 @@ int spi_bitbang_setup(struct spi_device *spi) dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); - /* NOTE we _need_ to call chipselect() early, ideally with adapter - * setup, unless the hardware defaults cooperate to avoid confusion - * between normal (active low) and inverted chipselects. 
- */ - - /* deselect chip (low or high) */ - mutex_lock(&bitbang->lock); - if (!bitbang->busy) { - bitbang->chipselect(spi, BITBANG_CS_INACTIVE); - ndelay(cs->nsecs); - } - mutex_unlock(&bitbang->lock); - return 0; } EXPORT_SYMBOL_GPL(spi_bitbang_setup); diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 7c88f74f7f47..43d0e79842ac 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -13,7 +13,7 @@ #include #include -#include +#include #include #include #include @@ -128,10 +128,6 @@ struct cdns_spi { u32 is_decoded_cs; }; -struct cdns_spi_device_data { - bool gpio_requested; -}; - /* Macros for the SPI controller read/write */ static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset) { @@ -176,16 +172,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi) /** * cdns_spi_chipselect - Select or deselect the chip select line * @spi: Pointer to the spi_device structure - * @is_high: Select(0) or deselect (1) the chip select line + * @enable: Select (1) or deselect (0) the chip select line */ -static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) +static void cdns_spi_chipselect(struct spi_device *spi, bool enable) { struct cdns_spi *xspi = spi_master_get_devdata(spi->master); u32 ctrl_reg; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); - if (is_high) { + if (!enable) { /* Deselect the slave */ ctrl_reg |= CDNS_SPI_CR_SSCTRL; } else { @@ -469,64 +465,6 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master) return 0; } -static int cdns_spi_setup(struct spi_device *spi) -{ - - int ret = -EINVAL; - struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi); - - /* this is a pin managed by the controller, leave it alone */ - if (spi->cs_gpio == -ENOENT) - return 0; - - /* this seems to be the first time we're here */ - if (!cdns_spi_data) { - cdns_spi_data = kzalloc(sizeof(*cdns_spi_data), GFP_KERNEL); - if (!cdns_spi_data) - return -ENOMEM; - cdns_spi_data->gpio_requested = false; - spi_set_ctldata(spi, cdns_spi_data); - } - - /* if we haven't done so, grab the gpio */ - if (!cdns_spi_data->gpio_requested && gpio_is_valid(spi->cs_gpio)) { - ret = gpio_request_one(spi->cs_gpio, - (spi->mode & SPI_CS_HIGH) ? - GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH, - dev_name(&spi->dev)); - if (ret) - dev_err(&spi->dev, "can't request chipselect gpio %d\n", - spi->cs_gpio); - else - cdns_spi_data->gpio_requested = true; - } else { - if (gpio_is_valid(spi->cs_gpio)) { - int mode = ((spi->mode & SPI_CS_HIGH) ? 
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH); - - ret = gpio_direction_output(spi->cs_gpio, mode); - if (ret) - dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n", - spi->cs_gpio, ret); - } - } - - return ret; -} - -static void cdns_spi_cleanup(struct spi_device *spi) -{ - struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi); - - if (cdns_spi_data) { - if (cdns_spi_data->gpio_requested) - gpio_free(spi->cs_gpio); - kfree(cdns_spi_data); - spi_set_ctldata(spi, NULL); - } - -} - /** * cdns_spi_probe - Probe method for the SPI driver * @pdev: Pointer to the platform_device structure @@ -584,11 +522,6 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_apb; } - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); if (ret < 0) master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; @@ -603,8 +536,10 @@ static int cdns_spi_probe(struct platform_device *pdev) /* SPI controller initializations */ cdns_spi_init_hw(xspi); - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); irq = platform_get_irq(pdev, 0); if (irq <= 0) { @@ -621,13 +556,12 @@ static int cdns_spi_probe(struct platform_device *pdev) goto clk_dis_all; } + master->use_gpio_descriptors = true; master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; master->prepare_message = cdns_prepare_message; master->transfer_one = cdns_transfer_one; master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware; master->set_cs = cdns_spi_chipselect; - master->setup = cdns_spi_setup; - master->cleanup = cdns_spi_cleanup; master->auto_runtime_pm = true; master->mode_bits = SPI_CPOL | SPI_CPHA; diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c index 18193df2eba8..8c03c409fc07 100644 --- a/drivers/spi/spi-clps711x.c +++ b/drivers/spi/spi-clps711x.c @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include #include @@ -36,25 +36,6 @@ struct spi_clps711x_data { int len; }; -static int spi_clps711x_setup(struct spi_device *spi) -{ - if (!spi->controller_state) { - int ret; - - ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio, - dev_name(&spi->master->dev)); - if (ret) - return ret; - - spi->controller_state = spi; - } - - /* We are expect that SPI-device is not selected */ - gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); - - return 0; -} - static int spi_clps711x_prepare_message(struct spi_master *master, struct spi_message *msg) { @@ -125,11 +106,11 @@ static int spi_clps711x_probe(struct platform_device *pdev) if (!master) return -ENOMEM; + master->use_gpio_descriptors = true; master->bus_num = -1; master->mode_bits = SPI_CPHA | SPI_CS_HIGH; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8); master->dev.of_node = pdev->dev.of_node; - master->setup = spi_clps711x_setup; master->prepare_message = spi_clps711x_prepare_message; master->transfer_one = spi_clps711x_transfer_one; diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 56adec83f8fc..eb246ebcfa3a 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -15,7 +15,7 @@ #include #include -#include +#include #include #include #include @@ -25,7 
+25,6 @@ #include #include #include -#include #include #include #include @@ -222,12 +221,17 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value) * Board specific chip select logic decides the polarity and cs * line for the controller */ - if (spi->cs_gpio >= 0) { + if (spi->cs_gpiod) { + /* + * FIXME: is this code ever executed? This host does not + * set SPI_MASTER_GPIO_SS so this chipselect callback should + * not get called from the SPI core when we are using + * GPIOs for chip select. + */ if (value == BITBANG_CS_ACTIVE) - gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH); + gpiod_set_value(spi->cs_gpiod, 1); else - gpio_set_value(spi->cs_gpio, - !(spi->mode & SPI_CS_HIGH)); + gpiod_set_value(spi->cs_gpiod, 0); } else { if (value == BITBANG_CS_ACTIVE) { if (!(spi->mode & SPI_CS_WORD)) @@ -418,30 +422,18 @@ static int davinci_spi_of_setup(struct spi_device *spi) */ static int davinci_spi_setup(struct spi_device *spi) { - int retval = 0; struct davinci_spi *dspi; - struct spi_master *master = spi->master; struct device_node *np = spi->dev.of_node; bool internal_cs = true; dspi = spi_master_get_devdata(spi->master); if (!(spi->mode & SPI_NO_CS)) { - if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) { - retval = gpio_direction_output( - spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); + if (np && spi->cs_gpiod) internal_cs = false; - } - - if (retval) { - dev_err(&spi->dev, "GPIO %d setup failed (%d)\n", - spi->cs_gpio, retval); - return retval; - } - if (internal_cs) { + if (internal_cs) set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); - } } if (spi->mode & SPI_READY) @@ -962,6 +954,7 @@ static int davinci_spi_probe(struct platform_device *pdev) if (ret) goto free_master; + master->use_gpio_descriptors = true; master->dev.of_node = pdev->dev.of_node; master->bus_num = pdev->id; master->num_chipselect = pdata->num_chipselect; @@ -980,27 +973,6 @@ static int davinci_spi_probe(struct platform_device *pdev) if (dspi->version == SPI_VERSION_2) dspi->bitbang.flags |= SPI_READY; - if (pdev->dev.of_node) { - int i; - - for (i = 0; i < pdata->num_chipselect; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if (cs_gpio == -EPROBE_DEFER) { - ret = cs_gpio; - goto free_clk; - } - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - goto free_clk; - } - } - } - dspi->bitbang.txrx_bufs = davinci_spi_bufs; ret = davinci_spi_request_dma(dspi); diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index d0dd7814e997..4bd59a93d988 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -185,27 +184,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) dws->num_cs = num_cs; - if (pdev->dev.of_node) { - int i; - - for (i = 0; i < dws->num_cs; i++) { - int cs_gpio = of_get_named_gpio(pdev->dev.of_node, - "cs-gpios", i); - - if (cs_gpio == -EPROBE_DEFER) { - ret = cs_gpio; - goto out; - } - - if (gpio_is_valid(cs_gpio)) { - ret = devm_gpio_request(&pdev->dev, cs_gpio, - dev_name(&pdev->dev)); - if (ret) - goto out; - } - } - } - init_func = device_get_match_data(&pdev->dev); if (init_func) { ret = init_func(pdev, dwsmmio); diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 2e822a56576a..ac81025f86ab 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -20,7 +20,6 @@ #include #include #include -#include #include "spi-dw.h" @@ -54,41 
+53,41 @@ static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf, if (!buf) return 0; - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "%s registers:\n", dev_name(&dws->master->dev)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "=================================\n"); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR)); - len += snprintf(buf + len, SPI_REGS_BUFSIZE - len, + len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len, "=================================\n"); ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -138,11 +137,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) struct dw_spi *dws = spi_controller_get_devdata(spi->controller); struct chip_data *chip = spi_get_ctldata(spi); - /* Chip select logic is inverted from spi_set_cs() */ if (chip && chip->cs_control) - chip->cs_control(!enable); + chip->cs_control(enable); - if (!enable) + if (enable) dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); else if (dws->cs_override) dw_writel(dws, DW_SPI_SER, 0); @@ -317,7 +315,8 @@ static int dw_spi_transfer_one(struct spi_controller 
*master, /* Default SPI mode is SCPOL = 0, SCPH = 0 */ cr0 = (transfer->bits_per_word - 1) | (chip->type << SPI_FRF_OFFSET) - | (spi->mode << SPI_MODE_OFFSET) + | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) | + (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET)) | (chip->tmode << SPI_TMOD_OFFSET); /* @@ -397,7 +396,6 @@ static int dw_spi_setup(struct spi_device *spi) { struct dw_spi_chip *chip_info = NULL; struct chip_data *chip; - int ret; /* Only alloc on first setup */ chip = spi_get_ctldata(spi); @@ -425,13 +423,6 @@ static int dw_spi_setup(struct spi_device *spi) chip->tmode = SPI_TMOD_TR; - if (gpio_is_valid(spi->cs_gpio)) { - ret = gpio_direction_output(spi->cs_gpio, - !(spi->mode & SPI_CS_HIGH)); - if (ret) - return ret; - } - return 0; } @@ -496,6 +487,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) goto err_free_master; } + master->use_gpio_descriptors = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); master->bus_num = dws->bus_num; diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 5e10dc5c93a5..53335ccc98f6 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -67,7 +67,7 @@ #define SPI_SR 0x2c #define SPI_SR_EOQF 0x10000000 #define SPI_SR_TCFQF 0x80000000 -#define SPI_SR_CLEAR 0xdaad0000 +#define SPI_SR_CLEAR 0x9aaf0000 #define SPI_RSER_TFFFE BIT(25) #define SPI_RSER_TFFFD BIT(24) @@ -233,6 +233,9 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi) { u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); + if (spi_controller_is_slave(dspi->master)) + return data; + if (dspi->len > 0) cmd |= SPI_PUSHR_CMD_CONT; return cmd << 16 | data; @@ -329,6 +332,11 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi) dma_async_issue_pending(dma->chan_rx); dma_async_issue_pending(dma->chan_tx); + if (spi_controller_is_slave(dspi->master)) { + wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete); + return 0; + } + time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete, DMA_COMPLETION_TIMEOUT); if (time_left == 0) { @@ -798,14 +806,18 @@ static int dspi_setup(struct spi_device *spi) ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate); chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) - | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) - | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) - | SPI_CTAR_PCSSCK(pcssck) - | SPI_CTAR_CSSCK(cssck) - | SPI_CTAR_PASC(pasc) - | SPI_CTAR_ASC(asc) - | SPI_CTAR_PBR(pbr) - | SPI_CTAR_BR(br); + | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0); + + if (!spi_controller_is_slave(dspi->master)) { + chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode & + SPI_LSB_FIRST ? 1 : 0) + | SPI_CTAR_PCSSCK(pcssck) + | SPI_CTAR_CSSCK(cssck) + | SPI_CTAR_PASC(pasc) + | SPI_CTAR_ASC(asc) + | SPI_CTAR_PBR(pbr) + | SPI_CTAR_BR(br); + } spi_set_ctldata(spi, chip); @@ -970,8 +982,13 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { static void dspi_init(struct fsl_dspi *dspi) { - regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS | - (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0)); + unsigned int mcr = SPI_MCR_PCSIS | + (dspi->devtype_data->xspi_mode ? 
SPI_MCR_XSPI : 0); + + if (!spi_controller_is_slave(dspi->master)) + mcr |= SPI_MCR_MASTER; + + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); if (dspi->devtype_data->xspi_mode) regmap_write(dspi->regmap, SPI_CTARE(0), @@ -1027,6 +1044,9 @@ static int dspi_probe(struct platform_device *pdev) } master->bus_num = bus_num; + if (of_property_read_bool(np, "spi-slave")) + master->slave = true; + dspi->devtype_data = of_device_get_match_data(&pdev->dev); if (!dspi->devtype_data) { dev_err(&pdev->dev, "can't get devtype_data\n"); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 08dcc3c22e88..391863914043 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -48,10 +48,13 @@ #define CR_RTF BIT(8) #define CR_RST BIT(1) #define CR_MEN BIT(0) +#define SR_MBF BIT(24) #define SR_TCF BIT(10) +#define SR_FCF BIT(9) #define SR_RDF BIT(1) #define SR_TDF BIT(0) #define IER_TCIE BIT(10) +#define IER_FCIE BIT(9) #define IER_RDIE BIT(1) #define IER_TDIE BIT(0) #define CFGR1_PCSCFG BIT(27) @@ -59,6 +62,7 @@ #define CFGR1_PCSPOL BIT(8) #define CFGR1_NOSTALL BIT(3) #define CFGR1_MASTER BIT(0) +#define FSR_RXCOUNT (BIT(16)|BIT(17)|BIT(18)) #define RSR_RXEMPTY BIT(1) #define TCR_CPOL BIT(31) #define TCR_CPHA BIT(30) @@ -161,28 +165,10 @@ static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller) return 0; } -static int fsl_lpspi_txfifo_empty(struct fsl_lpspi_data *fsl_lpspi) -{ - u32 txcnt; - unsigned long orig_jiffies = jiffies; - - do { - txcnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; - - if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { - dev_dbg(fsl_lpspi->dev, "txfifo empty timeout\n"); - return -ETIMEDOUT; - } - cond_resched(); - - } while (txcnt); - - return 0; -} - static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) { u8 txfifo_cnt; + u32 temp; txfifo_cnt = readl(fsl_lpspi->base + IMX7ULP_FSR) & 0xff; @@ -193,9 +179,15 @@ static void fsl_lpspi_write_tx_fifo(struct fsl_lpspi_data *fsl_lpspi) txfifo_cnt++; } - if (!fsl_lpspi->remain && (txfifo_cnt < fsl_lpspi->txfifosize)) - writel(0, fsl_lpspi->base + IMX7ULP_TDR); - else + if (txfifo_cnt < fsl_lpspi->txfifosize) { + if (!fsl_lpspi->is_slave) { + temp = readl(fsl_lpspi->base + IMX7ULP_TCR); + temp &= ~TCR_CONTC; + writel(temp, fsl_lpspi->base + IMX7ULP_TCR); + } + + fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); + } else fsl_lpspi_intctrl(fsl_lpspi, IER_TDIE); } @@ -276,10 +268,6 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi) u32 temp; int ret; - temp = CR_RST; - writel(temp, fsl_lpspi->base + IMX7ULP_CR); - writel(0, fsl_lpspi->base + IMX7ULP_CR); - if (!fsl_lpspi->is_slave) { ret = fsl_lpspi_set_bitrate(fsl_lpspi); if (ret) @@ -370,6 +358,24 @@ static int fsl_lpspi_wait_for_completion(struct spi_controller *controller) return 0; } +static int fsl_lpspi_reset(struct fsl_lpspi_data *fsl_lpspi) +{ + u32 temp; + + /* Disable all interrupt */ + fsl_lpspi_intctrl(fsl_lpspi, 0); + + /* W1C for all flags in SR */ + temp = 0x3F << 8; + writel(temp, fsl_lpspi->base + IMX7ULP_SR); + + /* Clear FIFO and disable module */ + temp = CR_RRF | CR_RTF; + writel(temp, fsl_lpspi->base + IMX7ULP_CR); + + return 0; +} + static int fsl_lpspi_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *t) @@ -391,11 +397,7 @@ static int fsl_lpspi_transfer_one(struct spi_controller *controller, if (ret) return ret; - ret = fsl_lpspi_txfifo_empty(fsl_lpspi); - if (ret) - return ret; - - 
fsl_lpspi_read_rx_fifo(fsl_lpspi); + fsl_lpspi_reset(fsl_lpspi); return 0; } @@ -408,7 +410,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller, struct spi_device *spi = msg->spi; struct spi_transfer *xfer; bool is_first_xfer = true; - u32 temp; int ret = 0; msg->status = 0; @@ -428,13 +429,6 @@ static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller, } complete: - if (!fsl_lpspi->is_slave) { - /* de-assert SS, then finalize current message */ - temp = readl(fsl_lpspi->base + IMX7ULP_TCR); - temp &= ~TCR_CONTC; - writel(temp, fsl_lpspi->base + IMX7ULP_TCR); - } - msg->status = ret; spi_finalize_current_message(controller); @@ -443,20 +437,30 @@ complete: static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id) { + u32 temp_SR, temp_IER; struct fsl_lpspi_data *fsl_lpspi = dev_id; - u32 temp; + temp_IER = readl(fsl_lpspi->base + IMX7ULP_IER); fsl_lpspi_intctrl(fsl_lpspi, 0); - temp = readl(fsl_lpspi->base + IMX7ULP_SR); + temp_SR = readl(fsl_lpspi->base + IMX7ULP_SR); fsl_lpspi_read_rx_fifo(fsl_lpspi); - if (temp & SR_TDF) { + if ((temp_SR & SR_TDF) && (temp_IER & IER_TDIE)) { fsl_lpspi_write_tx_fifo(fsl_lpspi); + return IRQ_HANDLED; + } - if (!fsl_lpspi->remain) - complete(&fsl_lpspi->xfer_done); + if (temp_SR & SR_MBF || + readl(fsl_lpspi->base + IMX7ULP_FSR) & FSR_RXCOUNT) { + writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); + fsl_lpspi_intctrl(fsl_lpspi, IER_FCIE); + return IRQ_HANDLED; + } + if (temp_SR & SR_FCF && (temp_IER & IER_FCIE)) { + writel(SR_FCF, fsl_lpspi->base + IMX7ULP_SR); + complete(&fsl_lpspi->xfer_done); return IRQ_HANDLED; } diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c new file mode 100644 index 000000000000..6a713f78a62e --- /dev/null +++ b/drivers/spi/spi-fsl-qspi.c @@ -0,0 +1,966 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Freescale QuadSPI driver. + * + * Copyright (C) 2013 Freescale Semiconductor, Inc. + * Copyright (C) 2018 Bootlin + * Copyright (C) 2018 exceet electronics GmbH + * Copyright (C) 2018 Kontron Electronics GmbH + * + * Transition to SPI MEM interface: + * Authors: + * Boris Brezillon + * Frieder Schrempf + * Yogesh Gaur + * Suresh Gupta + * + * Based on the original fsl-quadspi.c spi-nor driver: + * Author: Freescale Semiconductor, Inc. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry (15). 
+ */ +#define SEQID_LUT 15 + +/* Registers used by the driver */ +#define QUADSPI_MCR 0x00 +#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16) +#define QUADSPI_MCR_MDIS_MASK BIT(14) +#define QUADSPI_MCR_CLR_TXF_MASK BIT(11) +#define QUADSPI_MCR_CLR_RXF_MASK BIT(10) +#define QUADSPI_MCR_DDR_EN_MASK BIT(7) +#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2) +#define QUADSPI_MCR_SWRSTHD_MASK BIT(1) +#define QUADSPI_MCR_SWRSTSD_MASK BIT(0) + +#define QUADSPI_IPCR 0x08 +#define QUADSPI_IPCR_SEQID(x) ((x) << 24) + +#define QUADSPI_BUF3CR 0x1c +#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31) +#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8) +#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8) + +#define QUADSPI_BFGENCR 0x20 +#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12) + +#define QUADSPI_BUF0IND 0x30 +#define QUADSPI_BUF1IND 0x34 +#define QUADSPI_BUF2IND 0x38 +#define QUADSPI_SFAR 0x100 + +#define QUADSPI_SMPR 0x108 +#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16) +#define QUADSPI_SMPR_FSDLY_MASK BIT(6) +#define QUADSPI_SMPR_FSPHS_MASK BIT(5) +#define QUADSPI_SMPR_HSENA_MASK BIT(0) + +#define QUADSPI_RBCT 0x110 +#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0) +#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8) + +#define QUADSPI_TBDR 0x154 + +#define QUADSPI_SR 0x15c +#define QUADSPI_SR_IP_ACC_MASK BIT(1) +#define QUADSPI_SR_AHB_ACC_MASK BIT(2) + +#define QUADSPI_FR 0x160 +#define QUADSPI_FR_TFF_MASK BIT(0) + +#define QUADSPI_SPTRCLR 0x16c +#define QUADSPI_SPTRCLR_IPPTRC BIT(8) +#define QUADSPI_SPTRCLR_BFPTRC BIT(0) + +#define QUADSPI_SFA1AD 0x180 +#define QUADSPI_SFA2AD 0x184 +#define QUADSPI_SFB1AD 0x188 +#define QUADSPI_SFB2AD 0x18c +#define QUADSPI_RBDR(x) (0x200 + ((x) * 4)) + +#define QUADSPI_LUTKEY 0x300 +#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0 + +#define QUADSPI_LCKCR 0x304 +#define QUADSPI_LCKER_LOCK BIT(0) +#define QUADSPI_LCKER_UNLOCK BIT(1) + +#define QUADSPI_RSER 0x164 +#define QUADSPI_RSER_TFIE BIT(0) + +#define QUADSPI_LUT_BASE 0x310 +#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) +#define QUADSPI_LUT_REG(idx) \ + (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4) + +/* Instruction set for the LUT register */ +#define LUT_STOP 0 +#define LUT_CMD 1 +#define LUT_ADDR 2 +#define LUT_DUMMY 3 +#define LUT_MODE 4 +#define LUT_MODE2 5 +#define LUT_MODE4 6 +#define LUT_FSL_READ 7 +#define LUT_FSL_WRITE 8 +#define LUT_JMP_ON_CS 9 +#define LUT_ADDR_DDR 10 +#define LUT_MODE_DDR 11 +#define LUT_MODE2_DDR 12 +#define LUT_MODE4_DDR 13 +#define LUT_FSL_READ_DDR 14 +#define LUT_FSL_WRITE_DDR 15 +#define LUT_DATA_LEARN 16 + +/* + * The PAD definitions for LUT register. + * + * The pad stands for the number of IO lines [0:3]. + * For example, the quad read needs four IO lines, + * so you should use LUT_PAD(4). + */ +#define LUT_PAD(x) (fls(x) - 1) + +/* + * Macro for constructing the LUT entries with the following + * register layout: + * + * --------------------------------------------------- + * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + * --------------------------------------------------- + */ +#define LUT_DEF(idx, ins, pad, opr) \ + ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16)) + +/* Controller needs driver to swap endianness */ +#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0) + +/* Controller needs 4x internal clock */ +#define QUADSPI_QUIRK_4X_INT_CLK BIT(1) + +/* + * TKT253890, the controller needs the driver to fill the txfifo with + * 16 bytes at least to trigger a data transfer, even though the extra + * data won't be transferred. 
+ */ +#define QUADSPI_QUIRK_TKT253890 BIT(2) + +/* TKT245618, the controller cannot wake up from wait mode */ +#define QUADSPI_QUIRK_TKT245618 BIT(3) + +/* + * Controller adds QSPI_AMBA_BASE (base address of the mapped memory) + * internally. No need to add it when setting SFXXAD and SFAR registers + */ +#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4) + +struct fsl_qspi_devtype_data { + unsigned int rxfifo; + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; + bool little_endian; +}; + +static const struct fsl_qspi_devtype_data vybrid_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_SWAP_ENDIAN, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx6sx_data = { + .rxfifo = SZ_128, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx7d_data = { + .rxfifo = SZ_512, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data imx6ul_data = { + .rxfifo = SZ_128, + .txfifo = SZ_512, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK, + .little_endian = true, +}; + +static const struct fsl_qspi_devtype_data ls1021a_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = 0, + .little_endian = false, +}; + +static const struct fsl_qspi_devtype_data ls2080a_data = { + .rxfifo = SZ_128, + .txfifo = SZ_64, + .ahb_buf_size = SZ_1K, + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL, + .little_endian = true, +}; + +struct fsl_qspi { + void __iomem *iobase; + void __iomem *ahb_addr; + u32 memmap_phy; + struct clk *clk, *clk_en; + struct device *dev; + struct completion c; + const struct fsl_qspi_devtype_data *devtype_data; + struct mutex lock; + struct pm_qos_request pm_qos_req; + int selected; +}; + +static inline int needs_swap_endian(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN; +} + +static inline int needs_4x_clock(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK; +} + +static inline int needs_fill_txfifo(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890; +} + +static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) +{ + return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618; +} + +static inline int needs_amba_base_offset(struct fsl_qspi *q) +{ + return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL); +} + +/* + * An IC bug makes it necessary to rearrange the 32-bit data. + * Later chips, such as IMX6SLX, have fixed this bug. + */ +static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) +{ + return needs_swap_endian(q) ? __swab32(a) : a; +} + +/* + * R/W functions for big- or little-endian registers: + * The QSPI controller's endianness is independent of + * the CPU core's endianness. So far, although the CPU + * core is little-endian the QSPI controller can use + * big-endian or little-endian. 
+ */ +static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) +{ + if (q->devtype_data->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) +{ + if (q->devtype_data->little_endian) + return ioread32(addr); + + return ioread32be(addr); +} + +static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id) +{ + struct fsl_qspi *q = dev_id; + u32 reg; + + /* clear interrupt */ + reg = qspi_readl(q, q->iobase + QUADSPI_FR); + qspi_writel(q, reg, q->iobase + QUADSPI_FR); + + if (reg & QUADSPI_FR_TFF_MASK) + complete(&q->c); + + dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg); + return IRQ_HANDLED; +} + +static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width) +{ + switch (width) { + case 1: + case 2: + case 4: + return 0; + } + + return -ENOTSUPP; +} + +static bool fsl_qspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + int ret; + + ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth); + + if (op->addr.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth); + + if (op->dummy.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth); + + if (op->data.nbytes) + ret |= fsl_qspi_check_buswidth(q, op->data.buswidth); + + if (ret) + return false; + + /* + * The number of instructions needed for the op, needs + * to fit into a single LUT entry. + */ + if (op->addr.nbytes + + (op->dummy.nbytes ? 1:0) + + (op->data.nbytes ? 1:0) > 6) + return false; + + /* Max 64 dummy clock cycles supported */ + if (op->dummy.nbytes && + (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) + return false; + + /* Max data length, check controller limits and alignment */ + if (op->data.dir == SPI_MEM_DATA_IN && + (op->data.nbytes > q->devtype_data->ahb_buf_size || + (op->data.nbytes > q->devtype_data->rxfifo - 4 && + !IS_ALIGNED(op->data.nbytes, 8)))) + return false; + + if (op->data.dir == SPI_MEM_DATA_OUT && + op->data.nbytes > q->devtype_data->txfifo) + return false; + + return true; +} + +static void fsl_qspi_prepare_lut(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + u32 lutval[4] = {}; + int lutidx = 1, i; + + lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode); + + /* + * For some unknown reason, using LUT_ADDR doesn't work in some + * cases (at least with only one byte long addresses), so + * let's use LUT_MODE to write the address bytes one by one + */ + for (i = 0; i < op->addr.nbytes; i++) { + u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1)); + + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE, + LUT_PAD(op->addr.buswidth), + addrbyte); + lutidx++; + } + + if (op->dummy.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY, + LUT_PAD(op->dummy.buswidth), + op->dummy.nbytes * 8 / + op->dummy.buswidth); + lutidx++; + } + + if (op->data.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, + op->data.dir == SPI_MEM_DATA_IN ? 
+ LUT_FSL_READ : LUT_FSL_WRITE, + LUT_PAD(op->data.buswidth), + 0); + lutidx++; + } + + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0); + + /* unlock LUT */ + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); + + /* fill LUT */ + for (i = 0; i < ARRAY_SIZE(lutval); i++) + qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i)); + + /* lock LUT */ + qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); + qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); +} + +static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) +{ + int ret; + + ret = clk_prepare_enable(q->clk_en); + if (ret) + return ret; + + ret = clk_prepare_enable(q->clk); + if (ret) { + clk_disable_unprepare(q->clk_en); + return ret; + } + + if (needs_wakeup_wait_mode(q)) + pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); + + return 0; +} + +static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) +{ + if (needs_wakeup_wait_mode(q)) + pm_qos_remove_request(&q->pm_qos_req); + + clk_disable_unprepare(q->clk); + clk_disable_unprepare(q->clk_en); +} + +/* + * If we have changed the content of the flash by writing or erasing, or if we + * read from flash with a different offset into the page buffer, we need to + * invalidate the AHB buffer. If we do not do so, we may read out the wrong + * data. The spec tells us reset the AHB domain and Serial Flash domain at + * the same time. + */ +static void fsl_qspi_invalidate(struct fsl_qspi *q) +{ + u32 reg; + + reg = qspi_readl(q, q->iobase + QUADSPI_MCR); + reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK; + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); + + /* + * The minimum delay : 1 AHB + 2 SFCK clocks. + * Delay 1 us is enough. 
+ */ + udelay(1); + + reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK); + qspi_writel(q, reg, q->iobase + QUADSPI_MCR); +} + +static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi) +{ + unsigned long rate = spi->max_speed_hz; + int ret; + + if (q->selected == spi->chip_select) + return; + + if (needs_4x_clock(q)) + rate *= 4; + + fsl_qspi_clk_disable_unprep(q); + + ret = clk_set_rate(q->clk, rate); + if (ret) + return; + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) + return; + + q->selected = spi->chip_select; + + fsl_qspi_invalidate(q); +} + +static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op) +{ + memcpy_fromio(op->data.buf.in, + q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size, + op->data.nbytes); +} + +static void fsl_qspi_fill_txfifo(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int i; + u32 val; + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { + memcpy(&val, op->data.buf.out + i, 4); + val = fsl_qspi_endian_xchg(q, val); + qspi_writel(q, val, base + QUADSPI_TBDR); + } + + if (i < op->data.nbytes) { + memcpy(&val, op->data.buf.out + i, op->data.nbytes - i); + val = fsl_qspi_endian_xchg(q, val); + qspi_writel(q, val, base + QUADSPI_TBDR); + } + + if (needs_fill_txfifo(q)) { + for (i = op->data.nbytes; i < 16; i += 4) + qspi_writel(q, 0, base + QUADSPI_TBDR); + } +} + +static void fsl_qspi_read_rxfifo(struct fsl_qspi *q, + const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int i; + u8 *buf = op->data.buf.in; + u32 val; + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) { + val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); + val = fsl_qspi_endian_xchg(q, val); + memcpy(buf + i, &val, 4); + } + + if (i < op->data.nbytes) { + val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); + val = fsl_qspi_endian_xchg(q, val); + memcpy(buf + i, &val, op->data.nbytes - i); + } +} + +static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op) +{ + void __iomem *base = q->iobase; + int err = 0; + + init_completion(&q->c); + + /* + * Always start the sequence at the same index since we update + * the LUT at each exec_op() call. And also specify the DATA + * length, since it's has not been specified in the LUT. + */ + qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT), + base + QUADSPI_IPCR); + + /* Wait for the interrupt. 
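+ * Completion is signalled by the transfer-finished flag (QUADSPI_FR_TFF):
+ * fsl_qspi_irq_handler() completes &q->c when it sees the flag, and the
+ * one second timeout below turns a stuck command into -ETIMEDOUT.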
*/ + if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + fsl_qspi_read_rxfifo(q, op); + + return err; +} + +static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base, + u32 mask, u32 delay_us, u32 timeout_us) +{ + u32 reg; + + if (!q->devtype_data->little_endian) + mask = (u32)cpu_to_be32(mask); + + return readl_poll_timeout(base, reg, !(reg & mask), delay_us, + timeout_us); +} + +static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + void __iomem *base = q->iobase; + u32 addr_offset = 0; + int err = 0; + + mutex_lock(&q->lock); + + /* wait for the controller being ready */ + fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK | + QUADSPI_SR_AHB_ACC_MASK), 10, 1000); + + fsl_qspi_select_mem(q, mem->spi); + + if (needs_amba_base_offset(q)) + addr_offset = q->memmap_phy; + + qspi_writel(q, + q->selected * q->devtype_data->ahb_buf_size + addr_offset, + base + QUADSPI_SFAR); + + qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) | + QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK, + base + QUADSPI_MCR); + + qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC, + base + QUADSPI_SPTRCLR); + + fsl_qspi_prepare_lut(q, op); + + /* + * If we have large chunks of data, we read them through the AHB bus + * by accessing the mapped memory. In all other cases we use + * IP commands to access the flash. + */ + if (op->data.nbytes > (q->devtype_data->rxfifo - 4) && + op->data.dir == SPI_MEM_DATA_IN) { + fsl_qspi_read_ahb(q, op); + } else { + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | + QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT); + + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) + fsl_qspi_fill_txfifo(q, op); + + err = fsl_qspi_do_op(q, op); + } + + /* Invalidate the data in the AHB buffer. */ + fsl_qspi_invalidate(q); + + mutex_unlock(&q->lock); + + return err; +} + +static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes > q->devtype_data->txfifo) + op->data.nbytes = q->devtype_data->txfifo; + } else { + if (op->data.nbytes > q->devtype_data->ahb_buf_size) + op->data.nbytes = q->devtype_data->ahb_buf_size; + else if (op->data.nbytes > (q->devtype_data->rxfifo - 4)) + op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8); + } + + return 0; +} + +static int fsl_qspi_default_setup(struct fsl_qspi *q) +{ + void __iomem *base = q->iobase; + u32 reg, addr_offset = 0; + int ret; + + /* disable and unprepare clock to avoid glitch pass to controller */ + fsl_qspi_clk_disable_unprep(q); + + /* the default frequency, we will change it later if necessary. 
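+ * 66 MHz is only a conservative bring-up rate; fsl_qspi_select_mem()
+ * reprograms the clock to the selected slave's max_speed_hz (times four
+ * when the 4x internal clock quirk is set) whenever a different chip
+ * select is accessed.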
*/ + ret = clk_set_rate(q->clk, 66000000); + if (ret) + return ret; + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) + return ret; + + /* Reset the module */ + qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, + base + QUADSPI_MCR); + udelay(1); + + /* Disable the module */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, + base + QUADSPI_MCR); + + reg = qspi_readl(q, base + QUADSPI_SMPR); + qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK + | QUADSPI_SMPR_FSPHS_MASK + | QUADSPI_SMPR_HSENA_MASK + | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR); + + /* We only use the buffer3 for AHB read */ + qspi_writel(q, 0, base + QUADSPI_BUF0IND); + qspi_writel(q, 0, base + QUADSPI_BUF1IND); + qspi_writel(q, 0, base + QUADSPI_BUF2IND); + + qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT), + q->iobase + QUADSPI_BFGENCR); + qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT); + qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | + QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8), + base + QUADSPI_BUF3CR); + + if (needs_amba_base_offset(q)) + addr_offset = q->memmap_phy; + + /* + * In HW there can be a maximum of four chips on two buses with + * two chip selects on each bus. We use four chip selects in SW + * to differentiate between the four chips. + * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD, + * SFB2AD accordingly. + */ + qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset, + base + QUADSPI_SFA1AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset, + base + QUADSPI_SFA2AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset, + base + QUADSPI_SFB1AD); + qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset, + base + QUADSPI_SFB2AD); + + q->selected = -1; + + /* Enable the module */ + qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, + base + QUADSPI_MCR); + + /* clear all interrupt status */ + qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); + + /* enable the interrupt */ + qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); + + return 0; +} + +static const char *fsl_qspi_get_name(struct spi_mem *mem) +{ + struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); + struct device *dev = &mem->spi->dev; + const char *name; + + /* + * In order to keep mtdparts compatible with the old MTD driver at + * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the + * platform_device of the controller. 
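+ * With a single flash child the controller's own dev_name() is returned
+ * unchanged; with several children a "-<chip_select>" suffix is appended
+ * so each flash still gets a stable, unique name.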
+ */ + if (of_get_available_child_count(q->dev->of_node) == 1) + return dev_name(q->dev); + + name = devm_kasprintf(dev, GFP_KERNEL, + "%s-%d", dev_name(q->dev), + mem->spi->chip_select); + + if (!name) { + dev_err(dev, "failed to get memory for custom flash name\n"); + return ERR_PTR(-ENOMEM); + } + + return name; +} + +static const struct spi_controller_mem_ops fsl_qspi_mem_ops = { + .adjust_op_size = fsl_qspi_adjust_op_size, + .supports_op = fsl_qspi_supports_op, + .exec_op = fsl_qspi_exec_op, + .get_name = fsl_qspi_get_name, +}; + +static int fsl_qspi_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct fsl_qspi *q; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*q)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD; + + q = spi_controller_get_devdata(ctlr); + q->dev = dev; + q->devtype_data = of_device_get_match_data(dev); + if (!q->devtype_data) { + ret = -ENODEV; + goto err_put_ctrl; + } + + platform_set_drvdata(pdev, q); + + /* find the resources */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); + q->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(q->iobase)) { + ret = PTR_ERR(q->iobase); + goto err_put_ctrl; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "QuadSPI-memory"); + q->ahb_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(q->ahb_addr)) { + ret = PTR_ERR(q->ahb_addr); + goto err_put_ctrl; + } + + q->memmap_phy = res->start; + + /* find the clocks */ + q->clk_en = devm_clk_get(dev, "qspi_en"); + if (IS_ERR(q->clk_en)) { + ret = PTR_ERR(q->clk_en); + goto err_put_ctrl; + } + + q->clk = devm_clk_get(dev, "qspi"); + if (IS_ERR(q->clk)) { + ret = PTR_ERR(q->clk); + goto err_put_ctrl; + } + + ret = fsl_qspi_clk_prep_enable(q); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + goto err_put_ctrl; + } + + /* find the irq */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "failed to get the irq: %d\n", ret); + goto err_disable_clk; + } + + ret = devm_request_irq(dev, ret, + fsl_qspi_irq_handler, 0, pdev->name, q); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto err_disable_clk; + } + + mutex_init(&q->lock); + + ctlr->bus_num = -1; + ctlr->num_chipselect = 4; + ctlr->mem_ops = &fsl_qspi_mem_ops; + + fsl_qspi_default_setup(q); + + ctlr->dev.of_node = np; + + ret = spi_register_controller(ctlr); + if (ret) + goto err_destroy_mutex; + + return 0; + +err_destroy_mutex: + mutex_destroy(&q->lock); + +err_disable_clk: + fsl_qspi_clk_disable_unprep(q); + +err_put_ctrl: + spi_controller_put(ctlr); + + dev_err(dev, "Freescale QuadSPI probe failed\n"); + return ret; +} + +static int fsl_qspi_remove(struct platform_device *pdev) +{ + struct fsl_qspi *q = platform_get_drvdata(pdev); + + /* disable the hardware */ + qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); + qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); + + fsl_qspi_clk_disable_unprep(q); + + mutex_destroy(&q->lock); + + return 0; +} + +static int fsl_qspi_suspend(struct device *dev) +{ + return 0; +} + +static int fsl_qspi_resume(struct device *dev) +{ + struct fsl_qspi *q = dev_get_drvdata(dev); + + fsl_qspi_default_setup(q); + + return 0; +} + +static const struct of_device_id fsl_qspi_dt_ids[] = { + { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, }, + { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, }, + 
{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, }, + { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, }, + { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, }, + { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids); + +static const struct dev_pm_ops fsl_qspi_pm_ops = { + .suspend = fsl_qspi_suspend, + .resume = fsl_qspi_resume, +}; + +static struct platform_driver fsl_qspi_driver = { + .driver = { + .name = "fsl-quadspi", + .of_match_table = fsl_qspi_dt_ids, + .pm = &fsl_qspi_pm_ops, + }, + .probe = fsl_qspi_probe, + .remove = fsl_qspi_remove, +}; +module_platform_driver(fsl_qspi_driver); + +MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver"); +MODULE_AUTHOR("Freescale Semiconductor Inc."); +MODULE_AUTHOR("Boris Brezillon "); +MODULE_AUTHOR("Frieder Schrempf "); +MODULE_AUTHOR("Yogesh Gaur "); +MODULE_AUTHOR("Suresh Gupta "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c index fdb7cb88fb56..5f0b0d5bfef4 100644 --- a/drivers/spi/spi-geni-qcom.c +++ b/drivers/spi/spi-geni-qcom.c @@ -89,9 +89,6 @@ struct spi_geni_master { int irq; }; -static void handle_fifo_timeout(struct spi_master *spi, - struct spi_message *msg); - static int get_spi_clk_cfg(unsigned int speed_hz, struct spi_geni_master *mas, unsigned int *clk_idx, @@ -122,6 +119,32 @@ static int get_spi_clk_cfg(unsigned int speed_hz, return ret; } +static void handle_fifo_timeout(struct spi_master *spi, + struct spi_message *msg) +{ + struct spi_geni_master *mas = spi_master_get_devdata(spi); + unsigned long time_left, flags; + struct geni_se *se = &mas->se; + + spin_lock_irqsave(&mas->lock, flags); + reinit_completion(&mas->xfer_done); + mas->cur_mcmd = CMD_CANCEL; + geni_se_cancel_m_cmd(se); + writel(0, se->base + SE_GENI_TX_WATERMARK_REG); + spin_unlock_irqrestore(&mas->lock, flags); + time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); + if (time_left) + return; + + spin_lock_irqsave(&mas->lock, flags); + reinit_completion(&mas->xfer_done); + geni_se_abort_m_cmd(se); + spin_unlock_irqrestore(&mas->lock, flags); + time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); + if (!time_left) + dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); +} + static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) { struct spi_geni_master *mas = spi_master_get_devdata(slv->master); @@ -233,7 +256,6 @@ static int spi_geni_prepare_message(struct spi_master *spi, struct geni_se *se = &mas->se; geni_se_select_mode(se, GENI_SE_FIFO); - reinit_completion(&mas->xfer_done); ret = setup_fifo_params(spi_msg->spi, spi); if (ret) dev_err(mas->dev, "Couldn't select mode %d\n", ret); @@ -357,32 +379,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer, writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG); } -static void handle_fifo_timeout(struct spi_master *spi, - struct spi_message *msg) -{ - struct spi_geni_master *mas = spi_master_get_devdata(spi); - unsigned long time_left, flags; - struct geni_se *se = &mas->se; - - spin_lock_irqsave(&mas->lock, flags); - reinit_completion(&mas->xfer_done); - mas->cur_mcmd = CMD_CANCEL; - geni_se_cancel_m_cmd(se); - writel(0, se->base + SE_GENI_TX_WATERMARK_REG); - spin_unlock_irqrestore(&mas->lock, flags); - time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); - if (time_left) - return; - - spin_lock_irqsave(&mas->lock, flags); - reinit_completion(&mas->xfer_done); - geni_se_abort_m_cmd(se); - 
spin_unlock_irqrestore(&mas->lock, flags); - time_left = wait_for_completion_timeout(&mas->xfer_done, HZ); - if (!time_left) - dev_err(mas->dev, "Failed to cancel/abort m_cmd\n"); -} - static int spi_geni_transfer_one(struct spi_master *spi, struct spi_device *slv, struct spi_transfer *xfer) diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index a4aee26028cd..53b35c56a557 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c @@ -428,7 +428,8 @@ static int spi_gpio_probe(struct platform_device *pdev) return status; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); - master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL; + master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL | + SPI_CS_HIGH; master->flags = master_flags; master->bus_num = pdev->id; /* The master needs to think there is a chipselect even if not connected */ @@ -455,7 +456,6 @@ static int spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3; } spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer; - spi_gpio->bitbang.flags = SPI_CS_HIGH; status = spi_bitbang_start(&spi_gpio->bitbang); if (status) diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index 5217a5628be2..a4d8d19ecff9 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -537,7 +537,6 @@ EXPORT_SYMBOL_GPL(spi_mem_dirmap_create); /** * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor * @desc: the direct mapping descriptor to destroy - * @info: direct mapping information * * This function destroys a direct mapping descriptor previously created by * spi_mem_dirmap_create(). @@ -548,9 +547,80 @@ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc) if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy) ctlr->mem_ops->dirmap_destroy(desc); + + kfree(desc); } EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy); +static void devm_spi_mem_dirmap_release(struct device *dev, void *res) +{ + struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res; + + spi_mem_dirmap_destroy(desc); +} + +/** + * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach + * it to a device + * @dev: device the dirmap desc will be attached to + * @mem: SPI mem device this direct mapping should be created for + * @info: direct mapping information + * + * devm_ variant of the spi_mem_dirmap_create() function. See + * spi_mem_dirmap_create() for more details. + * + * Return: a valid pointer in case of success, and ERR_PTR() otherwise. 
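+ *
+ * A minimal usage sketch from a driver probe path (all driver-side names
+ * here are illustrative):
+ *
+ *	desc = devm_spi_mem_dirmap_create(dev, mem, &info);
+ *	if (IS_ERR(desc))
+ *		return PTR_ERR(desc);
+ *
+ * The descriptor is then released automatically (through
+ * spi_mem_dirmap_destroy()) when @dev is unbound, so error paths need no
+ * explicit destroy call.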
+ */ +struct spi_mem_dirmap_desc * +devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, + const struct spi_mem_dirmap_info *info) +{ + struct spi_mem_dirmap_desc **ptr, *desc; + + ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + desc = spi_mem_dirmap_create(mem, info); + if (IS_ERR(desc)) { + devres_free(ptr); + } else { + *ptr = desc; + devres_add(dev, ptr); + } + + return desc; +} +EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create); + +static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data) +{ + struct spi_mem_dirmap_desc **ptr = res; + + if (WARN_ON(!ptr || !*ptr)) + return 0; + + return *ptr == data; +} + +/** + * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached + * to a device + * @dev: device the dirmap desc is attached to + * @desc: the direct mapping descriptor to destroy + * + * devm_ variant of the spi_mem_dirmap_destroy() function. See + * spi_mem_dirmap_destroy() for more details. + */ +void devm_spi_mem_dirmap_destroy(struct device *dev, + struct spi_mem_dirmap_desc *desc) +{ + devres_release(dev, devm_spi_mem_dirmap_release, + devm_spi_mem_dirmap_match, desc); +} +EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy); + /** * spi_mem_dirmap_dirmap_read() - Read data through a direct mapping * @desc: direct mapping descriptor diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 6ac95a2a21ce..7bf53cfc25d6 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -39,6 +39,7 @@ #include #include #include +#include #define DRIVER_NAME "mxs-spi" @@ -374,6 +375,8 @@ static int mxs_spi_transfer_one(struct spi_master *master, list_for_each_entry(t, &m->transfers, transfer_list) { + trace_spi_transfer_start(m, t); + status = mxs_spi_setup_transfer(m->spi, t); if (status) break; @@ -419,6 +422,8 @@ static int mxs_spi_transfer_one(struct spi_master *master, flag); } + trace_spi_transfer_stop(m, t); + if (status) { stmp_reset_block(ssp->base); break; diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c index e1dca79b9090..734a2b956959 100644 --- a/drivers/spi/spi-npcm-pspi.c +++ b/drivers/spi/spi-npcm-pspi.c @@ -465,7 +465,8 @@ out_master_put: static int npcm_pspi_remove(struct platform_device *pdev) { - struct npcm_pspi *priv = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct npcm_pspi *priv = spi_master_get_devdata(master); npcm_pspi_reset_hw(priv); clk_disable_unprepare(priv->clk); diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c new file mode 100644 index 000000000000..8894f98cc99c --- /dev/null +++ b/drivers/spi/spi-nxp-fspi.c @@ -0,0 +1,1106 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * NXP FlexSPI(FSPI) controller driver. + * + * Copyright 2019 NXP. + * + * FlexSPI is a flexsible SPI host controller which supports two SPI + * channels and up to 4 external devices. Each channel supports + * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional + * data lines). + * + * FlexSPI controller is driven by the LUT(Look-up Table) registers + * LUT registers are a look-up-table for sequences of instructions. + * A valid sequence consists of four LUT registers. + * Maximum 32 LUT sequences can be programmed simultaneously. + * + * LUTs are being created at run-time based on the commands passed + * from the spi-mem framework, thus using single LUT index. + * + * Software triggered Flash read/write access by IP Bus. 
+ * + * Memory mapped read access by AHB Bus. + * + * Based on SPI MEM interface and spi-fsl-qspi.c driver. + * + * Author: + * Yogesh Narayan Gaur + * Boris Brezillon + * Frieder Schrempf + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * The driver only uses one single LUT entry, that is updated on + * each call of exec_op(). Index 0 is preset at boot with a basic + * read operation, so let's use the last entry (31). + */ +#define SEQID_LUT 31 + +/* Registers used by the driver */ +#define FSPI_MCR0 0x00 +#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24) +#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16) +#define FSPI_MCR0_LEARN_EN BIT(15) +#define FSPI_MCR0_SCRFRUN_EN BIT(14) +#define FSPI_MCR0_OCTCOMB_EN BIT(13) +#define FSPI_MCR0_DOZE_EN BIT(12) +#define FSPI_MCR0_HSEN BIT(11) +#define FSPI_MCR0_SERCLKDIV BIT(8) +#define FSPI_MCR0_ATDF_EN BIT(7) +#define FSPI_MCR0_ARDF_EN BIT(6) +#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4) +#define FSPI_MCR0_END_CFG(x) ((x) << 2) +#define FSPI_MCR0_MDIS BIT(1) +#define FSPI_MCR0_SWRST BIT(0) + +#define FSPI_MCR1 0x04 +#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16) +#define FSPI_MCR1_AHB_TIMEOUT(x) (x) + +#define FSPI_MCR2 0x08 +#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24) +#define FSPI_MCR2_SAMEDEVICEEN BIT(15) +#define FSPI_MCR2_CLRLRPHS BIT(14) +#define FSPI_MCR2_ABRDATSZ BIT(8) +#define FSPI_MCR2_ABRLEARN BIT(7) +#define FSPI_MCR2_ABR_READ BIT(6) +#define FSPI_MCR2_ABRWRITE BIT(5) +#define FSPI_MCR2_ABRDUMMY BIT(4) +#define FSPI_MCR2_ABR_MODE BIT(3) +#define FSPI_MCR2_ABRCADDR BIT(2) +#define FSPI_MCR2_ABRRADDR BIT(1) +#define FSPI_MCR2_ABR_CMD BIT(0) + +#define FSPI_AHBCR 0x0c +#define FSPI_AHBCR_RDADDROPT BIT(6) +#define FSPI_AHBCR_PREF_EN BIT(5) +#define FSPI_AHBCR_BUFF_EN BIT(4) +#define FSPI_AHBCR_CACH_EN BIT(3) +#define FSPI_AHBCR_CLRTXBUF BIT(2) +#define FSPI_AHBCR_CLRRXBUF BIT(1) +#define FSPI_AHBCR_PAR_EN BIT(0) + +#define FSPI_INTEN 0x10 +#define FSPI_INTEN_SCLKSBWR BIT(9) +#define FSPI_INTEN_SCLKSBRD BIT(8) +#define FSPI_INTEN_DATALRNFL BIT(7) +#define FSPI_INTEN_IPTXWE BIT(6) +#define FSPI_INTEN_IPRXWA BIT(5) +#define FSPI_INTEN_AHBCMDERR BIT(4) +#define FSPI_INTEN_IPCMDERR BIT(3) +#define FSPI_INTEN_AHBCMDGE BIT(2) +#define FSPI_INTEN_IPCMDGE BIT(1) +#define FSPI_INTEN_IPCMDDONE BIT(0) + +#define FSPI_INTR 0x14 +#define FSPI_INTR_SCLKSBWR BIT(9) +#define FSPI_INTR_SCLKSBRD BIT(8) +#define FSPI_INTR_DATALRNFL BIT(7) +#define FSPI_INTR_IPTXWE BIT(6) +#define FSPI_INTR_IPRXWA BIT(5) +#define FSPI_INTR_AHBCMDERR BIT(4) +#define FSPI_INTR_IPCMDERR BIT(3) +#define FSPI_INTR_AHBCMDGE BIT(2) +#define FSPI_INTR_IPCMDGE BIT(1) +#define FSPI_INTR_IPCMDDONE BIT(0) + +#define FSPI_LUTKEY 0x18 +#define FSPI_LUTKEY_VALUE 0x5AF05AF0 + +#define FSPI_LCKCR 0x1C + +#define FSPI_LCKER_LOCK 0x1 +#define FSPI_LCKER_UNLOCK 0x2 + +#define FSPI_BUFXCR_INVALID_MSTRID 0xE +#define FSPI_AHBRX_BUF0CR0 0x20 +#define FSPI_AHBRX_BUF1CR0 0x24 +#define FSPI_AHBRX_BUF2CR0 0x28 +#define FSPI_AHBRX_BUF3CR0 0x2C +#define FSPI_AHBRX_BUF4CR0 0x30 +#define FSPI_AHBRX_BUF5CR0 0x34 +#define FSPI_AHBRX_BUF6CR0 0x38 +#define FSPI_AHBRX_BUF7CR0 0x3C +#define FSPI_AHBRXBUF0CR7_PREF BIT(31) + +#define FSPI_AHBRX_BUF0CR1 0x40 +#define FSPI_AHBRX_BUF1CR1 0x44 +#define FSPI_AHBRX_BUF2CR1 0x48 +#define FSPI_AHBRX_BUF3CR1 0x4C +#define FSPI_AHBRX_BUF4CR1 0x50 +#define FSPI_AHBRX_BUF5CR1 0x54 +#define FSPI_AHBRX_BUF6CR1 0x58 +#define 
FSPI_AHBRX_BUF7CR1 0x5C + +#define FSPI_FLSHA1CR0 0x60 +#define FSPI_FLSHA2CR0 0x64 +#define FSPI_FLSHB1CR0 0x68 +#define FSPI_FLSHB2CR0 0x6C +#define FSPI_FLSHXCR0_SZ_KB 10 +#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB) + +#define FSPI_FLSHA1CR1 0x70 +#define FSPI_FLSHA2CR1 0x74 +#define FSPI_FLSHB1CR1 0x78 +#define FSPI_FLSHB2CR1 0x7C +#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16) +#define FSPI_FLSHXCR1_CAS(x) ((x) << 11) +#define FSPI_FLSHXCR1_WA BIT(10) +#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5) +#define FSPI_FLSHXCR1_TCSS(x) (x) + +#define FSPI_FLSHA1CR2 0x80 +#define FSPI_FLSHA2CR2 0x84 +#define FSPI_FLSHB1CR2 0x88 +#define FSPI_FLSHB2CR2 0x8C +#define FSPI_FLSHXCR2_CLRINSP BIT(24) +#define FSPI_FLSHXCR2_AWRWAIT BIT(16) +#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13 +#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8 +#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5 +#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0 + +#define FSPI_IPCR0 0xA0 + +#define FSPI_IPCR1 0xA4 +#define FSPI_IPCR1_IPAREN BIT(31) +#define FSPI_IPCR1_SEQNUM_SHIFT 24 +#define FSPI_IPCR1_SEQID_SHIFT 16 +#define FSPI_IPCR1_IDATSZ(x) (x) + +#define FSPI_IPCMD 0xB0 +#define FSPI_IPCMD_TRG BIT(0) + +#define FSPI_DLPR 0xB4 + +#define FSPI_IPRXFCR 0xB8 +#define FSPI_IPRXFCR_CLR BIT(0) +#define FSPI_IPRXFCR_DMA_EN BIT(1) +#define FSPI_IPRXFCR_WMRK(x) ((x) << 2) + +#define FSPI_IPTXFCR 0xBC +#define FSPI_IPTXFCR_CLR BIT(0) +#define FSPI_IPTXFCR_DMA_EN BIT(1) +#define FSPI_IPTXFCR_WMRK(x) ((x) << 2) + +#define FSPI_DLLACR 0xC0 +#define FSPI_DLLACR_OVRDEN BIT(8) + +#define FSPI_DLLBCR 0xC4 +#define FSPI_DLLBCR_OVRDEN BIT(8) + +#define FSPI_STS0 0xE0 +#define FSPI_STS0_DLPHB(x) ((x) << 8) +#define FSPI_STS0_DLPHA(x) ((x) << 4) +#define FSPI_STS0_CMD_SRC(x) ((x) << 2) +#define FSPI_STS0_ARB_IDLE BIT(1) +#define FSPI_STS0_SEQ_IDLE BIT(0) + +#define FSPI_STS1 0xE4 +#define FSPI_STS1_IP_ERRCD(x) ((x) << 24) +#define FSPI_STS1_IP_ERRID(x) ((x) << 16) +#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8) +#define FSPI_STS1_AHB_ERRID(x) (x) + +#define FSPI_AHBSPNST 0xEC +#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16) +#define FSPI_AHBSPNST_BUFID(x) ((x) << 1) +#define FSPI_AHBSPNST_ACTIVE BIT(0) + +#define FSPI_IPRXFSTS 0xF0 +#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16) +#define FSPI_IPRXFSTS_FILL(x) (x) + +#define FSPI_IPTXFSTS 0xF4 +#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16) +#define FSPI_IPTXFSTS_FILL(x) (x) + +#define FSPI_RFDR 0x100 +#define FSPI_TFDR 0x180 + +#define FSPI_LUT_BASE 0x200 +#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4) +#define FSPI_LUT_REG(idx) \ + (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4) + +/* register map end */ + +/* Instruction set for the LUT register. */ +#define LUT_STOP 0x00 +#define LUT_CMD 0x01 +#define LUT_ADDR 0x02 +#define LUT_CADDR_SDR 0x03 +#define LUT_MODE 0x04 +#define LUT_MODE2 0x05 +#define LUT_MODE4 0x06 +#define LUT_MODE8 0x07 +#define LUT_NXP_WRITE 0x08 +#define LUT_NXP_READ 0x09 +#define LUT_LEARN_SDR 0x0A +#define LUT_DATSZ_SDR 0x0B +#define LUT_DUMMY 0x0C +#define LUT_DUMMY_RWDS_SDR 0x0D +#define LUT_JMP_ON_CS 0x1F +#define LUT_CMD_DDR 0x21 +#define LUT_ADDR_DDR 0x22 +#define LUT_CADDR_DDR 0x23 +#define LUT_MODE_DDR 0x24 +#define LUT_MODE2_DDR 0x25 +#define LUT_MODE4_DDR 0x26 +#define LUT_MODE8_DDR 0x27 +#define LUT_WRITE_DDR 0x28 +#define LUT_READ_DDR 0x29 +#define LUT_LEARN_DDR 0x2A +#define LUT_DATSZ_DDR 0x2B +#define LUT_DUMMY_DDR 0x2C +#define LUT_DUMMY_RWDS_DDR 0x2D + +/* + * Calculate number of required PAD bits for LUT register. + * + * The pad stands for the number of IO lines [0:7]. 
+ * For example, the octal read needs eight IO lines, + * so you should use LUT_PAD(8). This macro + * returns 3 i.e. use eight (2^3) IP lines for read. + */ +#define LUT_PAD(x) (fls(x) - 1) + +/* + * Macro for constructing the LUT entries with the following + * register layout: + * + * --------------------------------------------------- + * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 | + * --------------------------------------------------- + */ +#define PAD_SHIFT 8 +#define INSTR_SHIFT 10 +#define OPRND_SHIFT 16 + +/* Macros for constructing the LUT register. */ +#define LUT_DEF(idx, ins, pad, opr) \ + ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \ + (opr)) << (((idx) % 2) * OPRND_SHIFT)) + +#define POLL_TOUT 5000 +#define NXP_FSPI_MAX_CHIPSELECT 4 + +struct nxp_fspi_devtype_data { + unsigned int rxfifo; + unsigned int txfifo; + unsigned int ahb_buf_size; + unsigned int quirks; + bool little_endian; +}; + +static const struct nxp_fspi_devtype_data lx2160a_data = { + .rxfifo = SZ_512, /* (64 * 64 bits) */ + .txfifo = SZ_1K, /* (128 * 64 bits) */ + .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */ + .quirks = 0, + .little_endian = true, /* little-endian */ +}; + +struct nxp_fspi { + void __iomem *iobase; + void __iomem *ahb_addr; + u32 memmap_phy; + u32 memmap_phy_size; + struct clk *clk, *clk_en; + struct device *dev; + struct completion c; + const struct nxp_fspi_devtype_data *devtype_data; + struct mutex lock; + struct pm_qos_request pm_qos_req; + int selected; +}; + +/* + * R/W functions for big- or little-endian registers: + * The FSPI controller's endianness is independent of + * the CPU core's endianness. So far, although the CPU + * core is little-endian the FSPI controller can use + * big-endian or little-endian. + */ +static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr) +{ + if (f->devtype_data->little_endian) + iowrite32(val, addr); + else + iowrite32be(val, addr); +} + +static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr) +{ + if (f->devtype_data->little_endian) + return ioread32(addr); + else + return ioread32be(addr); +} + +static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id) +{ + struct nxp_fspi *f = dev_id; + u32 reg; + + /* clear interrupt */ + reg = fspi_readl(f, f->iobase + FSPI_INTR); + fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR); + + if (reg & FSPI_INTR_IPCMDDONE) + complete(&f->c); + + return IRQ_HANDLED; +} + +static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width) +{ + switch (width) { + case 1: + case 2: + case 4: + case 8: + return 0; + } + + return -ENOTSUPP; +} + +static bool nxp_fspi_supports_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + int ret; + + ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth); + + if (op->addr.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth); + + if (op->dummy.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth); + + if (op->data.nbytes) + ret |= nxp_fspi_check_buswidth(f, op->data.buswidth); + + if (ret) + return false; + + /* + * The number of address bytes should be equal to or less than 4 bytes. + */ + if (op->addr.nbytes > 4) + return false; + + /* + * If requested address value is greater than controller assigned + * memory mapped space, return error as it didn't fit in the range + * of assigned address space. 
+ */ + if (op->addr.val >= f->memmap_phy_size) + return false; + + /* Max 64 dummy clock cycles supported */ + if (op->dummy.buswidth && + (op->dummy.nbytes * 8 / op->dummy.buswidth > 64)) + return false; + + /* Max data length, check controller limits and alignment */ + if (op->data.dir == SPI_MEM_DATA_IN && + (op->data.nbytes > f->devtype_data->ahb_buf_size || + (op->data.nbytes > f->devtype_data->rxfifo - 4 && + !IS_ALIGNED(op->data.nbytes, 8)))) + return false; + + if (op->data.dir == SPI_MEM_DATA_OUT && + op->data.nbytes > f->devtype_data->txfifo) + return false; + + return true; +} + +/* Instead of busy looping invoke readl_poll_timeout functionality. */ +static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base, + u32 mask, u32 delay_us, + u32 timeout_us, bool c) +{ + u32 reg; + + if (!f->devtype_data->little_endian) + mask = (u32)cpu_to_be32(mask); + + if (c) + return readl_poll_timeout(base, reg, (reg & mask), + delay_us, timeout_us); + else + return readl_poll_timeout(base, reg, !(reg & mask), + delay_us, timeout_us); +} + +/* + * If the slave device content being changed by Write/Erase, need to + * invalidate the AHB buffer. This can be achieved by doing the reset + * of controller after setting MCR0[SWRESET] bit. + */ +static inline void nxp_fspi_invalid(struct nxp_fspi *f) +{ + u32 reg; + int ret; + + reg = fspi_readl(f, f->iobase + FSPI_MCR0); + fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0); + + /* w1c register, wait unit clear */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0, + FSPI_MCR0_SWRST, 0, POLL_TOUT, false); + WARN_ON(ret); +} + +static void nxp_fspi_prepare_lut(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + u32 lutval[4] = {}; + int lutidx = 1, i; + + /* cmd */ + lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth), + op->cmd.opcode); + + /* addr bytes */ + if (op->addr.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR, + LUT_PAD(op->addr.buswidth), + op->addr.nbytes * 8); + lutidx++; + } + + /* dummy bytes, if needed */ + if (op->dummy.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY, + /* + * Due to FlexSPI controller limitation number of PAD for dummy + * buswidth needs to be programmed as equal to data buswidth. + */ + LUT_PAD(op->data.buswidth), + op->dummy.nbytes * 8 / + op->dummy.buswidth); + lutidx++; + } + + /* read/write data bytes */ + if (op->data.nbytes) { + lutval[lutidx / 2] |= LUT_DEF(lutidx, + op->data.dir == SPI_MEM_DATA_IN ? + LUT_NXP_READ : LUT_NXP_WRITE, + LUT_PAD(op->data.buswidth), + 0); + lutidx++; + } + + /* stop condition. 
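+ * Two instructions are packed per 32-bit LUT word (the (idx % 2) shift in
+ * LUT_DEF). As an illustration only, a hypothetical 1-1-4 fast read
+ * (opcode 0x6b, three address bytes, eight dummy clock cycles) would be
+ * packed roughly as:
+ *
+ *	lutval[0] = LUT_DEF(0, LUT_CMD,      LUT_PAD(1), 0x6b) |
+ *		    LUT_DEF(1, LUT_ADDR,     LUT_PAD(1), 24);
+ *	lutval[1] = LUT_DEF(2, LUT_DUMMY,    LUT_PAD(4), 8) |
+ *		    LUT_DEF(3, LUT_NXP_READ, LUT_PAD(4), 0);
+ *	lutval[2] = LUT_DEF(4, LUT_STOP, 0, 0);
+ *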
*/ + lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0); + + /* unlock LUT */ + fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY); + fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR); + + /* fill LUT */ + for (i = 0; i < ARRAY_SIZE(lutval); i++) + fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i)); + + dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n", + op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]); + + /* lock LUT */ + fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY); + fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR); +} + +static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f) +{ + int ret; + + ret = clk_prepare_enable(f->clk_en); + if (ret) + return ret; + + ret = clk_prepare_enable(f->clk); + if (ret) { + clk_disable_unprepare(f->clk_en); + return ret; + } + + return 0; +} + +static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f) +{ + clk_disable_unprepare(f->clk); + clk_disable_unprepare(f->clk_en); +} + +/* + * In FlexSPI controller, flash access is based on value of FSPI_FLSHXXCR0 + * register and start base address of the slave device. + * + * (Higher address) + * -------- <-- FLSHB2CR0 + * | B2 | + * | | + * B2 start address --> -------- <-- FLSHB1CR0 + * | B1 | + * | | + * B1 start address --> -------- <-- FLSHA2CR0 + * | A2 | + * | | + * A2 start address --> -------- <-- FLSHA1CR0 + * | A1 | + * | | + * A1 start address --> -------- (Lower address) + * + * + * Start base address defines the starting address range for given CS and + * FSPI_FLSHXXCR0 defines the size of the slave device connected at given CS. + * + * But, different targets are having different combinations of number of CS, + * some targets only have single CS or two CS covering controller's full + * memory mapped space area. + * Thus, implementation is being done as independent of the size and number + * of the connected slave device. + * Assign controller memory mapped space size as the size to the connected + * slave device. + * Mark FLSHxxCR0 as zero initially and then assign value only to the selected + * chip-select Flash configuration register. + * + * For e.g. to access CS2 (B1), FLSHB1CR0 register would be equal to the + * memory mapped size of the controller. + * Value for rest of the CS FLSHxxCR0 register would be zero. + * + */ +static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi) +{ + unsigned long rate = spi->max_speed_hz; + int ret; + uint64_t size_kb; + + /* + * Return, if previously selected slave device is same as current + * requested slave device. + */ + if (f->selected == spi->chip_select) + return; + + /* Reset FLSHxxCR0 registers */ + fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0); + fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0); + + /* Assign controller memory mapped space as size, KBytes, of flash. */ + size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size); + + fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 + + 4 * spi->chip_select); + + dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi->chip_select); + + nxp_fspi_clk_disable_unprep(f); + + ret = clk_set_rate(f->clk, rate); + if (ret) + return; + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return; + + f->selected = spi->chip_select; +} + +static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op) +{ + u32 len = op->data.nbytes; + + /* Read out the data directly from the AHB buffer. 
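+ * op->addr.val can be used directly as an offset into the AHB window here
+ * because nxp_fspi_select_mem() maps the controller's whole memory-mapped
+ * size onto the currently selected chip select (its FLSHxxCR0 register).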
*/ + memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len); +} + +static void nxp_fspi_fill_txfifo(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int i, ret; + u8 *buf = (u8 *) op->data.buf.out; + + /* clear the TX FIFO. */ + fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR); + + /* + * Default value of water mark level is 8 bytes, hence in single + * write request controller can write max 8 bytes of data. + */ + + for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) { + /* Wait for TXFIFO empty */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPTXWE, 0, + POLL_TOUT, true); + WARN_ON(ret); + + fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR); + fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4); + fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR); + } + + if (i < op->data.nbytes) { + u32 data = 0; + int j; + /* Wait for TXFIFO empty */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPTXWE, 0, + POLL_TOUT, true); + WARN_ON(ret); + + for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) { + memcpy(&data, buf + i + j, 4); + fspi_writel(f, data, base + FSPI_TFDR + j); + } + fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR); + } +} + +static void nxp_fspi_read_rxfifo(struct nxp_fspi *f, + const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int i, ret; + int len = op->data.nbytes; + u8 *buf = (u8 *) op->data.buf.in; + + /* + * Default value of water mark level is 8 bytes, hence in single + * read request controller can read max 8 bytes of data. + */ + for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) { + /* Wait for RXFIFO available */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPRXWA, 0, + POLL_TOUT, true); + WARN_ON(ret); + + *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR); + *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4); + /* move the FIFO pointer */ + fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR); + } + + if (i < len) { + u32 tmp; + int size, j; + + buf = op->data.buf.in + i; + /* Wait for RXFIFO available */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR, + FSPI_INTR_IPRXWA, 0, + POLL_TOUT, true); + WARN_ON(ret); + + len = op->data.nbytes - i; + for (j = 0; j < op->data.nbytes - i; j += 4) { + tmp = fspi_readl(f, base + FSPI_RFDR + j); + size = min(len, 4); + memcpy(buf + j, &tmp, size); + len -= size; + } + } + + /* invalid the RXFIFO */ + fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR); + /* move the FIFO pointer */ + fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR); +} + +static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op) +{ + void __iomem *base = f->iobase; + int seqnum = 0; + int err = 0; + u32 reg; + + reg = fspi_readl(f, base + FSPI_IPRXFCR); + /* invalid RXFIFO first */ + reg &= ~FSPI_IPRXFCR_DMA_EN; + reg = reg | FSPI_IPRXFCR_CLR; + fspi_writel(f, reg, base + FSPI_IPRXFCR); + + init_completion(&f->c); + + fspi_writel(f, op->addr.val, base + FSPI_IPCR0); + /* + * Always start the sequence at the same index since we update + * the LUT at each exec_op() call. And also specify the DATA + * length, since it's has not been specified in the LUT. + */ + fspi_writel(f, op->data.nbytes | + (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) | + (seqnum << FSPI_IPCR1_SEQNUM_SHIFT), + base + FSPI_IPCR1); + + /* Trigger the LUT now. */ + fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD); + + /* Wait for the interrupt. 
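+ * The IPCMDDONE interrupt marks completion: nxp_fspi_irq_handler()
+ * completes &f->c, and the one second timeout below guards against a
+ * command that never finishes.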
*/ + if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000))) + err = -ETIMEDOUT; + + /* Invoke IP data read, if request is of data read. */ + if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN) + nxp_fspi_read_rxfifo(f, op); + + return err; +} + +static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + int err = 0; + + mutex_lock(&f->lock); + + /* Wait for controller being ready. */ + err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0, + FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true); + WARN_ON(err); + + nxp_fspi_select_mem(f, mem->spi); + + nxp_fspi_prepare_lut(f, op); + /* + * If we have large chunks of data, we read them through the AHB bus + * by accessing the mapped memory. In all other cases we use + * IP commands to access the flash. + */ + if (op->data.nbytes > (f->devtype_data->rxfifo - 4) && + op->data.dir == SPI_MEM_DATA_IN) { + nxp_fspi_read_ahb(f, op); + } else { + if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT) + nxp_fspi_fill_txfifo(f, op); + + err = nxp_fspi_do_op(f, op); + } + + /* Invalidate the data in the AHB buffer. */ + nxp_fspi_invalid(f); + + mutex_unlock(&f->lock); + + return err; +} + +static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + + if (op->data.dir == SPI_MEM_DATA_OUT) { + if (op->data.nbytes > f->devtype_data->txfifo) + op->data.nbytes = f->devtype_data->txfifo; + } else { + if (op->data.nbytes > f->devtype_data->ahb_buf_size) + op->data.nbytes = f->devtype_data->ahb_buf_size; + else if (op->data.nbytes > (f->devtype_data->rxfifo - 4)) + op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8); + } + + return 0; +} + +static int nxp_fspi_default_setup(struct nxp_fspi *f) +{ + void __iomem *base = f->iobase; + int ret, i; + u32 reg; + + /* disable and unprepare clock to avoid glitch pass to controller */ + nxp_fspi_clk_disable_unprep(f); + + /* the default frequency, we will change it later if necessary. */ + ret = clk_set_rate(f->clk, 20000000); + if (ret) + return ret; + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) + return ret; + + /* Reset the module */ + /* w1c register, wait unit clear */ + ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0, + FSPI_MCR0_SWRST, 0, POLL_TOUT, false); + WARN_ON(ret); + + /* Disable the module */ + fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0); + + /* Reset the DLL register to default value */ + fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR); + fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR); + + /* enable module */ + fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF), + base + FSPI_MCR0); + + /* + * Disable same device enable bit and configure all slave devices + * independently. + */ + reg = fspi_readl(f, f->iobase + FSPI_MCR2); + reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN); + fspi_writel(f, reg, base + FSPI_MCR2); + + /* AHB configuration for access buffer 0~7. */ + for (i = 0; i < 7; i++) + fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i); + + /* + * Set ADATSZ with the maximum AHB buffer size to improve the read + * performance. + */ + fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 | + FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0); + + /* prefetch and no start address alignment limitation */ + fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT, + base + FSPI_AHBCR); + + /* AHB Read - Set lut sequence ID for all CS. 
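+ * SEQID_LUT (31) is written to every FLSHxxCR2, so AHB-triggered reads
+ * replay the same LUT sequence that nxp_fspi_exec_op() rewrites before
+ * each operation.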
*/ + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2); + fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2); + + f->selected = -1; + + /* enable the interrupt */ + fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN); + + return 0; +} + +static const char *nxp_fspi_get_name(struct spi_mem *mem) +{ + struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master); + struct device *dev = &mem->spi->dev; + const char *name; + + // Set custom name derived from the platform_device of the controller. + if (of_get_available_child_count(f->dev->of_node) == 1) + return dev_name(f->dev); + + name = devm_kasprintf(dev, GFP_KERNEL, + "%s-%d", dev_name(f->dev), + mem->spi->chip_select); + + if (!name) { + dev_err(dev, "failed to get memory for custom flash name\n"); + return ERR_PTR(-ENOMEM); + } + + return name; +} + +static const struct spi_controller_mem_ops nxp_fspi_mem_ops = { + .adjust_op_size = nxp_fspi_adjust_op_size, + .supports_op = nxp_fspi_supports_op, + .exec_op = nxp_fspi_exec_op, + .get_name = nxp_fspi_get_name, +}; + +static int nxp_fspi_probe(struct platform_device *pdev) +{ + struct spi_controller *ctlr; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct nxp_fspi *f; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*f)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL | + SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL; + + f = spi_controller_get_devdata(ctlr); + f->dev = dev; + f->devtype_data = of_device_get_match_data(dev); + if (!f->devtype_data) { + ret = -ENODEV; + goto err_put_ctrl; + } + + platform_set_drvdata(pdev, f); + + /* find the resources - configuration register address space */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base"); + f->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(f->iobase)) { + ret = PTR_ERR(f->iobase); + goto err_put_ctrl; + } + + /* find the resources - controller memory mapped space */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap"); + f->ahb_addr = devm_ioremap_resource(dev, res); + if (IS_ERR(f->ahb_addr)) { + ret = PTR_ERR(f->ahb_addr); + goto err_put_ctrl; + } + + /* assign memory mapped starting address and mapped size. 
*/ + f->memmap_phy = res->start; + f->memmap_phy_size = resource_size(res); + + /* find the clocks */ + f->clk_en = devm_clk_get(dev, "fspi_en"); + if (IS_ERR(f->clk_en)) { + ret = PTR_ERR(f->clk_en); + goto err_put_ctrl; + } + + f->clk = devm_clk_get(dev, "fspi"); + if (IS_ERR(f->clk)) { + ret = PTR_ERR(f->clk); + goto err_put_ctrl; + } + + ret = nxp_fspi_clk_prep_enable(f); + if (ret) { + dev_err(dev, "can not enable the clock\n"); + goto err_put_ctrl; + } + + /* find the irq */ + ret = platform_get_irq(pdev, 0); + if (ret < 0) { + dev_err(dev, "failed to get the irq: %d\n", ret); + goto err_disable_clk; + } + + ret = devm_request_irq(dev, ret, + nxp_fspi_irq_handler, 0, pdev->name, f); + if (ret) { + dev_err(dev, "failed to request irq: %d\n", ret); + goto err_disable_clk; + } + + mutex_init(&f->lock); + + ctlr->bus_num = -1; + ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT; + ctlr->mem_ops = &nxp_fspi_mem_ops; + + nxp_fspi_default_setup(f); + + ctlr->dev.of_node = np; + + ret = spi_register_controller(ctlr); + if (ret) + goto err_destroy_mutex; + + return 0; + +err_destroy_mutex: + mutex_destroy(&f->lock); + +err_disable_clk: + nxp_fspi_clk_disable_unprep(f); + +err_put_ctrl: + spi_controller_put(ctlr); + + dev_err(dev, "NXP FSPI probe failed\n"); + return ret; +} + +static int nxp_fspi_remove(struct platform_device *pdev) +{ + struct nxp_fspi *f = platform_get_drvdata(pdev); + + /* disable the hardware */ + fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0); + + nxp_fspi_clk_disable_unprep(f); + + mutex_destroy(&f->lock); + + return 0; +} + +static int nxp_fspi_suspend(struct device *dev) +{ + return 0; +} + +static int nxp_fspi_resume(struct device *dev) +{ + struct nxp_fspi *f = dev_get_drvdata(dev); + + nxp_fspi_default_setup(f); + + return 0; +} + +static const struct of_device_id nxp_fspi_dt_ids[] = { + { .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids); + +static const struct dev_pm_ops nxp_fspi_pm_ops = { + .suspend = nxp_fspi_suspend, + .resume = nxp_fspi_resume, +}; + +static struct platform_driver nxp_fspi_driver = { + .driver = { + .name = "nxp-fspi", + .of_match_table = nxp_fspi_dt_ids, + .pm = &nxp_fspi_pm_ops, + }, + .probe = nxp_fspi_probe, + .remove = nxp_fspi_remove, +}; +module_platform_driver(nxp_fspi_driver); + +MODULE_DESCRIPTION("NXP FSPI Controller Driver"); +MODULE_AUTHOR("NXP Semiconductor"); +MODULE_AUTHOR("Yogesh Narayan Gaur "); +MODULE_AUTHOR("Boris Brezillon "); +MODULE_AUTHOR("Frieder Schrempf "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 2fd8881fcd65..8be304379628 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -623,8 +623,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; cfg.src_addr_width = width; cfg.dst_addr_width = width; - cfg.src_maxburst = es; - cfg.dst_maxburst = es; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; rx = xfer->rx_buf; tx = xfer->tx_buf; diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 0c793e31d60f..26684178786f 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -253,6 +253,7 @@ #define STATE_RUNNING ((void *) 1) #define STATE_DONE ((void *) 2) #define STATE_ERROR ((void *) -1) +#define STATE_TIMEOUT ((void *) -2) /* * SSP State - Whether Enabled or Disabled @@ -1484,6 +1485,30 @@ err_config_dma: writew(irqflags, 
SSP_IMSC(pl022->virtbase)); } +static void print_current_status(struct pl022 *pl022) +{ + u32 read_cr0; + u16 read_cr1, read_dmacr, read_sr; + + if (pl022->vendor->extended_cr) + read_cr0 = readl(SSP_CR0(pl022->virtbase)); + else + read_cr0 = readw(SSP_CR0(pl022->virtbase)); + read_cr1 = readw(SSP_CR1(pl022->virtbase)); + read_dmacr = readw(SSP_DMACR(pl022->virtbase)); + read_sr = readw(SSP_SR(pl022->virtbase)); + + dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0); + dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1); + dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr); + dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr); + dev_warn(&pl022->adev->dev, + "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n", + pl022->exp_fifo_level, + pl022->vendor->fifodepth); + +} + static void do_polling_transfer(struct pl022 *pl022) { struct spi_message *message = NULL; @@ -1535,7 +1560,8 @@ static void do_polling_transfer(struct pl022 *pl022) if (time_after(time, timeout)) { dev_warn(&pl022->adev->dev, "%s: timeout!\n", __func__); - message->state = STATE_ERROR; + message->state = STATE_TIMEOUT; + print_current_status(pl022); goto out; } cpu_relax(); @@ -1553,6 +1579,8 @@ out: /* Handle end of message */ if (message->state == STATE_DONE) message->status = 0; + else if (message->state == STATE_TIMEOUT) + message->status = -EAGAIN; else message->status = -EIO; diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 2fa7f4b43492..15592598273e 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -23,7 +23,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, bool error) { - struct spi_message *msg = drv_data->master->cur_msg; + struct spi_message *msg = drv_data->controller->cur_msg; /* * It is possible that one CPU is handling ROR interrupt and other @@ -59,7 +59,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, msg->status = -EIO; } - spi_finalize_current_transfer(drv_data->master); + spi_finalize_current_transfer(drv_data->controller); } } @@ -74,7 +74,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, struct spi_transfer *xfer) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); enum dma_slave_buswidth width; struct dma_slave_config cfg; struct dma_chan *chan; @@ -102,14 +102,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data, cfg.dst_maxburst = chip->dma_burst_size; sgt = &xfer->tx_sg; - chan = drv_data->master->dma_tx; + chan = drv_data->controller->dma_tx; } else { cfg.src_addr = drv_data->ssdr_physical; cfg.src_addr_width = width; cfg.src_maxburst = chip->dma_burst_size; sgt = &xfer->rx_sg; - chan = drv_data->master->dma_rx; + chan = drv_data->controller->dma_rx; } ret = dmaengine_slave_config(chan, &cfg); @@ -130,8 +130,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data) if (status & SSSR_ROR) { dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); - dmaengine_terminate_async(drv_data->master->dma_rx); - dmaengine_terminate_async(drv_data->master->dma_tx); + dmaengine_terminate_async(drv_data->controller->dma_rx); + dmaengine_terminate_async(drv_data->controller->dma_tx); pxa2xx_spi_dma_transfer_complete(drv_data, true); return IRQ_HANDLED; @@ -171,15 +171,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, return 0; err_rx: - dmaengine_terminate_async(drv_data->master->dma_tx); + 
dmaengine_terminate_async(drv_data->controller->dma_tx); err_tx: return err; } void pxa2xx_spi_dma_start(struct driver_data *drv_data) { - dma_async_issue_pending(drv_data->master->dma_rx); - dma_async_issue_pending(drv_data->master->dma_tx); + dma_async_issue_pending(drv_data->controller->dma_rx); + dma_async_issue_pending(drv_data->controller->dma_tx); atomic_set(&drv_data->dma_running, 1); } @@ -187,30 +187,30 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data) void pxa2xx_spi_dma_stop(struct driver_data *drv_data) { atomic_set(&drv_data->dma_running, 0); - dmaengine_terminate_sync(drv_data->master->dma_rx); - dmaengine_terminate_sync(drv_data->master->dma_tx); + dmaengine_terminate_sync(drv_data->controller->dma_rx); + dmaengine_terminate_sync(drv_data->controller->dma_tx); } int pxa2xx_spi_dma_setup(struct driver_data *drv_data) { - struct pxa2xx_spi_master *pdata = drv_data->master_info; + struct pxa2xx_spi_controller *pdata = drv_data->controller_info; struct device *dev = &drv_data->pdev->dev; - struct spi_controller *master = drv_data->master; + struct spi_controller *controller = drv_data->controller; dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - master->dma_tx = dma_request_slave_channel_compat(mask, + controller->dma_tx = dma_request_slave_channel_compat(mask, pdata->dma_filter, pdata->tx_param, dev, "tx"); - if (!master->dma_tx) + if (!controller->dma_tx) return -ENODEV; - master->dma_rx = dma_request_slave_channel_compat(mask, + controller->dma_rx = dma_request_slave_channel_compat(mask, pdata->dma_filter, pdata->rx_param, dev, "rx"); - if (!master->dma_rx) { - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if (!controller->dma_rx) { + dma_release_channel(controller->dma_tx); + controller->dma_tx = NULL; return -ENODEV; } @@ -219,17 +219,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data) void pxa2xx_spi_dma_release(struct driver_data *drv_data) { - struct spi_controller *master = drv_data->master; + struct spi_controller *controller = drv_data->controller; - if (master->dma_rx) { - dmaengine_terminate_sync(master->dma_rx); - dma_release_channel(master->dma_rx); - master->dma_rx = NULL; + if (controller->dma_rx) { + dmaengine_terminate_sync(controller->dma_rx); + dma_release_channel(controller->dma_rx); + controller->dma_rx = NULL; } - if (master->dma_tx) { - dmaengine_terminate_sync(master->dma_tx); - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if (controller->dma_tx) { + dmaengine_terminate_sync(controller->dma_tx); + dma_release_channel(controller->dma_tx); + controller->dma_tx = NULL; } } diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 869f188b02eb..1727fdfbac28 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -197,7 +197,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, struct platform_device_info pi; int ret; struct platform_device *pdev; - struct pxa2xx_spi_master spi_pdata; + struct pxa2xx_spi_controller spi_pdata; struct ssp_device *ssp; struct pxa_spi_info *c; char buf[40]; @@ -265,7 +265,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev, static void pxa2xx_spi_pci_remove(struct pci_dev *dev) { struct platform_device *pdev = pci_get_drvdata(dev); - struct pxa2xx_spi_master *spi_pdata; + struct pxa2xx_spi_controller *spi_pdata; spi_pdata = dev_get_platdata(&pdev->dev); diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index d84b893a64d7..b6ddba833d02 100644 --- a/drivers/spi/spi-pxa2xx.c +++ 
b/drivers/spi/spi-pxa2xx.c @@ -328,7 +328,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data) __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); /* Enable multiblock DMA transfers */ - if (drv_data->master_info->enable_dma) { + if (drv_data->controller_info->enable_dma) { __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1); if (config->reg_general >= 0) { @@ -368,7 +368,7 @@ static void lpss_ssp_select_cs(struct spi_device *spi, __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); ndelay(1000000000 / - (drv_data->master->max_speed_hz / 2)); + (drv_data->controller->max_speed_hz / 2)); } } @@ -567,7 +567,7 @@ static int u32_reader(struct driver_data *drv_data) static void reset_sccr1(struct driver_data *drv_data) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); u32 sccr1_reg; sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; @@ -599,8 +599,8 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) dev_err(&drv_data->pdev->dev, "%s\n", msg); - drv_data->master->cur_msg->status = -EIO; - spi_finalize_current_transfer(drv_data->master); + drv_data->controller->cur_msg->status = -EIO; + spi_finalize_current_transfer(drv_data->controller); } static void int_transfer_complete(struct driver_data *drv_data) @@ -611,7 +611,7 @@ static void int_transfer_complete(struct driver_data *drv_data) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); - spi_finalize_current_transfer(drv_data->master); + spi_finalize_current_transfer(drv_data->controller); } static irqreturn_t interrupt_transfer(struct driver_data *drv_data) @@ -747,7 +747,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id) pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1); pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); - if (!drv_data->master->cur_msg) { + if (!drv_data->controller->cur_msg) { handle_bad_msg(drv_data); /* Never fail */ return IRQ_HANDLED; @@ -879,7 +879,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds) static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) { - unsigned long ssp_clk = drv_data->master->max_speed_hz; + unsigned long ssp_clk = drv_data->controller->max_speed_hz; const struct ssp_device *ssp = drv_data->ssp; rate = min_t(int, ssp_clk, rate); @@ -894,7 +894,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, int rate) { struct chip_data *chip = - spi_get_ctldata(drv_data->master->cur_msg->spi); + spi_get_ctldata(drv_data->controller->cur_msg->spi); unsigned int clk_div; switch (drv_data->ssp_type) { @@ -908,7 +908,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data, return clk_div << 8; } -static bool pxa2xx_spi_can_dma(struct spi_controller *master, +static bool pxa2xx_spi_can_dma(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *xfer) { @@ -919,12 +919,12 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *master, xfer->len >= chip->dma_burst_size; } -static int pxa2xx_spi_transfer_one(struct spi_controller *master, +static int pxa2xx_spi_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { - struct driver_data *drv_data = spi_controller_get_devdata(master); - struct spi_message *message = master->cur_msg; + struct driver_data *drv_data = spi_controller_get_devdata(controller); + struct spi_message *message = controller->cur_msg; struct chip_data *chip = 
spi_get_ctldata(message->spi); u32 dma_thresh = chip->dma_threshold; u32 dma_burst = chip->dma_burst_size; @@ -1006,9 +1006,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, "DMA burst size reduced to match bits_per_word\n"); } - dma_mapped = master->can_dma && - master->can_dma(master, message->spi, transfer) && - master->cur_msg_mapped; + dma_mapped = controller->can_dma && + controller->can_dma(controller, message->spi, transfer) && + controller->cur_msg_mapped; if (dma_mapped) { /* Ensure we have the correct interrupt handler */ @@ -1036,12 +1036,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits); if (!pxa25x_ssp_comp(drv_data)) dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", - master->max_speed_hz + controller->max_speed_hz / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)), dma_mapped ? "DMA" : "PIO"); else dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", - master->max_speed_hz / 2 + controller->max_speed_hz / 2 / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)), dma_mapped ? "DMA" : "PIO"); @@ -1092,7 +1092,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, } } - if (spi_controller_is_slave(master)) { + if (spi_controller_is_slave(controller)) { while (drv_data->write(drv_data)) ; if (drv_data->gpiod_ready) { @@ -1111,9 +1111,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master, return 1; } -static int pxa2xx_spi_slave_abort(struct spi_master *master) +static int pxa2xx_spi_slave_abort(struct spi_controller *controller) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Stop and reset SSP */ write_SSSR_CS(drv_data, drv_data->clear_sr); @@ -1126,16 +1126,16 @@ static int pxa2xx_spi_slave_abort(struct spi_master *master) dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); - drv_data->master->cur_msg->status = -EINTR; - spi_finalize_current_transfer(drv_data->master); + drv_data->controller->cur_msg->status = -EINTR; + spi_finalize_current_transfer(drv_data->controller); return 0; } -static void pxa2xx_spi_handle_err(struct spi_controller *master, +static void pxa2xx_spi_handle_err(struct spi_controller *controller, struct spi_message *msg) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP */ pxa2xx_spi_write(drv_data, SSCR0, @@ -1159,9 +1159,9 @@ static void pxa2xx_spi_handle_err(struct spi_controller *master, pxa2xx_spi_dma_stop(drv_data); } -static int pxa2xx_spi_unprepare_transfer(struct spi_controller *master) +static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP now */ pxa2xx_spi_write(drv_data, SSCR0, @@ -1260,7 +1260,7 @@ static int setup(struct spi_device *spi) break; default: tx_hi_thres = 0; - if (spi_controller_is_slave(drv_data->master)) { + if (spi_controller_is_slave(drv_data->controller)) { tx_thres = 1; rx_thres = 2; } else { @@ -1287,7 +1287,7 @@ static int setup(struct spi_device *spi) chip->frm = spi->chip_select; } - chip->enable_dma = drv_data->master_info->enable_dma; + chip->enable_dma = drv_data->controller_info->enable_dma; chip->timeout = TIMOUT_DFLT; } @@ -1310,7 +1310,7 @@ static int setup(struct spi_device *spi) if (chip_info->enable_loopback) chip->cr1 = 
SSCR1_LBM; } - if (spi_controller_is_slave(drv_data->master)) { + if (spi_controller_is_slave(drv_data->controller)) { chip->cr1 |= SSCR1_SCFR; chip->cr1 |= SSCR1_SCLKDIR; chip->cr1 |= SSCR1_SFRMDIR; @@ -1497,10 +1497,10 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param) #endif /* CONFIG_PCI */ -static struct pxa2xx_spi_master * +static struct pxa2xx_spi_controller * pxa2xx_spi_init_pdata(struct platform_device *pdev) { - struct pxa2xx_spi_master *pdata; + struct pxa2xx_spi_controller *pdata; struct acpi_device *adev; struct ssp_device *ssp; struct resource *res; @@ -1568,10 +1568,10 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev) return pdata; } -static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master, +static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller, unsigned int cs) { - struct driver_data *drv_data = spi_controller_get_devdata(master); + struct driver_data *drv_data = spi_controller_get_devdata(controller); if (has_acpi_companion(&drv_data->pdev->dev)) { switch (drv_data->ssp_type) { @@ -1595,8 +1595,8 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master, static int pxa2xx_spi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct pxa2xx_spi_master *platform_info; - struct spi_controller *master; + struct pxa2xx_spi_controller *platform_info; + struct spi_controller *controller; struct driver_data *drv_data; struct ssp_device *ssp; const struct lpss_config *config; @@ -1622,37 +1622,37 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } if (platform_info->is_slave) - master = spi_alloc_slave(dev, sizeof(struct driver_data)); + controller = spi_alloc_slave(dev, sizeof(struct driver_data)); else - master = spi_alloc_master(dev, sizeof(struct driver_data)); + controller = spi_alloc_master(dev, sizeof(struct driver_data)); - if (!master) { - dev_err(&pdev->dev, "cannot alloc spi_master\n"); + if (!controller) { + dev_err(&pdev->dev, "cannot alloc spi_controller\n"); pxa_ssp_free(ssp); return -ENOMEM; } - drv_data = spi_controller_get_devdata(master); - drv_data->master = master; - drv_data->master_info = platform_info; + drv_data = spi_controller_get_devdata(controller); + drv_data->controller = controller; + drv_data->controller_info = platform_info; drv_data->pdev = pdev; drv_data->ssp = ssp; - master->dev.of_node = pdev->dev.of_node; + controller->dev.of_node = pdev->dev.of_node; /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; - - master->bus_num = ssp->port_id; - master->dma_alignment = DMA_ALIGNMENT; - master->cleanup = cleanup; - master->setup = setup; - master->set_cs = pxa2xx_spi_set_cs; - master->transfer_one = pxa2xx_spi_transfer_one; - master->slave_abort = pxa2xx_spi_slave_abort; - master->handle_err = pxa2xx_spi_handle_err; - master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; - master->fw_translate_cs = pxa2xx_spi_fw_translate_cs; - master->auto_runtime_pm = true; - master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; + + controller->bus_num = ssp->port_id; + controller->dma_alignment = DMA_ALIGNMENT; + controller->cleanup = cleanup; + controller->setup = setup; + controller->set_cs = pxa2xx_spi_set_cs; + controller->transfer_one = pxa2xx_spi_transfer_one; + controller->slave_abort = pxa2xx_spi_slave_abort; + controller->handle_err = pxa2xx_spi_handle_err; + 
controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; + controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs; + controller->auto_runtime_pm = true; + controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; drv_data->ssp_type = ssp->type; @@ -1661,10 +1661,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) if (pxa25x_ssp_comp(drv_data)) { switch (drv_data->ssp_type) { case QUARK_X1000_SSP: - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); break; default: - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); break; } @@ -1673,7 +1673,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data->clear_sr = SSSR_ROR; drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; } else { - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; drv_data->dma_cr1 = DEFAULT_DMA_CR1; drv_data->clear_sr = SSSR_ROR | SSSR_TINT; @@ -1685,7 +1685,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) drv_data); if (status < 0) { dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); - goto out_error_master_alloc; + goto out_error_controller_alloc; } /* Setup DMA if requested */ @@ -1695,7 +1695,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) dev_dbg(dev, "no DMA channels available, using PIO\n"); platform_info->enable_dma = false; } else { - master->can_dma = pxa2xx_spi_can_dma; + controller->can_dma = pxa2xx_spi_can_dma; + controller->max_dma_len = MAX_DMA_LEN; } } @@ -1704,7 +1705,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) if (status) goto out_error_dma_irq_alloc; - master->max_speed_hz = clk_get_rate(ssp->clk); + controller->max_speed_hz = clk_get_rate(ssp->clk); /* Load default SSP configuration */ pxa2xx_spi_write(drv_data, SSCR0, 0); @@ -1727,7 +1728,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) break; default: - if (spi_controller_is_slave(master)) { + if (spi_controller_is_slave(controller)) { tmp = SSCR1_SCFR | SSCR1_SCLKDIR | SSCR1_SFRMDIR | @@ -1740,7 +1741,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) } pxa2xx_spi_write(drv_data, SSCR1, tmp); tmp = SSCR0_Motorola | SSCR0_DataSize(8); - if (!spi_controller_is_slave(master)) + if (!spi_controller_is_slave(controller)) tmp |= SSCR0_SCR(2); pxa2xx_spi_write(drv_data, SSCR0, tmp); break; @@ -1765,24 +1766,24 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) platform_info->num_chipselect = config->cs_num; } } - master->num_chipselect = platform_info->num_chipselect; + controller->num_chipselect = platform_info->num_chipselect; count = gpiod_count(&pdev->dev, "cs"); if (count > 0) { int i; - master->num_chipselect = max_t(int, count, - master->num_chipselect); + controller->num_chipselect = max_t(int, count, + controller->num_chipselect); drv_data->cs_gpiods = devm_kcalloc(&pdev->dev, - master->num_chipselect, sizeof(struct gpio_desc *), + controller->num_chipselect, sizeof(struct gpio_desc *), GFP_KERNEL); if (!drv_data->cs_gpiods) { status = -ENOMEM; goto out_error_clock_enabled; } - for (i = 0; i < master->num_chipselect; i++) { + for (i = 0; i < controller->num_chipselect; i++) { struct gpio_desc *gpiod; gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); @@ -1815,9 +1816,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) /* Register with the 
SPI framework */ platform_set_drvdata(pdev, drv_data); - status = devm_spi_register_controller(&pdev->dev, master); + status = devm_spi_register_controller(&pdev->dev, controller); if (status != 0) { - dev_err(&pdev->dev, "problem registering spi master\n"); + dev_err(&pdev->dev, "problem registering spi controller\n"); goto out_error_clock_enabled; } @@ -1832,8 +1833,8 @@ out_error_dma_irq_alloc: pxa2xx_spi_dma_release(drv_data); free_irq(ssp->irq, drv_data); -out_error_master_alloc: - spi_controller_put(master); +out_error_controller_alloc: + spi_controller_put(controller); pxa_ssp_free(ssp); return status; } @@ -1854,7 +1855,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) clk_disable_unprepare(ssp->clk); /* Release DMA */ - if (drv_data->master_info->enable_dma) + if (drv_data->controller_info->enable_dma) pxa2xx_spi_dma_release(drv_data); pm_runtime_put_noidle(&pdev->dev); @@ -1876,7 +1877,7 @@ static int pxa2xx_spi_suspend(struct device *dev) struct ssp_device *ssp = drv_data->ssp; int status; - status = spi_controller_suspend(drv_data->master); + status = spi_controller_suspend(drv_data->controller); if (status != 0) return status; pxa2xx_spi_write(drv_data, SSCR0, 0); @@ -1901,7 +1902,7 @@ static int pxa2xx_spi_resume(struct device *dev) } /* Start the queue running */ - return spi_controller_resume(drv_data->master); + return spi_controller_resume(drv_data->controller); } #endif diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 4e324da66ef7..aba777b4502d 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -31,10 +31,10 @@ struct driver_data { /* SPI framework hookup */ enum pxa_ssp_type ssp_type; - struct spi_controller *master; + struct spi_controller *controller; /* PXA hookup */ - struct pxa2xx_spi_master *master_info; + struct pxa2xx_spi_controller *controller_info; /* SSP register addresses */ void __iomem *ioaddr; diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index a4ef641b5227..556870dcdf79 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -180,7 +180,7 @@ struct rspi_data { void __iomem *addr; u32 max_speed_hz; - struct spi_master *master; + struct spi_controller *ctlr; wait_queue_head_t wait; struct clk *clk; u16 spcmd; @@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi) /* optional functions */ struct spi_ops { int (*set_config_register)(struct rspi_data *rspi, int access_size); - int (*transfer_one)(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer); + int (*transfer_one)(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer); u16 mode_bits; u16 flags; u16 fifo_size; @@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data) { int error = rspi_wait_for_tx_empty(rspi); if (error < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return error; } rspi_write_data(rspi, data); @@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi) error = rspi_wait_for_rx_full(rspi); if (error < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return error; } data = rspi_read_data(rspi); @@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { - desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, - rx->sgl, rx->nents, DMA_DEV_TO_MEM, + desc_rx = 
dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl, + rx->nents, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) { ret = -EAGAIN; @@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, } if (tx) { - desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, - tx->sgl, tx->nents, DMA_MEM_TO_DEV, + desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl, + tx->nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { ret = -EAGAIN; @@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, /* Now start DMA */ if (rx) - dma_async_issue_pending(rspi->master->dma_rx); + dma_async_issue_pending(rspi->ctlr->dma_rx); if (tx) - dma_async_issue_pending(rspi->master->dma_tx); + dma_async_issue_pending(rspi->ctlr->dma_tx); ret = wait_event_interruptible_timeout(rspi->wait, rspi->dma_callbacked, HZ); @@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, ret = 0; } else { if (!ret) { - dev_err(&rspi->master->dev, "DMA timeout\n"); + dev_err(&rspi->ctlr->dev, "DMA timeout\n"); ret = -ETIMEDOUT; } if (tx) - dmaengine_terminate_all(rspi->master->dma_tx); + dmaengine_terminate_all(rspi->ctlr->dma_tx); if (rx) - dmaengine_terminate_all(rspi->master->dma_rx); + dmaengine_terminate_all(rspi->ctlr->dma_rx); } rspi_disable_irq(rspi, irq_mask); @@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, no_dma_tx: if (rx) - dmaengine_terminate_all(rspi->master->dma_rx); + dmaengine_terminate_all(rspi->ctlr->dma_rx); no_dma_rx: if (ret == -EAGAIN) { pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->master->dev), - dev_name(&rspi->master->dev)); + dev_driver_string(&rspi->ctlr->dev), + dev_name(&rspi->ctlr->dev)); } return ret; } @@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi, return xfer->len > rspi->ops->fifo_size; } -static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, +static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); return __rspi_can_dma(rspi, xfer); } @@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, static int rspi_dma_check_then_transfer(struct rspi_data *rspi, struct spi_transfer *xfer) { - if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer)) + if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer)) return -EAGAIN; /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ @@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi, return 0; } -static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer) +static int rspi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); u8 spcr; spcr = rspi_read8(rspi, RSPI_SPCR); @@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, return rspi_common_transfer(rspi, xfer); } -static int rspi_rz_transfer_one(struct spi_master *master, +static int rspi_rz_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = 
spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); rspi_rz_receive_init(rspi); @@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx, if (n == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_tx_empty(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return ret; } for (i = 0; i < n; i++) @@ -747,7 +747,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx, ret = rspi_wait_for_rx_full(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return ret; } for (i = 0; i < n; i++) @@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) unsigned int i, len; int ret; - if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { + if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL); if (ret != -EAGAIN) return ret; @@ -796,7 +796,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) if (len == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_tx_empty(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "transmit timeout\n"); + dev_err(&rspi->ctlr->dev, "transmit timeout\n"); return ret; } for (i = 0; i < len; i++) @@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) unsigned int i, len; int ret; - if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { + if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) { int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg); if (ret != -EAGAIN) return ret; @@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) if (len == QSPI_BUFFER_SIZE) { ret = rspi_wait_for_rx_full(rspi); if (ret < 0) { - dev_err(&rspi->master->dev, "receive timeout\n"); + dev_err(&rspi->ctlr->dev, "receive timeout\n"); return ret; } for (i = 0; i < len; i++) @@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) return 0; } -static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, - struct spi_transfer *xfer) +static int qspi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi, struct spi_transfer *xfer) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); if (spi->mode & SPI_LOOP) { return qspi_transfer_out_in(rspi, xfer); @@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, static int rspi_setup(struct spi_device *spi) { - struct rspi_data *rspi = spi_master_get_devdata(spi->master); + struct rspi_data *rspi = spi_controller_get_devdata(spi->controller); rspi->max_speed_hz = spi->max_speed_hz; @@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi, return 0; } -static int rspi_prepare_message(struct spi_master *master, +static int rspi_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct rspi_data *rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); int ret; if (msg->spi->mode & @@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master, return 0; } -static int rspi_unprepare_message(struct spi_master *master, +static int rspi_unprepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct rspi_data 
*rspi = spi_master_get_devdata(master); + struct rspi_data *rspi = spi_controller_get_devdata(ctlr); /* Disable SPI function */ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR); @@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, return chan; } -static int rspi_request_dma(struct device *dev, struct spi_master *master, +static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr, const struct resource *res) { const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev); @@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master, return 0; } - master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, - res->start + RSPI_SPDR); - if (!master->dma_tx) + ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, + res->start + RSPI_SPDR); + if (!ctlr->dma_tx) return -ENODEV; - master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, - res->start + RSPI_SPDR); - if (!master->dma_rx) { - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, + res->start + RSPI_SPDR); + if (!ctlr->dma_rx) { + dma_release_channel(ctlr->dma_tx); + ctlr->dma_tx = NULL; return -ENODEV; } - master->can_dma = rspi_can_dma; + ctlr->can_dma = rspi_can_dma; dev_info(dev, "DMA available"); return 0; } -static void rspi_release_dma(struct spi_master *master) +static void rspi_release_dma(struct spi_controller *ctlr) { - if (master->dma_tx) - dma_release_channel(master->dma_tx); - if (master->dma_rx) - dma_release_channel(master->dma_rx); + if (ctlr->dma_tx) + dma_release_channel(ctlr->dma_tx); + if (ctlr->dma_rx) + dma_release_channel(ctlr->dma_rx); } static int rspi_remove(struct platform_device *pdev) { struct rspi_data *rspi = platform_get_drvdata(pdev); - rspi_release_dma(rspi->master); + rspi_release_dma(rspi->ctlr); pm_runtime_disable(&pdev->dev); return 0; @@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = { .set_config_register = rspi_set_config_register, .transfer_one = rspi_transfer_one, .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, - .flags = SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, }; @@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = { .set_config_register = rspi_rz_set_config_register, .transfer_one = rspi_rz_transfer_one, .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, - .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ }; @@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD, - .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, + .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, }; @@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = { MODULE_DEVICE_TABLE(of, rspi_of_match); -static int rspi_parse_dt(struct device *dev, struct spi_master *master) +static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr) { u32 num_cs; int error; @@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master) return error; } - master->num_chipselect = num_cs; + ctlr->num_chipselect = num_cs; return 0; } #else #define rspi_of_match NULL -static inline int rspi_parse_dt(struct device *dev, struct spi_master *master) +static inline int rspi_parse_dt(struct 
device *dev, struct spi_controller *ctlr) { return -EINVAL; } @@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq, static int rspi_probe(struct platform_device *pdev) { struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct rspi_data *rspi; int ret; const struct rspi_plat_data *rspi_pd; const struct spi_ops *ops; - master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); - if (master == NULL) + ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); + if (ctlr == NULL) return -ENOMEM; ops = of_device_get_match_data(&pdev->dev); if (ops) { - ret = rspi_parse_dt(&pdev->dev, master); + ret = rspi_parse_dt(&pdev->dev, ctlr); if (ret) goto error1; } else { ops = (struct spi_ops *)pdev->id_entry->driver_data; rspi_pd = dev_get_platdata(&pdev->dev); if (rspi_pd && rspi_pd->num_chipselect) - master->num_chipselect = rspi_pd->num_chipselect; + ctlr->num_chipselect = rspi_pd->num_chipselect; else - master->num_chipselect = 2; /* default */ + ctlr->num_chipselect = 2; /* default */ } /* ops parameter check */ @@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev) goto error1; } - rspi = spi_master_get_devdata(master); + rspi = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, rspi); rspi->ops = ops; - rspi->master = master; + rspi->ctlr = ctlr; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rspi->addr = devm_ioremap_resource(&pdev->dev, res); @@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev) init_waitqueue_head(&rspi->wait); - master->bus_num = pdev->id; - master->setup = rspi_setup; - master->auto_runtime_pm = true; - master->transfer_one = ops->transfer_one; - master->prepare_message = rspi_prepare_message; - master->unprepare_message = rspi_unprepare_message; - master->mode_bits = ops->mode_bits; - master->flags = ops->flags; - master->dev.of_node = pdev->dev.of_node; + ctlr->bus_num = pdev->id; + ctlr->setup = rspi_setup; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one = ops->transfer_one; + ctlr->prepare_message = rspi_prepare_message; + ctlr->unprepare_message = rspi_unprepare_message; + ctlr->mode_bits = ops->mode_bits; + ctlr->flags = ops->flags; + ctlr->dev.of_node = pdev->dev.of_node; ret = platform_get_irq_byname(pdev, "rx"); if (ret < 0) { @@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev) goto error2; } - ret = rspi_request_dma(&pdev->dev, master, res); + ret = rspi_request_dma(&pdev->dev, ctlr, res); if (ret < 0) dev_warn(&pdev->dev, "DMA not available, using PIO\n"); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto error3; } @@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev) return 0; error3: - rspi_release_dma(master); + rspi_release_dma(ctlr); error2: pm_runtime_disable(&pdev->dev); error1: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } @@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev) { struct rspi_data *rspi = dev_get_drvdata(dev); - return spi_master_suspend(rspi->master); + return spi_controller_suspend(rspi->ctlr); } static int rspi_resume(struct device *dev) { struct rspi_data *rspi = dev_get_drvdata(dev); - return spi_master_resume(rspi->master); + return spi_controller_resume(rspi->ctlr); } static SIMPLE_DEV_PM_OPS(rspi_pm_ops, 
rspi_suspend, rspi_resume); diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c index dc0926e43665..7f73f91d412a 100644 --- a/drivers/spi/spi-sh-hspi.c +++ b/drivers/spi/spi-sh-hspi.c @@ -35,7 +35,7 @@ struct hspi_priv { void __iomem *addr; - struct spi_master *master; + struct spi_controller *ctlr; struct device *dev; struct clk *clk; }; @@ -140,10 +140,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi, hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */ } -static int hspi_transfer_one_message(struct spi_master *master, +static int hspi_transfer_one_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct hspi_priv *hspi = spi_master_get_devdata(master); + struct hspi_priv *hspi = spi_controller_get_devdata(ctlr); struct spi_transfer *t; u32 tx; u32 rx; @@ -205,7 +205,7 @@ static int hspi_transfer_one_message(struct spi_master *master, ndelay(nsecs); hspi_hw_cs_disable(hspi); } - spi_finalize_current_message(master); + spi_finalize_current_message(ctlr); return ret; } @@ -213,7 +213,7 @@ static int hspi_transfer_one_message(struct spi_master *master, static int hspi_probe(struct platform_device *pdev) { struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct hspi_priv *hspi; struct clk *clk; int ret; @@ -225,11 +225,9 @@ static int hspi_probe(struct platform_device *pdev) return -EINVAL; } - master = spi_alloc_master(&pdev->dev, sizeof(*hspi)); - if (!master) { - dev_err(&pdev->dev, "spi_alloc_master error.\n"); + ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi)); + if (!ctlr) return -ENOMEM; - } clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { @@ -238,33 +236,32 @@ static int hspi_probe(struct platform_device *pdev) goto error0; } - hspi = spi_master_get_devdata(master); + hspi = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, hspi); /* init hspi */ - hspi->master = master; + hspi->ctlr = ctlr; hspi->dev = &pdev->dev; hspi->clk = clk; hspi->addr = devm_ioremap(hspi->dev, res->start, resource_size(res)); if (!hspi->addr) { - dev_err(&pdev->dev, "ioremap error.\n"); ret = -ENOMEM; goto error1; } pm_runtime_enable(&pdev->dev); - master->bus_num = pdev->id; - master->mode_bits = SPI_CPOL | SPI_CPHA; - master->dev.of_node = pdev->dev.of_node; - master->auto_runtime_pm = true; - master->transfer_one_message = hspi_transfer_one_message; - master->bits_per_word_mask = SPI_BPW_MASK(8); + ctlr->bus_num = pdev->id; + ctlr->mode_bits = SPI_CPOL | SPI_CPHA; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one_message = hspi_transfer_one_message; + ctlr->bits_per_word_mask = SPI_BPW_MASK(8); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto error2; } @@ -275,7 +272,7 @@ static int hspi_probe(struct platform_device *pdev) error1: clk_put(clk); error0: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index d14b407cc800..e2eb466db10a 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * SuperH MSIOF SPI Master Interface + * SuperH MSIOF SPI Controller Interface * * Copyright (c) 2009 Magnus Damm * Copyright (C) 2014 Renesas Electronics Corporation @@ -32,14 +32,15 @@ #include struct sh_msiof_chipdata { + u32 
bits_per_word_mask; u16 tx_fifo_size; u16 rx_fifo_size; - u16 master_flags; + u16 ctlr_flags; u16 min_div_pow; }; struct sh_msiof_spi_priv { - struct spi_master *master; + struct spi_controller *ctlr; void __iomem *mapbase; struct clk *clk; struct platform_device *pdev; @@ -287,7 +288,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); sh_msiof_write(p, TSCR, scr); - if (!(p->master->flags & SPI_MASTER_MUST_TX)) + if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, RSCR, scr); } @@ -351,14 +352,14 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, tmp |= !cs_high << MDR1_SYNCAC_SHIFT; tmp |= lsb_first << MDR1_BITLSB_SHIFT; tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); - if (spi_controller_is_slave(p->master)) { + if (spi_controller_is_slave(p->ctlr)) { sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); } else { sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON | (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT); } - if (p->master->flags & SPI_MASTER_MUST_TX) { + if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ tmp &= ~0x0000ffff; } @@ -382,7 +383,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, { u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); - if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX)) + if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) sh_msiof_write(p, TMDR2, dr2); else sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); @@ -539,8 +540,9 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, static int sh_msiof_spi_setup(struct spi_device *spi) { - struct device_node *np = spi->master->dev.of_node; - struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); + struct device_node *np = spi->controller->dev.of_node; + struct sh_msiof_spi_priv *p = + spi_controller_get_devdata(spi->controller); u32 clr, set, tmp; if (!np) { @@ -556,7 +558,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; } - if (spi_controller_is_slave(p->master)) + if (spi_controller_is_slave(p->ctlr)) return 0; if (p->native_cs_inited && @@ -581,10 +583,10 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; } -static int sh_msiof_prepare_message(struct spi_master *master, +static int sh_msiof_prepare_message(struct spi_controller *ctlr, struct spi_message *msg) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); const struct spi_device *spi = msg->spi; u32 ss, cs_high; @@ -605,7 +607,7 @@ static int sh_msiof_prepare_message(struct spi_master *master, static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) { - bool slave = spi_controller_is_slave(p->master); + bool slave = spi_controller_is_slave(p->ctlr); int ret = 0; /* setup clock and rx/tx signals */ @@ -625,7 +627,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) { - bool slave = spi_controller_is_slave(p->master); + bool slave = spi_controller_is_slave(p->ctlr); int ret = 0; /* shut down frame, rx/tx and clock signals */ @@ -641,9 +643,9 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) return ret; } -static int sh_msiof_slave_abort(struct spi_master *master) +static int sh_msiof_slave_abort(struct spi_controller *ctlr) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct 
sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); p->slave_aborted = true; complete(&p->done); @@ -654,7 +656,7 @@ static int sh_msiof_slave_abort(struct spi_master *master) static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p, struct completion *x) { - if (spi_controller_is_slave(p->master)) { + if (spi_controller_is_slave(p->ctlr)) { if (wait_for_completion_interruptible(x) || p->slave_aborted) { dev_dbg(&p->pdev->dev, "interrupted\n"); @@ -754,7 +756,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { ier_bits |= IER_RDREQE | IER_RDMAE; - desc_rx = dmaengine_prep_slave_single(p->master->dma_rx, + desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, p->rx_dma_addr, len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_rx) @@ -769,9 +771,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, if (tx) { ier_bits |= IER_TDREQE | IER_TDMAE; - dma_sync_single_for_device(p->master->dma_tx->device->dev, + dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, p->tx_dma_addr, len, DMA_TO_DEVICE); - desc_tx = dmaengine_prep_slave_single(p->master->dma_tx, + desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, p->tx_dma_addr, len, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc_tx) { @@ -803,9 +805,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* Now start DMA */ if (rx) - dma_async_issue_pending(p->master->dma_rx); + dma_async_issue_pending(p->ctlr->dma_rx); if (tx) - dma_async_issue_pending(p->master->dma_tx); + dma_async_issue_pending(p->ctlr->dma_tx); ret = sh_msiof_spi_start(p, rx); if (ret) { @@ -845,9 +847,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } if (rx) - dma_sync_single_for_cpu(p->master->dma_rx->device->dev, - p->rx_dma_addr, len, - DMA_FROM_DEVICE); + dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev, + p->rx_dma_addr, len, DMA_FROM_DEVICE); return 0; @@ -856,10 +857,10 @@ stop_reset: sh_msiof_spi_stop(p, rx); stop_dma: if (tx) - dmaengine_terminate_all(p->master->dma_tx); + dmaengine_terminate_all(p->ctlr->dma_tx); no_dma_tx: if (rx) - dmaengine_terminate_all(p->master->dma_rx); + dmaengine_terminate_all(p->ctlr->dma_rx); sh_msiof_write(p, IER, 0); return ret; } @@ -907,11 +908,11 @@ static void copy_plain32(u32 *dst, const u32 *src, unsigned int words) memcpy(dst, src, words * 4); } -static int sh_msiof_transfer_one(struct spi_master *master, +static int sh_msiof_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *t) { - struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); + struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr); void (*copy32)(u32 *, const u32 *, unsigned int); void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); @@ -926,10 +927,10 @@ static int sh_msiof_transfer_one(struct spi_master *master, int ret; /* setup clocks (clock already enabled in chipselect()) */ - if (!spi_controller_is_slave(p->master)) + if (!spi_controller_is_slave(p->ctlr)) sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz); - while (master->dma_tx && len > 15) { + while (ctlr->dma_tx && len > 15) { /* * DMA supports 32-bit words only, hence pack 8-bit and 16-bit * words, with byte resp. word swapping. 
@@ -937,17 +938,13 @@ static int sh_msiof_transfer_one(struct spi_master *master, unsigned int l = 0; if (tx_buf) - l = min(len, p->tx_fifo_size * 4); + l = min(round_down(len, 4), p->tx_fifo_size * 4); if (rx_buf) - l = min(len, p->rx_fifo_size * 4); + l = min(round_down(len, 4), p->rx_fifo_size * 4); if (bits <= 8) { - if (l & 3) - break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 3) - break; copy32 = copy_wswap32; } else { copy32 = copy_plain32; @@ -1052,23 +1049,28 @@ static int sh_msiof_transfer_one(struct spi_master *master, } static const struct sh_msiof_chipdata sh_data = { + .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = 0, + .ctlr_flags = 0, .min_div_pow = 0, }; static const struct sh_msiof_chipdata rcar_gen2_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = SPI_MASTER_MUST_TX, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 0, }; static const struct sh_msiof_chipdata rcar_gen3_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), .tx_fifo_size = 64, .rx_fifo_size = 64, - .master_flags = SPI_MASTER_MUST_TX, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, .min_div_pow = 1, }; @@ -1136,7 +1138,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p) if (ret <= 0) return 0; - num_cs = max_t(unsigned int, ret, p->master->num_chipselect); + num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); for (i = 0; i < num_cs; i++) { struct gpio_desc *gpiod; @@ -1206,10 +1208,10 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) { struct platform_device *pdev = p->pdev; struct device *dev = &pdev->dev; - const struct sh_msiof_spi_info *info = dev_get_platdata(dev); + const struct sh_msiof_spi_info *info = p->info; unsigned int dma_tx_id, dma_rx_id; const struct resource *res; - struct spi_master *master; + struct spi_controller *ctlr; struct device *tx_dev, *rx_dev; if (dev->of_node) { @@ -1229,17 +1231,15 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) if (!res) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master = p->master; - master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - dma_tx_id, - res->start + TFDR); - if (!master->dma_tx) + ctlr = p->ctlr; + ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, + dma_tx_id, res->start + TFDR); + if (!ctlr->dma_tx) return -ENODEV; - master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - dma_rx_id, - res->start + RFDR); - if (!master->dma_rx) + ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, + dma_rx_id, res->start + RFDR); + if (!ctlr->dma_rx) goto free_tx_chan; p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); @@ -1250,13 +1250,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) if (!p->rx_dma_page) goto free_tx_page; - tx_dev = master->dma_tx->device->dev; + tx_dev = ctlr->dma_tx->device->dev; p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(tx_dev, p->tx_dma_addr)) goto free_rx_page; - rx_dev = master->dma_rx->device->dev; + rx_dev = ctlr->dma_rx->device->dev; p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(rx_dev, p->rx_dma_addr)) @@ -1272,34 +1272,34 @@ free_rx_page: free_tx_page: free_page((unsigned long)p->tx_dma_page); free_rx_chan: - 
dma_release_channel(master->dma_rx); + dma_release_channel(ctlr->dma_rx); free_tx_chan: - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + dma_release_channel(ctlr->dma_tx); + ctlr->dma_tx = NULL; return -ENODEV; } static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p) { - struct spi_master *master = p->master; + struct spi_controller *ctlr = p->ctlr; - if (!master->dma_tx) + if (!ctlr->dma_tx) return; - dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr, - PAGE_SIZE, DMA_FROM_DEVICE); - dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr, - PAGE_SIZE, DMA_TO_DEVICE); + dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE, + DMA_TO_DEVICE); free_page((unsigned long)p->rx_dma_page); free_page((unsigned long)p->tx_dma_page); - dma_release_channel(master->dma_rx); - dma_release_channel(master->dma_tx); + dma_release_channel(ctlr->dma_rx); + dma_release_channel(ctlr->dma_tx); } static int sh_msiof_spi_probe(struct platform_device *pdev) { struct resource *r; - struct spi_master *master; + struct spi_controller *ctlr; const struct sh_msiof_chipdata *chipdata; struct sh_msiof_spi_info *info; struct sh_msiof_spi_priv *p; @@ -1320,18 +1320,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) } if (info->mode == MSIOF_SPI_SLAVE) - master = spi_alloc_slave(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); + ctlr = spi_alloc_slave(&pdev->dev, + sizeof(struct sh_msiof_spi_priv)); else - master = spi_alloc_master(&pdev->dev, - sizeof(struct sh_msiof_spi_priv)); - if (master == NULL) + ctlr = spi_alloc_master(&pdev->dev, + sizeof(struct sh_msiof_spi_priv)); + if (ctlr == NULL) return -ENOMEM; - p = spi_master_get_devdata(master); + p = spi_controller_get_devdata(ctlr); platform_set_drvdata(pdev, p); - p->master = master; + p->ctlr = ctlr; p->info = info; p->min_div_pow = chipdata->min_div_pow; @@ -1378,31 +1378,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) p->rx_fifo_size = p->info->rx_fifo_override; /* Setup GPIO chip selects */ - master->num_chipselect = p->info->num_chipselect; + ctlr->num_chipselect = p->info->num_chipselect; ret = sh_msiof_get_cs_gpios(p); if (ret) goto err1; - /* init master code */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; - master->flags = chipdata->master_flags; - master->bus_num = pdev->id; - master->dev.of_node = pdev->dev.of_node; - master->setup = sh_msiof_spi_setup; - master->prepare_message = sh_msiof_prepare_message; - master->slave_abort = sh_msiof_slave_abort; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - master->auto_runtime_pm = true; - master->transfer_one = sh_msiof_transfer_one; + /* init controller code */ + ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; + ctlr->flags = chipdata->ctlr_flags; + ctlr->bus_num = pdev->id; + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->setup = sh_msiof_spi_setup; + ctlr->prepare_message = sh_msiof_prepare_message; + ctlr->slave_abort = sh_msiof_slave_abort; + ctlr->bits_per_word_mask = chipdata->bits_per_word_mask; + ctlr->auto_runtime_pm = true; + ctlr->transfer_one = sh_msiof_transfer_one; ret = sh_msiof_request_dma(p); if (ret < 0) dev_warn(&pdev->dev, "DMA not available, using PIO\n"); - ret = devm_spi_register_master(&pdev->dev, master); + ret = devm_spi_register_controller(&pdev->dev, ctlr); if (ret < 0) { - 
dev_err(&pdev->dev, "spi_register_master error.\n"); + dev_err(&pdev->dev, "devm_spi_register_controller error.\n"); goto err2; } @@ -1412,7 +1412,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) sh_msiof_release_dma(p); pm_runtime_disable(&pdev->dev); err1: - spi_master_put(master); + spi_controller_put(ctlr); return ret; } @@ -1436,14 +1436,14 @@ static int sh_msiof_spi_suspend(struct device *dev) { struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); - return spi_master_suspend(p->master); + return spi_controller_suspend(p->ctlr); } static int sh_msiof_spi_resume(struct device *dev) { struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); - return spi_master_resume(p->master); + return spi_controller_resume(p->ctlr); } static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, @@ -1465,7 +1465,7 @@ static struct platform_driver sh_msiof_spi_drv = { }; module_platform_driver(sh_msiof_spi_drv); -MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); +MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:spi_sh_msiof"); diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c new file mode 100644 index 000000000000..93ec2c6cdbfd --- /dev/null +++ b/drivers/spi/spi-sifive.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright 2018 SiFive, Inc. +// +// SiFive SPI controller driver (master mode only) +// +// Author: SiFive, Inc. +// sifive@sifive.com + +#include +#include +#include +#include +#include +#include +#include +#include + +#define SIFIVE_SPI_DRIVER_NAME "sifive_spi" + +#define SIFIVE_SPI_MAX_CS 32 +#define SIFIVE_SPI_DEFAULT_DEPTH 8 +#define SIFIVE_SPI_DEFAULT_MAX_BITS 8 + +/* register offsets */ +#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */ +#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */ +#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */ +#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */ +#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */ +#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */ +#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */ +#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */ +#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */ +#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */ +#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */ +#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */ +#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */ +#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */ +#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */ +#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */ + +/* sckdiv bits */ +#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU + +/* sckmode bits */ +#define SIFIVE_SPI_SCKMODE_PHA BIT(0) +#define SIFIVE_SPI_SCKMODE_POL BIT(1) +#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \ + SIFIVE_SPI_SCKMODE_POL) + +/* csmode bits */ +#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U +#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U +#define SIFIVE_SPI_CSMODE_MODE_OFF 3U + +/* delay0 bits */ +#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU +#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16) +#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16) + +/* delay1 bits */ +#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x)) +#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU +#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16) +#define 
SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16) + +/* fmt bits */ +#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U +#define SIFIVE_SPI_FMT_PROTO_DUAL 1U +#define SIFIVE_SPI_FMT_PROTO_QUAD 2U +#define SIFIVE_SPI_FMT_PROTO_MASK 3U +#define SIFIVE_SPI_FMT_ENDIAN BIT(2) +#define SIFIVE_SPI_FMT_DIR BIT(3) +#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16) +#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16) + +/* txdata bits */ +#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU +#define SIFIVE_SPI_TXDATA_FULL BIT(31) + +/* rxdata bits */ +#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU +#define SIFIVE_SPI_RXDATA_EMPTY BIT(31) + +/* ie and ip bits */ +#define SIFIVE_SPI_IP_TXWM BIT(0) +#define SIFIVE_SPI_IP_RXWM BIT(1) + +struct sifive_spi { + void __iomem *regs; /* virt. address of control registers */ + struct clk *clk; /* bus clock */ + unsigned int fifo_depth; /* fifo depth in words */ + u32 cs_inactive; /* level of the CS pins when inactive */ + struct completion done; /* wake-up from interrupt */ +}; + +static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value) +{ + iowrite32(value, spi->regs + offset); +} + +static u32 sifive_spi_read(struct sifive_spi *spi, int offset) +{ + return ioread32(spi->regs + offset); +} + +static void sifive_spi_init(struct sifive_spi *spi) +{ + /* Watermark interrupts are disabled by default */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + /* Default watermark FIFO threshold values */ + sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1); + sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0); + + /* Set CS/SCK Delays and Inactive Time to defaults */ + sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0, + SIFIVE_SPI_DELAY0_CSSCK(1) | + SIFIVE_SPI_DELAY0_SCKCS(1)); + sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1, + SIFIVE_SPI_DELAY1_INTERCS(1) | + SIFIVE_SPI_DELAY1_INTERXFR(0)); + + /* Exit specialized memory-mapped SPI flash mode */ + sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0); +} + +static int +sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg) +{ + struct sifive_spi *spi = spi_master_get_devdata(master); + struct spi_device *device = msg->spi; + + /* Update the chip select polarity */ + if (device->mode & SPI_CS_HIGH) + spi->cs_inactive &= ~BIT(device->chip_select); + else + spi->cs_inactive |= BIT(device->chip_select); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive); + + /* Select the correct device */ + sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select); + + /* Set clock mode */ + sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE, + device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK); + + return 0; +} + +static void sifive_spi_set_cs(struct spi_device *device, bool is_high) +{ + struct sifive_spi *spi = spi_master_get_devdata(device->master); + + /* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */ + if (device->mode & SPI_CS_HIGH) + is_high = !is_high; + + sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ? 
+ SIFIVE_SPI_CSMODE_MODE_AUTO : + SIFIVE_SPI_CSMODE_MODE_HOLD); +} + +static int +sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device, + struct spi_transfer *t) +{ + u32 cr; + unsigned int mode; + + /* Calculate and program the clock rate */ + cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1; + cr &= SIFIVE_SPI_SCKDIV_DIV_MASK; + sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr); + + mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits); + + /* Set frame format */ + cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word); + switch (mode) { + case SPI_NBITS_QUAD: + cr |= SIFIVE_SPI_FMT_PROTO_QUAD; + break; + case SPI_NBITS_DUAL: + cr |= SIFIVE_SPI_FMT_PROTO_DUAL; + break; + default: + cr |= SIFIVE_SPI_FMT_PROTO_SINGLE; + break; + } + if (device->mode & SPI_LSB_FIRST) + cr |= SIFIVE_SPI_FMT_ENDIAN; + if (!t->rx_buf) + cr |= SIFIVE_SPI_FMT_DIR; + sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr); + + /* We will want to poll if the time we need to wait is + * less than the context switching time. + * Let's call that threshold 5us. The operation will take: + * (8/mode) * fifo_depth / hz <= 5 * 10^-6 + * 1600000 * fifo_depth <= hz * mode + */ + return 1600000 * spi->fifo_depth <= t->speed_hz * mode; +} + +static irqreturn_t sifive_spi_irq(int irq, void *dev_id) +{ + struct sifive_spi *spi = dev_id; + u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP); + + if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) { + /* Disable interrupts until next transfer */ + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + complete(&spi->done); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll) +{ + if (poll) { + u32 cr; + + do { + cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP); + } while (!(cr & bit)); + } else { + reinit_completion(&spi->done); + sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit); + wait_for_completion(&spi->done); + } +} + +static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr) +{ + WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA) + & SIFIVE_SPI_TXDATA_FULL) != 0); + sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA, + *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK); +} + +static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr) +{ + u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA); + + WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0); + *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK; +} + +static int +sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device, + struct spi_transfer *t) +{ + struct sifive_spi *spi = spi_master_get_devdata(master); + int poll = sifive_spi_prep_transfer(spi, device, t); + const u8 *tx_ptr = t->tx_buf; + u8 *rx_ptr = t->rx_buf; + unsigned int remaining_words = t->len; + + while (remaining_words) { + unsigned int n_words = min(remaining_words, spi->fifo_depth); + unsigned int i; + + /* Enqueue n_words for transmission */ + for (i = 0; i < n_words; i++) + sifive_spi_tx(spi, tx_ptr++); + + if (rx_ptr) { + /* Wait for transmission + reception to complete */ + sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, + n_words - 1); + sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll); + + /* Read out all the data from the RX FIFO */ + for (i = 0; i < n_words; i++) + sifive_spi_rx(spi, rx_ptr++); + } else { + /* Wait for transmission to complete */ + sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll); + } + + remaining_words -= n_words; + } + + return 0; +} + +static int sifive_spi_probe(struct platform_device *pdev) +{ + struct sifive_spi *spi; + struct resource *res; + int 
ret, irq, num_cs; + u32 cs_bits, max_bits_per_word; + struct spi_master *master; + + master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi)); + if (!master) { + dev_err(&pdev->dev, "out of memory\n"); + return -ENOMEM; + } + + spi = spi_master_get_devdata(master); + init_completion(&spi->done); + platform_set_drvdata(pdev, master); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + spi->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(spi->regs)) { + ret = PTR_ERR(spi->regs); + goto put_master; + } + + spi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(spi->clk)) { + dev_err(&pdev->dev, "Unable to find bus clock\n"); + ret = PTR_ERR(spi->clk); + goto put_master; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Unable to find interrupt\n"); + ret = irq; + goto put_master; + } + + /* Optional parameters */ + ret = + of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth", + &spi->fifo_depth); + if (ret < 0) + spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH; + + ret = + of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word", + &max_bits_per_word); + + if (!ret && max_bits_per_word < 8) { + dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n"); + ret = -EINVAL; + goto put_master; + } + + /* Spin up the bus clock before hitting registers */ + ret = clk_prepare_enable(spi->clk); + if (ret) { + dev_err(&pdev->dev, "Unable to enable bus clock\n"); + goto put_master; + } + + /* probe the number of CS lines */ + spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU); + cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF); + sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive); + if (!cs_bits) { + dev_err(&pdev->dev, "Could not auto probe CS lines\n"); + ret = -EINVAL; + goto put_master; + } + + num_cs = ilog2(cs_bits) + 1; + if (num_cs > SIFIVE_SPI_MAX_CS) { + dev_err(&pdev->dev, "Invalid number of spi slaves\n"); + ret = -EINVAL; + goto put_master; + } + + /* Define our master */ + master->dev.of_node = pdev->dev.of_node; + master->bus_num = pdev->id; + master->num_chipselect = num_cs; + master->mode_bits = SPI_CPHA | SPI_CPOL + | SPI_CS_HIGH | SPI_LSB_FIRST + | SPI_TX_DUAL | SPI_TX_QUAD + | SPI_RX_DUAL | SPI_RX_QUAD; + /* TODO: add driver support for bits_per_word < 8 + * we need to "left-align" the bits (unless SPI_LSB_FIRST) + */ + master->bits_per_word_mask = SPI_BPW_MASK(8); + master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS; + master->prepare_message = sifive_spi_prepare_message; + master->set_cs = sifive_spi_set_cs; + master->transfer_one = sifive_spi_transfer_one; + + pdev->dev.dma_mask = NULL; + /* Configure the SPI master hardware */ + sifive_spi_init(spi); + + /* Register for SPI Interrupt */ + ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0, + dev_name(&pdev->dev), spi); + if (ret) { + dev_err(&pdev->dev, "Unable to bind to interrupt\n"); + goto put_master; + } + + dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n", + irq, master->num_chipselect); + + ret = devm_spi_register_master(&pdev->dev, master); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_master failed\n"); + goto put_master; + } + + return 0; + +put_master: + spi_master_put(master); + + return ret; +} + +static int sifive_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct sifive_spi *spi = spi_master_get_devdata(master); + + /* Disable all the interrupts just in case */ + 
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0); + + return 0; +} + +static const struct of_device_id sifive_spi_of_match[] = { + { .compatible = "sifive,spi0", }, + {} +}; +MODULE_DEVICE_TABLE(of, sifive_spi_of_match); + +static struct platform_driver sifive_spi_driver = { + .probe = sifive_spi_probe, + .remove = sifive_spi_remove, + .driver = { + .name = SIFIVE_SPI_DRIVER_NAME, + .of_match_table = sifive_spi_of_match, + }, +}; +module_platform_driver(sifive_spi_driver); + +MODULE_AUTHOR("SiFive, Inc. "); +MODULE_DESCRIPTION("SiFive SPI driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c index 8daa24eec624..1b7eebb72c07 100644 --- a/drivers/spi/spi-sprd.c +++ b/drivers/spi/spi-sprd.c @@ -2,6 +2,9 @@ // Copyright (C) 2018 Spreadtrum Communications Inc. #include +#include +#include +#include #include #include #include @@ -9,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -128,11 +132,28 @@ #define SPRD_SPI_DEFAULT_SOURCE 26000000 #define SPRD_SPI_MAX_SPEED_HZ 48000000 #define SPRD_SPI_AUTOSUSPEND_DELAY 100 +#define SPRD_SPI_DMA_STEP 8 + +enum sprd_spi_dma_channel { + SPRD_SPI_RX, + SPRD_SPI_TX, + SPRD_SPI_MAX, +}; + +struct sprd_spi_dma { + bool enable; + struct dma_chan *dma_chan[SPRD_SPI_MAX]; + enum dma_slave_buswidth width; + u32 fragmens_len; + u32 rx_len; +}; struct sprd_spi { void __iomem *base; + phys_addr_t phy_base; struct device *dev; struct clk *clk; + int irq; u32 src_clk; u32 hw_mode; u32 trans_len; @@ -141,6 +162,8 @@ struct sprd_spi { u32 hw_speed_hz; u32 len; int status; + struct sprd_spi_dma dma; + struct completion xfer_completion; const void *tx_buf; void *rx_buf; int (*read_bufs)(struct sprd_spi *ss, u32 len); @@ -380,7 +403,7 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t) { struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller); u32 trans_len = ss->trans_len, len; - int ret, write_size = 0; + int ret, write_size = 0, read_size = 0; while (trans_len) { len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE : @@ -416,19 +439,223 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t) goto complete; if (ss->trans_mode & SPRD_SPI_RX_MODE) - ss->read_bufs(ss, len); + read_size += ss->read_bufs(ss, len); trans_len -= len; } - ret = write_size; - + if (ss->trans_mode & SPRD_SPI_TX_MODE) + ret = write_size; + else + ret = read_size; complete: sprd_spi_enter_idle(ss); return ret; } +static void sprd_spi_irq_enable(struct sprd_spi *ss) +{ + u32 val; + + /* Clear interrupt status before enabling interrupt. */ + writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR, + ss->base + SPRD_SPI_INT_CLR); + /* Enable SPI interrupt only in DMA mode. 
*/ + val = readl_relaxed(ss->base + SPRD_SPI_INT_EN); + writel_relaxed(val | SPRD_SPI_TX_END_INT_EN | + SPRD_SPI_RX_END_INT_EN, + ss->base + SPRD_SPI_INT_EN); +} + +static void sprd_spi_irq_disable(struct sprd_spi *ss) +{ + writel_relaxed(0, ss->base + SPRD_SPI_INT_EN); +} + +static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable) +{ + u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2); + + if (enable) + val |= SPRD_SPI_DMA_EN; + else + val &= ~SPRD_SPI_DMA_EN; + + writel_relaxed(val, ss->base + SPRD_SPI_CTL2); +} + +static int sprd_spi_dma_submit(struct dma_chan *dma_chan, + struct dma_slave_config *c, + struct sg_table *sg, + enum dma_transfer_direction dir) +{ + struct dma_async_tx_descriptor *desc; + dma_cookie_t cookie; + unsigned long flags; + int ret; + + ret = dmaengine_slave_config(dma_chan, c); + if (ret < 0) + return ret; + + flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG, + SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT); + desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags); + if (!desc) + return -ENODEV; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + return dma_submit_error(cookie); + + dma_async_issue_pending(dma_chan); + + return 0; +} + +static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t) +{ + struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX]; + struct dma_slave_config config = { + .src_addr = ss->phy_base, + .src_addr_width = ss->dma.width, + .dst_addr_width = ss->dma.width, + .dst_maxburst = ss->dma.fragmens_len, + }; + int ret; + + ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM); + if (ret) + return ret; + + return ss->dma.rx_len; +} + +static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t) +{ + struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX]; + struct dma_slave_config config = { + .dst_addr = ss->phy_base, + .src_addr_width = ss->dma.width, + .dst_addr_width = ss->dma.width, + .src_maxburst = ss->dma.fragmens_len, + }; + int ret; + + ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV); + if (ret) + return ret; + + return t->len; +} + +static int sprd_spi_dma_request(struct sprd_spi *ss) +{ + ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn"); + if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) { + if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER) + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]); + + dev_err(ss->dev, "request RX DMA channel failed!\n"); + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]); + } + + ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn"); + if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) { + if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER) + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); + + dev_err(ss->dev, "request TX DMA channel failed!\n"); + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); + return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]); + } + + return 0; +} + +static void sprd_spi_dma_release(struct sprd_spi *ss) +{ + if (ss->dma.dma_chan[SPRD_SPI_RX]) + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]); + + if (ss->dma.dma_chan[SPRD_SPI_TX]) + dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]); +} + +static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev, + struct spi_transfer *t) +{ + struct sprd_spi *ss = spi_master_get_devdata(sdev->master); + u32 trans_len = ss->trans_len; + int ret, write_size = 0; + + reinit_completion(&ss->xfer_completion); + sprd_spi_irq_enable(ss); + if (ss->trans_mode & 
SPRD_SPI_TX_MODE) { + write_size = sprd_spi_dma_tx_config(ss, t); + sprd_spi_set_tx_length(ss, trans_len); + + /* + * For our 3 wires mode or dual TX line mode, we need + * to request the controller to transfer. + */ + if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL) + sprd_spi_tx_req(ss); + } else { + sprd_spi_set_rx_length(ss, trans_len); + + /* + * For our 3 wires mode or dual TX line mode, we need + * to request the controller to read. + */ + if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL) + sprd_spi_rx_req(ss); + else + write_size = ss->write_bufs(ss, trans_len); + } + + if (write_size < 0) { + ret = write_size; + dev_err(ss->dev, "failed to write, ret = %d\n", ret); + goto trans_complete; + } + + if (ss->trans_mode & SPRD_SPI_RX_MODE) { + /* + * Set up the DMA receive data length, which must be an + * integral multiple of fragment length. But when the length + * of received data is less than fragment length, DMA can be + * configured to receive data according to the actual length + * of received data. + */ + ss->dma.rx_len = t->len > ss->dma.fragmens_len ? + (t->len - t->len % ss->dma.fragmens_len) : + t->len; + ret = sprd_spi_dma_rx_config(ss, t); + if (ret < 0) { + dev_err(&sdev->dev, + "failed to configure rx DMA, ret = %d\n", ret); + goto trans_complete; + } + } + + sprd_spi_dma_enable(ss, true); + wait_for_completion(&(ss->xfer_completion)); + + if (ss->trans_mode & SPRD_SPI_TX_MODE) + ret = write_size; + else + ret = ss->dma.rx_len; + +trans_complete: + sprd_spi_dma_enable(ss, false); + sprd_spi_enter_idle(ss); + sprd_spi_irq_disable(ss); + + return ret; +} + static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz) { /* @@ -514,16 +741,22 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev, ss->trans_len = t->len; ss->read_bufs = sprd_spi_read_bufs_u8; ss->write_bufs = sprd_spi_write_bufs_u8; + ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP; break; case 16: ss->trans_len = t->len >> 1; ss->read_bufs = sprd_spi_read_bufs_u16; ss->write_bufs = sprd_spi_write_bufs_u16; + ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1; break; case 32: ss->trans_len = t->len >> 2; ss->read_bufs = sprd_spi_read_bufs_u32; ss->write_bufs = sprd_spi_write_bufs_u32; + ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES; + ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2; break; default: return -EINVAL; @@ -561,7 +794,11 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr, if (ret) goto setup_err; - ret = sprd_spi_txrx_bufs(sdev, t); + if (sctlr->can_dma(sctlr, sdev, t)) + ret = sprd_spi_dma_txrx_bufs(sdev, t); + else + ret = sprd_spi_txrx_bufs(sdev, t); + if (ret == t->len) ret = 0; else if (ret >= 0) @@ -573,6 +810,53 @@ setup_err: return ret; } +static irqreturn_t sprd_spi_handle_irq(int irq, void *data) +{ + struct sprd_spi *ss = (struct sprd_spi *)data; + u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS); + + if (val & SPRD_SPI_MASK_TX_END) { + writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR); + if (!(ss->trans_mode & SPRD_SPI_RX_MODE)) + complete(&ss->xfer_completion); + + return IRQ_HANDLED; + } + + if (val & SPRD_SPI_MASK_RX_END) { + writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR); + if (ss->dma.rx_len < ss->len) { + ss->rx_buf += ss->dma.rx_len; + ss->dma.rx_len += + ss->read_bufs(ss, ss->len - ss->dma.rx_len); + } + complete(&ss->xfer_completion); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int 
sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss) +{ + int ret; + + ss->irq = platform_get_irq(pdev, 0); + if (ss->irq < 0) { + dev_err(&pdev->dev, "failed to get irq resource\n"); + return ss->irq; + } + + ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq, + 0, pdev->name, ss); + if (ret) + dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n", + ss->irq, ret); + + return ret; +} + static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss) { struct clk *clk_spi, *clk_parent; @@ -603,6 +887,35 @@ static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss) return 0; } +static bool sprd_spi_can_dma(struct spi_controller *sctlr, + struct spi_device *spi, struct spi_transfer *t) +{ + struct sprd_spi *ss = spi_controller_get_devdata(sctlr); + + return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE); +} + +static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss) +{ + int ret; + + ret = sprd_spi_dma_request(ss); + if (ret) { + if (ret == -EPROBE_DEFER) + return ret; + + dev_warn(&pdev->dev, + "failed to request dma, enter no dma mode, ret = %d\n", + ret); + + return 0; + } + + ss->dma.enable = true; + + return 0; +} + static int sprd_spi_probe(struct platform_device *pdev) { struct spi_controller *sctlr; @@ -623,25 +936,36 @@ static int sprd_spi_probe(struct platform_device *pdev) goto free_controller; } + ss->phy_base = res->start; ss->dev = &pdev->dev; sctlr->dev.of_node = pdev->dev.of_node; sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL; sctlr->bus_num = pdev->id; sctlr->set_cs = sprd_spi_chipselect; sctlr->transfer_one = sprd_spi_transfer_one; + sctlr->can_dma = sprd_spi_can_dma; sctlr->auto_runtime_pm = true; sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1, SPRD_SPI_MAX_SPEED_HZ); + init_completion(&ss->xfer_completion); platform_set_drvdata(pdev, sctlr); ret = sprd_spi_clk_init(pdev, ss); if (ret) goto free_controller; - ret = clk_prepare_enable(ss->clk); + ret = sprd_spi_irq_init(pdev, ss); if (ret) goto free_controller; + ret = sprd_spi_dma_init(pdev, ss); + if (ret) + goto free_controller; + + ret = clk_prepare_enable(ss->clk); + if (ret) + goto release_dma; + ret = pm_runtime_set_active(&pdev->dev); if (ret < 0) goto disable_clk; @@ -670,6 +994,8 @@ err_rpm_put: pm_runtime_disable(&pdev->dev); disable_clk: clk_disable_unprepare(ss->clk); +release_dma: + sprd_spi_dma_release(ss); free_controller: spi_controller_put(sctlr); @@ -688,6 +1014,10 @@ static int sprd_spi_remove(struct platform_device *pdev) return ret; } + spi_controller_suspend(sctlr); + + if (ss->dma.enable) + sprd_spi_dma_release(ss); clk_disable_unprepare(ss->clk); pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -700,6 +1030,9 @@ static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev) struct spi_controller *sctlr = dev_get_drvdata(dev); struct sprd_spi *ss = spi_controller_get_devdata(sctlr); + if (ss->dma.enable) + sprd_spi_dma_release(ss); + clk_disable_unprepare(ss->clk); return 0; @@ -715,7 +1048,14 @@ static int __maybe_unused sprd_spi_runtime_resume(struct device *dev) if (ret) return ret; - return 0; + if (!ss->dma.enable) + return 0; + + ret = sprd_spi_dma_request(ss); + if (ret) + clk_disable_unprepare(ss->clk); + + return ret; } static const struct dev_pm_ops sprd_spi_pm_ops = { diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index ad1e55d3d5d5..4186ed20d796 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -1,23 +1,10 
@@ -/* - * STMicroelectronics STM32 SPI Controller driver (master mode only) - * - * Copyright (C) 2017, STMicroelectronics - All Rights Reserved - * Author(s): Amelie Delaunay for STMicroelectronics. - * - * License terms: GPL V2.0. - * - * spi_stm32 driver is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * spi_stm32 driver is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. - * - * You should have received a copy of the GNU General Public License along with - * spi_stm32 driver. If not, see . - */ +// SPDX-License-Identifier: GPL-2.0 +// +// STMicroelectronics STM32 SPI Controller driver (master mode only) +// +// Copyright (C) 2017, STMicroelectronics - All Rights Reserved +// Author(s): Amelie Delaunay for STMicroelectronics. + #include #include #include @@ -33,99 +20,251 @@ #define DRIVER_NAME "spi_stm32" -/* STM32 SPI registers */ -#define STM32_SPI_CR1 0x00 -#define STM32_SPI_CR2 0x04 -#define STM32_SPI_CFG1 0x08 -#define STM32_SPI_CFG2 0x0C -#define STM32_SPI_IER 0x10 -#define STM32_SPI_SR 0x14 -#define STM32_SPI_IFCR 0x18 -#define STM32_SPI_TXDR 0x20 -#define STM32_SPI_RXDR 0x30 -#define STM32_SPI_I2SCFGR 0x50 - -/* STM32_SPI_CR1 bit fields */ -#define SPI_CR1_SPE BIT(0) -#define SPI_CR1_MASRX BIT(8) -#define SPI_CR1_CSTART BIT(9) -#define SPI_CR1_CSUSP BIT(10) -#define SPI_CR1_HDDIR BIT(11) -#define SPI_CR1_SSI BIT(12) - -/* STM32_SPI_CR2 bit fields */ -#define SPI_CR2_TSIZE_SHIFT 0 -#define SPI_CR2_TSIZE GENMASK(15, 0) - -/* STM32_SPI_CFG1 bit fields */ -#define SPI_CFG1_DSIZE_SHIFT 0 -#define SPI_CFG1_DSIZE GENMASK(4, 0) -#define SPI_CFG1_FTHLV_SHIFT 5 -#define SPI_CFG1_FTHLV GENMASK(8, 5) -#define SPI_CFG1_RXDMAEN BIT(14) -#define SPI_CFG1_TXDMAEN BIT(15) -#define SPI_CFG1_MBR_SHIFT 28 -#define SPI_CFG1_MBR GENMASK(30, 28) -#define SPI_CFG1_MBR_MIN 0 -#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28) - -/* STM32_SPI_CFG2 bit fields */ -#define SPI_CFG2_MIDI_SHIFT 4 -#define SPI_CFG2_MIDI GENMASK(7, 4) -#define SPI_CFG2_COMM_SHIFT 17 -#define SPI_CFG2_COMM GENMASK(18, 17) -#define SPI_CFG2_SP_SHIFT 19 -#define SPI_CFG2_SP GENMASK(21, 19) -#define SPI_CFG2_MASTER BIT(22) -#define SPI_CFG2_LSBFRST BIT(23) -#define SPI_CFG2_CPHA BIT(24) -#define SPI_CFG2_CPOL BIT(25) -#define SPI_CFG2_SSM BIT(26) -#define SPI_CFG2_AFCNTR BIT(31) - -/* STM32_SPI_IER bit fields */ -#define SPI_IER_RXPIE BIT(0) -#define SPI_IER_TXPIE BIT(1) -#define SPI_IER_DXPIE BIT(2) -#define SPI_IER_EOTIE BIT(3) -#define SPI_IER_TXTFIE BIT(4) -#define SPI_IER_OVRIE BIT(6) -#define SPI_IER_MODFIE BIT(9) -#define SPI_IER_ALL GENMASK(10, 0) - -/* STM32_SPI_SR bit fields */ -#define SPI_SR_RXP BIT(0) -#define SPI_SR_TXP BIT(1) -#define SPI_SR_EOT BIT(3) -#define SPI_SR_OVR BIT(6) -#define SPI_SR_MODF BIT(9) -#define SPI_SR_SUSP BIT(11) -#define SPI_SR_RXPLVL_SHIFT 13 -#define SPI_SR_RXPLVL GENMASK(14, 13) -#define SPI_SR_RXWNE BIT(15) - -/* STM32_SPI_IFCR bit fields */ -#define SPI_IFCR_ALL GENMASK(11, 3) - -/* STM32_SPI_I2SCFGR bit fields */ -#define SPI_I2SCFGR_I2SMOD BIT(0) - -/* SPI Master Baud Rate min/max divisor */ -#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN) -#define SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX) - -/* SPI Communication mode */ +/* STM32F4 SPI registers */ +#define STM32F4_SPI_CR1 0x00 
+#define STM32F4_SPI_CR2 0x04 +#define STM32F4_SPI_SR 0x08 +#define STM32F4_SPI_DR 0x0C +#define STM32F4_SPI_I2SCFGR 0x1C + +/* STM32F4_SPI_CR1 bit fields */ +#define STM32F4_SPI_CR1_CPHA BIT(0) +#define STM32F4_SPI_CR1_CPOL BIT(1) +#define STM32F4_SPI_CR1_MSTR BIT(2) +#define STM32F4_SPI_CR1_BR_SHIFT 3 +#define STM32F4_SPI_CR1_BR GENMASK(5, 3) +#define STM32F4_SPI_CR1_SPE BIT(6) +#define STM32F4_SPI_CR1_LSBFRST BIT(7) +#define STM32F4_SPI_CR1_SSI BIT(8) +#define STM32F4_SPI_CR1_SSM BIT(9) +#define STM32F4_SPI_CR1_RXONLY BIT(10) +#define STM32F4_SPI_CR1_DFF BIT(11) +#define STM32F4_SPI_CR1_CRCNEXT BIT(12) +#define STM32F4_SPI_CR1_CRCEN BIT(13) +#define STM32F4_SPI_CR1_BIDIOE BIT(14) +#define STM32F4_SPI_CR1_BIDIMODE BIT(15) +#define STM32F4_SPI_CR1_BR_MIN 0 +#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3) + +/* STM32F4_SPI_CR2 bit fields */ +#define STM32F4_SPI_CR2_RXDMAEN BIT(0) +#define STM32F4_SPI_CR2_TXDMAEN BIT(1) +#define STM32F4_SPI_CR2_SSOE BIT(2) +#define STM32F4_SPI_CR2_FRF BIT(4) +#define STM32F4_SPI_CR2_ERRIE BIT(5) +#define STM32F4_SPI_CR2_RXNEIE BIT(6) +#define STM32F4_SPI_CR2_TXEIE BIT(7) + +/* STM32F4_SPI_SR bit fields */ +#define STM32F4_SPI_SR_RXNE BIT(0) +#define STM32F4_SPI_SR_TXE BIT(1) +#define STM32F4_SPI_SR_CHSIDE BIT(2) +#define STM32F4_SPI_SR_UDR BIT(3) +#define STM32F4_SPI_SR_CRCERR BIT(4) +#define STM32F4_SPI_SR_MODF BIT(5) +#define STM32F4_SPI_SR_OVR BIT(6) +#define STM32F4_SPI_SR_BSY BIT(7) +#define STM32F4_SPI_SR_FRE BIT(8) + +/* STM32F4_SPI_I2SCFGR bit fields */ +#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11) + +/* STM32F4 SPI Baud Rate min/max divisor */ +#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN) +#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX) + +/* STM32H7 SPI registers */ +#define STM32H7_SPI_CR1 0x00 +#define STM32H7_SPI_CR2 0x04 +#define STM32H7_SPI_CFG1 0x08 +#define STM32H7_SPI_CFG2 0x0C +#define STM32H7_SPI_IER 0x10 +#define STM32H7_SPI_SR 0x14 +#define STM32H7_SPI_IFCR 0x18 +#define STM32H7_SPI_TXDR 0x20 +#define STM32H7_SPI_RXDR 0x30 +#define STM32H7_SPI_I2SCFGR 0x50 + +/* STM32H7_SPI_CR1 bit fields */ +#define STM32H7_SPI_CR1_SPE BIT(0) +#define STM32H7_SPI_CR1_MASRX BIT(8) +#define STM32H7_SPI_CR1_CSTART BIT(9) +#define STM32H7_SPI_CR1_CSUSP BIT(10) +#define STM32H7_SPI_CR1_HDDIR BIT(11) +#define STM32H7_SPI_CR1_SSI BIT(12) + +/* STM32H7_SPI_CR2 bit fields */ +#define STM32H7_SPI_CR2_TSIZE_SHIFT 0 +#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0) + +/* STM32H7_SPI_CFG1 bit fields */ +#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0 +#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0) +#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5 +#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5) +#define STM32H7_SPI_CFG1_RXDMAEN BIT(14) +#define STM32H7_SPI_CFG1_TXDMAEN BIT(15) +#define STM32H7_SPI_CFG1_MBR_SHIFT 28 +#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28) +#define STM32H7_SPI_CFG1_MBR_MIN 0 +#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28) + +/* STM32H7_SPI_CFG2 bit fields */ +#define STM32H7_SPI_CFG2_MIDI_SHIFT 4 +#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4) +#define STM32H7_SPI_CFG2_COMM_SHIFT 17 +#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17) +#define STM32H7_SPI_CFG2_SP_SHIFT 19 +#define STM32H7_SPI_CFG2_SP GENMASK(21, 19) +#define STM32H7_SPI_CFG2_MASTER BIT(22) +#define STM32H7_SPI_CFG2_LSBFRST BIT(23) +#define STM32H7_SPI_CFG2_CPHA BIT(24) +#define STM32H7_SPI_CFG2_CPOL BIT(25) +#define STM32H7_SPI_CFG2_SSM BIT(26) +#define STM32H7_SPI_CFG2_AFCNTR BIT(31) + +/* STM32H7_SPI_IER bit fields */ +#define STM32H7_SPI_IER_RXPIE BIT(0) +#define 
STM32H7_SPI_IER_TXPIE BIT(1) +#define STM32H7_SPI_IER_DXPIE BIT(2) +#define STM32H7_SPI_IER_EOTIE BIT(3) +#define STM32H7_SPI_IER_TXTFIE BIT(4) +#define STM32H7_SPI_IER_OVRIE BIT(6) +#define STM32H7_SPI_IER_MODFIE BIT(9) +#define STM32H7_SPI_IER_ALL GENMASK(10, 0) + +/* STM32H7_SPI_SR bit fields */ +#define STM32H7_SPI_SR_RXP BIT(0) +#define STM32H7_SPI_SR_TXP BIT(1) +#define STM32H7_SPI_SR_EOT BIT(3) +#define STM32H7_SPI_SR_OVR BIT(6) +#define STM32H7_SPI_SR_MODF BIT(9) +#define STM32H7_SPI_SR_SUSP BIT(11) +#define STM32H7_SPI_SR_RXPLVL_SHIFT 13 +#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13) +#define STM32H7_SPI_SR_RXWNE BIT(15) + +/* STM32H7_SPI_IFCR bit fields */ +#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3) + +/* STM32H7_SPI_I2SCFGR bit fields */ +#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0) + +/* STM32H7 SPI Master Baud Rate min/max divisor */ +#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN) +#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX) + +/* STM32H7 SPI Communication mode */ +#define STM32H7_SPI_FULL_DUPLEX 0 +#define STM32H7_SPI_SIMPLEX_TX 1 +#define STM32H7_SPI_SIMPLEX_RX 2 +#define STM32H7_SPI_HALF_DUPLEX 3 + +/* SPI Communication type */ #define SPI_FULL_DUPLEX 0 #define SPI_SIMPLEX_TX 1 #define SPI_SIMPLEX_RX 2 -#define SPI_HALF_DUPLEX 3 +#define SPI_3WIRE_TX 3 +#define SPI_3WIRE_RX 4 #define SPI_1HZ_NS 1000000000 +/* + * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers + * without fifo buffers. + */ +#define SPI_DMA_MIN_BYTES 16 + +/** + * stm32_spi_reg - stm32 SPI register & bitfield desc + * @reg: register offset + * @mask: bitfield mask + * @shift: left shift + */ +struct stm32_spi_reg { + int reg; + int mask; + int shift; +}; + +/** + * stm32_spi_regspec - stm32 registers definition, compatible dependent data + * en: enable register and SPI enable bit + * dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit + * dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit + * cpol: clock polarity register and polarity bit + * cpha: clock phase register and phase bit + * lsb_first: LSB transmitted first register and bit + * br: baud rate register and bitfields + * rx: SPI RX data register + * tx: SPI TX data register + */ +struct stm32_spi_regspec { + const struct stm32_spi_reg en; + const struct stm32_spi_reg dma_rx_en; + const struct stm32_spi_reg dma_tx_en; + const struct stm32_spi_reg cpol; + const struct stm32_spi_reg cpha; + const struct stm32_spi_reg lsb_first; + const struct stm32_spi_reg br; + const struct stm32_spi_reg rx; + const struct stm32_spi_reg tx; +}; + +struct stm32_spi; + +/** + * stm32_spi_cfg - stm32 compatible configuration data + * @regs: registers descriptions + * @get_fifo_size: routine to get fifo size + * @get_bpw_mask: routine to get bits per word mask + * @disable: routine to disable controller + * @config: routine to configure controller as SPI Master + * @set_bpw: routine to configure registers to for bits per word + * @set_mode: routine to configure registers to desired mode + * @set_data_idleness: optional routine to configure registers to desired idle + * time between frames (if driver has this functionality) + * set_number_of_data: optional routine to configure registers to desired + * number of data (if driver has this functionality) + * @can_dma: routine to determine if the transfer is eligible for DMA use + * @transfer_one_dma_start: routine to start transfer a single spi_transfer + * using DMA + * @dma_rx cb: routine to call after DMA RX channel operation is 
complete + * @dma_tx cb: routine to call after DMA TX channel operation is complete + * @transfer_one_irq: routine to configure interrupts for driver + * @irq_handler_event: Interrupt handler for SPI controller events + * @irq_handler_thread: thread of interrupt handler for SPI controller + * @baud_rate_div_min: minimum baud rate divisor + * @baud_rate_div_max: maximum baud rate divisor + * @has_fifo: boolean to know if fifo is used for driver + * @has_startbit: boolean to know if start bit is used to start transfer + */ +struct stm32_spi_cfg { + const struct stm32_spi_regspec *regs; + int (*get_fifo_size)(struct stm32_spi *spi); + int (*get_bpw_mask)(struct stm32_spi *spi); + void (*disable)(struct stm32_spi *spi); + int (*config)(struct stm32_spi *spi); + void (*set_bpw)(struct stm32_spi *spi); + int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type); + void (*set_data_idleness)(struct stm32_spi *spi, u32 length); + int (*set_number_of_data)(struct stm32_spi *spi, u32 length); + void (*transfer_one_dma_start)(struct stm32_spi *spi); + void (*dma_rx_cb)(void *data); + void (*dma_tx_cb)(void *data); + int (*transfer_one_irq)(struct stm32_spi *spi); + irqreturn_t (*irq_handler_event)(int irq, void *dev_id); + irqreturn_t (*irq_handler_thread)(int irq, void *dev_id); + unsigned int baud_rate_div_min; + unsigned int baud_rate_div_max; + bool has_fifo; +}; + /** * struct stm32_spi - private data of the SPI controller * @dev: driver model representation of the controller * @master: controller master interface + * @cfg: compatible configuration data * @base: virtual memory area * @clk: hw kernel clock feeding the SPI clock generator * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator @@ -151,6 +290,7 @@ struct stm32_spi { struct device *dev; struct spi_master *master; + const struct stm32_spi_cfg *cfg; void __iomem *base; struct clk *clk; u32 clk_rate; @@ -176,6 +316,40 @@ struct stm32_spi { dma_addr_t phys_addr; }; +static const struct stm32_spi_regspec stm32f4_spi_regspec = { + .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE }, + + .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN }, + .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN }, + + .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL }, + .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA }, + .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST }, + .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT }, + + .rx = { STM32F4_SPI_DR }, + .tx = { STM32F4_SPI_DR }, +}; + +static const struct stm32_spi_regspec stm32h7_spi_regspec = { + /* SPI data transfer is enabled but spi_ker_ck is idle. + * CFG1 and CFG2 registers are write protected when SPE is enabled. 
+ */ + .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE }, + + .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN }, + .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN }, + + .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL }, + .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA }, + .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST }, + .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR, + STM32H7_SPI_CFG1_MBR_SHIFT }, + + .rx = { STM32H7_SPI_RXDR }, + .tx = { STM32H7_SPI_TXDR }, +}; + static inline void stm32_spi_set_bits(struct stm32_spi *spi, u32 offset, u32 bits) { @@ -191,22 +365,22 @@ static inline void stm32_spi_clr_bits(struct stm32_spi *spi, } /** - * stm32_spi_get_fifo_size - Return fifo size + * stm32h7_spi_get_fifo_size - Return fifo size * @spi: pointer to the spi controller data structure */ -static int stm32_spi_get_fifo_size(struct stm32_spi *spi) +static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi) { unsigned long flags; u32 count = 0; spin_lock_irqsave(&spi->lock, flags); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); - while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP) - writeb_relaxed(++count, spi->base + STM32_SPI_TXDR); + while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP) + writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR); - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); spin_unlock_irqrestore(&spi->lock, flags); @@ -216,10 +390,20 @@ static int stm32_spi_get_fifo_size(struct stm32_spi *spi) } /** - * stm32_spi_get_bpw_mask - Return bits per word mask + * stm32f4_spi_get_bpw_mask - Return bits per word mask * @spi: pointer to the spi controller data structure */ -static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) +static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi) +{ + dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n"); + return SPI_BPW_MASK(8) | SPI_BPW_MASK(16); +} + +/** + * stm32h7_spi_get_bpw_mask - Return bits per word mask + * @spi: pointer to the spi controller data structure + */ +static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi) { unsigned long flags; u32 cfg1, max_bpw; @@ -230,10 +414,11 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) * The most significant bit at DSIZE bit field is reserved when the * maximum data size of periperal instances is limited to 16-bit */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE); + stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE); - cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1); - max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT; + cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1); + max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >> + STM32H7_SPI_CFG1_DSIZE_SHIFT; max_bpw += 1; spin_unlock_irqrestore(&spi->lock, flags); @@ -244,13 +429,16 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) } /** - * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value + * stm32_spi_prepare_mbr - Determine baud rate divisor value * @spi: pointer to the spi controller data structure * @speed_hz: requested speed + * @min_div: minimum baud rate divisor + * @max_div: maximum baud rate divisor * - * Return SPI_CFG1.MBR value in case of success or -EINVAL + * Return baud rate divisor value in case of success or -EINVAL */ -static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) +static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, 
+ u32 min_div, u32 max_div) { u32 div, mbrdiv; @@ -263,8 +451,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) * no need to check it there. * However, we need to ensure the following calculations. */ - if (div < SPI_MBR_DIV_MIN || - div > SPI_MBR_DIV_MAX) + if ((div < min_div) || (div > max_div)) return -EINVAL; /* Determine the first power of 2 greater than or equal to div */ @@ -279,10 +466,10 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) } /** - * stm32_spi_prepare_fthlv - Determine FIFO threshold level + * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level * @spi: pointer to the spi controller data structure */ -static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi) +static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi) { u32 fthlv, half_fifo; @@ -306,32 +493,62 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi) } /** - * stm32_spi_write_txfifo - Write bytes in Transmit Data Register + * stm32f4_spi_write_tx - Write bytes to Transmit Data Register * @spi: pointer to the spi controller data structure * * Read from tx_buf depends on remaining bytes to avoid to read beyond * tx_buf end. */ -static void stm32_spi_write_txfifo(struct stm32_spi *spi) +static void stm32f4_spi_write_tx(struct stm32_spi *spi) +{ + if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & + STM32F4_SPI_SR_TXE)) { + u32 offs = spi->cur_xferlen - spi->tx_len; + + if (spi->cur_bpw == 16) { + const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); + + writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR); + spi->tx_len -= sizeof(u16); + } else { + const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); + + writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR); + spi->tx_len -= sizeof(u8); + } + } + + dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len); +} + +/** + * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register + * @spi: pointer to the spi controller data structure + * + * Read from tx_buf depends on remaining bytes to avoid to read beyond + * tx_buf end. + */ +static void stm32h7_spi_write_txfifo(struct stm32_spi *spi) { while ((spi->tx_len > 0) && - (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) { + (readl_relaxed(spi->base + STM32H7_SPI_SR) & + STM32H7_SPI_SR_TXP)) { u32 offs = spi->cur_xferlen - spi->tx_len; if (spi->tx_len >= sizeof(u32)) { const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs); - writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR); + writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u32); } else if (spi->tx_len >= sizeof(u16)) { const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); - writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR); + writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u16); } else { const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); - writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR); + writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR); spi->tx_len -= sizeof(u8); } } @@ -340,43 +557,74 @@ static void stm32_spi_write_txfifo(struct stm32_spi *spi) } /** - * stm32_spi_read_rxfifo - Read bytes in Receive Data Register + * stm32f4_spi_read_rx - Read bytes from Receive Data Register + * @spi: pointer to the spi controller data structure + * + * Write in rx_buf depends on remaining bytes to avoid to write beyond + * rx_buf end. 
+ */ +static void stm32f4_spi_read_rx(struct stm32_spi *spi) +{ + if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) & + STM32F4_SPI_SR_RXNE)) { + u32 offs = spi->cur_xferlen - spi->rx_len; + + if (spi->cur_bpw == 16) { + u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); + + *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR); + spi->rx_len -= sizeof(u16); + } else { + u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); + + *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR); + spi->rx_len -= sizeof(u8); + } + } + + dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len); +} + +/** + * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register * @spi: pointer to the spi controller data structure * * Write in rx_buf depends on remaining bytes to avoid to write beyond * rx_buf end. */ -static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush) +static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush) { - u32 sr = readl_relaxed(spi->base + STM32_SPI_SR); - u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; + u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >> + STM32H7_SPI_SR_RXPLVL_SHIFT; while ((spi->rx_len > 0) && - ((sr & SPI_SR_RXP) || - (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) { + ((sr & STM32H7_SPI_SR_RXP) || + (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) { u32 offs = spi->cur_xferlen - spi->rx_len; if ((spi->rx_len >= sizeof(u32)) || - (flush && (sr & SPI_SR_RXWNE))) { + (flush && (sr & STM32H7_SPI_SR_RXWNE))) { u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs); - *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u32); } else if ((spi->rx_len >= sizeof(u16)) || (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) { u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); - *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u16); } else { u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); - *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR); + *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR); spi->rx_len -= sizeof(u8); } - sr = readl_relaxed(spi->base + STM32_SPI_SR); - rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >> + STM32H7_SPI_SR_RXPLVL_SHIFT; } dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__, @@ -386,26 +634,76 @@ static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush) /** * stm32_spi_enable - Enable SPI controller * @spi: pointer to the spi controller data structure - * - * SPI data transfer is enabled but spi_ker_ck is idle. - * SPI_CFG1 and SPI_CFG2 are now write protected. 
*/ static void stm32_spi_enable(struct stm32_spi *spi) { dev_dbg(spi->dev, "enable controller\n"); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_set_bits(spi, spi->cfg->regs->en.reg, + spi->cfg->regs->en.mask); } /** - * stm32_spi_disable - Disable SPI controller + * stm32f4_spi_disable - Disable SPI controller + * @spi: pointer to the spi controller data structure + */ +static void stm32f4_spi_disable(struct stm32_spi *spi) +{ + unsigned long flags; + u32 sr; + + dev_dbg(spi->dev, "disable controller\n"); + + spin_lock_irqsave(&spi->lock, flags); + + if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) & + STM32F4_SPI_CR1_SPE)) { + spin_unlock_irqrestore(&spi->lock, flags); + return; + } + + /* Disable interrupts */ + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE | + STM32F4_SPI_CR2_RXNEIE | + STM32F4_SPI_CR2_ERRIE); + + /* Wait until BSY = 0 */ + if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR, + sr, !(sr & STM32F4_SPI_SR_BSY), + 10, 100000) < 0) { + dev_warn(spi->dev, "disabling condition timeout\n"); + } + + if (spi->cur_usedma && spi->dma_tx) + dmaengine_terminate_all(spi->dma_tx); + if (spi->cur_usedma && spi->dma_rx) + dmaengine_terminate_all(spi->dma_rx); + + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE); + + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN | + STM32F4_SPI_CR2_RXDMAEN); + + /* Sequence to clear OVR flag */ + readl_relaxed(spi->base + STM32F4_SPI_DR); + readl_relaxed(spi->base + STM32F4_SPI_SR); + + spin_unlock_irqrestore(&spi->lock, flags); +} + +/** + * stm32h7_spi_disable - Disable SPI controller * @spi: pointer to the spi controller data structure * * RX-Fifo is flushed when SPI controller is disabled. To prevent any data - * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in + * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in * RX-Fifo. + * Normally, if TSIZE has been configured, we should relax the hardware at the + * reception of the EOT interrupt. But in case of error, EOT will not be + * raised. So the subsystem unprepare_message call allows us to properly + * complete the transfer from an hardware point of view. 
*/ -static void stm32_spi_disable(struct stm32_spi *spi) +static void stm32h7_spi_disable(struct stm32_spi *spi) { unsigned long flags; u32 cr1, sr; @@ -414,23 +712,23 @@ static void stm32_spi_disable(struct stm32_spi *spi) spin_lock_irqsave(&spi->lock, flags); - cr1 = readl_relaxed(spi->base + STM32_SPI_CR1); + cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1); - if (!(cr1 & SPI_CR1_SPE)) { + if (!(cr1 & STM32H7_SPI_CR1_SPE)) { spin_unlock_irqrestore(&spi->lock, flags); return; } /* Wait on EOT or suspend the flow */ - if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR, - sr, !(sr & SPI_SR_EOT), + if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR, + sr, !(sr & STM32H7_SPI_SR_EOT), 10, 100000) < 0) { - if (cr1 & SPI_CR1_CSTART) { - writel_relaxed(cr1 | SPI_CR1_CSUSP, - spi->base + STM32_SPI_CR1); + if (cr1 & STM32H7_SPI_CR1_CSTART) { + writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP, + spi->base + STM32H7_SPI_CR1); if (readl_relaxed_poll_timeout_atomic( - spi->base + STM32_SPI_SR, - sr, !(sr & SPI_SR_SUSP), + spi->base + STM32H7_SPI_SR, + sr, !(sr & STM32H7_SPI_SR_SUSP), 10, 100000) < 0) dev_warn(spi->dev, "Suspend request timeout\n"); @@ -438,21 +736,21 @@ static void stm32_spi_disable(struct stm32_spi *spi) } if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0)) - stm32_spi_read_rxfifo(spi, true); + stm32h7_spi_read_rxfifo(spi, true); - if (spi->cur_usedma && spi->tx_buf) + if (spi->cur_usedma && spi->dma_tx) dmaengine_terminate_all(spi->dma_tx); - if (spi->cur_usedma && spi->rx_buf) + if (spi->cur_usedma && spi->dma_rx) dmaengine_terminate_all(spi->dma_rx); - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE); - stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN | - SPI_CFG1_RXDMAEN); + stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN | + STM32H7_SPI_CFG1_RXDMAEN); /* Disable interrupts and clear status flags */ - writel_relaxed(0, spi->base + STM32_SPI_IER); - writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR); + writel_relaxed(0, spi->base + STM32H7_SPI_IER); + writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); } @@ -460,26 +758,136 @@ static void stm32_spi_disable(struct stm32_spi *spi) /** * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use * - * If the current transfer size is greater than fifo size, use DMA. + * If driver has fifo and the current transfer size is greater than fifo size, + * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes. */ static bool stm32_spi_can_dma(struct spi_master *master, struct spi_device *spi_dev, struct spi_transfer *transfer) { + unsigned int dma_size; struct stm32_spi *spi = spi_master_get_devdata(master); + if (spi->cfg->has_fifo) + dma_size = spi->fifo_size; + else + dma_size = SPI_DMA_MIN_BYTES; + dev_dbg(spi->dev, "%s: %s\n", __func__, - (transfer->len > spi->fifo_size) ? "true" : "false"); + (transfer->len > dma_size) ? 
"true" : "false"); + + return (transfer->len > dma_size); +} + +/** + * stm32f4_spi_irq_event - Interrupt handler for SPI controller events + * @irq: interrupt line + * @dev_id: SPI controller master interface + */ +static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct stm32_spi *spi = spi_master_get_devdata(master); + u32 sr, mask = 0; + unsigned long flags; + bool end = false; + + spin_lock_irqsave(&spi->lock, flags); + + sr = readl_relaxed(spi->base + STM32F4_SPI_SR); + /* + * BSY flag is not handled in interrupt but it is normal behavior when + * this flag is set. + */ + sr &= ~STM32F4_SPI_SR_BSY; + + if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX || + spi->cur_comm == SPI_3WIRE_TX)) { + /* OVR flag shouldn't be handled for TX only mode */ + sr &= ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE; + mask |= STM32F4_SPI_SR_TXE; + } + + if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) { + /* TXE flag is set and is handled when RXNE flag occurs */ + sr &= ~STM32F4_SPI_SR_TXE; + mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR; + } + + if (!(sr & mask)) { + dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr); + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_NONE; + } + + if (sr & STM32F4_SPI_SR_OVR) { + dev_warn(spi->dev, "Overrun: received value discarded\n"); + + /* Sequence to clear OVR flag */ + readl_relaxed(spi->base + STM32F4_SPI_DR); + readl_relaxed(spi->base + STM32F4_SPI_SR); + + /* + * If overrun is detected, it means that something went wrong, + * so stop the current transfer. Transfer can wait for next + * RXNE but DR is already read and end never happens. + */ + end = true; + goto end_irq; + } + + if (sr & STM32F4_SPI_SR_TXE) { + if (spi->tx_buf) + stm32f4_spi_write_tx(spi); + if (spi->tx_len == 0) + end = true; + } + + if (sr & STM32F4_SPI_SR_RXNE) { + stm32f4_spi_read_rx(spi); + if (spi->rx_len == 0) + end = true; + else /* Load data for discontinuous mode */ + stm32f4_spi_write_tx(spi); + } + +end_irq: + if (end) { + /* Immediately disable interrupts to do not generate new one */ + stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, + STM32F4_SPI_CR2_TXEIE | + STM32F4_SPI_CR2_RXNEIE | + STM32F4_SPI_CR2_ERRIE); + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_WAKE_THREAD; + } + + spin_unlock_irqrestore(&spi->lock, flags); + return IRQ_HANDLED; +} + +/** + * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller + * @irq: interrupt line + * @dev_id: SPI controller master interface + */ +static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id) +{ + struct spi_master *master = dev_id; + struct stm32_spi *spi = spi_master_get_devdata(master); + + spi_finalize_current_transfer(master); + stm32f4_spi_disable(spi); - return (transfer->len > spi->fifo_size); + return IRQ_HANDLED; } /** - * stm32_spi_irq - Interrupt handler for SPI controller events + * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller * @irq: interrupt line * @dev_id: SPI controller master interface */ -static irqreturn_t stm32_spi_irq(int irq, void *dev_id) +static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) { struct spi_master *master = dev_id; struct stm32_spi *spi = spi_master_get_devdata(master); @@ -489,19 +897,19 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) spin_lock_irqsave(&spi->lock, flags); - sr = readl_relaxed(spi->base + STM32_SPI_SR); - ier = readl_relaxed(spi->base + STM32_SPI_IER); + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); + ier = 
readl_relaxed(spi->base + STM32H7_SPI_IER); mask = ier; /* EOTIE is triggered on EOT, SUSP and TXC events. */ - mask |= SPI_SR_SUSP; + mask |= STM32H7_SPI_SR_SUSP; /* * When TXTF is set, DXPIE and TXPIE are cleared. So in case of * Full-Duplex, need to poll RXP event to know if there are remaining * data, before disabling SPI. */ if (spi->rx_buf && !spi->cur_usedma) - mask |= SPI_SR_RXP; + mask |= STM32H7_SPI_SR_RXP; if (!(sr & mask)) { dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", @@ -510,10 +918,10 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) return IRQ_NONE; } - if (sr & SPI_SR_SUSP) { + if (sr & STM32H7_SPI_SR_SUSP) { dev_warn(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); /* * If communication is suspended while using DMA, it means * that something went wrong, so stop the current transfer @@ -522,15 +930,15 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) end = true; } - if (sr & SPI_SR_MODF) { + if (sr & STM32H7_SPI_SR_MODF) { dev_warn(spi->dev, "Mode fault: transfer aborted\n"); end = true; } - if (sr & SPI_SR_OVR) { + if (sr & STM32H7_SPI_SR_OVR) { dev_warn(spi->dev, "Overrun: received value discarded\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); /* * If overrun is detected while using DMA, it means that * something went wrong, so stop the current transfer @@ -539,27 +947,27 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id) end = true; } - if (sr & SPI_SR_EOT) { + if (sr & STM32H7_SPI_SR_EOT) { if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, true); + stm32h7_spi_read_rxfifo(spi, true); end = true; } - if (sr & SPI_SR_TXP) + if (sr & STM32H7_SPI_SR_TXP) if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0))) - stm32_spi_write_txfifo(spi); + stm32h7_spi_write_txfifo(spi); - if (sr & SPI_SR_RXP) + if (sr & STM32H7_SPI_SR_RXP) if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) - stm32_spi_read_rxfifo(spi, false); + stm32h7_spi_read_rxfifo(spi, false); - writel_relaxed(mask, spi->base + STM32_SPI_IFCR); + writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR); spin_unlock_irqrestore(&spi->lock, flags); if (end) { spi_finalize_current_transfer(master); - stm32_spi_disable(spi); + stm32h7_spi_disable(spi); } return IRQ_HANDLED; @@ -598,7 +1006,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master, struct spi_device *spi_dev = msg->spi; struct device_node *np = spi_dev->dev.of_node; unsigned long flags; - u32 cfg2_clrb = 0, cfg2_setb = 0; + u32 clrb = 0, setb = 0; /* SPI slave device may need time between data frames */ spi->cur_midi = 0; @@ -606,19 +1014,19 @@ static int stm32_spi_prepare_msg(struct spi_master *master, dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi); if (spi_dev->mode & SPI_CPOL) - cfg2_setb |= SPI_CFG2_CPOL; + setb |= spi->cfg->regs->cpol.mask; else - cfg2_clrb |= SPI_CFG2_CPOL; + clrb |= spi->cfg->regs->cpol.mask; if (spi_dev->mode & SPI_CPHA) - cfg2_setb |= SPI_CFG2_CPHA; + setb |= spi->cfg->regs->cpha.mask; else - cfg2_clrb |= SPI_CFG2_CPHA; + clrb |= spi->cfg->regs->cpha.mask; if (spi_dev->mode & SPI_LSB_FIRST) - cfg2_setb |= SPI_CFG2_LSBFRST; + setb |= spi->cfg->regs->lsb_first.mask; else - cfg2_clrb |= SPI_CFG2_LSBFRST; + clrb |= spi->cfg->regs->lsb_first.mask; dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n", 
spi_dev->mode & SPI_CPOL, @@ -628,11 +1036,12 @@ static int stm32_spi_prepare_msg(struct spi_master *master, spin_lock_irqsave(&spi->lock, flags); - if (cfg2_clrb || cfg2_setb) + /* CPOL, CPHA and LSB FIRST bits have common register */ + if (clrb || setb) writel_relaxed( - (readl_relaxed(spi->base + STM32_SPI_CFG2) & - ~cfg2_clrb) | cfg2_setb, - spi->base + STM32_SPI_CFG2); + (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) & + ~clrb) | setb, + spi->base + spi->cfg->regs->cpol.reg); spin_unlock_irqrestore(&spi->lock, flags); @@ -640,12 +1049,40 @@ static int stm32_spi_prepare_msg(struct spi_master *master, } /** - * stm32_spi_dma_cb - dma callback + * stm32f4_spi_dma_tx_cb - dma callback + * + * DMA callback is called when the transfer is complete for DMA TX channel. + */ +static void stm32f4_spi_dma_tx_cb(void *data) +{ + struct stm32_spi *spi = data; + + if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { + spi_finalize_current_transfer(spi->master); + stm32f4_spi_disable(spi); + } +} + +/** + * stm32f4_spi_dma_rx_cb - dma callback + * + * DMA callback is called when the transfer is complete for DMA RX channel. + */ +static void stm32f4_spi_dma_rx_cb(void *data) +{ + struct stm32_spi *spi = data; + + spi_finalize_current_transfer(spi->master); + stm32f4_spi_disable(spi); +} + +/** + * stm32h7_spi_dma_cb - dma callback * * DMA callback is called when the transfer is complete or when an error * occurs. If the transfer is complete, EOT flag is raised. */ -static void stm32_spi_dma_cb(void *data) +static void stm32h7_spi_dma_cb(void *data) { struct stm32_spi *spi = data; unsigned long flags; @@ -653,11 +1090,11 @@ static void stm32_spi_dma_cb(void *data) spin_lock_irqsave(&spi->lock, flags); - sr = readl_relaxed(spi->base + STM32_SPI_SR); + sr = readl_relaxed(spi->base + STM32H7_SPI_SR); spin_unlock_irqrestore(&spi->lock, flags); - if (!(sr & SPI_SR_EOT)) + if (!(sr & STM32H7_SPI_SR_EOT)) dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr); /* Now wait for EOT, or SUSP or OVR in case of error */ @@ -681,23 +1118,27 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, else buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; - /* Valid for DMA Half or Full Fifo threshold */ - if (spi->cur_fthlv == 2) + if (spi->cfg->has_fifo) { + /* Valid for DMA Half or Full Fifo threshold */ + if (spi->cur_fthlv == 2) + maxburst = 1; + else + maxburst = spi->cur_fthlv; + } else { maxburst = 1; - else - maxburst = spi->cur_fthlv; + } memset(dma_conf, 0, sizeof(struct dma_slave_config)); dma_conf->direction = dir; if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */ - dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR; + dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg; dma_conf->src_addr_width = buswidth; dma_conf->src_maxburst = maxburst; dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n", buswidth, maxburst); } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */ - dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR; + dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg; dma_conf->dst_addr_width = buswidth; dma_conf->dst_maxburst = maxburst; @@ -707,27 +1148,68 @@ static void stm32_spi_dma_config(struct stm32_spi *spi, } /** - * stm32_spi_transfer_one_irq - transfer a single spi_transfer using - * interrupts + * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using + * interrupts * * It must returns 0 if the transfer is finished or 1 if the transfer is still * in progress. 
*/ -static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) +static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi) +{ + unsigned long flags; + u32 cr2 = 0; + + /* Enable the interrupts relative to the current communication mode */ + if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) { + cr2 |= STM32F4_SPI_CR2_TXEIE; + } else if (spi->cur_comm == SPI_FULL_DUPLEX) { + /* In transmit-only mode, the OVR flag is set in the SR register + * since the received data are never read. Therefore set OVR + * interrupt only when rx buffer is available. + */ + cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE; + } else { + return -EINVAL; + } + + spin_lock_irqsave(&spi->lock, flags); + + stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2); + + stm32_spi_enable(spi); + + /* starting data transfer when buffer is loaded */ + if (spi->tx_buf) + stm32f4_spi_write_tx(spi); + + spin_unlock_irqrestore(&spi->lock, flags); + + return 1; +} + +/** + * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using + * interrupts + * + * It must returns 0 if the transfer is finished or 1 if the transfer is still + * in progress. + */ +static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi) { unsigned long flags; u32 ier = 0; /* Enable the interrupts relative to the current communication mode */ if (spi->tx_buf && spi->rx_buf) /* Full Duplex */ - ier |= SPI_IER_DXPIE; + ier |= STM32H7_SPI_IER_DXPIE; else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */ - ier |= SPI_IER_TXPIE; + ier |= STM32H7_SPI_IER_TXPIE; else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */ - ier |= SPI_IER_RXPIE; + ier |= STM32H7_SPI_IER_RXPIE; /* Enable the interrupts relative to the end of transfer */ - ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE; + ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE | + STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE; spin_lock_irqsave(&spi->lock, flags); @@ -735,17 +1217,54 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) /* Be sure to have data in fifo before starting data transfer */ if (spi->tx_buf) - stm32_spi_write_txfifo(spi); + stm32h7_spi_write_txfifo(spi); - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); - writel_relaxed(ier, spi->base + STM32_SPI_IER); + writel_relaxed(ier, spi->base + STM32H7_SPI_IER); spin_unlock_irqrestore(&spi->lock, flags); return 1; } +/** + * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start + * transfer using DMA + */ +static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi) +{ + /* In DMA mode end of transfer is handled by DMA TX or RX callback. */ + if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX || + spi->cur_comm == SPI_FULL_DUPLEX) { + /* + * In transmit-only mode, the OVR flag is set in the SR register + * since the received data are never read. Therefore set OVR + * interrupt only when rx buffer is available. 
+ */ + stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE); + } + + stm32_spi_enable(spi); +} + +/** + * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start + * transfer using DMA + */ +static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi) +{ + /* Enable the interrupts relative to the end of transfer */ + stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE | + STM32H7_SPI_IER_TXTFIE | + STM32H7_SPI_IER_OVRIE | + STM32H7_SPI_IER_MODFIE); + + stm32_spi_enable(spi); + + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART); +} + /** * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA * @@ -758,17 +1277,17 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, struct dma_slave_config tx_dma_conf, rx_dma_conf; struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc; unsigned long flags; - u32 ier = 0; spin_lock_irqsave(&spi->lock, flags); rx_dma_desc = NULL; - if (spi->rx_buf) { + if (spi->rx_buf && spi->dma_rx) { stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM); dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); /* Enable Rx DMA request */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); + stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg, + spi->cfg->regs->dma_rx_en.mask); rx_dma_desc = dmaengine_prep_slave_sg( spi->dma_rx, xfer->rx_sg.sgl, @@ -778,7 +1297,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, } tx_dma_desc = NULL; - if (spi->tx_buf) { + if (spi->tx_buf && spi->dma_tx) { stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV); dmaengine_slave_config(spi->dma_tx, &tx_dma_conf); @@ -789,12 +1308,15 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, DMA_PREP_INTERRUPT); } - if ((spi->tx_buf && !tx_dma_desc) || - (spi->rx_buf && !rx_dma_desc)) + if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) || + (spi->rx_buf && spi->dma_rx && !rx_dma_desc)) + goto dma_desc_error; + + if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc)) goto dma_desc_error; if (rx_dma_desc) { - rx_dma_desc->callback = stm32_spi_dma_cb; + rx_dma_desc->callback = spi->cfg->dma_rx_cb; rx_dma_desc->callback_param = spi; if (dma_submit_error(dmaengine_submit(rx_dma_desc))) { @@ -806,8 +1328,9 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, } if (tx_dma_desc) { - if (spi->cur_comm == SPI_SIMPLEX_TX) { - tx_dma_desc->callback = stm32_spi_dma_cb; + if (spi->cur_comm == SPI_SIMPLEX_TX || + spi->cur_comm == SPI_3WIRE_TX) { + tx_dma_desc->callback = spi->cfg->dma_tx_cb; tx_dma_desc->callback_param = spi; } @@ -819,130 +1342,278 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi, dma_async_issue_pending(spi->dma_tx); /* Enable Tx DMA request */ - stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN); + stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg, + spi->cfg->regs->dma_tx_en.mask); } - /* Enable the interrupts relative to the end of transfer */ - ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE; - writel_relaxed(ier, spi->base + STM32_SPI_IER); - - stm32_spi_enable(spi); - - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART); + spi->cfg->transfer_one_dma_start(spi); spin_unlock_irqrestore(&spi->lock, flags); return 1; dma_submit_error: - if (spi->rx_buf) + if (spi->dma_rx) dmaengine_terminate_all(spi->dma_rx); dma_desc_error: - stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); + stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg, + spi->cfg->regs->dma_rx_en.mask); 
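/*
 * Reviewer note (not part of the patch): a minimal sketch of the per-variant
 * register lookup that the dma_rx_en/dma_tx_en accesses just above index
 * into.  The field names are taken from the accesses visible in this file
 * (cpol, cpha, lsb_first, br, rx, tx, dma_rx_en, dma_tx_en); the real
 * definition and the stm32f4/stm32h7 instances appear earlier in the patch
 * and may differ in detail:
 *
 *	struct stm32_spi_reg {
 *		int reg;	// register offset
 *		int mask;	// bitfield mask
 *		int shift;	// bitfield shift, where used (e.g. br)
 *	};
 *
 *	struct stm32_spi_regspec {
 *		const struct stm32_spi_reg dma_rx_en, dma_tx_en;
 *		const struct stm32_spi_reg cpol, cpha, lsb_first;
 *		const struct stm32_spi_reg br, rx, tx;
 *	};
 *
 * With such a table, stm32_spi_set_bits(spi, regs->dma_rx_en.reg,
 * regs->dma_rx_en.mask) resolves to the F4 or H7 register layout without
 * any variant checks in the shared code paths.
 */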
spin_unlock_irqrestore(&spi->lock, flags); dev_info(spi->dev, "DMA issue: fall back to irq transfer\n"); - return stm32_spi_transfer_one_irq(spi); + spi->cur_usedma = false; + return spi->cfg->transfer_one_irq(spi); } /** - * stm32_spi_transfer_one_setup - common setup to transfer a single - * spi_transfer either using DMA or - * interrupts. + * stm32f4_spi_set_bpw - Configure bits per word + * @spi: pointer to the spi controller data structure */ -static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, - struct spi_device *spi_dev, - struct spi_transfer *transfer) +static void stm32f4_spi_set_bpw(struct stm32_spi *spi) { - unsigned long flags; - u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0; - u32 mode, nb_words; - int ret = 0; - - spin_lock_irqsave(&spi->lock, flags); + if (spi->cur_bpw == 16) + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF); + else + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF); +} - if (spi->cur_bpw != transfer->bits_per_word) { - u32 bpw, fthlv; +/** + * stm32h7_spi_set_bpw - configure bits per word + * @spi: pointer to the spi controller data structure + */ +static void stm32h7_spi_set_bpw(struct stm32_spi *spi) +{ + u32 bpw, fthlv; + u32 cfg1_clrb = 0, cfg1_setb = 0; - spi->cur_bpw = transfer->bits_per_word; - bpw = spi->cur_bpw - 1; + bpw = spi->cur_bpw - 1; - cfg1_clrb |= SPI_CFG1_DSIZE; - cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE; + cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE; + cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) & + STM32H7_SPI_CFG1_DSIZE; - spi->cur_fthlv = stm32_spi_prepare_fthlv(spi); - fthlv = spi->cur_fthlv - 1; + spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi); + fthlv = spi->cur_fthlv - 1; - cfg1_clrb |= SPI_CFG1_FTHLV; - cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV; - } + cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV; + cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) & + STM32H7_SPI_CFG1_FTHLV; - if (spi->cur_speed != transfer->speed_hz) { - int mbr; + writel_relaxed( + (readl_relaxed(spi->base + STM32H7_SPI_CFG1) & + ~cfg1_clrb) | cfg1_setb, + spi->base + STM32H7_SPI_CFG1); +} - /* Update spi->cur_speed with real clock speed */ - mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz); - if (mbr < 0) { - ret = mbr; - goto out; - } +/** + * stm32_spi_set_mbr - Configure baud rate divisor in master mode + * @spi: pointer to the spi controller data structure + * @mbrdiv: baud rate divisor value + */ +static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv) +{ + u32 clrb = 0, setb = 0; - transfer->speed_hz = spi->cur_speed; + clrb |= spi->cfg->regs->br.mask; + setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) & + spi->cfg->regs->br.mask; - cfg1_clrb |= SPI_CFG1_MBR; - cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR; - } + writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) & + ~clrb) | setb, + spi->base + spi->cfg->regs->br.reg); +} - if (cfg1_clrb || cfg1_setb) - writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) & - ~cfg1_clrb) | cfg1_setb, - spi->base + STM32_SPI_CFG1); +/** + * stm32_spi_communication_type - return transfer communication type + * @spi_dev: pointer to the spi device + * transfer: pointer to spi transfer + */ +static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev, + struct spi_transfer *transfer) +{ + unsigned int type = SPI_FULL_DUPLEX; - mode = SPI_FULL_DUPLEX; if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */ /* * SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != 
NULL - * is forbidden und unvalidated by SPI subsystem so depending + * is forbidden and unvalidated by SPI subsystem so depending * on the valid buffer, we can determine the direction of the * transfer. */ - mode = SPI_HALF_DUPLEX; if (!transfer->tx_buf) - stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); - else if (!transfer->rx_buf) - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); + type = SPI_3WIRE_RX; + else + type = SPI_3WIRE_TX; } else { if (!transfer->tx_buf) - mode = SPI_SIMPLEX_RX; + type = SPI_SIMPLEX_RX; else if (!transfer->rx_buf) - mode = SPI_SIMPLEX_TX; + type = SPI_SIMPLEX_TX; + } + + return type; +} + +/** + * stm32f4_spi_set_mode - configure communication mode + * @spi: pointer to the spi controller data structure + * @comm_type: type of communication to configure + */ +static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) +{ + if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) { + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIMODE | + STM32F4_SPI_CR1_BIDIOE); + } else if (comm_type == SPI_FULL_DUPLEX) { + stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, + STM32F4_SPI_CR1_BIDIMODE | + STM32F4_SPI_CR1_BIDIOE); + } else { + return -EINVAL; } - if (spi->cur_comm != mode) { - spi->cur_comm = mode; - cfg2_clrb |= SPI_CFG2_COMM; - cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM; + return 0; +} + +/** + * stm32h7_spi_set_mode - configure communication mode + * @spi: pointer to the spi controller data structure + * @comm_type: type of communication to configure + */ +static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type) +{ + u32 mode; + u32 cfg2_clrb = 0, cfg2_setb = 0; + + if (comm_type == SPI_3WIRE_RX) { + mode = STM32H7_SPI_HALF_DUPLEX; + stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR); + } else if (comm_type == SPI_3WIRE_TX) { + mode = STM32H7_SPI_HALF_DUPLEX; + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR); + } else if (comm_type == SPI_SIMPLEX_RX) { + mode = STM32H7_SPI_SIMPLEX_RX; + } else if (comm_type == SPI_SIMPLEX_TX) { + mode = STM32H7_SPI_SIMPLEX_TX; + } else { + mode = STM32H7_SPI_FULL_DUPLEX; } - cfg2_clrb |= SPI_CFG2_MIDI; - if ((transfer->len > 1) && (spi->cur_midi > 0)) { + cfg2_clrb |= STM32H7_SPI_CFG2_COMM; + cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) & + STM32H7_SPI_CFG2_COMM; + + writel_relaxed( + (readl_relaxed(spi->base + STM32H7_SPI_CFG2) & + ~cfg2_clrb) | cfg2_setb, + spi->base + STM32H7_SPI_CFG2); + + return 0; +} + +/** + * stm32h7_spi_data_idleness - configure minimum time delay inserted between two + * consecutive data frames in master mode + * @spi: pointer to the spi controller data structure + * @len: transfer len + */ +static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len) +{ + u32 cfg2_clrb = 0, cfg2_setb = 0; + + cfg2_clrb |= STM32H7_SPI_CFG2_MIDI; + if ((len > 1) && (spi->cur_midi > 0)) { u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed); u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns), - (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT); + (u32)STM32H7_SPI_CFG2_MIDI >> + STM32H7_SPI_CFG2_MIDI_SHIFT); dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n", sck_period_ns, midi, midi * sck_period_ns); + cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) & + STM32H7_SPI_CFG2_MIDI; + } + + writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) & + ~cfg2_clrb) | cfg2_setb, + spi->base + STM32H7_SPI_CFG2); +} + +/** + * stm32h7_spi_number_of_data - configure number of data at 
current transfer + * @spi: pointer to the spi controller data structure + * @len: transfer length + */ +static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words) +{ + u32 cr2_clrb = 0, cr2_setb = 0; + + if (nb_words <= (STM32H7_SPI_CR2_TSIZE >> + STM32H7_SPI_CR2_TSIZE_SHIFT)) { + cr2_clrb |= STM32H7_SPI_CR2_TSIZE; + cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT; + writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) & + ~cr2_clrb) | cr2_setb, + spi->base + STM32H7_SPI_CR2); + } else { + return -EMSGSIZE; + } + + return 0; +} + +/** + * stm32_spi_transfer_one_setup - common setup to transfer a single + * spi_transfer either using DMA or + * interrupts. + */ +static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, + struct spi_device *spi_dev, + struct spi_transfer *transfer) +{ + unsigned long flags; + unsigned int comm_type; + int nb_words, ret = 0; + + spin_lock_irqsave(&spi->lock, flags); + + if (spi->cur_bpw != transfer->bits_per_word) { + spi->cur_bpw = transfer->bits_per_word; + spi->cfg->set_bpw(spi); + } - cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI; + if (spi->cur_speed != transfer->speed_hz) { + int mbr; + + /* Update spi->cur_speed with real clock speed */ + mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz, + spi->cfg->baud_rate_div_min, + spi->cfg->baud_rate_div_max); + if (mbr < 0) { + ret = mbr; + goto out; + } + + transfer->speed_hz = spi->cur_speed; + stm32_spi_set_mbr(spi, mbr); } - if (cfg2_clrb || cfg2_setb) - writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) & - ~cfg2_clrb) | cfg2_setb, - spi->base + STM32_SPI_CFG2); + comm_type = stm32_spi_communication_type(spi_dev, transfer); + if (spi->cur_comm != comm_type) { + ret = spi->cfg->set_mode(spi, comm_type); + + if (ret < 0) + goto out; + + spi->cur_comm = comm_type; + } + + if (spi->cfg->set_data_idleness) + spi->cfg->set_data_idleness(spi, transfer->len); if (spi->cur_bpw <= 8) nb_words = transfer->len; @@ -950,13 +1621,11 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, nb_words = DIV_ROUND_UP(transfer->len * 8, 16); else nb_words = DIV_ROUND_UP(transfer->len * 8, 32); - nb_words <<= SPI_CR2_TSIZE_SHIFT; - if (nb_words <= SPI_CR2_TSIZE) { - writel_relaxed(nb_words, spi->base + STM32_SPI_CR2); - } else { - ret = -EMSGSIZE; - goto out; + if (spi->cfg->set_number_of_data) { + ret = spi->cfg->set_number_of_data(spi, nb_words); + if (ret < 0) + goto out; } spi->cur_xferlen = transfer->len; @@ -997,7 +1666,7 @@ static int stm32_spi_transfer_one(struct spi_master *master, spi->rx_len = spi->rx_buf ? transfer->len : 0; spi->cur_usedma = (master->can_dma && - stm32_spi_can_dma(master, spi_dev, transfer)); + master->can_dma(master, spi_dev, transfer)); ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer); if (ret) { @@ -1008,47 +1677,73 @@ static int stm32_spi_transfer_one(struct spi_master *master, if (spi->cur_usedma) return stm32_spi_transfer_one_dma(spi, transfer); else - return stm32_spi_transfer_one_irq(spi); + return spi->cfg->transfer_one_irq(spi); } /** * stm32_spi_unprepare_msg - relax the hardware - * - * Normally, if TSIZE has been configured, we should relax the hardware at the - * reception of the EOT interrupt. But in case of error, EOT will not be - * raised. So the subsystem unprepare_message call allows us to properly - * complete the transfer from an hardware point of view. 
*/ static int stm32_spi_unprepare_msg(struct spi_master *master, struct spi_message *msg) { struct stm32_spi *spi = spi_master_get_devdata(master); - stm32_spi_disable(spi); + spi->cfg->disable(spi); + + return 0; +} + +/** + * stm32f4_spi_config - Configure SPI controller as SPI master + */ +static int stm32f4_spi_config(struct stm32_spi *spi) +{ + unsigned long flags; + + spin_lock_irqsave(&spi->lock, flags); + + /* Ensure I2SMOD bit is kept cleared */ + stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR, + STM32F4_SPI_I2SCFGR_I2SMOD); + + /* + * - SS input value high + * - transmitter half duplex direction + * - Set the master mode (default Motorola mode) + * - Consider 1 master/n slaves configuration and + * SS input value is determined by the SSI bit + */ + stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI | + STM32F4_SPI_CR1_BIDIOE | + STM32F4_SPI_CR1_MSTR | + STM32F4_SPI_CR1_SSM); + + spin_unlock_irqrestore(&spi->lock, flags); return 0; } /** - * stm32_spi_config - Configure SPI controller as SPI master + * stm32h7_spi_config - Configure SPI controller as SPI master */ -static int stm32_spi_config(struct stm32_spi *spi) +static int stm32h7_spi_config(struct stm32_spi *spi) { unsigned long flags; spin_lock_irqsave(&spi->lock, flags); /* Ensure I2SMOD bit is kept cleared */ - stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD); + stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR, + STM32H7_SPI_I2SCFGR_I2SMOD); /* * - SS input value high * - transmitter half duplex direction * - automatic communication suspend when RX-Fifo is full */ - stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI | - SPI_CR1_HDDIR | - SPI_CR1_MASRX); + stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI | + STM32H7_SPI_CR1_HDDIR | + STM32H7_SPI_CR1_MASRX); /* * - Set the master mode (default Motorola mode) @@ -1056,17 +1751,56 @@ static int stm32_spi_config(struct stm32_spi *spi) * SS input value is determined by the SSI bit * - keep control of all associated GPIOs */ - stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER | - SPI_CFG2_SSM | - SPI_CFG2_AFCNTR); + stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER | + STM32H7_SPI_CFG2_SSM | + STM32H7_SPI_CFG2_AFCNTR); spin_unlock_irqrestore(&spi->lock, flags); return 0; } +static const struct stm32_spi_cfg stm32f4_spi_cfg = { + .regs = &stm32f4_spi_regspec, + .get_bpw_mask = stm32f4_spi_get_bpw_mask, + .disable = stm32f4_spi_disable, + .config = stm32f4_spi_config, + .set_bpw = stm32f4_spi_set_bpw, + .set_mode = stm32f4_spi_set_mode, + .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start, + .dma_tx_cb = stm32f4_spi_dma_tx_cb, + .dma_rx_cb = stm32f4_spi_dma_rx_cb, + .transfer_one_irq = stm32f4_spi_transfer_one_irq, + .irq_handler_event = stm32f4_spi_irq_event, + .irq_handler_thread = stm32f4_spi_irq_thread, + .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN, + .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX, + .has_fifo = false, +}; + +static const struct stm32_spi_cfg stm32h7_spi_cfg = { + .regs = &stm32h7_spi_regspec, + .get_fifo_size = stm32h7_spi_get_fifo_size, + .get_bpw_mask = stm32h7_spi_get_bpw_mask, + .disable = stm32h7_spi_disable, + .config = stm32h7_spi_config, + .set_bpw = stm32h7_spi_set_bpw, + .set_mode = stm32h7_spi_set_mode, + .set_data_idleness = stm32h7_spi_data_idleness, + .set_number_of_data = stm32h7_spi_number_of_data, + .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start, + .dma_rx_cb = stm32h7_spi_dma_cb, + .dma_tx_cb = stm32h7_spi_dma_cb, + .transfer_one_irq = stm32h7_spi_transfer_one_irq, + 
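/*
 * Reviewer note (not part of the patch): in stm32h7_spi_cfg both .dma_rx_cb
 * and .dma_tx_cb point at the same stm32h7_spi_dma_cb because completion is
 * signalled centrally by the EOT interrupt, whereas the F4 variant needs
 * distinct TX/RX callbacks that finalize the transfer themselves.  Note also
 * that .irq_handler_event is only set for the F4 variant; the probe code
 * below passes it straight to devm_request_threaded_irq(), and a NULL hard
 * handler combined with IRQF_ONESHOT is valid, so the H7 keeps using a
 * threaded handler only, matching the previous behaviour of this driver.
 */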
.irq_handler_thread = stm32h7_spi_irq_thread, + .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN, + .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX, + .has_fifo = true, +}; + static const struct of_device_id stm32_spi_of_match[] = { - { .compatible = "st,stm32h7-spi", }, + { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg }, + { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg }, {}, }; MODULE_DEVICE_TABLE(of, stm32_spi_of_match); @@ -1090,12 +1824,17 @@ static int stm32_spi_probe(struct platform_device *pdev) spi->master = master; spin_lock_init(&spi->lock); + spi->cfg = (const struct stm32_spi_cfg *) + of_match_device(pdev->dev.driver->of_match_table, + &pdev->dev)->data; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); spi->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(spi->base)) { ret = PTR_ERR(spi->base); goto err_master_put; } + spi->phys_addr = (dma_addr_t)res->start; spi->irq = platform_get_irq(pdev, 0); @@ -1104,16 +1843,17 @@ static int stm32_spi_probe(struct platform_device *pdev) ret = -ENOENT; goto err_master_put; } - ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL, - stm32_spi_irq, IRQF_ONESHOT, - pdev->name, master); + ret = devm_request_threaded_irq(&pdev->dev, spi->irq, + spi->cfg->irq_handler_event, + spi->cfg->irq_handler_thread, + IRQF_ONESHOT, pdev->name, master); if (ret) { dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq, ret); goto err_master_put; } - spi->clk = devm_clk_get(&pdev->dev, 0); + spi->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(spi->clk)) { ret = PTR_ERR(spi->clk); dev_err(&pdev->dev, "clk get failed: %d\n", ret); @@ -1139,9 +1879,10 @@ static int stm32_spi_probe(struct platform_device *pdev) reset_control_deassert(spi->rst); } - spi->fifo_size = stm32_spi_get_fifo_size(spi); + if (spi->cfg->has_fifo) + spi->fifo_size = spi->cfg->get_fifo_size(spi); - ret = stm32_spi_config(spi); + ret = spi->cfg->config(spi); if (ret) { dev_err(&pdev->dev, "controller configuration failed: %d\n", ret); @@ -1151,11 +1892,11 @@ static int stm32_spi_probe(struct platform_device *pdev) master->dev.of_node = pdev->dev.of_node; master->auto_runtime_pm = true; master->bus_num = pdev->id; - master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST | - SPI_3WIRE | SPI_LOOP; - master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi); - master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN; - master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX; + master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST | + SPI_3WIRE; + master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); + master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; + master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; master->setup = stm32_spi_setup; master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; @@ -1233,7 +1974,7 @@ static int stm32_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct stm32_spi *spi = spi_master_get_devdata(master); - stm32_spi_disable(spi); + spi->cfg->disable(spi); if (master->dma_tx) dma_release_channel(master->dma_tx); diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 5f19016bbf10..b9fb6493cd6b 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -490,8 +490,8 @@ static void ti_qspi_enable_memory_map(struct spi_device *spi) ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) { 
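/*
 * Reviewer note (not part of the patch): for the spi-ti-qspi hunk below,
 * recall that the regmap API is regmap_update_bits(map, reg, mask, val),
 * i.e. the mask comes before the value and the update performed is
 * new = (old & ~mask) | (val & mask).  The original calls had the mask and
 * value arguments swapped; the change simply reorders them to match the API.
 */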
regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, - MEM_CS_EN(spi->chip_select), - MEM_CS_MASK); + MEM_CS_MASK, + MEM_CS_EN(spi->chip_select)); } qspi->mmap_enabled = true; } @@ -503,7 +503,7 @@ static void ti_qspi_disable_memory_map(struct spi_device *spi) ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG); if (qspi->ctrl_base) regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg, - 0, MEM_CS_MASK); + MEM_CS_MASK, 0); qspi->mmap_enabled = false; } diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 97d137591b18..fba3f180f233 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -92,7 +92,6 @@ #define PCH_MAX_SPBR 1023 /* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */ -#define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_ML7213_SPI 0x802c #define PCI_DEVICE_ID_ML7223_SPI 0x800F #define PCI_DEVICE_ID_ML7831_SPI 0x8816 @@ -1008,6 +1007,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) /* RX */ dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC); + if (!dma->sg_rx_p) + return; + sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */ /* offset, length setting */ sg = dma->sg_rx_p; @@ -1068,6 +1070,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) } dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC); + if (!dma->sg_tx_p) + return; + sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ /* offset, length setting */ sg = dma->sg_tx_p; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 9a7def7c3237..93986f879b09 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -578,7 +579,10 @@ int spi_add_device(struct spi_device *spi) goto done; } - if (ctlr->cs_gpios) + /* Descriptors take precedence */ + if (ctlr->cs_gpiods) + spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; + else if (ctlr->cs_gpios) spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; /* Drivers may modify this initial i/o setup, but will @@ -772,10 +776,21 @@ static void spi_set_cs(struct spi_device *spi, bool enable) if (spi->mode & SPI_CS_HIGH) enable = !enable; - if (gpio_is_valid(spi->cs_gpio)) { - /* Honour the SPI_NO_CS flag */ - if (!(spi->mode & SPI_NO_CS)) - gpio_set_value(spi->cs_gpio, !enable); + if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) { + /* + * Honour the SPI_NO_CS flag and invert the enable line, as + * active low is default for SPI. Execution paths that handle + * polarity inversion in gpiolib (such as device tree) will + * enforce active high using the SPI_CS_HIGH resulting in a + * double inversion through the code above. 
+ */ + if (!(spi->mode & SPI_NO_CS)) { + if (spi->cs_gpiod) + gpiod_set_value_cansleep(spi->cs_gpiod, + !enable); + else + gpio_set_value_cansleep(spi->cs_gpio, !enable); + } /* Some SPI masters need both GPIO CS & slave_select */ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && spi->controller->set_cs) @@ -1615,13 +1630,21 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, spi->mode |= SPI_CPHA; if (of_property_read_bool(nc, "spi-cpol")) spi->mode |= SPI_CPOL; - if (of_property_read_bool(nc, "spi-cs-high")) - spi->mode |= SPI_CS_HIGH; if (of_property_read_bool(nc, "spi-3wire")) spi->mode |= SPI_3WIRE; if (of_property_read_bool(nc, "spi-lsb-first")) spi->mode |= SPI_LSB_FIRST; + /* + * For descriptors associated with the device, polarity inversion is + * handled in the gpiolib, so all chip selects are "active high" in + * the logical sense, the gpiolib will invert the line if need be. + */ + if (ctlr->use_gpio_descriptors) + spi->mode |= SPI_CS_HIGH; + else if (of_property_read_bool(nc, "spi-cs-high")) + spi->mode |= SPI_CS_HIGH; + /* Device DUAL/QUAD mode */ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { switch (value) { @@ -2137,6 +2160,60 @@ static int of_spi_register_master(struct spi_controller *ctlr) } #endif +/** + * spi_get_gpio_descs() - grab chip select GPIOs for the master + * @ctlr: The SPI master to grab GPIO descriptors for + */ +static int spi_get_gpio_descs(struct spi_controller *ctlr) +{ + int nb, i; + struct gpio_desc **cs; + struct device *dev = &ctlr->dev; + + nb = gpiod_count(dev, "cs"); + ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); + + /* No GPIOs at all is fine, else return the error */ + if (nb == 0 || nb == -ENOENT) + return 0; + else if (nb < 0) + return nb; + + cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs), + GFP_KERNEL); + if (!cs) + return -ENOMEM; + ctlr->cs_gpiods = cs; + + for (i = 0; i < nb; i++) { + /* + * Most chipselects are active low, the inverted + * semantics are handled by special quirks in gpiolib, + * so initializing them GPIOD_OUT_LOW here means + * "unasserted", in most cases this will drive the physical + * line high. + */ + cs[i] = devm_gpiod_get_index_optional(dev, "cs", i, + GPIOD_OUT_LOW); + + if (cs[i]) { + /* + * If we find a CS GPIO, name it after the device and + * chip select line. + */ + char *gpioname; + + gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d", + dev_name(dev), i); + if (!gpioname) + return -ENOMEM; + gpiod_set_consumer_name(cs[i], gpioname); + } + } + + return 0; +} + static int spi_controller_check_ops(struct spi_controller *ctlr) { /* @@ -2199,9 +2276,21 @@ int spi_register_controller(struct spi_controller *ctlr) return status; if (!spi_controller_is_slave(ctlr)) { - status = of_spi_register_master(ctlr); - if (status) - return status; + if (ctlr->use_gpio_descriptors) { + status = spi_get_gpio_descs(ctlr); + if (status) + return status; + /* + * A controller using GPIO descriptors always + * supports SPI_CS_HIGH if need be. + */ + ctlr->mode_bits |= SPI_CS_HIGH; + } else { + /* Legacy code path for GPIOs from DT */ + status = of_spi_register_master(ctlr); + if (status) + return status; + } } /* even if it's just one always-selected device, there must @@ -2915,6 +3004,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * cs_change is set for each transfer. 
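 * (Reviewer note, not part of the original comment: the spi->cs_gpiod test
 * added below mirrors the legacy gpio_is_valid(spi->cs_gpio) check, so
 * controllers that opt into GPIO descriptor chip selects via
 * ctlr->use_gpio_descriptors still get SPI_CS_WORD emulated by splitting the
 * message into one-word transfers with cs_change set.)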
*/ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || + spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) { size_t maxsize; int ret; @@ -2961,6 +3051,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * it is not set for this transfer. * Set transfer tx_nbits and rx_nbits as single transfer default * (SPI_NBITS_SINGLE) if it is not set for this transfer. + * Ensure transfer word_delay is at least as long as that required by + * device itself. */ message->frame_length = 0; list_for_each_entry(xfer, &message->transfers, transfer_list) { @@ -3031,6 +3123,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) !(spi->mode & SPI_RX_QUAD)) return -EINVAL; } + + if (xfer->word_delay_usecs < spi->word_delay_usecs) + xfer->word_delay_usecs = spi->word_delay_usecs; } message->status = -EINPROGRESS; diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig index 0d3b70b3bda8..d48ed7c2c6c4 100644 --- a/drivers/spmi/Kconfig +++ b/drivers/spmi/Kconfig @@ -12,7 +12,7 @@ if SPMI config SPMI_MSM_PMIC_ARB tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)" - select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY depends on ARCH_QCOM || COMPILE_TEST depends on HAS_IOMEM default ARCH_QCOM diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 360b8218f322..928759242e42 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -666,7 +666,8 @@ static int qpnpint_get_irqchip_state(struct irq_data *d, return 0; } -static int qpnpint_irq_request_resources(struct irq_data *d) +static int qpnpint_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) { struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d); u16 periph = hwirq_to_per(d->hwirq); @@ -692,27 +693,25 @@ static struct irq_chip pmic_arb_irqchip = { .irq_set_type = qpnpint_irq_set_type, .irq_set_wake = qpnpint_irq_set_wake, .irq_get_irqchip_state = qpnpint_get_irqchip_state, - .irq_request_resources = qpnpint_irq_request_resources, .flags = IRQCHIP_MASK_ON_SUSPEND, }; -static int qpnpint_irq_domain_dt_translate(struct irq_domain *d, - struct device_node *controller, - const u32 *intspec, - unsigned int intsize, - unsigned long *out_hwirq, - unsigned int *out_type) +static int qpnpint_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *out_hwirq, + unsigned int *out_type) { struct spmi_pmic_arb *pmic_arb = d->host_data; + u32 *intspec = fwspec->param; u16 apid, ppid; int rc; dev_dbg(&pmic_arb->spmic->dev, "intspec[0] 0x%1x intspec[1] 0x%02x intspec[2] 0x%02x\n", intspec[0], intspec[1], intspec[2]); - if (irq_domain_get_of_node(d) != controller) + if (irq_domain_get_of_node(d) != pmic_arb->spmic->dev.of_node) return -EINVAL; - if (intsize != 4) + if (fwspec->param_count != 4) return -EINVAL; if (intspec[0] > 0xF || intspec[1] > 0xFF || intspec[2] > 0x7) return -EINVAL; @@ -740,17 +739,43 @@ static int qpnpint_irq_domain_dt_translate(struct irq_domain *d, return 0; } -static int qpnpint_irq_domain_map(struct irq_domain *d, - unsigned int virq, - irq_hw_number_t hwirq) + +static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb, + struct irq_domain *domain, unsigned int virq, + irq_hw_number_t hwirq, unsigned int type) { - struct spmi_pmic_arb *pmic_arb = d->host_data; + irq_flow_handler_t handler; + + dev_dbg(&pmic_arb->spmic->dev, "virq = %u, hwirq = %lu, type = %u\n", + virq, hwirq, type); + + if (type & IRQ_TYPE_EDGE_BOTH) + handler = handle_edge_irq; + else + 
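/*
 * Reviewer note (not part of the patch): the spmi-pmic-arb changes convert
 * the interrupt controller from the legacy .map/.xlate irq_domain_ops to the
 * hierarchical API (.alloc/.translate/.free/.activate), which is also why
 * the Kconfig entry now selects IRQ_DOMAIN_HIERARCHY instead of IRQ_DOMAIN.
 * irq_domain_set_info() installs the chip, chip data and flow handler in one
 * call; edge-type interrupts get handle_edge_irq() and everything else falls
 * back to handle_level_irq(), which is the assignment completed just below.
 */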
handler = handle_level_irq; + + irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb, + handler, NULL, NULL); +} + +static int qpnpint_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *data) +{ + struct spmi_pmic_arb *pmic_arb = domain->host_data; + struct irq_fwspec *fwspec = data; + irq_hw_number_t hwirq; + unsigned int type; + int ret, i; + + ret = qpnpint_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; - dev_dbg(&pmic_arb->spmic->dev, "virq = %u, hwirq = %lu\n", virq, hwirq); + for (i = 0; i < nr_irqs; i++) + qpnpint_irq_domain_map(pmic_arb, domain, virq + i, hwirq + i, + type); - irq_set_chip_and_handler(virq, &pmic_arb_irqchip, handle_level_irq); - irq_set_chip_data(virq, d->host_data); - irq_set_noprobe(virq); return 0; } @@ -1126,8 +1151,10 @@ static const struct pmic_arb_ver_ops pmic_arb_v5 = { }; static const struct irq_domain_ops pmic_arb_irq_domain_ops = { - .map = qpnpint_irq_domain_map, - .xlate = qpnpint_irq_domain_dt_translate, + .activate = qpnpint_irq_domain_activate, + .alloc = qpnpint_irq_domain_alloc, + .free = irq_domain_free_irqs_common, + .translate = qpnpint_irq_domain_translate, }; static int spmi_pmic_arb_probe(struct platform_device *pdev) diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index e4f608815c05..c0901b96cfe4 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -56,8 +56,6 @@ source "drivers/staging/iio/Kconfig" source "drivers/staging/sm750fb/Kconfig" -source "drivers/staging/xgifb/Kconfig" - source "drivers/staging/emxx_udc/Kconfig" source "drivers/staging/speakup/Kconfig" @@ -104,12 +102,16 @@ source "drivers/staging/pi433/Kconfig" source "drivers/staging/mt7621-pci/Kconfig" +source "drivers/staging/mt7621-pci-phy/Kconfig" + source "drivers/staging/mt7621-pinctrl/Kconfig" source "drivers/staging/mt7621-spi/Kconfig" source "drivers/staging/mt7621-dma/Kconfig" +source "drivers/staging/ralink-gdma/Kconfig" + source "drivers/staging/mt7621-mmc/Kconfig" source "drivers/staging/mt7621-eth/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 5868631e8f1b..57c6bce13ff4 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -20,7 +20,6 @@ obj-$(CONFIG_VT6656) += vt6656/ obj-$(CONFIG_VME_BUS) += vme/ obj-$(CONFIG_IIO) += iio/ obj-$(CONFIG_FB_SM750) += sm750fb/ -obj-$(CONFIG_FB_XGI) += xgifb/ obj-$(CONFIG_USB_EMXX) += emxx_udc/ obj-$(CONFIG_SPEAKUP) += speakup/ obj-$(CONFIG_MFD_NVEC) += nvec/ @@ -41,12 +40,14 @@ obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ obj-$(CONFIG_PI433) += pi433/ -obj-$(CONFIG_SOC_MT7621) += mt7621-pci/ -obj-$(CONFIG_SOC_MT7621) += mt7621-pinctrl/ -obj-$(CONFIG_SOC_MT7621) += mt7621-spi/ +obj-$(CONFIG_PCI_MT7621) += mt7621-pci/ +obj-$(CONFIG_PCI_MT7621_PHY) += mt7621-pci-phy/ +obj-$(CONFIG_PINCTRL_RT2880) += mt7621-pinctrl/ +obj-$(CONFIG_SPI_MT7621) += mt7621-spi/ obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ -obj-$(CONFIG_SOC_MT7621) += mt7621-mmc/ -obj-$(CONFIG_SOC_MT7621) += mt7621-eth/ +obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ +obj-$(CONFIG_MTK_MMC) += mt7621-mmc/ +obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 90a8a9f1ac7d..74d497d39c5a 100644 --- 
a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -75,6 +75,9 @@ struct ashmem_range { /* LRU list of unpinned pages, protected by ashmem_mutex */ static LIST_HEAD(ashmem_lru_list); +static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0); +static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait); + /* * long lru_count - The count of pages on our LRU list. * @@ -126,7 +129,8 @@ static inline bool page_range_in_range(struct ashmem_range *range, page_range_subsumes_range(range, start, end); } -static inline bool range_before_page(struct ashmem_range *range, size_t page) +static inline bool range_before_page(struct ashmem_range *range, + size_t page) { return range->pgend < page; } @@ -168,19 +172,15 @@ static inline void lru_del(struct ashmem_range *range) * @end: The ending page (inclusive) * * This function is protected by ashmem_mutex. - * - * Return: 0 if successful, or -ENOMEM if there is an error */ -static int range_alloc(struct ashmem_area *asma, - struct ashmem_range *prev_range, unsigned int purged, - size_t start, size_t end) +static void range_alloc(struct ashmem_area *asma, + struct ashmem_range *prev_range, unsigned int purged, + size_t start, size_t end, + struct ashmem_range **new_range) { - struct ashmem_range *range; - - range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); - if (!range) - return -ENOMEM; + struct ashmem_range *range = *new_range; + *new_range = NULL; range->asma = asma; range->pgstart = start; range->pgend = end; @@ -190,8 +190,6 @@ static int range_alloc(struct ashmem_area *asma, if (range_on_lru(range)) lru_add(range); - - return 0; } /** @@ -438,7 +436,6 @@ out: static unsigned long ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - struct ashmem_range *range, *next; unsigned long freed = 0; /* We might recurse into filesystem code, so bail out if necessary */ @@ -448,21 +445,33 @@ ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) if (!mutex_trylock(&ashmem_mutex)) return -1; - list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { + while (!list_empty(&ashmem_lru_list)) { + struct ashmem_range *range = + list_first_entry(&ashmem_lru_list, typeof(*range), lru); loff_t start = range->pgstart * PAGE_SIZE; loff_t end = (range->pgend + 1) * PAGE_SIZE; + struct file *f = range->asma->file; - range->asma->file->f_op->fallocate(range->asma->file, - FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start, end - start); + get_file(f); + atomic_inc(&ashmem_shrink_inflight); range->purged = ASHMEM_WAS_PURGED; lru_del(range); freed += range_size(range); + mutex_unlock(&ashmem_mutex); + f->f_op->fallocate(f, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, + start, end - start); + fput(f); + if (atomic_dec_and_test(&ashmem_shrink_inflight)) + wake_up_all(&ashmem_shrink_wait); + if (!mutex_trylock(&ashmem_mutex)) + goto out; if (--sc->nr_to_scan <= 0) break; } mutex_unlock(&ashmem_mutex); +out: return freed; } @@ -582,7 +591,8 @@ static int get_name(struct ashmem_area *asma, void __user *name) * * Caller must hold ashmem_mutex. */ -static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend, + struct ashmem_range **new_range) { struct ashmem_range *range, *next; int ret = ASHMEM_NOT_PURGED; @@ -635,7 +645,7 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) * second half and adjust the first chunk's endpoint. 
*/ range_alloc(asma, range, range->purged, - pgend + 1, range->pgend); + pgend + 1, range->pgend, new_range); range_shrink(range, range->pgstart, pgstart - 1); break; } @@ -649,7 +659,8 @@ static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) * * Caller must hold ashmem_mutex. */ -static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend, + struct ashmem_range **new_range) { struct ashmem_range *range, *next; unsigned int purged = ASHMEM_NOT_PURGED; @@ -675,7 +686,8 @@ restart: } } - return range_alloc(asma, range, purged, pgstart, pgend); + range_alloc(asma, range, purged, pgstart, pgend, new_range); + return 0; } /* @@ -708,11 +720,19 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, struct ashmem_pin pin; size_t pgstart, pgend; int ret = -EINVAL; + struct ashmem_range *range = NULL; if (copy_from_user(&pin, p, sizeof(pin))) return -EFAULT; + if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) { + range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); + if (!range) + return -ENOMEM; + } + mutex_lock(&ashmem_mutex); + wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight)); if (!asma->file) goto out_unlock; @@ -735,10 +755,10 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, switch (cmd) { case ASHMEM_PIN: - ret = ashmem_pin(asma, pgstart, pgend); + ret = ashmem_pin(asma, pgstart, pgend, &range); break; case ASHMEM_UNPIN: - ret = ashmem_unpin(asma, pgstart, pgend); + ret = ashmem_unpin(asma, pgstart, pgend, &range); break; case ASHMEM_GET_PIN_STATUS: ret = ashmem_get_pin_status(asma, pgstart, pgend); @@ -747,6 +767,8 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, out_unlock: mutex_unlock(&ashmem_mutex); + if (range) + kmem_cache_free(ashmem_range_cachep, range); return ret; } diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile index bb30bf8774a0..17f3a7569e3d 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o +obj-$(CONFIG_ION) += ion.o ion_heap.o obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c deleted file mode 100644 index a8d3cc412fb9..000000000000 --- a/drivers/staging/android/ion/ion-ioctl.c +++ /dev/null @@ -1,98 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2011 Google, Inc. 
- */ - -#include -#include -#include -#include - -#include "ion.h" - -union ion_ioctl_arg { - struct ion_allocation_data allocation; - struct ion_heap_query query; -}; - -static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg) -{ - switch (cmd) { - case ION_IOC_HEAP_QUERY: - if (arg->query.reserved0 || - arg->query.reserved1 || - arg->query.reserved2) - return -EINVAL; - break; - default: - break; - } - - return 0; -} - -/* fix up the cases where the ioctl direction bits are incorrect */ -static unsigned int ion_ioctl_dir(unsigned int cmd) -{ - switch (cmd) { - default: - return _IOC_DIR(cmd); - } -} - -long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int ret = 0; - unsigned int dir; - union ion_ioctl_arg data; - - dir = ion_ioctl_dir(cmd); - - if (_IOC_SIZE(cmd) > sizeof(data)) - return -EINVAL; - - /* - * The copy_from_user is unconditional here for both read and write - * to do the validate. If there is no write for the ioctl, the - * buffer is cleared - */ - if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) - return -EFAULT; - - ret = validate_ioctl_arg(cmd, &data); - if (ret) { - pr_warn_once("%s: ioctl validate failed\n", __func__); - return ret; - } - - if (!(dir & _IOC_WRITE)) - memset(&data, 0, sizeof(data)); - - switch (cmd) { - case ION_IOC_ALLOC: - { - int fd; - - fd = ion_alloc(data.allocation.len, - data.allocation.heap_id_mask, - data.allocation.flags); - if (fd < 0) - return fd; - - data.allocation.fd = fd; - - break; - } - case ION_IOC_HEAP_QUERY: - ret = ion_query_heaps(&data.query); - break; - default: - return -ENOTTY; - } - - if (dir & _IOC_READ) { - if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) - return -EFAULT; - } - return ret; -} diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index 6f5afab7c1a1..92c2914239e3 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/staging/android/ion/ion.c + * ION Memory Allocator * * Copyright (C) 2011 Google, Inc. 
*/ -#include #include #include #include @@ -14,10 +13,8 @@ #include #include #include -#include #include #include -#include #include #include #include @@ -390,7 +387,7 @@ static const struct dma_buf_ops dma_buf_ops = { .unmap = ion_dma_buf_kunmap, }; -int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags) +static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags) { struct ion_device *dev = internal_dev; struct ion_buffer *buffer = NULL; @@ -447,7 +444,7 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags) return fd; } -int ion_query_heaps(struct ion_heap_query *query) +static int ion_query_heaps(struct ion_heap_query *query) { struct ion_device *dev = internal_dev; struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps); @@ -492,6 +489,81 @@ out: return ret; } +union ion_ioctl_arg { + struct ion_allocation_data allocation; + struct ion_heap_query query; +}; + +static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg) +{ + switch (cmd) { + case ION_IOC_HEAP_QUERY: + if (arg->query.reserved0 || + arg->query.reserved1 || + arg->query.reserved2) + return -EINVAL; + break; + default: + break; + } + + return 0; +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret = 0; + union ion_ioctl_arg data; + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + /* + * The copy_from_user is unconditional here for both read and write + * to do the validate. If there is no write for the ioctl, the + * buffer is cleared + */ + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + ret = validate_ioctl_arg(cmd, &data); + if (ret) { + pr_warn_once("%s: ioctl validate failed\n", __func__); + return ret; + } + + if (!(_IOC_DIR(cmd) & _IOC_WRITE)) + memset(&data, 0, sizeof(data)); + + switch (cmd) { + case ION_IOC_ALLOC: + { + int fd; + + fd = ion_alloc(data.allocation.len, + data.allocation.heap_id_mask, + data.allocation.flags); + if (fd < 0) + return fd; + + data.allocation.fd = fd; + + break; + } + case ION_IOC_HEAP_QUERY: + ret = ion_query_heaps(&data.query); + break; + default: + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) + return -EFAULT; + } + return ret; +} + static const struct file_operations ion_fops = { .owner = THIS_MODULE, .unlocked_ioctl = ion_ioctl, diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index 47b594cf1ac9..e291299fd35f 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * drivers/staging/android/ion/ion.h + * ION Memory Allocator kernel interface header * * Copyright (C) 2011 Google, Inc. */ @@ -21,33 +21,10 @@ #include "../uapi/ion.h" -/** - * struct ion_platform_heap - defines a heap in the given platform - * @type: type of the heap from ion_heap_type enum - * @id: unique identifier for heap. When allocating higher numb ers - * will be allocated from first. At allocation these are passed - * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS. - * @name: used for debug purposes - * @base: base address of heap in physical memory if applicable - * @size: size of the heap in bytes if applicable - * @priv: private info passed from the board file - * - * Provided by the board file. 
- */ -struct ion_platform_heap { - enum ion_heap_type type; - unsigned int id; - const char *name; - phys_addr_t base; - size_t size; - phys_addr_t align; - void *priv; -}; - /** * struct ion_buffer - metadata for a particular buffer - * @ref: reference count * @node: node in the ion_device buffers tree + * @list: element in list of deferred freeable buffers * @dev: back pointer to the ion_device * @heap: back pointer to the heap the buffer came from * @flags: buffer specific flags @@ -58,7 +35,8 @@ struct ion_platform_heap { * @lock: protects the buffers cnt fields * @kmap_cnt: number of times the buffer is mapped to the kernel * @vaddr: the kernel mapping if kmap_cnt is not zero - * @sg_table: the sg table for the buffer if dmap_cnt is not zero + * @sg_table: the sg table for the buffer + * @attachments: list of devices attached to this buffer */ struct ion_buffer { union { @@ -174,12 +152,16 @@ struct ion_heap { unsigned long flags; unsigned int id; const char *name; + + /* deferred free support */ struct shrinker shrinker; struct list_head free_list; size_t free_list_size; spinlock_t free_lock; wait_queue_head_t waitqueue; struct task_struct *task; + + /* heap statistics */ u64 num_of_buffers; u64 num_of_alloc_bytes; u64 alloc_bytes_wm; @@ -205,10 +187,6 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, int ion_heap_buffer_zero(struct ion_buffer *buffer); int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); -int ion_alloc(size_t len, - unsigned int heap_id_mask, - unsigned int flags); - /** * ion_heap_init_shrinker * @heap: the heap @@ -330,8 +308,4 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page); int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, int nr_to_scan); -long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); - -int ion_query_heaps(struct ion_heap_query *query); - #endif /* _ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c index e129237a0417..bb9d614767a2 100644 --- a/drivers/staging/android/ion/ion_carveout_heap.c +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/staging/android/ion/ion_carveout_heap.c + * ION Memory Allocator carveout heap helper * * Copyright (C) 2011 Google, Inc. 
*/ -#include + #include #include #include @@ -12,7 +12,7 @@ #include #include #include -#include + #include "ion.h" #define ION_CARVEOUT_ALLOCATE_FAIL -1 @@ -20,7 +20,6 @@ struct ion_carveout_heap { struct ion_heap heap; struct gen_pool *pool; - phys_addr_t base; }; static phys_addr_t ion_carveout_allocate(struct ion_heap *heap, @@ -44,6 +43,7 @@ static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr, if (addr == ION_CARVEOUT_ALLOCATE_FAIL) return; + gen_pool_free(carveout_heap->pool, addr, size); } @@ -103,17 +103,14 @@ static struct ion_heap_ops carveout_heap_ops = { .unmap_kernel = ion_heap_unmap_kernel, }; -struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +struct ion_heap *ion_carveout_heap_create(phys_addr_t base, size_t size) { struct ion_carveout_heap *carveout_heap; int ret; struct page *page; - size_t size; - - page = pfn_to_page(PFN_DOWN(heap_data->base)); - size = heap_data->size; + page = pfn_to_page(PFN_DOWN(base)); ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); if (ret) return ERR_PTR(ret); @@ -127,9 +124,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) kfree(carveout_heap); return ERR_PTR(-ENOMEM); } - carveout_heap->base = heap_data->base; - gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, - -1); + gen_pool_add(carveout_heap->pool, base, size, -1); carveout_heap->heap.ops = &carveout_heap_ops; carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c index 159d72f5bc42..3cdde9c1a717 100644 --- a/drivers/staging/android/ion/ion_chunk_heap.c +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -1,23 +1,22 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/staging/android/ion/ion_chunk_heap.c + * ION memory allocator chunk heap helper * * Copyright (C) 2012 Google, Inc. 
*/ + #include #include #include -#include #include #include #include -#include + #include "ion.h" struct ion_chunk_heap { struct ion_heap heap; struct gen_pool *pool; - phys_addr_t base; unsigned long chunk_size; unsigned long size; unsigned long allocated; @@ -108,16 +107,13 @@ static struct ion_heap_ops chunk_heap_ops = { .unmap_kernel = ion_heap_unmap_kernel, }; -struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) +struct ion_heap *ion_chunk_heap_create(phys_addr_t base, size_t size, size_t chunk_size) { struct ion_chunk_heap *chunk_heap; int ret; struct page *page; - size_t size; - - page = pfn_to_page(PFN_DOWN(heap_data->base)); - size = heap_data->size; + page = pfn_to_page(PFN_DOWN(base)); ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); if (ret) return ERR_PTR(ret); @@ -126,23 +122,21 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) if (!chunk_heap) return ERR_PTR(-ENOMEM); - chunk_heap->chunk_size = (unsigned long)heap_data->priv; + chunk_heap->chunk_size = chunk_size; chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + PAGE_SHIFT, -1); if (!chunk_heap->pool) { ret = -ENOMEM; goto error_gen_pool_create; } - chunk_heap->base = heap_data->base; - chunk_heap->size = heap_data->size; + chunk_heap->size = size; chunk_heap->allocated = 0; - gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); + gen_pool_add(chunk_heap->pool, base, size, -1); chunk_heap->heap.ops = &chunk_heap_ops; chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; - pr_debug("%s: base %pa size %zu\n", __func__, - &chunk_heap->base, heap_data->size); + pr_debug("%s: base %pa size %zu\n", __func__, &base, size); return &chunk_heap->heap; @@ -150,4 +144,3 @@ error_gen_pool_create: kfree(chunk_heap); return ERR_PTR(ret); } - diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 3fafd013d80a..bf65e67ef9d8 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/staging/android/ion/ion_cma_heap.c + * ION Memory Allocator CMA heap exporter * * Copyright (C) Linaro 2012 * Author: for ST-Ericsson. @@ -111,10 +111,6 @@ static struct ion_heap *__ion_cma_heap_create(struct cma *cma) return ERR_PTR(-ENOMEM); cma_heap->heap.ops = &ion_cma_ops; - /* - * get device from private heaps data, later it will be - * used to make the link with reserved CMA memory - */ cma_heap->cma = cma; cma_heap->heap.type = ION_HEAP_TYPE_DMA; return &cma_heap->heap; diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 31db510018a9..473b465724f1 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/staging/android/ion/ion_heap.c + * ION Memory Allocator generic heap helpers * * Copyright (C) 2011 Google, Inc. 
*/ @@ -14,6 +14,7 @@ #include #include #include + #include "ion.h" void *ion_heap_map_kernel(struct ion_heap *heap, @@ -92,6 +93,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, if (addr >= vma->vm_end) return 0; } + return 0; } @@ -254,6 +256,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap) return PTR_ERR_OR_ZERO(heap->task); } sched_setscheduler(heap->task, SCHED_IDLE, ¶m); + return 0; } @@ -265,8 +268,10 @@ static unsigned long ion_heap_shrink_count(struct shrinker *shrinker, int total = 0; total = ion_heap_freelist_size(heap) / PAGE_SIZE; + if (heap->ops->shrink) total += heap->ops->shrink(heap, sc->gfp_mask, 0); + return total; } @@ -295,6 +300,7 @@ static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker, if (heap->ops->shrink) freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); + return freed; } diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 0d2a95957ee8..fd4995fb676e 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/staging/android/ion/ion_mem_pool.c + * ION Memory Allocator page pool helpers * * Copyright (C) 2011 Google, Inc. */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 0383f7548d48..aa8d8425be25 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * drivers/staging/android/ion/ion_system_heap.c + * ION Memory Allocator system heap exporter * * Copyright (C) 2011 Google, Inc. */ @@ -13,6 +13,7 @@ #include #include #include + #include "ion.h" #define NUM_ORDERS ARRAY_SIZE(orders) @@ -223,10 +224,10 @@ static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) static int ion_system_heap_create_pools(struct ion_page_pool **pools) { int i; - gfp_t gfp_flags = low_order_gfp_flags; for (i = 0; i < NUM_ORDERS; i++) { struct ion_page_pool *pool; + gfp_t gfp_flags = low_order_gfp_flags; if (orders[i] > 4) gfp_flags = high_order_gfp_flags; @@ -236,6 +237,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools) goto err_create_pool; pools[i] = pool; } + return 0; err_create_pool: @@ -274,6 +276,7 @@ static int ion_system_heap_create(void) heap->name = "ion_system_heap"; ion_device_add_heap(heap); + return 0; } device_initcall(ion_system_heap_create); @@ -355,6 +358,7 @@ static struct ion_heap *__ion_system_contig_heap_create(void) heap->ops = &kmalloc_ops; heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; heap->name = "ion_system_contig_heap"; + return heap; } @@ -367,7 +371,7 @@ static int ion_system_contig_heap_create(void) return PTR_ERR(heap); ion_device_add_heap(heap); + return 0; } device_initcall(ion_system_contig_heap_create); - diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h index 5d7009884c13..46c93fcb46d6 100644 --- a/drivers/staging/android/uapi/ion.h +++ b/drivers/staging/android/uapi/ion.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * drivers/staging/android/uapi/ion.h * diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c index 22571abcaa4e..8a75bd27c413 100644 --- a/drivers/staging/android/vsoc.c +++ b/drivers/staging/android/vsoc.c @@ -29,7 +29,6 @@ #include #include #include -#include #include 
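/*
 * Reviewer note (not part of the patch): the uapi/ion.h hunk above changes
 * the SPDX tag to "GPL-2.0 WITH Linux-syscall-note".  UAPI headers carry
 * this syscall-note exception so user-space programs may include them
 * without the GPL extending to the including program; kernel-internal files
 * keep the plain tag.  Separately, the ion_system_heap.c change above moves
 * the gfp_flags declaration inside the per-order loop, so lower orders no
 * longer inherit high_order_gfp_flags from an earlier iteration (assuming
 * orders[] is listed from highest to lowest, as in mainline).
 */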
#include #include "uapi/vsoc_shm.h" diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 5d2fcbfe02af..0caae4a5c471 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -1605,9 +1605,8 @@ static int do_insn_ioctl(struct comedi_device *dev, unsigned int n_data = MIN_SAMPLES; int ret = 0; - if (copy_from_user(&insn, arg, sizeof(insn))) { + if (copy_from_user(&insn, arg, sizeof(insn))) return -EFAULT; - } n_data = max(n_data, insn.n); diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c index 4e72a0778086..a9d052bfda38 100644 --- a/drivers/staging/comedi/drivers/cb_pcimdas.c +++ b/drivers/staging/comedi/drivers/cb_pcimdas.c @@ -252,9 +252,9 @@ static int cb_pcimdas_di_insn_bits(struct comedi_device *dev, } static int cb_pcimdas_do_insn_bits(struct comedi_device *dev, - struct comedi_subdevice *s, - struct comedi_insn *insn, - unsigned int *data) + struct comedi_subdevice *s, + struct comedi_insn *insn, + unsigned int *data) { struct cb_pcimdas_private *devpriv = dev->private; diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c index e70a461e723f..405573e927cf 100644 --- a/drivers/staging/comedi/drivers/ni_660x.c +++ b/drivers/staging/comedi/drivers/ni_660x.c @@ -656,6 +656,7 @@ static int ni_660x_set_pfi_routing(struct comedi_device *dev, case NI_660X_PFI_OUTPUT_DIO: if (chan > 31) return -EINVAL; + break; default: return -EINVAL; } diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index b9a0dc6eac44..4bdef87d5dd7 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c @@ -49,116 +49,117 @@ /* defines for the PCI-DIO-32HS */ -#define Window_Address 4 /* W */ -#define Interrupt_And_Window_Status 4 /* R */ -#define IntStatus1 BIT(0) -#define IntStatus2 BIT(1) -#define WindowAddressStatus_mask 0x7c - -#define Master_DMA_And_Interrupt_Control 5 /* W */ -#define InterruptLine(x) ((x) & 3) -#define OpenInt BIT(2) -#define Group_Status 5 /* R */ -#define DataLeft BIT(0) -#define Req BIT(2) -#define StopTrig BIT(3) - -#define Group_1_Flags 6 /* R */ -#define Group_2_Flags 7 /* R */ -#define TransferReady BIT(0) -#define CountExpired BIT(1) -#define Waited BIT(5) -#define PrimaryTC BIT(6) -#define SecondaryTC BIT(7) +#define WINDOW_ADDRESS 4 /* W */ +#define INTERRUPT_AND_WINDOW_STATUS 4 /* R */ +#define INT_STATUS_1 BIT(0) +#define INT_STATUS_2 BIT(1) +#define WINDOW_ADDRESS_STATUS_MASK 0x7c + +#define MASTER_DMA_AND_INTERRUPT_CONTROL 5 /* W */ +#define INTERRUPT_LINE(x) ((x) & 3) +#define OPEN_INT BIT(2) +#define GROUP_STATUS 5 /* R */ +#define DATA_LEFT BIT(0) +#define REQ BIT(2) +#define STOP_TRIG BIT(3) + +#define GROUP_1_FLAGS 6 /* R */ +#define GROUP_2_FLAGS 7 /* R */ +#define TRANSFER_READY BIT(0) +#define COUNT_EXPIRED BIT(1) +#define WAITED BIT(5) +#define PRIMARY_TC BIT(6) +#define SECONDARY_TC BIT(7) /* #define SerialRose */ /* #define ReqRose */ /* #define Paused */ -#define Group_1_First_Clear 6 /* W */ -#define Group_2_First_Clear 7 /* W */ -#define ClearWaited BIT(3) -#define ClearPrimaryTC BIT(4) -#define ClearSecondaryTC BIT(5) -#define DMAReset BIT(6) -#define FIFOReset BIT(7) -#define ClearAll 0xf8 - -#define Group_1_FIFO 8 /* W */ -#define Group_2_FIFO 12 /* W */ - -#define Transfer_Count 20 -#define Chip_ID_D 24 -#define Chip_ID_I 25 -#define Chip_ID_O 26 -#define Chip_Version 27 -#define Port_IO(x) (28 + 
(x)) -#define Port_Pin_Directions(x) (32 + (x)) -#define Port_Pin_Mask(x) (36 + (x)) -#define Port_Pin_Polarities(x) (40 + (x)) - -#define Master_Clock_Routing 45 -#define RTSIClocking(x) (((x) & 3) << 4) - -#define Group_1_Second_Clear 46 /* W */ -#define Group_2_Second_Clear 47 /* W */ -#define ClearExpired BIT(0) - -#define Port_Pattern(x) (48 + (x)) - -#define Data_Path 64 -#define FIFOEnableA BIT(0) -#define FIFOEnableB BIT(1) -#define FIFOEnableC BIT(2) -#define FIFOEnableD BIT(3) -#define Funneling(x) (((x) & 3) << 4) -#define GroupDirection BIT(7) - -#define Protocol_Register_1 65 -#define OpMode Protocol_Register_1 -#define RunMode(x) ((x) & 7) -#define Numbered BIT(3) - -#define Protocol_Register_2 66 -#define ClockReg Protocol_Register_2 -#define ClockLine(x) (((x) & 3) << 5) -#define InvertStopTrig BIT(7) -#define DataLatching(x) (((x) & 3) << 5) - -#define Protocol_Register_3 67 -#define Sequence Protocol_Register_3 - -#define Protocol_Register_14 68 /* 16 bit */ -#define ClockSpeed Protocol_Register_14 - -#define Protocol_Register_4 70 -#define ReqReg Protocol_Register_4 -#define ReqConditioning(x) (((x) & 7) << 3) - -#define Protocol_Register_5 71 -#define BlockMode Protocol_Register_5 +#define GROUP_1_FIRST_CLEAR 6 /* W */ +#define GROUP_2_FIRST_CLEAR 7 /* W */ +#define CLEAR_WAITED BIT(3) +#define CLEAR_PRIMARY_TC BIT(4) +#define CLEAR_SECONDARY_TC BIT(5) +#define DMA_RESET BIT(6) +#define FIFO_RESET BIT(7) +#define CLEAR_ALL 0xf8 + +#define GROUP_1_FIFO 8 /* W */ +#define GROUP_2_FIFO 12 /* W */ + +#define TRANSFER_COUNT 20 +#define CHIP_ID_D 24 +#define CHIP_ID_I 25 +#define CHIP_ID_O 26 +#define CHIP_VERSION 27 +#define PORT_IO(x) (28 + (x)) +#define PORT_PIN_DIRECTIONS(x) (32 + (x)) +#define PORT_PIN_MASK(x) (36 + (x)) +#define PORT_PIN_POLARITIES(x) (40 + (x)) + +#define MASTER_CLOCK_ROUTING 45 +#define RTSI_CLOCKING(x) (((x) & 3) << 4) + +#define GROUP_1_SECOND_CLEAR 46 /* W */ +#define GROUP_2_SECOND_CLEAR 47 /* W */ +#define CLEAR_EXPIRED BIT(0) + +#define PORT_PATTERN(x) (48 + (x)) + +#define DATA_PATH 64 +#define FIFO_ENABLE_A BIT(0) +#define FIFO_ENABLE_B BIT(1) +#define FIFO_ENABLE_C BIT(2) +#define FIFO_ENABLE_D BIT(3) +#define FUNNELING(x) (((x) & 3) << 4) +#define GROUP_DIRECTION BIT(7) + +#define PROTOCOL_REGISTER_1 65 +#define OP_MODE PROTOCOL_REGISTER_1 +#define RUN_MODE(x) ((x) & 7) +#define NUMBERED BIT(3) + +#define PROTOCOL_REGISTER_2 66 +#define CLOCK_REG PROTOCOL_REGISTER_2 +#define CLOCK_LINE(x) (((x) & 3) << 5) +#define INVERT_STOP_TRIG BIT(7) +#define DATA_LATCHING(x) (((x) & 3) << 5) + +#define PROTOCOL_REGISTER_3 67 +#define SEQUENCE PROTOCOL_REGISTER_3 + +#define PROTOCOL_REGISTER_14 68 /* 16 bit */ +#define CLOCK_SPEED PROTOCOL_REGISTER_14 + +#define PROTOCOL_REGISTER_4 70 +#define REQ_REG PROTOCOL_REGISTER_4 +#define REQ_CONDITIONING(x) (((x) & 7) << 3) + +#define PROTOCOL_REGISTER_5 71 +#define BLOCK_MODE PROTOCOL_REGISTER_5 #define FIFO_Control 72 -#define ReadyLevel(x) ((x) & 7) - -#define Protocol_Register_6 73 -#define LinePolarities Protocol_Register_6 -#define InvertAck BIT(0) -#define InvertReq BIT(1) -#define InvertClock BIT(2) -#define InvertSerial BIT(3) -#define OpenAck BIT(4) -#define OpenClock BIT(5) - -#define Protocol_Register_7 74 -#define AckSer Protocol_Register_7 -#define AckLine(x) (((x) & 3) << 2) -#define ExchangePins BIT(7) - -#define Interrupt_Control 75 - /* bits same as flags */ - -#define DMA_Line_Control_Group1 76 -#define DMA_Line_Control_Group2 108 +#define READY_LEVEL(x) ((x) & 7) + +#define 
PROTOCOL_REGISTER_6 73 +#define LINE_POLARITIES PROTOCOL_REGISTER_6 +#define INVERT_ACK BIT(0) +#define INVERT_REQ BIT(1) +#define INVERT_CLOCK BIT(2) +#define INVERT_SERIAL BIT(3) +#define OPEN_ACK BIT(4) +#define OPEN_CLOCK BIT(5) + +#define PROTOCOL_REGISTER_7 74 +#define ACK_SER PROTOCOL_REGISTER_7 +#define ACK_LINE(x) (((x) & 3) << 2) +#define EXCHANGE_PINS BIT(7) + +#define INTERRUPT_CONTROL 75 +/* bits same as flags */ + +#define DMA_LINE_CONTROL_GROUP1 76 +#define DMA_LINE_CONTROL_GROUP2 108 + /* channel zero is none */ static inline unsigned int primary_DMAChannel_bits(unsigned int channel) { @@ -170,41 +171,41 @@ static inline unsigned int secondary_DMAChannel_bits(unsigned int channel) return (channel << 2) & 0xc; } -#define Transfer_Size_Control 77 -#define TransferWidth(x) ((x) & 3) -#define TransferLength(x) (((x) & 3) << 3) -#define RequireRLevel BIT(5) +#define TRANSFER_SIZE_CONTROL 77 +#define TRANSFER_WIDTH(x) ((x) & 3) +#define TRANSFER_LENGTH(x) (((x) & 3) << 3) +#define REQUIRE_R_LEVEL BIT(5) -#define Protocol_Register_15 79 -#define DAQOptions Protocol_Register_15 -#define StartSource(x) ((x) & 0x3) -#define InvertStart BIT(2) -#define StopSource(x) (((x) & 0x3) << 3) -#define ReqStart BIT(6) -#define PreStart BIT(7) +#define PROTOCOL_REGISTER_15 79 +#define DAQ_OPTIONS PROTOCOL_REGISTER_15 +#define START_SOURCE(x) ((x) & 0x3) +#define INVERT_START BIT(2) +#define STOP_SOURCE(x) (((x) & 0x3) << 3) +#define REQ_START BIT(6) +#define PRE_START BIT(7) -#define Pattern_Detection 81 -#define DetectionMethod BIT(0) -#define InvertMatch BIT(1) -#define IE_Pattern_Detection BIT(2) +#define PATTERN_DETECTION 81 +#define DETECTION_METHOD BIT(0) +#define INVERT_MATCH BIT(1) +#define IE_PATTERN_DETECTION BIT(2) -#define Protocol_Register_9 82 -#define ReqDelay Protocol_Register_9 +#define PROTOCOL_REGISTER_9 82 +#define REQ_DELAY PROTOCOL_REGISTER_9 -#define Protocol_Register_10 83 -#define ReqNotDelay Protocol_Register_10 +#define PROTOCOL_REGISTER_10 83 +#define REQ_NOT_DELAY PROTOCOL_REGISTER_10 -#define Protocol_Register_11 84 -#define AckDelay Protocol_Register_11 +#define PROTOCOL_REGISTER_11 84 +#define ACK_DELAY PROTOCOL_REGISTER_11 -#define Protocol_Register_12 85 -#define AckNotDelay Protocol_Register_12 +#define PROTOCOL_REGISTER_12 85 +#define ACK_NOT_DELAY PROTOCOL_REGISTER_12 -#define Protocol_Register_13 86 -#define Data1Delay Protocol_Register_13 +#define PROTOCOL_REGISTER_13 86 +#define DATA_1_DELAY PROTOCOL_REGISTER_13 -#define Protocol_Register_8 88 /* 32 bit */ -#define StartDelay Protocol_Register_8 +#define PROTOCOL_REGISTER_8 88 /* 32 bit */ +#define START_DELAY PROTOCOL_REGISTER_8 /* Firmware files for PCI-6524 */ #define FW_PCI_6534_MAIN "ni6534a.bin" @@ -246,9 +247,10 @@ enum FPGA_Control_Bits { #define TIMER_BASE 50 /* nanoseconds */ #ifdef USE_DMA -#define IntEn (CountExpired | Waited | PrimaryTC | SecondaryTC) +#define INT_EN (COUNT_EXPIRED | WAITED | PRIMARY_TC | SECONDARY_TC) #else -#define IntEn (TransferReady | CountExpired | Waited | PrimaryTC | SecondaryTC) +#define INT_EN (TRANSFER_READY | COUNT_EXPIRED | WAITED \ + | PRIMARY_TC | SECONDARY_TC) #endif enum nidio_boardid { @@ -283,7 +285,7 @@ struct nidio96_private { struct mite *mite; int boardtype; int dio; - unsigned short OpModeBits; + unsigned short OP_MODEBits; struct mite_channel *di_mite_chan; struct mite_ring *di_mite_ring; spinlock_t mite_channel_lock; @@ -307,7 +309,7 @@ static int ni_pcidio_request_di_mite_channel(struct comedi_device *dev) devpriv->di_mite_chan->dir = COMEDI_INPUT; 
writeb(primary_DMAChannel_bits(devpriv->di_mite_chan->channel) | secondary_DMAChannel_bits(devpriv->di_mite_chan->channel), - dev->mmio + DMA_Line_Control_Group1); + dev->mmio + DMA_LINE_CONTROL_GROUP1); mmiowb(); spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); return 0; @@ -324,7 +326,7 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev) devpriv->di_mite_chan = NULL; writeb(primary_DMAChannel_bits(0) | secondary_DMAChannel_bits(0), - dev->mmio + DMA_Line_Control_Group1); + dev->mmio + DMA_LINE_CONTROL_GROUP1); mmiowb(); } spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags); @@ -391,8 +393,8 @@ static irqreturn_t nidio_interrupt(int irq, void *d) /* Lock to avoid race with comedi_poll */ spin_lock(&dev->spinlock); - status = readb(dev->mmio + Interrupt_And_Window_Status); - flags = readb(dev->mmio + Group_1_Flags); + status = readb(dev->mmio + INTERRUPT_AND_WINDOW_STATUS); + flags = readb(dev->mmio + GROUP_1_FLAGS); spin_lock(&devpriv->mite_channel_lock); if (devpriv->di_mite_chan) { @@ -401,63 +403,63 @@ static irqreturn_t nidio_interrupt(int irq, void *d) } spin_unlock(&devpriv->mite_channel_lock); - while (status & DataLeft) { + while (status & DATA_LEFT) { work++; if (work > 20) { dev_dbg(dev->class_dev, "too much work in interrupt\n"); writeb(0x00, - dev->mmio + Master_DMA_And_Interrupt_Control); + dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL); break; } - flags &= IntEn; + flags &= INT_EN; - if (flags & TransferReady) { - while (flags & TransferReady) { + if (flags & TRANSFER_READY) { + while (flags & TRANSFER_READY) { work++; if (work > 100) { dev_dbg(dev->class_dev, "too much work in interrupt\n"); writeb(0x00, dev->mmio + - Master_DMA_And_Interrupt_Control + MASTER_DMA_AND_INTERRUPT_CONTROL ); goto out; } - auxdata = readl(dev->mmio + Group_1_FIFO); + auxdata = readl(dev->mmio + GROUP_1_FIFO); comedi_buf_write_samples(s, &auxdata, 1); - flags = readb(dev->mmio + Group_1_Flags); + flags = readb(dev->mmio + GROUP_1_FLAGS); } } - if (flags & CountExpired) { - writeb(ClearExpired, dev->mmio + Group_1_Second_Clear); + if (flags & COUNT_EXPIRED) { + writeb(CLEAR_EXPIRED, dev->mmio + GROUP_1_SECOND_CLEAR); async->events |= COMEDI_CB_EOA; - writeb(0x00, dev->mmio + OpMode); + writeb(0x00, dev->mmio + OP_MODE); break; - } else if (flags & Waited) { - writeb(ClearWaited, dev->mmio + Group_1_First_Clear); + } else if (flags & WAITED) { + writeb(CLEAR_WAITED, dev->mmio + GROUP_1_FIRST_CLEAR); async->events |= COMEDI_CB_ERROR; break; - } else if (flags & PrimaryTC) { - writeb(ClearPrimaryTC, - dev->mmio + Group_1_First_Clear); + } else if (flags & PRIMARY_TC) { + writeb(CLEAR_PRIMARY_TC, + dev->mmio + GROUP_1_FIRST_CLEAR); async->events |= COMEDI_CB_EOA; - } else if (flags & SecondaryTC) { - writeb(ClearSecondaryTC, - dev->mmio + Group_1_First_Clear); + } else if (flags & SECONDARY_TC) { + writeb(CLEAR_SECONDARY_TC, + dev->mmio + GROUP_1_FIRST_CLEAR); async->events |= COMEDI_CB_EOA; } - flags = readb(dev->mmio + Group_1_Flags); - status = readb(dev->mmio + Interrupt_And_Window_Status); + flags = readb(dev->mmio + GROUP_1_FLAGS); + status = readb(dev->mmio + INTERRUPT_AND_WINDOW_STATUS); } out: comedi_handle_events(dev, s); #if 0 if (!tag) - writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control); + writeb(0x03, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL); #endif spin_unlock(&dev->spinlock); @@ -484,7 +486,7 @@ static int ni_pcidio_insn_config(struct comedi_device *dev, if (ret) return ret; - writel(s->io_bits, dev->mmio + Port_Pin_Directions(0)); 
+ writel(s->io_bits, dev->mmio + PORT_PIN_DIRECTIONS(0)); return insn->n; } @@ -495,9 +497,9 @@ static int ni_pcidio_insn_bits(struct comedi_device *dev, unsigned int *data) { if (comedi_dio_update_state(s, data)) - writel(s->state, dev->mmio + Port_IO(0)); + writel(s->state, dev->mmio + PORT_IO(0)); - data[1] = readl(dev->mmio + Port_IO(0)); + data[1] = readl(dev->mmio + PORT_IO(0)); return insn->n; } @@ -609,7 +611,7 @@ static int ni_pcidio_inttrig(struct comedi_device *dev, if (trig_num != cmd->start_arg) return -EINVAL; - writeb(devpriv->OpModeBits, dev->mmio + OpMode); + writeb(devpriv->OP_MODEBits, dev->mmio + OP_MODE); s->async->inttrig = NULL; return 1; @@ -621,78 +623,78 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) struct comedi_cmd *cmd = &s->async->cmd; /* XXX configure ports for input */ - writel(0x0000, dev->mmio + Port_Pin_Directions(0)); + writel(0x0000, dev->mmio + PORT_PIN_DIRECTIONS(0)); if (1) { /* enable fifos A B C D */ - writeb(0x0f, dev->mmio + Data_Path); + writeb(0x0f, dev->mmio + DATA_PATH); /* set transfer width a 32 bits */ - writeb(TransferWidth(0) | TransferLength(0), - dev->mmio + Transfer_Size_Control); + writeb(TRANSFER_WIDTH(0) | TRANSFER_LENGTH(0), + dev->mmio + TRANSFER_SIZE_CONTROL); } else { - writeb(0x03, dev->mmio + Data_Path); - writeb(TransferWidth(3) | TransferLength(0), - dev->mmio + Transfer_Size_Control); + writeb(0x03, dev->mmio + DATA_PATH); + writeb(TRANSFER_WIDTH(3) | TRANSFER_LENGTH(0), + dev->mmio + TRANSFER_SIZE_CONTROL); } /* protocol configuration */ if (cmd->scan_begin_src == TRIG_TIMER) { /* page 4-5, "input with internal REQs" */ - writeb(0, dev->mmio + OpMode); - writeb(0x00, dev->mmio + ClockReg); - writeb(1, dev->mmio + Sequence); - writeb(0x04, dev->mmio + ReqReg); - writeb(4, dev->mmio + BlockMode); - writeb(3, dev->mmio + LinePolarities); - writeb(0xc0, dev->mmio + AckSer); + writeb(0, dev->mmio + OP_MODE); + writeb(0x00, dev->mmio + CLOCK_REG); + writeb(1, dev->mmio + SEQUENCE); + writeb(0x04, dev->mmio + REQ_REG); + writeb(4, dev->mmio + BLOCK_MODE); + writeb(3, dev->mmio + LINE_POLARITIES); + writeb(0xc0, dev->mmio + ACK_SER); writel(ni_pcidio_ns_to_timer(&cmd->scan_begin_arg, CMDF_ROUND_NEAREST), - dev->mmio + StartDelay); - writeb(1, dev->mmio + ReqDelay); - writeb(1, dev->mmio + ReqNotDelay); - writeb(1, dev->mmio + AckDelay); - writeb(0x0b, dev->mmio + AckNotDelay); - writeb(0x01, dev->mmio + Data1Delay); + dev->mmio + START_DELAY); + writeb(1, dev->mmio + REQ_DELAY); + writeb(1, dev->mmio + REQ_NOT_DELAY); + writeb(1, dev->mmio + ACK_DELAY); + writeb(0x0b, dev->mmio + ACK_NOT_DELAY); + writeb(0x01, dev->mmio + DATA_1_DELAY); /* * manual, page 4-5: - * ClockSpeed comment is incorrectly listed on DAQOptions + * CLOCK_SPEED comment is incorrectly listed on DAQ_OPTIONS */ - writew(0, dev->mmio + ClockSpeed); - writeb(0, dev->mmio + DAQOptions); + writew(0, dev->mmio + CLOCK_SPEED); + writeb(0, dev->mmio + DAQ_OPTIONS); } else { /* TRIG_EXT */ /* page 4-5, "input with external REQs" */ - writeb(0, dev->mmio + OpMode); - writeb(0x00, dev->mmio + ClockReg); - writeb(0, dev->mmio + Sequence); - writeb(0x00, dev->mmio + ReqReg); - writeb(4, dev->mmio + BlockMode); + writeb(0, dev->mmio + OP_MODE); + writeb(0x00, dev->mmio + CLOCK_REG); + writeb(0, dev->mmio + SEQUENCE); + writeb(0x00, dev->mmio + REQ_REG); + writeb(4, dev->mmio + BLOCK_MODE); if (!(cmd->scan_begin_arg & CR_INVERT)) /* Leading Edge */ - writeb(0, dev->mmio + LinePolarities); + writeb(0, dev->mmio + LINE_POLARITIES); else /* 
Trailing Edge */ - writeb(2, dev->mmio + LinePolarities); - writeb(0x00, dev->mmio + AckSer); - writel(1, dev->mmio + StartDelay); - writeb(1, dev->mmio + ReqDelay); - writeb(1, dev->mmio + ReqNotDelay); - writeb(1, dev->mmio + AckDelay); - writeb(0x0C, dev->mmio + AckNotDelay); - writeb(0x10, dev->mmio + Data1Delay); - writew(0, dev->mmio + ClockSpeed); - writeb(0x60, dev->mmio + DAQOptions); + writeb(2, dev->mmio + LINE_POLARITIES); + writeb(0x00, dev->mmio + ACK_SER); + writel(1, dev->mmio + START_DELAY); + writeb(1, dev->mmio + REQ_DELAY); + writeb(1, dev->mmio + REQ_NOT_DELAY); + writeb(1, dev->mmio + ACK_DELAY); + writeb(0x0C, dev->mmio + ACK_NOT_DELAY); + writeb(0x10, dev->mmio + DATA_1_DELAY); + writew(0, dev->mmio + CLOCK_SPEED); + writeb(0x60, dev->mmio + DAQ_OPTIONS); } if (cmd->stop_src == TRIG_COUNT) { writel(cmd->stop_arg, - dev->mmio + Transfer_Count); + dev->mmio + TRANSFER_COUNT); } else { /* XXX */ } #ifdef USE_DMA - writeb(ClearPrimaryTC | ClearSecondaryTC, - dev->mmio + Group_1_First_Clear); + writeb(CLEAR_PRIMARY_TC | CLEAR_SECONDARY_TC, + dev->mmio + GROUP_1_FIRST_CLEAR); { int retval = setup_mite_dma(dev, s); @@ -701,25 +703,25 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) return retval; } #else - writeb(0x00, dev->mmio + DMA_Line_Control_Group1); + writeb(0x00, dev->mmio + DMA_LINE_CONTROL_GROUP1); #endif - writeb(0x00, dev->mmio + DMA_Line_Control_Group2); + writeb(0x00, dev->mmio + DMA_LINE_CONTROL_GROUP2); /* clear and enable interrupts */ - writeb(0xff, dev->mmio + Group_1_First_Clear); - /* writeb(ClearExpired, dev->mmio+Group_1_Second_Clear); */ + writeb(0xff, dev->mmio + GROUP_1_FIRST_CLEAR); + /* writeb(CLEAR_EXPIRED, dev->mmio+GROUP_1_SECOND_CLEAR); */ - writeb(IntEn, dev->mmio + Interrupt_Control); - writeb(0x03, dev->mmio + Master_DMA_And_Interrupt_Control); + writeb(INT_EN, dev->mmio + INTERRUPT_CONTROL); + writeb(0x03, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL); if (cmd->stop_src == TRIG_NONE) { - devpriv->OpModeBits = DataLatching(0) | RunMode(7); + devpriv->OP_MODEBits = DATA_LATCHING(0) | RUN_MODE(7); } else { /* TRIG_TIMER */ - devpriv->OpModeBits = Numbered | RunMode(7); + devpriv->OP_MODEBits = NUMBERED | RUN_MODE(7); } if (cmd->start_src == TRIG_NOW) { /* start */ - writeb(devpriv->OpModeBits, dev->mmio + OpMode); + writeb(devpriv->OP_MODEBits, dev->mmio + OP_MODE); s->async->inttrig = NULL; } else { /* TRIG_INT */ @@ -732,7 +734,7 @@ static int ni_pcidio_cmd(struct comedi_device *dev, struct comedi_subdevice *s) static int ni_pcidio_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { - writeb(0x00, dev->mmio + Master_DMA_And_Interrupt_Control); + writeb(0x00, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL); ni_pcidio_release_di_mite_channel(dev); return 0; @@ -869,12 +871,12 @@ static int pci_6534_upload_firmware(struct comedi_device *dev) static void nidio_reset_board(struct comedi_device *dev) { - writel(0, dev->mmio + Port_IO(0)); - writel(0, dev->mmio + Port_Pin_Directions(0)); - writel(0, dev->mmio + Port_Pin_Mask(0)); + writel(0, dev->mmio + PORT_IO(0)); + writel(0, dev->mmio + PORT_PIN_DIRECTIONS(0)); + writel(0, dev->mmio + PORT_PIN_MASK(0)); /* disable interrupts on board */ - writeb(0, dev->mmio + Master_DMA_And_Interrupt_Control); + writeb(0, dev->mmio + MASTER_DMA_AND_INTERRUPT_CONTROL); } static int nidio_auto_attach(struct comedi_device *dev, @@ -925,7 +927,7 @@ static int nidio_auto_attach(struct comedi_device *dev, return ret; dev_info(dev->class_dev, "%s rev=%d\n", 
dev->board_name, - readb(dev->mmio + Chip_Version)); + readb(dev->mmio + CHIP_VERSION)); s = &dev->subdevices[0]; diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c index 0eb388c0e1f0..048cb35723ad 100644 --- a/drivers/staging/comedi/drivers/ni_tio.c +++ b/drivers/staging/comedi/drivers/ni_tio.c @@ -224,13 +224,16 @@ static void ni_tio_set_bits_transient(struct ni_gpct *counter, unsigned int transient) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned int chip = counter->chip_index; unsigned long flags; - if (reg < NITIO_NUM_REGS) { + if (reg < NITIO_NUM_REGS && chip < counter_dev->num_chips) { + unsigned int *regs = counter_dev->regs[chip]; + spin_lock_irqsave(&counter_dev->regs_lock, flags); - counter_dev->regs[reg] &= ~mask; - counter_dev->regs[reg] |= (value & mask); - ni_tio_write(counter, counter_dev->regs[reg] | transient, reg); + regs[reg] &= ~mask; + regs[reg] |= (value & mask); + ni_tio_write(counter, regs[reg] | transient, reg); mmiowb(); spin_unlock_irqrestore(&counter_dev->regs_lock, flags); } @@ -267,12 +270,13 @@ unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter, enum ni_gpct_register reg) { struct ni_gpct_device *counter_dev = counter->counter_dev; + unsigned int chip = counter->chip_index; unsigned int value = 0; unsigned long flags; - if (reg < NITIO_NUM_REGS) { + if (reg < NITIO_NUM_REGS && chip < counter_dev->num_chips) { spin_lock_irqsave(&counter_dev->regs_lock, flags); - value = counter_dev->regs[reg]; + value = counter_dev->regs[chip][reg]; spin_unlock_irqrestore(&counter_dev->regs_lock, flags); } return value; @@ -302,6 +306,7 @@ static int ni_m_series_clock_src_select(const struct ni_gpct *counter, { struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned int cidx = counter->counter_index; + unsigned int chip = counter->chip_index; unsigned int second_gate_reg = NITIO_GATE2_REG(cidx); unsigned int clock_source = 0; unsigned int src; @@ -318,7 +323,7 @@ static int ni_m_series_clock_src_select(const struct ni_gpct *counter, clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS; break; case NI_M_TIMEBASE_3_CLK: - if (counter_dev->regs[second_gate_reg] & GI_SRC_SUBSEL) + if (counter_dev->regs[chip][second_gate_reg] & GI_SRC_SUBSEL) clock_source = NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS; else @@ -328,7 +333,7 @@ static int ni_m_series_clock_src_select(const struct ni_gpct *counter, clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS; break; case NI_M_NEXT_GATE_CLK: - if (counter_dev->regs[second_gate_reg] & GI_SRC_SUBSEL) + if (counter_dev->regs[chip][second_gate_reg] & GI_SRC_SUBSEL) clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS; else clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS; @@ -721,6 +726,7 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter, { struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned int cidx = counter->counter_index; + unsigned int chip = counter->chip_index; unsigned int second_gate_reg = NITIO_GATE2_REG(cidx); if (counter_dev->variant != ni_gpct_variant_m_series) @@ -729,18 +735,18 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter, /* Gi_Source_Subselect is zero */ case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS: case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS: - counter_dev->regs[second_gate_reg] &= ~GI_SRC_SUBSEL; + counter_dev->regs[chip][second_gate_reg] &= ~GI_SRC_SUBSEL; break; /* Gi_Source_Subselect is one */ case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS: case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS: - 
counter_dev->regs[second_gate_reg] |= GI_SRC_SUBSEL; + counter_dev->regs[chip][second_gate_reg] |= GI_SRC_SUBSEL; break; /* Gi_Source_Subselect doesn't matter */ default: return; } - ni_tio_write(counter, counter_dev->regs[second_gate_reg], + ni_tio_write(counter, counter_dev->regs[chip][second_gate_reg], second_gate_reg); } @@ -1116,6 +1122,7 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index, { struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned int cidx = counter->counter_index; + unsigned int chip = counter->chip_index; unsigned int abz_reg, shift, mask; if (counter_dev->variant != ni_gpct_variant_m_series) @@ -1141,9 +1148,9 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index, if (source > 0x1f) source = 0x1f; /* Disable gate */ - counter_dev->regs[abz_reg] &= ~mask; - counter_dev->regs[abz_reg] |= (source << shift) & mask; - ni_tio_write(counter, counter_dev->regs[abz_reg], abz_reg); + counter_dev->regs[chip][abz_reg] &= ~mask; + counter_dev->regs[chip][abz_reg] |= (source << shift) & mask; + ni_tio_write(counter, counter_dev->regs[chip][abz_reg], abz_reg); return 0; } @@ -1632,6 +1639,7 @@ int ni_tio_insn_read(struct comedi_device *dev, struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned int channel = CR_CHAN(insn->chanspec); unsigned int cidx = counter->counter_index; + unsigned int chip = counter->chip_index; int i; for (i = 0; i < insn->n; i++) { @@ -1640,10 +1648,12 @@ int ni_tio_insn_read(struct comedi_device *dev, data[i] = ni_tio_read_sw_save_reg(dev, s); break; case 1: - data[i] = counter_dev->regs[NITIO_LOADA_REG(cidx)]; + data[i] = + counter_dev->regs[chip][NITIO_LOADA_REG(cidx)]; break; case 2: - data[i] = counter_dev->regs[NITIO_LOADB_REG(cidx)]; + data[i] = + counter_dev->regs[chip][NITIO_LOADB_REG(cidx)]; break; } } @@ -1670,6 +1680,7 @@ int ni_tio_insn_write(struct comedi_device *dev, struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned int channel = CR_CHAN(insn->chanspec); unsigned int cidx = counter->counter_index; + unsigned int chip = counter->chip_index; unsigned int load_reg; if (insn->n < 1) @@ -1690,14 +1701,15 @@ int ni_tio_insn_write(struct comedi_device *dev, ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx), 0, 0, GI_LOAD); /* restore load reg */ - ni_tio_write(counter, counter_dev->regs[load_reg], load_reg); + ni_tio_write(counter, counter_dev->regs[chip][load_reg], + load_reg); break; case 1: - counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0]; + counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = data[0]; ni_tio_write(counter, data[0], NITIO_LOADA_REG(cidx)); break; case 2: - counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0]; + counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = data[0]; ni_tio_write(counter, data[0], NITIO_LOADB_REG(cidx)); break; default: @@ -1711,11 +1723,12 @@ void ni_tio_init_counter(struct ni_gpct *counter) { struct ni_gpct_device *counter_dev = counter->counter_dev; unsigned int cidx = counter->counter_index; + unsigned int chip = counter->chip_index; ni_tio_reset_count_and_disarm(counter); /* initialize counter registers */ - counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0; + counter_dev->regs[chip][NITIO_AUTO_INC_REG(cidx)] = 0x0; ni_tio_write(counter, 0x0, NITIO_AUTO_INC_REG(cidx)); ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), @@ -1723,10 +1736,10 @@ void ni_tio_init_counter(struct ni_gpct *counter) ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0); - counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0; + 
counter_dev->regs[chip][NITIO_LOADA_REG(cidx)] = 0x0; ni_tio_write(counter, 0x0, NITIO_LOADA_REG(cidx)); - counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0; + counter_dev->regs[chip][NITIO_LOADB_REG(cidx)] = 0x0; ni_tio_write(counter, 0x0, NITIO_LOADB_REG(cidx)); ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0); @@ -1735,7 +1748,7 @@ void ni_tio_init_counter(struct ni_gpct *counter) ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx), ~0, 0); if (ni_tio_has_gate2_registers(counter_dev)) { - counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0; + counter_dev->regs[chip][NITIO_GATE2_REG(cidx)] = 0x0; ni_tio_write(counter, 0x0, NITIO_GATE2_REG(cidx)); } @@ -1776,9 +1789,16 @@ ni_gpct_device_construct(struct comedi_device *dev, spin_lock_init(&counter_dev->regs_lock); + counter_dev->num_counters = num_counters; + counter_dev->num_chips = DIV_ROUND_UP(num_counters, counters_per_chip); + counter_dev->counters = kcalloc(num_counters, sizeof(*counter), GFP_KERNEL); - if (!counter_dev->counters) { + counter_dev->regs = kcalloc(counter_dev->num_chips, + sizeof(*counter_dev->regs), GFP_KERNEL); + if (!counter_dev->regs || !counter_dev->counters) { + kfree(counter_dev->regs); + kfree(counter_dev->counters); kfree(counter_dev); return NULL; } @@ -1790,8 +1810,6 @@ ni_gpct_device_construct(struct comedi_device *dev, counter->counter_index = i % counters_per_chip; spin_lock_init(&counter->lock); } - counter_dev->num_counters = num_counters; - counter_dev->counters_per_chip = counters_per_chip; return counter_dev; } @@ -1801,6 +1819,7 @@ void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev) { if (!counter_dev) return; + kfree(counter_dev->regs); kfree(counter_dev->counters); kfree(counter_dev); } diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h index 1e85d0a53715..e7b05718df9b 100644 --- a/drivers/staging/comedi/drivers/ni_tio.h +++ b/drivers/staging/comedi/drivers/ni_tio.h @@ -107,8 +107,8 @@ struct ni_gpct_device { enum ni_gpct_variant variant; struct ni_gpct *counters; unsigned int num_counters; - unsigned int counters_per_chip; - unsigned int regs[NITIO_NUM_REGS]; + unsigned int num_chips; + unsigned int (*regs)[NITIO_NUM_REGS]; /* [num_chips][NITIO_NUM_REGS] */ spinlock_t regs_lock; /* protects 'regs' */ const struct ni_route_tables *routing_tables; /* link to routes */ }; diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index e18c0723b760..0d54f394dbd2 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -61,7 +61,7 @@ #define USBDUXFASTSUB_CPUCS 0xE600 /* - * max lenghth of the transfer-buffer for software upload + * max length of the transfer-buffer for software upload */ #define TB_LEN 0x2000 diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index 8e8f57c4f029..a913d40f0801 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -27,7 +27,7 @@ #include #include -#include +#include #include "emxx_udc.h" @@ -2220,7 +2220,7 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc) mdelay(VBUS_CHATTERING_MDELAY); /* wait (ms) */ /* VBUS ON Check*/ - reg_dt = gpio_get_value(VBUS_VALUE); + reg_dt = gpiod_get_value(vbus_gpio); if (reg_dt == 0) { udc->linux_suspended = 0; @@ -2247,7 +2247,7 @@ static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc) } } else { mdelay(5); /* wait (5ms) */ - reg_dt = gpio_get_value(VBUS_VALUE); + reg_dt = 
gpiod_get_value(vbus_gpio); if (reg_dt == 0) return; @@ -2311,7 +2311,7 @@ static inline void _nbu2ss_int_usb_suspend(struct nbu2ss_udc *udc) u32 reg_dt; if (udc->usb_suspended == 0) { - reg_dt = gpio_get_value(VBUS_VALUE); + reg_dt = gpiod_get_value(vbus_gpio); if (reg_dt == 0) return; @@ -2351,7 +2351,7 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc) struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc; struct fc_regs __iomem *preg = udc->p_regs; - if (gpio_get_value(VBUS_VALUE) == 0) { + if (gpiod_get_value(vbus_gpio) == 0) { _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW); _nbu2ss_writel(&preg->USB_INT_ENA, 0); return IRQ_HANDLED; @@ -2360,7 +2360,7 @@ static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc) spin_lock(&udc->lock); for (;;) { - if (gpio_get_value(VBUS_VALUE) == 0) { + if (gpiod_get_value(vbus_gpio) == 0) { _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW); _nbu2ss_writel(&preg->USB_INT_ENA, 0); status = 0; @@ -2750,7 +2750,7 @@ static int nbu2ss_ep_fifo_status(struct usb_ep *_ep) preg = udc->p_regs; - data = gpio_get_value(VBUS_VALUE); + data = gpiod_get_value(vbus_gpio); if (data == 0) return -EINVAL; @@ -2790,7 +2790,7 @@ static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep) return; } - data = gpio_get_value(VBUS_VALUE); + data = gpiod_get_value(vbus_gpio); if (data == 0) return; @@ -2832,7 +2832,7 @@ static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget) } udc = container_of(pgadget, struct nbu2ss_udc, gadget); - data = gpio_get_value(VBUS_VALUE); + data = gpiod_get_value(vbus_gpio); if (data == 0) return -EINVAL; @@ -2854,7 +2854,7 @@ static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget) udc = container_of(pgadget, struct nbu2ss_udc, gadget); - data = gpio_get_value(VBUS_VALUE); + data = gpiod_get_value(vbus_gpio); if (data == 0) { dev_warn(&pgadget->dev, "VBUS LEVEL = %d\n", data); return -EINVAL; @@ -3119,12 +3119,13 @@ static int nbu2ss_drv_probe(struct platform_device *pdev) } /* VBUS Interrupt */ - irq_set_irq_type(INT_VBUS, IRQ_TYPE_EDGE_BOTH); - status = request_irq(INT_VBUS, + vbus_irq = gpiod_to_irq(vbus_gpio); + irq_set_irq_type(vbus_irq, IRQ_TYPE_EDGE_BOTH); + status = request_irq(vbus_irq, _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc); if (status != 0) { - dev_err(udc->dev, "request_irq(INT_VBUS) failed\n"); + dev_err(udc->dev, "request_irq(vbus_irq) failed\n"); return status; } @@ -3160,7 +3161,7 @@ static int nbu2ss_drv_remove(struct platform_device *pdev) } /* Interrupt Handler - Release */ - free_irq(INT_VBUS, udc); + free_irq(vbus_irq, udc); return 0; } @@ -3201,7 +3202,7 @@ static int nbu2ss_drv_resume(struct platform_device *pdev) if (!udc) return 0; - data = gpio_get_value(VBUS_VALUE); + data = gpiod_get_value(vbus_gpio); if (data) { udc->vbus_active = 1; udc->devstate = USB_STATE_POWERED; diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h index e28a74da9633..b8c3dee5626c 100644 --- a/drivers/staging/emxx_udc/emxx_udc.h +++ b/drivers/staging/emxx_udc/emxx_udc.h @@ -30,6 +30,8 @@ /* below hacked up for staging integration */ #define GPIO_VBUS 0 /* GPIO_P153 on KZM9D */ #define INT_VBUS 0 /* IRQ for GPIO_P153 */ +struct gpio_desc *vbus_gpio; +int vbus_irq; /*------------ Board dependence(Wait) */ diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt new file mode 100644 index 000000000000..961ec4da7705 --- /dev/null +++ b/drivers/staging/erofs/Documentation/filesystems/erofs.txt @@ -0,0 +1,208 @@ +Overview 
+======== + +EROFS file-system stands for Enhanced Read-Only File System. Different +from other read-only file systems, it aims to be designed for flexibility, +scalability, but be kept simple and high performance. + +It is designed as a better filesystem solution for the following scenarios: + - read-only storage media or + + - part of a fully trusted read-only solution, which means it needs to be + immutable and bit-for-bit identical to the official golden image for + their releases due to security and other considerations and + + - hope to save some extra storage space with guaranteed end-to-end performance + by using reduced metadata and transparent file compression, especially + for those embedded devices with limited memory (ex, smartphone); + +Here is the main features of EROFS: + - Little endian on-disk design; + + - Currently 4KB block size (nobh) and therefore maximum 16TB address space; + + - Metadata & data could be mixed by design; + + - 2 inode versions for different requirements: + v1 v2 + Inode metadata size: 32 bytes 64 bytes + Max file size: 4 GB 16 EB (also limited by max. vol size) + Max uids/gids: 65536 4294967296 + File creation time: no yes (64 + 32-bit timestamp) + Max hardlinks: 65536 4294967296 + Metadata reserved: 4 bytes 14 bytes + + - Support extended attributes (xattrs) as an option; + + - Support xattr inline and tail-end data inline for all files; + + - Support POSIX.1e ACLs by using xattrs; + + - Support transparent file compression as an option: + LZ4 algorithm with 4 KB fixed-output compression for high performance; + +The following git tree provides the file system user-space tools under +development (ex, formatting tool mkfs.erofs): +>> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git + +Bugs and patches are welcome, please kindly help us and send to the following +linux-erofs mailing list: +>> linux-erofs mailing list + +Note that EROFS is still working in progress as a Linux staging driver, +Cc the staging mailing list as well is highly recommended: +>> Linux Driver Project Developer List + +Mount options +============= + +fault_injection=%d Enable fault injection in all supported types with + specified injection rate. Supported injection type: + Type_Name Type_Value + FAULT_KMALLOC 0x000000001 +(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled + by default if CONFIG_EROFS_FS_XATTR is selected. +(no)acl Setup POSIX Access Control List. Note: acl is enabled + by default if CONFIG_EROFS_FS_POSIX_ACL is selected. + +On-disk details +=============== + +Summary +------- +Different from other read-only file systems, an EROFS volume is designed +to be as simple as possible: + + |-> aligned with the block size + ____________________________________________________________ + | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data | + |_|__|_|_____|__________|_____|______|__________|_____|______| + 0 +1K + +All data areas should be aligned with the block size, but metadata areas +may not. All metadatas can be now observed in two different spaces (views): + 1. Inode metadata space + Each valid inode should be aligned with an inode slot, which is a fixed + value (32 bytes) and designed to be kept in line with v1 inode size. + + Each inode can be directly found with the following formula: + inode offset = meta_blkaddr * block_size + 32 * nid + + |-> aligned with 8B + |-> followed closely + + meta_blkaddr blocks |-> another slot + _____________________________________________________________________ + | ... 
| inode | xattrs | extents | data inline | ... | inode ... + |________|_______|(optional)|(optional)|__(optional)_|_____|__________ + |-> aligned with the inode slot size + . . + . . + . . + . . + . . + . . + .____________________________________________________|-> aligned with 4B + | xattr_ibody_header | shared xattrs | inline xattrs | + |____________________|_______________|_______________| + |-> 12 bytes <-|->x * 4 bytes<-| . + . . . + . . . + . . . + ._______________________________.______________________. + | id | id | id | id | ... | id | ent | ... | ent| ... | + |____|____|____|____|______|____|_____|_____|____|_____| + |-> aligned with 4B + |-> aligned with 4B + + Inode could be 32 or 64 bytes, which can be distinguished from a common + field which all inode versions have -- i_advise: + + __________________ __________________ + | i_advise | | i_advise | + |__________________| |__________________| + | ... | | ... | + | | | | + |__________________| 32 bytes | | + | | + |__________________| 64 bytes + + Xattrs, extents, data inline are followed by the corresponding inode with + proper alignes, and they could be optional for different data mappings, + _currently_ there are totally 3 valid data mappings supported: + + 1) flat file data without data inline (no extent); + 2) fixed-output size data compression (must have extents); + 3) flat file data with tail-end data inline (no extent); + + The size of the optional xattrs is indicated by i_xattr_count in inode + header. Large xattrs or xattrs shared by many different files can be + stored in shared xattrs metadata rather than inlined right after inode. + + 2. Shared xattrs metadata space + Shared xattrs space is similar to the above inode space, started with + a specific block indicated by xattr_blkaddr, organized one by one with + proper align. + + Each share xattr can also be directly found by the following formula: + xattr offset = xattr_blkaddr * block_size + 4 * xattr_id + + |-> aligned by 4 bytes + + xattr_blkaddr blocks |-> aligned with 4 bytes + _________________________________________________________________________ + | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ... + |________|_____________|_____________|_____|______________|_______________ + +Directories +----------- +All directories are now organized in a compact on-disk format. Note that +each directory block is divided into index and name areas in order to support +random file lookup, and all directory entries are _strictly_ recorded in +alphabetical order in order to support improved prefix binary search +algorithm (could refer to the related source code). + + ___________________________ + / | + / ______________|________________ + / / | nameoff1 | nameoffN-1 + ____________.______________._______________v________________v__________ +| dirent | dirent | ... | dirent | filename | filename | ... | filename | +|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____| + \ ^ + \ | * could have + \ | trailing '\0' + \________________________| nameoff0 + + Directory block + +Note that apart from the offset of the first filename, nameoff0 also indicates +the total number of directory entries in this block since it is no need to +introduce another on-disk field at all. 
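
To make the offset formulas and the nameoff0 trick above concrete, here is a
minimal user-space sketch (illustrative only, not driver API; it assumes the
current 4KB block size and the 12-byte on-disk dirent layout of a 64-bit nid,
16-bit nameoff, file type and one reserved byte, and the glibc le16toh helper):

    #include <stdint.h>
    #include <string.h>
    #include <endian.h>

    #define EROFS_BLKSIZ      4096u /* current fixed block size */
    #define EROFS_DIRENT_SIZE 12u   /* assumed on-disk dirent size */

    /* inode offset = meta_blkaddr * block_size + 32 * nid */
    static inline uint64_t erofs_inode_off(uint32_t meta_blkaddr, uint64_t nid)
    {
            return (uint64_t)meta_blkaddr * EROFS_BLKSIZ + 32 * nid;
    }

    /* xattr offset = xattr_blkaddr * block_size + 4 * xattr_id */
    static inline uint64_t erofs_xattr_off(uint32_t xattr_blkaddr, uint32_t xattr_id)
    {
            return (uint64_t)xattr_blkaddr * EROFS_BLKSIZ + 4 * (uint64_t)xattr_id;
    }

    /* nameoff0 of the first dirent doubles as the entry count of the block */
    static inline unsigned int erofs_dirent_count(const unsigned char *dir_block)
    {
            uint16_t nameoff0;

            /* nameoff is the little-endian 16-bit field at byte 8 of the dirent */
            memcpy(&nameoff0, dir_block + 8, sizeof(nameoff0));
            return le16toh(nameoff0) / EROFS_DIRENT_SIZE;
    }

For example, with meta_blkaddr = 2 and nid = 36 the inode slot starts at byte
2 * 4096 + 32 * 36 = 9344, and a directory block whose first nameoff is 96
holds 96 / 12 = 8 entries.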
+ +Compression +----------- +Currently, EROFS supports 4KB fixed-output clustersize transparent file +compression, as illustrated below: + + |---- Variant-Length Extent ----|-------- VLE --------|----- VLE ----- + clusterofs clusterofs clusterofs + | | | logical data +_________v_______________________________v_____________________v_______________ +... | . | | . | | . | ... +____|____.________|_____________|________.____|_____________|__.__________|____ + |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-| + size size size size size + . . . . + . . . . + . . . . + _______._____________._____________._____________._____________________ + ... | | | | ... physical data + _______|_____________|_____________|_____________|_____________________ + |-> cluster <-|-> cluster <-|-> cluster <-| + size size size + +Currently each on-disk physical cluster can contain 4KB (un)compressed data +at most. For each logical cluster, there is a corresponding on-disk index to +describe its cluster type, physical cluster address, etc. + +See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details. + diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile index c91b65223f99..38ab344a285e 100644 --- a/drivers/staging/erofs/Makefile +++ b/drivers/staging/erofs/Makefile @@ -6,7 +6,7 @@ ccflags-y += -Wall -DEROFS_VERSION=\"$(EROFS_VERSION)\" obj-$(CONFIG_EROFS_FS) += erofs.o # staging requirement: to be self-contained in its own directory -ccflags-y += -I$(src)/include +ccflags-y += -I $(srctree)/$(src)/include erofs-objs := super.o inode.o data.o namei.o dir.o utils.o erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o erofs-$(CONFIG_EROFS_FS_ZIP) += unzip_vle.o unzip_vle_lz4.o diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c index 5a55f0bfdfbb..526e0dbea5b5 100644 --- a/drivers/staging/erofs/data.c +++ b/drivers/staging/erofs/data.c @@ -20,8 +20,9 @@ static inline void read_endio(struct bio *bio) int i; struct bio_vec *bvec; const blk_status_t err = bio->bi_status; + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { struct page *page = bvec->bv_page; /* page is already locked */ @@ -165,43 +166,16 @@ err_out: return err; } -#ifdef CONFIG_EROFS_FS_ZIP -extern int z_erofs_map_blocks_iter(struct inode *, - struct erofs_map_blocks *, - struct page **, int); -#endif - -int erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - struct page **mpage_ret, int flags) -{ - /* by default, reading raw data never use erofs_map_blocks_iter */ - if (unlikely(!is_inode_layout_compression(inode))) { - if (*mpage_ret) - put_page(*mpage_ret); - *mpage_ret = NULL; - - return erofs_map_blocks(inode, map, flags); - } - -#ifdef CONFIG_EROFS_FS_ZIP - return z_erofs_map_blocks_iter(inode, map, mpage_ret, flags); -#else - /* data compression is not available */ - return -ENOTSUPP; -#endif -} - int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map, int flags) { if (unlikely(is_inode_layout_compression(inode))) { - struct page *mpage = NULL; - int err; + int err = z_erofs_map_blocks_iter(inode, map, flags); - err = erofs_map_blocks_iter(inode, map, &mpage, flags); - if (mpage) - put_page(mpage); + if (map->mpage) { + put_page(map->mpage); + map->mpage = NULL; + } return err; } return erofs_map_blocks_flatmode(inode, map, flags); diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c index 833f052f79d0..829f7b12e0dc 100644 --- 
a/drivers/staging/erofs/dir.c +++ b/drivers/staging/erofs/dir.c @@ -24,8 +24,8 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = { }; static int erofs_fill_dentries(struct dir_context *ctx, - void *dentry_blk, unsigned int *ofs, - unsigned int nameoff, unsigned int maxsize) + void *dentry_blk, unsigned int *ofs, + unsigned int nameoff, unsigned int maxsize) { struct erofs_dirent *de = dentry_blk; const struct erofs_dirent *end = dentry_blk + nameoff; @@ -98,15 +98,14 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) if (IS_ERR(dentry_page)) continue; - lock_page(dentry_page); de = (struct erofs_dirent *)kmap(dentry_page); nameoff = le16_to_cpu(de->nameoff); if (unlikely(nameoff < sizeof(struct erofs_dirent) || - nameoff >= PAGE_SIZE)) { + nameoff >= PAGE_SIZE)) { errln("%s, invalid de[0].nameoff %u", - __func__, nameoff); + __func__, nameoff); err = -EIO; goto skip_this; @@ -128,7 +127,6 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) skip_this: kunmap(dentry_page); - unlock_page(dentry_page); put_page(dentry_page); ctx->pos = blknr_to_addr(i) + ofs; @@ -144,6 +142,6 @@ skip_this: const struct file_operations erofs_dir_fops = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = erofs_readdir, + .iterate_shared = erofs_readdir, }; diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c index d7fbf5f4600f..924b8dfc7a8f 100644 --- a/drivers/staging/erofs/inode.c +++ b/drivers/staging/erofs/inode.c @@ -184,32 +184,18 @@ static int fill_inode(struct inode *inode, int isdir) if (!err) { /* setup the new inode */ if (S_ISREG(inode->i_mode)) { -#ifdef CONFIG_EROFS_FS_XATTR - if (vi->xattr_isize) - inode->i_op = &erofs_generic_xattr_iops; -#endif + inode->i_op = &erofs_generic_iops; inode->i_fop = &generic_ro_fops; } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = -#ifdef CONFIG_EROFS_FS_XATTR - vi->xattr_isize ? 
&erofs_dir_xattr_iops : -#endif - &erofs_dir_iops; + inode->i_op = &erofs_dir_iops; inode->i_fop = &erofs_dir_fops; } else if (S_ISLNK(inode->i_mode)) { /* by default, page_get_link is used for symlink */ - inode->i_op = -#ifdef CONFIG_EROFS_FS_XATTR - &erofs_symlink_xattr_iops, -#else - &page_symlink_inode_operations; -#endif + inode->i_op = &erofs_symlink_iops; inode_nohighmem(inode); } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { -#ifdef CONFIG_EROFS_FS_XATTR - inode->i_op = &erofs_special_inode_operations; -#endif + inode->i_op = &erofs_generic_iops; init_special_inode(inode, inode->i_mode, inode->i_rdev); } else { err = -EIO; @@ -297,23 +283,26 @@ struct inode *erofs_iget(struct super_block *sb, return inode; } +const struct inode_operations erofs_generic_iops = { #ifdef CONFIG_EROFS_FS_XATTR -const struct inode_operations erofs_generic_xattr_iops = { .listxattr = erofs_listxattr, +#endif + .get_acl = erofs_get_acl, }; -const struct inode_operations erofs_symlink_xattr_iops = { +const struct inode_operations erofs_symlink_iops = { .get_link = page_get_link, +#ifdef CONFIG_EROFS_FS_XATTR .listxattr = erofs_listxattr, +#endif + .get_acl = erofs_get_acl, }; -const struct inode_operations erofs_special_inode_operations = { - .listxattr = erofs_listxattr, -}; - -const struct inode_operations erofs_fast_symlink_xattr_iops = { +const struct inode_operations erofs_fast_symlink_iops = { .get_link = simple_get_link, +#ifdef CONFIG_EROFS_FS_XATTR .listxattr = erofs_listxattr, -}; #endif + .get_acl = erofs_get_acl, +}; diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h index e049d00c087a..e3bfde00c7d2 100644 --- a/drivers/staging/erofs/internal.h +++ b/drivers/staging/erofs/internal.h @@ -252,47 +252,20 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) } #endif -static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt) -{ - int o; - -repeat: - o = erofs_wait_on_workgroup_freezed(grp); - - if (unlikely(o <= 0)) - return -1; - - if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o)) - goto repeat; - - *ocnt = o; - return 0; -} - -#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) -#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) - -extern int erofs_workgroup_put(struct erofs_workgroup *grp); - -extern struct erofs_workgroup *erofs_find_workgroup( - struct super_block *sb, pgoff_t index, bool *tag); - -extern int erofs_register_workgroup(struct super_block *sb, - struct erofs_workgroup *grp, bool tag); - -extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, - unsigned long nr_shrink, bool cleanup); - -static inline void erofs_workstation_cleanup_all(struct super_block *sb) -{ - erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true); -} +int erofs_workgroup_put(struct erofs_workgroup *grp); +struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, + pgoff_t index, bool *tag); +int erofs_register_workgroup(struct super_block *sb, + struct erofs_workgroup *grp, bool tag); +unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, + unsigned long nr_shrink, bool cleanup); +void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); #ifdef EROFS_FS_HAS_MANAGED_CACHE -extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *egrp); -extern int erofs_try_to_free_cached_page(struct address_space *mapping, - struct page *page); +int 
erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, + struct erofs_workgroup *egrp); +int erofs_try_to_free_cached_page(struct address_space *mapping, + struct page *page); #define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) #else @@ -354,12 +327,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid) return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits); } -#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1) -#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1) +/* atomic flag definitions */ +#define EROFS_V_EA_INITED_BIT 0 + +/* bitlock definitions (arranged in reverse order) */ +#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1) struct erofs_vnode { erofs_nid_t nid; - unsigned int flags; + + /* atomic flags (including bitlocks) */ + unsigned long flags; unsigned char data_mapping_mode; /* inline size in bytes */ @@ -412,8 +390,6 @@ static inline bool is_inode_layout_inline(struct inode *inode) } extern const struct super_operations erofs_sops; -extern const struct inode_operations erofs_dir_iops; -extern const struct file_operations erofs_dir_fops; extern const struct address_space_operations erofs_raw_access_aops; #ifdef CONFIG_EROFS_FS_ZIP @@ -461,11 +437,26 @@ struct erofs_map_blocks { u64 m_plen, m_llen; unsigned int m_flags; + + struct page *mpage; }; /* Flags used by erofs_map_blocks() */ #define EROFS_GET_BLOCKS_RAW 0x0001 +#ifdef CONFIG_EROFS_FS_ZIP +int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags); +#else +static inline int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + return -ENOTSUPP; +} +#endif + /* data.c */ static inline struct bio * erofs_grab_bio(struct super_block *sb, @@ -506,8 +497,8 @@ static inline void __submit_bio(struct bio *bio, unsigned op, unsigned op_flags) #define EROFS_IO_MAX_RETRIES_NOFAIL CONFIG_EROFS_FS_IO_MAX_RETRIES #endif -extern struct page *__erofs_get_meta_page(struct super_block *sb, - erofs_blk_t blkaddr, bool prio, bool nofail); +struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, + bool prio, bool nofail); static inline struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, bool prio) @@ -521,15 +512,7 @@ static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb, return __erofs_get_meta_page(sb, blkaddr, prio, true); } -extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); -extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *, - struct page **, int); - -struct erofs_map_blocks_iter { - struct erofs_map_blocks map; - struct page *mpage; -}; - +int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); static inline struct page * erofs_get_inline_page(struct inode *inode, @@ -549,41 +532,31 @@ static inline unsigned long erofs_inode_hash(erofs_nid_t nid) #endif } -extern struct inode *erofs_iget(struct super_block *sb, - erofs_nid_t nid, bool dir); - -/* dir.c */ -int erofs_namei(struct inode *dir, struct qstr *name, - erofs_nid_t *nid, unsigned *d_type); - -#ifdef CONFIG_EROFS_FS_XATTR -/* xattr.c */ -extern const struct xattr_handler *erofs_xattr_handlers[]; - -/* symlink and special inode */ -extern const struct inode_operations erofs_symlink_xattr_iops; -extern const struct inode_operations erofs_fast_symlink_xattr_iops; -extern const struct inode_operations erofs_special_inode_operations; -#endif +extern const struct inode_operations 
erofs_generic_iops; +extern const struct inode_operations erofs_symlink_iops; +extern const struct inode_operations erofs_fast_symlink_iops; static inline void set_inode_fast_symlink(struct inode *inode) { -#ifdef CONFIG_EROFS_FS_XATTR - inode->i_op = &erofs_fast_symlink_xattr_iops; -#else - inode->i_op = &simple_symlink_inode_operations; -#endif + inode->i_op = &erofs_fast_symlink_iops; } static inline bool is_inode_fast_symlink(struct inode *inode) { -#ifdef CONFIG_EROFS_FS_XATTR - return inode->i_op == &erofs_fast_symlink_xattr_iops; -#else - return inode->i_op == &simple_symlink_inode_operations; -#endif + return inode->i_op == &erofs_fast_symlink_iops; } +struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir); + +/* namei.c */ +extern const struct inode_operations erofs_dir_iops; + +int erofs_namei(struct inode *dir, struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type); + +/* dir.c */ +extern const struct file_operations erofs_dir_fops; + static inline void *erofs_vmap(struct page **pages, unsigned int count) { #ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM @@ -612,15 +585,11 @@ static inline void erofs_vunmap(const void *mem, unsigned int count) } /* utils.c */ -extern struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp); - -extern void erofs_register_super(struct super_block *sb); -extern void erofs_unregister_super(struct super_block *sb); +extern struct shrinker erofs_shrinker_info; -extern unsigned long erofs_shrink_count(struct shrinker *shrink, - struct shrink_control *sc); -extern unsigned long erofs_shrink_scan(struct shrinker *shrink, - struct shrink_control *sc); +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp); +void erofs_register_super(struct super_block *sb); +void erofs_unregister_super(struct super_block *sb); #ifndef lru_to_page #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c index 5596c52e246d..3f4fa52c10fa 100644 --- a/drivers/staging/erofs/namei.c +++ b/drivers/staging/erofs/namei.c @@ -15,74 +15,77 @@ #include -/* based on the value of qn->len is accurate */ -static inline int dirnamecmp(struct qstr *qn, - struct qstr *qd, unsigned int *matched) +struct erofs_qstr { + const unsigned char *name; + const unsigned char *end; +}; + +/* based on the end of qn is accurate and it must have the trailing '\0' */ +static inline int dirnamecmp(const struct erofs_qstr *qn, + const struct erofs_qstr *qd, + unsigned int *matched) { - unsigned int i = *matched, len = min(qn->len, qd->len); -loop: - if (unlikely(i >= len)) { - *matched = i; - if (qn->len < qd->len) { - /* - * actually (qn->len == qd->len) - * when qd->name[i] == '\0' - */ - return qd->name[i] == '\0' ? 0 : -1; + unsigned int i = *matched; + + /* + * on-disk error, let's only BUG_ON in the debugging mode. + * otherwise, it will return 1 to just skip the invalid name + * and go on (in consideration of the lookup performance). + */ + DBG_BUGON(qd->name > qd->end); + + /* qd could not have trailing '\0' */ + /* However it is absolutely safe if < qd->end */ + while (qd->name + i < qd->end && qd->name[i] != '\0') { + if (qn->name[i] != qd->name[i]) { + *matched = i; + return qn->name[i] > qd->name[i] ? 1 : -1; } - return (qn->len > qd->len); - } - - if (qn->name[i] != qd->name[i]) { - *matched = i; - return qn->name[i] > qd->name[i] ? 
1 : -1; + ++i; } - - ++i; - goto loop; + *matched = i; + /* See comments in __d_alloc on the terminating NUL character */ + return qn->name[i] == '\0' ? 0 : 1; } -static struct erofs_dirent *find_target_dirent( - struct qstr *name, - u8 *data, int maxsize) +#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) + +static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, + u8 *data, + unsigned int dirblksize, + const int ndirents) { - unsigned int ndirents, head, back; + int head, back; unsigned int startprfx, endprfx; struct erofs_dirent *const de = (struct erofs_dirent *)data; - /* make sure that maxsize is valid */ - BUG_ON(maxsize < sizeof(struct erofs_dirent)); - - ndirents = le16_to_cpu(de->nameoff) / sizeof(*de); - - /* corrupted dir (may be unnecessary...) */ - BUG_ON(!ndirents); - - head = 0; + /* since the 1st dirent has been evaluated previously */ + head = 1; back = ndirents - 1; startprfx = endprfx = 0; while (head <= back) { - unsigned int mid = head + (back - head) / 2; - unsigned int nameoff = le16_to_cpu(de[mid].nameoff); + const int mid = head + (back - head) / 2; + const int nameoff = nameoff_from_disk(de[mid].nameoff, + dirblksize); unsigned int matched = min(startprfx, endprfx); - - struct qstr dname = QSTR_INIT(data + nameoff, - unlikely(mid >= ndirents - 1) ? - maxsize - nameoff : - le16_to_cpu(de[mid + 1].nameoff) - nameoff); + struct erofs_qstr dname = { + .name = data + nameoff, + .end = unlikely(mid >= ndirents - 1) ? + data + dirblksize : + data + nameoff_from_disk(de[mid + 1].nameoff, + dirblksize) + }; /* string comparison without already matched prefix */ int ret = dirnamecmp(name, &dname, &matched); - if (unlikely(!ret)) + if (unlikely(!ret)) { return de + mid; - else if (ret > 0) { + } else if (ret > 0) { head = mid + 1; startprfx = matched; - } else if (unlikely(mid < 1)) /* fix "mid" overflow */ - break; - else { + } else { back = mid - 1; endprfx = matched; } @@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent( return ERR_PTR(-ENOENT); } -static struct page *find_target_block_classic( - struct inode *dir, - struct qstr *name, int *_diff) +static struct page *find_target_block_classic(struct inode *dir, + struct erofs_qstr *name, + int *_ndirents) { unsigned int startprfx, endprfx; - unsigned int head, back; + int head, back; struct address_space *const mapping = dir->i_mapping; struct page *candidate = ERR_PTR(-ENOENT); @@ -105,89 +108,97 @@ static struct page *find_target_block_classic( back = inode_datablocks(dir) - 1; while (head <= back) { - unsigned int mid = head + (back - head) / 2; + const int mid = head + (back - head) / 2; struct page *page = read_mapping_page(mapping, mid, NULL); - if (IS_ERR(page)) { -exact_out: - if (!IS_ERR(candidate)) /* valid candidate */ - put_page(candidate); - return page; - } else { - int diff; - unsigned int ndirents, matched; - struct qstr dname; + if (!IS_ERR(page)) { struct erofs_dirent *de = kmap_atomic(page); - unsigned int nameoff = le16_to_cpu(de->nameoff); - - ndirents = nameoff / sizeof(*de); + const int nameoff = nameoff_from_disk(de->nameoff, + EROFS_BLKSIZ); + const int ndirents = nameoff / sizeof(*de); + int diff; + unsigned int matched; + struct erofs_qstr dname; - /* corrupted dir (should have one entry at least) */ - BUG_ON(!ndirents || nameoff > PAGE_SIZE); + if (unlikely(!ndirents)) { + DBG_BUGON(1); + kunmap_atomic(de); + put_page(page); + page = ERR_PTR(-EIO); + goto out; + } matched = min(startprfx, endprfx); dname.name = (u8 *)de + nameoff; - dname.len = 
ndirents == 1 ? - /* since the rest of the last page is 0 */ - EROFS_BLKSIZ - nameoff - : le16_to_cpu(de[1].nameoff) - nameoff; + if (ndirents == 1) + dname.end = (u8 *)de + EROFS_BLKSIZ; + else + dname.end = (u8 *)de + + nameoff_from_disk(de[1].nameoff, + EROFS_BLKSIZ); /* string comparison without already matched prefix */ diff = dirnamecmp(name, &dname, &matched); kunmap_atomic(de); if (unlikely(!diff)) { - *_diff = 0; - goto exact_out; + *_ndirents = 0; + goto out; } else if (diff > 0) { head = mid + 1; startprfx = matched; - if (likely(!IS_ERR(candidate))) + if (!IS_ERR(candidate)) put_page(candidate); candidate = page; + *_ndirents = ndirents; } else { put_page(page); - if (unlikely(mid < 1)) /* fix "mid" overflow */ - break; - back = mid - 1; endprfx = matched; } + continue; } +out: /* free if the candidate is valid */ + if (!IS_ERR(candidate)) + put_page(candidate); + return page; } - *_diff = 1; return candidate; } int erofs_namei(struct inode *dir, - struct qstr *name, - erofs_nid_t *nid, unsigned int *d_type) + struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type) { - int diff; + int ndirents; struct page *page; - u8 *data; + void *data; struct erofs_dirent *de; + struct erofs_qstr qn; if (unlikely(!dir->i_size)) return -ENOENT; - diff = 1; - page = find_target_block_classic(dir, name, &diff); + qn.name = name->name; + qn.end = name->name + name->len; + + ndirents = 0; + page = find_target_block_classic(dir, &qn, &ndirents); - if (unlikely(IS_ERR(page))) + if (IS_ERR(page)) return PTR_ERR(page); data = kmap_atomic(page); /* the target page has been mapped */ - de = likely(diff) ? - /* since the rest of the last page is 0 */ - find_target_dirent(name, data, EROFS_BLKSIZ) : - (struct erofs_dirent *)data; + if (ndirents) + de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); + else + de = (struct erofs_dirent *)data; - if (likely(!IS_ERR(de))) { + if (!IS_ERR(de)) { *nid = le64_to_cpu(de->nid); *d_type = de->file_type; } @@ -235,12 +246,9 @@ static struct dentry *erofs_lookup(struct inode *dir, const struct inode_operations erofs_dir_iops = { .lookup = erofs_lookup, -}; - -const struct inode_operations erofs_dir_xattr_iops = { - .lookup = erofs_lookup, #ifdef CONFIG_EROFS_FS_XATTR .listxattr = erofs_listxattr, #endif + .get_acl = erofs_get_acl, }; diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c index 1c2eb69682ef..15c784fba879 100644 --- a/drivers/staging/erofs/super.c +++ b/drivers/staging/erofs/super.c @@ -16,6 +16,7 @@ #include #include #include "internal.h" +#include "xattr.h" #define CREATE_TRACE_POINTS #include @@ -397,6 +398,11 @@ static int erofs_read_super(struct super_block *sb, if (!silent) infoln("root inode @ nid %llu", ROOT_NID(sbi)); + if (test_opt(sbi, POSIX_ACL)) + sb->s_flags |= SB_POSIXACL; + else + sb->s_flags &= ~SB_POSIXACL; + #ifdef CONFIG_EROFS_FS_ZIP INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); #endif @@ -420,13 +426,14 @@ static int erofs_read_super(struct super_block *sb, errln("rootino(nid %llu) is not a directory(i_mode %o)", ROOT_NID(sbi), inode->i_mode); err = -EINVAL; - goto err_isdir; + iput(inode); + goto err_iget; } sb->s_root = d_make_root(inode); if (sb->s_root == NULL) { err = -ENOMEM; - goto err_makeroot; + goto err_iget; } /* save the device name to sbi */ @@ -452,10 +459,6 @@ static int erofs_read_super(struct super_block *sb, */ err_devname: dput(sb->s_root); -err_makeroot: -err_isdir: - if (sb->s_root == NULL) - iput(inode); err_iget: #ifdef EROFS_FS_HAS_MANAGED_CACHE iput(sbi->managed_cache); 
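[Editor's note] The super.c changes just above, together with the new .get_acl hook in erofs_dir_iops and the erofs_get_acl() helper added further down in xattr.c, wire POSIX ACL support into the VFS: when the POSIX_ACL mount option is set, the superblock gains SB_POSIXACL, and the remount path repeats the same assignment so the flag stays in sync. A minimal sketch of that pattern follows; the helper name and the acl_enabled parameter are invented for illustration, erofs itself open-codes this with test_opt(sbi, POSIX_ACL).

/*
 * Illustrative sketch, not part of the patch: propagate an "acl"
 * mount option into the VFS superblock flags.
 */
#include <linux/fs.h>

static void sketch_apply_acl_option(struct super_block *sb, bool acl_enabled)
{
	if (acl_enabled)
		sb->s_flags |= SB_POSIXACL;	/* VFS will honour POSIX ACL xattrs */
	else
		sb->s_flags &= ~SB_POSIXACL;	/* "noacl": ACL xattrs are ignored */
}

Calling such a helper from both the fill_super path and the remount path, as the hunks here and in erofs_remount() below effectively do, keeps the flag from going stale when the option is toggled across a remount.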
@@ -493,7 +496,8 @@ static void erofs_put_super(struct super_block *sb) mutex_lock(&sbi->umount_mutex); #ifdef CONFIG_EROFS_FS_ZIP - erofs_workstation_cleanup_all(sb); + /* clean up the compression space of this sb */ + erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true); #endif erofs_unregister_super(sb); @@ -537,12 +541,6 @@ static void erofs_kill_sb(struct super_block *sb) kill_block_super(sb); } -static struct shrinker erofs_shrinker_info = { - .scan_objects = erofs_shrink_scan, - .count_objects = erofs_shrink_count, - .seeks = DEFAULT_SEEKS, -}; - static struct file_system_type erofs_fs_type = { .owner = THIS_MODULE, .name = "erofs", @@ -653,6 +651,11 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data) if (err) goto out; + if (test_opt(sbi, POSIX_ACL)) + sb->s_flags |= SB_POSIXACL; + else + sb->s_flags &= ~SB_POSIXACL; + *flags |= SB_RDONLY; return 0; out: diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c index 4ac1099a39c6..8715bc50e09c 100644 --- a/drivers/staging/erofs/unzip_vle.c +++ b/drivers/staging/erofs/unzip_vle.c @@ -107,15 +107,30 @@ enum z_erofs_vle_work_role { Z_EROFS_VLE_WORK_SECONDARY, Z_EROFS_VLE_WORK_PRIMARY, /* - * The current work has at least been linked with the following - * processed chained works, which means if the processing page - * is the tail partial page of the work, the current work can - * safely use the whole page, as illustrated below: - * +--------------+-------------------------------------------+ - * | tail page | head page (of the previous work) | - * +--------------+-------------------------------------------+ - * /\ which belongs to the current work - * [ (*) this page can be used for the current work itself. ] + * The current work was the tail of an exist chain, and the previous + * processed chained works are all decided to be hooked up to it. + * A new chain should be created for the remaining unprocessed works, + * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, + * the next work cannot reuse the whole page in the following scenario: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (belongs to the next work) | (belongs to the current work) | + * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| + */ + Z_EROFS_VLE_WORK_PRIMARY_HOOKED, + /* + * The current work has been linked with the processed chained works, + * and could be also linked with the potential remaining works, which + * means if the processing page is the tail partial page of the work, + * the current work can safely use the whole page (since the next work + * is under control) for in-place decompression, as illustrated below: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (of the current work) | (of the previous work) | + * | PRIMARY_FOLLOWED or | | + * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________| + * + * [ (*) the above page can be used for the current work itself. 
] */ Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED, Z_EROFS_VLE_WORK_MAX @@ -238,14 +253,9 @@ int erofs_try_to_free_cached_page(struct address_space *mapping, { struct erofs_sb_info *const sbi = EROFS_SB(mapping->host->i_sb); const unsigned int clusterpages = erofs_clusterpages(sbi); - - struct z_erofs_vle_workgroup *grp; + struct z_erofs_vle_workgroup *const grp = (void *)page_private(page); int ret = 0; /* 0 - busy */ - /* prevent the workgroup from being freed */ - rcu_read_lock(); - grp = (void *)page_private(page); - if (erofs_workgroup_try_to_freeze(&grp->obj, 1)) { unsigned int i; @@ -257,12 +267,11 @@ int erofs_try_to_free_cached_page(struct address_space *mapping, } } erofs_workgroup_unfreeze(&grp->obj, 1); - } - rcu_read_unlock(); - if (ret) { - ClearPagePrivate(page); - put_page(page); + if (ret) { + ClearPagePrivate(page); + put_page(page); + } } return ret; } @@ -315,10 +324,10 @@ static int z_erofs_vle_work_add_page( return ret ? 0 : -EAGAIN; } -static inline bool try_to_claim_workgroup( - struct z_erofs_vle_workgroup *grp, - z_erofs_vle_owned_workgrp_t *owned_head, - bool *hosted) +static enum z_erofs_vle_work_role +try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp, + z_erofs_vle_owned_workgrp_t *owned_head, + bool *hosted) { DBG_BUGON(*hosted == true); @@ -332,6 +341,9 @@ retry: *owned_head = &grp->next; *hosted = true; + /* lucky, I am the followee :) */ + return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED; + } else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) { /* * type 2, link to the end of a existing open chain, @@ -341,12 +353,11 @@ retry: if (cmpxchg(&grp->next, Z_EROFS_VLE_WORKGRP_TAIL, *owned_head) != Z_EROFS_VLE_WORKGRP_TAIL) goto retry; - *owned_head = Z_EROFS_VLE_WORKGRP_TAIL; - } else - return false; /* :( better luck next time */ + return Z_EROFS_VLE_WORK_PRIMARY_HOOKED; + } - return true; /* lucky, I am the followee :) */ + return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */ } struct z_erofs_vle_work_finder { @@ -424,12 +435,9 @@ z_erofs_vle_work_lookup(const struct z_erofs_vle_work_finder *f) *f->hosted = false; if (!primary) *f->role = Z_EROFS_VLE_WORK_SECONDARY; - /* claim the workgroup if possible */ - else if (try_to_claim_workgroup(grp, f->owned_head, f->hosted)) - *f->role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED; - else - *f->role = Z_EROFS_VLE_WORK_PRIMARY; - + else /* claim the workgroup if possible */ + *f->role = try_to_claim_workgroup(grp, f->owned_head, + f->hosted); return work; } @@ -493,6 +501,9 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f, return work; } +#define builder_is_hooked(builder) \ + ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED) + #define builder_is_followed(builder) \ ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED) @@ -539,7 +550,7 @@ repeat: if (unlikely(work == ERR_PTR(-EAGAIN))) goto repeat; - if (unlikely(IS_ERR(work))) + if (IS_ERR(work)) return PTR_ERR(work); got_it: z_erofs_pagevec_ctor_init(&builder->vector, @@ -589,7 +600,7 @@ static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp, erofs_workgroup_put(&grp->obj); } -void z_erofs_vle_work_release(struct z_erofs_vle_work *work) +static void z_erofs_vle_work_release(struct z_erofs_vle_work *work) { struct z_erofs_vle_workgroup *grp = z_erofs_vle_work_workgroup(work, true); @@ -636,7 +647,7 @@ struct z_erofs_vle_frontend { struct inode *const inode; struct z_erofs_vle_work_builder builder; - struct erofs_map_blocks_iter m_iter; + struct erofs_map_blocks map; z_erofs_vle_owned_workgrp_t owned_head; @@ -647,8 +658,9 @@ struct 
z_erofs_vle_frontend { #define VLE_FRONTEND_INIT(__i) { \ .inode = __i, \ - .m_iter = { \ - { .m_llen = 0, .m_plen = 0 }, \ + .map = { \ + .m_llen = 0, \ + .m_plen = 0, \ .mpage = NULL \ }, \ .builder = VLE_WORK_BUILDER_INIT(), \ @@ -681,12 +693,11 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe, { struct super_block *const sb = fe->inode->i_sb; struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb); - struct erofs_map_blocks_iter *const m = &fe->m_iter; - struct erofs_map_blocks *const map = &m->map; + struct erofs_map_blocks *const map = &fe->map; struct z_erofs_vle_work_builder *const builder = &fe->builder; const loff_t offset = page_offset(page); - bool tight = builder_is_followed(builder); + bool tight = builder_is_hooked(builder); struct z_erofs_vle_work *work = builder->work; enum z_erofs_cache_alloctype cache_strategy; @@ -704,8 +715,12 @@ repeat: /* lucky, within the range of the current map_blocks */ if (offset + cur >= map->m_la && - offset + cur < map->m_la + map->m_llen) + offset + cur < map->m_la + map->m_llen) { + /* didn't get a valid unzip work previously (very rare) */ + if (!builder->work) + goto restart_now; goto hitted; + } /* go ahead the next map_blocks */ debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); @@ -715,10 +730,11 @@ repeat: map->m_la = offset + cur; map->m_llen = 0; - err = erofs_map_blocks_iter(fe->inode, map, &m->mpage, 0); + err = z_erofs_map_blocks_iter(fe->inode, map, 0); if (unlikely(err)) goto err_out; +restart_now: if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) goto hitted; @@ -740,7 +756,7 @@ repeat: map->m_plen / PAGE_SIZE, cache_strategy, page_pool, GFP_KERNEL); - tight &= builder_is_followed(builder); + tight &= builder_is_hooked(builder); work = builder->work; hitted: cur = end - min_t(unsigned int, offset + end - map->m_la, end); @@ -755,6 +771,9 @@ hitted: (tight ? 
Z_EROFS_PAGE_TYPE_EXCLUSIVE : Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); + if (cur) + tight &= builder_is_followed(builder); + retry: err = z_erofs_vle_work_add_page(builder, page, page_type); /* should allocate an additional staging page for pagevec */ @@ -830,8 +849,9 @@ static inline void z_erofs_vle_read_endio(struct bio *bio) #ifdef EROFS_FS_HAS_MANAGED_CACHE struct address_space *mc = NULL; #endif + struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, i) { + bio_for_each_segment_all(bvec, bio, i, iter_all) { struct page *page = bvec->bv_page; bool cachemngd = false; @@ -992,11 +1012,10 @@ repeat: if (llen > grp->llen) llen = grp->llen; - err = z_erofs_vle_unzip_fast_percpu(compressed_pages, - clusterpages, pages, llen, work->pageofs, - z_erofs_onlinepage_endio); + err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages, + pages, llen, work->pageofs); if (err != -ENOTSUPP) - goto out_percpu; + goto out; if (sparsemem_pages >= nr_pages) goto skip_allocpage; @@ -1017,8 +1036,25 @@ skip_allocpage: erofs_vunmap(vout, nr_pages); out: + /* must handle all compressed pages before endding pages */ + for (i = 0; i < clusterpages; ++i) { + page = compressed_pages[i]; + +#ifdef EROFS_FS_HAS_MANAGED_CACHE + if (page->mapping == MNGD_MAPPING(sbi)) + continue; +#endif + /* recycle all individual staging pages */ + (void)z_erofs_gather_if_stagingpage(page_pool, page); + + WRITE_ONCE(compressed_pages[i], NULL); + } + for (i = 0; i < nr_pages; ++i) { page = pages[i]; + if (!page) + continue; + DBG_BUGON(!page->mapping); /* recycle all individual staging pages */ @@ -1031,20 +1067,6 @@ out: z_erofs_onlinepage_endio(page); } -out_percpu: - for (i = 0; i < clusterpages; ++i) { - page = compressed_pages[i]; - -#ifdef EROFS_FS_HAS_MANAGED_CACHE - if (page->mapping == MNGD_MAPPING(sbi)) - continue; -#endif - /* recycle all individual staging pages */ - (void)z_erofs_gather_if_stagingpage(page_pool, page); - - WRITE_ONCE(compressed_pages[i], NULL); - } - if (pages == z_pagemap_global) mutex_unlock(&z_pagemap_global_lock); else if (unlikely(pages != pages_onstack)) @@ -1484,8 +1506,8 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file, z_erofs_submit_and_unzip(&f, &pagepool, true); out: - if (f.m_iter.mpage) - put_page(f.m_iter.mpage); + if (f.map.mpage) + put_page(f.map.mpage); /* clean up the remaining free pages */ put_pages_list(&pagepool); @@ -1555,8 +1577,8 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp, z_erofs_submit_and_unzip(&f, &pagepool, sync); - if (f.m_iter.mpage) - put_page(f.m_iter.mpage); + if (f.map.mpage) + put_page(f.map.mpage); /* clean up the remaining free pages */ put_pages_list(&pagepool); @@ -1701,14 +1723,14 @@ vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx, int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, - struct page **mpage_ret, int flags) + int flags) { void *kaddr; const struct vle_map_blocks_iter_ctx ctx = { .inode = inode, .sb = inode->i_sb, .clusterbits = EROFS_I_SB(inode)->clusterbits, - .mpage_ret = mpage_ret, + .mpage_ret = &map->mpage, .kaddr_ret = &kaddr }; const unsigned int clustersize = 1 << ctx.clusterbits; @@ -1722,7 +1744,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, /* initialize `pblk' to keep gcc from printing foolish warnings */ erofs_blk_t mblk, pblk = 0; - struct page *mpage = *mpage_ret; + struct page *mpage = map->mpage; struct z_erofs_vle_decompressed_index *di; unsigned int cluster_type, logical_cluster_ofs; int err = 0; @@ -1758,7 +1780,7 
@@ int z_erofs_map_blocks_iter(struct inode *inode, err = PTR_ERR(mpage); goto out; } - *mpage_ret = mpage; + map->mpage = mpage; } else { lock_page(mpage); DBG_BUGON(!PageUptodate(mpage)); @@ -1818,7 +1840,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, /* get the correspoinding first chunk */ err = vle_get_logical_extent_head(&ctx, lcn, &ofs, &pblk, &map->m_flags); - mpage = *mpage_ret; + mpage = map->mpage; if (unlikely(err)) { if (mpage) diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h index 5a4e1b62c0d1..517e5ce8c5e9 100644 --- a/drivers/staging/erofs/unzip_vle.h +++ b/drivers/staging/erofs/unzip_vle.h @@ -212,18 +212,17 @@ static inline void z_erofs_onlinepage_endio(struct page *page) #define Z_EROFS_VLE_VMAP_GLOBAL_PAGES 2048 /* unzip_vle_lz4.c */ -extern int z_erofs_vle_plain_copy(struct page **compressed_pages, - unsigned clusterpages, struct page **pages, - unsigned nr_pages, unsigned short pageofs); - -extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, - unsigned clusterpages, struct page **pages, - unsigned outlen, unsigned short pageofs, - void (*endio)(struct page *)); - -extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages, - unsigned clusterpages, void *vaddr, unsigned llen, - unsigned short pageofs, bool overlapped); +int z_erofs_vle_plain_copy(struct page **compressed_pages, + unsigned int clusterpages, struct page **pages, + unsigned int nr_pages, unsigned short pageofs); +int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, + unsigned int clusterpages, + struct page **pages, unsigned int outlen, + unsigned short pageofs); +int z_erofs_vle_unzip_vmap(struct page **compressed_pages, + unsigned int clusterpages, + void *vaddr, unsigned int llen, + unsigned short pageofs, bool overlapped); #endif diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c index 52797bd89da1..48b263a2731a 100644 --- a/drivers/staging/erofs/unzip_vle_lz4.c +++ b/drivers/staging/erofs/unzip_vle_lz4.c @@ -13,7 +13,7 @@ #include "unzip_vle.h" #include -int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen) +static int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen) { int ret = LZ4_decompress_safe_partial(in, out, inlen, outlen, outlen); @@ -125,8 +125,7 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, unsigned int clusterpages, struct page **pages, unsigned int outlen, - unsigned short pageofs, - void (*endio)(struct page *)) + unsigned short pageofs) { void *vin, *vout; unsigned int nr_pages, i, j; @@ -148,19 +147,16 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, ret = z_erofs_unzip_lz4(vin, vout + pageofs, clusterpages * PAGE_SIZE, outlen); - if (ret >= 0) { - outlen = ret; - ret = 0; - } + if (ret < 0) + goto out; + ret = 0; for (i = 0; i < nr_pages; ++i) { j = min((unsigned int)PAGE_SIZE - pageofs, outlen); if (pages[i]) { - if (ret < 0) { - SetPageError(pages[i]); - } else if (clusterpages == 1 && - pages[i] == compressed_pages[0]) { + if (clusterpages == 1 && + pages[i] == compressed_pages[0]) { memcpy(vin + pageofs, vout + pageofs, j); } else { void *dst = kmap_atomic(pages[i]); @@ -168,12 +164,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages, memcpy(dst + pageofs, vout + pageofs, j); kunmap_atomic(dst); } - endio(pages[i]); } vout += PAGE_SIZE; outlen -= j; pageofs = 0; } + +out: preempt_enable(); if (clusterpages == 1) diff --git a/drivers/staging/erofs/utils.c 
b/drivers/staging/erofs/utils.c index b535898ca753..5f61f99f4c10 100644 --- a/drivers/staging/erofs/utils.c +++ b/drivers/staging/erofs/utils.c @@ -31,13 +31,32 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp) static atomic_long_t erofs_global_shrink_cnt; #ifdef CONFIG_EROFS_FS_ZIP +#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) +#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) -struct erofs_workgroup *erofs_find_workgroup( - struct super_block *sb, pgoff_t index, bool *tag) +static int erofs_workgroup_get(struct erofs_workgroup *grp) +{ + int o; + +repeat: + o = erofs_wait_on_workgroup_freezed(grp); + if (unlikely(o <= 0)) + return -1; + + if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o)) + goto repeat; + + /* decrease refcount paired by erofs_workgroup_put */ + if (unlikely(o == 1)) + atomic_long_dec(&erofs_global_shrink_cnt); + return 0; +} + +struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, + pgoff_t index, bool *tag) { struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_workgroup *grp; - int oldcount; repeat: rcu_read_lock(); @@ -46,15 +65,12 @@ repeat: *tag = xa_pointer_tag(grp); grp = xa_untag_pointer(grp); - if (erofs_workgroup_get(grp, &oldcount)) { + if (erofs_workgroup_get(grp)) { /* prefer to relax rcu read side */ rcu_read_unlock(); goto repeat; } - /* decrease refcount added by erofs_workgroup_put */ - if (unlikely(oldcount == 1)) - atomic_long_dec(&erofs_global_shrink_cnt); DBG_BUGON(index != grp->index); } rcu_read_unlock(); @@ -104,8 +120,6 @@ int erofs_register_workgroup(struct super_block *sb, return err; } -extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); - static void __erofs_workgroup_free(struct erofs_workgroup *grp) { atomic_long_dec(&erofs_global_shrink_cnt); @@ -131,9 +145,9 @@ static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) __erofs_workgroup_free(grp); } -bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp, - bool cleanup) +static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp, + bool cleanup) { /* * for managed cache enabled, the refcount of workgroups @@ -172,9 +186,9 @@ bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, #else /* for nocache case, no customized reclaim path at all */ -bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp, - bool cleanup) +static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp, + bool cleanup) { int cnt = atomic_read(&grp->refcount); @@ -256,14 +270,14 @@ void erofs_unregister_super(struct super_block *sb) spin_unlock(&erofs_sb_list_lock); } -unsigned long erofs_shrink_count(struct shrinker *shrink, - struct shrink_control *sc) +static unsigned long erofs_shrink_count(struct shrinker *shrink, + struct shrink_control *sc) { return atomic_long_read(&erofs_global_shrink_cnt); } -unsigned long erofs_shrink_scan(struct shrinker *shrink, - struct shrink_control *sc) +static unsigned long erofs_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc) { struct erofs_sb_info *sbi; struct list_head *p; @@ -319,3 +333,9 @@ unsigned long erofs_shrink_scan(struct shrinker *shrink, return freed; } +struct shrinker erofs_shrinker_info = { + .scan_objects = erofs_shrink_scan, + .count_objects = erofs_shrink_count, + .seeks = DEFAULT_SEEKS, +}; + diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c index 
80dca6a4adbe..f716ab0446e5 100644 --- a/drivers/staging/erofs/xattr.c +++ b/drivers/staging/erofs/xattr.c @@ -44,19 +44,48 @@ static inline void xattr_iter_end_final(struct xattr_iter *it) static int init_inode_xattrs(struct inode *inode) { + struct erofs_vnode *const vi = EROFS_V(inode); struct xattr_iter it; unsigned int i; struct erofs_xattr_ibody_header *ih; struct super_block *sb; struct erofs_sb_info *sbi; - struct erofs_vnode *vi; bool atomic_map; + int ret = 0; - if (likely(inode_has_inited_xattr(inode))) + /* the most case is that xattrs of this inode are initialized. */ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) return 0; - vi = EROFS_V(inode); - BUG_ON(!vi->xattr_isize); + if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE)) + return -ERESTARTSYS; + + /* someone has initialized xattrs for us? */ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) + goto out_unlock; + + /* + * bypass all xattr operations if ->xattr_isize is not greater than + * sizeof(struct erofs_xattr_ibody_header), in detail: + * 1) it is not enough to contain erofs_xattr_ibody_header then + * ->xattr_isize should be 0 (it means no xattr); + * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk + * undefined right now (maybe use later with some new sb feature). + */ + if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { + errln("xattr_isize %d of nid %llu is not supported yet", + vi->xattr_isize, vi->nid); + ret = -ENOTSUPP; + goto out_unlock; + } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { + if (unlikely(vi->xattr_isize)) { + DBG_BUGON(1); + ret = -EIO; + goto out_unlock; /* xattr ondisk layout error */ + } + ret = -ENOATTR; + goto out_unlock; + } sb = inode->i_sb; sbi = EROFS_SB(sb); @@ -64,8 +93,10 @@ static int init_inode_xattrs(struct inode *inode) it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); it.page = erofs_get_inline_page(inode, it.blkaddr); - if (IS_ERR(it.page)) - return PTR_ERR(it.page); + if (IS_ERR(it.page)) { + ret = PTR_ERR(it.page); + goto out_unlock; + } /* read in shared xattr array (non-atomic, see kmalloc below) */ it.kaddr = kmap(it.page); @@ -78,7 +109,8 @@ static int init_inode_xattrs(struct inode *inode) sizeof(uint), GFP_KERNEL); if (vi->xattr_shared_xattrs == NULL) { xattr_iter_end(&it, atomic_map); - return -ENOMEM; + ret = -ENOMEM; + goto out_unlock; } /* let's skip ibody header */ @@ -92,8 +124,12 @@ static int init_inode_xattrs(struct inode *inode) it.page = erofs_get_meta_page(sb, ++it.blkaddr, S_ISDIR(inode->i_mode)); - if (IS_ERR(it.page)) - return PTR_ERR(it.page); + if (IS_ERR(it.page)) { + kfree(vi->xattr_shared_xattrs); + vi->xattr_shared_xattrs = NULL; + ret = PTR_ERR(it.page); + goto out_unlock; + } it.kaddr = kmap_atomic(it.page); atomic_map = true; @@ -105,8 +141,11 @@ static int init_inode_xattrs(struct inode *inode) } xattr_iter_end(&it, atomic_map); - inode_set_inited_xattr(inode); - return 0; + set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); + +out_unlock: + clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); + return ret; } /* @@ -117,10 +156,12 @@ static int init_inode_xattrs(struct inode *inode) * and need to be handled */ struct xattr_iter_handlers { - int (*entry)(struct xattr_iter *, struct erofs_xattr_entry *); - int (*name)(struct xattr_iter *, unsigned int, char *, unsigned int); - int (*alloc_buffer)(struct xattr_iter *, unsigned int); - void (*value)(struct xattr_iter *, unsigned int, char *, unsigned int); + int (*entry)(struct xattr_iter *_it, struct 
erofs_xattr_entry *entry); + int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); + int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz); + void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); }; static inline int xattr_iter_fixup(struct xattr_iter *it) @@ -422,7 +463,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { - struct erofs_vnode *const vi = EROFS_V(inode); struct erofs_sb_info *const sbi = EROFS_I_SB(inode); switch (handler->flags) { @@ -440,9 +480,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler, return -EINVAL; } - if (!vi->xattr_isize) - return -ENOATTR; - return erofs_getxattr(inode, handler->flags, name, buffer, size); } @@ -503,8 +540,7 @@ static int xattr_entrylist(struct xattr_iter *_it, if (h == NULL || (h->list != NULL && !h->list(it->dentry))) return 1; - /* Note that at least one of 'prefix' and 'name' should be non-NULL */ - prefix = h->prefix != NULL ? h->prefix : h->name; + prefix = xattr_prefix(h); prefix_len = strlen(prefix); if (it->buffer == NULL) { @@ -627,3 +663,40 @@ ssize_t erofs_listxattr(struct dentry *dentry, return shared_listxattr(&it); } +#ifdef CONFIG_EROFS_FS_POSIX_ACL +struct posix_acl *erofs_get_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + int prefix, rc; + char *value = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT; + break; + default: + return ERR_PTR(-EINVAL); + } + + rc = erofs_getxattr(inode, prefix, "", NULL, 0); + if (rc > 0) { + value = kmalloc(rc, GFP_KERNEL); + if (!value) + return ERR_PTR(-ENOMEM); + rc = erofs_getxattr(inode, prefix, "", value, rc); + } + + if (rc == -ENOATTR) + acl = NULL; + else if (rc < 0) + acl = ERR_PTR(rc); + else + acl = posix_acl_from_xattr(&init_user_ns, value, rc); + kfree(value); + return acl; +} +#endif + diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h index 0c7379282fc5..35ba5ac2139a 100644 --- a/drivers/staging/erofs/xattr.h +++ b/drivers/staging/erofs/xattr.h @@ -68,9 +68,7 @@ static const struct xattr_handler *xattr_handler_map[] = { } #ifdef CONFIG_EROFS_FS_XATTR - -extern const struct inode_operations erofs_generic_xattr_iops; -extern const struct inode_operations erofs_dir_xattr_iops; +extern const struct xattr_handler *erofs_xattr_handlers[]; int erofs_getxattr(struct inode *, int, const char *, void *, size_t); ssize_t erofs_listxattr(struct dentry *, char *, size_t); @@ -89,5 +87,11 @@ static ssize_t __maybe_unused erofs_listxattr(struct dentry *dentry, } #endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL +struct posix_acl *erofs_get_acl(struct inode *inode, int type); +#else +#define erofs_get_acl (NULL) +#endif + #endif diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c index f6f30f5bf15a..8f27bd8da17d 100644 --- a/drivers/staging/fbtft/fb_agm1264k-fl.c +++ b/drivers/staging/fbtft/fb_agm1264k-fl.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include @@ -79,14 +79,14 @@ static int init_display(struct fbtft_par *par) static void reset(struct fbtft_par *par) { - if (par->gpio.reset == -1) + if (!par->gpio.reset) return; dev_dbg(par->info->device, "%s()\n", __func__); - gpio_set_value(par->gpio.reset, 0); + 
gpiod_set_value(par->gpio.reset, 0); udelay(20); - gpio_set_value(par->gpio.reset, 1); + gpiod_set_value(par->gpio.reset, 1); mdelay(120); } @@ -98,30 +98,30 @@ static int verify_gpios(struct fbtft_par *par) dev_dbg(par->info->device, "%s()\n", __func__); - if (par->EPIN < 0) { + if (!par->EPIN) { dev_err(par->info->device, "Missing info about 'wr' (aka E) gpio. Aborting.\n"); return -EINVAL; } for (i = 0; i < 8; ++i) { - if (par->gpio.db[i] < 0) { + if (!par->gpio.db[i]) { dev_err(par->info->device, "Missing info about 'db[%i]' gpio. Aborting.\n", i); return -EINVAL; } } - if (par->CS0 < 0) { + if (!par->CS0) { dev_err(par->info->device, "Missing info about 'cs0' gpio. Aborting.\n"); return -EINVAL; } - if (par->CS1 < 0) { + if (!par->CS1) { dev_err(par->info->device, "Missing info about 'cs1' gpio. Aborting.\n"); return -EINVAL; } - if (par->RW < 0) { + if (!par->RW) { dev_err(par->info->device, "Missing info about 'rw' gpio. Aborting.\n"); return -EINVAL; @@ -139,22 +139,22 @@ request_gpios_match(struct fbtft_par *par, const struct fbtft_gpio *gpio) if (strcasecmp(gpio->name, "wr") == 0) { /* left ks0108 E pin */ par->EPIN = gpio->gpio; - return GPIOF_OUT_INIT_LOW; + return GPIOD_OUT_LOW; } else if (strcasecmp(gpio->name, "cs0") == 0) { /* left ks0108 controller pin */ par->CS0 = gpio->gpio; - return GPIOF_OUT_INIT_HIGH; + return GPIOD_OUT_HIGH; } else if (strcasecmp(gpio->name, "cs1") == 0) { /* right ks0108 controller pin */ par->CS1 = gpio->gpio; - return GPIOF_OUT_INIT_HIGH; + return GPIOD_OUT_HIGH; } /* if write (rw = 0) e(1->0) perform write */ /* if read (rw = 1) e(0->1) set data on D0-7*/ else if (strcasecmp(gpio->name, "rw") == 0) { par->RW = gpio->gpio; - return GPIOF_OUT_INIT_LOW; + return GPIOD_OUT_LOW; } return FBTFT_GPIO_NO_MATCH; @@ -194,15 +194,15 @@ static void write_reg8_bus8(struct fbtft_par *par, int len, ...) 
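[Editor's note] The fbtft hunks in this area move the drivers from the legacy integer GPIO calls (gpio_set_value(), with -1 meaning "no GPIO wired up") to the descriptor-based consumer API (gpiod_set_value(), with a NULL struct gpio_desc * meaning the line is absent), which is also why the verify_gpios() checks change from par->CS0 < 0 to !par->CS0. Below is a minimal sketch of the consumer-side idiom the drivers are converging on; the device pointer, the "reset" line name and the helper are assumptions for illustration, not code from the patch.

/*
 * Sketch (assumed example): request an optional "reset" descriptor and
 * pulse it.  Values passed to gpiod_set_value() are logical; line
 * polarity is handled by the descriptor itself.
 */
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int sketch_reset_pulse(struct device *dev)
{
	struct gpio_desc *reset;

	/* NULL (not -1) is returned when firmware describes no "reset" line */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (!reset)
		return 0;	/* nothing to do, like the new "if (!par->gpio.reset)" checks */

	gpiod_set_value(reset, 1);	/* assert reset */
	udelay(20);
	gpiod_set_value(reset, 0);	/* deassert and let the controller come up */
	mdelay(120);
	return 0;
}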
/* select chip */ if (*buf) { /* cs1 */ - gpio_set_value(par->CS0, 1); - gpio_set_value(par->CS1, 0); + gpiod_set_value(par->CS0, 1); + gpiod_set_value(par->CS1, 0); } else { /* cs0 */ - gpio_set_value(par->CS0, 0); - gpio_set_value(par->CS1, 1); + gpiod_set_value(par->CS0, 0); + gpiod_set_value(par->CS1, 1); } - gpio_set_value(par->RS, 0); /* RS->0 (command mode) */ + gpiod_set_value(par->RS, 0); /* RS->0 (command mode) */ len--; if (len) { @@ -364,7 +364,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) write_reg(par, 0x00, (0x17 << 3) | (u8)y); /* write bitmap */ - gpio_set_value(par->RS, 1); /* RS->1 (data mode) */ + gpiod_set_value(par->RS, 1); /* RS->1 (data mode) */ ret = par->fbtftops.write(par, buf, len); if (ret < 0) dev_err(par->info->device, @@ -387,7 +387,7 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) write_reg(par, 0x01, (0x17 << 3) | (u8)y); /* write bitmap */ - gpio_set_value(par->RS, 1); /* RS->1 (data mode) */ + gpiod_set_value(par->RS, 1); /* RS->1 (data mode) */ par->fbtftops.write(par, buf, len); if (ret < 0) dev_err(par->info->device, @@ -397,8 +397,8 @@ static int write_vmem(struct fbtft_par *par, size_t offset, size_t len) } kfree(convert_buf); - gpio_set_value(par->CS0, 1); - gpio_set_value(par->CS1, 1); + gpiod_set_value(par->CS0, 1); + gpiod_set_value(par->CS1, 1); return ret; } @@ -408,7 +408,7 @@ static int write(struct fbtft_par *par, void *buf, size_t len) fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len, "%s(len=%d): ", __func__, len); - gpio_set_value(par->RW, 0); /* set write mode */ + gpiod_set_value(par->RW, 0); /* set write mode */ while (len--) { u8 i, data; @@ -417,12 +417,12 @@ static int write(struct fbtft_par *par, void *buf, size_t len) /* set data bus */ for (i = 0; i < 8; ++i) - gpio_set_value(par->gpio.db[i], data & (1 << i)); + gpiod_set_value(par->gpio.db[i], data & (1 << i)); /* set E */ - gpio_set_value(par->EPIN, 1); + gpiod_set_value(par->EPIN, 1); udelay(5); /* unset E - write */ - gpio_set_value(par->EPIN, 0); + gpiod_set_value(par->EPIN, 0); udelay(1); } diff --git a/drivers/staging/fbtft/fb_bd663474.c b/drivers/staging/fbtft/fb_bd663474.c index a58c514f4721..b6c6d66e4eb1 100644 --- a/drivers/staging/fbtft/fb_bd663474.c +++ b/drivers/staging/fbtft/fb_bd663474.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #include "fbtft.h" @@ -24,8 +24,8 @@ static int init_display(struct fbtft_par *par) { - if (par->gpio.cs != -1) - gpio_set_value(par->gpio.cs, 0); /* Activate chip */ + if (!par->gpio.cs) + gpiod_set_value(par->gpio.cs, 0); /* Activate chip */ par->fbtftops.reset(par); diff --git a/drivers/staging/fbtft/fb_ili9163.c b/drivers/staging/fbtft/fb_ili9163.c index 86e140244aab..d609a2b67db9 100644 --- a/drivers/staging/fbtft/fb_ili9163.c +++ b/drivers/staging/fbtft/fb_ili9163.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include